summaryrefslogtreecommitdiff
path: root/deps/v8
diff options
context:
space:
mode:
authorMichaël Zasso <targos@protonmail.com>2016-05-27 16:37:42 +0200
committerMichaël Zasso <targos@protonmail.com>2016-06-29 09:04:28 +0200
commit2cc29517966de7257a2f1b34c58c77225a21e05d (patch)
tree210bd177df2f06eec16e1e22edafdbcbffe66f8a /deps/v8
parentbbf3838c70aaec1dd296fa75ae334fd1c7866df3 (diff)
downloadandroid-node-v8-2cc29517966de7257a2f1b34c58c77225a21e05d.tar.gz
android-node-v8-2cc29517966de7257a2f1b34c58c77225a21e05d.tar.bz2
android-node-v8-2cc29517966de7257a2f1b34c58c77225a21e05d.zip
deps: update V8 to 5.1.281.69
Pick up the latest branch-head for V8 5.1. This branch brings in improved language support and performance improvements. For full details: http://v8project.blogspot.com/2016/04/v8-release-51.html * Picks up the latest branch head for 5.1 [1] * Edit v8 gitignore to allow trace_event copy * Update V8 DEP trace_event as per deps/v8/DEPS [2] [1] https://chromium.googlesource.com/v8/v8.git/+/dc81244 [2] https://chromium.googlesource.com/chromium/src/base/trace_event/common/+/c8c8665 PR-URL: https://github.com/nodejs/node/pull/7016 Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Diffstat (limited to 'deps/v8')
-rw-r--r--deps/v8/.ycm_extra_conf.py2
-rw-r--r--deps/v8/AUTHORS2
-rw-r--r--deps/v8/BUILD.gn444
-rw-r--r--deps/v8/CODE_OF_CONDUCT.md8
-rw-r--r--deps/v8/ChangeLog1761
-rw-r--r--deps/v8/DEPS16
-rw-r--r--deps/v8/Makefile7
-rw-r--r--deps/v8/OWNERS7
-rw-r--r--deps/v8/base/trace_event/common/trace_event_common.h16
-rwxr-xr-xdeps/v8/build/coverage_wrapper.py36
-rwxr-xr-xdeps/v8/build/get_landmines.py1
-rw-r--r--deps/v8/build/isolate.gypi2
-rw-r--r--deps/v8/build/standalone.gypi64
-rw-r--r--deps/v8/build/toolchain.gypi24
-rw-r--r--deps/v8/include/libplatform/libplatform.h2
-rw-r--r--deps/v8/include/v8-debug.h14
-rw-r--r--deps/v8/include/v8-experimental.h2
-rw-r--r--deps/v8/include/v8-platform.h6
-rw-r--r--deps/v8/include/v8-version.h6
-rw-r--r--deps/v8/include/v8.h255
-rw-r--r--deps/v8/include/v8config.h8
-rw-r--r--deps/v8/infra/config/cq.cfg7
-rw-r--r--deps/v8/src/DEPS3
-rw-r--r--deps/v8/src/accessors.cc3
-rw-r--r--deps/v8/src/api-arguments.cc31
-rw-r--r--deps/v8/src/api-arguments.h254
-rw-r--r--deps/v8/src/api-experimental.cc15
-rw-r--r--deps/v8/src/api-natives.cc74
-rw-r--r--deps/v8/src/api.cc415
-rw-r--r--deps/v8/src/api.h49
-rw-r--r--deps/v8/src/arguments.cc85
-rw-r--r--deps/v8/src/arguments.h222
-rw-r--r--deps/v8/src/arm/assembler-arm-inl.h16
-rw-r--r--deps/v8/src/arm/assembler-arm.cc41
-rw-r--r--deps/v8/src/arm/assembler-arm.h8
-rw-r--r--deps/v8/src/arm/builtins-arm.cc147
-rw-r--r--deps/v8/src/arm/code-stubs-arm.cc248
-rw-r--r--deps/v8/src/arm/codegen-arm.cc4
-rw-r--r--deps/v8/src/arm/constants-arm.h2
-rw-r--r--deps/v8/src/arm/deoptimizer-arm.cc13
-rw-r--r--deps/v8/src/arm/disasm-arm.cc30
-rw-r--r--deps/v8/src/arm/frames-arm.h15
-rw-r--r--deps/v8/src/arm/interface-descriptors-arm.cc72
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.cc371
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.h39
-rw-r--r--deps/v8/src/arm/simulator-arm.cc61
-rw-r--r--deps/v8/src/arm/simulator-arm.h5
-rw-r--r--deps/v8/src/arm64/assembler-arm64-inl.h16
-rw-r--r--deps/v8/src/arm64/assembler-arm64.cc10
-rw-r--r--deps/v8/src/arm64/assembler-arm64.h8
-rw-r--r--deps/v8/src/arm64/builtins-arm64.cc142
-rw-r--r--deps/v8/src/arm64/code-stubs-arm64.cc240
-rw-r--r--deps/v8/src/arm64/cpu-arm64.cc2
-rw-r--r--deps/v8/src/arm64/deoptimizer-arm64.cc16
-rw-r--r--deps/v8/src/arm64/frames-arm64.h15
-rw-r--r--deps/v8/src/arm64/interface-descriptors-arm64.cc80
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.cc229
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.h16
-rw-r--r--deps/v8/src/assembler.cc247
-rw-r--r--deps/v8/src/assembler.h89
-rw-r--r--deps/v8/src/ast/ast-numbering.cc14
-rw-r--r--deps/v8/src/ast/ast-value-factory.h1
-rw-r--r--deps/v8/src/ast/ast.cc55
-rw-r--r--deps/v8/src/ast/ast.h136
-rw-r--r--deps/v8/src/ast/prettyprinter.cc7
-rw-r--r--deps/v8/src/ast/scopes.cc68
-rw-r--r--deps/v8/src/ast/scopes.h32
-rw-r--r--deps/v8/src/background-parsing-task.cc5
-rw-r--r--deps/v8/src/bailout-reason.h16
-rw-r--r--deps/v8/src/base/accounting-allocator.cc33
-rw-r--r--deps/v8/src/base/accounting-allocator.h34
-rw-r--r--deps/v8/src/base/atomicops_internals_arm_gcc.h5
-rw-r--r--deps/v8/src/base/cpu.cc7
-rw-r--r--deps/v8/src/base/logging.cc11
-rw-r--r--deps/v8/src/base/logging.h2
-rw-r--r--deps/v8/src/base/macros.h11
-rw-r--r--deps/v8/src/base/platform/platform-linux.cc6
-rw-r--r--deps/v8/src/base/platform/platform-posix.cc17
-rw-r--r--deps/v8/src/base/platform/platform-win32.cc1
-rw-r--r--deps/v8/src/base/platform/platform.h5
-rw-r--r--deps/v8/src/base/platform/semaphore.cc3
-rw-r--r--deps/v8/src/base/platform/time.cc84
-rw-r--r--deps/v8/src/base/platform/time.h7
-rw-r--r--deps/v8/src/base/win32-headers.h2
-rw-r--r--deps/v8/src/bootstrapper.cc420
-rw-r--r--deps/v8/src/builtins.cc1155
-rw-r--r--deps/v8/src/builtins.h62
-rw-r--r--deps/v8/src/code-factory.cc180
-rw-r--r--deps/v8/src/code-factory.h34
-rw-r--r--deps/v8/src/code-stubs-hydrogen.cc284
-rw-r--r--deps/v8/src/code-stubs.cc2748
-rw-r--r--deps/v8/src/code-stubs.h573
-rw-r--r--deps/v8/src/codegen.h2
-rw-r--r--deps/v8/src/collector.h247
-rw-r--r--deps/v8/src/compiler.cc853
-rw-r--r--deps/v8/src/compiler.h274
-rw-r--r--deps/v8/src/compiler/access-info.cc8
-rw-r--r--deps/v8/src/compiler/arm/code-generator-arm.cc180
-rw-r--r--deps/v8/src/compiler/arm/instruction-codes-arm.h6
-rw-r--r--deps/v8/src/compiler/arm/instruction-scheduler-arm.cc6
-rw-r--r--deps/v8/src/compiler/arm/instruction-selector-arm.cc202
-rw-r--r--deps/v8/src/compiler/arm64/code-generator-arm64.cc213
-rw-r--r--deps/v8/src/compiler/arm64/instruction-selector-arm64.cc282
-rw-r--r--deps/v8/src/compiler/ast-graph-builder.cc130
-rw-r--r--deps/v8/src/compiler/ast-graph-builder.h9
-rw-r--r--deps/v8/src/compiler/branch-elimination.cc46
-rw-r--r--deps/v8/src/compiler/branch-elimination.h7
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.cc225
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.h8
-rw-r--r--deps/v8/src/compiler/c-linkage.cc30
-rw-r--r--deps/v8/src/compiler/change-lowering.cc34
-rw-r--r--deps/v8/src/compiler/change-lowering.h2
-rw-r--r--deps/v8/src/compiler/code-generator-impl.h13
-rw-r--r--deps/v8/src/compiler/code-generator.cc171
-rw-r--r--deps/v8/src/compiler/code-generator.h27
-rw-r--r--deps/v8/src/compiler/code-stub-assembler.cc860
-rw-r--r--deps/v8/src/compiler/code-stub-assembler.h297
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.cc34
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.h1
-rw-r--r--deps/v8/src/compiler/common-operator.cc26
-rw-r--r--deps/v8/src/compiler/common-operator.h2
-rw-r--r--deps/v8/src/compiler/frame-elider.cc35
-rw-r--r--deps/v8/src/compiler/frame-states.cc3
-rw-r--r--deps/v8/src/compiler/frame-states.h2
-rw-r--r--deps/v8/src/compiler/frame.cc33
-rw-r--r--deps/v8/src/compiler/frame.h55
-rw-r--r--deps/v8/src/compiler/gap-resolver.cc6
-rw-r--r--deps/v8/src/compiler/graph-replay.cc2
-rw-r--r--deps/v8/src/compiler/graph-visualizer.cc40
-rw-r--r--deps/v8/src/compiler/graph.cc2
-rw-r--r--deps/v8/src/compiler/ia32/code-generator-ia32.cc271
-rw-r--r--deps/v8/src/compiler/ia32/instruction-codes-ia32.h11
-rw-r--r--deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc10
-rw-r--r--deps/v8/src/compiler/ia32/instruction-selector-ia32.cc279
-rw-r--r--deps/v8/src/compiler/instruction-codes.h79
-rw-r--r--deps/v8/src/compiler/instruction-scheduler.cc18
-rw-r--r--deps/v8/src/compiler/instruction-selector-impl.h37
-rw-r--r--deps/v8/src/compiler/instruction-selector.cc191
-rw-r--r--deps/v8/src/compiler/instruction-selector.h14
-rw-r--r--deps/v8/src/compiler/instruction.cc60
-rw-r--r--deps/v8/src/compiler/instruction.h24
-rw-r--r--deps/v8/src/compiler/int64-lowering.cc604
-rw-r--r--deps/v8/src/compiler/int64-lowering.h22
-rw-r--r--deps/v8/src/compiler/js-builtin-reducer.cc109
-rw-r--r--deps/v8/src/compiler/js-builtin-reducer.h5
-rw-r--r--deps/v8/src/compiler/js-call-reducer.cc48
-rw-r--r--deps/v8/src/compiler/js-call-reducer.h9
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.cc89
-rw-r--r--deps/v8/src/compiler/js-global-object-specialization.cc36
-rw-r--r--deps/v8/src/compiler/js-graph.cc4
-rw-r--r--deps/v8/src/compiler/js-graph.h2
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.cc5
-rw-r--r--deps/v8/src/compiler/js-inlining.cc62
-rw-r--r--deps/v8/src/compiler/js-inlining.h2
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.cc173
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.h10
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.cc348
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.h4
-rw-r--r--deps/v8/src/compiler/js-operator.cc33
-rw-r--r--deps/v8/src/compiler/js-operator.h19
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.cc68
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.h2
-rw-r--r--deps/v8/src/compiler/jump-threading.cc6
-rw-r--r--deps/v8/src/compiler/linkage.cc22
-rw-r--r--deps/v8/src/compiler/linkage.h14
-rw-r--r--deps/v8/src/compiler/load-elimination.cc23
-rw-r--r--deps/v8/src/compiler/load-elimination.h12
-rw-r--r--deps/v8/src/compiler/loop-analysis.h2
-rw-r--r--deps/v8/src/compiler/loop-peeling.cc2
-rw-r--r--deps/v8/src/compiler/machine-operator.cc22
-rw-r--r--deps/v8/src/compiler/machine-operator.h11
-rw-r--r--deps/v8/src/compiler/mips/code-generator-mips.cc107
-rw-r--r--deps/v8/src/compiler/mips/instruction-codes-mips.h1
-rw-r--r--deps/v8/src/compiler/mips/instruction-selector-mips.cc90
-rw-r--r--deps/v8/src/compiler/mips64/code-generator-mips64.cc99
-rw-r--r--deps/v8/src/compiler/mips64/instruction-selector-mips64.cc82
-rw-r--r--deps/v8/src/compiler/opcodes.h29
-rw-r--r--deps/v8/src/compiler/operator-properties.cc2
-rw-r--r--deps/v8/src/compiler/pipeline.cc59
-rw-r--r--deps/v8/src/compiler/ppc/code-generator-ppc.cc220
-rw-r--r--deps/v8/src/compiler/ppc/instruction-codes-ppc.h6
-rw-r--r--deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc6
-rw-r--r--deps/v8/src/compiler/ppc/instruction-selector-ppc.cc217
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.cc44
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.h49
-rw-r--r--deps/v8/src/compiler/register-allocator-verifier.cc93
-rw-r--r--deps/v8/src/compiler/register-allocator.cc118
-rw-r--r--deps/v8/src/compiler/register-allocator.h5
-rw-r--r--deps/v8/src/compiler/representation-change.cc102
-rw-r--r--deps/v8/src/compiler/representation-change.h2
-rw-r--r--deps/v8/src/compiler/s390/OWNERS5
-rw-r--r--deps/v8/src/compiler/s390/code-generator-s390.cc2079
-rw-r--r--deps/v8/src/compiler/s390/instruction-codes-s390.h160
-rw-r--r--deps/v8/src/compiler/s390/instruction-scheduler-s390.cc163
-rw-r--r--deps/v8/src/compiler/s390/instruction-selector-s390.cc1769
-rw-r--r--deps/v8/src/compiler/schedule.cc79
-rw-r--r--deps/v8/src/compiler/schedule.h7
-rw-r--r--deps/v8/src/compiler/scheduler.cc5
-rw-r--r--deps/v8/src/compiler/select-lowering.cc53
-rw-r--r--deps/v8/src/compiler/select-lowering.h9
-rw-r--r--deps/v8/src/compiler/simplified-lowering.cc976
-rw-r--r--deps/v8/src/compiler/simplified-lowering.h8
-rw-r--r--deps/v8/src/compiler/simplified-operator-reducer.cc16
-rw-r--r--deps/v8/src/compiler/simplified-operator-reducer.h5
-rw-r--r--deps/v8/src/compiler/simplified-operator.cc11
-rw-r--r--deps/v8/src/compiler/simplified-operator.h8
-rw-r--r--deps/v8/src/compiler/type-hint-analyzer.cc8
-rw-r--r--deps/v8/src/compiler/typer.cc135
-rw-r--r--deps/v8/src/compiler/verifier.cc86
-rw-r--r--deps/v8/src/compiler/wasm-compiler.cc1105
-rw-r--r--deps/v8/src/compiler/wasm-compiler.h64
-rw-r--r--deps/v8/src/compiler/wasm-linkage.cc20
-rw-r--r--deps/v8/src/compiler/x64/code-generator-x64.cc109
-rw-r--r--deps/v8/src/compiler/x64/instruction-codes-x64.h5
-rw-r--r--deps/v8/src/compiler/x64/instruction-scheduler-x64.cc4
-rw-r--r--deps/v8/src/compiler/x64/instruction-selector-x64.cc419
-rw-r--r--deps/v8/src/compiler/x87/code-generator-x87.cc337
-rw-r--r--deps/v8/src/compiler/x87/instruction-codes-x87.h10
-rw-r--r--deps/v8/src/compiler/x87/instruction-selector-x87.cc290
-rw-r--r--deps/v8/src/compiler/zone-pool.cc11
-rw-r--r--deps/v8/src/compiler/zone-pool.h3
-rw-r--r--deps/v8/src/contexts-inl.h4
-rw-r--r--deps/v8/src/contexts.cc47
-rw-r--r--deps/v8/src/contexts.h51
-rw-r--r--deps/v8/src/counters.cc25
-rw-r--r--deps/v8/src/counters.h30
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-arm.cc112
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-arm.h79
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc236
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-codegen-arm.h5
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-arm64.cc109
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-arm64.h79
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc243
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h5
-rw-r--r--deps/v8/src/crankshaft/compilation-phase.cc44
-rw-r--r--deps/v8/src/crankshaft/compilation-phase.h41
-rw-r--r--deps/v8/src/crankshaft/hydrogen-bch.cc379
-rw-r--r--deps/v8/src/crankshaft/hydrogen-bch.h33
-rw-r--r--deps/v8/src/crankshaft/hydrogen-environment-liveness.cc4
-rw-r--r--deps/v8/src/crankshaft/hydrogen-instructions.cc600
-rw-r--r--deps/v8/src/crankshaft/hydrogen-instructions.h660
-rw-r--r--deps/v8/src/crankshaft/hydrogen-osr.cc2
-rw-r--r--deps/v8/src/crankshaft/hydrogen-types.cc11
-rw-r--r--deps/v8/src/crankshaft/hydrogen.cc881
-rw-r--r--deps/v8/src/crankshaft/hydrogen.h131
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc488
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h11
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-ia32.cc151
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-ia32.h122
-rw-r--r--deps/v8/src/crankshaft/lithium-allocator-inl.h2
-rw-r--r--deps/v8/src/crankshaft/lithium-allocator.cc5
-rw-r--r--deps/v8/src/crankshaft/lithium-allocator.h1
-rw-r--r--deps/v8/src/crankshaft/lithium-codegen.cc25
-rw-r--r--deps/v8/src/crankshaft/lithium-inl.h2
-rw-r--r--deps/v8/src/crankshaft/lithium.cc93
-rw-r--r--deps/v8/src/crankshaft/lithium.h11
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc238
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-codegen-mips.h5
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-mips.cc113
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-mips.h79
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc296
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h5
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-mips64.cc113
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-mips64.h79
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc242
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h5
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-ppc.cc109
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-ppc.h84
-rw-r--r--deps/v8/src/crankshaft/s390/OWNERS5
-rw-r--r--deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc5668
-rw-r--r--deps/v8/src/crankshaft/s390/lithium-codegen-s390.h359
-rw-r--r--deps/v8/src/crankshaft/s390/lithium-gap-resolver-s390.cc280
-rw-r--r--deps/v8/src/crankshaft/s390/lithium-gap-resolver-s390.h58
-rw-r--r--deps/v8/src/crankshaft/s390/lithium-s390.cc2290
-rw-r--r--deps/v8/src/crankshaft/s390/lithium-s390.h2414
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc310
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-codegen-x64.h5
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-x64.cc138
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-x64.h122
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc478
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-codegen-x87.h13
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-x87.cc125
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-x87.h79
-rw-r--r--deps/v8/src/d8.cc131
-rw-r--r--deps/v8/src/d8.h4
-rw-r--r--deps/v8/src/d8.js4
-rw-r--r--deps/v8/src/debug/arm/debug-arm.cc23
-rw-r--r--deps/v8/src/debug/arm64/debug-arm64.cc27
-rw-r--r--deps/v8/src/debug/debug-evaluate.cc293
-rw-r--r--deps/v8/src/debug/debug-evaluate.h39
-rw-r--r--deps/v8/src/debug/debug-frames.cc4
-rw-r--r--deps/v8/src/debug/debug-scopes.cc144
-rw-r--r--deps/v8/src/debug/debug-scopes.h43
-rw-r--r--deps/v8/src/debug/debug.cc251
-rw-r--r--deps/v8/src/debug/debug.h37
-rw-r--r--deps/v8/src/debug/debug.js37
-rw-r--r--deps/v8/src/debug/ia32/debug-ia32.cc27
-rw-r--r--deps/v8/src/debug/liveedit.cc82
-rw-r--r--deps/v8/src/debug/liveedit.h13
-rw-r--r--deps/v8/src/debug/mips/debug-mips.cc25
-rw-r--r--deps/v8/src/debug/mips64/debug-mips64.cc25
-rw-r--r--deps/v8/src/debug/mirrors.js71
-rw-r--r--deps/v8/src/debug/ppc/debug-ppc.cc23
-rw-r--r--deps/v8/src/debug/s390/OWNERS5
-rw-r--r--deps/v8/src/debug/s390/debug-s390.cc165
-rw-r--r--deps/v8/src/debug/x64/debug-x64.cc27
-rw-r--r--deps/v8/src/debug/x87/debug-x87.cc27
-rw-r--r--deps/v8/src/deoptimizer.cc593
-rw-r--r--deps/v8/src/deoptimizer.h60
-rw-r--r--deps/v8/src/disassembler.cc2
-rw-r--r--deps/v8/src/elements.cc724
-rw-r--r--deps/v8/src/elements.h44
-rw-r--r--deps/v8/src/execution.cc10
-rw-r--r--deps/v8/src/external-reference-table.cc354
-rw-r--r--deps/v8/src/external-reference-table.h50
-rw-r--r--deps/v8/src/factory.cc99
-rw-r--r--deps/v8/src/factory.h24
-rw-r--r--deps/v8/src/fast-accessor-assembler.cc (renamed from deps/v8/src/compiler/fast-accessor-assembler.cc)191
-rw-r--r--deps/v8/src/fast-accessor-assembler.h (renamed from deps/v8/src/compiler/fast-accessor-assembler.h)33
-rw-r--r--deps/v8/src/flag-definitions.h118
-rw-r--r--deps/v8/src/frames-inl.h34
-rw-r--r--deps/v8/src/frames.cc286
-rw-r--r--deps/v8/src/frames.h249
-rw-r--r--deps/v8/src/full-codegen/arm/full-codegen-arm.cc370
-rw-r--r--deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc360
-rw-r--r--deps/v8/src/full-codegen/full-codegen.cc226
-rw-r--r--deps/v8/src/full-codegen/full-codegen.h106
-rw-r--r--deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc370
-rw-r--r--deps/v8/src/full-codegen/mips/full-codegen-mips.cc378
-rw-r--r--deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc392
-rw-r--r--deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc362
-rw-r--r--deps/v8/src/full-codegen/s390/OWNERS5
-rw-r--r--deps/v8/src/full-codegen/s390/full-codegen-s390.cc3981
-rw-r--r--deps/v8/src/full-codegen/x64/full-codegen-x64.cc363
-rw-r--r--deps/v8/src/full-codegen/x87/full-codegen-x87.cc370
-rw-r--r--deps/v8/src/gdb-jit.cc22
-rw-r--r--deps/v8/src/global-handles.cc109
-rw-r--r--deps/v8/src/global-handles.h5
-rw-r--r--deps/v8/src/globals.h104
-rw-r--r--deps/v8/src/handles.cc3
-rw-r--r--deps/v8/src/heap-symbols.h2
-rw-r--r--deps/v8/src/heap/gc-idle-time-handler.cc56
-rw-r--r--deps/v8/src/heap/gc-idle-time-handler.h24
-rw-r--r--deps/v8/src/heap/gc-tracer.cc404
-rw-r--r--deps/v8/src/heap/gc-tracer.h374
-rw-r--r--deps/v8/src/heap/heap-inl.h47
-rw-r--r--deps/v8/src/heap/heap.cc517
-rw-r--r--deps/v8/src/heap/heap.h140
-rw-r--r--deps/v8/src/heap/incremental-marking-inl.h5
-rw-r--r--deps/v8/src/heap/incremental-marking-job.cc16
-rw-r--r--deps/v8/src/heap/incremental-marking-job.h3
-rw-r--r--deps/v8/src/heap/incremental-marking.cc153
-rw-r--r--deps/v8/src/heap/incremental-marking.h42
-rw-r--r--deps/v8/src/heap/mark-compact-inl.h24
-rw-r--r--deps/v8/src/heap/mark-compact.cc1407
-rw-r--r--deps/v8/src/heap/mark-compact.h244
-rw-r--r--deps/v8/src/heap/memory-reducer.cc8
-rw-r--r--deps/v8/src/heap/objects-visiting-inl.h19
-rw-r--r--deps/v8/src/heap/objects-visiting.cc1
-rw-r--r--deps/v8/src/heap/page-parallel-job.h194
-rw-r--r--deps/v8/src/heap/remembered-set.cc38
-rw-r--r--deps/v8/src/heap/remembered-set.h120
-rw-r--r--deps/v8/src/heap/scavenge-job.cc28
-rw-r--r--deps/v8/src/heap/scavenge-job.h4
-rw-r--r--deps/v8/src/heap/scavenger.cc6
-rw-r--r--deps/v8/src/heap/slot-set.h123
-rw-r--r--deps/v8/src/heap/slots-buffer.cc164
-rw-r--r--deps/v8/src/heap/slots-buffer.h175
-rw-r--r--deps/v8/src/heap/spaces-inl.h145
-rw-r--r--deps/v8/src/heap/spaces.cc841
-rw-r--r--deps/v8/src/heap/spaces.h767
-rw-r--r--deps/v8/src/heap/store-buffer-inl.h36
-rw-r--r--deps/v8/src/heap/store-buffer.cc30
-rw-r--r--deps/v8/src/heap/store-buffer.h48
-rw-r--r--deps/v8/src/i18n.cc9
-rw-r--r--deps/v8/src/ia32/assembler-ia32-inl.h31
-rw-r--r--deps/v8/src/ia32/assembler-ia32.cc113
-rw-r--r--deps/v8/src/ia32/assembler-ia32.h52
-rw-r--r--deps/v8/src/ia32/builtins-ia32.cc198
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.cc295
-rw-r--r--deps/v8/src/ia32/codegen-ia32.cc9
-rw-r--r--deps/v8/src/ia32/deoptimizer-ia32.cc33
-rw-r--r--deps/v8/src/ia32/disasm-ia32.cc92
-rw-r--r--deps/v8/src/ia32/frames-ia32.h18
-rw-r--r--deps/v8/src/ia32/interface-descriptors-ia32.cc71
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.cc274
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.h33
-rw-r--r--deps/v8/src/ic/access-compiler.cc3
-rw-r--r--deps/v8/src/ic/arm/handler-compiler-arm.cc42
-rw-r--r--deps/v8/src/ic/arm64/handler-compiler-arm64.cc46
-rw-r--r--deps/v8/src/ic/handler-compiler.cc7
-rw-r--r--deps/v8/src/ic/handler-compiler.h1
-rw-r--r--deps/v8/src/ic/ia32/handler-compiler-ia32.cc49
-rw-r--r--deps/v8/src/ic/ia32/ic-ia32.cc15
-rw-r--r--deps/v8/src/ic/ic-compiler.cc49
-rw-r--r--deps/v8/src/ic/ic-compiler.h4
-rw-r--r--deps/v8/src/ic/ic-state.cc11
-rw-r--r--deps/v8/src/ic/ic.cc227
-rw-r--r--deps/v8/src/ic/ic.h12
-rw-r--r--deps/v8/src/ic/mips/handler-compiler-mips.cc46
-rw-r--r--deps/v8/src/ic/mips64/handler-compiler-mips64.cc46
-rw-r--r--deps/v8/src/ic/ppc/handler-compiler-ppc.cc45
-rw-r--r--deps/v8/src/ic/s390/OWNERS5
-rw-r--r--deps/v8/src/ic/s390/access-compiler-s390.cc41
-rw-r--r--deps/v8/src/ic/s390/handler-compiler-s390.cc750
-rw-r--r--deps/v8/src/ic/s390/ic-compiler-s390.cc29
-rw-r--r--deps/v8/src/ic/s390/ic-s390.cc897
-rw-r--r--deps/v8/src/ic/s390/stub-cache-s390.cc187
-rw-r--r--deps/v8/src/ic/x64/handler-compiler-x64.cc46
-rw-r--r--deps/v8/src/ic/x87/handler-compiler-x87.cc49
-rw-r--r--deps/v8/src/ic/x87/ic-x87.cc15
-rw-r--r--deps/v8/src/interface-descriptors.cc80
-rw-r--r--deps/v8/src/interface-descriptors.h206
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.cc1127
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.h86
-rw-r--r--deps/v8/src/interpreter/bytecode-array-iterator.cc163
-rw-r--r--deps/v8/src/interpreter/bytecode-array-iterator.h23
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc92
-rw-r--r--deps/v8/src/interpreter/bytecode-register-allocator.cc25
-rw-r--r--deps/v8/src/interpreter/bytecode-traits.h215
-rw-r--r--deps/v8/src/interpreter/bytecodes.cc500
-rw-r--r--deps/v8/src/interpreter/bytecodes.h638
-rw-r--r--deps/v8/src/interpreter/constant-array-builder.cc171
-rw-r--r--deps/v8/src/interpreter/constant-array-builder.h27
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.cc386
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.h51
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics.cc159
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics.h62
-rw-r--r--deps/v8/src/interpreter/interpreter.cc795
-rw-r--r--deps/v8/src/interpreter/interpreter.h21
-rw-r--r--deps/v8/src/interpreter/register-translator.cc173
-rw-r--r--deps/v8/src/interpreter/register-translator.h119
-rw-r--r--deps/v8/src/interpreter/source-position-table.cc212
-rw-r--r--deps/v8/src/interpreter/source-position-table.h74
-rw-r--r--deps/v8/src/isolate-inl.h18
-rw-r--r--deps/v8/src/isolate.cc234
-rw-r--r--deps/v8/src/isolate.h65
-rw-r--r--deps/v8/src/js/array-iterator.js31
-rw-r--r--deps/v8/src/js/array.js296
-rw-r--r--deps/v8/src/js/harmony-atomics.js74
-rw-r--r--deps/v8/src/js/harmony-regexp-exec.js37
-rw-r--r--deps/v8/src/js/harmony-regexp.js60
-rw-r--r--deps/v8/src/js/harmony-string-padding.js77
-rw-r--r--deps/v8/src/js/harmony-unicode-regexps.js5
-rw-r--r--deps/v8/src/js/i18n.js350
-rw-r--r--deps/v8/src/js/json.js30
-rw-r--r--deps/v8/src/js/macros.py21
-rw-r--r--deps/v8/src/js/math.js115
-rw-r--r--deps/v8/src/js/messages.js51
-rw-r--r--deps/v8/src/js/prologue.js33
-rw-r--r--deps/v8/src/js/promise.js15
-rw-r--r--deps/v8/src/js/regexp.js555
-rw-r--r--deps/v8/src/js/runtime.js12
-rw-r--r--deps/v8/src/js/string-iterator.js1
-rw-r--r--deps/v8/src/js/string.js37
-rw-r--r--deps/v8/src/js/symbol.js4
-rw-r--r--deps/v8/src/js/typedarray.js165
-rw-r--r--deps/v8/src/js/uri.js7
-rw-r--r--deps/v8/src/js/v8natives.js18
-rw-r--r--deps/v8/src/json-parser.h3
-rw-r--r--deps/v8/src/json-stringifier.h9
-rw-r--r--deps/v8/src/keys.cc (renamed from deps/v8/src/key-accumulator.cc)187
-rw-r--r--deps/v8/src/keys.h (renamed from deps/v8/src/key-accumulator.h)37
-rw-r--r--deps/v8/src/libplatform/default-platform.cc12
-rw-r--r--deps/v8/src/libplatform/default-platform.h7
-rw-r--r--deps/v8/src/log-utils.h2
-rw-r--r--deps/v8/src/log.cc416
-rw-r--r--deps/v8/src/log.h260
-rw-r--r--deps/v8/src/lookup.cc269
-rw-r--r--deps/v8/src/lookup.h100
-rw-r--r--deps/v8/src/macro-assembler.h5
-rw-r--r--deps/v8/src/messages.cc13
-rw-r--r--deps/v8/src/messages.h88
-rw-r--r--deps/v8/src/mips/assembler-mips-inl.h68
-rw-r--r--deps/v8/src/mips/assembler-mips.cc217
-rw-r--r--deps/v8/src/mips/assembler-mips.h38
-rw-r--r--deps/v8/src/mips/builtins-mips.cc190
-rw-r--r--deps/v8/src/mips/code-stubs-mips.cc238
-rw-r--r--deps/v8/src/mips/codegen-mips.cc4
-rw-r--r--deps/v8/src/mips/deoptimizer-mips.cc17
-rw-r--r--deps/v8/src/mips/frames-mips.h12
-rw-r--r--deps/v8/src/mips/interface-descriptors-mips.cc70
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.cc593
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.h83
-rw-r--r--deps/v8/src/mips/simulator-mips.cc2
-rw-r--r--deps/v8/src/mips64/assembler-mips64-inl.h16
-rw-r--r--deps/v8/src/mips64/assembler-mips64.cc37
-rw-r--r--deps/v8/src/mips64/assembler-mips64.h25
-rw-r--r--deps/v8/src/mips64/builtins-mips64.cc186
-rw-r--r--deps/v8/src/mips64/code-stubs-mips64.cc245
-rw-r--r--deps/v8/src/mips64/codegen-mips64.cc4
-rw-r--r--deps/v8/src/mips64/deoptimizer-mips64.cc17
-rw-r--r--deps/v8/src/mips64/frames-mips64.h12
-rw-r--r--deps/v8/src/mips64/interface-descriptors-mips64.cc71
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.cc930
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.h144
-rw-r--r--deps/v8/src/mips64/simulator-mips64.cc31
-rw-r--r--deps/v8/src/objects-body-descriptors-inl.h1
-rw-r--r--deps/v8/src/objects-debug.cc9
-rw-r--r--deps/v8/src/objects-inl.h398
-rw-r--r--deps/v8/src/objects-printer.cc31
-rw-r--r--deps/v8/src/objects.cc1484
-rw-r--r--deps/v8/src/objects.h452
-rw-r--r--deps/v8/src/optimizing-compile-dispatcher.cc142
-rw-r--r--deps/v8/src/optimizing-compile-dispatcher.h34
-rw-r--r--deps/v8/src/parsing/expression-classifier.h34
-rw-r--r--deps/v8/src/parsing/parameter-initializer-rewriter.cc2
-rw-r--r--deps/v8/src/parsing/parser-base.h472
-rw-r--r--deps/v8/src/parsing/parser.cc1243
-rw-r--r--deps/v8/src/parsing/parser.h92
-rw-r--r--deps/v8/src/parsing/pattern-rewriter.cc293
-rw-r--r--deps/v8/src/parsing/preparse-data.h1
-rw-r--r--deps/v8/src/parsing/preparser.cc270
-rw-r--r--deps/v8/src/parsing/preparser.h65
-rw-r--r--deps/v8/src/parsing/rewriter.cc9
-rw-r--r--deps/v8/src/parsing/scanner.cc54
-rw-r--r--deps/v8/src/parsing/scanner.h36
-rw-r--r--deps/v8/src/parsing/token.h8
-rw-r--r--deps/v8/src/perf-jit.cc336
-rw-r--r--deps/v8/src/perf-jit.h122
-rw-r--r--deps/v8/src/ppc/assembler-ppc-inl.h16
-rw-r--r--deps/v8/src/ppc/assembler-ppc.cc13
-rw-r--r--deps/v8/src/ppc/assembler-ppc.h21
-rw-r--r--deps/v8/src/ppc/builtins-ppc.cc160
-rw-r--r--deps/v8/src/ppc/code-stubs-ppc.cc246
-rw-r--r--deps/v8/src/ppc/codegen-ppc.cc10
-rw-r--r--deps/v8/src/ppc/deoptimizer-ppc.cc13
-rw-r--r--deps/v8/src/ppc/disasm-ppc.cc8
-rw-r--r--deps/v8/src/ppc/frames-ppc.h15
-rw-r--r--deps/v8/src/ppc/interface-descriptors-ppc.cc67
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.cc383
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.h76
-rw-r--r--deps/v8/src/ppc/simulator-ppc.cc72
-rw-r--r--deps/v8/src/profiler/allocation-tracker.cc5
-rw-r--r--deps/v8/src/profiler/cpu-profiler.cc148
-rw-r--r--deps/v8/src/profiler/cpu-profiler.h49
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.cc6
-rw-r--r--deps/v8/src/profiler/profile-generator.cc90
-rw-r--r--deps/v8/src/profiler/profile-generator.h15
-rw-r--r--deps/v8/src/profiler/sampler.cc34
-rw-r--r--deps/v8/src/profiler/sampler.h2
-rw-r--r--deps/v8/src/profiler/sampling-heap-profiler.cc24
-rw-r--r--deps/v8/src/property-descriptor.cc28
-rw-r--r--deps/v8/src/prototype.h7
-rw-r--r--deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc3
-rw-r--r--deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc3
-rw-r--r--deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc6
-rw-r--r--deps/v8/src/regexp/jsregexp.cc9
-rw-r--r--deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc3
-rw-r--r--deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc3
-rw-r--r--deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc3
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.h1
-rw-r--r--deps/v8/src/regexp/regexp-parser.cc148
-rw-r--r--deps/v8/src/regexp/regexp-parser.h3
-rw-r--r--deps/v8/src/regexp/s390/OWNERS5
-rw-r--r--deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc1256
-rw-r--r--deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h216
-rw-r--r--deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc2
-rw-r--r--deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc6
-rw-r--r--deps/v8/src/register-configuration.cc4
-rw-r--r--deps/v8/src/runtime-profiler.cc3
-rw-r--r--deps/v8/src/runtime/runtime-array.cc54
-rw-r--r--deps/v8/src/runtime/runtime-classes.cc20
-rw-r--r--deps/v8/src/runtime/runtime-compiler.cc119
-rw-r--r--deps/v8/src/runtime/runtime-debug.cc114
-rw-r--r--deps/v8/src/runtime/runtime-forin.cc82
-rw-r--r--deps/v8/src/runtime/runtime-function.cc81
-rw-r--r--deps/v8/src/runtime/runtime-generator.cc33
-rw-r--r--deps/v8/src/runtime/runtime-i18n.cc4
-rw-r--r--deps/v8/src/runtime/runtime-internal.cc33
-rw-r--r--deps/v8/src/runtime/runtime-interpreter.cc111
-rw-r--r--deps/v8/src/runtime/runtime-literals.cc117
-rw-r--r--deps/v8/src/runtime/runtime-liveedit.cc14
-rw-r--r--deps/v8/src/runtime/runtime-maths.cc100
-rw-r--r--deps/v8/src/runtime/runtime-numbers.cc13
-rw-r--r--deps/v8/src/runtime/runtime-object.cc230
-rw-r--r--deps/v8/src/runtime/runtime-regexp.cc13
-rw-r--r--deps/v8/src/runtime/runtime-scopes.cc22
-rw-r--r--deps/v8/src/runtime/runtime-strings.cc154
-rw-r--r--deps/v8/src/runtime/runtime-test.cc11
-rw-r--r--deps/v8/src/runtime/runtime-utils.h42
-rw-r--r--deps/v8/src/runtime/runtime.h51
-rw-r--r--deps/v8/src/s390/OWNERS5
-rw-r--r--deps/v8/src/s390/assembler-s390-inl.h593
-rw-r--r--deps/v8/src/s390/assembler-s390.cc3061
-rw-r--r--deps/v8/src/s390/assembler-s390.h1466
-rw-r--r--deps/v8/src/s390/builtins-s390.cc2555
-rw-r--r--deps/v8/src/s390/code-stubs-s390.cc5695
-rw-r--r--deps/v8/src/s390/code-stubs-s390.h467
-rw-r--r--deps/v8/src/s390/codegen-s390.cc675
-rw-r--r--deps/v8/src/s390/codegen-s390.h44
-rw-r--r--deps/v8/src/s390/constants-s390.cc48
-rw-r--r--deps/v8/src/s390/constants-s390.h1561
-rw-r--r--deps/v8/src/s390/cpu-s390.cc25
-rw-r--r--deps/v8/src/s390/deoptimizer-s390.cc338
-rw-r--r--deps/v8/src/s390/disasm-s390.cc1421
-rw-r--r--deps/v8/src/s390/frames-s390.cc35
-rw-r--r--deps/v8/src/s390/frames-s390.h189
-rw-r--r--deps/v8/src/s390/interface-descriptors-s390.cc373
-rw-r--r--deps/v8/src/s390/macro-assembler-s390.cc5409
-rw-r--r--deps/v8/src/s390/macro-assembler-s390.h1887
-rw-r--r--deps/v8/src/s390/simulator-s390.cc5128
-rw-r--r--deps/v8/src/s390/simulator-s390.h552
-rw-r--r--deps/v8/src/simulator.h2
-rw-r--r--deps/v8/src/snapshot/code-serializer.cc421
-rw-r--r--deps/v8/src/snapshot/code-serializer.h127
-rw-r--r--deps/v8/src/snapshot/deserializer.cc818
-rw-r--r--deps/v8/src/snapshot/deserializer.h150
-rw-r--r--deps/v8/src/snapshot/mksnapshot.cc32
-rw-r--r--deps/v8/src/snapshot/partial-serializer.cc123
-rw-r--r--deps/v8/src/snapshot/partial-serializer.h62
-rw-r--r--deps/v8/src/snapshot/serialize.cc2877
-rw-r--r--deps/v8/src/snapshot/serialize.h816
-rw-r--r--deps/v8/src/snapshot/serializer-common.cc76
-rw-r--r--deps/v8/src/snapshot/serializer-common.h290
-rw-r--r--deps/v8/src/snapshot/serializer.cc770
-rw-r--r--deps/v8/src/snapshot/serializer.h321
-rw-r--r--deps/v8/src/snapshot/snapshot-common.cc50
-rw-r--r--deps/v8/src/snapshot/snapshot-external.cc1
-rw-r--r--deps/v8/src/snapshot/snapshot-source-sink.cc1
-rw-r--r--deps/v8/src/snapshot/snapshot.h38
-rw-r--r--deps/v8/src/snapshot/startup-serializer.cc167
-rw-r--r--deps/v8/src/snapshot/startup-serializer.h55
-rw-r--r--deps/v8/src/string-stream.cc1
-rw-r--r--deps/v8/src/tracing/trace-event.h212
-rw-r--r--deps/v8/src/transitions-inl.h1
-rw-r--r--deps/v8/src/transitions.cc1
-rw-r--r--deps/v8/src/type-cache.h3
-rw-r--r--deps/v8/src/type-feedback-vector-inl.h12
-rw-r--r--deps/v8/src/type-feedback-vector.h8
-rw-r--r--deps/v8/src/type-info.cc5
-rw-r--r--deps/v8/src/type-info.h2
-rw-r--r--deps/v8/src/types.cc30
-rw-r--r--deps/v8/src/types.h10
-rw-r--r--deps/v8/src/typing-asm.cc170
-rw-r--r--deps/v8/src/typing-asm.h5
-rw-r--r--deps/v8/src/utils.h269
-rw-r--r--deps/v8/src/v8.cc1
-rw-r--r--deps/v8/src/v8.h2
-rw-r--r--deps/v8/src/vm-state-inl.h26
-rw-r--r--deps/v8/src/wasm/asm-wasm-builder.cc463
-rw-r--r--deps/v8/src/wasm/asm-wasm-builder.h4
-rw-r--r--deps/v8/src/wasm/ast-decoder.cc655
-rw-r--r--deps/v8/src/wasm/ast-decoder.h169
-rw-r--r--deps/v8/src/wasm/decoder.h146
-rw-r--r--deps/v8/src/wasm/encoder.cc297
-rw-r--r--deps/v8/src/wasm/encoder.h14
-rw-r--r--deps/v8/src/wasm/module-decoder.cc514
-rw-r--r--deps/v8/src/wasm/module-decoder.h2
-rw-r--r--deps/v8/src/wasm/wasm-external-refs.h181
-rw-r--r--deps/v8/src/wasm/wasm-js.cc107
-rw-r--r--deps/v8/src/wasm/wasm-macro-gen.h300
-rw-r--r--deps/v8/src/wasm/wasm-module.cc293
-rw-r--r--deps/v8/src/wasm/wasm-module.h174
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.cc3
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.h117
-rw-r--r--deps/v8/src/x64/assembler-x64-inl.h22
-rw-r--r--deps/v8/src/x64/assembler-x64.cc60
-rw-r--r--deps/v8/src/x64/assembler-x64.h23
-rw-r--r--deps/v8/src/x64/builtins-x64.cc154
-rw-r--r--deps/v8/src/x64/code-stubs-x64.cc255
-rw-r--r--deps/v8/src/x64/codegen-x64.cc5
-rw-r--r--deps/v8/src/x64/deoptimizer-x64.cc15
-rw-r--r--deps/v8/src/x64/frames-x64.h12
-rw-r--r--deps/v8/src/x64/interface-descriptors-x64.cc69
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.cc192
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.h22
-rw-r--r--deps/v8/src/x87/assembler-x87-inl.h31
-rw-r--r--deps/v8/src/x87/assembler-x87.cc113
-rw-r--r--deps/v8/src/x87/assembler-x87.h52
-rw-r--r--deps/v8/src/x87/builtins-x87.cc197
-rw-r--r--deps/v8/src/x87/code-stubs-x87.cc292
-rw-r--r--deps/v8/src/x87/codegen-x87.cc9
-rw-r--r--deps/v8/src/x87/deoptimizer-x87.cc34
-rw-r--r--deps/v8/src/x87/disasm-x87.cc94
-rw-r--r--deps/v8/src/x87/frames-x87.h18
-rw-r--r--deps/v8/src/x87/interface-descriptors-x87.cc70
-rw-r--r--deps/v8/src/x87/macro-assembler-x87.cc272
-rw-r--r--deps/v8/src/x87/macro-assembler-x87.h32
-rw-r--r--deps/v8/src/zone.cc9
-rw-r--r--deps/v8/src/zone.h8
-rw-r--r--deps/v8/test/cctest/OWNERS5
-rw-r--r--deps/v8/test/cctest/cctest.cc23
-rw-r--r--deps/v8/test/cctest/cctest.gyp39
-rw-r--r--deps/v8/test/cctest/cctest.h39
-rw-r--r--deps/v8/test/cctest/cctest.isolate1
-rw-r--r--deps/v8/test/cctest/cctest.status253
-rw-r--r--deps/v8/test/cctest/compiler/call-tester.h7
-rw-r--r--deps/v8/test/cctest/compiler/codegen-tester.cc8
-rw-r--r--deps/v8/test/cctest/compiler/function-tester.h4
-rw-r--r--deps/v8/test/cctest/compiler/test-branch-combine.cc21
-rw-r--r--deps/v8/test/cctest/compiler/test-changes-lowering.cc4
-rw-r--r--deps/v8/test/cctest/compiler/test-code-stub-assembler.cc126
-rw-r--r--deps/v8/test/cctest/compiler/test-instruction.cc3
-rw-r--r--deps/v8/test/cctest/compiler/test-js-typed-lowering.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-jump-threading.cc3
-rw-r--r--deps/v8/test/cctest/compiler/test-linkage.cc6
-rw-r--r--deps/v8/test/cctest/compiler/test-loop-analysis.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-multiple-return.cc3
-rw-r--r--deps/v8/test/cctest/compiler/test-node.cc51
-rw-r--r--deps/v8/test/cctest/compiler/test-representation-change.cc6
-rw-r--r--deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc88
-rw-r--r--deps/v8/test/cctest/compiler/test-run-calls-to-external-references.cc531
-rw-r--r--deps/v8/test/cctest/compiler/test-run-inlining.cc30
-rw-r--r--deps/v8/test/cctest/compiler/test-run-intrinsics.cc25
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jscalls.cc14
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jsobjects.cc6
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jsops.cc11
-rw-r--r--deps/v8/test/cctest/compiler/test-run-machops.cc492
-rw-r--r--deps/v8/test/cctest/compiler/test-run-native-calls.cc58
-rw-r--r--deps/v8/test/cctest/compiler/test-run-variables.cc24
-rw-r--r--deps/v8/test/cctest/compiler/test-simplified-lowering.cc63
-rw-r--r--deps/v8/test/cctest/compiler/value-helper.h14
-rw-r--r--deps/v8/test/cctest/heap/heap-tester.h4
-rw-r--r--deps/v8/test/cctest/heap/test-compaction.cc8
-rw-r--r--deps/v8/test/cctest/heap/test-heap.cc136
-rw-r--r--deps/v8/test/cctest/heap/test-incremental-marking.cc4
-rw-r--r--deps/v8/test/cctest/heap/test-lab.cc3
-rw-r--r--deps/v8/test/cctest/heap/test-spaces.cc26
-rw-r--r--deps/v8/test/cctest/heap/utils-inl.h2
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc58
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h1
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden124
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiteralsWide.golden1051
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden278
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/BasicBlockToBoolean.golden91
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden859
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden231
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden63
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden57
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden90
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden93
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden260
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden241
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden139
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Conditional.golden54
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden116
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden138
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden105
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden920
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden297
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden147
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden115
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/DeadCodeRemoval.golden83
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden127
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden146
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/DeleteLookupSlotInEval.golden88
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/DoDebugger.golden26
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/DoExpression.golden78
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden49
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden258
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden760
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden72
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden57
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden105
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden115
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/HeapNumberConstants.golden1112
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden764
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/IntegerConstants.golden63
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/JumpsRequiringConstantWideOperands.golden1303
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden114
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden136
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden604
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/LogicalExpressions.golden952
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden139
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotInEval.golden106
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotWideInEval.golden4219
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/NewTarget.golden57
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden462
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiteralsWide.golden1052
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden74
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Parameters.golden139
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveExpressions.golden276
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveReturnStatements.golden177
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden625
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden1575
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden3358
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden70
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiteralsWide.golden1051
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/RemoveRedundantLdar.golden103
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden1171
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/StringConstants.golden67
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden523
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ThisFunction.golden65
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Throw.golden66
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden47
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden98
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden180
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Typeof.golden55
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden193
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden1326
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden36
-rw-r--r--deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc265
-rw-r--r--deps/v8/test/cctest/interpreter/interpreter-tester.cc73
-rw-r--r--deps/v8/test/cctest/interpreter/interpreter-tester.h128
-rw-r--r--deps/v8/test/cctest/interpreter/test-bytecode-generator.cc10436
-rw-r--r--deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc96
-rw-r--r--deps/v8/test/cctest/interpreter/test-interpreter.cc421
-rw-r--r--deps/v8/test/cctest/test-accessors.cc25
-rw-r--r--deps/v8/test/cctest/test-api-interceptors.cc25
-rw-r--r--deps/v8/test/cctest/test-api.cc668
-rw-r--r--deps/v8/test/cctest/test-api.h6
-rw-r--r--deps/v8/test/cctest/test-asm-validator.cc378
-rw-r--r--deps/v8/test/cctest/test-assembler-arm.cc84
-rw-r--r--deps/v8/test/cctest/test-assembler-arm64.cc9
-rw-r--r--deps/v8/test/cctest/test-assembler-mips.cc91
-rw-r--r--deps/v8/test/cctest/test-assembler-mips64.cc21
-rw-r--r--deps/v8/test/cctest/test-assembler-s390.cc416
-rw-r--r--deps/v8/test/cctest/test-ast-expression-visitor.cc13
-rw-r--r--deps/v8/test/cctest/test-ast.cc3
-rw-r--r--deps/v8/test/cctest/test-bit-vector.cc3
-rw-r--r--deps/v8/test/cctest/test-compiler.cc23
-rw-r--r--deps/v8/test/cctest/test-cpu-profiler.cc120
-rw-r--r--deps/v8/test/cctest/test-debug.cc139
-rw-r--r--deps/v8/test/cctest/test-decls.cc90
-rw-r--r--deps/v8/test/cctest/test-dictionary.cc8
-rw-r--r--deps/v8/test/cctest/test-disasm-arm.cc53
-rw-r--r--deps/v8/test/cctest/test-disasm-ia32.cc14
-rw-r--r--deps/v8/test/cctest/test-disasm-s390.cc297
-rw-r--r--deps/v8/test/cctest/test-disasm-x87.cc14
-rw-r--r--deps/v8/test/cctest/test-field-type-tracking.cc338
-rw-r--r--deps/v8/test/cctest/test-gc-tracer.cc124
-rw-r--r--deps/v8/test/cctest/test-hashing.cc9
-rw-r--r--deps/v8/test/cctest/test-heap-profiler.cc23
-rw-r--r--deps/v8/test/cctest/test-inobject-slack-tracking.cc2
-rw-r--r--deps/v8/test/cctest/test-liveedit.cc3
-rw-r--r--deps/v8/test/cctest/test-lockers.cc8
-rw-r--r--deps/v8/test/cctest/test-log-stack-tracer.cc2
-rw-r--r--deps/v8/test/cctest/test-log.cc11
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-mips.cc190
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-mips64.cc267
-rw-r--r--deps/v8/test/cctest/test-microtask-delivery.cc2
-rw-r--r--deps/v8/test/cctest/test-object-observe.cc21
-rw-r--r--deps/v8/test/cctest/test-parsing.cc1294
-rw-r--r--deps/v8/test/cctest/test-platform.cc4
-rw-r--r--deps/v8/test/cctest/test-profile-generator.cc26
-rw-r--r--deps/v8/test/cctest/test-regexp.cc68
-rw-r--r--deps/v8/test/cctest/test-run-wasm-relocation-arm.cc80
-rw-r--r--deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc82
-rw-r--r--deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc89
-rw-r--r--deps/v8/test/cctest/test-run-wasm-relocation-x64.cc83
-rw-r--r--deps/v8/test/cctest/test-run-wasm-relocation-x87.cc89
-rw-r--r--deps/v8/test/cctest/test-sampler-api.cc6
-rw-r--r--deps/v8/test/cctest/test-serialize.cc502
-rw-r--r--deps/v8/test/cctest/test-strings.cc22
-rw-r--r--deps/v8/test/cctest/test-thread-termination.cc2
-rw-r--r--deps/v8/test/cctest/test-trace-event.cc28
-rw-r--r--deps/v8/test/cctest/test-types.cc4
-rw-r--r--deps/v8/test/cctest/test-unboxed-doubles.cc20
-rw-r--r--deps/v8/test/cctest/test-unique.cc24
-rw-r--r--deps/v8/test/cctest/test-utils.cc1
-rw-r--r--deps/v8/test/cctest/testcfg.py27
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-64.cc1360
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-js.cc132
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-module.cc73
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm.cc1296
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.h126
-rw-r--r--deps/v8/test/fuzzer/fuzzer.gyp52
-rw-r--r--deps/v8/test/fuzzer/fuzzer.isolate4
-rw-r--r--deps/v8/test/fuzzer/parser.cc2
-rw-r--r--deps/v8/test/fuzzer/testcfg.py2
-rw-r--r--deps/v8/test/fuzzer/wasm-asmjs.cc39
-rw-r--r--deps/v8/test/fuzzer/wasm.cc39
-rw-r--r--deps/v8/test/fuzzer/wasm/foo.wasmbin0 -> 47 bytes
-rw-r--r--deps/v8/test/fuzzer/wasm_asmjs/foo.wasmbin0 -> 47 bytes
-rw-r--r--deps/v8/test/ignition.isolate2
-rw-r--r--deps/v8/test/intl/number-format/format-is-bound.js3
-rw-r--r--deps/v8/test/intl/regexp-assert.js19
-rw-r--r--deps/v8/test/intl/regexp-prepare.js5
-rw-r--r--deps/v8/test/intl/testcfg.py5
-rw-r--r--deps/v8/test/js-perf-test/JSTests.json32
-rw-r--r--deps/v8/test/js-perf-test/Keys/keys.js209
-rw-r--r--deps/v8/test/js-perf-test/Keys/run.js22
-rw-r--r--deps/v8/test/js-perf-test/Object/ObjectTests.json31
-rw-r--r--deps/v8/test/js-perf-test/Object/entries.js75
-rw-r--r--deps/v8/test/js-perf-test/Object/run.js3
-rw-r--r--deps/v8/test/js-perf-test/Object/values.js75
-rw-r--r--deps/v8/test/message/const-decl-no-init-sloppy.js (renamed from deps/v8/test/message/strong-object-set-proto.js)10
-rw-r--r--deps/v8/test/message/const-decl-no-init-sloppy.out7
-rw-r--r--deps/v8/test/message/default-parameter-tdz-arrow.js2
-rw-r--r--deps/v8/test/message/default-parameter-tdz.js2
-rw-r--r--deps/v8/test/message/destructuring-decl-no-init-array.js2
-rw-r--r--deps/v8/test/message/destructuring-decl-no-init-array2.js2
-rw-r--r--deps/v8/test/message/destructuring-decl-no-init-obj.js2
-rw-r--r--deps/v8/test/message/destructuring-decl-no-init-obj2.js2
-rw-r--r--deps/v8/test/message/destructuring-modify-const.js2
-rw-r--r--deps/v8/test/message/for-in-loop-initializers-destructuring.js2
-rw-r--r--deps/v8/test/message/for-of-throw-in-body.js5
-rw-r--r--deps/v8/test/message/for-of-throw-in-body.out6
-rw-r--r--deps/v8/test/message/instanceof-noncallable.js (renamed from deps/v8/test/preparser/empty.js)6
-rw-r--r--deps/v8/test/message/instanceof-noncallable.out5
-rw-r--r--deps/v8/test/message/instanceof-nonobject.js (renamed from deps/v8/test/message/instanceof.js)2
-rw-r--r--deps/v8/test/message/instanceof-nonobject.out5
-rw-r--r--deps/v8/test/message/instanceof.out5
-rw-r--r--deps/v8/test/message/let-lexical-name-in-array-prohibited.js2
-rw-r--r--deps/v8/test/message/let-lexical-name-in-object-prohibited.js2
-rw-r--r--deps/v8/test/message/message.status4
-rw-r--r--deps/v8/test/message/no-legacy-const-2.js2
-rw-r--r--deps/v8/test/message/no-legacy-const-3.js2
-rw-r--r--deps/v8/test/message/no-legacy-const.js2
-rw-r--r--deps/v8/test/message/non-alphanum.js (renamed from deps/v8/test/preparser/non-alphanum.js)2
-rw-r--r--deps/v8/test/message/non-alphanum.out6
-rw-r--r--deps/v8/test/message/non-use-strict-hex-escape.js (renamed from deps/v8/test/preparser/non-use-strict-hex-escape.js)0
-rw-r--r--deps/v8/test/message/non-use-strict-hex-escape.out0
-rw-r--r--deps/v8/test/message/non-use-strict-octal-escape.js (renamed from deps/v8/test/preparser/non-use-strict-octal-escape.js)0
-rw-r--r--deps/v8/test/message/non-use-strict-octal-escape.out0
-rw-r--r--deps/v8/test/message/non-use-strict-uhex-escape.js (renamed from deps/v8/test/preparser/non-use-strict-uhex-escape.js)0
-rw-r--r--deps/v8/test/message/non-use-strict-uhex-escape.out0
-rw-r--r--deps/v8/test/message/nonstrict-arguments.js (renamed from deps/v8/test/preparser/nonstrict-arguments.js)40
-rw-r--r--deps/v8/test/message/nonstrict-arguments.out0
-rw-r--r--deps/v8/test/message/nonstrict-eval.js (renamed from deps/v8/test/preparser/nonstrict-eval.js)40
-rw-r--r--deps/v8/test/message/nonstrict-eval.out0
-rw-r--r--deps/v8/test/message/nonstrict-with.js (renamed from deps/v8/test/preparser/nonstrict-with.js)18
-rw-r--r--deps/v8/test/message/nonstrict-with.out0
-rw-r--r--deps/v8/test/message/regress/regress-4829-1.js7
-rw-r--r--deps/v8/test/message/regress/regress-4829-1.out8
-rw-r--r--deps/v8/test/message/regress/regress-4829-2.js7
-rw-r--r--deps/v8/test/message/regress/regress-4829-2.out8
-rw-r--r--deps/v8/test/message/strict-octal-indirect-regexp.js (renamed from deps/v8/test/preparser/strict-octal-indirect-regexp.js)0
-rw-r--r--deps/v8/test/message/strict-octal-indirect-regexp.out0
-rw-r--r--deps/v8/test/message/strict-octal-number.js (renamed from deps/v8/test/preparser/strict-octal-number.js)0
-rw-r--r--deps/v8/test/message/strict-octal-number.out4
-rw-r--r--deps/v8/test/message/strict-octal-regexp.js (renamed from deps/v8/test/preparser/strict-octal-regexp.js)0
-rw-r--r--deps/v8/test/message/strict-octal-regexp.out0
-rw-r--r--deps/v8/test/message/strict-octal-string.js (renamed from deps/v8/test/preparser/strict-octal-string.js)0
-rw-r--r--deps/v8/test/message/strict-octal-string.out4
-rw-r--r--deps/v8/test/message/strict-octal-use-strict-after.js (renamed from deps/v8/test/preparser/strict-octal-use-strict-after.js)0
-rw-r--r--deps/v8/test/message/strict-octal-use-strict-after.out4
-rw-r--r--deps/v8/test/message/strict-octal-use-strict-before.js (renamed from deps/v8/test/preparser/strict-octal-use-strict-before.js)0
-rw-r--r--deps/v8/test/message/strict-octal-use-strict-before.out4
-rw-r--r--deps/v8/test/message/strict-with.js (renamed from deps/v8/test/preparser/strict-with.js)0
-rw-r--r--deps/v8/test/message/strict-with.out4
-rw-r--r--deps/v8/test/message/strong-object-freeze-prop.out9
-rw-r--r--deps/v8/test/message/strong-object-set-proto.out9
-rw-r--r--deps/v8/test/message/testcfg.py4
-rw-r--r--deps/v8/test/message/try-catch-lexical-conflict.js2
-rw-r--r--deps/v8/test/message/try-catch-variable-conflict.js2
-rw-r--r--deps/v8/test/message/unicode-escape-invalid-2.js5
-rw-r--r--deps/v8/test/message/unicode-escape-invalid-2.out8
-rw-r--r--deps/v8/test/message/unicode-escape-invalid.js5
-rw-r--r--deps/v8/test/message/unicode-escape-invalid.out8
-rw-r--r--deps/v8/test/message/unicode-escape-undefined.js5
-rw-r--r--deps/v8/test/message/unicode-escape-undefined.out8
-rw-r--r--deps/v8/test/message/yield-in-arrow-param.js5
-rw-r--r--deps/v8/test/message/yield-in-arrow-param.out4
-rw-r--r--deps/v8/test/message/yield-in-generator-param.js (renamed from deps/v8/test/message/strong-object-freeze-prop.js)8
-rw-r--r--deps/v8/test/message/yield-in-generator-param.out4
-rw-r--r--deps/v8/test/mjsunit/allocation-site-info.js23
-rw-r--r--deps/v8/test/mjsunit/array-foreach.js17
-rw-r--r--deps/v8/test/mjsunit/array-isarray.js2
-rw-r--r--deps/v8/test/mjsunit/array-join.js13
-rw-r--r--deps/v8/test/mjsunit/array-natives-elements.js20
-rw-r--r--deps/v8/test/mjsunit/asm/atomics-add.js4
-rw-r--r--deps/v8/test/mjsunit/asm/atomics-and.js4
-rw-r--r--deps/v8/test/mjsunit/asm/atomics-compareexchange.js4
-rw-r--r--deps/v8/test/mjsunit/asm/atomics-exchange.js4
-rw-r--r--deps/v8/test/mjsunit/asm/atomics-load.js18
-rw-r--r--deps/v8/test/mjsunit/asm/atomics-or.js4
-rw-r--r--deps/v8/test/mjsunit/asm/atomics-store.js18
-rw-r--r--deps/v8/test/mjsunit/asm/atomics-sub.js4
-rw-r--r--deps/v8/test/mjsunit/asm/atomics-xor.js4
-rw-r--r--deps/v8/test/mjsunit/asm/math-clz32.js5
-rw-r--r--deps/v8/test/mjsunit/assert-opt-and-deopt.js2
-rw-r--r--deps/v8/test/mjsunit/call-intrinsic-type-error.js11
-rw-r--r--deps/v8/test/mjsunit/compare-known-objects-tostringtag.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-materialize-accumulator.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/optimized-for-in.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-600593.js21
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-96989.js43
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-const.js5
-rw-r--r--deps/v8/test/mjsunit/const-declaration.js173
-rw-r--r--deps/v8/test/mjsunit/const-eval-init.js118
-rw-r--r--deps/v8/test/mjsunit/const-redecl.js228
-rw-r--r--deps/v8/test/mjsunit/const.js72
-rw-r--r--deps/v8/test/mjsunit/constant-folding.js7
-rw-r--r--deps/v8/test/mjsunit/debug-backtrace.js4
-rw-r--r--deps/v8/test/mjsunit/debug-evaluate-closure.js17
-rw-r--r--deps/v8/test/mjsunit/debug-evaluate-const.js118
-rw-r--r--deps/v8/test/mjsunit/debug-evaluate-locals-capturing.js4
-rw-r--r--deps/v8/test/mjsunit/debug-evaluate-locals.js18
-rw-r--r--deps/v8/test/mjsunit/debug-evaluate-modify-catch-block-scope.js14
-rw-r--r--deps/v8/test/mjsunit/debug-evaluate-modify-this.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-605581.js)23
-rw-r--r--deps/v8/test/mjsunit/debug-evaluate-nested-let.js52
-rw-r--r--deps/v8/test/mjsunit/debug-evaluate-nested.js49
-rw-r--r--deps/v8/test/mjsunit/debug-function-scopes.js2
-rw-r--r--deps/v8/test/mjsunit/debug-multiple-var-decl.js74
-rw-r--r--deps/v8/test/mjsunit/debug-scopes.js104
-rw-r--r--deps/v8/test/mjsunit/debug-step-end-of-script.js2
-rw-r--r--deps/v8/test/mjsunit/debug-step.js4
-rw-r--r--deps/v8/test/mjsunit/debug-stepin-accessor.js26
-rw-r--r--deps/v8/test/mjsunit/debug-stepin-positions.js227
-rw-r--r--deps/v8/test/mjsunit/declare-locally.js8
-rw-r--r--deps/v8/test/mjsunit/es6/array-concat.js22
-rw-r--r--deps/v8/test/mjsunit/es6/array-iterator.js4
-rw-r--r--deps/v8/test/mjsunit/es6/array-prototype-values.js15
-rw-r--r--deps/v8/test/mjsunit/es6/array-tostring.js2
-rw-r--r--deps/v8/test/mjsunit/es6/block-eval-var-over-legacy-const.js86
-rw-r--r--deps/v8/test/mjsunit/es6/block-let-contextual-sloppy.js12
-rw-r--r--deps/v8/test/mjsunit/es6/built-in-accessor-names.js2
-rw-r--r--deps/v8/test/mjsunit/es6/classes-proxy.js2
-rw-r--r--deps/v8/test/mjsunit/es6/classes-subclass-builtins.js113
-rw-r--r--deps/v8/test/mjsunit/es6/classes.js61
-rw-r--r--deps/v8/test/mjsunit/es6/classof-proxy.js2
-rw-r--r--deps/v8/test/mjsunit/es6/collection-iterator.js2
-rw-r--r--deps/v8/test/mjsunit/es6/collections.js2
-rw-r--r--deps/v8/test/mjsunit/es6/completion.js2
-rw-r--r--deps/v8/test/mjsunit/es6/debug-blockscopes.js1
-rw-r--r--deps/v8/test/mjsunit/es6/debug-evaluate-receiver-before-super.js39
-rw-r--r--deps/v8/test/mjsunit/es6/debug-liveedit-new-target-1.js2
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/events.js122
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/resolve-after-aborted-try-finally.js32
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/resolve-after-try-catch.js29
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/rethrow-in-try-finally.js30
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/throw-with-throw-in-reject.js1
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/throw-with-undefined-reject.js69
-rw-r--r--deps/v8/test/mjsunit/es6/debug-step-destructuring-assignment.js85
-rw-r--r--deps/v8/test/mjsunit/es6/debug-step-destructuring-bind.js (renamed from deps/v8/test/mjsunit/harmony/debug-step-destructuring-bind.js)14
-rw-r--r--deps/v8/test/mjsunit/es6/debug-stepin-default-parameters.js (renamed from deps/v8/test/mjsunit/harmony/debug-stepin-default-parameters.js)4
-rw-r--r--deps/v8/test/mjsunit/es6/debug-stepin-proxies.js (renamed from deps/v8/test/mjsunit/harmony/debug-stepin-proxies.js)8
-rw-r--r--deps/v8/test/mjsunit/es6/debug-stepin-string-template.js3
-rw-r--r--deps/v8/test/mjsunit/es6/debug-stepin-tailcalls.js46
-rw-r--r--deps/v8/test/mjsunit/es6/debug-stepnext-for.js8
-rw-r--r--deps/v8/test/mjsunit/es6/debug-stepout-tailcalls.js45
-rw-r--r--deps/v8/test/mjsunit/es6/default-parameters-debug.js (renamed from deps/v8/test/mjsunit/harmony/default-parameters-debug.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/default-parameters-destructuring.js (renamed from deps/v8/test/mjsunit/harmony/default-parameters-destructuring.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/default-parameters.js (renamed from deps/v8/test/mjsunit/harmony/default-parameters.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/destructuring-assignment-lazy.js (renamed from deps/v8/test/mjsunit/harmony/destructuring-assignment-lazy.js)1
-rw-r--r--deps/v8/test/mjsunit/es6/destructuring-assignment.js (renamed from deps/v8/test/mjsunit/harmony/destructuring-assignment.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/destructuring-parameters-literalcount-nolazy.js (renamed from deps/v8/test/mjsunit/harmony/destructuring-parameters-literalcount-nolazy.js)1
-rw-r--r--deps/v8/test/mjsunit/es6/destructuring-parameters-literalcount.js (renamed from deps/v8/test/mjsunit/harmony/destructuring-parameters-literalcount.js)1
-rw-r--r--deps/v8/test/mjsunit/es6/destructuring.js (renamed from deps/v8/test/mjsunit/harmony/destructuring.js)6
-rw-r--r--deps/v8/test/mjsunit/es6/generators-objects.js2
-rw-r--r--deps/v8/test/mjsunit/es6/instanceof-proxies.js2
-rw-r--r--deps/v8/test/mjsunit/es6/iteration-semantics.js2
-rw-r--r--deps/v8/test/mjsunit/es6/json.js2
-rw-r--r--deps/v8/test/mjsunit/es6/math-trunc.js97
-rw-r--r--deps/v8/test/mjsunit/es6/math.js2
-rw-r--r--deps/v8/test/mjsunit/es6/new-target.js2
-rw-r--r--deps/v8/test/mjsunit/es6/no-unicode-regexp-flag.js2
-rw-r--r--deps/v8/test/mjsunit/es6/object-tostring.js2
-rw-r--r--deps/v8/test/mjsunit/es6/pattern-brand-check.js54
-rw-r--r--deps/v8/test/mjsunit/es6/promises.js2
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-accesschecks.js (renamed from deps/v8/test/mjsunit/harmony/proxies-accesschecks.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-apply.js (renamed from deps/v8/test/mjsunit/harmony/proxies-apply.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-bind.js (renamed from deps/v8/test/mjsunit/harmony/proxies-bind.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-construct.js (renamed from deps/v8/test/mjsunit/harmony/proxies-construct.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-cross-realm-exception.js (renamed from deps/v8/test/mjsunit/harmony/proxies-cross-realm-exception.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-define-property.js (renamed from deps/v8/test/mjsunit/harmony/proxies-define-property.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-delete-property.js (renamed from deps/v8/test/mjsunit/harmony/proxies-delete-property.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-example-membrane.js308
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-for.js (renamed from deps/v8/test/mjsunit/harmony/proxies-for.js)94
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-function.js630
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-get-own-property-descriptor.js (renamed from deps/v8/test/mjsunit/harmony/proxies-get-own-property-descriptor.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-get-prototype-of.js (renamed from deps/v8/test/mjsunit/harmony/proxies-get-prototype-of.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-get.js (renamed from deps/v8/test/mjsunit/harmony/proxies-get.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-global-reference.js (renamed from deps/v8/test/mjsunit/harmony/proxies-global-reference.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-has-own-property.js (renamed from deps/v8/test/mjsunit/harmony/proxies-has-own-property.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-has.js (renamed from deps/v8/test/mjsunit/harmony/proxies-has.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-hash.js (renamed from deps/v8/test/mjsunit/harmony/proxies-hash.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-integrity.js (renamed from deps/v8/test/mjsunit/harmony/proxies-integrity.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-is-extensible.js (renamed from deps/v8/test/mjsunit/harmony/proxies-is-extensible.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-json.js (renamed from deps/v8/test/mjsunit/harmony/proxies-json.js)13
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-keys.js (renamed from deps/v8/test/mjsunit/harmony/proxies-keys.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-object-assign.js (renamed from deps/v8/test/mjsunit/harmony/proxies-object-assign.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-ownkeys.js (renamed from deps/v8/test/mjsunit/harmony/proxies-ownkeys.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-prevent-extensions.js (renamed from deps/v8/test/mjsunit/harmony/proxies-prevent-extensions.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-property-is-enumerable.js (renamed from deps/v8/test/mjsunit/harmony/proxies-property-is-enumerable.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-prototype-handler-stackoverflow.js (renamed from deps/v8/test/mjsunit/harmony/proxies-prototype-handler-stackoverflow.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-prototype-target-stackoverflow.js (renamed from deps/v8/test/mjsunit/harmony/proxies-prototype-target-stackoverflow.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-revocable.js (renamed from deps/v8/test/mjsunit/harmony/proxies-revocable.js)3
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-set-prototype-of.js (renamed from deps/v8/test/mjsunit/harmony/proxies-set-prototype-of.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-set.js (renamed from deps/v8/test/mjsunit/harmony/proxies-set.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-with-unscopables.js (renamed from deps/v8/test/mjsunit/harmony/proxies-with-unscopables.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-with.js (renamed from deps/v8/test/mjsunit/harmony/proxies-with.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/proxies.js (renamed from deps/v8/test/mjsunit/harmony/proxies.js)16
-rw-r--r--deps/v8/test/mjsunit/es6/reflect-apply.js (renamed from deps/v8/test/mjsunit/harmony/reflect-apply.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/reflect-construct.js (renamed from deps/v8/test/mjsunit/harmony/reflect-construct.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/reflect-define-property.js (renamed from deps/v8/test/mjsunit/harmony/reflect-define-property.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/reflect-get-own-property-descriptor.js (renamed from deps/v8/test/mjsunit/harmony/reflect-get-own-property-descriptor.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/reflect-get-prototype-of.js (renamed from deps/v8/test/mjsunit/harmony/reflect-get-prototype-of.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/reflect-own-keys.js (renamed from deps/v8/test/mjsunit/harmony/reflect-own-keys.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/reflect-prevent-extensions.js (renamed from deps/v8/test/mjsunit/harmony/reflect-prevent-extensions.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/reflect-set-prototype-of.js (renamed from deps/v8/test/mjsunit/harmony/reflect-set-prototype-of.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/reflect.js (renamed from deps/v8/test/mjsunit/harmony/reflect.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/regexp-constructor.js19
-rw-r--r--deps/v8/test/mjsunit/es6/regexp-flags.js67
-rw-r--r--deps/v8/test/mjsunit/es6/regexp-sticky.js (renamed from deps/v8/test/mjsunit/harmony/regexp-sticky.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/regexp-tostring.js11
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-2219.js (renamed from deps/v8/test/mjsunit/harmony/regress/regress-2219.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-2225.js (renamed from deps/v8/test/mjsunit/harmony/regress/regress-2225.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-4395-global-eval.js (renamed from deps/v8/test/mjsunit/harmony/regress/regress-4395-global-eval.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-4395.js (renamed from deps/v8/test/mjsunit/harmony/regress/regress-4395.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-4400.js (renamed from deps/v8/test/mjsunit/harmony/regress/regress-4400.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-4585.js (renamed from deps/v8/test/mjsunit/harmony/regress/regress-4585.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-4759.js23
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-517455.js (renamed from deps/v8/test/mjsunit/harmony/regress/regress-517455.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-576662.js (renamed from deps/v8/test/mjsunit/harmony/regress/regress-576662.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-cr493566.js2
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-cr512574.js2
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-crbug-448730.js (renamed from deps/v8/test/mjsunit/harmony/regress/regress-crbug-448730.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-crbug-461520.js (renamed from deps/v8/test/mjsunit/harmony/regress/regress-crbug-461520.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-lookup-transition.js (renamed from deps/v8/test/mjsunit/harmony/regress/regress-lookup-transition.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/string-endswith.js8
-rw-r--r--deps/v8/test/mjsunit/es6/string-includes.js8
-rw-r--r--deps/v8/test/mjsunit/es6/string-iterator.js2
-rw-r--r--deps/v8/test/mjsunit/es6/string-startswith.js8
-rw-r--r--deps/v8/test/mjsunit/es6/super.js2
-rw-r--r--deps/v8/test/mjsunit/es6/symbols.js3
-rw-r--r--deps/v8/test/mjsunit/es6/tail-call-megatest-shard0.js13
-rw-r--r--deps/v8/test/mjsunit/es6/tail-call-megatest-shard1.js13
-rw-r--r--deps/v8/test/mjsunit/es6/tail-call-megatest-shard2.js13
-rw-r--r--deps/v8/test/mjsunit/es6/tail-call-megatest-shard3.js13
-rw-r--r--deps/v8/test/mjsunit/es6/tail-call-megatest-shard4.js13
-rw-r--r--deps/v8/test/mjsunit/es6/tail-call-megatest-shard5.js13
-rw-r--r--deps/v8/test/mjsunit/es6/tail-call-megatest-shard6.js13
-rw-r--r--deps/v8/test/mjsunit/es6/tail-call-megatest-shard7.js13
-rw-r--r--deps/v8/test/mjsunit/es6/tail-call-megatest-shard8.js13
-rw-r--r--deps/v8/test/mjsunit/es6/tail-call-megatest-shard9.js13
-rw-r--r--deps/v8/test/mjsunit/es6/tail-call-megatest.js283
-rw-r--r--deps/v8/test/mjsunit/es6/tail-call-proxies.js2
-rw-r--r--deps/v8/test/mjsunit/es6/tail-call-simple.js40
-rw-r--r--deps/v8/test/mjsunit/es6/tail-call.js159
-rw-r--r--deps/v8/test/mjsunit/es6/typed-array-iterator.js8
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-of.js4
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-proto.js10
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray.js2
-rw-r--r--deps/v8/test/mjsunit/es7/object-observe.js2
-rw-r--r--deps/v8/test/mjsunit/for-in-opt.js47
-rw-r--r--deps/v8/test/mjsunit/for-in.js32
-rw-r--r--deps/v8/test/mjsunit/function-caller.js2
-rw-r--r--deps/v8/test/mjsunit/global-const-var-conflicts.js62
-rw-r--r--deps/v8/test/mjsunit/harmony/array-species-constructor-accessor.js28
-rw-r--r--deps/v8/test/mjsunit/harmony/array-species-constructor-delete.js3
-rw-r--r--deps/v8/test/mjsunit/harmony/array-species-constructor.js3
-rw-r--r--deps/v8/test/mjsunit/harmony/array-species-delete.js3
-rw-r--r--deps/v8/test/mjsunit/harmony/array-species-modified.js3
-rw-r--r--deps/v8/test/mjsunit/harmony/array-species-parent-constructor.js3
-rw-r--r--deps/v8/test/mjsunit/harmony/array-species-proto.js3
-rw-r--r--deps/v8/test/mjsunit/harmony/array-species.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/atomics.js168
-rw-r--r--deps/v8/test/mjsunit/harmony/block-conflicts-sloppy.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/block-const-assign-sloppy.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/block-eval-var-over-let.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/block-for-sloppy.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/block-leave-sloppy.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/block-let-crankshaft-sloppy.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/block-let-declaration-sloppy.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/block-let-semantics-sloppy.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/block-scoping-sloppy.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/block-scoping-top-level-sloppy.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/block-sloppy-function.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/debug-step-destructuring-assignment.js86
-rw-r--r--deps/v8/test/mjsunit/harmony/do-expressions-control.js109
-rw-r--r--deps/v8/test/mjsunit/harmony/do-expressions.js1
-rw-r--r--deps/v8/test/mjsunit/harmony/exponentiation-operator.js278
-rw-r--r--deps/v8/test/mjsunit/harmony/function-name.js1
-rw-r--r--deps/v8/test/mjsunit/harmony/futex.js36
-rw-r--r--deps/v8/test/mjsunit/harmony/generators.js24
-rw-r--r--deps/v8/test/mjsunit/harmony/harmony-string-pad-end.js87
-rw-r--r--deps/v8/test/mjsunit/harmony/harmony-string-pad-start.js87
-rw-r--r--deps/v8/test/mjsunit/harmony/iterator-close.js958
-rw-r--r--deps/v8/test/mjsunit/harmony/module-linking.js298
-rw-r--r--deps/v8/test/mjsunit/harmony/module-parsing-eval.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/module-recompile.js87
-rw-r--r--deps/v8/test/mjsunit/harmony/module-resolution.js152
-rw-r--r--deps/v8/test/mjsunit/harmony/object-entries.js68
-rw-r--r--deps/v8/test/mjsunit/harmony/object-get-own-property-descriptors.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/object-values.js65
-rw-r--r--deps/v8/test/mjsunit/harmony/private-symbols.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-example-membrane.js512
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-function.js768
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-change-exec.js9
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-flags.js56
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-no-change-exec.js9
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-blocks.js36
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-char-class.js27
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-disabled.js18
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-exact-match.js33
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-general-category.js (renamed from deps/v8/test/mjsunit/harmony/unicode-regexp-property-class.js)3
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-scripts.js39
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-343928.js22
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-4482.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/sharedarraybuffer.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/simd.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/sloppy-implicit-block-function.js97
-rw-r--r--deps/v8/test/mjsunit/harmony/sloppy-restrictive-block-function.js52
-rw-r--r--deps/v8/test/mjsunit/harmony/unicode-escapes-in-regexps.js2
-rw-r--r--deps/v8/test/mjsunit/ignition/debug-scope-on-return.js31
-rw-r--r--deps/v8/test/mjsunit/ignition/debug-step-prefix-bytecodes.js375
-rw-r--r--deps/v8/test/mjsunit/ignition/elided-instruction-no-ignition.js37
-rw-r--r--deps/v8/test/mjsunit/ignition/elided-instruction.js41
-rw-r--r--deps/v8/test/mjsunit/ignition/optimized-debug-frame.js38
-rw-r--r--deps/v8/test/mjsunit/ignition/optimized-stack-trace.js22
-rw-r--r--deps/v8/test/mjsunit/ignition/regress-597565-double-to-object-transition.js18
-rw-r--r--deps/v8/test/mjsunit/ignition/regress-599001-verifyheap.js17
-rw-r--r--deps/v8/test/mjsunit/ignition/tracing.js325
-rw-r--r--deps/v8/test/mjsunit/json-stringify-stack.js8
-rw-r--r--deps/v8/test/mjsunit/lookup-behind-property.js6
-rw-r--r--deps/v8/test/mjsunit/math-ceil.js78
-rw-r--r--deps/v8/test/mjsunit/math-floor-part1.js29
-rw-r--r--deps/v8/test/mjsunit/math-floor-part2.js29
-rw-r--r--deps/v8/test/mjsunit/math-floor-part3.js29
-rw-r--r--deps/v8/test/mjsunit/math-floor-part4.js29
-rw-r--r--deps/v8/test/mjsunit/messages.js11
-rw-r--r--deps/v8/test/mjsunit/mirror-regexp.js2
-rw-r--r--deps/v8/test/mjsunit/mirror-script.js2
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status341
-rw-r--r--deps/v8/test/mjsunit/object-literal.js6
-rw-r--r--deps/v8/test/mjsunit/parallel-optimize-disabled.js5
-rw-r--r--deps/v8/test/mjsunit/property-load-across-eval.js38
-rw-r--r--deps/v8/test/mjsunit/readonly.js2
-rw-r--r--deps/v8/test/mjsunit/regexp-lastIndex.js18
-rw-r--r--deps/v8/test/mjsunit/regexp-not-sticky-yet.js66
-rw-r--r--deps/v8/test/mjsunit/regexp-string-methods.js2
-rw-r--r--deps/v8/test/mjsunit/regress-3225.js12
-rw-r--r--deps/v8/test/mjsunit/regress-sync-optimized-lists.js1
-rw-r--r--deps/v8/test/mjsunit/regress/get-array-keys-oob.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1178598.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1182832.js40
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1199637.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1201933.js42
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1207276.js38
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1213575.js42
-rw-r--r--deps/v8/test/mjsunit/regress/regress-186.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-220.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2618.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3138.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-325676.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-343609.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-436896.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4576.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4693.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4769.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4825.js95
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4970.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5006.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5036.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-536751.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-542099.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-542100.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-552302.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-554865.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-572589.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-575364.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-590074.js29
-rw-r--r--deps/v8/test/mjsunit/regress/regress-592341.js36
-rw-r--r--deps/v8/test/mjsunit/regress/regress-592353.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-593299.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-595319.js39
-rw-r--r--deps/v8/test/mjsunit/regress/regress-596718.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-599089-array-push.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-599412.js22
-rw-r--r--deps/v8/test/mjsunit/regress/regress-599414-array-concat-fast-path.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-599710.js49
-rw-r--r--deps/v8/test/mjsunit/regress/regress-599825.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-602970.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-641.js37
-rw-r--r--deps/v8/test/mjsunit/regress/regress-799761.js94
-rw-r--r--deps/v8/test/mjsunit/regress/regress-88591.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-323936.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-476477-1.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-476477-2.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-501809.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-505907.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-506956.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-513471.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-537444.js28
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-548580.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-568477-2.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-573858.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-587068.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-589472.js94
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-592340.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-593697-2.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-594955.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-595615.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-595738.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-596394.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-598998.js32
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-599067.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-599073-1.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-599073-2.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-599073-3.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-599073-4.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-599714.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-600995.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-601617.js32
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-604680.js45
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-608278.js62
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-617524.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-debug-deopt-while-recompile.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-debugger-redirect.js37
-rw-r--r--deps/v8/test/mjsunit/regress/regress-handle-illegal-redeclaration.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-inline-strong-as-construct.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex2.js34
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-5009.js61
-rw-r--r--deps/v8/test/mjsunit/stack-traces-custom.js16
-rw-r--r--deps/v8/test/mjsunit/strict-mode.js6
-rw-r--r--deps/v8/test/mjsunit/string-fromcharcode.js5
-rw-r--r--deps/v8/test/mjsunit/strong/arrays.js12
-rw-r--r--deps/v8/test/mjsunit/strong/class-extend-null.js97
-rw-r--r--deps/v8/test/mjsunit/strong/class-literals.js159
-rw-r--r--deps/v8/test/mjsunit/strong/class-object-frozen.js98
-rw-r--r--deps/v8/test/mjsunit/strong/classes.js118
-rw-r--r--deps/v8/test/mjsunit/strong/delete.js11
-rw-r--r--deps/v8/test/mjsunit/strong/destructuring.js31
-rw-r--r--deps/v8/test/mjsunit/strong/empty-statement.js18
-rw-r--r--deps/v8/test/mjsunit/strong/equality.js10
-rw-r--r--deps/v8/test/mjsunit/strong/eval-direct.js22
-rw-r--r--deps/v8/test/mjsunit/strong/for-in.js11
-rw-r--r--deps/v8/test/mjsunit/strong/function-arity.js341
-rw-r--r--deps/v8/test/mjsunit/strong/functions.js87
-rw-r--r--deps/v8/test/mjsunit/strong/implicit-conversions-constants.js203
-rw-r--r--deps/v8/test/mjsunit/strong/implicit-conversions-count.js168
-rw-r--r--deps/v8/test/mjsunit/strong/implicit-conversions-inlining.js442
-rw-r--r--deps/v8/test/mjsunit/strong/implicit-conversions.js412
-rw-r--r--deps/v8/test/mjsunit/strong/literals.js291
-rw-r--r--deps/v8/test/mjsunit/strong/load-builtins.js42
-rw-r--r--deps/v8/test/mjsunit/strong/load-element-mutate-backing-store.js239
-rw-r--r--deps/v8/test/mjsunit/strong/load-element.js267
-rw-r--r--deps/v8/test/mjsunit/strong/load-property-mutate-backing-store.js174
-rw-r--r--deps/v8/test/mjsunit/strong/load-property.js203
-rw-r--r--deps/v8/test/mjsunit/strong/load-proxy.js100
-rw-r--r--deps/v8/test/mjsunit/strong/load-super.js102
-rw-r--r--deps/v8/test/mjsunit/strong/mutually-recursive-funcs.js25
-rw-r--r--deps/v8/test/mjsunit/strong/object-delete.js255
-rw-r--r--deps/v8/test/mjsunit/strong/object-freeze-property.js75
-rw-r--r--deps/v8/test/mjsunit/strong/object-set-prototype.js83
-rw-r--r--deps/v8/test/mjsunit/strong/super.js62
-rw-r--r--deps/v8/test/mjsunit/strong/switch.js110
-rw-r--r--deps/v8/test/mjsunit/strong/undefined.js200
-rw-r--r--deps/v8/test/mjsunit/strong/use-strong.js26
-rw-r--r--deps/v8/test/mjsunit/strong/var-let-const.js22
-rw-r--r--deps/v8/test/mjsunit/tail-call-intrinsic.js108
-rw-r--r--deps/v8/test/mjsunit/tools/profviz-test.log928
-rw-r--r--deps/v8/test/mjsunit/tools/tickprocessor.js1
-rw-r--r--deps/v8/test/mjsunit/undetectable-compare.js12
-rw-r--r--deps/v8/test/mjsunit/wasm/adapter-frame.js321
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-copy.js27
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-deopt.js29
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-f32.js242
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-f64.js313
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-heap.js239
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-i32.js252
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-literals.js261
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-stdlib.js358
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-u32.js225
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm.js475
-rw-r--r--deps/v8/test/mjsunit/wasm/calls.js211
-rw-r--r--deps/v8/test/mjsunit/wasm/divrem-trap.js45
-rw-r--r--deps/v8/test/mjsunit/wasm/embenchen/README3
-rw-r--r--deps/v8/test/mjsunit/wasm/embenchen/box2d.js20325
-rw-r--r--deps/v8/test/mjsunit/wasm/embenchen/copy.js5979
-rw-r--r--deps/v8/test/mjsunit/wasm/embenchen/corrections.js5986
-rw-r--r--deps/v8/test/mjsunit/wasm/embenchen/fannkuch.js8438
-rw-r--r--deps/v8/test/mjsunit/wasm/embenchen/fasta.js8608
-rw-r--r--deps/v8/test/mjsunit/wasm/embenchen/lua_binarytrees.js42713
-rw-r--r--deps/v8/test/mjsunit/wasm/embenchen/memops.js8090
-rw-r--r--deps/v8/test/mjsunit/wasm/embenchen/primes.js5987
-rw-r--r--deps/v8/test/mjsunit/wasm/embenchen/zlib.js14755
-rw-r--r--deps/v8/test/mjsunit/wasm/export-table.js50
-rw-r--r--deps/v8/test/mjsunit/wasm/ffi-error.js45
-rw-r--r--deps/v8/test/mjsunit/wasm/ffi.js166
-rw-r--r--deps/v8/test/mjsunit/wasm/function-prototype.js35
-rw-r--r--deps/v8/test/mjsunit/wasm/gc-frame.js74
-rw-r--r--deps/v8/test/mjsunit/wasm/import-table.js219
-rw-r--r--deps/v8/test/mjsunit/wasm/indirect-calls.js85
-rw-r--r--deps/v8/test/mjsunit/wasm/instantiate-module-basic.js54
-rw-r--r--deps/v8/test/mjsunit/wasm/instantiate-run-basic.js31
-rw-r--r--deps/v8/test/mjsunit/wasm/module-memory.js129
-rw-r--r--deps/v8/test/mjsunit/wasm/params.js142
-rw-r--r--deps/v8/test/mjsunit/wasm/stack.js65
-rw-r--r--deps/v8/test/mjsunit/wasm/stackwalk.js49
-rw-r--r--deps/v8/test/mjsunit/wasm/start-function.js179
-rw-r--r--deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js153
-rw-r--r--deps/v8/test/mjsunit/wasm/unreachable.js38
-rw-r--r--deps/v8/test/mjsunit/wasm/verify-function-basic-errors.js20
-rw-r--r--deps/v8/test/mjsunit/wasm/verify-function-simple.js14
-rw-r--r--deps/v8/test/mjsunit/wasm/verify-module-basic-errors.js20
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-constants.js52
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-module-builder.js335
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-object-api.js12
-rw-r--r--deps/v8/test/mozilla/mozilla.status8
-rw-r--r--deps/v8/test/mozilla/testcfg.py6
-rw-r--r--deps/v8/test/preparser/functions-only.js38
-rw-r--r--deps/v8/test/preparser/preparser.expectation14
-rw-r--r--deps/v8/test/preparser/preparser.status13
-rw-r--r--deps/v8/test/preparser/symbols-only.js49
-rw-r--r--deps/v8/test/preparser/testcfg.py52
-rw-r--r--deps/v8/test/promises-aplus/testcfg.py8
-rw-r--r--deps/v8/test/simdjs/testcfg.py6
-rwxr-xr-xdeps/v8/test/test262/archive.py5
-rw-r--r--deps/v8/test/test262/test262.status391
-rw-r--r--deps/v8/test/test262/testcfg.py34
-rw-r--r--deps/v8/test/unittests/compiler/graph-unittest.h1
-rw-r--r--deps/v8/test/unittests/compiler/int64-lowering-unittest.cc544
-rw-r--r--deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc9
-rw-r--r--deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc75
-rw-r--r--deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/load-elimination-unittest.cc15
-rw-r--r--deps/v8/test/unittests/compiler/loop-peeling-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.cc164
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.h36
-rw-r--r--deps/v8/test/unittests/compiler/s390/OWNERS5
-rw-r--r--deps/v8/test/unittests/compiler/s390/instruction-selector-s390-unittest.cc11
-rw-r--r--deps/v8/test/unittests/compiler/scheduler-unittest.cc49
-rw-r--r--deps/v8/test/unittests/compiler/select-lowering-unittest.cc74
-rw-r--r--deps/v8/test/unittests/compiler/zone-pool-unittest.cc3
-rw-r--r--deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc37
-rw-r--r--deps/v8/test/unittests/heap/gc-tracer-unittest.cc49
-rw-r--r--deps/v8/test/unittests/heap/scavenge-job-unittest.cc2
-rw-r--r--deps/v8/test/unittests/heap/slot-set-unittest.cc32
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc173
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc90
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc23
-rw-r--r--deps/v8/test/unittests/interpreter/bytecodes-unittest.cc224
-rw-r--r--deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc168
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc410
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h25
-rw-r--r--deps/v8/test/unittests/interpreter/register-translator-unittest.cc260
-rw-r--r--deps/v8/test/unittests/interpreter/source-position-table-unittest.cc84
-rw-r--r--deps/v8/test/unittests/runtime/runtime-interpreter-unittest.cc73
-rw-r--r--deps/v8/test/unittests/test-utils.h6
-rw-r--r--deps/v8/test/unittests/unittests.gyp11
-rw-r--r--deps/v8/test/unittests/unittests.status4
-rw-r--r--deps/v8/test/unittests/wasm/ast-decoder-unittest.cc1740
-rw-r--r--deps/v8/test/unittests/wasm/decoder-unittest.cc667
-rw-r--r--deps/v8/test/unittests/wasm/encoder-unittest.cc93
-rw-r--r--deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc38
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-unittest.cc1129
-rw-r--r--deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc53
-rw-r--r--deps/v8/test/webkit/class-syntax-declaration.js2
-rw-r--r--deps/v8/test/webkit/class-syntax-expression.js2
-rw-r--r--deps/v8/test/webkit/const-without-initializer-expected.txt34
-rw-r--r--deps/v8/test/webkit/const-without-initializer.js36
-rw-r--r--deps/v8/test/webkit/constant-count-expected.txt34
-rw-r--r--deps/v8/test/webkit/constant-count.js47
-rw-r--r--deps/v8/test/webkit/exception-for-nonobject-expected.txt2
-rw-r--r--deps/v8/test/webkit/exception-for-nonobject.js2
-rw-r--r--deps/v8/test/webkit/fast/js/arguments-expected.txt1
-rw-r--r--deps/v8/test/webkit/fast/js/arguments.js7
-rw-r--r--deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt16
-rw-r--r--deps/v8/test/webkit/fast/js/kde/parse-expected.txt8
-rw-r--r--deps/v8/test/webkit/fast/js/parser-syntax-check.js2
-rw-r--r--deps/v8/test/webkit/fast/regex/lastIndex-expected.txt6
-rw-r--r--deps/v8/test/webkit/fast/regex/toString-expected.txt2
-rw-r--r--deps/v8/test/webkit/function-declaration-statement.js2
-rw-r--r--deps/v8/test/webkit/instance-of-immediates-expected.txt12
-rw-r--r--deps/v8/test/webkit/instance-of-immediates.js2
-rw-r--r--deps/v8/test/webkit/testcfg.py10
-rw-r--r--deps/v8/test/webkit/webkit.status13
-rwxr-xr-xdeps/v8/tools/eval_gc_nvp.py42
-rwxr-xr-xdeps/v8/tools/eval_gc_time.sh125
-rw-r--r--deps/v8/tools/external-reference-check.py2
-rw-r--r--deps/v8/tools/gcmole/gcmole.lua1
-rw-r--r--deps/v8/tools/gen-postmortem-metadata.py4
-rw-r--r--deps/v8/tools/gyp/v8.gyp118
-rwxr-xr-xdeps/v8/tools/ll_prof.py100
-rw-r--r--deps/v8/tools/parser-shell.cc4
-rwxr-xr-xdeps/v8/tools/presubmit.py1
-rw-r--r--deps/v8/tools/profviz/composer.js10
-rw-r--r--deps/v8/tools/profviz/worker.js2
-rwxr-xr-xdeps/v8/tools/release/auto_roll.py1
-rwxr-xr-xdeps/v8/tools/release/check_clusterfuzz.py9
-rw-r--r--deps/v8/tools/release/common_includes.py2
-rwxr-xr-xdeps/v8/tools/release/test_mergeinfo.py180
-rw-r--r--deps/v8/tools/release/test_scripts.py2
-rwxr-xr-xdeps/v8/tools/run-deopt-fuzzer.py6
-rwxr-xr-xdeps/v8/tools/run-perf.sh52
-rwxr-xr-xdeps/v8/tools/run-tests.py42
-rwxr-xr-xdeps/v8/tools/sanitizers/sancov_formatter.py446
-rw-r--r--deps/v8/tools/sanitizers/sancov_formatter_test.py222
-rwxr-xr-xdeps/v8/tools/sanitizers/sancov_merger.py229
-rw-r--r--deps/v8/tools/sanitizers/sancov_merger_test.py82
-rwxr-xr-xdeps/v8/tools/sanitizers/sanitize_pcs.py11
-rw-r--r--deps/v8/tools/testrunner/local/commands.py16
-rw-r--r--deps/v8/tools/testrunner/local/execution.py42
-rw-r--r--deps/v8/tools/testrunner/local/statusfile.py9
-rw-r--r--deps/v8/tools/testrunner/local/testsuite.py26
-rw-r--r--deps/v8/tools/testrunner/local/utils.py2
-rw-r--r--deps/v8/tools/testrunner/objects/context.py7
-rw-r--r--deps/v8/tools/testrunner/objects/output.py7
-rw-r--r--deps/v8/tools/testrunner/objects/testcase.py21
-rw-r--r--deps/v8/tools/testrunner/testrunner.isolate12
-rw-r--r--deps/v8/tools/tick-processor.html6
-rw-r--r--deps/v8/tools/tickprocessor-driver.js6
-rw-r--r--deps/v8/tools/tickprocessor.js90
1487 files changed, 280066 insertions, 62434 deletions
diff --git a/deps/v8/.ycm_extra_conf.py b/deps/v8/.ycm_extra_conf.py
index e065a0896b..a451d9f31c 100644
--- a/deps/v8/.ycm_extra_conf.py
+++ b/deps/v8/.ycm_extra_conf.py
@@ -42,7 +42,7 @@ import sys
# Flags from YCM's default config.
flags = [
'-DUSE_CLANG_COMPLETER',
-'-std=gnu++0x',
+'-std=gnu++11',
'-x',
'c++',
]
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index ceffb49a99..60b6c51db5 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -71,6 +71,7 @@ Jianghua Yang <jianghua.yjh@alibaba-inc.com>
Joel Stanley <joel@jms.id.au>
Johan Bergström <johan@bergstroem.nu>
Jonathan Liu <net147@gmail.com>
+Julien Brianceau <jbriance@cisco.com>
JunHo Seo <sejunho@gmail.com>
Kang-Hao (Kenny) Lu <kennyluck@csail.mit.edu>
Karl Skomski <karl@skomski.com>
@@ -87,6 +88,7 @@ Michael Smith <mike@w3.org>
Mike Gilbert <floppymaster@gmail.com>
Mike Pennisi <mike@mikepennisi.com>
Milton Chiang <milton.chiang@mediatek.com>
+Myeong-bo Shim <m0609.shim@samsung.com>
Nicolas Antonius Ernst Leopold Maria Kaiser <nikai@nikai.net>
Paolo Giarrusso <p.giarrusso@gmail.com>
Patrick Gansterer <paroga@paroga.com>
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index 5f3baf23c9..28aca24afc 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -21,6 +21,12 @@ declare_args() {
# Enable the snapshot feature, for fast context creation.
# http://v8project.blogspot.com/2015/09/custom-startup-snapshots.html
v8_use_snapshot = true
+
+ # Similar to vfp but on MIPS.
+ v8_can_use_fpu_instructions = true
+
+ # Similar to the ARM hard float ABI but on MIPS.
+ v8_use_mips_abi_hardfloat = true
}
# TODO(jochen): These will need to be user-settable to support standalone V8
@@ -28,7 +34,7 @@ declare_args() {
v8_deprecation_warnings = false
v8_enable_disassembler = false
v8_enable_gdbjit = false
-v8_enable_handle_zapping = is_debug
+v8_enable_handle_zapping = false
v8_enable_i18n_support = true
v8_enable_verify_heap = false
v8_interpreted_regexp = false
@@ -77,6 +83,11 @@ config("internal_config_base") {
include_dirs = [ "." ]
}
+# This config should be applied to code using the libplatform.
+config("libplatform_config") {
+ include_dirs = [ "include" ]
+}
+
# This config should only be applied to code using V8 and not any V8 code
# itself.
config("external_config") {
@@ -137,48 +148,93 @@ config("toolchain") {
defines = []
cflags = []
- # TODO(jochen): Add support for arm subarchs, mips, mipsel, mips64el.
-
if (v8_target_arch == "arm") {
defines += [ "V8_TARGET_ARCH_ARM" ]
- if (current_cpu == "arm") {
- if (arm_version == 7) {
- defines += [ "CAN_USE_ARMV7_INSTRUCTIONS" ]
- }
- if (arm_fpu == "vfpv3-d16") {
- defines += [ "CAN_USE_VFP3_INSTRUCTIONS" ]
- } else if (arm_fpu == "vfpv3") {
- defines += [
- "CAN_USE_VFP3_INSTRUCTIONS",
- "CAN_USE_VFP32DREGS",
- ]
- } else if (arm_fpu == "neon") {
- defines += [
- "CAN_USE_VFP3_INSTRUCTIONS",
- "CAN_USE_VFP32DREGS",
- "CAN_USE_NEON",
- ]
- }
- } else {
- # These defines ares used for the ARM simulator.
+ if (arm_version == 7) {
+ defines += [ "CAN_USE_ARMV7_INSTRUCTIONS" ]
+ }
+ if (arm_fpu == "vfpv3-d16") {
+ defines += [ "CAN_USE_VFP3_INSTRUCTIONS" ]
+ } else if (arm_fpu == "vfpv3") {
+ defines += [
+ "CAN_USE_VFP3_INSTRUCTIONS",
+ "CAN_USE_VFP32DREGS",
+ ]
+ } else if (arm_fpu == "neon") {
defines += [
- "CAN_USE_ARMV7_INSTRUCTIONS",
"CAN_USE_VFP3_INSTRUCTIONS",
"CAN_USE_VFP32DREGS",
- "USE_EABI_HARDFLOAT=0",
+ "CAN_USE_NEON",
]
}
-
# TODO(jochen): Add support for arm_test_noprobe.
+
+ if (current_cpu != "arm") {
+ # These defines ares used for the ARM simulator.
+ if (arm_float_abi == "hard") {
+ defines += [ "USE_EABI_HARDFLOAT=1" ]
+ } else if (arm_float_abi == "softfp") {
+ defines += [ "USE_EABI_HARDFLOAT=0" ]
+ }
+ }
}
if (v8_target_arch == "arm64") {
defines += [ "V8_TARGET_ARCH_ARM64" ]
}
+ # TODO(jochen): Add support for mips.
if (v8_target_arch == "mipsel") {
defines += [ "V8_TARGET_ARCH_MIPS" ]
+ if (v8_can_use_fpu_instructions) {
+ defines += [ "CAN_USE_FPU_INSTRUCTIONS" ]
+ }
+ if (v8_use_mips_abi_hardfloat) {
+ defines += [
+ "__mips_hard_float=1",
+ "CAN_USE_FPU_INSTRUCTIONS",
+ ]
+ } else {
+ defines += [ "__mips_soft_float=1" ]
+ }
+ if (mips_arch_variant == "r6") {
+ defines += [
+ "_MIPS_ARCH_MIPS32R6",
+ "FPU_MODE_FP64",
+ ]
+ } else if (mips_arch_variant == "r2") {
+ defines += [ "_MIPS_ARCH_MIPS32R2" ]
+ if (mips_fpu_mode == "fp64") {
+ defines += [ "FPU_MODE_FP64" ]
+ } else if (mips_fpu_mode == "fpxx") {
+ defines += [ "FPU_MODE_FPXX" ]
+ } else if (mips_fpu_mode == "fp32") {
+ defines += [ "FPU_MODE_FP32" ]
+ }
+ } else if (mips_arch_variant == "r1") {
+ defines += [ "FPU_MODE_FP32" ]
+ }
+ # TODO(jochen): Add support for mips_arch_variant rx and loongson.
}
+ # TODO(jochen): Add support for mips64.
if (v8_target_arch == "mips64el") {
defines += [ "V8_TARGET_ARCH_MIPS64" ]
+ if (v8_can_use_fpu_instructions) {
+ defines += [ "CAN_USE_FPU_INSTRUCTIONS" ]
+ }
+ # TODO(jochen): Add support for big endian host byteorder.
+ defines += [ "V8_TARGET_ARCH_MIPS64_LE" ]
+ if (v8_use_mips_abi_hardfloat) {
+ defines += [
+ "__mips_hard_float=1",
+ "CAN_USE_FPU_INSTRUCTIONS",
+ ]
+ } else {
+ defines += [ "__mips_soft_float=1" ]
+ }
+ if (mips_arch_variant == "r6") {
+ defines += [ "_MIPS_ARCH_MIPS64R6" ]
+ } else if (mips_arch_variant == "r2") {
+ defines += [ "_MIPS_ARCH_MIPS64R2" ]
+ }
}
if (v8_target_arch == "s390") {
defines += [ "V8_TARGET_ARCH_S390" ]
@@ -227,8 +283,11 @@ action("js2c") {
# The script depends on this other script, this rule causes a rebuild if it
# changes.
- inputs = [ "tools/jsmin.py" ]
+ inputs = [
+ "tools/jsmin.py",
+ ]
+ # NOSORT
sources = [
"src/js/macros.py",
"src/messages.h",
@@ -257,6 +316,7 @@ action("js2c") {
"src/js/string-iterator.js",
"src/js/templates.js",
"src/js/spread.js",
+ "src/js/proxy.js",
"src/debug/mirrors.js",
"src/debug/debug.js",
"src/debug/liveedit.js",
@@ -291,21 +351,24 @@ action("js2c_experimental") {
# The script depends on this other script, this rule causes a rebuild if it
# changes.
- inputs = [ "tools/jsmin.py" ]
+ inputs = [
+ "tools/jsmin.py",
+ ]
+ # NOSORT
sources = [
"src/js/macros.py",
"src/messages.h",
- "src/js/proxy.js",
"src/js/generator.js",
"src/js/harmony-atomics.js",
- "src/js/harmony-regexp.js",
+ "src/js/harmony-regexp-exec.js",
"src/js/harmony-object-observe.js",
"src/js/harmony-sharedarraybuffer.js",
"src/js/harmony-simd.js",
"src/js/harmony-species.js",
"src/js/harmony-unicode-regexps.js",
- "src/js/promise-extra.js"
+ "src/js/harmony-string-padding.js",
+ "src/js/promise-extra.js",
]
outputs = [
@@ -334,7 +397,9 @@ action("js2c_extras") {
# The script depends on this other script, this rule causes a rebuild if it
# changes.
- inputs = [ "tools/jsmin.py" ]
+ inputs = [
+ "tools/jsmin.py",
+ ]
sources = v8_extra_library_files
@@ -343,8 +408,7 @@ action("js2c_extras") {
]
args = [
- rebase_path("$target_gen_dir/extras-libraries.cc",
- root_build_dir),
+ rebase_path("$target_gen_dir/extras-libraries.cc", root_build_dir),
"EXTRAS",
] + rebase_path(sources, root_build_dir)
@@ -364,7 +428,9 @@ action("js2c_experimental_extras") {
# The script depends on this other script, this rule causes a rebuild if it
# changes.
- inputs = [ "tools/jsmin.py" ]
+ inputs = [
+ "tools/jsmin.py",
+ ]
sources = v8_experimental_extra_library_files
@@ -382,7 +448,8 @@ action("js2c_experimental_extras") {
outputs += [ "$target_gen_dir/libraries_experimental_extras.bin" ]
args += [
"--startup_blob",
- rebase_path("$target_gen_dir/libraries_experimental_extras.bin", root_build_dir),
+ rebase_path("$target_gen_dir/libraries_experimental_extras.bin",
+ root_build_dir),
]
}
}
@@ -392,6 +459,7 @@ action("d8_js2c") {
script = "tools/js2c.py"
+ # NOSORT
inputs = [
"src/d8.js",
"src/js/macros.py",
@@ -425,10 +493,11 @@ if (v8_use_external_startup_data) {
deps = [
":js2c",
":js2c_experimental",
- ":js2c_extras",
":js2c_experimental_extras",
+ ":js2c_extras",
]
+ # NOSORT
sources = [
"$target_gen_dir/libraries.bin",
"$target_gen_dir/libraries_experimental.bin",
@@ -456,6 +525,7 @@ action("postmortem-metadata") {
script = "tools/gen-postmortem-metadata.py"
+ # NOSORT
sources = [
"src/objects.h",
"src/objects-inl.h",
@@ -486,9 +556,6 @@ action("run_mksnapshot") {
"./" + rebase_path(get_label_info(":mksnapshot($snapshot_toolchain)",
"root_out_dir") + "/mksnapshot",
root_build_dir),
- "--log-snapshot-positions",
- "--logfile",
- rebase_path("$target_gen_dir/snapshot.log", root_build_dir),
"--startup_src",
rebase_path("$target_gen_dir/snapshot.cc", root_build_dir),
]
@@ -519,16 +586,16 @@ source_set("v8_nosnapshot") {
deps = [
":js2c",
":js2c_experimental",
- ":js2c_extras",
":js2c_experimental_extras",
+ ":js2c_extras",
":v8_base",
]
sources = [
- "$target_gen_dir/libraries.cc",
+ "$target_gen_dir/experimental-extras-libraries.cc",
"$target_gen_dir/experimental-libraries.cc",
"$target_gen_dir/extras-libraries.cc",
- "$target_gen_dir/experimental-extras-libraries.cc",
+ "$target_gen_dir/libraries.cc",
"src/snapshot/snapshot-empty.cc",
]
@@ -552,8 +619,8 @@ source_set("v8_snapshot") {
deps = [
":js2c",
":js2c_experimental",
- ":js2c_extras",
":js2c_experimental_extras",
+ ":js2c_extras",
":v8_base",
]
public_deps = [
@@ -563,10 +630,10 @@ source_set("v8_snapshot") {
]
sources = [
- "$target_gen_dir/libraries.cc",
+ "$target_gen_dir/experimental-extras-libraries.cc",
"$target_gen_dir/experimental-libraries.cc",
"$target_gen_dir/extras-libraries.cc",
- "$target_gen_dir/experimental-extras-libraries.cc",
+ "$target_gen_dir/libraries.cc",
"$target_gen_dir/snapshot.cc",
]
@@ -586,8 +653,8 @@ if (v8_use_external_startup_data) {
deps = [
":js2c",
":js2c_experimental",
- ":js2c_extras",
":js2c_experimental_extras",
+ ":js2c_extras",
":v8_base",
]
public_deps = [
@@ -629,22 +696,24 @@ source_set("v8_base") {
"src/accessors.h",
"src/address-map.cc",
"src/address-map.h",
- "src/allocation.cc",
- "src/allocation.h",
"src/allocation-site-scopes.cc",
"src/allocation-site-scopes.h",
- "src/api.cc",
- "src/api.h",
+ "src/allocation.cc",
+ "src/allocation.h",
+ "src/api-arguments.cc",
+ "src/api-arguments.h",
"src/api-experimental.cc",
"src/api-experimental.h",
"src/api-natives.cc",
"src/api-natives.h",
+ "src/api.cc",
+ "src/api.h",
"src/arguments.cc",
"src/arguments.h",
"src/assembler.cc",
"src/assembler.h",
- "src/assert-scope.h",
"src/assert-scope.cc",
+ "src/assert-scope.h",
"src/ast/ast-expression-rewriter.cc",
"src/ast/ast-expression-rewriter.h",
"src/ast/ast-expression-visitor.cc",
@@ -684,27 +753,30 @@ source_set("v8_base") {
"src/bootstrapper.h",
"src/builtins.cc",
"src/builtins.h",
- "src/cancelable-task.cc",
- "src/cancelable-task.h",
"src/cached-powers.cc",
"src/cached-powers.h",
- "src/char-predicates.cc",
+ "src/cancelable-task.cc",
+ "src/cancelable-task.h",
"src/char-predicates-inl.h",
+ "src/char-predicates.cc",
"src/char-predicates.h",
"src/checks.h",
"src/code-factory.cc",
"src/code-factory.h",
+ "src/code-stubs-hydrogen.cc",
"src/code-stubs.cc",
"src/code-stubs.h",
- "src/code-stubs-hydrogen.cc",
"src/codegen.cc",
"src/codegen.h",
+ "src/collector.h",
"src/compilation-cache.cc",
"src/compilation-cache.h",
"src/compilation-dependencies.cc",
"src/compilation-dependencies.h",
"src/compilation-statistics.cc",
"src/compilation-statistics.h",
+ "src/compiler.cc",
+ "src/compiler.h",
"src/compiler/access-builder.cc",
"src/compiler/access-builder.h",
"src/compiler/access-info.cc",
@@ -723,9 +795,9 @@ source_set("v8_base") {
"src/compiler/bytecode-branch-analysis.h",
"src/compiler/bytecode-graph-builder.cc",
"src/compiler/bytecode-graph-builder.h",
+ "src/compiler/c-linkage.cc",
"src/compiler/change-lowering.cc",
"src/compiler/change-lowering.h",
- "src/compiler/c-linkage.cc",
"src/compiler/coalesced-live-ranges.cc",
"src/compiler/coalesced-live-ranges.h",
"src/compiler/code-generator-impl.h",
@@ -748,18 +820,16 @@ source_set("v8_base") {
"src/compiler/dead-code-elimination.cc",
"src/compiler/dead-code-elimination.h",
"src/compiler/diamond.h",
- "src/compiler/escape-analysis.cc",
- "src/compiler/escape-analysis.h",
"src/compiler/escape-analysis-reducer.cc",
"src/compiler/escape-analysis-reducer.h",
- "src/compiler/fast-accessor-assembler.cc",
- "src/compiler/fast-accessor-assembler.h",
- "src/compiler/frame.cc",
- "src/compiler/frame.h",
+ "src/compiler/escape-analysis.cc",
+ "src/compiler/escape-analysis.h",
"src/compiler/frame-elider.cc",
"src/compiler/frame-elider.h",
"src/compiler/frame-states.cc",
"src/compiler/frame-states.h",
+ "src/compiler/frame.cc",
+ "src/compiler/frame.h",
"src/compiler/gap-resolver.cc",
"src/compiler/gap-resolver.h",
"src/compiler/graph-reducer.cc",
@@ -800,10 +870,10 @@ source_set("v8_base") {
"src/compiler/js-global-object-specialization.h",
"src/compiler/js-graph.cc",
"src/compiler/js-graph.h",
- "src/compiler/js-inlining.cc",
- "src/compiler/js-inlining.h",
"src/compiler/js-inlining-heuristic.cc",
"src/compiler/js-inlining-heuristic.h",
+ "src/compiler/js-inlining.cc",
+ "src/compiler/js-inlining.h",
"src/compiler/js-intrinsic-lowering.cc",
"src/compiler/js-intrinsic-lowering.h",
"src/compiler/js-native-context-specialization.cc",
@@ -822,9 +892,9 @@ source_set("v8_base") {
"src/compiler/liveness-analyzer.h",
"src/compiler/load-elimination.cc",
"src/compiler/load-elimination.h",
- "src/compiler/loop-peeling.cc",
"src/compiler/loop-analysis.cc",
"src/compiler/loop-analysis.h",
+ "src/compiler/loop-peeling.cc",
"src/compiler/machine-operator-reducer.cc",
"src/compiler/machine-operator-reducer.h",
"src/compiler/machine-operator.cc",
@@ -850,16 +920,16 @@ source_set("v8_base") {
"src/compiler/operator.h",
"src/compiler/osr.cc",
"src/compiler/osr.h",
- "src/compiler/pipeline.cc",
- "src/compiler/pipeline.h",
"src/compiler/pipeline-statistics.cc",
"src/compiler/pipeline-statistics.h",
+ "src/compiler/pipeline.cc",
+ "src/compiler/pipeline.h",
"src/compiler/raw-machine-assembler.cc",
"src/compiler/raw-machine-assembler.h",
- "src/compiler/register-allocator.cc",
- "src/compiler/register-allocator.h",
"src/compiler/register-allocator-verifier.cc",
"src/compiler/register-allocator-verifier.h",
+ "src/compiler/register-allocator.cc",
+ "src/compiler/register-allocator.h",
"src/compiler/representation-change.cc",
"src/compiler/representation-change.h",
"src/compiler/schedule.cc",
@@ -895,8 +965,6 @@ source_set("v8_base") {
"src/compiler/wasm-linkage.cc",
"src/compiler/zone-pool.cc",
"src/compiler/zone-pool.h",
- "src/compiler.cc",
- "src/compiler.h",
"src/context-measure.cc",
"src/context-measure.h",
"src/contexts-inl.h",
@@ -907,11 +975,11 @@ source_set("v8_base") {
"src/conversions.h",
"src/counters.cc",
"src/counters.h",
+ "src/crankshaft/compilation-phase.cc",
+ "src/crankshaft/compilation-phase.h",
"src/crankshaft/hydrogen-alias-analysis.h",
"src/crankshaft/hydrogen-bce.cc",
"src/crankshaft/hydrogen-bce.h",
- "src/crankshaft/hydrogen-bch.cc",
- "src/crankshaft/hydrogen-bch.h",
"src/crankshaft/hydrogen-canonicalize.cc",
"src/crankshaft/hydrogen-canonicalize.h",
"src/crankshaft/hydrogen-check-elimination.cc",
@@ -1011,12 +1079,16 @@ source_set("v8_base") {
"src/extensions/statistics-extension.h",
"src/extensions/trigger-failure-extension.cc",
"src/extensions/trigger-failure-extension.h",
+ "src/external-reference-table.cc",
+ "src/external-reference-table.h",
"src/factory.cc",
"src/factory.h",
+ "src/fast-accessor-assembler.cc",
+ "src/fast-accessor-assembler.h",
"src/fast-dtoa.cc",
"src/fast-dtoa.h",
- "src/field-index.h",
"src/field-index-inl.h",
+ "src/field-index.h",
"src/field-type.cc",
"src/field-type.h",
"src/fixed-dtoa.cc",
@@ -1064,47 +1136,43 @@ source_set("v8_base") {
"src/heap/objects-visiting-inl.h",
"src/heap/objects-visiting.cc",
"src/heap/objects-visiting.h",
+ "src/heap/page-parallel-job.h",
"src/heap/remembered-set.cc",
"src/heap/remembered-set.h",
- "src/heap/scavenge-job.h",
"src/heap/scavenge-job.cc",
+ "src/heap/scavenge-job.h",
"src/heap/scavenger-inl.h",
"src/heap/scavenger.cc",
"src/heap/scavenger.h",
"src/heap/slot-set.h",
- "src/heap/slots-buffer.cc",
- "src/heap/slots-buffer.h",
"src/heap/spaces-inl.h",
"src/heap/spaces.cc",
"src/heap/spaces.h",
- "src/heap/store-buffer-inl.h",
"src/heap/store-buffer.cc",
"src/heap/store-buffer.h",
"src/i18n.cc",
"src/i18n.h",
- "src/icu_util.cc",
- "src/icu_util.h",
"src/ic/access-compiler.cc",
"src/ic/access-compiler.h",
"src/ic/call-optimization.cc",
"src/ic/call-optimization.h",
"src/ic/handler-compiler.cc",
"src/ic/handler-compiler.h",
+ "src/ic/ic-compiler.cc",
+ "src/ic/ic-compiler.h",
"src/ic/ic-inl.h",
"src/ic/ic-state.cc",
"src/ic/ic-state.h",
"src/ic/ic.cc",
"src/ic/ic.h",
- "src/ic/ic-compiler.cc",
- "src/ic/ic-compiler.h",
"src/ic/stub-cache.cc",
"src/ic/stub-cache.h",
+ "src/icu_util.cc",
+ "src/icu_util.h",
"src/identity-map.cc",
"src/identity-map.h",
"src/interface-descriptors.cc",
"src/interface-descriptors.h",
- "src/interpreter/bytecodes.cc",
- "src/interpreter/bytecodes.h",
"src/interpreter/bytecode-array-builder.cc",
"src/interpreter/bytecode-array-builder.h",
"src/interpreter/bytecode-array-iterator.cc",
@@ -1114,18 +1182,20 @@ source_set("v8_base") {
"src/interpreter/bytecode-register-allocator.cc",
"src/interpreter/bytecode-register-allocator.h",
"src/interpreter/bytecode-traits.h",
+ "src/interpreter/bytecodes.cc",
+ "src/interpreter/bytecodes.h",
"src/interpreter/constant-array-builder.cc",
"src/interpreter/constant-array-builder.h",
"src/interpreter/control-flow-builders.cc",
"src/interpreter/control-flow-builders.h",
"src/interpreter/handler-table-builder.cc",
"src/interpreter/handler-table-builder.h",
- "src/interpreter/interpreter.cc",
- "src/interpreter/interpreter.h",
"src/interpreter/interpreter-assembler.cc",
"src/interpreter/interpreter-assembler.h",
- "src/interpreter/register-translator.cc",
- "src/interpreter/register-translator.h",
+ "src/interpreter/interpreter-intrinsics.cc",
+ "src/interpreter/interpreter-intrinsics.h",
+ "src/interpreter/interpreter.cc",
+ "src/interpreter/interpreter.h",
"src/interpreter/source-position-table.cc",
"src/interpreter/source-position-table.h",
"src/isolate-inl.h",
@@ -1133,8 +1203,8 @@ source_set("v8_base") {
"src/isolate.h",
"src/json-parser.h",
"src/json-stringifier.h",
- "src/key-accumulator.h",
- "src/key-accumulator.cc",
+ "src/keys.cc",
+ "src/keys.h",
"src/layout-descriptor-inl.h",
"src/layout-descriptor.cc",
"src/layout-descriptor.h",
@@ -1147,9 +1217,9 @@ source_set("v8_base") {
"src/log.h",
"src/lookup.cc",
"src/lookup.h",
- "src/macro-assembler.h",
"src/machine-type.cc",
"src/machine-type.h",
+ "src/macro-assembler.h",
"src/messages.cc",
"src/messages.h",
"src/msan.h",
@@ -1188,6 +1258,8 @@ source_set("v8_base") {
"src/parsing/token.h",
"src/pending-compilation-error-handler.cc",
"src/pending-compilation-error-handler.h",
+ "src/perf-jit.cc",
+ "src/perf-jit.h",
"src/profiler/allocation-tracker.cc",
"src/profiler/allocation-tracker.h",
"src/profiler/circular-queue-inl.h",
@@ -1279,19 +1351,29 @@ source_set("v8_base") {
"src/signature.h",
"src/simulator.h",
"src/small-pointer-list.h",
- "src/snapshot/natives.h",
+ "src/snapshot/code-serializer.cc",
+ "src/snapshot/code-serializer.h",
+ "src/snapshot/deserializer.cc",
+ "src/snapshot/deserializer.h",
"src/snapshot/natives-common.cc",
- "src/snapshot/serialize.cc",
- "src/snapshot/serialize.h",
+ "src/snapshot/natives.h",
+ "src/snapshot/partial-serializer.cc",
+ "src/snapshot/partial-serializer.h",
+ "src/snapshot/serializer-common.cc",
+ "src/snapshot/serializer-common.h",
+ "src/snapshot/serializer.cc",
+ "src/snapshot/serializer.h",
"src/snapshot/snapshot-common.cc",
"src/snapshot/snapshot-source-sink.cc",
"src/snapshot/snapshot-source-sink.h",
+ "src/snapshot/snapshot.h",
+ "src/snapshot/startup-serializer.cc",
+ "src/snapshot/startup-serializer.h",
"src/source-position.h",
- "src/splay-tree.h",
"src/splay-tree-inl.h",
- "src/snapshot/snapshot.h",
- "src/startup-data-util.h",
+ "src/splay-tree.h",
"src/startup-data-util.cc",
+ "src/startup-data-util.h",
"src/string-builder.cc",
"src/string-builder.h",
"src/string-search.h",
@@ -1299,6 +1381,8 @@ source_set("v8_base") {
"src/string-stream.h",
"src/strtod.cc",
"src/strtod.h",
+ "src/third_party/fdlibm/fdlibm.cc",
+ "src/third_party/fdlibm/fdlibm.h",
"src/tracing/trace-event.cc",
"src/tracing/trace-event.h",
"src/transitions-inl.h",
@@ -1317,13 +1401,13 @@ source_set("v8_base") {
"src/typing-asm.h",
"src/typing-reset.cc",
"src/typing-reset.h",
- "src/unicode-inl.h",
- "src/unicode.cc",
- "src/unicode.h",
"src/unicode-cache-inl.h",
"src/unicode-cache.h",
"src/unicode-decoder.cc",
"src/unicode-decoder.h",
+ "src/unicode-inl.h",
+ "src/unicode.cc",
+ "src/unicode.h",
"src/utils-inl.h",
"src/utils.cc",
"src/utils.h",
@@ -1345,6 +1429,7 @@ source_set("v8_base") {
"src/wasm/encoder.h",
"src/wasm/module-decoder.cc",
"src/wasm/module-decoder.h",
+ "src/wasm/wasm-external-refs.h",
"src/wasm/wasm-js.cc",
"src/wasm/wasm-js.h",
"src/wasm/wasm-macro-gen.h",
@@ -1354,26 +1439,24 @@ source_set("v8_base") {
"src/wasm/wasm-opcodes.h",
"src/wasm/wasm-result.cc",
"src/wasm/wasm-result.h",
- "src/zone.cc",
- "src/zone.h",
"src/zone-allocator.h",
"src/zone-containers.h",
- "src/third_party/fdlibm/fdlibm.cc",
- "src/third_party/fdlibm/fdlibm.h",
+ "src/zone.cc",
+ "src/zone.h",
]
if (v8_target_arch == "x86") {
sources += [
+ "src/compiler/ia32/code-generator-ia32.cc",
+ "src/compiler/ia32/instruction-codes-ia32.h",
+ "src/compiler/ia32/instruction-scheduler-ia32.cc",
+ "src/compiler/ia32/instruction-selector-ia32.cc",
"src/crankshaft/ia32/lithium-codegen-ia32.cc",
"src/crankshaft/ia32/lithium-codegen-ia32.h",
"src/crankshaft/ia32/lithium-gap-resolver-ia32.cc",
"src/crankshaft/ia32/lithium-gap-resolver-ia32.h",
"src/crankshaft/ia32/lithium-ia32.cc",
"src/crankshaft/ia32/lithium-ia32.h",
- "src/compiler/ia32/code-generator-ia32.cc",
- "src/compiler/ia32/instruction-codes-ia32.h",
- "src/compiler/ia32/instruction-scheduler-ia32.cc",
- "src/compiler/ia32/instruction-selector-ia32.cc",
"src/debug/ia32/debug-ia32.cc",
"src/full-codegen/ia32/full-codegen-ia32.cc",
"src/ia32/assembler-ia32-inl.h",
@@ -1394,8 +1477,8 @@ source_set("v8_base") {
"src/ia32/macro-assembler-ia32.h",
"src/ic/ia32/access-compiler-ia32.cc",
"src/ic/ia32/handler-compiler-ia32.cc",
- "src/ic/ia32/ic-ia32.cc",
"src/ic/ia32/ic-compiler-ia32.cc",
+ "src/ic/ia32/ic-ia32.cc",
"src/ic/ia32/stub-cache-ia32.cc",
"src/regexp/ia32/regexp-macro-assembler-ia32.cc",
"src/regexp/ia32/regexp-macro-assembler-ia32.h",
@@ -1416,8 +1499,8 @@ source_set("v8_base") {
"src/full-codegen/x64/full-codegen-x64.cc",
"src/ic/x64/access-compiler-x64.cc",
"src/ic/x64/handler-compiler-x64.cc",
- "src/ic/x64/ic-x64.cc",
"src/ic/x64/ic-compiler-x64.cc",
+ "src/ic/x64/ic-x64.cc",
"src/ic/x64/stub-cache-x64.cc",
"src/regexp/x64/regexp-macro-assembler-x64.cc",
"src/regexp/x64/regexp-macro-assembler-x64.h",
@@ -1448,8 +1531,8 @@ source_set("v8_base") {
"src/arm/code-stubs-arm.h",
"src/arm/codegen-arm.cc",
"src/arm/codegen-arm.h",
- "src/arm/constants-arm.h",
"src/arm/constants-arm.cc",
+ "src/arm/constants-arm.h",
"src/arm/cpu-arm.cc",
"src/arm/deoptimizer-arm.cc",
"src/arm/disasm-arm.cc",
@@ -1483,19 +1566,19 @@ source_set("v8_base") {
]
} else if (v8_target_arch == "arm64") {
sources += [
+ "src/arm64/assembler-arm64-inl.h",
"src/arm64/assembler-arm64.cc",
"src/arm64/assembler-arm64.h",
- "src/arm64/assembler-arm64-inl.h",
"src/arm64/builtins-arm64.cc",
- "src/arm64/codegen-arm64.cc",
- "src/arm64/codegen-arm64.h",
"src/arm64/code-stubs-arm64.cc",
"src/arm64/code-stubs-arm64.h",
+ "src/arm64/codegen-arm64.cc",
+ "src/arm64/codegen-arm64.h",
"src/arm64/constants-arm64.h",
"src/arm64/cpu-arm64.cc",
+ "src/arm64/decoder-arm64-inl.h",
"src/arm64/decoder-arm64.cc",
"src/arm64/decoder-arm64.h",
- "src/arm64/decoder-arm64-inl.h",
"src/arm64/deoptimizer-arm64.cc",
"src/arm64/disasm-arm64.cc",
"src/arm64/disasm-arm64.h",
@@ -1507,9 +1590,9 @@ source_set("v8_base") {
"src/arm64/instrument-arm64.h",
"src/arm64/interface-descriptors-arm64.cc",
"src/arm64/interface-descriptors-arm64.h",
+ "src/arm64/macro-assembler-arm64-inl.h",
"src/arm64/macro-assembler-arm64.cc",
"src/arm64/macro-assembler-arm64.h",
- "src/arm64/macro-assembler-arm64-inl.h",
"src/arm64/simulator-arm64.cc",
"src/arm64/simulator-arm64.h",
"src/arm64/utils-arm64.cc",
@@ -1518,9 +1601,9 @@ source_set("v8_base") {
"src/compiler/arm64/instruction-codes-arm64.h",
"src/compiler/arm64/instruction-scheduler-arm64.cc",
"src/compiler/arm64/instruction-selector-arm64.cc",
+ "src/crankshaft/arm64/delayed-masm-arm64-inl.h",
"src/crankshaft/arm64/delayed-masm-arm64.cc",
"src/crankshaft/arm64/delayed-masm-arm64.h",
- "src/crankshaft/arm64/delayed-masm-arm64-inl.h",
"src/crankshaft/arm64/lithium-arm64.cc",
"src/crankshaft/arm64/lithium-arm64.h",
"src/crankshaft/arm64/lithium-codegen-arm64.cc",
@@ -1553,17 +1636,17 @@ source_set("v8_base") {
"src/full-codegen/mips/full-codegen-mips.cc",
"src/ic/mips/access-compiler-mips.cc",
"src/ic/mips/handler-compiler-mips.cc",
- "src/ic/mips/ic-mips.cc",
"src/ic/mips/ic-compiler-mips.cc",
+ "src/ic/mips/ic-mips.cc",
"src/ic/mips/stub-cache-mips.cc",
+ "src/mips/assembler-mips-inl.h",
"src/mips/assembler-mips.cc",
"src/mips/assembler-mips.h",
- "src/mips/assembler-mips-inl.h",
"src/mips/builtins-mips.cc",
- "src/mips/codegen-mips.cc",
- "src/mips/codegen-mips.h",
"src/mips/code-stubs-mips.cc",
"src/mips/code-stubs-mips.h",
+ "src/mips/codegen-mips.cc",
+ "src/mips/codegen-mips.h",
"src/mips/constants-mips.cc",
"src/mips/constants-mips.h",
"src/mips/cpu-mips.cc",
@@ -1595,17 +1678,17 @@ source_set("v8_base") {
"src/full-codegen/mips64/full-codegen-mips64.cc",
"src/ic/mips64/access-compiler-mips64.cc",
"src/ic/mips64/handler-compiler-mips64.cc",
- "src/ic/mips64/ic-mips64.cc",
"src/ic/mips64/ic-compiler-mips64.cc",
+ "src/ic/mips64/ic-mips64.cc",
"src/ic/mips64/stub-cache-mips64.cc",
+ "src/mips64/assembler-mips64-inl.h",
"src/mips64/assembler-mips64.cc",
"src/mips64/assembler-mips64.h",
- "src/mips64/assembler-mips64-inl.h",
"src/mips64/builtins-mips64.cc",
- "src/mips64/codegen-mips64.cc",
- "src/mips64/codegen-mips64.h",
"src/mips64/code-stubs-mips64.cc",
"src/mips64/code-stubs-mips64.h",
+ "src/mips64/codegen-mips64.cc",
+ "src/mips64/codegen-mips64.h",
"src/mips64/constants-mips64.cc",
"src/mips64/constants-mips64.h",
"src/mips64/cpu-mips64.cc",
@@ -1621,6 +1704,48 @@ source_set("v8_base") {
"src/regexp/mips64/regexp-macro-assembler-mips64.cc",
"src/regexp/mips64/regexp-macro-assembler-mips64.h",
]
+ } else if (v8_target_arch == "s390" || v8_target_arch == "s390x") {
+ sources += [
+ "src/compiler/s390/code-generator-s390.cc",
+ "src/compiler/s390/instruction-codes-s390.h",
+ "src/compiler/s390/instruction-scheduler-s390.cc",
+ "src/compiler/s390/instruction-selector-s390.cc",
+ "src/crankshaft/s390/lithium-codegen-s390.cc",
+ "src/crankshaft/s390/lithium-codegen-s390.h",
+ "src/crankshaft/s390/lithium-gap-resolver-s390.cc",
+ "src/crankshaft/s390/lithium-gap-resolver-s390.h",
+ "src/crankshaft/s390/lithium-s390.cc",
+ "src/crankshaft/s390/lithium-s390.h",
+ "src/debug/s390/debug-s390.cc",
+ "src/full-codegen/s390/full-codegen-s390.cc",
+ "src/ic/s390/access-compiler-s390.cc",
+ "src/ic/s390/handler-compiler-s390.cc",
+ "src/ic/s390/ic-compiler-s390.cc",
+ "src/ic/s390/ic-s390.cc",
+ "src/ic/s390/stub-cache-s390.cc",
+ "src/regexp/s390/regexp-macro-assembler-s390.cc",
+ "src/regexp/s390/regexp-macro-assembler-s390.h",
+ "src/s390/assembler-s390-inl.h",
+ "src/s390/assembler-s390.cc",
+ "src/s390/assembler-s390.h",
+ "src/s390/builtins-s390.cc",
+ "src/s390/code-stubs-s390.cc",
+ "src/s390/code-stubs-s390.h",
+ "src/s390/codegen-s390.cc",
+ "src/s390/codegen-s390.h",
+ "src/s390/constants-s390.cc",
+ "src/s390/constants-s390.h",
+ "src/s390/cpu-s390.cc",
+ "src/s390/deoptimizer-s390.cc",
+ "src/s390/disasm-s390.cc",
+ "src/s390/frames-s390.cc",
+ "src/s390/frames-s390.h",
+ "src/s390/interface-descriptors-s390.cc",
+ "src/s390/macro-assembler-s390.cc",
+ "src/s390/macro-assembler-s390.h",
+ "src/s390/simulator-s390.cc",
+ "src/s390/simulator-s390.h",
+ ]
}
configs -= [ "//build/config/compiler:chromium_code" ]
@@ -1671,14 +1796,16 @@ source_set("v8_libbase") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = [
+ "src/base/accounting-allocator.cc",
+ "src/base/accounting-allocator.h",
"src/base/adapters.h",
"src/base/atomicops.h",
"src/base/atomicops_internals_arm64_gcc.h",
"src/base/atomicops_internals_arm_gcc.h",
"src/base/atomicops_internals_atomicword_compat.h",
"src/base/atomicops_internals_mac.h",
- "src/base/atomicops_internals_mips_gcc.h",
"src/base/atomicops_internals_mips64_gcc.h",
+ "src/base/atomicops_internals_mips_gcc.h",
"src/base/atomicops_internals_portable.h",
"src/base/atomicops_internals_s390_gcc.h",
"src/base/atomicops_internals_tsan.h",
@@ -1702,16 +1829,16 @@ source_set("v8_libbase") {
"src/base/macros.h",
"src/base/once.cc",
"src/base/once.h",
- "src/base/platform/elapsed-timer.h",
- "src/base/platform/time.cc",
- "src/base/platform/time.h",
"src/base/platform/condition-variable.cc",
"src/base/platform/condition-variable.h",
+ "src/base/platform/elapsed-timer.h",
"src/base/platform/mutex.cc",
"src/base/platform/mutex.h",
"src/base/platform/platform.h",
"src/base/platform/semaphore.cc",
"src/base/platform/semaphore.h",
+ "src/base/platform/time.cc",
+ "src/base/platform/time.h",
"src/base/safe_conversions.h",
"src/base/safe_conversions_impl.h",
"src/base/safe_math.h",
@@ -1745,10 +1872,16 @@ source_set("v8_libbase") {
if (is_linux) {
sources += [ "src/base/platform/platform-linux.cc" ]
- libs = [ "dl", "rt" ]
+ libs = [
+ "dl",
+ "rt",
+ ]
} else if (is_android) {
if (current_toolchain == host_toolchain) {
- libs = [ "dl", "rt" ]
+ libs = [
+ "dl",
+ "rt",
+ ]
if (host_os == "mac") {
sources += [ "src/base/platform/platform-macos.cc" ]
} else {
@@ -1818,6 +1951,7 @@ source_set("fuzzer_support") {
configs += [ "//build/config/compiler:no_chromium_code" ]
configs += [
":internal_config_base",
+ ":libplatform_config",
":features",
":toolchain",
]
@@ -1844,6 +1978,7 @@ if (current_toolchain == snapshot_toolchain) {
configs += [ "//build/config/compiler:no_chromium_code" ]
configs += [
":internal_config",
+ ":libplatform_config",
":features",
":toolchain",
]
@@ -1933,9 +2068,7 @@ if ((current_toolchain == host_toolchain && v8_toolset_for_d8 == "host") ||
}
if (!is_component_build) {
- sources += [
- "$target_gen_dir/d8-js.cc",
- ]
+ sources += [ "$target_gen_dir/d8-js.cc" ]
}
if (v8_enable_i18n_support) {
deps += [ "//third_party/icu" ]
@@ -1956,6 +2089,7 @@ source_set("json_fuzzer") {
configs += [ "//build/config/compiler:no_chromium_code" ]
configs += [
":internal_config",
+ ":libplatform_config",
":features",
":toolchain",
]
@@ -1974,6 +2108,7 @@ source_set("parser_fuzzer") {
configs += [ "//build/config/compiler:no_chromium_code" ]
configs += [
":internal_config",
+ ":libplatform_config",
":features",
":toolchain",
]
@@ -1992,6 +2127,45 @@ source_set("regexp_fuzzer") {
configs += [ "//build/config/compiler:no_chromium_code" ]
configs += [
":internal_config",
+ ":libplatform_config",
+ ":features",
+ ":toolchain",
+ ]
+}
+
+source_set("wasm_fuzzer") {
+ sources = [
+ "test/fuzzer/wasm.cc",
+ ]
+
+ deps = [
+ ":fuzzer_support",
+ ]
+
+ configs -= [ "//build/config/compiler:chromium_code" ]
+ configs += [ "//build/config/compiler:no_chromium_code" ]
+ configs += [
+ ":internal_config",
+ ":libplatform_config",
+ ":features",
+ ":toolchain",
+ ]
+}
+
+source_set("wasm_asmjs_fuzzer") {
+ sources = [
+ "test/fuzzer/wasm-asmjs.cc",
+ ]
+
+ deps = [
+ ":fuzzer_support",
+ ]
+
+ configs -= [ "//build/config/compiler:chromium_code" ]
+ configs += [ "//build/config/compiler:no_chromium_code" ]
+ configs += [
+ ":internal_config",
+ ":libplatform_config",
":features",
":toolchain",
]
diff --git a/deps/v8/CODE_OF_CONDUCT.md b/deps/v8/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..15fc2259ad
--- /dev/null
+++ b/deps/v8/CODE_OF_CONDUCT.md
@@ -0,0 +1,8 @@
+# V8 Code of Conduct
+
+As part of the Chromium team, the V8 team is committed to preserving and
+fostering a diverse, welcoming community. To this end, the [Chromium Code of
+Conduct](https://chromium.googlesource.com/chromium/src/+/master/CODE_OF_CONDUCT.md)
+applies to our repos and organizations, mailing lists, blog content, and any
+other Chromium-supported communication group, as well as any private
+communication initiated in the context of these spaces.
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 9e21ba1cd7..dc97b80422 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,1764 @@
+2016-04-06: Version 5.1.281
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-05: Version 5.1.280
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-05: Version 5.1.279
+
+ Ship --harmony-regexp-exec (issue 4602).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-05: Version 5.1.278
+
+ [V8] Removed debugger V8::PromiseEvent (Chromium issue 526811).
+
+ [asm.js] Fix typing bug for non-literals in heap access (Chromium issue
+ 599825).
+
+ Ensure CreateDataProperty works correctly on TypedArrays (Chromium issue
+ 596394).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-05: Version 5.1.277
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-05: Version 5.1.276
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-05: Version 5.1.275
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-05: Version 5.1.274
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-05: Version 5.1.273
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-05: Version 5.1.272
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-05: Version 5.1.271
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-04: Version 5.1.270
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-04: Version 5.1.269
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-04: Version 5.1.268
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-04: Version 5.1.267
+
+ [api] Restrict Template::Set to take templates or primitive values.
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-04: Version 5.1.266
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-04: Version 5.1.265
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-04: Version 5.1.264
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-04: Version 5.1.263
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-04: Version 5.1.262
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-04: Version 5.1.261
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-04: Version 5.1.260
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-04: Version 5.1.259
+
+ Further ES2015 RegExp spec compliance fixes (issue 4602).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-03: Version 5.1.258
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-02: Version 5.1.257
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-02: Version 5.1.256
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-02: Version 5.1.255
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-01: Version 5.1.254
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-01: Version 5.1.253
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-01: Version 5.1.252
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-01: Version 5.1.251
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-01: Version 5.1.250
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-01: Version 5.1.249
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-01: Version 5.1.248
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-01: Version 5.1.247
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-01: Version 5.1.246
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-01: Version 5.1.245
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-01: Version 5.1.244
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-01: Version 5.1.243
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-01: Version 5.1.242
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-01: Version 5.1.241
+
+ [GN] Define USE_EABI_HARDFLOAT=1 when arm_float_abi=="hard" (Chromium
+ issue 592660).
+
+ Ship --harmony-regexp-exec (issue 4602).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-31: Version 5.1.240
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-31: Version 5.1.239
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-31: Version 5.1.238
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-31: Version 5.1.237
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-31: Version 5.1.236
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-31: Version 5.1.235
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-31: Version 5.1.234
+
+ [arm/Linux] Don't rely on KUSER_HELPERS feature (Chromium issue 599051).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-31: Version 5.1.233
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-31: Version 5.1.232
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-31: Version 5.1.231
+
+ Turn scavenge_reclaim_unmodified_objects on by default (Chromium issue
+ 4880).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-31: Version 5.1.230
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-31: Version 5.1.229
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-31: Version 5.1.228
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-31: Version 5.1.227
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-31: Version 5.1.226
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-31: Version 5.1.225
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-31: Version 5.1.224
+
+ Raise minimum Mac OS version to 10.7 (issue 4847).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-31: Version 5.1.223
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-30: Version 5.1.222
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-30: Version 5.1.221
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-30: Version 5.1.220
+
+ Stage --harmony-regexp-exec (issue 4602).
+
+ Add fast paths for native RegExps in ES2015 subclass-aware code (issue
+ 4602).
+
+ [V8] Add FunctionMirror.prototype.contextDebugId method (Chromium issue
+ 595206).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-30: Version 5.1.219
+
+ Remove RegExp.prototype.source getter compat workaround (issue 4827,
+ Chromium issue 581577).
+
+ Check for proper types from error handling code (Chromium issue 596718).
+
+ Add ES2015 RegExp full subclassing semantics behind a flag (issue 4602).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-24: Version 5.1.218
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-24: Version 5.1.217
+
+ [esnext] implement String padding proposal.
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-24: Version 5.1.216
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-24: Version 5.1.215
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-24: Version 5.1.214
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-23: Version 5.1.213
+
+ Implement ES2015 labelled function declaration restrictions (Chromium
+ issue 595309).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-23: Version 5.1.212
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-23: Version 5.1.211
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-23: Version 5.1.210
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-23: Version 5.1.209
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-23: Version 5.1.208
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-23: Version 5.1.207
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-23: Version 5.1.206
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-23: Version 5.1.205
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-23: Version 5.1.204
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-23: Version 5.1.203
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-23: Version 5.1.202
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-23: Version 5.1.201
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-23: Version 5.1.200
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-22: Version 5.1.199
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-22: Version 5.1.198
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-22: Version 5.1.197
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-22: Version 5.1.196
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-22: Version 5.1.195
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-22: Version 5.1.194
+
+ Fix match default behavior on strings for ES2015 semantics (issue 4602).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-22: Version 5.1.193
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-22: Version 5.1.192
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-22: Version 5.1.191
+
+ [Interpreter] TurboFan implementation of intrinsics (issue 4822).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-22: Version 5.1.190
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-21: Version 5.1.189
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-21: Version 5.1.188
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-21: Version 5.1.187
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-21: Version 5.1.186
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-21: Version 5.1.185
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-21: Version 5.1.184
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-21: Version 5.1.183
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-21: Version 5.1.182
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-21: Version 5.1.181
+
+ Temporarily undeprecate ForceSet (Chromium issue 595601).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-21: Version 5.1.180
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-20: Version 5.1.179
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-20: Version 5.1.178
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-20: Version 5.1.177
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-19: Version 5.1.176
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-18: Version 5.1.175
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-18: Version 5.1.174
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-18: Version 5.1.173
+
+ Parser: Make skipping HTML comments optional (Chromium issue 573887).
+
+ [es7] implement exponentiation operator proposal (issue 3915).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-18: Version 5.1.172
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-18: Version 5.1.171
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-18: Version 5.1.170
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-18: Version 5.1.169
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-17: Version 5.1.168
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-17: Version 5.1.167
+
+ Throw the right exceptions from setting elements in
+ Array.prototype.concat (Chromium issue 595319).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-17: Version 5.1.166
+
+ Throw exceptions from CreateDataProperty when should_throw (Chromium
+ issue 595319).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-17: Version 5.1.165
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-17: Version 5.1.164
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-17: Version 5.1.163
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-17: Version 5.1.162
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-17: Version 5.1.161
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-17: Version 5.1.160
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-17: Version 5.1.159
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-17: Version 5.1.158
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-17: Version 5.1.157
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-17: Version 5.1.156
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-17: Version 5.1.155
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-17: Version 5.1.154
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-17: Version 5.1.153
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-17: Version 5.1.152
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-17: Version 5.1.151
+
+ Move FastAccessorAssembler from RawMachineAssembler to CodeStubAssembler
+ (Chromium issue 508898).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-17: Version 5.1.150
+
+ [serializer] Add API to warm up startup snapshot with an additional
+ script (issue 4836).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-17: Version 5.1.149
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-17: Version 5.1.148
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-17: Version 5.1.147
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-16: Version 5.1.146
+
+ Ship ES2015 restrictions on function declaration locations (issue 4824).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-16: Version 5.1.145
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-16: Version 5.1.144
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-16: Version 5.1.143
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-16: Version 5.1.142
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-16: Version 5.1.141
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-16: Version 5.1.140
+
+ Put RegExp js code in strict mode (issue 4504).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-15: Version 5.1.139
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-15: Version 5.1.138
+
+ [builtins] Fix Array.prototype.concat bug (Chromium issue 594574).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-15: Version 5.1.137
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-15: Version 5.1.136
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-15: Version 5.1.135
+
+ Ship Array.prototype.values (issue 4247).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-15: Version 5.1.134
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-15: Version 5.1.133
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-15: Version 5.1.132
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-15: Version 5.1.131
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-15: Version 5.1.130
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-15: Version 5.1.129
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-15: Version 5.1.128
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-14: Version 5.1.127
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-14: Version 5.1.126
+
+ Remove --harmony-modules flag and let embedder decide when modules are
+ used (issue 1569, Chromium issue 594639).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-14: Version 5.1.125
+
+ Make test262 test runner check for which exception is thrown (issue
+ 4803).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-14: Version 5.1.124
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-14: Version 5.1.123
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-14: Version 5.1.122
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-14: Version 5.1.121
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-14: Version 5.1.120
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-14: Version 5.1.119
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-13: Version 5.1.118
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-11: Version 5.1.117
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-11: Version 5.1.116
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-11: Version 5.1.115
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-11: Version 5.1.114
+
+ [arm64] Fix i/d cache line size confusion typo (Chromium issue 593867).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-11: Version 5.1.113
+
+ Fix expression positions for for-loops (issue 4690).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-11: Version 5.1.112
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-11: Version 5.1.111
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-10: Version 5.1.110
+
+ Minor library function fixes for TypedArray spec compliance (issue
+ 4785).
+
+ Check that Promise subclasses have callable resolve/reject (issue 4633).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-10: Version 5.1.109
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-10: Version 5.1.108
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-10: Version 5.1.107
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-10: Version 5.1.106
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-10: Version 5.1.105
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-10: Version 5.1.104
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-10: Version 5.1.103
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-10: Version 5.1.102
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-10: Version 5.1.101
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-10: Version 5.1.100
+
+ [strong] Remove all remainders of strong mode (issue 3956).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-10: Version 5.1.99
+
+ Marks the label associated with the runtime call in
+ CodeStubAssembler::Allocate as deferred (Chromium issue 593359).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-10: Version 5.1.98
+
+ Implement iterator finalization in array destructuring (issue 3566).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-10: Version 5.1.97
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-10: Version 5.1.96
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-10: Version 5.1.95
+
+ String.prototype[Symbol.iterator] does RequireObjectCoercible(this)
+ (issue 4348).
+
+ Stage restrictive declarations flag (issue 4824).
+
+ Expose Array.prototype.values behind a flag and stage it (issue 4247).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-09: Version 5.1.94
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-09: Version 5.1.93
+
+ Ensure appropriate bounds checking for Array subclass concat (Chromium
+ issue 592340).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-09: Version 5.1.92
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-09: Version 5.1.91
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-09: Version 5.1.90
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-09: Version 5.1.89
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-09: Version 5.1.88
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-09: Version 5.1.87
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-09: Version 5.1.86
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-09: Version 5.1.85
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-09: Version 5.1.84
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-08: Version 5.1.83
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-08: Version 5.1.82
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-08: Version 5.1.81
+
+ Optimize new TypedArray(typedArray) constructor (Chromium issue 592007).
+
+ Ensure the @@species protector is updated for accessors (issue 4093).
+
+ Add UseCounters for various RegExp compatibility issues (Chromium issue
+ 581577).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-08: Version 5.1.80
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-08: Version 5.1.79
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-08: Version 5.1.78
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-08: Version 5.1.77
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-08: Version 5.1.76
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-08: Version 5.1.75
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-08: Version 5.1.74
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-08: Version 5.1.73
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-08: Version 5.1.72
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-08: Version 5.1.71
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-07: Version 5.1.70
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-07: Version 5.1.69
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-07: Version 5.1.68
+
+ [key-accumulator] Starting to reimplement the key-accumulator (issue
+ 4758, Chromium issue 545503).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-07: Version 5.1.67
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-07: Version 5.1.66
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-07: Version 5.1.65
+
+ [key-accumulator] Starting to reimplement the key-accumulator (issue
+ 4758, Chromium issue 545503).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-07: Version 5.1.64
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-07: Version 5.1.63
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-07: Version 5.1.62
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-07: Version 5.1.61
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-07: Version 5.1.60
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-07: Version 5.1.59
+
+ Use v8::kGCCallbackFlagCollectAllAvailableGarbage in
+ Heap::CollectAllAvailableGarbage (Chromium issue 591463).
+
+ [key-accumulator] Starting to reimplement the key-accumulator (issue
+ 4758, Chromium issue 545503).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-07: Version 5.1.58
+
+ [regexp] Fix off-by-one in CharacterRange::Negate (Chromium issue
+ 592343).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-07: Version 5.1.57
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-07: Version 5.1.56
+
+ Use v8::kGCCallbackFlagCollectAllAvailableGarbage in
+ Heap::CollectAllAvailableGarbage (Chromium issue 591463).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-06: Version 5.1.55
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-06: Version 5.1.54
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-04: Version 5.1.53
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-04: Version 5.1.52
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-04: Version 5.1.51
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-04: Version 5.1.50
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-04: Version 5.1.49
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-04: Version 5.1.48
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-04: Version 5.1.47
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-04: Version 5.1.46
+
+ Introduce v8::MicrotasksScope (Chromium issue 585949).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-04: Version 5.1.45
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-04: Version 5.1.44
+
+ Use a different GCCallbackFlag for GCs triggered by
+ CollectAllAvailableGarbage (Chromium issue 591463).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-04: Version 5.1.43
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-04: Version 5.1.42
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-04: Version 5.1.41
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-04: Version 5.1.40
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-04: Version 5.1.39
+
+ Ship ES2015 Function.name reform (issue 3699, Chromium issue 588803).
+
+ Introduce v8::MicrotasksScope (Chromium issue 585949).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-04: Version 5.1.38
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-03: Version 5.1.37
+
+ Restrict FunctionDeclarations in Statement position (issue 4647).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-03: Version 5.1.36
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-03: Version 5.1.35
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-03: Version 5.1.34
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-03: Version 5.1.33
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-03: Version 5.1.32
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-03: Version 5.1.31
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-03: Version 5.1.30
+
+ Implement TypedArray(typedarray) constructor (issue 4726).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-02: Version 5.1.29
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-02: Version 5.1.28
+
+ [turbofan] Adds an Allocate macro to the CodeStubAssembler (Chromium
+ issue 588692).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-02: Version 5.1.27
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-02: Version 5.1.26
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-02: Version 5.1.25
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-02: Version 5.1.24
+
+ Devtools: expose scopes source location to debugger (Chromium issue
+ 327092).
+
+ CodeStubAssembler can generate code for builtins (issue 4614).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-01: Version 5.1.23
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-01: Version 5.1.22
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-01: Version 5.1.21
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-01: Version 5.1.20
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-01: Version 5.1.19
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-01: Version 5.1.18
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-01: Version 5.1.17
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-01: Version 5.1.16
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-01: Version 5.1.15
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-01: Version 5.1.14
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-01: Version 5.1.13
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-01: Version 5.1.12
+
+ Performance and stability improvements on all platforms.
+
+
+2016-03-01: Version 5.1.11
+
+ Make %TypedArray%.from spec-compliant (issue 4782).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-29: Version 5.1.10
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-29: Version 5.1.9
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-28: Version 5.1.8
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-28: Version 5.1.7
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-28: Version 5.1.6
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-28: Version 5.1.5
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-28: Version 5.1.4
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-28: Version 5.1.3
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-28: Version 5.1.2
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-27: Version 5.1.1
+
+ Fix strict mode function error message (issue 2198).
+
+ Reland of Make Intl install properties more like how other builtins do
+ (patchset #1 id:1 of https://codereview.chromium.org/1733293003/ )
+ (issue 4778).
+
+ [turbofan] Bailout if LoadBuffer typing assumption doesn't hold
+ (Chromium issue 589792).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-26: Version 5.0.104
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-26: Version 5.0.103
+
+ Make Intl install properties more like how other builtins do (issue
+ 4778).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-26: Version 5.0.102
+
+ Make TypedArray.from and TypedArray.of writable and configurable (issue
+ 4315).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-25: Version 5.0.101
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-25: Version 5.0.100
+
+ Ship ES2015 iterator finalization (issue 3566).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-25: Version 5.0.99
+
+ Introduce MicrotasksCompletedCallback (Chromium issue 585949).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-25: Version 5.0.98
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-25: Version 5.0.97
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-25: Version 5.0.96
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-25: Version 5.0.95
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-25: Version 5.0.94
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-25: Version 5.0.93
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-25: Version 5.0.92
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-25: Version 5.0.91
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-25: Version 5.0.90
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-25: Version 5.0.89
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-25: Version 5.0.88
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-25: Version 5.0.87
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-25: Version 5.0.86
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-25: Version 5.0.85
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-24: Version 5.0.84
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-24: Version 5.0.83
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-24: Version 5.0.82
+
+ Ensure IteratorClose is called for errors in non-declaring assignments
+ (issue 4776).
+
+ Fix priority of exceptions being thrown from for-of loops (issue 4775).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-24: Version 5.0.81
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-24: Version 5.0.80
+
+ Encode interpreter::SourcePositionTable as variable-length ints (issue
+ 4690).
+
+ Stage ES2015 iterator finalization (issue 3566).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-24: Version 5.0.79
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-24: Version 5.0.78
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-24: Version 5.0.77
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-24: Version 5.0.76
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-24: Version 5.0.75
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-24: Version 5.0.74
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-23: Version 5.0.73
+
+ Intl: Use private symbols to memoize bound functions (issue 3785).
+
+ Ensure Array.prototype.indexOf returns +0 rather than -0.
+
+ Ship ES2015 Symbol.species (issue 4093).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-23: Version 5.0.72
+
+ Performance and stability improvements on all platforms.
+
+
2016-02-23: Version 5.0.71
Performance and stability improvements on all platforms.
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index 5f26e91ecf..0559523283 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -8,15 +8,15 @@ vars = {
deps = {
"v8/build/gyp":
- Var("git_url") + "/external/gyp.git" + "@" + "ed163ce233f76a950dce1751ac851dbe4b1c00cc",
+ Var("git_url") + "/external/gyp.git" + "@" + "4ec6c4e3a94bd04a6da2858163d40b2429b8aad1",
"v8/third_party/icu":
- Var("git_url") + "/chromium/deps/icu.git" + "@" + "e466f6ac8f60bb9697af4a91c6911c6fc4aec95f",
+ Var("git_url") + "/chromium/deps/icu.git" + "@" + "c291cde264469b20ca969ce8832088acb21e0c48",
"v8/buildtools":
- Var("git_url") + "/chromium/buildtools.git" + "@" + "97b5c485707335dd2952c05bf11412ada3f4fb6f",
+ Var("git_url") + "/chromium/buildtools.git" + "@" + "80b5126f91be4eb359248d28696746ef09d5be67",
"v8/base/trace_event/common":
- Var("git_url") + "/chromium/src/base/trace_event/common.git" + "@" + "4b09207e447ae5bd34643b4c6321bee7b76d35f9",
+ Var("git_url") + "/chromium/src/base/trace_event/common.git" + "@" + "c8c8665c2deaf1cc749d9f8e153256d4f67bf1b8",
"v8/tools/swarming_client":
- Var('git_url') + '/external/swarming.client.git' + '@' + "0b908f18767c8304dc089454bc1c91755d21f1f5",
+ Var('git_url') + '/external/swarming.client.git' + '@' + "df6e95e7669883c8fe9ef956c69a544154701a49",
"v8/testing/gtest":
Var("git_url") + "/external/github.com/google/googletest.git" + "@" + "6f8a66431cb592dad629028a50b3dd418a408c87",
"v8/testing/gmock":
@@ -27,15 +27,15 @@ deps = {
Var("git_url") + "/v8/deps/third_party/mozilla-tests.git" + "@" + "f6c578a10ea707b1a8ab0b88943fe5115ce2b9be",
"v8/test/simdjs/data": Var("git_url") + "/external/github.com/tc39/ecmascript_simd.git" + "@" + "c8ef63c728283debc25891123eb00482fee4b8cd",
"v8/test/test262/data":
- Var("git_url") + "/external/github.com/tc39/test262.git" + "@" + "738a24b109f3fa71be44d5c3701d73141d494510",
+ Var("git_url") + "/external/github.com/tc39/test262.git" + "@" + "57d3e2216fa86ad63b6c0a54914ba9dcbff96003",
"v8/tools/clang":
- Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "a8adb78c8eda9bddb2aa9c51f3fee60296de1ad4",
+ Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "faee82e064e04e5cbf60cc7327e7a81d2a4557ad",
}
deps_os = {
"android": {
"v8/third_party/android_tools":
- Var("git_url") + "/android_tools.git" + "@" + "f4c36ad89b2696b37d9cd7ca7d984b691888b188",
+ Var("git_url") + "/android_tools.git" + "@" + "adfd31794011488cd0fc716b53558b2d8a67af8b",
},
"win": {
"v8/third_party/cygwin":
diff --git a/deps/v8/Makefile b/deps/v8/Makefile
index 4fb6ee0162..a0c08a6d96 100644
--- a/deps/v8/Makefile
+++ b/deps/v8/Makefile
@@ -162,6 +162,9 @@ endif
ifdef embedscript
GYPFLAGS += -Dembed_script=$(embedscript)
endif
+ifdef warmupscript
+ GYPFLAGS += -Dwarmup_script=$(warmupscript)
+endif
ifeq ($(goma), on)
GYPFLAGS += -Duse_goma=1
endif
@@ -219,6 +222,10 @@ endif
ifeq ($(arm_test_noprobe), on)
GYPFLAGS += -Darm_test_noprobe=on
endif
+# Do not omit the frame pointer, needed for profiling with perf
+ifeq ($(no_omit_framepointer), on)
+ GYPFLAGS += -Drelease_extra_cflags=-fno-omit-frame-pointer
+endif
# ----------------- available targets: --------------------
# - "grokdump": rebuilds heap constants lists used by grokdump
diff --git a/deps/v8/OWNERS b/deps/v8/OWNERS
index 2c5caeb125..3f2caecd49 100644
--- a/deps/v8/OWNERS
+++ b/deps/v8/OWNERS
@@ -1,5 +1,7 @@
adamk@chromium.org
+ahaas@chromium.org
bmeurer@chromium.org
+cbruni@chromium.org
danno@chromium.org
epertoso@chromium.org
hablich@chromium.org
@@ -10,10 +12,13 @@ jkummerow@chromium.org
jochen@chromium.org
littledan@chromium.org
machenbach@chromium.org
-mlippautz@chromium.org
marja@chromium.org
+mlippautz@chromium.org
mstarzinger@chromium.org
mvstanton@chromium.org
+mythria@chromium.org
+neis@chromium.org
+oth@chromium.org
rmcilroy@chromium.org
rossberg@chromium.org
titzer@chromium.org
diff --git a/deps/v8/base/trace_event/common/trace_event_common.h b/deps/v8/base/trace_event/common/trace_event_common.h
index 8d13fc2e8c..7a1533ed82 100644
--- a/deps/v8/base/trace_event/common/trace_event_common.h
+++ b/deps/v8/base/trace_event/common/trace_event_common.h
@@ -156,7 +156,7 @@
// };
//
// TRACE_EVENT1("foo", "bar", "data",
-// scoped_refptr<ConvertableToTraceFormat>(new MyData()));
+// std::unique_ptr<ConvertableToTraceFormat>(new MyData()));
//
// The trace framework will take ownership if the passed pointer and it will
// be free'd when the trace buffer is flushed.
@@ -926,6 +926,20 @@
name, id, TRACE_EVENT_FLAG_COPY, arg1_name, \
arg1_val, arg2_name, arg2_val)
+// Special trace event macro to trace task execution with the location where it
+// was posted from.
+#define TRACE_TASK_EXECUTION(run_function, task) \
+ TRACE_EVENT2("toplevel", run_function, "src_file", \
+ (task).posted_from.file_name(), "src_func", \
+ (task).posted_from.function_name()); \
+ TRACE_EVENT_API_SCOPED_TASK_EXECUTION_EVENT INTERNAL_TRACE_EVENT_UID( \
+ task_event)((task).posted_from.file_name());
+
+// TRACE_EVENT_METADATA* events are information related to other
+// injected events, not events in their own right.
+#define TRACE_EVENT_METADATA1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_METADATA_ADD(category_group, name, arg1_name, arg1_val)
+
// Records a clock sync event.
#define TRACE_EVENT_CLOCK_SYNC_RECEIVER(sync_id) \
INTERNAL_TRACE_EVENT_ADD( \
diff --git a/deps/v8/build/coverage_wrapper.py b/deps/v8/build/coverage_wrapper.py
new file mode 100755
index 0000000000..5b365d8e63
--- /dev/null
+++ b/deps/v8/build/coverage_wrapper.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# CC/CXX wrapper script that excludes certain file patterns from coverage
+# instrumentation.
+
+import re
+import subprocess
+import sys
+
+exclusions = [
+ 'buildtools',
+ 'src/third_party',
+ 'third_party',
+ 'test',
+ 'testing',
+]
+
+def remove_if_exists(string_list, item):
+ if item in string_list:
+ string_list.remove(item)
+
+args = sys.argv[1:]
+text = ' '.join(sys.argv[2:])
+for exclusion in exclusions:
+ if re.search(r'\-o obj/%s[^ ]*\.o' % exclusion, text):
+ remove_if_exists(args, '-fprofile-arcs')
+ remove_if_exists(args, '-ftest-coverage')
+ remove_if_exists(args, '-fsanitize-coverage=func')
+ remove_if_exists(args, '-fsanitize-coverage=bb')
+ remove_if_exists(args, '-fsanitize-coverage=edge')
+ break
+
+sys.exit(subprocess.check_call(args))
diff --git a/deps/v8/build/get_landmines.py b/deps/v8/build/get_landmines.py
index ea0ae0d415..2bbf7a61bf 100755
--- a/deps/v8/build/get_landmines.py
+++ b/deps/v8/build/get_landmines.py
@@ -26,6 +26,7 @@ def main():
print 'Cleanup after windows ninja switch attempt.'
print 'Switching to pinned msvs toolchain.'
print 'Clobbering to hopefully resolve problem with mksnapshot'
+ print 'Clobber after ICU roll.'
return 0
diff --git a/deps/v8/build/isolate.gypi b/deps/v8/build/isolate.gypi
index 546870a755..4cfbbfddd1 100644
--- a/deps/v8/build/isolate.gypi
+++ b/deps/v8/build/isolate.gypi
@@ -76,6 +76,8 @@
'--config-variable', 'icu_use_data_file_flag=0',
'--config-variable', 'msan=<(msan)',
'--config-variable', 'tsan=<(tsan)',
+ '--config-variable', 'coverage=<(coverage)',
+ '--config-variable', 'sanitizer_coverage=<(sanitizer_coverage)',
'--config-variable', 'component=<(component)',
'--config-variable', 'target_arch=<(target_arch)',
'--config-variable', 'use_custom_libcxx=<(use_custom_libcxx)',
diff --git a/deps/v8/build/standalone.gypi b/deps/v8/build/standalone.gypi
index 5269b95f64..6c88409dbe 100644
--- a/deps/v8/build/standalone.gypi
+++ b/deps/v8/build/standalone.gypi
@@ -44,7 +44,7 @@
'v8_deprecation_warnings': 1,
'v8_imminent_deprecation_warnings': 1,
'msvs_multi_core_compile%': '1',
- 'mac_deployment_target%': '10.5',
+ 'mac_deployment_target%': '10.7',
'release_extra_cflags%': '',
'variables': {
'variables': {
@@ -68,7 +68,9 @@
'target_arch%': '<(host_arch)',
'base_dir%': '<!(cd <(DEPTH) && python -c "import os; print os.getcwd()")',
- # Instrument for code coverage with gcov.
+ # Instrument for code coverage and use coverage wrapper to exclude some
+ # files. Uses gcov if clang=0 is set explicitly. Otherwise,
+ # sanitizer_coverage must be set too.
'coverage%': 0,
},
'base_dir%': '<(base_dir)',
@@ -113,6 +115,8 @@
# Check if valgrind directories are present.
'has_valgrind%': '<!pymod_do_main(has_valgrind)',
+ 'test_isolation_mode%': 'noop',
+
'conditions': [
# Set default gomadir.
['OS=="win"', {
@@ -120,8 +124,7 @@
}, {
'gomadir': '<!(/bin/echo -n ${HOME}/goma)',
}],
- ['host_arch!="ppc" and host_arch!="ppc64" and host_arch!="ppc64le" and host_arch!="s390" and host_arch!="s390x" and \
- coverage==0', {
+ ['host_arch!="ppc" and host_arch!="ppc64" and host_arch!="ppc64le" and host_arch!="s390" and host_arch!="s390x"', {
'host_clang%': 1,
}, {
'host_clang%': 0,
@@ -136,14 +139,6 @@
}, {
'linux_use_bundled_gold%': 0,
}],
-
- # TODO(machenbach): Remove the conditions as more configurations are
- # supported.
- ['OS=="linux" or OS=="win"', {
- 'test_isolation_mode%': 'check',
- }, {
- 'test_isolation_mode%': 'noop',
- }],
],
},
'base_dir%': '<(base_dir)',
@@ -234,7 +229,7 @@
'v8_enable_gdbjit%': 0,
}],
['(OS=="linux" or OS=="mac") and (target_arch=="ia32" or target_arch=="x64") and \
- (v8_target_arch!="x87" and v8_target_arch!="x32") and coverage==0', {
+ (v8_target_arch!="x87" and v8_target_arch!="x32")', {
'clang%': 1,
}, {
'clang%': 0,
@@ -706,7 +701,7 @@
'-Wnon-virtual-dtor',
'-fno-exceptions',
'-fno-rtti',
- '-std=gnu++0x',
+ '-std=gnu++11',
],
'ldflags': [ '-pthread', ],
'conditions': [
@@ -733,7 +728,7 @@
[ 'component=="shared_library"', {
'cflags': [ '-fPIC', ],
}],
- [ 'coverage==1', {
+ [ 'clang==0 and coverage==1', {
'cflags': [ '-fprofile-arcs', '-ftest-coverage'],
'ldflags': [ '-fprofile-arcs'],
}],
@@ -756,7 +751,7 @@
'-Wnon-virtual-dtor',
'-fno-exceptions',
'-fno-rtti',
- '-std=gnu++0x',
+ '-std=gnu++11',
],
'conditions': [
[ 'visibility=="hidden"', {
@@ -986,7 +981,7 @@
['clang==1', {
'xcode_settings': {
'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0',
- 'CLANG_CXX_LANGUAGE_STANDARD': 'gnu++0x', # -std=gnu++0x
+ 'CLANG_CXX_LANGUAGE_STANDARD': 'c++11', # -std=c++11
},
'conditions': [
['v8_target_arch=="x64" or v8_target_arch=="arm64" \
@@ -1262,11 +1257,36 @@
# make generator doesn't support CC_wrapper without CC
# in make_global_settings yet.
['use_goma==1 and ("<(GENERATOR)"=="ninja" or clang==1)', {
- 'make_global_settings': [
- ['CC_wrapper', '<(gomadir)/gomacc'],
- ['CXX_wrapper', '<(gomadir)/gomacc'],
- ['CC.host_wrapper', '<(gomadir)/gomacc'],
- ['CXX.host_wrapper', '<(gomadir)/gomacc'],
+ 'conditions': [
+ ['coverage==1', {
+ # Wrap goma with coverage wrapper.
+ 'make_global_settings': [
+ ['CC_wrapper', '<(base_dir)/build/coverage_wrapper.py <(gomadir)/gomacc'],
+ ['CXX_wrapper', '<(base_dir)/build/coverage_wrapper.py <(gomadir)/gomacc'],
+ ['CC.host_wrapper', '<(base_dir)/build/coverage_wrapper.py <(gomadir)/gomacc'],
+ ['CXX.host_wrapper', '<(base_dir)/build/coverage_wrapper.py <(gomadir)/gomacc'],
+ ],
+ }, {
+ # Use only goma wrapper.
+ 'make_global_settings': [
+ ['CC_wrapper', '<(gomadir)/gomacc'],
+ ['CXX_wrapper', '<(gomadir)/gomacc'],
+ ['CC.host_wrapper', '<(gomadir)/gomacc'],
+ ['CXX.host_wrapper', '<(gomadir)/gomacc'],
+ ],
+ }],
+ ],
+ }, {
+ 'conditions': [
+ ['coverage==1', {
+ # Use only coverage wrapper.
+ 'make_global_settings': [
+ ['CC_wrapper', '<(base_dir)/build/coverage_wrapper.py'],
+ ['CXX_wrapper', '<(base_dir)/build/coverage_wrapper.py'],
+ ['CC.host_wrapper', '<(base_dir)/build/coverage_wrapper.py'],
+ ['CXX.host_wrapper', '<(base_dir)/build/coverage_wrapper.py'],
+ ],
+ }],
],
}],
['use_lto==1', {
diff --git a/deps/v8/build/toolchain.gypi b/deps/v8/build/toolchain.gypi
index c2974c52bf..6090898073 100644
--- a/deps/v8/build/toolchain.gypi
+++ b/deps/v8/build/toolchain.gypi
@@ -1287,7 +1287,8 @@
}],
],
}, # Debug
- 'Release': {
+ 'ReleaseBase': {
+ 'abstract': 1,
'variables': {
'v8_enable_slow_dchecks%': 0,
},
@@ -1367,6 +1368,27 @@
}],
], # conditions
}, # Release
+ 'Release': {
+ 'inherit_from': ['ReleaseBase'],
+ }, # Debug
+ 'conditions': [
+ [ 'OS=="win"', {
+ # TODO(bradnelson): add a gyp mechanism to make this more graceful.
+ 'Debug_x64': {
+ 'inherit_from': ['DebugBaseCommon'],
+ 'conditions': [
+ ['v8_optimized_debug==0', {
+ 'inherit_from': ['DebugBase0'],
+ }, {
+ 'inherit_from': ['DebugBase1'],
+ }],
+ ],
+ },
+ 'Release_x64': {
+ 'inherit_from': ['ReleaseBase'],
+ },
+ }],
+ ],
}, # configurations
}, # target_defaults
}
diff --git a/deps/v8/include/libplatform/libplatform.h b/deps/v8/include/libplatform/libplatform.h
index 2125e9746b..f180b4fe82 100644
--- a/deps/v8/include/libplatform/libplatform.h
+++ b/deps/v8/include/libplatform/libplatform.h
@@ -5,7 +5,7 @@
#ifndef V8_LIBPLATFORM_LIBPLATFORM_H_
#define V8_LIBPLATFORM_LIBPLATFORM_H_
-#include "include/v8-platform.h"
+#include "v8-platform.h" // NOLINT(build/include)
namespace v8 {
namespace platform {
diff --git a/deps/v8/include/v8-debug.h b/deps/v8/include/v8-debug.h
index 0d0ee739c0..e41df29ad0 100644
--- a/deps/v8/include/v8-debug.h
+++ b/deps/v8/include/v8-debug.h
@@ -18,13 +18,11 @@ enum DebugEvent {
Exception = 2,
NewFunction = 3,
BeforeCompile = 4,
- AfterCompile = 5,
+ AfterCompile = 5,
CompileError = 6,
- PromiseEvent = 7,
- AsyncTaskEvent = 8,
+ AsyncTaskEvent = 7,
};
-
class V8_EXPORT Debug {
public:
/**
@@ -276,6 +274,14 @@ class V8_EXPORT Debug {
*/
static MaybeLocal<Array> GetInternalProperties(Isolate* isolate,
Local<Value> value);
+
+ /**
+ * Defines if the ES2015 tail call elimination feature is enabled or not.
+ * The change of this flag triggers deoptimization of all functions that
+ * contain calls at tail position.
+ */
+ static bool IsTailCallEliminationEnabled(Isolate* isolate);
+ static void SetTailCallEliminationEnabled(Isolate* isolate, bool enabled);
};
diff --git a/deps/v8/include/v8-experimental.h b/deps/v8/include/v8-experimental.h
index 3874e91101..294ba647f0 100644
--- a/deps/v8/include/v8-experimental.h
+++ b/deps/v8/include/v8-experimental.h
@@ -10,7 +10,7 @@
#ifndef V8_INCLUDE_V8_EXPERIMENTAL_H_
#define V8_INCLUDE_V8_EXPERIMENTAL_H_
-#include "include/v8.h"
+#include "v8.h" // NOLINT(build/include)
namespace v8 {
namespace experimental {
diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h
index 11f8d51f02..4023a5b234 100644
--- a/deps/v8/include/v8-platform.h
+++ b/deps/v8/include/v8-platform.h
@@ -152,9 +152,9 @@ class Platform {
*/
virtual uint64_t AddTraceEvent(
char phase, const uint8_t* category_enabled_flag, const char* name,
- uint64_t id, uint64_t bind_id, int32_t num_args, const char** arg_names,
- const uint8_t* arg_types, const uint64_t* arg_values,
- unsigned int flags) {
+ const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
+ const char** arg_names, const uint8_t* arg_types,
+ const uint64_t* arg_values, unsigned int flags) {
return 0;
}
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index c24f07202b..4a331318bd 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 5
-#define V8_MINOR_VERSION 0
-#define V8_BUILD_NUMBER 71
-#define V8_PATCH_LEVEL 52
+#define V8_MINOR_VERSION 1
+#define V8_BUILD_NUMBER 281
+#define V8_PATCH_LEVEL 69
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 9ccbc6eb18..703a4f4f4a 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -18,6 +18,8 @@
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
+#include <utility>
+#include <vector>
#include "v8-version.h" // NOLINT(build/include)
#include "v8config.h" // NOLINT(build/include)
@@ -328,6 +330,8 @@ class Local {
template <class F1, class F2, class F3>
friend class PersistentValueMapBase;
template<class F1, class F2> friend class PersistentValueVector;
+ template <class F>
+ friend class ReturnValue;
explicit V8_INLINE Local(T* that) : val_(that) {}
V8_INLINE static Local<T> New(Isolate* isolate, T* that);
@@ -594,6 +598,13 @@ template <class T> class PersistentBase {
V8_INLINE void ClearWeak() { ClearWeak<void>(); }
/**
+ * Allows the embedder to tell the v8 garbage collector that a certain object
+ * is alive. Only allowed when the embedder is asked to trace its heap by
+ * EmbedderHeapTracer.
+ */
+ V8_INLINE void RegisterExternalReference(Isolate* isolate);
+
+ /**
* Marks the reference to this object independent. Garbage collector is free
* to ignore any object groups containing this object. Weak callback for an
* independent handle should not assume that it will be preceded by a global
@@ -2628,6 +2639,10 @@ enum AccessControl {
PROHIBITS_OVERWRITING = 1 << 2
};
+/**
+ * Integrity level for objects.
+ */
+enum class IntegrityLevel { kFrozen, kSealed };
/**
* A JavaScript object (ECMA-262, 4.3.3)
@@ -2819,6 +2834,11 @@ class V8_EXPORT Object : public Value {
*/
Local<String> GetConstructorName();
+ /**
+ * Sets the integrity level of the object.
+ */
+ Maybe<bool> SetIntegrityLevel(Local<Context> context, IntegrityLevel level);
+
/** Gets the number of internal fields for this Object. */
int InternalFieldCount();
@@ -3118,12 +3138,17 @@ class ReturnValue {
V8_INLINE void SetUndefined();
V8_INLINE void SetEmptyString();
// Convenience getter for Isolate
- V8_INLINE Isolate* GetIsolate();
+ V8_INLINE Isolate* GetIsolate() const;
// Pointer setter: Uncompilable to prevent inadvertent misuse.
template <typename S>
V8_INLINE void Set(S* whatever);
+ // Getter. Creates a new Local<> so it comes with a certain performance
+ // hit. If the ReturnValue was not yet set, this will return the undefined
+ // value.
+ V8_INLINE Local<Value> Get() const;
+
private:
template<class F> friend class ReturnValue;
template<class F> friend class FunctionCallbackInfo;
@@ -4886,7 +4911,6 @@ V8_INLINE Local<Primitive> Null(Isolate* isolate);
V8_INLINE Local<Boolean> True(Isolate* isolate);
V8_INLINE Local<Boolean> False(Isolate* isolate);
-
/**
* A set of constraints that specifies the limits of the runtime's memory use.
* You must set the heap size before initializing the VM - the size cannot be
@@ -4895,6 +4919,9 @@ V8_INLINE Local<Boolean> False(Isolate* isolate);
* If you are using threads then you should hold the V8::Locker lock while
* setting the stack limit and you must set a non-default stack limit separately
* for each thread.
+ *
+ * The arguments for set_max_semi_space_size, set_max_old_space_size,
+ * set_max_executable_size, set_code_range_size specify limits in MB.
*/
class V8_EXPORT ResourceConstraints {
public:
@@ -4913,17 +4940,23 @@ class V8_EXPORT ResourceConstraints {
uint64_t virtual_memory_limit);
int max_semi_space_size() const { return max_semi_space_size_; }
- void set_max_semi_space_size(int value) { max_semi_space_size_ = value; }
+ void set_max_semi_space_size(int limit_in_mb) {
+ max_semi_space_size_ = limit_in_mb;
+ }
int max_old_space_size() const { return max_old_space_size_; }
- void set_max_old_space_size(int value) { max_old_space_size_ = value; }
+ void set_max_old_space_size(int limit_in_mb) {
+ max_old_space_size_ = limit_in_mb;
+ }
int max_executable_size() const { return max_executable_size_; }
- void set_max_executable_size(int value) { max_executable_size_ = value; }
+ void set_max_executable_size(int limit_in_mb) {
+ max_executable_size_ = limit_in_mb;
+ }
uint32_t* stack_limit() const { return stack_limit_; }
// Sets an address beyond which the VM's stack may not grow.
void set_stack_limit(uint32_t* value) { stack_limit_ = value; }
size_t code_range_size() const { return code_range_size_; }
- void set_code_range_size(size_t value) {
- code_range_size_ = value;
+ void set_code_range_size(size_t limit_in_mb) {
+ code_range_size_ = limit_in_mb;
}
private:
@@ -5047,9 +5080,57 @@ class PromiseRejectMessage {
typedef void (*PromiseRejectCallback)(PromiseRejectMessage message);
-// --- Microtask Callback ---
+// --- Microtasks Callbacks ---
+typedef void (*MicrotasksCompletedCallback)(Isolate*);
typedef void (*MicrotaskCallback)(void* data);
+
+/**
+ * Policy for running microtasks:
+ * - explicit: microtasks are invoked with Isolate::RunMicrotasks() method;
+ * - scoped: microtasks invocation is controlled by MicrotasksScope objects;
+ * - auto: microtasks are invoked when the script call depth decrements
+ * to zero.
+ */
+enum class MicrotasksPolicy { kExplicit, kScoped, kAuto };
+
+
+/**
+ * This scope is used to control microtasks when kScopeMicrotasksInvocation
+ * is used on Isolate. In this mode every non-primitive call to V8 should be
+ * done inside some MicrotasksScope.
+ * Microtasks are executed when topmost MicrotasksScope marked as kRunMicrotasks
+ * exits.
+ * kDoNotRunMicrotasks should be used to annotate calls not intended to trigger
+ * microtasks.
+ */
+class V8_EXPORT MicrotasksScope {
+ public:
+ enum Type { kRunMicrotasks, kDoNotRunMicrotasks };
+
+ MicrotasksScope(Isolate* isolate, Type type);
+ ~MicrotasksScope();
+
+ /**
+ * Runs microtasks if no kRunMicrotasks scope is currently active.
+ */
+ static void PerformCheckpoint(Isolate* isolate);
+
+ /**
+ * Returns current depth of nested kRunMicrotasks scopes.
+ */
+ static int GetCurrentDepth(Isolate* isolate);
+
+ private:
+ internal::Isolate* const isolate_;
+ bool run_;
+
+ // Prevent copying.
+ MicrotasksScope(const MicrotasksScope&);
+ MicrotasksScope& operator=(const MicrotasksScope&);
+};
+
+
// --- Failed Access Check Callback ---
typedef void (*FailedAccessCheckCallback)(Local<Object> target,
AccessType type,
@@ -5121,6 +5202,7 @@ class V8_EXPORT HeapStatistics {
size_t total_available_size() { return total_available_size_; }
size_t used_heap_size() { return used_heap_size_; }
size_t heap_size_limit() { return heap_size_limit_; }
+ size_t malloced_memory() { return malloced_memory_; }
size_t does_zap_garbage() { return does_zap_garbage_; }
private:
@@ -5130,6 +5212,7 @@ class V8_EXPORT HeapStatistics {
size_t total_available_size_;
size_t used_heap_size_;
size_t heap_size_limit_;
+ size_t malloced_memory_;
bool does_zap_garbage_;
friend class V8;
@@ -5294,6 +5377,52 @@ class V8_EXPORT PersistentHandleVisitor { // NOLINT
uint16_t class_id) {}
};
+/**
+ * Memory pressure level for the MemoryPressureNotification.
+ * kNone hints V8 that there is no memory pressure.
+ * kModerate hints V8 to speed up incremental garbage collection at the cost of
+ * of higher latency due to garbage collection pauses.
+ * kCritical hints V8 to free memory as soon as possible. Garbage collection
+ * pauses at this level will be large.
+ */
+enum class MemoryPressureLevel { kNone, kModerate, kCritical };
+
+/**
+ * Interface for tracing through the embedder heap. During the v8 garbage
+ * collection, v8 collects hidden fields of all potential wrappers, and at the
+ * end of its marking phase iterates the collection and asks the embedder to
+ * trace through its heap and call PersistentBase::RegisterExternalReference on
+ * each js object reachable from any of the given wrappers.
+ *
+ * Before the first call to the TraceWrappableFrom function v8 will call
+ * TraceRoots. When the v8 garbage collection is finished, v8 will call
+ * ClearTracingMarks.
+ */
+class EmbedderHeapTracer {
+ public:
+ /**
+ * V8 will call this method at the beginning of the gc cycle.
+ */
+ virtual void TraceRoots(Isolate* isolate) = 0;
+
+ /**
+ * V8 will call this method with internal fields of a potential wrappers.
+ * Embedder is expected to trace its heap (synchronously) and call
+ * PersistentBase::RegisterExternalReference() on all wrappers reachable from
+ * any of the given wrappers.
+ */
+ virtual void TraceWrappableFrom(
+ Isolate* isolate,
+ const std::vector<std::pair<void*, void*> >& internal_fields) = 0;
+ /**
+ * V8 will call this method at the end of the gc cycle. Allocation is *not*
+ * allowed in the ClearTracingMarks.
+ */
+ virtual void ClearTracingMarks(Isolate* isolate) = 0;
+
+ protected:
+ virtual ~EmbedderHeapTracer() = default;
+};
/**
* Isolate represents an isolated instance of the V8 engine. V8 isolates have
@@ -5489,6 +5618,9 @@ class V8_EXPORT Isolate {
kArrayPrototypeConstructorModified = 26,
kArrayInstanceProtoModified = 27,
kArrayInstanceConstructorModified = 28,
+ kLegacyFunctionDeclaration = 29,
+ kRegExpPrototypeSourceGetter = 30,
+ kRegExpPrototypeOldFlagGetter = 31,
// If you add new values here, you'll also need to update V8Initializer.cpp
// in Chromium.
@@ -5532,6 +5664,14 @@ class V8_EXPORT Isolate {
AbortOnUncaughtExceptionCallback callback);
/**
+ * Optional notification that the system is running low on memory.
+ * V8 uses these notifications to guide heuristics.
+ * It is allowed to call this function from another thread while
+ * the isolate is executing long running JavaScript code.
+ */
+ void MemoryPressureNotification(MemoryPressureLevel level);
+
+ /**
* Methods below this point require holding a lock (using Locker) in
* a multi-threaded environment.
*/
@@ -5753,6 +5893,11 @@ class V8_EXPORT Isolate {
void RemoveGCPrologueCallback(GCCallback callback);
/**
+ * Sets the embedder heap tracer for the isolate.
+ */
+ void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
+
+ /**
* Enables the host application to receive a notification after a
* garbage collection. Allocations are allowed in the callback function,
* but the callback is not re-entrant: if the allocation inside it will
@@ -5888,17 +6033,39 @@ class V8_EXPORT Isolate {
*/
void EnqueueMicrotask(MicrotaskCallback microtask, void* data = NULL);
- /**
- * Experimental: Controls whether the Microtask Work Queue is automatically
- * run when the script call depth decrements to zero.
+ /**
+ * Experimental: Controls how Microtasks are invoked. See MicrotasksPolicy
+ * for details.
*/
- void SetAutorunMicrotasks(bool autorun);
+ void SetMicrotasksPolicy(MicrotasksPolicy policy);
+ V8_DEPRECATE_SOON("Use SetMicrotasksPolicy",
+ void SetAutorunMicrotasks(bool autorun));
/**
- * Experimental: Returns whether the Microtask Work Queue is automatically
- * run when the script call depth decrements to zero.
+ * Experimental: Returns the policy controlling how Microtasks are invoked.
*/
- bool WillAutorunMicrotasks() const;
+ MicrotasksPolicy GetMicrotasksPolicy() const;
+ V8_DEPRECATE_SOON("Use GetMicrotasksPolicy",
+ bool WillAutorunMicrotasks() const);
+
+ /**
+ * Experimental: adds a callback to notify the host application after
+ * microtasks were run. The callback is triggered by explicit RunMicrotasks
+ * call or automatic microtasks execution (see SetAutorunMicrotasks).
+ *
+ * Callback will trigger even if microtasks were attempted to run,
+ * but the microtasks queue was empty and no single microtask was actually
+ * executed.
+ *
+ * Executing scriptsinside the callback will not re-trigger microtasks and
+ * the callback.
+ */
+ void AddMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
+
+ /**
+ * Removes callback that was installed by AddMicrotasksCompletedCallback.
+ */
+ void RemoveMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
/**
* Sets a callback for counting the number of times a feature of V8 is used.
@@ -6195,11 +6362,23 @@ class V8_EXPORT V8 {
static void SetSnapshotDataBlob(StartupData* startup_blob);
/**
- * Create a new isolate and context for the purpose of capturing a snapshot
+ * Bootstrap an isolate and a context from scratch to create a startup
+ * snapshot. Include the side-effects of running the optional script.
* Returns { NULL, 0 } on failure.
- * The caller owns the data array in the return value.
+ * The caller acquires ownership of the data array in the return value.
*/
- static StartupData CreateSnapshotDataBlob(const char* custom_source = NULL);
+ static StartupData CreateSnapshotDataBlob(const char* embedded_source = NULL);
+
+ /**
+ * Bootstrap an isolate and a context from the cold startup blob, run the
+ * warm-up script to trigger code compilation. The side effects are then
+ * discarded. The resulting startup snapshot will include compiled code.
+ * Returns { NULL, 0 } on failure.
+ * The caller acquires ownership of the data array in the return value.
+ * The argument startup blob is untouched.
+ */
+ static StartupData WarmUpSnapshotDataBlob(StartupData cold_startup_blob,
+ const char* warmup_source);
/**
* Adds a message listener.
@@ -6475,6 +6654,8 @@ class V8_EXPORT V8 {
static internal::Object** CopyPersistent(internal::Object** handle);
static void DisposeGlobal(internal::Object** global_handle);
typedef WeakCallbackData<Value, void>::Callback WeakCallback;
+ static void RegisterExternallyReferencedObject(internal::Object** object,
+ internal::Isolate* isolate);
static void MakeWeak(internal::Object** global_handle, void* data,
WeakCallback weak_callback);
static void MakeWeak(internal::Object** global_handle, void* data,
@@ -7149,7 +7330,7 @@ class Internals {
1 * kApiPointerSize + kApiIntSize;
static const int kStringResourceOffset = 3 * kApiPointerSize;
- static const int kOddballKindOffset = 4 * kApiPointerSize;
+ static const int kOddballKindOffset = 5 * kApiPointerSize;
static const int kForeignAddressOffset = kApiPointerSize;
static const int kJSObjectHeaderSize = 3 * kApiPointerSize;
static const int kFixedArrayHeaderSize = 2 * kApiPointerSize;
@@ -7168,11 +7349,12 @@ class Internals {
static const int kIsolateRootsOffset =
kAmountOfExternalAllocatedMemoryAtLastGlobalGCOffset + kApiInt64Size +
kApiPointerSize;
- static const int kUndefinedValueRootIndex = 5;
- static const int kNullValueRootIndex = 7;
- static const int kTrueValueRootIndex = 8;
- static const int kFalseValueRootIndex = 9;
- static const int kEmptyStringRootIndex = 10;
+ static const int kUndefinedValueRootIndex = 4;
+ static const int kTheHoleValueRootIndex = 5;
+ static const int kNullValueRootIndex = 6;
+ static const int kTrueValueRootIndex = 7;
+ static const int kFalseValueRootIndex = 8;
+ static const int kEmptyStringRootIndex = 9;
// The external allocation limit should be below 256 MB on all architectures
// to avoid that resource-constrained embedders run low on memory.
@@ -7188,7 +7370,7 @@ class Internals {
static const int kNodeIsPartiallyDependentShift = 4;
static const int kNodeIsActiveShift = 4;
- static const int kJSObjectType = 0xb5;
+ static const int kJSObjectType = 0xb8;
static const int kFirstNonstringType = 0x80;
static const int kOddballType = 0x83;
static const int kForeignType = 0x87;
@@ -7492,6 +7674,13 @@ P* PersistentBase<T>::ClearWeak() {
V8::ClearWeak(reinterpret_cast<internal::Object**>(this->val_)));
}
+template <class T>
+void PersistentBase<T>::RegisterExternalReference(Isolate* isolate) {
+ if (IsEmpty()) return;
+ V8::RegisterExternallyReferencedObject(
+ reinterpret_cast<internal::Object**>(this->val_),
+ reinterpret_cast<internal::Isolate*>(isolate));
+}
template <class T>
void PersistentBase<T>::MarkIndependent() {
@@ -7641,14 +7830,22 @@ void ReturnValue<T>::SetEmptyString() {
*value_ = *I::GetRoot(GetIsolate(), I::kEmptyStringRootIndex);
}
-template<typename T>
-Isolate* ReturnValue<T>::GetIsolate() {
+template <typename T>
+Isolate* ReturnValue<T>::GetIsolate() const {
// Isolate is always the pointer below the default value on the stack.
return *reinterpret_cast<Isolate**>(&value_[-2]);
}
-template<typename T>
-template<typename S>
+template <typename T>
+Local<Value> ReturnValue<T>::Get() const {
+ typedef internal::Internals I;
+ if (*value_ == *I::GetRoot(GetIsolate(), I::kTheHoleValueRootIndex))
+ return Local<Value>(*Undefined(GetIsolate()));
+ return Local<Value>::New(GetIsolate(), reinterpret_cast<Value*>(value_));
+}
+
+template <typename T>
+template <typename S>
void ReturnValue<T>::Set(S* whatever) {
// Uncompilable to prevent inadvertent misuse.
TYPE_CHECK(S*, Primitive);
diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h
index d2be68561c..ce3a9d2f4f 100644
--- a/deps/v8/include/v8config.h
+++ b/deps/v8/include/v8config.h
@@ -266,13 +266,7 @@
# define V8_HAS_BUILTIN_FRAME_ADDRESS (V8_GNUC_PREREQ(2, 96, 0))
# define V8_HAS_BUILTIN_POPCOUNT (V8_GNUC_PREREQ(3, 4, 0))
-// g++ requires -std=c++0x or -std=gnu++0x to support C++11 functionality
-// without warnings (functionality used by the macros below). These modes
-// are detectable by checking whether __GXX_EXPERIMENTAL_CXX0X__ is defined or,
-// more standardly, by checking whether __cplusplus has a C++11 or greater
-// value. Current versions of g++ do not correctly set __cplusplus, so we check
-// both for forward compatibility.
-# if defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L
+# if __cplusplus >= 201103L
# define V8_HAS_CXX11_ALIGNAS (V8_GNUC_PREREQ(4, 8, 0))
# define V8_HAS_CXX11_ALIGNOF (V8_GNUC_PREREQ(4, 8, 0))
# endif
diff --git a/deps/v8/infra/config/cq.cfg b/deps/v8/infra/config/cq.cfg
index fbf090bb59..5f85111f20 100644
--- a/deps/v8/infra/config/cq.cfg
+++ b/deps/v8/infra/config/cq.cfg
@@ -65,13 +65,14 @@ verifiers {
name: "v8_win_rel_ng_triggered"
triggered_by: "v8_win_rel_ng"
}
- }
- buckets {
- name: "tryserver.v8"
builders {
name: "v8_linux_blink_rel"
experiment_percentage: 20
}
+ builders {
+ name: "v8_linux64_sanitizer_coverage_rel"
+ experiment_percentage: 100
+ }
}
}
diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS
index b0b703b7cc..1bb616ef33 100644
--- a/deps/v8/src/DEPS
+++ b/deps/v8/src/DEPS
@@ -21,7 +21,4 @@ specific_include_rules = {
"d8\.cc": [
"+include/libplatform/libplatform.h",
],
- "api-experimental\.cc": [
- "+src/compiler/fast-accessor-assembler.h",
- ],
}
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index 766509e2a5..374c0a21f8 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -32,6 +32,7 @@ Handle<AccessorInfo> Accessors::MakeAccessor(
info->set_all_can_read(false);
info->set_all_can_write(false);
info->set_is_special_data_property(true);
+ info->set_is_sloppy(false);
name = factory->InternalizeName(name);
info->set_name(*name);
Handle<Object> get = v8::FromCData(isolate, getter);
@@ -817,7 +818,7 @@ void Accessors::FunctionLengthGetter(
} else {
// If the function isn't compiled yet, the length is not computed
// correctly yet. Compile it now and return the right length.
- if (Compiler::Compile(function, KEEP_EXCEPTION)) {
+ if (Compiler::Compile(function, Compiler::KEEP_EXCEPTION)) {
length = function->shared()->length();
}
if (isolate->has_pending_exception()) {
diff --git a/deps/v8/src/api-arguments.cc b/deps/v8/src/api-arguments.cc
new file mode 100644
index 0000000000..c4b698c5a2
--- /dev/null
+++ b/deps/v8/src/api-arguments.cc
@@ -0,0 +1,31 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/api-arguments.h"
+
+namespace v8 {
+namespace internal {
+
+Handle<Object> FunctionCallbackArguments::Call(FunctionCallback f) {
+ Isolate* isolate = this->isolate();
+ VMState<EXTERNAL> state(isolate);
+ ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
+ FunctionCallbackInfo<v8::Value> info(begin(), argv_, argc_,
+ is_construct_call_);
+ f(info);
+ return GetReturnValue<Object>(isolate);
+}
+
+Handle<JSObject> PropertyCallbackArguments::Call(
+ IndexedPropertyEnumeratorCallback f) {
+ Isolate* isolate = this->isolate();
+ VMState<EXTERNAL> state(isolate);
+ ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
+ PropertyCallbackInfo<v8::Array> info(begin());
+ f(info);
+ return GetReturnValue<JSObject>(isolate);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/api-arguments.h b/deps/v8/src/api-arguments.h
new file mode 100644
index 0000000000..3bfe34dc89
--- /dev/null
+++ b/deps/v8/src/api-arguments.h
@@ -0,0 +1,254 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_API_ARGUMENTS_H_
+#define V8_API_ARGUMENTS_H_
+
+#include "src/api.h"
+#include "src/isolate.h"
+#include "src/tracing/trace-event.h"
+#include "src/vm-state-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// Custom arguments replicate a small segment of stack that can be
+// accessed through an Arguments object the same way the actual stack
+// can.
+template <int kArrayLength>
+class CustomArgumentsBase : public Relocatable {
+ public:
+ virtual inline void IterateInstance(ObjectVisitor* v) {
+ v->VisitPointers(values_, values_ + kArrayLength);
+ }
+
+ protected:
+ inline Object** begin() { return values_; }
+ explicit inline CustomArgumentsBase(Isolate* isolate)
+ : Relocatable(isolate) {}
+ Object* values_[kArrayLength];
+};
+
+template <typename T>
+class CustomArguments : public CustomArgumentsBase<T::kArgsLength> {
+ public:
+ static const int kReturnValueOffset = T::kReturnValueIndex;
+
+ typedef CustomArgumentsBase<T::kArgsLength> Super;
+ ~CustomArguments() {
+ this->begin()[kReturnValueOffset] =
+ reinterpret_cast<Object*>(kHandleZapValue);
+ }
+
+ protected:
+ explicit inline CustomArguments(Isolate* isolate) : Super(isolate) {}
+
+ template <typename V>
+ Handle<V> GetReturnValue(Isolate* isolate);
+
+ inline Isolate* isolate() {
+ return reinterpret_cast<Isolate*>(this->begin()[T::kIsolateIndex]);
+ }
+};
+
+template <typename T>
+template <typename V>
+Handle<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) {
+ // Check the ReturnValue.
+ Object** handle = &this->begin()[kReturnValueOffset];
+ // Nothing was set, return empty handle as per previous behaviour.
+ if ((*handle)->IsTheHole()) return Handle<V>();
+ Handle<V> result = Handle<V>::cast(Handle<Object>(handle));
+ result->VerifyApiCallResultType();
+ return result;
+}
+
+class PropertyCallbackArguments
+ : public CustomArguments<PropertyCallbackInfo<Value> > {
+ public:
+ typedef PropertyCallbackInfo<Value> T;
+ typedef CustomArguments<T> Super;
+ static const int kArgsLength = T::kArgsLength;
+ static const int kThisIndex = T::kThisIndex;
+ static const int kHolderIndex = T::kHolderIndex;
+ static const int kDataIndex = T::kDataIndex;
+ static const int kReturnValueDefaultValueIndex =
+ T::kReturnValueDefaultValueIndex;
+ static const int kIsolateIndex = T::kIsolateIndex;
+ static const int kShouldThrowOnErrorIndex = T::kShouldThrowOnErrorIndex;
+
+ PropertyCallbackArguments(Isolate* isolate, Object* data, Object* self,
+ JSObject* holder, Object::ShouldThrow should_throw)
+ : Super(isolate) {
+ Object** values = this->begin();
+ values[T::kThisIndex] = self;
+ values[T::kHolderIndex] = holder;
+ values[T::kDataIndex] = data;
+ values[T::kIsolateIndex] = reinterpret_cast<Object*>(isolate);
+ values[T::kShouldThrowOnErrorIndex] =
+ Smi::FromInt(should_throw == Object::THROW_ON_ERROR ? 1 : 0);
+
+ // Here the hole is set as default value.
+ // It cannot escape into js as it's remove in Call below.
+ values[T::kReturnValueDefaultValueIndex] =
+ isolate->heap()->the_hole_value();
+ values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
+ DCHECK(values[T::kHolderIndex]->IsHeapObject());
+ DCHECK(values[T::kIsolateIndex]->IsSmi());
+ }
+
+/*
+ * The following Call functions wrap the calling of all callbacks to handle
+ * calling either the old or the new style callbacks depending on which one
+ * has been registered.
+ * For old callbacks which return an empty handle, the ReturnValue is checked
+ * and used if it's been set to anything inside the callback.
+ * New style callbacks always use the return value.
+ */
+ Handle<JSObject> Call(IndexedPropertyEnumeratorCallback f);
+
+#define FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME(F) \
+ F(AccessorNameGetterCallback, "get", v8::Value, Object) \
+ F(GenericNamedPropertyQueryCallback, "has", v8::Integer, Object) \
+ F(GenericNamedPropertyDeleterCallback, "delete", v8::Boolean, Object)
+
+#define WRITE_CALL_1_NAME(Function, type, ApiReturn, InternalReturn) \
+ Handle<InternalReturn> Call(Function f, Handle<Name> name) { \
+ Isolate* isolate = this->isolate(); \
+ VMState<EXTERNAL> state(isolate); \
+ ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
+ PropertyCallbackInfo<ApiReturn> info(begin()); \
+ LOG(isolate, \
+ ApiNamedPropertyAccess("interceptor-named-" type, holder(), *name)); \
+ f(v8::Utils::ToLocal(name), info); \
+ return GetReturnValue<InternalReturn>(isolate); \
+ }
+
+ FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME(WRITE_CALL_1_NAME)
+
+#undef FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME
+#undef WRITE_CALL_1_NAME
+
+#define FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX(F) \
+ F(IndexedPropertyGetterCallback, "get", v8::Value, Object) \
+ F(IndexedPropertyQueryCallback, "has", v8::Integer, Object) \
+ F(IndexedPropertyDeleterCallback, "delete", v8::Boolean, Object)
+
+#define WRITE_CALL_1_INDEX(Function, type, ApiReturn, InternalReturn) \
+ Handle<InternalReturn> Call(Function f, uint32_t index) { \
+ Isolate* isolate = this->isolate(); \
+ VMState<EXTERNAL> state(isolate); \
+ ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
+ PropertyCallbackInfo<ApiReturn> info(begin()); \
+ LOG(isolate, ApiIndexedPropertyAccess("interceptor-indexed-" type, \
+ holder(), index)); \
+ f(index, info); \
+ return GetReturnValue<InternalReturn>(isolate); \
+ }
+
+ FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX(WRITE_CALL_1_INDEX)
+
+#undef FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX
+#undef WRITE_CALL_1_INDEX
+
+ Handle<Object> Call(GenericNamedPropertySetterCallback f, Handle<Name> name,
+ Handle<Object> value) {
+ Isolate* isolate = this->isolate();
+ VMState<EXTERNAL> state(isolate);
+ ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
+ PropertyCallbackInfo<v8::Value> info(begin());
+ LOG(isolate,
+ ApiNamedPropertyAccess("interceptor-named-set", holder(), *name));
+ f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
+ return GetReturnValue<Object>(isolate);
+ }
+
+ Handle<Object> Call(IndexedPropertySetterCallback f, uint32_t index,
+ Handle<Object> value) {
+ Isolate* isolate = this->isolate();
+ VMState<EXTERNAL> state(isolate);
+ ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
+ PropertyCallbackInfo<v8::Value> info(begin());
+ LOG(isolate,
+ ApiIndexedPropertyAccess("interceptor-indexed-set", holder(), index));
+ f(index, v8::Utils::ToLocal(value), info);
+ return GetReturnValue<Object>(isolate);
+ }
+
+ void Call(AccessorNameSetterCallback f, Handle<Name> name,
+ Handle<Object> value) {
+ Isolate* isolate = this->isolate();
+ VMState<EXTERNAL> state(isolate);
+ ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
+ PropertyCallbackInfo<void> info(begin());
+ LOG(isolate,
+ ApiNamedPropertyAccess("interceptor-named-set", holder(), *name));
+ f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
+ }
+
+ private:
+ inline JSObject* holder() {
+ return JSObject::cast(this->begin()[T::kHolderIndex]);
+ }
+};
+
+class FunctionCallbackArguments
+ : public CustomArguments<FunctionCallbackInfo<Value> > {
+ public:
+ typedef FunctionCallbackInfo<Value> T;
+ typedef CustomArguments<T> Super;
+ static const int kArgsLength = T::kArgsLength;
+ static const int kHolderIndex = T::kHolderIndex;
+ static const int kDataIndex = T::kDataIndex;
+ static const int kReturnValueDefaultValueIndex =
+ T::kReturnValueDefaultValueIndex;
+ static const int kIsolateIndex = T::kIsolateIndex;
+ static const int kCalleeIndex = T::kCalleeIndex;
+ static const int kContextSaveIndex = T::kContextSaveIndex;
+
+ FunctionCallbackArguments(internal::Isolate* isolate, internal::Object* data,
+ internal::HeapObject* callee,
+ internal::Object* holder, internal::Object** argv,
+ int argc, bool is_construct_call)
+ : Super(isolate),
+ argv_(argv),
+ argc_(argc),
+ is_construct_call_(is_construct_call) {
+ Object** values = begin();
+ values[T::kDataIndex] = data;
+ values[T::kCalleeIndex] = callee;
+ values[T::kHolderIndex] = holder;
+ values[T::kContextSaveIndex] = isolate->heap()->the_hole_value();
+ values[T::kIsolateIndex] = reinterpret_cast<internal::Object*>(isolate);
+ // Here the hole is set as default value.
+ // It cannot escape into js as it's remove in Call below.
+ values[T::kReturnValueDefaultValueIndex] =
+ isolate->heap()->the_hole_value();
+ values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
+ DCHECK(values[T::kCalleeIndex]->IsJSFunction() ||
+ values[T::kCalleeIndex]->IsFunctionTemplateInfo());
+ DCHECK(values[T::kHolderIndex]->IsHeapObject());
+ DCHECK(values[T::kIsolateIndex]->IsSmi());
+ }
+
+ /*
+ * The following Call function wraps the calling of all callbacks to handle
+ * calling either the old or the new style callbacks depending on which one
+ * has been registered.
+ * For old callbacks which return an empty handle, the ReturnValue is checked
+ * and used if it's been set to anything inside the callback.
+ * New style callbacks always use the return value.
+ */
+ Handle<Object> Call(FunctionCallback f);
+
+ private:
+ internal::Object** argv_;
+ int argc_;
+ bool is_construct_call_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_API_ARGUMENTS_H_
diff --git a/deps/v8/src/api-experimental.cc b/deps/v8/src/api-experimental.cc
index 98d62e33a2..3928434270 100644
--- a/deps/v8/src/api-experimental.cc
+++ b/deps/v8/src/api-experimental.cc
@@ -11,20 +11,17 @@
#include "include/v8.h"
#include "include/v8-experimental.h"
#include "src/api.h"
-#include "src/compiler/fast-accessor-assembler.h"
+#include "src/fast-accessor-assembler.h"
namespace {
-
-v8::internal::compiler::FastAccessorAssembler* FromApi(
+v8::internal::FastAccessorAssembler* FromApi(
v8::experimental::FastAccessorBuilder* builder) {
- return reinterpret_cast<v8::internal::compiler::FastAccessorAssembler*>(
- builder);
+ return reinterpret_cast<v8::internal::FastAccessorAssembler*>(builder);
}
-
v8::experimental::FastAccessorBuilder* FromInternal(
- v8::internal::compiler::FastAccessorAssembler* fast_accessor_assembler) {
+ v8::internal::FastAccessorAssembler* fast_accessor_assembler) {
return reinterpret_cast<v8::experimental::FastAccessorBuilder*>(
fast_accessor_assembler);
}
@@ -57,8 +54,8 @@ namespace experimental {
FastAccessorBuilder* FastAccessorBuilder::New(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- internal::compiler::FastAccessorAssembler* faa =
- new internal::compiler::FastAccessorAssembler(i_isolate);
+ internal::FastAccessorAssembler* faa =
+ new internal::FastAccessorAssembler(i_isolate);
return FromInternal(faa);
}
diff --git a/deps/v8/src/api-natives.cc b/deps/v8/src/api-natives.cc
index 3be2df0bb6..adf4b6af57 100644
--- a/deps/v8/src/api-natives.cc
+++ b/deps/v8/src/api-natives.cc
@@ -266,28 +266,45 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
return obj;
}
-void CacheTemplateInstantiation(Isolate* isolate, Handle<Smi> serial_number,
+void CacheTemplateInstantiation(Isolate* isolate, uint32_t serial_number,
Handle<JSObject> object) {
auto cache = isolate->template_instantiations_cache();
- auto new_cache = ObjectHashTable::Put(cache, serial_number, object);
+ auto new_cache =
+ UnseededNumberDictionary::AtNumberPut(cache, serial_number, object);
isolate->native_context()->set_template_instantiations_cache(*new_cache);
}
-void UncacheTemplateInstantiation(Isolate* isolate, Handle<Smi> serial_number) {
+void UncacheTemplateInstantiation(Isolate* isolate, uint32_t serial_number) {
auto cache = isolate->template_instantiations_cache();
- bool was_present = false;
- auto new_cache = ObjectHashTable::Remove(cache, serial_number, &was_present);
- DCHECK(was_present);
+ int entry = cache->FindEntry(serial_number);
+ DCHECK(entry != UnseededNumberDictionary::kNotFound);
+ Handle<Object> result =
+ UnseededNumberDictionary::DeleteProperty(cache, entry);
+ USE(result);
+ DCHECK(result->IsTrue());
+ auto new_cache = UnseededNumberDictionary::Shrink(cache, entry);
isolate->native_context()->set_template_instantiations_cache(*new_cache);
}
MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
Handle<ObjectTemplateInfo> info,
bool is_hidden_prototype) {
- // Enter a new scope. Recursion could otherwise create a lot of handles.
- HandleScope scope(isolate);
// Fast path.
Handle<JSObject> result;
+ uint32_t serial_number =
+ static_cast<uint32_t>(Smi::cast(info->serial_number())->value());
+ if (serial_number) {
+ // Probe cache.
+ auto cache = isolate->template_instantiations_cache();
+ int entry = cache->FindEntry(serial_number);
+ if (entry != UnseededNumberDictionary::kNotFound) {
+ Object* boilerplate = cache->ValueAt(entry);
+ result = handle(JSObject::cast(boilerplate), isolate);
+ return isolate->factory()->CopyJSObject(result);
+ }
+ }
+ // Enter a new scope. Recursion could otherwise create a lot of handles.
+ HandleScope scope(isolate);
auto constructor = handle(info->constructor(), isolate);
Handle<JSFunction> cons;
if (constructor->IsUndefined()) {
@@ -297,18 +314,6 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
ASSIGN_RETURN_ON_EXCEPTION(
isolate, cons, InstantiateFunction(isolate, cons_templ), JSFunction);
}
- auto serial_number = handle(Smi::cast(info->serial_number()), isolate);
- if (serial_number->value()) {
- // Probe cache.
- auto cache = isolate->template_instantiations_cache();
- Object* boilerplate = cache->Lookup(serial_number);
- if (boilerplate->IsJSObject()) {
- result = handle(JSObject::cast(boilerplate), isolate);
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result, JSObject::DeepCopyApiBoilerplate(result), JSObject);
- return scope.CloseAndEscape(result);
- }
- }
auto object = isolate->factory()->NewJSObject(cons);
ASSIGN_RETURN_ON_EXCEPTION(
isolate, result,
@@ -317,10 +322,9 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
// TODO(dcarney): is this necessary?
JSObject::MigrateSlowToFast(result, 0, "ApiNatives::InstantiateObject");
- if (serial_number->value()) {
+ if (serial_number) {
CacheTemplateInstantiation(isolate, serial_number, result);
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result, JSObject::DeepCopyApiBoilerplate(result), JSObject);
+ result = isolate->factory()->CopyJSObject(result);
}
return scope.CloseAndEscape(result);
}
@@ -329,12 +333,14 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
Handle<FunctionTemplateInfo> data,
Handle<Name> name) {
- auto serial_number = handle(Smi::cast(data->serial_number()), isolate);
- if (serial_number->value()) {
+ uint32_t serial_number =
+ static_cast<uint32_t>(Smi::cast(data->serial_number())->value());
+ if (serial_number) {
// Probe cache.
auto cache = isolate->template_instantiations_cache();
- Object* element = cache->Lookup(serial_number);
- if (element->IsJSFunction()) {
+ int entry = cache->FindEntry(serial_number);
+ if (entry != UnseededNumberDictionary::kNotFound) {
+ Object* element = cache->ValueAt(entry);
return handle(JSFunction::cast(element), isolate);
}
}
@@ -378,7 +384,7 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
if (!name.is_null() && name->IsString()) {
function->shared()->set_name(*name);
}
- if (serial_number->value()) {
+ if (serial_number) {
// Cache the function.
CacheTemplateInstantiation(isolate, serial_number, function);
}
@@ -386,7 +392,7 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
ConfigureInstance(isolate, function, data, data->hidden_prototype());
if (result.is_null()) {
// Uncache on error.
- if (serial_number->value()) {
+ if (serial_number) {
UncacheTemplateInstantiation(isolate, serial_number);
}
return MaybeHandle<JSFunction>();
@@ -536,7 +542,13 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
InstanceType type;
switch (instance_type) {
case JavaScriptObjectType:
- type = JS_OBJECT_TYPE;
+ if (!obj->needs_access_check() &&
+ obj->named_property_handler()->IsUndefined() &&
+ obj->indexed_property_handler()->IsUndefined()) {
+ type = JS_OBJECT_TYPE;
+ } else {
+ type = JS_SPECIAL_API_OBJECT_TYPE;
+ }
instance_size += JSObject::kHeaderSize;
break;
case GlobalObjectType:
@@ -564,7 +576,7 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
result->shared()->set_instance_class_name(*class_name);
result->shared()->set_name(*class_name);
}
- result->shared()->set_function_data(*obj);
+ result->shared()->set_api_func_data(*obj);
result->shared()->set_construct_stub(*construct_stub);
result->shared()->DontAdaptArguments();
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 9b6ae72052..853bd50f21 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -156,6 +156,18 @@ class InternalEscapableScope : public v8::EscapableHandleScope {
};
+#ifdef DEBUG
+void CheckMicrotasksScopesConsistency(i::Isolate* isolate) {
+ auto handle_scope_implementer = isolate->handle_scope_implementer();
+ if (handle_scope_implementer->microtasks_policy() ==
+ v8::MicrotasksPolicy::kScoped) {
+ DCHECK(handle_scope_implementer->GetMicrotasksScopeDepth() ||
+ !handle_scope_implementer->DebugMicrotasksScopeDepthIsZero());
+ }
+}
+#endif
+
+
class CallDepthScope {
public:
explicit CallDepthScope(i::Isolate* isolate, Local<Context> context,
@@ -175,6 +187,9 @@ class CallDepthScope {
if (!context_.IsEmpty()) context_->Exit();
if (!escaped_) isolate_->handle_scope_implementer()->DecrementCallDepth();
if (do_callback_) isolate_->FireCallCompletedCallback();
+#ifdef DEBUG
+ if (do_callback_) CheckMicrotasksScopesConsistency(isolate_);
+#endif
}
void Escape() {
@@ -226,7 +241,7 @@ void i::FatalProcessOutOfMemory(const char* location) {
// When V8 cannot allocated memory FatalProcessOutOfMemory is called.
// The default fatal error handler is called and execution is stopped.
-void i::V8::FatalProcessOutOfMemory(const char* location, bool is_heap_oom) {
+void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
i::Isolate* isolate = i::Isolate::Current();
char last_few_messages[Heap::kTraceRingBufferSize + 1];
char js_stacktrace[Heap::kStacktraceBufferSize + 1];
@@ -288,9 +303,7 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool is_heap_oom) {
PrintF("\n<--- Last few GCs --->\n%s\n", first_newline);
PrintF("\n<--- JS stacktrace --->\n%s\n", js_stacktrace);
}
- Utils::ApiCheck(false, location, is_heap_oom
- ? "Allocation failed - JavaScript heap out of memory"
- : "Allocation failed - process out of memory");
+ Utils::ApiCheck(false, location, "Allocation failed - process out of memory");
// If the fatal error handler returns, we stop execution.
FATAL("API fatal error handler returned after process out of memory");
}
@@ -328,12 +341,23 @@ void V8::SetSnapshotDataBlob(StartupData* snapshot_blob) {
i::V8::SetSnapshotBlob(snapshot_blob);
}
+namespace {
+
+class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
+ public:
+ virtual void* Allocate(size_t length) {
+ void* data = AllocateUninitialized(length);
+ return data == NULL ? data : memset(data, 0, length);
+ }
+ virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
+ virtual void Free(void* data, size_t) { free(data); }
+};
bool RunExtraCode(Isolate* isolate, Local<Context> context,
- const char* utf8_source) {
- // Run custom script if provided.
+ const char* utf8_source, const char* name) {
base::ElapsedTimer timer;
timer.Start();
+ Context::Scope context_scope(context);
TryCatch try_catch(isolate);
Local<String> source_string;
if (!String::NewFromUtf8(isolate, utf8_source, NewStringType::kNormal)
@@ -341,7 +365,7 @@ bool RunExtraCode(Isolate* isolate, Local<Context> context,
return false;
}
Local<String> resource_name =
- String::NewFromUtf8(isolate, "<embedded script>", NewStringType::kNormal)
+ String::NewFromUtf8(isolate, name, NewStringType::kNormal)
.ToLocalChecked();
ScriptOrigin origin(resource_name);
ScriptCompiler::Source source(source_string, origin);
@@ -349,7 +373,7 @@ bool RunExtraCode(Isolate* isolate, Local<Context> context,
if (!ScriptCompiler::Compile(context, &source).ToLocal(&script)) return false;
if (script->Run(context).IsEmpty()) return false;
if (i::FLAG_profile_deserialization) {
- i::PrintF("Executing custom snapshot script took %0.3f ms\n",
+ i::PrintF("Executing custom snapshot script %s took %0.3f ms\n", name,
timer.Elapsed().InMillisecondsF());
}
timer.Stop();
@@ -357,92 +381,152 @@ bool RunExtraCode(Isolate* isolate, Local<Context> context,
return true;
}
+StartupData SerializeIsolateAndContext(
+ Isolate* isolate, Persistent<Context>* context,
+ i::Snapshot::Metadata metadata,
+ i::StartupSerializer::FunctionCodeHandling function_code_handling) {
+ if (context->IsEmpty()) return {NULL, 0};
-namespace {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
-class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
- public:
- virtual void* Allocate(size_t length) {
- void* data = AllocateUninitialized(length);
- return data == NULL ? data : memset(data, 0, length);
+ // If we don't do this then we end up with a stray root pointing at the
+ // context even after we have disposed of the context.
+ internal_isolate->heap()->CollectAllAvailableGarbage("mksnapshot");
+
+ // GC may have cleared weak cells, so compact any WeakFixedArrays
+ // found on the heap.
+ i::HeapIterator iterator(internal_isolate->heap(),
+ i::HeapIterator::kFilterUnreachable);
+ for (i::HeapObject* o = iterator.next(); o != NULL; o = iterator.next()) {
+ if (o->IsPrototypeInfo()) {
+ i::Object* prototype_users = i::PrototypeInfo::cast(o)->prototype_users();
+ if (prototype_users->IsWeakFixedArray()) {
+ i::WeakFixedArray* array = i::WeakFixedArray::cast(prototype_users);
+ array->Compact<i::JSObject::PrototypeRegistryCompactionCallback>();
+ }
+ } else if (o->IsScript()) {
+ i::Object* shared_list = i::Script::cast(o)->shared_function_infos();
+ if (shared_list->IsWeakFixedArray()) {
+ i::WeakFixedArray* array = i::WeakFixedArray::cast(shared_list);
+ array->Compact<i::WeakFixedArray::NullCallback>();
+ }
+ }
}
- virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
- virtual void Free(void* data, size_t) { free(data); }
-};
+
+ i::Object* raw_context = *v8::Utils::OpenPersistent(*context);
+ context->Reset();
+
+ i::SnapshotByteSink snapshot_sink;
+ i::StartupSerializer ser(internal_isolate, &snapshot_sink,
+ function_code_handling);
+ ser.SerializeStrongReferences();
+
+ i::SnapshotByteSink context_sink;
+ i::PartialSerializer context_ser(internal_isolate, &ser, &context_sink);
+ context_ser.Serialize(&raw_context);
+ ser.SerializeWeakReferencesAndDeferred();
+
+ return i::Snapshot::CreateSnapshotBlob(ser, context_ser, metadata);
+}
} // namespace
+StartupData V8::CreateSnapshotDataBlob(const char* embedded_source) {
+ // Create a new isolate and a new context from scratch, optionally run
+ // a script to embed, and serialize to create a snapshot blob.
+ StartupData result = {NULL, 0};
+
+ base::ElapsedTimer timer;
+ timer.Start();
-StartupData V8::CreateSnapshotDataBlob(const char* custom_source) {
- i::Isolate* internal_isolate = new i::Isolate(true);
ArrayBufferAllocator allocator;
+ i::Isolate* internal_isolate = new i::Isolate(true);
internal_isolate->set_array_buffer_allocator(&allocator);
Isolate* isolate = reinterpret_cast<Isolate*>(internal_isolate);
- StartupData result = {NULL, 0};
+
{
- base::ElapsedTimer timer;
- timer.Start();
Isolate::Scope isolate_scope(isolate);
internal_isolate->Init(NULL);
Persistent<Context> context;
- i::Snapshot::Metadata metadata;
{
HandleScope handle_scope(isolate);
Local<Context> new_context = Context::New(isolate);
context.Reset(isolate, new_context);
- if (custom_source != NULL) {
- metadata.set_embeds_script(true);
- Context::Scope context_scope(new_context);
- if (!RunExtraCode(isolate, new_context, custom_source)) context.Reset();
+ if (embedded_source != NULL &&
+ !RunExtraCode(isolate, new_context, embedded_source, "<embedded>")) {
+ context.Reset();
}
}
- if (!context.IsEmpty()) {
- // If we don't do this then we end up with a stray root pointing at the
- // context even after we have disposed of the context.
- internal_isolate->heap()->CollectAllAvailableGarbage("mksnapshot");
-
- // GC may have cleared weak cells, so compact any WeakFixedArrays
- // found on the heap.
- i::HeapIterator iterator(internal_isolate->heap(),
- i::HeapIterator::kFilterUnreachable);
- for (i::HeapObject* o = iterator.next(); o != NULL; o = iterator.next()) {
- if (o->IsPrototypeInfo()) {
- i::Object* prototype_users =
- i::PrototypeInfo::cast(o)->prototype_users();
- if (prototype_users->IsWeakFixedArray()) {
- i::WeakFixedArray* array = i::WeakFixedArray::cast(prototype_users);
- array->Compact<i::JSObject::PrototypeRegistryCompactionCallback>();
- }
- } else if (o->IsScript()) {
- i::Object* shared_list = i::Script::cast(o)->shared_function_infos();
- if (shared_list->IsWeakFixedArray()) {
- i::WeakFixedArray* array = i::WeakFixedArray::cast(shared_list);
- array->Compact<i::WeakFixedArray::NullCallback>();
- }
- }
- }
- i::Object* raw_context = *v8::Utils::OpenPersistent(context);
- context.Reset();
+ i::Snapshot::Metadata metadata;
+ metadata.set_embeds_script(embedded_source != NULL);
- i::SnapshotByteSink snapshot_sink;
- i::StartupSerializer ser(internal_isolate, &snapshot_sink);
- ser.SerializeStrongReferences();
+ result = SerializeIsolateAndContext(
+ isolate, &context, metadata, i::StartupSerializer::CLEAR_FUNCTION_CODE);
+ DCHECK(context.IsEmpty());
+ }
+ isolate->Dispose();
- i::SnapshotByteSink context_sink;
- i::PartialSerializer context_ser(internal_isolate, &ser, &context_sink);
- context_ser.Serialize(&raw_context);
- ser.SerializeWeakReferencesAndDeferred();
+ if (i::FLAG_profile_deserialization) {
+ i::PrintF("Creating snapshot took %0.3f ms\n",
+ timer.Elapsed().InMillisecondsF());
+ }
+ timer.Stop();
+ return result;
+}
+
+StartupData V8::WarmUpSnapshotDataBlob(StartupData cold_snapshot_blob,
+ const char* warmup_source) {
+ CHECK(cold_snapshot_blob.raw_size > 0 && cold_snapshot_blob.data != NULL);
+ CHECK(warmup_source != NULL);
+ // Use following steps to create a warmed up snapshot blob from a cold one:
+ // - Create a new isolate from the cold snapshot.
+ // - Create a new context to run the warmup script. This will trigger
+ // compilation of executed functions.
+ // - Create a new context. This context will be unpolluted.
+ // - Serialize the isolate and the second context into a new snapshot blob.
+ StartupData result = {NULL, 0};
+
+ base::ElapsedTimer timer;
+ timer.Start();
- result = i::Snapshot::CreateSnapshotBlob(ser, context_ser, metadata);
+ ArrayBufferAllocator allocator;
+ i::Isolate* internal_isolate = new i::Isolate(true);
+ internal_isolate->set_array_buffer_allocator(&allocator);
+ internal_isolate->set_snapshot_blob(&cold_snapshot_blob);
+ Isolate* isolate = reinterpret_cast<Isolate*>(internal_isolate);
+
+ {
+ Isolate::Scope isolate_scope(isolate);
+ i::Snapshot::Initialize(internal_isolate);
+ Persistent<Context> context;
+ bool success;
+ {
+ HandleScope handle_scope(isolate);
+ Local<Context> new_context = Context::New(isolate);
+ success = RunExtraCode(isolate, new_context, warmup_source, "<warm-up>");
}
- if (i::FLAG_profile_deserialization) {
- i::PrintF("Creating snapshot took %0.3f ms\n",
- timer.Elapsed().InMillisecondsF());
+ if (success) {
+ HandleScope handle_scope(isolate);
+ isolate->ContextDisposedNotification(false);
+ Local<Context> new_context = Context::New(isolate);
+ context.Reset(isolate, new_context);
}
- timer.Stop();
+
+ i::Snapshot::Metadata metadata;
+ metadata.set_embeds_script(i::Snapshot::EmbedsScript(internal_isolate));
+
+ result = SerializeIsolateAndContext(
+ isolate, &context, metadata, i::StartupSerializer::KEEP_FUNCTION_CODE);
+ DCHECK(context.IsEmpty());
}
isolate->Dispose();
+
+ if (i::FLAG_profile_deserialization) {
+ i::PrintF("Warming up snapshot took %0.3f ms\n",
+ timer.Elapsed().InMillisecondsF());
+ }
+ timer.Stop();
return result;
}
@@ -593,6 +677,10 @@ i::Object** V8::CopyPersistent(i::Object** obj) {
return result.location();
}
+void V8::RegisterExternallyReferencedObject(i::Object** object,
+ i::Isolate* isolate) {
+ isolate->heap()->RegisterExternallyReferencedObject(object);
+}
void V8::MakeWeak(i::Object** object, void* parameter,
WeakCallback weak_callback) {
@@ -940,19 +1028,15 @@ void Template::Set(v8::Local<Name> name, v8::Local<Data> value,
ENTER_V8(isolate);
i::HandleScope scope(isolate);
auto value_obj = Utils::OpenHandle(*value);
- if (i::FLAG_warn_template_set &&
- value_obj->IsJSReceiver() &&
- !value_obj->IsTemplateInfo()) {
- base::OS::PrintError(
- "(node) v8::%sTemplate::Set() with non-primitive values is deprecated\n"
- "(node) and will stop working in the next major release.\n",
- templ->IsFunctionTemplateInfo() ? "Function" : "Object");
- isolate->PrintStack(stderr, i::Isolate::kPrintStackConcise);
- base::DumpBacktrace();
+ if (value_obj->IsObjectTemplateInfo()) {
+ templ->set_serial_number(i::Smi::FromInt(0));
+ if (templ->IsFunctionTemplateInfo()) {
+ i::Handle<i::FunctionTemplateInfo>::cast(templ)->set_do_not_cache(true);
+ }
}
// TODO(dcarney): split api to allow values of v8::Value or v8::TemplateInfo.
i::ApiNatives::AddDataProperty(isolate, templ, Utils::OpenHandle(*name),
- Utils::OpenHandle(*value),
+ value_obj,
static_cast<i::PropertyAttributes>(attribute));
}
@@ -1772,7 +1856,7 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
if (!source->source_map_url.IsEmpty()) {
source_map_url = Utils::OpenHandle(*(source->source_map_url));
}
- result = i::Compiler::CompileScript(
+ result = i::Compiler::GetSharedFunctionInfoForScript(
str, name_obj, line_offset, column_offset, source->resource_options,
source_map_url, isolate->native_context(), NULL, &script_data, options,
i::NOT_NATIVES_CODE, is_module);
@@ -1841,7 +1925,6 @@ Local<Script> ScriptCompiler::Compile(
MaybeLocal<Script> ScriptCompiler::CompileModule(Local<Context> context,
Source* source,
CompileOptions options) {
- CHECK(i::FLAG_harmony_modules);
auto isolate = context->GetIsolate();
auto maybe = CompileUnboundInternal(isolate, source, options, true);
Local<UnboundScript> generic;
@@ -2038,8 +2121,8 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
i::Handle<i::SharedFunctionInfo> result;
if (source->info->literal() != nullptr) {
// Parsing has succeeded.
- result = i::Compiler::CompileStreamedScript(script, source->info.get(),
- str->length());
+ result = i::Compiler::GetSharedFunctionInfoForStreamedScript(
+ script, source->info.get(), str->length());
}
has_pending_exception = result.is_null();
if (has_pending_exception) isolate->ReportPendingMessages();
@@ -2213,7 +2296,7 @@ MaybeLocal<Value> v8::TryCatch::StackTrace(Local<Context> context) const {
if (!maybe.FromJust()) return v8::Local<Value>();
Local<Value> result;
has_pending_exception =
- !ToLocal<Value>(i::Object::GetProperty(obj, name), &result);
+ !ToLocal<Value>(i::JSReceiver::GetProperty(obj, name), &result);
RETURN_ON_FAILED_EXECUTION(Value);
RETURN_ESCAPED(result);
}
@@ -2442,7 +2525,7 @@ Local<StackFrame> StackTrace::GetFrame(uint32_t index) const {
ENTER_V8(isolate);
EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
auto self = Utils::OpenHandle(this);
- auto obj = i::Object::GetElement(isolate, self, index).ToHandleChecked();
+ auto obj = i::JSReceiver::GetElement(isolate, self, index).ToHandleChecked();
auto jsobj = i::Handle<i::JSObject>::cast(obj);
return scope.Escape(Utils::StackFrameToLocal(jsobj));
}
@@ -2482,7 +2565,7 @@ static int getIntProperty(const StackFrame* f, const char* propertyName,
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(f);
i::Handle<i::Object> obj =
- i::Object::GetProperty(isolate, self, propertyName).ToHandleChecked();
+ i::JSReceiver::GetProperty(isolate, self, propertyName).ToHandleChecked();
return obj->IsSmi() ? i::Smi::cast(*obj)->value() : defaultValue;
}
@@ -2509,7 +2592,7 @@ static Local<String> getStringProperty(const StackFrame* f,
EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSObject> self = Utils::OpenHandle(f);
i::Handle<i::Object> obj =
- i::Object::GetProperty(isolate, self, propertyName).ToHandleChecked();
+ i::JSReceiver::GetProperty(isolate, self, propertyName).ToHandleChecked();
return obj->IsString()
? scope.Escape(Local<String>::Cast(Utils::ToLocal(obj)))
: Local<String>();
@@ -2537,7 +2620,7 @@ static bool getBoolProperty(const StackFrame* f, const char* propertyName) {
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(f);
i::Handle<i::Object> obj =
- i::Object::GetProperty(isolate, self, propertyName).ToHandleChecked();
+ i::JSReceiver::GetProperty(isolate, self, propertyName).ToHandleChecked();
return obj->IsTrue();
}
@@ -3485,7 +3568,7 @@ Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
i::LookupIterator it = i::LookupIterator::PropertyOrElement(
- isolate, self, key_obj, i::LookupIterator::OWN);
+ isolate, self, key_obj, self, i::LookupIterator::OWN);
Maybe<bool> result =
i::JSReceiver::CreateDataProperty(&it, value_obj, i::Object::DONT_THROW);
has_pending_exception = result.IsNothing();
@@ -3502,7 +3585,7 @@ Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
- i::LookupIterator it(isolate, self, index, i::LookupIterator::OWN);
+ i::LookupIterator it(isolate, self, index, self, i::LookupIterator::OWN);
Maybe<bool> result =
i::JSReceiver::CreateDataProperty(&it, value_obj, i::Object::DONT_THROW);
has_pending_exception = result.IsNothing();
@@ -3608,7 +3691,7 @@ Maybe<bool> v8::Object::SetPrivate(Local<Context> context, Local<Private> key,
i::Handle<i::Symbol>::cast(key_obj), &desc, i::Object::DONT_THROW);
}
auto js_object = i::Handle<i::JSObject>::cast(self);
- i::LookupIterator it(js_object, key_obj);
+ i::LookupIterator it(js_object, key_obj, js_object);
has_pending_exception = i::JSObject::DefineOwnPropertyIgnoreAttributes(
&it, value_obj, i::DONT_ENUM)
.is_null();
@@ -3641,7 +3724,7 @@ MaybeLocal<Value> v8::Object::Get(Local<Context> context, uint32_t index) {
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> result;
has_pending_exception =
- !i::Object::GetElement(isolate, self, index).ToHandle(&result);
+ !i::JSReceiver::GetElement(isolate, self, index).ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(Value);
RETURN_ESCAPED(Utils::ToLocal(result));
}
@@ -3768,11 +3851,10 @@ MaybeLocal<Array> v8::Object::GetPropertyNames(Local<Context> context) {
!i::JSReceiver::GetKeys(self, i::INCLUDE_PROTOS, i::ENUMERABLE_STRINGS)
.ToHandle(&value);
RETURN_ON_FAILED_EXECUTION(Array);
- // Because we use caching to speed up enumeration it is important
- // to never change the result of the basic enumeration function so
- // we clone the result.
- auto elms = isolate->factory()->CopyFixedArray(value);
- auto result = isolate->factory()->NewJSArrayWithElements(elms);
+ DCHECK(self->map()->EnumLength() == i::kInvalidEnumCacheSentinel ||
+ self->map()->EnumLength() == 0 ||
+ self->map()->instance_descriptors()->GetEnumCache() != *value);
+ auto result = isolate->factory()->NewJSArrayWithElements(value);
RETURN_ESCAPED(Utils::ToLocal(result));
}
@@ -3791,11 +3873,10 @@ MaybeLocal<Array> v8::Object::GetOwnPropertyNames(Local<Context> context) {
!i::JSReceiver::GetKeys(self, i::OWN_ONLY, i::ENUMERABLE_STRINGS)
.ToHandle(&value);
RETURN_ON_FAILED_EXECUTION(Array);
- // Because we use caching to speed up enumeration it is important
- // to never change the result of the basic enumeration function so
- // we clone the result.
- auto elms = isolate->factory()->CopyFixedArray(value);
- auto result = isolate->factory()->NewJSArrayWithElements(elms);
+ DCHECK(self->map()->EnumLength() == i::kInvalidEnumCacheSentinel ||
+ self->map()->EnumLength() == 0 ||
+ self->map()->instance_descriptors()->GetEnumCache() != *value);
+ auto result = isolate->factory()->NewJSArrayWithElements(value);
RETURN_ESCAPED(Utils::ToLocal(result));
}
@@ -3829,6 +3910,19 @@ Local<String> v8::Object::GetConstructorName() {
return Utils::ToLocal(name);
}
+Maybe<bool> v8::Object::SetIntegrityLevel(Local<Context> context,
+ IntegrityLevel level) {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::SetIntegrityLevel()",
+ bool);
+ auto self = Utils::OpenHandle(this);
+ i::JSReceiver::IntegrityLevel i_level =
+ level == IntegrityLevel::kFrozen ? i::FROZEN : i::SEALED;
+ Maybe<bool> result =
+ i::JSReceiver::SetIntegrityLevel(self, i_level, i::Object::DONT_THROW);
+ has_pending_exception = result.IsNothing();
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return result;
+}
Maybe<bool> v8::Object::Delete(Local<Context> context, Local<Value> key) {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::Delete()", bool);
@@ -4159,7 +4253,7 @@ MaybeLocal<Value> v8::Object::GetRealNamedProperty(Local<Context> context,
auto self = Utils::OpenHandle(this);
auto key_obj = Utils::OpenHandle(*key);
i::LookupIterator it = i::LookupIterator::PropertyOrElement(
- isolate, self, key_obj,
+ isolate, self, key_obj, self,
i::LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
Local<Value> result;
has_pending_exception = !ToLocal<Value>(i::Object::GetProperty(&it), &result);
@@ -4183,7 +4277,7 @@ Maybe<PropertyAttribute> v8::Object::GetRealNamedPropertyAttributes(
auto self = Utils::OpenHandle(this);
auto key_obj = Utils::OpenHandle(*key);
i::LookupIterator it = i::LookupIterator::PropertyOrElement(
- isolate, self, key_obj,
+ isolate, self, key_obj, self,
i::LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
auto result = i::JSReceiver::GetPropertyAttributes(&it);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(PropertyAttribute);
@@ -5413,13 +5507,15 @@ bool v8::V8::Dispose() {
return true;
}
-
-HeapStatistics::HeapStatistics(): total_heap_size_(0),
- total_heap_size_executable_(0),
- total_physical_size_(0),
- used_heap_size_(0),
- heap_size_limit_(0) { }
-
+HeapStatistics::HeapStatistics()
+ : total_heap_size_(0),
+ total_heap_size_executable_(0),
+ total_physical_size_(0),
+ total_available_size_(0),
+ used_heap_size_(0),
+ heap_size_limit_(0),
+ malloced_memory_(0),
+ does_zap_garbage_(0) {}
HeapSpaceStatistics::HeapSpaceStatistics(): space_name_(0),
space_size_(0),
@@ -6861,7 +6957,7 @@ static i::Handle<i::Symbol> SymbolFor(i::Isolate* isolate,
else
symbol = isolate->factory()->NewSymbol();
i::Handle<i::Symbol>::cast(symbol)->set_name(*name);
- i::JSObject::SetProperty(symbols, name, symbol, i::STRICT).Assert();
+ i::Object::SetPropertyOrElement(symbols, name, symbol, i::STRICT).Assert();
}
return i::Handle<i::Symbol>::cast(symbol);
}
@@ -7097,6 +7193,10 @@ void V8::AddGCEpilogueCallback(GCCallback callback, GCType gc_type) {
reinterpret_cast<v8::Isolate::GCCallback>(callback), gc_type, false);
}
+void Isolate::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->heap()->SetEmbedderHeapTracer(tracer);
+}
void Isolate::AddMemoryAllocationCallback(MemoryAllocationCallback callback,
ObjectSpace space,
@@ -7296,10 +7396,12 @@ Isolate::SuppressMicrotaskExecutionScope::SuppressMicrotaskExecutionScope(
Isolate* isolate)
: isolate_(reinterpret_cast<i::Isolate*>(isolate)) {
isolate_->handle_scope_implementer()->IncrementCallDepth();
+ isolate_->handle_scope_implementer()->IncrementMicrotasksSuppressions();
}
Isolate::SuppressMicrotaskExecutionScope::~SuppressMicrotaskExecutionScope() {
+ isolate_->handle_scope_implementer()->DecrementMicrotasksSuppressions();
isolate_->handle_scope_implementer()->DecrementCallDepth();
}
@@ -7314,6 +7416,8 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
heap_statistics->total_available_size_ = heap->Available();
heap_statistics->used_heap_size_ = heap->SizeOfObjects();
heap_statistics->heap_size_limit_ = heap->MaxReserved();
+ heap_statistics->malloced_memory_ =
+ isolate->allocator()->GetCurrentMemoryUsage();
heap_statistics->does_zap_garbage_ = heap->ShouldZapGarbage();
}
@@ -7441,6 +7545,7 @@ void Isolate::SetPromiseRejectCallback(PromiseRejectCallback callback) {
void Isolate::RunMicrotasks() {
+ DCHECK(MicrotasksPolicy::kScoped != GetMicrotasksPolicy());
reinterpret_cast<i::Isolate*>(this)->RunMicrotasks();
}
@@ -7464,12 +7569,41 @@ void Isolate::EnqueueMicrotask(MicrotaskCallback microtask, void* data) {
void Isolate::SetAutorunMicrotasks(bool autorun) {
- reinterpret_cast<i::Isolate*>(this)->set_autorun_microtasks(autorun);
+ SetMicrotasksPolicy(
+ autorun ? MicrotasksPolicy::kAuto : MicrotasksPolicy::kExplicit);
}
bool Isolate::WillAutorunMicrotasks() const {
- return reinterpret_cast<const i::Isolate*>(this)->autorun_microtasks();
+ return GetMicrotasksPolicy() == MicrotasksPolicy::kAuto;
+}
+
+
+void Isolate::SetMicrotasksPolicy(MicrotasksPolicy policy) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->handle_scope_implementer()->set_microtasks_policy(policy);
+}
+
+
+MicrotasksPolicy Isolate::GetMicrotasksPolicy() const {
+ i::Isolate* isolate =
+ reinterpret_cast<i::Isolate*>(const_cast<Isolate*>(this));
+ return isolate->handle_scope_implementer()->microtasks_policy();
+}
+
+
+void Isolate::AddMicrotasksCompletedCallback(
+ MicrotasksCompletedCallback callback) {
+ DCHECK(callback);
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->AddMicrotasksCompletedCallback(callback);
+}
+
+
+void Isolate::RemoveMicrotasksCompletedCallback(
+ MicrotasksCompletedCallback callback) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->RemoveMicrotasksCompletedCallback(callback);
}
@@ -7548,6 +7682,11 @@ void Isolate::IsolateInBackgroundNotification() {
return isolate->heap()->SetOptimizeForMemoryUsage();
}
+void Isolate::MemoryPressureNotification(MemoryPressureLevel level) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ return isolate->heap()->MemoryPressureNotification(level,
+ Locker::IsLocked(this));
+}
void Isolate::SetJitCodeEventHandler(JitCodeEventOptions options,
JitCodeEventHandler event_handler) {
@@ -7693,6 +7832,49 @@ void Isolate::VisitWeakHandles(PersistentHandleVisitor* visitor) {
}
+MicrotasksScope::MicrotasksScope(Isolate* isolate, MicrotasksScope::Type type)
+ : isolate_(reinterpret_cast<i::Isolate*>(isolate)),
+ run_(type == MicrotasksScope::kRunMicrotasks) {
+ auto handle_scope_implementer = isolate_->handle_scope_implementer();
+ if (run_) handle_scope_implementer->IncrementMicrotasksScopeDepth();
+#ifdef DEBUG
+ if (!run_) handle_scope_implementer->IncrementDebugMicrotasksScopeDepth();
+#endif
+}
+
+
+MicrotasksScope::~MicrotasksScope() {
+ auto handle_scope_implementer = isolate_->handle_scope_implementer();
+ if (run_) {
+ handle_scope_implementer->DecrementMicrotasksScopeDepth();
+ if (MicrotasksPolicy::kScoped ==
+ handle_scope_implementer->microtasks_policy()) {
+ PerformCheckpoint(reinterpret_cast<Isolate*>(isolate_));
+ }
+ }
+#ifdef DEBUG
+ if (!run_) handle_scope_implementer->DecrementDebugMicrotasksScopeDepth();
+#endif
+}
+
+
+void MicrotasksScope::PerformCheckpoint(Isolate* v8Isolate) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8Isolate);
+ if (IsExecutionTerminatingCheck(isolate)) return;
+ auto handle_scope_implementer = isolate->handle_scope_implementer();
+ if (!handle_scope_implementer->GetMicrotasksScopeDepth() &&
+ !handle_scope_implementer->HasMicrotasksSuppressions()) {
+ isolate->RunMicrotasks();
+ }
+}
+
+
+int MicrotasksScope::GetCurrentDepth(Isolate* v8Isolate) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8Isolate);
+ return isolate->handle_scope_implementer()->GetMicrotasksScopeDepth();
+}
+
+
String::Utf8Value::Utf8Value(v8::Local<v8::Value> obj)
: str_(NULL), length_(0) {
if (obj.IsEmpty()) return;
@@ -7887,7 +8069,7 @@ MaybeLocal<Value> Debug::GetMirror(Local<Context> context,
RETURN_ON_FAILED_EXECUTION(Value);
i::Handle<i::JSObject> debug(isolate_debug->debug_context()->global_object());
auto name = isolate->factory()->NewStringFromStaticChars("MakeMirror");
- auto fun_obj = i::Object::GetProperty(debug, name).ToHandleChecked();
+ auto fun_obj = i::JSReceiver::GetProperty(debug, name).ToHandleChecked();
auto v8_fun = Utils::CallableToLocal(i::Handle<i::JSFunction>::cast(fun_obj));
const int kArgc = 1;
v8::Local<v8::Value> argv[kArgc] = {obj};
@@ -7932,6 +8114,15 @@ void Debug::SetLiveEditEnabled(Isolate* isolate, bool enable) {
internal_isolate->debug()->set_live_edit_enabled(enable);
}
+bool Debug::IsTailCallEliminationEnabled(Isolate* isolate) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ return internal_isolate->is_tail_call_elimination_enabled();
+}
+
+void Debug::SetTailCallEliminationEnabled(Isolate* isolate, bool enabled) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ internal_isolate->SetTailCallEliminationEnabled(enabled);
+}
MaybeLocal<Array> Debug::GetInternalProperties(Isolate* v8_isolate,
Local<Value> value) {
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index 556765264a..cb2b5c386c 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -452,6 +452,12 @@ class HandleScopeImplementer {
saved_contexts_(0),
spare_(NULL),
call_depth_(0),
+ microtasks_depth_(0),
+ microtasks_suppressions_(0),
+#ifdef DEBUG
+ debug_microtasks_depth_(0),
+#endif
+ microtasks_policy_(v8::MicrotasksPolicy::kAuto),
last_handle_before_deferred_block_(NULL) { }
~HandleScopeImplementer() {
@@ -472,10 +478,36 @@ class HandleScopeImplementer {
inline internal::Object** GetSpareOrNewBlock();
inline void DeleteExtensions(internal::Object** prev_limit);
+ // Call depth represents nested v8 api calls.
inline void IncrementCallDepth() {call_depth_++;}
inline void DecrementCallDepth() {call_depth_--;}
inline bool CallDepthIsZero() { return call_depth_ == 0; }
+ // Microtasks scope depth represents nested scopes controlling microtasks
+ // invocation, which happens when depth reaches zero.
+ inline void IncrementMicrotasksScopeDepth() {microtasks_depth_++;}
+ inline void DecrementMicrotasksScopeDepth() {microtasks_depth_--;}
+ inline int GetMicrotasksScopeDepth() { return microtasks_depth_; }
+
+ // Possibly nested microtasks suppression scopes prevent microtasks
+ // from running.
+ inline void IncrementMicrotasksSuppressions() {microtasks_suppressions_++;}
+ inline void DecrementMicrotasksSuppressions() {microtasks_suppressions_--;}
+ inline bool HasMicrotasksSuppressions() { return !!microtasks_suppressions_; }
+
+#ifdef DEBUG
+ // In debug we check that calls not intended to invoke microtasks are
+ // still correctly wrapped with microtask scopes.
+ inline void IncrementDebugMicrotasksScopeDepth() {debug_microtasks_depth_++;}
+ inline void DecrementDebugMicrotasksScopeDepth() {debug_microtasks_depth_--;}
+ inline bool DebugMicrotasksScopeDepthIsZero() {
+ return debug_microtasks_depth_ == 0;
+ }
+#endif
+
+ inline void set_microtasks_policy(v8::MicrotasksPolicy policy);
+ inline v8::MicrotasksPolicy microtasks_policy() const;
+
inline void EnterContext(Handle<Context> context);
inline void LeaveContext();
inline bool LastEnteredContextWas(Handle<Context> context);
@@ -532,6 +564,12 @@ class HandleScopeImplementer {
List<Context*> saved_contexts_;
Object** spare_;
int call_depth_;
+ int microtasks_depth_;
+ int microtasks_suppressions_;
+#ifdef DEBUG
+ int debug_microtasks_depth_;
+#endif
+ v8::MicrotasksPolicy microtasks_policy_;
Object** last_handle_before_deferred_block_;
// This is only used for threading support.
HandleScopeData handle_scope_data_;
@@ -550,6 +588,17 @@ class HandleScopeImplementer {
const int kHandleBlockSize = v8::internal::KB - 2; // fit in one page
+void HandleScopeImplementer::set_microtasks_policy(
+ v8::MicrotasksPolicy policy) {
+ microtasks_policy_ = policy;
+}
+
+
+v8::MicrotasksPolicy HandleScopeImplementer::microtasks_policy() const {
+ return microtasks_policy_;
+}
+
+
void HandleScopeImplementer::SaveContext(Context* context) {
saved_contexts_.Add(context);
}
diff --git a/deps/v8/src/arguments.cc b/deps/v8/src/arguments.cc
index 077991bbee..815f5de577 100644
--- a/deps/v8/src/arguments.cc
+++ b/deps/v8/src/arguments.cc
@@ -4,93 +4,9 @@
#include "src/arguments.h"
-#include "src/api.h"
-#include "src/vm-state-inl.h"
-
namespace v8 {
namespace internal {
-
-template <typename T>
-template <typename V>
-v8::Local<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) {
- // Check the ReturnValue.
- Object** handle = &this->begin()[kReturnValueOffset];
- // Nothing was set, return empty handle as per previous behaviour.
- if ((*handle)->IsTheHole()) return v8::Local<V>();
- return Utils::Convert<Object, V>(Handle<Object>(handle));
-}
-
-
-v8::Local<v8::Value> FunctionCallbackArguments::Call(FunctionCallback f) {
- Isolate* isolate = this->isolate();
- VMState<EXTERNAL> state(isolate);
- ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
- FunctionCallbackInfo<v8::Value> info(begin(),
- argv_,
- argc_,
- is_construct_call_);
- f(info);
- return GetReturnValue<v8::Value>(isolate);
-}
-
-
-#define WRITE_CALL_0(Function, ReturnValue) \
- v8::Local<ReturnValue> PropertyCallbackArguments::Call(Function f) { \
- Isolate* isolate = this->isolate(); \
- VMState<EXTERNAL> state(isolate); \
- ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
- PropertyCallbackInfo<ReturnValue> info(begin()); \
- f(info); \
- return GetReturnValue<ReturnValue>(isolate); \
- }
-
-
-#define WRITE_CALL_1(Function, ReturnValue, Arg1) \
- v8::Local<ReturnValue> PropertyCallbackArguments::Call(Function f, \
- Arg1 arg1) { \
- Isolate* isolate = this->isolate(); \
- VMState<EXTERNAL> state(isolate); \
- ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
- PropertyCallbackInfo<ReturnValue> info(begin()); \
- f(arg1, info); \
- return GetReturnValue<ReturnValue>(isolate); \
- }
-
-
-#define WRITE_CALL_2(Function, ReturnValue, Arg1, Arg2) \
- v8::Local<ReturnValue> PropertyCallbackArguments::Call( \
- Function f, Arg1 arg1, Arg2 arg2) { \
- Isolate* isolate = this->isolate(); \
- VMState<EXTERNAL> state(isolate); \
- ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
- PropertyCallbackInfo<ReturnValue> info(begin()); \
- f(arg1, arg2, info); \
- return GetReturnValue<ReturnValue>(isolate); \
- }
-
-
-#define WRITE_CALL_2_VOID(Function, ReturnValue, Arg1, Arg2) \
- void PropertyCallbackArguments::Call(Function f, Arg1 arg1, Arg2 arg2) { \
- Isolate* isolate = this->isolate(); \
- VMState<EXTERNAL> state(isolate); \
- ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
- PropertyCallbackInfo<ReturnValue> info(begin()); \
- f(arg1, arg2, info); \
- }
-
-
-FOR_EACH_CALLBACK_TABLE_MAPPING_0(WRITE_CALL_0)
-FOR_EACH_CALLBACK_TABLE_MAPPING_1(WRITE_CALL_1)
-FOR_EACH_CALLBACK_TABLE_MAPPING_2(WRITE_CALL_2)
-FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(WRITE_CALL_2_VOID)
-
-#undef WRITE_CALL_0
-#undef WRITE_CALL_1
-#undef WRITE_CALL_2
-#undef WRITE_CALL_2_VOID
-
-
double ClobberDoubleRegisters(double x1, double x2, double x3, double x4) {
// TODO(ulan): This clobbers only subset of registers depending on compiler,
// Rewrite this in assembly to really clobber all registers.
@@ -98,6 +14,5 @@ double ClobberDoubleRegisters(double x1, double x2, double x3, double x4) {
return x1 * 1.01 + x2 * 2.02 + x3 * 3.03 + x4 * 4.04;
}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h
index 35096774db..02090f9fe3 100644
--- a/deps/v8/src/arguments.h
+++ b/deps/v8/src/arguments.h
@@ -6,7 +6,8 @@
#define V8_ARGUMENTS_H_
#include "src/allocation.h"
-#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/tracing/trace-event.h"
namespace v8 {
namespace internal {
@@ -70,217 +71,30 @@ class Arguments BASE_EMBEDDED {
Object** arguments_;
};
-
-// For each type of callback, we have a list of arguments
-// They are used to generate the Call() functions below
-// These aren't included in the list as they have duplicate signatures
-// F(GenericNamedPropertyEnumeratorCallback, ...)
-// F(GenericNamedPropertyGetterCallback, ...)
-
-#define FOR_EACH_CALLBACK_TABLE_MAPPING_0(F) \
- F(IndexedPropertyEnumeratorCallback, v8::Array)
-
-#define FOR_EACH_CALLBACK_TABLE_MAPPING_1(F) \
- F(AccessorNameGetterCallback, v8::Value, v8::Local<v8::Name>) \
- F(GenericNamedPropertyQueryCallback, v8::Integer, v8::Local<v8::Name>) \
- F(GenericNamedPropertyDeleterCallback, v8::Boolean, v8::Local<v8::Name>) \
- F(IndexedPropertyGetterCallback, v8::Value, uint32_t) \
- F(IndexedPropertyQueryCallback, v8::Integer, uint32_t) \
- F(IndexedPropertyDeleterCallback, v8::Boolean, uint32_t)
-
-#define FOR_EACH_CALLBACK_TABLE_MAPPING_2(F) \
- F(GenericNamedPropertySetterCallback, v8::Value, v8::Local<v8::Name>, \
- v8::Local<v8::Value>) \
- F(IndexedPropertySetterCallback, v8::Value, uint32_t, v8::Local<v8::Value>)
-
-#define FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(F) \
- F(AccessorNameSetterCallback, \
- void, \
- v8::Local<v8::Name>, \
- v8::Local<v8::Value>) \
-
-
-// Custom arguments replicate a small segment of stack that can be
-// accessed through an Arguments object the same way the actual stack
-// can.
-template<int kArrayLength>
-class CustomArgumentsBase : public Relocatable {
- public:
- virtual inline void IterateInstance(ObjectVisitor* v) {
- v->VisitPointers(values_, values_ + kArrayLength);
- }
- protected:
- inline Object** begin() { return values_; }
- explicit inline CustomArgumentsBase(Isolate* isolate)
- : Relocatable(isolate) {}
- Object* values_[kArrayLength];
-};
-
-
-template<typename T>
-class CustomArguments : public CustomArgumentsBase<T::kArgsLength> {
- public:
- static const int kReturnValueOffset = T::kReturnValueIndex;
-
- typedef CustomArgumentsBase<T::kArgsLength> Super;
- ~CustomArguments() {
- this->begin()[kReturnValueOffset] =
- reinterpret_cast<Object*>(kHandleZapValue);
- }
-
- protected:
- explicit inline CustomArguments(Isolate* isolate) : Super(isolate) {}
-
- template <typename V>
- v8::Local<V> GetReturnValue(Isolate* isolate);
-
- inline Isolate* isolate() {
- return reinterpret_cast<Isolate*>(this->begin()[T::kIsolateIndex]);
- }
-};
-
-
-class PropertyCallbackArguments
- : public CustomArguments<PropertyCallbackInfo<Value> > {
- public:
- typedef PropertyCallbackInfo<Value> T;
- typedef CustomArguments<T> Super;
- static const int kArgsLength = T::kArgsLength;
- static const int kThisIndex = T::kThisIndex;
- static const int kHolderIndex = T::kHolderIndex;
- static const int kDataIndex = T::kDataIndex;
- static const int kReturnValueDefaultValueIndex =
- T::kReturnValueDefaultValueIndex;
- static const int kIsolateIndex = T::kIsolateIndex;
- static const int kShouldThrowOnErrorIndex = T::kShouldThrowOnErrorIndex;
-
- PropertyCallbackArguments(Isolate* isolate, Object* data, Object* self,
- JSObject* holder, Object::ShouldThrow should_throw)
- : Super(isolate) {
- Object** values = this->begin();
- values[T::kThisIndex] = self;
- values[T::kHolderIndex] = holder;
- values[T::kDataIndex] = data;
- values[T::kIsolateIndex] = reinterpret_cast<Object*>(isolate);
- values[T::kShouldThrowOnErrorIndex] =
- Smi::FromInt(should_throw == Object::THROW_ON_ERROR ? 1 : 0);
-
- // Here the hole is set as default value.
- // It cannot escape into js as it's remove in Call below.
- values[T::kReturnValueDefaultValueIndex] =
- isolate->heap()->the_hole_value();
- values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
- DCHECK(values[T::kHolderIndex]->IsHeapObject());
- DCHECK(values[T::kIsolateIndex]->IsSmi());
- }
-
- /*
- * The following Call functions wrap the calling of all callbacks to handle
- * calling either the old or the new style callbacks depending on which one
- * has been registered.
- * For old callbacks which return an empty handle, the ReturnValue is checked
- * and used if it's been set to anything inside the callback.
- * New style callbacks always use the return value.
- */
-#define WRITE_CALL_0(Function, ReturnValue) \
- v8::Local<ReturnValue> Call(Function f);
-
-#define WRITE_CALL_1(Function, ReturnValue, Arg1) \
- v8::Local<ReturnValue> Call(Function f, Arg1 arg1);
-
-#define WRITE_CALL_2(Function, ReturnValue, Arg1, Arg2) \
- v8::Local<ReturnValue> Call(Function f, Arg1 arg1, Arg2 arg2);
-
-#define WRITE_CALL_2_VOID(Function, ReturnValue, Arg1, Arg2) \
- void Call(Function f, Arg1 arg1, Arg2 arg2); \
-
-FOR_EACH_CALLBACK_TABLE_MAPPING_0(WRITE_CALL_0)
-FOR_EACH_CALLBACK_TABLE_MAPPING_1(WRITE_CALL_1)
-FOR_EACH_CALLBACK_TABLE_MAPPING_2(WRITE_CALL_2)
-FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(WRITE_CALL_2_VOID)
-
-#undef WRITE_CALL_0
-#undef WRITE_CALL_1
-#undef WRITE_CALL_2
-#undef WRITE_CALL_2_VOID
-};
-
-
-class FunctionCallbackArguments
- : public CustomArguments<FunctionCallbackInfo<Value> > {
- public:
- typedef FunctionCallbackInfo<Value> T;
- typedef CustomArguments<T> Super;
- static const int kArgsLength = T::kArgsLength;
- static const int kHolderIndex = T::kHolderIndex;
- static const int kDataIndex = T::kDataIndex;
- static const int kReturnValueDefaultValueIndex =
- T::kReturnValueDefaultValueIndex;
- static const int kIsolateIndex = T::kIsolateIndex;
- static const int kCalleeIndex = T::kCalleeIndex;
- static const int kContextSaveIndex = T::kContextSaveIndex;
-
- FunctionCallbackArguments(internal::Isolate* isolate, internal::Object* data,
- internal::HeapObject* callee,
- internal::Object* holder, internal::Object** argv,
- int argc, bool is_construct_call)
- : Super(isolate),
- argv_(argv),
- argc_(argc),
- is_construct_call_(is_construct_call) {
- Object** values = begin();
- values[T::kDataIndex] = data;
- values[T::kCalleeIndex] = callee;
- values[T::kHolderIndex] = holder;
- values[T::kContextSaveIndex] = isolate->heap()->the_hole_value();
- values[T::kIsolateIndex] = reinterpret_cast<internal::Object*>(isolate);
- // Here the hole is set as default value.
- // It cannot escape into js as it's remove in Call below.
- values[T::kReturnValueDefaultValueIndex] =
- isolate->heap()->the_hole_value();
- values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
- DCHECK(values[T::kCalleeIndex]->IsJSFunction() ||
- values[T::kCalleeIndex]->IsFunctionTemplateInfo());
- DCHECK(values[T::kHolderIndex]->IsHeapObject());
- DCHECK(values[T::kIsolateIndex]->IsSmi());
- }
-
- /*
- * The following Call function wraps the calling of all callbacks to handle
- * calling either the old or the new style callbacks depending on which one
- * has been registered.
- * For old callbacks which return an empty handle, the ReturnValue is checked
- * and used if it's been set to anything inside the callback.
- * New style callbacks always use the return value.
- */
- v8::Local<v8::Value> Call(FunctionCallback f);
-
- private:
- internal::Object** argv_;
- int argc_;
- bool is_construct_call_;
-};
-
-
double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
-
#ifdef DEBUG
#define CLOBBER_DOUBLE_REGISTERS() ClobberDoubleRegisters(1, 2, 3, 4);
#else
#define CLOBBER_DOUBLE_REGISTERS()
#endif
-#define RUNTIME_FUNCTION_RETURNS_TYPE(Type, Name) \
- static INLINE(Type __RT_impl_##Name(Arguments args, Isolate* isolate)); \
- Type Name(int args_length, Object** args_object, Isolate* isolate) { \
- CLOBBER_DOUBLE_REGISTERS(); \
- RuntimeCallStats* stats = isolate->counters()->runtime_call_stats(); \
- RuntimeCallTimerScope timer(isolate, &stats->Name); \
- Arguments args(args_length, args_object); \
- Type value = __RT_impl_##Name(args, isolate); \
- return value; \
- } \
+#define RUNTIME_FUNCTION_RETURNS_TYPE(Type, Name) \
+ static INLINE(Type __RT_impl_##Name(Arguments args, Isolate* isolate)); \
+ Type Name(int args_length, Object** args_object, Isolate* isolate) { \
+ CLOBBER_DOUBLE_REGISTERS(); \
+ Type value; \
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"), "V8." #Name); \
+ Arguments args(args_length, args_object); \
+ if (FLAG_runtime_call_stats) { \
+ RuntimeCallStats* stats = isolate->counters()->runtime_call_stats(); \
+ RuntimeCallTimerScope timer(isolate, &stats->Name); \
+ value = __RT_impl_##Name(args, isolate); \
+ } else { \
+ value = __RT_impl_##Name(args, isolate); \
+ } \
+ return value; \
+ } \
static Type __RT_impl_##Name(Arguments args, Isolate* isolate)
#define RUNTIME_FUNCTION(Name) RUNTIME_FUNCTION_RETURNS_TYPE(Object*, Name)
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 0de96428f3..b0b22b63c2 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -71,6 +71,10 @@ Address RelocInfo::target_address() {
return Assembler::target_address_at(pc_, host_);
}
+Address RelocInfo::wasm_memory_reference() {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ return Assembler::target_address_at(pc_, host_);
+}
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
@@ -114,6 +118,18 @@ void RelocInfo::set_target_address(Address target,
}
}
+void RelocInfo::update_wasm_memory_reference(
+ Address old_base, Address new_base, size_t old_size, size_t new_size,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ DCHECK(old_base <= wasm_memory_reference() &&
+ wasm_memory_reference() < old_base + old_size);
+ Address updated_reference = new_base + (wasm_memory_reference() - old_base);
+ DCHECK(new_base <= updated_reference &&
+ updated_reference < new_base + new_size);
+ Assembler::set_target_address_at(isolate_, pc_, host_, updated_reference,
+ icache_flush_mode);
+}
Object* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index b0fa462c9f..62516e82c9 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -145,7 +145,10 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
if (cpu.implementer() == base::CPU::NVIDIA &&
cpu.variant() == base::CPU::NVIDIA_DENVER &&
cpu.part() <= base::CPU::NVIDIA_DENVER_V10) {
- supported_ |= 1u << COHERENT_CACHE;
+ // TODO(jkummerow): This is turned off as an experiment to see if it
+ // affects crash rates. Keep an eye on crash reports and either remove
+ // coherent cache support permanently, or re-enable it!
+ // supported_ |= 1u << COHERENT_CACHE;
}
#endif
@@ -1966,7 +1969,8 @@ void Assembler::mrs(Register dst, SRegister s, Condition cond) {
void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
Condition cond) {
- DCHECK(fields >= B16 && fields < B20); // at least one field set
+ DCHECK((fields & 0x000f0000) != 0); // At least one field must be set.
+ DCHECK(((fields & 0xfff0ffff) == CPSR) || ((fields & 0xfff0ffff) == SPSR));
Instr instr;
if (!src.rm_.is_valid()) {
// Immediate.
@@ -2546,12 +2550,6 @@ void Assembler::vstm(BlockAddrMode am,
}
-void Assembler::vmov(const SwVfpRegister dst, float imm) {
- mov(ip, Operand(bit_cast<int32_t>(imm)));
- vmov(dst, ip);
-}
-
-
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
uint64_t i;
memcpy(&i, &d, 8);
@@ -2563,7 +2561,7 @@ static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
// Only works for little endian floating point formats.
// We don't support VFP on the mixed endian floating point platform.
-static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
+static bool FitsVmovFPImmediate(double d, uint32_t* encoding) {
DCHECK(CpuFeatures::IsSupported(VFP3));
// VMOV can accept an immediate of the form:
@@ -2592,12 +2590,12 @@ static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
return false;
}
- // Bits 62:55 must be all clear or all set.
+ // Bits 61:54 must be all clear or all set.
if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) {
return false;
}
- // Bit 63 must be NOT bit 62.
+ // Bit 62 must be NOT bit 61.
if (((hi ^ (hi << 1)) & (0x40000000)) == 0) {
return false;
}
@@ -2612,6 +2610,25 @@ static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
}
+void Assembler::vmov(const SwVfpRegister dst, float imm) {
+ uint32_t enc;
+ if (CpuFeatures::IsSupported(VFP3) && FitsVmovFPImmediate(imm, &enc)) {
+ // The float can be encoded in the instruction.
+ //
+ // Sd = immediate
+ // Instruction details available in ARM DDI 0406C.b, A8-936.
+ // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | imm4H(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=0(8) | imm4L(3-0)
+ int vd, d;
+ dst.split_code(&vd, &d);
+ emit(al | 0x1D * B23 | d * B22 | 0x3 * B20 | vd * B12 | 0x5 * B9 | enc);
+ } else {
+ mov(ip, Operand(bit_cast<int32_t>(imm)));
+ vmov(dst, ip);
+ }
+}
+
+
void Assembler::vmov(const DwVfpRegister dst,
double imm,
const Register scratch) {
@@ -2622,7 +2639,7 @@ void Assembler::vmov(const DwVfpRegister dst,
// pointer (pp) is valid.
bool can_use_pool =
!FLAG_enable_embedded_constant_pool || is_constant_pool_available();
- if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
+ if (CpuFeatures::IsSupported(VFP3) && FitsVmovFPImmediate(imm, &enc)) {
// The double can be encoded in the instruction.
//
// Dd = immediate
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index d381653bde..08ad64c2a2 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -1390,7 +1390,9 @@ class Assembler : public AssemblerBase {
// Emits the address of the code stub's first instruction.
void emit_code_stub_address(Code* stub);
- PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+ AssemblerPositionsRecorder* positions_recorder() {
+ return &positions_recorder_;
+ }
// Read/patch instructions
Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
@@ -1637,8 +1639,8 @@ class Assembler : public AssemblerBase {
friend class RelocInfo;
friend class CodePatcher;
friend class BlockConstPoolScope;
- PositionsRecorder positions_recorder_;
- friend class PositionsRecorder;
+ AssemblerPositionsRecorder positions_recorder_;
+ friend class AssemblerPositionsRecorder;
friend class EnsureSpace;
};
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index a6bfdb128d..1fffcb67e5 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -531,6 +531,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -- r1 : constructor function
// -- r2 : allocation site or undefined
// -- r3 : new target
+ // -- cp : context
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
@@ -543,6 +544,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Preserve the incoming parameters on the stack.
__ AssertUndefinedOrAllocationSite(r2, r4);
+ __ Push(cp);
__ SmiTag(r0);
__ Push(r2, r0);
@@ -622,7 +624,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r0: result
// sp[0]: receiver
// sp[1]: number of arguments (smi-tagged)
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
if (create_implicit_receiver) {
// If the result is an object (in the ECMA sense), we should get rid
@@ -751,9 +753,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r5-r6, r8 (if !FLAG_enable_embedded_constant_pool) and cp may be clobbered
ProfileEntryHookStub::MaybeCallEntryHook(masm);
- // Clear the context before we push it when entering the internal frame.
- __ mov(cp, Operand::Zero());
-
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -855,8 +854,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ PushFixedFrame(r1);
- __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ PushStandardFrame(r1);
// Get the bytecode array from the function object and load the pointer to the
// first entry into kInterpreterBytecodeRegister.
@@ -1192,8 +1190,7 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
__ ldm(ia_w, sp, r0.bit() | r1.bit() | r3.bit() | fp.bit() | lr.bit());
// Perform prologue operations usually performed by the young code stub.
- __ PushFixedFrame(r1);
- __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ PushStandardFrame(r1);
// Jump to point after the code-age stub.
__ add(r0, r0, Operand(kNoCodeAgeSequenceLength));
@@ -1430,24 +1427,6 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
}
-void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
- // We check the stack limit as indicator that recompilation might be done.
- Label ok;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, &ok);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard);
- }
- __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
- RelocInfo::CODE_TARGET);
-
- __ bind(&ok);
- __ Ret();
-}
-
-
// static
void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
int field_index) {
@@ -1494,6 +1473,27 @@ void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
__ TailCallRuntime(Runtime::kThrowNotDateError);
}
+// static
+void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : argc
+ // -- sp[0] : first argument (left-hand side)
+ // -- sp[4] : receiver (right-hand side)
+ // -----------------------------------
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ ldr(InstanceOfDescriptor::LeftRegister(),
+ MemOperand(fp, 2 * kPointerSize)); // Load left-hand side.
+ __ ldr(InstanceOfDescriptor::RightRegister(),
+ MemOperand(fp, 3 * kPointerSize)); // Load right-hand side.
+ InstanceOfStub stub(masm->isolate(), true);
+ __ CallStub(&stub);
+ }
+
+ // Pop the argument and the receiver.
+ __ Ret(2);
+}
// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
@@ -1933,19 +1933,21 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
Comment cmnt(masm, "[ PrepareForTailCall");
- // Prepare for tail call only if the debugger is not active.
+ // Prepare for tail call only if ES2015 tail call elimination is enabled.
Label done;
- ExternalReference debug_is_active =
- ExternalReference::debug_is_active_address(masm->isolate());
- __ mov(scratch1, Operand(debug_is_active));
+ ExternalReference is_tail_call_elimination_enabled =
+ ExternalReference::is_tail_call_elimination_enabled_address(
+ masm->isolate());
+ __ mov(scratch1, Operand(is_tail_call_elimination_enabled));
__ ldrb(scratch1, MemOperand(scratch1));
__ cmp(scratch1, Operand(0));
- __ b(ne, &done);
+ __ b(eq, &done);
// Drop possible interpreter handler/stub frame.
{
Label no_interpreter_frame;
- __ ldr(scratch3, MemOperand(fp, StandardFrameConstants::kMarkerOffset));
+ __ ldr(scratch3,
+ MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(scratch3, Operand(Smi::FromInt(StackFrame::STUB)));
__ b(ne, &no_interpreter_frame);
__ ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -1953,73 +1955,37 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
}
// Check if next frame is an arguments adaptor frame.
+ Register caller_args_count_reg = scratch1;
Label no_arguments_adaptor, formal_parameter_count_loaded;
__ ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ldr(scratch3,
- MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+ MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(scratch3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(ne, &no_arguments_adaptor);
- // Drop arguments adaptor frame and load arguments count.
+ // Drop current frame and load arguments count from arguments adaptor frame.
__ mov(fp, scratch2);
- __ ldr(scratch1,
+ __ ldr(caller_args_count_reg,
MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(scratch1);
+ __ SmiUntag(caller_args_count_reg);
__ b(&formal_parameter_count_loaded);
__ bind(&no_arguments_adaptor);
// Load caller's formal parameter count
- __ ldr(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(scratch1,
- FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
__ ldr(scratch1,
+ FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(caller_args_count_reg,
FieldMemOperand(scratch1,
SharedFunctionInfo::kFormalParameterCountOffset));
- __ SmiUntag(scratch1);
+ __ SmiUntag(caller_args_count_reg);
__ bind(&formal_parameter_count_loaded);
- // Calculate the end of destination area where we will put the arguments
- // after we drop current frame. We add kPointerSize to count the receiver
- // argument which is not included into formal parameters count.
- Register dst_reg = scratch2;
- __ add(dst_reg, fp, Operand(scratch1, LSL, kPointerSizeLog2));
- __ add(dst_reg, dst_reg,
- Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
-
- Register src_reg = scratch1;
- __ add(src_reg, sp, Operand(args_reg, LSL, kPointerSizeLog2));
- // Count receiver argument as well (not included in args_reg).
- __ add(src_reg, src_reg, Operand(kPointerSize));
-
- if (FLAG_debug_code) {
- __ cmp(src_reg, dst_reg);
- __ Check(lo, kStackAccessBelowStackPointer);
- }
-
- // Restore caller's frame pointer and return address now as they will be
- // overwritten by the copying loop.
- __ ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
- __ ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Now copy callee arguments to the caller frame going backwards to avoid
- // callee arguments corruption (source and destination areas could overlap).
-
- // Both src_reg and dst_reg are pointing to the word after the one to copy,
- // so they must be pre-decremented in the loop.
- Register tmp_reg = scratch3;
- Label loop, entry;
- __ b(&entry);
- __ bind(&loop);
- __ ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
- __ str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
- __ bind(&entry);
- __ cmp(sp, src_reg);
- __ b(ne, &loop);
-
- // Leave current frame.
- __ mov(sp, dst_reg);
-
+ ParameterCount callee_args_count(args_reg);
+ __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+ scratch3);
__ bind(&done);
}
} // namespace
@@ -2473,27 +2439,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected
__ bind(&too_few);
-
- // If the function is strong we need to throw an error.
- Label no_strong_error;
- __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r5, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(r5, Operand(1 << (SharedFunctionInfo::kStrongModeFunction +
- kSmiTagSize)));
- __ b(eq, &no_strong_error);
-
- // What we really care about is the required number of arguments.
- __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kLengthOffset));
- __ cmp(r0, Operand::SmiUntag(r4));
- __ b(ge, &no_strong_error);
-
- {
- FrameScope frame(masm, StackFrame::MANUAL);
- EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
- }
-
- __ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
ArgumentAdaptorStackCheck(masm, &stack_overflow);
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 82fb51d2f1..31e3e95f03 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -4,9 +4,10 @@
#if V8_TARGET_ARCH_ARM
+#include "src/code-stubs.h"
+#include "src/api-arguments.h"
#include "src/base/bits.h"
#include "src/bootstrapper.h"
-#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
@@ -77,6 +78,10 @@ void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
}
+void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+ Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
+ descriptor->Initialize(r0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
+}
void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
@@ -477,7 +482,9 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
}
-// Fast negative check for internalized-to-internalized equality.
+// Fast negative check for internalized-to-internalized equality or receiver
+// equality. Also handles the undetectable receiver to null/undefined
+// comparison.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
Register lhs, Register rhs,
Label* possible_strings,
@@ -486,7 +493,7 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
(lhs.is(r1) && rhs.is(r0)));
// r2 is object type of rhs.
- Label object_test, return_unequal, undetectable;
+ Label object_test, return_equal, return_unequal, undetectable;
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
__ tst(r2, Operand(kIsNotStringMask));
__ b(ne, &object_test);
@@ -524,6 +531,16 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
__ bind(&undetectable);
__ tst(r5, Operand(1 << Map::kIsUndetectable));
__ b(eq, &return_unequal);
+
+ // If both sides are JSReceivers, then the result is false according to
+ // the HTML specification, which says that only comparisons with null or
+ // undefined are affected by special casing for document.all.
+ __ CompareInstanceType(r2, r2, ODDBALL_TYPE);
+ __ b(eq, &return_equal);
+ __ CompareInstanceType(r3, r3, ODDBALL_TYPE);
+ __ b(ne, &return_unequal);
+
+ __ bind(&return_equal);
__ mov(r0, Operand(EQUAL));
__ Ret();
}
@@ -1049,9 +1066,9 @@ void CEntryStub::Generate(MacroAssembler* masm) {
if (result_size() > 2) {
DCHECK_EQ(3, result_size());
// Read result values stored on stack.
- __ ldr(r2, MemOperand(r0, 2 * kPointerSize));
- __ ldr(r1, MemOperand(r0, 1 * kPointerSize));
- __ ldr(r0, MemOperand(r0, 0 * kPointerSize));
+ __ ldr(r2, MemOperand(sp, 2 * kPointerSize));
+ __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
+ __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
}
// Result returned in r0, r1:r0 or r2:r1:r0 - do not destroy these registers!
@@ -1358,8 +1375,12 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
__ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
__ b(ne, &slow_case);
- // Ensure that {function} has an instance prototype.
+ // Go to the runtime if the function is not a constructor.
__ ldrb(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
+ __ tst(scratch, Operand(1 << Map::kIsConstructor));
+ __ b(eq, &slow_case);
+
+ // Ensure that {function} has an instance prototype.
__ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
__ b(ne, &slow_case);
@@ -1427,7 +1448,8 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
// Slow-case: Call the %InstanceOf runtime function.
__ bind(&slow_case);
__ Push(object, function);
- __ TailCallRuntime(Runtime::kInstanceOf);
+ __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
+ : Runtime::kInstanceOf);
}
@@ -1480,29 +1502,6 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
}
-void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
- // Return address is in lr.
- Label slow;
-
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
-
- // Check that the key is an array index, that is Uint32.
- __ NonNegativeSmiTst(key);
- __ b(ne, &slow);
-
- // Everything is fine, call runtime.
- __ Push(receiver, key); // Receiver, key.
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
-
- __ bind(&slow);
- PropertyAccessCompiler::TailCallBuiltin(
- masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -2633,29 +2632,28 @@ void SubStringStub::Generate(MacroAssembler* masm) {
void ToNumberStub::Generate(MacroAssembler* masm) {
// The ToNumber stub takes one argument in r0.
- Label not_smi;
- __ JumpIfNotSmi(r0, &not_smi);
- __ Ret();
- __ bind(&not_smi);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ tst(r0, Operand(kSmiTagMask));
+ __ Ret(eq);
__ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
// r0: receiver
// r1: receiver instance type
__ Ret(eq);
- Label not_string, slow_string;
- __ cmp(r1, Operand(FIRST_NONSTRING_TYPE));
- __ b(hs, &not_string);
- // Check if string has a cached array index.
- __ ldr(r2, FieldMemOperand(r0, String::kHashFieldOffset));
- __ tst(r2, Operand(String::kContainsCachedArrayIndexMask));
- __ b(ne, &slow_string);
- __ IndexFromHash(r2, r0);
- __ Ret();
- __ bind(&slow_string);
- __ push(r0); // Push argument.
- __ TailCallRuntime(Runtime::kStringToNumber);
- __ bind(&not_string);
+ NonNumberToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+}
+
+void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
+ // The NonNumberToNumber stub takes one argument in r0.
+ __ AssertNotNumber(r0);
+
+ __ CompareObjectType(r0, r1, r1, FIRST_NONSTRING_TYPE);
+ // r0: receiver
+ // r1: receiver instance type
+ StringToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub, lo);
Label not_oddball;
__ cmp(r1, Operand(ODDBALL_TYPE));
@@ -2664,26 +2662,27 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ Ret();
__ bind(&not_oddball);
- __ push(r0); // Push argument.
+ __ Push(r0); // Push argument.
__ TailCallRuntime(Runtime::kToNumber);
}
+void StringToNumberStub::Generate(MacroAssembler* masm) {
+ // The StringToNumber stub takes one argument in r0.
+ __ AssertString(r0);
-void ToLengthStub::Generate(MacroAssembler* masm) {
- // The ToLength stub takes one argument in r0.
- Label not_smi;
- __ JumpIfNotSmi(r0, &not_smi);
- STATIC_ASSERT(kSmiTag == 0);
- __ tst(r0, r0);
- __ mov(r0, Operand(0), LeaveCC, lt);
+ // Check if string has a cached array index.
+ Label runtime;
+ __ ldr(r2, FieldMemOperand(r0, String::kHashFieldOffset));
+ __ tst(r2, Operand(String::kContainsCachedArrayIndexMask));
+ __ b(ne, &runtime);
+ __ IndexFromHash(r2, r0);
__ Ret();
- __ bind(&not_smi);
- __ push(r0); // Push argument.
- __ TailCallRuntime(Runtime::kToLength);
+ __ bind(&runtime);
+ __ Push(r0); // Push argument.
+ __ TailCallRuntime(Runtime::kStringToNumber);
}
-
void ToStringStub::Generate(MacroAssembler* masm) {
// The ToString stub takes one argument in r0.
Label is_number;
@@ -2839,42 +2838,6 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
}
-void StringCompareStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r1 : left
- // -- r0 : right
- // -- lr : return address
- // -----------------------------------
- __ AssertString(r1);
- __ AssertString(r0);
-
- Label not_same;
- __ cmp(r0, r1);
- __ b(ne, &not_same);
- __ mov(r0, Operand(Smi::FromInt(EQUAL)));
- __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r1,
- r2);
- __ Ret();
-
- __ bind(&not_same);
-
- // Check that both objects are sequential one-byte strings.
- Label runtime;
- __ JumpIfNotBothSequentialOneByteStrings(r1, r0, r2, r3, &runtime);
-
- // Compare flat one-byte strings natively.
- __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r2,
- r3);
- StringHelper::GenerateCompareFlatOneByteStrings(masm, r1, r0, r2, r3, r4, r5);
-
- // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ bind(&runtime);
- __ Push(r1, r0);
- __ TailCallRuntime(Runtime::kStringCompare);
-}
-
-
void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r1 : left
@@ -3168,10 +3131,17 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
// Handle more complex cases in runtime.
__ bind(&runtime);
- __ Push(left, right);
if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(left, right);
+ __ CallRuntime(Runtime::kStringEqual);
+ }
+ __ LoadRoot(r1, Heap::kTrueValueRootIndex);
+ __ sub(r0, r0, r1);
+ __ Ret();
} else {
+ __ Push(left, right);
__ TailCallRuntime(Runtime::kStringCompare);
}
@@ -3710,7 +3680,7 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
int parameter_count_offset =
- StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
__ ldr(r1, MemOperand(fp, parameter_count_offset));
if (function_mode() == JS_FUNCTION_STUB_MODE) {
__ add(r1, r1, Operand(1));
@@ -4703,7 +4673,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
__ bind(&loop);
__ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
__ bind(&loop_entry);
- __ ldr(ip, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
+ __ ldr(ip, MemOperand(r2, StandardFrameConstants::kFunctionOffset));
__ cmp(ip, r1);
__ b(ne, &loop);
}
@@ -4712,7 +4682,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// arguments adaptor frame below the function frame).
Label no_rest_parameters;
__ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
- __ ldr(ip, MemOperand(r2, StandardFrameConstants::kContextOffset));
+ __ ldr(ip, MemOperand(r2, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(ip, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(ne, &no_rest_parameters);
@@ -4851,7 +4821,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
__ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r0, MemOperand(r4, StandardFrameConstants::kContextOffset));
+ __ ldr(r0, MemOperand(r4, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(r0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(eq, &adaptor_frame);
@@ -5050,7 +5020,7 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ bind(&loop);
__ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
__ bind(&loop_entry);
- __ ldr(ip, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
+ __ ldr(ip, MemOperand(r2, StandardFrameConstants::kFunctionOffset));
__ cmp(ip, r1);
__ b(ne, &loop);
}
@@ -5058,7 +5028,7 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
__ ldr(r3, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
- __ ldr(ip, MemOperand(r3, StandardFrameConstants::kContextOffset));
+ __ ldr(ip, MemOperand(r3, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(ip, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(eq, &arguments_adaptor);
{
@@ -5424,16 +5394,12 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ jmp(&leave_exit_frame);
}
-static void CallApiFunctionStubHelper(MacroAssembler* masm,
- const ParameterCount& argc,
- bool return_first_arg,
- bool call_data_undefined, bool is_lazy) {
+void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : callee
// -- r4 : call_data
// -- r2 : holder
// -- r1 : api_function_address
- // -- r3 : number of arguments if argc is a register
// -- cp : context
// --
// -- sp[0] : last argument
@@ -5459,11 +5425,9 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
STATIC_ASSERT(FCA::kHolderIndex == 0);
STATIC_ASSERT(FCA::kArgsLength == 7);
- DCHECK(argc.is_immediate() || r3.is(argc.reg()));
-
// context save
__ push(context);
- if (!is_lazy) {
+ if (!is_lazy()) {
// load context from callee
__ ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
}
@@ -5475,7 +5439,7 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
__ push(call_data);
Register scratch = call_data;
- if (!call_data_undefined) {
+ if (!call_data_undefined()) {
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
}
// return value
@@ -5504,29 +5468,15 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
__ add(r0, sp, Operand(1 * kPointerSize));
// FunctionCallbackInfo::implicit_args_
__ str(scratch, MemOperand(r0, 0 * kPointerSize));
- if (argc.is_immediate()) {
- // FunctionCallbackInfo::values_
- __ add(ip, scratch,
- Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
- __ str(ip, MemOperand(r0, 1 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc
- __ mov(ip, Operand(argc.immediate()));
- __ str(ip, MemOperand(r0, 2 * kPointerSize));
- // FunctionCallbackInfo::is_construct_call_ = 0
- __ mov(ip, Operand::Zero());
- __ str(ip, MemOperand(r0, 3 * kPointerSize));
- } else {
- // FunctionCallbackInfo::values_
- __ add(ip, scratch, Operand(argc.reg(), LSL, kPointerSizeLog2));
- __ add(ip, ip, Operand((FCA::kArgsLength - 1) * kPointerSize));
- __ str(ip, MemOperand(r0, 1 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc
- __ str(argc.reg(), MemOperand(r0, 2 * kPointerSize));
- // FunctionCallbackInfo::is_construct_call_
- __ add(argc.reg(), argc.reg(), Operand(FCA::kArgsLength + 1));
- __ mov(ip, Operand(argc.reg(), LSL, kPointerSizeLog2));
- __ str(ip, MemOperand(r0, 3 * kPointerSize));
- }
+ // FunctionCallbackInfo::values_
+ __ add(ip, scratch, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
+ __ str(ip, MemOperand(r0, 1 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc
+ __ mov(ip, Operand(argc()));
+ __ str(ip, MemOperand(r0, 2 * kPointerSize));
+ // FunctionCallbackInfo::is_construct_call_ = 0
+ __ mov(ip, Operand::Zero());
+ __ str(ip, MemOperand(r0, 3 * kPointerSize));
ExternalReference thunk_ref =
ExternalReference::invoke_function_callback(masm->isolate());
@@ -5536,7 +5486,7 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
// Stores return the first js argument
int return_value_offset = 0;
- if (return_first_arg) {
+ if (is_store()) {
return_value_offset = 2 + FCA::kArgsLength;
} else {
return_value_offset = 2 + FCA::kReturnValueOffset;
@@ -5545,33 +5495,15 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
int stack_space = 0;
MemOperand is_construct_call_operand = MemOperand(sp, 4 * kPointerSize);
MemOperand* stack_space_operand = &is_construct_call_operand;
- if (argc.is_immediate()) {
- stack_space = argc.immediate() + FCA::kArgsLength + 1;
- stack_space_operand = NULL;
- }
+ stack_space = argc() + FCA::kArgsLength + 1;
+ stack_space_operand = NULL;
+
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
stack_space_operand, return_value_operand,
&context_restore_operand);
}
-void CallApiFunctionStub::Generate(MacroAssembler* masm) {
- bool call_data_undefined = this->call_data_undefined();
- CallApiFunctionStubHelper(masm, ParameterCount(r3), false,
- call_data_undefined, false);
-}
-
-
-void CallApiAccessorStub::Generate(MacroAssembler* masm) {
- bool is_store = this->is_store();
- int argc = this->argc();
- bool call_data_undefined = this->call_data_undefined();
- bool is_lazy = this->is_lazy();
- CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
- call_data_undefined, is_lazy);
-}
-
-
void CallApiGetterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- sp[0] : name
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 2dee363bbf..7e1a5500f1 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -898,10 +898,8 @@ CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
young_sequence_.length() / Assembler::kInstrSize,
CodePatcher::DONT_FLUSH));
PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
- patcher->masm()->PushFixedFrame(r1);
+ patcher->masm()->PushStandardFrame(r1);
patcher->masm()->nop(ip.code());
- patcher->masm()->add(
- fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index b9d4788eb5..a1620516a2 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -654,7 +654,7 @@ class Instruction {
inline bool HasH() const { return HValue() == 1; }
inline bool HasLink() const { return LinkValue() == 1; }
- // Decoding the double immediate in the vmov instruction.
+ // Decode the double immediate from a vmov instruction.
double DoubleImmedVmov() const;
// Instructions are read of out a code stream. The only way to get a
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 3e9fac7d12..2785b755d5 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -103,12 +103,6 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
}
}
-bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
- // There is no dynamic alignment padding on ARM in the input frame.
- return false;
-}
-
-
#define __ masm()->
// This code tries to be close to ia32 code so that any changes can be
@@ -162,7 +156,12 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Allocate a new deoptimizer object.
// Pass four arguments in r0 to r3 and fifth argument on stack.
__ PrepareCallCFunction(6, r5);
+ __ mov(r0, Operand(0));
+ Label context_check;
+ __ ldr(r1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ JumpIfSmi(r1, &context_check);
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ bind(&context_check);
__ mov(r1, Operand(type())); // bailout type,
// r2: bailout id already loaded.
// r3: code address or 0 already loaded.
@@ -235,6 +234,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
__ pop(r0); // Restore deoptimizer object (class Deoptimizer).
+ __ ldr(sp, MemOperand(r0, Deoptimizer::caller_frame_top_offset()));
+
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop,
outer_loop_header, inner_loop_header;
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 9258703fbc..287152ad59 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -604,6 +604,26 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
Print("s");
}
return 4;
+ } else if (format[1] == 'p') {
+ if (format[8] == '_') { // 'spec_reg_fields
+ DCHECK(STRING_STARTS_WITH(format, "spec_reg_fields"));
+ Print("_");
+ int mask = instr->Bits(19, 16);
+ if (mask == 0) Print("(none)");
+ if ((mask & 0x8) != 0) Print("f");
+ if ((mask & 0x4) != 0) Print("s");
+ if ((mask & 0x2) != 0) Print("x");
+ if ((mask & 0x1) != 0) Print("c");
+ return 15;
+ } else { // 'spec_reg
+ DCHECK(STRING_STARTS_WITH(format, "spec_reg"));
+ if (instr->Bit(22) == 0) {
+ Print("CPSR");
+ } else {
+ Print("SPSR");
+ }
+ return 8;
+ }
}
// 's: S field of data processing instructions
if (instr->HasS()) {
@@ -822,7 +842,13 @@ void Decoder::DecodeType01(Instruction* instr) {
return;
}
} else if ((type == 0) && instr->IsMiscType0()) {
- if (instr->Bits(22, 21) == 1) {
+ if ((instr->Bits(27, 23) == 2) && (instr->Bits(21, 20) == 2) &&
+ (instr->Bits(15, 4) == 0xf00)) {
+ Format(instr, "msr'cond 'spec_reg'spec_reg_fields, 'rm");
+ } else if ((instr->Bits(27, 23) == 2) && (instr->Bits(21, 20) == 0) &&
+ (instr->Bits(11, 0) == 0)) {
+ Format(instr, "mrs'cond 'rd, 'spec_reg");
+ } else if (instr->Bits(22, 21) == 1) {
switch (instr->BitField(7, 4)) {
case BX:
Format(instr, "bx'cond 'rm");
@@ -1404,7 +1430,7 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
if (instr->SzValue() == 0x1) {
Format(instr, "vmov'cond.f64 'Dd, 'd");
} else {
- Unknown(instr); // Not used by V8.
+ Format(instr, "vmov'cond.f32 'Sd, 'd");
}
} else if (((instr->Opc2Value() == 0x6)) && instr->Opc3Value() == 0x3) {
// vrintz - round towards zero (truncate)
diff --git a/deps/v8/src/arm/frames-arm.h b/deps/v8/src/arm/frames-arm.h
index 1ea7b1af56..37927758c3 100644
--- a/deps/v8/src/arm/frames-arm.h
+++ b/deps/v8/src/arm/frames-arm.h
@@ -93,16 +93,11 @@ class EntryFrameConstants : public AllStatic {
-(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};
-
-class ExitFrameConstants : public AllStatic {
+class ExitFrameConstants : public TypedFrameConstants {
public:
- static const int kFrameSize =
- FLAG_enable_embedded_constant_pool ? 3 * kPointerSize : 2 * kPointerSize;
-
- static const int kConstantPoolOffset =
- FLAG_enable_embedded_constant_pool ? -3 * kPointerSize : 0;
- static const int kCodeOffset = -2 * kPointerSize;
- static const int kSPOffset = -1 * kPointerSize;
+ static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ DEFINE_TYPED_FRAME_SIZES(2);
// The caller fields are below the frame pointer on the stack.
static const int kCallerFPOffset = 0 * kPointerSize;
@@ -120,7 +115,7 @@ class JavaScriptFrameConstants : public AllStatic {
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
static const int kLastParameterOffset = +2 * kPointerSize;
- static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+ static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
// Caller SP-relative.
static const int kParam0Offset = -2 * kPointerSize;
diff --git a/deps/v8/src/arm/interface-descriptors-arm.cc b/deps/v8/src/arm/interface-descriptors-arm.cc
index 1f55c0bb4b..b6cac760b2 100644
--- a/deps/v8/src/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/arm/interface-descriptors-arm.cc
@@ -111,35 +111,8 @@ void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
}
-void ToNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
// static
-const Register ToLengthDescriptor::ReceiverRegister() { return r0; }
-
-
-// static
-const Register ToStringDescriptor::ReceiverRegister() { return r0; }
-
-
-// static
-const Register ToNameDescriptor::ReceiverRegister() { return r0; }
-
-
-// static
-const Register ToObjectDescriptor::ReceiverRegister() { return r0; }
-
-
-void NumberToStringDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
+const Register TypeConversionDescriptor::ArgumentRegister() { return r0; }
void TypeofDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -267,6 +240,13 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(0, nullptr, nullptr);
}
+#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
+ void Allocate##Type##Descriptor::InitializePlatformSpecific( \
+ CallInterfaceDescriptorData* data) { \
+ data->InitializePlatformSpecific(0, nullptr, nullptr); \
+ }
+SIMD128_TYPES(SIMD128_ALLOC_DESC)
+#undef SIMD128_ALLOC_DESC
void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -311,24 +291,16 @@ void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void CompareDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r1, r0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void CompareNilDescriptor::InitializePlatformSpecific(
+void FastArrayPushDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
+ // stack param count needs (arg count)
Register registers[] = {r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void ToBooleanDescriptor::InitializePlatformSpecific(
+void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {r0};
+ Register registers[] = {r1, r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -408,25 +380,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
&default_descriptor);
}
-
-void ApiFunctionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- static PlatformInterfaceDescriptor default_descriptor =
- PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
-
- Register registers[] = {
- r0, // callee
- r4, // call_data
- r2, // holder
- r1, // api_function_address
- r3, // actual number of arguments
- };
- data->InitializePlatformSpecific(arraysize(registers), registers,
- &default_descriptor);
-}
-
-
-void ApiAccessorDescriptor::InitializePlatformSpecific(
+void ApiCallbackDescriptorBase::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 80aef0c4ff..6af3d6c20c 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -738,12 +738,12 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
str(scratch, MemOperand(ip));
// Call stub on end of buffer.
// Check for end of buffer.
- tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
+ tst(scratch, Operand(StoreBuffer::kStoreBufferMask));
if (and_then == kFallThroughAtEnd) {
- b(eq, &done);
+ b(ne, &done);
} else {
DCHECK(and_then == kReturnAtEnd);
- Ret(eq);
+ Ret(ne);
}
push(lr);
StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
@@ -755,20 +755,65 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
}
}
-
-void MacroAssembler::PushFixedFrame(Register marker_reg) {
- DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code());
- stm(db_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) | cp.bit() |
- (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
- fp.bit() | lr.bit());
+void MacroAssembler::PushCommonFrame(Register marker_reg) {
+ if (marker_reg.is_valid()) {
+ if (FLAG_enable_embedded_constant_pool) {
+ if (marker_reg.code() > pp.code()) {
+ stm(db_w, sp, pp.bit() | fp.bit() | lr.bit());
+ add(fp, sp, Operand(kPointerSize));
+ Push(marker_reg);
+ } else {
+ stm(db_w, sp, marker_reg.bit() | pp.bit() | fp.bit() | lr.bit());
+ add(fp, sp, Operand(2 * kPointerSize));
+ }
+ } else {
+ if (marker_reg.code() > fp.code()) {
+ stm(db_w, sp, fp.bit() | lr.bit());
+ mov(fp, Operand(sp));
+ Push(marker_reg);
+ } else {
+ stm(db_w, sp, marker_reg.bit() | fp.bit() | lr.bit());
+ add(fp, sp, Operand(kPointerSize));
+ }
+ }
+ } else {
+ stm(db_w, sp, (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
+ fp.bit() | lr.bit());
+ add(fp, sp, Operand(FLAG_enable_embedded_constant_pool ? kPointerSize : 0));
+ }
}
+void MacroAssembler::PopCommonFrame(Register marker_reg) {
+ if (marker_reg.is_valid()) {
+ if (FLAG_enable_embedded_constant_pool) {
+ if (marker_reg.code() > pp.code()) {
+ pop(marker_reg);
+ ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
+ } else {
+ ldm(ia_w, sp, marker_reg.bit() | pp.bit() | fp.bit() | lr.bit());
+ }
+ } else {
+ if (marker_reg.code() > fp.code()) {
+ pop(marker_reg);
+ ldm(ia_w, sp, fp.bit() | lr.bit());
+ } else {
+ ldm(ia_w, sp, marker_reg.bit() | fp.bit() | lr.bit());
+ }
+ }
+ } else {
+ ldm(ia_w, sp, (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
+ fp.bit() | lr.bit());
+ }
+}
-void MacroAssembler::PopFixedFrame(Register marker_reg) {
- DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code());
- ldm(ia_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) | cp.bit() |
+void MacroAssembler::PushStandardFrame(Register function_reg) {
+ DCHECK(!function_reg.is_valid() || function_reg.code() < cp.code());
+ stm(db_w, sp, (function_reg.is_valid() ? function_reg.bit() : 0) | cp.bit() |
(FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
fp.bit() | lr.bit());
+ int offset = -StandardFrameConstants::kContextOffset;
+ offset += function_reg.is_valid() ? kPointerSize : 0;
+ add(fp, sp, Operand(offset));
}
@@ -1056,7 +1101,144 @@ void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
vmov(dst, VmovIndexLo, src);
}
}
+void MacroAssembler::LslPair(Register dst_low, Register dst_high,
+ Register src_low, Register src_high,
+ Register scratch, Register shift) {
+ DCHECK(!AreAliased(dst_high, src_low));
+ DCHECK(!AreAliased(dst_high, shift));
+ Label less_than_32;
+ Label done;
+ rsb(scratch, shift, Operand(32), SetCC);
+ b(gt, &less_than_32);
+ // If shift >= 32
+ and_(scratch, shift, Operand(0x1f));
+ lsl(dst_high, src_low, Operand(scratch));
+ mov(dst_low, Operand(0));
+ jmp(&done);
+ bind(&less_than_32);
+ // If shift < 32
+ lsl(dst_high, src_high, Operand(shift));
+ orr(dst_high, dst_high, Operand(src_low, LSR, scratch));
+ lsl(dst_low, src_low, Operand(shift));
+ bind(&done);
+}
+
+void MacroAssembler::LslPair(Register dst_low, Register dst_high,
+ Register src_low, Register src_high,
+ uint32_t shift) {
+ DCHECK(!AreAliased(dst_high, src_low));
+ Label less_than_32;
+ Label done;
+ if (shift == 0) {
+ Move(dst_high, src_high);
+ Move(dst_low, src_low);
+ } else if (shift == 32) {
+ Move(dst_high, src_low);
+ Move(dst_low, Operand(0));
+ } else if (shift >= 32) {
+ shift &= 0x1f;
+ lsl(dst_high, src_low, Operand(shift));
+ mov(dst_low, Operand(0));
+ } else {
+ lsl(dst_high, src_high, Operand(shift));
+ orr(dst_high, dst_high, Operand(src_low, LSR, 32 - shift));
+ lsl(dst_low, src_low, Operand(shift));
+ }
+}
+
+void MacroAssembler::LsrPair(Register dst_low, Register dst_high,
+ Register src_low, Register src_high,
+ Register scratch, Register shift) {
+ DCHECK(!AreAliased(dst_low, src_high));
+ DCHECK(!AreAliased(dst_low, shift));
+
+ Label less_than_32;
+ Label done;
+ rsb(scratch, shift, Operand(32), SetCC);
+ b(gt, &less_than_32);
+ // If shift >= 32
+ and_(scratch, shift, Operand(0x1f));
+ lsr(dst_low, src_high, Operand(scratch));
+ mov(dst_high, Operand(0));
+ jmp(&done);
+ bind(&less_than_32);
+ // If shift < 32
+
+ lsr(dst_low, src_low, Operand(shift));
+ orr(dst_low, dst_low, Operand(src_high, LSL, scratch));
+ lsr(dst_high, src_high, Operand(shift));
+ bind(&done);
+}
+
+void MacroAssembler::LsrPair(Register dst_low, Register dst_high,
+ Register src_low, Register src_high,
+ uint32_t shift) {
+ DCHECK(!AreAliased(dst_low, src_high));
+ Label less_than_32;
+ Label done;
+ if (shift == 32) {
+ mov(dst_low, src_high);
+ mov(dst_high, Operand(0));
+ } else if (shift > 32) {
+ shift &= 0x1f;
+ lsr(dst_low, src_high, Operand(shift));
+ mov(dst_high, Operand(0));
+ } else if (shift == 0) {
+ Move(dst_low, src_low);
+ Move(dst_high, src_high);
+ } else {
+ lsr(dst_low, src_low, Operand(shift));
+ orr(dst_low, dst_low, Operand(src_high, LSL, 32 - shift));
+ lsr(dst_high, src_high, Operand(shift));
+ }
+}
+
+void MacroAssembler::AsrPair(Register dst_low, Register dst_high,
+ Register src_low, Register src_high,
+ Register scratch, Register shift) {
+ DCHECK(!AreAliased(dst_low, src_high));
+ DCHECK(!AreAliased(dst_low, shift));
+
+ Label less_than_32;
+ Label done;
+ rsb(scratch, shift, Operand(32), SetCC);
+ b(gt, &less_than_32);
+ // If shift >= 32
+ and_(scratch, shift, Operand(0x1f));
+ asr(dst_low, src_high, Operand(scratch));
+ asr(dst_high, src_high, Operand(31));
+ jmp(&done);
+ bind(&less_than_32);
+ // If shift < 32
+ lsr(dst_low, src_low, Operand(shift));
+ orr(dst_low, dst_low, Operand(src_high, LSL, scratch));
+ asr(dst_high, src_high, Operand(shift));
+ bind(&done);
+}
+
+void MacroAssembler::AsrPair(Register dst_low, Register dst_high,
+ Register src_low, Register src_high,
+ uint32_t shift) {
+ DCHECK(!AreAliased(dst_low, src_high));
+ Label less_than_32;
+ Label done;
+ if (shift == 32) {
+ mov(dst_low, src_high);
+ asr(dst_high, src_high, Operand(31));
+ } else if (shift > 32) {
+ shift &= 0x1f;
+ asr(dst_low, src_high, Operand(shift));
+ asr(dst_high, src_high, Operand(31));
+ } else if (shift == 0) {
+ Move(dst_low, src_low);
+ Move(dst_high, src_high);
+ } else {
+ lsr(dst_low, src_low, Operand(shift));
+ orr(dst_low, dst_low, Operand(src_high, LSL, 32 - shift));
+ asr(dst_high, src_high, Operand(shift));
+ }
+}
void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
Register code_target_address) {
@@ -1074,19 +1256,15 @@ void MacroAssembler::LoadConstantPoolPointerRegister() {
LoadConstantPoolPointerRegisterFromCodeTargetAddress(ip);
}
-
-void MacroAssembler::StubPrologue() {
- PushFixedFrame();
- Push(Smi::FromInt(StackFrame::STUB));
- // Adjust FP to point to saved FP.
- add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+void MacroAssembler::StubPrologue(StackFrame::Type type) {
+ mov(ip, Operand(Smi::FromInt(type)));
+ PushCommonFrame(ip);
if (FLAG_enable_embedded_constant_pool) {
LoadConstantPoolPointerRegister();
set_constant_pool_available(true);
}
}
-
void MacroAssembler::Prologue(bool code_pre_aging) {
{ PredictableCodeSizeScope predictible_code_size_scope(
this, kNoCodeAgeSequenceLength);
@@ -1099,10 +1277,8 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
ldr(pc, MemOperand(pc, -4));
emit_code_stub_address(stub);
} else {
- PushFixedFrame(r1);
+ PushStandardFrame(r1);
nop(ip.code());
- // Adjust FP to point to saved FP.
- add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}
}
if (FLAG_enable_embedded_constant_pool) {
@@ -1123,17 +1299,15 @@ void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
void MacroAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
// r0-r3: preserved
- PushFixedFrame();
+ mov(ip, Operand(Smi::FromInt(type)));
+ PushCommonFrame(ip);
if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
LoadConstantPoolPointerRegister();
}
- mov(ip, Operand(Smi::FromInt(type)));
- push(ip);
- mov(ip, Operand(CodeObject()));
- push(ip);
- // Adjust FP to point to saved FP.
- add(fp, sp,
- Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
+ if (type == StackFrame::INTERNAL) {
+ mov(ip, Operand(CodeObject()));
+ push(ip);
+ }
}
@@ -1164,10 +1338,10 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
- Push(lr, fp);
- mov(fp, Operand(sp)); // Set up new frame pointer.
+ mov(ip, Operand(Smi::FromInt(StackFrame::EXIT)));
+ PushCommonFrame(ip);
// Reserve room for saved entry sp and code object.
- sub(sp, sp, Operand(ExitFrameConstants::kFrameSize));
+ sub(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
if (emit_debug_code()) {
mov(ip, Operand::Zero());
str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
@@ -1249,7 +1423,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
// Optionally restore all double registers.
if (save_doubles) {
// Calculate the stack location of the saved doubles and restore them.
- const int offset = ExitFrameConstants::kFrameSize;
+ const int offset = ExitFrameConstants::kFixedFrameSizeFromFp;
sub(r3, fp,
Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
RestoreFPRegs(r3, ip);
@@ -1300,6 +1474,64 @@ void MacroAssembler::MovFromFloatParameter(DwVfpRegister dst) {
MovFromFloatResult(dst);
}
+void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
+ Register caller_args_count_reg,
+ Register scratch0, Register scratch1) {
+#if DEBUG
+ if (callee_args_count.is_reg()) {
+ DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
+ scratch1));
+ } else {
+ DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
+ }
+#endif
+
+ // Calculate the end of destination area where we will put the arguments
+ // after we drop current frame. We add kPointerSize to count the receiver
+ // argument which is not included into formal parameters count.
+ Register dst_reg = scratch0;
+ add(dst_reg, fp, Operand(caller_args_count_reg, LSL, kPointerSizeLog2));
+ add(dst_reg, dst_reg,
+ Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+
+ Register src_reg = caller_args_count_reg;
+ // Calculate the end of source area. +kPointerSize is for the receiver.
+ if (callee_args_count.is_reg()) {
+ add(src_reg, sp, Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
+ add(src_reg, src_reg, Operand(kPointerSize));
+ } else {
+ add(src_reg, sp,
+ Operand((callee_args_count.immediate() + 1) * kPointerSize));
+ }
+
+ if (FLAG_debug_code) {
+ cmp(src_reg, dst_reg);
+ Check(lo, kStackAccessBelowStackPointer);
+ }
+
+ // Restore caller's frame pointer and return address now as they will be
+ // overwritten by the copying loop.
+ ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Now copy callee arguments to the caller frame going backwards to avoid
+ // callee arguments corruption (source and destination areas could overlap).
+
+ // Both src_reg and dst_reg are pointing to the word after the one to copy,
+ // so they must be pre-decremented in the loop.
+ Register tmp_reg = scratch1;
+ Label loop, entry;
+ b(&entry);
+ bind(&loop);
+ ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
+ str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
+ bind(&entry);
+ cmp(sp, src_reg);
+ b(ne, &loop);
+
+ // Leave current frame.
+ mov(sp, dst_reg);
+}
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
@@ -1578,8 +1810,19 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
DCHECK(!holder_reg.is(ip));
DCHECK(!scratch.is(ip));
- // Load current lexical context from the stack frame.
- ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Load current lexical context from the active StandardFrame, which
+ // may require crawling past STUB frames.
+ Label load_context;
+ Label has_context;
+ DCHECK(!ip.is(scratch));
+ mov(ip, fp);
+ bind(&load_context);
+ ldr(scratch, MemOperand(ip, CommonFrameConstants::kContextOrFrameTypeOffset));
+ JumpIfNotSmi(scratch, &has_context);
+ ldr(ip, MemOperand(ip, CommonFrameConstants::kCallerFPOffset));
+ b(&load_context);
+ bind(&has_context);
+
// In debug mode, make sure the lexical context is set.
#ifdef DEBUG
cmp(scratch, Operand::Zero());
@@ -2803,6 +3046,17 @@ void MacroAssembler::JumpIfEitherSmi(Register reg1,
b(eq, on_either_smi);
}
+void MacroAssembler::AssertNotNumber(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ tst(object, Operand(kSmiTagMask));
+ Check(ne, kOperandIsANumber);
+ push(object);
+ CompareObjectType(object, object, object, HEAP_NUMBER_TYPE);
+ pop(object);
+ Check(ne, kOperandIsANumber);
+ }
+}
void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
@@ -3510,29 +3764,46 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
b(ne, &next);
}
-
void MacroAssembler::TestJSArrayForAllocationMemento(
Register receiver_reg,
Register scratch_reg,
Label* no_memento_found) {
- ExternalReference new_space_start =
- ExternalReference::new_space_start(isolate());
+ Label map_check;
+ Label top_check;
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
- add(scratch_reg, receiver_reg,
- Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
- cmp(scratch_reg, Operand(new_space_start));
- b(lt, no_memento_found);
- mov(ip, Operand(new_space_allocation_top));
- ldr(ip, MemOperand(ip));
- cmp(scratch_reg, ip);
+ const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
+ const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+
+ // Bail out if the object is not in new space.
+ JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
+ // If the object is in new space, we need to check whether it is on the same
+ // page as the current top.
+ add(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+ eor(scratch_reg, scratch_reg, Operand(new_space_allocation_top));
+ tst(scratch_reg, Operand(~Page::kPageAlignmentMask));
+ b(eq, &top_check);
+ // The object is on a different page than allocation top. Bail out if the
+ // object sits on the page boundary as no memento can follow and we cannot
+ // touch the memory following it.
+ add(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+ eor(scratch_reg, scratch_reg, Operand(receiver_reg));
+ tst(scratch_reg, Operand(~Page::kPageAlignmentMask));
+ b(ne, no_memento_found);
+ // Continue with the actual map check.
+ jmp(&map_check);
+ // If top is on the same page as the current object, we need to check whether
+ // we are below top.
+ bind(&top_check);
+ add(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+ cmp(scratch_reg, Operand(new_space_allocation_top));
b(gt, no_memento_found);
- ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
- cmp(scratch_reg,
- Operand(isolate()->factory()->allocation_memento_map()));
+ // Memento map check.
+ bind(&map_check);
+ ldr(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
+ cmp(scratch_reg, Operand(isolate()->factory()->allocation_memento_map()));
}
-
Register GetRegisterThatIsNotOneOf(Register reg1,
Register reg2,
Register reg3,
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 468f4b521a..f32630444e 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -457,10 +457,14 @@ class MacroAssembler: public Assembler {
}
// Push a fixed frame, consisting of lr, fp, constant pool (if
- // FLAG_enable_embedded_constant_pool), context and JS function / marker id if
- // marker_reg is a valid register.
- void PushFixedFrame(Register marker_reg = no_reg);
- void PopFixedFrame(Register marker_reg = no_reg);
+ // FLAG_enable_embedded_constant_pool)
+ void PushCommonFrame(Register marker_reg = no_reg);
+
+ // Push a standard frame, consisting of lr, fp, constant pool (if
+ // FLAG_enable_embedded_constant_pool), context and JS function
+ void PushStandardFrame(Register function_reg);
+
+ void PopCommonFrame(Register marker_reg = no_reg);
// Push and pop the registers that can hold pointers, as defined by the
// RegList constant kSafepointSavedRegisters.
@@ -545,6 +549,19 @@ class MacroAssembler: public Assembler {
void VmovLow(Register dst, DwVfpRegister src);
void VmovLow(DwVfpRegister dst, Register src);
+ void LslPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, Register scratch, Register shift);
+ void LslPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, uint32_t shift);
+ void LsrPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, Register scratch, Register shift);
+ void LsrPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, uint32_t shift);
+ void AsrPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, Register scratch, Register shift);
+ void AsrPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, uint32_t shift);
+
// Loads the number from object into dst register.
// If |object| is neither smi nor heap number, |not_number| is jumped to
// with |object| still intact.
@@ -580,7 +597,7 @@ class MacroAssembler: public Assembler {
Label* not_int32);
// Generates function and stub prologue code.
- void StubPrologue();
+ void StubPrologue(StackFrame::Type type);
void Prologue(bool code_pre_aging);
// Enter exit frame.
@@ -637,6 +654,15 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// JavaScript invokes
+ // Removes current frame and its arguments from the stack preserving
+ // the arguments and a return address pushed to the stack for the next call.
+ // Both |callee_args_count| and |caller_args_count_reg| do not include
+ // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
+ // is trashed.
+ void PrepareForTailCall(const ParameterCount& callee_args_count,
+ Register caller_args_count_reg, Register scratch0,
+ Register scratch1);
+
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
@@ -1280,6 +1306,9 @@ class MacroAssembler: public Assembler {
// Jump if either of the registers contain a smi.
void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
+ // Abort execution if argument is a number, enabled via --debug-code.
+ void AssertNotNumber(Register object);
+
// Abort execution if argument is a smi, enabled via --debug-code.
void AssertNotSmi(Register object);
void AssertSmi(Register object);
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 4630b94b63..6c22a0a86a 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -1041,6 +1041,32 @@ ReturnType Simulator::GetFromVFPRegister(int reg_index) {
return value;
}
+void Simulator::SetSpecialRegister(SRegisterFieldMask reg_and_mask,
+ uint32_t value) {
+ // Only CPSR_f is implemented. Of that, only N, Z, C and V are implemented.
+ if ((reg_and_mask == CPSR_f) && ((value & ~kSpecialCondition) == 0)) {
+ n_flag_ = ((value & (1 << 31)) != 0);
+ z_flag_ = ((value & (1 << 30)) != 0);
+ c_flag_ = ((value & (1 << 29)) != 0);
+ v_flag_ = ((value & (1 << 28)) != 0);
+ } else {
+ UNIMPLEMENTED();
+ }
+}
+
+uint32_t Simulator::GetFromSpecialRegister(SRegister reg) {
+ uint32_t result = 0;
+ // Only CPSR_f is implemented.
+ if (reg == CPSR) {
+ if (n_flag_) result |= (1 << 31);
+ if (z_flag_) result |= (1 << 30);
+ if (c_flag_) result |= (1 << 29);
+ if (v_flag_) result |= (1 << 28);
+ } else {
+ UNIMPLEMENTED();
+ }
+ return result;
+}
// Runtime FP routines take:
// - two double arguments
@@ -1307,11 +1333,12 @@ bool Simulator::CarryFrom(int32_t left, int32_t right, int32_t carry) {
// Calculate C flag value for subtractions.
-bool Simulator::BorrowFrom(int32_t left, int32_t right) {
+bool Simulator::BorrowFrom(int32_t left, int32_t right, int32_t carry) {
uint32_t uleft = static_cast<uint32_t>(left);
uint32_t uright = static_cast<uint32_t>(right);
- return (uright > uleft);
+ return (uright > uleft) ||
+ (!carry && (((uright + 1) > uleft) || (uright > (uleft - 1))));
}
@@ -2312,7 +2339,22 @@ void Simulator::DecodeType01(Instruction* instr) {
return;
}
} else if ((type == 0) && instr->IsMiscType0()) {
- if (instr->Bits(22, 21) == 1) {
+ if ((instr->Bits(27, 23) == 2) && (instr->Bits(21, 20) == 2) &&
+ (instr->Bits(15, 4) == 0xf00)) {
+ // MSR
+ int rm = instr->RmValue();
+ DCHECK_NE(pc, rm); // UNPREDICTABLE
+ SRegisterFieldMask sreg_and_mask =
+ instr->BitField(22, 22) | instr->BitField(19, 16);
+ SetSpecialRegister(sreg_and_mask, get_register(rm));
+ } else if ((instr->Bits(27, 23) == 2) && (instr->Bits(21, 20) == 0) &&
+ (instr->Bits(11, 0) == 0)) {
+ // MRS
+ int rd = instr->RdValue();
+ DCHECK_NE(pc, rd); // UNPREDICTABLE
+ SRegister sreg = static_cast<SRegister>(instr->BitField(22, 22));
+ set_register(rd, GetFromSpecialRegister(sreg));
+ } else if (instr->Bits(22, 21) == 1) {
int rm = instr->RmValue();
switch (instr->BitField(7, 4)) {
case BX:
@@ -2452,8 +2494,15 @@ void Simulator::DecodeType01(Instruction* instr) {
}
case SBC: {
- Format(instr, "sbc'cond's 'rd, 'rn, 'shift_rm");
- Format(instr, "sbc'cond's 'rd, 'rn, 'imm");
+ // Format(instr, "sbc'cond's 'rd, 'rn, 'shift_rm");
+ // Format(instr, "sbc'cond's 'rd, 'rn, 'imm");
+ alu_out = (rn_val - shifter_operand) - (GetCarry() ? 0 : 1);
+ set_register(rd, alu_out);
+ if (instr->HasS()) {
+ SetNZFlags(alu_out);
+ SetCFlag(!BorrowFrom(rn_val, shifter_operand, GetCarry()));
+ SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, false));
+ }
break;
}
@@ -3215,7 +3264,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
if (instr->SzValue() == 0x1) {
set_d_register_from_double(vd, instr->DoubleImmedVmov());
} else {
- UNREACHABLE(); // Not used by v8.
+ set_s_register_from_float(d, instr->DoubleImmedVmov());
}
} else if (((instr->Opc2Value() == 0x6)) && (instr->Opc3Value() == 0x3)) {
// vrintz - truncate
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index 6567607bb8..b3c8eb41e5 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -262,7 +262,7 @@ class Simulator {
void SetCFlag(bool val);
void SetVFlag(bool val);
bool CarryFrom(int32_t left, int32_t right, int32_t carry = 0);
- bool BorrowFrom(int32_t left, int32_t right);
+ bool BorrowFrom(int32_t left, int32_t right, int32_t carry = 1);
bool OverflowFrom(int32_t alu_out,
int32_t left,
int32_t right,
@@ -363,6 +363,9 @@ class Simulator {
template<class InputType, int register_size>
void SetVFPRegister(int reg_index, const InputType& value);
+ void SetSpecialRegister(SRegisterFieldMask reg_and_mask, uint32_t value);
+ uint32_t GetFromSpecialRegister(SRegister reg);
+
void CallInternal(byte* entry);
// Architecture state.
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
index aeca563c37..6191216281 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -41,6 +41,18 @@ void RelocInfo::set_target_address(Address target,
}
}
+void RelocInfo::update_wasm_memory_reference(
+ Address old_base, Address new_base, size_t old_size, size_t new_size,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ DCHECK(old_base <= wasm_memory_reference() &&
+ wasm_memory_reference() < old_base + old_size);
+ Address updated_reference = new_base + (wasm_memory_reference() - old_base);
+ DCHECK(new_base <= updated_reference &&
+ updated_reference < new_base + new_size);
+ Assembler::set_target_address_at(isolate_, pc_, host_, updated_reference,
+ icache_flush_mode);
+}
inline int CPURegister::code() const {
DCHECK(IsValid());
@@ -693,6 +705,10 @@ Address RelocInfo::target_address() {
return Assembler::target_address_at(pc_, host_);
}
+Address RelocInfo::wasm_memory_reference() {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ return Assembler::target_address_at(pc_, host_);
+}
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index ea7a732f8a..2471d5eebd 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -56,7 +56,10 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
if (cpu.implementer() == base::CPU::NVIDIA &&
cpu.variant() == base::CPU::NVIDIA_DENVER &&
cpu.part() <= base::CPU::NVIDIA_DENVER_V10) {
- supported_ |= 1u << COHERENT_CACHE;
+ // TODO(jkummerow): This is turned off as an experiment to see if it
+ // affects crash rates. Keep an eye on crash reports and either remove
+ // coherent cache support permanently, or re-enable it!
+ // supported_ |= 1u << COHERENT_CACHE;
}
}
@@ -437,7 +440,8 @@ bool ConstPool::CanBeShared(RelocInfo::Mode mode) {
DCHECK(mode != RelocInfo::NONE32);
return RelocInfo::IsNone(mode) ||
- (!assm_->serializer_enabled() && (mode >= RelocInfo::CELL));
+ (!assm_->serializer_enabled() &&
+ (mode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE));
}
@@ -2871,7 +2875,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// We do not try to reuse pool constants.
RelocInfo rinfo(isolate(), reinterpret_cast<byte*>(pc_), rmode, data, NULL);
if (((rmode >= RelocInfo::COMMENT) &&
- (rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_CALL)) ||
+ (rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL)) ||
(rmode == RelocInfo::INTERNAL_REFERENCE) ||
(rmode == RelocInfo::CONST_POOL) || (rmode == RelocInfo::VENEER_POOL) ||
(rmode == RelocInfo::DEOPT_REASON) ||
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index 47786eb710..546025475e 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -922,7 +922,9 @@ class Assembler : public AssemblerBase {
}
// Debugging ----------------------------------------------------------------
- PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+ AssemblerPositionsRecorder* positions_recorder() {
+ return &positions_recorder_;
+ }
void RecordComment(const char* msg);
// Record a deoptimization reason that can be used by a log or cpu profiler.
@@ -2135,8 +2137,8 @@ class Assembler : public AssemblerBase {
void DeleteUnresolvedBranchInfoForLabelTraverse(Label* label);
private:
- PositionsRecorder positions_recorder_;
- friend class PositionsRecorder;
+ AssemblerPositionsRecorder positions_recorder_;
+ friend class AssemblerPositionsRecorder;
friend class EnsureSpace;
friend class ConstPool;
};
diff --git a/deps/v8/src/arm64/builtins-arm64.cc b/deps/v8/src/arm64/builtins-arm64.cc
index 11f66a4ef4..44bfc1762d 100644
--- a/deps/v8/src/arm64/builtins-arm64.cc
+++ b/deps/v8/src/arm64/builtins-arm64.cc
@@ -518,6 +518,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -- x2 : allocation site or undefined
// -- x3 : new target
// -- lr : return address
+ // -- cp : context pointer
// -- sp[...]: constructor arguments
// -----------------------------------
@@ -537,6 +538,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Preserve the incoming parameters on the stack.
__ AssertUndefinedOrAllocationSite(allocation_site, x10);
+ __ Push(cp);
__ SmiTag(argc);
__ Push(allocation_site, argc);
@@ -623,7 +625,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// x0: result
// jssp[0]: receiver
// jssp[1]: number of arguments (smi-tagged)
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
if (create_implicit_receiver) {
// If the result is an object (in the ECMA sense), we should get rid
@@ -763,9 +765,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
ProfileEntryHookStub::MaybeCallEntryHook(masm);
- // Clear the context before we push it when entering the internal frame.
- __ Mov(cp, 0);
-
{
// Enter an internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -1394,23 +1393,6 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
}
-void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
- // We check the stack limit as indicator that recompilation might be done.
- Label ok;
- __ CompareRoot(jssp, Heap::kStackLimitRootIndex);
- __ B(hs, &ok);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard);
- }
- __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
- RelocInfo::CODE_TARGET);
-
- __ Bind(&ok);
- __ Ret();
-}
-
-
// static
void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
int field_index) {
@@ -1456,6 +1438,29 @@ void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
__ TailCallRuntime(Runtime::kThrowNotDateError);
}
+// static
+void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : argc
+ // -- jssp[0] : first argument (left-hand side)
+ // -- jssp[8] : receiver (right-hand side)
+ // -----------------------------------
+ ASM_LOCATION("Builtins::Generate_FunctionHasInstance");
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Ldr(InstanceOfDescriptor::LeftRegister(),
+ MemOperand(fp, 2 * kPointerSize)); // Load left-hand side.
+ __ Ldr(InstanceOfDescriptor::RightRegister(),
+ MemOperand(fp, 3 * kPointerSize)); // Load right-hand side.
+ InstanceOfStub stub(masm->isolate(), true);
+ __ CallStub(&stub);
+ }
+
+ // Pop the argument and the receiver.
+ __ Drop(2);
+ __ Ret();
+}
// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
@@ -1972,19 +1977,21 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
Comment cmnt(masm, "[ PrepareForTailCall");
- // Prepare for tail call only if the debugger is not active.
+ // Prepare for tail call only if ES2015 tail call elimination is enabled.
Label done;
- ExternalReference debug_is_active =
- ExternalReference::debug_is_active_address(masm->isolate());
- __ Mov(scratch1, Operand(debug_is_active));
+ ExternalReference is_tail_call_elimination_enabled =
+ ExternalReference::is_tail_call_elimination_enabled_address(
+ masm->isolate());
+ __ Mov(scratch1, Operand(is_tail_call_elimination_enabled));
__ Ldrb(scratch1, MemOperand(scratch1));
__ Cmp(scratch1, Operand(0));
- __ B(ne, &done);
+ __ B(eq, &done);
// Drop possible interpreter handler/stub frame.
{
Label no_interpreter_frame;
- __ Ldr(scratch3, MemOperand(fp, StandardFrameConstants::kMarkerOffset));
+ __ Ldr(scratch3,
+ MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Cmp(scratch3, Operand(Smi::FromInt(StackFrame::STUB)));
__ B(ne, &no_interpreter_frame);
__ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -1992,18 +1999,19 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
}
// Check if next frame is an arguments adaptor frame.
+ Register caller_args_count_reg = scratch1;
Label no_arguments_adaptor, formal_parameter_count_loaded;
__ Ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ Ldr(scratch3,
- MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+ MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Cmp(scratch3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ B(ne, &no_arguments_adaptor);
- // Drop arguments adaptor frame and load arguments count.
+ // Drop current frame and load arguments count from arguments adaptor frame.
__ mov(fp, scratch2);
- __ Ldr(scratch1,
+ __ Ldr(caller_args_count_reg,
MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(scratch1);
+ __ SmiUntag(caller_args_count_reg);
__ B(&formal_parameter_count_loaded);
__ bind(&no_arguments_adaptor);
@@ -2011,54 +2019,14 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
__ Ldr(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Ldr(scratch1,
FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldrsw(scratch1,
+ __ Ldrsw(caller_args_count_reg,
FieldMemOperand(scratch1,
SharedFunctionInfo::kFormalParameterCountOffset));
__ bind(&formal_parameter_count_loaded);
- // Calculate the end of destination area where we will put the arguments
- // after we drop current frame. We add kPointerSize to count the receiver
- // argument which is not included into formal parameters count.
- Register dst_reg = scratch2;
- __ add(dst_reg, fp, Operand(scratch1, LSL, kPointerSizeLog2));
- __ add(dst_reg, dst_reg,
- Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
-
- Register src_reg = scratch1;
- __ add(src_reg, jssp, Operand(args_reg, LSL, kPointerSizeLog2));
- // Count receiver argument as well (not included in args_reg).
- __ add(src_reg, src_reg, Operand(kPointerSize));
-
- if (FLAG_debug_code) {
- __ Cmp(src_reg, dst_reg);
- __ Check(lo, kStackAccessBelowStackPointer);
- }
-
- // Restore caller's frame pointer and return address now as they will be
- // overwritten by the copying loop.
- __ Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
- __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Now copy callee arguments to the caller frame going backwards to avoid
- // callee arguments corruption (source and destination areas could overlap).
-
- // Both src_reg and dst_reg are pointing to the word after the one to copy,
- // so they must be pre-decremented in the loop.
- Register tmp_reg = scratch3;
- Label loop, entry;
- __ B(&entry);
- __ bind(&loop);
- __ Ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
- __ Str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
- __ bind(&entry);
- __ Cmp(jssp, src_reg);
- __ B(ne, &loop);
-
- // Leave current frame.
- __ Mov(jssp, dst_reg);
- __ SetStackPointer(jssp);
- __ AssertStackConsistency();
-
+ ParameterCount callee_args_count(args_reg);
+ __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+ scratch3);
__ bind(&done);
}
} // namespace
@@ -2610,30 +2578,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Register copy_to = x12;
Register scratch1 = x13, scratch2 = x14;
- // If the function is strong we need to throw an error.
- Label no_strong_error;
- __ Ldr(scratch1,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(scratch2.W(),
- FieldMemOperand(scratch1, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestAndBranchIfAllClear(scratch2.W(),
- (1 << SharedFunctionInfo::kStrongModeFunction),
- &no_strong_error);
-
- // What we really care about is the required number of arguments.
- DCHECK_EQ(kPointerSize, kInt64Size);
- __ Ldr(scratch2.W(),
- FieldMemOperand(scratch1, SharedFunctionInfo::kLengthOffset));
- __ Cmp(argc_actual, Operand(scratch2, LSR, 1));
- __ B(ge, &no_strong_error);
-
- {
- FrameScope frame(masm, StackFrame::MANUAL);
- EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
- }
-
- __ Bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
ArgumentAdaptorStackCheck(masm, &stack_overflow);
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index ad566e68fc..ee4053515a 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -4,8 +4,9 @@
#if V8_TARGET_ARCH_ARM64
-#include "src/bootstrapper.h"
#include "src/code-stubs.h"
+#include "src/api-arguments.h"
+#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
@@ -81,6 +82,10 @@ void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
}
+void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+ Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
+ descriptor->Initialize(x0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
+}
void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
@@ -425,7 +430,9 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
}
-// Fast negative check for internalized-to-internalized equality.
+// Fast negative check for internalized-to-internalized equality or receiver
+// equality. Also handles the undetectable receiver to null/undefined
+// comparison.
// See call site for description.
static void EmitCheckForInternalizedStringsOrObjects(
MacroAssembler* masm, Register left, Register right, Register left_map,
@@ -435,7 +442,7 @@ static void EmitCheckForInternalizedStringsOrObjects(
Register result = x0;
DCHECK(left.is(x0) || right.is(x0));
- Label object_test, return_unequal, undetectable;
+ Label object_test, return_equal, return_unequal, undetectable;
STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
// TODO(all): reexamine this branch sequence for optimisation wrt branch
// prediction.
@@ -463,12 +470,22 @@ static void EmitCheckForInternalizedStringsOrObjects(
__ CompareInstanceType(left_map, left_type, FIRST_JS_RECEIVER_TYPE);
__ B(lt, runtime_call);
- __ bind(&return_unequal);
+ __ Bind(&return_unequal);
// Return non-equal by returning the non-zero object pointer in x0.
__ Ret();
- __ bind(&undetectable);
+ __ Bind(&undetectable);
__ Tbz(left_bitfield, MaskToBit(1 << Map::kIsUndetectable), &return_unequal);
+
+ // If both sides are JSReceivers, then the result is false according to
+ // the HTML specification, which says that only comparisons with null or
+ // undefined are affected by special casing for document.all.
+ __ CompareInstanceType(right_map, right_type, ODDBALL_TYPE);
+ __ B(eq, &return_equal);
+ __ CompareInstanceType(left_map, left_type, ODDBALL_TYPE);
+ __ B(ne, &return_unequal);
+
+ __ Bind(&return_equal);
__ Mov(result, EQUAL);
__ Ret();
}
@@ -1324,7 +1341,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate()));
__ Ldr(x10, MemOperand(x11));
- __ Push(x13, xzr, x12, x10);
+ __ Push(x13, x12, xzr, x10);
// Set up fp.
__ Sub(fp, jssp, EntryFrameConstants::kCallerFPOffset);
@@ -1544,8 +1561,11 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
__ JumpIfNotObjectType(function, function_map, scratch, JS_FUNCTION_TYPE,
&slow_case);
- // Ensure that {function} has an instance prototype.
+ // Go to the runtime if the function is not a constructor.
__ Ldrb(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
+ __ Tbz(scratch, Map::kIsConstructor, &slow_case);
+
+ // Ensure that {function} has an instance prototype.
__ Tbnz(scratch, Map::kHasNonInstancePrototype, &slow_case);
// Get the "prototype" (or initial map) of the {function}.
@@ -1612,27 +1632,8 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
// Slow-case: Call the %InstanceOf runtime function.
__ bind(&slow_case);
__ Push(object, function);
- __ TailCallRuntime(Runtime::kInstanceOf);
-}
-
-
-void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
- // Return address is in lr.
- Label slow;
-
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
-
- // Check that the key is an array index, that is Uint32.
- __ TestAndBranchIfAnySet(key, kSmiTagMask | kSmiSignMask, &slow);
-
- // Everything is fine, call runtime.
- __ Push(receiver, key);
- __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
-
- __ Bind(&slow);
- PropertyAccessCompiler::TailCallBuiltin(
- masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
+ __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
+ : Runtime::kInstanceOf);
}
@@ -2856,10 +2857,17 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
// Handle more complex cases in runtime.
__ Bind(&runtime);
- __ Push(lhs, rhs);
if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(lhs, rhs);
+ __ CallRuntime(Runtime::kStringEqual);
+ }
+ __ LoadRoot(x1, Heap::kTrueValueRootIndex);
+ __ Sub(x0, x0, x1);
+ __ Ret();
} else {
+ __ Push(lhs, rhs);
__ TailCallRuntime(Runtime::kStringCompare);
}
@@ -3227,27 +3235,28 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ Bind(&not_smi);
Label not_heap_number;
- __ Ldr(x1, FieldMemOperand(x0, HeapObject::kMapOffset));
- __ Ldrb(x1, FieldMemOperand(x1, Map::kInstanceTypeOffset));
- // x0: object
- // x1: instance type
- __ Cmp(x1, HEAP_NUMBER_TYPE);
+ __ CompareObjectType(x0, x1, x1, HEAP_NUMBER_TYPE);
+ // x0: receiver
+ // x1: receiver instance type
__ B(ne, &not_heap_number);
__ Ret();
__ Bind(&not_heap_number);
- Label not_string, slow_string;
- __ Cmp(x1, FIRST_NONSTRING_TYPE);
+ NonNumberToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+}
+
+void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
+ // The NonNumberToNumber stub takes one argument in x0.
+ __ AssertNotNumber(x0);
+
+ Label not_string;
+ __ CompareObjectType(x0, x1, x1, FIRST_NONSTRING_TYPE);
+ // x0: receiver
+ // x1: receiver instance type
__ B(hs, &not_string);
- // Check if string has a cached array index.
- __ Ldr(x2, FieldMemOperand(x0, String::kHashFieldOffset));
- __ Tst(x2, Operand(String::kContainsCachedArrayIndexMask));
- __ B(ne, &slow_string);
- __ IndexFromHash(x2, x0);
- __ Ret();
- __ Bind(&slow_string);
- __ Push(x0); // Push argument.
- __ TailCallRuntime(Runtime::kStringToNumber);
+ StringToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
__ Bind(&not_string);
Label not_oddball;
@@ -3261,22 +3270,23 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kToNumber);
}
+void StringToNumberStub::Generate(MacroAssembler* masm) {
+ // The StringToNumber stub takes one argument in x0.
+ __ AssertString(x0);
-void ToLengthStub::Generate(MacroAssembler* masm) {
- // The ToLength stub takes one argument in x0.
- Label not_smi;
- __ JumpIfNotSmi(x0, &not_smi);
- STATIC_ASSERT(kSmiTag == 0);
- __ Tst(x0, x0);
- __ Csel(x0, x0, Operand(0), ge);
+ // Check if string has a cached array index.
+ Label runtime;
+ __ Ldr(x2, FieldMemOperand(x0, String::kHashFieldOffset));
+ __ Tst(x2, Operand(String::kContainsCachedArrayIndexMask));
+ __ B(ne, &runtime);
+ __ IndexFromHash(x2, x0);
__ Ret();
- __ Bind(&not_smi);
+ __ Bind(&runtime);
__ Push(x0); // Push argument.
- __ TailCallRuntime(Runtime::kToLength);
+ __ TailCallRuntime(Runtime::kStringToNumber);
}
-
void ToStringStub::Generate(MacroAssembler* masm) {
// The ToString stub takes one argument in x0.
Label is_number;
@@ -3449,43 +3459,6 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
}
-void StringCompareStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x1 : left
- // -- x0 : right
- // -- lr : return address
- // -----------------------------------
- __ AssertString(x1);
- __ AssertString(x0);
-
- Label not_same;
- __ Cmp(x0, x1);
- __ B(ne, &not_same);
- __ Mov(x0, Smi::FromInt(EQUAL));
- __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, x3,
- x4);
- __ Ret();
-
- __ Bind(&not_same);
-
- // Check that both objects are sequential one-byte strings.
- Label runtime;
- __ JumpIfEitherIsNotSequentialOneByteStrings(x1, x0, x12, x13, &runtime);
-
- // Compare flat one-byte strings natively.
- __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, x3,
- x4);
- StringHelper::GenerateCompareFlatOneByteStrings(masm, x1, x0, x12, x13, x14,
- x15);
-
- // Call the runtime.
- // Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer.
- __ Bind(&runtime);
- __ Push(x1, x0);
- __ TailCallRuntime(Runtime::kStringCompare);
-}
-
-
void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x1 : left
@@ -3682,7 +3655,7 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
int parameter_count_offset =
- StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
__ Ldr(x1, MemOperand(fp, parameter_count_offset));
if (function_mode() == JS_FUNCTION_STUB_MODE) {
__ Add(x1, x1, 1);
@@ -4972,7 +4945,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
__ Bind(&loop);
__ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
__ Bind(&loop_entry);
- __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kMarkerOffset));
+ __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kFunctionOffset));
__ Cmp(x3, x1);
__ B(ne, &loop);
}
@@ -4980,8 +4953,8 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// Check if we have rest parameters (only possible if we have an
// arguments adaptor frame below the function frame).
Label no_rest_parameters;
- __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kContextOffset));
+ __ Ldr(x2, MemOperand(x2, CommonFrameConstants::kCallerFPOffset));
+ __ Ldr(x3, MemOperand(x2, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Cmp(x3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ B(ne, &no_rest_parameters);
@@ -5137,8 +5110,9 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
Label runtime;
Label adaptor_frame, try_allocate;
__ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(caller_ctx, MemOperand(caller_fp,
- StandardFrameConstants::kContextOffset));
+ __ Ldr(
+ caller_ctx,
+ MemOperand(caller_fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ B(eq, &adaptor_frame);
@@ -5401,7 +5375,7 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ Bind(&loop);
__ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
__ Bind(&loop_entry);
- __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kMarkerOffset));
+ __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kFunctionOffset));
__ Cmp(x3, x1);
__ B(ne, &loop);
}
@@ -5409,7 +5383,7 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
__ Ldr(x3, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(x4, MemOperand(x3, StandardFrameConstants::kContextOffset));
+ __ Ldr(x4, MemOperand(x3, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Cmp(x4, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ B(eq, &arguments_adaptor);
{
@@ -5804,16 +5778,12 @@ static void CallApiFunctionAndReturn(
__ B(&leave_exit_frame);
}
-static void CallApiFunctionStubHelper(MacroAssembler* masm,
- const ParameterCount& argc,
- bool return_first_arg,
- bool call_data_undefined, bool is_lazy) {
+void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : callee
// -- x4 : call_data
// -- x2 : holder
// -- x1 : api_function_address
- // -- x3 : number of arguments if argc is a register
// -- cp : context
// --
// -- sp[0] : last argument
@@ -5839,17 +5809,15 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
STATIC_ASSERT(FCA::kHolderIndex == 0);
STATIC_ASSERT(FCA::kArgsLength == 7);
- DCHECK(argc.is_immediate() || x3.is(argc.reg()));
-
// FunctionCallbackArguments: context, callee and call data.
__ Push(context, callee, call_data);
- if (!is_lazy) {
+ if (!is_lazy()) {
// Load context from callee
__ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
}
- if (!call_data_undefined) {
+ if (!call_data_undefined()) {
__ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
}
Register isolate_reg = x5;
@@ -5878,26 +5846,13 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
// x0 = FunctionCallbackInfo&
// Arguments is after the return address.
__ Add(x0, masm->StackPointer(), 1 * kPointerSize);
- if (argc.is_immediate()) {
- // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
- __ Add(x10, args,
- Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
- __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc and
- // FunctionCallbackInfo::is_construct_call = 0
- __ Mov(x10, argc.immediate());
- __ Stp(x10, xzr, MemOperand(x0, 2 * kPointerSize));
- } else {
- // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
- __ Add(x10, args, Operand(argc.reg(), LSL, kPointerSizeLog2));
- __ Add(x10, x10, (FCA::kArgsLength - 1) * kPointerSize);
- __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc and
- // FunctionCallbackInfo::is_construct_call
- __ Add(x10, argc.reg(), FCA::kArgsLength + 1);
- __ Mov(x10, Operand(x10, LSL, kPointerSizeLog2));
- __ Stp(argc.reg(), x10, MemOperand(x0, 2 * kPointerSize));
- }
+ // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
+ __ Add(x10, args, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
+ __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc and
+ // FunctionCallbackInfo::is_construct_call = 0
+ __ Mov(x10, argc());
+ __ Stp(x10, xzr, MemOperand(x0, 2 * kPointerSize));
ExternalReference thunk_ref =
ExternalReference::invoke_function_callback(masm->isolate());
@@ -5907,7 +5862,7 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
// Stores return the first js argument
int return_value_offset = 0;
- if (return_first_arg) {
+ if (is_store()) {
return_value_offset = 2 + FCA::kArgsLength;
} else {
return_value_offset = 2 + FCA::kReturnValueOffset;
@@ -5917,10 +5872,8 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
MemOperand is_construct_call_operand =
MemOperand(masm->StackPointer(), 4 * kPointerSize);
MemOperand* stack_space_operand = &is_construct_call_operand;
- if (argc.is_immediate()) {
- stack_space = argc.immediate() + FCA::kArgsLength + 1;
- stack_space_operand = NULL;
- }
+ stack_space = argc() + FCA::kArgsLength + 1;
+ stack_space_operand = NULL;
const int spill_offset = 1 + kApiStackSpace;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
@@ -5929,23 +5882,6 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
}
-void CallApiFunctionStub::Generate(MacroAssembler* masm) {
- bool call_data_undefined = this->call_data_undefined();
- CallApiFunctionStubHelper(masm, ParameterCount(x3), false,
- call_data_undefined, false);
-}
-
-
-void CallApiAccessorStub::Generate(MacroAssembler* masm) {
- bool is_store = this->is_store();
- int argc = this->argc();
- bool call_data_undefined = this->call_data_undefined();
- bool is_lazy = this->is_lazy();
- CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
- call_data_undefined, is_lazy);
-}
-
-
void CallApiGetterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- sp[0] : name
diff --git a/deps/v8/src/arm64/cpu-arm64.cc b/deps/v8/src/arm64/cpu-arm64.cc
index 37bb4a22ba..712dbbd650 100644
--- a/deps/v8/src/arm64/cpu-arm64.cc
+++ b/deps/v8/src/arm64/cpu-arm64.cc
@@ -19,7 +19,7 @@ class CacheLineSizes {
cache_type_register_ = 0;
#else
// Copy the content of the cache type register to a core register.
- __asm__ __volatile__("mrs %[ctr], ctr_el0" // NOLINT
+ __asm__ __volatile__("mrs %x[ctr], ctr_el0" // NOLINT
: [ctr] "=r"(cache_type_register_));
#endif
}
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index 3aa1e4dfa1..fe2a269935 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -65,12 +65,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
-bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
- // There is no dynamic alignment padding on ARM64 in the input frame.
- return false;
-}
-
-
void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
ApiFunction function(descriptor->deoptimization_handler());
@@ -132,12 +126,17 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// address for lazy deoptimization.
__ Mov(code_object, lr);
// Compute the fp-to-sp delta, and correct one word for bailout id.
- __ Add(fp_to_sp, masm()->StackPointer(),
+ __ Add(fp_to_sp, __ StackPointer(),
kSavedRegistersAreaSize + (1 * kPointerSize));
__ Sub(fp_to_sp, fp, fp_to_sp);
// Allocate a new deoptimizer object.
+ __ Mov(x0, 0);
+ Label context_check;
+ __ Ldr(x1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ JumpIfSmi(x1, &context_check);
__ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ bind(&context_check);
__ Mov(x1, type());
// Following arguments are already loaded:
// - x2: bailout id
@@ -212,6 +211,9 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
__ Pop(x4); // Restore deoptimizer object (class Deoptimizer).
+ __ Ldr(__ StackPointer(),
+ MemOperand(x4, Deoptimizer::caller_frame_top_offset()));
+
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop,
outer_loop_header, inner_loop_header;
diff --git a/deps/v8/src/arm64/frames-arm64.h b/deps/v8/src/arm64/frames-arm64.h
index 783514437f..f1e45f5fdc 100644
--- a/deps/v8/src/arm64/frames-arm64.h
+++ b/deps/v8/src/arm64/frames-arm64.h
@@ -34,16 +34,11 @@ class EntryFrameConstants : public AllStatic {
-(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};
-
-class ExitFrameConstants : public AllStatic {
+class ExitFrameConstants : public TypedFrameConstants {
public:
- static const int kFrameSize = 2 * kPointerSize;
-
- static const int kCallerSPDisplacement = 2 * kPointerSize;
- static const int kCallerPCOffset = 1 * kPointerSize;
- static const int kCallerFPOffset = 0 * kPointerSize; // <- fp
- static const int kSPOffset = -1 * kPointerSize;
- static const int kCodeOffset = -2 * kPointerSize;
+ static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ DEFINE_TYPED_FRAME_SIZES(2);
static const int kLastExitFrameField = kCodeOffset;
static const int kConstantPoolOffset = 0; // Not used
@@ -59,7 +54,7 @@ class JavaScriptFrameConstants : public AllStatic {
// the arguments.
static const int kLastParameterOffset = 2 * kPointerSize;
- static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+ static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
};
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
index c6ae37e733..f307aeb6d4 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -114,37 +114,8 @@ void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
}
-void ToNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x0: value
- Register registers[] = {x0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-// static
-const Register ToLengthDescriptor::ReceiverRegister() { return x0; }
-
-
// static
-const Register ToStringDescriptor::ReceiverRegister() { return x0; }
-
-
-// static
-const Register ToNameDescriptor::ReceiverRegister() { return x0; }
-
-
-// static
-const Register ToObjectDescriptor::ReceiverRegister() { return x0; }
-
-
-void NumberToStringDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x0: value
- Register registers[] = {x0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
+const Register TypeConversionDescriptor::ArgumentRegister() { return x0; }
void TypeofDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -294,6 +265,13 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(0, nullptr, nullptr);
}
+#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
+ void Allocate##Type##Descriptor::InitializePlatformSpecific( \
+ CallInterfaceDescriptorData* data) { \
+ data->InitializePlatformSpecific(0, nullptr, nullptr); \
+ }
+SIMD128_TYPES(SIMD128_ALLOC_DESC)
+#undef SIMD128_ALLOC_DESC
void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -336,28 +314,18 @@ void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void CompareDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x1: left operand
- // x0: right operand
- Register registers[] = {x1, x0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void CompareNilDescriptor::InitializePlatformSpecific(
+void FastArrayPushDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // x0: value to compare
+ // stack param count needs (arg count)
Register registers[] = {x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void ToBooleanDescriptor::InitializePlatformSpecific(
+void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // x0: value
- Register registers[] = {x0};
+ // x1: left operand
+ // x0: right operand
+ Register registers[] = {x1, x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -444,25 +412,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
&default_descriptor);
}
-
-void ApiFunctionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- static PlatformInterfaceDescriptor default_descriptor =
- PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
-
- Register registers[] = {
- x0, // callee
- x4, // call_data
- x2, // holder
- x1, // api_function_address
- x3, // actual number of arguments
- };
- data->InitializePlatformSpecific(arraysize(registers), registers,
- &default_descriptor);
-}
-
-
-void ApiAccessorDescriptor::InitializePlatformSpecific(
+void ApiCallbackDescriptorBase::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index 953c3fd7f2..12ddd8145e 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -1355,6 +1355,14 @@ void MacroAssembler::AssertStackConsistency() {
}
}
+void MacroAssembler::AssertCspAligned() {
+ if (emit_debug_code() && use_real_aborts()) {
+ // TODO(titzer): use a real assert for alignment check?
+ UseScratchRegisterScope scope(this);
+ Register temp = scope.AcquireX();
+ ldr(temp, MemOperand(csp));
+ }
+}
void MacroAssembler::AssertFPCRState(Register fpcr) {
if (emit_debug_code()) {
@@ -1548,24 +1556,38 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
Register scratch1,
Register scratch2,
Label* no_memento_found) {
- ExternalReference new_space_start =
- ExternalReference::new_space_start(isolate());
+ Label map_check;
+ Label top_check;
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
-
- Add(scratch1, receiver,
- JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag);
- Cmp(scratch1, new_space_start);
- B(lt, no_memento_found);
-
- Mov(scratch2, new_space_allocation_top);
- Ldr(scratch2, MemOperand(scratch2));
- Cmp(scratch1, scratch2);
+ const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
+ const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+
+ // Bail out if the object is not in new space.
+ JumpIfNotInNewSpace(receiver, no_memento_found);
+ Add(scratch1, receiver, kMementoEndOffset);
+ // If the object is in new space, we need to check whether it is on the same
+ // page as the current top.
+ Eor(scratch2, scratch1, new_space_allocation_top);
+ Tst(scratch2, ~Page::kPageAlignmentMask);
+ B(eq, &top_check);
+ // The object is on a different page than allocation top. Bail out if the
+ // object sits on the page boundary as no memento can follow and we cannot
+ // touch the memory following it.
+ Eor(scratch2, scratch1, receiver);
+ Tst(scratch2, ~Page::kPageAlignmentMask);
+ B(ne, no_memento_found);
+ // Continue with the actual map check.
+ jmp(&map_check);
+ // If top is on the same page as the current object, we need to check whether
+ // we are below top.
+ bind(&top_check);
+ Cmp(scratch1, new_space_allocation_top);
B(gt, no_memento_found);
-
- Ldr(scratch1, MemOperand(scratch1, -AllocationMemento::kSize));
- Cmp(scratch1,
- Operand(isolate()->factory()->allocation_memento_map()));
+ // Memento map check.
+ bind(&map_check);
+ Ldr(scratch1, MemOperand(receiver, kMementoMapOffset));
+ Cmp(scratch1, Operand(isolate()->factory()->allocation_memento_map()));
}
@@ -1690,6 +1712,18 @@ void MacroAssembler::AssertPositiveOrZero(Register value) {
}
}
+void MacroAssembler::AssertNotNumber(Register value) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ Tst(value, kSmiTagMask);
+ Check(ne, kOperandIsANumber);
+ Label done;
+ JumpIfNotHeapNumber(value, &done);
+ Abort(kOperandIsANumber);
+ Bind(&done);
+ }
+}
+
void MacroAssembler::AssertNumber(Register value) {
if (emit_debug_code()) {
Label done;
@@ -2330,6 +2364,66 @@ void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register type,
B(ne, not_unique_name);
}
+void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
+ Register caller_args_count_reg,
+ Register scratch0, Register scratch1) {
+#if DEBUG
+ if (callee_args_count.is_reg()) {
+ DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
+ scratch1));
+ } else {
+ DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
+ }
+#endif
+
+ // Calculate the end of destination area where we will put the arguments
+ // after we drop current frame. We add kPointerSize to count the receiver
+ // argument which is not included into formal parameters count.
+ Register dst_reg = scratch0;
+ __ add(dst_reg, fp, Operand(caller_args_count_reg, LSL, kPointerSizeLog2));
+ __ add(dst_reg, dst_reg,
+ Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+
+ Register src_reg = caller_args_count_reg;
+ // Calculate the end of source area. +kPointerSize is for the receiver.
+ if (callee_args_count.is_reg()) {
+ add(src_reg, jssp, Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
+ add(src_reg, src_reg, Operand(kPointerSize));
+ } else {
+ add(src_reg, jssp,
+ Operand((callee_args_count.immediate() + 1) * kPointerSize));
+ }
+
+ if (FLAG_debug_code) {
+ __ Cmp(src_reg, dst_reg);
+ __ Check(lo, kStackAccessBelowStackPointer);
+ }
+
+ // Restore caller's frame pointer and return address now as they will be
+ // overwritten by the copying loop.
+ __ Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Now copy callee arguments to the caller frame going backwards to avoid
+ // callee arguments corruption (source and destination areas could overlap).
+
+ // Both src_reg and dst_reg are pointing to the word after the one to copy,
+ // so they must be pre-decremented in the loop.
+ Register tmp_reg = scratch1;
+ Label loop, entry;
+ __ B(&entry);
+ __ bind(&loop);
+ __ Ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
+ __ Str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
+ __ bind(&entry);
+ __ Cmp(jssp, src_reg);
+ __ B(ne, &loop);
+
+ // Leave current frame.
+ __ Mov(jssp, dst_reg);
+ __ SetStackPointer(jssp);
+ __ AssertStackConsistency();
+}
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
@@ -2651,18 +2745,17 @@ void MacroAssembler::TruncateHeapNumberToI(Register result,
Bind(&done);
}
-
-void MacroAssembler::StubPrologue() {
+void MacroAssembler::StubPrologue(StackFrame::Type type, int frame_slots) {
UseScratchRegisterScope temps(this);
+ frame_slots -= TypedFrameConstants::kFixedSlotCountAboveFp;
Register temp = temps.AcquireX();
- __ Mov(temp, Smi::FromInt(StackFrame::STUB));
- // Compiled stubs don't age, and so they don't need the predictable code
- // ageing sequence.
- __ Push(lr, fp, cp, temp);
- __ Add(fp, StackPointer(), StandardFrameConstants::kFixedFrameSizeFromFp);
+ Mov(temp, Smi::FromInt(type));
+ Push(lr, fp);
+ Mov(fp, StackPointer());
+ Claim(frame_slots);
+ str(temp, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
}
-
void MacroAssembler::Prologue(bool code_pre_aging) {
if (code_pre_aging) {
Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
@@ -2694,18 +2787,26 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
Register type_reg = temps.AcquireX();
Register code_reg = temps.AcquireX();
- Push(lr, fp, cp);
- Mov(type_reg, Smi::FromInt(type));
- Mov(code_reg, Operand(CodeObject()));
- Push(type_reg, code_reg);
- // jssp[4] : lr
- // jssp[3] : fp
- // jssp[2] : cp
- // jssp[1] : type
- // jssp[0] : code object
-
- // Adjust FP to point to saved FP.
- Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+ if (type == StackFrame::INTERNAL) {
+ Mov(type_reg, Smi::FromInt(type));
+ Push(lr, fp);
+ Push(type_reg);
+ Mov(code_reg, Operand(CodeObject()));
+ Push(code_reg);
+ Add(fp, jssp, InternalFrameConstants::kFixedFrameSizeFromFp);
+ // jssp[4] : lr
+ // jssp[3] : fp
+ // jssp[1] : type
+ // jssp[0] : [code object]
+ } else {
+ Mov(type_reg, Smi::FromInt(type));
+ Push(lr, fp);
+ Push(type_reg);
+ Add(fp, jssp, TypedFrameConstants::kFixedFrameSizeFromFp);
+ // jssp[2] : lr
+ // jssp[1] : fp
+ // jssp[0] : type
+ }
}
@@ -2746,20 +2847,23 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
DCHECK(jssp.Is(StackPointer()));
// Set up the new stack frame.
- Mov(scratch, Operand(CodeObject()));
Push(lr, fp);
Mov(fp, StackPointer());
- Push(xzr, scratch);
+ Mov(scratch, Smi::FromInt(StackFrame::EXIT));
+ Push(scratch);
+ Push(xzr);
+ Mov(scratch, Operand(CodeObject()));
+ Push(scratch);
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
- // fp[-8]: Space reserved for SPOffset.
- // jssp -> fp[-16]: CodeObject()
- STATIC_ASSERT((2 * kPointerSize) ==
- ExitFrameConstants::kCallerSPDisplacement);
+ // fp[-8]: STUB marker
+ // fp[-16]: Space reserved for SPOffset.
+ // jssp -> fp[-24]: CodeObject()
+ STATIC_ASSERT((2 * kPointerSize) == ExitFrameConstants::kCallerSPOffset);
STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset);
STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset);
- STATIC_ASSERT((-1 * kPointerSize) == ExitFrameConstants::kSPOffset);
- STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kCodeOffset);
+ STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kSPOffset);
+ STATIC_ASSERT((-3 * kPointerSize) == ExitFrameConstants::kCodeOffset);
// Save the frame pointer and context pointer in the top frame.
Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
@@ -2769,8 +2873,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
isolate())));
Str(cp, MemOperand(scratch));
- STATIC_ASSERT((-2 * kPointerSize) ==
- ExitFrameConstants::kLastExitFrameField);
+ STATIC_ASSERT((-3 * kPointerSize) == ExitFrameConstants::kLastExitFrameField);
if (save_doubles) {
ExitFramePreserveFPRegs();
}
@@ -2781,9 +2884,10 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
Claim(extra_space + 1, kXRegSize);
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
- // fp[-8]: Space reserved for SPOffset.
- // fp[-16]: CodeObject()
- // fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
+ // fp[-8]: STUB marker
+ // fp[-16]: Space reserved for SPOffset.
+ // fp[-24]: CodeObject()
+ // fp[-24 - fp_size]: Saved doubles (if save_doubles is true).
// jssp[8]: Extra space reserved for caller (if extra_space != 0).
// jssp -> jssp[0]: Space reserved for the return address.
@@ -2793,9 +2897,10 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
- // fp[-8]: Space reserved for SPOffset.
- // fp[-16]: CodeObject()
- // fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
+ // fp[-8]: STUB marker
+ // fp[-16]: Space reserved for SPOffset.
+ // fp[-24]: CodeObject()
+ // fp[-24 - fp_size]: Saved doubles (if save_doubles is true).
// csp[8]: Memory reserved for the caller if extra_space != 0.
// Alignment padding, if necessary.
// csp -> csp[0]: Space reserved for the return address.
@@ -3678,8 +3783,19 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
DCHECK(!AreAliased(holder_reg, scratch1, scratch2));
Label same_contexts;
- // Load current lexical context from the stack frame.
- Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Load current lexical context from the active StandardFrame, which
+ // may require crawling past STUB frames.
+ Label load_context;
+ Label has_context;
+ Mov(scratch2, fp);
+ bind(&load_context);
+ Ldr(scratch1,
+ MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
+ JumpIfNotSmi(scratch1, &has_context);
+ Ldr(scratch2, MemOperand(scratch2, CommonFrameConstants::kCallerFPOffset));
+ B(&load_context);
+ bind(&has_context);
+
// In debug mode, make sure the lexical context is set.
#ifdef DEBUG
Cmp(scratch1, 0);
@@ -3916,13 +4032,12 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Str(scratch1, MemOperand(scratch2));
// Call stub on end of buffer.
// Check for end of buffer.
- DCHECK(StoreBuffer::kStoreBufferOverflowBit ==
- (1 << (14 + kPointerSizeLog2)));
+ Tst(scratch1, StoreBuffer::kStoreBufferMask);
if (and_then == kFallThroughAtEnd) {
- Tbz(scratch1, (14 + kPointerSizeLog2), &done);
+ B(ne, &done);
} else {
DCHECK(and_then == kReturnAtEnd);
- Tbnz(scratch1, (14 + kPointerSizeLog2), &store_buffer_overflow);
+ B(eq, &store_buffer_overflow);
Ret();
}
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index ff41c4f27f..4b6b3c0fb1 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -788,6 +788,9 @@ class MacroAssembler : public Assembler {
// If emit_debug_code() is false, this emits no code.
void AssertStackConsistency();
+ // Emits a runtime assert that the CSP is aligned.
+ void AssertCspAligned();
+
// Preserve the callee-saved registers (as defined by AAPCS64).
//
// Higher-numbered registers are pushed before lower-numbered registers, and
@@ -895,6 +898,7 @@ class MacroAssembler : public Assembler {
// This is required for compatibility with architecture independant code.
// Remove if not needed.
inline void Move(Register dst, Register src) { Mov(dst, src); }
+ inline void Move(Register dst, Handle<Object> x) { LoadObject(dst, x); }
inline void Move(Register dst, Smi* src) { Mov(dst, src); }
void LoadInstanceDescriptors(Register map,
@@ -986,6 +990,7 @@ class MacroAssembler : public Assembler {
// Abort execution if argument is not a number (heap number or smi).
void AssertNumber(Register value);
+ void AssertNotNumber(Register value);
void JumpIfHeapNumber(Register object, Label* on_heap_number,
SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
@@ -1165,6 +1170,15 @@ class MacroAssembler : public Assembler {
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
TypeFeedbackId ast_id = TypeFeedbackId::None());
+ // Removes current frame and its arguments from the stack preserving
+ // the arguments and a return address pushed to the stack for the next call.
+ // Both |callee_args_count| and |caller_args_count_reg| do not include
+ // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
+ // is trashed.
+ void PrepareForTailCall(const ParameterCount& callee_args_count,
+ Register caller_args_count_reg, Register scratch0,
+ Register scratch1);
+
// Registers used through the invocation chain are hard-coded.
// We force passing the parameters to ensure the contracts are correctly
// honoured by the caller.
@@ -1621,7 +1635,7 @@ class MacroAssembler : public Assembler {
void ExitFrameRestoreFPRegs();
// Generates function and stub prologue code.
- void StubPrologue();
+ void StubPrologue(StackFrame::Type type, int frame_slots);
void Prologue(bool code_pre_aging);
// Enter exit frame. Exit frames are used when calling C code from generated
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index 5c8c2ce16d..a912bb60e4 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -60,7 +60,8 @@
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/simulator.h" // For flushing instruction cache.
-#include "src/snapshot/serialize.h"
+#include "src/snapshot/serializer-common.h"
+#include "src/wasm/wasm-external-refs.h"
#if V8_TARGET_ARCH_IA32
#include "src/ia32/assembler-ia32-inl.h" // NOLINT
@@ -76,6 +77,8 @@
#include "src/mips/assembler-mips-inl.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/assembler-mips64-inl.h" // NOLINT
+#elif V8_TARGET_ARCH_S390
+#include "src/s390/assembler-s390-inl.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/assembler-x87-inl.h" // NOLINT
#else
@@ -98,6 +101,8 @@
#include "src/regexp/mips/regexp-macro-assembler-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/regexp/mips64/regexp-macro-assembler-mips64.h" // NOLINT
+#elif V8_TARGET_ARCH_S390
+#include "src/regexp/s390/regexp-macro-assembler-s390.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/regexp/x87/regexp-macro-assembler-x87.h" // NOLINT
#else // Unknown architecture.
@@ -833,10 +838,14 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "debug break slot at return";
case DEBUG_BREAK_SLOT_AT_CALL:
return "debug break slot at call";
+ case DEBUG_BREAK_SLOT_AT_TAIL_CALL:
+ return "debug break slot at tail call";
case CODE_AGE_SEQUENCE:
return "code age sequence";
case GENERATOR_CONTINUATION:
return "generator continuation";
+ case WASM_MEMORY_REFERENCE:
+ return "wasm memory reference";
case NUMBER_OF_MODES:
case PC_JUMP:
UNREACHABLE();
@@ -929,7 +938,9 @@ void RelocInfo::Verify(Isolate* isolate) {
case DEBUG_BREAK_SLOT_AT_POSITION:
case DEBUG_BREAK_SLOT_AT_RETURN:
case DEBUG_BREAK_SLOT_AT_CALL:
+ case DEBUG_BREAK_SLOT_AT_TAIL_CALL:
case GENERATOR_CONTINUATION:
+ case WASM_MEMORY_REFERENCE:
case NONE32:
case NONE64:
break;
@@ -1147,66 +1158,199 @@ ExternalReference ExternalReference::compute_output_frames_function(
Redirect(isolate, FUNCTION_ADDR(Deoptimizer::ComputeOutputFrames)));
}
-static void f32_trunc_wrapper(float* param) { *param = truncf(*param); }
+ExternalReference ExternalReference::wasm_f32_trunc(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::f32_trunc_wrapper)));
+}
+ExternalReference ExternalReference::wasm_f32_floor(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::f32_floor_wrapper)));
+}
+ExternalReference ExternalReference::wasm_f32_ceil(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::f32_ceil_wrapper)));
+}
+ExternalReference ExternalReference::wasm_f32_nearest_int(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::f32_nearest_int_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_f64_trunc(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::f64_trunc_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_f64_floor(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::f64_floor_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_f64_ceil(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::f64_ceil_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_f64_nearest_int(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::f64_nearest_int_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_int64_to_float32(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::int64_to_float32_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_uint64_to_float32(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::uint64_to_float32_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_int64_to_float64(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::int64_to_float64_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_uint64_to_float64(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::uint64_to_float64_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_float32_to_int64(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::float32_to_int64_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_float32_to_uint64(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::float32_to_uint64_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_float64_to_int64(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::float64_to_int64_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_float64_to_uint64(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::float64_to_uint64_wrapper)));
+}
-ExternalReference ExternalReference::f32_trunc_wrapper_function(
+ExternalReference ExternalReference::wasm_int64_div(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::int64_div_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_int64_mod(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::int64_mod_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_uint64_div(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::uint64_div_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_uint64_mod(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::uint64_mod_wrapper)));
+}
+
+static void f64_acos_wrapper(double* param) { *param = std::acos(*param); }
+
+ExternalReference ExternalReference::f64_acos_wrapper_function(
Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f32_trunc_wrapper)));
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_acos_wrapper)));
}
-static void f32_floor_wrapper(float* param) { *param = floorf(*param); }
+static void f64_asin_wrapper(double* param) { *param = std::asin(*param); }
-ExternalReference ExternalReference::f32_floor_wrapper_function(
+ExternalReference ExternalReference::f64_asin_wrapper_function(
Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f32_floor_wrapper)));
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_asin_wrapper)));
}
-static void f32_ceil_wrapper(float* param) { *param = ceilf(*param); }
+static void f64_atan_wrapper(double* param) { *param = std::atan(*param); }
-ExternalReference ExternalReference::f32_ceil_wrapper_function(
+ExternalReference ExternalReference::f64_atan_wrapper_function(
Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f32_ceil_wrapper)));
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_atan_wrapper)));
}
-static void f32_nearest_int_wrapper(float* param) {
- *param = nearbyintf(*param);
+static void f64_cos_wrapper(double* param) { *param = std::cos(*param); }
+
+ExternalReference ExternalReference::f64_cos_wrapper_function(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_cos_wrapper)));
}
-ExternalReference ExternalReference::f32_nearest_int_wrapper_function(
+static void f64_sin_wrapper(double* param) { *param = std::sin(*param); }
+
+ExternalReference ExternalReference::f64_sin_wrapper_function(
Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(f32_nearest_int_wrapper)));
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_sin_wrapper)));
}
-static void f64_trunc_wrapper(double* param) { *param = trunc(*param); }
+static void f64_tan_wrapper(double* param) { *param = std::tan(*param); }
-ExternalReference ExternalReference::f64_trunc_wrapper_function(
+ExternalReference ExternalReference::f64_tan_wrapper_function(
Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_trunc_wrapper)));
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_tan_wrapper)));
}
-static void f64_floor_wrapper(double* param) { *param = floor(*param); }
+static void f64_exp_wrapper(double* param) { *param = std::exp(*param); }
-ExternalReference ExternalReference::f64_floor_wrapper_function(
+ExternalReference ExternalReference::f64_exp_wrapper_function(
Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_floor_wrapper)));
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_exp_wrapper)));
}
-static void f64_ceil_wrapper(double* param) { *param = ceil(*param); }
+static void f64_log_wrapper(double* param) { *param = std::log(*param); }
-ExternalReference ExternalReference::f64_ceil_wrapper_function(
+ExternalReference ExternalReference::f64_log_wrapper_function(
Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_ceil_wrapper)));
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_log_wrapper)));
}
-static void f64_nearest_int_wrapper(double* param) {
- *param = nearbyint(*param);
+static void f64_pow_wrapper(double* param0, double* param1) {
+ *param0 = power_double_double(*param0, *param1);
}
-ExternalReference ExternalReference::f64_nearest_int_wrapper_function(
+ExternalReference ExternalReference::f64_pow_wrapper_function(
Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(f64_nearest_int_wrapper)));
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_pow_wrapper)));
+}
+
+static void f64_atan2_wrapper(double* param0, double* param1) {
+ double x = *param0;
+ double y = *param1;
+ // TODO(bradnelson): Find a good place to put this to share
+ // with the same code in src/runtime/runtime-math.cc
+ static const double kPiDividedBy4 = 0.78539816339744830962;
+ if (std::isinf(x) && std::isinf(y)) {
+ // Make sure that the result in case of two infinite arguments
+ // is a multiple of Pi / 4. The sign of the result is determined
+ // by the first argument (x) and the sign of the second argument
+ // determines the multiplier: one or three.
+ int multiplier = (x < 0) ? -1 : 1;
+ if (y < 0) multiplier *= 3;
+ *param0 = multiplier * kPiDividedBy4;
+ } else {
+ *param0 = std::atan2(x, y);
+ }
+}
+
+ExternalReference ExternalReference::f64_atan2_wrapper_function(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_atan2_wrapper)));
+}
+
+static void f64_mod_wrapper(double* param0, double* param1) {
+ *param0 = modulo(*param0, *param1);
+}
+
+ExternalReference ExternalReference::f64_mod_wrapper_function(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_mod_wrapper)));
}
ExternalReference ExternalReference::log_enter_external_function(
@@ -1262,12 +1406,6 @@ ExternalReference ExternalReference::address_of_regexp_stack_limit(
return ExternalReference(isolate->regexp_stack()->limit_address());
}
-
-ExternalReference ExternalReference::new_space_start(Isolate* isolate) {
- return ExternalReference(isolate->heap()->NewSpaceStart());
-}
-
-
ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) {
return ExternalReference(isolate->heap()->store_buffer_top_address());
}
@@ -1404,6 +1542,8 @@ ExternalReference ExternalReference::re_check_stack_guard_state(
function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
#elif V8_TARGET_ARCH_MIPS64
function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
+#elif V8_TARGET_ARCH_S390
+ function = FUNCTION_ADDR(RegExpMacroAssemblerS390::CheckStackGuardState);
#elif V8_TARGET_ARCH_X87
function = FUNCTION_ADDR(RegExpMacroAssemblerX87::CheckStackGuardState);
#else
@@ -1489,6 +1629,10 @@ ExternalReference ExternalReference::cpu_features() {
return ExternalReference(&CpuFeatures::supported_);
}
+ExternalReference ExternalReference::is_tail_call_elimination_enabled_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->is_tail_call_elimination_enabled_address());
+}
ExternalReference ExternalReference::debug_is_active_address(
Isolate* isolate) {
@@ -1559,34 +1703,12 @@ double power_double_int(double x, int y) {
double power_double_double(double x, double y) {
-#if (defined(__MINGW64_VERSION_MAJOR) && \
- (!defined(__MINGW64_VERSION_RC) || __MINGW64_VERSION_RC < 1)) || \
- defined(V8_OS_AIX)
- // MinGW64 and AIX have a custom implementation for pow. This handles certain
- // special cases that are different.
- if ((x == 0.0 || std::isinf(x)) && y != 0.0 && std::isfinite(y)) {
- double f;
- double result = ((x == 0.0) ^ (y > 0)) ? V8_INFINITY : 0;
- /* retain sign if odd integer exponent */
- return ((std::modf(y, &f) == 0.0) && (static_cast<int64_t>(y) & 1))
- ? copysign(result, x)
- : result;
- }
-
- if (x == 2.0) {
- int y_int = static_cast<int>(y);
- if (y == y_int) {
- return std::ldexp(1.0, y_int);
- }
- }
-#endif
-
// The checks for special cases can be dropped in ia32 because it has already
// been done in generated code before bailing out here.
if (std::isnan(y) || ((x == 1 || x == -1) && std::isinf(y))) {
return std::numeric_limits<double>::quiet_NaN();
}
- return std::pow(x, y);
+ return Pow(x, y);
}
@@ -1648,8 +1770,7 @@ std::ostream& operator<<(std::ostream& os, ExternalReference reference) {
return os;
}
-
-void PositionsRecorder::RecordPosition(int pos) {
+void AssemblerPositionsRecorder::RecordPosition(int pos) {
DCHECK(pos != RelocInfo::kNoPosition);
DCHECK(pos >= 0);
state_.current_position = pos;
@@ -1659,8 +1780,7 @@ void PositionsRecorder::RecordPosition(int pos) {
pos));
}
-
-void PositionsRecorder::RecordStatementPosition(int pos) {
+void AssemblerPositionsRecorder::RecordStatementPosition(int pos) {
DCHECK(pos != RelocInfo::kNoPosition);
DCHECK(pos >= 0);
state_.current_statement_position = pos;
@@ -1671,8 +1791,7 @@ void PositionsRecorder::RecordStatementPosition(int pos) {
pos));
}
-
-bool PositionsRecorder::WriteRecordedPositions() {
+bool AssemblerPositionsRecorder::WriteRecordedPositions() {
bool written = false;
// Write the statement position if it is different from what was written last
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 7bd9ee65f2..192d16b64d 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -38,6 +38,7 @@
#include "src/allocation.h"
#include "src/builtins.h"
#include "src/isolate.h"
+#include "src/log.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -384,6 +385,8 @@ class RelocInfo {
DEBUGGER_STATEMENT, // Code target for the debugger statement.
EMBEDDED_OBJECT,
CELL,
+ // To relocate pointers into the wasm memory embedded in wasm code
+ WASM_MEMORY_REFERENCE,
// Everything after runtime_entry (inclusive) is not GC'ed.
RUNTIME_ENTRY,
@@ -395,6 +398,7 @@ class RelocInfo {
DEBUG_BREAK_SLOT_AT_POSITION,
DEBUG_BREAK_SLOT_AT_RETURN,
DEBUG_BREAK_SLOT_AT_CALL,
+ DEBUG_BREAK_SLOT_AT_TAIL_CALL,
EXTERNAL_REFERENCE, // The address of an external C++ function.
INTERNAL_REFERENCE, // An address inside the same function.
@@ -426,7 +430,8 @@ class RelocInfo {
FIRST_REAL_RELOC_MODE = CODE_TARGET,
LAST_REAL_RELOC_MODE = VENEER_POOL,
LAST_CODE_ENUM = DEBUGGER_STATEMENT,
- LAST_GCED_ENUM = CELL,
+ LAST_GCED_ENUM = WASM_MEMORY_REFERENCE,
+ FIRST_SHAREABLE_RELOC_MODE = CELL,
};
STATIC_ASSERT(NUMBER_OF_MODES <= kBitsPerInt);
@@ -487,7 +492,7 @@ class RelocInfo {
}
static inline bool IsDebugBreakSlot(Mode mode) {
return IsDebugBreakSlotAtPosition(mode) || IsDebugBreakSlotAtReturn(mode) ||
- IsDebugBreakSlotAtCall(mode);
+ IsDebugBreakSlotAtCall(mode) || IsDebugBreakSlotAtTailCall(mode);
}
static inline bool IsDebugBreakSlotAtPosition(Mode mode) {
return mode == DEBUG_BREAK_SLOT_AT_POSITION;
@@ -498,6 +503,9 @@ class RelocInfo {
static inline bool IsDebugBreakSlotAtCall(Mode mode) {
return mode == DEBUG_BREAK_SLOT_AT_CALL;
}
+ static inline bool IsDebugBreakSlotAtTailCall(Mode mode) {
+ return mode == DEBUG_BREAK_SLOT_AT_TAIL_CALL;
+ }
static inline bool IsDebuggerStatement(Mode mode) {
return mode == DEBUGGER_STATEMENT;
}
@@ -510,6 +518,9 @@ class RelocInfo {
static inline bool IsGeneratorContinuation(Mode mode) {
return mode == GENERATOR_CONTINUATION;
}
+ static inline bool IsWasmMemoryReference(Mode mode) {
+ return mode == WASM_MEMORY_REFERENCE;
+ }
static inline int ModeMask(Mode mode) { return 1 << mode; }
// Accessors
@@ -570,6 +581,10 @@ class RelocInfo {
ICacheFlushMode icache_flush_mode =
FLUSH_ICACHE_IF_NEEDED));
+ INLINE(Address wasm_memory_reference());
+ INLINE(void update_wasm_memory_reference(
+ Address old_base, Address new_base, size_t old_size, size_t new_size,
+ ICacheFlushMode icache_flush_mode = SKIP_ICACHE_FLUSH));
// Returns the address of the constant pool entry where the target address
// is held. This should only be called if IsInConstantPool returns true.
INLINE(Address constant_pool_entry_address());
@@ -913,14 +928,38 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference new_deoptimizer_function(Isolate* isolate);
static ExternalReference compute_output_frames_function(Isolate* isolate);
- static ExternalReference f32_trunc_wrapper_function(Isolate* isolate);
- static ExternalReference f32_floor_wrapper_function(Isolate* isolate);
- static ExternalReference f32_ceil_wrapper_function(Isolate* isolate);
- static ExternalReference f32_nearest_int_wrapper_function(Isolate* isolate);
- static ExternalReference f64_trunc_wrapper_function(Isolate* isolate);
- static ExternalReference f64_floor_wrapper_function(Isolate* isolate);
- static ExternalReference f64_ceil_wrapper_function(Isolate* isolate);
- static ExternalReference f64_nearest_int_wrapper_function(Isolate* isolate);
+ static ExternalReference wasm_f32_trunc(Isolate* isolate);
+ static ExternalReference wasm_f32_floor(Isolate* isolate);
+ static ExternalReference wasm_f32_ceil(Isolate* isolate);
+ static ExternalReference wasm_f32_nearest_int(Isolate* isolate);
+ static ExternalReference wasm_f64_trunc(Isolate* isolate);
+ static ExternalReference wasm_f64_floor(Isolate* isolate);
+ static ExternalReference wasm_f64_ceil(Isolate* isolate);
+ static ExternalReference wasm_f64_nearest_int(Isolate* isolate);
+ static ExternalReference wasm_int64_to_float32(Isolate* isolate);
+ static ExternalReference wasm_uint64_to_float32(Isolate* isolate);
+ static ExternalReference wasm_int64_to_float64(Isolate* isolate);
+ static ExternalReference wasm_uint64_to_float64(Isolate* isolate);
+ static ExternalReference wasm_float32_to_int64(Isolate* isolate);
+ static ExternalReference wasm_float32_to_uint64(Isolate* isolate);
+ static ExternalReference wasm_float64_to_int64(Isolate* isolate);
+ static ExternalReference wasm_float64_to_uint64(Isolate* isolate);
+ static ExternalReference wasm_int64_div(Isolate* isolate);
+ static ExternalReference wasm_int64_mod(Isolate* isolate);
+ static ExternalReference wasm_uint64_div(Isolate* isolate);
+ static ExternalReference wasm_uint64_mod(Isolate* isolate);
+
+ static ExternalReference f64_acos_wrapper_function(Isolate* isolate);
+ static ExternalReference f64_asin_wrapper_function(Isolate* isolate);
+ static ExternalReference f64_atan_wrapper_function(Isolate* isolate);
+ static ExternalReference f64_cos_wrapper_function(Isolate* isolate);
+ static ExternalReference f64_sin_wrapper_function(Isolate* isolate);
+ static ExternalReference f64_tan_wrapper_function(Isolate* isolate);
+ static ExternalReference f64_exp_wrapper_function(Isolate* isolate);
+ static ExternalReference f64_log_wrapper_function(Isolate* isolate);
+ static ExternalReference f64_atan2_wrapper_function(Isolate* isolate);
+ static ExternalReference f64_pow_wrapper_function(Isolate* isolate);
+ static ExternalReference f64_mod_wrapper_function(Isolate* isolate);
// Log support.
static ExternalReference log_enter_external_function(Isolate* isolate);
@@ -952,9 +991,6 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference address_of_regexp_stack_memory_size(
Isolate* isolate);
- // Static variable Heap::NewSpaceStart()
- static ExternalReference new_space_start(Isolate* isolate);
-
// Write barrier.
static ExternalReference store_buffer_top(Isolate* isolate);
@@ -994,6 +1030,9 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference cpu_features();
+ static ExternalReference is_tail_call_elimination_enabled_address(
+ Isolate* isolate);
+
static ExternalReference debug_is_active_address(Isolate* isolate);
static ExternalReference debug_after_break_target_address(Isolate* isolate);
@@ -1085,23 +1124,11 @@ struct PositionState {
int written_statement_position;
};
-
-class PositionsRecorder BASE_EMBEDDED {
+class AssemblerPositionsRecorder : public PositionsRecorder {
public:
- explicit PositionsRecorder(Assembler* assembler)
- : assembler_(assembler) {
- jit_handler_data_ = NULL;
- }
-
- void AttachJITHandlerData(void* user_data) {
- jit_handler_data_ = user_data;
- }
+ explicit AssemblerPositionsRecorder(Assembler* assembler)
+ : assembler_(assembler) {}
- void* DetachJITHandlerData() {
- void* old_data = jit_handler_data_;
- jit_handler_data_ = NULL;
- return old_data;
- }
// Set current position to pos.
void RecordPosition(int pos);
@@ -1121,11 +1148,7 @@ class PositionsRecorder BASE_EMBEDDED {
Assembler* assembler_;
PositionState state_;
- // Currently jit_handler_data_ is used to store JITHandler-specific data
- // over the lifetime of a PositionsRecorder
- void* jit_handler_data_;
-
- DISALLOW_COPY_AND_ASSIGN(PositionsRecorder);
+ DISALLOW_COPY_AND_ASSIGN(AssemblerPositionsRecorder);
};
diff --git a/deps/v8/src/ast/ast-numbering.cc b/deps/v8/src/ast/ast-numbering.cc
index 272f9bde11..f54333ff1f 100644
--- a/deps/v8/src/ast/ast-numbering.cc
+++ b/deps/v8/src/ast/ast-numbering.cc
@@ -138,7 +138,6 @@ void AstNumberingVisitor::VisitNativeFunctionLiteral(
void AstNumberingVisitor::VisitDoExpression(DoExpression* node) {
IncrementNodeCount();
- DisableCrankshaft(kDoExpression);
node->set_base_id(ReserveIdRange(DoExpression::num_ids()));
Visit(node->block());
Visit(node->result());
@@ -267,10 +266,6 @@ void AstNumberingVisitor::VisitFunctionDeclaration(FunctionDeclaration* node) {
void AstNumberingVisitor::VisitCallRuntime(CallRuntime* node) {
IncrementNodeCount();
ReserveFeedbackSlots(node);
- if (node->is_jsruntime()) {
- // Don't try to optimize JS runtime calls because we bailout on them.
- DisableOptimization(kCallToAJavaScriptRuntimeFunction);
- }
node->set_base_id(ReserveIdRange(CallRuntime::num_ids()));
VisitArguments(node->arguments());
}
@@ -504,9 +499,6 @@ void AstNumberingVisitor::VisitArrayLiteral(ArrayLiteral* node) {
void AstNumberingVisitor::VisitCall(Call* node) {
IncrementNodeCount();
- if (node->tail_call_mode() == TailCallMode::kAllow) {
- DisableOptimization(kTailCall);
- }
ReserveFeedbackSlots(node);
node->set_base_id(ReserveIdRange(Call::num_ids()));
Visit(node->expression());
@@ -571,12 +563,6 @@ bool AstNumberingVisitor::Finish(FunctionLiteral* node) {
bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
Scope* scope = node->scope();
-
- if (scope->HasIllegalRedeclaration()) {
- Visit(scope->GetIllegalRedeclaration());
- DisableOptimization(kFunctionWithIllegalRedeclaration);
- return Finish(node);
- }
if (scope->new_target_var()) DisableCrankshaft(kSuperReference);
if (scope->calls_eval()) DisableOptimization(kFunctionCallsEval);
if (scope->arguments() != NULL && !scope->arguments()->IsStackAllocated()) {
diff --git a/deps/v8/src/ast/ast-value-factory.h b/deps/v8/src/ast/ast-value-factory.h
index 85e8277d80..8b3f0ed252 100644
--- a/deps/v8/src/ast/ast-value-factory.h
+++ b/deps/v8/src/ast/ast-value-factory.h
@@ -271,7 +271,6 @@ class AstValue : public ZoneObject {
F(throw, "throw") \
F(undefined, "undefined") \
F(use_asm, "use asm") \
- F(use_strong, "use strong") \
F(use_strict, "use strict") \
F(value, "value")
diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc
index 9b2c6388c1..e8b6269648 100644
--- a/deps/v8/src/ast/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -36,17 +36,11 @@ AST_NODE_LIST(DECL_ACCEPT)
#ifdef DEBUG
-void AstNode::Print() { Print(Isolate::Current()); }
-
-
void AstNode::Print(Isolate* isolate) {
AstPrinter::PrintOut(isolate, this);
}
-void AstNode::PrettyPrint() { PrettyPrint(Isolate::Current()); }
-
-
void AstNode::PrettyPrint(Isolate* isolate) {
PrettyPrinter::PrintOut(isolate, this);
}
@@ -68,8 +62,11 @@ bool Expression::IsNullLiteral() const {
return IsLiteral() && AsLiteral()->value()->IsNull();
}
+bool Expression::IsUndefinedLiteral() const {
+ if (IsLiteral() && AsLiteral()->value()->IsUndefined()) {
+ return true;
+ }
-bool Expression::IsUndefinedLiteral(Isolate* isolate) const {
const VariableProxy* var_proxy = AsVariableProxy();
if (var_proxy == NULL) return false;
Variable* var = var_proxy->var();
@@ -154,15 +151,11 @@ static void AssignVectorSlots(Expression* expr, FeedbackVectorSpec* spec,
}
}
-
-void ForEachStatement::AssignFeedbackVectorSlots(
- Isolate* isolate, FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache) {
- // TODO(adamk): for-of statements do not make use of this feedback slot.
- // The each_slot_ should be specific to ForInStatement, and this work moved
- // there.
- if (IsForOfStatement()) return;
+void ForInStatement::AssignFeedbackVectorSlots(Isolate* isolate,
+ FeedbackVectorSpec* spec,
+ FeedbackVectorSlotCache* cache) {
AssignVectorSlots(each(), spec, &each_slot_);
+ for_in_feedback_slot_ = spec->AddGeneralSlot();
}
@@ -475,18 +468,15 @@ void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
// much larger than the number of elements, creating an object
// literal with fast elements will be a waste of space.
uint32_t element_index = 0;
- if (key->IsString()
- && Handle<String>::cast(key)->AsArrayIndex(&element_index)
- && element_index > max_element_index) {
- max_element_index = element_index;
+ if (key->IsString() && String::cast(*key)->AsArrayIndex(&element_index)) {
+ max_element_index = Max(element_index, max_element_index);
elements++;
- } else if (key->IsSmi()) {
- int key_value = Smi::cast(*key)->value();
- if (key_value > 0
- && static_cast<uint32_t>(key_value) > max_element_index) {
- max_element_index = key_value;
- }
+ key = isolate->factory()->NewNumberFromUint(element_index);
+ } else if (key->ToArrayIndex(&element_index)) {
+ max_element_index = Max(element_index, max_element_index);
elements++;
+ } else if (key->IsNumber()) {
+ key = isolate->factory()->NumberToString(key);
}
// Add name, value pair to the fixed array.
@@ -513,7 +503,7 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
// Allocate a fixed array to hold all the object literals.
Handle<JSArray> array = isolate->factory()->NewJSArray(
FAST_HOLEY_SMI_ELEMENTS, constants_length, constants_length,
- Strength::WEAK, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
+ INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
// Fill in the literals.
bool is_simple = true;
@@ -678,24 +668,21 @@ static bool IsVoidOfLiteral(Expression* expr) {
static bool MatchLiteralCompareUndefined(Expression* left,
Token::Value op,
Expression* right,
- Expression** expr,
- Isolate* isolate) {
+ Expression** expr) {
if (IsVoidOfLiteral(left) && Token::IsEqualityOp(op)) {
*expr = right;
return true;
}
- if (left->IsUndefinedLiteral(isolate) && Token::IsEqualityOp(op)) {
+ if (left->IsUndefinedLiteral() && Token::IsEqualityOp(op)) {
*expr = right;
return true;
}
return false;
}
-
-bool CompareOperation::IsLiteralCompareUndefined(
- Expression** expr, Isolate* isolate) {
- return MatchLiteralCompareUndefined(left_, op_, right_, expr, isolate) ||
- MatchLiteralCompareUndefined(right_, op_, left_, expr, isolate);
+bool CompareOperation::IsLiteralCompareUndefined(Expression** expr) {
+ return MatchLiteralCompareUndefined(left_, op_, right_, expr) ||
+ MatchLiteralCompareUndefined(right_, op_, left_, expr);
}
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index dcb440d7c7..52bac8efbe 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -198,9 +198,7 @@ class AstNode: public ZoneObject {
#ifdef DEBUG
void PrettyPrint(Isolate* isolate);
- void PrettyPrint();
void Print(Isolate* isolate);
- void Print();
#endif // DEBUG
// Type testing & conversion functions overridden by concrete subclasses.
@@ -332,8 +330,9 @@ class Expression : public AstNode {
// True iff the expression is the null literal.
bool IsNullLiteral() const;
- // True if we can prove that the expression is the undefined literal.
- bool IsUndefinedLiteral(Isolate* isolate) const;
+ // True if we can prove that the expression is the undefined literal. Note
+ // that this also checks for loads of the global "undefined" variable.
+ bool IsUndefinedLiteral() const;
// True iff the expression is a valid target for an assignment.
bool IsValidReferenceExpressionOrThis() const;
@@ -792,10 +791,6 @@ class ForEachStatement : public IterationStatement {
void set_each(Expression* e) { each_ = e; }
void set_subject(Expression* e) { subject_ = e; }
- void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache) override;
- FeedbackVectorSlot EachFeedbackSlot() const { return each_slot_; }
-
static const char* VisitModeString(VisitMode mode) {
return mode == ITERATE ? "for-of" : "for-in";
}
@@ -807,7 +802,6 @@ class ForEachStatement : public IterationStatement {
private:
Expression* each_;
Expression* subject_;
- FeedbackVectorSlot each_slot_;
};
@@ -821,11 +815,8 @@ class ForInStatement final : public ForEachStatement {
// Type feedback information.
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache) override {
- ForEachStatement::AssignFeedbackVectorSlots(isolate, spec, cache);
- for_in_feedback_slot_ = spec->AddGeneralSlot();
- }
-
+ FeedbackVectorSlotCache* cache) override;
+ FeedbackVectorSlot EachFeedbackSlot() const { return each_slot_; }
FeedbackVectorSlot ForInFeedbackSlot() {
DCHECK(!for_in_feedback_slot_.IsInvalid());
return for_in_feedback_slot_;
@@ -854,6 +845,7 @@ class ForInStatement final : public ForEachStatement {
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
ForInType for_in_type_;
+ FeedbackVectorSlot each_slot_;
FeedbackVectorSlot for_in_feedback_slot_;
};
@@ -1191,18 +1183,33 @@ class TryCatchStatement final : public TryStatement {
Block* catch_block() const { return catch_block_; }
void set_catch_block(Block* b) { catch_block_ = b; }
+ // The clear_pending_message flag indicates whether or not to clear the
+ // isolate's pending exception message before executing the catch_block. In
+ // the normal use case, this flag is always on because the message object
+ // is not needed anymore when entering the catch block and should not be kept
+ // alive.
+ // The use case where the flag is off is when the catch block is guaranteed to
+ // rethrow the caught exception (using %ReThrow), which reuses the pending
+ // message instead of generating a new one.
+ // (When the catch block doesn't rethrow but is guaranteed to perform an
+ // ordinary throw, not clearing the old message is safe but not very useful.)
+ bool clear_pending_message() { return clear_pending_message_; }
+
protected:
TryCatchStatement(Zone* zone, Block* try_block, Scope* scope,
- Variable* variable, Block* catch_block, int pos)
+ Variable* variable, Block* catch_block,
+ bool clear_pending_message, int pos)
: TryStatement(zone, try_block, pos),
scope_(scope),
variable_(variable),
- catch_block_(catch_block) {}
+ catch_block_(catch_block),
+ clear_pending_message_(clear_pending_message) {}
private:
Scope* scope_;
Variable* variable_;
Block* catch_block_;
+ bool clear_pending_message_;
};
@@ -1339,14 +1346,11 @@ class MaterializedLiteral : public Expression {
return depth_;
}
- bool is_strong() const { return is_strong_; }
-
protected:
- MaterializedLiteral(Zone* zone, int literal_index, bool is_strong, int pos)
+ MaterializedLiteral(Zone* zone, int literal_index, int pos)
: Expression(zone, pos),
literal_index_(literal_index),
is_simple_(false),
- is_strong_(is_strong),
depth_(0) {}
// A materialized literal is simple if the values consist of only
@@ -1375,7 +1379,6 @@ class MaterializedLiteral : public Expression {
private:
int literal_index_;
bool is_simple_;
- bool is_strong_;
int depth_;
friend class AstLiteralReindexer;
@@ -1463,7 +1466,6 @@ class ObjectLiteral final : public MaterializedLiteral {
ZoneList<Property*>* properties() const { return properties_; }
bool fast_elements() const { return fast_elements_; }
bool may_store_doubles() const { return may_store_doubles_; }
- bool has_function() const { return has_function_; }
bool has_elements() const { return has_elements_; }
bool has_shallow_properties() const {
return depth() == 1 && !has_elements() && !may_store_doubles();
@@ -1483,26 +1485,20 @@ class ObjectLiteral final : public MaterializedLiteral {
// Assemble bitfield of flags for the CreateObjectLiteral helper.
int ComputeFlags(bool disable_mementos = false) const {
int flags = fast_elements() ? kFastElements : kNoFlags;
- flags |= has_function() ? kHasFunction : kNoFlags;
if (has_shallow_properties()) {
flags |= kShallowProperties;
}
if (disable_mementos) {
flags |= kDisableMementos;
}
- if (is_strong()) {
- flags |= kIsStrong;
- }
return flags;
}
enum Flags {
kNoFlags = 0,
kFastElements = 1,
- kHasFunction = 1 << 1,
- kShallowProperties = 1 << 2,
- kDisableMementos = 1 << 3,
- kIsStrong = 1 << 4
+ kShallowProperties = 1 << 1,
+ kDisableMementos = 1 << 2
};
struct Accessors: public ZoneObject {
@@ -1534,15 +1530,13 @@ class ObjectLiteral final : public MaterializedLiteral {
protected:
ObjectLiteral(Zone* zone, ZoneList<Property*>* properties, int literal_index,
- int boilerplate_properties, bool has_function, bool is_strong,
- int pos)
- : MaterializedLiteral(zone, literal_index, is_strong, pos),
+ int boilerplate_properties, int pos)
+ : MaterializedLiteral(zone, literal_index, pos),
properties_(properties),
boilerplate_properties_(boilerplate_properties),
fast_elements_(false),
has_elements_(false),
- may_store_doubles_(false),
- has_function_(has_function) {}
+ may_store_doubles_(false) {}
static int parent_num_ids() { return MaterializedLiteral::num_ids(); }
private:
@@ -1553,7 +1547,6 @@ class ObjectLiteral final : public MaterializedLiteral {
bool fast_elements_;
bool has_elements_;
bool may_store_doubles_;
- bool has_function_;
FeedbackVectorSlot slot_;
};
@@ -1589,8 +1582,8 @@ class RegExpLiteral final : public MaterializedLiteral {
protected:
RegExpLiteral(Zone* zone, const AstRawString* pattern, int flags,
- int literal_index, bool is_strong, int pos)
- : MaterializedLiteral(zone, literal_index, is_strong, pos),
+ int literal_index, int pos)
+ : MaterializedLiteral(zone, literal_index, pos),
pattern_(pattern),
flags_(flags) {
set_depth(1);
@@ -1635,9 +1628,6 @@ class ArrayLiteral final : public MaterializedLiteral {
if (disable_mementos) {
flags |= kDisableMementos;
}
- if (is_strong()) {
- flags |= kIsStrong;
- }
return flags;
}
@@ -1657,8 +1647,7 @@ class ArrayLiteral final : public MaterializedLiteral {
enum Flags {
kNoFlags = 0,
kShallowElements = 1,
- kDisableMementos = 1 << 1,
- kIsStrong = 1 << 2
+ kDisableMementos = 1 << 1
};
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
@@ -1667,9 +1656,8 @@ class ArrayLiteral final : public MaterializedLiteral {
protected:
ArrayLiteral(Zone* zone, ZoneList<Expression*>* values,
- int first_spread_index, int literal_index, bool is_strong,
- int pos)
- : MaterializedLiteral(zone, literal_index, is_strong, pos),
+ int first_spread_index, int literal_index, int pos)
+ : MaterializedLiteral(zone, literal_index, pos),
values_(values),
first_spread_index_(first_spread_index) {}
static int parent_num_ids() { return MaterializedLiteral::num_ids(); }
@@ -2313,7 +2301,7 @@ class CompareOperation final : public Expression {
// Match special cases.
bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
- bool IsLiteralCompareUndefined(Expression** expr, Isolate* isolate);
+ bool IsLiteralCompareUndefined(Expression** expr);
bool IsLiteralCompareNull(Expression** expr);
protected:
@@ -2529,37 +2517,29 @@ class RewritableExpression : public Expression {
Expression* expr_;
};
-
+// Our Yield is different from the JS yield in that it "returns" its argument as
+// is, without wrapping it in an iterator result object. Such wrapping, if
+// desired, must be done beforehand (see the parser).
class Yield final : public Expression {
public:
DECLARE_NODE_TYPE(Yield)
- enum Kind {
- kInitial, // The initial yield that returns the unboxed generator object.
- kSuspend, // A normal yield: { value: EXPRESSION, done: false }
- kDelegating, // A yield*.
- kFinal // A return: { value: EXPRESSION, done: true }
- };
-
Expression* generator_object() const { return generator_object_; }
Expression* expression() const { return expression_; }
- Kind yield_kind() const { return yield_kind_; }
void set_generator_object(Expression* e) { generator_object_ = e; }
void set_expression(Expression* e) { expression_ = e; }
protected:
Yield(Zone* zone, Expression* generator_object, Expression* expression,
- Kind yield_kind, int pos)
+ int pos)
: Expression(zone, pos),
generator_object_(generator_object),
- expression_(expression),
- yield_kind_(yield_kind) {}
+ expression_(expression) {}
private:
Expression* generator_object_;
Expression* expression_;
- Kind yield_kind_;
};
@@ -3169,8 +3149,17 @@ class AstNodeFactory final BASE_EMBEDDED {
TryCatchStatement* NewTryCatchStatement(Block* try_block, Scope* scope,
Variable* variable,
Block* catch_block, int pos) {
- return new (local_zone_) TryCatchStatement(local_zone_, try_block, scope,
- variable, catch_block, pos);
+ return new (local_zone_) TryCatchStatement(
+ local_zone_, try_block, scope, variable, catch_block, true, pos);
+ }
+
+ TryCatchStatement* NewTryCatchStatementForReThrow(Block* try_block,
+ Scope* scope,
+ Variable* variable,
+ Block* catch_block,
+ int pos) {
+ return new (local_zone_) TryCatchStatement(
+ local_zone_, try_block, scope, variable, catch_block, false, pos);
}
TryFinallyStatement* NewTryFinallyStatement(Block* try_block,
@@ -3243,12 +3232,9 @@ class AstNodeFactory final BASE_EMBEDDED {
ZoneList<ObjectLiteral::Property*>* properties,
int literal_index,
int boilerplate_properties,
- bool has_function,
- bool is_strong,
int pos) {
- return new (local_zone_)
- ObjectLiteral(local_zone_, properties, literal_index,
- boilerplate_properties, has_function, is_strong, pos);
+ return new (local_zone_) ObjectLiteral(
+ local_zone_, properties, literal_index, boilerplate_properties, pos);
}
ObjectLiteral::Property* NewObjectLiteralProperty(
@@ -3267,24 +3253,23 @@ class AstNodeFactory final BASE_EMBEDDED {
}
RegExpLiteral* NewRegExpLiteral(const AstRawString* pattern, int flags,
- int literal_index, bool is_strong, int pos) {
- return new (local_zone_) RegExpLiteral(local_zone_, pattern, flags,
- literal_index, is_strong, pos);
+ int literal_index, int pos) {
+ return new (local_zone_)
+ RegExpLiteral(local_zone_, pattern, flags, literal_index, pos);
}
ArrayLiteral* NewArrayLiteral(ZoneList<Expression*>* values,
int literal_index,
- bool is_strong,
int pos) {
return new (local_zone_)
- ArrayLiteral(local_zone_, values, -1, literal_index, is_strong, pos);
+ ArrayLiteral(local_zone_, values, -1, literal_index, pos);
}
ArrayLiteral* NewArrayLiteral(ZoneList<Expression*>* values,
int first_spread_index, int literal_index,
- bool is_strong, int pos) {
+ int pos) {
return new (local_zone_) ArrayLiteral(
- local_zone_, values, first_spread_index, literal_index, is_strong, pos);
+ local_zone_, values, first_spread_index, literal_index, pos);
}
VariableProxy* NewVariableProxy(Variable* var,
@@ -3399,11 +3384,10 @@ class AstNodeFactory final BASE_EMBEDDED {
Yield* NewYield(Expression *generator_object,
Expression* expression,
- Yield::Kind yield_kind,
int pos) {
if (!expression) expression = NewUndefinedLiteral(pos);
return new (local_zone_)
- Yield(local_zone_, generator_object, expression, yield_kind, pos);
+ Yield(local_zone_, generator_object, expression, pos);
}
Throw* NewThrow(Expression* exception, int pos) {
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index 0e9986a438..2a79049b08 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -471,7 +471,7 @@ static int FormatSlotNode(Vector<char>* buf, Expression* node,
const char* node_name, FeedbackVectorSlot slot) {
int pos = SNPrintF(*buf, "%s", node_name);
if (!slot.IsInvalid()) {
- pos = SNPrintF(*buf + pos, " Slot(%d)", slot.ToInt());
+ pos += SNPrintF(*buf + pos, " Slot(%d)", slot.ToInt());
}
return pos;
}
@@ -1563,6 +1563,7 @@ void AstPrinter::VisitVariableProxy(VariableProxy* node) {
Variable* var = node->var();
switch (var->location()) {
case VariableLocation::UNALLOCATED:
+ SNPrintF(buf + pos, " unallocated");
break;
case VariableLocation::PARAMETER:
SNPrintF(buf + pos, " parameter[%d]", var->index());
@@ -1593,9 +1594,7 @@ void AstPrinter::VisitAssignment(Assignment* node) {
void AstPrinter::VisitYield(Yield* node) {
- EmbeddedVector<char, 128> buf;
- SNPrintF(buf, "YIELD (kind %d)", node->yield_kind());
- IndentedScope indent(this, buf.start(), node->position());
+ IndentedScope indent(this, "YIELD", node->position());
Visit(node->expression());
}
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index 7c87ce39e9..5d4b809876 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -100,7 +100,6 @@ Scope::Scope(Zone* zone, Scope* outer_scope, ScopeType scope_type,
function_kind);
// The outermost scope must be a script scope.
DCHECK(scope_type == SCRIPT_SCOPE || outer_scope != NULL);
- DCHECK(!HasIllegalRedeclaration());
}
Scope::Scope(Zone* zone, Scope* inner_scope, ScopeType scope_type,
@@ -169,9 +168,7 @@ void Scope::SetDefaults(ScopeType scope_type, Scope* outer_scope,
function_ = nullptr;
arguments_ = nullptr;
this_function_ = nullptr;
- illegal_redecl_ = nullptr;
scope_inside_with_ = false;
- scope_contains_with_ = false;
scope_calls_eval_ = false;
scope_uses_arguments_ = false;
scope_uses_super_property_ = false;
@@ -210,15 +207,14 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
// Reconstruct the outer scope chain from a closure's context chain.
Scope* current_scope = NULL;
Scope* innermost_scope = NULL;
- bool contains_with = false;
while (!context->IsNativeContext()) {
- if (context->IsWithContext()) {
+ if (context->IsWithContext() || context->IsDebugEvaluateContext()) {
+ // For scope analysis, debug-evaluate is equivalent to a with scope.
Scope* with_scope = new (zone)
Scope(zone, current_scope, WITH_SCOPE, Handle<ScopeInfo>::null(),
script_scope->ast_value_factory_);
current_scope = with_scope;
// All the inner scopes are inside a with.
- contains_with = true;
for (Scope* s = innermost_scope; s != NULL; s = s->outer_scope()) {
s->scope_inside_with_ = true;
}
@@ -252,13 +248,7 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
script_scope->ast_value_factory_->GetString(Handle<String>(name)),
script_scope->ast_value_factory_);
}
- if (contains_with) current_scope->RecordWithStatement();
if (innermost_scope == NULL) innermost_scope = current_scope;
-
- // Forget about a with when we move to a context for a different function.
- if (context->previous()->closure() != context->closure()) {
- contains_with = false;
- }
context = context->previous();
}
@@ -392,7 +382,6 @@ void Scope::PropagateUsageFlagsToScope(Scope* other) {
if (uses_arguments()) other->RecordArgumentsUsage();
if (uses_super_property()) other->RecordSuperPropertyUsage();
if (calls_eval()) other->RecordEvalCall();
- if (scope_contains_with_) other->RecordWithStatement();
}
@@ -583,21 +572,6 @@ void Scope::AddDeclaration(Declaration* declaration) {
}
-void Scope::SetIllegalRedeclaration(Expression* expression) {
- // Record only the first illegal redeclaration.
- if (!HasIllegalRedeclaration()) {
- illegal_redecl_ = expression;
- }
- DCHECK(HasIllegalRedeclaration());
-}
-
-
-Expression* Scope::GetIllegalRedeclaration() {
- DCHECK(HasIllegalRedeclaration());
- return illegal_redecl_;
-}
-
-
Declaration* Scope::CheckConflictingVarDeclarations() {
int length = decls_.length();
for (int i = 0; i < length; i++) {
@@ -817,25 +791,7 @@ Handle<ScopeInfo> Scope::GetScopeInfo(Isolate* isolate) {
return scope_info_;
}
-
-void Scope::GetNestedScopeChain(Isolate* isolate,
- List<Handle<ScopeInfo> >* chain, int position) {
- if (!is_eval_scope()) chain->Add(Handle<ScopeInfo>(GetScopeInfo(isolate)));
-
- for (int i = 0; i < inner_scopes_.length(); i++) {
- Scope* scope = inner_scopes_[i];
- int beg_pos = scope->start_position();
- int end_pos = scope->end_position();
- DCHECK(beg_pos >= 0 && end_pos >= 0);
- if (beg_pos <= position && position < end_pos) {
- scope->GetNestedScopeChain(isolate, chain, position);
- return;
- }
- }
-}
-
-
-void Scope::CollectNonLocals(HashMap* non_locals) {
+Handle<StringSet> Scope::CollectNonLocals(Handle<StringSet> non_locals) {
// Collect non-local variables referenced in the scope.
// TODO(yangguo): store non-local variables explicitly if we can no longer
// rely on unresolved_ to find them.
@@ -843,13 +799,12 @@ void Scope::CollectNonLocals(HashMap* non_locals) {
VariableProxy* proxy = unresolved_[i];
if (proxy->is_resolved() && proxy->var()->IsStackAllocated()) continue;
Handle<String> name = proxy->name();
- void* key = reinterpret_cast<void*>(name.location());
- HashMap::Entry* entry = non_locals->LookupOrInsert(key, name->Hash());
- entry->value = key;
+ non_locals = StringSet::Add(non_locals, name);
}
for (int i = 0; i < inner_scopes_.length(); i++) {
- inner_scopes_[i]->CollectNonLocals(non_locals);
+ non_locals = inner_scopes_[i]->CollectNonLocals(non_locals);
}
+ return non_locals;
}
@@ -999,7 +954,6 @@ void Scope::Print(int n) {
Indent(n1, "// strict mode scope\n");
}
if (scope_inside_with_) Indent(n1, "// scope inside 'with'\n");
- if (scope_contains_with_) Indent(n1, "// scope contains 'with'\n");
if (scope_calls_eval_) Indent(n1, "// scope calls 'eval'\n");
if (scope_uses_arguments_) Indent(n1, "// scope uses 'arguments'\n");
if (scope_uses_super_property_)
@@ -1271,8 +1225,8 @@ bool Scope::MustAllocate(Variable* var) {
// visible name.
if ((var->is_this() || !var->raw_name()->IsEmpty()) &&
(var->has_forced_context_allocation() || scope_calls_eval_ ||
- inner_scope_calls_eval_ || scope_contains_with_ || is_catch_scope() ||
- is_block_scope() || is_module_scope() || is_script_scope())) {
+ inner_scope_calls_eval_ || is_catch_scope() || is_block_scope() ||
+ is_module_scope() || is_script_scope())) {
var->set_is_used();
if (scope_calls_eval_ || inner_scope_calls_eval_) var->set_maybe_assigned();
}
@@ -1295,10 +1249,8 @@ bool Scope::MustAllocateInContext(Variable* var) {
if (var->mode() == TEMPORARY) return false;
if (is_catch_scope() || is_module_scope()) return true;
if (is_script_scope() && IsLexicalVariableMode(var->mode())) return true;
- return var->has_forced_context_allocation() ||
- scope_calls_eval_ ||
- inner_scope_calls_eval_ ||
- scope_contains_with_;
+ return var->has_forced_context_allocation() || scope_calls_eval_ ||
+ inner_scope_calls_eval_;
}
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index 76f761dba3..dae70c0142 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -224,20 +224,7 @@ class Scope: public ZoneObject {
// ---------------------------------------------------------------------------
// Illegal redeclaration support.
- // Set an expression node that will be executed when the scope is
- // entered. We only keep track of one illegal redeclaration node per
- // scope - the first one - so if you try to set it multiple times
- // the additional requests will be silently ignored.
- void SetIllegalRedeclaration(Expression* expression);
-
- // Retrieve the illegal redeclaration expression. Do not call if the
- // scope doesn't have an illegal redeclaration node.
- Expression* GetIllegalRedeclaration();
-
- // Check if the scope has (at least) one illegal redeclaration.
- bool HasIllegalRedeclaration() const { return illegal_redecl_ != NULL; }
-
- // For harmony block scoping mode: Check if the scope has conflicting var
+ // Check if the scope has conflicting var
// declarations, i.e. a var declaration that has been hoisted from a nested
// scope over a let binding of the same name.
Declaration* CheckConflictingVarDeclarations();
@@ -245,9 +232,6 @@ class Scope: public ZoneObject {
// ---------------------------------------------------------------------------
// Scope-specific info.
- // Inform the scope that the corresponding code contains a with statement.
- void RecordWithStatement() { scope_contains_with_ = true; }
-
// Inform the scope that the corresponding code contains an eval call.
void RecordEvalCall() { scope_calls_eval_ = true; }
@@ -556,14 +540,7 @@ class Scope: public ZoneObject {
Handle<ScopeInfo> GetScopeInfo(Isolate* isolate);
- // Get the chain of nested scopes within this scope for the source statement
- // position. The scopes will be added to the list from the outermost scope to
- // the innermost scope. Only nested block, catch or with scopes are tracked
- // and will be returned, but no inner function scopes.
- void GetNestedScopeChain(Isolate* isolate, List<Handle<ScopeInfo> >* chain,
- int statement_position);
-
- void CollectNonLocals(HashMap* non_locals);
+ Handle<StringSet> CollectNonLocals(Handle<StringSet> non_locals);
// ---------------------------------------------------------------------------
// Strict mode support.
@@ -646,15 +623,10 @@ class Scope: public ZoneObject {
// Map of function names to lists of functions defined in sloppy blocks
SloppyBlockFunctionMap sloppy_block_function_map_;
- // Illegal redeclaration.
- Expression* illegal_redecl_;
-
// Scope-specific information computed during parsing.
//
// This scope is inside a 'with' of some outer scope.
bool scope_inside_with_;
- // This scope contains a 'with' statement.
- bool scope_contains_with_;
// This scope or a nested catch scope or with scope contain an 'eval' call. At
// the 'eval' call site this scope is the declaration scope.
bool scope_calls_eval_;
diff --git a/deps/v8/src/background-parsing-task.cc b/deps/v8/src/background-parsing-task.cc
index cc80e01143..3e0a5dcc42 100644
--- a/deps/v8/src/background-parsing-task.cc
+++ b/deps/v8/src/background-parsing-task.cc
@@ -21,7 +21,7 @@ BackgroundParsingTask::BackgroundParsingTask(
// Prepare the data for the internalization phase and compilation phase, which
// will happen in the main thread after parsing.
- Zone* zone = new Zone();
+ Zone* zone = new Zone(isolate->allocator());
ParseInfo* info = new ParseInfo(zone);
source->zone.Reset(zone);
source->info.Reset(info);
@@ -32,7 +32,8 @@ BackgroundParsingTask::BackgroundParsingTask(
info->set_global();
info->set_unicode_cache(&source_->unicode_cache);
info->set_compile_options(options);
- info->set_allow_lazy_parsing(true);
+ // Parse eagerly with ignition since we will compile eagerly.
+ info->set_allow_lazy_parsing(!(i::FLAG_ignition && i::FLAG_ignition_eager));
}
diff --git a/deps/v8/src/bailout-reason.h b/deps/v8/src/bailout-reason.h
index 272b6a4180..92929cf38c 100644
--- a/deps/v8/src/bailout-reason.h
+++ b/deps/v8/src/bailout-reason.h
@@ -14,7 +14,6 @@ namespace internal {
\
V(k32BitValueInRegisterIsNotZeroExtended, \
"32 bit value in register is not zero-extended") \
- V(kAlignmentMarkerExpected, "Alignment marker expected") \
V(kAllocationIsNotDoubleAligned, "Allocation is not double aligned") \
V(kAPICallReturnedInvalidObject, "API call returned invalid object") \
V(kArgumentsObjectValueInATestContext, \
@@ -37,8 +36,6 @@ namespace internal {
V(kBailoutWasNotPrepared, "Bailout was not prepared") \
V(kBothRegistersWereSmisInSelectNonSmi, \
"Both registers were smis in SelectNonSmi") \
- V(kCallToAJavaScriptRuntimeFunction, \
- "Call to a JavaScript runtime function") \
V(kClassLiteral, "Class literal") \
V(kCodeGenerationFailed, "Code generation failed") \
V(kCodeObjectNotProperlyPatched, "Code object not properly patched") \
@@ -57,7 +54,8 @@ namespace internal {
V(kDestinationOfCopyNotAligned, "Destination of copy not aligned") \
V(kDontDeleteCellsCannotContainTheHole, \
"DontDelete cells can't contain the hole") \
- V(kDoExpression, "Do expression encountered") \
+ V(kDoExpressionUnmodelable, \
+ "Encountered a do-expression with unmodelable control statements") \
V(kDoPushArgumentNotImplementedForDoubleType, \
"DoPushArgument not implemented for double type") \
V(kEliminatedBoundsCheckFailed, "Eliminated bounds check failed") \
@@ -84,11 +82,11 @@ namespace internal {
V(kFrameIsExpectedToBeAligned, "Frame is expected to be aligned") \
V(kFunctionBeingDebugged, "Function is being debugged") \
V(kFunctionCallsEval, "Function calls eval") \
- V(kFunctionWithIllegalRedeclaration, "Function with illegal redeclaration") \
V(kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, \
"The function_data field should be a BytecodeArray on interpreter entry") \
V(kGeneratedCodeIsTooLarge, "Generated code is too large") \
V(kGeneratorFailedToResume, "Generator failed to resume") \
+ V(kGeneratorResumeMethod, "Generator resume method is being called") \
V(kGenerator, "Generator") \
V(kGlobalFunctionsMustHaveInitialMap, \
"Global functions must have initial map") \
@@ -103,6 +101,7 @@ namespace internal {
V(kInputStringTooLong, "Input string too long") \
V(kInteger32ToSmiFieldWritingToNonSmiLocation, \
"Integer32ToSmiField writing to non-smi location") \
+ V(kInvalidBytecode, "Invalid bytecode") \
V(kInvalidCaptureReferenced, "Invalid capture referenced") \
V(kInvalidElementsKindForInternalArrayOrInternalPackedArray, \
"Invalid ElementsKind for InternalArray or InternalPackedArray") \
@@ -140,6 +139,7 @@ namespace internal {
V(kObjectFoundInSmiOnlyArray, "Object found in smi-only array") \
V(kObjectLiteralWithComplexProperty, "Object literal with complex property") \
V(kOffsetOutOfRange, "Offset out of range") \
+ V(kOperandIsANumber, "Operand is a number") \
V(kOperandIsASmiAndNotABoundFunction, \
"Operand is a smi and not a bound function") \
V(kOperandIsASmiAndNotAFunction, "Operand is a smi and not a function") \
@@ -230,6 +230,8 @@ namespace internal {
V(kUnexpectedNegativeValue, "Unexpected negative value") \
V(kUnexpectedNumberOfPreAllocatedPropertyFields, \
"Unexpected number of pre-allocated property fields") \
+ V(kUnexpectedFunctionIDForInvokeIntrinsic, \
+ "Unexpected runtime function id for the InvokeIntrinsic bytecode") \
V(kUnexpectedFPCRMode, "Unexpected FPCR mode.") \
V(kUnexpectedSmi, "Unexpected smi value") \
V(kUnexpectedStackDepth, "Unexpected operand stack depth in full-codegen") \
@@ -249,7 +251,7 @@ namespace internal {
V(kUnsupportedNonPrimitiveCompare, "Unsupported non-primitive compare") \
V(kUnsupportedPhiUseOfArguments, "Unsupported phi use of arguments") \
V(kUnsupportedPhiUseOfConstVariable, \
- "Unsupported phi use of const variable") \
+ "Unsupported phi use of const or let variable") \
V(kUnexpectedReturnFromBytecodeHandler, \
"Unexpectedly returned from a bytecode handler") \
V(kUnexpectedReturnFromThrow, "Unexpectedly returned from a throw") \
@@ -262,6 +264,8 @@ namespace internal {
V(kWrongFunctionContext, "Wrong context passed to function") \
V(kWrongAddressOrValuePassedToRecordWrite, \
"Wrong address or value passed to RecordWrite") \
+ V(kWrongArgumentCountForInvokeIntrinsic, \
+ "Wrong number of arguments for intrinsic") \
V(kShouldNotDirectlyEnterOsrFunction, \
"Should not directly enter OSR-compiled function") \
V(kYield, "Yield")
diff --git a/deps/v8/src/base/accounting-allocator.cc b/deps/v8/src/base/accounting-allocator.cc
new file mode 100644
index 0000000000..2269c60680
--- /dev/null
+++ b/deps/v8/src/base/accounting-allocator.cc
@@ -0,0 +1,33 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/accounting-allocator.h"
+
+#include <cstdlib>
+
+#if V8_LIBC_BIONIC
+#include <malloc.h> // NOLINT
+#endif
+
+namespace v8 {
+namespace base {
+
+void* AccountingAllocator::Allocate(size_t bytes) {
+ void* memory = malloc(bytes);
+ if (memory) NoBarrier_AtomicIncrement(&current_memory_usage_, bytes);
+ return memory;
+}
+
+void AccountingAllocator::Free(void* memory, size_t bytes) {
+ free(memory);
+ NoBarrier_AtomicIncrement(&current_memory_usage_,
+ -static_cast<AtomicWord>(bytes));
+}
+
+size_t AccountingAllocator::GetCurrentMemoryUsage() const {
+ return NoBarrier_Load(&current_memory_usage_);
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/accounting-allocator.h b/deps/v8/src/base/accounting-allocator.h
new file mode 100644
index 0000000000..ce67f3790e
--- /dev/null
+++ b/deps/v8/src/base/accounting-allocator.h
@@ -0,0 +1,34 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_ACCOUNTING_ALLOCATOR_H_
+#define V8_BASE_ACCOUNTING_ALLOCATOR_H_
+
+#include "src/base/atomicops.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace base {
+
+class AccountingAllocator final {
+ public:
+ AccountingAllocator() = default;
+ ~AccountingAllocator() = default;
+
+ // Returns nullptr on failed allocation.
+ void* Allocate(size_t bytes);
+ void Free(void* memory, size_t bytes);
+
+ size_t GetCurrentMemoryUsage() const;
+
+ private:
+ AtomicWord current_memory_usage_ = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(AccountingAllocator);
+};
+
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_ACCOUNTING_ALLOCATOR_H_
diff --git a/deps/v8/src/base/atomicops_internals_arm_gcc.h b/deps/v8/src/base/atomicops_internals_arm_gcc.h
index 6c8b27ea24..8d049e04b4 100644
--- a/deps/v8/src/base/atomicops_internals_arm_gcc.h
+++ b/deps/v8/src/base/atomicops_internals_arm_gcc.h
@@ -44,14 +44,15 @@ namespace base {
//
inline void MemoryBarrier() {
-#if defined(__linux__) || defined(__ANDROID__)
+#if defined(__ANDROID__)
// Note: This is a function call, which is also an implicit compiler barrier.
typedef void (*KernelMemoryBarrierFunc)();
((KernelMemoryBarrierFunc)0xffff0fa0)();
#elif defined(__QNXNTO__)
__cpu_membarrier();
#else
-#error MemoryBarrier() is not implemented on this platform.
+ // Fallback to GCC built-in function
+ __sync_synchronize();
#endif
}
diff --git a/deps/v8/src/base/cpu.cc b/deps/v8/src/base/cpu.cc
index 777f379bae..12a3881919 100644
--- a/deps/v8/src/base/cpu.cc
+++ b/deps/v8/src/base/cpu.cc
@@ -468,7 +468,12 @@ CPU::CPU()
char* end;
architecture_ = strtol(architecture, &end, 10);
if (end == architecture) {
- architecture_ = 0;
+ // Kernels older than 3.18 report "CPU architecture: AArch64" on ARMv8.
+ if (strcmp(architecture, "AArch64") == 0) {
+ architecture_ = 8;
+ } else {
+ architecture_ = 0;
+ }
}
delete[] architecture;
diff --git a/deps/v8/src/base/logging.cc b/deps/v8/src/base/logging.cc
index a2688c9c9a..ebab129f93 100644
--- a/deps/v8/src/base/logging.cc
+++ b/deps/v8/src/base/logging.cc
@@ -115,3 +115,14 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
fflush(stderr);
v8::base::OS::Abort();
}
+
+extern "C" void V8_RuntimeError(const char* file, int line,
+ const char* message) {
+ fflush(stdout);
+ fflush(stderr);
+ v8::base::OS::PrintError("\n\n#\n# Runtime error in %s, line %d\n# ", file,
+ line);
+ v8::base::OS::PrintError("\n# %s\n", message);
+ v8::base::DumpBacktrace();
+ fflush(stderr);
+}
diff --git a/deps/v8/src/base/logging.h b/deps/v8/src/base/logging.h
index e4e3f49bfa..15322f6126 100644
--- a/deps/v8/src/base/logging.h
+++ b/deps/v8/src/base/logging.h
@@ -14,6 +14,8 @@
extern "C" V8_NORETURN void V8_Fatal(const char* file, int line,
const char* format, ...);
+extern "C" void V8_RuntimeError(const char* file, int line,
+ const char* message);
// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
// development, but they should not be relied on in the final product.
diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h
index 10cab4b2bf..3f09b2b9ce 100644
--- a/deps/v8/src/base/macros.h
+++ b/deps/v8/src/base/macros.h
@@ -278,6 +278,17 @@ inline void USE(T) { }
#if V8_OS_MACOSX
#undef V8PRIxPTR
#define V8PRIxPTR "lx"
+#undef V8PRIuPTR
+#define V8PRIuPTR "lxu"
+#endif
+
+// GCC on S390 31-bit expands 'size_t' to 'long unsigned int'
+// instead of 'int', resulting in compilation errors with %d.
+// The printf format specifier needs to be %zd instead.
+#if V8_HOST_ARCH_S390 && !V8_HOST_ARCH_64_BIT
+#define V8_SIZET_PREFIX "z"
+#else
+#define V8_SIZET_PREFIX ""
#endif
// The following macro works on both 32 and 64-bit platforms.
diff --git a/deps/v8/src/base/platform/platform-linux.cc b/deps/v8/src/base/platform/platform-linux.cc
index a4b742adc7..1323a0dd91 100644
--- a/deps/v8/src/base/platform/platform-linux.cc
+++ b/deps/v8/src/base/platform/platform-linux.cc
@@ -72,14 +72,14 @@ bool OS::ArmUsingHardFloat() {
#define GCC_VERSION (__GNUC__ * 10000 \
+ __GNUC_MINOR__ * 100 \
+ __GNUC_PATCHLEVEL__)
-#if GCC_VERSION >= 40600
+#if GCC_VERSION >= 40600 && !defined(__clang__)
#if defined(__ARM_PCS_VFP)
return true;
#else
return false;
#endif
-#elif GCC_VERSION < 40500
+#elif GCC_VERSION < 40500 && !defined(__clang__)
return false;
#else
@@ -89,7 +89,7 @@ bool OS::ArmUsingHardFloat() {
!defined(__VFP_FP__)
return false;
#else
-#error "Your version of GCC does not report the FP ABI compiled for." \
+#error "Your version of compiler does not report the FP ABI compiled for." \
"Please report it on this issue" \
"http://code.google.com/p/v8/issues/detail?id=2140"
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index 046dbb69c3..bb340ab5f9 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -81,6 +81,8 @@ int OS::ActivationFrameAlignment() {
return 8;
#elif V8_TARGET_ARCH_MIPS
return 8;
+#elif V8_TARGET_ARCH_S390
+ return 8;
#else
// Otherwise we just assume 16 byte alignment, i.e.:
// - With gcc 4.4 the tree vectorization optimizer can generate code
@@ -185,6 +187,15 @@ void* OS::GetRandomMmapAddr() {
// Little-endian Linux: 48 bits of virtual addressing.
raw_addr &= V8_UINT64_C(0x3ffffffff000);
#endif
+#elif V8_TARGET_ARCH_S390X
+ // Linux on Z uses bits 22-32 for Region Indexing, which translates to 42 bits
+ // of virtual addressing. Truncate to 40 bits to allow kernel chance to
+ // fulfill request.
+ raw_addr &= V8_UINT64_C(0xfffffff000);
+#elif V8_TARGET_ARCH_S390
+ // 31 bits of virtual addressing. Truncate to 29 bits to allow kernel chance
+ // to fulfill request.
+ raw_addr &= 0x1ffff000;
#else
raw_addr &= 0x3ffff000;
@@ -252,6 +263,9 @@ void OS::DebugBreak() {
#endif // V8_OS_NACL
#elif V8_HOST_ARCH_X64
asm("int $3");
+#elif V8_HOST_ARCH_S390
+ // Software breakpoint instruction is 0x0001
+ asm volatile(".word 0x0001");
#else
#error Unsupported host architecture.
#endif
@@ -415,9 +429,10 @@ bool OS::Remove(const char* path) {
return (remove(path) == 0);
}
+char OS::DirectorySeparator() { return '/'; }
bool OS::isDirectorySeparator(const char ch) {
- return ch == '/';
+ return ch == DirectorySeparator();
}
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index 6afa6f9c37..0076a35ce5 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -574,6 +574,7 @@ bool OS::Remove(const char* path) {
return (DeleteFileA(path) != 0);
}
+char OS::DirectorySeparator() { return '\\'; }
bool OS::isDirectorySeparator(const char ch) {
return ch == '/' || ch == '\\';
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index 89d6225ede..5b2dbc9a0b 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -142,6 +142,7 @@ class OS {
static FILE* FOpen(const char* path, const char* mode);
static bool Remove(const char* path);
+ static char DirectorySeparator();
static bool isDirectorySeparator(const char ch);
// Opens a temporary file, the file is auto removed on close.
@@ -290,6 +291,10 @@ class VirtualMemory {
// by address().
VirtualMemory(size_t size, size_t alignment);
+ // Construct a virtual memory by assigning it some already mapped address
+ // and size.
+ VirtualMemory(void* address, size_t size) : address_(address), size_(size) {}
+
// Releases the reserved memory, if any, controlled by this VirtualMemory
// object.
~VirtualMemory();
diff --git a/deps/v8/src/base/platform/semaphore.cc b/deps/v8/src/base/platform/semaphore.cc
index 9e7b59a1d2..284474e937 100644
--- a/deps/v8/src/base/platform/semaphore.cc
+++ b/deps/v8/src/base/platform/semaphore.cc
@@ -94,8 +94,7 @@ Semaphore::~Semaphore() {
void Semaphore::Signal() {
int result = sem_post(&native_handle_);
- DCHECK_EQ(0, result);
- USE(result);
+ CHECK_EQ(0, result);
}
diff --git a/deps/v8/src/base/platform/time.cc b/deps/v8/src/base/platform/time.cc
index e847d54de8..6d5e538970 100644
--- a/deps/v8/src/base/platform/time.cc
+++ b/deps/v8/src/base/platform/time.cc
@@ -520,14 +520,6 @@ bool TimeTicks::IsHighResolutionClockWorking() {
return high_res_tick_clock.Pointer()->IsHighResolution();
}
-
-// static
-TimeTicks TimeTicks::KernelTimestampNow() { return TimeTicks(0); }
-
-
-// static
-bool TimeTicks::KernelTimestampAvailable() { return false; }
-
#else // V8_OS_WIN
TimeTicks TimeTicks::Now() {
@@ -566,82 +558,6 @@ bool TimeTicks::IsHighResolutionClockWorking() {
return true;
}
-
-#if V8_OS_LINUX
-
-class KernelTimestampClock {
- public:
- KernelTimestampClock() : clock_fd_(-1), clock_id_(kClockInvalid) {
- clock_fd_ = open(kTraceClockDevice, O_RDONLY);
- if (clock_fd_ == -1) {
- return;
- }
- clock_id_ = get_clockid(clock_fd_);
- }
-
- virtual ~KernelTimestampClock() {
- if (clock_fd_ != -1) {
- close(clock_fd_);
- }
- }
-
- int64_t Now() {
- if (clock_id_ == kClockInvalid) {
- return 0;
- }
-
- struct timespec ts;
-
- clock_gettime(clock_id_, &ts);
- return ((int64_t)ts.tv_sec * kNsecPerSec) + ts.tv_nsec;
- }
-
- bool Available() { return clock_id_ != kClockInvalid; }
-
- private:
- static const clockid_t kClockInvalid = -1;
- static const char kTraceClockDevice[];
- static const uint64_t kNsecPerSec = 1000000000;
-
- int clock_fd_;
- clockid_t clock_id_;
-
- static int get_clockid(int fd) { return ((~(clockid_t)(fd) << 3) | 3); }
-};
-
-
-// Timestamp module name
-const char KernelTimestampClock::kTraceClockDevice[] = "/dev/trace_clock";
-
-#else
-
-class KernelTimestampClock {
- public:
- KernelTimestampClock() {}
-
- int64_t Now() { return 0; }
- bool Available() { return false; }
-};
-
-#endif // V8_OS_LINUX
-
-static LazyStaticInstance<KernelTimestampClock,
- DefaultConstructTrait<KernelTimestampClock>,
- ThreadSafeInitOnceTrait>::type kernel_tick_clock =
- LAZY_STATIC_INSTANCE_INITIALIZER;
-
-
-// static
-TimeTicks TimeTicks::KernelTimestampNow() {
- return TimeTicks(kernel_tick_clock.Pointer()->Now());
-}
-
-
-// static
-bool TimeTicks::KernelTimestampAvailable() {
- return kernel_tick_clock.Pointer()->Available();
-}
-
#endif // V8_OS_WIN
} // namespace base
diff --git a/deps/v8/src/base/platform/time.h b/deps/v8/src/base/platform/time.h
index 29300e5404..c8140efe4a 100644
--- a/deps/v8/src/base/platform/time.h
+++ b/deps/v8/src/base/platform/time.h
@@ -318,13 +318,6 @@ class TimeTicks final {
// Returns true if the high-resolution clock is working on this system.
static bool IsHighResolutionClockWorking();
- // Returns Linux kernel timestamp for generating profiler events. This method
- // returns null TimeTicks if the kernel cannot provide the timestamps (e.g.,
- // on non-Linux OS or if the kernel module for timestamps is not loaded).
-
- static TimeTicks KernelTimestampNow();
- static bool KernelTimestampAvailable();
-
// Returns true if this object has not been initialized.
bool IsNull() const { return ticks_ == 0; }
diff --git a/deps/v8/src/base/win32-headers.h b/deps/v8/src/base/win32-headers.h
index 2d94abd417..20ec8e0261 100644
--- a/deps/v8/src/base/win32-headers.h
+++ b/deps/v8/src/base/win32-headers.h
@@ -76,6 +76,8 @@
#undef CreateSemaphore
#undef Yield
#undef RotateRight32
+#undef RotateLeft32
#undef RotateRight64
+#undef RotateLeft64
#endif // V8_BASE_WIN32_HEADERS_H_
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 252c51cae4..f67065dec4 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -157,8 +157,8 @@ class Genesis BASE_EMBEDDED {
Handle<JSFunction> GetThrowTypeErrorIntrinsic(Builtins::Name builtin_name);
void CreateStrictModeFunctionMaps(Handle<JSFunction> empty);
- void CreateStrongModeFunctionMaps(Handle<JSFunction> empty);
void CreateIteratorMaps();
+ void CreateJSProxyMaps();
// Make the "arguments" and "caller" properties throw a TypeError on access.
void AddRestrictedFunctionProperties(Handle<Map> map);
@@ -218,7 +218,6 @@ class Genesis BASE_EMBEDDED {
void InstallBuiltinFunctionIds();
void InstallExperimentalBuiltinFunctionIds();
void InitializeNormalizedMapCaches();
- void InstallJSProxyMaps();
enum ExtensionTraversalState {
UNVISITED, VISITED, INSTALLED
@@ -284,13 +283,10 @@ class Genesis BASE_EMBEDDED {
Handle<Map> CreateStrictFunctionMap(FunctionMode function_mode,
Handle<JSFunction> empty_function);
- Handle<Map> CreateStrongFunctionMap(Handle<JSFunction> empty_function,
- bool is_constructor);
void SetStrictFunctionInstanceDescriptor(Handle<Map> map,
FunctionMode function_mode);
- void SetStrongFunctionInstanceDescriptor(Handle<Map> map);
static bool CallUtilsFunction(Isolate* isolate, const char* name);
@@ -547,12 +543,6 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
native_context()->set_initial_array_prototype(*object_function_prototype);
Accessors::FunctionSetPrototype(object_fun, object_function_prototype)
.Assert();
-
- // Allocate initial strong object map.
- Handle<Map> strong_object_map =
- Map::Copy(Handle<Map>(object_fun->initial_map()), "EmptyStrongObject");
- strong_object_map->set_is_strong();
- native_context()->set_js_object_strong_map(*strong_object_map);
}
// Allocate the empty function as the prototype for function - ES6 19.2.3
@@ -637,29 +627,6 @@ void Genesis::SetStrictFunctionInstanceDescriptor(Handle<Map> map,
}
-void Genesis::SetStrongFunctionInstanceDescriptor(Handle<Map> map) {
- Map::EnsureDescriptorSlack(map, 2);
-
- PropertyAttributes ro_attribs =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
-
- Handle<AccessorInfo> length =
- Accessors::FunctionLengthInfo(isolate(), ro_attribs);
- { // Add length.
- AccessorConstantDescriptor d(Handle<Name>(Name::cast(length->name())),
- length, ro_attribs);
- map->AppendDescriptor(&d);
- }
- Handle<AccessorInfo> name =
- Accessors::FunctionNameInfo(isolate(), ro_attribs);
- { // Add name.
- AccessorConstantDescriptor d(Handle<Name>(Name::cast(name->name())), name,
- ro_attribs);
- map->AppendDescriptor(&d);
- }
-}
-
-
// Creates the %ThrowTypeError% function.
Handle<JSFunction> Genesis::GetThrowTypeErrorIntrinsic(
Builtins::Name builtin_name) {
@@ -722,19 +689,6 @@ Handle<Map> Genesis::CreateStrictFunctionMap(
}
-Handle<Map> Genesis::CreateStrongFunctionMap(
- Handle<JSFunction> empty_function, bool is_constructor) {
- Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
- SetStrongFunctionInstanceDescriptor(map);
- map->set_is_constructor(is_constructor);
- Map::SetPrototype(map, empty_function);
- map->set_is_callable();
- map->set_is_extensible(is_constructor);
- map->set_is_strong();
- return map;
-}
-
-
void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
// Allocate map for the prototype-less strict mode instances.
Handle<Map> strict_function_without_prototype_map =
@@ -756,16 +710,6 @@ void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
}
-void Genesis::CreateStrongModeFunctionMaps(Handle<JSFunction> empty) {
- // Allocate map for strong mode instances, which never have prototypes.
- Handle<Map> strong_function_map = CreateStrongFunctionMap(empty, false);
- native_context()->set_strong_function_map(*strong_function_map);
- // Constructors do, though.
- Handle<Map> strong_constructor_map = CreateStrongFunctionMap(empty, true);
- native_context()->set_strong_constructor_map(*strong_constructor_map);
-}
-
-
void Genesis::CreateIteratorMaps() {
// Create iterator-related meta-objects.
Handle<JSObject> iterator_prototype =
@@ -803,15 +747,6 @@ void Genesis::CreateIteratorMaps() {
native_context()->set_strict_generator_function_map(
*strict_generator_function_map);
- Handle<Map> strong_function_map(native_context()->strong_function_map());
- Handle<Map> strong_generator_function_map =
- Map::Copy(strong_function_map, "StrongGeneratorFunction");
- strong_generator_function_map->set_is_constructor(false);
- Map::SetPrototype(strong_generator_function_map,
- generator_function_prototype);
- native_context()->set_strong_generator_function_map(
- *strong_generator_function_map);
-
Handle<JSFunction> object_function(native_context()->object_function());
Handle<Map> generator_object_prototype_map = Map::Create(isolate(), 0);
Map::SetPrototype(generator_object_prototype_map, generator_object_prototype);
@@ -819,6 +754,30 @@ void Genesis::CreateIteratorMaps() {
*generator_object_prototype_map);
}
+void Genesis::CreateJSProxyMaps() {
+ // Allocate the different maps for all Proxy types.
+ // Next to the default proxy, we need maps indicating callable and
+ // constructable proxies.
+ Handle<Map> proxy_function_map =
+ Map::Copy(isolate()->sloppy_function_without_prototype_map(), "Proxy");
+ proxy_function_map->set_is_constructor(true);
+ native_context()->set_proxy_function_map(*proxy_function_map);
+
+ Handle<Map> proxy_map =
+ factory()->NewMap(JS_PROXY_TYPE, JSProxy::kSize, FAST_ELEMENTS);
+ proxy_map->set_dictionary_map(true);
+ native_context()->set_proxy_map(*proxy_map);
+
+ Handle<Map> proxy_callable_map = Map::Copy(proxy_map, "callable Proxy");
+ proxy_callable_map->set_is_callable();
+ native_context()->set_proxy_callable_map(*proxy_callable_map);
+ proxy_callable_map->SetConstructor(native_context()->function_function());
+
+ Handle<Map> proxy_constructor_map =
+ Map::Copy(proxy_callable_map, "constructor Proxy");
+ proxy_constructor_map->set_is_constructor(true);
+ native_context()->set_proxy_constructor_map(*proxy_constructor_map);
+}
static void ReplaceAccessors(Handle<Map> map,
Handle<String> name,
@@ -942,7 +901,7 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
#ifdef DEBUG
LookupIterator it(prototype, factory()->constructor_string(),
LookupIterator::OWN_SKIP_INTERCEPTOR);
- Handle<Object> value = JSReceiver::GetProperty(&it).ToHandleChecked();
+ Handle<Object> value = Object::GetProperty(&it).ToHandleChecked();
DCHECK(it.IsFound());
DCHECK_EQ(*isolate()->object_function(), *value);
#endif
@@ -1121,6 +1080,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kObjectPreventExtensions, 1, false);
SimpleInstallFunction(object_function, "seal", Builtins::kObjectSeal, 1,
false);
+
+ SimpleInstallFunction(isolate->initial_object_prototype(), "hasOwnProperty",
+ Builtins::kObjectHasOwnProperty, 1, true);
}
Handle<JSObject> global(native_context()->global_object());
@@ -1171,7 +1133,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
sloppy_function_map_writable_prototype_->SetConstructor(*function_fun);
strict_function_map_writable_prototype_->SetConstructor(*function_fun);
- native_context()->strong_function_map()->SetConstructor(*function_fun);
}
{ // --- A r r a y ---
@@ -1180,7 +1141,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate->initial_object_prototype(),
Builtins::kArrayCode);
array_function->shared()->DontAdaptArguments();
- array_function->shared()->set_function_data(Smi::FromInt(kArrayCode));
+ array_function->shared()->set_builtin_function_id(kArrayCode);
// This seems a bit hackish, but we need to make sure Array.length
// is 1.
@@ -1214,11 +1175,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<Code> code = array_constructor_stub.GetCode();
array_function->shared()->set_construct_stub(*code);
- Handle<Map> initial_strong_map =
- Map::Copy(initial_map, "SetInstancePrototype");
- initial_strong_map->set_is_strong();
- CacheInitialJSArrayMaps(native_context(), initial_strong_map);
-
Handle<JSFunction> is_arraylike = SimpleInstallFunction(
array_function, isolate->factory()->InternalizeUtf8String("isArray"),
Builtins::kArrayIsArray, 1, true);
@@ -1292,6 +1248,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
attribs);
string_map->AppendDescriptor(&d);
}
+
+ // Install the String.fromCharCode function.
+ SimpleInstallFunction(string_fun, "fromCharCode",
+ Builtins::kStringFromCharCode, 1, false);
}
{
@@ -1303,7 +1263,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
prototype, Builtins::kSymbolConstructor);
symbol_fun->shared()->set_construct_stub(
*isolate->builtins()->SymbolConstructor_ConstructStub());
- symbol_fun->shared()->set_length(1);
+ symbol_fun->shared()->set_length(0);
symbol_fun->shared()->DontAdaptArguments();
native_context()->set_symbol_function(*symbol_fun);
@@ -1560,8 +1520,23 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSObject> math = factory->NewJSObject(cons, TENURED);
DCHECK(math->IsJSObject());
JSObject::AddProperty(global, name, math, DONT_ENUM);
+ SimpleInstallFunction(math, "acos", Builtins::kMathAcos, 1, true);
+ SimpleInstallFunction(math, "asin", Builtins::kMathAsin, 1, true);
+ SimpleInstallFunction(math, "atan", Builtins::kMathAtan, 1, true);
+ SimpleInstallFunction(math, "ceil", Builtins::kMathCeil, 1, true);
+ SimpleInstallFunction(math, "clz32", Builtins::kMathClz32, 1, true);
+ Handle<JSFunction> math_floor =
+ SimpleInstallFunction(math, "floor", Builtins::kMathFloor, 1, true);
+ native_context()->set_math_floor(*math_floor);
+ SimpleInstallFunction(math, "fround", Builtins::kMathFround, 1, true);
+ SimpleInstallFunction(math, "imul", Builtins::kMathImul, 2, true);
SimpleInstallFunction(math, "max", Builtins::kMathMax, 2, false);
SimpleInstallFunction(math, "min", Builtins::kMathMin, 2, false);
+ SimpleInstallFunction(math, "round", Builtins::kMathRound, 1, true);
+ Handle<JSFunction> math_sqrt =
+ SimpleInstallFunction(math, "sqrt", Builtins::kMathSqrt, 1, true);
+ native_context()->set_math_sqrt(*math_sqrt);
+ SimpleInstallFunction(math, "trunc", Builtins::kMathTrunc, 1, true);
}
{ // -- A r r a y B u f f e r
@@ -1649,6 +1624,74 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Context::JS_WEAK_SET_FUN_INDEX);
}
+ { // -- P r o x y
+ CreateJSProxyMaps();
+
+ Handle<String> name = factory->Proxy_string();
+ Handle<Code> code(isolate->builtins()->ProxyConstructor());
+
+ Handle<JSFunction> proxy_function =
+ factory->NewFunction(isolate->proxy_function_map(),
+ factory->Proxy_string(), MaybeHandle<Code>(code));
+
+ JSFunction::SetInitialMap(
+ proxy_function, Handle<Map>(native_context()->proxy_map(), isolate),
+ factory->null_value());
+
+ proxy_function->shared()->set_construct_stub(
+ *isolate->builtins()->ProxyConstructor_ConstructStub());
+ proxy_function->shared()->set_internal_formal_parameter_count(2);
+ proxy_function->shared()->set_length(2);
+
+ native_context()->set_proxy_function(*proxy_function);
+ InstallFunction(global, name, proxy_function, factory->Object_string());
+ }
+
+ { // -- R e f l e c t
+ Handle<String> reflect_string = factory->InternalizeUtf8String("Reflect");
+ Handle<JSObject> reflect =
+ factory->NewJSObject(isolate->object_function(), TENURED);
+ JSObject::AddProperty(global, reflect_string, reflect, DONT_ENUM);
+
+ Handle<JSFunction> define_property =
+ SimpleInstallFunction(reflect, factory->defineProperty_string(),
+ Builtins::kReflectDefineProperty, 3, true);
+ native_context()->set_reflect_define_property(*define_property);
+
+ Handle<JSFunction> delete_property =
+ SimpleInstallFunction(reflect, factory->deleteProperty_string(),
+ Builtins::kReflectDeleteProperty, 2, true);
+ native_context()->set_reflect_delete_property(*delete_property);
+
+ Handle<JSFunction> apply = SimpleInstallFunction(
+ reflect, factory->apply_string(), Builtins::kReflectApply, 3, false);
+ native_context()->set_reflect_apply(*apply);
+
+ Handle<JSFunction> construct =
+ SimpleInstallFunction(reflect, factory->construct_string(),
+ Builtins::kReflectConstruct, 2, false);
+ native_context()->set_reflect_construct(*construct);
+
+ SimpleInstallFunction(reflect, factory->get_string(), Builtins::kReflectGet,
+ 2, false);
+ SimpleInstallFunction(reflect, factory->getOwnPropertyDescriptor_string(),
+ Builtins::kReflectGetOwnPropertyDescriptor, 2, true);
+ SimpleInstallFunction(reflect, factory->getPrototypeOf_string(),
+ Builtins::kReflectGetPrototypeOf, 1, true);
+ SimpleInstallFunction(reflect, factory->has_string(), Builtins::kReflectHas,
+ 2, true);
+ SimpleInstallFunction(reflect, factory->isExtensible_string(),
+ Builtins::kReflectIsExtensible, 1, true);
+ SimpleInstallFunction(reflect, factory->ownKeys_string(),
+ Builtins::kReflectOwnKeys, 1, true);
+ SimpleInstallFunction(reflect, factory->preventExtensions_string(),
+ Builtins::kReflectPreventExtensions, 1, true);
+ SimpleInstallFunction(reflect, factory->set_string(), Builtins::kReflectSet,
+ 3, false);
+ SimpleInstallFunction(reflect, factory->setPrototypeOf_string(),
+ Builtins::kReflectSetPrototypeOf, 2, true);
+ }
+
{ // --- B o u n d F u n c t i o n
Handle<Map> map =
factory->NewMap(JS_BOUND_FUNCTION_TYPE, JSBoundFunction::kSize);
@@ -1924,10 +1967,11 @@ bool Bootstrapper::CompileNative(Isolate* isolate, Vector<const char> name,
Handle<String> script_name =
isolate->factory()->NewStringFromUtf8(name).ToHandleChecked();
- Handle<SharedFunctionInfo> function_info = Compiler::CompileScript(
- source, script_name, 0, 0, ScriptOriginOptions(), Handle<Object>(),
- context, NULL, NULL, ScriptCompiler::kNoCompileOptions, natives_flag,
- false);
+ Handle<SharedFunctionInfo> function_info =
+ Compiler::GetSharedFunctionInfoForScript(
+ source, script_name, 0, 0, ScriptOriginOptions(), Handle<Object>(),
+ context, NULL, NULL, ScriptCompiler::kNoCompileOptions, natives_flag,
+ false);
if (function_info.is_null()) return false;
DCHECK(context->IsNativeContext());
@@ -1981,7 +2025,7 @@ bool Genesis::CompileExtension(Isolate* isolate, v8::Extension* extension) {
if (!cache->Lookup(name, &function_info)) {
Handle<String> script_name =
factory->NewStringFromUtf8(name).ToHandleChecked();
- function_info = Compiler::CompileScript(
+ function_info = Compiler::GetSharedFunctionInfoForScript(
source, script_name, 0, 0, ScriptOriginOptions(), Handle<Object>(),
context, extension, NULL, ScriptCompiler::kNoCompileOptions,
EXTENSION_CODE, false);
@@ -2021,7 +2065,7 @@ static Handle<JSObject> ResolveBuiltinIdHolder(Handle<Context> native_context,
Handle<String> property_string = factory->InternalizeUtf8String(property);
DCHECK(!property_string.is_null());
Handle<JSObject> object = Handle<JSObject>::cast(
- Object::GetProperty(global, property_string).ToHandleChecked());
+ JSReceiver::GetProperty(global, property_string).ToHandleChecked());
if (strcmp("prototype", inner) == 0) {
Handle<JSFunction> function = Handle<JSFunction>::cast(object);
return Handle<JSObject>(JSObject::cast(function->prototype()));
@@ -2029,7 +2073,7 @@ static Handle<JSObject> ResolveBuiltinIdHolder(Handle<Context> native_context,
Handle<String> inner_string = factory->InternalizeUtf8String(inner);
DCHECK(!inner_string.is_null());
Handle<Object> value =
- Object::GetProperty(object, inner_string).ToHandleChecked();
+ JSReceiver::GetProperty(object, inner_string).ToHandleChecked();
return Handle<JSObject>::cast(value);
}
@@ -2129,8 +2173,6 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
*generator_function_function);
native_context->strict_generator_function_map()->SetConstructor(
*generator_function_function);
- native_context->strong_generator_function_map()->SetConstructor(
- *generator_function_function);
}
{ // -- S e t I t e r a t o r
@@ -2315,7 +2357,6 @@ void Bootstrapper::ExportExperimentalFromRuntime(Isolate* isolate,
isolate->factory()->ToBoolean(FLAG), NONE); \
}
- INITIALIZE_FLAG(FLAG_harmony_tostring)
INITIALIZE_FLAG(FLAG_harmony_species)
#undef INITIALIZE_FLAG
@@ -2325,18 +2366,14 @@ void Bootstrapper::ExportExperimentalFromRuntime(Isolate* isolate,
#define EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(id) \
void Genesis::InitializeGlobal_##id() {}
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_modules)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy_function)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy_let)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_default_parameters)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_destructuring_bind)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_destructuring_assignment)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_object_observe)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexps)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_unicode_regexps)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_do_expressions)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_iterator_close)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_exec)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_lookbehind)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_property)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_name)
@@ -2344,6 +2381,9 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_sent)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(promise_extra)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_tailcalls)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_instanceof)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_restrictive_declarations)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_exponentiation_operator)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_string_padding)
void InstallPublicSymbol(Factory* factory, Handle<Context> native_context,
const char* name, Handle<Symbol> value) {
@@ -2359,13 +2399,6 @@ void InstallPublicSymbol(Factory* factory, Handle<Context> native_context,
}
-void Genesis::InitializeGlobal_harmony_tostring() {
- if (!FLAG_harmony_tostring) return;
- InstallPublicSymbol(factory(), native_context(), "toStringTag",
- factory()->to_string_tag_symbol());
-}
-
-
void Genesis::InitializeGlobal_harmony_regexp_subclass() {
if (!FLAG_harmony_regexp_subclass) return;
InstallPublicSymbol(factory(), native_context(), "match",
@@ -2379,66 +2412,6 @@ void Genesis::InitializeGlobal_harmony_regexp_subclass() {
}
-void Genesis::InitializeGlobal_harmony_reflect() {
- Factory* factory = isolate()->factory();
-
- // We currently use some of the Reflect functions internally, even when
- // the --harmony-reflect flag is not given.
-
- Handle<JSFunction> define_property =
- SimpleCreateFunction(isolate(), factory->defineProperty_string(),
- Builtins::kReflectDefineProperty, 3, true);
- native_context()->set_reflect_define_property(*define_property);
-
- Handle<JSFunction> delete_property =
- SimpleCreateFunction(isolate(), factory->deleteProperty_string(),
- Builtins::kReflectDeleteProperty, 2, true);
- native_context()->set_reflect_delete_property(*delete_property);
-
- Handle<JSFunction> apply = SimpleCreateFunction(
- isolate(), factory->apply_string(), Builtins::kReflectApply, 3, false);
- native_context()->set_reflect_apply(*apply);
-
- Handle<JSFunction> construct =
- SimpleCreateFunction(isolate(), factory->construct_string(),
- Builtins::kReflectConstruct, 2, false);
- native_context()->set_reflect_construct(*construct);
-
- if (!FLAG_harmony_reflect) return;
-
- Handle<JSGlobalObject> global(JSGlobalObject::cast(
- native_context()->global_object()));
- Handle<String> reflect_string = factory->NewStringFromStaticChars("Reflect");
- Handle<JSObject> reflect =
- factory->NewJSObject(isolate()->object_function(), TENURED);
- JSObject::AddProperty(global, reflect_string, reflect, DONT_ENUM);
-
- InstallFunction(reflect, define_property, factory->defineProperty_string());
- InstallFunction(reflect, delete_property, factory->deleteProperty_string());
- InstallFunction(reflect, apply, factory->apply_string());
- InstallFunction(reflect, construct, factory->construct_string());
-
- SimpleInstallFunction(reflect, factory->get_string(),
- Builtins::kReflectGet, 2, false);
- SimpleInstallFunction(reflect, factory->getOwnPropertyDescriptor_string(),
- Builtins::kReflectGetOwnPropertyDescriptor, 2, true);
- SimpleInstallFunction(reflect, factory->getPrototypeOf_string(),
- Builtins::kReflectGetPrototypeOf, 1, true);
- SimpleInstallFunction(reflect, factory->has_string(),
- Builtins::kReflectHas, 2, true);
- SimpleInstallFunction(reflect, factory->isExtensible_string(),
- Builtins::kReflectIsExtensible, 1, true);
- SimpleInstallFunction(reflect, factory->ownKeys_string(),
- Builtins::kReflectOwnKeys, 1, true);
- SimpleInstallFunction(reflect, factory->preventExtensions_string(),
- Builtins::kReflectPreventExtensions, 1, true);
- SimpleInstallFunction(reflect, factory->set_string(),
- Builtins::kReflectSet, 3, false);
- SimpleInstallFunction(reflect, factory->setPrototypeOf_string(),
- Builtins::kReflectSetPrototypeOf, 2, true);
-}
-
-
void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
if (!FLAG_harmony_sharedarraybuffer) return;
@@ -2509,64 +2482,27 @@ void Genesis::InitializeGlobal_harmony_object_own_property_descriptors() {
Builtins::kObjectGetOwnPropertyDescriptors, 1, false);
}
-void Genesis::InstallJSProxyMaps() {
- // Allocate the different maps for all Proxy types.
- // Next to the default proxy, we need maps indicating callable and
- // constructable proxies.
-
- Handle<Map> proxy_function_map =
- Map::Copy(isolate()->sloppy_function_without_prototype_map(), "Proxy");
- proxy_function_map->set_is_constructor(true);
- native_context()->set_proxy_function_map(*proxy_function_map);
-
- Handle<Map> proxy_map =
- factory()->NewMap(JS_PROXY_TYPE, JSProxy::kSize, FAST_ELEMENTS);
- proxy_map->set_dictionary_map(true);
- native_context()->set_proxy_map(*proxy_map);
-
- Handle<Map> proxy_callable_map = Map::Copy(proxy_map, "callable Proxy");
- proxy_callable_map->set_is_callable();
- native_context()->set_proxy_callable_map(*proxy_callable_map);
- proxy_callable_map->SetConstructor(native_context()->function_function());
-
- Handle<Map> proxy_constructor_map =
- Map::Copy(proxy_callable_map, "constructor Proxy");
- proxy_constructor_map->set_is_constructor(true);
- native_context()->set_proxy_constructor_map(*proxy_constructor_map);
-}
-
-
-void Genesis::InitializeGlobal_harmony_proxies() {
- if (!FLAG_harmony_proxies) return;
- Handle<JSGlobalObject> global(
- JSGlobalObject::cast(native_context()->global_object()));
- Isolate* isolate = global->GetIsolate();
- Factory* factory = isolate->factory();
-
- InstallJSProxyMaps();
-
- // Create the Proxy object.
- Handle<String> name = factory->Proxy_string();
- Handle<Code> code(isolate->builtins()->ProxyConstructor());
-
- Handle<JSFunction> proxy_function =
- factory->NewFunction(isolate->proxy_function_map(),
- factory->Proxy_string(), MaybeHandle<Code>(code));
-
- JSFunction::SetInitialMap(proxy_function,
- Handle<Map>(native_context()->proxy_map(), isolate),
- factory->null_value());
-
- proxy_function->shared()->set_construct_stub(
- *isolate->builtins()->ProxyConstructor_ConstructStub());
- proxy_function->shared()->set_internal_formal_parameter_count(2);
- proxy_function->shared()->set_length(2);
+void Genesis::InitializeGlobal_harmony_array_prototype_values() {
+ if (!FLAG_harmony_array_prototype_values) return;
+ Handle<JSFunction> array_constructor(native_context()->array_function());
+ Handle<JSObject> array_prototype(
+ JSObject::cast(array_constructor->instance_prototype()));
+ Handle<Object> values_iterator =
+ JSObject::GetProperty(array_prototype, factory()->iterator_symbol())
+ .ToHandleChecked();
+ DCHECK(values_iterator->IsJSFunction());
+ JSObject::AddProperty(array_prototype, factory()->values_string(),
+ values_iterator, DONT_ENUM);
- native_context()->set_proxy_function(*proxy_function);
- InstallFunction(global, name, proxy_function, factory->Object_string());
+ Handle<Object> unscopables =
+ JSObject::GetProperty(array_prototype, factory()->unscopables_symbol())
+ .ToHandleChecked();
+ DCHECK(unscopables->IsJSObject());
+ JSObject::AddProperty(Handle<JSObject>::cast(unscopables),
+ factory()->values_string(), factory()->true_value(),
+ NONE);
}
-
Handle<JSFunction> Genesis::InstallArrayBuffer(Handle<JSObject> target,
const char* name) {
// Setup the {prototype} with the given {name} for @@toStringTag.
@@ -2708,9 +2644,8 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
if (!CallUtilsFunction(isolate(), "PostNatives")) return false;
- auto template_instantiations_cache =
- ObjectHashTable::New(isolate(), ApiNatives::kInitialFunctionCacheSize,
- USE_CUSTOM_MINIMUM_CAPACITY);
+ auto template_instantiations_cache = UnseededNumberDictionary::New(
+ isolate(), ApiNatives::kInitialFunctionCacheSize);
native_context()->set_template_instantiations_cache(
*template_instantiations_cache);
@@ -2777,7 +2712,7 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
{
Handle<String> key = factory()->Promise_string();
Handle<JSFunction> function = Handle<JSFunction>::cast(
- Object::GetProperty(handle(native_context()->global_object()), key)
+ JSReceiver::GetProperty(handle(native_context()->global_object()), key)
.ToHandleChecked());
JSFunction::EnsureHasInitialMap(function);
function->initial_map()->set_instance_type(JS_PROMISE_TYPE);
@@ -2789,6 +2724,37 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
InstallBuiltinFunctionIds();
+ // Also install builtin function ids to some generator object methods. These
+ // three methods use the three resume operations (Runtime_GeneratorNext,
+ // Runtime_GeneratorReturn, Runtime_GeneratorThrow) respectively. Those
+ // operations are not supported by Crankshaft, TurboFan, nor Ignition.
+ {
+ Handle<JSObject> generator_object_prototype(JSObject::cast(
+ native_context()->generator_object_prototype_map()->prototype()));
+
+ { // GeneratorObject.prototype.next
+ Handle<String> key = factory()->next_string();
+ Handle<JSFunction> function = Handle<JSFunction>::cast(
+ JSReceiver::GetProperty(generator_object_prototype, key)
+ .ToHandleChecked());
+ function->shared()->set_builtin_function_id(kGeneratorObjectNext);
+ }
+ { // GeneratorObject.prototype.return
+ Handle<String> key = factory()->NewStringFromAsciiChecked("return");
+ Handle<JSFunction> function = Handle<JSFunction>::cast(
+ JSReceiver::GetProperty(generator_object_prototype, key)
+ .ToHandleChecked());
+ function->shared()->set_builtin_function_id(kGeneratorObjectReturn);
+ }
+ { // GeneratorObject.prototype.throw
+ Handle<String> key = factory()->throw_string();
+ Handle<JSFunction> function = Handle<JSFunction>::cast(
+ JSReceiver::GetProperty(generator_object_prototype, key)
+ .ToHandleChecked());
+ function->shared()->set_builtin_function_id(kGeneratorObjectThrow);
+ }
+ }
+
// Create a map for accessor property descriptors (a variant of JSObject
// that predefines four properties get, set, configurable and enumerable).
{
@@ -2969,11 +2935,6 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
bool Genesis::InstallExperimentalNatives() {
- static const char* harmony_proxies_natives[] = {"native proxy.js", nullptr};
- static const char* harmony_modules_natives[] = {nullptr};
- static const char* harmony_regexps_natives[] = {"native harmony-regexp.js",
- nullptr};
- static const char* harmony_tostring_natives[] = {nullptr};
static const char* harmony_iterator_close_natives[] = {nullptr};
static const char* harmony_sloppy_natives[] = {nullptr};
static const char* harmony_sloppy_function_natives[] = {nullptr};
@@ -2983,11 +2944,6 @@ bool Genesis::InstallExperimentalNatives() {
static const char* harmony_tailcalls_natives[] = {nullptr};
static const char* harmony_unicode_regexps_natives[] = {
"native harmony-unicode-regexps.js", nullptr};
- static const char* harmony_default_parameters_natives[] = {nullptr};
- static const char* harmony_reflect_natives[] = {"native harmony-reflect.js",
- nullptr};
- static const char* harmony_destructuring_bind_natives[] = {nullptr};
- static const char* harmony_destructuring_assignment_natives[] = {nullptr};
static const char* harmony_object_observe_natives[] = {
"native harmony-object-observe.js", nullptr};
static const char* harmony_sharedarraybuffer_natives[] = {
@@ -2995,9 +2951,12 @@ bool Genesis::InstallExperimentalNatives() {
static const char* harmony_simd_natives[] = {"native harmony-simd.js",
nullptr};
static const char* harmony_do_expressions_natives[] = {nullptr};
+ static const char* harmony_regexp_exec_natives[] = {
+ "native harmony-regexp-exec.js", nullptr};
static const char* harmony_regexp_subclass_natives[] = {nullptr};
static const char* harmony_regexp_lookbehind_natives[] = {nullptr};
static const char* harmony_instanceof_natives[] = {nullptr};
+ static const char* harmony_restrictive_declarations_natives[] = {nullptr};
static const char* harmony_regexp_property_natives[] = {nullptr};
static const char* harmony_function_name_natives[] = {nullptr};
static const char* harmony_function_sent_natives[] = {nullptr};
@@ -3006,6 +2965,10 @@ bool Genesis::InstallExperimentalNatives() {
static const char* harmony_object_values_entries_natives[] = {nullptr};
static const char* harmony_object_own_property_descriptors_natives[] = {
nullptr};
+ static const char* harmony_array_prototype_values_natives[] = {nullptr};
+ static const char* harmony_exponentiation_operator_natives[] = {nullptr};
+ static const char* harmony_string_padding_natives[] = {
+ "native harmony-string-padding.js", nullptr};
for (int i = ExperimentalNatives::GetDebuggerCount();
i < ExperimentalNatives::GetBuiltinsCount(); i++) {
@@ -3075,9 +3038,9 @@ static void InstallBuiltinFunctionId(Handle<JSObject> holder,
BuiltinFunctionId id) {
Isolate* isolate = holder->GetIsolate();
Handle<Object> function_object =
- Object::GetProperty(isolate, holder, function_name).ToHandleChecked();
+ JSReceiver::GetProperty(isolate, holder, function_name).ToHandleChecked();
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
- function->shared()->set_function_data(Smi::FromInt(id));
+ function->shared()->set_builtin_function_id(id);
}
@@ -3596,7 +3559,6 @@ Genesis::Genesis(Isolate* isolate,
CreateRoots();
Handle<JSFunction> empty_function = CreateEmptyFunction(isolate);
CreateStrictModeFunctionMaps(empty_function);
- CreateStrongModeFunctionMaps(empty_function);
CreateIteratorMaps();
Handle<JSGlobalObject> global_object =
CreateNewGlobals(global_proxy_template, global_proxy);
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index 23c41f706e..9c3ff5956b 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -5,10 +5,12 @@
#include "src/builtins.h"
#include "src/api.h"
+#include "src/api-arguments.h"
#include "src/api-natives.h"
-#include "src/arguments.h"
#include "src/base/once.h"
#include "src/bootstrapper.h"
+#include "src/code-factory.h"
+#include "src/compiler/code-stub-assembler.h"
#include "src/dateparser-inl.h"
#include "src/elements.h"
#include "src/frames-inl.h"
@@ -142,11 +144,18 @@ BUILTIN_LIST_C(DEF_ARG_TYPE)
Isolate* isolate); \
MUST_USE_RESULT static Object* Builtin_##name( \
int args_length, Object** args_object, Isolate* isolate) { \
+ Object* value; \
isolate->counters()->runtime_calls()->Increment(); \
- RuntimeCallStats* stats = isolate->counters()->runtime_call_stats(); \
- RuntimeCallTimerScope timer(isolate, &stats->Builtin_##name); \
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"), \
+ "V8.Builtin_" #name); \
name##ArgumentsType args(args_length, args_object); \
- Object* value = Builtin_Impl_##name(args, isolate); \
+ if (FLAG_runtime_call_stats) { \
+ RuntimeCallStats* stats = isolate->counters()->runtime_call_stats(); \
+ RuntimeCallTimerScope timer(isolate, &stats->Builtin_##name); \
+ value = Builtin_Impl_##name(args, isolate); \
+ } else { \
+ value = Builtin_Impl_##name(args, isolate); \
+ } \
return value; \
} \
\
@@ -207,58 +216,38 @@ inline bool GetSloppyArgumentsLength(Isolate* isolate, Handle<JSObject> object,
return *out <= object->elements()->length();
}
-
-inline bool PrototypeHasNoElements(PrototypeIterator* iter) {
+inline bool PrototypeHasNoElements(Isolate* isolate, JSObject* object) {
DisallowHeapAllocation no_gc;
- for (; !iter->IsAtEnd(); iter->Advance()) {
- if (iter->GetCurrent()->IsJSProxy()) return false;
- JSObject* current = iter->GetCurrent<JSObject>();
- if (current->IsAccessCheckNeeded()) return false;
- if (current->HasIndexedInterceptor()) return false;
- if (current->HasStringWrapperElements()) return false;
- if (current->elements()->length() != 0) return false;
+ HeapObject* prototype = HeapObject::cast(object->map()->prototype());
+ HeapObject* null = isolate->heap()->null_value();
+ HeapObject* empty = isolate->heap()->empty_fixed_array();
+ while (prototype != null) {
+ Map* map = prototype->map();
+ if (map->instance_type() <= LAST_CUSTOM_ELEMENTS_RECEIVER) return false;
+ if (JSObject::cast(prototype)->elements() != empty) return false;
+ prototype = HeapObject::cast(map->prototype());
}
return true;
}
inline bool IsJSArrayFastElementMovingAllowed(Isolate* isolate,
JSArray* receiver) {
- DisallowHeapAllocation no_gc;
- // If the array prototype chain is intact (and free of elements), and if the
- // receiver's prototype is the array prototype, then we are done.
- Object* prototype = receiver->map()->prototype();
- if (prototype->IsJSArray() &&
- isolate->is_initial_array_prototype(JSArray::cast(prototype)) &&
- isolate->IsFastArrayConstructorPrototypeChainIntact()) {
- return true;
- }
-
- // Slow case.
- PrototypeIterator iter(isolate, receiver);
- return PrototypeHasNoElements(&iter);
+ return PrototypeHasNoElements(isolate, receiver);
}
inline bool HasSimpleElements(JSObject* current) {
- if (current->IsAccessCheckNeeded()) return false;
- if (current->HasIndexedInterceptor()) return false;
- if (current->HasStringWrapperElements()) return false;
- if (current->GetElementsAccessor()->HasAccessors(current)) return false;
- return true;
+ return current->map()->instance_type() > LAST_CUSTOM_ELEMENTS_RECEIVER &&
+ !current->GetElementsAccessor()->HasAccessors(current);
}
inline bool HasOnlySimpleReceiverElements(Isolate* isolate,
- JSReceiver* receiver) {
+ JSObject* receiver) {
// Check that we have no accessors on the receiver's elements.
- JSObject* object = JSObject::cast(receiver);
- if (!HasSimpleElements(object)) return false;
- // Check that ther are not elements on the prototype.
- DisallowHeapAllocation no_gc;
- PrototypeIterator iter(isolate, receiver);
- return PrototypeHasNoElements(&iter);
+ if (!HasSimpleElements(receiver)) return false;
+ return PrototypeHasNoElements(isolate, receiver);
}
inline bool HasOnlySimpleElements(Isolate* isolate, JSReceiver* receiver) {
- // Check that ther are not elements on the prototype.
DisallowHeapAllocation no_gc;
PrototypeIterator iter(isolate, receiver,
PrototypeIterator::START_AT_RECEIVER);
@@ -270,65 +259,39 @@ inline bool HasOnlySimpleElements(Isolate* isolate, JSReceiver* receiver) {
return true;
}
-// Returns empty handle if not applicable.
+// Returns |false| if not applicable.
MUST_USE_RESULT
-inline MaybeHandle<FixedArrayBase> EnsureJSArrayWithWritableFastElements(
- Isolate* isolate, Handle<Object> receiver, Arguments* args,
- int first_added_arg) {
- // We explicitly add a HandleScope to avoid creating several copies of the
- // same handle which would otherwise cause issue when left-trimming later-on.
- HandleScope scope(isolate);
- if (!receiver->IsJSArray()) return MaybeHandle<FixedArrayBase>();
+inline bool EnsureJSArrayWithWritableFastElements(Isolate* isolate,
+ Handle<Object> receiver,
+ Arguments* args,
+ int first_added_arg) {
+ if (!receiver->IsJSArray()) return false;
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+ ElementsKind origin_kind = array->GetElementsKind();
+ if (IsDictionaryElementsKind(origin_kind)) return false;
+ if (array->map()->is_observed()) return false;
+ if (!array->map()->is_extensible()) return false;
+ if (args == nullptr) return true;
+
// If there may be elements accessors in the prototype chain, the fast path
// cannot be used if there arguments to add to the array.
- Heap* heap = isolate->heap();
- if (args != NULL && !IsJSArrayFastElementMovingAllowed(isolate, *array)) {
- return MaybeHandle<FixedArrayBase>();
- }
- if (array->map()->is_observed()) return MaybeHandle<FixedArrayBase>();
- if (!array->map()->is_extensible()) return MaybeHandle<FixedArrayBase>();
- Handle<FixedArrayBase> elms(array->elements(), isolate);
- Map* map = elms->map();
- if (map == heap->fixed_array_map()) {
- if (args == NULL || array->HasFastObjectElements()) {
- return scope.CloseAndEscape(elms);
- }
- } else if (map == heap->fixed_cow_array_map()) {
- elms = JSObject::EnsureWritableFastElements(array);
- if (args == NULL || array->HasFastObjectElements()) {
- return scope.CloseAndEscape(elms);
- }
- } else if (map == heap->fixed_double_array_map()) {
- if (args == NULL) {
- return scope.CloseAndEscape(elms);
- }
- } else {
- return MaybeHandle<FixedArrayBase>();
- }
+ if (!IsJSArrayFastElementMovingAllowed(isolate, *array)) return false;
// Adding elements to the array prototype would break code that makes sure
// it has no elements. Handle that elsewhere.
- if (isolate->IsAnyInitialArrayPrototype(array)) {
- return MaybeHandle<FixedArrayBase>();
- }
+ if (isolate->IsAnyInitialArrayPrototype(array)) return false;
// Need to ensure that the arguments passed in args can be contained in
// the array.
int args_length = args->length();
- if (first_added_arg >= args_length) {
- return scope.CloseAndEscape(elms);
- }
+ if (first_added_arg >= args_length) return true;
- ElementsKind origin_kind = array->map()->elements_kind();
- DCHECK(!IsFastObjectElementsKind(origin_kind));
+ if (IsFastObjectElementsKind(origin_kind)) return true;
ElementsKind target_kind = origin_kind;
{
DisallowHeapAllocation no_gc;
- int arg_count = args_length - first_added_arg;
- Object** arguments = args->arguments() - first_added_arg - (arg_count - 1);
- for (int i = 0; i < arg_count; i++) {
- Object* arg = arguments[i];
+ for (int i = first_added_arg; i < args_length; i++) {
+ Object* arg = (*args)[i];
if (arg->IsHeapObject()) {
if (arg->IsHeapNumber()) {
target_kind = FAST_DOUBLE_ELEMENTS;
@@ -340,10 +303,12 @@ inline MaybeHandle<FixedArrayBase> EnsureJSArrayWithWritableFastElements(
}
}
if (target_kind != origin_kind) {
+ // Use a short-lived HandleScope to avoid creating several copies of the
+ // elements handle which would cause issues when left-trimming later-on.
+ HandleScope scope(isolate);
JSObject::TransitionElementsKind(array, target_kind);
- elms = handle(array->elements(), isolate);
}
- return scope.CloseAndEscape(elms);
+ return true;
}
@@ -379,41 +344,235 @@ BUILTIN(Illegal) {
BUILTIN(EmptyFunction) { return isolate->heap()->undefined_value(); }
+void Builtins::Generate_ObjectHasOwnProperty(
+ compiler::CodeStubAssembler* assembler) {
+ typedef compiler::Node Node;
+ typedef compiler::CodeStubAssembler::Label Label;
+ typedef compiler::CodeStubAssembler::Variable Variable;
+
+ Node* object = assembler->Parameter(0);
+ Node* key = assembler->Parameter(1);
+ Node* context = assembler->Parameter(4);
+
+ Label call_runtime(assembler), return_true(assembler),
+ return_false(assembler);
+
+ // Smi receivers do not have own properties.
+ Label if_objectisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(object), &return_false,
+ &if_objectisnotsmi);
+ assembler->Bind(&if_objectisnotsmi);
+
+ Node* map = assembler->LoadMap(object);
+ Node* instance_type = assembler->LoadMapInstanceType(map);
+
+ Variable var_index(assembler, MachineRepresentation::kWord32);
+
+ Label if_keyissmi(assembler), if_keyisnotsmi(assembler),
+ keyisindex(assembler);
+ assembler->Branch(assembler->WordIsSmi(key), &if_keyissmi, &if_keyisnotsmi);
+ assembler->Bind(&if_keyissmi);
+ {
+ // Negative smi keys are named properties. Handle in the runtime.
+ Label if_keyispositive(assembler);
+ assembler->Branch(assembler->WordIsPositiveSmi(key), &if_keyispositive,
+ &call_runtime);
+ assembler->Bind(&if_keyispositive);
+
+ var_index.Bind(assembler->SmiUntag(key));
+ assembler->Goto(&keyisindex);
+ }
+
+ assembler->Bind(&if_keyisnotsmi);
+
+ Node* key_instance_type = assembler->LoadInstanceType(key);
+ Label if_iskeyunique(assembler), if_iskeynotsymbol(assembler);
+ assembler->Branch(
+ assembler->Word32Equal(key_instance_type,
+ assembler->Int32Constant(SYMBOL_TYPE)),
+ &if_iskeyunique, &if_iskeynotsymbol);
+ assembler->Bind(&if_iskeynotsymbol);
+ {
+ Label if_iskeyinternalized(assembler);
+ Node* bits = assembler->WordAnd(
+ key_instance_type,
+ assembler->Int32Constant(kIsNotStringMask | kIsNotInternalizedMask));
+ assembler->Branch(
+ assembler->Word32Equal(
+ bits, assembler->Int32Constant(kStringTag | kInternalizedTag)),
+ &if_iskeyinternalized, &call_runtime);
+ assembler->Bind(&if_iskeyinternalized);
+
+ // Check whether the key is an array index passed in as string. Handle
+ // uniform with smi keys if so.
+ // TODO(verwaest): Also support non-internalized strings.
+ Node* hash = assembler->LoadNameHash(key);
+ Node* bit = assembler->Word32And(
+ hash, assembler->Int32Constant(internal::Name::kIsNotArrayIndexMask));
+ Label if_isarrayindex(assembler);
+ assembler->Branch(assembler->Word32Equal(bit, assembler->Int32Constant(0)),
+ &if_isarrayindex, &if_iskeyunique);
+ assembler->Bind(&if_isarrayindex);
+ var_index.Bind(
+ assembler->BitFieldDecode<internal::Name::ArrayIndexValueBits>(hash));
+ assembler->Goto(&keyisindex);
+ }
+ assembler->Bind(&if_iskeyunique);
+
+ {
+ Label if_objectissimple(assembler);
+ assembler->Branch(assembler->Int32LessThanOrEqual(
+ instance_type,
+ assembler->Int32Constant(LAST_SPECIAL_RECEIVER_TYPE)),
+ &call_runtime, &if_objectissimple);
+ assembler->Bind(&if_objectissimple);
+ }
+
+ // TODO(verwaest): Perform a dictonary lookup on slow-mode receivers.
+ Node* bit_field3 = assembler->LoadMapBitField3(map);
+ Node* bit = assembler->BitFieldDecode<Map::DictionaryMap>(bit_field3);
+ Label if_isfastmap(assembler);
+ assembler->Branch(assembler->Word32Equal(bit, assembler->Int32Constant(0)),
+ &if_isfastmap, &call_runtime);
+ assembler->Bind(&if_isfastmap);
+ Node* nof =
+ assembler->BitFieldDecode<Map::NumberOfOwnDescriptorsBits>(bit_field3);
+ // Bail out to the runtime for large numbers of own descriptors. The stub only
+ // does linear search, which becomes too expensive in that case.
+ {
+ static const int32_t kMaxLinear = 256;
+ Label above_max(assembler), below_max(assembler);
+ assembler->Branch(assembler->Int32LessThanOrEqual(
+ nof, assembler->Int32Constant(kMaxLinear)),
+ &below_max, &call_runtime);
+ assembler->Bind(&below_max);
+ }
+ Node* descriptors = assembler->LoadMapDescriptors(map);
+
+ Variable var_descriptor(assembler, MachineRepresentation::kWord32);
+ Label loop(assembler, &var_descriptor);
+ var_descriptor.Bind(assembler->Int32Constant(0));
+ assembler->Goto(&loop);
+ assembler->Bind(&loop);
+ {
+ Node* index = var_descriptor.value();
+ Node* offset = assembler->Int32Constant(DescriptorArray::ToKeyIndex(0));
+ Node* factor = assembler->Int32Constant(DescriptorArray::kDescriptorSize);
+ Label if_notdone(assembler);
+ assembler->Branch(assembler->Word32Equal(index, nof), &return_false,
+ &if_notdone);
+ assembler->Bind(&if_notdone);
+ {
+ Node* array_index =
+ assembler->Int32Add(offset, assembler->Int32Mul(index, factor));
+ Node* current =
+ assembler->LoadFixedArrayElementInt32Index(descriptors, array_index);
+ Label if_unequal(assembler);
+ assembler->Branch(assembler->WordEqual(current, key), &return_true,
+ &if_unequal);
+ assembler->Bind(&if_unequal);
+
+ var_descriptor.Bind(
+ assembler->Int32Add(index, assembler->Int32Constant(1)));
+ assembler->Goto(&loop);
+ }
+ }
+
+ assembler->Bind(&keyisindex);
+ {
+ Label if_objectissimple(assembler);
+ assembler->Branch(assembler->Int32LessThanOrEqual(
+ instance_type, assembler->Int32Constant(
+ LAST_CUSTOM_ELEMENTS_RECEIVER)),
+ &call_runtime, &if_objectissimple);
+ assembler->Bind(&if_objectissimple);
+ }
+
+ Node* index = var_index.value();
+ Node* bit_field2 = assembler->LoadMapBitField2(map);
+ Node* elements_kind =
+ assembler->BitFieldDecode<Map::ElementsKindBits>(bit_field2);
+
+ // TODO(verwaest): Support other elements kinds as well.
+ Label if_isobjectorsmi(assembler);
+ assembler->Branch(
+ assembler->Int32LessThanOrEqual(
+ elements_kind, assembler->Int32Constant(FAST_HOLEY_ELEMENTS)),
+ &if_isobjectorsmi, &call_runtime);
+ assembler->Bind(&if_isobjectorsmi);
+ {
+ Node* elements = assembler->LoadElements(object);
+ Node* length = assembler->LoadFixedArrayBaseLength(elements);
-BUILTIN(ArrayPush) {
+ Label if_iskeyinrange(assembler);
+ assembler->Branch(
+ assembler->Int32LessThan(index, assembler->SmiToWord32(length)),
+ &if_iskeyinrange, &return_false);
+
+ assembler->Bind(&if_iskeyinrange);
+ Node* element = assembler->LoadFixedArrayElementInt32Index(elements, index);
+ Node* the_hole = assembler->LoadRoot(Heap::kTheHoleValueRootIndex);
+ assembler->Branch(assembler->WordEqual(element, the_hole), &return_false,
+ &return_true);
+ }
+
+ assembler->Bind(&return_true);
+ assembler->Return(assembler->BooleanConstant(true));
+
+ assembler->Bind(&return_false);
+ assembler->Return(assembler->BooleanConstant(false));
+
+ assembler->Bind(&call_runtime);
+ assembler->Return(assembler->CallRuntime(Runtime::kObjectHasOwnProperty,
+ context, object, key));
+}
+
+namespace {
+
+Object* DoArrayPush(Isolate* isolate,
+ BuiltinArguments<BuiltinExtraArguments::kNone> args) {
HandleScope scope(isolate);
Handle<Object> receiver = args.receiver();
- MaybeHandle<FixedArrayBase> maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1);
- Handle<FixedArrayBase> elms_obj;
- if (!maybe_elms_obj.ToHandle(&elms_obj)) {
+ if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1)) {
return CallJsIntrinsic(isolate, isolate->array_push(), args);
}
// Fast Elements Path
- int push_size = args.length() - 1;
+ int to_add = args.length() - 1;
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
int len = Smi::cast(array->length())->value();
- if (push_size == 0) {
- return Smi::FromInt(len);
- }
- if (push_size > 0 &&
- JSArray::WouldChangeReadOnlyLength(array, len + push_size)) {
+ if (to_add == 0) return Smi::FromInt(len);
+
+ // Currently fixed arrays cannot grow too big, so we should never hit this.
+ DCHECK_LE(to_add, Smi::kMaxValue - Smi::cast(array->length())->value());
+
+ if (JSArray::HasReadOnlyLength(array)) {
return CallJsIntrinsic(isolate, isolate->array_push(), args);
}
- DCHECK(!array->map()->is_observed());
+
ElementsAccessor* accessor = array->GetElementsAccessor();
- int new_length = accessor->Push(array, elms_obj, &args, push_size);
+ int new_length = accessor->Push(array, &args, to_add);
return Smi::FromInt(new_length);
}
+} // namespace
+
+BUILTIN(ArrayPush) { return DoArrayPush(isolate, args); }
+
+// TODO(verwaest): This is a temporary helper until the FastArrayPush stub can
+// tailcall to the builtin directly.
+RUNTIME_FUNCTION(Runtime_ArrayPush) {
+ DCHECK_EQ(2, args.length());
+ Arguments* incoming = reinterpret_cast<Arguments*>(args[0]);
+ // Rewrap the arguments as builtins arguments.
+ BuiltinArguments<BuiltinExtraArguments::kNone> caller_args(
+ incoming->length() + 1, incoming->arguments() + 1);
+ return DoArrayPush(isolate, caller_args);
+}
BUILTIN(ArrayPop) {
HandleScope scope(isolate);
Handle<Object> receiver = args.receiver();
- MaybeHandle<FixedArrayBase> maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(isolate, receiver, NULL, 0);
- Handle<FixedArrayBase> elms_obj;
- if (!maybe_elms_obj.ToHandle(&elms_obj)) {
+ if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, nullptr, 0)) {
return CallJsIntrinsic(isolate, isolate->array_pop(), args);
}
@@ -430,12 +589,12 @@ BUILTIN(ArrayPop) {
Handle<Object> result;
if (IsJSArrayFastElementMovingAllowed(isolate, JSArray::cast(*receiver))) {
// Fast Elements Path
- result = array->GetElementsAccessor()->Pop(array, elms_obj);
+ result = array->GetElementsAccessor()->Pop(array);
} else {
// Use Slow Lookup otherwise
uint32_t new_length = len - 1;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, Object::GetElement(isolate, array, new_length));
+ isolate, result, JSReceiver::GetElement(isolate, array, new_length));
JSArray::SetLength(array, new_length);
}
return *result;
@@ -446,10 +605,7 @@ BUILTIN(ArrayShift) {
HandleScope scope(isolate);
Heap* heap = isolate->heap();
Handle<Object> receiver = args.receiver();
- MaybeHandle<FixedArrayBase> maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(isolate, receiver, NULL, 0);
- Handle<FixedArrayBase> elms_obj;
- if (!maybe_elms_obj.ToHandle(&elms_obj) ||
+ if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, nullptr, 0) ||
!IsJSArrayFastElementMovingAllowed(isolate, JSArray::cast(*receiver))) {
return CallJsIntrinsic(isolate, isolate->array_shift(), args);
}
@@ -463,7 +619,7 @@ BUILTIN(ArrayShift) {
return CallJsIntrinsic(isolate, isolate->array_shift(), args);
}
- Handle<Object> first = array->GetElementsAccessor()->Shift(array, elms_obj);
+ Handle<Object> first = array->GetElementsAccessor()->Shift(array);
return *first;
}
@@ -471,28 +627,23 @@ BUILTIN(ArrayShift) {
BUILTIN(ArrayUnshift) {
HandleScope scope(isolate);
Handle<Object> receiver = args.receiver();
- MaybeHandle<FixedArrayBase> maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1);
- Handle<FixedArrayBase> elms_obj;
- if (!maybe_elms_obj.ToHandle(&elms_obj)) {
+ if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1)) {
return CallJsIntrinsic(isolate, isolate->array_unshift(), args);
}
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
DCHECK(!array->map()->is_observed());
int to_add = args.length() - 1;
- if (to_add == 0) {
- return array->length();
- }
- // Currently fixed arrays cannot grow too big, so
- // we should never hit this case.
- DCHECK(to_add <= (Smi::kMaxValue - Smi::cast(array->length())->value()));
+ if (to_add == 0) return array->length();
+
+ // Currently fixed arrays cannot grow too big, so we should never hit this.
+ DCHECK_LE(to_add, Smi::kMaxValue - Smi::cast(array->length())->value());
- if (to_add > 0 && JSArray::HasReadOnlyLength(array)) {
+ if (JSArray::HasReadOnlyLength(array)) {
return CallJsIntrinsic(isolate, isolate->array_unshift(), args);
}
ElementsAccessor* accessor = array->GetElementsAccessor();
- int new_length = accessor->Unshift(array, elms_obj, &args, to_add);
+ int new_length = accessor->Unshift(array, &args, to_add);
return Smi::FromInt(new_length);
}
@@ -500,41 +651,34 @@ BUILTIN(ArrayUnshift) {
BUILTIN(ArraySlice) {
HandleScope scope(isolate);
Handle<Object> receiver = args.receiver();
- Handle<JSObject> object;
- Handle<FixedArrayBase> elms_obj;
int len = -1;
int relative_start = 0;
int relative_end = 0;
- bool is_sloppy_arguments = false;
if (receiver->IsJSArray()) {
DisallowHeapAllocation no_gc;
JSArray* array = JSArray::cast(*receiver);
- if (!array->HasFastElements() ||
- !IsJSArrayFastElementMovingAllowed(isolate, array) ||
- !isolate->IsArraySpeciesLookupChainIntact() ||
- // If this is a subclass of Array, then call out to JS
- !array->map()->new_target_is_base()) {
+ if (V8_UNLIKELY(!array->HasFastElements() ||
+ !IsJSArrayFastElementMovingAllowed(isolate, array) ||
+ !isolate->IsArraySpeciesLookupChainIntact() ||
+ // If this is a subclass of Array, then call out to JS
+ !array->HasArrayPrototype(isolate))) {
AllowHeapAllocation allow_allocation;
return CallJsIntrinsic(isolate, isolate->array_slice(), args);
}
len = Smi::cast(array->length())->value();
- object = Handle<JSObject>::cast(receiver);
- elms_obj = handle(array->elements(), isolate);
} else if (receiver->IsJSObject() &&
GetSloppyArgumentsLength(isolate, Handle<JSObject>::cast(receiver),
&len)) {
+ DCHECK_EQ(FAST_ELEMENTS, JSObject::cast(*receiver)->GetElementsKind());
// Array.prototype.slice(arguments, ...) is quite a common idiom
// (notably more than 50% of invocations in Web apps).
// Treat it in C++ as well.
- is_sloppy_arguments = true;
- object = Handle<JSObject>::cast(receiver);
- elms_obj = handle(object->elements(), isolate);
} else {
AllowHeapAllocation allow_allocation;
return CallJsIntrinsic(isolate, isolate->array_slice(), args);
}
- DCHECK(len >= 0);
+ DCHECK_LE(0, len);
int argument_count = args.length() - 1;
// Note carefully chosen defaults---if argument is missing,
// it's undefined which gets converted to 0 for relative_start
@@ -567,36 +711,21 @@ BUILTIN(ArraySlice) {
uint32_t actual_end =
(relative_end < 0) ? Max(len + relative_end, 0) : Min(relative_end, len);
- if (actual_end <= actual_start) {
- Handle<JSArray> result_array = isolate->factory()->NewJSArray(
- GetPackedElementsKind(object->GetElementsKind()), 0, 0);
- return *result_array;
- }
-
+ Handle<JSObject> object = Handle<JSObject>::cast(receiver);
ElementsAccessor* accessor = object->GetElementsAccessor();
- if (is_sloppy_arguments &&
- !accessor->IsPacked(object, elms_obj, actual_start, actual_end)) {
- // Don't deal with arguments with holes in C++
- AllowHeapAllocation allow_allocation;
- return CallJsIntrinsic(isolate, isolate->array_slice(), args);
- }
- Handle<JSArray> result_array =
- accessor->Slice(object, elms_obj, actual_start, actual_end);
- return *result_array;
+ return *accessor->Slice(object, actual_start, actual_end);
}
BUILTIN(ArraySplice) {
HandleScope scope(isolate);
Handle<Object> receiver = args.receiver();
- MaybeHandle<FixedArrayBase> maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 3);
- Handle<FixedArrayBase> elms_obj;
- if (!maybe_elms_obj.ToHandle(&elms_obj) ||
- // If this is a subclass of Array, then call out to JS
- !JSArray::cast(*receiver)->map()->new_target_is_base() ||
- // If anything with @@species has been messed with, call out to JS
- !isolate->IsArraySpeciesLookupChainIntact()) {
+ if (V8_UNLIKELY(
+ !EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 3) ||
+ // If this is a subclass of Array, then call out to JS.
+ !Handle<JSArray>::cast(receiver)->HasArrayPrototype(isolate) ||
+ // If anything with @@species has been messed with, call out to JS.
+ !isolate->IsArraySpeciesLookupChainIntact())) {
return CallJsIntrinsic(isolate, isolate->array_splice(), args);
}
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
@@ -645,7 +774,7 @@ BUILTIN(ArraySplice) {
}
ElementsAccessor* accessor = array->GetElementsAccessor();
Handle<JSArray> result_array = accessor->Splice(
- array, elms_obj, actual_start, actual_delete_count, &args, add_count);
+ array, actual_start, actual_delete_count, &args, add_count);
return *result_array;
}
@@ -680,17 +809,9 @@ class ArrayConcatVisitor {
~ArrayConcatVisitor() { clear_storage(); }
- bool visit(uint32_t i, Handle<Object> elm) {
+ MUST_USE_RESULT bool visit(uint32_t i, Handle<Object> elm) {
uint32_t index = index_offset_ + i;
- if (!is_fixed_array()) {
- Handle<Object> element_value;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate_, element_value,
- Object::SetElement(isolate_, storage_, index, elm, STRICT), false);
- return true;
- }
-
if (i >= JSObject::kMaxElementCount - index_offset_) {
set_exceeds_array_limit(true);
// Exception hasn't been thrown at this point. Return true to
@@ -699,6 +820,14 @@ class ArrayConcatVisitor {
return true;
}
+ if (!is_fixed_array()) {
+ LookupIterator it(isolate_, storage_, index, LookupIterator::OWN);
+ MAYBE_RETURN(
+ JSReceiver::CreateDataProperty(&it, elm, Object::THROW_ON_ERROR),
+ false);
+ return true;
+ }
+
if (fast_elements()) {
if (index < static_cast<uint32_t>(storage_fixed_array()->length())) {
storage_fixed_array()->set(index, *elm);
@@ -778,28 +907,26 @@ class ArrayConcatVisitor {
Handle<SeededNumberDictionary> slow_storage(
SeededNumberDictionary::New(isolate_, current_storage->length()));
uint32_t current_length = static_cast<uint32_t>(current_storage->length());
- for (uint32_t i = 0; i < current_length; i++) {
- HandleScope loop_scope(isolate_);
- Handle<Object> element(current_storage->get(i), isolate_);
- if (!element->IsTheHole()) {
- // The object holding this backing store has just been allocated, so
- // it cannot yet be used as a prototype.
- Handle<SeededNumberDictionary> new_storage =
- SeededNumberDictionary::AtNumberPut(slow_storage, i, element,
- false);
- if (!new_storage.is_identical_to(slow_storage)) {
- slow_storage = loop_scope.CloseAndEscape(new_storage);
- }
- }
- }
+ FOR_WITH_HANDLE_SCOPE(
+ isolate_, uint32_t, i = 0, i, i < current_length, i++, {
+ Handle<Object> element(current_storage->get(i), isolate_);
+ if (!element->IsTheHole()) {
+ // The object holding this backing store has just been allocated, so
+ // it cannot yet be used as a prototype.
+ Handle<SeededNumberDictionary> new_storage =
+ SeededNumberDictionary::AtNumberPut(slow_storage, i, element,
+ false);
+ if (!new_storage.is_identical_to(slow_storage)) {
+ slow_storage = loop_scope.CloseAndEscape(new_storage);
+ }
+ }
+ });
clear_storage();
set_storage(*slow_storage);
set_fast_elements(false);
}
- inline void clear_storage() {
- GlobalHandles::Destroy(Handle<Object>::cast(storage_).location());
- }
+ inline void clear_storage() { GlobalHandles::Destroy(storage_.location()); }
inline void set_storage(FixedArray* storage) {
DCHECK(is_fixed_array());
@@ -913,7 +1040,8 @@ void CollectElementIndices(Handle<JSObject> object, uint32_t range,
case FAST_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS: {
- Handle<FixedArray> elements(FixedArray::cast(object->elements()));
+ DisallowHeapAllocation no_gc;
+ FixedArray* elements = FixedArray::cast(object->elements());
uint32_t length = static_cast<uint32_t>(elements->length());
if (range < length) length = range;
for (uint32_t i = 0; i < length; i++) {
@@ -941,20 +1069,23 @@ void CollectElementIndices(Handle<JSObject> object, uint32_t range,
break;
}
case DICTIONARY_ELEMENTS: {
- Handle<SeededNumberDictionary> dict(
- SeededNumberDictionary::cast(object->elements()));
+ DisallowHeapAllocation no_gc;
+ SeededNumberDictionary* dict =
+ SeededNumberDictionary::cast(object->elements());
uint32_t capacity = dict->Capacity();
- for (uint32_t j = 0; j < capacity; j++) {
- HandleScope loop_scope(isolate);
- Handle<Object> k(dict->KeyAt(j), isolate);
- if (dict->IsKey(*k)) {
- DCHECK(k->IsNumber());
- uint32_t index = static_cast<uint32_t>(k->Number());
- if (index < range) {
- indices->Add(index);
- }
+ Heap* heap = isolate->heap();
+ Object* undefined = heap->undefined_value();
+ Object* the_hole = heap->the_hole_value();
+ FOR_WITH_HANDLE_SCOPE(isolate, uint32_t, j = 0, j, j < capacity, j++, {
+ Object* k = dict->KeyAt(j);
+ if (k == undefined) continue;
+ if (k == the_hole) continue;
+ DCHECK(k->IsNumber());
+ uint32_t index = static_cast<uint32_t>(k->Number());
+ if (index < range) {
+ indices->Add(index);
}
- }
+ });
break;
}
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) case TYPE##_ELEMENTS:
@@ -1022,18 +1153,17 @@ void CollectElementIndices(Handle<JSObject> object, uint32_t range,
bool IterateElementsSlow(Isolate* isolate, Handle<JSReceiver> receiver,
uint32_t length, ArrayConcatVisitor* visitor) {
- for (uint32_t i = 0; i < length; ++i) {
- HandleScope loop_scope(isolate);
+ FOR_WITH_HANDLE_SCOPE(isolate, uint32_t, i = 0, i, i < length, ++i, {
Maybe<bool> maybe = JSReceiver::HasElement(receiver, i);
if (!maybe.IsJust()) return false;
if (maybe.FromJust()) {
Handle<Object> element_value;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, element_value,
- Object::GetElement(isolate, receiver, i),
- false);
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, element_value, JSReceiver::GetElement(isolate, receiver, i),
+ false);
if (!visitor->visit(i, element_value)) return false;
}
- }
+ });
visitor->increase_index_offset(length);
return true;
}
@@ -1086,9 +1216,8 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
// to check the prototype for missing elements.
Handle<FixedArray> elements(FixedArray::cast(array->elements()));
int fast_length = static_cast<int>(length);
- DCHECK_LE(fast_length, elements->length());
- for (int j = 0; j < fast_length; j++) {
- HandleScope loop_scope(isolate);
+ DCHECK(fast_length <= elements->length());
+ FOR_WITH_HANDLE_SCOPE(isolate, int, j = 0, j, j < fast_length, j++, {
Handle<Object> element_value(elements->get(j), isolate);
if (!element_value->IsTheHole()) {
if (!visitor->visit(j, element_value)) return false;
@@ -1099,12 +1228,12 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
// Call GetElement on array, not its prototype, or getters won't
// have the correct receiver.
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, element_value, Object::GetElement(isolate, array, j),
- false);
+ isolate, element_value,
+ JSReceiver::GetElement(isolate, array, j), false);
if (!visitor->visit(j, element_value)) return false;
}
}
- }
+ });
break;
}
case FAST_HOLEY_DOUBLE_ELEMENTS:
@@ -1121,8 +1250,7 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
FixedDoubleArray::cast(array->elements()));
int fast_length = static_cast<int>(length);
DCHECK(fast_length <= elements->length());
- for (int j = 0; j < fast_length; j++) {
- HandleScope loop_scope(isolate);
+ FOR_WITH_HANDLE_SCOPE(isolate, int, j = 0, j, j < fast_length, j++, {
if (!elements->is_the_hole(j)) {
double double_value = elements->get_scalar(j);
Handle<Object> element_value =
@@ -1136,12 +1264,12 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
// have the correct receiver.
Handle<Object> element_value;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, element_value, Object::GetElement(isolate, array, j),
- false);
+ isolate, element_value,
+ JSReceiver::GetElement(isolate, array, j), false);
if (!visitor->visit(j, element_value)) return false;
}
}
- }
+ });
break;
}
@@ -1152,31 +1280,31 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
// than length. This might introduce duplicates in the indices list.
CollectElementIndices(array, length, &indices);
indices.Sort(&compareUInt32);
- int j = 0;
int n = indices.length();
- while (j < n) {
- HandleScope loop_scope(isolate);
+ FOR_WITH_HANDLE_SCOPE(isolate, int, j = 0, j, j < n, (void)0, {
uint32_t index = indices[j];
Handle<Object> element;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, element, Object::GetElement(isolate, array, index), false);
+ isolate, element, JSReceiver::GetElement(isolate, array, index),
+ false);
if (!visitor->visit(index, element)) return false;
// Skip to next different index (i.e., omit duplicates).
do {
j++;
} while (j < n && indices[j] == index);
- }
+ });
break;
}
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: {
- for (uint32_t index = 0; index < length; index++) {
- HandleScope loop_scope(isolate);
- Handle<Object> element;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, element, Object::GetElement(isolate, array, index), false);
- if (!visitor->visit(index, element)) return false;
- }
+ FOR_WITH_HANDLE_SCOPE(
+ isolate, uint32_t, index = 0, index, index < length, index++, {
+ Handle<Object> element;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, element, JSReceiver::GetElement(isolate, array, index),
+ false);
+ if (!visitor->visit(index, element)) return false;
+ });
break;
}
case NO_ELEMENTS:
@@ -1231,8 +1359,7 @@ Object* Slow_ArrayConcat(Arguments* args, Handle<Object> species,
uint32_t estimate_result_length = 0;
uint32_t estimate_nof_elements = 0;
- for (int i = 0; i < argument_count; i++) {
- HandleScope loop_scope(isolate);
+ FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < argument_count, i++, {
Handle<Object> obj((*args)[i], isolate);
uint32_t length_estimate;
uint32_t element_estimate;
@@ -1264,7 +1391,7 @@ Object* Slow_ArrayConcat(Arguments* args, Handle<Object> species,
} else {
estimate_nof_elements += element_estimate;
}
- }
+ });
// If estimated number of elements is more than half of length, a
// fixed array (fast case) is more time and space-efficient than a
@@ -1289,6 +1416,7 @@ Object* Slow_ArrayConcat(Arguments* args, Handle<Object> species,
double_storage->set(j, obj->Number());
j++;
} else {
+ DisallowHeapAllocation no_gc;
JSArray* array = JSArray::cast(*obj);
uint32_t length = static_cast<uint32_t>(array->length()->Number());
switch (array->GetElementsKind()) {
@@ -1316,10 +1444,11 @@ Object* Slow_ArrayConcat(Arguments* args, Handle<Object> species,
}
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_SMI_ELEMENTS: {
+ Object* the_hole = isolate->heap()->the_hole_value();
FixedArray* elements(FixedArray::cast(array->elements()));
for (uint32_t i = 0; i < length; i++) {
Object* element = elements->get(i);
- if (element->IsTheHole()) {
+ if (element == the_hole) {
failure = true;
break;
}
@@ -1381,7 +1510,7 @@ Object* Slow_ArrayConcat(Arguments* args, Handle<Object> species,
return isolate->heap()->exception();
}
} else {
- visitor.visit(0, obj);
+ if (!visitor.visit(0, obj)) return isolate->heap()->exception();
visitor.increase_index_offset(1);
}
}
@@ -1400,6 +1529,12 @@ Object* Slow_ArrayConcat(Arguments* args, Handle<Object> species,
MaybeHandle<JSArray> Fast_ArrayConcat(Isolate* isolate, Arguments* args) {
+ // We shouldn't overflow when adding another len.
+ const int kHalfOfMaxInt = 1 << (kBitsPerInt - 2);
+ STATIC_ASSERT(FixedArray::kMaxLength < kHalfOfMaxInt);
+ STATIC_ASSERT(FixedDoubleArray::kMaxLength < kHalfOfMaxInt);
+ USE(kHalfOfMaxInt);
+
int n_arguments = args->length();
int result_len = 0;
{
@@ -1409,27 +1544,24 @@ MaybeHandle<JSArray> Fast_ArrayConcat(Isolate* isolate, Arguments* args) {
for (int i = 0; i < n_arguments; i++) {
Object* arg = (*args)[i];
if (!arg->IsJSArray()) return MaybeHandle<JSArray>();
- if (!HasOnlySimpleReceiverElements(isolate, JSObject::cast(arg))) {
+ if (!JSObject::cast(arg)->HasFastElements()) {
return MaybeHandle<JSArray>();
}
- // TODO(cbruni): support fast concatenation of DICTIONARY_ELEMENTS.
- if (!JSObject::cast(arg)->HasFastElements()) {
+ if (!HasOnlySimpleReceiverElements(isolate, JSObject::cast(arg))) {
return MaybeHandle<JSArray>();
}
Handle<JSArray> array(JSArray::cast(arg), isolate);
if (HasConcatSpreadableModifier(isolate, array)) {
return MaybeHandle<JSArray>();
}
- int len = Smi::cast(array->length())->value();
-
- // We shouldn't overflow when adding another len.
- const int kHalfOfMaxInt = 1 << (kBitsPerInt - 2);
- STATIC_ASSERT(FixedArray::kMaxLength < kHalfOfMaxInt);
- USE(kHalfOfMaxInt);
- result_len += len;
+ // The Array length is guaranted to be <= kHalfOfMaxInt thus we won't
+ // overflow.
+ result_len += Smi::cast(array->length())->value();
DCHECK(result_len >= 0);
// Throw an Error if we overflow the FixedArray limits
- if (FixedArray::kMaxLength < result_len) {
+ if (FixedDoubleArray::kMaxLength < result_len ||
+ FixedArray::kMaxLength < result_len) {
+ AllowHeapAllocation allow_gc;
THROW_NEW_ERROR(isolate,
NewRangeError(MessageTemplate::kInvalidArrayLength),
JSArray);
@@ -1460,12 +1592,21 @@ BUILTIN(ArrayConcat) {
Handle<JSArray> result_array;
+ // Avoid a real species read to avoid extra lookups to the array constructor
+ if (V8_LIKELY(receiver->IsJSArray() &&
+ Handle<JSArray>::cast(receiver)->HasArrayPrototype(isolate) &&
+ isolate->IsArraySpeciesLookupChainIntact())) {
+ if (Fast_ArrayConcat(isolate, &args).ToHandle(&result_array)) {
+ return *result_array;
+ }
+ if (isolate->has_pending_exception()) return isolate->heap()->exception();
+ }
// Reading @@species happens before anything else with a side effect, so
// we can do it here to determine whether to take the fast path.
Handle<Object> species;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, species, Object::ArraySpeciesConstructor(isolate, receiver));
- if (*species == isolate->context()->native_context()->array_function()) {
+ if (*species == *isolate->array_function()) {
if (Fast_ArrayConcat(isolate, &args).ToHandle(&result_array)) {
return *result_array;
}
@@ -1528,15 +1669,16 @@ MUST_USE_RESULT Maybe<bool> FastAssign(Handle<JSReceiver> to,
prop_value = JSObject::FastPropertyAt(from, representation, index);
}
} else {
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, prop_value,
- Object::GetProperty(from, next_key),
- Nothing<bool>());
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, prop_value, JSReceiver::GetProperty(from, next_key),
+ Nothing<bool>());
stable = from->map() == *map;
}
} else {
// If the map did change, do a slower lookup. We are still guaranteed that
// the object has a simple shape, and that the key is a name.
- LookupIterator it(from, next_key, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ LookupIterator it(from, next_key, from,
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
if (!it.IsFound()) continue;
DCHECK(it.state() == LookupIterator::DATA ||
it.state() == LookupIterator::ACCESSOR);
@@ -1544,7 +1686,7 @@ MUST_USE_RESULT Maybe<bool> FastAssign(Handle<JSReceiver> to,
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, prop_value, Object::GetProperty(&it), Nothing<bool>());
}
- LookupIterator it(to, next_key);
+ LookupIterator it(to, next_key, to);
bool call_to_js = it.IsFound() && it.state() != LookupIterator::DATA;
Maybe<bool> result = Object::SetProperty(
&it, prop_value, STRICT, Object::CERTAINLY_NOT_STORE_FROM_KEYED);
@@ -1840,7 +1982,7 @@ BUILTIN(ObjectGetOwnPropertyDescriptors) {
isolate, keys, JSReceiver::GetKeys(receiver, OWN_ONLY, ALL_PROPERTIES,
CONVERT_TO_STRING));
- Handle<Object> descriptors =
+ Handle<JSObject> descriptors =
isolate->factory()->NewJSObject(isolate->object_function());
for (int i = 0; i < keys->length(); ++i) {
@@ -1855,7 +1997,7 @@ BUILTIN(ObjectGetOwnPropertyDescriptors) {
: undefined;
LookupIterator it = LookupIterator::PropertyOrElement(
- isolate, descriptors, key, LookupIterator::OWN);
+ isolate, descriptors, key, descriptors, LookupIterator::OWN);
Maybe<bool> success = JSReceiver::CreateDataProperty(&it, from_descriptor,
Object::DONT_THROW);
CHECK(success.FromJust());
@@ -1957,6 +2099,233 @@ BUILTIN(GlobalEval) {
}
+// -----------------------------------------------------------------------------
+// ES6 section 20.2.2 Function Properties of the Math Object
+
+
+// ES6 section 20.2.2.2 Math.acos ( x )
+BUILTIN(MathAcos) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ Handle<Object> x = args.at<Object>(1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x, Object::ToNumber(x));
+ return *isolate->factory()->NewHeapNumber(std::acos(x->Number()));
+}
+
+
+// ES6 section 20.2.2.4 Math.asin ( x )
+BUILTIN(MathAsin) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ Handle<Object> x = args.at<Object>(1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x, Object::ToNumber(x));
+ return *isolate->factory()->NewHeapNumber(std::asin(x->Number()));
+}
+
+
+// ES6 section 20.2.2.6 Math.atan ( x )
+BUILTIN(MathAtan) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ Handle<Object> x = args.at<Object>(1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x, Object::ToNumber(x));
+ return *isolate->factory()->NewHeapNumber(std::atan(x->Number()));
+}
+
+namespace {
+
+void Generate_MathRoundingOperation(
+ compiler::CodeStubAssembler* assembler,
+ compiler::Node* (compiler::CodeStubAssembler::*float64op)(
+ compiler::Node*)) {
+ typedef compiler::CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef compiler::CodeStubAssembler::Variable Variable;
+
+ Node* context = assembler->Parameter(4);
+
+ // We might need to loop once for ToNumber conversion.
+ Variable var_x(assembler, MachineRepresentation::kTagged);
+ Label loop(assembler, &var_x);
+ var_x.Bind(assembler->Parameter(1));
+ assembler->Goto(&loop);
+ assembler->Bind(&loop);
+ {
+ // Load the current {x} value.
+ Node* x = var_x.value();
+
+ // Check if {x} is a Smi or a HeapObject.
+ Label if_xissmi(assembler), if_xisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(x), &if_xissmi, &if_xisnotsmi);
+
+ assembler->Bind(&if_xissmi);
+ {
+ // Nothing to do when {x} is a Smi.
+ assembler->Return(x);
+ }
+
+ assembler->Bind(&if_xisnotsmi);
+ {
+ // Check if {x} is a HeapNumber.
+ Label if_xisheapnumber(assembler),
+ if_xisnotheapnumber(assembler, Label::kDeferred);
+ assembler->Branch(
+ assembler->WordEqual(assembler->LoadMap(x),
+ assembler->HeapNumberMapConstant()),
+ &if_xisheapnumber, &if_xisnotheapnumber);
+
+ assembler->Bind(&if_xisheapnumber);
+ {
+ Node* x_value = assembler->LoadHeapNumberValue(x);
+ Node* value = (assembler->*float64op)(x_value);
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ assembler->Return(result);
+ }
+
+ assembler->Bind(&if_xisnotheapnumber);
+ {
+ // Need to convert {x} to a Number first.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_x.Bind(assembler->CallStub(callable, context, x));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+}
+
+} // namespace
+
+// ES6 section 20.2.2.10 Math.ceil ( x )
+void Builtins::Generate_MathCeil(compiler::CodeStubAssembler* assembler) {
+ Generate_MathRoundingOperation(assembler,
+ &compiler::CodeStubAssembler::Float64Ceil);
+}
+
+// ES6 section 20.2.2.11 Math.clz32 ( x )
+void Builtins::Generate_MathClz32(compiler::CodeStubAssembler* assembler) {
+ typedef compiler::CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef compiler::CodeStubAssembler::Variable Variable;
+
+ Node* context = assembler->Parameter(4);
+
+ // Shared entry point for the clz32 operation.
+ Variable var_clz32_x(assembler, MachineRepresentation::kWord32);
+ Label do_clz32(assembler);
+
+ // We might need to loop once for ToNumber conversion.
+ Variable var_x(assembler, MachineRepresentation::kTagged);
+ Label loop(assembler, &var_x);
+ var_x.Bind(assembler->Parameter(1));
+ assembler->Goto(&loop);
+ assembler->Bind(&loop);
+ {
+ // Load the current {x} value.
+ Node* x = var_x.value();
+
+ // Check if {x} is a Smi or a HeapObject.
+ Label if_xissmi(assembler), if_xisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(x), &if_xissmi, &if_xisnotsmi);
+
+ assembler->Bind(&if_xissmi);
+ {
+ var_clz32_x.Bind(assembler->SmiToWord32(x));
+ assembler->Goto(&do_clz32);
+ }
+
+ assembler->Bind(&if_xisnotsmi);
+ {
+ // Check if {x} is a HeapNumber.
+ Label if_xisheapnumber(assembler),
+ if_xisnotheapnumber(assembler, Label::kDeferred);
+ assembler->Branch(
+ assembler->WordEqual(assembler->LoadMap(x),
+ assembler->HeapNumberMapConstant()),
+ &if_xisheapnumber, &if_xisnotheapnumber);
+
+ assembler->Bind(&if_xisheapnumber);
+ {
+ var_clz32_x.Bind(assembler->TruncateHeapNumberValueToWord32(x));
+ assembler->Goto(&do_clz32);
+ }
+
+ assembler->Bind(&if_xisnotheapnumber);
+ {
+ // Need to convert {x} to a Number first.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_x.Bind(assembler->CallStub(callable, context, x));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+
+ assembler->Bind(&do_clz32);
+ {
+ Node* x_value = var_clz32_x.value();
+ Node* value = assembler->Word32Clz(x_value);
+ Node* result = assembler->ChangeInt32ToTagged(value);
+ assembler->Return(result);
+ }
+}
+
+// ES6 section 20.2.2.16 Math.floor ( x )
+void Builtins::Generate_MathFloor(compiler::CodeStubAssembler* assembler) {
+ Generate_MathRoundingOperation(assembler,
+ &compiler::CodeStubAssembler::Float64Floor);
+}
+
+// ES6 section 20.2.2.17 Math.fround ( x )
+BUILTIN(MathFround) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ Handle<Object> x = args.at<Object>(1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x, Object::ToNumber(x));
+ float x32 = DoubleToFloat32(x->Number());
+ return *isolate->factory()->NewNumber(x32);
+}
+
+// ES6 section 20.2.2.19 Math.imul ( x, y )
+BUILTIN(MathImul) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ Handle<Object> x = args.at<Object>(1);
+ Handle<Object> y = args.at<Object>(2);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x, Object::ToNumber(x));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, y, Object::ToNumber(y));
+ int product = static_cast<int>(NumberToUint32(*x) * NumberToUint32(*y));
+ return *isolate->factory()->NewNumberFromInt(product);
+}
+
+// ES6 section 20.2.2.28 Math.round ( x )
+void Builtins::Generate_MathRound(compiler::CodeStubAssembler* assembler) {
+ Generate_MathRoundingOperation(assembler,
+ &compiler::CodeStubAssembler::Float64Round);
+}
+
+// ES6 section 20.2.2.32 Math.sqrt ( x )
+void Builtins::Generate_MathSqrt(compiler::CodeStubAssembler* assembler) {
+ using compiler::Node;
+
+ Node* x = assembler->Parameter(1);
+ Node* context = assembler->Parameter(4);
+ Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+ Node* value = assembler->Float64Sqrt(x_value);
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ assembler->Return(result);
+}
+
+// ES6 section 20.2.2.35 Math.trunc ( x )
+void Builtins::Generate_MathTrunc(compiler::CodeStubAssembler* assembler) {
+ Generate_MathRoundingOperation(assembler,
+ &compiler::CodeStubAssembler::Float64Trunc);
+}
+
+// -----------------------------------------------------------------------------
+// ES6 section 26.1 The Reflect Object
+
+
// ES6 section 26.1.3 Reflect.defineProperty
BUILTIN(ReflectDefineProperty) {
HandleScope scope(isolate);
@@ -3659,45 +4028,6 @@ BUILTIN(GeneratorFunctionConstructor) {
return *result;
}
-// ES6 section 19.2.3.6 Function.prototype[@@hasInstance](V)
-BUILTIN(FunctionHasInstance) {
- HandleScope scope(isolate);
- Handle<Object> callable = args.receiver();
- Handle<Object> object = args.atOrUndefined(isolate, 1);
-
- // {callable} must have a [[Call]] internal method.
- if (!callable->IsCallable()) {
- return isolate->heap()->false_value();
- }
- // If {object} is not a receiver, return false.
- if (!object->IsJSReceiver()) {
- return isolate->heap()->false_value();
- }
- // Check if {callable} is bound, if so, get [[BoundTargetFunction]] from it
- // and use that instead of {callable}.
- while (callable->IsJSBoundFunction()) {
- callable =
- handle(Handle<JSBoundFunction>::cast(callable)->bound_target_function(),
- isolate);
- }
- DCHECK(callable->IsCallable());
- // Get the "prototype" of {callable}; raise an error if it's not a receiver.
- Handle<Object> prototype;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, prototype,
- Object::GetProperty(callable, isolate->factory()->prototype_string()));
- if (!prototype->IsJSReceiver()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewTypeError(MessageTemplate::kInstanceofNonobjectProto, prototype));
- }
- // Return whether or not {prototype} is in the prototype chain of {object}.
- Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
- Maybe<bool> result =
- JSReceiver::HasInPrototypeChain(isolate, receiver, prototype);
- MAYBE_RETURN(result, isolate->heap()->exception());
- return isolate->heap()->ToBoolean(result.FromJust());
-}
// ES6 section 19.4.1.1 Symbol ( [ description ] ) for the [[Call]] case.
BUILTIN(SymbolConstructor) {
@@ -3728,10 +4058,78 @@ BUILTIN(ObjectProtoToString) {
Handle<Object> object = args.at<Object>(0);
Handle<String> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, JSObject::ObjectProtoToString(isolate, object));
+ isolate, result, Object::ObjectProtoToString(isolate, object));
+ return *result;
+}
+
+// -----------------------------------------------------------------------------
+// ES6 section 21.1 String Objects
+
+namespace {
+
+bool ToUint16(Handle<Object> value, uint16_t* result) {
+ if (value->IsNumber() || Object::ToNumber(value).ToHandle(&value)) {
+ *result = DoubleToUint32(value->Number());
+ return true;
+ }
+ return false;
+}
+
+} // namespace
+
+// ES6 21.1.2.1 String.fromCharCode ( ...codeUnits )
+BUILTIN(StringFromCharCode) {
+ HandleScope scope(isolate);
+ // Check resulting string length.
+ int index = 0;
+ Handle<String> result;
+ int const length = args.length() - 1;
+ if (length == 0) return isolate->heap()->empty_string();
+ DCHECK_LT(0, length);
+ // Load the first character code.
+ uint16_t code;
+ if (!ToUint16(args.at<Object>(1), &code)) return isolate->heap()->exception();
+ // Assume that the resulting String contains only one byte characters.
+ if (code <= String::kMaxOneByteCharCodeU) {
+ // Check for single one-byte character fast case.
+ if (length == 1) {
+ return *isolate->factory()->LookupSingleCharacterStringFromCode(code);
+ }
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, isolate->factory()->NewRawOneByteString(length));
+ do {
+ Handle<SeqOneByteString>::cast(result)->Set(index, code);
+ if (++index == length) break;
+ if (!ToUint16(args.at<Object>(1 + index), &code)) {
+ return isolate->heap()->exception();
+ }
+ } while (code <= String::kMaxOneByteCharCodeU);
+ }
+ // Check if all characters fit into the one byte range.
+ if (index < length) {
+ // Fallback to two byte string.
+ Handle<String> new_result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, new_result, isolate->factory()->NewRawTwoByteString(length));
+ for (int new_index = 0; new_index < index; ++new_index) {
+ uint16_t new_code =
+ Handle<SeqOneByteString>::cast(result)->Get(new_index);
+ Handle<SeqTwoByteString>::cast(new_result)->Set(new_index, new_code);
+ }
+ while (true) {
+ Handle<SeqTwoByteString>::cast(new_result)->Set(index, code);
+ if (++index == length) break;
+ if (!ToUint16(args.at<Object>(1 + index), &code)) {
+ return isolate->heap()->exception();
+ }
+ }
+ result = new_result;
+ }
return *result;
}
+// -----------------------------------------------------------------------------
+// ES6 section 21.1 ArrayBuffer Objects
// ES6 section 24.1.2.1 ArrayBuffer ( length ) for the [[Call]] case.
BUILTIN(ArrayBufferConstructor) {
@@ -3845,9 +4243,9 @@ MUST_USE_RESULT MaybeHandle<Object> HandleApiCallHelper(
HandleScope scope(isolate);
Handle<HeapObject> function = args.target<HeapObject>();
Handle<JSReceiver> receiver;
- // TODO(ishell): turn this back to a DCHECK.
- CHECK(function->IsFunctionTemplateInfo() ||
- Handle<JSFunction>::cast(function)->shared()->IsApiFunction());
+
+ DCHECK(function->IsFunctionTemplateInfo() ||
+ Handle<JSFunction>::cast(function)->shared()->IsApiFunction());
Handle<FunctionTemplateInfo> fun_data =
function->IsFunctionTemplateInfo()
@@ -3893,8 +4291,7 @@ MUST_USE_RESULT MaybeHandle<Object> HandleApiCallHelper(
Object* raw_call_data = fun_data->call_code();
if (!raw_call_data->IsUndefined()) {
- // TODO(ishell): remove this debugging code.
- CHECK(raw_call_data->IsCallHandlerInfo());
+ DCHECK(raw_call_data->IsCallHandlerInfo());
CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
Object* callback_obj = call_data->callback();
v8::FunctionCallback callback =
@@ -3912,14 +4309,8 @@ MUST_USE_RESULT MaybeHandle<Object> HandleApiCallHelper(
args.length() - 1,
is_construct);
- v8::Local<v8::Value> value = custom.Call(callback);
- Handle<Object> result;
- if (value.IsEmpty()) {
- result = isolate->factory()->undefined_value();
- } else {
- result = v8::Utils::OpenHandle(*value);
- result->VerifyApiCallResultType();
- }
+ Handle<Object> result = custom.Call(callback);
+ if (result.is_null()) result = isolate->factory()->undefined_value();
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (!is_construct || result->IsJSObject()) {
@@ -4053,6 +4444,20 @@ MaybeHandle<Object> Builtins::InvokeApiFunction(Handle<HeapObject> function,
Handle<Object> receiver,
int argc,
Handle<Object> args[]) {
+ Isolate* isolate = function->GetIsolate();
+ // Do proper receiver conversion for non-strict mode api functions.
+ if (!receiver->IsJSReceiver()) {
+ DCHECK(function->IsFunctionTemplateInfo() || function->IsJSFunction());
+ if (function->IsFunctionTemplateInfo() ||
+ is_sloppy(JSFunction::cast(*function)->shared()->language_mode())) {
+ if (receiver->IsUndefined() || receiver->IsNull()) {
+ receiver = handle(isolate->global_proxy(), isolate);
+ } else {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, receiver,
+ Object::ToObject(isolate, receiver), Object);
+ }
+ }
+ }
// Construct BuiltinArguments object: function, arguments reversed, receiver.
const int kBufferSize = 32;
Object* small_argv[kBufferSize];
@@ -4069,7 +4474,6 @@ MaybeHandle<Object> Builtins::InvokeApiFunction(Handle<HeapObject> function,
argv[0] = *function;
MaybeHandle<Object> result;
{
- auto isolate = function->GetIsolate();
RelocatableArguments arguments(isolate, argc + 2, &argv[argc + 1]);
result = HandleApiCallHelper<false>(isolate, arguments);
}
@@ -4086,8 +4490,6 @@ MaybeHandle<Object> Builtins::InvokeApiFunction(Handle<HeapObject> function,
MUST_USE_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
Isolate* isolate, bool is_construct_call,
BuiltinArguments<BuiltinExtraArguments::kNone> args) {
- Heap* heap = isolate->heap();
-
Handle<Object> receiver = args.receiver();
// Get the object called.
@@ -4122,12 +4524,11 @@ MUST_USE_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
&args[0] - 1,
args.length() - 1,
is_construct_call);
- v8::Local<v8::Value> value = custom.Call(callback);
- if (value.IsEmpty()) {
- result = heap->undefined_value();
+ Handle<Object> result_handle = custom.Call(callback);
+ if (result_handle.is_null()) {
+ result = isolate->heap()->undefined_value();
} else {
- result = *reinterpret_cast<Object**>(*value);
- result->VerifyApiCallResultType();
+ result = *result_handle;
}
}
// Check for exceptions and return result.
@@ -4280,12 +4681,14 @@ Address const Builtins::c_functions_[cfunction_count] = {
struct BuiltinDesc {
+ Handle<Code> (*builder)(Isolate*, struct BuiltinDesc const*);
byte* generator;
byte* c_code;
const char* s_name; // name is only used for generating log information.
int name;
Code::Flags flags;
BuiltinExtraArguments extra_args;
+ int argc;
};
#define BUILTIN_FUNCTION_TABLE_INIT { V8_ONCE_INIT, {} }
@@ -4303,8 +4706,60 @@ class BuiltinFunctionTable {
friend class Builtins;
};
-static BuiltinFunctionTable builtin_function_table =
- BUILTIN_FUNCTION_TABLE_INIT;
+namespace {
+
+BuiltinFunctionTable builtin_function_table = BUILTIN_FUNCTION_TABLE_INIT;
+
+Handle<Code> MacroAssemblerBuilder(Isolate* isolate,
+ BuiltinDesc const* builtin_desc) {
+// For now we generate builtin adaptor code into a stack-allocated
+// buffer, before copying it into individual code objects. Be careful
+// with alignment, some platforms don't like unaligned code.
+#ifdef DEBUG
+ // We can generate a lot of debug code on Arm64.
+ const size_t buffer_size = 32 * KB;
+#elif V8_TARGET_ARCH_PPC64
+ // 8 KB is insufficient on PPC64 when FLAG_debug_code is on.
+ const size_t buffer_size = 10 * KB;
+#else
+ const size_t buffer_size = 8 * KB;
+#endif
+ union {
+ int force_alignment;
+ byte buffer[buffer_size]; // NOLINT(runtime/arrays)
+ } u;
+
+ MacroAssembler masm(isolate, u.buffer, sizeof(u.buffer),
+ CodeObjectRequired::kYes);
+ // Generate the code/adaptor.
+ typedef void (*Generator)(MacroAssembler*, int, BuiltinExtraArguments);
+ Generator g = FUNCTION_CAST<Generator>(builtin_desc->generator);
+ // We pass all arguments to the generator, but it may not use all of
+ // them. This works because the first arguments are on top of the
+ // stack.
+ DCHECK(!masm.has_frame());
+ g(&masm, builtin_desc->name, builtin_desc->extra_args);
+ // Move the code into the object heap.
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ Code::Flags flags = builtin_desc->flags;
+ return isolate->factory()->NewCode(desc, flags, masm.CodeObject());
+}
+
+Handle<Code> CodeStubAssemblerBuilder(Isolate* isolate,
+ BuiltinDesc const* builtin_desc) {
+ Zone zone(isolate->allocator());
+ compiler::CodeStubAssembler assembler(isolate, &zone, builtin_desc->argc,
+ builtin_desc->flags,
+ builtin_desc->s_name);
+ // Generate the code/adaptor.
+ typedef void (*Generator)(compiler::CodeStubAssembler*);
+ Generator g = FUNCTION_CAST<Generator>(builtin_desc->generator);
+ g(&assembler);
+ return assembler.GenerateCode();
+}
+
+} // namespace
// Define array of pointers to generators and C builtin functions.
// We do this in a sort of roundabout way so that we can do the initialization
@@ -4312,47 +4767,70 @@ static BuiltinFunctionTable builtin_function_table =
// Code::Flags names a non-abstract type.
void Builtins::InitBuiltinFunctionTable() {
BuiltinDesc* functions = builtin_function_table.functions_;
- functions[builtin_count].generator = NULL;
- functions[builtin_count].c_code = NULL;
- functions[builtin_count].s_name = NULL;
+ functions[builtin_count].builder = nullptr;
+ functions[builtin_count].generator = nullptr;
+ functions[builtin_count].c_code = nullptr;
+ functions[builtin_count].s_name = nullptr;
functions[builtin_count].name = builtin_count;
functions[builtin_count].flags = static_cast<Code::Flags>(0);
functions[builtin_count].extra_args = BuiltinExtraArguments::kNone;
+ functions[builtin_count].argc = 0;
#define DEF_FUNCTION_PTR_C(aname, aextra_args) \
+ functions->builder = &MacroAssemblerBuilder; \
functions->generator = FUNCTION_ADDR(Generate_Adaptor); \
functions->c_code = FUNCTION_ADDR(Builtin_##aname); \
functions->s_name = #aname; \
functions->name = c_##aname; \
functions->flags = Code::ComputeFlags(Code::BUILTIN); \
functions->extra_args = BuiltinExtraArguments::aextra_args; \
+ functions->argc = 0; \
++functions;
#define DEF_FUNCTION_PTR_A(aname, kind, state, extra) \
+ functions->builder = &MacroAssemblerBuilder; \
functions->generator = FUNCTION_ADDR(Generate_##aname); \
functions->c_code = NULL; \
functions->s_name = #aname; \
functions->name = k##aname; \
functions->flags = Code::ComputeFlags(Code::kind, state, extra); \
functions->extra_args = BuiltinExtraArguments::kNone; \
+ functions->argc = 0; \
+ ++functions;
+
+#define DEF_FUNCTION_PTR_T(aname, aargc) \
+ functions->builder = &CodeStubAssemblerBuilder; \
+ functions->generator = FUNCTION_ADDR(Generate_##aname); \
+ functions->c_code = NULL; \
+ functions->s_name = #aname; \
+ functions->name = k##aname; \
+ functions->flags = \
+ Code::ComputeFlags(Code::BUILTIN, UNINITIALIZED, kNoExtraICState); \
+ functions->extra_args = BuiltinExtraArguments::kNone; \
+ functions->argc = aargc; \
++functions;
#define DEF_FUNCTION_PTR_H(aname, kind) \
+ functions->builder = &MacroAssemblerBuilder; \
functions->generator = FUNCTION_ADDR(Generate_##aname); \
functions->c_code = NULL; \
functions->s_name = #aname; \
functions->name = k##aname; \
functions->flags = Code::ComputeHandlerFlags(Code::kind); \
functions->extra_args = BuiltinExtraArguments::kNone; \
+ functions->argc = 0; \
++functions;
BUILTIN_LIST_C(DEF_FUNCTION_PTR_C)
BUILTIN_LIST_A(DEF_FUNCTION_PTR_A)
+ BUILTIN_LIST_T(DEF_FUNCTION_PTR_T)
BUILTIN_LIST_H(DEF_FUNCTION_PTR_H)
BUILTIN_LIST_DEBUG_A(DEF_FUNCTION_PTR_A)
#undef DEF_FUNCTION_PTR_C
#undef DEF_FUNCTION_PTR_A
+#undef DEF_FUNCTION_PTR_H
+#undef DEF_FUNCTION_PTR_T
}
@@ -4364,43 +4842,15 @@ void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
const BuiltinDesc* functions = builtin_function_table.functions();
- // For now we generate builtin adaptor code into a stack-allocated
- // buffer, before copying it into individual code objects. Be careful
- // with alignment, some platforms don't like unaligned code.
-#ifdef DEBUG
- // We can generate a lot of debug code on Arm64.
- const size_t buffer_size = 32*KB;
-#elif V8_TARGET_ARCH_PPC64
- // 8 KB is insufficient on PPC64 when FLAG_debug_code is on.
- const size_t buffer_size = 10 * KB;
-#else
- const size_t buffer_size = 8*KB;
-#endif
- union { int force_alignment; byte buffer[buffer_size]; } u;
-
// Traverse the list of builtins and generate an adaptor in a
// separate code object for each one.
for (int i = 0; i < builtin_count; i++) {
if (create_heap_objects) {
- MacroAssembler masm(isolate, u.buffer, sizeof u.buffer,
- CodeObjectRequired::kYes);
- // Generate the code/adaptor.
- typedef void (*Generator)(MacroAssembler*, int, BuiltinExtraArguments);
- Generator g = FUNCTION_CAST<Generator>(functions[i].generator);
- // We pass all arguments to the generator, but it may not use all of
- // them. This works because the first arguments are on top of the
- // stack.
- DCHECK(!masm.has_frame());
- g(&masm, functions[i].name, functions[i].extra_args);
- // Move the code into the object heap.
- CodeDesc desc;
- masm.GetCode(&desc);
- Code::Flags flags = functions[i].flags;
- Handle<Code> code =
- isolate->factory()->NewCode(desc, flags, masm.CodeObject());
+ Handle<Code> code = (*functions[i].builder)(isolate, functions + i);
// Log the event and add the code to the builtins array.
PROFILE(isolate,
- CodeCreateEvent(Logger::BUILTIN_TAG, *code, functions[i].s_name));
+ CodeCreateEvent(Logger::BUILTIN_TAG, AbstractCode::cast(*code),
+ functions[i].s_name));
builtins_[i] = *code;
code->set_builtin_index(i);
#ifdef ENABLE_DISASSEMBLER
@@ -4470,6 +4920,11 @@ Handle<Code> Builtins::name() { \
reinterpret_cast<Code**>(builtin_address(k##name)); \
return Handle<Code>(code_address); \
}
+#define DEFINE_BUILTIN_ACCESSOR_T(name, argc) \
+ Handle<Code> Builtins::name() { \
+ Code** code_address = reinterpret_cast<Code**>(builtin_address(k##name)); \
+ return Handle<Code>(code_address); \
+ }
#define DEFINE_BUILTIN_ACCESSOR_H(name, kind) \
Handle<Code> Builtins::name() { \
Code** code_address = \
@@ -4478,11 +4933,13 @@ Handle<Code> Builtins::name() { \
}
BUILTIN_LIST_C(DEFINE_BUILTIN_ACCESSOR_C)
BUILTIN_LIST_A(DEFINE_BUILTIN_ACCESSOR_A)
+BUILTIN_LIST_T(DEFINE_BUILTIN_ACCESSOR_T)
BUILTIN_LIST_H(DEFINE_BUILTIN_ACCESSOR_H)
BUILTIN_LIST_DEBUG_A(DEFINE_BUILTIN_ACCESSOR_A)
#undef DEFINE_BUILTIN_ACCESSOR_C
#undef DEFINE_BUILTIN_ACCESSOR_A
-
+#undef DEFINE_BUILTIN_ACCESSOR_T
+#undef DEFINE_BUILTIN_ACCESSOR_H
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index 93e6e3d7f2..221d06f30f 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -11,6 +11,13 @@
namespace v8 {
namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CodeStubAssembler;
+
+} // namespace compiler
+
// Specifies extra arguments required by a C++ builtin.
enum class BuiltinExtraArguments : uint8_t {
kNone = 0u,
@@ -110,12 +117,17 @@ inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
V(FunctionConstructor, kTargetAndNewTarget) \
V(FunctionPrototypeBind, kNone) \
V(FunctionPrototypeToString, kNone) \
- V(FunctionHasInstance, kNone) \
\
V(GeneratorFunctionConstructor, kTargetAndNewTarget) \
\
V(GlobalEval, kTarget) \
\
+ V(MathAcos, kNone) \
+ V(MathAsin, kNone) \
+ V(MathAtan, kNone) \
+ V(MathFround, kNone) \
+ V(MathImul, kNone) \
+ \
V(ObjectAssign, kNone) \
V(ObjectCreate, kNone) \
V(ObjectFreeze, kNone) \
@@ -149,6 +161,8 @@ inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
V(ReflectSet, kNone) \
V(ReflectSetPrototypeOf, kNone) \
\
+ V(StringFromCharCode, kNone) \
+ \
V(SymbolConstructor, kNone) \
V(SymbolConstructor_ConstructStub, kTarget) \
\
@@ -265,6 +279,7 @@ inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
V(DatePrototypeGetUTCMonth, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(DatePrototypeGetUTCSeconds, BUILTIN, UNINITIALIZED, kNoExtraICState) \
\
+ V(FunctionHasInstance, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(FunctionPrototypeApply, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(FunctionPrototypeCall, BUILTIN, UNINITIALIZED, kNoExtraICState) \
\
@@ -285,7 +300,6 @@ inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
\
V(OnStackReplacement, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(InterruptCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(OsrAfterStackCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(StackCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
\
V(MarkCodeAsToBeExecutedOnce, BUILTIN, UNINITIALIZED, kNoExtraICState) \
@@ -293,6 +307,16 @@ inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
V(MarkCodeAsExecutedTwice, BUILTIN, UNINITIALIZED, kNoExtraICState) \
CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)
+// Define list of builtins implemented in TurboFan (with JS linkage).
+#define BUILTIN_LIST_T(V) \
+ V(MathCeil, 2) \
+ V(MathClz32, 2) \
+ V(MathFloor, 2) \
+ V(MathRound, 2) \
+ V(MathSqrt, 2) \
+ V(MathTrunc, 2) \
+ V(ObjectHasOwnProperty, 2)
+
// Define list of builtin handlers implemented in assembly.
#define BUILTIN_LIST_H(V) \
V(LoadIC_Slow, LOAD_IC) \
@@ -331,14 +355,16 @@ class Builtins {
enum Name {
#define DEF_ENUM_C(name, ignore) k##name,
#define DEF_ENUM_A(name, kind, state, extra) k##name,
+#define DEF_ENUM_T(name, argc) k##name,
#define DEF_ENUM_H(name, kind) k##name,
- BUILTIN_LIST_C(DEF_ENUM_C)
- BUILTIN_LIST_A(DEF_ENUM_A)
- BUILTIN_LIST_H(DEF_ENUM_H)
- BUILTIN_LIST_DEBUG_A(DEF_ENUM_A)
+ BUILTIN_LIST_C(DEF_ENUM_C) BUILTIN_LIST_A(DEF_ENUM_A)
+ BUILTIN_LIST_T(DEF_ENUM_T) BUILTIN_LIST_H(DEF_ENUM_H)
+ BUILTIN_LIST_DEBUG_A(DEF_ENUM_A)
#undef DEF_ENUM_C
#undef DEF_ENUM_A
- builtin_count
+#undef DEF_ENUM_T
+#undef DEF_ENUM_H
+ builtin_count
};
enum CFunctionId {
@@ -351,13 +377,17 @@ class Builtins {
#define DECLARE_BUILTIN_ACCESSOR_C(name, ignore) Handle<Code> name();
#define DECLARE_BUILTIN_ACCESSOR_A(name, kind, state, extra) \
Handle<Code> name();
+#define DECLARE_BUILTIN_ACCESSOR_T(name, argc) Handle<Code> name();
#define DECLARE_BUILTIN_ACCESSOR_H(name, kind) Handle<Code> name();
BUILTIN_LIST_C(DECLARE_BUILTIN_ACCESSOR_C)
BUILTIN_LIST_A(DECLARE_BUILTIN_ACCESSOR_A)
+ BUILTIN_LIST_T(DECLARE_BUILTIN_ACCESSOR_T)
BUILTIN_LIST_H(DECLARE_BUILTIN_ACCESSOR_H)
BUILTIN_LIST_DEBUG_A(DECLARE_BUILTIN_ACCESSOR_A)
#undef DECLARE_BUILTIN_ACCESSOR_C
#undef DECLARE_BUILTIN_ACCESSOR_A
+#undef DECLARE_BUILTIN_ACCESSOR_T
+#undef DECLARE_BUILTIN_ACCESSOR_H
// Convenience wrappers.
Handle<Code> CallFunction(
@@ -548,6 +578,7 @@ class Builtins {
// ES6 section 20.3.4.19 Date.prototype.getUTCSeconds ( )
static void Generate_DatePrototypeGetUTCSeconds(MacroAssembler* masm);
+ static void Generate_FunctionHasInstance(MacroAssembler* masm);
static void Generate_FunctionPrototypeApply(MacroAssembler* masm);
static void Generate_FunctionPrototypeCall(MacroAssembler* masm);
@@ -557,6 +588,12 @@ class Builtins {
static void Generate_InternalArrayCode(MacroAssembler* masm);
static void Generate_ArrayCode(MacroAssembler* masm);
+ // ES6 section 20.2.2.10 Math.ceil ( x )
+ static void Generate_MathCeil(compiler::CodeStubAssembler* assembler);
+ // ES6 section 20.2.2.11 Math.clz32 ( x )
+ static void Generate_MathClz32(compiler::CodeStubAssembler* assembler);
+ // ES6 section 20.2.2.16 Math.floor ( x )
+ static void Generate_MathFloor(compiler::CodeStubAssembler* assembler);
enum class MathMaxMinKind { kMax, kMin };
static void Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind);
// ES6 section 20.2.2.24 Math.max ( value1, value2 , ...values )
@@ -567,16 +604,25 @@ class Builtins {
static void Generate_MathMin(MacroAssembler* masm) {
Generate_MathMaxMin(masm, MathMaxMinKind::kMin);
}
+ // ES6 section 20.2.2.28 Math.round ( x )
+ static void Generate_MathRound(compiler::CodeStubAssembler* assembler);
+ // ES6 section 20.2.2.32 Math.sqrt ( x )
+ static void Generate_MathSqrt(compiler::CodeStubAssembler* assembler);
+ // ES6 section 20.2.2.35 Math.trunc ( x )
+ static void Generate_MathTrunc(compiler::CodeStubAssembler* assembler);
// ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Call]] case.
static void Generate_NumberConstructor(MacroAssembler* masm);
// ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Construct]] case.
static void Generate_NumberConstructor_ConstructStub(MacroAssembler* masm);
+ // ES6 section 19.1.3.2 Object.prototype.hasOwnProperty
+ static void Generate_ObjectHasOwnProperty(
+ compiler::CodeStubAssembler* assembler);
+
static void Generate_StringConstructor(MacroAssembler* masm);
static void Generate_StringConstructor_ConstructStub(MacroAssembler* masm);
static void Generate_OnStackReplacement(MacroAssembler* masm);
- static void Generate_OsrAfterStackCheck(MacroAssembler* masm);
static void Generate_InterruptCheck(MacroAssembler* masm);
static void Generate_StackCheck(MacroAssembler* masm);
diff --git a/deps/v8/src/code-factory.cc b/deps/v8/src/code-factory.cc
index 9898282d31..fbfdd5f644 100644
--- a/deps/v8/src/code-factory.cc
+++ b/deps/v8/src/code-factory.cc
@@ -119,13 +119,6 @@ Callable CodeFactory::CompareIC(Isolate* isolate, Token::Value op) {
// static
-Callable CodeFactory::CompareNilIC(Isolate* isolate, NilValue nil_value) {
- Handle<Code> code = CompareNilICStub::GetUninitialized(isolate, nil_value);
- return Callable(code, CompareNilDescriptor(isolate));
-}
-
-
-// static
Callable CodeFactory::BinaryOpIC(Isolate* isolate, Token::Value op) {
BinaryOpICStub stub(isolate, op);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
@@ -141,8 +134,8 @@ Callable CodeFactory::InstanceOf(Isolate* isolate) {
// static
Callable CodeFactory::ToBoolean(Isolate* isolate) {
- Handle<Code> code = ToBooleanStub::GetUninitialized(isolate);
- return Callable(code, ToBooleanDescriptor(isolate));
+ ToBooleanStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
@@ -154,6 +147,18 @@ Callable CodeFactory::ToNumber(Isolate* isolate) {
// static
+Callable CodeFactory::NonNumberToNumber(Isolate* isolate) {
+ NonNumberToNumberStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::StringToNumber(Isolate* isolate) {
+ StringToNumberStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
Callable CodeFactory::ToString(Isolate* isolate) {
ToStringStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
@@ -168,6 +173,12 @@ Callable CodeFactory::ToName(Isolate* isolate) {
// static
+Callable CodeFactory::ToInteger(Isolate* isolate) {
+ ToIntegerStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
Callable CodeFactory::ToLength(Isolate* isolate) {
ToLengthStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
@@ -201,6 +212,83 @@ Callable CodeFactory::RegExpExec(Isolate* isolate) {
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
+// static
+Callable CodeFactory::Add(Isolate* isolate) {
+ AddStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::Subtract(Isolate* isolate) {
+ SubtractStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::BitwiseAnd(Isolate* isolate) {
+ BitwiseAndStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::BitwiseOr(Isolate* isolate) {
+ BitwiseOrStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::BitwiseXor(Isolate* isolate) {
+ BitwiseXorStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::LessThan(Isolate* isolate) {
+ LessThanStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::LessThanOrEqual(Isolate* isolate) {
+ LessThanOrEqualStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::GreaterThan(Isolate* isolate) {
+ GreaterThanStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::GreaterThanOrEqual(Isolate* isolate) {
+ GreaterThanOrEqualStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::Equal(Isolate* isolate) {
+ EqualStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::NotEqual(Isolate* isolate) {
+ NotEqualStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::StrictEqual(Isolate* isolate) {
+ StrictEqualStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::StrictNotEqual(Isolate* isolate) {
+ StrictNotEqualStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
// static
Callable CodeFactory::StringAdd(Isolate* isolate, StringAddFlags flags,
@@ -209,13 +297,65 @@ Callable CodeFactory::StringAdd(Isolate* isolate, StringAddFlags flags,
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
+// static
+Callable CodeFactory::StringCompare(Isolate* isolate, Token::Value token) {
+ switch (token) {
+ case Token::EQ:
+ case Token::EQ_STRICT:
+ return StringEqual(isolate);
+ case Token::NE:
+ case Token::NE_STRICT:
+ return StringNotEqual(isolate);
+ case Token::LT:
+ return StringLessThan(isolate);
+ case Token::GT:
+ return StringGreaterThan(isolate);
+ case Token::LTE:
+ return StringLessThanOrEqual(isolate);
+ case Token::GTE:
+ return StringGreaterThanOrEqual(isolate);
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return StringEqual(isolate);
+}
// static
-Callable CodeFactory::StringCompare(Isolate* isolate) {
- StringCompareStub stub(isolate);
+Callable CodeFactory::StringEqual(Isolate* isolate) {
+ StringEqualStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
+// static
+Callable CodeFactory::StringNotEqual(Isolate* isolate) {
+ StringNotEqualStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::StringLessThan(Isolate* isolate) {
+ StringLessThanStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::StringLessThanOrEqual(Isolate* isolate) {
+ StringLessThanOrEqualStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::StringGreaterThan(Isolate* isolate) {
+ StringGreaterThanStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::StringGreaterThanOrEqual(Isolate* isolate) {
+ StringGreaterThanOrEqualStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
// static
Callable CodeFactory::SubString(Isolate* isolate) {
@@ -225,6 +365,12 @@ Callable CodeFactory::SubString(Isolate* isolate) {
// static
+Callable CodeFactory::StoreInterceptor(Isolate* isolate) {
+ StoreInterceptorStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
Callable CodeFactory::Typeof(Isolate* isolate) {
TypeofStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
@@ -310,6 +456,13 @@ Callable CodeFactory::AllocateMutableHeapNumber(Isolate* isolate) {
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
+#define SIMD128_ALLOC(TYPE, Type, type, lane_count, lane_type) \
+ Callable CodeFactory::Allocate##Type(Isolate* isolate) { \
+ Allocate##Type##Stub stub(isolate); \
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor()); \
+ }
+SIMD128_TYPES(SIMD128_ALLOC)
+#undef SIMD128_ALLOC
// static
Callable CodeFactory::AllocateInNewSpace(Isolate* isolate) {
@@ -326,8 +479,9 @@ Callable CodeFactory::ArgumentAdaptor(Isolate* isolate) {
// static
-Callable CodeFactory::Call(Isolate* isolate, ConvertReceiverMode mode) {
- return Callable(isolate->builtins()->Call(mode),
+Callable CodeFactory::Call(Isolate* isolate, ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
+ return Callable(isolate->builtins()->Call(mode, tail_call_mode),
CallTrampolineDescriptor(isolate));
}
diff --git a/deps/v8/src/code-factory.h b/deps/v8/src/code-factory.h
index fb1a165053..deb125f224 100644
--- a/deps/v8/src/code-factory.h
+++ b/deps/v8/src/code-factory.h
@@ -54,6 +54,8 @@ class CodeFactory final {
Isolate* isolate, LanguageMode mode,
InlineCacheState initialization_state);
+ static Callable StoreInterceptor(Isolate* isolate);
+
static Callable CompareIC(Isolate* isolate, Token::Value op);
static Callable CompareNilIC(Isolate* isolate, NilValue nil_value);
@@ -66,8 +68,11 @@ class CodeFactory final {
static Callable ToBoolean(Isolate* isolate);
static Callable ToNumber(Isolate* isolate);
+ static Callable NonNumberToNumber(Isolate* isolate);
+ static Callable StringToNumber(Isolate* isolate);
static Callable ToString(Isolate* isolate);
static Callable ToName(Isolate* isolate);
+ static Callable ToInteger(Isolate* isolate);
static Callable ToLength(Isolate* isolate);
static Callable ToObject(Isolate* isolate);
static Callable NumberToString(Isolate* isolate);
@@ -75,9 +80,29 @@ class CodeFactory final {
static Callable RegExpConstructResult(Isolate* isolate);
static Callable RegExpExec(Isolate* isolate);
+ static Callable Add(Isolate* isolate);
+ static Callable Subtract(Isolate* isolate);
+ static Callable BitwiseAnd(Isolate* isolate);
+ static Callable BitwiseOr(Isolate* isolate);
+ static Callable BitwiseXor(Isolate* isolate);
+ static Callable LessThan(Isolate* isolate);
+ static Callable LessThanOrEqual(Isolate* isolate);
+ static Callable GreaterThan(Isolate* isolate);
+ static Callable GreaterThanOrEqual(Isolate* isolate);
+ static Callable Equal(Isolate* isolate);
+ static Callable NotEqual(Isolate* isolate);
+ static Callable StrictEqual(Isolate* isolate);
+ static Callable StrictNotEqual(Isolate* isolate);
+
static Callable StringAdd(Isolate* isolate, StringAddFlags flags,
PretenureFlag pretenure_flag);
- static Callable StringCompare(Isolate* isolate);
+ static Callable StringCompare(Isolate* isolate, Token::Value token);
+ static Callable StringEqual(Isolate* isolate);
+ static Callable StringNotEqual(Isolate* isolate);
+ static Callable StringLessThan(Isolate* isolate);
+ static Callable StringLessThanOrEqual(Isolate* isolate);
+ static Callable StringGreaterThan(Isolate* isolate);
+ static Callable StringGreaterThanOrEqual(Isolate* isolate);
static Callable SubString(Isolate* isolate);
static Callable Typeof(Isolate* isolate);
@@ -96,11 +121,16 @@ class CodeFactory final {
static Callable AllocateHeapNumber(Isolate* isolate);
static Callable AllocateMutableHeapNumber(Isolate* isolate);
+#define SIMD128_ALLOC(TYPE, Type, type, lane_count, lane_type) \
+ static Callable Allocate##Type(Isolate* isolate);
+ SIMD128_TYPES(SIMD128_ALLOC)
+#undef SIMD128_ALLOC
static Callable AllocateInNewSpace(Isolate* isolate);
static Callable ArgumentAdaptor(Isolate* isolate);
static Callable Call(Isolate* isolate,
- ConvertReceiverMode mode = ConvertReceiverMode::kAny);
+ ConvertReceiverMode mode = ConvertReceiverMode::kAny,
+ TailCallMode tail_call_mode = TailCallMode::kDisallow);
static Callable CallFunction(
Isolate* isolate, ConvertReceiverMode mode = ConvertReceiverMode::kAny);
static Callable Construct(Isolate* isolate);
diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc
index 461baaa4e6..1d2fb811fb 100644
--- a/deps/v8/src/code-stubs-hydrogen.cc
+++ b/deps/v8/src/code-stubs-hydrogen.cc
@@ -78,6 +78,9 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
Representation representation,
bool transition_to_field);
+ HValue* BuildPushElement(HValue* object, HValue* argc,
+ HValue* argument_elements, ElementsKind kind);
+
enum ArgumentClass {
NONE,
SINGLE,
@@ -294,7 +297,7 @@ static Handle<Code> DoGenerateCode(Stub* stub) {
if (FLAG_profile_hydrogen_code_stub_compilation) {
timer.Start();
}
- Zone zone;
+ Zone zone(isolate->allocator());
CompilationInfo info(CodeStub::MajorName(stub->MajorKey()), isolate, &zone,
stub->GetCodeFlags());
// Parameter count is number of stack parameters.
@@ -780,6 +783,214 @@ Handle<Code> StoreScriptContextFieldStub::GenerateCode() {
return DoGenerateCode(this);
}
+HValue* CodeStubGraphBuilderBase::BuildPushElement(HValue* object, HValue* argc,
+ HValue* argument_elements,
+ ElementsKind kind) {
+ // Precheck whether all elements fit into the array.
+ if (!IsFastObjectElementsKind(kind)) {
+ LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement);
+ HValue* start = graph()->GetConstant0();
+ HValue* key = builder.BeginBody(start, argc, Token::LT);
+ {
+ HInstruction* argument =
+ Add<HAccessArgumentsAt>(argument_elements, argc, key);
+ IfBuilder can_store(this);
+ can_store.IfNot<HIsSmiAndBranch>(argument);
+ if (IsFastDoubleElementsKind(kind)) {
+ can_store.And();
+ can_store.IfNot<HCompareMap>(argument,
+ isolate()->factory()->heap_number_map());
+ }
+ can_store.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
+ can_store.End();
+ }
+ builder.EndBody();
+ }
+
+ HValue* length = Add<HLoadNamedField>(object, nullptr,
+ HObjectAccess::ForArrayLength(kind));
+ HValue* new_length = AddUncasted<HAdd>(length, argc);
+ HValue* max_key = AddUncasted<HSub>(new_length, graph()->GetConstant1());
+
+ HValue* elements = Add<HLoadNamedField>(object, nullptr,
+ HObjectAccess::ForElementsPointer());
+ elements = BuildCheckForCapacityGrow(object, elements, kind, length, max_key,
+ true, STORE);
+
+ LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement);
+ HValue* start = graph()->GetConstant0();
+ HValue* key = builder.BeginBody(start, argc, Token::LT);
+ {
+ HValue* argument = Add<HAccessArgumentsAt>(argument_elements, argc, key);
+ HValue* index = AddUncasted<HAdd>(key, length);
+ AddElementAccess(elements, index, argument, object, nullptr, kind, STORE);
+ }
+ builder.EndBody();
+ return new_length;
+}
+
+template <>
+HValue* CodeStubGraphBuilder<FastArrayPushStub>::BuildCodeStub() {
+ // TODO(verwaest): Fix deoptimizer messages.
+ HValue* argc = GetArgumentsLength();
+ HInstruction* argument_elements = Add<HArgumentsElements>(false, false);
+ HInstruction* object = Add<HAccessArgumentsAt>(argument_elements, argc,
+ graph()->GetConstantMinus1());
+ BuildCheckHeapObject(object);
+ HValue* map = Add<HLoadNamedField>(object, nullptr, HObjectAccess::ForMap());
+ Add<HCheckInstanceType>(object, HCheckInstanceType::IS_JS_ARRAY);
+
+ // Disallow pushing onto prototypes. It might be the JSArray prototype.
+ // Disallow pushing onto non-extensible objects.
+ {
+ HValue* bit_field2 =
+ Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField2());
+ HValue* mask =
+ Add<HConstant>(static_cast<int>(Map::IsPrototypeMapBits::kMask) |
+ (1 << Map::kIsExtensible));
+ HValue* bits = AddUncasted<HBitwise>(Token::BIT_AND, bit_field2, mask);
+ IfBuilder check(this);
+ check.If<HCompareNumericAndBranch>(
+ bits, Add<HConstant>(1 << Map::kIsExtensible), Token::NE);
+ check.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
+ check.End();
+ }
+
+ // Disallow pushing onto observed objects.
+ {
+ HValue* bit_field =
+ Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField());
+ HValue* mask = Add<HConstant>(1 << Map::kIsObserved);
+ HValue* bit = AddUncasted<HBitwise>(Token::BIT_AND, bit_field, mask);
+ IfBuilder check(this);
+ check.If<HCompareNumericAndBranch>(bit, mask, Token::EQ);
+ check.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
+ check.End();
+ }
+
+ // Disallow pushing onto arrays in dictionary named property mode. We need to
+ // figure out whether the length property is still writable.
+ {
+ HValue* bit_field3 =
+ Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField3());
+ HValue* mask = Add<HConstant>(static_cast<int>(Map::DictionaryMap::kMask));
+ HValue* bit = AddUncasted<HBitwise>(Token::BIT_AND, bit_field3, mask);
+ IfBuilder check(this);
+ check.If<HCompareNumericAndBranch>(bit, mask, Token::EQ);
+ check.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
+ check.End();
+ }
+
+ // Check whether the length property is writable. The length property is the
+ // only default named property on arrays. It's nonconfigurable, hence is
+ // guaranteed to stay the first property.
+ {
+ HValue* descriptors =
+ Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapDescriptors());
+ HValue* details = Add<HLoadKeyed>(
+ descriptors, Add<HConstant>(DescriptorArray::ToDetailsIndex(0)),
+ nullptr, nullptr, FAST_SMI_ELEMENTS);
+ HValue* mask =
+ Add<HConstant>(READ_ONLY << PropertyDetails::AttributesField::kShift);
+ HValue* bit = AddUncasted<HBitwise>(Token::BIT_AND, details, mask);
+ IfBuilder readonly(this);
+ readonly.If<HCompareNumericAndBranch>(bit, mask, Token::EQ);
+ readonly.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
+ readonly.End();
+ }
+
+ HValue* null = Add<HLoadRoot>(Heap::kNullValueRootIndex);
+ HValue* empty = Add<HLoadRoot>(Heap::kEmptyFixedArrayRootIndex);
+ environment()->Push(map);
+ LoopBuilder check_prototypes(this);
+ check_prototypes.BeginBody(1);
+ {
+ HValue* parent_map = environment()->Pop();
+ HValue* prototype = Add<HLoadNamedField>(parent_map, nullptr,
+ HObjectAccess::ForPrototype());
+
+ IfBuilder is_null(this);
+ is_null.If<HCompareObjectEqAndBranch>(prototype, null);
+ is_null.Then();
+ check_prototypes.Break();
+ is_null.End();
+
+ HValue* prototype_map =
+ Add<HLoadNamedField>(prototype, nullptr, HObjectAccess::ForMap());
+ HValue* instance_type = Add<HLoadNamedField>(
+ prototype_map, nullptr, HObjectAccess::ForMapInstanceType());
+ IfBuilder check_instance_type(this);
+ check_instance_type.If<HCompareNumericAndBranch>(
+ instance_type, Add<HConstant>(LAST_CUSTOM_ELEMENTS_RECEIVER),
+ Token::LTE);
+ check_instance_type.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
+ check_instance_type.End();
+
+ HValue* elements = Add<HLoadNamedField>(
+ prototype, nullptr, HObjectAccess::ForElementsPointer());
+ IfBuilder no_elements(this);
+ no_elements.IfNot<HCompareObjectEqAndBranch>(elements, empty);
+ no_elements.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
+ no_elements.End();
+
+ environment()->Push(prototype_map);
+ }
+ check_prototypes.EndBody();
+
+ HValue* bit_field2 =
+ Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField2());
+ HValue* kind = BuildDecodeField<Map::ElementsKindBits>(bit_field2);
+
+ // Below we only check the upper bound of the relevant ranges to include both
+ // holey and non-holey versions. We check them in order smi, object, double
+ // since smi < object < double.
+ STATIC_ASSERT(FAST_SMI_ELEMENTS < FAST_HOLEY_SMI_ELEMENTS);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS < FAST_HOLEY_ELEMENTS);
+ STATIC_ASSERT(FAST_ELEMENTS < FAST_HOLEY_ELEMENTS);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS < FAST_HOLEY_DOUBLE_ELEMENTS);
+ STATIC_ASSERT(FAST_DOUBLE_ELEMENTS < FAST_HOLEY_DOUBLE_ELEMENTS);
+ IfBuilder has_smi_elements(this);
+ has_smi_elements.If<HCompareNumericAndBranch>(
+ kind, Add<HConstant>(FAST_HOLEY_SMI_ELEMENTS), Token::LTE);
+ has_smi_elements.Then();
+ {
+ HValue* new_length = BuildPushElement(object, argc, argument_elements,
+ FAST_HOLEY_SMI_ELEMENTS);
+ environment()->Push(new_length);
+ }
+ has_smi_elements.Else();
+ {
+ IfBuilder has_object_elements(this);
+ has_object_elements.If<HCompareNumericAndBranch>(
+ kind, Add<HConstant>(FAST_HOLEY_ELEMENTS), Token::LTE);
+ has_object_elements.Then();
+ {
+ HValue* new_length = BuildPushElement(object, argc, argument_elements,
+ FAST_HOLEY_ELEMENTS);
+ environment()->Push(new_length);
+ }
+ has_object_elements.Else();
+ {
+ IfBuilder has_double_elements(this);
+ has_double_elements.If<HCompareNumericAndBranch>(
+ kind, Add<HConstant>(FAST_HOLEY_DOUBLE_ELEMENTS), Token::LTE);
+ has_double_elements.Then();
+ {
+ HValue* new_length = BuildPushElement(object, argc, argument_elements,
+ FAST_HOLEY_DOUBLE_ELEMENTS);
+ environment()->Push(new_length);
+ }
+ has_double_elements.ElseDeopt(Deoptimizer::kFastArrayPushFailed);
+ has_double_elements.End();
+ }
+ has_object_elements.End();
+ }
+ has_smi_elements.End();
+
+ return environment()->Pop();
+}
+
+Handle<Code> FastArrayPushStub::GenerateCode() { return DoGenerateCode(this); }
template <>
HValue* CodeStubGraphBuilder<GrowArrayElementsStub>::BuildCodeStub() {
@@ -1185,36 +1396,6 @@ Handle<Code> TransitionElementsKindStub::GenerateCode() {
template <>
-HValue* CodeStubGraphBuilder<AllocateHeapNumberStub>::BuildCodeStub() {
- HValue* result =
- Add<HAllocate>(Add<HConstant>(HeapNumber::kSize), HType::HeapNumber(),
- NOT_TENURED, HEAP_NUMBER_TYPE);
- AddStoreMapConstant(result, isolate()->factory()->heap_number_map());
- return result;
-}
-
-
-Handle<Code> AllocateHeapNumberStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<AllocateMutableHeapNumberStub>::BuildCodeStub() {
- HValue* result =
- Add<HAllocate>(Add<HConstant>(HeapNumber::kSize), HType::HeapObject(),
- NOT_TENURED, MUTABLE_HEAP_NUMBER_TYPE);
- AddStoreMapConstant(result, isolate()->factory()->mutable_heap_number_map());
- return result;
-}
-
-
-Handle<Code> AllocateMutableHeapNumberStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-
-template <>
HValue* CodeStubGraphBuilder<AllocateInNewSpaceStub>::BuildCodeStub() {
HValue* result = Add<HAllocate>(GetParameter(0), HType::Tagged(), NOT_TENURED,
JS_OBJECT_TYPE);
@@ -1419,31 +1600,6 @@ Handle<Code> InternalArrayNArgumentsConstructorStub::GenerateCode() {
template <>
-HValue* CodeStubGraphBuilder<CompareNilICStub>::BuildCodeInitializedStub() {
- Isolate* isolate = graph()->isolate();
- CompareNilICStub* stub = casted_stub();
- HIfContinuation continuation;
- Handle<Map> sentinel_map(isolate->heap()->meta_map());
- Type* type = stub->GetType(zone(), sentinel_map);
- BuildCompareNil(GetParameter(0), type, &continuation, kEmbedMapsViaWeakCells);
- IfBuilder if_nil(this, &continuation);
- if_nil.Then();
- if (continuation.IsFalseReachable()) {
- if_nil.Else();
- if_nil.Return(graph()->GetConstantFalse());
- }
- if_nil.End();
- return continuation.IsTrueReachable() ? graph()->GetConstantTrue()
- : graph()->GetConstantUndefined();
-}
-
-
-Handle<Code> CompareNilICStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-
-template <>
HValue* CodeStubGraphBuilder<BinaryOpICStub>::BuildCodeInitializedStub() {
BinaryOpICState state = casted_stub()->state();
@@ -1588,11 +1744,10 @@ HValue* CodeStubGraphBuilderBase::BuildToString(HValue* input, bool convert) {
}
if_inputisprimitive.End();
// Convert the primitive to a string value.
- ToStringDescriptor descriptor(isolate());
ToStringStub stub(isolate());
HValue* values[] = {context(), Pop()};
Push(AddUncasted<HCallWithDescriptor>(
- Add<HConstant>(stub.GetCode()), 0, descriptor,
+ Add<HConstant>(stub.GetCode()), 0, stub.GetCallInterfaceDescriptor(),
Vector<HValue*>(values, arraysize(values))));
}
if_inputisstring.End();
@@ -1706,10 +1861,9 @@ Handle<Code> StringAddStub::GenerateCode() {
return DoGenerateCode(this);
}
-
template <>
-HValue* CodeStubGraphBuilder<ToBooleanStub>::BuildCodeInitializedStub() {
- ToBooleanStub* stub = casted_stub();
+HValue* CodeStubGraphBuilder<ToBooleanICStub>::BuildCodeInitializedStub() {
+ ToBooleanICStub* stub = casted_stub();
IfBuilder if_true(this);
if_true.If<HBranch>(GetParameter(0), stub->types());
if_true.Then();
@@ -1719,11 +1873,7 @@ HValue* CodeStubGraphBuilder<ToBooleanStub>::BuildCodeInitializedStub() {
return graph()->GetConstantFalse();
}
-
-Handle<Code> ToBooleanStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
+Handle<Code> ToBooleanICStub::GenerateCode() { return DoGenerateCode(this); }
template <>
HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
@@ -1855,7 +2005,7 @@ Handle<Code> ElementsTransitionAndStoreStub::GenerateCode() {
template <>
HValue* CodeStubGraphBuilder<ToObjectStub>::BuildCodeStub() {
- HValue* receiver = GetParameter(ToObjectDescriptor::kReceiverIndex);
+ HValue* receiver = GetParameter(TypeConversionDescriptor::kArgumentIndex);
return BuildToObject(receiver);
}
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 4e5efcd8e0..60b350cd93 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -7,6 +7,7 @@
#include <sstream>
#include "src/bootstrapper.h"
+#include "src/code-factory.h"
#include "src/compiler/code-stub-assembler.h"
#include "src/factory.h"
#include "src/gdb-jit.h"
@@ -82,7 +83,8 @@ void CodeStub::RecordCodeGeneration(Handle<Code> code) {
std::ostringstream os;
os << *this;
PROFILE(isolate(),
- CodeCreateEvent(Logger::STUB_TAG, *code, os.str().c_str()));
+ CodeCreateEvent(Logger::STUB_TAG, AbstractCode::cast(*code),
+ os.str().c_str()));
Counters* counters = isolate()->counters();
counters->total_stubs_code_size()->Increment(code->instruction_size());
#ifdef DEBUG
@@ -451,31 +453,9 @@ void CompareICStub::Generate(MacroAssembler* masm) {
}
-void CompareNilICStub::UpdateStatus(Handle<Object> object) {
- State state = this->state();
- DCHECK(!state.Contains(GENERIC));
- State old_state = state;
- if (object->IsNull()) {
- state.Add(NULL_TYPE);
- } else if (object->IsUndefined()) {
- state.Add(UNDEFINED);
- } else if (object->IsUndetectableObject() || object->IsSmi()) {
- state.RemoveAll();
- state.Add(GENERIC);
- } else if (IsMonomorphic()) {
- state.RemoveAll();
- state.Add(GENERIC);
- } else {
- state.Add(MONOMORPHIC_MAP);
- }
- TraceTransition(old_state, state);
- set_sub_minor_key(TypesBits::update(sub_minor_key(), state.ToIntegral()));
-}
-
-
Handle<Code> TurboFanCodeStub::GenerateCode() {
const char* name = CodeStub::MajorName(MajorKey());
- Zone zone;
+ Zone zone(isolate()->allocator());
CallInterfaceDescriptor descriptor(GetCallInterfaceDescriptor());
compiler::CodeStubAssembler assembler(isolate(), &zone, descriptor,
GetCodeFlags(), name);
@@ -483,6 +463,40 @@ Handle<Code> TurboFanCodeStub::GenerateCode() {
return assembler.GenerateCode();
}
+void AllocateHeapNumberStub::GenerateAssembly(
+ compiler::CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+
+ Node* result = assembler->AllocateHeapNumber();
+ assembler->Return(result);
+}
+
+void AllocateMutableHeapNumberStub::GenerateAssembly(
+ compiler::CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+
+ Node* result = assembler->Allocate(HeapNumber::kSize);
+ assembler->StoreMapNoWriteBarrier(
+ result,
+ assembler->HeapConstant(isolate()->factory()->mutable_heap_number_map()));
+ assembler->Return(result);
+}
+
+#define SIMD128_GEN_ASM(TYPE, Type, type, lane_count, lane_type) \
+ void Allocate##Type##Stub::GenerateAssembly( \
+ compiler::CodeStubAssembler* assembler) const { \
+ compiler::Node* result = assembler->Allocate( \
+ Simd128Value::kSize, compiler::CodeStubAssembler::kNone); \
+ compiler::Node* map_offset = \
+ assembler->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag); \
+ compiler::Node* map = assembler->IntPtrAdd(result, map_offset); \
+ assembler->StoreNoWriteBarrier( \
+ MachineRepresentation::kTagged, map, \
+ assembler->HeapConstant(isolate()->factory()->type##_map())); \
+ assembler->Return(result); \
+ }
+SIMD128_TYPES(SIMD128_GEN_ASM)
+#undef SIMD128_GEN_ASM
void StringLengthStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
@@ -494,6 +508,2575 @@ void StringLengthStub::GenerateAssembly(
assembler->Return(result);
}
+void AddStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
+ typedef compiler::CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef compiler::CodeStubAssembler::Variable Variable;
+
+ Node* context = assembler->Parameter(2);
+
+ // Shared entry for floating point addition.
+ Label do_fadd(assembler);
+ Variable var_fadd_lhs(assembler, MachineRepresentation::kFloat64),
+ var_fadd_rhs(assembler, MachineRepresentation::kFloat64);
+
+ // We might need to loop several times due to ToPrimitive, ToString and/or
+ // ToNumber conversions.
+ Variable var_lhs(assembler, MachineRepresentation::kTagged),
+ var_rhs(assembler, MachineRepresentation::kTagged);
+ Variable* loop_vars[2] = {&var_lhs, &var_rhs};
+ Label loop(assembler, 2, loop_vars);
+ var_lhs.Bind(assembler->Parameter(0));
+ var_rhs.Bind(assembler->Parameter(1));
+ assembler->Goto(&loop);
+ assembler->Bind(&loop);
+ {
+ // Load the current {lhs} and {rhs} values.
+ Node* lhs = var_lhs.value();
+ Node* rhs = var_rhs.value();
+
+ // Check if the {lhs} is a Smi or a HeapObject.
+ Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+
+ assembler->Bind(&if_lhsissmi);
+ {
+ // Check if the {rhs} is also a Smi.
+ Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
+ &if_rhsisnotsmi);
+
+ assembler->Bind(&if_rhsissmi);
+ {
+ // Try fast Smi addition first.
+ Node* pair = assembler->SmiAddWithOverflow(lhs, rhs);
+ Node* overflow = assembler->Projection(1, pair);
+
+ // Check if the Smi additon overflowed.
+ Label if_overflow(assembler), if_notoverflow(assembler);
+ assembler->Branch(overflow, &if_overflow, &if_notoverflow);
+
+ assembler->Bind(&if_overflow);
+ {
+ var_fadd_lhs.Bind(assembler->SmiToFloat64(lhs));
+ var_fadd_rhs.Bind(assembler->SmiToFloat64(rhs));
+ assembler->Goto(&do_fadd);
+ }
+
+ assembler->Bind(&if_notoverflow);
+ assembler->Return(assembler->Projection(0, pair));
+ }
+
+ assembler->Bind(&if_rhsisnotsmi);
+ {
+ // Load the map of {rhs}.
+ Node* rhs_map = assembler->LoadObjectField(rhs, HeapObject::kMapOffset);
+
+ // Check if the {rhs} is a HeapNumber.
+ Label if_rhsisnumber(assembler),
+ if_rhsisnotnumber(assembler, Label::kDeferred);
+ Node* number_map = assembler->HeapNumberMapConstant();
+ assembler->Branch(assembler->WordEqual(rhs_map, number_map),
+ &if_rhsisnumber, &if_rhsisnotnumber);
+
+ assembler->Bind(&if_rhsisnumber);
+ {
+ var_fadd_lhs.Bind(assembler->SmiToFloat64(lhs));
+ var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
+ assembler->Goto(&do_fadd);
+ }
+
+ assembler->Bind(&if_rhsisnotnumber);
+ {
+ // Load the instance type of {rhs}.
+ Node* rhs_instance_type = assembler->LoadMapInstanceType(rhs_map);
+
+ // Check if the {rhs} is a String.
+ Label if_rhsisstring(assembler, Label::kDeferred),
+ if_rhsisnotstring(assembler, Label::kDeferred);
+ assembler->Branch(assembler->Int32LessThan(
+ rhs_instance_type,
+ assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+ &if_rhsisstring, &if_rhsisnotstring);
+
+ assembler->Bind(&if_rhsisstring);
+ {
+ // Convert {lhs}, which is a Smi, to a String and concatenate the
+ // resulting string with the String {rhs}.
+ Callable callable = CodeFactory::StringAdd(
+ assembler->isolate(), STRING_ADD_CONVERT_LEFT, NOT_TENURED);
+ assembler->TailCallStub(callable, context, lhs, rhs);
+ }
+
+ assembler->Bind(&if_rhsisnotstring);
+ {
+ // Check if {rhs} is a JSReceiver.
+ Label if_rhsisreceiver(assembler, Label::kDeferred),
+ if_rhsisnotreceiver(assembler, Label::kDeferred);
+ assembler->Branch(
+ assembler->Int32LessThanOrEqual(
+ assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
+ rhs_instance_type),
+ &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+ assembler->Bind(&if_rhsisreceiver);
+ {
+ // Convert {rhs} to a primitive first passing no hint.
+ // TODO(bmeurer): Hook up ToPrimitiveStub here, once it's there.
+ var_rhs.Bind(
+ assembler->CallRuntime(Runtime::kToPrimitive, context, rhs));
+ assembler->Goto(&loop);
+ }
+
+ assembler->Bind(&if_rhsisnotreceiver);
+ {
+ // Convert {rhs} to a Number first.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+ }
+ }
+
+ assembler->Bind(&if_lhsisnotsmi);
+ {
+ // Load the map and instance type of {lhs}.
+ Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
+
+ // Check if {lhs} is a String.
+ Label if_lhsisstring(assembler), if_lhsisnotstring(assembler);
+ assembler->Branch(assembler->Int32LessThan(
+ lhs_instance_type,
+ assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+ &if_lhsisstring, &if_lhsisnotstring);
+
+ assembler->Bind(&if_lhsisstring);
+ {
+ // Convert {rhs} to a String (using the sequence of ToPrimitive with
+ // no hint followed by ToString) and concatenate the strings.
+ Callable callable = CodeFactory::StringAdd(
+ assembler->isolate(), STRING_ADD_CONVERT_RIGHT, NOT_TENURED);
+ assembler->TailCallStub(callable, context, lhs, rhs);
+ }
+
+ assembler->Bind(&if_lhsisnotstring);
+ {
+ // Check if {rhs} is a Smi.
+ Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
+ &if_rhsisnotsmi);
+
+ assembler->Bind(&if_rhsissmi);
+ {
+ // Check if {lhs} is a Number.
+ Label if_lhsisnumber(assembler),
+ if_lhsisnotnumber(assembler, Label::kDeferred);
+ assembler->Branch(assembler->Word32Equal(
+ lhs_instance_type,
+ assembler->Int32Constant(HEAP_NUMBER_TYPE)),
+ &if_lhsisnumber, &if_lhsisnotnumber);
+
+ assembler->Bind(&if_lhsisnumber);
+ {
+ // The {lhs} is a HeapNumber, the {rhs} is a Smi, just add them.
+ var_fadd_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
+ var_fadd_rhs.Bind(assembler->SmiToFloat64(rhs));
+ assembler->Goto(&do_fadd);
+ }
+
+ assembler->Bind(&if_lhsisnotnumber);
+ {
+ // The {lhs} is neither a Number nor a String, and the {rhs} is a
+ // Smi.
+ Label if_lhsisreceiver(assembler, Label::kDeferred),
+ if_lhsisnotreceiver(assembler, Label::kDeferred);
+ assembler->Branch(
+ assembler->Int32LessThanOrEqual(
+ assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
+ lhs_instance_type),
+ &if_lhsisreceiver, &if_lhsisnotreceiver);
+
+ assembler->Bind(&if_lhsisreceiver);
+ {
+ // Convert {lhs} to a primitive first passing no hint.
+ // TODO(bmeurer): Hook up ToPrimitiveStub here, once it's there.
+ var_lhs.Bind(
+ assembler->CallRuntime(Runtime::kToPrimitive, context, lhs));
+ assembler->Goto(&loop);
+ }
+
+ assembler->Bind(&if_lhsisnotreceiver);
+ {
+ // Convert {lhs} to a Number first.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_lhs.Bind(assembler->CallStub(callable, context, lhs));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+
+ assembler->Bind(&if_rhsisnotsmi);
+ {
+ // Load the instance type of {rhs}.
+ Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
+
+ // Check if {rhs} is a String.
+ Label if_rhsisstring(assembler), if_rhsisnotstring(assembler);
+ assembler->Branch(assembler->Int32LessThan(
+ rhs_instance_type,
+ assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+ &if_rhsisstring, &if_rhsisnotstring);
+
+ assembler->Bind(&if_rhsisstring);
+ {
+ // Convert {lhs} to a String (using the sequence of ToPrimitive with
+ // no hint followed by ToString) and concatenate the strings.
+ Callable callable = CodeFactory::StringAdd(
+ assembler->isolate(), STRING_ADD_CONVERT_LEFT, NOT_TENURED);
+ assembler->TailCallStub(callable, context, lhs, rhs);
+ }
+
+ assembler->Bind(&if_rhsisnotstring);
+ {
+ // Check if {lhs} is a HeapNumber.
+ Label if_lhsisnumber(assembler), if_lhsisnotnumber(assembler);
+ assembler->Branch(assembler->Word32Equal(
+ lhs_instance_type,
+ assembler->Int32Constant(HEAP_NUMBER_TYPE)),
+ &if_lhsisnumber, &if_lhsisnotnumber);
+
+ assembler->Bind(&if_lhsisnumber);
+ {
+ // Check if {rhs} is also a HeapNumber.
+ Label if_rhsisnumber(assembler),
+ if_rhsisnotnumber(assembler, Label::kDeferred);
+ assembler->Branch(assembler->Word32Equal(
+ rhs_instance_type,
+ assembler->Int32Constant(HEAP_NUMBER_TYPE)),
+ &if_rhsisnumber, &if_rhsisnotnumber);
+
+ assembler->Bind(&if_rhsisnumber);
+ {
+ // Perform a floating point addition.
+ var_fadd_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
+ var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
+ assembler->Goto(&do_fadd);
+ }
+
+ assembler->Bind(&if_rhsisnotnumber);
+ {
+ // Check if {rhs} is a JSReceiver.
+ Label if_rhsisreceiver(assembler, Label::kDeferred),
+ if_rhsisnotreceiver(assembler, Label::kDeferred);
+ assembler->Branch(
+ assembler->Int32LessThanOrEqual(
+ assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
+ rhs_instance_type),
+ &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+ assembler->Bind(&if_rhsisreceiver);
+ {
+ // Convert {rhs} to a primitive first passing no hint.
+ // TODO(bmeurer): Hook up ToPrimitiveStub here too.
+ var_rhs.Bind(assembler->CallRuntime(Runtime::kToPrimitive,
+ context, rhs));
+ assembler->Goto(&loop);
+ }
+
+ assembler->Bind(&if_rhsisnotreceiver);
+ {
+ // Convert {rhs} to a Number first.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+
+ assembler->Bind(&if_lhsisnotnumber);
+ {
+ // Check if {lhs} is a JSReceiver.
+ Label if_lhsisreceiver(assembler, Label::kDeferred),
+ if_lhsisnotreceiver(assembler);
+ assembler->Branch(
+ assembler->Int32LessThanOrEqual(
+ assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
+ lhs_instance_type),
+ &if_lhsisreceiver, &if_lhsisnotreceiver);
+
+ assembler->Bind(&if_lhsisreceiver);
+ {
+ // Convert {lhs} to a primitive first passing no hint.
+ // TODO(bmeurer): Hook up ToPrimitiveStub here, once it's there.
+ var_lhs.Bind(assembler->CallRuntime(Runtime::kToPrimitive,
+ context, lhs));
+ assembler->Goto(&loop);
+ }
+
+ assembler->Bind(&if_lhsisnotreceiver);
+ {
+ // Check if {rhs} is a JSReceiver.
+ Label if_rhsisreceiver(assembler, Label::kDeferred),
+ if_rhsisnotreceiver(assembler, Label::kDeferred);
+ assembler->Branch(
+ assembler->Int32LessThanOrEqual(
+ assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
+ rhs_instance_type),
+ &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+ assembler->Bind(&if_rhsisreceiver);
+ {
+ // Convert {rhs} to a primitive first passing no hint.
+ // TODO(bmeurer): Hook up ToPrimitiveStub here too.
+ var_rhs.Bind(assembler->CallRuntime(Runtime::kToPrimitive,
+ context, rhs));
+ assembler->Goto(&loop);
+ }
+
+ assembler->Bind(&if_rhsisnotreceiver);
+ {
+ // Convert {lhs} to a Number first.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_lhs.Bind(assembler->CallStub(callable, context, lhs));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ assembler->Bind(&do_fadd);
+ {
+ Node* lhs_value = var_fadd_lhs.value();
+ Node* rhs_value = var_fadd_rhs.value();
+ Node* value = assembler->Float64Add(lhs_value, rhs_value);
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ assembler->Return(result);
+ }
+}
+
+void SubtractStub::GenerateAssembly(
+ compiler::CodeStubAssembler* assembler) const {
+ typedef compiler::CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef compiler::CodeStubAssembler::Variable Variable;
+
+ Node* context = assembler->Parameter(2);
+
+ // Shared entry for floating point subtraction.
+ Label do_fsub(assembler);
+ Variable var_fsub_lhs(assembler, MachineRepresentation::kFloat64),
+ var_fsub_rhs(assembler, MachineRepresentation::kFloat64);
+
+ // We might need to loop several times due to ToPrimitive and/or ToNumber
+ // conversions.
+ Variable var_lhs(assembler, MachineRepresentation::kTagged),
+ var_rhs(assembler, MachineRepresentation::kTagged);
+ Variable* loop_vars[2] = {&var_lhs, &var_rhs};
+ Label loop(assembler, 2, loop_vars);
+ var_lhs.Bind(assembler->Parameter(0));
+ var_rhs.Bind(assembler->Parameter(1));
+ assembler->Goto(&loop);
+ assembler->Bind(&loop);
+ {
+ // Load the current {lhs} and {rhs} values.
+ Node* lhs = var_lhs.value();
+ Node* rhs = var_rhs.value();
+
+ // Check if the {lhs} is a Smi or a HeapObject.
+ Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+
+ assembler->Bind(&if_lhsissmi);
+ {
+ // Check if the {rhs} is also a Smi.
+ Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
+ &if_rhsisnotsmi);
+
+ assembler->Bind(&if_rhsissmi);
+ {
+ // Try a fast Smi subtraction first.
+ Node* pair = assembler->SmiSubWithOverflow(lhs, rhs);
+ Node* overflow = assembler->Projection(1, pair);
+
+ // Check if the Smi subtraction overflowed.
+ Label if_overflow(assembler), if_notoverflow(assembler);
+ assembler->Branch(overflow, &if_overflow, &if_notoverflow);
+
+ assembler->Bind(&if_overflow);
+ {
+ // The result doesn't fit into Smi range.
+ var_fsub_lhs.Bind(assembler->SmiToFloat64(lhs));
+ var_fsub_rhs.Bind(assembler->SmiToFloat64(rhs));
+ assembler->Goto(&do_fsub);
+ }
+
+ assembler->Bind(&if_notoverflow);
+ assembler->Return(assembler->Projection(0, pair));
+ }
+
+ assembler->Bind(&if_rhsisnotsmi);
+ {
+ // Load the map of the {rhs}.
+ Node* rhs_map = assembler->LoadMap(rhs);
+
+ // Check if {rhs} is a HeapNumber.
+ Label if_rhsisnumber(assembler),
+ if_rhsisnotnumber(assembler, Label::kDeferred);
+ Node* number_map = assembler->HeapNumberMapConstant();
+ assembler->Branch(assembler->WordEqual(rhs_map, number_map),
+ &if_rhsisnumber, &if_rhsisnotnumber);
+
+ assembler->Bind(&if_rhsisnumber);
+ {
+ // Perform a floating point subtraction.
+ var_fsub_lhs.Bind(assembler->SmiToFloat64(lhs));
+ var_fsub_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
+ assembler->Goto(&do_fsub);
+ }
+
+ assembler->Bind(&if_rhsisnotnumber);
+ {
+ // Convert the {rhs} to a Number first.
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+
+ assembler->Bind(&if_lhsisnotsmi);
+ {
+ // Load the map of the {lhs}.
+ Node* lhs_map = assembler->LoadMap(lhs);
+
+ // Check if the {lhs} is a HeapNumber.
+ Label if_lhsisnumber(assembler),
+ if_lhsisnotnumber(assembler, Label::kDeferred);
+ Node* number_map = assembler->HeapNumberMapConstant();
+ assembler->Branch(assembler->WordEqual(lhs_map, number_map),
+ &if_lhsisnumber, &if_lhsisnotnumber);
+
+ assembler->Bind(&if_lhsisnumber);
+ {
+ // Check if the {rhs} is a Smi.
+ Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
+ &if_rhsisnotsmi);
+
+ assembler->Bind(&if_rhsissmi);
+ {
+ // Perform a floating point subtraction.
+ var_fsub_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
+ var_fsub_rhs.Bind(assembler->SmiToFloat64(rhs));
+ assembler->Goto(&do_fsub);
+ }
+
+ assembler->Bind(&if_rhsisnotsmi);
+ {
+ // Load the map of the {rhs}.
+ Node* rhs_map = assembler->LoadMap(rhs);
+
+ // Check if the {rhs} is a HeapNumber.
+ Label if_rhsisnumber(assembler),
+ if_rhsisnotnumber(assembler, Label::kDeferred);
+ assembler->Branch(assembler->WordEqual(rhs_map, number_map),
+ &if_rhsisnumber, &if_rhsisnotnumber);
+
+ assembler->Bind(&if_rhsisnumber);
+ {
+ // Perform a floating point subtraction.
+ var_fsub_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
+ var_fsub_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
+ assembler->Goto(&do_fsub);
+ }
+
+ assembler->Bind(&if_rhsisnotnumber);
+ {
+ // Convert the {rhs} to a Number first.
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+
+ assembler->Bind(&if_lhsisnotnumber);
+ {
+ // Convert the {lhs} to a Number first.
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_lhs.Bind(assembler->CallStub(callable, context, lhs));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+
+ assembler->Bind(&do_fsub);
+ {
+ Node* lhs_value = var_fsub_lhs.value();
+ Node* rhs_value = var_fsub_rhs.value();
+ Node* value = assembler->Float64Sub(lhs_value, rhs_value);
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ assembler->Return(result);
+ }
+}
+
+void BitwiseAndStub::GenerateAssembly(
+ compiler::CodeStubAssembler* assembler) const {
+ using compiler::Node;
+
+ Node* lhs = assembler->Parameter(0);
+ Node* rhs = assembler->Parameter(1);
+ Node* context = assembler->Parameter(2);
+ Node* lhs_value = assembler->TruncateTaggedToWord32(context, lhs);
+ Node* rhs_value = assembler->TruncateTaggedToWord32(context, rhs);
+ Node* value = assembler->Word32And(lhs_value, rhs_value);
+ Node* result = assembler->ChangeInt32ToTagged(value);
+ assembler->Return(result);
+}
+
+void BitwiseOrStub::GenerateAssembly(
+ compiler::CodeStubAssembler* assembler) const {
+ using compiler::Node;
+
+ Node* lhs = assembler->Parameter(0);
+ Node* rhs = assembler->Parameter(1);
+ Node* context = assembler->Parameter(2);
+ Node* lhs_value = assembler->TruncateTaggedToWord32(context, lhs);
+ Node* rhs_value = assembler->TruncateTaggedToWord32(context, rhs);
+ Node* value = assembler->Word32Or(lhs_value, rhs_value);
+ Node* result = assembler->ChangeInt32ToTagged(value);
+ assembler->Return(result);
+}
+
+void BitwiseXorStub::GenerateAssembly(
+ compiler::CodeStubAssembler* assembler) const {
+ using compiler::Node;
+
+ Node* lhs = assembler->Parameter(0);
+ Node* rhs = assembler->Parameter(1);
+ Node* context = assembler->Parameter(2);
+ Node* lhs_value = assembler->TruncateTaggedToWord32(context, lhs);
+ Node* rhs_value = assembler->TruncateTaggedToWord32(context, rhs);
+ Node* value = assembler->Word32Xor(lhs_value, rhs_value);
+ Node* result = assembler->ChangeInt32ToTagged(value);
+ assembler->Return(result);
+}
+
+namespace {
+
+enum RelationalComparisonMode {
+ kLessThan,
+ kLessThanOrEqual,
+ kGreaterThan,
+ kGreaterThanOrEqual
+};
+
+void GenerateAbstractRelationalComparison(
+ compiler::CodeStubAssembler* assembler, RelationalComparisonMode mode) {
+ typedef compiler::CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef compiler::CodeStubAssembler::Variable Variable;
+
+ Node* context = assembler->Parameter(2);
+
+ Label return_true(assembler), return_false(assembler);
+
+ // Shared entry for floating point comparison.
+ Label do_fcmp(assembler);
+ Variable var_fcmp_lhs(assembler, MachineRepresentation::kFloat64),
+ var_fcmp_rhs(assembler, MachineRepresentation::kFloat64);
+
+ // We might need to loop several times due to ToPrimitive and/or ToNumber
+ // conversions.
+ Variable var_lhs(assembler, MachineRepresentation::kTagged),
+ var_rhs(assembler, MachineRepresentation::kTagged);
+ Variable* loop_vars[2] = {&var_lhs, &var_rhs};
+ Label loop(assembler, 2, loop_vars);
+ var_lhs.Bind(assembler->Parameter(0));
+ var_rhs.Bind(assembler->Parameter(1));
+ assembler->Goto(&loop);
+ assembler->Bind(&loop);
+ {
+ // Load the current {lhs} and {rhs} values.
+ Node* lhs = var_lhs.value();
+ Node* rhs = var_rhs.value();
+
+ // Check if the {lhs} is a Smi or a HeapObject.
+ Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+
+ assembler->Bind(&if_lhsissmi);
+ {
+ // Check if {rhs} is a Smi or a HeapObject.
+ Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
+ &if_rhsisnotsmi);
+
+ assembler->Bind(&if_rhsissmi);
+ {
+ // Both {lhs} and {rhs} are Smi, so just perform a fast Smi comparison.
+ switch (mode) {
+ case kLessThan:
+ assembler->BranchIfSmiLessThan(lhs, rhs, &return_true,
+ &return_false);
+ break;
+ case kLessThanOrEqual:
+ assembler->BranchIfSmiLessThanOrEqual(lhs, rhs, &return_true,
+ &return_false);
+ break;
+ case kGreaterThan:
+ assembler->BranchIfSmiLessThan(rhs, lhs, &return_true,
+ &return_false);
+ break;
+ case kGreaterThanOrEqual:
+ assembler->BranchIfSmiLessThanOrEqual(rhs, lhs, &return_true,
+ &return_false);
+ break;
+ }
+ }
+
+ assembler->Bind(&if_rhsisnotsmi);
+ {
+ // Load the map of {rhs}.
+ Node* rhs_map = assembler->LoadMap(rhs);
+
+ // Check if the {rhs} is a HeapNumber.
+ Node* number_map = assembler->HeapNumberMapConstant();
+ Label if_rhsisnumber(assembler),
+ if_rhsisnotnumber(assembler, Label::kDeferred);
+ assembler->Branch(assembler->WordEqual(rhs_map, number_map),
+ &if_rhsisnumber, &if_rhsisnotnumber);
+
+ assembler->Bind(&if_rhsisnumber);
+ {
+ // Convert the {lhs} and {rhs} to floating point values, and
+ // perform a floating point comparison.
+ var_fcmp_lhs.Bind(assembler->SmiToFloat64(lhs));
+ var_fcmp_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
+ assembler->Goto(&do_fcmp);
+ }
+
+ assembler->Bind(&if_rhsisnotnumber);
+ {
+ // Convert the {rhs} to a Number; we don't need to perform the
+ // dedicated ToPrimitive(rhs, hint Number) operation, as the
+ // ToNumber(rhs) will by itself already invoke ToPrimitive with
+ // a Number hint.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+
+ assembler->Bind(&if_lhsisnotsmi);
+ {
+ // Load the HeapNumber map for later comparisons.
+ Node* number_map = assembler->HeapNumberMapConstant();
+
+ // Load the map of {lhs}.
+ Node* lhs_map = assembler->LoadMap(lhs);
+
+ // Check if {rhs} is a Smi or a HeapObject.
+ Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
+ &if_rhsisnotsmi);
+
+ assembler->Bind(&if_rhsissmi);
+ {
+ // Check if the {lhs} is a HeapNumber.
+ Label if_lhsisnumber(assembler),
+ if_lhsisnotnumber(assembler, Label::kDeferred);
+ assembler->Branch(assembler->WordEqual(lhs_map, number_map),
+ &if_lhsisnumber, &if_lhsisnotnumber);
+
+ assembler->Bind(&if_lhsisnumber);
+ {
+ // Convert the {lhs} and {rhs} to floating point values, and
+ // perform a floating point comparison.
+ var_fcmp_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
+ var_fcmp_rhs.Bind(assembler->SmiToFloat64(rhs));
+ assembler->Goto(&do_fcmp);
+ }
+
+ assembler->Bind(&if_lhsisnotnumber);
+ {
+ // Convert the {lhs} to a Number; we don't need to perform the
+ // dedicated ToPrimitive(lhs, hint Number) operation, as the
+ // ToNumber(lhs) will by itself already invoke ToPrimitive with
+ // a Number hint.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_lhs.Bind(assembler->CallStub(callable, context, lhs));
+ assembler->Goto(&loop);
+ }
+ }
+
+ assembler->Bind(&if_rhsisnotsmi);
+ {
+ // Load the map of {rhs}.
+ Node* rhs_map = assembler->LoadMap(rhs);
+
+ // Check if {lhs} is a HeapNumber.
+ Label if_lhsisnumber(assembler), if_lhsisnotnumber(assembler);
+ assembler->Branch(assembler->WordEqual(lhs_map, number_map),
+ &if_lhsisnumber, &if_lhsisnotnumber);
+
+ assembler->Bind(&if_lhsisnumber);
+ {
+ // Check if {rhs} is also a HeapNumber.
+ Label if_rhsisnumber(assembler),
+ if_rhsisnotnumber(assembler, Label::kDeferred);
+ assembler->Branch(assembler->WordEqual(lhs_map, rhs_map),
+ &if_rhsisnumber, &if_rhsisnotnumber);
+
+ assembler->Bind(&if_rhsisnumber);
+ {
+ // Convert the {lhs} and {rhs} to floating point values, and
+ // perform a floating point comparison.
+ var_fcmp_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
+ var_fcmp_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
+ assembler->Goto(&do_fcmp);
+ }
+
+ assembler->Bind(&if_rhsisnotnumber);
+ {
+ // Convert the {rhs} to a Number; we don't need to perform
+ // dedicated ToPrimitive(rhs, hint Number) operation, as the
+ // ToNumber(rhs) will by itself already invoke ToPrimitive with
+ // a Number hint.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+ assembler->Goto(&loop);
+ }
+ }
+
+ assembler->Bind(&if_lhsisnotnumber);
+ {
+ // Load the instance type of {lhs}.
+ Node* lhs_instance_type = assembler->LoadMapInstanceType(lhs_map);
+
+ // Check if {lhs} is a String.
+ Label if_lhsisstring(assembler),
+ if_lhsisnotstring(assembler, Label::kDeferred);
+ assembler->Branch(assembler->Int32LessThan(
+ lhs_instance_type,
+ assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+ &if_lhsisstring, &if_lhsisnotstring);
+
+ assembler->Bind(&if_lhsisstring);
+ {
+ // Load the instance type of {rhs}.
+ Node* rhs_instance_type = assembler->LoadMapInstanceType(rhs_map);
+
+ // Check if {rhs} is also a String.
+ Label if_rhsisstring(assembler),
+ if_rhsisnotstring(assembler, Label::kDeferred);
+ assembler->Branch(assembler->Int32LessThan(
+ rhs_instance_type, assembler->Int32Constant(
+ FIRST_NONSTRING_TYPE)),
+ &if_rhsisstring, &if_rhsisnotstring);
+
+ assembler->Bind(&if_rhsisstring);
+ {
+ // Both {lhs} and {rhs} are strings.
+ switch (mode) {
+ case kLessThan:
+ assembler->TailCallStub(
+ CodeFactory::StringLessThan(assembler->isolate()),
+ context, lhs, rhs);
+ break;
+ case kLessThanOrEqual:
+ assembler->TailCallStub(
+ CodeFactory::StringLessThanOrEqual(assembler->isolate()),
+ context, lhs, rhs);
+ break;
+ case kGreaterThan:
+ assembler->TailCallStub(
+ CodeFactory::StringGreaterThan(assembler->isolate()),
+ context, lhs, rhs);
+ break;
+ case kGreaterThanOrEqual:
+ assembler->TailCallStub(CodeFactory::StringGreaterThanOrEqual(
+ assembler->isolate()),
+ context, lhs, rhs);
+ break;
+ }
+ }
+
+ assembler->Bind(&if_rhsisnotstring);
+ {
+ // The {lhs} is a String, while {rhs} is neither a Number nor a
+ // String, so we need to call ToPrimitive(rhs, hint Number) if
+ // {rhs} is a receiver or ToNumber(lhs) and ToNumber(rhs) in the
+ // other cases.
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ Label if_rhsisreceiver(assembler, Label::kDeferred),
+ if_rhsisnotreceiver(assembler, Label::kDeferred);
+ assembler->Branch(
+ assembler->Int32LessThanOrEqual(
+ assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
+ rhs_instance_type),
+ &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+ assembler->Bind(&if_rhsisreceiver);
+ {
+ // Convert {rhs} to a primitive first passing Number hint.
+ // TODO(bmeurer): Hook up ToPrimitiveStub here, once it's there.
+ var_rhs.Bind(assembler->CallRuntime(
+ Runtime::kToPrimitive_Number, context, rhs));
+ assembler->Goto(&loop);
+ }
+
+ assembler->Bind(&if_rhsisnotreceiver);
+ {
+ // Convert both {lhs} and {rhs} to Number.
+ Callable callable = CodeFactory::ToNumber(assembler->isolate());
+ var_lhs.Bind(assembler->CallStub(callable, context, lhs));
+ var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+
+ assembler->Bind(&if_lhsisnotstring);
+ {
+ // The {lhs} is neither a Number nor a String, so we need to call
+ // ToPrimitive(lhs, hint Number) if {lhs} is a receiver or
+ // ToNumber(lhs) and ToNumber(rhs) in the other cases.
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ Label if_lhsisreceiver(assembler, Label::kDeferred),
+ if_lhsisnotreceiver(assembler, Label::kDeferred);
+ assembler->Branch(
+ assembler->Int32LessThanOrEqual(
+ assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
+ lhs_instance_type),
+ &if_lhsisreceiver, &if_lhsisnotreceiver);
+
+ assembler->Bind(&if_lhsisreceiver);
+ {
+ // Convert {lhs} to a primitive first passing Number hint.
+ // TODO(bmeurer): Hook up ToPrimitiveStub here, once it's there.
+ var_lhs.Bind(assembler->CallRuntime(Runtime::kToPrimitive_Number,
+ context, lhs));
+ assembler->Goto(&loop);
+ }
+
+ assembler->Bind(&if_lhsisnotreceiver);
+ {
+ // Convert both {lhs} and {rhs} to Number.
+ Callable callable = CodeFactory::ToNumber(assembler->isolate());
+ var_lhs.Bind(assembler->CallStub(callable, context, lhs));
+ var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ assembler->Bind(&do_fcmp);
+ {
+ // Load the {lhs} and {rhs} floating point values.
+ Node* lhs = var_fcmp_lhs.value();
+ Node* rhs = var_fcmp_rhs.value();
+
+ // Perform a fast floating point comparison.
+ switch (mode) {
+ case kLessThan:
+ assembler->BranchIfFloat64LessThan(lhs, rhs, &return_true,
+ &return_false);
+ break;
+ case kLessThanOrEqual:
+ assembler->BranchIfFloat64LessThanOrEqual(lhs, rhs, &return_true,
+ &return_false);
+ break;
+ case kGreaterThan:
+ assembler->BranchIfFloat64GreaterThan(lhs, rhs, &return_true,
+ &return_false);
+ break;
+ case kGreaterThanOrEqual:
+ assembler->BranchIfFloat64GreaterThanOrEqual(lhs, rhs, &return_true,
+ &return_false);
+ break;
+ }
+ }
+
+ assembler->Bind(&return_true);
+ assembler->Return(assembler->BooleanConstant(true));
+
+ assembler->Bind(&return_false);
+ assembler->Return(assembler->BooleanConstant(false));
+}
+
+enum ResultMode { kDontNegateResult, kNegateResult };
+
+void GenerateEqual_Same(compiler::CodeStubAssembler* assembler,
+ compiler::Node* value,
+ compiler::CodeStubAssembler::Label* if_equal,
+ compiler::CodeStubAssembler::Label* if_notequal) {
+ // In case of abstract or strict equality checks, we need additional checks
+ // for NaN values because they are not considered equal, even if both the
+ // left and the right hand side reference exactly the same value.
+ // TODO(bmeurer): This seems to violate the SIMD.js specification, but it
+ // seems to be what is tested in the current SIMD.js testsuite.
+
+ typedef compiler::CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+
+ // Check if {value} is a Smi or a HeapObject.
+ Label if_valueissmi(assembler), if_valueisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(value), &if_valueissmi,
+ &if_valueisnotsmi);
+
+ assembler->Bind(&if_valueisnotsmi);
+ {
+ // Load the map of {value}.
+ Node* value_map = assembler->LoadMap(value);
+
+ // Check if {value} (and therefore {rhs}) is a HeapNumber.
+ Node* number_map = assembler->HeapNumberMapConstant();
+ Label if_valueisnumber(assembler), if_valueisnotnumber(assembler);
+ assembler->Branch(assembler->WordEqual(value_map, number_map),
+ &if_valueisnumber, &if_valueisnotnumber);
+
+ assembler->Bind(&if_valueisnumber);
+ {
+ // Convert {value} (and therefore {rhs}) to floating point value.
+ Node* value_value = assembler->LoadHeapNumberValue(value);
+
+ // Check if the HeapNumber value is a NaN.
+ assembler->BranchIfFloat64IsNaN(value_value, if_notequal, if_equal);
+ }
+
+ assembler->Bind(&if_valueisnotnumber);
+ assembler->Goto(if_equal);
+ }
+
+ assembler->Bind(&if_valueissmi);
+ assembler->Goto(if_equal);
+}
+
+void GenerateEqual_Simd128Value_HeapObject(
+ compiler::CodeStubAssembler* assembler, compiler::Node* lhs,
+ compiler::Node* lhs_map, compiler::Node* rhs, compiler::Node* rhs_map,
+ compiler::CodeStubAssembler::Label* if_equal,
+ compiler::CodeStubAssembler::Label* if_notequal) {
+ typedef compiler::CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+
+ // Check if {lhs} and {rhs} have the same map.
+ Label if_mapsame(assembler), if_mapnotsame(assembler);
+ assembler->Branch(assembler->WordEqual(lhs_map, rhs_map), &if_mapsame,
+ &if_mapnotsame);
+
+ assembler->Bind(&if_mapsame);
+ {
+ // Both {lhs} and {rhs} are Simd128Values with the same map, need special
+ // handling for Float32x4 because of NaN comparisons.
+ Label if_float32x4(assembler), if_notfloat32x4(assembler);
+ Node* float32x4_map =
+ assembler->HeapConstant(assembler->factory()->float32x4_map());
+ assembler->Branch(assembler->WordEqual(lhs_map, float32x4_map),
+ &if_float32x4, &if_notfloat32x4);
+
+ assembler->Bind(&if_float32x4);
+ {
+ // Both {lhs} and {rhs} are Float32x4, compare the lanes individually
+ // using a floating point comparison.
+ for (int offset = Float32x4::kValueOffset - kHeapObjectTag;
+ offset < Float32x4::kSize - kHeapObjectTag;
+ offset += sizeof(float)) {
+ // Load the floating point values for {lhs} and {rhs}.
+ Node* lhs_value = assembler->Load(MachineType::Float32(), lhs,
+ assembler->IntPtrConstant(offset));
+ Node* rhs_value = assembler->Load(MachineType::Float32(), rhs,
+ assembler->IntPtrConstant(offset));
+
+ // Perform a floating point comparison.
+ Label if_valueequal(assembler), if_valuenotequal(assembler);
+ assembler->Branch(assembler->Float32Equal(lhs_value, rhs_value),
+ &if_valueequal, &if_valuenotequal);
+ assembler->Bind(&if_valuenotequal);
+ assembler->Goto(if_notequal);
+ assembler->Bind(&if_valueequal);
+ }
+
+ // All 4 lanes match, {lhs} and {rhs} considered equal.
+ assembler->Goto(if_equal);
+ }
+
+ assembler->Bind(&if_notfloat32x4);
+ {
+ // For other Simd128Values we just perform a bitwise comparison.
+ for (int offset = Simd128Value::kValueOffset - kHeapObjectTag;
+ offset < Simd128Value::kSize - kHeapObjectTag;
+ offset += kPointerSize) {
+ // Load the word values for {lhs} and {rhs}.
+ Node* lhs_value = assembler->Load(MachineType::Pointer(), lhs,
+ assembler->IntPtrConstant(offset));
+ Node* rhs_value = assembler->Load(MachineType::Pointer(), rhs,
+ assembler->IntPtrConstant(offset));
+
+ // Perform a bitwise word-comparison.
+ Label if_valueequal(assembler), if_valuenotequal(assembler);
+ assembler->Branch(assembler->WordEqual(lhs_value, rhs_value),
+ &if_valueequal, &if_valuenotequal);
+ assembler->Bind(&if_valuenotequal);
+ assembler->Goto(if_notequal);
+ assembler->Bind(&if_valueequal);
+ }
+
+ // Bitwise comparison succeeded, {lhs} and {rhs} considered equal.
+ assembler->Goto(if_equal);
+ }
+ }
+
+ assembler->Bind(&if_mapnotsame);
+ assembler->Goto(if_notequal);
+}
+
+// ES6 section 7.2.12 Abstract Equality Comparison
+void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
+ // This is a slightly optimized version of Object::Equals represented as
+ // scheduled TurboFan graph utilizing the CodeStubAssembler. Whenever you
+ // change something functionality wise in here, remember to update the
+ // Object::Equals method as well.
+ typedef compiler::CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef compiler::CodeStubAssembler::Variable Variable;
+
+ Node* context = assembler->Parameter(2);
+
+ Label if_equal(assembler), if_notequal(assembler);
+
+ // Shared entry for floating point comparison.
+ Label do_fcmp(assembler);
+ Variable var_fcmp_lhs(assembler, MachineRepresentation::kFloat64),
+ var_fcmp_rhs(assembler, MachineRepresentation::kFloat64);
+
+ // We might need to loop several times due to ToPrimitive and/or ToNumber
+ // conversions.
+ Variable var_lhs(assembler, MachineRepresentation::kTagged),
+ var_rhs(assembler, MachineRepresentation::kTagged);
+ Variable* loop_vars[2] = {&var_lhs, &var_rhs};
+ Label loop(assembler, 2, loop_vars);
+ var_lhs.Bind(assembler->Parameter(0));
+ var_rhs.Bind(assembler->Parameter(1));
+ assembler->Goto(&loop);
+ assembler->Bind(&loop);
+ {
+ // Load the current {lhs} and {rhs} values.
+ Node* lhs = var_lhs.value();
+ Node* rhs = var_rhs.value();
+
+ // Check if {lhs} and {rhs} refer to the same object.
+ Label if_same(assembler), if_notsame(assembler);
+ assembler->Branch(assembler->WordEqual(lhs, rhs), &if_same, &if_notsame);
+
+ assembler->Bind(&if_same);
+ {
+ // The {lhs} and {rhs} reference the exact same value, yet we need special
+ // treatment for HeapNumber, as NaN is not equal to NaN.
+ GenerateEqual_Same(assembler, lhs, &if_equal, &if_notequal);
+ }
+
+ assembler->Bind(&if_notsame);
+ {
+ // Check if {lhs} is a Smi or a HeapObject.
+ Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(lhs), &if_lhsissmi,
+ &if_lhsisnotsmi);
+
+ assembler->Bind(&if_lhsissmi);
+ {
+ // Check if {rhs} is a Smi or a HeapObject.
+ Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
+ &if_rhsisnotsmi);
+
+ assembler->Bind(&if_rhsissmi);
+ assembler->Goto(&if_notequal);
+
+ assembler->Bind(&if_rhsisnotsmi);
+ {
+ // Load the map of {rhs}.
+ Node* rhs_map = assembler->LoadMap(rhs);
+
+ // Check if {rhs} is a HeapNumber.
+ Node* number_map = assembler->HeapNumberMapConstant();
+ Label if_rhsisnumber(assembler),
+ if_rhsisnotnumber(assembler, Label::kDeferred);
+ assembler->Branch(assembler->WordEqual(rhs_map, number_map),
+ &if_rhsisnumber, &if_rhsisnotnumber);
+
+ assembler->Bind(&if_rhsisnumber);
+ {
+ // Convert {lhs} and {rhs} to floating point values, and
+ // perform a floating point comparison.
+ var_fcmp_lhs.Bind(assembler->SmiToFloat64(lhs));
+ var_fcmp_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
+ assembler->Goto(&do_fcmp);
+ }
+
+ assembler->Bind(&if_rhsisnotnumber);
+ {
+ // Load the instance type of the {rhs}.
+ Node* rhs_instance_type = assembler->LoadMapInstanceType(rhs_map);
+
+ // Check if the {rhs} is a String.
+ Label if_rhsisstring(assembler, Label::kDeferred),
+ if_rhsisnotstring(assembler, Label::kDeferred);
+ assembler->Branch(assembler->Int32LessThan(
+ rhs_instance_type, assembler->Int32Constant(
+ FIRST_NONSTRING_TYPE)),
+ &if_rhsisstring, &if_rhsisnotstring);
+
+ assembler->Bind(&if_rhsisstring);
+ {
+ // Convert the {rhs} to a Number.
+ Callable callable =
+ CodeFactory::StringToNumber(assembler->isolate());
+ var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+ assembler->Goto(&loop);
+ }
+
+ assembler->Bind(&if_rhsisnotstring);
+ {
+ // Check if the {rhs} is a Boolean.
+ Node* boolean_map = assembler->BooleanMapConstant();
+ Label if_rhsisboolean(assembler, Label::kDeferred),
+ if_rhsisnotboolean(assembler, Label::kDeferred);
+ assembler->Branch(assembler->WordEqual(rhs_map, boolean_map),
+ &if_rhsisboolean, &if_rhsisnotboolean);
+
+ assembler->Bind(&if_rhsisboolean);
+ {
+ // The {rhs} is a Boolean, load its number value.
+ var_rhs.Bind(
+ assembler->LoadObjectField(rhs, Oddball::kToNumberOffset));
+ assembler->Goto(&loop);
+ }
+
+ assembler->Bind(&if_rhsisnotboolean);
+ {
+ // Check if the {rhs} is a Receiver.
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ Label if_rhsisreceiver(assembler, Label::kDeferred),
+ if_rhsisnotreceiver(assembler, Label::kDeferred);
+ assembler->Branch(
+ assembler->Int32LessThanOrEqual(
+ assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
+ rhs_instance_type),
+ &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+ assembler->Bind(&if_rhsisreceiver);
+ {
+ // Convert {rhs} to a primitive first (passing no hint).
+ // TODO(bmeurer): Hook up ToPrimitiveStub here once it exists.
+ var_rhs.Bind(assembler->CallRuntime(Runtime::kToPrimitive,
+ context, rhs));
+ assembler->Goto(&loop);
+ }
+
+ assembler->Bind(&if_rhsisnotreceiver);
+ assembler->Goto(&if_notequal);
+ }
+ }
+ }
+ }
+ }
+
+ assembler->Bind(&if_lhsisnotsmi);
+ {
+ // Check if {rhs} is a Smi or a HeapObject.
+ Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
+ &if_rhsisnotsmi);
+
+ assembler->Bind(&if_rhsissmi);
+ {
+ // The {lhs} is a HeapObject and the {rhs} is a Smi; swapping {lhs}
+ // and {rhs} is not observable and doesn't matter for the result, so
+ // we can just swap them and use the Smi handling above (for {lhs}
+ // being a Smi).
+ var_lhs.Bind(rhs);
+ var_rhs.Bind(lhs);
+ assembler->Goto(&loop);
+ }
+
+ assembler->Bind(&if_rhsisnotsmi);
+ {
+ Label if_lhsisstring(assembler), if_lhsisnumber(assembler),
+ if_lhsissymbol(assembler), if_lhsissimd128value(assembler),
+ if_lhsisoddball(assembler), if_lhsisreceiver(assembler);
+
+ // Both {lhs} and {rhs} are HeapObjects, load their maps
+ // and their instance types.
+ Node* lhs_map = assembler->LoadMap(lhs);
+ Node* rhs_map = assembler->LoadMap(rhs);
+
+ // Load the instance types of {lhs} and {rhs}.
+ Node* lhs_instance_type = assembler->LoadMapInstanceType(lhs_map);
+ Node* rhs_instance_type = assembler->LoadMapInstanceType(rhs_map);
+
+ // Dispatch based on the instance type of {lhs}.
+ size_t const kNumCases = FIRST_NONSTRING_TYPE + 4;
+ Label* case_labels[kNumCases];
+ int32_t case_values[kNumCases];
+ for (int32_t i = 0; i < FIRST_NONSTRING_TYPE; ++i) {
+ case_labels[i] = new Label(assembler);
+ case_values[i] = i;
+ }
+ case_labels[FIRST_NONSTRING_TYPE + 0] = &if_lhsisnumber;
+ case_values[FIRST_NONSTRING_TYPE + 0] = HEAP_NUMBER_TYPE;
+ case_labels[FIRST_NONSTRING_TYPE + 1] = &if_lhsissymbol;
+ case_values[FIRST_NONSTRING_TYPE + 1] = SYMBOL_TYPE;
+ case_labels[FIRST_NONSTRING_TYPE + 2] = &if_lhsissimd128value;
+ case_values[FIRST_NONSTRING_TYPE + 2] = SIMD128_VALUE_TYPE;
+ case_labels[FIRST_NONSTRING_TYPE + 3] = &if_lhsisoddball;
+ case_values[FIRST_NONSTRING_TYPE + 3] = ODDBALL_TYPE;
+ assembler->Switch(lhs_instance_type, &if_lhsisreceiver, case_values,
+ case_labels, arraysize(case_values));
+ for (int32_t i = 0; i < FIRST_NONSTRING_TYPE; ++i) {
+ assembler->Bind(case_labels[i]);
+ assembler->Goto(&if_lhsisstring);
+ delete case_labels[i];
+ }
+
+ assembler->Bind(&if_lhsisstring);
+ {
+ // Check if {rhs} is also a String.
+ Label if_rhsisstring(assembler),
+ if_rhsisnotstring(assembler, Label::kDeferred);
+ assembler->Branch(assembler->Int32LessThan(
+ rhs_instance_type, assembler->Int32Constant(
+ FIRST_NONSTRING_TYPE)),
+ &if_rhsisstring, &if_rhsisnotstring);
+
+ assembler->Bind(&if_rhsisstring);
+ {
+ // Both {lhs} and {rhs} are of type String, just do the
+ // string comparison then.
+ Callable callable =
+ (mode == kDontNegateResult)
+ ? CodeFactory::StringEqual(assembler->isolate())
+ : CodeFactory::StringNotEqual(assembler->isolate());
+ assembler->TailCallStub(callable, context, lhs, rhs);
+ }
+
+ assembler->Bind(&if_rhsisnotstring);
+ {
+ // The {lhs} is a String and the {rhs} is some other HeapObject.
+ // Swapping {lhs} and {rhs} is not observable and doesn't matter
+ // for the result, so we can just swap them and use the String
+ // handling below (for {rhs} being a String).
+ var_lhs.Bind(rhs);
+ var_rhs.Bind(lhs);
+ assembler->Goto(&loop);
+ }
+ }
+
+ assembler->Bind(&if_lhsisnumber);
+ {
+ // Check if {rhs} is also a HeapNumber.
+ Label if_rhsisnumber(assembler),
+ if_rhsisnotnumber(assembler, Label::kDeferred);
+ assembler->Branch(
+ assembler->Word32Equal(lhs_instance_type, rhs_instance_type),
+ &if_rhsisnumber, &if_rhsisnotnumber);
+
+ assembler->Bind(&if_rhsisnumber);
+ {
+ // Convert {lhs} and {rhs} to floating point values, and
+ // perform a floating point comparison.
+ var_fcmp_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
+ var_fcmp_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
+ assembler->Goto(&do_fcmp);
+ }
+
+ assembler->Bind(&if_rhsisnotnumber);
+ {
+ // The {lhs} is a Number, the {rhs} is some other HeapObject.
+ Label if_rhsisstring(assembler, Label::kDeferred),
+ if_rhsisnotstring(assembler);
+ assembler->Branch(
+ assembler->Int32LessThan(
+ rhs_instance_type,
+ assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+ &if_rhsisstring, &if_rhsisnotstring);
+
+ assembler->Bind(&if_rhsisstring);
+ {
+ // The {rhs} is a String and the {lhs} is a HeapNumber; we need
+ // to convert the {rhs} to a Number and compare the output to
+ // the Number on the {lhs}.
+ Callable callable =
+ CodeFactory::StringToNumber(assembler->isolate());
+ var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+ assembler->Goto(&loop);
+ }
+
+ assembler->Bind(&if_rhsisnotstring);
+ {
+ // Check if the {rhs} is a JSReceiver.
+ Label if_rhsisreceiver(assembler, Label::kDeferred),
+ if_rhsisnotreceiver(assembler);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ assembler->Branch(
+ assembler->Int32LessThanOrEqual(
+ assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
+ rhs_instance_type),
+ &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+ assembler->Bind(&if_rhsisreceiver);
+ {
+ // The {lhs} is a Primitive and the {rhs} is a JSReceiver.
+ // Swapping {lhs} and {rhs} is not observable and doesn't
+ // matter for the result, so we can just swap them and use
+ // the JSReceiver handling below (for {lhs} being a
+ // JSReceiver).
+ var_lhs.Bind(rhs);
+ var_rhs.Bind(lhs);
+ assembler->Goto(&loop);
+ }
+
+ assembler->Bind(&if_rhsisnotreceiver);
+ {
+ // Check if {rhs} is a Boolean.
+ Label if_rhsisboolean(assembler),
+ if_rhsisnotboolean(assembler);
+ Node* boolean_map = assembler->BooleanMapConstant();
+ assembler->Branch(assembler->WordEqual(rhs_map, boolean_map),
+ &if_rhsisboolean, &if_rhsisnotboolean);
+
+ assembler->Bind(&if_rhsisboolean);
+ {
+ // The {rhs} is a Boolean, convert it to a Smi first.
+ var_rhs.Bind(assembler->LoadObjectField(
+ rhs, Oddball::kToNumberOffset));
+ assembler->Goto(&loop);
+ }
+
+ assembler->Bind(&if_rhsisnotboolean);
+ assembler->Goto(&if_notequal);
+ }
+ }
+ }
+ }
+
+ assembler->Bind(&if_lhsisoddball);
+ {
+ // The {lhs} is an Oddball and {rhs} is some other HeapObject.
+ Label if_lhsisboolean(assembler), if_lhsisnotboolean(assembler);
+ Node* boolean_map = assembler->BooleanMapConstant();
+ assembler->Branch(assembler->WordEqual(lhs_map, boolean_map),
+ &if_lhsisboolean, &if_lhsisnotboolean);
+
+ assembler->Bind(&if_lhsisboolean);
+ {
+ // The {lhs} is a Boolean, check if {rhs} is also a Boolean.
+ Label if_rhsisboolean(assembler), if_rhsisnotboolean(assembler);
+ assembler->Branch(assembler->WordEqual(rhs_map, boolean_map),
+ &if_rhsisboolean, &if_rhsisnotboolean);
+
+ assembler->Bind(&if_rhsisboolean);
+ {
+ // Both {lhs} and {rhs} are distinct Boolean values.
+ assembler->Goto(&if_notequal);
+ }
+
+ assembler->Bind(&if_rhsisnotboolean);
+ {
+ // Convert the {lhs} to a Number first.
+ var_lhs.Bind(
+ assembler->LoadObjectField(lhs, Oddball::kToNumberOffset));
+ assembler->Goto(&loop);
+ }
+ }
+
+ assembler->Bind(&if_lhsisnotboolean);
+ {
+ // The {lhs} is either Null or Undefined; check if the {rhs} is
+ // undetectable (i.e. either also Null or Undefined or some
+ // undetectable JSReceiver).
+ Node* rhs_bitfield = assembler->LoadMapBitField(rhs_map);
+ assembler->BranchIfWord32Equal(
+ assembler->Word32And(
+ rhs_bitfield,
+ assembler->Int32Constant(1 << Map::kIsUndetectable)),
+ assembler->Int32Constant(0), &if_notequal, &if_equal);
+ }
+ }
+
+ assembler->Bind(&if_lhsissymbol);
+ {
+ // Check if the {rhs} is a JSReceiver.
+ Label if_rhsisreceiver(assembler, Label::kDeferred),
+ if_rhsisnotreceiver(assembler);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ assembler->Branch(
+ assembler->Int32LessThanOrEqual(
+ assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
+ rhs_instance_type),
+ &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+ assembler->Bind(&if_rhsisreceiver);
+ {
+ // The {lhs} is a Primitive and the {rhs} is a JSReceiver.
+ // Swapping {lhs} and {rhs} is not observable and doesn't
+ // matter for the result, so we can just swap them and use
+ // the JSReceiver handling below (for {lhs} being a JSReceiver).
+ var_lhs.Bind(rhs);
+ var_rhs.Bind(lhs);
+ assembler->Goto(&loop);
+ }
+
+ assembler->Bind(&if_rhsisnotreceiver);
+ {
+ // The {rhs} is not a JSReceiver and also not the same Symbol
+ // as the {lhs}, so this is equality check is considered false.
+ assembler->Goto(&if_notequal);
+ }
+ }
+
+ assembler->Bind(&if_lhsissimd128value);
+ {
+ // Check if the {rhs} is also a Simd128Value.
+ Label if_rhsissimd128value(assembler),
+ if_rhsisnotsimd128value(assembler);
+ assembler->Branch(
+ assembler->Word32Equal(lhs_instance_type, rhs_instance_type),
+ &if_rhsissimd128value, &if_rhsisnotsimd128value);
+
+ assembler->Bind(&if_rhsissimd128value);
+ {
+ // Both {lhs} and {rhs} is a Simd128Value.
+ GenerateEqual_Simd128Value_HeapObject(assembler, lhs, lhs_map,
+ rhs, rhs_map, &if_equal,
+ &if_notequal);
+ }
+
+ assembler->Bind(&if_rhsisnotsimd128value);
+ {
+ // Check if the {rhs} is a JSReceiver.
+ Label if_rhsisreceiver(assembler, Label::kDeferred),
+ if_rhsisnotreceiver(assembler);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ assembler->Branch(
+ assembler->Int32LessThanOrEqual(
+ assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
+ rhs_instance_type),
+ &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+ assembler->Bind(&if_rhsisreceiver);
+ {
+ // The {lhs} is a Primitive and the {rhs} is a JSReceiver.
+ // Swapping {lhs} and {rhs} is not observable and doesn't
+ // matter for the result, so we can just swap them and use
+ // the JSReceiver handling below (for {lhs} being a JSReceiver).
+ var_lhs.Bind(rhs);
+ var_rhs.Bind(lhs);
+ assembler->Goto(&loop);
+ }
+
+ assembler->Bind(&if_rhsisnotreceiver);
+ {
+ // The {rhs} is some other Primitive.
+ assembler->Goto(&if_notequal);
+ }
+ }
+ }
+
+ assembler->Bind(&if_lhsisreceiver);
+ {
+ // Check if the {rhs} is also a JSReceiver.
+ Label if_rhsisreceiver(assembler), if_rhsisnotreceiver(assembler);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ assembler->Branch(
+ assembler->Int32LessThanOrEqual(
+ assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
+ rhs_instance_type),
+ &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+ assembler->Bind(&if_rhsisreceiver);
+ {
+ // Both {lhs} and {rhs} are different JSReceiver references, so
+ // this cannot be considered equal.
+ assembler->Goto(&if_notequal);
+ }
+
+ assembler->Bind(&if_rhsisnotreceiver);
+ {
+ // Check if {rhs} is Null or Undefined (an undetectable check
+ // is sufficient here, since we already know that {rhs} is not
+ // a JSReceiver).
+ Label if_rhsisundetectable(assembler),
+ if_rhsisnotundetectable(assembler, Label::kDeferred);
+ Node* rhs_bitfield = assembler->LoadMapBitField(rhs_map);
+ assembler->BranchIfWord32Equal(
+ assembler->Word32And(
+ rhs_bitfield,
+ assembler->Int32Constant(1 << Map::kIsUndetectable)),
+ assembler->Int32Constant(0), &if_rhsisnotundetectable,
+ &if_rhsisundetectable);
+
+ assembler->Bind(&if_rhsisundetectable);
+ {
+ // Check if {lhs} is an undetectable JSReceiver.
+ Node* lhs_bitfield = assembler->LoadMapBitField(lhs_map);
+ assembler->BranchIfWord32Equal(
+ assembler->Word32And(
+ lhs_bitfield,
+ assembler->Int32Constant(1 << Map::kIsUndetectable)),
+ assembler->Int32Constant(0), &if_notequal, &if_equal);
+ }
+
+ assembler->Bind(&if_rhsisnotundetectable);
+ {
+ // The {rhs} is some Primitive different from Null and
+ // Undefined, need to convert {lhs} to Primitive first.
+ // TODO(bmeurer): Hook up ToPrimitiveStub here once it exists.
+ var_lhs.Bind(assembler->CallRuntime(Runtime::kToPrimitive,
+ context, lhs));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ assembler->Bind(&do_fcmp);
+ {
+ // Load the {lhs} and {rhs} floating point values.
+ Node* lhs = var_fcmp_lhs.value();
+ Node* rhs = var_fcmp_rhs.value();
+
+ // Perform a fast floating point comparison.
+ assembler->BranchIfFloat64Equal(lhs, rhs, &if_equal, &if_notequal);
+ }
+
+ assembler->Bind(&if_equal);
+ assembler->Return(assembler->BooleanConstant(mode == kDontNegateResult));
+
+ assembler->Bind(&if_notequal);
+ assembler->Return(assembler->BooleanConstant(mode == kNegateResult));
+}
+
+void GenerateStrictEqual(compiler::CodeStubAssembler* assembler,
+ ResultMode mode) {
+ // Here's pseudo-code for the algorithm below in case of kDontNegateResult
+ // mode; for kNegateResult mode we properly negate the result.
+ //
+ // if (lhs == rhs) {
+ // if (lhs->IsHeapNumber()) return HeapNumber::cast(lhs)->value() != NaN;
+ // return true;
+ // }
+ // if (!lhs->IsSmi()) {
+ // if (lhs->IsHeapNumber()) {
+ // if (rhs->IsSmi()) {
+ // return Smi::cast(rhs)->value() == HeapNumber::cast(lhs)->value();
+ // } else if (rhs->IsHeapNumber()) {
+ // return HeapNumber::cast(rhs)->value() ==
+ // HeapNumber::cast(lhs)->value();
+ // } else {
+ // return false;
+ // }
+ // } else {
+ // if (rhs->IsSmi()) {
+ // return false;
+ // } else {
+ // if (lhs->IsString()) {
+ // if (rhs->IsString()) {
+ // return %StringEqual(lhs, rhs);
+ // } else {
+ // return false;
+ // }
+ // } else if (lhs->IsSimd128()) {
+ // if (rhs->IsSimd128()) {
+ // return %StrictEqual(lhs, rhs);
+ // }
+ // } else {
+ // return false;
+ // }
+ // }
+ // }
+ // } else {
+ // if (rhs->IsSmi()) {
+ // return false;
+ // } else {
+ // if (rhs->IsHeapNumber()) {
+ // return Smi::cast(lhs)->value() == HeapNumber::cast(rhs)->value();
+ // } else {
+ // return false;
+ // }
+ // }
+ // }
+
+ typedef compiler::CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+
+ Node* lhs = assembler->Parameter(0);
+ Node* rhs = assembler->Parameter(1);
+ Node* context = assembler->Parameter(2);
+
+ Label if_equal(assembler), if_notequal(assembler);
+
+ // Check if {lhs} and {rhs} refer to the same object.
+ Label if_same(assembler), if_notsame(assembler);
+ assembler->Branch(assembler->WordEqual(lhs, rhs), &if_same, &if_notsame);
+
+ assembler->Bind(&if_same);
+ {
+ // The {lhs} and {rhs} reference the exact same value, yet we need special
+ // treatment for HeapNumber, as NaN is not equal to NaN.
+ GenerateEqual_Same(assembler, lhs, &if_equal, &if_notequal);
+ }
+
+ assembler->Bind(&if_notsame);
+ {
+ // The {lhs} and {rhs} reference different objects, yet for Smi, HeapNumber,
+ // String and Simd128Value they can still be considered equal.
+ Node* number_map = assembler->HeapNumberMapConstant();
+
+ // Check if {lhs} is a Smi or a HeapObject.
+ Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+
+ assembler->Bind(&if_lhsisnotsmi);
+ {
+ // Load the map of {lhs}.
+ Node* lhs_map = assembler->LoadMap(lhs);
+
+ // Check if {lhs} is a HeapNumber.
+ Label if_lhsisnumber(assembler), if_lhsisnotnumber(assembler);
+ assembler->Branch(assembler->WordEqual(lhs_map, number_map),
+ &if_lhsisnumber, &if_lhsisnotnumber);
+
+ assembler->Bind(&if_lhsisnumber);
+ {
+ // Check if {rhs} is a Smi or a HeapObject.
+ Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
+ &if_rhsisnotsmi);
+
+ assembler->Bind(&if_rhsissmi);
+ {
+ // Convert {lhs} and {rhs} to floating point values.
+ Node* lhs_value = assembler->LoadHeapNumberValue(lhs);
+ Node* rhs_value = assembler->SmiToFloat64(rhs);
+
+ // Perform a floating point comparison of {lhs} and {rhs}.
+ assembler->BranchIfFloat64Equal(lhs_value, rhs_value, &if_equal,
+ &if_notequal);
+ }
+
+ assembler->Bind(&if_rhsisnotsmi);
+ {
+ // Load the map of {rhs}.
+ Node* rhs_map = assembler->LoadMap(rhs);
+
+ // Check if {rhs} is also a HeapNumber.
+ Label if_rhsisnumber(assembler), if_rhsisnotnumber(assembler);
+ assembler->Branch(assembler->WordEqual(rhs_map, number_map),
+ &if_rhsisnumber, &if_rhsisnotnumber);
+
+ assembler->Bind(&if_rhsisnumber);
+ {
+ // Convert {lhs} and {rhs} to floating point values.
+ Node* lhs_value = assembler->LoadHeapNumberValue(lhs);
+ Node* rhs_value = assembler->LoadHeapNumberValue(rhs);
+
+ // Perform a floating point comparison of {lhs} and {rhs}.
+ assembler->BranchIfFloat64Equal(lhs_value, rhs_value, &if_equal,
+ &if_notequal);
+ }
+
+ assembler->Bind(&if_rhsisnotnumber);
+ assembler->Goto(&if_notequal);
+ }
+ }
+
+ assembler->Bind(&if_lhsisnotnumber);
+ {
+ // Check if {rhs} is a Smi or a HeapObject.
+ Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
+ &if_rhsisnotsmi);
+
+ assembler->Bind(&if_rhsissmi);
+ assembler->Goto(&if_notequal);
+
+ assembler->Bind(&if_rhsisnotsmi);
+ {
+ // Load the instance type of {lhs}.
+ Node* lhs_instance_type = assembler->LoadMapInstanceType(lhs_map);
+
+ // Check if {lhs} is a String.
+ Label if_lhsisstring(assembler), if_lhsisnotstring(assembler);
+ assembler->Branch(assembler->Int32LessThan(
+ lhs_instance_type,
+ assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+ &if_lhsisstring, &if_lhsisnotstring);
+
+ assembler->Bind(&if_lhsisstring);
+ {
+ // Load the instance type of {rhs}.
+ Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
+
+ // Check if {rhs} is also a String.
+ Label if_rhsisstring(assembler), if_rhsisnotstring(assembler);
+ assembler->Branch(assembler->Int32LessThan(
+ rhs_instance_type, assembler->Int32Constant(
+ FIRST_NONSTRING_TYPE)),
+ &if_rhsisstring, &if_rhsisnotstring);
+
+ assembler->Bind(&if_rhsisstring);
+ {
+ Callable callable =
+ (mode == kDontNegateResult)
+ ? CodeFactory::StringEqual(assembler->isolate())
+ : CodeFactory::StringNotEqual(assembler->isolate());
+ assembler->TailCallStub(callable, context, lhs, rhs);
+ }
+
+ assembler->Bind(&if_rhsisnotstring);
+ assembler->Goto(&if_notequal);
+ }
+
+ assembler->Bind(&if_lhsisnotstring);
+ {
+ // Check if {lhs} is a Simd128Value.
+ Label if_lhsissimd128value(assembler),
+ if_lhsisnotsimd128value(assembler);
+ assembler->Branch(assembler->Word32Equal(
+ lhs_instance_type,
+ assembler->Int32Constant(SIMD128_VALUE_TYPE)),
+ &if_lhsissimd128value, &if_lhsisnotsimd128value);
+
+ assembler->Bind(&if_lhsissimd128value);
+ {
+ // Load the map of {rhs}.
+ Node* rhs_map = assembler->LoadMap(rhs);
+
+ // Check if {rhs} is also a Simd128Value that is equal to {lhs}.
+ GenerateEqual_Simd128Value_HeapObject(assembler, lhs, lhs_map,
+ rhs, rhs_map, &if_equal,
+ &if_notequal);
+ }
+
+ assembler->Bind(&if_lhsisnotsimd128value);
+ assembler->Goto(&if_notequal);
+ }
+ }
+ }
+ }
+
+ assembler->Bind(&if_lhsissmi);
+ {
+ // We already know that {lhs} and {rhs} are not reference equal, and {lhs}
+ // is a Smi; so {lhs} and {rhs} can only be strictly equal if {rhs} is a
+ // HeapNumber with an equal floating point value.
+
+ // Check if {rhs} is a Smi or a HeapObject.
+ Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
+ &if_rhsisnotsmi);
+
+ assembler->Bind(&if_rhsissmi);
+ assembler->Goto(&if_notequal);
+
+ assembler->Bind(&if_rhsisnotsmi);
+ {
+ // Load the map of the {rhs}.
+ Node* rhs_map = assembler->LoadMap(rhs);
+
+ // The {rhs} could be a HeapNumber with the same value as {lhs}.
+ Label if_rhsisnumber(assembler), if_rhsisnotnumber(assembler);
+ assembler->Branch(assembler->WordEqual(rhs_map, number_map),
+ &if_rhsisnumber, &if_rhsisnotnumber);
+
+ assembler->Bind(&if_rhsisnumber);
+ {
+ // Convert {lhs} and {rhs} to floating point values.
+ Node* lhs_value = assembler->SmiToFloat64(lhs);
+ Node* rhs_value = assembler->LoadHeapNumberValue(rhs);
+
+ // Perform a floating point comparison of {lhs} and {rhs}.
+ assembler->BranchIfFloat64Equal(lhs_value, rhs_value, &if_equal,
+ &if_notequal);
+ }
+
+ assembler->Bind(&if_rhsisnotnumber);
+ assembler->Goto(&if_notequal);
+ }
+ }
+ }
+
+ assembler->Bind(&if_equal);
+ assembler->Return(assembler->BooleanConstant(mode == kDontNegateResult));
+
+ assembler->Bind(&if_notequal);
+ assembler->Return(assembler->BooleanConstant(mode == kNegateResult));
+}
+
+void GenerateStringRelationalComparison(compiler::CodeStubAssembler* assembler,
+ RelationalComparisonMode mode) {
+ typedef compiler::CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef compiler::CodeStubAssembler::Variable Variable;
+
+ Node* lhs = assembler->Parameter(0);
+ Node* rhs = assembler->Parameter(1);
+ Node* context = assembler->Parameter(2);
+
+ Label if_less(assembler), if_equal(assembler), if_greater(assembler);
+
+ // Fast check to see if {lhs} and {rhs} refer to the same String object.
+ Label if_same(assembler), if_notsame(assembler);
+ assembler->Branch(assembler->WordEqual(lhs, rhs), &if_same, &if_notsame);
+
+ assembler->Bind(&if_same);
+ assembler->Goto(&if_equal);
+
+ assembler->Bind(&if_notsame);
+ {
+ // Load instance types of {lhs} and {rhs}.
+ Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
+ Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
+
+ // Combine the instance types into a single 16-bit value, so we can check
+ // both of them at once.
+ Node* both_instance_types = assembler->Word32Or(
+ lhs_instance_type,
+ assembler->Word32Shl(rhs_instance_type, assembler->Int32Constant(8)));
+
+ // Check that both {lhs} and {rhs} are flat one-byte strings.
+ int const kBothSeqOneByteStringMask =
+ kStringEncodingMask | kStringRepresentationMask |
+ ((kStringEncodingMask | kStringRepresentationMask) << 8);
+ int const kBothSeqOneByteStringTag =
+ kOneByteStringTag | kSeqStringTag |
+ ((kOneByteStringTag | kSeqStringTag) << 8);
+ Label if_bothonebyteseqstrings(assembler),
+ if_notbothonebyteseqstrings(assembler);
+ assembler->Branch(assembler->Word32Equal(
+ assembler->Word32And(both_instance_types,
+ assembler->Int32Constant(
+ kBothSeqOneByteStringMask)),
+ assembler->Int32Constant(kBothSeqOneByteStringTag)),
+ &if_bothonebyteseqstrings, &if_notbothonebyteseqstrings);
+
+ assembler->Bind(&if_bothonebyteseqstrings);
+ {
+ // Load the length of {lhs} and {rhs}.
+ Node* lhs_length = assembler->LoadObjectField(lhs, String::kLengthOffset);
+ Node* rhs_length = assembler->LoadObjectField(rhs, String::kLengthOffset);
+
+ // Determine the minimum length.
+ Node* length = assembler->SmiMin(lhs_length, rhs_length);
+
+ // Compute the effective offset of the first character.
+ Node* begin = assembler->IntPtrConstant(SeqOneByteString::kHeaderSize -
+ kHeapObjectTag);
+
+ // Compute the first offset after the string from the length.
+ Node* end = assembler->IntPtrAdd(begin, assembler->SmiUntag(length));
+
+ // Loop over the {lhs} and {rhs} strings to see if they are equal.
+ Variable var_offset(assembler, MachineType::PointerRepresentation());
+ Label loop(assembler, &var_offset);
+ var_offset.Bind(begin);
+ assembler->Goto(&loop);
+ assembler->Bind(&loop);
+ {
+ // Check if {offset} equals {end}.
+ Node* offset = var_offset.value();
+ Label if_done(assembler), if_notdone(assembler);
+ assembler->Branch(assembler->WordEqual(offset, end), &if_done,
+ &if_notdone);
+
+ assembler->Bind(&if_notdone);
+ {
+ // Load the next characters from {lhs} and {rhs}.
+ Node* lhs_value = assembler->Load(MachineType::Uint8(), lhs, offset);
+ Node* rhs_value = assembler->Load(MachineType::Uint8(), rhs, offset);
+
+ // Check if the characters match.
+ Label if_valueissame(assembler), if_valueisnotsame(assembler);
+ assembler->Branch(assembler->Word32Equal(lhs_value, rhs_value),
+ &if_valueissame, &if_valueisnotsame);
+
+ assembler->Bind(&if_valueissame);
+ {
+ // Advance to next character.
+ var_offset.Bind(
+ assembler->IntPtrAdd(offset, assembler->IntPtrConstant(1)));
+ }
+ assembler->Goto(&loop);
+
+ assembler->Bind(&if_valueisnotsame);
+ assembler->BranchIf(assembler->Uint32LessThan(lhs_value, rhs_value),
+ &if_less, &if_greater);
+ }
+
+ assembler->Bind(&if_done);
+ {
+ // All characters up to the min length are equal, decide based on
+ // string length.
+ Label if_lengthisequal(assembler), if_lengthisnotequal(assembler);
+ assembler->Branch(assembler->SmiEqual(lhs_length, rhs_length),
+ &if_lengthisequal, &if_lengthisnotequal);
+
+ assembler->Bind(&if_lengthisequal);
+ assembler->Goto(&if_equal);
+
+ assembler->Bind(&if_lengthisnotequal);
+ assembler->BranchIfSmiLessThan(lhs_length, rhs_length, &if_less,
+ &if_greater);
+ }
+ }
+ }
+
+ assembler->Bind(&if_notbothonebyteseqstrings);
+ {
+ // TODO(bmeurer): Add fast case support for flattened cons strings;
+ // also add support for two byte string relational comparisons.
+ switch (mode) {
+ case kLessThan:
+ assembler->TailCallRuntime(Runtime::kStringLessThan, context, lhs,
+ rhs);
+ break;
+ case kLessThanOrEqual:
+ assembler->TailCallRuntime(Runtime::kStringLessThanOrEqual, context,
+ lhs, rhs);
+ break;
+ case kGreaterThan:
+ assembler->TailCallRuntime(Runtime::kStringGreaterThan, context, lhs,
+ rhs);
+ break;
+ case kGreaterThanOrEqual:
+ assembler->TailCallRuntime(Runtime::kStringGreaterThanOrEqual,
+ context, lhs, rhs);
+ break;
+ }
+ }
+ }
+
+ assembler->Bind(&if_less);
+ switch (mode) {
+ case kLessThan:
+ case kLessThanOrEqual:
+ assembler->Return(assembler->BooleanConstant(true));
+ break;
+
+ case kGreaterThan:
+ case kGreaterThanOrEqual:
+ assembler->Return(assembler->BooleanConstant(false));
+ break;
+ }
+
+ assembler->Bind(&if_equal);
+ switch (mode) {
+ case kLessThan:
+ case kGreaterThan:
+ assembler->Return(assembler->BooleanConstant(false));
+ break;
+
+ case kLessThanOrEqual:
+ case kGreaterThanOrEqual:
+ assembler->Return(assembler->BooleanConstant(true));
+ break;
+ }
+
+ assembler->Bind(&if_greater);
+ switch (mode) {
+ case kLessThan:
+ case kLessThanOrEqual:
+ assembler->Return(assembler->BooleanConstant(false));
+ break;
+
+ case kGreaterThan:
+ case kGreaterThanOrEqual:
+ assembler->Return(assembler->BooleanConstant(true));
+ break;
+ }
+}
+
+void GenerateStringEqual(compiler::CodeStubAssembler* assembler,
+ ResultMode mode) {
+ // Here's pseudo-code for the algorithm below in case of kDontNegateResult
+ // mode; for kNegateResult mode we properly negate the result.
+ //
+ // if (lhs == rhs) return true;
+ // if (lhs->length() != rhs->length()) return false;
+ // if (lhs->IsInternalizedString() && rhs->IsInternalizedString()) {
+ // return false;
+ // }
+ // if (lhs->IsSeqOneByteString() && rhs->IsSeqOneByteString()) {
+ // for (i = 0; i != lhs->length(); ++i) {
+ // if (lhs[i] != rhs[i]) return false;
+ // }
+ // return true;
+ // }
+ // return %StringEqual(lhs, rhs);
+
+ typedef compiler::CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef compiler::CodeStubAssembler::Variable Variable;
+
+ Node* lhs = assembler->Parameter(0);
+ Node* rhs = assembler->Parameter(1);
+ Node* context = assembler->Parameter(2);
+
+ Label if_equal(assembler), if_notequal(assembler);
+
+ // Fast check to see if {lhs} and {rhs} refer to the same String object.
+ Label if_same(assembler), if_notsame(assembler);
+ assembler->Branch(assembler->WordEqual(lhs, rhs), &if_same, &if_notsame);
+
+ assembler->Bind(&if_same);
+ assembler->Goto(&if_equal);
+
+ assembler->Bind(&if_notsame);
+ {
+ // The {lhs} and {rhs} don't refer to the exact same String object.
+
+ // Load the length of {lhs} and {rhs}.
+ Node* lhs_length = assembler->LoadObjectField(lhs, String::kLengthOffset);
+ Node* rhs_length = assembler->LoadObjectField(rhs, String::kLengthOffset);
+
+ // Check if the lengths of {lhs} and {rhs} are equal.
+ Label if_lengthisequal(assembler), if_lengthisnotequal(assembler);
+ assembler->Branch(assembler->WordEqual(lhs_length, rhs_length),
+ &if_lengthisequal, &if_lengthisnotequal);
+
+ assembler->Bind(&if_lengthisequal);
+ {
+ // Load instance types of {lhs} and {rhs}.
+ Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
+ Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
+
+ // Combine the instance types into a single 16-bit value, so we can check
+ // both of them at once.
+ Node* both_instance_types = assembler->Word32Or(
+ lhs_instance_type,
+ assembler->Word32Shl(rhs_instance_type, assembler->Int32Constant(8)));
+
+ // Check if both {lhs} and {rhs} are internalized.
+ int const kBothInternalizedMask =
+ kIsNotInternalizedMask | (kIsNotInternalizedMask << 8);
+ int const kBothInternalizedTag =
+ kInternalizedTag | (kInternalizedTag << 8);
+ Label if_bothinternalized(assembler), if_notbothinternalized(assembler);
+ assembler->Branch(assembler->Word32Equal(
+ assembler->Word32And(both_instance_types,
+ assembler->Int32Constant(
+ kBothInternalizedMask)),
+ assembler->Int32Constant(kBothInternalizedTag)),
+ &if_bothinternalized, &if_notbothinternalized);
+
+ assembler->Bind(&if_bothinternalized);
+ {
+ // Fast negative check for internalized-to-internalized equality.
+ assembler->Goto(&if_notequal);
+ }
+
+ assembler->Bind(&if_notbothinternalized);
+ {
+ // Check that both {lhs} and {rhs} are flat one-byte strings.
+ int const kBothSeqOneByteStringMask =
+ kStringEncodingMask | kStringRepresentationMask |
+ ((kStringEncodingMask | kStringRepresentationMask) << 8);
+ int const kBothSeqOneByteStringTag =
+ kOneByteStringTag | kSeqStringTag |
+ ((kOneByteStringTag | kSeqStringTag) << 8);
+ Label if_bothonebyteseqstrings(assembler),
+ if_notbothonebyteseqstrings(assembler);
+ assembler->Branch(
+ assembler->Word32Equal(
+ assembler->Word32And(
+ both_instance_types,
+ assembler->Int32Constant(kBothSeqOneByteStringMask)),
+ assembler->Int32Constant(kBothSeqOneByteStringTag)),
+ &if_bothonebyteseqstrings, &if_notbothonebyteseqstrings);
+
+ assembler->Bind(&if_bothonebyteseqstrings);
+ {
+ // Compute the effective offset of the first character.
+ Node* begin = assembler->IntPtrConstant(
+ SeqOneByteString::kHeaderSize - kHeapObjectTag);
+
+ // Compute the first offset after the string from the length.
+ Node* end =
+ assembler->IntPtrAdd(begin, assembler->SmiUntag(lhs_length));
+
+ // Loop over the {lhs} and {rhs} strings to see if they are equal.
+ Variable var_offset(assembler, MachineType::PointerRepresentation());
+ Label loop(assembler, &var_offset);
+ var_offset.Bind(begin);
+ assembler->Goto(&loop);
+ assembler->Bind(&loop);
+ {
+ // Check if {offset} equals {end}.
+ Node* offset = var_offset.value();
+ Label if_done(assembler), if_notdone(assembler);
+ assembler->Branch(assembler->WordEqual(offset, end), &if_done,
+ &if_notdone);
+
+ assembler->Bind(&if_notdone);
+ {
+ // Load the next characters from {lhs} and {rhs}.
+ Node* lhs_value =
+ assembler->Load(MachineType::Uint8(), lhs, offset);
+ Node* rhs_value =
+ assembler->Load(MachineType::Uint8(), rhs, offset);
+
+ // Check if the characters match.
+ Label if_valueissame(assembler), if_valueisnotsame(assembler);
+ assembler->Branch(assembler->Word32Equal(lhs_value, rhs_value),
+ &if_valueissame, &if_valueisnotsame);
+
+ assembler->Bind(&if_valueissame);
+ {
+ // Advance to next character.
+ var_offset.Bind(
+ assembler->IntPtrAdd(offset, assembler->IntPtrConstant(1)));
+ }
+ assembler->Goto(&loop);
+
+ assembler->Bind(&if_valueisnotsame);
+ assembler->Goto(&if_notequal);
+ }
+
+ assembler->Bind(&if_done);
+ assembler->Goto(&if_equal);
+ }
+ }
+
+ assembler->Bind(&if_notbothonebyteseqstrings);
+ {
+ // TODO(bmeurer): Add fast case support for flattened cons strings;
+ // also add support for two byte string equality checks.
+ Runtime::FunctionId function_id = (mode == kDontNegateResult)
+ ? Runtime::kStringEqual
+ : Runtime::kStringNotEqual;
+ assembler->TailCallRuntime(function_id, context, lhs, rhs);
+ }
+ }
+ }
+
+ assembler->Bind(&if_lengthisnotequal);
+ {
+ // Mismatch in length of {lhs} and {rhs}, cannot be equal.
+ assembler->Goto(&if_notequal);
+ }
+ }
+
+ assembler->Bind(&if_equal);
+ assembler->Return(assembler->BooleanConstant(mode == kDontNegateResult));
+
+ assembler->Bind(&if_notequal);
+ assembler->Return(assembler->BooleanConstant(mode == kNegateResult));
+}
+
+} // namespace
+
+void LessThanStub::GenerateAssembly(
+ compiler::CodeStubAssembler* assembler) const {
+ GenerateAbstractRelationalComparison(assembler, kLessThan);
+}
+
+void LessThanOrEqualStub::GenerateAssembly(
+ compiler::CodeStubAssembler* assembler) const {
+ GenerateAbstractRelationalComparison(assembler, kLessThanOrEqual);
+}
+
+void GreaterThanStub::GenerateAssembly(
+ compiler::CodeStubAssembler* assembler) const {
+ GenerateAbstractRelationalComparison(assembler, kGreaterThan);
+}
+
+void GreaterThanOrEqualStub::GenerateAssembly(
+ compiler::CodeStubAssembler* assembler) const {
+ GenerateAbstractRelationalComparison(assembler, kGreaterThanOrEqual);
+}
+
+void EqualStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
+ GenerateEqual(assembler, kDontNegateResult);
+}
+
+void NotEqualStub::GenerateAssembly(
+ compiler::CodeStubAssembler* assembler) const {
+ GenerateEqual(assembler, kNegateResult);
+}
+
+void StrictEqualStub::GenerateAssembly(
+ compiler::CodeStubAssembler* assembler) const {
+ GenerateStrictEqual(assembler, kDontNegateResult);
+}
+
+void StrictNotEqualStub::GenerateAssembly(
+ compiler::CodeStubAssembler* assembler) const {
+ GenerateStrictEqual(assembler, kNegateResult);
+}
+
+void StringEqualStub::GenerateAssembly(
+ compiler::CodeStubAssembler* assembler) const {
+ GenerateStringEqual(assembler, kDontNegateResult);
+}
+
+void StringNotEqualStub::GenerateAssembly(
+ compiler::CodeStubAssembler* assembler) const {
+ GenerateStringEqual(assembler, kNegateResult);
+}
+
+void StringLessThanStub::GenerateAssembly(
+ compiler::CodeStubAssembler* assembler) const {
+ GenerateStringRelationalComparison(assembler, kLessThan);
+}
+
+void StringLessThanOrEqualStub::GenerateAssembly(
+ compiler::CodeStubAssembler* assembler) const {
+ GenerateStringRelationalComparison(assembler, kLessThanOrEqual);
+}
+
+void StringGreaterThanStub::GenerateAssembly(
+ compiler::CodeStubAssembler* assembler) const {
+ GenerateStringRelationalComparison(assembler, kGreaterThan);
+}
+
+void StringGreaterThanOrEqualStub::GenerateAssembly(
+ compiler::CodeStubAssembler* assembler) const {
+ GenerateStringRelationalComparison(assembler, kGreaterThanOrEqual);
+}
+
+void ToLengthStub::GenerateAssembly(
+ compiler::CodeStubAssembler* assembler) const {
+ typedef compiler::CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef compiler::CodeStubAssembler::Variable Variable;
+
+ Node* context = assembler->Parameter(1);
+
+ // We might need to loop once for ToNumber conversion.
+ Variable var_len(assembler, MachineRepresentation::kTagged);
+ Label loop(assembler, &var_len);
+ var_len.Bind(assembler->Parameter(0));
+ assembler->Goto(&loop);
+ assembler->Bind(&loop);
+ {
+ // Shared entry points.
+ Label return_len(assembler),
+ return_two53minus1(assembler, Label::kDeferred),
+ return_zero(assembler, Label::kDeferred);
+
+ // Load the current {len} value.
+ Node* len = var_len.value();
+
+ // Check if {len} is a positive Smi.
+ assembler->GotoIf(assembler->WordIsPositiveSmi(len), &return_len);
+
+ // Check if {len} is a (negative) Smi.
+ assembler->GotoIf(assembler->WordIsSmi(len), &return_zero);
+
+ // Check if {len} is a HeapNumber.
+ Label if_lenisheapnumber(assembler),
+ if_lenisnotheapnumber(assembler, Label::kDeferred);
+ assembler->Branch(assembler->WordEqual(assembler->LoadMap(len),
+ assembler->HeapNumberMapConstant()),
+ &if_lenisheapnumber, &if_lenisnotheapnumber);
+
+ assembler->Bind(&if_lenisheapnumber);
+ {
+ // Load the floating-point value of {len}.
+ Node* len_value = assembler->LoadHeapNumberValue(len);
+
+ // Check if {len} is not greater than zero.
+ assembler->GotoUnless(assembler->Float64GreaterThan(
+ len_value, assembler->Float64Constant(0.0)),
+ &return_zero);
+
+ // Check if {len} is greater than or equal to 2^53-1.
+ assembler->GotoIf(
+ assembler->Float64GreaterThanOrEqual(
+ len_value, assembler->Float64Constant(kMaxSafeInteger)),
+ &return_two53minus1);
+
+ // Round the {len} towards -Infinity.
+ Node* value = assembler->Float64Floor(len_value);
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ assembler->Return(result);
+ }
+
+ assembler->Bind(&if_lenisnotheapnumber);
+ {
+ // Need to convert {len} to a Number first.
+ Callable callable = CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_len.Bind(assembler->CallStub(callable, context, len));
+ assembler->Goto(&loop);
+ }
+
+ assembler->Bind(&return_len);
+ assembler->Return(var_len.value());
+
+ assembler->Bind(&return_two53minus1);
+ assembler->Return(assembler->NumberConstant(kMaxSafeInteger));
+
+ assembler->Bind(&return_zero);
+ assembler->Return(assembler->SmiConstant(Smi::FromInt(0)));
+ }
+}
+
+void ToBooleanStub::GenerateAssembly(
+ compiler::CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+ typedef compiler::CodeStubAssembler::Label Label;
+
+ Node* value = assembler->Parameter(0);
+ Label if_valueissmi(assembler), if_valueisnotsmi(assembler);
+
+ // Check if {value} is a Smi or a HeapObject.
+ assembler->Branch(assembler->WordIsSmi(value), &if_valueissmi,
+ &if_valueisnotsmi);
+
+ assembler->Bind(&if_valueissmi);
+ {
+ // The {value} is a Smi, only need to check against zero.
+ Label if_valueiszero(assembler), if_valueisnotzero(assembler);
+ assembler->Branch(assembler->SmiEqual(value, assembler->SmiConstant(0)),
+ &if_valueiszero, &if_valueisnotzero);
+
+ assembler->Bind(&if_valueiszero);
+ assembler->Return(assembler->BooleanConstant(false));
+
+ assembler->Bind(&if_valueisnotzero);
+ assembler->Return(assembler->BooleanConstant(true));
+ }
+
+ assembler->Bind(&if_valueisnotsmi);
+ {
+ Label if_valueisstring(assembler), if_valueisheapnumber(assembler),
+ if_valueisoddball(assembler), if_valueisother(assembler);
+
+ // The {value} is a HeapObject, load its map.
+ Node* value_map = assembler->LoadMap(value);
+
+ // Load the {value}s instance type.
+ Node* value_instance_type = assembler->Load(
+ MachineType::Uint8(), value_map,
+ assembler->IntPtrConstant(Map::kInstanceTypeOffset - kHeapObjectTag));
+
+ // Dispatch based on the instance type; we distinguish all String instance
+ // types, the HeapNumber type and the Oddball type.
+ size_t const kNumCases = FIRST_NONSTRING_TYPE + 2;
+ Label* case_labels[kNumCases];
+ int32_t case_values[kNumCases];
+ for (int32_t i = 0; i < FIRST_NONSTRING_TYPE; ++i) {
+ case_labels[i] = new Label(assembler);
+ case_values[i] = i;
+ }
+ case_labels[FIRST_NONSTRING_TYPE + 0] = &if_valueisheapnumber;
+ case_values[FIRST_NONSTRING_TYPE + 0] = HEAP_NUMBER_TYPE;
+ case_labels[FIRST_NONSTRING_TYPE + 1] = &if_valueisoddball;
+ case_values[FIRST_NONSTRING_TYPE + 1] = ODDBALL_TYPE;
+ assembler->Switch(value_instance_type, &if_valueisother, case_values,
+ case_labels, arraysize(case_values));
+ for (int32_t i = 0; i < FIRST_NONSTRING_TYPE; ++i) {
+ assembler->Bind(case_labels[i]);
+ assembler->Goto(&if_valueisstring);
+ delete case_labels[i];
+ }
+
+ assembler->Bind(&if_valueisstring);
+ {
+ // Load the string length field of the {value}.
+ Node* value_length =
+ assembler->LoadObjectField(value, String::kLengthOffset);
+
+ // Check if the {value} is the empty string.
+ Label if_valueisempty(assembler), if_valueisnotempty(assembler);
+ assembler->Branch(
+ assembler->SmiEqual(value_length, assembler->SmiConstant(0)),
+ &if_valueisempty, &if_valueisnotempty);
+
+ assembler->Bind(&if_valueisempty);
+ assembler->Return(assembler->BooleanConstant(false));
+
+ assembler->Bind(&if_valueisnotempty);
+ assembler->Return(assembler->BooleanConstant(true));
+ }
+
+ assembler->Bind(&if_valueisheapnumber);
+ {
+ Node* value_value = assembler->Load(
+ MachineType::Float64(), value,
+ assembler->IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag));
+
+ Label if_valueispositive(assembler), if_valueisnotpositive(assembler),
+ if_valueisnegative(assembler), if_valueisnanorzero(assembler);
+ assembler->Branch(assembler->Float64LessThan(
+ assembler->Float64Constant(0.0), value_value),
+ &if_valueispositive, &if_valueisnotpositive);
+
+ assembler->Bind(&if_valueispositive);
+ assembler->Return(assembler->BooleanConstant(true));
+
+ assembler->Bind(&if_valueisnotpositive);
+ assembler->Branch(assembler->Float64LessThan(
+ value_value, assembler->Float64Constant(0.0)),
+ &if_valueisnegative, &if_valueisnanorzero);
+
+ assembler->Bind(&if_valueisnegative);
+ assembler->Return(assembler->BooleanConstant(true));
+
+ assembler->Bind(&if_valueisnanorzero);
+ assembler->Return(assembler->BooleanConstant(false));
+ }
+
+ assembler->Bind(&if_valueisoddball);
+ {
+ // The {value} is an Oddball, and every Oddball knows its boolean value.
+ Node* value_toboolean =
+ assembler->LoadObjectField(value, Oddball::kToBooleanOffset);
+ assembler->Return(value_toboolean);
+ }
+
+ assembler->Bind(&if_valueisother);
+ {
+ Node* value_map_bitfield = assembler->Load(
+ MachineType::Uint8(), value_map,
+ assembler->IntPtrConstant(Map::kBitFieldOffset - kHeapObjectTag));
+ Node* value_map_undetectable = assembler->Word32And(
+ value_map_bitfield,
+ assembler->Int32Constant(1 << Map::kIsUndetectable));
+
+ // Check if the {value} is undetectable.
+ Label if_valueisundetectable(assembler),
+ if_valueisnotundetectable(assembler);
+ assembler->Branch(assembler->Word32Equal(value_map_undetectable,
+ assembler->Int32Constant(0)),
+ &if_valueisnotundetectable, &if_valueisundetectable);
+
+ assembler->Bind(&if_valueisundetectable);
+ assembler->Return(assembler->BooleanConstant(false));
+
+ assembler->Bind(&if_valueisnotundetectable);
+ assembler->Return(assembler->BooleanConstant(true));
+ }
+ }
+}
+
+void ToIntegerStub::GenerateAssembly(
+ compiler::CodeStubAssembler* assembler) const {
+ typedef compiler::CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef compiler::CodeStubAssembler::Variable Variable;
+
+ Node* context = assembler->Parameter(1);
+
+ // We might need to loop once for ToNumber conversion.
+ Variable var_arg(assembler, MachineRepresentation::kTagged);
+ Label loop(assembler, &var_arg);
+ var_arg.Bind(assembler->Parameter(0));
+ assembler->Goto(&loop);
+ assembler->Bind(&loop);
+ {
+ // Shared entry points.
+ Label return_arg(assembler), return_zero(assembler, Label::kDeferred);
+
+ // Load the current {arg} value.
+ Node* arg = var_arg.value();
+
+ // Check if {arg} is a Smi.
+ assembler->GotoIf(assembler->WordIsSmi(arg), &return_arg);
+
+ // Check if {arg} is a HeapNumber.
+ Label if_argisheapnumber(assembler),
+ if_argisnotheapnumber(assembler, Label::kDeferred);
+ assembler->Branch(assembler->WordEqual(assembler->LoadMap(arg),
+ assembler->HeapNumberMapConstant()),
+ &if_argisheapnumber, &if_argisnotheapnumber);
+
+ assembler->Bind(&if_argisheapnumber);
+ {
+ // Load the floating-point value of {arg}.
+ Node* arg_value = assembler->LoadHeapNumberValue(arg);
+
+ // Check if {arg} is NaN.
+ assembler->GotoUnless(assembler->Float64Equal(arg_value, arg_value),
+ &return_zero);
+
+ // Truncate {arg} towards zero.
+ Node* value = assembler->Float64Trunc(arg_value);
+ var_arg.Bind(assembler->ChangeFloat64ToTagged(value));
+ assembler->Goto(&return_arg);
+ }
+
+ assembler->Bind(&if_argisnotheapnumber);
+ {
+ // Need to convert {arg} to a Number first.
+ Callable callable = CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_arg.Bind(assembler->CallStub(callable, context, arg));
+ assembler->Goto(&loop);
+ }
+
+ assembler->Bind(&return_arg);
+ assembler->Return(var_arg.value());
+
+ assembler->Bind(&return_zero);
+ assembler->Return(assembler->SmiConstant(Smi::FromInt(0)));
+ }
+}
+
+void StoreInterceptorStub::GenerateAssembly(
+ compiler::CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+ Node* receiver = assembler->Parameter(0);
+ Node* name = assembler->Parameter(1);
+ Node* value = assembler->Parameter(2);
+ Node* context = assembler->Parameter(3);
+ assembler->TailCallRuntime(Runtime::kStorePropertyWithInterceptor, context,
+ receiver, name, value);
+}
+
+void LoadIndexedInterceptorStub::GenerateAssembly(
+ compiler::CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+ typedef compiler::CodeStubAssembler::Label Label;
+ Node* receiver = assembler->Parameter(0);
+ Node* key = assembler->Parameter(1);
+ Node* slot = assembler->Parameter(2);
+ Node* vector = assembler->Parameter(3);
+ Node* context = assembler->Parameter(4);
+
+ Label if_keyispositivesmi(assembler), if_keyisinvalid(assembler);
+ assembler->Branch(assembler->WordIsPositiveSmi(key), &if_keyispositivesmi,
+ &if_keyisinvalid);
+ assembler->Bind(&if_keyispositivesmi);
+ assembler->TailCallRuntime(Runtime::kLoadElementWithInterceptor, context,
+ receiver, key);
+
+ assembler->Bind(&if_keyisinvalid);
+ assembler->TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver, key,
+ slot, vector);
+}
template<class StateType>
void HydrogenCodeStub::TraceTransition(StateType from, StateType to) {
@@ -508,17 +3091,6 @@ void HydrogenCodeStub::TraceTransition(StateType from, StateType to) {
}
-void CompareNilICStub::PrintBaseName(std::ostream& os) const { // NOLINT
- CodeStub::PrintBaseName(os);
- os << ((nil_value() == kNullValue) ? "(NullValue)" : "(UndefinedValue)");
-}
-
-
-void CompareNilICStub::PrintState(std::ostream& os) const { // NOLINT
- os << state();
-}
-
-
// TODO(svenpanne) Make this a real infix_ostream_iterator.
class SimpleListPrinter {
public:
@@ -539,45 +3111,6 @@ class SimpleListPrinter {
};
-std::ostream& operator<<(std::ostream& os, const CompareNilICStub::State& s) {
- os << "(";
- SimpleListPrinter p(os);
- if (s.IsEmpty()) p.Add("None");
- if (s.Contains(CompareNilICStub::UNDEFINED)) p.Add("Undefined");
- if (s.Contains(CompareNilICStub::NULL_TYPE)) p.Add("Null");
- if (s.Contains(CompareNilICStub::MONOMORPHIC_MAP)) p.Add("MonomorphicMap");
- if (s.Contains(CompareNilICStub::GENERIC)) p.Add("Generic");
- return os << ")";
-}
-
-
-Type* CompareNilICStub::GetType(Zone* zone, Handle<Map> map) {
- State state = this->state();
- if (state.Contains(CompareNilICStub::GENERIC)) return Type::Any();
-
- Type* result = Type::None();
- if (state.Contains(CompareNilICStub::UNDEFINED)) {
- result = Type::Union(result, Type::Undefined(), zone);
- }
- if (state.Contains(CompareNilICStub::NULL_TYPE)) {
- result = Type::Union(result, Type::Null(), zone);
- }
- if (state.Contains(CompareNilICStub::MONOMORPHIC_MAP)) {
- Type* type = map.is_null() ? Type::Detectable() : Type::Class(map, zone);
- result = Type::Union(result, type, zone);
- }
-
- return result;
-}
-
-
-Type* CompareNilICStub::GetInputType(Zone* zone, Handle<Map> map) {
- Type* output_type = GetType(zone, map);
- Type* nil_type = nil_value() == kNullValue ? Type::Null() : Type::Undefined();
- return Type::Union(output_type, nil_type, zone);
-}
-
-
void CallICStub::PrintState(std::ostream& os) const { // NOLINT
os << state();
}
@@ -671,7 +3204,6 @@ void TypeofStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {}
void NumberToStringStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- NumberToStringDescriptor call_descriptor(isolate());
descriptor->Initialize(
Runtime::FunctionForId(Runtime::kNumberToString)->entry);
}
@@ -732,21 +3264,21 @@ void AllocateMutableHeapNumberStub::InitializeDescriptor(
descriptor->Initialize();
}
+#define SIMD128_INIT_DESC(TYPE, Type, type, lane_count, lane_type) \
+ void Allocate##Type##Stub::InitializeDescriptor( \
+ CodeStubDescriptor* descriptor) { \
+ descriptor->Initialize( \
+ Runtime::FunctionForId(Runtime::kCreate##Type)->entry); \
+ }
+SIMD128_TYPES(SIMD128_INIT_DESC)
+#undef SIMD128_INIT_DESC
void AllocateInNewSpaceStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
descriptor->Initialize();
}
-
-void CompareNilICStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- descriptor->Initialize(FUNCTION_ADDR(Runtime_CompareNilIC_Miss));
- descriptor->SetMissHandler(ExternalReference(
- Runtime::FunctionForId(Runtime::kCompareNilIC_Miss), isolate()));
-}
-
-
-void ToBooleanStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+void ToBooleanICStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
descriptor->Initialize(FUNCTION_ADDR(Runtime_ToBooleanIC_Miss));
descriptor->SetMissHandler(ExternalReference(
Runtime::FunctionForId(Runtime::kToBooleanIC_Miss), isolate()));
@@ -848,8 +3380,7 @@ std::ostream& ArrayConstructorStubBase::BasePrintName(
return os;
}
-
-bool ToBooleanStub::UpdateStatus(Handle<Object> object) {
+bool ToBooleanICStub::UpdateStatus(Handle<Object> object) {
Types new_types = types();
Types old_types = new_types;
bool to_boolean_value = new_types.UpdateStatus(object);
@@ -858,30 +3389,27 @@ bool ToBooleanStub::UpdateStatus(Handle<Object> object) {
return to_boolean_value;
}
-
-void ToBooleanStub::PrintState(std::ostream& os) const { // NOLINT
+void ToBooleanICStub::PrintState(std::ostream& os) const { // NOLINT
os << types();
}
-
-std::ostream& operator<<(std::ostream& os, const ToBooleanStub::Types& s) {
+std::ostream& operator<<(std::ostream& os, const ToBooleanICStub::Types& s) {
os << "(";
SimpleListPrinter p(os);
if (s.IsEmpty()) p.Add("None");
- if (s.Contains(ToBooleanStub::UNDEFINED)) p.Add("Undefined");
- if (s.Contains(ToBooleanStub::BOOLEAN)) p.Add("Bool");
- if (s.Contains(ToBooleanStub::NULL_TYPE)) p.Add("Null");
- if (s.Contains(ToBooleanStub::SMI)) p.Add("Smi");
- if (s.Contains(ToBooleanStub::SPEC_OBJECT)) p.Add("SpecObject");
- if (s.Contains(ToBooleanStub::STRING)) p.Add("String");
- if (s.Contains(ToBooleanStub::SYMBOL)) p.Add("Symbol");
- if (s.Contains(ToBooleanStub::HEAP_NUMBER)) p.Add("HeapNumber");
- if (s.Contains(ToBooleanStub::SIMD_VALUE)) p.Add("SimdValue");
+ if (s.Contains(ToBooleanICStub::UNDEFINED)) p.Add("Undefined");
+ if (s.Contains(ToBooleanICStub::BOOLEAN)) p.Add("Bool");
+ if (s.Contains(ToBooleanICStub::NULL_TYPE)) p.Add("Null");
+ if (s.Contains(ToBooleanICStub::SMI)) p.Add("Smi");
+ if (s.Contains(ToBooleanICStub::SPEC_OBJECT)) p.Add("SpecObject");
+ if (s.Contains(ToBooleanICStub::STRING)) p.Add("String");
+ if (s.Contains(ToBooleanICStub::SYMBOL)) p.Add("Symbol");
+ if (s.Contains(ToBooleanICStub::HEAP_NUMBER)) p.Add("HeapNumber");
+ if (s.Contains(ToBooleanICStub::SIMD_VALUE)) p.Add("SimdValue");
return os << ")";
}
-
-bool ToBooleanStub::Types::UpdateStatus(Handle<Object> object) {
+bool ToBooleanICStub::Types::UpdateStatus(Handle<Object> object) {
if (object->IsUndefined()) {
Add(UNDEFINED);
return false;
@@ -896,16 +3424,16 @@ bool ToBooleanStub::Types::UpdateStatus(Handle<Object> object) {
return Smi::cast(*object)->value() != 0;
} else if (object->IsJSReceiver()) {
Add(SPEC_OBJECT);
- return !object->IsUndetectableObject();
+ return !object->IsUndetectable();
} else if (object->IsString()) {
- DCHECK(!object->IsUndetectableObject());
+ DCHECK(!object->IsUndetectable());
Add(STRING);
return String::cast(*object)->length() != 0;
} else if (object->IsSymbol()) {
Add(SYMBOL);
return true;
} else if (object->IsHeapNumber()) {
- DCHECK(!object->IsUndetectableObject());
+ DCHECK(!object->IsUndetectable());
Add(HEAP_NUMBER);
double value = HeapNumber::cast(*object)->value();
return value != 0 && !std::isnan(value);
@@ -919,12 +3447,12 @@ bool ToBooleanStub::Types::UpdateStatus(Handle<Object> object) {
}
}
-
-bool ToBooleanStub::Types::NeedsMap() const {
- return Contains(ToBooleanStub::SPEC_OBJECT) ||
- Contains(ToBooleanStub::STRING) || Contains(ToBooleanStub::SYMBOL) ||
- Contains(ToBooleanStub::HEAP_NUMBER) ||
- Contains(ToBooleanStub::SIMD_VALUE);
+bool ToBooleanICStub::Types::NeedsMap() const {
+ return Contains(ToBooleanICStub::SPEC_OBJECT) ||
+ Contains(ToBooleanICStub::STRING) ||
+ Contains(ToBooleanICStub::SYMBOL) ||
+ Contains(ToBooleanICStub::HEAP_NUMBER) ||
+ Contains(ToBooleanICStub::SIMD_VALUE);
}
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index f370ce6473..ace4aae614 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -23,8 +23,7 @@ namespace internal {
/* PlatformCodeStubs */ \
V(ArrayConstructor) \
V(BinaryOpICWithAllocationSite) \
- V(CallApiFunction) \
- V(CallApiAccessor) \
+ V(CallApiCallback) \
V(CallApiGetter) \
V(CallConstruct) \
V(CallIC) \
@@ -38,7 +37,6 @@ namespace internal {
V(KeyedLoadICTrampoline) \
V(LoadICTrampoline) \
V(CallICTrampoline) \
- V(LoadIndexedInterceptor) \
V(LoadIndexedString) \
V(MathPow) \
V(ProfileEntryHook) \
@@ -46,11 +44,11 @@ namespace internal {
V(RegExpExec) \
V(StoreBufferOverflow) \
V(StoreElement) \
- V(StringCompare) \
V(StubFailureTrampoline) \
V(SubString) \
V(ToNumber) \
- V(ToLength) \
+ V(NonNumberToNumber) \
+ V(StringToNumber) \
V(ToString) \
V(ToName) \
V(ToObject) \
@@ -59,18 +57,16 @@ namespace internal {
V(VectorStoreIC) \
V(VectorKeyedStoreIC) \
/* HydrogenCodeStubs */ \
- V(AllocateHeapNumber) \
- V(AllocateMutableHeapNumber) \
V(AllocateInNewSpace) \
V(ArrayNArgumentsConstructor) \
V(ArrayNoArgumentConstructor) \
V(ArraySingleArgumentConstructor) \
V(BinaryOpIC) \
V(BinaryOpWithAllocationSite) \
- V(CompareNilIC) \
V(CreateAllocationSite) \
V(CreateWeakCell) \
V(ElementsTransitionAndStore) \
+ V(FastArrayPush) \
V(FastCloneRegExp) \
V(FastCloneShallowArray) \
V(FastCloneShallowObject) \
@@ -96,20 +92,56 @@ namespace internal {
V(StoreGlobalViaContext) \
V(StoreScriptContextField) \
V(StringAdd) \
- V(ToBoolean) \
+ V(ToBooleanIC) \
V(TransitionElementsKind) \
V(KeyedLoadIC) \
V(LoadIC) \
/* TurboFanCodeStubs */ \
+ V(AllocateHeapNumber) \
+ V(AllocateMutableHeapNumber) \
+ V(AllocateFloat32x4) \
+ V(AllocateInt32x4) \
+ V(AllocateUint32x4) \
+ V(AllocateBool32x4) \
+ V(AllocateInt16x8) \
+ V(AllocateUint16x8) \
+ V(AllocateBool16x8) \
+ V(AllocateInt8x16) \
+ V(AllocateUint8x16) \
+ V(AllocateBool8x16) \
V(StringLength) \
+ V(Add) \
+ V(Subtract) \
+ V(BitwiseAnd) \
+ V(BitwiseOr) \
+ V(BitwiseXor) \
+ V(LessThan) \
+ V(LessThanOrEqual) \
+ V(GreaterThan) \
+ V(GreaterThanOrEqual) \
+ V(Equal) \
+ V(NotEqual) \
+ V(StrictEqual) \
+ V(StrictNotEqual) \
+ V(StringEqual) \
+ V(StringNotEqual) \
+ V(StringLessThan) \
+ V(StringLessThanOrEqual) \
+ V(StringGreaterThan) \
+ V(StringGreaterThanOrEqual) \
+ V(ToBoolean) \
+ V(ToInteger) \
+ V(ToLength) \
/* IC Handler stubs */ \
V(ArrayBufferViewLoadField) \
V(LoadConstant) \
V(LoadFastElement) \
V(LoadField) \
+ V(LoadIndexedInterceptor) \
V(KeyedLoadSloppyArguments) \
V(KeyedStoreSloppyArguments) \
V(StoreField) \
+ V(StoreInterceptor) \
V(StoreGlobal) \
V(StoreTransition)
@@ -157,13 +189,24 @@ namespace internal {
#define CODE_STUB_LIST_MIPS(V)
#endif
+// List of code stubs only used on S390 platforms.
+#ifdef V8_TARGET_ARCH_S390
+#define CODE_STUB_LIST_S390(V) \
+ V(DirectCEntry) \
+ V(StoreRegistersState) \
+ V(RestoreRegistersState)
+#else
+#define CODE_STUB_LIST_S390(V)
+#endif
+
// Combined list of code stubs.
#define CODE_STUB_LIST(V) \
CODE_STUB_LIST_ALL_PLATFORMS(V) \
CODE_STUB_LIST_ARM(V) \
CODE_STUB_LIST_ARM64(V) \
CODE_STUB_LIST_PPC(V) \
- CODE_STUB_LIST_MIPS(V)
+ CODE_STUB_LIST_MIPS(V) \
+ CODE_STUB_LIST_S390(V)
static const int kHasReturnedMinusZeroSentinel = 1;
@@ -347,11 +390,10 @@ class CodeStub BASE_EMBEDDED {
Handle<Code> GenerateCode() override; \
DEFINE_CODE_STUB(NAME, SUPER)
-#define DEFINE_TURBOFAN_CODE_STUB(NAME, SUPER) \
- public: \
- CallInterfaceDescriptor GetCallInterfaceDescriptor() const override { \
- return DESC##Descriptor(isolate()); \
- }; \
+#define DEFINE_TURBOFAN_CODE_STUB(NAME, SUPER) \
+ public: \
+ void GenerateAssembly(compiler::CodeStubAssembler* assembler) \
+ const override; \
DEFINE_CODE_STUB(NAME, SUPER)
#define DEFINE_HANDLER_CODE_STUB(NAME, SUPER) \
@@ -584,6 +626,8 @@ class RuntimeCallHelper {
#include "src/mips/code-stubs-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/code-stubs-mips64.h"
+#elif V8_TARGET_ARCH_S390
+#include "src/s390/code-stubs-s390.h"
#elif V8_TARGET_ARCH_X87
#include "src/x87/code-stubs-x87.h"
#else
@@ -625,12 +669,212 @@ class StringLengthStub : public TurboFanCodeStub {
InlineCacheState GetICState() const override { return MONOMORPHIC; }
ExtraICState GetExtraICState() const override { return Code::LOAD_IC; }
- void GenerateAssembly(compiler::CodeStubAssembler* assembler) const override;
-
DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
- DEFINE_CODE_STUB(StringLength, TurboFanCodeStub);
+ DEFINE_TURBOFAN_CODE_STUB(StringLength, TurboFanCodeStub);
+};
+
+class AddStub final : public TurboFanCodeStub {
+ public:
+ explicit AddStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
+ DEFINE_TURBOFAN_CODE_STUB(Add, TurboFanCodeStub);
+};
+
+class SubtractStub final : public TurboFanCodeStub {
+ public:
+ explicit SubtractStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
+ DEFINE_TURBOFAN_CODE_STUB(Subtract, TurboFanCodeStub);
};
+class BitwiseAndStub final : public TurboFanCodeStub {
+ public:
+ explicit BitwiseAndStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
+ DEFINE_TURBOFAN_CODE_STUB(BitwiseAnd, TurboFanCodeStub);
+};
+
+class BitwiseOrStub final : public TurboFanCodeStub {
+ public:
+ explicit BitwiseOrStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
+ DEFINE_TURBOFAN_CODE_STUB(BitwiseOr, TurboFanCodeStub);
+};
+
+class BitwiseXorStub final : public TurboFanCodeStub {
+ public:
+ explicit BitwiseXorStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
+ DEFINE_TURBOFAN_CODE_STUB(BitwiseXor, TurboFanCodeStub);
+};
+
+class LessThanStub final : public TurboFanCodeStub {
+ public:
+ explicit LessThanStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
+ DEFINE_TURBOFAN_CODE_STUB(LessThan, TurboFanCodeStub);
+};
+
+class LessThanOrEqualStub final : public TurboFanCodeStub {
+ public:
+ explicit LessThanOrEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
+ DEFINE_TURBOFAN_CODE_STUB(LessThanOrEqual, TurboFanCodeStub);
+};
+
+class GreaterThanStub final : public TurboFanCodeStub {
+ public:
+ explicit GreaterThanStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
+ DEFINE_TURBOFAN_CODE_STUB(GreaterThan, TurboFanCodeStub);
+};
+
+class GreaterThanOrEqualStub final : public TurboFanCodeStub {
+ public:
+ explicit GreaterThanOrEqualStub(Isolate* isolate)
+ : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
+ DEFINE_TURBOFAN_CODE_STUB(GreaterThanOrEqual, TurboFanCodeStub);
+};
+
+class EqualStub final : public TurboFanCodeStub {
+ public:
+ explicit EqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
+ DEFINE_TURBOFAN_CODE_STUB(Equal, TurboFanCodeStub);
+};
+
+class NotEqualStub final : public TurboFanCodeStub {
+ public:
+ explicit NotEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
+ DEFINE_TURBOFAN_CODE_STUB(NotEqual, TurboFanCodeStub);
+};
+
+class StrictEqualStub final : public TurboFanCodeStub {
+ public:
+ explicit StrictEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
+ DEFINE_TURBOFAN_CODE_STUB(StrictEqual, TurboFanCodeStub);
+};
+
+class StrictNotEqualStub final : public TurboFanCodeStub {
+ public:
+ explicit StrictNotEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
+ DEFINE_TURBOFAN_CODE_STUB(StrictNotEqual, TurboFanCodeStub);
+};
+
+class StringEqualStub final : public TurboFanCodeStub {
+ public:
+ explicit StringEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
+ DEFINE_TURBOFAN_CODE_STUB(StringEqual, TurboFanCodeStub);
+};
+
+class StringNotEqualStub final : public TurboFanCodeStub {
+ public:
+ explicit StringNotEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
+ DEFINE_TURBOFAN_CODE_STUB(StringNotEqual, TurboFanCodeStub);
+};
+
+class StringLessThanStub final : public TurboFanCodeStub {
+ public:
+ explicit StringLessThanStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
+ DEFINE_TURBOFAN_CODE_STUB(StringLessThan, TurboFanCodeStub);
+};
+
+class StringLessThanOrEqualStub final : public TurboFanCodeStub {
+ public:
+ explicit StringLessThanOrEqualStub(Isolate* isolate)
+ : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
+ DEFINE_TURBOFAN_CODE_STUB(StringLessThanOrEqual, TurboFanCodeStub);
+};
+
+class StringGreaterThanStub final : public TurboFanCodeStub {
+ public:
+ explicit StringGreaterThanStub(Isolate* isolate)
+ : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
+ DEFINE_TURBOFAN_CODE_STUB(StringGreaterThan, TurboFanCodeStub);
+};
+
+class StringGreaterThanOrEqualStub final : public TurboFanCodeStub {
+ public:
+ explicit StringGreaterThanOrEqualStub(Isolate* isolate)
+ : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
+ DEFINE_TURBOFAN_CODE_STUB(StringGreaterThanOrEqual, TurboFanCodeStub);
+};
+
+class ToBooleanStub final : public TurboFanCodeStub {
+ public:
+ explicit ToBooleanStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
+ DEFINE_TURBOFAN_CODE_STUB(ToBoolean, TurboFanCodeStub);
+};
+
+class ToIntegerStub final : public TurboFanCodeStub {
+ public:
+ explicit ToIntegerStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
+ DEFINE_TURBOFAN_CODE_STUB(ToInteger, TurboFanCodeStub);
+};
+
+class ToLengthStub final : public TurboFanCodeStub {
+ public:
+ explicit ToLengthStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
+ DEFINE_TURBOFAN_CODE_STUB(ToLength, TurboFanCodeStub);
+};
+
+class StoreInterceptorStub : public TurboFanCodeStub {
+ public:
+ explicit StoreInterceptorStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ void GenerateAssembly(compiler::CodeStubAssembler* assember) const override;
+
+ Code::Kind GetCodeKind() const override { return Code::HANDLER; }
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(Store);
+ DEFINE_CODE_STUB(StoreInterceptor, TurboFanCodeStub);
+};
+
+class LoadIndexedInterceptorStub : public TurboFanCodeStub {
+ public:
+ explicit LoadIndexedInterceptorStub(Isolate* isolate)
+ : TurboFanCodeStub(isolate) {}
+
+ Code::Kind GetCodeKind() const override { return Code::HANDLER; }
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
+ DEFINE_TURBOFAN_CODE_STUB(LoadIndexedInterceptor, TurboFanCodeStub);
+};
enum StringAddFlags {
// Omit both parameter checks.
@@ -658,7 +902,7 @@ class NumberToStringStub final : public HydrogenCodeStub {
// Parameters accessed via CodeStubGraphBuilder::GetParameter()
static const int kNumber = 0;
- DEFINE_CALL_INTERFACE_DESCRIPTOR(NumberToString);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
DEFINE_HYDROGEN_CODE_STUB(NumberToString, HydrogenCodeStub);
};
@@ -873,12 +1117,29 @@ class GrowArrayElementsStub : public HydrogenCodeStub {
DEFINE_HYDROGEN_CODE_STUB(GrowArrayElements, HydrogenCodeStub);
};
+class FastArrayPushStub : public HydrogenCodeStub {
+ public:
+ explicit FastArrayPushStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
+
+ private:
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(FastArrayPush);
+ DEFINE_HYDROGEN_CODE_STUB(FastArrayPush, HydrogenCodeStub);
+};
class InstanceOfStub final : public PlatformCodeStub {
public:
- explicit InstanceOfStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+ explicit InstanceOfStub(Isolate* isolate, bool es6_instanceof = false)
+ : PlatformCodeStub(isolate) {
+ minor_key_ = IsES6InstanceOfBits::encode(es6_instanceof);
+ }
+
+ bool is_es6_instanceof() const {
+ return IsES6InstanceOfBits::decode(minor_key_);
+ }
private:
+ class IsES6InstanceOfBits : public BitField<bool, 0, 1> {};
+
DEFINE_CALL_INTERFACE_DESCRIPTOR(InstanceOf);
DEFINE_PLATFORM_CODE_STUB(InstanceOf, PlatformCodeStub);
};
@@ -1013,20 +1274,6 @@ class FunctionPrototypeStub : public PlatformCodeStub {
};
-// TODO(mvstanton): Translate to hydrogen code stub.
-class LoadIndexedInterceptorStub : public PlatformCodeStub {
- public:
- explicit LoadIndexedInterceptorStub(Isolate* isolate)
- : PlatformCodeStub(isolate) {}
-
- Code::Kind GetCodeKind() const override { return Code::HANDLER; }
- Code::StubType GetStubType() const override { return Code::FAST; }
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
- DEFINE_PLATFORM_CODE_STUB(LoadIndexedInterceptor, PlatformCodeStub);
-};
-
-
class LoadIndexedStringStub : public PlatformCodeStub {
public:
explicit LoadIndexedStringStub(Isolate* isolate)
@@ -1418,48 +1665,36 @@ class StoreGlobalViaContextStub final : public PlatformCodeStub {
DEFINE_PLATFORM_CODE_STUB(StoreGlobalViaContext, PlatformCodeStub);
};
-
-class CallApiFunctionStub : public PlatformCodeStub {
+class CallApiCallbackStub : public PlatformCodeStub {
public:
- explicit CallApiFunctionStub(Isolate* isolate, bool call_data_undefined)
- : PlatformCodeStub(isolate) {
- minor_key_ = CallDataUndefinedBits::encode(call_data_undefined);
- }
-
- private:
- bool call_data_undefined() const {
- return CallDataUndefinedBits::decode(minor_key_);
- }
+ static const int kArgBits = 3;
+ static const int kArgMax = (1 << kArgBits) - 1;
- class CallDataUndefinedBits : public BitField<bool, 0, 1> {};
+ // CallApiCallbackStub for regular setters and getters.
+ CallApiCallbackStub(Isolate* isolate, bool is_store, bool call_data_undefined,
+ bool is_lazy)
+ : CallApiCallbackStub(isolate, is_store ? 1 : 0, is_store,
+ call_data_undefined, is_lazy) {}
- DEFINE_CALL_INTERFACE_DESCRIPTOR(ApiFunction);
- DEFINE_PLATFORM_CODE_STUB(CallApiFunction, PlatformCodeStub);
-};
+ // CallApiCallbackStub for callback functions.
+ CallApiCallbackStub(Isolate* isolate, int argc, bool call_data_undefined)
+ : CallApiCallbackStub(isolate, argc, false, call_data_undefined, false) {}
+ CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
+ return ApiCallbackDescriptorBase::ForArgs(isolate(), argc());
+ }
-class CallApiAccessorStub : public PlatformCodeStub {
- public:
- CallApiAccessorStub(Isolate* isolate, bool is_store, bool call_data_undefined,
- bool is_lazy)
+ private:
+ CallApiCallbackStub(Isolate* isolate, int argc, bool is_store,
+ bool call_data_undefined, bool is_lazy)
: PlatformCodeStub(isolate) {
+ CHECK(0 <= argc && argc <= kArgMax);
minor_key_ = IsStoreBits::encode(is_store) |
CallDataUndefinedBits::encode(call_data_undefined) |
- ArgumentBits::encode(is_store ? 1 : 0) |
+ ArgumentBits::encode(argc) |
IsLazyAccessorBits::encode(is_lazy);
}
- protected:
- // For CallApiFunctionWithFixedArgsStub, see below.
- static const int kArgBits = 3;
- CallApiAccessorStub(Isolate* isolate, int argc, bool call_data_undefined)
- : PlatformCodeStub(isolate) {
- minor_key_ = IsStoreBits::encode(false) |
- CallDataUndefinedBits::encode(call_data_undefined) |
- ArgumentBits::encode(argc);
- }
-
- private:
bool is_store() const { return IsStoreBits::decode(minor_key_); }
bool is_lazy() const { return IsLazyAccessorBits::decode(minor_key_); }
bool call_data_undefined() const {
@@ -1472,29 +1707,10 @@ class CallApiAccessorStub : public PlatformCodeStub {
class ArgumentBits : public BitField<int, 2, kArgBits> {};
class IsLazyAccessorBits : public BitField<bool, 3 + kArgBits, 1> {};
- DEFINE_CALL_INTERFACE_DESCRIPTOR(ApiAccessor);
- DEFINE_PLATFORM_CODE_STUB(CallApiAccessor, PlatformCodeStub);
+ DEFINE_PLATFORM_CODE_STUB(CallApiCallback, PlatformCodeStub);
};
-// TODO(dcarney): see if it's possible to remove this later without performance
-// degradation.
-// This is not a real stub, but a way of generating the CallApiAccessorStub
-// (which has the same abi) which makes it clear that it is not an accessor.
-class CallApiFunctionWithFixedArgsStub : public CallApiAccessorStub {
- public:
- static const int kMaxFixedArgs = (1 << kArgBits) - 1;
- CallApiFunctionWithFixedArgsStub(Isolate* isolate, int argc,
- bool call_data_undefined)
- : CallApiAccessorStub(isolate, argc, call_data_undefined) {
- DCHECK(0 <= argc && argc <= kMaxFixedArgs);
- }
-};
-
-
-typedef ApiAccessorDescriptor ApiFunctionWithFixedArgsDescriptor;
-
-
class CallApiGetterStub : public PlatformCodeStub {
public:
explicit CallApiGetterStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
@@ -1701,96 +1917,6 @@ class CompareICStub : public PlatformCodeStub {
};
-class CompareNilICStub : public HydrogenCodeStub {
- public:
- Type* GetType(Zone* zone, Handle<Map> map = Handle<Map>());
- Type* GetInputType(Zone* zone, Handle<Map> map);
-
- CompareNilICStub(Isolate* isolate, NilValue nil) : HydrogenCodeStub(isolate) {
- set_sub_minor_key(NilValueBits::encode(nil));
- }
-
- CompareNilICStub(Isolate* isolate, ExtraICState ic_state,
- InitializationState init_state = INITIALIZED)
- : HydrogenCodeStub(isolate, init_state) {
- set_sub_minor_key(ic_state);
- }
-
- static Handle<Code> GetUninitialized(Isolate* isolate,
- NilValue nil) {
- return CompareNilICStub(isolate, nil, UNINITIALIZED).GetCode();
- }
-
- InlineCacheState GetICState() const override {
- State state = this->state();
- if (state.Contains(GENERIC)) {
- return MEGAMORPHIC;
- } else if (state.Contains(MONOMORPHIC_MAP)) {
- return MONOMORPHIC;
- } else {
- return PREMONOMORPHIC;
- }
- }
-
- Code::Kind GetCodeKind() const override { return Code::COMPARE_NIL_IC; }
-
- ExtraICState GetExtraICState() const override { return sub_minor_key(); }
-
- void UpdateStatus(Handle<Object> object);
-
- bool IsMonomorphic() const { return state().Contains(MONOMORPHIC_MAP); }
-
- NilValue nil_value() const { return NilValueBits::decode(sub_minor_key()); }
-
- void ClearState() {
- set_sub_minor_key(TypesBits::update(sub_minor_key(), 0));
- }
-
- void PrintState(std::ostream& os) const override; // NOLINT
- void PrintBaseName(std::ostream& os) const override; // NOLINT
-
- private:
- CompareNilICStub(Isolate* isolate, NilValue nil,
- InitializationState init_state)
- : HydrogenCodeStub(isolate, init_state) {
- set_sub_minor_key(NilValueBits::encode(nil));
- }
-
- enum CompareNilType {
- UNDEFINED,
- NULL_TYPE,
- MONOMORPHIC_MAP,
- GENERIC,
- NUMBER_OF_TYPES
- };
-
- // At most 6 different types can be distinguished, because the Code object
- // only has room for a single byte to hold a set and there are two more
- // boolean flags we need to store. :-P
- STATIC_ASSERT(NUMBER_OF_TYPES <= 6);
-
- class State : public EnumSet<CompareNilType, byte> {
- public:
- State() : EnumSet<CompareNilType, byte>(0) { }
- explicit State(byte bits) : EnumSet<CompareNilType, byte>(bits) { }
- };
- friend std::ostream& operator<<(std::ostream& os, const State& s);
-
- State state() const { return State(TypesBits::decode(sub_minor_key())); }
-
- class NilValueBits : public BitField<NilValue, 0, 1> {};
- class TypesBits : public BitField<byte, 1, NUMBER_OF_TYPES> {};
-
- friend class CompareNilIC;
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(CompareNil);
- DEFINE_HYDROGEN_CODE_STUB(CompareNilIC, HydrogenCodeStub);
-};
-
-
-std::ostream& operator<<(std::ostream& os, const CompareNilICStub::State& s);
-
-
class CEntryStub : public PlatformCodeStub {
public:
CEntryStub(Isolate* isolate, int result_size,
@@ -2499,28 +2625,45 @@ class TransitionElementsKindStub : public HydrogenCodeStub {
DEFINE_HYDROGEN_CODE_STUB(TransitionElementsKind, HydrogenCodeStub);
};
-
-class AllocateHeapNumberStub final : public HydrogenCodeStub {
+class AllocateHeapNumberStub : public TurboFanCodeStub {
public:
explicit AllocateHeapNumberStub(Isolate* isolate)
- : HydrogenCodeStub(isolate) {}
+ : TurboFanCodeStub(isolate) {}
+
+ void InitializeDescriptor(CodeStubDescriptor* descriptor) override;
+ void GenerateAssembly(compiler::CodeStubAssembler* assembler) const override;
- private:
DEFINE_CALL_INTERFACE_DESCRIPTOR(AllocateHeapNumber);
- DEFINE_HYDROGEN_CODE_STUB(AllocateHeapNumber, HydrogenCodeStub);
+ DEFINE_CODE_STUB(AllocateHeapNumber, TurboFanCodeStub);
};
-
-class AllocateMutableHeapNumberStub final : public HydrogenCodeStub {
+class AllocateMutableHeapNumberStub : public TurboFanCodeStub {
public:
explicit AllocateMutableHeapNumberStub(Isolate* isolate)
- : HydrogenCodeStub(isolate) {}
+ : TurboFanCodeStub(isolate) {}
- private:
- DEFINE_CALL_INTERFACE_DESCRIPTOR(AllocateMutableHeapNumber);
- DEFINE_HYDROGEN_CODE_STUB(AllocateMutableHeapNumber, HydrogenCodeStub);
-};
+ void InitializeDescriptor(CodeStubDescriptor* descriptor) override;
+ void GenerateAssembly(compiler::CodeStubAssembler* assembler) const override;
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(AllocateMutableHeapNumber);
+ DEFINE_CODE_STUB(AllocateMutableHeapNumber, TurboFanCodeStub);
+};
+
+#define SIMD128_ALLOC_STUB(TYPE, Type, type, lane_count, lane_type) \
+ class Allocate##Type##Stub : public TurboFanCodeStub { \
+ public: \
+ explicit Allocate##Type##Stub(Isolate* isolate) \
+ : TurboFanCodeStub(isolate) {} \
+ \
+ void InitializeDescriptor(CodeStubDescriptor* descriptor) override; \
+ void GenerateAssembly( \
+ compiler::CodeStubAssembler* assembler) const override; \
+ \
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(Allocate##Type); \
+ DEFINE_CODE_STUB(Allocate##Type, TurboFanCodeStub); \
+ };
+SIMD128_TYPES(SIMD128_ALLOC_STUB)
+#undef SIMD128_ALLOC_STUB
class AllocateInNewSpaceStub final : public HydrogenCodeStub {
public:
@@ -2727,8 +2870,7 @@ class StoreElementStub : public PlatformCodeStub {
DEFINE_PLATFORM_CODE_STUB(StoreElement, PlatformCodeStub);
};
-
-class ToBooleanStub: public HydrogenCodeStub {
+class ToBooleanICStub : public HydrogenCodeStub {
public:
enum Type {
UNDEFINED,
@@ -2755,14 +2897,14 @@ class ToBooleanStub: public HydrogenCodeStub {
bool UpdateStatus(Handle<Object> object);
bool NeedsMap() const;
bool CanBeUndetectable() const {
- return Contains(ToBooleanStub::SPEC_OBJECT);
+ return Contains(ToBooleanICStub::SPEC_OBJECT);
}
bool IsGeneric() const { return ToIntegral() == Generic().ToIntegral(); }
static Types Generic() { return Types((1 << NUMBER_OF_TYPES) - 1); }
};
- ToBooleanStub(Isolate* isolate, ExtraICState state)
+ ToBooleanICStub(Isolate* isolate, ExtraICState state)
: HydrogenCodeStub(isolate) {
set_sub_minor_key(TypesBits::encode(static_cast<uint16_t>(state)));
}
@@ -2776,7 +2918,7 @@ class ToBooleanStub: public HydrogenCodeStub {
bool SometimesSetsUpAFrame() override { return false; }
static Handle<Code> GetUninitialized(Isolate* isolate) {
- return ToBooleanStub(isolate, UNINITIALIZED).GetCode();
+ return ToBooleanICStub(isolate, UNINITIALIZED).GetCode();
}
ExtraICState GetExtraICState() const override { return types().ToIntegral(); }
@@ -2790,19 +2932,16 @@ class ToBooleanStub: public HydrogenCodeStub {
}
private:
- ToBooleanStub(Isolate* isolate, InitializationState init_state)
- : HydrogenCodeStub(isolate, init_state) {
- }
+ ToBooleanICStub(Isolate* isolate, InitializationState init_state)
+ : HydrogenCodeStub(isolate, init_state) {}
class TypesBits : public BitField<uint16_t, 0, NUMBER_OF_TYPES> {};
- DEFINE_CALL_INTERFACE_DESCRIPTOR(ToBoolean);
- DEFINE_HYDROGEN_CODE_STUB(ToBoolean, HydrogenCodeStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
+ DEFINE_HYDROGEN_CODE_STUB(ToBooleanIC, HydrogenCodeStub);
};
-
-std::ostream& operator<<(std::ostream& os, const ToBooleanStub::Types& t);
-
+std::ostream& operator<<(std::ostream& os, const ToBooleanICStub::Types& t);
class ElementsTransitionAndStoreStub : public HydrogenCodeStub {
public:
@@ -2910,17 +3049,25 @@ class ToNumberStub final : public PlatformCodeStub {
public:
explicit ToNumberStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
- DEFINE_CALL_INTERFACE_DESCRIPTOR(ToNumber);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
DEFINE_PLATFORM_CODE_STUB(ToNumber, PlatformCodeStub);
};
+class NonNumberToNumberStub final : public PlatformCodeStub {
+ public:
+ explicit NonNumberToNumberStub(Isolate* isolate)
+ : PlatformCodeStub(isolate) {}
-class ToLengthStub final : public PlatformCodeStub {
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
+ DEFINE_PLATFORM_CODE_STUB(NonNumberToNumber, PlatformCodeStub);
+};
+
+class StringToNumberStub final : public PlatformCodeStub {
public:
- explicit ToLengthStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+ explicit StringToNumberStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
- DEFINE_CALL_INTERFACE_DESCRIPTOR(ToLength);
- DEFINE_PLATFORM_CODE_STUB(ToLength, PlatformCodeStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
+ DEFINE_PLATFORM_CODE_STUB(StringToNumber, PlatformCodeStub);
};
@@ -2928,7 +3075,7 @@ class ToStringStub final : public PlatformCodeStub {
public:
explicit ToStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
- DEFINE_CALL_INTERFACE_DESCRIPTOR(ToString);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
DEFINE_PLATFORM_CODE_STUB(ToString, PlatformCodeStub);
};
@@ -2937,7 +3084,7 @@ class ToNameStub final : public PlatformCodeStub {
public:
explicit ToNameStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
- DEFINE_CALL_INTERFACE_DESCRIPTOR(ToName);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
DEFINE_PLATFORM_CODE_STUB(ToName, PlatformCodeStub);
};
@@ -2946,20 +3093,10 @@ class ToObjectStub final : public HydrogenCodeStub {
public:
explicit ToObjectStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
- DEFINE_CALL_INTERFACE_DESCRIPTOR(ToObject);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
DEFINE_HYDROGEN_CODE_STUB(ToObject, HydrogenCodeStub);
};
-
-class StringCompareStub : public PlatformCodeStub {
- public:
- explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(StringCompare);
- DEFINE_PLATFORM_CODE_STUB(StringCompare, PlatformCodeStub);
-};
-
-
#undef DEFINE_CALL_INTERFACE_DESCRIPTOR
#undef DEFINE_PLATFORM_CODE_STUB
#undef DEFINE_HANDLER_CODE_STUB
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index 512cbfc40a..f941696774 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -56,6 +56,8 @@
#include "src/mips/codegen-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/codegen-mips64.h" // NOLINT
+#elif V8_TARGET_ARCH_S390
+#include "src/s390/codegen-s390.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/codegen-x87.h" // NOLINT
#else
diff --git a/deps/v8/src/collector.h b/deps/v8/src/collector.h
new file mode 100644
index 0000000000..8454aae19d
--- /dev/null
+++ b/deps/v8/src/collector.h
@@ -0,0 +1,247 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COLLECTOR_H_
+#define V8_COLLECTOR_H_
+
+#include "src/checks.h"
+#include "src/list.h"
+#include "src/vector.h"
+
+namespace v8 {
+namespace internal {
+
+/*
+ * A class that collects values into a backing store.
+ * Specialized versions of the class can allow access to the backing store
+ * in different ways.
+ * There is no guarantee that the backing store is contiguous (and, as a
+ * consequence, no guarantees that consecutively added elements are adjacent
+ * in memory). The collector may move elements unless it has guaranteed not
+ * to.
+ */
+template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
+class Collector {
+ public:
+ explicit Collector(int initial_capacity = kMinCapacity)
+ : index_(0), size_(0) {
+ current_chunk_ = Vector<T>::New(initial_capacity);
+ }
+
+ virtual ~Collector() {
+ // Free backing store (in reverse allocation order).
+ current_chunk_.Dispose();
+ for (int i = chunks_.length() - 1; i >= 0; i--) {
+ chunks_.at(i).Dispose();
+ }
+ }
+
+ // Add a single element.
+ inline void Add(T value) {
+ if (index_ >= current_chunk_.length()) {
+ Grow(1);
+ }
+ current_chunk_[index_] = value;
+ index_++;
+ size_++;
+ }
+
+ // Add a block of contiguous elements and return a Vector backed by the
+ // memory area.
+ // A basic Collector will keep this vector valid as long as the Collector
+ // is alive.
+ inline Vector<T> AddBlock(int size, T initial_value) {
+ DCHECK(size > 0);
+ if (size > current_chunk_.length() - index_) {
+ Grow(size);
+ }
+ T* position = current_chunk_.start() + index_;
+ index_ += size;
+ size_ += size;
+ for (int i = 0; i < size; i++) {
+ position[i] = initial_value;
+ }
+ return Vector<T>(position, size);
+ }
+
+ // Add a contiguous block of elements and return a vector backed
+ // by the added block.
+ // A basic Collector will keep this vector valid as long as the Collector
+ // is alive.
+ inline Vector<T> AddBlock(Vector<const T> source) {
+ if (source.length() > current_chunk_.length() - index_) {
+ Grow(source.length());
+ }
+ T* position = current_chunk_.start() + index_;
+ index_ += source.length();
+ size_ += source.length();
+ for (int i = 0; i < source.length(); i++) {
+ position[i] = source[i];
+ }
+ return Vector<T>(position, source.length());
+ }
+
+ // Write the contents of the collector into the provided vector.
+ void WriteTo(Vector<T> destination) {
+ DCHECK(size_ <= destination.length());
+ int position = 0;
+ for (int i = 0; i < chunks_.length(); i++) {
+ Vector<T> chunk = chunks_.at(i);
+ for (int j = 0; j < chunk.length(); j++) {
+ destination[position] = chunk[j];
+ position++;
+ }
+ }
+ for (int i = 0; i < index_; i++) {
+ destination[position] = current_chunk_[i];
+ position++;
+ }
+ }
+
+ // Allocate a single contiguous vector, copy all the collected
+ // elements to the vector, and return it.
+ // The caller is responsible for freeing the memory of the returned
+ // vector (e.g., using Vector::Dispose).
+ Vector<T> ToVector() {
+ Vector<T> new_store = Vector<T>::New(size_);
+ WriteTo(new_store);
+ return new_store;
+ }
+
+ // Resets the collector to be empty.
+ virtual void Reset() {
+ for (int i = chunks_.length() - 1; i >= 0; i--) {
+ chunks_.at(i).Dispose();
+ }
+ chunks_.Rewind(0);
+ index_ = 0;
+ size_ = 0;
+ }
+
+ // Total number of elements added to collector so far.
+ inline int size() { return size_; }
+
+ protected:
+ static const int kMinCapacity = 16;
+ List<Vector<T> > chunks_;
+ Vector<T> current_chunk_; // Block of memory currently being written into.
+ int index_; // Current index in current chunk.
+ int size_; // Total number of elements in collector.
+
+ // Creates a new current chunk, and stores the old chunk in the chunks_ list.
+ void Grow(int min_capacity) {
+ DCHECK(growth_factor > 1);
+ int new_capacity;
+ int current_length = current_chunk_.length();
+ if (current_length < kMinCapacity) {
+ // The collector started out as empty.
+ new_capacity = min_capacity * growth_factor;
+ if (new_capacity < kMinCapacity) new_capacity = kMinCapacity;
+ } else {
+ int growth = current_length * (growth_factor - 1);
+ if (growth > max_growth) {
+ growth = max_growth;
+ }
+ new_capacity = current_length + growth;
+ if (new_capacity < min_capacity) {
+ new_capacity = min_capacity + growth;
+ }
+ }
+ NewChunk(new_capacity);
+ DCHECK(index_ + min_capacity <= current_chunk_.length());
+ }
+
+ // Before replacing the current chunk, give a subclass the option to move
+ // some of the current data into the new chunk. The function may update
+ // the current index_ value to represent data no longer in the current chunk.
+ // Returns the initial index of the new chunk (after copied data).
+ virtual void NewChunk(int new_capacity) {
+ Vector<T> new_chunk = Vector<T>::New(new_capacity);
+ if (index_ > 0) {
+ chunks_.Add(current_chunk_.SubVector(0, index_));
+ } else {
+ current_chunk_.Dispose();
+ }
+ current_chunk_ = new_chunk;
+ index_ = 0;
+ }
+};
+
+/*
+ * A collector that allows sequences of values to be guaranteed to
+ * stay consecutive.
+ * If the backing store grows while a sequence is active, the current
+ * sequence might be moved, but after the sequence is ended, it will
+ * not move again.
+ * NOTICE: Blocks allocated using Collector::AddBlock(int) can move
+ * as well, if inside an active sequence where another element is added.
+ */
+template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
+class SequenceCollector : public Collector<T, growth_factor, max_growth> {
+ public:
+ explicit SequenceCollector(int initial_capacity)
+ : Collector<T, growth_factor, max_growth>(initial_capacity),
+ sequence_start_(kNoSequence) {}
+
+ virtual ~SequenceCollector() {}
+
+ void StartSequence() {
+ DCHECK(sequence_start_ == kNoSequence);
+ sequence_start_ = this->index_;
+ }
+
+ Vector<T> EndSequence() {
+ DCHECK(sequence_start_ != kNoSequence);
+ int sequence_start = sequence_start_;
+ sequence_start_ = kNoSequence;
+ if (sequence_start == this->index_) return Vector<T>();
+ return this->current_chunk_.SubVector(sequence_start, this->index_);
+ }
+
+ // Drops the currently added sequence, and all collected elements in it.
+ void DropSequence() {
+ DCHECK(sequence_start_ != kNoSequence);
+ int sequence_length = this->index_ - sequence_start_;
+ this->index_ = sequence_start_;
+ this->size_ -= sequence_length;
+ sequence_start_ = kNoSequence;
+ }
+
+ virtual void Reset() {
+ sequence_start_ = kNoSequence;
+ this->Collector<T, growth_factor, max_growth>::Reset();
+ }
+
+ private:
+ static const int kNoSequence = -1;
+ int sequence_start_;
+
+ // Move the currently active sequence to the new chunk.
+ virtual void NewChunk(int new_capacity) {
+ if (sequence_start_ == kNoSequence) {
+ // Fall back on default behavior if no sequence has been started.
+ this->Collector<T, growth_factor, max_growth>::NewChunk(new_capacity);
+ return;
+ }
+ int sequence_length = this->index_ - sequence_start_;
+ Vector<T> new_chunk = Vector<T>::New(sequence_length + new_capacity);
+ DCHECK(sequence_length < new_chunk.length());
+ for (int i = 0; i < sequence_length; i++) {
+ new_chunk[i] = this->current_chunk_[sequence_start_ + i];
+ }
+ if (sequence_start_ > 0) {
+ this->chunks_.Add(this->current_chunk_.SubVector(0, sequence_start_));
+ } else {
+ this->current_chunk_.Dispose();
+ }
+ this->current_chunk_ = new_chunk;
+ this->index_ = sequence_length;
+ sequence_start_ = 0;
+ }
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COLLECTOR_H_
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index c47e1b7bee..8bb53323ab 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -31,7 +31,7 @@
#include "src/parsing/scanner-character-streams.h"
#include "src/profiler/cpu-profiler.h"
#include "src/runtime-profiler.h"
-#include "src/snapshot/serialize.h"
+#include "src/snapshot/code-serializer.h"
#include "src/vm-state-inl.h"
namespace v8 {
@@ -66,13 +66,27 @@ PARSE_INFO_GETTER(Handle<SharedFunctionInfo>, shared_info)
#undef PARSE_INFO_GETTER
#undef PARSE_INFO_GETTER_WITH_DEFAULT
+// A wrapper around a CompilationInfo that detaches the Handles from
+// the underlying DeferredHandleScope and stores them in info_ on
+// destruction.
+class CompilationHandleScope BASE_EMBEDDED {
+ public:
+ explicit CompilationHandleScope(CompilationInfo* info)
+ : deferred_(info->isolate()), info_(info) {}
+ ~CompilationHandleScope() { info_->set_deferred_handles(deferred_.Detach()); }
+
+ private:
+ DeferredHandleScope deferred_;
+ CompilationInfo* info_;
+};
// Exactly like a CompilationInfo, except being allocated via {new} and it also
// creates and enters a Zone on construction and deallocates it on destruction.
class CompilationInfoWithZone : public CompilationInfo {
public:
explicit CompilationInfoWithZone(Handle<JSFunction> function)
- : CompilationInfo(new ParseInfo(&zone_, function)) {}
+ : CompilationInfo(new ParseInfo(&zone_, function)),
+ zone_(function->GetIsolate()->allocator()) {}
// Virtual destructor because a CompilationInfoWithZone has to exit the
// zone scope and get rid of dependent maps even when the destructor is
@@ -88,6 +102,8 @@ class CompilationInfoWithZone : public CompilationInfo {
Zone zone_;
};
+// ----------------------------------------------------------------------------
+// Implementation of CompilationInfo
bool CompilationInfo::has_shared_info() const {
return parse_info_ && !parse_info_->shared_info().is_null();
@@ -127,12 +143,6 @@ CompilationInfo::CompilationInfo(ParseInfo* parse_info)
if (FLAG_turbo_types) MarkAsTypingEnabled();
if (has_shared_info()) {
- if (shared_info()->is_compiled()) {
- // We should initialize the CompilationInfo feedback vector from the
- // passed in shared info, rather than creating a new one.
- feedback_vector_ = Handle<TypeFeedbackVector>(
- shared_info()->feedback_vector(), parse_info->isolate());
- }
if (shared_info()->never_compiled()) MarkAsFirstCompile();
}
}
@@ -206,20 +216,6 @@ bool CompilationInfo::ShouldSelfOptimize() {
}
-void CompilationInfo::EnsureFeedbackVector() {
- if (feedback_vector_.is_null()) {
- Handle<TypeFeedbackMetadata> feedback_metadata =
- TypeFeedbackMetadata::New(isolate(), literal()->feedback_vector_spec());
- feedback_vector_ = TypeFeedbackVector::New(isolate(), feedback_metadata);
- }
-
- // It's very important that recompiles do not alter the structure of the
- // type feedback vector.
- CHECK(!feedback_vector_->metadata()->SpecDiffersFrom(
- literal()->feedback_vector_spec()));
-}
-
-
bool CompilationInfo::has_simple_parameters() {
return scope()->has_simple_parameters();
}
@@ -293,11 +289,38 @@ base::SmartArrayPointer<char> CompilationInfo::GetDebugName() const {
return name;
}
+StackFrame::Type CompilationInfo::GetOutputStackFrameType() const {
+ switch (output_code_kind()) {
+ case Code::STUB:
+ case Code::BYTECODE_HANDLER:
+ case Code::HANDLER:
+ case Code::BUILTIN:
+ return StackFrame::STUB;
+ case Code::WASM_FUNCTION:
+ return StackFrame::WASM;
+ case Code::JS_TO_WASM_FUNCTION:
+ return StackFrame::JS_TO_WASM;
+ case Code::WASM_TO_JS_FUNCTION:
+ return StackFrame::WASM_TO_JS;
+ default:
+ UNIMPLEMENTED();
+ return StackFrame::NONE;
+ }
+}
bool CompilationInfo::ExpectsJSReceiverAsReceiver() {
return is_sloppy(language_mode()) && !is_native();
}
+#if DEBUG
+void CompilationInfo::PrintAstForTesting() {
+ PrintF("--- Source from AST ---\n%s\n",
+ PrettyPrinter(isolate()).PrintProgram(literal()));
+}
+#endif
+
+// ----------------------------------------------------------------------------
+// Implementation of OptimizedCompileJob
class HOptimizedGraphBuilderWithPositions: public HOptimizedGraphBuilder {
public:
@@ -352,6 +375,14 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
return AbortOptimization(kFunctionBeingDebugged);
}
+ // Resuming a suspended frame is not supported by Crankshaft/TurboFan.
+ if (info()->shared_info()->HasBuiltinFunctionId() &&
+ (info()->shared_info()->builtin_function_id() == kGeneratorObjectNext ||
+ info()->shared_info()->builtin_function_id() == kGeneratorObjectReturn ||
+ info()->shared_info()->builtin_function_id() == kGeneratorObjectThrow)) {
+ return AbortOptimization(kGeneratorResumeMethod);
+ }
+
// Limit the number of times we try to optimize functions.
const int kMaxOptCount =
FLAG_deopt_every_n_times == 0 ? FLAG_max_opt_count : 1000;
@@ -360,7 +391,7 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
}
// Check the whitelist for Crankshaft.
- if (!info()->closure()->PassesFilter(FLAG_hydrogen_filter)) {
+ if (!info()->shared_info()->PassesFilter(FLAG_hydrogen_filter)) {
return AbortOptimization(kHydrogenFilter);
}
@@ -401,7 +432,8 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
!optimization_disabled;
// 3. Explicitly enabled by the command-line filter.
- bool passes_turbo_filter = info()->closure()->PassesFilter(FLAG_turbo_filter);
+ bool passes_turbo_filter =
+ info()->shared_info()->PassesFilter(FLAG_turbo_filter);
// If this is OSR request, OSR must be enabled by Turbofan.
bool passes_osr_test = FLAG_turbo_osr || !info()->is_osr();
@@ -461,11 +493,6 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
return AbortOptimization(kTooManyParametersLocals);
}
- if (scope->HasIllegalRedeclaration()) {
- // Crankshaft cannot handle illegal redeclarations.
- return AbortOptimization(kFunctionWithIllegalRedeclaration);
- }
-
if (FLAG_trace_opt) {
OFStream os(stdout);
os << "[compiling method " << Brief(*info()->closure())
@@ -490,13 +517,13 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
info()->shared_info()->disable_optimization_reason());
}
- graph_builder_ = (info()->is_tracking_positions() || FLAG_trace_ic)
- ? new (info()->zone())
- HOptimizedGraphBuilderWithPositions(info())
- : new (info()->zone()) HOptimizedGraphBuilder(info());
+ HOptimizedGraphBuilder* graph_builder =
+ (info()->is_tracking_positions() || FLAG_trace_ic)
+ ? new (info()->zone()) HOptimizedGraphBuilderWithPositions(info())
+ : new (info()->zone()) HOptimizedGraphBuilder(info());
Timer t(this, &time_taken_to_create_graph_);
- graph_ = graph_builder_->CreateGraph();
+ graph_ = graph_builder->CreateGraph();
if (isolate()->has_pending_exception()) {
return SetLastStatus(FAILED);
@@ -533,7 +560,7 @@ OptimizedCompileJob::Status OptimizedCompileJob::OptimizeGraph() {
chunk_ = LChunk::NewChunk(graph_);
if (chunk_ != NULL) return SetLastStatus(SUCCEEDED);
} else if (bailout_reason != kNoReason) {
- graph_builder_->Bailout(bailout_reason);
+ info_->AbortOptimization(bailout_reason);
}
return SetLastStatus(BAILED_OUT);
@@ -674,6 +701,10 @@ void OptimizedCompileJob::RecordOptimizationStats() {
}
}
+// ----------------------------------------------------------------------------
+// Local helper methods that make up the compilation pipeline.
+
+namespace {
// Sets the expected number of properties based on estimate from compiler.
void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
@@ -696,18 +727,16 @@ void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
shared->set_expected_nof_properties(estimate);
}
-
-static void MaybeDisableOptimization(Handle<SharedFunctionInfo> shared_info,
- BailoutReason bailout_reason) {
+void MaybeDisableOptimization(Handle<SharedFunctionInfo> shared_info,
+ BailoutReason bailout_reason) {
if (bailout_reason != kNoReason) {
shared_info->DisableOptimization(bailout_reason);
}
}
-
-static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
- CompilationInfo* info,
- Handle<SharedFunctionInfo> shared) {
+void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
+ CompilationInfo* info,
+ Handle<SharedFunctionInfo> shared) {
// SharedFunctionInfo is passed separately, because if CompilationInfo
// was created using Script object, it will not have it.
@@ -717,8 +746,9 @@ static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
if (info->isolate()->logger()->is_logging_code_events() ||
info->isolate()->cpu_profiler()->is_profiling()) {
Handle<Script> script = info->parse_info()->script();
- Handle<Code> code = info->code();
- if (code.is_identical_to(info->isolate()->builtins()->CompileLazy())) {
+ Handle<AbstractCode> abstract_code = info->abstract_code();
+ if (abstract_code.is_identical_to(
+ info->isolate()->builtins()->CompileLazy())) {
return;
}
int line_num = Script::GetLineNumber(script, shared->start_position()) + 1;
@@ -729,15 +759,37 @@ static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
: info->isolate()->heap()->empty_string();
Logger::LogEventsAndTags log_tag = Logger::ToNativeByScript(tag, *script);
PROFILE(info->isolate(),
- CodeCreateEvent(log_tag, *code, *shared, info, script_name,
+ CodeCreateEvent(log_tag, *abstract_code, *shared, info, script_name,
line_num, column_num));
}
}
-static bool CompileUnoptimizedCode(CompilationInfo* info) {
+void EnsureFeedbackVector(CompilationInfo* info) {
+ if (!info->has_shared_info()) return;
+
+ // If no type feedback vector exists, we create one now. At this point the
+ // AstNumbering pass has already run. Note the snapshot can contain outdated
+ // vectors for a different configuration, hence we also recreate a new vector
+ // when the function is not compiled (i.e. no code was serialized).
+ if (info->shared_info()->feedback_vector()->is_empty() ||
+ !info->shared_info()->is_compiled()) {
+ Handle<TypeFeedbackMetadata> feedback_metadata = TypeFeedbackMetadata::New(
+ info->isolate(), info->literal()->feedback_vector_spec());
+ Handle<TypeFeedbackVector> feedback_vector =
+ TypeFeedbackVector::New(info->isolate(), feedback_metadata);
+ info->shared_info()->set_feedback_vector(*feedback_vector);
+ }
+
+ // It's very important that recompiles do not alter the structure of the type
+ // feedback vector. Verify that the structure fits the function literal.
+ CHECK(!info->shared_info()->feedback_vector()->metadata()->SpecDiffersFrom(
+ info->literal()->feedback_vector_spec()));
+}
+
+bool CompileUnoptimizedCode(CompilationInfo* info) {
DCHECK(AllowCompilation::IsAllowed(info->isolate()));
if (!Compiler::Analyze(info->parse_info()) ||
- !FullCodeGenerator::MakeCode(info)) {
+ !(EnsureFeedbackVector(info), FullCodeGenerator::MakeCode(info))) {
Isolate* isolate = info->isolate();
if (!isolate->has_pending_exception()) isolate->StackOverflow();
return false;
@@ -745,24 +797,31 @@ static bool CompileUnoptimizedCode(CompilationInfo* info) {
return true;
}
+bool UseIgnition(CompilationInfo* info) {
+ // TODO(4681): Generator functions are not yet supported.
+ if (info->shared_info()->is_generator()) {
+ return false;
+ }
-static bool UseIgnition(CompilationInfo* info) {
- // Cannot use Ignition when the {function_data} is already used.
- if (info->has_shared_info() && info->shared_info()->HasBuiltinFunctionId()) {
+ // TODO(4681): Resuming a suspended frame is not supported.
+ if (info->shared_info()->HasBuiltinFunctionId() &&
+ (info->shared_info()->builtin_function_id() == kGeneratorObjectNext ||
+ info->shared_info()->builtin_function_id() == kGeneratorObjectReturn ||
+ info->shared_info()->builtin_function_id() == kGeneratorObjectThrow)) {
return false;
}
// Checks whether top level functions should be passed by the filter.
- if (info->closure().is_null()) {
+ if (info->shared_info()->is_toplevel()) {
Vector<const char> filter = CStrVector(FLAG_ignition_filter);
return (filter.length() == 0) || (filter.length() == 1 && filter[0] == '*');
}
// Finally respect the filter.
- return info->closure()->PassesFilter(FLAG_ignition_filter);
+ return info->shared_info()->PassesFilter(FLAG_ignition_filter);
}
-static int CodeAndMetadataSize(CompilationInfo* info) {
+int CodeAndMetadataSize(CompilationInfo* info) {
int size = 0;
if (info->has_bytecode_array()) {
Handle<BytecodeArray> bytecode_array = info->bytecode_array();
@@ -780,9 +839,9 @@ static int CodeAndMetadataSize(CompilationInfo* info) {
return size;
}
-
-static bool GenerateBaselineCode(CompilationInfo* info) {
+bool GenerateBaselineCode(CompilationInfo* info) {
bool success;
+ EnsureFeedbackVector(info);
if (FLAG_ignition && UseIgnition(info)) {
success = interpreter::Interpreter::MakeBytecode(info);
} else {
@@ -797,8 +856,7 @@ static bool GenerateBaselineCode(CompilationInfo* info) {
return success;
}
-
-static bool CompileBaselineCode(CompilationInfo* info) {
+bool CompileBaselineCode(CompilationInfo* info) {
DCHECK(AllowCompilation::IsAllowed(info->isolate()));
if (!Compiler::Analyze(info->parse_info()) || !GenerateBaselineCode(info)) {
Isolate* isolate = info->isolate();
@@ -808,8 +866,21 @@ static bool CompileBaselineCode(CompilationInfo* info) {
return true;
}
+void InstallBaselineCompilationResult(CompilationInfo* info,
+ Handle<SharedFunctionInfo> shared,
+ Handle<ScopeInfo> scope_info) {
+ // Assert that we are not overwriting (possibly patched) debug code.
+ DCHECK(!shared->HasDebugCode());
+ DCHECK(!info->code().is_null());
+ shared->ReplaceCode(*info->code());
+ shared->set_scope_info(*scope_info);
+ if (info->has_bytecode_array()) {
+ DCHECK(!shared->HasBytecodeArray()); // Only compiled once.
+ shared->set_bytecode_array(*info->bytecode_array());
+ }
+}
-MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCodeCommon(
+MUST_USE_RESULT MaybeHandle<Code> GetUnoptimizedCodeCommon(
CompilationInfo* info) {
VMState<COMPILER> state(info->isolate());
PostponeInterruptsScope postpone(info->isolate());
@@ -824,29 +895,20 @@ MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCodeCommon(
// Compile either unoptimized code or bytecode for the interpreter.
if (!CompileBaselineCode(info)) return MaybeHandle<Code>();
- if (info->code()->kind() == Code::FUNCTION) { // Only for full code.
- RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
- }
+ RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
// Update the shared function info with the scope info. Allocating the
// ScopeInfo object may cause a GC.
Handle<ScopeInfo> scope_info =
ScopeInfo::Create(info->isolate(), info->zone(), info->scope());
- shared->set_scope_info(*scope_info);
- // Update the code and feedback vector for the shared function info.
- shared->ReplaceCode(*info->code());
- shared->set_feedback_vector(*info->feedback_vector());
- if (info->has_bytecode_array()) {
- DCHECK(shared->function_data()->IsUndefined());
- shared->set_function_data(*info->bytecode_array());
- }
+ // Install compilation result on the shared function info
+ InstallBaselineCompilationResult(info, shared, scope_info);
return info->code();
}
-
-MUST_USE_RESULT static MaybeHandle<Code> GetCodeFromOptimizedCodeMap(
+MUST_USE_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeMap(
Handle<JSFunction> function, BailoutId osr_ast_id) {
Handle<SharedFunctionInfo> shared(function->shared());
DisallowHeapAllocation no_gc;
@@ -862,8 +924,7 @@ MUST_USE_RESULT static MaybeHandle<Code> GetCodeFromOptimizedCodeMap(
return MaybeHandle<Code>();
}
-
-static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
+void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
Handle<Code> code = info->code();
if (code->kind() != Code::OPTIMIZED_FUNCTION) return; // Nothing to do.
@@ -894,8 +955,7 @@ static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
}
}
-
-static bool Renumber(ParseInfo* parse_info) {
+bool Renumber(ParseInfo* parse_info) {
if (!AstNumbering::Renumber(parse_info->isolate(), parse_info->zone(),
parse_info->literal())) {
return false;
@@ -905,30 +965,14 @@ static bool Renumber(ParseInfo* parse_info) {
FunctionLiteral* lit = parse_info->literal();
shared_info->set_ast_node_count(lit->ast_node_count());
MaybeDisableOptimization(shared_info, lit->dont_optimize_reason());
- shared_info->set_dont_crankshaft(lit->flags() &
- AstProperties::kDontCrankshaft);
+ shared_info->set_dont_crankshaft(
+ shared_info->dont_crankshaft() ||
+ (lit->flags() & AstProperties::kDontCrankshaft));
}
return true;
}
-
-bool Compiler::Analyze(ParseInfo* info) {
- DCHECK_NOT_NULL(info->literal());
- if (!Rewriter::Rewrite(info)) return false;
- if (!Scope::Analyze(info)) return false;
- if (!Renumber(info)) return false;
- DCHECK_NOT_NULL(info->scope());
- return true;
-}
-
-
-bool Compiler::ParseAndAnalyze(ParseInfo* info) {
- if (!Parser::ParseStatic(info)) return false;
- return Compiler::Analyze(info);
-}
-
-
-static bool GetOptimizedCodeNow(CompilationInfo* info) {
+bool GetOptimizedCodeNow(CompilationInfo* info) {
Isolate* isolate = info->isolate();
CanonicalHandleScope canonical(isolate);
TimerEventScope<TimerEventOptimizeCode> optimize_code_timer(isolate);
@@ -959,8 +1003,7 @@ static bool GetOptimizedCodeNow(CompilationInfo* info) {
return true;
}
-
-static bool GetOptimizedCodeLater(CompilationInfo* info) {
+bool GetOptimizedCodeLater(CompilationInfo* info) {
Isolate* isolate = info->isolate();
CanonicalHandleScope canonical(isolate);
TimerEventScope<TimerEventOptimizeCode> optimize_code_timer(isolate);
@@ -1002,8 +1045,7 @@ static bool GetOptimizedCodeLater(CompilationInfo* info) {
return true;
}
-
-MaybeHandle<Code> Compiler::GetUnoptimizedCode(Handle<JSFunction> function) {
+MaybeHandle<Code> GetUnoptimizedCode(Handle<JSFunction> function) {
DCHECK(!function->GetIsolate()->has_pending_exception());
DCHECK(!function->is_compiled());
if (function->shared()->is_compiled()) {
@@ -1018,8 +1060,69 @@ MaybeHandle<Code> Compiler::GetUnoptimizedCode(Handle<JSFunction> function) {
return result;
}
+MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
+ Compiler::ConcurrencyMode mode,
+ BailoutId osr_ast_id = BailoutId::None(),
+ JavaScriptFrame* osr_frame = nullptr) {
+ Isolate* isolate = function->GetIsolate();
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate);
+ if (shared->HasDebugInfo()) return MaybeHandle<Code>();
+
+ Handle<Code> cached_code;
+ if (GetCodeFromOptimizedCodeMap(function, osr_ast_id)
+ .ToHandle(&cached_code)) {
+ if (FLAG_trace_opt) {
+ PrintF("[found optimized code for ");
+ function->ShortPrint();
+ if (!osr_ast_id.IsNone()) {
+ PrintF(" at OSR AST id %d", osr_ast_id.ToInt());
+ }
+ PrintF("]\n");
+ }
+ return cached_code;
+ }
+
+ DCHECK(AllowCompilation::IsAllowed(isolate));
+
+ if (shared->is_compiled()) {
+ shared->code()->set_profiler_ticks(0);
+ }
+
+ // TODO(mstarzinger): We cannot properly deserialize a scope chain containing
+ // an eval scope and hence would fail at parsing the eval source again.
+ if (shared->disable_optimization_reason() == kEval) {
+ return MaybeHandle<Code>();
+ }
+
+ // TODO(mstarzinger): We cannot properly deserialize a scope chain for the
+ // builtin context, hence Genesis::InstallExperimentalNatives would fail.
+ if (shared->is_toplevel() && isolate->bootstrapper()->IsActive()) {
+ return MaybeHandle<Code>();
+ }
+
+ base::SmartPointer<CompilationInfo> info(
+ new CompilationInfoWithZone(function));
+ VMState<COMPILER> state(isolate);
+ DCHECK(!isolate->has_pending_exception());
+ PostponeInterruptsScope postpone(isolate);
+
+ info->SetOptimizingForOsr(osr_ast_id);
-MaybeHandle<Code> Compiler::GetLazyCode(Handle<JSFunction> function) {
+ if (mode == Compiler::CONCURRENT) {
+ if (GetOptimizedCodeLater(info.get())) {
+ info.Detach(); // The background recompile job owns this now.
+ return isolate->builtins()->InOptimizationQueue();
+ }
+ } else {
+ info->set_osr_frame(osr_frame);
+ if (GetOptimizedCodeNow(info.get())) return info->code();
+ }
+
+ if (isolate->has_pending_exception()) isolate->clear_pending_exception();
+ return MaybeHandle<Code>();
+}
+
+MaybeHandle<Code> GetLazyCode(Handle<JSFunction> function) {
Isolate* isolate = function->GetIsolate();
DCHECK(!isolate->has_pending_exception());
DCHECK(!function->is_compiled());
@@ -1058,7 +1161,7 @@ MaybeHandle<Code> Compiler::GetLazyCode(Handle<JSFunction> function) {
if (FLAG_always_opt) {
Handle<Code> opt_code;
- if (Compiler::GetOptimizedCode(function, Compiler::NOT_CONCURRENT)
+ if (GetOptimizedCode(function, Compiler::NOT_CONCURRENT)
.ToHandle(&opt_code)) {
result = opt_code;
}
@@ -1068,73 +1171,12 @@ MaybeHandle<Code> Compiler::GetLazyCode(Handle<JSFunction> function) {
}
-bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag) {
- if (function->is_compiled()) return true;
- MaybeHandle<Code> maybe_code = Compiler::GetLazyCode(function);
- Handle<Code> code;
- if (!maybe_code.ToHandle(&code)) {
- if (flag == CLEAR_EXCEPTION) {
- function->GetIsolate()->clear_pending_exception();
- }
- return false;
- }
- function->ReplaceCode(*code);
- DCHECK(function->is_compiled());
- return true;
-}
-
-
-// TODO(turbofan): In the future, unoptimized code with deopt support could
-// be generated lazily once deopt is triggered.
-bool Compiler::EnsureDeoptimizationSupport(CompilationInfo* info) {
- DCHECK_NOT_NULL(info->literal());
- DCHECK(info->has_scope());
- Handle<SharedFunctionInfo> shared = info->shared_info();
- if (!shared->has_deoptimization_support()) {
- // TODO(titzer): just reuse the ParseInfo for the unoptimized compile.
- CompilationInfoWithZone unoptimized(info->closure());
- // Note that we use the same AST that we will use for generating the
- // optimized code.
- ParseInfo* parse_info = unoptimized.parse_info();
- parse_info->set_literal(info->literal());
- parse_info->set_scope(info->scope());
- parse_info->set_context(info->context());
- unoptimized.EnableDeoptimizationSupport();
- // If the current code has reloc info for serialization, also include
- // reloc info for serialization for the new code, so that deopt support
- // can be added without losing IC state.
- if (shared->code()->kind() == Code::FUNCTION &&
- shared->code()->has_reloc_info_for_serialization()) {
- unoptimized.PrepareForSerializing();
- }
- if (!FullCodeGenerator::MakeCode(&unoptimized)) return false;
-
- shared->EnableDeoptimizationSupport(*unoptimized.code());
- shared->set_feedback_vector(*unoptimized.feedback_vector());
-
- info->MarkAsCompiled();
-
- // The scope info might not have been set if a lazily compiled
- // function is inlined before being called for the first time.
- if (shared->scope_info() == ScopeInfo::Empty(info->isolate())) {
- Handle<ScopeInfo> target_scope_info =
- ScopeInfo::Create(info->isolate(), info->zone(), info->scope());
- shared->set_scope_info(*target_scope_info);
- }
-
- // The existing unoptimized code was replaced with the new one.
- RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, &unoptimized, shared);
- }
- return true;
-}
-
-
bool CompileEvalForDebugging(Handle<JSFunction> function,
Handle<SharedFunctionInfo> shared) {
Handle<Script> script(Script::cast(shared->script()));
Handle<Context> context(function->context());
- Zone zone;
+ Zone zone(function->GetIsolate()->allocator());
ParseInfo parse_info(&zone, script);
CompilationInfo info(&parse_info);
Isolate* isolate = info.isolate();
@@ -1176,61 +1218,25 @@ bool CompileForDebugging(CompilationInfo* info) {
return true;
}
-
-static inline bool IsEvalToplevel(Handle<SharedFunctionInfo> shared) {
+inline bool IsEvalToplevel(Handle<SharedFunctionInfo> shared) {
return shared->is_toplevel() && shared->script()->IsScript() &&
Script::cast(shared->script())->compilation_type() ==
Script::COMPILATION_TYPE_EVAL;
}
-
-bool Compiler::CompileDebugCode(Handle<JSFunction> function) {
- Handle<SharedFunctionInfo> shared(function->shared());
- if (IsEvalToplevel(shared)) {
- return CompileEvalForDebugging(function, shared);
- } else {
- CompilationInfoWithZone info(function);
- return CompileForDebugging(&info);
- }
-}
-
-
-bool Compiler::CompileDebugCode(Handle<SharedFunctionInfo> shared) {
- DCHECK(shared->allows_lazy_compilation_without_context());
- DCHECK(!IsEvalToplevel(shared));
- Zone zone;
- ParseInfo parse_info(&zone, shared);
- CompilationInfo info(&parse_info);
- return CompileForDebugging(&info);
-}
-
-
-void Compiler::CompileForLiveEdit(Handle<Script> script) {
- // TODO(635): support extensions.
- Zone zone;
- ParseInfo parse_info(&zone, script);
- CompilationInfo info(&parse_info);
- PostponeInterruptsScope postpone(info.isolate());
- VMState<COMPILER> state(info.isolate());
-
- // Get rid of old list of shared function infos.
- info.MarkAsFirstCompile();
- info.MarkAsDebug();
- info.parse_info()->set_global();
- if (!Parser::ParseStatic(info.parse_info())) return;
-
- LiveEditFunctionTracker tracker(info.isolate(), parse_info.literal());
- if (!CompileUnoptimizedCode(&info)) return;
- if (info.has_shared_info()) {
- Handle<ScopeInfo> scope_info =
- ScopeInfo::Create(info.isolate(), info.zone(), info.scope());
- info.shared_info()->set_scope_info(*scope_info);
- }
- tracker.RecordRootFunctionInfo(info.code());
+Handle<SharedFunctionInfo> NewSharedFunctionInfoForLiteral(
+ Isolate* isolate, FunctionLiteral* literal, Handle<Script> script) {
+ Handle<Code> code = isolate->builtins()->CompileLazy();
+ Handle<ScopeInfo> scope_info = handle(ScopeInfo::Empty(isolate));
+ Handle<SharedFunctionInfo> result = isolate->factory()->NewSharedFunctionInfo(
+ literal->name(), literal->materialized_literal_count(), literal->kind(),
+ code, scope_info);
+ SharedFunctionInfo::InitFromFunctionLiteral(result, literal);
+ SharedFunctionInfo::SetScript(result, script);
+ return result;
}
-
-static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
+Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
Isolate* isolate = info->isolate();
TimerEventScope<TimerEventCompileCode> timer(isolate);
TRACE_EVENT0("v8", "V8.CompileCode");
@@ -1263,6 +1269,13 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
FLAG_min_preparse_length) &&
!info->is_debug();
+ // Consider parsing eagerly when targeting the code cache.
+ parse_allow_lazy &= !(FLAG_serialize_eager && info->will_serialize());
+
+ // Consider parsing eagerly when targeting Ignition.
+ parse_allow_lazy &= !(FLAG_ignition && FLAG_ignition_eager &&
+ !isolate->serializer_enabled());
+
parse_info->set_allow_lazy_parsing(parse_allow_lazy);
if (!parse_allow_lazy &&
(options == ScriptCompiler::kProduceParserCache ||
@@ -1295,31 +1308,25 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
HistogramTimerScope timer(rate);
TRACE_EVENT0("v8", info->is_eval() ? "V8.CompileEval" : "V8.Compile");
- // Compile the code.
- if (!CompileBaselineCode(info)) {
- return Handle<SharedFunctionInfo>::null();
- }
-
- // Allocate function.
- DCHECK(!info->code().is_null());
- result = isolate->factory()->NewSharedFunctionInfo(
- lit->name(), lit->materialized_literal_count(), lit->kind(),
- info->code(),
- ScopeInfo::Create(info->isolate(), info->zone(), info->scope()),
- info->feedback_vector());
- if (info->has_bytecode_array()) {
- DCHECK(result->function_data()->IsUndefined());
- result->set_function_data(*info->bytecode_array());
- }
-
+ // Allocate a shared function info object.
DCHECK_EQ(RelocInfo::kNoPosition, lit->function_token_position());
- SharedFunctionInfo::InitFromFunctionLiteral(result, lit);
- SharedFunctionInfo::SetScript(result, script);
+ result = NewSharedFunctionInfoForLiteral(isolate, lit, script);
result->set_is_toplevel(true);
if (info->is_eval()) {
// Eval scripts cannot be (re-)compiled without context.
result->set_allows_lazy_compilation_without_context(false);
}
+ parse_info->set_shared_info(result);
+
+ // Compile the code.
+ if (!CompileBaselineCode(info)) {
+ return Handle<SharedFunctionInfo>::null();
+ }
+
+ // Install compilation result on the shared function info
+ Handle<ScopeInfo> scope_info =
+ ScopeInfo::Create(info->isolate(), info->zone(), info->scope());
+ InstallBaselineCompilationResult(info, result, scope_info);
Handle<String> script_name =
script->name()->IsString()
@@ -1329,8 +1336,8 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
? Logger::EVAL_TAG
: Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script);
- PROFILE(isolate, CodeCreateEvent(
- log_tag, *info->code(), *result, info, *script_name));
+ PROFILE(isolate, CodeCreateEvent(log_tag, *info->abstract_code(), *result,
+ info, *script_name));
// Hint to the runtime system used when allocating space for initial
// property space by setting the expected number of properties for
@@ -1347,6 +1354,152 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
return result;
}
+} // namespace
+
+// ----------------------------------------------------------------------------
+// Implementation of Compiler
+
+bool Compiler::Analyze(ParseInfo* info) {
+ DCHECK_NOT_NULL(info->literal());
+ if (!Rewriter::Rewrite(info)) return false;
+ if (!Scope::Analyze(info)) return false;
+ if (!Renumber(info)) return false;
+ DCHECK_NOT_NULL(info->scope());
+ return true;
+}
+
+bool Compiler::ParseAndAnalyze(ParseInfo* info) {
+ if (!Parser::ParseStatic(info)) return false;
+ return Compiler::Analyze(info);
+}
+
+bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag) {
+ if (function->is_compiled()) return true;
+ MaybeHandle<Code> maybe_code = GetLazyCode(function);
+ Handle<Code> code;
+ if (!maybe_code.ToHandle(&code)) {
+ if (flag == CLEAR_EXCEPTION) {
+ function->GetIsolate()->clear_pending_exception();
+ }
+ return false;
+ }
+ DCHECK(code->IsJavaScriptCode());
+ function->ReplaceCode(*code);
+ DCHECK(function->is_compiled());
+ return true;
+}
+
+bool Compiler::CompileOptimized(Handle<JSFunction> function,
+ ConcurrencyMode mode) {
+ Handle<Code> code;
+ if (GetOptimizedCode(function, mode).ToHandle(&code)) {
+ // Optimization succeeded, return optimized code.
+ function->ReplaceCode(*code);
+ } else {
+ // Optimization failed, get unoptimized code.
+ Isolate* isolate = function->GetIsolate();
+ if (isolate->has_pending_exception()) { // Possible stack overflow.
+ return false;
+ }
+ code = Handle<Code>(function->shared()->code(), isolate);
+ if (code->kind() != Code::FUNCTION &&
+ code->kind() != Code::OPTIMIZED_FUNCTION) {
+ if (!GetUnoptimizedCode(function).ToHandle(&code)) {
+ return false;
+ }
+ }
+ function->ReplaceCode(*code);
+ }
+
+ DCHECK(function->code()->kind() == Code::FUNCTION ||
+ function->code()->kind() == Code::OPTIMIZED_FUNCTION ||
+ (function->code()->is_interpreter_entry_trampoline() &&
+ function->shared()->HasBytecodeArray()) ||
+ function->IsInOptimizationQueue());
+ return true;
+}
+
+bool Compiler::CompileDebugCode(Handle<JSFunction> function) {
+ Handle<SharedFunctionInfo> shared(function->shared());
+ if (IsEvalToplevel(shared)) {
+ return CompileEvalForDebugging(function, shared);
+ } else {
+ CompilationInfoWithZone info(function);
+ return CompileForDebugging(&info);
+ }
+}
+
+bool Compiler::CompileDebugCode(Handle<SharedFunctionInfo> shared) {
+ DCHECK(shared->allows_lazy_compilation_without_context());
+ DCHECK(!IsEvalToplevel(shared));
+ Zone zone(shared->GetIsolate()->allocator());
+ ParseInfo parse_info(&zone, shared);
+ CompilationInfo info(&parse_info);
+ return CompileForDebugging(&info);
+}
+
+// TODO(turbofan): In the future, unoptimized code with deopt support could
+// be generated lazily once deopt is triggered.
+bool Compiler::EnsureDeoptimizationSupport(CompilationInfo* info) {
+ DCHECK_NOT_NULL(info->literal());
+ DCHECK(info->has_scope());
+ Handle<SharedFunctionInfo> shared = info->shared_info();
+ if (!shared->has_deoptimization_support()) {
+ // TODO(titzer): just reuse the ParseInfo for the unoptimized compile.
+ CompilationInfoWithZone unoptimized(info->closure());
+ // Note that we use the same AST that we will use for generating the
+ // optimized code.
+ ParseInfo* parse_info = unoptimized.parse_info();
+ parse_info->set_literal(info->literal());
+ parse_info->set_scope(info->scope());
+ parse_info->set_context(info->context());
+ unoptimized.EnableDeoptimizationSupport();
+ // If the current code has reloc info for serialization, also include
+ // reloc info for serialization for the new code, so that deopt support
+ // can be added without losing IC state.
+ if (shared->code()->kind() == Code::FUNCTION &&
+ shared->code()->has_reloc_info_for_serialization()) {
+ unoptimized.PrepareForSerializing();
+ }
+ EnsureFeedbackVector(&unoptimized);
+ if (!FullCodeGenerator::MakeCode(&unoptimized)) return false;
+
+ shared->EnableDeoptimizationSupport(*unoptimized.code());
+
+ info->MarkAsCompiled();
+
+ // The scope info might not have been set if a lazily compiled
+ // function is inlined before being called for the first time.
+ if (shared->scope_info() == ScopeInfo::Empty(info->isolate())) {
+ Handle<ScopeInfo> target_scope_info =
+ ScopeInfo::Create(info->isolate(), info->zone(), info->scope());
+ shared->set_scope_info(*target_scope_info);
+ }
+
+ // The existing unoptimized code was replaced with the new one.
+ RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, &unoptimized, shared);
+ }
+ return true;
+}
+
+void Compiler::CompileForLiveEdit(Handle<Script> script) {
+ // TODO(635): support extensions.
+ Zone zone(script->GetIsolate()->allocator());
+ ParseInfo parse_info(&zone, script);
+ CompilationInfo info(&parse_info);
+ PostponeInterruptsScope postpone(info.isolate());
+ VMState<COMPILER> state(info.isolate());
+
+ // Get rid of old list of shared function infos.
+ info.MarkAsFirstCompile();
+ info.MarkAsDebug();
+ info.parse_info()->set_global();
+ if (!Parser::ParseStatic(info.parse_info())) return;
+
+ LiveEditFunctionTracker tracker(info.isolate(), parse_info.literal());
+ if (!CompileUnoptimizedCode(&info)) return;
+ tracker.RecordRootFunctionInfo(info.code());
+}
MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
Handle<String> source, Handle<SharedFunctionInfo> outer_info,
@@ -1373,7 +1526,7 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
script->set_column_offset(column_offset);
}
script->set_origin_options(options);
- Zone zone;
+ Zone zone(isolate->allocator());
ParseInfo parse_info(&zone, script);
CompilationInfo info(&parse_info);
parse_info.set_eval();
@@ -1401,8 +1554,6 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
compilation_cache->PutEval(source, outer_info, context, shared_info,
line_offset);
}
- } else if (shared_info->ic_age() != isolate->heap()->global_ic_age()) {
- shared_info->ResetForNewContext(isolate->heap()->global_ic_age());
}
Handle<JSFunction> result =
@@ -1417,8 +1568,7 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
return result;
}
-
-Handle<SharedFunctionInfo> Compiler::CompileScript(
+Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
Handle<String> source, Handle<Object> script_name, int line_offset,
int column_offset, ScriptOriginOptions resource_options,
Handle<Object> source_map_url, Handle<Context> context,
@@ -1443,12 +1593,7 @@ Handle<SharedFunctionInfo> Compiler::CompileScript(
isolate->counters()->total_load_size()->Increment(source_length);
isolate->counters()->total_compile_size()->Increment(source_length);
- // TODO(rossberg): The natives do not yet obey strong mode rules
- // (for example, some macros use '==').
- bool use_strong = FLAG_use_strong && !isolate->bootstrapper()->IsActive();
- LanguageMode language_mode =
- construct_language_mode(FLAG_use_strict, use_strong);
-
+ LanguageMode language_mode = construct_language_mode(FLAG_use_strict);
CompilationCache* compilation_cache = isolate->compilation_cache();
// Do a lookup in the compilation cache but not for extensions.
@@ -1505,10 +1650,10 @@ Handle<SharedFunctionInfo> Compiler::CompileScript(
}
// Compile the function and add it to the cache.
- Zone zone;
+ Zone zone(isolate->allocator());
ParseInfo parse_info(&zone, script);
CompilationInfo info(&parse_info);
- if (FLAG_harmony_modules && is_module) {
+ if (is_module) {
parse_info.set_module();
} else {
parse_info.set_global();
@@ -1553,16 +1698,14 @@ Handle<SharedFunctionInfo> Compiler::CompileScript(
return result;
}
-
-Handle<SharedFunctionInfo> Compiler::CompileStreamedScript(
+Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForStreamedScript(
Handle<Script> script, ParseInfo* parse_info, int source_length) {
Isolate* isolate = script->GetIsolate();
// TODO(titzer): increment the counters in caller.
isolate->counters()->total_load_size()->Increment(source_length);
isolate->counters()->total_compile_size()->Increment(source_length);
- LanguageMode language_mode =
- construct_language_mode(FLAG_use_strict, FLAG_use_strong);
+ LanguageMode language_mode = construct_language_mode(FLAG_use_strict);
parse_info->set_language_mode(
static_cast<LanguageMode>(parse_info->language_mode() | language_mode));
@@ -1587,8 +1730,8 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
// On the first compile, there are no existing shared function info for
// inner functions yet, so do not try to find them. All bets are off for
// live edit though.
- DCHECK(script->FindSharedFunctionInfo(literal).is_null() ||
- isolate->debug()->live_edit_enabled());
+ SLOW_DCHECK(script->FindSharedFunctionInfo(literal).is_null() ||
+ isolate->debug()->live_edit_enabled());
} else {
maybe_existing = script->FindSharedFunctionInfo(literal);
}
@@ -1604,10 +1747,18 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
}
}
- Zone zone;
+ // Allocate a shared function info object.
+ Handle<SharedFunctionInfo> result;
+ if (!maybe_existing.ToHandle(&result)) {
+ result = NewSharedFunctionInfoForLiteral(isolate, literal, script);
+ result->set_is_toplevel(false);
+ }
+
+ Zone zone(isolate->allocator());
ParseInfo parse_info(&zone, script);
CompilationInfo info(&parse_info);
parse_info.set_literal(literal);
+ parse_info.set_shared_info(result);
parse_info.set_scope(literal->scope());
parse_info.set_language_mode(literal->scope()->language_mode());
if (outer_info->will_serialize()) info.PrepareForSerializing();
@@ -1633,49 +1784,35 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
bool lazy = FLAG_lazy && allow_lazy && !literal->should_eager_compile();
+ // Consider compiling eagerly when targeting the code cache.
+ lazy &= !(FLAG_serialize_eager && info.will_serialize());
+
+ // Consider compiling eagerly when compiling bytecode for Ignition.
+ lazy &=
+ !(FLAG_ignition && FLAG_ignition_eager && !isolate->serializer_enabled());
+
// Generate code
TimerEventScope<TimerEventCompileCode> timer(isolate);
TRACE_EVENT0("v8", "V8.CompileCode");
- Handle<ScopeInfo> scope_info;
if (lazy) {
- Handle<Code> code = isolate->builtins()->CompileLazy();
- info.SetCode(code);
- // There's no need in theory for a lazy-compiled function to have a type
- // feedback vector, but some parts of the system expect all
- // SharedFunctionInfo instances to have one. The size of the vector depends
- // on how many feedback-needing nodes are in the tree, and when lazily
- // parsing we might not know that, if this function was never parsed before.
- // In that case the vector will be replaced the next time MakeCode is
- // called.
- info.EnsureFeedbackVector();
- scope_info = Handle<ScopeInfo>(ScopeInfo::Empty(isolate));
+ info.SetCode(isolate->builtins()->CompileLazy());
} else if (Renumber(info.parse_info()) && GenerateBaselineCode(&info)) {
// Code generation will ensure that the feedback vector is present and
// appropriately sized.
DCHECK(!info.code().is_null());
- scope_info = ScopeInfo::Create(info.isolate(), info.zone(), info.scope());
+ Handle<ScopeInfo> scope_info =
+ ScopeInfo::Create(info.isolate(), info.zone(), info.scope());
if (literal->should_eager_compile() &&
literal->should_be_used_once_hint()) {
info.code()->MarkToBeExecutedOnce(isolate);
}
+ // Install compilation result on the shared function info.
+ InstallBaselineCompilationResult(&info, result, scope_info);
} else {
return Handle<SharedFunctionInfo>::null();
}
if (maybe_existing.is_null()) {
- // Create a shared function info object.
- Handle<SharedFunctionInfo> result =
- isolate->factory()->NewSharedFunctionInfo(
- literal->name(), literal->materialized_literal_count(),
- literal->kind(), info.code(), scope_info, info.feedback_vector());
- if (info.has_bytecode_array()) {
- DCHECK(result->function_data()->IsUndefined());
- result->set_function_data(*info.bytecode_array());
- }
-
- SharedFunctionInfo::InitFromFunctionLiteral(result, literal);
- SharedFunctionInfo::SetScript(result, script);
- result->set_is_toplevel(false);
// If the outer function has been compiled before, we cannot be sure that
// shared function info for this function literal has been created for the
// first time. It may have already been compiled previously.
@@ -1690,15 +1827,9 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
SetExpectedNofPropertiesFromEstimate(result,
literal->expected_property_count());
live_edit_tracker.RecordFunctionInfo(result, literal, info.zone());
- return result;
- } else if (!lazy) {
- // Assert that we are not overwriting (possibly patched) debug code.
- DCHECK(!existing->HasDebugCode());
- existing->ReplaceCode(*info.code());
- existing->set_scope_info(*scope_info);
- existing->set_feedback_vector(*info.feedback_vector());
- }
- return existing;
+ }
+
+ return result;
}
Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForNative(
@@ -1721,9 +1852,9 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForNative(
Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
Handle<SharedFunctionInfo> shared = isolate->factory()->NewSharedFunctionInfo(
name, literals, FunctionKind::kNormalFunction, code,
- Handle<ScopeInfo>(fun->shared()->scope_info()),
- Handle<TypeFeedbackVector>(fun->shared()->feedback_vector()));
+ Handle<ScopeInfo>(fun->shared()->scope_info()));
shared->set_construct_stub(*construct_stub);
+ shared->set_feedback_vector(fun->shared()->feedback_vector());
// Copy the function data to the shared function info.
shared->set_function_data(fun->shared()->function_data());
@@ -1733,81 +1864,15 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForNative(
return shared;
}
-MaybeHandle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function,
- ConcurrencyMode mode,
- BailoutId osr_ast_id,
- JavaScriptFrame* osr_frame) {
- Isolate* isolate = function->GetIsolate();
- Handle<SharedFunctionInfo> shared(function->shared(), isolate);
- if (shared->HasDebugInfo()) return MaybeHandle<Code>();
-
- Handle<Code> cached_code;
- if (GetCodeFromOptimizedCodeMap(
- function, osr_ast_id).ToHandle(&cached_code)) {
- if (FLAG_trace_opt) {
- PrintF("[found optimized code for ");
- function->ShortPrint();
- if (!osr_ast_id.IsNone()) {
- PrintF(" at OSR AST id %d", osr_ast_id.ToInt());
- }
- PrintF("]\n");
- }
- return cached_code;
- }
-
- DCHECK(AllowCompilation::IsAllowed(isolate));
-
- Handle<Code> current_code(shared->code());
- if (!shared->is_compiled() ||
- shared->scope_info() == ScopeInfo::Empty(isolate)) {
- // The function was never compiled. Compile it unoptimized first.
- // TODO(titzer): reuse the AST and scope info from this compile.
- CompilationInfoWithZone unoptimized(function);
- unoptimized.EnableDeoptimizationSupport();
- if (!GetUnoptimizedCodeCommon(&unoptimized).ToHandle(&current_code)) {
- return MaybeHandle<Code>();
- }
- shared->ReplaceCode(*current_code);
- }
-
- current_code->set_profiler_ticks(0);
-
- // TODO(mstarzinger): We cannot properly deserialize a scope chain containing
- // an eval scope and hence would fail at parsing the eval source again.
- if (shared->disable_optimization_reason() == kEval) {
- return MaybeHandle<Code>();
- }
-
- // TODO(mstarzinger): We cannot properly deserialize a scope chain for the
- // builtin context, hence Genesis::InstallExperimentalNatives would fail.
- if (shared->is_toplevel() && isolate->bootstrapper()->IsActive()) {
- return MaybeHandle<Code>();
- }
-
- base::SmartPointer<CompilationInfo> info(
- new CompilationInfoWithZone(function));
- VMState<COMPILER> state(isolate);
- DCHECK(!isolate->has_pending_exception());
- PostponeInterruptsScope postpone(isolate);
-
- info->SetOptimizingForOsr(osr_ast_id, current_code);
-
- if (mode == CONCURRENT) {
- if (GetOptimizedCodeLater(info.get())) {
- info.Detach(); // The background recompile job owns this now.
- return isolate->builtins()->InOptimizationQueue();
- }
- } else {
- info->set_osr_frame(osr_frame);
- if (GetOptimizedCodeNow(info.get())) return info->code();
- }
-
- if (isolate->has_pending_exception()) isolate->clear_pending_exception();
- return MaybeHandle<Code>();
+MaybeHandle<Code> Compiler::GetOptimizedCodeForOSR(Handle<JSFunction> function,
+ BailoutId osr_ast_id,
+ JavaScriptFrame* osr_frame) {
+ DCHECK(!osr_ast_id.IsNone());
+ DCHECK_NOT_NULL(osr_frame);
+ return GetOptimizedCode(function, NOT_CONCURRENT, osr_ast_id, osr_frame);
}
-MaybeHandle<Code> Compiler::GetConcurrentlyOptimizedCode(
- OptimizedCompileJob* job) {
+void Compiler::FinalizeOptimizedCompileJob(OptimizedCompileJob* job) {
// Take ownership of compilation info. Deleting compilation info
// also tears down the zone and the recompile job.
base::SmartPointer<CompilationInfo> info(job->info());
@@ -1843,7 +1908,8 @@ MaybeHandle<Code> Compiler::GetConcurrentlyOptimizedCode(
info->closure()->ShortPrint();
PrintF("]\n");
}
- return Handle<Code>(*info->code());
+ info->closure()->ReplaceCode(*info->code());
+ return;
}
}
@@ -1853,45 +1919,44 @@ MaybeHandle<Code> Compiler::GetConcurrentlyOptimizedCode(
info->closure()->ShortPrint();
PrintF(" because: %s]\n", GetBailoutReason(info->bailout_reason()));
}
- return MaybeHandle<Code>();
+ info->closure()->ReplaceCode(shared->code());
}
+void Compiler::PostInstantiation(Handle<JSFunction> function,
+ PretenureFlag pretenure) {
+ Handle<SharedFunctionInfo> shared(function->shared());
-CompilationPhase::CompilationPhase(const char* name, CompilationInfo* info)
- : name_(name), info_(info) {
- if (FLAG_hydrogen_stats) {
- info_zone_start_allocation_size_ = info->zone()->allocation_size();
- timer_.Start();
+ if (FLAG_always_opt && shared->allows_lazy_compilation()) {
+ function->MarkForOptimization();
}
-}
-
-CompilationPhase::~CompilationPhase() {
- if (FLAG_hydrogen_stats) {
- size_t size = zone()->allocation_size();
- size += info_->zone()->allocation_size() - info_zone_start_allocation_size_;
- isolate()->GetHStatistics()->SaveTiming(name_, timer_.Elapsed(), size);
+ CodeAndLiterals cached = shared->SearchOptimizedCodeMap(
+ function->context()->native_context(), BailoutId::None());
+ if (cached.code != nullptr) {
+ // Caching of optimized code enabled and optimized code found.
+ DCHECK(!cached.code->marked_for_deoptimization());
+ DCHECK(function->shared()->is_compiled());
+ function->ReplaceCode(cached.code);
}
-}
-
-bool CompilationPhase::ShouldProduceTraceOutput() const {
- // Trace if the appropriate trace flag is set and the phase name's first
- // character is in the FLAG_trace_phase command line parameter.
- AllowHandleDereference allow_deref;
- bool tracing_on = info()->IsStub()
- ? FLAG_trace_hydrogen_stubs
- : (FLAG_trace_hydrogen &&
- info()->closure()->PassesFilter(FLAG_trace_hydrogen_filter));
- return (tracing_on &&
- base::OS::StrChr(const_cast<char*>(FLAG_trace_phase), name_[0]) != NULL);
+ if (cached.literals != nullptr) {
+ function->set_literals(cached.literals);
+ } else {
+ Isolate* isolate = function->GetIsolate();
+ int number_of_literals = shared->num_literals();
+ Handle<LiteralsArray> literals =
+ LiteralsArray::New(isolate, handle(shared->feedback_vector()),
+ number_of_literals, pretenure);
+ function->set_literals(*literals);
+
+ // Cache context-specific literals.
+ MaybeHandle<Code> code;
+ if (cached.code != nullptr) code = handle(cached.code);
+ Handle<Context> native_context(function->context()->native_context());
+ SharedFunctionInfo::AddToOptimizedCodeMap(shared, native_context, code,
+ literals, BailoutId::None());
+ }
}
-#if DEBUG
-void CompilationInfo::PrintAstForTesting() {
- PrintF("--- Source from AST ---\n%s\n",
- PrettyPrinter(isolate()).PrintProgram(literal()));
-}
-#endif
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index a56fa13c48..fa043995b4 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -9,7 +9,6 @@
#include "src/ast/ast.h"
#include "src/bailout-reason.h"
#include "src/compilation-dependencies.h"
-#include "src/signature.h"
#include "src/source-position.h"
#include "src/zone.h"
@@ -17,10 +16,107 @@ namespace v8 {
namespace internal {
// Forward declarations.
+class CompilationInfo;
class JavaScriptFrame;
+class OptimizedCompileJob;
class ParseInfo;
class ScriptData;
+// The V8 compiler API.
+//
+// This is the central hub for dispatching to the various compilers within V8.
+// Logic for which compiler to choose and how to wire compilation results into
+// the object heap should be kept inside this class.
+//
+// General strategy: Scripts are translated into anonymous functions w/o
+// parameters which then can be executed. If the source code contains other
+// functions, they might be compiled and allocated as part of the compilation
+// of the source code or deferred for lazy compilation at a later point.
+class Compiler : public AllStatic {
+ public:
+ enum ClearExceptionFlag { KEEP_EXCEPTION, CLEAR_EXCEPTION };
+ enum ConcurrencyMode { NOT_CONCURRENT, CONCURRENT };
+
+ // ===========================================================================
+ // The following family of methods ensures a given function is compiled. The
+ // general contract is that failures will be reported by returning {false},
+ // whereas successful compilation ensures the {is_compiled} predicate on the
+ // given function holds (except for live-edit, which compiles the world).
+
+ static bool Compile(Handle<JSFunction> function, ClearExceptionFlag flag);
+ static bool CompileOptimized(Handle<JSFunction> function, ConcurrencyMode);
+ static bool CompileDebugCode(Handle<JSFunction> function);
+ static bool CompileDebugCode(Handle<SharedFunctionInfo> shared);
+ static void CompileForLiveEdit(Handle<Script> script);
+
+ // Generate and install code from previously queued optimization job.
+ static void FinalizeOptimizedCompileJob(OptimizedCompileJob* job);
+
+ // Give the compiler a chance to perform low-latency initialization tasks of
+ // the given {function} on its instantiation. Note that only the runtime will
+ // offer this chance, optimized closure instantiation will not call this.
+ static void PostInstantiation(Handle<JSFunction> function, PretenureFlag);
+
+ // Parser::Parse, then Compiler::Analyze.
+ static bool ParseAndAnalyze(ParseInfo* info);
+ // Rewrite, analyze scopes, and renumber.
+ static bool Analyze(ParseInfo* info);
+ // Adds deoptimization support, requires ParseAndAnalyze.
+ static bool EnsureDeoptimizationSupport(CompilationInfo* info);
+
+ // ===========================================================================
+ // The following family of methods instantiates new functions for scripts or
+ // function literals. The decision whether those functions will be compiled,
+ // is left to the discretion of the compiler.
+ //
+ // Please note this interface returns shared function infos. This means you
+ // need to call Factory::NewFunctionFromSharedFunctionInfo before you have a
+ // real function with a context.
+
+ // Create a (bound) function for a String source within a context for eval.
+ MUST_USE_RESULT static MaybeHandle<JSFunction> GetFunctionFromEval(
+ Handle<String> source, Handle<SharedFunctionInfo> outer_info,
+ Handle<Context> context, LanguageMode language_mode,
+ ParseRestriction restriction, int line_offset, int column_offset = 0,
+ Handle<Object> script_name = Handle<Object>(),
+ ScriptOriginOptions options = ScriptOriginOptions());
+
+ // Create a shared function info object for a String source within a context.
+ static Handle<SharedFunctionInfo> GetSharedFunctionInfoForScript(
+ Handle<String> source, Handle<Object> script_name, int line_offset,
+ int column_offset, ScriptOriginOptions resource_options,
+ Handle<Object> source_map_url, Handle<Context> context,
+ v8::Extension* extension, ScriptData** cached_data,
+ ScriptCompiler::CompileOptions compile_options,
+ NativesFlag is_natives_code, bool is_module);
+
+ // Create a shared function info object for a Script that has already been
+ // parsed while the script was being loaded from a streamed source.
+ static Handle<SharedFunctionInfo> GetSharedFunctionInfoForStreamedScript(
+ Handle<Script> script, ParseInfo* info, int source_length);
+
+ // Create a shared function info object (the code may be lazily compiled).
+ static Handle<SharedFunctionInfo> GetSharedFunctionInfo(
+ FunctionLiteral* node, Handle<Script> script, CompilationInfo* outer);
+
+ // Create a shared function info object for a native function literal.
+ static Handle<SharedFunctionInfo> GetSharedFunctionInfoForNative(
+ v8::Extension* extension, Handle<String> name);
+
+ // ===========================================================================
+ // The following family of methods provides support for OSR. Code generated
+ // for entry via OSR might not be suitable for normal entry, hence will be
+ // returned directly to the caller.
+ //
+ // Please note this interface is the only part dealing with {Code} objects
+ // directly. Other methods are agnostic to {Code} and can use an interpreter
+ // instead of generating JIT code for a function at all.
+
+ // Generate and return optimized code for OSR, or empty handle on failure.
+ MUST_USE_RESULT static MaybeHandle<Code> GetOptimizedCodeForOSR(
+ Handle<JSFunction> function, BailoutId osr_ast_id,
+ JavaScriptFrame* osr_frame);
+};
struct InlinedFunctionInfo {
InlinedFunctionInfo(int parent_id, SourcePosition inline_position,
@@ -101,7 +197,6 @@ class CompilationInfo {
Handle<Code> code() const { return code_; }
Code::Flags code_flags() const { return code_flags_; }
BailoutId osr_ast_id() const { return osr_ast_id_; }
- Handle<Code> unoptimized_code() const { return unoptimized_code_; }
int opt_count() const { return opt_count_; }
int num_parameters() const;
int num_parameters_including_this() const;
@@ -116,6 +211,11 @@ class CompilationInfo {
bool has_bytecode_array() const { return !bytecode_array_.is_null(); }
Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
+ Handle<AbstractCode> abstract_code() const {
+ return has_bytecode_array() ? Handle<AbstractCode>::cast(bytecode_array())
+ : Handle<AbstractCode>::cast(code());
+ }
+
bool is_tracking_positions() const { return track_positions_; }
bool is_calling() const {
@@ -218,14 +318,10 @@ class CompilationInfo {
// Generate a pre-aged prologue if we are optimizing for size, which
// will make code flushing more aggressive. Only apply to Code::FUNCTION,
// since StaticMarkingVisitor::IsFlushable only flushes proper functions.
- return FLAG_optimize_for_size && FLAG_age_code && !will_serialize() &&
- !is_debug() && output_code_kind() == Code::FUNCTION;
+ return FLAG_optimize_for_size && FLAG_age_code && !is_debug() &&
+ output_code_kind() == Code::FUNCTION;
}
- void EnsureFeedbackVector();
- Handle<TypeFeedbackVector> feedback_vector() const {
- return feedback_vector_;
- }
void SetCode(Handle<Code> code) { code_ = code; }
void SetBytecodeArray(Handle<BytecodeArray> bytecode_array) {
@@ -261,10 +357,9 @@ class CompilationInfo {
code_flags_ =
Code::KindField::update(code_flags_, Code::OPTIMIZED_FUNCTION);
}
- void SetOptimizingForOsr(BailoutId osr_ast_id, Handle<Code> unoptimized) {
+ void SetOptimizingForOsr(BailoutId osr_ast_id) {
SetOptimizing();
osr_ast_id_ = osr_ast_id;
- unoptimized_code_ = unoptimized;
}
// Deoptimization support.
@@ -288,7 +383,7 @@ class CompilationInfo {
}
void ReopenHandlesInNewHandleScope() {
- unoptimized_code_ = Handle<Code>(*unoptimized_code_);
+ // Empty for now but will be needed once fields move from ParseInfo.
}
void AbortOptimization(BailoutReason reason) {
@@ -377,12 +472,26 @@ class CompilationInfo {
return Code::ExtractKindFromFlags(code_flags_);
}
+ StackFrame::Type GetOutputStackFrameType() const;
+
protected:
ParseInfo* parse_info_;
void DisableFutureOptimization() {
if (GetFlag(kDisableFutureOptimization) && has_shared_info()) {
- shared_info()->DisableOptimization(bailout_reason());
+ // If Crankshaft tried to optimize this function, bailed out, and
+ // doesn't want to try again, then use TurboFan next time.
+ if (!shared_info()->dont_crankshaft() &&
+ bailout_reason() != kOptimizedTooManyTimes) {
+ shared_info()->set_dont_crankshaft(true);
+ if (FLAG_trace_opt) {
+ PrintF("[disabled Crankshaft for ");
+ shared_info()->ShortPrint();
+ PrintF(", reason: %s]\n", GetBailoutReason(bailout_reason()));
+ }
+ } else {
+ shared_info()->DisableOptimization(bailout_reason());
+ }
}
}
@@ -421,16 +530,9 @@ class CompilationInfo {
// The compiled code.
Handle<Code> code_;
- // Used by codegen, ultimately kept rooted by the SharedFunctionInfo.
- Handle<TypeFeedbackVector> feedback_vector_;
-
// Compilation mode flag and whether deoptimization is allowed.
Mode mode_;
BailoutId osr_ast_id_;
- // The unoptimized code we patched for OSR may not be the shared code
- // afterwards, since we may need to compile it again to include deoptimization
- // data. Keep track which code we patched.
- Handle<Code> unoptimized_code_;
// Holds the bytecode array generated by the interpreter.
// TODO(rmcilroy/mstarzinger): Temporary work-around until compiler.cc is
@@ -475,25 +577,7 @@ class CompilationInfo {
};
-// A wrapper around a CompilationInfo that detaches the Handles from
-// the underlying DeferredHandleScope and stores them in info_ on
-// destruction.
-class CompilationHandleScope BASE_EMBEDDED {
- public:
- explicit CompilationHandleScope(CompilationInfo* info)
- : deferred_(info->isolate()), info_(info) {}
- ~CompilationHandleScope() {
- info_->set_deferred_handles(deferred_.Detach());
- }
-
- private:
- DeferredHandleScope deferred_;
- CompilationInfo* info_;
-};
-
-
class HGraph;
-class HOptimizedGraphBuilder;
class LChunk;
// A helper class that calls the three compilation phases in
@@ -505,12 +589,7 @@ class LChunk;
class OptimizedCompileJob: public ZoneObject {
public:
explicit OptimizedCompileJob(CompilationInfo* info)
- : info_(info),
- graph_builder_(NULL),
- graph_(NULL),
- chunk_(NULL),
- last_status_(FAILED),
- awaiting_install_(false) { }
+ : info_(info), graph_(NULL), chunk_(NULL), last_status_(FAILED) {}
enum Status {
FAILED, BAILED_OUT, SUCCEEDED
@@ -534,23 +613,14 @@ class OptimizedCompileJob: public ZoneObject {
return SetLastStatus(BAILED_OUT);
}
- void WaitForInstall() {
- DCHECK(info_->is_osr());
- awaiting_install_ = true;
- }
-
- bool IsWaitingForInstall() { return awaiting_install_; }
-
private:
CompilationInfo* info_;
- HOptimizedGraphBuilder* graph_builder_;
HGraph* graph_;
LChunk* chunk_;
base::TimeDelta time_taken_to_create_graph_;
base::TimeDelta time_taken_to_optimize_;
base::TimeDelta time_taken_to_codegen_;
Status last_status_;
- bool awaiting_install_;
MUST_USE_RESULT Status SetLastStatus(Status status) {
last_status_ = status;
@@ -575,106 +645,6 @@ class OptimizedCompileJob: public ZoneObject {
};
};
-
-// The V8 compiler
-//
-// General strategy: Source code is translated into an anonymous function w/o
-// parameters which then can be executed. If the source code contains other
-// functions, they will be compiled and allocated as part of the compilation
-// of the source code.
-
-// Please note this interface returns shared function infos. This means you
-// need to call Factory::NewFunctionFromSharedFunctionInfo before you have a
-// real function with a context.
-
-class Compiler : public AllStatic {
- public:
- MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCode(
- Handle<JSFunction> function);
- MUST_USE_RESULT static MaybeHandle<Code> GetLazyCode(
- Handle<JSFunction> function);
-
- static bool Compile(Handle<JSFunction> function, ClearExceptionFlag flag);
- static bool CompileDebugCode(Handle<JSFunction> function);
- static bool CompileDebugCode(Handle<SharedFunctionInfo> shared);
- static void CompileForLiveEdit(Handle<Script> script);
-
- // Parser::Parse, then Compiler::Analyze.
- static bool ParseAndAnalyze(ParseInfo* info);
- // Rewrite, analyze scopes, and renumber.
- static bool Analyze(ParseInfo* info);
- // Adds deoptimization support, requires ParseAndAnalyze.
- static bool EnsureDeoptimizationSupport(CompilationInfo* info);
-
- // Compile a String source within a context for eval.
- MUST_USE_RESULT static MaybeHandle<JSFunction> GetFunctionFromEval(
- Handle<String> source, Handle<SharedFunctionInfo> outer_info,
- Handle<Context> context, LanguageMode language_mode,
- ParseRestriction restriction, int line_offset, int column_offset = 0,
- Handle<Object> script_name = Handle<Object>(),
- ScriptOriginOptions options = ScriptOriginOptions());
-
- // Compile a String source within a context.
- static Handle<SharedFunctionInfo> CompileScript(
- Handle<String> source, Handle<Object> script_name, int line_offset,
- int column_offset, ScriptOriginOptions resource_options,
- Handle<Object> source_map_url, Handle<Context> context,
- v8::Extension* extension, ScriptData** cached_data,
- ScriptCompiler::CompileOptions compile_options,
- NativesFlag is_natives_code, bool is_module);
-
- static Handle<SharedFunctionInfo> CompileStreamedScript(Handle<Script> script,
- ParseInfo* info,
- int source_length);
-
- // Create a shared function info object (the code may be lazily compiled).
- static Handle<SharedFunctionInfo> GetSharedFunctionInfo(
- FunctionLiteral* node, Handle<Script> script, CompilationInfo* outer);
-
- // Create a shared function info object for a native function literal.
- static Handle<SharedFunctionInfo> GetSharedFunctionInfoForNative(
- v8::Extension* extension, Handle<String> name);
-
- enum ConcurrencyMode { NOT_CONCURRENT, CONCURRENT };
-
- // Generate and return optimized code or start a concurrent optimization job.
- // In the latter case, return the InOptimizationQueue builtin. On failure,
- // return the empty handle.
- MUST_USE_RESULT static MaybeHandle<Code> GetOptimizedCode(
- Handle<JSFunction> function, ConcurrencyMode mode,
- BailoutId osr_ast_id = BailoutId::None(),
- JavaScriptFrame* osr_frame = nullptr);
-
- // Generate and return code from previously queued optimization job.
- // On failure, return the empty handle.
- MUST_USE_RESULT static MaybeHandle<Code> GetConcurrentlyOptimizedCode(
- OptimizedCompileJob* job);
-};
-
-
-class CompilationPhase BASE_EMBEDDED {
- public:
- CompilationPhase(const char* name, CompilationInfo* info);
- ~CompilationPhase();
-
- protected:
- bool ShouldProduceTraceOutput() const;
-
- const char* name() const { return name_; }
- CompilationInfo* info() const { return info_; }
- Isolate* isolate() const { return info()->isolate(); }
- Zone* zone() { return &zone_; }
-
- private:
- const char* name_;
- CompilationInfo* info_;
- Zone zone_;
- size_t info_zone_start_allocation_size_;
- base::ElapsedTimer timer_;
-
- DISALLOW_COPY_AND_ASSIGN(CompilationPhase);
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 4a2a857029..e38f629c5b 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -192,12 +192,12 @@ bool AccessInfoFactory::ComputeElementAccessInfos(
MapTransitionList transitions(maps.length());
for (Handle<Map> map : maps) {
if (Map::TryUpdate(map).ToHandle(&map)) {
- Handle<Map> transition_target =
- Map::FindTransitionedMap(map, &possible_transition_targets);
- if (transition_target.is_null()) {
+ Map* transition_target =
+ map->FindElementsKindTransitionedMap(&possible_transition_targets);
+ if (transition_target == nullptr) {
receiver_maps.Add(map);
} else {
- transitions.push_back(std::make_pair(map, transition_target));
+ transitions.push_back(std::make_pair(map, handle(transition_target)));
}
}
}
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index bdf4c47165..a0b502237b 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -54,6 +54,7 @@ class ArmOperandConverter final : public InstructionOperandConverter {
SBit OutputSBit() const {
switch (instr_->flags_mode()) {
case kFlags_branch:
+ case kFlags_deoptimize:
case kFlags_set:
return SetCC;
case kFlags_none:
@@ -149,8 +150,11 @@ class ArmOperandConverter final : public InstructionOperandConverter {
MemOperand ToMemOperand(InstructionOperand* op) const {
DCHECK_NOT_NULL(op);
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- FrameOffset offset = frame_access_state()->GetFrameOffset(
- AllocatedOperand::cast(op)->index());
+ return SlotToMemOperand(AllocatedOperand::cast(op)->index());
+ }
+
+ MemOperand SlotToMemOperand(int slot) const {
+ FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
};
@@ -164,7 +168,9 @@ class OutOfLineLoadFloat32 final : public OutOfLineCode {
: OutOfLineCode(gen), result_(result) {}
void Generate() final {
- __ vmov(result_, std::numeric_limits<float>::quiet_NaN());
+ // Compute sqrtf(-1.0f), which results in a quiet single-precision NaN.
+ __ vmov(result_, -1.0f);
+ __ vsqrt(result_, result_);
}
private:
@@ -178,7 +184,9 @@ class OutOfLineLoadFloat64 final : public OutOfLineCode {
: OutOfLineCode(gen), result_(result) {}
void Generate() final {
- __ vmov(result_, std::numeric_limits<double>::quiet_NaN(), kScratchReg);
+ // Compute sqrt(-1.0), which results in a quiet double-precision NaN.
+ __ vmov(result_, -1.0);
+ __ vsqrt(result_, result_);
}
private:
@@ -222,7 +230,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
value_(value),
scratch0_(scratch0),
scratch1_(scratch1),
- mode_(mode) {}
+ mode_(mode),
+ must_save_lr_(!gen->frame_access_state()->has_frame()) {}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -236,7 +245,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
: OMIT_REMEMBERED_SET;
SaveFPRegsMode const save_fp_mode =
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- if (!frame()->needs_frame()) {
+ if (must_save_lr_) {
// We need to save and restore lr if the frame was elided.
__ Push(lr);
}
@@ -249,7 +258,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ add(scratch1_, object_, Operand(index_));
}
__ CallStub(&stub);
- if (!frame()->needs_frame()) {
+ if (must_save_lr_) {
__ Pop(lr);
}
}
@@ -262,6 +271,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch0_;
Register const scratch1_;
RecordWriteMode const mode_;
+ bool must_save_lr_;
};
@@ -378,6 +388,11 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
DCHECK_EQ(LeaveCC, i.OutputSBit()); \
} while (0)
+void CodeGenerator::AssembleDeconstructFrame() {
+ __ LeaveFrame(StackFrame::MANUAL);
+}
+
+void CodeGenerator::AssembleSetupStackPointer() {}
void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
@@ -394,7 +409,7 @@ void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
__ sub(sp, sp, Operand(-sp_slot_delta * kPointerSize));
frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
}
- if (frame()->needs_frame()) {
+ if (frame_access_state()->has_frame()) {
if (FLAG_enable_embedded_constant_pool) {
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
}
@@ -404,14 +419,39 @@ void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
frame_access_state()->SetFrameAccessToSP();
}
+void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+ Label done;
+
+ // Check if current frame is an arguments adaptor frame.
+ __ ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ b(ne, &done);
+
+ // Load arguments count from current arguments adaptor frame (note, it
+ // does not include receiver).
+ Register caller_args_count_reg = scratch1;
+ __ ldr(caller_args_count_reg,
+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(caller_args_count_reg);
+
+ ParameterCount callee_args_count(args_reg);
+ __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+ scratch3);
+ __ bind(&done);
+}
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ArmOperandConverter i(this, instr);
- masm()->MaybeCheckConstPool();
-
- switch (ArchOpcodeField::decode(instr->opcode())) {
+ __ MaybeCheckConstPool();
+ InstructionCode opcode = instr->opcode();
+ ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
+ switch (arch_opcode) {
case kArchCallCodeObject: {
EnsureSpaceForLazyDeopt();
if (instr->InputAt(0)->IsImmediate()) {
@@ -427,9 +467,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
AssembleDeconstructActivationRecord(stack_param_delta);
+ if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
+ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+ i.TempRegister(0), i.TempRegister(1),
+ i.TempRegister(2));
+ }
if (instr->InputAt(0)->IsImmediate()) {
__ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
RelocInfo::CODE_TARGET);
@@ -458,6 +504,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchTailCallJSFunctionFromJSFunction:
case kArchTailCallJSFunction: {
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
@@ -468,6 +515,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
AssembleDeconstructActivationRecord(stack_param_delta);
+ if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
+ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+ i.TempRegister(0), i.TempRegister(1),
+ i.TempRegister(2));
+ }
__ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Jump(ip);
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -535,7 +587,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArchParentFramePointer:
- if (frame_access_state()->frame()->needs_frame()) {
+ if (frame_access_state()->has_frame()) {
__ ldr(i.OutputRegister(), MemOperand(fp, 0));
} else {
__ mov(i.OutputRegister(), fp);
@@ -742,6 +794,67 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ teq(i.InputRegister(0), i.InputOperand2(1));
DCHECK_EQ(SetCC, i.OutputSBit());
break;
+ case kArmAddPair:
+ // i.InputRegister(0) ... left low word.
+ // i.InputRegister(1) ... left high word.
+ // i.InputRegister(2) ... right low word.
+ // i.InputRegister(3) ... right high word.
+ __ add(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(2),
+ SBit::SetCC);
+ __ adc(i.OutputRegister(1), i.InputRegister(1),
+ Operand(i.InputRegister(3)));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmSubPair:
+ // i.InputRegister(0) ... left low word.
+ // i.InputRegister(1) ... left high word.
+ // i.InputRegister(2) ... right low word.
+ // i.InputRegister(3) ... right high word.
+ __ sub(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(2),
+ SBit::SetCC);
+ __ sbc(i.OutputRegister(1), i.InputRegister(1),
+ Operand(i.InputRegister(3)));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmMulPair:
+ // i.InputRegister(0) ... left low word.
+ // i.InputRegister(1) ... left high word.
+ // i.InputRegister(2) ... right low word.
+ // i.InputRegister(3) ... right high word.
+ __ umull(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(2));
+ __ mla(i.OutputRegister(1), i.InputRegister(0), i.InputRegister(3),
+ i.OutputRegister(1));
+ __ mla(i.OutputRegister(1), i.InputRegister(2), i.InputRegister(1),
+ i.OutputRegister(1));
+ break;
+ case kArmLslPair:
+ if (instr->InputAt(2)->IsImmediate()) {
+ __ LslPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1), i.InputInt32(2));
+ } else {
+ __ LslPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1), kScratchReg, i.InputRegister(2));
+ }
+ break;
+ case kArmLsrPair:
+ if (instr->InputAt(2)->IsImmediate()) {
+ __ LsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1), i.InputInt32(2));
+ } else {
+ __ LsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1), kScratchReg, i.InputRegister(2));
+ }
+ break;
+ case kArmAsrPair:
+ if (instr->InputAt(2)->IsImmediate()) {
+ __ AsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1), i.InputInt32(2));
+ } else {
+ __ AsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1), kScratchReg, i.InputRegister(2));
+ }
+ break;
case kArmVcmpF32:
if (instr->InputAt(1)->IsDoubleRegister()) {
__ VFPCompareAndSetFlags(i.InputFloat32Register(0),
@@ -1155,29 +1268,32 @@ void CodeGenerator::AssembleDeoptimizerCall(
int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
+ // TODO(turbofan): We should be able to generate better code by sharing the
+ // actual final call site and just bl'ing to it here, similar to what we do
+ // in the lithium backend.
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+ __ CheckConstPool(false, false);
}
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->IsCFunctionCall()) {
- if (FLAG_enable_embedded_constant_pool) {
- __ Push(lr, fp, pp);
- // Adjust FP to point to saved FP.
- __ sub(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
+ if (frame_access_state()->has_frame()) {
+ if (descriptor->IsCFunctionCall()) {
+ if (FLAG_enable_embedded_constant_pool) {
+ __ Push(lr, fp, pp);
+ // Adjust FP to point to saved FP.
+ __ sub(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
+ } else {
+ __ Push(lr, fp);
+ __ mov(fp, sp);
+ }
+ } else if (descriptor->IsJSFunctionCall()) {
+ __ Prologue(this->info()->GeneratePreagedPrologue());
} else {
- __ Push(lr, fp);
- __ mov(fp, sp);
+ __ StubPrologue(info()->GetOutputStackFrameType());
}
- } else if (descriptor->IsJSFunctionCall()) {
- __ Prologue(this->info()->GeneratePreagedPrologue());
- } else if (frame()->needs_frame()) {
- __ StubPrologue();
- } else {
- frame()->SetElidedFrameSizeInSlots(0);
}
- frame_access_state()->SetFrameAccessToDefault();
int stack_shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
@@ -1247,15 +1363,15 @@ void CodeGenerator::AssembleReturn() {
}
if (descriptor->IsCFunctionCall()) {
- __ LeaveFrame(StackFrame::MANUAL);
- } else if (frame()->needs_frame()) {
+ AssembleDeconstructFrame();
+ } else if (frame_access_state()->has_frame()) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ b(&return_label_);
return;
} else {
__ bind(&return_label_);
- __ LeaveFrame(StackFrame::MANUAL);
+ AssembleDeconstructFrame();
}
}
__ Ret(pop_count);
@@ -1311,9 +1427,9 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
case Constant::kHeapObject: {
Handle<HeapObject> src_object = src.ToHeapObject();
Heap::RootListIndex index;
- int offset;
- if (IsMaterializableFromFrame(src_object, &offset)) {
- __ ldr(dst, MemOperand(fp, offset));
+ int slot;
+ if (IsMaterializableFromFrame(src_object, &slot)) {
+ __ ldr(dst, g.SlotToMemOperand(slot));
} else if (IsMaterializableFromRoot(src_object, &index)) {
__ LoadRoot(dst, index);
} else {
diff --git a/deps/v8/src/compiler/arm/instruction-codes-arm.h b/deps/v8/src/compiler/arm/instruction-codes-arm.h
index 50fa555eb5..5e6f5c96a5 100644
--- a/deps/v8/src/compiler/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/arm/instruction-codes-arm.h
@@ -46,6 +46,12 @@ namespace compiler {
V(ArmUxtab) \
V(ArmRbit) \
V(ArmUxtah) \
+ V(ArmAddPair) \
+ V(ArmSubPair) \
+ V(ArmMulPair) \
+ V(ArmLslPair) \
+ V(ArmLsrPair) \
+ V(ArmAsrPair) \
V(ArmVcmpF32) \
V(ArmVaddF32) \
V(ArmVsubF32) \
diff --git a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
index d950e8c97d..466765ee4a 100644
--- a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
@@ -48,6 +48,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmUxtab:
case kArmUxtah:
case kArmRbit:
+ case kArmAddPair:
+ case kArmSubPair:
+ case kArmMulPair:
+ case kArmLslPair:
+ case kArmLsrPair:
+ case kArmAsrPair:
case kArmVcmpF32:
case kArmVaddF32:
case kArmVsubF32:
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
index 14b30b1af0..76d9e3c66d 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -237,8 +237,13 @@ void VisitBinop(InstructionSelector* selector, Node* node,
DCHECK_GE(arraysize(outputs), output_count);
DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));
- selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
- inputs);
+ opcode = cont->Encode(opcode);
+ if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+ cont->frame_state());
+ } else {
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
+ }
}
@@ -369,9 +374,7 @@ void InstructionSelector::VisitStore(Node* node) {
inputs[input_count++] = g.UseUniqueRegister(index);
addressing_mode = kMode_Offset_RR;
}
- inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
- ? g.UseRegister(value)
- : g.UseUniqueRegister(value);
+ inputs[input_count++] = g.UseUniqueRegister(value);
RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
switch (write_barrier_kind) {
case kNoWriteBarrier:
@@ -691,8 +694,13 @@ void VisitShift(InstructionSelector* selector, Node* node,
DCHECK_GE(arraysize(outputs), output_count);
DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));
- selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
- inputs);
+ opcode = cont->Encode(opcode);
+ if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+ cont->frame_state());
+ } else {
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
+ }
}
@@ -759,6 +767,120 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
VisitShift(this, node, TryMatchASR);
}
+void InstructionSelector::VisitInt32PairAdd(Node* node) {
+ ArmOperandGenerator g(this);
+
+ // We use UseUniqueRegister here to avoid register sharing with the output
+ // registers.
+ InstructionOperand inputs[] = {
+ g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+
+ InstructionOperand outputs[] = {
+ g.DefineAsRegister(node),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+ Emit(kArmAddPair, 2, outputs, 4, inputs);
+}
+
+void InstructionSelector::VisitInt32PairSub(Node* node) {
+ ArmOperandGenerator g(this);
+
+ // We use UseUniqueRegister here to avoid register sharing with the output
+ // register.
+ InstructionOperand inputs[] = {
+ g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+
+ InstructionOperand outputs[] = {
+ g.DefineAsRegister(node),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+ Emit(kArmSubPair, 2, outputs, 4, inputs);
+}
+
+void InstructionSelector::VisitInt32PairMul(Node* node) {
+ ArmOperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)),
+ g.UseUniqueRegister(node->InputAt(2)),
+ g.UseUniqueRegister(node->InputAt(3))};
+
+ InstructionOperand outputs[] = {
+ g.DefineAsRegister(node),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+ Emit(kArmMulPair, 2, outputs, 4, inputs);
+}
+
+void InstructionSelector::VisitWord32PairShl(Node* node) {
+ ArmOperandGenerator g(this);
+ // We use g.UseUniqueRegister here for InputAt(0) to guarantee that there is
+ // no register aliasing with output registers.
+ Int32Matcher m(node->InputAt(2));
+ InstructionOperand shift_operand;
+ if (m.HasValue()) {
+ shift_operand = g.UseImmediate(m.node());
+ } else {
+ shift_operand = g.UseUniqueRegister(m.node());
+ }
+
+ InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)),
+ shift_operand};
+
+ InstructionOperand outputs[] = {
+ g.DefineAsRegister(node),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+ Emit(kArmLslPair, 2, outputs, 3, inputs);
+}
+
+void InstructionSelector::VisitWord32PairShr(Node* node) {
+ ArmOperandGenerator g(this);
+ // We use g.UseUniqueRegister here for InputAt(1) and InputAt(2) to to
+ // guarantee that there is no register aliasing with output register.
+ Int32Matcher m(node->InputAt(2));
+ InstructionOperand shift_operand;
+ if (m.HasValue()) {
+ shift_operand = g.UseImmediate(m.node());
+ } else {
+ shift_operand = g.UseUniqueRegister(m.node());
+ }
+
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)),
+ shift_operand};
+
+ InstructionOperand outputs[] = {
+ g.DefineAsRegister(node),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+ Emit(kArmLsrPair, 2, outputs, 3, inputs);
+}
+
+void InstructionSelector::VisitWord32PairSar(Node* node) {
+ ArmOperandGenerator g(this);
+ // We use g.UseUniqueRegister here for InputAt(1) and InputAt(2) to to
+ // guarantee that there is no register aliasing with output register.
+ Int32Matcher m(node->InputAt(2));
+ InstructionOperand shift_operand;
+ if (m.HasValue()) {
+ shift_operand = g.UseImmediate(m.node());
+ } else {
+ shift_operand = g.UseUniqueRegister(m.node());
+ }
+
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)),
+ shift_operand};
+
+ InstructionOperand outputs[] = {
+ g.DefineAsRegister(node),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+ Emit(kArmAsrPair, 2, outputs, 3, inputs);
+}
void InstructionSelector::VisitWord32Ror(Node* node) {
VisitShift(this, node, TryMatchROR);
@@ -1013,7 +1135,9 @@ void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
VisitRR(this, kArmVcvtU32F64, node);
}
-
+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
+ VisitRR(this, kArmVcvtU32F64, node);
+}
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
VisitRR(this, kArmVcvtF32F64, node);
}
@@ -1272,6 +1396,7 @@ void InstructionSelector::EmitPrepareArguments(
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
+int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
namespace {
@@ -1284,6 +1409,9 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
if (cont->IsBranch()) {
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
+ } else if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+ cont->frame_state());
} else {
DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
@@ -1357,8 +1485,7 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
if (cont->IsBranch()) {
inputs[input_count++] = g.Label(cont->true_block());
inputs[input_count++] = g.Label(cont->false_block());
- } else {
- DCHECK(cont->IsSet());
+ } else if (cont->IsSet()) {
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
@@ -1366,8 +1493,13 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
- selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
- inputs);
+ opcode = cont->Encode(opcode);
+ if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+ cont->frame_state());
+ } else {
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
+ }
}
@@ -1482,7 +1614,11 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
if (cont->IsBranch()) {
selector->Emit(opcode, g.NoOutput(), value_operand, value_operand,
g.Label(cont->true_block()), g.Label(cont->false_block()));
+ } else if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand, value_operand,
+ cont->frame_state());
} else {
+ DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
value_operand);
}
@@ -1490,13 +1626,23 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
} // namespace
-
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
BasicBlock* fbranch) {
FlagsContinuation cont(kNotEqual, tbranch, fbranch);
VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
}
+void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
ArmOperandGenerator g(this);
@@ -1527,7 +1673,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
void InstructionSelector::VisitWord32Equal(Node* const node) {
- FlagsContinuation cont(kEqual, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int32BinopMatcher m(node);
if (m.right().Is(0)) {
return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
@@ -1537,32 +1683,34 @@ void InstructionSelector::VisitWord32Equal(Node* const node) {
void InstructionSelector::VisitInt32LessThan(Node* node) {
- FlagsContinuation cont(kSignedLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
VisitWordCompare(this, node, &cont);
}
void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kSignedLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
VisitWordCompare(this, node, &cont);
}
void InstructionSelector::VisitUint32LessThan(Node* node) {
- FlagsContinuation cont(kUnsignedLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitWordCompare(this, node, &cont);
}
void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitWordCompare(this, node, &cont);
}
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont(kOverflow, ovf);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
return VisitBinop(this, node, kArmAdd, kArmAdd, &cont);
}
FlagsContinuation cont;
@@ -1572,7 +1720,7 @@ void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont(kOverflow, ovf);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
return VisitBinop(this, node, kArmSub, kArmRsb, &cont);
}
FlagsContinuation cont;
@@ -1581,37 +1729,39 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
void InstructionSelector::VisitFloat32Equal(Node* node) {
- FlagsContinuation cont(kEqual, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat32LessThan(Node* node) {
- FlagsContinuation cont(kFloatLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kFloatLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64Equal(Node* node) {
- FlagsContinuation cont(kEqual, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThan(Node* node) {
- FlagsContinuation cont(kFloatLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kFloatLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
VisitFloat64Compare(this, node, &cont);
}
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index e45c677619..456e7e7608 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -207,13 +207,13 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
DCHECK_NOT_NULL(op);
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- FrameOffset offset = frame_access_state()->GetFrameOffset(
- AllocatedOperand::cast(op)->index());
+ return SlotToMemOperand(AllocatedOperand::cast(op)->index(), masm);
+ }
+
+ MemOperand SlotToMemOperand(int slot, MacroAssembler* masm) const {
+ FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
if (offset.from_frame_pointer()) {
- int from_sp =
- offset.offset() +
- ((frame()->GetSpToFpSlotCount() + frame_access_state()->sp_delta()) *
- kPointerSize);
+ int from_sp = offset.offset() + frame_access_state()->GetSPToFPOffset();
// Convert FP-offsets to SP-offsets if it results in better code.
if (Assembler::IsImmLSUnscaled(from_sp) ||
Assembler::IsImmLSScaled(from_sp, LSDoubleWord)) {
@@ -279,7 +279,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
value_(value),
scratch0_(scratch0),
scratch1_(scratch1),
- mode_(mode) {}
+ mode_(mode),
+ must_save_lr_(!gen->frame_access_state()->has_frame()) {}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -293,7 +294,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
: OMIT_REMEMBERED_SET;
SaveFPRegsMode const save_fp_mode =
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- if (!frame()->needs_frame()) {
+ if (must_save_lr_) {
// We need to save and restore lr if the frame was elided.
__ Push(lr);
}
@@ -301,7 +302,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
remembered_set_action, save_fp_mode);
__ Add(scratch1_, object_, index_);
__ CallStub(&stub);
- if (!frame()->needs_frame()) {
+ if (must_save_lr_) {
__ Pop(lr);
}
}
@@ -313,6 +314,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch0_;
Register const scratch1_;
RecordWriteMode const mode_;
+ bool must_save_lr_;
};
@@ -466,6 +468,15 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
} \
} while (0)
+void CodeGenerator::AssembleDeconstructFrame() {
+ const CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ if (descriptor->IsCFunctionCall() || descriptor->UseNativeStack()) {
+ __ Mov(csp, fp);
+ } else {
+ __ Mov(jssp, fp);
+ }
+ __ Pop(fp, lr);
+}
void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
@@ -482,13 +493,37 @@ void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
__ Claim(-sp_slot_delta);
frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
}
- if (frame()->needs_frame()) {
+ if (frame_access_state()->has_frame()) {
__ Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
__ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
}
frame_access_state()->SetFrameAccessToSP();
}
+void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+ Label done;
+
+ // Check if current frame is an arguments adaptor frame.
+ __ Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ B(ne, &done);
+
+ // Load arguments count from current arguments adaptor frame (note, it
+ // does not include receiver).
+ Register caller_args_count_reg = scratch1;
+ __ Ldr(caller_args_count_reg,
+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(caller_args_count_reg);
+
+ ParameterCount callee_args_count(args_reg);
+ __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+ scratch3);
+ __ bind(&done);
+}
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
@@ -506,21 +541,31 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
__ Call(target);
}
+ RecordCallPosition(instr);
// TODO(titzer): this is ugly. JSSP should be a caller-save register
// in this case, but it is not possible to express in the register
// allocator.
- CallDescriptor::Flags flags =
- static_cast<CallDescriptor::Flags>(MiscField::decode(opcode));
+ CallDescriptor::Flags flags(MiscField::decode(opcode));
if (flags & CallDescriptor::kRestoreJSSP) {
- __ mov(jssp, csp);
+ __ Ldr(jssp, MemOperand(csp));
+ __ Mov(csp, jssp);
+ }
+ if (flags & CallDescriptor::kRestoreCSP) {
+ __ Mov(csp, jssp);
+ __ AssertCspAligned();
}
frame_access_state()->ClearSPDelta();
- RecordCallPosition(instr);
break;
}
+ case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
AssembleDeconstructActivationRecord(stack_param_delta);
+ if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
+ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+ i.TempRegister(0), i.TempRegister(1),
+ i.TempRegister(2));
+ }
if (instr->InputAt(0)->IsImmediate()) {
__ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
RelocInfo::CODE_TARGET);
@@ -545,18 +590,23 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
__ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Call(x10);
+ RecordCallPosition(instr);
// TODO(titzer): this is ugly. JSSP should be a caller-save register
// in this case, but it is not possible to express in the register
// allocator.
- CallDescriptor::Flags flags =
- static_cast<CallDescriptor::Flags>(MiscField::decode(opcode));
+ CallDescriptor::Flags flags(MiscField::decode(opcode));
if (flags & CallDescriptor::kRestoreJSSP) {
- __ mov(jssp, csp);
+ __ Ldr(jssp, MemOperand(csp));
+ __ Mov(csp, jssp);
+ }
+ if (flags & CallDescriptor::kRestoreCSP) {
+ __ Mov(csp, jssp);
+ __ AssertCspAligned();
}
frame_access_state()->ClearSPDelta();
- RecordCallPosition(instr);
break;
}
+ case kArchTailCallJSFunctionFromJSFunction:
case kArchTailCallJSFunction: {
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
@@ -569,6 +619,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
AssembleDeconstructActivationRecord(stack_param_delta);
+ if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
+ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+ i.TempRegister(0), i.TempRegister(1),
+ i.TempRegister(2));
+ }
__ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Jump(x10);
frame_access_state()->ClearSPDelta();
@@ -628,7 +683,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ mov(i.OutputRegister(), fp);
break;
case kArchParentFramePointer:
- if (frame_access_state()->frame()->needs_frame()) {
+ if (frame_access_state()->has_frame()) {
__ ldr(i.OutputRegister(), MemOperand(fp, 0));
} else {
__ mov(i.OutputRegister(), fp);
@@ -931,22 +986,46 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
// Pseudo instruction turned into cbz/cbnz in AssembleArchBranch.
break;
case kArm64ClaimCSP: {
- int count = i.InputInt32(0);
+ int count = RoundUp(i.InputInt32(0), 2);
Register prev = __ StackPointer();
- __ SetStackPointer(csp);
- __ Claim(count);
- __ SetStackPointer(prev);
- frame_access_state()->IncreaseSPDelta(count);
+ if (prev.Is(jssp)) {
+ // TODO(titzer): make this a macro-assembler method.
+ // Align the CSP and store the previous JSSP on the stack.
+ UseScratchRegisterScope scope(masm());
+ Register tmp = scope.AcquireX();
+
+ int sp_alignment = __ ActivationFrameAlignment();
+ __ Sub(tmp, jssp, kPointerSize);
+ __ And(tmp, tmp, Operand(~static_cast<uint64_t>(sp_alignment - 1)));
+ __ Mov(csp, tmp);
+ __ Str(jssp, MemOperand(csp));
+ if (count > 0) {
+ __ SetStackPointer(csp);
+ __ Claim(count);
+ __ SetStackPointer(prev);
+ }
+ } else {
+ __ AssertCspAligned();
+ if (count > 0) {
+ __ Claim(count);
+ frame_access_state()->IncreaseSPDelta(count);
+ }
+ }
break;
}
case kArm64ClaimJSSP: {
int count = i.InputInt32(0);
if (csp.Is(__ StackPointer())) {
- // No JSP is set up. Compute it from the CSP.
- int even = RoundUp(count, 2);
- __ Sub(jssp, csp, count * kPointerSize);
- __ Sub(csp, csp, even * kPointerSize); // Must always be aligned.
- frame_access_state()->IncreaseSPDelta(even);
+ // No JSSP is set up. Compute it from the CSP.
+ __ AssertCspAligned();
+ if (count > 0) {
+ int even = RoundUp(count, 2);
+ __ Sub(jssp, csp, count * kPointerSize);
+ __ Sub(csp, csp, even * kPointerSize); // Must always be aligned.
+ frame_access_state()->IncreaseSPDelta(even);
+ } else {
+ __ Mov(jssp, csp);
+ }
} else {
// JSSP is the current stack pointer, just use regular Claim().
__ Claim(count);
@@ -1424,34 +1503,38 @@ void CodeGenerator::AssembleDeoptimizerCall(
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}
+void CodeGenerator::AssembleSetupStackPointer() {
+ const CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ if (descriptor->UseNativeStack() || descriptor->IsCFunctionCall()) {
+ __ SetStackPointer(csp);
+ } else {
+ __ SetStackPointer(jssp);
+ }
+}
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->IsCFunctionCall()) {
- __ SetStackPointer(csp);
- __ Push(lr, fp);
- __ Mov(fp, csp);
- } else if (descriptor->IsJSFunctionCall()) {
- __ SetStackPointer(jssp);
- __ Prologue(this->info()->GeneratePreagedPrologue());
- } else if (frame()->needs_frame()) {
- if (descriptor->UseNativeStack()) {
- __ SetStackPointer(csp);
- } else {
- __ SetStackPointer(jssp);
- }
- __ StubPrologue();
- } else {
- if (descriptor->UseNativeStack()) {
- __ SetStackPointer(csp);
+ if (descriptor->UseNativeStack()) {
+ __ AssertCspAligned();
+ }
+
+ int stack_shrink_slots = frame()->GetSpillSlotCount();
+ if (frame_access_state()->has_frame()) {
+ if (descriptor->IsJSFunctionCall()) {
+ DCHECK(!descriptor->UseNativeStack());
+ __ Prologue(this->info()->GeneratePreagedPrologue());
} else {
- __ SetStackPointer(jssp);
+ if (descriptor->IsCFunctionCall()) {
+ __ Push(lr, fp);
+ __ Mov(fp, masm_.StackPointer());
+ __ Claim(stack_shrink_slots);
+ } else {
+ __ StubPrologue(info()->GetOutputStackFrameType(),
+ frame()->GetTotalFrameSlotCount());
+ }
}
- frame()->SetElidedFrameSizeInSlots(0);
}
- frame_access_state()->SetFrameAccessToDefault();
- int stack_shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1465,15 +1548,9 @@ void CodeGenerator::AssemblePrologue() {
stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
}
- // If frame()->needs_frame() is false, then
- // frame()->AlignSavedCalleeRegisterSlots() is guaranteed to return 0.
- if (csp.Is(masm()->StackPointer()) && frame()->needs_frame()) {
- // The system stack pointer requires 16-byte alignment at function call
- // boundaries.
-
- stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
+ if (descriptor->IsJSFunctionCall()) {
+ __ Claim(stack_shrink_slots);
}
- __ Claim(stack_shrink_slots);
// Save FP registers.
CPURegList saves_fp = CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
@@ -1518,27 +1595,27 @@ void CodeGenerator::AssembleReturn() {
int pop_count = static_cast<int>(descriptor->StackParameterCount());
if (descriptor->IsCFunctionCall()) {
- __ Mov(csp, fp);
- __ Pop(fp, lr);
- } else if (frame()->needs_frame()) {
+ AssembleDeconstructFrame();
+ } else if (frame_access_state()->has_frame()) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ B(&return_label_);
return;
} else {
__ Bind(&return_label_);
+ AssembleDeconstructFrame();
if (descriptor->UseNativeStack()) {
- __ Mov(csp, fp);
pop_count += (pop_count & 1); // align
- } else {
- __ Mov(jssp, fp);
}
- __ Pop(fp, lr);
}
} else if (descriptor->UseNativeStack()) {
pop_count += (pop_count & 1); // align
}
__ Drop(pop_count);
+
+ if (descriptor->UseNativeStack()) {
+ __ AssertCspAligned();
+ }
__ Ret();
}
@@ -1576,9 +1653,9 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (src.type() == Constant::kHeapObject) {
Handle<HeapObject> src_object = src.ToHeapObject();
Heap::RootListIndex index;
- int offset;
- if (IsMaterializableFromFrame(src_object, &offset)) {
- __ Ldr(dst, MemOperand(fp, offset));
+ int slot;
+ if (IsMaterializableFromFrame(src_object, &slot)) {
+ __ Ldr(dst, g.SlotToMemOperand(slot, masm()));
} else if (IsMaterializableFromRoot(src_object, &index)) {
__ LoadRoot(dst, index);
} else {
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index 26a2896134..d90deaeecb 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -289,8 +289,13 @@ void VisitBinop(InstructionSelector* selector, Node* node,
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
- selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
- inputs);
+ opcode = cont->Encode(opcode);
+ if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+ cont->frame_state());
+ } else {
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
+ }
}
@@ -413,9 +418,7 @@ void InstructionSelector::VisitStore(Node* node) {
inputs[input_count++] = g.UseUniqueRegister(index);
addressing_mode = kMode_MRR;
}
- inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
- ? g.UseRegister(value)
- : g.UseUniqueRegister(value);
+ inputs[input_count++] = g.UseUniqueRegister(value);
RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
switch (write_barrier_kind) {
case kNoWriteBarrier:
@@ -1283,6 +1286,9 @@ void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
VisitRR(this, kArm64Float64ToUint32, node);
}
+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
+ VisitRR(this, kArm64Float64ToUint32, node);
+}
void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
Arm64OperandGenerator g(this);
@@ -1628,20 +1634,20 @@ void InstructionSelector::EmitPrepareArguments(
Node* node) {
Arm64OperandGenerator g(this);
+ bool from_native_stack = linkage()->GetIncomingDescriptor()->UseNativeStack();
bool to_native_stack = descriptor->UseNativeStack();
+ bool always_claim = to_native_stack != from_native_stack;
+
int claim_count = static_cast<int>(arguments->size());
int slot = claim_count - 1;
- if (to_native_stack) {
- // Native stack must always be aligned to 16 (2 words).
- claim_count = RoundUp(claim_count, 2);
- }
- // TODO(titzer): claim and poke probably take small immediates.
// Bump the stack pointer(s).
- if (claim_count > 0) {
+ if (claim_count > 0 || always_claim) {
+ // TODO(titzer): claim and poke probably take small immediates.
// TODO(titzer): it would be better to bump the csp here only
// and emit paired stores with increment for non c frames.
ArchOpcode claim = to_native_stack ? kArm64ClaimCSP : kArm64ClaimJSSP;
+ // Claim(0) isn't a nop if there is a mismatch between CSP and JSSP.
Emit(claim, g.NoOutput(), g.TempImmediate(claim_count));
}
@@ -1662,6 +1668,7 @@ void InstructionSelector::EmitPrepareArguments(
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
+int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
namespace {
@@ -1674,6 +1681,9 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
if (cont->IsBranch()) {
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
+ } else if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+ cont->frame_state());
} else {
DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
@@ -1789,85 +1799,72 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
}
}
-} // namespace
-
-
-void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
- BasicBlock* fbranch) {
- OperandGenerator g(this);
- Node* user = branch;
- Node* value = branch->InputAt(0);
-
- FlagsContinuation cont(kNotEqual, tbranch, fbranch);
-
- // Try to combine with comparisons against 0 by simply inverting the branch.
- while (CanCover(user, value) && value->opcode() == IrOpcode::kWord32Equal) {
- Int32BinopMatcher m(value);
- if (m.right().Is(0)) {
- user = value;
- value = m.left().node();
- cont.Negate();
- } else {
- break;
- }
- }
-
- // Try to combine the branch with a comparison.
- if (CanCover(user, value)) {
+void VisitWordCompareZero(InstructionSelector* selector, Node* user,
+ Node* value, FlagsContinuation* cont) {
+ Arm64OperandGenerator g(selector);
+ while (selector->CanCover(user, value)) {
switch (value->opcode()) {
- case IrOpcode::kWord32Equal:
- cont.OverwriteAndNegateIfEqual(kEqual);
- return VisitWord32Compare(this, value, &cont);
+ case IrOpcode::kWord32Equal: {
+ Int32BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ user = value;
+ value = m.left().node();
+ cont->Negate();
+ continue;
+ }
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitWord32Compare(selector, value, cont);
+ }
case IrOpcode::kInt32LessThan:
- cont.OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWord32Compare(this, value, &cont);
+ cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWord32Compare(selector, value, cont);
case IrOpcode::kInt32LessThanOrEqual:
- cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWord32Compare(this, value, &cont);
+ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWord32Compare(selector, value, cont);
case IrOpcode::kUint32LessThan:
- cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWord32Compare(this, value, &cont);
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWord32Compare(selector, value, cont);
case IrOpcode::kUint32LessThanOrEqual:
- cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWord32Compare(this, value, &cont);
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWord32Compare(selector, value, cont);
case IrOpcode::kWord64Equal:
- cont.OverwriteAndNegateIfEqual(kEqual);
- return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
kArithmeticImm);
case IrOpcode::kInt64LessThan:
- cont.OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
+ cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
kArithmeticImm);
case IrOpcode::kInt64LessThanOrEqual:
- cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
+ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
kArithmeticImm);
case IrOpcode::kUint64LessThan:
- cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
kArithmeticImm);
case IrOpcode::kUint64LessThanOrEqual:
- cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
kArithmeticImm);
case IrOpcode::kFloat32Equal:
- cont.OverwriteAndNegateIfEqual(kEqual);
- return VisitFloat32Compare(this, value, &cont);
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitFloat32Compare(selector, value, cont);
case IrOpcode::kFloat32LessThan:
- cont.OverwriteAndNegateIfEqual(kFloatLessThan);
- return VisitFloat32Compare(this, value, &cont);
+ cont->OverwriteAndNegateIfEqual(kFloatLessThan);
+ return VisitFloat32Compare(selector, value, cont);
case IrOpcode::kFloat32LessThanOrEqual:
- cont.OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
- return VisitFloat32Compare(this, value, &cont);
+ cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
+ return VisitFloat32Compare(selector, value, cont);
case IrOpcode::kFloat64Equal:
- cont.OverwriteAndNegateIfEqual(kEqual);
- return VisitFloat64Compare(this, value, &cont);
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThan:
- cont.OverwriteAndNegateIfEqual(kFloatLessThan);
- return VisitFloat64Compare(this, value, &cont);
+ cont->OverwriteAndNegateIfEqual(kFloatLessThan);
+ return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThanOrEqual:
- cont.OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
- return VisitFloat64Compare(this, value, &cont);
+ cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
+ return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
@@ -1879,24 +1876,24 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
// *AFTER* this branch).
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
- if (result == nullptr || IsDefined(result)) {
+ if (result == nullptr || selector->IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
- cont.OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32,
- kArithmeticImm, &cont);
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop<Int32BinopMatcher>(
+ selector, node, kArm64Add32, kArithmeticImm, cont);
case IrOpcode::kInt32SubWithOverflow:
- cont.OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
- kArithmeticImm, &cont);
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop<Int32BinopMatcher>(
+ selector, node, kArm64Sub32, kArithmeticImm, cont);
case IrOpcode::kInt64AddWithOverflow:
- cont.OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop<Int64BinopMatcher>(this, node, kArm64Add,
- kArithmeticImm, &cont);
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop<Int64BinopMatcher>(selector, node, kArm64Add,
+ kArithmeticImm, cont);
case IrOpcode::kInt64SubWithOverflow:
- cont.OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub,
- kArithmeticImm, &cont);
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop<Int64BinopMatcher>(selector, node, kArm64Sub,
+ kArithmeticImm, cont);
default:
break;
}
@@ -1904,55 +1901,84 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
break;
case IrOpcode::kInt32Add:
- return VisitWordCompare(this, value, kArm64Cmn32, &cont, true,
+ return VisitWordCompare(selector, value, kArm64Cmn32, cont, true,
kArithmeticImm);
case IrOpcode::kInt32Sub:
- return VisitWord32Compare(this, value, &cont);
+ return VisitWord32Compare(selector, value, cont);
case IrOpcode::kWord32And: {
Int32BinopMatcher m(value);
- if (m.right().HasValue() &&
+ if (cont->IsBranch() && m.right().HasValue() &&
(base::bits::CountPopulation32(m.right().Value()) == 1)) {
// If the mask has only one bit set, we can use tbz/tbnz.
- DCHECK((cont.condition() == kEqual) ||
- (cont.condition() == kNotEqual));
- Emit(cont.Encode(kArm64TestAndBranch32), g.NoOutput(),
- g.UseRegister(m.left().node()),
- g.TempImmediate(
- base::bits::CountTrailingZeros32(m.right().Value())),
- g.Label(cont.true_block()), g.Label(cont.false_block()));
+ DCHECK((cont->condition() == kEqual) ||
+ (cont->condition() == kNotEqual));
+ selector->Emit(
+ cont->Encode(kArm64TestAndBranch32), g.NoOutput(),
+ g.UseRegister(m.left().node()),
+ g.TempImmediate(
+ base::bits::CountTrailingZeros32(m.right().Value())),
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
return;
}
- return VisitWordCompare(this, value, kArm64Tst32, &cont, true,
+ return VisitWordCompare(selector, value, kArm64Tst32, cont, true,
kLogical32Imm);
}
case IrOpcode::kWord64And: {
Int64BinopMatcher m(value);
- if (m.right().HasValue() &&
+ if (cont->IsBranch() && m.right().HasValue() &&
(base::bits::CountPopulation64(m.right().Value()) == 1)) {
// If the mask has only one bit set, we can use tbz/tbnz.
- DCHECK((cont.condition() == kEqual) ||
- (cont.condition() == kNotEqual));
- Emit(cont.Encode(kArm64TestAndBranch), g.NoOutput(),
- g.UseRegister(m.left().node()),
- g.TempImmediate(
- base::bits::CountTrailingZeros64(m.right().Value())),
- g.Label(cont.true_block()), g.Label(cont.false_block()));
+ DCHECK((cont->condition() == kEqual) ||
+ (cont->condition() == kNotEqual));
+ selector->Emit(
+ cont->Encode(kArm64TestAndBranch), g.NoOutput(),
+ g.UseRegister(m.left().node()),
+ g.TempImmediate(
+ base::bits::CountTrailingZeros64(m.right().Value())),
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
return;
}
- return VisitWordCompare(this, value, kArm64Tst, &cont, true,
+ return VisitWordCompare(selector, value, kArm64Tst, cont, true,
kLogical64Imm);
}
default:
break;
}
+ break;
}
// Branch could not be combined with a compare, compare against 0 and branch.
- Emit(cont.Encode(kArm64CompareAndBranch32), g.NoOutput(),
- g.UseRegister(value), g.Label(cont.true_block()),
- g.Label(cont.false_block()));
+ if (cont->IsBranch()) {
+ selector->Emit(cont->Encode(kArm64CompareAndBranch32), g.NoOutput(),
+ g.UseRegister(value), g.Label(cont->true_block()),
+ g.Label(cont->false_block()));
+ } else {
+ DCHECK(cont->IsDeoptimize());
+ selector->EmitDeoptimize(cont->Encode(kArm64Tst32), g.NoOutput(),
+ g.UseRegister(value), g.UseRegister(value),
+ cont->frame_state());
+ }
+}
+
+} // namespace
+
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+ BasicBlock* fbranch) {
+ FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+ VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
+void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
Arm64OperandGenerator g(this);
@@ -1984,7 +2010,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
void InstructionSelector::VisitWord32Equal(Node* const node) {
Node* const user = node;
- FlagsContinuation cont(kEqual, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int32BinopMatcher m(user);
if (m.right().Is(0)) {
Node* const value = m.left().node();
@@ -2018,32 +2044,34 @@ void InstructionSelector::VisitWord32Equal(Node* const node) {
void InstructionSelector::VisitInt32LessThan(Node* node) {
- FlagsContinuation cont(kSignedLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
VisitWord32Compare(this, node, &cont);
}
void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kSignedLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
VisitWord32Compare(this, node, &cont);
}
void InstructionSelector::VisitUint32LessThan(Node* node) {
- FlagsContinuation cont(kUnsignedLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitWord32Compare(this, node, &cont);
}
void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitWord32Compare(this, node, &cont);
}
void InstructionSelector::VisitWord64Equal(Node* const node) {
Node* const user = node;
- FlagsContinuation cont(kEqual, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int64BinopMatcher m(user);
if (m.right().Is(0)) {
Node* const value = m.left().node();
@@ -2064,7 +2092,7 @@ void InstructionSelector::VisitWord64Equal(Node* const node) {
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont(kOverflow, ovf);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
return VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32,
kArithmeticImm, &cont);
}
@@ -2075,7 +2103,7 @@ void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont(kOverflow, ovf);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
kArithmeticImm, &cont);
}
@@ -2086,7 +2114,7 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont(kOverflow, ovf);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
return VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm,
&cont);
}
@@ -2097,7 +2125,7 @@ void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont(kOverflow, ovf);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
return VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm,
&cont);
}
@@ -2107,61 +2135,65 @@ void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
void InstructionSelector::VisitInt64LessThan(Node* node) {
- FlagsContinuation cont(kSignedLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}
void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kSignedLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}
void InstructionSelector::VisitUint64LessThan(Node* node) {
- FlagsContinuation cont(kUnsignedLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}
void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}
void InstructionSelector::VisitFloat32Equal(Node* node) {
- FlagsContinuation cont(kEqual, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat32LessThan(Node* node) {
- FlagsContinuation cont(kFloatLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kFloatLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64Equal(Node* node) {
- FlagsContinuation cont(kEqual, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThan(Node* node) {
- FlagsContinuation cont(kFloatLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kFloatLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
VisitFloat64Compare(this, node, &cont);
}
diff --git a/deps/v8/src/compiler/ast-graph-builder.cc b/deps/v8/src/compiler/ast-graph-builder.cc
index abcf828c39..89bb61949a 100644
--- a/deps/v8/src/compiler/ast-graph-builder.cc
+++ b/deps/v8/src/compiler/ast-graph-builder.cc
@@ -616,12 +616,6 @@ void AstGraphBuilder::CreateGraphBody(bool stack_check) {
NewNode(javascript()->CallRuntime(Runtime::kTraceEnter));
}
- // Visit illegal re-declaration and bail out if it exists.
- if (scope->HasIllegalRedeclaration()) {
- VisitForEffect(scope->GetIllegalRedeclaration());
- return;
- }
-
// Visit declarations within the function scope.
VisitDeclarations(scope->declarations());
@@ -646,7 +640,7 @@ void AstGraphBuilder::ClearNonLiveSlotsInFrameStates() {
}
NonLiveFrameStateSlotReplacer replacer(
- &state_values_cache_, jsgraph()->UndefinedConstant(),
+ &state_values_cache_, jsgraph()->OptimizedOutConstant(),
liveness_analyzer()->local_count(), local_zone());
Variable* arguments = info()->scope()->arguments();
if (arguments != nullptr && arguments->IsStackAllocated()) {
@@ -1448,9 +1442,11 @@ void AstGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
}
try_control.EndTry();
- // Clear message object as we enter the catch block.
- Node* the_hole = jsgraph()->TheHoleConstant();
- NewNode(javascript()->StoreMessage(), the_hole);
+ // If requested, clear message object as we enter the catch block.
+ if (stmt->clear_pending_message()) {
+ Node* the_hole = jsgraph()->TheHoleConstant();
+ NewNode(javascript()->StoreMessage(), the_hole);
+ }
// Create a catch scope that binds the exception.
Node* exception = try_control.GetExceptionNode();
@@ -1644,8 +1640,7 @@ void AstGraphBuilder::VisitClassLiteralContents(ClassLiteral* expr) {
}
}
- // Set both the prototype and constructor to have fast properties, and also
- // freeze them in strong mode.
+ // Set both the prototype and constructor to have fast properties.
prototype = environment()->Pop();
literal = environment()->Pop();
const Operator* op =
@@ -1725,7 +1720,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
// Create node to deep-copy the literal boilerplate.
const Operator* op = javascript()->CreateLiteralObject(
expr->constant_properties(), expr->ComputeFlags(true),
- expr->literal_index());
+ expr->literal_index(), expr->properties_count());
Node* literal = NewNode(op, closure);
PrepareFrameState(literal, expr->CreateLiteralId(),
OutputFrameStateCombine::Push());
@@ -1900,13 +1895,6 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
}
}
- // Transform literals that contain functions to fast properties.
- literal = environment()->Top(); // Reload from operand stack.
- if (expr->has_function()) {
- const Operator* op = javascript()->CallRuntime(Runtime::kToFastProperties);
- NewNode(op, literal);
- }
-
ast_context()->ProduceValue(environment()->Pop());
}
@@ -1928,7 +1916,7 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
// Create node to deep-copy the literal boilerplate.
const Operator* op = javascript()->CreateLiteralArray(
expr->constant_elements(), expr->ComputeFlags(true),
- expr->literal_index());
+ expr->literal_index(), expr->values()->length());
Node* literal = NewNode(op, closure);
PrepareFrameState(literal, expr->CreateLiteralId(),
OutputFrameStateCombine::Push());
@@ -2576,22 +2564,12 @@ void AstGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
return VisitCallJSRuntime(expr);
}
- const Runtime::Function* function = expr->function();
-
- // TODO(mstarzinger): This bailout is a gigantic hack, the owner is ashamed.
- if (function->function_id == Runtime::kInlineGeneratorNext ||
- function->function_id == Runtime::kInlineGeneratorReturn ||
- function->function_id == Runtime::kInlineGeneratorThrow) {
- ast_context()->ProduceValue(jsgraph()->TheHoleConstant());
- return SetStackOverflow();
- }
-
// Evaluate all arguments to the runtime call.
ZoneList<Expression*>* args = expr->arguments();
VisitForValues(args);
// Create node to perform the runtime call.
- Runtime::FunctionId functionId = function->function_id;
+ Runtime::FunctionId functionId = expr->function()->function_id;
const Operator* call = javascript()->CallRuntime(functionId, args->length());
FrameStateBeforeAndAfter states(this, expr->CallId());
Node* value = ProcessArguments(call, args->length());
@@ -2704,11 +2682,9 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
}
// Convert old value into a number.
- if (!is_strong(language_mode())) {
- old_value = NewNode(javascript()->ToNumber(), old_value);
- PrepareFrameState(old_value, expr->ToNumberId(),
- OutputFrameStateCombine::Push());
- }
+ old_value = NewNode(javascript()->ToNumber(), old_value);
+ PrepareFrameState(old_value, expr->ToNumberId(),
+ OutputFrameStateCombine::Push());
// Create a proper eager frame state for the stores.
environment()->Push(old_value);
@@ -2731,10 +2707,8 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
FrameStateBeforeAndAfter states(this, BailoutId::None());
value = BuildBinaryOp(old_value, jsgraph()->OneConstant(),
expr->binary_op(), expr->CountBinOpFeedbackId());
- // This should never deoptimize outside strong mode because otherwise we
- // have converted to number before.
- states.AddToNode(value, is_strong(language_mode()) ? expr->ToNumberId()
- : BailoutId::None(),
+ // This should never deoptimize because we have converted to number before.
+ states.AddToNode(value, BailoutId::None(),
OutputFrameStateCombine::Ignore());
}
@@ -2821,8 +2795,57 @@ void AstGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
}
}
+void AstGraphBuilder::VisitLiteralCompareNil(CompareOperation* expr,
+ Expression* sub_expr,
+ Node* nil_value) {
+ const Operator* op = nullptr;
+ switch (expr->op()) {
+ case Token::EQ:
+ op = javascript()->Equal();
+ break;
+ case Token::EQ_STRICT:
+ op = javascript()->StrictEqual();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ VisitForValue(sub_expr);
+ FrameStateBeforeAndAfter states(this, sub_expr->id());
+ Node* value_to_compare = environment()->Pop();
+ Node* value = NewNode(op, value_to_compare, nil_value);
+ states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+ return ast_context()->ProduceValue(value);
+}
+
+void AstGraphBuilder::VisitLiteralCompareTypeof(CompareOperation* expr,
+ Expression* sub_expr,
+ Handle<String> check) {
+ VisitTypeofExpression(sub_expr);
+ FrameStateBeforeAndAfter states(this, sub_expr->id());
+ Node* typeof_arg = NewNode(javascript()->TypeOf(), environment()->Pop());
+ Node* value = NewNode(javascript()->StrictEqual(), typeof_arg,
+ jsgraph()->Constant(check));
+ states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+ return ast_context()->ProduceValue(value);
+}
void AstGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
+ // Check for a few fast cases. The AST visiting behavior must be in sync
+ // with the full codegen: We don't push both left and right values onto
+ // the expression stack when one side is a special-case literal.
+ Expression* sub_expr = nullptr;
+ Handle<String> check;
+ if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
+ return VisitLiteralCompareTypeof(expr, sub_expr, check);
+ }
+ if (expr->IsLiteralCompareUndefined(&sub_expr)) {
+ return VisitLiteralCompareNil(expr, sub_expr,
+ jsgraph()->UndefinedConstant());
+ }
+ if (expr->IsLiteralCompareNull(&sub_expr)) {
+ return VisitLiteralCompareNil(expr, sub_expr, jsgraph()->NullConstant());
+ }
+
const Operator* op;
switch (expr->op()) {
case Token::EQ:
@@ -2850,6 +2873,7 @@ void AstGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
op = javascript()->GreaterThanOrEqual();
break;
case Token::INSTANCEOF:
+ DCHECK(!FLAG_harmony_instanceof);
op = javascript()->InstanceOf();
break;
case Token::IN:
@@ -2984,23 +3008,25 @@ void AstGraphBuilder::VisitVoid(UnaryOperation* expr) {
ast_context()->ProduceValue(value);
}
-
-void AstGraphBuilder::VisitTypeof(UnaryOperation* expr) {
- Node* operand;
- if (expr->expression()->IsVariableProxy()) {
+void AstGraphBuilder::VisitTypeofExpression(Expression* expr) {
+ if (expr->IsVariableProxy()) {
// Typeof does not throw a reference error on global variables, hence we
// perform a non-contextual load in case the operand is a variable proxy.
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ VariableProxy* proxy = expr->AsVariableProxy();
VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
FrameStateBeforeAndAfter states(this, BeforeId(proxy));
- operand =
- BuildVariableLoad(proxy->var(), expr->expression()->id(), states, pair,
+ Node* load =
+ BuildVariableLoad(proxy->var(), expr->id(), states, pair,
OutputFrameStateCombine::Push(), INSIDE_TYPEOF);
+ environment()->Push(load);
} else {
- VisitForValue(expr->expression());
- operand = environment()->Pop();
+ VisitForValue(expr);
}
- Node* value = NewNode(javascript()->TypeOf(), operand);
+}
+
+void AstGraphBuilder::VisitTypeof(UnaryOperation* expr) {
+ VisitTypeofExpression(expr->expression());
+ Node* value = NewNode(javascript()->TypeOf(), environment()->Pop());
ast_context()->ProduceValue(value);
}
@@ -3052,7 +3078,7 @@ void AstGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
LanguageMode AstGraphBuilder::language_mode() const {
- return info()->language_mode();
+ return current_scope()->language_mode();
}
diff --git a/deps/v8/src/compiler/ast-graph-builder.h b/deps/v8/src/compiler/ast-graph-builder.h
index 6cff237c3c..e206db0c1a 100644
--- a/deps/v8/src/compiler/ast-graph-builder.h
+++ b/deps/v8/src/compiler/ast-graph-builder.h
@@ -419,11 +419,20 @@ class AstGraphBuilder : public AstVisitor {
void VisitTypeof(UnaryOperation* expr);
void VisitNot(UnaryOperation* expr);
+ // Dispatched from VisitTypeof, VisitLiteralCompareTypeof.
+ void VisitTypeofExpression(Expression* expr);
+
// Dispatched from VisitBinaryOperation.
void VisitComma(BinaryOperation* expr);
void VisitLogicalExpression(BinaryOperation* expr);
void VisitArithmeticExpression(BinaryOperation* expr);
+ // Dispatched from VisitCompareOperation.
+ void VisitLiteralCompareNil(CompareOperation* expr, Expression* sub_expr,
+ Node* nil_value);
+ void VisitLiteralCompareTypeof(CompareOperation* expr, Expression* sub_expr,
+ Handle<String> check);
+
// Dispatched from VisitForInStatement.
void VisitForInAssignment(Expression* expr, Node* value,
const VectorSlotPair& feedback,
diff --git a/deps/v8/src/compiler/branch-elimination.cc b/deps/v8/src/compiler/branch-elimination.cc
index bc56e73a08..427612c36e 100644
--- a/deps/v8/src/compiler/branch-elimination.cc
+++ b/deps/v8/src/compiler/branch-elimination.cc
@@ -15,11 +15,11 @@ namespace compiler {
BranchElimination::BranchElimination(Editor* editor, JSGraph* js_graph,
Zone* zone)
: AdvancedReducer(editor),
+ jsgraph_(js_graph),
node_conditions_(zone, js_graph->graph()->NodeCount()),
zone_(zone),
dead_(js_graph->graph()->NewNode(js_graph->common()->Dead())) {}
-
BranchElimination::~BranchElimination() {}
@@ -27,6 +27,9 @@ Reduction BranchElimination::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kDead:
return NoChange();
+ case IrOpcode::kDeoptimizeIf:
+ case IrOpcode::kDeoptimizeUnless:
+ return ReduceDeoptimizeConditional(node);
case IrOpcode::kMerge:
return ReduceMerge(node);
case IrOpcode::kLoop:
@@ -76,6 +79,41 @@ Reduction BranchElimination::ReduceBranch(Node* node) {
return TakeConditionsFromFirstControl(node);
}
+Reduction BranchElimination::ReduceDeoptimizeConditional(Node* node) {
+ DCHECK(node->opcode() == IrOpcode::kDeoptimizeIf ||
+ node->opcode() == IrOpcode::kDeoptimizeUnless);
+ bool condition_is_true = node->opcode() == IrOpcode::kDeoptimizeUnless;
+ Node* condition = NodeProperties::GetValueInput(node, 0);
+ Node* frame_state = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ ControlPathConditions const* conditions = node_conditions_.Get(control);
+ // If we do not know anything about the predecessor, do not propagate just
+ // yet because we will have to recompute anyway once we compute the
+ // predecessor.
+ if (conditions == nullptr) {
+ DCHECK_NULL(node_conditions_.Get(node));
+ return NoChange();
+ }
+ Maybe<bool> condition_value = conditions->LookupCondition(condition);
+ if (condition_value.IsJust()) {
+ // If we know the condition we can discard the branch.
+ if (condition_is_true == condition_value.FromJust()) {
+ // We don't to update the conditions here, because we're replacing with
+ // the {control} node that already contains the right information.
+ return Replace(control);
+ } else {
+ control = graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, effect, control);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), control);
+ Revisit(graph()->end());
+ return Replace(dead());
+ }
+ }
+ return UpdateConditions(
+ node, conditions->AddCondition(zone_, condition, condition_is_true));
+}
Reduction BranchElimination::ReduceIf(Node* node, bool is_true_branch) {
// Add the condition to the list arriving from the input branch.
@@ -264,6 +302,12 @@ bool BranchElimination::ControlPathConditions::operator==(
return false;
}
+Graph* BranchElimination::graph() const { return jsgraph()->graph(); }
+
+CommonOperatorBuilder* BranchElimination::common() const {
+ return jsgraph()->common();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/branch-elimination.h b/deps/v8/src/compiler/branch-elimination.h
index a7ac926c7a..7abeecaf61 100644
--- a/deps/v8/src/compiler/branch-elimination.h
+++ b/deps/v8/src/compiler/branch-elimination.h
@@ -11,6 +11,8 @@ namespace v8 {
namespace internal {
namespace compiler {
+// Forward declarations.
+class CommonOperatorBuilder;
class JSGraph;
@@ -73,6 +75,7 @@ class BranchElimination final : public AdvancedReducer {
};
Reduction ReduceBranch(Node* node);
+ Reduction ReduceDeoptimizeConditional(Node* node);
Reduction ReduceIf(Node* node, bool is_true_branch);
Reduction ReduceLoop(Node* node);
Reduction ReduceMerge(Node* node);
@@ -84,7 +87,11 @@ class BranchElimination final : public AdvancedReducer {
const ControlPathConditions* conditions);
Node* dead() const { return dead_; }
+ Graph* graph() const;
+ JSGraph* jsgraph() const { return jsgraph_; }
+ CommonOperatorBuilder* common() const;
+ JSGraph* const jsgraph_;
PathConditionsForControlNodes node_conditions_;
Zone* zone_;
Node* dead_;
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index e28c19d844..2249cbcb3f 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -444,7 +444,7 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(Zone* local_zone,
bytecode_array_(handle(info->shared_info()->bytecode_array())),
exception_handler_table_(
handle(HandlerTable::cast(bytecode_array()->handler_table()))),
- feedback_vector_(info->feedback_vector()),
+ feedback_vector_(handle(info->shared_info()->feedback_vector())),
frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
FrameStateType::kInterpretedFunction,
bytecode_array()->parameter_count(),
@@ -570,17 +570,11 @@ void BytecodeGraphBuilder::VisitLdaZero() {
environment()->BindAccumulator(node);
}
-void BytecodeGraphBuilder::VisitLdaSmi8() {
+void BytecodeGraphBuilder::VisitLdaSmi() {
Node* node = jsgraph()->Constant(bytecode_iterator().GetImmediateOperand(0));
environment()->BindAccumulator(node);
}
-void BytecodeGraphBuilder::VisitLdaConstantWide() {
- Node* node =
- jsgraph()->Constant(bytecode_iterator().GetConstantForIndexOperand(0));
- environment()->BindAccumulator(node);
-}
-
void BytecodeGraphBuilder::VisitLdaConstant() {
Node* node =
jsgraph()->Constant(bytecode_iterator().GetConstantForIndexOperand(0));
@@ -629,8 +623,6 @@ void BytecodeGraphBuilder::VisitMov() {
environment()->BindRegister(bytecode_iterator().GetRegisterOperand(1), value);
}
-void BytecodeGraphBuilder::VisitMovWide() { VisitMov(); }
-
void BytecodeGraphBuilder::BuildLoadGlobal(
TypeofMode typeof_mode) {
FrameStateBeforeAndAfter states(this);
@@ -652,14 +644,6 @@ void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() {
BuildLoadGlobal(TypeofMode::INSIDE_TYPEOF);
}
-void BytecodeGraphBuilder::VisitLdaGlobalWide() {
- BuildLoadGlobal(TypeofMode::NOT_INSIDE_TYPEOF);
-}
-
-void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofWide() {
- BuildLoadGlobal(TypeofMode::INSIDE_TYPEOF);
-}
-
void BytecodeGraphBuilder::BuildStoreGlobal(LanguageMode language_mode) {
FrameStateBeforeAndAfter states(this);
Handle<Name> name =
@@ -681,14 +665,6 @@ void BytecodeGraphBuilder::VisitStaGlobalStrict() {
BuildStoreGlobal(LanguageMode::STRICT);
}
-void BytecodeGraphBuilder::VisitStaGlobalSloppyWide() {
- BuildStoreGlobal(LanguageMode::SLOPPY);
-}
-
-void BytecodeGraphBuilder::VisitStaGlobalStrictWide() {
- BuildStoreGlobal(LanguageMode::STRICT);
-}
-
void BytecodeGraphBuilder::VisitLdaContextSlot() {
// TODO(mythria): LoadContextSlots are unrolled by the required depth when
// generating bytecode. Hence the value of depth is always 0. Update this
@@ -704,8 +680,6 @@ void BytecodeGraphBuilder::VisitLdaContextSlot() {
environment()->BindAccumulator(node);
}
-void BytecodeGraphBuilder::VisitLdaContextSlotWide() { VisitLdaContextSlot(); }
-
void BytecodeGraphBuilder::VisitStaContextSlot() {
// TODO(mythria): LoadContextSlots are unrolled by the required depth when
// generating bytecode. Hence the value of depth is always 0. Update this
@@ -718,8 +692,6 @@ void BytecodeGraphBuilder::VisitStaContextSlot() {
NewNode(op, context, value);
}
-void BytecodeGraphBuilder::VisitStaContextSlotWide() { VisitStaContextSlot(); }
-
void BytecodeGraphBuilder::BuildLdaLookupSlot(TypeofMode typeof_mode) {
FrameStateBeforeAndAfter states(this);
Node* name =
@@ -752,12 +724,6 @@ void BytecodeGraphBuilder::BuildStaLookupSlot(LanguageMode language_mode) {
environment()->BindAccumulator(store, &states);
}
-void BytecodeGraphBuilder::VisitLdaLookupSlotWide() { VisitLdaLookupSlot(); }
-
-void BytecodeGraphBuilder::VisitLdaLookupSlotInsideTypeofWide() {
- VisitLdaLookupSlotInsideTypeof();
-}
-
void BytecodeGraphBuilder::VisitStaLookupSlotSloppy() {
BuildStaLookupSlot(LanguageMode::SLOPPY);
}
@@ -766,14 +732,6 @@ void BytecodeGraphBuilder::VisitStaLookupSlotStrict() {
BuildStaLookupSlot(LanguageMode::STRICT);
}
-void BytecodeGraphBuilder::VisitStaLookupSlotSloppyWide() {
- VisitStaLookupSlotSloppy();
-}
-
-void BytecodeGraphBuilder::VisitStaLookupSlotStrictWide() {
- VisitStaLookupSlotStrict();
-}
-
void BytecodeGraphBuilder::BuildNamedLoad() {
FrameStateBeforeAndAfter states(this);
Node* object =
@@ -790,8 +748,6 @@ void BytecodeGraphBuilder::BuildNamedLoad() {
void BytecodeGraphBuilder::VisitLoadIC() { BuildNamedLoad(); }
-void BytecodeGraphBuilder::VisitLoadICWide() { BuildNamedLoad(); }
-
void BytecodeGraphBuilder::BuildKeyedLoad() {
FrameStateBeforeAndAfter states(this);
Node* key = environment()->LookupAccumulator();
@@ -807,8 +763,6 @@ void BytecodeGraphBuilder::BuildKeyedLoad() {
void BytecodeGraphBuilder::VisitKeyedLoadIC() { BuildKeyedLoad(); }
-void BytecodeGraphBuilder::VisitKeyedLoadICWide() { BuildKeyedLoad(); }
-
void BytecodeGraphBuilder::BuildNamedStore(LanguageMode language_mode) {
FrameStateBeforeAndAfter states(this);
Node* value = environment()->LookupAccumulator();
@@ -832,14 +786,6 @@ void BytecodeGraphBuilder::VisitStoreICStrict() {
BuildNamedStore(LanguageMode::STRICT);
}
-void BytecodeGraphBuilder::VisitStoreICSloppyWide() {
- BuildNamedStore(LanguageMode::SLOPPY);
-}
-
-void BytecodeGraphBuilder::VisitStoreICStrictWide() {
- BuildNamedStore(LanguageMode::STRICT);
-}
-
void BytecodeGraphBuilder::BuildKeyedStore(LanguageMode language_mode) {
FrameStateBeforeAndAfter states(this);
Node* value = environment()->LookupAccumulator();
@@ -863,14 +809,6 @@ void BytecodeGraphBuilder::VisitKeyedStoreICStrict() {
BuildKeyedStore(LanguageMode::STRICT);
}
-void BytecodeGraphBuilder::VisitKeyedStoreICSloppyWide() {
- BuildKeyedStore(LanguageMode::SLOPPY);
-}
-
-void BytecodeGraphBuilder::VisitKeyedStoreICStrictWide() {
- BuildKeyedStore(LanguageMode::STRICT);
-}
-
void BytecodeGraphBuilder::VisitPushContext() {
Node* new_context = environment()->LookupAccumulator();
environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0),
@@ -888,14 +826,12 @@ void BytecodeGraphBuilder::VisitCreateClosure() {
Handle<SharedFunctionInfo> shared_info = Handle<SharedFunctionInfo>::cast(
bytecode_iterator().GetConstantForIndexOperand(0));
PretenureFlag tenured =
- bytecode_iterator().GetImmediateOperand(1) ? TENURED : NOT_TENURED;
+ bytecode_iterator().GetFlagOperand(1) ? TENURED : NOT_TENURED;
const Operator* op = javascript()->CreateClosure(shared_info, tenured);
Node* closure = NewNode(op);
environment()->BindAccumulator(closure);
}
-void BytecodeGraphBuilder::VisitCreateClosureWide() { VisitCreateClosure(); }
-
void BytecodeGraphBuilder::BuildCreateArguments(CreateArgumentsType type) {
FrameStateBeforeAndAfter states(this);
const Operator* op = javascript()->CreateArguments(type);
@@ -921,61 +857,39 @@ void BytecodeGraphBuilder::BuildCreateLiteral(const Operator* op) {
environment()->BindAccumulator(literal, &states);
}
-void BytecodeGraphBuilder::BuildCreateRegExpLiteral() {
+void BytecodeGraphBuilder::VisitCreateRegExpLiteral() {
Handle<String> constant_pattern =
Handle<String>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
int literal_index = bytecode_iterator().GetIndexOperand(1);
- int literal_flags = bytecode_iterator().GetImmediateOperand(2);
+ int literal_flags = bytecode_iterator().GetFlagOperand(2);
const Operator* op = javascript()->CreateLiteralRegExp(
constant_pattern, literal_flags, literal_index);
BuildCreateLiteral(op);
}
-void BytecodeGraphBuilder::VisitCreateRegExpLiteral() {
- BuildCreateRegExpLiteral();
-}
-
-void BytecodeGraphBuilder::VisitCreateRegExpLiteralWide() {
- BuildCreateRegExpLiteral();
-}
-
-void BytecodeGraphBuilder::BuildCreateArrayLiteral() {
+void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
Handle<FixedArray> constant_elements = Handle<FixedArray>::cast(
bytecode_iterator().GetConstantForIndexOperand(0));
int literal_index = bytecode_iterator().GetIndexOperand(1);
- int literal_flags = bytecode_iterator().GetImmediateOperand(2);
+ int literal_flags = bytecode_iterator().GetFlagOperand(2);
+ int number_of_elements = constant_elements->length();
const Operator* op = javascript()->CreateLiteralArray(
- constant_elements, literal_flags, literal_index);
+ constant_elements, literal_flags, literal_index, number_of_elements);
BuildCreateLiteral(op);
}
-void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
- BuildCreateArrayLiteral();
-}
-
-void BytecodeGraphBuilder::VisitCreateArrayLiteralWide() {
- BuildCreateArrayLiteral();
-}
-
-void BytecodeGraphBuilder::BuildCreateObjectLiteral() {
+void BytecodeGraphBuilder::VisitCreateObjectLiteral() {
Handle<FixedArray> constant_properties = Handle<FixedArray>::cast(
bytecode_iterator().GetConstantForIndexOperand(0));
int literal_index = bytecode_iterator().GetIndexOperand(1);
- int literal_flags = bytecode_iterator().GetImmediateOperand(2);
+ int literal_flags = bytecode_iterator().GetFlagOperand(2);
+ // TODO(mstarzinger): Thread through number of properties.
+ int number_of_properties = constant_properties->length() / 2;
const Operator* op = javascript()->CreateLiteralObject(
- constant_properties, literal_flags, literal_index);
+ constant_properties, literal_flags, literal_index, number_of_properties);
BuildCreateLiteral(op);
}
-void BytecodeGraphBuilder::VisitCreateObjectLiteral() {
- BuildCreateObjectLiteral();
-}
-
-void BytecodeGraphBuilder::VisitCreateObjectLiteralWide() {
- BuildCreateObjectLiteral();
-}
-
-
Node* BytecodeGraphBuilder::ProcessCallArguments(const Operator* call_op,
Node* callee,
interpreter::Register receiver,
@@ -1013,17 +927,15 @@ void BytecodeGraphBuilder::BuildCall(TailCallMode tail_call_mode) {
void BytecodeGraphBuilder::VisitCall() { BuildCall(TailCallMode::kDisallow); }
-void BytecodeGraphBuilder::VisitCallWide() {
- BuildCall(TailCallMode::kDisallow);
-}
-
-void BytecodeGraphBuilder::VisitTailCall() { BuildCall(TailCallMode::kAllow); }
-
-void BytecodeGraphBuilder::VisitTailCallWide() {
- BuildCall(TailCallMode::kAllow);
+void BytecodeGraphBuilder::VisitTailCall() {
+ TailCallMode tail_call_mode =
+ bytecode_array_->GetIsolate()->is_tail_call_elimination_enabled()
+ ? TailCallMode::kAllow
+ : TailCallMode::kDisallow;
+ BuildCall(tail_call_mode);
}
-void BytecodeGraphBuilder::BuildCallJSRuntime() {
+void BytecodeGraphBuilder::VisitCallJSRuntime() {
FrameStateBeforeAndAfter states(this);
Node* callee =
BuildLoadNativeContextField(bytecode_iterator().GetIndexOperand(0));
@@ -1036,10 +948,6 @@ void BytecodeGraphBuilder::BuildCallJSRuntime() {
environment()->BindAccumulator(value, &states);
}
-void BytecodeGraphBuilder::VisitCallJSRuntime() { BuildCallJSRuntime(); }
-
-void BytecodeGraphBuilder::VisitCallJSRuntimeWide() { BuildCallJSRuntime(); }
-
Node* BytecodeGraphBuilder::ProcessCallRuntimeArguments(
const Operator* call_runtime_op, interpreter::Register first_arg,
size_t arity) {
@@ -1053,10 +961,10 @@ Node* BytecodeGraphBuilder::ProcessCallRuntimeArguments(
return value;
}
-void BytecodeGraphBuilder::BuildCallRuntime() {
+void BytecodeGraphBuilder::VisitCallRuntime() {
FrameStateBeforeAndAfter states(this);
- Runtime::FunctionId functionId =
- static_cast<Runtime::FunctionId>(bytecode_iterator().GetIndexOperand(0));
+ Runtime::FunctionId functionId = static_cast<Runtime::FunctionId>(
+ bytecode_iterator().GetRuntimeIdOperand(0));
interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
@@ -1066,14 +974,10 @@ void BytecodeGraphBuilder::BuildCallRuntime() {
environment()->BindAccumulator(value, &states);
}
-void BytecodeGraphBuilder::VisitCallRuntime() { BuildCallRuntime(); }
-
-void BytecodeGraphBuilder::VisitCallRuntimeWide() { BuildCallRuntime(); }
-
-void BytecodeGraphBuilder::BuildCallRuntimeForPair() {
+void BytecodeGraphBuilder::VisitCallRuntimeForPair() {
FrameStateBeforeAndAfter states(this);
- Runtime::FunctionId functionId =
- static_cast<Runtime::FunctionId>(bytecode_iterator().GetIndexOperand(0));
+ Runtime::FunctionId functionId = static_cast<Runtime::FunctionId>(
+ bytecode_iterator().GetRuntimeIdOperand(0));
interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
interpreter::Register first_return =
@@ -1085,12 +989,18 @@ void BytecodeGraphBuilder::BuildCallRuntimeForPair() {
environment()->BindRegistersToProjections(first_return, return_pair, &states);
}
-void BytecodeGraphBuilder::VisitCallRuntimeForPair() {
- BuildCallRuntimeForPair();
-}
+void BytecodeGraphBuilder::VisitInvokeIntrinsic() {
+ FrameStateBeforeAndAfter states(this);
+ Runtime::FunctionId functionId = static_cast<Runtime::FunctionId>(
+ bytecode_iterator().GetRuntimeIdOperand(0));
+ interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
+ size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
-void BytecodeGraphBuilder::VisitCallRuntimeForPairWide() {
- BuildCallRuntimeForPair();
+ // Create node to perform the runtime call. Turbofan will take care of the
+ // lowering.
+ const Operator* call = javascript()->CallRuntime(functionId, arg_count);
+ Node* value = ProcessCallRuntimeArguments(call, first_arg, arg_count);
+ environment()->BindAccumulator(value, &states);
}
Node* BytecodeGraphBuilder::ProcessCallNewArguments(
@@ -1108,7 +1018,7 @@ Node* BytecodeGraphBuilder::ProcessCallNewArguments(
return value;
}
-void BytecodeGraphBuilder::BuildCallConstruct() {
+void BytecodeGraphBuilder::VisitNew() {
FrameStateBeforeAndAfter states(this);
interpreter::Register callee_reg = bytecode_iterator().GetRegisterOperand(0);
interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
@@ -1124,10 +1034,6 @@ void BytecodeGraphBuilder::BuildCallConstruct() {
environment()->BindAccumulator(value, &states);
}
-void BytecodeGraphBuilder::VisitNew() { BuildCallConstruct(); }
-
-void BytecodeGraphBuilder::VisitNewWide() { BuildCallConstruct(); }
-
void BytecodeGraphBuilder::BuildThrow() {
FrameStateBeforeAndAfter states(this);
Node* value = environment()->LookupAccumulator();
@@ -1282,10 +1188,6 @@ void BytecodeGraphBuilder::VisitTestEqualStrict() {
BuildCompareOp(javascript()->StrictEqual());
}
-void BytecodeGraphBuilder::VisitTestNotEqualStrict() {
- BuildCompareOp(javascript()->StrictNotEqual());
-}
-
void BytecodeGraphBuilder::VisitTestLessThan() {
BuildCompareOp(javascript()->LessThan());
}
@@ -1307,6 +1209,7 @@ void BytecodeGraphBuilder::VisitTestIn() {
}
void BytecodeGraphBuilder::VisitTestInstanceOf() {
+ DCHECK(!FLAG_harmony_instanceof);
BuildCompareOp(javascript()->InstanceOf());
}
@@ -1332,7 +1235,6 @@ void BytecodeGraphBuilder::VisitJump() { BuildJump(); }
void BytecodeGraphBuilder::VisitJumpConstant() { BuildJump(); }
-void BytecodeGraphBuilder::VisitJumpConstantWide() { BuildJump(); }
void BytecodeGraphBuilder::VisitJumpIfTrue() {
BuildJumpIfEqual(jsgraph()->TrueConstant());
@@ -1342,10 +1244,6 @@ void BytecodeGraphBuilder::VisitJumpIfTrueConstant() {
BuildJumpIfEqual(jsgraph()->TrueConstant());
}
-void BytecodeGraphBuilder::VisitJumpIfTrueConstantWide() {
- BuildJumpIfEqual(jsgraph()->TrueConstant());
-}
-
void BytecodeGraphBuilder::VisitJumpIfFalse() {
BuildJumpIfEqual(jsgraph()->FalseConstant());
}
@@ -1354,10 +1252,6 @@ void BytecodeGraphBuilder::VisitJumpIfFalseConstant() {
BuildJumpIfEqual(jsgraph()->FalseConstant());
}
-void BytecodeGraphBuilder::VisitJumpIfFalseConstantWide() {
- BuildJumpIfEqual(jsgraph()->FalseConstant());
-}
-
void BytecodeGraphBuilder::VisitJumpIfToBooleanTrue() {
BuildJumpIfToBooleanEqual(jsgraph()->TrueConstant());
}
@@ -1366,10 +1260,6 @@ void BytecodeGraphBuilder::VisitJumpIfToBooleanTrueConstant() {
BuildJumpIfToBooleanEqual(jsgraph()->TrueConstant());
}
-void BytecodeGraphBuilder::VisitJumpIfToBooleanTrueConstantWide() {
- BuildJumpIfToBooleanEqual(jsgraph()->TrueConstant());
-}
-
void BytecodeGraphBuilder::VisitJumpIfToBooleanFalse() {
BuildJumpIfToBooleanEqual(jsgraph()->FalseConstant());
}
@@ -1378,20 +1268,12 @@ void BytecodeGraphBuilder::VisitJumpIfToBooleanFalseConstant() {
BuildJumpIfToBooleanEqual(jsgraph()->FalseConstant());
}
-void BytecodeGraphBuilder::VisitJumpIfToBooleanFalseConstantWide() {
- BuildJumpIfToBooleanEqual(jsgraph()->FalseConstant());
-}
-
void BytecodeGraphBuilder::VisitJumpIfNotHole() { BuildJumpIfNotHole(); }
void BytecodeGraphBuilder::VisitJumpIfNotHoleConstant() {
BuildJumpIfNotHole();
}
-void BytecodeGraphBuilder::VisitJumpIfNotHoleConstantWide() {
- BuildJumpIfNotHole();
-}
-
void BytecodeGraphBuilder::VisitJumpIfNull() {
BuildJumpIfEqual(jsgraph()->NullConstant());
}
@@ -1400,10 +1282,6 @@ void BytecodeGraphBuilder::VisitJumpIfNullConstant() {
BuildJumpIfEqual(jsgraph()->NullConstant());
}
-void BytecodeGraphBuilder::VisitJumpIfNullConstantWide() {
- BuildJumpIfEqual(jsgraph()->NullConstant());
-}
-
void BytecodeGraphBuilder::VisitJumpIfUndefined() {
BuildJumpIfEqual(jsgraph()->UndefinedConstant());
}
@@ -1412,10 +1290,6 @@ void BytecodeGraphBuilder::VisitJumpIfUndefinedConstant() {
BuildJumpIfEqual(jsgraph()->UndefinedConstant());
}
-void BytecodeGraphBuilder::VisitJumpIfUndefinedConstantWide() {
- BuildJumpIfEqual(jsgraph()->UndefinedConstant());
-}
-
void BytecodeGraphBuilder::VisitStackCheck() {
FrameStateBeforeAndAfter states(this);
Node* node = NewNode(javascript()->StackCheck());
@@ -1451,8 +1325,6 @@ void BytecodeGraphBuilder::BuildForInPrepare() {
void BytecodeGraphBuilder::VisitForInPrepare() { BuildForInPrepare(); }
-void BytecodeGraphBuilder::VisitForInPrepareWide() { BuildForInPrepare(); }
-
void BytecodeGraphBuilder::VisitForInDone() {
FrameStateBeforeAndAfter states(this);
Node* index =
@@ -1482,8 +1354,6 @@ void BytecodeGraphBuilder::BuildForInNext() {
void BytecodeGraphBuilder::VisitForInNext() { BuildForInNext(); }
-void BytecodeGraphBuilder::VisitForInNextWide() { BuildForInNext(); }
-
void BytecodeGraphBuilder::VisitForInStep() {
FrameStateBeforeAndAfter states(this);
Node* index =
@@ -1492,6 +1362,21 @@ void BytecodeGraphBuilder::VisitForInStep() {
environment()->BindAccumulator(index, &states);
}
+void BytecodeGraphBuilder::VisitWide() {
+ // Consumed by the BytecodeArrayIterator.
+ UNREACHABLE();
+}
+
+void BytecodeGraphBuilder::VisitExtraWide() {
+ // Consumed by the BytecodeArrayIterator.
+ UNREACHABLE();
+}
+
+void BytecodeGraphBuilder::VisitIllegal() {
+ // Never present in valid bytecode.
+ UNREACHABLE();
+}
+
void BytecodeGraphBuilder::SwitchToMergeEnvironment(int current_offset) {
if (merge_environments_[current_offset] != nullptr) {
if (environment() != nullptr) {
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index 2fa5967c86..c842c24b8c 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -111,9 +111,6 @@ class BytecodeGraphBuilder {
size_t arity);
void BuildCreateLiteral(const Operator* op);
- void BuildCreateRegExpLiteral();
- void BuildCreateArrayLiteral();
- void BuildCreateObjectLiteral();
void BuildCreateArguments(CreateArgumentsType type);
void BuildLoadGlobal(TypeofMode typeof_mode);
void BuildStoreGlobal(LanguageMode language_mode);
@@ -124,10 +121,6 @@ class BytecodeGraphBuilder {
void BuildLdaLookupSlot(TypeofMode typeof_mode);
void BuildStaLookupSlot(LanguageMode language_mode);
void BuildCall(TailCallMode tail_call_mode);
- void BuildCallJSRuntime();
- void BuildCallRuntime();
- void BuildCallRuntimeForPair();
- void BuildCallConstruct();
void BuildThrow();
void BuildBinaryOp(const Operator* op);
void BuildCompareOp(const Operator* op);
@@ -135,6 +128,7 @@ class BytecodeGraphBuilder {
void BuildCastOperator(const Operator* op);
void BuildForInPrepare();
void BuildForInNext();
+ void BuildInvokeIntrinsic();
// Control flow plumbing.
void BuildJump();
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index 783d9d6da3..b38e529f9f 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -123,6 +123,26 @@ LinkageLocation regloc(Register reg) {
d20.bit() | d21.bit() | d22.bit() | d23.bit() | d24.bit() | d25.bit() | \
d26.bit() | d27.bit() | d28.bit() | d29.bit() | d30.bit() | d31.bit()
+#elif V8_TARGET_ARCH_S390X
+// ===========================================================================
+// == s390x ==================================================================
+// ===========================================================================
+#define PARAM_REGISTERS r2, r3, r4, r5, r6
+#define CALLEE_SAVE_REGISTERS \
+ r6.bit() | r7.bit() | r8.bit() | r9.bit() | r10.bit() | ip.bit() | r13.bit()
+#define CALLEE_SAVE_FP_REGISTERS \
+ d8.bit() | d9.bit() | d10.bit() | d11.bit() | d12.bit() | d13.bit() | \
+ d14.bit() | d15.bit()
+
+#elif V8_TARGET_ARCH_S390
+// ===========================================================================
+// == s390 ===================================================================
+// ===========================================================================
+#define PARAM_REGISTERS r2, r3, r4, r5, r6
+#define CALLEE_SAVE_REGISTERS \
+ r6.bit() | r7.bit() | r8.bit() | r9.bit() | r10.bit() | ip.bit() | r13.bit()
+#define CALLEE_SAVE_FP_REGISTERS (d4.bit() | d6.bit())
+
#else
// ===========================================================================
// == unknown ================================================================
@@ -210,6 +230,11 @@ CallDescriptor* Linkage::GetSimplifiedCDescriptor(
// The target for C calls is always an address (i.e. machine pointer).
MachineType target_type = MachineType::Pointer();
LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+ CallDescriptor::Flags flags = CallDescriptor::kUseNativeStack;
+ if (set_initialize_root_flag) {
+ flags |= CallDescriptor::kInitializeRootRegister;
+ }
+
return new (zone) CallDescriptor( // --
CallDescriptor::kCallAddress, // kind
target_type, // target MachineType
@@ -220,10 +245,7 @@ CallDescriptor* Linkage::GetSimplifiedCDescriptor(
Operator::kNoProperties, // properties
kCalleeSaveRegisters, // callee-saved registers
kCalleeSaveFPRegisters, // callee-saved fp regs
- set_initialize_root_flag ? // flags
- CallDescriptor::kInitializeRootRegister
- : CallDescriptor::kNoFlags,
- "c-call");
+ flags, "c-call");
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/change-lowering.cc b/deps/v8/src/compiler/change-lowering.cc
index e217f3786b..907b36a93b 100644
--- a/deps/v8/src/compiler/change-lowering.cc
+++ b/deps/v8/src/compiler/change-lowering.cc
@@ -55,6 +55,8 @@ Reduction ChangeLowering::Reduce(Node* node) {
return ObjectIsSmi(node);
case IrOpcode::kObjectIsNumber:
return ObjectIsNumber(node);
+ case IrOpcode::kObjectIsUndetectable:
+ return ObjectIsUndetectable(node);
default:
return NoChange();
}
@@ -603,6 +605,13 @@ Node* ChangeLowering::LoadHeapObjectMap(Node* object, Node* control) {
graph()->start(), control);
}
+Node* ChangeLowering::LoadMapBitField(Node* map) {
+ return graph()->NewNode(
+ machine()->Load(MachineType::Uint8()), map,
+ jsgraph()->IntPtrConstant(Map::kBitFieldOffset - kHeapObjectTag),
+ graph()->start(), graph()->start());
+}
+
Node* ChangeLowering::LoadMapInstanceType(Node* map) {
return graph()->NewNode(
machine()->Load(MachineType::Uint8()), map,
@@ -650,6 +659,31 @@ Reduction ChangeLowering::ObjectIsReceiver(Node* node) {
return Changed(node);
}
+Reduction ChangeLowering::ObjectIsUndetectable(Node* node) {
+ Node* input = NodeProperties::GetValueInput(node, 0);
+ // TODO(bmeurer): Optimize somewhat based on input type.
+ Node* check = IsSmi(input);
+ Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* vtrue = jsgraph()->Int32Constant(0);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = graph()->NewNode(
+ machine()->Word32Equal(),
+ graph()->NewNode(
+ machine()->Word32Equal(),
+ graph()->NewNode(machine()->Word32And(),
+ jsgraph()->Uint32Constant(1 << Map::kIsUndetectable),
+ LoadMapBitField(LoadHeapObjectMap(input, if_false))),
+ jsgraph()->Int32Constant(0)),
+ jsgraph()->Int32Constant(0));
+ Node* control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ node->ReplaceInput(0, vtrue);
+ node->AppendInput(graph()->zone(), vfalse);
+ node->AppendInput(graph()->zone(), control);
+ NodeProperties::ChangeOp(node, common()->Phi(MachineRepresentation::kBit, 2));
+ return Changed(node);
+}
+
Reduction ChangeLowering::ObjectIsSmi(Node* node) {
node->ReplaceInput(0,
graph()->NewNode(machine()->WordAnd(), node->InputAt(0),
diff --git a/deps/v8/src/compiler/change-lowering.h b/deps/v8/src/compiler/change-lowering.h
index defadd95fd..7e5078bf84 100644
--- a/deps/v8/src/compiler/change-lowering.h
+++ b/deps/v8/src/compiler/change-lowering.h
@@ -58,11 +58,13 @@ class ChangeLowering final : public Reducer {
Node* IsSmi(Node* value);
Node* LoadHeapObjectMap(Node* object, Node* control);
+ Node* LoadMapBitField(Node* map);
Node* LoadMapInstanceType(Node* map);
Reduction ObjectIsNumber(Node* node);
Reduction ObjectIsReceiver(Node* node);
Reduction ObjectIsSmi(Node* node);
+ Reduction ObjectIsUndetectable(Node* node);
Node* ComputeIndex(const ElementAccess& access, Node* const key);
Graph* graph() const;
diff --git a/deps/v8/src/compiler/code-generator-impl.h b/deps/v8/src/compiler/code-generator-impl.h
index 7295948399..7de32c5c91 100644
--- a/deps/v8/src/compiler/code-generator-impl.h
+++ b/deps/v8/src/compiler/code-generator-impl.h
@@ -139,6 +139,19 @@ class InstructionOperandConverter {
Instruction* instr_;
};
+// Eager deoptimization exit.
+class DeoptimizationExit : public ZoneObject {
+ public:
+ explicit DeoptimizationExit(int deoptimization_id)
+ : deoptimization_id_(deoptimization_id) {}
+
+ int deoptimization_id() const { return deoptimization_id_; }
+ Label* label() { return &label_; }
+
+ private:
+ int const deoptimization_id_;
+ Label label_;
+};
// Generator for out-of-line code that is emitted after the main code is done.
class OutOfLineCode : public ZoneObject {
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
index 712cfe0b2d..086da560e4 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -31,7 +31,6 @@ class CodeGenerator::JumpTable final : public ZoneObject {
size_t const target_count_;
};
-
CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
InstructionSequence* code, CompilationInfo* info)
: frame_access_state_(new (code->zone()) FrameAccessState(frame)),
@@ -45,6 +44,7 @@ CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
resolver_(this),
safepoints_(code->zone()),
handlers_(code->zone()),
+ deoptimization_exits_(code->zone()),
deoptimization_states_(code->zone()),
deoptimization_literals_(code->zone()),
inlined_function_count_(0),
@@ -56,12 +56,8 @@ CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
for (int i = 0; i < code->InstructionBlockCount(); ++i) {
new (&labels_[i]) Label;
}
- if (code->ContainsCall()) {
- frame->MarkNeedsFrame();
- }
}
-
Handle<Code> CodeGenerator::GenerateCode() {
CompilationInfo* info = this->info();
@@ -80,14 +76,11 @@ Handle<Code> CodeGenerator::GenerateCode() {
}
// Architecture-specific, linkage-specific prologue.
info->set_prologue_offset(masm()->pc_offset());
- AssemblePrologue();
- if (linkage()->GetIncomingDescriptor()->InitializeRootRegister()) {
- masm()->InitializeRootRegister();
- }
// Define deoptimization literals for all inlined functions.
DCHECK_EQ(0u, deoptimization_literals_.size());
- for (auto& inlined : info->inlined_functions()) {
+ for (const CompilationInfo::InlinedFunctionHolder& inlined :
+ info->inlined_functions()) {
if (!inlined.shared_info.is_identical_to(info->shared_info())) {
DefineDeoptimizationLiteral(inlined.shared_info);
}
@@ -96,15 +89,19 @@ Handle<Code> CodeGenerator::GenerateCode() {
// Define deoptimization literals for all unoptimized code objects of inlined
// functions. This ensures unoptimized code is kept alive by optimized code.
- for (auto& inlined : info->inlined_functions()) {
+ for (const CompilationInfo::InlinedFunctionHolder& inlined :
+ info->inlined_functions()) {
if (!inlined.shared_info.is_identical_to(info->shared_info())) {
DefineDeoptimizationLiteral(inlined.inlined_code_object_root);
}
}
+ // Finish the Frame
+ frame()->AlignFrame(kFrameAlignmentInBytes);
+ AssembleSetupStackPointer();
// Assemble all non-deferred blocks, followed by deferred ones.
for (int deferred = 0; deferred < 2; ++deferred) {
- for (auto const block : code()->instruction_blocks()) {
+ for (const InstructionBlock* block : code()->instruction_blocks()) {
if (block->IsDeferred() == (deferred == 0)) {
continue;
}
@@ -141,9 +138,26 @@ Handle<Code> CodeGenerator::GenerateCode() {
SNPrintF(buffer, " --");
masm()->RecordComment(buffer_start);
}
+
+ frame_access_state()->MarkHasFrame(block->needs_frame());
+
masm()->bind(GetLabel(current_block_));
- for (int i = block->code_start(); i < block->code_end(); ++i) {
- AssembleInstruction(code()->InstructionAt(i));
+ if (block->must_construct_frame()) {
+ AssemblePrologue();
+ // We need to setup the root register after we assemble the prologue, to
+ // avoid clobbering callee saved registers in case of C linkage and
+ // using the roots.
+ // TODO(mtrofin): investigate how we can avoid doing this repeatedly.
+ if (linkage()->GetIncomingDescriptor()->InitializeRootRegister()) {
+ masm()->InitializeRootRegister();
+ }
+ }
+
+ if (FLAG_enable_embedded_constant_pool && !block->needs_frame()) {
+ ConstantPoolUnavailableScope constant_pool_unavailable(masm());
+ AssembleBlock(block);
+ } else {
+ AssembleBlock(block);
}
}
}
@@ -158,6 +172,12 @@ Handle<Code> CodeGenerator::GenerateCode() {
}
}
+ // Assemble all eager deoptimization exits.
+ for (DeoptimizationExit* exit : deoptimization_exits_) {
+ masm()->bind(exit->label());
+ AssembleDeoptimizerCall(exit->deoptimization_id(), Deoptimizer::EAGER);
+ }
+
// Ensure there is space for lazy deoptimization in the code.
if (info->ShouldEnsureSpaceForLazyDeopt()) {
int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
@@ -211,7 +231,8 @@ Handle<Code> CodeGenerator::GenerateCode() {
// Emit a code line info recording stop event.
void* line_info = recorder->DetachJITHandlerData();
- LOG_CODE_EVENT(isolate(), CodeEndLinePosInfoRecordEvent(*result, line_info));
+ LOG_CODE_EVENT(isolate(), CodeEndLinePosInfoRecordEvent(
+ AbstractCode::cast(*result), line_info));
return result;
}
@@ -232,7 +253,7 @@ void CodeGenerator::RecordSafepoint(ReferenceMap* references,
safepoints()->DefineSafepoint(masm(), kind, arguments, deopt_mode);
int stackSlotToSpillSlotDelta =
frame()->GetTotalFrameSlotCount() - frame()->GetSpillSlotCount();
- for (auto& operand : references->reference_operands()) {
+ for (const InstructionOperand& operand : references->reference_operands()) {
if (operand.IsStackSlot()) {
int index = LocationOperand::cast(operand).index();
DCHECK(index >= 0);
@@ -250,16 +271,15 @@ void CodeGenerator::RecordSafepoint(ReferenceMap* references,
}
}
-
bool CodeGenerator::IsMaterializableFromFrame(Handle<HeapObject> object,
- int* offset_return) {
+ int* slot_return) {
if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
if (info()->has_context() && object.is_identical_to(info()->context()) &&
!info()->is_osr()) {
- *offset_return = StandardFrameConstants::kContextOffset;
+ *slot_return = Frame::kContextSlot;
return true;
} else if (object.is_identical_to(info()->closure())) {
- *offset_return = JavaScriptFrameConstants::kFunctionOffset;
+ *slot_return = Frame::kJSFunctionSlot;
return true;
}
}
@@ -282,43 +302,82 @@ bool CodeGenerator::IsMaterializableFromRoot(
return false;
}
+void CodeGenerator::AssembleBlock(const InstructionBlock* block) {
+ for (int i = block->code_start(); i < block->code_end(); ++i) {
+ Instruction* instr = code()->InstructionAt(i);
+ AssembleInstruction(instr, block);
+ }
+}
-void CodeGenerator::AssembleInstruction(Instruction* instr) {
+void CodeGenerator::AssembleInstruction(Instruction* instr,
+ const InstructionBlock* block) {
AssembleGaps(instr);
+ DCHECK_IMPLIES(
+ block->must_deconstruct_frame(),
+ instr != code()->InstructionAt(block->last_instruction_index()) ||
+ instr->IsRet() || instr->IsJump());
+ if (instr->IsJump() && block->must_deconstruct_frame()) {
+ AssembleDeconstructFrame();
+ }
AssembleSourcePosition(instr);
// Assemble architecture-specific code for the instruction.
AssembleArchInstruction(instr);
FlagsMode mode = FlagsModeField::decode(instr->opcode());
FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
- if (mode == kFlags_branch) {
- // Assemble a branch after this instruction.
- InstructionOperandConverter i(this, instr);
- RpoNumber true_rpo = i.InputRpo(instr->InputCount() - 2);
- RpoNumber false_rpo = i.InputRpo(instr->InputCount() - 1);
-
- if (true_rpo == false_rpo) {
- // redundant branch.
- if (!IsNextInAssemblyOrder(true_rpo)) {
- AssembleArchJump(true_rpo);
+ switch (mode) {
+ case kFlags_branch: {
+ // Assemble a branch after this instruction.
+ InstructionOperandConverter i(this, instr);
+ RpoNumber true_rpo = i.InputRpo(instr->InputCount() - 2);
+ RpoNumber false_rpo = i.InputRpo(instr->InputCount() - 1);
+
+ if (true_rpo == false_rpo) {
+ // redundant branch.
+ if (!IsNextInAssemblyOrder(true_rpo)) {
+ AssembleArchJump(true_rpo);
+ }
+ return;
}
- return;
+ if (IsNextInAssemblyOrder(true_rpo)) {
+ // true block is next, can fall through if condition negated.
+ std::swap(true_rpo, false_rpo);
+ condition = NegateFlagsCondition(condition);
+ }
+ BranchInfo branch;
+ branch.condition = condition;
+ branch.true_label = GetLabel(true_rpo);
+ branch.false_label = GetLabel(false_rpo);
+ branch.fallthru = IsNextInAssemblyOrder(false_rpo);
+ // Assemble architecture-specific branch.
+ AssembleArchBranch(instr, &branch);
+ break;
+ }
+ case kFlags_deoptimize: {
+ // Assemble a conditional eager deoptimization after this instruction.
+ InstructionOperandConverter i(this, instr);
+ size_t frame_state_offset = MiscField::decode(instr->opcode());
+ DeoptimizationExit* const exit =
+ AddDeoptimizationExit(instr, frame_state_offset);
+ Label continue_label;
+ BranchInfo branch;
+ branch.condition = condition;
+ branch.true_label = exit->label();
+ branch.false_label = &continue_label;
+ branch.fallthru = true;
+ // Assemble architecture-specific branch.
+ AssembleArchBranch(instr, &branch);
+ masm()->bind(&continue_label);
+ break;
+ }
+ case kFlags_set: {
+ // Assemble a boolean materialization after this instruction.
+ AssembleArchBoolean(instr, condition);
+ break;
}
- if (IsNextInAssemblyOrder(true_rpo)) {
- // true block is next, can fall through if condition negated.
- std::swap(true_rpo, false_rpo);
- condition = NegateFlagsCondition(condition);
+ case kFlags_none: {
+ break;
}
- BranchInfo branch;
- branch.condition = condition;
- branch.true_label = GetLabel(true_rpo);
- branch.false_label = GetLabel(false_rpo);
- branch.fallthru = IsNextInAssemblyOrder(false_rpo);
- // Assemble architecture-specific branch.
- AssembleArchBranch(instr, &branch);
- } else if (mode == kFlags_set) {
- // Assemble a boolean materialization after this instruction.
- AssembleArchBoolean(instr, condition);
}
}
@@ -595,6 +654,9 @@ void CodeGenerator::BuildTranslationForFrameStateDescriptor(
shared_info_id,
static_cast<unsigned int>(descriptor->parameters_count()));
break;
+ case FrameStateType::kTailCallerFunction:
+ translation->BeginTailCallerFrame(shared_info_id);
+ break;
case FrameStateType::kConstructStub:
translation->BeginConstructStubFrame(
shared_info_id,
@@ -714,15 +776,22 @@ void CodeGenerator::MarkLazyDeoptSite() {
last_lazy_deopt_pc_ = masm()->pc_offset();
}
+DeoptimizationExit* CodeGenerator::AddDeoptimizationExit(
+ Instruction* instr, size_t frame_state_offset) {
+ int const deoptimization_id = BuildTranslation(
+ instr, -1, frame_state_offset, OutputFrameStateCombine::Ignore());
+ DeoptimizationExit* const exit =
+ new (zone()) DeoptimizationExit(deoptimization_id);
+ deoptimization_exits_.push_back(exit);
+ return exit;
+}
int CodeGenerator::TailCallFrameStackSlotDelta(int stack_param_delta) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int spill_slots = frame()->GetSpillSlotCount();
- bool has_frame = descriptor->IsJSFunctionCall() || spill_slots > 0;
// Leave the PC on the stack on platforms that have that as part of their ABI
int pc_slots = V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0;
- int sp_slot_delta =
- has_frame ? (frame()->GetTotalFrameSlotCount() - pc_slots) : 0;
+ int sp_slot_delta = frame_access_state()->has_frame()
+ ? (frame()->GetTotalFrameSlotCount() - pc_slots)
+ : 0;
// Discard only slots that won't be used by new parameters.
sp_slot_delta += stack_param_delta;
return sp_slot_delta;
diff --git a/deps/v8/src/compiler/code-generator.h b/deps/v8/src/compiler/code-generator.h
index 70bf81f5af..b82181c331 100644
--- a/deps/v8/src/compiler/code-generator.h
+++ b/deps/v8/src/compiler/code-generator.h
@@ -16,6 +16,7 @@ namespace internal {
namespace compiler {
// Forward declarations.
+class DeoptimizationExit;
class FrameAccessState;
class Linkage;
class OutOfLineCode;
@@ -76,15 +77,18 @@ class CodeGenerator final : public GapResolver::Assembler {
// Check if a heap object can be materialized by loading from the frame, which
// is usually way cheaper than materializing the actual heap object constant.
- bool IsMaterializableFromFrame(Handle<HeapObject> object, int* offset_return);
+ bool IsMaterializableFromFrame(Handle<HeapObject> object, int* slot_return);
// Check if a heap object can be materialized by loading from a heap root,
// which is cheaper on some platforms than materializing the actual heap
// object constant.
bool IsMaterializableFromRoot(Handle<HeapObject> object,
Heap::RootListIndex* index_return);
+ // Assemble instructions for the specified block.
+ void AssembleBlock(const InstructionBlock* block);
+
// Assemble code for the specified instruction.
- void AssembleInstruction(Instruction* instr);
+ void AssembleInstruction(Instruction* instr, const InstructionBlock* block);
void AssembleSourcePosition(Instruction* instr);
void AssembleGaps(Instruction* instr);
@@ -105,6 +109,9 @@ class CodeGenerator final : public GapResolver::Assembler {
// Generates an architecture-specific, descriptor-specific prologue
// to set up a stack frame.
void AssemblePrologue();
+
+ void AssembleSetupStackPointer();
+
// Generates an architecture-specific, descriptor-specific return sequence
// to tear down a stack frame.
void AssembleReturn();
@@ -112,9 +119,15 @@ class CodeGenerator final : public GapResolver::Assembler {
// Generates code to deconstruct a the caller's frame, including arguments.
void AssembleDeconstructActivationRecord(int stack_param_delta);
+ void AssembleDeconstructFrame();
+
// Generates code to manipulate the stack in preparation for a tail call.
void AssemblePrepareTailCall(int stack_param_delta);
+ // Generates code to pop current frame if it is an arguments adaptor frame.
+ void AssemblePopArgumentsAdaptorFrame(Register args_reg, Register scratch1,
+ Register scratch2, Register scratch3);
+
// ===========================================================================
// ============== Architecture-specific gap resolver methods. ================
// ===========================================================================
@@ -144,10 +157,10 @@ class CodeGenerator final : public GapResolver::Assembler {
void RecordCallPosition(Instruction* instr);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
- FrameStateDescriptor* GetFrameStateDescriptor(
- Instruction* instr, size_t frame_access_state_offset);
+ FrameStateDescriptor* GetFrameStateDescriptor(Instruction* instr,
+ size_t frame_state_offset);
int BuildTranslation(Instruction* instr, int pc_offset,
- size_t frame_access_state_offset,
+ size_t frame_state_offset,
OutputFrameStateCombine state_combine);
void BuildTranslationForFrameStateDescriptor(
FrameStateDescriptor* descriptor, InstructionOperandIterator* iter,
@@ -165,6 +178,9 @@ class CodeGenerator final : public GapResolver::Assembler {
void EnsureSpaceForLazyDeopt();
void MarkLazyDeoptSite();
+ DeoptimizationExit* AddDeoptimizationExit(Instruction* instr,
+ size_t frame_state_offset);
+
// Converts the delta in the number of stack parameter passed from a tail
// caller to the callee into the distance (in pointers) the SP must be
// adjusted, taking frame elision and other relevant factors into
@@ -210,6 +226,7 @@ class CodeGenerator final : public GapResolver::Assembler {
GapResolver resolver_;
SafepointTableBuilder safepoints_;
ZoneVector<HandlerInfo> handlers_;
+ ZoneDeque<DeoptimizationExit*> deoptimization_exits_;
ZoneDeque<DeoptimizationState*> deoptimization_states_;
ZoneDeque<Handle<Object>> deoptimization_literals_;
size_t inlined_function_count_;
diff --git a/deps/v8/src/compiler/code-stub-assembler.cc b/deps/v8/src/compiler/code-stub-assembler.cc
index 45f47d3310..bbb4d6353b 100644
--- a/deps/v8/src/compiler/code-stub-assembler.cc
+++ b/deps/v8/src/compiler/code-stub-assembler.cc
@@ -28,12 +28,29 @@ CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
const CallInterfaceDescriptor& descriptor,
Code::Flags flags, const char* name,
size_t result_size)
- : raw_assembler_(new RawMachineAssembler(
- isolate, new (zone) Graph(zone),
+ : CodeStubAssembler(
+ isolate, zone,
Linkage::GetStubCallDescriptor(
isolate, zone, descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size))),
+ MachineType::AnyTagged(), result_size),
+ flags, name) {}
+
+CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
+ int parameter_count, Code::Flags flags,
+ const char* name)
+ : CodeStubAssembler(isolate, zone, Linkage::GetJSCallDescriptor(
+ zone, false, parameter_count,
+ CallDescriptor::kNoFlags),
+ flags, name) {}
+
+CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
+ CallDescriptor* call_descriptor,
+ Code::Flags flags, const char* name)
+ : raw_assembler_(new RawMachineAssembler(
+ isolate, new (zone) Graph(zone), call_descriptor,
+ MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags())),
flags_(flags),
name_(name),
code_generated_(false),
@@ -72,6 +89,9 @@ Node* CodeStubAssembler::NumberConstant(double value) {
return raw_assembler_->NumberConstant(value);
}
+Node* CodeStubAssembler::SmiConstant(Smi* value) {
+ return IntPtrConstant(bit_cast<intptr_t>(value));
+}
Node* CodeStubAssembler::HeapConstant(Handle<HeapObject> object) {
return raw_assembler_->HeapConstant(object);
@@ -86,11 +106,30 @@ Node* CodeStubAssembler::ExternalConstant(ExternalReference address) {
return raw_assembler_->ExternalConstant(address);
}
+Node* CodeStubAssembler::Float64Constant(double value) {
+ return raw_assembler_->Float64Constant(value);
+}
+
+Node* CodeStubAssembler::BooleanMapConstant() {
+ return HeapConstant(isolate()->factory()->boolean_map());
+}
+
+Node* CodeStubAssembler::HeapNumberMapConstant() {
+ return HeapConstant(isolate()->factory()->heap_number_map());
+}
+
+Node* CodeStubAssembler::NullConstant() {
+ return LoadRoot(Heap::kNullValueRootIndex);
+}
+
+Node* CodeStubAssembler::UndefinedConstant() {
+ return LoadRoot(Heap::kUndefinedValueRootIndex);
+}
+
Node* CodeStubAssembler::Parameter(int value) {
return raw_assembler_->Parameter(value);
}
-
void CodeStubAssembler::Return(Node* value) {
return raw_assembler_->Return(value);
}
@@ -112,19 +151,253 @@ Node* CodeStubAssembler::LoadStackPointer() {
}
Node* CodeStubAssembler::SmiShiftBitsConstant() {
- return Int32Constant(kSmiShiftSize + kSmiTagSize);
+ return IntPtrConstant(kSmiShiftSize + kSmiTagSize);
+}
+
+Node* CodeStubAssembler::Float64Round(Node* x) {
+ Node* one = Float64Constant(1.0);
+ Node* one_half = Float64Constant(0.5);
+
+ Variable var_x(this, MachineRepresentation::kFloat64);
+ Label return_x(this);
+
+ // Round up {x} towards Infinity.
+ var_x.Bind(Float64Ceil(x));
+
+ GotoIf(Float64LessThanOrEqual(Float64Sub(var_x.value(), one_half), x),
+ &return_x);
+ var_x.Bind(Float64Sub(var_x.value(), one));
+ Goto(&return_x);
+
+ Bind(&return_x);
+ return var_x.value();
+}
+
+Node* CodeStubAssembler::Float64Ceil(Node* x) {
+ if (raw_assembler_->machine()->Float64RoundUp().IsSupported()) {
+ return raw_assembler_->Float64RoundUp(x);
+ }
+
+ Node* one = Float64Constant(1.0);
+ Node* zero = Float64Constant(0.0);
+ Node* two_52 = Float64Constant(4503599627370496.0E0);
+ Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
+
+ Variable var_x(this, MachineRepresentation::kFloat64);
+ Label return_x(this), return_minus_x(this);
+ var_x.Bind(x);
+
+ // Check if {x} is greater than zero.
+ Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
+ Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
+ &if_xnotgreaterthanzero);
+
+ Bind(&if_xgreaterthanzero);
+ {
+ // Just return {x} unless it's in the range ]0,2^52[.
+ GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
+
+ // Round positive {x} towards Infinity.
+ var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
+ GotoUnless(Float64LessThan(var_x.value(), x), &return_x);
+ var_x.Bind(Float64Add(var_x.value(), one));
+ Goto(&return_x);
+ }
+
+ Bind(&if_xnotgreaterthanzero);
+ {
+ // Just return {x} unless it's in the range ]-2^52,0[
+ GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
+ GotoUnless(Float64LessThan(x, zero), &return_x);
+
+ // Round negated {x} towards Infinity and return the result negated.
+ Node* minus_x = Float64Neg(x);
+ var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
+ GotoUnless(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
+ var_x.Bind(Float64Sub(var_x.value(), one));
+ Goto(&return_minus_x);
+ }
+
+ Bind(&return_minus_x);
+ var_x.Bind(Float64Neg(var_x.value()));
+ Goto(&return_x);
+
+ Bind(&return_x);
+ return var_x.value();
}
+Node* CodeStubAssembler::Float64Floor(Node* x) {
+ if (raw_assembler_->machine()->Float64RoundDown().IsSupported()) {
+ return raw_assembler_->Float64RoundDown(x);
+ }
+
+ Node* one = Float64Constant(1.0);
+ Node* zero = Float64Constant(0.0);
+ Node* two_52 = Float64Constant(4503599627370496.0E0);
+ Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
+
+ Variable var_x(this, MachineRepresentation::kFloat64);
+ Label return_x(this), return_minus_x(this);
+ var_x.Bind(x);
+
+ // Check if {x} is greater than zero.
+ Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
+ Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
+ &if_xnotgreaterthanzero);
+
+ Bind(&if_xgreaterthanzero);
+ {
+ // Just return {x} unless it's in the range ]0,2^52[.
+ GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
+
+ // Round positive {x} towards -Infinity.
+ var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
+ GotoUnless(Float64GreaterThan(var_x.value(), x), &return_x);
+ var_x.Bind(Float64Sub(var_x.value(), one));
+ Goto(&return_x);
+ }
+
+ Bind(&if_xnotgreaterthanzero);
+ {
+ // Just return {x} unless it's in the range ]-2^52,0[
+ GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
+ GotoUnless(Float64LessThan(x, zero), &return_x);
+
+ // Round negated {x} towards -Infinity and return the result negated.
+ Node* minus_x = Float64Neg(x);
+ var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
+ GotoUnless(Float64LessThan(var_x.value(), minus_x), &return_minus_x);
+ var_x.Bind(Float64Add(var_x.value(), one));
+ Goto(&return_minus_x);
+ }
+
+ Bind(&return_minus_x);
+ var_x.Bind(Float64Neg(var_x.value()));
+ Goto(&return_x);
+
+ Bind(&return_x);
+ return var_x.value();
+}
+
+Node* CodeStubAssembler::Float64Trunc(Node* x) {
+ if (raw_assembler_->machine()->Float64RoundTruncate().IsSupported()) {
+ return raw_assembler_->Float64RoundTruncate(x);
+ }
+
+ Node* one = Float64Constant(1.0);
+ Node* zero = Float64Constant(0.0);
+ Node* two_52 = Float64Constant(4503599627370496.0E0);
+ Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
+
+ Variable var_x(this, MachineRepresentation::kFloat64);
+ Label return_x(this), return_minus_x(this);
+ var_x.Bind(x);
+
+ // Check if {x} is greater than 0.
+ Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
+ Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
+ &if_xnotgreaterthanzero);
+
+ Bind(&if_xgreaterthanzero);
+ {
+ if (raw_assembler_->machine()->Float64RoundDown().IsSupported()) {
+ var_x.Bind(raw_assembler_->Float64RoundDown(x));
+ } else {
+ // Just return {x} unless it's in the range ]0,2^52[.
+ GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
+
+ // Round positive {x} towards -Infinity.
+ var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
+ GotoUnless(Float64GreaterThan(var_x.value(), x), &return_x);
+ var_x.Bind(Float64Sub(var_x.value(), one));
+ }
+ Goto(&return_x);
+ }
+
+ Bind(&if_xnotgreaterthanzero);
+ {
+ if (raw_assembler_->machine()->Float64RoundUp().IsSupported()) {
+ var_x.Bind(raw_assembler_->Float64RoundUp(x));
+ Goto(&return_x);
+ } else {
+ // Just return {x} unless its in the range ]-2^52,0[.
+ GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
+ GotoUnless(Float64LessThan(x, zero), &return_x);
+
+ // Round negated {x} towards -Infinity and return result negated.
+ Node* minus_x = Float64Neg(x);
+ var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
+ GotoUnless(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
+ var_x.Bind(Float64Sub(var_x.value(), one));
+ Goto(&return_minus_x);
+ }
+ }
+
+ Bind(&return_minus_x);
+ var_x.Bind(Float64Neg(var_x.value()));
+ Goto(&return_x);
+
+ Bind(&return_x);
+ return var_x.value();
+}
Node* CodeStubAssembler::SmiTag(Node* value) {
return raw_assembler_->WordShl(value, SmiShiftBitsConstant());
}
-
Node* CodeStubAssembler::SmiUntag(Node* value) {
return raw_assembler_->WordSar(value, SmiShiftBitsConstant());
}
+Node* CodeStubAssembler::SmiToWord32(Node* value) {
+ Node* result = raw_assembler_->WordSar(value, SmiShiftBitsConstant());
+ if (raw_assembler_->machine()->Is64()) {
+ result = raw_assembler_->TruncateInt64ToInt32(result);
+ }
+ return result;
+}
+
+Node* CodeStubAssembler::SmiToFloat64(Node* value) {
+ return ChangeInt32ToFloat64(SmiUntag(value));
+}
+
+Node* CodeStubAssembler::SmiAdd(Node* a, Node* b) { return IntPtrAdd(a, b); }
+
+Node* CodeStubAssembler::SmiAddWithOverflow(Node* a, Node* b) {
+ return IntPtrAddWithOverflow(a, b);
+}
+
+Node* CodeStubAssembler::SmiSub(Node* a, Node* b) { return IntPtrSub(a, b); }
+
+Node* CodeStubAssembler::SmiSubWithOverflow(Node* a, Node* b) {
+ return IntPtrSubWithOverflow(a, b);
+}
+
+Node* CodeStubAssembler::SmiEqual(Node* a, Node* b) { return WordEqual(a, b); }
+
+Node* CodeStubAssembler::SmiLessThan(Node* a, Node* b) {
+ return IntPtrLessThan(a, b);
+}
+
+Node* CodeStubAssembler::SmiLessThanOrEqual(Node* a, Node* b) {
+ return IntPtrLessThanOrEqual(a, b);
+}
+
+Node* CodeStubAssembler::SmiMin(Node* a, Node* b) {
+ // TODO(bmeurer): Consider using Select once available.
+ Variable min(this, MachineRepresentation::kTagged);
+ Label if_a(this), if_b(this), join(this);
+ BranchIfSmiLessThan(a, b, &if_a, &if_b);
+ Bind(&if_a);
+ min.Bind(a);
+ Goto(&join);
+ Bind(&if_b);
+ min.Bind(b);
+ Goto(&join);
+ Bind(&join);
+ return min.value();
+}
+
#define DEFINE_CODE_STUB_ASSEMBER_BINARY_OP(name) \
Node* CodeStubAssembler::name(Node* a, Node* b) { \
return raw_assembler_->name(a, b); \
@@ -132,56 +405,129 @@ Node* CodeStubAssembler::SmiUntag(Node* value) {
CODE_STUB_ASSEMBLER_BINARY_OP_LIST(DEFINE_CODE_STUB_ASSEMBER_BINARY_OP)
#undef DEFINE_CODE_STUB_ASSEMBER_BINARY_OP
-Node* CodeStubAssembler::ChangeInt32ToInt64(Node* value) {
- return raw_assembler_->ChangeInt32ToInt64(value);
-}
-
Node* CodeStubAssembler::WordShl(Node* value, int shift) {
- return raw_assembler_->WordShl(value, Int32Constant(shift));
+ return raw_assembler_->WordShl(value, IntPtrConstant(shift));
}
+#define DEFINE_CODE_STUB_ASSEMBER_UNARY_OP(name) \
+ Node* CodeStubAssembler::name(Node* a) { return raw_assembler_->name(a); }
+CODE_STUB_ASSEMBLER_UNARY_OP_LIST(DEFINE_CODE_STUB_ASSEMBER_UNARY_OP)
+#undef DEFINE_CODE_STUB_ASSEMBER_UNARY_OP
+
Node* CodeStubAssembler::WordIsSmi(Node* a) {
- return WordEqual(raw_assembler_->WordAnd(a, Int32Constant(kSmiTagMask)),
- Int32Constant(0));
+ return WordEqual(raw_assembler_->WordAnd(a, IntPtrConstant(kSmiTagMask)),
+ IntPtrConstant(0));
+}
+
+Node* CodeStubAssembler::WordIsPositiveSmi(Node* a) {
+ return WordEqual(
+ raw_assembler_->WordAnd(a, IntPtrConstant(kSmiTagMask | kSmiSignMask)),
+ IntPtrConstant(0));
}
-Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset) {
- return raw_assembler_->Load(MachineType::AnyTagged(), buffer,
- IntPtrConstant(offset));
+Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset,
+ MachineType rep) {
+ return raw_assembler_->Load(rep, buffer, IntPtrConstant(offset));
}
-Node* CodeStubAssembler::LoadObjectField(Node* object, int offset) {
- return raw_assembler_->Load(MachineType::AnyTagged(), object,
+Node* CodeStubAssembler::LoadObjectField(Node* object, int offset,
+ MachineType rep) {
+ return raw_assembler_->Load(rep, object,
IntPtrConstant(offset - kHeapObjectTag));
}
+Node* CodeStubAssembler::LoadHeapNumberValue(Node* object) {
+ return Load(MachineType::Float64(), object,
+ IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::StoreHeapNumberValue(Node* object, Node* value) {
+ return StoreNoWriteBarrier(
+ MachineRepresentation::kFloat64, object,
+ IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag), value);
+}
+
+Node* CodeStubAssembler::TruncateHeapNumberValueToWord32(Node* object) {
+ Node* value = LoadHeapNumberValue(object);
+ return raw_assembler_->TruncateFloat64ToInt32(TruncationMode::kJavaScript,
+ value);
+}
+
+Node* CodeStubAssembler::LoadMapBitField(Node* map) {
+ return Load(MachineType::Uint8(), map,
+ IntPtrConstant(Map::kBitFieldOffset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::LoadMapBitField2(Node* map) {
+ return Load(MachineType::Uint8(), map,
+ IntPtrConstant(Map::kBitField2Offset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::LoadMapBitField3(Node* map) {
+ return Load(MachineType::Uint32(), map,
+ IntPtrConstant(Map::kBitField3Offset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::LoadMapInstanceType(Node* map) {
+ return Load(MachineType::Uint8(), map,
+ IntPtrConstant(Map::kInstanceTypeOffset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::LoadMapDescriptors(Node* map) {
+ return LoadObjectField(map, Map::kDescriptorsOffset);
+}
+
+Node* CodeStubAssembler::LoadNameHash(Node* name) {
+ return Load(MachineType::Uint32(), name,
+ IntPtrConstant(Name::kHashFieldOffset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::LoadFixedArrayElementInt32Index(
+ Node* object, Node* int32_index, int additional_offset) {
+ Node* header_size = IntPtrConstant(additional_offset +
+ FixedArray::kHeaderSize - kHeapObjectTag);
+ Node* scaled_index = WordShl(int32_index, IntPtrConstant(kPointerSizeLog2));
+ Node* offset = IntPtrAdd(scaled_index, header_size);
+ return Load(MachineType::AnyTagged(), object, offset);
+}
+
Node* CodeStubAssembler::LoadFixedArrayElementSmiIndex(Node* object,
Node* smi_index,
int additional_offset) {
- Node* header_size = raw_assembler_->Int32Constant(
- additional_offset + FixedArray::kHeaderSize - kHeapObjectTag);
+ int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
+ Node* header_size = IntPtrConstant(additional_offset +
+ FixedArray::kHeaderSize - kHeapObjectTag);
Node* scaled_index =
- (kSmiShiftSize == 0)
- ? raw_assembler_->Word32Shl(
- smi_index, Int32Constant(kPointerSizeLog2 - kSmiTagSize))
- : raw_assembler_->Word32Shl(SmiUntag(smi_index),
- Int32Constant(kPointerSizeLog2));
- Node* offset = raw_assembler_->Int32Add(scaled_index, header_size);
- return raw_assembler_->Load(MachineType::AnyTagged(), object, offset);
+ (kSmiShiftBits > kPointerSizeLog2)
+ ? WordSar(smi_index, IntPtrConstant(kSmiShiftBits - kPointerSizeLog2))
+ : WordShl(smi_index,
+ IntPtrConstant(kPointerSizeLog2 - kSmiShiftBits));
+ Node* offset = IntPtrAdd(scaled_index, header_size);
+ return Load(MachineType::AnyTagged(), object, offset);
}
Node* CodeStubAssembler::LoadFixedArrayElementConstantIndex(Node* object,
int index) {
- Node* offset = raw_assembler_->Int32Constant(
- FixedArray::kHeaderSize - kHeapObjectTag + index * kPointerSize);
+ Node* offset = IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag +
+ index * kPointerSize);
return raw_assembler_->Load(MachineType::AnyTagged(), object, offset);
}
+Node* CodeStubAssembler::StoreFixedArrayElementNoWriteBarrier(Node* object,
+ Node* index,
+ Node* value) {
+ Node* offset =
+ IntPtrAdd(WordShl(index, IntPtrConstant(kPointerSizeLog2)),
+ IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag));
+ return StoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset,
+ value);
+}
+
Node* CodeStubAssembler::LoadRoot(Heap::RootListIndex root_index) {
if (isolate()->heap()->RootCanBeTreatedAsConstant(root_index)) {
Handle<Object> root = isolate()->heap()->root_handle(root_index);
if (root->IsSmi()) {
- return Int32Constant(Handle<Smi>::cast(root)->value());
+ return SmiConstant(Smi::cast(*root));
} else {
return HeapConstant(Handle<HeapObject>::cast(root));
}
@@ -197,6 +543,135 @@ Node* CodeStubAssembler::LoadRoot(Heap::RootListIndex root_index) {
return nullptr;
}
+Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
+ AllocationFlags flags,
+ Node* top_address,
+ Node* limit_address) {
+ Node* top = Load(MachineType::Pointer(), top_address);
+ Node* limit = Load(MachineType::Pointer(), limit_address);
+
+ // If there's not enough space, call the runtime.
+ RawMachineLabel runtime_call(RawMachineLabel::kDeferred), no_runtime_call,
+ merge_runtime;
+ raw_assembler_->Branch(
+ raw_assembler_->IntPtrLessThan(IntPtrSub(limit, top), size_in_bytes),
+ &runtime_call, &no_runtime_call);
+
+ raw_assembler_->Bind(&runtime_call);
+ // AllocateInTargetSpace does not use the context.
+ Node* context = IntPtrConstant(0);
+ Node* runtime_flags = SmiTag(Int32Constant(
+ AllocateDoubleAlignFlag::encode(false) |
+ AllocateTargetSpace::encode(flags & kPretenured
+ ? AllocationSpace::OLD_SPACE
+ : AllocationSpace::NEW_SPACE)));
+ Node* runtime_result = CallRuntime(Runtime::kAllocateInTargetSpace, context,
+ SmiTag(size_in_bytes), runtime_flags);
+ raw_assembler_->Goto(&merge_runtime);
+
+ // When there is enough space, return `top' and bump it up.
+ raw_assembler_->Bind(&no_runtime_call);
+ Node* no_runtime_result = top;
+ StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
+ IntPtrAdd(top, size_in_bytes));
+ no_runtime_result =
+ IntPtrAdd(no_runtime_result, IntPtrConstant(kHeapObjectTag));
+ raw_assembler_->Goto(&merge_runtime);
+
+ raw_assembler_->Bind(&merge_runtime);
+ return raw_assembler_->Phi(MachineType::PointerRepresentation(),
+ runtime_result, no_runtime_result);
+}
+
+Node* CodeStubAssembler::AllocateRawAligned(Node* size_in_bytes,
+ AllocationFlags flags,
+ Node* top_address,
+ Node* limit_address) {
+ Node* top = Load(MachineType::Pointer(), top_address);
+ Node* limit = Load(MachineType::Pointer(), limit_address);
+ Node* adjusted_size = size_in_bytes;
+ if (flags & kDoubleAlignment) {
+ // TODO(epertoso): Simd128 alignment.
+ RawMachineLabel aligned, not_aligned, merge;
+ raw_assembler_->Branch(WordAnd(top, IntPtrConstant(kDoubleAlignmentMask)),
+ &not_aligned, &aligned);
+
+ raw_assembler_->Bind(&not_aligned);
+ Node* not_aligned_size =
+ IntPtrAdd(size_in_bytes, IntPtrConstant(kPointerSize));
+ raw_assembler_->Goto(&merge);
+
+ raw_assembler_->Bind(&aligned);
+ raw_assembler_->Goto(&merge);
+
+ raw_assembler_->Bind(&merge);
+ adjusted_size = raw_assembler_->Phi(MachineType::PointerRepresentation(),
+ not_aligned_size, adjusted_size);
+ }
+
+ Node* address = AllocateRawUnaligned(adjusted_size, kNone, top, limit);
+
+ RawMachineLabel needs_filler, doesnt_need_filler, merge_address;
+ raw_assembler_->Branch(
+ raw_assembler_->IntPtrEqual(adjusted_size, size_in_bytes),
+ &doesnt_need_filler, &needs_filler);
+
+ raw_assembler_->Bind(&needs_filler);
+ // Store a filler and increase the address by kPointerSize.
+ // TODO(epertoso): this code assumes that we only align to kDoubleSize. Change
+ // it when Simd128 alignment is supported.
+ StoreNoWriteBarrier(MachineType::PointerRepresentation(), top,
+ LoadRoot(Heap::kOnePointerFillerMapRootIndex));
+ Node* address_with_filler = IntPtrAdd(address, IntPtrConstant(kPointerSize));
+ raw_assembler_->Goto(&merge_address);
+
+ raw_assembler_->Bind(&doesnt_need_filler);
+ Node* address_without_filler = address;
+ raw_assembler_->Goto(&merge_address);
+
+ raw_assembler_->Bind(&merge_address);
+ address = raw_assembler_->Phi(MachineType::PointerRepresentation(),
+ address_with_filler, address_without_filler);
+ // Update the top.
+ StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
+ IntPtrAdd(top, adjusted_size));
+ return address;
+}
+
+Node* CodeStubAssembler::Allocate(int size_in_bytes, AllocationFlags flags) {
+ bool const new_space = !(flags & kPretenured);
+ Node* top_address = ExternalConstant(
+ new_space
+ ? ExternalReference::new_space_allocation_top_address(isolate())
+ : ExternalReference::old_space_allocation_top_address(isolate()));
+ Node* limit_address = ExternalConstant(
+ new_space
+ ? ExternalReference::new_space_allocation_limit_address(isolate())
+ : ExternalReference::old_space_allocation_limit_address(isolate()));
+
+#ifdef V8_HOST_ARCH_32_BIT
+ if (flags & kDoubleAlignment) {
+ return AllocateRawAligned(IntPtrConstant(size_in_bytes), flags, top_address,
+ limit_address);
+ }
+#endif
+
+ return AllocateRawUnaligned(IntPtrConstant(size_in_bytes), flags, top_address,
+ limit_address);
+}
+
+Node* CodeStubAssembler::AllocateHeapNumber() {
+ Node* result = Allocate(HeapNumber::kSize, kNone);
+ StoreMapNoWriteBarrier(result, HeapNumberMapConstant());
+ return result;
+}
+
+Node* CodeStubAssembler::AllocateHeapNumberWithValue(Node* value) {
+ Node* result = AllocateHeapNumber();
+ StoreHeapNumberValue(result, value);
+ return result;
+}
+
Node* CodeStubAssembler::Load(MachineType rep, Node* base) {
return raw_assembler_->Load(rep, base);
}
@@ -230,6 +705,232 @@ Node* CodeStubAssembler::Projection(int index, Node* value) {
return raw_assembler_->Projection(index, value);
}
+Node* CodeStubAssembler::LoadMap(Node* object) {
+ return LoadObjectField(object, HeapObject::kMapOffset);
+}
+
+Node* CodeStubAssembler::StoreMapNoWriteBarrier(Node* object, Node* map) {
+ return StoreNoWriteBarrier(
+ MachineRepresentation::kTagged, object,
+ IntPtrConstant(HeapNumber::kMapOffset - kHeapObjectTag), map);
+}
+
+Node* CodeStubAssembler::LoadInstanceType(Node* object) {
+ return LoadMapInstanceType(LoadMap(object));
+}
+
+Node* CodeStubAssembler::LoadElements(Node* object) {
+ return LoadObjectField(object, JSObject::kElementsOffset);
+}
+
+Node* CodeStubAssembler::LoadFixedArrayBaseLength(Node* array) {
+ return LoadObjectField(array, FixedArrayBase::kLengthOffset);
+}
+
+Node* CodeStubAssembler::BitFieldDecode(Node* word32, uint32_t shift,
+ uint32_t mask) {
+ return raw_assembler_->Word32Shr(
+ raw_assembler_->Word32And(word32, raw_assembler_->Int32Constant(mask)),
+ raw_assembler_->Int32Constant(shift));
+}
+
+Node* CodeStubAssembler::ChangeFloat64ToTagged(Node* value) {
+ Node* value32 = raw_assembler_->TruncateFloat64ToInt32(
+ TruncationMode::kRoundToZero, value);
+ Node* value64 = ChangeInt32ToFloat64(value32);
+
+ Label if_valueisint32(this), if_valueisheapnumber(this), if_join(this);
+
+ Label if_valueisequal(this), if_valueisnotequal(this);
+ Branch(Float64Equal(value, value64), &if_valueisequal, &if_valueisnotequal);
+ Bind(&if_valueisequal);
+ {
+ Label if_valueiszero(this), if_valueisnotzero(this);
+ Branch(Float64Equal(value, Float64Constant(0.0)), &if_valueiszero,
+ &if_valueisnotzero);
+
+ Bind(&if_valueiszero);
+ BranchIfInt32LessThan(raw_assembler_->Float64ExtractHighWord32(value),
+ Int32Constant(0), &if_valueisheapnumber,
+ &if_valueisint32);
+
+ Bind(&if_valueisnotzero);
+ Goto(&if_valueisint32);
+ }
+ Bind(&if_valueisnotequal);
+ Goto(&if_valueisheapnumber);
+
+ Variable var_result(this, MachineRepresentation::kTagged);
+ Bind(&if_valueisint32);
+ {
+ if (raw_assembler_->machine()->Is64()) {
+ Node* result = SmiTag(ChangeInt32ToInt64(value32));
+ var_result.Bind(result);
+ Goto(&if_join);
+ } else {
+ Node* pair = Int32AddWithOverflow(value32, value32);
+ Node* overflow = Projection(1, pair);
+ Label if_overflow(this, Label::kDeferred), if_notoverflow(this);
+ Branch(overflow, &if_overflow, &if_notoverflow);
+ Bind(&if_overflow);
+ Goto(&if_valueisheapnumber);
+ Bind(&if_notoverflow);
+ {
+ Node* result = Projection(0, pair);
+ var_result.Bind(result);
+ Goto(&if_join);
+ }
+ }
+ }
+ Bind(&if_valueisheapnumber);
+ {
+ Node* result = AllocateHeapNumberWithValue(value);
+ var_result.Bind(result);
+ Goto(&if_join);
+ }
+ Bind(&if_join);
+ return var_result.value();
+}
+
+Node* CodeStubAssembler::ChangeInt32ToTagged(Node* value) {
+ if (raw_assembler_->machine()->Is64()) {
+ return SmiTag(ChangeInt32ToInt64(value));
+ }
+ Variable var_result(this, MachineRepresentation::kTagged);
+ Node* pair = Int32AddWithOverflow(value, value);
+ Node* overflow = Projection(1, pair);
+ Label if_overflow(this, Label::kDeferred), if_notoverflow(this),
+ if_join(this);
+ Branch(overflow, &if_overflow, &if_notoverflow);
+ Bind(&if_overflow);
+ {
+ Node* value64 = ChangeInt32ToFloat64(value);
+ Node* result = AllocateHeapNumberWithValue(value64);
+ var_result.Bind(result);
+ }
+ Goto(&if_join);
+ Bind(&if_notoverflow);
+ {
+ Node* result = Projection(0, pair);
+ var_result.Bind(result);
+ }
+ Goto(&if_join);
+ Bind(&if_join);
+ return var_result.value();
+}
+
+Node* CodeStubAssembler::TruncateTaggedToFloat64(Node* context, Node* value) {
+ // We might need to loop once due to ToNumber conversion.
+ Variable var_value(this, MachineRepresentation::kTagged),
+ var_result(this, MachineRepresentation::kFloat64);
+ Label loop(this, &var_value), done_loop(this, &var_result);
+ var_value.Bind(value);
+ Goto(&loop);
+ Bind(&loop);
+ {
+ // Load the current {value}.
+ value = var_value.value();
+
+ // Check if the {value} is a Smi or a HeapObject.
+ Label if_valueissmi(this), if_valueisnotsmi(this);
+ Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+
+ Bind(&if_valueissmi);
+ {
+ // Convert the Smi {value}.
+ var_result.Bind(SmiToFloat64(value));
+ Goto(&done_loop);
+ }
+
+ Bind(&if_valueisnotsmi);
+ {
+ // Check if {value} is a HeapNumber.
+ Label if_valueisheapnumber(this),
+ if_valueisnotheapnumber(this, Label::kDeferred);
+ Branch(WordEqual(LoadMap(value), HeapNumberMapConstant()),
+ &if_valueisheapnumber, &if_valueisnotheapnumber);
+
+ Bind(&if_valueisheapnumber);
+ {
+ // Load the floating point value.
+ var_result.Bind(LoadHeapNumberValue(value));
+ Goto(&done_loop);
+ }
+
+ Bind(&if_valueisnotheapnumber);
+ {
+ // Convert the {value} to a Number first.
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_value.Bind(CallStub(callable, context, value));
+ Goto(&loop);
+ }
+ }
+ }
+ Bind(&done_loop);
+ return var_result.value();
+}
+
+Node* CodeStubAssembler::TruncateTaggedToWord32(Node* context, Node* value) {
+ // We might need to loop once due to ToNumber conversion.
+ Variable var_value(this, MachineRepresentation::kTagged),
+ var_result(this, MachineRepresentation::kWord32);
+ Label loop(this, &var_value), done_loop(this, &var_result);
+ var_value.Bind(value);
+ Goto(&loop);
+ Bind(&loop);
+ {
+ // Load the current {value}.
+ value = var_value.value();
+
+ // Check if the {value} is a Smi or a HeapObject.
+ Label if_valueissmi(this), if_valueisnotsmi(this);
+ Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+
+ Bind(&if_valueissmi);
+ {
+ // Convert the Smi {value}.
+ var_result.Bind(SmiToWord32(value));
+ Goto(&done_loop);
+ }
+
+ Bind(&if_valueisnotsmi);
+ {
+ // Check if {value} is a HeapNumber.
+ Label if_valueisheapnumber(this),
+ if_valueisnotheapnumber(this, Label::kDeferred);
+ Branch(WordEqual(LoadMap(value), HeapNumberMapConstant()),
+ &if_valueisheapnumber, &if_valueisnotheapnumber);
+
+ Bind(&if_valueisheapnumber);
+ {
+ // Truncate the floating point value.
+ var_result.Bind(TruncateHeapNumberValueToWord32(value));
+ Goto(&done_loop);
+ }
+
+ Bind(&if_valueisnotheapnumber);
+ {
+ // Convert the {value} to a Number first.
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_value.Bind(CallStub(callable, context, value));
+ Goto(&loop);
+ }
+ }
+ }
+ Bind(&done_loop);
+ return var_result.value();
+}
+
+void CodeStubAssembler::BranchIf(Node* condition, Label* if_true,
+ Label* if_false) {
+ Label if_condition_is_true(this), if_condition_is_false(this);
+ Branch(condition, &if_condition_is_true, &if_condition_is_false);
+ Bind(&if_condition_is_true);
+ Goto(if_true);
+ Bind(&if_condition_is_false);
+ Goto(if_false);
+}
+
Node* CodeStubAssembler::CallN(CallDescriptor* descriptor, Node* code_target,
Node** args) {
CallPrologue();
@@ -290,6 +991,11 @@ Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
}
Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+ Node* context) {
+ return raw_assembler_->TailCallRuntime0(function_id, context);
+}
+
+Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
Node* context, Node* arg1) {
return raw_assembler_->TailCallRuntime1(function_id, arg1, context);
}
@@ -314,6 +1020,12 @@ Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
context);
}
+Node* CodeStubAssembler::CallStub(Callable const& callable, Node* context,
+ Node* arg1, size_t result_size) {
+ Node* target = HeapConstant(callable.code());
+ return CallStub(callable.descriptor(), target, context, arg1, result_size);
+}
+
Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
Node* target, Node* context, Node* arg1,
size_t result_size) {
@@ -401,12 +1113,28 @@ Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
return CallN(call_descriptor, target, args);
}
-Node* CodeStubAssembler::TailCallStub(CodeStub& stub, Node** args) {
- Node* code_target = HeapConstant(stub.GetCode());
- CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), stub.GetCallInterfaceDescriptor(),
- stub.GetStackParameterCount(), CallDescriptor::kSupportsTailCalls);
- return raw_assembler_->TailCallN(descriptor, code_target, args);
+Node* CodeStubAssembler::TailCallStub(Callable const& callable, Node* context,
+ Node* arg1, Node* arg2,
+ size_t result_size) {
+ Node* target = HeapConstant(callable.code());
+ return TailCallStub(callable.descriptor(), target, context, arg1, arg2,
+ result_size);
+}
+
+Node* CodeStubAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node* context, Node* arg1,
+ Node* arg2, size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ Node** args = zone()->NewArray<Node*>(3);
+ args[0] = arg1;
+ args[1] = arg2;
+ args[2] = context;
+
+ return raw_assembler_->TailCallN(call_descriptor, target, args);
}
Node* CodeStubAssembler::TailCall(
@@ -425,6 +1153,18 @@ void CodeStubAssembler::Goto(CodeStubAssembler::Label* label) {
raw_assembler_->Goto(label->label_);
}
+void CodeStubAssembler::GotoIf(Node* condition, Label* true_label) {
+ Label false_label(this);
+ Branch(condition, true_label, &false_label);
+ Bind(&false_label);
+}
+
+void CodeStubAssembler::GotoUnless(Node* condition, Label* false_label) {
+ Label true_label(this);
+ Branch(condition, &true_label, false_label);
+ Bind(&true_label);
+}
+
void CodeStubAssembler::Branch(Node* condition,
CodeStubAssembler::Label* true_label,
CodeStubAssembler::Label* false_label) {
@@ -450,11 +1190,15 @@ void CodeStubAssembler::Switch(Node* index, Label* default_label,
}
// RawMachineAssembler delegate helpers:
-Isolate* CodeStubAssembler::isolate() { return raw_assembler_->isolate(); }
+Isolate* CodeStubAssembler::isolate() const {
+ return raw_assembler_->isolate();
+}
+
+Factory* CodeStubAssembler::factory() const { return isolate()->factory(); }
-Graph* CodeStubAssembler::graph() { return raw_assembler_->graph(); }
+Graph* CodeStubAssembler::graph() const { return raw_assembler_->graph(); }
-Zone* CodeStubAssembler::zone() { return raw_assembler_->zone(); }
+Zone* CodeStubAssembler::zone() const { return raw_assembler_->zone(); }
// The core implementation of Variable is stored through an indirection so
// that it can outlive the often block-scoped Variable declarations. This is
@@ -488,27 +1232,20 @@ bool CodeStubAssembler::Variable::IsBound() const {
return impl_->value_ != nullptr;
}
-CodeStubAssembler::Label::Label(CodeStubAssembler* assembler)
- : bound_(false), merge_count_(0), assembler_(assembler), label_(nullptr) {
- void* buffer = assembler->zone()->New(sizeof(RawMachineLabel));
- label_ = new (buffer) RawMachineLabel();
-}
-
CodeStubAssembler::Label::Label(CodeStubAssembler* assembler,
int merged_value_count,
- CodeStubAssembler::Variable** merged_variables)
+ CodeStubAssembler::Variable** merged_variables,
+ CodeStubAssembler::Label::Type type)
: bound_(false), merge_count_(0), assembler_(assembler), label_(nullptr) {
void* buffer = assembler->zone()->New(sizeof(RawMachineLabel));
- label_ = new (buffer) RawMachineLabel();
+ label_ = new (buffer)
+ RawMachineLabel(type == kDeferred ? RawMachineLabel::kDeferred
+ : RawMachineLabel::kNonDeferred);
for (int i = 0; i < merged_value_count; ++i) {
variable_phis_[merged_variables[i]->impl_] = nullptr;
}
}
-CodeStubAssembler::Label::Label(CodeStubAssembler* assembler,
- CodeStubAssembler::Variable* merged_variable)
- : CodeStubAssembler::Label(assembler, 1, &merged_variable) {}
-
void CodeStubAssembler::Label::MergeVariables() {
++merge_count_;
for (auto var : assembler_->variables_) {
@@ -539,16 +1276,17 @@ void CodeStubAssembler::Label::MergeVariables() {
assembler_->raw_assembler_->AppendPhiInput(phi->second, node);
} else {
auto i = variable_merges_.find(var);
- USE(i);
- // If the following assert fires, then you've declared a variable that
- // has the same bound value along all paths up until the point you bound
- // this label, but then later merged a path with a new value for the
- // variable after the label bind (it's not possible to add phis to the
- // bound label after the fact, just make sure to list the variable in
- // the label's constructor's list of merged variables).
- DCHECK(find_if(i->second.begin(), i->second.end(),
- [node](Node* e) -> bool { return node != e; }) ==
- i->second.end());
+ if (i != variable_merges_.end()) {
+ // If the following assert fires, then you've declared a variable that
+ // has the same bound value along all paths up until the point you
+ // bound this label, but then later merged a path with a new value for
+ // the variable after the label bind (it's not possible to add phis to
+ // the bound label after the fact, just make sure to list the variable
+ // in the label's constructor's list of merged variables).
+ DCHECK(find_if(i->second.begin(), i->second.end(),
+ [node](Node* e) -> bool { return node != e; }) ==
+ i->second.end());
+ }
}
}
}
diff --git a/deps/v8/src/compiler/code-stub-assembler.h b/deps/v8/src/compiler/code-stub-assembler.h
index 2ab13764c4..9fcb890606 100644
--- a/deps/v8/src/compiler/code-stub-assembler.h
+++ b/deps/v8/src/compiler/code-stub-assembler.h
@@ -19,8 +19,10 @@
namespace v8 {
namespace internal {
+class Callable;
class CallInterfaceDescriptor;
class Isolate;
+class Factory;
class Zone;
namespace compiler {
@@ -33,49 +35,91 @@ class RawMachineAssembler;
class RawMachineLabel;
class Schedule;
-#define CODE_STUB_ASSEMBLER_BINARY_OP_LIST(V) \
- V(IntPtrAdd) \
- V(IntPtrSub) \
- V(Int32Add) \
- V(Int32Sub) \
- V(Int32Mul) \
- V(Int32GreaterThanOrEqual) \
- V(WordEqual) \
- V(WordNotEqual) \
- V(WordOr) \
- V(WordAnd) \
- V(WordXor) \
- V(WordShl) \
- V(WordShr) \
- V(WordSar) \
- V(WordRor) \
- V(Word32Equal) \
- V(Word32NotEqual) \
- V(Word32Or) \
- V(Word32And) \
- V(Word32Xor) \
- V(Word32Shl) \
- V(Word32Shr) \
- V(Word32Sar) \
- V(Word32Ror) \
- V(Word64Equal) \
- V(Word64NotEqual) \
- V(Word64Or) \
- V(Word64And) \
- V(Word64Xor) \
- V(Word64Shr) \
- V(Word64Sar) \
- V(Word64Ror) \
- V(UintPtrGreaterThanOrEqual)
+#define CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
+ V(Float32Equal) \
+ V(Float32LessThan) \
+ V(Float32LessThanOrEqual) \
+ V(Float32GreaterThan) \
+ V(Float32GreaterThanOrEqual) \
+ V(Float64Equal) \
+ V(Float64LessThan) \
+ V(Float64LessThanOrEqual) \
+ V(Float64GreaterThan) \
+ V(Float64GreaterThanOrEqual) \
+ V(Int32GreaterThan) \
+ V(Int32GreaterThanOrEqual) \
+ V(Int32LessThan) \
+ V(Int32LessThanOrEqual) \
+ V(IntPtrLessThan) \
+ V(IntPtrLessThanOrEqual) \
+ V(Uint32LessThan) \
+ V(UintPtrGreaterThanOrEqual) \
+ V(WordEqual) \
+ V(WordNotEqual) \
+ V(Word32Equal) \
+ V(Word32NotEqual) \
+ V(Word64Equal) \
+ V(Word64NotEqual)
+
+#define CODE_STUB_ASSEMBLER_BINARY_OP_LIST(V) \
+ CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
+ V(Float64Add) \
+ V(Float64Sub) \
+ V(Float64InsertLowWord32) \
+ V(Float64InsertHighWord32) \
+ V(IntPtrAdd) \
+ V(IntPtrAddWithOverflow) \
+ V(IntPtrSub) \
+ V(IntPtrSubWithOverflow) \
+ V(Int32Add) \
+ V(Int32AddWithOverflow) \
+ V(Int32Sub) \
+ V(Int32Mul) \
+ V(WordOr) \
+ V(WordAnd) \
+ V(WordXor) \
+ V(WordShl) \
+ V(WordShr) \
+ V(WordSar) \
+ V(WordRor) \
+ V(Word32Or) \
+ V(Word32And) \
+ V(Word32Xor) \
+ V(Word32Shl) \
+ V(Word32Shr) \
+ V(Word32Sar) \
+ V(Word32Ror) \
+ V(Word64Or) \
+ V(Word64And) \
+ V(Word64Xor) \
+ V(Word64Shr) \
+ V(Word64Sar) \
+ V(Word64Ror)
+
+#define CODE_STUB_ASSEMBLER_UNARY_OP_LIST(V) \
+ V(Float64Neg) \
+ V(Float64Sqrt) \
+ V(ChangeFloat64ToUint32) \
+ V(ChangeInt32ToFloat64) \
+ V(ChangeInt32ToInt64) \
+ V(ChangeUint32ToFloat64) \
+ V(ChangeUint32ToUint64) \
+ V(Word32Clz)
class CodeStubAssembler {
public:
+ // Create with CallStub linkage.
// |result_size| specifies the number of results returned by the stub.
// TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
CodeStubAssembler(Isolate* isolate, Zone* zone,
const CallInterfaceDescriptor& descriptor,
Code::Flags flags, const char* name,
size_t result_size = 1);
+
+ // Create with JSCall linkage.
+ CodeStubAssembler(Isolate* isolate, Zone* zone, int parameter_count,
+ Code::Flags flags, const char* name);
+
virtual ~CodeStubAssembler();
Handle<Code> GenerateCode();
@@ -95,6 +139,14 @@ class CodeStubAssembler {
Impl* impl_;
};
+ enum AllocationFlag : uint8_t {
+ kNone = 0,
+ kDoubleAlignment = 1,
+ kPretenured = 1 << 1
+ };
+
+ typedef base::Flags<AllocationFlag> AllocationFlags;
+
// ===========================================================================
// Base Assembler
// ===========================================================================
@@ -103,15 +155,23 @@ class CodeStubAssembler {
Node* Int32Constant(int value);
Node* IntPtrConstant(intptr_t value);
Node* NumberConstant(double value);
+ Node* SmiConstant(Smi* value);
Node* HeapConstant(Handle<HeapObject> object);
Node* BooleanConstant(bool value);
Node* ExternalConstant(ExternalReference address);
+ Node* Float64Constant(double value);
+ Node* BooleanMapConstant();
+ Node* HeapNumberMapConstant();
+ Node* NullConstant();
+ Node* UndefinedConstant();
Node* Parameter(int value);
void Return(Node* value);
void Bind(Label* label);
void Goto(Label* label);
+ void GotoIf(Node* condition, Label* true_label);
+ void GotoUnless(Node* condition, Label* false_label);
void Branch(Node* condition, Label* true_label, Label* false_label);
void Switch(Node* index, Label* default_label, int32_t* case_values,
@@ -142,8 +202,10 @@ class CodeStubAssembler {
Node* WordShl(Node* value, int shift);
- // Conversions
- Node* ChangeInt32ToInt64(Node* value);
+// Unary
+#define DECLARE_CODE_STUB_ASSEMBER_UNARY_OP(name) Node* name(Node* a);
+ CODE_STUB_ASSEMBLER_UNARY_OP_LIST(DECLARE_CODE_STUB_ASSEMBER_UNARY_OP)
+#undef DECLARE_CODE_STUB_ASSEMBER_UNARY_OP
// Projections
Node* Projection(int index, Node* value);
@@ -160,6 +222,7 @@ class CodeStubAssembler {
Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
Node* arg2, Node* arg3, Node* arg4, Node* arg5);
+ Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context);
Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
Node* arg1);
Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
@@ -169,6 +232,9 @@ class CodeStubAssembler {
Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
Node* arg1, Node* arg2, Node* arg3, Node* arg4);
+ Node* CallStub(Callable const& callable, Node* context, Node* arg1,
+ size_t result_size = 1);
+
Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
Node* context, Node* arg1, size_t result_size = 1);
Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
@@ -183,7 +249,13 @@ class CodeStubAssembler {
Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
Node* arg5, size_t result_size = 1);
- Node* TailCallStub(CodeStub& stub, Node** args);
+ Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
+ Node* arg2, size_t result_size = 1);
+
+ Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node* context, Node* arg1, Node* arg2,
+ size_t result_size = 1);
+
Node* TailCall(const CallInterfaceDescriptor& descriptor, Node* target,
Node** args, size_t result_size = 1);
@@ -191,31 +263,141 @@ class CodeStubAssembler {
// Macros
// ===========================================================================
- // Tag and untag Smi values.
+ // Float64 operations.
+ Node* Float64Ceil(Node* x);
+ Node* Float64Floor(Node* x);
+ Node* Float64Round(Node* x);
+ Node* Float64Trunc(Node* x);
+
+ // Tag a Word as a Smi value.
Node* SmiTag(Node* value);
+ // Untag a Smi value as a Word.
Node* SmiUntag(Node* value);
+ // Smi conversions.
+ Node* SmiToFloat64(Node* value);
+ Node* SmiToWord32(Node* value);
+
+ // Smi operations.
+ Node* SmiAdd(Node* a, Node* b);
+ Node* SmiAddWithOverflow(Node* a, Node* b);
+ Node* SmiSub(Node* a, Node* b);
+ Node* SmiSubWithOverflow(Node* a, Node* b);
+ Node* SmiEqual(Node* a, Node* b);
+ Node* SmiLessThan(Node* a, Node* b);
+ Node* SmiLessThanOrEqual(Node* a, Node* b);
+ Node* SmiMin(Node* a, Node* b);
+
// Load a value from the root array.
Node* LoadRoot(Heap::RootListIndex root_index);
// Check a value for smi-ness
Node* WordIsSmi(Node* a);
+ // Check that the value is a positive smi.
+ Node* WordIsPositiveSmi(Node* a);
+
// Load an object pointer from a buffer that isn't in the heap.
- Node* LoadBufferObject(Node* buffer, int offset);
+ Node* LoadBufferObject(Node* buffer, int offset,
+ MachineType rep = MachineType::AnyTagged());
// Load a field from an object on the heap.
- Node* LoadObjectField(Node* object, int offset);
+ Node* LoadObjectField(Node* object, int offset,
+ MachineType rep = MachineType::AnyTagged());
+ // Load the floating point value of a HeapNumber.
+ Node* LoadHeapNumberValue(Node* object);
+ // Store the floating point value of a HeapNumber.
+ Node* StoreHeapNumberValue(Node* object, Node* value);
+ // Truncate the floating point value of a HeapNumber to an Int32.
+ Node* TruncateHeapNumberValueToWord32(Node* object);
+ // Load the bit field of a Map.
+ Node* LoadMapBitField(Node* map);
+ // Load bit field 2 of a map.
+ Node* LoadMapBitField2(Node* map);
+ // Load bit field 3 of a map.
+ Node* LoadMapBitField3(Node* map);
+ // Load the instance type of a map.
+ Node* LoadMapInstanceType(Node* map);
+ // Load the instance descriptors of a map.
+ Node* LoadMapDescriptors(Node* map);
+
+ // Load the hash field of a name.
+ Node* LoadNameHash(Node* name);
// Load an array element from a FixedArray.
+ Node* LoadFixedArrayElementInt32Index(Node* object, Node* int32_index,
+ int additional_offset = 0);
Node* LoadFixedArrayElementSmiIndex(Node* object, Node* smi_index,
int additional_offset = 0);
Node* LoadFixedArrayElementConstantIndex(Node* object, int index);
+ // Allocate an object of the given size.
+ Node* Allocate(int size, AllocationFlags flags = kNone);
+ // Allocate a HeapNumber without initializing its value.
+ Node* AllocateHeapNumber();
+ // Allocate a HeapNumber with a specific value.
+ Node* AllocateHeapNumberWithValue(Node* value);
+
+ // Store an array element to a FixedArray.
+ Node* StoreFixedArrayElementNoWriteBarrier(Node* object, Node* index,
+ Node* value);
+ // Load the Map of an HeapObject.
+ Node* LoadMap(Node* object);
+ // Store the Map of an HeapObject.
+ Node* StoreMapNoWriteBarrier(Node* object, Node* map);
+ // Load the instance type of an HeapObject.
+ Node* LoadInstanceType(Node* object);
+
+ // Load the elements backing store of a JSObject.
+ Node* LoadElements(Node* object);
+ // Load the length of a fixed array base instance.
+ Node* LoadFixedArrayBaseLength(Node* array);
+
+ // Returns a node that is true if the given bit is set in |word32|.
+ template <typename T>
+ Node* BitFieldDecode(Node* word32) {
+ return BitFieldDecode(word32, T::kShift, T::kMask);
+ }
+
+ Node* BitFieldDecode(Node* word32, uint32_t shift, uint32_t mask);
+
+ // Conversions.
+ Node* ChangeFloat64ToTagged(Node* value);
+ Node* ChangeInt32ToTagged(Node* value);
+ Node* TruncateTaggedToFloat64(Node* context, Node* value);
+ Node* TruncateTaggedToWord32(Node* context, Node* value);
+
+ // Branching helpers.
+ // TODO(danno): Can we be more cleverish wrt. edge-split?
+ void BranchIf(Node* condition, Label* if_true, Label* if_false);
+
+#define BRANCH_HELPER(name) \
+ void BranchIf##name(Node* a, Node* b, Label* if_true, Label* if_false) { \
+ BranchIf(name(a, b), if_true, if_false); \
+ }
+ CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(BRANCH_HELPER)
+#undef BRANCH_HELPER
+
+ void BranchIfSmiLessThan(Node* a, Node* b, Label* if_true, Label* if_false) {
+ BranchIf(SmiLessThan(a, b), if_true, if_false);
+ }
+
+ void BranchIfSmiLessThanOrEqual(Node* a, Node* b, Label* if_true,
+ Label* if_false) {
+ BranchIf(SmiLessThanOrEqual(a, b), if_true, if_false);
+ }
+
+ void BranchIfFloat64IsNaN(Node* value, Label* if_true, Label* if_false) {
+ BranchIfFloat64Equal(value, value, if_false, if_true);
+ }
+
+ // Helpers which delegate to RawMachineAssembler.
+ Factory* factory() const;
+ Isolate* isolate() const;
+ Zone* zone() const;
+
protected:
// Protected helpers which delegate to RawMachineAssembler.
- Graph* graph();
- Isolate* isolate();
- Zone* zone();
+ Graph* graph() const;
// Enables subclasses to perform operations before and after a call.
virtual void CallPrologue();
@@ -224,11 +406,20 @@ class CodeStubAssembler {
private:
friend class CodeStubAssemblerTester;
+ CodeStubAssembler(Isolate* isolate, Zone* zone,
+ CallDescriptor* call_descriptor, Code::Flags flags,
+ const char* name);
+
Node* CallN(CallDescriptor* descriptor, Node* code_target, Node** args);
Node* TailCallN(CallDescriptor* descriptor, Node* code_target, Node** args);
Node* SmiShiftBitsConstant();
+ Node* AllocateRawAligned(Node* size_in_bytes, AllocationFlags flags,
+ Node* top_address, Node* limit_address);
+ Node* AllocateRawUnaligned(Node* size_in_bytes, AllocationFlags flags,
+ Node* top_adddress, Node* limit_address);
+
base::SmartPointer<RawMachineAssembler> raw_assembler_;
Code::Flags flags_;
const char* name_;
@@ -238,13 +429,25 @@ class CodeStubAssembler {
DISALLOW_COPY_AND_ASSIGN(CodeStubAssembler);
};
+DEFINE_OPERATORS_FOR_FLAGS(CodeStubAssembler::AllocationFlags);
+
class CodeStubAssembler::Label {
public:
- explicit Label(CodeStubAssembler* assembler);
- Label(CodeStubAssembler* assembler, int merged_variable_count,
- CodeStubAssembler::Variable** merged_variables);
+ enum Type { kDeferred, kNonDeferred };
+
+ explicit Label(CodeStubAssembler* assembler,
+ CodeStubAssembler::Label::Type type =
+ CodeStubAssembler::Label::kNonDeferred)
+ : CodeStubAssembler::Label(assembler, 0, nullptr, type) {}
Label(CodeStubAssembler* assembler,
- CodeStubAssembler::Variable* merged_variable);
+ CodeStubAssembler::Variable* merged_variable,
+ CodeStubAssembler::Label::Type type =
+ CodeStubAssembler::Label::kNonDeferred)
+ : CodeStubAssembler::Label(assembler, 1, &merged_variable, type) {}
+ Label(CodeStubAssembler* assembler, int merged_variable_count,
+ CodeStubAssembler::Variable** merged_variables,
+ CodeStubAssembler::Label::Type type =
+ CodeStubAssembler::Label::kNonDeferred);
~Label() {}
private:
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index 2334541f8a..22e16a27f2 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -57,6 +57,9 @@ Reduction CommonOperatorReducer::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kBranch:
return ReduceBranch(node);
+ case IrOpcode::kDeoptimizeIf:
+ case IrOpcode::kDeoptimizeUnless:
+ return ReduceDeoptimizeConditional(node);
case IrOpcode::kMerge:
return ReduceMerge(node);
case IrOpcode::kEffectPhi:
@@ -123,6 +126,37 @@ Reduction CommonOperatorReducer::ReduceBranch(Node* node) {
return Replace(dead());
}
+Reduction CommonOperatorReducer::ReduceDeoptimizeConditional(Node* node) {
+ DCHECK(node->opcode() == IrOpcode::kDeoptimizeIf ||
+ node->opcode() == IrOpcode::kDeoptimizeUnless);
+ bool condition_is_true = node->opcode() == IrOpcode::kDeoptimizeUnless;
+ Node* condition = NodeProperties::GetValueInput(node, 0);
+ Node* frame_state = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ // Swap DeoptimizeIf/DeoptimizeUnless on {node} if {cond} is a BooleaNot
+ // and use the input to BooleanNot as new condition for {node}. Note we
+ // assume that {cond} was already properly optimized before we get here
+ // (as guaranteed by the graph reduction logic).
+ if (condition->opcode() == IrOpcode::kBooleanNot) {
+ NodeProperties::ReplaceValueInput(node, condition->InputAt(0), 0);
+ NodeProperties::ChangeOp(node, condition_is_true
+ ? common()->DeoptimizeIf()
+ : common()->DeoptimizeUnless());
+ return Changed(node);
+ }
+ Decision const decision = DecideCondition(condition);
+ if (decision == Decision::kUnknown) return NoChange();
+ if (condition_is_true == (decision == Decision::kTrue)) {
+ return Replace(control);
+ }
+ control = graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, effect, control);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), control);
+ Revisit(graph()->end());
+ return Replace(dead());
+}
Reduction CommonOperatorReducer::ReduceMerge(Node* node) {
DCHECK_EQ(IrOpcode::kMerge, node->opcode());
diff --git a/deps/v8/src/compiler/common-operator-reducer.h b/deps/v8/src/compiler/common-operator-reducer.h
index 7184755885..49d9f1dd8e 100644
--- a/deps/v8/src/compiler/common-operator-reducer.h
+++ b/deps/v8/src/compiler/common-operator-reducer.h
@@ -30,6 +30,7 @@ class CommonOperatorReducer final : public AdvancedReducer {
private:
Reduction ReduceBranch(Node* node);
+ Reduction ReduceDeoptimizeConditional(Node* node);
Reduction ReduceMerge(Node* node);
Reduction ReduceEffectPhi(Node* node);
Reduction ReducePhi(Node* node);
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index c92bae9b19..3bb1b34495 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -142,21 +142,21 @@ std::ostream& operator<<(std::ostream& os, ParameterInfo const& i) {
return os;
}
-
-#define CACHED_OP_LIST(V) \
- V(Dead, Operator::kFoldable, 0, 0, 0, 1, 1, 1) \
- V(IfTrue, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
- V(IfFalse, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
- V(IfSuccess, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
- V(IfDefault, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
- V(Throw, Operator::kKontrol, 1, 1, 1, 0, 0, 1) \
- V(Terminate, Operator::kKontrol, 0, 1, 1, 0, 0, 1) \
- V(OsrNormalEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1) \
- V(OsrLoopEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1) \
- V(BeginRegion, Operator::kNoThrow, 0, 1, 0, 0, 1, 0) \
+#define CACHED_OP_LIST(V) \
+ V(Dead, Operator::kFoldable, 0, 0, 0, 1, 1, 1) \
+ V(DeoptimizeIf, Operator::kFoldable, 2, 1, 1, 0, 0, 1) \
+ V(DeoptimizeUnless, Operator::kFoldable, 2, 1, 1, 0, 0, 1) \
+ V(IfTrue, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
+ V(IfFalse, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
+ V(IfSuccess, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
+ V(IfDefault, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
+ V(Throw, Operator::kKontrol, 1, 1, 1, 0, 0, 1) \
+ V(Terminate, Operator::kKontrol, 0, 1, 1, 0, 0, 1) \
+ V(OsrNormalEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1) \
+ V(OsrLoopEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1) \
+ V(BeginRegion, Operator::kNoThrow, 0, 1, 0, 0, 1, 0) \
V(FinishRegion, Operator::kNoThrow, 1, 1, 0, 1, 1, 0)
-
#define CACHED_RETURN_LIST(V) \
V(1) \
V(2) \
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 7c3f3dae86..7c59f47c34 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -133,6 +133,8 @@ class CommonOperatorBuilder final : public ZoneObject {
const Operator* IfDefault();
const Operator* Throw();
const Operator* Deoptimize(DeoptimizeKind kind);
+ const Operator* DeoptimizeIf();
+ const Operator* DeoptimizeUnless();
const Operator* Return(int value_input_count = 1);
const Operator* Terminate();
diff --git a/deps/v8/src/compiler/frame-elider.cc b/deps/v8/src/compiler/frame-elider.cc
index 7c3f9b2741..5ad4aad41e 100644
--- a/deps/v8/src/compiler/frame-elider.cc
+++ b/deps/v8/src/compiler/frame-elider.cc
@@ -19,11 +19,12 @@ void FrameElider::Run() {
void FrameElider::MarkBlocks() {
- for (auto block : instruction_blocks()) {
+ for (InstructionBlock* block : instruction_blocks()) {
if (block->needs_frame()) continue;
- for (auto i = block->code_start(); i < block->code_end(); ++i) {
- if (InstructionAt(i)->IsCall() ||
- InstructionAt(i)->opcode() == ArchOpcode::kArchDeoptimize) {
+ for (int i = block->code_start(); i < block->code_end(); ++i) {
+ const Instruction* instr = InstructionAt(i);
+ if (instr->IsCall() || instr->IsDeoptimizeCall() ||
+ instr->arch_opcode() == ArchOpcode::kArchStackPointer) {
block->mark_needs_frame();
break;
}
@@ -33,13 +34,13 @@ void FrameElider::MarkBlocks() {
void FrameElider::PropagateMarks() {
- while (PropagateInOrder() && PropagateReversed()) {
+ while (PropagateInOrder() || PropagateReversed()) {
}
}
void FrameElider::MarkDeConstruction() {
- for (auto block : instruction_blocks()) {
+ for (InstructionBlock* block : instruction_blocks()) {
if (block->needs_frame()) {
// Special case: The start block needs a frame.
if (block->predecessors().empty()) {
@@ -47,15 +48,25 @@ void FrameElider::MarkDeConstruction() {
}
// Find "frame -> no frame" transitions, inserting frame
// deconstructions.
- for (auto succ : block->successors()) {
+ for (RpoNumber& succ : block->successors()) {
if (!InstructionBlockAt(succ)->needs_frame()) {
DCHECK_EQ(1U, block->SuccessorCount());
+ const Instruction* last =
+ InstructionAt(block->last_instruction_index());
+ if (last->IsThrow() || last->IsTailCall() ||
+ last->IsDeoptimizeCall()) {
+ // We need to keep the frame if we exit the block through any
+ // of these.
+ continue;
+ }
+ // The only cases when we need to deconstruct are ret and jump.
+ DCHECK(last->IsRet() || last->IsJump());
block->mark_must_deconstruct_frame();
}
}
} else {
// Find "no frame -> frame" transitions, inserting frame constructions.
- for (auto succ : block->successors()) {
+ for (RpoNumber& succ : block->successors()) {
if (InstructionBlockAt(succ)->needs_frame()) {
DCHECK_NE(1U, block->SuccessorCount());
InstructionBlockAt(succ)->mark_must_construct_frame();
@@ -68,7 +79,7 @@ void FrameElider::MarkDeConstruction() {
bool FrameElider::PropagateInOrder() {
bool changed = false;
- for (auto block : instruction_blocks()) {
+ for (InstructionBlock* block : instruction_blocks()) {
changed |= PropagateIntoBlock(block);
}
return changed;
@@ -77,7 +88,7 @@ bool FrameElider::PropagateInOrder() {
bool FrameElider::PropagateReversed() {
bool changed = false;
- for (auto block : base::Reversed(instruction_blocks())) {
+ for (InstructionBlock* block : base::Reversed(instruction_blocks())) {
changed |= PropagateIntoBlock(block);
}
return changed;
@@ -94,7 +105,7 @@ bool FrameElider::PropagateIntoBlock(InstructionBlock* block) {
// Propagate towards the end ("downwards") if there is a predecessor needing
// a frame, but don't "bleed" from deferred code to non-deferred code.
- for (auto pred : block->predecessors()) {
+ for (RpoNumber& pred : block->predecessors()) {
if (InstructionBlockAt(pred)->needs_frame() &&
(!InstructionBlockAt(pred)->IsDeferred() || block->IsDeferred())) {
block->mark_needs_frame();
@@ -104,7 +115,7 @@ bool FrameElider::PropagateIntoBlock(InstructionBlock* block) {
// Propagate towards start ("upwards") if there are successors and all of
// them need a frame.
- for (auto succ : block->successors()) {
+ for (RpoNumber& succ : block->successors()) {
if (!InstructionBlockAt(succ)->needs_frame()) return false;
}
block->mark_needs_frame();
diff --git a/deps/v8/src/compiler/frame-states.cc b/deps/v8/src/compiler/frame-states.cc
index 387d6a9bbb..91827d028e 100644
--- a/deps/v8/src/compiler/frame-states.cc
+++ b/deps/v8/src/compiler/frame-states.cc
@@ -58,6 +58,9 @@ std::ostream& operator<<(std::ostream& os, FrameStateType type) {
case FrameStateType::kArgumentsAdaptor:
os << "ARGUMENTS_ADAPTOR";
break;
+ case FrameStateType::kTailCallerFunction:
+ os << "TAIL_CALLER_FRAME";
+ break;
case FrameStateType::kConstructStub:
os << "CONSTRUCT_STUB";
break;
diff --git a/deps/v8/src/compiler/frame-states.h b/deps/v8/src/compiler/frame-states.h
index 60ff9b55fa..2552bcb758 100644
--- a/deps/v8/src/compiler/frame-states.h
+++ b/deps/v8/src/compiler/frame-states.h
@@ -79,10 +79,10 @@ enum class FrameStateType {
kJavaScriptFunction, // Represents an unoptimized JavaScriptFrame.
kInterpretedFunction, // Represents an InterpretedFrame.
kArgumentsAdaptor, // Represents an ArgumentsAdaptorFrame.
+ kTailCallerFunction, // Represents a frame removed by tail call elimination.
kConstructStub // Represents a ConstructStubFrame.
};
-
class FrameStateFunctionInfo {
public:
FrameStateFunctionInfo(FrameStateType type, int parameter_count,
diff --git a/deps/v8/src/compiler/frame.cc b/deps/v8/src/compiler/frame.cc
index b08030b8c6..3d93e1528f 100644
--- a/deps/v8/src/compiler/frame.cc
+++ b/deps/v8/src/compiler/frame.cc
@@ -13,17 +13,32 @@ namespace internal {
namespace compiler {
Frame::Frame(int fixed_frame_size_in_slots, const CallDescriptor* descriptor)
- : needs_frame_((descriptor != nullptr) &&
- descriptor->RequiresFrameAsIncoming()),
- frame_slot_count_(fixed_frame_size_in_slots),
+ : frame_slot_count_(fixed_frame_size_in_slots),
callee_saved_slot_count_(0),
spill_slot_count_(0),
allocated_registers_(nullptr),
allocated_double_registers_(nullptr) {}
+int Frame::AlignFrame(int alignment) {
+ DCHECK_EQ(0, callee_saved_slot_count_);
+ int alignment_slots = alignment / kPointerSize;
+ int delta = alignment_slots - (frame_slot_count_ & (alignment_slots - 1));
+ if (delta != alignment_slots) {
+ frame_slot_count_ += delta;
+ if (spill_slot_count_ != 0) {
+ spill_slot_count_ += delta;
+ }
+ }
+ return delta;
+}
+
+void FrameAccessState::MarkHasFrame(bool state) {
+ has_frame_ = state;
+ SetFrameAccessToDefault();
+}
void FrameAccessState::SetFrameAccessToDefault() {
- if (frame()->needs_frame() && !FLAG_turbo_sp_frame_access) {
+ if (has_frame() && !FLAG_turbo_sp_frame_access) {
SetFrameAccessToFP();
} else {
SetFrameAccessToSP();
@@ -32,16 +47,12 @@ void FrameAccessState::SetFrameAccessToDefault() {
FrameOffset FrameAccessState::GetFrameOffset(int spill_slot) const {
- const int offset =
- (StandardFrameConstants::kFixedSlotCountAboveFp - spill_slot - 1) *
- kPointerSize;
+ const int frame_offset = FrameSlotToFPOffset(spill_slot);
if (access_frame_with_fp()) {
- DCHECK(frame()->needs_frame());
- return FrameOffset::FromFramePointer(offset);
+ return FrameOffset::FromFramePointer(frame_offset);
} else {
// No frame. Retrieve all parameters relative to stack pointer.
- int sp_offset =
- offset + ((frame()->GetSpToFpSlotCount() + sp_delta()) * kPointerSize);
+ int sp_offset = frame_offset + GetSPToFPOffset();
return FrameOffset::FromStackPointer(sp_offset);
}
}
diff --git a/deps/v8/src/compiler/frame.h b/deps/v8/src/compiler/frame.h
index 011a0f02d5..d413d3e033 100644
--- a/deps/v8/src/compiler/frame.h
+++ b/deps/v8/src/compiler/frame.h
@@ -59,9 +59,9 @@ class CallDescriptor;
// |- - - - - - - - -| | |
// 1 | saved frame ptr | Fixed |
// |- - - - - - - - -| Header <-- frame ptr |
-// 2 | Context | | |
+// 2 |Context/Frm. Type| | |
// |- - - - - - - - -| | |
-// 3 |JSFunction/Marker| v |
+// 3 | [JSFunction] | v |
// +-----------------+---- |
// 4 | spill 1 | ^ Callee
// |- - - - - - - - -| | frame slots
@@ -81,26 +81,13 @@ class Frame : public ZoneObject {
explicit Frame(int fixed_frame_size_in_slots,
const CallDescriptor* descriptor);
- inline bool needs_frame() const { return needs_frame_; }
- inline void MarkNeedsFrame() { needs_frame_ = true; }
-
inline int GetTotalFrameSlotCount() const { return frame_slot_count_; }
- inline int GetSpToFpSlotCount() const {
- return GetTotalFrameSlotCount() -
- StandardFrameConstants::kFixedSlotCountAboveFp;
- }
inline int GetSavedCalleeRegisterSlotCount() const {
return callee_saved_slot_count_;
}
inline int GetSpillSlotCount() const { return spill_slot_count_; }
- inline void SetElidedFrameSizeInSlots(int slots) {
- DCHECK_EQ(0, callee_saved_slot_count_);
- DCHECK_EQ(0, spill_slot_count_);
- frame_slot_count_ = slots;
- }
-
void SetAllocatedRegisters(BitVector* regs) {
DCHECK(allocated_registers_ == nullptr);
allocated_registers_ = regs;
@@ -115,33 +102,34 @@ class Frame : public ZoneObject {
return !allocated_double_registers_->IsEmpty();
}
- int AlignSavedCalleeRegisterSlots() {
+ int AlignSavedCalleeRegisterSlots(int alignment = kDoubleSize) {
DCHECK_EQ(0, callee_saved_slot_count_);
- needs_frame_ = true;
- int delta = frame_slot_count_ & 1;
- frame_slot_count_ += delta;
+ int alignment_slots = alignment / kPointerSize;
+ int delta = alignment_slots - (frame_slot_count_ & (alignment_slots - 1));
+ if (delta != alignment_slots) {
+ frame_slot_count_ += delta;
+ }
return delta;
}
void AllocateSavedCalleeRegisterSlots(int count) {
- needs_frame_ = true;
frame_slot_count_ += count;
callee_saved_slot_count_ += count;
}
int AllocateSpillSlot(int width) {
DCHECK_EQ(0, callee_saved_slot_count_);
- needs_frame_ = true;
int frame_slot_count_before = frame_slot_count_;
int slot = AllocateAlignedFrameSlot(width);
spill_slot_count_ += (frame_slot_count_ - frame_slot_count_before);
return slot;
}
+ int AlignFrame(int alignment = kDoubleSize);
+
int ReserveSpillSlots(size_t slot_count) {
DCHECK_EQ(0, callee_saved_slot_count_);
DCHECK_EQ(0, spill_slot_count_);
- needs_frame_ = true;
spill_slot_count_ += static_cast<int>(slot_count);
frame_slot_count_ += static_cast<int>(slot_count);
return frame_slot_count_ - 1;
@@ -163,7 +151,6 @@ class Frame : public ZoneObject {
}
private:
- bool needs_frame_;
int frame_slot_count_;
int callee_saved_slot_count_;
int spill_slot_count_;
@@ -205,21 +192,36 @@ class FrameOffset {
class FrameAccessState : public ZoneObject {
public:
explicit FrameAccessState(Frame* const frame)
- : frame_(frame), access_frame_with_fp_(false), sp_delta_(0) {
- SetFrameAccessToDefault();
- }
+ : frame_(frame),
+ access_frame_with_fp_(false),
+ sp_delta_(0),
+ has_frame_(false) {}
Frame* frame() const { return frame_; }
+ void MarkHasFrame(bool state);
int sp_delta() const { return sp_delta_; }
void ClearSPDelta() { sp_delta_ = 0; }
void IncreaseSPDelta(int amount) { sp_delta_ += amount; }
bool access_frame_with_fp() const { return access_frame_with_fp_; }
+
+ // Regardless of how we access slots on the stack - using sp or fp - do we
+ // have a frame, at the current stage in code generation.
+ bool has_frame() const { return has_frame_; }
+
void SetFrameAccessToDefault();
void SetFrameAccessToFP() { access_frame_with_fp_ = true; }
void SetFrameAccessToSP() { access_frame_with_fp_ = false; }
+ int GetSPToFPSlotCount() const {
+ int frame_slot_count =
+ (has_frame() ? frame()->GetTotalFrameSlotCount() : kElidedFrameSlots) -
+ StandardFrameConstants::kFixedSlotCountAboveFp;
+ return frame_slot_count + sp_delta();
+ }
+ int GetSPToFPOffset() const { return GetSPToFPSlotCount() * kPointerSize; }
+
// Get the frame offset for a given spill slot. The location depends on the
// calling convention and the specific frame layout, and may thus be
// architecture-specific. Negative spill slots indicate arguments on the
@@ -230,6 +232,7 @@ class FrameAccessState : public ZoneObject {
Frame* const frame_;
bool access_frame_with_fp_;
int sp_delta_;
+ bool has_frame_;
};
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/gap-resolver.cc b/deps/v8/src/compiler/gap-resolver.cc
index 4107b0f7bf..35e91fa404 100644
--- a/deps/v8/src/compiler/gap-resolver.cc
+++ b/deps/v8/src/compiler/gap-resolver.cc
@@ -29,7 +29,7 @@ void GapResolver::Resolve(ParallelMove* moves) const {
auto it =
std::remove_if(moves->begin(), moves->end(), std::ptr_fun(IsRedundant));
moves->erase(it, moves->end());
- for (auto move : *moves) {
+ for (MoveOperands* move : *moves) {
if (!move->IsEliminated()) PerformMove(moves, move);
}
}
@@ -53,7 +53,7 @@ void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) const {
// Perform a depth-first traversal of the move graph to resolve dependencies.
// Any unperformed, unpending move with a source the same as this one's
// destination blocks this one so recursively perform all such moves.
- for (auto other : *moves) {
+ for (MoveOperands* other : *moves) {
if (other->Blocks(destination) && !other->IsPending()) {
// Though PerformMove can change any source operand in the move graph,
// this call cannot create a blocking move via a swap (this loop does not
@@ -103,7 +103,7 @@ void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) const {
// Any unperformed (including pending) move with a source of either this
// move's source or destination needs to have their source changed to
// reflect the state of affairs after the swap.
- for (auto other : *moves) {
+ for (MoveOperands* other : *moves) {
if (other->Blocks(source)) {
other->set_source(destination);
} else if (other->Blocks(destination)) {
diff --git a/deps/v8/src/compiler/graph-replay.cc b/deps/v8/src/compiler/graph-replay.cc
index 7f4cc95d91..cb775e96f3 100644
--- a/deps/v8/src/compiler/graph-replay.cc
+++ b/deps/v8/src/compiler/graph-replay.cc
@@ -20,7 +20,7 @@ namespace compiler {
void GraphReplayPrinter::PrintReplay(Graph* graph) {
GraphReplayPrinter replay;
PrintF(" Node* nil = graph()->NewNode(common()->Dead());\n");
- Zone zone;
+ Zone zone(graph->zone()->allocator());
AllNodes nodes(&zone, graph);
// Allocate the nodes first.
diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc
index 07851768b3..301e3900e1 100644
--- a/deps/v8/src/compiler/graph-visualizer.cc
+++ b/deps/v8/src/compiler/graph-visualizer.cc
@@ -197,7 +197,8 @@ class JSONGraphEdgeWriter {
std::ostream& operator<<(std::ostream& os, const AsJSON& ad) {
- Zone tmp_zone;
+ base::AccountingAllocator allocator;
+ Zone tmp_zone(&allocator);
os << "{\n\"nodes\":[";
JSONGraphNodeWriter(os, &tmp_zone, &ad.graph, ad.positions).Print();
os << "],\n\"edges\":[";
@@ -231,8 +232,8 @@ class GraphC1Visualizer {
void PrintInputs(InputIterator* i, int count, const char* prefix);
void PrintType(Node* node);
- void PrintLiveRange(LiveRange* range, const char* type, int vreg);
- void PrintLiveRangeChain(TopLevelLiveRange* range, const char* type);
+ void PrintLiveRange(const LiveRange* range, const char* type, int vreg);
+ void PrintLiveRangeChain(const TopLevelLiveRange* range, const char* type);
class Tag final BASE_EMBEDDED {
public:
@@ -505,31 +506,30 @@ void GraphC1Visualizer::PrintLiveRanges(const char* phase,
Tag tag(this, "intervals");
PrintStringProperty("name", phase);
- for (auto range : data->fixed_double_live_ranges()) {
+ for (const TopLevelLiveRange* range : data->fixed_double_live_ranges()) {
PrintLiveRangeChain(range, "fixed");
}
- for (auto range : data->fixed_live_ranges()) {
+ for (const TopLevelLiveRange* range : data->fixed_live_ranges()) {
PrintLiveRangeChain(range, "fixed");
}
- for (auto range : data->live_ranges()) {
+ for (const TopLevelLiveRange* range : data->live_ranges()) {
PrintLiveRangeChain(range, "object");
}
}
-
-void GraphC1Visualizer::PrintLiveRangeChain(TopLevelLiveRange* range,
+void GraphC1Visualizer::PrintLiveRangeChain(const TopLevelLiveRange* range,
const char* type) {
if (range == nullptr || range->IsEmpty()) return;
int vreg = range->vreg();
- for (LiveRange* child = range; child != nullptr; child = child->next()) {
+ for (const LiveRange* child = range; child != nullptr;
+ child = child->next()) {
PrintLiveRange(child, type, vreg);
}
}
-
-void GraphC1Visualizer::PrintLiveRange(LiveRange* range, const char* type,
+void GraphC1Visualizer::PrintLiveRange(const LiveRange* range, const char* type,
int vreg) {
if (range != nullptr && !range->IsEmpty()) {
PrintIndent();
@@ -545,7 +545,7 @@ void GraphC1Visualizer::PrintLiveRange(LiveRange* range, const char* type,
os_ << " \"" << assigned_reg.ToString() << "\"";
}
} else if (range->spilled()) {
- auto top = range->TopLevel();
+ const TopLevelLiveRange* top = range->TopLevel();
int index = -1;
if (top->HasSpillRange()) {
index = kMaxInt; // This hasn't been set yet.
@@ -564,8 +564,8 @@ void GraphC1Visualizer::PrintLiveRange(LiveRange* range, const char* type,
}
os_ << " " << vreg;
- for (auto interval = range->first_interval(); interval != nullptr;
- interval = interval->next()) {
+ for (const UseInterval* interval = range->first_interval();
+ interval != nullptr; interval = interval->next()) {
os_ << " [" << interval->start().value() << ", "
<< interval->end().value() << "[";
}
@@ -584,14 +584,16 @@ void GraphC1Visualizer::PrintLiveRange(LiveRange* range, const char* type,
std::ostream& operator<<(std::ostream& os, const AsC1VCompilation& ac) {
- Zone tmp_zone;
+ base::AccountingAllocator allocator;
+ Zone tmp_zone(&allocator);
GraphC1Visualizer(os, &tmp_zone).PrintCompilation(ac.info_);
return os;
}
std::ostream& operator<<(std::ostream& os, const AsC1V& ac) {
- Zone tmp_zone;
+ base::AccountingAllocator allocator;
+ Zone tmp_zone(&allocator);
GraphC1Visualizer(os, &tmp_zone)
.PrintSchedule(ac.phase_, ac.schedule_, ac.positions_, ac.instructions_);
return os;
@@ -600,7 +602,8 @@ std::ostream& operator<<(std::ostream& os, const AsC1V& ac) {
std::ostream& operator<<(std::ostream& os,
const AsC1VRegisterAllocationData& ac) {
- Zone tmp_zone;
+ base::AccountingAllocator allocator;
+ Zone tmp_zone(&allocator);
GraphC1Visualizer(os, &tmp_zone).PrintLiveRanges(ac.phase_, ac.data_);
return os;
}
@@ -610,7 +613,8 @@ const int kOnStack = 1;
const int kVisited = 2;
std::ostream& operator<<(std::ostream& os, const AsRPO& ar) {
- Zone local_zone;
+ base::AccountingAllocator allocator;
+ Zone local_zone(&allocator);
ZoneVector<byte> state(ar.graph.NodeCount(), kUnvisited, &local_zone);
ZoneStack<Node*> stack(&local_zone);
diff --git a/deps/v8/src/compiler/graph.cc b/deps/v8/src/compiler/graph.cc
index ba69617bd2..ff1a17ef3e 100644
--- a/deps/v8/src/compiler/graph.cc
+++ b/deps/v8/src/compiler/graph.cc
@@ -25,7 +25,7 @@ Graph::Graph(Zone* zone)
void Graph::Decorate(Node* node) {
- for (auto const decorator : decorators_) {
+ for (GraphDecorator* const decorator : decorators_) {
decorator->Decorate(node);
}
}
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index 1f61af8abf..ee05ad00b6 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -49,16 +49,13 @@ class IA32OperandConverter : public InstructionOperandConverter {
return Operand(ToDoubleRegister(op));
}
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- FrameOffset offset = frame_access_state()->GetFrameOffset(
- AllocatedOperand::cast(op)->index());
- return Operand(offset.from_stack_pointer() ? esp : ebp,
- offset.offset() + extra);
+ return SlotToOperand(AllocatedOperand::cast(op)->index(), extra);
}
- Operand ToMaterializableOperand(int materializable_offset) {
- FrameOffset offset = frame_access_state()->GetFrameOffset(
- FPOffsetToFrameSlot(materializable_offset));
- return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
+ Operand SlotToOperand(int slot, int extra = 0) {
+ FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
+ return Operand(offset.from_stack_pointer() ? esp : ebp,
+ offset.offset() + extra);
}
Operand HighOperand(InstructionOperand* op) {
@@ -333,6 +330,39 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ bind(&done); \
} while (false)
+#define ASSEMBLE_COMPARE(asm_instr) \
+ do { \
+ if (AddressingModeField::decode(instr->opcode()) != kMode_None) { \
+ size_t index = 0; \
+ Operand left = i.MemoryOperand(&index); \
+ if (HasImmediateInput(instr, index)) { \
+ __ asm_instr(left, i.InputImmediate(index)); \
+ } else { \
+ __ asm_instr(left, i.InputRegister(index)); \
+ } \
+ } else { \
+ if (HasImmediateInput(instr, 1)) { \
+ if (instr->InputAt(0)->IsRegister()) { \
+ __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
+ } else { \
+ __ asm_instr(i.InputOperand(0), i.InputImmediate(1)); \
+ } \
+ } else { \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ asm_instr(i.InputRegister(0), i.InputRegister(1)); \
+ } else { \
+ __ asm_instr(i.InputRegister(0), i.InputOperand(1)); \
+ } \
+ } \
+ } \
+ } while (0)
+
+void CodeGenerator::AssembleDeconstructFrame() {
+ __ mov(esp, ebp);
+ __ pop(ebp);
+}
+
+void CodeGenerator::AssembleSetupStackPointer() {}
void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
@@ -349,18 +379,56 @@ void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
__ sub(esp, Immediate(-sp_slot_delta * kPointerSize));
frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
}
- if (frame()->needs_frame()) {
+ if (frame_access_state()->has_frame()) {
__ mov(ebp, MemOperand(ebp, 0));
}
frame_access_state()->SetFrameAccessToSP();
}
+void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
+ Register, Register,
+ Register) {
+ // There are not enough temp registers left on ia32 for a call instruction
+ // so we pick some scratch registers and save/restore them manually here.
+ int scratch_count = 3;
+ Register scratch1 = ebx;
+ Register scratch2 = ecx;
+ Register scratch3 = edx;
+ DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+ Label done;
+
+ // Check if current frame is an arguments adaptor frame.
+ __ cmp(Operand(ebp, StandardFrameConstants::kContextOffset),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &done, Label::kNear);
+
+ __ push(scratch1);
+ __ push(scratch2);
+ __ push(scratch3);
+
+ // Load arguments count from current arguments adaptor frame (note, it
+ // does not include receiver).
+ Register caller_args_count_reg = scratch1;
+ __ mov(caller_args_count_reg,
+ Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(caller_args_count_reg);
+
+ ParameterCount callee_args_count(args_reg);
+ __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+ scratch3, ReturnAddressState::kOnStack, scratch_count);
+ __ pop(scratch3);
+ __ pop(scratch2);
+ __ pop(scratch1);
+
+ __ bind(&done);
+}
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
IA32OperandConverter i(this, instr);
-
- switch (ArchOpcodeField::decode(instr->opcode())) {
+ InstructionCode opcode = instr->opcode();
+ ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
+ switch (arch_opcode) {
case kArchCallCodeObject: {
EnsureSpaceForLazyDeopt();
if (HasImmediateInput(instr, 0)) {
@@ -375,9 +443,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
AssembleDeconstructActivationRecord(stack_param_delta);
+ if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
+ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+ no_reg, no_reg, no_reg);
+ }
if (HasImmediateInput(instr, 0)) {
Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
__ jmp(code, RelocInfo::CODE_TARGET);
@@ -402,6 +475,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchTailCallJSFunctionFromJSFunction:
case kArchTailCallJSFunction: {
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
@@ -411,6 +485,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
AssembleDeconstructActivationRecord(stack_param_delta);
+ if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
+ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+ no_reg, no_reg, no_reg);
+ }
__ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
frame_access_state()->ClearSPDelta();
break;
@@ -469,7 +547,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ mov(i.OutputRegister(), ebp);
break;
case kArchParentFramePointer:
- if (frame_access_state()->frame()->needs_frame()) {
+ if (frame_access_state()->has_frame()) {
__ mov(i.OutputRegister(), Operand(ebp, 0));
} else {
__ mov(i.OutputRegister(), ebp);
@@ -530,38 +608,22 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
case kIA32Cmp:
- if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
- size_t index = 0;
- Operand operand = i.MemoryOperand(&index);
- if (HasImmediateInput(instr, index)) {
- __ cmp(operand, i.InputImmediate(index));
- } else {
- __ cmp(operand, i.InputRegister(index));
- }
- } else {
- if (HasImmediateInput(instr, 1)) {
- __ cmp(i.InputOperand(0), i.InputImmediate(1));
- } else {
- __ cmp(i.InputRegister(0), i.InputOperand(1));
- }
- }
+ ASSEMBLE_COMPARE(cmp);
+ break;
+ case kIA32Cmp16:
+ ASSEMBLE_COMPARE(cmpw);
+ break;
+ case kIA32Cmp8:
+ ASSEMBLE_COMPARE(cmpb);
break;
case kIA32Test:
- if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
- size_t index = 0;
- Operand operand = i.MemoryOperand(&index);
- if (HasImmediateInput(instr, index)) {
- __ test(operand, i.InputImmediate(index));
- } else {
- __ test(i.InputRegister(index), operand);
- }
- } else {
- if (HasImmediateInput(instr, 1)) {
- __ test(i.InputOperand(0), i.InputImmediate(1));
- } else {
- __ test(i.InputRegister(0), i.InputOperand(1));
- }
- }
+ ASSEMBLE_COMPARE(test);
+ break;
+ case kIA32Test16:
+ ASSEMBLE_COMPARE(test_w);
+ break;
+ case kIA32Test8:
+ ASSEMBLE_COMPARE(test_b);
break;
case kIA32Imul:
if (HasImmediateInput(instr, 1)) {
@@ -632,6 +694,92 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ sar_cl(i.OutputOperand());
}
break;
+ case kIA32AddPair: {
+ // i.OutputRegister(0) == i.InputRegister(0) ... left low word.
+ // i.InputRegister(1) ... left high word.
+ // i.InputRegister(2) ... right low word.
+ // i.InputRegister(3) ... right high word.
+ bool use_temp = false;
+ if (i.OutputRegister(0).code() == i.InputRegister(1).code() ||
+ i.OutputRegister(0).code() == i.InputRegister(3).code()) {
+ // We cannot write to the output register directly, because it would
+ // overwrite an input for adc. We have to use the temp register.
+ use_temp = true;
+ __ Move(i.TempRegister(0), i.InputRegister(0));
+ __ add(i.TempRegister(0), i.InputRegister(2));
+ } else {
+ __ add(i.OutputRegister(0), i.InputRegister(2));
+ }
+ __ adc(i.InputRegister(1), Operand(i.InputRegister(3)));
+ if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
+ __ Move(i.OutputRegister(1), i.InputRegister(1));
+ }
+ if (use_temp) {
+ __ Move(i.OutputRegister(0), i.TempRegister(0));
+ }
+ break;
+ }
+ case kIA32SubPair: {
+ // i.OutputRegister(0) == i.InputRegister(0) ... left low word.
+ // i.InputRegister(1) ... left high word.
+ // i.InputRegister(2) ... right low word.
+ // i.InputRegister(3) ... right high word.
+ bool use_temp = false;
+ if (i.OutputRegister(0).code() == i.InputRegister(1).code() ||
+ i.OutputRegister(0).code() == i.InputRegister(3).code()) {
+ // We cannot write to the output register directly, because it would
+ // overwrite an input for adc. We have to use the temp register.
+ use_temp = true;
+ __ Move(i.TempRegister(0), i.InputRegister(0));
+ __ sub(i.TempRegister(0), i.InputRegister(2));
+ } else {
+ __ sub(i.OutputRegister(0), i.InputRegister(2));
+ }
+ __ sbb(i.InputRegister(1), Operand(i.InputRegister(3)));
+ if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
+ __ Move(i.OutputRegister(1), i.InputRegister(1));
+ }
+ if (use_temp) {
+ __ Move(i.OutputRegister(0), i.TempRegister(0));
+ }
+ break;
+ }
+ case kIA32MulPair: {
+ __ imul(i.OutputRegister(1), i.InputOperand(0));
+ __ mov(i.TempRegister(0), i.InputOperand(1));
+ __ imul(i.TempRegister(0), i.InputOperand(2));
+ __ add(i.OutputRegister(1), i.TempRegister(0));
+ __ mov(i.OutputRegister(0), i.InputOperand(0));
+ // Multiplies the low words and stores them in eax and edx.
+ __ mul(i.InputRegister(2));
+ __ add(i.OutputRegister(1), i.TempRegister(0));
+
+ break;
+ }
+ case kIA32ShlPair:
+ if (HasImmediateInput(instr, 2)) {
+ __ ShlPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));
+ } else {
+ // Shift has been loaded into CL by the register allocator.
+ __ ShlPair_cl(i.InputRegister(1), i.InputRegister(0));
+ }
+ break;
+ case kIA32ShrPair:
+ if (HasImmediateInput(instr, 2)) {
+ __ ShrPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));
+ } else {
+ // Shift has been loaded into CL by the register allocator.
+ __ ShrPair_cl(i.InputRegister(1), i.InputRegister(0));
+ }
+ break;
+ case kIA32SarPair:
+ if (HasImmediateInput(instr, 2)) {
+ __ SarPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));
+ } else {
+ // Shift has been loaded into CL by the register allocator.
+ __ SarPair_cl(i.InputRegister(1), i.InputRegister(0));
+ }
+ break;
case kIA32Ror:
if (HasImmediateInput(instr, 1)) {
__ ror(i.OutputOperand(), i.InputInt5(1));
@@ -1476,21 +1624,16 @@ void CodeGenerator::AssembleDeoptimizerCall(
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->IsCFunctionCall()) {
- // Assemble a prologue similar the to cdecl calling convention.
- __ push(ebp);
- __ mov(ebp, esp);
- } else if (descriptor->IsJSFunctionCall()) {
- // TODO(turbofan): this prologue is redundant with OSR, but still needed for
- // code aging.
- __ Prologue(this->info()->GeneratePreagedPrologue());
- } else if (frame()->needs_frame()) {
- __ StubPrologue();
- } else {
- frame()->SetElidedFrameSizeInSlots(kPCOnStackSize / kPointerSize);
+ if (frame_access_state()->has_frame()) {
+ if (descriptor->IsCFunctionCall()) {
+ __ push(ebp);
+ __ mov(ebp, esp);
+ } else if (descriptor->IsJSFunctionCall()) {
+ __ Prologue(this->info()->GeneratePreagedPrologue());
+ } else {
+ __ StubPrologue(info()->GetOutputStackFrameType());
+ }
}
- frame_access_state()->SetFrameAccessToDefault();
-
int stack_shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -1536,17 +1679,15 @@ void CodeGenerator::AssembleReturn() {
}
if (descriptor->IsCFunctionCall()) {
- __ mov(esp, ebp); // Move stack pointer back to frame pointer.
- __ pop(ebp); // Pop caller's frame pointer.
- } else if (frame()->needs_frame()) {
+ AssembleDeconstructFrame();
+ } else if (frame_access_state()->has_frame()) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ jmp(&return_label_);
return;
} else {
__ bind(&return_label_);
- __ mov(esp, ebp); // Move stack pointer back to frame pointer.
- __ pop(ebp); // Pop caller's frame pointer.
+ AssembleDeconstructFrame();
}
}
size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
@@ -1581,15 +1722,15 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
Constant src_constant = g.ToConstant(source);
if (src_constant.type() == Constant::kHeapObject) {
Handle<HeapObject> src = src_constant.ToHeapObject();
- int offset;
- if (IsMaterializableFromFrame(src, &offset)) {
+ int slot;
+ if (IsMaterializableFromFrame(src, &slot)) {
if (destination->IsRegister()) {
Register dst = g.ToRegister(destination);
- __ mov(dst, g.ToMaterializableOperand(offset));
+ __ mov(dst, g.SlotToOperand(slot));
} else {
DCHECK(destination->IsStackSlot());
Operand dst = g.ToOperand(destination);
- __ push(g.ToMaterializableOperand(offset));
+ __ push(g.SlotToOperand(slot));
__ pop(dst);
}
} else if (destination->IsRegister()) {
diff --git a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
index 61fd035403..3cf2094bdd 100644
--- a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
@@ -15,7 +15,11 @@ namespace compiler {
V(IA32Add) \
V(IA32And) \
V(IA32Cmp) \
+ V(IA32Cmp16) \
+ V(IA32Cmp8) \
V(IA32Test) \
+ V(IA32Test16) \
+ V(IA32Test8) \
V(IA32Or) \
V(IA32Xor) \
V(IA32Sub) \
@@ -29,6 +33,12 @@ namespace compiler {
V(IA32Shl) \
V(IA32Shr) \
V(IA32Sar) \
+ V(IA32AddPair) \
+ V(IA32SubPair) \
+ V(IA32MulPair) \
+ V(IA32ShlPair) \
+ V(IA32ShrPair) \
+ V(IA32SarPair) \
V(IA32Ror) \
V(IA32Lzcnt) \
V(IA32Tzcnt) \
@@ -105,7 +115,6 @@ namespace compiler {
V(IA32Poke) \
V(IA32StackCheck)
-
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
diff --git a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
index 093bc22268..803fdf6fd6 100644
--- a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
@@ -17,7 +17,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Add:
case kIA32And:
case kIA32Cmp:
+ case kIA32Cmp16:
+ case kIA32Cmp8:
case kIA32Test:
+ case kIA32Test16:
+ case kIA32Test8:
case kIA32Or:
case kIA32Xor:
case kIA32Sub:
@@ -31,6 +35,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Shl:
case kIA32Shr:
case kIA32Sar:
+ case kIA32AddPair:
+ case kIA32SubPair:
+ case kIA32MulPair:
+ case kIA32ShlPair:
+ case kIA32ShrPair:
+ case kIA32SarPair:
case kIA32Ror:
case kIA32Lzcnt:
case kIA32Tzcnt:
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index f649ba9109..3eae18edcb 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -27,6 +27,34 @@ class IA32OperandGenerator final : public OperandGenerator {
return DefineAsRegister(node);
}
+ bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input,
+ int effect_level) {
+ if (input->opcode() != IrOpcode::kLoad ||
+ !selector()->CanCover(node, input)) {
+ return false;
+ }
+ if (effect_level != selector()->GetEffectLevel(input)) {
+ return false;
+ }
+ MachineRepresentation rep =
+ LoadRepresentationOf(input->op()).representation();
+ switch (opcode) {
+ case kIA32Cmp:
+ case kIA32Test:
+ return rep == MachineRepresentation::kWord32 ||
+ rep == MachineRepresentation::kTagged;
+ case kIA32Cmp16:
+ case kIA32Test16:
+ return rep == MachineRepresentation::kWord16;
+ case kIA32Cmp8:
+ case kIA32Test8:
+ return rep == MachineRepresentation::kWord8;
+ default:
+ break;
+ }
+ return false;
+ }
+
bool CanBeImmediate(Node* node) {
switch (node->opcode()) {
case IrOpcode::kInt32Constant:
@@ -232,9 +260,7 @@ void InstructionSelector::VisitStore(Node* node) {
inputs[input_count++] = g.UseUniqueRegister(index);
addressing_mode = kMode_MR1;
}
- inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
- ? g.UseRegister(value)
- : g.UseUniqueRegister(value);
+ inputs[input_count++] = g.UseUniqueRegister(value);
RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
switch (write_barrier_kind) {
case kNoWriteBarrier:
@@ -404,10 +430,11 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
}
}
+namespace {
// Shared routine for multiple binary operations.
-static void VisitBinop(InstructionSelector* selector, Node* node,
- InstructionCode opcode, FlagsContinuation* cont) {
+void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont) {
IA32OperandGenerator g(selector);
Int32BinopMatcher m(node);
Node* left = m.left().node();
@@ -456,18 +483,24 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
- selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
- inputs);
+ opcode = cont->Encode(opcode);
+ if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+ cont->frame_state());
+ } else {
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
+ }
}
// Shared routine for multiple binary operations.
-static void VisitBinop(InstructionSelector* selector, Node* node,
- InstructionCode opcode) {
+void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode) {
FlagsContinuation cont;
VisitBinop(selector, node, opcode, &cont);
}
+} // namespace
void InstructionSelector::VisitWord32And(Node* node) {
VisitBinop(this, node, kIA32And);
@@ -579,6 +612,93 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
VisitShift(this, node, kIA32Sar);
}
+void InstructionSelector::VisitInt32PairAdd(Node* node) {
+ IA32OperandGenerator g(this);
+
+ // We use UseUniqueRegister here to avoid register sharing with the temp
+ // register.
+ InstructionOperand inputs[] = {
+ g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+
+ InstructionOperand outputs[] = {
+ g.DefineSameAsFirst(node),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+ InstructionOperand temps[] = {g.TempRegister()};
+
+ Emit(kIA32AddPair, 2, outputs, 4, inputs, 1, temps);
+}
+
+void InstructionSelector::VisitInt32PairSub(Node* node) {
+ IA32OperandGenerator g(this);
+
+ // We use UseUniqueRegister here to avoid register sharing with the temp
+ // register.
+ InstructionOperand inputs[] = {
+ g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+
+ InstructionOperand outputs[] = {
+ g.DefineSameAsFirst(node),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+ InstructionOperand temps[] = {g.TempRegister()};
+
+ Emit(kIA32SubPair, 2, outputs, 4, inputs, 1, temps);
+}
+
+void InstructionSelector::VisitInt32PairMul(Node* node) {
+ IA32OperandGenerator g(this);
+
+ // InputAt(3) explicitly shares ecx with OutputRegister(1) to save one
+ // register and one mov instruction.
+ InstructionOperand inputs[] = {
+ g.UseUnique(node->InputAt(0)), g.UseUnique(node->InputAt(1)),
+ g.UseUniqueRegister(node->InputAt(2)), g.UseFixed(node->InputAt(3), ecx)};
+
+ InstructionOperand outputs[] = {
+ g.DefineAsFixed(node, eax),
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 1), ecx)};
+
+ InstructionOperand temps[] = {g.TempRegister(edx)};
+
+ Emit(kIA32MulPair, 2, outputs, 4, inputs, 1, temps);
+}
+
+void VisitWord32PairShift(InstructionSelector* selector, InstructionCode opcode,
+ Node* node) {
+ IA32OperandGenerator g(selector);
+
+ Node* shift = node->InputAt(2);
+ InstructionOperand shift_operand;
+ if (g.CanBeImmediate(shift)) {
+ shift_operand = g.UseImmediate(shift);
+ } else {
+ shift_operand = g.UseFixed(shift, ecx);
+ }
+ InstructionOperand inputs[] = {g.UseFixed(node->InputAt(0), eax),
+ g.UseFixed(node->InputAt(1), edx),
+ shift_operand};
+
+ InstructionOperand outputs[] = {
+ g.DefineAsFixed(node, eax),
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 1), edx)};
+
+ selector->Emit(opcode, 2, outputs, 3, inputs);
+}
+
+void InstructionSelector::VisitWord32PairShl(Node* node) {
+ VisitWord32PairShift(this, kIA32ShlPair, node);
+}
+
+void InstructionSelector::VisitWord32PairShr(Node* node) {
+ VisitWord32PairShift(this, kIA32ShrPair, node);
+}
+
+void InstructionSelector::VisitWord32PairSar(Node* node) {
+ VisitWord32PairShift(this, kIA32SarPair, node);
+}
void InstructionSelector::VisitWord32Ror(Node* node) {
VisitShift(this, node, kIA32Ror);
@@ -746,6 +866,9 @@ void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
VisitRO(this, node, kSSEFloat64ToUint32);
}
+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
+ VisitRO(this, node, kSSEFloat64ToUint32);
+}
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
VisitRO(this, node, kSSEFloat64ToFloat32);
@@ -987,6 +1110,7 @@ void InstructionSelector::EmitPrepareArguments(
bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
+int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 0; }
namespace {
@@ -1008,6 +1132,9 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
inputs[input_count++] = g.Label(cont->true_block());
inputs[input_count++] = g.Label(cont->false_block());
selector->Emit(opcode, 0, nullptr, input_count, inputs);
+ } else if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
+ cont->frame_state());
} else {
DCHECK(cont->IsSet());
InstructionOperand output = g.DefineAsRegister(cont->result());
@@ -1015,33 +1142,21 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
}
}
-// Determines if {input} of {node} can be replaced by a memory operand.
-bool CanUseMemoryOperand(InstructionSelector* selector, InstructionCode opcode,
- Node* node, Node* input) {
- if (input->opcode() != IrOpcode::kLoad || !selector->CanCover(node, input)) {
- return false;
- }
- MachineRepresentation load_representation =
- LoadRepresentationOf(input->op()).representation();
- if (load_representation == MachineRepresentation::kWord32 ||
- load_representation == MachineRepresentation::kTagged) {
- return opcode == kIA32Cmp || opcode == kIA32Test;
- }
- return false;
-}
-
// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
InstructionOperand left, InstructionOperand right,
FlagsContinuation* cont) {
IA32OperandGenerator g(selector);
+ opcode = cont->Encode(opcode);
if (cont->IsBranch()) {
- selector->Emit(cont->Encode(opcode), g.NoOutput(), left, right,
+ selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
+ } else if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+ cont->frame_state());
} else {
DCHECK(cont->IsSet());
- selector->Emit(cont->Encode(opcode), g.DefineAsByteRegister(cont->result()),
- left, right);
+ selector->Emit(opcode, g.DefineAsByteRegister(cont->result()), left, right);
}
}
@@ -1057,6 +1172,36 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
}
+// Tries to match the size of the given opcode to that of the operands, if
+// possible.
+InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
+ Node* right) {
+ if (opcode != kIA32Cmp && opcode != kIA32Test) {
+ return opcode;
+ }
+ // Currently, if one of the two operands is not a Load, we don't know what its
+ // machine representation is, so we bail out.
+ // TODO(epertoso): we can probably get some size information out of immediates
+ // and phi nodes.
+ if (left->opcode() != IrOpcode::kLoad || right->opcode() != IrOpcode::kLoad) {
+ return opcode;
+ }
+ // If the load representations don't match, both operands will be
+ // zero/sign-extended to 32bit.
+ LoadRepresentation left_representation = LoadRepresentationOf(left->op());
+ if (left_representation != LoadRepresentationOf(right->op())) {
+ return opcode;
+ }
+ switch (left_representation.representation()) {
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kWord8:
+ return opcode == kIA32Cmp ? kIA32Cmp8 : kIA32Test8;
+ case MachineRepresentation::kWord16:
+ return opcode == kIA32Cmp ? kIA32Cmp16 : kIA32Test16;
+ default:
+ return opcode;
+ }
+}
// Shared routine for multiple float32 compare operations (inputs commuted).
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
@@ -1082,15 +1227,28 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
- // If one of the two inputs is an immediate, make sure it's on the right.
- if (!g.CanBeImmediate(right) && g.CanBeImmediate(left)) {
+ InstructionCode narrowed_opcode = TryNarrowOpcodeSize(opcode, left, right);
+
+ int effect_level = selector->GetEffectLevel(node);
+ if (cont->IsBranch()) {
+ effect_level = selector->GetEffectLevel(
+ cont->true_block()->PredecessorAt(0)->control_input());
+ }
+
+ // If one of the two inputs is an immediate, make sure it's on the right, or
+ // if one of the two inputs is a memory operand, make sure it's on the left.
+ if ((!g.CanBeImmediate(right) && g.CanBeImmediate(left)) ||
+ (g.CanBeMemoryOperand(narrowed_opcode, node, right, effect_level) &&
+ !g.CanBeMemoryOperand(narrowed_opcode, node, left, effect_level))) {
if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
std::swap(left, right);
}
// Match immediates on right side of comparison.
if (g.CanBeImmediate(right)) {
- if (CanUseMemoryOperand(selector, opcode, node, left)) {
+ if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
+ // TODO(epertoso): we should use `narrowed_opcode' here once we match
+ // immediates too.
return VisitCompareWithMemoryOperand(selector, opcode, left,
g.UseImmediate(right), cont);
}
@@ -1098,15 +1256,21 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
cont);
}
+ // Match memory operands on left side of comparison.
+ if (g.CanBeMemoryOperand(narrowed_opcode, node, left, effect_level)) {
+ bool needs_byte_register =
+ narrowed_opcode == kIA32Test8 || narrowed_opcode == kIA32Cmp8;
+ return VisitCompareWithMemoryOperand(
+ selector, narrowed_opcode, left,
+ needs_byte_register ? g.UseByteRegister(right) : g.UseRegister(right),
+ cont);
+ }
+
if (g.CanBeBetterLeftOperand(right)) {
if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
std::swap(left, right);
}
- if (CanUseMemoryOperand(selector, opcode, node, left)) {
- return VisitCompareWithMemoryOperand(selector, opcode, left,
- g.UseRegister(right), cont);
- }
return VisitCompare(selector, opcode, left, right, cont,
node->op()->HasProperty(Operator::kCommutative));
}
@@ -1126,6 +1290,9 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
if (cont->IsBranch()) {
selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
g.Label(cont->false_block()));
+ } else if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr,
+ cont->frame_state());
} else {
DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()));
@@ -1228,13 +1395,23 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
} // namespace
-
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
BasicBlock* fbranch) {
FlagsContinuation cont(kNotEqual, tbranch, fbranch);
VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
}
+void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
IA32OperandGenerator g(this);
@@ -1265,7 +1442,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
void InstructionSelector::VisitWord32Equal(Node* const node) {
- FlagsContinuation cont(kEqual, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int32BinopMatcher m(node);
if (m.right().Is(0)) {
return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
@@ -1275,32 +1452,34 @@ void InstructionSelector::VisitWord32Equal(Node* const node) {
void InstructionSelector::VisitInt32LessThan(Node* node) {
- FlagsContinuation cont(kSignedLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
VisitWordCompare(this, node, &cont);
}
void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kSignedLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
VisitWordCompare(this, node, &cont);
}
void InstructionSelector::VisitUint32LessThan(Node* node) {
- FlagsContinuation cont(kUnsignedLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitWordCompare(this, node, &cont);
}
void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitWordCompare(this, node, &cont);
}
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont(kOverflow, ovf);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
return VisitBinop(this, node, kIA32Add, &cont);
}
FlagsContinuation cont;
@@ -1310,7 +1489,7 @@ void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont(kOverflow, ovf);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
return VisitBinop(this, node, kIA32Sub, &cont);
}
FlagsContinuation cont;
@@ -1319,37 +1498,41 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
void InstructionSelector::VisitFloat32Equal(Node* node) {
- FlagsContinuation cont(kUnorderedEqual, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat32LessThan(Node* node) {
- FlagsContinuation cont(kUnsignedGreaterThan, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64Equal(Node* node) {
- FlagsContinuation cont(kUnorderedEqual, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThan(Node* node) {
- FlagsContinuation cont(kUnsignedGreaterThan, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
VisitFloat64Compare(this, node, &cont);
}
diff --git a/deps/v8/src/compiler/instruction-codes.h b/deps/v8/src/compiler/instruction-codes.h
index d2144cf638..b005083a85 100644
--- a/deps/v8/src/compiler/instruction-codes.h
+++ b/deps/v8/src/compiler/instruction-codes.h
@@ -21,6 +21,8 @@
#include "src/compiler/x64/instruction-codes-x64.h"
#elif V8_TARGET_ARCH_PPC
#include "src/compiler/ppc/instruction-codes-ppc.h"
+#elif V8_TARGET_ARCH_S390
+#include "src/compiler/s390/instruction-codes-s390.h"
#elif V8_TARGET_ARCH_X87
#include "src/compiler/x87/instruction-codes-x87.h"
#else
@@ -39,40 +41,42 @@ enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };
// Target-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define COMMON_ARCH_OPCODE_LIST(V) \
- V(ArchCallCodeObject) \
- V(ArchTailCallCodeObject) \
- V(ArchCallJSFunction) \
- V(ArchTailCallJSFunction) \
- V(ArchPrepareCallCFunction) \
- V(ArchCallCFunction) \
- V(ArchPrepareTailCall) \
- V(ArchJmp) \
- V(ArchLookupSwitch) \
- V(ArchTableSwitch) \
- V(ArchNop) \
- V(ArchThrowTerminator) \
- V(ArchDeoptimize) \
- V(ArchRet) \
- V(ArchStackPointer) \
- V(ArchFramePointer) \
- V(ArchParentFramePointer) \
- V(ArchTruncateDoubleToI) \
- V(ArchStoreWithWriteBarrier) \
- V(CheckedLoadInt8) \
- V(CheckedLoadUint8) \
- V(CheckedLoadInt16) \
- V(CheckedLoadUint16) \
- V(CheckedLoadWord32) \
- V(CheckedLoadWord64) \
- V(CheckedLoadFloat32) \
- V(CheckedLoadFloat64) \
- V(CheckedStoreWord8) \
- V(CheckedStoreWord16) \
- V(CheckedStoreWord32) \
- V(CheckedStoreWord64) \
- V(CheckedStoreFloat32) \
- V(CheckedStoreFloat64) \
+#define COMMON_ARCH_OPCODE_LIST(V) \
+ V(ArchCallCodeObject) \
+ V(ArchTailCallCodeObjectFromJSFunction) \
+ V(ArchTailCallCodeObject) \
+ V(ArchCallJSFunction) \
+ V(ArchTailCallJSFunctionFromJSFunction) \
+ V(ArchTailCallJSFunction) \
+ V(ArchPrepareCallCFunction) \
+ V(ArchCallCFunction) \
+ V(ArchPrepareTailCall) \
+ V(ArchJmp) \
+ V(ArchLookupSwitch) \
+ V(ArchTableSwitch) \
+ V(ArchNop) \
+ V(ArchThrowTerminator) \
+ V(ArchDeoptimize) \
+ V(ArchRet) \
+ V(ArchStackPointer) \
+ V(ArchFramePointer) \
+ V(ArchParentFramePointer) \
+ V(ArchTruncateDoubleToI) \
+ V(ArchStoreWithWriteBarrier) \
+ V(CheckedLoadInt8) \
+ V(CheckedLoadUint8) \
+ V(CheckedLoadInt16) \
+ V(CheckedLoadUint16) \
+ V(CheckedLoadWord32) \
+ V(CheckedLoadWord64) \
+ V(CheckedLoadFloat32) \
+ V(CheckedLoadFloat64) \
+ V(CheckedStoreWord8) \
+ V(CheckedStoreWord16) \
+ V(CheckedStoreWord32) \
+ V(CheckedStoreWord64) \
+ V(CheckedStoreFloat32) \
+ V(CheckedStoreFloat64) \
V(ArchStackSlot)
#define ARCH_OPCODE_LIST(V) \
@@ -110,7 +114,12 @@ enum AddressingMode {
std::ostream& operator<<(std::ostream& os, const AddressingMode& am);
// The mode of the flags continuation (see below).
-enum FlagsMode { kFlags_none = 0, kFlags_branch = 1, kFlags_set = 2 };
+enum FlagsMode {
+ kFlags_none = 0,
+ kFlags_branch = 1,
+ kFlags_deoptimize = 2,
+ kFlags_set = 3
+};
std::ostream& operator<<(std::ostream& os, const FlagsMode& fm);
diff --git a/deps/v8/src/compiler/instruction-scheduler.cc b/deps/v8/src/compiler/instruction-scheduler.cc
index adbfd5d10d..b612cd1e9e 100644
--- a/deps/v8/src/compiler/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/instruction-scheduler.cc
@@ -115,7 +115,7 @@ void InstructionScheduler::AddInstruction(Instruction* instr) {
if (IsBlockTerminator(instr)) {
// Make sure that basic block terminators are not moved by adding them
// as successor of every instruction.
- for (auto node : graph_) {
+ for (ScheduleGraphNode* node : graph_) {
node->AddSuccessor(new_node);
}
} else if (IsFixedRegisterParameter(instr)) {
@@ -134,7 +134,7 @@ void InstructionScheduler::AddInstruction(Instruction* instr) {
if (last_side_effect_instr_ != nullptr) {
last_side_effect_instr_->AddSuccessor(new_node);
}
- for (auto load : pending_loads_) {
+ for (ScheduleGraphNode* load : pending_loads_) {
load->AddSuccessor(new_node);
}
pending_loads_.clear();
@@ -149,7 +149,7 @@ void InstructionScheduler::AddInstruction(Instruction* instr) {
}
// Look for operand dependencies.
- for (auto node : graph_) {
+ for (ScheduleGraphNode* node : graph_) {
if (HasOperandDependency(node->instruction(), instr)) {
node->AddSuccessor(new_node);
}
@@ -168,7 +168,7 @@ void InstructionScheduler::ScheduleBlock() {
ComputeTotalLatencies();
// Add nodes which don't have dependencies to the ready list.
- for (auto node : graph_) {
+ for (ScheduleGraphNode* node : graph_) {
if (!node->HasUnscheduledPredecessor()) {
ready_list.AddNode(node);
}
@@ -177,12 +177,12 @@ void InstructionScheduler::ScheduleBlock() {
// Go through the ready list and schedule the instructions.
int cycle = 0;
while (!ready_list.IsEmpty()) {
- auto candidate = ready_list.PopBestCandidate(cycle);
+ ScheduleGraphNode* candidate = ready_list.PopBestCandidate(cycle);
if (candidate != nullptr) {
sequence()->AddInstruction(candidate->instruction());
- for (auto successor : candidate->successors()) {
+ for (ScheduleGraphNode* successor : candidate->successors()) {
successor->DropUnscheduledPredecessor();
successor->set_start_cycle(
std::max(successor->start_cycle(),
@@ -220,7 +220,9 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kArchCallJSFunction:
return kHasSideEffect;
+ case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject:
+ case kArchTailCallJSFunctionFromJSFunction:
case kArchTailCallJSFunction:
return kHasSideEffect | kIsBlockTerminator;
@@ -296,10 +298,10 @@ bool InstructionScheduler::IsBlockTerminator(const Instruction* instr) const {
void InstructionScheduler::ComputeTotalLatencies() {
- for (auto node : base::Reversed(graph_)) {
+ for (ScheduleGraphNode* node : base::Reversed(graph_)) {
int max_latency = 0;
- for (auto successor : node->successors()) {
+ for (ScheduleGraphNode* successor : node->successors()) {
DCHECK(successor->total_latency() != -1);
if (successor->total_latency() > max_latency) {
max_latency = successor->total_latency();
diff --git a/deps/v8/src/compiler/instruction-selector-impl.h b/deps/v8/src/compiler/instruction-selector-impl.h
index 5cca8880d5..e750aed19a 100644
--- a/deps/v8/src/compiler/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/instruction-selector-impl.h
@@ -303,22 +303,32 @@ class FlagsContinuation final {
DCHECK_NOT_NULL(false_block);
}
- // Creates a new flags continuation from the given condition and result node.
- FlagsContinuation(FlagsCondition condition, Node* result)
- : mode_(kFlags_set), condition_(condition), result_(result) {
- DCHECK_NOT_NULL(result);
+ // Creates a new flags continuation for an eager deoptimization exit.
+ static FlagsContinuation ForDeoptimize(FlagsCondition condition,
+ Node* frame_state) {
+ return FlagsContinuation(kFlags_deoptimize, condition, frame_state);
+ }
+
+ // Creates a new flags continuation for a boolean value.
+ static FlagsContinuation ForSet(FlagsCondition condition, Node* result) {
+ return FlagsContinuation(kFlags_set, condition, result);
}
bool IsNone() const { return mode_ == kFlags_none; }
bool IsBranch() const { return mode_ == kFlags_branch; }
+ bool IsDeoptimize() const { return mode_ == kFlags_deoptimize; }
bool IsSet() const { return mode_ == kFlags_set; }
FlagsCondition condition() const {
DCHECK(!IsNone());
return condition_;
}
+ Node* frame_state() const {
+ DCHECK(IsDeoptimize());
+ return frame_state_or_result_;
+ }
Node* result() const {
DCHECK(IsSet());
- return result_;
+ return frame_state_or_result_;
}
BasicBlock* true_block() const {
DCHECK(IsBranch());
@@ -355,11 +365,20 @@ class FlagsContinuation final {
}
private:
- FlagsMode mode_;
+ FlagsContinuation(FlagsMode mode, FlagsCondition condition,
+ Node* frame_state_or_result)
+ : mode_(mode),
+ condition_(condition),
+ frame_state_or_result_(frame_state_or_result) {
+ DCHECK_NOT_NULL(frame_state_or_result);
+ }
+
+ FlagsMode const mode_;
FlagsCondition condition_;
- Node* result_; // Only valid if mode_ == kFlags_set.
- BasicBlock* true_block_; // Only valid if mode_ == kFlags_branch.
- BasicBlock* false_block_; // Only valid if mode_ == kFlags_branch.
+ Node* frame_state_or_result_; // Only valid if mode_ == kFlags_deoptimize
+ // or mode_ == kFlags_set.
+ BasicBlock* true_block_; // Only valid if mode_ == kFlags_branch.
+ BasicBlock* false_block_; // Only valid if mode_ == kFlags_branch.
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index 0f27e50dc9..b7162fe5dc 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -82,6 +82,9 @@ void InstructionSelector::SelectInstructions() {
}
EndBlock(RpoNumber::FromInt(block->rpo_number()));
}
+#if DEBUG
+ sequence()->ValidateSSA();
+#endif
}
@@ -218,10 +221,25 @@ Instruction* InstructionSelector::Emit(Instruction* instr) {
bool InstructionSelector::CanCover(Node* user, Node* node) const {
- return node->OwnedBy(user) &&
- schedule()->block(node) == schedule()->block(user) &&
- (node->op()->HasProperty(Operator::kPure) ||
- GetEffectLevel(node) == GetEffectLevel(user));
+ // 1. Both {user} and {node} must be in the same basic block.
+ if (schedule()->block(node) != schedule()->block(user)) {
+ return false;
+ }
+ // 2. Pure {node}s must be owned by the {user}.
+ if (node->op()->HasProperty(Operator::kPure)) {
+ return node->OwnedBy(user);
+ }
+ // 3. Impure {node}s must match the effect level of {user}.
+ if (GetEffectLevel(node) != GetEffectLevel(user)) {
+ return false;
+ }
+ // 4. Only {node} must have value edges pointing to {user}.
+ for (Edge const edge : node->use_edges()) {
+ if (edge.from() != user && NodeProperties::IsValueEdge(edge)) {
+ return false;
+ }
+ }
+ return true;
}
int InstructionSelector::GetVirtualRegister(const Node* node) {
@@ -597,15 +615,17 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
call->InputAt(static_cast<int>(buffer->descriptor->InputCount()));
// If it was a syntactic tail call we need to drop the current frame and
- // an arguments adaptor frame on top of it (if the latter is present).
+ // all the frames on top of it that are either an arguments adaptor frame
+ // or a tail caller frame.
if (buffer->descriptor->SupportsTailCalls()) {
frame_state = NodeProperties::GetFrameStateInput(frame_state, 0);
buffer->frame_state_descriptor =
buffer->frame_state_descriptor->outer_state();
-
- if (buffer->frame_state_descriptor != nullptr &&
- buffer->frame_state_descriptor->type() ==
- FrameStateType::kArgumentsAdaptor) {
+ while (buffer->frame_state_descriptor != nullptr &&
+ (buffer->frame_state_descriptor->type() ==
+ FrameStateType::kArgumentsAdaptor ||
+ buffer->frame_state_descriptor->type() ==
+ FrameStateType::kTailCallerFunction)) {
frame_state = NodeProperties::GetFrameStateInput(frame_state, 0);
buffer->frame_state_descriptor =
buffer->frame_state_descriptor->outer_state();
@@ -694,6 +714,12 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
SetEffectLevel(node, effect_level);
}
+ // We visit the control first, then the nodes in the block, so the block's
+ // control input should be on the same effect level as the last node.
+ if (block->control_input() != nullptr) {
+ SetEffectLevel(block->control_input(), effect_level);
+ }
+
// Generate code for the block control "top down", but schedule the code
// "bottom up".
VisitControl(block);
@@ -873,6 +899,10 @@ void InstructionSelector::VisitNode(Node* node) {
}
case IrOpcode::kCall:
return VisitCall(node);
+ case IrOpcode::kDeoptimizeIf:
+ return VisitDeoptimizeIf(node);
+ case IrOpcode::kDeoptimizeUnless:
+ return VisitDeoptimizeUnless(node);
case IrOpcode::kFrameState:
case IrOpcode::kStateValues:
case IrOpcode::kObjectState:
@@ -998,6 +1028,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsWord32(node), VisitChangeFloat64ToInt32(node);
case IrOpcode::kChangeFloat64ToUint32:
return MarkAsWord32(node), VisitChangeFloat64ToUint32(node);
+ case IrOpcode::kTruncateFloat64ToUint32:
+ return MarkAsWord32(node), VisitTruncateFloat64ToUint32(node);
case IrOpcode::kTruncateFloat32ToInt32:
return MarkAsWord32(node), VisitTruncateFloat32ToInt32(node);
case IrOpcode::kTruncateFloat32ToUint32:
@@ -1128,6 +1160,30 @@ void InstructionSelector::VisitNode(Node* node) {
}
case IrOpcode::kCheckedStore:
return VisitCheckedStore(node);
+ case IrOpcode::kInt32PairAdd:
+ MarkAsWord32(NodeProperties::FindProjection(node, 0));
+ MarkAsWord32(NodeProperties::FindProjection(node, 1));
+ return VisitInt32PairAdd(node);
+ case IrOpcode::kInt32PairSub:
+ MarkAsWord32(NodeProperties::FindProjection(node, 0));
+ MarkAsWord32(NodeProperties::FindProjection(node, 1));
+ return VisitInt32PairSub(node);
+ case IrOpcode::kInt32PairMul:
+ MarkAsWord32(NodeProperties::FindProjection(node, 0));
+ MarkAsWord32(NodeProperties::FindProjection(node, 1));
+ return VisitInt32PairMul(node);
+ case IrOpcode::kWord32PairShl:
+ MarkAsWord32(NodeProperties::FindProjection(node, 0));
+ MarkAsWord32(NodeProperties::FindProjection(node, 1));
+ return VisitWord32PairShl(node);
+ case IrOpcode::kWord32PairShr:
+ MarkAsWord32(NodeProperties::FindProjection(node, 0));
+ MarkAsWord32(NodeProperties::FindProjection(node, 1));
+ return VisitWord32PairShr(node);
+ case IrOpcode::kWord32PairSar:
+ MarkAsWord32(NodeProperties::FindProjection(node, 0));
+ MarkAsWord32(NodeProperties::FindProjection(node, 1));
+ return VisitWord32PairSar(node);
default:
V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
node->opcode(), node->op()->mnemonic(), node->id());
@@ -1144,7 +1200,6 @@ void InstructionSelector::VisitLoadStackPointer(Node* node) {
void InstructionSelector::VisitLoadFramePointer(Node* node) {
OperandGenerator g(this);
- frame_->MarkNeedsFrame();
Emit(kArchFramePointer, g.DefineAsRegister(node));
}
@@ -1351,6 +1406,20 @@ void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
#endif // V8_TARGET_ARCH_32_BIT
+// 64 bit targets do not implement the following instructions.
+#if V8_TARGET_ARCH_64_BIT
+void InstructionSelector::VisitInt32PairAdd(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32PairSub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32PairMul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitWord32PairShl(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
+#endif // V8_TARGET_ARCH_64_BIT
void InstructionSelector::VisitFinishRegion(Node* node) {
OperandGenerator g(this);
@@ -1429,6 +1498,12 @@ void InstructionSelector::VisitProjection(Node* node) {
case IrOpcode::kTryTruncateFloat64ToInt64:
case IrOpcode::kTryTruncateFloat32ToUint64:
case IrOpcode::kTryTruncateFloat64ToUint64:
+ case IrOpcode::kInt32PairAdd:
+ case IrOpcode::kInt32PairSub:
+ case IrOpcode::kInt32PairMul:
+ case IrOpcode::kWord32PairShl:
+ case IrOpcode::kWord32PairShr:
+ case IrOpcode::kWord32PairSar:
if (ProjectionIndexOf(node->op()) == 0u) {
Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
} else {
@@ -1484,13 +1559,15 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
buffer.instruction_args.push_back(g.Label(handler));
}
- // (arm64 only) caller uses JSSP but callee might destroy it.
- if (descriptor->UseNativeStack() &&
- !linkage()->GetIncomingDescriptor()->UseNativeStack()) {
- flags |= CallDescriptor::kRestoreJSSP;
+ bool from_native_stack = linkage()->GetIncomingDescriptor()->UseNativeStack();
+ bool to_native_stack = descriptor->UseNativeStack();
+ if (from_native_stack != to_native_stack) {
+ // (arm64 only) Mismatch in the use of stack pointers. One or the other
+ // has to be restored manually by the code generator.
+ flags |= to_native_stack ? CallDescriptor::kRestoreJSSP
+ : CallDescriptor::kRestoreCSP;
}
-
// Select the appropriate opcode based on the call type.
InstructionCode opcode = kArchNop;
switch (descriptor->kind()) {
@@ -1539,16 +1616,35 @@ void InstructionSelector::VisitTailCall(Node* node) {
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject:
- opcode = kArchTailCallCodeObject;
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchTailCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
+ InstructionOperandVector temps(zone());
+ if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchTailCallCodeObjectFromJSFunction;
+ break;
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchTailCallJSFunctionFromJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ int temps_count = GetTempsCountForTailCallFromJSFunction();
+ for (int i = 0; i < temps_count; i++) {
+ temps.push_back(g.TempRegister());
+ }
+ } else {
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchTailCallCodeObject;
+ break;
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchTailCallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
}
opcode |= MiscField::encode(descriptor->flags());
@@ -1559,7 +1655,8 @@ void InstructionSelector::VisitTailCall(Node* node) {
// Emit the tailcall instruction.
Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
- &buffer.instruction_args.front());
+ &buffer.instruction_args.front(), temps.size(),
+ temps.empty() ? nullptr : &temps.front());
} else {
FrameStateDescriptor* frame_state_descriptor =
descriptor->NeedsFrameState()
@@ -1627,25 +1724,41 @@ void InstructionSelector::VisitReturn(Node* ret) {
}
}
+Instruction* InstructionSelector::EmitDeoptimize(InstructionCode opcode,
+ InstructionOperand output,
+ InstructionOperand a,
+ InstructionOperand b,
+ Node* frame_state) {
+ size_t output_count = output.IsInvalid() ? 0 : 1;
+ InstructionOperand inputs[] = {a, b};
+ size_t input_count = arraysize(inputs);
+ return EmitDeoptimize(opcode, output_count, &output, input_count, inputs,
+ frame_state);
+}
-void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind, Node* value) {
+Instruction* InstructionSelector::EmitDeoptimize(
+ InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
+ size_t input_count, InstructionOperand* inputs, Node* frame_state) {
OperandGenerator g(this);
-
- FrameStateDescriptor* desc = GetFrameStateDescriptor(value);
-
+ FrameStateDescriptor* const descriptor = GetFrameStateDescriptor(frame_state);
InstructionOperandVector args(instruction_zone());
- args.reserve(desc->GetTotalSize() + 1); // Include deopt id.
-
- InstructionSequence::StateId state_id =
- sequence()->AddFrameStateDescriptor(desc);
+ args.reserve(input_count + 1 + descriptor->GetTotalSize());
+ for (size_t i = 0; i < input_count; ++i) {
+ args.push_back(inputs[i]);
+ }
+ opcode |= MiscField::encode(static_cast<int>(input_count));
+ InstructionSequence::StateId const state_id =
+ sequence()->AddFrameStateDescriptor(descriptor);
args.push_back(g.TempImmediate(state_id.ToInt()));
-
StateObjectDeduplicator deduplicator(instruction_zone());
-
- AddInputsToFrameStateDescriptor(desc, value, &g, &deduplicator, &args,
- FrameStateInputKind::kAny,
+ AddInputsToFrameStateDescriptor(descriptor, frame_state, &g, &deduplicator,
+ &args, FrameStateInputKind::kAny,
instruction_zone());
+ return Emit(opcode, output_count, outputs, args.size(), &args.front(), 0,
+ nullptr);
+}
+void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind, Node* value) {
InstructionCode opcode = kArchDeoptimize;
switch (kind) {
case DeoptimizeKind::kEager:
@@ -1655,7 +1768,7 @@ void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind, Node* value) {
opcode |= MiscField::encode(Deoptimizer::SOFT);
break;
}
- Emit(opcode, 0, nullptr, args.size(), &args.front(), 0, nullptr);
+ EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, value);
}
diff --git a/deps/v8/src/compiler/instruction-selector.h b/deps/v8/src/compiler/instruction-selector.h
index a01cab4dab..9c1cd4ca0c 100644
--- a/deps/v8/src/compiler/instruction-selector.h
+++ b/deps/v8/src/compiler/instruction-selector.h
@@ -100,6 +100,17 @@ class InstructionSelector final {
Instruction* Emit(Instruction* instr);
// ===========================================================================
+ // ===== Architecture-independent deoptimization exit emission methods. ======
+ // ===========================================================================
+
+ Instruction* EmitDeoptimize(InstructionCode opcode, InstructionOperand output,
+ InstructionOperand a, InstructionOperand b,
+ Node* frame_state);
+ Instruction* EmitDeoptimize(InstructionCode opcode, size_t output_count,
+ InstructionOperand* outputs, size_t input_count,
+ InstructionOperand* inputs, Node* frame_state);
+
+ // ===========================================================================
// ============== Architecture-independent CPU feature methods. ==============
// ===========================================================================
@@ -213,6 +224,7 @@ class InstructionSelector final {
void InitializeCallBuffer(Node* call, CallBuffer* buffer,
CallBufferFlags flags, int stack_param_delta = 0);
bool IsTailCallAddressImmediate();
+ int GetTempsCountForTailCallFromJSFunction();
FrameStateDescriptor* GetFrameStateDescriptor(Node* node);
@@ -243,6 +255,8 @@ class InstructionSelector final {
void VisitProjection(Node* node);
void VisitConstant(Node* node);
void VisitCall(Node* call, BasicBlock* handler = nullptr);
+ void VisitDeoptimizeIf(Node* node);
+ void VisitDeoptimizeUnless(Node* node);
void VisitTailCall(Node* call);
void VisitGoto(BasicBlock* target);
void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
diff --git a/deps/v8/src/compiler/instruction.cc b/deps/v8/src/compiler/instruction.cc
index d4ec6bc943..c757557a0d 100644
--- a/deps/v8/src/compiler/instruction.cc
+++ b/deps/v8/src/compiler/instruction.cc
@@ -114,7 +114,7 @@ std::ostream& operator<<(std::ostream& os,
return os << "[constant:" << ConstantOperand::cast(op).virtual_register()
<< "]";
case InstructionOperand::IMMEDIATE: {
- auto imm = ImmediateOperand::cast(op);
+ ImmediateOperand imm = ImmediateOperand::cast(op);
switch (imm.type()) {
case ImmediateOperand::INLINE:
return os << "#" << imm.inline_value();
@@ -124,7 +124,7 @@ std::ostream& operator<<(std::ostream& os,
}
case InstructionOperand::EXPLICIT:
case InstructionOperand::ALLOCATED: {
- auto allocated = LocationOperand::cast(op);
+ LocationOperand allocated = LocationOperand::cast(op);
if (op.IsStackSlot()) {
os << "[stack:" << LocationOperand::cast(op).index();
} else if (op.IsDoubleStackSlot()) {
@@ -214,7 +214,7 @@ std::ostream& operator<<(std::ostream& os,
bool ParallelMove::IsRedundant() const {
- for (auto move : *this) {
+ for (MoveOperands* move : *this) {
if (!move->IsRedundant()) return false;
}
return true;
@@ -224,7 +224,7 @@ bool ParallelMove::IsRedundant() const {
MoveOperands* ParallelMove::PrepareInsertAfter(MoveOperands* move) const {
MoveOperands* replacement = nullptr;
MoveOperands* to_eliminate = nullptr;
- for (auto curr : *this) {
+ for (MoveOperands* curr : *this) {
if (curr->IsEliminated()) continue;
if (curr->destination().EqualsCanonicalized(move->source())) {
DCHECK(!replacement);
@@ -321,7 +321,7 @@ std::ostream& operator<<(std::ostream& os,
const PrintableParallelMove& printable) {
const ParallelMove& pm = *printable.parallel_move_;
bool first = true;
- for (auto move : pm) {
+ for (MoveOperands* move : pm) {
if (move->IsEliminated()) continue;
if (!first) os << " ";
first = false;
@@ -346,7 +346,7 @@ std::ostream& operator<<(std::ostream& os, const ReferenceMap& pm) {
PrintableInstructionOperand poi = {
RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
InstructionOperand()};
- for (auto& op : pm.reference_operands_) {
+ for (const InstructionOperand& op : pm.reference_operands_) {
if (!first) {
os << ";";
} else {
@@ -393,6 +393,8 @@ std::ostream& operator<<(std::ostream& os, const FlagsMode& fm) {
return os;
case kFlags_branch:
return os << "branch";
+ case kFlags_deoptimize:
+ return os << "deoptimize";
case kFlags_set:
return os << "set";
}
@@ -618,7 +620,7 @@ InstructionBlocks* InstructionSequence::InstructionBlocksFor(
return blocks;
}
-void InstructionSequence::Validate() {
+void InstructionSequence::ValidateEdgeSplitForm() {
// Validate blocks are in edge-split form: no block with multiple successors
// has an edge to a block (== a successor) with more than one predecessors.
for (const InstructionBlock* block : instruction_blocks()) {
@@ -633,14 +635,40 @@ void InstructionSequence::Validate() {
}
}
+void InstructionSequence::ValidateDeferredBlockExitPaths() {
+ // A deferred block with more than one successor must have all its successors
+ // deferred.
+ for (const InstructionBlock* block : instruction_blocks()) {
+ if (!block->IsDeferred() || block->SuccessorCount() <= 1) continue;
+ for (RpoNumber successor_id : block->successors()) {
+ CHECK(InstructionBlockAt(successor_id)->IsDeferred());
+ }
+ }
+}
+
+void InstructionSequence::ValidateSSA() {
+ // TODO(mtrofin): We could use a local zone here instead.
+ BitVector definitions(VirtualRegisterCount(), zone());
+ for (const Instruction* instruction : *this) {
+ for (size_t i = 0; i < instruction->OutputCount(); ++i) {
+ const InstructionOperand* output = instruction->OutputAt(i);
+ int vreg = (output->IsConstant())
+ ? ConstantOperand::cast(output)->virtual_register()
+ : UnallocatedOperand::cast(output)->virtual_register();
+ CHECK(!definitions.Contains(vreg));
+ definitions.Add(vreg);
+ }
+ }
+}
+
void InstructionSequence::ComputeAssemblyOrder(InstructionBlocks* blocks) {
int ao = 0;
- for (auto const block : *blocks) {
+ for (InstructionBlock* const block : *blocks) {
if (!block->IsDeferred()) {
block->set_ao_number(RpoNumber::FromInt(ao++));
}
}
- for (auto const block : *blocks) {
+ for (InstructionBlock* const block : *blocks) {
if (block->IsDeferred()) {
block->set_ao_number(RpoNumber::FromInt(ao++));
}
@@ -665,10 +693,6 @@ InstructionSequence::InstructionSequence(Isolate* isolate,
representations_(zone()),
deoptimization_entries_(zone()) {
block_starts_.reserve(instruction_blocks_->size());
-
-#if DEBUG
- Validate();
-#endif
}
@@ -730,7 +754,7 @@ InstructionBlock* InstructionSequence::GetInstructionBlock(
if (end == block_starts_.end() || *end > instruction_index) --end;
DCHECK(*end <= instruction_index);
size_t index = std::distance(begin, end);
- auto block = instruction_blocks_->at(index);
+ InstructionBlock* block = instruction_blocks_->at(index);
DCHECK(block->code_start() <= instruction_index &&
instruction_index < block->code_end());
return block;
@@ -861,15 +885,15 @@ void InstructionSequence::PrintBlock(const RegisterConfiguration* config,
os << " instructions: [" << block->code_start() << ", " << block->code_end()
<< ")\n predecessors:";
- for (auto pred : block->predecessors()) {
+ for (RpoNumber pred : block->predecessors()) {
os << " B" << pred.ToInt();
}
os << "\n";
- for (auto phi : block->phis()) {
+ for (const PhiInstruction* phi : block->phis()) {
PrintableInstructionOperand printable_op = {config, phi->output()};
os << " phi: " << printable_op << " =";
- for (auto input : phi->operands()) {
+ for (int input : phi->operands()) {
os << " v" << input;
}
os << "\n";
@@ -886,7 +910,7 @@ void InstructionSequence::PrintBlock(const RegisterConfiguration* config,
os << " " << buf.start() << ": " << printable_instr << "\n";
}
- for (auto succ : block->successors()) {
+ for (RpoNumber succ : block->successors()) {
os << " B" << succ.ToInt();
}
os << "\n";
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/instruction.h
index 9c978cee7c..a1fe494761 100644
--- a/deps/v8/src/compiler/instruction.h
+++ b/deps/v8/src/compiler/instruction.h
@@ -773,6 +773,9 @@ class Instruction final {
DCHECK(output_count == 0 || outputs != nullptr);
DCHECK(input_count == 0 || inputs != nullptr);
DCHECK(temp_count == 0 || temps != nullptr);
+ // TODO(jarin/mstarzinger): Handle this gracefully. See crbug.com/582702.
+ CHECK(InputCountField::is_valid(input_count));
+
size_t total_extra_ops = output_count + input_count + temp_count;
if (total_extra_ops != 0) total_extra_ops--;
int size = static_cast<int>(
@@ -812,6 +815,23 @@ class Instruction final {
OutputCount() == 0 && TempCount() == 0;
}
+ bool IsDeoptimizeCall() const {
+ return arch_opcode() == ArchOpcode::kArchDeoptimize ||
+ FlagsModeField::decode(opcode()) == kFlags_deoptimize;
+ }
+
+ bool IsJump() const { return arch_opcode() == ArchOpcode::kArchJmp; }
+ bool IsRet() const { return arch_opcode() == ArchOpcode::kArchRet; }
+ bool IsTailCall() const {
+ return arch_opcode() == ArchOpcode::kArchTailCallCodeObject ||
+ arch_opcode() == ArchOpcode::kArchTailCallCodeObjectFromJSFunction ||
+ arch_opcode() == ArchOpcode::kArchTailCallJSFunction ||
+ arch_opcode() == ArchOpcode::kArchTailCallJSFunctionFromJSFunction;
+ }
+ bool IsThrow() const {
+ return arch_opcode() == ArchOpcode::kArchThrowTerminator;
+ }
+
enum GapPosition {
START,
END,
@@ -1354,7 +1374,9 @@ class InstructionSequence final : public ZoneObject {
void PrintBlock(const RegisterConfiguration* config, int block_id) const;
void PrintBlock(int block_id) const;
- void Validate();
+ void ValidateEdgeSplitForm();
+ void ValidateDeferredBlockExitPaths();
+ void ValidateSSA();
private:
friend std::ostream& operator<<(std::ostream& os,
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index ff31abe518..8824a03dc9 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -4,9 +4,11 @@
#include "src/compiler/int64-lowering.h"
#include "src/compiler/common-operator.h"
+#include "src/compiler/diamond.h"
#include "src/compiler/graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
@@ -24,36 +26,44 @@ Int64Lowering::Int64Lowering(Graph* graph, MachineOperatorBuilder* machine,
graph_(graph),
machine_(machine),
common_(common),
- state_(graph, 4),
+ state_(graph, 3),
stack_(zone),
- replacements_(zone->NewArray<Replacement>(graph->NodeCount())),
- signature_(signature) {
+ replacements_(nullptr),
+ signature_(signature),
+ placeholder_(graph->NewNode(common->Parameter(-2, "placeholder"),
+ graph->start())) {
+ replacements_ = zone->NewArray<Replacement>(graph->NodeCount());
memset(replacements_, 0, sizeof(Replacement) * graph->NodeCount());
}
void Int64Lowering::LowerGraph() {
- if (4 != kPointerSize) {
+ if (!machine()->Is32()) {
return;
}
- stack_.push(graph()->end());
+ stack_.push_back({graph()->end(), 0});
state_.Set(graph()->end(), State::kOnStack);
while (!stack_.empty()) {
- Node* top = stack_.top();
- if (state_.Get(top) == State::kInputsPushed) {
- stack_.pop();
- state_.Set(top, State::kVisited);
- // All inputs of top have already been reduced, now reduce top.
- LowerNode(top);
+ NodeState& top = stack_.back();
+ if (top.input_index == top.node->InputCount()) {
+ // All inputs of top have already been lowered, now lower top.
+ stack_.pop_back();
+ state_.Set(top.node, State::kVisited);
+ LowerNode(top.node);
} else {
- // Push all children onto the stack.
- for (Node* input : top->inputs()) {
- if (state_.Get(input) == State::kUnvisited) {
- stack_.push(input);
- state_.Set(input, State::kOnStack);
+ // Push the next input onto the stack.
+ Node* input = top.node->InputAt(top.input_index++);
+ if (state_.Get(input) == State::kUnvisited) {
+ if (input->opcode() == IrOpcode::kPhi) {
+ // To break cycles with phi nodes we push phis on a separate stack so
+ // that they are processed after all other nodes.
+ PreparePhiReplacement(input);
+ stack_.push_front({input, 0});
+ } else {
+ stack_.push_back({input, 0});
}
+ state_.Set(input, State::kOnStack);
}
- state_.Set(top, State::kInputsPushed);
}
}
}
@@ -122,6 +132,8 @@ void Int64Lowering::LowerNode(Node* node) {
}
NodeProperties::ChangeOp(node, load_op);
ReplaceNode(node, node, high_node);
+ } else {
+ DefaultLowering(node);
}
break;
}
@@ -164,30 +176,11 @@ void Int64Lowering::LowerNode(Node* node) {
node->ReplaceInput(2, GetReplacementLow(value));
NodeProperties::ChangeOp(node, store_op);
ReplaceNode(node, node, high_node);
+ } else {
+ DefaultLowering(node);
}
break;
}
- case IrOpcode::kWord64And: {
- DCHECK(node->InputCount() == 2);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
-
- Node* low_node =
- graph()->NewNode(machine()->Word32And(), GetReplacementLow(left),
- GetReplacementLow(right));
- Node* high_node =
- graph()->NewNode(machine()->Word32And(), GetReplacementHigh(left),
- GetReplacementHigh(right));
- ReplaceNode(node, low_node, high_node);
- break;
- }
- case IrOpcode::kTruncateInt64ToInt32: {
- DCHECK(node->InputCount() == 1);
- Node* input = node->InputAt(0);
- ReplaceNode(node, GetReplacementLow(input), nullptr);
- node->NullAllInputs();
- break;
- }
case IrOpcode::kStart: {
int parameter_count = GetParameterCountAfterLowering(signature());
// Only exchange the node if the parameter count actually changed.
@@ -248,8 +241,517 @@ void Int64Lowering::LowerNode(Node* node) {
}
break;
}
+ case IrOpcode::kWord64And: {
+ DCHECK(node->InputCount() == 2);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+
+ Node* low_node =
+ graph()->NewNode(machine()->Word32And(), GetReplacementLow(left),
+ GetReplacementLow(right));
+ Node* high_node =
+ graph()->NewNode(machine()->Word32And(), GetReplacementHigh(left),
+ GetReplacementHigh(right));
+ ReplaceNode(node, low_node, high_node);
+ break;
+ }
+ case IrOpcode::kTruncateInt64ToInt32: {
+ DCHECK(node->InputCount() == 1);
+ Node* input = node->InputAt(0);
+ ReplaceNode(node, GetReplacementLow(input), nullptr);
+ node->NullAllInputs();
+ break;
+ }
+ // todo(ahaas): I added a list of missing instructions here to make merging
+ // easier when I do them one by one.
+ // kExprI64Add:
+ case IrOpcode::kInt64Add: {
+ DCHECK(node->InputCount() == 2);
+
+ Node* right = node->InputAt(1);
+ node->ReplaceInput(1, GetReplacementLow(right));
+ node->AppendInput(zone(), GetReplacementHigh(right));
+
+ Node* left = node->InputAt(0);
+ node->ReplaceInput(0, GetReplacementLow(left));
+ node->InsertInput(zone(), 1, GetReplacementHigh(left));
+
+ NodeProperties::ChangeOp(node, machine()->Int32PairAdd());
+ // We access the additional return values through projections.
+ Node* low_node = graph()->NewNode(common()->Projection(0), node);
+ Node* high_node = graph()->NewNode(common()->Projection(1), node);
+ ReplaceNode(node, low_node, high_node);
+ break;
+ }
+
+ // kExprI64Sub:
+ case IrOpcode::kInt64Sub: {
+ DCHECK(node->InputCount() == 2);
+
+ Node* right = node->InputAt(1);
+ node->ReplaceInput(1, GetReplacementLow(right));
+ node->AppendInput(zone(), GetReplacementHigh(right));
+
+ Node* left = node->InputAt(0);
+ node->ReplaceInput(0, GetReplacementLow(left));
+ node->InsertInput(zone(), 1, GetReplacementHigh(left));
+
+ NodeProperties::ChangeOp(node, machine()->Int32PairSub());
+ // We access the additional return values through projections.
+ Node* low_node = graph()->NewNode(common()->Projection(0), node);
+ Node* high_node = graph()->NewNode(common()->Projection(1), node);
+ ReplaceNode(node, low_node, high_node);
+ break;
+ }
+ // kExprI64Mul:
+ case IrOpcode::kInt64Mul: {
+ DCHECK(node->InputCount() == 2);
+
+ Node* right = node->InputAt(1);
+ node->ReplaceInput(1, GetReplacementLow(right));
+ node->AppendInput(zone(), GetReplacementHigh(right));
+
+ Node* left = node->InputAt(0);
+ node->ReplaceInput(0, GetReplacementLow(left));
+ node->InsertInput(zone(), 1, GetReplacementHigh(left));
+
+ NodeProperties::ChangeOp(node, machine()->Int32PairMul());
+ // We access the additional return values through projections.
+ Node* low_node = graph()->NewNode(common()->Projection(0), node);
+ Node* high_node = graph()->NewNode(common()->Projection(1), node);
+ ReplaceNode(node, low_node, high_node);
+ break;
+ }
+ // kExprI64DivS:
+ // kExprI64DivU:
+ // kExprI64RemS:
+ // kExprI64RemU:
+ // kExprI64Ior:
+ case IrOpcode::kWord64Or: {
+ DCHECK(node->InputCount() == 2);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+
+ Node* low_node =
+ graph()->NewNode(machine()->Word32Or(), GetReplacementLow(left),
+ GetReplacementLow(right));
+ Node* high_node =
+ graph()->NewNode(machine()->Word32Or(), GetReplacementHigh(left),
+ GetReplacementHigh(right));
+ ReplaceNode(node, low_node, high_node);
+ break;
+ }
+
+ // kExprI64Xor:
+ case IrOpcode::kWord64Xor: {
+ DCHECK(node->InputCount() == 2);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+
+ Node* low_node =
+ graph()->NewNode(machine()->Word32Xor(), GetReplacementLow(left),
+ GetReplacementLow(right));
+ Node* high_node =
+ graph()->NewNode(machine()->Word32Xor(), GetReplacementHigh(left),
+ GetReplacementHigh(right));
+ ReplaceNode(node, low_node, high_node);
+ break;
+ }
+ // kExprI64Shl:
+ case IrOpcode::kWord64Shl: {
+ // TODO(turbofan): if the shift count >= 32, then we can set the low word
+ // of the output to 0 and just calculate the high word.
+ DCHECK(node->InputCount() == 2);
+ Node* shift = node->InputAt(1);
+ if (HasReplacementLow(shift)) {
+ // We do not have to care about the high word replacement, because
+ // the shift can only be between 0 and 63 anyways.
+ node->ReplaceInput(1, GetReplacementLow(shift));
+ }
+
+ Node* value = node->InputAt(0);
+ node->ReplaceInput(0, GetReplacementLow(value));
+ node->InsertInput(zone(), 1, GetReplacementHigh(value));
+
+ NodeProperties::ChangeOp(node, machine()->Word32PairShl());
+ // We access the additional return values through projections.
+ Node* low_node = graph()->NewNode(common()->Projection(0), node);
+ Node* high_node = graph()->NewNode(common()->Projection(1), node);
+ ReplaceNode(node, low_node, high_node);
+ break;
+ }
+ // kExprI64ShrU:
+ case IrOpcode::kWord64Shr: {
+ // TODO(turbofan): if the shift count >= 32, then we can set the low word
+ // of the output to 0 and just calculate the high word.
+ DCHECK(node->InputCount() == 2);
+ Node* shift = node->InputAt(1);
+ if (HasReplacementLow(shift)) {
+ // We do not have to care about the high word replacement, because
+ // the shift can only be between 0 and 63 anyways.
+ node->ReplaceInput(1, GetReplacementLow(shift));
+ }
+
+ Node* value = node->InputAt(0);
+ node->ReplaceInput(0, GetReplacementLow(value));
+ node->InsertInput(zone(), 1, GetReplacementHigh(value));
+
+ NodeProperties::ChangeOp(node, machine()->Word32PairShr());
+ // We access the additional return values through projections.
+ Node* low_node = graph()->NewNode(common()->Projection(0), node);
+ Node* high_node = graph()->NewNode(common()->Projection(1), node);
+ ReplaceNode(node, low_node, high_node);
+ break;
+ }
+ // kExprI64ShrS:
+ case IrOpcode::kWord64Sar: {
+ // TODO(turbofan): if the shift count >= 32, then we can set the low word
+ // of the output to 0 and just calculate the high word.
+ DCHECK(node->InputCount() == 2);
+ Node* shift = node->InputAt(1);
+ if (HasReplacementLow(shift)) {
+ // We do not have to care about the high word replacement, because
+ // the shift can only be between 0 and 63 anyways.
+ node->ReplaceInput(1, GetReplacementLow(shift));
+ }
+
+ Node* value = node->InputAt(0);
+ node->ReplaceInput(0, GetReplacementLow(value));
+ node->InsertInput(zone(), 1, GetReplacementHigh(value));
+
+ NodeProperties::ChangeOp(node, machine()->Word32PairSar());
+ // We access the additional return values through projections.
+ Node* low_node = graph()->NewNode(common()->Projection(0), node);
+ Node* high_node = graph()->NewNode(common()->Projection(1), node);
+ ReplaceNode(node, low_node, high_node);
+ break;
+ }
+ // kExprI64Eq:
+ case IrOpcode::kWord64Equal: {
+ DCHECK(node->InputCount() == 2);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+
+ // TODO(wasm): Use explicit comparisons and && here?
+ Node* replacement = graph()->NewNode(
+ machine()->Word32Equal(),
+ graph()->NewNode(
+ machine()->Word32Or(),
+ graph()->NewNode(machine()->Word32Xor(), GetReplacementLow(left),
+ GetReplacementLow(right)),
+ graph()->NewNode(machine()->Word32Xor(), GetReplacementHigh(left),
+ GetReplacementHigh(right))),
+ graph()->NewNode(common()->Int32Constant(0)));
+
+ ReplaceNode(node, replacement, nullptr);
+ break;
+ }
+ // kExprI64LtS:
+ case IrOpcode::kInt64LessThan: {
+ LowerComparison(node, machine()->Int32LessThan(),
+ machine()->Uint32LessThan());
+ break;
+ }
+ case IrOpcode::kInt64LessThanOrEqual: {
+ LowerComparison(node, machine()->Int32LessThan(),
+ machine()->Uint32LessThanOrEqual());
+ break;
+ }
+ case IrOpcode::kUint64LessThan: {
+ LowerComparison(node, machine()->Uint32LessThan(),
+ machine()->Uint32LessThan());
+ break;
+ }
+ case IrOpcode::kUint64LessThanOrEqual: {
+ LowerComparison(node, machine()->Uint32LessThan(),
+ machine()->Uint32LessThanOrEqual());
+ break;
+ }
+
+ // kExprI64SConvertI32:
+ case IrOpcode::kChangeInt32ToInt64: {
+ DCHECK(node->InputCount() == 1);
+ Node* input = node->InputAt(0);
+ if (HasReplacementLow(input)) {
+ input = GetReplacementLow(input);
+ }
+ // We use SAR to preserve the sign in the high word.
+ ReplaceNode(
+ node, input,
+ graph()->NewNode(machine()->Word32Sar(), input,
+ graph()->NewNode(common()->Int32Constant(31))));
+ node->NullAllInputs();
+ break;
+ }
+ // kExprI64UConvertI32: {
+ case IrOpcode::kChangeUint32ToUint64: {
+ DCHECK(node->InputCount() == 1);
+ Node* input = node->InputAt(0);
+ if (HasReplacementLow(input)) {
+ input = GetReplacementLow(input);
+ }
+ ReplaceNode(node, input, graph()->NewNode(common()->Int32Constant(0)));
+ node->NullAllInputs();
+ break;
+ }
+ // kExprF64ReinterpretI64:
+ case IrOpcode::kBitcastInt64ToFloat64: {
+ DCHECK(node->InputCount() == 1);
+ Node* input = node->InputAt(0);
+ Node* stack_slot = graph()->NewNode(
+ machine()->StackSlot(MachineRepresentation::kWord64));
+
+ Node* store_high_word = graph()->NewNode(
+ machine()->Store(
+ StoreRepresentation(MachineRepresentation::kWord32,
+ WriteBarrierKind::kNoWriteBarrier)),
+ stack_slot, graph()->NewNode(common()->Int32Constant(4)),
+ GetReplacementHigh(input), graph()->start(), graph()->start());
+
+ Node* store_low_word = graph()->NewNode(
+ machine()->Store(
+ StoreRepresentation(MachineRepresentation::kWord32,
+ WriteBarrierKind::kNoWriteBarrier)),
+ stack_slot, graph()->NewNode(common()->Int32Constant(0)),
+ GetReplacementLow(input), store_high_word, graph()->start());
+
+ Node* load =
+ graph()->NewNode(machine()->Load(MachineType::Float64()), stack_slot,
+ graph()->NewNode(common()->Int32Constant(0)),
+ store_low_word, graph()->start());
+
+ ReplaceNode(node, load, nullptr);
+ break;
+ }
+ // kExprI64ReinterpretF64:
+ case IrOpcode::kBitcastFloat64ToInt64: {
+ DCHECK(node->InputCount() == 1);
+ Node* input = node->InputAt(0);
+ if (HasReplacementLow(input)) {
+ input = GetReplacementLow(input);
+ }
+ Node* stack_slot = graph()->NewNode(
+ machine()->StackSlot(MachineRepresentation::kWord64));
+ Node* store = graph()->NewNode(
+ machine()->Store(
+ StoreRepresentation(MachineRepresentation::kFloat64,
+ WriteBarrierKind::kNoWriteBarrier)),
+ stack_slot, graph()->NewNode(common()->Int32Constant(0)), input,
+ graph()->start(), graph()->start());
+
+ Node* high_node =
+ graph()->NewNode(machine()->Load(MachineType::Int32()), stack_slot,
+ graph()->NewNode(common()->Int32Constant(4)), store,
+ graph()->start());
+
+ Node* low_node =
+ graph()->NewNode(machine()->Load(MachineType::Int32()), stack_slot,
+ graph()->NewNode(common()->Int32Constant(0)), store,
+ graph()->start());
+ ReplaceNode(node, low_node, high_node);
+ break;
+ }
+ case IrOpcode::kWord64Ror: {
+ DCHECK(node->InputCount() == 2);
+ Node* input = node->InputAt(0);
+ Node* shift = HasReplacementLow(node->InputAt(1))
+ ? GetReplacementLow(node->InputAt(1))
+ : node->InputAt(1);
+ Int32Matcher m(shift);
+ if (m.HasValue()) {
+ // Precondition: 0 <= shift < 64.
+ int32_t shift_value = m.Value() & 0x3f;
+ if (shift_value == 0) {
+ ReplaceNode(node, GetReplacementLow(input),
+ GetReplacementHigh(input));
+ } else if (shift_value == 32) {
+ ReplaceNode(node, GetReplacementHigh(input),
+ GetReplacementLow(input));
+ } else {
+ Node* low_input;
+ Node* high_input;
+ if (shift_value < 32) {
+ low_input = GetReplacementLow(input);
+ high_input = GetReplacementHigh(input);
+ } else {
+ low_input = GetReplacementHigh(input);
+ high_input = GetReplacementLow(input);
+ }
+ int32_t masked_shift_value = shift_value & 0x1f;
+ Node* masked_shift =
+ graph()->NewNode(common()->Int32Constant(masked_shift_value));
+ Node* inv_shift = graph()->NewNode(
+ common()->Int32Constant(32 - masked_shift_value));
+
+ Node* low_node = graph()->NewNode(
+ machine()->Word32Or(),
+ graph()->NewNode(machine()->Word32Shr(), low_input, masked_shift),
+ graph()->NewNode(machine()->Word32Shl(), high_input, inv_shift));
+ Node* high_node = graph()->NewNode(
+ machine()->Word32Or(), graph()->NewNode(machine()->Word32Shr(),
+ high_input, masked_shift),
+ graph()->NewNode(machine()->Word32Shl(), low_input, inv_shift));
+ ReplaceNode(node, low_node, high_node);
+ }
+ } else {
+ Node* safe_shift = shift;
+ if (!machine()->Word32ShiftIsSafe()) {
+ safe_shift =
+ graph()->NewNode(machine()->Word32And(), shift,
+ graph()->NewNode(common()->Int32Constant(0x1f)));
+ }
+
+ // By creating this bit-mask with SAR and SHL we do not have to deal
+ // with shift == 0 as a special case.
+ Node* inv_mask = graph()->NewNode(
+ machine()->Word32Shl(),
+ graph()->NewNode(machine()->Word32Sar(),
+ graph()->NewNode(common()->Int32Constant(
+ std::numeric_limits<int32_t>::min())),
+ safe_shift),
+ graph()->NewNode(common()->Int32Constant(1)));
+
+ Node* bit_mask =
+ graph()->NewNode(machine()->Word32Xor(), inv_mask,
+ graph()->NewNode(common()->Int32Constant(-1)));
+
+ // We have to mask the shift value for this comparison. If
+ // !machine()->Word32ShiftIsSafe() then the masking should already be
+ // part of the graph.
+ Node* masked_shift6 = shift;
+ if (machine()->Word32ShiftIsSafe()) {
+ masked_shift6 =
+ graph()->NewNode(machine()->Word32And(), shift,
+ graph()->NewNode(common()->Int32Constant(0x3f)));
+ }
+
+ Diamond lt32(
+ graph(), common(),
+ graph()->NewNode(machine()->Int32LessThan(), masked_shift6,
+ graph()->NewNode(common()->Int32Constant(32))));
+
+ // The low word and the high word can be swapped either at the input or
+ // at the output. We swap the inputs so that shift does not have to be
+ // kept for so long in a register.
+ Node* input_low =
+ lt32.Phi(MachineRepresentation::kWord32, GetReplacementLow(input),
+ GetReplacementHigh(input));
+ Node* input_high =
+ lt32.Phi(MachineRepresentation::kWord32, GetReplacementHigh(input),
+ GetReplacementLow(input));
+
+ Node* rotate_low =
+ graph()->NewNode(machine()->Word32Ror(), input_low, safe_shift);
+ Node* rotate_high =
+ graph()->NewNode(machine()->Word32Ror(), input_high, safe_shift);
+
+ Node* low_node = graph()->NewNode(
+ machine()->Word32Or(),
+ graph()->NewNode(machine()->Word32And(), rotate_low, bit_mask),
+ graph()->NewNode(machine()->Word32And(), rotate_high, inv_mask));
+
+ Node* high_node = graph()->NewNode(
+ machine()->Word32Or(),
+ graph()->NewNode(machine()->Word32And(), rotate_high, bit_mask),
+ graph()->NewNode(machine()->Word32And(), rotate_low, inv_mask));
+
+ ReplaceNode(node, low_node, high_node);
+ }
+ break;
+ }
+ // kExprI64Clz:
+ case IrOpcode::kWord64Clz: {
+ DCHECK(node->InputCount() == 1);
+ Node* input = node->InputAt(0);
+ Diamond d(
+ graph(), common(),
+ graph()->NewNode(machine()->Word32Equal(), GetReplacementHigh(input),
+ graph()->NewNode(common()->Int32Constant(0))));
+
+ Node* low_node = d.Phi(
+ MachineRepresentation::kWord32,
+ graph()->NewNode(machine()->Int32Add(),
+ graph()->NewNode(machine()->Word32Clz(),
+ GetReplacementLow(input)),
+ graph()->NewNode(common()->Int32Constant(32))),
+ graph()->NewNode(machine()->Word32Clz(), GetReplacementHigh(input)));
+ ReplaceNode(node, low_node, graph()->NewNode(common()->Int32Constant(0)));
+ break;
+ }
+ // kExprI64Ctz:
+ case IrOpcode::kWord64Ctz: {
+ DCHECK(node->InputCount() == 1);
+ DCHECK(machine()->Word32Ctz().IsSupported());
+ Node* input = node->InputAt(0);
+ Diamond d(
+ graph(), common(),
+ graph()->NewNode(machine()->Word32Equal(), GetReplacementLow(input),
+ graph()->NewNode(common()->Int32Constant(0))));
+ Node* low_node =
+ d.Phi(MachineRepresentation::kWord32,
+ graph()->NewNode(machine()->Int32Add(),
+ graph()->NewNode(machine()->Word32Ctz().op(),
+ GetReplacementHigh(input)),
+ graph()->NewNode(common()->Int32Constant(32))),
+ graph()->NewNode(machine()->Word32Ctz().op(),
+ GetReplacementLow(input)));
+ ReplaceNode(node, low_node, graph()->NewNode(common()->Int32Constant(0)));
+ break;
+ }
+ // kExprI64Popcnt:
+ case IrOpcode::kWord64Popcnt: {
+ DCHECK(node->InputCount() == 1);
+ Node* input = node->InputAt(0);
+ // We assume that a Word64Popcnt node only has been created if
+ // Word32Popcnt is actually supported.
+ DCHECK(machine()->Word32Popcnt().IsSupported());
+ ReplaceNode(node, graph()->NewNode(
+ machine()->Int32Add(),
+ graph()->NewNode(machine()->Word32Popcnt().op(),
+ GetReplacementLow(input)),
+ graph()->NewNode(machine()->Word32Popcnt().op(),
+ GetReplacementHigh(input))),
+ graph()->NewNode(common()->Int32Constant(0)));
+ break;
+ }
+ case IrOpcode::kPhi: {
+ MachineRepresentation rep = PhiRepresentationOf(node->op());
+ if (rep == MachineRepresentation::kWord64) {
+ // The replacement nodes have already been created, we only have to
+ // replace placeholder nodes.
+ Node* low_node = GetReplacementLow(node);
+ Node* high_node = GetReplacementHigh(node);
+ for (int i = 0; i < node->op()->ValueInputCount(); i++) {
+ low_node->ReplaceInput(i, GetReplacementLow(node->InputAt(i)));
+ high_node->ReplaceInput(i, GetReplacementHigh(node->InputAt(i)));
+ }
+ } else {
+ DefaultLowering(node);
+ }
+ break;
+ }
+
default: { DefaultLowering(node); }
}
+} // NOLINT(readability/fn_size)
+
+void Int64Lowering::LowerComparison(Node* node, const Operator* high_word_op,
+ const Operator* low_word_op) {
+ DCHECK(node->InputCount() == 2);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Node* replacement = graph()->NewNode(
+ machine()->Word32Or(),
+ graph()->NewNode(high_word_op, GetReplacementHigh(left),
+ GetReplacementHigh(right)),
+ graph()->NewNode(
+ machine()->Word32And(),
+ graph()->NewNode(machine()->Word32Equal(), GetReplacementHigh(left),
+ GetReplacementHigh(right)),
+ graph()->NewNode(low_word_op, GetReplacementLow(left),
+ GetReplacementLow(right))));
+
+ ReplaceNode(node, replacement, nullptr);
}
bool Int64Lowering::DefaultLowering(Node* node) {
@@ -294,6 +796,32 @@ Node* Int64Lowering::GetReplacementHigh(Node* node) {
DCHECK(result);
return result;
}
+
+void Int64Lowering::PreparePhiReplacement(Node* phi) {
+ MachineRepresentation rep = PhiRepresentationOf(phi->op());
+ if (rep == MachineRepresentation::kWord64) {
+ // We have to create the replacements for a phi node before we actually
+ // lower the phi to break potential cycles in the graph. The replacements of
+ // input nodes do not exist yet, so we use a placeholder node to pass the
+ // graph verifier.
+ int value_count = phi->op()->ValueInputCount();
+ Node** inputs_low = zone()->NewArray<Node*>(value_count + 1);
+ Node** inputs_high = zone()->NewArray<Node*>(value_count + 1);
+ for (int i = 0; i < value_count; i++) {
+ inputs_low[i] = placeholder_;
+ inputs_high[i] = placeholder_;
+ }
+ inputs_low[value_count] = NodeProperties::GetControlInput(phi, 0);
+ inputs_high[value_count] = NodeProperties::GetControlInput(phi, 0);
+ ReplaceNode(phi,
+ graph()->NewNode(
+ common()->Phi(MachineRepresentation::kWord32, value_count),
+ value_count + 1, inputs_low, false),
+ graph()->NewNode(
+ common()->Phi(MachineRepresentation::kWord32, value_count),
+ value_count + 1, inputs_high, false));
+ }
+}
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/int64-lowering.h b/deps/v8/src/compiler/int64-lowering.h
index 79a95dc195..7f6ef9a297 100644
--- a/deps/v8/src/compiler/int64-lowering.h
+++ b/deps/v8/src/compiler/int64-lowering.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_INT64_REDUCER_H_
-#define V8_COMPILER_INT64_REDUCER_H_
+#ifndef V8_COMPILER_INT64_LOWERING_H_
+#define V8_COMPILER_INT64_LOWERING_H_
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
@@ -24,7 +24,7 @@ class Int64Lowering {
void LowerGraph();
private:
- enum class State : uint8_t { kUnvisited, kOnStack, kInputsPushed, kVisited };
+ enum class State : uint8_t { kUnvisited, kOnStack, kVisited };
struct Replacement {
Node* low;
@@ -37,27 +37,39 @@ class Int64Lowering {
CommonOperatorBuilder* common() const { return common_; }
Signature<MachineRepresentation>* signature() const { return signature_; }
+ void PrepareReplacements(Node* node);
+ void PushNode(Node* node);
void LowerNode(Node* node);
bool DefaultLowering(Node* node);
+ void LowerComparison(Node* node, const Operator* signed_op,
+ const Operator* unsigned_op);
+ void PrepareProjectionReplacements(Node* node);
void ReplaceNode(Node* old, Node* new_low, Node* new_high);
bool HasReplacementLow(Node* node);
Node* GetReplacementLow(Node* node);
bool HasReplacementHigh(Node* node);
Node* GetReplacementHigh(Node* node);
+ void PreparePhiReplacement(Node* phi);
+
+ struct NodeState {
+ Node* node;
+ int input_index;
+ };
Zone* zone_;
Graph* const graph_;
MachineOperatorBuilder* machine_;
CommonOperatorBuilder* common_;
NodeMarker<State> state_;
- ZoneStack<Node*> stack_;
+ ZoneDeque<NodeState> stack_;
Replacement* replacements_;
Signature<MachineRepresentation>* signature_;
+ Node* placeholder_;
};
} // namespace compiler
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_INT64_REDUCER_H_
+#endif // V8_COMPILER_INT64_LOWERING_H_
diff --git a/deps/v8/src/compiler/js-builtin-reducer.cc b/deps/v8/src/compiler/js-builtin-reducer.cc
index 3023031c2f..41f9c30707 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.cc
+++ b/deps/v8/src/compiler/js-builtin-reducer.cc
@@ -117,18 +117,59 @@ Reduction JSBuiltinReducer::ReduceMathMax(Node* node) {
return NoChange();
}
-
-// ES6 draft 08-24-14, section 20.2.2.19.
+// ES6 section 20.2.2.19 Math.imul ( x, y )
Reduction JSBuiltinReducer::ReduceMathImul(Node* node) {
JSCallReduction r(node);
- if (r.InputsMatchTwo(Type::Integral32(), Type::Integral32())) {
- // Math.imul(a:int32, b:int32) -> Int32Mul(a, b)
- Node* value = graph()->NewNode(machine()->Int32Mul(), r.left(), r.right());
+ if (r.InputsMatchTwo(Type::Number(), Type::Number())) {
+ // Math.imul(a:number, b:number) -> NumberImul(NumberToUint32(a),
+ // NumberToUint32(b))
+ Node* a = graph()->NewNode(simplified()->NumberToUint32(), r.left());
+ Node* b = graph()->NewNode(simplified()->NumberToUint32(), r.right());
+ Node* value = graph()->NewNode(simplified()->NumberImul(), a, b);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.10 Math.ceil ( x )
+Reduction JSBuiltinReducer::ReduceMathCeil(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::Number())) {
+ // Math.ceil(a:number) -> NumberCeil(a)
+ Node* value = graph()->NewNode(simplified()->NumberCeil(), r.left());
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.11 Math.clz32 ( x )
+Reduction JSBuiltinReducer::ReduceMathClz32(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::Unsigned32())) {
+ // Math.clz32(a:unsigned32) -> NumberClz32(a)
+ Node* value = graph()->NewNode(simplified()->NumberClz32(), r.left());
+ return Replace(value);
+ }
+ if (r.InputsMatchOne(Type::Number())) {
+ // Math.clz32(a:number) -> NumberClz32(NumberToUint32(a))
+ Node* value = graph()->NewNode(
+ simplified()->NumberClz32(),
+ graph()->NewNode(simplified()->NumberToUint32(), r.left()));
return Replace(value);
}
return NoChange();
}
+// ES6 draft 08-24-14, section 20.2.2.16.
+Reduction JSBuiltinReducer::ReduceMathFloor(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::Number())) {
+ // Math.floor(a:number) -> NumberFloor(a)
+ Node* value = graph()->NewNode(simplified()->NumberFloor(), r.left());
+ return Replace(value);
+ }
+ return NoChange();
+}
// ES6 draft 08-24-14, section 20.2.2.17.
Reduction JSBuiltinReducer::ReduceMathFround(Node* node) {
@@ -145,25 +186,32 @@ Reduction JSBuiltinReducer::ReduceMathFround(Node* node) {
// ES6 section 20.2.2.28 Math.round ( x )
Reduction JSBuiltinReducer::ReduceMathRound(Node* node) {
JSCallReduction r(node);
- if (r.InputsMatchOne(type_cache_.kIntegerOrMinusZeroOrNaN)) {
- // Math.round(a:integer \/ -0 \/ NaN) -> a
- return Replace(r.left());
+ if (r.InputsMatchOne(Type::Number())) {
+ // Math.round(a:number) -> NumberRound(a)
+ Node* value = graph()->NewNode(simplified()->NumberRound(), r.left());
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.32 Math.sqrt ( x )
+Reduction JSBuiltinReducer::ReduceMathSqrt(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::Number())) {
+ // Math.sqrt(a:number) -> Float64Sqrt(a)
+ Node* value = graph()->NewNode(machine()->Float64Sqrt(), r.left());
+ return Replace(value);
}
- if (r.InputsMatchOne(Type::Number()) &&
- machine()->Float64RoundUp().IsSupported()) {
- // Math.round(a:number) -> Select(Float64LessThan(#0.5, Float64Sub(i, a)),
- // Float64Sub(i, #1.0), i)
- // where i = Float64RoundUp(a)
- Node* value = r.left();
- Node* integer = graph()->NewNode(machine()->Float64RoundUp().op(), value);
- Node* real = graph()->NewNode(machine()->Float64Sub(), integer, value);
- return Replace(graph()->NewNode(
- common()->Select(MachineRepresentation::kFloat64),
- graph()->NewNode(machine()->Float64LessThan(),
- jsgraph()->Float64Constant(0.5), real),
- graph()->NewNode(machine()->Float64Sub(), integer,
- jsgraph()->Float64Constant(1.0)),
- integer));
+ return NoChange();
+}
+
+// ES6 section 20.2.2.35 Math.trunc ( x )
+Reduction JSBuiltinReducer::ReduceMathTrunc(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::Number())) {
+ // Math.trunc(a:number) -> NumberTrunc(a)
+ Node* value = graph()->NewNode(simplified()->NumberTrunc(), r.left());
+ return Replace(value);
}
return NoChange();
}
@@ -181,12 +229,27 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
case kMathImul:
reduction = ReduceMathImul(node);
break;
+ case kMathClz32:
+ reduction = ReduceMathClz32(node);
+ break;
+ case kMathCeil:
+ reduction = ReduceMathCeil(node);
+ break;
+ case kMathFloor:
+ reduction = ReduceMathFloor(node);
+ break;
case kMathFround:
reduction = ReduceMathFround(node);
break;
case kMathRound:
reduction = ReduceMathRound(node);
break;
+ case kMathSqrt:
+ reduction = ReduceMathSqrt(node);
+ break;
+ case kMathTrunc:
+ reduction = ReduceMathTrunc(node);
+ break;
default:
break;
}
diff --git a/deps/v8/src/compiler/js-builtin-reducer.h b/deps/v8/src/compiler/js-builtin-reducer.h
index b64b33565d..dfeb409291 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.h
+++ b/deps/v8/src/compiler/js-builtin-reducer.h
@@ -33,8 +33,13 @@ class JSBuiltinReducer final : public AdvancedReducer {
Reduction ReduceFunctionCall(Node* node);
Reduction ReduceMathMax(Node* node);
Reduction ReduceMathImul(Node* node);
+ Reduction ReduceMathCeil(Node* node);
+ Reduction ReduceMathClz32(Node* node);
+ Reduction ReduceMathFloor(Node* node);
Reduction ReduceMathFround(Node* node);
Reduction ReduceMathRound(Node* node);
+ Reduction ReduceMathSqrt(Node* node);
+ Reduction ReduceMathTrunc(Node* node);
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index 34217e7d9a..892dcc70ce 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -329,16 +329,8 @@ Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
Node* check = effect =
graph()->NewNode(javascript()->StrictEqual(), target, array_function,
context, effect, control);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* deoptimize =
- graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
- frame_state, effect, if_false);
- // TODO(bmeurer): This should be on the AdvancedReducer somehow.
- NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
- Revisit(graph()->end());
- control = graph()->NewNode(common()->IfTrue(), branch);
+ control = graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+ effect, control);
// Turn the {node} into a {JSCreateArray} call.
NodeProperties::ReplaceValueInput(node, array_function, 0);
@@ -355,16 +347,8 @@ Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
Node* check = effect =
graph()->NewNode(javascript()->StrictEqual(), target, target_function,
context, effect, control);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* deoptimize =
- graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
- frame_state, effect, if_false);
- // TODO(bmeurer): This should be on the AdvancedReducer somehow.
- NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
- Revisit(graph()->end());
- control = graph()->NewNode(common()->IfTrue(), branch);
+ control = graph()->NewNode(common()->DeoptimizeUnless(), check,
+ frame_state, effect, control);
// Specialize the JSCallFunction node to the {target_function}.
NodeProperties::ReplaceValueInput(node, target_function, 0);
@@ -473,16 +457,8 @@ Reduction JSCallReducer::ReduceJSCallConstruct(Node* node) {
Node* check = effect =
graph()->NewNode(javascript()->StrictEqual(), target, array_function,
context, effect, control);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* deoptimize =
- graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
- frame_state, effect, if_false);
- // TODO(bmeurer): This should be on the AdvancedReducer somehow.
- NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
- Revisit(graph()->end());
- control = graph()->NewNode(common()->IfTrue(), branch);
+ control = graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+ effect, control);
// Turn the {node} into a {JSCreateArray} call.
NodeProperties::ReplaceEffectInput(node, effect);
@@ -505,16 +481,8 @@ Reduction JSCallReducer::ReduceJSCallConstruct(Node* node) {
Node* check = effect =
graph()->NewNode(javascript()->StrictEqual(), target, target_function,
context, effect, control);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* deoptimize =
- graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
- frame_state, effect, if_false);
- // TODO(bmeurer): This should be on the AdvancedReducer somehow.
- NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
- Revisit(graph()->end());
- control = graph()->NewNode(common()->IfTrue(), branch);
+ control = graph()->NewNode(common()->DeoptimizeUnless(), check,
+ frame_state, effect, control);
// Specialize the JSCallConstruct node to the {target_function}.
NodeProperties::ReplaceValueInput(node, target_function, 0);
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index f40f05d852..9ffae152ac 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -20,7 +20,7 @@ class JSOperatorBuilder;
// Performs strength reduction on {JSCallConstruct} and {JSCallFunction} nodes,
// which might allow inlining or other optimizations to be performed afterwards.
-class JSCallReducer final : public AdvancedReducer {
+class JSCallReducer final : public Reducer {
public:
// Flags that control the mode of operation.
enum Flag {
@@ -29,12 +29,9 @@ class JSCallReducer final : public AdvancedReducer {
};
typedef base::Flags<Flag> Flags;
- JSCallReducer(Editor* editor, JSGraph* jsgraph, Flags flags,
+ JSCallReducer(JSGraph* jsgraph, Flags flags,
MaybeHandle<Context> native_context)
- : AdvancedReducer(editor),
- jsgraph_(jsgraph),
- flags_(flags),
- native_context_(native_context) {}
+ : jsgraph_(jsgraph), flags_(flags), native_context_(native_context) {}
Reduction Reduce(Node* node) final;
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index df2d9086ef..1f12579ec7 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -45,6 +45,8 @@ Reduction JSGenericLowering::Reduce(Node* node) {
JS_OP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
case IrOpcode::kBranch:
+ case IrOpcode::kDeoptimizeIf:
+ case IrOpcode::kDeoptimizeUnless:
// TODO(mstarzinger): If typing is enabled then simplified lowering will
// have inserted the correct ChangeBoolToBit, otherwise we need to perform
// poor-man's representation inference here and insert manual change.
@@ -68,14 +70,9 @@ Reduction JSGenericLowering::Reduce(Node* node) {
ReplaceWithStubCall(node, CodeFactory::BinaryOpIC(isolate(), token), \
CallDescriptor::kPatchableCallSiteWithNop | flags); \
}
-REPLACE_BINARY_OP_IC_CALL(JSBitwiseOr, Token::BIT_OR)
-REPLACE_BINARY_OP_IC_CALL(JSBitwiseXor, Token::BIT_XOR)
-REPLACE_BINARY_OP_IC_CALL(JSBitwiseAnd, Token::BIT_AND)
REPLACE_BINARY_OP_IC_CALL(JSShiftLeft, Token::SHL)
REPLACE_BINARY_OP_IC_CALL(JSShiftRight, Token::SAR)
REPLACE_BINARY_OP_IC_CALL(JSShiftRightLogical, Token::SHR)
-REPLACE_BINARY_OP_IC_CALL(JSAdd, Token::ADD)
-REPLACE_BINARY_OP_IC_CALL(JSSubtract, Token::SUB)
REPLACE_BINARY_OP_IC_CALL(JSMultiply, Token::MUL)
REPLACE_BINARY_OP_IC_CALL(JSDivide, Token::DIV)
REPLACE_BINARY_OP_IC_CALL(JSModulus, Token::MOD)
@@ -85,19 +82,39 @@ REPLACE_BINARY_OP_IC_CALL(JSModulus, Token::MOD)
void JSGenericLowering::Lower##op(Node* node) { \
ReplaceWithRuntimeCall(node, fun); \
}
-REPLACE_RUNTIME_CALL(JSEqual, Runtime::kEqual)
-REPLACE_RUNTIME_CALL(JSNotEqual, Runtime::kNotEqual)
-REPLACE_RUNTIME_CALL(JSStrictEqual, Runtime::kStrictEqual)
-REPLACE_RUNTIME_CALL(JSStrictNotEqual, Runtime::kStrictNotEqual)
-REPLACE_RUNTIME_CALL(JSLessThan, Runtime::kLessThan)
-REPLACE_RUNTIME_CALL(JSGreaterThan, Runtime::kGreaterThan)
-REPLACE_RUNTIME_CALL(JSLessThanOrEqual, Runtime::kLessThanOrEqual)
-REPLACE_RUNTIME_CALL(JSGreaterThanOrEqual, Runtime::kGreaterThanOrEqual)
REPLACE_RUNTIME_CALL(JSCreateWithContext, Runtime::kPushWithContext)
REPLACE_RUNTIME_CALL(JSCreateModuleContext, Runtime::kPushModuleContext)
REPLACE_RUNTIME_CALL(JSConvertReceiver, Runtime::kConvertReceiver)
#undef REPLACE_RUNTIME_CALL
+#define REPLACE_STUB_CALL(Name) \
+ void JSGenericLowering::LowerJS##Name(Node* node) { \
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node); \
+ Callable callable = CodeFactory::Name(isolate()); \
+ ReplaceWithStubCall(node, callable, flags); \
+ }
+REPLACE_STUB_CALL(Add)
+REPLACE_STUB_CALL(Subtract)
+REPLACE_STUB_CALL(BitwiseAnd)
+REPLACE_STUB_CALL(BitwiseOr)
+REPLACE_STUB_CALL(BitwiseXor)
+REPLACE_STUB_CALL(LessThan)
+REPLACE_STUB_CALL(LessThanOrEqual)
+REPLACE_STUB_CALL(GreaterThan)
+REPLACE_STUB_CALL(GreaterThanOrEqual)
+REPLACE_STUB_CALL(Equal)
+REPLACE_STUB_CALL(NotEqual)
+REPLACE_STUB_CALL(StrictEqual)
+REPLACE_STUB_CALL(StrictNotEqual)
+REPLACE_STUB_CALL(ToBoolean)
+REPLACE_STUB_CALL(ToInteger)
+REPLACE_STUB_CALL(ToLength)
+REPLACE_STUB_CALL(ToNumber)
+REPLACE_STUB_CALL(ToName)
+REPLACE_STUB_CALL(ToObject)
+REPLACE_STUB_CALL(ToString)
+#undef REPLACE_STUB_CALL
+
void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
CallDescriptor::Flags flags) {
Operator::Properties properties = node->op()->properties();
@@ -134,42 +151,6 @@ void JSGenericLowering::LowerJSTypeOf(Node* node) {
}
-void JSGenericLowering::LowerJSToBoolean(Node* node) {
- CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
- Callable callable = CodeFactory::ToBoolean(isolate());
- ReplaceWithStubCall(node, callable,
- CallDescriptor::kPatchableCallSite | flags);
-}
-
-
-void JSGenericLowering::LowerJSToNumber(Node* node) {
- CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
- Callable callable = CodeFactory::ToNumber(isolate());
- ReplaceWithStubCall(node, callable, flags);
-}
-
-
-void JSGenericLowering::LowerJSToString(Node* node) {
- CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
- Callable callable = CodeFactory::ToString(isolate());
- ReplaceWithStubCall(node, callable, flags);
-}
-
-
-void JSGenericLowering::LowerJSToName(Node* node) {
- CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
- Callable callable = CodeFactory::ToName(isolate());
- ReplaceWithStubCall(node, callable, flags);
-}
-
-
-void JSGenericLowering::LowerJSToObject(Node* node) {
- CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
- Callable callable = CodeFactory::ToObject(isolate());
- ReplaceWithStubCall(node, callable, flags);
-}
-
-
void JSGenericLowering::LowerJSLoadProperty(Node* node) {
Node* closure = NodeProperties::GetValueInput(node, 2);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -546,15 +527,13 @@ void JSGenericLowering::LowerJSCreateIterResultObject(Node* node) {
void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
- int const length = Handle<FixedArray>::cast(p.constant())->length();
node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.index()));
node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
// Use the FastCloneShallowArrayStub only for shallow boilerplates up to the
// initial length limit for arrays with "fast" elements kind.
if ((p.flags() & ArrayLiteral::kShallowElements) != 0 &&
- (p.flags() & ArrayLiteral::kIsStrong) == 0 &&
- length < JSArray::kInitialMaxFastElementArray) {
+ p.length() < JSArray::kInitialMaxFastElementArray) {
Callable callable = CodeFactory::FastCloneShallowArray(isolate());
ReplaceWithStubCall(node, callable, flags);
} else {
@@ -567,7 +546,6 @@ void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
- int const length = Handle<FixedArray>::cast(p.constant())->length();
node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.index()));
node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
@@ -575,8 +553,9 @@ void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
// Use the FastCloneShallowObjectStub only for shallow boilerplates without
// elements up to the number of properties that the stubs can handle.
if ((p.flags() & ObjectLiteral::kShallowProperties) != 0 &&
- length <= FastCloneShallowObjectStub::kMaximumClonedProperties) {
- Callable callable = CodeFactory::FastCloneShallowObject(isolate(), length);
+ p.length() <= FastCloneShallowObjectStub::kMaximumClonedProperties) {
+ Callable callable =
+ CodeFactory::FastCloneShallowObject(isolate(), p.length());
ReplaceWithStubCall(node, callable, flags);
} else {
ReplaceWithRuntimeCall(node, Runtime::kCreateObjectLiteral);
diff --git a/deps/v8/src/compiler/js-global-object-specialization.cc b/deps/v8/src/compiler/js-global-object-specialization.cc
index 132dec6ffb..d8c9f17fd4 100644
--- a/deps/v8/src/compiler/js-global-object-specialization.cc
+++ b/deps/v8/src/compiler/js-global-object-specialization.cc
@@ -171,16 +171,8 @@ Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
Node* check =
graph()->NewNode(simplified()->ReferenceEqual(Type::Tagged()), value,
jsgraph()->Constant(property_cell_value));
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* deoptimize =
- graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
- frame_state, effect, if_false);
- // TODO(bmeurer): This should be on the AdvancedReducer somehow.
- NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
- Revisit(graph()->end());
- control = graph()->NewNode(common()->IfTrue(), branch);
+ control = graph()->NewNode(common()->DeoptimizeUnless(), check,
+ frame_state, effect, control);
break;
}
case PropertyCellType::kConstantType: {
@@ -191,16 +183,8 @@ Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
Type* property_cell_value_type = Type::TaggedSigned();
if (property_cell_value->IsHeapObject()) {
// Deoptimize if the {value} is a Smi.
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check, control);
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* deoptimize =
- graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
- frame_state, effect, if_true);
- // TODO(bmeurer): This should be on the AdvancedReducer somehow.
- NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
- Revisit(graph()->end());
- control = graph()->NewNode(common()->IfFalse(), branch);
+ control = graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
+ effect, control);
// Load the {value} map check against the {property_cell} map.
Node* value_map = effect =
@@ -213,16 +197,8 @@ Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
jsgraph()->HeapConstant(property_cell_value_map));
property_cell_value_type = Type::TaggedPointer();
}
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* deoptimize =
- graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
- frame_state, effect, if_false);
- // TODO(bmeurer): This should be on the AdvancedReducer somehow.
- NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
- Revisit(graph()->end());
- control = graph()->NewNode(common()->IfTrue(), branch);
+ control = graph()->NewNode(common()->DeoptimizeUnless(), check,
+ frame_state, effect, control);
effect = graph()->NewNode(
simplified()->StoreField(
AccessBuilder::ForPropertyCellValue(property_cell_value_type)),
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index e938798287..98ca7aa3c3 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -29,6 +29,10 @@ Node* JSGraph::EmptyFixedArrayConstant() {
HeapConstant(factory()->empty_fixed_array()));
}
+Node* JSGraph::OptimizedOutConstant() {
+ return CACHED(kOptimizedOutConstant,
+ HeapConstant(factory()->optimized_out()));
+}
Node* JSGraph::UndefinedConstant() {
return CACHED(kUndefinedConstant, HeapConstant(factory()->undefined_value()));
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index 5a25ed0697..06e8030164 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -41,6 +41,7 @@ class JSGraph : public ZoneObject {
// Canonicalized global constants.
Node* CEntryStubConstant(int result_size);
Node* EmptyFixedArrayConstant();
+ Node* OptimizedOutConstant();
Node* UndefinedConstant();
Node* TheHoleConstant();
Node* TrueConstant();
@@ -136,6 +137,7 @@ class JSGraph : public ZoneObject {
enum CachedNode {
kCEntryStubConstant,
kEmptyFixedArrayConstant,
+ kOptimizedOutConstant,
kUndefinedConstant,
kTheHoleConstant,
kTrueConstant,
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index cd5637b0c4..0e0508bcd4 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -121,7 +121,10 @@ void JSInliningHeuristic::Finalize() {
bool JSInliningHeuristic::CandidateCompare::operator()(
const Candidate& left, const Candidate& right) const {
- return left.node != right.node && left.calls >= right.calls;
+ if (left.calls != right.calls) {
+ return left.calls > right.calls;
+ }
+ return left.node < right.node;
}
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index 2244f9bbfe..e3254bd077 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -263,6 +263,35 @@ Node* JSInliner::CreateArtificialFrameState(Node* node, Node* outer_frame_state,
node->InputAt(0), outer_frame_state);
}
+Node* JSInliner::CreateTailCallerFrameState(Node* node, Node* frame_state) {
+ FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
+ Handle<SharedFunctionInfo> shared;
+ frame_info.shared_info().ToHandle(&shared);
+
+ Node* function = frame_state->InputAt(kFrameStateFunctionInput);
+
+ // If we are inlining a tail call drop caller's frame state and an
+ // arguments adaptor if it exists.
+ frame_state = NodeProperties::GetFrameStateInput(frame_state, 0);
+ if (frame_state->opcode() == IrOpcode::kFrameState) {
+ FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
+ if (frame_info.type() == FrameStateType::kArgumentsAdaptor) {
+ frame_state = NodeProperties::GetFrameStateInput(frame_state, 0);
+ }
+ }
+
+ const FrameStateFunctionInfo* state_info =
+ jsgraph_->common()->CreateFrameStateFunctionInfo(
+ FrameStateType::kTailCallerFunction, 0, 0, shared);
+
+ const Operator* op = jsgraph_->common()->FrameState(
+ BailoutId(-1), OutputFrameStateCombine::Ignore(), state_info);
+ const Operator* op0 = jsgraph_->common()->StateValues(0);
+ Node* node0 = jsgraph_->graph()->NewNode(op0);
+ return jsgraph_->graph()->NewNode(op, node0, node0, node0,
+ jsgraph_->UndefinedConstant(), function,
+ frame_state);
+}
namespace {
@@ -271,7 +300,10 @@ bool NeedsImplicitReceiver(Handle<SharedFunctionInfo> shared_info) {
DisallowHeapAllocation no_gc;
Isolate* const isolate = shared_info->GetIsolate();
Code* const construct_stub = shared_info->construct_stub();
- return construct_stub != *isolate->builtins()->JSBuiltinsConstructStub();
+ return construct_stub != *isolate->builtins()->JSBuiltinsConstructStub() &&
+ construct_stub !=
+ *isolate->builtins()->JSBuiltinsConstructStubForDerived() &&
+ construct_stub != *isolate->builtins()->JSConstructStubApi();
}
bool IsNonConstructible(Handle<SharedFunctionInfo> shared_info) {
@@ -380,7 +412,7 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
return NoChange();
}
- Zone zone;
+ Zone zone(info_->isolate()->allocator());
ParseInfo parse_info(&zone, function);
CompilationInfo info(&parse_info);
if (info_->is_deoptimization_enabled()) info.MarkAsDeoptimizationEnabled();
@@ -395,17 +427,6 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
return NoChange();
}
- // In strong mode, in case of too few arguments we need to throw a TypeError
- // so we must not inline this call.
- int parameter_count = info.literal()->parameter_count();
- if (is_strong(info.language_mode()) &&
- call.formal_arguments() < parameter_count) {
- TRACE("Not inlining %s into %s because too few arguments for strong mode\n",
- shared_info->DebugName()->ToCString().get(),
- info_->shared_info()->DebugName()->ToCString().get());
- return NoChange();
- }
-
if (!Compiler::EnsureDeoptimizationSupport(&info)) {
TRACE("Not inlining %s into %s because deoptimization support failed\n",
shared_info->DebugName()->ToCString().get(),
@@ -508,10 +529,25 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
NodeProperties::ReplaceEffectInput(node, convert);
}
+ // If we are inlining a JS call at tail position then we have to pop current
+ // frame state and its potential arguments adaptor frame state in order to
+ // make the call stack be consistent with non-inlining case.
+ // After that we add a tail caller frame state which lets deoptimizer handle
+ // the case when the outermost function inlines a tail call (it should remove
+ // potential arguments adaptor frame that belongs to outermost function when
+ // deopt happens).
+ if (node->opcode() == IrOpcode::kJSCallFunction) {
+ const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
+ if (p.tail_call_mode() == TailCallMode::kAllow) {
+ frame_state = CreateTailCallerFrameState(node, frame_state);
+ }
+ }
+
// Insert argument adaptor frame if required. The callees formal parameter
// count (i.e. value outputs of start node minus target, receiver, new target,
// arguments count and context) have to match the number of arguments passed
// to the call.
+ int parameter_count = info.literal()->parameter_count();
DCHECK_EQ(parameter_count, start->op()->ValueOutputCount() - 5);
if (call.formal_arguments() != parameter_count) {
frame_state = CreateArtificialFrameState(
diff --git a/deps/v8/src/compiler/js-inlining.h b/deps/v8/src/compiler/js-inlining.h
index 99eff96c4c..d0ab7c0583 100644
--- a/deps/v8/src/compiler/js-inlining.h
+++ b/deps/v8/src/compiler/js-inlining.h
@@ -45,6 +45,8 @@ class JSInliner final : public AdvancedReducer {
FrameStateType frame_state_type,
Handle<SharedFunctionInfo> shared);
+ Node* CreateTailCallerFrameState(Node* node, Node* outer_frame_state);
+
Reduction InlineCall(Node* call, Node* new_target, Node* context,
Node* frame_state, Node* start, Node* end);
};
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index abeb11001d..034ee6fd76 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -15,7 +15,6 @@
#include "src/compiler/operator-properties.h"
#include "src/counters.h"
#include "src/objects-inl.h"
-#include "src/type-cache.h"
namespace v8 {
namespace internal {
@@ -23,11 +22,7 @@ namespace compiler {
JSIntrinsicLowering::JSIntrinsicLowering(Editor* editor, JSGraph* jsgraph,
DeoptimizationMode mode)
- : AdvancedReducer(editor),
- jsgraph_(jsgraph),
- mode_(mode),
- type_cache_(TypeCache::Get()) {}
-
+ : AdvancedReducer(editor), jsgraph_(jsgraph), mode_(mode) {}
Reduction JSIntrinsicLowering::Reduce(Node* node) {
if (node->opcode() != IrOpcode::kJSCallRuntime) return NoChange();
@@ -45,8 +40,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceDoubleHi(node);
case Runtime::kInlineDoubleLo:
return ReduceDoubleLo(node);
- case Runtime::kInlineIncrementStatsCounter:
- return ReduceIncrementStatsCounter(node);
case Runtime::kInlineIsArray:
return ReduceIsInstanceType(node, JS_ARRAY_TYPE);
case Runtime::kInlineIsTypedArray:
@@ -57,12 +50,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceIsJSReceiver(node);
case Runtime::kInlineIsSmi:
return ReduceIsSmi(node);
- case Runtime::kInlineMathClz32:
- return ReduceMathClz32(node);
- case Runtime::kInlineMathFloor:
- return ReduceMathFloor(node);
- case Runtime::kInlineMathSqrt:
- return ReduceMathSqrt(node);
case Runtime::kInlineValueOf:
return ReduceValueOf(node);
case Runtime::kInlineFixedArrayGet:
@@ -95,10 +82,12 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceToString(node);
case Runtime::kInlineCall:
return ReduceCall(node);
- case Runtime::kInlineTailCall:
- return ReduceTailCall(node);
+ case Runtime::kInlineNewObject:
+ return ReduceNewObject(node);
case Runtime::kInlineGetSuperConstructor:
return ReduceGetSuperConstructor(node);
+ case Runtime::kInlineGetOrdinaryHasInstance:
+ return ReduceGetOrdinaryHasInstance(node);
default:
break;
}
@@ -149,40 +138,23 @@ Reduction JSIntrinsicLowering::ReduceDeoptimizeNow(Node* node) {
Reduction JSIntrinsicLowering::ReduceDoubleHi(Node* node) {
+ // Tell the compiler to assume number input.
+ Node* renamed = graph()->NewNode(common()->Guard(Type::Number()),
+ node->InputAt(0), graph()->start());
+ node->ReplaceInput(0, renamed);
return Change(node, machine()->Float64ExtractHighWord32());
}
Reduction JSIntrinsicLowering::ReduceDoubleLo(Node* node) {
+ // Tell the compiler to assume number input.
+ Node* renamed = graph()->NewNode(common()->Guard(Type::Number()),
+ node->InputAt(0), graph()->start());
+ node->ReplaceInput(0, renamed);
return Change(node, machine()->Float64ExtractLowWord32());
}
-Reduction JSIntrinsicLowering::ReduceIncrementStatsCounter(Node* node) {
- if (!FLAG_native_code_counters) return ChangeToUndefined(node);
- HeapObjectMatcher m(NodeProperties::GetValueInput(node, 0));
- if (!m.HasValue() || !m.Value()->IsString()) {
- return ChangeToUndefined(node);
- }
- base::SmartArrayPointer<char> name =
- Handle<String>::cast(m.Value())->ToCString();
- StatsCounter counter(jsgraph()->isolate(), name.get());
- if (!counter.Enabled()) return ChangeToUndefined(node);
-
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- FieldAccess access = AccessBuilder::ForStatsCounter();
- Node* cnt = jsgraph()->ExternalConstant(ExternalReference(&counter));
- Node* load =
- graph()->NewNode(simplified()->LoadField(access), cnt, effect, control);
- Node* inc =
- graph()->NewNode(machine()->Int32Add(), load, jsgraph()->OneConstant());
- Node* store = graph()->NewNode(simplified()->StoreField(access), cnt, inc,
- load, control);
- return ChangeToUndefined(node, store);
-}
-
-
Reduction JSIntrinsicLowering::ReduceIsInstanceType(
Node* node, InstanceType instance_type) {
// if (%_IsSmi(value)) {
@@ -232,22 +204,6 @@ Reduction JSIntrinsicLowering::ReduceIsSmi(Node* node) {
}
-Reduction JSIntrinsicLowering::ReduceMathClz32(Node* node) {
- return Change(node, machine()->Word32Clz());
-}
-
-
-Reduction JSIntrinsicLowering::ReduceMathFloor(Node* node) {
- if (!machine()->Float64RoundDown().IsSupported()) return NoChange();
- return Change(node, machine()->Float64RoundDown().op());
-}
-
-
-Reduction JSIntrinsicLowering::ReduceMathSqrt(Node* node) {
- return Change(node, machine()->Float64Sqrt());
-}
-
-
Reduction JSIntrinsicLowering::ReduceValueOf(Node* node) {
// if (%_IsSmi(value)) {
// return value;
@@ -385,44 +341,8 @@ Reduction JSIntrinsicLowering::ReduceSubString(Node* node) {
Reduction JSIntrinsicLowering::ReduceToInteger(Node* node) {
- Node* value = NodeProperties::GetValueInput(node, 0);
- Node* context = NodeProperties::GetContextInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- // ToInteger is a no-op on integer values and -0.
- Type* value_type = NodeProperties::GetType(value);
- if (value_type->Is(type_cache().kIntegerOrMinusZero)) {
- ReplaceWithValue(node, value);
- return Replace(value);
- }
-
- Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), value);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = value;
-
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
- {
- vfalse = efalse =
- graph()->NewNode(javascript()->CallRuntime(Runtime::kToInteger), value,
- context, frame_state, efalse, if_false);
- if_false = graph()->NewNode(common()->IfSuccess(), vfalse);
- }
-
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue, vfalse, control);
- // TODO(bmeurer, mstarzinger): Rewire IfException inputs to {vfalse}.
- ReplaceWithValue(node, value, effect, control);
- return Changed(value);
+ NodeProperties::ChangeOp(node, javascript()->ToInteger());
+ return Changed(node);
}
@@ -439,38 +359,8 @@ Reduction JSIntrinsicLowering::ReduceToNumber(Node* node) {
Reduction JSIntrinsicLowering::ReduceToLength(Node* node) {
- Node* value = NodeProperties::GetValueInput(node, 0);
- Type* value_type = NodeProperties::GetType(value);
- if (value_type->Is(type_cache().kIntegerOrMinusZero)) {
- if (value_type->Max() <= 0.0) {
- value = jsgraph()->ZeroConstant();
- } else if (value_type->Min() >= kMaxSafeInteger) {
- value = jsgraph()->Constant(kMaxSafeInteger);
- } else {
- if (value_type->Min() <= 0.0) {
- value = graph()->NewNode(
- common()->Select(MachineRepresentation::kTagged),
- graph()->NewNode(simplified()->NumberLessThanOrEqual(), value,
- jsgraph()->ZeroConstant()),
- jsgraph()->ZeroConstant(), value);
- value_type = Type::Range(0.0, value_type->Max(), graph()->zone());
- NodeProperties::SetType(value, value_type);
- }
- if (value_type->Max() > kMaxSafeInteger) {
- value = graph()->NewNode(
- common()->Select(MachineRepresentation::kTagged),
- graph()->NewNode(simplified()->NumberLessThanOrEqual(),
- jsgraph()->Constant(kMaxSafeInteger), value),
- jsgraph()->Constant(kMaxSafeInteger), value);
- value_type =
- Type::Range(value_type->Min(), kMaxSafeInteger, graph()->zone());
- NodeProperties::SetType(value, value_type);
- }
- }
- ReplaceWithValue(node, value);
- return Replace(value);
- }
- return Change(node, CodeFactory::ToLength(isolate()), 0);
+ NodeProperties::ChangeOp(node, javascript()->ToLength());
+ return Changed(node);
}
@@ -506,17 +396,18 @@ Reduction JSIntrinsicLowering::ReduceCall(Node* node) {
return Changed(node);
}
-
-Reduction JSIntrinsicLowering::ReduceTailCall(Node* node) {
- size_t const arity = CallRuntimeParametersOf(node->op()).arity();
- NodeProperties::ChangeOp(node,
- javascript()->CallFunction(arity, VectorSlotPair(),
- ConvertReceiverMode::kAny,
- TailCallMode::kAllow));
- return Changed(node);
+Reduction JSIntrinsicLowering::ReduceNewObject(Node* node) {
+ Node* constructor = NodeProperties::GetValueInput(node, 0);
+ Node* new_target = NodeProperties::GetValueInput(node, 1);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ Node* value = graph()->NewNode(javascript()->Create(), constructor,
+ new_target, context, frame_state, effect);
+ ReplaceWithValue(node, value, value);
+ return Replace(value);
}
-
Reduction JSIntrinsicLowering::ReduceGetSuperConstructor(Node* node) {
Node* active_function = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -528,6 +419,16 @@ Reduction JSIntrinsicLowering::ReduceGetSuperConstructor(Node* node) {
active_function_map, effect, control);
}
+Reduction JSIntrinsicLowering::ReduceGetOrdinaryHasInstance(Node* node) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ return Change(node, javascript()->LoadContext(
+ 0, Context::ORDINARY_HAS_INSTANCE_INDEX, true),
+ native_context, context, effect);
+}
Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
Node* b) {
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.h b/deps/v8/src/compiler/js-intrinsic-lowering.h
index d8e1102afa..a43ed01166 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.h
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.h
@@ -13,7 +13,6 @@ namespace internal {
// Forward declarations.
class Callable;
-class TypeCache;
namespace compiler {
@@ -43,13 +42,9 @@ class JSIntrinsicLowering final : public AdvancedReducer {
Reduction ReduceDeoptimizeNow(Node* node);
Reduction ReduceDoubleHi(Node* node);
Reduction ReduceDoubleLo(Node* node);
- Reduction ReduceIncrementStatsCounter(Node* node);
Reduction ReduceIsInstanceType(Node* node, InstanceType instance_type);
Reduction ReduceIsJSReceiver(Node* node);
Reduction ReduceIsSmi(Node* node);
- Reduction ReduceMathClz32(Node* node);
- Reduction ReduceMathFloor(Node* node);
- Reduction ReduceMathSqrt(Node* node);
Reduction ReduceValueOf(Node* node);
Reduction ReduceFixedArrayGet(Node* node);
Reduction ReduceFixedArraySet(Node* node);
@@ -66,8 +61,9 @@ class JSIntrinsicLowering final : public AdvancedReducer {
Reduction ReduceToPrimitive(Node* node);
Reduction ReduceToString(Node* node);
Reduction ReduceCall(Node* node);
- Reduction ReduceTailCall(Node* node);
+ Reduction ReduceNewObject(Node* node);
Reduction ReduceGetSuperConstructor(Node* node);
+ Reduction ReduceGetOrdinaryHasInstance(Node* node);
Reduction Change(Node* node, const Operator* op);
Reduction Change(Node* node, const Operator* op, Node* a, Node* b);
@@ -86,11 +82,9 @@ class JSIntrinsicLowering final : public AdvancedReducer {
MachineOperatorBuilder* machine() const;
SimplifiedOperatorBuilder* simplified() const;
DeoptimizationMode mode() const { return mode_; }
- TypeCache const& type_cache() const { return type_cache_; }
JSGraph* const jsgraph_;
DeoptimizationMode const mode_;
- TypeCache const& type_cache_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index 2c11794dba..d1353d20be 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -108,27 +108,36 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
ZoneVector<Node*> effects(zone());
ZoneVector<Node*> controls(zone());
- // The list of "exiting" controls, which currently go to a single deoptimize.
- // TODO(bmeurer): Consider using an IC as fallback.
- Node* const exit_effect = effect;
- ZoneVector<Node*> exit_controls(zone());
-
// Ensure that {index} matches the specified {name} (if {index} is given).
if (index != nullptr) {
Node* check = graph()->NewNode(simplified()->ReferenceEqual(Type::Name()),
index, jsgraph()->HeapConstant(name));
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
- exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
- control = graph()->NewNode(common()->IfTrue(), branch);
+ control = graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+ effect, control);
+ }
+
+ // Check if {receiver} may be a number.
+ bool receiverissmi_possible = false;
+ for (PropertyAccessInfo const& access_info : access_infos) {
+ if (access_info.receiver_type()->Is(Type::Number())) {
+ receiverissmi_possible = true;
+ break;
+ }
}
// Ensure that {receiver} is a heap object.
Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
- Node* branch = graph()->NewNode(common()->Branch(), check, control);
- control = graph()->NewNode(common()->IfFalse(), branch);
- Node* receiverissmi_control = graph()->NewNode(common()->IfTrue(), branch);
+ Node* receiverissmi_control = nullptr;
Node* receiverissmi_effect = effect;
+ if (receiverissmi_possible) {
+ Node* branch = graph()->NewNode(common()->Branch(), check, control);
+ control = graph()->NewNode(common()->IfFalse(), branch);
+ receiverissmi_control = graph()->NewNode(common()->IfTrue(), branch);
+ receiverissmi_effect = effect;
+ } else {
+ control = graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
+ effect, control);
+ }
// Load the {receiver} map. The resulting effect is the dominating effect for
// all (polymorphic) branches.
@@ -138,7 +147,8 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
// Generate code for the various different property access patterns.
Node* fallthrough_control = control;
- for (PropertyAccessInfo const& access_info : access_infos) {
+ for (size_t j = 0; j < access_infos.size(); ++j) {
+ PropertyAccessInfo const& access_info = access_infos[j];
Node* this_value = value;
Node* this_receiver = receiver;
Node* this_effect = effect;
@@ -154,37 +164,52 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
Node* check =
graph()->NewNode(machine()->Uint32LessThan(), receiver_instance_type,
jsgraph()->Uint32Constant(FIRST_NONSTRING_TYPE));
- Node* branch =
- graph()->NewNode(common()->Branch(), check, fallthrough_control);
- fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
- this_control = graph()->NewNode(common()->IfTrue(), branch);
+ if (j == access_infos.size() - 1) {
+ this_control =
+ graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+ this_effect, fallthrough_control);
+ fallthrough_control = nullptr;
+ } else {
+ Node* branch =
+ graph()->NewNode(common()->Branch(), check, fallthrough_control);
+ fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
+ this_control = graph()->NewNode(common()->IfTrue(), branch);
+ }
} else {
// Emit a (sequence of) map checks for other {receiver}s.
ZoneVector<Node*> this_controls(zone());
ZoneVector<Node*> this_effects(zone());
+ int num_classes = access_info.receiver_type()->NumClasses();
for (auto i = access_info.receiver_type()->Classes(); !i.Done();
i.Advance()) {
+ DCHECK_LT(0, num_classes);
Handle<Map> map = i.Current();
Node* check =
graph()->NewNode(simplified()->ReferenceEqual(Type::Internal()),
receiver_map, jsgraph()->Constant(map));
- Node* branch =
- graph()->NewNode(common()->Branch(), check, fallthrough_control);
- fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
- this_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
- this_effects.push_back(this_effect);
+ if (--num_classes == 0 && j == access_infos.size() - 1) {
+ this_controls.push_back(
+ graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+ this_effect, fallthrough_control));
+ this_effects.push_back(this_effect);
+ fallthrough_control = nullptr;
+ } else {
+ Node* branch =
+ graph()->NewNode(common()->Branch(), check, fallthrough_control);
+ fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
+ this_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
+ this_effects.push_back(this_effect);
+ }
}
// The Number case requires special treatment to also deal with Smis.
if (receiver_type->Is(Type::Number())) {
- // Join this check with the "receiver is smi" check above, and mark the
- // "receiver is smi" check as "consumed" so that we don't deoptimize if
- // the {receiver} is actually a Smi.
- if (receiverissmi_control != nullptr) {
- this_controls.push_back(receiverissmi_control);
- this_effects.push_back(receiverissmi_effect);
- receiverissmi_control = receiverissmi_effect = nullptr;
- }
+ // Join this check with the "receiver is smi" check above.
+ DCHECK_NOT_NULL(receiverissmi_effect);
+ DCHECK_NOT_NULL(receiverissmi_control);
+ this_effects.push_back(receiverissmi_effect);
+ this_controls.push_back(receiverissmi_control);
+ receiverissmi_effect = receiverissmi_control = nullptr;
}
// Create dominating Merge+EffectPhi for this {receiver} type.
@@ -212,23 +237,14 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
// Generate the actual property access.
if (access_info.IsNotFound()) {
DCHECK_EQ(AccessMode::kLoad, access_mode);
- if (is_strong(language_mode)) {
- // TODO(bmeurer/mstarzinger): Add support for lowering inside try
- // blocks rewiring the IfException edge to a runtime call/throw.
- exit_controls.push_back(this_control);
- continue;
- } else {
- this_value = jsgraph()->UndefinedConstant();
- }
+ this_value = jsgraph()->UndefinedConstant();
} else if (access_info.IsDataConstant()) {
this_value = jsgraph()->Constant(access_info.constant());
if (access_mode == AccessMode::kStore) {
Node* check = graph()->NewNode(
simplified()->ReferenceEqual(Type::Tagged()), value, this_value);
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check, this_control);
- exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
- this_control = graph()->NewNode(common()->IfTrue(), branch);
+ this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
+ frame_state, this_effect, this_control);
}
} else {
DCHECK(access_info.IsDataField());
@@ -253,10 +269,9 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
jsgraph()->Int32Constant(
1 << JSArrayBuffer::WasNeutered::kShift)),
jsgraph()->Int32Constant(0));
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check, this_control);
- exit_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
- this_control = graph()->NewNode(common()->IfFalse(), branch);
+ this_control =
+ graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
+ this_effect, this_control);
break;
}
}
@@ -292,11 +307,9 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
if (field_type->Is(Type::UntaggedFloat64())) {
Node* check =
graph()->NewNode(simplified()->ObjectIsNumber(), this_value);
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check, this_control);
- exit_controls.push_back(
- graph()->NewNode(common()->IfFalse(), branch));
- this_control = graph()->NewNode(common()->IfTrue(), branch);
+ this_control =
+ graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+ this_effect, this_control);
this_value = graph()->NewNode(common()->Guard(Type::Number()),
this_value, this_control);
@@ -335,46 +348,30 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
} else if (field_type->Is(Type::TaggedSigned())) {
Node* check =
graph()->NewNode(simplified()->ObjectIsSmi(), this_value);
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check, this_control);
- exit_controls.push_back(
- graph()->NewNode(common()->IfFalse(), branch));
- this_control = graph()->NewNode(common()->IfTrue(), branch);
+ this_control =
+ graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+ this_effect, this_control);
this_value = graph()->NewNode(common()->Guard(type_cache_.kSmi),
this_value, this_control);
} else if (field_type->Is(Type::TaggedPointer())) {
Node* check =
graph()->NewNode(simplified()->ObjectIsSmi(), this_value);
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check, this_control);
- exit_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
- this_control = graph()->NewNode(common()->IfFalse(), branch);
- if (field_type->NumClasses() > 0) {
- // Emit a (sequence of) map checks for the value.
- ZoneVector<Node*> this_controls(zone());
+ this_control =
+ graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
+ this_effect, this_control);
+ if (field_type->NumClasses() == 1) {
+ // Emit a map check for the value.
Node* this_value_map = this_effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForMap()), this_value,
this_effect, this_control);
- for (auto i = field_type->Classes(); !i.Done(); i.Advance()) {
- Handle<Map> field_map(i.Current());
- check = graph()->NewNode(
- simplified()->ReferenceEqual(Type::Internal()),
- this_value_map, jsgraph()->Constant(field_map));
- branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check, this_control);
- this_control = graph()->NewNode(common()->IfFalse(), branch);
- this_controls.push_back(
- graph()->NewNode(common()->IfTrue(), branch));
- }
- exit_controls.push_back(this_control);
- int const this_control_count =
- static_cast<int>(this_controls.size());
+ Node* check = graph()->NewNode(
+ simplified()->ReferenceEqual(Type::Internal()), this_value_map,
+ jsgraph()->Constant(field_type->Classes().Current()));
this_control =
- (this_control_count == 1)
- ? this_controls.front()
- : graph()->NewNode(common()->Merge(this_control_count),
- this_control_count,
- &this_controls.front());
+ graph()->NewNode(common()->DeoptimizeUnless(), check,
+ frame_state, this_effect, this_control);
+ } else {
+ DCHECK_EQ(0, field_type->NumClasses());
}
} else {
DCHECK(field_type->Is(Type::Tagged()));
@@ -403,39 +400,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
controls.push_back(this_control);
}
- // Collect the fallthrough control as final "exit" control.
- if (fallthrough_control != control) {
- // Mark the last fallthrough branch as deferred.
- MarkAsDeferred(fallthrough_control);
- }
- exit_controls.push_back(fallthrough_control);
-
- // Also collect the "receiver is smi" control if we didn't handle the case of
- // Number primitives in the polymorphic branches above.
- if (receiverissmi_control != nullptr) {
- // Mark the "receiver is smi" case as deferred.
- MarkAsDeferred(receiverissmi_control);
- DCHECK_EQ(exit_effect, receiverissmi_effect);
- exit_controls.push_back(receiverissmi_control);
- }
-
- // Generate the single "exit" point, where we get if either all map/instance
- // type checks failed, or one of the assumptions inside one of the cases
- // failes (i.e. failing prototype chain check).
- // TODO(bmeurer): Consider falling back to IC here if deoptimization is
- // disabled.
- int const exit_control_count = static_cast<int>(exit_controls.size());
- Node* exit_control =
- (exit_control_count == 1)
- ? exit_controls.front()
- : graph()->NewNode(common()->Merge(exit_control_count),
- exit_control_count, &exit_controls.front());
- Node* deoptimize =
- graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
- frame_state, exit_effect, exit_control);
- // TODO(bmeurer): This should be on the AdvancedReducer somehow.
- NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
- Revisit(graph()->end());
+ DCHECK_NULL(fallthrough_control);
// Generate the final merge point for all (polymorphic) branches.
int const control_count = static_cast<int>(controls.size());
@@ -562,17 +527,10 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
ZoneVector<Node*> effects(zone());
ZoneVector<Node*> controls(zone());
- // The list of "exiting" controls, which currently go to a single deoptimize.
- // TODO(bmeurer): Consider using an IC as fallback.
- Node* const exit_effect = effect;
- ZoneVector<Node*> exit_controls(zone());
-
// Ensure that {receiver} is a heap object.
Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
- exit_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
- control = graph()->NewNode(common()->IfFalse(), branch);
+ control = graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
+ effect, control);
// Load the {receiver} map. The resulting effect is the dominating effect for
// all (polymorphic) branches.
@@ -582,7 +540,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
// Generate code for the various different element access patterns.
Node* fallthrough_control = control;
- for (ElementAccessInfo const& access_info : access_infos) {
+ for (size_t j = 0; j < access_infos.size(); ++j) {
+ ElementAccessInfo const& access_info = access_infos[j];
Node* this_receiver = receiver;
Node* this_value = value;
Node* this_index = index;
@@ -595,35 +554,61 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
{
ZoneVector<Node*> this_controls(zone());
ZoneVector<Node*> this_effects(zone());
+ size_t num_transitions = access_info.transitions().size();
+ int num_classes = access_info.receiver_type()->NumClasses();
for (auto i = access_info.receiver_type()->Classes(); !i.Done();
i.Advance()) {
+ DCHECK_LT(0, num_classes);
Handle<Map> map = i.Current();
Node* check =
graph()->NewNode(simplified()->ReferenceEqual(Type::Any()),
receiver_map, jsgraph()->Constant(map));
- Node* branch =
- graph()->NewNode(common()->Branch(), check, fallthrough_control);
- this_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
+ if (--num_classes == 0 && num_transitions == 0 &&
+ j == access_infos.size() - 1) {
+ // Last map check on the fallthrough control path, do a conditional
+ // eager deoptimization exit here.
+ // TODO(turbofan): This is ugly as hell! We should probably introduce
+ // macro-ish operators for property access that encapsulate this whole
+ // mess.
+ this_controls.push_back(graph()->NewNode(common()->DeoptimizeUnless(),
+ check, frame_state, effect,
+ fallthrough_control));
+ fallthrough_control = nullptr;
+ } else {
+ Node* branch =
+ graph()->NewNode(common()->Branch(), check, fallthrough_control);
+ this_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
+ fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
+ }
this_effects.push_back(effect);
- fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
if (!map->IsJSArrayMap()) receiver_is_jsarray = false;
}
// Generate possible elements kind transitions.
for (auto transition : access_info.transitions()) {
+ DCHECK_LT(0u, num_transitions);
Handle<Map> transition_source = transition.first;
Handle<Map> transition_target = transition.second;
+ Node* transition_control;
+ Node* transition_effect = effect;
// Check if {receiver} has the specified {transition_source} map.
Node* check = graph()->NewNode(
simplified()->ReferenceEqual(Type::Any()), receiver_map,
jsgraph()->HeapConstant(transition_source));
- Node* branch =
- graph()->NewNode(common()->Branch(), check, fallthrough_control);
+ if (--num_transitions == 0 && j == access_infos.size() - 1) {
+ transition_control =
+ graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+ transition_effect, fallthrough_control);
+ fallthrough_control = nullptr;
+ } else {
+ Node* branch =
+ graph()->NewNode(common()->Branch(), check, fallthrough_control);
+ fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
+ transition_control = graph()->NewNode(common()->IfTrue(), branch);
+ }
// Migrate {receiver} from {transition_source} to {transition_target}.
- Node* transition_control = graph()->NewNode(common()->IfTrue(), branch);
- Node* transition_effect = effect;
if (IsSimpleMapChangeTransition(transition_source->elements_kind(),
transition_target->elements_kind())) {
// In-place migration, just store the {transition_target} map.
@@ -647,8 +632,6 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
}
this_controls.push_back(transition_control);
this_effects.push_back(transition_effect);
-
- fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
}
// Create single chokepoint for the control.
@@ -679,10 +662,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
if (!NumberMatcher(this_index).HasValue()) {
Node* check =
graph()->NewNode(simplified()->ObjectIsNumber(), this_index);
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check, this_control);
- exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
- this_control = graph()->NewNode(common()->IfTrue(), branch);
+ this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
+ frame_state, this_effect, this_control);
this_index = graph()->NewNode(common()->Guard(Type::Number()), this_index,
this_control);
}
@@ -694,10 +675,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
graph()->NewNode(simplified()->NumberToUint32(), this_index);
Node* check = graph()->NewNode(simplified()->NumberEqual(), this_index32,
this_index);
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check, this_control);
- exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
- this_control = graph()->NewNode(common()->IfTrue(), branch);
+ this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
+ frame_state, this_effect, this_control);
this_index = this_index32;
}
@@ -716,13 +695,11 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
Node* this_elements_map = this_effect =
graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
this_elements, this_effect, this_control);
- check = graph()->NewNode(
+ Node* check = graph()->NewNode(
simplified()->ReferenceEqual(Type::Any()), this_elements_map,
jsgraph()->HeapConstant(factory()->fixed_array_map()));
- branch = graph()->NewNode(common()->Branch(BranchHint::kTrue), check,
- this_control);
- exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
- this_control = graph()->NewNode(common()->IfTrue(), branch);
+ this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
+ frame_state, this_effect, this_control);
}
// Load the length of the {receiver}.
@@ -739,10 +716,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
// Check that the {index} is in the valid range for the {receiver}.
Node* check = graph()->NewNode(simplified()->NumberLessThan(), this_index,
this_length);
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue), check,
- this_control);
- exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
- this_control = graph()->NewNode(common()->IfTrue(), branch);
+ this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
+ frame_state, this_effect, this_control);
// Compute the element access.
Type* element_type = Type::Any();
@@ -781,16 +756,16 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
Node* check =
graph()->NewNode(simplified()->ReferenceEqual(element_access.type),
this_value, jsgraph()->TheHoleConstant());
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check, this_control);
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
// Check if we are allowed to turn the hole into undefined.
Type* initial_holey_array_type = Type::Class(
handle(isolate()->get_initial_js_array_map(elements_kind)),
graph()->zone());
if (receiver_type->NowIs(initial_holey_array_type) &&
isolate()->IsFastArrayConstructorPrototypeChainIntact()) {
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check, this_control);
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
// Add a code dependency on the array protector cell.
AssumePrototypesStable(receiver_type, native_context,
isolate()->initial_object_prototype());
@@ -805,8 +780,9 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
Type::Union(element_type, Type::Undefined(), graph()->zone());
} else {
// Deoptimize in case of the hole.
- exit_controls.push_back(if_true);
- this_control = if_false;
+ this_control =
+ graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
+ this_effect, this_control);
}
// Rename the result to represent the actual type (not polluted by the
// hole).
@@ -833,29 +809,24 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
check, jsgraph()->UndefinedConstant(), this_value);
} else {
// Deoptimize in case of the hole.
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check, this_control);
- this_control = graph()->NewNode(common()->IfFalse(), branch);
- exit_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
+ this_control =
+ graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
+ this_effect, this_control);
}
}
} else {
DCHECK_EQ(AccessMode::kStore, access_mode);
if (IsFastSmiElementsKind(elements_kind)) {
Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), this_value);
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check, this_control);
- exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
- this_control = graph()->NewNode(common()->IfTrue(), branch);
+ this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
+ frame_state, this_effect, this_control);
this_value = graph()->NewNode(common()->Guard(type_cache_.kSmi),
this_value, this_control);
} else if (IsFastDoubleElementsKind(elements_kind)) {
Node* check =
graph()->NewNode(simplified()->ObjectIsNumber(), this_value);
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check, this_control);
- exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
- this_control = graph()->NewNode(common()->IfTrue(), branch);
+ this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
+ frame_state, this_effect, this_control);
this_value = graph()->NewNode(common()->Guard(Type::Number()),
this_value, this_control);
}
@@ -870,30 +841,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
controls.push_back(this_control);
}
- // Collect the fallthrough control as final "exit" control.
- if (fallthrough_control != control) {
- // Mark the last fallthrough branch as deferred.
- MarkAsDeferred(fallthrough_control);
- }
- exit_controls.push_back(fallthrough_control);
-
- // Generate the single "exit" point, where we get if either all map/instance
- // type checks failed, or one of the assumptions inside one of the cases
- // failes (i.e. failing prototype chain check).
- // TODO(bmeurer): Consider falling back to IC here if deoptimization is
- // disabled.
- int const exit_control_count = static_cast<int>(exit_controls.size());
- Node* exit_control =
- (exit_control_count == 1)
- ? exit_controls.front()
- : graph()->NewNode(common()->Merge(exit_control_count),
- exit_control_count, &exit_controls.front());
- Node* deoptimize =
- graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
- frame_state, exit_effect, exit_control);
- // TODO(bmeurer): This should be on the AdvancedReducer somehow.
- NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
- Revisit(graph()->end());
+ DCHECK_NULL(fallthrough_control);
// Generate the final merge point for all (polymorphic) branches.
int const control_count = static_cast<int>(controls.size());
@@ -1048,18 +996,6 @@ void JSNativeContextSpecialization::AssumePrototypesStable(
}
-void JSNativeContextSpecialization::MarkAsDeferred(Node* if_projection) {
- Node* branch = NodeProperties::GetControlInput(if_projection);
- DCHECK_EQ(IrOpcode::kBranch, branch->opcode());
- if (if_projection->opcode() == IrOpcode::kIfTrue) {
- NodeProperties::ChangeOp(branch, common()->Branch(BranchHint::kFalse));
- } else {
- DCHECK_EQ(IrOpcode::kIfFalse, if_projection->opcode());
- NodeProperties::ChangeOp(branch, common()->Branch(BranchHint::kTrue));
- }
-}
-
-
MaybeHandle<Context> JSNativeContextSpecialization::GetNativeContext(
Node* node) {
Node* const context = NodeProperties::GetContextInput(node);
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index 4251d72fc4..5562c6e36e 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -85,10 +85,6 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
Handle<Context> native_context,
Handle<JSObject> holder);
- // Assuming that {if_projection} is either IfTrue or IfFalse, adds a hint on
- // the dominating Branch that {if_projection} is the unlikely (deferred) case.
- void MarkAsDeferred(Node* if_projection);
-
// Retrieve the native context from the given {node} if known.
MaybeHandle<Context> GetNativeContext(Node* node);
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index 5fcd51928d..98e090b509 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -346,7 +346,8 @@ const CreateClosureParameters& CreateClosureParametersOf(const Operator* op) {
bool operator==(CreateLiteralParameters const& lhs,
CreateLiteralParameters const& rhs) {
return lhs.constant().location() == rhs.constant().location() &&
- lhs.flags() == rhs.flags() && lhs.index() == rhs.index();
+ lhs.length() == rhs.length() && lhs.flags() == rhs.flags() &&
+ lhs.index() == rhs.index();
}
@@ -357,12 +358,14 @@ bool operator!=(CreateLiteralParameters const& lhs,
size_t hash_value(CreateLiteralParameters const& p) {
- return base::hash_combine(p.constant().location(), p.flags(), p.index());
+ return base::hash_combine(p.constant().location(), p.length(), p.flags(),
+ p.index());
}
std::ostream& operator<<(std::ostream& os, CreateLiteralParameters const& p) {
- return os << Brief(*p.constant()) << ", " << p.flags() << ", " << p.index();
+ return os << Brief(*p.constant()) << ", " << p.length() << ", " << p.flags()
+ << ", " << p.index();
}
@@ -382,10 +385,12 @@ const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op) {
V(GreaterThan, Operator::kNoProperties, 2, 1) \
V(LessThanOrEqual, Operator::kNoProperties, 2, 1) \
V(GreaterThanOrEqual, Operator::kNoProperties, 2, 1) \
- V(ToNumber, Operator::kNoProperties, 1, 1) \
- V(ToString, Operator::kNoProperties, 1, 1) \
+ V(ToInteger, Operator::kNoProperties, 1, 1) \
+ V(ToLength, Operator::kNoProperties, 1, 1) \
V(ToName, Operator::kNoProperties, 1, 1) \
+ V(ToNumber, Operator::kNoProperties, 1, 1) \
V(ToObject, Operator::kNoProperties, 1, 1) \
+ V(ToString, Operator::kNoProperties, 1, 1) \
V(Yield, Operator::kNoProperties, 1, 1) \
V(Create, Operator::kEliminatable, 2, 1) \
V(CreateIterResultObject, Operator::kEliminatable, 2, 1) \
@@ -733,12 +738,11 @@ const Operator* JSOperatorBuilder::CreateClosure(
parameters); // parameter
}
-
const Operator* JSOperatorBuilder::CreateLiteralArray(
- Handle<FixedArray> constant_elements, int literal_flags,
- int literal_index) {
- CreateLiteralParameters parameters(constant_elements, literal_flags,
- literal_index);
+ Handle<FixedArray> constant_elements, int literal_flags, int literal_index,
+ int number_of_elements) {
+ CreateLiteralParameters parameters(constant_elements, number_of_elements,
+ literal_flags, literal_index);
return new (zone()) Operator1<CreateLiteralParameters>( // --
IrOpcode::kJSCreateLiteralArray, Operator::kNoProperties, // opcode
"JSCreateLiteralArray", // name
@@ -746,12 +750,11 @@ const Operator* JSOperatorBuilder::CreateLiteralArray(
parameters); // parameter
}
-
const Operator* JSOperatorBuilder::CreateLiteralObject(
Handle<FixedArray> constant_properties, int literal_flags,
- int literal_index) {
- CreateLiteralParameters parameters(constant_properties, literal_flags,
- literal_index);
+ int literal_index, int number_of_properties) {
+ CreateLiteralParameters parameters(constant_properties, number_of_properties,
+ literal_flags, literal_index);
return new (zone()) Operator1<CreateLiteralParameters>( // --
IrOpcode::kJSCreateLiteralObject, Operator::kNoProperties, // opcode
"JSCreateLiteralObject", // name
@@ -762,7 +765,7 @@ const Operator* JSOperatorBuilder::CreateLiteralObject(
const Operator* JSOperatorBuilder::CreateLiteralRegExp(
Handle<String> constant_pattern, int literal_flags, int literal_index) {
- CreateLiteralParameters parameters(constant_pattern, literal_flags,
+ CreateLiteralParameters parameters(constant_pattern, -1, literal_flags,
literal_index);
return new (zone()) Operator1<CreateLiteralParameters>( // --
IrOpcode::kJSCreateLiteralRegExp, Operator::kNoProperties, // opcode
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 070e71efac..eb323c9c12 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -350,15 +350,18 @@ const CreateClosureParameters& CreateClosureParametersOf(const Operator* op);
// JSCreateLiteralRegExp operators.
class CreateLiteralParameters final {
public:
- CreateLiteralParameters(Handle<HeapObject> constant, int flags, int index)
- : constant_(constant), flags_(flags), index_(index) {}
+ CreateLiteralParameters(Handle<HeapObject> constant, int length, int flags,
+ int index)
+ : constant_(constant), length_(length), flags_(flags), index_(index) {}
Handle<HeapObject> constant() const { return constant_; }
+ int length() const { return length_; }
int flags() const { return flags_; }
int index() const { return index_; }
private:
Handle<HeapObject> const constant_;
+ int const length_;
int const flags_;
int const index_;
};
@@ -401,10 +404,12 @@ class JSOperatorBuilder final : public ZoneObject {
const Operator* Modulus(BinaryOperationHints hints);
const Operator* ToBoolean(ToBooleanHints hints);
- const Operator* ToNumber();
- const Operator* ToString();
+ const Operator* ToInteger();
+ const Operator* ToLength();
const Operator* ToName();
+ const Operator* ToNumber();
const Operator* ToObject();
+ const Operator* ToString();
const Operator* Yield();
const Operator* Create();
@@ -414,9 +419,11 @@ class JSOperatorBuilder final : public ZoneObject {
PretenureFlag pretenure);
const Operator* CreateIterResultObject();
const Operator* CreateLiteralArray(Handle<FixedArray> constant_elements,
- int literal_flags, int literal_index);
+ int literal_flags, int literal_index,
+ int number_of_elements);
const Operator* CreateLiteralObject(Handle<FixedArray> constant_properties,
- int literal_flags, int literal_index);
+ int literal_flags, int literal_index,
+ int number_of_properties);
const Operator* CreateLiteralRegExp(Handle<String> constant_pattern,
int literal_flags, int literal_index);
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 11ae3a9709..7e1a0dc24e 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -533,15 +533,11 @@ Reduction JSTypedLowering::ReduceJSEqual(Node* node, bool invert) {
return r.ChangeToPureOperator(
simplified()->ReferenceEqual(Type::Receiver()), invert);
}
- if (r.OneInputIs(Type::NullOrUndefined())) {
- Callable const callable = CodeFactory::CompareNilIC(isolate(), kNullValue);
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNeedsFrameState, node->op()->properties());
- node->RemoveInput(r.LeftInputIs(Type::NullOrUndefined()) ? 0 : 1);
- node->InsertInput(graph()->zone(), 0,
- jsgraph()->HeapConstant(callable.code()));
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ if (r.OneInputIs(Type::Undetectable())) {
+ RelaxEffectsAndControls(node);
+ node->RemoveInput(r.LeftInputIs(Type::Undetectable()) ? 0 : 1);
+ node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, simplified()->ObjectIsUndetectable());
if (invert) {
// Insert an boolean not to invert the value.
Node* value = graph()->NewNode(simplified()->BooleanNot(), node);
@@ -648,6 +644,51 @@ Reduction JSTypedLowering::ReduceJSToBoolean(Node* node) {
return NoChange();
}
+Reduction JSTypedLowering::ReduceJSToInteger(Node* node) {
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ Type* const input_type = NodeProperties::GetType(input);
+ if (input_type->Is(type_cache_.kIntegerOrMinusZero)) {
+ // JSToInteger(x:integer) => x
+ ReplaceWithValue(node, input);
+ return Replace(input);
+ }
+ return NoChange();
+}
+
+Reduction JSTypedLowering::ReduceJSToLength(Node* node) {
+ Node* input = NodeProperties::GetValueInput(node, 0);
+ Type* input_type = NodeProperties::GetType(input);
+ if (input_type->Is(type_cache_.kIntegerOrMinusZero)) {
+ if (input_type->Max() <= 0.0) {
+ input = jsgraph()->ZeroConstant();
+ } else if (input_type->Min() >= kMaxSafeInteger) {
+ input = jsgraph()->Constant(kMaxSafeInteger);
+ } else {
+ if (input_type->Min() <= 0.0) {
+ input = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged),
+ graph()->NewNode(simplified()->NumberLessThanOrEqual(), input,
+ jsgraph()->ZeroConstant()),
+ jsgraph()->ZeroConstant(), input);
+ input_type = Type::Range(0.0, input_type->Max(), graph()->zone());
+ NodeProperties::SetType(input, input_type);
+ }
+ if (input_type->Max() > kMaxSafeInteger) {
+ input = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged),
+ graph()->NewNode(simplified()->NumberLessThanOrEqual(),
+ jsgraph()->Constant(kMaxSafeInteger), input),
+ jsgraph()->Constant(kMaxSafeInteger), input);
+ input_type =
+ Type::Range(input_type->Min(), kMaxSafeInteger, graph()->zone());
+ NodeProperties::SetType(input, input_type);
+ }
+ }
+ ReplaceWithValue(node, input);
+ return Replace(input);
+ }
+ return NoChange();
+}
Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
if (input->opcode() == IrOpcode::kJSToNumber) {
@@ -699,7 +740,10 @@ Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
// JSToNumber(x:boolean) => BooleanToNumber(x)
return Replace(graph()->NewNode(simplified()->BooleanToNumber(), input));
}
- // TODO(turbofan): js-typed-lowering of ToNumber(x:string)
+ if (input_type->Is(Type::String())) {
+ // JSToNumber(x:string) => StringToNumber(x)
+ return Replace(graph()->NewNode(simplified()->StringToNumber(), input));
+ }
return NoChange();
}
@@ -1684,6 +1728,10 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSModulus(node);
case IrOpcode::kJSToBoolean:
return ReduceJSToBoolean(node);
+ case IrOpcode::kJSToInteger:
+ return ReduceJSToInteger(node);
+ case IrOpcode::kJSToLength:
+ return ReduceJSToLength(node);
case IrOpcode::kJSToNumber:
return ReduceJSToNumber(node);
case IrOpcode::kJSToString:
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index 4621a45e28..151787106b 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -62,6 +62,8 @@ class JSTypedLowering final : public AdvancedReducer {
Reduction ReduceJSEqual(Node* node, bool invert);
Reduction ReduceJSStrictEqual(Node* node, bool invert);
Reduction ReduceJSToBoolean(Node* node);
+ Reduction ReduceJSToInteger(Node* node);
+ Reduction ReduceJSToLength(Node* node);
Reduction ReduceJSToNumberInput(Node* input);
Reduction ReduceJSToNumber(Node* node);
Reduction ReduceJSToStringInput(Node* input);
diff --git a/deps/v8/src/compiler/jump-threading.cc b/deps/v8/src/compiler/jump-threading.cc
index 5abd34633b..55542825e7 100644
--- a/deps/v8/src/compiler/jump-threading.cc
+++ b/deps/v8/src/compiler/jump-threading.cc
@@ -96,7 +96,9 @@ bool JumpThreading::ComputeForwarding(Zone* local_zone,
// the frame at start. So we should move the decision of whether
// to build a frame or not in the register allocator, and trickle it
// here and to the code generator.
- if (frame_at_start || !block->must_deconstruct_frame()) {
+ if (frame_at_start ||
+ !(block->must_deconstruct_frame() ||
+ block->must_construct_frame())) {
fw = code->InputRpo(instr, 0);
}
fallthru = false;
@@ -141,7 +143,7 @@ void JumpThreading::ApplyForwarding(ZoneVector<RpoNumber>& result,
InstructionSequence* code) {
if (!FLAG_turbo_jt) return;
- Zone local_zone;
+ Zone local_zone(code->isolate()->allocator());
ZoneVector<bool> skip(static_cast<int>(result.size()), false, &local_zone);
// Skip empty blocks when the previous block doesn't fall through.
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index d4a366563a..105bd353fc 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -160,12 +160,17 @@ int Linkage::FrameStateInputCount(Runtime::FunctionId function) {
case Runtime::kPushCatchContext:
case Runtime::kReThrow:
case Runtime::kStringCompare:
- case Runtime::kStringEquals:
- case Runtime::kToFastProperties: // TODO(jarin): Is it safe?
+ case Runtime::kStringEqual:
+ case Runtime::kStringNotEqual:
+ case Runtime::kStringLessThan:
+ case Runtime::kStringLessThanOrEqual:
+ case Runtime::kStringGreaterThan:
+ case Runtime::kStringGreaterThanOrEqual:
case Runtime::kTraceEnter:
case Runtime::kTraceExit:
return 0;
case Runtime::kInlineGetPrototype:
+ case Runtime::kInlineNewObject:
case Runtime::kInlineRegExpConstructResult:
case Runtime::kInlineRegExpExec:
case Runtime::kInlineSubString:
@@ -174,13 +179,12 @@ int Linkage::FrameStateInputCount(Runtime::FunctionId function) {
case Runtime::kInlineToName:
case Runtime::kInlineToNumber:
case Runtime::kInlineToObject:
+ case Runtime::kInlineToPrimitive:
case Runtime::kInlineToPrimitive_Number:
case Runtime::kInlineToPrimitive_String:
- case Runtime::kInlineToPrimitive:
case Runtime::kInlineToString:
return 1;
case Runtime::kInlineCall:
- case Runtime::kInlineTailCall:
case Runtime::kInlineDeoptimizeNow:
case Runtime::kInlineThrowNotDateError:
return 2;
@@ -319,8 +323,9 @@ CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
MachineType target_type = MachineType::AnyTagged();
// When entering into an OSR function from unoptimized code the JSFunction
// is not in a register, but it is on the stack in the marker spill slot.
- LinkageLocation target_loc = is_osr ? LinkageLocation::ForSavedCallerMarker()
- : regloc(kJSFunctionRegister);
+ LinkageLocation target_loc = is_osr
+ ? LinkageLocation::ForSavedCallerFunction()
+ : regloc(kJSFunctionRegister);
return new (zone) CallDescriptor( // --
CallDescriptor::kCallJSFunction, // kind
target_type, // target MachineType
@@ -401,7 +406,8 @@ CallDescriptor* Linkage::GetStubCallDescriptor(
properties, // properties
kNoCalleeSaved, // callee-saved registers
kNoCalleeSaved, // callee-saved fp
- flags, // flags
+ CallDescriptor::kCanUseRoots | // flags
+ flags, // flags
descriptor.DebugName(isolate));
}
@@ -431,7 +437,7 @@ LinkageLocation Linkage::GetOsrValueLocation(int index) const {
bool Linkage::ParameterHasSecondaryLocation(int index) const {
- if (incoming_->kind() != CallDescriptor::kCallJSFunction) return false;
+ if (!incoming_->IsJSFunctionCall()) return false;
LinkageLocation loc = GetParameterLocation(index);
return (loc == regloc(kJSFunctionRegister) ||
loc == regloc(kContextRegister));
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index 3012f56e01..a0434f8aff 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -76,9 +76,9 @@ class LinkageLocation {
kPointerSize);
}
- static LinkageLocation ForSavedCallerMarker() {
+ static LinkageLocation ForSavedCallerFunction() {
return ForCalleeFrameSlot((StandardFrameConstants::kCallerPCOffset -
- StandardFrameConstants::kMarkerOffset) /
+ StandardFrameConstants::kFunctionOffset) /
kPointerSize);
}
@@ -160,10 +160,11 @@ class CallDescriptor final : public ZoneObject {
kCanUseRoots = 1u << 6,
// (arm64 only) native stack should be used for arguments.
kUseNativeStack = 1u << 7,
- // (arm64 only) call instruction has to restore JSSP.
+ // (arm64 only) call instruction has to restore JSSP or CSP.
kRestoreJSSP = 1u << 8,
+ kRestoreCSP = 1u << 9,
// Causes the code generator to initialize the root register.
- kInitializeRootRegister = 1u << 9,
+ kInitializeRootRegister = 1u << 10,
kPatchableCallSiteWithNop = kPatchableCallSite | kNeedsNopAfterCall
};
typedef base::Flags<Flag> Flags;
@@ -367,6 +368,11 @@ class Linkage : public ZoneObject {
// Get the location where an incoming OSR value is stored.
LinkageLocation GetOsrValueLocation(int index) const;
+ // A special {Parameter} index for Stub Calls that represents context.
+ static int GetStubCallContextParamIndex(int parameter_count) {
+ return parameter_count + 0; // Parameter (arity + 0) is special.
+ }
+
// A special {Parameter} index for JSCalls that represents the new target.
static int GetJSCallNewTargetParamIndex(int parameter_count) {
return parameter_count + 0; // Parameter (arity + 0) is special.
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index 97f1ab0ec5..e19368d107 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -4,8 +4,11 @@
#include "src/compiler/load-elimination.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
+#include "src/types.h"
namespace v8 {
namespace internal {
@@ -13,7 +16,6 @@ namespace compiler {
LoadElimination::~LoadElimination() {}
-
Reduction LoadElimination::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kLoadField:
@@ -24,7 +26,6 @@ Reduction LoadElimination::Reduce(Node* node) {
return NoChange();
}
-
Reduction LoadElimination::ReduceLoadField(Node* node) {
DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
FieldAccess const access = FieldAccessOf(node->op());
@@ -45,8 +46,22 @@ Reduction LoadElimination::ReduceLoadField(Node* node) {
if (access == FieldAccessOf(effect->op())) {
if (object == NodeProperties::GetValueInput(effect, 0)) {
Node* const value = NodeProperties::GetValueInput(effect, 1);
- ReplaceWithValue(node, value);
- return Replace(value);
+ Type* stored_value_type = NodeProperties::GetType(value);
+ Type* load_type = NodeProperties::GetType(node);
+ // Make sure the replacement's type is a subtype of the node's
+ // type. Otherwise we could confuse optimizations that were
+ // based on the original type.
+ if (stored_value_type->Is(load_type)) {
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ } else {
+ Node* renamed = graph()->NewNode(
+ common()->Guard(Type::Intersect(stored_value_type, load_type,
+ graph()->zone())),
+ value, NodeProperties::GetControlInput(node));
+ ReplaceWithValue(node, renamed);
+ return Replace(renamed);
+ }
}
// TODO(turbofan): Alias analysis to the rescue?
return NoChange();
diff --git a/deps/v8/src/compiler/load-elimination.h b/deps/v8/src/compiler/load-elimination.h
index db87d9a082..92c6dd01ba 100644
--- a/deps/v8/src/compiler/load-elimination.h
+++ b/deps/v8/src/compiler/load-elimination.h
@@ -11,15 +11,25 @@ namespace v8 {
namespace internal {
namespace compiler {
+class CommonOperatorBuilder;
+class Graph;
+
class LoadElimination final : public AdvancedReducer {
public:
- explicit LoadElimination(Editor* editor) : AdvancedReducer(editor) {}
+ explicit LoadElimination(Editor* editor, Graph* graph,
+ CommonOperatorBuilder* common)
+ : AdvancedReducer(editor), graph_(graph), common_(common) {}
~LoadElimination() final;
Reduction Reduce(Node* node) final;
private:
+ CommonOperatorBuilder* common() const { return common_; }
+ Graph* graph() { return graph_; }
+
Reduction ReduceLoadField(Node* node);
+ Graph* graph_;
+ CommonOperatorBuilder* common_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/loop-analysis.h b/deps/v8/src/compiler/loop-analysis.h
index 2ed5bc2280..b8bc395acb 100644
--- a/deps/v8/src/compiler/loop-analysis.h
+++ b/deps/v8/src/compiler/loop-analysis.h
@@ -116,6 +116,8 @@ class LoopTree : public ZoneObject {
return nullptr;
}
+ Zone* zone() const { return zone_; }
+
private:
friend class LoopFinderImpl;
diff --git a/deps/v8/src/compiler/loop-peeling.cc b/deps/v8/src/compiler/loop-peeling.cc
index b553a9ff58..53795961b3 100644
--- a/deps/v8/src/compiler/loop-peeling.cc
+++ b/deps/v8/src/compiler/loop-peeling.cc
@@ -184,7 +184,7 @@ static void FindLoopExits(LoopTree* loop_tree, LoopTree::Loop* loop,
bool LoopPeeler::CanPeel(LoopTree* loop_tree, LoopTree::Loop* loop) {
- Zone zone;
+ Zone zone(loop_tree->zone()->allocator());
NodeVector exits(&zone);
NodeVector rets(&zone);
FindLoopExits(loop_tree, loop, exits, rets);
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 3b6f21b151..6a506d26ad 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -148,6 +148,7 @@ MachineRepresentation StackSlotRepresentationOf(Operator const* op) {
V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 0, 1) \
+ V(TruncateFloat64ToUint32, Operator::kNoProperties, 1, 0, 1) \
V(TruncateFloat32ToInt32, Operator::kNoProperties, 1, 0, 1) \
V(TruncateFloat32ToUint32, Operator::kNoProperties, 1, 0, 1) \
V(TryTruncateFloat32ToInt64, Operator::kNoProperties, 1, 0, 2) \
@@ -195,7 +196,13 @@ MachineRepresentation StackSlotRepresentationOf(Operator const* op) {
V(Float64InsertHighWord32, Operator::kNoProperties, 2, 0, 1) \
V(LoadStackPointer, Operator::kNoProperties, 0, 0, 1) \
V(LoadFramePointer, Operator::kNoProperties, 0, 0, 1) \
- V(LoadParentFramePointer, Operator::kNoProperties, 0, 0, 1)
+ V(LoadParentFramePointer, Operator::kNoProperties, 0, 0, 1) \
+ V(Int32PairAdd, Operator::kNoProperties, 4, 0, 2) \
+ V(Int32PairSub, Operator::kNoProperties, 4, 0, 2) \
+ V(Int32PairMul, Operator::kNoProperties, 4, 0, 2) \
+ V(Word32PairShl, Operator::kNoProperties, 3, 0, 2) \
+ V(Word32PairShr, Operator::kNoProperties, 3, 0, 2) \
+ V(Word32PairSar, Operator::kNoProperties, 3, 0, 2)
#define PURE_OPTIONAL_OP_LIST(V) \
V(Word32Ctz, Operator::kNoProperties, 1, 0, 1) \
@@ -467,6 +474,19 @@ const Operator* MachineOperatorBuilder::CheckedStore(
return nullptr;
}
+// On 32 bit platforms we need to get a reference to optional operators of
+// 64-bit instructions for later Int64Lowering, even though 32 bit platforms
+// don't support the original 64-bit instruction.
+const Operator* MachineOperatorBuilder::Word64PopcntPlaceholder() {
+ return &cache_.kWord64Popcnt;
+}
+
+// On 32 bit platforms we need to get a reference to optional operators of
+// 64-bit instructions for later Int64Lowering, even though 32 bit platforms
+// don't support the original 64-bit instruction.
+const Operator* MachineOperatorBuilder::Word64CtzPlaceholder() {
+ return &cache_.kWord64Ctz;
+}
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index c5a80aa609..68e393aadd 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -164,6 +164,7 @@ class MachineOperatorBuilder final : public ZoneObject {
const OptionalOperator Word32Ctz();
const OptionalOperator Word32Popcnt();
const OptionalOperator Word64Popcnt();
+ const Operator* Word64PopcntPlaceholder();
const OptionalOperator Word32ReverseBits();
const OptionalOperator Word64ReverseBits();
bool Word32ShiftIsSafe() const { return flags_ & kWord32ShiftIsSafe; }
@@ -177,8 +178,16 @@ class MachineOperatorBuilder final : public ZoneObject {
const Operator* Word64Ror();
const Operator* Word64Clz();
const OptionalOperator Word64Ctz();
+ const Operator* Word64CtzPlaceholder();
const Operator* Word64Equal();
+ const Operator* Int32PairAdd();
+ const Operator* Int32PairSub();
+ const Operator* Int32PairMul();
+ const Operator* Word32PairShl();
+ const Operator* Word32PairShr();
+ const Operator* Word32PairSar();
+
const Operator* Int32Add();
const Operator* Int32AddWithOverflow();
const Operator* Int32Sub();
@@ -219,6 +228,7 @@ class MachineOperatorBuilder final : public ZoneObject {
const Operator* ChangeFloat32ToFloat64();
const Operator* ChangeFloat64ToInt32(); // narrowing
const Operator* ChangeFloat64ToUint32(); // narrowing
+ const Operator* TruncateFloat64ToUint32();
const Operator* TruncateFloat32ToInt32();
const Operator* TruncateFloat32ToUint32();
const Operator* TryTruncateFloat32ToInt64();
@@ -340,6 +350,7 @@ class MachineOperatorBuilder final : public ZoneObject {
V(Word, Shr) \
V(Word, Sar) \
V(Word, Ror) \
+ V(Word, Clz) \
V(Word, Equal) \
V(Int, Add) \
V(Int, Sub) \
diff --git a/deps/v8/src/compiler/mips/code-generator-mips.cc b/deps/v8/src/compiler/mips/code-generator-mips.cc
index cdd7e34866..9b0d706327 100644
--- a/deps/v8/src/compiler/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/mips/code-generator-mips.cc
@@ -120,8 +120,11 @@ class MipsOperandConverter final : public InstructionOperandConverter {
MemOperand ToMemOperand(InstructionOperand* op) const {
DCHECK_NOT_NULL(op);
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- FrameOffset offset = frame_access_state()->GetFrameOffset(
- AllocatedOperand::cast(op)->index());
+ return SlotToMemOperand(AllocatedOperand::cast(op)->index());
+ }
+
+ MemOperand SlotToMemOperand(int slot) const {
+ FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
};
@@ -221,7 +224,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
value_(value),
scratch0_(scratch0),
scratch1_(scratch1),
- mode_(mode) {}
+ mode_(mode),
+ must_save_lr_(!gen->frame_access_state()->has_frame()) {}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -235,7 +239,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
: OMIT_REMEMBERED_SET;
SaveFPRegsMode const save_fp_mode =
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- if (!frame()->needs_frame()) {
+ if (must_save_lr_) {
// We need to save and restore ra if the frame was elided.
__ Push(ra);
}
@@ -243,7 +247,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
remembered_set_action, save_fp_mode);
__ Addu(scratch1_, object_, index_);
__ CallStub(&stub);
- if (!frame()->needs_frame()) {
+ if (must_save_lr_) {
__ Pop(ra);
}
}
@@ -255,6 +259,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch0_;
Register const scratch1_;
RecordWriteMode const mode_;
+ bool must_save_lr_;
};
@@ -467,6 +472,13 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
__ bind(&done); \
}
+void CodeGenerator::AssembleDeconstructFrame() {
+ __ mov(sp, fp);
+ __ Pop(ra, fp);
+}
+
+void CodeGenerator::AssembleSetupStackPointer() {}
+
void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
if (sp_slot_delta > 0) {
@@ -482,20 +494,44 @@ void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
__ Subu(sp, sp, Operand(-sp_slot_delta * kPointerSize));
frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
}
- if (frame()->needs_frame()) {
+ if (frame_access_state()->has_frame()) {
__ lw(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
__ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
}
frame_access_state()->SetFrameAccessToSP();
}
+void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+ Label done;
+
+ // Check if current frame is an arguments adaptor frame.
+ __ lw(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Branch(&done, ne, scratch1,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Load arguments count from current arguments adaptor frame (note, it
+ // does not include receiver).
+ Register caller_args_count_reg = scratch1;
+ __ lw(caller_args_count_reg,
+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(caller_args_count_reg);
+
+ ParameterCount callee_args_count(args_reg);
+ __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+ scratch3);
+ __ bind(&done);
+}
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
MipsOperandConverter i(this, instr);
InstructionCode opcode = instr->opcode();
-
- switch (ArchOpcodeField::decode(opcode)) {
+ ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
+ switch (arch_opcode) {
case kArchCallCodeObject: {
EnsureSpaceForLazyDeopt();
if (instr->InputAt(0)->IsImmediate()) {
@@ -509,9 +545,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
AssembleDeconstructActivationRecord(stack_param_delta);
+ if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
+ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+ i.TempRegister(0), i.TempRegister(1),
+ i.TempRegister(2));
+ }
if (instr->InputAt(0)->IsImmediate()) {
__ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
RelocInfo::CODE_TARGET);
@@ -537,6 +579,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchTailCallJSFunctionFromJSFunction:
case kArchTailCallJSFunction: {
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
@@ -547,6 +590,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
AssembleDeconstructActivationRecord(stack_param_delta);
+ if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
+ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+ i.TempRegister(0), i.TempRegister(1),
+ i.TempRegister(2));
+ }
__ lw(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Jump(at);
frame_access_state()->ClearSPDelta();
@@ -606,7 +654,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ mov(i.OutputRegister(), fp);
break;
case kArchParentFramePointer:
- if (frame_access_state()->frame()->needs_frame()) {
+ if (frame_access_state()->has_frame()) {
__ lw(i.OutputRegister(), MemOperand(fp, 0));
} else {
__ mov(i.OutputRegister(), fp);
@@ -875,6 +923,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kMipsCmpD:
// Psuedo-instruction used for FP cmp/branch. No opcode emitted here.
break;
+ case kMipsMulPair: {
+ __ Mulu(i.OutputRegister(1), i.OutputRegister(0), i.InputRegister(0),
+ i.InputRegister(2));
+ __ mul(kScratchReg, i.InputRegister(0), i.InputRegister(3));
+ __ mul(kScratchReg2, i.InputRegister(1), i.InputRegister(2));
+ __ Addu(i.OutputRegister(1), i.OutputRegister(1), kScratchReg);
+ __ Addu(i.OutputRegister(1), i.OutputRegister(1), kScratchReg2);
+ } break;
case kMipsAddD:
// TODO(plind): add special case: combine mult & add.
__ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -1525,17 +1581,16 @@ void CodeGenerator::AssembleDeoptimizerCall(
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
int stack_shrink_slots = frame()->GetSpillSlotCount();
- if (descriptor->IsCFunctionCall()) {
- __ Push(ra, fp);
- __ mov(fp, sp);
- } else if (descriptor->IsJSFunctionCall()) {
- __ Prologue(this->info()->GeneratePreagedPrologue());
- } else if (frame()->needs_frame()) {
- __ StubPrologue();
- } else {
- frame()->SetElidedFrameSizeInSlots(0);
+ if (frame_access_state()->has_frame()) {
+ if (descriptor->IsCFunctionCall()) {
+ __ Push(ra, fp);
+ __ mov(fp, sp);
+ } else if (descriptor->IsJSFunctionCall()) {
+ __ Prologue(this->info()->GeneratePreagedPrologue());
+ } else {
+ __ StubPrologue(info()->GetOutputStackFrameType());
+ }
}
- frame_access_state()->SetFrameAccessToDefault();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -1597,17 +1652,15 @@ void CodeGenerator::AssembleReturn() {
}
if (descriptor->IsCFunctionCall()) {
- __ mov(sp, fp);
- __ Pop(ra, fp);
- } else if (frame()->needs_frame()) {
+ AssembleDeconstructFrame();
+ } else if (frame_access_state()->has_frame()) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ Branch(&return_label_);
return;
} else {
__ bind(&return_label_);
- __ mov(sp, fp);
- __ Pop(ra, fp);
+ AssembleDeconstructFrame();
}
}
if (pop_count != 0) {
@@ -1665,9 +1718,9 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
case Constant::kHeapObject: {
Handle<HeapObject> src_object = src.ToHeapObject();
Heap::RootListIndex index;
- int offset;
- if (IsMaterializableFromFrame(src_object, &offset)) {
- __ lw(dst, MemOperand(fp, offset));
+ int slot;
+ if (IsMaterializableFromFrame(src_object, &slot)) {
+ __ lw(dst, g.SlotToMemOperand(slot));
} else if (IsMaterializableFromRoot(src_object, &index)) {
__ LoadRoot(dst, index);
} else {
diff --git a/deps/v8/src/compiler/mips/instruction-codes-mips.h b/deps/v8/src/compiler/mips/instruction-codes-mips.h
index 64aecd0ee4..d85c2a7fe5 100644
--- a/deps/v8/src/compiler/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/mips/instruction-codes-mips.h
@@ -59,6 +59,7 @@ namespace compiler {
V(MipsSqrtD) \
V(MipsMaxD) \
V(MipsMinD) \
+ V(MipsMulPair) \
V(MipsFloat32RoundDown) \
V(MipsFloat32RoundTruncate) \
V(MipsFloat32RoundUp) \
diff --git a/deps/v8/src/compiler/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
index df972f73bc..f86ffe7643 100644
--- a/deps/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -114,8 +114,13 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
- selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
- inputs);
+ opcode = cont->Encode(opcode);
+ if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+ cont->frame_state());
+ } else {
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
+ }
}
@@ -189,9 +194,7 @@ void InstructionSelector::VisitStore(Node* node) {
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
inputs[input_count++] = g.UseUniqueRegister(index);
- inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
- ? g.UseRegister(value)
- : g.UseUniqueRegister(value);
+ inputs[input_count++] = g.UseUniqueRegister(value);
RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
switch (write_barrier_kind) {
case kNoWriteBarrier:
@@ -392,6 +395,27 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
VisitRRO(this, kMipsSar, node);
}
+void InstructionSelector::VisitInt32PairAdd(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32PairSub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32PairMul(Node* node) {
+ MipsOperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)),
+ g.UseUniqueRegister(node->InputAt(2)),
+ g.UseUniqueRegister(node->InputAt(3))};
+ InstructionOperand outputs[] = {
+ g.DefineAsRegister(node),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+ Emit(kMipsMulPair, 2, outputs, 4, inputs);
+}
+
+void InstructionSelector::VisitWord32PairShl(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord32Ror(Node* node) {
VisitRRO(this, kMipsRor, node);
@@ -612,6 +636,9 @@ void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
VisitRR(this, kMipsTruncUwD, node);
}
+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
+ VisitRR(this, kMipsTruncUwD, node);
+}
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
MipsOperandGenerator g(this);
@@ -878,6 +905,7 @@ void InstructionSelector::EmitPrepareArguments(
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
+int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
void InstructionSelector::VisitCheckedLoad(Node* node) {
CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
@@ -981,6 +1009,9 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
if (cont->IsBranch()) {
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
+ } else if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+ cont->frame_state());
} else {
DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
@@ -1086,9 +1117,6 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
VisitWordCompare(selector, node, kMipsCmp, cont, false);
}
-} // namespace
-
-
// Shared routine for word comparisons against zero.
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
Node* value, FlagsContinuation* cont) {
@@ -1177,12 +1205,17 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
if (cont->IsBranch()) {
selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
g.Label(cont->true_block()), g.Label(cont->false_block()));
+ } else if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
+ g.TempImmediate(0), cont->frame_state());
} else {
+ DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
g.TempImmediate(0));
}
}
+} // namespace
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
BasicBlock* fbranch) {
@@ -1190,6 +1223,17 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
}
+void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
MipsOperandGenerator g(this);
@@ -1220,7 +1264,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
void InstructionSelector::VisitWord32Equal(Node* const node) {
- FlagsContinuation cont(kEqual, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int32BinopMatcher m(node);
if (m.right().Is(0)) {
return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
@@ -1230,32 +1274,34 @@ void InstructionSelector::VisitWord32Equal(Node* const node) {
void InstructionSelector::VisitInt32LessThan(Node* node) {
- FlagsContinuation cont(kSignedLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
VisitWordCompare(this, node, &cont);
}
void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kSignedLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
VisitWordCompare(this, node, &cont);
}
void InstructionSelector::VisitUint32LessThan(Node* node) {
- FlagsContinuation cont(kUnsignedLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitWordCompare(this, node, &cont);
}
void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitWordCompare(this, node, &cont);
}
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont(kOverflow, ovf);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
return VisitBinop(this, node, kMipsAddOvf, &cont);
}
FlagsContinuation cont;
@@ -1265,7 +1311,7 @@ void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont(kOverflow, ovf);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
return VisitBinop(this, node, kMipsSubOvf, &cont);
}
FlagsContinuation cont;
@@ -1274,37 +1320,39 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
void InstructionSelector::VisitFloat32Equal(Node* node) {
- FlagsContinuation cont(kEqual, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat32LessThan(Node* node) {
- FlagsContinuation cont(kUnsignedLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64Equal(Node* node) {
- FlagsContinuation cont(kEqual, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThan(Node* node) {
- FlagsContinuation cont(kUnsignedLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitFloat64Compare(this, node, &cont);
}
diff --git a/deps/v8/src/compiler/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
index 373a1a6abc..c6341b1210 100644
--- a/deps/v8/src/compiler/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
@@ -120,8 +120,11 @@ class MipsOperandConverter final : public InstructionOperandConverter {
MemOperand ToMemOperand(InstructionOperand* op) const {
DCHECK_NOT_NULL(op);
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- FrameOffset offset = frame_access_state()->GetFrameOffset(
- AllocatedOperand::cast(op)->index());
+ return SlotToMemOperand(AllocatedOperand::cast(op)->index());
+ }
+
+ MemOperand SlotToMemOperand(int slot) const {
+ FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
};
@@ -221,7 +224,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
value_(value),
scratch0_(scratch0),
scratch1_(scratch1),
- mode_(mode) {}
+ mode_(mode),
+ must_save_lr_(!gen->frame_access_state()->has_frame()) {}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -235,7 +239,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
: OMIT_REMEMBERED_SET;
SaveFPRegsMode const save_fp_mode =
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- if (!frame()->needs_frame()) {
+ if (must_save_lr_) {
// We need to save and restore ra if the frame was elided.
__ Push(ra);
}
@@ -243,7 +247,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
remembered_set_action, save_fp_mode);
__ Daddu(scratch1_, object_, index_);
__ CallStub(&stub);
- if (!frame()->needs_frame()) {
+ if (must_save_lr_) {
__ Pop(ra);
}
}
@@ -255,6 +259,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch0_;
Register const scratch1_;
RecordWriteMode const mode_;
+ bool must_save_lr_;
};
@@ -479,6 +484,13 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
__ bind(&done); \
}
+void CodeGenerator::AssembleDeconstructFrame() {
+ __ mov(sp, fp);
+ __ Pop(ra, fp);
+}
+
+void CodeGenerator::AssembleSetupStackPointer() {}
+
void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
if (sp_slot_delta > 0) {
@@ -494,20 +506,44 @@ void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
__ Dsubu(sp, sp, Operand(-sp_slot_delta * kPointerSize));
frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
}
- if (frame()->needs_frame()) {
+ if (frame_access_state()->has_frame()) {
__ ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
__ ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
}
frame_access_state()->SetFrameAccessToSP();
}
+void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+ Label done;
+
+ // Check if current frame is an arguments adaptor frame.
+ __ ld(scratch3, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Branch(&done, ne, scratch3,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Load arguments count from current arguments adaptor frame (note, it
+ // does not include receiver).
+ Register caller_args_count_reg = scratch1;
+ __ ld(caller_args_count_reg,
+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(caller_args_count_reg);
+
+ ParameterCount callee_args_count(args_reg);
+ __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+ scratch3);
+ __ bind(&done);
+}
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
MipsOperandConverter i(this, instr);
InstructionCode opcode = instr->opcode();
-
- switch (ArchOpcodeField::decode(opcode)) {
+ ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
+ switch (arch_opcode) {
case kArchCallCodeObject: {
EnsureSpaceForLazyDeopt();
if (instr->InputAt(0)->IsImmediate()) {
@@ -521,9 +557,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
AssembleDeconstructActivationRecord(stack_param_delta);
+ if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
+ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+ i.TempRegister(0), i.TempRegister(1),
+ i.TempRegister(2));
+ }
if (instr->InputAt(0)->IsImmediate()) {
__ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
RelocInfo::CODE_TARGET);
@@ -548,6 +590,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchTailCallJSFunctionFromJSFunction:
case kArchTailCallJSFunction: {
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
@@ -557,6 +600,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
AssembleDeconstructActivationRecord(stack_param_delta);
+ if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
+ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+ i.TempRegister(0), i.TempRegister(1),
+ i.TempRegister(2));
+ }
__ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Jump(at);
frame_access_state()->ClearSPDelta();
@@ -616,7 +664,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ mov(i.OutputRegister(), fp);
break;
case kArchParentFramePointer:
- if (frame_access_state()->frame()->needs_frame()) {
+ if (frame_access_state()->has_frame()) {
__ ld(i.OutputRegister(), MemOperand(fp, 0));
} else {
__ mov(i.OutputRegister(), fp);
@@ -1831,17 +1879,16 @@ void CodeGenerator::AssembleDeoptimizerCall(
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->IsCFunctionCall()) {
- __ Push(ra, fp);
- __ mov(fp, sp);
- } else if (descriptor->IsJSFunctionCall()) {
- __ Prologue(this->info()->GeneratePreagedPrologue());
- } else if (frame()->needs_frame()) {
- __ StubPrologue();
- } else {
- frame()->SetElidedFrameSizeInSlots(0);
+ if (frame_access_state()->has_frame()) {
+ if (descriptor->IsCFunctionCall()) {
+ __ Push(ra, fp);
+ __ mov(fp, sp);
+ } else if (descriptor->IsJSFunctionCall()) {
+ __ Prologue(this->info()->GeneratePreagedPrologue());
+ } else {
+ __ StubPrologue(info()->GetOutputStackFrameType());
+ }
}
- frame_access_state()->SetFrameAccessToDefault();
int stack_shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
@@ -1900,17 +1947,15 @@ void CodeGenerator::AssembleReturn() {
}
if (descriptor->IsCFunctionCall()) {
- __ mov(sp, fp);
- __ Pop(ra, fp);
- } else if (frame()->needs_frame()) {
+ AssembleDeconstructFrame();
+ } else if (frame_access_state()->has_frame()) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ Branch(&return_label_);
return;
} else {
__ bind(&return_label_);
- __ mov(sp, fp);
- __ Pop(ra, fp);
+ AssembleDeconstructFrame();
}
}
int pop_count = static_cast<int>(descriptor->StackParameterCount());
@@ -1969,9 +2014,9 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
case Constant::kHeapObject: {
Handle<HeapObject> src_object = src.ToHeapObject();
Heap::RootListIndex index;
- int offset;
- if (IsMaterializableFromFrame(src_object, &offset)) {
- __ ld(dst, MemOperand(fp, offset));
+ int slot;
+ if (IsMaterializableFromFrame(src_object, &slot)) {
+ __ ld(dst, g.SlotToMemOperand(slot));
} else if (IsMaterializableFromRoot(src_object, &index)) {
__ LoadRoot(dst, index);
} else {
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
index 44a5470aca..5e2b5f2ad8 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -119,8 +119,13 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
- selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
- inputs);
+ opcode = cont->Encode(opcode);
+ if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+ cont->frame_state());
+ } else {
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
+ }
}
@@ -196,9 +201,7 @@ void InstructionSelector::VisitStore(Node* node) {
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
inputs[input_count++] = g.UseUniqueRegister(index);
- inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
- ? g.UseRegister(value)
- : g.UseUniqueRegister(value);
+ inputs[input_count++] = g.UseUniqueRegister(value);
RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
switch (write_barrier_kind) {
case kNoWriteBarrier:
@@ -922,6 +925,9 @@ void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
VisitRR(this, kMips64TruncUwD, node);
}
+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
+ VisitRR(this, kMips64TruncUwD, node);
+}
void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
Mips64OperandGenerator g(this);
@@ -1320,6 +1326,7 @@ void InstructionSelector::EmitPrepareArguments(
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
+int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
void InstructionSelector::VisitCheckedLoad(Node* node) {
CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
@@ -1431,6 +1438,9 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
if (cont->IsBranch()) {
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
+ } else if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+ cont->frame_state());
} else {
DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
@@ -1542,7 +1552,6 @@ void VisitWord64Compare(InstructionSelector* selector, Node* node,
VisitWordCompare(selector, node, kMips64Cmp, cont, false);
}
-} // namespace
void EmitWordCompareZero(InstructionSelector* selector, Node* value,
@@ -1553,6 +1562,9 @@ void EmitWordCompareZero(InstructionSelector* selector, Node* value,
if (cont->IsBranch()) {
selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
g.Label(cont->true_block()), g.Label(cont->false_block()));
+ } else if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
+ g.TempImmediate(0), cont->frame_state());
} else {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
g.TempImmediate(0));
@@ -1677,6 +1689,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
EmitWordCompareZero(selector, value, cont);
}
+} // namespace
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
BasicBlock* fbranch) {
@@ -1684,6 +1697,17 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
}
+void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
Mips64OperandGenerator g(this);
@@ -1714,7 +1738,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
void InstructionSelector::VisitWord32Equal(Node* const node) {
- FlagsContinuation cont(kEqual, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int32BinopMatcher m(node);
if (m.right().Is(0)) {
return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
@@ -1725,32 +1749,34 @@ void InstructionSelector::VisitWord32Equal(Node* const node) {
void InstructionSelector::VisitInt32LessThan(Node* node) {
- FlagsContinuation cont(kSignedLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
VisitWord32Compare(this, node, &cont);
}
void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kSignedLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
VisitWord32Compare(this, node, &cont);
}
void InstructionSelector::VisitUint32LessThan(Node* node) {
- FlagsContinuation cont(kUnsignedLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitWord32Compare(this, node, &cont);
}
void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitWord32Compare(this, node, &cont);
}
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont(kOverflow, ovf);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
return VisitBinop(this, node, kMips64Dadd, &cont);
}
FlagsContinuation cont;
@@ -1760,7 +1786,7 @@ void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont(kOverflow, ovf);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
return VisitBinop(this, node, kMips64Dsub, &cont);
}
FlagsContinuation cont;
@@ -1770,7 +1796,7 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont(kOverflow, ovf);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
return VisitBinop(this, node, kMips64DaddOvf, &cont);
}
FlagsContinuation cont;
@@ -1780,7 +1806,7 @@ void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont(kOverflow, ovf);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
return VisitBinop(this, node, kMips64DsubOvf, &cont);
}
FlagsContinuation cont;
@@ -1789,7 +1815,7 @@ void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
void InstructionSelector::VisitWord64Equal(Node* const node) {
- FlagsContinuation cont(kEqual, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int64BinopMatcher m(node);
if (m.right().Is(0)) {
return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
@@ -1800,61 +1826,65 @@ void InstructionSelector::VisitWord64Equal(Node* const node) {
void InstructionSelector::VisitInt64LessThan(Node* node) {
- FlagsContinuation cont(kSignedLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
VisitWord64Compare(this, node, &cont);
}
void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kSignedLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
VisitWord64Compare(this, node, &cont);
}
void InstructionSelector::VisitUint64LessThan(Node* node) {
- FlagsContinuation cont(kUnsignedLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitWord64Compare(this, node, &cont);
}
void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitWord64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat32Equal(Node* node) {
- FlagsContinuation cont(kEqual, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat32LessThan(Node* node) {
- FlagsContinuation cont(kUnsignedLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64Equal(Node* node) {
- FlagsContinuation cont(kEqual, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThan(Node* node) {
- FlagsContinuation cont(kUnsignedLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitFloat64Compare(this, node, &cont);
}
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index c78e15e25b..b038d154b7 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -21,6 +21,8 @@
V(IfDefault) \
V(Merge) \
V(Deoptimize) \
+ V(DeoptimizeIf) \
+ V(DeoptimizeUnless) \
V(Return) \
V(TailCall) \
V(Terminate) \
@@ -94,10 +96,12 @@
#define JS_CONVERSION_UNOP_LIST(V) \
V(JSToBoolean) \
- V(JSToNumber) \
- V(JSToString) \
+ V(JSToInteger) \
+ V(JSToLength) \
V(JSToName) \
- V(JSToObject)
+ V(JSToNumber) \
+ V(JSToObject) \
+ V(JSToString)
#define JS_OTHER_UNOP_LIST(V) \
V(JSTypeOf)
@@ -181,10 +185,17 @@
V(NumberShiftLeft) \
V(NumberShiftRight) \
V(NumberShiftRightLogical) \
+ V(NumberImul) \
+ V(NumberClz32) \
+ V(NumberCeil) \
+ V(NumberFloor) \
+ V(NumberRound) \
+ V(NumberTrunc) \
V(NumberToInt32) \
V(NumberToUint32) \
V(NumberIsHoleNaN) \
V(PlainPrimitiveToNumber) \
+ V(StringToNumber) \
V(ChangeTaggedToInt32) \
V(ChangeTaggedToUint32) \
V(ChangeTaggedToFloat64) \
@@ -202,7 +213,8 @@
V(StoreElement) \
V(ObjectIsNumber) \
V(ObjectIsReceiver) \
- V(ObjectIsSmi)
+ V(ObjectIsSmi) \
+ V(ObjectIsUndetectable)
// Opcodes for Machine-level operators.
#define MACHINE_COMPARE_BINOP_LIST(V) \
@@ -273,6 +285,7 @@
V(ChangeFloat32ToFloat64) \
V(ChangeFloat64ToInt32) \
V(ChangeFloat64ToUint32) \
+ V(TruncateFloat64ToUint32) \
V(TruncateFloat32ToInt32) \
V(TruncateFloat32ToUint32) \
V(TryTruncateFloat32ToInt64) \
@@ -330,7 +343,13 @@
V(LoadFramePointer) \
V(LoadParentFramePointer) \
V(CheckedLoad) \
- V(CheckedStore)
+ V(CheckedStore) \
+ V(Int32PairAdd) \
+ V(Int32PairSub) \
+ V(Int32PairMul) \
+ V(Word32PairShl) \
+ V(Word32PairShr) \
+ V(Word32PairSar)
#define VALUE_OP_LIST(V) \
COMMON_OP_LIST(V) \
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index 1ee31d5ba5..7f38ca7299 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -58,6 +58,8 @@ int OperatorProperties::GetFrameStateInputCount(const Operator* op) {
case IrOpcode::kJSCreateScriptContext:
// Conversions
+ case IrOpcode::kJSToInteger:
+ case IrOpcode::kJSToLength:
case IrOpcode::kJSToName:
case IrOpcode::kJSToNumber:
case IrOpcode::kJSToObject:
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 21c34fc77e..1d7e967cc7 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -267,23 +267,26 @@ class PipelineData {
register_allocation_data_ = nullptr;
}
- void InitializeInstructionSequence() {
+ void InitializeInstructionSequence(const CallDescriptor* descriptor) {
DCHECK(sequence_ == nullptr);
InstructionBlocks* instruction_blocks =
InstructionSequence::InstructionBlocksFor(instruction_zone(),
schedule());
sequence_ = new (instruction_zone()) InstructionSequence(
info()->isolate(), instruction_zone(), instruction_blocks);
+ if (descriptor && descriptor->RequiresFrameAsIncoming()) {
+ sequence_->instruction_blocks()[0]->mark_needs_frame();
+ } else {
+ DCHECK_EQ(0, descriptor->CalleeSavedFPRegisters());
+ DCHECK_EQ(0, descriptor->CalleeSavedRegisters());
+ }
}
void InitializeFrameData(CallDescriptor* descriptor) {
DCHECK(frame_ == nullptr);
int fixed_frame_size = 0;
if (descriptor != nullptr) {
- fixed_frame_size = (descriptor->IsCFunctionCall())
- ? StandardFrameConstants::kFixedSlotCountAboveFp +
- StandardFrameConstants::kCPSlotCount
- : StandardFrameConstants::kFixedSlotCount;
+ fixed_frame_size = CalculateFixedFrameSize(descriptor);
}
frame_ = new (instruction_zone()) Frame(fixed_frame_size, descriptor);
}
@@ -338,6 +341,16 @@ class PipelineData {
Zone* register_allocation_zone_;
RegisterAllocationData* register_allocation_data_;
+ int CalculateFixedFrameSize(CallDescriptor* descriptor) {
+ if (descriptor->IsJSFunctionCall()) {
+ return StandardFrameConstants::kFixedSlotCount;
+ }
+ return descriptor->IsCFunctionCall()
+ ? (CommonFrameConstants::kFixedSlotCountAboveFp +
+ CommonFrameConstants::kCPSlotCount)
+ : TypedFrameConstants::kFixedSlotCount;
+ }
+
DISALLOW_COPY_AND_ASSIGN(PipelineData);
};
@@ -539,7 +552,7 @@ struct InliningPhase {
data->common());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->common(), data->machine());
- JSCallReducer call_reducer(&graph_reducer, data->jsgraph(),
+ JSCallReducer call_reducer(data->jsgraph(),
data->info()->is_deoptimization_enabled()
? JSCallReducer::kDeoptimizationEnabled
: JSCallReducer::kNoFlags,
@@ -615,7 +628,8 @@ struct TypedLoweringPhase {
JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common());
- LoadElimination load_elimination(&graph_reducer);
+ LoadElimination load_elimination(&graph_reducer, data->graph(),
+ data->common());
JSBuiltinReducer builtin_reducer(&graph_reducer, data->jsgraph());
MaybeHandle<LiteralsArray> literals_array =
data->info()->is_native_context_specializing()
@@ -639,6 +653,7 @@ struct TypedLoweringPhase {
data->info()->is_deoptimization_enabled()
? JSIntrinsicLowering::kDeoptimizationEnabled
: JSIntrinsicLowering::kDeoptimizationDisabled);
+ SimplifiedOperatorReducer simple_reducer(data->jsgraph());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->common(), data->machine());
AddReducer(data, &graph_reducer, &dead_code_elimination);
@@ -649,6 +664,7 @@ struct TypedLoweringPhase {
AddReducer(data, &graph_reducer, &typed_lowering);
AddReducer(data, &graph_reducer, &intrinsic_lowering);
AddReducer(data, &graph_reducer, &load_elimination);
+ AddReducer(data, &graph_reducer, &simple_reducer);
AddReducer(data, &graph_reducer, &common_reducer);
graph_reducer.ReduceGraph();
}
@@ -1079,7 +1095,7 @@ void Pipeline::RunPrintAndVerify(const char* phase, bool untyped) {
Handle<Code> Pipeline::GenerateCode() {
- ZonePool zone_pool;
+ ZonePool zone_pool(isolate()->allocator());
base::SmartPointer<PipelineStatistics> pipeline_statistics;
if (FLAG_turbo_stats) {
@@ -1240,7 +1256,7 @@ Handle<Code> Pipeline::GenerateCodeForCodeStub(Isolate* isolate,
CompilationInfo info(debug_name, isolate, graph->zone(), flags);
// Construct a pipeline for scheduling and code generation.
- ZonePool zone_pool;
+ ZonePool zone_pool(isolate->allocator());
PipelineData data(&zone_pool, &info, graph, schedule);
base::SmartPointer<PipelineStatistics> pipeline_statistics;
if (FLAG_turbo_stats) {
@@ -1281,7 +1297,7 @@ Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
Graph* graph,
Schedule* schedule) {
// Construct a pipeline for scheduling and code generation.
- ZonePool zone_pool;
+ ZonePool zone_pool(info->isolate()->allocator());
PipelineData data(&zone_pool, info, graph, schedule);
base::SmartPointer<PipelineStatistics> pipeline_statistics;
if (FLAG_turbo_stats) {
@@ -1304,7 +1320,7 @@ bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
InstructionSequence* sequence,
bool run_verifier) {
CompilationInfo info("testing", sequence->isolate(), sequence->zone());
- ZonePool zone_pool;
+ ZonePool zone_pool(sequence->isolate()->allocator());
PipelineData data(&zone_pool, &info, sequence);
Pipeline pipeline(&info);
pipeline.data_ = &data;
@@ -1329,7 +1345,7 @@ Handle<Code> Pipeline::ScheduleAndGenerateCode(
data->schedule());
}
- data->InitializeInstructionSequence();
+ data->InitializeInstructionSequence(call_descriptor);
data->InitializeFrameData(call_descriptor);
// Select and schedule instructions covering the scheduled graph.
@@ -1358,6 +1374,7 @@ Handle<Code> Pipeline::ScheduleAndGenerateCode(
AllocateRegisters(
RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
call_descriptor, run_verifier);
+ Run<FrameElisionPhase>();
if (data->compilation_failed()) {
info()->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
return Handle<Code>();
@@ -1366,11 +1383,7 @@ Handle<Code> Pipeline::ScheduleAndGenerateCode(
BeginPhaseKind("code generation");
// TODO(mtrofin): move this off to the register allocator.
bool generate_frame_at_start =
- !FLAG_turbo_frame_elision || !data_->info()->IsStub() ||
- !data_->frame()->needs_frame() ||
- data_->sequence()->instruction_blocks().front()->needs_frame() ||
- linkage.GetIncomingDescriptor()->CalleeSavedFPRegisters() != 0 ||
- linkage.GetIncomingDescriptor()->CalleeSavedRegisters() != 0;
+ data_->sequence()->instruction_blocks().front()->must_construct_frame();
// Optimimize jumps.
if (FLAG_turbo_jt) {
Run<JumpThreadingPhase>(generate_frame_at_start);
@@ -1430,7 +1443,7 @@ void Pipeline::AllocateRegisters(const RegisterConfiguration* config,
base::SmartPointer<Zone> verifier_zone;
RegisterAllocatorVerifier* verifier = nullptr;
if (run_verifier) {
- verifier_zone.Reset(new Zone());
+ verifier_zone.Reset(new Zone(isolate()->allocator()));
verifier = new (verifier_zone.get()) RegisterAllocatorVerifier(
verifier_zone.get(), config, data->sequence());
}
@@ -1438,6 +1451,8 @@ void Pipeline::AllocateRegisters(const RegisterConfiguration* config,
base::SmartArrayPointer<char> debug_name;
#ifdef DEBUG
debug_name = info()->GetDebugName();
+ data_->sequence()->ValidateEdgeSplitForm();
+ data_->sequence()->ValidateDeferredBlockExitPaths();
#endif
data->InitializeRegisterAllocationData(config, descriptor, debug_name.get());
@@ -1477,12 +1492,6 @@ void Pipeline::AllocateRegisters(const RegisterConfiguration* config,
Run<MergeSplintersPhase>();
}
- // We plan to enable frame elision only for stubs and bytecode handlers.
- if (FLAG_turbo_frame_elision && info()->IsStub()) {
- Run<LocateSpillSlotsPhase>();
- Run<FrameElisionPhase>();
- }
-
Run<AssignSpillSlotsPhase>();
Run<CommitAssignmentPhase>();
@@ -1493,6 +1502,8 @@ void Pipeline::AllocateRegisters(const RegisterConfiguration* config,
Run<OptimizeMovesPhase>();
}
+ Run<LocateSpillSlotsPhase>();
+
if (FLAG_trace_turbo_graph) {
OFStream os(stdout);
PrintableInstructionSequence printable = {config, data->sequence()};
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
index 7fc6dd9d07..6f1e5881eb 100644
--- a/deps/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -32,6 +32,7 @@ class PPCOperandConverter final : public InstructionOperandConverter {
RCBit OutputRCBit() const {
switch (instr_->flags_mode()) {
case kFlags_branch:
+ case kFlags_deoptimize:
case kFlags_set:
return SetRC;
case kFlags_none:
@@ -103,8 +104,11 @@ class PPCOperandConverter final : public InstructionOperandConverter {
MemOperand ToMemOperand(InstructionOperand* op) const {
DCHECK_NOT_NULL(op);
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- FrameOffset offset = frame_access_state()->GetFrameOffset(
- AllocatedOperand::cast(op)->index());
+ return SlotToMemOperand(AllocatedOperand::cast(op)->index());
+ }
+
+ MemOperand SlotToMemOperand(int slot) const {
+ FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
};
@@ -183,7 +187,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
value_(value),
scratch0_(scratch0),
scratch1_(scratch1),
- mode_(mode) {}
+ mode_(mode),
+ must_save_lr_(!gen->frame_access_state()->has_frame()) {}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -197,7 +202,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
: OMIT_REMEMBERED_SET;
SaveFPRegsMode const save_fp_mode =
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- if (!frame()->needs_frame()) {
+ if (must_save_lr_) {
// We need to save and restore lr if the frame was elided.
__ mflr(scratch1_);
__ Push(scratch1_);
@@ -211,7 +216,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ add(scratch1_, object_, offset_);
}
__ CallStub(&stub);
- if (!frame()->needs_frame()) {
+ if (must_save_lr_) {
// We need to save and restore lr if the frame was elided.
__ Pop(scratch1_);
__ mtlr(scratch1_);
@@ -226,6 +231,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch0_;
Register const scratch1_;
RecordWriteMode const mode_;
+ bool must_save_lr_;
};
@@ -293,20 +299,24 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
} // namespace
-#define ASSEMBLE_FLOAT_UNOP_RC(asm_instr) \
+#define ASSEMBLE_FLOAT_UNOP_RC(asm_instr, round) \
do { \
__ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
i.OutputRCBit()); \
+ if (round) { \
+ __ frsp(i.OutputDoubleRegister(), i.OutputDoubleRegister()); \
+ } \
} while (0)
-
-#define ASSEMBLE_FLOAT_BINOP_RC(asm_instr) \
+#define ASSEMBLE_FLOAT_BINOP_RC(asm_instr, round) \
do { \
__ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
i.InputDoubleRegister(1), i.OutputRCBit()); \
+ if (round) { \
+ __ frsp(i.OutputDoubleRegister(), i.OutputDoubleRegister()); \
+ } \
} while (0)
-
#define ASSEMBLE_BINOP(asm_instr_reg, asm_instr_imm) \
do { \
if (HasRegisterInput(instr, 1)) { \
@@ -662,6 +672,11 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
+void CodeGenerator::AssembleDeconstructFrame() {
+ __ LeaveFrame(StackFrame::MANUAL);
+}
+
+void CodeGenerator::AssembleSetupStackPointer() {}
void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
@@ -678,12 +693,36 @@ void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
__ Add(sp, sp, sp_slot_delta * kPointerSize, r0);
frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
}
- if (frame()->needs_frame()) {
+ if (frame_access_state()->has_frame()) {
__ RestoreFrameStateForTailCall();
}
frame_access_state()->SetFrameAccessToSP();
}
+void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+ Label done;
+
+ // Check if current frame is an arguments adaptor frame.
+ __ LoadP(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(scratch1, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ bne(&done);
+
+ // Load arguments count from current arguments adaptor frame (note, it
+ // does not include receiver).
+ Register caller_args_count_reg = scratch1;
+ __ LoadP(caller_args_count_reg,
+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(caller_args_count_reg);
+
+ ParameterCount callee_args_count(args_reg);
+ __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+ scratch3);
+ __ bind(&done);
+}
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
@@ -708,9 +747,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
AssembleDeconstructActivationRecord(stack_param_delta);
+ if (opcode == kArchTailCallCodeObjectFromJSFunction) {
+ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+ i.TempRegister(0), i.TempRegister(1),
+ i.TempRegister(2));
+ }
if (HasRegisterInput(instr, 0)) {
__ addi(ip, i.InputRegister(0),
Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -745,6 +790,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchTailCallJSFunctionFromJSFunction:
case kArchTailCallJSFunction: {
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
@@ -756,6 +802,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
AssembleDeconstructActivationRecord(stack_param_delta);
+ if (opcode == kArchTailCallJSFunctionFromJSFunction) {
+ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+ i.TempRegister(0), i.TempRegister(1),
+ i.TempRegister(2));
+ }
__ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Jump(ip);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
@@ -823,7 +874,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kArchParentFramePointer:
- if (frame_access_state()->frame()->needs_frame()) {
+ if (frame_access_state()->has_frame()) {
__ LoadP(i.OutputRegister(), MemOperand(fp, 0));
} else {
__ mr(i.OutputRegister(), fp);
@@ -928,6 +979,71 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_BINOP_INT_RC(srad, sradi);
break;
#endif
+#if !V8_TARGET_ARCH_PPC64
+ case kPPC_AddPair:
+ // i.InputRegister(0) ... left low word.
+ // i.InputRegister(1) ... left high word.
+ // i.InputRegister(2) ... right low word.
+ // i.InputRegister(3) ... right high word.
+ __ addc(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(2));
+ __ adde(i.OutputRegister(1), i.InputRegister(1), i.InputRegister(3));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_SubPair:
+ // i.InputRegister(0) ... left low word.
+ // i.InputRegister(1) ... left high word.
+ // i.InputRegister(2) ... right low word.
+ // i.InputRegister(3) ... right high word.
+ __ subc(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(2));
+ __ sube(i.OutputRegister(1), i.InputRegister(1), i.InputRegister(3));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_MulPair:
+ // i.InputRegister(0) ... left low word.
+ // i.InputRegister(1) ... left high word.
+ // i.InputRegister(2) ... right low word.
+ // i.InputRegister(3) ... right high word.
+ __ mullw(i.TempRegister(0), i.InputRegister(0), i.InputRegister(3));
+ __ mullw(i.TempRegister(1), i.InputRegister(2), i.InputRegister(1));
+ __ add(i.TempRegister(0), i.TempRegister(0), i.TempRegister(1));
+ __ mullw(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(2));
+ __ mulhwu(i.OutputRegister(1), i.InputRegister(0), i.InputRegister(2));
+ __ add(i.OutputRegister(1), i.OutputRegister(1), i.TempRegister(0));
+ break;
+ case kPPC_ShiftLeftPair:
+ if (instr->InputAt(2)->IsImmediate()) {
+ __ ShiftLeftPair(i.OutputRegister(0), i.OutputRegister(1),
+ i.InputRegister(0), i.InputRegister(1),
+ i.InputInt32(2));
+ } else {
+ __ ShiftLeftPair(i.OutputRegister(0), i.OutputRegister(1),
+ i.InputRegister(0), i.InputRegister(1), kScratchReg,
+ i.InputRegister(2));
+ }
+ break;
+ case kPPC_ShiftRightPair:
+ if (instr->InputAt(2)->IsImmediate()) {
+ __ ShiftRightPair(i.OutputRegister(0), i.OutputRegister(1),
+ i.InputRegister(0), i.InputRegister(1),
+ i.InputInt32(2));
+ } else {
+ __ ShiftRightPair(i.OutputRegister(0), i.OutputRegister(1),
+ i.InputRegister(0), i.InputRegister(1), kScratchReg,
+ i.InputRegister(2));
+ }
+ break;
+ case kPPC_ShiftRightAlgPair:
+ if (instr->InputAt(2)->IsImmediate()) {
+ __ ShiftRightAlgPair(i.OutputRegister(0), i.OutputRegister(1),
+ i.InputRegister(0), i.InputRegister(1),
+ i.InputInt32(2));
+ } else {
+ __ ShiftRightAlgPair(i.OutputRegister(0), i.OutputRegister(1),
+ i.InputRegister(0), i.InputRegister(1),
+ kScratchReg, i.InputRegister(2));
+ }
+ break;
+#endif
case kPPC_RotRight32:
if (HasRegisterInput(instr, 1)) {
__ subfic(kScratchReg, i.InputRegister(1), Operand(32));
@@ -992,7 +1108,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_ADD_WITH_OVERFLOW32();
break;
case kPPC_AddDouble:
- ASSEMBLE_FLOAT_BINOP_RC(fadd);
+ ASSEMBLE_FLOAT_BINOP_RC(fadd, MiscField::decode(instr->opcode()));
break;
case kPPC_Sub:
#if V8_TARGET_ARCH_PPC64
@@ -1015,7 +1131,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_SUB_WITH_OVERFLOW32();
break;
case kPPC_SubDouble:
- ASSEMBLE_FLOAT_BINOP_RC(fsub);
+ ASSEMBLE_FLOAT_BINOP_RC(fsub, MiscField::decode(instr->opcode()));
break;
case kPPC_Mul32:
__ mullw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
@@ -1036,7 +1152,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
i.OutputRCBit());
break;
case kPPC_MulDouble:
- ASSEMBLE_FLOAT_BINOP_RC(fmul);
+ ASSEMBLE_FLOAT_BINOP_RC(fmul, MiscField::decode(instr->opcode()));
break;
case kPPC_Div32:
__ divw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
@@ -1059,7 +1175,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
#endif
case kPPC_DivDouble:
- ASSEMBLE_FLOAT_BINOP_RC(fdiv);
+ ASSEMBLE_FLOAT_BINOP_RC(fdiv, MiscField::decode(instr->opcode()));
break;
case kPPC_Mod32:
ASSEMBLE_MODULO(divw, mullw);
@@ -1092,25 +1208,25 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_FLOAT_MIN(kScratchDoubleReg);
break;
case kPPC_AbsDouble:
- ASSEMBLE_FLOAT_UNOP_RC(fabs);
+ ASSEMBLE_FLOAT_UNOP_RC(fabs, 0);
break;
case kPPC_SqrtDouble:
- ASSEMBLE_FLOAT_UNOP_RC(fsqrt);
+ ASSEMBLE_FLOAT_UNOP_RC(fsqrt, MiscField::decode(instr->opcode()));
break;
case kPPC_FloorDouble:
- ASSEMBLE_FLOAT_UNOP_RC(frim);
+ ASSEMBLE_FLOAT_UNOP_RC(frim, MiscField::decode(instr->opcode()));
break;
case kPPC_CeilDouble:
- ASSEMBLE_FLOAT_UNOP_RC(frip);
+ ASSEMBLE_FLOAT_UNOP_RC(frip, MiscField::decode(instr->opcode()));
break;
case kPPC_TruncateDouble:
- ASSEMBLE_FLOAT_UNOP_RC(friz);
+ ASSEMBLE_FLOAT_UNOP_RC(friz, MiscField::decode(instr->opcode()));
break;
case kPPC_RoundDouble:
- ASSEMBLE_FLOAT_UNOP_RC(frin);
+ ASSEMBLE_FLOAT_UNOP_RC(frin, MiscField::decode(instr->opcode()));
break;
case kPPC_NegDouble:
- ASSEMBLE_FLOAT_UNOP_RC(fneg);
+ ASSEMBLE_FLOAT_UNOP_RC(fneg, 0);
break;
case kPPC_Cntlz32:
__ cntlzw_(i.OutputRegister(), i.InputRegister(0));
@@ -1316,7 +1432,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
#endif
case kPPC_DoubleToFloat32:
- ASSEMBLE_FLOAT_UNOP_RC(frsp);
+ ASSEMBLE_FLOAT_UNOP_RC(frsp, 0);
break;
case kPPC_Float32ToDouble:
// Nothing to do.
@@ -1589,36 +1705,36 @@ void CodeGenerator::AssembleDeoptimizerCall(
int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
+ // TODO(turbofan): We should be able to generate better code by sharing the
+ // actual final call site and just bl'ing to it here, similar to what we do
+ // in the lithium backend.
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->IsCFunctionCall()) {
- __ function_descriptor();
- __ mflr(r0);
- if (FLAG_enable_embedded_constant_pool) {
- __ Push(r0, fp, kConstantPoolRegister);
- // Adjust FP to point to saved FP.
- __ subi(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
- } else {
- __ Push(r0, fp);
- __ mr(fp, sp);
- }
- } else if (descriptor->IsJSFunctionCall()) {
- __ Prologue(this->info()->GeneratePreagedPrologue(), ip);
- } else if (frame()->needs_frame()) {
- if (!ABI_CALL_VIA_IP && info()->output_code_kind() == Code::WASM_FUNCTION) {
- // TODO(mbrandy): Restrict only to the wasm wrapper case.
- __ StubPrologue();
+ if (frame_access_state()->has_frame()) {
+ if (descriptor->IsCFunctionCall()) {
+ __ function_descriptor();
+ __ mflr(r0);
+ if (FLAG_enable_embedded_constant_pool) {
+ __ Push(r0, fp, kConstantPoolRegister);
+ // Adjust FP to point to saved FP.
+ __ subi(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
+ } else {
+ __ Push(r0, fp);
+ __ mr(fp, sp);
+ }
+ } else if (descriptor->IsJSFunctionCall()) {
+ __ Prologue(this->info()->GeneratePreagedPrologue(), ip);
} else {
- __ StubPrologue(ip);
+ StackFrame::Type type = info()->GetOutputStackFrameType();
+ // TODO(mbrandy): Detect cases where ip is the entrypoint (for
+ // efficient intialization of the constant pool pointer register).
+ __ StubPrologue(type);
}
- } else {
- frame()->SetElidedFrameSizeInSlots(0);
}
- frame_access_state()->SetFrameAccessToDefault();
int stack_shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
@@ -1687,20 +1803,18 @@ void CodeGenerator::AssembleReturn() {
}
if (descriptor->IsCFunctionCall()) {
- __ LeaveFrame(StackFrame::MANUAL, pop_count * kPointerSize);
- } else if (frame()->needs_frame()) {
+ AssembleDeconstructFrame();
+ } else if (frame_access_state()->has_frame()) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ b(&return_label_);
return;
} else {
__ bind(&return_label_);
- __ LeaveFrame(StackFrame::MANUAL, pop_count * kPointerSize);
+ AssembleDeconstructFrame();
}
- } else {
- __ Drop(pop_count);
}
- __ Ret();
+ __ Ret(pop_count);
}
@@ -1753,9 +1867,9 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
case Constant::kHeapObject: {
Handle<HeapObject> src_object = src.ToHeapObject();
Heap::RootListIndex index;
- int offset;
- if (IsMaterializableFromFrame(src_object, &offset)) {
- __ LoadP(dst, MemOperand(fp, offset));
+ int slot;
+ if (IsMaterializableFromFrame(src_object, &slot)) {
+ __ LoadP(dst, g.SlotToMemOperand(slot));
} else if (IsMaterializableFromRoot(src_object, &index)) {
__ LoadRoot(dst, index);
} else {
diff --git a/deps/v8/src/compiler/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
index 877ebb5c12..66c2e9980b 100644
--- a/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
+++ b/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
@@ -19,10 +19,13 @@ namespace compiler {
V(PPC_Xor) \
V(PPC_ShiftLeft32) \
V(PPC_ShiftLeft64) \
+ V(PPC_ShiftLeftPair) \
V(PPC_ShiftRight32) \
V(PPC_ShiftRight64) \
+ V(PPC_ShiftRightPair) \
V(PPC_ShiftRightAlg32) \
V(PPC_ShiftRightAlg64) \
+ V(PPC_ShiftRightAlgPair) \
V(PPC_RotRight32) \
V(PPC_RotRight64) \
V(PPC_Not) \
@@ -32,14 +35,17 @@ namespace compiler {
V(PPC_RotLeftAndClearRight64) \
V(PPC_Add) \
V(PPC_AddWithOverflow32) \
+ V(PPC_AddPair) \
V(PPC_AddDouble) \
V(PPC_Sub) \
V(PPC_SubWithOverflow32) \
+ V(PPC_SubPair) \
V(PPC_SubDouble) \
V(PPC_Mul32) \
V(PPC_Mul64) \
V(PPC_MulHigh32) \
V(PPC_MulHighU32) \
+ V(PPC_MulPair) \
V(PPC_MulDouble) \
V(PPC_Div32) \
V(PPC_Div64) \
diff --git a/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc b/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
index fd1df6a495..e7d7719f5b 100644
--- a/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
@@ -21,10 +21,13 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_Xor:
case kPPC_ShiftLeft32:
case kPPC_ShiftLeft64:
+ case kPPC_ShiftLeftPair:
case kPPC_ShiftRight32:
case kPPC_ShiftRight64:
+ case kPPC_ShiftRightPair:
case kPPC_ShiftRightAlg32:
case kPPC_ShiftRightAlg64:
+ case kPPC_ShiftRightAlgPair:
case kPPC_RotRight32:
case kPPC_RotRight64:
case kPPC_Not:
@@ -34,14 +37,17 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_RotLeftAndClearRight64:
case kPPC_Add:
case kPPC_AddWithOverflow32:
+ case kPPC_AddPair:
case kPPC_AddDouble:
case kPPC_Sub:
case kPPC_SubWithOverflow32:
+ case kPPC_SubPair:
case kPPC_SubDouble:
case kPPC_Mul32:
case kPPC_Mul64:
case kPPC_MulHigh32:
case kPPC_MulHighU32:
+ case kPPC_MulPair:
case kPPC_MulDouble:
case kPPC_Div32:
case kPPC_Div64:
diff --git a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
index 244e6f44c5..5abb5f1476 100644
--- a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
@@ -71,22 +71,22 @@ class PPCOperandGenerator final : public OperandGenerator {
namespace {
-void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+void VisitRR(InstructionSelector* selector, InstructionCode opcode,
+ Node* node) {
PPCOperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
-
-void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+void VisitRRR(InstructionSelector* selector, InstructionCode opcode,
+ Node* node) {
PPCOperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)),
g.UseRegister(node->InputAt(1)));
}
-
-void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
+void VisitRRO(InstructionSelector* selector, InstructionCode opcode, Node* node,
ImmediateMode operand_mode) {
PPCOperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node),
@@ -96,8 +96,8 @@ void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
#if V8_TARGET_ARCH_PPC64
-void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode,
- Node* node) {
+void VisitTryTruncateDouble(InstructionSelector* selector,
+ InstructionCode opcode, Node* node) {
PPCOperandGenerator g(selector);
InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
InstructionOperand outputs[2];
@@ -144,15 +144,20 @@ void VisitBinop(InstructionSelector* selector, Node* node,
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
- selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
- inputs);
+ opcode = cont->Encode(opcode);
+ if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+ cont->frame_state());
+ } else {
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
+ }
}
// Shared routine for multiple binary operations.
template <typename Matcher>
-void VisitBinop(InstructionSelector* selector, Node* node, ArchOpcode opcode,
- ImmediateMode operand_mode) {
+void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, ImmediateMode operand_mode) {
FlagsContinuation cont;
VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
}
@@ -247,9 +252,7 @@ void InstructionSelector::VisitStore(Node* node) {
inputs[input_count++] = g.UseUniqueRegister(offset);
addressing_mode = kMode_MRR;
}
- inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
- ? g.UseRegister(value)
- : g.UseUniqueRegister(value);
+ inputs[input_count++] = g.UseUniqueRegister(value);
RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
switch (write_barrier_kind) {
case kNoWriteBarrier:
@@ -720,7 +723,6 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
VisitRRO(this, kPPC_ShiftRight32, node, kShift32Imm);
}
-
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64Shr(Node* node) {
PPCOperandGenerator g(this);
@@ -782,9 +784,109 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
VisitRRO(this, kPPC_ShiftRightAlg32, node, kShift32Imm);
}
+#if !V8_TARGET_ARCH_PPC64
+void VisitPairBinop(InstructionSelector* selector, InstructionCode opcode,
+ Node* node) {
+ PPCOperandGenerator g(selector);
+
+ // We use UseUniqueRegister here to avoid register sharing with the output
+ // registers.
+ InstructionOperand inputs[] = {
+ g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+
+ InstructionOperand outputs[] = {
+ g.DefineAsRegister(node),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+ selector->Emit(opcode, 2, outputs, 4, inputs);
+}
+
+void InstructionSelector::VisitInt32PairAdd(Node* node) {
+ VisitPairBinop(this, kPPC_AddPair, node);
+}
+
+void InstructionSelector::VisitInt32PairSub(Node* node) {
+ VisitPairBinop(this, kPPC_SubPair, node);
+}
+
+void InstructionSelector::VisitInt32PairMul(Node* node) {
+ PPCOperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)),
+ g.UseUniqueRegister(node->InputAt(2)),
+ g.UseRegister(node->InputAt(3))};
+
+ InstructionOperand outputs[] = {
+ g.DefineAsRegister(node),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+
+ Emit(kPPC_MulPair, 2, outputs, 4, inputs, 2, temps);
+}
+
+void VisitPairShift(InstructionSelector* selector, InstructionCode opcode,
+ Node* node) {
+ PPCOperandGenerator g(selector);
+ Int32Matcher m(node->InputAt(2));
+ InstructionOperand shift_operand;
+ if (m.HasValue()) {
+ shift_operand = g.UseImmediate(m.node());
+ } else {
+ shift_operand = g.UseUniqueRegister(m.node());
+ }
+
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)),
+ shift_operand};
+
+ InstructionOperand outputs[] = {
+ g.DefineSameAsFirst(node),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+ selector->Emit(opcode, 2, outputs, 3, inputs);
+}
+
+void InstructionSelector::VisitWord32PairShl(Node* node) {
+ VisitPairShift(this, kPPC_ShiftLeftPair, node);
+}
+
+void InstructionSelector::VisitWord32PairShr(Node* node) {
+ VisitPairShift(this, kPPC_ShiftRightPair, node);
+}
+
+void InstructionSelector::VisitWord32PairSar(Node* node) {
+ VisitPairShift(this, kPPC_ShiftRightAlgPair, node);
+}
+#endif
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64Sar(Node* node) {
+ PPCOperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if (CanCover(m.node(), m.left().node()) && m.left().IsLoad() &&
+ m.right().Is(32)) {
+ // Just load and sign-extend the interesting 4 bytes instead. This happens,
+ // for example, when we're loading and untagging SMIs.
+ BaseWithIndexAndDisplacement64Matcher mleft(m.left().node(), true);
+ if (mleft.matches() && mleft.index() == nullptr) {
+ int64_t offset = 0;
+ Node* displacement = mleft.displacement();
+ if (displacement != nullptr) {
+ Int64Matcher mdisplacement(displacement);
+ DCHECK(mdisplacement.HasValue());
+ offset = mdisplacement.Value();
+ }
+ offset = SmiWordOffset(offset);
+ if (g.CanBeImmediate(offset, kInt16Imm_4ByteAligned)) {
+ Emit(kPPC_LoadWordS32 | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(mleft.base()),
+ g.TempImmediate(offset));
+ return;
+ }
+ }
+ }
VisitRRO(this, kPPC_ShiftRightAlg64, node, kShift64Imm);
}
#endif
@@ -861,7 +963,6 @@ void InstructionSelector::VisitInt64Add(Node* node) {
}
#endif
-
void InstructionSelector::VisitInt32Sub(Node* node) {
PPCOperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -994,6 +1095,9 @@ void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
VisitRR(this, kPPC_DoubleToUint32, node);
}
+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
+ VisitRR(this, kPPC_DoubleToUint32, node);
+}
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
@@ -1108,7 +1212,7 @@ void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
void InstructionSelector::VisitFloat32Add(Node* node) {
- VisitRRR(this, kPPC_AddDouble, node);
+ VisitRRR(this, kPPC_AddDouble | MiscField::encode(1), node);
}
@@ -1122,11 +1226,11 @@ void InstructionSelector::VisitFloat32Sub(Node* node) {
PPCOperandGenerator g(this);
Float32BinopMatcher m(node);
if (m.left().IsMinusZero()) {
- Emit(kPPC_NegDouble, g.DefineAsRegister(node),
+ Emit(kPPC_NegDouble | MiscField::encode(1), g.DefineAsRegister(node),
g.UseRegister(m.right().node()));
return;
}
- VisitRRR(this, kPPC_SubDouble, node);
+ VisitRRR(this, kPPC_SubDouble | MiscField::encode(1), node);
}
@@ -1157,7 +1261,7 @@ void InstructionSelector::VisitFloat64Sub(Node* node) {
void InstructionSelector::VisitFloat32Mul(Node* node) {
- VisitRRR(this, kPPC_MulDouble, node);
+ VisitRRR(this, kPPC_MulDouble | MiscField::encode(1), node);
}
@@ -1168,7 +1272,7 @@ void InstructionSelector::VisitFloat64Mul(Node* node) {
void InstructionSelector::VisitFloat32Div(Node* node) {
- VisitRRR(this, kPPC_DivDouble, node);
+ VisitRRR(this, kPPC_DivDouble | MiscField::encode(1), node);
}
@@ -1198,7 +1302,7 @@ void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitFloat32Abs(Node* node) {
- VisitRR(this, kPPC_AbsDouble, node);
+ VisitRR(this, kPPC_AbsDouble | MiscField::encode(1), node);
}
@@ -1208,7 +1312,7 @@ void InstructionSelector::VisitFloat64Abs(Node* node) {
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
- VisitRR(this, kPPC_SqrtDouble, node);
+ VisitRR(this, kPPC_SqrtDouble | MiscField::encode(1), node);
}
@@ -1218,7 +1322,7 @@ void InstructionSelector::VisitFloat64Sqrt(Node* node) {
void InstructionSelector::VisitFloat32RoundDown(Node* node) {
- VisitRR(this, kPPC_FloorDouble, node);
+ VisitRR(this, kPPC_FloorDouble | MiscField::encode(1), node);
}
@@ -1228,7 +1332,7 @@ void InstructionSelector::VisitFloat64RoundDown(Node* node) {
void InstructionSelector::VisitFloat32RoundUp(Node* node) {
- VisitRR(this, kPPC_CeilDouble, node);
+ VisitRR(this, kPPC_CeilDouble | MiscField::encode(1), node);
}
@@ -1238,7 +1342,7 @@ void InstructionSelector::VisitFloat64RoundUp(Node* node) {
void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
- VisitRR(this, kPPC_TruncateDouble, node);
+ VisitRR(this, kPPC_TruncateDouble | MiscField::encode(1), node);
}
@@ -1264,7 +1368,7 @@ void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont(kOverflow, ovf);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
return VisitBinop<Int32BinopMatcher>(this, node, kPPC_AddWithOverflow32,
kInt16Imm, &cont);
}
@@ -1276,7 +1380,7 @@ void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont(kOverflow, ovf);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
return VisitBinop<Int32BinopMatcher>(this, node, kPPC_SubWithOverflow32,
kInt16Imm_Negate, &cont);
}
@@ -1289,7 +1393,7 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont(kOverflow, ovf);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add, kInt16Imm,
&cont);
}
@@ -1300,7 +1404,7 @@ void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont(kOverflow, ovf);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Sub, kInt16Imm_Negate,
&cont);
}
@@ -1336,6 +1440,9 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
if (cont->IsBranch()) {
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
+ } else if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+ cont->frame_state());
} else {
DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
@@ -1573,6 +1680,17 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
VisitWord32CompareZero(this, branch, branch->InputAt(0), &cont);
}
+void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+ VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+ VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
+}
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
PPCOperandGenerator g(this);
@@ -1603,7 +1721,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
void InstructionSelector::VisitWord32Equal(Node* const node) {
- FlagsContinuation cont(kEqual, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int32BinopMatcher m(node);
if (m.right().Is(0)) {
return VisitWord32CompareZero(this, m.node(), m.left().node(), &cont);
@@ -1613,32 +1731,34 @@ void InstructionSelector::VisitWord32Equal(Node* const node) {
void InstructionSelector::VisitInt32LessThan(Node* node) {
- FlagsContinuation cont(kSignedLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
VisitWord32Compare(this, node, &cont);
}
void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kSignedLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
VisitWord32Compare(this, node, &cont);
}
void InstructionSelector::VisitUint32LessThan(Node* node) {
- FlagsContinuation cont(kUnsignedLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitWord32Compare(this, node, &cont);
}
void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitWord32Compare(this, node, &cont);
}
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64Equal(Node* const node) {
- FlagsContinuation cont(kEqual, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int64BinopMatcher m(node);
if (m.right().Is(0)) {
return VisitWord64CompareZero(this, m.node(), m.left().node(), &cont);
@@ -1648,62 +1768,66 @@ void InstructionSelector::VisitWord64Equal(Node* const node) {
void InstructionSelector::VisitInt64LessThan(Node* node) {
- FlagsContinuation cont(kSignedLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
VisitWord64Compare(this, node, &cont);
}
void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kSignedLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
VisitWord64Compare(this, node, &cont);
}
void InstructionSelector::VisitUint64LessThan(Node* node) {
- FlagsContinuation cont(kUnsignedLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitWord64Compare(this, node, &cont);
}
void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitWord64Compare(this, node, &cont);
}
#endif
void InstructionSelector::VisitFloat32Equal(Node* node) {
- FlagsContinuation cont(kEqual, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat32LessThan(Node* node) {
- FlagsContinuation cont(kUnsignedLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64Equal(Node* node) {
- FlagsContinuation cont(kEqual, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThan(Node* node) {
- FlagsContinuation cont(kUnsignedLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitFloat64Compare(this, node, &cont);
}
@@ -1750,6 +1874,7 @@ void InstructionSelector::EmitPrepareArguments(
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
+int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
PPCOperandGenerator g(this);
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index 0d4b8cb200..728d79af5b 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -39,6 +39,17 @@ RawMachineAssembler::RawMachineAssembler(Isolate* isolate, Graph* graph,
Schedule* RawMachineAssembler::Export() {
// Compute the correct codegen order.
DCHECK(schedule_->rpo_order()->empty());
+ OFStream os(stdout);
+ if (FLAG_trace_turbo_scheduler) {
+ PrintF("--- RAW SCHEDULE -------------------------------------------\n");
+ os << *schedule_;
+ }
+ schedule_->EnsureSplitEdgeForm();
+ schedule_->PropagateDeferredMark();
+ if (FLAG_trace_turbo_scheduler) {
+ PrintF("--- EDGE SPLIT AND PROPAGATED DEFERRED SCHEDULE ------------\n");
+ os << *schedule_;
+ }
Scheduler::ComputeSpecialRPO(zone(), schedule_);
// Invalidate RawMachineAssembler.
Schedule* schedule = schedule_;
@@ -79,15 +90,17 @@ void RawMachineAssembler::Switch(Node* index, RawMachineLabel* default_label,
BasicBlock** succ_blocks = zone()->NewArray<BasicBlock*>(succ_count);
for (size_t index = 0; index < case_count; ++index) {
int32_t case_value = case_values[index];
- BasicBlock* case_block = Use(case_labels[index]);
+ BasicBlock* case_block = schedule()->NewBasicBlock();
Node* case_node =
graph()->NewNode(common()->IfValue(case_value), switch_node);
schedule()->AddNode(case_block, case_node);
+ schedule()->AddGoto(case_block, Use(case_labels[index]));
succ_blocks[index] = case_block;
}
- BasicBlock* default_block = Use(default_label);
+ BasicBlock* default_block = schedule()->NewBasicBlock();
Node* default_node = graph()->NewNode(common()->IfDefault(), switch_node);
schedule()->AddNode(default_block, default_node);
+ schedule()->AddGoto(default_block, Use(default_label));
succ_blocks[case_count] = default_block;
schedule()->AddSwitch(CurrentBlock(), switch_node, succ_blocks, succ_count);
current_block_ = nullptr;
@@ -247,6 +260,27 @@ Node* RawMachineAssembler::TailCallN(CallDescriptor* desc, Node* function,
return tail_call;
}
+Node* RawMachineAssembler::TailCallRuntime0(Runtime::FunctionId function,
+ Node* context) {
+ const int kArity = 0;
+ CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ zone(), function, kArity, Operator::kNoProperties,
+ CallDescriptor::kSupportsTailCalls);
+ int return_count = static_cast<int>(desc->ReturnCount());
+
+ Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
+ Node* ref = AddNode(
+ common()->ExternalConstant(ExternalReference(function, isolate())));
+ Node* arity = Int32Constant(kArity);
+
+ Node* nodes[] = {centry, ref, arity, context};
+ Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
+
+ NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
+ schedule()->AddTailCall(CurrentBlock(), tail_call);
+ current_block_ = nullptr;
+ return tail_call;
+}
Node* RawMachineAssembler::TailCallRuntime1(Runtime::FunctionId function,
Node* arg1, Node* context) {
@@ -407,6 +441,7 @@ void RawMachineAssembler::Bind(RawMachineLabel* label) {
DCHECK(!label->bound_);
label->bound_ = true;
current_block_ = EnsureBlock(label);
+ current_block_->set_deferred(label->deferred_);
}
@@ -459,11 +494,6 @@ Node* RawMachineAssembler::MakeNode(const Operator* op, int input_count,
return graph()->NewNodeUnchecked(op, input_count, inputs);
}
-
-RawMachineLabel::RawMachineLabel()
- : block_(nullptr), used_(false), bound_(false) {}
-
-
RawMachineLabel::~RawMachineLabel() { DCHECK(bound_ || !used_); }
} // namespace compiler
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index a0cb7a0bfb..f3445aceea 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -324,6 +324,24 @@ class RawMachineAssembler {
Node* Uint64Mod(Node* a, Node* b) {
return AddNode(machine()->Uint64Mod(), a, b);
}
+ Node* Int32PairAdd(Node* a_low, Node* a_high, Node* b_low, Node* b_high) {
+ return AddNode(machine()->Int32PairAdd(), a_low, a_high, b_low, b_high);
+ }
+ Node* Int32PairSub(Node* a_low, Node* a_high, Node* b_low, Node* b_high) {
+ return AddNode(machine()->Int32PairSub(), a_low, a_high, b_low, b_high);
+ }
+ Node* Int32PairMul(Node* a_low, Node* a_high, Node* b_low, Node* b_high) {
+ return AddNode(machine()->Int32PairMul(), a_low, a_high, b_low, b_high);
+ }
+ Node* Word32PairShl(Node* low_word, Node* high_word, Node* shift) {
+ return AddNode(machine()->Word32PairShl(), low_word, high_word, shift);
+ }
+ Node* Word32PairShr(Node* low_word, Node* high_word, Node* shift) {
+ return AddNode(machine()->Word32PairShr(), low_word, high_word, shift);
+ }
+ Node* Word32PairSar(Node* low_word, Node* high_word, Node* shift) {
+ return AddNode(machine()->Word32PairSar(), low_word, high_word, shift);
+ }
#define INTPTR_BINOP(prefix, name) \
Node* IntPtr##name(Node* a, Node* b) { \
@@ -332,7 +350,9 @@ class RawMachineAssembler {
}
INTPTR_BINOP(Int, Add);
+ INTPTR_BINOP(Int, AddWithOverflow);
INTPTR_BINOP(Int, Sub);
+ INTPTR_BINOP(Int, SubWithOverflow);
INTPTR_BINOP(Int, LessThan);
INTPTR_BINOP(Int, LessThanOrEqual);
INTPTR_BINOP(Word, Equal);
@@ -374,6 +394,7 @@ class RawMachineAssembler {
return AddNode(machine()->Float32Min().op(), a, b);
}
Node* Float32Abs(Node* a) { return AddNode(machine()->Float32Abs(), a); }
+ Node* Float32Neg(Node* a) { return Float32Sub(Float32Constant(-0.0f), a); }
Node* Float32Sqrt(Node* a) { return AddNode(machine()->Float32Sqrt(), a); }
Node* Float32Equal(Node* a, Node* b) {
return AddNode(machine()->Float32Equal(), a, b);
@@ -414,6 +435,7 @@ class RawMachineAssembler {
return AddNode(machine()->Float64Min().op(), a, b);
}
Node* Float64Abs(Node* a) { return AddNode(machine()->Float64Abs(), a); }
+ Node* Float64Neg(Node* a) { return Float64Sub(Float64Constant(-0.0), a); }
Node* Float64Sqrt(Node* a) { return AddNode(machine()->Float64Sqrt(), a); }
Node* Float64Equal(Node* a, Node* b) {
return AddNode(machine()->Float64Equal(), a, b);
@@ -448,6 +470,9 @@ class RawMachineAssembler {
Node* ChangeFloat64ToUint32(Node* a) {
return AddNode(machine()->ChangeFloat64ToUint32(), a);
}
+ Node* TruncateFloat64ToUint32(Node* a) {
+ return AddNode(machine()->TruncateFloat64ToUint32(), a);
+ }
Node* TruncateFloat32ToInt32(Node* a) {
return AddNode(machine()->TruncateFloat32ToInt32(), a);
}
@@ -457,22 +482,12 @@ class RawMachineAssembler {
Node* TryTruncateFloat32ToInt64(Node* a) {
return AddNode(machine()->TryTruncateFloat32ToInt64(), a);
}
- Node* TruncateFloat64ToInt64(Node* a) {
- // TODO(ahaas): Remove this function as soon as it is not used anymore in
- // WebAssembly.
- return AddNode(machine()->TryTruncateFloat64ToInt64(), a);
- }
Node* TryTruncateFloat64ToInt64(Node* a) {
return AddNode(machine()->TryTruncateFloat64ToInt64(), a);
}
Node* TryTruncateFloat32ToUint64(Node* a) {
return AddNode(machine()->TryTruncateFloat32ToUint64(), a);
}
- Node* TruncateFloat64ToUint64(Node* a) {
- // TODO(ahaas): Remove this function as soon as it is not used anymore in
- // WebAssembly.
- return AddNode(machine()->TryTruncateFloat64ToUint64(), a);
- }
Node* TryTruncateFloat64ToUint64(Node* a) {
return AddNode(machine()->TryTruncateFloat64ToUint64(), a);
}
@@ -622,6 +637,8 @@ class RawMachineAssembler {
// Tail call the given call descriptor and the given arguments.
Node* TailCallN(CallDescriptor* call_descriptor, Node* function, Node** args);
+ // Tail call to a runtime function with zero arguments.
+ Node* TailCallRuntime0(Runtime::FunctionId function, Node* context);
// Tail call to a runtime function with one argument.
Node* TailCallRuntime1(Runtime::FunctionId function, Node* arg0,
Node* context);
@@ -708,13 +725,17 @@ class RawMachineAssembler {
class RawMachineLabel final {
public:
- RawMachineLabel();
+ enum Type { kDeferred, kNonDeferred };
+
+ explicit RawMachineLabel(Type type = kNonDeferred)
+ : deferred_(type == kDeferred) {}
~RawMachineLabel();
private:
- BasicBlock* block_;
- bool used_;
- bool bound_;
+ BasicBlock* block_ = nullptr;
+ bool used_ = false;
+ bool bound_ = false;
+ bool deferred_;
friend class RawMachineAssembler;
DISALLOW_COPY_AND_ASSIGN(RawMachineLabel);
};
diff --git a/deps/v8/src/compiler/register-allocator-verifier.cc b/deps/v8/src/compiler/register-allocator-verifier.cc
index 0b12e149e8..f2160f52ce 100644
--- a/deps/v8/src/compiler/register-allocator-verifier.cc
+++ b/deps/v8/src/compiler/register-allocator-verifier.cc
@@ -32,9 +32,9 @@ void VerifyAllocatedGaps(const Instruction* instr) {
i <= Instruction::LAST_GAP_POSITION; i++) {
Instruction::GapPosition inner_pos =
static_cast<Instruction::GapPosition>(i);
- auto moves = instr->GetParallelMove(inner_pos);
+ const ParallelMove* moves = instr->GetParallelMove(inner_pos);
if (moves == nullptr) continue;
- for (auto move : *moves) {
+ for (const MoveOperands* move : *moves) {
if (move->IsRedundant()) continue;
CHECK(move->source().IsAllocated() || move->source().IsConstant());
CHECK(move->destination().IsAllocated());
@@ -81,11 +81,12 @@ RegisterAllocatorVerifier::RegisterAllocatorVerifier(
// TODO(dcarney): model unique constraints.
// Construct OperandConstraints for all InstructionOperands, eliminating
// kSameAsFirst along the way.
- for (const auto* instr : sequence->instructions()) {
+ for (const Instruction* instr : sequence->instructions()) {
// All gaps should be totally unallocated at this point.
VerifyEmptyGaps(instr);
const size_t operand_count = OperandCount(instr);
- auto* op_constraints = zone->NewArray<OperandConstraint>(operand_count);
+ OperandConstraint* op_constraints =
+ zone->NewArray<OperandConstraint>(operand_count);
size_t count = 0;
for (size_t i = 0; i < instr->InputCount(); ++i, ++count) {
BuildConstraint(instr->InputAt(i), &op_constraints[count]);
@@ -115,11 +116,12 @@ void RegisterAllocatorVerifier::VerifyAssignment() {
CHECK(sequence()->instructions().size() == constraints()->size());
auto instr_it = sequence()->begin();
for (const auto& instr_constraint : *constraints()) {
- const auto* instr = instr_constraint.instruction_;
+ const Instruction* instr = instr_constraint.instruction_;
// All gaps should be totally allocated at this point.
VerifyAllocatedGaps(instr);
const size_t operand_count = instr_constraint.operand_constaints_size_;
- const auto* op_constraints = instr_constraint.operand_constraints_;
+ const OperandConstraint* op_constraints =
+ instr_constraint.operand_constraints_;
CHECK_EQ(instr, *instr_it);
CHECK(operand_count == OperandCount(instr));
size_t count = 0;
@@ -148,14 +150,14 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
} else if (op->IsExplicit()) {
constraint->type_ = kExplicit;
} else if (op->IsImmediate()) {
- auto imm = ImmediateOperand::cast(op);
+ const ImmediateOperand* imm = ImmediateOperand::cast(op);
int value = imm->type() == ImmediateOperand::INLINE ? imm->inline_value()
: imm->indexed_value();
constraint->type_ = kImmediate;
constraint->value_ = value;
} else {
CHECK(op->IsUnallocated());
- const auto* unallocated = UnallocatedOperand::cast(op);
+ const UnallocatedOperand* unallocated = UnallocatedOperand::cast(op);
int vreg = unallocated->virtual_register();
constraint->virtual_register_ = vreg;
if (unallocated->basic_policy() == UnallocatedOperand::FIXED_SLOT) {
@@ -213,7 +215,7 @@ void RegisterAllocatorVerifier::CheckConstraint(
return;
case kImmediate: {
CHECK(op->IsImmediate());
- auto imm = ImmediateOperand::cast(op);
+ const ImmediateOperand* imm = ImmediateOperand::cast(op);
int value = imm->type() == ImmediateOperand::INLINE
? imm->inline_value()
: imm->indexed_value();
@@ -324,7 +326,7 @@ class OperandMap : public ZoneObject {
if (this->empty()) return;
auto it = this->begin();
OperandLess less;
- for (const auto& o : other) {
+ for (const std::pair<const InstructionOperand*, MapValue*>& o : other) {
while (less(it->first, o.first)) {
this->erase(it++);
if (it == this->end()) return;
@@ -346,7 +348,7 @@ class OperandMap : public ZoneObject {
void RunParallelMoves(Zone* zone, const ParallelMove* moves) {
// Compute outgoing mappings.
Map to_insert(zone);
- for (auto move : *moves) {
+ for (const MoveOperands* move : *moves) {
if (move->IsEliminated()) continue;
auto cur = map().find(&move->source());
CHECK(cur != map().end());
@@ -356,7 +358,7 @@ class OperandMap : public ZoneObject {
CHECK(res.second);
}
// Drop current mappings.
- for (auto move : *moves) {
+ for (const MoveOperands* move : *moves) {
if (move->IsEliminated()) continue;
auto cur = map().find(&move->destination());
if (cur != map().end()) map().erase(cur);
@@ -368,8 +370,9 @@ class OperandMap : public ZoneObject {
void RunGaps(Zone* zone, const Instruction* instr) {
for (int i = Instruction::FIRST_GAP_POSITION;
i <= Instruction::LAST_GAP_POSITION; i++) {
- auto inner_pos = static_cast<Instruction::GapPosition>(i);
- auto move = instr->GetParallelMove(inner_pos);
+ Instruction::GapPosition inner_pos =
+ static_cast<Instruction::GapPosition>(i);
+ const ParallelMove* move = instr->GetParallelMove(inner_pos);
if (move == nullptr) continue;
RunParallelMoves(zone, move);
}
@@ -383,7 +386,7 @@ class OperandMap : public ZoneObject {
void DropRegisters(const RegisterConfiguration* config) {
// TODO(dcarney): sort map by kind and drop range.
for (auto it = map().begin(); it != map().end();) {
- auto op = it->first;
+ const InstructionOperand* op = it->first;
if (op->IsRegister() || op->IsDoubleRegister()) {
map().erase(it++);
} else {
@@ -394,7 +397,7 @@ class OperandMap : public ZoneObject {
MapValue* Define(Zone* zone, const InstructionOperand* op,
int virtual_register) {
- auto value = new (zone) MapValue();
+ MapValue* value = new (zone) MapValue();
value->define_vreg = virtual_register;
auto res = map().insert(std::make_pair(op, value));
if (!res.second) res.first->second = value;
@@ -404,7 +407,7 @@ class OperandMap : public ZoneObject {
void Use(const InstructionOperand* op, int use_vreg, bool initial_pass) {
auto it = map().find(op);
CHECK(it != map().end());
- auto v = it->second;
+ MapValue* v = it->second;
if (v->define_vreg != kInvalidVreg) {
CHECK_EQ(v->define_vreg, use_vreg);
}
@@ -445,7 +448,7 @@ class OperandMap : public ZoneObject {
bool initial_pass) {
auto it = map().find(op);
CHECK(it != map().end());
- auto v = it->second;
+ MapValue* v = it->second;
int use_vreg = phi->virtual_register;
// Phis are not defined.
CHECK_EQ(kInvalidVreg, v->define_vreg);
@@ -473,7 +476,7 @@ class OperandMap : public ZoneObject {
CHECK(v->define_vreg == phi->first_pred_vreg);
} else if (v->use_vreg != phi->first_pred_vreg) {
// Walk the phi chain, hunting for a matching phi use.
- auto p = phi;
+ const PhiData* p = phi;
for (; p != nullptr; p = p->first_pred_phi) {
if (p->virtual_register == v->use_vreg) break;
}
@@ -529,12 +532,12 @@ class RegisterAllocatorVerifier::BlockMaps {
BlockIds block_ids((BlockIds::key_compare()),
zone_allocator<size_t>(zone()));
// First ensure that incoming contains only keys in all predecessors.
- for (auto block : sequence()->instruction_blocks()) {
+ for (const InstructionBlock* block : sequence()->instruction_blocks()) {
size_t index = block->rpo_number().ToSize();
block_ids.insert(index);
- auto& succ_map = incoming_maps_[index]->map();
+ OperandMap::Map& succ_map = incoming_maps_[index]->map();
for (size_t i = 0; i < block->PredecessorCount(); ++i) {
- auto pred_rpo = block->predecessors()[i];
+ RpoNumber pred_rpo = block->predecessors()[i];
succ_map.Intersect(outgoing_maps_[pred_rpo.ToSize()]->map());
}
}
@@ -545,8 +548,9 @@ class RegisterAllocatorVerifier::BlockMaps {
const size_t succ_index = *block_id_it;
block_ids.erase(block_id_it);
// Propagate uses back to their definition blocks using succ_vreg.
- auto block = sequence()->instruction_blocks()[succ_index];
- auto& succ_map = incoming_maps_[succ_index]->map();
+ const InstructionBlock* block =
+ sequence()->instruction_blocks()[succ_index];
+ OperandMap::Map& succ_map = incoming_maps_[succ_index]->map();
for (size_t i = 0; i < block->PredecessorCount(); ++i) {
for (auto& succ_val : succ_map) {
// An incoming map contains no defines.
@@ -561,15 +565,15 @@ class RegisterAllocatorVerifier::BlockMaps {
if (succ_vreg == kInvalidVreg) continue;
// May need to transition phi.
if (IsPhi(succ_vreg)) {
- auto phi = GetPhi(succ_vreg);
+ const PhiData* phi = GetPhi(succ_vreg);
if (phi->definition_rpo.ToSize() == succ_index) {
// phi definition block, transition to pred value.
succ_vreg = phi->operands[i];
}
}
// Push succ_vreg up to all predecessors.
- auto pred_rpo = block->predecessors()[i];
- auto& pred_map = outgoing_maps_[pred_rpo.ToSize()]->map();
+ RpoNumber pred_rpo = block->predecessors()[i];
+ OperandMap::Map& pred_map = outgoing_maps_[pred_rpo.ToSize()]->map();
auto& pred_val = *pred_map.find(succ_val.first);
if (pred_val.second->use_vreg != kInvalidVreg) {
CHECK_EQ(succ_vreg, pred_val.second->use_vreg);
@@ -606,7 +610,7 @@ class RegisterAllocatorVerifier::BlockMaps {
}
}
// Clear uses and back links for second pass.
- for (auto operand_map : incoming_maps_) {
+ for (OperandMap* operand_map : incoming_maps_) {
for (auto& succ_val : operand_map->map()) {
succ_val.second->incoming = nullptr;
succ_val.second->use_vreg = kInvalidVreg;
@@ -616,18 +620,19 @@ class RegisterAllocatorVerifier::BlockMaps {
private:
OperandMap* InitializeFromFirstPredecessor(size_t block_index) {
- auto to_init = outgoing_maps_[block_index];
+ OperandMap* to_init = outgoing_maps_[block_index];
CHECK(to_init->map().empty());
- auto block = sequence()->instruction_blocks()[block_index];
+ const InstructionBlock* block =
+ sequence()->instruction_blocks()[block_index];
if (block->predecessors().empty()) return to_init;
size_t predecessor_index = block->predecessors()[0].ToSize();
// Ensure not a backedge.
CHECK(predecessor_index < block->rpo_number().ToSize());
- auto incoming = outgoing_maps_[predecessor_index];
+ OperandMap* incoming = outgoing_maps_[predecessor_index];
// Copy map and replace values.
to_init->map() = incoming->map();
for (auto& it : to_init->map()) {
- auto incoming = it.second;
+ OperandMap::MapValue* incoming = it.second;
it.second = new (zone()) OperandMap::MapValue();
it.second->incoming = incoming;
}
@@ -653,8 +658,9 @@ class RegisterAllocatorVerifier::BlockMaps {
void InitializePhis() {
const size_t block_count = sequence()->instruction_blocks().size();
for (size_t block_index = 0; block_index < block_count; ++block_index) {
- const auto block = sequence()->instruction_blocks()[block_index];
- for (auto phi : block->phis()) {
+ const InstructionBlock* block =
+ sequence()->instruction_blocks()[block_index];
+ for (const PhiInstruction* phi : block->phis()) {
int first_pred_vreg = phi->operands()[0];
const PhiData* first_pred_phi = nullptr;
if (IsPhi(first_pred_vreg)) {
@@ -662,7 +668,7 @@ class RegisterAllocatorVerifier::BlockMaps {
first_pred_vreg = first_pred_phi->first_pred_vreg;
}
CHECK(!IsPhi(first_pred_vreg));
- auto phi_data = new (zone()) PhiData(
+ PhiData* phi_data = new (zone()) PhiData(
block->rpo_number(), phi, first_pred_vreg, first_pred_phi, zone());
auto res =
phi_map_.insert(std::make_pair(phi->virtual_register(), phi_data));
@@ -700,14 +706,17 @@ void RegisterAllocatorVerifier::VerifyGapMoves(BlockMaps* block_maps,
bool initial_pass) {
const size_t block_count = sequence()->instruction_blocks().size();
for (size_t block_index = 0; block_index < block_count; ++block_index) {
- auto current = block_maps->InitializeIncoming(block_index, initial_pass);
- const auto block = sequence()->instruction_blocks()[block_index];
+ OperandMap* current =
+ block_maps->InitializeIncoming(block_index, initial_pass);
+ const InstructionBlock* block =
+ sequence()->instruction_blocks()[block_index];
for (int instr_index = block->code_start(); instr_index < block->code_end();
++instr_index) {
- const auto& instr_constraint = constraints_[instr_index];
- const auto instr = instr_constraint.instruction_;
+ const InstructionConstraint& instr_constraint = constraints_[instr_index];
+ const Instruction* instr = instr_constraint.instruction_;
current->RunGaps(zone(), instr);
- const auto op_constraints = instr_constraint.operand_constraints_;
+ const OperandConstraint* op_constraints =
+ instr_constraint.operand_constraints_;
size_t count = 0;
for (size_t i = 0; i < instr->InputCount(); ++i, ++count) {
if (op_constraints[count].type_ == kImmediate ||
@@ -715,11 +724,11 @@ void RegisterAllocatorVerifier::VerifyGapMoves(BlockMaps* block_maps,
continue;
}
int virtual_register = op_constraints[count].virtual_register_;
- auto op = instr->InputAt(i);
+ const InstructionOperand* op = instr->InputAt(i);
if (!block_maps->IsPhi(virtual_register)) {
current->Use(op, virtual_register, initial_pass);
} else {
- auto phi = block_maps->GetPhi(virtual_register);
+ const PhiData* phi = block_maps->GetPhi(virtual_register);
current->UsePhi(op, phi, initial_pass);
}
}
diff --git a/deps/v8/src/compiler/register-allocator.cc b/deps/v8/src/compiler/register-allocator.cc
index 02ba1f17c2..82faf75242 100644
--- a/deps/v8/src/compiler/register-allocator.cc
+++ b/deps/v8/src/compiler/register-allocator.cc
@@ -25,7 +25,6 @@ void RemoveElement(ZoneVector<LiveRange*>* v, LiveRange* range) {
v->erase(it);
}
-
int GetRegisterCount(const RegisterConfiguration* cfg, RegisterKind kind) {
return kind == DOUBLE_REGISTERS ? cfg->num_double_registers()
: cfg->num_general_registers();
@@ -1561,7 +1560,6 @@ bool RegisterAllocationData::RangesDefinedInDeferredStayInDeferred() {
return true;
}
-
SpillRange* RegisterAllocationData::AssignSpillRangeToLiveRange(
TopLevelLiveRange* range) {
DCHECK(!range->HasSpillOperand());
@@ -2197,8 +2195,22 @@ void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block,
int phi_vreg = phi->virtual_register();
live->Remove(phi_vreg);
InstructionOperand* hint = nullptr;
- Instruction* instr = GetLastInstruction(
- code(), code()->InstructionBlockAt(block->predecessors()[0]));
+ const InstructionBlock::Predecessors& predecessors = block->predecessors();
+ const InstructionBlock* predecessor_block =
+ code()->InstructionBlockAt(predecessors[0]);
+ const Instruction* instr = GetLastInstruction(code(), predecessor_block);
+ if (predecessor_block->IsDeferred()) {
+ // "Prefer the hint from the first non-deferred predecessor, if any.
+ for (size_t i = 1; i < predecessors.size(); ++i) {
+ predecessor_block = code()->InstructionBlockAt(predecessors[i]);
+ if (!predecessor_block->IsDeferred()) {
+ instr = GetLastInstruction(code(), predecessor_block);
+ break;
+ }
+ }
+ }
+ DCHECK_NOT_NULL(instr);
+
for (MoveOperands* move : *instr->GetParallelMove(Instruction::END)) {
InstructionOperand& to = move->destination();
if (to.IsUnallocated() &&
@@ -2322,11 +2334,79 @@ void LiveRangeBuilder::Verify() const {
for (auto& hint : phi_hints_) {
CHECK(hint.second->IsResolved());
}
- for (TopLevelLiveRange* current : data()->live_ranges()) {
- if (current != nullptr && !current->IsEmpty()) current->Verify();
+ for (const TopLevelLiveRange* current : data()->live_ranges()) {
+ if (current != nullptr && !current->IsEmpty()) {
+ // New LiveRanges should not be split.
+ CHECK_NULL(current->next());
+ // General integrity check.
+ current->Verify();
+ const UseInterval* first = current->first_interval();
+ if (first->next() == nullptr) continue;
+
+ // Consecutive intervals should not end and start in the same block,
+ // otherwise the intervals should have been joined, because the
+ // variable is live throughout that block.
+ CHECK(NextIntervalStartsInDifferentBlocks(first));
+
+ for (const UseInterval* i = first->next(); i != nullptr; i = i->next()) {
+ // Except for the first interval, the other intevals must start at
+ // a block boundary, otherwise data wouldn't flow to them.
+ CHECK(IntervalStartsAtBlockBoundary(i));
+ // The last instruction of the predecessors of the block the interval
+ // starts must be covered by the range.
+ CHECK(IntervalPredecessorsCoveredByRange(i, current));
+ if (i->next() != nullptr) {
+ // Check the consecutive intervals property, except for the last
+ // interval, where it doesn't apply.
+ CHECK(NextIntervalStartsInDifferentBlocks(i));
+ }
+ }
+ }
}
}
+bool LiveRangeBuilder::IntervalStartsAtBlockBoundary(
+ const UseInterval* interval) const {
+ LifetimePosition start = interval->start();
+ if (!start.IsFullStart()) return false;
+ int instruction_index = start.ToInstructionIndex();
+ const InstructionBlock* block =
+ data()->code()->GetInstructionBlock(instruction_index);
+ return block->first_instruction_index() == instruction_index;
+}
+
+bool LiveRangeBuilder::IntervalPredecessorsCoveredByRange(
+ const UseInterval* interval, const TopLevelLiveRange* range) const {
+ LifetimePosition start = interval->start();
+ int instruction_index = start.ToInstructionIndex();
+ const InstructionBlock* block =
+ data()->code()->GetInstructionBlock(instruction_index);
+ for (RpoNumber pred_index : block->predecessors()) {
+ const InstructionBlock* predecessor =
+ data()->code()->InstructionBlockAt(pred_index);
+ LifetimePosition last_pos = LifetimePosition::GapFromInstructionIndex(
+ predecessor->last_instruction_index());
+ last_pos = last_pos.NextStart().End();
+ if (!range->Covers(last_pos)) return false;
+ }
+ return true;
+}
+
+bool LiveRangeBuilder::NextIntervalStartsInDifferentBlocks(
+ const UseInterval* interval) const {
+ DCHECK_NOT_NULL(interval->next());
+ LifetimePosition end = interval->end();
+ LifetimePosition next_start = interval->next()->start();
+ // Since end is not covered, but the previous position is, move back a
+ // position
+ end = end.IsStart() ? end.PrevStart().End() : end.Start();
+ int last_covered_index = end.ToInstructionIndex();
+ const InstructionBlock* block =
+ data()->code()->GetInstructionBlock(last_covered_index);
+ const InstructionBlock* next_block =
+ data()->code()->GetInstructionBlock(next_start.ToInstructionIndex());
+ return block->rpo_number() < next_block->rpo_number();
+}
RegisterAllocator::RegisterAllocator(RegisterAllocationData* data,
RegisterKind kind)
@@ -3081,21 +3161,14 @@ void SpillSlotLocator::LocateSpillSlots() {
for (TopLevelLiveRange* range : data()->live_ranges()) {
if (range == nullptr || range->IsEmpty()) continue;
// We care only about ranges which spill in the frame.
- if (!range->HasSpillRange()) continue;
- if (range->IsSpilledOnlyInDeferredBlocks()) {
- for (LiveRange* child = range; child != nullptr; child = child->next()) {
- if (child->spilled()) {
- code->GetInstructionBlock(child->Start().ToInstructionIndex())
- ->mark_needs_frame();
- }
- }
- } else {
- TopLevelLiveRange::SpillMoveInsertionList* spills =
- range->GetSpillMoveInsertionLocations();
- DCHECK_NOT_NULL(spills);
- for (; spills != nullptr; spills = spills->next) {
- code->GetInstructionBlock(spills->gap_index)->mark_needs_frame();
- }
+ if (!range->HasSpillRange() || range->IsSpilledOnlyInDeferredBlocks()) {
+ continue;
+ }
+ TopLevelLiveRange::SpillMoveInsertionList* spills =
+ range->GetSpillMoveInsertionLocations();
+ DCHECK_NOT_NULL(spills);
+ for (; spills != nullptr; spills = spills->next) {
+ code->GetInstructionBlock(spills->gap_index)->mark_needs_frame();
}
}
}
@@ -3558,7 +3631,7 @@ void LiveRangeConnector::CommitSpillsInDeferredBlocks(
worklist.pop();
if (done_blocks.Contains(block_id)) continue;
done_blocks.Add(block_id);
- const InstructionBlock* spill_block =
+ InstructionBlock* spill_block =
code->InstructionBlockAt(RpoNumber::FromInt(block_id));
for (const RpoNumber& pred : spill_block->predecessors()) {
@@ -3578,6 +3651,7 @@ void LiveRangeConnector::CommitSpillsInDeferredBlocks(
data()->AddGapMove(spill_block->first_instruction_index(),
Instruction::GapPosition::START, pred_op,
spill_operand);
+ spill_block->mark_needs_frame();
}
}
}
diff --git a/deps/v8/src/compiler/register-allocator.h b/deps/v8/src/compiler/register-allocator.h
index 38fad05ed3..d6ed005270 100644
--- a/deps/v8/src/compiler/register-allocator.h
+++ b/deps/v8/src/compiler/register-allocator.h
@@ -919,7 +919,12 @@ class LiveRangeBuilder final : public ZoneObject {
return data()->live_in_sets();
}
+ // Verification.
void Verify() const;
+ bool IntervalStartsAtBlockBoundary(const UseInterval* interval) const;
+ bool IntervalPredecessorsCoveredByRange(const UseInterval* interval,
+ const TopLevelLiveRange* range) const;
+ bool NextIntervalStartsInDifferentBlocks(const UseInterval* interval) const;
// Liveness analysis support.
void AddInitialIntervals(const InstructionBlock* block, BitVector* live_out);
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index 2f7720beb3..f59c8bc909 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -142,7 +142,8 @@ Node* RepresentationChanger::GetRepresentationFor(
case MachineRepresentation::kWord8:
case MachineRepresentation::kWord16:
case MachineRepresentation::kWord32:
- return GetWord32RepresentationFor(node, output_rep, output_type);
+ return GetWord32RepresentationFor(node, output_rep, output_type,
+ truncation);
case MachineRepresentation::kWord64:
return GetWord64RepresentationFor(node, output_rep, output_type);
case MachineRepresentation::kSimd128: // Fall through.
@@ -234,30 +235,34 @@ Node* RepresentationChanger::GetFloat32RepresentationFor(
break;
}
// Select the correct X -> Float32 operator.
- const Operator* op;
- if (output_rep == MachineRepresentation::kBit) {
- return TypeError(node, output_rep, output_type,
- MachineRepresentation::kFloat32);
- } else if (IsWord(output_rep)) {
+ const Operator* op = nullptr;
+ if (IsWord(output_rep)) {
if (output_type->Is(Type::Signed32())) {
+ // int32 -> float64 -> float32
op = machine()->ChangeInt32ToFloat64();
- } else {
- // Either the output is int32 or the uses only care about the
- // low 32 bits (so we can pick int32 safely).
- DCHECK(output_type->Is(Type::Unsigned32()) ||
- truncation.TruncatesToWord32());
+ node = jsgraph()->graph()->NewNode(op, node);
+ op = machine()->TruncateFloat64ToFloat32();
+ } else if (output_type->Is(Type::Unsigned32()) ||
+ truncation.TruncatesToWord32()) {
+ // Either the output is uint32 or the uses only care about the
+ // low 32 bits (so we can pick uint32 safely).
+
+ // uint32 -> float64 -> float32
op = machine()->ChangeUint32ToFloat64();
+ node = jsgraph()->graph()->NewNode(op, node);
+ op = machine()->TruncateFloat64ToFloat32();
}
- // int32 -> float64 -> float32
- node = jsgraph()->graph()->NewNode(op, node);
- op = machine()->TruncateFloat64ToFloat32();
} else if (output_rep == MachineRepresentation::kTagged) {
- op = simplified()->ChangeTaggedToFloat64(); // tagged -> float64 -> float32
- node = jsgraph()->graph()->NewNode(op, node);
- op = machine()->TruncateFloat64ToFloat32();
+ if (output_type->Is(Type::Number())) {
+ op = simplified()
+ ->ChangeTaggedToFloat64(); // tagged -> float64 -> float32
+ node = jsgraph()->graph()->NewNode(op, node);
+ op = machine()->TruncateFloat64ToFloat32();
+ }
} else if (output_rep == MachineRepresentation::kFloat64) {
op = machine()->TruncateFloat64ToFloat32();
- } else {
+ }
+ if (op == nullptr) {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kFloat32);
}
@@ -289,25 +294,24 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
break;
}
// Select the correct X -> Float64 operator.
- const Operator* op;
- if (output_rep == MachineRepresentation::kBit) {
- return TypeError(node, output_rep, output_type,
- MachineRepresentation::kFloat64);
- } else if (IsWord(output_rep)) {
+ const Operator* op = nullptr;
+ if (IsWord(output_rep)) {
if (output_type->Is(Type::Signed32())) {
op = machine()->ChangeInt32ToFloat64();
- } else {
- // Either the output is int32 or the uses only care about the
- // low 32 bits (so we can pick int32 safely).
- DCHECK(output_type->Is(Type::Unsigned32()) ||
- truncation.TruncatesToWord32());
+ } else if (output_type->Is(Type::Unsigned32()) ||
+ truncation.TruncatesToWord32()) {
+ // Either the output is uint32 or the uses only care about the
+ // low 32 bits (so we can pick uint32 safely).
op = machine()->ChangeUint32ToFloat64();
}
} else if (output_rep == MachineRepresentation::kTagged) {
- op = simplified()->ChangeTaggedToFloat64();
+ if (output_type->Is(Type::Number())) {
+ op = simplified()->ChangeTaggedToFloat64();
+ }
} else if (output_rep == MachineRepresentation::kFloat32) {
op = machine()->ChangeFloat32ToFloat64();
- } else {
+ }
+ if (op == nullptr) {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kFloat64);
}
@@ -319,9 +323,9 @@ Node* RepresentationChanger::MakeTruncatedInt32Constant(double value) {
return jsgraph()->Int32Constant(DoubleToInt32(value));
}
-
Node* RepresentationChanger::GetWord32RepresentationFor(
- Node* node, MachineRepresentation output_rep, Type* output_type) {
+ Node* node, MachineRepresentation output_rep, Type* output_type,
+ Truncation truncation) {
// Eagerly fold representation changes for constants.
switch (node->opcode()) {
case IrOpcode::kInt32Constant:
@@ -335,43 +339,37 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
break;
}
// Select the correct X -> Word32 operator.
- const Operator* op;
- Type* type = NodeProperties::GetType(node);
-
+ const Operator* op = nullptr;
if (output_rep == MachineRepresentation::kBit) {
return node; // Sloppy comparison -> word32
} else if (output_rep == MachineRepresentation::kFloat64) {
- // TODO(jarin) Use only output_type here, once we intersect it with the
- // type inferred by the typer.
- if (output_type->Is(Type::Unsigned32()) || type->Is(Type::Unsigned32())) {
+ if (output_type->Is(Type::Unsigned32())) {
op = machine()->ChangeFloat64ToUint32();
- } else if (output_type->Is(Type::Signed32()) ||
- type->Is(Type::Signed32())) {
+ } else if (output_type->Is(Type::Signed32())) {
op = machine()->ChangeFloat64ToInt32();
- } else {
+ } else if (truncation.TruncatesToWord32()) {
op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
}
} else if (output_rep == MachineRepresentation::kFloat32) {
node = InsertChangeFloat32ToFloat64(node); // float32 -> float64 -> int32
- if (output_type->Is(Type::Unsigned32()) || type->Is(Type::Unsigned32())) {
+ if (output_type->Is(Type::Unsigned32())) {
op = machine()->ChangeFloat64ToUint32();
- } else if (output_type->Is(Type::Signed32()) ||
- type->Is(Type::Signed32())) {
+ } else if (output_type->Is(Type::Signed32())) {
op = machine()->ChangeFloat64ToInt32();
- } else {
+ } else if (truncation.TruncatesToWord32()) {
op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
}
} else if (output_rep == MachineRepresentation::kTagged) {
- if (output_type->Is(Type::Unsigned32()) || type->Is(Type::Unsigned32())) {
+ if (output_type->Is(Type::Unsigned32())) {
op = simplified()->ChangeTaggedToUint32();
- } else if (output_type->Is(Type::Signed32()) ||
- type->Is(Type::Signed32())) {
+ } else if (output_type->Is(Type::Signed32())) {
op = simplified()->ChangeTaggedToInt32();
- } else {
+ } else if (truncation.TruncatesToWord32()) {
node = InsertChangeTaggedToFloat64(node);
op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
}
- } else {
+ }
+ if (op == nullptr) {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kWord32);
}
@@ -467,6 +465,10 @@ const Operator* RepresentationChanger::Uint32OperatorFor(
return machine()->Uint32LessThan();
case IrOpcode::kNumberLessThanOrEqual:
return machine()->Uint32LessThanOrEqual();
+ case IrOpcode::kNumberClz32:
+ return machine()->Word32Clz();
+ case IrOpcode::kNumberImul:
+ return machine()->Int32Mul();
default:
UNREACHABLE();
return nullptr;
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index 62ea3b4684..24e28f399c 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -121,7 +121,7 @@ class RepresentationChanger final {
MachineRepresentation output_rep,
Type* output_type, Truncation truncation);
Node* GetWord32RepresentationFor(Node* node, MachineRepresentation output_rep,
- Type* output_type);
+ Type* output_type, Truncation truncation);
Node* GetBitRepresentationFor(Node* node, MachineRepresentation output_rep,
Type* output_type);
Node* GetWord64RepresentationFor(Node* node, MachineRepresentation output_rep,
diff --git a/deps/v8/src/compiler/s390/OWNERS b/deps/v8/src/compiler/s390/OWNERS
new file mode 100644
index 0000000000..eb007cb908
--- /dev/null
+++ b/deps/v8/src/compiler/s390/OWNERS
@@ -0,0 +1,5 @@
+jyan@ca.ibm.com
+dstence@us.ibm.com
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
diff --git a/deps/v8/src/compiler/s390/code-generator-s390.cc b/deps/v8/src/compiler/s390/code-generator-s390.cc
new file mode 100644
index 0000000000..1d9685668e
--- /dev/null
+++ b/deps/v8/src/compiler/s390/code-generator-s390.cc
@@ -0,0 +1,2079 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/ast/scopes.h"
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/osr.h"
+#include "src/s390/macro-assembler-s390.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ masm()->
+
+#define kScratchReg ip
+
+// Adds S390-specific methods to convert InstructionOperands.
+class S390OperandConverter final : public InstructionOperandConverter {
+ public:
+ S390OperandConverter(CodeGenerator* gen, Instruction* instr)
+ : InstructionOperandConverter(gen, instr) {}
+
+ size_t OutputCount() { return instr_->OutputCount(); }
+
+ bool CompareLogical() const {
+ switch (instr_->flags_condition()) {
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ case kUnsignedLessThanOrEqual:
+ case kUnsignedGreaterThan:
+ return true;
+ default:
+ return false;
+ }
+ UNREACHABLE();
+ return false;
+ }
+
+ Operand InputImmediate(size_t index) {
+ Constant constant = ToConstant(instr_->InputAt(index));
+ switch (constant.type()) {
+ case Constant::kInt32:
+ return Operand(constant.ToInt32());
+ case Constant::kFloat32:
+ return Operand(
+ isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
+ case Constant::kFloat64:
+ return Operand(
+ isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+ case Constant::kInt64:
+#if V8_TARGET_ARCH_S390X
+ return Operand(constant.ToInt64());
+#endif
+ case Constant::kExternalReference:
+ case Constant::kHeapObject:
+ case Constant::kRpoNumber:
+ break;
+ }
+ UNREACHABLE();
+ return Operand::Zero();
+ }
+
+ MemOperand MemoryOperand(AddressingMode* mode, size_t* first_index) {
+ const size_t index = *first_index;
+ *mode = AddressingModeField::decode(instr_->opcode());
+ switch (*mode) {
+ case kMode_None:
+ break;
+ case kMode_MRI:
+ *first_index += 2;
+ return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
+ case kMode_MRR:
+ *first_index += 2;
+ return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
+ }
+ UNREACHABLE();
+ return MemOperand(r0);
+ }
+
+ MemOperand MemoryOperand(AddressingMode* mode, size_t first_index = 0) {
+ return MemoryOperand(mode, &first_index);
+ }
+
+ MemOperand ToMemOperand(InstructionOperand* op) const {
+ DCHECK_NOT_NULL(op);
+ DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ return SlotToMemOperand(AllocatedOperand::cast(op)->index());
+ }
+
+ MemOperand SlotToMemOperand(int slot) const {
+ FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
+ return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
+ }
+};
+
+static inline bool HasRegisterInput(Instruction* instr, int index) {
+ return instr->InputAt(index)->IsRegister();
+}
+
+namespace {
+
+class OutOfLineLoadNAN32 final : public OutOfLineCode {
+ public:
+ OutOfLineLoadNAN32(CodeGenerator* gen, DoubleRegister result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() final {
+ __ LoadDoubleLiteral(result_, std::numeric_limits<float>::quiet_NaN(),
+ kScratchReg);
+ }
+
+ private:
+ DoubleRegister const result_;
+};
+
+class OutOfLineLoadNAN64 final : public OutOfLineCode {
+ public:
+ OutOfLineLoadNAN64(CodeGenerator* gen, DoubleRegister result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() final {
+ __ LoadDoubleLiteral(result_, std::numeric_limits<double>::quiet_NaN(),
+ kScratchReg);
+ }
+
+ private:
+ DoubleRegister const result_;
+};
+
+class OutOfLineLoadZero final : public OutOfLineCode {
+ public:
+ OutOfLineLoadZero(CodeGenerator* gen, Register result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() final { __ LoadImmP(result_, Operand::Zero()); }
+
+ private:
+ Register const result_;
+};
+
+class OutOfLineRecordWrite final : public OutOfLineCode {
+ public:
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register offset,
+ Register value, Register scratch0, Register scratch1,
+ RecordWriteMode mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ offset_(offset),
+ offset_immediate_(0),
+ value_(value),
+ scratch0_(scratch0),
+ scratch1_(scratch1),
+ mode_(mode) {}
+
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
+ Register value, Register scratch0, Register scratch1,
+ RecordWriteMode mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ offset_(no_reg),
+ offset_immediate_(offset),
+ value_(value),
+ scratch0_(scratch0),
+ scratch1_(scratch1),
+ mode_(mode),
+ must_save_lr_(!gen->frame_access_state()->has_frame()) {}
+
+ void Generate() final {
+ if (mode_ > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value_, exit());
+ }
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ exit());
+ RememberedSetAction const remembered_set_action =
+ mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
+ : OMIT_REMEMBERED_SET;
+ SaveFPRegsMode const save_fp_mode =
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ if (must_save_lr_) {
+ // We need to save and restore r14 if the frame was elided.
+ __ Push(r14);
+ }
+ RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
+ remembered_set_action, save_fp_mode);
+ if (offset_.is(no_reg)) {
+ __ AddP(scratch1_, object_, Operand(offset_immediate_));
+ } else {
+ DCHECK_EQ(0, offset_immediate_);
+ __ AddP(scratch1_, object_, offset_);
+ }
+ __ CallStub(&stub);
+ if (must_save_lr_) {
+ // We need to save and restore r14 if the frame was elided.
+ __ Pop(r14);
+ }
+ }
+
+ private:
+ Register const object_;
+ Register const offset_;
+ int32_t const offset_immediate_; // Valid if offset_.is(no_reg).
+ Register const value_;
+ Register const scratch0_;
+ Register const scratch1_;
+ RecordWriteMode const mode_;
+ bool must_save_lr_;
+};
+
+Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
+ switch (condition) {
+ case kEqual:
+ return eq;
+ case kNotEqual:
+ return ne;
+ case kSignedLessThan:
+ case kUnsignedLessThan:
+ return lt;
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedGreaterThanOrEqual:
+ return ge;
+ case kSignedLessThanOrEqual:
+ case kUnsignedLessThanOrEqual:
+ return le;
+ case kSignedGreaterThan:
+ case kUnsignedGreaterThan:
+ return gt;
+ case kOverflow:
+ // Overflow checked for AddP/SubP only.
+ switch (op) {
+#if V8_TARGET_ARCH_S390X
+ case kS390_Add:
+ case kS390_Sub:
+ return lt;
+#endif
+ case kS390_AddWithOverflow32:
+ case kS390_SubWithOverflow32:
+#if V8_TARGET_ARCH_S390X
+ return ne;
+#else
+ return lt;
+#endif
+ default:
+ break;
+ }
+ break;
+ case kNotOverflow:
+ switch (op) {
+#if V8_TARGET_ARCH_S390X
+ case kS390_Add:
+ case kS390_Sub:
+ return ge;
+#endif
+ case kS390_AddWithOverflow32:
+ case kS390_SubWithOverflow32:
+#if V8_TARGET_ARCH_S390X
+ return eq;
+#else
+ return ge;
+#endif
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return kNoCondition;
+}
+
+} // namespace
+
+#define ASSEMBLE_FLOAT_UNOP(asm_instr) \
+ do { \
+ __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ } while (0)
+
+#define ASSEMBLE_FLOAT_BINOP(asm_instr) \
+ do { \
+ __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
+ i.InputDoubleRegister(1)); \
+ } while (0)
+
+#define ASSEMBLE_BINOP(asm_instr_reg, asm_instr_imm) \
+ do { \
+ if (HasRegisterInput(instr, 1)) { \
+ __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
+ i.InputRegister(1)); \
+ } else { \
+ __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
+ i.InputImmediate(1)); \
+ } \
+ } while (0)
+
+#define ASSEMBLE_BINOP_INT(asm_instr_reg, asm_instr_imm) \
+ do { \
+ if (HasRegisterInput(instr, 1)) { \
+ __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
+ i.InputRegister(1)); \
+ } else { \
+ __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
+ i.InputInt32(1)); \
+ } \
+ } while (0)
+
+#define ASSEMBLE_ADD_WITH_OVERFLOW() \
+ do { \
+ if (HasRegisterInput(instr, 1)) { \
+ __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
+ i.InputRegister(1), kScratchReg, r0); \
+ } else { \
+ __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
+ i.InputInt32(1), kScratchReg, r0); \
+ } \
+ } while (0)
+
+#define ASSEMBLE_SUB_WITH_OVERFLOW() \
+ do { \
+ if (HasRegisterInput(instr, 1)) { \
+ __ SubAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
+ i.InputRegister(1), kScratchReg, r0); \
+ } else { \
+ __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
+ -i.InputInt32(1), kScratchReg, r0); \
+ } \
+ } while (0)
+
+#if V8_TARGET_ARCH_S390X
+#define ASSEMBLE_ADD_WITH_OVERFLOW32() \
+ do { \
+ ASSEMBLE_BINOP(AddP, AddP); \
+ __ TestIfInt32(i.OutputRegister(), r0); \
+ } while (0)
+
+#define ASSEMBLE_SUB_WITH_OVERFLOW32() \
+ do { \
+ ASSEMBLE_BINOP(SubP, SubP); \
+ __ TestIfInt32(i.OutputRegister(), r0); \
+ } while (0)
+#else
+#define ASSEMBLE_ADD_WITH_OVERFLOW32 ASSEMBLE_ADD_WITH_OVERFLOW
+#define ASSEMBLE_SUB_WITH_OVERFLOW32 ASSEMBLE_SUB_WITH_OVERFLOW
+#endif
+
+#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr) \
+ do { \
+ if (HasRegisterInput(instr, 1)) { \
+ if (i.CompareLogical()) { \
+ __ cmpl_instr(i.InputRegister(0), i.InputRegister(1)); \
+ } else { \
+ __ cmp_instr(i.InputRegister(0), i.InputRegister(1)); \
+ } \
+ } else { \
+ if (i.CompareLogical()) { \
+ __ cmpl_instr(i.InputRegister(0), i.InputImmediate(1)); \
+ } else { \
+ __ cmp_instr(i.InputRegister(0), i.InputImmediate(1)); \
+ } \
+ } \
+ } while (0)
+
+#define ASSEMBLE_FLOAT_COMPARE(cmp_instr) \
+ do { \
+ __ cmp_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1); \
+ } while (0)
+
+// Divide instruction dr will implicity use register pair
+// r0 & r1 below.
+// R0:R1 = R1 / divisor - R0 remainder
+// Copy remainder to output reg
+#define ASSEMBLE_MODULO(div_instr, shift_instr) \
+ do { \
+ __ LoadRR(r0, i.InputRegister(0)); \
+ __ shift_instr(r0, Operand(32)); \
+ __ div_instr(r0, i.InputRegister(1)); \
+ __ ltr(i.OutputRegister(), r0); \
+ } while (0)
+
+#define ASSEMBLE_FLOAT_MODULO() \
+ do { \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 2, kScratchReg); \
+ __ MovToFloatParameters(i.InputDoubleRegister(0), \
+ i.InputDoubleRegister(1)); \
+ __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), \
+ 0, 2); \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
+ } while (0)
+
+#define ASSEMBLE_FLOAT_MAX(double_scratch_reg, general_scratch_reg) \
+ do { \
+ Label ge, done; \
+ __ cdbr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
+ __ bge(&ge, Label::kNear); \
+ __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(1)); \
+ __ b(&done, Label::kNear); \
+ __ bind(&ge); \
+ __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ bind(&done); \
+ } while (0)
+
+#define ASSEMBLE_FLOAT_MIN(double_scratch_reg, general_scratch_reg) \
+ do { \
+ Label ge, done; \
+ __ cdbr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
+ __ bge(&ge, Label::kNear); \
+ __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ b(&done, Label::kNear); \
+ __ bind(&ge); \
+ __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(1)); \
+ __ bind(&done); \
+ } while (0)
+
+// Only MRI mode for these instructions available
+#define ASSEMBLE_LOAD_FLOAT(asm_instr) \
+ do { \
+ DoubleRegister result = i.OutputDoubleRegister(); \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode); \
+ __ asm_instr(result, operand); \
+ } while (0)
+
+#define ASSEMBLE_LOAD_INTEGER(asm_instr) \
+ do { \
+ Register result = i.OutputRegister(); \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode); \
+ __ asm_instr(result, operand); \
+ } while (0)
+
+#define ASSEMBLE_STORE_FLOAT32() \
+ do { \
+ size_t index = 0; \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode, &index); \
+ DoubleRegister value = i.InputDoubleRegister(index); \
+ __ StoreFloat32(value, operand); \
+ } while (0)
+
+#define ASSEMBLE_STORE_DOUBLE() \
+ do { \
+ size_t index = 0; \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode, &index); \
+ DoubleRegister value = i.InputDoubleRegister(index); \
+ __ StoreDouble(value, operand); \
+ } while (0)
+
+#define ASSEMBLE_STORE_INTEGER(asm_instr) \
+ do { \
+ size_t index = 0; \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode, &index); \
+ Register value = i.InputRegister(index); \
+ __ asm_instr(value, operand); \
+ } while (0)
+
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, width) \
+ do { \
+ DoubleRegister result = i.OutputDoubleRegister(); \
+ size_t index = 0; \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode, index); \
+ Register offset = operand.rb(); \
+ __ lgfr(offset, offset); \
+ if (HasRegisterInput(instr, 2)) { \
+ __ CmpLogical32(offset, i.InputRegister(2)); \
+ } else { \
+ __ CmpLogical32(offset, i.InputImmediate(2)); \
+ } \
+ auto ool = new (zone()) OutOfLineLoadNAN##width(this, result); \
+ __ bge(ool->entry()); \
+ __ asm_instr(result, operand); \
+ __ bind(ool->exit()); \
+ } while (0)
+
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
+ do { \
+ Register result = i.OutputRegister(); \
+ size_t index = 0; \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode, index); \
+ Register offset = operand.rb(); \
+ __ lgfr(offset, offset); \
+ if (HasRegisterInput(instr, 2)) { \
+ __ CmpLogical32(offset, i.InputRegister(2)); \
+ } else { \
+ __ CmpLogical32(offset, i.InputImmediate(2)); \
+ } \
+ auto ool = new (zone()) OutOfLineLoadZero(this, result); \
+ __ bge(ool->entry()); \
+ __ asm_instr(result, operand); \
+ __ bind(ool->exit()); \
+ } while (0)
+
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
+#define ASSEMBLE_CHECKED_STORE_FLOAT32() \
+ do { \
+ Label done; \
+ size_t index = 0; \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode, index); \
+ Register offset = operand.rb(); \
+ __ lgfr(offset, offset); \
+ if (HasRegisterInput(instr, 2)) { \
+ __ CmpLogical32(offset, i.InputRegister(2)); \
+ } else { \
+ __ CmpLogical32(offset, i.InputImmediate(2)); \
+ } \
+ __ bge(&done); \
+ DoubleRegister value = i.InputDoubleRegister(3); \
+ __ StoreFloat32(value, operand); \
+ __ bind(&done); \
+ } while (0)
+
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
+#define ASSEMBLE_CHECKED_STORE_DOUBLE() \
+ do { \
+ Label done; \
+ size_t index = 0; \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode, index); \
+ DCHECK_EQ(kMode_MRR, mode); \
+ Register offset = operand.rb(); \
+ __ lgfr(offset, offset); \
+ if (HasRegisterInput(instr, 2)) { \
+ __ CmpLogical32(offset, i.InputRegister(2)); \
+ } else { \
+ __ CmpLogical32(offset, i.InputImmediate(2)); \
+ } \
+ __ bge(&done); \
+ DoubleRegister value = i.InputDoubleRegister(3); \
+ __ StoreDouble(value, operand); \
+ __ bind(&done); \
+ } while (0)
+
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
+ do { \
+ Label done; \
+ size_t index = 0; \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode, index); \
+ Register offset = operand.rb(); \
+ __ lgfr(offset, offset); \
+ if (HasRegisterInput(instr, 2)) { \
+ __ CmpLogical32(offset, i.InputRegister(2)); \
+ } else { \
+ __ CmpLogical32(offset, i.InputImmediate(2)); \
+ } \
+ __ bge(&done); \
+ Register value = i.InputRegister(3); \
+ __ asm_instr(value, operand); \
+ __ bind(&done); \
+ } while (0)
+
+void CodeGenerator::AssembleDeconstructFrame() {
+ __ LeaveFrame(StackFrame::MANUAL);
+}
+
+void CodeGenerator::AssembleSetupStackPointer() {}
+
+void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta > 0) {
+ __ AddP(sp, sp, Operand(sp_slot_delta * kPointerSize));
+ }
+ frame_access_state()->SetFrameAccessToDefault();
+}
+
+void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta < 0) {
+ __ AddP(sp, sp, Operand(sp_slot_delta * kPointerSize));
+ frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
+ }
+ if (frame_access_state()->has_frame()) {
+ __ RestoreFrameStateForTailCall();
+ }
+ frame_access_state()->SetFrameAccessToSP();
+}
+
+void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+ Label done;
+
+ // Check if current frame is an arguments adaptor frame.
+ __ LoadP(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(scratch1, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ bne(&done);
+
+ // Load arguments count from current arguments adaptor frame (note, it
+ // does not include receiver).
+ Register caller_args_count_reg = scratch1;
+ __ LoadP(caller_args_count_reg,
+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(caller_args_count_reg);
+
+ ParameterCount callee_args_count(args_reg);
+ __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+ scratch3);
+ __ bind(&done);
+}
+
+// Assembles an instruction after register allocation, producing machine code.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+ S390OperandConverter i(this, instr);
+ ArchOpcode opcode = ArchOpcodeField::decode(instr->opcode());
+
+ switch (opcode) {
+ case kArchCallCodeObject: {
+ EnsureSpaceForLazyDeopt();
+ if (HasRegisterInput(instr, 0)) {
+ __ AddP(ip, i.InputRegister(0),
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(ip);
+ } else {
+ __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
+ RelocInfo::CODE_TARGET);
+ }
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchTailCallCodeObjectFromJSFunction:
+ case kArchTailCallCodeObject: {
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
+ if (opcode == kArchTailCallCodeObjectFromJSFunction) {
+ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+ i.TempRegister(0), i.TempRegister(1),
+ i.TempRegister(2));
+ }
+ if (HasRegisterInput(instr, 0)) {
+ __ AddP(ip, i.InputRegister(0),
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(ip);
+ } else {
+ // We cannot use the constant pool to load the target since
+ // we've already restored the caller's frame.
+ ConstantPoolUnavailableScope constant_pool_unavailable(masm());
+ __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
+ RelocInfo::CODE_TARGET);
+ }
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchCallJSFunction: {
+ EnsureSpaceForLazyDeopt();
+ Register func = i.InputRegister(0);
+ if (FLAG_debug_code) {
+ // Check the function's context matches the context argument.
+ __ LoadP(kScratchReg,
+ FieldMemOperand(func, JSFunction::kContextOffset));
+ __ CmpP(cp, kScratchReg);
+ __ Assert(eq, kWrongFunctionContext);
+ }
+ __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+ __ Call(ip);
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchTailCallJSFunctionFromJSFunction:
+ case kArchTailCallJSFunction: {
+ Register func = i.InputRegister(0);
+ if (FLAG_debug_code) {
+ // Check the function's context matches the context argument.
+ __ LoadP(kScratchReg,
+ FieldMemOperand(func, JSFunction::kContextOffset));
+ __ CmpP(cp, kScratchReg);
+ __ Assert(eq, kWrongFunctionContext);
+ }
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
+ if (opcode == kArchTailCallJSFunctionFromJSFunction) {
+ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+ i.TempRegister(0), i.TempRegister(1),
+ i.TempRegister(2));
+ }
+ __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+ __ Jump(ip);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchPrepareCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_parameters, kScratchReg);
+ // Frame alignment requires using FP-relative frame addressing.
+ frame_access_state()->SetFrameAccessToFP();
+ break;
+ }
+ case kArchPrepareTailCall:
+ AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ break;
+ case kArchCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ if (instr->InputAt(0)->IsImmediate()) {
+ ExternalReference ref = i.InputExternalReference(0);
+ __ CallCFunction(ref, num_parameters);
+ } else {
+ Register func = i.InputRegister(0);
+ __ CallCFunction(func, num_parameters);
+ }
+ frame_access_state()->SetFrameAccessToDefault();
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchJmp:
+ AssembleArchJump(i.InputRpo(0));
+ break;
+ case kArchLookupSwitch:
+ AssembleArchLookupSwitch(instr);
+ break;
+ case kArchTableSwitch:
+ AssembleArchTableSwitch(instr);
+ break;
+ case kArchNop:
+ case kArchThrowTerminator:
+ // don't emit code for nops.
+ break;
+ case kArchDeoptimize: {
+ int deopt_state_id =
+ BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
+ Deoptimizer::BailoutType bailout_type =
+ Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ break;
+ }
+ case kArchRet:
+ AssembleReturn();
+ break;
+ case kArchStackPointer:
+ __ LoadRR(i.OutputRegister(), sp);
+ break;
+ case kArchFramePointer:
+ __ LoadRR(i.OutputRegister(), fp);
+ break;
+ case kArchParentFramePointer:
+ if (frame_access_state()->has_frame()) {
+ __ LoadP(i.OutputRegister(), MemOperand(fp, 0));
+ } else {
+ __ LoadRR(i.OutputRegister(), fp);
+ }
+ break;
+ case kArchTruncateDoubleToI:
+ // TODO(mbrandy): move slow call to stub out of line.
+ __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kArchStoreWithWriteBarrier: {
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ Register object = i.InputRegister(0);
+ Register value = i.InputRegister(2);
+ Register scratch0 = i.TempRegister(0);
+ Register scratch1 = i.TempRegister(1);
+ OutOfLineRecordWrite* ool;
+
+ AddressingMode addressing_mode =
+ AddressingModeField::decode(instr->opcode());
+ if (addressing_mode == kMode_MRI) {
+ int32_t offset = i.InputInt32(1);
+ ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
+ scratch0, scratch1, mode);
+ __ StoreP(value, MemOperand(object, offset));
+ } else {
+ DCHECK_EQ(kMode_MRR, addressing_mode);
+ Register offset(i.InputRegister(1));
+ ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
+ scratch0, scratch1, mode);
+ __ StoreP(value, MemOperand(object, offset));
+ }
+ __ CheckPageFlag(object, scratch0,
+ MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+ ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
+ case kArchStackSlot: {
+ FrameOffset offset =
+ frame_access_state()->GetFrameOffset(i.InputInt32(0));
+ __ AddP(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
+ Operand(offset.offset()));
+ break;
+ }
+ case kS390_And:
+ ASSEMBLE_BINOP(AndP, AndP);
+ break;
+ case kS390_AndComplement:
+ __ NotP(i.InputRegister(1));
+ __ AndP(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ break;
+ case kS390_Or:
+ ASSEMBLE_BINOP(OrP, OrP);
+ break;
+ case kS390_OrComplement:
+ __ NotP(i.InputRegister(1));
+ __ OrP(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ break;
+ case kS390_Xor:
+ ASSEMBLE_BINOP(XorP, XorP);
+ break;
+ case kS390_ShiftLeft32:
+ if (HasRegisterInput(instr, 1)) {
+ if (i.OutputRegister().is(i.InputRegister(1)) &&
+ !CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ __ LoadRR(kScratchReg, i.InputRegister(1));
+ __ ShiftLeft(i.OutputRegister(), i.InputRegister(0), kScratchReg);
+ } else {
+ ASSEMBLE_BINOP(ShiftLeft, ShiftLeft);
+ }
+ } else {
+ ASSEMBLE_BINOP(ShiftLeft, ShiftLeft);
+ }
+ __ LoadlW(i.OutputRegister(0), i.OutputRegister(0));
+ break;
+#if V8_TARGET_ARCH_S390X
+ case kS390_ShiftLeft64:
+ ASSEMBLE_BINOP(sllg, sllg);
+ break;
+#endif
+ case kS390_ShiftRight32:
+ if (HasRegisterInput(instr, 1)) {
+ if (i.OutputRegister().is(i.InputRegister(1)) &&
+ !CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ __ LoadRR(kScratchReg, i.InputRegister(1));
+ __ ShiftRight(i.OutputRegister(), i.InputRegister(0), kScratchReg);
+ } else {
+ ASSEMBLE_BINOP(ShiftRight, ShiftRight);
+ }
+ } else {
+ ASSEMBLE_BINOP(ShiftRight, ShiftRight);
+ }
+ __ LoadlW(i.OutputRegister(0), i.OutputRegister(0));
+ break;
+#if V8_TARGET_ARCH_S390X
+ case kS390_ShiftRight64:
+ ASSEMBLE_BINOP(srlg, srlg);
+ break;
+#endif
+ case kS390_ShiftRightArith32:
+ if (HasRegisterInput(instr, 1)) {
+ if (i.OutputRegister().is(i.InputRegister(1)) &&
+ !CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ __ LoadRR(kScratchReg, i.InputRegister(1));
+ __ ShiftRightArith(i.OutputRegister(), i.InputRegister(0),
+ kScratchReg);
+ } else {
+ ASSEMBLE_BINOP(ShiftRightArith, ShiftRightArith);
+ }
+ } else {
+ ASSEMBLE_BINOP(ShiftRightArith, ShiftRightArith);
+ }
+ __ LoadlW(i.OutputRegister(), i.OutputRegister());
+ break;
+#if V8_TARGET_ARCH_S390X
+ case kS390_ShiftRightArith64:
+ ASSEMBLE_BINOP(srag, srag);
+ break;
+#endif
+#if !V8_TARGET_ARCH_S390X
+ case kS390_AddPair:
+ // i.InputRegister(0) ... left low word.
+ // i.InputRegister(1) ... left high word.
+ // i.InputRegister(2) ... right low word.
+ // i.InputRegister(3) ... right high word.
+ __ AddLogical32(i.OutputRegister(0), i.InputRegister(0),
+ i.InputRegister(2));
+ __ AddLogicalWithCarry32(i.OutputRegister(1), i.InputRegister(1),
+ i.InputRegister(3));
+ break;
+ case kS390_SubPair:
+ // i.InputRegister(0) ... left low word.
+ // i.InputRegister(1) ... left high word.
+ // i.InputRegister(2) ... right low word.
+ // i.InputRegister(3) ... right high word.
+ __ SubLogical32(i.OutputRegister(0), i.InputRegister(0),
+ i.InputRegister(2));
+ __ SubLogicalWithBorrow32(i.OutputRegister(1), i.InputRegister(1),
+ i.InputRegister(3));
+ break;
+ case kS390_MulPair:
+ // i.InputRegister(0) ... left low word.
+ // i.InputRegister(1) ... left high word.
+ // i.InputRegister(2) ... right low word.
+ // i.InputRegister(3) ... right high word.
+ __ sllg(r0, i.InputRegister(1), Operand(32));
+ __ sllg(r1, i.InputRegister(3), Operand(32));
+ __ lr(r0, i.InputRegister(0));
+ __ lr(r1, i.InputRegister(2));
+ __ msgr(r1, r0);
+ __ lr(i.OutputRegister(0), r1);
+ __ srag(i.OutputRegister(1), r1, Operand(32));
+ break;
+ case kS390_ShiftLeftPair:
+ if (instr->InputAt(2)->IsImmediate()) {
+ __ ShiftLeftPair(i.OutputRegister(0), i.OutputRegister(1),
+ i.InputRegister(0), i.InputRegister(1),
+ i.InputInt32(2));
+ } else {
+ __ ShiftLeftPair(i.OutputRegister(0), i.OutputRegister(1),
+ i.InputRegister(0), i.InputRegister(1), kScratchReg,
+ i.InputRegister(2));
+ }
+ break;
+ case kS390_ShiftRightPair:
+ if (instr->InputAt(2)->IsImmediate()) {
+ __ ShiftRightPair(i.OutputRegister(0), i.OutputRegister(1),
+ i.InputRegister(0), i.InputRegister(1),
+ i.InputInt32(2));
+ } else {
+ __ ShiftRightPair(i.OutputRegister(0), i.OutputRegister(1),
+ i.InputRegister(0), i.InputRegister(1), kScratchReg,
+ i.InputRegister(2));
+ }
+ break;
+ case kS390_ShiftRightArithPair:
+ if (instr->InputAt(2)->IsImmediate()) {
+ __ ShiftRightArithPair(i.OutputRegister(0), i.OutputRegister(1),
+ i.InputRegister(0), i.InputRegister(1),
+ i.InputInt32(2));
+ } else {
+ __ ShiftRightArithPair(i.OutputRegister(0), i.OutputRegister(1),
+ i.InputRegister(0), i.InputRegister(1),
+ kScratchReg, i.InputRegister(2));
+ }
+ break;
+#endif
+ case kS390_RotRight32:
+ if (HasRegisterInput(instr, 1)) {
+ __ LoadComplementRR(kScratchReg, i.InputRegister(1));
+ __ rll(i.OutputRegister(), i.InputRegister(0), kScratchReg);
+ } else {
+ __ rll(i.OutputRegister(), i.InputRegister(0),
+ Operand(32 - i.InputInt32(1)));
+ }
+ break;
+#if V8_TARGET_ARCH_S390X
+ case kS390_RotRight64:
+ if (HasRegisterInput(instr, 1)) {
+ __ LoadComplementRR(kScratchReg, i.InputRegister(1));
+ __ rllg(i.OutputRegister(), i.InputRegister(0), kScratchReg);
+ } else {
+ __ rllg(i.OutputRegister(), i.InputRegister(0),
+ Operand(64 - i.InputInt32(1)));
+ }
+ break;
+#endif
+ case kS390_Not:
+ __ LoadRR(i.OutputRegister(), i.InputRegister(0));
+ __ NotP(i.OutputRegister());
+ break;
+ case kS390_RotLeftAndMask32:
+ if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+ int shiftAmount = i.InputInt32(1);
+ int endBit = 63 - i.InputInt32(3);
+ int startBit = 63 - i.InputInt32(2);
+ __ rll(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
+ __ risbg(i.OutputRegister(), i.OutputRegister(), Operand(startBit),
+ Operand(endBit), Operand::Zero(), true);
+ } else {
+ int shiftAmount = i.InputInt32(1);
+ int clearBitLeft = 63 - i.InputInt32(2);
+ int clearBitRight = i.InputInt32(3);
+ __ rll(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
+ __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBitLeft));
+ __ srlg(i.OutputRegister(), i.OutputRegister(),
+ Operand((clearBitLeft + clearBitRight)));
+ __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBitRight));
+ }
+ break;
+#if V8_TARGET_ARCH_S390X
+ case kS390_RotLeftAndClear64:
+ UNIMPLEMENTED(); // Find correct instruction
+ break;
+ case kS390_RotLeftAndClearLeft64:
+ if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+ int shiftAmount = i.InputInt32(1);
+ int endBit = 63;
+ int startBit = 63 - i.InputInt32(2);
+ __ risbg(i.OutputRegister(), i.InputRegister(0), Operand(startBit),
+ Operand(endBit), Operand(shiftAmount), true);
+ } else {
+ int shiftAmount = i.InputInt32(1);
+ int clearBit = 63 - i.InputInt32(2);
+ __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
+ __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
+ __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
+ }
+ break;
+ case kS390_RotLeftAndClearRight64:
+ if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+ int shiftAmount = i.InputInt32(1);
+ int endBit = 63 - i.InputInt32(2);
+ int startBit = 0;
+ __ risbg(i.OutputRegister(), i.InputRegister(0), Operand(startBit),
+ Operand(endBit), Operand(shiftAmount), true);
+ } else {
+ int shiftAmount = i.InputInt32(1);
+ int clearBit = i.InputInt32(2);
+ __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
+ __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
+ __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
+ }
+ break;
+#endif
+ case kS390_Add:
+#if V8_TARGET_ARCH_S390X
+ if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
+ ASSEMBLE_ADD_WITH_OVERFLOW();
+ } else {
+#endif
+ ASSEMBLE_BINOP(AddP, AddP);
+#if V8_TARGET_ARCH_S390X
+ }
+#endif
+ break;
+ case kS390_AddWithOverflow32:
+ ASSEMBLE_ADD_WITH_OVERFLOW32();
+ break;
+ case kS390_AddFloat:
+ // Ensure we don't clobber right/InputReg(1)
+ if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
+ ASSEMBLE_FLOAT_UNOP(aebr);
+ } else {
+ if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
+ __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ aebr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
+ break;
+ case kS390_AddDouble:
+ // Ensure we don't clobber right/InputReg(1)
+ if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
+ ASSEMBLE_FLOAT_UNOP(adbr);
+ } else {
+ if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
+ __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ adbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
+ break;
+ case kS390_Sub:
+#if V8_TARGET_ARCH_S390X
+ if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
+ ASSEMBLE_SUB_WITH_OVERFLOW();
+ } else {
+#endif
+ ASSEMBLE_BINOP(SubP, SubP);
+#if V8_TARGET_ARCH_S390X
+ }
+#endif
+ break;
+ case kS390_SubWithOverflow32:
+ ASSEMBLE_SUB_WITH_OVERFLOW32();
+ break;
+ case kS390_SubFloat:
+ // OutputDoubleReg() = i.InputDoubleRegister(0) - i.InputDoubleRegister(1)
+ if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
+ __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
+ __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ sebr(i.OutputDoubleRegister(), kScratchDoubleReg);
+ } else {
+ if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0))) {
+ __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ }
+ __ sebr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
+ break;
+ case kS390_SubDouble:
+ // OutputDoubleReg() = i.InputDoubleRegister(0) - i.InputDoubleRegister(1)
+ if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
+ __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
+ __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ sdbr(i.OutputDoubleRegister(), kScratchDoubleReg);
+ } else {
+ if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0))) {
+ __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ }
+ __ sdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
+ break;
+ case kS390_Mul32:
+#if V8_TARGET_ARCH_S390X
+ case kS390_Mul64:
+#endif
+ __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ break;
+ case kS390_MulHigh32:
+ __ LoadRR(r1, i.InputRegister(0));
+ __ mr_z(r0, i.InputRegister(1));
+ __ LoadW(i.OutputRegister(), r0);
+ break;
+ case kS390_MulHighU32:
+ __ LoadRR(r1, i.InputRegister(0));
+ __ mlr(r0, i.InputRegister(1));
+ __ LoadlW(i.OutputRegister(), r0);
+ break;
+ case kS390_MulFloat:
+ // Ensure we don't clobber right
+ if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
+ ASSEMBLE_FLOAT_UNOP(meebr);
+ } else {
+ if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
+ __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ meebr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
+ break;
+ case kS390_MulDouble:
+ // Ensure we don't clobber right
+ if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
+ ASSEMBLE_FLOAT_UNOP(mdbr);
+ } else {
+ if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
+ __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ mdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
+ break;
+#if V8_TARGET_ARCH_S390X
+ case kS390_Div64:
+ __ LoadRR(r1, i.InputRegister(0));
+ __ dsgr(r0, i.InputRegister(1)); // R1: Dividend
+ __ ltgr(i.OutputRegister(), r1); // Copy R1: Quotient to output
+ break;
+#endif
+ case kS390_Div32:
+ __ LoadRR(r0, i.InputRegister(0));
+ __ srda(r0, Operand(32));
+ __ dr(r0, i.InputRegister(1));
+ __ LoadAndTestP_ExtendSrc(i.OutputRegister(),
+ r1); // Copy R1: Quotient to output
+ break;
+#if V8_TARGET_ARCH_S390X
+ case kS390_DivU64:
+ __ LoadRR(r1, i.InputRegister(0));
+ __ LoadImmP(r0, Operand::Zero());
+ __ dlgr(r0, i.InputRegister(1)); // R0:R1: Dividend
+ __ ltgr(i.OutputRegister(), r1); // Copy R1: Quotient to output
+ break;
+#endif
+ case kS390_DivU32:
+ __ LoadRR(r0, i.InputRegister(0));
+ __ srdl(r0, Operand(32));
+ __ dlr(r0, i.InputRegister(1)); // R0:R1: Dividend
+ __ LoadlW(i.OutputRegister(), r1); // Copy R1: Quotient to output
+ __ LoadAndTestP_ExtendSrc(r1, r1);
+ break;
+
+ case kS390_DivFloat:
+ // InputDoubleRegister(1)=InputDoubleRegister(0)/InputDoubleRegister(1)
+ if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
+ __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
+ __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ debr(i.OutputDoubleRegister(), kScratchDoubleReg);
+ } else {
+ if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
+ __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ debr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
+ break;
+ case kS390_DivDouble:
+ // InputDoubleRegister(1)=InputDoubleRegister(0)/InputDoubleRegister(1)
+ if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
+ __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
+ __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ ddbr(i.OutputDoubleRegister(), kScratchDoubleReg);
+ } else {
+ if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
+ __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ ddbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
+ break;
+ case kS390_Mod32:
+ ASSEMBLE_MODULO(dr, srda);
+ break;
+ case kS390_ModU32:
+ ASSEMBLE_MODULO(dlr, srdl);
+ break;
+#if V8_TARGET_ARCH_S390X
+ case kS390_Mod64:
+ __ LoadRR(r1, i.InputRegister(0));
+ __ dsgr(r0, i.InputRegister(1)); // R1: Dividend
+ __ ltgr(i.OutputRegister(), r0); // Copy R0: Remainder to output
+ break;
+ case kS390_ModU64:
+ __ LoadRR(r1, i.InputRegister(0));
+ __ LoadImmP(r0, Operand::Zero());
+ __ dlgr(r0, i.InputRegister(1)); // R0:R1: Dividend
+ __ ltgr(i.OutputRegister(), r0); // Copy R0: Remainder to output
+ break;
+#endif
+ case kS390_AbsFloat:
+ __ lpebr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kS390_SqrtFloat:
+ ASSEMBLE_FLOAT_UNOP(sqebr);
+ break;
+ case kS390_FloorFloat:
+ __ fiebra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ v8::internal::Assembler::FIDBRA_ROUND_TOWARD_NEG_INF);
+ break;
+ case kS390_CeilFloat:
+ __ fiebra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ v8::internal::Assembler::FIDBRA_ROUND_TOWARD_POS_INF);
+ break;
+ case kS390_TruncateFloat:
+ __ fiebra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ v8::internal::Assembler::FIDBRA_ROUND_TOWARD_0);
+ break;
+ // Double operations
+ case kS390_ModDouble:
+ ASSEMBLE_FLOAT_MODULO();
+ break;
+ case kS390_Neg:
+ __ LoadComplementRR(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kS390_MaxDouble:
+ ASSEMBLE_FLOAT_MAX(kScratchDoubleReg, kScratchReg);
+ break;
+ case kS390_MinDouble:
+ ASSEMBLE_FLOAT_MIN(kScratchDoubleReg, kScratchReg);
+ break;
+ case kS390_AbsDouble:
+ __ lpdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kS390_SqrtDouble:
+ ASSEMBLE_FLOAT_UNOP(sqdbr);
+ break;
+ case kS390_FloorDouble:
+ __ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ v8::internal::Assembler::FIDBRA_ROUND_TOWARD_NEG_INF);
+ break;
+ case kS390_CeilDouble:
+ __ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ v8::internal::Assembler::FIDBRA_ROUND_TOWARD_POS_INF);
+ break;
+ case kS390_TruncateDouble:
+ __ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ v8::internal::Assembler::FIDBRA_ROUND_TOWARD_0);
+ break;
+ case kS390_RoundDouble:
+ __ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ v8::internal::Assembler::FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0);
+ break;
+ case kS390_NegDouble:
+ ASSEMBLE_FLOAT_UNOP(lcdbr);
+ break;
+ case kS390_Cntlz32: {
+ __ llgfr(i.OutputRegister(), i.InputRegister(0));
+ __ flogr(r0, i.OutputRegister());
+ __ LoadRR(i.OutputRegister(), r0);
+ __ SubP(i.OutputRegister(), Operand(32));
+ } break;
+#if V8_TARGET_ARCH_S390X
+ case kS390_Cntlz64: {
+ __ flogr(r0, i.InputRegister(0));
+ __ LoadRR(i.OutputRegister(), r0);
+ } break;
+#endif
+ case kS390_Popcnt32:
+ __ Popcnt32(i.OutputRegister(), i.InputRegister(0));
+ break;
+#if V8_TARGET_ARCH_S390X
+ case kS390_Popcnt64:
+ __ Popcnt64(i.OutputRegister(), i.InputRegister(0));
+ break;
+#endif
+ case kS390_Cmp32:
+ ASSEMBLE_COMPARE(Cmp32, CmpLogical32);
+ break;
+#if V8_TARGET_ARCH_S390X
+ case kS390_Cmp64:
+ ASSEMBLE_COMPARE(CmpP, CmpLogicalP);
+ break;
+#endif
+ case kS390_CmpFloat:
+ __ cebr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ break;
+ case kS390_CmpDouble:
+ __ cdbr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ break;
+ case kS390_Tst32:
+ if (HasRegisterInput(instr, 1)) {
+ __ AndP(r0, i.InputRegister(0), i.InputRegister(1));
+ } else {
+ __ AndP(r0, i.InputRegister(0), i.InputImmediate(1));
+ }
+ __ LoadAndTestP_ExtendSrc(r0, r0);
+ break;
+#if V8_TARGET_ARCH_S390X
+ case kS390_Tst64:
+ if (HasRegisterInput(instr, 1)) {
+ __ AndP(r0, i.InputRegister(0), i.InputRegister(1));
+ } else {
+ __ AndP(r0, i.InputRegister(0), i.InputImmediate(1));
+ }
+ break;
+#endif
+ case kS390_Push:
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ lay(sp, MemOperand(sp, -kDoubleSize));
+ __ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ } else {
+ __ Push(i.InputRegister(0));
+ frame_access_state()->IncreaseSPDelta(1);
+ }
+ break;
+ case kS390_PushFrame: {
+ int num_slots = i.InputInt32(1);
+ __ lay(sp, MemOperand(sp, -num_slots * kPointerSize));
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ StoreDouble(i.InputDoubleRegister(0),
+ MemOperand(sp));
+ } else {
+ __ StoreP(i.InputRegister(0),
+ MemOperand(sp));
+ }
+ break;
+ }
+ case kS390_StoreToStackSlot: {
+ int slot = i.InputInt32(1);
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ StoreDouble(i.InputDoubleRegister(0),
+ MemOperand(sp, slot * kPointerSize));
+ } else {
+ __ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize));
+ }
+ break;
+ }
+ case kS390_ExtendSignWord8:
+#if V8_TARGET_ARCH_S390X
+ __ lgbr(i.OutputRegister(), i.InputRegister(0));
+#else
+ __ lbr(i.OutputRegister(), i.InputRegister(0));
+#endif
+ break;
+ case kS390_ExtendSignWord16:
+#if V8_TARGET_ARCH_S390X
+ __ lghr(i.OutputRegister(), i.InputRegister(0));
+#else
+ __ lhr(i.OutputRegister(), i.InputRegister(0));
+#endif
+ break;
+#if V8_TARGET_ARCH_S390X
+ case kS390_ExtendSignWord32:
+ __ lgfr(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kS390_Uint32ToUint64:
+ // Zero extend
+ __ llgfr(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kS390_Int64ToInt32:
+ // sign extend
+ __ lgfr(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kS390_Int64ToFloat32:
+ __ ConvertInt64ToFloat(i.InputRegister(0), i.OutputDoubleRegister());
+ break;
+ case kS390_Int64ToDouble:
+ __ ConvertInt64ToDouble(i.InputRegister(0), i.OutputDoubleRegister());
+ break;
+ case kS390_Uint64ToFloat32:
+ __ ConvertUnsignedInt64ToFloat(i.InputRegister(0),
+ i.OutputDoubleRegister());
+ break;
+ case kS390_Uint64ToDouble:
+ __ ConvertUnsignedInt64ToDouble(i.InputRegister(0),
+ i.OutputDoubleRegister());
+ break;
+#endif
+ case kS390_Int32ToFloat32:
+ __ ConvertIntToFloat(i.InputRegister(0), i.OutputDoubleRegister());
+ break;
+ case kS390_Int32ToDouble:
+ __ ConvertIntToDouble(i.InputRegister(0), i.OutputDoubleRegister());
+ break;
+ case kS390_Uint32ToFloat32:
+ __ ConvertUnsignedIntToFloat(i.InputRegister(0),
+ i.OutputDoubleRegister());
+ break;
+ case kS390_Uint32ToDouble:
+ __ ConvertUnsignedIntToDouble(i.InputRegister(0),
+ i.OutputDoubleRegister());
+ break;
+ case kS390_DoubleToInt32:
+ case kS390_DoubleToUint32:
+ case kS390_DoubleToInt64: {
+#if V8_TARGET_ARCH_S390X
+ bool check_conversion =
+ (opcode == kS390_DoubleToInt64 && i.OutputCount() > 1);
+#endif
+ __ ConvertDoubleToInt64(i.InputDoubleRegister(0),
+#if !V8_TARGET_ARCH_S390X
+ kScratchReg,
+#endif
+ i.OutputRegister(0), kScratchDoubleReg);
+#if V8_TARGET_ARCH_S390X
+ if (check_conversion) {
+ Label conversion_done;
+ __ LoadImmP(i.OutputRegister(1), Operand::Zero());
+ __ b(Condition(1), &conversion_done); // special case
+ __ LoadImmP(i.OutputRegister(1), Operand(1));
+ __ bind(&conversion_done);
+ }
+#endif
+ break;
+ }
+ case kS390_Float32ToInt32: {
+ bool check_conversion = (i.OutputCount() > 1);
+ __ ConvertFloat32ToInt32(i.InputDoubleRegister(0), i.OutputRegister(0),
+ kScratchDoubleReg);
+ if (check_conversion) {
+ Label conversion_done;
+ __ LoadImmP(i.OutputRegister(1), Operand::Zero());
+ __ b(Condition(1), &conversion_done); // special case
+ __ LoadImmP(i.OutputRegister(1), Operand(1));
+ __ bind(&conversion_done);
+ }
+ break;
+ }
+ case kS390_Float32ToUint32: {
+ bool check_conversion = (i.OutputCount() > 1);
+ __ ConvertFloat32ToUnsignedInt32(i.InputDoubleRegister(0),
+ i.OutputRegister(0), kScratchDoubleReg);
+ if (check_conversion) {
+ Label conversion_done;
+ __ LoadImmP(i.OutputRegister(1), Operand::Zero());
+ __ b(Condition(1), &conversion_done); // special case
+ __ LoadImmP(i.OutputRegister(1), Operand(1));
+ __ bind(&conversion_done);
+ }
+ break;
+ }
+#if V8_TARGET_ARCH_S390X
+ case kS390_Float32ToUint64: {
+ bool check_conversion = (i.OutputCount() > 1);
+ __ ConvertFloat32ToUnsignedInt64(i.InputDoubleRegister(0),
+ i.OutputRegister(0), kScratchDoubleReg);
+ if (check_conversion) {
+ Label conversion_done;
+ __ LoadImmP(i.OutputRegister(1), Operand::Zero());
+ __ b(Condition(1), &conversion_done); // special case
+ __ LoadImmP(i.OutputRegister(1), Operand(1));
+ __ bind(&conversion_done);
+ }
+ break;
+ }
+#endif
+ case kS390_Float32ToInt64: {
+#if V8_TARGET_ARCH_S390X
+ bool check_conversion =
+ (opcode == kS390_Float32ToInt64 && i.OutputCount() > 1);
+#endif
+ __ ConvertFloat32ToInt64(i.InputDoubleRegister(0),
+#if !V8_TARGET_ARCH_S390X
+ kScratchReg,
+#endif
+ i.OutputRegister(0), kScratchDoubleReg);
+#if V8_TARGET_ARCH_S390X
+ if (check_conversion) {
+ Label conversion_done;
+ __ LoadImmP(i.OutputRegister(1), Operand::Zero());
+ __ b(Condition(1), &conversion_done); // special case
+ __ LoadImmP(i.OutputRegister(1), Operand(1));
+ __ bind(&conversion_done);
+ }
+#endif
+ break;
+ }
+#if V8_TARGET_ARCH_S390X
+ case kS390_DoubleToUint64: {
+ bool check_conversion = (i.OutputCount() > 1);
+ __ ConvertDoubleToUnsignedInt64(i.InputDoubleRegister(0),
+ i.OutputRegister(0), kScratchDoubleReg);
+ if (check_conversion) {
+ Label conversion_done;
+ __ LoadImmP(i.OutputRegister(1), Operand::Zero());
+ __ b(Condition(1), &conversion_done); // special case
+ __ LoadImmP(i.OutputRegister(1), Operand(1));
+ __ bind(&conversion_done);
+ }
+ break;
+ }
+#endif
+ case kS390_DoubleToFloat32:
+ __ ledbr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kS390_Float32ToDouble:
+ __ ldebr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kS390_DoubleExtractLowWord32:
+ __ lgdr(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ llgfr(i.OutputRegister(), i.OutputRegister());
+ break;
+ case kS390_DoubleExtractHighWord32:
+ __ lgdr(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(32));
+ break;
+ case kS390_DoubleInsertLowWord32:
+ __ lgdr(kScratchReg, i.OutputDoubleRegister());
+ __ lr(kScratchReg, i.InputRegister(1));
+ __ ldgr(i.OutputDoubleRegister(), kScratchReg);
+ break;
+ case kS390_DoubleInsertHighWord32:
+ __ sllg(kScratchReg, i.InputRegister(1), Operand(32));
+ __ lgdr(r0, i.OutputDoubleRegister());
+ __ lr(kScratchReg, r0);
+ __ ldgr(i.OutputDoubleRegister(), kScratchReg);
+ break;
+ case kS390_DoubleConstruct:
+ __ sllg(kScratchReg, i.InputRegister(0), Operand(32));
+ __ lr(kScratchReg, i.InputRegister(1));
+
+ // Bitwise convert from GPR to FPR
+ __ ldgr(i.OutputDoubleRegister(), kScratchReg);
+ break;
+ case kS390_LoadWordS8:
+ ASSEMBLE_LOAD_INTEGER(LoadlB);
+#if V8_TARGET_ARCH_S390X
+ __ lgbr(i.OutputRegister(), i.OutputRegister());
+#else
+ __ lbr(i.OutputRegister(), i.OutputRegister());
+#endif
+ break;
+ case kS390_BitcastFloat32ToInt32:
+ __ MovFloatToInt(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kS390_BitcastInt32ToFloat32:
+ __ MovIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+#if V8_TARGET_ARCH_S390X
+ case kS390_BitcastDoubleToInt64:
+ __ MovDoubleToInt64(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kS390_BitcastInt64ToDouble:
+ __ MovInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+#endif
+ case kS390_LoadWordU8:
+ ASSEMBLE_LOAD_INTEGER(LoadlB);
+ break;
+ case kS390_LoadWordU16:
+ ASSEMBLE_LOAD_INTEGER(LoadLogicalHalfWordP);
+ break;
+ case kS390_LoadWordS16:
+ ASSEMBLE_LOAD_INTEGER(LoadHalfWordP);
+ break;
+ case kS390_LoadWordS32:
+ ASSEMBLE_LOAD_INTEGER(LoadW);
+ break;
+#if V8_TARGET_ARCH_S390X
+ case kS390_LoadWord64:
+ ASSEMBLE_LOAD_INTEGER(lg);
+ break;
+#endif
+ case kS390_LoadFloat32:
+ ASSEMBLE_LOAD_FLOAT(LoadFloat32);
+ break;
+ case kS390_LoadDouble:
+ ASSEMBLE_LOAD_FLOAT(LoadDouble);
+ break;
+ case kS390_StoreWord8:
+ ASSEMBLE_STORE_INTEGER(StoreByte);
+ break;
+ case kS390_StoreWord16:
+ ASSEMBLE_STORE_INTEGER(StoreHalfWord);
+ break;
+ case kS390_StoreWord32:
+ ASSEMBLE_STORE_INTEGER(StoreW);
+ break;
+#if V8_TARGET_ARCH_S390X
+ case kS390_StoreWord64:
+ ASSEMBLE_STORE_INTEGER(StoreP);
+ break;
+#endif
+ case kS390_StoreFloat32:
+ ASSEMBLE_STORE_FLOAT32();
+ break;
+ case kS390_StoreDouble:
+ ASSEMBLE_STORE_DOUBLE();
+ break;
+ case kCheckedLoadInt8:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlB);
+#if V8_TARGET_ARCH_S390X
+ __ lgbr(i.OutputRegister(), i.OutputRegister());
+#else
+ __ lbr(i.OutputRegister(), i.OutputRegister());
+#endif
+ break;
+ case kCheckedLoadUint8:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlB);
+ break;
+ case kCheckedLoadInt16:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(LoadHalfWordP);
+ break;
+ case kCheckedLoadUint16:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(LoadLogicalHalfWordP);
+ break;
+ case kCheckedLoadWord32:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(LoadW);
+ break;
+ case kCheckedLoadWord64:
+#if V8_TARGET_ARCH_S390X
+ ASSEMBLE_CHECKED_LOAD_INTEGER(LoadP);
+#else
+ UNREACHABLE();
+#endif
+ break;
+ case kCheckedLoadFloat32:
+ ASSEMBLE_CHECKED_LOAD_FLOAT(LoadFloat32, 32);
+ break;
+ case kCheckedLoadFloat64:
+ ASSEMBLE_CHECKED_LOAD_FLOAT(LoadDouble, 64);
+ break;
+ case kCheckedStoreWord8:
+ ASSEMBLE_CHECKED_STORE_INTEGER(StoreByte);
+ break;
+ case kCheckedStoreWord16:
+ ASSEMBLE_CHECKED_STORE_INTEGER(StoreHalfWord);
+ break;
+ case kCheckedStoreWord32:
+ ASSEMBLE_CHECKED_STORE_INTEGER(StoreW);
+ break;
+ case kCheckedStoreWord64:
+#if V8_TARGET_ARCH_S390X
+ ASSEMBLE_CHECKED_STORE_INTEGER(StoreP);
+#else
+ UNREACHABLE();
+#endif
+ break;
+ case kCheckedStoreFloat32:
+ ASSEMBLE_CHECKED_STORE_FLOAT32();
+ break;
+ case kCheckedStoreFloat64:
+ ASSEMBLE_CHECKED_STORE_DOUBLE();
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+} // NOLINT(readability/fn_size)
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+ S390OperandConverter i(this, instr);
+ Label* tlabel = branch->true_label;
+ Label* flabel = branch->false_label;
+ ArchOpcode op = instr->arch_opcode();
+ FlagsCondition condition = branch->condition;
+
+ Condition cond = FlagsConditionToCondition(condition, op);
+ if (op == kS390_CmpDouble) {
+ // check for unordered if necessary
+ // Branching to flabel/tlabel according to what's expected by tests
+ if (cond == le || cond == eq || cond == lt) {
+ __ bunordered(flabel);
+ } else if (cond == gt || cond == ne || cond == ge) {
+ __ bunordered(tlabel);
+ }
+ }
+ __ b(cond, tlabel);
+ if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
+}
+
+void CodeGenerator::AssembleArchJump(RpoNumber target) {
+ if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
+}
+
+// Assembles boolean materializations after an instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+ FlagsCondition condition) {
+ S390OperandConverter i(this, instr);
+ Label done;
+ ArchOpcode op = instr->arch_opcode();
+ bool check_unordered = (op == kS390_CmpDouble || kS390_CmpFloat);
+
+ // Overflow checked for add/sub only.
+ DCHECK((condition != kOverflow && condition != kNotOverflow) ||
+ (op == kS390_AddWithOverflow32 || op == kS390_SubWithOverflow32) ||
+ (op == kS390_Add || op == kS390_Sub));
+
+ // Materialize a full 32-bit 1 or 0 value. The result register is always the
+ // last output of the instruction.
+ DCHECK_NE(0u, instr->OutputCount());
+ Register reg = i.OutputRegister(instr->OutputCount() - 1);
+ Condition cond = FlagsConditionToCondition(condition, op);
+ switch (cond) {
+ case ne:
+ case ge:
+ case gt:
+ if (check_unordered) {
+ __ LoadImmP(reg, Operand(1));
+ __ LoadImmP(kScratchReg, Operand::Zero());
+ __ bunordered(&done);
+ Label cond_true;
+ __ b(cond, &cond_true, Label::kNear);
+ __ LoadRR(reg, kScratchReg);
+ __ bind(&cond_true);
+ } else {
+ Label cond_true, done_here;
+ __ LoadImmP(reg, Operand(1));
+ __ b(cond, &cond_true, Label::kNear);
+ __ LoadImmP(reg, Operand::Zero());
+ __ bind(&cond_true);
+ }
+ break;
+ case eq:
+ case lt:
+ case le:
+ if (check_unordered) {
+ __ LoadImmP(reg, Operand::Zero());
+ __ LoadImmP(kScratchReg, Operand(1));
+ __ bunordered(&done);
+ Label cond_false;
+ __ b(NegateCondition(cond), &cond_false, Label::kNear);
+ __ LoadRR(reg, kScratchReg);
+ __ bind(&cond_false);
+ } else {
+ __ LoadImmP(reg, Operand::Zero());
+ Label cond_false;
+ __ b(NegateCondition(cond), &cond_false, Label::kNear);
+ __ LoadImmP(reg, Operand(1));
+ __ bind(&cond_false);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ __ bind(&done);
+}
+
+void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
+ S390OperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ __ CmpP(input, Operand(i.InputInt32(index + 0)));
+ __ beq(GetLabel(i.InputRpo(index + 1)));
+ }
+ AssembleArchJump(i.InputRpo(1));
+}
+
+void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
+ S390OperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
+ Label** cases = zone()->NewArray<Label*>(case_count);
+ for (int32_t index = 0; index < case_count; ++index) {
+ cases[index] = GetLabel(i.InputRpo(index + 2));
+ }
+ Label* const table = AddJumpTable(cases, case_count);
+ __ CmpLogicalP(input, Operand(case_count));
+ __ bge(GetLabel(i.InputRpo(1)));
+ __ larl(kScratchReg, table);
+ __ ShiftLeftP(r1, input, Operand(kPointerSizeLog2));
+ __ LoadP(kScratchReg, MemOperand(kScratchReg, r1));
+ __ Jump(kScratchReg);
+}
+
+void CodeGenerator::AssembleDeoptimizerCall(
+ int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
+ Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+ isolate(), deoptimization_id, bailout_type);
+ // TODO(turbofan): We should be able to generate better code by sharing the
+ // actual final call site and just bl'ing to it here, similar to what we do
+ // in the lithium backend.
+ __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+}
+
+void CodeGenerator::AssemblePrologue() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+
+ if (frame_access_state()->has_frame()) {
+ if (descriptor->IsCFunctionCall()) {
+ __ Push(r14, fp);
+ __ LoadRR(fp, sp);
+ } else if (descriptor->IsJSFunctionCall()) {
+ __ Prologue(this->info()->GeneratePreagedPrologue(), ip);
+ } else {
+ StackFrame::Type type = info()->GetOutputStackFrameType();
+ // TODO(mbrandy): Detect cases where ip is the entrypoint (for
+ // efficient intialization of the constant pool pointer register).
+ __ StubPrologue(type);
+ }
+ }
+
+ int stack_shrink_slots = frame()->GetSpillSlotCount();
+ if (info()->is_osr()) {
+ // TurboFan OSR-compiled functions cannot be entered directly.
+ __ Abort(kShouldNotDirectlyEnterOsrFunction);
+
+ // Unoptimized code jumps directly to this entrypoint while the unoptimized
+ // frame is still on the stack. Optimized code uses OSR values directly from
+ // the unoptimized frame. Thus, all that needs to be done is to allocate the
+ // remaining stack slots.
+ if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+ osr_pc_offset_ = __ pc_offset();
+ stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ }
+
+ const RegList double_saves = descriptor->CalleeSavedFPRegisters();
+ if (double_saves != 0) {
+ stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
+ }
+ if (stack_shrink_slots > 0) {
+ __ lay(sp, MemOperand(sp, -stack_shrink_slots * kPointerSize));
+ }
+
+ // Save callee-saved Double registers.
+ if (double_saves != 0) {
+ __ MultiPushDoubles(double_saves);
+ DCHECK(kNumCalleeSavedDoubles ==
+ base::bits::CountPopulation32(double_saves));
+ frame()->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
+ (kDoubleSize / kPointerSize));
+ }
+
+ // Save callee-saved registers.
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ __ MultiPush(saves);
+ // register save area does not include the fp or constant pool pointer.
+ const int num_saves =
+ kNumCalleeSaved - 1 - (FLAG_enable_embedded_constant_pool ? 1 : 0);
+ DCHECK(num_saves == base::bits::CountPopulation32(saves));
+ frame()->AllocateSavedCalleeRegisterSlots(num_saves);
+ }
+}
+
+void CodeGenerator::AssembleReturn() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ int pop_count = static_cast<int>(descriptor->StackParameterCount());
+
+ // Restore registers.
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ __ MultiPop(saves);
+ }
+
+ // Restore double registers.
+ const RegList double_saves = descriptor->CalleeSavedFPRegisters();
+ if (double_saves != 0) {
+ __ MultiPopDoubles(double_saves);
+ }
+
+ if (descriptor->IsCFunctionCall()) {
+ AssembleDeconstructFrame();
+ } else if (frame_access_state()->has_frame()) {
+ // Canonicalize JSFunction return sites for now.
+ if (return_label_.is_bound()) {
+ __ b(&return_label_);
+ return;
+ } else {
+ __ bind(&return_label_);
+ AssembleDeconstructFrame();
+ }
+ }
+ __ Ret(pop_count);
+}
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+ InstructionOperand* destination) {
+ S390OperandConverter g(this, nullptr);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ __ Move(g.ToRegister(destination), src);
+ } else {
+ __ StoreP(src, g.ToMemOperand(destination));
+ }
+ } else if (source->IsStackSlot()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ MemOperand src = g.ToMemOperand(source);
+ if (destination->IsRegister()) {
+ __ LoadP(g.ToRegister(destination), src);
+ } else {
+ Register temp = kScratchReg;
+ __ LoadP(temp, src, r0);
+ __ StoreP(temp, g.ToMemOperand(destination));
+ }
+ } else if (source->IsConstant()) {
+ Constant src = g.ToConstant(source);
+ if (destination->IsRegister() || destination->IsStackSlot()) {
+ Register dst =
+ destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
+ switch (src.type()) {
+ case Constant::kInt32:
+ __ mov(dst, Operand(src.ToInt32()));
+ break;
+ case Constant::kInt64:
+ __ mov(dst, Operand(src.ToInt64()));
+ break;
+ case Constant::kFloat32:
+ __ Move(dst,
+ isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
+ break;
+ case Constant::kFloat64:
+ __ Move(dst,
+ isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
+ break;
+ case Constant::kExternalReference:
+ __ mov(dst, Operand(src.ToExternalReference()));
+ break;
+ case Constant::kHeapObject: {
+ Handle<HeapObject> src_object = src.ToHeapObject();
+ Heap::RootListIndex index;
+ int slot;
+ if (IsMaterializableFromFrame(src_object, &slot)) {
+ __ LoadP(dst, g.SlotToMemOperand(slot));
+ } else if (IsMaterializableFromRoot(src_object, &index)) {
+ __ LoadRoot(dst, index);
+ } else {
+ __ Move(dst, src_object);
+ }
+ break;
+ }
+ case Constant::kRpoNumber:
+ UNREACHABLE(); // TODO(dcarney): loading RPO constants on S390.
+ break;
+ }
+ if (destination->IsStackSlot()) {
+ __ StoreP(dst, g.ToMemOperand(destination), r0);
+ }
+ } else {
+ DoubleRegister dst = destination->IsDoubleRegister()
+ ? g.ToDoubleRegister(destination)
+ : kScratchDoubleReg;
+ double value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
+ : src.ToFloat64();
+ if (src.type() == Constant::kFloat32) {
+ __ LoadFloat32Literal(dst, src.ToFloat32(), kScratchReg);
+ } else {
+ __ LoadDoubleLiteral(dst, value, kScratchReg);
+ }
+
+ if (destination->IsDoubleStackSlot()) {
+ __ StoreDouble(dst, g.ToMemOperand(destination));
+ }
+ }
+ } else if (source->IsDoubleRegister()) {
+ DoubleRegister src = g.ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ DoubleRegister dst = g.ToDoubleRegister(destination);
+ __ Move(dst, src);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ __ StoreDouble(src, g.ToMemOperand(destination));
+ }
+ } else if (source->IsDoubleStackSlot()) {
+ DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ MemOperand src = g.ToMemOperand(source);
+ if (destination->IsDoubleRegister()) {
+ __ LoadDouble(g.ToDoubleRegister(destination), src);
+ } else {
+ DoubleRegister temp = kScratchDoubleReg;
+ __ LoadDouble(temp, src);
+ __ StoreDouble(temp, g.ToMemOperand(destination));
+ }
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+ InstructionOperand* destination) {
+ S390OperandConverter g(this, nullptr);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ // Register-register.
+ Register temp = kScratchReg;
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ __ LoadRR(temp, src);
+ __ LoadRR(src, dst);
+ __ LoadRR(dst, temp);
+ } else {
+ DCHECK(destination->IsStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ LoadRR(temp, src);
+ __ LoadP(src, dst);
+ __ StoreP(temp, dst);
+ }
+#if V8_TARGET_ARCH_S390X
+ } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
+#else
+ } else if (source->IsStackSlot()) {
+ DCHECK(destination->IsStackSlot());
+#endif
+ Register temp_0 = kScratchReg;
+ Register temp_1 = r0;
+ MemOperand src = g.ToMemOperand(source);
+ MemOperand dst = g.ToMemOperand(destination);
+ __ LoadP(temp_0, src);
+ __ LoadP(temp_1, dst);
+ __ StoreP(temp_0, dst);
+ __ StoreP(temp_1, src);
+ } else if (source->IsDoubleRegister()) {
+ DoubleRegister temp = kScratchDoubleReg;
+ DoubleRegister src = g.ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ DoubleRegister dst = g.ToDoubleRegister(destination);
+ __ ldr(temp, src);
+ __ ldr(src, dst);
+ __ ldr(dst, temp);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ ldr(temp, src);
+ __ LoadDouble(src, dst);
+ __ StoreDouble(temp, dst);
+ }
+#if !V8_TARGET_ARCH_S390X
+ } else if (source->IsDoubleStackSlot()) {
+ DCHECK(destination->IsDoubleStackSlot());
+ DoubleRegister temp_0 = kScratchDoubleReg;
+ DoubleRegister temp_1 = d0;
+ MemOperand src = g.ToMemOperand(source);
+ MemOperand dst = g.ToMemOperand(destination);
+ // TODO(joransiu): MVC opportunity
+ __ LoadDouble(temp_0, src);
+ __ LoadDouble(temp_1, dst);
+ __ StoreDouble(temp_0, dst);
+ __ StoreDouble(temp_1, src);
+#endif
+ } else {
+ // No other combinations are possible.
+ UNREACHABLE();
+ }
+}
+
+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
+ for (size_t index = 0; index < target_count; ++index) {
+ __ emit_label_addr(targets[index]);
+ }
+}
+
+void CodeGenerator::AddNopForSmiCodeInlining() {
+ // We do not insert nops for inlined Smi code.
+}
+
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+ if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
+ return;
+ }
+
+ int space_needed = Deoptimizer::patch_size();
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ DCHECK_EQ(0, padding_size % 2);
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= 2;
+ }
+ }
+}
+
+#undef __
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/s390/instruction-codes-s390.h b/deps/v8/src/compiler/s390/instruction-codes-s390.h
new file mode 100644
index 0000000000..a32f8753f3
--- /dev/null
+++ b/deps/v8/src/compiler/s390/instruction-codes-s390.h
@@ -0,0 +1,160 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_S390_INSTRUCTION_CODES_S390_H_
+#define V8_COMPILER_S390_INSTRUCTION_CODES_S390_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// S390-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(S390_And) \
+ V(S390_AndComplement) \
+ V(S390_Or) \
+ V(S390_OrComplement) \
+ V(S390_Xor) \
+ V(S390_ShiftLeft32) \
+ V(S390_ShiftLeft64) \
+ V(S390_ShiftLeftPair) \
+ V(S390_ShiftRight32) \
+ V(S390_ShiftRight64) \
+ V(S390_ShiftRightPair) \
+ V(S390_ShiftRightArith32) \
+ V(S390_ShiftRightArith64) \
+ V(S390_ShiftRightArithPair) \
+ V(S390_RotRight32) \
+ V(S390_RotRight64) \
+ V(S390_Not) \
+ V(S390_RotLeftAndMask32) \
+ V(S390_RotLeftAndClear64) \
+ V(S390_RotLeftAndClearLeft64) \
+ V(S390_RotLeftAndClearRight64) \
+ V(S390_Add) \
+ V(S390_AddWithOverflow32) \
+ V(S390_AddPair) \
+ V(S390_AddFloat) \
+ V(S390_AddDouble) \
+ V(S390_Sub) \
+ V(S390_SubWithOverflow32) \
+ V(S390_SubFloat) \
+ V(S390_SubDouble) \
+ V(S390_SubPair) \
+ V(S390_MulPair) \
+ V(S390_Mul32) \
+ V(S390_Mul64) \
+ V(S390_MulHigh32) \
+ V(S390_MulHighU32) \
+ V(S390_MulFloat) \
+ V(S390_MulDouble) \
+ V(S390_Div32) \
+ V(S390_Div64) \
+ V(S390_DivU32) \
+ V(S390_DivU64) \
+ V(S390_DivFloat) \
+ V(S390_DivDouble) \
+ V(S390_Mod32) \
+ V(S390_Mod64) \
+ V(S390_ModU32) \
+ V(S390_ModU64) \
+ V(S390_ModDouble) \
+ V(S390_Neg) \
+ V(S390_NegDouble) \
+ V(S390_SqrtFloat) \
+ V(S390_FloorFloat) \
+ V(S390_CeilFloat) \
+ V(S390_TruncateFloat) \
+ V(S390_AbsFloat) \
+ V(S390_SqrtDouble) \
+ V(S390_FloorDouble) \
+ V(S390_CeilDouble) \
+ V(S390_TruncateDouble) \
+ V(S390_RoundDouble) \
+ V(S390_MaxDouble) \
+ V(S390_MinDouble) \
+ V(S390_AbsDouble) \
+ V(S390_Cntlz32) \
+ V(S390_Cntlz64) \
+ V(S390_Popcnt32) \
+ V(S390_Popcnt64) \
+ V(S390_Cmp32) \
+ V(S390_Cmp64) \
+ V(S390_CmpFloat) \
+ V(S390_CmpDouble) \
+ V(S390_Tst32) \
+ V(S390_Tst64) \
+ V(S390_Push) \
+ V(S390_PushFrame) \
+ V(S390_StoreToStackSlot) \
+ V(S390_ExtendSignWord8) \
+ V(S390_ExtendSignWord16) \
+ V(S390_ExtendSignWord32) \
+ V(S390_Uint32ToUint64) \
+ V(S390_Int64ToInt32) \
+ V(S390_Int64ToFloat32) \
+ V(S390_Int64ToDouble) \
+ V(S390_Uint64ToFloat32) \
+ V(S390_Uint64ToDouble) \
+ V(S390_Int32ToFloat32) \
+ V(S390_Int32ToDouble) \
+ V(S390_Uint32ToFloat32) \
+ V(S390_Uint32ToDouble) \
+ V(S390_Float32ToInt64) \
+ V(S390_Float32ToUint64) \
+ V(S390_Float32ToInt32) \
+ V(S390_Float32ToUint32) \
+ V(S390_Float32ToDouble) \
+ V(S390_DoubleToInt32) \
+ V(S390_DoubleToUint32) \
+ V(S390_DoubleToInt64) \
+ V(S390_DoubleToUint64) \
+ V(S390_DoubleToFloat32) \
+ V(S390_DoubleExtractLowWord32) \
+ V(S390_DoubleExtractHighWord32) \
+ V(S390_DoubleInsertLowWord32) \
+ V(S390_DoubleInsertHighWord32) \
+ V(S390_DoubleConstruct) \
+ V(S390_BitcastInt32ToFloat32) \
+ V(S390_BitcastFloat32ToInt32) \
+ V(S390_BitcastInt64ToDouble) \
+ V(S390_BitcastDoubleToInt64) \
+ V(S390_LoadWordS8) \
+ V(S390_LoadWordU8) \
+ V(S390_LoadWordS16) \
+ V(S390_LoadWordU16) \
+ V(S390_LoadWordS32) \
+ V(S390_LoadWord64) \
+ V(S390_LoadFloat32) \
+ V(S390_LoadDouble) \
+ V(S390_StoreWord8) \
+ V(S390_StoreWord16) \
+ V(S390_StoreWord32) \
+ V(S390_StoreWord64) \
+ V(S390_StoreFloat32) \
+ V(S390_StoreDouble)
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MRI = [register + immediate]
+// MRR = [register + register]
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+ V(MRI) /* [%r0 + K] */ \
+ V(MRR) /* [%r0 + %r1] */
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_S390_INSTRUCTION_CODES_S390_H_
diff --git a/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc b/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
new file mode 100644
index 0000000000..2d98e1109d
--- /dev/null
+++ b/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
@@ -0,0 +1,163 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool InstructionScheduler::SchedulerSupported() { return true; }
+
+int InstructionScheduler::GetTargetInstructionFlags(
+ const Instruction* instr) const {
+ switch (instr->arch_opcode()) {
+ case kS390_And:
+ case kS390_AndComplement:
+ case kS390_Or:
+ case kS390_OrComplement:
+ case kS390_Xor:
+ case kS390_ShiftLeft32:
+ case kS390_ShiftLeft64:
+ case kS390_ShiftLeftPair:
+ case kS390_ShiftRight32:
+ case kS390_ShiftRight64:
+ case kS390_ShiftRightPair:
+ case kS390_ShiftRightArith32:
+ case kS390_ShiftRightArith64:
+ case kS390_ShiftRightArithPair:
+ case kS390_RotRight32:
+ case kS390_RotRight64:
+ case kS390_Not:
+ case kS390_RotLeftAndMask32:
+ case kS390_RotLeftAndClear64:
+ case kS390_RotLeftAndClearLeft64:
+ case kS390_RotLeftAndClearRight64:
+ case kS390_Add:
+ case kS390_AddWithOverflow32:
+ case kS390_AddPair:
+ case kS390_AddFloat:
+ case kS390_AddDouble:
+ case kS390_Sub:
+ case kS390_SubWithOverflow32:
+ case kS390_SubPair:
+ case kS390_MulPair:
+ case kS390_SubFloat:
+ case kS390_SubDouble:
+ case kS390_Mul32:
+ case kS390_Mul64:
+ case kS390_MulHigh32:
+ case kS390_MulHighU32:
+ case kS390_MulFloat:
+ case kS390_MulDouble:
+ case kS390_Div32:
+ case kS390_Div64:
+ case kS390_DivU32:
+ case kS390_DivU64:
+ case kS390_DivFloat:
+ case kS390_DivDouble:
+ case kS390_Mod32:
+ case kS390_Mod64:
+ case kS390_ModU32:
+ case kS390_ModU64:
+ case kS390_ModDouble:
+ case kS390_Neg:
+ case kS390_NegDouble:
+ case kS390_SqrtFloat:
+ case kS390_FloorFloat:
+ case kS390_CeilFloat:
+ case kS390_TruncateFloat:
+ case kS390_AbsFloat:
+ case kS390_SqrtDouble:
+ case kS390_FloorDouble:
+ case kS390_CeilDouble:
+ case kS390_TruncateDouble:
+ case kS390_RoundDouble:
+ case kS390_MaxDouble:
+ case kS390_MinDouble:
+ case kS390_AbsDouble:
+ case kS390_Cntlz32:
+ case kS390_Cntlz64:
+ case kS390_Popcnt32:
+ case kS390_Popcnt64:
+ case kS390_Cmp32:
+ case kS390_Cmp64:
+ case kS390_CmpFloat:
+ case kS390_CmpDouble:
+ case kS390_Tst32:
+ case kS390_Tst64:
+ case kS390_ExtendSignWord8:
+ case kS390_ExtendSignWord16:
+ case kS390_ExtendSignWord32:
+ case kS390_Uint32ToUint64:
+ case kS390_Int64ToInt32:
+ case kS390_Int64ToFloat32:
+ case kS390_Int64ToDouble:
+ case kS390_Uint64ToFloat32:
+ case kS390_Uint64ToDouble:
+ case kS390_Int32ToFloat32:
+ case kS390_Int32ToDouble:
+ case kS390_Uint32ToFloat32:
+ case kS390_Uint32ToDouble:
+ case kS390_Float32ToInt32:
+ case kS390_Float32ToUint32:
+ case kS390_Float32ToUint64:
+ case kS390_Float32ToDouble:
+ case kS390_DoubleToInt32:
+ case kS390_DoubleToUint32:
+ case kS390_Float32ToInt64:
+ case kS390_DoubleToInt64:
+ case kS390_DoubleToUint64:
+ case kS390_DoubleToFloat32:
+ case kS390_DoubleExtractLowWord32:
+ case kS390_DoubleExtractHighWord32:
+ case kS390_DoubleInsertLowWord32:
+ case kS390_DoubleInsertHighWord32:
+ case kS390_DoubleConstruct:
+ case kS390_BitcastInt32ToFloat32:
+ case kS390_BitcastFloat32ToInt32:
+ case kS390_BitcastInt64ToDouble:
+ case kS390_BitcastDoubleToInt64:
+ return kNoOpcodeFlags;
+
+ case kS390_LoadWordS8:
+ case kS390_LoadWordU8:
+ case kS390_LoadWordS16:
+ case kS390_LoadWordU16:
+ case kS390_LoadWordS32:
+ case kS390_LoadWord64:
+ case kS390_LoadFloat32:
+ case kS390_LoadDouble:
+ return kIsLoadOperation;
+
+ case kS390_StoreWord8:
+ case kS390_StoreWord16:
+ case kS390_StoreWord32:
+ case kS390_StoreWord64:
+ case kS390_StoreFloat32:
+ case kS390_StoreDouble:
+ case kS390_Push:
+ case kS390_PushFrame:
+ case kS390_StoreToStackSlot:
+ return kHasSideEffect;
+
+#define CASE(Name) case k##Name:
+ COMMON_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+ // Already covered in architecture independent code.
+ UNREACHABLE();
+ }
+
+ UNREACHABLE();
+ return kNoOpcodeFlags;
+}
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+ // TODO(all): Add instruction cost modeling.
+ return 1;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
new file mode 100644
index 0000000000..8a4af5e65c
--- /dev/null
+++ b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
@@ -0,0 +1,1769 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/adapters.h"
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+#include "src/s390/frames-s390.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+enum ImmediateMode {
+ kInt16Imm,
+ kInt16Imm_Unsigned,
+ kInt16Imm_Negate,
+ kInt16Imm_4ByteAligned,
+ kShift32Imm,
+ kShift64Imm,
+ kNoImmediate
+};
+
+// Adds S390-specific methods for generating operands.
+class S390OperandGenerator final : public OperandGenerator {
+ public:
+ explicit S390OperandGenerator(InstructionSelector* selector)
+ : OperandGenerator(selector) {}
+
+ InstructionOperand UseOperand(Node* node, ImmediateMode mode) {
+ if (CanBeImmediate(node, mode)) {
+ return UseImmediate(node);
+ }
+ return UseRegister(node);
+ }
+
+ bool CanBeImmediate(Node* node, ImmediateMode mode) {
+ int64_t value;
+ if (node->opcode() == IrOpcode::kInt32Constant)
+ value = OpParameter<int32_t>(node);
+ else if (node->opcode() == IrOpcode::kInt64Constant)
+ value = OpParameter<int64_t>(node);
+ else
+ return false;
+ return CanBeImmediate(value, mode);
+ }
+
+ bool CanBeImmediate(int64_t value, ImmediateMode mode) {
+ switch (mode) {
+ case kInt16Imm:
+ return is_int16(value);
+ case kInt16Imm_Unsigned:
+ return is_uint16(value);
+ case kInt16Imm_Negate:
+ return is_int16(-value);
+ case kInt16Imm_4ByteAligned:
+ return is_int16(value) && !(value & 3);
+ case kShift32Imm:
+ return 0 <= value && value < 32;
+ case kShift64Imm:
+ return 0 <= value && value < 64;
+ case kNoImmediate:
+ return false;
+ }
+ return false;
+ }
+};
+
+namespace {
+
+void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+ S390OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+ S390OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+}
+
+void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
+ ImmediateMode operand_mode) {
+ S390OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseOperand(node->InputAt(1), operand_mode));
+}
+
+#if V8_TARGET_ARCH_S390X
+void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ S390OperandGenerator g(selector);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ selector->Emit(opcode, output_count, outputs, 1, inputs);
+}
+#endif
+
+// Shared routine for multiple binary operations.
+template <typename Matcher>
+void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, ImmediateMode operand_mode,
+ FlagsContinuation* cont) {
+ S390OperandGenerator g(selector);
+ Matcher m(node);
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+
+ inputs[input_count++] = g.UseRegister(m.left().node());
+ inputs[input_count++] = g.UseOperand(m.right().node(), operand_mode);
+
+ if (cont->IsBranch()) {
+ inputs[input_count++] = g.Label(cont->true_block());
+ inputs[input_count++] = g.Label(cont->false_block());
+ }
+
+ outputs[output_count++] = g.DefineAsRegister(node);
+ if (cont->IsSet()) {
+ outputs[output_count++] = g.DefineAsRegister(cont->result());
+ }
+
+ DCHECK_NE(0u, input_count);
+ DCHECK_NE(0u, output_count);
+ DCHECK_GE(arraysize(inputs), input_count);
+ DCHECK_GE(arraysize(outputs), output_count);
+
+ opcode = cont->Encode(opcode);
+ if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+ cont->frame_state());
+ } else {
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
+ }
+}
+
+// Shared routine for multiple binary operations.
+template <typename Matcher>
+void VisitBinop(InstructionSelector* selector, Node* node, ArchOpcode opcode,
+ ImmediateMode operand_mode) {
+ FlagsContinuation cont;
+ VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
+}
+
+} // namespace
+
+void InstructionSelector::VisitLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ S390OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* offset = node->InputAt(1);
+ ArchOpcode opcode = kArchNop;
+ ImmediateMode mode = kInt16Imm;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
+ opcode = kS390_LoadFloat32;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kS390_LoadDouble;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kS390_LoadWordS8 : kS390_LoadWordU8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kS390_LoadWordS16 : kS390_LoadWordU16;
+ break;
+#if !V8_TARGET_ARCH_S390X
+ case MachineRepresentation::kTagged: // Fall through.
+#endif
+ case MachineRepresentation::kWord32:
+ opcode = kS390_LoadWordS32;
+#if V8_TARGET_ARCH_S390X
+ // TODO(john.yan): Remove this mode since s390 do not has this restriction
+ mode = kInt16Imm_4ByteAligned;
+#endif
+ break;
+#if V8_TARGET_ARCH_S390X
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
+ opcode = kS390_LoadWord64;
+ mode = kInt16Imm_4ByteAligned;
+ break;
+#else
+ case MachineRepresentation::kWord64: // Fall through.
+#endif
+ case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
+ }
+ if (g.CanBeImmediate(offset, mode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(offset));
+ } else if (g.CanBeImmediate(base, mode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(offset), g.UseImmediate(base));
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_MRR),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset));
+ }
+}
+
+void InstructionSelector::VisitStore(Node* node) {
+ S390OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* offset = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+ MachineRepresentation rep = store_rep.representation();
+
+ if (write_barrier_kind != kNoWriteBarrier) {
+ DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ // OutOfLineRecordWrite uses the offset in an 'AddP' instruction as well as
+ // for the store itself, so we must check compatibility with both.
+ if (g.CanBeImmediate(offset, kInt16Imm)
+#if V8_TARGET_ARCH_S390X
+ && g.CanBeImmediate(offset, kInt16Imm_4ByteAligned)
+#endif
+ ) {
+ inputs[input_count++] = g.UseImmediate(offset);
+ addressing_mode = kMode_MRI;
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(offset);
+ addressing_mode = kMode_MRR;
+ }
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
+ switch (write_barrier_kind) {
+ case kNoWriteBarrier:
+ UNREACHABLE();
+ break;
+ case kMapWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsMap;
+ break;
+ case kPointerWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsPointer;
+ break;
+ case kFullWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsAny;
+ break;
+ }
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= AddressingModeField::encode(addressing_mode);
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
+ } else {
+ ArchOpcode opcode = kArchNop;
+ ImmediateMode mode = kInt16Imm;
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ opcode = kS390_StoreFloat32;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kS390_StoreDouble;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = kS390_StoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kS390_StoreWord16;
+ break;
+#if !V8_TARGET_ARCH_S390X
+ case MachineRepresentation::kTagged: // Fall through.
+#endif
+ case MachineRepresentation::kWord32:
+ opcode = kS390_StoreWord32;
+ break;
+#if V8_TARGET_ARCH_S390X
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
+ opcode = kS390_StoreWord64;
+ mode = kInt16Imm_4ByteAligned;
+ break;
+#else
+ case MachineRepresentation::kWord64: // Fall through.
+#endif
+ case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
+ }
+ if (g.CanBeImmediate(offset, mode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(offset), g.UseRegister(value));
+ } else if (g.CanBeImmediate(base, mode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(offset), g.UseImmediate(base), g.UseRegister(value));
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
+ g.UseRegister(base), g.UseRegister(offset), g.UseRegister(value));
+ }
+ }
+}
+
+void InstructionSelector::VisitCheckedLoad(Node* node) {
+ CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
+ S390OperandGenerator g(this);
+ Node* const base = node->InputAt(0);
+ Node* const offset = node->InputAt(1);
+ Node* const length = node->InputAt(2);
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kCheckedLoadWord32;
+ break;
+#if V8_TARGET_ARCH_S390X
+ case MachineRepresentation::kWord64:
+ opcode = kCheckedLoadWord64;
+ break;
+#endif
+ case MachineRepresentation::kFloat32:
+ opcode = kCheckedLoadFloat32;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kCheckedLoadFloat64;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+#if !V8_TARGET_ARCH_S390X
+ case MachineRepresentation::kWord64: // Fall through.
+#endif
+ case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
+ }
+ AddressingMode addressingMode = kMode_MRR;
+ Emit(opcode | AddressingModeField::encode(addressingMode),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
+ g.UseOperand(length, kInt16Imm_Unsigned));
+}
+
+void InstructionSelector::VisitCheckedStore(Node* node) {
+ MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
+ S390OperandGenerator g(this);
+ Node* const base = node->InputAt(0);
+ Node* const offset = node->InputAt(1);
+ Node* const length = node->InputAt(2);
+ Node* const value = node->InputAt(3);
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ opcode = kCheckedStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kCheckedStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kCheckedStoreWord32;
+ break;
+#if V8_TARGET_ARCH_S390X
+ case MachineRepresentation::kWord64:
+ opcode = kCheckedStoreWord64;
+ break;
+#endif
+ case MachineRepresentation::kFloat32:
+ opcode = kCheckedStoreFloat32;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kCheckedStoreFloat64;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+#if !V8_TARGET_ARCH_S390X
+ case MachineRepresentation::kWord64: // Fall through.
+#endif
+ case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
+ }
+ AddressingMode addressingMode = kMode_MRR;
+ Emit(opcode | AddressingModeField::encode(addressingMode), g.NoOutput(),
+ g.UseRegister(base), g.UseRegister(offset),
+ g.UseOperand(length, kInt16Imm_Unsigned), g.UseRegister(value));
+}
+
+template <typename Matcher>
+static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
+ ArchOpcode opcode, bool left_can_cover,
+ bool right_can_cover, ImmediateMode imm_mode) {
+ S390OperandGenerator g(selector);
+
+ // Map instruction to equivalent operation with inverted right input.
+ ArchOpcode inv_opcode = opcode;
+ switch (opcode) {
+ case kS390_And:
+ inv_opcode = kS390_AndComplement;
+ break;
+ case kS390_Or:
+ inv_opcode = kS390_OrComplement;
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ // Select Logical(y, ~x) for Logical(Xor(x, -1), y).
+ if ((m->left().IsWord32Xor() || m->left().IsWord64Xor()) && left_can_cover) {
+ Matcher mleft(m->left().node());
+ if (mleft.right().Is(-1)) {
+ selector->Emit(inv_opcode, g.DefineAsRegister(node),
+ g.UseRegister(m->right().node()),
+ g.UseRegister(mleft.left().node()));
+ return;
+ }
+ }
+
+ // Select Logical(x, ~y) for Logical(x, Xor(y, -1)).
+ if ((m->right().IsWord32Xor() || m->right().IsWord64Xor()) &&
+ right_can_cover) {
+ Matcher mright(m->right().node());
+ if (mright.right().Is(-1)) {
+ // TODO(all): support shifted operand on right.
+ selector->Emit(inv_opcode, g.DefineAsRegister(node),
+ g.UseRegister(m->left().node()),
+ g.UseRegister(mright.left().node()));
+ return;
+ }
+ }
+
+ VisitBinop<Matcher>(selector, node, opcode, imm_mode);
+}
+
+static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
+ int mask_width = base::bits::CountPopulation32(value);
+ int mask_msb = base::bits::CountLeadingZeros32(value);
+ int mask_lsb = base::bits::CountTrailingZeros32(value);
+ if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 32))
+ return false;
+ *mb = mask_lsb + mask_width - 1;
+ *me = mask_lsb;
+ return true;
+}
+
+#if V8_TARGET_ARCH_S390X
+static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
+ int mask_width = base::bits::CountPopulation64(value);
+ int mask_msb = base::bits::CountLeadingZeros64(value);
+ int mask_lsb = base::bits::CountTrailingZeros64(value);
+ if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 64))
+ return false;
+ *mb = mask_lsb + mask_width - 1;
+ *me = mask_lsb;
+ return true;
+}
+#endif
+
+void InstructionSelector::VisitWord32And(Node* node) {
+ S390OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ int mb = 0;
+ int me = 0;
+ if (m.right().HasValue() && IsContiguousMask32(m.right().Value(), &mb, &me)) {
+ int sh = 0;
+ Node* left = m.left().node();
+ if ((m.left().IsWord32Shr() || m.left().IsWord32Shl()) &&
+ CanCover(node, left)) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().IsInRange(0, 31)) {
+ left = mleft.left().node();
+ sh = mleft.right().Value();
+ if (m.left().IsWord32Shr()) {
+ // Adjust the mask such that it doesn't include any rotated bits.
+ if (mb > 31 - sh) mb = 31 - sh;
+ sh = (32 - sh) & 0x1f;
+ } else {
+ // Adjust the mask such that it doesn't include any rotated bits.
+ if (me < sh) me = sh;
+ }
+ }
+ }
+ if (mb >= me) {
+ Emit(kS390_RotLeftAndMask32, g.DefineAsRegister(node),
+ g.UseRegister(left), g.TempImmediate(sh), g.TempImmediate(mb),
+ g.TempImmediate(me));
+ return;
+ }
+ }
+ VisitLogical<Int32BinopMatcher>(
+ this, node, &m, kS390_And, CanCover(node, m.left().node()),
+ CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64And(Node* node) {
+ S390OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ int mb = 0;
+ int me = 0;
+ if (m.right().HasValue() && IsContiguousMask64(m.right().Value(), &mb, &me)) {
+ int sh = 0;
+ Node* left = m.left().node();
+ if ((m.left().IsWord64Shr() || m.left().IsWord64Shl()) &&
+ CanCover(node, left)) {
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().IsInRange(0, 63)) {
+ left = mleft.left().node();
+ sh = mleft.right().Value();
+ if (m.left().IsWord64Shr()) {
+ // Adjust the mask such that it doesn't include any rotated bits.
+ if (mb > 63 - sh) mb = 63 - sh;
+ sh = (64 - sh) & 0x3f;
+ } else {
+ // Adjust the mask such that it doesn't include any rotated bits.
+ if (me < sh) me = sh;
+ }
+ }
+ }
+ if (mb >= me) {
+ bool match = false;
+ ArchOpcode opcode;
+ int mask;
+ if (me == 0) {
+ match = true;
+ opcode = kS390_RotLeftAndClearLeft64;
+ mask = mb;
+ } else if (mb == 63) {
+ match = true;
+ opcode = kS390_RotLeftAndClearRight64;
+ mask = me;
+ } else if (sh && me <= sh && m.left().IsWord64Shl()) {
+ match = true;
+ opcode = kS390_RotLeftAndClear64;
+ mask = mb;
+ }
+ if (match) {
+ Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
+ g.TempImmediate(sh), g.TempImmediate(mask));
+ return;
+ }
+ }
+ }
+ VisitLogical<Int64BinopMatcher>(
+ this, node, &m, kS390_And, CanCover(node, m.left().node()),
+ CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+}
+#endif
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+ Int32BinopMatcher m(node);
+ VisitLogical<Int32BinopMatcher>(
+ this, node, &m, kS390_Or, CanCover(node, m.left().node()),
+ CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64Or(Node* node) {
+ Int64BinopMatcher m(node);
+ VisitLogical<Int64BinopMatcher>(
+ this, node, &m, kS390_Or, CanCover(node, m.left().node()),
+ CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+}
+#endif
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+ S390OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.right().Is(-1)) {
+ Emit(kS390_Not, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
+ } else {
+ VisitBinop<Int32BinopMatcher>(this, node, kS390_Xor, kInt16Imm_Unsigned);
+ }
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64Xor(Node* node) {
+ S390OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if (m.right().Is(-1)) {
+ Emit(kS390_Not, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
+ } else {
+ VisitBinop<Int64BinopMatcher>(this, node, kS390_Xor, kInt16Imm_Unsigned);
+ }
+}
+#endif
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+ S390OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
+ Int32BinopMatcher mleft(m.left().node());
+ int sh = m.right().Value();
+ int mb;
+ int me;
+ if (mleft.right().HasValue() &&
+ IsContiguousMask32(mleft.right().Value() << sh, &mb, &me)) {
+ // Adjust the mask such that it doesn't include any rotated bits.
+ if (me < sh) me = sh;
+ if (mb >= me) {
+ Emit(kS390_RotLeftAndMask32, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
+ g.TempImmediate(mb), g.TempImmediate(me));
+ return;
+ }
+ }
+ }
+ VisitRRO(this, kS390_ShiftLeft32, node, kShift32Imm);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64Shl(Node* node) {
+ S390OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ // TODO(mbrandy): eliminate left sign extension if right >= 32
+ if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
+ Int64BinopMatcher mleft(m.left().node());
+ int sh = m.right().Value();
+ int mb;
+ int me;
+ if (mleft.right().HasValue() &&
+ IsContiguousMask64(mleft.right().Value() << sh, &mb, &me)) {
+ // Adjust the mask such that it doesn't include any rotated bits.
+ if (me < sh) me = sh;
+ if (mb >= me) {
+ bool match = false;
+ ArchOpcode opcode;
+ int mask;
+ if (me == 0) {
+ match = true;
+ opcode = kS390_RotLeftAndClearLeft64;
+ mask = mb;
+ } else if (mb == 63) {
+ match = true;
+ opcode = kS390_RotLeftAndClearRight64;
+ mask = me;
+ } else if (sh && me <= sh) {
+ match = true;
+ opcode = kS390_RotLeftAndClear64;
+ mask = mb;
+ }
+ if (match) {
+ Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
+ g.TempImmediate(mask));
+ return;
+ }
+ }
+ }
+ }
+ VisitRRO(this, kS390_ShiftLeft64, node, kShift64Imm);
+}
+#endif
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+ S390OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
+ Int32BinopMatcher mleft(m.left().node());
+ int sh = m.right().Value();
+ int mb;
+ int me;
+ if (mleft.right().HasValue() &&
+ IsContiguousMask32((uint32_t)(mleft.right().Value()) >> sh, &mb, &me)) {
+ // Adjust the mask such that it doesn't include any rotated bits.
+ if (mb > 31 - sh) mb = 31 - sh;
+ sh = (32 - sh) & 0x1f;
+ if (mb >= me) {
+ Emit(kS390_RotLeftAndMask32, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
+ g.TempImmediate(mb), g.TempImmediate(me));
+ return;
+ }
+ }
+ }
+ VisitRRO(this, kS390_ShiftRight32, node, kShift32Imm);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64Shr(Node* node) {
+ S390OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
+ Int64BinopMatcher mleft(m.left().node());
+ int sh = m.right().Value();
+ int mb;
+ int me;
+ if (mleft.right().HasValue() &&
+ IsContiguousMask64((uint64_t)(mleft.right().Value()) >> sh, &mb, &me)) {
+ // Adjust the mask such that it doesn't include any rotated bits.
+ if (mb > 63 - sh) mb = 63 - sh;
+ sh = (64 - sh) & 0x3f;
+ if (mb >= me) {
+ bool match = false;
+ ArchOpcode opcode;
+ int mask;
+ if (me == 0) {
+ match = true;
+ opcode = kS390_RotLeftAndClearLeft64;
+ mask = mb;
+ } else if (mb == 63) {
+ match = true;
+ opcode = kS390_RotLeftAndClearRight64;
+ mask = me;
+ }
+ if (match) {
+ Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
+ g.TempImmediate(mask));
+ return;
+ }
+ }
+ }
+ }
+ VisitRRO(this, kS390_ShiftRight64, node, kShift64Imm);
+}
+#endif
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+ S390OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ // Replace with sign extension for (x << K) >> K where K is 16 or 24.
+ if (CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().Is(16) && m.right().Is(16)) {
+ Emit(kS390_ExtendSignWord16, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()));
+ return;
+ } else if (mleft.right().Is(24) && m.right().Is(24)) {
+ Emit(kS390_ExtendSignWord8, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()));
+ return;
+ }
+ }
+ VisitRRO(this, kS390_ShiftRightArith32, node, kShift32Imm);
+}
+
+#if !V8_TARGET_ARCH_S390X
+void VisitPairBinop(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ S390OperandGenerator g(selector);
+
+ // We use UseUniqueRegister here to avoid register sharing with the output
+ // registers.
+ InstructionOperand inputs[] = {
+ g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+
+ InstructionOperand outputs[] = {
+ g.DefineAsRegister(node),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+ selector->Emit(opcode, 2, outputs, 4, inputs);
+}
+
+void InstructionSelector::VisitInt32PairAdd(Node* node) {
+ VisitPairBinop(this, kS390_AddPair, node);
+}
+
+void InstructionSelector::VisitInt32PairSub(Node* node) {
+ VisitPairBinop(this, kS390_SubPair, node);
+}
+
+void InstructionSelector::VisitInt32PairMul(Node* node) {
+ S390OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)),
+ g.UseUniqueRegister(node->InputAt(2)),
+ g.UseUniqueRegister(node->InputAt(3))};
+
+ InstructionOperand outputs[] = {
+ g.DefineAsRegister(node),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+ Emit(kS390_MulPair, 2, outputs, 4, inputs);
+}
+
+void VisitPairShift(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ S390OperandGenerator g(selector);
+ Int32Matcher m(node->InputAt(2));
+ InstructionOperand shift_operand;
+ if (m.HasValue()) {
+ shift_operand = g.UseImmediate(m.node());
+ } else {
+ shift_operand = g.UseUniqueRegister(m.node());
+ }
+
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)),
+ shift_operand};
+
+ InstructionOperand outputs[] = {
+ g.DefineSameAsFirst(node),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+ selector->Emit(opcode, 2, outputs, 3, inputs);
+}
+
+void InstructionSelector::VisitWord32PairShl(Node* node) {
+ VisitPairShift(this, kS390_ShiftLeftPair, node);
+}
+
+void InstructionSelector::VisitWord32PairShr(Node* node) {
+ VisitPairShift(this, kS390_ShiftRightPair, node);
+}
+
+void InstructionSelector::VisitWord32PairSar(Node* node) {
+ VisitPairShift(this, kS390_ShiftRightArithPair, node);
+}
+#endif
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64Sar(Node* node) {
+ VisitRRO(this, kS390_ShiftRightArith64, node, kShift64Imm);
+}
+#endif
+
+void InstructionSelector::VisitWord32Ror(Node* node) {
+ VisitRRO(this, kS390_RotRight32, node, kShift32Imm);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64Ror(Node* node) {
+ VisitRRO(this, kS390_RotRight64, node, kShift64Imm);
+}
+#endif
+
+void InstructionSelector::VisitWord32Clz(Node* node) {
+ S390OperandGenerator g(this);
+ Emit(kS390_Cntlz32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64Clz(Node* node) {
+ S390OperandGenerator g(this);
+ Emit(kS390_Cntlz64, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+#endif
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) {
+ S390OperandGenerator g(this);
+ Emit(kS390_Popcnt32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64Popcnt(Node* node) {
+ S390OperandGenerator g(this);
+ Emit(kS390_Popcnt64, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+#endif
+
+void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
+#endif
+
+void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
+#endif
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+ VisitBinop<Int32BinopMatcher>(this, node, kS390_Add, kInt16Imm);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitInt64Add(Node* node) {
+ VisitBinop<Int64BinopMatcher>(this, node, kS390_Add, kInt16Imm);
+}
+#endif
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+ S390OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().Is(0)) {
+ Emit(kS390_Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
+ } else {
+ VisitBinop<Int32BinopMatcher>(this, node, kS390_Sub, kInt16Imm_Negate);
+ }
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitInt64Sub(Node* node) {
+ S390OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if (m.left().Is(0)) {
+ Emit(kS390_Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
+ } else {
+ VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub, kInt16Imm_Negate);
+ }
+}
+#endif
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+ VisitRRR(this, kS390_Mul32, node);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitInt64Mul(Node* node) {
+ VisitRRR(this, kS390_Mul64, node);
+}
+#endif
+
+void InstructionSelector::VisitInt32MulHigh(Node* node) {
+ S390OperandGenerator g(this);
+ Emit(kS390_MulHigh32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitUint32MulHigh(Node* node) {
+ S390OperandGenerator g(this);
+ Emit(kS390_MulHighU32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+ VisitRRR(this, kS390_Div32, node);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitInt64Div(Node* node) {
+ VisitRRR(this, kS390_Div64, node);
+}
+#endif
+
+void InstructionSelector::VisitUint32Div(Node* node) {
+ VisitRRR(this, kS390_DivU32, node);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitUint64Div(Node* node) {
+ VisitRRR(this, kS390_DivU64, node);
+}
+#endif
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+ VisitRRR(this, kS390_Mod32, node);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitInt64Mod(Node* node) {
+ VisitRRR(this, kS390_Mod64, node);
+}
+#endif
+
+void InstructionSelector::VisitUint32Mod(Node* node) {
+ VisitRRR(this, kS390_ModU32, node);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitUint64Mod(Node* node) {
+ VisitRRR(this, kS390_ModU64, node);
+}
+#endif
+
+void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
+ VisitRR(this, kS390_Float32ToDouble, node);
+}
+
+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
+ VisitRR(this, kS390_Int32ToFloat32, node);
+}
+
+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
+ VisitRR(this, kS390_Uint32ToFloat32, node);
+}
+
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+ VisitRR(this, kS390_Int32ToDouble, node);
+}
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+ VisitRR(this, kS390_Uint32ToDouble, node);
+}
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+ VisitRR(this, kS390_DoubleToInt32, node);
+}
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+ VisitRR(this, kS390_DoubleToUint32, node);
+}
+
+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
+ VisitRR(this, kS390_DoubleToUint32, node);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
+ VisitTryTruncateDouble(this, kS390_Float32ToInt64, node);
+}
+
+void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
+ VisitTryTruncateDouble(this, kS390_DoubleToInt64, node);
+}
+
+void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
+ VisitTryTruncateDouble(this, kS390_Float32ToUint64, node);
+}
+
+void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
+ VisitTryTruncateDouble(this, kS390_DoubleToUint64, node);
+}
+
+void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
+ // TODO(mbrandy): inspect input to see if nop is appropriate.
+ VisitRR(this, kS390_ExtendSignWord32, node);
+}
+
+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
+ // TODO(mbrandy): inspect input to see if nop is appropriate.
+ VisitRR(this, kS390_Uint32ToUint64, node);
+}
+#endif
+
+void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+ VisitRR(this, kS390_DoubleToFloat32, node);
+}
+
+void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
+ switch (TruncationModeOf(node->op())) {
+ case TruncationMode::kJavaScript:
+ return VisitRR(this, kArchTruncateDoubleToI, node);
+ case TruncationMode::kRoundToZero:
+ return VisitRR(this, kS390_DoubleToInt32, node);
+ }
+ UNREACHABLE();
+}
+
+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
+ VisitRR(this, kS390_Float32ToInt32, node);
+}
+
+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
+ VisitRR(this, kS390_Float32ToUint32, node);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
+ // TODO(mbrandy): inspect input to see if nop is appropriate.
+ VisitRR(this, kS390_Int64ToInt32, node);
+}
+
+void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
+ VisitRR(this, kS390_Int64ToFloat32, node);
+}
+
+void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
+ VisitRR(this, kS390_Int64ToDouble, node);
+}
+
+void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
+ VisitRR(this, kS390_Uint64ToFloat32, node);
+}
+
+void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
+ VisitRR(this, kS390_Uint64ToDouble, node);
+}
+#endif
+
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+ VisitRR(this, kS390_BitcastFloat32ToInt32, node);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
+ VisitRR(this, kS390_BitcastDoubleToInt64, node);
+}
+#endif
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+ VisitRR(this, kS390_BitcastInt32ToFloat32, node);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
+ VisitRR(this, kS390_BitcastInt64ToDouble, node);
+}
+#endif
+
+void InstructionSelector::VisitFloat32Add(Node* node) {
+ VisitRRR(this, kS390_AddFloat, node);
+}
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+ // TODO(mbrandy): detect multiply-add
+ VisitRRR(this, kS390_AddDouble, node);
+}
+
+void InstructionSelector::VisitFloat32Sub(Node* node) {
+ S390OperandGenerator g(this);
+ Float32BinopMatcher m(node);
+ if (m.left().IsMinusZero()) {
+ Emit(kS390_NegDouble, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()));
+ return;
+ }
+ VisitRRR(this, kS390_SubFloat, node);
+}
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+ // TODO(mbrandy): detect multiply-subtract
+ S390OperandGenerator g(this);
+ Float64BinopMatcher m(node);
+ if (m.left().IsMinusZero()) {
+ if (m.right().IsFloat64RoundDown() &&
+ CanCover(m.node(), m.right().node())) {
+ if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
+ CanCover(m.right().node(), m.right().InputAt(0))) {
+ Float64BinopMatcher mright0(m.right().InputAt(0));
+ if (mright0.left().IsMinusZero()) {
+ // -floor(-x) = ceil(x)
+ Emit(kS390_CeilDouble, g.DefineAsRegister(node),
+ g.UseRegister(mright0.right().node()));
+ return;
+ }
+ }
+ }
+ Emit(kS390_NegDouble, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()));
+ return;
+ }
+ VisitRRR(this, kS390_SubDouble, node);
+}
+
+void InstructionSelector::VisitFloat32Mul(Node* node) {
+ VisitRRR(this, kS390_MulFloat, node);
+}
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+ // TODO(mbrandy): detect negate
+ VisitRRR(this, kS390_MulDouble, node);
+}
+
+void InstructionSelector::VisitFloat32Div(Node* node) {
+ VisitRRR(this, kS390_DivFloat, node);
+}
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+ VisitRRR(this, kS390_DivDouble, node);
+}
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+ S390OperandGenerator g(this);
+ Emit(kS390_ModDouble, g.DefineAsFixed(node, d1),
+ g.UseFixed(node->InputAt(0), d1), g.UseFixed(node->InputAt(1), d2))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat32Max(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat32Abs(Node* node) {
+ VisitRR(this, kS390_AbsFloat, node);
+}
+
+void InstructionSelector::VisitFloat64Abs(Node* node) {
+ VisitRR(this, kS390_AbsDouble, node);
+}
+
+void InstructionSelector::VisitFloat32Sqrt(Node* node) {
+ VisitRR(this, kS390_SqrtFloat, node);
+}
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+ VisitRR(this, kS390_SqrtDouble, node);
+}
+
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ VisitRR(this, kS390_FloorFloat, node);
+}
+
+void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+ VisitRR(this, kS390_FloorDouble, node);
+}
+
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ VisitRR(this, kS390_CeilFloat, node);
+}
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ VisitRR(this, kS390_CeilDouble, node);
+}
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ VisitRR(this, kS390_TruncateFloat, node);
+}
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+ VisitRR(this, kS390_TruncateDouble, node);
+}
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+ VisitRR(this, kS390_RoundDouble, node);
+}
+
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+ UNREACHABLE();
+}
+
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ UNREACHABLE();
+}
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop<Int32BinopMatcher>(this, node, kS390_AddWithOverflow32,
+ kInt16Imm, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop<Int32BinopMatcher>(this, node, kS390_AddWithOverflow32, kInt16Imm,
+ &cont);
+}
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop<Int32BinopMatcher>(this, node, kS390_SubWithOverflow32,
+ kInt16Imm_Negate, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop<Int32BinopMatcher>(this, node, kS390_SubWithOverflow32,
+ kInt16Imm_Negate, &cont);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop<Int64BinopMatcher>(this, node, kS390_Add, kInt16Imm,
+ &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop<Int64BinopMatcher>(this, node, kS390_Add, kInt16Imm, &cont);
+}
+
+void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub,
+ kInt16Imm_Negate, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub, kInt16Imm_Negate, &cont);
+}
+#endif
+
+static bool CompareLogical(FlagsContinuation* cont) {
+ switch (cont->condition()) {
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ case kUnsignedLessThanOrEqual:
+ case kUnsignedGreaterThan:
+ return true;
+ default:
+ return false;
+ }
+ UNREACHABLE();
+ return false;
+}
+
+namespace {
+
+// Shared routine for multiple compare operations.
+void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+ InstructionOperand left, InstructionOperand right,
+ FlagsContinuation* cont) {
+ S390OperandGenerator g(selector);
+ opcode = cont->Encode(opcode);
+ if (cont->IsBranch()) {
+ selector->Emit(opcode, g.NoOutput(), left, right,
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
+ } else if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+ cont->frame_state());
+ } else {
+ DCHECK(cont->IsSet());
+ selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+ }
+}
+
+// Shared routine for multiple word compare operations.
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont,
+ bool commutative, ImmediateMode immediate_mode) {
+ S390OperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+
+ // Match immediates on left or right side of comparison.
+ if (g.CanBeImmediate(right, immediate_mode)) {
+ VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
+ cont);
+ } else if (g.CanBeImmediate(left, immediate_mode)) {
+ if (!commutative) cont->Commute();
+ VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
+ cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
+ cont);
+ }
+}
+
+void VisitWord32Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm);
+ VisitWordCompare(selector, node, kS390_Cmp32, cont, false, mode);
+}
+
+#if V8_TARGET_ARCH_S390X
+void VisitWord64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm);
+ VisitWordCompare(selector, node, kS390_Cmp64, cont, false, mode);
+}
+#endif
+
+// Shared routine for multiple float32 compare operations.
+void VisitFloat32Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ S390OperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ VisitCompare(selector, kS390_CmpFloat, g.UseRegister(left),
+ g.UseRegister(right), cont);
+}
+
+// Shared routine for multiple float64 compare operations.
+void VisitFloat64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ S390OperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ VisitCompare(selector, kS390_CmpDouble, g.UseRegister(left),
+ g.UseRegister(right), cont);
+}
+
+// Shared routine for word comparisons against zero.
+void VisitWordCompareZero(InstructionSelector* selector, Node* user,
+ Node* value, InstructionCode opcode,
+ FlagsContinuation* cont) {
+ while (selector->CanCover(user, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kWord32Equal: {
+ // Combine with comparisons against 0 by simply inverting the
+ // continuation.
+ Int32BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ user = value;
+ value = m.left().node();
+ cont->Negate();
+ continue;
+ }
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitWord32Compare(selector, value, cont);
+ }
+ case IrOpcode::kInt32LessThan:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWord32Compare(selector, value, cont);
+ case IrOpcode::kInt32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWord32Compare(selector, value, cont);
+ case IrOpcode::kUint32LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWord32Compare(selector, value, cont);
+ case IrOpcode::kUint32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWord32Compare(selector, value, cont);
+#if V8_TARGET_ARCH_S390X
+ case IrOpcode::kWord64Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitWord64Compare(selector, value, cont);
+ case IrOpcode::kInt64LessThan:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWord64Compare(selector, value, cont);
+ case IrOpcode::kInt64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWord64Compare(selector, value, cont);
+ case IrOpcode::kUint64LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWord64Compare(selector, value, cont);
+ case IrOpcode::kUint64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWord64Compare(selector, value, cont);
+#endif
+ case IrOpcode::kFloat32Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitFloat32Compare(selector, value, cont);
+ case IrOpcode::kFloat32LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitFloat32Compare(selector, value, cont);
+ case IrOpcode::kFloat32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitFloat32Compare(selector, value, cont);
+ case IrOpcode::kFloat64Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitFloat64Compare(selector, value, cont);
+ case IrOpcode::kFloat64LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitFloat64Compare(selector, value, cont);
+ case IrOpcode::kFloat64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitFloat64Compare(selector, value, cont);
+ case IrOpcode::kProjection:
+ // Check if this is the overflow output projection of an
+ // <Operation>WithOverflow node.
+ if (ProjectionIndexOf(value->op()) == 1u) {
+ // We cannot combine the <Operation>WithOverflow with this branch
+ // unless the 0th projection (the use of the actual value of the
+ // <Operation> is either nullptr, which means there's no use of the
+ // actual value, or was already defined, which means it is scheduled
+ // *AFTER* this branch).
+ Node* const node = value->InputAt(0);
+ Node* const result = NodeProperties::FindProjection(node, 0);
+ if (result == nullptr || selector->IsDefined(result)) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32AddWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop<Int32BinopMatcher>(
+ selector, node, kS390_AddWithOverflow32, kInt16Imm, cont);
+ case IrOpcode::kInt32SubWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop<Int32BinopMatcher>(selector, node,
+ kS390_SubWithOverflow32,
+ kInt16Imm_Negate, cont);
+#if V8_TARGET_ARCH_S390X
+ case IrOpcode::kInt64AddWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop<Int64BinopMatcher>(selector, node, kS390_Add,
+ kInt16Imm, cont);
+ case IrOpcode::kInt64SubWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop<Int64BinopMatcher>(selector, node, kS390_Sub,
+ kInt16Imm_Negate, cont);
+#endif
+ default:
+ break;
+ }
+ }
+ }
+ break;
+ case IrOpcode::kInt32Sub:
+ return VisitWord32Compare(selector, value, cont);
+ case IrOpcode::kWord32And:
+ return VisitWordCompare(selector, value, kS390_Tst32, cont, true,
+ kInt16Imm_Unsigned);
+// TODO(mbrandy): Handle?
+// case IrOpcode::kInt32Add:
+// case IrOpcode::kWord32Or:
+// case IrOpcode::kWord32Xor:
+// case IrOpcode::kWord32Sar:
+// case IrOpcode::kWord32Shl:
+// case IrOpcode::kWord32Shr:
+// case IrOpcode::kWord32Ror:
+#if V8_TARGET_ARCH_S390X
+ case IrOpcode::kInt64Sub:
+ return VisitWord64Compare(selector, value, cont);
+ case IrOpcode::kWord64And:
+ return VisitWordCompare(selector, value, kS390_Tst64, cont, true,
+ kInt16Imm_Unsigned);
+// TODO(mbrandy): Handle?
+// case IrOpcode::kInt64Add:
+// case IrOpcode::kWord64Or:
+// case IrOpcode::kWord64Xor:
+// case IrOpcode::kWord64Sar:
+// case IrOpcode::kWord64Shl:
+// case IrOpcode::kWord64Shr:
+// case IrOpcode::kWord64Ror:
+#endif
+ default:
+ break;
+ }
+ break;
+ }
+
+ // Branch could not be combined with a compare, emit compare against 0.
+ S390OperandGenerator g(selector);
+ VisitCompare(selector, opcode, g.UseRegister(value), g.TempImmediate(0),
+ cont);
+}
+
+void VisitWord32CompareZero(InstructionSelector* selector, Node* user,
+ Node* value, FlagsContinuation* cont) {
+ VisitWordCompareZero(selector, user, value, kS390_Cmp32, cont);
+}
+
+#if V8_TARGET_ARCH_S390X
+void VisitWord64CompareZero(InstructionSelector* selector, Node* user,
+ Node* value, FlagsContinuation* cont) {
+ VisitWordCompareZero(selector, user, value, kS390_Cmp64, cont);
+}
+#endif
+
+} // namespace
+
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+ BasicBlock* fbranch) {
+ FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+ VisitWord32CompareZero(this, branch, branch->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+ VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+ VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
+ S390OperandGenerator g(this);
+ InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+
+ // Emit either ArchTableSwitch or ArchLookupSwitch.
+ size_t table_space_cost = 4 + sw.value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 3 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
+ if (sw.case_count > 0 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value > std::numeric_limits<int32_t>::min()) {
+ InstructionOperand index_operand = value_operand;
+ if (sw.min_value) {
+ index_operand = g.TempRegister();
+ Emit(kS390_Sub, index_operand, value_operand,
+ g.TempImmediate(sw.min_value));
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
+ }
+
+ // Generate a sequence of conditional jumps.
+ return EmitLookupSwitch(sw, value_operand);
+}
+
+void InstructionSelector::VisitWord32Equal(Node* const node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+ Int32BinopMatcher m(node);
+ if (m.right().Is(0)) {
+ return VisitWord32CompareZero(this, m.node(), m.left().node(), &cont);
+ }
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64Equal(Node* const node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+ Int64BinopMatcher m(node);
+ if (m.right().Is(0)) {
+ return VisitWord64CompareZero(this, m.node(), m.left().node(), &cont);
+ }
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt64LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint64LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+ VisitWord64Compare(this, node, &cont);
+}
+#endif
+
+void InstructionSelector::VisitFloat32Equal(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat32LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+void InstructionSelector::EmitPrepareArguments(
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ Node* node) {
+ S390OperandGenerator g(this);
+
+ // Prepare for C function call.
+ if (descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+ 0, nullptr, 0, nullptr);
+
+ // Poke any stack arguments.
+ int slot = kStackFrameExtraParamSlot;
+ for (PushParameter input : (*arguments)) {
+ Emit(kS390_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ g.TempImmediate(slot));
+ ++slot;
+ }
+ } else {
+ // Push any stack arguments.
+ int num_slots = static_cast<int>(descriptor->StackParameterCount());
+ int slot = 0;
+ for (PushParameter input : (*arguments)) {
+ if (slot == 0) {
+ DCHECK(input.node());
+ Emit(kS390_PushFrame, g.NoOutput(), g.UseRegister(input.node()),
+ g.TempImmediate(num_slots));
+ } else {
+ // Skip any alignment holes in pushed nodes.
+ if (input.node()) {
+ Emit(kS390_StoreToStackSlot, g.NoOutput(),
+ g.UseRegister(input.node()), g.TempImmediate(slot));
+ }
+ }
+ ++slot;
+ }
+ }
+}
+
+bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
+
+int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
+
+void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
+ S390OperandGenerator g(this);
+ Emit(kS390_DoubleExtractLowWord32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
+ S390OperandGenerator g(this);
+ Emit(kS390_DoubleExtractHighWord32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
+ S390OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
+ CanCover(node, left)) {
+ left = left->InputAt(1);
+ Emit(kS390_DoubleConstruct, g.DefineAsRegister(node), g.UseRegister(left),
+ g.UseRegister(right));
+ return;
+ }
+ Emit(kS390_DoubleInsertLowWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
+ S390OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
+ CanCover(node, left)) {
+ left = left->InputAt(1);
+ Emit(kS390_DoubleConstruct, g.DefineAsRegister(node), g.UseRegister(right),
+ g.UseRegister(left));
+ return;
+ }
+ Emit(kS390_DoubleInsertHighWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+// static
+MachineOperatorBuilder::Flags
+InstructionSelector::SupportedMachineOperatorFlags() {
+ return MachineOperatorBuilder::kFloat32RoundDown |
+ MachineOperatorBuilder::kFloat64RoundDown |
+ MachineOperatorBuilder::kFloat32RoundUp |
+ MachineOperatorBuilder::kFloat64RoundUp |
+ MachineOperatorBuilder::kFloat32RoundTruncate |
+ MachineOperatorBuilder::kFloat64RoundTruncate |
+ MachineOperatorBuilder::kFloat64RoundTiesAway |
+ MachineOperatorBuilder::kWord32Popcnt |
+ MachineOperatorBuilder::kWord64Popcnt;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/schedule.cc b/deps/v8/src/compiler/schedule.cc
index 455fcd120e..4ac65e5ae4 100644
--- a/deps/v8/src/compiler/schedule.cc
+++ b/deps/v8/src/compiler/schedule.cc
@@ -298,6 +298,64 @@ void Schedule::InsertSwitch(BasicBlock* block, BasicBlock* end, Node* sw,
SetControlInput(block, sw);
}
+void Schedule::EnsureSplitEdgeForm() {
+ // Make a copy of all the blocks for the iteration, since adding the split
+ // edges will allocate new blocks.
+ BasicBlockVector all_blocks_copy(all_blocks_);
+
+ // Insert missing split edge blocks.
+ for (auto block : all_blocks_copy) {
+ if (block->PredecessorCount() > 1 && block != end_) {
+ for (auto current_pred = block->predecessors().begin();
+ current_pred != block->predecessors().end(); ++current_pred) {
+ BasicBlock* pred = *current_pred;
+ if (pred->SuccessorCount() > 1) {
+ // Found a predecessor block with multiple successors.
+ BasicBlock* split_edge_block = NewBasicBlock();
+ split_edge_block->set_control(BasicBlock::kGoto);
+ split_edge_block->successors().push_back(block);
+ split_edge_block->predecessors().push_back(pred);
+ split_edge_block->set_deferred(pred->deferred());
+ *current_pred = split_edge_block;
+ // Find a corresponding successor in the previous block, replace it
+ // with the split edge block... but only do it once, since we only
+ // replace the previous blocks in the current block one at a time.
+ for (auto successor = pred->successors().begin();
+ successor != pred->successors().end(); ++successor) {
+ if (*successor == block) {
+ *successor = split_edge_block;
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+void Schedule::PropagateDeferredMark() {
+ // Push forward the deferred block marks through newly inserted blocks and
+ // other improperly marked blocks until a fixed point is reached.
+ // TODO(danno): optimize the propagation
+ bool done = false;
+ while (!done) {
+ done = true;
+ for (auto block : all_blocks_) {
+ if (!block->deferred()) {
+ bool deferred = block->PredecessorCount() > 0;
+ for (auto pred : block->predecessors()) {
+ if (!pred->deferred()) {
+ deferred = false;
+ }
+ }
+ if (deferred) {
+ block->set_deferred(true);
+ done = false;
+ }
+ }
+ }
+ }
+}
void Schedule::AddSuccessor(BasicBlock* block, BasicBlock* succ) {
block->AddSuccessor(succ);
@@ -331,15 +389,24 @@ void Schedule::SetBlockForNode(BasicBlock* block, Node* node) {
std::ostream& operator<<(std::ostream& os, const Schedule& s) {
- for (BasicBlock* block : *s.rpo_order()) {
- os << "--- BLOCK B" << block->rpo_number();
+ for (BasicBlock* block :
+ ((s.RpoBlockCount() == 0) ? *s.all_blocks() : *s.rpo_order())) {
+ if (block->rpo_number() == -1) {
+ os << "--- BLOCK id:" << block->id().ToInt();
+ } else {
+ os << "--- BLOCK B" << block->rpo_number();
+ }
if (block->deferred()) os << " (deferred)";
if (block->PredecessorCount() != 0) os << " <- ";
bool comma = false;
for (BasicBlock const* predecessor : block->predecessors()) {
if (comma) os << ", ";
comma = true;
- os << "B" << predecessor->rpo_number();
+ if (predecessor->rpo_number() == -1) {
+ os << "id:" << predecessor->id().ToInt();
+ } else {
+ os << "B" << predecessor->rpo_number();
+ }
}
os << " ---\n";
for (Node* node : *block) {
@@ -364,7 +431,11 @@ std::ostream& operator<<(std::ostream& os, const Schedule& s) {
for (BasicBlock const* successor : block->successors()) {
if (comma) os << ", ";
comma = true;
- os << "B" << successor->rpo_number();
+ if (successor->rpo_number() == -1) {
+ os << "id:" << successor->id().ToInt();
+ } else {
+ os << "B" << successor->rpo_number();
+ }
}
os << "\n";
}
diff --git a/deps/v8/src/compiler/schedule.h b/deps/v8/src/compiler/schedule.h
index 9624ff5a4f..c99a0fc525 100644
--- a/deps/v8/src/compiler/schedule.h
+++ b/deps/v8/src/compiler/schedule.h
@@ -243,6 +243,7 @@ class Schedule final : public ZoneObject {
return AddSuccessor(block, succ);
}
+ const BasicBlockVector* all_blocks() const { return &all_blocks_; }
BasicBlockVector* rpo_order() { return &rpo_order_; }
const BasicBlockVector* rpo_order() const { return &rpo_order_; }
@@ -254,6 +255,12 @@ class Schedule final : public ZoneObject {
private:
friend class Scheduler;
friend class BasicBlockInstrumentor;
+ friend class RawMachineAssembler;
+
+ // Ensure split-edge form for a hand-assembled schedule.
+ void EnsureSplitEdgeForm();
+ // Copy deferred block markers down as far as possible
+ void PropagateDeferredMark();
void AddSuccessor(BasicBlock* block, BasicBlock* succ);
void MoveSuccessors(BasicBlock* from, BasicBlock* to);
diff --git a/deps/v8/src/compiler/scheduler.cc b/deps/v8/src/compiler/scheduler.cc
index 80ce8b1711..b04ba6f926 100644
--- a/deps/v8/src/compiler/scheduler.cc
+++ b/deps/v8/src/compiler/scheduler.cc
@@ -1538,6 +1538,8 @@ class ScheduleLateNodeVisitor {
}
BasicBlock* GetBlockForUse(Edge edge) {
+ // TODO(titzer): ignore uses from dead nodes (not visited in PrepareUses()).
+ // Dead uses only occur if the graph is not trimmed before scheduling.
Node* use = edge.from();
if (IrOpcode::IsPhiOpcode(use->opcode())) {
// If the use is from a coupled (i.e. floating) phi, compute the common
@@ -1545,7 +1547,8 @@ class ScheduleLateNodeVisitor {
if (scheduler_->GetPlacement(use) == Scheduler::kCoupled) {
TRACE(" inspecting uses of coupled #%d:%s\n", use->id(),
use->op()->mnemonic());
- DCHECK_EQ(edge.to(), NodeProperties::GetControlInput(use));
+ // TODO(titzer): reenable once above TODO is addressed.
+ // DCHECK_EQ(edge.to(), NodeProperties::GetControlInput(use));
return GetCommonDominatorOfUses(use);
}
// If the use is from a fixed (i.e. non-floating) phi, we use the
diff --git a/deps/v8/src/compiler/select-lowering.cc b/deps/v8/src/compiler/select-lowering.cc
index 0e8b36fa73..b1a230962f 100644
--- a/deps/v8/src/compiler/select-lowering.cc
+++ b/deps/v8/src/compiler/select-lowering.cc
@@ -15,10 +15,7 @@ namespace internal {
namespace compiler {
SelectLowering::SelectLowering(Graph* graph, CommonOperatorBuilder* common)
- : common_(common),
- graph_(graph),
- merges_(Merges::key_compare(), Merges::allocator_type(graph->zone())) {}
-
+ : common_(common), graph_(graph) {}
SelectLowering::~SelectLowering() {}
@@ -30,58 +27,16 @@ Reduction SelectLowering::Reduce(Node* node) {
Node* cond = node->InputAt(0);
Node* vthen = node->InputAt(1);
Node* velse = node->InputAt(2);
- Node* merge = nullptr;
-
- // Check if we already have a diamond for this condition.
- auto range = merges_.equal_range(cond);
- for (auto i = range.first;; ++i) {
- if (i == range.second) {
- // Create a new diamond for this condition and remember its merge node.
- Diamond d(graph(), common(), cond, p.hint());
- merges_.insert(std::make_pair(cond, d.merge));
- merge = d.merge;
- break;
- }
-
- // If the diamond is reachable from the Select, merging them would result in
- // an unschedulable graph, so we cannot reuse the diamond in that case.
- merge = i->second;
- if (!ReachableFrom(merge, node)) {
- break;
- }
- }
- // Create a Phi hanging off the previously determined merge.
+ // Create a diamond and a phi.
+ Diamond d(graph(), common(), cond, p.hint());
node->ReplaceInput(0, vthen);
node->ReplaceInput(1, velse);
- node->ReplaceInput(2, merge);
+ node->ReplaceInput(2, d.merge);
NodeProperties::ChangeOp(node, common()->Phi(p.representation(), 2));
return Changed(node);
}
-
-bool SelectLowering::ReachableFrom(Node* const sink, Node* const source) {
- // TODO(turbofan): This is probably horribly expensive, and it should be moved
- // into node.h or somewhere else?!
- Zone zone;
- std::queue<Node*, NodeDeque> queue((NodeDeque(&zone)));
- BoolVector visited(graph()->NodeCount(), false, &zone);
- queue.push(source);
- visited[source->id()] = true;
- while (!queue.empty()) {
- Node* current = queue.front();
- if (current == sink) return true;
- queue.pop();
- for (auto input : current->inputs()) {
- if (!visited[input->id()]) {
- queue.push(input);
- visited[input->id()] = true;
- }
- }
- }
- return false;
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/select-lowering.h b/deps/v8/src/compiler/select-lowering.h
index 5894d356cb..b882a3125f 100644
--- a/deps/v8/src/compiler/select-lowering.h
+++ b/deps/v8/src/compiler/select-lowering.h
@@ -5,10 +5,7 @@
#ifndef V8_COMPILER_SELECT_LOWERING_H_
#define V8_COMPILER_SELECT_LOWERING_H_
-#include <map>
-
#include "src/compiler/graph-reducer.h"
-#include "src/zone-allocator.h"
namespace v8 {
namespace internal {
@@ -28,17 +25,11 @@ class SelectLowering final : public Reducer {
Reduction Reduce(Node* node) override;
private:
- typedef std::multimap<Node*, Node*, std::less<Node*>,
- zone_allocator<std::pair<Node* const, Node*>>> Merges;
-
- bool ReachableFrom(Node* const sink, Node* const source);
-
CommonOperatorBuilder* common() const { return common_; }
Graph* graph() const { return graph_; }
CommonOperatorBuilder* common_;
Graph* graph_;
- Merges merges_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index ed7fe9d14b..88931f5df7 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -104,15 +104,6 @@ class UseInfo {
static UseInfo None() {
return UseInfo(MachineRepresentation::kNone, Truncation::None());
}
-
- // Truncation to a representation that is smaller than the preferred
- // one.
- static UseInfo Float64TruncatingToWord32() {
- return UseInfo(MachineRepresentation::kFloat64, Truncation::Word32());
- }
- static UseInfo Word64TruncatingToWord32() {
- return UseInfo(MachineRepresentation::kWord64, Truncation::Word32());
- }
static UseInfo AnyTruncatingToBool() {
return UseInfo(MachineRepresentation::kNone, Truncation::Bool());
}
@@ -242,71 +233,6 @@ class InputUseInfos {
class RepresentationSelector {
public:
// Information for each node tracked during the fixpoint.
- class NodeOutputInfo {
- public:
- NodeOutputInfo(MachineRepresentation representation, Type* type)
- : type_(type), representation_(representation) {}
- NodeOutputInfo()
- : type_(Type::None()), representation_(MachineRepresentation::kNone) {}
-
- MachineRepresentation representation() const { return representation_; }
- Type* type() const { return type_; }
-
- static NodeOutputInfo None() {
- return NodeOutputInfo(MachineRepresentation::kNone, Type::None());
- }
-
- static NodeOutputInfo Float32() {
- return NodeOutputInfo(MachineRepresentation::kFloat32, Type::Number());
- }
-
- static NodeOutputInfo Float64() {
- return NodeOutputInfo(MachineRepresentation::kFloat64, Type::Number());
- }
-
- static NodeOutputInfo NumberTruncatedToWord32() {
- return NodeOutputInfo(MachineRepresentation::kWord32, Type::Number());
- }
-
- static NodeOutputInfo Int32() {
- return NodeOutputInfo(MachineRepresentation::kWord32, Type::Signed32());
- }
-
- static NodeOutputInfo Uint32() {
- return NodeOutputInfo(MachineRepresentation::kWord32, Type::Unsigned32());
- }
-
- static NodeOutputInfo Bool() {
- return NodeOutputInfo(MachineRepresentation::kBit, Type::Boolean());
- }
-
- static NodeOutputInfo Int64() {
- // TODO(jarin) Fix once we have a real int64 type.
- return NodeOutputInfo(MachineRepresentation::kWord64, Type::Internal());
- }
-
- static NodeOutputInfo Uint64() {
- // TODO(jarin) Fix once we have a real uint64 type.
- return NodeOutputInfo(MachineRepresentation::kWord64, Type::Internal());
- }
-
- static NodeOutputInfo AnyTagged() {
- return NodeOutputInfo(MachineRepresentation::kTagged, Type::Any());
- }
-
- static NodeOutputInfo NumberTagged() {
- return NodeOutputInfo(MachineRepresentation::kTagged, Type::Number());
- }
-
- static NodeOutputInfo Pointer() {
- return NodeOutputInfo(MachineType::PointerRepresentation(), Type::Any());
- }
-
- private:
- Type* type_;
- MachineRepresentation representation_;
- };
-
class NodeInfo {
public:
// Adds new use to the node. Returns true if something has changed
@@ -322,17 +248,15 @@ class RepresentationSelector {
void set_visited() { visited_ = true; }
bool visited() const { return visited_; }
Truncation truncation() const { return truncation_; }
- void set_output_type(NodeOutputInfo output) { output_ = output; }
+ void set_output(MachineRepresentation output) { representation_ = output; }
- Type* output_type() const { return output_.type(); }
- MachineRepresentation representation() const {
- return output_.representation();
- }
+ MachineRepresentation representation() const { return representation_; }
private:
- bool queued_ = false; // Bookkeeping for the traversal.
- bool visited_ = false; // Bookkeeping for the traversal.
- NodeOutputInfo output_; // Output type and representation.
+ bool queued_ = false; // Bookkeeping for the traversal.
+ bool visited_ = false; // Bookkeeping for the traversal.
+ MachineRepresentation representation_ =
+ MachineRepresentation::kNone; // Output representation.
Truncation truncation_ = Truncation::None(); // Information about uses.
};
@@ -467,76 +391,31 @@ class RepresentationSelector {
}
}
- void SetOutputFromMachineType(Node* node, MachineType machine_type) {
- Type* type = Type::None();
- switch (machine_type.semantic()) {
- case MachineSemantic::kNone:
- type = Type::None();
- break;
- case MachineSemantic::kBool:
- type = Type::Boolean();
- break;
- case MachineSemantic::kInt32:
- type = Type::Signed32();
- break;
- case MachineSemantic::kUint32:
- type = Type::Unsigned32();
- break;
- case MachineSemantic::kInt64:
- // TODO(jarin) Fix once we have proper int64.
- type = Type::Internal();
- break;
- case MachineSemantic::kUint64:
- // TODO(jarin) Fix once we have proper uint64.
- type = Type::Internal();
- break;
- case MachineSemantic::kNumber:
- type = Type::Number();
- break;
- case MachineSemantic::kAny:
- type = Type::Any();
- break;
- }
- return SetOutput(node, NodeOutputInfo(machine_type.representation(), type));
- }
-
- void SetOutput(Node* node, NodeOutputInfo output_info) {
- // Every node should have at most one output representation. Note that
- // phis can have 0, if they have not been used in a representation-inducing
- // instruction.
- Type* output_type = output_info.type();
- if (NodeProperties::IsTyped(node)) {
- output_type = Type::Intersect(NodeProperties::GetType(node),
- output_info.type(), jsgraph_->zone());
- }
+ void SetOutput(Node* node, MachineRepresentation representation) {
NodeInfo* info = GetInfo(node);
- DCHECK(info->output_type()->Is(output_type));
- DCHECK(MachineRepresentationIsSubtype(info->representation(),
- output_info.representation()));
- if (!output_type->Is(info->output_type()) ||
- output_info.representation() != info->representation()) {
- EnqueueUses(node);
- }
- info->set_output_type(
- NodeOutputInfo(output_info.representation(), output_type));
+ DCHECK(
+ MachineRepresentationIsSubtype(info->representation(), representation));
+ info->set_output(representation);
}
+ Type* GetUpperBound(Node* node) { return NodeProperties::GetType(node); }
+
bool BothInputsAreSigned32(Node* node) {
DCHECK_EQ(2, node->InputCount());
- return GetInfo(node->InputAt(0))->output_type()->Is(Type::Signed32()) &&
- GetInfo(node->InputAt(1))->output_type()->Is(Type::Signed32());
+ return GetUpperBound(node->InputAt(0))->Is(Type::Signed32()) &&
+ GetUpperBound(node->InputAt(1))->Is(Type::Signed32());
}
bool BothInputsAreUnsigned32(Node* node) {
DCHECK_EQ(2, node->InputCount());
- return GetInfo(node->InputAt(0))->output_type()->Is(Type::Unsigned32()) &&
- GetInfo(node->InputAt(1))->output_type()->Is(Type::Unsigned32());
+ return GetUpperBound(node->InputAt(0))->Is(Type::Unsigned32()) &&
+ GetUpperBound(node->InputAt(1))->Is(Type::Unsigned32());
}
bool BothInputsAre(Node* node, Type* type) {
DCHECK_EQ(2, node->InputCount());
- return GetInfo(node->InputAt(0))->output_type()->Is(type) &&
- GetInfo(node->InputAt(1))->output_type()->Is(type);
+ return GetUpperBound(node->InputAt(0))->Is(type) &&
+ GetUpperBound(node->InputAt(1))->Is(type);
}
void ConvertInput(Node* node, int index, UseInfo use) {
@@ -556,7 +435,7 @@ class RepresentationSelector {
PrintUseInfo(use);
TRACE("\n");
Node* n = changer_->GetRepresentationFor(
- input, input_info->representation(), input_info->output_type(),
+ input, input_info->representation(), GetUpperBound(input),
use.preferred(), use.truncation());
node->ReplaceInput(index, n);
}
@@ -602,7 +481,7 @@ class RepresentationSelector {
// Helper for binops of the R x L -> O variety.
void VisitBinop(Node* node, UseInfo left_use, UseInfo right_use,
- NodeOutputInfo output) {
+ MachineRepresentation output) {
DCHECK_EQ(2, node->op()->ValueInputCount());
ProcessInput(node, 0, left_use);
ProcessInput(node, 1, right_use);
@@ -613,80 +492,77 @@ class RepresentationSelector {
}
// Helper for binops of the I x I -> O variety.
- void VisitBinop(Node* node, UseInfo input_use, NodeOutputInfo output) {
+ void VisitBinop(Node* node, UseInfo input_use, MachineRepresentation output) {
VisitBinop(node, input_use, input_use, output);
}
// Helper for unops of the I -> O variety.
- void VisitUnop(Node* node, UseInfo input_use, NodeOutputInfo output) {
+ void VisitUnop(Node* node, UseInfo input_use, MachineRepresentation output) {
DCHECK_EQ(1, node->InputCount());
ProcessInput(node, 0, input_use);
SetOutput(node, output);
}
// Helper for leaf nodes.
- void VisitLeaf(Node* node, NodeOutputInfo output) {
+ void VisitLeaf(Node* node, MachineRepresentation output) {
DCHECK_EQ(0, node->InputCount());
SetOutput(node, output);
}
// Helpers for specific types of binops.
void VisitFloat64Binop(Node* node) {
- VisitBinop(node, UseInfo::Float64(), NodeOutputInfo::Float64());
+ VisitBinop(node, UseInfo::Float64(), MachineRepresentation::kFloat64);
}
void VisitInt32Binop(Node* node) {
- VisitBinop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Int32());
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
}
void VisitWord32TruncatingBinop(Node* node) {
VisitBinop(node, UseInfo::TruncatingWord32(),
- NodeOutputInfo::NumberTruncatedToWord32());
+ MachineRepresentation::kWord32);
}
void VisitUint32Binop(Node* node) {
- VisitBinop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Uint32());
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
}
void VisitInt64Binop(Node* node) {
- VisitBinop(node, UseInfo::TruncatingWord64(), NodeOutputInfo::Int64());
+ VisitBinop(node, UseInfo::TruncatingWord64(),
+ MachineRepresentation::kWord64);
}
void VisitUint64Binop(Node* node) {
- VisitBinop(node, UseInfo::TruncatingWord64(), NodeOutputInfo::Uint64());
+ VisitBinop(node, UseInfo::TruncatingWord64(),
+ MachineRepresentation::kWord64);
}
void VisitFloat64Cmp(Node* node) {
- VisitBinop(node, UseInfo::Float64(), NodeOutputInfo::Bool());
+ VisitBinop(node, UseInfo::Float64(), MachineRepresentation::kBit);
}
void VisitInt32Cmp(Node* node) {
- VisitBinop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Bool());
+ VisitBinop(node, UseInfo::TruncatingWord32(), MachineRepresentation::kBit);
}
void VisitUint32Cmp(Node* node) {
- VisitBinop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Bool());
+ VisitBinop(node, UseInfo::TruncatingWord32(), MachineRepresentation::kBit);
}
void VisitInt64Cmp(Node* node) {
- VisitBinop(node, UseInfo::TruncatingWord64(), NodeOutputInfo::Bool());
+ VisitBinop(node, UseInfo::TruncatingWord64(), MachineRepresentation::kBit);
}
void VisitUint64Cmp(Node* node) {
- VisitBinop(node, UseInfo::TruncatingWord64(), NodeOutputInfo::Bool());
+ VisitBinop(node, UseInfo::TruncatingWord64(), MachineRepresentation::kBit);
}
// Infer representation for phi-like nodes.
- NodeOutputInfo GetOutputInfoForPhi(Node* node, Truncation use) {
- // Compute the type.
- Type* type = GetInfo(node->InputAt(0))->output_type();
- for (int i = 1; i < node->op()->ValueInputCount(); ++i) {
- type = Type::Union(type, GetInfo(node->InputAt(i))->output_type(),
- jsgraph_->zone());
- }
-
+ MachineRepresentation GetOutputInfoForPhi(Node* node, Truncation use) {
// Compute the representation.
- MachineRepresentation rep = MachineRepresentation::kTagged;
+ Type* type = GetUpperBound(node);
if (type->Is(Type::None())) {
- rep = MachineRepresentation::kNone;
+ return MachineRepresentation::kNone;
} else if (type->Is(Type::Signed32()) || type->Is(Type::Unsigned32())) {
- rep = MachineRepresentation::kWord32;
+ return MachineRepresentation::kWord32;
} else if (use.TruncatesToWord32()) {
- rep = MachineRepresentation::kWord32;
+ return MachineRepresentation::kWord32;
} else if (type->Is(Type::Boolean())) {
- rep = MachineRepresentation::kBit;
+ return MachineRepresentation::kBit;
} else if (type->Is(Type::Number())) {
- rep = MachineRepresentation::kFloat64;
+ return MachineRepresentation::kFloat64;
} else if (type->Is(Type::Internal())) {
// We mark (u)int64 as Type::Internal.
// TODO(jarin) This is a workaround for our lack of (u)int64
@@ -702,10 +578,10 @@ class RepresentationSelector {
MachineRepresentation::kWord64);
}
#endif
- rep = is_word64 ? MachineRepresentation::kWord64
- : MachineRepresentation::kTagged;
+ return is_word64 ? MachineRepresentation::kWord64
+ : MachineRepresentation::kTagged;
}
- return NodeOutputInfo(rep, type);
+ return MachineRepresentation::kTagged;
}
// Helper for handling selects.
@@ -713,20 +589,20 @@ class RepresentationSelector {
SimplifiedLowering* lowering) {
ProcessInput(node, 0, UseInfo::Bool());
- NodeOutputInfo output = GetOutputInfoForPhi(node, truncation);
+ MachineRepresentation output = GetOutputInfoForPhi(node, truncation);
SetOutput(node, output);
if (lower()) {
// Update the select operator.
SelectParameters p = SelectParametersOf(node->op());
- if (output.representation() != p.representation()) {
- NodeProperties::ChangeOp(node, lowering->common()->Select(
- output.representation(), p.hint()));
+ if (output != p.representation()) {
+ NodeProperties::ChangeOp(node,
+ lowering->common()->Select(output, p.hint()));
}
}
// Convert inputs to the output representation of this phi, pass the
// truncation truncation along.
- UseInfo input_use(output.representation(), truncation);
+ UseInfo input_use(output, truncation);
ProcessInput(node, 1, input_use);
ProcessInput(node, 2, input_use);
}
@@ -734,21 +610,20 @@ class RepresentationSelector {
// Helper for handling phis.
void VisitPhi(Node* node, Truncation truncation,
SimplifiedLowering* lowering) {
- NodeOutputInfo output = GetOutputInfoForPhi(node, truncation);
+ MachineRepresentation output = GetOutputInfoForPhi(node, truncation);
SetOutput(node, output);
int values = node->op()->ValueInputCount();
if (lower()) {
// Update the phi operator.
- if (output.representation() != PhiRepresentationOf(node->op())) {
- NodeProperties::ChangeOp(
- node, lowering->common()->Phi(output.representation(), values));
+ if (output != PhiRepresentationOf(node->op())) {
+ NodeProperties::ChangeOp(node, lowering->common()->Phi(output, values));
}
}
// Convert inputs to the output representation of this phi, pass the
// truncation truncation along.
- UseInfo input_use(output.representation(), truncation);
+ UseInfo input_use(output, truncation);
for (int i = 0; i < node->InputCount(); i++) {
ProcessInput(node, i, i < values ? input_use : UseInfo::None());
}
@@ -772,9 +647,10 @@ class RepresentationSelector {
}
if (sig->return_count() > 0) {
- SetOutputFromMachineType(node, desc->GetMachineSignature()->GetReturn());
+ SetOutput(node,
+ desc->GetMachineSignature()->GetReturn().representation());
} else {
- SetOutput(node, NodeOutputInfo::AnyTagged());
+ SetOutput(node, MachineRepresentation::kTagged);
}
}
@@ -801,10 +677,10 @@ class RepresentationSelector {
new (zone->New(sizeof(ZoneVector<MachineType>)))
ZoneVector<MachineType>(node->InputCount(), zone);
for (int i = 0; i < node->InputCount(); i++) {
- NodeInfo* input_info = GetInfo(node->InputAt(i));
- MachineType machine_type(
- input_info->representation(),
- DeoptValueSemanticOf(input_info->output_type()));
+ Node* input = node->InputAt(i);
+ NodeInfo* input_info = GetInfo(input);
+ MachineType machine_type(input_info->representation(),
+ DeoptValueSemanticOf(GetUpperBound(input)));
DCHECK(machine_type.representation() !=
MachineRepresentation::kWord32 ||
machine_type.semantic() == MachineSemantic::kInt32 ||
@@ -814,7 +690,7 @@ class RepresentationSelector {
NodeProperties::ChangeOp(node,
jsgraph_->common()->TypedStateValues(types));
}
- SetOutput(node, NodeOutputInfo::AnyTagged());
+ SetOutput(node, MachineRepresentation::kTagged);
}
const Operator* Int32Op(Node* node) {
@@ -839,29 +715,34 @@ class RepresentationSelector {
//------------------------------------------------------------------
case IrOpcode::kStart:
case IrOpcode::kDead:
- return VisitLeaf(node, NodeOutputInfo::None());
+ return VisitLeaf(node, MachineRepresentation::kNone);
case IrOpcode::kParameter: {
// TODO(titzer): use representation from linkage.
- Type* type = NodeProperties::GetType(node);
ProcessInput(node, 0, UseInfo::None());
- SetOutput(node, NodeOutputInfo(MachineRepresentation::kTagged, type));
+ SetOutput(node, MachineRepresentation::kTagged);
return;
}
case IrOpcode::kInt32Constant:
- return VisitLeaf(node, NodeOutputInfo::Int32());
+ return VisitLeaf(node, MachineRepresentation::kWord32);
case IrOpcode::kInt64Constant:
- return VisitLeaf(node, NodeOutputInfo::Int64());
+ return VisitLeaf(node, MachineRepresentation::kWord64);
case IrOpcode::kFloat32Constant:
- return VisitLeaf(node, NodeOutputInfo::Float32());
+ return VisitLeaf(node, MachineRepresentation::kFloat32);
case IrOpcode::kFloat64Constant:
- return VisitLeaf(node, NodeOutputInfo::Float64());
+ return VisitLeaf(node, MachineRepresentation::kFloat64);
case IrOpcode::kExternalConstant:
- return VisitLeaf(node, NodeOutputInfo::Pointer());
+ return VisitLeaf(node, MachineType::PointerRepresentation());
case IrOpcode::kNumberConstant:
- return VisitLeaf(node, NodeOutputInfo::NumberTagged());
+ return VisitLeaf(node, MachineRepresentation::kTagged);
case IrOpcode::kHeapConstant:
- return VisitLeaf(node, NodeOutputInfo::AnyTagged());
+ return VisitLeaf(node, MachineRepresentation::kTagged);
+ case IrOpcode::kDeoptimizeIf:
+ case IrOpcode::kDeoptimizeUnless:
+ ProcessInput(node, 0, UseInfo::Bool());
+ ProcessInput(node, 1, UseInfo::AnyTagged());
+ ProcessRemainingInputs(node, 2);
+ break;
case IrOpcode::kBranch:
ProcessInput(node, 0, UseInfo::Bool());
EnqueueInput(node, NodeProperties::FirstControlIndex(node));
@@ -889,7 +770,7 @@ class RepresentationSelector {
JS_OP_LIST(DEFINE_JS_CASE)
#undef DEFINE_JS_CASE
VisitInputs(node);
- return SetOutput(node, NodeOutputInfo::AnyTagged());
+ return SetOutput(node, MachineRepresentation::kTagged);
//------------------------------------------------------------------
// Simplified operators.
@@ -909,7 +790,7 @@ class RepresentationSelector {
} else {
// No input representation requirement; adapt during lowering.
ProcessInput(node, 0, UseInfo::AnyTruncatingToBool());
- SetOutput(node, NodeOutputInfo::Bool());
+ SetOutput(node, MachineRepresentation::kBit);
}
break;
}
@@ -927,7 +808,7 @@ class RepresentationSelector {
} else {
// No input representation requirement; adapt during lowering.
ProcessInput(node, 0, UseInfo::AnyTruncatingToBool());
- SetOutput(node, NodeOutputInfo::Int32());
+ SetOutput(node, MachineRepresentation::kWord32);
}
break;
}
@@ -1055,46 +936,80 @@ class RepresentationSelector {
break;
}
case IrOpcode::kNumberShiftLeft: {
- Type* rhs_type = GetInfo(node->InputAt(1))->output_type();
+ Type* rhs_type = GetUpperBound(node->InputAt(1));
VisitBinop(node, UseInfo::TruncatingWord32(),
- UseInfo::TruncatingWord32(), NodeOutputInfo::Int32());
+ UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
if (lower()) {
lowering->DoShift(node, lowering->machine()->Word32Shl(), rhs_type);
}
break;
}
case IrOpcode::kNumberShiftRight: {
- Type* rhs_type = GetInfo(node->InputAt(1))->output_type();
+ Type* rhs_type = GetUpperBound(node->InputAt(1));
VisitBinop(node, UseInfo::TruncatingWord32(),
- UseInfo::TruncatingWord32(), NodeOutputInfo::Int32());
+ UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
if (lower()) {
lowering->DoShift(node, lowering->machine()->Word32Sar(), rhs_type);
}
break;
}
case IrOpcode::kNumberShiftRightLogical: {
- Type* rhs_type = GetInfo(node->InputAt(1))->output_type();
+ Type* rhs_type = GetUpperBound(node->InputAt(1));
VisitBinop(node, UseInfo::TruncatingWord32(),
- UseInfo::TruncatingWord32(), NodeOutputInfo::Uint32());
+ UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
if (lower()) {
lowering->DoShift(node, lowering->machine()->Word32Shr(), rhs_type);
}
break;
}
+ case IrOpcode::kNumberImul: {
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
+ if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
+ break;
+ }
+ case IrOpcode::kNumberClz32: {
+ VisitUnop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
+ break;
+ }
+ case IrOpcode::kNumberCeil: {
+ VisitUnop(node, UseInfo::Float64(), MachineRepresentation::kFloat64);
+ if (lower()) DeferReplacement(node, lowering->Float64Ceil(node));
+ break;
+ }
+ case IrOpcode::kNumberFloor: {
+ VisitUnop(node, UseInfo::Float64(), MachineRepresentation::kFloat64);
+ if (lower()) DeferReplacement(node, lowering->Float64Floor(node));
+ break;
+ }
+ case IrOpcode::kNumberRound: {
+ VisitUnop(node, UseInfo::Float64(), MachineRepresentation::kFloat64);
+ if (lower()) DeferReplacement(node, lowering->Float64Round(node));
+ break;
+ }
+ case IrOpcode::kNumberTrunc: {
+ VisitUnop(node, UseInfo::Float64(), MachineRepresentation::kFloat64);
+ if (lower()) DeferReplacement(node, lowering->Float64Trunc(node));
+ break;
+ }
case IrOpcode::kNumberToInt32: {
// Just change representation if necessary.
- VisitUnop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Int32());
+ VisitUnop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
if (lower()) DeferReplacement(node, node->InputAt(0));
break;
}
case IrOpcode::kNumberToUint32: {
// Just change representation if necessary.
- VisitUnop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Uint32());
+ VisitUnop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
if (lower()) DeferReplacement(node, node->InputAt(0));
break;
}
case IrOpcode::kNumberIsHoleNaN: {
- VisitUnop(node, UseInfo::Float64(), NodeOutputInfo::Bool());
+ VisitUnop(node, UseInfo::Float64(), MachineRepresentation::kBit);
if (lower()) {
// NumberIsHoleNaN(x) => Word32Equal(Float64ExtractLowWord32(x),
// #HoleNaNLower32)
@@ -1109,7 +1024,7 @@ class RepresentationSelector {
break;
}
case IrOpcode::kPlainPrimitiveToNumber: {
- VisitUnop(node, UseInfo::AnyTagged(), NodeOutputInfo::NumberTagged());
+ VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
if (lower()) {
// PlainPrimitiveToNumber(x) => Call(ToNumberStub, x, no-context)
Operator::Properties properties = node->op()->properties();
@@ -1126,38 +1041,93 @@ class RepresentationSelector {
break;
}
case IrOpcode::kReferenceEqual: {
- VisitBinop(node, UseInfo::AnyTagged(), NodeOutputInfo::Bool());
+ VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
if (lower()) {
NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
}
break;
}
case IrOpcode::kStringEqual: {
- VisitBinop(node, UseInfo::AnyTagged(), NodeOutputInfo::Bool());
- if (lower()) lowering->DoStringEqual(node);
+ VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+ if (lower()) {
+ // StringEqual(x, y) => Call(StringEqualStub, x, y, no-context)
+ Operator::Properties properties = node->op()->properties();
+ Callable callable = CodeFactory::StringEqual(jsgraph_->isolate());
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ jsgraph_->isolate(), jsgraph_->zone(), callable.descriptor(), 0,
+ flags, properties);
+ node->InsertInput(jsgraph_->zone(), 0,
+ jsgraph_->HeapConstant(callable.code()));
+ node->InsertInput(jsgraph_->zone(), 3, jsgraph_->NoContextConstant());
+ NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
+ }
break;
}
case IrOpcode::kStringLessThan: {
- VisitBinop(node, UseInfo::AnyTagged(), NodeOutputInfo::Bool());
- if (lower()) lowering->DoStringLessThan(node);
+ VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+ if (lower()) {
+ // StringLessThan(x, y) => Call(StringLessThanStub, x, y, no-context)
+ Operator::Properties properties = node->op()->properties();
+ Callable callable = CodeFactory::StringLessThan(jsgraph_->isolate());
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ jsgraph_->isolate(), jsgraph_->zone(), callable.descriptor(), 0,
+ flags, properties);
+ node->InsertInput(jsgraph_->zone(), 0,
+ jsgraph_->HeapConstant(callable.code()));
+ node->InsertInput(jsgraph_->zone(), 3, jsgraph_->NoContextConstant());
+ NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
+ }
break;
}
case IrOpcode::kStringLessThanOrEqual: {
- VisitBinop(node, UseInfo::AnyTagged(), NodeOutputInfo::Bool());
- if (lower()) lowering->DoStringLessThanOrEqual(node);
+ VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+ if (lower()) {
+ // StringLessThanOrEqual(x, y)
+ // => Call(StringLessThanOrEqualStub, x, y, no-context)
+ Operator::Properties properties = node->op()->properties();
+ Callable callable =
+ CodeFactory::StringLessThanOrEqual(jsgraph_->isolate());
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ jsgraph_->isolate(), jsgraph_->zone(), callable.descriptor(), 0,
+ flags, properties);
+ node->InsertInput(jsgraph_->zone(), 0,
+ jsgraph_->HeapConstant(callable.code()));
+ node->InsertInput(jsgraph_->zone(), 3, jsgraph_->NoContextConstant());
+ NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
+ }
+ break;
+ }
+ case IrOpcode::kStringToNumber: {
+ VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+ if (lower()) {
+ // StringToNumber(x) => Call(StringToNumberStub, x, no-context)
+ Operator::Properties properties = node->op()->properties();
+ Callable callable = CodeFactory::StringToNumber(jsgraph_->isolate());
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ jsgraph_->isolate(), jsgraph_->zone(), callable.descriptor(), 0,
+ flags, properties);
+ node->InsertInput(jsgraph_->zone(), 0,
+ jsgraph_->HeapConstant(callable.code()));
+ node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
+ NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
+ }
break;
}
case IrOpcode::kAllocate: {
ProcessInput(node, 0, UseInfo::AnyTagged());
ProcessRemainingInputs(node, 1);
- SetOutput(node, NodeOutputInfo::AnyTagged());
+ SetOutput(node, MachineRepresentation::kTagged);
break;
}
case IrOpcode::kLoadField: {
FieldAccess access = FieldAccessOf(node->op());
ProcessInput(node, 0, UseInfoForBasePointer(access));
ProcessRemainingInputs(node, 1);
- SetOutputFromMachineType(node, access.machine_type);
+ SetOutput(node, access.machine_type.representation());
break;
}
case IrOpcode::kStoreField: {
@@ -1166,7 +1136,7 @@ class RepresentationSelector {
ProcessInput(node, 1, TruncatingUseInfoFromRepresentation(
access.machine_type.representation()));
ProcessRemainingInputs(node, 2);
- SetOutput(node, NodeOutputInfo::None());
+ SetOutput(node, MachineRepresentation::kNone);
break;
}
case IrOpcode::kLoadBuffer: {
@@ -1176,29 +1146,26 @@ class RepresentationSelector {
ProcessInput(node, 2, UseInfo::TruncatingWord32()); // length
ProcessRemainingInputs(node, 3);
- NodeOutputInfo output_info;
+ MachineRepresentation output;
if (truncation.TruncatesUndefinedToZeroOrNaN()) {
if (truncation.TruncatesNaNToZero()) {
// If undefined is truncated to a non-NaN number, we can use
// the load's representation.
- output_info = NodeOutputInfo(access.machine_type().representation(),
- NodeProperties::GetType(node));
+ output = access.machine_type().representation();
} else {
// If undefined is truncated to a number, but the use can
// observe NaN, we need to output at least the float32
// representation.
if (access.machine_type().representation() ==
MachineRepresentation::kFloat32) {
- output_info =
- NodeOutputInfo(access.machine_type().representation(),
- NodeProperties::GetType(node));
+ output = access.machine_type().representation();
} else {
if (access.machine_type().representation() !=
MachineRepresentation::kFloat64) {
// TODO(bmeurer): See comment on abort_compilation_.
if (lower()) lowering->abort_compilation_ = true;
}
- output_info = NodeOutputInfo::Float64();
+ output = MachineRepresentation::kFloat64;
}
}
} else {
@@ -1207,11 +1174,10 @@ class RepresentationSelector {
// If undefined is not truncated away, we need to have the tagged
// representation.
- output_info = NodeOutputInfo::AnyTagged();
+ output = MachineRepresentation::kTagged;
}
- SetOutput(node, output_info);
- if (lower())
- lowering->DoLoadBuffer(node, output_info.representation(), changer_);
+ SetOutput(node, output);
+ if (lower()) lowering->DoLoadBuffer(node, output, changer_);
break;
}
case IrOpcode::kStoreBuffer: {
@@ -1223,7 +1189,7 @@ class RepresentationSelector {
TruncatingUseInfoFromRepresentation(
access.machine_type().representation())); // value
ProcessRemainingInputs(node, 4);
- SetOutput(node, NodeOutputInfo::None());
+ SetOutput(node, MachineRepresentation::kNone);
if (lower()) lowering->DoStoreBuffer(node);
break;
}
@@ -1232,7 +1198,7 @@ class RepresentationSelector {
ProcessInput(node, 0, UseInfoForBasePointer(access)); // base
ProcessInput(node, 1, UseInfo::TruncatingWord32()); // index
ProcessRemainingInputs(node, 2);
- SetOutputFromMachineType(node, access.machine_type);
+ SetOutput(node, access.machine_type.representation());
break;
}
case IrOpcode::kStoreElement: {
@@ -1243,22 +1209,15 @@ class RepresentationSelector {
TruncatingUseInfoFromRepresentation(
access.machine_type.representation())); // value
ProcessRemainingInputs(node, 3);
- SetOutput(node, NodeOutputInfo::None());
+ SetOutput(node, MachineRepresentation::kNone);
break;
}
- case IrOpcode::kObjectIsNumber: {
+ case IrOpcode::kObjectIsNumber:
+ case IrOpcode::kObjectIsReceiver:
+ case IrOpcode::kObjectIsSmi:
+ case IrOpcode::kObjectIsUndetectable: {
ProcessInput(node, 0, UseInfo::AnyTagged());
- SetOutput(node, NodeOutputInfo::Bool());
- break;
- }
- case IrOpcode::kObjectIsReceiver: {
- ProcessInput(node, 0, UseInfo::AnyTagged());
- SetOutput(node, NodeOutputInfo::Bool());
- break;
- }
- case IrOpcode::kObjectIsSmi: {
- ProcessInput(node, 0, UseInfo::AnyTagged());
- SetOutput(node, NodeOutputInfo::Bool());
+ SetOutput(node, MachineRepresentation::kBit);
break;
}
@@ -1272,7 +1231,7 @@ class RepresentationSelector {
ProcessInput(node, 0, UseInfo::AnyTagged()); // tagged pointer
ProcessInput(node, 1, UseInfo::PointerInt()); // index
ProcessRemainingInputs(node, 2);
- SetOutputFromMachineType(node, rep);
+ SetOutput(node, rep.representation());
break;
}
case IrOpcode::kStore: {
@@ -1284,13 +1243,13 @@ class RepresentationSelector {
ProcessInput(node, 2,
TruncatingUseInfoFromRepresentation(rep.representation()));
ProcessRemainingInputs(node, 3);
- SetOutput(node, NodeOutputInfo::None());
+ SetOutput(node, MachineRepresentation::kNone);
break;
}
case IrOpcode::kWord32Shr:
// We output unsigned int32 for shift right because JavaScript.
return VisitBinop(node, UseInfo::TruncatingWord32(),
- NodeOutputInfo::Uint32());
+ MachineRepresentation::kWord32);
case IrOpcode::kWord32And:
case IrOpcode::kWord32Or:
case IrOpcode::kWord32Xor:
@@ -1300,14 +1259,14 @@ class RepresentationSelector {
// though the machine bits are the same for either signed or unsigned,
// because JavaScript considers the result from these operations signed.
return VisitBinop(node, UseInfo::TruncatingWord32(),
- NodeOutputInfo::Int32());
+ MachineRepresentation::kWord32);
case IrOpcode::kWord32Equal:
return VisitBinop(node, UseInfo::TruncatingWord32(),
- NodeOutputInfo::Bool());
+ MachineRepresentation::kBit);
case IrOpcode::kWord32Clz:
return VisitUnop(node, UseInfo::TruncatingWord32(),
- NodeOutputInfo::Uint32());
+ MachineRepresentation::kWord32);
case IrOpcode::kInt32Add:
case IrOpcode::kInt32Sub:
@@ -1352,45 +1311,33 @@ class RepresentationSelector {
case IrOpcode::kWord64Shr:
case IrOpcode::kWord64Sar:
return VisitBinop(node, UseInfo::TruncatingWord64(),
- NodeOutputInfo::Int64());
+ MachineRepresentation::kWord64);
case IrOpcode::kWord64Equal:
return VisitBinop(node, UseInfo::TruncatingWord64(),
- NodeOutputInfo::Bool());
+ MachineRepresentation::kBit);
case IrOpcode::kChangeInt32ToInt64:
- return VisitUnop(
- node, UseInfo::TruncatingWord32(),
- NodeOutputInfo(MachineRepresentation::kWord64, Type::Signed32()));
+ return VisitUnop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord64);
case IrOpcode::kChangeUint32ToUint64:
- return VisitUnop(
- node, UseInfo::TruncatingWord32(),
- NodeOutputInfo(MachineRepresentation::kWord64, Type::Unsigned32()));
+ return VisitUnop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord64);
case IrOpcode::kTruncateFloat64ToFloat32:
- return VisitUnop(node, UseInfo::Float64(), NodeOutputInfo::Float32());
+ return VisitUnop(node, UseInfo::Float64(),
+ MachineRepresentation::kFloat32);
case IrOpcode::kTruncateFloat64ToInt32:
- return VisitUnop(node, UseInfo::Float64(), NodeOutputInfo::Int32());
- case IrOpcode::kTruncateInt64ToInt32:
- // TODO(titzer): Is kTypeInt32 correct here?
- return VisitUnop(node, UseInfo::Word64TruncatingToWord32(),
- NodeOutputInfo::Int32());
+ return VisitUnop(node, UseInfo::Float64(),
+ MachineRepresentation::kWord32);
case IrOpcode::kChangeFloat32ToFloat64:
- return VisitUnop(node, UseInfo::Float32(), NodeOutputInfo::Float64());
+ return VisitUnop(node, UseInfo::Float32(),
+ MachineRepresentation::kFloat64);
case IrOpcode::kChangeInt32ToFloat64:
- return VisitUnop(
- node, UseInfo::TruncatingWord32(),
- NodeOutputInfo(MachineRepresentation::kFloat64, Type::Signed32()));
+ return VisitUnop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kFloat64);
case IrOpcode::kChangeUint32ToFloat64:
return VisitUnop(node, UseInfo::TruncatingWord32(),
- NodeOutputInfo(MachineRepresentation::kFloat64,
- Type::Unsigned32()));
- case IrOpcode::kChangeFloat64ToInt32:
- return VisitUnop(node, UseInfo::Float64TruncatingToWord32(),
- NodeOutputInfo::Int32());
- case IrOpcode::kChangeFloat64ToUint32:
- return VisitUnop(node, UseInfo::Float64TruncatingToWord32(),
- NodeOutputInfo::Uint32());
-
+ MachineRepresentation::kFloat64);
case IrOpcode::kFloat64Add:
case IrOpcode::kFloat64Sub:
case IrOpcode::kFloat64Mul:
@@ -1404,29 +1351,40 @@ class RepresentationSelector {
case IrOpcode::kFloat64RoundTruncate:
case IrOpcode::kFloat64RoundTiesAway:
case IrOpcode::kFloat64RoundUp:
- return VisitUnop(node, UseInfo::Float64(), NodeOutputInfo::Float64());
+ return VisitUnop(node, UseInfo::Float64(),
+ MachineRepresentation::kFloat64);
case IrOpcode::kFloat64Equal:
case IrOpcode::kFloat64LessThan:
case IrOpcode::kFloat64LessThanOrEqual:
return VisitFloat64Cmp(node);
case IrOpcode::kFloat64ExtractLowWord32:
case IrOpcode::kFloat64ExtractHighWord32:
- return VisitUnop(node, UseInfo::Float64(), NodeOutputInfo::Int32());
+ return VisitUnop(node, UseInfo::Float64(),
+ MachineRepresentation::kWord32);
case IrOpcode::kFloat64InsertLowWord32:
case IrOpcode::kFloat64InsertHighWord32:
return VisitBinop(node, UseInfo::Float64(), UseInfo::TruncatingWord32(),
- NodeOutputInfo::Float64());
+ MachineRepresentation::kFloat64);
case IrOpcode::kLoadStackPointer:
case IrOpcode::kLoadFramePointer:
case IrOpcode::kLoadParentFramePointer:
- return VisitLeaf(node, NodeOutputInfo::Pointer());
+ return VisitLeaf(node, MachineType::PointerRepresentation());
case IrOpcode::kStateValues:
VisitStateValues(node);
break;
+
+ // The following opcodes are not produced before representation
+ // inference runs, so we do not have any real test coverage.
+ // Simply fail here.
+ case IrOpcode::kChangeFloat64ToInt32:
+ case IrOpcode::kChangeFloat64ToUint32:
+ case IrOpcode::kTruncateInt64ToInt32:
+ FATAL("Representation inference: unsupported opcodes.");
+
default:
VisitInputs(node);
// Assume the output is tagged.
- SetOutput(node, NodeOutputInfo::AnyTagged());
+ SetOutput(node, MachineRepresentation::kTagged);
break;
}
}
@@ -1437,7 +1395,7 @@ class RepresentationSelector {
replacement->op()->mnemonic());
if (replacement->id() < count_ &&
- GetInfo(node)->output_type()->Is(GetInfo(replacement)->output_type())) {
+ GetUpperBound(node)->Is(GetUpperBound(replacement))) {
// Replace with a previously existing node eagerly only if the type is the
// same.
node->ReplaceUses(replacement);
@@ -1455,9 +1413,7 @@ class RepresentationSelector {
void PrintOutputInfo(NodeInfo* info) {
if (FLAG_trace_representation) {
OFStream os(stdout);
- os << info->representation() << " (";
- info->output_type()->PrintTo(os, Type::SEMANTIC_DIM);
- os << ")";
+ os << info->representation();
}
}
@@ -1595,21 +1551,392 @@ void SimplifiedLowering::DoStoreBuffer(Node* node) {
NodeProperties::ChangeOp(node, machine()->CheckedStore(rep));
}
+Node* SimplifiedLowering::Float64Ceil(Node* const node) {
+ Node* const one = jsgraph()->Float64Constant(1.0);
+ Node* const zero = jsgraph()->Float64Constant(0.0);
+ Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
+ Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
+ Node* const minus_two_52 = jsgraph()->Float64Constant(-4503599627370496.0E0);
+ Node* const input = node->InputAt(0);
+
+ // Use fast hardware instruction if available.
+ if (machine()->Float64RoundUp().IsSupported()) {
+ return graph()->NewNode(machine()->Float64RoundUp().op(), input);
+ }
+
+ // General case for ceil.
+ //
+ // if 0.0 < input then
+ // if 2^52 <= input then
+ // input
+ // else
+ // let temp1 = (2^52 + input) - 2^52 in
+ // if temp1 < input then
+ // temp1 + 1
+ // else
+ // temp1
+ // else
+ // if input == 0 then
+ // input
+ // else
+ // if input <= -2^52 then
+ // input
+ // else
+ // let temp1 = -0 - input in
+ // let temp2 = (2^52 + temp1) - 2^52 in
+ // let temp3 = (if temp1 < temp2 then temp2 - 1 else temp2) in
+ // -0 - temp3
+ //
+ // Note: We do not use the Diamond helper class here, because it really hurts
+ // readability with nested diamonds.
+
+ Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
+ Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), check0,
+ graph()->start());
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* vtrue0;
+ {
+ Node* check1 =
+ graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
+ Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
-Node* SimplifiedLowering::StringComparison(Node* node) {
- Operator::Properties properties = node->op()->properties();
- Callable callable = CodeFactory::StringCompare(isolate());
- CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), zone(), callable.descriptor(), 0, flags, properties);
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* vtrue1 = input;
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* vfalse1;
+ {
+ Node* temp1 = graph()->NewNode(
+ machine()->Float64Sub(),
+ graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
+ vfalse1 = graph()->NewNode(
+ common()->Select(MachineRepresentation::kFloat64),
+ graph()->NewNode(machine()->Float64LessThan(), temp1, input),
+ graph()->NewNode(machine()->Float64Add(), temp1, one), temp1);
+ }
+
+ if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue1, vfalse1, if_true0);
+ }
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* vfalse0;
+ {
+ Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* vtrue1 = input;
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* vfalse1;
+ {
+ Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
+ input, minus_two_52);
+ Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check2, if_false1);
+
+ Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+ Node* vtrue2 = input;
+
+ Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+ Node* vfalse2;
+ {
+ Node* temp1 =
+ graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
+ Node* temp2 = graph()->NewNode(
+ machine()->Float64Sub(),
+ graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
+ Node* temp3 = graph()->NewNode(
+ common()->Select(MachineRepresentation::kFloat64),
+ graph()->NewNode(machine()->Float64LessThan(), temp1, temp2),
+ graph()->NewNode(machine()->Float64Sub(), temp2, one), temp2);
+ vfalse2 = graph()->NewNode(machine()->Float64Sub(), minus_zero, temp3);
+ }
+
+ if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
+ vfalse1 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue2, vfalse2, if_false1);
+ }
+
+ if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ vfalse0 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue1, vfalse1, if_false0);
+ }
+
+ Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ return graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue0, vfalse0, merge0);
+}
+
+Node* SimplifiedLowering::Float64Floor(Node* const node) {
+ Node* const one = jsgraph()->Float64Constant(1.0);
+ Node* const zero = jsgraph()->Float64Constant(0.0);
+ Node* const minus_one = jsgraph()->Float64Constant(-1.0);
+ Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
+ Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
+ Node* const minus_two_52 = jsgraph()->Float64Constant(-4503599627370496.0E0);
+ Node* const input = node->InputAt(0);
+
+ // Use fast hardware instruction if available.
+ if (machine()->Float64RoundDown().IsSupported()) {
+ return graph()->NewNode(machine()->Float64RoundDown().op(), input);
+ }
+
+ // General case for floor.
+ //
+ // if 0.0 < input then
+ // if 2^52 <= input then
+ // input
+ // else
+ // let temp1 = (2^52 + input) - 2^52 in
+ // if input < temp1 then
+ // temp1 - 1
+ // else
+ // temp1
+ // else
+ // if input == 0 then
+ // input
+ // else
+ // if input <= -2^52 then
+ // input
+ // else
+ // let temp1 = -0 - input in
+ // let temp2 = (2^52 + temp1) - 2^52 in
+ // if temp2 < temp1 then
+ // -1 - temp2
+ // else
+ // -0 - temp2
+ //
+ // Note: We do not use the Diamond helper class here, because it really hurts
+ // readability with nested diamonds.
+
+ Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
+ Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), check0,
+ graph()->start());
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* vtrue0;
+ {
+ Node* check1 =
+ graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
+ Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* vtrue1 = input;
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* vfalse1;
+ {
+ Node* temp1 = graph()->NewNode(
+ machine()->Float64Sub(),
+ graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
+ vfalse1 = graph()->NewNode(
+ common()->Select(MachineRepresentation::kFloat64),
+ graph()->NewNode(machine()->Float64LessThan(), input, temp1),
+ graph()->NewNode(machine()->Float64Sub(), temp1, one), temp1);
+ }
+
+ if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue1, vfalse1, if_true0);
+ }
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* vfalse0;
+ {
+ Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* vtrue1 = input;
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* vfalse1;
+ {
+ Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
+ input, minus_two_52);
+ Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check2, if_false1);
+
+ Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+ Node* vtrue2 = input;
+
+ Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+ Node* vfalse2;
+ {
+ Node* temp1 =
+ graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
+ Node* temp2 = graph()->NewNode(
+ machine()->Float64Sub(),
+ graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
+ vfalse2 = graph()->NewNode(
+ common()->Select(MachineRepresentation::kFloat64),
+ graph()->NewNode(machine()->Float64LessThan(), temp2, temp1),
+ graph()->NewNode(machine()->Float64Sub(), minus_one, temp2),
+ graph()->NewNode(machine()->Float64Sub(), minus_zero, temp2));
+ }
+
+ if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
+ vfalse1 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue2, vfalse2, if_false1);
+ }
+
+ if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ vfalse0 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue1, vfalse1, if_false0);
+ }
+
+ Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ return graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue0, vfalse0, merge0);
+}
+
+Node* SimplifiedLowering::Float64Round(Node* const node) {
+ Node* const one = jsgraph()->Float64Constant(1.0);
+ Node* const one_half = jsgraph()->Float64Constant(0.5);
+ Node* const input = node->InputAt(0);
+
+ // Round up towards Infinity, and adjust if the difference exceeds 0.5.
+ Node* result = Float64Ceil(node);
return graph()->NewNode(
- common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
- NodeProperties::GetValueInput(node, 0),
- NodeProperties::GetValueInput(node, 1), jsgraph()->NoContextConstant(),
- NodeProperties::GetEffectInput(node),
- NodeProperties::GetControlInput(node));
+ common()->Select(MachineRepresentation::kFloat64),
+ graph()->NewNode(
+ machine()->Float64LessThanOrEqual(),
+ graph()->NewNode(machine()->Float64Sub(), result, one_half), input),
+ result, graph()->NewNode(machine()->Float64Sub(), result, one));
}
+Node* SimplifiedLowering::Float64Trunc(Node* const node) {
+ Node* const one = jsgraph()->Float64Constant(1.0);
+ Node* const zero = jsgraph()->Float64Constant(0.0);
+ Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
+ Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
+ Node* const minus_two_52 = jsgraph()->Float64Constant(-4503599627370496.0E0);
+ Node* const input = node->InputAt(0);
+
+ // Use fast hardware instruction if available.
+ if (machine()->Float64RoundTruncate().IsSupported()) {
+ return graph()->NewNode(machine()->Float64RoundTruncate().op(), input);
+ }
+
+ // General case for trunc.
+ //
+ // if 0.0 < input then
+ // if 2^52 <= input then
+ // input
+ // else
+ // let temp1 = (2^52 + input) - 2^52 in
+ // if input < temp1 then
+ // temp1 - 1
+ // else
+ // temp1
+ // else
+ // if input == 0 then
+ // input
+ // else
+ // if input <= -2^52 then
+ // input
+ // else
+ // let temp1 = -0 - input in
+ // let temp2 = (2^52 + temp1) - 2^52 in
+ // let temp3 = (if temp1 < temp2 then temp2 - 1 else temp2) in
+ // -0 - temp3
+ //
+ // Note: We do not use the Diamond helper class here, because it really hurts
+ // readability with nested diamonds.
+
+ Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
+ Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), check0,
+ graph()->start());
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* vtrue0;
+ {
+ Node* check1 =
+ graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
+ Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* vtrue1 = input;
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* vfalse1;
+ {
+ Node* temp1 = graph()->NewNode(
+ machine()->Float64Sub(),
+ graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
+ vfalse1 = graph()->NewNode(
+ common()->Select(MachineRepresentation::kFloat64),
+ graph()->NewNode(machine()->Float64LessThan(), input, temp1),
+ graph()->NewNode(machine()->Float64Sub(), temp1, one), temp1);
+ }
+
+ if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue1, vfalse1, if_true0);
+ }
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* vfalse0;
+ {
+ Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* vtrue1 = input;
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* vfalse1;
+ {
+ Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
+ input, minus_two_52);
+ Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check2, if_false1);
+
+ Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+ Node* vtrue2 = input;
+
+ Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+ Node* vfalse2;
+ {
+ Node* temp1 =
+ graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
+ Node* temp2 = graph()->NewNode(
+ machine()->Float64Sub(),
+ graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
+ Node* temp3 = graph()->NewNode(
+ common()->Select(MachineRepresentation::kFloat64),
+ graph()->NewNode(machine()->Float64LessThan(), temp1, temp2),
+ graph()->NewNode(machine()->Float64Sub(), temp2, one), temp2);
+ vfalse2 = graph()->NewNode(machine()->Float64Sub(), minus_zero, temp3);
+ }
+
+ if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
+ vfalse1 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue2, vfalse2, if_false1);
+ }
+
+ if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ vfalse0 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue1, vfalse1, if_false0);
+ }
+
+ Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ return graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue0, vfalse0, merge0);
+}
Node* SimplifiedLowering::Int32Div(Node* const node) {
Int32BinopMatcher m(node);
@@ -1873,53 +2200,6 @@ void SimplifiedLowering::DoShift(Node* node, Operator const* op,
NodeProperties::ChangeOp(node, op);
}
-
-namespace {
-
-void ReplaceEffectUses(Node* node, Node* replacement) {
- // Requires distinguishing between value and effect edges.
- DCHECK(replacement->op()->EffectOutputCount() > 0);
- for (Edge edge : node->use_edges()) {
- if (NodeProperties::IsEffectEdge(edge)) {
- edge.UpdateTo(replacement);
- } else {
- DCHECK(NodeProperties::IsValueEdge(edge));
- }
- }
-}
-
-} // namespace
-
-
-void SimplifiedLowering::DoStringEqual(Node* node) {
- Node* comparison = StringComparison(node);
- ReplaceEffectUses(node, comparison);
- node->ReplaceInput(0, comparison);
- node->ReplaceInput(1, jsgraph()->SmiConstant(EQUAL));
- node->TrimInputCount(2);
- NodeProperties::ChangeOp(node, machine()->WordEqual());
-}
-
-
-void SimplifiedLowering::DoStringLessThan(Node* node) {
- Node* comparison = StringComparison(node);
- ReplaceEffectUses(node, comparison);
- node->ReplaceInput(0, comparison);
- node->ReplaceInput(1, jsgraph()->SmiConstant(EQUAL));
- node->TrimInputCount(2);
- NodeProperties::ChangeOp(node, machine()->IntLessThan());
-}
-
-
-void SimplifiedLowering::DoStringLessThanOrEqual(Node* node) {
- Node* comparison = StringComparison(node);
- ReplaceEffectUses(node, comparison);
- node->ReplaceInput(0, comparison);
- node->ReplaceInput(1, jsgraph()->SmiConstant(EQUAL));
- node->TrimInputCount(2);
- NodeProperties::ChangeOp(node, machine()->IntLessThanOrEqual());
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h
index 358bd97f9c..8b711a9659 100644
--- a/deps/v8/src/compiler/simplified-lowering.h
+++ b/deps/v8/src/compiler/simplified-lowering.h
@@ -37,9 +37,6 @@ class SimplifiedLowering final {
RepresentationChanger* changer);
void DoStoreBuffer(Node* node);
void DoShift(Node* node, Operator const* op, Type* rhs_type);
- void DoStringEqual(Node* node);
- void DoStringLessThan(Node* node);
- void DoStringLessThanOrEqual(Node* node);
// TODO(bmeurer): This is a gigantic hack to support the gigantic LoadBuffer
// typing hack to support the gigantic "asm.js should be fast without proper
@@ -58,7 +55,10 @@ class SimplifiedLowering final {
// position information via the SourcePositionWrapper like all other reducers.
SourcePositionTable* source_positions_;
- Node* StringComparison(Node* node);
+ Node* Float64Ceil(Node* const node);
+ Node* Float64Floor(Node* const node);
+ Node* Float64Round(Node* const node);
+ Node* Float64Trunc(Node* const node);
Node* Int32Div(Node* const node);
Node* Int32Mod(Node* const node);
Node* Uint32Div(Node* const node);
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.cc b/deps/v8/src/compiler/simplified-operator-reducer.cc
index 120d7926d5..012004a8af 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.cc
+++ b/deps/v8/src/compiler/simplified-operator-reducer.cc
@@ -9,14 +9,14 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/operator-properties.h"
#include "src/conversions-inl.h"
+#include "src/type-cache.h"
namespace v8 {
namespace internal {
namespace compiler {
SimplifiedOperatorReducer::SimplifiedOperatorReducer(JSGraph* jsgraph)
- : jsgraph_(jsgraph) {}
-
+ : jsgraph_(jsgraph), type_cache_(TypeCache::Get()) {}
SimplifiedOperatorReducer::~SimplifiedOperatorReducer() {}
@@ -89,6 +89,17 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
if (m.HasValue()) return ReplaceNumber(FastUI2D(m.Value()));
break;
}
+ case IrOpcode::kNumberCeil:
+ case IrOpcode::kNumberFloor:
+ case IrOpcode::kNumberRound:
+ case IrOpcode::kNumberTrunc: {
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ Type* const input_type = NodeProperties::GetType(input);
+ if (input_type->Is(type_cache_.kIntegerOrMinusZeroOrNaN)) {
+ return Replace(input);
+ }
+ break;
+ }
case IrOpcode::kReferenceEqual:
return ReduceReferenceEqual(node);
default:
@@ -97,7 +108,6 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
return NoChange();
}
-
Reduction SimplifiedOperatorReducer::ReduceReferenceEqual(Node* node) {
DCHECK_EQ(IrOpcode::kReferenceEqual, node->opcode());
Node* const left = NodeProperties::GetValueInput(node, 0);
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.h b/deps/v8/src/compiler/simplified-operator-reducer.h
index 979a3d0399..13301c2af5 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.h
+++ b/deps/v8/src/compiler/simplified-operator-reducer.h
@@ -9,6 +9,10 @@
namespace v8 {
namespace internal {
+
+// Forward declarations.
+class TypeCache;
+
namespace compiler {
// Forward declarations.
@@ -42,6 +46,7 @@ class SimplifiedOperatorReducer final : public Reducer {
SimplifiedOperatorBuilder* simplified() const;
JSGraph* const jsgraph_;
+ TypeCache const& type_cache_;
DISALLOW_COPY_AND_ASSIGN(SimplifiedOperatorReducer);
};
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index c7abe9c96e..daa9501b8c 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -156,7 +156,6 @@ const ElementAccess& ElementAccessOf(const Operator* op) {
return OpParameter<ElementAccess>(op);
}
-
#define PURE_OP_LIST(V) \
V(BooleanNot, Operator::kNoProperties, 1) \
V(BooleanToNumber, Operator::kNoProperties, 1) \
@@ -174,10 +173,17 @@ const ElementAccess& ElementAccessOf(const Operator* op) {
V(NumberShiftLeft, Operator::kNoProperties, 2) \
V(NumberShiftRight, Operator::kNoProperties, 2) \
V(NumberShiftRightLogical, Operator::kNoProperties, 2) \
+ V(NumberImul, Operator::kNoProperties, 2) \
+ V(NumberClz32, Operator::kNoProperties, 1) \
+ V(NumberCeil, Operator::kNoProperties, 1) \
+ V(NumberFloor, Operator::kNoProperties, 1) \
+ V(NumberRound, Operator::kNoProperties, 1) \
+ V(NumberTrunc, Operator::kNoProperties, 1) \
V(NumberToInt32, Operator::kNoProperties, 1) \
V(NumberToUint32, Operator::kNoProperties, 1) \
V(NumberIsHoleNaN, Operator::kNoProperties, 1) \
V(PlainPrimitiveToNumber, Operator::kNoProperties, 1) \
+ V(StringToNumber, Operator::kNoProperties, 1) \
V(ChangeTaggedToInt32, Operator::kNoProperties, 1) \
V(ChangeTaggedToUint32, Operator::kNoProperties, 1) \
V(ChangeTaggedToFloat64, Operator::kNoProperties, 1) \
@@ -188,7 +194,8 @@ const ElementAccess& ElementAccessOf(const Operator* op) {
V(ChangeBitToBool, Operator::kNoProperties, 1) \
V(ObjectIsNumber, Operator::kNoProperties, 1) \
V(ObjectIsReceiver, Operator::kNoProperties, 1) \
- V(ObjectIsSmi, Operator::kNoProperties, 1)
+ V(ObjectIsSmi, Operator::kNoProperties, 1) \
+ V(ObjectIsUndetectable, Operator::kNoProperties, 1)
#define NO_THROW_OP_LIST(V) \
V(StringEqual, Operator::kCommutative, 2) \
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 2ed4b5fed0..a39d864914 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -143,6 +143,12 @@ class SimplifiedOperatorBuilder final : public ZoneObject {
const Operator* NumberShiftLeft();
const Operator* NumberShiftRight();
const Operator* NumberShiftRightLogical();
+ const Operator* NumberImul();
+ const Operator* NumberClz32();
+ const Operator* NumberCeil();
+ const Operator* NumberFloor();
+ const Operator* NumberRound();
+ const Operator* NumberTrunc();
const Operator* NumberToInt32();
const Operator* NumberToUint32();
const Operator* NumberIsHoleNaN();
@@ -154,6 +160,7 @@ class SimplifiedOperatorBuilder final : public ZoneObject {
const Operator* StringEqual();
const Operator* StringLessThan();
const Operator* StringLessThanOrEqual();
+ const Operator* StringToNumber();
const Operator* ChangeTaggedToInt32();
const Operator* ChangeTaggedToUint32();
@@ -167,6 +174,7 @@ class SimplifiedOperatorBuilder final : public ZoneObject {
const Operator* ObjectIsNumber();
const Operator* ObjectIsReceiver();
const Operator* ObjectIsSmi();
+ const Operator* ObjectIsUndetectable();
const Operator* Allocate(PretenureFlag pretenure = NOT_TENURED);
diff --git a/deps/v8/src/compiler/type-hint-analyzer.cc b/deps/v8/src/compiler/type-hint-analyzer.cc
index 42c4627b67..da4f2683a3 100644
--- a/deps/v8/src/compiler/type-hint-analyzer.cc
+++ b/deps/v8/src/compiler/type-hint-analyzer.cc
@@ -48,10 +48,10 @@ bool TypeHintAnalysis::GetToBooleanHints(TypeFeedbackId id,
if (i == infos_.end()) return false;
Handle<Code> code = i->second;
DCHECK_EQ(Code::TO_BOOLEAN_IC, code->kind());
- ToBooleanStub stub(code->GetIsolate(), code->extra_ic_state());
-// TODO(bmeurer): Replace ToBooleanStub::Types with ToBooleanHints.
-#define ASSERT_COMPATIBLE(NAME, Name) \
- STATIC_ASSERT(1 << ToBooleanStub::NAME == \
+ ToBooleanICStub stub(code->GetIsolate(), code->extra_ic_state());
+// TODO(bmeurer): Replace ToBooleanICStub::Types with ToBooleanHints.
+#define ASSERT_COMPATIBLE(NAME, Name) \
+ STATIC_ASSERT(1 << ToBooleanICStub::NAME == \
static_cast<int>(ToBooleanHint::k##Name))
ASSERT_COMPATIBLE(UNDEFINED, Undefined);
ASSERT_COMPATIBLE(BOOLEAN, Boolean);
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 9679513219..81c3d3d928 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -57,10 +57,8 @@ Typer::Typer(Isolate* isolate, Graph* graph, Flags flags,
unsigned32ish_ = Type::Union(Type::Unsigned32(), truncating_to_zero, zone);
falsish_ = Type::Union(
Type::Undetectable(),
- Type::Union(
- Type::Union(Type::Union(singleton_false_, cache_.kZeroish, zone),
- Type::NullOrUndefined(), zone),
- singleton_the_hole_, zone),
+ Type::Union(Type::Union(singleton_false_, cache_.kZeroish, zone),
+ singleton_the_hole_, zone),
zone);
truish_ = Type::Union(
singleton_true_,
@@ -116,6 +114,8 @@ class Typer::Visitor : public Reducer {
DECLARE_CASE(IfDefault)
DECLARE_CASE(Merge)
DECLARE_CASE(Deoptimize)
+ DECLARE_CASE(DeoptimizeIf)
+ DECLARE_CASE(DeoptimizeUnless)
DECLARE_CASE(Return)
DECLARE_CASE(TailCall)
DECLARE_CASE(Terminate)
@@ -160,6 +160,8 @@ class Typer::Visitor : public Reducer {
DECLARE_CASE(IfDefault)
DECLARE_CASE(Merge)
DECLARE_CASE(Deoptimize)
+ DECLARE_CASE(DeoptimizeIf)
+ DECLARE_CASE(DeoptimizeUnless)
DECLARE_CASE(Return)
DECLARE_CASE(TailCall)
DECLARE_CASE(Terminate)
@@ -238,12 +240,17 @@ class Typer::Visitor : public Reducer {
static Type* ToNumber(Type*, Typer*);
static Type* ToObject(Type*, Typer*);
static Type* ToString(Type*, Typer*);
+ static Type* NumberCeil(Type*, Typer*);
+ static Type* NumberFloor(Type*, Typer*);
+ static Type* NumberRound(Type*, Typer*);
+ static Type* NumberTrunc(Type*, Typer*);
static Type* NumberToInt32(Type*, Typer*);
static Type* NumberToUint32(Type*, Typer*);
static Type* ObjectIsNumber(Type*, Typer*);
static Type* ObjectIsReceiver(Type*, Typer*);
static Type* ObjectIsSmi(Type*, Typer*);
+ static Type* ObjectIsUndetectable(Type*, Typer*);
static Type* JSAddRanger(RangeType*, RangeType*, Typer*);
static Type* JSSubtractRanger(RangeType*, RangeType*, Typer*);
@@ -414,6 +421,11 @@ Type* Typer::Visitor::ToInteger(Type* type, Typer* t) {
// ES6 section 7.1.4 ToInteger ( argument )
type = ToNumber(type, t);
if (type->Is(t->cache_.kIntegerOrMinusZero)) return type;
+ if (type->Is(t->cache_.kIntegerOrMinusZeroOrNaN)) {
+ return Type::Union(
+ Type::Intersect(type, t->cache_.kIntegerOrMinusZero, t->zone()),
+ t->cache_.kSingletonZero, t->zone());
+ }
return t->cache_.kIntegerOrMinusZero;
}
@@ -469,7 +481,9 @@ Type* Typer::Visitor::ToObject(Type* type, Typer* t) {
// ES6 section 7.1.13 ToObject ( argument )
if (type->Is(Type::Receiver())) return type;
if (type->Is(Type::Primitive())) return Type::OtherObject();
- if (!type->Maybe(Type::Undetectable())) return Type::DetectableReceiver();
+ if (!type->Maybe(Type::OtherUndetectable())) {
+ return Type::DetectableReceiver();
+ }
return Type::Receiver();
}
@@ -482,6 +496,37 @@ Type* Typer::Visitor::ToString(Type* type, Typer* t) {
return Type::String();
}
+// static
+Type* Typer::Visitor::NumberCeil(Type* type, Typer* t) {
+ DCHECK(type->Is(Type::Number()));
+ if (type->Is(t->cache_.kIntegerOrMinusZeroOrNaN)) return type;
+ // TODO(bmeurer): We could infer a more precise type here.
+ return t->cache_.kIntegerOrMinusZeroOrNaN;
+}
+
+// static
+Type* Typer::Visitor::NumberFloor(Type* type, Typer* t) {
+ DCHECK(type->Is(Type::Number()));
+ if (type->Is(t->cache_.kIntegerOrMinusZeroOrNaN)) return type;
+ // TODO(bmeurer): We could infer a more precise type here.
+ return t->cache_.kIntegerOrMinusZeroOrNaN;
+}
+
+// static
+Type* Typer::Visitor::NumberRound(Type* type, Typer* t) {
+ DCHECK(type->Is(Type::Number()));
+ if (type->Is(t->cache_.kIntegerOrMinusZeroOrNaN)) return type;
+ // TODO(bmeurer): We could infer a more precise type here.
+ return t->cache_.kIntegerOrMinusZeroOrNaN;
+}
+
+// static
+Type* Typer::Visitor::NumberTrunc(Type* type, Typer* t) {
+ DCHECK(type->Is(Type::Number()));
+ if (type->Is(t->cache_.kIntegerOrMinusZeroOrNaN)) return type;
+ // TODO(bmeurer): We could infer a more precise type here.
+ return t->cache_.kIntegerOrMinusZeroOrNaN;
+}
Type* Typer::Visitor::NumberToInt32(Type* type, Typer* t) {
// TODO(neis): DCHECK(type->Is(Type::Number()));
@@ -533,6 +578,13 @@ Type* Typer::Visitor::ObjectIsSmi(Type* type, Typer* t) {
}
+Type* Typer::Visitor::ObjectIsUndetectable(Type* type, Typer* t) {
+ if (type->Is(Type::Undetectable())) return t->singleton_true_;
+ if (!type->Maybe(Type::Undetectable())) return t->singleton_false_;
+ return Type::Boolean();
+}
+
+
// -----------------------------------------------------------------------------
@@ -1171,7 +1223,7 @@ Type* Typer::Visitor::JSTypeOfTyper(Type* type, Typer* t) {
return Type::Constant(f->string_string(), t->zone());
} else if (type->Is(Type::Symbol())) {
return Type::Constant(f->symbol_string(), t->zone());
- } else if (type->Is(Type::Union(Type::Undefined(), Type::Undetectable(),
+ } else if (type->Is(Type::Union(Type::Undefined(), Type::OtherUndetectable(),
t->zone()))) {
return Type::Constant(f->undefined_string(), t->zone());
} else if (type->Is(Type::Null())) {
@@ -1198,26 +1250,29 @@ Type* Typer::Visitor::TypeJSToBoolean(Node* node) {
return TypeUnaryOp(node, ToBoolean);
}
-
-Type* Typer::Visitor::TypeJSToNumber(Node* node) {
- return TypeUnaryOp(node, ToNumber);
+Type* Typer::Visitor::TypeJSToInteger(Node* node) {
+ return TypeUnaryOp(node, ToInteger);
}
-
-Type* Typer::Visitor::TypeJSToString(Node* node) {
- return TypeUnaryOp(node, ToString);
+Type* Typer::Visitor::TypeJSToLength(Node* node) {
+ return TypeUnaryOp(node, ToLength);
}
-
Type* Typer::Visitor::TypeJSToName(Node* node) {
return TypeUnaryOp(node, ToName);
}
+Type* Typer::Visitor::TypeJSToNumber(Node* node) {
+ return TypeUnaryOp(node, ToNumber);
+}
Type* Typer::Visitor::TypeJSToObject(Node* node) {
return TypeUnaryOp(node, ToObject);
}
+Type* Typer::Visitor::TypeJSToString(Node* node) {
+ return TypeUnaryOp(node, ToString);
+}
// JS object operators.
@@ -1502,8 +1557,9 @@ Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
case kMathRandom:
return Type::OrderedNumber();
case kMathFloor:
- case kMathRound:
case kMathCeil:
+ case kMathRound:
+ case kMathTrunc:
return t->cache_.kIntegerOrMinusZeroOrNaN;
// Unary math functions.
case kMathAbs:
@@ -1573,15 +1629,8 @@ Type* Typer::Visitor::TypeJSCallRuntime(Node* node) {
case Runtime::kInlineDoubleHi:
return Type::Signed32();
case Runtime::kInlineConstructDouble:
- case Runtime::kInlineMathFloor:
- case Runtime::kInlineMathSqrt:
- case Runtime::kInlineMathAcos:
- case Runtime::kInlineMathAsin:
- case Runtime::kInlineMathAtan:
case Runtime::kInlineMathAtan2:
return Type::Number();
- case Runtime::kInlineMathClz32:
- return Type::Range(0, 32, zone());
case Runtime::kInlineCreateIterResultObject:
case Runtime::kInlineRegExpConstructResult:
return Type::OtherObject();
@@ -1708,6 +1757,27 @@ Type* Typer::Visitor::TypeNumberShiftRightLogical(Node* node) {
return Type::Unsigned32();
}
+Type* Typer::Visitor::TypeNumberImul(Node* node) { return Type::Signed32(); }
+
+Type* Typer::Visitor::TypeNumberClz32(Node* node) {
+ return typer_->cache_.kZeroToThirtyTwo;
+}
+
+Type* Typer::Visitor::TypeNumberCeil(Node* node) {
+ return TypeUnaryOp(node, NumberCeil);
+}
+
+Type* Typer::Visitor::TypeNumberFloor(Node* node) {
+ return TypeUnaryOp(node, NumberFloor);
+}
+
+Type* Typer::Visitor::TypeNumberRound(Node* node) {
+ return TypeUnaryOp(node, NumberRound);
+}
+
+Type* Typer::Visitor::TypeNumberTrunc(Node* node) {
+ return TypeUnaryOp(node, NumberTrunc);
+}
Type* Typer::Visitor::TypeNumberToInt32(Node* node) {
return TypeUnaryOp(node, NumberToInt32);
@@ -1750,6 +1820,9 @@ Type* Typer::Visitor::TypeStringLessThanOrEqual(Node* node) {
return Type::Boolean();
}
+Type* Typer::Visitor::TypeStringToNumber(Node* node) {
+ return TypeUnaryOp(node, ToNumber);
+}
namespace {
@@ -1925,6 +1998,11 @@ Type* Typer::Visitor::TypeObjectIsSmi(Node* node) {
}
+Type* Typer::Visitor::TypeObjectIsUndetectable(Node* node) {
+ return TypeUnaryOp(node, ObjectIsUndetectable);
+}
+
+
// Machine operators.
Type* Typer::Visitor::TypeLoad(Node* node) { return Type::Any(); }
@@ -2132,6 +2210,10 @@ Type* Typer::Visitor::TypeChangeFloat64ToUint32(Node* node) {
zone());
}
+Type* Typer::Visitor::TypeTruncateFloat64ToUint32(Node* node) {
+ return Type::Intersect(Type::Unsigned32(), Type::UntaggedIntegral32(),
+ zone());
+}
Type* Typer::Visitor::TypeTruncateFloat32ToInt32(Node* node) {
return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
@@ -2427,6 +2509,17 @@ Type* Typer::Visitor::TypeCheckedStore(Node* node) {
return nullptr;
}
+Type* Typer::Visitor::TypeInt32PairAdd(Node* node) { return Type::Internal(); }
+
+Type* Typer::Visitor::TypeInt32PairSub(Node* node) { return Type::Internal(); }
+
+Type* Typer::Visitor::TypeInt32PairMul(Node* node) { return Type::Internal(); }
+
+Type* Typer::Visitor::TypeWord32PairShl(Node* node) { return Type::Internal(); }
+
+Type* Typer::Visitor::TypeWord32PairShr(Node* node) { return Type::Internal(); }
+
+Type* Typer::Visitor::TypeWord32PairSar(Node* node) { return Type::Internal(); }
// Heap constants.
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 99480ca2ed..a69ace9480 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -29,13 +29,13 @@ namespace compiler {
static bool IsDefUseChainLinkPresent(Node* def, Node* use) {
- auto const uses = def->uses();
+ const Node::Uses uses = def->uses();
return std::find(uses.begin(), uses.end(), use) != uses.end();
}
static bool IsUseDefChainLinkPresent(Node* def, Node* use) {
- auto const inputs = use->inputs();
+ const Node::Inputs inputs = use->inputs();
return std::find(inputs.begin(), inputs.end(), def) != inputs.end();
}
@@ -135,6 +135,11 @@ void Verifier::Visitor::Check(Node* node) {
CheckOutput(value, node, value->op()->ValueOutputCount(), "value");
CHECK(IsDefUseChainLinkPresent(value, node));
CHECK(IsUseDefChainLinkPresent(value, node));
+ // Verify that only parameters and projections can have input nodes with
+ // multiple outputs.
+ CHECK(node->opcode() == IrOpcode::kParameter ||
+ node->opcode() == IrOpcode::kProjection ||
+ value->op()->ValueOutputCount() <= 1);
}
// Verify all context inputs are value nodes.
@@ -161,16 +166,6 @@ void Verifier::Visitor::Check(Node* node) {
CHECK(IsUseDefChainLinkPresent(control, node));
}
- // Verify all successors are projections if multiple value outputs exist.
- if (node->op()->ValueOutputCount() > 1) {
- for (Edge edge : node->use_edges()) {
- Node* use = edge.from();
- CHECK(!NodeProperties::IsValueEdge(edge) ||
- use->opcode() == IrOpcode::kProjection ||
- use->opcode() == IrOpcode::kParameter);
- }
- }
-
switch (node->opcode()) {
case IrOpcode::kStart:
// Start has no inputs.
@@ -194,7 +189,7 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kBranch: {
// Branch uses are IfTrue and IfFalse.
int count_true = 0, count_false = 0;
- for (auto use : node->uses()) {
+ for (const Node* use : node->uses()) {
CHECK(use->opcode() == IrOpcode::kIfTrue ||
use->opcode() == IrOpcode::kIfFalse);
if (use->opcode() == IrOpcode::kIfTrue) ++count_true;
@@ -232,10 +227,10 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kSwitch: {
// Switch uses are Case and Default.
int count_case = 0, count_default = 0;
- for (auto use : node->uses()) {
+ for (const Node* use : node->uses()) {
switch (use->opcode()) {
case IrOpcode::kIfValue: {
- for (auto user : node->uses()) {
+ for (const Node* user : node->uses()) {
if (user != use && user->opcode() == IrOpcode::kIfValue) {
CHECK_NE(OpParameter<int32_t>(use->op()),
OpParameter<int32_t>(user->op()));
@@ -274,11 +269,16 @@ void Verifier::Visitor::Check(Node* node) {
// Type is empty.
CheckNotTyped(node);
break;
+ case IrOpcode::kDeoptimizeIf:
+ case IrOpcode::kDeoptimizeUnless:
+ // Type is empty.
+ CheckNotTyped(node);
+ break;
case IrOpcode::kDeoptimize:
case IrOpcode::kReturn:
case IrOpcode::kThrow:
// Deoptimize, Return and Throw uses are End.
- for (auto use : node->uses()) {
+ for (const Node* use : node->uses()) {
CHECK_EQ(IrOpcode::kEnd, use->opcode());
}
// Type is empty.
@@ -292,7 +292,7 @@ void Verifier::Visitor::Check(Node* node) {
CHECK_EQ(IrOpcode::kLoop,
NodeProperties::GetControlInput(node)->opcode());
// Terminate uses are End.
- for (auto use : node->uses()) {
+ for (const Node* use : node->uses()) {
CHECK_EQ(IrOpcode::kEnd, use->opcode());
}
// Type is empty.
@@ -492,6 +492,18 @@ void Verifier::Visitor::Check(Node* node) {
// Type is Boolean.
CheckUpperIs(node, Type::Boolean());
break;
+ case IrOpcode::kJSToInteger:
+ // Type is OrderedNumber.
+ CheckUpperIs(node, Type::OrderedNumber());
+ break;
+ case IrOpcode::kJSToLength:
+ // Type is OrderedNumber.
+ CheckUpperIs(node, Type::OrderedNumber());
+ break;
+ case IrOpcode::kJSToName:
+ // Type is Name.
+ CheckUpperIs(node, Type::Name());
+ break;
case IrOpcode::kJSToNumber:
// Type is Number.
CheckUpperIs(node, Type::Number());
@@ -500,10 +512,6 @@ void Verifier::Visitor::Check(Node* node) {
// Type is String.
CheckUpperIs(node, Type::String());
break;
- case IrOpcode::kJSToName:
- // Type is Name.
- CheckUpperIs(node, Type::Name());
- break;
case IrOpcode::kJSToObject:
// Type is Receiver.
CheckUpperIs(node, Type::Receiver());
@@ -677,6 +685,25 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 1, Type::Unsigned32());
CheckUpperIs(node, Type::Unsigned32());
break;
+ case IrOpcode::kNumberImul:
+ // (Unsigned32, Unsigned32) -> Signed32
+ CheckValueInputIs(node, 0, Type::Unsigned32());
+ CheckValueInputIs(node, 1, Type::Unsigned32());
+ CheckUpperIs(node, Type::Signed32());
+ break;
+ case IrOpcode::kNumberClz32:
+ // Unsigned32 -> Unsigned32
+ CheckValueInputIs(node, 0, Type::Unsigned32());
+ CheckUpperIs(node, Type::Unsigned32());
+ break;
+ case IrOpcode::kNumberCeil:
+ case IrOpcode::kNumberFloor:
+ case IrOpcode::kNumberRound:
+ case IrOpcode::kNumberTrunc:
+ // Number -> Number
+ CheckValueInputIs(node, 0, Type::Number());
+ CheckUpperIs(node, Type::Number());
+ break;
case IrOpcode::kNumberToInt32:
// Number -> Signed32
CheckValueInputIs(node, 0, Type::Number());
@@ -705,6 +732,11 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 1, Type::String());
CheckUpperIs(node, Type::Boolean());
break;
+ case IrOpcode::kStringToNumber:
+ // String -> Number
+ CheckValueInputIs(node, 0, Type::String());
+ CheckUpperIs(node, Type::Number());
+ break;
case IrOpcode::kReferenceEqual: {
// (Unique, Any) -> Boolean and
// (Any, Unique) -> Boolean
@@ -714,6 +746,7 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kObjectIsNumber:
case IrOpcode::kObjectIsReceiver:
case IrOpcode::kObjectIsSmi:
+ case IrOpcode::kObjectIsUndetectable:
CheckValueInputIs(node, 0, Type::Any());
CheckUpperIs(node, Type::Boolean());
break;
@@ -935,6 +968,7 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kChangeFloat32ToFloat64:
case IrOpcode::kChangeFloat64ToInt32:
case IrOpcode::kChangeFloat64ToUint32:
+ case IrOpcode::kTruncateFloat64ToUint32:
case IrOpcode::kTruncateFloat32ToInt32:
case IrOpcode::kTruncateFloat32ToUint32:
case IrOpcode::kTryTruncateFloat32ToInt64:
@@ -945,6 +979,12 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kFloat64ExtractHighWord32:
case IrOpcode::kFloat64InsertLowWord32:
case IrOpcode::kFloat64InsertHighWord32:
+ case IrOpcode::kInt32PairAdd:
+ case IrOpcode::kInt32PairSub:
+ case IrOpcode::kInt32PairMul:
+ case IrOpcode::kWord32PairShl:
+ case IrOpcode::kWord32PairShr:
+ case IrOpcode::kWord32PairSar:
case IrOpcode::kLoadStackPointer:
case IrOpcode::kLoadFramePointer:
case IrOpcode::kLoadParentFramePointer:
@@ -959,7 +999,7 @@ void Verifier::Visitor::Check(Node* node) {
void Verifier::Run(Graph* graph, Typing typing) {
CHECK_NOT_NULL(graph->start());
CHECK_NOT_NULL(graph->end());
- Zone zone;
+ Zone zone(graph->zone()->allocator());
Visitor visitor(&zone, typing);
AllNodes all(&zone, graph);
for (Node* node : all.live) visitor.Check(node);
@@ -1049,7 +1089,7 @@ static void CheckInputsDominate(Schedule* schedule, BasicBlock* block,
void ScheduleVerifier::Run(Schedule* schedule) {
const size_t count = schedule->BasicBlockCount();
- Zone tmp_zone;
+ Zone tmp_zone(schedule->zone()->allocator());
Zone* zone = &tmp_zone;
BasicBlock* start = schedule->start();
BasicBlockVector* rpo_order = schedule->rpo_order();
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index 9c3858dd43..93d5a084b9 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -6,6 +6,7 @@
#include "src/isolate-inl.h"
+#include "src/base/platform/elapsed-timer.h"
#include "src/base/platform/platform.h"
#include "src/compiler/access-builder.h"
@@ -71,28 +72,8 @@ void MergeControlToEnd(JSGraph* jsgraph, Node* node) {
}
}
-
-enum TrapReason {
- kTrapUnreachable,
- kTrapMemOutOfBounds,
- kTrapDivByZero,
- kTrapDivUnrepresentable,
- kTrapRemByZero,
- kTrapFloatUnrepresentable,
- kTrapFuncInvalid,
- kTrapFuncSigMismatch,
- kTrapCount
-};
-
-
-static const char* kTrapMessages[] = {
- "unreachable", "memory access out of bounds",
- "divide by zero", "divide result unrepresentable",
- "remainder by zero", "integer result unrepresentable",
- "invalid function", "function signature mismatch"};
} // namespace
-
// A helper that handles building graph fragments for trapping.
// To avoid generating a ton of redundant code that just calls the runtime
// to trap, we generate a per-trap-reason block of code that all trap sites
@@ -103,17 +84,17 @@ class WasmTrapHelper : public ZoneObject {
: builder_(builder),
jsgraph_(builder->jsgraph()),
graph_(builder->jsgraph() ? builder->jsgraph()->graph() : nullptr) {
- for (int i = 0; i < kTrapCount; i++) traps_[i] = nullptr;
+ for (int i = 0; i < wasm::kTrapCount; i++) traps_[i] = nullptr;
}
// Make the current control path trap to unreachable.
- void Unreachable() { ConnectTrap(kTrapUnreachable); }
+ void Unreachable() { ConnectTrap(wasm::kTrapUnreachable); }
// Always trap with the given reason.
- void TrapAlways(TrapReason reason) { ConnectTrap(reason); }
+ void TrapAlways(wasm::TrapReason reason) { ConnectTrap(reason); }
// Add a check that traps if {node} is equal to {val}.
- Node* TrapIfEq32(TrapReason reason, Node* node, int32_t val) {
+ Node* TrapIfEq32(wasm::TrapReason reason, Node* node, int32_t val) {
Int32Matcher m(node);
if (m.HasValue() && !m.Is(val)) return graph()->start();
if (val == 0) {
@@ -127,12 +108,12 @@ class WasmTrapHelper : public ZoneObject {
}
// Add a check that traps if {node} is zero.
- Node* ZeroCheck32(TrapReason reason, Node* node) {
+ Node* ZeroCheck32(wasm::TrapReason reason, Node* node) {
return TrapIfEq32(reason, node, 0);
}
// Add a check that traps if {node} is equal to {val}.
- Node* TrapIfEq64(TrapReason reason, Node* node, int64_t val) {
+ Node* TrapIfEq64(wasm::TrapReason reason, Node* node, int64_t val) {
Int64Matcher m(node);
if (m.HasValue() && !m.Is(val)) return graph()->start();
AddTrapIfTrue(reason,
@@ -142,22 +123,22 @@ class WasmTrapHelper : public ZoneObject {
}
// Add a check that traps if {node} is zero.
- Node* ZeroCheck64(TrapReason reason, Node* node) {
+ Node* ZeroCheck64(wasm::TrapReason reason, Node* node) {
return TrapIfEq64(reason, node, 0);
}
// Add a trap if {cond} is true.
- void AddTrapIfTrue(TrapReason reason, Node* cond) {
+ void AddTrapIfTrue(wasm::TrapReason reason, Node* cond) {
AddTrapIf(reason, cond, true);
}
// Add a trap if {cond} is false.
- void AddTrapIfFalse(TrapReason reason, Node* cond) {
+ void AddTrapIfFalse(wasm::TrapReason reason, Node* cond) {
AddTrapIf(reason, cond, false);
}
// Add a trap if {cond} is true or false according to {iftrue}.
- void AddTrapIf(TrapReason reason, Node* cond, bool iftrue) {
+ void AddTrapIf(wasm::TrapReason reason, Node* cond, bool iftrue) {
Node** effect_ptr = builder_->effect_;
Node** control_ptr = builder_->control_;
Node* before = *effect_ptr;
@@ -198,14 +179,14 @@ class WasmTrapHelper : public ZoneObject {
WasmGraphBuilder* builder_;
JSGraph* jsgraph_;
Graph* graph_;
- Node* traps_[kTrapCount];
- Node* effects_[kTrapCount];
+ Node* traps_[wasm::kTrapCount];
+ Node* effects_[wasm::kTrapCount];
JSGraph* jsgraph() { return jsgraph_; }
Graph* graph() { return jsgraph_->graph(); }
CommonOperatorBuilder* common() { return jsgraph()->common(); }
- void ConnectTrap(TrapReason reason) {
+ void ConnectTrap(wasm::TrapReason reason) {
if (traps_[reason] == nullptr) {
// Create trap code for the first time this trap is used.
return BuildTrapCode(reason);
@@ -215,8 +196,9 @@ class WasmTrapHelper : public ZoneObject {
builder_->AppendToPhi(traps_[reason], effects_[reason], builder_->Effect());
}
- void BuildTrapCode(TrapReason reason) {
- Node* exception = builder_->String(kTrapMessages[reason]);
+ void BuildTrapCode(wasm::TrapReason reason) {
+ Node* exception =
+ builder_->String(wasm::WasmOpcodes::TrapReasonName(reason));
Node* end;
Node** control_ptr = builder_->control_;
Node** effect_ptr = builder_->effect_;
@@ -265,7 +247,6 @@ class WasmTrapHelper : public ZoneObject {
}
};
-
WasmGraphBuilder::WasmGraphBuilder(Zone* zone, JSGraph* jsgraph,
wasm::FunctionSig* function_signature)
: zone_(zone),
@@ -351,8 +332,7 @@ Node* WasmGraphBuilder::Merge(unsigned count, Node** controls) {
Node* WasmGraphBuilder::Phi(wasm::LocalType type, unsigned count, Node** vals,
Node* control) {
DCHECK(IrOpcode::IsMergeOpcode(control->opcode()));
- Node** buf = Realloc(vals, count);
- buf = Realloc(buf, count + 1);
+ Node** buf = Realloc(vals, count, count + 1);
buf[count] = control;
return graph()->NewNode(jsgraph()->common()->Phi(type, count), count + 1,
buf);
@@ -362,8 +342,7 @@ Node* WasmGraphBuilder::Phi(wasm::LocalType type, unsigned count, Node** vals,
Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects,
Node* control) {
DCHECK(IrOpcode::IsMergeOpcode(control->opcode()));
- Node** buf = Realloc(effects, count);
- buf = Realloc(buf, count + 1);
+ Node** buf = Realloc(effects, count, count + 1);
buf[count] = control;
return graph()->NewNode(jsgraph()->common()->EffectPhi(count), count + 1,
buf);
@@ -394,43 +373,14 @@ Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left,
case wasm::kExprI32Mul:
op = m->Int32Mul();
break;
- case wasm::kExprI32DivS: {
- trap_->ZeroCheck32(kTrapDivByZero, right);
- Node* before = *control_;
- Node* denom_is_m1;
- Node* denom_is_not_m1;
- Branch(graph()->NewNode(jsgraph()->machine()->Word32Equal(), right,
- jsgraph()->Int32Constant(-1)),
- &denom_is_m1, &denom_is_not_m1);
- *control_ = denom_is_m1;
- trap_->TrapIfEq32(kTrapDivUnrepresentable, left, kMinInt);
- if (*control_ != denom_is_m1) {
- *control_ = graph()->NewNode(jsgraph()->common()->Merge(2),
- denom_is_not_m1, *control_);
- } else {
- *control_ = before;
- }
- return graph()->NewNode(m->Int32Div(), left, right, *control_);
- }
+ case wasm::kExprI32DivS:
+ return BuildI32DivS(left, right);
case wasm::kExprI32DivU:
- op = m->Uint32Div();
- return graph()->NewNode(op, left, right,
- trap_->ZeroCheck32(kTrapDivByZero, right));
- case wasm::kExprI32RemS: {
- trap_->ZeroCheck32(kTrapRemByZero, right);
- Diamond d(graph(), jsgraph()->common(),
- graph()->NewNode(jsgraph()->machine()->Word32Equal(), right,
- jsgraph()->Int32Constant(-1)));
-
- Node* rem = graph()->NewNode(m->Int32Mod(), left, right, d.if_false);
-
- return d.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
- rem);
- }
+ return BuildI32DivU(left, right);
+ case wasm::kExprI32RemS:
+ return BuildI32RemS(left, right);
case wasm::kExprI32RemU:
- op = m->Uint32Mod();
- return graph()->NewNode(op, left, right,
- trap_->ZeroCheck32(kTrapRemByZero, right));
+ return BuildI32RemU(left, right);
case wasm::kExprI32And:
op = m->Word32And();
break;
@@ -442,13 +392,23 @@ Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left,
break;
case wasm::kExprI32Shl:
op = m->Word32Shl();
+ right = MaskShiftCount32(right);
break;
case wasm::kExprI32ShrU:
op = m->Word32Shr();
+ right = MaskShiftCount32(right);
break;
case wasm::kExprI32ShrS:
op = m->Word32Sar();
+ right = MaskShiftCount32(right);
+ break;
+ case wasm::kExprI32Ror:
+ op = m->Word32Ror();
+ right = MaskShiftCount32(right);
break;
+ case wasm::kExprI32Rol:
+ right = MaskShiftCount32(right);
+ return BuildI32Rol(left, right);
case wasm::kExprI32Eq:
op = m->Word32Equal();
break;
@@ -485,76 +445,62 @@ Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left,
case wasm::kExprI64And:
op = m->Word64And();
break;
-#if WASM_64
- // Opcodes only supported on 64-bit platforms.
- // TODO(titzer): query the machine operator builder here instead of #ifdef.
+ // todo(ahaas): I added a list of missing instructions here to make merging
+ // easier when I do them one by one.
+ // kExprI64Add:
case wasm::kExprI64Add:
op = m->Int64Add();
break;
+ // kExprI64Sub:
case wasm::kExprI64Sub:
op = m->Int64Sub();
break;
+ // kExprI64Mul:
case wasm::kExprI64Mul:
op = m->Int64Mul();
break;
- case wasm::kExprI64DivS: {
- trap_->ZeroCheck64(kTrapDivByZero, right);
- Node* before = *control_;
- Node* denom_is_m1;
- Node* denom_is_not_m1;
- Branch(graph()->NewNode(jsgraph()->machine()->Word64Equal(), right,
- jsgraph()->Int64Constant(-1)),
- &denom_is_m1, &denom_is_not_m1);
- *control_ = denom_is_m1;
- trap_->TrapIfEq64(kTrapDivUnrepresentable, left,
- std::numeric_limits<int64_t>::min());
- if (*control_ != denom_is_m1) {
- *control_ = graph()->NewNode(jsgraph()->common()->Merge(2),
- denom_is_not_m1, *control_);
- } else {
- *control_ = before;
- }
- return graph()->NewNode(m->Int64Div(), left, right, *control_);
- }
+ // kExprI64DivS:
+ case wasm::kExprI64DivS:
+ return BuildI64DivS(left, right);
+ // kExprI64DivU:
case wasm::kExprI64DivU:
- op = m->Uint64Div();
- return graph()->NewNode(op, left, right,
- trap_->ZeroCheck64(kTrapDivByZero, right));
- case wasm::kExprI64RemS: {
- trap_->ZeroCheck64(kTrapRemByZero, right);
- Diamond d(jsgraph()->graph(), jsgraph()->common(),
- graph()->NewNode(jsgraph()->machine()->Word64Equal(), right,
- jsgraph()->Int64Constant(-1)));
-
- Node* rem = graph()->NewNode(m->Int64Mod(), left, right, d.if_false);
-
- return d.Phi(MachineRepresentation::kWord64, jsgraph()->Int64Constant(0),
- rem);
- }
+ return BuildI64DivU(left, right);
+ // kExprI64RemS:
+ case wasm::kExprI64RemS:
+ return BuildI64RemS(left, right);
+ // kExprI64RemU:
case wasm::kExprI64RemU:
- op = m->Uint64Mod();
- return graph()->NewNode(op, left, right,
- trap_->ZeroCheck64(kTrapRemByZero, right));
+ return BuildI64RemU(left, right);
case wasm::kExprI64Ior:
op = m->Word64Or();
break;
+// kExprI64Xor:
case wasm::kExprI64Xor:
op = m->Word64Xor();
break;
+// kExprI64Shl:
case wasm::kExprI64Shl:
op = m->Word64Shl();
+ right = MaskShiftCount64(right);
break;
+ // kExprI64ShrU:
case wasm::kExprI64ShrU:
op = m->Word64Shr();
+ right = MaskShiftCount64(right);
break;
+ // kExprI64ShrS:
case wasm::kExprI64ShrS:
op = m->Word64Sar();
+ right = MaskShiftCount64(right);
break;
+ // kExprI64Eq:
case wasm::kExprI64Eq:
op = m->Word64Equal();
break;
+// kExprI64Ne:
case wasm::kExprI64Ne:
return Invert(Binop(wasm::kExprI64Eq, left, right));
+// kExprI64LtS:
case wasm::kExprI64LtS:
op = m->Int64LessThan();
break;
@@ -583,8 +529,12 @@ Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left,
op = m->Uint64LessThanOrEqual();
std::swap(left, right);
break;
-#endif
-
+ case wasm::kExprI64Ror:
+ op = m->Word64Ror();
+ right = MaskShiftCount64(right);
+ break;
+ case wasm::kExprI64Rol:
+ return BuildI64Rol(left, right);
case wasm::kExprF32CopySign:
return BuildF32CopySign(left, right);
case wasm::kExprF64CopySign:
@@ -659,6 +609,15 @@ Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left,
return BuildF32Max(left, right);
case wasm::kExprF64Max:
return BuildF64Max(left, right);
+ case wasm::kExprF64Pow: {
+ return BuildF64Pow(left, right);
+ }
+ case wasm::kExprF64Atan2: {
+ return BuildF64Atan2(left, right);
+ }
+ case wasm::kExprF64Mod: {
+ return BuildF64Mod(left, right);
+ }
default:
op = UnsupportedOpcode(opcode);
}
@@ -670,7 +629,7 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input) {
const Operator* op;
MachineOperatorBuilder* m = jsgraph()->machine();
switch (opcode) {
- case wasm::kExprBoolNot:
+ case wasm::kExprI32Eqz:
op = m->Word32Equal();
return graph()->NewNode(op, input, jsgraph()->Int32Constant(0));
case wasm::kExprF32Abs:
@@ -786,79 +745,62 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input) {
op = m->Float64RoundTiesEven().op();
break;
}
+ case wasm::kExprF64Acos: {
+ return BuildF64Acos(input);
+ }
+ case wasm::kExprF64Asin: {
+ return BuildF64Asin(input);
+ }
+ case wasm::kExprF64Atan: {
+ return BuildF64Atan(input);
+ }
+ case wasm::kExprF64Cos: {
+ return BuildF64Cos(input);
+ }
+ case wasm::kExprF64Sin: {
+ return BuildF64Sin(input);
+ }
+ case wasm::kExprF64Tan: {
+ return BuildF64Tan(input);
+ }
+ case wasm::kExprF64Exp: {
+ return BuildF64Exp(input);
+ }
+ case wasm::kExprF64Log: {
+ return BuildF64Log(input);
+ }
+ // kExprI32ConvertI64:
case wasm::kExprI32ConvertI64:
op = m->TruncateInt64ToInt32();
break;
-#if WASM_64
- // Opcodes only supported on 64-bit platforms.
- // TODO(titzer): query the machine operator builder here instead of #ifdef.
+ // kExprI64SConvertI32:
case wasm::kExprI64SConvertI32:
op = m->ChangeInt32ToInt64();
break;
+ // kExprI64UConvertI32:
case wasm::kExprI64UConvertI32:
op = m->ChangeUint32ToUint64();
break;
- case wasm::kExprF32SConvertI64:
- op = m->RoundInt64ToFloat32();
- break;
- case wasm::kExprF32UConvertI64:
- op = m->RoundUint64ToFloat32();
- break;
- case wasm::kExprF64SConvertI64:
- op = m->RoundInt64ToFloat64();
- break;
- case wasm::kExprF64UConvertI64:
- op = m->RoundUint64ToFloat64();
- break;
- case wasm::kExprI64SConvertF32: {
- Node* trunc = graph()->NewNode(m->TryTruncateFloat32ToInt64(), input);
- Node* result =
- graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
- Node* overflow =
- graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
- trap_->ZeroCheck64(kTrapFloatUnrepresentable, overflow);
- return result;
- }
- case wasm::kExprI64SConvertF64: {
- Node* trunc = graph()->NewNode(m->TryTruncateFloat64ToInt64(), input);
- Node* result =
- graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
- Node* overflow =
- graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
- trap_->ZeroCheck64(kTrapFloatUnrepresentable, overflow);
- return result;
- }
- case wasm::kExprI64UConvertF32: {
- Node* trunc = graph()->NewNode(m->TryTruncateFloat32ToUint64(), input);
- Node* result =
- graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
- Node* overflow =
- graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
- trap_->ZeroCheck64(kTrapFloatUnrepresentable, overflow);
- return result;
- }
- case wasm::kExprI64UConvertF64: {
- Node* trunc = graph()->NewNode(m->TryTruncateFloat64ToUint64(), input);
- Node* result =
- graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
- Node* overflow =
- graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
- trap_->ZeroCheck64(kTrapFloatUnrepresentable, overflow);
- return result;
- }
+ // kExprF64ReinterpretI64:
case wasm::kExprF64ReinterpretI64:
op = m->BitcastInt64ToFloat64();
break;
+ // kExprI64ReinterpretF64:
case wasm::kExprI64ReinterpretF64:
op = m->BitcastFloat64ToInt64();
break;
+ // kExprI64Clz:
case wasm::kExprI64Clz:
op = m->Word64Clz();
break;
+ // kExprI64Ctz:
case wasm::kExprI64Ctz: {
if (m->Word64Ctz().IsSupported()) {
op = m->Word64Ctz().op();
break;
+ } else if (m->Is32() && m->Word32Ctz().IsSupported()) {
+ op = m->Word64CtzPlaceholder();
+ break;
} else if (m->Word64ReverseBits().IsSupported()) {
Node* reversed = graph()->NewNode(m->Word64ReverseBits().op(), input);
Node* result = graph()->NewNode(m->Word64Clz(), reversed);
@@ -867,15 +809,64 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input) {
return BuildI64Ctz(input);
}
}
+ // kExprI64Popcnt:
case wasm::kExprI64Popcnt: {
if (m->Word64Popcnt().IsSupported()) {
op = m->Word64Popcnt().op();
- break;
+ } else if (m->Is32() && m->Word32Popcnt().IsSupported()) {
+ op = m->Word64PopcntPlaceholder();
} else {
return BuildI64Popcnt(input);
}
+ break;
+ }
+ // kExprF32SConvertI64:
+ case wasm::kExprI64Eqz:
+ op = m->Word64Equal();
+ return graph()->NewNode(op, input, jsgraph()->Int64Constant(0));
+ case wasm::kExprF32SConvertI64:
+ if (m->Is32()) {
+ return BuildF32SConvertI64(input);
+ }
+ op = m->RoundInt64ToFloat32();
+ break;
+ // kExprF32UConvertI64:
+ case wasm::kExprF32UConvertI64:
+ if (m->Is32()) {
+ return BuildF32UConvertI64(input);
+ }
+ op = m->RoundUint64ToFloat32();
+ break;
+ // kExprF64SConvertI64:
+ case wasm::kExprF64SConvertI64:
+ if (m->Is32()) {
+ return BuildF64SConvertI64(input);
+ }
+ op = m->RoundInt64ToFloat64();
+ break;
+ // kExprF64UConvertI64:
+ case wasm::kExprF64UConvertI64:
+ if (m->Is32()) {
+ return BuildF64UConvertI64(input);
+ }
+ op = m->RoundUint64ToFloat64();
+ break;
+// kExprI64SConvertF32:
+ case wasm::kExprI64SConvertF32: {
+ return BuildI64SConvertF32(input);
+ }
+ // kExprI64SConvertF64:
+ case wasm::kExprI64SConvertF64: {
+ return BuildI64SConvertF64(input);
+ }
+ // kExprI64UConvertF32:
+ case wasm::kExprI64UConvertF32: {
+ return BuildI64UConvertF32(input);
+ }
+ // kExprI64UConvertF64:
+ case wasm::kExprI64UConvertF64: {
+ return BuildI64UConvertF64(input);
}
-#endif
default:
op = UnsupportedOpcode(opcode);
}
@@ -937,8 +928,7 @@ Node* WasmGraphBuilder::Return(unsigned count, Node** vals) {
count = 1;
}
- Node** buf = Realloc(vals, count);
- buf = Realloc(buf, count + 2);
+ Node** buf = Realloc(vals, count, count + 2);
buf[count] = *effect_;
buf[count + 1] = *control_;
Node* ret = graph()->NewNode(jsgraph()->common()->Return(), count + 2, vals);
@@ -956,6 +946,37 @@ Node* WasmGraphBuilder::Unreachable() {
return nullptr;
}
+Node* WasmGraphBuilder::MaskShiftCount32(Node* node) {
+ static const int32_t kMask32 = 0x1f;
+ if (!jsgraph()->machine()->Word32ShiftIsSafe()) {
+ // Shifts by constants are so common we pattern-match them here.
+ Int32Matcher match(node);
+ if (match.HasValue()) {
+ int32_t masked = (match.Value() & kMask32);
+ if (match.Value() != masked) node = jsgraph()->Int32Constant(masked);
+ } else {
+ node = graph()->NewNode(jsgraph()->machine()->Word32And(), node,
+ jsgraph()->Int32Constant(kMask32));
+ }
+ }
+ return node;
+}
+
+Node* WasmGraphBuilder::MaskShiftCount64(Node* node) {
+ static const int64_t kMask64 = 0x3f;
+ if (!jsgraph()->machine()->Word32ShiftIsSafe()) {
+ // Shifts by constants are so common we pattern-match them here.
+ Int64Matcher match(node);
+ if (match.HasValue()) {
+ int64_t masked = (match.Value() & kMask64);
+ if (match.Value() != masked) node = jsgraph()->Int64Constant(masked);
+ } else {
+ node = graph()->NewNode(jsgraph()->machine()->Word64And(), node,
+ jsgraph()->Int64Constant(kMask64));
+ }
+ }
+ return node;
+}
Node* WasmGraphBuilder::BuildF32Neg(Node* input) {
Node* result =
@@ -1115,6 +1136,13 @@ Node* WasmGraphBuilder::BuildF64Max(Node* left, Node* right) {
Node* WasmGraphBuilder::BuildI32SConvertF32(Node* input) {
MachineOperatorBuilder* m = jsgraph()->machine();
+ if (module_ && module_->asm_js()) {
+ // asm.js must use the wacky JS semantics.
+ input = graph()->NewNode(m->ChangeFloat32ToFloat64(), input);
+ return graph()->NewNode(
+ m->TruncateFloat64ToInt32(TruncationMode::kJavaScript), input);
+ }
+
// Truncation of the input value is needed for the overflow check later.
Node* trunc = Unop(wasm::kExprF32Trunc, input);
Node* result = graph()->NewNode(m->TruncateFloat32ToInt32(), trunc);
@@ -1123,7 +1151,7 @@ Node* WasmGraphBuilder::BuildI32SConvertF32(Node* input) {
// truncated input value, then there has been an overflow and we trap.
Node* check = Unop(wasm::kExprF32SConvertI32, result);
Node* overflow = Binop(wasm::kExprF32Ne, trunc, check);
- trap_->AddTrapIfTrue(kTrapFloatUnrepresentable, overflow);
+ trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow);
return result;
}
@@ -1131,7 +1159,8 @@ Node* WasmGraphBuilder::BuildI32SConvertF32(Node* input) {
Node* WasmGraphBuilder::BuildI32SConvertF64(Node* input) {
MachineOperatorBuilder* m = jsgraph()->machine();
- if (module_ && module_->asm_js) {
+ if (module_ && module_->asm_js()) {
+ // asm.js must use the wacky JS semantics.
return graph()->NewNode(
m->TruncateFloat64ToInt32(TruncationMode::kJavaScript), input);
}
@@ -1143,7 +1172,7 @@ Node* WasmGraphBuilder::BuildI32SConvertF64(Node* input) {
// truncated input value, then there has been an overflow and we trap.
Node* check = Unop(wasm::kExprF64SConvertI32, result);
Node* overflow = Binop(wasm::kExprF64Ne, trunc, check);
- trap_->AddTrapIfTrue(kTrapFloatUnrepresentable, overflow);
+ trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow);
return result;
}
@@ -1151,6 +1180,13 @@ Node* WasmGraphBuilder::BuildI32SConvertF64(Node* input) {
Node* WasmGraphBuilder::BuildI32UConvertF32(Node* input) {
MachineOperatorBuilder* m = jsgraph()->machine();
+ if (module_ && module_->asm_js()) {
+ // asm.js must use the wacky JS semantics.
+ input = graph()->NewNode(m->ChangeFloat32ToFloat64(), input);
+ return graph()->NewNode(
+ m->TruncateFloat64ToInt32(TruncationMode::kJavaScript), input);
+ }
+
// Truncation of the input value is needed for the overflow check later.
Node* trunc = Unop(wasm::kExprF32Trunc, input);
Node* result = graph()->NewNode(m->TruncateFloat32ToUint32(), trunc);
@@ -1159,7 +1195,7 @@ Node* WasmGraphBuilder::BuildI32UConvertF32(Node* input) {
// truncated input value, then there has been an overflow and we trap.
Node* check = Unop(wasm::kExprF32UConvertI32, result);
Node* overflow = Binop(wasm::kExprF32Ne, trunc, check);
- trap_->AddTrapIfTrue(kTrapFloatUnrepresentable, overflow);
+ trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow);
return result;
}
@@ -1167,19 +1203,20 @@ Node* WasmGraphBuilder::BuildI32UConvertF32(Node* input) {
Node* WasmGraphBuilder::BuildI32UConvertF64(Node* input) {
MachineOperatorBuilder* m = jsgraph()->machine();
- if (module_ && module_->asm_js) {
+ if (module_ && module_->asm_js()) {
+ // asm.js must use the wacky JS semantics.
return graph()->NewNode(
m->TruncateFloat64ToInt32(TruncationMode::kJavaScript), input);
}
// Truncation of the input value is needed for the overflow check later.
Node* trunc = Unop(wasm::kExprF64Trunc, input);
- Node* result = graph()->NewNode(m->ChangeFloat64ToUint32(), trunc);
+ Node* result = graph()->NewNode(m->TruncateFloat64ToUint32(), trunc);
// Convert the result back to f64. If we end up at a different value than the
// truncated input value, then there has been an overflow and we trap.
Node* check = Unop(wasm::kExprF64UConvertI32, result);
Node* overflow = Binop(wasm::kExprF64Ne, trunc, check);
- trap_->AddTrapIfTrue(kTrapFloatUnrepresentable, overflow);
+ trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow);
return result;
}
@@ -1363,89 +1400,557 @@ Node* WasmGraphBuilder::BuildI64Popcnt(Node* input) {
Node* WasmGraphBuilder::BuildF32Trunc(Node* input) {
MachineType type = MachineType::Float32();
ExternalReference ref =
- ExternalReference::f32_trunc_wrapper_function(jsgraph()->isolate());
- return BuildRoundingInstruction(input, ref, type);
+ ExternalReference::wasm_f32_trunc(jsgraph()->isolate());
+
+ return BuildCFuncInstruction(ref, type, input);
}
Node* WasmGraphBuilder::BuildF32Floor(Node* input) {
MachineType type = MachineType::Float32();
ExternalReference ref =
- ExternalReference::f32_floor_wrapper_function(jsgraph()->isolate());
- return BuildRoundingInstruction(input, ref, type);
+ ExternalReference::wasm_f32_floor(jsgraph()->isolate());
+ return BuildCFuncInstruction(ref, type, input);
}
Node* WasmGraphBuilder::BuildF32Ceil(Node* input) {
MachineType type = MachineType::Float32();
ExternalReference ref =
- ExternalReference::f32_ceil_wrapper_function(jsgraph()->isolate());
- return BuildRoundingInstruction(input, ref, type);
+ ExternalReference::wasm_f32_ceil(jsgraph()->isolate());
+ return BuildCFuncInstruction(ref, type, input);
}
Node* WasmGraphBuilder::BuildF32NearestInt(Node* input) {
MachineType type = MachineType::Float32();
ExternalReference ref =
- ExternalReference::f32_nearest_int_wrapper_function(jsgraph()->isolate());
- return BuildRoundingInstruction(input, ref, type);
+ ExternalReference::wasm_f32_nearest_int(jsgraph()->isolate());
+ return BuildCFuncInstruction(ref, type, input);
}
Node* WasmGraphBuilder::BuildF64Trunc(Node* input) {
MachineType type = MachineType::Float64();
ExternalReference ref =
- ExternalReference::f64_trunc_wrapper_function(jsgraph()->isolate());
- return BuildRoundingInstruction(input, ref, type);
+ ExternalReference::wasm_f64_trunc(jsgraph()->isolate());
+ return BuildCFuncInstruction(ref, type, input);
}
Node* WasmGraphBuilder::BuildF64Floor(Node* input) {
MachineType type = MachineType::Float64();
ExternalReference ref =
- ExternalReference::f64_floor_wrapper_function(jsgraph()->isolate());
- return BuildRoundingInstruction(input, ref, type);
+ ExternalReference::wasm_f64_floor(jsgraph()->isolate());
+ return BuildCFuncInstruction(ref, type, input);
}
Node* WasmGraphBuilder::BuildF64Ceil(Node* input) {
MachineType type = MachineType::Float64();
ExternalReference ref =
- ExternalReference::f64_ceil_wrapper_function(jsgraph()->isolate());
- return BuildRoundingInstruction(input, ref, type);
+ ExternalReference::wasm_f64_ceil(jsgraph()->isolate());
+ return BuildCFuncInstruction(ref, type, input);
}
Node* WasmGraphBuilder::BuildF64NearestInt(Node* input) {
MachineType type = MachineType::Float64();
ExternalReference ref =
- ExternalReference::f64_nearest_int_wrapper_function(jsgraph()->isolate());
- return BuildRoundingInstruction(input, ref, type);
+ ExternalReference::wasm_f64_nearest_int(jsgraph()->isolate());
+ return BuildCFuncInstruction(ref, type, input);
+}
+
+Node* WasmGraphBuilder::BuildF64Acos(Node* input) {
+ MachineType type = MachineType::Float64();
+ ExternalReference ref =
+ ExternalReference::f64_acos_wrapper_function(jsgraph()->isolate());
+ return BuildCFuncInstruction(ref, type, input);
+}
+
+Node* WasmGraphBuilder::BuildF64Asin(Node* input) {
+ MachineType type = MachineType::Float64();
+ ExternalReference ref =
+ ExternalReference::f64_asin_wrapper_function(jsgraph()->isolate());
+ return BuildCFuncInstruction(ref, type, input);
+}
+
+Node* WasmGraphBuilder::BuildF64Atan(Node* input) {
+ MachineType type = MachineType::Float64();
+ ExternalReference ref =
+ ExternalReference::f64_atan_wrapper_function(jsgraph()->isolate());
+ return BuildCFuncInstruction(ref, type, input);
+}
+
+Node* WasmGraphBuilder::BuildF64Cos(Node* input) {
+ MachineType type = MachineType::Float64();
+ ExternalReference ref =
+ ExternalReference::f64_cos_wrapper_function(jsgraph()->isolate());
+ return BuildCFuncInstruction(ref, type, input);
+}
+
+Node* WasmGraphBuilder::BuildF64Sin(Node* input) {
+ MachineType type = MachineType::Float64();
+ ExternalReference ref =
+ ExternalReference::f64_sin_wrapper_function(jsgraph()->isolate());
+ return BuildCFuncInstruction(ref, type, input);
+}
+
+Node* WasmGraphBuilder::BuildF64Tan(Node* input) {
+ MachineType type = MachineType::Float64();
+ ExternalReference ref =
+ ExternalReference::f64_tan_wrapper_function(jsgraph()->isolate());
+ return BuildCFuncInstruction(ref, type, input);
+}
+
+Node* WasmGraphBuilder::BuildF64Exp(Node* input) {
+ MachineType type = MachineType::Float64();
+ ExternalReference ref =
+ ExternalReference::f64_exp_wrapper_function(jsgraph()->isolate());
+ return BuildCFuncInstruction(ref, type, input);
+}
+
+Node* WasmGraphBuilder::BuildF64Log(Node* input) {
+ MachineType type = MachineType::Float64();
+ ExternalReference ref =
+ ExternalReference::f64_log_wrapper_function(jsgraph()->isolate());
+ return BuildCFuncInstruction(ref, type, input);
+}
+
+Node* WasmGraphBuilder::BuildF64Atan2(Node* left, Node* right) {
+ MachineType type = MachineType::Float64();
+ ExternalReference ref =
+ ExternalReference::f64_atan2_wrapper_function(jsgraph()->isolate());
+ return BuildCFuncInstruction(ref, type, left, right);
+}
+
+Node* WasmGraphBuilder::BuildF64Pow(Node* left, Node* right) {
+ MachineType type = MachineType::Float64();
+ ExternalReference ref =
+ ExternalReference::f64_pow_wrapper_function(jsgraph()->isolate());
+ return BuildCFuncInstruction(ref, type, left, right);
}
-Node* WasmGraphBuilder::BuildRoundingInstruction(Node* input,
- ExternalReference ref,
- MachineType type) {
- // We do truncation by calling a C function which calculates the truncation
- // for us. The input is passed to the C function as a double* to avoid double
- // parameters. For this we reserve a slot on the stack, store the parameter in
- // that slot, pass a pointer to the slot to the C function, and after calling
- // the C function we collect the return value from the stack slot.
+Node* WasmGraphBuilder::BuildF64Mod(Node* left, Node* right) {
+ MachineType type = MachineType::Float64();
+ ExternalReference ref =
+ ExternalReference::f64_mod_wrapper_function(jsgraph()->isolate());
+ return BuildCFuncInstruction(ref, type, left, right);
+}
- Node* stack_slot_param =
+Node* WasmGraphBuilder::BuildCFuncInstruction(ExternalReference ref,
+ MachineType type, Node* input0,
+ Node* input1) {
+ // We do truncation by calling a C function which calculates the result.
+ // The input is passed to the C function as a double*'s to avoid double
+ // parameters. For this we reserve slots on the stack, store the parameters
+ // in those slots, pass pointers to the slot to the C function,
+ // and after calling the C function we collect the return value from
+ // the stack slot.
+
+ Node* stack_slot_param0 =
graph()->NewNode(jsgraph()->machine()->StackSlot(type.representation()));
- const Operator* store_op = jsgraph()->machine()->Store(
+ const Operator* store_op0 = jsgraph()->machine()->Store(
StoreRepresentation(type.representation(), kNoWriteBarrier));
+ *effect_ = graph()->NewNode(store_op0, stack_slot_param0,
+ jsgraph()->Int32Constant(0), input0, *effect_,
+ *control_);
+
+ Node* function = graph()->NewNode(jsgraph()->common()->ExternalConstant(ref));
+ Node** args = Buffer(5);
+ args[0] = function;
+ args[1] = stack_slot_param0;
+ int input_count = 1;
+
+ if (input1 != nullptr) {
+ Node* stack_slot_param1 = graph()->NewNode(
+ jsgraph()->machine()->StackSlot(type.representation()));
+ const Operator* store_op1 = jsgraph()->machine()->Store(
+ StoreRepresentation(type.representation(), kNoWriteBarrier));
+ *effect_ = graph()->NewNode(store_op1, stack_slot_param1,
+ jsgraph()->Int32Constant(0), input1, *effect_,
+ *control_);
+ args[2] = stack_slot_param1;
+ ++input_count;
+ }
+
+ Signature<MachineType>::Builder sig_builder(jsgraph()->zone(), 0,
+ input_count);
+ sig_builder.AddParam(MachineType::Pointer());
+ if (input1 != nullptr) {
+ sig_builder.AddParam(MachineType::Pointer());
+ }
+ BuildCCall(sig_builder.Build(), args);
+
+ const Operator* load_op = jsgraph()->machine()->Load(type);
+
+ Node* load =
+ graph()->NewNode(load_op, stack_slot_param0, jsgraph()->Int32Constant(0),
+ *effect_, *control_);
+ *effect_ = load;
+ return load;
+}
+
+Node* WasmGraphBuilder::BuildF32SConvertI64(Node* input) {
+ // TODO(titzer/bradnelson): Check handlng of asm.js case.
+ return BuildIntToFloatConversionInstruction(
+ input, ExternalReference::wasm_int64_to_float32(jsgraph()->isolate()),
+ MachineRepresentation::kWord64, MachineType::Float32());
+}
+Node* WasmGraphBuilder::BuildF32UConvertI64(Node* input) {
+ // TODO(titzer/bradnelson): Check handlng of asm.js case.
+ return BuildIntToFloatConversionInstruction(
+ input, ExternalReference::wasm_uint64_to_float32(jsgraph()->isolate()),
+ MachineRepresentation::kWord64, MachineType::Float32());
+}
+Node* WasmGraphBuilder::BuildF64SConvertI64(Node* input) {
+ return BuildIntToFloatConversionInstruction(
+ input, ExternalReference::wasm_int64_to_float64(jsgraph()->isolate()),
+ MachineRepresentation::kWord64, MachineType::Float64());
+}
+Node* WasmGraphBuilder::BuildF64UConvertI64(Node* input) {
+ return BuildIntToFloatConversionInstruction(
+ input, ExternalReference::wasm_uint64_to_float64(jsgraph()->isolate()),
+ MachineRepresentation::kWord64, MachineType::Float64());
+}
+
+Node* WasmGraphBuilder::BuildIntToFloatConversionInstruction(
+ Node* input, ExternalReference ref,
+ MachineRepresentation parameter_representation,
+ const MachineType result_type) {
+ Node* stack_slot_param = graph()->NewNode(
+ jsgraph()->machine()->StackSlot(parameter_representation));
+ Node* stack_slot_result = graph()->NewNode(
+ jsgraph()->machine()->StackSlot(result_type.representation()));
+ const Operator* store_op = jsgraph()->machine()->Store(
+ StoreRepresentation(parameter_representation, kNoWriteBarrier));
*effect_ =
graph()->NewNode(store_op, stack_slot_param, jsgraph()->Int32Constant(0),
input, *effect_, *control_);
+ MachineSignature::Builder sig_builder(jsgraph()->zone(), 0, 2);
+ sig_builder.AddParam(MachineType::Pointer());
+ sig_builder.AddParam(MachineType::Pointer());
+ Node* function = graph()->NewNode(jsgraph()->common()->ExternalConstant(ref));
+ Node* args[] = {function, stack_slot_param, stack_slot_result};
+ BuildCCall(sig_builder.Build(), args);
+ const Operator* load_op = jsgraph()->machine()->Load(result_type);
+ Node* load =
+ graph()->NewNode(load_op, stack_slot_result, jsgraph()->Int32Constant(0),
+ *effect_, *control_);
+ *effect_ = load;
+ return load;
+}
- Signature<MachineType>::Builder sig_builder(jsgraph()->zone(), 0, 1);
+Node* WasmGraphBuilder::BuildI64SConvertF32(Node* input) {
+ if (jsgraph()->machine()->Is32()) {
+ return BuildFloatToIntConversionInstruction(
+ input, ExternalReference::wasm_float32_to_int64(jsgraph()->isolate()),
+ MachineRepresentation::kFloat32, MachineType::Int64());
+ } else {
+ Node* trunc = graph()->NewNode(
+ jsgraph()->machine()->TryTruncateFloat32ToInt64(), input);
+ Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
+ Node* overflow =
+ graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
+ trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow);
+ return result;
+ }
+}
+
+Node* WasmGraphBuilder::BuildI64UConvertF32(Node* input) {
+ if (jsgraph()->machine()->Is32()) {
+ return BuildFloatToIntConversionInstruction(
+ input, ExternalReference::wasm_float32_to_uint64(jsgraph()->isolate()),
+ MachineRepresentation::kFloat32, MachineType::Int64());
+ } else {
+ Node* trunc = graph()->NewNode(
+ jsgraph()->machine()->TryTruncateFloat32ToUint64(), input);
+ Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
+ Node* overflow =
+ graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
+ trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow);
+ return result;
+ }
+}
+
+Node* WasmGraphBuilder::BuildI64SConvertF64(Node* input) {
+ if (jsgraph()->machine()->Is32()) {
+ return BuildFloatToIntConversionInstruction(
+ input, ExternalReference::wasm_float64_to_int64(jsgraph()->isolate()),
+ MachineRepresentation::kFloat64, MachineType::Int64());
+ } else {
+ Node* trunc = graph()->NewNode(
+ jsgraph()->machine()->TryTruncateFloat64ToInt64(), input);
+ Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
+ Node* overflow =
+ graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
+ trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow);
+ return result;
+ }
+}
+
+Node* WasmGraphBuilder::BuildI64UConvertF64(Node* input) {
+ if (jsgraph()->machine()->Is32()) {
+ return BuildFloatToIntConversionInstruction(
+ input, ExternalReference::wasm_float64_to_uint64(jsgraph()->isolate()),
+ MachineRepresentation::kFloat64, MachineType::Int64());
+ } else {
+ Node* trunc = graph()->NewNode(
+ jsgraph()->machine()->TryTruncateFloat64ToUint64(), input);
+ Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
+ Node* overflow =
+ graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
+ trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow);
+ return result;
+ }
+}
+
+Node* WasmGraphBuilder::BuildFloatToIntConversionInstruction(
+ Node* input, ExternalReference ref,
+ MachineRepresentation parameter_representation,
+ const MachineType result_type) {
+ Node* stack_slot_param = graph()->NewNode(
+ jsgraph()->machine()->StackSlot(parameter_representation));
+ Node* stack_slot_result = graph()->NewNode(
+ jsgraph()->machine()->StackSlot(result_type.representation()));
+ const Operator* store_op = jsgraph()->machine()->Store(
+ StoreRepresentation(parameter_representation, kNoWriteBarrier));
+ *effect_ =
+ graph()->NewNode(store_op, stack_slot_param, jsgraph()->Int32Constant(0),
+ input, *effect_, *control_);
+ MachineSignature::Builder sig_builder(jsgraph()->zone(), 1, 2);
+ sig_builder.AddReturn(MachineType::Int32());
+ sig_builder.AddParam(MachineType::Pointer());
sig_builder.AddParam(MachineType::Pointer());
Node* function = graph()->NewNode(jsgraph()->common()->ExternalConstant(ref));
+ Node* args[] = {function, stack_slot_param, stack_slot_result};
+ trap_->ZeroCheck32(wasm::kTrapFloatUnrepresentable,
+ BuildCCall(sig_builder.Build(), args));
+ const Operator* load_op = jsgraph()->machine()->Load(result_type);
+ Node* load =
+ graph()->NewNode(load_op, stack_slot_result, jsgraph()->Int32Constant(0),
+ *effect_, *control_);
+ *effect_ = load;
+ return load;
+}
- Node* args[] = {function, stack_slot_param};
+Node* WasmGraphBuilder::BuildI32DivS(Node* left, Node* right) {
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ if (module_ && module_->asm_js()) {
+ // asm.js semantics return 0 on divide or mod by zero.
+ if (m->Int32DivIsSafe()) {
+ // The hardware instruction does the right thing (e.g. arm).
+ return graph()->NewNode(m->Int32Div(), left, right, graph()->start());
+ }
- BuildCCall(sig_builder.Build(), args);
+ // Check denominator for zero.
+ Diamond z(
+ graph(), jsgraph()->common(),
+ graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
+ BranchHint::kFalse);
+
+ // Check numerator for -1. (avoid minint / -1 case).
+ Diamond n(
+ graph(), jsgraph()->common(),
+ graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
+ BranchHint::kFalse);
+
+ Node* div = graph()->NewNode(m->Int32Div(), left, right, z.if_false);
+ Node* neg =
+ graph()->NewNode(m->Int32Sub(), jsgraph()->Int32Constant(0), left);
+
+ return n.Phi(MachineRepresentation::kWord32, neg,
+ z.Phi(MachineRepresentation::kWord32,
+ jsgraph()->Int32Constant(0), div));
+ }
- const Operator* load_op = jsgraph()->machine()->Load(type);
+ trap_->ZeroCheck32(wasm::kTrapDivByZero, right);
+ Node* before = *control_;
+ Node* denom_is_m1;
+ Node* denom_is_not_m1;
+ Branch(
+ graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
+ &denom_is_m1, &denom_is_not_m1);
+ *control_ = denom_is_m1;
+ trap_->TrapIfEq32(wasm::kTrapDivUnrepresentable, left, kMinInt);
+ if (*control_ != denom_is_m1) {
+ *control_ = graph()->NewNode(jsgraph()->common()->Merge(2), denom_is_not_m1,
+ *control_);
+ } else {
+ *control_ = before;
+ }
+ return graph()->NewNode(m->Int32Div(), left, right, *control_);
+}
+
+Node* WasmGraphBuilder::BuildI32RemS(Node* left, Node* right) {
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ if (module_ && module_->asm_js()) {
+ // asm.js semantics return 0 on divide or mod by zero.
+ // Explicit check for x % 0.
+ Diamond z(
+ graph(), jsgraph()->common(),
+ graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
+ BranchHint::kFalse);
+
+ // Explicit check for x % -1.
+ Diamond d(
+ graph(), jsgraph()->common(),
+ graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
+ BranchHint::kFalse);
+ d.Chain(z.if_false);
+
+ return z.Phi(
+ MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
+ d.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
+ graph()->NewNode(m->Int32Mod(), left, right, d.if_false)));
+ }
+
+ trap_->ZeroCheck32(wasm::kTrapRemByZero, right);
+
+ Diamond d(
+ graph(), jsgraph()->common(),
+ graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
+ BranchHint::kFalse);
+ d.Chain(*control_);
+
+ return d.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
+ graph()->NewNode(m->Int32Mod(), left, right, d.if_false));
+}
+
+Node* WasmGraphBuilder::BuildI32DivU(Node* left, Node* right) {
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ if (module_ && module_->asm_js()) {
+ // asm.js semantics return 0 on divide or mod by zero.
+ if (m->Uint32DivIsSafe()) {
+ // The hardware instruction does the right thing (e.g. arm).
+ return graph()->NewNode(m->Uint32Div(), left, right, graph()->start());
+ }
+
+ // Explicit check for x % 0.
+ Diamond z(
+ graph(), jsgraph()->common(),
+ graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
+ BranchHint::kFalse);
+
+ return z.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
+ graph()->NewNode(jsgraph()->machine()->Uint32Div(), left,
+ right, z.if_false));
+ }
+ return graph()->NewNode(m->Uint32Div(), left, right,
+ trap_->ZeroCheck32(wasm::kTrapDivByZero, right));
+}
+
+Node* WasmGraphBuilder::BuildI32RemU(Node* left, Node* right) {
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ if (module_ && module_->asm_js()) {
+ // asm.js semantics return 0 on divide or mod by zero.
+ // Explicit check for x % 0.
+ Diamond z(
+ graph(), jsgraph()->common(),
+ graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
+ BranchHint::kFalse);
+
+ Node* rem = graph()->NewNode(jsgraph()->machine()->Uint32Mod(), left, right,
+ z.if_false);
+ return z.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
+ rem);
+ }
+
+ return graph()->NewNode(m->Uint32Mod(), left, right,
+ trap_->ZeroCheck32(wasm::kTrapRemByZero, right));
+}
+Node* WasmGraphBuilder::BuildI64DivS(Node* left, Node* right) {
+ if (jsgraph()->machine()->Is32()) {
+ return BuildDiv64Call(
+ left, right, ExternalReference::wasm_int64_div(jsgraph()->isolate()),
+ MachineType::Int64(), wasm::kTrapDivByZero);
+ }
+ trap_->ZeroCheck64(wasm::kTrapDivByZero, right);
+ Node* before = *control_;
+ Node* denom_is_m1;
+ Node* denom_is_not_m1;
+ Branch(graph()->NewNode(jsgraph()->machine()->Word64Equal(), right,
+ jsgraph()->Int64Constant(-1)),
+ &denom_is_m1, &denom_is_not_m1);
+ *control_ = denom_is_m1;
+ trap_->TrapIfEq64(wasm::kTrapDivUnrepresentable, left,
+ std::numeric_limits<int64_t>::min());
+ if (*control_ != denom_is_m1) {
+ *control_ = graph()->NewNode(jsgraph()->common()->Merge(2), denom_is_not_m1,
+ *control_);
+ } else {
+ *control_ = before;
+ }
+ return graph()->NewNode(jsgraph()->machine()->Int64Div(), left, right,
+ *control_);
+}
+
+Node* WasmGraphBuilder::BuildI64RemS(Node* left, Node* right) {
+ if (jsgraph()->machine()->Is32()) {
+ return BuildDiv64Call(
+ left, right, ExternalReference::wasm_int64_mod(jsgraph()->isolate()),
+ MachineType::Int64(), wasm::kTrapRemByZero);
+ }
+ trap_->ZeroCheck64(wasm::kTrapRemByZero, right);
+ Diamond d(jsgraph()->graph(), jsgraph()->common(),
+ graph()->NewNode(jsgraph()->machine()->Word64Equal(), right,
+ jsgraph()->Int64Constant(-1)));
+
+ Node* rem = graph()->NewNode(jsgraph()->machine()->Int64Mod(), left, right,
+ d.if_false);
+
+ return d.Phi(MachineRepresentation::kWord64, jsgraph()->Int64Constant(0),
+ rem);
+}
+
+Node* WasmGraphBuilder::BuildI64DivU(Node* left, Node* right) {
+ if (jsgraph()->machine()->Is32()) {
+ return BuildDiv64Call(
+ left, right, ExternalReference::wasm_uint64_div(jsgraph()->isolate()),
+ MachineType::Int64(), wasm::kTrapDivByZero);
+ }
+ return graph()->NewNode(jsgraph()->machine()->Uint64Div(), left, right,
+ trap_->ZeroCheck64(wasm::kTrapDivByZero, right));
+}
+Node* WasmGraphBuilder::BuildI64RemU(Node* left, Node* right) {
+ if (jsgraph()->machine()->Is32()) {
+ return BuildDiv64Call(
+ left, right, ExternalReference::wasm_uint64_mod(jsgraph()->isolate()),
+ MachineType::Int64(), wasm::kTrapRemByZero);
+ }
+ return graph()->NewNode(jsgraph()->machine()->Uint64Mod(), left, right,
+ trap_->ZeroCheck64(wasm::kTrapRemByZero, right));
+}
+
+Node* WasmGraphBuilder::BuildDiv64Call(Node* left, Node* right,
+ ExternalReference ref,
+ MachineType result_type, int trap_zero) {
+ Node* stack_slot_dst = graph()->NewNode(
+ jsgraph()->machine()->StackSlot(MachineRepresentation::kWord64));
+ Node* stack_slot_src = graph()->NewNode(
+ jsgraph()->machine()->StackSlot(MachineRepresentation::kWord64));
+
+ const Operator* store_op = jsgraph()->machine()->Store(
+ StoreRepresentation(MachineRepresentation::kWord64, kNoWriteBarrier));
+ *effect_ =
+ graph()->NewNode(store_op, stack_slot_dst, jsgraph()->Int32Constant(0),
+ left, *effect_, *control_);
+ *effect_ =
+ graph()->NewNode(store_op, stack_slot_src, jsgraph()->Int32Constant(0),
+ right, *effect_, *control_);
+
+ MachineSignature::Builder sig_builder(jsgraph()->zone(), 1, 2);
+ sig_builder.AddReturn(MachineType::Int32());
+ sig_builder.AddParam(MachineType::Pointer());
+ sig_builder.AddParam(MachineType::Pointer());
+
+ Node* function = graph()->NewNode(jsgraph()->common()->ExternalConstant(ref));
+ Node* args[] = {function, stack_slot_dst, stack_slot_src};
+
+ Node* call = BuildCCall(sig_builder.Build(), args);
+
+ // TODO(wasm): This can get simpler if we have a specialized runtime call to
+ // throw WASM exceptions by trap code instead of by string.
+ trap_->ZeroCheck32(static_cast<wasm::TrapReason>(trap_zero), call);
+ trap_->TrapIfEq32(wasm::kTrapDivUnrepresentable, call, -1);
+ const Operator* load_op = jsgraph()->machine()->Load(result_type);
Node* load =
- graph()->NewNode(load_op, stack_slot_param, jsgraph()->Int32Constant(0),
+ graph()->NewNode(load_op, stack_slot_dst, jsgraph()->Int32Constant(0),
*effect_, *control_);
*effect_ = load;
return load;
@@ -1457,7 +1962,7 @@ Node* WasmGraphBuilder::BuildCCall(MachineSignature* sig, Node** args) {
const size_t count = 1 + params + extra;
// Reallocate the buffer to make space for extra inputs.
- args = Realloc(args, count);
+ args = Realloc(args, 1 + params, count);
// Add effect and control inputs.
args[params + 1] = *effect_;
@@ -1478,7 +1983,7 @@ Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args) {
const size_t count = 1 + params + extra;
// Reallocate the buffer to make space for extra inputs.
- args = Realloc(args, count);
+ args = Realloc(args, 1 + params, count);
// Add effect and control inputs.
args[params + 1] = *effect_;
@@ -1493,7 +1998,6 @@ Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args) {
return call;
}
-
Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args) {
DCHECK_NULL(args[0]);
@@ -1529,10 +2033,10 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t index, Node** args) {
// Bounds check against the table size.
Node* size = Int32Constant(static_cast<int>(table_size));
Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, size);
- trap_->AddTrapIfFalse(kTrapFuncInvalid, in_bounds);
+ trap_->AddTrapIfFalse(wasm::kTrapFuncInvalid, in_bounds);
} else {
// No function table. Generate a trap and return a constant.
- trap_->AddTrapIfFalse(kTrapFuncInvalid, Int32Constant(0));
+ trap_->AddTrapIfFalse(wasm::kTrapFuncInvalid, Int32Constant(0));
return trap_->GetTrapValue(module_->GetSignature(index));
}
Node* table = FunctionTable();
@@ -1552,7 +2056,7 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t index, Node** args) {
*effect_, *control_);
Node* sig_match = graph()->NewNode(machine->WordEqual(), load_sig,
jsgraph()->SmiConstant(index));
- trap_->AddTrapIfFalse(kTrapFuncSigMismatch, sig_match);
+ trap_->AddTrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match);
}
// Load code object from the table.
@@ -1640,9 +2144,34 @@ Node* WasmGraphBuilder::FromJS(Node* node, Node* context,
return num;
}
+Node* WasmGraphBuilder::BuildI32Rol(Node* left, Node* right) {
+ // Implement Rol by Ror since TurboFan does not have Rol opcode.
+ // TODO(weiliang): support Word32Rol opcode in TurboFan.
+ Int32Matcher m(right);
+ if (m.HasValue()) {
+ return Binop(wasm::kExprI32Ror, left,
+ jsgraph()->Int32Constant(32 - m.Value()));
+ } else {
+ return Binop(wasm::kExprI32Ror, left,
+ Binop(wasm::kExprI32Sub, jsgraph()->Int32Constant(32), right));
+ }
+}
+
+Node* WasmGraphBuilder::BuildI64Rol(Node* left, Node* right) {
+ // Implement Rol by Ror since TurboFan does not have Rol opcode.
+ // TODO(weiliang): support Word64Rol opcode in TurboFan.
+ Int64Matcher m(right);
+ if (m.HasValue()) {
+ return Binop(wasm::kExprI64Ror, left,
+ jsgraph()->Int64Constant(64 - m.Value()));
+ } else {
+ return Binop(wasm::kExprI64Ror, left,
+ Binop(wasm::kExprI64Sub, jsgraph()->Int64Constant(64), right));
+ }
+}
Node* WasmGraphBuilder::Invert(Node* node) {
- return Unop(wasm::kExprBoolNot, node);
+ return Unop(wasm::kExprI32Eqz, node);
}
@@ -1653,19 +2182,22 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
Node** args = Buffer(count);
// Build the start and the JS parameter nodes.
- Node* start = Start(params + 3);
+ Node* start = Start(params + 5);
*control_ = start;
*effect_ = start;
- // JS context is the last parameter.
+ // Create the context parameter
Node* context = graph()->NewNode(
- jsgraph()->common()->Parameter(params + 1, "context"), start);
+ jsgraph()->common()->Parameter(
+ Linkage::GetJSCallContextParamIndex(params + 1), "%context"),
+ graph()->start());
int pos = 0;
args[pos++] = Constant(wasm_code);
// Convert JS parameters to WASM numbers.
for (int i = 0; i < params; i++) {
- Node* param = graph()->NewNode(jsgraph()->common()->Parameter(i), start);
+ Node* param =
+ graph()->NewNode(jsgraph()->common()->Parameter(i + 1), start);
args[pos++] = FromJS(param, context, sig->GetParam(i));
}
@@ -1800,7 +2332,7 @@ Node* WasmGraphBuilder::LoadGlobal(uint32_t index) {
MachineType mem_type = module_->GetGlobalType(index);
Node* addr = jsgraph()->IntPtrConstant(
reinterpret_cast<uintptr_t>(module_->instance->globals_start +
- module_->module->globals->at(index).offset));
+ module_->module->globals[index].offset));
const Operator* op = jsgraph()->machine()->Load(mem_type);
Node* node = graph()->NewNode(op, addr, jsgraph()->Int32Constant(0), *effect_,
*control_);
@@ -1814,7 +2346,7 @@ Node* WasmGraphBuilder::StoreGlobal(uint32_t index, Node* val) {
MachineType mem_type = module_->GetGlobalType(index);
Node* addr = jsgraph()->IntPtrConstant(
reinterpret_cast<uintptr_t>(module_->instance->globals_start +
- module_->module->globals->at(index).offset));
+ module_->module->globals[index].offset));
const Operator* op = jsgraph()->machine()->Store(
StoreRepresentation(mem_type.representation(), kNoWriteBarrier));
Node* node = graph()->NewNode(op, addr, jsgraph()->Int32Constant(0), val,
@@ -1843,7 +2375,7 @@ void WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
jsgraph()->Int32Constant(static_cast<uint32_t>(limit)));
}
- trap_->AddTrapIfFalse(kTrapMemOutOfBounds, cond);
+ trap_->AddTrapIfFalse(wasm::kTrapMemOutOfBounds, cond);
}
@@ -1851,7 +2383,7 @@ Node* WasmGraphBuilder::LoadMem(wasm::LocalType type, MachineType memtype,
Node* index, uint32_t offset) {
Node* load;
- if (module_ && module_->asm_js) {
+ if (module_ && module_->asm_js()) {
// asm.js semantics use CheckedLoad (i.e. OOB reads return 0ish).
DCHECK_EQ(0, offset);
const Operator* op = jsgraph()->machine()->CheckedLoad(memtype);
@@ -1886,7 +2418,7 @@ Node* WasmGraphBuilder::LoadMem(wasm::LocalType type, MachineType memtype,
Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
uint32_t offset, Node* val) {
Node* store;
- if (module_ && module_->asm_js) {
+ if (module_ && module_->asm_js()) {
// asm.js semantics use CheckedStore (i.e. ignore OOB writes).
DCHECK_EQ(0, offset);
const Operator* op =
@@ -1920,7 +2452,7 @@ Node* WasmGraphBuilder::String(const char* string) {
Graph* WasmGraphBuilder::graph() { return jsgraph()->graph(); }
void WasmGraphBuilder::Int64LoweringForTesting() {
- if (kPointerSize == 4) {
+ if (jsgraph()->machine()->Is32()) {
Int64Lowering r(jsgraph()->graph(), jsgraph()->machine(),
jsgraph()->common(), jsgraph()->zone(),
function_signature_);
@@ -1931,12 +2463,13 @@ void WasmGraphBuilder::Int64LoweringForTesting() {
static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
CompilationInfo* info,
const char* message, uint32_t index,
- const char* func_name) {
+ wasm::WasmName func_name) {
Isolate* isolate = info->isolate();
if (isolate->logger()->is_logging_code_events() ||
isolate->cpu_profiler()->is_profiling()) {
ScopedVector<char> buffer(128);
- SNPrintF(buffer, "%s#%d:%s", message, index, func_name);
+ SNPrintF(buffer, "%s#%d:%.*s", message, index, func_name.length,
+ func_name.name);
Handle<String> name_str =
isolate->factory()->NewStringFromAsciiChecked(buffer.start());
Handle<String> script_str =
@@ -1944,15 +2477,15 @@ static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
Handle<Code> code = info->code();
Handle<SharedFunctionInfo> shared =
isolate->factory()->NewSharedFunctionInfo(name_str, code, false);
- PROFILE(isolate,
- CodeCreateEvent(tag, *code, *shared, info, *script_str, 0, 0));
+ PROFILE(isolate, CodeCreateEvent(tag, AbstractCode::cast(*code), *shared,
+ info, *script_str, 0, 0));
}
}
Handle<JSFunction> CompileJSToWasmWrapper(
Isolate* isolate, wasm::ModuleEnv* module, Handle<String> name,
Handle<Code> wasm_code, Handle<JSObject> module_object, uint32_t index) {
- wasm::WasmFunction* func = &module->module->functions->at(index);
+ wasm::WasmFunction* func = &module->module->functions[index];
//----------------------------------------------------------------------------
// Create the JSFunction object.
@@ -1961,7 +2494,7 @@ Handle<JSFunction> CompileJSToWasmWrapper(
isolate->factory()->NewSharedFunctionInfo(name, wasm_code, false);
int params = static_cast<int>(func->sig->parameter_count());
shared->set_length(params);
- shared->set_internal_formal_parameter_count(1 + params);
+ shared->set_internal_formal_parameter_count(params);
Handle<JSFunction> function = isolate->factory()->NewFunction(
isolate->wasm_function_map(), name, MaybeHandle<Code>());
function->SetInternalField(0, *module_object);
@@ -1970,7 +2503,7 @@ Handle<JSFunction> CompileJSToWasmWrapper(
//----------------------------------------------------------------------------
// Create the Graph
//----------------------------------------------------------------------------
- Zone zone;
+ Zone zone(isolate->allocator());
Graph graph(&zone);
CommonOperatorBuilder common(&zone);
JSOperatorBuilder javascript(&zone);
@@ -2015,8 +2548,7 @@ Handle<JSFunction> CompileJSToWasmWrapper(
module->GetFunctionSignature(index)->parameter_count());
CallDescriptor* incoming = Linkage::GetJSCallDescriptor(
&zone, false, params + 1, CallDescriptor::kNoFlags);
- // TODO(titzer): this is technically a WASM wrapper, not a wasm function.
- Code::Flags flags = Code::ComputeFlags(Code::WASM_FUNCTION);
+ Code::Flags flags = Code::ComputeFlags(Code::JS_TO_WASM_FUNCTION);
bool debugging =
#if DEBUG
true;
@@ -2036,12 +2568,19 @@ Handle<JSFunction> CompileJSToWasmWrapper(
CompilationInfo info(func_name, isolate, &zone, flags);
Handle<Code> code =
Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_opt_code && !code.is_null()) {
+ OFStream os(stdout);
+ code->Disassemble(buffer.start(), os);
+ }
+#endif
if (debugging) {
buffer.Dispose();
}
- RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, "js-to-wasm", index,
- module->module->GetName(func->name_offset));
+ RecordFunctionCompilation(
+ Logger::FUNCTION_TAG, &info, "js-to-wasm", index,
+ module->module->GetName(func->name_offset, func->name_length));
// Set the JSFunction's machine code.
function->set_code(*code);
}
@@ -2050,11 +2589,13 @@ Handle<JSFunction> CompileJSToWasmWrapper(
Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
Handle<JSFunction> function,
- wasm::FunctionSig* sig, const char* name) {
+ wasm::FunctionSig* sig,
+ wasm::WasmName module_name,
+ wasm::WasmName function_name) {
//----------------------------------------------------------------------------
// Create the Graph
//----------------------------------------------------------------------------
- Zone zone;
+ Zone zone(isolate->allocator());
Graph graph(&zone);
CommonOperatorBuilder common(&zone);
JSOperatorBuilder javascript(&zone);
@@ -2095,8 +2636,7 @@ Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
// Schedule and compile to machine code.
CallDescriptor* incoming =
wasm::ModuleEnv::GetWasmCallDescriptor(&zone, sig);
- // TODO(titzer): this is technically a WASM wrapper, not a wasm function.
- Code::Flags flags = Code::ComputeFlags(Code::WASM_FUNCTION);
+ Code::Flags flags = Code::ComputeFlags(Code::WASM_TO_JS_FUNCTION);
bool debugging =
#if DEBUG
true;
@@ -2114,12 +2654,18 @@ Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
CompilationInfo info(func_name, isolate, &zone, flags);
code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_opt_code && !code.is_null()) {
+ OFStream os(stdout);
+ code->Disassemble(buffer.start(), os);
+ }
+#endif
if (debugging) {
buffer.Dispose();
}
RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, "wasm-to-js", 0,
- name);
+ module_name);
}
return code;
}
@@ -2129,24 +2675,21 @@ Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
Handle<Code> CompileWasmFunction(wasm::ErrorThrower& thrower, Isolate* isolate,
wasm::ModuleEnv* module_env,
const wasm::WasmFunction& function) {
- if (FLAG_trace_wasm_compiler || FLAG_trace_wasm_decode_time) {
+ if (FLAG_trace_wasm_compiler) {
OFStream os(stdout);
os << "Compiling WASM function "
<< wasm::WasmFunctionName(&function, module_env) << std::endl;
os << std::endl;
}
- // Initialize the function environment for decoding.
- wasm::FunctionEnv env;
- env.module = module_env;
- env.sig = function.sig;
- env.local_i32_count = function.local_i32_count;
- env.local_i64_count = function.local_i64_count;
- env.local_f32_count = function.local_f32_count;
- env.local_f64_count = function.local_f64_count;
- env.SumLocals();
+
+ double decode_ms = 0;
+ base::ElapsedTimer decode_timer;
+ if (FLAG_trace_wasm_decode_time) {
+ decode_timer.Start();
+ }
// Create a TF graph during decoding.
- Zone zone;
+ Zone zone(isolate->allocator());
Graph graph(&zone);
CommonOperatorBuilder common(&zone);
MachineOperatorBuilder machine(
@@ -2154,11 +2697,12 @@ Handle<Code> CompileWasmFunction(wasm::ErrorThrower& thrower, Isolate* isolate,
InstructionSelector::SupportedMachineOperatorFlags());
JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
WasmGraphBuilder builder(&zone, &jsgraph, function.sig);
- wasm::TreeResult result = wasm::BuildTFGraph(
- &builder, &env, // --
- module_env->module->module_start, // --
- module_env->module->module_start + function.code_start_offset, // --
- module_env->module->module_start + function.code_end_offset); // --
+ wasm::FunctionBody body = {
+ module_env, function.sig, module_env->module->module_start,
+ module_env->module->module_start + function.code_start_offset,
+ module_env->module->module_start + function.code_end_offset};
+ wasm::TreeResult result =
+ wasm::BuildTFGraph(isolate->allocator(), &builder, body);
if (result.failed()) {
if (FLAG_trace_wasm_compiler) {
@@ -2167,17 +2711,31 @@ Handle<Code> CompileWasmFunction(wasm::ErrorThrower& thrower, Isolate* isolate,
}
// Add the function as another context for the exception
ScopedVector<char> buffer(128);
- SNPrintF(buffer, "Compiling WASM function #%d:%s failed:",
- function.func_index,
- module_env->module->GetName(function.name_offset));
+ wasm::WasmName name =
+ module_env->module->GetName(function.name_offset, function.name_length);
+ SNPrintF(buffer, "Compiling WASM function #%d:%.*s failed:",
+ function.func_index, name.length, name.name);
thrower.Failed(buffer.start(), result);
return Handle<Code>::null();
}
+ int index = static_cast<int>(function.func_index);
+ if (index >= FLAG_trace_wasm_ast_start && index < FLAG_trace_wasm_ast_end) {
+ PrintAst(isolate->allocator(), body);
+ }
+
+ if (FLAG_trace_wasm_decode_time) {
+ decode_ms = decode_timer.Elapsed().InMillisecondsF();
+ }
+
+ base::ElapsedTimer compile_timer;
+ if (FLAG_trace_wasm_decode_time) {
+ compile_timer.Start();
+ }
// Run the compiler pipeline to generate machine code.
CallDescriptor* descriptor =
wasm::ModuleEnv::GetWasmCallDescriptor(&zone, function.sig);
- if (kPointerSize == 4) {
+ if (machine.Is32()) {
descriptor = module_env->GetI32WasmCallDescriptor(&zone, descriptor);
}
Code::Flags flags = Code::ComputeFlags(Code::WASM_FUNCTION);
@@ -2192,8 +2750,10 @@ Handle<Code> CompileWasmFunction(wasm::ErrorThrower& thrower, Isolate* isolate,
Vector<char> buffer;
if (debugging) {
buffer = Vector<char>::New(128);
- SNPrintF(buffer, "WASM_function_#%d:%s", function.func_index,
- module_env->module->GetName(function.name_offset));
+ wasm::WasmName name =
+ module_env->module->GetName(function.name_offset, function.name_length);
+ SNPrintF(buffer, "WASM_function_#%d:%.*s", function.func_index, name.length,
+ name.name);
func_name = buffer.start();
}
CompilationInfo info(func_name, isolate, &zone, flags);
@@ -2204,11 +2764,20 @@ Handle<Code> CompileWasmFunction(wasm::ErrorThrower& thrower, Isolate* isolate,
buffer.Dispose();
}
if (!code.is_null()) {
- RecordFunctionCompilation(
- Logger::FUNCTION_TAG, &info, "WASM_function", function.func_index,
- module_env->module->GetName(function.name_offset));
+ RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, "WASM_function",
+ function.func_index,
+ module_env->module->GetName(
+ function.name_offset, function.name_length));
}
+ if (FLAG_trace_wasm_decode_time) {
+ double compile_ms = compile_timer.Elapsed().InMillisecondsF();
+ PrintF(
+ "wasm-compile ok: %d bytes, %0.3f ms decode, %d nodes, %0.3f ms "
+ "compile\n",
+ static_cast<int>(function.code_end_offset - function.code_start_offset),
+ decode_ms, static_cast<int>(graph.NodeCount()), compile_ms);
+ }
return code;
}
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 2e86b5600e..bbcafa7296 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -40,7 +40,9 @@ Handle<Code> CompileWasmFunction(wasm::ErrorThrower& thrower, Isolate* isolate,
// Wraps a JS function, producing a code object that can be called from WASM.
Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
Handle<JSFunction> function,
- wasm::FunctionSig* sig, const char* name);
+ wasm::FunctionSig* sig,
+ wasm::WasmName module_name,
+ wasm::WasmName function_name);
// Wraps a given wasm code object, producing a JSFunction that can be called
// from JavaScript.
@@ -162,8 +164,12 @@ class WasmGraphBuilder {
Node* MemBuffer(uint32_t offset);
void BoundsCheckMem(MachineType memtype, Node* index, uint32_t offset);
+ Node* MaskShiftCount32(Node* node);
+ Node* MaskShiftCount64(Node* node);
+
Node* BuildCCall(MachineSignature* sig, Node** args);
Node* BuildWasmCall(wasm::FunctionSig* sig, Node** args);
+
Node* BuildF32Neg(Node* input);
Node* BuildF64Neg(Node* input);
Node* BuildF32CopySign(Node* left, Node* right);
@@ -180,8 +186,8 @@ class WasmGraphBuilder {
Node* BuildI32Popcnt(Node* input);
Node* BuildI64Ctz(Node* input);
Node* BuildI64Popcnt(Node* input);
- Node* BuildRoundingInstruction(Node* input, ExternalReference ref,
- MachineType type);
+ Node* BuildCFuncInstruction(ExternalReference ref, MachineType type,
+ Node* input0, Node* input1 = nullptr);
Node* BuildF32Trunc(Node* input);
Node* BuildF32Floor(Node* input);
Node* BuildF32Ceil(Node* input);
@@ -190,10 +196,54 @@ class WasmGraphBuilder {
Node* BuildF64Floor(Node* input);
Node* BuildF64Ceil(Node* input);
Node* BuildF64NearestInt(Node* input);
-
- Node** Realloc(Node** buffer, size_t count) {
- Node** buf = Buffer(count);
- if (buf != buffer) memcpy(buf, buffer, count * sizeof(Node*));
+ Node* BuildI32Rol(Node* left, Node* right);
+ Node* BuildI64Rol(Node* left, Node* right);
+
+ Node* BuildF64Acos(Node* input);
+ Node* BuildF64Asin(Node* input);
+ Node* BuildF64Atan(Node* input);
+ Node* BuildF64Cos(Node* input);
+ Node* BuildF64Sin(Node* input);
+ Node* BuildF64Tan(Node* input);
+ Node* BuildF64Exp(Node* input);
+ Node* BuildF64Log(Node* input);
+ Node* BuildF64Pow(Node* left, Node* right);
+ Node* BuildF64Atan2(Node* left, Node* right);
+ Node* BuildF64Mod(Node* left, Node* right);
+
+ Node* BuildIntToFloatConversionInstruction(
+ Node* input, ExternalReference ref,
+ MachineRepresentation parameter_representation,
+ const MachineType result_type);
+ Node* BuildF32SConvertI64(Node* input);
+ Node* BuildF32UConvertI64(Node* input);
+ Node* BuildF64SConvertI64(Node* input);
+ Node* BuildF64UConvertI64(Node* input);
+
+ Node* BuildFloatToIntConversionInstruction(
+ Node* input, ExternalReference ref,
+ MachineRepresentation parameter_representation,
+ const MachineType result_type);
+ Node* BuildI64SConvertF32(Node* input);
+ Node* BuildI64UConvertF32(Node* input);
+ Node* BuildI64SConvertF64(Node* input);
+ Node* BuildI64UConvertF64(Node* input);
+
+ Node* BuildI32DivS(Node* left, Node* right);
+ Node* BuildI32RemS(Node* left, Node* right);
+ Node* BuildI32DivU(Node* left, Node* right);
+ Node* BuildI32RemU(Node* left, Node* right);
+
+ Node* BuildI64DivS(Node* left, Node* right);
+ Node* BuildI64RemS(Node* left, Node* right);
+ Node* BuildI64DivU(Node* left, Node* right);
+ Node* BuildI64RemU(Node* left, Node* right);
+ Node* BuildDiv64Call(Node* left, Node* right, ExternalReference ref,
+ MachineType result_type, int trap_zero);
+
+ Node** Realloc(Node** buffer, size_t old_count, size_t new_count) {
+ Node** buf = Buffer(new_count);
+ if (buf != buffer) memcpy(buf, buffer, old_count * sizeof(Node*));
return buf;
}
};
diff --git a/deps/v8/src/compiler/wasm-linkage.cc b/deps/v8/src/compiler/wasm-linkage.cc
index 3176fd3e2a..f0e14ce3e9 100644
--- a/deps/v8/src/compiler/wasm-linkage.cc
+++ b/deps/v8/src/compiler/wasm-linkage.cc
@@ -58,7 +58,7 @@ LinkageLocation stackloc(int i) {
// ===========================================================================
// == ia32 ===================================================================
// ===========================================================================
-#define GP_PARAM_REGISTERS eax, edx, ecx, ebx, esi
+#define GP_PARAM_REGISTERS eax, edx, ecx, ebx
#define GP_RETURN_REGISTERS eax, edx
#define FP_PARAM_REGISTERS xmm1, xmm2, xmm3, xmm4, xmm5, xmm6
#define FP_RETURN_REGISTERS xmm1, xmm2
@@ -125,6 +125,24 @@ LinkageLocation stackloc(int i) {
#define FP_PARAM_REGISTERS d1, d2, d3, d4, d5, d6, d7, d8
#define FP_RETURN_REGISTERS d1, d2
+#elif V8_TARGET_ARCH_S390X
+// ===========================================================================
+// == s390x ==================================================================
+// ===========================================================================
+#define GP_PARAM_REGISTERS r2, r3, r4, r5, r6
+#define GP_RETURN_REGISTERS r2
+#define FP_PARAM_REGISTERS d0, d2, d4, d6
+#define FP_RETURN_REGISTERS d0, d2, d4, d6
+
+#elif V8_TARGET_ARCH_S390
+// ===========================================================================
+// == s390 ===================================================================
+// ===========================================================================
+#define GP_PARAM_REGISTERS r2, r3, r4, r5, r6
+#define GP_RETURN_REGISTERS r2, r3
+#define FP_PARAM_REGISTERS d0, d2
+#define FP_RETURN_REGISTERS d0, d2
+
#else
// ===========================================================================
// == unknown ================================================================
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
index 510c0c6a0c..2e4eccb483 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -49,8 +49,11 @@ class X64OperandConverter : public InstructionOperandConverter {
Operand ToOperand(InstructionOperand* op, int extra = 0) {
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- FrameOffset offset = frame_access_state()->GetFrameOffset(
- AllocatedOperand::cast(op)->index());
+ return SlotToOperand(AllocatedOperand::cast(op)->index(), extra);
+ }
+
+ Operand SlotToOperand(int slot_index, int extra = 0) {
+ FrameOffset offset = frame_access_state()->GetFrameOffset(slot_index);
return Operand(offset.from_stack_pointer() ? rsp : rbp,
offset.offset() + extra);
}
@@ -599,6 +602,12 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
} \
} while (false)
+void CodeGenerator::AssembleDeconstructFrame() {
+ __ movq(rsp, rbp);
+ __ popq(rbp);
+}
+
+void CodeGenerator::AssembleSetupStackPointer() {}
void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
@@ -615,18 +624,43 @@ void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
__ subq(rsp, Immediate(-sp_slot_delta * kPointerSize));
frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
}
- if (frame()->needs_frame()) {
+ if (frame_access_state()->has_frame()) {
__ movq(rbp, MemOperand(rbp, 0));
}
frame_access_state()->SetFrameAccessToSP();
}
+void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+ Label done;
+
+ // Check if current frame is an arguments adaptor frame.
+ __ Cmp(Operand(rbp, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(not_equal, &done, Label::kNear);
+
+ // Load arguments count from current arguments adaptor frame (note, it
+ // does not include receiver).
+ Register caller_args_count_reg = scratch1;
+ __ SmiToInteger32(
+ caller_args_count_reg,
+ Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+ ParameterCount callee_args_count(args_reg);
+ __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+ scratch3, ReturnAddressState::kOnStack);
+ __ bind(&done);
+}
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
X64OperandConverter i(this, instr);
-
- switch (ArchOpcodeField::decode(instr->opcode())) {
+ InstructionCode opcode = instr->opcode();
+ ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
+ switch (arch_opcode) {
case kArchCallCodeObject: {
EnsureSpaceForLazyDeopt();
if (HasImmediateInput(instr, 0)) {
@@ -641,9 +675,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
AssembleDeconstructActivationRecord(stack_param_delta);
+ if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
+ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+ i.TempRegister(0), i.TempRegister(1),
+ i.TempRegister(2));
+ }
if (HasImmediateInput(instr, 0)) {
Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
__ jmp(code, RelocInfo::CODE_TARGET);
@@ -668,6 +708,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
RecordCallPosition(instr);
break;
}
+ case kArchTailCallJSFunctionFromJSFunction:
case kArchTailCallJSFunction: {
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
@@ -677,6 +718,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
AssembleDeconstructActivationRecord(stack_param_delta);
+ if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
+ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+ i.TempRegister(0), i.TempRegister(1),
+ i.TempRegister(2));
+ }
__ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
frame_access_state()->ClearSPDelta();
break;
@@ -735,7 +781,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ movq(i.OutputRegister(), rbp);
break;
case kArchParentFramePointer:
- if (frame_access_state()->frame()->needs_frame()) {
+ if (frame_access_state()->has_frame()) {
__ movq(i.OutputRegister(), Operand(rbp, 0));
} else {
__ movq(i.OutputRegister(), rbp);
@@ -799,12 +845,24 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kX64And:
ASSEMBLE_BINOP(andq);
break;
+ case kX64Cmp8:
+ ASSEMBLE_COMPARE(cmpb);
+ break;
+ case kX64Cmp16:
+ ASSEMBLE_COMPARE(cmpw);
+ break;
case kX64Cmp32:
ASSEMBLE_COMPARE(cmpl);
break;
case kX64Cmp:
ASSEMBLE_COMPARE(cmpq);
break;
+ case kX64Test8:
+ ASSEMBLE_COMPARE(testb);
+ break;
+ case kX64Test16:
+ ASSEMBLE_COMPARE(testw);
+ break;
case kX64Test32:
ASSEMBLE_COMPARE(testl);
break;
@@ -1001,7 +1059,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
} else {
__ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
}
- __ AssertZeroExtended(i.OutputRegister());
break;
}
case kSSEFloat64Cmp:
@@ -1100,7 +1157,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
} else {
__ Cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
}
- __ AssertZeroExtended(i.OutputRegister());
+ if (MiscField::decode(instr->opcode())) {
+ __ AssertZeroExtended(i.OutputRegister());
+ }
break;
}
case kSSEFloat32ToInt64:
@@ -1877,18 +1936,16 @@ static const int kQuadWordSize = 16;
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->IsCFunctionCall()) {
- __ pushq(rbp);
- __ movq(rbp, rsp);
- } else if (descriptor->IsJSFunctionCall()) {
- __ Prologue(this->info()->GeneratePreagedPrologue());
- } else if (frame()->needs_frame()) {
- __ StubPrologue();
- } else {
- frame()->SetElidedFrameSizeInSlots(kPCOnStackSize / kPointerSize);
+ if (frame_access_state()->has_frame()) {
+ if (descriptor->IsCFunctionCall()) {
+ __ pushq(rbp);
+ __ movq(rbp, rsp);
+ } else if (descriptor->IsJSFunctionCall()) {
+ __ Prologue(this->info()->GeneratePreagedPrologue());
+ } else {
+ __ StubPrologue(info()->GetOutputStackFrameType());
+ }
}
- frame_access_state()->SetFrameAccessToDefault();
-
int stack_shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -1968,17 +2025,15 @@ void CodeGenerator::AssembleReturn() {
}
if (descriptor->IsCFunctionCall()) {
- __ movq(rsp, rbp); // Move stack pointer back to frame pointer.
- __ popq(rbp); // Pop caller's frame pointer.
- } else if (frame()->needs_frame()) {
+ AssembleDeconstructFrame();
+ } else if (frame_access_state()->has_frame()) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ jmp(&return_label_);
return;
} else {
__ bind(&return_label_);
- __ movq(rsp, rbp); // Move stack pointer back to frame pointer.
- __ popq(rbp); // Pop caller's frame pointer.
+ AssembleDeconstructFrame();
}
}
size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
@@ -2043,9 +2098,9 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
case Constant::kHeapObject: {
Handle<HeapObject> src_object = src.ToHeapObject();
Heap::RootListIndex index;
- int offset;
- if (IsMaterializableFromFrame(src_object, &offset)) {
- __ movp(dst, Operand(rbp, offset));
+ int slot;
+ if (IsMaterializableFromFrame(src_object, &slot)) {
+ __ movp(dst, g.SlotToOperand(slot));
} else if (IsMaterializableFromRoot(src_object, &index)) {
__ LoadRoot(dst, index);
} else {
diff --git a/deps/v8/src/compiler/x64/instruction-codes-x64.h b/deps/v8/src/compiler/x64/instruction-codes-x64.h
index 6d5e77ccee..bd19386d6a 100644
--- a/deps/v8/src/compiler/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/x64/instruction-codes-x64.h
@@ -18,8 +18,12 @@ namespace compiler {
V(X64And32) \
V(X64Cmp) \
V(X64Cmp32) \
+ V(X64Cmp16) \
+ V(X64Cmp8) \
V(X64Test) \
V(X64Test32) \
+ V(X64Test16) \
+ V(X64Test8) \
V(X64Or) \
V(X64Or32) \
V(X64Xor) \
@@ -139,7 +143,6 @@ namespace compiler {
V(X64Poke) \
V(X64StackCheck)
-
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
diff --git a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
index 1f10b51bcc..3c31965d72 100644
--- a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
@@ -20,8 +20,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64And32:
case kX64Cmp:
case kX64Cmp32:
+ case kX64Cmp16:
+ case kX64Cmp8:
case kX64Test:
case kX64Test32:
+ case kX64Test16:
+ case kX64Test8:
case kX64Or:
case kX64Or32:
case kX64Xor:
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index d3a2a8e753..f46ff5946d 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -36,6 +36,37 @@ class X64OperandGenerator final : public OperandGenerator {
}
}
+ bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input,
+ int effect_level) {
+ if (input->opcode() != IrOpcode::kLoad ||
+ !selector()->CanCover(node, input)) {
+ return false;
+ }
+ if (effect_level != selector()->GetEffectLevel(input)) {
+ return false;
+ }
+ MachineRepresentation rep =
+ LoadRepresentationOf(input->op()).representation();
+ switch (opcode) {
+ case kX64Cmp:
+ case kX64Test:
+ return rep == MachineRepresentation::kWord64 ||
+ rep == MachineRepresentation::kTagged;
+ case kX64Cmp32:
+ case kX64Test32:
+ return rep == MachineRepresentation::kWord32;
+ case kX64Cmp16:
+ case kX64Test16:
+ return rep == MachineRepresentation::kWord16;
+ case kX64Cmp8:
+ case kX64Test8:
+ return rep == MachineRepresentation::kWord8;
+ default:
+ break;
+ }
+ return false;
+ }
+
AddressingMode GenerateMemoryOperandInputs(Node* index, int scale_exponent,
Node* base, Node* displacement,
InstructionOperand inputs[],
@@ -173,9 +204,7 @@ void InstructionSelector::VisitStore(Node* node) {
inputs[input_count++] = g.UseUniqueRegister(index);
addressing_mode = kMode_MR1;
}
- inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
- ? g.UseRegister(value)
- : g.UseUniqueRegister(value);
+ inputs[input_count++] = g.UseUniqueRegister(value);
RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
switch (write_barrier_kind) {
case kNoWriteBarrier:
@@ -399,8 +428,13 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
- selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
- inputs);
+ opcode = cont->Encode(opcode);
+ if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+ cont->frame_state());
+ } else {
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
+ }
}
@@ -588,6 +622,75 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
void InstructionSelector::VisitWord64Sar(Node* node) {
+ X64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if (CanCover(m.node(), m.left().node()) && m.left().IsLoad() &&
+ m.right().Is(32)) {
+ // Just load and sign-extend the interesting 4 bytes instead. This happens,
+ // for example, when we're loading and untagging SMIs.
+ BaseWithIndexAndDisplacement64Matcher mleft(m.left().node(), true);
+ if (mleft.matches() && (mleft.displacement() == nullptr ||
+ g.CanBeImmediate(mleft.displacement()))) {
+ size_t input_count = 0;
+ InstructionOperand inputs[3];
+ AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
+ m.left().node(), inputs, &input_count);
+ if (mleft.displacement() == nullptr) {
+ // Make sure that the addressing mode indicates the presence of an
+ // immediate displacement. It seems that we never use M1 and M2, but we
+ // handle them here anyways.
+ switch (mode) {
+ case kMode_MR:
+ mode = kMode_MRI;
+ break;
+ case kMode_MR1:
+ mode = kMode_MR1I;
+ break;
+ case kMode_MR2:
+ mode = kMode_MR2I;
+ break;
+ case kMode_MR4:
+ mode = kMode_MR4I;
+ break;
+ case kMode_MR8:
+ mode = kMode_MR8I;
+ break;
+ case kMode_M1:
+ mode = kMode_M1I;
+ break;
+ case kMode_M2:
+ mode = kMode_M2I;
+ break;
+ case kMode_M4:
+ mode = kMode_M4I;
+ break;
+ case kMode_M8:
+ mode = kMode_M8I;
+ break;
+ case kMode_None:
+ case kMode_MRI:
+ case kMode_MR1I:
+ case kMode_MR2I:
+ case kMode_MR4I:
+ case kMode_MR8I:
+ case kMode_M1I:
+ case kMode_M2I:
+ case kMode_M4I:
+ case kMode_M8I:
+ UNREACHABLE();
+ }
+ inputs[input_count++] = ImmediateOperand(ImmediateOperand::INLINE, 4);
+ } else {
+ ImmediateOperand* op = ImmediateOperand::cast(&inputs[input_count - 1]);
+ int32_t displacement = sequence()->GetImmediate(op).ToInt32();
+ *op = ImmediateOperand(ImmediateOperand::INLINE, displacement + 4);
+ }
+ InstructionOperand outputs[] = {g.DefineAsRegister(node)};
+ InstructionCode code = kX64Movsxlq | AddressingModeField::encode(mode);
+ Emit(code, 1, outputs, input_count, inputs);
+ return;
+ }
+ }
VisitWord64Shift(this, node, kX64Sar);
}
@@ -668,8 +771,8 @@ void InstructionSelector::VisitInt64Add(Node* node) {
void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont(kOverflow, ovf);
- VisitBinop(this, node, kX64Add, &cont);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kX64Add, &cont);
}
FlagsContinuation cont;
VisitBinop(this, node, kX64Add, &cont);
@@ -708,7 +811,7 @@ void InstructionSelector::VisitInt64Sub(Node* node) {
void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont(kOverflow, ovf);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
return VisitBinop(this, node, kX64Sub, &cont);
}
FlagsContinuation cont;
@@ -865,9 +968,15 @@ void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
X64OperandGenerator g(this);
- Emit(kSSEFloat64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+ Emit(kSSEFloat64ToUint32 | MiscField::encode(1), g.DefineAsRegister(node),
+ g.Use(node->InputAt(0)));
}
+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kSSEFloat64ToUint32 | MiscField::encode(0), g.DefineAsRegister(node),
+ g.Use(node->InputAt(0)));
+}
void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
X64OperandGenerator g(this);
@@ -1336,6 +1445,7 @@ void InstructionSelector::EmitPrepareArguments(
bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
+int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
namespace {
@@ -1357,6 +1467,9 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
inputs[input_count++] = g.Label(cont->true_block());
inputs[input_count++] = g.Label(cont->false_block());
selector->Emit(opcode, 0, nullptr, input_count, inputs);
+ } else if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
+ cont->frame_state());
} else {
DCHECK(cont->IsSet());
InstructionOperand output = g.DefineAsRegister(cont->result());
@@ -1364,23 +1477,6 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
}
}
-// Determines if {input} of {node} can be replaced by a memory operand.
-bool CanUseMemoryOperand(InstructionSelector* selector, InstructionCode opcode,
- Node* node, Node* input) {
- if (input->opcode() != IrOpcode::kLoad || !selector->CanCover(node, input)) {
- return false;
- }
- MachineRepresentation rep =
- LoadRepresentationOf(input->op()).representation();
- if (rep == MachineRepresentation::kWord64 ||
- rep == MachineRepresentation::kTagged) {
- return opcode == kX64Cmp || opcode == kX64Test;
- } else if (rep == MachineRepresentation::kWord32) {
- return opcode == kX64Cmp32 || opcode == kX64Test32;
- }
- return false;
-}
-
// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
InstructionOperand left, InstructionOperand right,
@@ -1390,6 +1486,9 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
if (cont->IsBranch()) {
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
+ } else if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+ cont->frame_state());
} else {
DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
@@ -1408,6 +1507,37 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
}
+// Tries to match the size of the given opcode to that of the operands, if
+// possible.
+InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
+ Node* right) {
+ if (opcode != kX64Cmp32 && opcode != kX64Test32) {
+ return opcode;
+ }
+ // Currently, if one of the two operands is not a Load, we don't know what its
+ // machine representation is, so we bail out.
+ // TODO(epertoso): we can probably get some size information out of immediates
+ // and phi nodes.
+ if (left->opcode() != IrOpcode::kLoad || right->opcode() != IrOpcode::kLoad) {
+ return opcode;
+ }
+ // If the load representations don't match, both operands will be
+ // zero/sign-extended to 32bit.
+ LoadRepresentation left_representation = LoadRepresentationOf(left->op());
+ if (left_representation != LoadRepresentationOf(right->op())) {
+ return opcode;
+ }
+ switch (left_representation.representation()) {
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kWord8:
+ return opcode == kX64Cmp32 ? kX64Cmp8 : kX64Test8;
+ case MachineRepresentation::kWord16:
+ return opcode == kX64Cmp32 ? kX64Cmp16 : kX64Test16;
+ default:
+ return opcode;
+ }
+}
+
// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont) {
@@ -1415,15 +1545,26 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
- // If one of the two inputs is an immediate, make sure it's on the right.
- if (!g.CanBeImmediate(right) && g.CanBeImmediate(left)) {
+ opcode = TryNarrowOpcodeSize(opcode, left, right);
+
+ // If one of the two inputs is an immediate, make sure it's on the right, or
+ // if one of the two inputs is a memory operand, make sure it's on the left.
+ int effect_level = selector->GetEffectLevel(node);
+ if (cont->IsBranch()) {
+ effect_level = selector->GetEffectLevel(
+ cont->true_block()->PredecessorAt(0)->control_input());
+ }
+
+ if ((!g.CanBeImmediate(right) && g.CanBeImmediate(left)) ||
+ (g.CanBeMemoryOperand(opcode, node, right, effect_level) &&
+ !g.CanBeMemoryOperand(opcode, node, left, effect_level))) {
if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
std::swap(left, right);
}
// Match immediates on right side of comparison.
if (g.CanBeImmediate(right)) {
- if (CanUseMemoryOperand(selector, opcode, node, left)) {
+ if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
return VisitCompareWithMemoryOperand(selector, opcode, left,
g.UseImmediate(right), cont);
}
@@ -1431,15 +1572,17 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
cont);
}
+ // Match memory operands on left side of comparison.
+ if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
+ return VisitCompareWithMemoryOperand(selector, opcode, left,
+ g.UseRegister(right), cont);
+ }
+
if (g.CanBeBetterLeftOperand(right)) {
if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
std::swap(left, right);
}
- if (CanUseMemoryOperand(selector, opcode, node, left)) {
- return VisitCompareWithMemoryOperand(selector, opcode, left,
- g.UseRegister(right), cont);
- }
return VisitCompare(selector, opcode, left, right, cont,
node->op()->HasProperty(Operator::kCommutative));
}
@@ -1460,6 +1603,9 @@ void VisitWord64Compare(InstructionSelector* selector, Node* node,
if (cont->IsBranch()) {
selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
g.Label(cont->false_block()));
+ } else if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr,
+ cont->frame_state());
} else {
DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()));
@@ -1500,98 +1646,87 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
VisitCompare(selector, opcode, right, left, cont, false);
}
-} // namespace
-
-
-void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
- BasicBlock* fbranch) {
- X64OperandGenerator g(this);
- Node* user = branch;
- Node* value = branch->InputAt(0);
-
- FlagsContinuation cont(kNotEqual, tbranch, fbranch);
-
- // Try to combine with comparisons against 0 by simply inverting the branch.
- while (CanCover(user, value) && value->opcode() == IrOpcode::kWord32Equal) {
- Int32BinopMatcher m(value);
- if (m.right().Is(0)) {
- user = value;
- value = m.left().node();
- cont.Negate();
- } else {
- break;
- }
- }
-
- // Try to combine the branch with a comparison.
- if (CanCover(user, value)) {
+// Shared routine for word comparison against zero.
+void VisitWordCompareZero(InstructionSelector* selector, Node* user,
+ Node* value, FlagsContinuation* cont) {
+ while (selector->CanCover(user, value)) {
switch (value->opcode()) {
- case IrOpcode::kWord32Equal:
- cont.OverwriteAndNegateIfEqual(kEqual);
- return VisitWordCompare(this, value, kX64Cmp32, &cont);
+ case IrOpcode::kWord32Equal: {
+ // Combine with comparisons against 0 by simply inverting the
+ // continuation.
+ Int32BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ user = value;
+ value = m.left().node();
+ cont->Negate();
+ continue;
+ }
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitWordCompare(selector, value, kX64Cmp32, cont);
+ }
case IrOpcode::kInt32LessThan:
- cont.OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWordCompare(this, value, kX64Cmp32, &cont);
+ cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWordCompare(selector, value, kX64Cmp32, cont);
case IrOpcode::kInt32LessThanOrEqual:
- cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWordCompare(this, value, kX64Cmp32, &cont);
+ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWordCompare(selector, value, kX64Cmp32, cont);
case IrOpcode::kUint32LessThan:
- cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWordCompare(this, value, kX64Cmp32, &cont);
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWordCompare(selector, value, kX64Cmp32, cont);
case IrOpcode::kUint32LessThanOrEqual:
- cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWordCompare(this, value, kX64Cmp32, &cont);
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWordCompare(selector, value, kX64Cmp32, cont);
case IrOpcode::kWord64Equal: {
- cont.OverwriteAndNegateIfEqual(kEqual);
+ cont->OverwriteAndNegateIfEqual(kEqual);
Int64BinopMatcher m(value);
if (m.right().Is(0)) {
// Try to combine the branch with a comparison.
Node* const user = m.node();
Node* const value = m.left().node();
- if (CanCover(user, value)) {
+ if (selector->CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kInt64Sub:
- return VisitWord64Compare(this, value, &cont);
+ return VisitWord64Compare(selector, value, cont);
case IrOpcode::kWord64And:
- return VisitWordCompare(this, value, kX64Test, &cont);
+ return VisitWordCompare(selector, value, kX64Test, cont);
default:
break;
}
}
- return VisitCompareZero(this, value, kX64Cmp, &cont);
+ return VisitCompareZero(selector, value, kX64Cmp, cont);
}
- return VisitWord64Compare(this, value, &cont);
+ return VisitWord64Compare(selector, value, cont);
}
case IrOpcode::kInt64LessThan:
- cont.OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWord64Compare(this, value, &cont);
+ cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWord64Compare(selector, value, cont);
case IrOpcode::kInt64LessThanOrEqual:
- cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWord64Compare(this, value, &cont);
+ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWord64Compare(selector, value, cont);
case IrOpcode::kUint64LessThan:
- cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWord64Compare(this, value, &cont);
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWord64Compare(selector, value, cont);
case IrOpcode::kUint64LessThanOrEqual:
- cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWord64Compare(this, value, &cont);
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWord64Compare(selector, value, cont);
case IrOpcode::kFloat32Equal:
- cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
- return VisitFloat32Compare(this, value, &cont);
+ cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
+ return VisitFloat32Compare(selector, value, cont);
case IrOpcode::kFloat32LessThan:
- cont.OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
- return VisitFloat32Compare(this, value, &cont);
+ cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
+ return VisitFloat32Compare(selector, value, cont);
case IrOpcode::kFloat32LessThanOrEqual:
- cont.OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
- return VisitFloat32Compare(this, value, &cont);
+ cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
+ return VisitFloat32Compare(selector, value, cont);
case IrOpcode::kFloat64Equal:
- cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
- return VisitFloat64Compare(this, value, &cont);
+ cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
+ return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThan:
- cont.OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
- return VisitFloat64Compare(this, value, &cont);
+ cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
+ return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThanOrEqual:
- cont.OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
- return VisitFloat64Compare(this, value, &cont);
+ cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
+ return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
@@ -1603,20 +1738,20 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
// *AFTER* this branch).
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
- if (result == nullptr || IsDefined(result)) {
+ if (result == nullptr || selector->IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
- cont.OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(this, node, kX64Add32, &cont);
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(selector, node, kX64Add32, cont);
case IrOpcode::kInt32SubWithOverflow:
- cont.OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(this, node, kX64Sub32, &cont);
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(selector, node, kX64Sub32, cont);
case IrOpcode::kInt64AddWithOverflow:
- cont.OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(this, node, kX64Add, &cont);
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(selector, node, kX64Add, cont);
case IrOpcode::kInt64SubWithOverflow:
- cont.OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(this, node, kX64Sub, &cont);
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(selector, node, kX64Sub, cont);
default:
break;
}
@@ -1624,22 +1759,42 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
break;
case IrOpcode::kInt32Sub:
- return VisitWordCompare(this, value, kX64Cmp32, &cont);
+ return VisitWordCompare(selector, value, kX64Cmp32, cont);
case IrOpcode::kInt64Sub:
- return VisitWord64Compare(this, value, &cont);
+ return VisitWord64Compare(selector, value, cont);
case IrOpcode::kWord32And:
- return VisitWordCompare(this, value, kX64Test32, &cont);
+ return VisitWordCompare(selector, value, kX64Test32, cont);
case IrOpcode::kWord64And:
- return VisitWordCompare(this, value, kX64Test, &cont);
+ return VisitWordCompare(selector, value, kX64Test, cont);
default:
break;
}
+ break;
}
// Branch could not be combined with a compare, emit compare against 0.
- VisitCompareZero(this, value, kX64Cmp32, &cont);
+ VisitCompareZero(selector, value, kX64Cmp32, cont);
+}
+
+} // namespace
+
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+ BasicBlock* fbranch) {
+ FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+ VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
+void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
X64OperandGenerator g(this);
@@ -1674,7 +1829,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
void InstructionSelector::VisitWord32Equal(Node* const node) {
Node* user = node;
- FlagsContinuation cont(kEqual, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int32BinopMatcher m(user);
if (m.right().Is(0)) {
Node* value = m.left().node();
@@ -1709,31 +1864,33 @@ void InstructionSelector::VisitWord32Equal(Node* const node) {
void InstructionSelector::VisitInt32LessThan(Node* node) {
- FlagsContinuation cont(kSignedLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
VisitWordCompare(this, node, kX64Cmp32, &cont);
}
void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kSignedLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
VisitWordCompare(this, node, kX64Cmp32, &cont);
}
void InstructionSelector::VisitUint32LessThan(Node* node) {
- FlagsContinuation cont(kUnsignedLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitWordCompare(this, node, kX64Cmp32, &cont);
}
void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitWordCompare(this, node, kX64Cmp32, &cont);
}
void InstructionSelector::VisitWord64Equal(Node* const node) {
- FlagsContinuation cont(kEqual, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int64BinopMatcher m(node);
if (m.right().Is(0)) {
// Try to combine the equality check with a comparison.
@@ -1756,8 +1913,8 @@ void InstructionSelector::VisitWord64Equal(Node* const node) {
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont(kOverflow, ovf);
- VisitBinop(this, node, kX64Add32, &cont);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kX64Add32, &cont);
}
FlagsContinuation cont;
VisitBinop(this, node, kX64Add32, &cont);
@@ -1766,7 +1923,7 @@ void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont(kOverflow, ovf);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
return VisitBinop(this, node, kX64Sub32, &cont);
}
FlagsContinuation cont;
@@ -1775,61 +1932,67 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
void InstructionSelector::VisitInt64LessThan(Node* node) {
- FlagsContinuation cont(kSignedLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
VisitWord64Compare(this, node, &cont);
}
void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kSignedLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
VisitWord64Compare(this, node, &cont);
}
void InstructionSelector::VisitUint64LessThan(Node* node) {
- FlagsContinuation cont(kUnsignedLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitWord64Compare(this, node, &cont);
}
void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitWord64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat32Equal(Node* node) {
- FlagsContinuation cont(kUnorderedEqual, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat32LessThan(Node* node) {
- FlagsContinuation cont(kUnsignedGreaterThan, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64Equal(Node* node) {
- FlagsContinuation cont(kUnorderedEqual, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThan(Node* node) {
- FlagsContinuation cont(kUnsignedGreaterThan, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
VisitFloat64Compare(this, node, &cont);
}
diff --git a/deps/v8/src/compiler/x87/code-generator-x87.cc b/deps/v8/src/compiler/x87/code-generator-x87.cc
index 15755703e0..da7fdb481b 100644
--- a/deps/v8/src/compiler/x87/code-generator-x87.cc
+++ b/deps/v8/src/compiler/x87/code-generator-x87.cc
@@ -43,16 +43,13 @@ class X87OperandConverter : public InstructionOperandConverter {
return Operand(ToRegister(op));
}
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- FrameOffset offset = frame_access_state()->GetFrameOffset(
- AllocatedOperand::cast(op)->index());
- return Operand(offset.from_stack_pointer() ? esp : ebp,
- offset.offset() + extra);
+ return SlotToOperand(AllocatedOperand::cast(op)->index(), extra);
}
- Operand ToMaterializableOperand(int materializable_offset) {
- FrameOffset offset = frame_access_state()->GetFrameOffset(
- FPOffsetToFrameSlot(materializable_offset));
- return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
+ Operand SlotToOperand(int slot, int extra = 0) {
+ FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
+ return Operand(offset.from_stack_pointer() ? esp : ebp,
+ offset.offset() + extra);
}
Operand HighOperand(InstructionOperand* op) {
@@ -340,6 +337,42 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ bind(&done); \
} while (false)
+#define ASSEMBLE_COMPARE(asm_instr) \
+ do { \
+ if (AddressingModeField::decode(instr->opcode()) != kMode_None) { \
+ size_t index = 0; \
+ Operand left = i.MemoryOperand(&index); \
+ if (HasImmediateInput(instr, index)) { \
+ __ asm_instr(left, i.InputImmediate(index)); \
+ } else { \
+ __ asm_instr(left, i.InputRegister(index)); \
+ } \
+ } else { \
+ if (HasImmediateInput(instr, 1)) { \
+ if (instr->InputAt(0)->IsRegister()) { \
+ __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
+ } else { \
+ __ asm_instr(i.InputOperand(0), i.InputImmediate(1)); \
+ } \
+ } else { \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ asm_instr(i.InputRegister(0), i.InputRegister(1)); \
+ } else { \
+ __ asm_instr(i.InputRegister(0), i.InputOperand(1)); \
+ } \
+ } \
+ } \
+ } while (0)
+
+void CodeGenerator::AssembleDeconstructFrame() {
+ __ mov(esp, ebp);
+ __ pop(ebp);
+}
+
+// For insert fninit/fld1 instructions after the Prologue
+thread_local bool is_block_0 = false;
+
+void CodeGenerator::AssembleSetupStackPointer() { is_block_0 = true; }
void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
@@ -356,18 +389,64 @@ void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
__ sub(esp, Immediate(-sp_slot_delta * kPointerSize));
frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
}
- if (frame()->needs_frame()) {
+ if (frame_access_state()->has_frame()) {
__ mov(ebp, MemOperand(ebp, 0));
}
frame_access_state()->SetFrameAccessToSP();
}
+void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
+ Register, Register,
+ Register) {
+ // There are not enough temp registers left on ia32 for a call instruction
+ // so we pick some scratch registers and save/restore them manually here.
+ int scratch_count = 3;
+ Register scratch1 = ebx;
+ Register scratch2 = ecx;
+ Register scratch3 = edx;
+ DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+ Label done;
+
+ // Check if current frame is an arguments adaptor frame.
+ __ cmp(Operand(ebp, StandardFrameConstants::kContextOffset),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &done, Label::kNear);
+
+ __ push(scratch1);
+ __ push(scratch2);
+ __ push(scratch3);
+
+ // Load arguments count from current arguments adaptor frame (note, it
+ // does not include receiver).
+ Register caller_args_count_reg = scratch1;
+ __ mov(caller_args_count_reg,
+ Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(caller_args_count_reg);
+
+ ParameterCount callee_args_count(args_reg);
+ __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+ scratch3, ReturnAddressState::kOnStack, scratch_count);
+ __ pop(scratch3);
+ __ pop(scratch2);
+ __ pop(scratch1);
+
+ __ bind(&done);
+}
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
X87OperandConverter i(this, instr);
+ InstructionCode opcode = instr->opcode();
+ ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
+
+ // Workaround for CL #35139 (https://codereview.chromium.org/1775323002)
+ if (is_block_0) {
+ __ fninit();
+ __ fld1();
+ is_block_0 = false;
+ }
- switch (ArchOpcodeField::decode(instr->opcode())) {
+ switch (arch_opcode) {
case kArchCallCodeObject: {
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
__ VerifyX87StackDepth(1);
@@ -399,6 +478,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
__ VerifyX87StackDepth(1);
@@ -406,6 +486,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ fstp(0);
int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
AssembleDeconstructActivationRecord(stack_param_delta);
+ if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
+ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+ no_reg, no_reg, no_reg);
+ }
if (HasImmediateInput(instr, 0)) {
Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
__ jmp(code, RelocInfo::CODE_TARGET);
@@ -447,6 +531,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchTailCallJSFunctionFromJSFunction:
case kArchTailCallJSFunction: {
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
@@ -460,6 +545,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ fstp(0);
int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
AssembleDeconstructActivationRecord(stack_param_delta);
+ if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
+ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+ no_reg, no_reg, no_reg);
+ }
__ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
frame_access_state()->ClearSPDelta();
break;
@@ -554,7 +643,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ mov(i.OutputRegister(), esp);
break;
case kArchParentFramePointer:
- if (frame_access_state()->frame()->needs_frame()) {
+ if (frame_access_state()->has_frame()) {
__ mov(i.OutputRegister(), Operand(ebp, 0));
} else {
__ mov(i.OutputRegister(), ebp);
@@ -615,38 +704,22 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
case kX87Cmp:
- if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
- size_t index = 0;
- Operand operand = i.MemoryOperand(&index);
- if (HasImmediateInput(instr, index)) {
- __ cmp(operand, i.InputImmediate(index));
- } else {
- __ cmp(operand, i.InputRegister(index));
- }
- } else {
- if (HasImmediateInput(instr, 1)) {
- __ cmp(i.InputOperand(0), i.InputImmediate(1));
- } else {
- __ cmp(i.InputRegister(0), i.InputOperand(1));
- }
- }
+ ASSEMBLE_COMPARE(cmp);
+ break;
+ case kX87Cmp16:
+ ASSEMBLE_COMPARE(cmpw);
+ break;
+ case kX87Cmp8:
+ ASSEMBLE_COMPARE(cmpb);
break;
case kX87Test:
- if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
- size_t index = 0;
- Operand operand = i.MemoryOperand(&index);
- if (HasImmediateInput(instr, index)) {
- __ test(operand, i.InputImmediate(index));
- } else {
- __ test(i.InputRegister(index), operand);
- }
- } else {
- if (HasImmediateInput(instr, 1)) {
- __ test(i.InputOperand(0), i.InputImmediate(1));
- } else {
- __ test(i.InputRegister(0), i.InputOperand(1));
- }
- }
+ ASSEMBLE_COMPARE(test);
+ break;
+ case kX87Test16:
+ ASSEMBLE_COMPARE(test_w);
+ break;
+ case kX87Test8:
+ ASSEMBLE_COMPARE(test_b);
break;
case kX87Imul:
if (HasImmediateInput(instr, 1)) {
@@ -717,6 +790,92 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ sar_cl(i.OutputOperand());
}
break;
+ case kX87AddPair: {
+ // i.OutputRegister(0) == i.InputRegister(0) ... left low word.
+ // i.InputRegister(1) ... left high word.
+ // i.InputRegister(2) ... right low word.
+ // i.InputRegister(3) ... right high word.
+ bool use_temp = false;
+ if (i.OutputRegister(0).code() == i.InputRegister(1).code() ||
+ i.OutputRegister(0).code() == i.InputRegister(3).code()) {
+ // We cannot write to the output register directly, because it would
+ // overwrite an input for adc. We have to use the temp register.
+ use_temp = true;
+ __ Move(i.TempRegister(0), i.InputRegister(0));
+ __ add(i.TempRegister(0), i.InputRegister(2));
+ } else {
+ __ add(i.OutputRegister(0), i.InputRegister(2));
+ }
+ __ adc(i.InputRegister(1), Operand(i.InputRegister(3)));
+ if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
+ __ Move(i.OutputRegister(1), i.InputRegister(1));
+ }
+ if (use_temp) {
+ __ Move(i.OutputRegister(0), i.TempRegister(0));
+ }
+ break;
+ }
+ case kX87SubPair: {
+ // i.OutputRegister(0) == i.InputRegister(0) ... left low word.
+ // i.InputRegister(1) ... left high word.
+ // i.InputRegister(2) ... right low word.
+ // i.InputRegister(3) ... right high word.
+ bool use_temp = false;
+ if (i.OutputRegister(0).code() == i.InputRegister(1).code() ||
+ i.OutputRegister(0).code() == i.InputRegister(3).code()) {
+ // We cannot write to the output register directly, because it would
+ // overwrite an input for adc. We have to use the temp register.
+ use_temp = true;
+ __ Move(i.TempRegister(0), i.InputRegister(0));
+ __ sub(i.TempRegister(0), i.InputRegister(2));
+ } else {
+ __ sub(i.OutputRegister(0), i.InputRegister(2));
+ }
+ __ sbb(i.InputRegister(1), Operand(i.InputRegister(3)));
+ if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
+ __ Move(i.OutputRegister(1), i.InputRegister(1));
+ }
+ if (use_temp) {
+ __ Move(i.OutputRegister(0), i.TempRegister(0));
+ }
+ break;
+ }
+ case kX87MulPair: {
+ __ imul(i.OutputRegister(1), i.InputOperand(0));
+ __ mov(i.TempRegister(0), i.InputOperand(1));
+ __ imul(i.TempRegister(0), i.InputOperand(2));
+ __ add(i.OutputRegister(1), i.TempRegister(0));
+ __ mov(i.OutputRegister(0), i.InputOperand(0));
+ // Multiplies the low words and stores them in eax and edx.
+ __ mul(i.InputRegister(2));
+ __ add(i.OutputRegister(1), i.TempRegister(0));
+
+ break;
+ }
+ case kX87ShlPair:
+ if (HasImmediateInput(instr, 2)) {
+ __ ShlPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));
+ } else {
+ // Shift has been loaded into CL by the register allocator.
+ __ ShlPair_cl(i.InputRegister(1), i.InputRegister(0));
+ }
+ break;
+ case kX87ShrPair:
+ if (HasImmediateInput(instr, 2)) {
+ __ ShrPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));
+ } else {
+ // Shift has been loaded into CL by the register allocator.
+ __ ShrPair_cl(i.InputRegister(1), i.InputRegister(0));
+ }
+ break;
+ case kX87SarPair:
+ if (HasImmediateInput(instr, 2)) {
+ __ SarPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));
+ } else {
+ // Shift has been loaded into CL by the register allocator.
+ __ SarPair_cl(i.InputRegister(1), i.InputRegister(0));
+ }
+ break;
case kX87Ror:
if (HasImmediateInput(instr, 1)) {
__ ror(i.OutputOperand(), i.InputInt5(1));
@@ -1176,8 +1335,8 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
InstructionOperand* input = instr->InputAt(0);
if (input->IsDoubleRegister()) {
__ sub(esp, Immediate(kDoubleSize));
- __ fstp_d(MemOperand(esp, 0));
- __ fld_d(MemOperand(esp, 0));
+ __ fstp_s(MemOperand(esp, 0));
+ __ fld_s(MemOperand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
} else {
DCHECK(input->IsDoubleStackSlot());
@@ -1264,11 +1423,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ fld_d(i.InputOperand(0));
}
__ fild_s(Operand(esp, 0));
- __ fadd(1);
- __ fstp(0);
+ __ fld(1);
+ __ faddp();
__ TruncateX87TOSToI(i.OutputRegister(0));
__ add(esp, Immediate(kInt32Size));
__ add(i.OutputRegister(), Immediate(0x80000000));
+ __ fstp(0);
if (!instr->InputAt(0)->IsDoubleRegister()) {
__ fstp(0);
}
@@ -1610,8 +1770,16 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
X87OperandConverter i(this, instr);
Label::Distance flabel_distance =
branch->fallthru ? Label::kNear : Label::kFar;
- Label* tlabel = branch->true_label;
- Label* flabel = branch->false_label;
+
+ Label done;
+ Label tlabel_tmp;
+ Label flabel_tmp;
+ Label* tlabel = &tlabel_tmp;
+ Label* flabel = &flabel_tmp;
+
+ Label* tlabel_dst = branch->true_label;
+ Label* flabel_dst = branch->false_label;
+
switch (branch->condition) {
case kUnorderedEqual:
__ j(parity_even, flabel, flabel_distance);
@@ -1661,6 +1829,34 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
}
// Add a jump if not falling through to the next block.
if (!branch->fallthru) __ jmp(flabel);
+
+ __ jmp(&done);
+ __ bind(&tlabel_tmp);
+ FlagsMode mode = FlagsModeField::decode(instr->opcode());
+ if (mode == kFlags_deoptimize) {
+ int double_register_param_count = 0;
+ int x87_layout = 0;
+ for (size_t i = 0; i < instr->InputCount(); i++) {
+ if (instr->InputAt(i)->IsDoubleRegister()) {
+ double_register_param_count++;
+ }
+ }
+ // Currently we use only one X87 register. If double_register_param_count
+ // is bigger than 1, it means duplicated double register is added to input
+ // of this instruction.
+ if (double_register_param_count > 0) {
+ x87_layout = (0 << 3) | 1;
+ }
+ // The layout of x87 register stack is loaded on the top of FPU register
+ // stack for deoptimization.
+ __ push(Immediate(x87_layout));
+ __ fild_s(MemOperand(esp, 0));
+ __ lea(esp, Operand(esp, kPointerSize));
+ }
+ __ jmp(tlabel_dst);
+ __ bind(&flabel_tmp);
+ __ jmp(flabel_dst);
+ __ bind(&done);
}
@@ -1914,21 +2110,16 @@ void CodeGenerator::AssembleDeoptimizerCall(
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->IsCFunctionCall()) {
- // Assemble a prologue similar the to cdecl calling convention.
- __ push(ebp);
- __ mov(ebp, esp);
- } else if (descriptor->IsJSFunctionCall()) {
- // TODO(turbofan): this prologue is redundant with OSR, but needed for
- // code aging.
- __ Prologue(this->info()->GeneratePreagedPrologue());
- } else if (frame()->needs_frame()) {
- __ StubPrologue();
- } else {
- frame()->SetElidedFrameSizeInSlots(kPCOnStackSize / kPointerSize);
+ if (frame_access_state()->has_frame()) {
+ if (descriptor->IsCFunctionCall()) {
+ __ push(ebp);
+ __ mov(ebp, esp);
+ } else if (descriptor->IsJSFunctionCall()) {
+ __ Prologue(this->info()->GeneratePreagedPrologue());
+ } else {
+ __ StubPrologue(info()->GetOutputStackFrameType());
+ }
}
- frame_access_state()->SetFrameAccessToDefault();
-
int stack_shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -1941,6 +2132,10 @@ void CodeGenerator::AssemblePrologue() {
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+
+ // Initailize FPU state.
+ __ fninit();
+ __ fld1();
}
const RegList saves = descriptor->CalleeSavedRegisters();
@@ -1958,10 +2153,6 @@ void CodeGenerator::AssemblePrologue() {
}
frame()->AllocateSavedCalleeRegisterSlots(pushed);
}
-
- // Initailize FPU state.
- __ fninit();
- __ fld1();
}
@@ -1994,17 +2185,15 @@ void CodeGenerator::AssembleReturn() {
}
if (descriptor->IsCFunctionCall()) {
- __ mov(esp, ebp); // Move stack pointer back to frame pointer.
- __ pop(ebp); // Pop caller's frame pointer.
- } else if (frame()->needs_frame()) {
+ AssembleDeconstructFrame();
+ } else if (frame_access_state()->has_frame()) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ jmp(&return_label_);
return;
} else {
__ bind(&return_label_);
- __ mov(esp, ebp); // Move stack pointer back to frame pointer.
- __ pop(ebp); // Pop caller's frame pointer.
+ AssembleDeconstructFrame();
}
}
if (pop_count == 0) {
@@ -2040,15 +2229,15 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
Constant src_constant = g.ToConstant(source);
if (src_constant.type() == Constant::kHeapObject) {
Handle<HeapObject> src = src_constant.ToHeapObject();
- int offset;
- if (IsMaterializableFromFrame(src, &offset)) {
+ int slot;
+ if (IsMaterializableFromFrame(src, &slot)) {
if (destination->IsRegister()) {
Register dst = g.ToRegister(destination);
- __ mov(dst, g.ToMaterializableOperand(offset));
+ __ mov(dst, g.SlotToOperand(slot));
} else {
DCHECK(destination->IsStackSlot());
Operand dst = g.ToOperand(destination);
- __ push(g.ToMaterializableOperand(offset));
+ __ push(g.SlotToOperand(slot));
__ pop(dst);
}
} else if (destination->IsRegister()) {
diff --git a/deps/v8/src/compiler/x87/instruction-codes-x87.h b/deps/v8/src/compiler/x87/instruction-codes-x87.h
index e5d0912910..d70a737023 100644
--- a/deps/v8/src/compiler/x87/instruction-codes-x87.h
+++ b/deps/v8/src/compiler/x87/instruction-codes-x87.h
@@ -17,7 +17,11 @@ namespace compiler {
V(X87Add) \
V(X87And) \
V(X87Cmp) \
+ V(X87Cmp16) \
+ V(X87Cmp8) \
V(X87Test) \
+ V(X87Test16) \
+ V(X87Test8) \
V(X87Or) \
V(X87Xor) \
V(X87Sub) \
@@ -31,6 +35,12 @@ namespace compiler {
V(X87Shl) \
V(X87Shr) \
V(X87Sar) \
+ V(X87AddPair) \
+ V(X87SubPair) \
+ V(X87MulPair) \
+ V(X87ShlPair) \
+ V(X87ShrPair) \
+ V(X87SarPair) \
V(X87Ror) \
V(X87Lzcnt) \
V(X87Popcnt) \
diff --git a/deps/v8/src/compiler/x87/instruction-selector-x87.cc b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
index 079d5d2026..f5376bc3d4 100644
--- a/deps/v8/src/compiler/x87/instruction-selector-x87.cc
+++ b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
@@ -27,6 +27,34 @@ class X87OperandGenerator final : public OperandGenerator {
return DefineAsRegister(node);
}
+ bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input,
+ int effect_level) {
+ if (input->opcode() != IrOpcode::kLoad ||
+ !selector()->CanCover(node, input)) {
+ return false;
+ }
+ if (effect_level != selector()->GetEffectLevel(input)) {
+ return false;
+ }
+ MachineRepresentation rep =
+ LoadRepresentationOf(input->op()).representation();
+ switch (opcode) {
+ case kX87Cmp:
+ case kX87Test:
+ return rep == MachineRepresentation::kWord32 ||
+ rep == MachineRepresentation::kTagged;
+ case kX87Cmp16:
+ case kX87Test16:
+ return rep == MachineRepresentation::kWord16;
+ case kX87Cmp8:
+ case kX87Test8:
+ return rep == MachineRepresentation::kWord8;
+ default:
+ break;
+ }
+ return false;
+ }
+
InstructionOperand CreateImmediate(int imm) {
return sequence()->AddImmediate(Constant(imm));
}
@@ -193,9 +221,7 @@ void InstructionSelector::VisitStore(Node* node) {
inputs[input_count++] = g.UseUniqueRegister(index);
addressing_mode = kMode_MR1;
}
- inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
- ? g.UseRegister(value)
- : g.UseUniqueRegister(value);
+ inputs[input_count++] = g.UseUniqueRegister(value);
RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
switch (write_barrier_kind) {
case kNoWriteBarrier:
@@ -365,10 +391,11 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
}
}
+namespace {
// Shared routine for multiple binary operations.
-static void VisitBinop(InstructionSelector* selector, Node* node,
- InstructionCode opcode, FlagsContinuation* cont) {
+void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont) {
X87OperandGenerator g(selector);
Int32BinopMatcher m(node);
Node* left = m.left().node();
@@ -417,18 +444,24 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
- selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
- inputs);
+ opcode = cont->Encode(opcode);
+ if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+ cont->frame_state());
+ } else {
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
+ }
}
// Shared routine for multiple binary operations.
-static void VisitBinop(InstructionSelector* selector, Node* node,
- InstructionCode opcode) {
+void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode) {
FlagsContinuation cont;
VisitBinop(selector, node, opcode, &cont);
}
+} // namespace
void InstructionSelector::VisitWord32And(Node* node) {
VisitBinop(this, node, kX87And);
@@ -491,9 +524,10 @@ void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
X87OperandGenerator g(selector);
+ InstructionOperand temps[] = {g.TempRegister(eax)};
selector->Emit(opcode, g.DefineAsFixed(node, edx),
g.UseFixed(node->InputAt(0), eax),
- g.UseUnique(node->InputAt(1)));
+ g.UseUnique(node->InputAt(1)), arraysize(temps), temps);
}
void EmitLea(InstructionSelector* selector, Node* result, Node* index,
@@ -539,6 +573,93 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
VisitShift(this, node, kX87Sar);
}
+void InstructionSelector::VisitInt32PairAdd(Node* node) {
+ X87OperandGenerator g(this);
+
+ // We use UseUniqueRegister here to avoid register sharing with the temp
+ // register.
+ InstructionOperand inputs[] = {
+ g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+
+ InstructionOperand outputs[] = {
+ g.DefineSameAsFirst(node),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+ InstructionOperand temps[] = {g.TempRegister()};
+
+ Emit(kX87AddPair, 2, outputs, 4, inputs, 1, temps);
+}
+
+void InstructionSelector::VisitInt32PairSub(Node* node) {
+ X87OperandGenerator g(this);
+
+ // We use UseUniqueRegister here to avoid register sharing with the temp
+ // register.
+ InstructionOperand inputs[] = {
+ g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+
+ InstructionOperand outputs[] = {
+ g.DefineSameAsFirst(node),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+ InstructionOperand temps[] = {g.TempRegister()};
+
+ Emit(kX87SubPair, 2, outputs, 4, inputs, 1, temps);
+}
+
+void InstructionSelector::VisitInt32PairMul(Node* node) {
+ X87OperandGenerator g(this);
+
+ // InputAt(3) explicitly shares ecx with OutputRegister(1) to save one
+ // register and one mov instruction.
+ InstructionOperand inputs[] = {
+ g.UseUnique(node->InputAt(0)), g.UseUnique(node->InputAt(1)),
+ g.UseUniqueRegister(node->InputAt(2)), g.UseFixed(node->InputAt(3), ecx)};
+
+ InstructionOperand outputs[] = {
+ g.DefineAsFixed(node, eax),
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 1), ecx)};
+
+ InstructionOperand temps[] = {g.TempRegister(edx)};
+
+ Emit(kX87MulPair, 2, outputs, 4, inputs, 1, temps);
+}
+
+void VisitWord32PairShift(InstructionSelector* selector, InstructionCode opcode,
+ Node* node) {
+ X87OperandGenerator g(selector);
+
+ Node* shift = node->InputAt(2);
+ InstructionOperand shift_operand;
+ if (g.CanBeImmediate(shift)) {
+ shift_operand = g.UseImmediate(shift);
+ } else {
+ shift_operand = g.UseFixed(shift, ecx);
+ }
+ InstructionOperand inputs[] = {g.UseFixed(node->InputAt(0), eax),
+ g.UseFixed(node->InputAt(1), edx),
+ shift_operand};
+
+ InstructionOperand outputs[] = {
+ g.DefineAsFixed(node, eax),
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 1), edx)};
+
+ selector->Emit(opcode, 2, outputs, 3, inputs);
+}
+
+void InstructionSelector::VisitWord32PairShl(Node* node) {
+ VisitWord32PairShift(this, kX87ShlPair, node);
+}
+
+void InstructionSelector::VisitWord32PairShr(Node* node) {
+ VisitWord32PairShift(this, kX87ShrPair, node);
+}
+
+void InstructionSelector::VisitWord32PairSar(Node* node) {
+ VisitWord32PairShift(this, kX87SarPair, node);
+}
void InstructionSelector::VisitWord32Ror(Node* node) {
VisitShift(this, node, kX87Ror);
@@ -714,6 +835,10 @@ void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
Emit(kX87Float64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
X87OperandGenerator g(this);
@@ -990,6 +1115,7 @@ void InstructionSelector::EmitPrepareArguments(
bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
+int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 0; }
namespace {
@@ -1011,6 +1137,9 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
inputs[input_count++] = g.Label(cont->true_block());
inputs[input_count++] = g.Label(cont->false_block());
selector->Emit(opcode, 0, nullptr, input_count, inputs);
+ } else if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
+ cont->frame_state());
} else {
DCHECK(cont->IsSet());
InstructionOperand output = g.DefineAsRegister(cont->result());
@@ -1018,33 +1147,21 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
}
}
-// Determines if {input} of {node} can be replaced by a memory operand.
-bool CanUseMemoryOperand(InstructionSelector* selector, InstructionCode opcode,
- Node* node, Node* input) {
- if (input->opcode() != IrOpcode::kLoad || !selector->CanCover(node, input)) {
- return false;
- }
- MachineRepresentation load_representation =
- LoadRepresentationOf(input->op()).representation();
- if (load_representation == MachineRepresentation::kWord32 ||
- load_representation == MachineRepresentation::kTagged) {
- return opcode == kX87Cmp || opcode == kX87Test;
- }
- return false;
-}
-
// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
InstructionOperand left, InstructionOperand right,
FlagsContinuation* cont) {
X87OperandGenerator g(selector);
+ opcode = cont->Encode(opcode);
if (cont->IsBranch()) {
- selector->Emit(cont->Encode(opcode), g.NoOutput(), left, right,
+ selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
+ } else if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+ cont->frame_state());
} else {
DCHECK(cont->IsSet());
- selector->Emit(cont->Encode(opcode), g.DefineAsByteRegister(cont->result()),
- left, right);
+ selector->Emit(opcode, g.DefineAsByteRegister(cont->result()), left, right);
}
}
@@ -1060,6 +1177,36 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
}
+// Tries to match the size of the given opcode to that of the operands, if
+// possible.
+InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
+ Node* right) {
+ if (opcode != kX87Cmp && opcode != kX87Test) {
+ return opcode;
+ }
+ // Currently, if one of the two operands is not a Load, we don't know what its
+ // machine representation is, so we bail out.
+ // TODO(epertoso): we can probably get some size information out of immediates
+ // and phi nodes.
+ if (left->opcode() != IrOpcode::kLoad || right->opcode() != IrOpcode::kLoad) {
+ return opcode;
+ }
+ // If the load representations don't match, both operands will be
+ // zero/sign-extended to 32bit.
+ LoadRepresentation left_representation = LoadRepresentationOf(left->op());
+ if (left_representation != LoadRepresentationOf(right->op())) {
+ return opcode;
+ }
+ switch (left_representation.representation()) {
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kWord8:
+ return opcode == kX87Cmp ? kX87Cmp8 : kX87Test8;
+ case MachineRepresentation::kWord16:
+ return opcode == kX87Cmp ? kX87Cmp16 : kX87Test16;
+ default:
+ return opcode;
+ }
+}
// Shared routine for multiple float32 compare operations (inputs commuted).
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
@@ -1070,6 +1217,10 @@ void VisitFloat32Compare(InstructionSelector* selector, Node* node,
if (cont->IsBranch()) {
selector->Emit(cont->Encode(kX87Float32Cmp), g.NoOutput(),
g.Label(cont->true_block()), g.Label(cont->false_block()));
+ } else if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(cont->Encode(kX87Float32Cmp), g.NoOutput(),
+ g.Use(node->InputAt(0)), g.Use(node->InputAt(1)),
+ cont->frame_state());
} else {
DCHECK(cont->IsSet());
selector->Emit(cont->Encode(kX87Float32Cmp),
@@ -1087,6 +1238,10 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
if (cont->IsBranch()) {
selector->Emit(cont->Encode(kX87Float64Cmp), g.NoOutput(),
g.Label(cont->true_block()), g.Label(cont->false_block()));
+ } else if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(cont->Encode(kX87Float64Cmp), g.NoOutput(),
+ g.Use(node->InputAt(0)), g.Use(node->InputAt(1)),
+ cont->frame_state());
} else {
DCHECK(cont->IsSet());
selector->Emit(cont->Encode(kX87Float64Cmp),
@@ -1101,15 +1256,28 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
- // If one of the two inputs is an immediate, make sure it's on the right.
- if (!g.CanBeImmediate(right) && g.CanBeImmediate(left)) {
+ InstructionCode narrowed_opcode = TryNarrowOpcodeSize(opcode, left, right);
+
+ int effect_level = selector->GetEffectLevel(node);
+ if (cont->IsBranch()) {
+ effect_level = selector->GetEffectLevel(
+ cont->true_block()->PredecessorAt(0)->control_input());
+ }
+
+ // If one of the two inputs is an immediate, make sure it's on the right, or
+ // if one of the two inputs is a memory operand, make sure it's on the left.
+ if ((!g.CanBeImmediate(right) && g.CanBeImmediate(left)) ||
+ (g.CanBeMemoryOperand(narrowed_opcode, node, right, effect_level) &&
+ !g.CanBeMemoryOperand(narrowed_opcode, node, left, effect_level))) {
if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
std::swap(left, right);
}
// Match immediates on right side of comparison.
if (g.CanBeImmediate(right)) {
- if (CanUseMemoryOperand(selector, opcode, node, left)) {
+ if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
+ // TODO(epertoso): we should use `narrowed_opcode' here once we match
+ // immediates too.
return VisitCompareWithMemoryOperand(selector, opcode, left,
g.UseImmediate(right), cont);
}
@@ -1117,15 +1285,21 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
cont);
}
+ // Match memory operands on left side of comparison.
+ if (g.CanBeMemoryOperand(narrowed_opcode, node, left, effect_level)) {
+ bool needs_byte_register =
+ narrowed_opcode == kX87Test8 || narrowed_opcode == kX87Cmp8;
+ return VisitCompareWithMemoryOperand(
+ selector, narrowed_opcode, left,
+ needs_byte_register ? g.UseByteRegister(right) : g.UseRegister(right),
+ cont);
+ }
+
if (g.CanBeBetterLeftOperand(right)) {
if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
std::swap(left, right);
}
- if (CanUseMemoryOperand(selector, opcode, node, left)) {
- return VisitCompareWithMemoryOperand(selector, opcode, left,
- g.UseRegister(right), cont);
- }
return VisitCompare(selector, opcode, left, right, cont,
node->op()->HasProperty(Operator::kCommutative));
}
@@ -1145,6 +1319,9 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
if (cont->IsBranch()) {
selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
g.Label(cont->false_block()));
+ } else if (cont->IsDeoptimize()) {
+ selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr,
+ cont->frame_state());
} else {
DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()));
@@ -1254,6 +1431,17 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
}
+void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
X87OperandGenerator g(this);
@@ -1284,7 +1472,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
void InstructionSelector::VisitWord32Equal(Node* const node) {
- FlagsContinuation cont(kEqual, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int32BinopMatcher m(node);
if (m.right().Is(0)) {
return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
@@ -1294,32 +1482,34 @@ void InstructionSelector::VisitWord32Equal(Node* const node) {
void InstructionSelector::VisitInt32LessThan(Node* node) {
- FlagsContinuation cont(kSignedLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
VisitWordCompare(this, node, &cont);
}
void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kSignedLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
VisitWordCompare(this, node, &cont);
}
void InstructionSelector::VisitUint32LessThan(Node* node) {
- FlagsContinuation cont(kUnsignedLessThan, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
VisitWordCompare(this, node, &cont);
}
void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
VisitWordCompare(this, node, &cont);
}
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont(kOverflow, ovf);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
return VisitBinop(this, node, kX87Add, &cont);
}
FlagsContinuation cont;
@@ -1329,7 +1519,7 @@ void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont(kOverflow, ovf);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
return VisitBinop(this, node, kX87Sub, &cont);
}
FlagsContinuation cont;
@@ -1338,37 +1528,41 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
void InstructionSelector::VisitFloat32Equal(Node* node) {
- FlagsContinuation cont(kUnorderedEqual, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat32LessThan(Node* node) {
- FlagsContinuation cont(kUnsignedGreaterThan, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64Equal(Node* node) {
- FlagsContinuation cont(kUnorderedEqual, node);
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThan(Node* node) {
- FlagsContinuation cont(kUnsignedGreaterThan, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
VisitFloat64Compare(this, node, &cont);
}
diff --git a/deps/v8/src/compiler/zone-pool.cc b/deps/v8/src/compiler/zone-pool.cc
index 2006a79d2c..13fec35a00 100644
--- a/deps/v8/src/compiler/zone-pool.cc
+++ b/deps/v8/src/compiler/zone-pool.cc
@@ -13,7 +13,7 @@ ZonePool::StatsScope::StatsScope(ZonePool* zone_pool)
total_allocated_bytes_at_start_(zone_pool->GetTotalAllocatedBytes()),
max_allocated_bytes_(0) {
zone_pool_->stats_.push_back(this);
- for (auto zone : zone_pool_->used_) {
+ for (Zone* zone : zone_pool_->used_) {
size_t size = static_cast<size_t>(zone->allocation_size());
std::pair<InitialValues::iterator, bool> res =
initial_values_.insert(std::make_pair(zone, size));
@@ -64,9 +64,8 @@ void ZonePool::StatsScope::ZoneReturned(Zone* zone) {
}
}
-
-ZonePool::ZonePool() : max_allocated_bytes_(0), total_deleted_bytes_(0) {}
-
+ZonePool::ZonePool(base::AccountingAllocator* allocator)
+ : max_allocated_bytes_(0), total_deleted_bytes_(0), allocator_(allocator) {}
ZonePool::~ZonePool() {
DCHECK(used_.empty());
@@ -103,7 +102,7 @@ Zone* ZonePool::NewEmptyZone() {
zone = unused_.back();
unused_.pop_back();
} else {
- zone = new Zone();
+ zone = new Zone(allocator_);
}
used_.push_back(zone);
DCHECK_EQ(0u, zone->allocation_size());
@@ -116,7 +115,7 @@ void ZonePool::ReturnZone(Zone* zone) {
// Update max.
max_allocated_bytes_ = std::max(max_allocated_bytes_, current_total);
// Update stats.
- for (auto stat_scope : stats_) {
+ for (StatsScope* stat_scope : stats_) {
stat_scope->ZoneReturned(zone);
}
// Remove from used.
diff --git a/deps/v8/src/compiler/zone-pool.h b/deps/v8/src/compiler/zone-pool.h
index aaf9daac46..44a649fcfb 100644
--- a/deps/v8/src/compiler/zone-pool.h
+++ b/deps/v8/src/compiler/zone-pool.h
@@ -61,7 +61,7 @@ class ZonePool final {
DISALLOW_COPY_AND_ASSIGN(StatsScope);
};
- ZonePool();
+ explicit ZonePool(base::AccountingAllocator* allocator);
~ZonePool();
size_t GetMaxAllocatedBytes();
@@ -82,6 +82,7 @@ class ZonePool final {
Stats stats_;
size_t max_allocated_bytes_;
size_t total_deleted_bytes_;
+ base::AccountingAllocator* allocator_;
DISALLOW_COPY_AND_ASSIGN(ZonePool);
};
diff --git a/deps/v8/src/contexts-inl.h b/deps/v8/src/contexts-inl.h
index c26ce5bd47..344d5db578 100644
--- a/deps/v8/src/contexts-inl.h
+++ b/deps/v8/src/contexts-inl.h
@@ -105,6 +105,10 @@ bool Context::IsWithContext() {
return map == map->GetHeap()->with_context_map();
}
+bool Context::IsDebugEvaluateContext() {
+ Map* map = this->map();
+ return map == map->GetHeap()->debug_evaluate_context_map();
+}
bool Context::IsBlockContext() {
Map* map = this->map();
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index 7549d20518..67a9fea8b8 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -164,14 +164,16 @@ static Maybe<bool> UnscopableLookup(LookupIterator* it) {
Handle<Object> unscopables;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, unscopables,
- Object::GetProperty(it->GetReceiver(),
- isolate->factory()->unscopables_symbol()),
+ JSReceiver::GetProperty(Handle<JSReceiver>::cast(it->GetReceiver()),
+ isolate->factory()->unscopables_symbol()),
Nothing<bool>());
if (!unscopables->IsJSReceiver()) return Just(true);
Handle<Object> blacklist;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, blacklist,
- Object::GetProperty(unscopables, it->name()),
- Nothing<bool>());
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, blacklist,
+ JSReceiver::GetProperty(Handle<JSReceiver>::cast(unscopables),
+ it->name()),
+ Nothing<bool>());
return Just(!blacklist->BooleanValue());
}
@@ -231,6 +233,7 @@ Handle<Object> Context::Lookup(Handle<String> name,
Handle<Context> context(this, isolate);
bool follow_context_chain = (flags & FOLLOW_CONTEXT_CHAIN) != 0;
+ bool failed_whitelist = false;
*index = kNotFound;
*attributes = ABSENT;
*binding_flags = MISSING_BINDING;
@@ -291,7 +294,7 @@ Handle<Object> Context::Lookup(Handle<String> name,
if (name->Equals(*isolate->factory()->this_string())) {
maybe = Just(ABSENT);
} else {
- LookupIterator it(object, name);
+ LookupIterator it(object, name, object);
Maybe<bool> found = UnscopableLookup(&it);
if (found.IsNothing()) {
maybe = Nothing<PropertyAttributes>();
@@ -376,6 +379,31 @@ Handle<Object> Context::Lookup(Handle<String> name,
*binding_flags = MUTABLE_IS_INITIALIZED;
return context;
}
+ } else if (context->IsDebugEvaluateContext()) {
+ // Check materialized locals.
+ Object* obj = context->get(EXTENSION_INDEX);
+ if (obj->IsJSReceiver()) {
+ Handle<JSReceiver> extension(JSReceiver::cast(obj));
+ LookupIterator it(extension, name, extension);
+ Maybe<bool> found = JSReceiver::HasProperty(&it);
+ if (found.FromMaybe(false)) {
+ *attributes = NONE;
+ return extension;
+ }
+ }
+ // Check the original context, but do not follow its context chain.
+ obj = context->get(WRAPPED_CONTEXT_INDEX);
+ if (obj->IsContext()) {
+ Handle<Object> result = Context::cast(obj)->Lookup(
+ name, DONT_FOLLOW_CHAINS, index, attributes, binding_flags);
+ if (!result.is_null()) return result;
+ }
+ // Check whitelist. Names that do not pass whitelist shall only resolve
+ // to with, script or native contexts up the context chain.
+ obj = context->get(WHITE_LIST_INDEX);
+ if (obj->IsStringSet()) {
+ failed_whitelist = failed_whitelist || !StringSet::cast(obj)->Has(name);
+ }
}
// 3. Prepare to continue with the previous (next outermost) context.
@@ -384,7 +412,12 @@ Handle<Object> Context::Lookup(Handle<String> name,
context->is_declaration_context())) {
follow_context_chain = false;
} else {
- context = Handle<Context>(context->previous(), isolate);
+ do {
+ context = Handle<Context>(context->previous(), isolate);
+ // If we come across a whitelist context, and the name is not
+ // whitelisted, then only consider with, script or native contexts.
+ } while (failed_whitelist && !context->IsScriptContext() &&
+ !context->IsNativeContext() && !context->IsWithContext());
}
} while (follow_context_chain);
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index 38ebf64ae1..90fb9a4278 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -79,7 +79,6 @@ enum BindingFlags {
#define NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) \
V(IS_ARRAYLIKE, JSFunction, is_arraylike) \
- V(CONCAT_ITERABLE_TO_ARRAY_INDEX, JSFunction, concat_iterable_to_array) \
V(GET_TEMPLATE_CALL_SITE_INDEX, JSFunction, get_template_call_site) \
V(MAKE_RANGE_ERROR_INDEX, JSFunction, make_range_error) \
V(MAKE_TYPE_ERROR_INDEX, JSFunction, make_type_error) \
@@ -94,7 +93,9 @@ enum BindingFlags {
V(REFLECT_DELETE_PROPERTY_INDEX, JSFunction, reflect_delete_property) \
V(SPREAD_ARGUMENTS_INDEX, JSFunction, spread_arguments) \
V(SPREAD_ITERABLE_INDEX, JSFunction, spread_iterable) \
- V(ORDINARY_HAS_INSTANCE_INDEX, JSFunction, ordinary_has_instance)
+ V(ORDINARY_HAS_INSTANCE_INDEX, JSFunction, ordinary_has_instance) \
+ V(MATH_FLOOR, JSFunction, math_floor) \
+ V(MATH_SQRT, JSFunction, math_sqrt)
#define NATIVE_CONTEXT_IMPORTED_FIELDS(V) \
V(ARRAY_CONCAT_INDEX, JSFunction, array_concat) \
@@ -116,6 +117,7 @@ enum BindingFlags {
V(MAP_GET_METHOD_INDEX, JSFunction, map_get) \
V(MAP_HAS_METHOD_INDEX, JSFunction, map_has) \
V(MAP_SET_METHOD_INDEX, JSFunction, map_set) \
+ V(MATH_POW_METHOD_INDEX, JSFunction, math_pow) \
V(MESSAGE_GET_COLUMN_NUMBER_INDEX, JSFunction, message_get_column_number) \
V(MESSAGE_GET_LINE_NUMBER_INDEX, JSFunction, message_get_line_number) \
V(MESSAGE_GET_SOURCE_LINE_INDEX, JSFunction, message_get_source_line) \
@@ -184,7 +186,7 @@ enum BindingFlags {
V(FLOAT32_ARRAY_FUN_INDEX, JSFunction, float32_array_fun) \
V(FLOAT32X4_FUNCTION_INDEX, JSFunction, float32x4_function) \
V(FLOAT64_ARRAY_FUN_INDEX, JSFunction, float64_array_fun) \
- V(TEMPLATE_INSTANTIATIONS_CACHE_INDEX, ObjectHashTable, \
+ V(TEMPLATE_INSTANTIATIONS_CACHE_INDEX, UnseededNumberDictionary, \
template_instantiations_cache) \
V(FUNCTION_FUNCTION_INDEX, JSFunction, function_function) \
V(GENERATOR_FUNCTION_FUNCTION_INDEX, JSFunction, \
@@ -211,21 +213,8 @@ enum BindingFlags {
js_array_fast_double_elements_map_index) \
V(JS_ARRAY_FAST_HOLEY_DOUBLE_ELEMENTS_MAP_INDEX, Map, \
js_array_fast_holey_double_elements_map_index) \
- V(JS_ARRAY_FAST_SMI_ELEMENTS_STRONG_MAP_INDEX, Map, \
- js_array_fast_smi_elements_strong_map_index) \
- V(JS_ARRAY_FAST_HOLEY_SMI_ELEMENTS_STRONG_MAP_INDEX, Map, \
- js_array_fast_holey_smi_elements_strong_map_index) \
- V(JS_ARRAY_FAST_ELEMENTS_STRONG_MAP_INDEX, Map, \
- js_array_fast_elements_strong_map_index) \
- V(JS_ARRAY_FAST_HOLEY_ELEMENTS_STRONG_MAP_INDEX, Map, \
- js_array_fast_holey_elements_strong_map_index) \
- V(JS_ARRAY_FAST_DOUBLE_ELEMENTS_STRONG_MAP_INDEX, Map, \
- js_array_fast_double_elements_strong_map_index) \
- V(JS_ARRAY_FAST_HOLEY_DOUBLE_ELEMENTS_STRONG_MAP_INDEX, Map, \
- js_array_fast_holey_double_elements_strong_map_index) \
V(JS_MAP_FUN_INDEX, JSFunction, js_map_fun) \
V(JS_MAP_MAP_INDEX, Map, js_map_map) \
- V(JS_OBJECT_STRONG_MAP_INDEX, Map, js_object_strong_map) \
V(JS_SET_FUN_INDEX, JSFunction, js_set_fun) \
V(JS_SET_MAP_INDEX, Map, js_set_map) \
V(JS_WEAK_MAP_FUN_INDEX, JSFunction, js_weak_map_fun) \
@@ -269,10 +258,6 @@ enum BindingFlags {
V(STRICT_GENERATOR_FUNCTION_MAP_INDEX, Map, strict_generator_function_map) \
V(STRING_FUNCTION_INDEX, JSFunction, string_function) \
V(STRING_FUNCTION_PROTOTYPE_MAP_INDEX, Map, string_function_prototype_map) \
- V(STRONG_CONSTRUCTOR_MAP_INDEX, Map, strong_constructor_map) \
- V(STRONG_FUNCTION_MAP_INDEX, Map, strong_function_map) \
- V(STRONG_GENERATOR_FUNCTION_MAP_INDEX, Map, strong_generator_function_map) \
- V(STRONG_MAP_CACHE_INDEX, Object, strong_map_cache) \
V(SYMBOL_FUNCTION_INDEX, JSFunction, symbol_function) \
V(UINT16_ARRAY_FUN_INDEX, JSFunction, uint16_array_fun) \
V(UINT16X8_FUNCTION_INDEX, JSFunction, uint16x8_function) \
@@ -416,12 +401,14 @@ class Context: public FixedArray {
NATIVE_CONTEXT_SLOTS,
FIRST_WEAK_SLOT = OPTIMIZED_FUNCTIONS_LIST,
FIRST_JS_ARRAY_MAP_SLOT = JS_ARRAY_FAST_SMI_ELEMENTS_MAP_INDEX,
- FIRST_JS_ARRAY_STRONG_MAP_SLOT =
- JS_ARRAY_FAST_SMI_ELEMENTS_STRONG_MAP_INDEX,
MIN_CONTEXT_SLOTS = GLOBAL_PROXY_INDEX,
// This slot holds the thrown value in catch contexts.
THROWN_OBJECT_INDEX = MIN_CONTEXT_SLOTS,
+
+ // These slots hold values in debug evaluate contexts.
+ WRAPPED_CONTEXT_INDEX = MIN_CONTEXT_SLOTS,
+ WHITE_LIST_INDEX = MIN_CONTEXT_SLOTS + 1
};
void IncrementErrorsThrown();
@@ -474,6 +461,7 @@ class Context: public FixedArray {
inline bool IsFunctionContext();
inline bool IsCatchContext();
inline bool IsWithContext();
+ inline bool IsDebugEvaluateContext();
inline bool IsBlockContext();
inline bool IsModuleContext();
inline bool IsScriptContext();
@@ -539,34 +527,27 @@ class Context: public FixedArray {
static int FunctionMapIndex(LanguageMode language_mode, FunctionKind kind) {
if (IsGeneratorFunction(kind)) {
- return is_strong(language_mode) ? STRONG_GENERATOR_FUNCTION_MAP_INDEX :
- is_strict(language_mode) ? STRICT_GENERATOR_FUNCTION_MAP_INDEX
+ return is_strict(language_mode) ? STRICT_GENERATOR_FUNCTION_MAP_INDEX
: SLOPPY_GENERATOR_FUNCTION_MAP_INDEX;
}
if (IsClassConstructor(kind)) {
// Use strict function map (no own "caller" / "arguments")
- return is_strong(language_mode) ? STRONG_CONSTRUCTOR_MAP_INDEX
- : STRICT_FUNCTION_MAP_INDEX;
+ return STRICT_FUNCTION_MAP_INDEX;
}
if (IsArrowFunction(kind) || IsConciseMethod(kind) ||
IsAccessorFunction(kind)) {
- return is_strong(language_mode)
- ? STRONG_FUNCTION_MAP_INDEX
- : STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX;
+ return STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX;
}
- return is_strong(language_mode) ? STRONG_FUNCTION_MAP_INDEX :
- is_strict(language_mode) ? STRICT_FUNCTION_MAP_INDEX
+ return is_strict(language_mode) ? STRICT_FUNCTION_MAP_INDEX
: SLOPPY_FUNCTION_MAP_INDEX;
}
- static int ArrayMapIndex(ElementsKind elements_kind,
- Strength strength = Strength::WEAK) {
+ static int ArrayMapIndex(ElementsKind elements_kind) {
DCHECK(IsFastElementsKind(elements_kind));
- return elements_kind + (is_strong(strength) ? FIRST_JS_ARRAY_STRONG_MAP_SLOT
- : FIRST_JS_ARRAY_MAP_SLOT);
+ return elements_kind + FIRST_JS_ARRAY_MAP_SLOT;
}
static const int kSize = kHeaderSize + NATIVE_CONTEXT_SLOTS * kPointerSize;
diff --git a/deps/v8/src/counters.cc b/deps/v8/src/counters.cc
index a10494e165..4f5c251a0c 100644
--- a/deps/v8/src/counters.cc
+++ b/deps/v8/src/counters.cc
@@ -273,7 +273,9 @@ void RuntimeCallCounter::Reset() {
}
void RuntimeCallStats::Enter(RuntimeCallCounter* counter) {
- Enter(new RuntimeCallTimer(counter, current_timer_));
+ RuntimeCallTimer* timer = new RuntimeCallTimer();
+ timer->Initialize(counter, current_timer_);
+ Enter(timer);
}
void RuntimeCallStats::Enter(RuntimeCallTimer* timer_) {
@@ -303,31 +305,34 @@ void RuntimeCallStats::Print(std::ostream& os) {
#undef PRINT_COUNTER
entries.Add(&this->ExternalCallback);
+ entries.Add(&this->GC);
entries.Add(&this->UnexpectedStubMiss);
entries.Print(os);
}
void RuntimeCallStats::Reset() {
+ if (!FLAG_runtime_call_stats) return;
#define RESET_COUNTER(name, nargs, ressize) this->Runtime_##name.Reset();
FOR_EACH_INTRINSIC(RESET_COUNTER)
#undef RESET_COUNTER
#define RESET_COUNTER(name, type) this->Builtin_##name.Reset();
BUILTIN_LIST_C(RESET_COUNTER)
#undef RESET_COUNTER
+ this->ExternalCallback.Reset();
+ this->GC.Reset();
+ this->UnexpectedStubMiss.Reset();
}
-RuntimeCallTimerScope::RuntimeCallTimerScope(Isolate* isolate,
- RuntimeCallCounter* counter)
- : isolate_(isolate),
- timer_(counter,
- isolate->counters()->runtime_call_stats()->current_timer()) {
- if (!FLAG_runtime_call_stats) return;
- isolate->counters()->runtime_call_stats()->Enter(&timer_);
+void RuntimeCallTimerScope::Enter(Isolate* isolate,
+ RuntimeCallCounter* counter) {
+ isolate_ = isolate;
+ RuntimeCallStats* stats = isolate->counters()->runtime_call_stats();
+ timer_.Initialize(counter, stats->current_timer());
+ stats->Enter(&timer_);
}
-RuntimeCallTimerScope::~RuntimeCallTimerScope() {
- if (!FLAG_runtime_call_stats) return;
+void RuntimeCallTimerScope::Leave() {
isolate_->counters()->runtime_call_stats()->Leave(&timer_);
}
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index a417da3924..7183d0e52e 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -492,8 +492,11 @@ struct RuntimeCallCounter {
// timers used for properly measuring the own time of a RuntimeCallCounter.
class RuntimeCallTimer {
public:
- RuntimeCallTimer(RuntimeCallCounter* counter, RuntimeCallTimer* parent)
- : counter_(counter), parent_(parent) {}
+ inline void Initialize(RuntimeCallCounter* counter,
+ RuntimeCallTimer* parent) {
+ counter_ = counter;
+ parent_ = parent;
+ }
inline void Start() {
timer_.Start();
@@ -509,7 +512,9 @@ class RuntimeCallTimer {
return parent_;
}
- void AdjustForSubTimer(base::TimeDelta delta) { counter_->time -= delta; }
+ inline void AdjustForSubTimer(base::TimeDelta delta) {
+ counter_->time -= delta;
+ }
private:
RuntimeCallCounter* counter_;
@@ -523,6 +528,7 @@ struct RuntimeCallStats {
RuntimeCallCounter("UnexpectedStubMiss");
// Counter for runtime callbacks into JavaScript.
RuntimeCallCounter ExternalCallback = RuntimeCallCounter("ExternalCallback");
+ RuntimeCallCounter GC = RuntimeCallCounter("GC");
#define CALL_RUNTIME_COUNTER(name, nargs, ressize) \
RuntimeCallCounter Runtime_##name = RuntimeCallCounter(#name);
FOR_EACH_INTRINSIC(CALL_RUNTIME_COUNTER)
@@ -557,8 +563,16 @@ struct RuntimeCallStats {
// the time of C++ scope.
class RuntimeCallTimerScope {
public:
- explicit RuntimeCallTimerScope(Isolate* isolate, RuntimeCallCounter* counter);
- ~RuntimeCallTimerScope();
+ inline explicit RuntimeCallTimerScope(Isolate* isolate,
+ RuntimeCallCounter* counter) {
+ if (FLAG_runtime_call_stats) Enter(isolate, counter);
+ }
+ inline ~RuntimeCallTimerScope() {
+ if (FLAG_runtime_call_stats) Leave();
+ }
+
+ void Enter(Isolate* isolate, RuntimeCallCounter* counter);
+ void Leave();
private:
Isolate* isolate_;
@@ -742,17 +756,11 @@ class RuntimeCallTimerScope {
SC(regexp_entry_native, V8.RegExpEntryNative) \
SC(number_to_string_native, V8.NumberToStringNative) \
SC(number_to_string_runtime, V8.NumberToStringRuntime) \
- SC(math_acos_runtime, V8.MathAcosRuntime) \
- SC(math_asin_runtime, V8.MathAsinRuntime) \
- SC(math_atan_runtime, V8.MathAtanRuntime) \
SC(math_atan2_runtime, V8.MathAtan2Runtime) \
SC(math_clz32_runtime, V8.MathClz32Runtime) \
SC(math_exp_runtime, V8.MathExpRuntime) \
- SC(math_floor_runtime, V8.MathFloorRuntime) \
SC(math_log_runtime, V8.MathLogRuntime) \
SC(math_pow_runtime, V8.MathPowRuntime) \
- SC(math_round_runtime, V8.MathRoundRuntime) \
- SC(math_sqrt_runtime, V8.MathSqrtRuntime) \
SC(stack_interrupts, V8.StackInterrupts) \
SC(runtime_profiler_ticks, V8.RuntimeProfilerTicks) \
SC(runtime_calls, V8.RuntimeCalls) \
diff --git a/deps/v8/src/crankshaft/arm/lithium-arm.cc b/deps/v8/src/crankshaft/arm/lithium-arm.cc
index d5590f5c05..4072982513 100644
--- a/deps/v8/src/crankshaft/arm/lithium-arm.cc
+++ b/deps/v8/src/crankshaft/arm/lithium-arm.cc
@@ -248,27 +248,6 @@ void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
}
-void LCallFunction::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add(" ");
- function()->PrintTo(stream);
- if (hydrogen()->HasVectorAndSlot()) {
- stream->Add(" (type-feedback-vector ");
- temp_vector()->PrintTo(stream);
- stream->Add(" ");
- temp_slot()->PrintTo(stream);
- stream->Add(")");
- }
-}
-
-
-void LCallJSFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- function()->PrintTo(stream);
- stream->Add("#%d / ", arity());
-}
-
-
void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
for (int i = 0; i < InputCount(); i++) {
InputAt(i)->PrintTo(stream);
@@ -567,12 +546,7 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
- int argument_index_accumulator = 0;
- ZoneList<HValue*> objects_to_materialize(0, zone());
- instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator,
- &objects_to_materialize));
- return instr;
+ return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
}
@@ -897,22 +871,16 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
}
chunk_->AddInstruction(instr, current_block_);
- if (instr->IsCall() || instr->IsPrologue()) {
- HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
- if (hydrogen_val->HasObservableSideEffects()) {
- HSimulate* sim = HSimulate::cast(hydrogen_val->next());
- sim->ReplayEnvironment(current_block_->last_environment());
- hydrogen_value_for_lazy_bailout = sim;
- }
- LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
- bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
- chunk_->AddInstruction(bailout, current_block_);
- }
+ CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
}
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
- return new (zone()) LPrologue();
+ LInstruction* result = new (zone()) LPrologue();
+ if (info_->num_heap_slots() > 0) {
+ result = MarkAsCall(result, instr);
+ }
+ return result;
}
@@ -925,14 +893,14 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* value = instr->value();
Representation r = value->representation();
HType type = value->type();
- ToBooleanStub::Types expected = instr->expected_input_types();
- if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+ ToBooleanICStub::Types expected = instr->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
type.IsJSArray() || type.IsHeapNumber() || type.IsString();
LInstruction* branch = new(zone()) LBranch(UseRegister(value));
if (!easy_case &&
- ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+ ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
!expected.IsGeneric())) {
branch = AssignEnvironment(branch);
}
@@ -1059,16 +1027,6 @@ LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
}
-LInstruction* LChunkBuilder::DoCallJSFunction(
- HCallJSFunction* instr) {
- LOperand* function = UseFixed(instr->function(), r1);
-
- LCallJSFunction* result = new(zone()) LCallJSFunction(function);
-
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallWithDescriptor(
HCallWithDescriptor* instr) {
CallInterfaceDescriptor descriptor = instr->descriptor();
@@ -1092,6 +1050,9 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
descriptor, ops, zone());
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -1100,6 +1061,9 @@ LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), r1);
LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
return MarkAsCall(DefineFixed(result, r0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1216,22 +1180,6 @@ LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
}
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* function = UseFixed(instr->function(), r1);
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(r3);
- vector = FixedTemp(r2);
- }
-
- LCallFunction* call =
- new (zone()) LCallFunction(context, function, slot, vector);
- return MarkAsCall(DefineFixed(call, r0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
LOperand* context = UseFixed(instr->context(), cp);
return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), r0), instr);
@@ -1843,13 +1791,6 @@ LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
}
-LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
- HBoundsCheckBaseIndexInformation* instr) {
- UNREACHABLE();
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
// The control instruction marking the end of a block that completed
// abruptly (e.g., threw an exception). There is nothing specific to do.
@@ -2488,13 +2429,6 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
}
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
- LOperand* object = UseFixed(instr->value(), r0);
- LToFastProperties* result = new(zone()) LToFastProperties(object);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* value = UseFixed(instr->value(), r3);
@@ -2531,11 +2465,9 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
HEnvironment* outer = current_block_->last_environment();
outer->set_ast_id(instr->ReturnId());
HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(instr->closure(),
- instr->arguments_count(),
- instr->function(),
- undefined,
- instr->inlining_kind());
+ HEnvironment* inner = outer->CopyForInlining(
+ instr->closure(), instr->arguments_count(), instr->function(), undefined,
+ instr->inlining_kind(), instr->syntactic_tail_call_mode());
// Only replay binding of arguments object if it wasn't removed from graph.
if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object());
@@ -2596,11 +2528,5 @@ LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
return AssignPointerMap(result);
}
-
-LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->context());
- return new(zone()) LStoreFrameContext(context);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/crankshaft/arm/lithium-arm.h b/deps/v8/src/crankshaft/arm/lithium-arm.h
index 91435cf785..60fe79d402 100644
--- a/deps/v8/src/crankshaft/arm/lithium-arm.h
+++ b/deps/v8/src/crankshaft/arm/lithium-arm.h
@@ -29,9 +29,7 @@ class LCodeGen;
V(BitI) \
V(BoundsCheck) \
V(Branch) \
- V(CallJSFunction) \
V(CallWithDescriptor) \
- V(CallFunction) \
V(CallNewArray) \
V(CallRuntime) \
V(CheckArrayBufferNotNeutered) \
@@ -133,7 +131,6 @@ class LCodeGen;
V(StackCheck) \
V(StoreCodeEntry) \
V(StoreContextSlot) \
- V(StoreFrameContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -146,7 +143,6 @@ class LCodeGen;
V(RSubI) \
V(TaggedToI) \
V(ThisFunction) \
- V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
V(Typeof) \
@@ -227,6 +223,13 @@ class LInstruction : public ZoneObject {
void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
bool IsCall() const { return IsCallBits::decode(bit_field_); }
+ void MarkAsSyntacticTailCall() {
+ bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
+ }
+ bool IsSyntacticTailCall() const {
+ return IsSyntacticTailCallBits::decode(bit_field_);
+ }
+
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
@@ -261,6 +264,8 @@ class LInstruction : public ZoneObject {
virtual LOperand* TempAt(int i) = 0;
class IsCallBits: public BitField<bool, 0, 1> {};
+ class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
+ };
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
@@ -535,6 +540,7 @@ class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+ DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
LOperand* function() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
@@ -1721,23 +1727,6 @@ class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
};
-class LCallJSFunction final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallJSFunction(LOperand* function) {
- inputs_[0] = function;
- }
-
- LOperand* function() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
- DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
public:
LCallWithDescriptor(CallInterfaceDescriptor descriptor,
@@ -1800,29 +1789,6 @@ class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
};
-class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
- public:
- LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
- LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = function;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
- DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
- int arity() const { return hydrogen()->argument_count() - 1; }
- void PrintDataTo(StringStream* stream) override;
-};
-
-
class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2426,19 +2392,6 @@ class LAllocate final : public LTemplateInstruction<1, 2, 2> {
};
-class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LToFastProperties(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
- DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
class LTypeof final : public LTemplateInstruction<1, 2, 0> {
public:
LTypeof(LOperand* context, LOperand* value) {
@@ -2555,18 +2508,6 @@ class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
};
-class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LStoreFrameContext(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
-};
-
-
class LChunkBuilder;
class LPlatformChunk final : public LChunk {
public:
diff --git a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
index 8b4e6c9904..c64aac3cc8 100644
--- a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
@@ -113,7 +113,7 @@ bool LCodeGen::GeneratePrologue() {
info()->set_prologue_offset(masm_->pc_offset());
if (NeedsEagerFrame()) {
if (info()->IsStub()) {
- __ StubPrologue();
+ __ StubPrologue(StackFrame::STUB);
} else {
__ Prologue(info()->GeneratePreagedPrologue());
}
@@ -263,18 +263,15 @@ bool LCodeGen::GenerateDeferredCode() {
DCHECK(!frame_is_built_);
DCHECK(info()->IsStub());
frame_is_built_ = true;
- __ PushFixedFrame();
- __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
- __ push(scratch0());
- __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ Move(scratch0(), Smi::FromInt(StackFrame::STUB));
+ __ PushCommonFrame(scratch0());
Comment(";;; Deferred code");
}
code->Generate();
if (NeedsDeferredFrame()) {
Comment(";;; Destroy frame");
DCHECK(frame_is_built_);
- __ pop(ip);
- __ PopFixedFrame();
+ __ PopCommonFrame(scratch0());
frame_is_built_ = false;
}
__ jmp(code->exit());
@@ -327,7 +324,7 @@ bool LCodeGen::GenerateJumpTable() {
if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
Comment(";;; call deopt with frame");
- __ PushFixedFrame();
+ __ PushCommonFrame();
__ bl(&needs_frame);
} else {
__ bl(&call_deopt_entry);
@@ -342,10 +339,9 @@ bool LCodeGen::GenerateJumpTable() {
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
- DCHECK(info()->IsStub());
__ mov(ip, Operand(Smi::FromInt(StackFrame::STUB)));
__ push(ip);
- __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ DCHECK(info()->IsStub());
}
Comment(";;; call deopt");
@@ -2070,29 +2066,30 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ cmp(ip, Operand::Zero());
EmitBranch(instr, ne);
} else {
- ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+ ToBooleanICStub::Types expected =
+ instr->hydrogen()->expected_input_types();
// Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
- if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+ if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
// undefined -> false.
__ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
__ b(eq, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+ if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
// Boolean -> its value.
__ CompareRoot(reg, Heap::kTrueValueRootIndex);
__ b(eq, instr->TrueLabel(chunk_));
__ CompareRoot(reg, Heap::kFalseValueRootIndex);
__ b(eq, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+ if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
// 'null' -> false.
__ CompareRoot(reg, Heap::kNullValueRootIndex);
__ b(eq, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::SMI)) {
+ if (expected.Contains(ToBooleanICStub::SMI)) {
// Smis: 0 -> false, all other -> true.
__ cmp(reg, Operand::Zero());
__ b(eq, instr->FalseLabel(chunk_));
@@ -2115,13 +2112,13 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
}
- if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+ if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
// spec object -> true.
__ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
__ b(ge, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::STRING)) {
+ if (expected.Contains(ToBooleanICStub::STRING)) {
// String value -> false iff empty.
Label not_string;
__ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
@@ -2133,19 +2130,19 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ bind(&not_string);
}
- if (expected.Contains(ToBooleanStub::SYMBOL)) {
+ if (expected.Contains(ToBooleanICStub::SYMBOL)) {
// Symbol value -> true.
__ CompareInstanceType(map, ip, SYMBOL_TYPE);
__ b(eq, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+ if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
// SIMD value -> true.
__ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
__ b(eq, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
DwVfpRegister dbl_scratch = double_scratch0();
Label not_heap_number;
@@ -2365,11 +2362,10 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->left()).is(r1));
DCHECK(ToRegister(instr->right()).is(r0));
- Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+ Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
- __ cmp(r0, Operand::Zero());
-
- EmitBranch(instr, ComputeCompareCondition(instr->op()));
+ __ CompareRoot(r0, Heap::kTrueValueRootIndex);
+ EmitBranch(instr, eq);
}
@@ -3057,17 +3053,20 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
if (instr->hydrogen()->from_inlined()) {
__ sub(result, sp, Operand(2 * kPointerSize));
- } else {
+ } else if (instr->hydrogen()->arguments_adaptor()) {
// Check if the calling frame is an arguments adaptor frame.
Label done, adapted;
__ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
+ __ ldr(result, MemOperand(scratch,
+ CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
// Result is the frame pointer for the frame if not adapted and for the real
// frame below the adaptor frame if adapted.
__ mov(result, fp, LeaveCC, ne);
__ mov(result, scratch, LeaveCC, eq);
+ } else {
+ __ mov(result, fp);
}
}
@@ -3189,15 +3188,26 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ b(ne, &loop);
__ bind(&invoke);
+
+ InvokeFlag flag = CALL_FUNCTION;
+ if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
+ DCHECK(!info()->saves_caller_doubles());
+ // TODO(ishell): drop current frame before pushing arguments to the stack.
+ flag = JUMP_FUNCTION;
+ ParameterCount actual(r0);
+ // It is safe to use r3, r4 and r5 as scratch registers here given that
+ // 1) we are not going to return to caller function anyway,
+ // 2) r3 (new.target) will be initialized below.
+ PrepareForTailCall(actual, r3, r4, r5);
+ }
+
DCHECK(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
+ SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
// The number of arguments is stored in receiver which is r0, as expected
// by InvokeFunction.
ParameterCount actual(receiver);
- __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
- safepoint_generator);
+ __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
}
@@ -3244,10 +3254,9 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
CallRuntime(Runtime::kDeclareGlobals, instr);
}
-
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr) {
+ bool is_tail_call, LInstruction* instr) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
bool can_invoke_directly =
@@ -3265,17 +3274,35 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
__ mov(r0, Operand(arity));
+ bool is_self_call = function.is_identical_to(info()->closure());
+
// Invoke function.
- __ ldr(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
- __ Call(ip);
+ if (is_self_call) {
+ Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
+ if (is_tail_call) {
+ __ Jump(self, RelocInfo::CODE_TARGET);
+ } else {
+ __ Call(self, RelocInfo::CODE_TARGET);
+ }
+ } else {
+ __ ldr(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
+ if (is_tail_call) {
+ __ Jump(ip);
+ } else {
+ __ Call(ip);
+ }
+ }
- // Set up deoptimization.
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ if (!is_tail_call) {
+ // Set up deoptimization.
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ }
} else {
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(arity);
+ ParameterCount actual(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(function_reg, expected, actual, flag, generator);
}
}
@@ -3562,22 +3589,77 @@ void LCodeGen::DoMathClz32(LMathClz32* instr) {
__ clz(result, input);
}
+void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+#if DEBUG
+ if (actual.is_reg()) {
+ DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
+ } else {
+ DCHECK(!AreAliased(scratch1, scratch2, scratch3));
+ }
+#endif
+ if (FLAG_code_comments) {
+ if (actual.is_reg()) {
+ Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+ } else {
+ Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
+ }
+ }
+
+ // Check if next frame is an arguments adaptor frame.
+ Register caller_args_count_reg = scratch1;
+ Label no_arguments_adaptor, formal_parameter_count_loaded;
+ __ ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(scratch3,
+ MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+ __ cmp(scratch3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ b(ne, &no_arguments_adaptor);
+
+ // Drop current frame and load arguments count from arguments adaptor frame.
+ __ mov(fp, scratch2);
+ __ ldr(caller_args_count_reg,
+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(caller_args_count_reg);
+ __ b(&formal_parameter_count_loaded);
+
+ __ bind(&no_arguments_adaptor);
+ // Load caller's formal parameter count
+ __ mov(caller_args_count_reg, Operand(info()->literal()->parameter_count()));
+
+ __ bind(&formal_parameter_count_loaded);
+ __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
+
+ Comment(";;; }");
+}
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ HInvokeFunction* hinstr = instr->hydrogen();
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->function()).is(r1));
DCHECK(instr->HasPointerMap());
- Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+ bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
+
+ if (is_tail_call) {
+ DCHECK(!info()->saves_caller_doubles());
+ ParameterCount actual(instr->arity());
+ // It is safe to use r3, r4 and r5 as scratch registers here given that
+ // 1) we are not going to return to caller function anyway,
+ // 2) r3 (new.target) will be initialized below.
+ PrepareForTailCall(actual, r3, r4, r5);
+ }
+
+ Handle<JSFunction> known_function = hinstr->known_function();
if (known_function.is_null()) {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(instr->arity());
- __ InvokeFunction(r1, no_reg, count, CALL_FUNCTION, generator);
+ ParameterCount actual(instr->arity());
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(r1, no_reg, actual, flag, generator);
} else {
- CallKnownFunction(known_function,
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(), instr);
+ CallKnownFunction(known_function, hinstr->formal_parameter_count(),
+ instr->arity(), is_tail_call, instr);
}
}
@@ -3640,56 +3722,6 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
}
-void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
- DCHECK(ToRegister(instr->function()).is(r1));
- DCHECK(ToRegister(instr->result()).is(r0));
-
- // Change context.
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
- // Always initialize new target and number of actual arguments.
- __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
- __ mov(r0, Operand(instr->arity()));
-
- // Load the code entry address
- __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- __ Call(ip);
-
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
- HCallFunction* hinstr = instr->hydrogen();
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->function()).is(r1));
- DCHECK(ToRegister(instr->result()).is(r0));
-
- int arity = instr->arity();
- ConvertReceiverMode mode = hinstr->convert_mode();
- if (hinstr->HasVectorAndSlot()) {
- Register slot_register = ToRegister(instr->temp_slot());
- Register vector_register = ToRegister(instr->temp_vector());
- DCHECK(slot_register.is(r3));
- DCHECK(vector_register.is(r2));
-
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
- int index = vector->GetIndex(hinstr->slot());
-
- __ Move(vector_register, vector);
- __ mov(slot_register, Operand(Smi::FromInt(index)));
-
- Handle<Code> ic =
- CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- } else {
- __ mov(r0, Operand(arity));
- CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->constructor()).is(r1));
@@ -5152,13 +5184,6 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
}
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- DCHECK(ToRegister(instr->value()).is(r0));
- __ push(r0);
- CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->value()).is(r3));
DCHECK(ToRegister(instr->result()).is(r0));
@@ -5517,13 +5542,6 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
__ bind(&done);
}
-
-void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
- Register context = ToRegister(instr->context());
- __ str(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.h b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.h
index 67925ccdf6..8bbacc3c58 100644
--- a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.h
@@ -218,11 +218,14 @@ class LCodeGen: public LCodeGenBase {
LInstruction* instr,
LOperand* context);
+ void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
+ Register scratch2, Register scratch3);
+
// Generate a direct call to a known function. Expects the function
// to be in r1.
void CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr);
+ bool is_tail_call, LInstruction* instr);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
diff --git a/deps/v8/src/crankshaft/arm64/lithium-arm64.cc b/deps/v8/src/crankshaft/arm64/lithium-arm64.cc
index c5d42082bd..6cfc846548 100644
--- a/deps/v8/src/crankshaft/arm64/lithium-arm64.cc
+++ b/deps/v8/src/crankshaft/arm64/lithium-arm64.cc
@@ -66,13 +66,6 @@ void LBranch::PrintDataTo(StringStream* stream) {
}
-void LCallJSFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- function()->PrintTo(stream);
- stream->Add("#%d / ", arity());
-}
-
-
void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
for (int i = 0; i < InputCount(); i++) {
InputAt(i)->PrintTo(stream);
@@ -135,20 +128,6 @@ void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
}
-void LCallFunction::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add(" ");
- function()->PrintTo(stream);
- if (hydrogen()->HasVectorAndSlot()) {
- stream->Add(" (type-feedback-vector ");
- temp_vector()->PrintTo(stream);
- stream->Add(" ");
- temp_slot()->PrintTo(stream);
- stream->Add(")");
- }
-}
-
-
void LInvokeFunction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
function()->PrintTo(stream);
@@ -735,33 +714,22 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
}
chunk_->AddInstruction(instr, current_block_);
- if (instr->IsCall() || instr->IsPrologue()) {
- HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
- if (hydrogen_val->HasObservableSideEffects()) {
- HSimulate* sim = HSimulate::cast(hydrogen_val->next());
- sim->ReplayEnvironment(current_block_->last_environment());
- hydrogen_value_for_lazy_bailout = sim;
- }
- LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
- bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
- chunk_->AddInstruction(bailout, current_block_);
- }
+ CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
}
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
- int argument_index_accumulator = 0;
- ZoneList<HValue*> objects_to_materialize(0, zone());
- instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator,
- &objects_to_materialize));
- return instr;
+ return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
}
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
- return new (zone()) LPrologue();
+ LInstruction* result = new (zone()) LPrologue();
+ if (info_->num_heap_slots() > 0) {
+ result = MarkAsCall(result, instr);
+ }
+ return result;
}
@@ -817,13 +785,6 @@ LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
}
-LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
- HBoundsCheckBaseIndexInformation* instr) {
- UNREACHABLE();
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
info()->MarkAsRequiresFrame();
LOperand* args = NULL;
@@ -993,7 +954,7 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
return new(zone()) LBranch(UseRegister(value), TempRegister(), NULL);
}
- ToBooleanStub::Types expected = instr->expected_input_types();
+ ToBooleanICStub::Types expected = instr->expected_input_types();
bool needs_temps = expected.NeedsMap() || expected.IsEmpty();
LOperand* temp1 = needs_temps ? TempRegister() : NULL;
LOperand* temp2 = needs_temps ? TempRegister() : NULL;
@@ -1011,16 +972,6 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
}
-LInstruction* LChunkBuilder::DoCallJSFunction(
- HCallJSFunction* instr) {
- LOperand* function = UseFixed(instr->function(), x1);
-
- LCallJSFunction* result = new(zone()) LCallJSFunction(function);
-
- return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallWithDescriptor(
HCallWithDescriptor* instr) {
CallInterfaceDescriptor descriptor = instr->descriptor();
@@ -1045,23 +996,10 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(descriptor,
ops,
zone());
- return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* function = UseFixed(instr->function(), x1);
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(x3);
- vector = FixedTemp(x2);
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
}
-
- LCallFunction* call =
- new (zone()) LCallFunction(context, function, slot, vector);
- return MarkAsCall(DefineFixed(call, x0), instr);
+ return MarkAsCall(DefineFixed(result, x0), instr);
}
@@ -1454,11 +1392,9 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
HEnvironment* outer = current_block_->last_environment();
outer->set_ast_id(instr->ReturnId());
HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(instr->closure(),
- instr->arguments_count(),
- instr->function(),
- undefined,
- instr->inlining_kind());
+ HEnvironment* inner = outer->CopyForInlining(
+ instr->closure(), instr->arguments_count(), instr->function(), undefined,
+ instr->inlining_kind(), instr->syntactic_tail_call_mode());
// Only replay binding of arguments object if it wasn't removed from graph.
if ((instr->arguments_var() != NULL) &&
instr->arguments_object()->IsLinked()) {
@@ -1553,6 +1489,9 @@ LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
// The function is required (by MacroAssembler::InvokeFunction) to be in x1.
LOperand* function = UseFixed(instr->function(), x1);
LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
return MarkAsCall(DefineFixed(result, x0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -2457,13 +2396,6 @@ LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
}
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
- LOperand* object = UseFixed(instr->value(), x0);
- LToFastProperties* result = new(zone()) LToFastProperties(object);
- return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
@@ -2688,12 +2620,5 @@ LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
return AssignEnvironment(DefineAsRegister(result));
}
-
-LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->context());
- return new(zone()) LStoreFrameContext(context);
-}
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/crankshaft/arm64/lithium-arm64.h b/deps/v8/src/crankshaft/arm64/lithium-arm64.h
index 14abeb0ba6..237487ff88 100644
--- a/deps/v8/src/crankshaft/arm64/lithium-arm64.h
+++ b/deps/v8/src/crankshaft/arm64/lithium-arm64.h
@@ -32,8 +32,6 @@ class LCodeGen;
V(BitS) \
V(BoundsCheck) \
V(Branch) \
- V(CallFunction) \
- V(CallJSFunction) \
V(CallNewArray) \
V(CallRuntime) \
V(CallWithDescriptor) \
@@ -142,7 +140,6 @@ class LCodeGen;
V(StackCheck) \
V(StoreCodeEntry) \
V(StoreContextSlot) \
- V(StoreFrameContext) \
V(StoreKeyedExternal) \
V(StoreKeyedFixed) \
V(StoreKeyedFixedDouble) \
@@ -157,7 +154,6 @@ class LCodeGen;
V(SubS) \
V(TaggedToI) \
V(ThisFunction) \
- V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
V(TruncateDoubleToIntOrSmi) \
@@ -238,6 +234,13 @@ class LInstruction : public ZoneObject {
void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
bool IsCall() const { return IsCallBits::decode(bit_field_); }
+ void MarkAsSyntacticTailCall() {
+ bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
+ }
+ bool IsSyntacticTailCall() const {
+ return IsSyntacticTailCallBits::decode(bit_field_);
+ }
+
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
@@ -265,6 +268,8 @@ class LInstruction : public ZoneObject {
private:
class IsCallBits: public BitField<bool, 0, 1> {};
+ class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
+ };
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
@@ -635,6 +640,7 @@ class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+ DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
LOperand* function() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
@@ -801,46 +807,6 @@ class LBranch final : public LControlInstruction<1, 2> {
};
-class LCallJSFunction final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallJSFunction(LOperand* function) {
- inputs_[0] = function;
- }
-
- LOperand* function() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
- DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
- public:
- LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
- LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = function;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
- DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
- int arity() const { return hydrogen()->argument_count() - 1; }
- void PrintDataTo(StringStream* stream) override;
-};
-
-
class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2734,19 +2700,6 @@ class LThisFunction final : public LTemplateInstruction<1, 0, 0> {
};
-class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LToFastProperties(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
- DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 2> {
public:
LTransitionElementsKind(LOperand* object,
@@ -2888,18 +2841,6 @@ class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
};
-class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LStoreFrameContext(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
-};
-
-
class LWrapReceiver final : public LTemplateInstruction<1, 2, 0> {
public:
LWrapReceiver(LOperand* receiver, LOperand* function) {
diff --git a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
index 855cac14c0..9bbc8b87e8 100644
--- a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
+++ b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
@@ -359,38 +359,6 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
}
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
- HCallFunction* hinstr = instr->hydrogen();
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->function()).Is(x1));
- DCHECK(ToRegister(instr->result()).Is(x0));
-
- int arity = instr->arity();
- ConvertReceiverMode mode = hinstr->convert_mode();
- if (hinstr->HasVectorAndSlot()) {
- Register slot_register = ToRegister(instr->temp_slot());
- Register vector_register = ToRegister(instr->temp_vector());
- DCHECK(slot_register.is(x3));
- DCHECK(vector_register.is(x2));
-
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
- int index = vector->GetIndex(hinstr->slot());
-
- __ Mov(vector_register, vector);
- __ Mov(slot_register, Operand(Smi::FromInt(index)));
-
- Handle<Code> ic =
- CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Mov(x0, arity);
- CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
- }
- RecordPushedArgumentsDelta(hinstr->argument_delta());
-}
-
-
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(instr->IsMarkedAsCall());
DCHECK(ToRegister(instr->context()).is(cp));
@@ -606,19 +574,20 @@ bool LCodeGen::GeneratePrologue() {
info()->set_prologue_offset(masm_->pc_offset());
if (NeedsEagerFrame()) {
if (info()->IsStub()) {
- __ StubPrologue();
+ __ StubPrologue(
+ StackFrame::STUB,
+ GetStackSlotCount() + TypedFrameConstants::kFixedSlotCount);
} else {
__ Prologue(info()->GeneratePreagedPrologue());
+ // Reserve space for the stack slots needed by the code.
+ int slots = GetStackSlotCount();
+ if (slots > 0) {
+ __ Claim(slots, kPointerSize);
+ }
}
frame_is_built_ = true;
}
- // Reserve space for the stack slots needed by the code.
- int slots = GetStackSlotCount();
- if (slots > 0) {
- __ Claim(slots, kPointerSize);
- }
-
if (info()->saves_caller_doubles()) {
SaveCallerDoubles();
}
@@ -740,11 +709,11 @@ bool LCodeGen::GenerateDeferredCode() {
DCHECK(!frame_is_built_);
DCHECK(info()->IsStub());
frame_is_built_ = true;
- __ Push(lr, fp, cp);
+ __ Push(lr, fp);
__ Mov(fp, Smi::FromInt(StackFrame::STUB));
__ Push(fp);
__ Add(fp, __ StackPointer(),
- StandardFrameConstants::kFixedFrameSizeFromFp);
+ TypedFrameConstants::kFixedFrameSizeFromFp);
Comment(";;; Deferred code");
}
@@ -753,7 +722,7 @@ bool LCodeGen::GenerateDeferredCode() {
if (NeedsDeferredFrame()) {
Comment(";;; Destroy frame");
DCHECK(frame_is_built_);
- __ Pop(xzr, cp, fp, lr);
+ __ Pop(xzr, fp, lr);
frame_is_built_ = false;
}
@@ -1560,14 +1529,26 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ B(ne, &loop);
__ Bind(&invoke);
+
+ InvokeFlag flag = CALL_FUNCTION;
+ if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
+ DCHECK(!info()->saves_caller_doubles());
+ // TODO(ishell): drop current frame before pushing arguments to the stack.
+ flag = JUMP_FUNCTION;
+ ParameterCount actual(x0);
+ // It is safe to use x3, x4 and x5 as scratch registers here given that
+ // 1) we are not going to return to caller function anyway,
+ // 2) x3 (new.target) will be initialized below.
+ PrepareForTailCall(actual, x3, x4, x5);
+ }
+
DCHECK(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
// The number of arguments is stored in argc (receiver) which is x0, as
// expected by InvokeFunction.
ParameterCount actual(argc);
- __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
- safepoint_generator);
+ __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
}
@@ -1585,16 +1566,18 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
// get a pointer which will work well with LAccessArgumentsAt.
DCHECK(masm()->StackPointer().Is(jssp));
__ Sub(result, jssp, 2 * kPointerSize);
- } else {
+ } else if (instr->hydrogen()->arguments_adaptor()) {
DCHECK(instr->temp() != NULL);
Register previous_fp = ToRegister(instr->temp());
__ Ldr(previous_fp,
MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(result,
- MemOperand(previous_fp, StandardFrameConstants::kContextOffset));
+ __ Ldr(result, MemOperand(previous_fp,
+ CommonFrameConstants::kContextOrFrameTypeOffset));
__ Cmp(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ Csel(result, fp, previous_fp, ne);
+ } else {
+ __ Mov(result, fp);
}
}
@@ -1763,17 +1746,18 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ Ldr(temp, FieldMemOperand(value, String::kLengthOffset));
EmitCompareAndBranch(instr, ne, temp, 0);
} else {
- ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+ ToBooleanICStub::Types expected =
+ instr->hydrogen()->expected_input_types();
// Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
- if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+ if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
// undefined -> false.
__ JumpIfRoot(
value, Heap::kUndefinedValueRootIndex, false_label);
}
- if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+ if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
// Boolean -> its value.
__ JumpIfRoot(
value, Heap::kTrueValueRootIndex, true_label);
@@ -1781,13 +1765,13 @@ void LCodeGen::DoBranch(LBranch* instr) {
value, Heap::kFalseValueRootIndex, false_label);
}
- if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+ if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
// 'null' -> false.
__ JumpIfRoot(
value, Heap::kNullValueRootIndex, false_label);
}
- if (expected.Contains(ToBooleanStub::SMI)) {
+ if (expected.Contains(ToBooleanICStub::SMI)) {
// Smis: 0 -> false, all other -> true.
DCHECK(Smi::FromInt(0) == 0);
__ Cbz(value, false_label);
@@ -1815,13 +1799,13 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
}
- if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+ if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
// spec object -> true.
__ CompareInstanceType(map, scratch, FIRST_JS_RECEIVER_TYPE);
__ B(ge, true_label);
}
- if (expected.Contains(ToBooleanStub::STRING)) {
+ if (expected.Contains(ToBooleanICStub::STRING)) {
// String value -> false iff empty.
Label not_string;
__ CompareInstanceType(map, scratch, FIRST_NONSTRING_TYPE);
@@ -1832,19 +1816,19 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ Bind(&not_string);
}
- if (expected.Contains(ToBooleanStub::SYMBOL)) {
+ if (expected.Contains(ToBooleanICStub::SYMBOL)) {
// Symbol value -> true.
__ CompareInstanceType(map, scratch, SYMBOL_TYPE);
__ B(eq, true_label);
}
- if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+ if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
// SIMD value -> true.
__ CompareInstanceType(map, scratch, SIMD128_VALUE_TYPE);
__ B(eq, true_label);
}
- if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
Label not_heap_number;
__ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number);
@@ -1867,10 +1851,9 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
}
-
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr) {
+ bool is_tail_call, LInstruction* instr) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
bool can_invoke_directly =
@@ -1898,21 +1881,38 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ LoadRoot(x3, Heap::kUndefinedValueRootIndex);
__ Mov(arity_reg, arity);
+ bool is_self_call = function.is_identical_to(info()->closure());
+
// Invoke function.
- __ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
- __ Call(x10);
+ if (is_self_call) {
+ Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
+ if (is_tail_call) {
+ __ Jump(self, RelocInfo::CODE_TARGET);
+ } else {
+ __ Call(self, RelocInfo::CODE_TARGET);
+ }
+ } else {
+ __ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
+ if (is_tail_call) {
+ __ Jump(x10);
+ } else {
+ __ Call(x10);
+ }
+ }
- // Set up deoptimization.
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ if (!is_tail_call) {
+ // Set up deoptimization.
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ }
} else {
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(arity);
+ ParameterCount actual(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(function_reg, expected, actual, flag, generator);
}
}
-
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(instr->IsMarkedAsCall());
DCHECK(ToRegister(instr->result()).Is(x0));
@@ -1959,26 +1959,6 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
}
-void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
- DCHECK(instr->IsMarkedAsCall());
- DCHECK(ToRegister(instr->function()).is(x1));
-
- // Change context.
- __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
-
- // Always initialize new target and number of actual arguments.
- __ LoadRoot(x3, Heap::kUndefinedValueRootIndex);
- __ Mov(x0, instr->arity());
-
- // Load the code entry address
- __ Ldr(x10, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
- __ Call(x10);
-
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
- RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
-}
-
-
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
CallRuntime(instr->function(), instr->arity(), instr);
RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
@@ -2838,23 +2818,79 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
__ Scvtf(result, value);
}
+void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+#if DEBUG
+ if (actual.is_reg()) {
+ DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
+ } else {
+ DCHECK(!AreAliased(scratch1, scratch2, scratch3));
+ }
+#endif
+ if (FLAG_code_comments) {
+ if (actual.is_reg()) {
+ Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+ } else {
+ Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
+ }
+ }
+
+ // Check if next frame is an arguments adaptor frame.
+ Register caller_args_count_reg = scratch1;
+ Label no_arguments_adaptor, formal_parameter_count_loaded;
+ __ Ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(scratch3,
+ MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+ __ Cmp(scratch3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ B(ne, &no_arguments_adaptor);
+
+ // Drop current frame and load arguments count from arguments adaptor frame.
+ __ mov(fp, scratch2);
+ __ Ldr(caller_args_count_reg,
+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(caller_args_count_reg);
+ __ B(&formal_parameter_count_loaded);
+
+ __ bind(&no_arguments_adaptor);
+ // Load caller's formal parameter count
+ __ Mov(caller_args_count_reg,
+ Immediate(info()->literal()->parameter_count()));
+
+ __ bind(&formal_parameter_count_loaded);
+ __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
+
+ Comment(";;; }");
+}
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ HInvokeFunction* hinstr = instr->hydrogen();
DCHECK(ToRegister(instr->context()).is(cp));
// The function is required to be in x1.
DCHECK(ToRegister(instr->function()).is(x1));
DCHECK(instr->HasPointerMap());
- Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+ bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
+
+ if (is_tail_call) {
+ DCHECK(!info()->saves_caller_doubles());
+ ParameterCount actual(instr->arity());
+ // It is safe to use x3, x4 and x5 as scratch registers here given that
+ // 1) we are not going to return to caller function anyway,
+ // 2) x3 (new.target) will be initialized below.
+ PrepareForTailCall(actual, x3, x4, x5);
+ }
+
+ Handle<JSFunction> known_function = hinstr->known_function();
if (known_function.is_null()) {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(instr->arity());
- __ InvokeFunction(x1, no_reg, count, CALL_FUNCTION, generator);
+ ParameterCount actual(instr->arity());
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(x1, no_reg, actual, flag, generator);
} else {
- CallKnownFunction(known_function,
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(), instr);
+ CallKnownFunction(known_function, hinstr->formal_parameter_count(),
+ instr->arity(), is_tail_call, instr);
}
RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
}
@@ -5245,10 +5281,10 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->left()).is(x1));
DCHECK(ToRegister(instr->right()).is(x0));
- Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+ Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
-
- EmitCompareAndBranch(instr, TokenToCondition(instr->op(), false), x0, 0);
+ __ CompareRoot(x0, Heap::kTrueValueRootIndex);
+ EmitBranch(instr, eq);
}
@@ -5375,14 +5411,6 @@ void LCodeGen::DoThisFunction(LThisFunction* instr) {
}
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- DCHECK(ToRegister(instr->value()).Is(x0));
- DCHECK(ToRegister(instr->result()).Is(x0));
- __ Push(x0);
- CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object = ToRegister(instr->object());
@@ -5703,12 +5731,5 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
__ Bind(&done);
}
-
-void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
- Register context = ToRegister(instr->context());
- __ Str(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h
index cf7de10394..f67ad5ab5d 100644
--- a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h
+++ b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h
@@ -322,11 +322,14 @@ class LCodeGen: public LCodeGenBase {
LInstruction* instr,
LOperand* context);
+ void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
+ Register scratch2, Register scratch3);
+
// Generate a direct call to a known function. Expects the function
// to be in x1.
void CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr);
+ bool is_tail_call, LInstruction* instr);
// Support for recording safepoint and position information.
void RecordAndWritePosition(int position) override;
diff --git a/deps/v8/src/crankshaft/compilation-phase.cc b/deps/v8/src/crankshaft/compilation-phase.cc
new file mode 100644
index 0000000000..9b40ccaec4
--- /dev/null
+++ b/deps/v8/src/crankshaft/compilation-phase.cc
@@ -0,0 +1,44 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/compilation-phase.h"
+
+#include "src/crankshaft/hydrogen.h"
+#include "src/isolate.h"
+
+namespace v8 {
+namespace internal {
+
+CompilationPhase::CompilationPhase(const char* name, CompilationInfo* info)
+ : name_(name), info_(info), zone_(info->isolate()->allocator()) {
+ if (FLAG_hydrogen_stats) {
+ info_zone_start_allocation_size_ = info->zone()->allocation_size();
+ timer_.Start();
+ }
+}
+
+CompilationPhase::~CompilationPhase() {
+ if (FLAG_hydrogen_stats) {
+ size_t size = zone()->allocation_size();
+ size += info_->zone()->allocation_size() - info_zone_start_allocation_size_;
+ isolate()->GetHStatistics()->SaveTiming(name_, timer_.Elapsed(), size);
+ }
+}
+
+bool CompilationPhase::ShouldProduceTraceOutput() const {
+ // Trace if the appropriate trace flag is set and the phase name's first
+ // character is in the FLAG_trace_phase command line parameter.
+ AllowHandleDereference allow_deref;
+ bool tracing_on =
+ info()->IsStub()
+ ? FLAG_trace_hydrogen_stubs
+ : (FLAG_trace_hydrogen &&
+ info()->shared_info()->PassesFilter(FLAG_trace_hydrogen_filter));
+ return (tracing_on &&
+ base::OS::StrChr(const_cast<char*>(FLAG_trace_phase), name_[0]) !=
+ NULL);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/crankshaft/compilation-phase.h b/deps/v8/src/crankshaft/compilation-phase.h
new file mode 100644
index 0000000000..99e24c72c8
--- /dev/null
+++ b/deps/v8/src/crankshaft/compilation-phase.h
@@ -0,0 +1,41 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_COMPILATION_PHASE_H_
+#define V8_CRANKSHAFT_COMPILATION_PHASE_H_
+
+#include "src/allocation.h"
+#include "src/compiler.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+class CompilationPhase BASE_EMBEDDED {
+ public:
+ CompilationPhase(const char* name, CompilationInfo* info);
+ ~CompilationPhase();
+
+ protected:
+ bool ShouldProduceTraceOutput() const;
+
+ const char* name() const { return name_; }
+ CompilationInfo* info() const { return info_; }
+ Isolate* isolate() const { return info()->isolate(); }
+ Zone* zone() { return &zone_; }
+
+ private:
+ const char* name_;
+ CompilationInfo* info_;
+ Zone zone_;
+ size_t info_zone_start_allocation_size_;
+ base::ElapsedTimer timer_;
+
+ DISALLOW_COPY_AND_ASSIGN(CompilationPhase);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CRANKSHAFT_COMPILATION_PHASE_H_
diff --git a/deps/v8/src/crankshaft/hydrogen-bch.cc b/deps/v8/src/crankshaft/hydrogen-bch.cc
deleted file mode 100644
index 060e0bcdab..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-bch.cc
+++ /dev/null
@@ -1,379 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/hydrogen-bch.h"
-
-namespace v8 {
-namespace internal {
-
-/*
- * This class is a table with one element for eack basic block.
- *
- * It is used to check if, inside one loop, all execution paths contain
- * a bounds check for a particular [index, length] combination.
- * The reason is that if there is a path that stays in the loop without
- * executing a check then the check cannot be hoisted out of the loop (it
- * would likely fail and cause a deopt for no good reason).
- * We also check is there are paths that exit the loop early, and if yes we
- * perform the hoisting only if graph()->use_optimistic_licm() is true.
- * The reason is that such paths are realtively common and harmless (like in
- * a "search" method that scans an array until an element is found), but in
- * some cases they could cause a deopt if we hoist the check so this is a
- * situation we need to detect.
- */
-class InductionVariableBlocksTable BASE_EMBEDDED {
- public:
- class Element {
- public:
- static const int kNoBlock = -1;
-
- HBasicBlock* block() { return block_; }
- void set_block(HBasicBlock* block) { block_ = block; }
- bool is_start() { return is_start_; }
- bool is_proper_exit() { return is_proper_exit_; }
- bool is_in_loop() { return is_in_loop_; }
- bool has_check() { return has_check_; }
- void set_has_check() { has_check_ = true; }
- InductionVariableLimitUpdate* additional_limit() {
- return &additional_limit_;
- }
-
- /*
- * Initializes the table element for a given loop (identified by its
- * induction variable).
- */
- void InitializeLoop(InductionVariableData* data) {
- DCHECK(data->limit() != NULL);
- HLoopInformation* loop = data->phi()->block()->current_loop();
- is_start_ = (block() == loop->loop_header());
- is_proper_exit_ = (block() == data->induction_exit_target());
- is_in_loop_ = loop->IsNestedInThisLoop(block()->current_loop());
- has_check_ = false;
- }
-
- // Utility methods to iterate over dominated blocks.
- void ResetCurrentDominatedBlock() { current_dominated_block_ = kNoBlock; }
- HBasicBlock* CurrentDominatedBlock() {
- DCHECK(current_dominated_block_ != kNoBlock);
- return current_dominated_block_ < block()->dominated_blocks()->length() ?
- block()->dominated_blocks()->at(current_dominated_block_) : NULL;
- }
- HBasicBlock* NextDominatedBlock() {
- current_dominated_block_++;
- return CurrentDominatedBlock();
- }
-
- Element()
- : block_(NULL), is_start_(false), is_proper_exit_(false),
- has_check_(false), additional_limit_(),
- current_dominated_block_(kNoBlock) {}
-
- private:
- HBasicBlock* block_;
- bool is_start_;
- bool is_proper_exit_;
- bool is_in_loop_;
- bool has_check_;
- InductionVariableLimitUpdate additional_limit_;
- int current_dominated_block_;
- };
-
- HGraph* graph() const { return graph_; }
- Counters* counters() const { return graph()->isolate()->counters(); }
- HBasicBlock* loop_header() const { return loop_header_; }
- Element* at(int index) const { return &(elements_.at(index)); }
- Element* at(HBasicBlock* block) const { return at(block->block_id()); }
-
- void AddCheckAt(HBasicBlock* block) {
- at(block->block_id())->set_has_check();
- }
-
- /*
- * Initializes the table for a given loop (identified by its induction
- * variable).
- */
- void InitializeLoop(InductionVariableData* data) {
- for (int i = 0; i < graph()->blocks()->length(); i++) {
- at(i)->InitializeLoop(data);
- }
- loop_header_ = data->phi()->block()->current_loop()->loop_header();
- }
-
-
- enum Hoistability {
- HOISTABLE,
- OPTIMISTICALLY_HOISTABLE,
- NOT_HOISTABLE
- };
-
- /*
- * This method checks if it is appropriate to hoist the bounds checks on an
- * induction variable out of the loop.
- * The problem is that in the loop code graph there could be execution paths
- * where the check is not performed, but hoisting the check has the same
- * semantics as performing it at every loop iteration, which could cause
- * unnecessary check failures (which would mean unnecessary deoptimizations).
- * The method returns OK if there are no paths that perform an iteration
- * (loop back to the header) without meeting a check, or UNSAFE is set if
- * early exit paths are found.
- */
- Hoistability CheckHoistability() {
- for (int i = 0; i < elements_.length(); i++) {
- at(i)->ResetCurrentDominatedBlock();
- }
- bool unsafe = false;
-
- HBasicBlock* current = loop_header();
- while (current != NULL) {
- HBasicBlock* next;
-
- if (at(current)->has_check() || !at(current)->is_in_loop()) {
- // We found a check or we reached a dominated block out of the loop,
- // therefore this block is safe and we can backtrack.
- next = NULL;
- } else {
- for (int i = 0; i < current->end()->SuccessorCount(); i ++) {
- Element* successor = at(current->end()->SuccessorAt(i));
-
- if (!successor->is_in_loop()) {
- if (!successor->is_proper_exit()) {
- // We found a path that exits the loop early, and is not the exit
- // related to the induction limit, therefore hoisting checks is
- // an optimistic assumption.
- unsafe = true;
- }
- }
-
- if (successor->is_start()) {
- // We found a path that does one loop iteration without meeting any
- // check, therefore hoisting checks would be likely to cause
- // unnecessary deopts.
- return NOT_HOISTABLE;
- }
- }
-
- next = at(current)->NextDominatedBlock();
- }
-
- // If we have no next block we need to backtrack the tree traversal.
- while (next == NULL) {
- current = current->dominator();
- if (current != NULL) {
- next = at(current)->NextDominatedBlock();
- } else {
- // We reached the root: next stays NULL.
- next = NULL;
- break;
- }
- }
-
- current = next;
- }
-
- return unsafe ? OPTIMISTICALLY_HOISTABLE : HOISTABLE;
- }
-
- explicit InductionVariableBlocksTable(HGraph* graph)
- : graph_(graph), loop_header_(NULL),
- elements_(graph->blocks()->length(), graph->zone()) {
- for (int i = 0; i < graph->blocks()->length(); i++) {
- Element element;
- element.set_block(graph->blocks()->at(i));
- elements_.Add(element, graph->zone());
- DCHECK(at(i)->block()->block_id() == i);
- }
- }
-
- // Tries to hoist a check out of its induction loop.
- void ProcessRelatedChecks(
- InductionVariableData::InductionVariableCheck* check,
- InductionVariableData* data) {
- HValue* length = check->check()->length();
- check->set_processed();
- HBasicBlock* header =
- data->phi()->block()->current_loop()->loop_header();
- HBasicBlock* pre_header = header->predecessors()->at(0);
- // Check that the limit is defined in the loop preheader.
- if (!data->limit()->IsInteger32Constant()) {
- HBasicBlock* limit_block = data->limit()->block();
- if (limit_block != pre_header &&
- !limit_block->Dominates(pre_header)) {
- return;
- }
- }
- // Check that the length and limit have compatible representations.
- if (!(data->limit()->representation().Equals(
- length->representation()) ||
- data->limit()->IsInteger32Constant())) {
- return;
- }
- // Check that the length is defined in the loop preheader.
- if (check->check()->length()->block() != pre_header &&
- !check->check()->length()->block()->Dominates(pre_header)) {
- return;
- }
-
- // Add checks to the table.
- for (InductionVariableData::InductionVariableCheck* current_check = check;
- current_check != NULL;
- current_check = current_check->next()) {
- if (current_check->check()->length() != length) continue;
-
- AddCheckAt(current_check->check()->block());
- current_check->set_processed();
- }
-
- // Check that we will not cause unwanted deoptimizations.
- Hoistability hoistability = CheckHoistability();
- if (hoistability == NOT_HOISTABLE ||
- (hoistability == OPTIMISTICALLY_HOISTABLE &&
- !graph()->use_optimistic_licm())) {
- return;
- }
-
- // We will do the hoisting, but we must see if the limit is "limit" or if
- // all checks are done on constants: if all check are done against the same
- // constant limit we will use that instead of the induction limit.
- bool has_upper_constant_limit = true;
- int32_t upper_constant_limit =
- check->HasUpperLimit() ? check->upper_limit() : 0;
- for (InductionVariableData::InductionVariableCheck* current_check = check;
- current_check != NULL;
- current_check = current_check->next()) {
- has_upper_constant_limit =
- has_upper_constant_limit && current_check->HasUpperLimit() &&
- current_check->upper_limit() == upper_constant_limit;
- counters()->bounds_checks_eliminated()->Increment();
- current_check->check()->set_skip_check();
- }
-
- // Choose the appropriate limit.
- Zone* zone = graph()->zone();
- HValue* context = graph()->GetInvalidContext();
- HValue* limit = data->limit();
- if (has_upper_constant_limit) {
- HConstant* new_limit = HConstant::New(graph()->isolate(), zone, context,
- upper_constant_limit);
- new_limit->InsertBefore(pre_header->end());
- limit = new_limit;
- }
-
- // If necessary, redefine the limit in the preheader.
- if (limit->IsInteger32Constant() &&
- limit->block() != pre_header &&
- !limit->block()->Dominates(pre_header)) {
- HConstant* new_limit = HConstant::New(graph()->isolate(), zone, context,
- limit->GetInteger32Constant());
- new_limit->InsertBefore(pre_header->end());
- limit = new_limit;
- }
-
- // Do the hoisting.
- HBoundsCheck* hoisted_check = HBoundsCheck::New(
- graph()->isolate(), zone, context, limit, check->check()->length());
- hoisted_check->InsertBefore(pre_header->end());
- hoisted_check->set_allow_equality(true);
- counters()->bounds_checks_hoisted()->Increment();
- }
-
- void CollectInductionVariableData(HBasicBlock* bb) {
- bool additional_limit = false;
-
- for (int i = 0; i < bb->phis()->length(); i++) {
- HPhi* phi = bb->phis()->at(i);
- phi->DetectInductionVariable();
- }
-
- additional_limit = InductionVariableData::ComputeInductionVariableLimit(
- bb, at(bb)->additional_limit());
-
- if (additional_limit) {
- at(bb)->additional_limit()->updated_variable->
- UpdateAdditionalLimit(at(bb)->additional_limit());
- }
-
- for (HInstruction* i = bb->first(); i != NULL; i = i->next()) {
- if (!i->IsBoundsCheck()) continue;
- HBoundsCheck* check = HBoundsCheck::cast(i);
- InductionVariableData::BitwiseDecompositionResult decomposition;
- InductionVariableData::DecomposeBitwise(check->index(), &decomposition);
- if (!decomposition.base->IsPhi()) continue;
- HPhi* phi = HPhi::cast(decomposition.base);
-
- if (!phi->IsInductionVariable()) continue;
- InductionVariableData* data = phi->induction_variable_data();
-
- // For now ignore loops decrementing the index.
- if (data->increment() <= 0) continue;
- if (!data->LowerLimitIsNonNegativeConstant()) continue;
-
- // TODO(mmassi): skip OSR values for check->length().
- if (check->length() == data->limit() ||
- check->length() == data->additional_upper_limit()) {
- counters()->bounds_checks_eliminated()->Increment();
- check->set_skip_check();
- continue;
- }
-
- if (!phi->IsLimitedInductionVariable()) continue;
-
- int32_t limit = data->ComputeUpperLimit(decomposition.and_mask,
- decomposition.or_mask);
- phi->induction_variable_data()->AddCheck(check, limit);
- }
-
- for (int i = 0; i < bb->dominated_blocks()->length(); i++) {
- CollectInductionVariableData(bb->dominated_blocks()->at(i));
- }
-
- if (additional_limit) {
- at(bb->block_id())->additional_limit()->updated_variable->
- UpdateAdditionalLimit(at(bb->block_id())->additional_limit());
- }
- }
-
- void EliminateRedundantBoundsChecks(HBasicBlock* bb) {
- for (int i = 0; i < bb->phis()->length(); i++) {
- HPhi* phi = bb->phis()->at(i);
- if (!phi->IsLimitedInductionVariable()) continue;
-
- InductionVariableData* induction_data = phi->induction_variable_data();
- InductionVariableData::ChecksRelatedToLength* current_length_group =
- induction_data->checks();
- while (current_length_group != NULL) {
- current_length_group->CloseCurrentBlock();
- InductionVariableData::InductionVariableCheck* current_base_check =
- current_length_group->checks();
- InitializeLoop(induction_data);
-
- while (current_base_check != NULL) {
- ProcessRelatedChecks(current_base_check, induction_data);
- while (current_base_check != NULL &&
- current_base_check->processed()) {
- current_base_check = current_base_check->next();
- }
- }
-
- current_length_group = current_length_group->next();
- }
- }
- }
-
- private:
- HGraph* graph_;
- HBasicBlock* loop_header_;
- ZoneList<Element> elements_;
-};
-
-
-void HBoundsCheckHoistingPhase::HoistRedundantBoundsChecks() {
- InductionVariableBlocksTable table(graph());
- table.CollectInductionVariableData(graph()->entry_block());
- for (int i = 0; i < graph()->blocks()->length(); i++) {
- table.EliminateRedundantBoundsChecks(graph()->blocks()->at(i));
- }
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/hydrogen-bch.h b/deps/v8/src/crankshaft/hydrogen-bch.h
deleted file mode 100644
index cdcd407a09..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-bch.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_HYDROGEN_BCH_H_
-#define V8_CRANKSHAFT_HYDROGEN_BCH_H_
-
-#include "src/crankshaft/hydrogen.h"
-
-namespace v8 {
-namespace internal {
-
-
-class HBoundsCheckHoistingPhase : public HPhase {
- public:
- explicit HBoundsCheckHoistingPhase(HGraph* graph)
- : HPhase("H_Bounds checks hoisting", graph) { }
-
- void Run() {
- HoistRedundantBoundsChecks();
- }
-
- private:
- void HoistRedundantBoundsChecks();
-
- DISALLOW_COPY_AND_ASSIGN(HBoundsCheckHoistingPhase);
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_HYDROGEN_BCE_H_
diff --git a/deps/v8/src/crankshaft/hydrogen-environment-liveness.cc b/deps/v8/src/crankshaft/hydrogen-environment-liveness.cc
index ae0bd08837..7965a9432a 100644
--- a/deps/v8/src/crankshaft/hydrogen-environment-liveness.cc
+++ b/deps/v8/src/crankshaft/hydrogen-environment-liveness.cc
@@ -37,9 +37,9 @@ void HEnvironmentLivenessAnalysisPhase::ZapEnvironmentSlot(
int index, HSimulate* simulate) {
int operand_index = simulate->ToOperandIndex(index);
if (operand_index == -1) {
- simulate->AddAssignedValue(index, graph()->GetConstantUndefined());
+ simulate->AddAssignedValue(index, graph()->GetConstantOptimizedOut());
} else {
- simulate->SetOperandAt(operand_index, graph()->GetConstantUndefined());
+ simulate->SetOperandAt(operand_index, graph()->GetConstantOptimizedOut());
}
}
diff --git a/deps/v8/src/crankshaft/hydrogen-instructions.cc b/deps/v8/src/crankshaft/hydrogen-instructions.cc
index 729fc588bc..b57bebd8fc 100644
--- a/deps/v8/src/crankshaft/hydrogen-instructions.cc
+++ b/deps/v8/src/crankshaft/hydrogen-instructions.cc
@@ -25,6 +25,8 @@
#include "src/crankshaft/mips/lithium-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/crankshaft/mips64/lithium-mips64.h" // NOLINT
+#elif V8_TARGET_ARCH_S390
+#include "src/crankshaft/s390/lithium-s390.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/crankshaft/x87/lithium-x87.h" // NOLINT
#else
@@ -773,8 +775,6 @@ bool HInstruction::CanDeoptimize() {
case HValue::kArgumentsLength:
case HValue::kArgumentsObject:
case HValue::kBlockEntry:
- case HValue::kBoundsCheckBaseIndexInformation:
- case HValue::kCallFunction:
case HValue::kCallNewArray:
case HValue::kCapturedObject:
case HValue::kClassOfTestAndBranch:
@@ -815,7 +815,6 @@ bool HInstruction::CanDeoptimize() {
case HValue::kReturn:
case HValue::kSeqStringGetChar:
case HValue::kStoreCodeEntry:
- case HValue::kStoreFrameContext:
case HValue::kStoreKeyed:
case HValue::kStoreNamedField:
case HValue::kStoreNamedGeneric:
@@ -832,7 +831,6 @@ bool HInstruction::CanDeoptimize() {
case HValue::kBitwise:
case HValue::kBoundsCheck:
case HValue::kBranch:
- case HValue::kCallJSFunction:
case HValue::kCallRuntime:
case HValue::kCallWithDescriptor:
case HValue::kChange:
@@ -873,7 +871,6 @@ bool HInstruction::CanDeoptimize() {
case HValue::kStringAdd:
case HValue::kStringCompareAndBranch:
case HValue::kSub:
- case HValue::kToFastProperties:
case HValue::kTransitionElementsKind:
case HValue::kTrapAllocationMemento:
case HValue::kTypeof:
@@ -907,97 +904,24 @@ std::ostream& HUnaryCall::PrintDataTo(std::ostream& os) const { // NOLINT
}
-std::ostream& HCallJSFunction::PrintDataTo(std::ostream& os) const { // NOLINT
- return os << NameOf(function()) << " #" << argument_count();
-}
-
-
-HCallJSFunction* HCallJSFunction::New(Isolate* isolate, Zone* zone,
- HValue* context, HValue* function,
- int argument_count) {
- bool has_stack_check = false;
- if (function->IsConstant()) {
- HConstant* fun_const = HConstant::cast(function);
- Handle<JSFunction> jsfun =
- Handle<JSFunction>::cast(fun_const->handle(isolate));
- has_stack_check = !jsfun.is_null() &&
- (jsfun->code()->kind() == Code::FUNCTION ||
- jsfun->code()->kind() == Code::OPTIMIZED_FUNCTION);
- }
-
- return new (zone) HCallJSFunction(function, argument_count, has_stack_check);
-}
-
-
std::ostream& HBinaryCall::PrintDataTo(std::ostream& os) const { // NOLINT
return os << NameOf(first()) << " " << NameOf(second()) << " #"
<< argument_count();
}
-
-std::ostream& HCallFunction::PrintDataTo(std::ostream& os) const { // NOLINT
- os << NameOf(context()) << " " << NameOf(function());
- if (HasVectorAndSlot()) {
- os << " (type-feedback-vector icslot " << slot().ToInt() << ")";
- }
- os << " (convert mode" << convert_mode() << ")";
- return os;
+std::ostream& HInvokeFunction::PrintTo(std::ostream& os) const { // NOLINT
+ if (tail_call_mode() == TailCallMode::kAllow) os << "Tail";
+ return HBinaryCall::PrintTo(os);
}
-
-void HBoundsCheck::ApplyIndexChange() {
- if (skip_check()) return;
-
- DecompositionResult decomposition;
- bool index_is_decomposable = index()->TryDecompose(&decomposition);
- if (index_is_decomposable) {
- DCHECK(decomposition.base() == base());
- if (decomposition.offset() == offset() &&
- decomposition.scale() == scale()) return;
- } else {
- return;
- }
-
- ReplaceAllUsesWith(index());
-
- HValue* current_index = decomposition.base();
- int actual_offset = decomposition.offset() + offset();
- int actual_scale = decomposition.scale() + scale();
-
- HGraph* graph = block()->graph();
- Isolate* isolate = graph->isolate();
- Zone* zone = graph->zone();
- HValue* context = graph->GetInvalidContext();
- if (actual_offset != 0) {
- HConstant* add_offset =
- HConstant::New(isolate, zone, context, actual_offset);
- add_offset->InsertBefore(this);
- HInstruction* add =
- HAdd::New(isolate, zone, context, current_index, add_offset);
- add->InsertBefore(this);
- add->AssumeRepresentation(index()->representation());
- add->ClearFlag(kCanOverflow);
- current_index = add;
- }
-
- if (actual_scale != 0) {
- HConstant* sar_scale = HConstant::New(isolate, zone, context, actual_scale);
- sar_scale->InsertBefore(this);
- HInstruction* sar =
- HSar::New(isolate, zone, context, current_index, sar_scale);
- sar->InsertBefore(this);
- sar->AssumeRepresentation(index()->representation());
- current_index = sar;
+std::ostream& HInvokeFunction::PrintDataTo(std::ostream& os) const { // NOLINT
+ HBinaryCall::PrintDataTo(os);
+ if (syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ os << ", JSTailCall";
}
-
- SetOperandAt(0, current_index);
-
- base_ = NULL;
- offset_ = 0;
- scale_ = 0;
+ return os;
}
-
std::ostream& HBoundsCheck::PrintDataTo(std::ostream& os) const { // NOLINT
os << NameOf(index()) << " " << NameOf(length());
if (base() != NULL && (offset() != 0 || scale() != 0)) {
@@ -1053,20 +977,16 @@ Range* HBoundsCheck::InferRange(Zone* zone) {
}
-std::ostream& HBoundsCheckBaseIndexInformation::PrintDataTo(
- std::ostream& os) const { // NOLINT
- // TODO(svenpanne) This 2nd base_index() looks wrong...
- return os << "base: " << NameOf(base_index())
- << ", check: " << NameOf(base_index());
-}
-
-
std::ostream& HCallWithDescriptor::PrintDataTo(
std::ostream& os) const { // NOLINT
for (int i = 0; i < OperandCount(); i++) {
os << NameOf(OperandAt(i)) << " ";
}
- return os << "#" << argument_count();
+ os << "#" << argument_count();
+ if (syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ os << ", JSTailCall";
+ }
+ return os;
}
@@ -1129,23 +1049,23 @@ std::ostream& HReturn::PrintDataTo(std::ostream& os) const { // NOLINT
Representation HBranch::observed_input_representation(int index) {
- if (expected_input_types_.Contains(ToBooleanStub::NULL_TYPE) ||
- expected_input_types_.Contains(ToBooleanStub::SPEC_OBJECT) ||
- expected_input_types_.Contains(ToBooleanStub::STRING) ||
- expected_input_types_.Contains(ToBooleanStub::SYMBOL) ||
- expected_input_types_.Contains(ToBooleanStub::SIMD_VALUE)) {
+ if (expected_input_types_.Contains(ToBooleanICStub::NULL_TYPE) ||
+ expected_input_types_.Contains(ToBooleanICStub::SPEC_OBJECT) ||
+ expected_input_types_.Contains(ToBooleanICStub::STRING) ||
+ expected_input_types_.Contains(ToBooleanICStub::SYMBOL) ||
+ expected_input_types_.Contains(ToBooleanICStub::SIMD_VALUE)) {
return Representation::Tagged();
}
- if (expected_input_types_.Contains(ToBooleanStub::UNDEFINED)) {
- if (expected_input_types_.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ if (expected_input_types_.Contains(ToBooleanICStub::UNDEFINED)) {
+ if (expected_input_types_.Contains(ToBooleanICStub::HEAP_NUMBER)) {
return Representation::Double();
}
return Representation::Tagged();
}
- if (expected_input_types_.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ if (expected_input_types_.Contains(ToBooleanICStub::HEAP_NUMBER)) {
return Representation::Double();
}
- if (expected_input_types_.Contains(ToBooleanStub::SMI)) {
+ if (expected_input_types_.Contains(ToBooleanICStub::SMI)) {
return Representation::Smi();
}
return Representation::None();
@@ -1563,7 +1483,8 @@ HValue* HUnaryMathOperation::Canonicalize() {
val, representation(), false, false));
}
}
- if (op() == kMathFloor && value()->IsDiv() && value()->HasOneUse()) {
+ if (op() == kMathFloor && representation().IsSmiOrInteger32() &&
+ value()->IsDiv() && value()->HasOneUse()) {
HDiv* hdiv = HDiv::cast(value());
HValue* left = hdiv->left();
@@ -1980,452 +1901,6 @@ Range* HMod::InferRange(Zone* zone) {
}
-InductionVariableData* InductionVariableData::ExaminePhi(HPhi* phi) {
- if (phi->block()->loop_information() == NULL) return NULL;
- if (phi->OperandCount() != 2) return NULL;
- int32_t candidate_increment;
-
- candidate_increment = ComputeIncrement(phi, phi->OperandAt(0));
- if (candidate_increment != 0) {
- return new(phi->block()->graph()->zone())
- InductionVariableData(phi, phi->OperandAt(1), candidate_increment);
- }
-
- candidate_increment = ComputeIncrement(phi, phi->OperandAt(1));
- if (candidate_increment != 0) {
- return new(phi->block()->graph()->zone())
- InductionVariableData(phi, phi->OperandAt(0), candidate_increment);
- }
-
- return NULL;
-}
-
-
-/*
- * This function tries to match the following patterns (and all the relevant
- * variants related to |, & and + being commutative):
- * base | constant_or_mask
- * base & constant_and_mask
- * (base + constant_offset) & constant_and_mask
- * (base - constant_offset) & constant_and_mask
- */
-void InductionVariableData::DecomposeBitwise(
- HValue* value,
- BitwiseDecompositionResult* result) {
- HValue* base = IgnoreOsrValue(value);
- result->base = value;
-
- if (!base->representation().IsInteger32()) return;
-
- if (base->IsBitwise()) {
- bool allow_offset = false;
- int32_t mask = 0;
-
- HBitwise* bitwise = HBitwise::cast(base);
- if (bitwise->right()->IsInteger32Constant()) {
- mask = bitwise->right()->GetInteger32Constant();
- base = bitwise->left();
- } else if (bitwise->left()->IsInteger32Constant()) {
- mask = bitwise->left()->GetInteger32Constant();
- base = bitwise->right();
- } else {
- return;
- }
- if (bitwise->op() == Token::BIT_AND) {
- result->and_mask = mask;
- allow_offset = true;
- } else if (bitwise->op() == Token::BIT_OR) {
- result->or_mask = mask;
- } else {
- return;
- }
-
- result->context = bitwise->context();
-
- if (allow_offset) {
- if (base->IsAdd()) {
- HAdd* add = HAdd::cast(base);
- if (add->right()->IsInteger32Constant()) {
- base = add->left();
- } else if (add->left()->IsInteger32Constant()) {
- base = add->right();
- }
- } else if (base->IsSub()) {
- HSub* sub = HSub::cast(base);
- if (sub->right()->IsInteger32Constant()) {
- base = sub->left();
- }
- }
- }
-
- result->base = base;
- }
-}
-
-
-void InductionVariableData::AddCheck(HBoundsCheck* check,
- int32_t upper_limit) {
- DCHECK(limit_validity() != NULL);
- if (limit_validity() != check->block() &&
- !limit_validity()->Dominates(check->block())) return;
- if (!phi()->block()->current_loop()->IsNestedInThisLoop(
- check->block()->current_loop())) return;
-
- ChecksRelatedToLength* length_checks = checks();
- while (length_checks != NULL) {
- if (length_checks->length() == check->length()) break;
- length_checks = length_checks->next();
- }
- if (length_checks == NULL) {
- length_checks = new(check->block()->zone())
- ChecksRelatedToLength(check->length(), checks());
- checks_ = length_checks;
- }
-
- length_checks->AddCheck(check, upper_limit);
-}
-
-
-void InductionVariableData::ChecksRelatedToLength::CloseCurrentBlock() {
- if (checks() != NULL) {
- InductionVariableCheck* c = checks();
- HBasicBlock* current_block = c->check()->block();
- while (c != NULL && c->check()->block() == current_block) {
- c->set_upper_limit(current_upper_limit_);
- c = c->next();
- }
- }
-}
-
-
-void InductionVariableData::ChecksRelatedToLength::UseNewIndexInCurrentBlock(
- Token::Value token,
- int32_t mask,
- HValue* index_base,
- HValue* context) {
- DCHECK(first_check_in_block() != NULL);
- HValue* previous_index = first_check_in_block()->index();
- DCHECK(context != NULL);
-
- Zone* zone = index_base->block()->graph()->zone();
- Isolate* isolate = index_base->block()->graph()->isolate();
- set_added_constant(HConstant::New(isolate, zone, context, mask));
- if (added_index() != NULL) {
- added_constant()->InsertBefore(added_index());
- } else {
- added_constant()->InsertBefore(first_check_in_block());
- }
-
- if (added_index() == NULL) {
- first_check_in_block()->ReplaceAllUsesWith(first_check_in_block()->index());
- HInstruction* new_index = HBitwise::New(isolate, zone, context, token,
- index_base, added_constant());
- DCHECK(new_index->IsBitwise());
- new_index->ClearAllSideEffects();
- new_index->AssumeRepresentation(Representation::Integer32());
- set_added_index(HBitwise::cast(new_index));
- added_index()->InsertBefore(first_check_in_block());
- }
- DCHECK(added_index()->op() == token);
-
- added_index()->SetOperandAt(1, index_base);
- added_index()->SetOperandAt(2, added_constant());
- first_check_in_block()->SetOperandAt(0, added_index());
- if (previous_index->HasNoUses()) {
- previous_index->DeleteAndReplaceWith(NULL);
- }
-}
-
-void InductionVariableData::ChecksRelatedToLength::AddCheck(
- HBoundsCheck* check,
- int32_t upper_limit) {
- BitwiseDecompositionResult decomposition;
- InductionVariableData::DecomposeBitwise(check->index(), &decomposition);
-
- if (first_check_in_block() == NULL ||
- first_check_in_block()->block() != check->block()) {
- CloseCurrentBlock();
-
- first_check_in_block_ = check;
- set_added_index(NULL);
- set_added_constant(NULL);
- current_and_mask_in_block_ = decomposition.and_mask;
- current_or_mask_in_block_ = decomposition.or_mask;
- current_upper_limit_ = upper_limit;
-
- InductionVariableCheck* new_check = new(check->block()->graph()->zone())
- InductionVariableCheck(check, checks_, upper_limit);
- checks_ = new_check;
- return;
- }
-
- if (upper_limit > current_upper_limit()) {
- current_upper_limit_ = upper_limit;
- }
-
- if (decomposition.and_mask != 0 &&
- current_or_mask_in_block() == 0) {
- if (current_and_mask_in_block() == 0 ||
- decomposition.and_mask > current_and_mask_in_block()) {
- UseNewIndexInCurrentBlock(Token::BIT_AND,
- decomposition.and_mask,
- decomposition.base,
- decomposition.context);
- current_and_mask_in_block_ = decomposition.and_mask;
- }
- check->set_skip_check();
- }
- if (current_and_mask_in_block() == 0) {
- if (decomposition.or_mask > current_or_mask_in_block()) {
- UseNewIndexInCurrentBlock(Token::BIT_OR,
- decomposition.or_mask,
- decomposition.base,
- decomposition.context);
- current_or_mask_in_block_ = decomposition.or_mask;
- }
- check->set_skip_check();
- }
-
- if (!check->skip_check()) {
- InductionVariableCheck* new_check = new(check->block()->graph()->zone())
- InductionVariableCheck(check, checks_, upper_limit);
- checks_ = new_check;
- }
-}
-
-
-/*
- * This method detects if phi is an induction variable, with phi_operand as
- * its "incremented" value (the other operand would be the "base" value).
- *
- * It cheks is phi_operand has the form "phi + constant".
- * If yes, the constant is the increment that the induction variable gets at
- * every loop iteration.
- * Otherwise it returns 0.
- */
-int32_t InductionVariableData::ComputeIncrement(HPhi* phi,
- HValue* phi_operand) {
- if (!phi_operand->representation().IsSmiOrInteger32()) return 0;
-
- if (phi_operand->IsAdd()) {
- HAdd* operation = HAdd::cast(phi_operand);
- if (operation->left() == phi &&
- operation->right()->IsInteger32Constant()) {
- return operation->right()->GetInteger32Constant();
- } else if (operation->right() == phi &&
- operation->left()->IsInteger32Constant()) {
- return operation->left()->GetInteger32Constant();
- }
- } else if (phi_operand->IsSub()) {
- HSub* operation = HSub::cast(phi_operand);
- if (operation->left() == phi &&
- operation->right()->IsInteger32Constant()) {
- int constant = operation->right()->GetInteger32Constant();
- if (constant == kMinInt) return 0;
- return -constant;
- }
- }
-
- return 0;
-}
-
-
-/*
- * Swaps the information in "update" with the one contained in "this".
- * The swapping is important because this method is used while doing a
- * dominator tree traversal, and "update" will retain the old data that
- * will be restored while backtracking.
- */
-void InductionVariableData::UpdateAdditionalLimit(
- InductionVariableLimitUpdate* update) {
- DCHECK(update->updated_variable == this);
- if (update->limit_is_upper) {
- swap(&additional_upper_limit_, &update->limit);
- swap(&additional_upper_limit_is_included_, &update->limit_is_included);
- } else {
- swap(&additional_lower_limit_, &update->limit);
- swap(&additional_lower_limit_is_included_, &update->limit_is_included);
- }
-}
-
-
-int32_t InductionVariableData::ComputeUpperLimit(int32_t and_mask,
- int32_t or_mask) {
- // Should be Smi::kMaxValue but it must fit 32 bits; lower is safe anyway.
- const int32_t MAX_LIMIT = 1 << 30;
-
- int32_t result = MAX_LIMIT;
-
- if (limit() != NULL &&
- limit()->IsInteger32Constant()) {
- int32_t limit_value = limit()->GetInteger32Constant();
- if (!limit_included()) {
- limit_value--;
- }
- if (limit_value < result) result = limit_value;
- }
-
- if (additional_upper_limit() != NULL &&
- additional_upper_limit()->IsInteger32Constant()) {
- int32_t limit_value = additional_upper_limit()->GetInteger32Constant();
- if (!additional_upper_limit_is_included()) {
- limit_value--;
- }
- if (limit_value < result) result = limit_value;
- }
-
- if (and_mask > 0 && and_mask < MAX_LIMIT) {
- if (and_mask < result) result = and_mask;
- return result;
- }
-
- // Add the effect of the or_mask.
- result |= or_mask;
-
- return result >= MAX_LIMIT ? kNoLimit : result;
-}
-
-
-HValue* InductionVariableData::IgnoreOsrValue(HValue* v) {
- if (!v->IsPhi()) return v;
- HPhi* phi = HPhi::cast(v);
- if (phi->OperandCount() != 2) return v;
- if (phi->OperandAt(0)->block()->is_osr_entry()) {
- return phi->OperandAt(1);
- } else if (phi->OperandAt(1)->block()->is_osr_entry()) {
- return phi->OperandAt(0);
- } else {
- return v;
- }
-}
-
-
-InductionVariableData* InductionVariableData::GetInductionVariableData(
- HValue* v) {
- v = IgnoreOsrValue(v);
- if (v->IsPhi()) {
- return HPhi::cast(v)->induction_variable_data();
- }
- return NULL;
-}
-
-
-/*
- * Check if a conditional branch to "current_branch" with token "token" is
- * the branch that keeps the induction loop running (and, conversely, will
- * terminate it if the "other_branch" is taken).
- *
- * Three conditions must be met:
- * - "current_branch" must be in the induction loop.
- * - "other_branch" must be out of the induction loop.
- * - "token" and the induction increment must be "compatible": the token should
- * be a condition that keeps the execution inside the loop until the limit is
- * reached.
- */
-bool InductionVariableData::CheckIfBranchIsLoopGuard(
- Token::Value token,
- HBasicBlock* current_branch,
- HBasicBlock* other_branch) {
- if (!phi()->block()->current_loop()->IsNestedInThisLoop(
- current_branch->current_loop())) {
- return false;
- }
-
- if (phi()->block()->current_loop()->IsNestedInThisLoop(
- other_branch->current_loop())) {
- return false;
- }
-
- if (increment() > 0 && (token == Token::LT || token == Token::LTE)) {
- return true;
- }
- if (increment() < 0 && (token == Token::GT || token == Token::GTE)) {
- return true;
- }
- if (Token::IsInequalityOp(token) && (increment() == 1 || increment() == -1)) {
- return true;
- }
-
- return false;
-}
-
-
-void InductionVariableData::ComputeLimitFromPredecessorBlock(
- HBasicBlock* block,
- LimitFromPredecessorBlock* result) {
- if (block->predecessors()->length() != 1) return;
- HBasicBlock* predecessor = block->predecessors()->at(0);
- HInstruction* end = predecessor->last();
-
- if (!end->IsCompareNumericAndBranch()) return;
- HCompareNumericAndBranch* branch = HCompareNumericAndBranch::cast(end);
-
- Token::Value token = branch->token();
- if (!Token::IsArithmeticCompareOp(token)) return;
-
- HBasicBlock* other_target;
- if (block == branch->SuccessorAt(0)) {
- other_target = branch->SuccessorAt(1);
- } else {
- other_target = branch->SuccessorAt(0);
- token = Token::NegateCompareOp(token);
- DCHECK(block == branch->SuccessorAt(1));
- }
-
- InductionVariableData* data;
-
- data = GetInductionVariableData(branch->left());
- HValue* limit = branch->right();
- if (data == NULL) {
- data = GetInductionVariableData(branch->right());
- token = Token::ReverseCompareOp(token);
- limit = branch->left();
- }
-
- if (data != NULL) {
- result->variable = data;
- result->token = token;
- result->limit = limit;
- result->other_target = other_target;
- }
-}
-
-
-/*
- * Compute the limit that is imposed on an induction variable when entering
- * "block" (if any).
- * If the limit is the "proper" induction limit (the one that makes the loop
- * terminate when the induction variable reaches it) it is stored directly in
- * the induction variable data.
- * Otherwise the limit is written in "additional_limit" and the method
- * returns true.
- */
-bool InductionVariableData::ComputeInductionVariableLimit(
- HBasicBlock* block,
- InductionVariableLimitUpdate* additional_limit) {
- LimitFromPredecessorBlock limit;
- ComputeLimitFromPredecessorBlock(block, &limit);
- if (!limit.LimitIsValid()) return false;
-
- if (limit.variable->CheckIfBranchIsLoopGuard(limit.token,
- block,
- limit.other_target)) {
- limit.variable->limit_ = limit.limit;
- limit.variable->limit_included_ = limit.LimitIsIncluded();
- limit.variable->limit_validity_ = block;
- limit.variable->induction_exit_block_ = block->predecessors()->at(0);
- limit.variable->induction_exit_target_ = limit.other_target;
- return false;
- } else {
- additional_limit->updated_variable = limit.variable;
- additional_limit->limit = limit.limit;
- additional_limit->limit_is_upper = limit.LimitIsUpper();
- additional_limit->limit_is_included = limit.LimitIsIncluded();
- return true;
- }
-}
-
-
Range* HMathMinMax::InferRange(Zone* zone) {
if (representation().IsSmiOrInteger32()) {
Range* a = left()->range();
@@ -2652,7 +2127,11 @@ void HEnterInlined::RegisterReturnTarget(HBasicBlock* return_target,
std::ostream& HEnterInlined::PrintDataTo(std::ostream& os) const { // NOLINT
- return os << function()->debug_name()->ToCString().get();
+ os << function()->debug_name()->ToCString().get();
+ if (syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ os << ", JSTailCall";
+ }
+ return os;
}
@@ -3272,6 +2751,17 @@ bool HIsUndetectableAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
? FirstSuccessor() : SecondSuccessor();
return true;
}
+ if (value()->type().IsNull() || value()->type().IsUndefined()) {
+ *block = FirstSuccessor();
+ return true;
+ }
+ if (value()->type().IsBoolean() ||
+ value()->type().IsSmi() ||
+ value()->type().IsString() ||
+ value()->type().IsJSReceiver()) {
+ *block = SecondSuccessor();
+ return true;
+ }
*block = NULL;
return false;
}
@@ -3767,12 +3257,12 @@ bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
}
}
- bool keep_new_space_iterable = FLAG_log_gc || FLAG_heap_stats;
+ bool keep_heap_iterable = FLAG_log_gc || FLAG_heap_stats;
#ifdef VERIFY_HEAP
- keep_new_space_iterable = keep_new_space_iterable || FLAG_verify_heap;
+ keep_heap_iterable = keep_heap_iterable || FLAG_verify_heap;
#endif
- if (keep_new_space_iterable && dominator_allocate->IsNewSpaceAllocation()) {
+ if (keep_heap_iterable) {
dominator_allocate->MakePrefillWithFiller();
} else {
// TODO(hpayer): This is a short-term hack to make allocation mementos
diff --git a/deps/v8/src/crankshaft/hydrogen-instructions.h b/deps/v8/src/crankshaft/hydrogen-instructions.h
index 22ed052ba3..196a14fc70 100644
--- a/deps/v8/src/crankshaft/hydrogen-instructions.h
+++ b/deps/v8/src/crankshaft/hydrogen-instructions.h
@@ -56,11 +56,8 @@ class LChunkBuilder;
V(Bitwise) \
V(BlockEntry) \
V(BoundsCheck) \
- V(BoundsCheckBaseIndexInformation) \
V(Branch) \
V(CallWithDescriptor) \
- V(CallJSFunction) \
- V(CallFunction) \
V(CallNewArray) \
V(CallRuntime) \
V(CapturedObject) \
@@ -135,7 +132,6 @@ class LChunkBuilder;
V(StackCheck) \
V(StoreCodeEntry) \
V(StoreContextSlot) \
- V(StoreFrameContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -146,7 +142,6 @@ class LChunkBuilder;
V(StringCompareAndBranch) \
V(Sub) \
V(ThisFunction) \
- V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
V(Typeof) \
@@ -735,14 +730,6 @@ class HValue : public ZoneObject {
virtual void Verify() = 0;
#endif
- virtual bool TryDecompose(DecompositionResult* decomposition) {
- if (RedefinedOperand() != NULL) {
- return RedefinedOperand()->TryDecompose(decomposition);
- } else {
- return false;
- }
- }
-
// Returns true conservatively if the program might be able to observe a
// ToString() operation on this value.
bool ToStringCanBeObserved() const {
@@ -1368,10 +1355,8 @@ class HUnaryControlInstruction : public HTemplateControlInstruction<2, 1> {
class HBranch final : public HUnaryControlInstruction {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HBranch, HValue*);
- DECLARE_INSTRUCTION_FACTORY_P2(HBranch, HValue*,
- ToBooleanStub::Types);
- DECLARE_INSTRUCTION_FACTORY_P4(HBranch, HValue*,
- ToBooleanStub::Types,
+ DECLARE_INSTRUCTION_FACTORY_P2(HBranch, HValue*, ToBooleanICStub::Types);
+ DECLARE_INSTRUCTION_FACTORY_P4(HBranch, HValue*, ToBooleanICStub::Types,
HBasicBlock*, HBasicBlock*);
Representation RequiredInputRepresentation(int index) override {
@@ -1383,23 +1368,22 @@ class HBranch final : public HUnaryControlInstruction {
std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
- ToBooleanStub::Types expected_input_types() const {
+ ToBooleanICStub::Types expected_input_types() const {
return expected_input_types_;
}
DECLARE_CONCRETE_INSTRUCTION(Branch)
private:
- HBranch(HValue* value,
- ToBooleanStub::Types expected_input_types = ToBooleanStub::Types(),
- HBasicBlock* true_target = NULL,
- HBasicBlock* false_target = NULL)
+ HBranch(HValue* value, ToBooleanICStub::Types expected_input_types =
+ ToBooleanICStub::Types(),
+ HBasicBlock* true_target = NULL, HBasicBlock* false_target = NULL)
: HUnaryControlInstruction(value, true_target, false_target),
expected_input_types_(expected_input_types) {
SetFlag(kAllowUndefinedAsNaN);
}
- ToBooleanStub::Types expected_input_types_;
+ ToBooleanICStub::Types expected_input_types_;
};
@@ -1954,10 +1938,12 @@ class HEnterInlined final : public HTemplateInstruction<0> {
HConstant* closure_context, int arguments_count,
FunctionLiteral* function,
InliningKind inlining_kind, Variable* arguments_var,
- HArgumentsObject* arguments_object) {
- return new (zone) HEnterInlined(return_id, closure, closure_context,
- arguments_count, function, inlining_kind,
- arguments_var, arguments_object, zone);
+ HArgumentsObject* arguments_object,
+ TailCallMode syntactic_tail_call_mode) {
+ return new (zone)
+ HEnterInlined(return_id, closure, closure_context, arguments_count,
+ function, inlining_kind, arguments_var, arguments_object,
+ syntactic_tail_call_mode, zone);
}
void RegisterReturnTarget(HBasicBlock* return_target, Zone* zone);
@@ -1973,6 +1959,9 @@ class HEnterInlined final : public HTemplateInstruction<0> {
void set_arguments_pushed() { arguments_pushed_ = true; }
FunctionLiteral* function() const { return function_; }
InliningKind inlining_kind() const { return inlining_kind_; }
+ TailCallMode syntactic_tail_call_mode() const {
+ return syntactic_tail_call_mode_;
+ }
BailoutId ReturnId() const { return return_id_; }
int inlining_id() const { return inlining_id_; }
void set_inlining_id(int inlining_id) { inlining_id_ = inlining_id; }
@@ -1991,7 +1980,7 @@ class HEnterInlined final : public HTemplateInstruction<0> {
HConstant* closure_context, int arguments_count,
FunctionLiteral* function, InliningKind inlining_kind,
Variable* arguments_var, HArgumentsObject* arguments_object,
- Zone* zone)
+ TailCallMode syntactic_tail_call_mode, Zone* zone)
: return_id_(return_id),
shared_(handle(closure->shared())),
closure_(closure),
@@ -2000,6 +1989,7 @@ class HEnterInlined final : public HTemplateInstruction<0> {
arguments_pushed_(false),
function_(function),
inlining_kind_(inlining_kind),
+ syntactic_tail_call_mode_(syntactic_tail_call_mode),
inlining_id_(0),
arguments_var_(arguments_var),
arguments_object_(arguments_object),
@@ -2013,6 +2003,7 @@ class HEnterInlined final : public HTemplateInstruction<0> {
bool arguments_pushed_;
FunctionLiteral* function_;
InliningKind inlining_kind_;
+ TailCallMode syntactic_tail_call_mode_;
int inlining_id_;
Variable* arguments_var_;
HArgumentsObject* arguments_object_;
@@ -2220,51 +2211,17 @@ class HBinaryCall : public HCall<2> {
};
-class HCallJSFunction final : public HCall<1> {
- public:
- static HCallJSFunction* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* function, int argument_count);
-
- HValue* function() const { return OperandAt(0); }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- Representation RequiredInputRepresentation(int index) final {
- DCHECK(index == 0);
- return Representation::Tagged();
- }
-
- bool HasStackCheck() final { return has_stack_check_; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallJSFunction)
-
- private:
- // The argument count includes the receiver.
- HCallJSFunction(HValue* function,
- int argument_count,
- bool has_stack_check)
- : HCall<1>(argument_count),
- has_stack_check_(has_stack_check) {
- SetOperandAt(0, function);
- }
-
- bool has_stack_check_;
-};
-
-
-enum CallMode { NORMAL_CALL, TAIL_CALL };
-
-
class HCallWithDescriptor final : public HInstruction {
public:
- static HCallWithDescriptor* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* target, int argument_count,
- CallInterfaceDescriptor descriptor,
- const Vector<HValue*>& operands,
- CallMode call_mode = NORMAL_CALL) {
- HCallWithDescriptor* res = new (zone) HCallWithDescriptor(
- target, argument_count, descriptor, operands, call_mode, zone);
- DCHECK(operands.length() == res->GetParameterCount());
+ static HCallWithDescriptor* New(
+ Isolate* isolate, Zone* zone, HValue* context, HValue* target,
+ int argument_count, CallInterfaceDescriptor descriptor,
+ const Vector<HValue*>& operands,
+ TailCallMode syntactic_tail_call_mode = TailCallMode::kDisallow,
+ TailCallMode tail_call_mode = TailCallMode::kDisallow) {
+ HCallWithDescriptor* res = new (zone)
+ HCallWithDescriptor(target, argument_count, descriptor, operands,
+ syntactic_tail_call_mode, tail_call_mode, zone);
return res;
}
@@ -2286,7 +2243,16 @@ class HCallWithDescriptor final : public HInstruction {
HType CalculateInferredType() final { return HType::Tagged(); }
- bool IsTailCall() const { return call_mode_ == TAIL_CALL; }
+ // Defines whether this instruction corresponds to a JS call at tail position.
+ TailCallMode syntactic_tail_call_mode() const {
+ return SyntacticTailCallModeField::decode(bit_field_);
+ }
+
+ // Defines whether this call should be generated as a tail call.
+ TailCallMode tail_call_mode() const {
+ return TailCallModeField::decode(bit_field_);
+ }
+ bool IsTailCall() const { return tail_call_mode() == TailCallMode::kAllow; }
virtual int argument_count() const {
return argument_count_;
@@ -2306,14 +2272,18 @@ class HCallWithDescriptor final : public HInstruction {
// The argument count includes the receiver.
HCallWithDescriptor(HValue* target, int argument_count,
CallInterfaceDescriptor descriptor,
- const Vector<HValue*>& operands, CallMode call_mode,
- Zone* zone)
+ const Vector<HValue*>& operands,
+ TailCallMode syntactic_tail_call_mode,
+ TailCallMode tail_call_mode, Zone* zone)
: descriptor_(descriptor),
values_(GetParameterCount() + 1, zone),
argument_count_(argument_count),
- call_mode_(call_mode) {
+ bit_field_(
+ TailCallModeField::encode(tail_call_mode) |
+ SyntacticTailCallModeField::encode(syntactic_tail_call_mode)) {
+ DCHECK_EQ(operands.length(), GetParameterCount());
// We can only tail call without any stack arguments.
- DCHECK(call_mode != TAIL_CALL || argument_count == 0);
+ DCHECK(tail_call_mode != TailCallMode::kAllow || argument_count == 0);
AddOperand(target, zone);
for (int i = 0; i < operands.length(); i++) {
AddOperand(operands[i], zone);
@@ -2338,97 +2308,75 @@ class HCallWithDescriptor final : public HInstruction {
CallInterfaceDescriptor descriptor_;
ZoneList<HValue*> values_;
int argument_count_;
- CallMode call_mode_;
+ class TailCallModeField : public BitField<TailCallMode, 0, 1> {};
+ class SyntacticTailCallModeField
+ : public BitField<TailCallMode, TailCallModeField::kNext, 1> {};
+ uint32_t bit_field_;
};
class HInvokeFunction final : public HBinaryCall {
public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HInvokeFunction, HValue*, int);
-
- HInvokeFunction(HValue* context,
- HValue* function,
- Handle<JSFunction> known_function,
- int argument_count)
- : HBinaryCall(context, function, argument_count),
- known_function_(known_function) {
- formal_parameter_count_ =
- known_function.is_null()
- ? 0
- : known_function->shared()->internal_formal_parameter_count();
- has_stack_check_ = !known_function.is_null() &&
- (known_function->code()->kind() == Code::FUNCTION ||
- known_function->code()->kind() == Code::OPTIMIZED_FUNCTION);
- }
-
- static HInvokeFunction* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* function,
- Handle<JSFunction> known_function,
- int argument_count) {
- return new(zone) HInvokeFunction(context, function,
- known_function, argument_count);
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P5(HInvokeFunction, HValue*,
+ Handle<JSFunction>, int,
+ TailCallMode, TailCallMode);
HValue* context() { return first(); }
HValue* function() { return second(); }
Handle<JSFunction> known_function() { return known_function_; }
int formal_parameter_count() const { return formal_parameter_count_; }
- bool HasStackCheck() final { return has_stack_check_; }
+ bool HasStackCheck() final { return HasStackCheckField::decode(bit_field_); }
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction)
-
- private:
- HInvokeFunction(HValue* context, HValue* function, int argument_count)
- : HBinaryCall(context, function, argument_count),
- has_stack_check_(false) {
+ // Defines whether this instruction corresponds to a JS call at tail position.
+ TailCallMode syntactic_tail_call_mode() const {
+ return SyntacticTailCallModeField::decode(bit_field_);
}
- Handle<JSFunction> known_function_;
- int formal_parameter_count_;
- bool has_stack_check_;
-};
-
-
-class HCallFunction final : public HBinaryCall {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HCallFunction, HValue*, int,
- ConvertReceiverMode);
-
- HValue* context() const { return first(); }
- HValue* function() const { return second(); }
-
- ConvertReceiverMode convert_mode() const {
- return ConvertReceiverModeField::decode(bit_field_);
- }
- FeedbackVectorSlot slot() const { return slot_; }
- Handle<TypeFeedbackVector> feedback_vector() const {
- return feedback_vector_;
- }
- bool HasVectorAndSlot() const { return !feedback_vector_.is_null(); }
- void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
- FeedbackVectorSlot slot) {
- feedback_vector_ = vector;
- slot_ = slot;
+ // Defines whether this call should be generated as a tail call.
+ TailCallMode tail_call_mode() const {
+ return TailCallModeField::decode(bit_field_);
}
- DECLARE_CONCRETE_INSTRUCTION(CallFunction)
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction)
+ std::ostream& PrintTo(std::ostream& os) const override; // NOLINT
std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
- int argument_delta() const override { return -argument_count(); }
-
private:
- HCallFunction(HValue* context, HValue* function, int argument_count,
- ConvertReceiverMode convert_mode)
+ void set_has_stack_check(bool has_stack_check) {
+ bit_field_ = HasStackCheckField::update(bit_field_, has_stack_check);
+ }
+
+ HInvokeFunction(HValue* context, HValue* function,
+ Handle<JSFunction> known_function, int argument_count,
+ TailCallMode syntactic_tail_call_mode,
+ TailCallMode tail_call_mode)
: HBinaryCall(context, function, argument_count),
- bit_field_(ConvertReceiverModeField::encode(convert_mode)) {}
- Handle<TypeFeedbackVector> feedback_vector_;
- FeedbackVectorSlot slot_;
+ known_function_(known_function),
+ bit_field_(
+ TailCallModeField::encode(tail_call_mode) |
+ SyntacticTailCallModeField::encode(syntactic_tail_call_mode)) {
+ DCHECK(tail_call_mode != TailCallMode::kAllow ||
+ syntactic_tail_call_mode == TailCallMode::kAllow);
+ formal_parameter_count_ =
+ known_function.is_null()
+ ? 0
+ : known_function->shared()->internal_formal_parameter_count();
+ set_has_stack_check(
+ !known_function.is_null() &&
+ (known_function->code()->kind() == Code::FUNCTION ||
+ known_function->code()->kind() == Code::OPTIMIZED_FUNCTION));
+ }
- class ConvertReceiverModeField : public BitField<ConvertReceiverMode, 0, 2> {
- };
+ Handle<JSFunction> known_function_;
+ int formal_parameter_count_;
+ class HasStackCheckField : public BitField<bool, 0, 1> {};
+ class TailCallModeField
+ : public BitField<TailCallMode, HasStackCheckField::kNext, 1> {};
+ class SyntacticTailCallModeField
+ : public BitField<TailCallMode, TailCallModeField::kNext, 1> {};
uint32_t bit_field_;
};
@@ -2550,10 +2498,10 @@ class HUnaryMathOperation final : public HTemplateInstruction<2> {
// Indicates if we support a double (and int32) output for Math.floor and
// Math.round.
bool SupportsFlexibleFloorAndRound() const {
-#ifdef V8_TARGET_ARCH_ARM64
- // TODO(rmcilroy): Re-enable this for Arm64 once http://crbug.com/476477 is
- // fixed.
- return false;
+#if V8_TARGET_ARCH_ARM64
+ return true;
+#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
+ return CpuFeatures::IsSupported(SSE4_1);
#else
return false;
#endif
@@ -2997,226 +2945,6 @@ class HCheckHeapObject final : public HUnaryOperation {
};
-class InductionVariableData;
-
-
-struct InductionVariableLimitUpdate {
- InductionVariableData* updated_variable;
- HValue* limit;
- bool limit_is_upper;
- bool limit_is_included;
-
- InductionVariableLimitUpdate()
- : updated_variable(NULL), limit(NULL),
- limit_is_upper(false), limit_is_included(false) {}
-};
-
-
-class HBoundsCheck;
-class HPhi;
-class HBitwise;
-
-
-class InductionVariableData final : public ZoneObject {
- public:
- class InductionVariableCheck : public ZoneObject {
- public:
- HBoundsCheck* check() { return check_; }
- InductionVariableCheck* next() { return next_; }
- bool HasUpperLimit() { return upper_limit_ >= 0; }
- int32_t upper_limit() {
- DCHECK(HasUpperLimit());
- return upper_limit_;
- }
- void set_upper_limit(int32_t upper_limit) {
- upper_limit_ = upper_limit;
- }
-
- bool processed() { return processed_; }
- void set_processed() { processed_ = true; }
-
- InductionVariableCheck(HBoundsCheck* check,
- InductionVariableCheck* next,
- int32_t upper_limit = kNoLimit)
- : check_(check), next_(next), upper_limit_(upper_limit),
- processed_(false) {}
-
- private:
- HBoundsCheck* check_;
- InductionVariableCheck* next_;
- int32_t upper_limit_;
- bool processed_;
- };
-
- class ChecksRelatedToLength : public ZoneObject {
- public:
- HValue* length() { return length_; }
- ChecksRelatedToLength* next() { return next_; }
- InductionVariableCheck* checks() { return checks_; }
-
- void AddCheck(HBoundsCheck* check, int32_t upper_limit = kNoLimit);
- void CloseCurrentBlock();
-
- ChecksRelatedToLength(HValue* length, ChecksRelatedToLength* next)
- : length_(length), next_(next), checks_(NULL),
- first_check_in_block_(NULL),
- added_index_(NULL),
- added_constant_(NULL),
- current_and_mask_in_block_(0),
- current_or_mask_in_block_(0) {}
-
- private:
- void UseNewIndexInCurrentBlock(Token::Value token,
- int32_t mask,
- HValue* index_base,
- HValue* context);
-
- HBoundsCheck* first_check_in_block() { return first_check_in_block_; }
- HBitwise* added_index() { return added_index_; }
- void set_added_index(HBitwise* index) { added_index_ = index; }
- HConstant* added_constant() { return added_constant_; }
- void set_added_constant(HConstant* constant) { added_constant_ = constant; }
- int32_t current_and_mask_in_block() { return current_and_mask_in_block_; }
- int32_t current_or_mask_in_block() { return current_or_mask_in_block_; }
- int32_t current_upper_limit() { return current_upper_limit_; }
-
- HValue* length_;
- ChecksRelatedToLength* next_;
- InductionVariableCheck* checks_;
-
- HBoundsCheck* first_check_in_block_;
- HBitwise* added_index_;
- HConstant* added_constant_;
- int32_t current_and_mask_in_block_;
- int32_t current_or_mask_in_block_;
- int32_t current_upper_limit_;
- };
-
- struct LimitFromPredecessorBlock {
- InductionVariableData* variable;
- Token::Value token;
- HValue* limit;
- HBasicBlock* other_target;
-
- bool LimitIsValid() { return token != Token::ILLEGAL; }
-
- bool LimitIsIncluded() {
- return Token::IsEqualityOp(token) ||
- token == Token::GTE || token == Token::LTE;
- }
- bool LimitIsUpper() {
- return token == Token::LTE || token == Token::LT || token == Token::NE;
- }
-
- LimitFromPredecessorBlock()
- : variable(NULL),
- token(Token::ILLEGAL),
- limit(NULL),
- other_target(NULL) {}
- };
-
- static const int32_t kNoLimit = -1;
-
- static InductionVariableData* ExaminePhi(HPhi* phi);
- static void ComputeLimitFromPredecessorBlock(
- HBasicBlock* block,
- LimitFromPredecessorBlock* result);
- static bool ComputeInductionVariableLimit(
- HBasicBlock* block,
- InductionVariableLimitUpdate* additional_limit);
-
- struct BitwiseDecompositionResult {
- HValue* base;
- int32_t and_mask;
- int32_t or_mask;
- HValue* context;
-
- BitwiseDecompositionResult()
- : base(NULL), and_mask(0), or_mask(0), context(NULL) {}
- };
- static void DecomposeBitwise(HValue* value,
- BitwiseDecompositionResult* result);
-
- void AddCheck(HBoundsCheck* check, int32_t upper_limit = kNoLimit);
-
- bool CheckIfBranchIsLoopGuard(Token::Value token,
- HBasicBlock* current_branch,
- HBasicBlock* other_branch);
-
- void UpdateAdditionalLimit(InductionVariableLimitUpdate* update);
-
- HPhi* phi() { return phi_; }
- HValue* base() { return base_; }
- int32_t increment() { return increment_; }
- HValue* limit() { return limit_; }
- bool limit_included() { return limit_included_; }
- HBasicBlock* limit_validity() { return limit_validity_; }
- HBasicBlock* induction_exit_block() { return induction_exit_block_; }
- HBasicBlock* induction_exit_target() { return induction_exit_target_; }
- ChecksRelatedToLength* checks() { return checks_; }
- HValue* additional_upper_limit() { return additional_upper_limit_; }
- bool additional_upper_limit_is_included() {
- return additional_upper_limit_is_included_;
- }
- HValue* additional_lower_limit() { return additional_lower_limit_; }
- bool additional_lower_limit_is_included() {
- return additional_lower_limit_is_included_;
- }
-
- bool LowerLimitIsNonNegativeConstant() {
- if (base()->IsInteger32Constant() && base()->GetInteger32Constant() >= 0) {
- return true;
- }
- if (additional_lower_limit() != NULL &&
- additional_lower_limit()->IsInteger32Constant() &&
- additional_lower_limit()->GetInteger32Constant() >= 0) {
- // Ignoring the corner case of !additional_lower_limit_is_included()
- // is safe, handling it adds unneeded complexity.
- return true;
- }
- return false;
- }
-
- int32_t ComputeUpperLimit(int32_t and_mask, int32_t or_mask);
-
- private:
- template <class T> void swap(T* a, T* b) {
- T c(*a);
- *a = *b;
- *b = c;
- }
-
- InductionVariableData(HPhi* phi, HValue* base, int32_t increment)
- : phi_(phi), base_(IgnoreOsrValue(base)), increment_(increment),
- limit_(NULL), limit_included_(false), limit_validity_(NULL),
- induction_exit_block_(NULL), induction_exit_target_(NULL),
- checks_(NULL),
- additional_upper_limit_(NULL),
- additional_upper_limit_is_included_(false),
- additional_lower_limit_(NULL),
- additional_lower_limit_is_included_(false) {}
-
- static int32_t ComputeIncrement(HPhi* phi, HValue* phi_operand);
-
- static HValue* IgnoreOsrValue(HValue* v);
- static InductionVariableData* GetInductionVariableData(HValue* v);
-
- HPhi* phi_;
- HValue* base_;
- int32_t increment_;
- HValue* limit_;
- bool limit_included_;
- HBasicBlock* limit_validity_;
- HBasicBlock* induction_exit_block_;
- HBasicBlock* induction_exit_target_;
- ChecksRelatedToLength* checks_;
- HValue* additional_upper_limit_;
- bool additional_upper_limit_is_included_;
- HValue* additional_lower_limit_;
- bool additional_lower_limit_is_included_;
-};
-
-
class HPhi final : public HValue {
public:
HPhi(int merged_index, Zone* zone)
@@ -3250,21 +2978,6 @@ class HPhi final : public HValue {
int merged_index() const { return merged_index_; }
- InductionVariableData* induction_variable_data() {
- return induction_variable_data_;
- }
- bool IsInductionVariable() {
- return induction_variable_data_ != NULL;
- }
- bool IsLimitedInductionVariable() {
- return IsInductionVariable() &&
- induction_variable_data_->limit() != NULL;
- }
- void DetectInductionVariable() {
- DCHECK(induction_variable_data_ == NULL);
- induction_variable_data_ = InductionVariableData::ExaminePhi(this);
- }
-
std::ostream& PrintTo(std::ostream& os) const override; // NOLINT
#ifdef DEBUG
@@ -3310,7 +3023,6 @@ class HPhi final : public HValue {
int merged_index_ = 0;
int phi_id_ = -1;
- InductionVariableData* induction_variable_data_ = nullptr;
Representation representation_from_indirect_uses_ = Representation::None();
Representation representation_from_non_phi_uses_ = Representation::None();
@@ -3865,8 +3577,8 @@ class HWrapReceiver final : public HTemplateInstruction<2> {
class HApplyArguments final : public HTemplateInstruction<4> {
public:
- DECLARE_INSTRUCTION_FACTORY_P4(HApplyArguments, HValue*, HValue*, HValue*,
- HValue*);
+ DECLARE_INSTRUCTION_FACTORY_P5(HApplyArguments, HValue*, HValue*, HValue*,
+ HValue*, TailCallMode);
Representation RequiredInputRepresentation(int index) override {
// The length is untagged, all other inputs are tagged.
@@ -3880,13 +3592,16 @@ class HApplyArguments final : public HTemplateInstruction<4> {
HValue* length() { return OperandAt(2); }
HValue* elements() { return OperandAt(3); }
+ TailCallMode tail_call_mode() const {
+ return TailCallModeField::decode(bit_field_);
+ }
+
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments)
private:
- HApplyArguments(HValue* function,
- HValue* receiver,
- HValue* length,
- HValue* elements) {
+ HApplyArguments(HValue* function, HValue* receiver, HValue* length,
+ HValue* elements, TailCallMode tail_call_mode)
+ : bit_field_(TailCallModeField::encode(tail_call_mode)) {
set_representation(Representation::Tagged());
SetOperandAt(0, function);
SetOperandAt(1, receiver);
@@ -3894,12 +3609,16 @@ class HApplyArguments final : public HTemplateInstruction<4> {
SetOperandAt(3, elements);
SetAllSideEffects();
}
+
+ class TailCallModeField : public BitField<TailCallMode, 0, 1> {};
+ uint32_t bit_field_;
};
class HArgumentsElements final : public HTemplateInstruction<0> {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HArgumentsElements, bool);
+ DECLARE_INSTRUCTION_FACTORY_P2(HArgumentsElements, bool, bool);
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements)
@@ -3908,12 +3627,14 @@ class HArgumentsElements final : public HTemplateInstruction<0> {
}
bool from_inlined() const { return from_inlined_; }
+ bool arguments_adaptor() const { return arguments_adaptor_; }
protected:
bool DataEquals(HValue* other) override { return true; }
private:
- explicit HArgumentsElements(bool from_inlined) : from_inlined_(from_inlined) {
+ explicit HArgumentsElements(bool from_inlined, bool arguments_adaptor = true)
+ : from_inlined_(from_inlined), arguments_adaptor_(arguments_adaptor) {
// The value produced by this instruction is a pointer into the stack
// that looks as if it was a smi because of alignment.
set_representation(Representation::Tagged());
@@ -3923,6 +3644,7 @@ class HArgumentsElements final : public HTemplateInstruction<0> {
bool IsDeletable() const override { return true; }
bool from_inlined_;
+ bool arguments_adaptor_;
};
@@ -3981,9 +3703,6 @@ class HAccessArgumentsAt final : public HTemplateInstruction<3> {
};
-class HBoundsCheckBaseIndexInformation;
-
-
class HBoundsCheck final : public HTemplateInstruction<2> {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HBoundsCheck, HValue*, HValue*);
@@ -3995,24 +3714,6 @@ class HBoundsCheck final : public HTemplateInstruction<2> {
int offset() const { return offset_; }
int scale() const { return scale_; }
- void ApplyIndexChange();
- bool DetectCompoundIndex() {
- DCHECK(base() == NULL);
-
- DecompositionResult decomposition;
- if (index()->TryDecompose(&decomposition)) {
- base_ = decomposition.base();
- offset_ = decomposition.offset();
- scale_ = decomposition.scale();
- return true;
- } else {
- base_ = index();
- offset_ = 0;
- scale_ = 0;
- return false;
- }
- }
-
Representation RequiredInputRepresentation(int index) override {
return representation();
}
@@ -4031,8 +3732,6 @@ class HBoundsCheck final : public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(BoundsCheck)
protected:
- friend class HBoundsCheckBaseIndexInformation;
-
Range* InferRange(Zone* zone) override;
bool DataEquals(HValue* other) override { return true; }
@@ -4061,34 +3760,6 @@ class HBoundsCheck final : public HTemplateInstruction<2> {
};
-class HBoundsCheckBaseIndexInformation final : public HTemplateInstruction<2> {
- public:
- explicit HBoundsCheckBaseIndexInformation(HBoundsCheck* check) {
- DecompositionResult decomposition;
- if (check->index()->TryDecompose(&decomposition)) {
- SetOperandAt(0, decomposition.base());
- SetOperandAt(1, check);
- } else {
- UNREACHABLE();
- }
- }
-
- HValue* base_index() const { return OperandAt(0); }
- HBoundsCheck* bounds_check() { return HBoundsCheck::cast(OperandAt(1)); }
-
- DECLARE_CONCRETE_INSTRUCTION(BoundsCheckBaseIndexInformation)
-
- Representation RequiredInputRepresentation(int index) override {
- return representation();
- }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- int RedefinedOperandIndex() override { return 0; }
- bool IsPurelyInformativeDefinition() override { return true; }
-};
-
-
class HBitwiseBinaryOperation : public HBinaryOperation {
public:
HBitwiseBinaryOperation(HValue* context, HValue* left, HValue* right,
@@ -4711,18 +4382,6 @@ class HAdd final : public HArithmeticBinaryOperation {
HValue* Canonicalize() override;
- bool TryDecompose(DecompositionResult* decomposition) override {
- if (left()->IsInteger32Constant()) {
- decomposition->Apply(right(), left()->GetInteger32Constant());
- return true;
- } else if (right()->IsInteger32Constant()) {
- decomposition->Apply(left(), right()->GetInteger32Constant());
- return true;
- } else {
- return false;
- }
- }
-
void RepresentationChanged(Representation to) override {
if (to.IsTagged() &&
(left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved() ||
@@ -4802,15 +4461,6 @@ class HSub final : public HArithmeticBinaryOperation {
HValue* Canonicalize() override;
- bool TryDecompose(DecompositionResult* decomposition) override {
- if (right()->IsInteger32Constant()) {
- decomposition->Apply(left(), -right()->GetInteger32Constant());
- return true;
- } else {
- return false;
- }
- }
-
DECLARE_CONCRETE_INSTRUCTION(Sub)
protected:
@@ -5065,18 +4715,6 @@ class HShr final : public HBitwiseBinaryOperation {
static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
HValue* left, HValue* right);
- bool TryDecompose(DecompositionResult* decomposition) override {
- if (right()->IsInteger32Constant()) {
- if (decomposition->Apply(left(), 0, right()->GetInteger32Constant())) {
- // This is intended to look for HAdd and HSub, to handle compounds
- // like ((base + offset) >> scale) with one single decomposition.
- left()->TryDecompose(decomposition);
- return true;
- }
- }
- return false;
- }
-
Range* InferRange(Zone* zone) override;
void UpdateRepresentation(Representation new_rep,
@@ -5102,18 +4740,6 @@ class HSar final : public HBitwiseBinaryOperation {
static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
HValue* left, HValue* right);
- bool TryDecompose(DecompositionResult* decomposition) override {
- if (right()->IsInteger32Constant()) {
- if (decomposition->Apply(left(), 0, right()->GetInteger32Constant())) {
- // This is intended to look for HAdd and HSub, to handle compounds
- // like ((base + offset) >> scale) with one single decomposition.
- left()->TryDecompose(decomposition);
- return true;
- }
- }
- return false;
- }
-
Range* InferRange(Zone* zone) override;
void UpdateRepresentation(Representation new_rep,
@@ -5572,11 +5198,6 @@ inline bool ReceiverObjectNeedsWriteBarrier(HValue* object,
if (HAllocate::cast(object)->IsNewSpaceAllocation()) {
return false;
}
- // Stores to old space allocations require no write barriers if the value is
- // a constant provably not in new space.
- if (value->IsConstant() && HConstant::cast(value)->NotInNewSpace()) {
- return false;
- }
}
return true;
}
@@ -5928,6 +5549,10 @@ class HObjectAccess final {
Representation::Integer32());
}
+ static HObjectAccess ForMapDescriptors() {
+ return HObjectAccess(kInobject, Map::kDescriptorsOffset);
+ }
+
static HObjectAccess ForNameHashField() {
return HObjectAccess(kInobject,
Name::kHashFieldOffset,
@@ -7391,35 +7016,6 @@ class HMaybeGrowElements final : public HTemplateInstruction<5> {
};
-class HToFastProperties final : public HUnaryOperation {
- public:
- DECLARE_INSTRUCTION_FACTORY_P1(HToFastProperties, HValue*);
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ToFastProperties)
-
- private:
- explicit HToFastProperties(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Tagged());
- SetChangesFlag(kNewSpacePromotion);
-
- // This instruction is not marked as kChangesMaps, but does
- // change the map of the input operand. Use it only when creating
- // object literals via a runtime call.
- DCHECK(value->IsCallRuntime());
-#ifdef DEBUG
- const Runtime::Function* function = HCallRuntime::cast(value)->function();
- DCHECK(function->function_id == Runtime::kCreateObjectLiteral);
-#endif
- }
-
- bool IsDeletable() const override { return true; }
-};
-
-
class HSeqStringGetChar final : public HTemplateInstruction<2> {
public:
static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
@@ -7646,28 +7242,6 @@ class HLoadFieldByIndex final : public HTemplateInstruction<2> {
bool IsDeletable() const override { return true; }
};
-
-class HStoreFrameContext: public HUnaryOperation {
- public:
- DECLARE_INSTRUCTION_FACTORY_P1(HStoreFrameContext, HValue*);
-
- HValue* context() { return OperandAt(0); }
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext)
- private:
- explicit HStoreFrameContext(HValue* context)
- : HUnaryOperation(context) {
- set_representation(Representation::Tagged());
- SetChangesFlag(kContextSlots);
- }
-};
-
-
-
#undef DECLARE_INSTRUCTION
#undef DECLARE_CONCRETE_INSTRUCTION
diff --git a/deps/v8/src/crankshaft/hydrogen-osr.cc b/deps/v8/src/crankshaft/hydrogen-osr.cc
index c98bbf627f..8de3ac0705 100644
--- a/deps/v8/src/crankshaft/hydrogen-osr.cc
+++ b/deps/v8/src/crankshaft/hydrogen-osr.cc
@@ -30,7 +30,7 @@ HBasicBlock* HOsrBuilder::BuildOsrLoopEntry(IterationStatement* statement) {
HBasicBlock* non_osr_entry = graph->CreateBasicBlock();
osr_entry_ = graph->CreateBasicBlock();
HValue* true_value = graph->GetConstantTrue();
- HBranch* test = builder_->New<HBranch>(true_value, ToBooleanStub::Types(),
+ HBranch* test = builder_->New<HBranch>(true_value, ToBooleanICStub::Types(),
non_osr_entry, osr_entry_);
builder_->FinishCurrentBlock(test);
diff --git a/deps/v8/src/crankshaft/hydrogen-types.cc b/deps/v8/src/crankshaft/hydrogen-types.cc
index 8c8562581a..4266e28da0 100644
--- a/deps/v8/src/crankshaft/hydrogen-types.cc
+++ b/deps/v8/src/crankshaft/hydrogen-types.cc
@@ -22,7 +22,7 @@ HType HType::FromType(Type* type) {
if (type->Is(Type::Boolean())) return HType::Boolean();
if (type->Is(Type::Undefined())) return HType::Undefined();
if (type->Is(Type::Object())) return HType::JSObject();
- if (type->Is(Type::Receiver())) return HType::JSReceiver();
+ if (type->Is(Type::DetectableReceiver())) return HType::JSReceiver();
return HType::Tagged();
}
@@ -43,8 +43,13 @@ HType HType::FromValue(Handle<Object> value) {
if (value->IsString()) return HType::String();
if (value->IsBoolean()) return HType::Boolean();
if (value->IsUndefined()) return HType::Undefined();
- if (value->IsJSArray()) return HType::JSArray();
- if (value->IsJSObject()) return HType::JSObject();
+ if (value->IsJSArray()) {
+ DCHECK(!value->IsUndetectable());
+ return HType::JSArray();
+ }
+ if (value->IsJSObject() && !value->IsUndetectable()) {
+ return HType::JSObject();
+ }
DCHECK(value->IsHeapObject());
return HType::HeapObject();
}
diff --git a/deps/v8/src/crankshaft/hydrogen.cc b/deps/v8/src/crankshaft/hydrogen.cc
index b6fdd3a315..fd232701f2 100644
--- a/deps/v8/src/crankshaft/hydrogen.cc
+++ b/deps/v8/src/crankshaft/hydrogen.cc
@@ -11,7 +11,6 @@
#include "src/ast/scopeinfo.h"
#include "src/code-factory.h"
#include "src/crankshaft/hydrogen-bce.h"
-#include "src/crankshaft/hydrogen-bch.h"
#include "src/crankshaft/hydrogen-canonicalize.h"
#include "src/crankshaft/hydrogen-check-elimination.h"
#include "src/crankshaft/hydrogen-dce.h"
@@ -58,6 +57,8 @@
#include "src/crankshaft/mips/lithium-codegen-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/crankshaft/mips64/lithium-codegen-mips64.h" // NOLINT
+#elif V8_TARGET_ARCH_S390
+#include "src/crankshaft/s390/lithium-codegen-s390.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/crankshaft/x87/lithium-codegen-x87.h" // NOLINT
#else
@@ -688,28 +689,32 @@ HConstant* HGraph::GetConstantBool(bool value) {
return value ? GetConstantTrue() : GetConstantFalse();
}
-#define DEFINE_GET_CONSTANT(Name, name, type, htype, boolean_value, \
- undetectable) \
- HConstant* HGraph::GetConstant##Name() { \
- if (!constant_##name##_.is_set()) { \
- HConstant* constant = new (zone()) HConstant( \
- Unique<Object>::CreateImmovable( \
- isolate()->factory()->name##_value()), \
- Unique<Map>::CreateImmovable(isolate()->factory()->type##_map()), \
- false, Representation::Tagged(), htype, true, boolean_value, \
- undetectable, ODDBALL_TYPE); \
- constant->InsertAfter(entry_block()->first()); \
- constant_##name##_.set(constant); \
- } \
- return ReinsertConstantIfNecessary(constant_##name##_.get()); \
- }
-
-DEFINE_GET_CONSTANT(Undefined, undefined, undefined, HType::Undefined(), false,
- true)
-DEFINE_GET_CONSTANT(True, true, boolean, HType::Boolean(), true, false)
-DEFINE_GET_CONSTANT(False, false, boolean, HType::Boolean(), false, false)
-DEFINE_GET_CONSTANT(Hole, the_hole, the_hole, HType::None(), false, false)
-DEFINE_GET_CONSTANT(Null, null, null, HType::Null(), false, true)
+#define DEFINE_GET_CONSTANT(Name, name, constant, type, htype, boolean_value, \
+ undetectable) \
+ HConstant* HGraph::GetConstant##Name() { \
+ if (!constant_##name##_.is_set()) { \
+ HConstant* constant = new (zone()) HConstant( \
+ Unique<Object>::CreateImmovable(isolate()->factory()->constant()), \
+ Unique<Map>::CreateImmovable(isolate()->factory()->type##_map()), \
+ false, Representation::Tagged(), htype, true, boolean_value, \
+ undetectable, ODDBALL_TYPE); \
+ constant->InsertAfter(entry_block()->first()); \
+ constant_##name##_.set(constant); \
+ } \
+ return ReinsertConstantIfNecessary(constant_##name##_.get()); \
+ }
+
+DEFINE_GET_CONSTANT(Undefined, undefined, undefined_value, undefined,
+ HType::Undefined(), false, true)
+DEFINE_GET_CONSTANT(True, true, true_value, boolean, HType::Boolean(), true,
+ false)
+DEFINE_GET_CONSTANT(False, false, false_value, boolean, HType::Boolean(), false,
+ false)
+DEFINE_GET_CONSTANT(Hole, the_hole, the_hole_value, the_hole, HType::None(),
+ false, false)
+DEFINE_GET_CONSTANT(Null, null, null_value, null, HType::Null(), false, true)
+DEFINE_GET_CONSTANT(OptimizedOut, optimized_out, optimized_out, optimized_out,
+ HType::None(), false, false)
#undef DEFINE_GET_CONSTANT
@@ -906,8 +911,8 @@ void HGraphBuilder::IfBuilder::Then() {
// so that the graph builder visits it and sees any live range extending
// constructs within it.
HConstant* constant_false = builder()->graph()->GetConstantFalse();
- ToBooleanStub::Types boolean_type = ToBooleanStub::Types();
- boolean_type.Add(ToBooleanStub::BOOLEAN);
+ ToBooleanICStub::Types boolean_type = ToBooleanICStub::Types();
+ boolean_type.Add(ToBooleanICStub::BOOLEAN);
HBranch* branch = builder()->New<HBranch>(
constant_false, boolean_type, first_true_block_, first_false_block_);
builder()->FinishCurrentBlock(branch);
@@ -1302,9 +1307,9 @@ HValue* HGraphBuilder::BuildCheckString(HValue* string) {
return string;
}
-
-HValue* HGraphBuilder::BuildWrapReceiver(HValue* object, HValue* function) {
+HValue* HGraphBuilder::BuildWrapReceiver(HValue* object, HValue* checked) {
if (object->type().IsJSObject()) return object;
+ HValue* function = checked->ActualValue();
if (function->IsConstant() &&
HConstant::cast(function)->handle(isolate())->IsJSFunction()) {
Handle<JSFunction> f = Handle<JSFunction>::cast(
@@ -1312,7 +1317,7 @@ HValue* HGraphBuilder::BuildWrapReceiver(HValue* object, HValue* function) {
SharedFunctionInfo* shared = f->shared();
if (is_strict(shared->language_mode()) || shared->native()) return object;
}
- return Add<HWrapReceiver>(object, function);
+ return Add<HWrapReceiver>(object, checked);
}
@@ -3179,58 +3184,6 @@ HValue* HGraphBuilder::BuildCloneShallowArrayNonEmpty(HValue* boilerplate,
}
-void HGraphBuilder::BuildCompareNil(HValue* value, Type* type,
- HIfContinuation* continuation,
- MapEmbedding map_embedding) {
- IfBuilder if_nil(this);
-
- if (type->Maybe(Type::Undetectable())) {
- if_nil.If<HIsUndetectableAndBranch>(value);
- } else {
- bool maybe_null = type->Maybe(Type::Null());
- if (maybe_null) {
- if_nil.If<HCompareObjectEqAndBranch>(value, graph()->GetConstantNull());
- }
-
- if (type->Maybe(Type::Undefined())) {
- if (maybe_null) if_nil.Or();
- if_nil.If<HCompareObjectEqAndBranch>(value,
- graph()->GetConstantUndefined());
- }
-
- if_nil.Then();
- if_nil.Else();
-
- if (type->NumClasses() == 1) {
- BuildCheckHeapObject(value);
- // For ICs, the map checked below is a sentinel map that gets replaced by
- // the monomorphic map when the code is used as a template to generate a
- // new IC. For optimized functions, there is no sentinel map, the map
- // emitted below is the actual monomorphic map.
- if (map_embedding == kEmbedMapsViaWeakCells) {
- HValue* cell =
- Add<HConstant>(Map::WeakCellForMap(type->Classes().Current()));
- HValue* expected_map = Add<HLoadNamedField>(
- cell, nullptr, HObjectAccess::ForWeakCellValue());
- HValue* map =
- Add<HLoadNamedField>(value, nullptr, HObjectAccess::ForMap());
- IfBuilder map_check(this);
- map_check.IfNot<HCompareObjectEqAndBranch>(expected_map, map);
- map_check.ThenDeopt(Deoptimizer::kUnknownMap);
- map_check.End();
- } else {
- DCHECK(map_embedding == kEmbedMapsDirectly);
- Add<HCheckMaps>(value, type->Classes().Current());
- }
- } else {
- if_nil.Deopt(Deoptimizer::kTooManyUndetectableTypes);
- }
- }
-
- if_nil.CaptureContinuation(continuation);
-}
-
-
void HGraphBuilder::BuildCreateAllocationMemento(
HValue* previous_object,
HValue* previous_object_size,
@@ -3544,11 +3497,11 @@ HValue* HGraphBuilder::AddLoadJSBuiltin(int context_index) {
return Add<HLoadNamedField>(native_context, nullptr, function_access);
}
-
HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
: HGraphBuilder(info, CallInterfaceDescriptor()),
function_state_(NULL),
- initial_function_state_(this, info, NORMAL_RETURN, 0),
+ initial_function_state_(this, info, NORMAL_RETURN, 0,
+ TailCallMode::kAllow),
ast_context_(NULL),
break_scope_(NULL),
inlined_count_(0),
@@ -3621,9 +3574,16 @@ HBasicBlock* HOptimizedGraphBuilder::BuildLoopEntry() {
HBasicBlock* HOptimizedGraphBuilder::BuildLoopEntry(
IterationStatement* statement) {
- HBasicBlock* loop_entry = osr()->HasOsrEntryAt(statement)
- ? osr()->BuildOsrLoopEntry(statement)
- : BuildLoopEntry();
+ HBasicBlock* loop_entry;
+
+ if (osr()->HasOsrEntryAt(statement)) {
+ loop_entry = osr()->BuildOsrLoopEntry(statement);
+ if (function_state()->IsInsideDoExpressionScope()) {
+ Bailout(kDoExpressionUnmodelable);
+ }
+ } else {
+ loop_entry = BuildLoopEntry();
+ }
return loop_entry;
}
@@ -3652,7 +3612,6 @@ HGraph::HGraph(CompilationInfo* info, CallInterfaceDescriptor descriptor)
info_(info),
descriptor_(descriptor),
zone_(info->zone()),
- is_recursive_(false),
use_optimistic_licm_(false),
depends_on_empty_array_proto_elements_(false),
type_change_checksum_(0),
@@ -4085,11 +4044,12 @@ void HGraph::CollectPhis() {
// a (possibly inlined) function.
FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
CompilationInfo* info, InliningKind inlining_kind,
- int inlining_id)
+ int inlining_id, TailCallMode tail_call_mode)
: owner_(owner),
compilation_info_(info),
call_context_(NULL),
inlining_kind_(inlining_kind),
+ tail_call_mode_(tail_call_mode),
function_return_(NULL),
test_context_(NULL),
entry_(NULL),
@@ -4097,6 +4057,7 @@ FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
arguments_elements_(NULL),
inlining_id_(inlining_id),
outer_source_position_(SourcePosition::Unknown()),
+ do_expression_scope_count_(0),
outer_(owner->function_state()) {
if (outer_ != NULL) {
// State for an inline function.
@@ -4153,7 +4114,7 @@ AstContext::AstContext(HOptimizedGraphBuilder* owner, Expression::Context kind)
typeof_mode_(NOT_INSIDE_TYPEOF) {
owner->set_ast_context(this); // Push.
#ifdef DEBUG
- DCHECK(owner->environment()->frame_type() == JS_FUNCTION);
+ DCHECK_EQ(JS_FUNCTION, owner->environment()->frame_type());
original_length_ = owner->environment()->length();
#endif
}
@@ -4165,18 +4126,18 @@ AstContext::~AstContext() {
EffectContext::~EffectContext() {
- DCHECK(owner()->HasStackOverflow() ||
- owner()->current_block() == NULL ||
+ DCHECK(owner()->HasStackOverflow() || owner()->current_block() == NULL ||
(owner()->environment()->length() == original_length_ &&
- owner()->environment()->frame_type() == JS_FUNCTION));
+ (owner()->environment()->frame_type() == JS_FUNCTION ||
+ owner()->environment()->frame_type() == TAIL_CALLER_FUNCTION)));
}
ValueContext::~ValueContext() {
- DCHECK(owner()->HasStackOverflow() ||
- owner()->current_block() == NULL ||
+ DCHECK(owner()->HasStackOverflow() || owner()->current_block() == NULL ||
(owner()->environment()->length() == original_length_ + 1 &&
- owner()->environment()->frame_type() == JS_FUNCTION));
+ (owner()->environment()->frame_type() == JS_FUNCTION ||
+ owner()->environment()->frame_type() == TAIL_CALLER_FUNCTION)));
}
@@ -4350,7 +4311,7 @@ void TestContext::BuildBranch(HValue* value) {
if (value != NULL && value->CheckFlag(HValue::kIsArguments)) {
builder->Bailout(kArgumentsObjectValueInATestContext);
}
- ToBooleanStub::Types expected(condition()->to_boolean_types());
+ ToBooleanICStub::Types expected(condition()->to_boolean_types());
ReturnControl(owner()->New<HBranch>(value, expected), BailoutId::None());
}
@@ -4566,7 +4527,6 @@ bool HGraph::Optimize(BailoutReason* bailout_reason) {
Run<HStackCheckEliminationPhase>();
if (FLAG_array_bounds_checks_elimination) Run<HBoundsCheckEliminationPhase>();
- if (FLAG_array_bounds_checks_hoisting) Run<HBoundsCheckHoistingPhase>();
if (FLAG_array_index_dehoisting) Run<HDehoistIndexComputationsPhase>();
if (FLAG_dead_code_elimination) Run<HDeadCodeEliminationPhase>();
@@ -4739,12 +4699,8 @@ void HOptimizedGraphBuilder::VisitBlock(Block* stmt) {
HInstruction* inner_context = Add<HCallRuntime>(
Runtime::FunctionForId(Runtime::kPushBlockContext), 2);
inner_context->SetFlag(HValue::kHasNoObservableSideEffects);
- HInstruction* instr = Add<HStoreFrameContext>(inner_context);
set_scope(scope);
environment()->BindContext(inner_context);
- if (instr->HasObservableSideEffects()) {
- AddSimulate(stmt->EntryId(), REMOVABLE_SIMULATE);
- }
}
VisitDeclarations(scope->declarations());
AddSimulate(stmt->DeclsId(), REMOVABLE_SIMULATE);
@@ -4759,11 +4715,7 @@ void HOptimizedGraphBuilder::VisitBlock(Block* stmt) {
inner_context, nullptr,
HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
- HInstruction* instr = Add<HStoreFrameContext>(outer_context);
environment()->BindContext(outer_context);
- if (instr->HasObservableSideEffects()) {
- AddSimulate(stmt->ExitId(), REMOVABLE_SIMULATE);
- }
}
HBasicBlock* break_block = break_info.break_block();
if (break_block != NULL) {
@@ -4811,23 +4763,24 @@ void HOptimizedGraphBuilder::VisitIfStatement(IfStatement* stmt) {
HBasicBlock* cond_false = graph()->CreateBasicBlock();
CHECK_BAILOUT(VisitForControl(stmt->condition(), cond_true, cond_false));
- if (cond_true->HasPredecessor()) {
- cond_true->SetJoinId(stmt->ThenId());
- set_current_block(cond_true);
- CHECK_BAILOUT(Visit(stmt->then_statement()));
- cond_true = current_block();
- } else {
- cond_true = NULL;
- }
+ // Technically, we should be able to handle the case when one side of
+ // the test is not connected, but this can trip up liveness analysis
+ // if we did not fully connect the test context based on some optimistic
+ // assumption. If such an assumption was violated, we would end up with
+ // an environment with optimized-out values. So we should always
+ // conservatively connect the test context.
+ CHECK(cond_true->HasPredecessor());
+ CHECK(cond_false->HasPredecessor());
- if (cond_false->HasPredecessor()) {
- cond_false->SetJoinId(stmt->ElseId());
- set_current_block(cond_false);
- CHECK_BAILOUT(Visit(stmt->else_statement()));
- cond_false = current_block();
- } else {
- cond_false = NULL;
- }
+ cond_true->SetJoinId(stmt->ThenId());
+ set_current_block(cond_true);
+ CHECK_BAILOUT(Visit(stmt->then_statement()));
+ cond_true = current_block();
+
+ cond_false->SetJoinId(stmt->ElseId());
+ set_current_block(cond_false);
+ CHECK_BAILOUT(Visit(stmt->else_statement()));
+ cond_false = current_block();
HBasicBlock* join = CreateJoin(cond_true, cond_false, stmt->IfId());
set_current_block(join);
@@ -4881,6 +4834,11 @@ void HOptimizedGraphBuilder::VisitContinueStatement(
DCHECK(!HasStackOverflow());
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
+
+ if (function_state()->IsInsideDoExpressionScope()) {
+ return Bailout(kDoExpressionUnmodelable);
+ }
+
Scope* outer_scope = NULL;
Scope* inner_scope = scope();
int drop_extra = 0;
@@ -4897,10 +4855,6 @@ void HOptimizedGraphBuilder::VisitContinueStatement(
HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
context = context_instruction;
}
- HInstruction* instr = Add<HStoreFrameContext>(context);
- if (instr->HasObservableSideEffects()) {
- AddSimulate(stmt->target()->EntryId(), REMOVABLE_SIMULATE);
- }
environment()->BindContext(context);
}
@@ -4913,6 +4867,11 @@ void HOptimizedGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
DCHECK(!HasStackOverflow());
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
+
+ if (function_state()->IsInsideDoExpressionScope()) {
+ return Bailout(kDoExpressionUnmodelable);
+ }
+
Scope* outer_scope = NULL;
Scope* inner_scope = scope();
int drop_extra = 0;
@@ -4929,10 +4888,6 @@ void HOptimizedGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
context = context_instruction;
}
- HInstruction* instr = Add<HStoreFrameContext>(context);
- if (instr->HasObservableSideEffects()) {
- AddSimulate(stmt->target()->ExitId(), REMOVABLE_SIMULATE);
- }
environment()->BindContext(context);
}
Goto(break_block);
@@ -5156,7 +5111,7 @@ void HOptimizedGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
HBasicBlock* body_exit =
JoinContinue(stmt, current_block(), break_info.continue_block());
HBasicBlock* loop_successor = NULL;
- if (body_exit != NULL && !stmt->cond()->ToBooleanIsTrue()) {
+ if (body_exit != NULL) {
set_current_block(body_exit);
loop_successor = graph()->CreateBasicBlock();
if (stmt->cond()->ToBooleanIsFalse()) {
@@ -5198,19 +5153,17 @@ void HOptimizedGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
// If the condition is constant true, do not generate a branch.
HBasicBlock* loop_successor = NULL;
- if (!stmt->cond()->ToBooleanIsTrue()) {
- HBasicBlock* body_entry = graph()->CreateBasicBlock();
- loop_successor = graph()->CreateBasicBlock();
- CHECK_BAILOUT(VisitForControl(stmt->cond(), body_entry, loop_successor));
- if (body_entry->HasPredecessor()) {
- body_entry->SetJoinId(stmt->BodyId());
- set_current_block(body_entry);
- }
- if (loop_successor->HasPredecessor()) {
- loop_successor->SetJoinId(stmt->ExitId());
- } else {
- loop_successor = NULL;
- }
+ HBasicBlock* body_entry = graph()->CreateBasicBlock();
+ loop_successor = graph()->CreateBasicBlock();
+ CHECK_BAILOUT(VisitForControl(stmt->cond(), body_entry, loop_successor));
+ if (body_entry->HasPredecessor()) {
+ body_entry->SetJoinId(stmt->BodyId());
+ set_current_block(body_entry);
+ }
+ if (loop_successor->HasPredecessor()) {
+ loop_successor->SetJoinId(stmt->ExitId());
+ } else {
+ loop_successor = NULL;
}
BreakAndContinueInfo break_info(stmt, scope());
@@ -5239,10 +5192,9 @@ void HOptimizedGraphBuilder::VisitForStatement(ForStatement* stmt) {
DCHECK(current_block() != NULL);
HBasicBlock* loop_entry = BuildLoopEntry(stmt);
- HBasicBlock* loop_successor = NULL;
+ HBasicBlock* loop_successor = graph()->CreateBasicBlock();
+ HBasicBlock* body_entry = graph()->CreateBasicBlock();
if (stmt->cond() != NULL) {
- HBasicBlock* body_entry = graph()->CreateBasicBlock();
- loop_successor = graph()->CreateBasicBlock();
CHECK_BAILOUT(VisitForControl(stmt->cond(), body_entry, loop_successor));
if (body_entry->HasPredecessor()) {
body_entry->SetJoinId(stmt->BodyId());
@@ -5253,6 +5205,14 @@ void HOptimizedGraphBuilder::VisitForStatement(ForStatement* stmt) {
} else {
loop_successor = NULL;
}
+ } else {
+ // Create dummy control flow so that variable liveness analysis
+ // produces teh correct result.
+ HControlInstruction* branch = New<HBranch>(graph()->GetConstantTrue());
+ branch->SetSuccessorAt(0, body_entry);
+ branch->SetSuccessorAt(1, loop_successor);
+ FinishCurrentBlock(branch);
+ set_current_block(body_entry);
}
BreakAndContinueInfo break_info(stmt, scope());
@@ -5540,9 +5500,8 @@ void HOptimizedGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
FastNewClosureDescriptor descriptor(isolate());
HValue* values[] = {context(), shared_info_value};
HConstant* stub_value = Add<HConstant>(stub.GetCode());
- instr = New<HCallWithDescriptor>(stub_value, 0, descriptor,
- Vector<HValue*>(values, arraysize(values)),
- NORMAL_CALL);
+ instr = New<HCallWithDescriptor>(
+ stub_value, 0, descriptor, Vector<HValue*>(values, arraysize(values)));
} else {
Add<HPushArguments>(shared_info_value);
Runtime::FunctionId function_id =
@@ -5571,10 +5530,12 @@ void HOptimizedGraphBuilder::VisitNativeFunctionLiteral(
void HOptimizedGraphBuilder::VisitDoExpression(DoExpression* expr) {
+ DoExpressionScope scope(this);
DCHECK(!HasStackOverflow());
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
- return Bailout(kDoExpression);
+ CHECK_ALIVE(VisitBlock(expr->block()));
+ Visit(expr->result());
}
@@ -5821,9 +5782,9 @@ void HOptimizedGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
context(), AddThisFunction(), Add<HConstant>(expr->literal_index()),
Add<HConstant>(expr->pattern()), Add<HConstant>(expr->flags())};
HConstant* stub_value = Add<HConstant>(callable.code());
- HInstruction* instr = New<HCallWithDescriptor>(
- stub_value, 0, callable.descriptor(),
- Vector<HValue*>(values, arraysize(values)), NORMAL_CALL);
+ HInstruction* instr =
+ New<HCallWithDescriptor>(stub_value, 0, callable.descriptor(),
+ Vector<HValue*>(values, arraysize(values)));
return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -6019,17 +5980,7 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
}
}
- if (expr->has_function()) {
- // Return the result of the transformation to fast properties
- // instead of the original since this operation changes the map
- // of the object. This makes sure that the original object won't
- // be used by other optimized code before it is transformed
- // (e.g. because of code motion).
- HToFastProperties* result = Add<HToFastProperties>(Pop());
- return ast_context()->ReturnValue(result);
- } else {
- return ast_context()->ReturnValue(Pop());
- }
+ return ast_context()->ReturnValue(Pop());
}
@@ -6053,9 +6004,8 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<Object> raw_boilerplate;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate(), raw_boilerplate,
- Runtime::CreateArrayLiteralBoilerplate(
- isolate(), literals, expr->constant_elements(),
- is_strong(function_language_mode())),
+ Runtime::CreateArrayLiteralBoilerplate(isolate(), literals,
+ expr->constant_elements()),
Bailout(kArrayBoilerplateCreationFailed));
boilerplate_object = Handle<JSObject>::cast(raw_boilerplate);
@@ -6591,13 +6541,7 @@ HValue* HOptimizedGraphBuilder::BuildMonomorphicAccess(
if (!info->IsFound()) {
DCHECK(info->IsLoad());
- if (is_strong(function_language_mode())) {
- return New<HCallRuntime>(
- Runtime::FunctionForId(Runtime::kThrowStrongModeImplicitConversion),
- 0);
- } else {
- return graph()->GetConstantUndefined();
- }
+ return graph()->GetConstantUndefined();
}
if (info->IsData()) {
@@ -6625,8 +6569,9 @@ HValue* HOptimizedGraphBuilder::BuildMonomorphicAccess(
info->NeedsWrappingFor(Handle<JSFunction>::cast(info->accessor()))) {
HValue* function = Add<HConstant>(info->accessor());
PushArgumentsFromEnvironment(argument_count);
- return New<HCallFunction>(function, argument_count,
- ConvertReceiverMode::kNotNullOrUndefined);
+ return NewCallFunction(function, argument_count, TailCallMode::kDisallow,
+ ConvertReceiverMode::kNotNullOrUndefined,
+ TailCallMode::kDisallow);
} else if (FLAG_inline_accessors && can_inline_accessor) {
bool success = info->IsLoad()
? TryInlineGetter(info->accessor(), info->map(), ast_id, return_id)
@@ -6640,8 +6585,9 @@ HValue* HOptimizedGraphBuilder::BuildMonomorphicAccess(
Bailout(kInliningBailedOut);
return nullptr;
}
- return BuildCallConstantFunction(Handle<JSFunction>::cast(info->accessor()),
- argument_count);
+ return NewCallConstantFunction(Handle<JSFunction>::cast(info->accessor()),
+ argument_count, TailCallMode::kDisallow,
+ TailCallMode::kDisallow);
}
DCHECK(info->IsDataConstant());
@@ -7600,9 +7546,13 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
// Get transition target for each map (NULL == no transition).
for (int i = 0; i < maps->length(); ++i) {
Handle<Map> map = maps->at(i);
- Handle<Map> transitioned_map =
- Map::FindTransitionedMap(map, &possible_transitioned_maps);
- transition_target.Add(transitioned_map);
+ Map* transitioned_map =
+ map->FindElementsKindTransitionedMap(&possible_transitioned_maps);
+ if (transitioned_map != nullptr) {
+ transition_target.Add(handle(transitioned_map));
+ } else {
+ transition_target.Add(Handle<Map>());
+ }
}
MapHandleList untransitionable_maps(maps->length());
@@ -8039,56 +7989,81 @@ void HOptimizedGraphBuilder::AddCheckPrototypeMaps(Handle<JSObject> holder,
}
}
-
-HInstruction* HOptimizedGraphBuilder::NewPlainFunctionCall(HValue* fun,
- int argument_count) {
- return New<HCallJSFunction>(fun, argument_count);
+void HOptimizedGraphBuilder::BuildEnsureCallable(HValue* object) {
+ NoObservableSideEffectsScope scope(this);
+ const Runtime::Function* throw_called_non_callable =
+ Runtime::FunctionForId(Runtime::kThrowCalledNonCallable);
+
+ IfBuilder is_not_function(this);
+ HValue* smi_check = is_not_function.If<HIsSmiAndBranch>(object);
+ is_not_function.Or();
+ HValue* map = AddLoadMap(object, smi_check);
+ HValue* bit_field =
+ Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField());
+ HValue* bit_field_masked = AddUncasted<HBitwise>(
+ Token::BIT_AND, bit_field, Add<HConstant>(1 << Map::kIsCallable));
+ is_not_function.IfNot<HCompareNumericAndBranch>(
+ bit_field_masked, Add<HConstant>(1 << Map::kIsCallable), Token::EQ);
+ is_not_function.Then();
+ {
+ Add<HPushArguments>(object);
+ Add<HCallRuntime>(throw_called_non_callable, 1);
+ }
+ is_not_function.End();
}
-
-HInstruction* HOptimizedGraphBuilder::NewArgumentAdaptorCall(
- HValue* fun, HValue* context,
- int argument_count, HValue* expected_param_count) {
- HValue* new_target = graph()->GetConstantUndefined();
+HInstruction* HOptimizedGraphBuilder::NewCallFunction(
+ HValue* function, int argument_count, TailCallMode syntactic_tail_call_mode,
+ ConvertReceiverMode convert_mode, TailCallMode tail_call_mode) {
+ if (syntactic_tail_call_mode == TailCallMode::kAllow) {
+ BuildEnsureCallable(function);
+ } else {
+ DCHECK_EQ(TailCallMode::kDisallow, tail_call_mode);
+ }
HValue* arity = Add<HConstant>(argument_count - 1);
- HValue* op_vals[] = {context, fun, new_target, arity, expected_param_count};
+ HValue* op_vals[] = {context(), function, arity};
- Callable callable = CodeFactory::ArgumentAdaptor(isolate());
+ Callable callable =
+ CodeFactory::Call(isolate(), convert_mode, tail_call_mode);
HConstant* stub = Add<HConstant>(callable.code());
return New<HCallWithDescriptor>(stub, argument_count, callable.descriptor(),
- Vector<HValue*>(op_vals, arraysize(op_vals)));
+ Vector<HValue*>(op_vals, arraysize(op_vals)),
+ syntactic_tail_call_mode);
}
-
-HInstruction* HOptimizedGraphBuilder::BuildCallConstantFunction(
- Handle<JSFunction> jsfun, int argument_count) {
- HValue* target = Add<HConstant>(jsfun);
- // For constant functions, we try to avoid calling the
- // argument adaptor and instead call the function directly
- int formal_parameter_count =
- jsfun->shared()->internal_formal_parameter_count();
- bool dont_adapt_arguments =
- (formal_parameter_count ==
- SharedFunctionInfo::kDontAdaptArgumentsSentinel);
- int arity = argument_count - 1;
- bool can_invoke_directly =
- dont_adapt_arguments || formal_parameter_count == arity;
- if (can_invoke_directly) {
- if (jsfun.is_identical_to(current_info()->closure())) {
- graph()->MarkRecursive();
- }
- return NewPlainFunctionCall(target, argument_count);
+HInstruction* HOptimizedGraphBuilder::NewCallFunctionViaIC(
+ HValue* function, int argument_count, TailCallMode syntactic_tail_call_mode,
+ ConvertReceiverMode convert_mode, TailCallMode tail_call_mode,
+ FeedbackVectorSlot slot) {
+ if (syntactic_tail_call_mode == TailCallMode::kAllow) {
+ BuildEnsureCallable(function);
} else {
- HValue* param_count_value = Add<HConstant>(formal_parameter_count);
- HValue* context = Add<HLoadNamedField>(
- target, nullptr, HObjectAccess::ForFunctionContextPointer());
- return NewArgumentAdaptorCall(target, context,
- argument_count, param_count_value);
+ DCHECK_EQ(TailCallMode::kDisallow, tail_call_mode);
}
- UNREACHABLE();
- return NULL;
+ int arity = argument_count - 1;
+ Handle<TypeFeedbackVector> vector(current_feedback_vector(), isolate());
+ HValue* index_val = Add<HConstant>(vector->GetIndex(slot));
+ HValue* vector_val = Add<HConstant>(vector);
+
+ HValue* op_vals[] = {context(), function, index_val, vector_val};
+
+ Callable callable = CodeFactory::CallICInOptimizedCode(
+ isolate(), arity, convert_mode, tail_call_mode);
+ HConstant* stub = Add<HConstant>(callable.code());
+
+ return New<HCallWithDescriptor>(stub, argument_count, callable.descriptor(),
+ Vector<HValue*>(op_vals, arraysize(op_vals)),
+ syntactic_tail_call_mode);
+}
+
+HInstruction* HOptimizedGraphBuilder::NewCallConstantFunction(
+ Handle<JSFunction> function, int argument_count,
+ TailCallMode syntactic_tail_call_mode, TailCallMode tail_call_mode) {
+ HValue* target = Add<HConstant>(function);
+ return New<HInvokeFunction>(target, function, argument_count,
+ syntactic_tail_call_mode, tail_call_mode);
}
@@ -8126,6 +8101,10 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
bool handled_string = false;
int ordered_functions = 0;
+ TailCallMode syntactic_tail_call_mode = expr->tail_call_mode();
+ TailCallMode tail_call_mode =
+ function_state()->ComputeTailCallMode(syntactic_tail_call_mode);
+
int i;
for (i = 0; i < maps->length() && ordered_functions < kMaxCallPolymorphism;
++i) {
@@ -8230,14 +8209,17 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
if (HasStackOverflow()) return;
} else {
// Since HWrapReceiver currently cannot actually wrap numbers and strings,
- // use the regular CallFunctionStub for method calls to wrap the receiver.
+ // use the regular call builtin for method calls to wrap the receiver.
// TODO(verwaest): Support creation of value wrappers directly in
// HWrapReceiver.
HInstruction* call =
- needs_wrapping ? NewUncasted<HCallFunction>(
- function, argument_count,
- ConvertReceiverMode::kNotNullOrUndefined)
- : BuildCallConstantFunction(target, argument_count);
+ needs_wrapping
+ ? NewCallFunction(
+ function, argument_count, syntactic_tail_call_mode,
+ ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode)
+ : NewCallConstantFunction(target, argument_count,
+ syntactic_tail_call_mode,
+ tail_call_mode);
PushArgumentsFromEnvironment(argument_count);
AddInstruction(call);
Drop(1); // Drop the function.
@@ -8266,8 +8248,9 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
environment()->SetExpressionStackAt(0, receiver);
CHECK_ALIVE(VisitExpressions(expr->arguments()));
- HInstruction* call = New<HCallFunction>(
- function, argument_count, ConvertReceiverMode::kNotNullOrUndefined);
+ HInstruction* call = NewCallFunction(
+ function, argument_count, syntactic_tail_call_mode,
+ ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode);
PushArgumentsFromEnvironment(argument_count);
@@ -8295,17 +8278,19 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
}
}
-
void HOptimizedGraphBuilder::TraceInline(Handle<JSFunction> target,
Handle<JSFunction> caller,
- const char* reason) {
+ const char* reason,
+ TailCallMode tail_call_mode) {
if (FLAG_trace_inlining) {
base::SmartArrayPointer<char> target_name =
target->shared()->DebugName()->ToCString();
base::SmartArrayPointer<char> caller_name =
caller->shared()->DebugName()->ToCString();
if (reason == NULL) {
- PrintF("Inlined %s called from %s.\n", target_name.get(),
+ const char* call_mode =
+ tail_call_mode == TailCallMode::kAllow ? "tail called" : "called";
+ PrintF("Inlined %s %s from %s.\n", target_name.get(), call_mode,
caller_name.get());
} else {
PrintF("Did not inline %s called from %s (%s).\n",
@@ -8362,12 +8347,12 @@ int HOptimizedGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
return nodes_added;
}
-
bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
int arguments_count,
HValue* implicit_return_value,
BailoutId ast_id, BailoutId return_id,
- InliningKind inlining_kind) {
+ InliningKind inlining_kind,
+ TailCallMode syntactic_tail_call_mode) {
if (target->context()->native_context() !=
top_info()->closure()->context()->native_context()) {
return false;
@@ -8376,7 +8361,6 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
if (nodes_added == kNotInlinable) return false;
Handle<JSFunction> caller = current_info()->closure();
-
if (nodes_added > Min(FLAG_max_inlined_nodes, kUnlimitedMaxInlinedNodes)) {
TraceInline(target, caller, "target AST is too large [early]");
return false;
@@ -8498,15 +8482,6 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
}
}
- // In strong mode it is an error to call a function with too few arguments.
- // In that case do not inline because then the arity check would be skipped.
- if (is_strong(function->language_mode()) &&
- arguments_count < function->parameter_count()) {
- TraceInline(target, caller,
- "too few arguments passed to a strong function");
- return false;
- }
-
// Generate the deoptimization data for the unoptimized version of
// the target function if we don't already have it.
if (!Compiler::EnsureDeoptimizationSupport(&target_info)) {
@@ -8537,17 +8512,15 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
// Save the pending call context. Set up new one for the inlined function.
// The function state is new-allocated because we need to delete it
// in two different places.
- FunctionState* target_state =
- new FunctionState(this, &target_info, inlining_kind, inlining_id);
+ FunctionState* target_state = new FunctionState(
+ this, &target_info, inlining_kind, inlining_id,
+ function_state()->ComputeTailCallMode(syntactic_tail_call_mode));
HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner_env =
- environment()->CopyForInlining(target,
- arguments_count,
- function,
- undefined,
- function_state()->inlining_kind());
+ HEnvironment* inner_env = environment()->CopyForInlining(
+ target, arguments_count, function, undefined,
+ function_state()->inlining_kind(), syntactic_tail_call_mode);
HConstant* context = Add<HConstant>(Handle<Context>(target->context()));
inner_env->BindContext(context);
@@ -8577,10 +8550,10 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
current_block()->UpdateEnvironment(inner_env);
Scope* saved_scope = scope();
set_scope(target_info.scope());
- HEnterInlined* enter_inlined =
- Add<HEnterInlined>(return_id, target, context, arguments_count, function,
- function_state()->inlining_kind(),
- function->scope()->arguments(), arguments_object);
+ HEnterInlined* enter_inlined = Add<HEnterInlined>(
+ return_id, target, context, arguments_count, function,
+ function_state()->inlining_kind(), function->scope()->arguments(),
+ arguments_object, syntactic_tail_call_mode);
if (top_info()->is_tracking_positions()) {
enter_inlined->set_inlining_id(inlining_id);
}
@@ -8608,7 +8581,7 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
graph()->update_type_change_checksum(type_info->own_type_change_checksum());
- TraceInline(target, caller, NULL);
+ TraceInline(target, caller, NULL, syntactic_tail_call_mode);
if (current_block() != NULL) {
FunctionState* state = function_state();
@@ -8692,7 +8665,8 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
bool HOptimizedGraphBuilder::TryInlineCall(Call* expr) {
return TryInline(expr->target(), expr->arguments()->length(), NULL,
- expr->id(), expr->ReturnId(), NORMAL_RETURN);
+ expr->id(), expr->ReturnId(), NORMAL_RETURN,
+ expr->tail_call_mode());
}
@@ -8700,7 +8674,7 @@ bool HOptimizedGraphBuilder::TryInlineConstruct(CallNew* expr,
HValue* implicit_return_value) {
return TryInline(expr->target(), expr->arguments()->length(),
implicit_return_value, expr->id(), expr->ReturnId(),
- CONSTRUCT_CALL_RETURN);
+ CONSTRUCT_CALL_RETURN, TailCallMode::kDisallow);
}
bool HOptimizedGraphBuilder::TryInlineGetter(Handle<Object> getter,
@@ -8710,7 +8684,7 @@ bool HOptimizedGraphBuilder::TryInlineGetter(Handle<Object> getter,
if (TryInlineApiGetter(getter, receiver_map, ast_id)) return true;
return getter->IsJSFunction() &&
TryInline(Handle<JSFunction>::cast(getter), 0, NULL, ast_id, return_id,
- GETTER_CALL_RETURN);
+ GETTER_CALL_RETURN, TailCallMode::kDisallow);
}
bool HOptimizedGraphBuilder::TryInlineSetter(Handle<Object> setter,
@@ -8721,7 +8695,8 @@ bool HOptimizedGraphBuilder::TryInlineSetter(Handle<Object> setter,
if (TryInlineApiSetter(setter, receiver_map, id)) return true;
return setter->IsJSFunction() &&
TryInline(Handle<JSFunction>::cast(setter), 1, implicit_return_value,
- id, assignment_id, SETTER_CALL_RETURN);
+ id, assignment_id, SETTER_CALL_RETURN,
+ TailCallMode::kDisallow);
}
@@ -8729,13 +8704,15 @@ bool HOptimizedGraphBuilder::TryInlineIndirectCall(Handle<JSFunction> function,
Call* expr,
int arguments_count) {
return TryInline(function, arguments_count, NULL, expr->id(),
- expr->ReturnId(), NORMAL_RETURN);
+ expr->ReturnId(), NORMAL_RETURN, expr->tail_call_mode());
}
bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr) {
if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
BuiltinFunctionId id = expr->target()->shared()->builtin_function_id();
+ // We intentionally ignore expr->tail_call_mode() here because builtins
+ // we inline here do not observe if they were tail called or not.
switch (id) {
case kMathExp:
if (!FLAG_fast_math) break;
@@ -8819,6 +8796,25 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
}
// Try to inline calls like Math.* as operations in the calling function.
switch (id) {
+ case kObjectHasOwnProperty: {
+ // It's not safe to look through the phi for elements if we're compiling
+ // for osr.
+ if (top_info()->is_osr()) return false;
+ if (argument_count != 2) return false;
+ HValue* key = Top();
+ if (!key->IsLoadKeyed()) return false;
+ HValue* elements = HLoadKeyed::cast(key)->elements();
+ if (!elements->IsPhi() || elements->OperandCount() != 1) return false;
+ if (!elements->OperandAt(0)->IsForInCacheArray()) return false;
+ HForInCacheArray* cache = HForInCacheArray::cast(elements->OperandAt(0));
+ HValue* receiver = environment()->ExpressionStackAt(1);
+ if (!receiver->IsPhi() || receiver->OperandCount() != 1) return false;
+ if (cache->enumerable() != receiver->OperandAt(0)) return false;
+ Drop(3); // key, receiver, function
+ Add<HCheckMapValue>(receiver, cache->map());
+ ast_context()->ReturnValue(graph()->GetConstantTrue());
+ return true;
+ }
case kStringCharCodeAt:
case kStringCharAt:
if (argument_count == 2) {
@@ -8841,6 +8837,9 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
if (argument_count == 2) {
HValue* argument = Pop();
Drop(2); // Receiver and function.
+ argument = AddUncasted<HForceRepresentation>(
+ argument, Representation::Integer32());
+ argument->SetFlag(HValue::kTruncatingToInt32);
HInstruction* result = NewUncasted<HStringCharFromCode>(argument);
ast_context()->ReturnInstruction(result, expr->id());
return true;
@@ -9053,7 +9052,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
Drop(args_count_no_receiver);
HValue* receiver = Pop();
- HValue* function = Pop();
+ Drop(1); // Function.
HValue* result;
{
@@ -9129,7 +9128,8 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
if_inline.Else();
{
Add<HPushArguments>(receiver);
- result = Add<HCallJSFunction>(function, 1);
+ result = AddInstruction(NewCallConstantFunction(
+ function, 1, TailCallMode::kDisallow, TailCallMode::kDisallow));
if (!ast_context()->IsEffect()) Push(result);
}
if_inline.End();
@@ -9193,12 +9193,8 @@ bool HOptimizedGraphBuilder::TryInlineApiFunctionCall(Call* expr,
Handle<JSFunction> function = expr->target();
int argc = expr->arguments()->length();
SmallMapList receiver_maps;
- return TryInlineApiCall(function,
- receiver,
- &receiver_maps,
- argc,
- expr->id(),
- kCallApiFunction);
+ return TryInlineApiCall(function, receiver, &receiver_maps, argc, expr->id(),
+ kCallApiFunction, expr->tail_call_mode());
}
@@ -9208,12 +9204,8 @@ bool HOptimizedGraphBuilder::TryInlineApiMethodCall(
SmallMapList* receiver_maps) {
Handle<JSFunction> function = expr->target();
int argc = expr->arguments()->length();
- return TryInlineApiCall(function,
- receiver,
- receiver_maps,
- argc,
- expr->id(),
- kCallApiMethod);
+ return TryInlineApiCall(function, receiver, receiver_maps, argc, expr->id(),
+ kCallApiMethod, expr->tail_call_mode());
}
bool HOptimizedGraphBuilder::TryInlineApiGetter(Handle<Object> function,
@@ -9223,10 +9215,8 @@ bool HOptimizedGraphBuilder::TryInlineApiGetter(Handle<Object> function,
receiver_maps.Add(receiver_map, zone());
return TryInlineApiCall(function,
NULL, // Receiver is on expression stack.
- &receiver_maps,
- 0,
- ast_id,
- kCallApiGetter);
+ &receiver_maps, 0, ast_id, kCallApiGetter,
+ TailCallMode::kDisallow);
}
bool HOptimizedGraphBuilder::TryInlineApiSetter(Handle<Object> function,
@@ -9236,22 +9226,23 @@ bool HOptimizedGraphBuilder::TryInlineApiSetter(Handle<Object> function,
receiver_maps.Add(receiver_map, zone());
return TryInlineApiCall(function,
NULL, // Receiver is on expression stack.
- &receiver_maps,
- 1,
- ast_id,
- kCallApiSetter);
+ &receiver_maps, 1, ast_id, kCallApiSetter,
+ TailCallMode::kDisallow);
}
-bool HOptimizedGraphBuilder::TryInlineApiCall(Handle<Object> function,
- HValue* receiver,
- SmallMapList* receiver_maps,
- int argc, BailoutId ast_id,
- ApiCallType call_type) {
+bool HOptimizedGraphBuilder::TryInlineApiCall(
+ Handle<Object> function, HValue* receiver, SmallMapList* receiver_maps,
+ int argc, BailoutId ast_id, ApiCallType call_type,
+ TailCallMode syntactic_tail_call_mode) {
if (function->IsJSFunction() &&
Handle<JSFunction>::cast(function)->context()->native_context() !=
top_info()->closure()->context()->native_context()) {
return false;
}
+ if (argc > CallApiCallbackStub::kArgMax) {
+ return false;
+ }
+
CallOptimization optimization(function);
if (!optimization.is_simple_api_call()) return false;
Handle<Map> holder_map;
@@ -9347,33 +9338,24 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(Handle<Object> function,
api_function_address, nullptr};
HInstruction* call = nullptr;
+ CHECK(argc <= CallApiCallbackStub::kArgMax);
if (!is_function) {
- CallApiAccessorStub stub(isolate(), is_store, call_data_undefined,
+ CallApiCallbackStub stub(isolate(), is_store, call_data_undefined,
!optimization.is_constant_call());
Handle<Code> code = stub.GetCode();
HConstant* code_value = Add<HConstant>(code);
- ApiAccessorDescriptor descriptor(isolate());
- call = New<HCallWithDescriptor>(
- code_value, argc + 1, descriptor,
- Vector<HValue*>(op_vals, arraysize(op_vals) - 1));
- } else if (argc <= CallApiFunctionWithFixedArgsStub::kMaxFixedArgs) {
- CallApiFunctionWithFixedArgsStub stub(isolate(), argc, call_data_undefined);
- Handle<Code> code = stub.GetCode();
- HConstant* code_value = Add<HConstant>(code);
- ApiFunctionWithFixedArgsDescriptor descriptor(isolate());
call = New<HCallWithDescriptor>(
- code_value, argc + 1, descriptor,
- Vector<HValue*>(op_vals, arraysize(op_vals) - 1));
- Drop(1); // Drop function.
+ code_value, argc + 1, stub.GetCallInterfaceDescriptor(),
+ Vector<HValue*>(op_vals, arraysize(op_vals) - 1),
+ syntactic_tail_call_mode);
} else {
- op_vals[arraysize(op_vals) - 1] = Add<HConstant>(argc);
- CallApiFunctionStub stub(isolate(), call_data_undefined);
+ CallApiCallbackStub stub(isolate(), argc, call_data_undefined);
Handle<Code> code = stub.GetCode();
HConstant* code_value = Add<HConstant>(code);
- ApiFunctionDescriptor descriptor(isolate());
- call =
- New<HCallWithDescriptor>(code_value, argc + 1, descriptor,
- Vector<HValue*>(op_vals, arraysize(op_vals)));
+ call = New<HCallWithDescriptor>(
+ code_value, argc + 1, stub.GetCallInterfaceDescriptor(),
+ Vector<HValue*>(op_vals, arraysize(op_vals) - 1),
+ syntactic_tail_call_mode);
Drop(1); // Drop function.
}
@@ -9405,9 +9387,14 @@ void HOptimizedGraphBuilder::HandleIndirectCall(Call* expr, HValue* function,
}
}
+ TailCallMode syntactic_tail_call_mode = expr->tail_call_mode();
+ TailCallMode tail_call_mode =
+ function_state()->ComputeTailCallMode(syntactic_tail_call_mode);
+
PushArgumentsFromEnvironment(arguments_count);
HInvokeFunction* call =
- New<HInvokeFunction>(function, known_function, arguments_count);
+ New<HInvokeFunction>(function, known_function, arguments_count,
+ syntactic_tail_call_mode, tail_call_mode);
Drop(1); // Function
ast_context()->ReturnInstruction(call, expr->id());
}
@@ -9459,13 +9446,15 @@ void HOptimizedGraphBuilder::BuildFunctionApply(Call* expr) {
HValue* checked_function = AddCheckMap(function, function_map);
if (function_state()->outer() == NULL) {
+ TailCallMode syntactic_tail_call_mode = expr->tail_call_mode();
+ TailCallMode tail_call_mode =
+ function_state()->ComputeTailCallMode(syntactic_tail_call_mode);
+
HInstruction* elements = Add<HArgumentsElements>(false);
HInstruction* length = Add<HArgumentsLength>(elements);
HValue* wrapped_receiver = BuildWrapReceiver(receiver, checked_function);
- HInstruction* result = New<HApplyArguments>(function,
- wrapped_receiver,
- length,
- elements);
+ HInstruction* result = New<HApplyArguments>(
+ function, wrapped_receiver, length, elements, tail_call_mode);
ast_context()->ReturnInstruction(result, expr->id());
} else {
// We are inside inlined function and we know exactly what is inside
@@ -9733,9 +9722,6 @@ bool HOptimizedGraphBuilder::CanBeFunctionApplyArguments(Call* expr) {
void HOptimizedGraphBuilder::VisitCall(Call* expr) {
- if (expr->tail_call_mode() == TailCallMode::kAllow) {
- return Bailout(kTailCall);
- }
DCHECK(!HasStackOverflow());
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
@@ -9744,6 +9730,10 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
int argument_count = expr->arguments()->length() + 1; // Plus receiver.
HInstruction* call = NULL;
+ TailCallMode syntactic_tail_call_mode = expr->tail_call_mode();
+ TailCallMode tail_call_mode =
+ function_state()->ComputeTailCallMode(syntactic_tail_call_mode);
+
Property* prop = callee->AsProperty();
if (prop != NULL) {
CHECK_ALIVE(VisitForValue(prop->obj()));
@@ -9797,16 +9787,19 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
// Wrap the receiver if necessary.
if (NeedsWrapping(maps->first(), known_function)) {
// Since HWrapReceiver currently cannot actually wrap numbers and
- // strings, use the regular CallFunctionStub for method calls to wrap
+ // strings, use the regular call builtin for method calls to wrap
// the receiver.
// TODO(verwaest): Support creation of value wrappers directly in
// HWrapReceiver.
- call = New<HCallFunction>(function, argument_count,
- ConvertReceiverMode::kNotNullOrUndefined);
+ call = NewCallFunction(
+ function, argument_count, syntactic_tail_call_mode,
+ ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode);
} else if (TryInlineCall(expr)) {
return;
} else {
- call = BuildCallConstantFunction(known_function, argument_count);
+ call =
+ NewCallConstantFunction(known_function, argument_count,
+ syntactic_tail_call_mode, tail_call_mode);
}
} else {
@@ -9825,8 +9818,9 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
Push(receiver);
CHECK_ALIVE(VisitExpressions(expr->arguments(), arguments_flag));
- call = New<HCallFunction>(function, argument_count,
- ConvertReceiverMode::kNotNullOrUndefined);
+ call = NewCallFunction(function, argument_count, syntactic_tail_call_mode,
+ ConvertReceiverMode::kNotNullOrUndefined,
+ tail_call_mode);
}
PushArgumentsFromEnvironment(argument_count);
@@ -9873,20 +9867,22 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
if (TryInlineCall(expr)) return;
PushArgumentsFromEnvironment(argument_count);
- call = BuildCallConstantFunction(expr->target(), argument_count);
+ call = NewCallConstantFunction(expr->target(), argument_count,
+ syntactic_tail_call_mode, tail_call_mode);
} else {
PushArgumentsFromEnvironment(argument_count);
- HCallFunction* call_function = New<HCallFunction>(
- function, argument_count, ConvertReceiverMode::kNullOrUndefined);
- call = call_function;
if (expr->is_uninitialized() &&
expr->IsUsingCallFeedbackICSlot(isolate())) {
// We've never seen this call before, so let's have Crankshaft learn
// through the type vector.
- Handle<TypeFeedbackVector> vector =
- handle(current_feedback_vector(), isolate());
- FeedbackVectorSlot slot = expr->CallFeedbackICSlot();
- call_function->SetVectorAndSlot(vector, slot);
+ call = NewCallFunctionViaIC(function, argument_count,
+ syntactic_tail_call_mode,
+ ConvertReceiverMode::kNullOrUndefined,
+ tail_call_mode, expr->CallFeedbackICSlot());
+ } else {
+ call = NewCallFunction(
+ function, argument_count, syntactic_tail_call_mode,
+ ConvertReceiverMode::kNullOrUndefined, tail_call_mode);
}
}
}
@@ -10509,7 +10505,29 @@ void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
if (expr->is_jsruntime()) {
- return Bailout(kCallToAJavaScriptRuntimeFunction);
+ // Crankshaft always specializes to the native context, so we can just grab
+ // the constant function from the current native context and embed that into
+ // the code object.
+ Handle<JSFunction> known_function(
+ JSFunction::cast(
+ current_info()->native_context()->get(expr->context_index())),
+ isolate());
+
+ // The callee and the receiver both have to be pushed onto the operand stack
+ // before arguments are being evaluated.
+ HConstant* function = Add<HConstant>(known_function);
+ HValue* receiver = ImplicitReceiverFor(function, known_function);
+ Push(function);
+ Push(receiver);
+
+ int argument_count = expr->arguments()->length() + 1; // Count receiver.
+ CHECK_ALIVE(VisitExpressions(expr->arguments()));
+ PushArgumentsFromEnvironment(argument_count);
+ HInstruction* call = NewCallConstantFunction(known_function, argument_count,
+ TailCallMode::kDisallow,
+ TailCallMode::kDisallow);
+ Drop(1); // Function
+ return ast_context()->ReturnInstruction(call, expr->id());
}
const Runtime::Function* function = expr->function();
@@ -10661,7 +10679,7 @@ HInstruction* HOptimizedGraphBuilder::BuildIncrement(
rep = Representation::Smi();
}
- if (returns_original_input && !is_strong(function_language_mode())) {
+ if (returns_original_input) {
// We need an explicit HValue representing ToNumber(input). The
// actual HChange instruction we need is (sometimes) added in a later
// phase, so it is not available now to be used as an input to HAdd and
@@ -10686,11 +10704,7 @@ HInstruction* HOptimizedGraphBuilder::BuildIncrement(
add->set_observed_input_representation(1, rep);
add->set_observed_input_representation(2, Representation::Smi());
}
- if (!is_strong(function_language_mode())) {
- instr->ClearAllSideEffects();
- } else {
- Add<HSimulate>(expr->ToNumberId(), REMOVABLE_SIMULATE);
- }
+ instr->ClearAllSideEffects();
instr->SetFlag(HInstruction::kCannotBeTagged);
return instr;
}
@@ -11331,12 +11345,10 @@ void HOptimizedGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
// Translate right subexpression by visiting it in the same AST
// context as the entire expression.
- if (eval_right->HasPredecessor()) {
- eval_right->SetJoinId(expr->RightId());
- set_current_block(eval_right);
- Visit(expr->right());
- }
-
+ CHECK(eval_right->HasPredecessor());
+ eval_right->SetJoinId(expr->RightId());
+ set_current_block(eval_right);
+ Visit(expr->right());
} else if (ast_context()->IsValue()) {
CHECK_ALIVE(VisitForValue(expr->left()));
DCHECK(current_block() != NULL);
@@ -11358,7 +11370,7 @@ void HOptimizedGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
// We need an extra block to maintain edge-split form.
HBasicBlock* empty_block = graph()->CreateBasicBlock();
HBasicBlock* eval_right = graph()->CreateBasicBlock();
- ToBooleanStub::Types expected(expr->left()->to_boolean_types());
+ ToBooleanICStub::Types expected(expr->left()->to_boolean_types());
HBranch* test = is_logical_and
? New<HBranch>(left_value, expected, eval_right, empty_block)
: New<HBranch>(left_value, expected, empty_block, eval_right);
@@ -11392,20 +11404,22 @@ void HOptimizedGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
// second one is not a merge node, and that we really have no good AST ID to
// put on that first HSimulate.
- if (empty_block->HasPredecessor()) {
- empty_block->SetJoinId(expr->id());
- } else {
- empty_block = NULL;
- }
+ // Technically, we should be able to handle the case when one side of
+ // the test is not connected, but this can trip up liveness analysis
+ // if we did not fully connect the test context based on some optimistic
+ // assumption. If such an assumption was violated, we would end up with
+ // an environment with optimized-out values. So we should always
+ // conservatively connect the test context.
- if (right_block->HasPredecessor()) {
- right_block->SetJoinId(expr->RightId());
- set_current_block(right_block);
- CHECK_BAILOUT(VisitForEffect(expr->right()));
- right_block = current_block();
- } else {
- right_block = NULL;
- }
+ CHECK(right_block->HasPredecessor());
+ CHECK(empty_block->HasPredecessor());
+
+ empty_block->SetJoinId(expr->id());
+
+ right_block->SetJoinId(expr->RightId());
+ set_current_block(right_block);
+ CHECK_BAILOUT(VisitForEffect(expr->right()));
+ right_block = current_block();
HBasicBlock* join_block =
CreateJoin(empty_block, right_block, expr->id());
@@ -11474,7 +11488,7 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
return HandleLiteralCompareTypeof(expr, sub_expr, check);
}
- if (expr->IsLiteralCompareUndefined(&sub_expr, isolate())) {
+ if (expr->IsLiteralCompareUndefined(&sub_expr)) {
return HandleLiteralCompareNil(expr, sub_expr, kUndefinedValue);
}
if (expr->IsLiteralCompareNull(&sub_expr)) {
@@ -11510,6 +11524,7 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
}
if (op == Token::INSTANCEOF) {
+ DCHECK(!FLAG_harmony_instanceof);
// Check to see if the rhs of the instanceof is a known function.
if (right->IsConstant() &&
HConstant::cast(right)->handle(isolate())->IsJSFunction()) {
@@ -11699,6 +11714,20 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
New<HCompareNumericAndBranch>(left, right, op);
return result;
} else {
+ if (op == Token::EQ) {
+ if (left->IsConstant() &&
+ HConstant::cast(left)->GetInstanceType() == ODDBALL_TYPE &&
+ HConstant::cast(left)->IsUndetectable()) {
+ return New<HIsUndetectableAndBranch>(right);
+ }
+
+ if (right->IsConstant() &&
+ HConstant::cast(right)->GetInstanceType() == ODDBALL_TYPE &&
+ HConstant::cast(right)->IsUndetectable()) {
+ return New<HIsUndetectableAndBranch>(left);
+ }
+ }
+
if (combined_rep.IsTagged() || combined_rep.IsNone()) {
HCompareGeneric* result = Add<HCompareGeneric>(left, right, op);
result->set_observed_input_representation(1, left_rep);
@@ -11738,22 +11767,17 @@ void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
if (!top_info()->is_tracking_positions()) SetSourcePosition(expr->position());
CHECK_ALIVE(VisitForValue(sub_expr));
HValue* value = Pop();
+ HControlInstruction* instr;
if (expr->op() == Token::EQ_STRICT) {
HConstant* nil_constant = nil == kNullValue
? graph()->GetConstantNull()
: graph()->GetConstantUndefined();
- HCompareObjectEqAndBranch* instr =
- New<HCompareObjectEqAndBranch>(value, nil_constant);
- return ast_context()->ReturnControl(instr, expr->id());
+ instr = New<HCompareObjectEqAndBranch>(value, nil_constant);
} else {
DCHECK_EQ(Token::EQ, expr->op());
- Type* type = expr->combined_type()->Is(Type::None())
- ? Type::Any()
- : expr->combined_type();
- HIfContinuation continuation;
- BuildCompareNil(value, type, &continuation);
- return ast_context()->ReturnContinuation(&continuation, expr->id());
+ instr = New<HIsUndetectableAndBranch>(value);
}
+ return ast_context()->ReturnControl(instr, expr->id());
}
@@ -12268,22 +12292,13 @@ void HOptimizedGraphBuilder::GenerateToInteger(CallRuntime* call) {
if (input->type().IsSmi()) {
return ast_context()->ReturnValue(input);
} else {
- IfBuilder if_inputissmi(this);
- if_inputissmi.If<HIsSmiAndBranch>(input);
- if_inputissmi.Then();
- {
- // Return the input value.
- Push(input);
- Add<HSimulate>(call->id(), FIXED_SIMULATE);
- }
- if_inputissmi.Else();
- {
- Add<HPushArguments>(input);
- Push(Add<HCallRuntime>(Runtime::FunctionForId(Runtime::kToInteger), 1));
- Add<HSimulate>(call->id(), FIXED_SIMULATE);
- }
- if_inputissmi.End();
- return ast_context()->ReturnValue(Pop());
+ Callable callable = CodeFactory::ToInteger(isolate());
+ HValue* stub = Add<HConstant>(callable.code());
+ HValue* values[] = {context(), input};
+ HInstruction* result =
+ New<HCallWithDescriptor>(stub, 0, callable.descriptor(),
+ Vector<HValue*>(values, arraysize(values)));
+ return ast_context()->ReturnInstruction(result, call->id());
}
}
@@ -12528,6 +12543,18 @@ void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
return ast_context()->ReturnInstruction(result, call->id());
}
+// Support for direct creation of new objects.
+void HOptimizedGraphBuilder::GenerateNewObject(CallRuntime* call) {
+ DCHECK_EQ(2, call->arguments()->length());
+ CHECK_ALIVE(VisitExpressions(call->arguments()));
+ FastNewObjectStub stub(isolate());
+ FastNewObjectDescriptor descriptor(isolate());
+ HValue* values[] = {context(), Pop(), Pop()};
+ HConstant* stub_value = Add<HConstant>(stub.GetCode());
+ HInstruction* result = New<HCallWithDescriptor>(
+ stub_value, 0, descriptor, Vector<HValue*>(values, arraysize(values)));
+ return ast_context()->ReturnInstruction(result, call->id());
+}
// Support for direct calls from JavaScript to native RegExp code.
void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
@@ -12621,6 +12648,45 @@ void HOptimizedGraphBuilder::GenerateNumberToString(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateCall(CallRuntime* call) {
DCHECK_LE(2, call->arguments()->length());
CHECK_ALIVE(VisitExpressions(call->arguments()));
+
+ // Try and customize ES6 instanceof here.
+ // We should at least have the constructor on the expression stack.
+ if (FLAG_harmony_instanceof && FLAG_harmony_instanceof_opt &&
+ call->arguments()->length() == 3) {
+ HValue* target = environment()->ExpressionStackAt(2);
+ if (target->IsConstant()) {
+ HConstant* constant_function = HConstant::cast(target);
+ if (constant_function->handle(isolate())->IsJSFunction()) {
+ Handle<JSFunction> func =
+ Handle<JSFunction>::cast(constant_function->handle(isolate()));
+ if (*func == isolate()->native_context()->ordinary_has_instance()) {
+ // Look at the function, which will be argument 1.
+ HValue* right = environment()->ExpressionStackAt(1);
+ if (right->IsConstant() &&
+ HConstant::cast(right)->handle(isolate())->IsJSFunction()) {
+ Handle<JSFunction> constructor = Handle<JSFunction>::cast(
+ HConstant::cast(right)->handle(isolate()));
+ if (constructor->IsConstructor() &&
+ !constructor->map()->has_non_instance_prototype()) {
+ JSFunction::EnsureHasInitialMap(constructor);
+ DCHECK(constructor->has_initial_map());
+ Handle<Map> initial_map(constructor->initial_map(), isolate());
+ top_info()->dependencies()->AssumeInitialMapCantChange(
+ initial_map);
+ HInstruction* prototype =
+ Add<HConstant>(handle(initial_map->prototype(), isolate()));
+ HValue* left = environment()->ExpressionStackAt(0);
+ HHasInPrototypeChainAndBranch* result =
+ New<HHasInPrototypeChainAndBranch>(left, prototype);
+ Drop(3);
+ return ast_context()->ReturnControl(result, call->id());
+ }
+ }
+ }
+ }
+ }
+ }
+
CallTrampolineDescriptor descriptor(isolate());
PushArgumentsFromEnvironment(call->arguments()->length() - 1);
HValue* trampoline = Add<HConstant>(isolate()->builtins()->Call());
@@ -12646,24 +12712,6 @@ void HOptimizedGraphBuilder::GenerateMathPow(CallRuntime* call) {
}
-void HOptimizedGraphBuilder::GenerateMathClz32(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathClz32);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateMathFloor(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathFloor);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
void HOptimizedGraphBuilder::GenerateMathLogRT(CallRuntime* call) {
DCHECK(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -12673,15 +12721,6 @@ void HOptimizedGraphBuilder::GenerateMathLogRT(CallRuntime* call) {
}
-void HOptimizedGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathSqrt);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
void HOptimizedGraphBuilder::GenerateFixedArrayGet(CallRuntime* call) {
DCHECK(call->arguments()->length() == 2);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -12888,6 +12927,12 @@ void HOptimizedGraphBuilder::GenerateDebugIsActive(CallRuntime* call) {
return ast_context()->ReturnValue(value);
}
+void HOptimizedGraphBuilder::GenerateGetOrdinaryHasInstance(CallRuntime* call) {
+ DCHECK(call->arguments()->length() == 0);
+ // ordinary_has_instance is immutable so we can treat it as a constant.
+ HValue* value = Add<HConstant>(isolate()->ordinary_has_instance());
+ return ast_context()->ReturnValue(value);
+}
#undef CHECK_BAILOUT
#undef CHECK_ALIVE
@@ -13121,14 +13166,21 @@ HEnvironment* HEnvironment::CreateStubEnvironment(HEnvironment* outer,
return new_env;
}
+void HEnvironment::MarkAsTailCaller() {
+ DCHECK_EQ(JS_FUNCTION, frame_type());
+ frame_type_ = TAIL_CALLER_FUNCTION;
+}
+
+void HEnvironment::ClearTailCallerMark() {
+ DCHECK_EQ(TAIL_CALLER_FUNCTION, frame_type());
+ frame_type_ = JS_FUNCTION;
+}
HEnvironment* HEnvironment::CopyForInlining(
- Handle<JSFunction> target,
- int arguments,
- FunctionLiteral* function,
- HConstant* undefined,
- InliningKind inlining_kind) const {
- DCHECK(frame_type() == JS_FUNCTION);
+ Handle<JSFunction> target, int arguments, FunctionLiteral* function,
+ HConstant* undefined, InliningKind inlining_kind,
+ TailCallMode syntactic_tail_call_mode) const {
+ DCHECK_EQ(JS_FUNCTION, frame_type());
// Outer environment is a copy of this one without the arguments.
int arity = function->scope()->num_parameters();
@@ -13137,6 +13189,11 @@ HEnvironment* HEnvironment::CopyForInlining(
outer->Drop(arguments + 1); // Including receiver.
outer->ClearHistory();
+ if (syntactic_tail_call_mode == TailCallMode::kAllow) {
+ DCHECK_EQ(NORMAL_RETURN, inlining_kind);
+ outer->MarkAsTailCaller();
+ }
+
if (inlining_kind == CONSTRUCT_CALL_RETURN) {
// Create artificial constructor stub environment. The receiver should
// actually be the constructor function, but we pass the newly allocated
diff --git a/deps/v8/src/crankshaft/hydrogen.h b/deps/v8/src/crankshaft/hydrogen.h
index ce0d0df6aa..10c0baa29d 100644
--- a/deps/v8/src/crankshaft/hydrogen.h
+++ b/deps/v8/src/crankshaft/hydrogen.h
@@ -11,6 +11,7 @@
#include "src/ast/scopes.h"
#include "src/bailout-reason.h"
#include "src/compiler.h"
+#include "src/crankshaft/compilation-phase.h"
#include "src/crankshaft/hydrogen-instructions.h"
#include "src/zone.h"
@@ -293,8 +294,6 @@ class HLoopInformation final : public ZoneObject {
};
-class BoundsCheckTable;
-class InductionVariableBlocksTable;
class HGraph final : public ZoneObject {
public:
explicit HGraph(CompilationInfo* info, CallInterfaceDescriptor descriptor);
@@ -333,6 +332,7 @@ class HGraph final : public ZoneObject {
HConstant* GetConstantBool(bool value);
HConstant* GetConstantHole();
HConstant* GetConstantNull();
+ HConstant* GetConstantOptimizedOut();
HConstant* GetInvalidContext();
bool IsConstantUndefined(HConstant* constant);
@@ -400,9 +400,6 @@ class HGraph final : public ZoneObject {
use_optimistic_licm_ = value;
}
- void MarkRecursive() { is_recursive_ = true; }
- bool is_recursive() const { return is_recursive_; }
-
void MarkDependsOnEmptyArrayProtoElements() {
// Add map dependency if not already added.
if (depends_on_empty_array_proto_elements_) return;
@@ -474,6 +471,7 @@ class HGraph final : public ZoneObject {
SetOncePointer<HConstant> constant_false_;
SetOncePointer<HConstant> constant_the_hole_;
SetOncePointer<HConstant> constant_null_;
+ SetOncePointer<HConstant> constant_optimized_out_;
SetOncePointer<HConstant> constant_invalid_context_;
HOsrBuilder* osr_;
@@ -482,7 +480,6 @@ class HGraph final : public ZoneObject {
CallInterfaceDescriptor descriptor_;
Zone* zone_;
- bool is_recursive_;
bool use_optimistic_licm_;
bool depends_on_empty_array_proto_elements_;
int type_change_checksum_;
@@ -504,10 +501,10 @@ enum FrameType {
JS_GETTER,
JS_SETTER,
ARGUMENTS_ADAPTOR,
+ TAIL_CALLER_FUNCTION,
STUB
};
-
class HEnvironment final : public ZoneObject {
public:
HEnvironment(HEnvironment* outer,
@@ -616,16 +613,21 @@ class HEnvironment final : public ZoneObject {
// Create an "inlined version" of this environment, where the original
// environment is the outer environment but the top expression stack
// elements are moved to an inner environment as parameters.
- HEnvironment* CopyForInlining(Handle<JSFunction> target,
- int arguments,
- FunctionLiteral* function,
- HConstant* undefined,
- InliningKind inlining_kind) const;
+ HEnvironment* CopyForInlining(Handle<JSFunction> target, int arguments,
+ FunctionLiteral* function, HConstant* undefined,
+ InliningKind inlining_kind,
+ TailCallMode syntactic_tail_call_mode) const;
HEnvironment* DiscardInlined(bool drop_extra) {
HEnvironment* outer = outer_;
- while (outer->frame_type() != JS_FUNCTION) outer = outer->outer_;
+ while (outer->frame_type() != JS_FUNCTION &&
+ outer->frame_type() != TAIL_CALLER_FUNCTION) {
+ outer = outer->outer_;
+ }
if (drop_extra) outer->Drop(1);
+ if (outer->frame_type() == TAIL_CALLER_FUNCTION) {
+ outer->ClearTailCallerMark();
+ }
return outer;
}
@@ -683,6 +685,11 @@ class HEnvironment final : public ZoneObject {
FrameType frame_type,
int arguments) const;
+ // Marks current environment as tail caller by setting frame type to
+ // TAIL_CALLER_FUNCTION.
+ void MarkAsTailCaller();
+ void ClearTailCallerMark();
+
// True if index is included in the expression stack part of the environment.
bool HasExpressionAt(int index) const;
@@ -852,10 +859,9 @@ class TestContext final : public AstContext {
class FunctionState final {
public:
- FunctionState(HOptimizedGraphBuilder* owner,
- CompilationInfo* info,
- InliningKind inlining_kind,
- int inlining_id);
+ FunctionState(HOptimizedGraphBuilder* owner, CompilationInfo* info,
+ InliningKind inlining_kind, int inlining_id,
+ TailCallMode tail_call_mode);
~FunctionState();
CompilationInfo* compilation_info() { return compilation_info_; }
@@ -870,6 +876,11 @@ class FunctionState final {
FunctionState* outer() { return outer_; }
+ TailCallMode ComputeTailCallMode(TailCallMode tail_call_mode) const {
+ if (tail_call_mode_ == TailCallMode::kDisallow) return tail_call_mode_;
+ return tail_call_mode;
+ }
+
HEnterInlined* entry() { return entry_; }
void set_entry(HEnterInlined* entry) { entry_ = entry; }
@@ -887,6 +898,10 @@ class FunctionState final {
int inlining_id() const { return inlining_id_; }
+ void IncrementInDoExpressionScope() { do_expression_scope_count_++; }
+ void DecrementInDoExpressionScope() { do_expression_scope_count_--; }
+ bool IsInsideDoExpressionScope() { return do_expression_scope_count_ > 0; }
+
private:
HOptimizedGraphBuilder* owner_;
@@ -899,6 +914,10 @@ class FunctionState final {
// The kind of call which is currently being inlined.
InliningKind inlining_kind_;
+ // Defines whether the calls with TailCallMode::kAllow in the function body
+ // can be generated as tail calls.
+ TailCallMode tail_call_mode_;
+
// When inlining in an effect or value context, this is the return block.
// It is NULL otherwise. When inlining in a test context, there are a
// pair of return blocks in the context. When not inlining, there is no
@@ -919,6 +938,8 @@ class FunctionState final {
int inlining_id_;
SourcePosition outer_source_position_;
+ int do_expression_scope_count_;
+
FunctionState* outer_;
};
@@ -1267,6 +1288,26 @@ class HGraphBuilder {
return AddInstructionTyped(New<I>(p1, p2, p3, p4, p5, p6, p7, p8));
}
+ template <class I, class P1, class P2, class P3, class P4, class P5, class P6,
+ class P7, class P8, class P9>
+ I* New(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8, P9 p9) {
+ return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5, p6, p7, p8,
+ p9);
+ }
+
+ template <class I, class P1, class P2, class P3, class P4, class P5, class P6,
+ class P7, class P8, class P9>
+ HInstruction* AddUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7,
+ P8 p8, P9 p9) {
+ return AddInstruction(NewUncasted<I>(p1, p2, p3, p4, p5, p6, p7, p8, p8));
+ }
+
+ template <class I, class P1, class P2, class P3, class P4, class P5, class P6,
+ class P7, class P8, class P9>
+ I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8, P9 p9) {
+ return AddInstructionTyped(New<I>(p1, p2, p3, p4, p5, p6, p7, p8, p9));
+ }
+
void AddSimulate(BailoutId id, RemovableSimulate removable = FIXED_SIMULATE);
// When initializing arrays, we'll unfold the loop if the number of elements
@@ -1837,11 +1878,6 @@ class HGraphBuilder {
HValue* BuildElementIndexHash(HValue* index);
- enum MapEmbedding { kEmbedMapsDirectly, kEmbedMapsViaWeakCells };
-
- void BuildCompareNil(HValue* value, Type* type, HIfContinuation* continuation,
- MapEmbedding map_embedding = kEmbedMapsDirectly);
-
void BuildCreateAllocationMemento(HValue* previous_object,
HValue* previous_object_size,
HValue* payload);
@@ -2198,6 +2234,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
F(IsRegExp) \
F(IsJSProxy) \
F(Call) \
+ F(NewObject) \
F(ValueOf) \
F(StringCharFromCode) \
F(StringCharAt) \
@@ -2222,6 +2259,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
F(RegExpSource) \
F(NumberToString) \
F(DebugIsActive) \
+ F(GetOrdinaryHasInstance) \
/* Typed Arrays */ \
F(TypedArrayInitialize) \
F(MaxSmi) \
@@ -2235,9 +2273,6 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
F(ConstructDouble) \
F(DoubleHi) \
F(DoubleLo) \
- F(MathClz32) \
- F(MathFloor) \
- F(MathSqrt) \
F(MathLogRT) \
/* ES6 Collections */ \
F(MapClear) \
@@ -2404,7 +2439,8 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
int InliningAstSize(Handle<JSFunction> target);
bool TryInline(Handle<JSFunction> target, int arguments_count,
HValue* implicit_return_value, BailoutId ast_id,
- BailoutId return_id, InliningKind inlining_kind);
+ BailoutId return_id, InliningKind inlining_kind,
+ TailCallMode syntactic_tail_call_mode);
bool TryInlineCall(Call* expr);
bool TryInlineConstruct(CallNew* expr, HValue* implicit_return_value);
@@ -2435,16 +2471,17 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
BailoutId ast_id);
bool TryInlineApiCall(Handle<Object> function, HValue* receiver,
SmallMapList* receiver_maps, int argc, BailoutId ast_id,
- ApiCallType call_type);
+ ApiCallType call_type,
+ TailCallMode syntactic_tail_call_mode);
static bool IsReadOnlyLengthDescriptor(Handle<Map> jsarray_map);
static bool CanInlineArrayResizeOperation(Handle<Map> receiver_map);
// If --trace-inlining, print a line of the inlining trace. Inlining
// succeeded if the reason string is NULL and failed if there is a
// non-NULL reason string.
- void TraceInline(Handle<JSFunction> target,
- Handle<JSFunction> caller,
- const char* failure_reason);
+ void TraceInline(Handle<JSFunction> target, Handle<JSFunction> caller,
+ const char* failure_reason,
+ TailCallMode tail_call_mode = TailCallMode::kDisallow);
void HandleGlobalVariableAssignment(Variable* var, HValue* value,
FeedbackVectorSlot slot,
@@ -2826,14 +2863,23 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
void AddCheckPrototypeMaps(Handle<JSObject> holder,
Handle<Map> receiver_map);
- HInstruction* NewPlainFunctionCall(HValue* fun, int argument_count);
+ void BuildEnsureCallable(HValue* object);
+
+ HInstruction* NewCallFunction(HValue* function, int argument_count,
+ TailCallMode syntactic_tail_call_mode,
+ ConvertReceiverMode convert_mode,
+ TailCallMode tail_call_mode);
- HInstruction* NewArgumentAdaptorCall(HValue* fun, HValue* context,
- int argument_count,
- HValue* expected_param_count);
+ HInstruction* NewCallFunctionViaIC(HValue* function, int argument_count,
+ TailCallMode syntactic_tail_call_mode,
+ ConvertReceiverMode convert_mode,
+ TailCallMode tail_call_mode,
+ FeedbackVectorSlot slot);
- HInstruction* BuildCallConstantFunction(Handle<JSFunction> target,
- int argument_count);
+ HInstruction* NewCallConstantFunction(Handle<JSFunction> target,
+ int argument_count,
+ TailCallMode syntactic_tail_call_mode,
+ TailCallMode tail_call_mode);
bool CanBeFunctionApplyArguments(Call* expr);
@@ -3032,6 +3078,19 @@ class NoObservableSideEffectsScope final {
HGraphBuilder* builder_;
};
+class DoExpressionScope final {
+ public:
+ explicit DoExpressionScope(HOptimizedGraphBuilder* builder)
+ : builder_(builder) {
+ builder_->function_state()->IncrementInDoExpressionScope();
+ }
+ ~DoExpressionScope() {
+ builder_->function_state()->DecrementInDoExpressionScope();
+ }
+
+ private:
+ HOptimizedGraphBuilder* builder_;
+};
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
index 239db8ba13..d8b20c87a7 100644
--- a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
@@ -57,13 +57,6 @@ bool LCodeGen::GenerateCode() {
// the frame (that is done in GeneratePrologue).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
- support_aligned_spilled_doubles_ = info()->IsOptimizing();
-
- dynamic_frame_alignment_ = info()->IsOptimizing() &&
- ((chunk()->num_double_slots() > 2 &&
- !chunk()->graph()->is_recursive()) ||
- !info()->osr_ast_id().IsNone());
-
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
@@ -130,31 +123,6 @@ bool LCodeGen::GeneratePrologue() {
if (info()->IsOptimizing()) {
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
- if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
- // Move state of dynamic frame alignment into edx.
- __ Move(edx, Immediate(kNoAlignmentPadding));
-
- Label do_not_pad, align_loop;
- STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
- // Align esp + 4 to a multiple of 2 * kPointerSize.
- __ test(esp, Immediate(kPointerSize));
- __ j(not_zero, &do_not_pad, Label::kNear);
- __ push(Immediate(0));
- __ mov(ebx, esp);
- __ mov(edx, Immediate(kAlignmentPaddingPushed));
- // Copy arguments, receiver, and return address.
- __ mov(ecx, Immediate(scope()->num_parameters() + 2));
-
- __ bind(&align_loop);
- __ mov(eax, Operand(ebx, 1 * kPointerSize));
- __ mov(Operand(ebx, 0), eax);
- __ add(Operand(ebx), Immediate(kPointerSize));
- __ dec(ecx);
- __ j(not_zero, &align_loop, Label::kNear);
- __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
- __ bind(&do_not_pad);
- }
}
info()->set_prologue_offset(masm_->pc_offset());
@@ -162,61 +130,29 @@ bool LCodeGen::GeneratePrologue() {
DCHECK(!frame_is_built_);
frame_is_built_ = true;
if (info()->IsStub()) {
- __ StubPrologue();
+ __ StubPrologue(StackFrame::STUB);
} else {
__ Prologue(info()->GeneratePreagedPrologue());
}
}
- if (info()->IsOptimizing() &&
- dynamic_frame_alignment_ &&
- FLAG_debug_code) {
- __ test(esp, Immediate(kPointerSize));
- __ Assert(zero, kFrameIsExpectedToBeAligned);
- }
-
// Reserve space for the stack slots needed by the code.
int slots = GetStackSlotCount();
DCHECK(slots != 0 || !info()->IsOptimizing());
if (slots > 0) {
- if (slots == 1) {
- if (dynamic_frame_alignment_) {
- __ push(edx);
- } else {
- __ push(Immediate(kNoAlignmentPadding));
- }
- } else {
- if (FLAG_debug_code) {
- __ sub(Operand(esp), Immediate(slots * kPointerSize));
+ __ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
- MakeSureStackPagesMapped(slots * kPointerSize);
+ MakeSureStackPagesMapped(slots * kPointerSize);
#endif
- __ push(eax);
- __ mov(Operand(eax), Immediate(slots));
- Label loop;
- __ bind(&loop);
- __ mov(MemOperand(esp, eax, times_4, 0),
- Immediate(kSlotsZapValue));
- __ dec(eax);
- __ j(not_zero, &loop);
- __ pop(eax);
- } else {
- __ sub(Operand(esp), Immediate(slots * kPointerSize));
-#ifdef _MSC_VER
- MakeSureStackPagesMapped(slots * kPointerSize);
-#endif
- }
-
- if (support_aligned_spilled_doubles_) {
- Comment(";;; Store dynamic frame alignment tag for spilled doubles");
- // Store dynamic frame alignment state in the first local.
- int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
- if (dynamic_frame_alignment_) {
- __ mov(Operand(ebp, offset), edx);
- } else {
- __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
- }
- }
+ if (FLAG_debug_code) {
+ __ push(eax);
+ __ mov(Operand(eax), Immediate(slots));
+ Label loop;
+ __ bind(&loop);
+ __ mov(MemOperand(esp, eax, times_4, 0), Immediate(kSlotsZapValue));
+ __ dec(eax);
+ __ j(not_zero, &loop);
+ __ pop(eax);
}
if (info()->saves_caller_doubles()) SaveCallerDoubles();
@@ -298,47 +234,11 @@ void LCodeGen::GenerateOsrPrologue() {
osr_pc_offset_ = masm()->pc_offset();
- // Move state of dynamic frame alignment into edx.
- __ Move(edx, Immediate(kNoAlignmentPadding));
-
- if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
- Label do_not_pad, align_loop;
- // Align ebp + 4 to a multiple of 2 * kPointerSize.
- __ test(ebp, Immediate(kPointerSize));
- __ j(zero, &do_not_pad, Label::kNear);
- __ push(Immediate(0));
- __ mov(ebx, esp);
- __ mov(edx, Immediate(kAlignmentPaddingPushed));
-
- // Move all parts of the frame over one word. The frame consists of:
- // unoptimized frame slots, alignment state, context, frame pointer, return
- // address, receiver, and the arguments.
- __ mov(ecx, Immediate(scope()->num_parameters() +
- 5 + graph()->osr()->UnoptimizedFrameSlots()));
-
- __ bind(&align_loop);
- __ mov(eax, Operand(ebx, 1 * kPointerSize));
- __ mov(Operand(ebx, 0), eax);
- __ add(Operand(ebx), Immediate(kPointerSize));
- __ dec(ecx);
- __ j(not_zero, &align_loop, Label::kNear);
- __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
- __ sub(Operand(ebp), Immediate(kPointerSize));
- __ bind(&do_not_pad);
- }
-
- // Save the first local, which is overwritten by the alignment state.
- Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
- __ push(alignment_loc);
-
- // Set the dynamic frame alignment state.
- __ mov(alignment_loc, edx);
-
// Adjust the frame size, subsuming the unoptimized frame into the
// optimized frame.
int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
- DCHECK(slots >= 1);
- __ sub(esp, Immediate((slots - 1) * kPointerSize));
+ DCHECK(slots >= 0);
+ __ sub(esp, Immediate(slots * kPointerSize));
}
@@ -380,29 +280,24 @@ bool LCodeGen::GenerateJumpTable() {
if (needs_frame.is_linked()) {
__ bind(&needs_frame);
/* stack layout
- 4: entry address
- 3: return address <-- esp
- 2: garbage
+ 3: entry address
+ 2: return address <-- esp
1: garbage
0: garbage
*/
- __ sub(esp, Immediate(kPointerSize)); // Reserve space for stub marker.
- __ push(MemOperand(esp, kPointerSize)); // Copy return address.
- __ push(MemOperand(esp, 3 * kPointerSize)); // Copy entry address.
+ __ push(MemOperand(esp, 0)); // Copy return address.
+ __ push(MemOperand(esp, 2 * kPointerSize)); // Copy entry address.
/* stack layout
4: entry address
3: return address
- 2: garbage
1: return address
0: entry address <-- esp
*/
- __ mov(MemOperand(esp, 4 * kPointerSize), ebp); // Save ebp.
- // Copy context.
- __ mov(ebp, MemOperand(ebp, StandardFrameConstants::kContextOffset));
- __ mov(MemOperand(esp, 3 * kPointerSize), ebp);
+ __ mov(MemOperand(esp, 3 * kPointerSize), ebp); // Save ebp.
// Fill ebp with the right stack frame address.
- __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
+ __ lea(ebp, MemOperand(esp, 3 * kPointerSize));
+
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
@@ -411,8 +306,7 @@ bool LCodeGen::GenerateJumpTable() {
Immediate(Smi::FromInt(StackFrame::STUB)));
/* stack layout
- 4: old ebp
- 3: context pointer
+ 3: old ebp
2: stub marker
1: return address
0: entry address <-- esp
@@ -447,9 +341,8 @@ bool LCodeGen::GenerateDeferredCode() {
frame_is_built_ = true;
// Build the frame in such a way that esi isn't trashed.
__ push(ebp); // Caller's frame pointer.
- __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
__ push(Immediate(Smi::FromInt(StackFrame::STUB)));
- __ lea(ebp, Operand(esp, 2 * kPointerSize));
+ __ lea(ebp, Operand(esp, TypedFrameConstants::kFixedFrameSizeFromFp));
Comment(";;; Deferred code");
}
code->Generate();
@@ -1969,15 +1862,16 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
EmitBranch(instr, not_equal);
} else {
- ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
- if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+ ToBooleanICStub::Types expected =
+ instr->hydrogen()->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
- if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+ if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
// undefined -> false.
__ cmp(reg, factory()->undefined_value());
__ j(equal, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+ if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
// true -> true.
__ cmp(reg, factory()->true_value());
__ j(equal, instr->TrueLabel(chunk_));
@@ -1985,13 +1879,13 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ cmp(reg, factory()->false_value());
__ j(equal, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+ if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
// 'null' -> false.
__ cmp(reg, factory()->null_value());
__ j(equal, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::SMI)) {
+ if (expected.Contains(ToBooleanICStub::SMI)) {
// Smis: 0 -> false, all other -> true.
__ test(reg, Operand(reg));
__ j(equal, instr->FalseLabel(chunk_));
@@ -2011,18 +1905,18 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (expected.CanBeUndetectable()) {
// Undetectable -> false.
__ test_b(FieldOperand(map, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
+ Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, instr->FalseLabel(chunk_));
}
}
- if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+ if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
// spec object -> true.
__ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
__ j(above_equal, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::STRING)) {
+ if (expected.Contains(ToBooleanICStub::STRING)) {
// String value -> false iff empty.
Label not_string;
__ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
@@ -2033,19 +1927,19 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ bind(&not_string);
}
- if (expected.Contains(ToBooleanStub::SYMBOL)) {
+ if (expected.Contains(ToBooleanICStub::SYMBOL)) {
// Symbol value -> true.
__ CmpInstanceType(map, SYMBOL_TYPE);
__ j(equal, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+ if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
// SIMD value -> true.
__ CmpInstanceType(map, SIMD128_VALUE_TYPE);
__ j(equal, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
Label not_heap_number;
__ cmp(FieldOperand(reg, HeapObject::kMapOffset),
@@ -2237,7 +2131,7 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
}
__ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
__ test_b(FieldOperand(temp, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
+ Immediate(1 << Map::kIsUndetectable));
EmitBranch(instr, not_zero);
}
@@ -2267,11 +2161,10 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->left()).is(edx));
DCHECK(ToRegister(instr->right()).is(eax));
- Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+ Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
- __ test(eax, eax);
-
- EmitBranch(instr, ComputeCompareCondition(instr->op()));
+ __ CompareRoot(eax, Heap::kTrueValueRootIndex);
+ EmitBranch(instr, equal);
}
@@ -2430,7 +2323,7 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
// Deoptimize if the object needs to be access checked.
__ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
- 1 << Map::kIsAccessCheckNeeded);
+ Immediate(1 << Map::kIsAccessCheckNeeded));
DeoptimizeIf(not_zero, instr, Deoptimizer::kAccessCheck);
// Deoptimize for proxies.
__ CmpInstanceType(object_map, JS_PROXY_TYPE);
@@ -2463,18 +2356,11 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
__ bind(&done);
}
-
-void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
- int extra_value_count = dynamic_frame_alignment ? 2 : 1;
+void LCodeGen::EmitReturn(LReturn* instr) {
+ int extra_value_count = 1;
if (instr->has_constant_parameter_count()) {
int parameter_count = ToInteger32(instr->constant_parameter_count());
- if (dynamic_frame_alignment && FLAG_debug_code) {
- __ cmp(Operand(esp,
- (parameter_count + extra_value_count) * kPointerSize),
- Immediate(kAlignmentZapValue));
- __ Assert(equal, kExpectedAlignmentMarker);
- }
__ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
} else {
DCHECK(info()->IsStub()); // Functions would need to drop one more value.
@@ -2482,20 +2368,9 @@ void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
// The argument count parameter is a smi
__ SmiUntag(reg);
Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
- if (dynamic_frame_alignment && FLAG_debug_code) {
- DCHECK(extra_value_count == 2);
- __ cmp(Operand(esp, reg, times_pointer_size,
- extra_value_count * kPointerSize),
- Immediate(kAlignmentZapValue));
- __ Assert(equal, kExpectedAlignmentMarker);
- }
// emit code to restore stack based on instr->parameter_count()
__ pop(return_addr_reg); // save return address
- if (dynamic_frame_alignment) {
- __ inc(reg); // 1 more for alignment
- }
-
__ shl(reg, kPointerSizeLog2);
__ add(esp, reg);
__ jmp(return_addr_reg);
@@ -2514,25 +2389,12 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ CallRuntime(Runtime::kTraceExit);
}
if (info()->saves_caller_doubles()) RestoreCallerDoubles();
- if (dynamic_frame_alignment_) {
- // Fetch the state of the dynamic frame alignment.
- __ mov(edx, Operand(ebp,
- JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
- }
if (NeedsEagerFrame()) {
__ mov(esp, ebp);
__ pop(ebp);
}
- if (dynamic_frame_alignment_) {
- Label no_padding;
- __ cmp(edx, Immediate(kNoAlignmentPadding));
- __ j(equal, &no_padding, Label::kNear);
-
- EmitReturn(instr, true);
- __ bind(&no_padding);
- }
- EmitReturn(instr, false);
+ EmitReturn(instr);
}
@@ -2942,11 +2804,12 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
if (instr->hydrogen()->from_inlined()) {
__ lea(result, Operand(esp, -2 * kPointerSize));
- } else {
+ } else if (instr->hydrogen()->arguments_adaptor()) {
// Check for arguments adapter frame.
Label done, adapted;
__ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
+ __ mov(result,
+ Operand(result, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(Operand(result),
Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adapted, Label::kNear);
@@ -2962,6 +2825,8 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
// Result is the frame pointer for the frame if not adapted and for the real
// frame below the adaptor frame if adapted.
__ bind(&done);
+ } else {
+ __ mov(result, Operand(ebp));
}
}
@@ -3005,12 +2870,12 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ mov(scratch,
FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
__ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
- 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
+ Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
__ j(not_equal, &receiver_ok, dist);
// Do not transform the receiver to object for builtins.
__ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
- 1 << SharedFunctionInfo::kNativeBitWithinByte);
+ Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
__ j(not_equal, &receiver_ok, dist);
}
@@ -3066,13 +2931,25 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// Invoke the function.
__ bind(&invoke);
+
+ InvokeFlag flag = CALL_FUNCTION;
+ if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
+ DCHECK(!info()->saves_caller_doubles());
+ // TODO(ishell): drop current frame before pushing arguments to the stack.
+ flag = JUMP_FUNCTION;
+ ParameterCount actual(eax);
+ // It is safe to use ebx, ecx and edx as scratch registers here given that
+ // 1) we are not going to return to caller function anyway,
+ // 2) ebx (expected arguments count) and edx (new.target) will be
+ // initialized below.
+ PrepareForTailCall(actual, ebx, ecx, edx);
+ }
+
DCHECK(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
+ SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount actual(eax);
- __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
- safepoint_generator);
+ __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
}
@@ -3116,10 +2993,9 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
CallRuntime(Runtime::kDeclareGlobals, instr);
}
-
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr) {
+ bool is_tail_call, LInstruction* instr) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
bool can_invoke_directly =
@@ -3135,21 +3011,38 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ mov(edx, factory()->undefined_value());
__ mov(eax, arity);
+ bool is_self_call = function.is_identical_to(info()->closure());
+
// Invoke function directly.
- if (function.is_identical_to(info()->closure())) {
- __ CallSelf();
+ if (is_self_call) {
+ Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
+ if (is_tail_call) {
+ __ Jump(self, RelocInfo::CODE_TARGET);
+ } else {
+ __ Call(self, RelocInfo::CODE_TARGET);
+ }
} else {
- __ call(FieldOperand(function_reg, JSFunction::kCodeEntryOffset));
+ Operand target = FieldOperand(function_reg, JSFunction::kCodeEntryOffset);
+ if (is_tail_call) {
+ __ jmp(target);
+ } else {
+ __ call(target);
+ }
+ }
+
+ if (!is_tail_call) {
+ // Set up deoptimization.
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
} else {
// We need to adapt arguments.
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(
this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(arity);
+ ParameterCount actual(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(function_reg, expected, actual, flag, generator);
}
}
@@ -3191,35 +3084,6 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
}
-void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
- DCHECK(ToRegister(instr->function()).is(edi));
- DCHECK(ToRegister(instr->result()).is(eax));
-
- // Change context.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Always initialize new target and number of actual arguments.
- __ mov(edx, factory()->undefined_value());
- __ mov(eax, instr->arity());
-
- bool is_self_call = false;
- if (instr->hydrogen()->function()->IsConstant()) {
- HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
- Handle<JSFunction> jsfun =
- Handle<JSFunction>::cast(fun_const->handle(isolate()));
- is_self_call = jsfun.is_identical_to(info()->closure());
- }
-
- if (is_self_call) {
- __ CallSelf();
- } else {
- __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
- }
-
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
@@ -3227,8 +3091,19 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
Label slow, allocated, done;
- Register tmp = input_reg.is(eax) ? ecx : eax;
- Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
+ uint32_t available_regs = eax.bit() | ecx.bit() | edx.bit() | ebx.bit();
+ available_regs &= ~input_reg.bit();
+ if (instr->context()->IsRegister()) {
+ // Make sure that the context isn't overwritten in the AllocateHeapNumber
+ // macro below.
+ available_regs &= ~ToRegister(instr->context()).bit();
+ }
+
+ Register tmp =
+ Register::from_code(base::bits::CountTrailingZeros32(available_regs));
+ available_regs &= ~tmp.bit();
+ Register tmp2 =
+ Register::from_code(base::bits::CountTrailingZeros32(available_regs));
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
@@ -3314,8 +3189,14 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
}
}
+void LCodeGen::DoMathFloorD(LMathFloorD* instr) {
+ XMMRegister output_reg = ToDoubleRegister(instr->result());
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
+ CpuFeatureScope scope(masm(), SSE4_1);
+ __ roundsd(output_reg, input_reg, kRoundDown);
+}
-void LCodeGen::DoMathFloor(LMathFloor* instr) {
+void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
XMMRegister xmm_scratch = double_scratch0();
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3379,8 +3260,23 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
}
}
+void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
+ XMMRegister xmm_scratch = double_scratch0();
+ XMMRegister output_reg = ToDoubleRegister(instr->result());
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
+ CpuFeatureScope scope(masm(), SSE4_1);
+ Label done;
+ __ roundsd(output_reg, input_reg, kRoundUp);
+ __ Move(xmm_scratch, -0.5);
+ __ addsd(xmm_scratch, output_reg);
+ __ ucomisd(xmm_scratch, input_reg);
+ __ j(below_equal, &done, Label::kNear);
+ __ Move(xmm_scratch, 1.0);
+ __ subsd(output_reg, xmm_scratch);
+ __ bind(&done);
+}
-void LCodeGen::DoMathRound(LMathRound* instr) {
+void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
XMMRegister xmm_scratch = double_scratch0();
@@ -3570,54 +3466,78 @@ void LCodeGen::DoMathExp(LMathExp* instr) {
MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
}
-
-void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->function()).is(edi));
- DCHECK(instr->HasPointerMap());
-
- Handle<JSFunction> known_function = instr->hydrogen()->known_function();
- if (known_function.is_null()) {
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(
- this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(instr->arity());
- __ InvokeFunction(edi, no_reg, count, CALL_FUNCTION, generator);
+void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+#if DEBUG
+ if (actual.is_reg()) {
+ DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
} else {
- CallKnownFunction(known_function,
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(), instr);
+ DCHECK(!AreAliased(scratch1, scratch2, scratch3));
}
-}
+#endif
+ if (FLAG_code_comments) {
+ if (actual.is_reg()) {
+ Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+ } else {
+ Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
+ }
+ }
+
+ // Check if next frame is an arguments adaptor frame.
+ Register caller_args_count_reg = scratch1;
+ Label no_arguments_adaptor, formal_parameter_count_loaded;
+ __ mov(scratch2, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ cmp(Operand(scratch2, StandardFrameConstants::kContextOffset),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &no_arguments_adaptor, Label::kNear);
+
+ // Drop current frame and load arguments count from arguments adaptor frame.
+ __ mov(ebp, scratch2);
+ __ mov(caller_args_count_reg,
+ Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(caller_args_count_reg);
+ __ jmp(&formal_parameter_count_loaded, Label::kNear);
+ __ bind(&no_arguments_adaptor);
+ // Load caller's formal parameter count.
+ __ mov(caller_args_count_reg,
+ Immediate(info()->literal()->parameter_count()));
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
- HCallFunction* hinstr = instr->hydrogen();
+ __ bind(&formal_parameter_count_loaded);
+ __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3,
+ ReturnAddressState::kNotOnStack, 0);
+ Comment(";;; }");
+}
+
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ HInvokeFunction* hinstr = instr->hydrogen();
DCHECK(ToRegister(instr->context()).is(esi));
DCHECK(ToRegister(instr->function()).is(edi));
- DCHECK(ToRegister(instr->result()).is(eax));
-
- int arity = instr->arity();
- ConvertReceiverMode mode = hinstr->convert_mode();
- if (hinstr->HasVectorAndSlot()) {
- Register slot_register = ToRegister(instr->temp_slot());
- Register vector_register = ToRegister(instr->temp_vector());
- DCHECK(slot_register.is(edx));
- DCHECK(vector_register.is(ebx));
+ DCHECK(instr->HasPointerMap());
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
- int index = vector->GetIndex(hinstr->slot());
+ bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
- __ mov(vector_register, vector);
- __ mov(slot_register, Immediate(Smi::FromInt(index)));
+ if (is_tail_call) {
+ DCHECK(!info()->saves_caller_doubles());
+ ParameterCount actual(instr->arity());
+ // It is safe to use ebx, ecx and edx as scratch registers here given that
+ // 1) we are not going to return to caller function anyway,
+ // 2) ebx (expected arguments count) and edx (new.target) will be
+ // initialized below.
+ PrepareForTailCall(actual, ebx, ecx, edx);
+ }
- Handle<Code> ic =
- CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ Handle<JSFunction> known_function = hinstr->known_function();
+ if (known_function.is_null()) {
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount actual(instr->arity());
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(edi, no_reg, actual, flag, generator);
} else {
- __ Set(eax, arity);
- CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
+ CallKnownFunction(known_function, hinstr->formal_parameter_count(),
+ instr->arity(), is_tail_call, instr);
}
}
@@ -4483,7 +4403,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
__ ucomisd(result_reg, xmm_scratch);
__ j(not_zero, &done, Label::kNear);
__ movmskpd(temp_reg, result_reg);
- __ test_b(temp_reg, 1);
+ __ test_b(temp_reg, Immediate(1));
DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
}
__ jmp(&done, Label::kNear);
@@ -4705,7 +4625,7 @@ void LCodeGen::DoCheckArrayBufferNotNeutered(
__ mov(scratch, FieldOperand(view, JSArrayBufferView::kBufferOffset));
__ test_b(FieldOperand(scratch, JSArrayBuffer::kBitFieldOffset),
- 1 << JSArrayBuffer::WasNeutered::kShift);
+ Immediate(1 << JSArrayBuffer::WasNeutered::kShift));
DeoptimizeIf(not_zero, instr, Deoptimizer::kOutOfBounds);
}
@@ -4721,8 +4641,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
InstanceType last;
instr->hydrogen()->GetCheckInterval(&first, &last);
- __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
- static_cast<int8_t>(first));
+ __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(first));
// If there is only one type in the interval check for equality.
if (first == last) {
@@ -4731,8 +4650,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType);
// Omit check for the last type.
if (last != LAST_TYPE) {
- __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
- static_cast<int8_t>(last));
+ __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(last));
DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType);
}
}
@@ -4743,7 +4661,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
- __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
+ __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(mask));
DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
Deoptimizer::kWrongInstanceType);
} else {
@@ -5027,13 +4945,6 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
}
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- DCHECK(ToRegister(instr->value()).is(eax));
- __ push(eax);
- CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
DCHECK(ToRegister(instr->value()).is(ebx));
@@ -5100,7 +5011,7 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
// Check for undetectable objects => true.
__ mov(input, FieldOperand(input, HeapObject::kMapOffset));
__ test_b(FieldOperand(input, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
+ Immediate(1 << Map::kIsUndetectable));
final_branch_condition = not_zero;
} else if (String::Equals(type_name, factory()->function_string())) {
@@ -5121,7 +5032,7 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
__ j(below, false_label, false_distance);
// Check for callable or undetectable objects => false.
__ test_b(FieldOperand(input, Map::kBitFieldOffset),
- (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
+ Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
final_branch_condition = zero;
// clang-format off
@@ -5382,13 +5293,6 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
__ bind(&done);
}
-
-void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
- Register context = ToRegister(instr->context());
- __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), context);
-}
-
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h
index 589ef2e05e..bc61c96339 100644
--- a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h
@@ -29,8 +29,6 @@ class LCodeGen: public LCodeGenBase {
jump_table_(4, info->zone()),
scope_(info->scope()),
deferred_(8, info->zone()),
- dynamic_frame_alignment_(false),
- support_aligned_spilled_doubles_(false),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
@@ -193,11 +191,14 @@ class LCodeGen: public LCodeGenBase {
void LoadContextFromDeferred(LOperand* context);
+ void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
+ Register scratch2, Register scratch3);
+
// Generate a direct call to a known function. Expects the function
// to be in edi.
void CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr);
+ bool is_tail_call, LInstruction* instr);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
@@ -300,7 +301,7 @@ class LCodeGen: public LCodeGenBase {
template <class T>
void EmitVectorStoreICRegisters(T* instr);
- void EmitReturn(LReturn* instr, bool dynamic_frame_alignment);
+ void EmitReturn(LReturn* instr);
// Emits code for pushing either a tagged constant, a (non-double)
// register, or a stack slot operand.
@@ -319,8 +320,6 @@ class LCodeGen: public LCodeGenBase {
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
Scope* const scope_;
ZoneList<LDeferredCode*> deferred_;
- bool dynamic_frame_alignment_;
- bool support_aligned_spilled_doubles_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
diff --git a/deps/v8/src/crankshaft/ia32/lithium-ia32.cc b/deps/v8/src/crankshaft/ia32/lithium-ia32.cc
index e2772d5ee3..4afeef5d68 100644
--- a/deps/v8/src/crankshaft/ia32/lithium-ia32.cc
+++ b/deps/v8/src/crankshaft/ia32/lithium-ia32.cc
@@ -267,27 +267,6 @@ void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
}
-void LCallFunction::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add(" ");
- function()->PrintTo(stream);
- if (hydrogen()->HasVectorAndSlot()) {
- stream->Add(" (type-feedback-vector ");
- temp_vector()->PrintTo(stream);
- stream->Add(" ");
- temp_slot()->PrintTo(stream);
- stream->Add(")");
- }
-}
-
-
-void LCallJSFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- function()->PrintTo(stream);
- stream->Add("#%d / ", arity());
-}
-
-
void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
for (int i = 0; i < InputCount(); i++) {
InputAt(i)->PrintTo(stream);
@@ -434,13 +413,6 @@ LPlatformChunk* LChunkBuilder::Build() {
LPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
- // Reserve the first spill slot for the state of dynamic alignment.
- if (info()->IsOptimizing()) {
- int alignment_state_index = chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
- DCHECK_EQ(alignment_state_index, 4);
- USE(alignment_state_index);
- }
-
// If compiling for OSR, reserve space for the unoptimized frame,
// which will be subsumed into this frame.
if (graph()->has_osr()) {
@@ -618,11 +590,7 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
- int argument_index_accumulator = 0;
- ZoneList<HValue*> objects_to_materialize(0, zone());
- instr->set_environment(CreateEnvironment(
- hydrogen_env, &argument_index_accumulator, &objects_to_materialize));
- return instr;
+ return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
}
@@ -936,22 +904,16 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
}
chunk_->AddInstruction(instr, current_block_);
- if (instr->IsCall() || instr->IsPrologue()) {
- HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
- if (hydrogen_val->HasObservableSideEffects()) {
- HSimulate* sim = HSimulate::cast(hydrogen_val->next());
- sim->ReplayEnvironment(current_block_->last_environment());
- hydrogen_value_for_lazy_bailout = sim;
- }
- LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
- bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
- chunk_->AddInstruction(bailout, current_block_);
- }
+ CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
}
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
- return new (zone()) LPrologue();
+ LInstruction* result = new (zone()) LPrologue();
+ if (info_->num_heap_slots() > 0) {
+ result = MarkAsCall(result, instr);
+ }
+ return result;
}
@@ -964,15 +926,15 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* value = instr->value();
Representation r = value->representation();
HType type = value->type();
- ToBooleanStub::Types expected = instr->expected_input_types();
- if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+ ToBooleanICStub::Types expected = instr->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
type.IsJSArray() || type.IsHeapNumber() || type.IsString();
LOperand* temp = !easy_case && expected.NeedsMap() ? TempRegister() : NULL;
LInstruction* branch = new(zone()) LBranch(UseRegister(value), temp);
if (!easy_case &&
- ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+ ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
!expected.IsGeneric())) {
branch = AssignEnvironment(branch);
}
@@ -1100,16 +1062,6 @@ LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
}
-LInstruction* LChunkBuilder::DoCallJSFunction(
- HCallJSFunction* instr) {
- LOperand* function = UseFixed(instr->function(), edi);
-
- LCallJSFunction* result = new(zone()) LCallJSFunction(function);
-
- return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
LInstruction* LChunkBuilder::DoCallWithDescriptor(
HCallWithDescriptor* instr) {
CallInterfaceDescriptor descriptor = instr->descriptor();
@@ -1132,6 +1084,9 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
descriptor, ops, zone());
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1140,6 +1095,9 @@ LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* function = UseFixed(instr->function(), edi);
LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1170,22 +1128,33 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
}
}
-
LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
+ DCHECK(instr->value()->representation().IsDouble());
LOperand* input = UseRegisterAtStart(instr->value());
- LMathFloor* result = new(zone()) LMathFloor(input);
- return AssignEnvironment(DefineAsRegister(result));
+ if (instr->representation().IsInteger32()) {
+ LMathFloorI* result = new (zone()) LMathFloorI(input);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ } else {
+ DCHECK(instr->representation().IsDouble());
+ LMathFloorD* result = new (zone()) LMathFloorD(input);
+ return DefineAsRegister(result);
+ }
}
-
LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
+ DCHECK(instr->value()->representation().IsDouble());
LOperand* input = UseRegister(instr->value());
- LOperand* temp = FixedTemp(xmm4);
- LMathRound* result = new(zone()) LMathRound(input, temp);
- return AssignEnvironment(DefineAsRegister(result));
+ if (instr->representation().IsInteger32()) {
+ LOperand* temp = FixedTemp(xmm4);
+ LMathRoundI* result = new (zone()) LMathRoundI(input, temp);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ } else {
+ DCHECK(instr->representation().IsDouble());
+ LMathRoundD* result = new (zone()) LMathRoundD(input);
+ return DefineAsRegister(result);
+ }
}
-
LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
LOperand* input = UseRegister(instr->value());
LMathFround* result = new (zone()) LMathFround(input);
@@ -1253,22 +1222,6 @@ LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
}
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* function = UseFixed(instr->function(), edi);
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(edx);
- vector = FixedTemp(ebx);
- }
-
- LCallFunction* call =
- new (zone()) LCallFunction(context, function, slot, vector);
- return MarkAsCall(DefineFixed(call, eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
LOperand* context = UseFixed(instr->context(), esi);
return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), eax), instr);
@@ -1834,13 +1787,6 @@ LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
}
-LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
- HBoundsCheckBaseIndexInformation* instr) {
- UNREACHABLE();
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
// The control instruction marking the end of a block that completed
// abruptly (e.g., threw an exception). There is nothing specific to do.
@@ -2507,11 +2453,6 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
Retry(kNotEnoughSpillSlotsForOsr);
spill_index = 0;
}
- if (spill_index == 0) {
- // The dynamic frame alignment state overwrites the first local.
- // The first local is saved at the end of the unoptimized frame.
- spill_index = graph()->osr()->UnoptimizedFrameSlots();
- }
spill_index += StandardFrameConstants::kFixedSlotCount;
}
return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
@@ -2551,13 +2492,6 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
}
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
- LOperand* object = UseFixed(instr->value(), eax);
- LToFastProperties* result = new(zone()) LToFastProperties(object);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* value = UseFixed(instr->value(), ebx);
@@ -2595,11 +2529,9 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
HEnvironment* outer = current_block_->last_environment();
outer->set_ast_id(instr->ReturnId());
HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(instr->closure(),
- instr->arguments_count(),
- instr->function(),
- undefined,
- instr->inlining_kind());
+ HEnvironment* inner = outer->CopyForInlining(
+ instr->closure(), instr->arguments_count(), instr->function(), undefined,
+ instr->inlining_kind(), instr->syntactic_tail_call_mode());
// Only replay binding of arguments object if it wasn't removed from graph.
if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object());
@@ -2660,13 +2592,6 @@ LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
return AssignPointerMap(result);
}
-
-LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->context());
- return new(zone()) LStoreFrameContext(context);
-}
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/crankshaft/ia32/lithium-ia32.h b/deps/v8/src/crankshaft/ia32/lithium-ia32.h
index e22ab437fc..68541a48c4 100644
--- a/deps/v8/src/crankshaft/ia32/lithium-ia32.h
+++ b/deps/v8/src/crankshaft/ia32/lithium-ia32.h
@@ -33,9 +33,7 @@ class LCodeGen;
V(BitI) \
V(BoundsCheck) \
V(Branch) \
- V(CallJSFunction) \
V(CallWithDescriptor) \
- V(CallFunction) \
V(CallNewArray) \
V(CallRuntime) \
V(CheckArrayBufferNotNeutered) \
@@ -105,12 +103,14 @@ class LCodeGen;
V(MathAbs) \
V(MathClz32) \
V(MathExp) \
- V(MathFloor) \
+ V(MathFloorD) \
+ V(MathFloorI) \
V(MathFround) \
V(MathLog) \
V(MathMinMax) \
V(MathPowHalf) \
- V(MathRound) \
+ V(MathRoundD) \
+ V(MathRoundI) \
V(MathSqrt) \
V(MaybeGrowElements) \
V(ModByConstI) \
@@ -135,7 +135,6 @@ class LCodeGen;
V(StackCheck) \
V(StoreCodeEntry) \
V(StoreContextSlot) \
- V(StoreFrameContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -147,7 +146,6 @@ class LCodeGen;
V(SubI) \
V(TaggedToI) \
V(ThisFunction) \
- V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
V(Typeof) \
@@ -156,7 +154,6 @@ class LCodeGen;
V(UnknownOSRValue) \
V(WrapReceiver)
-
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
Opcode opcode() const final { return LInstruction::k##type; } \
void CompileToNative(LCodeGen* generator) final; \
@@ -228,6 +225,13 @@ class LInstruction : public ZoneObject {
void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
bool IsCall() const { return IsCallBits::decode(bit_field_); }
+ void MarkAsSyntacticTailCall() {
+ bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
+ }
+ bool IsSyntacticTailCall() const {
+ return IsSyntacticTailCallBits::decode(bit_field_);
+ }
+
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
@@ -262,6 +266,8 @@ class LInstruction : public ZoneObject {
virtual LOperand* TempAt(int i) = 0;
class IsCallBits: public BitField<bool, 0, 1> {};
+ class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
+ };
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
@@ -539,6 +545,7 @@ class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
LOperand* elements() { return inputs_[3]; }
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+ DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
};
@@ -808,23 +815,43 @@ class LCompareNumericAndBranch final : public LControlInstruction<2, 0> {
void PrintDataTo(StringStream* stream) override;
};
+// Math.floor with a double result.
+class LMathFloorD final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathFloorD(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
-class LMathFloor final : public LTemplateInstruction<1, 1, 0> {
+ DECLARE_CONCRETE_INSTRUCTION(MathFloorD, "math-floor-d")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+// Math.floor with an integer result.
+class LMathFloorI final : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LMathFloor(LOperand* value) {
- inputs_[0] = value;
- }
+ explicit LMathFloorI(LOperand* value) { inputs_[0] = value; }
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
+ DECLARE_CONCRETE_INSTRUCTION(MathFloorI, "math-floor-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
};
+// Math.round with a double result.
+class LMathRoundD final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathRoundD(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathRoundD, "math-round-d")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
-class LMathRound final : public LTemplateInstruction<1, 1, 1> {
+// Math.round with an integer result.
+class LMathRoundI final : public LTemplateInstruction<1, 1, 1> {
public:
- LMathRound(LOperand* value, LOperand* temp) {
+ LMathRoundI(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
@@ -832,7 +859,7 @@ class LMathRound final : public LTemplateInstruction<1, 1, 1> {
LOperand* temp() { return temps_[0]; }
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+ DECLARE_CONCRETE_INSTRUCTION(MathRoundI, "math-round-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
};
@@ -1723,23 +1750,6 @@ class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
};
-class LCallJSFunction final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallJSFunction(LOperand* function) {
- inputs_[0] = function;
- }
-
- LOperand* function() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
- DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
public:
LCallWithDescriptor(CallInterfaceDescriptor descriptor,
@@ -1798,29 +1808,6 @@ class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
};
-class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
- public:
- LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
- LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = function;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
- DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
- void PrintDataTo(StringStream* stream) override;
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2415,19 +2402,6 @@ class LAllocate final : public LTemplateInstruction<1, 2, 1> {
};
-class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LToFastProperties(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
- DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
class LTypeof final : public LTemplateInstruction<1, 2, 0> {
public:
LTypeof(LOperand* context, LOperand* value) {
@@ -2542,18 +2516,6 @@ class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
};
-class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LStoreFrameContext(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
-};
-
-
class LChunkBuilder;
class LPlatformChunk final : public LChunk {
public:
diff --git a/deps/v8/src/crankshaft/lithium-allocator-inl.h b/deps/v8/src/crankshaft/lithium-allocator-inl.h
index 22611b5efb..631af6024b 100644
--- a/deps/v8/src/crankshaft/lithium-allocator-inl.h
+++ b/deps/v8/src/crankshaft/lithium-allocator-inl.h
@@ -21,6 +21,8 @@
#include "src/crankshaft/mips/lithium-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/crankshaft/mips64/lithium-mips64.h" // NOLINT
+#elif V8_TARGET_ARCH_S390
+#include "src/crankshaft/s390/lithium-s390.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/crankshaft/x87/lithium-x87.h" // NOLINT
#else
diff --git a/deps/v8/src/crankshaft/lithium-allocator.cc b/deps/v8/src/crankshaft/lithium-allocator.cc
index 5d05292642..6155dc0f23 100644
--- a/deps/v8/src/crankshaft/lithium-allocator.cc
+++ b/deps/v8/src/crankshaft/lithium-allocator.cc
@@ -510,9 +510,9 @@ LifetimePosition LiveRange::FirstIntersection(LiveRange* other) {
return LifetimePosition::Invalid();
}
-
LAllocator::LAllocator(int num_values, HGraph* graph)
- : chunk_(NULL),
+ : zone_(graph->isolate()->allocator()),
+ chunk_(NULL),
live_in_sets_(graph->blocks()->length(), zone()),
live_ranges_(num_values * 2, zone()),
fixed_live_ranges_(NULL),
@@ -529,7 +529,6 @@ LAllocator::LAllocator(int num_values, HGraph* graph)
has_osr_entry_(false),
allocation_ok_(true) {}
-
void LAllocator::InitializeLivenessAnalysis() {
// Initialize the live_in sets for each block to NULL.
int block_count = graph_->blocks()->length();
diff --git a/deps/v8/src/crankshaft/lithium-allocator.h b/deps/v8/src/crankshaft/lithium-allocator.h
index 46289e0fbb..b648bd80c6 100644
--- a/deps/v8/src/crankshaft/lithium-allocator.h
+++ b/deps/v8/src/crankshaft/lithium-allocator.h
@@ -6,6 +6,7 @@
#define V8_CRANKSHAFT_LITHIUM_ALLOCATOR_H_
#include "src/allocation.h"
+#include "src/crankshaft/compilation-phase.h"
#include "src/crankshaft/lithium.h"
#include "src/zone.h"
diff --git a/deps/v8/src/crankshaft/lithium-codegen.cc b/deps/v8/src/crankshaft/lithium-codegen.cc
index c5b7e9c470..53fedcf1df 100644
--- a/deps/v8/src/crankshaft/lithium-codegen.cc
+++ b/deps/v8/src/crankshaft/lithium-codegen.cc
@@ -30,6 +30,9 @@
#elif V8_TARGET_ARCH_PPC
#include "src/crankshaft/ppc/lithium-ppc.h" // NOLINT
#include "src/crankshaft/ppc/lithium-codegen-ppc.h" // NOLINT
+#elif V8_TARGET_ARCH_S390
+#include "src/crankshaft/s390/lithium-s390.h" // NOLINT
+#include "src/crankshaft/s390/lithium-codegen-s390.h" // NOLINT
#else
#error Unsupported target architecture.
#endif
@@ -237,8 +240,8 @@ void LCodeGenBase::WriteTranslationFrame(LEnvironment* environment,
break;
}
case JS_GETTER: {
- DCHECK(translation_size == 1);
- DCHECK(height == 0);
+ DCHECK_EQ(1, translation_size);
+ DCHECK_EQ(0, height);
int shared_id = DefineDeoptimizationLiteral(
environment->entry() ? environment->entry()->shared()
: info()->shared_info());
@@ -252,8 +255,8 @@ void LCodeGenBase::WriteTranslationFrame(LEnvironment* environment,
break;
}
case JS_SETTER: {
- DCHECK(translation_size == 2);
- DCHECK(height == 0);
+ DCHECK_EQ(2, translation_size);
+ DCHECK_EQ(0, height);
int shared_id = DefineDeoptimizationLiteral(
environment->entry() ? environment->entry()->shared()
: info()->shared_info());
@@ -266,6 +269,20 @@ void LCodeGenBase::WriteTranslationFrame(LEnvironment* environment,
}
break;
}
+ case TAIL_CALLER_FUNCTION: {
+ DCHECK_EQ(0, translation_size);
+ int shared_id = DefineDeoptimizationLiteral(
+ environment->entry() ? environment->entry()->shared()
+ : info()->shared_info());
+ translation->BeginTailCallerFrame(shared_id);
+ if (info()->closure().is_identical_to(environment->closure())) {
+ translation->StoreJSFrameFunction();
+ } else {
+ int closure_id = DefineDeoptimizationLiteral(environment->closure());
+ translation->StoreLiteral(closure_id);
+ }
+ break;
+ }
case ARGUMENTS_ADAPTOR: {
int shared_id = DefineDeoptimizationLiteral(
environment->entry() ? environment->entry()->shared()
diff --git a/deps/v8/src/crankshaft/lithium-inl.h b/deps/v8/src/crankshaft/lithium-inl.h
index 9044b4ca7a..938588e396 100644
--- a/deps/v8/src/crankshaft/lithium-inl.h
+++ b/deps/v8/src/crankshaft/lithium-inl.h
@@ -21,6 +21,8 @@
#include "src/crankshaft/mips64/lithium-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
#include "src/crankshaft/ppc/lithium-ppc.h" // NOLINT
+#elif V8_TARGET_ARCH_S390
+#include "src/crankshaft/s390/lithium-s390.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/crankshaft/x87/lithium-x87.h" // NOLINT
#else
diff --git a/deps/v8/src/crankshaft/lithium.cc b/deps/v8/src/crankshaft/lithium.cc
index 677639095a..d34b04f5da 100644
--- a/deps/v8/src/crankshaft/lithium.cc
+++ b/deps/v8/src/crankshaft/lithium.cc
@@ -30,6 +30,9 @@
#elif V8_TARGET_ARCH_X87
#include "src/crankshaft/x87/lithium-x87.h" // NOLINT
#include "src/crankshaft/x87/lithium-codegen-x87.h" // NOLINT
+#elif V8_TARGET_ARCH_S390
+#include "src/crankshaft/s390/lithium-s390.h" // NOLINT
+#include "src/crankshaft/s390/lithium-codegen-s390.h" // NOLINT
#else
#error "Unknown architecture."
#endif
@@ -247,7 +250,9 @@ void LPointerMap::PrintTo(StringStream* stream) {
}
LChunk::LChunk(CompilationInfo* info, HGraph* graph)
- : base_frame_slots_(StandardFrameConstants::kFixedFrameSize / kPointerSize),
+ : base_frame_slots_(info->IsStub()
+ ? TypedFrameConstants::kFixedSlotCount
+ : StandardFrameConstants::kFixedSlotCount),
current_frame_slots_(base_frame_slots_),
info_(info),
graph_(graph),
@@ -333,7 +338,6 @@ void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
}
}
-
LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
return LConstantOperand::Create(constant->id(), zone());
}
@@ -461,7 +465,8 @@ Handle<Code> LChunk::Codegen() {
void* jit_handler_data =
assembler.positions_recorder()->DetachJITHandlerData();
LOG_CODE_EVENT(info()->isolate(),
- CodeEndLinePosInfoRecordEvent(*code, jit_handler_data));
+ CodeEndLinePosInfoRecordEvent(AbstractCode::cast(*code),
+ jit_handler_data));
CodeGenerator::PrintCode(code, info());
DCHECK(!(info()->isolate()->serializer_enabled() &&
@@ -502,18 +507,94 @@ void LChunkBuilderBase::Retry(BailoutReason reason) {
status_ = ABORTED;
}
+void LChunkBuilderBase::CreateLazyBailoutForCall(HBasicBlock* current_block,
+ LInstruction* instr,
+ HInstruction* hydrogen_val) {
+ if (!instr->IsCall()) return;
+
+ HEnvironment* hydrogen_env = current_block->last_environment();
+ HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
+ DCHECK_NOT_NULL(hydrogen_env);
+ if (instr->IsSyntacticTailCall()) {
+ // If it was a syntactic tail call we need to drop the current frame and
+ // all the frames on top of it that are either an arguments adaptor frame
+ // or a tail caller frame.
+ hydrogen_env = hydrogen_env->outer();
+ while (hydrogen_env != nullptr &&
+ (hydrogen_env->frame_type() == ARGUMENTS_ADAPTOR ||
+ hydrogen_env->frame_type() == TAIL_CALLER_FUNCTION)) {
+ hydrogen_env = hydrogen_env->outer();
+ }
+ if (hydrogen_env != nullptr) {
+ if (hydrogen_env->frame_type() == JS_FUNCTION) {
+ // In case an outer frame is a function frame we have to replay
+ // environment manually because
+ // 1) it does not contain a result of inlined function yet,
+ // 2) we can't find the proper simulate that corresponds to the point
+ // after inlined call to do a ReplayEnvironment() on.
+ // So we push return value on top of outer environment.
+ // As for JS_GETTER/JS_SETTER/JS_CONSTRUCT nothing has to be done here,
+ // the deoptimizer ensures that the result of the callee is correctly
+ // propagated to result register during deoptimization.
+ hydrogen_env = hydrogen_env->Copy();
+ hydrogen_env->Push(hydrogen_val);
+ }
+ } else {
+ // Although we don't need this lazy bailout for normal execution
+ // (because when we tail call from the outermost function we should pop
+ // its frame) we still need it when debugger is on.
+ hydrogen_env = current_block->last_environment();
+ }
+ } else {
+ if (hydrogen_val->HasObservableSideEffects()) {
+ HSimulate* sim = HSimulate::cast(hydrogen_val->next());
+ sim->ReplayEnvironment(hydrogen_env);
+ hydrogen_value_for_lazy_bailout = sim;
+ }
+ }
+ LInstruction* bailout = LChunkBuilderBase::AssignEnvironment(
+ new (zone()) LLazyBailout(), hydrogen_env);
+ bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
+ chunk_->AddInstruction(bailout, current_block);
+}
+
+LInstruction* LChunkBuilderBase::AssignEnvironment(LInstruction* instr,
+ HEnvironment* hydrogen_env) {
+ int argument_index_accumulator = 0;
+ ZoneList<HValue*> objects_to_materialize(0, zone());
+ DCHECK_NE(TAIL_CALLER_FUNCTION, hydrogen_env->frame_type());
+ instr->set_environment(CreateEnvironment(
+ hydrogen_env, &argument_index_accumulator, &objects_to_materialize));
+ return instr;
+}
LEnvironment* LChunkBuilderBase::CreateEnvironment(
HEnvironment* hydrogen_env, int* argument_index_accumulator,
ZoneList<HValue*>* objects_to_materialize) {
if (hydrogen_env == NULL) return NULL;
+ BailoutId ast_id = hydrogen_env->ast_id();
+ DCHECK(!ast_id.IsNone() ||
+ (hydrogen_env->frame_type() != JS_FUNCTION &&
+ hydrogen_env->frame_type() != TAIL_CALLER_FUNCTION));
+
+ if (hydrogen_env->frame_type() == TAIL_CALLER_FUNCTION) {
+ // Skip potential outer arguments adaptor frame.
+ HEnvironment* outer_hydrogen_env = hydrogen_env->outer();
+ if (outer_hydrogen_env != nullptr &&
+ outer_hydrogen_env->frame_type() == ARGUMENTS_ADAPTOR) {
+ outer_hydrogen_env = outer_hydrogen_env->outer();
+ }
+ LEnvironment* outer = CreateEnvironment(
+ outer_hydrogen_env, argument_index_accumulator, objects_to_materialize);
+ return new (zone())
+ LEnvironment(hydrogen_env->closure(), hydrogen_env->frame_type(),
+ ast_id, 0, 0, 0, outer, hydrogen_env->entry(), zone());
+ }
+
LEnvironment* outer =
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator,
objects_to_materialize);
- BailoutId ast_id = hydrogen_env->ast_id();
- DCHECK(!ast_id.IsNone() ||
- hydrogen_env->frame_type() != JS_FUNCTION);
int omitted_count = (hydrogen_env->frame_type() == JS_FUNCTION)
? 0
diff --git a/deps/v8/src/crankshaft/lithium.h b/deps/v8/src/crankshaft/lithium.h
index 5cfc0c358a..a2c028330b 100644
--- a/deps/v8/src/crankshaft/lithium.h
+++ b/deps/v8/src/crankshaft/lithium.h
@@ -9,6 +9,7 @@
#include "src/allocation.h"
#include "src/bailout-reason.h"
+#include "src/crankshaft/compilation-phase.h"
#include "src/crankshaft/hydrogen.h"
#include "src/safepoint-table.h"
#include "src/zone-allocator.h"
@@ -744,6 +745,16 @@ class LChunkBuilderBase BASE_EMBEDDED {
// Will not be moved to a register even if one is freely available.
virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) = 0;
+ // Constructs proper environment for a lazy bailout point after call, creates
+ // LLazyBailout instruction and adds it to current block.
+ void CreateLazyBailoutForCall(HBasicBlock* current_block, LInstruction* instr,
+ HInstruction* hydrogen_val);
+
+ // Assigns given environment to an instruction. An instruction which can
+ // deoptimize must have an environment.
+ LInstruction* AssignEnvironment(LInstruction* instr,
+ HEnvironment* hydrogen_env);
+
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
int* argument_index_accumulator,
ZoneList<HValue*>* objects_to_materialize);
diff --git a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
index 66fcf75ec0..f1717ca474 100644
--- a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
@@ -136,7 +136,7 @@ bool LCodeGen::GeneratePrologue() {
info()->set_prologue_offset(masm_->pc_offset());
if (NeedsEagerFrame()) {
if (info()->IsStub()) {
- __ StubPrologue();
+ __ StubPrologue(StackFrame::STUB);
} else {
__ Prologue(info()->GeneratePreagedPrologue());
}
@@ -278,18 +278,15 @@ bool LCodeGen::GenerateDeferredCode() {
DCHECK(!frame_is_built_);
DCHECK(info()->IsStub());
frame_is_built_ = true;
- __ MultiPush(cp.bit() | fp.bit() | ra.bit());
__ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
- __ push(scratch0());
- __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ PushCommonFrame(scratch0());
Comment(";;; Deferred code");
}
code->Generate();
if (NeedsDeferredFrame()) {
Comment(";;; Destroy frame");
DCHECK(frame_is_built_);
- __ pop(at);
- __ MultiPop(cp.bit() | fp.bit() | ra.bit());
+ __ PopCommonFrame(scratch0());
frame_is_built_ = false;
}
__ jmp(code->exit());
@@ -328,7 +325,7 @@ bool LCodeGen::GenerateJumpTable() {
if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
Comment(";;; call deopt with frame");
- __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+ __ PushCommonFrame();
__ Call(&needs_frame);
} else {
__ Call(&call_deopt_entry);
@@ -342,10 +339,9 @@ bool LCodeGen::GenerateJumpTable() {
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
- DCHECK(info()->IsStub());
__ li(at, Operand(Smi::FromInt(StackFrame::STUB)));
__ push(at);
- __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ DCHECK(info()->IsStub());
}
Comment(";;; call deopt");
@@ -1966,29 +1962,30 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ lw(at, FieldMemOperand(reg, String::kLengthOffset));
EmitBranch(instr, ne, at, Operand(zero_reg));
} else {
- ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+ ToBooleanICStub::Types expected =
+ instr->hydrogen()->expected_input_types();
// Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
- if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+ if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
// undefined -> false.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
}
- if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+ if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
// Boolean -> its value.
__ LoadRoot(at, Heap::kTrueValueRootIndex);
__ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
__ LoadRoot(at, Heap::kFalseValueRootIndex);
__ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
}
- if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+ if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
// 'null' -> false.
__ LoadRoot(at, Heap::kNullValueRootIndex);
__ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
}
- if (expected.Contains(ToBooleanStub::SMI)) {
+ if (expected.Contains(ToBooleanICStub::SMI)) {
// Smis: 0 -> false, all other -> true.
__ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
__ JumpIfSmi(reg, instr->TrueLabel(chunk_));
@@ -2009,14 +2006,14 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
}
- if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+ if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
// spec object -> true.
__ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(instr->TrueLabel(chunk_),
ge, at, Operand(FIRST_JS_RECEIVER_TYPE));
}
- if (expected.Contains(ToBooleanStub::STRING)) {
+ if (expected.Contains(ToBooleanICStub::STRING)) {
// String value -> false iff empty.
Label not_string;
__ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
@@ -2027,14 +2024,14 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ bind(&not_string);
}
- if (expected.Contains(ToBooleanStub::SYMBOL)) {
+ if (expected.Contains(ToBooleanICStub::SYMBOL)) {
// Symbol value -> true.
const Register scratch = scratch1();
__ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
}
- if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+ if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
// SIMD value -> true.
const Register scratch = scratch1();
__ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
@@ -2042,7 +2039,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
Operand(SIMD128_VALUE_TYPE));
}
- if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
DoubleRegister dbl_scratch = double_scratch0();
Label not_heap_number;
@@ -2274,11 +2271,10 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->left()).is(a1));
DCHECK(ToRegister(instr->right()).is(a0));
- Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+ Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
-
- EmitBranch(instr, ComputeCompareCondition(instr->op()), v0,
- Operand(zero_reg));
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ EmitBranch(instr, eq, v0, Operand(at));
}
@@ -2989,17 +2985,20 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
if (instr->hydrogen()->from_inlined()) {
__ Subu(result, sp, 2 * kPointerSize);
- } else {
+ } else if (instr->hydrogen()->arguments_adaptor()) {
// Check if the calling frame is an arguments adaptor frame.
Label done, adapted;
__ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
+ __ lw(result,
+ MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
// Result is the frame pointer for the frame if not adapted and for the real
// frame below the adaptor frame if adapted.
__ Movn(result, fp, temp); // Move only if temp is not equal to zero (ne).
__ Movz(result, scratch, temp); // Move only if temp is equal to zero (eq).
+ } else {
+ __ mov(result, fp);
}
}
@@ -3122,15 +3121,25 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ sll(scratch, length, 2);
__ bind(&invoke);
+
+ InvokeFlag flag = CALL_FUNCTION;
+ if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
+ DCHECK(!info()->saves_caller_doubles());
+ // TODO(ishell): drop current frame before pushing arguments to the stack.
+ flag = JUMP_FUNCTION;
+ ParameterCount actual(a0);
+ // It is safe to use t0, t1 and t2 as scratch registers here given that
+ // we are not going to return to caller function anyway.
+ PrepareForTailCall(actual, t0, t1, t2);
+ }
+
DCHECK(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
+ SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
// The number of arguments is stored in receiver which is a0, as expected
// by InvokeFunction.
ParameterCount actual(receiver);
- __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
- safepoint_generator);
+ __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
}
@@ -3176,10 +3185,9 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
CallRuntime(Runtime::kDeclareGlobals, instr);
}
-
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr) {
+ bool is_tail_call, LInstruction* instr) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
bool can_invoke_directly =
@@ -3196,17 +3204,35 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
__ li(a0, Operand(arity));
+ bool is_self_call = function.is_identical_to(info()->closure());
+
// Invoke function.
- __ lw(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
- __ Call(at);
+ if (is_self_call) {
+ Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
+ if (is_tail_call) {
+ __ Jump(self, RelocInfo::CODE_TARGET);
+ } else {
+ __ Call(self, RelocInfo::CODE_TARGET);
+ }
+ } else {
+ __ lw(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
+ if (is_tail_call) {
+ __ Jump(at);
+ } else {
+ __ Call(at);
+ }
+ }
- // Set up deoptimization.
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ if (!is_tail_call) {
+ // Set up deoptimization.
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ }
} else {
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(arity);
+ ParameterCount actual(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(function_reg, expected, actual, flag, generator);
}
}
@@ -3540,22 +3566,78 @@ void LCodeGen::DoMathClz32(LMathClz32* instr) {
__ Clz(result, input);
}
+void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+#if DEBUG
+ if (actual.is_reg()) {
+ DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
+ } else {
+ DCHECK(!AreAliased(scratch1, scratch2, scratch3));
+ }
+#endif
+ if (FLAG_code_comments) {
+ if (actual.is_reg()) {
+ Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+ } else {
+ Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
+ }
+ }
+
+ // Check if next frame is an arguments adaptor frame.
+ Register caller_args_count_reg = scratch1;
+ Label no_arguments_adaptor, formal_parameter_count_loaded;
+ __ lw(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+ __ Branch(&no_arguments_adaptor, ne, scratch3,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Drop current frame and load arguments count from arguments adaptor frame.
+ __ mov(fp, scratch2);
+ __ lw(caller_args_count_reg,
+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(caller_args_count_reg);
+ __ Branch(&formal_parameter_count_loaded);
+
+ __ bind(&no_arguments_adaptor);
+ // Load caller's formal parameter count
+ __ lw(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ lw(scratch1,
+ FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+ __ li(caller_args_count_reg, Operand(info()->literal()->parameter_count()));
+
+ __ bind(&formal_parameter_count_loaded);
+ __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
+
+ Comment(";;; }");
+}
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ HInvokeFunction* hinstr = instr->hydrogen();
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->function()).is(a1));
DCHECK(instr->HasPointerMap());
- Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+ bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
+
+ if (is_tail_call) {
+ DCHECK(!info()->saves_caller_doubles());
+ ParameterCount actual(instr->arity());
+ // It is safe to use t0, t1 and t2 as scratch registers here given that
+ // we are not going to return to caller function anyway.
+ PrepareForTailCall(actual, t0, t1, t2);
+ }
+
+ Handle<JSFunction> known_function = hinstr->known_function();
if (known_function.is_null()) {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(instr->arity());
- __ InvokeFunction(a1, no_reg, count, CALL_FUNCTION, generator);
+ ParameterCount actual(instr->arity());
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(a1, no_reg, actual, flag, generator);
} else {
- CallKnownFunction(known_function,
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(), instr);
+ CallKnownFunction(known_function, hinstr->formal_parameter_count(),
+ instr->arity(), is_tail_call, instr);
}
}
@@ -3597,56 +3679,6 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
}
-void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
- DCHECK(ToRegister(instr->function()).is(a1));
- DCHECK(ToRegister(instr->result()).is(v0));
-
- // Change context.
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-
- // Always initialize new target and number of actual arguments.
- __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
- __ li(a0, Operand(instr->arity()));
-
- // Load the code entry address
- __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
- __ Call(at);
-
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
- HCallFunction* hinstr = instr->hydrogen();
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->function()).is(a1));
- DCHECK(ToRegister(instr->result()).is(v0));
-
- int arity = instr->arity();
- ConvertReceiverMode mode = hinstr->convert_mode();
- if (hinstr->HasVectorAndSlot()) {
- Register slot_register = ToRegister(instr->temp_slot());
- Register vector_register = ToRegister(instr->temp_vector());
- DCHECK(slot_register.is(a3));
- DCHECK(vector_register.is(a2));
-
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
- int index = vector->GetIndex(hinstr->slot());
-
- __ li(vector_register, vector);
- __ li(slot_register, Operand(Smi::FromInt(index)));
-
- Handle<Code> ic =
- CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- } else {
- __ li(a0, Operand(arity));
- CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->constructor()).is(a1));
@@ -4190,8 +4222,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register object = ToRegister(instr->object());
Register temp = ToRegister(instr->temp());
Label no_memento_found;
- __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
- ne, &no_memento_found);
+ __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
DeoptimizeIf(al, instr);
__ bind(&no_memento_found);
}
@@ -5140,14 +5171,6 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
}
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- DCHECK(ToRegister(instr->value()).is(a0));
- DCHECK(ToRegister(instr->result()).is(v0));
- __ push(a0);
- CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->value()).is(a3));
DCHECK(ToRegister(instr->result()).is(v0));
@@ -5535,13 +5558,6 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
__ bind(&done);
}
-
-void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
- Register context = ToRegister(instr->context());
- __ sw(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.h b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.h
index df72b2e93c..7a316e5957 100644
--- a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.h
@@ -212,11 +212,14 @@ class LCodeGen: public LCodeGenBase {
LInstruction* instr,
LOperand* context);
+ void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
+ Register scratch2, Register scratch3);
+
// Generate a direct call to a known function. Expects the function
// to be in a1.
void CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr);
+ bool is_tail_call, LInstruction* instr);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
diff --git a/deps/v8/src/crankshaft/mips/lithium-mips.cc b/deps/v8/src/crankshaft/mips/lithium-mips.cc
index a7c5488d04..71c34df516 100644
--- a/deps/v8/src/crankshaft/mips/lithium-mips.cc
+++ b/deps/v8/src/crankshaft/mips/lithium-mips.cc
@@ -255,27 +255,6 @@ void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
}
-void LCallFunction::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add(" ");
- function()->PrintTo(stream);
- if (hydrogen()->HasVectorAndSlot()) {
- stream->Add(" (type-feedback-vector ");
- temp_vector()->PrintTo(stream);
- stream->Add(" ");
- temp_slot()->PrintTo(stream);
- stream->Add(")");
- }
-}
-
-
-void LCallJSFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- function()->PrintTo(stream);
- stream->Add("#%d / ", arity());
-}
-
-
void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
for (int i = 0; i < InputCount(); i++) {
InputAt(i)->PrintTo(stream);
@@ -574,12 +553,7 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
- int argument_index_accumulator = 0;
- ZoneList<HValue*> objects_to_materialize(0, zone());
- instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator,
- &objects_to_materialize));
- return instr;
+ return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
}
@@ -907,22 +881,16 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
}
chunk_->AddInstruction(instr, current_block_);
- if (instr->IsCall() || instr->IsPrologue()) {
- HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
- if (hydrogen_val->HasObservableSideEffects()) {
- HSimulate* sim = HSimulate::cast(hydrogen_val->next());
- sim->ReplayEnvironment(current_block_->last_environment());
- hydrogen_value_for_lazy_bailout = sim;
- }
- LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
- bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
- chunk_->AddInstruction(bailout, current_block_);
- }
+ CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
}
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
- return new (zone()) LPrologue();
+ LInstruction* result = new (zone()) LPrologue();
+ if (info_->num_heap_slots() > 0) {
+ result = MarkAsCall(result, instr);
+ }
+ return result;
}
@@ -935,14 +903,14 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* value = instr->value();
Representation r = value->representation();
HType type = value->type();
- ToBooleanStub::Types expected = instr->expected_input_types();
- if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+ ToBooleanICStub::Types expected = instr->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
type.IsJSArray() || type.IsHeapNumber() || type.IsString();
LInstruction* branch = new(zone()) LBranch(UseRegister(value));
if (!easy_case &&
- ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+ ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
!expected.IsGeneric())) {
branch = AssignEnvironment(branch);
}
@@ -1064,16 +1032,6 @@ LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
}
-LInstruction* LChunkBuilder::DoCallJSFunction(
- HCallJSFunction* instr) {
- LOperand* function = UseFixed(instr->function(), a1);
-
- LCallJSFunction* result = new(zone()) LCallJSFunction(function);
-
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallWithDescriptor(
HCallWithDescriptor* instr) {
CallInterfaceDescriptor descriptor = instr->descriptor();
@@ -1097,6 +1055,9 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
descriptor, ops, zone());
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
return MarkAsCall(DefineFixed(result, v0), instr);
}
@@ -1105,6 +1066,9 @@ LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), a1);
LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
return MarkAsCall(DefineFixed(result, v0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1224,22 +1188,6 @@ LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
}
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* function = UseFixed(instr->function(), a1);
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(a3);
- vector = FixedTemp(a2);
- }
-
- LCallFunction* call =
- new (zone()) LCallFunction(context, function, slot, vector);
- return MarkAsCall(DefineFixed(call, v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
LOperand* context = UseFixed(instr->context(), cp);
return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), v0), instr);
@@ -1790,13 +1738,6 @@ LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
}
-LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
- HBoundsCheckBaseIndexInformation* instr) {
- UNREACHABLE();
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
// The control instruction marking the end of a block that completed
// abruptly (e.g., threw an exception). There is nothing specific to do.
@@ -2435,13 +2376,6 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
}
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
- LOperand* object = UseFixed(instr->value(), a0);
- LToFastProperties* result = new(zone()) LToFastProperties(object);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* value = UseFixed(instr->value(), a3);
@@ -2478,11 +2412,9 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
HEnvironment* outer = current_block_->last_environment();
outer->set_ast_id(instr->ReturnId());
HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(instr->closure(),
- instr->arguments_count(),
- instr->function(),
- undefined,
- instr->inlining_kind());
+ HEnvironment* inner = outer->CopyForInlining(
+ instr->closure(), instr->arguments_count(), instr->function(), undefined,
+ instr->inlining_kind(), instr->syntactic_tail_call_mode());
// Only replay binding of arguments object if it wasn't removed from graph.
if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object());
@@ -2543,13 +2475,6 @@ LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
return AssignPointerMap(result);
}
-
-
-LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->context());
- return new(zone()) LStoreFrameContext(context);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/crankshaft/mips/lithium-mips.h b/deps/v8/src/crankshaft/mips/lithium-mips.h
index 8b36c5d055..7d41093be1 100644
--- a/deps/v8/src/crankshaft/mips/lithium-mips.h
+++ b/deps/v8/src/crankshaft/mips/lithium-mips.h
@@ -29,9 +29,7 @@ class LCodeGen;
V(BitI) \
V(BoundsCheck) \
V(Branch) \
- V(CallJSFunction) \
V(CallWithDescriptor) \
- V(CallFunction) \
V(CallNewArray) \
V(CallRuntime) \
V(CheckArrayBufferNotNeutered) \
@@ -132,7 +130,6 @@ class LCodeGen;
V(StackCheck) \
V(StoreCodeEntry) \
V(StoreContextSlot) \
- V(StoreFrameContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -144,7 +141,6 @@ class LCodeGen;
V(SubI) \
V(TaggedToI) \
V(ThisFunction) \
- V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
V(Typeof) \
@@ -224,6 +220,13 @@ class LInstruction : public ZoneObject {
void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
bool IsCall() const { return IsCallBits::decode(bit_field_); }
+ void MarkAsSyntacticTailCall() {
+ bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
+ }
+ bool IsSyntacticTailCall() const {
+ return IsSyntacticTailCallBits::decode(bit_field_);
+ }
+
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
@@ -258,6 +261,8 @@ class LInstruction : public ZoneObject {
virtual LOperand* TempAt(int i) = 0;
class IsCallBits: public BitField<bool, 0, 1> {};
+ class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
+ };
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
@@ -532,6 +537,7 @@ class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+ DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
LOperand* function() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
@@ -1684,23 +1690,6 @@ class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
};
-class LCallJSFunction final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallJSFunction(LOperand* function) {
- inputs_[0] = function;
- }
-
- LOperand* function() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
- DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
public:
LCallWithDescriptor(CallInterfaceDescriptor descriptor,
@@ -1763,29 +1752,6 @@ class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
};
-class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
- public:
- LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
- LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = function;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
- DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
- int arity() const { return hydrogen()->argument_count() - 1; }
- void PrintDataTo(StringStream* stream) override;
-};
-
-
class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2385,19 +2351,6 @@ class LAllocate final : public LTemplateInstruction<1, 2, 2> {
};
-class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LToFastProperties(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
- DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
class LTypeof final : public LTemplateInstruction<1, 2, 0> {
public:
LTypeof(LOperand* context, LOperand* value) {
@@ -2514,18 +2467,6 @@ class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
};
-class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LStoreFrameContext(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
-};
-
-
class LChunkBuilder;
class LPlatformChunk final : public LChunk {
public:
diff --git a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
index 5937f97386..c7bbe9f07a 100644
--- a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
+++ b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
@@ -111,7 +111,7 @@ bool LCodeGen::GeneratePrologue() {
info()->set_prologue_offset(masm_->pc_offset());
if (NeedsEagerFrame()) {
if (info()->IsStub()) {
- __ StubPrologue();
+ __ StubPrologue(StackFrame::STUB);
} else {
__ Prologue(info()->GeneratePreagedPrologue());
}
@@ -253,19 +253,15 @@ bool LCodeGen::GenerateDeferredCode() {
DCHECK(!frame_is_built_);
DCHECK(info()->IsStub());
frame_is_built_ = true;
- __ MultiPush(cp.bit() | fp.bit() | ra.bit());
__ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
- __ push(scratch0());
- __ Daddu(fp, sp,
- Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ PushCommonFrame(scratch0());
Comment(";;; Deferred code");
}
code->Generate();
if (NeedsDeferredFrame()) {
Comment(";;; Destroy frame");
DCHECK(frame_is_built_);
- __ pop(at);
- __ MultiPop(cp.bit() | fp.bit() | ra.bit());
+ __ PopCommonFrame(scratch0());
frame_is_built_ = false;
}
__ jmp(code->exit());
@@ -300,7 +296,7 @@ bool LCodeGen::GenerateJumpTable() {
if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
Comment(";;; call deopt with frame");
- __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+ __ PushCommonFrame();
__ BranchAndLink(&needs_frame, USE_DELAY_SLOT);
__ li(t9, Operand(entry - base));
} else {
@@ -313,7 +309,7 @@ bool LCodeGen::GenerateJumpTable() {
if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
Comment(";;; call deopt with frame");
- __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+ __ PushCommonFrame();
__ BranchAndLink(&needs_frame);
} else {
__ BranchAndLink(&call_deopt_entry);
@@ -327,10 +323,9 @@ bool LCodeGen::GenerateJumpTable() {
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
- DCHECK(info()->IsStub());
__ li(at, Operand(Smi::FromInt(StackFrame::STUB)));
__ push(at);
- __ Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ DCHECK(info()->IsStub());
}
Comment(";;; call deopt");
@@ -1343,9 +1338,10 @@ void LCodeGen::DoMulS(LMulS* instr) {
switch (constant) {
case -1:
if (overflow) {
- __ DsubuAndCheckForOverflow(result, zero_reg, left, scratch);
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch,
- Operand(zero_reg));
+ Label no_overflow;
+ __ DsubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow);
+ DeoptimizeIf(al, instr);
+ __ bind(&no_overflow);
} else {
__ Dsubu(result, zero_reg, left);
}
@@ -1444,9 +1440,10 @@ void LCodeGen::DoMulI(LMulI* instr) {
switch (constant) {
case -1:
if (overflow) {
- __ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch,
- Operand(zero_reg));
+ Label no_overflow;
+ __ SubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow);
+ DeoptimizeIf(al, instr);
+ __ bind(&no_overflow);
} else {
__ Subu(result, zero_reg, left);
}
@@ -1652,13 +1649,13 @@ void LCodeGen::DoSubS(LSubS* instr) {
DCHECK(right->IsRegister() || right->IsConstantOperand());
__ Dsubu(ToRegister(result), ToRegister(left), ToOperand(right));
} else { // can_overflow.
- Register overflow = scratch0();
- Register scratch = scratch1();
+ Register scratch = scratch0();
+ Label no_overflow_label;
DCHECK(right->IsRegister() || right->IsConstantOperand());
- __ DsubuAndCheckForOverflow(ToRegister(result), ToRegister(left),
- ToOperand(right), overflow, scratch);
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
- Operand(zero_reg));
+ __ DsubBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
+ &no_overflow_label, scratch);
+ DeoptimizeIf(al, instr);
+ __ bind(&no_overflow_label);
}
}
@@ -1673,13 +1670,13 @@ void LCodeGen::DoSubI(LSubI* instr) {
DCHECK(right->IsRegister() || right->IsConstantOperand());
__ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
} else { // can_overflow.
- Register overflow = scratch0();
- Register scratch = scratch1();
+ Register scratch = scratch0();
+ Label no_overflow_label;
DCHECK(right->IsRegister() || right->IsConstantOperand());
- __ SubuAndCheckForOverflow(ToRegister(result), ToRegister(left),
- ToOperand(right), overflow, scratch);
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
- Operand(zero_reg));
+ __ SubBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
+ &no_overflow_label, scratch);
+ DeoptimizeIf(al, instr);
+ __ bind(&no_overflow_label);
}
}
@@ -1813,13 +1810,13 @@ void LCodeGen::DoAddS(LAddS* instr) {
DCHECK(right->IsRegister() || right->IsConstantOperand());
__ Daddu(ToRegister(result), ToRegister(left), ToOperand(right));
} else { // can_overflow.
- Register overflow = scratch0();
+ Label no_overflow_label;
Register scratch = scratch1();
DCHECK(right->IsRegister() || right->IsConstantOperand());
- __ DadduAndCheckForOverflow(ToRegister(result), ToRegister(left),
- ToOperand(right), overflow, scratch);
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
- Operand(zero_reg));
+ __ DaddBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
+ &no_overflow_label, scratch);
+ DeoptimizeIf(al, instr);
+ __ bind(&no_overflow_label);
}
}
@@ -1834,13 +1831,13 @@ void LCodeGen::DoAddI(LAddI* instr) {
DCHECK(right->IsRegister() || right->IsConstantOperand());
__ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
} else { // can_overflow.
- Register overflow = scratch0();
+ Label no_overflow_label;
Register scratch = scratch1();
DCHECK(right->IsRegister() || right->IsConstantOperand());
- __ AdduAndCheckForOverflow(ToRegister(result), ToRegister(left),
- ToOperand(right), overflow, scratch);
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
- Operand(zero_reg));
+ __ AddBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
+ &no_overflow_label, scratch);
+ DeoptimizeIf(al, instr);
+ __ bind(&no_overflow_label);
}
}
@@ -2083,29 +2080,30 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ ld(at, FieldMemOperand(reg, String::kLengthOffset));
EmitBranch(instr, ne, at, Operand(zero_reg));
} else {
- ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+ ToBooleanICStub::Types expected =
+ instr->hydrogen()->expected_input_types();
// Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
- if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+ if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
// undefined -> false.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
}
- if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+ if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
// Boolean -> its value.
__ LoadRoot(at, Heap::kTrueValueRootIndex);
__ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
__ LoadRoot(at, Heap::kFalseValueRootIndex);
__ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
}
- if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+ if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
// 'null' -> false.
__ LoadRoot(at, Heap::kNullValueRootIndex);
__ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
}
- if (expected.Contains(ToBooleanStub::SMI)) {
+ if (expected.Contains(ToBooleanICStub::SMI)) {
// Smis: 0 -> false, all other -> true.
__ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
__ JumpIfSmi(reg, instr->TrueLabel(chunk_));
@@ -2126,14 +2124,14 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
}
- if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+ if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
// spec object -> true.
__ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(instr->TrueLabel(chunk_),
ge, at, Operand(FIRST_JS_RECEIVER_TYPE));
}
- if (expected.Contains(ToBooleanStub::STRING)) {
+ if (expected.Contains(ToBooleanICStub::STRING)) {
// String value -> false iff empty.
Label not_string;
__ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
@@ -2144,14 +2142,14 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ bind(&not_string);
}
- if (expected.Contains(ToBooleanStub::SYMBOL)) {
+ if (expected.Contains(ToBooleanICStub::SYMBOL)) {
// Symbol value -> true.
const Register scratch = scratch1();
__ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
}
- if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+ if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
// SIMD value -> true.
const Register scratch = scratch1();
__ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
@@ -2159,7 +2157,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
Operand(SIMD128_VALUE_TYPE));
}
- if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
DoubleRegister dbl_scratch = double_scratch0();
Label not_heap_number;
@@ -2391,11 +2389,10 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->left()).is(a1));
DCHECK(ToRegister(instr->right()).is(a0));
- Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+ Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
-
- EmitBranch(instr, ComputeCompareCondition(instr->op()), v0,
- Operand(zero_reg));
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ EmitBranch(instr, eq, v0, Operand(at));
}
@@ -3169,17 +3166,20 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
if (instr->hydrogen()->from_inlined()) {
__ Dsubu(result, sp, 2 * kPointerSize);
- } else {
+ } else if (instr->hydrogen()->arguments_adaptor()) {
// Check if the calling frame is an arguments adaptor frame.
Label done, adapted;
__ ld(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
+ __ ld(result,
+ MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
// Result is the frame pointer for the frame if not adapted and for the real
// frame below the adaptor frame if adapted.
__ Movn(result, fp, temp); // Move only if temp is not equal to zero (ne).
__ Movz(result, scratch, temp); // Move only if temp is equal to zero (eq).
+ } else {
+ __ mov(result, fp);
}
}
@@ -3306,15 +3306,25 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ dsll(scratch, length, kPointerSizeLog2);
__ bind(&invoke);
+
+ InvokeFlag flag = CALL_FUNCTION;
+ if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
+ DCHECK(!info()->saves_caller_doubles());
+ // TODO(ishell): drop current frame before pushing arguments to the stack.
+ flag = JUMP_FUNCTION;
+ ParameterCount actual(a0);
+ // It is safe to use t0, t1 and t2 as scratch registers here given that
+ // we are not going to return to caller function anyway.
+ PrepareForTailCall(actual, t0, t1, t2);
+ }
+
DCHECK(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
+ SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
// The number of arguments is stored in receiver which is a0, as expected
// by InvokeFunction.
ParameterCount actual(receiver);
- __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
- safepoint_generator);
+ __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
}
@@ -3360,10 +3370,9 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
CallRuntime(Runtime::kDeclareGlobals, instr);
}
-
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr) {
+ bool is_tail_call, LInstruction* instr) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
bool can_invoke_directly =
@@ -3380,17 +3389,35 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
__ li(a0, Operand(arity));
+ bool is_self_call = function.is_identical_to(info()->closure());
+
// Invoke function.
- __ ld(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
- __ Call(at);
+ if (is_self_call) {
+ Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
+ if (is_tail_call) {
+ __ Jump(self, RelocInfo::CODE_TARGET);
+ } else {
+ __ Call(self, RelocInfo::CODE_TARGET);
+ }
+ } else {
+ __ ld(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
+ if (is_tail_call) {
+ __ Jump(at);
+ } else {
+ __ Call(at);
+ }
+ }
- // Set up deoptimization.
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ if (!is_tail_call) {
+ // Set up deoptimization.
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ }
} else {
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(arity);
+ ParameterCount actual(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(function_reg, expected, actual, flag, generator);
}
}
@@ -3743,22 +3770,75 @@ void LCodeGen::DoMathClz32(LMathClz32* instr) {
__ Clz(result, input);
}
+void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+#if DEBUG
+ if (actual.is_reg()) {
+ DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
+ } else {
+ DCHECK(!AreAliased(scratch1, scratch2, scratch3));
+ }
+#endif
+ if (FLAG_code_comments) {
+ if (actual.is_reg()) {
+ Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+ } else {
+ Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
+ }
+ }
+
+ // Check if next frame is an arguments adaptor frame.
+ Register caller_args_count_reg = scratch1;
+ Label no_arguments_adaptor, formal_parameter_count_loaded;
+ __ ld(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ld(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+ __ Branch(&no_arguments_adaptor, ne, scratch3,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Drop current frame and load arguments count from arguments adaptor frame.
+ __ mov(fp, scratch2);
+ __ ld(caller_args_count_reg,
+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(caller_args_count_reg);
+ __ Branch(&formal_parameter_count_loaded);
+
+ __ bind(&no_arguments_adaptor);
+ // Load caller's formal parameter count
+ __ li(caller_args_count_reg, Operand(info()->literal()->parameter_count()));
+
+ __ bind(&formal_parameter_count_loaded);
+ __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
+
+ Comment(";;; }");
+}
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ HInvokeFunction* hinstr = instr->hydrogen();
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->function()).is(a1));
DCHECK(instr->HasPointerMap());
- Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+ bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
+
+ if (is_tail_call) {
+ DCHECK(!info()->saves_caller_doubles());
+ ParameterCount actual(instr->arity());
+ // It is safe to use t0, t1 and t2 as scratch registers here given that
+ // we are not going to return to caller function anyway.
+ PrepareForTailCall(actual, t0, t1, t2);
+ }
+
+ Handle<JSFunction> known_function = hinstr->known_function();
if (known_function.is_null()) {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(instr->arity());
- __ InvokeFunction(a1, no_reg, count, CALL_FUNCTION, generator);
+ ParameterCount actual(instr->arity());
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(a1, no_reg, actual, flag, generator);
} else {
- CallKnownFunction(known_function,
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(), instr);
+ CallKnownFunction(known_function, hinstr->formal_parameter_count(),
+ instr->arity(), is_tail_call, instr);
}
}
@@ -3800,56 +3880,6 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
}
-void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
- DCHECK(ToRegister(instr->function()).is(a1));
- DCHECK(ToRegister(instr->result()).is(v0));
-
- // Change context.
- __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-
- // Always initialize new target and number of actual arguments.
- __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
- __ li(a0, Operand(instr->arity()));
-
- // Load the code entry address
- __ ld(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
- __ Call(at);
-
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
- HCallFunction* hinstr = instr->hydrogen();
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->function()).is(a1));
- DCHECK(ToRegister(instr->result()).is(v0));
-
- int arity = instr->arity();
- ConvertReceiverMode mode = hinstr->convert_mode();
- if (hinstr->HasVectorAndSlot()) {
- Register slot_register = ToRegister(instr->temp_slot());
- Register vector_register = ToRegister(instr->temp_vector());
- DCHECK(slot_register.is(a3));
- DCHECK(vector_register.is(a2));
-
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
- int index = vector->GetIndex(hinstr->slot());
-
- __ li(vector_register, vector);
- __ li(slot_register, Operand(Smi::FromInt(index)));
-
- Handle<Code> ic =
- CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- } else {
- __ li(a0, Operand(arity));
- CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->constructor()).is(a1));
@@ -4424,8 +4454,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register object = ToRegister(instr->object());
Register temp = ToRegister(instr->temp());
Label no_memento_found;
- __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
- ne, &no_memento_found);
+ __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
DeoptimizeIf(al, instr, Deoptimizer::kMementoFound);
__ bind(&no_memento_found);
}
@@ -5345,14 +5374,6 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
}
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- DCHECK(ToRegister(instr->value()).is(a0));
- DCHECK(ToRegister(instr->result()).is(v0));
- __ push(a0);
- CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->value()).is(a3));
DCHECK(ToRegister(instr->result()).is(v0));
@@ -5739,13 +5760,6 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
__ bind(&done);
}
-
-void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
- Register context = ToRegister(instr->context());
- __ sd(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h
index 2f1cefae76..4a700bd66c 100644
--- a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h
+++ b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h
@@ -214,11 +214,14 @@ class LCodeGen: public LCodeGenBase {
LInstruction* instr,
LOperand* context);
+ void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
+ Register scratch2, Register scratch3);
+
// Generate a direct call to a known function. Expects the function
// to be in a1.
void CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr);
+ bool is_tail_call, LInstruction* instr);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
diff --git a/deps/v8/src/crankshaft/mips64/lithium-mips64.cc b/deps/v8/src/crankshaft/mips64/lithium-mips64.cc
index b66e8ba18a..bcfbc249d2 100644
--- a/deps/v8/src/crankshaft/mips64/lithium-mips64.cc
+++ b/deps/v8/src/crankshaft/mips64/lithium-mips64.cc
@@ -255,27 +255,6 @@ void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
}
-void LCallFunction::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add(" ");
- function()->PrintTo(stream);
- if (hydrogen()->HasVectorAndSlot()) {
- stream->Add(" (type-feedback-vector ");
- temp_vector()->PrintTo(stream);
- stream->Add(" ");
- temp_slot()->PrintTo(stream);
- stream->Add(")");
- }
-}
-
-
-void LCallJSFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- function()->PrintTo(stream);
- stream->Add("#%d / ", arity());
-}
-
-
void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
for (int i = 0; i < InputCount(); i++) {
InputAt(i)->PrintTo(stream);
@@ -574,12 +553,7 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
- int argument_index_accumulator = 0;
- ZoneList<HValue*> objects_to_materialize(0, zone());
- instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator,
- &objects_to_materialize));
- return instr;
+ return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
}
@@ -907,22 +881,16 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
}
chunk_->AddInstruction(instr, current_block_);
- if (instr->IsCall() || instr->IsPrologue()) {
- HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
- if (hydrogen_val->HasObservableSideEffects()) {
- HSimulate* sim = HSimulate::cast(hydrogen_val->next());
- sim->ReplayEnvironment(current_block_->last_environment());
- hydrogen_value_for_lazy_bailout = sim;
- }
- LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
- bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
- chunk_->AddInstruction(bailout, current_block_);
- }
+ CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
}
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
- return new (zone()) LPrologue();
+ LInstruction* result = new (zone()) LPrologue();
+ if (info_->num_heap_slots() > 0) {
+ result = MarkAsCall(result, instr);
+ }
+ return result;
}
@@ -935,14 +903,14 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* value = instr->value();
Representation r = value->representation();
HType type = value->type();
- ToBooleanStub::Types expected = instr->expected_input_types();
- if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+ ToBooleanICStub::Types expected = instr->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
type.IsJSArray() || type.IsHeapNumber() || type.IsString();
LInstruction* branch = new(zone()) LBranch(UseRegister(value));
if (!easy_case &&
- ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+ ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
!expected.IsGeneric())) {
branch = AssignEnvironment(branch);
}
@@ -1064,16 +1032,6 @@ LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
}
-LInstruction* LChunkBuilder::DoCallJSFunction(
- HCallJSFunction* instr) {
- LOperand* function = UseFixed(instr->function(), a1);
-
- LCallJSFunction* result = new(zone()) LCallJSFunction(function);
-
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallWithDescriptor(
HCallWithDescriptor* instr) {
CallInterfaceDescriptor descriptor = instr->descriptor();
@@ -1097,6 +1055,9 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
descriptor, ops, zone());
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
return MarkAsCall(DefineFixed(result, v0), instr);
}
@@ -1105,6 +1066,9 @@ LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), a1);
LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
return MarkAsCall(DefineFixed(result, v0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1224,22 +1188,6 @@ LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
}
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* function = UseFixed(instr->function(), a1);
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(a3);
- vector = FixedTemp(a2);
- }
-
- LCallFunction* call =
- new (zone()) LCallFunction(context, function, slot, vector);
- return MarkAsCall(DefineFixed(call, v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
LOperand* context = UseFixed(instr->context(), cp);
return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), v0), instr);
@@ -1796,13 +1744,6 @@ return result;
}
-LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
- HBoundsCheckBaseIndexInformation* instr) {
- UNREACHABLE();
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
// The control instruction marking the end of a block that completed
// abruptly (e.g., threw an exception). There is nothing specific to do.
@@ -2440,13 +2381,6 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
}
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
- LOperand* object = UseFixed(instr->value(), a0);
- LToFastProperties* result = new(zone()) LToFastProperties(object);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* value = UseFixed(instr->value(), a3);
@@ -2483,11 +2417,9 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
HEnvironment* outer = current_block_->last_environment();
outer->set_ast_id(instr->ReturnId());
HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(instr->closure(),
- instr->arguments_count(),
- instr->function(),
- undefined,
- instr->inlining_kind());
+ HEnvironment* inner = outer->CopyForInlining(
+ instr->closure(), instr->arguments_count(), instr->function(), undefined,
+ instr->inlining_kind(), instr->syntactic_tail_call_mode());
// Only replay binding of arguments object if it wasn't removed from graph.
if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object());
@@ -2548,13 +2480,6 @@ LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
return AssignPointerMap(result);
}
-
-LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->context());
- return new(zone()) LStoreFrameContext(context);
-}
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/crankshaft/mips64/lithium-mips64.h b/deps/v8/src/crankshaft/mips64/lithium-mips64.h
index 8d2324f717..41cf93c2a4 100644
--- a/deps/v8/src/crankshaft/mips64/lithium-mips64.h
+++ b/deps/v8/src/crankshaft/mips64/lithium-mips64.h
@@ -31,9 +31,7 @@ class LCodeGen;
V(BitI) \
V(BoundsCheck) \
V(Branch) \
- V(CallJSFunction) \
V(CallWithDescriptor) \
- V(CallFunction) \
V(CallNewArray) \
V(CallRuntime) \
V(CheckArrayBufferNotNeutered) \
@@ -134,7 +132,6 @@ class LCodeGen;
V(StackCheck) \
V(StoreCodeEntry) \
V(StoreContextSlot) \
- V(StoreFrameContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -147,7 +144,6 @@ class LCodeGen;
V(SubS) \
V(TaggedToI) \
V(ThisFunction) \
- V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
V(Typeof) \
@@ -227,6 +223,13 @@ class LInstruction : public ZoneObject {
void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
bool IsCall() const { return IsCallBits::decode(bit_field_); }
+ void MarkAsSyntacticTailCall() {
+ bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
+ }
+ bool IsSyntacticTailCall() const {
+ return IsSyntacticTailCallBits::decode(bit_field_);
+ }
+
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
@@ -261,6 +264,8 @@ class LInstruction : public ZoneObject {
virtual LOperand* TempAt(int i) = 0;
class IsCallBits: public BitField<bool, 0, 1> {};
+ class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
+ };
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
@@ -535,6 +540,7 @@ class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+ DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
LOperand* function() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
@@ -1746,23 +1752,6 @@ class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
};
-class LCallJSFunction final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallJSFunction(LOperand* function) {
- inputs_[0] = function;
- }
-
- LOperand* function() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
- DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
public:
LCallWithDescriptor(CallInterfaceDescriptor descriptor,
@@ -1825,29 +1814,6 @@ class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
};
-class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
- public:
- LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
- LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = function;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
- DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
- int arity() const { return hydrogen()->argument_count() - 1; }
- void PrintDataTo(StringStream* stream) override;
-};
-
-
class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2431,19 +2397,6 @@ class LAllocate final : public LTemplateInstruction<1, 2, 2> {
};
-class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LToFastProperties(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
- DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
class LTypeof final : public LTemplateInstruction<1, 2, 0> {
public:
LTypeof(LOperand* context, LOperand* value) {
@@ -2560,18 +2513,6 @@ class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
};
-class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LStoreFrameContext(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
-};
-
-
class LChunkBuilder;
class LPlatformChunk final : public LChunk {
public:
diff --git a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
index 9cf1db64bc..d5d01043dd 100644
--- a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
+++ b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
@@ -123,7 +123,7 @@ bool LCodeGen::GeneratePrologue() {
info()->set_prologue_offset(prologue_offset);
if (NeedsEagerFrame()) {
if (info()->IsStub()) {
- __ StubPrologue(ip, prologue_offset);
+ __ StubPrologue(StackFrame::STUB, ip, prologue_offset);
} else {
__ Prologue(info()->GeneratePreagedPrologue(), ip, prologue_offset);
}
@@ -265,15 +265,14 @@ bool LCodeGen::GenerateDeferredCode() {
DCHECK(info()->IsStub());
frame_is_built_ = true;
__ LoadSmiLiteral(scratch0(), Smi::FromInt(StackFrame::STUB));
- __ PushFixedFrame(scratch0());
- __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ PushCommonFrame(scratch0());
Comment(";;; Deferred code");
}
code->Generate();
if (NeedsDeferredFrame()) {
Comment(";;; Destroy frame");
DCHECK(frame_is_built_);
- __ PopFixedFrame(ip);
+ __ PopCommonFrame(scratch0());
frame_is_built_ = false;
}
__ b(code->exit());
@@ -322,7 +321,7 @@ bool LCodeGen::GenerateJumpTable() {
if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
Comment(";;; call deopt with frame");
- __ PushFixedFrame();
+ __ PushCommonFrame();
__ b(&needs_frame, SetLK);
} else {
__ b(&call_deopt_entry, SetLK);
@@ -336,10 +335,9 @@ bool LCodeGen::GenerateJumpTable() {
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
- DCHECK(info()->IsStub());
__ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB));
__ push(ip);
- __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ DCHECK(info()->IsStub());
}
Comment(";;; call deopt");
@@ -2103,29 +2101,30 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ cmpi(ip, Operand::Zero());
EmitBranch(instr, ne);
} else {
- ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+ ToBooleanICStub::Types expected =
+ instr->hydrogen()->expected_input_types();
// Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
- if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+ if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
// undefined -> false.
__ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
__ beq(instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+ if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
// Boolean -> its value.
__ CompareRoot(reg, Heap::kTrueValueRootIndex);
__ beq(instr->TrueLabel(chunk_));
__ CompareRoot(reg, Heap::kFalseValueRootIndex);
__ beq(instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+ if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
// 'null' -> false.
__ CompareRoot(reg, Heap::kNullValueRootIndex);
__ beq(instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::SMI)) {
+ if (expected.Contains(ToBooleanICStub::SMI)) {
// Smis: 0 -> false, all other -> true.
__ cmpi(reg, Operand::Zero());
__ beq(instr->FalseLabel(chunk_));
@@ -2148,13 +2147,13 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
}
- if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+ if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
// spec object -> true.
__ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
__ bge(instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::STRING)) {
+ if (expected.Contains(ToBooleanICStub::STRING)) {
// String value -> false iff empty.
Label not_string;
__ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
@@ -2166,20 +2165,20 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ bind(&not_string);
}
- if (expected.Contains(ToBooleanStub::SYMBOL)) {
+ if (expected.Contains(ToBooleanICStub::SYMBOL)) {
// Symbol value -> true.
__ CompareInstanceType(map, ip, SYMBOL_TYPE);
__ beq(instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+ if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
// SIMD value -> true.
Label not_simd;
__ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
__ beq(instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
Label not_heap_number;
__ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
@@ -2423,11 +2422,10 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->left()).is(r4));
DCHECK(ToRegister(instr->right()).is(r3));
- Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+ Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
- __ cmpi(r3, Operand::Zero());
-
- EmitBranch(instr, ComputeCompareCondition(instr->op()));
+ __ CompareRoot(r3, Heap::kTrueValueRootIndex);
+ EmitBranch(instr, eq);
}
@@ -3209,11 +3207,12 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
if (instr->hydrogen()->from_inlined()) {
__ subi(result, sp, Operand(2 * kPointerSize));
- } else {
+ } else if (instr->hydrogen()->arguments_adaptor()) {
// Check if the calling frame is an arguments adaptor frame.
__ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(result,
- MemOperand(scratch, StandardFrameConstants::kContextOffset));
+ __ LoadP(
+ result,
+ MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
__ CmpSmiLiteral(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
// Result is the frame pointer for the frame if not adapted and for the real
@@ -3230,6 +3229,8 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
__ mr(result, scratch);
__ bind(&done);
}
+ } else {
+ __ mr(result, fp);
}
}
@@ -3349,14 +3350,26 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ bdnz(&loop);
__ bind(&invoke);
+
+ InvokeFlag flag = CALL_FUNCTION;
+ if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
+ DCHECK(!info()->saves_caller_doubles());
+ // TODO(ishell): drop current frame before pushing arguments to the stack.
+ flag = JUMP_FUNCTION;
+ ParameterCount actual(r3);
+ // It is safe to use r6, r7 and r8 as scratch registers here given that
+ // 1) we are not going to return to caller function anyway,
+ // 2) r6 (new.target) will be initialized below.
+ PrepareForTailCall(actual, r6, r7, r8);
+ }
+
DCHECK(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
// The number of arguments is stored in receiver which is r3, as expected
// by InvokeFunction.
ParameterCount actual(receiver);
- __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
- safepoint_generator);
+ __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
}
@@ -3401,10 +3414,9 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
CallRuntime(Runtime::kDeclareGlobals, instr);
}
-
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr) {
+ bool is_tail_call, LInstruction* instr) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
bool can_invoke_directly =
@@ -3426,19 +3438,31 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
// Invoke function.
if (is_self_call) {
- __ CallSelf();
+ Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
+ if (is_tail_call) {
+ __ Jump(self, RelocInfo::CODE_TARGET);
+ } else {
+ __ Call(self, RelocInfo::CODE_TARGET);
+ }
} else {
__ LoadP(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
- __ CallJSEntry(ip);
+ if (is_tail_call) {
+ __ JumpToJSEntry(ip);
+ } else {
+ __ CallJSEntry(ip);
+ }
}
- // Set up deoptimization.
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ if (!is_tail_call) {
+ // Set up deoptimization.
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ }
} else {
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(arity);
+ ParameterCount actual(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(function_reg, expected, actual, flag, generator);
}
}
@@ -3767,22 +3791,77 @@ void LCodeGen::DoMathClz32(LMathClz32* instr) {
__ cntlzw_(result, input);
}
+void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+#if DEBUG
+ if (actual.is_reg()) {
+ DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
+ } else {
+ DCHECK(!AreAliased(scratch1, scratch2, scratch3));
+ }
+#endif
+ if (FLAG_code_comments) {
+ if (actual.is_reg()) {
+ Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+ } else {
+ Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
+ }
+ }
+
+ // Check if next frame is an arguments adaptor frame.
+ Register caller_args_count_reg = scratch1;
+ Label no_arguments_adaptor, formal_parameter_count_loaded;
+ __ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(scratch3,
+ MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ bne(&no_arguments_adaptor);
+
+ // Drop current frame and load arguments count from arguments adaptor frame.
+ __ mr(fp, scratch2);
+ __ LoadP(caller_args_count_reg,
+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(caller_args_count_reg);
+ __ b(&formal_parameter_count_loaded);
+
+ __ bind(&no_arguments_adaptor);
+ // Load caller's formal parameter count
+ __ mov(caller_args_count_reg, Operand(info()->literal()->parameter_count()));
+
+ __ bind(&formal_parameter_count_loaded);
+ __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
+
+ Comment(";;; }");
+}
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ HInvokeFunction* hinstr = instr->hydrogen();
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->function()).is(r4));
DCHECK(instr->HasPointerMap());
- Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+ bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
+
+ if (is_tail_call) {
+ DCHECK(!info()->saves_caller_doubles());
+ ParameterCount actual(instr->arity());
+ // It is safe to use r6, r7 and r8 as scratch registers here given that
+ // 1) we are not going to return to caller function anyway,
+ // 2) r6 (new.target) will be initialized below.
+ PrepareForTailCall(actual, r6, r7, r8);
+ }
+
+ Handle<JSFunction> known_function = hinstr->known_function();
if (known_function.is_null()) {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(instr->arity());
- __ InvokeFunction(r4, no_reg, count, CALL_FUNCTION, generator);
+ ParameterCount actual(instr->arity());
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(r4, no_reg, actual, flag, generator);
} else {
- CallKnownFunction(known_function,
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(), instr);
+ CallKnownFunction(known_function, hinstr->formal_parameter_count(),
+ instr->arity(), is_tail_call, instr);
}
}
@@ -3824,67 +3903,6 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
}
-void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
- DCHECK(ToRegister(instr->function()).is(r4));
- DCHECK(ToRegister(instr->result()).is(r3));
-
- // Change context.
- __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
-
- // Always initialize new target and number of actual arguments.
- __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
- __ mov(r3, Operand(instr->arity()));
-
- bool is_self_call = false;
- if (instr->hydrogen()->function()->IsConstant()) {
- HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
- Handle<JSFunction> jsfun =
- Handle<JSFunction>::cast(fun_const->handle(isolate()));
- is_self_call = jsfun.is_identical_to(info()->closure());
- }
-
- if (is_self_call) {
- __ CallSelf();
- } else {
- __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
- __ CallJSEntry(ip);
- }
-
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
- HCallFunction* hinstr = instr->hydrogen();
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->function()).is(r4));
- DCHECK(ToRegister(instr->result()).is(r3));
-
- int arity = instr->arity();
- ConvertReceiverMode mode = hinstr->convert_mode();
- if (hinstr->HasVectorAndSlot()) {
- Register slot_register = ToRegister(instr->temp_slot());
- Register vector_register = ToRegister(instr->temp_vector());
- DCHECK(slot_register.is(r6));
- DCHECK(vector_register.is(r5));
-
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
- int index = vector->GetIndex(hinstr->slot());
-
- __ Move(vector_register, vector);
- __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
-
- Handle<Code> ic =
- CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- } else {
- __ mov(r3, Operand(arity));
- CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->constructor()).is(r4));
@@ -4470,9 +4488,10 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register object = ToRegister(instr->object());
- Register temp = ToRegister(instr->temp());
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
Label no_memento_found;
- __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
+ __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
__ bind(&no_memento_found);
}
@@ -5390,13 +5409,6 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
}
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- DCHECK(ToRegister(instr->value()).is(r3));
- __ push(r3);
- CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->value()).is(r6));
DCHECK(ToRegister(instr->result()).is(r3));
@@ -5743,13 +5755,7 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
__ bind(&done);
}
-
-void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
- Register context = ToRegister(instr->context());
- __ StoreP(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
#undef __
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h
index 1b72bf82dc..28f168036c 100644
--- a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h
+++ b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h
@@ -194,11 +194,14 @@ class LCodeGen : public LCodeGenBase {
void CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
LInstruction* instr, LOperand* context);
+ void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
+ Register scratch2, Register scratch3);
+
// Generate a direct call to a known function. Expects the function
// to be in r4.
void CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr);
+ bool is_tail_call, LInstruction* instr);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
diff --git a/deps/v8/src/crankshaft/ppc/lithium-ppc.cc b/deps/v8/src/crankshaft/ppc/lithium-ppc.cc
index 2a04d9926c..b7397869bb 100644
--- a/deps/v8/src/crankshaft/ppc/lithium-ppc.cc
+++ b/deps/v8/src/crankshaft/ppc/lithium-ppc.cc
@@ -261,27 +261,6 @@ void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
}
-void LCallFunction::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add(" ");
- function()->PrintTo(stream);
- if (hydrogen()->HasVectorAndSlot()) {
- stream->Add(" (type-feedback-vector ");
- temp_vector()->PrintTo(stream);
- stream->Add(" ");
- temp_slot()->PrintTo(stream);
- stream->Add(")");
- }
-}
-
-
-void LCallJSFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- function()->PrintTo(stream);
- stream->Add("#%d / ", arity());
-}
-
-
void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
for (int i = 0; i < InputCount(); i++) {
InputAt(i)->PrintTo(stream);
@@ -580,11 +559,7 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
- int argument_index_accumulator = 0;
- ZoneList<HValue*> objects_to_materialize(0, zone());
- instr->set_environment(CreateEnvironment(
- hydrogen_env, &argument_index_accumulator, &objects_to_materialize));
- return instr;
+ return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
}
@@ -911,22 +886,16 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
}
chunk_->AddInstruction(instr, current_block_);
- if (instr->IsCall() || instr->IsPrologue()) {
- HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
- if (hydrogen_val->HasObservableSideEffects()) {
- HSimulate* sim = HSimulate::cast(hydrogen_val->next());
- sim->ReplayEnvironment(current_block_->last_environment());
- hydrogen_value_for_lazy_bailout = sim;
- }
- LInstruction* bailout = AssignEnvironment(new (zone()) LLazyBailout());
- bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
- chunk_->AddInstruction(bailout, current_block_);
- }
+ CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
}
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
- return new (zone()) LPrologue();
+ LInstruction* result = new (zone()) LPrologue();
+ if (info_->num_heap_slots() > 0) {
+ result = MarkAsCall(result, instr);
+ }
+ return result;
}
@@ -939,14 +908,14 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* value = instr->value();
Representation r = value->representation();
HType type = value->type();
- ToBooleanStub::Types expected = instr->expected_input_types();
- if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+ ToBooleanICStub::Types expected = instr->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
type.IsJSArray() || type.IsHeapNumber() || type.IsString();
LInstruction* branch = new (zone()) LBranch(UseRegister(value));
if (!easy_case &&
- ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+ ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
!expected.IsGeneric())) {
branch = AssignEnvironment(branch);
}
@@ -1070,15 +1039,6 @@ LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
}
-LInstruction* LChunkBuilder::DoCallJSFunction(HCallJSFunction* instr) {
- LOperand* function = UseFixed(instr->function(), r4);
-
- LCallJSFunction* result = new (zone()) LCallJSFunction(function);
-
- return MarkAsCall(DefineFixed(result, r3), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallWithDescriptor(HCallWithDescriptor* instr) {
CallInterfaceDescriptor descriptor = instr->descriptor();
@@ -1101,6 +1061,9 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(HCallWithDescriptor* instr) {
LCallWithDescriptor* result =
new (zone()) LCallWithDescriptor(descriptor, ops, zone());
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
return MarkAsCall(DefineFixed(result, r3), instr);
}
@@ -1109,6 +1072,9 @@ LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), r4);
LInvokeFunction* result = new (zone()) LInvokeFunction(context, function);
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
return MarkAsCall(DefineFixed(result, r3), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1225,22 +1191,6 @@ LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
}
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* function = UseFixed(instr->function(), r4);
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(r6);
- vector = FixedTemp(r5);
- }
-
- LCallFunction* call =
- new (zone()) LCallFunction(context, function, slot, vector);
- return MarkAsCall(DefineFixed(call, r3), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
LOperand* context = UseFixed(instr->context(), cp);
return MarkAsCall(DefineFixed(new (zone()) LCallRuntime(context), r3), instr);
@@ -1806,13 +1756,6 @@ LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
}
-LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
- HBoundsCheckBaseIndexInformation* instr) {
- UNREACHABLE();
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
// The control instruction marking the end of a block that completed
// abruptly (e.g., threw an exception). There is nothing specific to do.
@@ -2263,9 +2206,10 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
LInstruction* LChunkBuilder::DoTrapAllocationMemento(
HTrapAllocationMemento* instr) {
LOperand* object = UseRegister(instr->object());
- LOperand* temp = TempRegister();
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
LTrapAllocationMemento* result =
- new (zone()) LTrapAllocationMemento(object, temp);
+ new (zone()) LTrapAllocationMemento(object, temp1, temp2);
return AssignEnvironment(result);
}
@@ -2441,13 +2385,6 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
}
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
- LOperand* object = UseFixed(instr->value(), r3);
- LToFastProperties* result = new (zone()) LToFastProperties(object);
- return MarkAsCall(DefineFixed(result, r3), instr);
-}
-
-
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* value = UseFixed(instr->value(), r6);
@@ -2486,7 +2423,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner = outer->CopyForInlining(
instr->closure(), instr->arguments_count(), instr->function(), undefined,
- instr->inlining_kind());
+ instr->inlining_kind(), instr->syntactic_tail_call_mode());
// Only replay binding of arguments object if it wasn't removed from graph.
if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object());
@@ -2548,11 +2485,5 @@ LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
return AssignPointerMap(result);
}
-
-LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->context());
- return new (zone()) LStoreFrameContext(context);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/crankshaft/ppc/lithium-ppc.h b/deps/v8/src/crankshaft/ppc/lithium-ppc.h
index 0dfde053b7..c39f6204f8 100644
--- a/deps/v8/src/crankshaft/ppc/lithium-ppc.h
+++ b/deps/v8/src/crankshaft/ppc/lithium-ppc.h
@@ -29,9 +29,7 @@ class LCodeGen;
V(BitI) \
V(BoundsCheck) \
V(Branch) \
- V(CallJSFunction) \
V(CallWithDescriptor) \
- V(CallFunction) \
V(CallNewArray) \
V(CallRuntime) \
V(CheckArrayBufferNotNeutered) \
@@ -133,7 +131,6 @@ class LCodeGen;
V(StackCheck) \
V(StoreCodeEntry) \
V(StoreContextSlot) \
- V(StoreFrameContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -146,7 +143,6 @@ class LCodeGen;
V(RSubI) \
V(TaggedToI) \
V(ThisFunction) \
- V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
V(Typeof) \
@@ -223,6 +219,13 @@ class LInstruction : public ZoneObject {
void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
bool IsCall() const { return IsCallBits::decode(bit_field_); }
+ void MarkAsSyntacticTailCall() {
+ bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
+ }
+ bool IsSyntacticTailCall() const {
+ return IsSyntacticTailCallBits::decode(bit_field_);
+ }
+
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
@@ -257,6 +260,8 @@ class LInstruction : public ZoneObject {
virtual LOperand* TempAt(int i) = 0;
class IsCallBits : public BitField<bool, 0, 1> {};
+ class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
+ };
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
@@ -525,6 +530,7 @@ class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+ DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
LOperand* function() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
@@ -1659,21 +1665,6 @@ class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
};
-class LCallJSFunction final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallJSFunction(LOperand* function) { inputs_[0] = function; }
-
- LOperand* function() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
- DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
public:
LCallWithDescriptor(CallInterfaceDescriptor descriptor,
@@ -1736,29 +1727,6 @@ class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
};
-class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
- public:
- LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
- LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = function;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
- DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
- int arity() const { return hydrogen()->argument_count() - 1; }
- void PrintDataTo(StringStream* stream) override;
-};
-
-
class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2090,16 +2058,17 @@ class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 1> {
ElementsKind to_kind() { return hydrogen()->to_kind(); }
};
-
-class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 1> {
+class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 2> {
public:
- LTrapAllocationMemento(LOperand* object, LOperand* temp) {
+ LTrapAllocationMemento(LOperand* object, LOperand* temp1, LOperand* temp2) {
inputs_[0] = object;
- temps_[0] = temp;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
}
LOperand* object() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento, "trap-allocation-memento")
};
@@ -2326,17 +2295,6 @@ class LAllocate final : public LTemplateInstruction<1, 2, 2> {
};
-class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LToFastProperties(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
- DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
class LTypeof final : public LTemplateInstruction<1, 2, 0> {
public:
LTypeof(LOperand* context, LOperand* value) {
@@ -2445,16 +2403,6 @@ class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
};
-class LStoreFrameContext : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LStoreFrameContext(LOperand* context) { inputs_[0] = context; }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
-};
-
-
class LChunkBuilder;
class LPlatformChunk final : public LChunk {
public:
diff --git a/deps/v8/src/crankshaft/s390/OWNERS b/deps/v8/src/crankshaft/s390/OWNERS
new file mode 100644
index 0000000000..eb007cb908
--- /dev/null
+++ b/deps/v8/src/crankshaft/s390/OWNERS
@@ -0,0 +1,5 @@
+jyan@ca.ibm.com
+dstence@us.ibm.com
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
diff --git a/deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc b/deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc
new file mode 100644
index 0000000000..689f4bc1ae
--- /dev/null
+++ b/deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc
@@ -0,0 +1,5668 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/s390/lithium-codegen-s390.h"
+
+#include "src/base/bits.h"
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/s390/lithium-gap-resolver-s390.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
+#include "src/profiler/cpu-profiler.h"
+
+namespace v8 {
+namespace internal {
+
+class SafepointGenerator final : public CallWrapper {
+ public:
+ SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers,
+ Safepoint::DeoptMode mode)
+ : codegen_(codegen), pointers_(pointers), deopt_mode_(mode) {}
+ virtual ~SafepointGenerator() {}
+
+ void BeforeCall(int call_size) const override {}
+
+ void AfterCall() const override {
+ codegen_->RecordSafepoint(pointers_, deopt_mode_);
+ }
+
+ private:
+ LCodeGen* codegen_;
+ LPointerMap* pointers_;
+ Safepoint::DeoptMode deopt_mode_;
+};
+
+#define __ masm()->
+
+bool LCodeGen::GenerateCode() {
+ LPhase phase("Z_Code generation", chunk());
+ DCHECK(is_unused());
+ status_ = GENERATING;
+
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // NONE indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done in GeneratePrologue).
+ FrameScope frame_scope(masm_, StackFrame::NONE);
+
+ return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
+ GenerateJumpTable() && GenerateSafepointTable();
+}
+
+void LCodeGen::FinishCode(Handle<Code> code) {
+ DCHECK(is_done());
+ code->set_stack_slots(GetTotalFrameSlotCount());
+ code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+ PopulateDeoptimizationData(code);
+}
+
+void LCodeGen::SaveCallerDoubles() {
+ DCHECK(info()->saves_caller_doubles());
+ DCHECK(NeedsEagerFrame());
+ Comment(";;; Save clobbered callee double registers");
+ int count = 0;
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ while (!save_iterator.Done()) {
+ __ std(DoubleRegister::from_code(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+}
+
+void LCodeGen::RestoreCallerDoubles() {
+ DCHECK(info()->saves_caller_doubles());
+ DCHECK(NeedsEagerFrame());
+ Comment(";;; Restore clobbered callee double registers");
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ int count = 0;
+ while (!save_iterator.Done()) {
+ __ ld(DoubleRegister::from_code(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+}
+
+bool LCodeGen::GeneratePrologue() {
+ DCHECK(is_generating());
+
+ if (info()->IsOptimizing()) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+ // r3: Callee's JS function.
+ // cp: Callee's context.
+ // fp: Caller's frame pointer.
+ // lr: Caller's pc.
+ // ip: Our own function entry (required by the prologue)
+ }
+
+ int prologue_offset = masm_->pc_offset();
+
+ if (prologue_offset) {
+ // Prologue logic requires its starting address in ip and the
+ // corresponding offset from the function entry. Need to add
+ // 4 bytes for the size of AHI/AGHI that AddP expands into.
+ prologue_offset += sizeof(FourByteInstr);
+ __ AddP(ip, ip, Operand(prologue_offset));
+ }
+ info()->set_prologue_offset(prologue_offset);
+ if (NeedsEagerFrame()) {
+ if (info()->IsStub()) {
+ __ StubPrologue(StackFrame::STUB, ip, prologue_offset);
+ } else {
+ __ Prologue(info()->GeneratePreagedPrologue(), ip, prologue_offset);
+ }
+ frame_is_built_ = true;
+ }
+
+ // Reserve space for the stack slots needed by the code.
+ int slots = GetStackSlotCount();
+ if (slots > 0) {
+ __ lay(sp, MemOperand(sp, -(slots * kPointerSize)));
+ if (FLAG_debug_code) {
+ __ Push(r2, r3);
+ __ mov(r2, Operand(slots * kPointerSize));
+ __ mov(r3, Operand(kSlotsZapValue));
+ Label loop;
+ __ bind(&loop);
+ __ StoreP(r3, MemOperand(sp, r2, kPointerSize));
+ __ lay(r2, MemOperand(r2, -kPointerSize));
+ __ CmpP(r2, Operand::Zero());
+ __ bne(&loop);
+ __ Pop(r2, r3);
+ }
+ }
+
+ if (info()->saves_caller_doubles()) {
+ SaveCallerDoubles();
+ }
+ return !is_aborted();
+}
+
+void LCodeGen::DoPrologue(LPrologue* instr) {
+ Comment(";;; Prologue begin");
+
+ // Possibly allocate a local context.
+ if (info()->scope()->num_heap_slots() > 0) {
+ Comment(";;; Allocate local context");
+ bool need_write_barrier = true;
+ // Argument to NewContext is the function, which is in r3.
+ int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
+ if (info()->scope()->is_script_scope()) {
+ __ push(r3);
+ __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+ __ CallRuntime(Runtime::kNewScriptContext);
+ deopt_mode = Safepoint::kLazyDeopt;
+ } else if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
+ __ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
+ } else {
+ __ push(r3);
+ __ CallRuntime(Runtime::kNewFunctionContext);
+ }
+ RecordSafepoint(deopt_mode);
+
+ // Context is returned in both r2 and cp. It replaces the context
+ // passed to us. It's saved in the stack and kept live in cp.
+ __ LoadRR(cp, r2);
+ __ StoreP(r2, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Copy any necessary parameters into the context.
+ int num_parameters = scope()->num_parameters();
+ int first_parameter = scope()->has_this_declaration() ? -1 : 0;
+ for (int i = first_parameter; i < num_parameters; i++) {
+ Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
+ if (var->IsContextSlot()) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ LoadP(r2, MemOperand(fp, parameter_offset));
+ // Store it in the context.
+ MemOperand target = ContextMemOperand(cp, var->index());
+ __ StoreP(r2, target);
+ // Update the write barrier. This clobbers r5 and r2.
+ if (need_write_barrier) {
+ __ RecordWriteContextSlot(cp, target.offset(), r2, r5,
+ GetLinkRegisterState(), kSaveFPRegs);
+ } else if (FLAG_debug_code) {
+ Label done;
+ __ JumpIfInNewSpace(cp, r2, &done);
+ __ Abort(kExpectedNewSpaceObject);
+ __ bind(&done);
+ }
+ }
+ }
+ Comment(";;; End allocate local context");
+ }
+
+ Comment(";;; Prologue end");
+}
+
+void LCodeGen::GenerateOsrPrologue() {
+ // Generate the OSR entry prologue at the first unknown OSR value, or if there
+ // are none, at the OSR entrypoint instruction.
+ if (osr_pc_offset_ >= 0) return;
+
+ osr_pc_offset_ = masm()->pc_offset();
+
+ // Adjust the frame size, subsuming the unoptimized frame into the
+ // optimized frame.
+ int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+ DCHECK(slots >= 0);
+ __ lay(sp, MemOperand(sp, -slots * kPointerSize));
+}
+
+void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (instr->IsCall()) {
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ }
+ if (!instr->IsLazyBailout() && !instr->IsGap()) {
+ safepoints_.BumpLastLazySafepointIndex();
+ }
+}
+
+bool LCodeGen::GenerateDeferredCode() {
+ DCHECK(is_generating());
+ if (deferred_.length() > 0) {
+ for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+ LDeferredCode* code = deferred_[i];
+
+ HValue* value =
+ instructions_->at(code->instruction_index())->hydrogen_value();
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+
+ Comment(
+ ";;; <@%d,#%d> "
+ "-------------------- Deferred %s --------------------",
+ code->instruction_index(), code->instr()->hydrogen_value()->id(),
+ code->instr()->Mnemonic());
+ __ bind(code->entry());
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Build frame");
+ DCHECK(!frame_is_built_);
+ DCHECK(info()->IsStub());
+ frame_is_built_ = true;
+ __ LoadSmiLiteral(scratch0(), Smi::FromInt(StackFrame::STUB));
+ __ PushCommonFrame(scratch0());
+ Comment(";;; Deferred code");
+ }
+ code->Generate();
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Destroy frame");
+ DCHECK(frame_is_built_);
+ __ PopCommonFrame(scratch0());
+ frame_is_built_ = false;
+ }
+ __ b(code->exit());
+ }
+ }
+
+ return !is_aborted();
+}
+
+bool LCodeGen::GenerateJumpTable() {
+ // Check that the jump table is accessible from everywhere in the function
+ // code, i.e. that offsets in halfworld to the table can be encoded in the
+ // 32-bit signed immediate of a branch instruction.
+ // To simplify we consider the code size from the first instruction to the
+ // end of the jump table. We also don't consider the pc load delta.
+ // Each entry in the jump table generates one instruction and inlines one
+ // 32bit data after it.
+ // TODO(joransiu): The Int24 condition can likely be relaxed for S390
+ if (!is_int24(masm()->pc_offset() + jump_table_.length() * 7)) {
+ Abort(kGeneratedCodeIsTooLarge);
+ }
+
+ if (jump_table_.length() > 0) {
+ Label needs_frame, call_deopt_entry;
+
+ Comment(";;; -------------------- Jump table --------------------");
+ Address base = jump_table_[0].address;
+
+ Register entry_offset = scratch0();
+
+ int length = jump_table_.length();
+ for (int i = 0; i < length; i++) {
+ Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
+ __ bind(&table_entry->label);
+
+ DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
+ Address entry = table_entry->address;
+ DeoptComment(table_entry->deopt_info);
+
+ // Second-level deopt table entries are contiguous and small, so instead
+ // of loading the full, absolute address of each one, load an immediate
+ // offset which will be added to the base address later.
+ __ mov(entry_offset, Operand(entry - base));
+
+ if (table_entry->needs_frame) {
+ DCHECK(!info()->saves_caller_doubles());
+ Comment(";;; call deopt with frame");
+ __ PushCommonFrame();
+ __ b(r14, &needs_frame);
+ } else {
+ __ b(r14, &call_deopt_entry);
+ }
+ info()->LogDeoptCallPosition(masm()->pc_offset(),
+ table_entry->deopt_info.inlining_id);
+ }
+
+ if (needs_frame.is_linked()) {
+ __ bind(&needs_frame);
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ DCHECK(info()->IsStub());
+ __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB));
+ __ push(ip);
+ DCHECK(info()->IsStub());
+ }
+
+ Comment(";;; call deopt");
+ __ bind(&call_deopt_entry);
+
+ if (info()->saves_caller_doubles()) {
+ DCHECK(info()->IsStub());
+ RestoreCallerDoubles();
+ }
+
+ // Add the base address to the offset previously loaded in entry_offset.
+ __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
+ __ AddP(ip, entry_offset, ip);
+ __ Jump(ip);
+ }
+
+ // The deoptimization jump table is the last part of the instruction
+ // sequence. Mark the generated code as done unless we bailed out.
+ if (!is_aborted()) status_ = DONE;
+ return !is_aborted();
+}
+
+bool LCodeGen::GenerateSafepointTable() {
+ DCHECK(is_done());
+ safepoints_.Emit(masm(), GetTotalFrameSlotCount());
+ return !is_aborted();
+}
+
+Register LCodeGen::ToRegister(int code) const {
+ return Register::from_code(code);
+}
+
+DoubleRegister LCodeGen::ToDoubleRegister(int code) const {
+ return DoubleRegister::from_code(code);
+}
+
+Register LCodeGen::ToRegister(LOperand* op) const {
+ DCHECK(op->IsRegister());
+ return ToRegister(op->index());
+}
+
+Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
+ if (op->IsRegister()) {
+ return ToRegister(op->index());
+ } else if (op->IsConstantOperand()) {
+ LConstantOperand* const_op = LConstantOperand::cast(op);
+ HConstant* constant = chunk_->LookupConstant(const_op);
+ Handle<Object> literal = constant->handle(isolate());
+ Representation r = chunk_->LookupLiteralRepresentation(const_op);
+ if (r.IsInteger32()) {
+ AllowDeferredHandleDereference get_number;
+ DCHECK(literal->IsNumber());
+ __ LoadIntLiteral(scratch, static_cast<int32_t>(literal->Number()));
+ } else if (r.IsDouble()) {
+ Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
+ } else {
+ DCHECK(r.IsSmiOrTagged());
+ __ Move(scratch, literal);
+ }
+ return scratch;
+ } else if (op->IsStackSlot()) {
+ __ LoadP(scratch, ToMemOperand(op));
+ return scratch;
+ }
+ UNREACHABLE();
+ return scratch;
+}
+
+void LCodeGen::EmitLoadIntegerConstant(LConstantOperand* const_op,
+ Register dst) {
+ DCHECK(IsInteger32(const_op));
+ HConstant* constant = chunk_->LookupConstant(const_op);
+ int32_t value = constant->Integer32Value();
+ if (IsSmi(const_op)) {
+ __ LoadSmiLiteral(dst, Smi::FromInt(value));
+ } else {
+ __ LoadIntLiteral(dst, value);
+ }
+}
+
+DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+ DCHECK(op->IsDoubleRegister());
+ return ToDoubleRegister(op->index());
+}
+
+Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+ return constant->handle(isolate());
+}
+
+bool LCodeGen::IsInteger32(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+}
+
+bool LCodeGen::IsSmi(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsSmi();
+}
+
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+ return ToRepresentation(op, Representation::Integer32());
+}
+
+intptr_t LCodeGen::ToRepresentation(LConstantOperand* op,
+ const Representation& r) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ int32_t value = constant->Integer32Value();
+ if (r.IsInteger32()) return value;
+ DCHECK(r.IsSmiOrTagged());
+ return reinterpret_cast<intptr_t>(Smi::FromInt(value));
+}
+
+Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ return Smi::FromInt(constant->Integer32Value());
+}
+
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ DCHECK(constant->HasDoubleValue());
+ return constant->DoubleValue();
+}
+
+Operand LCodeGen::ToOperand(LOperand* op) {
+ if (op->IsConstantOperand()) {
+ LConstantOperand* const_op = LConstantOperand::cast(op);
+ HConstant* constant = chunk()->LookupConstant(const_op);
+ Representation r = chunk_->LookupLiteralRepresentation(const_op);
+ if (r.IsSmi()) {
+ DCHECK(constant->HasSmiValue());
+ return Operand(Smi::FromInt(constant->Integer32Value()));
+ } else if (r.IsInteger32()) {
+ DCHECK(constant->HasInteger32Value());
+ return Operand(constant->Integer32Value());
+ } else if (r.IsDouble()) {
+ Abort(kToOperandUnsupportedDoubleImmediate);
+ }
+ DCHECK(r.IsTagged());
+ return Operand(constant->handle(isolate()));
+ } else if (op->IsRegister()) {
+ return Operand(ToRegister(op));
+ } else if (op->IsDoubleRegister()) {
+ Abort(kToOperandIsDoubleRegisterUnimplemented);
+ return Operand::Zero();
+ }
+ // Stack slots not implemented, use ToMemOperand instead.
+ UNREACHABLE();
+ return Operand::Zero();
+}
+
+static int ArgumentsOffsetWithoutFrame(int index) {
+ DCHECK(index < 0);
+ return -(index + 1) * kPointerSize;
+}
+
+MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
+ DCHECK(!op->IsRegister());
+ DCHECK(!op->IsDoubleRegister());
+ DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ if (NeedsEagerFrame()) {
+ return MemOperand(fp, FrameSlotToFPOffset(op->index()));
+ } else {
+ // Retrieve parameter without eager stack-frame relative to the
+ // stack-pointer.
+ return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
+ }
+}
+
+MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
+ DCHECK(op->IsDoubleStackSlot());
+ if (NeedsEagerFrame()) {
+ return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
+ } else {
+ // Retrieve parameter without eager stack-frame relative to the
+ // stack-pointer.
+ return MemOperand(sp,
+ ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
+ }
+}
+
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+ Translation* translation) {
+ if (environment == NULL) return;
+
+ // The translation includes one command per value in the environment.
+ int translation_size = environment->translation_size();
+
+ WriteTranslation(environment->outer(), translation);
+ WriteTranslationFrame(environment, translation);
+
+ int object_index = 0;
+ int dematerialized_index = 0;
+ for (int i = 0; i < translation_size; ++i) {
+ LOperand* value = environment->values()->at(i);
+ AddToTranslation(
+ environment, translation, value, environment->HasTaggedValueAt(i),
+ environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
+ }
+}
+
+void LCodeGen::AddToTranslation(LEnvironment* environment,
+ Translation* translation, LOperand* op,
+ bool is_tagged, bool is_uint32,
+ int* object_index_pointer,
+ int* dematerialized_index_pointer) {
+ if (op == LEnvironment::materialization_marker()) {
+ int object_index = (*object_index_pointer)++;
+ if (environment->ObjectIsDuplicateAt(object_index)) {
+ int dupe_of = environment->ObjectDuplicateOfAt(object_index);
+ translation->DuplicateObject(dupe_of);
+ return;
+ }
+ int object_length = environment->ObjectLengthAt(object_index);
+ if (environment->ObjectIsArgumentsAt(object_index)) {
+ translation->BeginArgumentsObject(object_length);
+ } else {
+ translation->BeginCapturedObject(object_length);
+ }
+ int dematerialized_index = *dematerialized_index_pointer;
+ int env_offset = environment->translation_size() + dematerialized_index;
+ *dematerialized_index_pointer += object_length;
+ for (int i = 0; i < object_length; ++i) {
+ LOperand* value = environment->values()->at(env_offset + i);
+ AddToTranslation(environment, translation, value,
+ environment->HasTaggedValueAt(env_offset + i),
+ environment->HasUint32ValueAt(env_offset + i),
+ object_index_pointer, dematerialized_index_pointer);
+ }
+ return;
+ }
+
+ if (op->IsStackSlot()) {
+ int index = op->index();
+ if (is_tagged) {
+ translation->StoreStackSlot(index);
+ } else if (is_uint32) {
+ translation->StoreUint32StackSlot(index);
+ } else {
+ translation->StoreInt32StackSlot(index);
+ }
+ } else if (op->IsDoubleStackSlot()) {
+ int index = op->index();
+ translation->StoreDoubleStackSlot(index);
+ } else if (op->IsRegister()) {
+ Register reg = ToRegister(op);
+ if (is_tagged) {
+ translation->StoreRegister(reg);
+ } else if (is_uint32) {
+ translation->StoreUint32Register(reg);
+ } else {
+ translation->StoreInt32Register(reg);
+ }
+ } else if (op->IsDoubleRegister()) {
+ DoubleRegister reg = ToDoubleRegister(op);
+ translation->StoreDoubleRegister(reg);
+ } else if (op->IsConstantOperand()) {
+ HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
+ int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
+ translation->StoreLiteral(src_index);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void LCodeGen::CallCode(Handle<Code> code, RelocInfo::Mode mode,
+ LInstruction* instr) {
+ CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+void LCodeGen::CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
+ LInstruction* instr,
+ SafepointMode safepoint_mode) {
+ DCHECK(instr != NULL);
+ __ Call(code, mode);
+ RecordSafepointWithLazyDeopt(instr, safepoint_mode);
+
+ // Signal that we don't inline smi code before these stubs in the
+ // optimizing code generator.
+ if (code->kind() == Code::BINARY_OP_IC || code->kind() == Code::COMPARE_IC) {
+ __ nop();
+ }
+}
+
+void LCodeGen::CallRuntime(const Runtime::Function* function, int num_arguments,
+ LInstruction* instr, SaveFPRegsMode save_doubles) {
+ DCHECK(instr != NULL);
+
+ __ CallRuntime(function, num_arguments, save_doubles);
+
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
+ if (context->IsRegister()) {
+ __ Move(cp, ToRegister(context));
+ } else if (context->IsStackSlot()) {
+ __ LoadP(cp, ToMemOperand(context));
+ } else if (context->IsConstantOperand()) {
+ HConstant* constant =
+ chunk_->LookupConstant(LConstantOperand::cast(context));
+ __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
+ LInstruction* instr, LOperand* context) {
+ LoadContextFromDeferred(context);
+ __ CallRuntimeSaveDoubles(id);
+ RecordSafepointWithRegisters(instr->pointer_map(), argc,
+ Safepoint::kNoLazyDeopt);
+}
+
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+ Safepoint::DeoptMode mode) {
+ environment->set_has_been_used();
+ if (!environment->HasBeenRegistered()) {
+ // Physical stack frame layout:
+ // -x ............. -4 0 ..................................... y
+ // [incoming arguments] [spill slots] [pushed outgoing arguments]
+
+ // Layout of the environment:
+ // 0 ..................................................... size-1
+ // [parameters] [locals] [expression stack including arguments]
+
+ // Layout of the translation:
+ // 0 ........................................................ size - 1 + 4
+ // [expression stack including arguments] [locals] [4 words] [parameters]
+ // |>------------ translation_size ------------<|
+
+ int frame_count = 0;
+ int jsframe_count = 0;
+ for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
+ ++frame_count;
+ if (e->frame_type() == JS_FUNCTION) {
+ ++jsframe_count;
+ }
+ }
+ Translation translation(&translations_, frame_count, jsframe_count, zone());
+ WriteTranslation(environment, &translation);
+ int deoptimization_index = deoptimizations_.length();
+ int pc_offset = masm()->pc_offset();
+ environment->Register(deoptimization_index, translation.index(),
+ (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
+ deoptimizations_.Add(environment, zone());
+ }
+}
+
+void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
+ Deoptimizer::DeoptReason deopt_reason,
+ Deoptimizer::BailoutType bailout_type,
+ CRegister cr) {
+ LEnvironment* environment = instr->environment();
+ RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+ DCHECK(environment->HasBeenRegistered());
+ int id = environment->deoptimization_index();
+ Address entry =
+ Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
+ if (entry == NULL) {
+ Abort(kBailoutWasNotPrepared);
+ return;
+ }
+
+ if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
+ Register scratch = scratch0();
+ ExternalReference count = ExternalReference::stress_deopt_count(isolate());
+ Label no_deopt;
+
+ // Store the condition on the stack if necessary
+ if (cond != al) {
+ Label done;
+ __ LoadImmP(scratch, Operand::Zero());
+ __ b(NegateCondition(cond), &done, Label::kNear);
+ __ LoadImmP(scratch, Operand(1));
+ __ bind(&done);
+ __ push(scratch);
+ }
+
+ Label done;
+ __ Push(r3);
+ __ mov(scratch, Operand(count));
+ __ LoadW(r3, MemOperand(scratch));
+ __ Sub32(r3, r3, Operand(1));
+ __ Cmp32(r3, Operand::Zero());
+ __ bne(&no_deopt, Label::kNear);
+
+ __ LoadImmP(r3, Operand(FLAG_deopt_every_n_times));
+ __ StoreW(r3, MemOperand(scratch));
+ __ Pop(r3);
+
+ if (cond != al) {
+ // Clean up the stack before the deoptimizer call
+ __ pop(scratch);
+ }
+
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+
+ __ b(&done);
+
+ __ bind(&no_deopt);
+ __ StoreW(r3, MemOperand(scratch));
+ __ Pop(r3);
+
+ if (cond != al) {
+ // Clean up the stack before the deoptimizer call
+ __ pop(scratch);
+ }
+
+ __ bind(&done);
+
+ if (cond != al) {
+ cond = ne;
+ __ CmpP(scratch, Operand::Zero());
+ }
+ }
+
+ if (info()->ShouldTrapOnDeopt()) {
+ __ stop("trap_on_deopt", cond, kDefaultStopCode, cr);
+ }
+
+ Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+
+ DCHECK(info()->IsStub() || frame_is_built_);
+ // Go through jump table if we need to handle condition, build frame, or
+ // restore caller doubles.
+ if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) {
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+ info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
+ } else {
+ Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
+ !frame_is_built_);
+ // We often have several deopts to the same entry, reuse the last
+ // jump entry if this is the case.
+ if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+ jump_table_.is_empty() ||
+ !table_entry.IsEquivalentTo(jump_table_.last())) {
+ jump_table_.Add(table_entry, zone());
+ }
+ __ b(cond, &jump_table_.last().label /*, cr*/);
+ }
+}
+
+void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
+ Deoptimizer::DeoptReason deopt_reason,
+ CRegister cr) {
+ Deoptimizer::BailoutType bailout_type =
+ info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
+ DeoptimizeIf(cond, instr, deopt_reason, bailout_type, cr);
+}
+
+void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
+ SafepointMode safepoint_mode) {
+ if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+ RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
+ } else {
+ DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ RecordSafepointWithRegisters(instr->pointer_map(), 0,
+ Safepoint::kLazyDeopt);
+ }
+}
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
+ int arguments, Safepoint::DeoptMode deopt_mode) {
+ DCHECK(expected_safepoint_kind_ == kind);
+
+ const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+ Safepoint safepoint =
+ safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
+ for (int i = 0; i < operands->length(); i++) {
+ LOperand* pointer = operands->at(i);
+ if (pointer->IsStackSlot()) {
+ safepoint.DefinePointerSlot(pointer->index(), zone());
+ } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+ safepoint.DefinePointerRegister(ToRegister(pointer), zone());
+ }
+ }
+}
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+ Safepoint::DeoptMode deopt_mode) {
+ RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
+}
+
+void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
+ LPointerMap empty_pointers(zone());
+ RecordSafepoint(&empty_pointers, deopt_mode);
+}
+
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+ int arguments,
+ Safepoint::DeoptMode deopt_mode) {
+ RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
+}
+
+void LCodeGen::RecordAndWritePosition(int position) {
+ if (position == RelocInfo::kNoPosition) return;
+ masm()->positions_recorder()->RecordPosition(position);
+ masm()->positions_recorder()->WriteRecordedPositions();
+}
+
+static const char* LabelType(LLabel* label) {
+ if (label->is_loop_header()) return " (loop header)";
+ if (label->is_osr_entry()) return " (OSR entry)";
+ return "";
+}
+
+void LCodeGen::DoLabel(LLabel* label) {
+ Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
+ current_instruction_, label->hydrogen_value()->id(),
+ label->block_id(), LabelType(label));
+ __ bind(label->label());
+ current_block_ = label->block_id();
+ DoGap(label);
+}
+
+void LCodeGen::DoParallelMove(LParallelMove* move) { resolver_.Resolve(move); }
+
+void LCodeGen::DoGap(LGap* gap) {
+ for (int i = LGap::FIRST_INNER_POSITION; i <= LGap::LAST_INNER_POSITION;
+ i++) {
+ LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+ LParallelMove* move = gap->GetParallelMove(inner_pos);
+ if (move != NULL) DoParallelMove(move);
+ }
+}
+
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) { DoGap(instr); }
+
+void LCodeGen::DoParameter(LParameter* instr) {
+ // Nothing to do.
+}
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+ GenerateOsrPrologue();
+}
+
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ DCHECK(dividend.is(ToRegister(instr->result())));
+
+ // Theoretically, a variation of the branch-free code for integer division by
+ // a power of 2 (calculating the remainder via an additional multiplication
+ // (which gets simplified to an 'and') and subtraction) should be faster, and
+ // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+ // indicate that positive dividends are heavily favored, so the branching
+ // version performs better.
+ HMod* hmod = instr->hydrogen();
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ Label dividend_is_not_negative, done;
+ if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+ __ CmpP(dividend, Operand::Zero());
+ __ bge(&dividend_is_not_negative, Label::kNear);
+ if (shift) {
+ // Note that this is correct even for kMinInt operands.
+ __ LoadComplementRR(dividend, dividend);
+ __ ExtractBitRange(dividend, dividend, shift - 1, 0);
+ __ LoadComplementRR(dividend, dividend);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ }
+ } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ mov(dividend, Operand::Zero());
+ } else {
+ DeoptimizeIf(al, instr, Deoptimizer::kMinusZero);
+ }
+ __ b(&done, Label::kNear);
+ }
+
+ __ bind(&dividend_is_not_negative);
+ if (shift) {
+ __ ExtractBitRange(dividend, dividend, shift - 1, 0);
+ } else {
+ __ mov(dividend, Operand::Zero());
+ }
+ __ bind(&done);
+}
+
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ DCHECK(!dividend.is(result));
+
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+ return;
+ }
+
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ __ mov(ip, Operand(Abs(divisor)));
+ __ Mul(result, result, ip);
+ __ SubP(result, dividend, result /*, LeaveOE, SetRC*/);
+
+ // Check for negative zero.
+ HMod* hmod = instr->hydrogen();
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label remainder_not_zero;
+ __ bne(&remainder_not_zero, Label::kNear /*, cr0*/);
+ __ Cmp32(dividend, Operand::Zero());
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ __ bind(&remainder_not_zero);
+ }
+}
+
+void LCodeGen::DoModI(LModI* instr) {
+ HMod* hmod = instr->hydrogen();
+ Register left_reg = ToRegister(instr->left());
+ Register right_reg = ToRegister(instr->right());
+ Register result_reg = ToRegister(instr->result());
+ Label done;
+
+ // Check for x % 0.
+ if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ Cmp32(right_reg, Operand::Zero());
+ DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+ }
+
+ // Check for kMinInt % -1, dr will return undefined, which is not what we
+ // want. We have to deopt if we care about -0, because we can't return that.
+ if (hmod->CheckFlag(HValue::kCanOverflow)) {
+ Label no_overflow_possible;
+ __ Cmp32(left_reg, Operand(kMinInt));
+ __ bne(&no_overflow_possible, Label::kNear);
+ __ Cmp32(right_reg, Operand(-1));
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ } else {
+ __ b(ne, &no_overflow_possible, Label::kNear);
+ __ mov(result_reg, Operand::Zero());
+ __ b(&done, Label::kNear);
+ }
+ __ bind(&no_overflow_possible);
+ }
+
+ // Divide instruction dr will implicity use register pair
+ // r0 & r1 below.
+ DCHECK(!left_reg.is(r1));
+ DCHECK(!right_reg.is(r1));
+ DCHECK(!result_reg.is(r1));
+ __ LoadRR(r0, left_reg);
+ __ srda(r0, Operand(32));
+ __ dr(r0, right_reg); // R0:R1 = R1 / divisor - R0 remainder
+
+ __ LoadAndTestP_ExtendSrc(result_reg, r0); // Copy remainder to resultreg
+
+ // If we care about -0, test if the dividend is <0 and the result is 0.
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ bne(&done, Label::kNear);
+ __ Cmp32(left_reg, Operand::Zero());
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ }
+
+ __ bind(&done);
+}
+
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
+ DCHECK(!result.is(dividend));
+
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ Cmp32(dividend, Operand::Zero());
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ }
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+ __ Cmp32(dividend, Operand(0x80000000));
+ DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ }
+
+ int32_t shift = WhichPowerOf2Abs(divisor);
+
+ // Deoptimize if remainder will not be 0.
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) {
+ __ TestBitRange(dividend, shift - 1, 0, r0);
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, cr0);
+ }
+
+ if (divisor == -1) { // Nice shortcut, not needed for correctness.
+ __ LoadComplementRR(result, dividend);
+ return;
+ }
+ if (shift == 0) {
+ __ LoadRR(result, dividend);
+ } else {
+ if (shift == 1) {
+ __ ShiftRight(result, dividend, Operand(31));
+ } else {
+ __ ShiftRightArith(result, dividend, Operand(31));
+ __ ShiftRight(result, result, Operand(32 - shift));
+ }
+ __ AddP(result, dividend, result);
+ __ ShiftRightArith(result, result, Operand(shift));
+#if V8_TARGET_ARCH_S390X
+ __ lgfr(result, result);
+#endif
+ }
+ if (divisor < 0) __ LoadComplementRR(result, result);
+}
+
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ DCHECK(!dividend.is(result));
+
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+ return;
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ Cmp32(dividend, Operand::Zero());
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ }
+
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ LoadComplementRR(result, result);
+
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ Register scratch = scratch0();
+ __ mov(ip, Operand(divisor));
+ __ Mul(scratch, result, ip);
+ __ Cmp32(scratch, dividend);
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
+ }
+}
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
+void LCodeGen::DoDivI(LDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ const Register dividend = ToRegister(instr->dividend());
+ const Register divisor = ToRegister(instr->divisor());
+ Register result = ToRegister(instr->result());
+
+ DCHECK(!dividend.is(result));
+ DCHECK(!divisor.is(result));
+
+ // Check for x / 0.
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ Cmp32(divisor, Operand::Zero());
+ DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label dividend_not_zero;
+ __ Cmp32(dividend, Operand::Zero());
+ __ bne(&dividend_not_zero, Label::kNear);
+ __ Cmp32(divisor, Operand::Zero());
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ __ bind(&dividend_not_zero);
+ }
+
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+ Label dividend_not_min_int;
+ __ Cmp32(dividend, Operand(kMinInt));
+ __ bne(&dividend_not_min_int, Label::kNear);
+ __ Cmp32(divisor, Operand(-1));
+ DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ __ bind(&dividend_not_min_int);
+ }
+
+ __ LoadRR(r0, dividend);
+ __ srda(r0, Operand(32));
+ __ dr(r0, divisor); // R0:R1 = R1 / divisor - R0 remainder - R1 quotient
+
+ __ LoadAndTestP_ExtendSrc(result, r1); // Move quotient to result register
+
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ // Deoptimize if remainder is not 0.
+ __ Cmp32(r0, Operand::Zero());
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
+ }
+}
+
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register dividend = ToRegister(instr->dividend());
+ Register result = ToRegister(instr->result());
+ int32_t divisor = instr->divisor();
+ bool can_overflow = hdiv->CheckFlag(HValue::kLeftCanBeMinInt);
+
+ // If the divisor is positive, things are easy: There can be no deopts and we
+ // can simply do an arithmetic right shift.
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (divisor > 0) {
+ if (shift || !result.is(dividend)) {
+ __ ShiftRightArith(result, dividend, Operand(shift));
+#if V8_TARGET_ARCH_S390X
+ __ lgfr(result, result);
+#endif
+ }
+ return;
+ }
+
+// If the divisor is negative, we have to negate and handle edge cases.
+#if V8_TARGET_ARCH_S390X
+ if (divisor == -1 && can_overflow) {
+ __ Cmp32(dividend, Operand(0x80000000));
+ DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ }
+#endif
+
+ __ LoadComplementRR(result, dividend);
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
+ }
+
+// If the negation could not overflow, simply shifting is OK.
+#if !V8_TARGET_ARCH_S390X
+ if (!can_overflow) {
+#endif
+ if (shift) {
+ __ ShiftRightArithP(result, result, Operand(shift));
+ }
+ return;
+#if !V8_TARGET_ARCH_S390X
+ }
+
+ // Dividing by -1 is basically negation, unless we overflow.
+ if (divisor == -1) {
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
+ return;
+ }
+
+ Label overflow_label, done;
+ __ b(overflow, &overflow_label, Label::kNear);
+ __ ShiftRightArith(result, result, Operand(shift));
+#if V8_TARGET_ARCH_S390X
+ __ lgfr(result, result);
+#endif
+ __ b(&done, Label::kNear);
+ __ bind(&overflow_label);
+ __ mov(result, Operand(kMinInt / divisor));
+ __ bind(&done);
+#endif
+}
+
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ DCHECK(!dividend.is(result));
+
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+ return;
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ HMathFloorOfDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ Cmp32(dividend, Operand::Zero());
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ }
+
+ // Easy case: We need no dynamic check for the dividend and the flooring
+ // division is the same as the truncating division.
+ if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ LoadComplementRR(result, result);
+ return;
+ }
+
+ // In the general case we may need to adjust before and after the truncating
+ // division to get a flooring division.
+ Register temp = ToRegister(instr->temp());
+ DCHECK(!temp.is(dividend) && !temp.is(result));
+ Label needs_adjustment, done;
+ __ Cmp32(dividend, Operand::Zero());
+ __ b(divisor > 0 ? lt : gt, &needs_adjustment);
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ LoadComplementRR(result, result);
+ __ b(&done, Label::kNear);
+ __ bind(&needs_adjustment);
+ __ AddP(temp, dividend, Operand(divisor > 0 ? 1 : -1));
+ __ TruncatingDiv(result, temp, Abs(divisor));
+ if (divisor < 0) __ LoadComplementRR(result, result);
+ __ SubP(result, result, Operand(1));
+ __ bind(&done);
+}
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
+void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ const Register dividend = ToRegister(instr->dividend());
+ const Register divisor = ToRegister(instr->divisor());
+ Register result = ToRegister(instr->result());
+
+ DCHECK(!dividend.is(result));
+ DCHECK(!divisor.is(result));
+
+ // Check for x / 0.
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ Cmp32(divisor, Operand::Zero());
+ DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label dividend_not_zero;
+ __ Cmp32(dividend, Operand::Zero());
+ __ bne(&dividend_not_zero, Label::kNear);
+ __ Cmp32(divisor, Operand::Zero());
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ __ bind(&dividend_not_zero);
+ }
+
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+ Label no_overflow_possible;
+ __ Cmp32(dividend, Operand(kMinInt));
+ __ bne(&no_overflow_possible, Label::kNear);
+ __ Cmp32(divisor, Operand(-1));
+ if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+ DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ } else {
+ __ bne(&no_overflow_possible, Label::kNear);
+ __ LoadRR(result, dividend);
+ }
+ __ bind(&no_overflow_possible);
+ }
+
+ __ LoadRR(r0, dividend);
+ __ srda(r0, Operand(32));
+ __ dr(r0, divisor); // R0:R1 = R1 / divisor - R0 remainder - R1 quotient
+
+ __ lr(result, r1); // Move quotient to result register
+
+ Label done;
+ Register scratch = scratch0();
+ // If both operands have the same sign then we are done.
+ __ Xor(scratch, dividend, divisor);
+ __ ltr(scratch, scratch); // use 32 bit version LoadAndTestRR even in 64 bit
+ __ bge(&done, Label::kNear);
+
+ // If there is no remainder then we are done.
+ __ lr(scratch, result);
+ __ msr(scratch, divisor);
+ __ Cmp32(dividend, scratch);
+ __ beq(&done, Label::kNear);
+
+ // We performed a truncating division. Correct the result.
+ __ Sub32(result, result, Operand(1));
+ __ bind(&done);
+}
+
+void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
+ DoubleRegister addend = ToDoubleRegister(instr->addend());
+ DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
+ DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+
+ // Unable to use madbr as the intermediate value is not rounded
+ // to proper precision
+ __ ldr(result, multiplier);
+ __ mdbr(result, multiplicand);
+ __ adbr(result, addend);
+}
+
+void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
+ DoubleRegister minuend = ToDoubleRegister(instr->minuend());
+ DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
+ DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+
+ // Unable to use msdbr as the intermediate value is not rounded
+ // to proper precision
+ __ ldr(result, multiplier);
+ __ mdbr(result, multiplicand);
+ __ sdbr(result, minuend);
+}
+
+void LCodeGen::DoMulI(LMulI* instr) {
+ Register scratch = scratch0();
+ Register result = ToRegister(instr->result());
+ // Note that result may alias left.
+ Register left = ToRegister(instr->left());
+ LOperand* right_op = instr->right();
+
+ bool bailout_on_minus_zero =
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+ if (right_op->IsConstantOperand()) {
+ int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
+
+ if (bailout_on_minus_zero && (constant < 0)) {
+ // The case of a null constant will be handled separately.
+ // If constant is negative and left is null, the result should be -0.
+ __ CmpP(left, Operand::Zero());
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ }
+
+ switch (constant) {
+ case -1:
+ if (can_overflow) {
+#if V8_TARGET_ARCH_S390X
+ if (instr->hydrogen()->representation().IsSmi()) {
+#endif
+ __ LoadComplementRR(result, left);
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+#if V8_TARGET_ARCH_S390X
+ } else {
+ __ LoadComplementRR(result, left);
+ __ TestIfInt32(result, r0);
+ DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ }
+#endif
+ } else {
+ __ LoadComplementRR(result, left);
+ }
+ break;
+ case 0:
+ if (bailout_on_minus_zero) {
+// If left is strictly negative and the constant is null, the
+// result is -0. Deoptimize if required, otherwise return 0.
+#if V8_TARGET_ARCH_S390X
+ if (instr->hydrogen()->representation().IsSmi()) {
+#endif
+ __ Cmp32(left, Operand::Zero());
+#if V8_TARGET_ARCH_S390X
+ } else {
+ __ Cmp32(left, Operand::Zero());
+ }
+#endif
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ }
+ __ LoadImmP(result, Operand::Zero());
+ break;
+ case 1:
+ __ Move(result, left);
+ break;
+ default:
+ // Multiplying by powers of two and powers of two plus or minus
+ // one can be done faster with shifted operands.
+ // For other constants we emit standard code.
+ int32_t mask = constant >> 31;
+ uint32_t constant_abs = (constant + mask) ^ mask;
+
+ if (base::bits::IsPowerOfTwo32(constant_abs)) {
+ int32_t shift = WhichPowerOf2(constant_abs);
+ __ ShiftLeftP(result, left, Operand(shift));
+ // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ LoadComplementRR(result, result);
+ } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs - 1);
+ __ ShiftLeftP(scratch, left, Operand(shift));
+ __ AddP(result, scratch, left);
+ // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ LoadComplementRR(result, result);
+ } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs + 1);
+ __ ShiftLeftP(scratch, left, Operand(shift));
+ __ SubP(result, scratch, left);
+ // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ LoadComplementRR(result, result);
+ } else {
+ // Generate standard code.
+ __ Move(result, left);
+ __ MulP(result, Operand(constant));
+ }
+ }
+
+ } else {
+ DCHECK(right_op->IsRegister());
+ Register right = ToRegister(right_op);
+
+ if (can_overflow) {
+#if V8_TARGET_ARCH_S390X
+ // result = left * right.
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiUntag(result, left);
+ __ SmiUntag(scratch, right);
+ __ msgr(result, scratch);
+ } else {
+ __ LoadRR(result, left);
+ __ msgr(result, right);
+ }
+ __ TestIfInt32(result, r0);
+ DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiTag(result);
+ }
+#else
+ // r0:scratch = scratch * right
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiUntag(scratch, left);
+ __ mr_z(r0, right);
+ __ LoadRR(result, scratch);
+ } else {
+ // r0:scratch = scratch * right
+ __ LoadRR(scratch, left);
+ __ mr_z(r0, right);
+ __ LoadRR(result, scratch);
+ }
+ __ TestIfInt32(r0, result, scratch);
+ DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+#endif
+ } else {
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiUntag(result, left);
+ __ Mul(result, result, right);
+ } else {
+ __ Mul(result, left, right);
+ }
+ }
+
+ if (bailout_on_minus_zero) {
+ Label done;
+#if V8_TARGET_ARCH_S390X
+ if (instr->hydrogen()->representation().IsSmi()) {
+#endif
+ __ XorP(r0, left, right);
+ __ LoadAndTestRR(r0, r0);
+ __ bge(&done, Label::kNear);
+#if V8_TARGET_ARCH_S390X
+ } else {
+ __ XorP(r0, left, right);
+ __ Cmp32(r0, Operand::Zero());
+ __ bge(&done, Label::kNear);
+ }
+#endif
+ // Bail out if the result is minus zero.
+ __ CmpP(result, Operand::Zero());
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ __ bind(&done);
+ }
+ }
+}
+
+void LCodeGen::DoBitI(LBitI* instr) {
+ LOperand* left_op = instr->left();
+ LOperand* right_op = instr->right();
+ DCHECK(left_op->IsRegister());
+ Register left = ToRegister(left_op);
+ Register result = ToRegister(instr->result());
+
+ if (right_op->IsConstantOperand()) {
+ switch (instr->op()) {
+ case Token::BIT_AND:
+ __ AndP(result, left, Operand(ToOperand(right_op)));
+ break;
+ case Token::BIT_OR:
+ __ OrP(result, left, Operand(ToOperand(right_op)));
+ break;
+ case Token::BIT_XOR:
+ __ XorP(result, left, Operand(ToOperand(right_op)));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (right_op->IsStackSlot()) {
+ // Reg-Mem instruction clobbers, so copy src to dst first.
+ if (!left.is(result)) __ LoadRR(result, left);
+ switch (instr->op()) {
+ case Token::BIT_AND:
+ __ AndP(result, ToMemOperand(right_op));
+ break;
+ case Token::BIT_OR:
+ __ OrP(result, ToMemOperand(right_op));
+ break;
+ case Token::BIT_XOR:
+ __ XorP(result, ToMemOperand(right_op));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ DCHECK(right_op->IsRegister());
+
+ switch (instr->op()) {
+ case Token::BIT_AND:
+ __ AndP(result, left, ToRegister(right_op));
+ break;
+ case Token::BIT_OR:
+ __ OrP(result, left, ToRegister(right_op));
+ break;
+ case Token::BIT_XOR:
+ __ XorP(result, left, ToRegister(right_op));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+void LCodeGen::DoShiftI(LShiftI* instr) {
+ // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
+ // result may alias either of them.
+ LOperand* right_op = instr->right();
+ Register left = ToRegister(instr->left());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+ if (right_op->IsRegister()) {
+ // Mask the right_op operand.
+ __ AndP(scratch, ToRegister(right_op), Operand(0x1F));
+ switch (instr->op()) {
+ case Token::ROR:
+ // rotate_right(a, b) == rotate_left(a, 32 - b)
+ __ LoadComplementRR(scratch, scratch);
+ __ rll(result, left, scratch, Operand(32));
+#if V8_TARGET_ARCH_S390X
+ __ lgfr(result, result);
+#endif
+ break;
+ case Token::SAR:
+ __ ShiftRightArith(result, left, scratch);
+#if V8_TARGET_ARCH_S390X
+ __ lgfr(result, result);
+#endif
+ break;
+ case Token::SHR:
+ __ ShiftRight(result, left, scratch);
+#if V8_TARGET_ARCH_S390X
+ __ lgfr(result, result);
+#endif
+ if (instr->can_deopt()) {
+#if V8_TARGET_ARCH_S390X
+ __ ltgfr(result, result /*, SetRC*/);
+#else
+ __ ltr(result, result); // Set the <,==,> condition
+#endif
+ DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, cr0);
+ }
+ break;
+ case Token::SHL:
+ __ ShiftLeft(result, left, scratch);
+#if V8_TARGET_ARCH_S390X
+ __ lgfr(result, result);
+#endif
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ // Mask the right_op operand.
+ int value = ToInteger32(LConstantOperand::cast(right_op));
+ uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
+ switch (instr->op()) {
+ case Token::ROR:
+ if (shift_count != 0) {
+ __ rll(result, left, Operand(32 - shift_count));
+#if V8_TARGET_ARCH_S390X
+ __ lgfr(result, result);
+#endif
+ } else {
+ __ Move(result, left);
+ }
+ break;
+ case Token::SAR:
+ if (shift_count != 0) {
+ __ ShiftRightArith(result, left, Operand(shift_count));
+#if V8_TARGET_ARCH_S390X
+ __ lgfr(result, result);
+#endif
+ } else {
+ __ Move(result, left);
+ }
+ break;
+ case Token::SHR:
+ if (shift_count != 0) {
+ __ ShiftRight(result, left, Operand(shift_count));
+#if V8_TARGET_ARCH_S390X
+ __ lgfr(result, result);
+#endif
+ } else {
+ if (instr->can_deopt()) {
+ __ Cmp32(left, Operand::Zero());
+ DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue);
+ }
+ __ Move(result, left);
+ }
+ break;
+ case Token::SHL:
+ if (shift_count != 0) {
+#if V8_TARGET_ARCH_S390X
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ ShiftLeftP(result, left, Operand(shift_count));
+#else
+ if (instr->hydrogen_value()->representation().IsSmi() &&
+ instr->can_deopt()) {
+ if (shift_count != 1) {
+ __ ShiftLeft(result, left, Operand(shift_count - 1));
+#if V8_TARGET_ARCH_S390X
+ __ lgfr(result, result);
+#endif
+ __ SmiTagCheckOverflow(result, result, scratch);
+ } else {
+ __ SmiTagCheckOverflow(result, left, scratch);
+ }
+ DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
+#endif
+ } else {
+ __ ShiftLeft(result, left, Operand(shift_count));
+#if V8_TARGET_ARCH_S390X
+ __ lgfr(result, result);
+#endif
+ }
+ } else {
+ __ Move(result, left);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+void LCodeGen::DoSubI(LSubI* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ LOperand* result = instr->result();
+
+ bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
+ instr->hydrogen()->representation().IsExternal());
+
+#if V8_TARGET_ARCH_S390X
+ // The overflow detection needs to be tested on the lower 32-bits.
+ // As a result, on 64-bit, we need to force 32-bit arithmetic operations
+ // to set the CC overflow bit properly. The result is then sign-extended.
+ bool checkOverflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+#else
+ bool checkOverflow = true;
+#endif
+
+ if (right->IsConstantOperand()) {
+ if (!isInteger || !checkOverflow)
+ __ SubP(ToRegister(result), ToRegister(left), ToOperand(right));
+ else
+ __ Sub32(ToRegister(result), ToRegister(left), ToOperand(right));
+ } else if (right->IsRegister()) {
+ if (!isInteger)
+ __ SubP(ToRegister(result), ToRegister(left), ToRegister(right));
+ else if (!checkOverflow)
+ __ SubP_ExtendSrc(ToRegister(result), ToRegister(left),
+ ToRegister(right));
+ else
+ __ Sub32(ToRegister(result), ToRegister(left), ToRegister(right));
+ } else {
+ if (!left->Equals(instr->result()))
+ __ LoadRR(ToRegister(result), ToRegister(left));
+
+ MemOperand mem = ToMemOperand(right);
+ if (!isInteger) {
+ __ SubP(ToRegister(result), mem);
+ } else {
+#if V8_TARGET_ARCH_S390X && !V8_TARGET_LITTLE_ENDIAN
+ // We want to read the 32-bits directly from memory
+ MemOperand Upper32Mem = MemOperand(mem.rb(), mem.rx(), mem.offset() + 4);
+#else
+ MemOperand Upper32Mem = ToMemOperand(right);
+#endif
+ if (checkOverflow) {
+ __ Sub32(ToRegister(result), Upper32Mem);
+ } else {
+ __ SubP_ExtendSrc(ToRegister(result), Upper32Mem);
+ }
+ }
+ }
+
+#if V8_TARGET_ARCH_S390X
+ if (isInteger && checkOverflow)
+ __ lgfr(ToRegister(result), ToRegister(result));
+#endif
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ }
+}
+
+void LCodeGen::DoRSubI(LRSubI* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ LOperand* result = instr->result();
+
+ DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) &&
+ right->IsConstantOperand());
+
+#if V8_TARGET_ARCH_S390X
+ // The overflow detection needs to be tested on the lower 32-bits.
+ // As a result, on 64-bit, we need to force 32-bit arithmetic operations
+ // to set the CC overflow bit properly. The result is then sign-extended.
+ bool checkOverflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+#else
+ bool checkOverflow = true;
+#endif
+
+ Operand right_operand = ToOperand(right);
+ __ mov(r0, right_operand);
+
+ if (!checkOverflow) {
+ __ SubP_ExtendSrc(ToRegister(result), r0, ToRegister(left));
+ } else {
+ __ Sub32(ToRegister(result), r0, ToRegister(left));
+ }
+}
+
+void LCodeGen::DoConstantI(LConstantI* instr) {
+ __ mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+void LCodeGen::DoConstantS(LConstantS* instr) {
+ __ LoadSmiLiteral(ToRegister(instr->result()), instr->value());
+}
+
+void LCodeGen::DoConstantD(LConstantD* instr) {
+ DCHECK(instr->result()->IsDoubleRegister());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ uint64_t bits = instr->bits();
+ __ LoadDoubleLiteral(result, bits, scratch0());
+}
+
+void LCodeGen::DoConstantE(LConstantE* instr) {
+ __ mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+void LCodeGen::DoConstantT(LConstantT* instr) {
+ Handle<Object> object = instr->value(isolate());
+ AllowDeferredHandleDereference smi_check;
+ __ Move(ToRegister(instr->result()), object);
+}
+
+MemOperand LCodeGen::BuildSeqStringOperand(Register string, LOperand* index,
+ String::Encoding encoding) {
+ if (index->IsConstantOperand()) {
+ int offset = ToInteger32(LConstantOperand::cast(index));
+ if (encoding == String::TWO_BYTE_ENCODING) {
+ offset *= kUC16Size;
+ }
+ STATIC_ASSERT(kCharSize == 1);
+ return FieldMemOperand(string, SeqString::kHeaderSize + offset);
+ }
+ Register scratch = scratch0();
+ DCHECK(!scratch.is(string));
+ DCHECK(!scratch.is(ToRegister(index)));
+ // TODO(joransiu) : Fold Add into FieldMemOperand
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ AddP(scratch, string, ToRegister(index));
+ } else {
+ STATIC_ASSERT(kUC16Size == 2);
+ __ ShiftLeftP(scratch, ToRegister(index), Operand(1));
+ __ AddP(scratch, string, scratch);
+ }
+ return FieldMemOperand(scratch, SeqString::kHeaderSize);
+}
+
+void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+
+ if (FLAG_debug_code) {
+ Register scratch = scratch0();
+ __ LoadP(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ llc(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+
+ __ AndP(scratch, scratch,
+ Operand(kStringRepresentationMask | kStringEncodingMask));
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ CmpP(scratch,
+ Operand(encoding == String::ONE_BYTE_ENCODING ? one_byte_seq_type
+ : two_byte_seq_type));
+ __ Check(eq, kUnexpectedStringType);
+ }
+
+ MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ llc(result, operand);
+ } else {
+ __ llh(result, operand);
+ }
+}
+
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
+ Register value = ToRegister(instr->value());
+
+ if (FLAG_debug_code) {
+ Register index = ToRegister(instr->index());
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ int encoding_mask =
+ instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type
+ : two_byte_seq_type;
+ __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
+ }
+
+ MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ stc(value, operand);
+ } else {
+ __ sth(value, operand);
+ }
+}
+
+void LCodeGen::DoAddI(LAddI* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ LOperand* result = instr->result();
+ bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
+ instr->hydrogen()->representation().IsExternal());
+#if V8_TARGET_ARCH_S390X
+ // The overflow detection needs to be tested on the lower 32-bits.
+ // As a result, on 64-bit, we need to force 32-bit arithmetic operations
+ // to set the CC overflow bit properly. The result is then sign-extended.
+ bool checkOverflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+#else
+ bool checkOverflow = true;
+#endif
+
+ if (right->IsConstantOperand()) {
+ if (!isInteger || !checkOverflow)
+ __ AddP(ToRegister(result), ToRegister(left), ToOperand(right));
+ else
+ __ Add32(ToRegister(result), ToRegister(left), ToOperand(right));
+ } else if (right->IsRegister()) {
+ if (!isInteger)
+ __ AddP(ToRegister(result), ToRegister(left), ToRegister(right));
+ else if (!checkOverflow)
+ __ AddP_ExtendSrc(ToRegister(result), ToRegister(left),
+ ToRegister(right));
+ else
+ __ Add32(ToRegister(result), ToRegister(left), ToRegister(right));
+ } else {
+ if (!left->Equals(instr->result()))
+ __ LoadRR(ToRegister(result), ToRegister(left));
+
+ MemOperand mem = ToMemOperand(right);
+ if (!isInteger) {
+ __ AddP(ToRegister(result), mem);
+ } else {
+#if V8_TARGET_ARCH_S390X && !V8_TARGET_LITTLE_ENDIAN
+ // We want to read the 32-bits directly from memory
+ MemOperand Upper32Mem = MemOperand(mem.rb(), mem.rx(), mem.offset() + 4);
+#else
+ MemOperand Upper32Mem = ToMemOperand(right);
+#endif
+ if (checkOverflow) {
+ __ Add32(ToRegister(result), Upper32Mem);
+ } else {
+ __ AddP_ExtendSrc(ToRegister(result), Upper32Mem);
+ }
+ }
+ }
+
+#if V8_TARGET_ARCH_S390X
+ if (isInteger && checkOverflow)
+ __ lgfr(ToRegister(result), ToRegister(result));
+#endif
+ // Doptimize on overflow
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ }
+}
+
+void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ HMathMinMax::Operation operation = instr->hydrogen()->operation();
+ Condition cond = (operation == HMathMinMax::kMathMin) ? le : ge;
+ if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
+ Register left_reg = ToRegister(left);
+ Register right_reg = EmitLoadRegister(right, ip);
+ Register result_reg = ToRegister(instr->result());
+ Label return_left, done;
+#if V8_TARGET_ARCH_S390X
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+#endif
+ __ CmpP(left_reg, right_reg);
+#if V8_TARGET_ARCH_S390X
+ } else {
+ __ Cmp32(left_reg, right_reg);
+ }
+#endif
+ __ b(cond, &return_left, Label::kNear);
+ __ Move(result_reg, right_reg);
+ __ b(&done, Label::kNear);
+ __ bind(&return_left);
+ __ Move(result_reg, left_reg);
+ __ bind(&done);
+ } else {
+ DCHECK(instr->hydrogen()->representation().IsDouble());
+ DoubleRegister left_reg = ToDoubleRegister(left);
+ DoubleRegister right_reg = ToDoubleRegister(right);
+ DoubleRegister result_reg = ToDoubleRegister(instr->result());
+ Label check_nan_left, check_zero, return_left, return_right, done;
+ __ cdbr(left_reg, right_reg);
+ __ bunordered(&check_nan_left, Label::kNear);
+ __ beq(&check_zero);
+ __ b(cond, &return_left, Label::kNear);
+ __ b(&return_right, Label::kNear);
+
+ __ bind(&check_zero);
+ __ lzdr(kDoubleRegZero);
+ __ cdbr(left_reg, kDoubleRegZero);
+ __ bne(&return_left, Label::kNear); // left == right != 0.
+
+ // At this point, both left and right are either 0 or -0.
+ // N.B. The following works because +0 + -0 == +0
+ if (operation == HMathMinMax::kMathMin) {
+ // For min we want logical-or of sign bit: -(-L + -R)
+ __ lcdbr(left_reg, left_reg);
+ __ ldr(result_reg, left_reg);
+ if (left_reg.is(right_reg)) {
+ __ adbr(result_reg, right_reg);
+ } else {
+ __ sdbr(result_reg, right_reg);
+ }
+ __ lcdbr(result_reg, result_reg);
+ } else {
+ // For max we want logical-and of sign bit: (L + R)
+ __ ldr(result_reg, left_reg);
+ __ adbr(result_reg, right_reg);
+ }
+ __ b(&done, Label::kNear);
+
+ __ bind(&check_nan_left);
+ __ cdbr(left_reg, left_reg);
+ __ bunordered(&return_left, Label::kNear); // left == NaN.
+
+ __ bind(&return_right);
+ if (!right_reg.is(result_reg)) {
+ __ ldr(result_reg, right_reg);
+ }
+ __ b(&done, Label::kNear);
+
+ __ bind(&return_left);
+ if (!left_reg.is(result_reg)) {
+ __ ldr(result_reg, left_reg);
+ }
+ __ bind(&done);
+ }
+}
+
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+ DoubleRegister left = ToDoubleRegister(instr->left());
+ DoubleRegister right = ToDoubleRegister(instr->right());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ // All operations except MOD are computed in-place.
+ DCHECK(instr->op() == Token::MOD || left.is(result));
+ switch (instr->op()) {
+ case Token::ADD:
+ __ adbr(result, right);
+ break;
+ case Token::SUB:
+ __ sdbr(result, right);
+ break;
+ case Token::MUL:
+ __ mdbr(result, right);
+ break;
+ case Token::DIV:
+ __ ddbr(result, right);
+ break;
+ case Token::MOD: {
+ __ PrepareCallCFunction(0, 2, scratch0());
+ __ MovToFloatParameters(left, right);
+ __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
+ 0, 2);
+ // Move the result in the double result register.
+ __ MovFromFloatResult(result);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->left()).is(r3));
+ DCHECK(ToRegister(instr->right()).is(r2));
+ DCHECK(ToRegister(instr->result()).is(r2));
+
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
+ CallCode(code, RelocInfo::CODE_TARGET, instr);
+}
+
+template <class InstrType>
+void LCodeGen::EmitBranch(InstrType instr, Condition cond) {
+ int left_block = instr->TrueDestination(chunk_);
+ int right_block = instr->FalseDestination(chunk_);
+
+ int next_block = GetNextEmittedBlock();
+
+ if (right_block == left_block || cond == al) {
+ EmitGoto(left_block);
+ } else if (left_block == next_block) {
+ __ b(NegateCondition(cond), chunk_->GetAssemblyLabel(right_block));
+ } else if (right_block == next_block) {
+ __ b(cond, chunk_->GetAssemblyLabel(left_block));
+ } else {
+ __ b(cond, chunk_->GetAssemblyLabel(left_block));
+ __ b(chunk_->GetAssemblyLabel(right_block));
+ }
+}
+
+template <class InstrType>
+void LCodeGen::EmitTrueBranch(InstrType instr, Condition cond) {
+ int true_block = instr->TrueDestination(chunk_);
+ __ b(cond, chunk_->GetAssemblyLabel(true_block));
+}
+
+template <class InstrType>
+void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond) {
+ int false_block = instr->FalseDestination(chunk_);
+ __ b(cond, chunk_->GetAssemblyLabel(false_block));
+}
+
+void LCodeGen::DoDebugBreak(LDebugBreak* instr) { __ stop("LBreak"); }
+
+void LCodeGen::DoBranch(LBranch* instr) {
+ Representation r = instr->hydrogen()->value()->representation();
+ DoubleRegister dbl_scratch = double_scratch0();
+
+ if (r.IsInteger32()) {
+ DCHECK(!info()->IsStub());
+ Register reg = ToRegister(instr->value());
+ __ Cmp32(reg, Operand::Zero());
+ EmitBranch(instr, ne);
+ } else if (r.IsSmi()) {
+ DCHECK(!info()->IsStub());
+ Register reg = ToRegister(instr->value());
+ __ CmpP(reg, Operand::Zero());
+ EmitBranch(instr, ne);
+ } else if (r.IsDouble()) {
+ DCHECK(!info()->IsStub());
+ DoubleRegister reg = ToDoubleRegister(instr->value());
+ __ lzdr(kDoubleRegZero);
+ __ cdbr(reg, kDoubleRegZero);
+ // Test the double value. Zero and NaN are false.
+ Condition lt_gt = static_cast<Condition>(lt | gt);
+
+ EmitBranch(instr, lt_gt);
+ } else {
+ DCHECK(r.IsTagged());
+ Register reg = ToRegister(instr->value());
+ HType type = instr->hydrogen()->value()->type();
+ if (type.IsBoolean()) {
+ DCHECK(!info()->IsStub());
+ __ CompareRoot(reg, Heap::kTrueValueRootIndex);
+ EmitBranch(instr, eq);
+ } else if (type.IsSmi()) {
+ DCHECK(!info()->IsStub());
+ __ CmpP(reg, Operand::Zero());
+ EmitBranch(instr, ne);
+ } else if (type.IsJSArray()) {
+ DCHECK(!info()->IsStub());
+ EmitBranch(instr, al);
+ } else if (type.IsHeapNumber()) {
+ DCHECK(!info()->IsStub());
+ __ ld(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+ // Test the double value. Zero and NaN are false.
+ __ lzdr(kDoubleRegZero);
+ __ cdbr(dbl_scratch, kDoubleRegZero);
+ Condition lt_gt = static_cast<Condition>(lt | gt);
+ EmitBranch(instr, lt_gt);
+ } else if (type.IsString()) {
+ DCHECK(!info()->IsStub());
+ __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
+ __ CmpP(ip, Operand::Zero());
+ EmitBranch(instr, ne);
+ } else {
+ ToBooleanICStub::Types expected =
+ instr->hydrogen()->expected_input_types();
+ // Avoid deopts in the case where we've never executed this path before.
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+
+ if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
+ // undefined -> false.
+ __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
+ __ beq(instr->FalseLabel(chunk_));
+ }
+ if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
+ // Boolean -> its value.
+ __ CompareRoot(reg, Heap::kTrueValueRootIndex);
+ __ beq(instr->TrueLabel(chunk_));
+ __ CompareRoot(reg, Heap::kFalseValueRootIndex);
+ __ beq(instr->FalseLabel(chunk_));
+ }
+ if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
+ // 'null' -> false.
+ __ CompareRoot(reg, Heap::kNullValueRootIndex);
+ __ beq(instr->FalseLabel(chunk_));
+ }
+
+ if (expected.Contains(ToBooleanICStub::SMI)) {
+ // Smis: 0 -> false, all other -> true.
+ __ CmpP(reg, Operand::Zero());
+ __ beq(instr->FalseLabel(chunk_));
+ __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
+ } else if (expected.NeedsMap()) {
+ // If we need a map later and have a Smi -> deopt.
+ __ TestIfSmi(reg);
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+ }
+
+ const Register map = scratch0();
+ if (expected.NeedsMap()) {
+ __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset));
+
+ if (expected.CanBeUndetectable()) {
+ // Undetectable -> false.
+ __ tm(FieldMemOperand(map, Map::kBitFieldOffset),
+ Operand(1 << Map::kIsUndetectable));
+ __ bne(instr->FalseLabel(chunk_));
+ }
+ }
+
+ if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
+ // spec object -> true.
+ __ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
+ __ bge(instr->TrueLabel(chunk_));
+ }
+
+ if (expected.Contains(ToBooleanICStub::STRING)) {
+ // String value -> false iff empty.
+ Label not_string;
+ __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
+ __ bge(&not_string, Label::kNear);
+ __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
+ __ CmpP(ip, Operand::Zero());
+ __ bne(instr->TrueLabel(chunk_));
+ __ b(instr->FalseLabel(chunk_));
+ __ bind(&not_string);
+ }
+
+ if (expected.Contains(ToBooleanICStub::SYMBOL)) {
+ // Symbol value -> true.
+ __ CompareInstanceType(map, ip, SYMBOL_TYPE);
+ __ beq(instr->TrueLabel(chunk_));
+ }
+
+ if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
+ // SIMD value -> true.
+ Label not_simd;
+ __ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
+ __ beq(instr->TrueLabel(chunk_));
+ }
+
+ if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
+ // heap number -> false iff +0, -0, or NaN.
+ Label not_heap_number;
+ __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+ __ bne(&not_heap_number, Label::kNear);
+ __ LoadDouble(dbl_scratch,
+ FieldMemOperand(reg, HeapNumber::kValueOffset));
+ __ lzdr(kDoubleRegZero);
+ __ cdbr(dbl_scratch, kDoubleRegZero);
+ __ bunordered(instr->FalseLabel(chunk_)); // NaN -> false.
+ __ beq(instr->FalseLabel(chunk_)); // +0, -0 -> false.
+ __ b(instr->TrueLabel(chunk_));
+ __ bind(&not_heap_number);
+ }
+
+ if (!expected.IsGeneric()) {
+ // We've seen something for the first time -> deopt.
+ // This can only happen if we are not generic already.
+ DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject);
+ }
+ }
+ }
+}
+
+void LCodeGen::EmitGoto(int block) {
+ if (!IsNextEmittedBlock(block)) {
+ __ b(chunk_->GetAssemblyLabel(LookupDestination(block)));
+ }
+}
+
+void LCodeGen::DoGoto(LGoto* instr) { EmitGoto(instr->block_id()); }
+
+Condition LCodeGen::TokenToCondition(Token::Value op) {
+ Condition cond = kNoCondition;
+ switch (op) {
+ case Token::EQ:
+ case Token::EQ_STRICT:
+ cond = eq;
+ break;
+ case Token::NE:
+ case Token::NE_STRICT:
+ cond = ne;
+ break;
+ case Token::LT:
+ cond = lt;
+ break;
+ case Token::GT:
+ cond = gt;
+ break;
+ case Token::LTE:
+ cond = le;
+ break;
+ case Token::GTE:
+ cond = ge;
+ break;
+ case Token::IN:
+ case Token::INSTANCEOF:
+ default:
+ UNREACHABLE();
+ }
+ return cond;
+}
+
+void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ bool is_unsigned =
+ instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
+ instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
+ Condition cond = TokenToCondition(instr->op());
+
+ if (left->IsConstantOperand() && right->IsConstantOperand()) {
+ // We can statically evaluate the comparison.
+ double left_val = ToDouble(LConstantOperand::cast(left));
+ double right_val = ToDouble(LConstantOperand::cast(right));
+ int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
+ ? instr->TrueDestination(chunk_)
+ : instr->FalseDestination(chunk_);
+ EmitGoto(next_block);
+ } else {
+ if (instr->is_double()) {
+ // Compare left and right operands as doubles and load the
+ // resulting flags into the normal status register.
+ __ cdbr(ToDoubleRegister(left), ToDoubleRegister(right));
+ // If a NaN is involved, i.e. the result is unordered,
+ // jump to false block label.
+ __ bunordered(instr->FalseLabel(chunk_));
+ } else {
+ if (right->IsConstantOperand()) {
+ int32_t value = ToInteger32(LConstantOperand::cast(right));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ if (is_unsigned) {
+ __ CmpLogicalSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
+ } else {
+ __ CmpSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
+ }
+ } else {
+ if (is_unsigned) {
+ __ CmpLogical32(ToRegister(left), ToOperand(right));
+ } else {
+ __ Cmp32(ToRegister(left), ToOperand(right));
+ }
+ }
+ } else if (left->IsConstantOperand()) {
+ int32_t value = ToInteger32(LConstantOperand::cast(left));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ if (is_unsigned) {
+ __ CmpLogicalSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
+ } else {
+ __ CmpSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
+ }
+ } else {
+ if (is_unsigned) {
+ __ CmpLogical32(ToRegister(right), ToOperand(left));
+ } else {
+ __ Cmp32(ToRegister(right), ToOperand(left));
+ }
+ }
+ // We commuted the operands, so commute the condition.
+ cond = CommuteCondition(cond);
+ } else if (instr->hydrogen_value()->representation().IsSmi()) {
+ if (is_unsigned) {
+ __ CmpLogicalP(ToRegister(left), ToRegister(right));
+ } else {
+ __ CmpP(ToRegister(left), ToRegister(right));
+ }
+ } else {
+ if (is_unsigned) {
+ __ CmpLogical32(ToRegister(left), ToRegister(right));
+ } else {
+ __ Cmp32(ToRegister(left), ToRegister(right));
+ }
+ }
+ }
+ EmitBranch(instr, cond);
+ }
+}
+
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
+ Register left = ToRegister(instr->left());
+ Register right = ToRegister(instr->right());
+
+ __ CmpP(left, right);
+ EmitBranch(instr, eq);
+}
+
+void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
+ if (instr->hydrogen()->representation().IsTagged()) {
+ Register input_reg = ToRegister(instr->object());
+ __ CmpP(input_reg, Operand(factory()->the_hole_value()));
+ EmitBranch(instr, eq);
+ return;
+ }
+
+ DoubleRegister input_reg = ToDoubleRegister(instr->object());
+ __ cdbr(input_reg, input_reg);
+ EmitFalseBranch(instr, ordered);
+
+ Register scratch = scratch0();
+ // Convert to GPR and examine the upper 32 bits
+ __ lgdr(scratch, input_reg);
+ __ srlg(scratch, scratch, Operand(32));
+ __ Cmp32(scratch, Operand(kHoleNanUpper32));
+ EmitBranch(instr, eq);
+}
+
+Condition LCodeGen::EmitIsString(Register input, Register temp1,
+ Label* is_not_string,
+ SmiCheck check_needed = INLINE_SMI_CHECK) {
+ if (check_needed == INLINE_SMI_CHECK) {
+ __ JumpIfSmi(input, is_not_string);
+ }
+ __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
+
+ return lt;
+}
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+ Register reg = ToRegister(instr->value());
+ Register temp1 = ToRegister(instr->temp());
+
+ SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
+ ? OMIT_SMI_CHECK
+ : INLINE_SMI_CHECK;
+ Condition true_cond =
+ EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
+
+ EmitBranch(instr, true_cond);
+}
+
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+ Register input_reg = EmitLoadRegister(instr->value(), ip);
+ __ TestIfSmi(input_reg);
+ EmitBranch(instr, eq);
+}
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
+ __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ tm(FieldMemOperand(temp, Map::kBitFieldOffset),
+ Operand(1 << Map::kIsUndetectable));
+ EmitBranch(instr, ne);
+}
+
+static Condition ComputeCompareCondition(Token::Value op) {
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ return eq;
+ case Token::LT:
+ return lt;
+ case Token::GT:
+ return gt;
+ case Token::LTE:
+ return le;
+ case Token::GTE:
+ return ge;
+ default:
+ UNREACHABLE();
+ return kNoCondition;
+ }
+}
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->left()).is(r3));
+ DCHECK(ToRegister(instr->right()).is(r2));
+
+ Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
+ CallCode(code, RelocInfo::CODE_TARGET, instr);
+ __ CompareRoot(r2, Heap::kTrueValueRootIndex);
+ EmitBranch(instr, eq);
+}
+
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
+ if (from == FIRST_TYPE) return to;
+ DCHECK(from == to || to == LAST_TYPE);
+ return from;
+}
+
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
+ if (from == to) return eq;
+ if (to == LAST_TYPE) return ge;
+ if (from == FIRST_TYPE) return le;
+ UNREACHABLE();
+ return eq;
+}
+
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+ Register scratch = scratch0();
+ Register input = ToRegister(instr->value());
+
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
+
+ __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
+ EmitBranch(instr, BranchCondition(instr->hydrogen()));
+}
+
+void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+
+ __ AssertString(input);
+
+ __ LoadlW(result, FieldMemOperand(input, String::kHashFieldOffset));
+ __ IndexFromHash(result, result);
+}
+
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+ LHasCachedArrayIndexAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Register scratch = scratch0();
+
+ __ LoadlW(scratch, FieldMemOperand(input, String::kHashFieldOffset));
+ __ mov(r0, Operand(String::kContainsCachedArrayIndexMask));
+ __ AndP(r0, scratch);
+ EmitBranch(instr, eq);
+}
+
+// Branches to a label or falls through with the answer in flags. Trashes
+// the temp registers, but not the input.
+void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
+ Handle<String> class_name, Register input,
+ Register temp, Register temp2) {
+ DCHECK(!input.is(temp));
+ DCHECK(!input.is(temp2));
+ DCHECK(!temp.is(temp2));
+
+ __ JumpIfSmi(input, is_false);
+
+ __ CompareObjectType(input, temp, temp2, FIRST_FUNCTION_TYPE);
+ STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
+ if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
+ __ bge(is_true);
+ } else {
+ __ bge(is_false);
+ }
+
+ // Check if the constructor in the map is a function.
+ Register instance_type = ip;
+ __ GetMapConstructor(temp, temp, temp2, instance_type);
+
+ // Objects with a non-function constructor have class 'Object'.
+ __ CmpP(instance_type, Operand(JS_FUNCTION_TYPE));
+ if (String::Equals(isolate()->factory()->Object_string(), class_name)) {
+ __ bne(is_true);
+ } else {
+ __ bne(is_false);
+ }
+
+ // temp now contains the constructor function. Grab the
+ // instance class name from there.
+ __ LoadP(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(temp,
+ FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset));
+ // The class name we are testing against is internalized since it's a literal.
+ // The name in the constructor is internalized because of the way the context
+ // is booted. This routine isn't expected to work for random API-created
+ // classes and it doesn't have to because you can't access it with natives
+ // syntax. Since both sides are internalized it is sufficient to use an
+ // identity comparison.
+ __ CmpP(temp, Operand(class_name));
+ // End with the answer in flags.
+}
+
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Register temp = scratch0();
+ Register temp2 = ToRegister(instr->temp());
+ Handle<String> class_name = instr->hydrogen()->class_name();
+
+ EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+ class_name, input, temp, temp2);
+
+ EmitBranch(instr, eq);
+}
+
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+ Register reg = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+
+ __ mov(temp, Operand(instr->map()));
+ __ CmpP(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
+ EmitBranch(instr, eq);
+}
+
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
+ DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
+ DCHECK(ToRegister(instr->result()).is(r2));
+ InstanceOfStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+void LCodeGen::DoHasInPrototypeChainAndBranch(
+ LHasInPrototypeChainAndBranch* instr) {
+ Register const object = ToRegister(instr->object());
+ Register const object_map = scratch0();
+ Register const object_instance_type = ip;
+ Register const object_prototype = object_map;
+ Register const prototype = ToRegister(instr->prototype());
+
+ // The {object} must be a spec object. It's sufficient to know that {object}
+ // is not a smi, since all other non-spec objects have {null} prototypes and
+ // will be ruled out below.
+ if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
+ __ TestIfSmi(object);
+ EmitFalseBranch(instr, eq);
+ }
+ // Loop through the {object}s prototype chain looking for the {prototype}.
+ __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
+ Label loop;
+ __ bind(&loop);
+
+ // Deoptimize if the object needs to be access checked.
+ __ LoadlB(object_instance_type,
+ FieldMemOperand(object_map, Map::kBitFieldOffset));
+ __ TestBit(object_instance_type, Map::kIsAccessCheckNeeded, r0);
+ DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, cr0);
+ // Deoptimize for proxies.
+ __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
+ DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
+ __ LoadP(object_prototype,
+ FieldMemOperand(object_map, Map::kPrototypeOffset));
+ __ CmpP(object_prototype, prototype);
+ EmitTrueBranch(instr, eq);
+ __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
+ EmitFalseBranch(instr, eq);
+ __ LoadP(object_map,
+ FieldMemOperand(object_prototype, HeapObject::kMapOffset));
+ __ b(&loop);
+}
+
+void LCodeGen::DoCmpT(LCmpT* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ Token::Value op = instr->op();
+
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ // This instruction also signals no smi code inlined
+ __ CmpP(r2, Operand::Zero());
+
+ Condition condition = ComputeCompareCondition(op);
+ Label true_value, done;
+
+ __ b(condition, &true_value, Label::kNear);
+
+ __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
+ __ b(&done, Label::kNear);
+
+ __ bind(&true_value);
+ __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
+
+ __ bind(&done);
+}
+
+void LCodeGen::DoReturn(LReturn* instr) {
+ if (FLAG_trace && info()->IsOptimizing()) {
+ // Push the return value on the stack as the parameter.
+ // Runtime::TraceExit returns its parameter in r2. We're leaving the code
+ // managed by the register allocator and tearing down the frame, it's
+ // safe to write to the context register.
+ __ push(r2);
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kTraceExit);
+ }
+ if (info()->saves_caller_doubles()) {
+ RestoreCallerDoubles();
+ }
+ if (instr->has_constant_parameter_count()) {
+ int parameter_count = ToInteger32(instr->constant_parameter_count());
+ int32_t sp_delta = (parameter_count + 1) * kPointerSize;
+ if (NeedsEagerFrame()) {
+ masm_->LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
+ } else if (sp_delta != 0) {
+ // TODO(joransiu): Clean this up into Macro Assembler
+ if (sp_delta >= 0 && sp_delta < 4096)
+ __ la(sp, MemOperand(sp, sp_delta));
+ else
+ __ lay(sp, MemOperand(sp, sp_delta));
+ }
+ } else {
+ DCHECK(info()->IsStub()); // Functions would need to drop one more value.
+ Register reg = ToRegister(instr->parameter_count());
+ // The argument count parameter is a smi
+ if (NeedsEagerFrame()) {
+ masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
+ }
+ __ SmiToPtrArrayOffset(r0, reg);
+ __ AddP(sp, sp, r0);
+ }
+
+ __ Ret();
+}
+
+template <class T>
+void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
+ Register vector_register = ToRegister(instr->temp_vector());
+ Register slot_register = LoadDescriptor::SlotRegister();
+ DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
+ DCHECK(slot_register.is(r2));
+
+ AllowDeferredHandleDereference vector_structure_check;
+ Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+ __ Move(vector_register, vector);
+ // No need to allocate this register.
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
+ int index = vector->GetIndex(slot);
+ __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
+}
+
+template <class T>
+void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
+ Register vector_register = ToRegister(instr->temp_vector());
+ Register slot_register = ToRegister(instr->temp_slot());
+
+ AllowDeferredHandleDereference vector_structure_check;
+ Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+ __ Move(vector_register, vector);
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
+ int index = vector->GetIndex(slot);
+ __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
+}
+
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->global_object())
+ .is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->result()).is(r2));
+
+ __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
+ EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+ isolate(), instr->typeof_mode(), PREMONOMORPHIC)
+ .code();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register result = ToRegister(instr->result());
+ __ LoadP(result, ContextMemOperand(context, instr->slot_index()));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ } else {
+ Label skip;
+ __ bne(&skip, Label::kNear);
+ __ mov(result, Operand(factory()->undefined_value()));
+ __ bind(&skip);
+ }
+ }
+}
+
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register value = ToRegister(instr->value());
+ Register scratch = scratch0();
+ MemOperand target = ContextMemOperand(context, instr->slot_index());
+
+ Label skip_assignment;
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ LoadP(scratch, target);
+ __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ } else {
+ __ bne(&skip_assignment);
+ }
+ }
+
+ __ StoreP(value, target);
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
+ ? OMIT_SMI_CHECK
+ : INLINE_SMI_CHECK;
+ __ RecordWriteContextSlot(context, target.offset(), value, scratch,
+ GetLinkRegisterState(), kSaveFPRegs,
+ EMIT_REMEMBERED_SET, check_needed);
+ }
+
+ __ bind(&skip_assignment);
+}
+
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
+ Register object = ToRegister(instr->object());
+
+ if (access.IsExternalMemory()) {
+ Register result = ToRegister(instr->result());
+ MemOperand operand = MemOperand(object, offset);
+ __ LoadRepresentation(result, operand, access.representation(), r0);
+ return;
+ }
+
+ if (instr->hydrogen()->representation().IsDouble()) {
+ DCHECK(access.IsInobject());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ ld(result, FieldMemOperand(object, offset));
+ return;
+ }
+
+ Register result = ToRegister(instr->result());
+ if (!access.IsInobject()) {
+ __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ object = result;
+ }
+
+ Representation representation = access.representation();
+
+#if V8_TARGET_ARCH_S390X
+ // 64-bit Smi optimization
+ if (representation.IsSmi() &&
+ instr->hydrogen()->representation().IsInteger32()) {
+ // Read int value directly from upper half of the smi.
+ offset = SmiWordOffset(offset);
+ representation = Representation::Integer32();
+ }
+#endif
+
+ __ LoadRepresentation(result, FieldMemOperand(object, offset), representation,
+ r0);
+}
+
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->result()).is(r2));
+
+ // Name is always in r4.
+ __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
+ EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+ isolate(), NOT_INSIDE_TYPEOF,
+ instr->hydrogen()->initialization_state())
+ .code();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
+ Register scratch = scratch0();
+ Register function = ToRegister(instr->function());
+ Register result = ToRegister(instr->result());
+
+ // Get the prototype or initial map from the function.
+ __ LoadP(result,
+ FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Check that the function has a prototype or an initial map.
+ __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+
+ // If the function does not have an initial map, we're done.
+ Label done;
+ __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
+ __ bne(&done, Label::kNear);
+
+ // Get the prototype from the initial map.
+ __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
+
+ // All done.
+ __ bind(&done);
+}
+
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+ Register result = ToRegister(instr->result());
+ __ LoadRoot(result, instr->index());
+}
+
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+ Register arguments = ToRegister(instr->arguments());
+ Register result = ToRegister(instr->result());
+ // There are two words between the frame pointer and the last argument.
+ // Subtracting from length accounts for one of them add one more.
+ if (instr->length()->IsConstantOperand()) {
+ int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
+ if (instr->index()->IsConstantOperand()) {
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int index = (const_length - const_index) + 1;
+ __ LoadP(result, MemOperand(arguments, index * kPointerSize));
+ } else {
+ Register index = ToRegister(instr->index());
+ __ SubP(result, index, Operand(const_length + 1));
+ __ LoadComplementRR(result, result);
+ __ ShiftLeftP(result, result, Operand(kPointerSizeLog2));
+ __ LoadP(result, MemOperand(arguments, result));
+ }
+ } else if (instr->index()->IsConstantOperand()) {
+ Register length = ToRegister(instr->length());
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int loc = const_index - 1;
+ if (loc != 0) {
+ __ SubP(result, length, Operand(loc));
+ __ ShiftLeftP(result, result, Operand(kPointerSizeLog2));
+ __ LoadP(result, MemOperand(arguments, result));
+ } else {
+ __ ShiftLeftP(result, length, Operand(kPointerSizeLog2));
+ __ LoadP(result, MemOperand(arguments, result));
+ }
+ } else {
+ Register length = ToRegister(instr->length());
+ Register index = ToRegister(instr->index());
+ __ SubP(result, length, index);
+ __ AddP(result, result, Operand(1));
+ __ ShiftLeftP(result, result, Operand(kPointerSizeLog2));
+ __ LoadP(result, MemOperand(arguments, result));
+ }
+}
+
+void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
+ Register external_pointer = ToRegister(instr->elements());
+ Register key = no_reg;
+ ElementsKind elements_kind = instr->elements_kind();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+ int base_offset = instr->base_offset();
+ bool use_scratch = false;
+
+ if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ if (key_is_constant) {
+ base_offset += constant_key << element_size_shift;
+ if (!is_int20(base_offset)) {
+ __ mov(scratch0(), Operand(base_offset));
+ base_offset = 0;
+ use_scratch = true;
+ }
+ } else {
+ __ IndexToArrayOffset(scratch0(), key, element_size_shift, key_is_smi);
+ use_scratch = true;
+ }
+ if (elements_kind == FLOAT32_ELEMENTS) {
+ if (!use_scratch) {
+ __ ldeb(result, MemOperand(external_pointer, base_offset));
+ } else {
+ __ ldeb(result, MemOperand(scratch0(), external_pointer, base_offset));
+ }
+ } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
+ if (!use_scratch) {
+ __ ld(result, MemOperand(external_pointer, base_offset));
+ } else {
+ __ ld(result, MemOperand(scratch0(), external_pointer, base_offset));
+ }
+ }
+ } else {
+ Register result = ToRegister(instr->result());
+ MemOperand mem_operand =
+ PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
+ constant_key, element_size_shift, base_offset);
+ switch (elements_kind) {
+ case INT8_ELEMENTS:
+ __ LoadB(result, mem_operand);
+ break;
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ __ LoadlB(result, mem_operand);
+ break;
+ case INT16_ELEMENTS:
+ __ LoadHalfWordP(result, mem_operand);
+ break;
+ case UINT16_ELEMENTS:
+ __ LoadLogicalHalfWordP(result, mem_operand);
+ break;
+ case INT32_ELEMENTS:
+ __ LoadW(result, mem_operand, r0);
+ break;
+ case UINT32_ELEMENTS:
+ __ LoadlW(result, mem_operand, r0);
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+ __ CmpLogical32(result, Operand(0x80000000));
+ DeoptimizeIf(ge, instr, Deoptimizer::kNegativeValue);
+ }
+ break;
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ case NO_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
+ Register elements = ToRegister(instr->elements());
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ Register key = no_reg;
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ Register scratch = scratch0();
+
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+
+ bool use_scratch = false;
+ intptr_t base_offset = instr->base_offset() + constant_key * kDoubleSize;
+ if (!key_is_constant) {
+ use_scratch = true;
+ __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
+ }
+
+ // Memory references support up to 20-bits signed displacement in RXY form
+ // Include Register::kExponentOffset in check, so we are guaranteed not to
+ // overflow displacement later.
+ if (!is_int20(base_offset + Register::kExponentOffset)) {
+ use_scratch = true;
+ if (key_is_constant) {
+ __ mov(scratch, Operand(base_offset));
+ } else {
+ __ AddP(scratch, Operand(base_offset));
+ }
+ base_offset = 0;
+ }
+
+ if (!use_scratch) {
+ __ ld(result, MemOperand(elements, base_offset));
+ } else {
+ __ ld(result, MemOperand(scratch, elements, base_offset));
+ }
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ if (!use_scratch) {
+ __ LoadlW(r0,
+ MemOperand(elements, base_offset + Register::kExponentOffset));
+ } else {
+ __ LoadlW(r0, MemOperand(scratch, elements,
+ base_offset + Register::kExponentOffset));
+ }
+ __ Cmp32(r0, Operand(kHoleNanUpper32));
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ }
+}
+
+void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
+ HLoadKeyed* hinstr = instr->hydrogen();
+ Register elements = ToRegister(instr->elements());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+ int offset = instr->base_offset();
+
+ if (instr->key()->IsConstantOperand()) {
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ offset += ToInteger32(const_operand) * kPointerSize;
+ } else {
+ Register key = ToRegister(instr->key());
+ // Even though the HLoadKeyed instruction forces the input
+ // representation for the key to be an integer, the input gets replaced
+ // during bound check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ if (hinstr->key()->representation().IsSmi()) {
+ __ SmiToPtrArrayOffset(scratch, key);
+ } else {
+ __ ShiftLeftP(scratch, key, Operand(kPointerSizeLog2));
+ }
+ }
+
+ bool requires_hole_check = hinstr->RequiresHoleCheck();
+ Representation representation = hinstr->representation();
+
+#if V8_TARGET_ARCH_S390X
+ // 64-bit Smi optimization
+ if (representation.IsInteger32() &&
+ hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
+ DCHECK(!requires_hole_check);
+ // Read int value directly from upper half of the smi.
+ offset = SmiWordOffset(offset);
+ }
+#endif
+
+ if (instr->key()->IsConstantOperand()) {
+ __ LoadRepresentation(result, MemOperand(elements, offset), representation,
+ r1);
+ } else {
+ __ LoadRepresentation(result, MemOperand(scratch, elements, offset),
+ representation, r1);
+ }
+
+ // Check for the hole value.
+ if (requires_hole_check) {
+ if (IsFastSmiElementsKind(hinstr->elements_kind())) {
+ __ TestIfSmi(result);
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
+ } else {
+ __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ }
+ } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
+ DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
+ Label done;
+ __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+ __ CmpP(result, scratch);
+ __ bne(&done);
+ if (info()->IsStub()) {
+ // A stub can safely convert the hole to undefined only if the array
+ // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
+ // it needs to bail out.
+ __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
+ __ LoadP(result, FieldMemOperand(result, Cell::kValueOffset));
+ __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kArrayProtectorValid), r0);
+ DeoptimizeIf(ne, instr, Deoptimizer::kHole);
+ }
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ bind(&done);
+ }
+}
+
+void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
+ if (instr->is_fixed_typed_array()) {
+ DoLoadKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->representation().IsDouble()) {
+ DoLoadKeyedFixedDoubleArray(instr);
+ } else {
+ DoLoadKeyedFixedArray(instr);
+ }
+}
+
+MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base,
+ bool key_is_constant, bool key_is_smi,
+ int constant_key,
+ int element_size_shift,
+ int base_offset) {
+ Register scratch = scratch0();
+
+ if (key_is_constant) {
+ int offset = (base_offset + (constant_key << element_size_shift));
+ if (!is_int20(offset)) {
+ __ mov(scratch, Operand(offset));
+ return MemOperand(base, scratch);
+ } else {
+ return MemOperand(base,
+ (constant_key << element_size_shift) + base_offset);
+ }
+ }
+
+ bool needs_shift =
+ (element_size_shift != (key_is_smi ? kSmiTagSize + kSmiShiftSize : 0));
+
+ if (needs_shift) {
+ __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
+ } else {
+ scratch = key;
+ }
+
+ if (!is_int20(base_offset)) {
+ __ AddP(scratch, Operand(base_offset));
+ base_offset = 0;
+ }
+ return MemOperand(scratch, base, base_offset);
+}
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
+
+ if (instr->hydrogen()->HasVectorAndSlot()) {
+ EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
+ }
+
+ Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
+ isolate(), instr->hydrogen()->initialization_state())
+ .code();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+ Register scratch = scratch0();
+ Register result = ToRegister(instr->result());
+
+ if (instr->hydrogen()->from_inlined()) {
+ __ lay(result, MemOperand(sp, -2 * kPointerSize));
+ } else if (instr->hydrogen()->arguments_adaptor()) {
+ // Check if the calling frame is an arguments adaptor frame.
+ Label done, adapted;
+ __ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(
+ result,
+ MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ CmpSmiLiteral(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+
+ // Result is the frame pointer for the frame if not adapted and for the real
+ // frame below the adaptor frame if adapted.
+ __ beq(&adapted, Label::kNear);
+ __ LoadRR(result, fp);
+ __ b(&done, Label::kNear);
+
+ __ bind(&adapted);
+ __ LoadRR(result, scratch);
+ __ bind(&done);
+ } else {
+ __ LoadRR(result, fp);
+ }
+}
+
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+ Register elem = ToRegister(instr->elements());
+ Register result = ToRegister(instr->result());
+
+ Label done;
+
+ // If no arguments adaptor frame the number of arguments is fixed.
+ __ CmpP(fp, elem);
+ __ mov(result, Operand(scope()->num_parameters()));
+ __ beq(&done, Label::kNear);
+
+ // Arguments adaptor frame present. Get argument length from there.
+ __ LoadP(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(result,
+ MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(result);
+
+ // Argument length is in result register.
+ __ bind(&done);
+}
+
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+
+ // If the receiver is null or undefined, we have to pass the global
+ // object as a receiver to normal functions. Values have to be
+ // passed unchanged to builtins and strict-mode functions.
+ Label global_object, result_in_receiver;
+
+ if (!instr->hydrogen()->known_function()) {
+ // Do not transform the receiver to object for strict mode
+ // functions or builtins.
+ __ LoadP(scratch,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadlW(scratch, FieldMemOperand(
+ scratch, SharedFunctionInfo::kCompilerHintsOffset));
+ __ AndP(r0, scratch, Operand((1 << SharedFunctionInfo::kStrictModeBit) |
+ (1 << SharedFunctionInfo::kNativeBit)));
+ __ bne(&result_in_receiver, Label::kNear);
+ }
+
+ // Normal function. Replace undefined or null with global receiver.
+ __ CompareRoot(receiver, Heap::kNullValueRootIndex);
+ __ beq(&global_object, Label::kNear);
+ __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
+ __ beq(&global_object, Label::kNear);
+
+ // Deoptimize if the receiver is not a JS object.
+ __ TestIfSmi(receiver);
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+ __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
+ DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
+
+ __ b(&result_in_receiver, Label::kNear);
+ __ bind(&global_object);
+ __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset));
+ __ LoadP(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
+ __ LoadP(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
+
+ if (result.is(receiver)) {
+ __ bind(&result_in_receiver);
+ } else {
+ Label result_ok;
+ __ b(&result_ok, Label::kNear);
+ __ bind(&result_in_receiver);
+ __ LoadRR(result, receiver);
+ __ bind(&result_ok);
+ }
+}
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+ Register length = ToRegister(instr->length());
+ Register elements = ToRegister(instr->elements());
+ Register scratch = scratch0();
+ DCHECK(receiver.is(r2)); // Used for parameter count.
+ DCHECK(function.is(r3)); // Required by InvokeFunction.
+ DCHECK(ToRegister(instr->result()).is(r2));
+
+ // Copy the arguments to this function possibly from the
+ // adaptor frame below it.
+ const uint32_t kArgumentsLimit = 1 * KB;
+ __ CmpLogicalP(length, Operand(kArgumentsLimit));
+ DeoptimizeIf(gt, instr, Deoptimizer::kTooManyArguments);
+
+ // Push the receiver and use the register to keep the original
+ // number of arguments.
+ __ push(receiver);
+ __ LoadRR(receiver, length);
+ // The arguments are at a one pointer size offset from elements.
+ __ AddP(elements, Operand(1 * kPointerSize));
+
+ // Loop through the arguments pushing them onto the execution
+ // stack.
+ Label invoke, loop;
+ // length is a small non-negative integer, due to the test above.
+ __ CmpP(length, Operand::Zero());
+ __ beq(&invoke, Label::kNear);
+ __ bind(&loop);
+ __ ShiftLeftP(r1, length, Operand(kPointerSizeLog2));
+ __ LoadP(scratch, MemOperand(elements, r1));
+ __ push(scratch);
+ __ BranchOnCount(length, &loop);
+
+ __ bind(&invoke);
+
+ InvokeFlag flag = CALL_FUNCTION;
+ if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
+ DCHECK(!info()->saves_caller_doubles());
+ // TODO(ishell): drop current frame before pushing arguments to the stack.
+ flag = JUMP_FUNCTION;
+ ParameterCount actual(r2);
+ // It is safe to use r5, r6 and r7 as scratch registers here given that
+ // 1) we are not going to return to caller function anyway,
+ // 2) r5 (new.target) will be initialized below.
+ PrepareForTailCall(actual, r5, r6, r7);
+ }
+
+ DCHECK(instr->HasPointerMap());
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
+ // The number of arguments is stored in receiver which is r2, as expected
+ // by InvokeFunction.
+ ParameterCount actual(receiver);
+ __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
+}
+
+void LCodeGen::DoPushArgument(LPushArgument* instr) {
+ LOperand* argument = instr->value();
+ if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
+ Abort(kDoPushArgumentNotImplementedForDoubleType);
+ } else {
+ Register argument_reg = EmitLoadRegister(argument, ip);
+ __ push(argument_reg);
+ }
+}
+
+void LCodeGen::DoDrop(LDrop* instr) { __ Drop(instr->count()); }
+
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+ Register result = ToRegister(instr->result());
+ __ LoadP(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+}
+
+void LCodeGen::DoContext(LContext* instr) {
+ // If there is a non-return use, the context must be moved to a register.
+ Register result = ToRegister(instr->result());
+ if (info()->IsOptimizing()) {
+ __ LoadP(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ } else {
+ // If there is no frame, the context must be in cp.
+ DCHECK(result.is(cp));
+ }
+}
+
+void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ __ Move(scratch0(), instr->hydrogen()->pairs());
+ __ push(scratch0());
+ __ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags()));
+ __ push(scratch0());
+ CallRuntime(Runtime::kDeclareGlobals, instr);
+}
+
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+ int formal_parameter_count, int arity,
+ bool is_tail_call, LInstruction* instr) {
+ bool dont_adapt_arguments =
+ formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+ bool can_invoke_directly =
+ dont_adapt_arguments || formal_parameter_count == arity;
+
+ Register function_reg = r3;
+
+ LPointerMap* pointers = instr->pointer_map();
+
+ if (can_invoke_directly) {
+ // Change context.
+ __ LoadP(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
+
+ // Always initialize new target and number of actual arguments.
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ mov(r2, Operand(arity));
+
+ bool is_self_call = function.is_identical_to(info()->closure());
+
+ // Invoke function.
+ if (is_self_call) {
+ Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
+ if (is_tail_call) {
+ __ Jump(self, RelocInfo::CODE_TARGET);
+ } else {
+ __ Call(self, RelocInfo::CODE_TARGET);
+ }
+ } else {
+ __ LoadP(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
+ if (is_tail_call) {
+ __ JumpToJSEntry(ip);
+ } else {
+ __ CallJSEntry(ip);
+ }
+ }
+
+ if (!is_tail_call) {
+ // Set up deoptimization.
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ }
+ } else {
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount actual(arity);
+ ParameterCount expected(formal_parameter_count);
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(function_reg, expected, actual, flag, generator);
+ }
+}
+
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
+ DCHECK(instr->context() != NULL);
+ DCHECK(ToRegister(instr->context()).is(cp));
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+
+ // Deoptimize if not a heap number.
+ __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+
+ Label done;
+ Register exponent = scratch0();
+ scratch = no_reg;
+ __ LoadlW(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+ // Check the sign of the argument. If the argument is positive, just
+ // return it.
+ __ Cmp32(exponent, Operand::Zero());
+ // Move the input to the result if necessary.
+ __ Move(result, input);
+ __ bge(&done);
+
+ // Input is negative. Reverse its sign.
+ // Preserve the value of all registers.
+ {
+ PushSafepointRegistersScope scope(this);
+
+ // Registers were saved at the safepoint, so we can use
+ // many scratch registers.
+ Register tmp1 = input.is(r3) ? r2 : r3;
+ Register tmp2 = input.is(r4) ? r2 : r4;
+ Register tmp3 = input.is(r5) ? r2 : r5;
+ Register tmp4 = input.is(r6) ? r2 : r6;
+
+ // exponent: floating point exponent value.
+
+ Label allocated, slow;
+ __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
+ __ b(&allocated);
+
+ // Slow case: Call the runtime system to do the number allocation.
+ __ bind(&slow);
+
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
+ instr->context());
+ // Set the pointer to the new heap number in tmp.
+ if (!tmp1.is(r2)) __ LoadRR(tmp1, r2);
+ // Restore input_reg after call to runtime.
+ __ LoadFromSafepointRegisterSlot(input, input);
+ __ LoadlW(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+
+ __ bind(&allocated);
+ // exponent: floating point exponent value.
+ // tmp1: allocated heap number.
+
+ // Clear the sign bit.
+ __ nilf(exponent, Operand(~HeapNumber::kSignMask));
+ __ StoreW(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
+ __ LoadlW(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
+ __ StoreW(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
+
+ __ StoreToSafepointRegisterSlot(tmp1, result);
+ }
+
+ __ bind(&done);
+}
+
+void LCodeGen::EmitMathAbs(LMathAbs* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Label done;
+ __ CmpP(input, Operand::Zero());
+ __ Move(result, input);
+ __ bge(&done, Label::kNear);
+ __ LoadComplementRR(result, result);
+ // Deoptimize on overflow.
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
+ __ bind(&done);
+}
+
+#if V8_TARGET_ARCH_S390X
+void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Label done;
+ __ Cmp32(input, Operand::Zero());
+ __ Move(result, input);
+ __ bge(&done, Label::kNear);
+
+ // Deoptimize on overflow.
+ __ Cmp32(input, Operand(0x80000000));
+ DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+
+ __ LoadComplementRR(result, result);
+ __ bind(&done);
+}
+#endif
+
+void LCodeGen::DoMathAbs(LMathAbs* instr) {
+ // Class for deferred case.
+ class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
+ public:
+ DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() override {
+ codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
+ }
+ LInstruction* instr() override { return instr_; }
+
+ private:
+ LMathAbs* instr_;
+ };
+
+ Representation r = instr->hydrogen()->value()->representation();
+ if (r.IsDouble()) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ lpdbr(result, input);
+#if V8_TARGET_ARCH_S390X
+ } else if (r.IsInteger32()) {
+ EmitInteger32MathAbs(instr);
+ } else if (r.IsSmi()) {
+#else
+ } else if (r.IsSmiOrInteger32()) {
+#endif
+ EmitMathAbs(instr);
+ } else {
+ // Representation is tagged.
+ DeferredMathAbsTaggedHeapNumber* deferred =
+ new (zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
+ Register input = ToRegister(instr->value());
+ // Smi check.
+ __ JumpIfNotSmi(input, deferred->entry());
+ // If smi, handle it directly.
+ EmitMathAbs(instr);
+ __ bind(deferred->exit());
+ }
+}
+
+void LCodeGen::DoMathFloor(LMathFloor* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Register input_high = scratch0();
+ Register scratch = ip;
+ Label done, exact;
+
+ __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done,
+ &exact);
+ DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
+
+ __ bind(&exact);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // Test for -0.
+ __ CmpP(result, Operand::Zero());
+ __ bne(&done, Label::kNear);
+ __ Cmp32(input_high, Operand::Zero());
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ }
+ __ bind(&done);
+}
+
+void LCodeGen::DoMathRound(LMathRound* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
+ DoubleRegister input_plus_dot_five = double_scratch1;
+ Register scratch1 = scratch0();
+ Register scratch2 = ip;
+ DoubleRegister dot_five = double_scratch0();
+ Label convert, done;
+
+ __ LoadDoubleLiteral(dot_five, 0.5, r0);
+ __ lpdbr(double_scratch1, input);
+ __ cdbr(double_scratch1, dot_five);
+ DeoptimizeIf(unordered, instr, Deoptimizer::kLostPrecisionOrNaN);
+ // If input is in [-0.5, -0], the result is -0.
+ // If input is in [+0, +0.5[, the result is +0.
+ // If the input is +0.5, the result is 1.
+ __ bgt(&convert, Label::kNear); // Out of [-0.5, +0.5].
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // [-0.5, -0] (negative) yields minus zero.
+ __ TestDoubleSign(input, scratch1);
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ }
+ Label return_zero;
+ __ cdbr(input, dot_five);
+ __ bne(&return_zero, Label::kNear);
+ __ LoadImmP(result, Operand(1)); // +0.5.
+ __ b(&done, Label::kNear);
+ // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
+ // flag kBailoutOnMinusZero.
+ __ bind(&return_zero);
+ __ LoadImmP(result, Operand::Zero());
+ __ b(&done, Label::kNear);
+
+ __ bind(&convert);
+ __ ldr(input_plus_dot_five, input);
+ __ adbr(input_plus_dot_five, dot_five);
+ // Reuse dot_five (double_scratch0) as we no longer need this value.
+ __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
+ double_scratch0(), &done, &done);
+ DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
+ __ bind(&done);
+}
+
+void LCodeGen::DoMathFround(LMathFround* instr) {
+ DoubleRegister input_reg = ToDoubleRegister(instr->value());
+ DoubleRegister output_reg = ToDoubleRegister(instr->result());
+
+ // Round double to float
+ __ ledbr(output_reg, input_reg);
+ // Extend from float to double
+ __ ldebr(output_reg, output_reg);
+}
+
+void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ sqdbr(result, input);
+}
+
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ DoubleRegister temp = double_scratch0();
+
+ // Note that according to ECMA-262 15.8.2.13:
+ // Math.pow(-Infinity, 0.5) == Infinity
+ // Math.sqrt(-Infinity) == NaN
+ Label skip, done;
+
+ __ LoadDoubleLiteral(temp, -V8_INFINITY, scratch0());
+ __ cdbr(input, temp);
+ __ bne(&skip, Label::kNear);
+ __ lcdbr(result, temp);
+ __ b(&done, Label::kNear);
+
+ // Add +0 to convert -0 to +0.
+ __ bind(&skip);
+ __ ldr(result, input);
+ __ lzdr(kDoubleRegZero);
+ __ adbr(result, kDoubleRegZero);
+ __ sqdbr(result, result);
+ __ bind(&done);
+}
+
+void LCodeGen::DoPower(LPower* instr) {
+ Representation exponent_type = instr->hydrogen()->right()->representation();
+ // Having marked this as a call, we can use any registers.
+ // Just make sure that the input/output registers are the expected ones.
+ Register tagged_exponent = MathPowTaggedDescriptor::exponent();
+ DCHECK(!instr->right()->IsDoubleRegister() ||
+ ToDoubleRegister(instr->right()).is(d2));
+ DCHECK(!instr->right()->IsRegister() ||
+ ToRegister(instr->right()).is(tagged_exponent));
+ DCHECK(ToDoubleRegister(instr->left()).is(d1));
+ DCHECK(ToDoubleRegister(instr->result()).is(d3));
+
+ if (exponent_type.IsSmi()) {
+ MathPowStub stub(isolate(), MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsTagged()) {
+ Label no_deopt;
+ __ JumpIfSmi(tagged_exponent, &no_deopt);
+ __ LoadP(r9, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
+ __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+ __ bind(&no_deopt);
+ MathPowStub stub(isolate(), MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsInteger32()) {
+ MathPowStub stub(isolate(), MathPowStub::INTEGER);
+ __ CallStub(&stub);
+ } else {
+ DCHECK(exponent_type.IsDouble());
+ MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+ __ CallStub(&stub);
+ }
+}
+
+void LCodeGen::DoMathExp(LMathExp* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
+ DoubleRegister double_scratch2 = double_scratch0();
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+
+ MathExpGenerator::EmitMathExp(masm(), input, result, double_scratch1,
+ double_scratch2, temp1, temp2, scratch0());
+}
+
+void LCodeGen::DoMathLog(LMathLog* instr) {
+ __ PrepareCallCFunction(0, 1, scratch0());
+ __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+ __ CallCFunction(ExternalReference::math_log_double_function(isolate()), 0,
+ 1);
+ __ MovFromFloatResult(ToDoubleRegister(instr->result()));
+}
+
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Label done;
+ __ llgfr(result, input);
+ __ flogr(r0, result);
+ __ LoadRR(result, r0);
+ __ CmpP(r0, Operand::Zero());
+ __ beq(&done, Label::kNear);
+ __ SubP(result, Operand(32));
+ __ bind(&done);
+}
+
+void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+#if DEBUG
+ if (actual.is_reg()) {
+ DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
+ } else {
+ DCHECK(!AreAliased(scratch1, scratch2, scratch3));
+ }
+#endif
+ if (FLAG_code_comments) {
+ if (actual.is_reg()) {
+ Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+ } else {
+ Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
+ }
+ }
+
+ // Check if next frame is an arguments adaptor frame.
+ Register caller_args_count_reg = scratch1;
+ Label no_arguments_adaptor, formal_parameter_count_loaded;
+ __ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(scratch3,
+ MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ bne(&no_arguments_adaptor);
+
+ // Drop current frame and load arguments count from arguments adaptor frame.
+ __ LoadRR(fp, scratch2);
+ __ LoadP(caller_args_count_reg,
+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(caller_args_count_reg);
+ __ b(&formal_parameter_count_loaded);
+
+ __ bind(&no_arguments_adaptor);
+ // Load caller's formal parameter count
+ __ mov(caller_args_count_reg, Operand(info()->literal()->parameter_count()));
+
+ __ bind(&formal_parameter_count_loaded);
+ __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
+
+ Comment(";;; }");
+}
+
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ HInvokeFunction* hinstr = instr->hydrogen();
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->function()).is(r3));
+ DCHECK(instr->HasPointerMap());
+
+ bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
+
+ if (is_tail_call) {
+ DCHECK(!info()->saves_caller_doubles());
+ ParameterCount actual(instr->arity());
+ // It is safe to use r5, r6 and r7 as scratch registers here given that
+ // 1) we are not going to return to caller function anyway,
+ // 2) r5 (new.target) will be initialized below.
+ PrepareForTailCall(actual, r5, r6, r7);
+ }
+
+ Handle<JSFunction> known_function = hinstr->known_function();
+ if (known_function.is_null()) {
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount actual(instr->arity());
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(r3, no_reg, actual, flag, generator);
+ } else {
+ CallKnownFunction(known_function, hinstr->formal_parameter_count(),
+ instr->arity(), is_tail_call, instr);
+ }
+}
+
+void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
+ DCHECK(ToRegister(instr->result()).is(r2));
+
+ if (instr->hydrogen()->IsTailCall()) {
+ if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
+
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ __ Jump(code, RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ __ AddP(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(ip);
+ }
+ } else {
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+ __ Call(code, RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ generator.BeforeCall(__ CallSize(target));
+ __ AddP(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ CallJSEntry(ip);
+ }
+ generator.AfterCall();
+ }
+}
+
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->constructor()).is(r3));
+ DCHECK(ToRegister(instr->result()).is(r2));
+
+ __ mov(r2, Operand(instr->arity()));
+ if (instr->arity() == 1) {
+ // We only need the allocation site for the case we have a length argument.
+ // The case may bail out to the runtime, which will determine the correct
+ // elements kind with the site.
+ __ Move(r4, instr->hydrogen()->site());
+ } else {
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ }
+ ElementsKind kind = instr->hydrogen()->elements_kind();
+ AllocationSiteOverrideMode override_mode =
+ (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
+ ? DISABLE_ALLOCATION_SITES
+ : DONT_OVERRIDE;
+
+ if (instr->arity() == 0) {
+ ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ } else if (instr->arity() == 1) {
+ Label done;
+ if (IsFastPackedElementsKind(kind)) {
+ Label packed_case;
+ // We might need a change here
+ // look at the first argument
+ __ LoadP(r7, MemOperand(sp, 0));
+ __ CmpP(r7, Operand::Zero());
+ __ beq(&packed_case, Label::kNear);
+
+ ElementsKind holey_kind = GetHoleyElementsKind(kind);
+ ArraySingleArgumentConstructorStub stub(isolate(), holey_kind,
+ override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ b(&done, Label::kNear);
+ __ bind(&packed_case);
+ }
+
+ ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ bind(&done);
+ } else {
+ ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ }
+}
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+ CallRuntime(instr->function(), instr->arity(), instr);
+}
+
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+ Register function = ToRegister(instr->function());
+ Register code_object = ToRegister(instr->code_object());
+ __ lay(code_object,
+ MemOperand(code_object, Code::kHeaderSize - kHeapObjectTag));
+ __ StoreP(code_object,
+ FieldMemOperand(function, JSFunction::kCodeEntryOffset), r0);
+}
+
+void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
+ Register result = ToRegister(instr->result());
+ Register base = ToRegister(instr->base_object());
+ if (instr->offset()->IsConstantOperand()) {
+ LConstantOperand* offset = LConstantOperand::cast(instr->offset());
+ __ lay(result, MemOperand(base, ToInteger32(offset)));
+ } else {
+ Register offset = ToRegister(instr->offset());
+ __ lay(result, MemOperand(base, offset));
+ }
+}
+
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+ HStoreNamedField* hinstr = instr->hydrogen();
+ Representation representation = instr->representation();
+
+ Register object = ToRegister(instr->object());
+ Register scratch = scratch0();
+ HObjectAccess access = hinstr->access();
+ int offset = access.offset();
+
+ if (access.IsExternalMemory()) {
+ Register value = ToRegister(instr->value());
+ MemOperand operand = MemOperand(object, offset);
+ __ StoreRepresentation(value, operand, representation, r0);
+ return;
+ }
+
+ __ AssertNotSmi(object);
+
+#if V8_TARGET_ARCH_S390X
+ DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
+ IsInteger32(LConstantOperand::cast(instr->value())));
+#else
+ DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
+ IsSmi(LConstantOperand::cast(instr->value())));
+#endif
+ if (!FLAG_unbox_double_fields && representation.IsDouble()) {
+ DCHECK(access.IsInobject());
+ DCHECK(!hinstr->has_transition());
+ DCHECK(!hinstr->NeedsWriteBarrier());
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ DCHECK(offset >= 0);
+ __ std(value, FieldMemOperand(object, offset));
+ return;
+ }
+
+ if (hinstr->has_transition()) {
+ Handle<Map> transition = hinstr->transition_map();
+ AddDeprecationDependency(transition);
+ __ mov(scratch, Operand(transition));
+ __ StoreP(scratch, FieldMemOperand(object, HeapObject::kMapOffset), r0);
+ if (hinstr->NeedsWriteBarrierForMap()) {
+ Register temp = ToRegister(instr->temp());
+ // Update the write barrier for the map field.
+ __ RecordWriteForMap(object, scratch, temp, GetLinkRegisterState(),
+ kSaveFPRegs);
+ }
+ }
+
+ // Do the store.
+ Register record_dest = object;
+ Register record_value = no_reg;
+ Register record_scratch = scratch;
+#if V8_TARGET_ARCH_S390X
+ if (FLAG_unbox_double_fields && representation.IsDouble()) {
+ DCHECK(access.IsInobject());
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ __ std(value, FieldMemOperand(object, offset));
+ if (hinstr->NeedsWriteBarrier()) {
+ record_value = ToRegister(instr->value());
+ }
+ } else {
+ if (representation.IsSmi() &&
+ hinstr->value()->representation().IsInteger32()) {
+ DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+ // 64-bit Smi optimization
+ // Store int value directly to upper half of the smi.
+ offset = SmiWordOffset(offset);
+ representation = Representation::Integer32();
+ }
+#endif
+ if (access.IsInobject()) {
+ Register value = ToRegister(instr->value());
+ MemOperand operand = FieldMemOperand(object, offset);
+ __ StoreRepresentation(value, operand, representation, r0);
+ record_value = value;
+ } else {
+ Register value = ToRegister(instr->value());
+ __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ MemOperand operand = FieldMemOperand(scratch, offset);
+ __ StoreRepresentation(value, operand, representation, r0);
+ record_dest = scratch;
+ record_value = value;
+ record_scratch = object;
+ }
+#if V8_TARGET_ARCH_S390X
+ }
+#endif
+
+ if (hinstr->NeedsWriteBarrier()) {
+ __ RecordWriteField(record_dest, offset, record_value, record_scratch,
+ GetLinkRegisterState(), kSaveFPRegs,
+ EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
+ hinstr->PointersToHereCheckForValue());
+ }
+}
+
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+
+ if (instr->hydrogen()->HasVectorAndSlot()) {
+ EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
+ }
+
+ __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
+ Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
+ isolate(), instr->language_mode(),
+ instr->hydrogen()->initialization_state())
+ .code();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+ Representation representation = instr->hydrogen()->length()->representation();
+ DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
+ DCHECK(representation.IsSmiOrInteger32());
+
+ Condition cc = instr->hydrogen()->allow_equality() ? lt : le;
+ if (instr->length()->IsConstantOperand()) {
+ int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
+ Register index = ToRegister(instr->index());
+ if (representation.IsSmi()) {
+ __ CmpLogicalP(index, Operand(Smi::FromInt(length)));
+ } else {
+ __ CmpLogical32(index, Operand(length));
+ }
+ cc = CommuteCondition(cc);
+ } else if (instr->index()->IsConstantOperand()) {
+ int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
+ Register length = ToRegister(instr->length());
+ if (representation.IsSmi()) {
+ __ CmpLogicalP(length, Operand(Smi::FromInt(index)));
+ } else {
+ __ CmpLogical32(length, Operand(index));
+ }
+ } else {
+ Register index = ToRegister(instr->index());
+ Register length = ToRegister(instr->length());
+ if (representation.IsSmi()) {
+ __ CmpLogicalP(length, index);
+ } else {
+ __ CmpLogical32(length, index);
+ }
+ }
+ if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
+ Label done;
+ __ b(NegateCondition(cc), &done, Label::kNear);
+ __ stop("eliminated bounds check failed");
+ __ bind(&done);
+ } else {
+ DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
+ }
+}
+
+void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
+ Register external_pointer = ToRegister(instr->elements());
+ Register key = no_reg;
+ ElementsKind elements_kind = instr->elements_kind();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+ int base_offset = instr->base_offset();
+
+ if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
+ Register address = scratch0();
+ DoubleRegister value(ToDoubleRegister(instr->value()));
+ if (key_is_constant) {
+ if (constant_key != 0) {
+ base_offset += constant_key << element_size_shift;
+ if (!is_int20(base_offset)) {
+ __ mov(address, Operand(base_offset));
+ __ AddP(address, external_pointer);
+ } else {
+ __ AddP(address, external_pointer, Operand(base_offset));
+ }
+ base_offset = 0;
+ } else {
+ address = external_pointer;
+ }
+ } else {
+ __ IndexToArrayOffset(address, key, element_size_shift, key_is_smi);
+ __ AddP(address, external_pointer);
+ }
+ if (elements_kind == FLOAT32_ELEMENTS) {
+ __ ledbr(double_scratch0(), value);
+ __ StoreFloat32(double_scratch0(), MemOperand(address, base_offset));
+ } else { // Storing doubles, not floats.
+ __ StoreDouble(value, MemOperand(address, base_offset));
+ }
+ } else {
+ Register value(ToRegister(instr->value()));
+ MemOperand mem_operand =
+ PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
+ constant_key, element_size_shift, base_offset);
+ switch (elements_kind) {
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ case INT8_ELEMENTS:
+ if (key_is_constant) {
+ __ StoreByte(value, mem_operand, r0);
+ } else {
+ __ StoreByte(value, mem_operand);
+ }
+ break;
+ case INT16_ELEMENTS:
+ case UINT16_ELEMENTS:
+ if (key_is_constant) {
+ __ StoreHalfWord(value, mem_operand, r0);
+ } else {
+ __ StoreHalfWord(value, mem_operand);
+ }
+ break;
+ case INT32_ELEMENTS:
+ case UINT32_ELEMENTS:
+ if (key_is_constant) {
+ __ StoreW(value, mem_operand, r0);
+ } else {
+ __ StoreW(value, mem_operand);
+ }
+ break;
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ case NO_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ Register elements = ToRegister(instr->elements());
+ Register key = no_reg;
+ Register scratch = scratch0();
+ DoubleRegister double_scratch = double_scratch0();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+
+ // Calculate the effective address of the slot in the array to store the
+ // double value.
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+ int base_offset = instr->base_offset() + constant_key * kDoubleSize;
+ bool use_scratch = false;
+ intptr_t address_offset = base_offset;
+
+ if (key_is_constant) {
+ // Memory references support up to 20-bits signed displacement in RXY form
+ if (!is_int20((address_offset))) {
+ __ mov(scratch, Operand(address_offset));
+ address_offset = 0;
+ use_scratch = true;
+ }
+ } else {
+ use_scratch = true;
+ __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
+ // Memory references support up to 20-bits signed displacement in RXY form
+ if (!is_int20((address_offset))) {
+ __ AddP(scratch, Operand(address_offset));
+ address_offset = 0;
+ }
+ }
+
+ if (instr->NeedsCanonicalization()) {
+ // Turn potential sNaN value into qNaN.
+ __ CanonicalizeNaN(double_scratch, value);
+ DCHECK(address_offset >= 0);
+ if (use_scratch)
+ __ std(double_scratch, MemOperand(scratch, elements, address_offset));
+ else
+ __ std(double_scratch, MemOperand(elements, address_offset));
+ } else {
+ if (use_scratch)
+ __ std(value, MemOperand(scratch, elements, address_offset));
+ else
+ __ std(value, MemOperand(elements, address_offset));
+ }
+}
+
+void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
+ HStoreKeyed* hinstr = instr->hydrogen();
+ Register value = ToRegister(instr->value());
+ Register elements = ToRegister(instr->elements());
+ Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
+ Register scratch = scratch0();
+ int offset = instr->base_offset();
+
+ // Do the store.
+ if (instr->key()->IsConstantOperand()) {
+ DCHECK(!hinstr->NeedsWriteBarrier());
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ offset += ToInteger32(const_operand) * kPointerSize;
+ } else {
+ // Even though the HLoadKeyed instruction forces the input
+ // representation for the key to be an integer, the input gets replaced
+ // during bound check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ if (hinstr->key()->representation().IsSmi()) {
+ __ SmiToPtrArrayOffset(scratch, key);
+ } else {
+ if (instr->hydrogen()->IsDehoisted() ||
+ !CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+#if V8_TARGET_ARCH_S390X
+ // If array access is dehoisted, the key, being an int32, can contain
+ // a negative value, as needs to be sign-extended to 64-bit for
+ // memory access.
+ __ lgfr(key, key);
+#endif
+ __ ShiftLeftP(scratch, key, Operand(kPointerSizeLog2));
+ } else {
+ // Small optimization to reduce pathlength. After Bounds Check,
+ // the key is guaranteed to be non-negative. Leverage RISBG,
+ // which also performs zero-extension.
+ __ risbg(scratch, key, Operand(32 - kPointerSizeLog2),
+ Operand(63 - kPointerSizeLog2), Operand(kPointerSizeLog2),
+ true);
+ }
+ }
+ }
+
+ Representation representation = hinstr->value()->representation();
+
+#if V8_TARGET_ARCH_S390X
+ // 64-bit Smi optimization
+ if (representation.IsInteger32()) {
+ DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+ DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
+ // Store int value directly to upper half of the smi.
+ offset = SmiWordOffset(offset);
+ }
+#endif
+
+ if (instr->key()->IsConstantOperand()) {
+ __ StoreRepresentation(value, MemOperand(elements, offset), representation,
+ scratch);
+ } else {
+ __ StoreRepresentation(value, MemOperand(scratch, elements, offset),
+ representation, r0);
+ }
+
+ if (hinstr->NeedsWriteBarrier()) {
+ SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
+ ? OMIT_SMI_CHECK
+ : INLINE_SMI_CHECK;
+ // Compute address of modified element and store it into key register.
+ if (instr->key()->IsConstantOperand()) {
+ __ lay(key, MemOperand(elements, offset));
+ } else {
+ __ lay(key, MemOperand(scratch, elements, offset));
+ }
+ __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs,
+ EMIT_REMEMBERED_SET, check_needed,
+ hinstr->PointersToHereCheckForValue());
+ }
+}
+
+void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
+ // By cases: external, fast double
+ if (instr->is_fixed_typed_array()) {
+ DoStoreKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->value()->representation().IsDouble()) {
+ DoStoreKeyedFixedDoubleArray(instr);
+ } else {
+ DoStoreKeyedFixedArray(instr);
+ }
+}
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
+ DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+
+ if (instr->hydrogen()->HasVectorAndSlot()) {
+ EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
+ }
+
+ Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
+ isolate(), instr->language_mode(),
+ instr->hydrogen()->initialization_state())
+ .code();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
+ class DeferredMaybeGrowElements final : public LDeferredCode {
+ public:
+ DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
+ LInstruction* instr() override { return instr_; }
+
+ private:
+ LMaybeGrowElements* instr_;
+ };
+
+ Register result = r2;
+ DeferredMaybeGrowElements* deferred =
+ new (zone()) DeferredMaybeGrowElements(this, instr);
+ LOperand* key = instr->key();
+ LOperand* current_capacity = instr->current_capacity();
+
+ DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
+ DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
+ DCHECK(key->IsConstantOperand() || key->IsRegister());
+ DCHECK(current_capacity->IsConstantOperand() ||
+ current_capacity->IsRegister());
+
+ if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
+ int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+ int32_t constant_capacity =
+ ToInteger32(LConstantOperand::cast(current_capacity));
+ if (constant_key >= constant_capacity) {
+ // Deferred case.
+ __ b(deferred->entry());
+ }
+ } else if (key->IsConstantOperand()) {
+ int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+ __ Cmp32(ToRegister(current_capacity), Operand(constant_key));
+ __ ble(deferred->entry());
+ } else if (current_capacity->IsConstantOperand()) {
+ int32_t constant_capacity =
+ ToInteger32(LConstantOperand::cast(current_capacity));
+ __ Cmp32(ToRegister(key), Operand(constant_capacity));
+ __ bge(deferred->entry());
+ } else {
+ __ Cmp32(ToRegister(key), ToRegister(current_capacity));
+ __ bge(deferred->entry());
+ }
+
+ if (instr->elements()->IsRegister()) {
+ __ Move(result, ToRegister(instr->elements()));
+ } else {
+ __ LoadP(result, ToMemOperand(instr->elements()));
+ }
+
+ __ bind(deferred->exit());
+}
+
+void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ Register result = r2;
+ __ LoadImmP(result, Operand::Zero());
+
+ // We have to call a stub.
+ {
+ PushSafepointRegistersScope scope(this);
+ if (instr->object()->IsRegister()) {
+ __ Move(result, ToRegister(instr->object()));
+ } else {
+ __ LoadP(result, ToMemOperand(instr->object()));
+ }
+
+ LOperand* key = instr->key();
+ if (key->IsConstantOperand()) {
+ __ LoadSmiLiteral(r5, ToSmi(LConstantOperand::cast(key)));
+ } else {
+ __ SmiTag(r5, ToRegister(key));
+ }
+
+ GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
+ instr->hydrogen()->kind());
+ __ CallStub(&stub);
+ RecordSafepointWithLazyDeopt(
+ instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ __ StoreToSafepointRegisterSlot(result, result);
+ }
+
+ // Deopt on smi, which means the elements array changed to dictionary mode.
+ __ TestIfSmi(result);
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+}
+
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+ Register object_reg = ToRegister(instr->object());
+ Register scratch = scratch0();
+
+ Handle<Map> from_map = instr->original_map();
+ Handle<Map> to_map = instr->transitioned_map();
+ ElementsKind from_kind = instr->from_kind();
+ ElementsKind to_kind = instr->to_kind();
+
+ Label not_applicable;
+ __ LoadP(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+ __ CmpP(scratch, Operand(from_map));
+ __ bne(&not_applicable);
+
+ if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ Register new_map_reg = ToRegister(instr->new_map_temp());
+ __ mov(new_map_reg, Operand(to_map));
+ __ StoreP(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+ // Write barrier.
+ __ RecordWriteForMap(object_reg, new_map_reg, scratch,
+ GetLinkRegisterState(), kDontSaveFPRegs);
+ } else {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(object_reg.is(r2));
+ PushSafepointRegistersScope scope(this);
+ __ Move(r3, to_map);
+ bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
+ TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+ __ CallStub(&stub);
+ RecordSafepointWithRegisters(instr->pointer_map(), 0,
+ Safepoint::kLazyDeopt);
+ }
+ __ bind(&not_applicable);
+}
+
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+ Register object = ToRegister(instr->object());
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+ Label no_memento_found;
+ __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
+ DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
+ __ bind(&no_memento_found);
+}
+
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->left()).is(r3));
+ DCHECK(ToRegister(instr->right()).is(r2));
+ StringAddStub stub(isolate(), instr->hydrogen()->flags(),
+ instr->hydrogen()->pretenure_flag());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+ class DeferredStringCharCodeAt final : public LDeferredCode {
+ public:
+ DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
+ LInstruction* instr() override { return instr_; }
+
+ private:
+ LStringCharCodeAt* instr_;
+ };
+
+ DeferredStringCharCodeAt* deferred =
+ new (zone()) DeferredStringCharCodeAt(this, instr);
+
+ StringCharLoadGenerator::Generate(
+ masm(), ToRegister(instr->string()), ToRegister(instr->index()),
+ ToRegister(instr->result()), deferred->entry());
+ __ bind(deferred->exit());
+}
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ LoadImmP(result, Operand::Zero());
+
+ PushSafepointRegistersScope scope(this);
+ __ push(string);
+ // Push the index as a smi. This is safe because of the checks in
+ // DoStringCharCodeAt above.
+ if (instr->index()->IsConstantOperand()) {
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ __ LoadSmiLiteral(scratch, Smi::FromInt(const_index));
+ __ push(scratch);
+ } else {
+ Register index = ToRegister(instr->index());
+ __ SmiTag(index);
+ __ push(index);
+ }
+ CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
+ instr->context());
+ __ AssertSmi(r2);
+ __ SmiUntag(r2);
+ __ StoreToSafepointRegisterSlot(r2, result);
+}
+
+void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
+ class DeferredStringCharFromCode final : public LDeferredCode {
+ public:
+ DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() override {
+ codegen()->DoDeferredStringCharFromCode(instr_);
+ }
+ LInstruction* instr() override { return instr_; }
+
+ private:
+ LStringCharFromCode* instr_;
+ };
+
+ DeferredStringCharFromCode* deferred =
+ new (zone()) DeferredStringCharFromCode(this, instr);
+
+ DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
+ Register char_code = ToRegister(instr->char_code());
+ Register result = ToRegister(instr->result());
+ DCHECK(!char_code.is(result));
+
+ __ CmpLogicalP(char_code, Operand(String::kMaxOneByteCharCode));
+ __ bgt(deferred->entry());
+ __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
+ __ ShiftLeftP(r0, char_code, Operand(kPointerSizeLog2));
+ __ AddP(result, r0);
+ __ LoadP(result, FieldMemOperand(result, FixedArray::kHeaderSize));
+ __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
+ __ beq(deferred->entry());
+ __ bind(deferred->exit());
+}
+
+void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
+ Register char_code = ToRegister(instr->char_code());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ LoadImmP(result, Operand::Zero());
+
+ PushSafepointRegistersScope scope(this);
+ __ SmiTag(char_code);
+ __ push(char_code);
+ CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
+ instr->context());
+ __ StoreToSafepointRegisterSlot(r2, result);
+}
+
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+ LOperand* input = instr->value();
+ DCHECK(input->IsRegister() || input->IsStackSlot());
+ LOperand* output = instr->result();
+ DCHECK(output->IsDoubleRegister());
+ if (input->IsStackSlot()) {
+ Register scratch = scratch0();
+ __ LoadP(scratch, ToMemOperand(input));
+ __ ConvertIntToDouble(scratch, ToDoubleRegister(output));
+ } else {
+ __ ConvertIntToDouble(ToRegister(input), ToDoubleRegister(output));
+ }
+}
+
+void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+ LOperand* input = instr->value();
+ LOperand* output = instr->result();
+ __ ConvertUnsignedIntToDouble(ToRegister(input), ToDoubleRegister(output));
+}
+
+void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
+ class DeferredNumberTagI final : public LDeferredCode {
+ public:
+ DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() override {
+ codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
+ instr_->temp2(), SIGNED_INT32);
+ }
+ LInstruction* instr() override { return instr_; }
+
+ private:
+ LNumberTagI* instr_;
+ };
+
+ Register src = ToRegister(instr->value());
+ Register dst = ToRegister(instr->result());
+
+ DeferredNumberTagI* deferred = new (zone()) DeferredNumberTagI(this, instr);
+#if V8_TARGET_ARCH_S390X
+ __ SmiTag(dst, src);
+#else
+ // Add src to itself to defect SMI overflow.
+ __ Add32(dst, src, src);
+ __ b(overflow, deferred->entry());
+#endif
+ __ bind(deferred->exit());
+}
+
+void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
+ class DeferredNumberTagU final : public LDeferredCode {
+ public:
+ DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() override {
+ codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
+ instr_->temp2(), UNSIGNED_INT32);
+ }
+ LInstruction* instr() override { return instr_; }
+
+ private:
+ LNumberTagU* instr_;
+ };
+
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+
+ DeferredNumberTagU* deferred = new (zone()) DeferredNumberTagU(this, instr);
+ __ CmpLogicalP(input, Operand(Smi::kMaxValue));
+ __ bgt(deferred->entry());
+ __ SmiTag(result, input);
+ __ bind(deferred->exit());
+}
+
+void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
+ LOperand* temp1, LOperand* temp2,
+ IntegerSignedness signedness) {
+ Label done, slow;
+ Register src = ToRegister(value);
+ Register dst = ToRegister(instr->result());
+ Register tmp1 = scratch0();
+ Register tmp2 = ToRegister(temp1);
+ Register tmp3 = ToRegister(temp2);
+ DoubleRegister dbl_scratch = double_scratch0();
+
+ if (signedness == SIGNED_INT32) {
+ // There was overflow, so bits 30 and 31 of the original integer
+ // disagree. Try to allocate a heap number in new space and store
+ // the value in there. If that fails, call the runtime system.
+ if (dst.is(src)) {
+ __ SmiUntag(src, dst);
+ __ xilf(src, Operand(HeapNumber::kSignMask));
+ }
+ __ ConvertIntToDouble(src, dbl_scratch);
+ } else {
+ __ ConvertUnsignedIntToDouble(src, dbl_scratch);
+ }
+
+ if (FLAG_inline_new) {
+ __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
+ __ b(&done);
+ }
+
+ // Slow case: Call the runtime system to do the number allocation.
+ __ bind(&slow);
+ {
+ // TODO(3095996): Put a valid pointer value in the stack slot where the
+ // result register is stored, as this register is in the pointer map, but
+ // contains an integer value.
+ __ LoadImmP(dst, Operand::Zero());
+
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this);
+
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(instr->pointer_map(), 0,
+ Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(r2, dst);
+ }
+
+ // Done. Put the value in dbl_scratch into the value of the allocated heap
+ // number.
+ __ bind(&done);
+ __ StoreDouble(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
+}
+
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+ class DeferredNumberTagD final : public LDeferredCode {
+ public:
+ DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
+ LInstruction* instr() override { return instr_; }
+
+ private:
+ LNumberTagD* instr_;
+ };
+
+ DoubleRegister input_reg = ToDoubleRegister(instr->value());
+ Register scratch = scratch0();
+ Register reg = ToRegister(instr->result());
+ Register temp1 = ToRegister(instr->temp());
+ Register temp2 = ToRegister(instr->temp2());
+
+ DeferredNumberTagD* deferred = new (zone()) DeferredNumberTagD(this, instr);
+ if (FLAG_inline_new) {
+ __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
+ } else {
+ __ b(deferred->entry());
+ }
+ __ bind(deferred->exit());
+ __ StoreDouble(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
+}
+
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ Register reg = ToRegister(instr->result());
+ __ LoadImmP(reg, Operand::Zero());
+
+ PushSafepointRegistersScope scope(this);
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(instr->pointer_map(), 0,
+ Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(r2, reg);
+}
+
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+ HChange* hchange = instr->hydrogen();
+ Register input = ToRegister(instr->value());
+ Register output = ToRegister(instr->result());
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ hchange->value()->CheckFlag(HValue::kUint32)) {
+ __ TestUnsignedSmiCandidate(input, r0);
+ DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, cr0);
+ }
+#if !V8_TARGET_ARCH_S390X
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ !hchange->value()->CheckFlag(HValue::kUint32)) {
+ __ SmiTagCheckOverflow(output, input, r0);
+ DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
+ } else {
+#endif
+ __ SmiTag(output, input);
+#if !V8_TARGET_ARCH_S390X
+ }
+#endif
+}
+
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ if (instr->needs_check()) {
+ __ tmll(input, Operand(kHeapObjectTag));
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
+ __ SmiUntag(result, input);
+ } else {
+ __ SmiUntag(result, input);
+ }
+}
+
+void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
+ DoubleRegister result_reg,
+ NumberUntagDMode mode) {
+ bool can_convert_undefined_to_nan =
+ instr->hydrogen()->can_convert_undefined_to_nan();
+ bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
+ Register scratch = scratch0();
+ DCHECK(!result_reg.is(double_scratch0()));
+
+ Label convert, load_smi, done;
+
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ // Smi check.
+ __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+
+ // Heap number map check.
+ __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ CmpP(scratch, RootMemOperand(Heap::kHeapNumberMapRootIndex));
+
+ if (can_convert_undefined_to_nan) {
+ __ bne(&convert, Label::kNear);
+ } else {
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+ }
+ // load heap number
+ __ ld(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+ if (deoptimize_on_minus_zero) {
+ __ TestDoubleIsMinusZero(result_reg, scratch, ip);
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ }
+ __ b(&done, Label::kNear);
+ if (can_convert_undefined_to_nan) {
+ __ bind(&convert);
+ // Convert undefined (and hole) to NaN.
+ __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
+ __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+ __ ld(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
+ __ b(&done, Label::kNear);
+ }
+ } else {
+ __ SmiUntag(scratch, input_reg);
+ DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
+ }
+ // Smi to double register conversion
+ __ bind(&load_smi);
+ // scratch: untagged value of input_reg
+ __ ConvertIntToDouble(scratch, result_reg);
+ __ bind(&done);
+}
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
+ Register input_reg = ToRegister(instr->value());
+ Register scratch1 = scratch0();
+ Register scratch2 = ToRegister(instr->temp());
+ DoubleRegister double_scratch = double_scratch0();
+ DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
+
+ DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
+ DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
+
+ Label done;
+
+ // Heap number map check.
+ __ LoadP(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ CompareRoot(scratch1, Heap::kHeapNumberMapRootIndex);
+
+ if (instr->truncating()) {
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations.
+ Label no_heap_number, check_bools, check_false;
+ __ bne(&no_heap_number, Label::kNear);
+ __ LoadRR(scratch2, input_reg);
+ __ TruncateHeapNumberToI(input_reg, scratch2);
+ __ b(&done, Label::kNear);
+
+ // Check for Oddballs. Undefined/False is converted to zero and True to one
+ // for truncating conversions.
+ __ bind(&no_heap_number);
+ __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
+ __ bne(&check_bools);
+ __ LoadImmP(input_reg, Operand::Zero());
+ __ b(&done, Label::kNear);
+
+ __ bind(&check_bools);
+ __ CompareRoot(input_reg, Heap::kTrueValueRootIndex);
+ __ bne(&check_false, Label::kNear);
+ __ LoadImmP(input_reg, Operand(1));
+ __ b(&done, Label::kNear);
+
+ __ bind(&check_false);
+ __ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean);
+ __ LoadImmP(input_reg, Operand::Zero());
+ } else {
+ // Deoptimize if we don't have a heap number.
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+
+ __ ld(double_scratch2,
+ FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // preserve heap number pointer in scratch2 for minus zero check below
+ __ LoadRR(scratch2, input_reg);
+ }
+ __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
+ double_scratch);
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ CmpP(input_reg, Operand::Zero());
+ __ bne(&done, Label::kNear);
+ __ TestHeapNumberSign(scratch2, scratch1);
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ }
+ }
+ __ bind(&done);
+}
+
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+ class DeferredTaggedToI final : public LDeferredCode {
+ public:
+ DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
+ LInstruction* instr() override { return instr_; }
+
+ private:
+ LTaggedToI* instr_;
+ };
+
+ LOperand* input = instr->value();
+ DCHECK(input->IsRegister());
+ DCHECK(input->Equals(instr->result()));
+
+ Register input_reg = ToRegister(input);
+
+ if (instr->hydrogen()->value()->representation().IsSmi()) {
+ __ SmiUntag(input_reg);
+ } else {
+ DeferredTaggedToI* deferred = new (zone()) DeferredTaggedToI(this, instr);
+
+ // Branch to deferred code if the input is a HeapObject.
+ __ JumpIfNotSmi(input_reg, deferred->entry());
+
+ __ SmiUntag(input_reg);
+ __ bind(deferred->exit());
+ }
+}
+
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+ LOperand* input = instr->value();
+ DCHECK(input->IsRegister());
+ LOperand* result = instr->result();
+ DCHECK(result->IsDoubleRegister());
+
+ Register input_reg = ToRegister(input);
+ DoubleRegister result_reg = ToDoubleRegister(result);
+
+ HValue* value = instr->hydrogen()->value();
+ NumberUntagDMode mode = value->representation().IsSmi()
+ ? NUMBER_CANDIDATE_IS_SMI
+ : NUMBER_CANDIDATE_IS_ANY_TAGGED;
+
+ EmitNumberUntagD(instr, input_reg, result_reg, mode);
+}
+
+void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
+ Register result_reg = ToRegister(instr->result());
+ Register scratch1 = scratch0();
+ DoubleRegister double_input = ToDoubleRegister(instr->value());
+ DoubleRegister double_scratch = double_scratch0();
+
+ if (instr->truncating()) {
+ __ TruncateDoubleToI(result_reg, double_input);
+ } else {
+ __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
+ double_scratch);
+ // Deoptimize if the input wasn't a int32 (inside a double).
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label done;
+ __ CmpP(result_reg, Operand::Zero());
+ __ bne(&done, Label::kNear);
+ __ TestDoubleSign(double_input, scratch1);
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ __ bind(&done);
+ }
+ }
+}
+
+void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
+ Register result_reg = ToRegister(instr->result());
+ Register scratch1 = scratch0();
+ DoubleRegister double_input = ToDoubleRegister(instr->value());
+ DoubleRegister double_scratch = double_scratch0();
+
+ if (instr->truncating()) {
+ __ TruncateDoubleToI(result_reg, double_input);
+ } else {
+ __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
+ double_scratch);
+ // Deoptimize if the input wasn't a int32 (inside a double).
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label done;
+ __ CmpP(result_reg, Operand::Zero());
+ __ bne(&done, Label::kNear);
+ __ TestDoubleSign(double_input, scratch1);
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ __ bind(&done);
+ }
+ }
+#if V8_TARGET_ARCH_S390X
+ __ SmiTag(result_reg);
+#else
+ __ SmiTagCheckOverflow(result_reg, r0);
+ DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
+#endif
+}
+
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+ LOperand* input = instr->value();
+ __ TestIfSmi(ToRegister(input));
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
+}
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+ LOperand* input = instr->value();
+ __ TestIfSmi(ToRegister(input));
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+ }
+}
+
+void LCodeGen::DoCheckArrayBufferNotNeutered(
+ LCheckArrayBufferNotNeutered* instr) {
+ Register view = ToRegister(instr->view());
+ Register scratch = scratch0();
+
+ __ LoadP(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
+ __ LoadlW(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
+ __ And(r0, scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
+ DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, cr0);
+}
+
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+ Register input = ToRegister(instr->value());
+ Register scratch = scratch0();
+
+ __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+
+ if (instr->hydrogen()->is_interval_check()) {
+ InstanceType first;
+ InstanceType last;
+ instr->hydrogen()->GetCheckInterval(&first, &last);
+
+ __ CmpLogicalByte(FieldMemOperand(scratch, Map::kInstanceTypeOffset),
+ Operand(first));
+
+ // If there is only one type in the interval check for equality.
+ if (first == last) {
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+ } else {
+ DeoptimizeIf(lt, instr, Deoptimizer::kWrongInstanceType);
+ // Omit check for the last type.
+ if (last != LAST_TYPE) {
+ __ CmpLogicalByte(FieldMemOperand(scratch, Map::kInstanceTypeOffset),
+ Operand(last));
+ DeoptimizeIf(gt, instr, Deoptimizer::kWrongInstanceType);
+ }
+ }
+ } else {
+ uint8_t mask;
+ uint8_t tag;
+ instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
+ __ LoadlB(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+
+ if (base::bits::IsPowerOfTwo32(mask)) {
+ DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
+ __ AndP(scratch, Operand(mask));
+ DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType);
+ } else {
+ __ AndP(scratch, Operand(mask));
+ __ CmpP(scratch, Operand(tag));
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+ }
+ }
+}
+
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
+ Register reg = ToRegister(instr->value());
+ Handle<HeapObject> object = instr->hydrogen()->object().handle();
+ AllowDeferredHandleDereference smi_check;
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Register reg = ToRegister(instr->value());
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ __ mov(ip, Operand(cell));
+ __ CmpP(reg, FieldMemOperand(ip, Cell::kValueOffset));
+ } else {
+ __ CmpP(reg, Operand(object));
+ }
+ DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
+}
+
+void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+ Register temp = ToRegister(instr->temp());
+ {
+ PushSafepointRegistersScope scope(this);
+ __ push(object);
+ __ LoadImmP(cp, Operand::Zero());
+ __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
+ RecordSafepointWithRegisters(instr->pointer_map(), 1,
+ Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(r2, temp);
+ }
+ __ TestIfSmi(temp);
+ DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, cr0);
+}
+
+void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+ class DeferredCheckMaps final : public LDeferredCode {
+ public:
+ DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
+ : LDeferredCode(codegen), instr_(instr), object_(object) {
+ SetExit(check_maps());
+ }
+ void Generate() override {
+ codegen()->DoDeferredInstanceMigration(instr_, object_);
+ }
+ Label* check_maps() { return &check_maps_; }
+ LInstruction* instr() override { return instr_; }
+
+ private:
+ LCheckMaps* instr_;
+ Label check_maps_;
+ Register object_;
+ };
+
+ if (instr->hydrogen()->IsStabilityCheck()) {
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+ for (int i = 0; i < maps->size(); ++i) {
+ AddStabilityDependency(maps->at(i).handle());
+ }
+ return;
+ }
+
+ LOperand* input = instr->value();
+ DCHECK(input->IsRegister());
+ Register reg = ToRegister(input);
+
+ DeferredCheckMaps* deferred = NULL;
+ if (instr->hydrogen()->HasMigrationTarget()) {
+ deferred = new (zone()) DeferredCheckMaps(this, instr, reg);
+ __ bind(deferred->check_maps());
+ }
+
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+ Label success;
+ for (int i = 0; i < maps->size() - 1; i++) {
+ Handle<Map> map = maps->at(i).handle();
+ __ CompareMap(reg, map, &success);
+ __ beq(&success);
+ }
+
+ Handle<Map> map = maps->at(maps->size() - 1).handle();
+ __ CompareMap(reg, map, &success);
+ if (instr->hydrogen()->HasMigrationTarget()) {
+ __ bne(deferred->entry());
+ } else {
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+ }
+
+ __ bind(&success);
+}
+
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+ DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
+ Register result_reg = ToRegister(instr->result());
+ __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
+}
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+ Register unclamped_reg = ToRegister(instr->unclamped());
+ Register result_reg = ToRegister(instr->result());
+ __ ClampUint8(result_reg, unclamped_reg);
+}
+
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+ Register scratch = scratch0();
+ Register input_reg = ToRegister(instr->unclamped());
+ Register result_reg = ToRegister(instr->result());
+ DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
+ Label is_smi, done, heap_number;
+
+ // Both smi and heap number cases are handled.
+ __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
+
+ // Check for heap number
+ __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ CmpP(scratch, Operand(factory()->heap_number_map()));
+ __ beq(&heap_number, Label::kNear);
+
+ // Check for undefined. Undefined is converted to zero for clamping
+ // conversions.
+ __ CmpP(input_reg, Operand(factory()->undefined_value()));
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
+ __ LoadImmP(result_reg, Operand::Zero());
+ __ b(&done, Label::kNear);
+
+ // Heap number
+ __ bind(&heap_number);
+ __ ld(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+ __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
+ __ b(&done, Label::kNear);
+
+ // smi
+ __ bind(&is_smi);
+ __ ClampUint8(result_reg, result_reg);
+
+ __ bind(&done);
+}
+
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+ DoubleRegister value_reg = ToDoubleRegister(instr->value());
+ Register result_reg = ToRegister(instr->result());
+ __ lgdr(result_reg, value_reg);
+ if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+ __ srlg(result_reg, result_reg, Operand(32));
+ } else {
+ __ llgfr(result_reg, result_reg);
+ }
+}
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+ Register hi_reg = ToRegister(instr->hi());
+ Register lo_reg = ToRegister(instr->lo());
+ DoubleRegister result_reg = ToDoubleRegister(instr->result());
+ Register scratch = scratch0();
+
+ // Combine hi_reg:lo_reg into a single 64-bit register.
+ __ sllg(scratch, hi_reg, Operand(32));
+ __ lr(scratch, lo_reg);
+
+ // Bitwise convert from GPR to FPR
+ __ ldgr(result_reg, scratch);
+}
+
+void LCodeGen::DoAllocate(LAllocate* instr) {
+ class DeferredAllocate final : public LDeferredCode {
+ public:
+ DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() override { codegen()->DoDeferredAllocate(instr_); }
+ LInstruction* instr() override { return instr_; }
+
+ private:
+ LAllocate* instr_;
+ };
+
+ DeferredAllocate* deferred = new (zone()) DeferredAllocate(this, instr);
+
+ Register result = ToRegister(instr->result());
+ Register scratch = ToRegister(instr->temp1());
+ Register scratch2 = ToRegister(instr->temp2());
+
+ // Allocate memory for the object.
+ AllocationFlags flags = TAG_OBJECT;
+ if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+ }
+ if (instr->hydrogen()->IsOldSpaceAllocation()) {
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE);
+ }
+
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
+ } else {
+ Register size = ToRegister(instr->size());
+ __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
+ }
+
+ __ bind(deferred->exit());
+
+ if (instr->hydrogen()->MustPrefillWithFiller()) {
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ LoadIntLiteral(scratch, size);
+ } else {
+ scratch = ToRegister(instr->size());
+ }
+ __ lay(scratch, MemOperand(scratch, -kPointerSize));
+ Label loop;
+ __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+ __ bind(&loop);
+ __ StoreP(scratch2, MemOperand(scratch, result, -kHeapObjectTag));
+#if V8_TARGET_ARCH_S390X
+ __ lay(scratch, MemOperand(scratch, -kPointerSize));
+#else
+ // TODO(joransiu): Improve the following sequence.
+ // Need to use AHI instead of LAY as top nibble is not set with LAY, causing
+ // incorrect result with the signed compare
+ __ AddP(scratch, Operand(-kPointerSize));
+#endif
+ __ CmpP(scratch, Operand::Zero());
+ __ bge(&loop);
+ }
+}
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ LoadSmiLiteral(result, Smi::FromInt(0));
+
+ PushSafepointRegistersScope scope(this);
+ if (instr->size()->IsRegister()) {
+ Register size = ToRegister(instr->size());
+ DCHECK(!size.is(result));
+ __ SmiTag(size);
+ __ push(size);
+ } else {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+#if !V8_TARGET_ARCH_S390X
+ if (size >= 0 && size <= Smi::kMaxValue) {
+#endif
+ __ Push(Smi::FromInt(size));
+#if !V8_TARGET_ARCH_S390X
+ } else {
+ // We should never get here at runtime => abort
+ __ stop("invalid allocation size");
+ return;
+ }
+#endif
+ }
+
+ int flags = AllocateDoubleAlignFlag::encode(
+ instr->hydrogen()->MustAllocateDoubleAligned());
+ if (instr->hydrogen()->IsOldSpaceAllocation()) {
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = AllocateTargetSpace::update(flags, OLD_SPACE);
+ } else {
+ flags = AllocateTargetSpace::update(flags, NEW_SPACE);
+ }
+ __ Push(Smi::FromInt(flags));
+
+ CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr,
+ instr->context());
+ __ StoreToSafepointRegisterSlot(r2, result);
+}
+
+void LCodeGen::DoTypeof(LTypeof* instr) {
+ DCHECK(ToRegister(instr->value()).is(r5));
+ DCHECK(ToRegister(instr->result()).is(r2));
+ Label end, do_call;
+ Register value_register = ToRegister(instr->value());
+ __ JumpIfNotSmi(value_register, &do_call);
+ __ mov(r2, Operand(isolate()->factory()->number_string()));
+ __ b(&end);
+ __ bind(&do_call);
+ TypeofStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ bind(&end);
+}
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+
+ Condition final_branch_condition =
+ EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), input,
+ instr->type_literal());
+ if (final_branch_condition != kNoCondition) {
+ EmitBranch(instr, final_branch_condition);
+ }
+}
+
+Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
+ Register input, Handle<String> type_name) {
+ Condition final_branch_condition = kNoCondition;
+ Register scratch = scratch0();
+ Factory* factory = isolate()->factory();
+ if (String::Equals(type_name, factory->number_string())) {
+ __ JumpIfSmi(input, true_label);
+ __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
+ final_branch_condition = eq;
+
+ } else if (String::Equals(type_name, factory->string_string())) {
+ __ JumpIfSmi(input, false_label);
+ __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
+ final_branch_condition = lt;
+
+ } else if (String::Equals(type_name, factory->symbol_string())) {
+ __ JumpIfSmi(input, false_label);
+ __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
+ final_branch_condition = eq;
+
+ } else if (String::Equals(type_name, factory->boolean_string())) {
+ __ CompareRoot(input, Heap::kTrueValueRootIndex);
+ __ beq(true_label);
+ __ CompareRoot(input, Heap::kFalseValueRootIndex);
+ final_branch_condition = eq;
+
+ } else if (String::Equals(type_name, factory->undefined_string())) {
+ __ CompareRoot(input, Heap::kNullValueRootIndex);
+ __ beq(false_label);
+ __ JumpIfSmi(input, false_label);
+ // Check for undetectable objects => true.
+ __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ ExtractBit(r0, scratch, Map::kIsUndetectable);
+ __ CmpP(r0, Operand::Zero());
+ final_branch_condition = ne;
+
+ } else if (String::Equals(type_name, factory->function_string())) {
+ __ JumpIfSmi(input, false_label);
+ __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ AndP(scratch, scratch,
+ Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+ __ CmpP(scratch, Operand(1 << Map::kIsCallable));
+ final_branch_condition = eq;
+
+ } else if (String::Equals(type_name, factory->object_string())) {
+ __ JumpIfSmi(input, false_label);
+ __ CompareRoot(input, Heap::kNullValueRootIndex);
+ __ beq(true_label);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(input, scratch, ip, FIRST_JS_RECEIVER_TYPE);
+ __ blt(false_label);
+ // Check for callable or undetectable objects => false.
+ __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ AndP(r0, scratch,
+ Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+ __ CmpP(r0, Operand::Zero());
+ final_branch_condition = eq;
+
+// clang-format off
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
+ } else if (String::Equals(type_name, factory->type##_string())) { \
+ __ JumpIfSmi(input, false_label); \
+ __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); \
+ __ CompareRoot(scratch, Heap::k##Type##MapRootIndex); \
+ final_branch_condition = eq;
+ SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+ // clang-format on
+
+ } else {
+ __ b(false_label);
+ }
+
+ return final_branch_condition;
+}
+
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
+ if (info()->ShouldEnsureSpaceForLazyDeopt()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ DCHECK_EQ(0, padding_size % 2);
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= 2;
+ }
+ }
+ }
+ last_lazy_deopt_pc_ = masm()->pc_offset();
+}
+
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+ last_lazy_deopt_pc_ = masm()->pc_offset();
+ DCHECK(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+ Deoptimizer::BailoutType type = instr->hydrogen()->type();
+ // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+ // needed return address), even though the implementation of LAZY and EAGER is
+ // now identical. When LAZY is eventually completely folded into EAGER, remove
+ // the special case below.
+ if (info()->IsStub() && type == Deoptimizer::EAGER) {
+ type = Deoptimizer::LAZY;
+ }
+
+ DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
+}
+
+void LCodeGen::DoDummy(LDummy* instr) {
+ // Nothing to see here, move on!
+}
+
+void LCodeGen::DoDummyUse(LDummyUse* instr) {
+ // Nothing to see here, move on!
+}
+
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+ PushSafepointRegistersScope scope(this);
+ LoadContextFromDeferred(instr->context());
+ __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ RecordSafepointWithLazyDeopt(
+ instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ DCHECK(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+ class DeferredStackCheck final : public LDeferredCode {
+ public:
+ DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
+ LInstruction* instr() override { return instr_; }
+
+ private:
+ LStackCheck* instr_;
+ };
+
+ DCHECK(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ // There is no LLazyBailout instruction for stack-checks. We have to
+ // prepare for lazy deoptimization explicitly here.
+ if (instr->hydrogen()->is_function_entry()) {
+ // Perform stack overflow check.
+ Label done;
+ __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
+ __ bge(&done, Label::kNear);
+ DCHECK(instr->context()->IsRegister());
+ DCHECK(ToRegister(instr->context()).is(cp));
+ CallCode(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET,
+ instr);
+ __ bind(&done);
+ } else {
+ DCHECK(instr->hydrogen()->is_backwards_branch());
+ // Perform stack overflow check if this goto needs it before jumping.
+ DeferredStackCheck* deferred_stack_check =
+ new (zone()) DeferredStackCheck(this, instr);
+ __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
+ __ blt(deferred_stack_check->entry());
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ __ bind(instr->done_label());
+ deferred_stack_check->SetExit(instr->done_label());
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ // Don't record a deoptimization index for the safepoint here.
+ // This will be done explicitly when emitting call and the safepoint in
+ // the deferred code.
+ }
+}
+
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+ // This is a pseudo-instruction that ensures that the environment here is
+ // properly registered for deoptimization and records the assembler's PC
+ // offset.
+ LEnvironment* environment = instr->environment();
+
+ // If the environment were already registered, we would have no way of
+ // backpatching it with the spill slot operands.
+ DCHECK(!environment->HasBeenRegistered());
+ RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+
+ GenerateOsrPrologue();
+}
+
+void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+ Label use_cache, call_runtime;
+ __ CheckEnumCache(&call_runtime);
+
+ __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ b(&use_cache);
+
+ // Get the set of properties to enumerate.
+ __ bind(&call_runtime);
+ __ push(r2);
+ CallRuntime(Runtime::kForInEnumerate, instr);
+ __ bind(&use_cache);
+}
+
+void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
+ Register map = ToRegister(instr->map());
+ Register result = ToRegister(instr->result());
+ Label load_cache, done;
+ __ EnumLength(result, map);
+ __ CmpSmiLiteral(result, Smi::FromInt(0), r0);
+ __ bne(&load_cache, Label::kNear);
+ __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
+ __ b(&done, Label::kNear);
+
+ __ bind(&load_cache);
+ __ LoadInstanceDescriptors(map, result);
+ __ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
+ __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
+ __ CmpP(result, Operand::Zero());
+ DeoptimizeIf(eq, instr, Deoptimizer::kNoCache);
+
+ __ bind(&done);
+}
+
+void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
+ Register object = ToRegister(instr->value());
+ Register map = ToRegister(instr->map());
+ __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
+ __ CmpP(map, scratch0());
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+}
+
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register result, Register object,
+ Register index) {
+ PushSafepointRegistersScope scope(this);
+ __ Push(object, index);
+ __ LoadImmP(cp, Operand::Zero());
+ __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+ RecordSafepointWithRegisters(instr->pointer_map(), 2,
+ Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(r2, result);
+}
+
+void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+ class DeferredLoadMutableDouble final : public LDeferredCode {
+ public:
+ DeferredLoadMutableDouble(LCodeGen* codegen, LLoadFieldByIndex* instr,
+ Register result, Register object, Register index)
+ : LDeferredCode(codegen),
+ instr_(instr),
+ result_(result),
+ object_(object),
+ index_(index) {}
+ void Generate() override {
+ codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
+ }
+ LInstruction* instr() override { return instr_; }
+
+ private:
+ LLoadFieldByIndex* instr_;
+ Register result_;
+ Register object_;
+ Register index_;
+ };
+
+ Register object = ToRegister(instr->object());
+ Register index = ToRegister(instr->index());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+
+ DeferredLoadMutableDouble* deferred;
+ deferred = new (zone())
+ DeferredLoadMutableDouble(this, instr, result, object, index);
+
+ Label out_of_object, done;
+
+ __ TestBitMask(index, reinterpret_cast<uintptr_t>(Smi::FromInt(1)), r0);
+ __ bne(deferred->entry());
+ __ ShiftRightArithP(index, index, Operand(1));
+
+ __ CmpP(index, Operand::Zero());
+ __ blt(&out_of_object, Label::kNear);
+
+ __ SmiToPtrArrayOffset(r0, index);
+ __ AddP(scratch, object, r0);
+ __ LoadP(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
+
+ __ b(&done, Label::kNear);
+
+ __ bind(&out_of_object);
+ __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ // Index is equal to negated out of object property index plus 1.
+ __ SmiToPtrArrayOffset(r0, index);
+ __ SubP(scratch, result, r0);
+ __ LoadP(result,
+ FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize));
+ __ bind(deferred->exit());
+ __ bind(&done);
+}
+
+#undef __
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/crankshaft/s390/lithium-codegen-s390.h b/deps/v8/src/crankshaft/s390/lithium-codegen-s390.h
new file mode 100644
index 0000000000..6d364cbe11
--- /dev/null
+++ b/deps/v8/src/crankshaft/s390/lithium-codegen-s390.h
@@ -0,0 +1,359 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_S390_LITHIUM_CODEGEN_S390_H_
+#define V8_CRANKSHAFT_S390_LITHIUM_CODEGEN_S390_H_
+
+#include "src/ast/scopes.h"
+#include "src/crankshaft/lithium-codegen.h"
+#include "src/crankshaft/s390/lithium-gap-resolver-s390.h"
+#include "src/crankshaft/s390/lithium-s390.h"
+#include "src/deoptimizer.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LDeferredCode;
+class SafepointGenerator;
+
+class LCodeGen : public LCodeGenBase {
+ public:
+ LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+ : LCodeGenBase(chunk, assembler, info),
+ jump_table_(4, info->zone()),
+ scope_(info->scope()),
+ deferred_(8, info->zone()),
+ frame_is_built_(false),
+ safepoints_(info->zone()),
+ resolver_(this),
+ expected_safepoint_kind_(Safepoint::kSimple) {
+ PopulateDeoptimizationLiteralsWithInlinedFunctions();
+ }
+
+ int LookupDestination(int block_id) const {
+ return chunk()->LookupDestination(block_id);
+ }
+
+ bool IsNextEmittedBlock(int block_id) const {
+ return LookupDestination(block_id) == GetNextEmittedBlock();
+ }
+
+ bool NeedsEagerFrame() const {
+ return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
+ !info()->IsStub() || info()->requires_frame();
+ }
+ bool NeedsDeferredFrame() const {
+ return !NeedsEagerFrame() && info()->is_deferred_calling();
+ }
+
+ LinkRegisterStatus GetLinkRegisterState() const {
+ return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
+ }
+
+ // Support for converting LOperands to assembler types.
+ // LOperand must be a register.
+ Register ToRegister(LOperand* op) const;
+
+ // LOperand is loaded into scratch, unless already a register.
+ Register EmitLoadRegister(LOperand* op, Register scratch);
+
+ // LConstantOperand must be an Integer32 or Smi
+ void EmitLoadIntegerConstant(LConstantOperand* const_op, Register dst);
+
+ // LOperand must be a double register.
+ DoubleRegister ToDoubleRegister(LOperand* op) const;
+
+ intptr_t ToRepresentation(LConstantOperand* op,
+ const Representation& r) const;
+ int32_t ToInteger32(LConstantOperand* op) const;
+ Smi* ToSmi(LConstantOperand* op) const;
+ double ToDouble(LConstantOperand* op) const;
+ Operand ToOperand(LOperand* op);
+ MemOperand ToMemOperand(LOperand* op) const;
+ // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
+ MemOperand ToHighMemOperand(LOperand* op) const;
+
+ bool IsInteger32(LConstantOperand* op) const;
+ bool IsSmi(LConstantOperand* op) const;
+ Handle<Object> ToHandle(LConstantOperand* op) const;
+
+ // Try to generate code for the entire chunk, but it may fail if the
+ // chunk contains constructs we cannot handle. Returns true if the
+ // code generation attempt succeeded.
+ bool GenerateCode();
+
+ // Finish the code by setting stack height, safepoint, and bailout
+ // information on it.
+ void FinishCode(Handle<Code> code);
+
+ // Deferred code support.
+ void DoDeferredNumberTagD(LNumberTagD* instr);
+
+ enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+ void DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
+ LOperand* temp1, LOperand* temp2,
+ IntegerSignedness signedness);
+
+ void DoDeferredTaggedToI(LTaggedToI* instr);
+ void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
+ void DoDeferredStackCheck(LStackCheck* instr);
+ void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
+ void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+ void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+ void DoDeferredAllocate(LAllocate* instr);
+ void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+ void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, Register result,
+ Register object, Register index);
+
+ // Parallel move support.
+ void DoParallelMove(LParallelMove* move);
+ void DoGap(LGap* instr);
+
+ MemOperand PrepareKeyedOperand(Register key, Register base,
+ bool key_is_constant, bool key_is_tagged,
+ int constant_key, int element_size_shift,
+ int base_offset);
+
+ // Emit frame translation commands for an environment.
+ void WriteTranslation(LEnvironment* environment, Translation* translation);
+
+// Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+ LanguageMode language_mode() const { return info()->language_mode(); }
+
+ Scope* scope() const { return scope_; }
+
+ Register scratch0() { return kLithiumScratch; }
+ DoubleRegister double_scratch0() { return kScratchDoubleReg; }
+
+ LInstruction* GetNextInstruction();
+
+ void EmitClassOfTest(Label* if_true, Label* if_false,
+ Handle<String> class_name, Register input,
+ Register temporary, Register temporary2);
+
+ bool HasAllocatedStackSlots() const {
+ return chunk()->HasAllocatedStackSlots();
+ }
+ int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
+ int GetTotalFrameSlotCount() const {
+ return chunk()->GetTotalFrameSlotCount();
+ }
+
+ void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
+
+ void SaveCallerDoubles();
+ void RestoreCallerDoubles();
+
+ // Code generation passes. Returns true if code generation should
+ // continue.
+ void GenerateBodyInstructionPre(LInstruction* instr) override;
+ bool GeneratePrologue();
+ bool GenerateDeferredCode();
+ bool GenerateJumpTable();
+ bool GenerateSafepointTable();
+
+ // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+ void GenerateOsrPrologue();
+
+ enum SafepointMode {
+ RECORD_SIMPLE_SAFEPOINT,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+ };
+
+ void CallCode(Handle<Code> code, RelocInfo::Mode mode, LInstruction* instr);
+
+ void CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
+ LInstruction* instr, SafepointMode safepoint_mode);
+
+ void CallRuntime(const Runtime::Function* function, int num_arguments,
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+
+ void CallRuntime(Runtime::FunctionId id, int num_arguments,
+ LInstruction* instr) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, num_arguments, instr);
+ }
+
+ void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, instr);
+ }
+
+ void LoadContextFromDeferred(LOperand* context);
+ void CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
+ LInstruction* instr, LOperand* context);
+
+ void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
+ Register scratch2, Register scratch3);
+
+ // Generate a direct call to a known function. Expects the function
+ // to be in r4.
+ void CallKnownFunction(Handle<JSFunction> function,
+ int formal_parameter_count, int arity,
+ bool is_tail_call, LInstruction* instr);
+
+ void RecordSafepointWithLazyDeopt(LInstruction* instr,
+ SafepointMode safepoint_mode);
+
+ void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+ Safepoint::DeoptMode mode);
+ void DeoptimizeIf(Condition condition, LInstruction* instr,
+ Deoptimizer::DeoptReason deopt_reason,
+ Deoptimizer::BailoutType bailout_type, CRegister cr = cr7);
+ void DeoptimizeIf(Condition condition, LInstruction* instr,
+ Deoptimizer::DeoptReason deopt_reason, CRegister cr = cr7);
+
+ void AddToTranslation(LEnvironment* environment, Translation* translation,
+ LOperand* op, bool is_tagged, bool is_uint32,
+ int* object_index_pointer,
+ int* dematerialized_index_pointer);
+
+ Register ToRegister(int index) const;
+ DoubleRegister ToDoubleRegister(int index) const;
+
+ MemOperand BuildSeqStringOperand(Register string, LOperand* index,
+ String::Encoding encoding);
+
+ void EmitMathAbs(LMathAbs* instr);
+#if V8_TARGET_ARCH_S390X
+ void EmitInteger32MathAbs(LMathAbs* instr);
+#endif
+
+ // Support for recording safepoint and position information.
+ void RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
+ int arguments, Safepoint::DeoptMode mode);
+ void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
+ void RecordSafepoint(Safepoint::DeoptMode mode);
+ void RecordSafepointWithRegisters(LPointerMap* pointers, int arguments,
+ Safepoint::DeoptMode mode);
+
+ void RecordAndWritePosition(int position) override;
+
+ static Condition TokenToCondition(Token::Value op);
+ void EmitGoto(int block);
+
+ // EmitBranch expects to be the last instruction of a block.
+ template <class InstrType>
+ void EmitBranch(InstrType instr, Condition condition);
+ template <class InstrType>
+ void EmitTrueBranch(InstrType instr, Condition condition);
+ template <class InstrType>
+ void EmitFalseBranch(InstrType instr, Condition condition);
+ void EmitNumberUntagD(LNumberUntagD* instr, Register input,
+ DoubleRegister result, NumberUntagDMode mode);
+
+ // Emits optimized code for typeof x == "y". Modifies input register.
+ // Returns the condition on which a final split to
+ // true and false label should be made, to optimize fallthrough.
+ Condition EmitTypeofIs(Label* true_label, Label* false_label, Register input,
+ Handle<String> type_name);
+
+ // Emits optimized code for %_IsString(x). Preserves input register.
+ // Returns the condition on which a final split to
+ // true and false label should be made, to optimize fallthrough.
+ Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
+ SmiCheck check_needed);
+
+ // Emits optimized code to deep-copy the contents of statically known
+ // object graphs (e.g. object literal boilerplate).
+ void EmitDeepCopy(Handle<JSObject> object, Register result, Register source,
+ int* offset, AllocationSiteMode mode);
+
+ void EnsureSpaceForLazyDeopt(int space_needed) override;
+ void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedArray(LLoadKeyed* instr);
+ void DoStoreKeyedExternalArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedArray(LStoreKeyed* instr);
+
+ template <class T>
+ void EmitVectorLoadICRegisters(T* instr);
+ template <class T>
+ void EmitVectorStoreICRegisters(T* instr);
+
+ ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
+ Scope* const scope_;
+ ZoneList<LDeferredCode*> deferred_;
+ bool frame_is_built_;
+
+ // Builder that keeps track of safepoints in the code. The table
+ // itself is emitted at the end of the generated code.
+ SafepointTableBuilder safepoints_;
+
+ // Compiler from a set of parallel moves to a sequential list of moves.
+ LGapResolver resolver_;
+
+ Safepoint::Kind expected_safepoint_kind_;
+
+ class PushSafepointRegistersScope final BASE_EMBEDDED {
+ public:
+ explicit PushSafepointRegistersScope(LCodeGen* codegen)
+ : codegen_(codegen) {
+ DCHECK(codegen_->info()->is_calling());
+ DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+ codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+ StoreRegistersStateStub stub(codegen_->isolate());
+ codegen_->masm_->CallStub(&stub);
+ }
+
+ ~PushSafepointRegistersScope() {
+ DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+ RestoreRegistersStateStub stub(codegen_->isolate());
+ codegen_->masm_->CallStub(&stub);
+ codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+ }
+
+ private:
+ LCodeGen* codegen_;
+ };
+
+ friend class LDeferredCode;
+ friend class LEnvironment;
+ friend class SafepointGenerator;
+ DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
+
+class LDeferredCode : public ZoneObject {
+ public:
+ explicit LDeferredCode(LCodeGen* codegen)
+ : codegen_(codegen),
+ external_exit_(NULL),
+ instruction_index_(codegen->current_instruction_) {
+ codegen->AddDeferredCode(this);
+ }
+
+ virtual ~LDeferredCode() {}
+ virtual void Generate() = 0;
+ virtual LInstruction* instr() = 0;
+
+ void SetExit(Label* exit) { external_exit_ = exit; }
+ Label* entry() { return &entry_; }
+ Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+ int instruction_index() const { return instruction_index_; }
+
+ protected:
+ LCodeGen* codegen() const { return codegen_; }
+ MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+ LCodeGen* codegen_;
+ Label entry_;
+ Label exit_;
+ Label* external_exit_;
+ int instruction_index_;
+};
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CRANKSHAFT_S390_LITHIUM_CODEGEN_S390_H_
diff --git a/deps/v8/src/crankshaft/s390/lithium-gap-resolver-s390.cc b/deps/v8/src/crankshaft/s390/lithium-gap-resolver-s390.cc
new file mode 100644
index 0000000000..cffcede226
--- /dev/null
+++ b/deps/v8/src/crankshaft/s390/lithium-gap-resolver-s390.cc
@@ -0,0 +1,280 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/s390/lithium-gap-resolver-s390.h"
+
+#include "src/crankshaft/s390/lithium-codegen-s390.h"
+
+namespace v8 {
+namespace internal {
+
+static const Register kSavedValueRegister = {1};
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+ : cgen_(owner),
+ moves_(32, owner->zone()),
+ root_index_(0),
+ in_cycle_(false),
+ saved_destination_(NULL) {}
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+ DCHECK(moves_.is_empty());
+ // Build up a worklist of moves.
+ BuildInitialMoveList(parallel_move);
+
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands move = moves_[i];
+ // Skip constants to perform them last. They don't block other moves
+ // and skipping such moves with register destinations keeps those
+ // registers free for the whole algorithm.
+ if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+ root_index_ = i; // Any cycle is found when by reaching this move again.
+ PerformMove(i);
+ if (in_cycle_) {
+ RestoreValue();
+ }
+ }
+ }
+
+ // Perform the moves with constant sources.
+ for (int i = 0; i < moves_.length(); ++i) {
+ if (!moves_[i].IsEliminated()) {
+ DCHECK(moves_[i].source()->IsConstantOperand());
+ EmitMove(i);
+ }
+ }
+
+ moves_.Rewind(0);
+}
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+ // Perform a linear sweep of the moves to add them to the initial list of
+ // moves to perform, ignoring any move that is redundant (the source is
+ // the same as the destination, the destination is ignored and
+ // unallocated, or the move was already eliminated).
+ const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+ for (int i = 0; i < moves->length(); ++i) {
+ LMoveOperands move = moves->at(i);
+ if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
+ }
+ Verify();
+}
+
+void LGapResolver::PerformMove(int index) {
+ // Each call to this function performs a move and deletes it from the move
+ // graph. We first recursively perform any move blocking this one. We
+ // mark a move as "pending" on entry to PerformMove in order to detect
+ // cycles in the move graph.
+
+ // We can only find a cycle, when doing a depth-first traversal of moves,
+ // be encountering the starting move again. So by spilling the source of
+ // the starting move, we break the cycle. All moves are then unblocked,
+ // and the starting move is completed by writing the spilled value to
+ // its destination. All other moves from the spilled source have been
+ // completed prior to breaking the cycle.
+ // An additional complication is that moves to MemOperands with large
+ // offsets (more than 1K or 4K) require us to spill this spilled value to
+ // the stack, to free up the register.
+ DCHECK(!moves_[index].IsPending());
+ DCHECK(!moves_[index].IsRedundant());
+
+ // Clear this move's destination to indicate a pending move. The actual
+ // destination is saved in a stack allocated local. Multiple moves can
+ // be pending because this function is recursive.
+ DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated.
+ LOperand* destination = moves_[index].destination();
+ moves_[index].set_destination(NULL);
+
+ // Perform a depth-first traversal of the move graph to resolve
+ // dependencies. Any unperformed, unpending move with a source the same
+ // as this one's destination blocks this one so recursively perform all
+ // such moves.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(destination) && !other_move.IsPending()) {
+ PerformMove(i);
+ // If there is a blocking, pending move it must be moves_[root_index_]
+ // and all other moves with the same source as moves_[root_index_] are
+ // sucessfully executed (because they are cycle-free) by this loop.
+ }
+ }
+
+ // We are about to resolve this move and don't need it marked as
+ // pending, so restore its destination.
+ moves_[index].set_destination(destination);
+
+ // The move may be blocked on a pending move, which must be the starting move.
+ // In this case, we have a cycle, and we save the source of this move to
+ // a scratch register to break it.
+ LMoveOperands other_move = moves_[root_index_];
+ if (other_move.Blocks(destination)) {
+ DCHECK(other_move.IsPending());
+ BreakCycle(index);
+ return;
+ }
+
+ // This move is no longer blocked.
+ EmitMove(index);
+}
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_DCHECKS
+ // No operand should be the destination for more than one move.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LOperand* destination = moves_[i].destination();
+ for (int j = i + 1; j < moves_.length(); ++j) {
+ SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
+ }
+ }
+#endif
+}
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+void LGapResolver::BreakCycle(int index) {
+ // We save in a register the value that should end up in the source of
+ // moves_[root_index]. After performing all moves in the tree rooted
+ // in that move, we save the value to that source.
+ DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
+ DCHECK(!in_cycle_);
+ in_cycle_ = true;
+ LOperand* source = moves_[index].source();
+ saved_destination_ = moves_[index].destination();
+ if (source->IsRegister()) {
+ __ LoadRR(kSavedValueRegister, cgen_->ToRegister(source));
+ } else if (source->IsStackSlot()) {
+ __ LoadP(kSavedValueRegister, cgen_->ToMemOperand(source));
+ } else if (source->IsDoubleRegister()) {
+ __ ldr(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
+ } else if (source->IsDoubleStackSlot()) {
+ __ LoadDouble(kScratchDoubleReg, cgen_->ToMemOperand(source));
+ } else {
+ UNREACHABLE();
+ }
+ // This move will be done by restoring the saved value to the destination.
+ moves_[index].Eliminate();
+}
+
+void LGapResolver::RestoreValue() {
+ DCHECK(in_cycle_);
+ DCHECK(saved_destination_ != NULL);
+
+ // Spilled value is in kSavedValueRegister or kSavedDoubleValueRegister.
+ if (saved_destination_->IsRegister()) {
+ __ LoadRR(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
+ } else if (saved_destination_->IsStackSlot()) {
+ __ StoreP(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
+ } else if (saved_destination_->IsDoubleRegister()) {
+ __ ldr(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
+ } else if (saved_destination_->IsDoubleStackSlot()) {
+ __ StoreDouble(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
+ } else {
+ UNREACHABLE();
+ }
+
+ in_cycle_ = false;
+ saved_destination_ = NULL;
+}
+
+void LGapResolver::EmitMove(int index) {
+ LOperand* source = moves_[index].source();
+ LOperand* destination = moves_[index].destination();
+
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+
+ if (source->IsRegister()) {
+ Register source_register = cgen_->ToRegister(source);
+ if (destination->IsRegister()) {
+ __ LoadRR(cgen_->ToRegister(destination), source_register);
+ } else {
+ DCHECK(destination->IsStackSlot());
+ __ StoreP(source_register, cgen_->ToMemOperand(destination));
+ }
+ } else if (source->IsStackSlot()) {
+ MemOperand source_operand = cgen_->ToMemOperand(source);
+ if (destination->IsRegister()) {
+ __ LoadP(cgen_->ToRegister(destination), source_operand);
+ } else {
+ DCHECK(destination->IsStackSlot());
+ MemOperand destination_operand = cgen_->ToMemOperand(destination);
+ if (in_cycle_) {
+ __ LoadP(ip, source_operand);
+ __ StoreP(ip, destination_operand);
+ } else {
+ __ LoadP(kSavedValueRegister, source_operand);
+ __ StoreP(kSavedValueRegister, destination_operand);
+ }
+ }
+
+ } else if (source->IsConstantOperand()) {
+ LConstantOperand* constant_source = LConstantOperand::cast(source);
+ if (destination->IsRegister()) {
+ Register dst = cgen_->ToRegister(destination);
+ if (cgen_->IsInteger32(constant_source)) {
+ cgen_->EmitLoadIntegerConstant(constant_source, dst);
+ } else {
+ __ Move(dst, cgen_->ToHandle(constant_source));
+ }
+ } else if (destination->IsDoubleRegister()) {
+ DoubleRegister result = cgen_->ToDoubleRegister(destination);
+ double v = cgen_->ToDouble(constant_source);
+ __ LoadDoubleLiteral(result, v, ip);
+ } else {
+ DCHECK(destination->IsStackSlot());
+ DCHECK(!in_cycle_); // Constant moves happen after all cycles are gone.
+ if (cgen_->IsInteger32(constant_source)) {
+ cgen_->EmitLoadIntegerConstant(constant_source, kSavedValueRegister);
+ } else {
+ __ Move(kSavedValueRegister, cgen_->ToHandle(constant_source));
+ }
+ __ StoreP(kSavedValueRegister, cgen_->ToMemOperand(destination));
+ }
+
+ } else if (source->IsDoubleRegister()) {
+ DoubleRegister source_register = cgen_->ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ __ ldr(cgen_->ToDoubleRegister(destination), source_register);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ __ StoreDouble(source_register, cgen_->ToMemOperand(destination));
+ }
+
+ } else if (source->IsDoubleStackSlot()) {
+ MemOperand source_operand = cgen_->ToMemOperand(source);
+ if (destination->IsDoubleRegister()) {
+ __ LoadDouble(cgen_->ToDoubleRegister(destination), source_operand);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ MemOperand destination_operand = cgen_->ToMemOperand(destination);
+ if (in_cycle_) {
+// kSavedDoubleValueRegister was used to break the cycle,
+// but kSavedValueRegister is free.
+#if V8_TARGET_ARCH_S390X
+ __ lg(kSavedValueRegister, source_operand);
+ __ stg(kSavedValueRegister, destination_operand);
+#else
+ MemOperand source_high_operand = cgen_->ToHighMemOperand(source);
+ MemOperand destination_high_operand =
+ cgen_->ToHighMemOperand(destination);
+ __ LoadlW(kSavedValueRegister, source_operand);
+ __ StoreW(kSavedValueRegister, destination_operand);
+ __ LoadlW(kSavedValueRegister, source_high_operand);
+ __ StoreW(kSavedValueRegister, destination_high_operand);
+#endif
+ } else {
+ __ LoadDouble(kScratchDoubleReg, source_operand);
+ __ StoreDouble(kScratchDoubleReg, destination_operand);
+ }
+ }
+ } else {
+ UNREACHABLE();
+ }
+
+ moves_[index].Eliminate();
+}
+
+#undef __
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/crankshaft/s390/lithium-gap-resolver-s390.h b/deps/v8/src/crankshaft/s390/lithium-gap-resolver-s390.h
new file mode 100644
index 0000000000..087224c861
--- /dev/null
+++ b/deps/v8/src/crankshaft/s390/lithium-gap-resolver-s390.h
@@ -0,0 +1,58 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_S390_LITHIUM_GAP_RESOLVER_S390_H_
+#define V8_CRANKSHAFT_S390_LITHIUM_GAP_RESOLVER_S390_H_
+
+#include "src/crankshaft/lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver final BASE_EMBEDDED {
+ public:
+ explicit LGapResolver(LCodeGen* owner);
+
+ // Resolve a set of parallel moves, emitting assembler instructions.
+ void Resolve(LParallelMove* parallel_move);
+
+ private:
+ // Build the initial list of moves.
+ void BuildInitialMoveList(LParallelMove* parallel_move);
+
+ // Perform the move at the moves_ index in question (possibly requiring
+ // other moves to satisfy dependencies).
+ void PerformMove(int index);
+
+ // If a cycle is found in the series of moves, save the blocking value to
+ // a scratch register. The cycle must be found by hitting the root of the
+ // depth-first search.
+ void BreakCycle(int index);
+
+ // After a cycle has been resolved, restore the value from the scratch
+ // register to its proper destination.
+ void RestoreValue();
+
+ // Emit a move and remove it from the move graph.
+ void EmitMove(int index);
+
+ // Verify the move list before performing moves.
+ void Verify();
+
+ LCodeGen* cgen_;
+
+ // List of moves not yet resolved.
+ ZoneList<LMoveOperands> moves_;
+
+ int root_index_;
+ bool in_cycle_;
+ LOperand* saved_destination_;
+};
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CRANKSHAFT_S390_LITHIUM_GAP_RESOLVER_S390_H_
diff --git a/deps/v8/src/crankshaft/s390/lithium-s390.cc b/deps/v8/src/crankshaft/s390/lithium-s390.cc
new file mode 100644
index 0000000000..a18f877187
--- /dev/null
+++ b/deps/v8/src/crankshaft/s390/lithium-s390.cc
@@ -0,0 +1,2290 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/s390/lithium-s390.h"
+
+#include <sstream>
+
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/lithium-inl.h"
+#include "src/crankshaft/s390/lithium-codegen-s390.h"
+
+namespace v8 {
+namespace internal {
+
+#define DEFINE_COMPILE(type) \
+ void L##type::CompileToNative(LCodeGen* generator) { \
+ generator->Do##type(this); \
+ }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+ // Call instructions can use only fixed registers as temporaries and
+ // outputs because all registers are blocked by the calling convention.
+ // Inputs operands must use a fixed register or use-at-start policy or
+ // a non-register policy.
+ DCHECK(Output() == NULL || LUnallocated::cast(Output())->HasFixedPolicy() ||
+ !LUnallocated::cast(Output())->HasRegisterPolicy());
+ for (UseIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ DCHECK(operand->HasFixedPolicy() || operand->IsUsedAtStart());
+ }
+ for (TempIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ DCHECK(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
+ }
+}
+#endif
+
+void LInstruction::PrintTo(StringStream* stream) {
+ stream->Add("%s ", this->Mnemonic());
+
+ PrintOutputOperandTo(stream);
+
+ PrintDataTo(stream);
+
+ if (HasEnvironment()) {
+ stream->Add(" ");
+ environment()->PrintTo(stream);
+ }
+
+ if (HasPointerMap()) {
+ stream->Add(" ");
+ pointer_map()->PrintTo(stream);
+ }
+}
+
+void LInstruction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ for (int i = 0; i < InputCount(); i++) {
+ if (i > 0) stream->Add(" ");
+ if (InputAt(i) == NULL) {
+ stream->Add("NULL");
+ } else {
+ InputAt(i)->PrintTo(stream);
+ }
+ }
+}
+
+void LInstruction::PrintOutputOperandTo(StringStream* stream) {
+ if (HasResult()) result()->PrintTo(stream);
+}
+
+void LLabel::PrintDataTo(StringStream* stream) {
+ LGap::PrintDataTo(stream);
+ LLabel* rep = replacement();
+ if (rep != NULL) {
+ stream->Add(" Dead block replaced with B%d", rep->block_id());
+ }
+}
+
+bool LGap::IsRedundant() const {
+ for (int i = 0; i < 4; i++) {
+ if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void LGap::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < 4; i++) {
+ stream->Add("(");
+ if (parallel_moves_[i] != NULL) {
+ parallel_moves_[i]->PrintDataTo(stream);
+ }
+ stream->Add(") ");
+ }
+}
+
+const char* LArithmeticD::Mnemonic() const {
+ switch (op()) {
+ case Token::ADD:
+ return "add-d";
+ case Token::SUB:
+ return "sub-d";
+ case Token::MUL:
+ return "mul-d";
+ case Token::DIV:
+ return "div-d";
+ case Token::MOD:
+ return "mod-d";
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+const char* LArithmeticT::Mnemonic() const {
+ switch (op()) {
+ case Token::ADD:
+ return "add-t";
+ case Token::SUB:
+ return "sub-t";
+ case Token::MUL:
+ return "mul-t";
+ case Token::MOD:
+ return "mod-t";
+ case Token::DIV:
+ return "div-t";
+ case Token::BIT_AND:
+ return "bit-and-t";
+ case Token::BIT_OR:
+ return "bit-or-t";
+ case Token::BIT_XOR:
+ return "bit-xor-t";
+ case Token::ROR:
+ return "ror-t";
+ case Token::SHL:
+ return "shl-t";
+ case Token::SAR:
+ return "sar-t";
+ case Token::SHR:
+ return "shr-t";
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+bool LGoto::HasInterestingComment(LCodeGen* gen) const {
+ return !gen->IsNextEmittedBlock(block_id());
+}
+
+void LGoto::PrintDataTo(StringStream* stream) {
+ stream->Add("B%d", block_id());
+}
+
+void LBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+ value()->PrintTo(stream);
+}
+
+void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if ");
+ left()->PrintTo(stream);
+ stream->Add(" %s ", Token::String(op()));
+ right()->PrintTo(stream);
+ stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_string(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_smi(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_undetectable(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if string_compare(");
+ left()->PrintTo(stream);
+ right()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if has_instance_type(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if has_cached_array_index(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if class_of_test(");
+ value()->PrintTo(stream);
+ stream->Add(", \"%o\") then B%d else B%d", *hydrogen()->class_name(),
+ true_block_id(), false_block_id());
+}
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if typeof ");
+ value()->PrintTo(stream);
+ stream->Add(" == \"%s\" then B%d else B%d",
+ hydrogen()->type_literal()->ToCString().get(), true_block_id(),
+ false_block_id());
+}
+
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ function()->PrintTo(stream);
+ stream->Add(".code_entry = ");
+ code_object()->PrintTo(stream);
+}
+
+void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ base_object()->PrintTo(stream);
+ stream->Add(" + ");
+ offset()->PrintTo(stream);
+}
+
+void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < InputCount(); i++) {
+ InputAt(i)->PrintTo(stream);
+ stream->Add(" ");
+ }
+ stream->Add("#%d / ", arity());
+}
+
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+ context()->PrintTo(stream);
+ stream->Add("[%d]", slot_index());
+}
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+ context()->PrintTo(stream);
+ stream->Add("[%d] <- ", slot_index());
+ value()->PrintTo(stream);
+}
+
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ function()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+}
+
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ constructor()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+ ElementsKind kind = hydrogen()->elements_kind();
+ stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
+ arguments()->PrintTo(stream);
+ stream->Add(" length ");
+ length()->PrintTo(stream);
+ stream->Add(" index ");
+ index()->PrintTo(stream);
+}
+
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ std::ostringstream os;
+ os << hydrogen()->access() << " <- ";
+ stream->Add(os.str().c_str());
+ value()->PrintTo(stream);
+}
+
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(".");
+ stream->Add(String::cast(*name())->ToCString().get());
+ stream->Add(" <- ");
+ value()->PrintTo(stream);
+}
+
+void LLoadKeyed::PrintDataTo(StringStream* stream) {
+ elements()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ if (hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d]", base_offset());
+ } else {
+ stream->Add("]");
+ }
+}
+
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
+ elements()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ if (hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d] <-", base_offset());
+ } else {
+ stream->Add("] <- ");
+ }
+
+ if (value() == NULL) {
+ DCHECK(hydrogen()->IsConstantHoleStore() &&
+ hydrogen()->value()->representation().IsDouble());
+ stream->Add("<the hole(nan)>");
+ } else {
+ value()->PrintTo(stream);
+ }
+}
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
+}
+
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
+ // Skip a slot if for a double-width slot.
+ if (kind == DOUBLE_REGISTERS) current_frame_slots_++;
+ return current_frame_slots_++;
+}
+
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+ int index = GetNextSpillIndex(kind);
+ if (kind == DOUBLE_REGISTERS) {
+ return LDoubleStackSlot::Create(index, zone());
+ } else {
+ DCHECK(kind == GENERAL_REGISTERS);
+ return LStackSlot::Create(index, zone());
+ }
+}
+
+LPlatformChunk* LChunkBuilder::Build() {
+ DCHECK(is_unused());
+ chunk_ = new (zone()) LPlatformChunk(info(), graph());
+ LPhase phase("L_Building chunk", chunk_);
+ status_ = BUILDING;
+
+ // If compiling for OSR, reserve space for the unoptimized frame,
+ // which will be subsumed into this frame.
+ if (graph()->has_osr()) {
+ for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+ chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
+ }
+ }
+
+ const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+ for (int i = 0; i < blocks->length(); i++) {
+ HBasicBlock* next = NULL;
+ if (i < blocks->length() - 1) next = blocks->at(i + 1);
+ DoBasicBlock(blocks->at(i), next);
+ if (is_aborted()) return NULL;
+ }
+ status_ = DONE;
+ return chunk_;
+}
+
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+ return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
+}
+
+LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
+ return new (zone())
+ LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
+}
+
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+ return Use(value, ToUnallocated(fixed_register));
+}
+
+LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) {
+ return Use(value, ToUnallocated(reg));
+}
+
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+ return Use(value,
+ new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+ return Use(value, new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+ LUnallocated::USED_AT_START));
+}
+
+LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
+ return Use(value, new (zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+LOperand* LChunkBuilder::Use(HValue* value) {
+ return Use(value, new (zone()) LUnallocated(LUnallocated::NONE));
+}
+
+LOperand* LChunkBuilder::UseAtStart(HValue* value) {
+ return Use(value, new (zone()) LUnallocated(LUnallocated::NONE,
+ LUnallocated::USED_AT_START));
+}
+
+LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : Use(value);
+}
+
+LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseAtStart(value);
+}
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseRegister(value);
+}
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseRegisterAtStart(value);
+}
+
+LOperand* LChunkBuilder::UseConstant(HValue* value) {
+ return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+LOperand* LChunkBuilder::UseAny(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : Use(value, new (zone()) LUnallocated(LUnallocated::ANY));
+}
+
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
+ if (value->EmitAtUses()) {
+ HInstruction* instr = HInstruction::cast(value);
+ VisitInstruction(instr);
+ }
+ operand->set_virtual_register(value->id());
+ return operand;
+}
+
+LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
+ LUnallocated* result) {
+ result->set_virtual_register(current_instruction_->id());
+ instr->set_result(result);
+ return instr;
+}
+
+LInstruction* LChunkBuilder::DefineAsRegister(
+ LTemplateResultInstruction<1>* instr) {
+ return Define(instr,
+ new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+LInstruction* LChunkBuilder::DefineAsSpilled(
+ LTemplateResultInstruction<1>* instr, int index) {
+ return Define(instr,
+ new (zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+ LTemplateResultInstruction<1>* instr) {
+ return Define(instr,
+ new (zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr,
+ Register reg) {
+ return Define(instr, ToUnallocated(reg));
+}
+
+LInstruction* LChunkBuilder::DefineFixedDouble(
+ LTemplateResultInstruction<1>* instr, DoubleRegister reg) {
+ return Define(instr, ToUnallocated(reg));
+}
+
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
+ HEnvironment* hydrogen_env = current_block_->last_environment();
+ return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
+}
+
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+ HInstruction* hinstr,
+ CanDeoptimize can_deoptimize) {
+ info()->MarkAsNonDeferredCalling();
+#ifdef DEBUG
+ instr->VerifyCall();
+#endif
+ instr->MarkAsCall();
+ instr = AssignPointerMap(instr);
+
+ // If instruction does not have side-effects lazy deoptimization
+ // after the call will try to deoptimize to the point before the call.
+ // Thus we still need to attach environment to this call even if
+ // call sequence can not deoptimize eagerly.
+ bool needs_environment = (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+ !hinstr->HasObservableSideEffects();
+ if (needs_environment && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
+ // We can't really figure out if the environment is needed or not.
+ instr->environment()->set_has_been_used();
+ }
+
+ return instr;
+}
+
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+ DCHECK(!instr->HasPointerMap());
+ instr->set_pointer_map(new (zone()) LPointerMap(zone()));
+ return instr;
+}
+
+LUnallocated* LChunkBuilder::TempRegister() {
+ LUnallocated* operand =
+ new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+ int vreg = allocator_->GetVirtualRegister();
+ if (!allocator_->AllocationOk()) {
+ Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+ vreg = 0;
+ }
+ operand->set_virtual_register(vreg);
+ return operand;
+}
+
+LUnallocated* LChunkBuilder::TempDoubleRegister() {
+ LUnallocated* operand =
+ new (zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER);
+ int vreg = allocator_->GetVirtualRegister();
+ if (!allocator_->AllocationOk()) {
+ Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+ vreg = 0;
+ }
+ operand->set_virtual_register(vreg);
+ return operand;
+}
+
+LOperand* LChunkBuilder::FixedTemp(Register reg) {
+ LUnallocated* operand = ToUnallocated(reg);
+ DCHECK(operand->HasFixedPolicy());
+ return operand;
+}
+
+LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
+ LUnallocated* operand = ToUnallocated(reg);
+ DCHECK(operand->HasFixedPolicy());
+ return operand;
+}
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+ return new (zone()) LLabel(instr->block());
+}
+
+LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
+ return DefineAsRegister(new (zone()) LDummyUse(UseAny(instr->value())));
+}
+
+LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
+ UNREACHABLE();
+ return NULL;
+}
+
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+ return AssignEnvironment(new (zone()) LDeoptimize);
+}
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+ HBitwiseBinaryOperation* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->left());
+
+ HValue* right_value = instr->right();
+ LOperand* right = NULL;
+ int constant_value = 0;
+ bool does_deopt = false;
+ if (right_value->IsConstant()) {
+ HConstant* constant = HConstant::cast(right_value);
+ right = chunk_->DefineConstantOperand(constant);
+ constant_value = constant->Integer32Value() & 0x1f;
+ // Left shifts can deoptimize if we shift by > 0 and the result cannot be
+ // truncated to smi.
+ if (instr->representation().IsSmi() && constant_value > 0) {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+ }
+ } else {
+ right = UseRegisterAtStart(right_value);
+ }
+
+ // Shift operations can only deoptimize if we do a logical shift
+ // by 0 and the result cannot be truncated to int32.
+ if (op == Token::SHR && constant_value == 0) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ }
+
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LShiftI(op, left, right, does_deopt));
+ return does_deopt ? AssignEnvironment(result) : result;
+ } else {
+ return DoArithmeticT(op, instr);
+ }
+}
+
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+ HArithmeticBinaryOperation* instr) {
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->left()->representation().IsDouble());
+ DCHECK(instr->right()->representation().IsDouble());
+ if (op == Token::MOD) {
+ LOperand* left = UseFixedDouble(instr->left(), d1);
+ LOperand* right = UseFixedDouble(instr->right(), d2);
+ LArithmeticD* result = new (zone()) LArithmeticD(op, left, right);
+ // We call a C function for double modulo. It can't trigger a GC. We need
+ // to use fixed result register for the call.
+ // TODO(fschneider): Allow any register as input registers.
+ return MarkAsCall(DefineFixedDouble(result, d1), instr);
+ } else {
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
+ LArithmeticD* result = new (zone()) LArithmeticD(op, left, right);
+ return DefineSameAsFirst(result);
+ }
+}
+
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+ HBinaryOperation* instr) {
+ HValue* left = instr->left();
+ HValue* right = instr->right();
+ DCHECK(left->representation().IsTagged());
+ DCHECK(right->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left_operand = UseFixed(left, r3);
+ LOperand* right_operand = UseFixed(right, r2);
+ LArithmeticT* result =
+ new (zone()) LArithmeticT(op, context, left_operand, right_operand);
+ return MarkAsCall(DefineFixed(result, r2), instr);
+}
+
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
+ DCHECK(is_building());
+ current_block_ = block;
+ next_block_ = next_block;
+ if (block->IsStartBlock()) {
+ block->UpdateEnvironment(graph_->start_environment());
+ argument_count_ = 0;
+ } else if (block->predecessors()->length() == 1) {
+ // We have a single predecessor => copy environment and outgoing
+ // argument count from the predecessor.
+ DCHECK(block->phis()->length() == 0);
+ HBasicBlock* pred = block->predecessors()->at(0);
+ HEnvironment* last_environment = pred->last_environment();
+ DCHECK(last_environment != NULL);
+ // Only copy the environment, if it is later used again.
+ if (pred->end()->SecondSuccessor() == NULL) {
+ DCHECK(pred->end()->FirstSuccessor() == block);
+ } else {
+ if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
+ pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
+ last_environment = last_environment->Copy();
+ }
+ }
+ block->UpdateEnvironment(last_environment);
+ DCHECK(pred->argument_count() >= 0);
+ argument_count_ = pred->argument_count();
+ } else {
+ // We are at a state join => process phis.
+ HBasicBlock* pred = block->predecessors()->at(0);
+ // No need to copy the environment, it cannot be used later.
+ HEnvironment* last_environment = pred->last_environment();
+ for (int i = 0; i < block->phis()->length(); ++i) {
+ HPhi* phi = block->phis()->at(i);
+ if (phi->HasMergedIndex()) {
+ last_environment->SetValueAt(phi->merged_index(), phi);
+ }
+ }
+ for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+ if (block->deleted_phis()->at(i) < last_environment->length()) {
+ last_environment->SetValueAt(block->deleted_phis()->at(i),
+ graph_->GetConstantUndefined());
+ }
+ }
+ block->UpdateEnvironment(last_environment);
+ // Pick up the outgoing argument count of one of the predecessors.
+ argument_count_ = pred->argument_count();
+ }
+ HInstruction* current = block->first();
+ int start = chunk_->instructions()->length();
+ while (current != NULL && !is_aborted()) {
+ // Code for constants in registers is generated lazily.
+ if (!current->EmitAtUses()) {
+ VisitInstruction(current);
+ }
+ current = current->next();
+ }
+ int end = chunk_->instructions()->length() - 1;
+ if (end >= start) {
+ block->set_first_instruction_index(start);
+ block->set_last_instruction_index(end);
+ }
+ block->set_argument_count(argument_count_);
+ next_block_ = NULL;
+ current_block_ = NULL;
+}
+
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+ HInstruction* old_current = current_instruction_;
+ current_instruction_ = current;
+
+ LInstruction* instr = NULL;
+ if (current->CanReplaceWithDummyUses()) {
+ if (current->OperandCount() == 0) {
+ instr = DefineAsRegister(new (zone()) LDummy());
+ } else {
+ DCHECK(!current->OperandAt(0)->IsControlInstruction());
+ instr = DefineAsRegister(new (zone())
+ LDummyUse(UseAny(current->OperandAt(0))));
+ }
+ for (int i = 1; i < current->OperandCount(); ++i) {
+ if (current->OperandAt(i)->IsControlInstruction()) continue;
+ LInstruction* dummy =
+ new (zone()) LDummyUse(UseAny(current->OperandAt(i)));
+ dummy->set_hydrogen_value(current);
+ chunk_->AddInstruction(dummy, current_block_);
+ }
+ } else {
+ HBasicBlock* successor;
+ if (current->IsControlInstruction() &&
+ HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
+ successor != NULL) {
+ instr = new (zone()) LGoto(successor);
+ } else {
+ instr = current->CompileToLithium(this);
+ }
+ }
+
+ argument_count_ += current->argument_delta();
+ DCHECK(argument_count_ >= 0);
+
+ if (instr != NULL) {
+ AddInstruction(instr, current);
+ }
+
+ current_instruction_ = old_current;
+}
+
+void LChunkBuilder::AddInstruction(LInstruction* instr,
+ HInstruction* hydrogen_val) {
+ // Associate the hydrogen instruction first, since we may need it for
+ // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+ instr->set_hydrogen_value(hydrogen_val);
+
+#if DEBUG
+ // Make sure that the lithium instruction has either no fixed register
+ // constraints in temps or the result OR no uses that are only used at
+ // start. If this invariant doesn't hold, the register allocator can decide
+ // to insert a split of a range immediately before the instruction due to an
+ // already allocated register needing to be used for the instruction's fixed
+ // register constraint. In this case, The register allocator won't see an
+ // interference between the split child and the use-at-start (it would if
+ // the it was just a plain use), so it is free to move the split child into
+ // the same register that is used for the use-at-start.
+ // See https://code.google.com/p/chromium/issues/detail?id=201590
+ if (!(instr->ClobbersRegisters() &&
+ instr->ClobbersDoubleRegisters(isolate()))) {
+ int fixed = 0;
+ int used_at_start = 0;
+ for (UseIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->IsUsedAtStart()) ++used_at_start;
+ }
+ if (instr->Output() != NULL) {
+ if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
+ }
+ for (TempIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->HasFixedPolicy()) ++fixed;
+ }
+ DCHECK(fixed == 0 || used_at_start == 0);
+ }
+#endif
+
+ if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+ instr = AssignPointerMap(instr);
+ }
+ if (FLAG_stress_environments && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
+ }
+ chunk_->AddInstruction(instr, current_block_);
+
+ CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
+}
+
+LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
+ LInstruction* result = new (zone()) LPrologue();
+ if (info_->num_heap_slots() > 0) {
+ result = MarkAsCall(result, instr);
+ }
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+ return new (zone()) LGoto(instr->FirstSuccessor());
+}
+
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+ HValue* value = instr->value();
+ Representation r = value->representation();
+ HType type = value->type();
+ ToBooleanICStub::Types expected = instr->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+
+ bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
+ type.IsJSArray() || type.IsHeapNumber() || type.IsString();
+ LInstruction* branch = new (zone()) LBranch(UseRegister(value));
+ if (!easy_case &&
+ ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
+ !expected.IsGeneric())) {
+ branch = AssignEnvironment(branch);
+ }
+ return branch;
+}
+
+LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
+ return new (zone()) LDebugBreak();
+}
+
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+ DCHECK(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegister(instr->value());
+ LOperand* temp = TempRegister();
+ return new (zone()) LCmpMapAndBranch(value, temp);
+}
+
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) {
+ info()->MarkAsRequiresFrame();
+ LOperand* value = UseRegister(instr->value());
+ return DefineAsRegister(new (zone()) LArgumentsLength(value));
+}
+
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+ info()->MarkAsRequiresFrame();
+ return DefineAsRegister(new (zone()) LArgumentsElements);
+}
+
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+ LOperand* left =
+ UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
+ LOperand* right =
+ UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
+ LOperand* context = UseFixed(instr->context(), cp);
+ LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
+ return MarkAsCall(DefineFixed(result, r2), instr);
+}
+
+LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
+ HHasInPrototypeChainAndBranch* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* prototype = UseRegister(instr->prototype());
+ LHasInPrototypeChainAndBranch* result =
+ new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
+ return AssignEnvironment(result);
+}
+
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+ LOperand* receiver = UseRegisterAtStart(instr->receiver());
+ LOperand* function = UseRegisterAtStart(instr->function());
+ LWrapReceiver* result = new (zone()) LWrapReceiver(receiver, function);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+ LOperand* function = UseFixed(instr->function(), r3);
+ LOperand* receiver = UseFixed(instr->receiver(), r2);
+ LOperand* length = UseFixed(instr->length(), r4);
+ LOperand* elements = UseFixed(instr->elements(), r5);
+ LApplyArguments* result =
+ new (zone()) LApplyArguments(function, receiver, length, elements);
+ return MarkAsCall(DefineFixed(result, r2), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
+ int argc = instr->OperandCount();
+ for (int i = 0; i < argc; ++i) {
+ LOperand* argument = Use(instr->argument(i));
+ AddInstruction(new (zone()) LPushArgument(argument), instr);
+ }
+ return NULL;
+}
+
+LInstruction* LChunkBuilder::DoStoreCodeEntry(
+ HStoreCodeEntry* store_code_entry) {
+ LOperand* function = UseRegister(store_code_entry->function());
+ LOperand* code_object = UseTempRegister(store_code_entry->code_object());
+ return new (zone()) LStoreCodeEntry(function, code_object);
+}
+
+LInstruction* LChunkBuilder::DoInnerAllocatedObject(
+ HInnerAllocatedObject* instr) {
+ LOperand* base_object = UseRegisterAtStart(instr->base_object());
+ LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
+ return DefineAsRegister(new (zone())
+ LInnerAllocatedObject(base_object, offset));
+}
+
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+ return instr->HasNoUses() ? NULL
+ : DefineAsRegister(new (zone()) LThisFunction);
+}
+
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+ if (instr->HasNoUses()) return NULL;
+
+ if (info()->IsStub()) {
+ return DefineFixed(new (zone()) LContext, cp);
+ }
+
+ return DefineAsRegister(new (zone()) LContext);
+}
+
+LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(new (zone()) LDeclareGlobals(context), instr);
+}
+
+LInstruction* LChunkBuilder::DoCallWithDescriptor(HCallWithDescriptor* instr) {
+ CallInterfaceDescriptor descriptor = instr->descriptor();
+
+ LOperand* target = UseRegisterOrConstantAtStart(instr->target());
+ ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+ // Target
+ ops.Add(target, zone());
+ // Context
+ LOperand* op = UseFixed(instr->OperandAt(1), cp);
+ ops.Add(op, zone());
+ // Other register parameters
+ for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
+ i < instr->OperandCount(); i++) {
+ op =
+ UseFixed(instr->OperandAt(i),
+ descriptor.GetRegisterParameter(
+ i - LCallWithDescriptor::kImplicitRegisterParameterCount));
+ ops.Add(op, zone());
+ }
+
+ LCallWithDescriptor* result =
+ new (zone()) LCallWithDescriptor(descriptor, ops, zone());
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
+ return MarkAsCall(DefineFixed(result, r2), instr);
+}
+
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* function = UseFixed(instr->function(), r3);
+ LInvokeFunction* result = new (zone()) LInvokeFunction(context, function);
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
+ return MarkAsCall(DefineFixed(result, r2), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+ switch (instr->op()) {
+ case kMathFloor:
+ return DoMathFloor(instr);
+ case kMathRound:
+ return DoMathRound(instr);
+ case kMathFround:
+ return DoMathFround(instr);
+ case kMathAbs:
+ return DoMathAbs(instr);
+ case kMathLog:
+ return DoMathLog(instr);
+ case kMathExp:
+ return DoMathExp(instr);
+ case kMathSqrt:
+ return DoMathSqrt(instr);
+ case kMathPowHalf:
+ return DoMathPowHalf(instr);
+ case kMathClz32:
+ return DoMathClz32(instr);
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegister(instr->value());
+ LMathFloor* result = new (zone()) LMathFloor(input);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+}
+
+LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegister(instr->value());
+ LOperand* temp = TempDoubleRegister();
+ LMathRound* result = new (zone()) LMathRound(input, temp);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegister(instr->value());
+ LMathFround* result = new (zone()) LMathFround(input);
+ return DefineAsRegister(result);
+}
+
+LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
+ Representation r = instr->value()->representation();
+ LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32())
+ ? NULL
+ : UseFixed(instr->context(), cp);
+ LOperand* input = UseRegister(instr->value());
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LMathAbs(context, input));
+ if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
+ if (!r.IsDouble()) result = AssignEnvironment(result);
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->value()->representation().IsDouble());
+ LOperand* input = UseFixedDouble(instr->value(), d1);
+ return MarkAsCall(DefineFixedDouble(new (zone()) LMathLog(input), d1), instr);
+}
+
+LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathClz32* result = new (zone()) LMathClz32(input);
+ return DefineAsRegister(result);
+}
+
+LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegister(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LOperand* double_temp = TempDoubleRegister();
+ LMathExp* result = new (zone()) LMathExp(input, double_temp, temp1, temp2);
+ return DefineAsRegister(result);
+}
+
+LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathSqrt* result = new (zone()) LMathSqrt(input);
+ return DefineAsRegister(result);
+}
+
+LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathPowHalf* result = new (zone()) LMathPowHalf(input);
+ return DefineAsRegister(result);
+}
+
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* constructor = UseFixed(instr->constructor(), r3);
+ LCallNewArray* result = new (zone()) LCallNewArray(context, constructor);
+ return MarkAsCall(DefineFixed(result, r2), instr);
+}
+
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new (zone()) LCallRuntime(context), r2), instr);
+}
+
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+ return DoShift(Token::ROR, instr);
+}
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+ return DoShift(Token::SHR, instr);
+}
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+ return DoShift(Token::SAR, instr);
+}
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+ return DoShift(Token::SHL, instr);
+}
+
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
+
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
+ return DefineAsRegister(new (zone()) LBitI(left, right));
+ } else {
+ return DoArithmeticT(instr->op(), instr);
+ }
+}
+
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+ DCHECK(instr->representation().IsSmiOrInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LDivByPowerOf2I(dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+ (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+ DCHECK(instr->representation().IsInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LDivByConstI(dividend, divisor));
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
+ DCHECK(instr->representation().IsSmiOrInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LDivI(dividend, divisor));
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ (instr->CheckFlag(HValue::kCanOverflow) &&
+ !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) ||
+ (!instr->IsMathFloorOfDiv() &&
+ !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ if (instr->RightIsPowerOf2()) {
+ return DoDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoDivByConstI(instr);
+ } else {
+ return DoDivI(instr);
+ }
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::DIV, instr);
+ } else {
+ return DoArithmeticT(Token::DIV, instr);
+ }
+}
+
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LFlooringDivByPowerOf2I(dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+ DCHECK(instr->representation().IsInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp =
+ ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive)))
+ ? NULL
+ : TempRegister();
+ LInstruction* result = DefineAsRegister(
+ new (zone()) LFlooringDivByConstI(dividend, divisor, temp));
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
+ DCHECK(instr->representation().IsSmiOrInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LFlooringDivI(dividend, divisor));
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ (instr->CheckFlag(HValue::kCanOverflow) &&
+ !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+ if (instr->RightIsPowerOf2()) {
+ return DoFlooringDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoFlooringDivByConstI(instr);
+ } else {
+ return DoFlooringDivI(instr);
+ }
+}
+
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+ DCHECK(instr->representation().IsSmiOrInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result =
+ DefineSameAsFirst(new (zone()) LModByPowerOf2I(dividend, divisor));
+ if (instr->CheckFlag(HValue::kLeftCanBeNegative) &&
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+ DCHECK(instr->representation().IsSmiOrInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LModByConstI(dividend, divisor));
+ if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+ DCHECK(instr->representation().IsSmiOrInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LModI(dividend, divisor));
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ if (instr->RightIsPowerOf2()) {
+ return DoModByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoModByConstI(instr);
+ } else {
+ return DoModI(instr);
+ }
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MOD, instr);
+ } else {
+ return DoArithmeticT(Token::MOD, instr);
+ }
+}
+
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ HValue* left = instr->BetterLeftOperand();
+ HValue* right = instr->BetterRightOperand();
+ LOperand* left_op;
+ LOperand* right_op;
+ bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
+
+ int32_t constant_value = 0;
+ if (right->IsConstant()) {
+ HConstant* constant = HConstant::cast(right);
+ constant_value = constant->Integer32Value();
+ // Constants -1, 0 and 1 can be optimized if the result can overflow.
+ // For other constants, it can be optimized only without overflow.
+ if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) {
+ left_op = UseRegisterAtStart(left);
+ right_op = UseConstant(right);
+ } else {
+ if (bailout_on_minus_zero) {
+ left_op = UseRegister(left);
+ } else {
+ left_op = UseRegisterAtStart(left);
+ }
+ right_op = UseRegister(right);
+ }
+ } else {
+ if (bailout_on_minus_zero) {
+ left_op = UseRegister(left);
+ } else {
+ left_op = UseRegisterAtStart(left);
+ }
+ right_op = UseRegister(right);
+ }
+ LMulI* mul = new (zone()) LMulI(left_op, right_op);
+ if (right_op->IsConstantOperand()
+ ? ((can_overflow && constant_value == -1) ||
+ (bailout_on_minus_zero && constant_value <= 0))
+ : (can_overflow || bailout_on_minus_zero)) {
+ AssignEnvironment(mul);
+ }
+ return DefineAsRegister(mul);
+
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MUL, instr);
+ } else {
+ return DoArithmeticT(Token::MUL, instr);
+ }
+}
+
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+
+ if (instr->left()->IsConstant() &&
+ !instr->CheckFlag(HValue::kCanOverflow)) {
+ // If lhs is constant, do reverse subtraction instead.
+ return DoRSub(instr);
+ }
+
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
+ LSubI* sub = new (zone()) LSubI(left, right);
+ LInstruction* result = DefineAsRegister(sub);
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::SUB, instr);
+ } else {
+ return DoArithmeticT(Token::SUB, instr);
+ }
+}
+
+LInstruction* LChunkBuilder::DoRSub(HSub* instr) {
+ DCHECK(instr->representation().IsSmiOrInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
+
+ // Note: The lhs of the subtraction becomes the rhs of the
+ // reverse-subtraction.
+ LOperand* left = UseRegisterAtStart(instr->right());
+ LOperand* right = UseOrConstantAtStart(instr->left());
+ LRSubI* rsb = new (zone()) LRSubI(left, right);
+ LInstruction* result = DefineAsRegister(rsb);
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
+ LOperand* multiplier_op = UseRegister(mul->left());
+ LOperand* multiplicand_op = UseRegister(mul->right());
+ LOperand* addend_op = UseRegister(addend);
+ return DefineAsRegister(
+ new (zone()) LMultiplyAddD(addend_op, multiplier_op, multiplicand_op));
+}
+
+LInstruction* LChunkBuilder::DoMultiplySub(HValue* minuend, HMul* mul) {
+ LOperand* minuend_op = UseRegister(minuend);
+ LOperand* multiplier_op = UseRegister(mul->left());
+ LOperand* multiplicand_op = UseRegister(mul->right());
+
+ return DefineAsRegister(
+ new (zone()) LMultiplySubD(minuend_op, multiplier_op, multiplicand_op));
+}
+
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
+ LAddI* add = new (zone()) LAddI(left, right);
+ LInstruction* result = DefineAsRegister(add);
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else if (instr->representation().IsExternal()) {
+ DCHECK(instr->IsConsistentExternalRepresentation());
+ DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
+ LAddI* add = new (zone()) LAddI(left, right);
+ LInstruction* result = DefineAsRegister(add);
+ return result;
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::ADD, instr);
+ } else {
+ return DoArithmeticT(Token::ADD, instr);
+ }
+}
+
+LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
+ LOperand* left = NULL;
+ LOperand* right = NULL;
+ if (instr->representation().IsSmiOrInteger32()) {
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ left = UseRegisterAtStart(instr->BetterLeftOperand());
+ right = UseOrConstantAtStart(instr->BetterRightOperand());
+ } else {
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->left()->representation().IsDouble());
+ DCHECK(instr->right()->representation().IsDouble());
+ left = UseRegister(instr->left());
+ right = UseRegister(instr->right());
+ }
+ return DefineAsRegister(new (zone()) LMathMinMax(left, right));
+}
+
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+ DCHECK(instr->representation().IsDouble());
+ // We call a C function for double power. It can't trigger a GC.
+ // We need to use fixed result register for the call.
+ Representation exponent_type = instr->right()->representation();
+ DCHECK(instr->left()->representation().IsDouble());
+ LOperand* left = UseFixedDouble(instr->left(), d1);
+ LOperand* right = exponent_type.IsDouble()
+ ? UseFixedDouble(instr->right(), d2)
+ : UseFixed(instr->right(), r4);
+ LPower* result = new (zone()) LPower(left, right);
+ return MarkAsCall(DefineFixedDouble(result, d3), instr,
+ CAN_DEOPTIMIZE_EAGERLY);
+}
+
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
+ DCHECK(instr->left()->representation().IsTagged());
+ DCHECK(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left = UseFixed(instr->left(), r3);
+ LOperand* right = UseFixed(instr->right(), r2);
+ LCmpT* result = new (zone()) LCmpT(context, left, right);
+ return MarkAsCall(DefineFixed(result, r2), instr);
+}
+
+LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
+ HCompareNumericAndBranch* instr) {
+ Representation r = instr->representation();
+ if (r.IsSmiOrInteger32()) {
+ DCHECK(instr->left()->representation().Equals(r));
+ DCHECK(instr->right()->representation().Equals(r));
+ LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+ LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+ return new (zone()) LCompareNumericAndBranch(left, right);
+ } else {
+ DCHECK(r.IsDouble());
+ DCHECK(instr->left()->representation().IsDouble());
+ DCHECK(instr->right()->representation().IsDouble());
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ return new (zone()) LCompareNumericAndBranch(left, right);
+ }
+}
+
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+ HCompareObjectEqAndBranch* instr) {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ return new (zone()) LCmpObjectEqAndBranch(left, right);
+}
+
+LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
+ HCompareHoleAndBranch* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new (zone()) LCmpHoleAndBranch(value);
+}
+
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+ DCHECK(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+ return new (zone()) LIsStringAndBranch(value, temp);
+}
+
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
+ DCHECK(instr->value()->representation().IsTagged());
+ return new (zone()) LIsSmiAndBranch(Use(instr->value()));
+}
+
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+ HIsUndetectableAndBranch* instr) {
+ DCHECK(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new (zone()) LIsUndetectableAndBranch(value, TempRegister());
+}
+
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+ HStringCompareAndBranch* instr) {
+ DCHECK(instr->left()->representation().IsTagged());
+ DCHECK(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left = UseFixed(instr->left(), r3);
+ LOperand* right = UseFixed(instr->right(), r2);
+ LStringCompareAndBranch* result =
+ new (zone()) LStringCompareAndBranch(context, left, right);
+ return MarkAsCall(result, instr);
+}
+
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+ HHasInstanceTypeAndBranch* instr) {
+ DCHECK(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new (zone()) LHasInstanceTypeAndBranch(value);
+}
+
+LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
+ HGetCachedArrayIndex* instr) {
+ DCHECK(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+
+ return DefineAsRegister(new (zone()) LGetCachedArrayIndex(value));
+}
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+ HHasCachedArrayIndexAndBranch* instr) {
+ DCHECK(instr->value()->representation().IsTagged());
+ return new (zone())
+ LHasCachedArrayIndexAndBranch(UseRegisterAtStart(instr->value()));
+}
+
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+ HClassOfTestAndBranch* instr) {
+ DCHECK(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegister(instr->value());
+ return new (zone()) LClassOfTestAndBranch(value, TempRegister());
+}
+
+LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ return DefineAsRegister(new (zone()) LSeqStringGetChar(string, index));
+}
+
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = FLAG_debug_code
+ ? UseRegisterAtStart(instr->index())
+ : UseRegisterOrConstantAtStart(instr->index());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL;
+ return new (zone()) LSeqStringSetChar(context, string, index, value);
+}
+
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
+ if (!FLAG_debug_code && instr->skip_check()) return NULL;
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ LOperand* length = !index->IsConstantOperand()
+ ? UseRegisterOrConstantAtStart(instr->length())
+ : UseRegisterAtStart(instr->length());
+ LInstruction* result = new (zone()) LBoundsCheck(index, length);
+ if (!FLAG_debug_code || !instr->skip_check()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+ // The control instruction marking the end of a block that completed
+ // abruptly (e.g., threw an exception). There is nothing specific to do.
+ return NULL;
+}
+
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) { return NULL; }
+
+LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
+ // All HForceRepresentation instructions should be eliminated in the
+ // representation change phase of Hydrogen.
+ UNREACHABLE();
+ return NULL;
+}
+
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+ Representation from = instr->from();
+ Representation to = instr->to();
+ HValue* val = instr->value();
+ if (from.IsSmi()) {
+ if (to.IsTagged()) {
+ LOperand* value = UseRegister(val);
+ return DefineSameAsFirst(new (zone()) LDummyUse(value));
+ }
+ from = Representation::Tagged();
+ }
+ if (from.IsTagged()) {
+ if (to.IsDouble()) {
+ LOperand* value = UseRegister(val);
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LNumberUntagD(value));
+ if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+ return result;
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegister(val);
+ if (val->type().IsSmi()) {
+ return DefineSameAsFirst(new (zone()) LDummyUse(value));
+ }
+ return AssignEnvironment(
+ DefineSameAsFirst(new (zone()) LCheckSmi(value)));
+ } else {
+ DCHECK(to.IsInteger32());
+ if (val->type().IsSmi() || val->representation().IsSmi()) {
+ LOperand* value = UseRegisterAtStart(val);
+ return DefineAsRegister(new (zone()) LSmiUntag(value, false));
+ } else {
+ LOperand* value = UseRegister(val);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempDoubleRegister();
+ LInstruction* result =
+ DefineSameAsFirst(new (zone()) LTaggedToI(value, temp1, temp2));
+ if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+ return result;
+ }
+ }
+ } else if (from.IsDouble()) {
+ if (to.IsTagged()) {
+ info()->MarkAsDeferredCalling();
+ LOperand* value = UseRegister(val);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LUnallocated* result_temp = TempRegister();
+ LNumberTagD* result = new (zone()) LNumberTagD(value, temp1, temp2);
+ return AssignPointerMap(Define(result, result_temp));
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegister(val);
+ return AssignEnvironment(
+ DefineAsRegister(new (zone()) LDoubleToSmi(value)));
+ } else {
+ DCHECK(to.IsInteger32());
+ LOperand* value = UseRegister(val);
+ LInstruction* result = DefineAsRegister(new (zone()) LDoubleToI(value));
+ if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result);
+ return result;
+ }
+ } else if (from.IsInteger32()) {
+ info()->MarkAsDeferredCalling();
+ if (to.IsTagged()) {
+ if (!instr->CheckFlag(HValue::kCanOverflow)) {
+ LOperand* value = UseRegisterAtStart(val);
+ return DefineAsRegister(new (zone()) LSmiTag(value));
+ } else if (val->CheckFlag(HInstruction::kUint32)) {
+ LOperand* value = UseRegisterAtStart(val);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LNumberTagU* result = new (zone()) LNumberTagU(value, temp1, temp2);
+ return AssignPointerMap(DefineAsRegister(result));
+ } else {
+ LOperand* value = UseRegisterAtStart(val);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LNumberTagI* result = new (zone()) LNumberTagI(value, temp1, temp2);
+ return AssignPointerMap(DefineAsRegister(result));
+ }
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegister(val);
+ LInstruction* result = DefineAsRegister(new (zone()) LSmiTag(value));
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else {
+ DCHECK(to.IsDouble());
+ if (val->CheckFlag(HInstruction::kUint32)) {
+ return DefineAsRegister(new (zone()) LUint32ToDouble(UseRegister(val)));
+ } else {
+ return DefineAsRegister(new (zone()) LInteger32ToDouble(Use(val)));
+ }
+ }
+ }
+ UNREACHABLE();
+ return NULL;
+}
+
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LInstruction* result = new (zone()) LCheckNonSmi(value);
+ if (!instr->value()->type().IsHeapObject()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new (zone()) LCheckSmi(value));
+}
+
+LInstruction* LChunkBuilder::DoCheckArrayBufferNotNeutered(
+ HCheckArrayBufferNotNeutered* instr) {
+ LOperand* view = UseRegisterAtStart(instr->value());
+ LCheckArrayBufferNotNeutered* result =
+ new (zone()) LCheckArrayBufferNotNeutered(view);
+ return AssignEnvironment(result);
+}
+
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LInstruction* result = new (zone()) LCheckInstanceType(value);
+ return AssignEnvironment(result);
+}
+
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new (zone()) LCheckValue(value));
+}
+
+LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
+ if (instr->IsStabilityCheck()) return new (zone()) LCheckMaps;
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+ LInstruction* result =
+ AssignEnvironment(new (zone()) LCheckMaps(value, temp));
+ if (instr->HasMigrationTarget()) {
+ info()->MarkAsDeferredCalling();
+ result = AssignPointerMap(result);
+ }
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+ HValue* value = instr->value();
+ Representation input_rep = value->representation();
+ LOperand* reg = UseRegister(value);
+ if (input_rep.IsDouble()) {
+ return DefineAsRegister(new (zone()) LClampDToUint8(reg));
+ } else if (input_rep.IsInteger32()) {
+ return DefineAsRegister(new (zone()) LClampIToUint8(reg));
+ } else {
+ DCHECK(input_rep.IsSmiOrTagged());
+ LClampTToUint8* result =
+ new (zone()) LClampTToUint8(reg, TempDoubleRegister());
+ return AssignEnvironment(DefineAsRegister(result));
+ }
+}
+
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+ HValue* value = instr->value();
+ DCHECK(value->representation().IsDouble());
+ return DefineAsRegister(new (zone()) LDoubleBits(UseRegister(value)));
+}
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+ LOperand* lo = UseRegister(instr->lo());
+ LOperand* hi = UseRegister(instr->hi());
+ return DefineAsRegister(new (zone()) LConstructDouble(hi, lo));
+}
+
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+ LOperand* context = info()->IsStub() ? UseFixed(instr->context(), cp) : NULL;
+ LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
+ return new (zone())
+ LReturn(UseFixed(instr->value(), r2), context, parameter_count);
+}
+
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
+ Representation r = instr->representation();
+ if (r.IsSmi()) {
+ return DefineAsRegister(new (zone()) LConstantS);
+ } else if (r.IsInteger32()) {
+ return DefineAsRegister(new (zone()) LConstantI);
+ } else if (r.IsDouble()) {
+ return DefineAsRegister(new (zone()) LConstantD);
+ } else if (r.IsExternal()) {
+ return DefineAsRegister(new (zone()) LConstantE);
+ } else if (r.IsTagged()) {
+ return DefineAsRegister(new (zone()) LConstantT);
+ } else {
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* global_object =
+ UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+ }
+ LLoadGlobalGeneric* result =
+ new (zone()) LLoadGlobalGeneric(context, global_object, vector);
+ return MarkAsCall(DefineFixed(result, r2), instr);
+}
+
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+ LOperand* context = UseRegisterAtStart(instr->value());
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LLoadContextSlot(context));
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+ LOperand* context;
+ LOperand* value;
+ if (instr->NeedsWriteBarrier()) {
+ context = UseTempRegister(instr->context());
+ value = UseTempRegister(instr->value());
+ } else {
+ context = UseRegister(instr->context());
+ value = UseRegister(instr->value());
+ }
+ LInstruction* result = new (zone()) LStoreContextSlot(context, value);
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+ LOperand* obj = UseRegisterAtStart(instr->object());
+ return DefineAsRegister(new (zone()) LLoadNamedField(obj));
+}
+
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object =
+ UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+ }
+
+ LInstruction* result =
+ DefineFixed(new (zone()) LLoadNamedGeneric(context, object, vector), r2);
+ return MarkAsCall(result, instr);
+}
+
+LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
+ HLoadFunctionPrototype* instr) {
+ return AssignEnvironment(DefineAsRegister(
+ new (zone()) LLoadFunctionPrototype(UseRegister(instr->function()))));
+}
+
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+ return DefineAsRegister(new (zone()) LLoadRoot);
+}
+
+LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+ DCHECK(instr->key()->representation().IsSmiOrInteger32());
+ ElementsKind elements_kind = instr->elements_kind();
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ LInstruction* result = NULL;
+
+ if (!instr->is_fixed_typed_array()) {
+ LOperand* obj = NULL;
+ if (instr->representation().IsDouble()) {
+ obj = UseRegister(instr->elements());
+ } else {
+ obj = UseRegisterAtStart(instr->elements());
+ }
+ result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr));
+ } else {
+ DCHECK((instr->representation().IsInteger32() &&
+ !IsDoubleOrFloatElementsKind(elements_kind)) ||
+ (instr->representation().IsDouble() &&
+ IsDoubleOrFloatElementsKind(elements_kind)));
+ LOperand* backing_store = UseRegister(instr->elements());
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ result = DefineAsRegister(
+ new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));
+ }
+
+ bool needs_environment;
+ if (instr->is_fixed_typed_array()) {
+ // see LCodeGen::DoLoadKeyedExternalArray
+ needs_environment = elements_kind == UINT32_ELEMENTS &&
+ !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ // see LCodeGen::DoLoadKeyedFixedDoubleArray and
+ // LCodeGen::DoLoadKeyedFixedArray
+ needs_environment =
+ instr->RequiresHoleCheck() ||
+ (instr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED && info()->IsStub());
+ }
+
+ if (needs_environment) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object =
+ UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+ LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+ }
+
+ LInstruction* result = DefineFixed(
+ new (zone()) LLoadKeyedGeneric(context, object, key, vector), r2);
+ return MarkAsCall(result, instr);
+}
+
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+ if (!instr->is_fixed_typed_array()) {
+ DCHECK(instr->elements()->representation().IsTagged());
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+ LOperand* object = NULL;
+ LOperand* key = NULL;
+ LOperand* val = NULL;
+
+ if (instr->value()->representation().IsDouble()) {
+ object = UseRegisterAtStart(instr->elements());
+ val = UseRegister(instr->value());
+ key = UseRegisterOrConstantAtStart(instr->key());
+ } else {
+ if (needs_write_barrier) {
+ object = UseTempRegister(instr->elements());
+ val = UseTempRegister(instr->value());
+ key = UseTempRegister(instr->key());
+ } else {
+ object = UseRegisterAtStart(instr->elements());
+ val = UseRegisterAtStart(instr->value());
+ key = UseRegisterOrConstantAtStart(instr->key());
+ }
+ }
+
+ return new (zone()) LStoreKeyed(object, key, val, nullptr);
+ }
+
+ DCHECK((instr->value()->representation().IsInteger32() &&
+ !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
+ (instr->value()->representation().IsDouble() &&
+ IsDoubleOrFloatElementsKind(instr->elements_kind())));
+ DCHECK(instr->elements()->representation().IsExternal());
+ LOperand* val = UseRegister(instr->value());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ LOperand* backing_store = UseRegister(instr->elements());
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
+}
+
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* obj =
+ UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+ LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
+ LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+
+ DCHECK(instr->object()->representation().IsTagged());
+ DCHECK(instr->key()->representation().IsTagged());
+ DCHECK(instr->value()->representation().IsTagged());
+
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+ vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+ }
+
+ LStoreKeyedGeneric* result =
+ new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
+ return MarkAsCall(result, instr);
+}
+
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+ HTransitionElementsKind* instr) {
+ if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* new_map_reg = TempRegister();
+ LTransitionElementsKind* result =
+ new (zone()) LTransitionElementsKind(object, NULL, new_map_reg);
+ return result;
+ } else {
+ LOperand* object = UseFixed(instr->object(), r2);
+ LOperand* context = UseFixed(instr->context(), cp);
+ LTransitionElementsKind* result =
+ new (zone()) LTransitionElementsKind(object, context, NULL);
+ return MarkAsCall(result, instr);
+ }
+}
+
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+ HTrapAllocationMemento* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LTrapAllocationMemento* result =
+ new (zone()) LTrapAllocationMemento(object, temp1, temp2);
+ return AssignEnvironment(result);
+}
+
+LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object = Use(instr->object());
+ LOperand* elements = Use(instr->elements());
+ LOperand* key = UseRegisterOrConstant(instr->key());
+ LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
+
+ LMaybeGrowElements* result = new (zone())
+ LMaybeGrowElements(context, object, elements, key, current_capacity);
+ DefineFixed(result, r2);
+ return AssignPointerMap(AssignEnvironment(result));
+}
+
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+ bool is_in_object = instr->access().IsInobject();
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+ bool needs_write_barrier_for_map =
+ instr->has_transition() && instr->NeedsWriteBarrierForMap();
+
+ LOperand* obj;
+ if (needs_write_barrier) {
+ obj = is_in_object ? UseRegister(instr->object())
+ : UseTempRegister(instr->object());
+ } else {
+ obj = needs_write_barrier_for_map ? UseRegister(instr->object())
+ : UseRegisterAtStart(instr->object());
+ }
+
+ LOperand* val;
+ if (needs_write_barrier) {
+ val = UseTempRegister(instr->value());
+ } else if (instr->field_representation().IsDouble()) {
+ val = UseRegisterAtStart(instr->value());
+ } else {
+ val = UseRegister(instr->value());
+ }
+
+ // We need a temporary register for write barrier of the map field.
+ LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
+
+ return new (zone()) LStoreNamedField(obj, val, temp);
+}
+
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* obj =
+ UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+ LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+ vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+ }
+
+ LStoreNamedGeneric* result =
+ new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
+ return MarkAsCall(result, instr);
+}
+
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left = UseFixed(instr->left(), r3);
+ LOperand* right = UseFixed(instr->right(), r2);
+ return MarkAsCall(
+ DefineFixed(new (zone()) LStringAdd(context, left, right), r2), instr);
+}
+
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+ LOperand* string = UseTempRegister(instr->string());
+ LOperand* index = UseTempRegister(instr->index());
+ LOperand* context = UseAny(instr->context());
+ LStringCharCodeAt* result =
+ new (zone()) LStringCharCodeAt(context, string, index);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
+ LOperand* char_code = UseRegister(instr->value());
+ LOperand* context = UseAny(instr->context());
+ LStringCharFromCode* result =
+ new (zone()) LStringCharFromCode(context, char_code);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseAny(instr->context());
+ LOperand* size = UseRegisterOrConstant(instr->size());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+ DCHECK(argument_count_ == 0);
+ allocator_->MarkAsOsrEntry();
+ current_block_->last_environment()->set_ast_id(instr->ast_id());
+ return AssignEnvironment(new (zone()) LOsrEntry);
+}
+
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
+ LParameter* result = new (zone()) LParameter;
+ if (instr->kind() == HParameter::STACK_PARAMETER) {
+ int spill_index = chunk()->GetParameterStackSlot(instr->index());
+ return DefineAsSpilled(result, spill_index);
+ } else {
+ DCHECK(info()->IsStub());
+ CallInterfaceDescriptor descriptor = graph()->descriptor();
+ int index = static_cast<int>(instr->index());
+ Register reg = descriptor.GetRegisterParameter(index);
+ return DefineFixed(result, reg);
+ }
+}
+
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+ // Use an index that corresponds to the location in the unoptimized frame,
+ // which the optimized frame will subsume.
+ int env_index = instr->index();
+ int spill_index = 0;
+ if (instr->environment()->is_parameter_index(env_index)) {
+ spill_index = chunk()->GetParameterStackSlot(env_index);
+ } else {
+ spill_index = env_index - instr->environment()->first_local_index();
+ if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+ Retry(kTooManySpillSlotsNeededForOSR);
+ spill_index = 0;
+ }
+ spill_index += StandardFrameConstants::kFixedSlotCount;
+ }
+ return DefineAsSpilled(new (zone()) LUnknownOSRValue, spill_index);
+}
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+ // There are no real uses of the arguments object.
+ // arguments.length and element access are supported directly on
+ // stack arguments, and any real arguments object use causes a bailout.
+ // So this value is never used.
+ return NULL;
+}
+
+LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+
+ // There are no real uses of a captured object.
+ return NULL;
+}
+
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+ info()->MarkAsRequiresFrame();
+ LOperand* args = UseRegister(instr->arguments());
+ LOperand* length = UseRegisterOrConstantAtStart(instr->length());
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ return DefineAsRegister(new (zone()) LAccessArgumentsAt(args, length, index));
+}
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* value = UseFixed(instr->value(), r5);
+ LTypeof* result = new (zone()) LTypeof(context, value);
+ return MarkAsCall(DefineFixed(result, r2), instr);
+}
+
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+ return new (zone()) LTypeofIsAndBranch(UseRegister(instr->value()));
+}
+
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+ return NULL;
+}
+
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+ if (instr->is_function_entry()) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(new (zone()) LStackCheck(context), instr);
+ } else {
+ DCHECK(instr->is_backwards_branch());
+ LOperand* context = UseAny(instr->context());
+ return AssignEnvironment(
+ AssignPointerMap(new (zone()) LStackCheck(context)));
+ }
+}
+
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
+ HEnvironment* outer = current_block_->last_environment();
+ outer->set_ast_id(instr->ReturnId());
+ HConstant* undefined = graph()->GetConstantUndefined();
+ HEnvironment* inner = outer->CopyForInlining(
+ instr->closure(), instr->arguments_count(), instr->function(), undefined,
+ instr->inlining_kind(), instr->syntactic_tail_call_mode());
+ // Only replay binding of arguments object if it wasn't removed from graph.
+ if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
+ inner->Bind(instr->arguments_var(), instr->arguments_object());
+ }
+ inner->BindContext(instr->closure_context());
+ inner->set_entry(instr);
+ current_block_->UpdateEnvironment(inner);
+ chunk_->AddInlinedFunction(instr->shared());
+ return NULL;
+}
+
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+ LInstruction* pop = NULL;
+
+ HEnvironment* env = current_block_->last_environment();
+
+ if (env->entry()->arguments_pushed()) {
+ int argument_count = env->arguments_environment()->parameter_count();
+ pop = new (zone()) LDrop(argument_count);
+ DCHECK(instr->argument_delta() == -argument_count);
+ }
+
+ HEnvironment* outer =
+ current_block_->last_environment()->DiscardInlined(false);
+ current_block_->UpdateEnvironment(outer);
+
+ return pop;
+}
+
+LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object = UseFixed(instr->enumerable(), r2);
+ LForInPrepareMap* result = new (zone()) LForInPrepareMap(context, object);
+ return MarkAsCall(DefineFixed(result, r2), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
+ LOperand* map = UseRegister(instr->map());
+ return AssignEnvironment(
+ DefineAsRegister(new (zone()) LForInCacheArray(map)));
+}
+
+LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* map = UseRegisterAtStart(instr->map());
+ return AssignEnvironment(new (zone()) LCheckMapValue(value, map));
+}
+
+LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* index = UseTempRegister(instr->index());
+ LLoadFieldByIndex* load = new (zone()) LLoadFieldByIndex(object, index);
+ LInstruction* result = DefineSameAsFirst(load);
+ return AssignPointerMap(result);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/crankshaft/s390/lithium-s390.h b/deps/v8/src/crankshaft/s390/lithium-s390.h
new file mode 100644
index 0000000000..b6a161411d
--- /dev/null
+++ b/deps/v8/src/crankshaft/s390/lithium-s390.h
@@ -0,0 +1,2414 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_S390_LITHIUM_S390_H_
+#define V8_CRANKSHAFT_S390_LITHIUM_S390_H_
+
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium.h"
+#include "src/crankshaft/lithium-allocator.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LCodeGen;
+
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+ V(AccessArgumentsAt) \
+ V(AddI) \
+ V(Allocate) \
+ V(ApplyArguments) \
+ V(ArgumentsElements) \
+ V(ArgumentsLength) \
+ V(ArithmeticD) \
+ V(ArithmeticT) \
+ V(BitI) \
+ V(BoundsCheck) \
+ V(Branch) \
+ V(CallWithDescriptor) \
+ V(CallNewArray) \
+ V(CallRuntime) \
+ V(CheckArrayBufferNotNeutered) \
+ V(CheckInstanceType) \
+ V(CheckNonSmi) \
+ V(CheckMaps) \
+ V(CheckMapValue) \
+ V(CheckSmi) \
+ V(CheckValue) \
+ V(ClampDToUint8) \
+ V(ClampIToUint8) \
+ V(ClampTToUint8) \
+ V(ClassOfTestAndBranch) \
+ V(CompareNumericAndBranch) \
+ V(CmpObjectEqAndBranch) \
+ V(CmpHoleAndBranch) \
+ V(CmpMapAndBranch) \
+ V(CmpT) \
+ V(ConstantD) \
+ V(ConstantE) \
+ V(ConstantI) \
+ V(ConstantS) \
+ V(ConstantT) \
+ V(ConstructDouble) \
+ V(Context) \
+ V(DebugBreak) \
+ V(DeclareGlobals) \
+ V(Deoptimize) \
+ V(DivByConstI) \
+ V(DivByPowerOf2I) \
+ V(DivI) \
+ V(DoubleBits) \
+ V(DoubleToI) \
+ V(DoubleToSmi) \
+ V(Drop) \
+ V(Dummy) \
+ V(DummyUse) \
+ V(FlooringDivByConstI) \
+ V(FlooringDivByPowerOf2I) \
+ V(FlooringDivI) \
+ V(ForInCacheArray) \
+ V(ForInPrepareMap) \
+ V(GetCachedArrayIndex) \
+ V(Goto) \
+ V(HasCachedArrayIndexAndBranch) \
+ V(HasInPrototypeChainAndBranch) \
+ V(HasInstanceTypeAndBranch) \
+ V(InnerAllocatedObject) \
+ V(InstanceOf) \
+ V(InstructionGap) \
+ V(Integer32ToDouble) \
+ V(InvokeFunction) \
+ V(IsStringAndBranch) \
+ V(IsSmiAndBranch) \
+ V(IsUndetectableAndBranch) \
+ V(Label) \
+ V(LazyBailout) \
+ V(LoadContextSlot) \
+ V(LoadRoot) \
+ V(LoadFieldByIndex) \
+ V(LoadFunctionPrototype) \
+ V(LoadGlobalGeneric) \
+ V(LoadKeyed) \
+ V(LoadKeyedGeneric) \
+ V(LoadNamedField) \
+ V(LoadNamedGeneric) \
+ V(MathAbs) \
+ V(MathClz32) \
+ V(MathExp) \
+ V(MathFloor) \
+ V(MathFround) \
+ V(MathLog) \
+ V(MathMinMax) \
+ V(MathPowHalf) \
+ V(MathRound) \
+ V(MathSqrt) \
+ V(MaybeGrowElements) \
+ V(ModByConstI) \
+ V(ModByPowerOf2I) \
+ V(ModI) \
+ V(MulI) \
+ V(MultiplyAddD) \
+ V(MultiplySubD) \
+ V(NumberTagD) \
+ V(NumberTagI) \
+ V(NumberTagU) \
+ V(NumberUntagD) \
+ V(OsrEntry) \
+ V(Parameter) \
+ V(Power) \
+ V(Prologue) \
+ V(PushArgument) \
+ V(Return) \
+ V(SeqStringGetChar) \
+ V(SeqStringSetChar) \
+ V(ShiftI) \
+ V(SmiTag) \
+ V(SmiUntag) \
+ V(StackCheck) \
+ V(StoreCodeEntry) \
+ V(StoreContextSlot) \
+ V(StoreKeyed) \
+ V(StoreKeyedGeneric) \
+ V(StoreNamedField) \
+ V(StoreNamedGeneric) \
+ V(StringAdd) \
+ V(StringCharCodeAt) \
+ V(StringCharFromCode) \
+ V(StringCompareAndBranch) \
+ V(SubI) \
+ V(RSubI) \
+ V(TaggedToI) \
+ V(ThisFunction) \
+ V(TransitionElementsKind) \
+ V(TrapAllocationMemento) \
+ V(Typeof) \
+ V(TypeofIsAndBranch) \
+ V(Uint32ToDouble) \
+ V(UnknownOSRValue) \
+ V(WrapReceiver)
+
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
+ Opcode opcode() const final { return LInstruction::k##type; } \
+ void CompileToNative(LCodeGen* generator) final; \
+ const char* Mnemonic() const final { return mnemonic; } \
+ static L##type* cast(LInstruction* instr) { \
+ DCHECK(instr->Is##type()); \
+ return reinterpret_cast<L##type*>(instr); \
+ }
+
+#define DECLARE_HYDROGEN_ACCESSOR(type) \
+ H##type* hydrogen() const { return H##type::cast(hydrogen_value()); }
+
+class LInstruction : public ZoneObject {
+ public:
+ LInstruction()
+ : environment_(NULL),
+ hydrogen_value_(NULL),
+ bit_field_(IsCallBits::encode(false)) {}
+
+ virtual ~LInstruction() {}
+
+ virtual void CompileToNative(LCodeGen* generator) = 0;
+ virtual const char* Mnemonic() const = 0;
+ virtual void PrintTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintOutputOperandTo(StringStream* stream);
+
+ enum Opcode {
+// Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE) kNumberOfInstructions
+#undef DECLARE_OPCODE
+ };
+
+ virtual Opcode opcode() const = 0;
+
+// Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+ bool Is##type() const { return opcode() == k##type; }
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
+
+ // Declare virtual predicates for instructions that don't have
+ // an opcode.
+ virtual bool IsGap() const { return false; }
+
+ virtual bool IsControl() const { return false; }
+
+ // Try deleting this instruction if possible.
+ virtual bool TryDelete() { return false; }
+
+ void set_environment(LEnvironment* env) { environment_ = env; }
+ LEnvironment* environment() const { return environment_; }
+ bool HasEnvironment() const { return environment_ != NULL; }
+
+ void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
+ LPointerMap* pointer_map() const { return pointer_map_.get(); }
+ bool HasPointerMap() const { return pointer_map_.is_set(); }
+
+ void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
+ HValue* hydrogen_value() const { return hydrogen_value_; }
+
+ void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
+ bool IsCall() const { return IsCallBits::decode(bit_field_); }
+
+ void MarkAsSyntacticTailCall() {
+ bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
+ }
+ bool IsSyntacticTailCall() const {
+ return IsSyntacticTailCallBits::decode(bit_field_);
+ }
+
+ // Interface to the register allocator and iterators.
+ bool ClobbersTemps() const { return IsCall(); }
+ bool ClobbersRegisters() const { return IsCall(); }
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
+ return IsCall();
+ }
+
+ // Interface to the register allocator and iterators.
+ bool IsMarkedAsCall() const { return IsCall(); }
+
+ virtual bool HasResult() const = 0;
+ virtual LOperand* result() const = 0;
+
+ LOperand* FirstInput() { return InputAt(0); }
+ LOperand* Output() { return HasResult() ? result() : NULL; }
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+
+#ifdef DEBUG
+ void VerifyCall();
+#endif
+
+ virtual int InputCount() = 0;
+ virtual LOperand* InputAt(int i) = 0;
+
+ private:
+ // Iterator support.
+ friend class InputIterator;
+
+ friend class TempIterator;
+ virtual int TempCount() = 0;
+ virtual LOperand* TempAt(int i) = 0;
+
+ class IsCallBits : public BitField<bool, 0, 1> {};
+ class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
+ };
+
+ LEnvironment* environment_;
+ SetOncePointer<LPointerMap> pointer_map_;
+ HValue* hydrogen_value_;
+ int bit_field_;
+};
+
+// R = number of result operands (0 or 1).
+template <int R>
+class LTemplateResultInstruction : public LInstruction {
+ public:
+ // Allow 0 or 1 output operands.
+ STATIC_ASSERT(R == 0 || R == 1);
+ bool HasResult() const final { return R != 0 && result() != NULL; }
+ void set_result(LOperand* operand) { results_[0] = operand; }
+ LOperand* result() const override { return results_[0]; }
+
+ protected:
+ EmbeddedContainer<LOperand*, R> results_;
+};
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template <int R, int I, int T>
+class LTemplateInstruction : public LTemplateResultInstruction<R> {
+ protected:
+ EmbeddedContainer<LOperand*, I> inputs_;
+ EmbeddedContainer<LOperand*, T> temps_;
+
+ private:
+ // Iterator support.
+ int InputCount() final { return I; }
+ LOperand* InputAt(int i) final { return inputs_[i]; }
+
+ int TempCount() final { return T; }
+ LOperand* TempAt(int i) final { return temps_[i]; }
+};
+
+class LGap : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LGap(HBasicBlock* block) : block_(block) {
+ parallel_moves_[BEFORE] = NULL;
+ parallel_moves_[START] = NULL;
+ parallel_moves_[END] = NULL;
+ parallel_moves_[AFTER] = NULL;
+ }
+
+ // Can't use the DECLARE-macro here because of sub-classes.
+ bool IsGap() const override { return true; }
+ void PrintDataTo(StringStream* stream) override;
+ static LGap* cast(LInstruction* instr) {
+ DCHECK(instr->IsGap());
+ return reinterpret_cast<LGap*>(instr);
+ }
+
+ bool IsRedundant() const;
+
+ HBasicBlock* block() const { return block_; }
+
+ enum InnerPosition {
+ BEFORE,
+ START,
+ END,
+ AFTER,
+ FIRST_INNER_POSITION = BEFORE,
+ LAST_INNER_POSITION = AFTER
+ };
+
+ LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+ if (parallel_moves_[pos] == NULL) {
+ parallel_moves_[pos] = new (zone) LParallelMove(zone);
+ }
+ return parallel_moves_[pos];
+ }
+
+ LParallelMove* GetParallelMove(InnerPosition pos) {
+ return parallel_moves_[pos];
+ }
+
+ private:
+ LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+ HBasicBlock* block_;
+};
+
+class LInstructionGap final : public LGap {
+ public:
+ explicit LInstructionGap(HBasicBlock* block) : LGap(block) {}
+
+ bool HasInterestingComment(LCodeGen* gen) const override {
+ return !IsRedundant();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+class LGoto final : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LGoto(HBasicBlock* block) : block_(block) {}
+
+ bool HasInterestingComment(LCodeGen* gen) const override;
+ DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+ void PrintDataTo(StringStream* stream) override;
+ bool IsControl() const override { return true; }
+
+ int block_id() const { return block_->block_id(); }
+
+ private:
+ HBasicBlock* block_;
+};
+
+class LPrologue final : public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue")
+};
+
+class LLazyBailout final : public LTemplateInstruction<0, 0, 0> {
+ public:
+ LLazyBailout() : gap_instructions_size_(0) {}
+
+ DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+
+ void set_gap_instructions_size(int gap_instructions_size) {
+ gap_instructions_size_ = gap_instructions_size;
+ }
+ int gap_instructions_size() { return gap_instructions_size_; }
+
+ private:
+ int gap_instructions_size_;
+};
+
+class LDummy final : public LTemplateInstruction<1, 0, 0> {
+ public:
+ LDummy() {}
+ DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
+};
+
+class LDummyUse final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDummyUse(LOperand* value) { inputs_[0] = value; }
+ DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
+};
+
+class LDeoptimize final : public LTemplateInstruction<0, 0, 0> {
+ public:
+ bool IsControl() const override { return true; }
+ DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+ DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
+};
+
+class LLabel final : public LGap {
+ public:
+ explicit LLabel(HBasicBlock* block) : LGap(block), replacement_(NULL) {}
+
+ bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+ DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+ void PrintDataTo(StringStream* stream) override;
+
+ int block_id() const { return block()->block_id(); }
+ bool is_loop_header() const { return block()->IsLoopHeader(); }
+ bool is_osr_entry() const { return block()->is_osr_entry(); }
+ Label* label() { return &label_; }
+ LLabel* replacement() const { return replacement_; }
+ void set_replacement(LLabel* label) { replacement_ = label; }
+ bool HasReplacement() const { return replacement_ != NULL; }
+
+ private:
+ Label label_;
+ LLabel* replacement_;
+};
+
+class LParameter final : public LTemplateInstruction<1, 0, 0> {
+ public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
+
+class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
+ public:
+ bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+ DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
+
+template <int I, int T>
+class LControlInstruction : public LTemplateInstruction<0, I, T> {
+ public:
+ LControlInstruction() : false_label_(NULL), true_label_(NULL) {}
+
+ bool IsControl() const final { return true; }
+
+ int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+ HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+
+ int TrueDestination(LChunk* chunk) {
+ return chunk->LookupDestination(true_block_id());
+ }
+ int FalseDestination(LChunk* chunk) {
+ return chunk->LookupDestination(false_block_id());
+ }
+
+ Label* TrueLabel(LChunk* chunk) {
+ if (true_label_ == NULL) {
+ true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+ }
+ return true_label_;
+ }
+ Label* FalseLabel(LChunk* chunk) {
+ if (false_label_ == NULL) {
+ false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+ }
+ return false_label_;
+ }
+
+ protected:
+ int true_block_id() { return SuccessorAt(0)->block_id(); }
+ int false_block_id() { return SuccessorAt(1)->block_id(); }
+
+ private:
+ HControlInstruction* hydrogen() {
+ return HControlInstruction::cast(this->hydrogen_value());
+ }
+
+ Label* false_label_;
+ Label* true_label_;
+};
+
+class LWrapReceiver final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LWrapReceiver(LOperand* receiver, LOperand* function) {
+ inputs_[0] = receiver;
+ inputs_[1] = function;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+ DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
+
+ LOperand* receiver() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+};
+
+class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
+ public:
+ LApplyArguments(LOperand* function, LOperand* receiver, LOperand* length,
+ LOperand* elements) {
+ inputs_[0] = function;
+ inputs_[1] = receiver;
+ inputs_[2] = length;
+ inputs_[3] = elements;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+ DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* receiver() { return inputs_[1]; }
+ LOperand* length() { return inputs_[2]; }
+ LOperand* elements() { return inputs_[3]; }
+};
+
+class LAccessArgumentsAt final : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
+ inputs_[0] = arguments;
+ inputs_[1] = length;
+ inputs_[2] = index;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+ LOperand* arguments() { return inputs_[0]; }
+ LOperand* length() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+
+ void PrintDataTo(StringStream* stream) override;
+};
+
+class LArgumentsLength final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LArgumentsLength(LOperand* elements) { inputs_[0] = elements; }
+
+ LOperand* elements() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+class LArgumentsElements final : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+ DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
+};
+
+class LModByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+class LModByConstI final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByConstI(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+class LModI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LModI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
+
+class LDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+class LDivByConstI final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByConstI(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+class LDivI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LDivI(LOperand* dividend, LOperand* divisor) {
+ inputs_[0] = dividend;
+ inputs_[1] = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ LOperand* divisor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+ DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+};
+
+class LFlooringDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+ "flooring-div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+class LFlooringDivByConstI final : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+class LFlooringDivI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LFlooringDivI(LOperand* dividend, LOperand* divisor) {
+ inputs_[0] = dividend;
+ inputs_[1] = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ LOperand* divisor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
+class LMulI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMulI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+// Instruction for computing multiplier * multiplicand + addend.
+class LMultiplyAddD final : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LMultiplyAddD(LOperand* addend, LOperand* multiplier,
+ LOperand* multiplicand) {
+ inputs_[0] = addend;
+ inputs_[1] = multiplier;
+ inputs_[2] = multiplicand;
+ }
+
+ LOperand* addend() { return inputs_[0]; }
+ LOperand* multiplier() { return inputs_[1]; }
+ LOperand* multiplicand() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d")
+};
+
+// Instruction for computing minuend - multiplier * multiplicand.
+class LMultiplySubD final : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LMultiplySubD(LOperand* minuend, LOperand* multiplier,
+ LOperand* multiplicand) {
+ inputs_[0] = minuend;
+ inputs_[1] = multiplier;
+ inputs_[2] = multiplicand;
+ }
+
+ LOperand* minuend() { return inputs_[0]; }
+ LOperand* multiplier() { return inputs_[1]; }
+ LOperand* multiplicand() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MultiplySubD, "multiply-sub-d")
+};
+
+class LDebugBreak final : public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
+};
+
+class LCompareNumericAndBranch final : public LControlInstruction<2, 0> {
+ public:
+ LCompareNumericAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
+ "compare-numeric-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
+
+ Token::Value op() const { return hydrogen()->token(); }
+ bool is_double() const { return hydrogen()->representation().IsDouble(); }
+
+ void PrintDataTo(StringStream* stream) override;
+};
+
+class LMathFloor final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathFloor(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+class LMathRound final : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LMathRound(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+class LMathFround final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathFround(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround")
+};
+
+class LMathAbs final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMathAbs(LOperand* context, LOperand* value) {
+ inputs_[1] = context;
+ inputs_[0] = value;
+ }
+
+ LOperand* context() { return inputs_[1]; }
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+class LMathLog final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathLog(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
+};
+
+class LMathClz32 final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathClz32(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
+};
+
+class LMathExp final : public LTemplateInstruction<1, 1, 3> {
+ public:
+ LMathExp(LOperand* value, LOperand* double_temp, LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = double_temp;
+ ExternalReference::InitializeMathExpData();
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* double_temp() { return temps_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+};
+
+class LMathSqrt final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathSqrt(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
+};
+
+class LMathPowHalf final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathPowHalf(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
+};
+
+class LCmpObjectEqAndBranch final : public LControlInstruction<2, 0> {
+ public:
+ LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
+};
+
+class LCmpHoleAndBranch final : public LControlInstruction<1, 0> {
+ public:
+ explicit LCmpHoleAndBranch(LOperand* object) { inputs_[0] = object; }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+class LIsStringAndBranch final : public LControlInstruction<1, 1> {
+ public:
+ LIsStringAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+ void PrintDataTo(StringStream* stream) override;
+};
+
+class LIsSmiAndBranch final : public LControlInstruction<1, 0> {
+ public:
+ explicit LIsSmiAndBranch(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
+
+ void PrintDataTo(StringStream* stream) override;
+};
+
+class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> {
+ public:
+ explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+ "is-undetectable-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
+
+ void PrintDataTo(StringStream* stream) override;
+};
+
+class LStringCompareAndBranch final : public LControlInstruction<3, 0> {
+ public:
+ LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+ "string-compare-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+ Token::Value op() const { return hydrogen()->token(); }
+
+ void PrintDataTo(StringStream* stream) override;
+};
+
+class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 0> {
+ public:
+ explicit LHasInstanceTypeAndBranch(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+ "has-instance-type-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
+
+ void PrintDataTo(StringStream* stream) override;
+};
+
+class LGetCachedArrayIndex final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LGetCachedArrayIndex(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
+ DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
+};
+
+class LHasCachedArrayIndexAndBranch final : public LControlInstruction<1, 0> {
+ public:
+ explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+ "has-cached-array-index-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
+
+ void PrintDataTo(StringStream* stream) override;
+};
+
+class LClassOfTestAndBranch final : public LControlInstruction<1, 1> {
+ public:
+ LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch, "class-of-test-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
+
+ void PrintDataTo(StringStream* stream) override;
+};
+
+class LCmpT final : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+ DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+
+ Token::Value op() const { return hydrogen()->token(); }
+};
+
+class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() const { return inputs_[0]; }
+ LOperand* left() const { return inputs_[1]; }
+ LOperand* right() const { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
+class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
+ public:
+ LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
+ inputs_[0] = object;
+ inputs_[1] = prototype;
+ }
+
+ LOperand* object() const { return inputs_[0]; }
+ LOperand* prototype() const { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch,
+ "has-in-prototype-chain-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch)
+};
+
+class LBoundsCheck final : public LTemplateInstruction<0, 2, 0> {
+ public:
+ LBoundsCheck(LOperand* index, LOperand* length) {
+ inputs_[0] = index;
+ inputs_[1] = length;
+ }
+
+ LOperand* index() { return inputs_[0]; }
+ LOperand* length() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+ DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
+};
+
+class LBitI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LBitI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ Token::Value op() const { return hydrogen()->op(); }
+
+ DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+ DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+};
+
+class LShiftI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+ : op_(op), can_deopt_(can_deopt) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ Token::Value op() const { return op_; }
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ bool can_deopt() const { return can_deopt_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+ private:
+ Token::Value op_;
+ bool can_deopt_;
+};
+
+class LSubI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LSubI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+ DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+class LRSubI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LRSubI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(RSubI, "rsub-i")
+ DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+class LConstantI final : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ int32_t value() const { return hydrogen()->Integer32Value(); }
+};
+
+class LConstantS final : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
+};
+
+class LConstantD final : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ double value() const { return hydrogen()->DoubleValue(); }
+
+ uint64_t bits() const { return hydrogen()->DoubleValueAsBits(); }
+};
+
+class LConstantE final : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ ExternalReference value() const {
+ return hydrogen()->ExternalReferenceValue();
+ }
+};
+
+class LConstantT final : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ Handle<Object> value(Isolate* isolate) const {
+ return hydrogen()->handle(isolate);
+ }
+};
+
+class LBranch final : public LControlInstruction<1, 0> {
+ public:
+ explicit LBranch(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+ DECLARE_HYDROGEN_ACCESSOR(Branch)
+
+ void PrintDataTo(StringStream* stream) override;
+};
+
+class LCmpMapAndBranch final : public LControlInstruction<1, 1> {
+ public:
+ LCmpMapAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareMap)
+
+ Handle<Map> map() const { return hydrogen()->map().handle(); }
+};
+
+class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LSeqStringGetChar(LOperand* string, LOperand* index) {
+ inputs_[0] = string;
+ inputs_[1] = index;
+ }
+
+ LOperand* string() const { return inputs_[0]; }
+ LOperand* index() const { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
+};
+
+class LSeqStringSetChar final : public LTemplateInstruction<1, 4, 0> {
+ public:
+ LSeqStringSetChar(LOperand* context, LOperand* string, LOperand* index,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
+ inputs_[3] = value;
+ }
+
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+};
+
+class LAddI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAddI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+ DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+class LMathMinMax final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMathMinMax(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
+ DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
+};
+
+class LPower final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LPower(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+ DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+class LArithmeticD final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LArithmeticD(Token::Value op, LOperand* left, LOperand* right) : op_(op) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ Token::Value op() const { return op_; }
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ Opcode opcode() const override { return LInstruction::kArithmeticD; }
+ void CompileToNative(LCodeGen* generator) override;
+ const char* Mnemonic() const override;
+
+ private:
+ Token::Value op_;
+};
+
+class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LArithmeticT(Token::Value op, LOperand* context, LOperand* left,
+ LOperand* right)
+ : op_(op) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+ Token::Value op() const { return op_; }
+
+ Opcode opcode() const override { return LInstruction::kArithmeticT; }
+ void CompileToNative(LCodeGen* generator) override;
+ const char* Mnemonic() const override;
+
+ DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+
+ private:
+ Token::Value op_;
+};
+
+class LReturn final : public LTemplateInstruction<0, 3, 0> {
+ public:
+ LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
+ inputs_[0] = value;
+ inputs_[1] = context;
+ inputs_[2] = parameter_count;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ bool has_constant_parameter_count() {
+ return parameter_count()->IsConstantOperand();
+ }
+ LConstantOperand* constant_parameter_count() {
+ DCHECK(has_constant_parameter_count());
+ return LConstantOperand::cast(parameter_count());
+ }
+ LOperand* parameter_count() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+};
+
+class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadNamedField(LOperand* object) { inputs_[0] = object; }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+class LLoadNamedGeneric final : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ temps_[0] = vector;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* temp_vector() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+ Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadFunctionPrototype(LOperand* function) { inputs_[0] = function; }
+
+ LOperand* function() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
+ DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
+};
+
+class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+ DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+ Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
+ inputs_[2] = backing_store_owner;
+ }
+
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* backing_store_owner() { return inputs_[2]; }
+ ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
+
+ void PrintDataTo(StringStream* stream) override;
+ uint32_t base_offset() const { return hydrogen()->base_offset(); }
+};
+
+class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
+ public:
+ LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+ LOperand* vector) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = key;
+ temps_[0] = vector;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
+ LOperand* temp_vector() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
+};
+
+class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
+ LOperand* vector) {
+ inputs_[0] = context;
+ inputs_[1] = global_object;
+ temps_[0] = vector;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* global_object() { return inputs_[1]; }
+ LOperand* temp_vector() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+ Handle<Object> name() const { return hydrogen()->name(); }
+ TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
+};
+
+class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadContextSlot(LOperand* context) { inputs_[0] = context; }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+ int slot_index() { return hydrogen()->slot_index(); }
+
+ void PrintDataTo(StringStream* stream) override;
+};
+
+class LStoreContextSlot final : public LTemplateInstruction<0, 2, 0> {
+ public:
+ LStoreContextSlot(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+ int slot_index() { return hydrogen()->slot_index(); }
+
+ void PrintDataTo(StringStream* stream) override;
+};
+
+class LPushArgument final : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LPushArgument(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
+};
+
+class LDrop final : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LDrop(int count) : count_(count) {}
+
+ int count() const { return count_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
+
+ private:
+ int count_;
+};
+
+class LStoreCodeEntry final : public LTemplateInstruction<0, 2, 0> {
+ public:
+ LStoreCodeEntry(LOperand* function, LOperand* code_object) {
+ inputs_[0] = function;
+ inputs_[1] = code_object;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* code_object() { return inputs_[1]; }
+
+ void PrintDataTo(StringStream* stream) override;
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+ DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+class LInnerAllocatedObject final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
+ inputs_[0] = base_object;
+ inputs_[1] = offset;
+ }
+
+ LOperand* base_object() const { return inputs_[0]; }
+ LOperand* offset() const { return inputs_[1]; }
+
+ void PrintDataTo(StringStream* stream) override;
+
+ DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
+};
+
+class LThisFunction final : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+ DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
+};
+
+class LContext final : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+ DECLARE_HYDROGEN_ACCESSOR(Context)
+};
+
+class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LDeclareGlobals(LOperand* context) { inputs_[0] = context; }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
+ DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
+};
+
+class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
+ public:
+ LCallWithDescriptor(CallInterfaceDescriptor descriptor,
+ const ZoneList<LOperand*>& operands, Zone* zone)
+ : descriptor_(descriptor),
+ inputs_(descriptor.GetRegisterParameterCount() +
+ kImplicitRegisterParameterCount,
+ zone) {
+ DCHECK(descriptor.GetRegisterParameterCount() +
+ kImplicitRegisterParameterCount ==
+ operands.length());
+ inputs_.AddAll(operands, zone);
+ }
+
+ LOperand* target() const { return inputs_[0]; }
+
+ const CallInterfaceDescriptor descriptor() { return descriptor_; }
+
+ DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
+ // The target and context are passed as implicit parameters that are not
+ // explicitly listed in the descriptor.
+ static const int kImplicitRegisterParameterCount = 2;
+
+ private:
+ DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+
+ void PrintDataTo(StringStream* stream) override;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+
+ CallInterfaceDescriptor descriptor_;
+ ZoneList<LOperand*> inputs_;
+
+ // Iterator support.
+ int InputCount() final { return inputs_.length(); }
+ LOperand* InputAt(int i) final { return inputs_[i]; }
+
+ int TempCount() final { return 0; }
+ LOperand* TempAt(int i) final { return NULL; }
+};
+
+class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LInvokeFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+ DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+ void PrintDataTo(StringStream* stream) override;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LCallNewArray(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+ DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+ void PrintDataTo(StringStream* stream) override;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+class LCallRuntime final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallRuntime(LOperand* context) { inputs_[0] = context; }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+ DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+ bool ClobbersDoubleRegisters(Isolate* isolate) const override {
+ return save_doubles() == kDontSaveFPRegs;
+ }
+
+ const Runtime::Function* function() const { return hydrogen()->function(); }
+ int arity() const { return hydrogen()->argument_count(); }
+ SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
+};
+
+class LInteger32ToDouble final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInteger32ToDouble(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+class LUint32ToDouble final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LUint32ToDouble(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
+};
+
+class LNumberTagI final : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
+};
+
+class LNumberTagU final : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
+};
+
+class LNumberTagD final : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+class LDoubleToSmi final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleToSmi(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+ bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+// Sometimes truncating conversion from a tagged value to an int32.
+class LDoubleToI final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleToI(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+ bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+// Truncating conversion from a tagged value to an int32.
+class LTaggedToI final : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LTaggedToI(LOperand* value, LOperand* temp, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+
+ bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+class LSmiTag final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LSmiTag(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+class LNumberUntagD final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LNumberUntagD(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+class LSmiUntag final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LSmiUntag(LOperand* value, bool needs_check) : needs_check_(needs_check) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ bool needs_check() const { return needs_check_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
+ private:
+ bool needs_check_;
+};
+
+class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> {
+ public:
+ LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
+ inputs_[0] = object;
+ inputs_[1] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+
+ void PrintDataTo(StringStream* stream) override;
+
+ Representation representation() const {
+ return hydrogen()->field_representation();
+ }
+};
+
+class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
+ public:
+ LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
+ LOperand* slot, LOperand* vector) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = value;
+ temps_[0] = slot;
+ temps_[1] = vector;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+ LOperand* temp_slot() { return temps_[0]; }
+ LOperand* temp_vector() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+
+ void PrintDataTo(StringStream* stream) override;
+
+ Handle<Object> name() const { return hydrogen()->name(); }
+ LanguageMode language_mode() { return hydrogen()->language_mode(); }
+};
+
+class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
+ public:
+ LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
+ LOperand* backing_store_owner) {
+ inputs_[0] = object;
+ inputs_[1] = key;
+ inputs_[2] = value;
+ inputs_[3] = backing_store_owner;
+ }
+
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+ LOperand* backing_store_owner() { return inputs_[3]; }
+ ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
+
+ void PrintDataTo(StringStream* stream) override;
+ bool NeedsCanonicalization() {
+ if (hydrogen()->value()->IsAdd() || hydrogen()->value()->IsSub() ||
+ hydrogen()->value()->IsMul() || hydrogen()->value()->IsDiv()) {
+ return false;
+ }
+ return hydrogen()->NeedsCanonicalization();
+ }
+ uint32_t base_offset() const { return hydrogen()->base_offset(); }
+};
+
+class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
+ public:
+ LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+ LOperand* value, LOperand* slot, LOperand* vector) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = key;
+ inputs_[3] = value;
+ temps_[0] = slot;
+ temps_[1] = vector;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
+ LOperand* temp_slot() { return temps_[0]; }
+ LOperand* temp_vector() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
+
+ void PrintDataTo(StringStream* stream) override;
+
+ LanguageMode language_mode() { return hydrogen()->language_mode(); }
+};
+
+class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 1> {
+ public:
+ LTransitionElementsKind(LOperand* object, LOperand* context,
+ LOperand* new_map_temp) {
+ inputs_[0] = object;
+ inputs_[1] = context;
+ temps_[0] = new_map_temp;
+ }
+
+ LOperand* context() { return inputs_[1]; }
+ LOperand* object() { return inputs_[0]; }
+ LOperand* new_map_temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+ "transition-elements-kind")
+ DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+ void PrintDataTo(StringStream* stream) override;
+
+ Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+ Handle<Map> transitioned_map() {
+ return hydrogen()->transitioned_map().handle();
+ }
+ ElementsKind from_kind() { return hydrogen()->from_kind(); }
+ ElementsKind to_kind() { return hydrogen()->to_kind(); }
+};
+
+class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 2> {
+ public:
+ LTrapAllocationMemento(LOperand* object, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = object;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento, "trap-allocation-memento")
+};
+
+class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
+ public:
+ LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
+ LOperand* key, LOperand* current_capacity) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = elements;
+ inputs_[3] = key;
+ inputs_[4] = current_capacity;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* elements() { return inputs_[2]; }
+ LOperand* key() { return inputs_[3]; }
+ LOperand* current_capacity() { return inputs_[4]; }
+
+ DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
+ DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
+};
+
+class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+ DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+};
+
+class LStringCharCodeAt final : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+};
+
+class LStringCharFromCode final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ explicit LStringCharFromCode(LOperand* context, LOperand* char_code) {
+ inputs_[0] = context;
+ inputs_[1] = char_code;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* char_code() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
+};
+
+class LCheckValue final : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckValue(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+ DECLARE_HYDROGEN_ACCESSOR(CheckValue)
+};
+
+class LCheckArrayBufferNotNeutered final
+ : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckArrayBufferNotNeutered(LOperand* view) { inputs_[0] = view; }
+
+ LOperand* view() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckArrayBufferNotNeutered,
+ "check-array-buffer-not-neutered")
+ DECLARE_HYDROGEN_ACCESSOR(CheckArrayBufferNotNeutered)
+};
+
+class LCheckInstanceType final : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckInstanceType(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+ DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+};
+
+class LCheckMaps final : public LTemplateInstruction<0, 1, 1> {
+ public:
+ explicit LCheckMaps(LOperand* value = NULL, LOperand* temp = NULL) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
+ DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
+};
+
+class LCheckSmi final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCheckSmi(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
+
+class LCheckNonSmi final : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckNonSmi(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+ DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
+};
+
+class LClampDToUint8 final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LClampDToUint8(LOperand* unclamped) { inputs_[0] = unclamped; }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+class LClampIToUint8 final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LClampIToUint8(LOperand* unclamped) { inputs_[0] = unclamped; }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+class LClampTToUint8 final : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LClampTToUint8(LOperand* unclamped, LOperand* temp) {
+ inputs_[0] = unclamped;
+ temps_[0] = temp;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
+};
+
+class LDoubleBits final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleBits(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+ DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+class LConstructDouble final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LConstructDouble(LOperand* hi, LOperand* lo) {
+ inputs_[0] = hi;
+ inputs_[1] = lo;
+ }
+
+ LOperand* hi() { return inputs_[0]; }
+ LOperand* lo() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+class LAllocate final : public LTemplateInstruction<1, 2, 2> {
+ public:
+ LAllocate(LOperand* context, LOperand* size, LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = context;
+ inputs_[1] = size;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* size() { return inputs_[1]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+ DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
+class LTypeof final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LTypeof(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+class LTypeofIsAndBranch final : public LControlInstruction<1, 0> {
+ public:
+ explicit LTypeofIsAndBranch(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
+
+ Handle<String> type_literal() { return hydrogen()->type_literal(); }
+
+ void PrintDataTo(StringStream* stream) override;
+};
+
+class LOsrEntry final : public LTemplateInstruction<0, 0, 0> {
+ public:
+ LOsrEntry() {}
+
+ bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+ DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+};
+
+class LStackCheck final : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LStackCheck(LOperand* context) { inputs_[0] = context; }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+ DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+ Label* done_label() { return &done_label_; }
+
+ private:
+ Label done_label_;
+};
+
+class LForInPrepareMap final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LForInPrepareMap(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
+};
+
+class LForInCacheArray final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LForInCacheArray(LOperand* map) { inputs_[0] = map; }
+
+ LOperand* map() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
+
+ int idx() { return HForInCacheArray::cast(this->hydrogen_value())->idx(); }
+};
+
+class LCheckMapValue final : public LTemplateInstruction<0, 2, 0> {
+ public:
+ LCheckMapValue(LOperand* value, LOperand* map) {
+ inputs_[0] = value;
+ inputs_[1] = map;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* map() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
+};
+
+class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadFieldByIndex(LOperand* object, LOperand* index) {
+ inputs_[0] = object;
+ inputs_[1] = index;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
+};
+
+class LChunkBuilder;
+class LPlatformChunk final : public LChunk {
+ public:
+ LPlatformChunk(CompilationInfo* info, HGraph* graph) : LChunk(info, graph) {}
+
+ int GetNextSpillIndex(RegisterKind kind);
+ LOperand* GetNextSpillSlot(RegisterKind kind);
+};
+
+class LChunkBuilder final : public LChunkBuilderBase {
+ public:
+ LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
+ : LChunkBuilderBase(info, graph),
+ current_instruction_(NULL),
+ current_block_(NULL),
+ next_block_(NULL),
+ allocator_(allocator) {}
+
+ // Build the sequence for the graph.
+ LPlatformChunk* Build();
+
+// Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
+ HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
+ LInstruction* DoMultiplySub(HValue* minuend, HMul* mul);
+ LInstruction* DoRSub(HSub* instr);
+
+ static bool HasMagicNumberForDivisor(int32_t divisor);
+
+ LInstruction* DoMathFloor(HUnaryMathOperation* instr);
+ LInstruction* DoMathRound(HUnaryMathOperation* instr);
+ LInstruction* DoMathFround(HUnaryMathOperation* instr);
+ LInstruction* DoMathAbs(HUnaryMathOperation* instr);
+ LInstruction* DoMathLog(HUnaryMathOperation* instr);
+ LInstruction* DoMathExp(HUnaryMathOperation* instr);
+ LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
+ LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+ LInstruction* DoMathClz32(HUnaryMathOperation* instr);
+ LInstruction* DoDivByPowerOf2I(HDiv* instr);
+ LInstruction* DoDivByConstI(HDiv* instr);
+ LInstruction* DoDivI(HDiv* instr);
+ LInstruction* DoModByPowerOf2I(HMod* instr);
+ LInstruction* DoModByConstI(HMod* instr);
+ LInstruction* DoModI(HMod* instr);
+ LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
+
+ private:
+ // Methods for getting operands for Use / Define / Temp.
+ LUnallocated* ToUnallocated(Register reg);
+ LUnallocated* ToUnallocated(DoubleRegister reg);
+
+ // Methods for setting up define-use relationships.
+ MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
+ MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
+ MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
+ DoubleRegister fixed_register);
+
+ // A value that is guaranteed to be allocated to a register.
+ // Operand created by UseRegister is guaranteed to be live until the end of
+ // instruction. This means that register allocator will not reuse it's
+ // register for any other operand inside instruction.
+ // Operand created by UseRegisterAtStart is guaranteed to be live only at
+ // instruction start. Register allocator is free to assign the same register
+ // to some other operand used inside instruction (i.e. temporary or
+ // output).
+ MUST_USE_RESULT LOperand* UseRegister(HValue* value);
+ MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
+
+ // An input operand in a register that may be trashed.
+ MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
+
+ // An input operand in a register or stack slot.
+ MUST_USE_RESULT LOperand* Use(HValue* value);
+ MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
+
+ // An input operand in a register, stack slot or a constant operand.
+ MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
+ MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
+
+ // An input operand in a register or a constant operand.
+ MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
+ MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+ // An input operand in a constant operand.
+ MUST_USE_RESULT LOperand* UseConstant(HValue* value);
+
+ // An input operand in register, stack slot or a constant operand.
+ // Will not be moved to a register even if one is freely available.
+ MUST_USE_RESULT LOperand* UseAny(HValue* value) override;
+
+ // Temporary operand that must be in a register.
+ MUST_USE_RESULT LUnallocated* TempRegister();
+ MUST_USE_RESULT LUnallocated* TempDoubleRegister();
+ MUST_USE_RESULT LOperand* FixedTemp(Register reg);
+ MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
+
+ // Methods for setting up define-use relationships.
+ // Return the same instruction that they are passed.
+ LInstruction* Define(LTemplateResultInstruction<1>* instr,
+ LUnallocated* result);
+ LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
+ int index);
+ LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr, Register reg);
+ LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
+ DoubleRegister reg);
+ LInstruction* AssignEnvironment(LInstruction* instr);
+ LInstruction* AssignPointerMap(LInstruction* instr);
+
+ enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+ // By default we assume that instruction sequences generated for calls
+ // cannot deoptimize eagerly and we do not attach environment to this
+ // instruction.
+ LInstruction* MarkAsCall(
+ LInstruction* instr, HInstruction* hinstr,
+ CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
+
+ void VisitInstruction(HInstruction* current);
+ void AddInstruction(LInstruction* instr, HInstruction* current);
+
+ void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
+ LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+ LInstruction* DoArithmeticD(Token::Value op,
+ HArithmeticBinaryOperation* instr);
+ LInstruction* DoArithmeticT(Token::Value op, HBinaryOperation* instr);
+
+ HInstruction* current_instruction_;
+ HBasicBlock* current_block_;
+ HBasicBlock* next_block_;
+ LAllocator* allocator_;
+
+ DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
+};
+
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_CONCRETE_INSTRUCTION
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CRANKSHAFT_S390_LITHIUM_S390_H_
diff --git a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
index 31ff12537e..28dfe8a8dd 100644
--- a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
@@ -125,7 +125,7 @@ bool LCodeGen::GeneratePrologue() {
DCHECK(!frame_is_built_);
frame_is_built_ = true;
if (info()->IsStub()) {
- __ StubPrologue();
+ __ StubPrologue(StackFrame::STUB);
} else {
__ Prologue(info()->GeneratePreagedPrologue());
}
@@ -306,34 +306,27 @@ bool LCodeGen::GenerateJumpTable() {
if (needs_frame.is_linked()) {
__ bind(&needs_frame);
/* stack layout
- 4: return address <-- rsp
- 3: garbage
+ 3: return address <-- rsp
2: garbage
1: garbage
0: garbage
*/
- // Reserve space for context and stub marker.
- __ subp(rsp, Immediate(2 * kPointerSize));
- __ Push(MemOperand(rsp, 2 * kPointerSize)); // Copy return address.
- __ Push(kScratchRegister); // Save entry address for ret(0)
+ // Reserve space for stub marker.
+ __ subp(rsp, Immediate(TypedFrameConstants::kFrameTypeSize));
+ __ Push(MemOperand(
+ rsp, TypedFrameConstants::kFrameTypeSize)); // Copy return address.
+ __ Push(kScratchRegister);
/* stack layout
- 4: return address
- 3: garbage
+ 3: return address
2: garbage
1: return address
0: entry address <-- rsp
*/
- // Remember context pointer.
- __ movp(kScratchRegister,
- MemOperand(rbp, StandardFrameConstants::kContextOffset));
- // Save context pointer into the stack frame.
- __ movp(MemOperand(rsp, 3 * kPointerSize), kScratchRegister);
-
// Create a stack frame.
- __ movp(MemOperand(rsp, 4 * kPointerSize), rbp);
- __ leap(rbp, MemOperand(rsp, 4 * kPointerSize));
+ __ movp(MemOperand(rsp, 3 * kPointerSize), rbp);
+ __ leap(rbp, MemOperand(rsp, 3 * kPointerSize));
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
@@ -342,8 +335,7 @@ bool LCodeGen::GenerateJumpTable() {
__ Move(MemOperand(rsp, 2 * kPointerSize), Smi::FromInt(StackFrame::STUB));
/* stack layout
- 4: old rbp
- 3: context pointer
+ 3: old rbp
2: stub marker
1: return address
0: entry address <-- rsp
@@ -379,9 +371,8 @@ bool LCodeGen::GenerateDeferredCode() {
frame_is_built_ = true;
// Build the frame in such a way that esi isn't trashed.
__ pushq(rbp); // Caller's frame pointer.
- __ Push(Operand(rbp, StandardFrameConstants::kContextOffset));
__ Push(Smi::FromInt(StackFrame::STUB));
- __ leap(rbp, Operand(rsp, 2 * kPointerSize));
+ __ leap(rbp, Operand(rsp, TypedFrameConstants::kFixedFrameSizeFromFp));
Comment(";;; Deferred code");
}
code->Generate();
@@ -2012,16 +2003,17 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
EmitBranch(instr, not_equal);
} else {
- ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+ ToBooleanICStub::Types expected =
+ instr->hydrogen()->expected_input_types();
// Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
- if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+ if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
// undefined -> false.
__ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
__ j(equal, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+ if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
// true -> true.
__ CompareRoot(reg, Heap::kTrueValueRootIndex);
__ j(equal, instr->TrueLabel(chunk_));
@@ -2029,13 +2021,13 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ CompareRoot(reg, Heap::kFalseValueRootIndex);
__ j(equal, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+ if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
// 'null' -> false.
__ CompareRoot(reg, Heap::kNullValueRootIndex);
__ j(equal, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::SMI)) {
+ if (expected.Contains(ToBooleanICStub::SMI)) {
// Smis: 0 -> false, all other -> true.
__ Cmp(reg, Smi::FromInt(0));
__ j(equal, instr->FalseLabel(chunk_));
@@ -2058,13 +2050,13 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
}
- if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+ if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
// spec object -> true.
__ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
__ j(above_equal, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::STRING)) {
+ if (expected.Contains(ToBooleanICStub::STRING)) {
// String value -> false iff empty.
Label not_string;
__ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
@@ -2075,19 +2067,19 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ bind(&not_string);
}
- if (expected.Contains(ToBooleanStub::SYMBOL)) {
+ if (expected.Contains(ToBooleanICStub::SYMBOL)) {
// Symbol value -> true.
__ CmpInstanceType(map, SYMBOL_TYPE);
__ j(equal, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+ if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
// SIMD value -> true.
__ CmpInstanceType(map, SIMD128_VALUE_TYPE);
__ j(equal, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
Label not_heap_number;
__ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
@@ -2317,11 +2309,10 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->left()).is(rdx));
DCHECK(ToRegister(instr->right()).is(rax));
- Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+ Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
- __ testp(rax, rax);
-
- EmitBranch(instr, TokenToCondition(instr->op(), false));
+ __ CompareRoot(rax, Heap::kTrueValueRootIndex);
+ EmitBranch(instr, equal);
}
@@ -3011,11 +3002,11 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
if (instr->hydrogen()->from_inlined()) {
__ leap(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
- } else {
+ } else if (instr->hydrogen()->arguments_adaptor()) {
// Check for arguments adapter frame.
Label done, adapted;
__ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
+ __ Cmp(Operand(result, CommonFrameConstants::kContextOrFrameTypeOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adapted, Label::kNear);
@@ -3030,6 +3021,8 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
// Result is the frame pointer for the frame if not adapted and for the real
// frame below the adaptor frame if adapted.
__ bind(&done);
+ } else {
+ __ movp(result, rbp);
}
}
@@ -3141,13 +3134,24 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// Invoke the function.
__ bind(&invoke);
+
+ InvokeFlag flag = CALL_FUNCTION;
+ if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
+ DCHECK(!info()->saves_caller_doubles());
+ // TODO(ishell): drop current frame before pushing arguments to the stack.
+ flag = JUMP_FUNCTION;
+ ParameterCount actual(rax);
+ // It is safe to use rbx, rcx and r8 as scratch registers here given that
+ // 1) we are not going to return to caller function anyway,
+ // 2) rbx (expected number of arguments) will be initialized below.
+ PrepareForTailCall(actual, rbx, rcx, r8);
+ }
+
DCHECK(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
+ SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount actual(rax);
- __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
- safepoint_generator);
+ __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
}
@@ -3186,10 +3190,9 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
CallRuntime(Runtime::kDeclareGlobals, instr);
}
-
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr) {
+ bool is_tail_call, LInstruction* instr) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
bool can_invoke_directly =
@@ -3206,23 +3209,36 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
__ Set(rax, arity);
+ bool is_self_call = function.is_identical_to(info()->closure());
+
// Invoke function.
- if (function.is_identical_to(info()->closure())) {
- __ CallSelf();
+ if (is_self_call) {
+ Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
+ if (is_tail_call) {
+ __ Jump(self, RelocInfo::CODE_TARGET);
+ } else {
+ __ Call(self, RelocInfo::CODE_TARGET);
+ }
} else {
- __ Call(FieldOperand(function_reg, JSFunction::kCodeEntryOffset));
+ Operand target = FieldOperand(function_reg, JSFunction::kCodeEntryOffset);
+ if (is_tail_call) {
+ __ Jump(target);
+ } else {
+ __ Call(target);
+ }
}
- // Set up deoptimization.
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
+ if (!is_tail_call) {
+ // Set up deoptimization.
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
+ }
} else {
// We need to adapt arguments.
- SafepointGenerator generator(
- this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(arity);
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount actual(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(function_reg, no_reg, expected, count, CALL_FUNCTION,
- generator);
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(function_reg, no_reg, expected, actual, flag, generator);
}
}
@@ -3264,39 +3280,6 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
}
-void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
- DCHECK(ToRegister(instr->function()).is(rdi));
- DCHECK(ToRegister(instr->result()).is(rax));
-
- // Change context.
- __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Always initialize new target and number of actual arguments.
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ Set(rax, instr->arity());
-
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-
- bool is_self_call = false;
- if (instr->hydrogen()->function()->IsConstant()) {
- Handle<JSFunction> jsfun = Handle<JSFunction>::null();
- HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
- jsfun = Handle<JSFunction>::cast(fun_const->handle(isolate()));
- is_self_call = jsfun.is_identical_to(info()->closure());
- }
-
- if (is_self_call) {
- __ CallSelf();
- } else {
- Operand target = FieldOperand(rdi, JSFunction::kCodeEntryOffset);
- generator.BeforeCall(__ CallSize(target));
- __ Call(target);
- }
- generator.AfterCall();
-}
-
-
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
@@ -3304,8 +3287,19 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
Label slow, allocated, done;
- Register tmp = input_reg.is(rax) ? rcx : rax;
- Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;
+ uint32_t available_regs = rax.bit() | rcx.bit() | rdx.bit() | rbx.bit();
+ available_regs &= ~input_reg.bit();
+ if (instr->context()->IsRegister()) {
+ // Make sure that the context isn't overwritten in the AllocateHeapNumber
+ // macro below.
+ available_regs &= ~ToRegister(instr->context()).bit();
+ }
+
+ Register tmp =
+ Register::from_code(base::bits::CountTrailingZeros32(available_regs));
+ available_regs &= ~tmp.bit();
+ Register tmp2 =
+ Register::from_code(base::bits::CountTrailingZeros32(available_regs));
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
@@ -3402,8 +3396,14 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
}
}
+void LCodeGen::DoMathFloorD(LMathFloorD* instr) {
+ XMMRegister output_reg = ToDoubleRegister(instr->result());
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
+ CpuFeatureScope scope(masm(), SSE4_1);
+ __ Roundsd(output_reg, input_reg, kRoundDown);
+}
-void LCodeGen::DoMathFloor(LMathFloor* instr) {
+void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
XMMRegister xmm_scratch = double_scratch0();
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3461,8 +3461,23 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
}
}
+void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
+ XMMRegister xmm_scratch = double_scratch0();
+ XMMRegister output_reg = ToDoubleRegister(instr->result());
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
+ CpuFeatureScope scope(masm(), SSE4_1);
+ Label done;
+ __ Roundsd(output_reg, input_reg, kRoundUp);
+ __ Move(xmm_scratch, -0.5);
+ __ Addsd(xmm_scratch, output_reg);
+ __ Ucomisd(xmm_scratch, input_reg);
+ __ j(below_equal, &done, Label::kNear);
+ __ Move(xmm_scratch, 1.0);
+ __ Subsd(output_reg, xmm_scratch);
+ __ bind(&done);
+}
-void LCodeGen::DoMathRound(LMathRound* instr) {
+void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
const XMMRegister xmm_scratch = double_scratch0();
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3655,54 +3670,77 @@ void LCodeGen::DoMathClz32(LMathClz32* instr) {
__ Lzcntl(result, input);
}
-
-void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
- DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->function()).is(rdi));
- DCHECK(instr->HasPointerMap());
-
- Handle<JSFunction> known_function = instr->hydrogen()->known_function();
- if (known_function.is_null()) {
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(instr->arity());
- __ InvokeFunction(rdi, no_reg, count, CALL_FUNCTION, generator);
+void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+#if DEBUG
+ if (actual.is_reg()) {
+ DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
} else {
- CallKnownFunction(known_function,
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(), instr);
+ DCHECK(!AreAliased(scratch1, scratch2, scratch3));
+ }
+#endif
+ if (FLAG_code_comments) {
+ if (actual.is_reg()) {
+ Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+ } else {
+ Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
+ }
}
-}
+ // Check if next frame is an arguments adaptor frame.
+ Register caller_args_count_reg = scratch1;
+ Label no_arguments_adaptor, formal_parameter_count_loaded;
+ __ movp(scratch2, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ Cmp(Operand(scratch2, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(not_equal, &no_arguments_adaptor, Label::kNear);
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
- HCallFunction* hinstr = instr->hydrogen();
- DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->function()).is(rdi));
- DCHECK(ToRegister(instr->result()).is(rax));
+ // Drop current frame and load arguments count from arguments adaptor frame.
+ __ movp(rbp, scratch2);
+ __ SmiToInteger32(
+ caller_args_count_reg,
+ Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ jmp(&formal_parameter_count_loaded, Label::kNear);
- int arity = instr->arity();
+ __ bind(&no_arguments_adaptor);
+ // Load caller's formal parameter count.
+ __ movp(caller_args_count_reg,
+ Immediate(info()->literal()->parameter_count()));
- ConvertReceiverMode mode = hinstr->convert_mode();
- if (hinstr->HasVectorAndSlot()) {
- Register slot_register = ToRegister(instr->temp_slot());
- Register vector_register = ToRegister(instr->temp_vector());
- DCHECK(slot_register.is(rdx));
- DCHECK(vector_register.is(rbx));
+ __ bind(&formal_parameter_count_loaded);
+ __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3,
+ ReturnAddressState::kNotOnStack);
+ Comment(";;; }");
+}
+
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ HInvokeFunction* hinstr = instr->hydrogen();
+ DCHECK(ToRegister(instr->context()).is(rsi));
+ DCHECK(ToRegister(instr->function()).is(rdi));
+ DCHECK(instr->HasPointerMap());
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
- int index = vector->GetIndex(hinstr->slot());
+ bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
- __ Move(vector_register, vector);
- __ Move(slot_register, Smi::FromInt(index));
+ if (is_tail_call) {
+ DCHECK(!info()->saves_caller_doubles());
+ ParameterCount actual(instr->arity());
+ // It is safe to use rbx, rcx and r8 as scratch registers here given that
+ // 1) we are not going to return to caller function anyway,
+ // 2) rbx (expected number of arguments) will be initialized below.
+ PrepareForTailCall(actual, rbx, rcx, r8);
+ }
- Handle<Code> ic =
- CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ Handle<JSFunction> known_function = hinstr->known_function();
+ if (known_function.is_null()) {
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount actual(instr->arity());
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(rdi, no_reg, actual, flag, generator);
} else {
- __ Set(rax, arity);
- CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
+ CallKnownFunction(known_function, hinstr->formal_parameter_count(),
+ instr->arity(), is_tail_call, instr);
}
}
@@ -5208,13 +5246,6 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
}
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- DCHECK(ToRegister(instr->value()).is(rax));
- __ Push(rax);
- CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
DCHECK(ToRegister(instr->value()).is(rbx));
@@ -5574,13 +5605,6 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
__ bind(&done);
}
-
-void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
- Register context = ToRegister(instr->context());
- __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), context);
-}
-
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.h b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.h
index 873a3dd1ac..139645e6cd 100644
--- a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.h
@@ -192,11 +192,14 @@ class LCodeGen: public LCodeGenBase {
void LoadContextFromDeferred(LOperand* context);
+ void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
+ Register scratch2, Register scratch3);
+
// Generate a direct call to a known function. Expects the function
// to be in rdi.
void CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr);
+ bool is_tail_call, LInstruction* instr);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode,
diff --git a/deps/v8/src/crankshaft/x64/lithium-x64.cc b/deps/v8/src/crankshaft/x64/lithium-x64.cc
index 6be40931de..e86b90c838 100644
--- a/deps/v8/src/crankshaft/x64/lithium-x64.cc
+++ b/deps/v8/src/crankshaft/x64/lithium-x64.cc
@@ -261,27 +261,6 @@ void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
}
-void LCallFunction::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add(" ");
- function()->PrintTo(stream);
- if (hydrogen()->HasVectorAndSlot()) {
- stream->Add(" (type-feedback-vector ");
- temp_vector()->PrintTo(stream);
- stream->Add(" ");
- temp_slot()->PrintTo(stream);
- stream->Add(")");
- }
-}
-
-
-void LCallJSFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- function()->PrintTo(stream);
- stream->Add("#%d / ", arity());
-}
-
-
void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
for (int i = 0; i < InputCount(); i++) {
InputAt(i)->PrintTo(stream);
@@ -602,11 +581,7 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
- int argument_index_accumulator = 0;
- ZoneList<HValue*> objects_to_materialize(0, zone());
- instr->set_environment(CreateEnvironment(
- hydrogen_env, &argument_index_accumulator, &objects_to_materialize));
- return instr;
+ return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
}
@@ -921,17 +896,7 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
}
chunk_->AddInstruction(instr, current_block_);
- if (instr->IsCall() || instr->IsPrologue()) {
- HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
- if (hydrogen_val->HasObservableSideEffects()) {
- HSimulate* sim = HSimulate::cast(hydrogen_val->next());
- sim->ReplayEnvironment(current_block_->last_environment());
- hydrogen_value_for_lazy_bailout = sim;
- }
- LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
- bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
- chunk_->AddInstruction(bailout, current_block_);
- }
+ CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
}
@@ -941,7 +906,11 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
- return new (zone()) LPrologue();
+ LInstruction* result = new (zone()) LPrologue();
+ if (info_->num_heap_slots() > 0) {
+ result = MarkAsCall(result, instr);
+ }
+ return result;
}
@@ -954,14 +923,14 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* value = instr->value();
Representation r = value->representation();
HType type = value->type();
- ToBooleanStub::Types expected = instr->expected_input_types();
- if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+ ToBooleanICStub::Types expected = instr->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
type.IsJSArray() || type.IsHeapNumber() || type.IsString();
LInstruction* branch = new(zone()) LBranch(UseRegister(value));
if (!easy_case &&
- ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+ ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
!expected.IsGeneric())) {
branch = AssignEnvironment(branch);
}
@@ -1081,16 +1050,6 @@ LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
}
-LInstruction* LChunkBuilder::DoCallJSFunction(
- HCallJSFunction* instr) {
- LOperand* function = UseFixed(instr->function(), rdi);
-
- LCallJSFunction* result = new(zone()) LCallJSFunction(function);
-
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallWithDescriptor(
HCallWithDescriptor* instr) {
CallInterfaceDescriptor descriptor = instr->descriptor();
@@ -1114,6 +1073,9 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
descriptor, ops, zone());
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1122,6 +1084,9 @@ LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), rsi);
LOperand* function = UseFixed(instr->function(), rdi);
LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
return MarkAsCall(DefineFixed(result, rax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1152,22 +1117,33 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
}
}
-
LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
+ DCHECK(instr->value()->representation().IsDouble());
LOperand* input = UseRegisterAtStart(instr->value());
- LMathFloor* result = new(zone()) LMathFloor(input);
- return AssignEnvironment(DefineAsRegister(result));
+ if (instr->representation().IsInteger32()) {
+ LMathFloorI* result = new (zone()) LMathFloorI(input);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ } else {
+ DCHECK(instr->representation().IsDouble());
+ LMathFloorD* result = new (zone()) LMathFloorD(input);
+ return DefineAsRegister(result);
+ }
}
-
LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
+ DCHECK(instr->value()->representation().IsDouble());
LOperand* input = UseRegister(instr->value());
- LOperand* temp = FixedTemp(xmm4);
- LMathRound* result = new(zone()) LMathRound(input, temp);
- return AssignEnvironment(DefineAsRegister(result));
+ if (instr->representation().IsInteger32()) {
+ LOperand* temp = FixedTemp(xmm4);
+ LMathRoundI* result = new (zone()) LMathRoundI(input, temp);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ } else {
+ DCHECK(instr->representation().IsDouble());
+ LMathRoundD* result = new (zone()) LMathRoundD(input);
+ return DefineAsRegister(result);
+ }
}
-
LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
LOperand* input = UseRegister(instr->value());
LMathFround* result = new (zone()) LMathFround(input);
@@ -1234,21 +1210,6 @@ LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
}
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* function = UseFixed(instr->function(), rdi);
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(rdx);
- vector = FixedTemp(rbx);
- }
- LCallFunction* call =
- new (zone()) LCallFunction(context, function, slot, vector);
- return MarkAsCall(DefineFixed(call, rax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
LOperand* context = UseFixed(instr->context(), rsi);
LCallRuntime* result = new(zone()) LCallRuntime(context);
@@ -1813,13 +1774,6 @@ LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
}
-LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
- HBoundsCheckBaseIndexInformation* instr) {
- UNREACHABLE();
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
// The control instruction marking the end of a block that completed
// abruptly (e.g., threw an exception). There is nothing specific to do.
@@ -2540,13 +2494,6 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
}
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
- LOperand* object = UseFixed(instr->value(), rax);
- LToFastProperties* result = new(zone()) LToFastProperties(object);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LOperand* context = UseFixed(instr->context(), rsi);
LOperand* value = UseFixed(instr->value(), rbx);
@@ -2584,11 +2531,9 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
HEnvironment* outer = current_block_->last_environment();
outer->set_ast_id(instr->ReturnId());
HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(instr->closure(),
- instr->arguments_count(),
- instr->function(),
- undefined,
- instr->inlining_kind());
+ HEnvironment* inner = outer->CopyForInlining(
+ instr->closure(), instr->arguments_count(), instr->function(), undefined,
+ instr->inlining_kind(), instr->syntactic_tail_call_mode());
// Only replay binding of arguments object if it wasn't removed from graph.
if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object());
@@ -2650,13 +2595,6 @@ LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
return AssignPointerMap(result);
}
-
-LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->context());
- return new(zone()) LStoreFrameContext(context);
-}
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/crankshaft/x64/lithium-x64.h b/deps/v8/src/crankshaft/x64/lithium-x64.h
index 406159b1ff..1feba4bf20 100644
--- a/deps/v8/src/crankshaft/x64/lithium-x64.h
+++ b/deps/v8/src/crankshaft/x64/lithium-x64.h
@@ -29,9 +29,7 @@ class LCodeGen;
V(BitI) \
V(BoundsCheck) \
V(Branch) \
- V(CallJSFunction) \
V(CallWithDescriptor) \
- V(CallFunction) \
V(CallNewArray) \
V(CallRuntime) \
V(CheckArrayBufferNotNeutered) \
@@ -101,12 +99,14 @@ class LCodeGen;
V(MathAbs) \
V(MathClz32) \
V(MathExp) \
- V(MathFloor) \
+ V(MathFloorD) \
+ V(MathFloorI) \
V(MathFround) \
V(MathLog) \
V(MathMinMax) \
V(MathPowHalf) \
- V(MathRound) \
+ V(MathRoundD) \
+ V(MathRoundI) \
V(MathSqrt) \
V(MaybeGrowElements) \
V(ModByConstI) \
@@ -131,7 +131,6 @@ class LCodeGen;
V(StackCheck) \
V(StoreCodeEntry) \
V(StoreContextSlot) \
- V(StoreFrameContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -143,7 +142,6 @@ class LCodeGen;
V(SubI) \
V(TaggedToI) \
V(ThisFunction) \
- V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
V(Typeof) \
@@ -152,7 +150,6 @@ class LCodeGen;
V(UnknownOSRValue) \
V(WrapReceiver)
-
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
Opcode opcode() const final { return LInstruction::k##type; } \
void CompileToNative(LCodeGen* generator) final; \
@@ -224,6 +221,13 @@ class LInstruction : public ZoneObject {
void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
bool IsCall() const { return IsCallBits::decode(bit_field_); }
+ void MarkAsSyntacticTailCall() {
+ bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
+ }
+ bool IsSyntacticTailCall() const {
+ return IsSyntacticTailCallBits::decode(bit_field_);
+ }
+
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
@@ -262,6 +266,8 @@ class LInstruction : public ZoneObject {
virtual LOperand* TempAt(int i) = 0;
class IsCallBits: public BitField<bool, 0, 1> {};
+ class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
+ };
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
@@ -544,6 +550,7 @@ class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
LOperand* elements() { return inputs_[3]; }
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+ DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
};
@@ -805,23 +812,43 @@ class LCompareNumericAndBranch final : public LControlInstruction<2, 0> {
void PrintDataTo(StringStream* stream) override;
};
+// Math.floor with a double result.
+class LMathFloorD final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathFloorD(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
-class LMathFloor final : public LTemplateInstruction<1, 1, 0> {
+ DECLARE_CONCRETE_INSTRUCTION(MathFloorD, "math-floor-d")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+// Math.floor with an integer result.
+class LMathFloorI final : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LMathFloor(LOperand* value) {
- inputs_[0] = value;
- }
+ explicit LMathFloorI(LOperand* value) { inputs_[0] = value; }
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
+ DECLARE_CONCRETE_INSTRUCTION(MathFloorI, "math-floor-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
};
+// Math.round with a double result.
+class LMathRoundD final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathRoundD(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathRoundD, "math-round-d")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
-class LMathRound final : public LTemplateInstruction<1, 1, 1> {
+// Math.round with an integer result.
+class LMathRoundI final : public LTemplateInstruction<1, 1, 1> {
public:
- LMathRound(LOperand* value, LOperand* temp) {
+ LMathRoundI(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
@@ -829,7 +856,7 @@ class LMathRound final : public LTemplateInstruction<1, 1, 1> {
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+ DECLARE_CONCRETE_INSTRUCTION(MathRoundI, "math-round-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
};
@@ -1715,23 +1742,6 @@ class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
};
-class LCallJSFunction final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallJSFunction(LOperand* function) {
- inputs_[0] = function;
- }
-
- LOperand* function() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
- DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
public:
LCallWithDescriptor(CallInterfaceDescriptor descriptor,
@@ -1790,29 +1800,6 @@ class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
};
-class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
- public:
- LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
- LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = function;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
- DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
- int arity() const { return hydrogen()->argument_count() - 1; }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2399,19 +2386,6 @@ class LAllocate final : public LTemplateInstruction<1, 2, 1> {
};
-class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LToFastProperties(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
- DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
class LTypeof final : public LTemplateInstruction<1, 2, 0> {
public:
LTypeof(LOperand* context, LOperand* value) {
@@ -2528,18 +2502,6 @@ class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
};
-class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LStoreFrameContext(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
-};
-
-
class LChunkBuilder;
class LPlatformChunk final : public LChunk {
public:
diff --git a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
index f80e0768a6..1ca3a99271 100644
--- a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
+++ b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
@@ -57,13 +57,6 @@ bool LCodeGen::GenerateCode() {
// the frame (that is done in GeneratePrologue).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
- support_aligned_spilled_doubles_ = info()->IsOptimizing();
-
- dynamic_frame_alignment_ = info()->IsOptimizing() &&
- ((chunk()->num_double_slots() > 2 &&
- !chunk()->graph()->is_recursive()) ||
- !info()->osr_ast_id().IsNone());
-
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
@@ -98,31 +91,6 @@ bool LCodeGen::GeneratePrologue() {
if (info()->IsOptimizing()) {
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
- if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
- // Move state of dynamic frame alignment into edx.
- __ Move(edx, Immediate(kNoAlignmentPadding));
-
- Label do_not_pad, align_loop;
- STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
- // Align esp + 4 to a multiple of 2 * kPointerSize.
- __ test(esp, Immediate(kPointerSize));
- __ j(not_zero, &do_not_pad, Label::kNear);
- __ push(Immediate(0));
- __ mov(ebx, esp);
- __ mov(edx, Immediate(kAlignmentPaddingPushed));
- // Copy arguments, receiver, and return address.
- __ mov(ecx, Immediate(scope()->num_parameters() + 2));
-
- __ bind(&align_loop);
- __ mov(eax, Operand(ebx, 1 * kPointerSize));
- __ mov(Operand(ebx, 0), eax);
- __ add(Operand(ebx), Immediate(kPointerSize));
- __ dec(ecx);
- __ j(not_zero, &align_loop, Label::kNear);
- __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
- __ bind(&do_not_pad);
- }
}
info()->set_prologue_offset(masm_->pc_offset());
@@ -130,61 +98,29 @@ bool LCodeGen::GeneratePrologue() {
DCHECK(!frame_is_built_);
frame_is_built_ = true;
if (info()->IsStub()) {
- __ StubPrologue();
+ __ StubPrologue(StackFrame::STUB);
} else {
__ Prologue(info()->GeneratePreagedPrologue());
}
}
- if (info()->IsOptimizing() &&
- dynamic_frame_alignment_ &&
- FLAG_debug_code) {
- __ test(esp, Immediate(kPointerSize));
- __ Assert(zero, kFrameIsExpectedToBeAligned);
- }
-
// Reserve space for the stack slots needed by the code.
int slots = GetStackSlotCount();
DCHECK(slots != 0 || !info()->IsOptimizing());
if (slots > 0) {
- if (slots == 1) {
- if (dynamic_frame_alignment_) {
- __ push(edx);
- } else {
- __ push(Immediate(kNoAlignmentPadding));
- }
- } else {
- if (FLAG_debug_code) {
- __ sub(Operand(esp), Immediate(slots * kPointerSize));
+ __ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
- MakeSureStackPagesMapped(slots * kPointerSize);
+ MakeSureStackPagesMapped(slots * kPointerSize);
#endif
- __ push(eax);
- __ mov(Operand(eax), Immediate(slots));
- Label loop;
- __ bind(&loop);
- __ mov(MemOperand(esp, eax, times_4, 0),
- Immediate(kSlotsZapValue));
- __ dec(eax);
- __ j(not_zero, &loop);
- __ pop(eax);
- } else {
- __ sub(Operand(esp), Immediate(slots * kPointerSize));
-#ifdef _MSC_VER
- MakeSureStackPagesMapped(slots * kPointerSize);
-#endif
- }
-
- if (support_aligned_spilled_doubles_) {
- Comment(";;; Store dynamic frame alignment tag for spilled doubles");
- // Store dynamic frame alignment state in the first local.
- int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
- if (dynamic_frame_alignment_) {
- __ mov(Operand(ebp, offset), edx);
- } else {
- __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
- }
- }
+ if (FLAG_debug_code) {
+ __ push(eax);
+ __ mov(Operand(eax), Immediate(slots));
+ Label loop;
+ __ bind(&loop);
+ __ mov(MemOperand(esp, eax, times_4, 0), Immediate(kSlotsZapValue));
+ __ dec(eax);
+ __ j(not_zero, &loop);
+ __ pop(eax);
}
}
@@ -265,50 +201,11 @@ void LCodeGen::GenerateOsrPrologue() {
osr_pc_offset_ = masm()->pc_offset();
- // Move state of dynamic frame alignment into edx.
- __ Move(edx, Immediate(kNoAlignmentPadding));
-
- if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
- Label do_not_pad, align_loop;
- // Align ebp + 4 to a multiple of 2 * kPointerSize.
- __ test(ebp, Immediate(kPointerSize));
- __ j(zero, &do_not_pad, Label::kNear);
- __ push(Immediate(0));
- __ mov(ebx, esp);
- __ mov(edx, Immediate(kAlignmentPaddingPushed));
-
- // Move all parts of the frame over one word. The frame consists of:
- // unoptimized frame slots, alignment state, context, frame pointer, return
- // address, receiver, and the arguments.
- __ mov(ecx, Immediate(scope()->num_parameters() +
- 5 + graph()->osr()->UnoptimizedFrameSlots()));
-
- __ bind(&align_loop);
- __ mov(eax, Operand(ebx, 1 * kPointerSize));
- __ mov(Operand(ebx, 0), eax);
- __ add(Operand(ebx), Immediate(kPointerSize));
- __ dec(ecx);
- __ j(not_zero, &align_loop, Label::kNear);
- __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
- __ sub(Operand(ebp), Immediate(kPointerSize));
- __ bind(&do_not_pad);
- }
-
- // Save the first local, which is overwritten by the alignment state.
- Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
- __ push(alignment_loc);
-
- // Set the dynamic frame alignment state.
- __ mov(alignment_loc, edx);
-
// Adjust the frame size, subsuming the unoptimized frame into the
// optimized frame.
int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
- DCHECK(slots >= 1);
- __ sub(esp, Immediate((slots - 1) * kPointerSize));
-
- // Initailize FPU state.
- __ fninit();
+ DCHECK(slots >= 0);
+ __ sub(esp, Immediate(slots * kPointerSize));
}
@@ -376,32 +273,24 @@ bool LCodeGen::GenerateJumpTable() {
}
if (needs_frame.is_linked()) {
__ bind(&needs_frame);
-
/* stack layout
- 4: entry address
- 3: return address <-- esp
- 2: garbage
+ 3: entry address
+ 2: return address <-- esp
1: garbage
0: garbage
*/
- __ sub(esp, Immediate(kPointerSize)); // Reserve space for stub marker.
- __ push(MemOperand(esp, kPointerSize)); // Copy return address.
- __ push(MemOperand(esp, 3 * kPointerSize)); // Copy entry address.
+ __ push(MemOperand(esp, 0)); // Copy return address.
+ __ push(MemOperand(esp, 2 * kPointerSize)); // Copy entry address.
/* stack layout
4: entry address
3: return address
- 2: garbage
1: return address
0: entry address <-- esp
*/
- __ mov(MemOperand(esp, 4 * kPointerSize), ebp); // Save ebp.
-
- // Copy context.
- __ mov(ebp, MemOperand(ebp, StandardFrameConstants::kContextOffset));
- __ mov(MemOperand(esp, 3 * kPointerSize), ebp);
+ __ mov(MemOperand(esp, 3 * kPointerSize), ebp); // Save ebp.
// Fill ebp with the right stack frame address.
- __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
+ __ lea(ebp, MemOperand(esp, 3 * kPointerSize));
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
@@ -411,8 +300,7 @@ bool LCodeGen::GenerateJumpTable() {
Immediate(Smi::FromInt(StackFrame::STUB)));
/* stack layout
- 4: old ebp
- 3: context pointer
+ 3: old ebp
2: stub marker
1: return address
0: entry address <-- esp
@@ -449,9 +337,8 @@ bool LCodeGen::GenerateDeferredCode() {
frame_is_built_ = true;
// Build the frame in such a way that esi isn't trashed.
__ push(ebp); // Caller's frame pointer.
- __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
__ push(Immediate(Smi::FromInt(StackFrame::STUB)));
- __ lea(ebp, Operand(esp, 2 * kPointerSize));
+ __ lea(ebp, Operand(esp, TypedFrameConstants::kFixedFrameSizeFromFp));
Comment(";;; Deferred code");
}
code->Generate();
@@ -2240,15 +2127,16 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
EmitBranch(instr, not_equal);
} else {
- ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
- if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+ ToBooleanICStub::Types expected =
+ instr->hydrogen()->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
- if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+ if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
// undefined -> false.
__ cmp(reg, factory()->undefined_value());
__ j(equal, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+ if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
// true -> true.
__ cmp(reg, factory()->true_value());
__ j(equal, instr->TrueLabel(chunk_));
@@ -2256,13 +2144,13 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ cmp(reg, factory()->false_value());
__ j(equal, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+ if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
// 'null' -> false.
__ cmp(reg, factory()->null_value());
__ j(equal, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::SMI)) {
+ if (expected.Contains(ToBooleanICStub::SMI)) {
// Smis: 0 -> false, all other -> true.
__ test(reg, Operand(reg));
__ j(equal, instr->FalseLabel(chunk_));
@@ -2282,18 +2170,18 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (expected.CanBeUndetectable()) {
// Undetectable -> false.
__ test_b(FieldOperand(map, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
+ Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, instr->FalseLabel(chunk_));
}
}
- if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+ if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
// spec object -> true.
__ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
__ j(above_equal, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::STRING)) {
+ if (expected.Contains(ToBooleanICStub::STRING)) {
// String value -> false iff empty.
Label not_string;
__ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
@@ -2304,19 +2192,19 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ bind(&not_string);
}
- if (expected.Contains(ToBooleanStub::SYMBOL)) {
+ if (expected.Contains(ToBooleanICStub::SYMBOL)) {
// Symbol value -> true.
__ CmpInstanceType(map, SYMBOL_TYPE);
__ j(equal, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+ if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
// SIMD value -> true.
__ CmpInstanceType(map, SIMD128_VALUE_TYPE);
__ j(equal, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
Label not_heap_number;
__ cmp(FieldOperand(reg, HeapObject::kMapOffset),
@@ -2524,7 +2412,7 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
}
__ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
__ test_b(FieldOperand(temp, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
+ Immediate(1 << Map::kIsUndetectable));
EmitBranch(instr, not_zero);
}
@@ -2554,11 +2442,10 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->left()).is(edx));
DCHECK(ToRegister(instr->right()).is(eax));
- Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+ Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
- __ test(eax, eax);
-
- EmitBranch(instr, ComputeCompareCondition(instr->op()));
+ __ CompareRoot(eax, Heap::kTrueValueRootIndex);
+ EmitBranch(instr, equal);
}
@@ -2717,7 +2604,7 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
// Deoptimize if the object needs to be access checked.
__ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
- 1 << Map::kIsAccessCheckNeeded);
+ Immediate(1 << Map::kIsAccessCheckNeeded));
DeoptimizeIf(not_zero, instr, Deoptimizer::kAccessCheck);
// Deoptimize for proxies.
__ CmpInstanceType(object_map, JS_PROXY_TYPE);
@@ -2750,18 +2637,11 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
__ bind(&done);
}
-
-void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
- int extra_value_count = dynamic_frame_alignment ? 2 : 1;
+void LCodeGen::EmitReturn(LReturn* instr) {
+ int extra_value_count = 1;
if (instr->has_constant_parameter_count()) {
int parameter_count = ToInteger32(instr->constant_parameter_count());
- if (dynamic_frame_alignment && FLAG_debug_code) {
- __ cmp(Operand(esp,
- (parameter_count + extra_value_count) * kPointerSize),
- Immediate(kAlignmentZapValue));
- __ Assert(equal, kExpectedAlignmentMarker);
- }
__ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
} else {
DCHECK(info()->IsStub()); // Functions would need to drop one more value.
@@ -2769,19 +2649,9 @@ void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
// The argument count parameter is a smi
__ SmiUntag(reg);
Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
- if (dynamic_frame_alignment && FLAG_debug_code) {
- DCHECK(extra_value_count == 2);
- __ cmp(Operand(esp, reg, times_pointer_size,
- extra_value_count * kPointerSize),
- Immediate(kAlignmentZapValue));
- __ Assert(equal, kExpectedAlignmentMarker);
- }
// emit code to restore stack based on instr->parameter_count()
__ pop(return_addr_reg); // save return address
- if (dynamic_frame_alignment) {
- __ inc(reg); // 1 more for alignment
- }
__ shl(reg, kPointerSizeLog2);
__ add(esp, reg);
__ jmp(return_addr_reg);
@@ -2799,25 +2669,12 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kTraceExit);
}
- if (dynamic_frame_alignment_) {
- // Fetch the state of the dynamic frame alignment.
- __ mov(edx, Operand(ebp,
- JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
- }
if (NeedsEagerFrame()) {
__ mov(esp, ebp);
__ pop(ebp);
}
- if (dynamic_frame_alignment_) {
- Label no_padding;
- __ cmp(edx, Immediate(kNoAlignmentPadding));
- __ j(equal, &no_padding, Label::kNear);
- EmitReturn(instr, true);
- __ bind(&no_padding);
- }
-
- EmitReturn(instr, false);
+ EmitReturn(instr);
}
@@ -3218,11 +3075,12 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
if (instr->hydrogen()->from_inlined()) {
__ lea(result, Operand(esp, -2 * kPointerSize));
- } else {
+ } else if (instr->hydrogen()->arguments_adaptor()) {
// Check for arguments adapter frame.
Label done, adapted;
__ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
+ __ mov(result,
+ Operand(result, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(Operand(result),
Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adapted, Label::kNear);
@@ -3238,6 +3096,8 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
// Result is the frame pointer for the frame if not adapted and for the real
// frame below the adaptor frame if adapted.
__ bind(&done);
+ } else {
+ __ mov(result, Operand(ebp));
}
}
@@ -3272,6 +3132,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// object as a receiver to normal functions. Values have to be
// passed unchanged to builtins and strict-mode functions.
Label receiver_ok, global_object;
+ Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
Register scratch = ToRegister(instr->temp());
if (!instr->hydrogen()->known_function()) {
@@ -3280,20 +3141,20 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ mov(scratch,
FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
__ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
- 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
- __ j(not_equal, &receiver_ok);
+ Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+ __ j(not_equal, &receiver_ok, dist);
// Do not transform the receiver to object for builtins.
__ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
- 1 << SharedFunctionInfo::kNativeBitWithinByte);
- __ j(not_equal, &receiver_ok);
+ Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
+ __ j(not_equal, &receiver_ok, dist);
}
// Normal function. Replace undefined or null with global receiver.
__ cmp(receiver, factory()->null_value());
- __ j(equal, &global_object);
+ __ j(equal, &global_object, Label::kNear);
__ cmp(receiver, factory()->undefined_value());
- __ j(equal, &global_object);
+ __ j(equal, &global_object, Label::kNear);
// The receiver should be a JS object.
__ test(receiver, Immediate(kSmiTagMask));
@@ -3341,13 +3202,25 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// Invoke the function.
__ bind(&invoke);
+
+ InvokeFlag flag = CALL_FUNCTION;
+ if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
+ DCHECK(!info()->saves_caller_doubles());
+ // TODO(ishell): drop current frame before pushing arguments to the stack.
+ flag = JUMP_FUNCTION;
+ ParameterCount actual(eax);
+ // It is safe to use ebx, ecx and edx as scratch registers here given that
+ // 1) we are not going to return to caller function anyway,
+ // 2) ebx (expected arguments count) and edx (new.target) will be
+ // initialized below.
+ PrepareForTailCall(actual, ebx, ecx, edx);
+ }
+
DCHECK(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
+ SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount actual(eax);
- __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
- safepoint_generator);
+ __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
}
@@ -3391,10 +3264,9 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
CallRuntime(Runtime::kDeclareGlobals, instr);
}
-
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr) {
+ bool is_tail_call, LInstruction* instr) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
bool can_invoke_directly =
@@ -3410,21 +3282,38 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ mov(edx, factory()->undefined_value());
__ mov(eax, arity);
+ bool is_self_call = function.is_identical_to(info()->closure());
+
// Invoke function directly.
- if (function.is_identical_to(info()->closure())) {
- __ CallSelf();
+ if (is_self_call) {
+ Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
+ if (is_tail_call) {
+ __ Jump(self, RelocInfo::CODE_TARGET);
+ } else {
+ __ Call(self, RelocInfo::CODE_TARGET);
+ }
} else {
- __ call(FieldOperand(function_reg, JSFunction::kCodeEntryOffset));
+ Operand target = FieldOperand(function_reg, JSFunction::kCodeEntryOffset);
+ if (is_tail_call) {
+ __ jmp(target);
+ } else {
+ __ call(target);
+ }
+ }
+
+ if (!is_tail_call) {
+ // Set up deoptimization.
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
} else {
// We need to adapt arguments.
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(
this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(arity);
+ ParameterCount actual(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(function_reg, expected, actual, flag, generator);
}
}
@@ -3466,35 +3355,6 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
}
-void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
- DCHECK(ToRegister(instr->function()).is(edi));
- DCHECK(ToRegister(instr->result()).is(eax));
-
- // Change context.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Always initialize new target and number of actual arguments.
- __ mov(edx, factory()->undefined_value());
- __ mov(eax, instr->arity());
-
- bool is_self_call = false;
- if (instr->hydrogen()->function()->IsConstant()) {
- HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
- Handle<JSFunction> jsfun =
- Handle<JSFunction>::cast(fun_const->handle(isolate()));
- is_self_call = jsfun.is_identical_to(info()->closure());
- }
-
- if (is_self_call) {
- __ CallSelf();
- } else {
- __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
- }
-
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
@@ -3502,8 +3362,19 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
Label slow, allocated, done;
- Register tmp = input_reg.is(eax) ? ecx : eax;
- Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
+ uint32_t available_regs = eax.bit() | ecx.bit() | edx.bit() | ebx.bit();
+ available_regs &= ~input_reg.bit();
+ if (instr->context()->IsRegister()) {
+ // Make sure that the context isn't overwritten in the AllocateHeapNumber
+ // macro below.
+ available_regs &= ~ToRegister(instr->context()).bit();
+ }
+
+ Register tmp =
+ Register::from_code(base::bits::CountTrailingZeros32(available_regs));
+ available_regs &= ~tmp.bit();
+ Register tmp2 =
+ Register::from_code(base::bits::CountTrailingZeros32(available_regs));
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
@@ -3621,6 +3492,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ sub(esp, Immediate(kPointerSize));
__ fist_s(Operand(esp, 0));
__ pop(output_reg);
+ __ X87SetRC(0x0000);
__ X87CheckIA();
DeoptimizeIf(equal, instr, Deoptimizer::kOverflow);
__ fnclex();
@@ -3653,6 +3525,8 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// Clear exception bits.
__ fnclex();
__ fistp_s(MemOperand(esp, 0));
+ // Restore round mode.
+ __ X87SetRC(0x0000);
// Check overflow.
__ X87CheckIA();
__ pop(result);
@@ -3687,6 +3561,8 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// Clear exception bits.
__ fnclex();
__ fistp_s(MemOperand(esp, 0));
+ // Restore round mode.
+ __ X87SetRC(0x0000);
// Check overflow.
__ X87CheckIA();
__ pop(result);
@@ -3927,54 +3803,78 @@ void LCodeGen::DoMathExp(LMathExp* instr) {
X87CommitWrite(result_reg);
}
-
-void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->function()).is(edi));
- DCHECK(instr->HasPointerMap());
-
- Handle<JSFunction> known_function = instr->hydrogen()->known_function();
- if (known_function.is_null()) {
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(
- this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(instr->arity());
- __ InvokeFunction(edi, no_reg, count, CALL_FUNCTION, generator);
+void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+#if DEBUG
+ if (actual.is_reg()) {
+ DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
} else {
- CallKnownFunction(known_function,
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(), instr);
+ DCHECK(!AreAliased(scratch1, scratch2, scratch3));
}
-}
+#endif
+ if (FLAG_code_comments) {
+ if (actual.is_reg()) {
+ Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+ } else {
+ Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
+ }
+ }
+
+ // Check if next frame is an arguments adaptor frame.
+ Register caller_args_count_reg = scratch1;
+ Label no_arguments_adaptor, formal_parameter_count_loaded;
+ __ mov(scratch2, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ cmp(Operand(scratch2, StandardFrameConstants::kContextOffset),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &no_arguments_adaptor, Label::kNear);
+ // Drop current frame and load arguments count from arguments adaptor frame.
+ __ mov(ebp, scratch2);
+ __ mov(caller_args_count_reg,
+ Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(caller_args_count_reg);
+ __ jmp(&formal_parameter_count_loaded, Label::kNear);
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
- HCallFunction* hinstr = instr->hydrogen();
+ __ bind(&no_arguments_adaptor);
+ // Load caller's formal parameter count.
+ __ mov(caller_args_count_reg,
+ Immediate(info()->literal()->parameter_count()));
+
+ __ bind(&formal_parameter_count_loaded);
+ __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3,
+ ReturnAddressState::kNotOnStack, 0);
+ Comment(";;; }");
+}
+
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ HInvokeFunction* hinstr = instr->hydrogen();
DCHECK(ToRegister(instr->context()).is(esi));
DCHECK(ToRegister(instr->function()).is(edi));
- DCHECK(ToRegister(instr->result()).is(eax));
-
- int arity = instr->arity();
- ConvertReceiverMode mode = hinstr->convert_mode();
- if (hinstr->HasVectorAndSlot()) {
- Register slot_register = ToRegister(instr->temp_slot());
- Register vector_register = ToRegister(instr->temp_vector());
- DCHECK(slot_register.is(edx));
- DCHECK(vector_register.is(ebx));
+ DCHECK(instr->HasPointerMap());
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
- int index = vector->GetIndex(hinstr->slot());
+ bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
- __ mov(vector_register, vector);
- __ mov(slot_register, Immediate(Smi::FromInt(index)));
+ if (is_tail_call) {
+ DCHECK(!info()->saves_caller_doubles());
+ ParameterCount actual(instr->arity());
+ // It is safe to use ebx, ecx and edx as scratch registers here given that
+ // 1) we are not going to return to caller function anyway,
+ // 2) ebx (expected arguments count) and edx (new.target) will be
+ // initialized below.
+ PrepareForTailCall(actual, ebx, ecx, edx);
+ }
- Handle<Code> ic =
- CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ Handle<JSFunction> known_function = hinstr->known_function();
+ if (known_function.is_null()) {
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount actual(instr->arity());
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(edi, no_reg, actual, flag, generator);
} else {
- __ Set(eax, arity);
- CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
+ CallKnownFunction(known_function, hinstr->formal_parameter_count(),
+ instr->arity(), is_tail_call, instr);
}
}
@@ -5181,7 +5081,7 @@ void LCodeGen::DoCheckArrayBufferNotNeutered(
__ mov(scratch, FieldOperand(view, JSArrayBufferView::kBufferOffset));
__ test_b(FieldOperand(scratch, JSArrayBuffer::kBitFieldOffset),
- 1 << JSArrayBuffer::WasNeutered::kShift);
+ Immediate(1 << JSArrayBuffer::WasNeutered::kShift));
DeoptimizeIf(not_zero, instr, Deoptimizer::kOutOfBounds);
}
@@ -5197,8 +5097,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
InstanceType last;
instr->hydrogen()->GetCheckInterval(&first, &last);
- __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
- static_cast<int8_t>(first));
+ __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(first));
// If there is only one type in the interval check for equality.
if (first == last) {
@@ -5207,8 +5106,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType);
// Omit check for the last type.
if (last != LAST_TYPE) {
- __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
- static_cast<int8_t>(last));
+ __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(last));
DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType);
}
}
@@ -5219,7 +5117,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
- __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
+ __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(mask));
DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
Deoptimizer::kWrongInstanceType);
} else {
@@ -5589,13 +5487,6 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
}
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- DCHECK(ToRegister(instr->value()).is(eax));
- __ push(eax);
- CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
DCHECK(ToRegister(instr->value()).is(ebx));
@@ -5662,7 +5553,7 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
// Check for undetectable objects => true.
__ mov(input, FieldOperand(input, HeapObject::kMapOffset));
__ test_b(FieldOperand(input, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
+ Immediate(1 << Map::kIsUndetectable));
final_branch_condition = not_zero;
} else if (String::Equals(type_name, factory()->function_string())) {
@@ -5683,7 +5574,7 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
__ j(below, false_label, false_distance);
// Check for callable or undetectable objects => false.
__ test_b(FieldOperand(input, Map::kBitFieldOffset),
- (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
+ Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
final_branch_condition = zero;
// clang-format off
@@ -5947,13 +5838,6 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
__ bind(&done);
}
-
-void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
- Register context = ToRegister(instr->context());
- __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), context);
-}
-
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.h b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.h
index 0cfbf70388..3719236a40 100644
--- a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.h
+++ b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.h
@@ -31,8 +31,6 @@ class LCodeGen: public LCodeGenBase {
jump_table_(4, info->zone()),
scope_(info->scope()),
deferred_(8, info->zone()),
- dynamic_frame_alignment_(false),
- support_aligned_spilled_doubles_(false),
frame_is_built_(false),
x87_stack_(assembler),
safepoints_(info->zone()),
@@ -221,11 +219,14 @@ class LCodeGen: public LCodeGenBase {
void LoadContextFromDeferred(LOperand* context);
- // Generate a direct call to a known function. Expects the function
+ void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
+ Register scratch2, Register scratch3);
+
+ // Generate a direct call to a known function. Expects the function
// to be in edi.
void CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr);
+ bool is_tail_call, LInstruction* instr);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
@@ -329,7 +330,7 @@ class LCodeGen: public LCodeGenBase {
template <class T>
void EmitVectorStoreICRegisters(T* instr);
- void EmitReturn(LReturn* instr, bool dynamic_frame_alignment);
+ void EmitReturn(LReturn* instr);
// Emits code for pushing either a tagged constant, a (non-double)
// register, or a stack slot operand.
@@ -354,8 +355,6 @@ class LCodeGen: public LCodeGenBase {
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
Scope* const scope_;
ZoneList<LDeferredCode*> deferred_;
- bool dynamic_frame_alignment_;
- bool support_aligned_spilled_doubles_;
bool frame_is_built_;
class X87Stack : public ZoneObject {
diff --git a/deps/v8/src/crankshaft/x87/lithium-x87.cc b/deps/v8/src/crankshaft/x87/lithium-x87.cc
index f770509076..163d2c9cfb 100644
--- a/deps/v8/src/crankshaft/x87/lithium-x87.cc
+++ b/deps/v8/src/crankshaft/x87/lithium-x87.cc
@@ -278,27 +278,6 @@ void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
}
-void LCallFunction::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add(" ");
- function()->PrintTo(stream);
- if (hydrogen()->HasVectorAndSlot()) {
- stream->Add(" (type-feedback-vector ");
- temp_vector()->PrintTo(stream);
- stream->Add(" ");
- temp_slot()->PrintTo(stream);
- stream->Add(")");
- }
-}
-
-
-void LCallJSFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- function()->PrintTo(stream);
- stream->Add("#%d / ", arity());
-}
-
-
void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
for (int i = 0; i < InputCount(); i++) {
InputAt(i)->PrintTo(stream);
@@ -445,13 +424,6 @@ LPlatformChunk* LChunkBuilder::Build() {
LPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
- // Reserve the first spill slot for the state of dynamic alignment.
- if (info()->IsOptimizing()) {
- int alignment_state_index = chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
- DCHECK_EQ(alignment_state_index, 4);
- USE(alignment_state_index);
- }
-
// If compiling for OSR, reserve space for the unoptimized frame,
// which will be subsumed into this frame.
if (graph()->has_osr()) {
@@ -623,12 +595,7 @@ LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr,
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
- int argument_index_accumulator = 0;
- ZoneList<HValue*> objects_to_materialize(0, zone());
- instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator,
- &objects_to_materialize));
- return instr;
+ return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
}
@@ -952,22 +919,16 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
}
chunk_->AddInstruction(instr, current_block_);
- if (instr->IsCall() || instr->IsPrologue()) {
- HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
- if (hydrogen_val->HasObservableSideEffects()) {
- HSimulate* sim = HSimulate::cast(hydrogen_val->next());
- sim->ReplayEnvironment(current_block_->last_environment());
- hydrogen_value_for_lazy_bailout = sim;
- }
- LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
- bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
- chunk_->AddInstruction(bailout, current_block_);
- }
+ CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
}
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
- return new (zone()) LPrologue();
+ LInstruction* result = new (zone()) LPrologue();
+ if (info_->num_heap_slots() > 0) {
+ result = MarkAsCall(result, instr);
+ }
+ return result;
}
@@ -980,8 +941,8 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* value = instr->value();
Representation r = value->representation();
HType type = value->type();
- ToBooleanStub::Types expected = instr->expected_input_types();
- if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+ ToBooleanICStub::Types expected = instr->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
type.IsJSArray() || type.IsHeapNumber() || type.IsString();
@@ -990,7 +951,7 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
temp != NULL ? new (zone()) LBranch(UseRegister(value), temp)
: new (zone()) LBranch(UseRegisterAtStart(value), temp);
if (!easy_case &&
- ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+ ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
!expected.IsGeneric())) {
branch = AssignEnvironment(branch);
}
@@ -1118,16 +1079,6 @@ LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
}
-LInstruction* LChunkBuilder::DoCallJSFunction(
- HCallJSFunction* instr) {
- LOperand* function = UseFixed(instr->function(), edi);
-
- LCallJSFunction* result = new(zone()) LCallJSFunction(function);
-
- return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
LInstruction* LChunkBuilder::DoCallWithDescriptor(
HCallWithDescriptor* instr) {
CallInterfaceDescriptor descriptor = instr->descriptor();
@@ -1150,6 +1101,9 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
descriptor, ops, zone());
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1158,6 +1112,9 @@ LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* function = UseFixed(instr->function(), edi);
LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1263,22 +1220,6 @@ LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
}
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* function = UseFixed(instr->function(), edi);
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(edx);
- vector = FixedTemp(ebx);
- }
-
- LCallFunction* call =
- new (zone()) LCallFunction(context, function, slot, vector);
- return MarkAsCall(DefineFixed(call, eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
LOperand* context = UseFixed(instr->context(), esi);
return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), eax), instr);
@@ -1840,13 +1781,6 @@ LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
}
-LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
- HBoundsCheckBaseIndexInformation* instr) {
- UNREACHABLE();
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
// The control instruction marking the end of a block that completed
// abruptly (e.g., threw an exception). There is nothing specific to do.
@@ -2512,11 +2446,6 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
Retry(kNotEnoughSpillSlotsForOsr);
spill_index = 0;
}
- if (spill_index == 0) {
- // The dynamic frame alignment state overwrites the first local.
- // The first local is saved at the end of the unoptimized frame.
- spill_index = graph()->osr()->UnoptimizedFrameSlots();
- }
spill_index += StandardFrameConstants::kFixedSlotCount;
}
return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
@@ -2556,13 +2485,6 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
}
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
- LOperand* object = UseFixed(instr->value(), eax);
- LToFastProperties* result = new(zone()) LToFastProperties(object);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* value = UseFixed(instr->value(), ebx);
@@ -2600,11 +2522,9 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
HEnvironment* outer = current_block_->last_environment();
outer->set_ast_id(instr->ReturnId());
HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(instr->closure(),
- instr->arguments_count(),
- instr->function(),
- undefined,
- instr->inlining_kind());
+ HEnvironment* inner = outer->CopyForInlining(
+ instr->closure(), instr->arguments_count(), instr->function(), undefined,
+ instr->inlining_kind(), instr->syntactic_tail_call_mode());
// Only replay binding of arguments object if it wasn't removed from graph.
if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object());
@@ -2665,13 +2585,6 @@ LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
return AssignPointerMap(result);
}
-
-LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->context());
- return new(zone()) LStoreFrameContext(context);
-}
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/crankshaft/x87/lithium-x87.h b/deps/v8/src/crankshaft/x87/lithium-x87.h
index 0f2813f85a..d83322acd3 100644
--- a/deps/v8/src/crankshaft/x87/lithium-x87.h
+++ b/deps/v8/src/crankshaft/x87/lithium-x87.h
@@ -33,9 +33,7 @@ class LCodeGen;
V(BitI) \
V(BoundsCheck) \
V(Branch) \
- V(CallJSFunction) \
V(CallWithDescriptor) \
- V(CallFunction) \
V(CallNewArray) \
V(CallRuntime) \
V(CheckArrayBufferNotNeutered) \
@@ -136,7 +134,6 @@ class LCodeGen;
V(StackCheck) \
V(StoreCodeEntry) \
V(StoreContextSlot) \
- V(StoreFrameContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -148,7 +145,6 @@ class LCodeGen;
V(SubI) \
V(TaggedToI) \
V(ThisFunction) \
- V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
V(Typeof) \
@@ -229,6 +225,13 @@ class LInstruction : public ZoneObject {
void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
bool IsCall() const { return IsCallBits::decode(bit_field_); }
+ void MarkAsSyntacticTailCall() {
+ bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
+ }
+ bool IsSyntacticTailCall() const {
+ return IsSyntacticTailCallBits::decode(bit_field_);
+ }
+
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
@@ -267,6 +270,8 @@ class LInstruction : public ZoneObject {
virtual LOperand* TempAt(int i) = 0;
class IsCallBits: public BitField<bool, 0, 1> {};
+ class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
+ };
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
@@ -555,6 +560,7 @@ class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
LOperand* elements() { return inputs_[3]; }
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+ DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
};
@@ -1735,23 +1741,6 @@ class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
};
-class LCallJSFunction final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallJSFunction(LOperand* function) {
- inputs_[0] = function;
- }
-
- LOperand* function() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
- DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
public:
LCallWithDescriptor(CallInterfaceDescriptor descriptor,
@@ -1810,29 +1799,6 @@ class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
};
-class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
- public:
- LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
- LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = function;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
- DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
- void PrintDataTo(StringStream* stream) override;
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2433,19 +2399,6 @@ class LAllocate final : public LTemplateInstruction<1, 2, 1> {
};
-class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LToFastProperties(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
- DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
class LTypeof final : public LTemplateInstruction<1, 2, 0> {
public:
LTypeof(LOperand* context, LOperand* value) {
@@ -2560,18 +2513,6 @@ class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
};
-class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LStoreFrameContext(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
-};
-
-
class LChunkBuilder;
class LPlatformChunk final : public LChunk {
public:
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 7c9a24f520..06883803f9 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -133,8 +133,8 @@ class PredictablePlatform : public Platform {
}
uint64_t AddTraceEvent(char phase, const uint8_t* categoryEnabledFlag,
- const char* name, uint64_t id, uint64_t bind_id,
- int numArgs, const char** argNames,
+ const char* name, const char* scope, uint64_t id,
+ uint64_t bind_id, int numArgs, const char** argNames,
const uint8_t* argTypes, const uint64_t* argValues,
unsigned int flags) override {
return 0;
@@ -251,7 +251,7 @@ CounterCollection* Shell::counters_ = &local_counters_;
base::LazyMutex Shell::context_mutex_;
const base::TimeTicks Shell::kInitialTicks =
base::TimeTicks::HighResolutionNow();
-Global<Context> Shell::utility_context_;
+Global<Function> Shell::stringify_function_;
base::LazyMutex Shell::workers_mutex_;
bool Shell::allow_new_workers_ = true;
i::List<Worker*> Shell::workers_;
@@ -412,24 +412,7 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
}
#if !defined(V8_SHARED)
} else {
- v8::TryCatch try_catch(isolate);
- v8::Local<v8::Context> context =
- v8::Local<v8::Context>::New(isolate, utility_context_);
- v8::Context::Scope context_scope(context);
- Local<Object> global = context->Global();
- Local<Value> fun =
- global->Get(context, String::NewFromUtf8(isolate, "Stringify",
- v8::NewStringType::kNormal)
- .ToLocalChecked()).ToLocalChecked();
- Local<Value> argv[1] = {result};
- Local<Value> s;
- if (!Local<Function>::Cast(fun)
- ->Call(context, global, 1, argv)
- .ToLocal(&s)) {
- return true;
- }
- DCHECK(!try_catch.HasCaught());
- v8::String::Utf8Value str(s);
+ v8::String::Utf8Value str(Stringify(isolate, result));
fwrite(*str, sizeof(**str), str.length(), stdout);
printf("\n");
}
@@ -906,11 +889,11 @@ void Shell::Version(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::ReportException(Isolate* isolate, v8::TryCatch* try_catch) {
HandleScope handle_scope(isolate);
#ifndef V8_SHARED
- Local<Context> utility_context;
+ Local<Context> context;
bool enter_context = !isolate->InContext();
if (enter_context) {
- utility_context = Local<Context>::New(isolate, utility_context_);
- utility_context->Enter();
+ context = Local<Context>::New(isolate, evaluation_context_);
+ context->Enter();
}
#endif // !V8_SHARED
v8::String::Utf8Value exception(try_catch->Exception());
@@ -954,7 +937,7 @@ void Shell::ReportException(Isolate* isolate, v8::TryCatch* try_catch) {
}
printf("\n");
#ifndef V8_SHARED
- if (enter_context) utility_context->Exit();
+ if (enter_context) context->Exit();
#endif // !V8_SHARED
}
@@ -1057,60 +1040,37 @@ void Shell::AddHistogramSample(void* histogram, int sample) {
counter->AddSample(sample);
}
-
-class NoUseStrongForUtilityScriptScope {
- public:
- NoUseStrongForUtilityScriptScope() : flag_(i::FLAG_use_strong) {
- i::FLAG_use_strong = false;
- }
- ~NoUseStrongForUtilityScriptScope() { i::FLAG_use_strong = flag_; }
-
- private:
- bool flag_;
-};
-
-
-void Shell::InstallUtilityScript(Isolate* isolate) {
- NoUseStrongForUtilityScriptScope no_use_strong;
- HandleScope scope(isolate);
- // If we use the utility context, we have to set the security tokens so that
- // utility, evaluation and debug context can all access each other.
- Local<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
- utility_context_.Reset(isolate, Context::New(isolate, NULL, global_template));
- v8::Local<v8::Context> utility_context =
- v8::Local<v8::Context>::New(isolate, utility_context_);
- v8::Local<v8::Context> evaluation_context =
+// Turn a value into a human-readable string.
+Local<String> Shell::Stringify(Isolate* isolate, Local<Value> value) {
+ v8::Local<v8::Context> context =
v8::Local<v8::Context>::New(isolate, evaluation_context_);
- utility_context->SetSecurityToken(Undefined(isolate));
- evaluation_context->SetSecurityToken(Undefined(isolate));
- v8::Context::Scope context_scope(utility_context);
-
- // Run the d8 shell utility script in the utility context
- int source_index = i::NativesCollection<i::D8>::GetIndex("d8");
- i::Vector<const char> shell_source =
- i::NativesCollection<i::D8>::GetScriptSource(source_index);
- i::Vector<const char> shell_source_name =
- i::NativesCollection<i::D8>::GetScriptName(source_index);
- Local<String> source =
- String::NewFromUtf8(isolate, shell_source.start(), NewStringType::kNormal,
- shell_source.length()).ToLocalChecked();
- Local<String> name =
- String::NewFromUtf8(isolate, shell_source_name.start(),
- NewStringType::kNormal,
- shell_source_name.length()).ToLocalChecked();
- ScriptOrigin origin(name);
- Local<Script> script =
- Script::Compile(utility_context, source, &origin).ToLocalChecked();
- script->Run(utility_context).ToLocalChecked();
- // Mark the d8 shell script as native to avoid it showing up as normal source
- // in the debugger.
- i::Handle<i::Object> compiled_script = Utils::OpenHandle(*script);
- i::Handle<i::Script> script_object = compiled_script->IsJSFunction()
- ? i::Handle<i::Script>(i::Script::cast(
- i::JSFunction::cast(*compiled_script)->shared()->script()))
- : i::Handle<i::Script>(i::Script::cast(
- i::SharedFunctionInfo::cast(*compiled_script)->script()));
- script_object->set_type(i::Script::TYPE_EXTENSION);
+ if (stringify_function_.IsEmpty()) {
+ int source_index = i::NativesCollection<i::D8>::GetIndex("d8");
+ i::Vector<const char> source_string =
+ i::NativesCollection<i::D8>::GetScriptSource(source_index);
+ i::Vector<const char> source_name =
+ i::NativesCollection<i::D8>::GetScriptName(source_index);
+ Local<String> source =
+ String::NewFromUtf8(isolate, source_string.start(),
+ NewStringType::kNormal, source_string.length())
+ .ToLocalChecked();
+ Local<String> name =
+ String::NewFromUtf8(isolate, source_name.start(),
+ NewStringType::kNormal, source_name.length())
+ .ToLocalChecked();
+ ScriptOrigin origin(name);
+ Local<Script> script =
+ Script::Compile(context, source, &origin).ToLocalChecked();
+ stringify_function_.Reset(
+ isolate, script->Run(context).ToLocalChecked().As<Function>());
+ }
+ Local<Function> fun = Local<Function>::New(isolate, stringify_function_);
+ Local<Value> argv[1] = {value};
+ v8::TryCatch try_catch(isolate);
+ MaybeLocal<Value> result =
+ fun->Call(context, Undefined(isolate), 1, argv).ToLocalChecked();
+ if (result.IsEmpty()) return String::Empty(isolate);
+ return result.ToLocalChecked().As<String>();
}
#endif // !V8_SHARED
@@ -1320,7 +1280,6 @@ inline bool operator<(const CounterAndKey& lhs, const CounterAndKey& rhs) {
void Shell::OnExit(v8::Isolate* isolate) {
#ifndef V8_SHARED
- reinterpret_cast<i::Isolate*>(isolate)->DumpAndResetCompilationStats();
if (i::FLAG_dump_counters) {
int number_of_counters = 0;
for (CounterMap::Iterator i(counter_map_); i.More(); i.Next()) {
@@ -1997,8 +1956,6 @@ bool Shell::SetOptions(int argc, char* argv[]) {
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
- bool enable_harmony_modules = false;
-
// Set up isolated source groups.
options.isolate_sources = new SourceGroup[options.num_isolates];
SourceGroup* current = options.isolate_sources;
@@ -2011,7 +1968,6 @@ bool Shell::SetOptions(int argc, char* argv[]) {
current->Begin(argv, i + 1);
} else if (strcmp(str, "--module") == 0) {
// Pass on to SourceGroup, which understands this option.
- enable_harmony_modules = true;
} else if (strncmp(argv[i], "--", 2) == 0) {
printf("Warning: unknown flag %s.\nTry --help for options\n", argv[i]);
} else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
@@ -2027,10 +1983,6 @@ bool Shell::SetOptions(int argc, char* argv[]) {
SetFlagsFromString("--nologfile_per_isolate");
}
- if (enable_harmony_modules) {
- SetFlagsFromString("--harmony-modules");
- }
-
return true;
}
@@ -2241,8 +2193,6 @@ MaybeLocal<Value> Shell::DeserializeValue(Isolate* isolate,
int* offset) {
DCHECK(offset);
EscapableHandleScope scope(isolate);
- // This function should not use utility_context_ because it is running on a
- // different thread.
Local<Value> result;
SerializationTag tag = data.ReadTag(offset);
@@ -2523,16 +2473,13 @@ int Shell::Main(int argc, char* argv[]) {
// Run interactive shell if explicitly requested or if no script has been
// executed, but never on --test
if (options.use_interactive_shell()) {
-#ifndef V8_SHARED
- InstallUtilityScript(isolate);
-#endif // !V8_SHARED
RunShell(isolate);
}
// Shut down contexts and collect garbage.
evaluation_context_.Reset();
#ifndef V8_SHARED
- utility_context_.Reset();
+ stringify_function_.Reset();
#endif // !V8_SHARED
CollectGarbage(isolate);
}
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index 16f612c97a..321d9c1770 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -446,7 +446,7 @@ class Shell : public i::AllStatic {
static Global<Context> evaluation_context_;
static base::OnceType quit_once_;
#ifndef V8_SHARED
- static Global<Context> utility_context_;
+ static Global<Function> stringify_function_;
static CounterMap* counter_map_;
// We statically allocate a set of local counters to be used if we
// don't want to store the stats in a memory-mapped file
@@ -462,7 +462,7 @@ class Shell : public i::AllStatic {
static i::List<SharedArrayBuffer::Contents> externalized_shared_contents_;
static Counter* GetCounter(const char* name, bool is_histogram);
- static void InstallUtilityScript(Isolate* isolate);
+ static Local<String> Stringify(Isolate* isolate, Local<Value> value);
#endif // !V8_SHARED
static void Initialize(Isolate* isolate);
static void RunShell(Isolate* isolate);
diff --git a/deps/v8/src/d8.js b/deps/v8/src/d8.js
index 27a0bc39cd..e49c6b7458 100644
--- a/deps/v8/src/d8.js
+++ b/deps/v8/src/d8.js
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+(function() {
"use strict";
// A more universal stringify that supports more types than JSON.
@@ -89,3 +90,6 @@ function StringifyProxy(proxy, depth) {
}
return '[' + proxy_type + ' Proxy ' + Stringify(info_object, depth-1) + ']';
}
+
+return Stringify;
+})();
diff --git a/deps/v8/src/debug/arm/debug-arm.cc b/deps/v8/src/debug/arm/debug-arm.cc
index 5fdda4fedc..fa3540e53b 100644
--- a/deps/v8/src/debug/arm/debug-arm.cc
+++ b/deps/v8/src/debug/arm/debug-arm.cc
@@ -81,9 +81,15 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
__ mov(ip, Operand(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
__ push(ip);
- if (mode == SAVE_RESULT_REGISTER) __ push(r0);
-
- __ mov(r0, Operand::Zero()); // no arguments
+ // Push arguments for DebugBreak call.
+ if (mode == SAVE_RESULT_REGISTER) {
+ // Break on return.
+ __ push(r0);
+ } else {
+ // Non-return breaks.
+ __ Push(masm->isolate()->factory()->the_hole_value());
+ }
+ __ mov(r0, Operand(1));
__ mov(r1,
Operand(ExternalReference(
Runtime::FunctionForId(Runtime::kDebugBreak), masm->isolate())));
@@ -94,12 +100,14 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
if (FLAG_debug_code) {
for (int i = 0; i < kNumJSCallerSaved; i++) {
Register reg = {JSCallerSavedCode(i)};
- __ mov(reg, Operand(kDebugZapValue));
+ // Do not clobber r0 if mode is SAVE_RESULT_REGISTER. It will
+ // contain return value of the function.
+ if (!(reg.is(r0) && (mode == SAVE_RESULT_REGISTER))) {
+ __ mov(reg, Operand(kDebugZapValue));
+ }
}
}
- if (mode == SAVE_RESULT_REGISTER) __ pop(r0);
-
// Don't bother removing padding bytes pushed on the stack
// as the frame is going to be restored right away.
@@ -119,8 +127,7 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
// Load the function pointer off of our current stack frame.
- __ ldr(r1, MemOperand(fp,
- StandardFrameConstants::kConstantPoolOffset - kPointerSize));
+ __ ldr(r1, MemOperand(fp, FrameDropperFrameConstants::kFunctionOffset));
// Pop return address, frame and constant pool pointer (if
// FLAG_enable_embedded_constant_pool).
diff --git a/deps/v8/src/debug/arm64/debug-arm64.cc b/deps/v8/src/debug/arm64/debug-arm64.cc
index 3e4b67c938..cd017219d2 100644
--- a/deps/v8/src/debug/arm64/debug-arm64.cc
+++ b/deps/v8/src/debug/arm64/debug-arm64.cc
@@ -92,9 +92,15 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
__ Mov(scratch, Smi::FromInt(LiveEdit::kFramePaddingInitialSize));
__ Push(scratch);
- if (mode == SAVE_RESULT_REGISTER) __ Push(x0);
-
- __ Mov(x0, 0); // No arguments.
+ // Push arguments for DebugBreak call.
+ if (mode == SAVE_RESULT_REGISTER) {
+ // Break on return.
+ __ Push(x0);
+ } else {
+ // Non-return breaks.
+ __ Push(masm->isolate()->factory()->the_hole_value());
+ }
+ __ Mov(x0, 1);
__ Mov(x1, ExternalReference(Runtime::FunctionForId(Runtime::kDebugBreak),
masm->isolate()));
@@ -104,13 +110,14 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
if (FLAG_debug_code) {
for (int i = 0; i < kNumJSCallerSaved; i++) {
Register reg = Register::XRegFromCode(JSCallerSavedCode(i));
- __ Mov(reg, Operand(kDebugZapValue));
+ // Do not clobber x0 if mode is SAVE_RESULT_REGISTER. It will
+ // contain return value of the function.
+ if (!(reg.is(x0) && (mode == SAVE_RESULT_REGISTER))) {
+ __ Mov(reg, Operand(kDebugZapValue));
+ }
}
}
- // Restore the register values from the expression stack.
- if (mode == SAVE_RESULT_REGISTER) __ Pop(x0);
-
// Don't bother removing padding bytes pushed on the stack
// as the frame is going to be restored right away.
@@ -130,10 +137,12 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
// We do not know our frame height, but set sp based on fp.
- __ Sub(masm->StackPointer(), fp, kPointerSize);
+ __ Add(masm->StackPointer(), fp, FrameDropperFrameConstants::kFunctionOffset);
__ AssertStackConsistency();
- __ Pop(x1, fp, lr); // Function, Frame, Return address.
+ __ Pop(x1); // Function
+ __ Mov(masm->StackPointer(), Operand(fp));
+ __ Pop(fp, lr); // Frame, Return address.
ParameterCount dummy(0);
__ FloodFunctionIfStepping(x1, no_reg, dummy, dummy);
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index 8114c21fe8..dae1348322 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -73,14 +73,12 @@ MaybeHandle<Object> DebugEvaluate::Local(Isolate* isolate,
ContextBuilder context_builder(isolate, frame, inlined_jsframe_index);
if (isolate->has_pending_exception()) return MaybeHandle<Object>();
- Handle<Context> context = context_builder.native_context();
+ Handle<Context> context = context_builder.evaluation_context();
Handle<JSObject> receiver(context->global_proxy());
- MaybeHandle<Object> maybe_result = Evaluate(
- isolate, context_builder.outer_info(),
- context_builder.innermost_context(), context_extension, receiver, source);
- if (!maybe_result.is_null() && !FLAG_debug_eval_readonly_locals) {
- context_builder.UpdateValues();
- }
+ MaybeHandle<Object> maybe_result =
+ Evaluate(isolate, context_builder.outer_info(), context,
+ context_extension, receiver, source);
+ if (!maybe_result.is_null()) context_builder.UpdateValues();
return maybe_result;
}
@@ -130,113 +128,81 @@ DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate,
Handle<JSFunction> local_function =
Handle<JSFunction>::cast(frame_inspector.GetFunction());
Handle<Context> outer_context(local_function->context());
- native_context_ = Handle<Context>(outer_context->native_context());
- Handle<JSFunction> global_function(native_context_->closure());
- outer_info_ = handle(global_function->shared());
- Handle<Context> inner_context;
-
- bool stop = false;
-
- // Iterate the original context chain to create a context chain that reflects
- // our needs. The original context chain may look like this:
- // <native context> <outer contexts> <function context> <inner contexts>
- // In the resulting context chain, we want to materialize the receiver,
- // the parameters of the current function, the stack locals. We only
- // materialize context variables that the function already references,
- // because only for those variables we can be sure that they will be resolved
- // correctly. Variables that are not referenced by the function may be
- // context-allocated and thus accessible, but may be shadowed by stack-
- // allocated variables and the resolution would be incorrect.
- // The result will look like this:
- // <native context> <receiver context>
- // <materialized stack and accessible context vars> <inner contexts>
- // All contexts use the closure of the native context, since there is no
- // function context in the chain. Variables that cannot be resolved are
- // bound to toplevel (script contexts or global object).
- // Once debug-evaluate has been executed, the changes to the materialized
- // objects are written back to the original context chain. Any changes to
- // the original context chain will therefore be overwritten.
+ evaluation_context_ = outer_context;
+ outer_info_ = handle(local_function->shared());
+ Factory* factory = isolate->factory();
+
+ // To evaluate as if we were running eval at the point of the debug break,
+ // we reconstruct the context chain as follows:
+ // - To make stack-allocated variables visible, we materialize them and
+ // use a debug-evaluate context to wrap both the materialized object and
+ // the original context.
+ // - We use the original context chain from the function context to the
+ // native context.
+ // - Between the function scope and the native context, we only resolve
+ // variable names that the current function already uses. Only for these
+ // names we can be sure that they will be correctly resolved. For the
+ // rest, we only resolve to with, script, and native contexts. We use a
+ // whitelist to implement that.
+ // Context::Lookup has special handling for debug-evaluate contexts:
+ // - Look up in the materialized stack variables.
+ // - Look up in the original context.
+ // - Check the whitelist to find out whether to skip contexts during lookup.
const ScopeIterator::Option option = ScopeIterator::COLLECT_NON_LOCALS;
for (ScopeIterator it(isolate, &frame_inspector, option);
- !it.Failed() && !it.Done() && !stop; it.Next()) {
+ !it.Failed() && !it.Done(); it.Next()) {
ScopeIterator::ScopeType scope_type = it.Type();
if (scope_type == ScopeIterator::ScopeTypeLocal) {
DCHECK_EQ(FUNCTION_SCOPE, it.CurrentScopeInfo()->scope_type());
- it.GetNonLocals(&non_locals_);
+ Handle<JSObject> materialized = factory->NewJSObjectWithNullProto();
Handle<Context> local_context =
it.HasContext() ? it.CurrentContext() : outer_context;
-
- // The "this" binding, if any, can't be bound via "with". If we need
- // to, add another node onto the outer context to bind "this".
- Handle<Context> receiver_context =
- MaterializeReceiver(native_context_, local_context, local_function,
- global_function, it.ThisIsNonLocal());
-
- Handle<JSObject> materialized_function = NewJSObjectWithNullProto();
- frame_inspector.MaterializeStackLocals(materialized_function,
- local_function);
- MaterializeArgumentsObject(materialized_function, local_function);
- MaterializeContextChain(materialized_function, local_context);
-
- Handle<Context> with_context = isolate->factory()->NewWithContext(
- global_function, receiver_context, materialized_function);
-
+ Handle<StringSet> non_locals = it.GetNonLocals();
+ MaterializeReceiver(materialized, local_context, local_function,
+ non_locals);
+ frame_inspector.MaterializeStackLocals(materialized, local_function);
+ MaterializeArgumentsObject(materialized, local_function);
ContextChainElement context_chain_element;
- context_chain_element.original_context = local_context;
- context_chain_element.materialized_object = materialized_function;
context_chain_element.scope_info = it.CurrentScopeInfo();
+ context_chain_element.materialized_object = materialized;
+ // Non-locals that are already being referenced by the current function
+ // are guaranteed to be correctly resolved.
+ context_chain_element.whitelist = non_locals;
+ if (it.HasContext()) {
+ context_chain_element.wrapped_context = it.CurrentContext();
+ }
context_chain_.Add(context_chain_element);
-
- stop = true;
- RecordContextsInChain(&inner_context, receiver_context, with_context);
+ evaluation_context_ = outer_context;
+ break;
} else if (scope_type == ScopeIterator::ScopeTypeCatch ||
scope_type == ScopeIterator::ScopeTypeWith) {
- Handle<Context> cloned_context = Handle<Context>::cast(
- isolate->factory()->CopyFixedArray(it.CurrentContext()));
-
ContextChainElement context_chain_element;
- context_chain_element.original_context = it.CurrentContext();
- context_chain_element.cloned_context = cloned_context;
+ Handle<Context> current_context = it.CurrentContext();
+ if (!current_context->IsDebugEvaluateContext()) {
+ context_chain_element.wrapped_context = current_context;
+ }
context_chain_.Add(context_chain_element);
-
- RecordContextsInChain(&inner_context, cloned_context, cloned_context);
} else if (scope_type == ScopeIterator::ScopeTypeBlock) {
- Handle<JSObject> materialized_object = NewJSObjectWithNullProto();
- frame_inspector.MaterializeStackLocals(materialized_object,
+ Handle<JSObject> materialized = factory->NewJSObjectWithNullProto();
+ frame_inspector.MaterializeStackLocals(materialized,
it.CurrentScopeInfo());
+ ContextChainElement context_chain_element;
+ context_chain_element.scope_info = it.CurrentScopeInfo();
+ context_chain_element.materialized_object = materialized;
if (it.HasContext()) {
- Handle<Context> cloned_context = Handle<Context>::cast(
- isolate->factory()->CopyFixedArray(it.CurrentContext()));
- Handle<Context> with_context = isolate->factory()->NewWithContext(
- global_function, cloned_context, materialized_object);
-
- ContextChainElement context_chain_element;
- context_chain_element.original_context = it.CurrentContext();
- context_chain_element.cloned_context = cloned_context;
- context_chain_element.materialized_object = materialized_object;
- context_chain_element.scope_info = it.CurrentScopeInfo();
- context_chain_.Add(context_chain_element);
-
- RecordContextsInChain(&inner_context, cloned_context, with_context);
- } else {
- Handle<Context> with_context = isolate->factory()->NewWithContext(
- global_function, outer_context, materialized_object);
-
- ContextChainElement context_chain_element;
- context_chain_element.materialized_object = materialized_object;
- context_chain_element.scope_info = it.CurrentScopeInfo();
- context_chain_.Add(context_chain_element);
-
- RecordContextsInChain(&inner_context, with_context, with_context);
+ context_chain_element.wrapped_context = it.CurrentContext();
}
+ context_chain_.Add(context_chain_element);
} else {
- stop = true;
+ break;
}
}
- if (innermost_context_.is_null()) {
- innermost_context_ = outer_context;
+
+ for (int i = context_chain_.length() - 1; i >= 0; i--) {
+ evaluation_context_ = factory->NewDebugEvaluateContext(
+ evaluation_context_, context_chain_[i].materialized_object,
+ context_chain_[i].wrapped_context, context_chain_[i].whitelist);
}
- DCHECK(!innermost_context_.is_null());
}
@@ -244,53 +210,16 @@ void DebugEvaluate::ContextBuilder::UpdateValues() {
// TODO(yangguo): remove updating values.
for (int i = 0; i < context_chain_.length(); i++) {
ContextChainElement element = context_chain_[i];
- if (!element.original_context.is_null() &&
- !element.cloned_context.is_null()) {
- Handle<Context> cloned_context = element.cloned_context;
- cloned_context->CopyTo(
- Context::MIN_CONTEXT_SLOTS, *element.original_context,
- Context::MIN_CONTEXT_SLOTS,
- cloned_context->length() - Context::MIN_CONTEXT_SLOTS);
- }
if (!element.materialized_object.is_null()) {
- // Write back potential changes to materialized stack locals to the
- // stack.
+ // Write back potential changes to materialized stack locals to the stack.
FrameInspector(frame_, inlined_jsframe_index_, isolate_)
.UpdateStackLocalsFromMaterializedObject(element.materialized_object,
element.scope_info);
- if (element.scope_info->scope_type() == FUNCTION_SCOPE) {
- DCHECK_EQ(context_chain_.length() - 1, i);
- UpdateContextChainFromMaterializedObject(element.materialized_object,
- element.original_context);
- }
}
}
}
-Handle<JSObject> DebugEvaluate::ContextBuilder::NewJSObjectWithNullProto() {
- Handle<JSObject> result =
- isolate_->factory()->NewJSObject(isolate_->object_function());
- Handle<Map> new_map =
- Map::Copy(Handle<Map>(result->map()), "ObjectWithNullProto");
- Map::SetPrototype(new_map, isolate_->factory()->null_value());
- JSObject::MigrateToMap(result, new_map);
- return result;
-}
-
-
-void DebugEvaluate::ContextBuilder::RecordContextsInChain(
- Handle<Context>* inner_context, Handle<Context> first,
- Handle<Context> last) {
- if (!inner_context->is_null()) {
- (*inner_context)->set_previous(*last);
- } else {
- innermost_context_ = last;
- }
- *inner_context = first;
-}
-
-
void DebugEvaluate::ContextBuilder::MaterializeArgumentsObject(
Handle<JSObject> target, Handle<JSFunction> function) {
// Do not materialize the arguments object for eval or top-level code.
@@ -309,98 +238,20 @@ void DebugEvaluate::ContextBuilder::MaterializeArgumentsObject(
.Check();
}
-
-MaybeHandle<Object> DebugEvaluate::ContextBuilder::LoadFromContext(
- Handle<Context> context, Handle<String> name, bool* global) {
- static const ContextLookupFlags flags = FOLLOW_CONTEXT_CHAIN;
- int index;
- PropertyAttributes attributes;
- BindingFlags binding;
- Handle<Object> holder =
- context->Lookup(name, flags, &index, &attributes, &binding);
- if (holder.is_null()) return MaybeHandle<Object>();
- Handle<Object> value;
- if (index != Context::kNotFound) { // Found on context.
- Handle<Context> context = Handle<Context>::cast(holder);
- // Do not shadow variables on the script context.
- *global = context->IsScriptContext();
- return Handle<Object>(context->get(index), isolate_);
- } else { // Found on object.
- Handle<JSReceiver> object = Handle<JSReceiver>::cast(holder);
- // Do not shadow properties on the global object.
- *global = object->IsJSGlobalObject();
- return JSReceiver::GetDataProperty(object, name);
- }
-}
-
-
-void DebugEvaluate::ContextBuilder::MaterializeContextChain(
- Handle<JSObject> target, Handle<Context> context) {
- for (const Handle<String>& name : non_locals_) {
- HandleScope scope(isolate_);
- Handle<Object> value;
- bool global;
- if (!LoadFromContext(context, name, &global).ToHandle(&value) || global) {
- // If resolving the variable fails, skip it. If it resolves to a global
- // variable, skip it as well since it's not read-only and can be resolved
- // within debug-evaluate.
- continue;
- }
- if (value->IsTheHole()) continue; // Value is not initialized yet (in TDZ).
- JSObject::SetOwnPropertyIgnoreAttributes(target, name, value, NONE).Check();
- }
-}
-
-
-void DebugEvaluate::ContextBuilder::StoreToContext(Handle<Context> context,
- Handle<String> name,
- Handle<Object> value) {
- static const ContextLookupFlags flags = FOLLOW_CONTEXT_CHAIN;
- int index;
- PropertyAttributes attributes;
- BindingFlags binding;
- Handle<Object> holder =
- context->Lookup(name, flags, &index, &attributes, &binding);
- if (holder.is_null()) return;
- if (attributes & READ_ONLY) return;
- if (index != Context::kNotFound) { // Found on context.
- Handle<Context> context = Handle<Context>::cast(holder);
- context->set(index, *value);
- } else { // Found on object.
- Handle<JSReceiver> object = Handle<JSReceiver>::cast(holder);
- LookupIterator lookup(object, name);
- if (lookup.state() != LookupIterator::DATA) return;
- CHECK(JSReceiver::SetDataProperty(&lookup, value).FromJust());
- }
-}
-
-
-void DebugEvaluate::ContextBuilder::UpdateContextChainFromMaterializedObject(
- Handle<JSObject> source, Handle<Context> context) {
- // TODO(yangguo): check whether overwriting context fields is actually safe
- // wrt fields we consider constant.
- for (const Handle<String>& name : non_locals_) {
- HandleScope scope(isolate_);
- Handle<Object> value = JSReceiver::GetDataProperty(source, name);
- StoreToContext(context, name, value);
- }
-}
-
-
-Handle<Context> DebugEvaluate::ContextBuilder::MaterializeReceiver(
- Handle<Context> parent_context, Handle<Context> lookup_context,
- Handle<JSFunction> local_function, Handle<JSFunction> global_function,
- bool this_is_non_local) {
- Handle<Object> receiver = isolate_->factory()->undefined_value();
- Handle<String> this_string = isolate_->factory()->this_string();
- if (this_is_non_local) {
- bool global;
- LoadFromContext(lookup_context, this_string, &global).ToHandle(&receiver);
- } else if (local_function->shared()->scope_info()->HasReceiver()) {
- receiver = handle(frame_->receiver(), isolate_);
+void DebugEvaluate::ContextBuilder::MaterializeReceiver(
+ Handle<JSObject> target, Handle<Context> local_context,
+ Handle<JSFunction> local_function, Handle<StringSet> non_locals) {
+ Handle<Object> recv = isolate_->factory()->undefined_value();
+ Handle<String> name = isolate_->factory()->this_string();
+ if (non_locals->Has(name)) {
+ // 'this' is allocated in an outer context and is is already being
+ // referenced by the current function, so it can be correctly resolved.
+ return;
+ } else if (local_function->shared()->scope_info()->HasReceiver() &&
+ !frame_->receiver()->IsTheHole()) {
+ recv = handle(frame_->receiver(), isolate_);
}
- return isolate_->factory()->NewCatchContext(global_function, parent_context,
- this_string, receiver);
+ JSObject::SetOwnPropertyIgnoreAttributes(target, name, recv, NONE).Check();
}
} // namespace internal
diff --git a/deps/v8/src/debug/debug-evaluate.h b/deps/v8/src/debug/debug-evaluate.h
index c0b1f027d1..26f4e414e7 100644
--- a/deps/v8/src/debug/debug-evaluate.h
+++ b/deps/v8/src/debug/debug-evaluate.h
@@ -53,51 +53,30 @@ class DebugEvaluate : public AllStatic {
void UpdateValues();
- Handle<Context> innermost_context() const { return innermost_context_; }
- Handle<Context> native_context() const { return native_context_; }
+ Handle<Context> evaluation_context() const { return evaluation_context_; }
Handle<SharedFunctionInfo> outer_info() const { return outer_info_; }
private:
struct ContextChainElement {
- Handle<Context> original_context;
- Handle<Context> cloned_context;
- Handle<JSObject> materialized_object;
Handle<ScopeInfo> scope_info;
+ Handle<Context> wrapped_context;
+ Handle<JSObject> materialized_object;
+ Handle<StringSet> whitelist;
};
- void RecordContextsInChain(Handle<Context>* inner_context,
- Handle<Context> first, Handle<Context> last);
-
- Handle<JSObject> NewJSObjectWithNullProto();
-
// Helper function to find or create the arguments object for
// Runtime_DebugEvaluate.
void MaterializeArgumentsObject(Handle<JSObject> target,
Handle<JSFunction> function);
- void MaterializeContextChain(Handle<JSObject> target,
- Handle<Context> context);
-
- void UpdateContextChainFromMaterializedObject(Handle<JSObject> source,
- Handle<Context> context);
-
- Handle<Context> MaterializeReceiver(Handle<Context> parent_context,
- Handle<Context> lookup_context,
- Handle<JSFunction> local_function,
- Handle<JSFunction> global_function,
- bool this_is_non_local);
-
- MaybeHandle<Object> LoadFromContext(Handle<Context> context,
- Handle<String> name, bool* global);
-
- void StoreToContext(Handle<Context> context, Handle<String> name,
- Handle<Object> value);
+ void MaterializeReceiver(Handle<JSObject> target,
+ Handle<Context> local_context,
+ Handle<JSFunction> local_function,
+ Handle<StringSet> non_locals);
Handle<SharedFunctionInfo> outer_info_;
- Handle<Context> innermost_context_;
- Handle<Context> native_context_;
+ Handle<Context> evaluation_context_;
List<ContextChainElement> context_chain_;
- List<Handle<String> > non_locals_;
Isolate* isolate_;
JavaScriptFrame* frame_;
int inlined_jsframe_index_;
diff --git a/deps/v8/src/debug/debug-frames.cc b/deps/v8/src/debug/debug-frames.cc
index 25634be8d2..a7956ff417 100644
--- a/deps/v8/src/debug/debug-frames.cc
+++ b/deps/v8/src/debug/debug-frames.cc
@@ -133,8 +133,10 @@ void FrameInspector::MaterializeStackLocals(Handle<JSObject> target,
if (scope_info->LocalIsSynthetic(i)) continue;
Handle<String> name(scope_info->StackLocalName(i));
Handle<Object> value = GetExpression(scope_info->StackLocalIndex(i));
+ // TODO(yangguo): We convert optimized out values to {undefined} when they
+ // are passed to the debugger. Eventually we should handle them somehow.
if (value->IsTheHole()) value = isolate_->factory()->undefined_value();
-
+ if (value->IsOptimizedOut()) value = isolate_->factory()->undefined_value();
JSObject::SetOwnPropertyIgnoreAttributes(target, name, value, NONE).Check();
}
}
diff --git a/deps/v8/src/debug/debug-scopes.cc b/deps/v8/src/debug/debug-scopes.cc
index e785384a42..d9c615b01b 100644
--- a/deps/v8/src/debug/debug-scopes.cc
+++ b/deps/v8/src/debug/debug-scopes.cc
@@ -19,7 +19,6 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
: isolate_(isolate),
frame_inspector_(frame_inspector),
nested_scope_chain_(4),
- non_locals_(nullptr),
seen_script_scope_(false),
failed_(false) {
if (!frame_inspector->GetContext()->IsContext() ||
@@ -73,7 +72,9 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
}
}
if (scope_info->scope_type() == FUNCTION_SCOPE) {
- nested_scope_chain_.Add(scope_info);
+ nested_scope_chain_.Add(ExtendedScopeInfo(scope_info,
+ shared_info->start_position(),
+ shared_info->end_position()));
}
if (!collect_non_locals) return;
}
@@ -81,7 +82,7 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
// Reparse the code and analyze the scopes.
Scope* scope = NULL;
// Check whether we are in global, eval or function code.
- Zone zone;
+ Zone zone(isolate->allocator());
if (scope_info->scope_type() != FUNCTION_SCOPE) {
// Global or eval code.
Handle<Script> script(Script::cast(shared_info->script()));
@@ -107,6 +108,7 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
if (!ignore_nested_scopes) RetrieveScopeChain(scope);
if (collect_non_locals) CollectNonLocals(scope);
}
+ UnwrapEvaluationContext();
}
@@ -114,10 +116,26 @@ ScopeIterator::ScopeIterator(Isolate* isolate, Handle<JSFunction> function)
: isolate_(isolate),
frame_inspector_(NULL),
context_(function->context()),
- non_locals_(nullptr),
seen_script_scope_(false),
failed_(false) {
if (!function->shared()->IsSubjectToDebugging()) context_ = Handle<Context>();
+ UnwrapEvaluationContext();
+}
+
+void ScopeIterator::UnwrapEvaluationContext() {
+ while (true) {
+ if (context_.is_null()) return;
+ if (!context_->IsDebugEvaluateContext()) return;
+ // An existing debug-evaluate context can only be outside the local scope.
+ DCHECK(nested_scope_chain_.is_empty());
+ Handle<Object> wrapped(context_->get(Context::WRAPPED_CONTEXT_INDEX),
+ isolate_);
+ if (wrapped->IsContext()) {
+ context_ = Handle<Context>::cast(wrapped);
+ } else {
+ context_ = Handle<Context>(context_->previous(), isolate_);
+ }
+ }
}
@@ -130,11 +148,32 @@ MUST_USE_RESULT MaybeHandle<JSObject> ScopeIterator::MaterializeScopeDetails() {
Handle<JSObject> scope_object;
ASSIGN_RETURN_ON_EXCEPTION(isolate_, scope_object, ScopeObject(), JSObject);
details->set(kScopeDetailsObjectIndex, *scope_object);
- if (HasContext() && CurrentContext()->closure() != NULL) {
- Handle<String> closure_name = JSFunction::GetDebugName(
- Handle<JSFunction>(CurrentContext()->closure()));
- if (!closure_name.is_null() && (closure_name->length() != 0))
+ Handle<JSFunction> js_function = HasContext()
+ ? handle(CurrentContext()->closure())
+ : Handle<JSFunction>::null();
+ if (Type() == ScopeTypeGlobal || Type() == ScopeTypeScript) {
+ return isolate_->factory()->NewJSArrayWithElements(details);
+ }
+
+ int start_position = 0;
+ int end_position = 0;
+ if (!nested_scope_chain_.is_empty()) {
+ js_function = GetFunction();
+ start_position = nested_scope_chain_.last().start_position;
+ end_position = nested_scope_chain_.last().end_position;
+ } else if (!js_function.is_null()) {
+ start_position = js_function->shared()->start_position();
+ end_position = js_function->shared()->end_position();
+ }
+
+ if (!js_function.is_null()) {
+ Handle<String> closure_name = JSFunction::GetDebugName(js_function);
+ if (!closure_name.is_null() && closure_name->length() != 0) {
details->set(kScopeDetailsNameIndex, *closure_name);
+ }
+ details->set(kScopeDetailsStartPositionIndex, Smi::FromInt(start_position));
+ details->set(kScopeDetailsEndPositionIndex, Smi::FromInt(end_position));
+ details->set(kScopeDetailsFunctionIndex, *js_function);
}
return isolate_->factory()->NewJSArrayWithElements(details);
}
@@ -147,30 +186,28 @@ void ScopeIterator::Next() {
// The global scope is always the last in the chain.
DCHECK(context_->IsNativeContext());
context_ = Handle<Context>();
- return;
- }
- if (scope_type == ScopeTypeScript) {
+ } else if (scope_type == ScopeTypeScript) {
seen_script_scope_ = true;
if (context_->IsScriptContext()) {
context_ = Handle<Context>(context_->previous(), isolate_);
}
if (!nested_scope_chain_.is_empty()) {
- DCHECK_EQ(nested_scope_chain_.last()->scope_type(), SCRIPT_SCOPE);
+ DCHECK_EQ(nested_scope_chain_.last().scope_info->scope_type(),
+ SCRIPT_SCOPE);
nested_scope_chain_.RemoveLast();
DCHECK(nested_scope_chain_.is_empty());
}
CHECK(context_->IsNativeContext());
- return;
- }
- if (nested_scope_chain_.is_empty()) {
+ } else if (nested_scope_chain_.is_empty()) {
context_ = Handle<Context>(context_->previous(), isolate_);
} else {
- if (nested_scope_chain_.last()->HasContext()) {
+ if (nested_scope_chain_.last().scope_info->HasContext()) {
DCHECK(context_->previous() != NULL);
context_ = Handle<Context>(context_->previous(), isolate_);
}
nested_scope_chain_.RemoveLast();
}
+ UnwrapEvaluationContext();
}
@@ -178,7 +215,7 @@ void ScopeIterator::Next() {
ScopeIterator::ScopeType ScopeIterator::Type() {
DCHECK(!failed_);
if (!nested_scope_chain_.is_empty()) {
- Handle<ScopeInfo> scope_info = nested_scope_chain_.last();
+ Handle<ScopeInfo> scope_info = nested_scope_chain_.last().scope_info;
switch (scope_info->scope_type()) {
case FUNCTION_SCOPE:
DCHECK(context_->IsFunctionContext() || !scope_info->HasContext());
@@ -190,7 +227,7 @@ ScopeIterator::ScopeType ScopeIterator::Type() {
DCHECK(context_->IsScriptContext() || context_->IsNativeContext());
return ScopeTypeScript;
case WITH_SCOPE:
- DCHECK(context_->IsWithContext());
+ DCHECK(context_->IsWithContext() || context_->IsDebugEvaluateContext());
return ScopeTypeWith;
case CATCH_SCOPE:
DCHECK(context_->IsCatchContext());
@@ -223,7 +260,7 @@ ScopeIterator::ScopeType ScopeIterator::Type() {
if (context_->IsScriptContext()) {
return ScopeTypeScript;
}
- DCHECK(context_->IsWithContext());
+ DCHECK(context_->IsWithContext() || context_->IsDebugEvaluateContext());
return ScopeTypeWith;
}
@@ -240,9 +277,7 @@ MaybeHandle<JSObject> ScopeIterator::ScopeObject() {
DCHECK(nested_scope_chain_.length() == 1);
return MaterializeLocalScope();
case ScopeIterator::ScopeTypeWith:
- // Return the with object.
- // TODO(neis): This breaks for proxies.
- return handle(JSObject::cast(CurrentContext()->extension_receiver()));
+ return WithContextExtension();
case ScopeIterator::ScopeTypeCatch:
return MaterializeCatchScope();
case ScopeIterator::ScopeTypeClosure:
@@ -262,7 +297,7 @@ bool ScopeIterator::HasContext() {
ScopeType type = Type();
if (type == ScopeTypeBlock || type == ScopeTypeLocal) {
if (!nested_scope_chain_.is_empty()) {
- return nested_scope_chain_.last()->HasContext();
+ return nested_scope_chain_.last().scope_info->HasContext();
}
}
return true;
@@ -298,7 +333,7 @@ bool ScopeIterator::SetVariableValue(Handle<String> variable_name,
Handle<ScopeInfo> ScopeIterator::CurrentScopeInfo() {
DCHECK(!failed_);
if (!nested_scope_chain_.is_empty()) {
- return nested_scope_chain_.last();
+ return nested_scope_chain_.last().scope_info;
} else if (context_->IsBlockContext()) {
return Handle<ScopeInfo>(context_->scope_info());
} else if (context_->IsFunctionContext()) {
@@ -313,33 +348,14 @@ Handle<Context> ScopeIterator::CurrentContext() {
if (Type() == ScopeTypeGlobal || Type() == ScopeTypeScript ||
nested_scope_chain_.is_empty()) {
return context_;
- } else if (nested_scope_chain_.last()->HasContext()) {
+ } else if (nested_scope_chain_.last().scope_info->HasContext()) {
return context_;
} else {
return Handle<Context>();
}
}
-
-void ScopeIterator::GetNonLocals(List<Handle<String> >* list_out) {
- Handle<String> this_string = isolate_->factory()->this_string();
- for (HashMap::Entry* entry = non_locals_->Start(); entry != nullptr;
- entry = non_locals_->Next(entry)) {
- Handle<String> name(reinterpret_cast<String**>(entry->key));
- // We need to treat "this" differently.
- if (name.is_identical_to(this_string)) continue;
- list_out->Add(Handle<String>(reinterpret_cast<String**>(entry->key)));
- }
-}
-
-
-bool ScopeIterator::ThisIsNonLocal() {
- Handle<String> this_string = isolate_->factory()->this_string();
- void* key = reinterpret_cast<void*>(this_string.location());
- HashMap::Entry* entry = non_locals_->Lookup(key, this_string->Hash());
- return entry != nullptr;
-}
-
+Handle<StringSet> ScopeIterator::GetNonLocals() { return non_locals_; }
#ifdef DEBUG
// Debug print of the content of the current scope.
@@ -409,7 +425,7 @@ void ScopeIterator::DebugPrint() {
void ScopeIterator::RetrieveScopeChain(Scope* scope) {
if (scope != NULL) {
int source_position = frame_inspector_->GetSourcePosition();
- scope->GetNestedScopeChain(isolate_, &nested_scope_chain_, source_position);
+ GetNestedScopeChain(isolate_, scope, source_position);
} else {
// A failed reparse indicates that the preparser has diverged from the
// parser or that the preparse data given to the initial parse has been
@@ -425,9 +441,8 @@ void ScopeIterator::RetrieveScopeChain(Scope* scope) {
void ScopeIterator::CollectNonLocals(Scope* scope) {
if (scope != NULL) {
- DCHECK_NULL(non_locals_);
- non_locals_ = new HashMap(InternalizedStringMatch);
- scope->CollectNonLocals(non_locals_);
+ DCHECK(non_locals_.is_null());
+ non_locals_ = scope->CollectNonLocals(StringSet::New(isolate_));
}
}
@@ -532,6 +547,16 @@ Handle<JSObject> ScopeIterator::MaterializeCatchScope() {
return catch_scope;
}
+// Retrieve the with-context extension object. If the extension object is
+// a proxy, return an empty object.
+Handle<JSObject> ScopeIterator::WithContextExtension() {
+ Handle<Context> context = CurrentContext();
+ DCHECK(context->IsWithContext());
+ if (context->extension_receiver()->IsJSProxy()) {
+ return isolate_->factory()->NewJSObjectWithNullProto();
+ }
+ return handle(JSObject::cast(context->extension_receiver()));
+}
// Create a plain JSObject which materializes the block scope for the specified
// block context.
@@ -541,7 +566,7 @@ Handle<JSObject> ScopeIterator::MaterializeBlockScope() {
Handle<Context> context = Handle<Context>::null();
if (!nested_scope_chain_.is_empty()) {
- Handle<ScopeInfo> scope_info = nested_scope_chain_.last();
+ Handle<ScopeInfo> scope_info = nested_scope_chain_.last().scope_info;
frame_inspector_->MaterializeStackLocals(block_scope, scope_info);
if (scope_info->HasContext()) context = CurrentContext();
} else {
@@ -815,5 +840,24 @@ bool ScopeIterator::CopyContextExtensionToScopeObject(
return true;
}
+void ScopeIterator::GetNestedScopeChain(Isolate* isolate, Scope* scope,
+ int position) {
+ if (!scope->is_eval_scope()) {
+ nested_scope_chain_.Add(ExtendedScopeInfo(scope->GetScopeInfo(isolate),
+ scope->start_position(),
+ scope->end_position()));
+ }
+ for (int i = 0; i < scope->inner_scopes()->length(); i++) {
+ Scope* inner_scope = scope->inner_scopes()->at(i);
+ int beg_pos = inner_scope->start_position();
+ int end_pos = inner_scope->end_position();
+ DCHECK(beg_pos >= 0 && end_pos >= 0);
+ if (beg_pos <= position && position < end_pos) {
+ GetNestedScopeChain(isolate, inner_scope, position);
+ return;
+ }
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/debug/debug-scopes.h b/deps/v8/src/debug/debug-scopes.h
index fbdf632687..4e95fc4ba4 100644
--- a/deps/v8/src/debug/debug-scopes.h
+++ b/deps/v8/src/debug/debug-scopes.h
@@ -31,7 +31,10 @@ class ScopeIterator {
static const int kScopeDetailsTypeIndex = 0;
static const int kScopeDetailsObjectIndex = 1;
static const int kScopeDetailsNameIndex = 2;
- static const int kScopeDetailsSize = 3;
+ static const int kScopeDetailsStartPositionIndex = 3;
+ static const int kScopeDetailsEndPositionIndex = 4;
+ static const int kScopeDetailsFunctionIndex = 5;
+ static const int kScopeDetailsSize = 6;
enum Option { DEFAULT, IGNORE_NESTED_SCOPES, COLLECT_NON_LOCALS };
@@ -40,8 +43,6 @@ class ScopeIterator {
ScopeIterator(Isolate* isolate, Handle<JSFunction> function);
- ~ScopeIterator() { delete non_locals_; }
-
MUST_USE_RESULT MaybeHandle<JSObject> MaterializeScopeDetails();
// More scopes?
@@ -72,10 +73,8 @@ class ScopeIterator {
// be an actual context.
Handle<Context> CurrentContext();
- // Populate the list with collected non-local variable names.
- void GetNonLocals(List<Handle<String> >* list_out);
-
- bool ThisIsNonLocal();
+ // Populate the set with collected non-local variable names.
+ Handle<StringSet> GetNonLocals();
#ifdef DEBUG
// Debug print of the content of the current scope.
@@ -83,11 +82,19 @@ class ScopeIterator {
#endif
private:
+ struct ExtendedScopeInfo {
+ ExtendedScopeInfo(Handle<ScopeInfo> info, int start, int end)
+ : scope_info(info), start_position(start), end_position(end) {}
+ Handle<ScopeInfo> scope_info;
+ int start_position;
+ int end_position;
+ };
+
Isolate* isolate_;
FrameInspector* const frame_inspector_;
Handle<Context> context_;
- List<Handle<ScopeInfo> > nested_scope_chain_;
- HashMap* non_locals_;
+ List<ExtendedScopeInfo> nested_scope_chain_;
+ Handle<StringSet> non_locals_;
bool seen_script_scope_;
bool failed_;
@@ -99,24 +106,19 @@ class ScopeIterator {
return Handle<JSFunction>::cast(frame_inspector_->GetFunction());
}
- static bool InternalizedStringMatch(void* key1, void* key2) {
- Handle<String> s1(reinterpret_cast<String**>(key1));
- Handle<String> s2(reinterpret_cast<String**>(key2));
- DCHECK(s1->IsInternalizedString());
- DCHECK(s2->IsInternalizedString());
- return s1.is_identical_to(s2);
- }
-
void RetrieveScopeChain(Scope* scope);
void CollectNonLocals(Scope* scope);
+ void UnwrapEvaluationContext();
+
MUST_USE_RESULT MaybeHandle<JSObject> MaterializeScriptScope();
MUST_USE_RESULT MaybeHandle<JSObject> MaterializeLocalScope();
MUST_USE_RESULT MaybeHandle<JSObject> MaterializeModuleScope();
Handle<JSObject> MaterializeClosure();
Handle<JSObject> MaterializeCatchScope();
Handle<JSObject> MaterializeBlockScope();
+ Handle<JSObject> WithContextExtension();
bool SetLocalVariableValue(Handle<String> variable_name,
Handle<Object> new_value);
@@ -140,6 +142,13 @@ class ScopeIterator {
Handle<JSObject> scope_object,
KeyCollectionType type);
+ // Get the chain of nested scopes within this scope for the source statement
+ // position. The scopes will be added to the list from the outermost scope to
+ // the innermost scope. Only nested block, catch or with scopes are tracked
+ // and will be returned, but no inner function scopes.
+ void GetNestedScopeChain(Isolate* isolate, Scope* scope,
+ int statement_position);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(ScopeIterator);
};
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index 93c914c3f8..6e94012579 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -16,7 +16,6 @@
#include "src/frames-inl.h"
#include "src/full-codegen/full-codegen.h"
#include "src/global-handles.h"
-#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
#include "src/list.h"
@@ -84,12 +83,23 @@ BreakLocation::Iterator::Iterator(Handle<DebugInfo> debug_info)
position_(1),
statement_position_(1) {}
+int BreakLocation::Iterator::ReturnPosition() {
+ if (debug_info_->shared()->HasSourceCode()) {
+ return debug_info_->shared()->end_position() -
+ debug_info_->shared()->start_position() - 1;
+ } else {
+ return 0;
+ }
+}
+
BreakLocation::CodeIterator::CodeIterator(Handle<DebugInfo> debug_info,
BreakLocatorType type)
: Iterator(debug_info),
reloc_iterator_(debug_info->abstract_code()->GetCode(),
GetModeMask(type)) {
- if (!Done()) Next();
+ // There is at least one break location.
+ DCHECK(!Done());
+ Next();
}
int BreakLocation::CodeIterator::GetModeMask(BreakLocatorType type) {
@@ -98,6 +108,9 @@ int BreakLocation::CodeIterator::GetModeMask(BreakLocatorType type) {
mask |= RelocInfo::ModeMask(RelocInfo::STATEMENT_POSITION);
mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_RETURN);
mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_CALL);
+ if (isolate()->is_tail_call_elimination_enabled()) {
+ mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL);
+ }
if (type == ALL_BREAK_LOCATIONS) {
mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION);
mask |= RelocInfo::ModeMask(RelocInfo::DEBUGGER_STATEMENT);
@@ -137,13 +150,7 @@ void BreakLocation::CodeIterator::Next() {
if (RelocInfo::IsDebugBreakSlotAtReturn(rmode())) {
// Set the positions to the end of the function.
- if (debug_info_->shared()->HasSourceCode()) {
- position_ = debug_info_->shared()->end_position() -
- debug_info_->shared()->start_position() - 1;
- } else {
- position_ = 0;
- }
- statement_position_ = position_;
+ statement_position_ = position_ = ReturnPosition();
}
break;
@@ -157,6 +164,10 @@ BreakLocation BreakLocation::CodeIterator::GetBreakLocation() {
type = DEBUG_BREAK_SLOT_AT_RETURN;
} else if (RelocInfo::IsDebugBreakSlotAtCall(rmode())) {
type = DEBUG_BREAK_SLOT_AT_CALL;
+ } else if (RelocInfo::IsDebugBreakSlotAtTailCall(rmode())) {
+ type = isolate()->is_tail_call_elimination_enabled()
+ ? DEBUG_BREAK_SLOT_AT_TAIL_CALL
+ : DEBUG_BREAK_SLOT_AT_CALL;
} else if (RelocInfo::IsDebuggerStatement(rmode())) {
type = DEBUGGER_STATEMENT;
} else if (RelocInfo::IsDebugBreakSlot(rmode())) {
@@ -171,11 +182,14 @@ BreakLocation BreakLocation::CodeIterator::GetBreakLocation() {
BreakLocation::BytecodeArrayIterator::BytecodeArrayIterator(
Handle<DebugInfo> debug_info, BreakLocatorType type)
: Iterator(debug_info),
- source_position_iterator_(
- debug_info->abstract_code()->GetBytecodeArray()),
+ source_position_iterator_(debug_info->abstract_code()
+ ->GetBytecodeArray()
+ ->source_position_table()),
break_locator_type_(type),
start_position_(debug_info->shared()->start_position()) {
- if (!Done()) Next();
+ // There is at least one break location.
+ DCHECK(!Done());
+ Next();
}
void BreakLocation::BytecodeArrayIterator::Next() {
@@ -192,7 +206,6 @@ void BreakLocation::BytecodeArrayIterator::Next() {
}
DCHECK(position_ >= 0);
DCHECK(statement_position_ >= 0);
- break_index_++;
enum DebugBreakType type = GetDebugBreakType();
if (type == NOT_DEBUG_BREAK) continue;
@@ -200,11 +213,14 @@ void BreakLocation::BytecodeArrayIterator::Next() {
if (break_locator_type_ == ALL_BREAK_LOCATIONS) break;
DCHECK_EQ(CALLS_AND_RETURNS, break_locator_type_);
- if (type == DEBUG_BREAK_SLOT_AT_CALL ||
- type == DEBUG_BREAK_SLOT_AT_RETURN) {
+ if (type == DEBUG_BREAK_SLOT_AT_CALL) break;
+ if (type == DEBUG_BREAK_SLOT_AT_RETURN) {
+ DCHECK_EQ(ReturnPosition(), position_);
+ DCHECK_EQ(ReturnPosition(), statement_position_);
break;
}
}
+ break_index_++;
}
BreakLocation::DebugBreakType
@@ -217,6 +233,10 @@ BreakLocation::BytecodeArrayIterator::GetDebugBreakType() {
return DEBUGGER_STATEMENT;
} else if (bytecode == interpreter::Bytecode::kReturn) {
return DEBUG_BREAK_SLOT_AT_RETURN;
+ } else if (bytecode == interpreter::Bytecode::kTailCall) {
+ return isolate()->is_tail_call_elimination_enabled()
+ ? DEBUG_BREAK_SLOT_AT_TAIL_CALL
+ : DEBUG_BREAK_SLOT_AT_CALL;
} else if (interpreter::Bytecodes::IsCallOrNew(bytecode)) {
return DEBUG_BREAK_SLOT_AT_CALL;
} else if (source_position_iterator_.is_statement()) {
@@ -261,21 +281,6 @@ BreakLocation BreakLocation::FromFrame(Handle<DebugInfo> debug_info,
return FromCodeOffset(debug_info, call_offset);
}
-// Find the break point at the supplied address, or the closest one before
-// the address.
-void BreakLocation::FromCodeOffsetSameStatement(
- Handle<DebugInfo> debug_info, int offset, List<BreakLocation>* result_out) {
- int break_index = BreakIndexFromCodeOffset(debug_info, offset);
- base::SmartPointer<Iterator> it(GetIterator(debug_info));
- it->SkipTo(break_index);
- int statement_position = it->statement_position();
- while (!it->Done() && it->statement_position() == statement_position) {
- result_out->Add(it->GetBreakLocation());
- it->Next();
- }
-}
-
-
void BreakLocation::AllForStatementPosition(Handle<DebugInfo> debug_info,
int statement_position,
List<BreakLocation>* result_out) {
@@ -479,6 +484,7 @@ void Debug::ThreadInit() {
thread_local_.last_fp_ = 0;
thread_local_.target_fp_ = 0;
thread_local_.step_in_enabled_ = false;
+ thread_local_.return_value_ = Handle<Object>();
// TODO(isolates): frames_are_dropped_?
base::NoBarrier_Store(&thread_local_.current_debug_scope_,
static_cast<base::AtomicWord>(0));
@@ -565,10 +571,8 @@ void Debug::Unload() {
debug_context_ = Handle<Context>();
}
-
-void Debug::Break(Arguments args, JavaScriptFrame* frame) {
+void Debug::Break(JavaScriptFrame* frame) {
HandleScope scope(isolate_);
- DCHECK(args.length() == 0);
// Initialize LiveEdit.
LiveEdit::InitializeThreadLocal(this);
@@ -611,22 +615,26 @@ void Debug::Break(Arguments args, JavaScriptFrame* frame) {
Address target_fp = thread_local_.target_fp_;
Address last_fp = thread_local_.last_fp_;
- bool step_break = true;
+ bool step_break = false;
switch (step_action) {
case StepNone:
return;
case StepOut:
// Step out has not reached the target frame yet.
if (current_fp < target_fp) return;
+ step_break = true;
break;
case StepNext:
// Step next should not break in a deeper frame.
if (current_fp < target_fp) return;
+ // For step-next, a tail call is like a return and should break.
+ step_break = location.IsTailCall();
// Fall through.
case StepIn: {
FrameSummary summary = GetFirstFrameSummary(frame);
int offset = summary.code_offset();
- step_break = location.IsReturn() || (current_fp != last_fp) ||
+ step_break = step_break || location.IsReturn() ||
+ (current_fp != last_fp) ||
(thread_local_.last_statement_position_ !=
location.abstract_code()->SourceStatementPosition(offset));
break;
@@ -722,9 +730,10 @@ MaybeHandle<Object> Debug::CallFunction(const char* name, int argc,
Handle<Object> args[]) {
PostponeInterruptsScope no_interrupts(isolate_);
AssertDebugContext();
- Handle<Object> holder = isolate_->natives_utils_object();
+ Handle<JSReceiver> holder =
+ Handle<JSReceiver>::cast(isolate_->natives_utils_object());
Handle<JSFunction> fun = Handle<JSFunction>::cast(
- Object::GetProperty(isolate_, holder, name).ToHandleChecked());
+ JSReceiver::GetProperty(isolate_, holder, name).ToHandleChecked());
Handle<Object> undefined = isolate_->factory()->undefined_value();
return Execution::TryCall(isolate_, fun, undefined, argc, args);
}
@@ -1021,8 +1030,10 @@ void Debug::PrepareStep(StepAction step_action) {
BreakLocation location =
BreakLocation::FromCodeOffset(debug_info, call_offset);
- // At a return statement we will step out either way.
+ // Any step at a return is a step-out.
if (location.IsReturn()) step_action = StepOut;
+ // A step-next at a tail call is a step-out.
+ if (location.IsTailCall() && step_action == StepNext) step_action = StepOut;
thread_local_.last_statement_position_ =
debug_info->abstract_code()->SourceStatementPosition(
@@ -1309,6 +1320,7 @@ bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
// Make sure we abort incremental marking.
isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
"prepare for break points");
+
bool is_interpreted = shared->HasBytecodeArray();
{
@@ -1514,7 +1526,7 @@ bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
if (function.is_null()) {
DCHECK(shared->HasDebugCode());
- } else if (!Compiler::Compile(function, CLEAR_EXCEPTION)) {
+ } else if (!Compiler::Compile(function, Compiler::CLEAR_EXCEPTION)) {
return false;
}
@@ -1572,24 +1584,11 @@ void Debug::RemoveDebugInfoAndClearFromShared(Handle<DebugInfo> debug_info) {
UNREACHABLE();
}
-Object* Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
- if (frame->is_interpreted()) {
- // Find the handler from the original bytecode array.
- InterpretedFrame* interpreted_frame =
- reinterpret_cast<InterpretedFrame*>(frame);
- SharedFunctionInfo* shared = interpreted_frame->function()->shared();
- BytecodeArray* bytecode_array = shared->bytecode_array();
- int bytecode_offset = interpreted_frame->GetBytecodeOffset();
- interpreter::Bytecode bytecode =
- interpreter::Bytecodes::FromByte(bytecode_array->get(bytecode_offset));
- return isolate_->interpreter()->GetBytecodeHandler(bytecode);
- } else {
- after_break_target_ = NULL;
- if (!LiveEdit::SetAfterBreakTarget(this)) {
- // Continue just after the slot.
- after_break_target_ = frame->pc();
- }
- return isolate_->heap()->undefined_value();
+void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
+ after_break_target_ = NULL;
+ if (!LiveEdit::SetAfterBreakTarget(this)) {
+ // Continue just after the slot.
+ after_break_target_ = frame->pc();
}
}
@@ -1610,7 +1609,7 @@ bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
Handle<DebugInfo> debug_info(shared->GetDebugInfo());
BreakLocation location =
BreakLocation::FromCodeOffset(debug_info, summary.code_offset());
- return location.IsReturn();
+ return location.IsReturn() || location.IsTailCall();
}
@@ -1657,45 +1656,6 @@ Handle<FixedArray> Debug::GetLoadedScripts() {
}
-void Debug::GetStepinPositions(JavaScriptFrame* frame, StackFrame::Id frame_id,
- List<int>* results_out) {
- FrameSummary summary = GetFirstFrameSummary(frame);
-
- Handle<JSFunction> fun = Handle<JSFunction>(summary.function());
- Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>(fun->shared());
-
- if (!EnsureDebugInfo(shared, fun)) return;
-
- Handle<DebugInfo> debug_info(shared->GetDebugInfo());
- // Refresh frame summary if the code has been recompiled for debugging.
- if (AbstractCode::cast(shared->code()) != *summary.abstract_code()) {
- summary = GetFirstFrameSummary(frame);
- }
-
- int call_offset =
- CallOffsetFromCodeOffset(summary.code_offset(), frame->is_interpreted());
- List<BreakLocation> locations;
- BreakLocation::FromCodeOffsetSameStatement(debug_info, call_offset,
- &locations);
-
- for (BreakLocation location : locations) {
- if (location.code_offset() <= summary.code_offset()) {
- // The break point is near our pc. Could be a step-in possibility,
- // that is currently taken by active debugger call.
- if (break_frame_id() == StackFrame::NO_ID) {
- continue; // We are not stepping.
- } else {
- JavaScriptFrameIterator frame_it(isolate_, break_frame_id());
- // If our frame is a top frame and we are stepping, we can do step-in
- // at this place.
- if (frame_it.frame()->id() != frame_id) continue;
- }
- }
- if (location.IsCall()) results_out->Add(location.position());
- }
-}
-
-
void Debug::RecordEvalCaller(Handle<Script> script) {
script->set_compilation_type(Script::COMPILATION_TYPE_EVAL);
// For eval scripts add information on the function from which eval was
@@ -1748,13 +1708,6 @@ MaybeHandle<Object> Debug::MakeCompileEvent(Handle<Script> script,
}
-MaybeHandle<Object> Debug::MakePromiseEvent(Handle<JSObject> event_data) {
- // Create the promise event object.
- Handle<Object> argv[] = { event_data };
- return CallFunction("MakePromiseEvent", arraysize(argv), argv);
-}
-
-
MaybeHandle<Object> Debug::MakeAsyncTaskEvent(Handle<JSObject> task_event) {
// Create the async task event object.
Handle<Object> argv[] = { task_event };
@@ -1852,6 +1805,10 @@ void Debug::OnDebugBreak(Handle<Object> break_points_hit, bool auto_continue) {
// Bail out if there is no listener for this event
if (ignore_events()) return;
+#ifdef DEBUG
+ PrintBreakLocation();
+#endif // DEBUG
+
HandleScope scope(isolate_);
// Create the event data object.
Handle<Object> event_data;
@@ -1881,25 +1838,6 @@ void Debug::OnAfterCompile(Handle<Script> script) {
}
-void Debug::OnPromiseEvent(Handle<JSObject> data) {
- if (in_debug_scope() || ignore_events()) return;
-
- HandleScope scope(isolate_);
- DebugScope debug_scope(this);
- if (debug_scope.failed()) return;
-
- // Create the script collected state object.
- Handle<Object> event_data;
- // Bail out and don't call debugger if exception.
- if (!MakePromiseEvent(data).ToHandle(&event_data)) return;
-
- // Process debug event.
- ProcessDebugEvent(v8::PromiseEvent,
- Handle<JSObject>::cast(event_data),
- true);
-}
-
-
void Debug::OnAsyncTaskEvent(Handle<JSObject> data) {
if (in_debug_scope() || ignore_events()) return;
@@ -2049,7 +1987,6 @@ void Debug::NotifyMessageHandler(v8::DebugEvent event,
case v8::NewFunction:
case v8::BeforeCompile:
case v8::CompileError:
- case v8::PromiseEvent:
case v8::AsyncTaskEvent:
break;
case v8::Exception:
@@ -2084,16 +2021,19 @@ void Debug::NotifyMessageHandler(v8::DebugEvent event,
// DebugCommandProcessor goes here.
bool running = auto_continue;
- Handle<Object> cmd_processor_ctor = Object::GetProperty(
- isolate_, exec_state, "debugCommandProcessor").ToHandleChecked();
+ Handle<Object> cmd_processor_ctor =
+ JSReceiver::GetProperty(isolate_, exec_state, "debugCommandProcessor")
+ .ToHandleChecked();
Handle<Object> ctor_args[] = { isolate_->factory()->ToBoolean(running) };
- Handle<Object> cmd_processor = Execution::Call(
- isolate_, cmd_processor_ctor, exec_state, 1, ctor_args).ToHandleChecked();
+ Handle<JSReceiver> cmd_processor = Handle<JSReceiver>::cast(
+ Execution::Call(isolate_, cmd_processor_ctor, exec_state, 1, ctor_args)
+ .ToHandleChecked());
Handle<JSFunction> process_debug_request = Handle<JSFunction>::cast(
- Object::GetProperty(
- isolate_, cmd_processor, "processDebugRequest").ToHandleChecked());
- Handle<Object> is_running = Object::GetProperty(
- isolate_, cmd_processor, "isRunning").ToHandleChecked();
+ JSReceiver::GetProperty(isolate_, cmd_processor, "processDebugRequest")
+ .ToHandleChecked());
+ Handle<Object> is_running =
+ JSReceiver::GetProperty(isolate_, cmd_processor, "isRunning")
+ .ToHandleChecked();
// Process requests from the debugger.
do {
@@ -2313,6 +2253,44 @@ void Debug::ProcessDebugMessages(bool debug_command_only) {
OnDebugBreak(isolate_->factory()->undefined_value(), debug_command_only);
}
+#ifdef DEBUG
+void Debug::PrintBreakLocation() {
+ if (!FLAG_print_break_location) return;
+ HandleScope scope(isolate_);
+ JavaScriptFrameIterator iterator(isolate_);
+ if (iterator.done()) return;
+ JavaScriptFrame* frame = iterator.frame();
+ FrameSummary summary = GetFirstFrameSummary(frame);
+ int source_position =
+ summary.abstract_code()->SourcePosition(summary.code_offset());
+ Handle<Object> script_obj(summary.function()->shared()->script(), isolate_);
+ PrintF("[debug] break in function '");
+ summary.function()->PrintName();
+ PrintF("'.\n");
+ if (script_obj->IsScript()) {
+ Handle<Script> script = Handle<Script>::cast(script_obj);
+ Handle<String> source(String::cast(script->source()));
+ Script::InitLineEnds(script);
+ int line = Script::GetLineNumber(script, source_position);
+ int column = Script::GetColumnNumber(script, source_position);
+ Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
+ int line_start =
+ line == 0 ? 0 : Smi::cast(line_ends->get(line - 1))->value() + 1;
+ int line_end = Smi::cast(line_ends->get(line))->value();
+ DisallowHeapAllocation no_gc;
+ String::FlatContent content = source->GetFlatContent();
+ if (content.IsOneByte()) {
+ PrintF("[debug] %.*s\n", line_end - line_start,
+ content.ToOneByteVector().start() + line_start);
+ PrintF("[debug] ");
+ for (int i = 0; i < column; i++) PrintF(" ");
+ PrintF("^\n");
+ } else {
+ PrintF("[debug] at line %d column %d\n", line, column);
+ }
+ }
+}
+#endif // DEBUG
DebugScope::DebugScope(Debug* debug)
: debug_(debug),
@@ -2324,9 +2302,10 @@ DebugScope::DebugScope(Debug* debug)
base::NoBarrier_Store(&debug_->thread_local_.current_debug_scope_,
reinterpret_cast<base::AtomicWord>(this));
- // Store the previous break id and frame id.
+ // Store the previous break id, frame id and return value.
break_id_ = debug_->break_id();
break_frame_id_ = debug_->break_frame_id();
+ return_value_ = debug_->return_value();
// Create the new break info. If there is no JavaScript frames there is no
// break frame id.
@@ -2364,6 +2343,7 @@ DebugScope::~DebugScope() {
// Restore to the previous break state.
debug_->thread_local_.break_frame_id_ = break_frame_id_;
debug_->thread_local_.break_id_ = break_id_;
+ debug_->thread_local_.return_value_ = return_value_;
debug_->UpdateState();
}
@@ -2448,8 +2428,9 @@ v8::Local<v8::String> MessageImpl::GetJSON() const {
if (IsEvent()) {
// Call toJSONProtocol on the debug event object.
- Handle<Object> fun = Object::GetProperty(
- isolate, event_data_, "toJSONProtocol").ToHandleChecked();
+ Handle<Object> fun =
+ JSReceiver::GetProperty(isolate, event_data_, "toJSONProtocol")
+ .ToHandleChecked();
if (!fun->IsJSFunction()) {
return v8::Local<v8::String>();
}
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index 81db9e54af..501de63181 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -70,10 +70,6 @@ class BreakLocation {
static BreakLocation FromFrame(Handle<DebugInfo> debug_info,
JavaScriptFrame* frame);
- static void FromCodeOffsetSameStatement(Handle<DebugInfo> debug_info,
- int offset,
- List<BreakLocation>* result_out);
-
static void AllForStatementPosition(Handle<DebugInfo> debug_info,
int statement_position,
List<BreakLocation>* result_out);
@@ -85,6 +81,9 @@ class BreakLocation {
inline bool IsReturn() const { return type_ == DEBUG_BREAK_SLOT_AT_RETURN; }
inline bool IsCall() const { return type_ == DEBUG_BREAK_SLOT_AT_CALL; }
+ inline bool IsTailCall() const {
+ return type_ == DEBUG_BREAK_SLOT_AT_TAIL_CALL;
+ }
inline bool IsDebugBreakSlot() const { return type_ >= DEBUG_BREAK_SLOT; }
inline bool IsDebuggerStatement() const {
return type_ == DEBUGGER_STATEMENT;
@@ -117,7 +116,8 @@ class BreakLocation {
DEBUGGER_STATEMENT,
DEBUG_BREAK_SLOT,
DEBUG_BREAK_SLOT_AT_CALL,
- DEBUG_BREAK_SLOT_AT_RETURN
+ DEBUG_BREAK_SLOT_AT_RETURN,
+ DEBUG_BREAK_SLOT_AT_TAIL_CALL,
};
BreakLocation(Handle<DebugInfo> debug_info, DebugBreakType type,
@@ -142,6 +142,9 @@ class BreakLocation {
protected:
explicit Iterator(Handle<DebugInfo> debug_info);
+ int ReturnPosition();
+
+ Isolate* isolate() { return debug_info_->GetIsolate(); }
Handle<DebugInfo> debug_info_;
int break_index_;
@@ -169,7 +172,7 @@ class BreakLocation {
}
private:
- static int GetModeMask(BreakLocatorType type);
+ int GetModeMask(BreakLocatorType type);
RelocInfo::Mode rmode() { return reloc_iterator_.rinfo()->rmode(); }
RelocInfo* rinfo() { return reloc_iterator_.rinfo(); }
@@ -414,7 +417,6 @@ class Debug {
void OnCompileError(Handle<Script> script);
void OnBeforeCompile(Handle<Script> script);
void OnAfterCompile(Handle<Script> script);
- void OnPromiseEvent(Handle<JSObject> data);
void OnAsyncTaskEvent(Handle<JSObject> data);
// API facing.
@@ -430,8 +432,8 @@ class Debug {
// Internal logic
bool Load();
- void Break(Arguments args, JavaScriptFrame*);
- Object* SetAfterBreakTarget(JavaScriptFrame* frame);
+ void Break(JavaScriptFrame* frame);
+ void SetAfterBreakTarget(JavaScriptFrame* frame);
// Scripts handling.
Handle<FixedArray> GetLoadedScripts();
@@ -459,9 +461,6 @@ class Debug {
void ClearStepOut();
void EnableStepIn();
- void GetStepinPositions(JavaScriptFrame* frame, StackFrame::Id frame_id,
- List<int>* results_out);
-
bool PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared);
// Returns whether the operation succeeded. Compilation can only be triggered
@@ -530,6 +529,11 @@ class Debug {
StackFrame::Id break_frame_id() { return thread_local_.break_frame_id_; }
int break_id() { return thread_local_.break_id_; }
+ Handle<Object> return_value() { return thread_local_.return_value_; }
+ void set_return_value(Handle<Object> value) {
+ thread_local_.return_value_ = value;
+ }
+
// Support for embedding into generated code.
Address is_active_address() {
return reinterpret_cast<Address>(&is_active_);
@@ -575,8 +579,6 @@ class Debug {
Handle<Object> promise);
MUST_USE_RESULT MaybeHandle<Object> MakeCompileEvent(
Handle<Script> script, v8::DebugEvent type);
- MUST_USE_RESULT MaybeHandle<Object> MakePromiseEvent(
- Handle<JSObject> promise_event);
MUST_USE_RESULT MaybeHandle<Object> MakeAsyncTaskEvent(
Handle<JSObject> task_event);
@@ -617,6 +619,8 @@ class Debug {
void ThreadInit();
+ void PrintBreakLocation();
+
// Global handles.
Handle<Context> debug_context_;
Handle<Object> event_listener_;
@@ -682,6 +686,10 @@ class Debug {
// Stores the way how LiveEdit has patched the stack. It is used when
// debugger returns control back to user script.
LiveEdit::FrameDropMode frame_drop_mode_;
+
+ // Value of accumulator in interpreter frames. In non-interpreter frames
+ // this value will be the hole.
+ Handle<Object> return_value_;
};
// Storage location for registers when handling debug break calls
@@ -723,6 +731,7 @@ class DebugScope BASE_EMBEDDED {
DebugScope* prev_; // Previous scope if entered recursively.
StackFrame::Id break_frame_id_; // Previous break frame id.
int break_id_; // Previous break id.
+ Handle<Object> return_value_; // Previous result.
bool failed_; // Did the debug context fail to load?
SaveContext save_; // Saves previous context.
PostponeInterruptsScope no_termination_exceptons_;
diff --git a/deps/v8/src/debug/debug.js b/deps/v8/src/debug/debug.js
index 6849bf5345..7f06ca1be1 100644
--- a/deps/v8/src/debug/debug.js
+++ b/deps/v8/src/debug/debug.js
@@ -51,8 +51,7 @@ Debug.DebugEvent = { Break: 1,
BeforeCompile: 4,
AfterCompile: 5,
CompileError: 6,
- PromiseEvent: 7,
- AsyncTaskEvent: 8 };
+ AsyncTaskEvent: 7 };
// Types of exceptions that can be broken upon.
Debug.ExceptionBreak = { Caught : 0,
@@ -1141,39 +1140,6 @@ function MakeScriptObject_(script, include_source) {
}
-function MakePromiseEvent(event_data) {
- return new PromiseEvent(event_data);
-}
-
-
-function PromiseEvent(event_data) {
- this.promise_ = event_data.promise;
- this.parentPromise_ = event_data.parentPromise;
- this.status_ = event_data.status;
- this.value_ = event_data.value;
-}
-
-
-PromiseEvent.prototype.promise = function() {
- return MakeMirror(this.promise_);
-}
-
-
-PromiseEvent.prototype.parentPromise = function() {
- return MakeMirror(this.parentPromise_);
-}
-
-
-PromiseEvent.prototype.status = function() {
- return this.status_;
-}
-
-
-PromiseEvent.prototype.value = function() {
- return MakeMirror(this.value_);
-}
-
-
function MakeAsyncTaskEvent(event_data) {
return new AsyncTaskEvent(event_data);
}
@@ -2517,7 +2483,6 @@ utils.InstallFunctions(utils, DONT_ENUM, [
"MakeExceptionEvent", MakeExceptionEvent,
"MakeBreakEvent", MakeBreakEvent,
"MakeCompileEvent", MakeCompileEvent,
- "MakePromiseEvent", MakePromiseEvent,
"MakeAsyncTaskEvent", MakeAsyncTaskEvent,
"IsBreakPointTriggered", IsBreakPointTriggered,
"UpdateScriptBreakPoints", UpdateScriptBreakPoints,
diff --git a/deps/v8/src/debug/ia32/debug-ia32.cc b/deps/v8/src/debug/ia32/debug-ia32.cc
index 95f2bc6b68..056407f29a 100644
--- a/deps/v8/src/debug/ia32/debug-ia32.cc
+++ b/deps/v8/src/debug/ia32/debug-ia32.cc
@@ -68,9 +68,15 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
}
__ push(Immediate(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
- if (mode == SAVE_RESULT_REGISTER) __ push(eax);
-
- __ Move(eax, Immediate(0)); // No arguments.
+ // Push arguments for DebugBreak call.
+ if (mode == SAVE_RESULT_REGISTER) {
+ // Break on return.
+ __ push(eax);
+ } else {
+ // Non-return breaks.
+ __ Push(masm->isolate()->factory()->the_hole_value());
+ }
+ __ Move(eax, Immediate(1));
__ mov(ebx,
Immediate(ExternalReference(
Runtime::FunctionForId(Runtime::kDebugBreak), masm->isolate())));
@@ -81,12 +87,14 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
if (FLAG_debug_code) {
for (int i = 0; i < kNumJSCallerSaved; ++i) {
Register reg = {JSCallerSavedCode(i)};
- __ Move(reg, Immediate(kDebugZapValue));
+ // Do not clobber eax if mode is SAVE_RESULT_REGISTER. It will
+ // contain return value of the function.
+ if (!(reg.is(eax) && (mode == SAVE_RESULT_REGISTER))) {
+ __ Move(reg, Immediate(kDebugZapValue));
+ }
}
}
- if (mode == SAVE_RESULT_REGISTER) __ pop(eax);
-
__ pop(ebx);
// We divide stored value by 2 (untagging) and multiply it by word's size.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiShiftSize == 0);
@@ -110,9 +118,12 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
// We do not know our frame height, but set esp based on ebp.
- __ lea(esp, Operand(ebp, -1 * kPointerSize));
-
+ __ lea(esp, Operand(ebp, FrameDropperFrameConstants::kFunctionOffset));
__ pop(edi); // Function.
+ __ add(esp, Immediate(-FrameDropperFrameConstants::kCodeOffset)); // INTERNAL
+ // frame
+ // marker
+ // and code
__ pop(ebp);
ParameterCount dummy(0);
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index 91c990d19b..78ed6f157a 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -710,20 +710,18 @@ class FunctionInfoListener {
void FunctionDone() {
HandleScope scope(isolate());
- FunctionInfoWrapper info =
- FunctionInfoWrapper::cast(
- *Object::GetElement(
- isolate(), result_, current_parent_index_).ToHandleChecked());
+ FunctionInfoWrapper info = FunctionInfoWrapper::cast(
+ *JSReceiver::GetElement(isolate(), result_, current_parent_index_)
+ .ToHandleChecked());
current_parent_index_ = info.GetParentIndex();
}
// Saves only function code, because for a script function we
// may never create a SharedFunctionInfo object.
void FunctionCode(Handle<Code> function_code) {
- FunctionInfoWrapper info =
- FunctionInfoWrapper::cast(
- *Object::GetElement(
- isolate(), result_, current_parent_index_).ToHandleChecked());
+ FunctionInfoWrapper info = FunctionInfoWrapper::cast(
+ *JSReceiver::GetElement(isolate(), result_, current_parent_index_)
+ .ToHandleChecked());
info.SetFunctionCode(function_code,
Handle<HeapObject>(isolate()->heap()->null_value()));
}
@@ -735,10 +733,9 @@ class FunctionInfoListener {
if (!shared->IsSharedFunctionInfo()) {
return;
}
- FunctionInfoWrapper info =
- FunctionInfoWrapper::cast(
- *Object::GetElement(
- isolate(), result_, current_parent_index_).ToHandleChecked());
+ FunctionInfoWrapper info = FunctionInfoWrapper::cast(
+ *JSReceiver::GetElement(isolate(), result_, current_parent_index_)
+ .ToHandleChecked());
info.SetFunctionCode(Handle<Code>(shared->code()),
Handle<HeapObject>(shared->scope_info()));
info.SetSharedFunctionInfo(shared);
@@ -1185,21 +1182,22 @@ static int TranslatePosition(int original_position,
// TODO(635): binary search may be used here
for (int i = 0; i < array_len; i += 3) {
HandleScope scope(isolate);
- Handle<Object> element = Object::GetElement(
- isolate, position_change_array, i).ToHandleChecked();
+ Handle<Object> element =
+ JSReceiver::GetElement(isolate, position_change_array, i)
+ .ToHandleChecked();
CHECK(element->IsSmi());
int chunk_start = Handle<Smi>::cast(element)->value();
if (original_position < chunk_start) {
break;
}
- element = Object::GetElement(
- isolate, position_change_array, i + 1).ToHandleChecked();
+ element = JSReceiver::GetElement(isolate, position_change_array, i + 1)
+ .ToHandleChecked();
CHECK(element->IsSmi());
int chunk_end = Handle<Smi>::cast(element)->value();
// Position mustn't be inside a chunk.
DCHECK(original_position >= chunk_end);
- element = Object::GetElement(
- isolate, position_change_array, i + 2).ToHandleChecked();
+ element = JSReceiver::GetElement(isolate, position_change_array, i + 2)
+ .ToHandleChecked();
CHECK(element->IsSmi());
int chunk_changed_end = Handle<Smi>::cast(element)->value();
position_diff = chunk_changed_end - chunk_end;
@@ -1448,7 +1446,7 @@ static bool CheckActivation(Handle<JSArray> shared_info_array,
for (int i = 0; i < len; i++) {
HandleScope scope(isolate);
Handle<Object> element =
- Object::GetElement(isolate, shared_info_array, i).ToHandleChecked();
+ JSReceiver::GetElement(isolate, shared_info_array, i).ToHandleChecked();
Handle<JSValue> jsvalue = Handle<JSValue>::cast(element);
Handle<SharedFunctionInfo> shared =
UnwrapSharedFunctionInfoFromJSValue(jsvalue);
@@ -1485,26 +1483,22 @@ static bool FixTryCatchHandler(StackFrame* top_frame,
// Initializes an artificial stack frame. The data it contains is used for:
// a. successful work of frame dropper code which eventually gets control,
-// b. being compatible with regular stack structure for various stack
+// b. being compatible with a typed frame structure for various stack
// iterators.
-// Frame structure (conforms InternalFrame structure):
+// Frame structure (conforms to InternalFrame structure):
+// -- function
// -- code
-// -- SMI maker
-// -- function (slot is called "context")
+// -- SMI marker
// -- frame base
static void SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
Handle<Code> code) {
DCHECK(bottom_js_frame->is_java_script());
-
Address fp = bottom_js_frame->fp();
-
- // Move function pointer into "context" slot.
- Memory::Object_at(fp + StandardFrameConstants::kContextOffset) =
- Memory::Object_at(fp + JavaScriptFrameConstants::kFunctionOffset);
-
- Memory::Object_at(fp + InternalFrameConstants::kCodeOffset) = *code;
- Memory::Object_at(fp + StandardFrameConstants::kMarkerOffset) =
+ Memory::Object_at(fp + FrameDropperFrameConstants::kFunctionOffset) =
+ Memory::Object_at(fp + StandardFrameConstants::kFunctionOffset);
+ Memory::Object_at(fp + FrameDropperFrameConstants::kFrameTypeOffset) =
Smi::FromInt(StackFrame::INTERNAL);
+ Memory::Object_at(fp + FrameDropperFrameConstants::kCodeOffset) = *code;
}
@@ -1566,9 +1560,9 @@ static const char* DropFrames(Vector<StackFrame*> frames, int top_frame_index,
}
Address unused_stack_top = top_frame->sp();
- int new_frame_size = LiveEdit::kFrameDropperFrameSize * kPointerSize;
- Address unused_stack_bottom = bottom_js_frame->fp()
- - new_frame_size + kPointerSize; // Bigger address end is exclusive.
+ Address unused_stack_bottom =
+ bottom_js_frame->fp() - FrameDropperFrameConstants::kFixedFrameSize +
+ 2 * kPointerSize; // Bigger address end is exclusive.
Address* top_frame_pc_address = top_frame->pc_address();
@@ -1580,8 +1574,9 @@ static const char* DropFrames(Vector<StackFrame*> frames, int top_frame_index,
int shortage_bytes =
static_cast<int>(unused_stack_top - unused_stack_bottom);
- Address padding_start = pre_top_frame->fp() -
- LiveEdit::kFrameDropperFrameSize * kPointerSize;
+ Address padding_start =
+ pre_top_frame->fp() -
+ (FrameDropperFrameConstants::kFixedFrameSize - kPointerSize);
Address padding_pointer = padding_start;
Smi* padding_object = Smi::FromInt(LiveEdit::kFramePaddingValue);
@@ -1601,7 +1596,7 @@ static const char* DropFrames(Vector<StackFrame*> frames, int top_frame_index,
MemMove(padding_start + kPointerSize - shortage_bytes,
padding_start + kPointerSize,
- LiveEdit::kFrameDropperFrameSize * kPointerSize);
+ FrameDropperFrameConstants::kFixedFrameSize - kPointerSize);
pre_top_frame->UpdateFp(pre_top_frame->fp() - shortage_bytes);
pre_pre_frame->SetCallerFp(pre_top_frame->fp());
@@ -1664,14 +1659,16 @@ class MultipleFunctionTarget {
for (int i = 0; i < len; i++) {
HandleScope scope(isolate);
Handle<Object> old_element =
- Object::GetElement(isolate, old_shared_array_, i).ToHandleChecked();
+ JSReceiver::GetElement(isolate, old_shared_array_, i)
+ .ToHandleChecked();
if (!old_shared.is_identical_to(UnwrapSharedFunctionInfoFromJSValue(
Handle<JSValue>::cast(old_element)))) {
continue;
}
Handle<Object> new_element =
- Object::GetElement(isolate, new_shared_array_, i).ToHandleChecked();
+ JSReceiver::GetElement(isolate, new_shared_array_, i)
+ .ToHandleChecked();
if (new_element->IsUndefined()) return false;
Handle<SharedFunctionInfo> new_shared =
UnwrapSharedFunctionInfoFromJSValue(
@@ -1703,7 +1700,7 @@ static const char* DropActivationsInActiveThreadImpl(Isolate* isolate,
TARGET& target, // NOLINT
bool do_drop) {
Debug* debug = isolate->debug();
- Zone zone;
+ Zone zone(isolate->allocator());
Vector<StackFrame*> frames = CreateStackMap(isolate, &zone);
@@ -1824,7 +1821,7 @@ static const char* DropActivationsInActiveThread(
// Replace "blocked on active" with "replaced on active" status.
for (int i = 0; i < array_len; i++) {
Handle<Object> obj =
- Object::GetElement(isolate, result, i).ToHandleChecked();
+ JSReceiver::GetElement(isolate, result, i).ToHandleChecked();
if (*obj == Smi::FromInt(LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
Handle<Object> replaced(
Smi::FromInt(LiveEdit::FUNCTION_REPLACED_ON_ACTIVE_STACK), isolate);
@@ -1909,8 +1906,9 @@ Handle<JSArray> LiveEdit::CheckAndDropActivations(
FixedArray::cast(old_shared_array->elements()));
Handle<JSArray> result = isolate->factory()->NewJSArray(len);
+ JSObject::EnsureWritableFastElements(result);
Handle<FixedArray> result_elements =
- JSObject::EnsureWritableFastElements(result);
+ handle(FixedArray::cast(result->elements()), isolate);
// Fill the default values.
for (int i = 0; i < len; i++) {
diff --git a/deps/v8/src/debug/liveedit.h b/deps/v8/src/debug/liveedit.h
index f3d6c54c0e..67be70e00a 100644
--- a/deps/v8/src/debug/liveedit.h
+++ b/deps/v8/src/debug/liveedit.h
@@ -170,10 +170,6 @@ class LiveEdit : AllStatic {
* ...
* --- Bottom
*/
- // A size of frame base including fp. Padding words starts right above
- // the base.
- static const int kFrameDropperFrameSize =
- 4 + StandardFrameConstants::kCPSlotCount;
// A number of words that should be reserved on stack for the LiveEdit use.
// Stored on stack in form of Smi.
static const int kFramePaddingInitialSize = 1;
@@ -256,8 +252,8 @@ class JSArrayBasedStruct {
}
Handle<Object> GetField(int field_position) {
- return Object::GetElement(
- isolate(), array_, field_position).ToHandleChecked();
+ return JSReceiver::GetElement(isolate(), array_, field_position)
+ .ToHandleChecked();
}
int GetSmiValueField(int field_position) {
@@ -337,9 +333,8 @@ class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> {
static bool IsInstance(Handle<JSArray> array) {
if (array->length() != Smi::FromInt(kSize_)) return false;
Handle<Object> element(
- Object::GetElement(array->GetIsolate(),
- array,
- kSharedInfoOffset_).ToHandleChecked());
+ JSReceiver::GetElement(array->GetIsolate(), array, kSharedInfoOffset_)
+ .ToHandleChecked());
if (!element->IsJSValue()) return false;
return Handle<JSValue>::cast(element)->value()->IsSharedFunctionInfo();
}
diff --git a/deps/v8/src/debug/mips/debug-mips.cc b/deps/v8/src/debug/mips/debug-mips.cc
index 1d9f7d6037..8e00d61ef5 100644
--- a/deps/v8/src/debug/mips/debug-mips.cc
+++ b/deps/v8/src/debug/mips/debug-mips.cc
@@ -77,9 +77,15 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
__ li(at, Operand(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
__ push(at);
- if (mode == SAVE_RESULT_REGISTER) __ push(v0);
-
- __ PrepareCEntryArgs(0); // No arguments.
+ // Push arguments for DebugBreak call.
+ if (mode == SAVE_RESULT_REGISTER) {
+ // Break on return.
+ __ push(v0);
+ } else {
+ // Non-return breaks.
+ __ Push(masm->isolate()->factory()->the_hole_value());
+ }
+ __ PrepareCEntryArgs(1);
__ PrepareCEntryFunction(ExternalReference(
Runtime::FunctionForId(Runtime::kDebugBreak), masm->isolate()));
@@ -89,12 +95,14 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
if (FLAG_debug_code) {
for (int i = 0; i < kNumJSCallerSaved; i++) {
Register reg = {JSCallerSavedCode(i)};
- __ li(reg, kDebugZapValue);
+ // Do not clobber v0 if mode is SAVE_RESULT_REGISTER. It will
+ // contain return value of the function returned by DebugBreak.
+ if (!(reg.is(v0) && (mode == SAVE_RESULT_REGISTER))) {
+ __ li(reg, kDebugZapValue);
+ }
}
}
- if (mode == SAVE_RESULT_REGISTER) __ pop(v0);
-
// Don't bother removing padding bytes pushed on the stack
// as the frame is going to be restored right away.
@@ -114,9 +122,10 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
// We do not know our frame height, but set sp based on fp.
- __ Subu(sp, fp, Operand(kPointerSize));
+ __ lw(a1, MemOperand(fp, FrameDropperFrameConstants::kFunctionOffset));
- __ Pop(ra, fp, a1); // Return address, Frame, Function.
+ // Pop return address and frame.
+ __ LeaveFrame(StackFrame::INTERNAL);
ParameterCount dummy(0);
__ FloodFunctionIfStepping(a1, no_reg, dummy, dummy);
diff --git a/deps/v8/src/debug/mips64/debug-mips64.cc b/deps/v8/src/debug/mips64/debug-mips64.cc
index 0646a249f7..aad095b64d 100644
--- a/deps/v8/src/debug/mips64/debug-mips64.cc
+++ b/deps/v8/src/debug/mips64/debug-mips64.cc
@@ -79,9 +79,15 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
__ li(at, Operand(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
__ push(at);
- if (mode == SAVE_RESULT_REGISTER) __ push(v0);
-
- __ PrepareCEntryArgs(0); // No arguments.
+ // Push arguments for DebugBreak call.
+ if (mode == SAVE_RESULT_REGISTER) {
+ // Break on return.
+ __ push(v0);
+ } else {
+ // Non-return breaks.
+ __ Push(masm->isolate()->factory()->the_hole_value());
+ }
+ __ PrepareCEntryArgs(1);
__ PrepareCEntryFunction(ExternalReference(
Runtime::FunctionForId(Runtime::kDebugBreak), masm->isolate()));
@@ -91,12 +97,14 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
if (FLAG_debug_code) {
for (int i = 0; i < kNumJSCallerSaved; i++) {
Register reg = {JSCallerSavedCode(i)};
- __ li(reg, kDebugZapValue);
+ // Do not clobber v0 if mode is SAVE_RESULT_REGISTER. It will
+ // contain return value of the function returned by DebugBreak.
+ if (!(reg.is(v0) && (mode == SAVE_RESULT_REGISTER))) {
+ __ li(reg, kDebugZapValue);
+ }
}
}
- if (mode == SAVE_RESULT_REGISTER) __ pop(v0);
-
// Don't bother removing padding bytes pushed on the stack
// as the frame is going to be restored right away.
@@ -116,9 +124,10 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
// We do not know our frame height, but set sp based on fp.
- __ Dsubu(sp, fp, Operand(kPointerSize));
+ __ ld(a1, MemOperand(fp, FrameDropperFrameConstants::kFunctionOffset));
- __ Pop(ra, fp, a1); // Return address, Frame, Function.
+ // Pop return address and frame.
+ __ LeaveFrame(StackFrame::INTERNAL);
ParameterCount dummy(0);
__ FloodFunctionIfStepping(a1, no_reg, dummy, dummy);
diff --git a/deps/v8/src/debug/mirrors.js b/deps/v8/src/debug/mirrors.js
index 8b9dd02b6e..881f303f29 100644
--- a/deps/v8/src/debug/mirrors.js
+++ b/deps/v8/src/debug/mirrors.js
@@ -1037,6 +1037,15 @@ FunctionMirror.prototype.toText = function() {
};
+FunctionMirror.prototype.context = function() {
+ if (this.resolved()) {
+ if (!this._context)
+ this._context = new ContextMirror(%FunctionGetContextData(this.value_));
+ return this._context;
+ }
+};
+
+
/**
* Mirror object for unresolved functions.
* @param {string} value The name for the unresolved function reflected by this
@@ -1434,14 +1443,6 @@ GeneratorMirror.prototype.func = function() {
};
-GeneratorMirror.prototype.context = function() {
- if (!this.context_) {
- this.context_ = new ContextMirror(%GeneratorGetContext(this.value_));
- }
- return this.context_;
-};
-
-
GeneratorMirror.prototype.receiver = function() {
if (!this.receiver_) {
this.receiver_ = MakeMirror(%GeneratorGetReceiver(this.value_));
@@ -1801,11 +1802,6 @@ FrameDetails.prototype.scopeCount = function() {
};
-FrameDetails.prototype.stepInPositionsImpl = function() {
- return %GetStepInPositions(this.break_id_, this.frameId());
-};
-
-
/**
* Mirror object for stack frames.
* @param {number} break_id The break id in the VM for which this frame is
@@ -1985,29 +1981,6 @@ FrameMirror.prototype.allScopes = function(opt_ignore_nested_scopes) {
};
-FrameMirror.prototype.stepInPositions = function() {
- var script = this.func().script();
- var funcOffset = this.func().sourcePosition_();
-
- var stepInRaw = this.details_.stepInPositionsImpl();
- var result = [];
- if (stepInRaw) {
- for (var i = 0; i < stepInRaw.length; i++) {
- var posStruct = {};
- var offset = script.locationFromPosition(funcOffset + stepInRaw[i],
- true);
- serializeLocationFields(offset, posStruct);
- var item = {
- position: posStruct
- };
- result.push(item);
- }
- }
-
- return result;
-};
-
-
FrameMirror.prototype.evaluate = function(source, disable_break,
opt_context_object) {
return MakeMirror(%DebugEvaluate(this.break_id_,
@@ -2177,6 +2150,9 @@ FrameMirror.prototype.toText = function(opt_locals) {
var kScopeDetailsTypeIndex = 0;
var kScopeDetailsObjectIndex = 1;
var kScopeDetailsNameIndex = 2;
+var kScopeDetailsStartPositionIndex = 3;
+var kScopeDetailsEndPositionIndex = 4;
+var kScopeDetailsFunctionIndex = 5;
function ScopeDetails(frame, fun, index, opt_details) {
if (frame) {
@@ -2221,6 +2197,29 @@ ScopeDetails.prototype.name = function() {
};
+ScopeDetails.prototype.startPosition = function() {
+ if (!IS_UNDEFINED(this.break_id_)) {
+ %CheckExecutionState(this.break_id_);
+ }
+ return this.details_[kScopeDetailsStartPositionIndex];
+}
+
+
+ScopeDetails.prototype.endPosition = function() {
+ if (!IS_UNDEFINED(this.break_id_)) {
+ %CheckExecutionState(this.break_id_);
+ }
+ return this.details_[kScopeDetailsEndPositionIndex];
+}
+
+ScopeDetails.prototype.func = function() {
+ if (!IS_UNDEFINED(this.break_id_)) {
+ %CheckExecutionState(this.break_id_);
+ }
+ return this.details_[kScopeDetailsFunctionIndex];
+}
+
+
ScopeDetails.prototype.setVariableValueImpl = function(name, new_value) {
var raw_res;
if (!IS_UNDEFINED(this.break_id_)) {
diff --git a/deps/v8/src/debug/ppc/debug-ppc.cc b/deps/v8/src/debug/ppc/debug-ppc.cc
index aab5399fee..a160bc2e91 100644
--- a/deps/v8/src/debug/ppc/debug-ppc.cc
+++ b/deps/v8/src/debug/ppc/debug-ppc.cc
@@ -83,9 +83,15 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
__ LoadSmiLiteral(ip, Smi::FromInt(LiveEdit::kFramePaddingInitialSize));
__ push(ip);
- if (mode == SAVE_RESULT_REGISTER) __ push(r3);
-
- __ mov(r3, Operand::Zero()); // no arguments
+ // Push arguments for DebugBreak call.
+ if (mode == SAVE_RESULT_REGISTER) {
+ // Break on return.
+ __ push(r3);
+ } else {
+ // Non-return breaks.
+ __ Push(masm->isolate()->factory()->the_hole_value());
+ }
+ __ mov(r3, Operand(1));
__ mov(r4,
Operand(ExternalReference(
Runtime::FunctionForId(Runtime::kDebugBreak), masm->isolate())));
@@ -96,12 +102,14 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
if (FLAG_debug_code) {
for (int i = 0; i < kNumJSCallerSaved; i++) {
Register reg = {JSCallerSavedCode(i)};
- __ mov(reg, Operand(kDebugZapValue));
+ // Do not clobber r3 if mode is SAVE_RESULT_REGISTER. It will
+ // contain return value of the function.
+ if (!(reg.is(r3) && (mode == SAVE_RESULT_REGISTER))) {
+ __ mov(reg, Operand(kDebugZapValue));
+ }
}
}
- if (mode == SAVE_RESULT_REGISTER) __ pop(r3);
-
// Don't bother removing padding bytes pushed on the stack
// as the frame is going to be restored right away.
@@ -121,8 +129,7 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
// Load the function pointer off of our current stack frame.
- __ LoadP(r4, MemOperand(fp, StandardFrameConstants::kConstantPoolOffset -
- kPointerSize));
+ __ LoadP(r4, MemOperand(fp, FrameDropperFrameConstants::kFunctionOffset));
// Pop return address and frame
__ LeaveFrame(StackFrame::INTERNAL);
diff --git a/deps/v8/src/debug/s390/OWNERS b/deps/v8/src/debug/s390/OWNERS
new file mode 100644
index 0000000000..eb007cb908
--- /dev/null
+++ b/deps/v8/src/debug/s390/OWNERS
@@ -0,0 +1,5 @@
+jyan@ca.ibm.com
+dstence@us.ibm.com
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
diff --git a/deps/v8/src/debug/s390/debug-s390.cc b/deps/v8/src/debug/s390/debug-s390.cc
new file mode 100644
index 0000000000..c6764c2dca
--- /dev/null
+++ b/deps/v8/src/debug/s390/debug-s390.cc
@@ -0,0 +1,165 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/codegen.h"
+#include "src/debug/debug.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void EmitDebugBreakSlot(MacroAssembler* masm) {
+ Label check_size;
+ __ bind(&check_size);
+ // oill r3, 0
+ // oill r3, 0
+ __ nop(Assembler::DEBUG_BREAK_NOP);
+ __ nop(Assembler::DEBUG_BREAK_NOP);
+
+ // lr r0, r0 64-bit only
+ // lr r0, r0 64-bit only
+ // lr r0, r0 64-bit only
+ for (int i = 8; i < Assembler::kDebugBreakSlotLength; i += 2) {
+ __ nop();
+ }
+ DCHECK_EQ(Assembler::kDebugBreakSlotLength,
+ masm->SizeOfCodeGeneratedSince(&check_size));
+}
+
+void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode) {
+ // Generate enough nop's to make space for a call instruction.
+ masm->RecordDebugBreakSlot(mode);
+ EmitDebugBreakSlot(masm);
+}
+
+void DebugCodegen::ClearDebugBreakSlot(Isolate* isolate, Address pc) {
+ CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotLength);
+ EmitDebugBreakSlot(patcher.masm());
+}
+
+void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
+ Handle<Code> code) {
+ DCHECK_EQ(Code::BUILTIN, code->kind());
+ CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotLength);
+ // Patch the code changing the debug break slot code from
+ //
+ // oill r3, 0
+ // oill r3, 0
+ // oill r3, 0 64-bit only
+ // lr r0, r0 64-bit only
+ //
+ // to a call to the debug break code, using a FIXED_SEQUENCE.
+ //
+ // iilf r14, <address> 6-bytes
+ // basr r14, r14A 2-bytes
+ //
+ // The 64bit sequence has an extra iihf.
+ //
+ // iihf r14, <high 32-bits address> 6-bytes
+ // iilf r14, <lower 32-bits address> 6-bytes
+ // basr r14, r14 2-bytes
+ patcher.masm()->mov(v8::internal::r14,
+ Operand(reinterpret_cast<intptr_t>(code->entry())));
+ patcher.masm()->basr(v8::internal::r14, v8::internal::r14);
+}
+
+bool DebugCodegen::DebugBreakSlotIsPatched(Address pc) {
+ Instr current_instr = Assembler::instr_at(pc);
+ return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
+}
+
+void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
+ DebugBreakCallHelperMode mode) {
+ __ RecordComment("Debug break");
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Load padding words on stack.
+ __ LoadSmiLiteral(ip, Smi::FromInt(LiveEdit::kFramePaddingValue));
+ for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
+ __ push(ip);
+ }
+ __ LoadSmiLiteral(ip, Smi::FromInt(LiveEdit::kFramePaddingInitialSize));
+ __ push(ip);
+
+ // Push arguments for DebugBreak call.
+ if (mode == SAVE_RESULT_REGISTER) {
+ // Break on return.
+ __ push(r2);
+ } else {
+ // Non-return breaks.
+ __ Push(masm->isolate()->factory()->the_hole_value());
+ }
+ __ mov(r2, Operand(1));
+ __ mov(r3,
+ Operand(ExternalReference(
+ Runtime::FunctionForId(Runtime::kDebugBreak), masm->isolate())));
+
+ CEntryStub ceb(masm->isolate(), 1);
+ __ CallStub(&ceb);
+
+ if (FLAG_debug_code) {
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ Register reg = {JSCallerSavedCode(i)};
+ // Do not clobber r2 if mode is SAVE_RESULT_REGISTER. It will
+ // contain return value of the function.
+ if (!(reg.is(r2) && (mode == SAVE_RESULT_REGISTER))) {
+ __ mov(reg, Operand(kDebugZapValue));
+ }
+ }
+ }
+
+ // Don't bother removing padding bytes pushed on the stack
+ // as the frame is going to be restored right away.
+
+ // Leave the internal frame.
+ }
+
+ // Now that the break point has been handled, resume normal execution by
+ // jumping to the target address intended by the caller and that was
+ // overwritten by the address of DebugBreakXXX.
+ ExternalReference after_break_target =
+ ExternalReference::debug_after_break_target_address(masm->isolate());
+ __ mov(ip, Operand(after_break_target));
+ __ LoadP(ip, MemOperand(ip));
+ __ JumpToJSEntry(ip);
+}
+
+void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+ // Load the function pointer off of our current stack frame.
+ __ LoadP(r3, MemOperand(fp, FrameDropperFrameConstants::kFunctionOffset));
+
+ // Pop return address and frame
+ __ LeaveFrame(StackFrame::INTERNAL);
+
+ ParameterCount dummy(0);
+ __ FloodFunctionIfStepping(r3, no_reg, dummy, dummy);
+
+ // Load context from the function.
+ __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
+
+ // Clear new.target as a safety measure.
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+
+ // Get function code.
+ __ LoadP(ip, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
+ __ AddP(ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // Re-run JSFunction, r3 is function, cp is context.
+ __ Jump(ip);
+}
+
+const bool LiveEdit::kFrameDropperSupported = true;
+
+#undef __
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_S390
diff --git a/deps/v8/src/debug/x64/debug-x64.cc b/deps/v8/src/debug/x64/debug-x64.cc
index f7fbe7691e..a85ddb3093 100644
--- a/deps/v8/src/debug/x64/debug-x64.cc
+++ b/deps/v8/src/debug/x64/debug-x64.cc
@@ -69,9 +69,15 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
}
__ Push(Smi::FromInt(LiveEdit::kFramePaddingInitialSize));
- if (mode == SAVE_RESULT_REGISTER) __ Push(rax);
-
- __ Set(rax, 0); // No arguments (argc == 0).
+ // Push arguments for DebugBreak call.
+ if (mode == SAVE_RESULT_REGISTER) {
+ // Break on return.
+ __ Push(rax);
+ } else {
+ // Non-return breaks.
+ __ Push(masm->isolate()->factory()->the_hole_value());
+ }
+ __ Set(rax, 1);
__ Move(rbx, ExternalReference(Runtime::FunctionForId(Runtime::kDebugBreak),
masm->isolate()));
@@ -81,12 +87,14 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
if (FLAG_debug_code) {
for (int i = 0; i < kNumJSCallerSaved; ++i) {
Register reg = {JSCallerSavedCode(i)};
- __ Set(reg, kDebugZapValue);
+ // Do not clobber rax if mode is SAVE_RESULT_REGISTER. It will
+ // contain return value of the function.
+ if (!(reg.is(rax) && (mode == SAVE_RESULT_REGISTER))) {
+ __ Set(reg, kDebugZapValue);
+ }
}
}
- if (mode == SAVE_RESULT_REGISTER) __ Pop(rax);
-
// Read current padding counter and skip corresponding number of words.
__ Pop(kScratchRegister);
__ SmiToInteger32(kScratchRegister, kScratchRegister);
@@ -111,9 +119,12 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
// We do not know our frame height, but set rsp based on rbp.
- __ leap(rsp, Operand(rbp, -1 * kPointerSize));
-
+ __ leap(rsp, Operand(rbp, FrameDropperFrameConstants::kFunctionOffset));
__ Pop(rdi); // Function.
+ __ addp(rsp,
+ Immediate(-FrameDropperFrameConstants::kCodeOffset)); // INTERNAL
+ // frame marker
+ // and code
__ popq(rbp);
ParameterCount dummy(0);
diff --git a/deps/v8/src/debug/x87/debug-x87.cc b/deps/v8/src/debug/x87/debug-x87.cc
index 8ddb82f39d..029a00415f 100644
--- a/deps/v8/src/debug/x87/debug-x87.cc
+++ b/deps/v8/src/debug/x87/debug-x87.cc
@@ -68,9 +68,15 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
}
__ push(Immediate(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
- if (mode == SAVE_RESULT_REGISTER) __ push(eax);
-
- __ Move(eax, Immediate(0)); // No arguments.
+ // Push arguments for DebugBreak call.
+ if (mode == SAVE_RESULT_REGISTER) {
+ // Break on return.
+ __ push(eax);
+ } else {
+ // Non-return breaks.
+ __ Push(masm->isolate()->factory()->the_hole_value());
+ }
+ __ Move(eax, Immediate(1));
__ mov(ebx,
Immediate(ExternalReference(
Runtime::FunctionForId(Runtime::kDebugBreak), masm->isolate())));
@@ -81,12 +87,14 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
if (FLAG_debug_code) {
for (int i = 0; i < kNumJSCallerSaved; ++i) {
Register reg = {JSCallerSavedCode(i)};
- __ Move(reg, Immediate(kDebugZapValue));
+ // Do not clobber eax if mode is SAVE_RESULT_REGISTER. It will
+ // contain return value of the function.
+ if (!(reg.is(eax) && (mode == SAVE_RESULT_REGISTER))) {
+ __ Move(reg, Immediate(kDebugZapValue));
+ }
}
}
- if (mode == SAVE_RESULT_REGISTER) __ pop(eax);
-
__ pop(ebx);
// We divide stored value by 2 (untagging) and multiply it by word's size.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiShiftSize == 0);
@@ -110,9 +118,12 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
// We do not know our frame height, but set esp based on ebp.
- __ lea(esp, Operand(ebp, -1 * kPointerSize));
-
+ __ lea(esp, Operand(ebp, FrameDropperFrameConstants::kFunctionOffset));
__ pop(edi); // Function.
+ __ add(esp, Immediate(-FrameDropperFrameConstants::kCodeOffset)); // INTERNAL
+ // frame
+ // marker
+ // and code
__ pop(ebp);
ParameterCount dummy(0);
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index e00e5ab538..b2c5d42df4 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -38,7 +38,7 @@ static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) {
DeoptimizerData::DeoptimizerData(MemoryAllocator* allocator)
: allocator_(allocator),
current_(NULL) {
- for (int i = 0; i < Deoptimizer::kBailoutTypesWithCodeEntry; ++i) {
+ for (int i = 0; i <= Deoptimizer::kLastBailoutType; ++i) {
deopt_entry_code_entries_[i] = -1;
deopt_entry_code_[i] = AllocateCodeChunk(allocator);
}
@@ -46,7 +46,7 @@ DeoptimizerData::DeoptimizerData(MemoryAllocator* allocator)
DeoptimizerData::~DeoptimizerData() {
- for (int i = 0; i < Deoptimizer::kBailoutTypesWithCodeEntry; ++i) {
+ for (int i = 0; i <= Deoptimizer::kLastBailoutType; ++i) {
allocator_->Free(deopt_entry_code_[i]);
deopt_entry_code_[i] = NULL;
}
@@ -307,7 +307,7 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
// Move marked code from the optimized code list to the deoptimized
// code list, collecting them into a ZoneList.
- Zone zone;
+ Zone zone(isolate->allocator());
ZoneList<Code*> codes(10, &zone);
// Walk over all optimized code objects in this native context.
@@ -444,7 +444,6 @@ bool Deoptimizer::TraceEnabledFor(BailoutType deopt_type,
case EAGER:
case SOFT:
case LAZY:
- case DEBUGGER:
return (frame_type == StackFrame::STUB)
? FLAG_trace_stub_failures
: FLAG_trace_deopt;
@@ -459,7 +458,6 @@ const char* Deoptimizer::MessageFor(BailoutType type) {
case EAGER: return "eager";
case SOFT: return "soft";
case LAZY: return "lazy";
- case DEBUGGER: return "debugger";
}
FATAL("Unsupported deopt type");
return NULL;
@@ -474,7 +472,6 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function,
bailout_type_(type),
from_(from),
fp_to_sp_delta_(fp_to_sp_delta),
- has_alignment_padding_(0),
deoptimizing_throw_(false),
catch_handler_data_(-1),
catch_handler_pc_offset_(-1),
@@ -482,6 +479,12 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function,
output_count_(0),
jsframe_count_(0),
output_(nullptr),
+ caller_frame_top_(0),
+ caller_fp_(0),
+ caller_pc_(0),
+ caller_constant_pool_(0),
+ input_frame_context_(0),
+ stack_fp_(0),
trace_scope_(nullptr) {
if (isolate->deoptimizer_lazy_throw()) {
isolate->set_deoptimizer_lazy_throw(false);
@@ -546,9 +549,6 @@ Code* Deoptimizer::FindOptimizedCode(JSFunction* function,
? static_cast<Code*>(isolate_->FindCodeObject(from_))
: compiled_code;
}
- case Deoptimizer::DEBUGGER:
- DCHECK(optimized_code->contains(from_));
- return optimized_code;
}
FATAL("Could not find code for optimized function");
return NULL;
@@ -601,7 +601,7 @@ Address Deoptimizer::GetDeoptimizationEntry(Isolate* isolate,
CHECK_EQ(mode, CALCULATE_ENTRY_ADDRESS);
}
DeoptimizerData* data = isolate->deoptimizer_data();
- CHECK_LT(type, kBailoutTypesWithCodeEntry);
+ CHECK_LE(type, kLastBailoutType);
MemoryChunk* base = data->deopt_entry_code_[type];
return base->area_start() + (id * table_entry_size_);
}
@@ -711,16 +711,38 @@ void Deoptimizer::DoComputeOutputFrames() {
DeoptimizationInputData* input_data =
DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
+ {
+ // Read caller's PC, caller's FP and caller's constant pool values
+ // from input frame. Compute caller's frame top address.
+
+ Register fp_reg = JavaScriptFrame::fp_register();
+ stack_fp_ = input_->GetRegister(fp_reg.code());
+
+ caller_frame_top_ = stack_fp_ + ComputeInputFrameAboveFpFixedSize();
+
+ Address fp_address = input_->GetFramePointerAddress();
+ caller_fp_ = Memory::intptr_at(fp_address);
+ caller_pc_ =
+ Memory::intptr_at(fp_address + CommonFrameConstants::kCallerPCOffset);
+ input_frame_context_ = Memory::intptr_at(
+ fp_address + CommonFrameConstants::kContextOrFrameTypeOffset);
+
+ if (FLAG_enable_embedded_constant_pool) {
+ caller_constant_pool_ = Memory::intptr_at(
+ fp_address + CommonFrameConstants::kConstantPoolOffset);
+ }
+ }
+
if (trace_scope_ != NULL) {
timer.Start();
PrintF(trace_scope_->file(), "[deoptimizing (DEOPT %s): begin ",
MessageFor(bailout_type_));
PrintFunctionName();
PrintF(trace_scope_->file(),
- " (opt #%d) @%d, FP to SP delta: %d]\n",
- input_data->OptimizationId()->value(),
- bailout_id_,
- fp_to_sp_delta_);
+ " (opt #%d) @%d, FP to SP delta: %d, caller sp: 0x%08" V8PRIxPTR
+ "]\n",
+ input_data->OptimizationId()->value(), bailout_id_, fp_to_sp_delta_,
+ caller_frame_top_);
if (bailout_type_ == EAGER || bailout_type_ == SOFT ||
(compiled_code_->is_hydrogen_stub())) {
compiled_code_->PrintDeoptLocation(trace_scope_->file(), from_);
@@ -763,39 +785,42 @@ void Deoptimizer::DoComputeOutputFrames() {
}
output_count_ = static_cast<int>(count);
- Register fp_reg = JavaScriptFrame::fp_register();
- stack_fp_ = reinterpret_cast<Address>(
- input_->GetRegister(fp_reg.code()) +
- has_alignment_padding_ * kPointerSize);
-
// Translate each output frame.
- for (size_t i = 0; i < count; ++i) {
+ int frame_index = 0; // output_frame_index
+ for (size_t i = 0; i < count; ++i, ++frame_index) {
// Read the ast node id, function, and frame height for this output frame.
- int frame_index = static_cast<int>(i);
- switch (translated_state_.frames()[i].kind()) {
+ TranslatedFrame* translated_frame = &(translated_state_.frames()[i]);
+ switch (translated_frame->kind()) {
case TranslatedFrame::kFunction:
- DoComputeJSFrame(frame_index, deoptimizing_throw_ && i == count - 1);
+ DoComputeJSFrame(translated_frame, frame_index,
+ deoptimizing_throw_ && i == count - 1);
jsframe_count_++;
break;
case TranslatedFrame::kInterpretedFunction:
- DoComputeInterpretedFrame(frame_index,
+ DoComputeInterpretedFrame(translated_frame, frame_index,
deoptimizing_throw_ && i == count - 1);
jsframe_count_++;
break;
case TranslatedFrame::kArgumentsAdaptor:
- DoComputeArgumentsAdaptorFrame(frame_index);
+ DoComputeArgumentsAdaptorFrame(translated_frame, frame_index);
+ break;
+ case TranslatedFrame::kTailCallerFunction:
+ DoComputeTailCallerFrame(translated_frame, frame_index);
+ // Tail caller frame translations do not produce output frames.
+ frame_index--;
+ output_count_--;
break;
case TranslatedFrame::kConstructStub:
- DoComputeConstructStubFrame(frame_index);
+ DoComputeConstructStubFrame(translated_frame, frame_index);
break;
case TranslatedFrame::kGetter:
- DoComputeAccessorStubFrame(frame_index, false);
+ DoComputeAccessorStubFrame(translated_frame, frame_index, false);
break;
case TranslatedFrame::kSetter:
- DoComputeAccessorStubFrame(frame_index, true);
+ DoComputeAccessorStubFrame(translated_frame, frame_index, true);
break;
case TranslatedFrame::kCompiledStub:
- DoComputeCompiledStubFrame(frame_index);
+ DoComputeCompiledStubFrame(translated_frame, frame_index);
break;
case TranslatedFrame::kInvalid:
FATAL("invalid frame");
@@ -811,22 +836,18 @@ void Deoptimizer::DoComputeOutputFrames() {
MessageFor(bailout_type_));
PrintFunctionName();
PrintF(trace_scope_->file(),
- " @%d => node=%d, pc=0x%08" V8PRIxPTR ", state=%s, alignment=%s,"
- " took %0.3f ms]\n",
- bailout_id_,
- node_id.ToInt(),
- output_[index]->GetPc(),
- FullCodeGenerator::State2String(
- static_cast<FullCodeGenerator::State>(
- output_[index]->GetState()->value())),
- has_alignment_padding_ ? "with padding" : "no padding",
+ " @%d => node=%d, pc=0x%08" V8PRIxPTR ", caller sp=0x%08" V8PRIxPTR
+ ", state=%s, took %0.3f ms]\n",
+ bailout_id_, node_id.ToInt(), output_[index]->GetPc(),
+ caller_frame_top_, FullCodeGenerator::State2String(
+ static_cast<FullCodeGenerator::State>(
+ output_[index]->GetState()->value())),
ms);
}
}
-void Deoptimizer::DoComputeJSFrame(int frame_index, bool goto_catch_handler) {
- TranslatedFrame* translated_frame =
- &(translated_state_.frames()[frame_index]);
+void Deoptimizer::DoComputeJSFrame(TranslatedFrame* translated_frame,
+ int frame_index, bool goto_catch_handler) {
SharedFunctionInfo* shared = translated_frame->raw_shared_info();
TranslatedFrame::iterator value_iterator = translated_frame->begin();
@@ -853,8 +874,6 @@ void Deoptimizer::DoComputeJSFrame(int frame_index, bool goto_catch_handler) {
PrintF(trace_scope_->file(), " translating frame ");
base::SmartArrayPointer<char> name = shared->DebugName()->ToCString();
PrintF(trace_scope_->file(), "%s", name.get());
- PrintF(trace_scope_->file(),
- " => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
PrintF(trace_scope_->file(), " => node=%d, height=%d%s\n", node_id.ToInt(),
height_in_bytes, goto_catch_handler ? " (throw)" : "");
}
@@ -862,7 +881,6 @@ void Deoptimizer::DoComputeJSFrame(int frame_index, bool goto_catch_handler) {
// The 'fixed' part of the frame consists of the incoming parameters and
// the part described by JavaScriptFrameConstants.
unsigned fixed_frame_size = ComputeJavascriptFixedSize(shared);
- unsigned input_frame_size = input_->GetFrameSize();
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
@@ -875,24 +893,11 @@ void Deoptimizer::DoComputeJSFrame(int frame_index, bool goto_catch_handler) {
CHECK_NULL(output_[frame_index]);
output_[frame_index] = output_frame;
- // The top address for the bottommost output frame can be computed from
- // the input frame pointer and the output frame's height. For all
- // subsequent output frames, it can be computed from the previous one's
- // top address and the current frame's size.
- Register fp_reg = JavaScriptFrame::fp_register();
+ // The top address of the frame is computed from the previous frame's top and
+ // this frame's size.
intptr_t top_address;
if (is_bottommost) {
- // Determine whether the input frame contains alignment padding.
- has_alignment_padding_ =
- (!compiled_code_->is_turbofanned() && HasAlignmentPadding(shared)) ? 1
- : 0;
- // 2 = context and function in the frame.
- // If the optimized frame had alignment padding, adjust the frame pointer
- // to point to the new position of the old frame pointer after padding
- // is removed. Subtract 2 * kPointerSize for the context and function slots.
- top_address = input_->GetRegister(fp_reg.code()) -
- StandardFrameConstants::kFixedFrameSizeFromFp -
- height_in_bytes + has_alignment_padding_ * kPointerSize;
+ top_address = caller_frame_top_ - output_frame_size;
} else {
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
}
@@ -900,13 +905,11 @@ void Deoptimizer::DoComputeJSFrame(int frame_index, bool goto_catch_handler) {
// Compute the incoming parameter translation.
unsigned output_offset = output_frame_size;
- unsigned input_offset = input_frame_size;
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
output_offset);
}
- input_offset -= (parameter_count * kPointerSize);
// There are no translation commands for the caller's pc and fp, the
// context, and the function. Synthesize their values and set them up
@@ -917,10 +920,9 @@ void Deoptimizer::DoComputeJSFrame(int frame_index, bool goto_catch_handler) {
// previous one. This frame's pc can be computed from the non-optimized
// function code and AST id of the bailout.
output_offset -= kPCOnStackSize;
- input_offset -= kPCOnStackSize;
intptr_t value;
if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
+ value = caller_pc_;
} else {
value = output_[frame_index - 1]->GetPc();
}
@@ -932,30 +934,27 @@ void Deoptimizer::DoComputeJSFrame(int frame_index, bool goto_catch_handler) {
// read from the previous one. Also compute and set this frame's frame
// pointer.
output_offset -= kFPOnStackSize;
- input_offset -= kFPOnStackSize;
if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
+ value = caller_fp_;
} else {
value = output_[frame_index - 1]->GetFp();
}
output_frame->SetCallerFp(output_offset, value);
intptr_t fp_value = top_address + output_offset;
- DCHECK(!is_bottommost || (input_->GetRegister(fp_reg.code()) +
- has_alignment_padding_ * kPointerSize) == fp_value);
output_frame->SetFp(fp_value);
- if (is_topmost) output_frame->SetRegister(fp_reg.code(), fp_value);
+ if (is_topmost) {
+ Register fp_reg = JavaScriptFrame::fp_register();
+ output_frame->SetRegister(fp_reg.code(), fp_value);
+ }
DebugPrintOutputSlot(value, frame_index, output_offset, "caller's fp\n");
- DCHECK(!is_bottommost || !has_alignment_padding_ ||
- (fp_value & kPointerSize) != 0);
if (FLAG_enable_embedded_constant_pool) {
// For the bottommost output frame the constant pool pointer can be gotten
// from the input frame. For subsequent output frames, it can be read from
// the previous frame.
output_offset -= kPointerSize;
- input_offset -= kPointerSize;
if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
+ value = caller_constant_pool_;
} else {
value = output_[frame_index - 1]->GetConstantPool();
}
@@ -967,9 +966,7 @@ void Deoptimizer::DoComputeJSFrame(int frame_index, bool goto_catch_handler) {
// For the bottommost output frame the context can be gotten from the input
// frame. For all subsequent output frames it can be gotten from the function
// so long as we don't inline functions that need local contexts.
- Register context_reg = JavaScriptFrame::context_register();
output_offset -= kPointerSize;
- input_offset -= kPointerSize;
TranslatedFrame::iterator context_pos = value_iterator;
int context_input_index = input_index;
@@ -988,14 +985,15 @@ void Deoptimizer::DoComputeJSFrame(int frame_index, bool goto_catch_handler) {
// If the context was optimized away, just use the context from
// the activation. This should only apply to Crankshaft code.
CHECK(!compiled_code_->is_turbofanned());
- context =
- is_bottommost
- ? reinterpret_cast<Object*>(input_->GetFrameSlot(input_offset))
- : function->context();
+ context = is_bottommost ? reinterpret_cast<Object*>(input_frame_context_)
+ : function->context();
}
value = reinterpret_cast<intptr_t>(context);
output_frame->SetContext(value);
- if (is_topmost) output_frame->SetRegister(context_reg.code(), value);
+ if (is_topmost) {
+ Register context_reg = JavaScriptFrame::context_register();
+ output_frame->SetRegister(context_reg.code(), value);
+ }
WriteValueToOutput(context, context_input_index, frame_index, output_offset,
"context ");
if (context == isolate_->heap()->arguments_marker()) {
@@ -1009,11 +1007,7 @@ void Deoptimizer::DoComputeJSFrame(int frame_index, bool goto_catch_handler) {
// The function was mentioned explicitly in the BEGIN_FRAME.
output_offset -= kPointerSize;
- input_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(function);
- // The function for the bottommost output frame should also agree with the
- // input frame.
- DCHECK(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
WriteValueToOutput(function, 0, frame_index, output_offset, "function ");
// Translate the rest of the frame.
@@ -1065,7 +1059,7 @@ void Deoptimizer::DoComputeJSFrame(int frame_index, bool goto_catch_handler) {
output_frame->SetState(Smi::FromInt(state));
// Set the continuation for the topmost frame.
- if (is_topmost && bailout_type_ != DEBUGGER) {
+ if (is_topmost) {
Builtins* builtins = isolate_->builtins();
Code* continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
if (bailout_type_ == LAZY) {
@@ -1080,10 +1074,9 @@ void Deoptimizer::DoComputeJSFrame(int frame_index, bool goto_catch_handler) {
}
}
-void Deoptimizer::DoComputeInterpretedFrame(int frame_index,
+void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
+ int frame_index,
bool goto_catch_handler) {
- TranslatedFrame* translated_frame =
- &(translated_state_.frames()[frame_index]);
SharedFunctionInfo* shared = translated_frame->raw_shared_info();
TranslatedFrame::iterator value_iterator = translated_frame->begin();
@@ -1110,7 +1103,6 @@ void Deoptimizer::DoComputeInterpretedFrame(int frame_index,
// The 'fixed' part of the frame consists of the incoming parameters and
// the part described by InterpreterFrameConstants.
unsigned fixed_frame_size = ComputeInterpretedFixedSize(shared);
- unsigned input_frame_size = input_->GetFrameSize();
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
@@ -1125,18 +1117,11 @@ void Deoptimizer::DoComputeInterpretedFrame(int frame_index,
CHECK_NULL(output_[frame_index]);
output_[frame_index] = output_frame;
- // The top address for the bottommost output frame can be computed from
- // the input frame pointer and the output frame's height. For all
- // subsequent output frames, it can be computed from the previous one's
- // top address and the current frame's size.
- Register fp_reg = InterpretedFrame::fp_register();
+ // The top address of the frame is computed from the previous frame's top and
+ // this frame's size.
intptr_t top_address;
if (is_bottommost) {
- // Subtract interpreter fixed frame size for the context function slots,
- // new,target and bytecode offset.
- top_address = input_->GetRegister(fp_reg.code()) -
- InterpreterFrameConstants::kFixedFrameSizeFromFp -
- height_in_bytes;
+ top_address = caller_frame_top_ - output_frame_size;
} else {
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
}
@@ -1144,13 +1129,11 @@ void Deoptimizer::DoComputeInterpretedFrame(int frame_index,
// Compute the incoming parameter translation.
unsigned output_offset = output_frame_size;
- unsigned input_offset = input_frame_size;
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
output_offset);
}
- input_offset -= (parameter_count * kPointerSize);
// There are no translation commands for the caller's pc and fp, the
// context, the function, new.target and the bytecode offset. Synthesize
@@ -1162,10 +1145,9 @@ void Deoptimizer::DoComputeInterpretedFrame(int frame_index,
// previous one. This frame's pc can be computed from the non-optimized
// function code and AST id of the bailout.
output_offset -= kPCOnStackSize;
- input_offset -= kPCOnStackSize;
intptr_t value;
if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
+ value = caller_pc_;
} else {
value = output_[frame_index - 1]->GetPc();
}
@@ -1177,31 +1159,27 @@ void Deoptimizer::DoComputeInterpretedFrame(int frame_index,
// read from the previous one. Also compute and set this frame's frame
// pointer.
output_offset -= kFPOnStackSize;
- input_offset -= kFPOnStackSize;
if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
+ value = caller_fp_;
} else {
value = output_[frame_index - 1]->GetFp();
}
output_frame->SetCallerFp(output_offset, value);
intptr_t fp_value = top_address + output_offset;
- DCHECK(!is_bottommost ||
- (input_->GetRegister(fp_reg.code()) +
- has_alignment_padding_ * kPointerSize) == fp_value);
output_frame->SetFp(fp_value);
- if (is_topmost) output_frame->SetRegister(fp_reg.code(), fp_value);
+ if (is_topmost) {
+ Register fp_reg = InterpretedFrame::fp_register();
+ output_frame->SetRegister(fp_reg.code(), fp_value);
+ }
DebugPrintOutputSlot(value, frame_index, output_offset, "caller's fp\n");
- DCHECK(!is_bottommost || !has_alignment_padding_ ||
- (fp_value & kPointerSize) != 0);
if (FLAG_enable_embedded_constant_pool) {
// For the bottommost output frame the constant pool pointer can be gotten
// from the input frame. For subsequent output frames, it can be read from
// the previous frame.
output_offset -= kPointerSize;
- input_offset -= kPointerSize;
if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
+ value = caller_constant_pool_;
} else {
value = output_[frame_index - 1]->GetConstantPool();
}
@@ -1215,7 +1193,6 @@ void Deoptimizer::DoComputeInterpretedFrame(int frame_index,
// so long as we don't inline functions that need local contexts.
Register context_reg = InterpretedFrame::context_register();
output_offset -= kPointerSize;
- input_offset -= kPointerSize;
// When deoptimizing into a catch block, we need to take the context
// from a register that was specified in the handler table.
@@ -1243,31 +1220,24 @@ void Deoptimizer::DoComputeInterpretedFrame(int frame_index,
// The function was mentioned explicitly in the BEGIN_FRAME.
output_offset -= kPointerSize;
- input_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(function);
- // The function for the bottommost output frame should also agree with the
- // input frame.
- DCHECK(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
WriteValueToOutput(function, 0, frame_index, output_offset, "function ");
// The new.target slot is only used during function activiation which is
// before the first deopt point, so should never be needed. Just set it to
// undefined.
output_offset -= kPointerSize;
- input_offset -= kPointerSize;
Object* new_target = isolate_->heap()->undefined_value();
WriteValueToOutput(new_target, 0, frame_index, output_offset, "new_target ");
// Set the bytecode array pointer.
output_offset -= kPointerSize;
- input_offset -= kPointerSize;
Object* bytecode_array = shared->bytecode_array();
WriteValueToOutput(bytecode_array, 0, frame_index, output_offset,
"bytecode array ");
// The bytecode offset was mentioned explicitly in the BEGIN_FRAME.
output_offset -= kPointerSize;
- input_offset -= kPointerSize;
int raw_bytecode_offset =
BytecodeArray::kHeaderSize - kHeapObjectTag + bytecode_offset;
Smi* smi_bytecode_offset = Smi::FromInt(raw_bytecode_offset);
@@ -1317,7 +1287,7 @@ void Deoptimizer::DoComputeInterpretedFrame(int frame_index,
}
// Set the continuation for the topmost frame.
- if (is_topmost && bailout_type_ != DEBUGGER) {
+ if (is_topmost) {
Code* continuation =
builtins->builtin(Builtins::kInterpreterNotifyDeoptimized);
if (bailout_type_ == LAZY) {
@@ -1334,11 +1304,10 @@ void Deoptimizer::DoComputeInterpretedFrame(int frame_index,
}
}
-
-void Deoptimizer::DoComputeArgumentsAdaptorFrame(int frame_index) {
- TranslatedFrame* translated_frame =
- &(translated_state_.frames()[frame_index]);
+void Deoptimizer::DoComputeArgumentsAdaptorFrame(
+ TranslatedFrame* translated_frame, int frame_index) {
TranslatedFrame::iterator value_iterator = translated_frame->begin();
+ bool is_bottommost = (0 == frame_index);
int input_index = 0;
unsigned height = translated_frame->height();
@@ -1351,7 +1320,7 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(int frame_index) {
" translating arguments adaptor => height=%d\n", height_in_bytes);
}
- unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
+ unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFixedFrameSize;
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
@@ -1360,15 +1329,19 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(int frame_index) {
FrameDescription(output_frame_size, parameter_count);
output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
- // Arguments adaptor can not be topmost or bottommost.
- CHECK(frame_index > 0 && frame_index < output_count_ - 1);
+ // Arguments adaptor can not be topmost.
+ CHECK(frame_index < output_count_ - 1);
CHECK(output_[frame_index] == NULL);
output_[frame_index] = output_frame;
- // The top address of the frame is computed from the previous
- // frame's top and this frame's size.
+ // The top address of the frame is computed from the previous frame's top and
+ // this frame's size.
intptr_t top_address;
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ if (is_bottommost) {
+ top_address = caller_frame_top_ - output_frame_size;
+ } else {
+ top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ }
output_frame->SetTop(top_address);
// Compute the incoming parameter translation.
@@ -1381,13 +1354,22 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(int frame_index) {
// Read caller's PC from the previous frame.
output_offset -= kPCOnStackSize;
- intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetCallerPc(output_offset, callers_pc);
- DebugPrintOutputSlot(callers_pc, frame_index, output_offset, "caller's pc\n");
+ intptr_t value;
+ if (is_bottommost) {
+ value = caller_pc_;
+ } else {
+ value = output_[frame_index - 1]->GetPc();
+ }
+ output_frame->SetCallerPc(output_offset, value);
+ DebugPrintOutputSlot(value, frame_index, output_offset, "caller's pc\n");
// Read caller's FP from the previous frame, and set this frame's FP.
output_offset -= kFPOnStackSize;
- intptr_t value = output_[frame_index - 1]->GetFp();
+ if (is_bottommost) {
+ value = caller_fp_;
+ } else {
+ value = output_[frame_index - 1]->GetFp();
+ }
output_frame->SetCallerFp(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
@@ -1396,7 +1378,11 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(int frame_index) {
if (FLAG_enable_embedded_constant_pool) {
// Read the caller's constant pool from the previous frame.
output_offset -= kPointerSize;
- value = output_[frame_index - 1]->GetConstantPool();
+ if (is_bottommost) {
+ value = caller_constant_pool_;
+ } else {
+ value = output_[frame_index - 1]->GetConstantPool();
+ }
output_frame->SetCallerConstantPool(output_offset, value);
DebugPrintOutputSlot(value, frame_index, output_offset,
"caller's constant_pool\n");
@@ -1440,17 +1426,94 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(int frame_index) {
}
}
+void Deoptimizer::DoComputeTailCallerFrame(TranslatedFrame* translated_frame,
+ int frame_index) {
+ SharedFunctionInfo* shared = translated_frame->raw_shared_info();
+
+ bool is_bottommost = (0 == frame_index);
+ // Tail caller frame can't be topmost.
+ CHECK_NE(output_count_ - 1, frame_index);
+
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(), " translating tail caller frame ");
+ base::SmartArrayPointer<char> name = shared->DebugName()->ToCString();
+ PrintF(trace_scope_->file(), "%s\n", name.get());
+ }
+
+ if (!is_bottommost) return;
+
+ // Drop arguments adaptor frame below current frame if it exsits.
+ Address fp_address = input_->GetFramePointerAddress();
+ Address adaptor_fp_address =
+ Memory::Address_at(fp_address + CommonFrameConstants::kCallerFPOffset);
+
+ if (Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR) !=
+ Memory::Object_at(adaptor_fp_address +
+ CommonFrameConstants::kContextOrFrameTypeOffset)) {
+ return;
+ }
+
+ int caller_params_count =
+ Smi::cast(
+ Memory::Object_at(adaptor_fp_address +
+ ArgumentsAdaptorFrameConstants::kLengthOffset))
+ ->value();
+
+ int callee_params_count =
+ function_->shared()->internal_formal_parameter_count();
-void Deoptimizer::DoComputeConstructStubFrame(int frame_index) {
- TranslatedFrame* translated_frame =
- &(translated_state_.frames()[frame_index]);
+ // Both caller and callee parameters count do not include receiver.
+ int offset = (caller_params_count - callee_params_count) * kPointerSize;
+ intptr_t new_stack_fp =
+ reinterpret_cast<intptr_t>(adaptor_fp_address) + offset;
+
+ intptr_t new_caller_frame_top = new_stack_fp +
+ (callee_params_count + 1) * kPointerSize +
+ CommonFrameConstants::kFixedFrameSizeAboveFp;
+
+ intptr_t adaptor_caller_pc = Memory::intptr_at(
+ adaptor_fp_address + CommonFrameConstants::kCallerPCOffset);
+ intptr_t adaptor_caller_fp = Memory::intptr_at(
+ adaptor_fp_address + CommonFrameConstants::kCallerFPOffset);
+
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " dropping caller arguments adaptor frame: offset=%d, "
+ "fp: 0x%08" V8PRIxPTR " -> 0x%08" V8PRIxPTR
+ ", "
+ "caller sp: 0x%08" V8PRIxPTR " -> 0x%08" V8PRIxPTR "\n",
+ offset, stack_fp_, new_stack_fp, caller_frame_top_,
+ new_caller_frame_top);
+ }
+ caller_frame_top_ = new_caller_frame_top;
+ caller_fp_ = adaptor_caller_fp;
+ caller_pc_ = adaptor_caller_pc;
+}
+
+void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
+ int frame_index) {
TranslatedFrame::iterator value_iterator = translated_frame->begin();
+ bool is_topmost = (output_count_ - 1 == frame_index);
+ // The construct frame could become topmost only if we inlined a constructor
+ // call which does a tail call (otherwise the tail callee's frame would be
+ // the topmost one). So it could only be the LAZY case.
+ CHECK(!is_topmost || bailout_type_ == LAZY);
int input_index = 0;
Builtins* builtins = isolate_->builtins();
Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
unsigned height = translated_frame->height();
unsigned height_in_bytes = height * kPointerSize;
+
+ // If the construct frame appears to be topmost we should ensure that the
+ // value of result register is preserved during continuation execution.
+ // We do this here by "pushing" the result of the constructor function to the
+ // top of the reconstructed stack and then using the
+ // FullCodeGenerator::TOS_REG machinery.
+ if (is_topmost) {
+ height_in_bytes += kPointerSize;
+ }
+
// Skip function.
value_iterator++;
input_index++;
@@ -1459,7 +1522,7 @@ void Deoptimizer::DoComputeConstructStubFrame(int frame_index) {
" translating construct stub => height=%d\n", height_in_bytes);
}
- unsigned fixed_frame_size = ConstructFrameConstants::kFrameSize;
+ unsigned fixed_frame_size = ConstructFrameConstants::kFixedFrameSize;
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
@@ -1467,13 +1530,13 @@ void Deoptimizer::DoComputeConstructStubFrame(int frame_index) {
new (output_frame_size) FrameDescription(output_frame_size);
output_frame->SetFrameType(StackFrame::CONSTRUCT);
- // Construct stub can not be topmost or bottommost.
- DCHECK(frame_index > 0 && frame_index < output_count_ - 1);
+ // Construct stub can not be topmost.
+ DCHECK(frame_index > 0 && frame_index < output_count_);
DCHECK(output_[frame_index] == NULL);
output_[frame_index] = output_frame;
- // The top address of the frame is computed from the previous
- // frame's top and this frame's size.
+ // The top address of the frame is computed from the previous frame's top and
+ // this frame's size.
intptr_t top_address;
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
output_frame->SetTop(top_address);
@@ -1503,6 +1566,10 @@ void Deoptimizer::DoComputeConstructStubFrame(int frame_index) {
output_frame->SetCallerFp(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
+ if (is_topmost) {
+ Register fp_reg = JavaScriptFrame::fp_register();
+ output_frame->SetRegister(fp_reg.code(), fp_value);
+ }
DebugPrintOutputSlot(value, frame_index, output_offset, "caller's fp\n");
if (FLAG_enable_embedded_constant_pool) {
@@ -1514,24 +1581,22 @@ void Deoptimizer::DoComputeConstructStubFrame(int frame_index) {
"caller's constant_pool\n");
}
- // The context can be gotten from the previous frame.
- output_offset -= kPointerSize;
- value = output_[frame_index - 1]->GetContext();
- output_frame->SetFrameSlot(output_offset, value);
- DebugPrintOutputSlot(value, frame_index, output_offset, "context\n");
-
- // A marker value is used in place of the function.
+ // A marker value is used to mark the frame.
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
output_frame->SetFrameSlot(output_offset, value);
DebugPrintOutputSlot(value, frame_index, output_offset,
- "function (construct sentinel)\n");
+ "typed frame marker\n");
- // The output frame reflects a JSConstructStubGeneric frame.
+ // The context can be gotten from the previous frame.
output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(construct_stub);
+ value = output_[frame_index - 1]->GetContext();
output_frame->SetFrameSlot(output_offset, value);
- DebugPrintOutputSlot(value, frame_index, output_offset, "code object\n");
+ if (is_topmost) {
+ Register context_reg = JavaScriptFrame::context_register();
+ output_frame->SetRegister(context_reg.code(), value);
+ }
+ DebugPrintOutputSlot(value, frame_index, output_offset, "context\n");
// The allocation site.
output_offset -= kPointerSize;
@@ -1556,6 +1621,18 @@ void Deoptimizer::DoComputeConstructStubFrame(int frame_index) {
DebugPrintOutputSlot(value, frame_index, output_offset,
"allocated receiver\n");
+ if (is_topmost) {
+ // Ensure the result is restored back when we return to the stub.
+ output_offset -= kPointerSize;
+ Register result_reg = FullCodeGenerator::result_register();
+ value = input_->GetRegister(result_reg.code());
+ output_frame->SetFrameSlot(output_offset, value);
+ DebugPrintOutputSlot(value, frame_index, output_offset,
+ "constructor result\n");
+
+ output_frame->SetState(Smi::FromInt(FullCodeGenerator::TOS_REG));
+ }
+
CHECK_EQ(0u, output_offset);
intptr_t pc = reinterpret_cast<intptr_t>(
@@ -1566,15 +1643,32 @@ void Deoptimizer::DoComputeConstructStubFrame(int frame_index) {
intptr_t constant_pool_value =
reinterpret_cast<intptr_t>(construct_stub->constant_pool());
output_frame->SetConstantPool(constant_pool_value);
+ if (is_topmost) {
+ Register constant_pool_reg =
+ JavaScriptFrame::constant_pool_pointer_register();
+ output_frame->SetRegister(constant_pool_reg.code(), fp_value);
+ }
}
-}
+ // Set the continuation for the topmost frame.
+ if (is_topmost) {
+ Builtins* builtins = isolate_->builtins();
+ DCHECK_EQ(LAZY, bailout_type_);
+ Code* continuation = builtins->builtin(Builtins::kNotifyLazyDeoptimized);
+ output_frame->SetContinuation(
+ reinterpret_cast<intptr_t>(continuation->entry()));
+ }
+}
-void Deoptimizer::DoComputeAccessorStubFrame(int frame_index,
+void Deoptimizer::DoComputeAccessorStubFrame(TranslatedFrame* translated_frame,
+ int frame_index,
bool is_setter_stub_frame) {
- TranslatedFrame* translated_frame =
- &(translated_state_.frames()[frame_index]);
TranslatedFrame::iterator value_iterator = translated_frame->begin();
+ bool is_topmost = (output_count_ - 1 == frame_index);
+ // The accessor frame could become topmost only if we inlined an accessor
+ // call which does a tail call (otherwise the tail callee's frame would be
+ // the topmost one). So it could only be the LAZY case.
+ CHECK(!is_topmost || bailout_type_ == LAZY);
int input_index = 0;
// Skip accessor.
@@ -1585,6 +1679,19 @@ void Deoptimizer::DoComputeAccessorStubFrame(int frame_index,
// frame. This means that we have to use a height of 0.
unsigned height = 0;
unsigned height_in_bytes = height * kPointerSize;
+
+ // If the accessor frame appears to be topmost we should ensure that the
+ // value of result register is preserved during continuation execution.
+ // We do this here by "pushing" the result of the accessor function to the
+ // top of the reconstructed stack and then using the
+ // FullCodeGenerator::TOS_REG machinery.
+ // We don't need to restore the result in case of a setter call because we
+ // have to return the stored value but not the result of the setter function.
+ bool should_preserve_result = is_topmost && !is_setter_stub_frame;
+ if (should_preserve_result) {
+ height_in_bytes += kPointerSize;
+ }
+
const char* kind = is_setter_stub_frame ? "setter" : "getter";
if (trace_scope_ != NULL) {
PrintF(trace_scope_->file(),
@@ -1592,7 +1699,7 @@ void Deoptimizer::DoComputeAccessorStubFrame(int frame_index,
}
// We need 1 stack entry for the return address and enough entries for the
- // StackFrame::INTERNAL (FP, context, frame type, code object and constant
+ // StackFrame::INTERNAL (FP, frame type, context, code object and constant
// pool (if enabled)- see MacroAssembler::EnterFrame).
// For a setter stub frame we need one additional entry for the implicit
// return value, see StoreStubCompiler::CompileStoreViaSetter.
@@ -1607,8 +1714,8 @@ void Deoptimizer::DoComputeAccessorStubFrame(int frame_index,
new (output_frame_size) FrameDescription(output_frame_size);
output_frame->SetFrameType(StackFrame::INTERNAL);
- // A frame for an accessor stub can not be the topmost or bottommost one.
- CHECK(frame_index > 0 && frame_index < output_count_ - 1);
+ // A frame for an accessor stub can not be bottommost.
+ CHECK(frame_index > 0 && frame_index < output_count_);
CHECK_NULL(output_[frame_index]);
output_[frame_index] = output_frame;
@@ -1631,6 +1738,10 @@ void Deoptimizer::DoComputeAccessorStubFrame(int frame_index,
output_frame->SetCallerFp(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
+ if (is_topmost) {
+ Register fp_reg = JavaScriptFrame::fp_register();
+ output_frame->SetRegister(fp_reg.code(), fp_value);
+ }
DebugPrintOutputSlot(value, frame_index, output_offset, "caller's fp\n");
if (FLAG_enable_embedded_constant_pool) {
@@ -1642,17 +1753,11 @@ void Deoptimizer::DoComputeAccessorStubFrame(int frame_index,
"caller's constant_pool\n");
}
- // The context can be gotten from the previous frame.
- output_offset -= kPointerSize;
- value = output_[frame_index - 1]->GetContext();
- output_frame->SetFrameSlot(output_offset, value);
- DebugPrintOutputSlot(value, frame_index, output_offset, "context\n");
-
- // A marker value is used in place of the function.
+ // Set the frame type.
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
output_frame->SetFrameSlot(output_offset, value);
- DebugPrintOutputSlot(value, frame_index, output_offset, "function ");
+ DebugPrintOutputSlot(value, frame_index, output_offset, "frame type ");
if (trace_scope_ != nullptr) {
PrintF(trace_scope_->file(), "(%s sentinel)\n", kind);
}
@@ -1667,6 +1772,16 @@ void Deoptimizer::DoComputeAccessorStubFrame(int frame_index,
output_frame->SetFrameSlot(output_offset, value);
DebugPrintOutputSlot(value, frame_index, output_offset, "code object\n");
+ // The context can be gotten from the previous frame.
+ output_offset -= kPointerSize;
+ value = output_[frame_index - 1]->GetContext();
+ output_frame->SetFrameSlot(output_offset, value);
+ if (is_topmost) {
+ Register context_reg = JavaScriptFrame::context_register();
+ output_frame->SetRegister(context_reg.code(), value);
+ }
+ DebugPrintOutputSlot(value, frame_index, output_offset, "context\n");
+
// Skip receiver.
value_iterator++;
input_index++;
@@ -1679,6 +1794,20 @@ void Deoptimizer::DoComputeAccessorStubFrame(int frame_index,
output_offset);
}
+ if (should_preserve_result) {
+ // Ensure the result is restored back when we return to the stub.
+ output_offset -= kPointerSize;
+ Register result_reg = FullCodeGenerator::result_register();
+ value = input_->GetRegister(result_reg.code());
+ output_frame->SetFrameSlot(output_offset, value);
+ DebugPrintOutputSlot(value, frame_index, output_offset,
+ "accessor result\n");
+
+ output_frame->SetState(Smi::FromInt(FullCodeGenerator::TOS_REG));
+ } else {
+ output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
+ }
+
CHECK_EQ(0u, output_offset);
Smi* offset = is_setter_stub_frame ?
@@ -1691,11 +1820,25 @@ void Deoptimizer::DoComputeAccessorStubFrame(int frame_index,
intptr_t constant_pool_value =
reinterpret_cast<intptr_t>(accessor_stub->constant_pool());
output_frame->SetConstantPool(constant_pool_value);
+ if (is_topmost) {
+ Register constant_pool_reg =
+ JavaScriptFrame::constant_pool_pointer_register();
+ output_frame->SetRegister(constant_pool_reg.code(), fp_value);
+ }
}
-}
+ // Set the continuation for the topmost frame.
+ if (is_topmost) {
+ Builtins* builtins = isolate_->builtins();
+ DCHECK_EQ(LAZY, bailout_type_);
+ Code* continuation = builtins->builtin(Builtins::kNotifyLazyDeoptimized);
+ output_frame->SetContinuation(
+ reinterpret_cast<intptr_t>(continuation->entry()));
+ }
+}
-void Deoptimizer::DoComputeCompiledStubFrame(int frame_index) {
+void Deoptimizer::DoComputeCompiledStubFrame(TranslatedFrame* translated_frame,
+ int frame_index) {
//
// FROM TO
// | .... | | .... |
@@ -1731,8 +1874,6 @@ void Deoptimizer::DoComputeCompiledStubFrame(int frame_index) {
// and then, if the descriptor specifies a constant number of stack
// parameters, the stack parameters as well.
- TranslatedFrame* translated_frame =
- &(translated_state_.frames()[frame_index]);
TranslatedFrame::iterator value_iterator = translated_frame->begin();
int input_index = 0;
@@ -1751,10 +1892,8 @@ void Deoptimizer::DoComputeCompiledStubFrame(int frame_index) {
CHECK_EQ(translated_frame->height(), param_count + 1);
CHECK_GE(param_count, 0);
- int height_in_bytes = kPointerSize * (param_count + stack_param_count) +
- sizeof(Arguments) + kPointerSize;
- int fixed_frame_size = StandardFrameConstants::kFixedFrameSize;
- int input_frame_size = input_->GetFrameSize();
+ int height_in_bytes = kPointerSize * (param_count + stack_param_count);
+ int fixed_frame_size = StubFailureTrampolineFrameConstants::kFixedFrameSize;
int output_frame_size = height_in_bytes + fixed_frame_size;
if (trace_scope_ != NULL) {
PrintF(trace_scope_->file(),
@@ -1770,28 +1909,24 @@ void Deoptimizer::DoComputeCompiledStubFrame(int frame_index) {
CHECK_EQ(frame_index, 0);
output_[frame_index] = output_frame;
- // The top address for the output frame can be computed from the input
- // frame pointer and the output frame's height. Subtract space for the
- // context and function slots.
- Register fp_reg = StubFailureTrampolineFrame::fp_register();
- intptr_t top_address = input_->GetRegister(fp_reg.code()) -
- StandardFrameConstants::kFixedFrameSizeFromFp - height_in_bytes;
+ // The top address of the frame is computed from the previous frame's top and
+ // this frame's size.
+ intptr_t top_address = caller_frame_top_ - output_frame_size;
output_frame->SetTop(top_address);
- // Read caller's PC (JSFunction continuation) from the input frame.
- unsigned input_frame_offset = input_frame_size - kPCOnStackSize;
+ // Set caller's PC (JSFunction continuation).
unsigned output_frame_offset = output_frame_size - kFPOnStackSize;
- intptr_t value = input_->GetFrameSlot(input_frame_offset);
+ intptr_t value = caller_pc_;
output_frame->SetCallerPc(output_frame_offset, value);
DebugPrintOutputSlot(value, frame_index, output_frame_offset,
"caller's pc\n");
// Read caller's FP from the input frame, and set this frame's FP.
- input_frame_offset -= kFPOnStackSize;
- value = input_->GetFrameSlot(input_frame_offset);
+ value = caller_fp_;
output_frame_offset -= kFPOnStackSize;
output_frame->SetCallerFp(output_frame_offset, value);
- intptr_t frame_ptr = input_->GetRegister(fp_reg.code());
+ intptr_t frame_ptr = top_address + output_frame_offset;
+ Register fp_reg = StubFailureTrampolineFrame::fp_register();
output_frame->SetRegister(fp_reg.code(), frame_ptr);
output_frame->SetFp(frame_ptr);
DebugPrintOutputSlot(value, frame_index, output_frame_offset,
@@ -1799,20 +1934,14 @@ void Deoptimizer::DoComputeCompiledStubFrame(int frame_index) {
if (FLAG_enable_embedded_constant_pool) {
// Read the caller's constant pool from the input frame.
- input_frame_offset -= kPointerSize;
- value = input_->GetFrameSlot(input_frame_offset);
+ value = caller_constant_pool_;
output_frame_offset -= kPointerSize;
output_frame->SetCallerConstantPool(output_frame_offset, value);
DebugPrintOutputSlot(value, frame_index, output_frame_offset,
"caller's constant_pool\n");
}
- // Remember where the context will need to be written back from the deopt
- // translation.
- output_frame_offset -= kPointerSize;
- unsigned context_frame_offset = output_frame_offset;
-
- // A marker value is used in place of the function.
+ // The marker for the typed stack frame
output_frame_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(
Smi::FromInt(StackFrame::STUB_FAILURE_TRAMPOLINE));
@@ -1873,8 +2002,6 @@ void Deoptimizer::DoComputeCompiledStubFrame(int frame_index) {
Register context_reg = StubFailureTrampolineFrame::context_register();
value = reinterpret_cast<intptr_t>(maybe_context);
output_frame->SetRegister(context_reg.code(), value);
- output_frame->SetFrameSlot(context_frame_offset, value);
- DebugPrintOutputSlot(value, frame_index, context_frame_offset, "context\n");
++value_iterator;
// Copy constant stack parameters to the failure frame. If the number of stack
@@ -1942,14 +2069,13 @@ void Deoptimizer::DoComputeCompiledStubFrame(int frame_index) {
void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
- DCHECK_NE(DEBUGGER, bailout_type_);
-
// Walk to the last JavaScript output frame to find out if it has
// adapted arguments.
for (int frame_index = 0; frame_index < jsframe_count(); ++frame_index) {
if (frame_index != 0) it->Advance();
}
- translated_state_.Prepare(it->frame()->has_adapted_arguments(), stack_fp_);
+ translated_state_.Prepare(it->frame()->has_adapted_arguments(),
+ reinterpret_cast<Address>(stack_fp_));
for (auto& materialization : values_to_materialize_) {
Handle<Object> value = materialization.value_->GetValue();
@@ -1966,7 +2092,8 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
reinterpret_cast<intptr_t>(*value);
}
- isolate_->materialized_object_store()->Remove(stack_fp_);
+ isolate_->materialized_object_store()->Remove(
+ reinterpret_cast<Address>(stack_fp_));
}
@@ -2024,25 +2151,26 @@ void Deoptimizer::DebugPrintOutputSlot(intptr_t value, int frame_index,
}
}
-
-unsigned Deoptimizer::ComputeInputFrameSize() const {
- unsigned fixed_size = StandardFrameConstants::kFixedFrameSize;
+unsigned Deoptimizer::ComputeInputFrameAboveFpFixedSize() const {
+ unsigned fixed_size = CommonFrameConstants::kFixedFrameSizeAboveFp;
if (!function_->IsSmi()) {
fixed_size += ComputeIncomingArgumentSize(function_->shared());
- } else {
- CHECK_EQ(Smi::cast(function_), Smi::FromInt(StackFrame::STUB));
}
+ return fixed_size;
+}
+
+unsigned Deoptimizer::ComputeInputFrameSize() const {
// The fp-to-sp delta already takes the context, constant pool pointer and the
// function into account so we have to avoid double counting them.
- unsigned result = fixed_size + fp_to_sp_delta_ -
- StandardFrameConstants::kFixedFrameSizeFromFp;
+ unsigned fixed_size_above_fp = ComputeInputFrameAboveFpFixedSize();
+ unsigned result = fixed_size_above_fp + fp_to_sp_delta_;
if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
unsigned stack_slots = compiled_code_->stack_slots();
unsigned outgoing_size =
ComputeOutgoingArgumentSize(compiled_code_, bailout_id_);
- CHECK(result ==
- fixed_size + (stack_slots * kPointerSize) -
- StandardFrameConstants::kFixedFrameSize + outgoing_size);
+ CHECK_EQ(fixed_size_above_fp + (stack_slots * kPointerSize) -
+ CommonFrameConstants::kFixedFrameSizeAboveFp + outgoing_size,
+ result);
}
return result;
}
@@ -2240,6 +2368,10 @@ void Translation::BeginArgumentsAdaptorFrame(int literal_id, unsigned height) {
buffer_->Add(height, zone());
}
+void Translation::BeginTailCallerFrame(int literal_id) {
+ buffer_->Add(TAIL_CALLER_FRAME, zone());
+ buffer_->Add(literal_id, zone());
+}
void Translation::BeginJSFrame(BailoutId node_id,
int literal_id,
@@ -2362,7 +2494,7 @@ void Translation::StoreArgumentsObject(bool args_known,
void Translation::StoreJSFrameFunction() {
StoreStackSlot((StandardFrameConstants::kCallerPCOffset -
- StandardFrameConstants::kMarkerOffset) /
+ StandardFrameConstants::kFunctionOffset) /
kPointerSize);
}
@@ -2385,6 +2517,7 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
case DOUBLE_STACK_SLOT:
case LITERAL:
case COMPILED_STUB_FRAME:
+ case TAIL_CALLER_FRAME:
return 1;
case BEGIN:
case ARGUMENTS_ADAPTOR_FRAME:
@@ -2510,7 +2643,9 @@ int ComputeSourcePosition(Handle<SharedFunctionInfo> shared,
BailoutId node_id) {
if (shared->HasBytecodeArray()) {
BytecodeArray* bytecodes = shared->bytecode_array();
- return bytecodes->SourcePosition(node_id.ToInt());
+ // BailoutId points to the next bytecode in the bytecode aray. Subtract
+ // 1 to get the end of current bytecode.
+ return bytecodes->SourcePosition(node_id.ToInt() - 1);
} else {
Code* non_optimized_code = shared->code();
FixedArray* raw_data = non_optimized_code->deoptimization_data();
@@ -2942,6 +3077,11 @@ TranslatedFrame TranslatedFrame::ArgumentsAdaptorFrame(
shared_info, height);
}
+TranslatedFrame TranslatedFrame::TailCallerFrame(
+ SharedFunctionInfo* shared_info) {
+ return TranslatedFrame(kTailCallerFunction, shared_info->GetIsolate(),
+ shared_info, 0);
+}
TranslatedFrame TranslatedFrame::ConstructStubFrame(
SharedFunctionInfo* shared_info, int height) {
@@ -2976,6 +3116,9 @@ int TranslatedFrame::GetValueCount() {
case kConstructStub:
return 1 + height_;
+ case kTailCallerFunction:
+ return 1; // Function.
+
case kCompiledStub:
return height_;
@@ -3052,6 +3195,18 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
return TranslatedFrame::ArgumentsAdaptorFrame(shared_info, height);
}
+ case Translation::TAIL_CALLER_FRAME: {
+ SharedFunctionInfo* shared_info =
+ SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
+ if (trace_file != nullptr) {
+ base::SmartArrayPointer<char> name =
+ shared_info->DebugName()->ToCString();
+ PrintF(trace_file, " reading tail caller frame marker %s\n",
+ name.get());
+ }
+ return TranslatedFrame::TailCallerFrame(shared_info);
+ }
+
case Translation::CONSTRUCT_STUB_FRAME: {
SharedFunctionInfo* shared_info =
SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
@@ -3152,6 +3307,7 @@ TranslatedValue TranslatedState::CreateNextTranslatedValue(
case Translation::JS_FRAME:
case Translation::INTERPRETED_FRAME:
case Translation::ARGUMENTS_ADAPTOR_FRAME:
+ case Translation::TAIL_CALLER_FRAME:
case Translation::CONSTRUCT_STUB_FRAME:
case Translation::GETTER_STUB_FRAME:
case Translation::SETTER_STUB_FRAME:
@@ -3725,7 +3881,8 @@ void TranslatedState::StoreMaterializedValuesAndDeopt() {
materialized_store->Set(stack_frame_pointer_,
previously_materialized_objects);
CHECK(frames_[0].kind() == TranslatedFrame::kFunction ||
- frames_[0].kind() == TranslatedFrame::kInterpretedFunction);
+ frames_[0].kind() == TranslatedFrame::kInterpretedFunction ||
+ frames_[0].kind() == TranslatedFrame::kTailCallerFunction);
Object* const function = frames_[0].front().GetRawValue();
Deoptimizer::DeoptimizeFunction(JSFunction::cast(function));
}
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index 0259f01ccc..21ca84ed52 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -116,6 +116,7 @@ class TranslatedFrame {
kInterpretedFunction,
kGetter,
kSetter,
+ kTailCallerFunction,
kArgumentsAdaptor,
kConstructStub,
kCompiledStub,
@@ -186,6 +187,7 @@ class TranslatedFrame {
SharedFunctionInfo* shared_info);
static TranslatedFrame ArgumentsAdaptorFrame(SharedFunctionInfo* shared_info,
int height);
+ static TranslatedFrame TailCallerFrame(SharedFunctionInfo* shared_info);
static TranslatedFrame ConstructStubFrame(SharedFunctionInfo* shared_info,
int height);
static TranslatedFrame CompiledStubFrame(int height, Isolate* isolate) {
@@ -315,7 +317,6 @@ class OptimizedFunctionVisitor BASE_EMBEDDED {
virtual void LeaveContext(Context* context) = 0;
};
-
#define DEOPT_MESSAGES_LIST(V) \
V(kAccessCheck, "Access check needed") \
V(kNoReason, "no reason") \
@@ -333,6 +334,7 @@ class OptimizedFunctionVisitor BASE_EMBEDDED {
V(kInstanceMigrationFailed, "instance migration failed") \
V(kInsufficientTypeFeedbackForCallWithArguments, \
"Insufficient type feedback for call with arguments") \
+ V(kFastArrayPushFailed, "Falling off the fast path for FastArrayPush") \
V(kInsufficientTypeFeedbackForCombinedTypeOfBinaryOperation, \
"Insufficient type feedback for combined type of binary operation") \
V(kInsufficientTypeFeedbackForGenericNamedAccess, \
@@ -396,18 +398,9 @@ class OptimizedFunctionVisitor BASE_EMBEDDED {
V(kUndefinedOrNullInForIn, "null or undefined in for-in") \
V(kUndefinedOrNullInToObject, "null or undefined in ToObject")
-
class Deoptimizer : public Malloced {
public:
- enum BailoutType {
- EAGER,
- LAZY,
- SOFT,
- // This last bailout type is not really a bailout, but used by the
- // debugger to deoptimize stack frames to allow inspection.
- DEBUGGER,
- kBailoutTypesWithCodeEntry = SOFT + 1
- };
+ enum BailoutType { EAGER, LAZY, SOFT, kLastBailoutType = SOFT };
#define DEOPT_MESSAGES_CONSTANTS(C, T) C,
enum DeoptReason {
@@ -537,8 +530,8 @@ class Deoptimizer : public Malloced {
}
static int output_offset() { return OFFSET_OF(Deoptimizer, output_); }
- static int has_alignment_padding_offset() {
- return OFFSET_OF(Deoptimizer, has_alignment_padding_);
+ static int caller_frame_top_offset() {
+ return OFFSET_OF(Deoptimizer, caller_frame_top_);
}
static int GetDeoptimizedCodeCount(Isolate* isolate);
@@ -594,12 +587,20 @@ class Deoptimizer : public Malloced {
void DeleteFrameDescriptions();
void DoComputeOutputFrames();
- void DoComputeJSFrame(int frame_index, bool goto_catch_handler);
- void DoComputeInterpretedFrame(int frame_index, bool goto_catch_handler);
- void DoComputeArgumentsAdaptorFrame(int frame_index);
- void DoComputeConstructStubFrame(int frame_index);
- void DoComputeAccessorStubFrame(int frame_index, bool is_setter_stub_frame);
- void DoComputeCompiledStubFrame(int frame_index);
+ void DoComputeJSFrame(TranslatedFrame* translated_frame, int frame_index,
+ bool goto_catch_handler);
+ void DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
+ int frame_index, bool goto_catch_handler);
+ void DoComputeArgumentsAdaptorFrame(TranslatedFrame* translated_frame,
+ int frame_index);
+ void DoComputeTailCallerFrame(TranslatedFrame* translated_frame,
+ int frame_index);
+ void DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
+ int frame_index);
+ void DoComputeAccessorStubFrame(TranslatedFrame* translated_frame,
+ int frame_index, bool is_setter_stub_frame);
+ void DoComputeCompiledStubFrame(TranslatedFrame* translated_frame,
+ int frame_index);
void WriteTranslatedValueToOutput(
TranslatedFrame::iterator* iterator, int* input_index, int frame_index,
@@ -612,6 +613,7 @@ class Deoptimizer : public Malloced {
unsigned output_offset,
const char* debug_hint_string);
+ unsigned ComputeInputFrameAboveFpFixedSize() const;
unsigned ComputeInputFrameSize() const;
static unsigned ComputeJavascriptFixedSize(SharedFunctionInfo* shared);
static unsigned ComputeInterpretedFixedSize(SharedFunctionInfo* shared);
@@ -651,10 +653,6 @@ class Deoptimizer : public Malloced {
// from the input frame's double registers.
void CopyDoubleRegisters(FrameDescription* output_frame);
- // Determines whether the input frame contains alignment padding by looking
- // at the dynamic alignment state slot inside the frame.
- bool HasAlignmentPadding(SharedFunctionInfo* shared);
-
Isolate* isolate_;
JSFunction* function_;
Code* compiled_code_;
@@ -662,7 +660,6 @@ class Deoptimizer : public Malloced {
BailoutType bailout_type_;
Address from_;
int fp_to_sp_delta_;
- int has_alignment_padding_;
bool deoptimizing_throw_;
int catch_handler_data_;
int catch_handler_pc_offset_;
@@ -676,8 +673,15 @@ class Deoptimizer : public Malloced {
// Array of output frame descriptions.
FrameDescription** output_;
+ // Caller frame details computed from input frame.
+ intptr_t caller_frame_top_;
+ intptr_t caller_fp_;
+ intptr_t caller_pc_;
+ intptr_t caller_constant_pool_;
+ intptr_t input_frame_context_;
+
// Key for lookup of previously materialized objects
- Address stack_fp_;
+ intptr_t stack_fp_;
TranslatedState translated_state_;
struct ValueToMaterialize {
@@ -891,8 +895,8 @@ class DeoptimizerData {
private:
MemoryAllocator* allocator_;
- int deopt_entry_code_entries_[Deoptimizer::kBailoutTypesWithCodeEntry];
- MemoryChunk* deopt_entry_code_[Deoptimizer::kBailoutTypesWithCodeEntry];
+ int deopt_entry_code_entries_[Deoptimizer::kLastBailoutType + 1];
+ MemoryChunk* deopt_entry_code_[Deoptimizer::kLastBailoutType + 1];
Deoptimizer* current_;
@@ -944,6 +948,7 @@ class TranslationIterator BASE_EMBEDDED {
V(GETTER_STUB_FRAME) \
V(SETTER_STUB_FRAME) \
V(ARGUMENTS_ADAPTOR_FRAME) \
+ V(TAIL_CALLER_FRAME) \
V(COMPILED_STUB_FRAME) \
V(DUPLICATED_OBJECT) \
V(ARGUMENTS_OBJECT) \
@@ -987,6 +992,7 @@ class Translation BASE_EMBEDDED {
unsigned height);
void BeginCompiledStubFrame(int height);
void BeginArgumentsAdaptorFrame(int literal_id, unsigned height);
+ void BeginTailCallerFrame(int literal_id);
void BeginConstructStubFrame(int literal_id, unsigned height);
void BeginGetterStubFrame(int literal_id);
void BeginSetterStubFrame(int literal_id);
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index 59a57e552e..ed9ca9ac66 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -10,7 +10,7 @@
#include "src/deoptimizer.h"
#include "src/disasm.h"
#include "src/macro-assembler.h"
-#include "src/snapshot/serialize.h"
+#include "src/snapshot/serializer-common.h"
#include "src/string-stream.h"
namespace v8 {
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 9fd450a75a..288c60e305 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -7,6 +7,7 @@
#include "src/arguments.h"
#include "src/conversions.h"
#include "src/factory.h"
+#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/objects-inl.h"
#include "src/utils.h"
@@ -428,7 +429,6 @@ static void CopyDictionaryToDoubleElements(FixedArrayBase* from_base,
}
}
-
static void TraceTopFrame(Isolate* isolate) {
StackFrameIterator it(isolate);
if (it.done()) {
@@ -503,12 +503,6 @@ class ElementsAccessorBase : public ElementsAccessor {
ElementsAccessorSubclass::ValidateImpl(holder);
}
- bool IsPacked(Handle<JSObject> holder, Handle<FixedArrayBase> backing_store,
- uint32_t start, uint32_t end) final {
- return ElementsAccessorSubclass::IsPackedImpl(holder, backing_store, start,
- end);
- }
-
static bool IsPackedImpl(Handle<JSObject> holder,
Handle<FixedArrayBase> backing_store, uint32_t start,
uint32_t end) {
@@ -608,81 +602,67 @@ class ElementsAccessorBase : public ElementsAccessor {
UNREACHABLE();
}
- uint32_t Push(Handle<JSArray> receiver, Handle<FixedArrayBase> backing_store,
- Arguments* args, uint32_t push_size) final {
- return ElementsAccessorSubclass::PushImpl(receiver, backing_store, args,
- push_size);
+ uint32_t Push(Handle<JSArray> receiver, Arguments* args,
+ uint32_t push_size) final {
+ return ElementsAccessorSubclass::PushImpl(receiver, args, push_size);
}
- static uint32_t PushImpl(Handle<JSArray> receiver,
- Handle<FixedArrayBase> elms_obj, Arguments* args,
+ static uint32_t PushImpl(Handle<JSArray> receiver, Arguments* args,
uint32_t push_sized) {
UNREACHABLE();
return 0;
}
- uint32_t Unshift(Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store, Arguments* args,
+ uint32_t Unshift(Handle<JSArray> receiver, Arguments* args,
uint32_t unshift_size) final {
- return ElementsAccessorSubclass::UnshiftImpl(receiver, backing_store, args,
- unshift_size);
+ return ElementsAccessorSubclass::UnshiftImpl(receiver, args, unshift_size);
}
- static uint32_t UnshiftImpl(Handle<JSArray> receiver,
- Handle<FixedArrayBase> elms_obj, Arguments* args,
+ static uint32_t UnshiftImpl(Handle<JSArray> receiver, Arguments* args,
uint32_t unshift_size) {
UNREACHABLE();
return 0;
}
- Handle<JSArray> Slice(Handle<JSObject> receiver,
- Handle<FixedArrayBase> backing_store, uint32_t start,
+ Handle<JSArray> Slice(Handle<JSObject> receiver, uint32_t start,
uint32_t end) final {
- return ElementsAccessorSubclass::SliceImpl(receiver, backing_store, start,
- end);
+ return ElementsAccessorSubclass::SliceImpl(receiver, start, end);
}
static Handle<JSArray> SliceImpl(Handle<JSObject> receiver,
- Handle<FixedArrayBase> backing_store,
uint32_t start, uint32_t end) {
UNREACHABLE();
return Handle<JSArray>();
}
- Handle<JSArray> Splice(Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store, uint32_t start,
+ Handle<JSArray> Splice(Handle<JSArray> receiver, uint32_t start,
uint32_t delete_count, Arguments* args,
uint32_t add_count) final {
- return ElementsAccessorSubclass::SpliceImpl(receiver, backing_store, start,
- delete_count, args, add_count);
+ return ElementsAccessorSubclass::SpliceImpl(receiver, start, delete_count,
+ args, add_count);
}
static Handle<JSArray> SpliceImpl(Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store,
uint32_t start, uint32_t delete_count,
Arguments* args, uint32_t add_count) {
UNREACHABLE();
return Handle<JSArray>();
}
- Handle<Object> Pop(Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store) final {
- return ElementsAccessorSubclass::PopImpl(receiver, backing_store);
+ Handle<Object> Pop(Handle<JSArray> receiver) final {
+ return ElementsAccessorSubclass::PopImpl(receiver);
}
- static Handle<Object> PopImpl(Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store) {
+ static Handle<Object> PopImpl(Handle<JSArray> receiver) {
UNREACHABLE();
return Handle<Object>();
}
- Handle<Object> Shift(Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store) final {
- return ElementsAccessorSubclass::ShiftImpl(receiver, backing_store);
+ Handle<Object> Shift(Handle<JSArray> receiver) final {
+ return ElementsAccessorSubclass::ShiftImpl(receiver);
}
- static Handle<Object> ShiftImpl(Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store) {
+ static Handle<Object> ShiftImpl(Handle<JSArray> receiver) {
UNREACHABLE();
return Handle<Object>();
}
@@ -714,8 +694,11 @@ class ElementsAccessorBase : public ElementsAccessor {
if (length == 0) {
array->initialize_elements();
} else if (length <= capacity) {
- if (array->HasFastSmiOrObjectElements()) {
- backing_store = JSObject::EnsureWritableFastElements(array);
+ if (IsFastSmiOrObjectElementsKind(kind())) {
+ JSObject::EnsureWritableFastElements(array);
+ if (array->elements() != *backing_store) {
+ backing_store = handle(array->elements(), isolate);
+ }
}
if (2 * length <= capacity) {
// If more than half the elements won't be used, trim the array.
@@ -737,6 +720,16 @@ class ElementsAccessorBase : public ElementsAccessor {
JSObject::ValidateElements(array);
}
+ static uint32_t GetIterationLength(JSObject* receiver,
+ FixedArrayBase* elements) {
+ if (receiver->IsJSArray()) {
+ DCHECK(JSArray::cast(receiver)->length()->IsSmi());
+ return static_cast<uint32_t>(
+ Smi::cast(JSArray::cast(receiver)->length())->value());
+ }
+ return ElementsAccessorSubclass::GetCapacityImpl(receiver, elements);
+ }
+
static Handle<FixedArrayBase> ConvertElementsWithCapacity(
Handle<JSObject> object, Handle<FixedArrayBase> old_elements,
ElementsKind from_kind, uint32_t capacity) {
@@ -853,40 +846,194 @@ class ElementsAccessorBase : public ElementsAccessor {
from, from_start, *to, from_kind, to_start, packed_size, copy_size);
}
+ Handle<SeededNumberDictionary> Normalize(Handle<JSObject> object) final {
+ return ElementsAccessorSubclass::NormalizeImpl(object,
+ handle(object->elements()));
+ }
+
+ static Handle<SeededNumberDictionary> NormalizeImpl(
+ Handle<JSObject> object, Handle<FixedArrayBase> elements) {
+ UNREACHABLE();
+ return Handle<SeededNumberDictionary>();
+ }
+
+ Maybe<bool> CollectValuesOrEntries(Isolate* isolate, Handle<JSObject> object,
+ Handle<FixedArray> values_or_entries,
+ bool get_entries, int* nof_items,
+ PropertyFilter filter) {
+ return ElementsAccessorSubclass::CollectValuesOrEntriesImpl(
+ isolate, object, values_or_entries, get_entries, nof_items, filter);
+ }
+
+ static Maybe<bool> CollectValuesOrEntriesImpl(
+ Isolate* isolate, Handle<JSObject> object,
+ Handle<FixedArray> values_or_entries, bool get_entries, int* nof_items,
+ PropertyFilter filter) {
+ int count = 0;
+ KeyAccumulator accumulator(isolate, OWN_ONLY, ALL_PROPERTIES);
+ accumulator.NextPrototype();
+ ElementsAccessorSubclass::CollectElementIndicesImpl(
+ object, handle(object->elements(), isolate), &accumulator, kMaxUInt32,
+ ALL_PROPERTIES, 0);
+ Handle<FixedArray> keys = accumulator.GetKeys();
+
+ for (int i = 0; i < keys->length(); ++i) {
+ Handle<Object> key(keys->get(i), isolate);
+ Handle<Object> value;
+ uint32_t index;
+ if (!key->ToUint32(&index)) continue;
+
+ uint32_t entry = ElementsAccessorSubclass::GetEntryForIndexImpl(
+ *object, object->elements(), index, filter);
+ if (entry == kMaxUInt32) continue;
+
+ PropertyDetails details =
+ ElementsAccessorSubclass::GetDetailsImpl(*object, entry);
+
+ if (details.kind() == kData) {
+ value = ElementsAccessorSubclass::GetImpl(object, entry);
+ } else {
+ LookupIterator it(isolate, object, index, LookupIterator::OWN);
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value, Object::GetProperty(&it), Nothing<bool>());
+ }
+ if (get_entries) {
+ value = MakeEntryPair(isolate, index, value);
+ }
+ values_or_entries->set(count++, *value);
+ }
+
+ *nof_items = count;
+ return Just(true);
+ }
+
+ void CollectElementIndices(Handle<JSObject> object,
+ Handle<FixedArrayBase> backing_store,
+ KeyAccumulator* keys, uint32_t range,
+ PropertyFilter filter, uint32_t offset) final {
+ if (filter & ONLY_ALL_CAN_READ) return;
+ ElementsAccessorSubclass::CollectElementIndicesImpl(
+ object, backing_store, keys, range, filter, offset);
+ }
+
static void CollectElementIndicesImpl(Handle<JSObject> object,
Handle<FixedArrayBase> backing_store,
KeyAccumulator* keys, uint32_t range,
PropertyFilter filter,
uint32_t offset) {
DCHECK_NE(DICTIONARY_ELEMENTS, kind());
- if (filter & ONLY_ALL_CAN_READ) {
- // Non-dictionary elements can't have all-can-read accessors.
- return;
- }
- uint32_t length = 0;
- if (object->IsJSArray()) {
- length = Smi::cast(JSArray::cast(*object)->length())->value();
- } else {
- length =
- ElementsAccessorSubclass::GetCapacityImpl(*object, *backing_store);
- }
+ // Non-dictionary elements can't have all-can-read accessors.
+ uint32_t length = GetIterationLength(*object, *backing_store);
if (range < length) length = range;
for (uint32_t i = offset; i < length; i++) {
- if (!ElementsAccessorSubclass::HasElementImpl(object, i, backing_store,
- filter)) {
- continue;
+ if (ElementsAccessorSubclass::HasElementImpl(object, i, backing_store,
+ filter)) {
+ keys->AddKey(i);
}
- keys->AddKey(i);
}
}
- void CollectElementIndices(Handle<JSObject> object,
- Handle<FixedArrayBase> backing_store,
- KeyAccumulator* keys, uint32_t range,
- PropertyFilter filter, uint32_t offset) final {
- ElementsAccessorSubclass::CollectElementIndicesImpl(
- object, backing_store, keys, range, filter, offset);
- };
+ static Handle<FixedArray> DirectCollectElementIndicesImpl(
+ Isolate* isolate, Handle<JSObject> object,
+ Handle<FixedArrayBase> backing_store, GetKeysConversion convert,
+ PropertyFilter filter, Handle<FixedArray> list, uint32_t* nof_indices,
+ uint32_t insertion_index = 0) {
+ uint32_t length =
+ ElementsAccessorSubclass::GetIterationLength(*object, *backing_store);
+ for (uint32_t i = 0; i < length; i++) {
+ if (ElementsAccessorSubclass::HasElementImpl(object, i, backing_store,
+ filter)) {
+ if (convert == CONVERT_TO_STRING) {
+ Handle<String> index_string = isolate->factory()->Uint32ToString(i);
+ list->set(insertion_index, *index_string);
+ } else {
+ list->set(insertion_index, Smi::FromInt(i), SKIP_WRITE_BARRIER);
+ }
+ insertion_index++;
+ }
+ }
+ *nof_indices = insertion_index;
+ return list;
+ }
+
+ Handle<FixedArray> PrependElementIndices(Handle<JSObject> object,
+ Handle<FixedArrayBase> backing_store,
+ Handle<FixedArray> keys,
+ GetKeysConversion convert,
+ PropertyFilter filter) final {
+ return ElementsAccessorSubclass::PrependElementIndicesImpl(
+ object, backing_store, keys, convert, filter);
+ }
+
+ static Handle<FixedArray> PrependElementIndicesImpl(
+ Handle<JSObject> object, Handle<FixedArrayBase> backing_store,
+ Handle<FixedArray> keys, GetKeysConversion convert,
+ PropertyFilter filter) {
+ Isolate* isolate = object->GetIsolate();
+ uint32_t nof_property_keys = keys->length();
+ uint32_t initial_list_length =
+ ElementsAccessorSubclass::GetCapacityImpl(*object, *backing_store);
+ initial_list_length += nof_property_keys;
+
+ // Collect the element indices into a new list.
+ uint32_t nof_indices = 0;
+ Handle<FixedArray> combined_keys =
+ isolate->factory()->NewFixedArray(initial_list_length);
+ combined_keys = ElementsAccessorSubclass::DirectCollectElementIndicesImpl(
+ isolate, object, backing_store, convert, filter, combined_keys,
+ &nof_indices);
+
+ // Sort the indices list if necessary.
+ if (IsDictionaryElementsKind(kind()) || IsSloppyArgumentsElements(kind())) {
+ struct {
+ bool operator()(Object* a, Object* b) {
+ if (!a->IsUndefined()) {
+ if (b->IsUndefined()) return true;
+ return a->Number() < b->Number();
+ }
+ return !b->IsUndefined();
+ }
+ } cmp;
+ Object** start =
+ reinterpret_cast<Object**>(combined_keys->GetFirstElementAddress());
+ std::sort(start, start + nof_indices, cmp);
+ uint32_t array_length = 0;
+ // Indices from dictionary elements should only be converted after
+ // sorting.
+ if (convert == CONVERT_TO_STRING) {
+ for (uint32_t i = 0; i < nof_indices; i++) {
+ Handle<Object> index_string = isolate->factory()->Uint32ToString(
+ combined_keys->get(i)->Number());
+ combined_keys->set(i, *index_string);
+ }
+ } else if (!(object->IsJSArray() &&
+ JSArray::cast(*object)->length()->ToArrayLength(
+ &array_length) &&
+ array_length <= Smi::kMaxValue)) {
+ // Since we use std::sort above, the GC will no longer know where the
+ // HeapNumbers are, hence we have to write them again.
+ // For Arrays with valid Smi length, we are sure to have no HeapNumber
+ // indices and thus we can skip this step.
+ for (uint32_t i = 0; i < nof_indices; i++) {
+ Object* index = combined_keys->get(i);
+ combined_keys->set(i, index);
+ }
+ }
+ }
+
+ // Copy over the passed-in property keys.
+ CopyObjectToObjectElements(*keys, FAST_ELEMENTS, 0, *combined_keys,
+ FAST_ELEMENTS, nof_indices, nof_property_keys);
+
+ if (IsHoleyElementsKind(kind())) {
+ // Shrink combined_keys to the final size.
+ int final_size = nof_indices + nof_property_keys;
+ DCHECK_LE(final_size, combined_keys->length());
+ combined_keys->Shrink(final_size);
+ }
+
+ return combined_keys;
+ }
void AddElementsToKeyAccumulator(Handle<JSObject> receiver,
KeyAccumulator* accumulator,
@@ -919,12 +1066,7 @@ class ElementsAccessorBase : public ElementsAccessor {
? index
: kMaxUInt32;
} else {
- uint32_t length =
- holder->IsJSArray()
- ? static_cast<uint32_t>(
- Smi::cast(JSArray::cast(holder)->length())->value())
- : ElementsAccessorSubclass::GetCapacityImpl(holder,
- backing_store);
+ uint32_t length = GetIterationLength(holder, backing_store);
return index < length ? index : kMaxUInt32;
}
}
@@ -961,6 +1103,19 @@ class DictionaryElementsAccessor
: ElementsAccessorBase<DictionaryElementsAccessor,
ElementsKindTraits<DICTIONARY_ELEMENTS> >(name) {}
+ static uint32_t GetIterationLength(JSObject* receiver,
+ FixedArrayBase* elements) {
+ uint32_t length;
+ if (receiver->IsJSArray()) {
+ // Special-case GetIterationLength for dictionary elements since the
+ // length of the array might be a HeapNumber.
+ JSArray::cast(receiver)->length()->ToArrayLength(&length);
+ } else {
+ length = GetCapacityImpl(receiver, elements);
+ }
+ return length;
+ }
+
static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
uint32_t length,
Handle<FixedArrayBase> backing_store) {
@@ -1037,12 +1192,16 @@ class DictionaryElementsAccessor
static bool HasAccessorsImpl(JSObject* holder,
FixedArrayBase* backing_store) {
+ DisallowHeapAllocation no_gc;
SeededNumberDictionary* dict = SeededNumberDictionary::cast(backing_store);
if (!dict->requires_slow_elements()) return false;
int capacity = dict->Capacity();
+ Heap* heap = holder->GetHeap();
+ Object* undefined = heap->undefined_value();
+ Object* the_hole = heap->the_hole_value();
for (int i = 0; i < capacity; i++) {
Object* key = dict->KeyAt(i);
- if (!dict->IsKey(key)) continue;
+ if (key == the_hole || key == undefined) continue;
DCHECK(!dict->IsDeleted(i));
PropertyDetails details = dict->DetailsAt(i);
if (details.type() == ACCESSOR_CONSTANT) return true;
@@ -1141,47 +1300,97 @@ class DictionaryElementsAccessor
return SeededNumberDictionary::cast(backing_store)->DetailsAt(entry);
}
+ static uint32_t FilterKey(Handle<SeededNumberDictionary> dictionary,
+ int entry, Object* raw_key, PropertyFilter filter) {
+ DCHECK(!dictionary->IsDeleted(entry));
+ DCHECK(raw_key->IsNumber());
+ DCHECK_LE(raw_key->Number(), kMaxUInt32);
+ PropertyDetails details = dictionary->DetailsAt(entry);
+ PropertyAttributes attr = details.attributes();
+ if ((attr & filter) != 0) return kMaxUInt32;
+ return static_cast<uint32_t>(raw_key->Number());
+ }
+
+ static uint32_t GetKeyForEntryImpl(Handle<SeededNumberDictionary> dictionary,
+ int entry, PropertyFilter filter) {
+ DisallowHeapAllocation no_gc;
+ Object* raw_key = dictionary->KeyAt(entry);
+ if (!dictionary->IsKey(raw_key)) return kMaxUInt32;
+ return FilterKey(dictionary, entry, raw_key, filter);
+ }
+
+ static uint32_t GetKeyForEntryImpl(Handle<SeededNumberDictionary> dictionary,
+ int entry, PropertyFilter filter,
+ Object* undefined, Object* the_hole) {
+ DisallowHeapAllocation no_gc;
+ Object* raw_key = dictionary->KeyAt(entry);
+ // Replace the IsKey check with a direct comparison which is much faster.
+ if (raw_key == undefined || raw_key == the_hole) {
+ return kMaxUInt32;
+ }
+ return FilterKey(dictionary, entry, raw_key, filter);
+ }
+
static void CollectElementIndicesImpl(Handle<JSObject> object,
Handle<FixedArrayBase> backing_store,
KeyAccumulator* keys, uint32_t range,
PropertyFilter filter,
uint32_t offset) {
+ if (filter & SKIP_STRINGS) return;
+ Isolate* isolate = keys->isolate();
+ Handle<Object> undefined = isolate->factory()->undefined_value();
+ Handle<Object> the_hole = isolate->factory()->the_hole_value();
Handle<SeededNumberDictionary> dictionary =
Handle<SeededNumberDictionary>::cast(backing_store);
int capacity = dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
- Object* k = dictionary->KeyAt(i);
- if (!dictionary->IsKey(k)) continue;
- if (k->FilterKey(filter)) continue;
- if (dictionary->IsDeleted(i)) continue;
- DCHECK(k->IsNumber());
- DCHECK_LE(k->Number(), kMaxUInt32);
- uint32_t index = static_cast<uint32_t>(k->Number());
- if (index < offset) continue;
- PropertyDetails details = dictionary->DetailsAt(i);
- if (filter & ONLY_ALL_CAN_READ) {
- if (details.kind() != kAccessor) continue;
- Object* accessors = dictionary->ValueAt(i);
- if (!accessors->IsAccessorInfo()) continue;
- if (!AccessorInfo::cast(accessors)->all_can_read()) continue;
- }
- PropertyAttributes attr = details.attributes();
- if ((attr & filter) != 0) continue;
- keys->AddKey(index);
+ uint32_t key =
+ GetKeyForEntryImpl(dictionary, i, filter, *undefined, *the_hole);
+ if (key == kMaxUInt32) continue;
+ keys->AddKey(key);
}
keys->SortCurrentElementsList();
}
+ static Handle<FixedArray> DirectCollectElementIndicesImpl(
+ Isolate* isolate, Handle<JSObject> object,
+ Handle<FixedArrayBase> backing_store, GetKeysConversion convert,
+ PropertyFilter filter, Handle<FixedArray> list, uint32_t* nof_indices,
+ uint32_t insertion_index = 0) {
+ if (filter & SKIP_STRINGS) return list;
+ if (filter & ONLY_ALL_CAN_READ) return list;
+
+ Handle<Object> undefined = isolate->factory()->undefined_value();
+ Handle<Object> the_hole = isolate->factory()->the_hole_value();
+ Handle<SeededNumberDictionary> dictionary =
+ Handle<SeededNumberDictionary>::cast(backing_store);
+ uint32_t capacity = dictionary->Capacity();
+ for (uint32_t i = 0; i < capacity; i++) {
+ uint32_t key =
+ GetKeyForEntryImpl(dictionary, i, filter, *undefined, *the_hole);
+ if (key == kMaxUInt32) continue;
+ Handle<Object> index = isolate->factory()->NewNumberFromUint(key);
+ list->set(insertion_index, *index);
+ insertion_index++;
+ }
+ *nof_indices = insertion_index;
+ return list;
+ }
+
static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
KeyAccumulator* accumulator,
AddKeyConversion convert) {
+ Isolate* isolate = accumulator->isolate();
+ Handle<Object> undefined = isolate->factory()->undefined_value();
+ Handle<Object> the_hole = isolate->factory()->the_hole_value();
SeededNumberDictionary* dictionary =
SeededNumberDictionary::cast(receiver->elements());
int capacity = dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
Object* k = dictionary->KeyAt(i);
- if (!dictionary->IsKey(k)) continue;
+ if (k == *undefined) continue;
+ if (k == *the_hole) continue;
if (dictionary->IsDeleted(i)) continue;
Object* value = dictionary->ValueAt(i);
DCHECK(!value->IsTheHole());
@@ -1205,6 +1414,36 @@ class FastElementsAccessor
typedef typename KindTraits::BackingStore BackingStore;
+ static Handle<SeededNumberDictionary> NormalizeImpl(
+ Handle<JSObject> object, Handle<FixedArrayBase> store) {
+ Isolate* isolate = store->GetIsolate();
+ ElementsKind kind = FastElementsAccessorSubclass::kind();
+
+ // Ensure that notifications fire if the array or object prototypes are
+ // normalizing.
+ if (IsFastSmiOrObjectElementsKind(kind)) {
+ isolate->UpdateArrayProtectorOnNormalizeElements(object);
+ }
+
+ int capacity = object->GetFastElementsUsage();
+ Handle<SeededNumberDictionary> dictionary =
+ SeededNumberDictionary::New(isolate, capacity);
+
+ PropertyDetails details = PropertyDetails::Empty();
+ bool used_as_prototype = object->map()->is_prototype_map();
+ int j = 0;
+ for (int i = 0; j < capacity; i++) {
+ if (IsHoleyElementsKind(kind)) {
+ if (BackingStore::cast(*store)->is_the_hole(i)) continue;
+ }
+ Handle<Object> value = FastElementsAccessorSubclass::GetImpl(*store, i);
+ dictionary = SeededNumberDictionary::AddNumberEntry(
+ dictionary, i, value, details, used_as_prototype);
+ j++;
+ }
+ return dictionary;
+ }
+
static void DeleteAtEnd(Handle<JSObject> obj,
Handle<BackingStore> backing_store, uint32_t entry) {
uint32_t length = static_cast<uint32_t>(backing_store->length());
@@ -1337,15 +1576,10 @@ class FastElementsAccessor
static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
KeyAccumulator* accumulator,
AddKeyConversion convert) {
- uint32_t length = 0;
Handle<FixedArrayBase> elements(receiver->elements(),
receiver->GetIsolate());
- if (receiver->IsJSArray()) {
- length = Smi::cast(JSArray::cast(*receiver)->length())->value();
- } else {
- length =
- FastElementsAccessorSubclass::GetCapacityImpl(*receiver, *elements);
- }
+ uint32_t length =
+ FastElementsAccessorSubclass::GetIterationLength(*receiver, *elements);
for (uint32_t i = 0; i < length; i++) {
if (IsFastPackedElementsKind(KindTraits::Kind) ||
HasEntryImpl(*elements, i)) {
@@ -1380,45 +1614,33 @@ class FastElementsAccessor
#endif
}
- static Handle<Object> PopImpl(Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store) {
- return FastElementsAccessorSubclass::RemoveElement(receiver, backing_store,
- AT_END);
+ static Handle<Object> PopImpl(Handle<JSArray> receiver) {
+ return FastElementsAccessorSubclass::RemoveElement(receiver, AT_END);
}
- static Handle<Object> ShiftImpl(Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store) {
- return FastElementsAccessorSubclass::RemoveElement(receiver, backing_store,
- AT_START);
+ static Handle<Object> ShiftImpl(Handle<JSArray> receiver) {
+ return FastElementsAccessorSubclass::RemoveElement(receiver, AT_START);
}
static uint32_t PushImpl(Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store,
Arguments* args, uint32_t push_size) {
+ Handle<FixedArrayBase> backing_store(receiver->elements());
return FastElementsAccessorSubclass::AddArguments(receiver, backing_store,
args, push_size, AT_END);
}
static uint32_t UnshiftImpl(Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store,
Arguments* args, uint32_t unshift_size) {
+ Handle<FixedArrayBase> backing_store(receiver->elements());
return FastElementsAccessorSubclass::AddArguments(
receiver, backing_store, args, unshift_size, AT_START);
}
- static void MoveElements(Isolate* isolate, Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store, int dst_index,
- int src_index, int len, int hole_start,
- int hole_end) {
- UNREACHABLE();
- }
-
static Handle<JSArray> SliceImpl(Handle<JSObject> receiver,
- Handle<FixedArrayBase> backing_store,
uint32_t start, uint32_t end) {
- DCHECK(start < end);
Isolate* isolate = receiver->GetIsolate();
- int result_len = end - start;
+ Handle<FixedArrayBase> backing_store(receiver->elements(), isolate);
+ int result_len = end < start ? 0u : end - start;
Handle<JSArray> result_array = isolate->factory()->NewJSArray(
KindTraits::Kind, result_len, result_len);
DisallowHeapAllocation no_gc;
@@ -1431,7 +1653,6 @@ class FastElementsAccessor
}
static Handle<JSArray> SpliceImpl(Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store,
uint32_t start, uint32_t delete_count,
Arguments* args, uint32_t add_count) {
Isolate* isolate = receiver->GetIsolate();
@@ -1439,6 +1660,15 @@ class FastElementsAccessor
uint32_t length = Smi::cast(receiver->length())->value();
uint32_t new_length = length - delete_count + add_count;
+ ElementsKind kind = KindTraits::Kind;
+ if (new_length <= static_cast<uint32_t>(receiver->elements()->length()) &&
+ IsFastSmiOrObjectElementsKind(kind)) {
+ HandleScope scope(isolate);
+ JSObject::EnsureWritableFastElements(receiver);
+ }
+
+ Handle<FixedArrayBase> backing_store(receiver->elements(), isolate);
+
if (new_length == 0) {
receiver->set_elements(heap->empty_fixed_array());
receiver->set_length(Smi::FromInt(0));
@@ -1477,6 +1707,55 @@ class FastElementsAccessor
return deleted_elements;
}
+ static Maybe<bool> CollectValuesOrEntriesImpl(
+ Isolate* isolate, Handle<JSObject> object,
+ Handle<FixedArray> values_or_entries, bool get_entries, int* nof_items,
+ PropertyFilter filter) {
+ int count = 0;
+ uint32_t length = object->elements()->length();
+ for (uint32_t index = 0; index < length; ++index) {
+ if (!HasEntryImpl(object->elements(), index)) continue;
+ Handle<Object> value =
+ FastElementsAccessorSubclass::GetImpl(object->elements(), index);
+ if (get_entries) {
+ value = MakeEntryPair(isolate, index, value);
+ }
+ values_or_entries->set(count++, *value);
+ }
+ *nof_items = count;
+ return Just(true);
+ }
+
+ static void MoveElements(Isolate* isolate, Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store, int dst_index,
+ int src_index, int len, int hole_start,
+ int hole_end) {
+ Heap* heap = isolate->heap();
+ Handle<BackingStore> dst_elms = Handle<BackingStore>::cast(backing_store);
+ if (heap->CanMoveObjectStart(*dst_elms) && dst_index == 0) {
+ // Update all the copies of this backing_store handle.
+ *dst_elms.location() =
+ BackingStore::cast(heap->LeftTrimFixedArray(*dst_elms, src_index));
+ receiver->set_elements(*dst_elms);
+ // Adjust the hole offset as the array has been shrunk.
+ hole_end -= src_index;
+ DCHECK_LE(hole_start, backing_store->length());
+ DCHECK_LE(hole_end, backing_store->length());
+ } else if (len != 0) {
+ if (IsFastDoubleElementsKind(KindTraits::Kind)) {
+ MemMove(dst_elms->data_start() + dst_index,
+ dst_elms->data_start() + src_index, len * kDoubleSize);
+ } else {
+ DisallowHeapAllocation no_gc;
+ heap->MoveElements(FixedArray::cast(*dst_elms), dst_index, src_index,
+ len);
+ }
+ }
+ if (hole_start != hole_end) {
+ dst_elms->FillWithHoles(hole_start, hole_end);
+ }
+ }
+
private:
// SpliceShrinkStep might modify the backing_store.
static void SpliceShrinkStep(Isolate* isolate, Handle<JSArray> receiver,
@@ -1523,9 +1802,14 @@ class FastElementsAccessor
}
static Handle<Object> RemoveElement(Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store,
Where remove_position) {
Isolate* isolate = receiver->GetIsolate();
+ ElementsKind kind = KindTraits::Kind;
+ if (IsFastSmiOrObjectElementsKind(kind)) {
+ HandleScope scope(isolate);
+ JSObject::EnsureWritableFastElements(receiver);
+ }
+ Handle<FixedArrayBase> backing_store(receiver->elements(), isolate);
uint32_t length =
static_cast<uint32_t>(Smi::cast(receiver->length())->value());
DCHECK(length > 0);
@@ -1540,8 +1824,8 @@ class FastElementsAccessor
FastElementsAccessorSubclass::SetLengthImpl(isolate, receiver, new_length,
backing_store);
- if (IsHoleyElementsKind(KindTraits::Kind) && result->IsTheHole()) {
- return receiver->GetIsolate()->factory()->undefined_value();
+ if (IsHoleyElementsKind(kind) && result->IsTheHole()) {
+ return isolate->factory()->undefined_value();
}
return result;
}
@@ -1551,7 +1835,7 @@ class FastElementsAccessor
Arguments* args, uint32_t add_size,
Where remove_position) {
uint32_t length = Smi::cast(receiver->length())->value();
- DCHECK(add_size > 0);
+ DCHECK(0 < add_size);
uint32_t elms_len = backing_store->length();
// Check we do not overflow the new_length.
DCHECK(add_size <= static_cast<uint32_t>(Smi::kMaxValue - length));
@@ -1630,29 +1914,6 @@ class FastSmiOrObjectElementsAccessor
return backing_store->get(index);
}
- static void MoveElements(Isolate* isolate, Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store, int dst_index,
- int src_index, int len, int hole_start,
- int hole_end) {
- Heap* heap = isolate->heap();
- Handle<FixedArray> dst_elms = Handle<FixedArray>::cast(backing_store);
- if (heap->CanMoveObjectStart(*dst_elms) && dst_index == 0) {
- // Update all the copies of this backing_store handle.
- *dst_elms.location() =
- FixedArray::cast(heap->LeftTrimFixedArray(*dst_elms, src_index));
- receiver->set_elements(*dst_elms);
- // Adjust the hole offset as the array has been shrunk.
- hole_end -= src_index;
- DCHECK_LE(hole_start, backing_store->length());
- DCHECK_LE(hole_end, backing_store->length());
- } else if (len != 0) {
- DisallowHeapAllocation no_gc;
- heap->MoveElements(*dst_elms, dst_index, src_index, len);
- }
- if (hole_start != hole_end) {
- dst_elms->FillWithHoles(hole_start, hole_end);
- }
- }
// NOTE: this method violates the handlified function signature convention:
// raw pointer parameters in the function that allocates.
@@ -1784,31 +2045,6 @@ class FastDoubleElementsAccessor
FixedDoubleArray::cast(backing_store)->set(entry, value->Number());
}
- static void MoveElements(Isolate* isolate, Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store, int dst_index,
- int src_index, int len, int hole_start,
- int hole_end) {
- Heap* heap = isolate->heap();
- Handle<FixedDoubleArray> dst_elms =
- Handle<FixedDoubleArray>::cast(backing_store);
- if (heap->CanMoveObjectStart(*dst_elms) && dst_index == 0) {
- // Update all the copies of this backing_store handle.
- *dst_elms.location() = FixedDoubleArray::cast(
- heap->LeftTrimFixedArray(*dst_elms, src_index));
- receiver->set_elements(*dst_elms);
- // Adjust the hole offset as the array has been shrunk.
- hole_end -= src_index;
- DCHECK_LE(hole_start, backing_store->length());
- DCHECK_LE(hole_end, backing_store->length());
- } else if (len != 0) {
- MemMove(dst_elms->data_start() + dst_index,
- dst_elms->data_start() + src_index, len * kDoubleSize);
- }
- if (hole_start != hole_end) {
- dst_elms->FillWithHoles(hole_start, hole_end);
- }
- }
-
static void CopyElementsImpl(FixedArrayBase* from, uint32_t from_start,
FixedArrayBase* to, ElementsKind from_kind,
uint32_t to_start, int packed_size,
@@ -1965,14 +2201,33 @@ class TypedElementsAccessor
static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
KeyAccumulator* accumulator,
AddKeyConversion convert) {
- Handle<FixedArrayBase> elements(receiver->elements(),
- receiver->GetIsolate());
+ Handle<FixedArrayBase> elements(receiver->elements());
uint32_t length = AccessorClass::GetCapacityImpl(*receiver, *elements);
for (uint32_t i = 0; i < length; i++) {
Handle<Object> value = AccessorClass::GetImpl(*elements, i);
accumulator->AddKey(value, convert);
}
}
+
+ static Maybe<bool> CollectValuesOrEntriesImpl(
+ Isolate* isolate, Handle<JSObject> object,
+ Handle<FixedArray> values_or_entries, bool get_entries, int* nof_items,
+ PropertyFilter filter) {
+ int count = 0;
+ if ((filter & ONLY_CONFIGURABLE) == 0) {
+ Handle<FixedArrayBase> elements(object->elements());
+ uint32_t length = AccessorClass::GetCapacityImpl(*object, *elements);
+ for (uint32_t index = 0; index < length; ++index) {
+ Handle<Object> value = AccessorClass::GetImpl(*elements, index);
+ if (get_entries) {
+ value = MakeEntryPair(isolate, index, value);
+ }
+ values_or_entries->set(count++, *value);
+ }
+ }
+ *nof_items = count;
+ return Just(true);
+ }
};
@@ -2163,6 +2418,55 @@ class SloppyArgumentsElementsAccessor
obj, entry - length);
}
}
+
+ static void CollectElementIndicesImpl(Handle<JSObject> object,
+ Handle<FixedArrayBase> backing_store,
+ KeyAccumulator* keys, uint32_t range,
+ PropertyFilter filter,
+ uint32_t offset) {
+ FixedArray* parameter_map = FixedArray::cast(*backing_store);
+ uint32_t length = parameter_map->length() - 2;
+ if (range < length) length = range;
+
+ for (uint32_t i = offset; i < length; ++i) {
+ if (!parameter_map->get(i + 2)->IsTheHole()) {
+ keys->AddKey(i);
+ }
+ }
+
+ Handle<FixedArrayBase> store(FixedArrayBase::cast(parameter_map->get(1)));
+ ArgumentsAccessor::CollectElementIndicesImpl(object, store, keys, range,
+ filter, offset);
+ if (SloppyArgumentsElementsAccessorSubclass::kind() ==
+ FAST_SLOPPY_ARGUMENTS_ELEMENTS) {
+ keys->SortCurrentElementsList();
+ }
+ }
+
+ static Handle<FixedArray> DirectCollectElementIndicesImpl(
+ Isolate* isolate, Handle<JSObject> object,
+ Handle<FixedArrayBase> backing_store, GetKeysConversion convert,
+ PropertyFilter filter, Handle<FixedArray> list, uint32_t* nof_indices,
+ uint32_t insertion_index = 0) {
+ FixedArray* parameter_map = FixedArray::cast(*backing_store);
+ uint32_t length = parameter_map->length() - 2;
+
+ for (uint32_t i = 0; i < length; ++i) {
+ if (parameter_map->get(i + 2)->IsTheHole()) continue;
+ if (convert == CONVERT_TO_STRING) {
+ Handle<String> index_string = isolate->factory()->Uint32ToString(i);
+ list->set(insertion_index, *index_string);
+ } else {
+ list->set(insertion_index, Smi::FromInt(i), SKIP_WRITE_BARRIER);
+ }
+ insertion_index++;
+ }
+
+ Handle<FixedArrayBase> store(FixedArrayBase::cast(parameter_map->get(1)));
+ return ArgumentsAccessor::DirectCollectElementIndicesImpl(
+ isolate, object, store, convert, filter, list, nof_indices,
+ insertion_index);
+ }
};
@@ -2264,6 +2568,13 @@ class FastSloppyArgumentsElementsAccessor
FastHoleyObjectElementsAccessor,
ElementsKindTraits<FAST_SLOPPY_ARGUMENTS_ELEMENTS> >(name) {}
+ static Handle<SeededNumberDictionary> NormalizeImpl(
+ Handle<JSObject> object, Handle<FixedArrayBase> elements) {
+ FixedArray* parameter_map = FixedArray::cast(*elements);
+ Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)));
+ return FastHoleyObjectElementsAccessor::NormalizeImpl(object, arguments);
+ }
+
static void DeleteFromArguments(Handle<JSObject> obj, uint32_t entry) {
FixedArray* parameter_map = FixedArray::cast(obj->elements());
Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)));
@@ -2452,11 +2763,9 @@ class StringWrapperElementsAccessor
KeyAccumulator* keys, uint32_t range,
PropertyFilter filter,
uint32_t offset) {
- if ((filter & ONLY_ALL_CAN_READ) == 0) {
- uint32_t length = GetString(*object)->length();
- for (uint32_t i = 0; i < length; i++) {
- keys->AddKey(i);
- }
+ uint32_t length = GetString(*object)->length();
+ for (uint32_t i = 0; i < length; i++) {
+ keys->AddKey(i);
}
BackingStoreAccessor::CollectElementIndicesImpl(object, backing_store, keys,
range, filter, offset);
@@ -2488,6 +2797,11 @@ class FastStringWrapperElementsAccessor
: StringWrapperElementsAccessor<
FastStringWrapperElementsAccessor, FastHoleyObjectElementsAccessor,
ElementsKindTraits<FAST_STRING_WRAPPER_ELEMENTS>>(name) {}
+
+ static Handle<SeededNumberDictionary> NormalizeImpl(
+ Handle<JSObject> object, Handle<FixedArrayBase> elements) {
+ return FastHoleyObjectElementsAccessor::NormalizeImpl(object, elements);
+ }
};
class SlowStringWrapperElementsAccessor
@@ -2664,62 +2978,66 @@ void ElementsAccessor::TearDown() {
Handle<JSArray> ElementsAccessor::Concat(Isolate* isolate, Arguments* args,
uint32_t concat_size) {
- int result_len = 0;
- ElementsKind elements_kind = GetInitialFastElementsKind();
- bool has_double = false;
+ const int kHalfOfMaxInt = 1 << (kBitsPerInt - 2);
+ STATIC_ASSERT(FixedDoubleArray::kMaxLength < kHalfOfMaxInt);
+ USE(kHalfOfMaxInt);
+ uint32_t result_len = 0;
+ bool has_raw_doubles = false;
+ ElementsKind result_elements_kind = GetInitialFastElementsKind();
{
DisallowHeapAllocation no_gc;
+ bool is_holey = false;
// Iterate through all the arguments performing checks
// and calculating total length.
- bool is_holey = false;
for (uint32_t i = 0; i < concat_size; i++) {
- Object* arg = (*args)[i];
- int len = Smi::cast(JSArray::cast(arg)->length())->value();
+ JSArray* array = JSArray::cast((*args)[i]);
+ uint32_t len = 0;
+ array->length()->ToArrayLength(&len);
// We shouldn't overflow when adding another len.
- const int kHalfOfMaxInt = 1 << (kBitsPerInt - 2);
- STATIC_ASSERT(FixedArray::kMaxLength < kHalfOfMaxInt);
- USE(kHalfOfMaxInt);
result_len += len;
DCHECK(0 <= result_len);
DCHECK(result_len <= FixedDoubleArray::kMaxLength);
- ElementsKind arg_kind = JSArray::cast(arg)->map()->elements_kind();
- has_double = has_double || IsFastDoubleElementsKind(arg_kind);
+ ElementsKind arg_kind = array->GetElementsKind();
+ has_raw_doubles = has_raw_doubles || IsFastDoubleElementsKind(arg_kind);
is_holey = is_holey || IsFastHoleyElementsKind(arg_kind);
- elements_kind = GetMoreGeneralElementsKind(elements_kind, arg_kind);
+ result_elements_kind =
+ GetMoreGeneralElementsKind(result_elements_kind, arg_kind);
}
if (is_holey) {
- elements_kind = GetHoleyElementsKind(elements_kind);
+ result_elements_kind = GetHoleyElementsKind(result_elements_kind);
}
}
// If a double array is concatted into a fast elements array, the fast
// elements array needs to be initialized to contain proper holes, since
// boxing doubles may cause incremental marking.
- ArrayStorageAllocationMode mode =
- has_double && IsFastObjectElementsKind(elements_kind)
- ? INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
- : DONT_INITIALIZE_ARRAY_ELEMENTS;
+ bool requires_double_boxing =
+ has_raw_doubles && !IsFastDoubleElementsKind(result_elements_kind);
+ ArrayStorageAllocationMode mode = requires_double_boxing
+ ? INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
+ : DONT_INITIALIZE_ARRAY_ELEMENTS;
Handle<JSArray> result_array = isolate->factory()->NewJSArray(
- elements_kind, result_len, result_len, Strength::WEAK, mode);
+ result_elements_kind, result_len, result_len, mode);
if (result_len == 0) return result_array;
- int j = 0;
+
+ uint32_t insertion_index = 0;
Handle<FixedArrayBase> storage(result_array->elements(), isolate);
- ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
+ ElementsAccessor* accessor = ElementsAccessor::ForKind(result_elements_kind);
for (uint32_t i = 0; i < concat_size; i++) {
// It is crucial to keep |array| in a raw pointer form to avoid
// performance degradation.
JSArray* array = JSArray::cast((*args)[i]);
- int len = Smi::cast(array->length())->value();
- if (len > 0) {
- ElementsKind from_kind = array->GetElementsKind();
- accessor->CopyElements(array, 0, from_kind, storage, j, len);
- j += len;
- }
+ uint32_t len = 0;
+ array->length()->ToArrayLength(&len);
+ if (len == 0) continue;
+ ElementsKind from_kind = array->GetElementsKind();
+ accessor->CopyElements(array, 0, from_kind, storage, insertion_index, len);
+ insertion_index += len;
}
- DCHECK(j == result_len);
+ DCHECK_EQ(insertion_index, result_len);
return result_array;
}
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index 483d753bc1..2b18ab07d1 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -8,7 +8,7 @@
#include "src/elements-kind.h"
#include "src/heap/heap.h"
#include "src/isolate.h"
-#include "src/key-accumulator.h"
+#include "src/keys.h"
#include "src/objects.h"
namespace v8 {
@@ -52,11 +52,6 @@ class ElementsAccessor {
return HasElement(holder, index, handle(holder->elements()), filter);
}
- // Returns true if the backing store is compact in the given range
- virtual bool IsPacked(Handle<JSObject> holder,
- Handle<FixedArrayBase> backing_store, uint32_t start,
- uint32_t end) = 0;
-
virtual Handle<Object> Get(Handle<JSObject> holder, uint32_t entry) = 0;
virtual PropertyDetails GetDetails(JSObject* holder, uint32_t entry) = 0;
@@ -100,6 +95,24 @@ class ElementsAccessor {
filter, offset);
}
+ virtual Maybe<bool> CollectValuesOrEntries(
+ Isolate* isolate, Handle<JSObject> object,
+ Handle<FixedArray> values_or_entries, bool get_entries, int* nof_items,
+ PropertyFilter filter = ALL_PROPERTIES) = 0;
+
+ //
+ virtual Handle<FixedArray> PrependElementIndices(
+ Handle<JSObject> object, Handle<FixedArrayBase> backing_store,
+ Handle<FixedArray> keys, GetKeysConversion convert,
+ PropertyFilter filter = ALL_PROPERTIES) = 0;
+
+ inline Handle<FixedArray> PrependElementIndices(
+ Handle<JSObject> object, Handle<FixedArray> keys,
+ GetKeysConversion convert, PropertyFilter filter = ALL_PROPERTIES) {
+ return PrependElementIndices(object, handle(object->elements()), keys,
+ convert, filter);
+ }
+
virtual void AddElementsToKeyAccumulator(Handle<JSObject> receiver,
KeyAccumulator* accumulator,
AddKeyConversion convert) = 0;
@@ -124,28 +137,27 @@ class ElementsAccessor {
static Handle<JSArray> Concat(Isolate* isolate, Arguments* args,
uint32_t concat_size);
- virtual uint32_t Push(Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store, Arguments* args,
+ virtual uint32_t Push(Handle<JSArray> receiver, Arguments* args,
uint32_t push_size) = 0;
virtual uint32_t Unshift(Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store,
Arguments* args, uint32_t unshift_size) = 0;
virtual Handle<JSArray> Slice(Handle<JSObject> receiver,
- Handle<FixedArrayBase> backing_store,
uint32_t start, uint32_t end) = 0;
virtual Handle<JSArray> Splice(Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store,
uint32_t start, uint32_t delete_count,
Arguments* args, uint32_t add_count) = 0;
- virtual Handle<Object> Pop(Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store) = 0;
+ virtual Handle<Object> Pop(Handle<JSArray> receiver) = 0;
- virtual Handle<Object> Shift(Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store) = 0;
+ virtual Handle<Object> Shift(Handle<JSArray> receiver) = 0;
+
+ virtual Handle<SeededNumberDictionary> Normalize(Handle<JSObject> object) = 0;
+
+ virtual uint32_t GetCapacity(JSObject* holder,
+ FixedArrayBase* backing_store) = 0;
protected:
friend class LookupIterator;
@@ -172,8 +184,6 @@ class ElementsAccessor {
uint32_t destination_start, int copy_size) = 0;
private:
- virtual uint32_t GetCapacity(JSObject* holder,
- FixedArrayBase* backing_store) = 0;
static ElementsAccessor** elements_accessors_;
const char* name_;
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index e6a569f33d..a092a8a06c 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -138,16 +138,6 @@ MaybeHandle<Object> Execution::Call(Isolate* isolate, Handle<Object> callable,
Handle<JSFunction> function = Handle<JSFunction>::cast(callable);
SaveContext save(isolate);
isolate->set_context(function->context());
- // Do proper receiver conversion for non-strict mode api functions.
- if (!receiver->IsJSReceiver() &&
- is_sloppy(function->shared()->language_mode())) {
- if (receiver->IsUndefined() || receiver->IsNull()) {
- receiver = handle(function->global_proxy(), isolate);
- } else {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, receiver,
- Object::ToObject(isolate, receiver), Object);
- }
- }
DCHECK(function->context()->global_object()->IsJSGlobalObject());
auto value = Builtins::InvokeApiFunction(function, receiver, argc, argv);
bool has_exception = value.is_null();
diff --git a/deps/v8/src/external-reference-table.cc b/deps/v8/src/external-reference-table.cc
new file mode 100644
index 0000000000..29a2474b09
--- /dev/null
+++ b/deps/v8/src/external-reference-table.cc
@@ -0,0 +1,354 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/external-reference-table.h"
+
+#include "src/accessors.h"
+#include "src/assembler.h"
+#include "src/counters.h"
+#include "src/deoptimizer.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+ExternalReferenceTable* ExternalReferenceTable::instance(Isolate* isolate) {
+ ExternalReferenceTable* external_reference_table =
+ isolate->external_reference_table();
+ if (external_reference_table == NULL) {
+ external_reference_table = new ExternalReferenceTable(isolate);
+ isolate->set_external_reference_table(external_reference_table);
+ }
+ return external_reference_table;
+}
+
+ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
+ // Miscellaneous
+ Add(ExternalReference::roots_array_start(isolate).address(),
+ "Heap::roots_array_start()");
+ Add(ExternalReference::address_of_stack_limit(isolate).address(),
+ "StackGuard::address_of_jslimit()");
+ Add(ExternalReference::address_of_real_stack_limit(isolate).address(),
+ "StackGuard::address_of_real_jslimit()");
+ Add(ExternalReference::new_space_allocation_limit_address(isolate).address(),
+ "Heap::NewSpaceAllocationLimitAddress()");
+ Add(ExternalReference::new_space_allocation_top_address(isolate).address(),
+ "Heap::NewSpaceAllocationTopAddress()");
+ Add(ExternalReference::mod_two_doubles_operation(isolate).address(),
+ "mod_two_doubles");
+ // Keyed lookup cache.
+ Add(ExternalReference::keyed_lookup_cache_keys(isolate).address(),
+ "KeyedLookupCache::keys()");
+ Add(ExternalReference::keyed_lookup_cache_field_offsets(isolate).address(),
+ "KeyedLookupCache::field_offsets()");
+ Add(ExternalReference::handle_scope_next_address(isolate).address(),
+ "HandleScope::next");
+ Add(ExternalReference::handle_scope_limit_address(isolate).address(),
+ "HandleScope::limit");
+ Add(ExternalReference::handle_scope_level_address(isolate).address(),
+ "HandleScope::level");
+ Add(ExternalReference::new_deoptimizer_function(isolate).address(),
+ "Deoptimizer::New()");
+ Add(ExternalReference::compute_output_frames_function(isolate).address(),
+ "Deoptimizer::ComputeOutputFrames()");
+ Add(ExternalReference::address_of_min_int().address(),
+ "LDoubleConstant::min_int");
+ Add(ExternalReference::address_of_one_half().address(),
+ "LDoubleConstant::one_half");
+ Add(ExternalReference::isolate_address(isolate).address(), "isolate");
+ Add(ExternalReference::interpreter_dispatch_table_address(isolate).address(),
+ "Interpreter::dispatch_table_address");
+ Add(ExternalReference::address_of_negative_infinity().address(),
+ "LDoubleConstant::negative_infinity");
+ Add(ExternalReference::power_double_double_function(isolate).address(),
+ "power_double_double_function");
+ Add(ExternalReference::power_double_int_function(isolate).address(),
+ "power_double_int_function");
+ Add(ExternalReference::math_log_double_function(isolate).address(),
+ "std::log");
+ Add(ExternalReference::store_buffer_top(isolate).address(),
+ "store_buffer_top");
+ Add(ExternalReference::address_of_the_hole_nan().address(), "the_hole_nan");
+ Add(ExternalReference::get_date_field_function(isolate).address(),
+ "JSDate::GetField");
+ Add(ExternalReference::date_cache_stamp(isolate).address(),
+ "date_cache_stamp");
+ Add(ExternalReference::address_of_pending_message_obj(isolate).address(),
+ "address_of_pending_message_obj");
+ Add(ExternalReference::get_make_code_young_function(isolate).address(),
+ "Code::MakeCodeYoung");
+ Add(ExternalReference::cpu_features().address(), "cpu_features");
+ Add(ExternalReference::old_space_allocation_top_address(isolate).address(),
+ "Heap::OldSpaceAllocationTopAddress");
+ Add(ExternalReference::old_space_allocation_limit_address(isolate).address(),
+ "Heap::OldSpaceAllocationLimitAddress");
+ Add(ExternalReference::allocation_sites_list_address(isolate).address(),
+ "Heap::allocation_sites_list_address()");
+ Add(ExternalReference::address_of_uint32_bias().address(), "uint32_bias");
+ Add(ExternalReference::get_mark_code_as_executed_function(isolate).address(),
+ "Code::MarkCodeAsExecuted");
+ Add(ExternalReference::is_profiling_address(isolate).address(),
+ "CpuProfiler::is_profiling");
+ Add(ExternalReference::scheduled_exception_address(isolate).address(),
+ "Isolate::scheduled_exception");
+ Add(ExternalReference::invoke_function_callback(isolate).address(),
+ "InvokeFunctionCallback");
+ Add(ExternalReference::invoke_accessor_getter_callback(isolate).address(),
+ "InvokeAccessorGetterCallback");
+ Add(ExternalReference::wasm_f32_trunc(isolate).address(),
+ "wasm::f32_trunc_wrapper");
+ Add(ExternalReference::wasm_f32_floor(isolate).address(),
+ "wasm::f32_floor_wrapper");
+ Add(ExternalReference::wasm_f32_ceil(isolate).address(),
+ "wasm::f32_ceil_wrapper");
+ Add(ExternalReference::wasm_f32_nearest_int(isolate).address(),
+ "wasm::f32_nearest_int_wrapper");
+ Add(ExternalReference::wasm_f64_trunc(isolate).address(),
+ "wasm::f64_trunc_wrapper");
+ Add(ExternalReference::wasm_f64_floor(isolate).address(),
+ "wasm::f64_floor_wrapper");
+ Add(ExternalReference::wasm_f64_ceil(isolate).address(),
+ "wasm::f64_ceil_wrapper");
+ Add(ExternalReference::wasm_f64_nearest_int(isolate).address(),
+ "wasm::f64_nearest_int_wrapper");
+ Add(ExternalReference::wasm_int64_to_float32(isolate).address(),
+ "wasm::int64_to_float32_wrapper");
+ Add(ExternalReference::wasm_uint64_to_float32(isolate).address(),
+ "wasm::uint64_to_float32_wrapper");
+ Add(ExternalReference::wasm_int64_to_float64(isolate).address(),
+ "wasm::int64_to_float64_wrapper");
+ Add(ExternalReference::wasm_uint64_to_float64(isolate).address(),
+ "wasm::uint64_to_float64_wrapper");
+ Add(ExternalReference::wasm_float32_to_int64(isolate).address(),
+ "wasm::float32_to_int64_wrapper");
+ Add(ExternalReference::wasm_float32_to_uint64(isolate).address(),
+ "wasm::float32_to_uint64_wrapper");
+ Add(ExternalReference::wasm_float64_to_int64(isolate).address(),
+ "wasm::float64_to_int64_wrapper");
+ Add(ExternalReference::wasm_float64_to_uint64(isolate).address(),
+ "wasm::float64_to_uint64_wrapper");
+ Add(ExternalReference::wasm_int64_div(isolate).address(), "wasm::int64_div");
+ Add(ExternalReference::wasm_int64_mod(isolate).address(), "wasm::int64_mod");
+ Add(ExternalReference::wasm_uint64_div(isolate).address(),
+ "wasm::uint64_div");
+ Add(ExternalReference::wasm_uint64_mod(isolate).address(),
+ "wasm::uint64_mod");
+ Add(ExternalReference::f64_acos_wrapper_function(isolate).address(),
+ "f64_acos_wrapper");
+ Add(ExternalReference::f64_asin_wrapper_function(isolate).address(),
+ "f64_asin_wrapper");
+ Add(ExternalReference::f64_atan_wrapper_function(isolate).address(),
+ "f64_atan_wrapper");
+ Add(ExternalReference::f64_cos_wrapper_function(isolate).address(),
+ "f64_cos_wrapper");
+ Add(ExternalReference::f64_sin_wrapper_function(isolate).address(),
+ "f64_sin_wrapper");
+ Add(ExternalReference::f64_tan_wrapper_function(isolate).address(),
+ "f64_tan_wrapper");
+ Add(ExternalReference::f64_exp_wrapper_function(isolate).address(),
+ "f64_exp_wrapper");
+ Add(ExternalReference::f64_log_wrapper_function(isolate).address(),
+ "f64_log_wrapper");
+ Add(ExternalReference::f64_pow_wrapper_function(isolate).address(),
+ "f64_pow_wrapper");
+ Add(ExternalReference::f64_atan2_wrapper_function(isolate).address(),
+ "f64_atan2_wrapper");
+ Add(ExternalReference::f64_mod_wrapper_function(isolate).address(),
+ "f64_mod_wrapper");
+ Add(ExternalReference::log_enter_external_function(isolate).address(),
+ "Logger::EnterExternal");
+ Add(ExternalReference::log_leave_external_function(isolate).address(),
+ "Logger::LeaveExternal");
+ Add(ExternalReference::address_of_minus_one_half().address(),
+ "double_constants.minus_one_half");
+ Add(ExternalReference::stress_deopt_count(isolate).address(),
+ "Isolate::stress_deopt_count_address()");
+ Add(ExternalReference::virtual_handler_register(isolate).address(),
+ "Isolate::virtual_handler_register()");
+ Add(ExternalReference::virtual_slot_register(isolate).address(),
+ "Isolate::virtual_slot_register()");
+ Add(ExternalReference::runtime_function_table_address(isolate).address(),
+ "Runtime::runtime_function_table_address()");
+ Add(ExternalReference::is_tail_call_elimination_enabled_address(isolate)
+ .address(),
+ "Isolate::is_tail_call_elimination_enabled_address()");
+
+ // Debug addresses
+ Add(ExternalReference::debug_after_break_target_address(isolate).address(),
+ "Debug::after_break_target_address()");
+ Add(ExternalReference::debug_is_active_address(isolate).address(),
+ "Debug::is_active_address()");
+ Add(ExternalReference::debug_step_in_enabled_address(isolate).address(),
+ "Debug::step_in_enabled_address()");
+
+#ifndef V8_INTERPRETED_REGEXP
+ Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(),
+ "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
+ Add(ExternalReference::re_check_stack_guard_state(isolate).address(),
+ "RegExpMacroAssembler*::CheckStackGuardState()");
+ Add(ExternalReference::re_grow_stack(isolate).address(),
+ "NativeRegExpMacroAssembler::GrowStack()");
+ Add(ExternalReference::re_word_character_map().address(),
+ "NativeRegExpMacroAssembler::word_character_map");
+ Add(ExternalReference::address_of_regexp_stack_limit(isolate).address(),
+ "RegExpStack::limit_address()");
+ Add(ExternalReference::address_of_regexp_stack_memory_address(isolate)
+ .address(),
+ "RegExpStack::memory_address()");
+ Add(ExternalReference::address_of_regexp_stack_memory_size(isolate).address(),
+ "RegExpStack::memory_size()");
+ Add(ExternalReference::address_of_static_offsets_vector(isolate).address(),
+ "OffsetsVector::static_offsets_vector");
+#endif // V8_INTERPRETED_REGEXP
+
+ // The following populates all of the different type of external references
+ // into the ExternalReferenceTable.
+ //
+ // NOTE: This function was originally 100k of code. It has since been
+ // rewritten to be mostly table driven, as the callback macro style tends to
+ // very easily cause code bloat. Please be careful in the future when adding
+ // new references.
+
+ struct RefTableEntry {
+ uint16_t id;
+ const char* name;
+ };
+
+ static const RefTableEntry c_builtins[] = {
+#define DEF_ENTRY_C(name, ignored) {Builtins::c_##name, "Builtins::" #name},
+ BUILTIN_LIST_C(DEF_ENTRY_C)
+#undef DEF_ENTRY_C
+ };
+
+ for (unsigned i = 0; i < arraysize(c_builtins); ++i) {
+ ExternalReference ref(static_cast<Builtins::CFunctionId>(c_builtins[i].id),
+ isolate);
+ Add(ref.address(), c_builtins[i].name);
+ }
+
+ static const RefTableEntry builtins[] = {
+#define DEF_ENTRY_C(name, ignored) {Builtins::k##name, "Builtins::" #name},
+#define DEF_ENTRY_A(name, i1, i2, i3) {Builtins::k##name, "Builtins::" #name},
+ BUILTIN_LIST_C(DEF_ENTRY_C) BUILTIN_LIST_A(DEF_ENTRY_A)
+ BUILTIN_LIST_DEBUG_A(DEF_ENTRY_A)
+#undef DEF_ENTRY_C
+#undef DEF_ENTRY_A
+ };
+
+ for (unsigned i = 0; i < arraysize(builtins); ++i) {
+ ExternalReference ref(static_cast<Builtins::Name>(builtins[i].id), isolate);
+ Add(ref.address(), builtins[i].name);
+ }
+
+ static const RefTableEntry runtime_functions[] = {
+#define RUNTIME_ENTRY(name, i1, i2) {Runtime::k##name, "Runtime::" #name},
+ FOR_EACH_INTRINSIC(RUNTIME_ENTRY)
+#undef RUNTIME_ENTRY
+ };
+
+ for (unsigned i = 0; i < arraysize(runtime_functions); ++i) {
+ ExternalReference ref(
+ static_cast<Runtime::FunctionId>(runtime_functions[i].id), isolate);
+ Add(ref.address(), runtime_functions[i].name);
+ }
+
+ // Stat counters
+ struct StatsRefTableEntry {
+ StatsCounter* (Counters::*counter)();
+ const char* name;
+ };
+
+ static const StatsRefTableEntry stats_ref_table[] = {
+#define COUNTER_ENTRY(name, caption) {&Counters::name, "Counters::" #name},
+ STATS_COUNTER_LIST_1(COUNTER_ENTRY) STATS_COUNTER_LIST_2(COUNTER_ENTRY)
+#undef COUNTER_ENTRY
+ };
+
+ Counters* counters = isolate->counters();
+ for (unsigned i = 0; i < arraysize(stats_ref_table); ++i) {
+ // To make sure the indices are not dependent on whether counters are
+ // enabled, use a dummy address as filler.
+ Address address = NotAvailable();
+ StatsCounter* counter = (counters->*(stats_ref_table[i].counter))();
+ if (counter->Enabled()) {
+ address = reinterpret_cast<Address>(counter->GetInternalPointer());
+ }
+ Add(address, stats_ref_table[i].name);
+ }
+
+ // Top addresses
+ static const char* address_names[] = {
+#define BUILD_NAME_LITERAL(Name, name) "Isolate::" #name "_address",
+ FOR_EACH_ISOLATE_ADDRESS_NAME(BUILD_NAME_LITERAL) NULL
+#undef BUILD_NAME_LITERAL
+ };
+
+ for (int i = 0; i < Isolate::kIsolateAddressCount; ++i) {
+ Add(isolate->get_address_from_id(static_cast<Isolate::AddressId>(i)),
+ address_names[i]);
+ }
+
+ // Accessors
+ struct AccessorRefTable {
+ Address address;
+ const char* name;
+ };
+
+ static const AccessorRefTable accessors[] = {
+#define ACCESSOR_INFO_DECLARATION(name) \
+ {FUNCTION_ADDR(&Accessors::name##Getter), "Accessors::" #name "Getter"},
+ ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
+#undef ACCESSOR_INFO_DECLARATION
+#define ACCESSOR_SETTER_DECLARATION(name) \
+ {FUNCTION_ADDR(&Accessors::name), "Accessors::" #name},
+ ACCESSOR_SETTER_LIST(ACCESSOR_SETTER_DECLARATION)
+#undef ACCESSOR_INFO_DECLARATION
+ };
+
+ for (unsigned i = 0; i < arraysize(accessors); ++i) {
+ Add(accessors[i].address, accessors[i].name);
+ }
+
+ StubCache* stub_cache = isolate->stub_cache();
+
+ // Stub cache tables
+ Add(stub_cache->key_reference(StubCache::kPrimary).address(),
+ "StubCache::primary_->key");
+ Add(stub_cache->value_reference(StubCache::kPrimary).address(),
+ "StubCache::primary_->value");
+ Add(stub_cache->map_reference(StubCache::kPrimary).address(),
+ "StubCache::primary_->map");
+ Add(stub_cache->key_reference(StubCache::kSecondary).address(),
+ "StubCache::secondary_->key");
+ Add(stub_cache->value_reference(StubCache::kSecondary).address(),
+ "StubCache::secondary_->value");
+ Add(stub_cache->map_reference(StubCache::kSecondary).address(),
+ "StubCache::secondary_->map");
+
+ // Runtime entries
+ Add(ExternalReference::delete_handle_scope_extensions(isolate).address(),
+ "HandleScope::DeleteExtensions");
+ Add(ExternalReference::incremental_marking_record_write_function(isolate)
+ .address(),
+ "IncrementalMarking::RecordWrite");
+ Add(ExternalReference::incremental_marking_record_write_code_entry_function(
+ isolate)
+ .address(),
+ "IncrementalMarking::RecordWriteOfCodeEntryFromCode");
+ Add(ExternalReference::store_buffer_overflow_function(isolate).address(),
+ "StoreBuffer::StoreBufferOverflow");
+
+ // Add a small set of deopt entry addresses to encoder without generating the
+ // deopt table code, which isn't possible at deserialization time.
+ HandleScope scope(isolate);
+ for (int entry = 0; entry < kDeoptTableSerializeEntryCount; ++entry) {
+ Address address = Deoptimizer::GetDeoptimizationEntry(
+ isolate, entry, Deoptimizer::LAZY,
+ Deoptimizer::CALCULATE_ENTRY_ADDRESS);
+ Add(address, "lazy_deopt");
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/external-reference-table.h b/deps/v8/src/external-reference-table.h
new file mode 100644
index 0000000000..2ea4b14cf3
--- /dev/null
+++ b/deps/v8/src/external-reference-table.h
@@ -0,0 +1,50 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXTERNAL_REFERENCE_TABLE_H_
+#define V8_EXTERNAL_REFERENCE_TABLE_H_
+
+#include "src/address-map.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+
+// ExternalReferenceTable is a helper class that defines the relationship
+// between external references and their encodings. It is used to build
+// hashmaps in ExternalReferenceEncoder and ExternalReferenceDecoder.
+class ExternalReferenceTable {
+ public:
+ static ExternalReferenceTable* instance(Isolate* isolate);
+
+ int size() const { return refs_.length(); }
+ Address address(int i) { return refs_[i].address; }
+ const char* name(int i) { return refs_[i].name; }
+
+ inline static Address NotAvailable() { return NULL; }
+
+ static const int kDeoptTableSerializeEntryCount = 64;
+
+ private:
+ struct ExternalReferenceEntry {
+ Address address;
+ const char* name;
+ };
+
+ explicit ExternalReferenceTable(Isolate* isolate);
+
+ void Add(Address address, const char* name) {
+ ExternalReferenceEntry entry = {address, name};
+ refs_.Add(entry);
+ }
+
+ List<ExternalReferenceEntry> refs_;
+
+ DISALLOW_COPY_AND_ASSIGN(ExternalReferenceTable);
+};
+
+} // namespace internal
+} // namespace v8
+#endif // V8_EXTERNAL_REFERENCE_TABLE_H_
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index 15ddb5ff43..41c3cb5eaa 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -112,12 +112,12 @@ Factory::NewSloppyBlockWithEvalContextExtension(
return result;
}
-
Handle<Oddball> Factory::NewOddball(Handle<Map> map, const char* to_string,
- Handle<Object> to_number,
+ Handle<Object> to_number, bool to_boolean,
const char* type_of, byte kind) {
Handle<Oddball> oddball = New<Oddball>(map, OLD_SPACE);
- Oddball::Initialize(isolate(), oddball, to_string, to_number, type_of, kind);
+ Oddball::Initialize(isolate(), oddball, to_string, to_number, to_boolean,
+ type_of, kind);
return oddball;
}
@@ -800,6 +800,22 @@ Handle<Context> Factory::NewCatchContext(Handle<JSFunction> function,
return context;
}
+Handle<Context> Factory::NewDebugEvaluateContext(Handle<Context> previous,
+ Handle<JSReceiver> extension,
+ Handle<Context> wrapped,
+ Handle<StringSet> whitelist) {
+ STATIC_ASSERT(Context::WHITE_LIST_INDEX == Context::MIN_CONTEXT_SLOTS + 1);
+ Handle<FixedArray> array = NewFixedArray(Context::MIN_CONTEXT_SLOTS + 2);
+ array->set_map_no_write_barrier(*debug_evaluate_context_map());
+ Handle<Context> c = Handle<Context>::cast(array);
+ c->set_closure(wrapped.is_null() ? previous->closure() : wrapped->closure());
+ c->set_previous(*previous);
+ c->set_native_context(previous->native_context());
+ if (!extension.is_null()) c->set(Context::EXTENSION_INDEX, *extension);
+ if (!wrapped.is_null()) c->set(Context::WRAPPED_CONTEXT_INDEX, *wrapped);
+ if (!whitelist.is_null()) c->set(Context::WHITE_LIST_INDEX, *whitelist);
+ return c;
+}
Handle<Context> Factory::NewWithContext(Handle<JSFunction> function,
Handle<Context> previous,
@@ -859,6 +875,7 @@ Handle<AccessorInfo> Factory::NewAccessorInfo() {
Handle<AccessorInfo> info =
Handle<AccessorInfo>::cast(NewStruct(ACCESSOR_INFO_TYPE));
info->set_flag(0); // Must clear the flag, it was initialized as undefined.
+ info->set_is_sloppy(true);
return info;
}
@@ -1350,33 +1367,8 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
info->ResetForNewContext(isolate()->heap()->global_ic_age());
}
- if (FLAG_always_opt && info->allows_lazy_compilation()) {
- result->MarkForOptimization();
- }
-
- CodeAndLiterals cached = info->SearchOptimizedCodeMap(
- context->native_context(), BailoutId::None());
- if (cached.code != nullptr) {
- // Caching of optimized code enabled and optimized code found.
- DCHECK(!cached.code->marked_for_deoptimization());
- DCHECK(result->shared()->is_compiled());
- result->ReplaceCode(cached.code);
- }
-
- if (cached.literals != nullptr) {
- result->set_literals(cached.literals);
- } else {
- int number_of_literals = info->num_literals();
- Handle<LiteralsArray> literals =
- LiteralsArray::New(isolate(), handle(info->feedback_vector()),
- number_of_literals, pretenure);
- result->set_literals(*literals);
-
- // Cache context-specific literals.
- Handle<Context> native_context(context->native_context());
- SharedFunctionInfo::AddLiteralsToOptimizedCodeMap(info, native_context,
- literals);
- }
+ // Give compiler a chance to pre-initialize.
+ Compiler::PostInstantiation(result, pretenure);
return result;
}
@@ -1507,6 +1499,14 @@ Handle<JSObject> Factory::NewJSObjectWithMemento(
JSObject);
}
+Handle<JSObject> Factory::NewJSObjectWithNullProto() {
+ Handle<JSObject> result = NewJSObject(isolate()->object_function());
+ Handle<Map> new_map =
+ Map::Copy(Handle<Map>(result->map()), "ObjectWithNullProto");
+ Map::SetPrototype(new_map, null_value());
+ JSObject::MigrateToMap(result, new_map);
+ return result;
+}
Handle<JSModule> Factory::NewJSModule(Handle<Context> context,
Handle<ScopeInfo> scope_info) {
@@ -1596,11 +1596,9 @@ Handle<JSObject> Factory::NewJSObjectFromMap(
Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind,
- Strength strength,
PretenureFlag pretenure) {
- Map* map = isolate()->get_initial_js_array_map(elements_kind, strength);
+ Map* map = isolate()->get_initial_js_array_map(elements_kind);
if (map == nullptr) {
- DCHECK(strength == Strength::WEAK);
Context* native_context = isolate()->context()->native_context();
JSFunction* array_function = native_context->array_function();
map = array_function->initial_map();
@@ -1608,23 +1606,21 @@ Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind,
return Handle<JSArray>::cast(NewJSObjectFromMap(handle(map), pretenure));
}
-
Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind, int length,
- int capacity, Strength strength,
+ int capacity,
ArrayStorageAllocationMode mode,
PretenureFlag pretenure) {
- Handle<JSArray> array = NewJSArray(elements_kind, strength, pretenure);
+ Handle<JSArray> array = NewJSArray(elements_kind, pretenure);
NewJSArrayStorage(array, length, capacity, mode);
return array;
}
-
Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
ElementsKind elements_kind,
- int length, Strength strength,
+ int length,
PretenureFlag pretenure) {
DCHECK(length <= elements->length());
- Handle<JSArray> array = NewJSArray(elements_kind, strength, pretenure);
+ Handle<JSArray> array = NewJSArray(elements_kind, pretenure);
array->set_elements(*elements);
array->set_length(Smi::FromInt(length));
@@ -2071,16 +2067,13 @@ void Factory::ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> object,
object->set_hash(*hash);
}
-
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
Handle<String> name, int number_of_literals, FunctionKind kind,
- Handle<Code> code, Handle<ScopeInfo> scope_info,
- Handle<TypeFeedbackVector> feedback_vector) {
+ Handle<Code> code, Handle<ScopeInfo> scope_info) {
DCHECK(IsValidFunctionKind(kind));
Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(
name, code, IsConstructable(kind, scope_info->language_mode()));
shared->set_scope_info(*scope_info);
- shared->set_feedback_vector(*feedback_vector);
shared->set_kind(kind);
shared->set_num_literals(number_of_literals);
if (IsGeneratorFunction(kind)) {
@@ -2136,7 +2129,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
share->set_function_data(*undefined_value(), SKIP_WRITE_BARRIER);
share->set_script(*undefined_value(), SKIP_WRITE_BARRIER);
share->set_debug_info(DebugInfo::uninitialized(), SKIP_WRITE_BARRIER);
- share->set_inferred_name(*empty_string(), SKIP_WRITE_BARRIER);
+ share->set_function_identifier(*undefined_value(), SKIP_WRITE_BARRIER);
StaticFeedbackVectorSpec empty_spec;
Handle<TypeFeedbackMetadata> feedback_metadata =
TypeFeedbackMetadata::New(isolate(), &empty_spec);
@@ -2300,7 +2293,6 @@ Handle<JSWeakMap> Factory::NewJSWeakMap() {
Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> context,
int number_of_properties,
- bool is_strong,
bool* is_result_from_cache) {
const int kMapCacheSize = 128;
@@ -2309,29 +2301,21 @@ Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> context,
isolate()->bootstrapper()->IsActive()) {
*is_result_from_cache = false;
Handle<Map> map = Map::Create(isolate(), number_of_properties);
- if (is_strong) map->set_is_strong();
return map;
}
*is_result_from_cache = true;
if (number_of_properties == 0) {
// Reuse the initial map of the Object function if the literal has no
- // predeclared properties, or the strong map if strong.
- return handle(is_strong
- ? context->js_object_strong_map()
- : context->object_function()->initial_map(), isolate());
+ // predeclared properties.
+ return handle(context->object_function()->initial_map(), isolate());
}
int cache_index = number_of_properties - 1;
- Handle<Object> maybe_cache(is_strong ? context->strong_map_cache()
- : context->map_cache(), isolate());
+ Handle<Object> maybe_cache(context->map_cache(), isolate());
if (maybe_cache->IsUndefined()) {
// Allocate the new map cache for the native context.
maybe_cache = NewFixedArray(kMapCacheSize, TENURED);
- if (is_strong) {
- context->set_strong_map_cache(*maybe_cache);
- } else {
- context->set_map_cache(*maybe_cache);
- }
+ context->set_map_cache(*maybe_cache);
} else {
// Check to see whether there is a matching element in the cache.
Handle<FixedArray> cache = Handle<FixedArray>::cast(maybe_cache);
@@ -2346,7 +2330,6 @@ Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> context,
// Create a new map and add it to the cache.
Handle<FixedArray> cache = Handle<FixedArray>::cast(maybe_cache);
Handle<Map> map = Map::Create(isolate(), number_of_properties);
- if (is_strong) map->set_is_strong();
Handle<WeakCell> cell = NewWeakCell(map);
cache->set(cache_index, *cell);
return map;
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index dd107d144b..2fa2901437 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -16,8 +16,8 @@ namespace internal {
class Factory final {
public:
Handle<Oddball> NewOddball(Handle<Map> map, const char* to_string,
- Handle<Object> to_number, const char* type_of,
- byte kind);
+ Handle<Object> to_number, bool to_boolean,
+ const char* type_of, byte kind);
// Allocates a fixed array initialized with undefined values.
Handle<FixedArray> NewFixedArray(
@@ -256,6 +256,11 @@ class Factory final {
Handle<Context> previous,
Handle<JSReceiver> extension);
+ Handle<Context> NewDebugEvaluateContext(Handle<Context> previous,
+ Handle<JSReceiver> extension,
+ Handle<Context> wrapped,
+ Handle<StringSet> whitelist);
+
// Create a block context.
Handle<Context> NewBlockContext(Handle<JSFunction> function,
Handle<Context> previous,
@@ -389,6 +394,8 @@ class Factory final {
// JSObject that should have a memento pointing to the allocation site.
Handle<JSObject> NewJSObjectWithMemento(Handle<JSFunction> constructor,
Handle<AllocationSite> site);
+ // JSObject without a prototype.
+ Handle<JSObject> NewJSObjectWithNullProto();
// Global objects are pretenured and initialized based on a constructor.
Handle<JSGlobalObject> NewJSGlobalObject(Handle<JSFunction> constructor);
@@ -410,34 +417,30 @@ class Factory final {
// according to the specified mode.
Handle<JSArray> NewJSArray(
ElementsKind elements_kind, int length, int capacity,
- Strength strength = Strength::WEAK,
ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS,
PretenureFlag pretenure = NOT_TENURED);
Handle<JSArray> NewJSArray(
int capacity, ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
- Strength strength = Strength::WEAK,
PretenureFlag pretenure = NOT_TENURED) {
if (capacity != 0) {
elements_kind = GetHoleyElementsKind(elements_kind);
}
- return NewJSArray(elements_kind, 0, capacity, strength,
+ return NewJSArray(elements_kind, 0, capacity,
INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE, pretenure);
}
// Create a JSArray with the given elements.
Handle<JSArray> NewJSArrayWithElements(Handle<FixedArrayBase> elements,
ElementsKind elements_kind, int length,
- Strength strength = Strength::WEAK,
PretenureFlag pretenure = NOT_TENURED);
Handle<JSArray> NewJSArrayWithElements(
Handle<FixedArrayBase> elements,
ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
- Strength strength = Strength::WEAK,
PretenureFlag pretenure = NOT_TENURED) {
return NewJSArrayWithElements(elements, elements_kind, elements->length(),
- strength, pretenure);
+ pretenure);
}
void NewJSArrayStorage(
@@ -631,8 +634,7 @@ class Factory final {
// Allocates a new SharedFunctionInfo object.
Handle<SharedFunctionInfo> NewSharedFunctionInfo(
Handle<String> name, int number_of_literals, FunctionKind kind,
- Handle<Code> code, Handle<ScopeInfo> scope_info,
- Handle<TypeFeedbackVector> feedback_vector);
+ Handle<Code> code, Handle<ScopeInfo> scope_info);
Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name,
MaybeHandle<Code> code,
bool is_constructor);
@@ -651,7 +653,6 @@ class Factory final {
// native context.
Handle<Map> ObjectLiteralMapFromCache(Handle<Context> context,
int number_of_properties,
- bool is_strong,
bool* is_result_from_cache);
// Creates a new FixedArray that holds the data associated with the
@@ -710,7 +711,6 @@ class Factory final {
// Create a JSArray with no elements and no length.
Handle<JSArray> NewJSArray(ElementsKind elements_kind,
- Strength strength = Strength::WEAK,
PretenureFlag pretenure = NOT_TENURED);
};
diff --git a/deps/v8/src/compiler/fast-accessor-assembler.cc b/deps/v8/src/fast-accessor-assembler.cc
index 518003b2ee..cd2910ca13 100644
--- a/deps/v8/src/compiler/fast-accessor-assembler.cc
+++ b/deps/v8/src/fast-accessor-assembler.cc
@@ -2,34 +2,29 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/fast-accessor-assembler.h"
+#include "src/fast-accessor-assembler.h"
#include "src/base/logging.h"
-#include "src/code-stubs.h" // For CallApiFunctionStub.
-#include "src/compiler/graph.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/pipeline.h"
-#include "src/compiler/raw-machine-assembler.h"
-#include "src/compiler/schedule.h"
-#include "src/compiler/verifier.h"
+#include "src/code-stubs.h" // For CallApiCallbackStub.
+#include "src/compiler/code-stub-assembler.h"
#include "src/handles-inl.h"
-#include "src/objects.h" // For FAA::GetInternalField impl.
+#include "src/objects.h" // For FAA::LoadInternalField impl.
+
+using v8::internal::compiler::CodeStubAssembler;
+using v8::internal::compiler::Node;
namespace v8 {
namespace internal {
-namespace compiler {
FastAccessorAssembler::FastAccessorAssembler(Isolate* isolate)
- : zone_(),
- assembler_(new RawMachineAssembler(
- isolate, new (zone()) Graph(zone()),
- Linkage::GetJSCallDescriptor(&zone_, false, 1,
- CallDescriptor::kNoFlags))),
+ : zone_(isolate->allocator()),
+ isolate_(isolate),
+ assembler_(new CodeStubAssembler(isolate, zone(), 1,
+ Code::ComputeFlags(Code::STUB),
+ "FastAccessorAssembler")),
state_(kBuilding) {}
-
-FastAccessorAssembler::~FastAccessorAssembler() {}
-
+FastAccessorAssembler::~FastAccessorAssembler() { Clear(); }
FastAccessorAssembler::ValueId FastAccessorAssembler::IntegerConstant(
int const_value) {
@@ -37,34 +32,32 @@ FastAccessorAssembler::ValueId FastAccessorAssembler::IntegerConstant(
return FromRaw(assembler_->NumberConstant(const_value));
}
-
FastAccessorAssembler::ValueId FastAccessorAssembler::GetReceiver() {
CHECK_EQ(kBuilding, state_);
- // For JS call descriptor, the receiver is parameter 0. If we use other
- // call descriptors, this may or may not hold. So let's check.
- CHECK(assembler_->call_descriptor()->IsJSFunctionCall());
+ // For JS functions, the receiver is parameter 0.
return FromRaw(assembler_->Parameter(0));
}
-
FastAccessorAssembler::ValueId FastAccessorAssembler::LoadInternalField(
ValueId value, int field_no) {
CHECK_EQ(kBuilding, state_);
+
// Determine the 'value' object's instance type.
- Node* object_map =
- assembler_->Load(MachineType::Pointer(), FromId(value),
- assembler_->IntPtrConstant(
- Internals::kHeapObjectMapOffset - kHeapObjectTag));
+ Node* object_map = assembler_->LoadObjectField(
+ FromId(value), Internals::kHeapObjectMapOffset, MachineType::Pointer());
Node* instance_type = assembler_->WordAnd(
- assembler_->Load(
- MachineType::Uint16(), object_map,
- assembler_->IntPtrConstant(
- Internals::kMapInstanceTypeAndBitFieldOffset - kHeapObjectTag)),
+ assembler_->LoadObjectField(object_map,
+ Internals::kMapInstanceTypeAndBitFieldOffset,
+ MachineType::Uint16()),
assembler_->IntPtrConstant(0xff));
// Check whether we have a proper JSObject.
- RawMachineLabel is_jsobject, is_not_jsobject, merge;
+ CodeStubAssembler::Variable result(assembler_.get(),
+ MachineRepresentation::kTagged);
+ CodeStubAssembler::Label is_jsobject(assembler_.get());
+ CodeStubAssembler::Label is_not_jsobject(assembler_.get());
+ CodeStubAssembler::Label merge(assembler_.get(), &result);
assembler_->Branch(
assembler_->WordEqual(
instance_type, assembler_->IntPtrConstant(Internals::kJSObjectType)),
@@ -72,10 +65,10 @@ FastAccessorAssembler::ValueId FastAccessorAssembler::LoadInternalField(
// JSObject? Then load the internal field field_no.
assembler_->Bind(&is_jsobject);
- Node* internal_field = assembler_->Load(
- MachineType::Pointer(), FromId(value),
- assembler_->IntPtrConstant(JSObject::kHeaderSize - kHeapObjectTag +
- kPointerSize * field_no));
+ Node* internal_field = assembler_->LoadObjectField(
+ FromId(value), JSObject::kHeaderSize + kPointerSize * field_no,
+ MachineType::Pointer());
+ result.Bind(internal_field);
assembler_->Goto(&merge);
// No JSObject? Return undefined.
@@ -83,43 +76,39 @@ FastAccessorAssembler::ValueId FastAccessorAssembler::LoadInternalField(
// the method should take a label instead.
assembler_->Bind(&is_not_jsobject);
Node* fail_value = assembler_->UndefinedConstant();
+ result.Bind(fail_value);
assembler_->Goto(&merge);
// Return.
assembler_->Bind(&merge);
- Node* phi = assembler_->Phi(MachineRepresentation::kTagged, internal_field,
- fail_value);
- return FromRaw(phi);
+ return FromRaw(result.value());
}
-
FastAccessorAssembler::ValueId FastAccessorAssembler::LoadValue(ValueId value,
int offset) {
CHECK_EQ(kBuilding, state_);
- return FromRaw(assembler_->Load(MachineType::IntPtr(), FromId(value),
- assembler_->IntPtrConstant(offset)));
+ return FromRaw(assembler_->LoadBufferObject(FromId(value), offset,
+ MachineType::IntPtr()));
}
-
FastAccessorAssembler::ValueId FastAccessorAssembler::LoadObject(ValueId value,
int offset) {
CHECK_EQ(kBuilding, state_);
- return FromRaw(
- assembler_->Load(MachineType::AnyTagged(),
- assembler_->Load(MachineType::Pointer(), FromId(value),
- assembler_->IntPtrConstant(offset))));
+ return FromRaw(assembler_->LoadBufferObject(
+ assembler_->LoadBufferObject(FromId(value), offset,
+ MachineType::Pointer()),
+ 0, MachineType::AnyTagged()));
}
-
void FastAccessorAssembler::ReturnValue(ValueId value) {
CHECK_EQ(kBuilding, state_);
assembler_->Return(FromId(value));
}
-
void FastAccessorAssembler::CheckFlagSetOrReturnNull(ValueId value, int mask) {
CHECK_EQ(kBuilding, state_);
- RawMachineLabel pass, fail;
+ CodeStubAssembler::Label pass(assembler_.get());
+ CodeStubAssembler::Label fail(assembler_.get());
assembler_->Branch(
assembler_->Word32Equal(
assembler_->Word32And(FromId(value), assembler_->Int32Constant(mask)),
@@ -130,39 +119,34 @@ void FastAccessorAssembler::CheckFlagSetOrReturnNull(ValueId value, int mask) {
assembler_->Bind(&pass);
}
-
void FastAccessorAssembler::CheckNotZeroOrReturnNull(ValueId value) {
CHECK_EQ(kBuilding, state_);
- RawMachineLabel is_null, not_null;
+ CodeStubAssembler::Label is_null(assembler_.get());
+ CodeStubAssembler::Label not_null(assembler_.get());
assembler_->Branch(
- assembler_->IntPtrEqual(FromId(value), assembler_->IntPtrConstant(0)),
+ assembler_->WordEqual(FromId(value), assembler_->IntPtrConstant(0)),
&is_null, &not_null);
assembler_->Bind(&is_null);
assembler_->Return(assembler_->NullConstant());
assembler_->Bind(&not_null);
}
-
FastAccessorAssembler::LabelId FastAccessorAssembler::MakeLabel() {
CHECK_EQ(kBuilding, state_);
- RawMachineLabel* label =
- new (zone()->New(sizeof(RawMachineLabel))) RawMachineLabel;
- return FromRaw(label);
+ return FromRaw(new CodeStubAssembler::Label(assembler_.get()));
}
-
void FastAccessorAssembler::SetLabel(LabelId label_id) {
CHECK_EQ(kBuilding, state_);
assembler_->Bind(FromId(label_id));
}
-
void FastAccessorAssembler::CheckNotZeroOrJump(ValueId value_id,
LabelId label_id) {
CHECK_EQ(kBuilding, state_);
- RawMachineLabel pass;
+ CodeStubAssembler::Label pass(assembler_.get());
assembler_->Branch(
- assembler_->IntPtrEqual(FromId(value_id), assembler_->IntPtrConstant(0)),
+ assembler_->WordEqual(FromId(value_id), assembler_->IntPtrConstant(0)),
&pass, FromId(label_id));
assembler_->Bind(&pass);
}
@@ -171,92 +155,77 @@ FastAccessorAssembler::ValueId FastAccessorAssembler::Call(
FunctionCallback callback_function, ValueId arg) {
CHECK_EQ(kBuilding, state_);
- // Create API function stub.
- CallApiFunctionStub stub(assembler_->isolate(), true);
-
// Wrap the FunctionCallback in an ExternalReference.
ApiFunction callback_api_function(FUNCTION_ADDR(callback_function));
ExternalReference callback(&callback_api_function,
- ExternalReference::DIRECT_API_CALL,
- assembler_->isolate());
-
- // The stub has 5 parameters, and kJSParam (here: 1) parameters to pass
- // through to the callback.
- // See: ApiFunctionDescriptor::BuildCallInterfaceDescriptorFunctionType
- static const int kStackParam = 1;
- Node* args[] = {
+ ExternalReference::DIRECT_API_CALL, isolate());
+
+ // Create & call API callback via stub.
+ CallApiCallbackStub stub(isolate(), 1, true);
+ DCHECK_EQ(5, stub.GetCallInterfaceDescriptor().GetParameterCount());
+ DCHECK_EQ(1, stub.GetCallInterfaceDescriptor().GetStackParameterCount());
+ // TODO(vogelheim): There is currently no clean way to retrieve the context
+ // parameter for a stub and the implementation details are hidden in
+ // compiler/*. The context_paramter is computed as:
+ // Linkage::GetJSCallContextParamIndex(descriptor->JSParameterCount())
+ const int context_parameter = 2;
+ Node* call = assembler_->CallStub(
+ stub.GetCallInterfaceDescriptor(),
+ assembler_->HeapConstant(stub.GetCode()),
+ assembler_->Parameter(context_parameter),
+
// Stub/register parameters:
- assembler_->Parameter(0), /* receiver (use accessor's) */
- assembler_->UndefinedConstant(), /* call_data (undefined) */
- assembler_->NullConstant(), /* holder (null) */
- assembler_->ExternalConstant(callback), /* API callback function */
- assembler_->IntPtrConstant(kStackParam), /* # JS arguments */
-
- // kStackParam stack parameter(s):
- FromId(arg),
-
- // Context parameter. (See Linkage::GetStubCallDescriptor.)
- assembler_->UndefinedConstant()};
- CHECK_EQ(5 + kStackParam + 1, arraysize(args));
-
- Node* call = assembler_->CallN(
- Linkage::GetStubCallDescriptor(
- assembler_->isolate(), zone(), stub.GetCallInterfaceDescriptor(),
- kStackParam + stub.GetStackParameterCount(),
- CallDescriptor::kNoFlags),
- assembler_->HeapConstant(stub.GetCode()), args);
+ assembler_->Parameter(0), /* receiver (use accessor's) */
+ assembler_->UndefinedConstant(), /* call_data (undefined) */
+ assembler_->NullConstant(), /* holder (null) */
+ assembler_->ExternalConstant(callback), /* API callback function */
+
+ // JS arguments, on stack:
+ FromId(arg));
+
return FromRaw(call);
}
MaybeHandle<Code> FastAccessorAssembler::Build() {
CHECK_EQ(kBuilding, state_);
-
- // Cleanup: We no longer need this.
- nodes_.clear();
- labels_.clear();
-
- // Export the schedule and call the compiler.
- Schedule* schedule = assembler_->Export();
- Code::Flags flags = Code::ComputeFlags(Code::STUB);
- MaybeHandle<Code> code = Pipeline::GenerateCodeForCodeStub(
- assembler_->isolate(), assembler_->call_descriptor(), assembler_->graph(),
- schedule, flags, "FastAccessorAssembler");
-
- // Update state & return.
+ Handle<Code> code = assembler_->GenerateCode();
state_ = !code.is_null() ? kBuilt : kError;
+ Clear();
return code;
}
-
FastAccessorAssembler::ValueId FastAccessorAssembler::FromRaw(Node* node) {
nodes_.push_back(node);
ValueId value = {nodes_.size() - 1};
return value;
}
-
FastAccessorAssembler::LabelId FastAccessorAssembler::FromRaw(
- RawMachineLabel* label) {
+ CodeStubAssembler::Label* label) {
labels_.push_back(label);
LabelId label_id = {labels_.size() - 1};
return label_id;
}
-
Node* FastAccessorAssembler::FromId(ValueId value) const {
CHECK_LT(value.value_id, nodes_.size());
CHECK_NOT_NULL(nodes_.at(value.value_id));
return nodes_.at(value.value_id);
}
-
-RawMachineLabel* FastAccessorAssembler::FromId(LabelId label) const {
+CodeStubAssembler::Label* FastAccessorAssembler::FromId(LabelId label) const {
CHECK_LT(label.label_id, labels_.size());
CHECK_NOT_NULL(labels_.at(label.label_id));
return labels_.at(label.label_id);
}
+void FastAccessorAssembler::Clear() {
+ for (auto label : labels_) {
+ delete label;
+ }
+ nodes_.clear();
+ labels_.clear();
+}
-} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/fast-accessor-assembler.h b/deps/v8/src/fast-accessor-assembler.h
index 1cb751d026..57e72e8eb1 100644
--- a/deps/v8/src/compiler/fast-accessor-assembler.h
+++ b/deps/v8/src/fast-accessor-assembler.h
@@ -2,19 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_FAST_ACCESSOR_ASSEMBLER_H_
-#define V8_COMPILER_FAST_ACCESSOR_ASSEMBLER_H_
+#ifndef V8_FAST_ACCESSOR_ASSEMBLER_H_
+#define V8_FAST_ACCESSOR_ASSEMBLER_H_
#include <stdint.h>
#include <vector>
-// Clients of this interface shouldn't depend on lots of compiler internals.
-// Do not include anything from src/compiler here!
#include "include/v8-experimental.h"
#include "src/base/macros.h"
#include "src/base/smart-pointers.h"
#include "src/handles.h"
+// For CodeStubAssembler::Label. (We cannot forward-declare inner classes.)
+#include "src/compiler/code-stub-assembler.h"
namespace v8 {
namespace internal {
@@ -24,11 +24,8 @@ class Isolate;
class Zone;
namespace compiler {
-
class Node;
-class RawMachineAssembler;
-class RawMachineLabel;
-
+}
// This interface "exports" an aggregated subset of RawMachineAssembler, for
// use by the API to implement Fast Dom Accessors.
@@ -75,21 +72,24 @@ class FastAccessorAssembler {
MaybeHandle<Code> Build();
private:
- ValueId FromRaw(Node* node);
- LabelId FromRaw(RawMachineLabel* label);
- Node* FromId(ValueId value) const;
- RawMachineLabel* FromId(LabelId value) const;
+ ValueId FromRaw(compiler::Node* node);
+ LabelId FromRaw(compiler::CodeStubAssembler::Label* label);
+ compiler::Node* FromId(ValueId value) const;
+ compiler::CodeStubAssembler::Label* FromId(LabelId value) const;
+ void Clear();
Zone* zone() { return &zone_; }
+ Isolate* isolate() const { return isolate_; }
Zone zone_;
- base::SmartPointer<RawMachineAssembler> assembler_;
+ Isolate* isolate_;
+ base::SmartPointer<compiler::CodeStubAssembler> assembler_;
// To prevent exposing the RMA internals to the outside world, we'll map
// Node + Label pointers integers wrapped in ValueId and LabelId instances.
// These vectors maintain this mapping.
- std::vector<Node*> nodes_;
- std::vector<RawMachineLabel*> labels_;
+ std::vector<compiler::Node*> nodes_;
+ std::vector<compiler::CodeStubAssembler::Label*> labels_;
// Remember the current state for easy error checking. (We prefer to be
// strict as this class will be exposed at the API.)
@@ -98,8 +98,7 @@ class FastAccessorAssembler {
DISALLOW_COPY_AND_ASSIGN(FastAccessorAssembler);
};
-} // namespace compiler
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_FAST_ACCESSOR_ASSEMBLER_H_
+#endif // V8_FAST_ACCESSOR_ASSEMBLER_H_
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 1eb9dd5111..68088ade4d 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -172,20 +172,11 @@ struct MaybeBoolFlag {
//
#define FLAG FLAG_FULL
-DEFINE_BOOL(warn_template_set, true,
- "warn on deprecated v8::Template::Set() use")
-
DEFINE_BOOL(experimental_extras, false,
"enable code compiled in via v8_experimental_extra_library_files")
// Flags for language modes and experimental language features.
DEFINE_BOOL(use_strict, false, "enforce strict mode")
-DEFINE_BOOL(use_strong, false, "enforce strong mode")
-DEFINE_IMPLICATION(use_strong, use_strict)
-
-DEFINE_BOOL(strong_mode, false, "experimental strong language mode")
-DEFINE_IMPLICATION(use_strong, strong_mode)
-DEFINE_BOOL(strong_this, true, "don't allow 'this' to escape from constructors")
DEFINE_BOOL(es_staging, false,
"enable test-worthy harmony features (for internal use only)")
@@ -193,10 +184,6 @@ DEFINE_BOOL(harmony, false, "enable all completed harmony features")
DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
DEFINE_IMPLICATION(es_staging, harmony)
-DEFINE_BOOL(legacy_const, false, "legacy semantics for const in sloppy mode")
-// ES2015 const semantics are shipped
-DEFINE_NEG_VALUE_IMPLICATION(harmony_shipping, legacy_const, true)
-
DEFINE_BOOL(promise_extra, true, "additional V8 Promise functions")
// Removing extra Promise functions is staged
DEFINE_NEG_IMPLICATION(harmony, promise_extra)
@@ -204,44 +191,41 @@ DEFINE_NEG_IMPLICATION(harmony, promise_extra)
// Activate on ClusterFuzz.
DEFINE_IMPLICATION(es_staging, harmony_regexp_lookbehind)
DEFINE_IMPLICATION(es_staging, move_object_start)
-DEFINE_IMPLICATION(es_staging, harmony_tailcalls)
// Features that are still work in progress (behind individual flags).
-#define HARMONY_INPROGRESS(V) \
- V(harmony_object_observe, "harmony Object.observe") \
- V(harmony_modules, "harmony modules") \
- V(harmony_function_sent, "harmony function.sent") \
- V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
- V(harmony_simd, "harmony simd") \
- V(harmony_do_expressions, "harmony do-expressions") \
- V(harmony_iterator_close, "harmony iterator finalization") \
+#define HARMONY_INPROGRESS(V) \
+ V(harmony_array_prototype_values, "harmony Array.prototype.values") \
+ V(harmony_object_observe, "harmony Object.observe") \
+ V(harmony_function_sent, "harmony function.sent") \
+ V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
+ V(harmony_simd, "harmony simd") \
+ V(harmony_do_expressions, "harmony do-expressions") \
+ V(harmony_regexp_property, "harmony unicode regexp property classes") \
+ V(harmony_string_padding, "harmony String-padding methods")
+
+// Features that are complete (but still behind --harmony/es-staging flag).
+#define HARMONY_STAGED(V) \
+ V(harmony_regexp_lookbehind, "harmony regexp lookbehind") \
V(harmony_tailcalls, "harmony tail calls") \
V(harmony_object_values_entries, "harmony Object.values / Object.entries") \
V(harmony_object_own_property_descriptors, \
"harmony Object.getOwnPropertyDescriptors()") \
- V(harmony_regexp_property, "harmony unicode regexp property classes")
-
-// Features that are complete (but still behind --harmony/es-staging flag).
-#define HARMONY_STAGED(V) \
- V(harmony_function_name, "harmony Function name inference") \
- V(harmony_regexp_lookbehind, "harmony regexp lookbehind") \
- V(harmony_species, "harmony Symbol.species") \
- V(harmony_instanceof, "harmony instanceof support")
+ V(harmony_exponentiation_operator, "harmony exponentiation operator `**`")
// Features that are shipping (turned on by default, but internal flag remains).
-#define HARMONY_SHIPPING(V) \
- V(harmony_default_parameters, "harmony default parameters") \
- V(harmony_destructuring_assignment, "harmony destructuring assignment") \
- V(harmony_destructuring_bind, "harmony destructuring bind") \
- V(harmony_tostring, "harmony toString") \
- V(harmony_regexps, "harmony regular expression extensions") \
- V(harmony_unicode_regexps, "harmony unicode regexps") \
- V(harmony_sloppy, "harmony features in sloppy mode") \
- V(harmony_sloppy_let, "harmony let in sloppy mode") \
- V(harmony_sloppy_function, "harmony sloppy function block scoping") \
- V(harmony_proxies, "harmony proxies") \
- V(harmony_reflect, "harmony Reflect API") \
- V(harmony_regexp_subclass, "harmony regexp subclassing")
+#define HARMONY_SHIPPING(V) \
+ V(harmony_function_name, "harmony Function name inference") \
+ V(harmony_instanceof, "harmony instanceof support") \
+ V(harmony_iterator_close, "harmony iterator finalization") \
+ V(harmony_unicode_regexps, "harmony unicode regexps") \
+ V(harmony_regexp_exec, "harmony RegExp exec override behavior") \
+ V(harmony_sloppy, "harmony features in sloppy mode") \
+ V(harmony_sloppy_let, "harmony let in sloppy mode") \
+ V(harmony_sloppy_function, "harmony sloppy function block scoping") \
+ V(harmony_regexp_subclass, "harmony regexp subclassing") \
+ V(harmony_restrictive_declarations, \
+ "harmony limitations on sloppy mode function declarations") \
+ V(harmony_species, "harmony Symbol.species")
// Once a shipping feature has proved stable in the wild, it will be dropped
// from HARMONY_SHIPPING, all occurrences of the FLAG_ variable are removed,
@@ -271,10 +255,6 @@ HARMONY_SHIPPING(FLAG_SHIPPING_FEATURES)
DEFINE_IMPLICATION(harmony_sloppy_let, harmony_sloppy)
DEFINE_IMPLICATION(harmony_sloppy_function, harmony_sloppy)
-// Destructuring shares too much parsing architecture with default parameters
-// to be enabled on its own.
-DEFINE_IMPLICATION(harmony_destructuring_bind, harmony_default_parameters)
-
// Flags for experimental implementation features.
DEFINE_BOOL(compiled_keyed_generic_loads, false,
"use optimizing compiler to generate keyed generic load stubs")
@@ -288,6 +268,7 @@ DEFINE_BOOL(track_fields, true, "track fields with only smi values")
DEFINE_BOOL(track_double_fields, true, "track fields with double values")
DEFINE_BOOL(track_heap_object_fields, true, "track fields with heap values")
DEFINE_BOOL(track_computed_fields, true, "track computed boilerplate fields")
+DEFINE_BOOL(harmony_instanceof_opt, true, "optimize ES6 instanceof support")
DEFINE_IMPLICATION(track_double_fields, track_fields)
DEFINE_IMPLICATION(track_heap_object_fields, track_fields)
DEFINE_IMPLICATION(track_computed_fields, track_fields)
@@ -309,6 +290,7 @@ DEFINE_BOOL(string_slices, true, "use string slices")
// Flags for Ignition.
DEFINE_BOOL(ignition, false, "use ignition interpreter")
+DEFINE_BOOL(ignition_eager, true, "eagerly compile and parse with ignition")
DEFINE_STRING(ignition_filter, "*", "filter for ignition interpreter")
DEFINE_BOOL(print_bytecode, false,
"print bytecode generated by ignition interpreter")
@@ -380,8 +362,6 @@ DEFINE_BOOL(use_osr, true, "use on-stack replacement")
DEFINE_BOOL(array_bounds_checks_elimination, true,
"perform array bounds checks elimination")
DEFINE_BOOL(trace_bce, false, "trace array bounds check elimination")
-DEFINE_BOOL(array_bounds_checks_hoisting, false,
- "perform array bounds checks hoisting")
DEFINE_BOOL(array_index_dehoisting, true, "perform array index dehoisting")
DEFINE_BOOL(analyze_environment_liveness, true,
"analyze liveness of environment slots and zap dead values")
@@ -415,8 +395,6 @@ DEFINE_INT(concurrent_recompilation_delay, 0,
"artificial compilation delay in ms")
DEFINE_BOOL(block_concurrent_recompilation, false,
"block queued jobs until released")
-DEFINE_BOOL(concurrent_osr, false, "concurrent on-stack replacement")
-DEFINE_IMPLICATION(concurrent_osr, concurrent_recompilation)
DEFINE_BOOL(omit_map_checks_for_leaf_maps, true,
"do not emit check maps for constant values that have a leaf map, "
@@ -480,18 +458,23 @@ DEFINE_BOOL(turbo_stress_instruction_scheduling, false,
// Flags for native WebAssembly.
DEFINE_BOOL(expose_wasm, false, "expose WASM interface to JavaScript")
+DEFINE_BOOL(trace_wasm_encoder, false, "trace encoding of wasm code")
DEFINE_BOOL(trace_wasm_decoder, false, "trace decoding of wasm code")
DEFINE_BOOL(trace_wasm_decode_time, false, "trace decoding time of wasm code")
DEFINE_BOOL(trace_wasm_compiler, false, "trace compiling of wasm code")
-DEFINE_BOOL(trace_wasm_ast, false, "dump AST after WASM decode")
+DEFINE_INT(trace_wasm_ast_start, 0,
+ "start function for WASM AST trace (inclusive)")
+DEFINE_INT(trace_wasm_ast_end, 0, "end function for WASM AST trace (exclusive)")
+DEFINE_INT(skip_compiling_wasm_funcs, 0, "start compiling at function N")
DEFINE_BOOL(wasm_break_on_decoder_error, false,
"debug break when wasm decoder encounters an error")
+DEFINE_BOOL(wasm_loop_assignment_analysis, true,
+ "perform loop assignment analysis for WASM")
DEFINE_BOOL(enable_simd_asmjs, false, "enable SIMD.js in asm.js stdlib")
-DEFINE_BOOL(dump_asmjs_wasm, false, "dump Asm.js to WASM module bytes")
-DEFINE_STRING(asmjs_wasm_dumpfile, "asmjs.wasm",
- "file to dump asm wasm conversion result to")
+DEFINE_BOOL(dump_wasm_module, false, "dump WASM module bytes")
+DEFINE_STRING(dump_wasm_module_path, NULL, "directory to dump wasm modules to")
DEFINE_INT(typed_array_max_size_in_heap, 64,
"threshold for in-heap typed array")
@@ -595,6 +578,8 @@ DEFINE_BOOL(trace_stub_failures, false,
"trace deoptimization of generated code stubs")
DEFINE_BOOL(serialize_toplevel, true, "enable caching of toplevel scripts")
+DEFINE_BOOL(serialize_eager, false, "compile eagerly when caching scripts")
+DEFINE_BOOL(serialize_age_code, false, "pre age code in the code cache")
DEFINE_BOOL(trace_serializer, false, "print code serializer trace")
// compiler.cc
@@ -623,8 +608,6 @@ DEFINE_IMPLICATION(trace_array_abuse, trace_js_array_abuse)
DEFINE_IMPLICATION(trace_array_abuse, trace_external_array_abuse)
// debugger
-DEFINE_BOOL(debug_eval_readonly_locals, true,
- "do not update locals after debug-evaluate")
DEFINE_BOOL(trace_debug_json, false, "trace debugging JSON request/response")
DEFINE_BOOL(enable_liveedit, true, "enable liveedit experimental feature")
DEFINE_BOOL(hard_abort, true, "abort by crashing")
@@ -685,6 +668,7 @@ DEFINE_INT(trace_allocation_stack_interval, -1,
DEFINE_BOOL(trace_fragmentation, false, "report fragmentation for old space")
DEFINE_BOOL(trace_fragmentation_verbose, false,
"report fragmentation for old space (detailed)")
+DEFINE_BOOL(trace_evacuation, false, "report evacuation statistics")
DEFINE_BOOL(trace_mutator_utilization, false,
"print mutator utilization, allocation speed, gc speed")
DEFINE_BOOL(weak_embedded_maps_in_optimized_code, true,
@@ -702,8 +686,11 @@ DEFINE_INT(min_progress_during_incremental_marking_finalization, 32,
"least this many unmarked objects")
DEFINE_INT(max_incremental_marking_finalization_rounds, 3,
"at most try this many times to finalize incremental marking")
+DEFINE_BOOL(black_allocation, true, "use black allocation")
DEFINE_BOOL(concurrent_sweeping, true, "use concurrent sweeping")
DEFINE_BOOL(parallel_compaction, true, "use parallel compaction")
+DEFINE_BOOL(parallel_pointer_update, true,
+ "use parallel pointer update during compaction")
DEFINE_BOOL(trace_incremental_marking, false,
"trace progress of the incremental marking")
DEFINE_BOOL(track_gc_object_stats, false,
@@ -721,7 +708,7 @@ DEFINE_BOOL(verify_heap, false, "verify heap pointers before and after GC")
#endif
DEFINE_BOOL(move_object_start, true, "enable moving of object starts")
DEFINE_BOOL(memory_reducer, true, "use memory reducer")
-DEFINE_BOOL(scavenge_reclaim_unmodified_objects, false,
+DEFINE_BOOL(scavenge_reclaim_unmodified_objects, true,
"remove unmodified and unreferenced objects")
DEFINE_INT(heap_growing_percent, 0,
"specifies heap growing factor as (1 + heap_growing_percent/100)")
@@ -730,6 +717,9 @@ DEFINE_INT(heap_growing_percent, 0,
DEFINE_INT(histogram_interval, 600000,
"time interval in ms for aggregating memory histograms")
+// global-handles.cc
+DEFINE_BOOL(trace_object_groups, false,
+ "print object groups detected during each garbage collection")
// heap-snapshot-generator.cc
DEFINE_BOOL(heap_profiler_trace_objects, false,
@@ -779,6 +769,7 @@ DEFINE_BOOL(eliminate_prototype_chain_checks, true,
"Collapse prototype chain checks into single-cell checks")
DEFINE_IMPLICATION(eliminate_prototype_chain_checks, track_prototype_users)
DEFINE_BOOL(use_verbose_printer, true, "allows verbose printing")
+DEFINE_BOOL(trace_for_in_enumerate, false, "Trace for-in enumerate slow-paths")
#if TRACE_MAPS
DEFINE_BOOL(trace_maps, false, "trace map creation")
#endif
@@ -865,7 +856,6 @@ DEFINE_BOOL(profile_hydrogen_code_stub_compilation, false,
DEFINE_BOOL(predictable, false, "enable predictable mode")
DEFINE_NEG_IMPLICATION(predictable, concurrent_recompilation)
-DEFINE_NEG_IMPLICATION(predictable, concurrent_osr)
DEFINE_NEG_IMPLICATION(predictable, concurrent_sweeping)
DEFINE_NEG_IMPLICATION(predictable, parallel_compaction)
DEFINE_NEG_IMPLICATION(predictable, memory_reducer)
@@ -994,6 +984,9 @@ DEFINE_BOOL(trace_regexp_assembler, false,
"trace regexp macro assembler calls.")
DEFINE_BOOL(trace_regexp_parser, false, "trace regexp parsing")
+// Debugger
+DEFINE_BOOL(print_break_location, false, "print source location on debug break")
+
//
// Logging and profiling flags
//
@@ -1010,8 +1003,6 @@ DEFINE_BOOL(log_code, false,
DEFINE_BOOL(log_gc, false,
"Log heap samples on garbage collection for the hp2ps tool.")
DEFINE_BOOL(log_handles, false, "Log global handle events.")
-DEFINE_BOOL(log_snapshot_positions, false,
- "log positions of (de)serialized objects in the snapshot.")
DEFINE_BOOL(log_suspect, false, "Log suspect operations.")
DEFINE_BOOL(prof, false,
"Log statistical profiling information (implies --log-code).")
@@ -1029,6 +1020,11 @@ DEFINE_NEG_IMPLICATION(perf_basic_prof, compact_code_space)
DEFINE_BOOL(perf_basic_prof_only_functions, false,
"Only report function code ranges to perf (i.e. no stubs).")
DEFINE_IMPLICATION(perf_basic_prof_only_functions, perf_basic_prof)
+DEFINE_BOOL(perf_prof, false,
+ "Enable perf linux profiler (experimental annotate support).")
+DEFINE_NEG_IMPLICATION(perf_prof, compact_code_space)
+DEFINE_BOOL(perf_prof_debug_info, false,
+ "Enable debug info for perf linux profiler (experimental).")
DEFINE_STRING(gc_fake_mmap, "/tmp/__v8_gc__",
"Specify the name of the file for fake gc mmap used in ll_prof")
DEFINE_BOOL(log_internal_timer_events, false, "Time internal events.")
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index 4013601dac..5ecbd4567e 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -24,6 +24,8 @@
#include "src/mips/frames-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/frames-mips64.h" // NOLINT
+#elif V8_TARGET_ARCH_S390
+#include "src/s390/frames-s390.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/frames-x87.h" // NOLINT
#else
@@ -114,7 +116,9 @@ inline void StandardFrame::SetExpression(int index, Object* value) {
inline Object* StandardFrame::context() const {
const int offset = StandardFrameConstants::kContextOffset;
- return Memory::Object_at(fp() + offset);
+ Object* maybe_result = Memory::Object_at(fp() + offset);
+ DCHECK(!maybe_result->IsSmi());
+ return maybe_result;
}
@@ -139,23 +143,20 @@ inline Address StandardFrame::ComputeConstantPoolAddress(Address fp) {
inline bool StandardFrame::IsArgumentsAdaptorFrame(Address fp) {
- Object* marker =
- Memory::Object_at(fp + StandardFrameConstants::kContextOffset);
- return marker == Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR);
+ Object* frame_type =
+ Memory::Object_at(fp + TypedFrameConstants::kFrameTypeOffset);
+ return frame_type == Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR);
}
inline bool StandardFrame::IsConstructFrame(Address fp) {
- Object* marker =
- Memory::Object_at(fp + StandardFrameConstants::kMarkerOffset);
- return marker == Smi::FromInt(StackFrame::CONSTRUCT);
+ Object* frame_type =
+ Memory::Object_at(fp + TypedFrameConstants::kFrameTypeOffset);
+ return frame_type == Smi::FromInt(StackFrame::CONSTRUCT);
}
-
inline JavaScriptFrame::JavaScriptFrame(StackFrameIteratorBase* iterator)
- : StandardFrame(iterator) {
-}
-
+ : StandardFrame(iterator) {}
Address JavaScriptFrame::GetParameterSlot(int index) const {
int param_count = ComputeParametersCount();
@@ -242,6 +243,14 @@ inline ArgumentsAdaptorFrame::ArgumentsAdaptorFrame(
StackFrameIteratorBase* iterator) : JavaScriptFrame(iterator) {
}
+inline WasmFrame::WasmFrame(StackFrameIteratorBase* iterator)
+ : StandardFrame(iterator) {}
+
+inline WasmToJsFrame::WasmToJsFrame(StackFrameIteratorBase* iterator)
+ : StubFrame(iterator) {}
+
+inline JsToWasmFrame::JsToWasmFrame(StackFrameIteratorBase* iterator)
+ : StubFrame(iterator) {}
inline InternalFrame::InternalFrame(StackFrameIteratorBase* iterator)
: StandardFrame(iterator) {
@@ -257,21 +266,18 @@ inline ConstructFrame::ConstructFrame(StackFrameIteratorBase* iterator)
: InternalFrame(iterator) {
}
-
inline JavaScriptFrameIterator::JavaScriptFrameIterator(
Isolate* isolate)
: iterator_(isolate) {
if (!done()) Advance();
}
-
inline JavaScriptFrameIterator::JavaScriptFrameIterator(
Isolate* isolate, ThreadLocalTop* top)
: iterator_(isolate, top) {
if (!done()) Advance();
}
-
inline JavaScriptFrame* JavaScriptFrameIterator::frame() const {
// TODO(1233797): The frame hierarchy needs to change. It's
// problematic that we can't use the safe-cast operator to cast to
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 50a2e21a05..0e57429ea3 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -134,12 +134,10 @@ StackFrame* StackFrameIteratorBase::SingletonFor(StackFrame::Type type) {
#undef FRAME_TYPE_CASE
}
-
// -------------------------------------------------------------------------
-
-JavaScriptFrameIterator::JavaScriptFrameIterator(
- Isolate* isolate, StackFrame::Id id)
+JavaScriptFrameIterator::JavaScriptFrameIterator(Isolate* isolate,
+ StackFrame::Id id)
: iterator_(isolate) {
while (!done()) {
Advance();
@@ -216,9 +214,9 @@ SafeStackFrameIterator::SafeStackFrameIterator(
// we check only that kMarkerOffset is within the stack bounds and do
// compile time check that kContextOffset slot is pushed on the stack before
// kMarkerOffset.
- STATIC_ASSERT(StandardFrameConstants::kMarkerOffset <
+ STATIC_ASSERT(StandardFrameConstants::kFunctionOffset <
StandardFrameConstants::kContextOffset);
- Address frame_marker = fp + StandardFrameConstants::kMarkerOffset;
+ Address frame_marker = fp + StandardFrameConstants::kFunctionOffset;
if (IsValidStackAddress(frame_marker)) {
type = StackFrame::ComputeType(this, &state);
top_frame_type_ = type;
@@ -403,11 +401,29 @@ void StackFrame::SetReturnAddressLocationResolver(
return_address_location_resolver_ = resolver;
}
+static bool IsInterpreterFramePc(Isolate* isolate, Address pc) {
+ Code* interpreter_entry_trampoline =
+ isolate->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
+ Code* interpreter_bytecode_dispatch =
+ isolate->builtins()->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
+
+ return (pc >= interpreter_entry_trampoline->instruction_start() &&
+ pc < interpreter_entry_trampoline->instruction_end()) ||
+ (pc >= interpreter_bytecode_dispatch->instruction_start() &&
+ pc < interpreter_bytecode_dispatch->instruction_end());
+}
StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
State* state) {
DCHECK(state->fp != NULL);
+#if defined(USE_SIMULATOR)
+ MSAN_MEMORY_IS_INITIALIZED(
+ state->fp + CommonFrameConstants::kContextOrFrameTypeOffset,
+ kPointerSize);
+#endif
+ Object* marker = Memory::Object_at(
+ state->fp + CommonFrameConstants::kContextOrFrameTypeOffset);
if (!iterator->can_access_heap_objects_) {
// TODO(titzer): "can_access_heap_objects" is kind of bogus. It really
// means that we are being called from the profiler, which can interrupt
@@ -416,66 +432,81 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
// reliable.
#if defined(USE_SIMULATOR)
MSAN_MEMORY_IS_INITIALIZED(
- state->fp + StandardFrameConstants::kContextOffset, kPointerSize);
- MSAN_MEMORY_IS_INITIALIZED(
- state->fp + StandardFrameConstants::kMarkerOffset, kPointerSize);
+ state->fp + StandardFrameConstants::kFunctionOffset, kPointerSize);
#endif
- if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
- // An adapter frame has a special SMI constant for the context and
- // is not distinguished through the marker.
- return ARGUMENTS_ADAPTOR;
+ Object* maybe_function =
+ Memory::Object_at(state->fp + StandardFrameConstants::kFunctionOffset);
+ if (!marker->IsSmi()) {
+ if (maybe_function->IsSmi()) {
+ return NONE;
+ } else if (FLAG_ignition && IsInterpreterFramePc(iterator->isolate(),
+ *(state->pc_address))) {
+ return INTERPRETED;
+ } else {
+ return JAVA_SCRIPT;
+ }
}
- Object* marker =
- Memory::Object_at(state->fp + StandardFrameConstants::kMarkerOffset);
- if (marker->IsSmi()) {
- return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
+ } else {
+ // Look up the code object to figure out the type of the stack frame.
+ Code* code_obj =
+ GetContainingCode(iterator->isolate(), *(state->pc_address));
+ if (code_obj != nullptr) {
+ if (code_obj->is_interpreter_entry_trampoline() ||
+ code_obj->is_interpreter_enter_bytecode_dispatch()) {
+ return INTERPRETED;
+ }
+ switch (code_obj->kind()) {
+ case Code::BUILTIN:
+ if (marker->IsSmi()) break;
+ // We treat frames for BUILTIN Code objects as OptimizedFrame for now
+ // (all the builtins with JavaScript linkage are actually generated
+ // with TurboFan currently, so this is sound).
+ return OPTIMIZED;
+ case Code::FUNCTION:
+ return JAVA_SCRIPT;
+ case Code::OPTIMIZED_FUNCTION:
+ return OPTIMIZED;
+ case Code::WASM_FUNCTION:
+ return WASM;
+ case Code::WASM_TO_JS_FUNCTION:
+ return WASM_TO_JS;
+ case Code::JS_TO_WASM_FUNCTION:
+ return JS_TO_WASM;
+ default:
+ // All other types should have an explicit marker
+ break;
+ }
} else {
- return JAVA_SCRIPT;
+ return NONE;
}
}
- // Look up the code object to figure out the type of the stack frame.
- Code* code_obj = GetContainingCode(iterator->isolate(), *(state->pc_address));
-
- Object* marker =
- Memory::Object_at(state->fp + StandardFrameConstants::kMarkerOffset);
- if (code_obj != nullptr) {
- switch (code_obj->kind()) {
- case Code::FUNCTION:
- return JAVA_SCRIPT;
- case Code::OPTIMIZED_FUNCTION:
- return OPTIMIZED;
- case Code::WASM_FUNCTION:
- return STUB;
- case Code::BUILTIN:
- if (!marker->IsSmi()) {
- if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
- // An adapter frame has a special SMI constant for the context and
- // is not distinguished through the marker.
- return ARGUMENTS_ADAPTOR;
- } else {
- // The interpreter entry trampoline has a non-SMI marker.
- DCHECK(code_obj->is_interpreter_entry_trampoline() ||
- code_obj->is_interpreter_enter_bytecode_dispatch());
- return INTERPRETED;
- }
- }
- break; // Marker encodes the frame type.
- case Code::HANDLER:
- if (!marker->IsSmi()) {
- // Only hydrogen code stub handlers can have a non-SMI marker.
- DCHECK(code_obj->is_hydrogen_stub());
- return OPTIMIZED;
- }
- break; // Marker encodes the frame type.
- default:
- break; // Marker encodes the frame type.
- }
+ DCHECK(marker->IsSmi());
+ StackFrame::Type candidate =
+ static_cast<StackFrame::Type>(Smi::cast(marker)->value());
+ switch (candidate) {
+ case ENTRY:
+ case ENTRY_CONSTRUCT:
+ case EXIT:
+ case STUB:
+ case STUB_FAILURE_TRAMPOLINE:
+ case INTERNAL:
+ case CONSTRUCT:
+ case ARGUMENTS_ADAPTOR:
+ return candidate;
+ case JS_TO_WASM:
+ case WASM_TO_JS:
+ case WASM:
+ case JAVA_SCRIPT:
+ case OPTIMIZED:
+ case INTERPRETED:
+ default:
+ // Unoptimized and optimized JavaScript frames, including
+ // interpreted frames, should never have a StackFrame::Type
+ // marker. If we find one, we're likely being called from the
+ // profiler in a bogus stack frame.
+ return NONE;
}
-
- // Didn't find a code object, or the code kind wasn't specific enough.
- // The marker should encode the frame type.
- return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
}
@@ -493,16 +524,7 @@ StackFrame::Type StackFrame::GetCallerState(State* state) const {
Address StackFrame::UnpaddedFP() const {
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
- if (!is_optimized()) return fp();
- int32_t alignment_state = Memory::int32_at(
- fp() + JavaScriptFrameConstants::kDynamicAlignmentStateOffset);
-
- return (alignment_state == kAlignmentPaddingPushed) ?
- (fp() + kPointerSize) : fp();
-#else
return fp();
-#endif
}
@@ -572,7 +594,7 @@ void ExitFrame::Iterate(ObjectVisitor* v) const {
Address ExitFrame::GetCallerStackPointer() const {
- return fp() + ExitFrameConstants::kCallerSPDisplacement;
+ return fp() + ExitFrameConstants::kCallerSPOffset;
}
@@ -648,13 +670,50 @@ void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
SafepointEntry safepoint_entry;
Code* code = StackFrame::GetSafepointData(
isolate(), pc(), &safepoint_entry, &stack_slots);
- unsigned slot_space =
- stack_slots * kPointerSize - StandardFrameConstants::kFixedFrameSize;
+ unsigned slot_space = stack_slots * kPointerSize;
- // Visit the outgoing parameters.
+ // Determine the fixed header and spill slot area size.
+ int frame_header_size = StandardFrameConstants::kFixedFrameSizeFromFp;
+ Object* marker =
+ Memory::Object_at(fp() + CommonFrameConstants::kContextOrFrameTypeOffset);
+ if (marker->IsSmi()) {
+ StackFrame::Type candidate =
+ static_cast<StackFrame::Type>(Smi::cast(marker)->value());
+ switch (candidate) {
+ case ENTRY:
+ case ENTRY_CONSTRUCT:
+ case EXIT:
+ case STUB_FAILURE_TRAMPOLINE:
+ case ARGUMENTS_ADAPTOR:
+ case STUB:
+ case INTERNAL:
+ case CONSTRUCT:
+ case JS_TO_WASM:
+ case WASM_TO_JS:
+ case WASM:
+ frame_header_size = TypedFrameConstants::kFixedFrameSizeFromFp;
+ break;
+ case JAVA_SCRIPT:
+ case OPTIMIZED:
+ case INTERPRETED:
+ // These frame types have a context, but they are actually stored
+ // in the place on the stack that one finds the frame type.
+ UNREACHABLE();
+ break;
+ case NONE:
+ case NUMBER_OF_TYPES:
+ case MANUAL:
+ UNREACHABLE();
+ break;
+ }
+ }
+ slot_space -=
+ (frame_header_size + StandardFrameConstants::kFixedFrameSizeAboveFp);
+
+ Object** frame_header_base = &Memory::Object_at(fp() - frame_header_size);
+ Object** frame_header_limit = &Memory::Object_at(fp());
Object** parameters_base = &Memory::Object_at(sp());
- Object** parameters_limit = &Memory::Object_at(
- fp() + JavaScriptFrameConstants::kFunctionOffset - slot_space);
+ Object** parameters_limit = frame_header_base - slot_space / kPointerSize;
// Visit the parameters that may be on top of the saved registers.
if (safepoint_entry.argument_count() > 0) {
@@ -690,7 +749,10 @@ void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
safepoint_bits += kNumSafepointRegisters >> kBitsPerByteLog2;
// Visit the rest of the parameters.
- v->VisitPointers(parameters_base, parameters_limit);
+ if (!is_js_to_wasm() && !is_wasm()) {
+ // Non-WASM frames have tagged values as parameters.
+ v->VisitPointers(parameters_base, parameters_limit);
+ }
// Visit pointer spill slots and locals.
for (unsigned index = 0; index < stack_slots; index++) {
@@ -704,12 +766,11 @@ void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
// Visit the return address in the callee and incoming arguments.
IteratePc(v, pc_address(), constant_pool_address(), code);
- // Visit the context in stub frame and JavaScript frame.
- // Visit the function in JavaScript frame.
- Object** fixed_base = &Memory::Object_at(
- fp() + StandardFrameConstants::kMarkerOffset);
- Object** fixed_limit = &Memory::Object_at(fp());
- v->VisitPointers(fixed_base, fixed_limit);
+ if (!is_wasm() && !is_wasm_to_js()) {
+ // Visit the context in stub frame and JavaScript frame.
+ // Visit the function in JavaScript frame.
+ v->VisitPointers(frame_header_base, frame_header_limit);
+ }
}
@@ -724,7 +785,7 @@ Code* StubFrame::unchecked_code() const {
Address StubFrame::GetCallerStackPointer() const {
- return fp() + ExitFrameConstants::kCallerSPDisplacement;
+ return fp() + ExitFrameConstants::kCallerSPOffset;
}
@@ -893,6 +954,15 @@ void JavaScriptFrame::RestoreOperandStack(FixedArray* store) {
}
}
+namespace {
+
+bool CannotDeoptFromAsmCode(Code* code, JSFunction* function) {
+ return code->is_turbofanned() && function->shared()->asm_function() &&
+ !FLAG_turbo_asm_deoptimization;
+}
+
+} // namespace
+
FrameSummary::FrameSummary(Object* receiver, JSFunction* function,
AbstractCode* abstract_code, int code_offset,
bool is_constructor)
@@ -900,7 +970,11 @@ FrameSummary::FrameSummary(Object* receiver, JSFunction* function,
function_(function),
abstract_code_(abstract_code),
code_offset_(code_offset),
- is_constructor_(is_constructor) {}
+ is_constructor_(is_constructor) {
+ DCHECK(abstract_code->IsBytecodeArray() ||
+ Code::cast(abstract_code)->kind() != Code::OPTIMIZED_FUNCTION ||
+ CannotDeoptFromAsmCode(Code::cast(abstract_code), function));
+}
void FrameSummary::Print() {
PrintF("receiver: ");
@@ -912,7 +986,10 @@ void FrameSummary::Print() {
if (abstract_code_->IsCode()) {
Code* code = abstract_code_->GetCode();
if (code->kind() == Code::FUNCTION) PrintF(" UNOPT ");
- if (code->kind() == Code::OPTIMIZED_FUNCTION) PrintF(" OPT ");
+ if (code->kind() == Code::OPTIMIZED_FUNCTION) {
+ DCHECK(CannotDeoptFromAsmCode(code, *function()));
+ PrintF(" ASM ");
+ }
} else {
PrintF(" BYTECODE ");
}
@@ -926,8 +1003,9 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
// Delegate to JS frame in absence of turbofan deoptimization.
// TODO(turbofan): Revisit once we support deoptimization across the board.
- if (LookupCode()->is_turbofanned() && function()->shared()->asm_function() &&
- !FLAG_turbo_asm_deoptimization) {
+ Code* code = LookupCode();
+ if (code->kind() == Code::BUILTIN ||
+ CannotDeoptFromAsmCode(code, function())) {
return JavaScriptFrame::Summarize(frames);
}
@@ -952,7 +1030,7 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
if (frame_opcode == Translation::JS_FRAME ||
frame_opcode == Translation::INTERPRETED_FRAME) {
jsframe_count--;
- BailoutId const ast_id = BailoutId(it.Next());
+ BailoutId const bailout_id = BailoutId(it.Next());
SharedFunctionInfo* const shared_info =
SharedFunctionInfo::cast(literal_array->get(it.Next()));
it.Next(); // Skip height.
@@ -999,14 +1077,14 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
DeoptimizationOutputData* const output_data =
DeoptimizationOutputData::cast(code->deoptimization_data());
unsigned const entry =
- Deoptimizer::GetOutputInfo(output_data, ast_id, shared_info);
+ Deoptimizer::GetOutputInfo(output_data, bailout_id, shared_info);
code_offset = FullCodeGenerator::PcField::decode(entry);
abstract_code = AbstractCode::cast(code);
} else {
- // TODO(rmcilroy): Modify FrameSummary to enable us to summarize
- // based on the BytecodeArray and bytecode offset.
DCHECK_EQ(frame_opcode, Translation::INTERPRETED_FRAME);
- code_offset = 0;
+ // BailoutId points to the next bytecode in the bytecode aray. Subtract
+ // 1 to get the end of current bytecode.
+ code_offset = bailout_id.ToInt() - 1;
abstract_code = AbstractCode::cast(shared_info->bytecode_array());
}
FrameSummary summary(receiver, function, abstract_code, code_offset,
@@ -1030,7 +1108,6 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
int OptimizedFrame::LookupExceptionHandlerInTable(
int* stack_slots, HandlerTable::CatchPrediction* prediction) {
Code* code = LookupCode();
- DCHECK(code->is_optimized_code());
HandlerTable* table = HandlerTable::cast(code->handler_table());
int pc_offset = static_cast<int>(pc() - code->entry());
if (stack_slots) *stack_slots = code->stack_slots();
@@ -1069,8 +1146,9 @@ void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) const {
// Delegate to JS frame in absence of turbofan deoptimization.
// TODO(turbofan): Revisit once we support deoptimization across the board.
- if (LookupCode()->is_turbofanned() && function()->shared()->asm_function() &&
- !FLAG_turbo_asm_deoptimization) {
+ Code* code = LookupCode();
+ if (code->kind() == Code::BUILTIN ||
+ CannotDeoptFromAsmCode(code, function())) {
return JavaScriptFrame::GetFunctions(functions);
}
@@ -1222,6 +1300,20 @@ void StackFrame::PrintIndex(StringStream* accumulator,
accumulator->Add((mode == OVERVIEW) ? "%5d: " : "[%d]: ", index);
}
+void WasmFrame::Print(StringStream* accumulator, PrintMode mode,
+ int index) const {
+ accumulator->Add("wasm frame");
+}
+
+Code* WasmFrame::unchecked_code() const {
+ return static_cast<Code*>(isolate()->FindCodeObject(pc()));
+}
+
+void WasmFrame::Iterate(ObjectVisitor* v) const { IterateCompiledFrame(v); }
+
+Address WasmFrame::GetCallerStackPointer() const {
+ return fp() + ExitFrameConstants::kCallerSPOffset;
+}
namespace {
@@ -1437,10 +1529,10 @@ void InternalFrame::Iterate(ObjectVisitor* v) const {
void StubFailureTrampolineFrame::Iterate(ObjectVisitor* v) const {
Object** base = &Memory::Object_at(sp());
- Object** limit = &Memory::Object_at(fp() +
- kFirstRegisterParameterFrameOffset);
+ Object** limit = &Memory::Object_at(
+ fp() + StubFailureTrampolineFrameConstants::kFixedHeaderBottomOffset);
v->VisitPointers(base, limit);
- base = &Memory::Object_at(fp() + StandardFrameConstants::kMarkerOffset);
+ base = &Memory::Object_at(fp() + StandardFrameConstants::kFunctionOffset);
const int offset = StandardFrameConstants::kLastObjectOffset;
limit = &Memory::Object_at(fp() + offset) + 1;
v->VisitPointers(base, limit);
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index f33eb16741..f6806d7563 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -97,13 +97,15 @@ class StackHandler BASE_EMBEDDED {
DISALLOW_IMPLICIT_CONSTRUCTORS(StackHandler);
};
-
#define STACK_FRAME_TYPE_LIST(V) \
V(ENTRY, EntryFrame) \
V(ENTRY_CONSTRUCT, EntryConstructFrame) \
V(EXIT, ExitFrame) \
V(JAVA_SCRIPT, JavaScriptFrame) \
V(OPTIMIZED, OptimizedFrame) \
+ V(WASM, WasmFrame) \
+ V(WASM_TO_JS, WasmToJsFrame) \
+ V(JS_TO_WASM, JsToWasmFrame) \
V(INTERPRETED, InterpretedFrame) \
V(STUB, StubFrame) \
V(STUB_FAILURE_TRAMPOLINE, StubFailureTrampolineFrame) \
@@ -137,77 +139,179 @@ class StackHandler BASE_EMBEDDED {
// |- - - - - - - - -| Header <-- frame ptr |
// 2 | [Constant Pool] | | |
// |- - - - - - - - -| | |
-// 2+cp | Context | | if a constant pool |
-// |- - - - - - - - -| | is used, cp = 1, |
-// 3+cp |JSFunction/Marker| v otherwise, cp = 0 |
-// +-----------------+---- |
-// 4+cp | | ^ Callee
+// 2+cp |Context/Frm. Type| v if a constant pool |
+// |-----------------+---- is used, cp = 1, |
+// 3+cp | | ^ otherwise, cp = 0 |
+// |- - - - - - - - -| | |
+// 4+cp | | | Callee
// |- - - - - - - - -| | frame slots
// ... | | Frame slots (slot >= 0)
// |- - - - - - - - -| | |
// | | v |
// -----+-----------------+----- <-- stack ptr -------------
//
-
-class StandardFrameConstants : public AllStatic {
+class CommonFrameConstants : public AllStatic {
public:
+ static const int kCallerFPOffset = 0 * kPointerSize;
+ static const int kCallerPCOffset = kCallerFPOffset + 1 * kFPOnStackSize;
+ static const int kCallerSPOffset = kCallerPCOffset + 1 * kPCOnStackSize;
+
// Fixed part of the frame consists of return address, caller fp,
// constant pool (if FLAG_enable_embedded_constant_pool), context, and
// function. StandardFrame::IterateExpressions assumes that kLastObjectOffset
// is the last object pointer.
- static const int kCPSlotSize =
- FLAG_enable_embedded_constant_pool ? kPointerSize : 0;
- static const int kFixedFrameSizeFromFp = 2 * kPointerSize + kCPSlotSize;
static const int kFixedFrameSizeAboveFp = kPCOnStackSize + kFPOnStackSize;
- static const int kFixedFrameSize =
- kFixedFrameSizeAboveFp + kFixedFrameSizeFromFp;
static const int kFixedSlotCountAboveFp =
kFixedFrameSizeAboveFp / kPointerSize;
- static const int kFixedSlotCount = kFixedFrameSize / kPointerSize;
+ static const int kCPSlotSize =
+ FLAG_enable_embedded_constant_pool ? kPointerSize : 0;
static const int kCPSlotCount = kCPSlotSize / kPointerSize;
- static const int kExpressionsOffset = -3 * kPointerSize - kCPSlotSize;
- static const int kMarkerOffset = -2 * kPointerSize - kCPSlotSize;
- static const int kContextOffset = -1 * kPointerSize - kCPSlotSize;
static const int kConstantPoolOffset = kCPSlotSize ? -1 * kPointerSize : 0;
- static const int kCallerFPOffset = 0 * kPointerSize;
- static const int kCallerPCOffset = +1 * kFPOnStackSize;
- static const int kCallerSPOffset = kCallerPCOffset + 1 * kPCOnStackSize;
+ static const int kContextOrFrameTypeSize = kPointerSize;
+ static const int kContextOrFrameTypeOffset =
+ -(kCPSlotSize + kContextOrFrameTypeSize);
+};
+// StandardFrames are used for interpreted, full-codegen and optimized
+// JavaScript frames. They always have a context below the saved fp/constant
+// pool and below that the JSFunction of the executing function.
+//
+// slot JS frame
+// +-----------------+--------------------------------
+// -n-1 | parameter 0 | ^
+// |- - - - - - - - -| |
+// -n | | Caller
+// ... | ... | frame slots
+// -2 | parameter n-1 | (slot < 0)
+// |- - - - - - - - -| |
+// -1 | parameter n | v
+// -----+-----------------+--------------------------------
+// 0 | return addr | ^ ^
+// |- - - - - - - - -| | |
+// 1 | saved frame ptr | Fixed |
+// |- - - - - - - - -| Header <-- frame ptr |
+// 2 | [Constant Pool] | | |
+// |- - - - - - - - -| | |
+// 2+cp | Context | | if a constant pool |
+// |- - - - - - - - -| | is used, cp = 1, |
+// 3+cp | JSFunction | v otherwise, cp = 0 |
+// +-----------------+---- |
+// 4+cp | | ^ Callee
+// |- - - - - - - - -| | frame slots
+// ... | | Frame slots (slot >= 0)
+// |- - - - - - - - -| | |
+// | | v |
+// -----+-----------------+----- <-- stack ptr -------------
+//
+class StandardFrameConstants : public CommonFrameConstants {
+ public:
+ static const int kFixedFrameSizeFromFp = 2 * kPointerSize + kCPSlotSize;
+ static const int kFixedFrameSize =
+ kFixedFrameSizeAboveFp + kFixedFrameSizeFromFp;
+ static const int kFixedSlotCountFromFp = kFixedFrameSizeFromFp / kPointerSize;
+ static const int kFixedSlotCount = kFixedFrameSize / kPointerSize;
+ static const int kContextOffset = kContextOrFrameTypeOffset;
+ static const int kFunctionOffset = -2 * kPointerSize - kCPSlotSize;
+ static const int kExpressionsOffset = -3 * kPointerSize - kCPSlotSize;
static const int kLastObjectOffset = kContextOffset;
};
+// TypedFrames have a SMI type maker value below the saved FP/constant pool to
+// distinguish them from StandardFrames, which have a context in that position
+// instead.
+//
+// slot JS frame
+// +-----------------+--------------------------------
+// -n-1 | parameter 0 | ^
+// |- - - - - - - - -| |
+// -n | | Caller
+// ... | ... | frame slots
+// -2 | parameter n-1 | (slot < 0)
+// |- - - - - - - - -| |
+// -1 | parameter n | v
+// -----+-----------------+--------------------------------
+// 0 | return addr | ^ ^
+// |- - - - - - - - -| | |
+// 1 | saved frame ptr | Fixed |
+// |- - - - - - - - -| Header <-- frame ptr |
+// 2 | [Constant Pool] | | |
+// |- - - - - - - - -| | |
+// 2+cp |Frame Type Marker| v if a constant pool |
+// |-----------------+---- is used, cp = 1, |
+// 3+cp | | ^ otherwise, cp = 0 |
+// |- - - - - - - - -| | |
+// 4+cp | | | Callee
+// |- - - - - - - - -| | frame slots
+// ... | | Frame slots (slot >= 0)
+// |- - - - - - - - -| | |
+// | | v |
+// -----+-----------------+----- <-- stack ptr -------------
+//
+class TypedFrameConstants : public CommonFrameConstants {
+ public:
+ static const int kFrameTypeSize = kContextOrFrameTypeSize;
+ static const int kFrameTypeOffset = kContextOrFrameTypeOffset;
+ static const int kFixedFrameSizeFromFp = kCPSlotSize + kFrameTypeSize;
+ static const int kFixedSlotCountFromFp = kFixedFrameSizeFromFp / kPointerSize;
+ static const int kFixedFrameSize =
+ StandardFrameConstants::kFixedFrameSizeAboveFp + kFixedFrameSizeFromFp;
+ static const int kFixedSlotCount = kFixedFrameSize / kPointerSize;
+ static const int kFirstPushedFrameValueOffset =
+ -StandardFrameConstants::kCPSlotSize - kFrameTypeSize - kPointerSize;
+};
-class ArgumentsAdaptorFrameConstants : public AllStatic {
+#define TYPED_FRAME_PUSHED_VALUE_OFFSET(x) \
+ (TypedFrameConstants::kFirstPushedFrameValueOffset - (x)*kPointerSize)
+#define TYPED_FRAME_SIZE(count) \
+ (TypedFrameConstants::kFixedFrameSize + (count)*kPointerSize)
+#define TYPED_FRAME_SIZE_FROM_SP(count) \
+ (TypedFrameConstants::kFixedFrameSizeFromFp + (count)*kPointerSize)
+#define DEFINE_TYPED_FRAME_SIZES(count) \
+ static const int kFixedFrameSize = TYPED_FRAME_SIZE(count); \
+ static const int kFixedSlotCount = kFixedFrameSize / kPointerSize; \
+ static const int kFixedFrameSizeFromFp = TYPED_FRAME_SIZE_FROM_SP(count); \
+ static const int kFixedSlotCountFromFp = kFixedFrameSizeFromFp / kPointerSize
+
+class ArgumentsAdaptorFrameConstants : public TypedFrameConstants {
public:
// FP-relative.
- static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
-
- static const int kFrameSize =
- StandardFrameConstants::kFixedFrameSize + kPointerSize;
+ static const int kFunctionOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static const int kLengthOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ DEFINE_TYPED_FRAME_SIZES(2);
};
-
-class InternalFrameConstants : public AllStatic {
+class InternalFrameConstants : public TypedFrameConstants {
public:
// FP-relative.
- static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+ static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ DEFINE_TYPED_FRAME_SIZES(1);
};
+class FrameDropperFrameConstants : public InternalFrameConstants {
+ public:
+ // FP-relative.
+ static const int kFunctionOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ DEFINE_TYPED_FRAME_SIZES(2);
+};
-class ConstructFrameConstants : public AllStatic {
+class ConstructFrameConstants : public TypedFrameConstants {
public:
// FP-relative.
- static const int kImplicitReceiverOffset =
- StandardFrameConstants::kExpressionsOffset - 3 * kPointerSize;
- static const int kLengthOffset =
- StandardFrameConstants::kExpressionsOffset - 2 * kPointerSize;
- static const int kAllocationSiteOffset =
- StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
- static const int kCodeOffset =
- StandardFrameConstants::kExpressionsOffset - 0 * kPointerSize;
-
- static const int kFrameSize =
- StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
+ static const int kContextOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static const int kAllocationSiteOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ static const int kLengthOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(2);
+ static const int kImplicitReceiverOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(3);
+ DEFINE_TYPED_FRAME_SIZES(4);
+};
+
+class StubFailureTrampolineFrameConstants : public InternalFrameConstants {
+ public:
+ static const int kArgumentsArgumentsOffset =
+ TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static const int kArgumentsLengthOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ static const int kArgumentsPointerOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(2);
+ static const int kFixedHeaderBottomOffset = kArgumentsPointerOffset;
+ DEFINE_TYPED_FRAME_SIZES(3);
};
@@ -310,6 +414,9 @@ class StackFrame BASE_EMBEDDED {
bool is_exit() const { return type() == EXIT; }
bool is_optimized() const { return type() == OPTIMIZED; }
bool is_interpreted() const { return type() == INTERPRETED; }
+ bool is_wasm() const { return type() == WASM; }
+ bool is_wasm_to_js() const { return type() == WASM_TO_JS; }
+ bool is_js_to_wasm() const { return type() == JS_TO_WASM; }
bool is_arguments_adaptor() const { return type() == ARGUMENTS_ADAPTOR; }
bool is_internal() const { return type() == INTERNAL; }
bool is_stub_failure_trampoline() const {
@@ -617,8 +724,7 @@ class FrameSummary BASE_EMBEDDED {
bool is_constructor_;
};
-
-class JavaScriptFrame: public StandardFrame {
+class JavaScriptFrame : public StandardFrame {
public:
Type type() const override { return JAVA_SCRIPT; }
@@ -841,6 +947,55 @@ class ArgumentsAdaptorFrame: public JavaScriptFrame {
friend class StackFrameIteratorBase;
};
+class WasmFrame : public StandardFrame {
+ public:
+ Type type() const override { return WASM; }
+
+ // GC support.
+ void Iterate(ObjectVisitor* v) const override;
+
+ // Printing support.
+ void Print(StringStream* accumulator, PrintMode mode,
+ int index) const override;
+
+ // Determine the code for the frame.
+ Code* unchecked_code() const override;
+
+ static WasmFrame* cast(StackFrame* frame) {
+ DCHECK(frame->is_wasm());
+ return static_cast<WasmFrame*>(frame);
+ }
+
+ protected:
+ inline explicit WasmFrame(StackFrameIteratorBase* iterator);
+
+ Address GetCallerStackPointer() const override;
+
+ private:
+ friend class StackFrameIteratorBase;
+};
+
+class WasmToJsFrame : public StubFrame {
+ public:
+ Type type() const override { return WASM_TO_JS; }
+
+ protected:
+ inline explicit WasmToJsFrame(StackFrameIteratorBase* iterator);
+
+ private:
+ friend class StackFrameIteratorBase;
+};
+
+class JsToWasmFrame : public StubFrame {
+ public:
+ Type type() const override { return JS_TO_WASM; }
+
+ protected:
+ inline explicit JsToWasmFrame(StackFrameIteratorBase* iterator);
+
+ private:
+ friend class StackFrameIteratorBase;
+};
class InternalFrame: public StandardFrame {
public:
@@ -869,14 +1024,6 @@ class InternalFrame: public StandardFrame {
class StubFailureTrampolineFrame: public StandardFrame {
public:
- // sizeof(Arguments) - sizeof(Arguments*) is 3 * kPointerSize), but the
- // presubmit script complains about using sizeof() on a type.
- static const int kFirstRegisterParameterFrameOffset =
- StandardFrameConstants::kMarkerOffset - 3 * kPointerSize;
-
- static const int kCallerStackParameterCountFrameOffset =
- StandardFrameConstants::kMarkerOffset - 2 * kPointerSize;
-
Type type() const override { return STUB_FAILURE_TRAMPOLINE; }
// Get the code associated with this frame.
@@ -974,7 +1121,6 @@ class StackFrameIterator: public StackFrameIteratorBase {
DISALLOW_COPY_AND_ASSIGN(StackFrameIterator);
};
-
// Iterator that supports iterating through all JavaScript frames.
class JavaScriptFrameIterator BASE_EMBEDDED {
public:
@@ -997,7 +1143,6 @@ class JavaScriptFrameIterator BASE_EMBEDDED {
StackFrameIterator iterator_;
};
-
// NOTE: The stack trace frame iterator is an iterator that only
// traverse proper JavaScript frames; that is JavaScript frames that
// have proper JavaScript functions. This excludes the problematic
diff --git a/deps/v8/src/full-codegen/arm/full-codegen-arm.cc b/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
index 46e3e2cc71..81c5ff2ae7 100644
--- a/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
+++ b/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
@@ -293,42 +293,38 @@ void FullCodeGenerator::Generate() {
__ CallRuntime(Runtime::kTraceEnter);
}
- // Visit the declarations and body unless there is an illegal
- // redeclaration.
- if (scope()->HasIllegalRedeclaration()) {
+ // Visit the declarations and body.
+ PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+ {
Comment cmnt(masm_, "[ Declarations");
- VisitForEffect(scope()->GetIllegalRedeclaration());
-
- } else {
- PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
- { Comment cmnt(masm_, "[ Declarations");
- VisitDeclarations(scope()->declarations());
- }
+ VisitDeclarations(scope()->declarations());
+ }
- // Assert that the declarations do not use ICs. Otherwise the debugger
- // won't be able to redirect a PC at an IC to the correct IC in newly
- // recompiled code.
- DCHECK_EQ(0, ic_total_count_);
-
- { Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
- Label ok;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, &ok);
- Handle<Code> stack_check = isolate()->builtins()->StackCheck();
- PredictableCodeSizeScope predictable(masm_);
- predictable.ExpectSize(
- masm_->CallSize(stack_check, RelocInfo::CODE_TARGET));
- __ Call(stack_check, RelocInfo::CODE_TARGET);
- __ bind(&ok);
- }
+ // Assert that the declarations do not use ICs. Otherwise the debugger
+ // won't be able to redirect a PC at an IC to the correct IC in newly
+ // recompiled code.
+ DCHECK_EQ(0, ic_total_count_);
+
+ {
+ Comment cmnt(masm_, "[ Stack check");
+ PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+ Label ok;
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmp(sp, Operand(ip));
+ __ b(hs, &ok);
+ Handle<Code> stack_check = isolate()->builtins()->StackCheck();
+ PredictableCodeSizeScope predictable(masm_);
+ predictable.ExpectSize(
+ masm_->CallSize(stack_check, RelocInfo::CODE_TARGET));
+ __ Call(stack_check, RelocInfo::CODE_TARGET);
+ __ bind(&ok);
+ }
- { Comment cmnt(masm_, "[ Body");
- DCHECK(loop_depth() == 0);
- VisitStatements(literal()->body());
- DCHECK(loop_depth() == 0);
- }
+ {
+ Comment cmnt(masm_, "[ Body");
+ DCHECK(loop_depth() == 0);
+ VisitStatements(literal()->body());
+ DCHECK(loop_depth() == 0);
}
// Always emit a 'return undefined' in case control fell off the end of
@@ -537,7 +533,7 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
true,
true_label_,
false_label_);
- DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
+ DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
if (false_label_ != fall_through_) __ b(false_label_);
} else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -643,7 +639,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_true,
Label* if_false,
Label* fall_through) {
- Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+ Handle<Code> ic = ToBooleanICStub::GetUninitialized(isolate());
CallIC(ic, condition->test_id());
__ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
@@ -994,14 +990,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
- Label loop, exit;
- ForIn loop_statement(this, stmt);
- increment_loop_depth();
-
// Get the object to enumerate over.
SetExpressionAsStatementPosition(stmt->enumerable());
VisitForAccumulatorValue(stmt->enumerable());
- OperandStackDepthIncrement(ForIn::kElementCount);
+ OperandStackDepthIncrement(5);
+
+ Label loop, exit;
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
// If the object is null or undefined, skip over the loop, otherwise convert
// it to a JS receiver. See ECMA-262 version 5, section 12.6.4.
@@ -1076,10 +1072,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// We got a fixed array in register r0. Iterate through that.
__ bind(&fixed_array);
- int const vector_index = SmiFromSlot(slot)->value();
- __ EmitLoadTypeFeedbackVector(r1);
- __ mov(r2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
- __ str(r2, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(vector_index)));
__ mov(r1, Operand(Smi::FromInt(1))); // Smi(1) indicates slow check
__ Push(r1, r0); // Smi and array
__ ldr(r1, FieldMemOperand(r0, FixedArray::kLengthOffset));
@@ -1114,12 +1106,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmp(r4, Operand(r2));
__ b(eq, &update_each);
- // We might get here from TurboFan or Crankshaft when something in the
- // for-in loop body deopts and only now notice in fullcodegen, that we
- // can now longer use the enum cache, i.e. left fast mode. So better record
- // this information here, in case we later OSR back into this loop or
- // reoptimize the whole function w/o rerunning the loop with the slow
- // mode object in fullcodegen (which would result in a deopt loop).
+ // We need to filter the key, record slow-path here.
+ int const vector_index = SmiFromSlot(slot)->value();
__ EmitLoadTypeFeedbackVector(r0);
__ mov(r2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
__ str(r2, FieldMemOperand(r0, FixedArray::OffsetOfElementAt(vector_index)));
@@ -1172,31 +1160,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
}
-void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
- bool pretenure) {
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning. If
- // we're running with the --always-opt or the --prepare-always-opt
- // flag, we need to use the runtime function so that the new function
- // we are creating here gets a chance to have its code optimized and
- // doesn't just get a copy of the existing unoptimized code.
- if (!FLAG_always_opt &&
- !FLAG_prepare_always_opt &&
- !pretenure &&
- scope()->is_function_scope() &&
- info->num_literals() == 0) {
- FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
- __ mov(r2, Operand(info));
- __ CallStub(&stub);
- } else {
- __ Push(info);
- __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
- : Runtime::kNewClosure);
- }
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
@@ -1631,13 +1594,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
}
- if (expr->has_function()) {
- DCHECK(result_saved);
- __ ldr(r0, MemOperand(sp));
- __ push(r0);
- __ CallRuntime(Runtime::kToFastProperties);
- }
-
if (result_saved) {
context()->PlugTOS();
} else {
@@ -1884,63 +1840,43 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// this. It stays on the stack while we update the iterator.
VisitForStackValue(expr->expression());
- switch (expr->yield_kind()) {
- case Yield::kSuspend:
- // Pop value from top-of-stack slot; box result into result register.
- EmitCreateIteratorResult(false);
- __ push(result_register());
- // Fall through.
- case Yield::kInitial: {
- Label suspend, continuation, post_runtime, resume;
-
- __ jmp(&suspend);
- __ bind(&continuation);
- // When we arrive here, the stack top is the resume mode and
- // result_register() holds the input value (the argument given to the
- // respective resume operation).
- __ RecordGeneratorContinuation();
- __ pop(r1);
- __ cmp(r1, Operand(Smi::FromInt(JSGeneratorObject::RETURN)));
- __ b(ne, &resume);
- __ push(result_register());
- EmitCreateIteratorResult(true);
- EmitUnwindAndReturn();
-
- __ bind(&suspend);
- VisitForAccumulatorValue(expr->generator_object());
- DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
- __ mov(r1, Operand(Smi::FromInt(continuation.pos())));
- __ str(r1, FieldMemOperand(r0, JSGeneratorObject::kContinuationOffset));
- __ str(cp, FieldMemOperand(r0, JSGeneratorObject::kContextOffset));
- __ mov(r1, cp);
- __ RecordWriteField(r0, JSGeneratorObject::kContextOffset, r1, r2,
- kLRHasBeenSaved, kDontSaveFPRegs);
- __ add(r1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
- __ cmp(sp, r1);
- __ b(eq, &post_runtime);
- __ push(r0); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ bind(&post_runtime);
- PopOperand(result_register());
- EmitReturnSequence();
-
- __ bind(&resume);
- context()->Plug(result_register());
- break;
- }
-
- case Yield::kFinal: {
- // Pop value from top-of-stack slot, box result into result register.
- OperandStackDepthDecrement(1);
- EmitCreateIteratorResult(true);
- EmitUnwindAndReturn();
- break;
- }
+ Label suspend, continuation, post_runtime, resume;
+
+ __ jmp(&suspend);
+ __ bind(&continuation);
+ // When we arrive here, the stack top is the resume mode and
+ // result_register() holds the input value (the argument given to the
+ // respective resume operation).
+ __ RecordGeneratorContinuation();
+ __ pop(r1);
+ __ cmp(r1, Operand(Smi::FromInt(JSGeneratorObject::RETURN)));
+ __ b(ne, &resume);
+ __ push(result_register());
+ EmitCreateIteratorResult(true);
+ EmitUnwindAndReturn();
+
+ __ bind(&suspend);
+ OperandStackDepthIncrement(1); // Not popped on this path.
+ VisitForAccumulatorValue(expr->generator_object());
+ DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+ __ mov(r1, Operand(Smi::FromInt(continuation.pos())));
+ __ str(r1, FieldMemOperand(r0, JSGeneratorObject::kContinuationOffset));
+ __ str(cp, FieldMemOperand(r0, JSGeneratorObject::kContextOffset));
+ __ mov(r1, cp);
+ __ RecordWriteField(r0, JSGeneratorObject::kContextOffset, r1, r2,
+ kLRHasBeenSaved, kDontSaveFPRegs);
+ __ add(r1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
+ __ cmp(sp, r1);
+ __ b(eq, &post_runtime);
+ __ push(r0); // generator object
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ bind(&post_runtime);
+ PopOperand(result_register());
+ EmitReturnSequence();
- case Yield::kDelegating:
- UNREACHABLE();
- }
+ __ bind(&resume);
+ context()->Plug(result_register());
}
@@ -1970,7 +1906,10 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset));
__ push(r2);
- // Push holes for the rest of the arguments to the generator function.
+ // Push holes for arguments to generator function. Since the parser forced
+ // context allocation for any variables in generators, the actual argument
+ // values have already been copied into the context and these dummy values
+ // will never be used.
__ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r3,
FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -1994,9 +1933,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// pp = caller's constant pool (if FLAG_enable_embedded_constant_pool),
// cp = callee's context,
// r4 = callee's JS function.
- __ PushFixedFrame(r4);
- // Adjust FP to point to saved FP.
- __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ PushStandardFrame(r4);
// Load the operand stack size.
__ ldr(r3, FieldMemOperand(r1, JSGeneratorObject::kOperandStackOffset));
@@ -2081,7 +2018,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&done_allocate);
__ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r1);
- __ pop(r2);
+ PopOperand(r2);
__ LoadRoot(r3,
done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
__ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
@@ -2093,18 +2030,6 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
}
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
- SetExpressionPosition(prop);
- Literal* key = prop->key()->AsLiteral();
- DCHECK(!prop->IsSuperAccess());
-
- __ mov(LoadDescriptor::NameRegister(), Operand(key->value()));
- __ mov(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallLoadIC(NOT_INSIDE_TYPEOF);
-}
-
-
void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
Token::Value op,
Expression* left_expr,
@@ -2713,7 +2638,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
}
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
- SetCallPosition(expr);
+ SetCallPosition(expr, expr->tail_call_mode());
if (expr->tail_call_mode() == TailCallMode::kAllow) {
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceTailCall);
@@ -3197,23 +3122,6 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into r0 and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- // Convert the object to an integer.
- Label done_convert;
- __ JumpIfSmi(r0, &done_convert);
- __ Push(r0);
- __ CallRuntime(Runtime::kToInteger);
- __ bind(&done_convert);
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3390,6 +3298,11 @@ void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
context()->Plug(r0);
}
+void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
+ DCHECK_EQ(0, expr->arguments()->length());
+ __ LoadNativeContextSlot(Context::ORDINARY_HAS_INSTANCE_INDEX, r0);
+ context()->Plug(r0);
+}
void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
DCHECK(expr->arguments()->length() == 0);
@@ -3432,11 +3345,13 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
+ // Push function.
+ __ LoadNativeContextSlot(expr->context_index(), r0);
+ PushOperand(r0);
+
// Push undefined as the receiver.
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
PushOperand(r0);
-
- __ LoadNativeContextSlot(expr->context_index(), r0);
}
@@ -3450,60 +3365,9 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
-}
-
-
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- if (expr->is_jsruntime()) {
- Comment cmnt(masm_, "[ CallRuntime");
- EmitLoadJSRuntimeFunction(expr);
-
- // Push the target function under the receiver.
- __ ldr(ip, MemOperand(sp, 0));
- PushOperand(ip);
- __ str(r0, MemOperand(sp, kPointerSize));
-
- // Push the arguments ("left-to-right").
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
- EmitCallJSRuntimeFunction(expr);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- context()->DropAndPlug(1, r0);
-
- } else {
- const Runtime::Function* function = expr->function();
- switch (function->function_id) {
-#define CALL_INTRINSIC_GENERATOR(Name) \
- case Runtime::kInline##Name: { \
- Comment cmnt(masm_, "[ Inline" #Name); \
- return Emit##Name(expr); \
- }
- FOR_EACH_FULL_CODE_INTRINSIC(CALL_INTRINSIC_GENERATOR)
-#undef CALL_INTRINSIC_GENERATOR
- default: {
- Comment cmnt(masm_, "[ CallRuntime for unhandled intrinsic");
- // Push the arguments ("left-to-right").
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the C runtime function.
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
- __ CallRuntime(expr->function(), arg_count);
- OperandStackDepthDecrement(arg_count);
- context()->Plug(r0);
- }
- }
- }
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -3741,11 +3605,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ jmp(&stub_call);
__ bind(&slow);
}
- if (!is_strong(language_mode())) {
- ToNumberStub convert_stub(isolate());
- __ CallStub(&convert_stub);
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
- }
+
+ // Convert old value into a number.
+ ToNumberStub convert_stub(isolate());
+ __ CallStub(&convert_stub);
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -3785,9 +3649,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
patch_site.EmitPatchInfo();
__ bind(&done);
- if (is_strong(language_mode())) {
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
- }
// Store the value returned in r0.
switch (assign_type) {
case VARIABLE:
@@ -4040,21 +3901,16 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
__ cmp(r0, r1);
Split(eq, if_true, if_false, fall_through);
} else {
- Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, expr->CompareOperationFeedbackId());
- __ CompareRoot(r0, Heap::kTrueValueRootIndex);
- Split(eq, if_true, if_false, fall_through);
+ __ JumpIfSmi(r0, if_false);
+ __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
+ __ tst(r1, Operand(1 << Map::kIsUndetectable));
+ Split(ne, if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- context()->Plug(r0);
-}
-
-
Register FullCodeGenerator::result_register() {
return r0;
}
@@ -4064,6 +3920,10 @@ Register FullCodeGenerator::context_register() {
return cp;
}
+void FullCodeGenerator::LoadFromFrameField(int frame_offset, Register value) {
+ DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
+ __ ldr(value, MemOperand(fp, frame_offset));
+}
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
@@ -4134,12 +3994,6 @@ void FullCodeGenerator::ClearPendingMessage() {
}
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
- DCHECK(!slot.IsInvalid());
- __ mov(VectorStoreICTrampolineDescriptor::SlotRegister(),
- Operand(SmiFromSlot(slot)));
-}
-
void FullCodeGenerator::DeferredCommands::EmitCommands() {
DCHECK(!result_register().is(r1));
__ Pop(result_register()); // Restore the accumulator.
@@ -4250,7 +4104,6 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
break;
}
case ON_STACK_REPLACEMENT:
- case OSR_AFTER_STACK_CHECK:
// <decrement profiling counter>
// mov r0, r0 (NOP)
// ; load on-stack replacement address into ip - either of (for ARMv7):
@@ -4288,8 +4141,10 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
Address pc_immediate_load_address = GetInterruptImmediateLoadAddress(pc);
Address branch_address = pc_immediate_load_address - Assembler::kInstrSize;
+#ifdef DEBUG
Address interrupt_address = Assembler::target_address_at(
pc_immediate_load_address, unoptimized_code);
+#endif
if (Assembler::IsBranch(Assembler::instr_at(branch_address))) {
DCHECK(interrupt_address ==
@@ -4299,14 +4154,9 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
DCHECK(Assembler::IsNop(Assembler::instr_at(branch_address)));
- if (interrupt_address ==
- isolate->builtins()->OnStackReplacement()->entry()) {
- return ON_STACK_REPLACEMENT;
- }
-
DCHECK(interrupt_address ==
- isolate->builtins()->OsrAfterStackCheck()->entry());
- return OSR_AFTER_STACK_CHECK;
+ isolate->builtins()->OnStackReplacement()->entry());
+ return ON_STACK_REPLACEMENT;
}
diff --git a/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc b/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
index d43ae754e9..aa67117a7f 100644
--- a/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
+++ b/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
@@ -296,42 +296,36 @@ void FullCodeGenerator::Generate() {
__ CallRuntime(Runtime::kTraceEnter);
}
- // Visit the declarations and body unless there is an illegal
- // redeclaration.
- if (scope()->HasIllegalRedeclaration()) {
+ // Visit the declarations and body.
+ PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+ {
Comment cmnt(masm_, "[ Declarations");
- VisitForEffect(scope()->GetIllegalRedeclaration());
-
- } else {
- PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
- { Comment cmnt(masm_, "[ Declarations");
- VisitDeclarations(scope()->declarations());
- }
+ VisitDeclarations(scope()->declarations());
+ }
- // Assert that the declarations do not use ICs. Otherwise the debugger
- // won't be able to redirect a PC at an IC to the correct IC in newly
- // recompiled code.
- DCHECK_EQ(0, ic_total_count_);
+ // Assert that the declarations do not use ICs. Otherwise the debugger
+ // won't be able to redirect a PC at an IC to the correct IC in newly
+ // recompiled code.
+ DCHECK_EQ(0, ic_total_count_);
- {
- Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
- Label ok;
- DCHECK(jssp.Is(__ StackPointer()));
- __ CompareRoot(jssp, Heap::kStackLimitRootIndex);
- __ B(hs, &ok);
- PredictableCodeSizeScope predictable(masm_,
- Assembler::kCallSizeWithRelocation);
- __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
- __ Bind(&ok);
- }
+ {
+ Comment cmnt(masm_, "[ Stack check");
+ PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+ Label ok;
+ DCHECK(jssp.Is(__ StackPointer()));
+ __ CompareRoot(jssp, Heap::kStackLimitRootIndex);
+ __ B(hs, &ok);
+ PredictableCodeSizeScope predictable(masm_,
+ Assembler::kCallSizeWithRelocation);
+ __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+ __ Bind(&ok);
+ }
- {
- Comment cmnt(masm_, "[ Body");
- DCHECK(loop_depth() == 0);
- VisitStatements(literal()->body());
- DCHECK(loop_depth() == 0);
- }
+ {
+ Comment cmnt(masm_, "[ Body");
+ DCHECK(loop_depth() == 0);
+ VisitStatements(literal()->body());
+ DCHECK(loop_depth() == 0);
}
// Always emit a 'return undefined' in case control fell off the end of
@@ -530,7 +524,7 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
true,
true_label_,
false_label_);
- DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
+ DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
if (false_label_ != fall_through_) __ B(false_label_);
} else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -640,7 +634,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_true,
Label* if_false,
Label* fall_through) {
- Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+ Handle<Code> ic = ToBooleanICStub::GetUninitialized(isolate());
CallIC(ic, condition->test_id());
__ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
@@ -997,14 +991,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// TODO(all): This visitor probably needs better comments and a revisit.
- Label loop, exit;
- ForIn loop_statement(this, stmt);
- increment_loop_depth();
-
// Get the object to enumerate over.
SetExpressionAsStatementPosition(stmt->enumerable());
VisitForAccumulatorValue(stmt->enumerable());
- OperandStackDepthIncrement(ForIn::kElementCount);
+ OperandStackDepthIncrement(5);
+
+ Label loop, exit;
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
// If the object is null or undefined, skip over the loop, otherwise convert
// it to a JS receiver. See ECMA-262 version 5, section 12.6.4.
@@ -1072,10 +1066,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// We got a fixed array in register x0. Iterate through that.
__ Bind(&fixed_array);
- int const vector_index = SmiFromSlot(slot)->value();
- __ EmitLoadTypeFeedbackVector(x1);
- __ Mov(x10, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
- __ Str(x10, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(vector_index)));
__ Mov(x1, Smi::FromInt(1)); // Smi(1) indicates slow check.
__ Ldr(x2, FieldMemOperand(x0, FixedArray::kLengthOffset));
__ Push(x1, x0, x2); // Smi and array, fixed array length (as smi).
@@ -1108,12 +1098,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Cmp(x11, x2);
__ B(eq, &update_each);
- // We might get here from TurboFan or Crankshaft when something in the
- // for-in loop body deopts and only now notice in fullcodegen, that we
- // can now longer use the enum cache, i.e. left fast mode. So better record
- // this information here, in case we later OSR back into this loop or
- // reoptimize the whole function w/o rerunning the loop with the slow
- // mode object in fullcodegen (which would result in a deopt loop).
+ // We need to filter the key, record slow-path here.
+ int const vector_index = SmiFromSlot(slot)->value();
__ EmitLoadTypeFeedbackVector(x0);
__ Mov(x10, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
__ Str(x10, FieldMemOperand(x0, FixedArray::OffsetOfElementAt(vector_index)));
@@ -1165,31 +1151,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
}
-void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
- bool pretenure) {
- // Use the fast case closure allocation code that allocates in new space for
- // nested functions that don't need literals cloning. If we're running with
- // the --always-opt or the --prepare-always-opt flag, we need to use the
- // runtime function so that the new function we are creating here gets a
- // chance to have its code optimized and doesn't just get a copy of the
- // existing unoptimized code.
- if (!FLAG_always_opt &&
- !FLAG_prepare_always_opt &&
- !pretenure &&
- scope()->is_function_scope() &&
- info->num_literals() == 0) {
- FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
- __ Mov(x2, Operand(info));
- __ CallStub(&stub);
- } else {
- __ Push(info);
- __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
- : Runtime::kNewClosure);
- }
- context()->Plug(x0);
-}
-
-
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
@@ -1618,13 +1579,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
}
- if (expr->has_function()) {
- DCHECK(result_saved);
- __ Peek(x0, 0);
- __ Push(x0);
- __ CallRuntime(Runtime::kToFastProperties);
- }
-
if (result_saved) {
context()->PlugTOS();
} else {
@@ -1858,18 +1812,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
- SetExpressionPosition(prop);
- Literal* key = prop->key()->AsLiteral();
- DCHECK(!prop->IsSuperAccess());
-
- __ Mov(LoadDescriptor::NameRegister(), Operand(key->value()));
- __ Mov(LoadDescriptor::SlotRegister(),
- SmiFromSlot(prop->PropertyFeedbackSlot()));
- CallLoadIC(NOT_INSIDE_TYPEOF);
-}
-
-
void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
Token::Value op,
Expression* left_expr,
@@ -2494,7 +2436,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
}
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
- SetCallPosition(expr);
+ SetCallPosition(expr, expr->tail_call_mode());
if (expr->tail_call_mode() == TailCallMode::kAllow) {
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceTailCall);
@@ -2979,23 +2921,6 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into x0 and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- // Convert the object to an integer.
- Label done_convert;
- __ JumpIfSmi(x0, &done_convert);
- __ Push(x0);
- __ CallRuntime(Runtime::kToInteger);
- __ bind(&done_convert);
- context()->Plug(x0);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3176,6 +3101,11 @@ void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
context()->Plug(x0);
}
+void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
+ DCHECK_EQ(0, expr->arguments()->length());
+ __ LoadNativeContextSlot(Context::ORDINARY_HAS_INSTANCE_INDEX, x0);
+ context()->Plug(x0);
+}
void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
DCHECK(expr->arguments()->length() == 0);
@@ -3229,11 +3159,13 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
+ // Push function.
+ __ LoadNativeContextSlot(expr->context_index(), x0);
+ PushOperand(x0);
+
// Push undefined as the receiver.
__ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
PushOperand(x0);
-
- __ LoadNativeContextSlot(expr->context_index(), x0);
}
@@ -3247,58 +3179,9 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
-}
-
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- if (expr->is_jsruntime()) {
- Comment cmnt(masm_, "[ CallRunTime");
- EmitLoadJSRuntimeFunction(expr);
-
- // Push the target function under the receiver.
- PopOperand(x10);
- PushOperands(x0, x10);
-
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
- EmitCallJSRuntimeFunction(expr);
-
- // Restore context register.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- context()->DropAndPlug(1, x0);
-
- } else {
- const Runtime::Function* function = expr->function();
- switch (function->function_id) {
-#define CALL_INTRINSIC_GENERATOR(Name) \
- case Runtime::kInline##Name: { \
- Comment cmnt(masm_, "[ Inline" #Name); \
- return Emit##Name(expr); \
- }
- FOR_EACH_FULL_CODE_INTRINSIC(CALL_INTRINSIC_GENERATOR)
-#undef CALL_INTRINSIC_GENERATOR
- default: {
- Comment cmnt(masm_, "[ CallRuntime for unhandled intrinsic");
- // Push the arguments ("left-to-right").
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the C runtime function.
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
- __ CallRuntime(expr->function(), arg_count);
- OperandStackDepthDecrement(arg_count);
- context()->Plug(x0);
- }
- }
- }
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -3532,11 +3415,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ B(&stub_call);
__ Bind(&slow);
}
- if (!is_strong(language_mode())) {
- ToNumberStub convert_stub(isolate());
- __ CallStub(&convert_stub);
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
- }
+
+ // Convert old value into a number.
+ ToNumberStub convert_stub(isolate());
+ __ CallStub(&convert_stub);
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -3578,9 +3461,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
__ Bind(&done);
- if (is_strong(language_mode())) {
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
- }
// Store the value returned in x0.
switch (assign_type) {
case VARIABLE:
@@ -3842,22 +3722,17 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
__ CompareRoot(x0, nil_value);
Split(eq, if_true, if_false, fall_through);
} else {
- Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, expr->CompareOperationFeedbackId());
- __ CompareRoot(x0, Heap::kTrueValueRootIndex);
- Split(eq, if_true, if_false, fall_through);
+ __ JumpIfSmi(x0, if_false);
+ __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset));
+ __ Ldrb(x1, FieldMemOperand(x0, Map::kBitFieldOffset));
+ __ TestAndSplit(x1, 1 << Map::kIsUndetectable, if_false, if_true,
+ fall_through);
}
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- context()->Plug(x0);
-}
-
-
void FullCodeGenerator::VisitYield(Yield* expr) {
Comment cmnt(masm_, "[ Yield");
SetExpressionPosition(expr);
@@ -3870,66 +3745,46 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// and suchlike. The implementation changes a little by bleeding_edge so I
// don't want to spend too much time on it now.
- switch (expr->yield_kind()) {
- case Yield::kSuspend:
- // Pop value from top-of-stack slot; box result into result register.
- EmitCreateIteratorResult(false);
- __ Push(result_register());
- // Fall through.
- case Yield::kInitial: {
- Label suspend, continuation, post_runtime, resume;
-
- __ B(&suspend);
- // TODO(jbramley): This label is bound here because the following code
- // looks at its pos(). Is it possible to do something more efficient here,
- // perhaps using Adr?
- __ Bind(&continuation);
- // When we arrive here, the stack top is the resume mode and
- // result_register() holds the input value (the argument given to the
- // respective resume operation).
- __ RecordGeneratorContinuation();
- __ Pop(x1);
- __ Cmp(x1, Smi::FromInt(JSGeneratorObject::RETURN));
- __ B(ne, &resume);
- __ Push(result_register());
- EmitCreateIteratorResult(true);
- EmitUnwindAndReturn();
-
- __ Bind(&suspend);
- VisitForAccumulatorValue(expr->generator_object());
- DCHECK((continuation.pos() > 0) && Smi::IsValid(continuation.pos()));
- __ Mov(x1, Smi::FromInt(continuation.pos()));
- __ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset));
- __ Str(cp, FieldMemOperand(x0, JSGeneratorObject::kContextOffset));
- __ Mov(x1, cp);
- __ RecordWriteField(x0, JSGeneratorObject::kContextOffset, x1, x2,
- kLRHasBeenSaved, kDontSaveFPRegs);
- __ Add(x1, fp, StandardFrameConstants::kExpressionsOffset);
- __ Cmp(__ StackPointer(), x1);
- __ B(eq, &post_runtime);
- __ Push(x0); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ Bind(&post_runtime);
- PopOperand(result_register());
- EmitReturnSequence();
-
- __ Bind(&resume);
- context()->Plug(result_register());
- break;
- }
-
- case Yield::kFinal: {
- // Pop value from top-of-stack slot, box result into result register.
- OperandStackDepthDecrement(1);
- EmitCreateIteratorResult(true);
- EmitUnwindAndReturn();
- break;
- }
+ Label suspend, continuation, post_runtime, resume;
+
+ __ B(&suspend);
+ // TODO(jbramley): This label is bound here because the following code
+ // looks at its pos(). Is it possible to do something more efficient here,
+ // perhaps using Adr?
+ __ Bind(&continuation);
+ // When we arrive here, the stack top is the resume mode and
+ // result_register() holds the input value (the argument given to the
+ // respective resume operation).
+ __ RecordGeneratorContinuation();
+ __ Pop(x1);
+ __ Cmp(x1, Smi::FromInt(JSGeneratorObject::RETURN));
+ __ B(ne, &resume);
+ __ Push(result_register());
+ EmitCreateIteratorResult(true);
+ EmitUnwindAndReturn();
+
+ __ Bind(&suspend);
+ OperandStackDepthIncrement(1); // Not popped on this path.
+ VisitForAccumulatorValue(expr->generator_object());
+ DCHECK((continuation.pos() > 0) && Smi::IsValid(continuation.pos()));
+ __ Mov(x1, Smi::FromInt(continuation.pos()));
+ __ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset));
+ __ Str(cp, FieldMemOperand(x0, JSGeneratorObject::kContextOffset));
+ __ Mov(x1, cp);
+ __ RecordWriteField(x0, JSGeneratorObject::kContextOffset, x1, x2,
+ kLRHasBeenSaved, kDontSaveFPRegs);
+ __ Add(x1, fp, StandardFrameConstants::kExpressionsOffset);
+ __ Cmp(__ StackPointer(), x1);
+ __ B(eq, &post_runtime);
+ __ Push(x0); // generator object
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Bind(&post_runtime);
+ PopOperand(result_register());
+ EmitReturnSequence();
- case Yield::kDelegating:
- UNREACHABLE();
- }
+ __ Bind(&resume);
+ context()->Plug(result_register());
}
@@ -3968,7 +3823,10 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
JSGeneratorObject::kReceiverOffset));
__ Push(x10);
- // Push holes for the rest of the arguments to the generator function.
+ // Push holes for arguments to generator function. Since the parser forced
+ // context allocation for any variables in generators, the actual argument
+ // values have already been copied into the context and these dummy values
+ // will never be used.
__ Ldr(x10, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
// The number of arguments is stored as an int32_t, and -1 is a marker
@@ -4079,7 +3937,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Register empty_fixed_array = x4;
Register untagged_result = x5;
__ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, map_reg);
- __ Pop(result_value);
+ PopOperand(result_value);
__ LoadRoot(boolean_done,
done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
__ LoadRoot(empty_fixed_array, Heap::kEmptyFixedArrayRootIndex);
@@ -4115,6 +3973,10 @@ Register FullCodeGenerator::context_register() {
return cp;
}
+void FullCodeGenerator::LoadFromFrameField(int frame_offset, Register value) {
+ DCHECK(POINTER_SIZE_ALIGN(frame_offset) == frame_offset);
+ __ Ldr(value, MemOperand(fp, frame_offset));
+}
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
DCHECK(POINTER_SIZE_ALIGN(frame_offset) == frame_offset);
@@ -4186,11 +4048,6 @@ void FullCodeGenerator::ClearPendingMessage() {
}
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
- DCHECK(!slot.IsInvalid());
- __ Mov(VectorStoreICTrampolineDescriptor::SlotRegister(), SmiFromSlot(slot));
-}
-
void FullCodeGenerator::DeferredCommands::EmitCommands() {
__ Pop(result_register(), x1); // Restore the accumulator and get the token.
for (DeferredCommand cmd : commands_) {
@@ -4246,7 +4103,6 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
patcher.b(6, pl);
break;
case ON_STACK_REPLACEMENT:
- case OSR_AFTER_STACK_CHECK:
// <decrement profiling counter>
// .. .. .. .. mov x0, x0 (NOP)
// .. .. .. .. ldr x16, pc+<on-stack replacement address>
@@ -4267,9 +4123,6 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
isolate->builtins()->InterruptCheck()->entry())) ||
(Memory::uint64_at(interrupt_address_pointer) ==
reinterpret_cast<uint64_t>(
- isolate->builtins()->OsrAfterStackCheck()->entry())) ||
- (Memory::uint64_at(interrupt_address_pointer) ==
- reinterpret_cast<uint64_t>(
isolate->builtins()->OnStackReplacement()->entry())));
Memory::uint64_at(interrupt_address_pointer) =
reinterpret_cast<uint64_t>(replacement_code->entry());
@@ -4295,9 +4148,6 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
if (entry == reinterpret_cast<uint64_t>(
isolate->builtins()->OnStackReplacement()->entry())) {
return ON_STACK_REPLACEMENT;
- } else if (entry == reinterpret_cast<uint64_t>(
- isolate->builtins()->OsrAfterStackCheck()->entry())) {
- return OSR_AFTER_STACK_CHECK;
} else {
UNREACHABLE();
}
diff --git a/deps/v8/src/full-codegen/full-codegen.cc b/deps/v8/src/full-codegen/full-codegen.cc
index 8255089f7e..af5dd41885 100644
--- a/deps/v8/src/full-codegen/full-codegen.cc
+++ b/deps/v8/src/full-codegen/full-codegen.cc
@@ -4,8 +4,8 @@
#include "src/full-codegen/full-codegen.h"
-#include "src/ast/ast.h"
#include "src/ast/ast-numbering.h"
+#include "src/ast/ast.h"
#include "src/ast/prettyprinter.h"
#include "src/ast/scopeinfo.h"
#include "src/ast/scopes.h"
@@ -14,6 +14,7 @@
#include "src/compiler.h"
#include "src/debug/debug.h"
#include "src/debug/liveedit.h"
+#include "src/frames-inl.h"
#include "src/isolate-inl.h"
#include "src/macro-assembler.h"
#include "src/snapshot/snapshot.h"
@@ -30,9 +31,6 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
TimerEventScope<TimerEventCompileFullCode> timer(info->isolate());
TRACE_EVENT0("v8", "V8.CompileFullCode");
- // Ensure that the feedback vector is large enough.
- info->EnsureFeedbackVector();
-
Handle<Script> script = info->script();
if (!script->IsUndefined() && !script->source()->IsUndefined()) {
int len = String::cast(script->source())->length();
@@ -67,7 +65,8 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
CodeGenerator::PrintCode(code, info);
info->SetCode(code);
void* line_info = masm.positions_recorder()->DetachJITHandlerData();
- LOG_CODE_EVENT(isolate, CodeEndLinePosInfoRecordEvent(*code, line_info));
+ LOG_CODE_EVENT(isolate, CodeEndLinePosInfoRecordEvent(
+ AbstractCode::cast(*code), line_info));
#ifdef DEBUG
// Check that no context-specific object has been embedded.
@@ -157,8 +156,7 @@ bool FullCodeGenerator::MustCreateObjectLiteralWithRuntime(
bool FullCodeGenerator::MustCreateArrayLiteralWithRuntime(
ArrayLiteral* expr) const {
- // TODO(rossberg): Teach strong mode to FastCloneShallowArrayStub.
- return expr->depth() > 1 || expr->is_strong() ||
+ return expr->depth() > 1 ||
expr->values()->length() > JSArray::kInitialMaxFastElementArray;
}
@@ -486,14 +484,14 @@ void FullCodeGenerator::CallRuntimeWithOperands(Runtime::FunctionId id) {
}
void FullCodeGenerator::OperandStackDepthIncrement(int count) {
+ DCHECK_IMPLIES(!HasStackOverflow(), operand_stack_depth_ >= 0);
DCHECK_GE(count, 0);
- DCHECK_GE(operand_stack_depth_, 0);
operand_stack_depth_ += count;
}
void FullCodeGenerator::OperandStackDepthDecrement(int count) {
+ DCHECK_IMPLIES(!HasStackOverflow(), operand_stack_depth_ >= count);
DCHECK_GE(count, 0);
- DCHECK_GE(operand_stack_depth_, count);
operand_stack_depth_ -= count;
}
@@ -562,9 +560,17 @@ void FullCodeGenerator::EmitIntrinsicAsStubCall(CallRuntime* expr,
}
}
__ Call(callable.code(), RelocInfo::CODE_TARGET);
+
+ // Reload the context register after the call as i.e. TurboFan code stubs
+ // won't preserve the context register.
+ LoadFromFrameField(StandardFrameConstants::kContextOffset,
+ context_register());
context()->Plug(result_register());
}
+void FullCodeGenerator::EmitNewObject(CallRuntime* expr) {
+ EmitIntrinsicAsStubCall(expr, CodeFactory::FastNewObject(isolate()));
+}
void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
EmitIntrinsicAsStubCall(expr, CodeFactory::NumberToString(isolate()));
@@ -585,6 +591,9 @@ void FullCodeGenerator::EmitToLength(CallRuntime* expr) {
EmitIntrinsicAsStubCall(expr, CodeFactory::ToLength(isolate()));
}
+void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
+ EmitIntrinsicAsStubCall(expr, CodeFactory::ToInteger(isolate()));
+}
void FullCodeGenerator::EmitToNumber(CallRuntime* expr) {
EmitIntrinsicAsStubCall(expr, CodeFactory::ToNumber(isolate()));
@@ -662,13 +671,16 @@ void FullCodeGenerator::SetExpressionAsStatementPosition(Expression* expr) {
}
}
-
-void FullCodeGenerator::SetCallPosition(Expression* expr) {
+void FullCodeGenerator::SetCallPosition(Expression* expr,
+ TailCallMode tail_call_mode) {
if (expr->position() == RelocInfo::kNoPosition) return;
RecordPosition(masm_, expr->position());
if (info_->is_debug()) {
+ RelocInfo::Mode mode = (tail_call_mode == TailCallMode::kAllow)
+ ? RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL
+ : RelocInfo::DEBUG_BREAK_SLOT_AT_CALL;
// Always emit a debug break slot before a call.
- DebugCodegen::GenerateSlot(masm_, RelocInfo::DEBUG_BREAK_SLOT_AT_CALL);
+ DebugCodegen::GenerateSlot(masm_, mode);
}
}
@@ -851,7 +863,6 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
void FullCodeGenerator::VisitBlock(Block* stmt) {
Comment cmnt(masm_, "[ Block");
NestedBlock nested_block(this, stmt);
- SetStatementPosition(stmt);
{
EnterBlockScopeIfNeeded block_scope_state(
@@ -880,7 +891,6 @@ void FullCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
void FullCodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
Comment cmnt(masm_, "[ EmptyStatement");
- SetStatementPosition(stmt);
}
@@ -913,7 +923,6 @@ void FullCodeGenerator::VisitIfStatement(IfStatement* stmt) {
void FullCodeGenerator::EmitContinue(Statement* target) {
NestedStatement* current = nesting_stack_;
- int stack_depth = 0;
int context_length = 0;
// When continuing, we clobber the unpredictable value in the accumulator
// with one that's safe for GC. If we hit an exit from the try block of
@@ -923,15 +932,17 @@ void FullCodeGenerator::EmitContinue(Statement* target) {
while (!current->IsContinueTarget(target)) {
if (current->IsTryFinally()) {
Comment cmnt(masm(), "[ Deferred continue through finally");
- current->Exit(&stack_depth, &context_length);
- DCHECK_EQ(0, stack_depth);
- DCHECK_EQ(0, context_length);
+ current->Exit(&context_length);
+ DCHECK_EQ(-1, context_length);
current->AsTryFinally()->deferred_commands()->RecordContinue(target);
return;
}
- current = current->Exit(&stack_depth, &context_length);
+ current = current->Exit(&context_length);
}
- __ Drop(stack_depth);
+ int stack_depth = current->GetStackDepthAtTarget();
+ int stack_drop = operand_stack_depth_ - stack_depth;
+ DCHECK_GE(stack_drop, 0);
+ __ Drop(stack_drop);
if (context_length > 0) {
while (context_length > 0) {
LoadContextField(context_register(), Context::PREVIOUS_INDEX);
@@ -952,7 +963,6 @@ void FullCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
void FullCodeGenerator::EmitBreak(Statement* target) {
NestedStatement* current = nesting_stack_;
- int stack_depth = 0;
int context_length = 0;
// When breaking, we clobber the unpredictable value in the accumulator
// with one that's safe for GC. If we hit an exit from the try block of
@@ -962,15 +972,17 @@ void FullCodeGenerator::EmitBreak(Statement* target) {
while (!current->IsBreakTarget(target)) {
if (current->IsTryFinally()) {
Comment cmnt(masm(), "[ Deferred break through finally");
- current->Exit(&stack_depth, &context_length);
- DCHECK_EQ(0, stack_depth);
- DCHECK_EQ(0, context_length);
+ current->Exit(&context_length);
+ DCHECK_EQ(-1, context_length);
current->AsTryFinally()->deferred_commands()->RecordBreak(target);
return;
}
- current = current->Exit(&stack_depth, &context_length);
+ current = current->Exit(&context_length);
}
- __ Drop(stack_depth);
+ int stack_depth = current->GetStackDepthAtTarget();
+ int stack_drop = operand_stack_depth_ - stack_depth;
+ DCHECK_GE(stack_drop, 0);
+ __ Drop(stack_drop);
if (context_length > 0) {
while (context_length > 0) {
LoadContextField(context_register(), Context::PREVIOUS_INDEX);
@@ -991,23 +1003,56 @@ void FullCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
void FullCodeGenerator::EmitUnwindAndReturn() {
NestedStatement* current = nesting_stack_;
- int stack_depth = 0;
int context_length = 0;
while (current != NULL) {
if (current->IsTryFinally()) {
Comment cmnt(masm(), "[ Deferred return through finally");
- current->Exit(&stack_depth, &context_length);
- DCHECK_EQ(0, stack_depth);
- DCHECK_EQ(0, context_length);
+ current->Exit(&context_length);
+ DCHECK_EQ(-1, context_length);
current->AsTryFinally()->deferred_commands()->RecordReturn();
return;
}
- current = current->Exit(&stack_depth, &context_length);
+ current = current->Exit(&context_length);
}
- __ Drop(stack_depth);
EmitReturnSequence();
}
+void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
+ bool pretenure) {
+ // Use the fast case closure allocation code that allocates in new
+ // space for nested functions that don't need literals cloning. If
+ // we're running with the --always-opt or the --prepare-always-opt
+ // flag, we need to use the runtime function so that the new function
+ // we are creating here gets a chance to have its code optimized and
+ // doesn't just get a copy of the existing unoptimized code.
+ if (!FLAG_always_opt &&
+ !FLAG_prepare_always_opt &&
+ !pretenure &&
+ scope()->is_function_scope() &&
+ info->num_literals() == 0) {
+ FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
+ __ Move(stub.GetCallInterfaceDescriptor().GetRegisterParameter(0), info);
+ __ CallStub(&stub);
+ } else {
+ __ Push(info);
+ __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
+ : Runtime::kNewClosure);
+ }
+ context()->Plug(result_register());
+}
+
+void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+ SetExpressionPosition(prop);
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(!key->value()->IsSmi());
+ DCHECK(!prop->IsSuperAccess());
+
+ __ Move(LoadDescriptor::NameRegister(), key->value());
+ __ Move(LoadDescriptor::SlotRegister(),
+ SmiFromSlot(prop->PropertyFeedbackSlot()));
+ CallLoadIC(NOT_INSIDE_TYPEOF);
+}
+
void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object
SetExpressionPosition(prop);
@@ -1041,6 +1086,10 @@ void FullCodeGenerator::EmitPropertyKey(ObjectLiteralProperty* property,
PushOperand(result_register());
}
+void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
+ DCHECK(!slot.IsInvalid());
+ __ Move(VectorStoreICTrampolineDescriptor::SlotRegister(), SmiFromSlot(slot));
+}
void FullCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
Comment cmnt(masm_, "[ ReturnStatement");
@@ -1158,7 +1207,6 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
Iteration loop_statement(this, stmt);
if (stmt->init() != NULL) {
- SetStatementPosition(stmt->init());
Visit(stmt->init());
}
@@ -1236,6 +1284,11 @@ void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
decrement_loop_depth();
}
+void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+ LoadFromFrameField(JavaScriptFrameConstants::kFunctionOffset,
+ result_register());
+ context()->Plug(result_register());
+}
void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
Comment cmnt(masm_, "[ TryCatchStatement");
@@ -1250,7 +1303,7 @@ void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
Label try_entry, handler_entry, exit;
__ jmp(&try_entry);
__ bind(&handler_entry);
- ClearPendingMessage();
+ if (stmt->clear_pending_message()) ClearPendingMessage();
// Exception handler code, the exception is in the result register.
// Extend the context before executing the catch block.
@@ -1281,7 +1334,8 @@ void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
try_catch_depth_++;
int handler_index = NewHandlerTableEntry();
EnterTryBlock(handler_index, &handler_entry);
- { TryCatch try_body(this);
+ {
+ Comment cmnt_try(masm(), "[ Try block");
Visit(stmt->try_block());
}
ExitTryBlock(handler_index);
@@ -1322,7 +1376,7 @@ void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// Exception handler code. This code is only executed when an exception
// is thrown. Record the continuation and jump to the finally block.
{
- Comment cmt_handler(masm(), "[ Finally handler");
+ Comment cmnt_handler(masm(), "[ Finally handler");
deferred.RecordThrow();
}
@@ -1331,6 +1385,7 @@ void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
int handler_index = NewHandlerTableEntry();
EnterTryBlock(handler_index, &handler_entry);
{
+ Comment cmnt_try(masm(), "[ Try block");
TryFinally try_body(this, &deferred);
Visit(stmt->try_block());
}
@@ -1345,15 +1400,14 @@ void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// Finally block implementation.
__ bind(&finally_entry);
- Comment cmnt_finally(masm(), "[ Finally block");
- OperandStackDepthIncrement(2); // Token and accumulator are on stack.
- EnterFinallyBlock();
{
- Finally finally_body(this);
+ Comment cmnt_finally(masm(), "[ Finally block");
+ OperandStackDepthIncrement(2); // Token and accumulator are on stack.
+ EnterFinallyBlock();
Visit(stmt->finally_block());
+ ExitFinallyBlock();
+ OperandStackDepthDecrement(2); // Token and accumulator were on stack.
}
- ExitFinallyBlock();
- OperandStackDepthDecrement(2); // Token and accumulator were on stack.
{
Comment cmnt_deferred(masm(), "[ Post-finally dispatch");
@@ -1434,6 +1488,7 @@ void FullCodeGenerator::VisitClassLiteral(ClassLiteral* lit) {
Comment cmnt(masm_, "[ ClassLiteral");
{
+ NestedClassLiteral nested_class_literal(this, lit);
EnterBlockScopeIfNeeded block_scope_state(
this, lit->scope(), lit->EntryId(), lit->DeclsId(), lit->ExitId());
@@ -1463,8 +1518,7 @@ void FullCodeGenerator::VisitClassLiteral(ClassLiteral* lit) {
EmitClassDefineProperties(lit);
- // Set both the prototype and constructor to have fast properties, and also
- // freeze them in strong mode.
+ // Set both the prototype and constructor to have fast properties.
CallRuntimeWithOperands(Runtime::kFinalizeClassDefinition);
if (lit->class_variable_proxy() != nullptr) {
@@ -1586,6 +1640,49 @@ void FullCodeGenerator::VisitCall(Call* expr) {
#endif
}
+void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+
+ if (expr->is_jsruntime()) {
+ Comment cmnt(masm_, "[ CallRuntime");
+ EmitLoadJSRuntimeFunction(expr);
+
+ // Push the arguments ("left-to-right").
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+ EmitCallJSRuntimeFunction(expr);
+ context()->DropAndPlug(1, result_register());
+
+ } else {
+ const Runtime::Function* function = expr->function();
+ switch (function->function_id) {
+#define CALL_INTRINSIC_GENERATOR(Name) \
+ case Runtime::kInline##Name: { \
+ Comment cmnt(masm_, "[ Inline" #Name); \
+ return Emit##Name(expr); \
+ }
+ FOR_EACH_FULL_CODE_INTRINSIC(CALL_INTRINSIC_GENERATOR)
+#undef CALL_INTRINSIC_GENERATOR
+ default: {
+ Comment cmnt(masm_, "[ CallRuntime for unhandled intrinsic");
+ // Push the arguments ("left-to-right").
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the C runtime function.
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+ __ CallRuntime(expr->function(), arg_count);
+ OperandStackDepthDecrement(arg_count);
+ context()->Plug(result_register());
+ }
+ }
+ }
+}
void FullCodeGenerator::VisitSpread(Spread* expr) { UNREACHABLE(); }
@@ -1599,28 +1696,32 @@ void FullCodeGenerator::VisitRewritableExpression(RewritableExpression* expr) {
Visit(expr->expression());
}
-
FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
- int* stack_depth, int* context_length) {
+ int* context_length) {
// The macros used here must preserve the result register.
+ // Calculate how many operands to drop to get down to handler block.
+ int stack_drop = codegen_->operand_stack_depth_ - GetStackDepthAtTarget();
+ DCHECK_GE(stack_drop, 0);
+
// Because the handler block contains the context of the finally
// code, we can restore it directly from there for the finally code
// rather than iteratively unwinding contexts via their previous
// links.
if (*context_length > 0) {
- __ Drop(*stack_depth); // Down to the handler block.
+ __ Drop(stack_drop); // Down to the handler block.
// Restore the context to its dedicated register and the stack.
- STATIC_ASSERT(TryFinally::kElementCount == 1);
+ STATIC_ASSERT(TryBlockConstant::kElementCount == 1);
__ Pop(codegen_->context_register());
codegen_->StoreToFrameField(StandardFrameConstants::kContextOffset,
codegen_->context_register());
} else {
// Down to the handler block and also drop context.
- __ Drop(*stack_depth + kElementCount);
+ __ Drop(stack_drop + TryBlockConstant::kElementCount);
}
- *stack_depth = 0;
- *context_length = 0;
+
+ // The caller will ignore outputs.
+ *context_length = -1;
return previous_;
}
@@ -1671,7 +1772,7 @@ bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) {
return true;
}
- if (expr->IsLiteralCompareUndefined(&sub_expr, isolate())) {
+ if (expr->IsLiteralCompareUndefined(&sub_expr)) {
EmitLiteralCompareNil(expr, sub_expr, kUndefinedValue);
return true;
}
@@ -1733,27 +1834,6 @@ void BackEdgeTable::Revert(Isolate* isolate, Code* unoptimized) {
}
-void BackEdgeTable::AddStackCheck(Handle<Code> code, uint32_t pc_offset) {
- DisallowHeapAllocation no_gc;
- Isolate* isolate = code->GetIsolate();
- Address pc = code->instruction_start() + pc_offset;
- Code* patch = isolate->builtins()->builtin(Builtins::kOsrAfterStackCheck);
- PatchAt(*code, pc, OSR_AFTER_STACK_CHECK, patch);
-}
-
-
-void BackEdgeTable::RemoveStackCheck(Handle<Code> code, uint32_t pc_offset) {
- DisallowHeapAllocation no_gc;
- Isolate* isolate = code->GetIsolate();
- Address pc = code->instruction_start() + pc_offset;
-
- if (OSR_AFTER_STACK_CHECK == GetBackEdgeState(isolate, *code, pc)) {
- Code* patch = isolate->builtins()->builtin(Builtins::kOnStackReplacement);
- PatchAt(*code, pc, ON_STACK_REPLACEMENT, patch);
- }
-}
-
-
#ifdef DEBUG
bool BackEdgeTable::Verify(Isolate* isolate, Code* unoptimized) {
DisallowHeapAllocation no_gc;
diff --git a/deps/v8/src/full-codegen/full-codegen.h b/deps/v8/src/full-codegen/full-codegen.h
index 6ab02313bb..0c12937149 100644
--- a/deps/v8/src/full-codegen/full-codegen.h
+++ b/deps/v8/src/full-codegen/full-codegen.h
@@ -93,6 +93,12 @@ class FullCodeGenerator: public AstVisitor {
static const int kCodeSizeMultiplier = 149;
#elif V8_TARGET_ARCH_MIPS64
static const int kCodeSizeMultiplier = 149;
+#elif V8_TARGET_ARCH_S390
+// TODO(joransiu): Copied PPC value. Check this is sensible for S390.
+ static const int kCodeSizeMultiplier = 200;
+#elif V8_TARGET_ARCH_S390X
+// TODO(joransiu): Copied PPC value. Check this is sensible for S390X.
+ static const int kCodeSizeMultiplier = 200;
#else
#error Unsupported target architecture.
#endif
@@ -108,7 +114,9 @@ class FullCodeGenerator: public AstVisitor {
class NestedStatement BASE_EMBEDDED {
public:
- explicit NestedStatement(FullCodeGenerator* codegen) : codegen_(codegen) {
+ explicit NestedStatement(FullCodeGenerator* codegen)
+ : codegen_(codegen),
+ stack_depth_at_target_(codegen->operand_stack_depth_) {
// Link into codegen's nesting stack.
previous_ = codegen->nesting_stack_;
codegen->nesting_stack_ = this;
@@ -130,18 +138,20 @@ class FullCodeGenerator: public AstVisitor {
// Notify the statement that we are exiting it via break, continue, or
// return and give it a chance to generate cleanup code. Return the
// next outer statement in the nesting stack. We accumulate in
- // *stack_depth the amount to drop the stack and in *context_length the
- // number of context chain links to unwind as we traverse the nesting
- // stack from an exit to its target.
- virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
- return previous_;
- }
+ // {*context_length} the number of context chain links to unwind as we
+ // traverse the nesting stack from an exit to its target.
+ virtual NestedStatement* Exit(int* context_length) { return previous_; }
+
+ // Determine the expected operand stack depth when this statement is being
+ // used as the target of an exit. The caller will drop to this depth.
+ int GetStackDepthAtTarget() { return stack_depth_at_target_; }
protected:
MacroAssembler* masm() { return codegen_->masm(); }
FullCodeGenerator* codegen_;
NestedStatement* previous_;
+ int stack_depth_at_target_;
private:
DISALLOW_COPY_AND_ASSIGN(NestedStatement);
@@ -192,7 +202,7 @@ class FullCodeGenerator: public AstVisitor {
: Breakable(codegen, block) {
}
- NestedStatement* Exit(int* stack_depth, int* context_length) override {
+ NestedStatement* Exit(int* context_length) override {
auto block_scope = statement()->AsBlock()->scope();
if (block_scope != nullptr) {
if (block_scope->ContextLocalCount() > 0) ++(*context_length);
@@ -201,17 +211,21 @@ class FullCodeGenerator: public AstVisitor {
}
};
- // The try block of a try/catch statement.
- class TryCatch : public NestedStatement {
+ // A class literal expression
+ class NestedClassLiteral : public NestedStatement {
public:
- static const int kElementCount = TryBlockConstant::kElementCount;
-
- explicit TryCatch(FullCodeGenerator* codegen) : NestedStatement(codegen) {}
+ NestedClassLiteral(FullCodeGenerator* codegen, ClassLiteral* lit)
+ : NestedStatement(codegen),
+ needs_context_(lit->scope() != nullptr &&
+ lit->scope()->NeedsContext()) {}
- NestedStatement* Exit(int* stack_depth, int* context_length) override {
- *stack_depth += kElementCount;
+ NestedStatement* Exit(int* context_length) override {
+ if (needs_context_) ++(*context_length);
return previous_;
}
+
+ private:
+ const bool needs_context_;
};
class DeferredCommands {
@@ -254,12 +268,10 @@ class FullCodeGenerator: public AstVisitor {
// The try block of a try/finally statement.
class TryFinally : public NestedStatement {
public:
- static const int kElementCount = TryBlockConstant::kElementCount;
-
TryFinally(FullCodeGenerator* codegen, DeferredCommands* commands)
: NestedStatement(codegen), deferred_commands_(commands) {}
- NestedStatement* Exit(int* stack_depth, int* context_length) override;
+ NestedStatement* Exit(int* context_length) override;
bool IsTryFinally() override { return true; }
TryFinally* AsTryFinally() override { return this; }
@@ -270,35 +282,6 @@ class FullCodeGenerator: public AstVisitor {
DeferredCommands* deferred_commands_;
};
- // The finally block of a try/finally statement.
- class Finally : public NestedStatement {
- public:
- static const int kElementCount = 3;
-
- explicit Finally(FullCodeGenerator* codegen) : NestedStatement(codegen) {}
-
- NestedStatement* Exit(int* stack_depth, int* context_length) override {
- *stack_depth += kElementCount;
- return previous_;
- }
- };
-
- // The body of a for/in loop.
- class ForIn : public Iteration {
- public:
- static const int kElementCount = 5;
-
- ForIn(FullCodeGenerator* codegen, ForInStatement* statement)
- : Iteration(codegen, statement) {
- }
-
- NestedStatement* Exit(int* stack_depth, int* context_length) override {
- *stack_depth += kElementCount;
- return previous_;
- }
- };
-
-
// The body of a with or catch.
class WithOrCatch : public NestedStatement {
public:
@@ -306,7 +289,7 @@ class FullCodeGenerator: public AstVisitor {
: NestedStatement(codegen) {
}
- NestedStatement* Exit(int* stack_depth, int* context_length) override {
+ NestedStatement* Exit(int* context_length) override {
++(*context_length);
return previous_;
}
@@ -537,6 +520,7 @@ class FullCodeGenerator: public AstVisitor {
F(IsRegExp) \
F(IsJSProxy) \
F(Call) \
+ F(NewObject) \
F(ValueOf) \
F(StringCharFromCode) \
F(StringCharAt) \
@@ -564,6 +548,7 @@ class FullCodeGenerator: public AstVisitor {
F(ToName) \
F(ToObject) \
F(DebugIsActive) \
+ F(GetOrdinaryHasInstance) \
F(CreateIterResultObject)
#define GENERATOR_DECLARATION(Name) void Emit##Name(CallRuntime* call);
@@ -681,6 +666,8 @@ class FullCodeGenerator: public AstVisitor {
void EmitSetHomeObjectAccumulator(Expression* initializer, int offset,
FeedbackVectorSlot slot);
+ void EmitLoadStoreICSlot(FeedbackVectorSlot slot);
+
void CallIC(Handle<Code> code,
TypeFeedbackId id = TypeFeedbackId::None());
@@ -707,7 +694,8 @@ class FullCodeGenerator: public AstVisitor {
// This is used in loop headers where we want to break for each iteration.
void SetExpressionAsStatementPosition(Expression* expr);
- void SetCallPosition(Expression* expr);
+ void SetCallPosition(Expression* expr,
+ TailCallMode tail_call_mode = TailCallMode::kDisallow);
void SetConstructCallPosition(Expression* expr) {
// Currently call and construct calls are treated the same wrt debugging.
@@ -743,13 +731,16 @@ class FullCodeGenerator: public AstVisitor {
Handle<Script> script() { return info_->script(); }
bool is_eval() { return info_->is_eval(); }
bool is_native() { return info_->is_native(); }
- LanguageMode language_mode() { return literal()->language_mode(); }
+ LanguageMode language_mode() { return scope()->language_mode(); }
bool has_simple_parameters() { return info_->has_simple_parameters(); }
FunctionLiteral* literal() const { return info_->literal(); }
Scope* scope() { return scope_; }
static Register context_register();
+ // Get fields from the stack frame. Offsets are the frame pointer relative
+ // offsets defined in, e.g., StandardFrameConstants.
+ void LoadFromFrameField(int frame_offset, Register value);
// Set fields in the stack frame. Offsets are the frame pointer relative
// offsets defined in, e.g., StandardFrameConstants.
void StoreToFrameField(int frame_offset, Register value);
@@ -783,8 +774,6 @@ class FullCodeGenerator: public AstVisitor {
bool MustCreateObjectLiteralWithRuntime(ObjectLiteral* expr) const;
bool MustCreateArrayLiteralWithRuntime(ArrayLiteral* expr) const;
- void EmitLoadStoreICSlot(FeedbackVectorSlot slot);
-
int NewHandlerTableEntry();
struct BailoutEntry {
@@ -1046,11 +1035,7 @@ class BackEdgeTable {
return instruction_start_ + pc_offset(index);
}
- enum BackEdgeState {
- INTERRUPT,
- ON_STACK_REPLACEMENT,
- OSR_AFTER_STACK_CHECK
- };
+ enum BackEdgeState { INTERRUPT, ON_STACK_REPLACEMENT };
// Increase allowed loop nesting level by one and patch those matching loops.
static void Patch(Isolate* isolate, Code* unoptimized_code);
@@ -1065,13 +1050,6 @@ class BackEdgeTable {
static void Revert(Isolate* isolate,
Code* unoptimized_code);
- // Change a back edge patched for on-stack replacement to perform a
- // stack check first.
- static void AddStackCheck(Handle<Code> code, uint32_t pc_offset);
-
- // Revert the patch by AddStackCheck.
- static void RemoveStackCheck(Handle<Code> code, uint32_t pc_offset);
-
// Return the current patch state of the back edge.
static BackEdgeState GetBackEdgeState(Isolate* isolate,
Code* unoptimized_code,
diff --git a/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc b/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
index 3dc5da12c6..f1945c897c 100644
--- a/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
@@ -288,39 +288,35 @@ void FullCodeGenerator::Generate() {
__ CallRuntime(Runtime::kTraceEnter);
}
- // Visit the declarations and body unless there is an illegal
- // redeclaration.
- if (scope()->HasIllegalRedeclaration()) {
+ // Visit the declarations and body.
+ PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+ {
Comment cmnt(masm_, "[ Declarations");
- VisitForEffect(scope()->GetIllegalRedeclaration());
+ VisitDeclarations(scope()->declarations());
+ }
- } else {
- PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
- { Comment cmnt(masm_, "[ Declarations");
- VisitDeclarations(scope()->declarations());
- }
+ // Assert that the declarations do not use ICs. Otherwise the debugger
+ // won't be able to redirect a PC at an IC to the correct IC in newly
+ // recompiled code.
+ DCHECK_EQ(0, ic_total_count_);
- // Assert that the declarations do not use ICs. Otherwise the debugger
- // won't be able to redirect a PC at an IC to the correct IC in newly
- // recompiled code.
- DCHECK_EQ(0, ic_total_count_);
-
- { Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
- Label ok;
- ExternalReference stack_limit
- = ExternalReference::address_of_stack_limit(isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, Label::kNear);
- __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
- __ bind(&ok);
- }
+ {
+ Comment cmnt(masm_, "[ Stack check");
+ PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+ Label ok;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &ok, Label::kNear);
+ __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+ __ bind(&ok);
+ }
- { Comment cmnt(masm_, "[ Body");
- DCHECK(loop_depth() == 0);
- VisitStatements(literal()->body());
- DCHECK(loop_depth() == 0);
- }
+ {
+ Comment cmnt(masm_, "[ Body");
+ DCHECK(loop_depth() == 0);
+ VisitStatements(literal()->body());
+ DCHECK(loop_depth() == 0);
}
// Always emit a 'return undefined' in case control fell off the end of
@@ -487,7 +483,7 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
true,
true_label_,
false_label_);
- DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
+ DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
if (false_label_ != fall_through_) __ jmp(false_label_);
} else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -595,7 +591,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_true,
Label* if_false,
Label* fall_through) {
- Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+ Handle<Code> ic = ToBooleanICStub::GetUninitialized(isolate());
CallIC(ic, condition->test_id());
__ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
Split(equal, if_true, if_false, fall_through);
@@ -934,14 +930,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
- Label loop, exit;
- ForIn loop_statement(this, stmt);
- increment_loop_depth();
-
// Get the object to enumerate over.
SetExpressionAsStatementPosition(stmt->enumerable());
VisitForAccumulatorValue(stmt->enumerable());
- OperandStackDepthIncrement(ForIn::kElementCount);
+ OperandStackDepthIncrement(5);
+
+ Label loop, exit;
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
// If the object is null or undefined, skip over the loop, otherwise convert
// it to a JS receiver. See ECMA-262 version 5, section 12.6.4.
@@ -1008,11 +1004,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// We got a fixed array in register eax. Iterate through that.
__ bind(&fixed_array);
- // No need for a write barrier, we are storing a Smi in the feedback vector.
- int const vector_index = SmiFromSlot(slot)->value();
- __ EmitLoadTypeFeedbackVector(ebx);
- __ mov(FieldOperand(ebx, FixedArray::OffsetOfElementAt(vector_index)),
- Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
__ push(Immediate(Smi::FromInt(1))); // Smi(1) indicates slow check
__ push(eax); // Array
__ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
@@ -1043,12 +1034,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset));
__ j(equal, &update_each, Label::kNear);
- // We might get here from TurboFan or Crankshaft when something in the
- // for-in loop body deopts and only now notice in fullcodegen, that we
- // can now longer use the enum cache, i.e. left fast mode. So better record
- // this information here, in case we later OSR back into this loop or
- // reoptimize the whole function w/o rerunning the loop with the slow
- // mode object in fullcodegen (which would result in a deopt loop).
+ // We need to filter the key, record slow-path here.
+ int const vector_index = SmiFromSlot(slot)->value();
__ EmitLoadTypeFeedbackVector(edx);
__ mov(FieldOperand(edx, FixedArray::OffsetOfElementAt(vector_index)),
Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
@@ -1089,8 +1076,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Remove the pointers stored on the stack.
__ bind(loop_statement.break_label());
- __ add(esp, Immediate(5 * kPointerSize));
- OperandStackDepthDecrement(ForIn::kElementCount);
+ DropOperands(5);
// Exit and decrement the loop depth.
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
@@ -1099,31 +1085,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
}
-void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
- bool pretenure) {
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning. If
- // we're running with the --always-opt or the --prepare-always-opt
- // flag, we need to use the runtime function so that the new function
- // we are creating here gets a chance to have its code optimized and
- // doesn't just get a copy of the existing unoptimized code.
- if (!FLAG_always_opt &&
- !FLAG_prepare_always_opt &&
- !pretenure &&
- scope()->is_function_scope() &&
- info->num_literals() == 0) {
- FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
- __ mov(ebx, Immediate(info));
- __ CallStub(&stub);
- } else {
- __ push(Immediate(info));
- __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
- : Runtime::kNewClosure);
- }
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
@@ -1554,12 +1515,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
}
- if (expr->has_function()) {
- DCHECK(result_saved);
- __ push(Operand(esp, 0));
- __ CallRuntime(Runtime::kToFastProperties);
- }
-
if (result_saved) {
context()->PlugTOS();
} else {
@@ -1801,64 +1756,44 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// this. It stays on the stack while we update the iterator.
VisitForStackValue(expr->expression());
- switch (expr->yield_kind()) {
- case Yield::kSuspend:
- // Pop value from top-of-stack slot; box result into result register.
- EmitCreateIteratorResult(false);
- __ push(result_register());
- // Fall through.
- case Yield::kInitial: {
- Label suspend, continuation, post_runtime, resume;
-
- __ jmp(&suspend);
- __ bind(&continuation);
- // When we arrive here, the stack top is the resume mode and
- // result_register() holds the input value (the argument given to the
- // respective resume operation).
- __ RecordGeneratorContinuation();
- __ pop(ebx);
- __ cmp(ebx, Immediate(Smi::FromInt(JSGeneratorObject::RETURN)));
- __ j(not_equal, &resume);
- __ push(result_register());
- EmitCreateIteratorResult(true);
- EmitUnwindAndReturn();
-
- __ bind(&suspend);
- VisitForAccumulatorValue(expr->generator_object());
- DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
- __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
- Immediate(Smi::FromInt(continuation.pos())));
- __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
- __ mov(ecx, esi);
- __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
- kDontSaveFPRegs);
- __ lea(ebx, Operand(ebp, StandardFrameConstants::kExpressionsOffset));
- __ cmp(esp, ebx);
- __ j(equal, &post_runtime);
- __ push(eax); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ mov(context_register(),
- Operand(ebp, StandardFrameConstants::kContextOffset));
- __ bind(&post_runtime);
- PopOperand(result_register());
- EmitReturnSequence();
-
- __ bind(&resume);
- context()->Plug(result_register());
- break;
- }
-
- case Yield::kFinal: {
- // Pop value from top-of-stack slot, box result into result register.
- OperandStackDepthDecrement(1);
- EmitCreateIteratorResult(true);
- EmitUnwindAndReturn();
- break;
- }
-
- case Yield::kDelegating:
- UNREACHABLE();
- }
+ Label suspend, continuation, post_runtime, resume;
+
+ __ jmp(&suspend);
+ __ bind(&continuation);
+ // When we arrive here, the stack top is the resume mode and
+ // result_register() holds the input value (the argument given to the
+ // respective resume operation).
+ __ RecordGeneratorContinuation();
+ __ pop(ebx);
+ __ cmp(ebx, Immediate(Smi::FromInt(JSGeneratorObject::RETURN)));
+ __ j(not_equal, &resume);
+ __ push(result_register());
+ EmitCreateIteratorResult(true);
+ EmitUnwindAndReturn();
+
+ __ bind(&suspend);
+ OperandStackDepthIncrement(1); // Not popped on this path.
+ VisitForAccumulatorValue(expr->generator_object());
+ DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+ __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
+ Immediate(Smi::FromInt(continuation.pos())));
+ __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
+ __ mov(ecx, esi);
+ __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
+ kDontSaveFPRegs);
+ __ lea(ebx, Operand(ebp, StandardFrameConstants::kExpressionsOffset));
+ __ cmp(esp, ebx);
+ __ j(equal, &post_runtime);
+ __ push(eax); // generator object
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ mov(context_register(),
+ Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ bind(&post_runtime);
+ PopOperand(result_register());
+ EmitReturnSequence();
+
+ __ bind(&resume);
+ context()->Plug(result_register());
}
@@ -1886,7 +1821,10 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// Push receiver.
__ push(FieldOperand(ebx, JSGeneratorObject::kReceiverOffset));
- // Push holes for arguments to generator function.
+ // Push holes for arguments to generator function. Since the parser forced
+ // context allocation for any variables in generators, the actual argument
+ // values have already been copied into the context and these dummy values
+ // will never be used.
__ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(edx,
FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -1991,19 +1929,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ mov(FieldOperand(eax, JSIteratorResult::kDoneOffset),
isolate()->factory()->ToBoolean(done));
STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
- SetExpressionPosition(prop);
- Literal* key = prop->key()->AsLiteral();
- DCHECK(!key->value()->IsSmi());
- DCHECK(!prop->IsSuperAccess());
-
- __ mov(LoadDescriptor::NameRegister(), Immediate(key->value()));
- __ mov(LoadDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallLoadIC(NOT_INSIDE_TYPEOF);
+ OperandStackDepthDecrement(1);
}
@@ -2597,7 +2523,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
}
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
- SetCallPosition(expr);
+ SetCallPosition(expr, expr->tail_call_mode());
if (expr->tail_call_mode() == TailCallMode::kAllow) {
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceTailCall);
@@ -3078,23 +3004,6 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into eax and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- // Convert the object to an integer.
- Label done_convert;
- __ JumpIfSmi(eax, &done_convert, Label::kNear);
- __ Push(eax);
- __ CallRuntime(Runtime::kToInteger);
- __ bind(&done_convert);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3278,6 +3187,12 @@ void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
context()->Plug(eax);
}
+void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
+ DCHECK_EQ(0, expr->arguments()->length());
+ __ mov(eax, NativeContextOperand());
+ __ mov(eax, ContextOperand(eax, Context::ORDINARY_HAS_INSTANCE_INDEX));
+ context()->Plug(eax);
+}
void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
DCHECK(expr->arguments()->length() == 0);
@@ -3319,10 +3234,12 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
+ // Push function.
+ __ LoadGlobalFunction(expr->context_index(), eax);
+ PushOperand(eax);
+
// Push undefined as receiver.
PushOperand(isolate()->factory()->undefined_value());
-
- __ LoadGlobalFunction(expr->context_index(), eax);
}
@@ -3336,58 +3253,9 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
-}
-
-
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- if (expr->is_jsruntime()) {
- Comment cmnt(masm_, "[ CallRuntime");
- EmitLoadJSRuntimeFunction(expr);
-
- // Push the target function under the receiver.
- PushOperand(Operand(esp, 0));
- __ mov(Operand(esp, kPointerSize), eax);
-
- // Push the arguments ("left-to-right").
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
- EmitCallJSRuntimeFunction(expr);
-
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, eax);
-
- } else {
- const Runtime::Function* function = expr->function();
- switch (function->function_id) {
-#define CALL_INTRINSIC_GENERATOR(Name) \
- case Runtime::kInline##Name: { \
- Comment cmnt(masm_, "[ Inline" #Name); \
- return Emit##Name(expr); \
- }
- FOR_EACH_FULL_CODE_INTRINSIC(CALL_INTRINSIC_GENERATOR)
-#undef CALL_INTRINSIC_GENERATOR
- default: {
- Comment cmnt(masm_, "[ CallRuntime for unhandled intrinsic");
- // Push the arguments ("left-to-right").
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- // Call the C runtime function.
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
- __ CallRuntime(expr->function(), arg_count);
- OperandStackDepthDecrement(arg_count);
- context()->Plug(eax);
- }
- }
- }
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}
@@ -3632,11 +3500,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ jmp(&stub_call, Label::kNear);
__ bind(&slow);
}
- if (!is_strong(language_mode())) {
- ToNumberStub convert_stub(isolate());
- __ CallStub(&convert_stub);
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
- }
+
+ // Convert old value into a number.
+ ToNumberStub convert_stub(isolate());
+ __ CallStub(&convert_stub);
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -3676,9 +3544,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
patch_site.EmitPatchInfo();
__ bind(&done);
- if (is_strong(language_mode())) {
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
- }
// Store the value returned in eax.
switch (assign_type) {
case VARIABLE:
@@ -3804,7 +3669,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
// Check for undetectable objects => true.
__ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
__ test_b(FieldOperand(edx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
+ Immediate(1 << Map::kIsUndetectable));
Split(not_zero, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->function_string())) {
__ JumpIfSmi(eax, if_false);
@@ -3823,7 +3688,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ j(below, if_false);
// Check for callable or undetectable objects => false.
__ test_b(FieldOperand(edx, Map::kBitFieldOffset),
- (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
+ Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
Split(zero, if_true, if_false, fall_through);
// clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
@@ -3934,21 +3799,16 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
__ cmp(eax, nil_value);
Split(equal, if_true, if_false, fall_through);
} else {
- Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, expr->CompareOperationFeedbackId());
- __ cmp(eax, isolate()->factory()->true_value());
- Split(equal, if_true, if_false, fall_through);
+ __ JumpIfSmi(eax, if_false);
+ __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(eax, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ Split(not_zero, if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- context()->Plug(eax);
-}
-
-
Register FullCodeGenerator::result_register() {
return eax;
}
@@ -3958,6 +3818,10 @@ Register FullCodeGenerator::context_register() {
return esi;
}
+void FullCodeGenerator::LoadFromFrameField(int frame_offset, Register value) {
+ DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
+ __ mov(value, Operand(ebp, frame_offset));
+}
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
@@ -4024,12 +3888,6 @@ void FullCodeGenerator::ClearPendingMessage() {
}
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
- DCHECK(!slot.IsInvalid());
- __ mov(VectorStoreICTrampolineDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(slot)));
-}
-
void FullCodeGenerator::DeferredCommands::EmitCommands() {
DCHECK(!result_register().is(edx));
__ Pop(result_register()); // Restore the accumulator.
@@ -4087,7 +3945,6 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
*jns_offset_address = kJnsOffset;
break;
case ON_STACK_REPLACEMENT:
- case OSR_AFTER_STACK_CHECK:
// sub <profiling_counter>, <delta> ;; Not changed
// nop
// nop
@@ -4125,15 +3982,10 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
DCHECK_EQ(kNopByteOne, *jns_instr_address);
DCHECK_EQ(kNopByteTwo, *(call_target_address - 2));
- if (Assembler::target_address_at(call_target_address, unoptimized_code) ==
- isolate->builtins()->OnStackReplacement()->entry()) {
- return ON_STACK_REPLACEMENT;
- }
-
- DCHECK_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(),
- Assembler::target_address_at(call_target_address,
- unoptimized_code));
- return OSR_AFTER_STACK_CHECK;
+ DCHECK_EQ(
+ isolate->builtins()->OnStackReplacement()->entry(),
+ Assembler::target_address_at(call_target_address, unoptimized_code));
+ return ON_STACK_REPLACEMENT;
}
diff --git a/deps/v8/src/full-codegen/mips/full-codegen-mips.cc b/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
index 3b34cb3a98..f329a23d00 100644
--- a/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
+++ b/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
@@ -305,38 +305,35 @@ void FullCodeGenerator::Generate() {
// Visit the declarations and body unless there is an illegal
// redeclaration.
- if (scope()->HasIllegalRedeclaration()) {
+ PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+ {
Comment cmnt(masm_, "[ Declarations");
- VisitForEffect(scope()->GetIllegalRedeclaration());
-
- } else {
- PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
- { Comment cmnt(masm_, "[ Declarations");
- VisitDeclarations(scope()->declarations());
- }
+ VisitDeclarations(scope()->declarations());
+ }
- // Assert that the declarations do not use ICs. Otherwise the debugger
- // won't be able to redirect a PC at an IC to the correct IC in newly
- // recompiled code.
- DCHECK_EQ(0, ic_total_count_);
-
- { Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
- Label ok;
- __ LoadRoot(at, Heap::kStackLimitRootIndex);
- __ Branch(&ok, hs, sp, Operand(at));
- Handle<Code> stack_check = isolate()->builtins()->StackCheck();
- PredictableCodeSizeScope predictable(masm_,
- masm_->CallSize(stack_check, RelocInfo::CODE_TARGET));
- __ Call(stack_check, RelocInfo::CODE_TARGET);
- __ bind(&ok);
- }
+ // Assert that the declarations do not use ICs. Otherwise the debugger
+ // won't be able to redirect a PC at an IC to the correct IC in newly
+ // recompiled code.
+ DCHECK_EQ(0, ic_total_count_);
+
+ {
+ Comment cmnt(masm_, "[ Stack check");
+ PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+ Label ok;
+ __ LoadRoot(at, Heap::kStackLimitRootIndex);
+ __ Branch(&ok, hs, sp, Operand(at));
+ Handle<Code> stack_check = isolate()->builtins()->StackCheck();
+ PredictableCodeSizeScope predictable(
+ masm_, masm_->CallSize(stack_check, RelocInfo::CODE_TARGET));
+ __ Call(stack_check, RelocInfo::CODE_TARGET);
+ __ bind(&ok);
+ }
- { Comment cmnt(masm_, "[ Body");
- DCHECK(loop_depth() == 0);
- VisitStatements(literal()->body());
- DCHECK(loop_depth() == 0);
- }
+ {
+ Comment cmnt(masm_, "[ Body");
+ DCHECK(loop_depth() == 0);
+ VisitStatements(literal()->body());
+ DCHECK(loop_depth() == 0);
}
// Always emit a 'return undefined' in case control fell off the end of
@@ -527,7 +524,7 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
true,
true_label_,
false_label_);
- DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
+ DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
if (false_label_ != fall_through_) __ Branch(false_label_);
} else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -637,7 +634,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_false,
Label* fall_through) {
__ mov(a0, result_register());
- Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+ Handle<Code> ic = ToBooleanICStub::GetUninitialized(isolate());
CallIC(ic, condition->test_id());
__ LoadRoot(at, Heap::kTrueValueRootIndex);
Split(eq, result_register(), Operand(at), if_true, if_false, fall_through);
@@ -990,15 +987,15 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
- Label loop, exit;
- ForIn loop_statement(this, stmt);
- increment_loop_depth();
-
// Get the object to enumerate over.
SetExpressionAsStatementPosition(stmt->enumerable());
VisitForAccumulatorValue(stmt->enumerable());
__ mov(a0, result_register());
- OperandStackDepthIncrement(ForIn::kElementCount);
+ OperandStackDepthIncrement(5);
+
+ Label loop, exit;
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
// If the object is null or undefined, skip over the loop, otherwise convert
// it to a JS receiver. See ECMA-262 version 5, section 12.6.4.
@@ -1072,11 +1069,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// We got a fixed array in register v0. Iterate through that.
__ bind(&fixed_array);
- int const vector_index = SmiFromSlot(slot)->value();
- __ EmitLoadTypeFeedbackVector(a1);
- __ li(a2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
- __ sw(a2, FieldMemOperand(a1, FixedArray::OffsetOfElementAt(vector_index)));
-
__ li(a1, Operand(Smi::FromInt(1))); // Smi(1) indicates slow check
__ Push(a1, v0); // Smi and array
__ lw(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
@@ -1111,12 +1103,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset));
__ Branch(&update_each, eq, t0, Operand(a2));
- // We might get here from TurboFan or Crankshaft when something in the
- // for-in loop body deopts and only now notice in fullcodegen, that we
- // can now longer use the enum cache, i.e. left fast mode. So better record
- // this information here, in case we later OSR back into this loop or
- // reoptimize the whole function w/o rerunning the loop with the slow
- // mode object in fullcodegen (which would result in a deopt loop).
+ // We need to filter the key, record slow-path here.
+ int const vector_index = SmiFromSlot(slot)->value();
__ EmitLoadTypeFeedbackVector(a0);
__ li(a2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
__ sw(a2, FieldMemOperand(a0, FixedArray::OffsetOfElementAt(vector_index)));
@@ -1167,31 +1155,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
}
-void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
- bool pretenure) {
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning. If
- // we're running with the --always-opt or the --prepare-always-opt
- // flag, we need to use the runtime function so that the new function
- // we are creating here gets a chance to have its code optimized and
- // doesn't just get a copy of the existing unoptimized code.
- if (!FLAG_always_opt &&
- !FLAG_prepare_always_opt &&
- !pretenure &&
- scope()->is_function_scope() &&
- info->num_literals() == 0) {
- FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
- __ li(a2, Operand(info));
- __ CallStub(&stub);
- } else {
- __ Push(info);
- __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
- : Runtime::kNewClosure);
- }
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
@@ -1628,13 +1591,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
}
- if (expr->has_function()) {
- DCHECK(result_saved);
- __ lw(a0, MemOperand(sp));
- __ push(a0);
- __ CallRuntime(Runtime::kToFastProperties);
- }
-
if (result_saved) {
context()->PlugTOS();
} else {
@@ -1882,62 +1838,41 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// this. It stays on the stack while we update the iterator.
VisitForStackValue(expr->expression());
- switch (expr->yield_kind()) {
- case Yield::kSuspend:
- // Pop value from top-of-stack slot; box result into result register.
- EmitCreateIteratorResult(false);
- __ push(result_register());
- // Fall through.
- case Yield::kInitial: {
- Label suspend, continuation, post_runtime, resume;
-
- __ jmp(&suspend);
- __ bind(&continuation);
- // When we arrive here, the stack top is the resume mode and
- // result_register() holds the input value (the argument given to the
- // respective resume operation).
- __ RecordGeneratorContinuation();
- __ pop(a1);
- __ Branch(&resume, ne, a1,
- Operand(Smi::FromInt(JSGeneratorObject::RETURN)));
- __ push(result_register());
- EmitCreateIteratorResult(true);
- EmitUnwindAndReturn();
-
- __ bind(&suspend);
- VisitForAccumulatorValue(expr->generator_object());
- DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
- __ li(a1, Operand(Smi::FromInt(continuation.pos())));
- __ sw(a1, FieldMemOperand(v0, JSGeneratorObject::kContinuationOffset));
- __ sw(cp, FieldMemOperand(v0, JSGeneratorObject::kContextOffset));
- __ mov(a1, cp);
- __ RecordWriteField(v0, JSGeneratorObject::kContextOffset, a1, a2,
- kRAHasBeenSaved, kDontSaveFPRegs);
- __ Addu(a1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
- __ Branch(&post_runtime, eq, sp, Operand(a1));
- __ push(v0); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ bind(&post_runtime);
- PopOperand(result_register());
- EmitReturnSequence();
-
- __ bind(&resume);
- context()->Plug(result_register());
- break;
- }
-
- case Yield::kFinal: {
- // Pop value from top-of-stack slot, box result into result register.
- OperandStackDepthDecrement(1);
- EmitCreateIteratorResult(true);
- EmitUnwindAndReturn();
- break;
- }
+ Label suspend, continuation, post_runtime, resume;
+
+ __ jmp(&suspend);
+ __ bind(&continuation);
+ // When we arrive here, the stack top is the resume mode and
+ // result_register() holds the input value (the argument given to the
+ // respective resume operation).
+ __ RecordGeneratorContinuation();
+ __ pop(a1);
+ __ Branch(&resume, ne, a1, Operand(Smi::FromInt(JSGeneratorObject::RETURN)));
+ __ push(result_register());
+ EmitCreateIteratorResult(true);
+ EmitUnwindAndReturn();
+
+ __ bind(&suspend);
+ OperandStackDepthIncrement(1); // Not popped on this path.
+ VisitForAccumulatorValue(expr->generator_object());
+ DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+ __ li(a1, Operand(Smi::FromInt(continuation.pos())));
+ __ sw(a1, FieldMemOperand(v0, JSGeneratorObject::kContinuationOffset));
+ __ sw(cp, FieldMemOperand(v0, JSGeneratorObject::kContextOffset));
+ __ mov(a1, cp);
+ __ RecordWriteField(v0, JSGeneratorObject::kContextOffset, a1, a2,
+ kRAHasBeenSaved, kDontSaveFPRegs);
+ __ Addu(a1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
+ __ Branch(&post_runtime, eq, sp, Operand(a1));
+ __ push(v0); // generator object
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ bind(&post_runtime);
+ PopOperand(result_register());
+ EmitReturnSequence();
- case Yield::kDelegating:
- UNREACHABLE();
- }
+ __ bind(&resume);
+ context()->Plug(result_register());
}
@@ -1967,7 +1902,10 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ lw(a2, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
__ push(a2);
- // Push holes for the rest of the arguments to the generator function.
+ // Push holes for arguments to generator function. Since the parser forced
+ // context allocation for any variables in generators, the actual argument
+ // values have already been copied into the context and these dummy values
+ // will never be used.
__ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
__ lw(a3,
FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -1990,9 +1928,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// fp = caller's frame pointer.
// cp = callee's context,
// t0 = callee's JS function.
- __ Push(ra, fp, cp, t0);
- // Adjust FP to point to saved FP.
- __ Addu(fp, sp, 2 * kPointerSize);
+ __ PushStandardFrame(t0);
// Load the operand stack size.
__ lw(a3, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
@@ -2079,7 +2015,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&done_allocate);
__ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, a1);
- __ pop(a2);
+ PopOperand(a2);
__ LoadRoot(a3,
done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
__ LoadRoot(t0, Heap::kEmptyFixedArrayRootIndex);
@@ -2092,18 +2028,6 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
}
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
- SetExpressionPosition(prop);
- Literal* key = prop->key()->AsLiteral();
- DCHECK(!prop->IsSuperAccess());
-
- __ li(LoadDescriptor::NameRegister(), Operand(key->value()));
- __ li(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallLoadIC(NOT_INSIDE_TYPEOF);
-}
-
-
void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
Token::Value op,
Expression* left_expr,
@@ -2711,7 +2635,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
// Record source position of the IC call.
- SetCallPosition(expr);
+ SetCallPosition(expr, expr->tail_call_mode());
if (expr->tail_call_mode() == TailCallMode::kAllow) {
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceTailCall);
@@ -3204,23 +3128,6 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into v0 and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- // Convert the object to an integer.
- Label done_convert;
- __ JumpIfSmi(v0, &done_convert);
- __ Push(v0);
- __ CallRuntime(Runtime::kToInteger);
- __ bind(&done_convert);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3403,6 +3310,11 @@ void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
context()->Plug(v0);
}
+void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
+ DCHECK_EQ(0, expr->arguments()->length());
+ __ LoadNativeContextSlot(Context::ORDINARY_HAS_INSTANCE_INDEX, v0);
+ context()->Plug(v0);
+}
void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
DCHECK(expr->arguments()->length() == 0);
@@ -3444,11 +3356,13 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
+ // Push function.
+ __ LoadNativeContextSlot(expr->context_index(), v0);
+ PushOperand(v0);
+
// Push undefined as the receiver.
__ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
PushOperand(v0);
-
- __ LoadNativeContextSlot(expr->context_index(), v0);
}
@@ -3462,60 +3376,9 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
-}
-
-
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- if (expr->is_jsruntime()) {
- Comment cmnt(masm_, "[ CallRuntime");
- EmitLoadJSRuntimeFunction(expr);
-
- // Push the target function under the receiver.
- __ lw(at, MemOperand(sp, 0));
- PushOperand(at);
- __ sw(v0, MemOperand(sp, kPointerSize));
-
- // Push the arguments ("left-to-right").
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
- EmitCallJSRuntimeFunction(expr);
-
- // Restore context register.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- context()->DropAndPlug(1, v0);
-
- } else {
- const Runtime::Function* function = expr->function();
- switch (function->function_id) {
-#define CALL_INTRINSIC_GENERATOR(Name) \
- case Runtime::kInline##Name: { \
- Comment cmnt(masm_, "[ Inline" #Name); \
- return Emit##Name(expr); \
- }
- FOR_EACH_FULL_CODE_INTRINSIC(CALL_INTRINSIC_GENERATOR)
-#undef CALL_INTRINSIC_GENERATOR
- default: {
- Comment cmnt(masm_, "[ CallRuntime for unhandled intrinsic");
- // Push the arguments ("left-to-right").
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- // Call the C runtime function.
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
- __ CallRuntime(expr->function(), arg_count);
- OperandStackDepthDecrement(arg_count);
- context()->Plug(v0);
- }
- }
- }
+ // Restore context register.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -3753,11 +3616,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ jmp(&stub_call);
__ bind(&slow);
}
- if (!is_strong(language_mode())) {
- ToNumberStub convert_stub(isolate());
- __ CallStub(&convert_stub);
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
- }
+
+ // Convert old value into a number.
+ ToNumberStub convert_stub(isolate());
+ __ CallStub(&convert_stub);
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -3796,9 +3659,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
patch_site.EmitPatchInfo();
__ bind(&done);
- if (is_strong(language_mode())) {
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
- }
// Store the value returned in v0.
switch (assign_type) {
case VARIABLE:
@@ -4045,29 +3905,23 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
VisitForAccumulatorValue(sub_expr);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ mov(a0, result_register());
if (expr->op() == Token::EQ_STRICT) {
Heap::RootListIndex nil_value = nil == kNullValue ?
Heap::kNullValueRootIndex :
Heap::kUndefinedValueRootIndex;
__ LoadRoot(a1, nil_value);
- Split(eq, a0, Operand(a1), if_true, if_false, fall_through);
- } else {
- Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, expr->CompareOperationFeedbackId());
- __ LoadRoot(a1, Heap::kTrueValueRootIndex);
Split(eq, v0, Operand(a1), if_true, if_false, fall_through);
+ } else {
+ __ JumpIfSmi(v0, if_false);
+ __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
+ __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
+ Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ lw(v0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- context()->Plug(v0);
-}
-
-
Register FullCodeGenerator::result_register() {
return v0;
}
@@ -4077,6 +3931,10 @@ Register FullCodeGenerator::context_register() {
return cp;
}
+void FullCodeGenerator::LoadFromFrameField(int frame_offset, Register value) {
+ DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
+ __ lw(value, MemOperand(fp, frame_offset));
+}
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
@@ -4147,12 +4005,6 @@ void FullCodeGenerator::ClearPendingMessage() {
}
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
- DCHECK(!slot.IsInvalid());
- __ li(VectorStoreICTrampolineDescriptor::SlotRegister(),
- Operand(SmiFromSlot(slot)));
-}
-
void FullCodeGenerator::DeferredCommands::EmitCommands() {
DCHECK(!result_register().is(a1));
__ Pop(result_register()); // Restore the accumulator.
@@ -4188,7 +4040,9 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
BackEdgeState target_state,
Code* replacement_code) {
static const int kInstrSize = Assembler::kInstrSize;
- Address branch_address = pc - 6 * kInstrSize;
+ Address pc_immediate_load_address =
+ Assembler::target_address_from_return_address(pc);
+ Address branch_address = pc_immediate_load_address - 2 * kInstrSize;
Isolate* isolate = unoptimized_code->GetIsolate();
CodePatcher patcher(isolate, branch_address, 1);
@@ -4204,7 +4058,6 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
patcher.masm()->slt(at, a3, zero_reg);
break;
case ON_STACK_REPLACEMENT:
- case OSR_AFTER_STACK_CHECK:
// addiu at, zero_reg, 1
// beq at, zero_reg, ok ;; Not changed
// lui t9, <on-stack replacement address> upper
@@ -4215,7 +4068,6 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
patcher.masm()->addiu(at, zero_reg, 1);
break;
}
- Address pc_immediate_load_address = pc - 4 * kInstrSize;
// Replace the stack check address in the load-immediate (lui/ori pair)
// with the entry address of the replacement code.
Assembler::set_target_address_at(isolate, pc_immediate_load_address,
@@ -4231,10 +4083,11 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
Code* unoptimized_code,
Address pc) {
static const int kInstrSize = Assembler::kInstrSize;
- Address branch_address = pc - 6 * kInstrSize;
- Address pc_immediate_load_address = pc - 4 * kInstrSize;
+ Address pc_immediate_load_address =
+ Assembler::target_address_from_return_address(pc);
+ Address branch_address = pc_immediate_load_address - 2 * kInstrSize;
- DCHECK(Assembler::IsBeq(Assembler::instr_at(pc - 5 * kInstrSize)));
+ DCHECK(Assembler::IsBeq(Assembler::instr_at(branch_address + kInstrSize)));
if (!Assembler::IsAddImmediate(Assembler::instr_at(branch_address))) {
DCHECK(reinterpret_cast<uint32_t>(
Assembler::target_address_at(pc_immediate_load_address)) ==
@@ -4245,18 +4098,11 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
DCHECK(Assembler::IsAddImmediate(Assembler::instr_at(branch_address)));
- if (reinterpret_cast<uint32_t>(
- Assembler::target_address_at(pc_immediate_load_address)) ==
- reinterpret_cast<uint32_t>(
- isolate->builtins()->OnStackReplacement()->entry())) {
- return ON_STACK_REPLACEMENT;
- }
-
DCHECK(reinterpret_cast<uint32_t>(
- Assembler::target_address_at(pc_immediate_load_address)) ==
+ Assembler::target_address_at(pc_immediate_load_address)) ==
reinterpret_cast<uint32_t>(
- isolate->builtins()->OsrAfterStackCheck()->entry()));
- return OSR_AFTER_STACK_CHECK;
+ isolate->builtins()->OnStackReplacement()->entry()));
+ return ON_STACK_REPLACEMENT;
}
diff --git a/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc b/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
index 9573be297a..681abd1230 100644
--- a/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
+++ b/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
@@ -301,42 +301,38 @@ void FullCodeGenerator::Generate() {
__ CallRuntime(Runtime::kTraceEnter);
}
- // Visit the declarations and body unless there is an illegal
- // redeclaration.
- if (scope()->HasIllegalRedeclaration()) {
+ // Visit the declarations and body.
+ PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+ {
Comment cmnt(masm_, "[ Declarations");
- VisitForEffect(scope()->GetIllegalRedeclaration());
-
- } else {
- PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
- { Comment cmnt(masm_, "[ Declarations");
- VisitDeclarations(scope()->declarations());
- }
+ VisitDeclarations(scope()->declarations());
+ }
- // Assert that the declarations do not use ICs. Otherwise the debugger
- // won't be able to redirect a PC at an IC to the correct IC in newly
- // recompiled code.
- DCHECK_EQ(0, ic_total_count_);
-
- { Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
- Label ok;
- __ LoadRoot(at, Heap::kStackLimitRootIndex);
- __ Branch(&ok, hs, sp, Operand(at));
- Handle<Code> stack_check = isolate()->builtins()->StackCheck();
- PredictableCodeSizeScope predictable(masm_,
- masm_->CallSize(stack_check, RelocInfo::CODE_TARGET));
- __ Call(stack_check, RelocInfo::CODE_TARGET);
- __ bind(&ok);
- }
+ // Assert that the declarations do not use ICs. Otherwise the debugger
+ // won't be able to redirect a PC at an IC to the correct IC in newly
+ // recompiled code.
+ DCHECK_EQ(0, ic_total_count_);
+
+ {
+ Comment cmnt(masm_, "[ Stack check");
+ PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+ Label ok;
+ __ LoadRoot(at, Heap::kStackLimitRootIndex);
+ __ Branch(&ok, hs, sp, Operand(at));
+ Handle<Code> stack_check = isolate()->builtins()->StackCheck();
+ PredictableCodeSizeScope predictable(
+ masm_, masm_->CallSize(stack_check, RelocInfo::CODE_TARGET));
+ __ Call(stack_check, RelocInfo::CODE_TARGET);
+ __ bind(&ok);
+ }
- { Comment cmnt(masm_, "[ Body");
- DCHECK(loop_depth() == 0);
+ {
+ Comment cmnt(masm_, "[ Body");
+ DCHECK(loop_depth() == 0);
- VisitStatements(literal()->body());
+ VisitStatements(literal()->body());
- DCHECK(loop_depth() == 0);
- }
+ DCHECK(loop_depth() == 0);
}
// Always emit a 'return undefined' in case control fell off the end of
@@ -527,7 +523,7 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
true,
true_label_,
false_label_);
- DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
+ DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
if (false_label_ != fall_through_) __ Branch(false_label_);
} else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -637,7 +633,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_false,
Label* fall_through) {
__ mov(a0, result_register());
- Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+ Handle<Code> ic = ToBooleanICStub::GetUninitialized(isolate());
CallIC(ic, condition->test_id());
__ LoadRoot(at, Heap::kTrueValueRootIndex);
Split(eq, result_register(), Operand(at), if_true, if_false, fall_through);
@@ -990,16 +986,16 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
- Label loop, exit;
- ForIn loop_statement(this, stmt);
- increment_loop_depth();
-
// Get the object to enumerate over. If the object is null or undefined, skip
// over the loop. See ECMA-262 version 5, section 12.6.4.
SetExpressionAsStatementPosition(stmt->enumerable());
VisitForAccumulatorValue(stmt->enumerable());
__ mov(a0, result_register());
- OperandStackDepthIncrement(ForIn::kElementCount);
+ OperandStackDepthIncrement(5);
+
+ Label loop, exit;
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
// If the object is null or undefined, skip over the loop, otherwise convert
// it to a JS receiver. See ECMA-262 version 5, section 12.6.4.
@@ -1073,11 +1069,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// We got a fixed array in register v0. Iterate through that.
__ bind(&fixed_array);
- int const vector_index = SmiFromSlot(slot)->value();
- __ EmitLoadTypeFeedbackVector(a1);
- __ li(a2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
- __ sd(a2, FieldMemOperand(a1, FixedArray::OffsetOfElementAt(vector_index)));
-
__ li(a1, Operand(Smi::FromInt(1))); // Smi(1) indicates slow check
__ Push(a1, v0); // Smi and array
__ ld(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
@@ -1113,12 +1104,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ ld(a4, FieldMemOperand(a1, HeapObject::kMapOffset));
__ Branch(&update_each, eq, a4, Operand(a2));
- // We might get here from TurboFan or Crankshaft when something in the
- // for-in loop body deopts and only now notice in fullcodegen, that we
- // can now longer use the enum cache, i.e. left fast mode. So better record
- // this information here, in case we later OSR back into this loop or
- // reoptimize the whole function w/o rerunning the loop with the slow
- // mode object in fullcodegen (which would result in a deopt loop).
+ // We need to filter the key, record slow-path here.
+ int const vector_index = SmiFromSlot(slot)->value();
__ EmitLoadTypeFeedbackVector(a0);
__ li(a2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
__ sd(a2, FieldMemOperand(a0, FixedArray::OffsetOfElementAt(vector_index)));
@@ -1169,31 +1156,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
}
-void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
- bool pretenure) {
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning. If
- // we're running with the --always-opt or the --prepare-always-opt
- // flag, we need to use the runtime function so that the new function
- // we are creating here gets a chance to have its code optimized and
- // doesn't just get a copy of the existing unoptimized code.
- if (!FLAG_always_opt &&
- !FLAG_prepare_always_opt &&
- !pretenure &&
- scope()->is_function_scope() &&
- info->num_literals() == 0) {
- FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
- __ li(a2, Operand(info));
- __ CallStub(&stub);
- } else {
- __ Push(info);
- __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
- : Runtime::kNewClosure);
- }
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
@@ -1630,13 +1592,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
}
- if (expr->has_function()) {
- DCHECK(result_saved);
- __ ld(a0, MemOperand(sp));
- __ push(a0);
- __ CallRuntime(Runtime::kToFastProperties);
- }
-
if (result_saved) {
context()->PlugTOS();
} else {
@@ -1884,62 +1839,41 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// this. It stays on the stack while we update the iterator.
VisitForStackValue(expr->expression());
- switch (expr->yield_kind()) {
- case Yield::kSuspend:
- // Pop value from top-of-stack slot; box result into result register.
- EmitCreateIteratorResult(false);
- __ push(result_register());
- // Fall through.
- case Yield::kInitial: {
- Label suspend, continuation, post_runtime, resume;
-
- __ jmp(&suspend);
- __ bind(&continuation);
- // When we arrive here, the stack top is the resume mode and
- // result_register() holds the input value (the argument given to the
- // respective resume operation).
- __ RecordGeneratorContinuation();
- __ pop(a1);
- __ Branch(&resume, ne, a1,
- Operand(Smi::FromInt(JSGeneratorObject::RETURN)));
- __ push(result_register());
- EmitCreateIteratorResult(true);
- EmitUnwindAndReturn();
-
- __ bind(&suspend);
- VisitForAccumulatorValue(expr->generator_object());
- DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
- __ li(a1, Operand(Smi::FromInt(continuation.pos())));
- __ sd(a1, FieldMemOperand(v0, JSGeneratorObject::kContinuationOffset));
- __ sd(cp, FieldMemOperand(v0, JSGeneratorObject::kContextOffset));
- __ mov(a1, cp);
- __ RecordWriteField(v0, JSGeneratorObject::kContextOffset, a1, a2,
- kRAHasBeenSaved, kDontSaveFPRegs);
- __ Daddu(a1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
- __ Branch(&post_runtime, eq, sp, Operand(a1));
- __ push(v0); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ bind(&post_runtime);
- PopOperand(result_register());
- EmitReturnSequence();
-
- __ bind(&resume);
- context()->Plug(result_register());
- break;
- }
-
- case Yield::kFinal: {
- // Pop value from top-of-stack slot, box result into result register.
- OperandStackDepthDecrement(1);
- EmitCreateIteratorResult(true);
- EmitUnwindAndReturn();
- break;
- }
+ Label suspend, continuation, post_runtime, resume;
+
+ __ jmp(&suspend);
+ __ bind(&continuation);
+ // When we arrive here, the stack top is the resume mode and
+ // result_register() holds the input value (the argument given to the
+ // respective resume operation).
+ __ RecordGeneratorContinuation();
+ __ pop(a1);
+ __ Branch(&resume, ne, a1, Operand(Smi::FromInt(JSGeneratorObject::RETURN)));
+ __ push(result_register());
+ EmitCreateIteratorResult(true);
+ EmitUnwindAndReturn();
+
+ __ bind(&suspend);
+ OperandStackDepthIncrement(1); // Not popped on this path.
+ VisitForAccumulatorValue(expr->generator_object());
+ DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+ __ li(a1, Operand(Smi::FromInt(continuation.pos())));
+ __ sd(a1, FieldMemOperand(v0, JSGeneratorObject::kContinuationOffset));
+ __ sd(cp, FieldMemOperand(v0, JSGeneratorObject::kContextOffset));
+ __ mov(a1, cp);
+ __ RecordWriteField(v0, JSGeneratorObject::kContextOffset, a1, a2,
+ kRAHasBeenSaved, kDontSaveFPRegs);
+ __ Daddu(a1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
+ __ Branch(&post_runtime, eq, sp, Operand(a1));
+ __ push(v0); // generator object
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ bind(&post_runtime);
+ PopOperand(result_register());
+ EmitReturnSequence();
- case Yield::kDelegating:
- UNREACHABLE();
- }
+ __ bind(&resume);
+ context()->Plug(result_register());
}
@@ -1969,7 +1903,10 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ ld(a2, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
__ push(a2);
- // Push holes for the rest of the arguments to the generator function.
+ // Push holes for arguments to generator function. Since the parser forced
+ // context allocation for any variables in generators, the actual argument
+ // values have already been copied into the context and these dummy values
+ // will never be used.
__ ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
// The argument count is stored as int32_t on 64-bit platforms.
// TODO(plind): Smi on 32-bit platforms.
@@ -1994,9 +1931,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// fp = caller's frame pointer.
// cp = callee's context,
// a4 = callee's JS function.
- __ Push(ra, fp, cp, a4);
- // Adjust FP to point to saved FP.
- __ Daddu(fp, sp, 2 * kPointerSize);
+ __ PushStandardFrame(a4);
// Load the operand stack size.
__ ld(a3, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
@@ -2083,7 +2018,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&done_allocate);
__ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, a1);
- __ pop(a2);
+ PopOperand(a2);
__ LoadRoot(a3,
done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
__ LoadRoot(a4, Heap::kEmptyFixedArrayRootIndex);
@@ -2096,18 +2031,6 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
}
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
- SetExpressionPosition(prop);
- Literal* key = prop->key()->AsLiteral();
- DCHECK(!prop->IsSuperAccess());
-
- __ li(LoadDescriptor::NameRegister(), Operand(key->value()));
- __ li(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallLoadIC(NOT_INSIDE_TYPEOF);
-}
-
-
void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
Token::Value op,
Expression* left_expr,
@@ -2161,12 +2084,10 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
break;
}
case Token::ADD:
- __ DadduAndCheckForOverflow(v0, left, right, scratch1);
- __ BranchOnOverflow(&stub_call, scratch1);
+ __ DaddBranchOvf(v0, left, Operand(right), &stub_call);
break;
case Token::SUB:
- __ DsubuAndCheckForOverflow(v0, left, right, scratch1);
- __ BranchOnOverflow(&stub_call, scratch1);
+ __ DsubBranchOvf(v0, left, Operand(right), &stub_call);
break;
case Token::MUL: {
__ Dmulh(v0, left, right);
@@ -2715,7 +2636,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
// Record source position of the IC call.
- SetCallPosition(expr);
+ SetCallPosition(expr, expr->tail_call_mode());
if (expr->tail_call_mode() == TailCallMode::kAllow) {
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceTailCall);
@@ -3209,23 +3130,6 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into v0 and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- // Convert the object to an integer.
- Label done_convert;
- __ JumpIfSmi(v0, &done_convert);
- __ Push(v0);
- __ CallRuntime(Runtime::kToInteger);
- __ bind(&done_convert);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3408,6 +3312,11 @@ void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
context()->Plug(v0);
}
+void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
+ DCHECK_EQ(0, expr->arguments()->length());
+ __ LoadNativeContextSlot(Context::ORDINARY_HAS_INSTANCE_INDEX, v0);
+ context()->Plug(v0);
+}
void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
DCHECK(expr->arguments()->length() == 0);
@@ -3449,11 +3358,13 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
+ // Push function.
+ __ LoadNativeContextSlot(expr->context_index(), v0);
+ PushOperand(v0);
+
// Push undefined as the receiver.
__ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
PushOperand(v0);
-
- __ LoadNativeContextSlot(expr->context_index(), v0);
}
@@ -3467,59 +3378,9 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
-}
-
-
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- if (expr->is_jsruntime()) {
- Comment cmnt(masm_, "[ CallRuntime");
- EmitLoadJSRuntimeFunction(expr);
-
- // Push the target function under the receiver.
- __ ld(at, MemOperand(sp, 0));
- PushOperand(at);
- __ sd(v0, MemOperand(sp, kPointerSize));
-
- // Push the arguments ("left-to-right").
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
- EmitCallJSRuntimeFunction(expr);
- // Restore context register.
- __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- context()->DropAndPlug(1, v0);
- } else {
- const Runtime::Function* function = expr->function();
- switch (function->function_id) {
-#define CALL_INTRINSIC_GENERATOR(Name) \
- case Runtime::kInline##Name: { \
- Comment cmnt(masm_, "[ Inline" #Name); \
- return Emit##Name(expr); \
- }
- FOR_EACH_FULL_CODE_INTRINSIC(CALL_INTRINSIC_GENERATOR)
-#undef CALL_INTRINSIC_GENERATOR
- default: {
- Comment cmnt(masm_, "[ CallRuntime for unhandled intrinsic");
- // Push the arguments ("left-to-right").
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the C runtime function.
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
- __ CallRuntime(expr->function(), arg_count);
- OperandStackDepthDecrement(arg_count);
- context()->Plug(v0);
- }
- }
- }
+ // Restore context register.
+ __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -3751,20 +3612,18 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
Register scratch1 = a1;
- Register scratch2 = a4;
__ li(scratch1, Operand(Smi::FromInt(count_value)));
- __ DadduAndCheckForOverflow(v0, v0, scratch1, scratch2);
- __ BranchOnNoOverflow(&done, scratch2);
+ __ DaddBranchNoOvf(v0, v0, Operand(scratch1), &done);
// Call stub. Undo operation first.
__ Move(v0, a0);
__ jmp(&stub_call);
__ bind(&slow);
}
- if (!is_strong(language_mode())) {
- ToNumberStub convert_stub(isolate());
- __ CallStub(&convert_stub);
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
- }
+
+ // Convert old value into a number.
+ ToNumberStub convert_stub(isolate());
+ __ CallStub(&convert_stub);
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -3803,9 +3662,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
patch_site.EmitPatchInfo();
__ bind(&done);
- if (is_strong(language_mode())) {
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
- }
// Store the value returned in v0.
switch (assign_type) {
case VARIABLE:
@@ -4052,29 +3908,23 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
VisitForAccumulatorValue(sub_expr);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ mov(a0, result_register());
if (expr->op() == Token::EQ_STRICT) {
Heap::RootListIndex nil_value = nil == kNullValue ?
Heap::kNullValueRootIndex :
Heap::kUndefinedValueRootIndex;
__ LoadRoot(a1, nil_value);
- Split(eq, a0, Operand(a1), if_true, if_false, fall_through);
- } else {
- Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, expr->CompareOperationFeedbackId());
- __ LoadRoot(a1, Heap::kTrueValueRootIndex);
Split(eq, v0, Operand(a1), if_true, if_false, fall_through);
+ } else {
+ __ JumpIfSmi(v0, if_false);
+ __ ld(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
+ __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
+ Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ ld(v0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- context()->Plug(v0);
-}
-
-
Register FullCodeGenerator::result_register() {
return v0;
}
@@ -4084,6 +3934,12 @@ Register FullCodeGenerator::context_register() {
return cp;
}
+void FullCodeGenerator::LoadFromFrameField(int frame_offset, Register value) {
+ // DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
+ DCHECK(IsAligned(frame_offset, kPointerSize));
+ // __ sw(value, MemOperand(fp, frame_offset));
+ __ ld(value, MemOperand(fp, frame_offset));
+}
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
// DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
@@ -4156,12 +4012,6 @@ void FullCodeGenerator::ClearPendingMessage() {
}
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
- DCHECK(!slot.IsInvalid());
- __ li(VectorStoreICTrampolineDescriptor::SlotRegister(),
- Operand(SmiFromSlot(slot)));
-}
-
void FullCodeGenerator::DeferredCommands::EmitCommands() {
__ Pop(result_register()); // Restore the accumulator.
__ Pop(a1); // Get the token.
@@ -4196,7 +4046,9 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
BackEdgeState target_state,
Code* replacement_code) {
static const int kInstrSize = Assembler::kInstrSize;
- Address branch_address = pc - 8 * kInstrSize;
+ Address pc_immediate_load_address =
+ Assembler::target_address_from_return_address(pc);
+ Address branch_address = pc_immediate_load_address - 2 * kInstrSize;
Isolate* isolate = unoptimized_code->GetIsolate();
CodePatcher patcher(isolate, branch_address, 1);
@@ -4214,7 +4066,6 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
patcher.masm()->slt(at, a3, zero_reg);
break;
case ON_STACK_REPLACEMENT:
- case OSR_AFTER_STACK_CHECK:
// addiu at, zero_reg, 1
// beq at, zero_reg, ok ;; Not changed
// lui t9, <on-stack replacement address> upper
@@ -4227,7 +4078,6 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
patcher.masm()->daddiu(at, zero_reg, 1);
break;
}
- Address pc_immediate_load_address = pc - 6 * kInstrSize;
// Replace the stack check address in the load-immediate (6-instr sequence)
// with the entry address of the replacement code.
Assembler::set_target_address_at(isolate, pc_immediate_load_address,
@@ -4243,10 +4093,11 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
Code* unoptimized_code,
Address pc) {
static const int kInstrSize = Assembler::kInstrSize;
- Address branch_address = pc - 8 * kInstrSize;
- Address pc_immediate_load_address = pc - 6 * kInstrSize;
+ Address pc_immediate_load_address =
+ Assembler::target_address_from_return_address(pc);
+ Address branch_address = pc_immediate_load_address - 2 * kInstrSize;
- DCHECK(Assembler::IsBeq(Assembler::instr_at(pc - 7 * kInstrSize)));
+ DCHECK(Assembler::IsBeq(Assembler::instr_at(branch_address + kInstrSize)));
if (!Assembler::IsAddImmediate(Assembler::instr_at(branch_address))) {
DCHECK(reinterpret_cast<uint64_t>(
Assembler::target_address_at(pc_immediate_load_address)) ==
@@ -4257,18 +4108,11 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
DCHECK(Assembler::IsAddImmediate(Assembler::instr_at(branch_address)));
- if (reinterpret_cast<uint64_t>(
- Assembler::target_address_at(pc_immediate_load_address)) ==
- reinterpret_cast<uint64_t>(
- isolate->builtins()->OnStackReplacement()->entry())) {
- return ON_STACK_REPLACEMENT;
- }
-
DCHECK(reinterpret_cast<uint64_t>(
- Assembler::target_address_at(pc_immediate_load_address)) ==
+ Assembler::target_address_at(pc_immediate_load_address)) ==
reinterpret_cast<uint64_t>(
- isolate->builtins()->OsrAfterStackCheck()->entry()));
- return OSR_AFTER_STACK_CHECK;
+ isolate->builtins()->OnStackReplacement()->entry()));
+ return ON_STACK_REPLACEMENT;
}
diff --git a/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc b/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
index daf3dbc099..301ccf53cc 100644
--- a/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
+++ b/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
@@ -299,41 +299,34 @@ void FullCodeGenerator::Generate() {
__ CallRuntime(Runtime::kTraceEnter);
}
- // Visit the declarations and body unless there is an illegal
- // redeclaration.
- if (scope()->HasIllegalRedeclaration()) {
+ // Visit the declarations and body.
+ PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+ {
Comment cmnt(masm_, "[ Declarations");
- VisitForEffect(scope()->GetIllegalRedeclaration());
-
- } else {
- PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
- {
- Comment cmnt(masm_, "[ Declarations");
- VisitDeclarations(scope()->declarations());
- }
+ VisitDeclarations(scope()->declarations());
+ }
- // Assert that the declarations do not use ICs. Otherwise the debugger
- // won't be able to redirect a PC at an IC to the correct IC in newly
- // recompiled code.
- DCHECK_EQ(0, ic_total_count_);
+ // Assert that the declarations do not use ICs. Otherwise the debugger
+ // won't be able to redirect a PC at an IC to the correct IC in newly
+ // recompiled code.
+ DCHECK_EQ(0, ic_total_count_);
- {
- Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
- Label ok;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmpl(sp, ip);
- __ bc_short(ge, &ok);
- __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
- __ bind(&ok);
- }
+ {
+ Comment cmnt(masm_, "[ Stack check");
+ PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+ Label ok;
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmpl(sp, ip);
+ __ bc_short(ge, &ok);
+ __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+ __ bind(&ok);
+ }
- {
- Comment cmnt(masm_, "[ Body");
- DCHECK(loop_depth() == 0);
- VisitStatements(literal()->body());
- DCHECK(loop_depth() == 0);
- }
+ {
+ Comment cmnt(masm_, "[ Body");
+ DCHECK(loop_depth() == 0);
+ VisitStatements(literal()->body());
+ DCHECK(loop_depth() == 0);
}
// Always emit a 'return undefined' in case control fell off the end of
@@ -516,7 +509,7 @@ void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
false_label_);
- DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
+ DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
if (false_label_ != fall_through_) __ b(false_label_);
} else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -616,7 +609,7 @@ void FullCodeGenerator::TestContext::Plug(bool flag) const {
void FullCodeGenerator::DoTest(Expression* condition, Label* if_true,
Label* if_false, Label* fall_through) {
- Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+ Handle<Code> ic = ToBooleanICStub::GetUninitialized(isolate());
CallIC(ic, condition->test_id());
__ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
@@ -955,14 +948,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
- Label loop, exit;
- ForIn loop_statement(this, stmt);
- increment_loop_depth();
-
// Get the object to enumerate over.
SetExpressionAsStatementPosition(stmt->enumerable());
VisitForAccumulatorValue(stmt->enumerable());
- OperandStackDepthIncrement(ForIn::kElementCount);
+ OperandStackDepthIncrement(5);
+
+ Label loop, exit;
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
// If the object is null or undefined, skip over the loop, otherwise convert
// it to a JS receiver. See ECMA-262 version 5, section 12.6.4.
@@ -1038,11 +1031,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// We got a fixed array in register r3. Iterate through that.
__ bind(&fixed_array);
- int const vector_index = SmiFromSlot(slot)->value();
- __ EmitLoadTypeFeedbackVector(r4);
- __ mov(r5, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
- __ StoreP(
- r5, FieldMemOperand(r4, FixedArray::OffsetOfElementAt(vector_index)), r0);
__ LoadSmiLiteral(r4, Smi::FromInt(1)); // Smi(1) indicates slow check
__ Push(r4, r3); // Smi and array
__ LoadP(r4, FieldMemOperand(r3, FixedArray::kLengthOffset));
@@ -1079,12 +1067,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmp(r7, r5);
__ beq(&update_each);
- // We might get here from TurboFan or Crankshaft when something in the
- // for-in loop body deopts and only now notice in fullcodegen, that we
- // can now longer use the enum cache, i.e. left fast mode. So better record
- // this information here, in case we later OSR back into this loop or
- // reoptimize the whole function w/o rerunning the loop with the slow
- // mode object in fullcodegen (which would result in a deopt loop).
+ // We need to filter the key, record slow-path here.
+ int const vector_index = SmiFromSlot(slot)->value();
__ EmitLoadTypeFeedbackVector(r3);
__ mov(r5, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
__ StoreP(
@@ -1138,28 +1122,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
}
-void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
- bool pretenure) {
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning. If
- // we're running with the --always-opt or the --prepare-always-opt
- // flag, we need to use the runtime function so that the new function
- // we are creating here gets a chance to have its code optimized and
- // doesn't just get a copy of the existing unoptimized code.
- if (!FLAG_always_opt && !FLAG_prepare_always_opt && !pretenure &&
- scope()->is_function_scope() && info->num_literals() == 0) {
- FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
- __ mov(r5, Operand(info));
- __ CallStub(&stub);
- } else {
- __ Push(info);
- __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
- : Runtime::kNewClosure);
- }
- context()->Plug(r3);
-}
-
-
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
@@ -1592,13 +1554,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
}
- if (expr->has_function()) {
- DCHECK(result_saved);
- __ LoadP(r3, MemOperand(sp));
- __ push(r3);
- __ CallRuntime(Runtime::kToFastProperties);
- }
-
if (result_saved) {
context()->PlugTOS();
} else {
@@ -1843,64 +1798,44 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// this. It stays on the stack while we update the iterator.
VisitForStackValue(expr->expression());
- switch (expr->yield_kind()) {
- case Yield::kSuspend:
- // Pop value from top-of-stack slot; box result into result register.
- EmitCreateIteratorResult(false);
- __ push(result_register());
- // Fall through.
- case Yield::kInitial: {
- Label suspend, continuation, post_runtime, resume;
-
- __ b(&suspend);
- __ bind(&continuation);
- // When we arrive here, the stack top is the resume mode and
- // result_register() holds the input value (the argument given to the
- // respective resume operation).
- __ RecordGeneratorContinuation();
- __ pop(r4);
- __ CmpSmiLiteral(r4, Smi::FromInt(JSGeneratorObject::RETURN), r0);
- __ bne(&resume);
- __ push(result_register());
- EmitCreateIteratorResult(true);
- EmitUnwindAndReturn();
-
- __ bind(&suspend);
- VisitForAccumulatorValue(expr->generator_object());
- DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
- __ LoadSmiLiteral(r4, Smi::FromInt(continuation.pos()));
- __ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset),
- r0);
- __ StoreP(cp, FieldMemOperand(r3, JSGeneratorObject::kContextOffset), r0);
- __ mr(r4, cp);
- __ RecordWriteField(r3, JSGeneratorObject::kContextOffset, r4, r5,
- kLRHasBeenSaved, kDontSaveFPRegs);
- __ addi(r4, fp, Operand(StandardFrameConstants::kExpressionsOffset));
- __ cmp(sp, r4);
- __ beq(&post_runtime);
- __ push(r3); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ bind(&post_runtime);
- PopOperand(result_register());
- EmitReturnSequence();
-
- __ bind(&resume);
- context()->Plug(result_register());
- break;
- }
-
- case Yield::kFinal: {
- // Pop value from top-of-stack slot, box result into result register.
- OperandStackDepthDecrement(1);
- EmitCreateIteratorResult(true);
- EmitUnwindAndReturn();
- break;
- }
+ Label suspend, continuation, post_runtime, resume;
+
+ __ b(&suspend);
+ __ bind(&continuation);
+ // When we arrive here, the stack top is the resume mode and
+ // result_register() holds the input value (the argument given to the
+ // respective resume operation).
+ __ RecordGeneratorContinuation();
+ __ pop(r4);
+ __ CmpSmiLiteral(r4, Smi::FromInt(JSGeneratorObject::RETURN), r0);
+ __ bne(&resume);
+ __ push(result_register());
+ EmitCreateIteratorResult(true);
+ EmitUnwindAndReturn();
+
+ __ bind(&suspend);
+ OperandStackDepthIncrement(1); // Not popped on this path.
+ VisitForAccumulatorValue(expr->generator_object());
+ DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+ __ LoadSmiLiteral(r4, Smi::FromInt(continuation.pos()));
+ __ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset),
+ r0);
+ __ StoreP(cp, FieldMemOperand(r3, JSGeneratorObject::kContextOffset), r0);
+ __ mr(r4, cp);
+ __ RecordWriteField(r3, JSGeneratorObject::kContextOffset, r4, r5,
+ kLRHasBeenSaved, kDontSaveFPRegs);
+ __ addi(r4, fp, Operand(StandardFrameConstants::kExpressionsOffset));
+ __ cmp(sp, r4);
+ __ beq(&post_runtime);
+ __ push(r3); // generator object
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ bind(&post_runtime);
+ PopOperand(result_register());
+ EmitReturnSequence();
- case Yield::kDelegating:
- UNREACHABLE();
- }
+ __ bind(&resume);
+ context()->Plug(result_register());
}
@@ -1930,7 +1865,10 @@ void FullCodeGenerator::EmitGeneratorResume(
__ LoadP(r5, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset));
__ push(r5);
- // Push holes for the rest of the arguments to the generator function.
+ // Push holes for arguments to generator function. Since the parser forced
+ // context allocation for any variables in generators, the actual argument
+ // values have already been copied into the context and these dummy values
+ // will never be used.
__ LoadP(r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
__ LoadWordArith(
r6, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -1959,9 +1897,7 @@ void FullCodeGenerator::EmitGeneratorResume(
// fp = caller's frame pointer.
// cp = callee's context,
// r7 = callee's JS function.
- __ PushFixedFrame(r7);
- // Adjust FP to point to saved FP.
- __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ PushStandardFrame(r7);
// Load the operand stack size.
__ LoadP(r6, FieldMemOperand(r4, JSGeneratorObject::kOperandStackOffset));
@@ -2060,7 +1996,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&done_allocate);
__ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r4);
- __ pop(r5);
+ PopOperand(r5);
__ LoadRoot(r6,
done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
__ LoadRoot(r7, Heap::kEmptyFixedArrayRootIndex);
@@ -2072,18 +2008,6 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
}
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
- SetExpressionPosition(prop);
- Literal* key = prop->key()->AsLiteral();
- DCHECK(!prop->IsSuperAccess());
-
- __ mov(LoadDescriptor::NameRegister(), Operand(key->value()));
- __ mov(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallLoadIC(NOT_INSIDE_TYPEOF);
-}
-
-
void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
Token::Value op,
Expression* left_expr,
@@ -2715,7 +2639,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
}
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
- SetCallPosition(expr);
+ SetCallPosition(expr, expr->tail_call_mode());
if (expr->tail_call_mode() == TailCallMode::kAllow) {
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceTailCall);
@@ -3198,23 +3122,6 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into r3 and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- // Convert the object to an integer.
- Label done_convert;
- __ JumpIfSmi(r3, &done_convert);
- __ Push(r3);
- __ CallRuntime(Runtime::kToInteger);
- __ bind(&done_convert);
- context()->Plug(r3);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3384,6 +3291,11 @@ void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
context()->Plug(r3);
}
+void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
+ DCHECK_EQ(0, expr->arguments()->length());
+ __ LoadNativeContextSlot(Context::ORDINARY_HAS_INSTANCE_INDEX, r3);
+ context()->Plug(r3);
+}
void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
DCHECK(expr->arguments()->length() == 0);
@@ -3425,11 +3337,13 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
+ // Push function.
+ __ LoadNativeContextSlot(expr->context_index(), r3);
+ PushOperand(r3);
+
// Push undefined as the receiver.
__ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
PushOperand(r3);
-
- __ LoadNativeContextSlot(expr->context_index(), r3);
}
@@ -3443,60 +3357,9 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
-}
-
-
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- if (expr->is_jsruntime()) {
- Comment cmnt(masm_, "[ CallRuntime");
- EmitLoadJSRuntimeFunction(expr);
-
- // Push the target function under the receiver.
- __ LoadP(ip, MemOperand(sp, 0));
- PushOperand(ip);
- __ StoreP(r3, MemOperand(sp, kPointerSize));
-
- // Push the arguments ("left-to-right").
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
- EmitCallJSRuntimeFunction(expr);
-
- // Restore context register.
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- context()->DropAndPlug(1, r3);
-
- } else {
- const Runtime::Function* function = expr->function();
- switch (function->function_id) {
-#define CALL_INTRINSIC_GENERATOR(Name) \
- case Runtime::kInline##Name: { \
- Comment cmnt(masm_, "[ Inline" #Name); \
- return Emit##Name(expr); \
- }
- FOR_EACH_FULL_CODE_INTRINSIC(CALL_INTRINSIC_GENERATOR)
-#undef CALL_INTRINSIC_GENERATOR
- default: {
- Comment cmnt(masm_, "[ CallRuntime for unhandled intrinsic");
- // Push the arguments ("left-to-right").
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- // Call the C runtime function.
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
- __ CallRuntime(expr->function(), arg_count);
- OperandStackDepthDecrement(arg_count);
- context()->Plug(r3);
- }
- }
- }
+ // Restore context register.
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -3731,11 +3594,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ b(&stub_call);
__ bind(&slow);
}
- if (!is_strong(language_mode())) {
- ToNumberStub convert_stub(isolate());
- __ CallStub(&convert_stub);
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
- }
+
+ // Convert old value into a number.
+ ToNumberStub convert_stub(isolate());
+ __ CallStub(&convert_stub);
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -3774,9 +3637,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
patch_site.EmitPatchInfo();
__ bind(&done);
- if (is_strong(language_mode())) {
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
- }
// Store the value returned in r3.
switch (assign_type) {
case VARIABLE:
@@ -4032,26 +3892,25 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
__ cmp(r3, r4);
Split(eq, if_true, if_false, fall_through);
} else {
- Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, expr->CompareOperationFeedbackId());
- __ CompareRoot(r3, Heap::kTrueValueRootIndex);
- Split(eq, if_true, if_false, fall_through);
+ __ JumpIfSmi(r3, if_false);
+ __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ lbz(r4, FieldMemOperand(r3, Map::kBitFieldOffset));
+ __ andi(r0, r4, Operand(1 << Map::kIsUndetectable));
+ Split(ne, if_true, if_false, fall_through, cr0);
}
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- context()->Plug(r3);
-}
-
-
Register FullCodeGenerator::result_register() { return r3; }
Register FullCodeGenerator::context_register() { return cp; }
+void FullCodeGenerator::LoadFromFrameField(int frame_offset, Register value) {
+ DCHECK_EQ(static_cast<int>(POINTER_SIZE_ALIGN(frame_offset)), frame_offset);
+ __ LoadP(value, MemOperand(fp, frame_offset), r0);
+}
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
DCHECK_EQ(static_cast<int>(POINTER_SIZE_ALIGN(frame_offset)), frame_offset);
@@ -4122,12 +3981,6 @@ void FullCodeGenerator::ClearPendingMessage() {
}
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
- DCHECK(!slot.IsInvalid());
- __ mov(VectorStoreICTrampolineDescriptor::SlotRegister(),
- Operand(SmiFromSlot(slot)));
-}
-
void FullCodeGenerator::DeferredCommands::EmitCommands() {
DCHECK(!result_register().is(r4));
// Restore the accumulator (r3) and token (r4).
@@ -4180,7 +4033,6 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code, Address pc,
break;
}
case ON_STACK_REPLACEMENT:
- case OSR_AFTER_STACK_CHECK:
// <decrement profiling counter>
// crset
// bge <ok> ;; not changed
@@ -4209,8 +4061,10 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
Isolate* isolate, Code* unoptimized_code, Address pc) {
Address mov_address = Assembler::target_address_from_return_address(pc);
Address cmp_address = mov_address - 2 * Assembler::kInstrSize;
+#ifdef DEBUG
Address interrupt_address =
Assembler::target_address_at(mov_address, unoptimized_code);
+#endif
if (Assembler::IsCmpImmediate(Assembler::instr_at(cmp_address))) {
DCHECK(interrupt_address == isolate->builtins()->InterruptCheck()->entry());
@@ -4219,13 +4073,9 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
DCHECK(Assembler::IsCrSet(Assembler::instr_at(cmp_address)));
- if (interrupt_address == isolate->builtins()->OnStackReplacement()->entry()) {
- return ON_STACK_REPLACEMENT;
- }
-
DCHECK(interrupt_address ==
- isolate->builtins()->OsrAfterStackCheck()->entry());
- return OSR_AFTER_STACK_CHECK;
+ isolate->builtins()->OnStackReplacement()->entry());
+ return ON_STACK_REPLACEMENT;
}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/full-codegen/s390/OWNERS b/deps/v8/src/full-codegen/s390/OWNERS
new file mode 100644
index 0000000000..eb007cb908
--- /dev/null
+++ b/deps/v8/src/full-codegen/s390/OWNERS
@@ -0,0 +1,5 @@
+jyan@ca.ibm.com
+dstence@us.ibm.com
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
diff --git a/deps/v8/src/full-codegen/s390/full-codegen-s390.cc b/deps/v8/src/full-codegen/s390/full-codegen-s390.cc
new file mode 100644
index 0000000000..88bec4cab6
--- /dev/null
+++ b/deps/v8/src/full-codegen/s390/full-codegen-s390.cc
@@ -0,0 +1,3981 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/ast/scopes.h"
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/debug/debug.h"
+#include "src/full-codegen/full-codegen.h"
+#include "src/ic/ic.h"
+#include "src/parsing/parser.h"
+
+#include "src/s390/code-stubs-s390.h"
+#include "src/s390/macro-assembler-s390.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm())
+
+// A patch site is a location in the code which it is possible to patch. This
+// class has a number of methods to emit the code which is patchable and the
+// method EmitPatchInfo to record a marker back to the patchable code. This
+// marker is a cmpi rx, #yyy instruction, and x * 0x0000ffff + yyy (raw 16 bit
+// immediate value is used) is the delta from the pc to the first instruction of
+// the patchable code.
+// See PatchInlinedSmiCode in ic-s390.cc for the code that patches it
+class JumpPatchSite BASE_EMBEDDED {
+ public:
+ explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
+#ifdef DEBUG
+ info_emitted_ = false;
+#endif
+ }
+
+ ~JumpPatchSite() { DCHECK(patch_site_.is_bound() == info_emitted_); }
+
+ // When initially emitting this ensure that a jump is always generated to skip
+ // the inlined smi code.
+ void EmitJumpIfNotSmi(Register reg, Label* target) {
+ DCHECK(!patch_site_.is_bound() && !info_emitted_);
+ __ bind(&patch_site_);
+ __ CmpP(reg, reg);
+// Emit the Nop to make bigger place for patching on 31-bit
+// as the TestIfSmi sequence uses 4-byte TMLL
+#ifndef V8_TARGET_ARCH_S390X
+ __ nop();
+#endif
+ __ beq(target); // Always taken before patched.
+ }
+
+ // When initially emitting this ensure that a jump is never generated to skip
+ // the inlined smi code.
+ void EmitJumpIfSmi(Register reg, Label* target) {
+ DCHECK(!patch_site_.is_bound() && !info_emitted_);
+ __ bind(&patch_site_);
+ __ CmpP(reg, reg);
+// Emit the Nop to make bigger place for patching on 31-bit
+// as the TestIfSmi sequence uses 4-byte TMLL
+#ifndef V8_TARGET_ARCH_S390X
+ __ nop();
+#endif
+ __ bne(target); // Never taken before patched.
+ }
+
+ void EmitPatchInfo() {
+ if (patch_site_.is_bound()) {
+ int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
+ DCHECK(is_int16(delta_to_patch_site));
+ __ chi(r0, Operand(delta_to_patch_site));
+#ifdef DEBUG
+ info_emitted_ = true;
+#endif
+ } else {
+ __ nop();
+ __ nop();
+ }
+ }
+
+ private:
+ MacroAssembler* masm() { return masm_; }
+ MacroAssembler* masm_;
+ Label patch_site_;
+#ifdef DEBUG
+ bool info_emitted_;
+#endif
+};
+
+// Generate code for a JS function. On entry to the function the receiver
+// and arguments have been pushed on the stack left to right. The actual
+// argument count matches the formal parameter count expected by the
+// function.
+//
+// The live registers are:
+// o r3: the JS function object being called (i.e., ourselves)
+// o r5: the new target value
+// o cp: our context
+// o fp: our caller's frame pointer
+// o sp: stack pointer
+// o lr: return address
+// o ip: our own function entry (required by the prologue)
+//
+// The function builds a JS frame. Please see JavaScriptFrameConstants in
+// frames-s390.h for its layout.
+void FullCodeGenerator::Generate() {
+ CompilationInfo* info = info_;
+ profiling_counter_ = isolate()->factory()->NewCell(
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
+ SetFunctionPosition(literal());
+ Comment cmnt(masm_, "[ function compiled by full code generator");
+
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+ if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
+ int receiver_offset = info->scope()->num_parameters() * kPointerSize;
+ __ LoadP(r4, MemOperand(sp, receiver_offset), r0);
+ __ AssertNotSmi(r4);
+ __ CompareObjectType(r4, r4, no_reg, FIRST_JS_RECEIVER_TYPE);
+ __ Assert(ge, kSloppyFunctionExpectsJSReceiverReceiver);
+ }
+
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ FrameScope frame_scope(masm_, StackFrame::MANUAL);
+ int prologue_offset = masm_->pc_offset();
+
+ info->set_prologue_offset(prologue_offset);
+ __ Prologue(info->GeneratePreagedPrologue(), ip, prologue_offset);
+
+ {
+ Comment cmnt(masm_, "[ Allocate locals");
+ int locals_count = info->scope()->num_stack_slots();
+ // Generators allocate locals, if any, in context slots.
+ DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
+ OperandStackDepthIncrement(locals_count);
+ if (locals_count > 0) {
+ if (locals_count >= 128) {
+ Label ok;
+ __ AddP(ip, sp, Operand(-(locals_count * kPointerSize)));
+ __ LoadRoot(r5, Heap::kRealStackLimitRootIndex);
+ __ CmpLogicalP(ip, r5);
+ __ bge(&ok, Label::kNear);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&ok);
+ }
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ int kMaxPushes = FLAG_optimize_for_size ? 4 : 32;
+ if (locals_count >= kMaxPushes) {
+ int loop_iterations = locals_count / kMaxPushes;
+ __ mov(r4, Operand(loop_iterations));
+ Label loop_header;
+ __ bind(&loop_header);
+ // Do pushes.
+ // TODO(joransiu): Use MVC for better performance
+ __ lay(sp, MemOperand(sp, -kMaxPushes * kPointerSize));
+ for (int i = 0; i < kMaxPushes; i++) {
+ __ StoreP(ip, MemOperand(sp, i * kPointerSize));
+ }
+ // Continue loop if not done.
+ __ BranchOnCount(r4, &loop_header);
+ }
+ int remaining = locals_count % kMaxPushes;
+ // Emit the remaining pushes.
+ // TODO(joransiu): Use MVC for better performance
+ if (remaining > 0) {
+ __ lay(sp, MemOperand(sp, -remaining * kPointerSize));
+ for (int i = 0; i < remaining; i++) {
+ __ StoreP(ip, MemOperand(sp, i * kPointerSize));
+ }
+ }
+ }
+ }
+
+ bool function_in_register_r3 = true;
+
+ // Possibly allocate a local context.
+ if (info->scope()->num_heap_slots() > 0) {
+ // Argument to NewContext is the function, which is still in r3.
+ Comment cmnt(masm_, "[ Allocate context");
+ bool need_write_barrier = true;
+ int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (info->scope()->is_script_scope()) {
+ __ push(r3);
+ __ Push(info->scope()->GetScopeInfo(info->isolate()));
+ __ CallRuntime(Runtime::kNewScriptContext);
+ PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
+ // The new target value is not used, clobbering is safe.
+ DCHECK_NULL(info->scope()->new_target_var());
+ } else {
+ if (info->scope()->new_target_var() != nullptr) {
+ __ push(r5); // Preserve new target.
+ }
+ if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
+ __ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
+ } else {
+ __ push(r3);
+ __ CallRuntime(Runtime::kNewFunctionContext);
+ }
+ if (info->scope()->new_target_var() != nullptr) {
+ __ pop(r5); // Preserve new target.
+ }
+ }
+ function_in_register_r3 = false;
+ // Context is returned in r2. It replaces the context passed to us.
+ // It's saved in the stack and kept live in cp.
+ __ LoadRR(cp, r2);
+ __ StoreP(r2, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Copy any necessary parameters into the context.
+ int num_parameters = info->scope()->num_parameters();
+ int first_parameter = info->scope()->has_this_declaration() ? -1 : 0;
+ for (int i = first_parameter; i < num_parameters; i++) {
+ Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
+ if (var->IsContextSlot()) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ LoadP(r2, MemOperand(fp, parameter_offset), r0);
+ // Store it in the context.
+ MemOperand target = ContextMemOperand(cp, var->index());
+ __ StoreP(r2, target);
+
+ // Update the write barrier.
+ if (need_write_barrier) {
+ __ RecordWriteContextSlot(cp, target.offset(), r2, r4,
+ kLRHasBeenSaved, kDontSaveFPRegs);
+ } else if (FLAG_debug_code) {
+ Label done;
+ __ JumpIfInNewSpace(cp, r2, &done);
+ __ Abort(kExpectedNewSpaceObject);
+ __ bind(&done);
+ }
+ }
+ }
+ }
+
+ // Register holding this function and new target are both trashed in case we
+ // bailout here. But since that can happen only when new target is not used
+ // and we allocate a context, the value of |function_in_register| is correct.
+ PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
+
+ // Possibly set up a local binding to the this function which is used in
+ // derived constructors with super calls.
+ Variable* this_function_var = scope()->this_function_var();
+ if (this_function_var != nullptr) {
+ Comment cmnt(masm_, "[ This function");
+ if (!function_in_register_r3) {
+ __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ // The write barrier clobbers register again, keep it marked as such.
+ }
+ SetVar(this_function_var, r3, r2, r4);
+ }
+
+ // Possibly set up a local binding to the new target value.
+ Variable* new_target_var = scope()->new_target_var();
+ if (new_target_var != nullptr) {
+ Comment cmnt(masm_, "[ new.target");
+ SetVar(new_target_var, r5, r2, r4);
+ }
+
+ // Possibly allocate RestParameters
+ int rest_index;
+ Variable* rest_param = scope()->rest_parameter(&rest_index);
+ if (rest_param) {
+ Comment cmnt(masm_, "[ Allocate rest parameter array");
+
+ if (!function_in_register_r3) {
+ __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+ FastNewRestParameterStub stub(isolate());
+ __ CallStub(&stub);
+
+ function_in_register_r3 = false;
+ SetVar(rest_param, r2, r3, r4);
+ }
+
+ Variable* arguments = scope()->arguments();
+ if (arguments != NULL) {
+ // Function uses arguments object.
+ Comment cmnt(masm_, "[ Allocate arguments object");
+ if (!function_in_register_r3) {
+ // Load this again, if it's used by the local context below.
+ __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+ if (is_strict(language_mode()) || !has_simple_parameters()) {
+ FastNewStrictArgumentsStub stub(isolate());
+ __ CallStub(&stub);
+ } else if (literal()->has_duplicate_parameters()) {
+ __ Push(r3);
+ __ CallRuntime(Runtime::kNewSloppyArguments_Generic);
+ } else {
+ FastNewSloppyArgumentsStub stub(isolate());
+ __ CallStub(&stub);
+ }
+
+ SetVar(arguments, r2, r3, r4);
+ }
+
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceEnter);
+ }
+
+ // Visit the declarations and body.
+ PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+ {
+ Comment cmnt(masm_, "[ Declarations");
+ VisitDeclarations(scope()->declarations());
+ }
+
+ // Assert that the declarations do not use ICs. Otherwise the debugger
+ // won't be able to redirect a PC at an IC to the correct IC in newly
+ // recompiled code.
+ DCHECK_EQ(0, ic_total_count_);
+
+ {
+ Comment cmnt(masm_, "[ Stack check");
+ PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+ Label ok;
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ CmpLogicalP(sp, ip);
+ __ bge(&ok, Label::kNear);
+ __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+ __ bind(&ok);
+ }
+
+ {
+ Comment cmnt(masm_, "[ Body");
+ DCHECK(loop_depth() == 0);
+ VisitStatements(literal()->body());
+ DCHECK(loop_depth() == 0);
+ }
+
+ // Always emit a 'return undefined' in case control fell off the end of
+ // the body.
+ {
+ Comment cmnt(masm_, "[ return <undefined>;");
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ }
+ EmitReturnSequence();
+}
+
+void FullCodeGenerator::ClearAccumulator() {
+ __ LoadSmiLiteral(r2, Smi::FromInt(0));
+}
+
+void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
+ __ mov(r4, Operand(profiling_counter_));
+ intptr_t smi_delta = reinterpret_cast<intptr_t>(Smi::FromInt(delta));
+ if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_int8(-smi_delta)) {
+ __ AddP(FieldMemOperand(r4, Cell::kValueOffset), Operand(-smi_delta));
+ __ LoadP(r5, FieldMemOperand(r4, Cell::kValueOffset));
+ } else {
+ __ LoadP(r5, FieldMemOperand(r4, Cell::kValueOffset));
+ __ SubSmiLiteral(r5, r5, Smi::FromInt(delta), r0);
+ __ StoreP(r5, FieldMemOperand(r4, Cell::kValueOffset));
+ }
+}
+
+void FullCodeGenerator::EmitProfilingCounterReset() {
+ int reset_value = FLAG_interrupt_budget;
+ __ mov(r4, Operand(profiling_counter_));
+ __ LoadSmiLiteral(r5, Smi::FromInt(reset_value));
+ __ StoreP(r5, FieldMemOperand(r4, Cell::kValueOffset));
+}
+
+void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
+ Label* back_edge_target) {
+ Comment cmnt(masm_, "[ Back edge bookkeeping");
+ Label ok;
+
+ DCHECK(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target) +
+ kCodeSizeMultiplier / 2;
+ int weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier));
+ EmitProfilingCounterDecrement(weight);
+ {
+ // BackEdgeTable::PatchAt manipulates this sequence.
+ __ bge(&ok, Label::kNear);
+ __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+
+ // Record a mapping of this PC offset to the OSR id. This is used to find
+ // the AST id from the unoptimized code in order to use it as a key into
+ // the deoptimization input data found in the optimized code.
+ RecordBackEdge(stmt->OsrEntryId());
+ }
+ EmitProfilingCounterReset();
+
+ __ bind(&ok);
+ PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ // Record a mapping of the OSR id to this PC. This is used if the OSR
+ // entry becomes the target of a bailout. We don't expect it to be, but
+ // we want it to work if it is.
+ PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+}
+
+void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
+ bool is_tail_call) {
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else {
+ int distance = masm_->pc_offset() + kCodeSizeMultiplier / 2;
+ weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier));
+ }
+ EmitProfilingCounterDecrement(weight);
+ Label ok;
+ __ CmpP(r5, Operand::Zero());
+ __ bge(&ok);
+ // Don't need to save result register if we are going to do a tail call.
+ if (!is_tail_call) {
+ __ push(r2);
+ }
+ __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+ if (!is_tail_call) {
+ __ pop(r2);
+ }
+ EmitProfilingCounterReset();
+ __ bind(&ok);
+}
+
+void FullCodeGenerator::EmitReturnSequence() {
+ Comment cmnt(masm_, "[ Return sequence");
+ if (return_label_.is_bound()) {
+ __ b(&return_label_);
+ } else {
+ __ bind(&return_label_);
+ if (FLAG_trace) {
+ // Push the return value on the stack as the parameter.
+ // Runtime::TraceExit returns its parameter in r2
+ __ push(r2);
+ __ CallRuntime(Runtime::kTraceExit);
+ }
+ EmitProfilingCounterHandlingForReturnSequence(false);
+
+ // Make sure that the constant pool is not emitted inside of the return
+ // sequence.
+ {
+ // Here we use masm_-> instead of the __ macro to avoid the code coverage
+ // tool from instrumenting as we rely on the code size here.
+ int32_t arg_count = info_->scope()->num_parameters() + 1;
+ int32_t sp_delta = arg_count * kPointerSize;
+ SetReturnPosition(literal());
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
+
+ __ Ret();
+ }
+ }
+}
+
+void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ codegen()->GetVar(result_register(), var);
+ codegen()->PushOperand(result_register());
+}
+
+void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {}
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Heap::RootListIndex index) const {
+ __ LoadRoot(result_register(), index);
+}
+
+void FullCodeGenerator::StackValueContext::Plug(
+ Heap::RootListIndex index) const {
+ __ LoadRoot(result_register(), index);
+ codegen()->PushOperand(result_register());
+}
+
+void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
+ codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
+ false_label_);
+ if (index == Heap::kUndefinedValueRootIndex ||
+ index == Heap::kNullValueRootIndex ||
+ index == Heap::kFalseValueRootIndex) {
+ if (false_label_ != fall_through_) __ b(false_label_);
+ } else if (index == Heap::kTrueValueRootIndex) {
+ if (true_label_ != fall_through_) __ b(true_label_);
+ } else {
+ __ LoadRoot(result_register(), index);
+ codegen()->DoTest(this);
+ }
+}
+
+void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {}
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Handle<Object> lit) const {
+ __ mov(result_register(), Operand(lit));
+}
+
+void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
+ // Immediates cannot be pushed directly.
+ __ mov(result_register(), Operand(lit));
+ codegen()->PushOperand(result_register());
+}
+
+void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
+ codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
+ false_label_);
+ DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
+ if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+ if (false_label_ != fall_through_) __ b(false_label_);
+ } else if (lit->IsTrue() || lit->IsJSObject()) {
+ if (true_label_ != fall_through_) __ b(true_label_);
+ } else if (lit->IsString()) {
+ if (String::cast(*lit)->length() == 0) {
+ if (false_label_ != fall_through_) __ b(false_label_);
+ } else {
+ if (true_label_ != fall_through_) __ b(true_label_);
+ }
+ } else if (lit->IsSmi()) {
+ if (Smi::cast(*lit)->value() == 0) {
+ if (false_label_ != fall_through_) __ b(false_label_);
+ } else {
+ if (true_label_ != fall_through_) __ b(true_label_);
+ }
+ } else {
+ // For simplicity we always test the accumulator register.
+ __ mov(result_register(), Operand(lit));
+ codegen()->DoTest(this);
+ }
+}
+
+void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
+ Register reg) const {
+ DCHECK(count > 0);
+ if (count > 1) codegen()->DropOperands(count - 1);
+ __ StoreP(reg, MemOperand(sp, 0));
+}
+
+void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
+ Label* materialize_false) const {
+ DCHECK(materialize_true == materialize_false);
+ __ bind(materialize_true);
+}
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Label* materialize_true, Label* materialize_false) const {
+ Label done;
+ __ bind(materialize_true);
+ __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
+ __ b(&done, Label::kNear);
+ __ bind(materialize_false);
+ __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
+ __ bind(&done);
+}
+
+void FullCodeGenerator::StackValueContext::Plug(
+ Label* materialize_true, Label* materialize_false) const {
+ Label done;
+ __ bind(materialize_true);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ b(&done, Label::kNear);
+ __ bind(materialize_false);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ bind(&done);
+ codegen()->PushOperand(ip);
+}
+
+void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
+ Label* materialize_false) const {
+ DCHECK(materialize_true == true_label_);
+ DCHECK(materialize_false == false_label_);
+}
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
+ Heap::RootListIndex value_root_index =
+ flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+ __ LoadRoot(result_register(), value_root_index);
+}
+
+void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
+ Heap::RootListIndex value_root_index =
+ flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+ __ LoadRoot(ip, value_root_index);
+ codegen()->PushOperand(ip);
+}
+
+void FullCodeGenerator::TestContext::Plug(bool flag) const {
+ codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
+ false_label_);
+ if (flag) {
+ if (true_label_ != fall_through_) __ b(true_label_);
+ } else {
+ if (false_label_ != fall_through_) __ b(false_label_);
+ }
+}
+
+void FullCodeGenerator::DoTest(Expression* condition, Label* if_true,
+ Label* if_false, Label* fall_through) {
+ Handle<Code> ic = ToBooleanICStub::GetUninitialized(isolate());
+ CallIC(ic, condition->test_id());
+ __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
+}
+
+void FullCodeGenerator::Split(Condition cond, Label* if_true, Label* if_false,
+ Label* fall_through) {
+ if (if_false == fall_through) {
+ __ b(cond, if_true);
+ } else if (if_true == fall_through) {
+ __ b(NegateCondition(cond), if_false);
+ } else {
+ __ b(cond, if_true);
+ __ b(if_false);
+ }
+}
+
+MemOperand FullCodeGenerator::StackOperand(Variable* var) {
+ DCHECK(var->IsStackAllocated());
+ // Offset is negative because higher indexes are at lower addresses.
+ int offset = -var->index() * kPointerSize;
+ // Adjust by a (parameter or local) base offset.
+ if (var->IsParameter()) {
+ offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
+ } else {
+ offset += JavaScriptFrameConstants::kLocal0Offset;
+ }
+ return MemOperand(fp, offset);
+}
+
+MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
+ DCHECK(var->IsContextSlot() || var->IsStackAllocated());
+ if (var->IsContextSlot()) {
+ int context_chain_length = scope()->ContextChainLength(var->scope());
+ __ LoadContext(scratch, context_chain_length);
+ return ContextMemOperand(scratch, var->index());
+ } else {
+ return StackOperand(var);
+ }
+}
+
+void FullCodeGenerator::GetVar(Register dest, Variable* var) {
+ // Use destination as scratch.
+ MemOperand location = VarOperand(var, dest);
+ __ LoadP(dest, location, r0);
+}
+
+void FullCodeGenerator::SetVar(Variable* var, Register src, Register scratch0,
+ Register scratch1) {
+ DCHECK(var->IsContextSlot() || var->IsStackAllocated());
+ DCHECK(!scratch0.is(src));
+ DCHECK(!scratch0.is(scratch1));
+ DCHECK(!scratch1.is(src));
+ MemOperand location = VarOperand(var, scratch0);
+ __ StoreP(src, location);
+
+ // Emit the write barrier code if the location is in the heap.
+ if (var->IsContextSlot()) {
+ __ RecordWriteContextSlot(scratch0, location.offset(), src, scratch1,
+ kLRHasBeenSaved, kDontSaveFPRegs);
+ }
+}
+
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
+ bool should_normalize,
+ Label* if_true,
+ Label* if_false) {
+ // Only prepare for bailouts before splits if we're in a test
+ // context. Otherwise, we let the Visit function deal with the
+ // preparation to avoid preparing with the same AST id twice.
+ if (!context()->IsTest()) return;
+
+ Label skip;
+ if (should_normalize) __ b(&skip);
+ PrepareForBailout(expr, TOS_REG);
+ if (should_normalize) {
+ __ CompareRoot(r2, Heap::kTrueValueRootIndex);
+ Split(eq, if_true, if_false, NULL);
+ __ bind(&skip);
+ }
+}
+
+void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
+ // The variable in the declaration always resides in the current function
+ // context.
+ DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
+ if (FLAG_debug_code) {
+ // Check that we're not inside a with or catch context.
+ __ LoadP(r3, FieldMemOperand(cp, HeapObject::kMapOffset));
+ __ CompareRoot(r3, Heap::kWithContextMapRootIndex);
+ __ Check(ne, kDeclarationInWithContext);
+ __ CompareRoot(r3, Heap::kCatchContextMapRootIndex);
+ __ Check(ne, kDeclarationInCatchContext);
+ }
+}
+
+void FullCodeGenerator::VisitVariableDeclaration(
+ VariableDeclaration* declaration) {
+ // If it was not possible to allocate the variable at compile time, we
+ // need to "declare" it at runtime to make sure it actually exists in the
+ // local context.
+ VariableProxy* proxy = declaration->proxy();
+ VariableMode mode = declaration->mode();
+ Variable* variable = proxy->var();
+ bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
+ switch (variable->location()) {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED:
+ globals_->Add(variable->name(), zone());
+ globals_->Add(variable->binding_needs_init()
+ ? isolate()->factory()->the_hole_value()
+ : isolate()->factory()->undefined_value(),
+ zone());
+ break;
+
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
+ if (hole_init) {
+ Comment cmnt(masm_, "[ VariableDeclaration");
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ StoreP(ip, StackOperand(variable));
+ }
+ break;
+
+ case VariableLocation::CONTEXT:
+ if (hole_init) {
+ Comment cmnt(masm_, "[ VariableDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ StoreP(ip, ContextMemOperand(cp, variable->index()));
+ // No write barrier since the_hole_value is in old space.
+ PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ }
+ break;
+
+ case VariableLocation::LOOKUP: {
+ Comment cmnt(masm_, "[ VariableDeclaration");
+ __ mov(r4, Operand(variable->name()));
+ // Declaration nodes are always introduced in one of four modes.
+ DCHECK(IsDeclaredVariableMode(mode));
+ // Push initial value, if any.
+ // Note: For variables we must not push an initial value (such as
+ // 'undefined') because we may have a (legal) redeclaration and we
+ // must not destroy the current value.
+ if (hole_init) {
+ __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
+ } else {
+ __ LoadSmiLiteral(r2, Smi::FromInt(0)); // Indicates no initial value.
+ }
+ __ Push(r4, r2);
+ __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
+ break;
+ }
+ }
+}
+
+void FullCodeGenerator::VisitFunctionDeclaration(
+ FunctionDeclaration* declaration) {
+ VariableProxy* proxy = declaration->proxy();
+ Variable* variable = proxy->var();
+ switch (variable->location()) {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
+ globals_->Add(variable->name(), zone());
+ Handle<SharedFunctionInfo> function =
+ Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
+ // Check for stack-overflow exception.
+ if (function.is_null()) return SetStackOverflow();
+ globals_->Add(function, zone());
+ break;
+ }
+
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL: {
+ Comment cmnt(masm_, "[ FunctionDeclaration");
+ VisitForAccumulatorValue(declaration->fun());
+ __ StoreP(result_register(), StackOperand(variable));
+ break;
+ }
+
+ case VariableLocation::CONTEXT: {
+ Comment cmnt(masm_, "[ FunctionDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+ VisitForAccumulatorValue(declaration->fun());
+ __ StoreP(result_register(), ContextMemOperand(cp, variable->index()));
+ int offset = Context::SlotOffset(variable->index());
+ // We know that we have written a function, which is not a smi.
+ __ RecordWriteContextSlot(cp, offset, result_register(), r4,
+ kLRHasBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ break;
+ }
+
+ case VariableLocation::LOOKUP: {
+ Comment cmnt(masm_, "[ FunctionDeclaration");
+ __ mov(r4, Operand(variable->name()));
+ PushOperand(r4);
+ // Push initial value for function declaration.
+ VisitForStackValue(declaration->fun());
+ PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
+ break;
+ }
+ }
+}
+
+void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+ // Call the runtime to declare the globals.
+ __ mov(r3, Operand(pairs));
+ __ LoadSmiLiteral(r2, Smi::FromInt(DeclareGlobalsFlags()));
+ __ Push(r3, r2);
+ __ CallRuntime(Runtime::kDeclareGlobals);
+ // Return value is ignored.
+}
+
+void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
+ // Call the runtime to declare the modules.
+ __ Push(descriptions);
+ __ CallRuntime(Runtime::kDeclareModules);
+ // Return value is ignored.
+}
+
+void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+ Comment cmnt(masm_, "[ SwitchStatement");
+ Breakable nested_statement(this, stmt);
+ SetStatementPosition(stmt);
+
+ // Keep the switch value on the stack until a case matches.
+ VisitForStackValue(stmt->tag());
+ PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+
+ ZoneList<CaseClause*>* clauses = stmt->cases();
+ CaseClause* default_clause = NULL; // Can occur anywhere in the list.
+
+ Label next_test; // Recycled for each test.
+ // Compile all the tests with branches to their bodies.
+ for (int i = 0; i < clauses->length(); i++) {
+ CaseClause* clause = clauses->at(i);
+ clause->body_target()->Unuse();
+
+ // The default is not a test, but remember it as final fall through.
+ if (clause->is_default()) {
+ default_clause = clause;
+ continue;
+ }
+
+ Comment cmnt(masm_, "[ Case comparison");
+ __ bind(&next_test);
+ next_test.Unuse();
+
+ // Compile the label expression.
+ VisitForAccumulatorValue(clause->label());
+
+ // Perform the comparison as if via '==='.
+ __ LoadP(r3, MemOperand(sp, 0)); // Switch value.
+ bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
+ JumpPatchSite patch_site(masm_);
+ if (inline_smi_code) {
+ Label slow_case;
+ __ LoadRR(r4, r2);
+ __ OrP(r4, r3);
+ patch_site.EmitJumpIfNotSmi(r4, &slow_case);
+
+ __ CmpP(r3, r2);
+ __ bne(&next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ __ b(clause->body_target());
+ __ bind(&slow_case);
+ }
+
+ // Record position before stub call for type feedback.
+ SetExpressionPosition(clause);
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
+ CallIC(ic, clause->CompareId());
+ patch_site.EmitPatchInfo();
+
+ Label skip;
+ __ b(&skip);
+ PrepareForBailout(clause, TOS_REG);
+ __ CompareRoot(r2, Heap::kTrueValueRootIndex);
+ __ bne(&next_test);
+ __ Drop(1);
+ __ b(clause->body_target());
+ __ bind(&skip);
+
+ __ CmpP(r2, Operand::Zero());
+ __ bne(&next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ __ b(clause->body_target());
+ }
+
+ // Discard the test value and jump to the default if present, otherwise to
+ // the end of the statement.
+ __ bind(&next_test);
+ DropOperands(1); // Switch value is no longer needed.
+ if (default_clause == NULL) {
+ __ b(nested_statement.break_label());
+ } else {
+ __ b(default_clause->body_target());
+ }
+
+ // Compile all the case bodies.
+ for (int i = 0; i < clauses->length(); i++) {
+ Comment cmnt(masm_, "[ Case body");
+ CaseClause* clause = clauses->at(i);
+ __ bind(clause->body_target());
+ PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+ VisitStatements(clause->statements());
+ }
+
+ __ bind(nested_statement.break_label());
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+}
+
+void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
+ Comment cmnt(masm_, "[ ForInStatement");
+ SetStatementPosition(stmt, SKIP_BREAK);
+
+ FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
+
+ // Get the object to enumerate over.
+ SetExpressionAsStatementPosition(stmt->enumerable());
+ VisitForAccumulatorValue(stmt->enumerable());
+ OperandStackDepthIncrement(5);
+
+ Label loop, exit;
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // If the object is null or undefined, skip over the loop, otherwise convert
+ // it to a JS receiver. See ECMA-262 version 5, section 12.6.4.
+ Label convert, done_convert;
+ __ JumpIfSmi(r2, &convert);
+ __ CompareObjectType(r2, r3, r3, FIRST_JS_RECEIVER_TYPE);
+ __ bge(&done_convert);
+ __ CompareRoot(r2, Heap::kNullValueRootIndex);
+ __ beq(&exit);
+ __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ beq(&exit);
+ __ bind(&convert);
+ ToObjectStub stub(isolate());
+ __ CallStub(&stub);
+ __ bind(&done_convert);
+ PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
+ __ push(r2);
+
+ // Check cache validity in generated code. This is a fast case for
+ // the JSObject::IsSimpleEnum cache validity checks. If we cannot
+ // guarantee cache validity, call the runtime system to check cache
+ // validity or get the property names in a fixed array.
+ // Note: Proxies never have an enum cache, so will always take the
+ // slow path.
+ Label call_runtime;
+ __ CheckEnumCache(&call_runtime);
+
+ // The enum cache is valid. Load the map of the object being
+ // iterated over and use the cache for the iteration.
+ Label use_cache;
+ __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ b(&use_cache);
+
+ // Get the set of properties to enumerate.
+ __ bind(&call_runtime);
+ __ push(r2); // Duplicate the enumerable object on the stack.
+ __ CallRuntime(Runtime::kForInEnumerate);
+ PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
+
+ // If we got a map from the runtime call, we can do a fast
+ // modification check. Otherwise, we got a fixed array, and we have
+ // to do a slow check.
+ Label fixed_array;
+ __ LoadP(r4, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ CompareRoot(r4, Heap::kMetaMapRootIndex);
+ __ bne(&fixed_array);
+
+ // We got a map in register r2. Get the enumeration cache from it.
+ Label no_descriptors;
+ __ bind(&use_cache);
+
+ __ EnumLength(r3, r2);
+ __ CmpSmiLiteral(r3, Smi::FromInt(0), r0);
+ __ beq(&no_descriptors, Label::kNear);
+
+ __ LoadInstanceDescriptors(r2, r4);
+ __ LoadP(r4, FieldMemOperand(r4, DescriptorArray::kEnumCacheOffset));
+ __ LoadP(r4,
+ FieldMemOperand(r4, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+ // Set up the four remaining stack slots.
+ __ push(r2); // Map.
+ __ LoadSmiLiteral(r2, Smi::FromInt(0));
+ // Push enumeration cache, enumeration cache length (as smi) and zero.
+ __ Push(r4, r3, r2);
+ __ b(&loop);
+
+ __ bind(&no_descriptors);
+ __ Drop(1);
+ __ b(&exit);
+
+ // We got a fixed array in register r2. Iterate through that.
+ __ bind(&fixed_array);
+
+ __ LoadSmiLiteral(r3, Smi::FromInt(1)); // Smi(1) indicates slow check
+ __ Push(r3, r2); // Smi and array
+ __ LoadP(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
+ __ Push(r3); // Fixed array length (as smi).
+ PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
+ __ LoadSmiLiteral(r2, Smi::FromInt(0));
+ __ Push(r2); // Initial index.
+
+ // Generate code for doing the condition check.
+ __ bind(&loop);
+ SetExpressionAsStatementPosition(stmt->each());
+
+ // Load the current count to r2, load the length to r3.
+ __ LoadP(r2, MemOperand(sp, 0 * kPointerSize));
+ __ LoadP(r3, MemOperand(sp, 1 * kPointerSize));
+ __ CmpLogicalP(r2, r3); // Compare to the array length.
+ __ bge(loop_statement.break_label());
+
+ // Get the current entry of the array into register r5.
+ __ LoadP(r4, MemOperand(sp, 2 * kPointerSize));
+ __ AddP(r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ SmiToPtrArrayOffset(r5, r2);
+ __ LoadP(r5, MemOperand(r5, r4));
+
+ // Get the expected map from the stack or a smi in the
+ // permanent slow case into register r4.
+ __ LoadP(r4, MemOperand(sp, 3 * kPointerSize));
+
+ // Check if the expected map still matches that of the enumerable.
+ // If not, we may have to filter the key.
+ Label update_each;
+ __ LoadP(r3, MemOperand(sp, 4 * kPointerSize));
+ __ LoadP(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ CmpP(r6, r4);
+ __ beq(&update_each);
+
+ // We need to filter the key, record slow-path here.
+ int const vector_index = SmiFromSlot(slot)->value();
+ __ EmitLoadTypeFeedbackVector(r2);
+ __ mov(r4, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+ __ StoreP(
+ r4, FieldMemOperand(r2, FixedArray::OffsetOfElementAt(vector_index)), r0);
+
+ // Convert the entry to a string or (smi) 0 if it isn't a property
+ // any more. If the property has been removed while iterating, we
+ // just skip it.
+ __ Push(r3, r5); // Enumerable and current entry.
+ __ CallRuntime(Runtime::kForInFilter);
+ PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
+ __ LoadRR(r5, r2);
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+ __ CmpP(r2, r0);
+ __ beq(loop_statement.continue_label());
+
+ // Update the 'each' property or variable from the possibly filtered
+ // entry in register r5.
+ __ bind(&update_each);
+ __ LoadRR(result_register(), r5);
+ // Perform the assignment as if via '='.
+ {
+ EffectContext context(this);
+ EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
+ PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
+ }
+
+ // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
+ PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+ // Generate code for the body of the loop.
+ Visit(stmt->body());
+
+ // Generate code for the going to the next element by incrementing
+ // the index (smi) stored on top of the stack.
+ __ bind(loop_statement.continue_label());
+ __ pop(r2);
+ __ AddSmiLiteral(r2, r2, Smi::FromInt(1), r0);
+ __ push(r2);
+
+ EmitBackEdgeBookkeeping(stmt, &loop);
+ __ b(&loop);
+
+ // Remove the pointers stored on the stack.
+ __ bind(loop_statement.break_label());
+ DropOperands(5);
+
+ // Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ __ bind(&exit);
+ decrement_loop_depth();
+}
+
+void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
+ FeedbackVectorSlot slot) {
+ DCHECK(NeedsHomeObject(initializer));
+ __ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
+ __ mov(StoreDescriptor::NameRegister(),
+ Operand(isolate()->factory()->home_object_symbol()));
+ __ LoadP(StoreDescriptor::ValueRegister(),
+ MemOperand(sp, offset * kPointerSize));
+ EmitLoadStoreICSlot(slot);
+ CallStoreIC();
+}
+
+void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
+ int offset,
+ FeedbackVectorSlot slot) {
+ DCHECK(NeedsHomeObject(initializer));
+ __ Move(StoreDescriptor::ReceiverRegister(), r2);
+ __ mov(StoreDescriptor::NameRegister(),
+ Operand(isolate()->factory()->home_object_symbol()));
+ __ LoadP(StoreDescriptor::ValueRegister(),
+ MemOperand(sp, offset * kPointerSize));
+ EmitLoadStoreICSlot(slot);
+ CallStoreIC();
+}
+
+void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
+ TypeofMode typeof_mode,
+ Label* slow) {
+ Register current = cp;
+ Register next = r3;
+ Register temp = r4;
+
+ Scope* s = scope();
+ while (s != NULL) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_sloppy_eval()) {
+ // Check that extension is "the hole".
+ __ LoadP(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
+ }
+ // Load next context in chain.
+ __ LoadP(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering cp.
+ current = next;
+ }
+ // If no outer scope calls eval, we do not need to check more
+ // context extensions.
+ if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
+ s = s->outer_scope();
+ }
+
+ if (s->is_eval_scope()) {
+ Label loop, fast;
+ if (!current.is(next)) {
+ __ Move(next, current);
+ }
+ __ bind(&loop);
+ // Terminate at native context.
+ __ LoadP(temp, FieldMemOperand(next, HeapObject::kMapOffset));
+ __ CompareRoot(temp, Heap::kNativeContextMapRootIndex);
+ __ beq(&fast, Label::kNear);
+ // Check that extension is "the hole".
+ __ LoadP(temp, ContextMemOperand(next, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
+ // Load next context in chain.
+ __ LoadP(next, ContextMemOperand(next, Context::PREVIOUS_INDEX));
+ __ b(&loop);
+ __ bind(&fast);
+ }
+
+ // All extension objects were empty and it is safe to use a normal global
+ // load machinery.
+ EmitGlobalVariableLoad(proxy, typeof_mode);
+}
+
+MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
+ Label* slow) {
+ DCHECK(var->IsContextSlot());
+ Register context = cp;
+ Register next = r5;
+ Register temp = r6;
+
+ for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_sloppy_eval()) {
+ // Check that extension is "the hole".
+ __ LoadP(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
+ }
+ __ LoadP(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering cp.
+ context = next;
+ }
+ }
+ // Check that last extension is "the hole".
+ __ LoadP(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
+
+ // This function is used only for loads, not stores, so it's safe to
+ // return an cp-based operand (the write barrier cannot be allowed to
+ // destroy the cp register).
+ return ContextMemOperand(context, var->index());
+}
+
+void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
+ TypeofMode typeof_mode,
+ Label* slow, Label* done) {
+ // Generate fast-case code for variables that might be shadowed by
+ // eval-introduced variables. Eval is used a lot without
+ // introducing variables. In those cases, we do not want to
+ // perform a runtime call for all variables in the scope
+ // containing the eval.
+ Variable* var = proxy->var();
+ if (var->mode() == DYNAMIC_GLOBAL) {
+ EmitLoadGlobalCheckExtensions(proxy, typeof_mode, slow);
+ __ b(done);
+ } else if (var->mode() == DYNAMIC_LOCAL) {
+ Variable* local = var->local_if_not_shadowed();
+ __ LoadP(r2, ContextSlotOperandCheckExtensions(local, slow));
+ if (local->mode() == LET || local->mode() == CONST ||
+ local->mode() == CONST_LEGACY) {
+ __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
+ __ bne(done);
+ if (local->mode() == CONST_LEGACY) {
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ } else { // LET || CONST
+ __ mov(r2, Operand(var->name()));
+ __ push(r2);
+ __ CallRuntime(Runtime::kThrowReferenceError);
+ }
+ }
+ __ b(done);
+ }
+}
+
+void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
+ TypeofMode typeof_mode) {
+ Variable* var = proxy->var();
+ DCHECK(var->IsUnallocatedOrGlobalSlot() ||
+ (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
+ __ LoadGlobalObject(LoadDescriptor::ReceiverRegister());
+ __ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
+ __ mov(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
+ CallLoadIC(typeof_mode);
+}
+
+void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
+ TypeofMode typeof_mode) {
+ // Record position before possible IC call.
+ SetExpressionPosition(proxy);
+ PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
+ Variable* var = proxy->var();
+
+ // Three cases: global variables, lookup variables, and all other types of
+ // variables.
+ switch (var->location()) {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
+ Comment cmnt(masm_, "[ Global variable");
+ EmitGlobalVariableLoad(proxy, typeof_mode);
+ context()->Plug(r2);
+ break;
+ }
+
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
+ case VariableLocation::CONTEXT: {
+ DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_mode);
+ Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
+ : "[ Stack variable");
+ if (NeedsHoleCheckForLoad(proxy)) {
+ Label done;
+ // Let and const need a read barrier.
+ GetVar(r2, var);
+ __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
+ __ bne(&done);
+ if (var->mode() == LET || var->mode() == CONST) {
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
+ __ mov(r2, Operand(var->name()));
+ __ push(r2);
+ __ CallRuntime(Runtime::kThrowReferenceError);
+ } else {
+ // Uninitialized legacy const bindings are unholed.
+ DCHECK(var->mode() == CONST_LEGACY);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ }
+ __ bind(&done);
+ context()->Plug(r2);
+ break;
+ }
+ context()->Plug(var);
+ break;
+ }
+
+ case VariableLocation::LOOKUP: {
+ Comment cmnt(masm_, "[ Lookup variable");
+ Label done, slow;
+ // Generate code for loading from variables potentially shadowed
+ // by eval-introduced variables.
+ EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
+ __ bind(&slow);
+ __ Push(var->name());
+ Runtime::FunctionId function_id =
+ typeof_mode == NOT_INSIDE_TYPEOF
+ ? Runtime::kLoadLookupSlot
+ : Runtime::kLoadLookupSlotInsideTypeof;
+ __ CallRuntime(function_id);
+ __ bind(&done);
+ context()->Plug(r2);
+ }
+ }
+}
+
+void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+ Comment cmnt(masm_, "[ RegExpLiteral");
+ __ LoadP(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ LoadSmiLiteral(r4, Smi::FromInt(expr->literal_index()));
+ __ mov(r3, Operand(expr->pattern()));
+ __ LoadSmiLiteral(r2, Smi::FromInt(expr->flags()));
+ FastCloneRegExpStub stub(isolate());
+ __ CallStub(&stub);
+ context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
+ Expression* expression = (property == NULL) ? NULL : property->value();
+ if (expression == NULL) {
+ __ LoadRoot(r3, Heap::kNullValueRootIndex);
+ PushOperand(r3);
+ } else {
+ VisitForStackValue(expression);
+ if (NeedsHomeObject(expression)) {
+ DCHECK(property->kind() == ObjectLiteral::Property::GETTER ||
+ property->kind() == ObjectLiteral::Property::SETTER);
+ int offset = property->kind() == ObjectLiteral::Property::GETTER ? 2 : 3;
+ EmitSetHomeObject(expression, offset, property->GetSlot());
+ }
+ }
+}
+
+void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+ Comment cmnt(masm_, "[ ObjectLiteral");
+
+ Handle<FixedArray> constant_properties = expr->constant_properties();
+ __ LoadP(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ LoadSmiLiteral(r4, Smi::FromInt(expr->literal_index()));
+ __ mov(r3, Operand(constant_properties));
+ int flags = expr->ComputeFlags();
+ __ LoadSmiLiteral(r2, Smi::FromInt(flags));
+ if (MustCreateObjectLiteralWithRuntime(expr)) {
+ __ Push(r5, r4, r3, r2);
+ __ CallRuntime(Runtime::kCreateObjectLiteral);
+ } else {
+ FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
+ __ CallStub(&stub);
+ }
+ PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+
+ // If result_saved is true the result is on top of the stack. If
+ // result_saved is false the result is in r2.
+ bool result_saved = false;
+
+ AccessorTable accessor_table(zone());
+ int property_index = 0;
+ for (; property_index < expr->properties()->length(); property_index++) {
+ ObjectLiteral::Property* property = expr->properties()->at(property_index);
+ if (property->is_computed_name()) break;
+ if (property->IsCompileTimeValue()) continue;
+
+ Literal* key = property->key()->AsLiteral();
+ Expression* value = property->value();
+ if (!result_saved) {
+ PushOperand(r2); // Save result on stack
+ result_saved = true;
+ }
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ UNREACHABLE();
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value()));
+ // Fall through.
+ case ObjectLiteral::Property::COMPUTED:
+ // It is safe to use [[Put]] here because the boilerplate already
+ // contains computed properties with an uninitialized value.
+ if (key->value()->IsInternalizedString()) {
+ if (property->emit_store()) {
+ VisitForAccumulatorValue(value);
+ DCHECK(StoreDescriptor::ValueRegister().is(r2));
+ __ mov(StoreDescriptor::NameRegister(), Operand(key->value()));
+ __ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
+ EmitLoadStoreICSlot(property->GetSlot(0));
+ CallStoreIC();
+ PrepareForBailoutForId(key->id(), NO_REGISTERS);
+
+ if (NeedsHomeObject(value)) {
+ EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
+ }
+ } else {
+ VisitForEffect(value);
+ }
+ break;
+ }
+ // Duplicate receiver on stack.
+ __ LoadP(r2, MemOperand(sp));
+ PushOperand(r2);
+ VisitForStackValue(key);
+ VisitForStackValue(value);
+ if (property->emit_store()) {
+ if (NeedsHomeObject(value)) {
+ EmitSetHomeObject(value, 2, property->GetSlot());
+ }
+ __ LoadSmiLiteral(r2, Smi::FromInt(SLOPPY)); // PropertyAttributes
+ PushOperand(r2);
+ CallRuntimeWithOperands(Runtime::kSetProperty);
+ } else {
+ DropOperands(3);
+ }
+ break;
+ case ObjectLiteral::Property::PROTOTYPE:
+ // Duplicate receiver on stack.
+ __ LoadP(r2, MemOperand(sp));
+ PushOperand(r2);
+ VisitForStackValue(value);
+ DCHECK(property->emit_store());
+ CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
+ break;
+ case ObjectLiteral::Property::GETTER:
+ if (property->emit_store()) {
+ accessor_table.lookup(key)->second->getter = property;
+ }
+ break;
+ case ObjectLiteral::Property::SETTER:
+ if (property->emit_store()) {
+ accessor_table.lookup(key)->second->setter = property;
+ }
+ break;
+ }
+ }
+
+ // Emit code to define accessors, using only a single call to the runtime for
+ // each pair of corresponding getters and setters.
+ for (AccessorTable::Iterator it = accessor_table.begin();
+ it != accessor_table.end(); ++it) {
+ __ LoadP(r2, MemOperand(sp)); // Duplicate receiver.
+ PushOperand(r2);
+ VisitForStackValue(it->first);
+ EmitAccessor(it->second->getter);
+ EmitAccessor(it->second->setter);
+ __ LoadSmiLiteral(r2, Smi::FromInt(NONE));
+ PushOperand(r2);
+ CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
+ }
+
+ // Object literals have two parts. The "static" part on the left contains no
+ // computed property names, and so we can compute its map ahead of time; see
+ // runtime.cc::CreateObjectLiteralBoilerplate. The second "dynamic" part
+ // starts with the first computed property name, and continues with all
+ // properties to its right. All the code from above initializes the static
+ // component of the object literal, and arranges for the map of the result to
+ // reflect the static order in which the keys appear. For the dynamic
+ // properties, we compile them into a series of "SetOwnProperty" runtime
+ // calls. This will preserve insertion order.
+ for (; property_index < expr->properties()->length(); property_index++) {
+ ObjectLiteral::Property* property = expr->properties()->at(property_index);
+
+ Expression* value = property->value();
+ if (!result_saved) {
+ PushOperand(r2); // Save result on the stack
+ result_saved = true;
+ }
+
+ __ LoadP(r2, MemOperand(sp)); // Duplicate receiver.
+ PushOperand(r2);
+
+ if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
+ DCHECK(!property->is_computed_name());
+ VisitForStackValue(value);
+ DCHECK(property->emit_store());
+ CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
+ } else {
+ EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
+ VisitForStackValue(value);
+ if (NeedsHomeObject(value)) {
+ EmitSetHomeObject(value, 2, property->GetSlot());
+ }
+
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ case ObjectLiteral::Property::COMPUTED:
+ if (property->emit_store()) {
+ PushOperand(Smi::FromInt(NONE));
+ PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+ CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
+ } else {
+ DropOperands(3);
+ }
+ break;
+
+ case ObjectLiteral::Property::PROTOTYPE:
+ UNREACHABLE();
+ break;
+
+ case ObjectLiteral::Property::GETTER:
+ PushOperand(Smi::FromInt(NONE));
+ CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
+ break;
+
+ case ObjectLiteral::Property::SETTER:
+ PushOperand(Smi::FromInt(NONE));
+ CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
+ break;
+ }
+ }
+ }
+
+ if (result_saved) {
+ context()->PlugTOS();
+ } else {
+ context()->Plug(r2);
+ }
+}
+
+void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+ Comment cmnt(masm_, "[ ArrayLiteral");
+
+ Handle<FixedArray> constant_elements = expr->constant_elements();
+ bool has_fast_elements =
+ IsFastObjectElementsKind(expr->constant_elements_kind());
+ Handle<FixedArrayBase> constant_elements_values(
+ FixedArrayBase::cast(constant_elements->get(1)));
+
+ AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
+ if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
+ // If the only customer of allocation sites is transitioning, then
+ // we can turn it off if we don't have anywhere else to transition to.
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
+ __ LoadP(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ LoadSmiLiteral(r4, Smi::FromInt(expr->literal_index()));
+ __ mov(r3, Operand(constant_elements));
+ if (MustCreateArrayLiteralWithRuntime(expr)) {
+ __ LoadSmiLiteral(r2, Smi::FromInt(expr->ComputeFlags()));
+ __ Push(r5, r4, r3, r2);
+ __ CallRuntime(Runtime::kCreateArrayLiteral);
+ } else {
+ FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
+ __ CallStub(&stub);
+ }
+ PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+
+ bool result_saved = false; // Is the result saved to the stack?
+ ZoneList<Expression*>* subexprs = expr->values();
+ int length = subexprs->length();
+
+ // Emit code to evaluate all the non-constant subexpressions and to store
+ // them into the newly cloned array.
+ int array_index = 0;
+ for (; array_index < length; array_index++) {
+ Expression* subexpr = subexprs->at(array_index);
+ DCHECK(!subexpr->IsSpread());
+ // If the subexpression is a literal or a simple materialized literal it
+ // is already set in the cloned array.
+ if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+
+ if (!result_saved) {
+ PushOperand(r2);
+ result_saved = true;
+ }
+ VisitForAccumulatorValue(subexpr);
+
+ __ LoadSmiLiteral(StoreDescriptor::NameRegister(),
+ Smi::FromInt(array_index));
+ __ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+ CallIC(ic);
+
+ PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+ }
+
+ // In case the array literal contains spread expressions it has two parts. The
+ // first part is the "static" array which has a literal index is handled
+ // above. The second part is the part after the first spread expression
+ // (inclusive) and these elements gets appended to the array. Note that the
+ // number elements an iterable produces is unknown ahead of time.
+ if (array_index < length && result_saved) {
+ PopOperand(r2);
+ result_saved = false;
+ }
+ for (; array_index < length; array_index++) {
+ Expression* subexpr = subexprs->at(array_index);
+
+ PushOperand(r2);
+ DCHECK(!subexpr->IsSpread());
+ VisitForStackValue(subexpr);
+ CallRuntimeWithOperands(Runtime::kAppendElement);
+
+ PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+ }
+
+ if (result_saved) {
+ context()->PlugTOS();
+ } else {
+ context()->Plug(r2);
+ }
+}
+
+void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
+
+ Comment cmnt(masm_, "[ Assignment");
+ SetExpressionPosition(expr, INSERT_BREAK);
+
+ Property* property = expr->target()->AsProperty();
+ LhsKind assign_type = Property::GetAssignType(property);
+
+ // Evaluate LHS expression.
+ switch (assign_type) {
+ case VARIABLE:
+ // Nothing to do here.
+ break;
+ case NAMED_PROPERTY:
+ if (expr->is_compound()) {
+ // We need the receiver both on the stack and in the register.
+ VisitForStackValue(property->obj());
+ __ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ } else {
+ VisitForStackValue(property->obj());
+ }
+ break;
+ case NAMED_SUPER_PROPERTY:
+ VisitForStackValue(
+ property->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ property->obj()->AsSuperPropertyReference()->home_object());
+ PushOperand(result_register());
+ if (expr->is_compound()) {
+ const Register scratch = r3;
+ __ LoadP(scratch, MemOperand(sp, kPointerSize));
+ PushOperands(scratch, result_register());
+ }
+ break;
+ case KEYED_SUPER_PROPERTY: {
+ const Register scratch = r3;
+ VisitForStackValue(
+ property->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ property->obj()->AsSuperPropertyReference()->home_object());
+ __ LoadRR(scratch, result_register());
+ VisitForAccumulatorValue(property->key());
+ PushOperands(scratch, result_register());
+ if (expr->is_compound()) {
+ const Register scratch1 = r4;
+ __ LoadP(scratch1, MemOperand(sp, 2 * kPointerSize));
+ PushOperands(scratch1, scratch, result_register());
+ }
+ break;
+ }
+ case KEYED_PROPERTY:
+ if (expr->is_compound()) {
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
+ __ LoadP(LoadDescriptor::ReceiverRegister(),
+ MemOperand(sp, 1 * kPointerSize));
+ __ LoadP(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
+ } else {
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
+ }
+ break;
+ }
+
+ // For compound assignments we need another deoptimization point after the
+ // variable/property load.
+ if (expr->is_compound()) {
+ {
+ AccumulatorValueContext context(this);
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableLoad(expr->target()->AsVariableProxy());
+ PrepareForBailout(expr->target(), TOS_REG);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
+ case NAMED_SUPER_PROPERTY:
+ EmitNamedSuperPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
+ case KEYED_SUPER_PROPERTY:
+ EmitKeyedSuperPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
+ }
+ }
+
+ Token::Value op = expr->binary_op();
+ PushOperand(r2); // Left operand goes on the stack.
+ VisitForAccumulatorValue(expr->value());
+
+ AccumulatorValueContext context(this);
+ if (ShouldInlineSmiCase(op)) {
+ EmitInlineSmiBinaryOp(expr->binary_operation(), op, expr->target(),
+ expr->value());
+ } else {
+ EmitBinaryOp(expr->binary_operation(), op);
+ }
+
+ // Deoptimization point in case the binary operation may have side effects.
+ PrepareForBailout(expr->binary_operation(), TOS_REG);
+ } else {
+ VisitForAccumulatorValue(expr->value());
+ }
+
+ SetExpressionPosition(expr);
+
+ // Store the value.
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+ expr->op(), expr->AssignmentSlot());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(r2);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyAssignment(expr);
+ break;
+ case NAMED_SUPER_PROPERTY:
+ EmitNamedSuperPropertyStore(property);
+ context()->Plug(r2);
+ break;
+ case KEYED_SUPER_PROPERTY:
+ EmitKeyedSuperPropertyStore(property);
+ context()->Plug(r2);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyAssignment(expr);
+ break;
+ }
+}
+
+void FullCodeGenerator::VisitYield(Yield* expr) {
+ Comment cmnt(masm_, "[ Yield");
+ SetExpressionPosition(expr);
+
+ // Evaluate yielded value first; the initial iterator definition depends on
+ // this. It stays on the stack while we update the iterator.
+ VisitForStackValue(expr->expression());
+
+ Label suspend, continuation, post_runtime, resume;
+
+ __ b(&suspend);
+ __ bind(&continuation);
+ // When we arrive here, the stack top is the resume mode and
+ // result_register() holds the input value (the argument given to the
+ // respective resume operation).
+ __ RecordGeneratorContinuation();
+ __ pop(r3);
+ __ CmpSmiLiteral(r3, Smi::FromInt(JSGeneratorObject::RETURN), r0);
+ __ bne(&resume);
+ __ push(result_register());
+ EmitCreateIteratorResult(true);
+ EmitUnwindAndReturn();
+
+ __ bind(&suspend);
+ OperandStackDepthIncrement(1); // Not popped on this path.
+ VisitForAccumulatorValue(expr->generator_object());
+ DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+ __ LoadSmiLiteral(r3, Smi::FromInt(continuation.pos()));
+ __ StoreP(r3, FieldMemOperand(r2, JSGeneratorObject::kContinuationOffset),
+ r0);
+ __ StoreP(cp, FieldMemOperand(r2, JSGeneratorObject::kContextOffset), r0);
+ __ LoadRR(r3, cp);
+ __ RecordWriteField(r2, JSGeneratorObject::kContextOffset, r3, r4,
+ kLRHasBeenSaved, kDontSaveFPRegs);
+ __ AddP(r3, fp, Operand(StandardFrameConstants::kExpressionsOffset));
+ __ CmpP(sp, r3);
+ __ beq(&post_runtime);
+ __ push(r2); // generator object
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ bind(&post_runtime);
+ PopOperand(result_register());
+ EmitReturnSequence();
+
+ __ bind(&resume);
+ context()->Plug(result_register());
+}
+
+void FullCodeGenerator::EmitGeneratorResume(
+ Expression* generator, Expression* value,
+ JSGeneratorObject::ResumeMode resume_mode) {
+ // The value stays in r2, and is ultimately read by the resumed generator, as
+ // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
+ // is read to throw the value when the resumed generator is already closed.
+ // r3 will hold the generator object until the activation has been resumed.
+ VisitForStackValue(generator);
+ VisitForAccumulatorValue(value);
+ PopOperand(r3);
+
+ // Store input value into generator object.
+ __ StoreP(result_register(),
+ FieldMemOperand(r3, JSGeneratorObject::kInputOffset), r0);
+ __ LoadRR(r4, result_register());
+ __ RecordWriteField(r3, JSGeneratorObject::kInputOffset, r4, r5,
+ kLRHasBeenSaved, kDontSaveFPRegs);
+
+ // Load suspended function and context.
+ __ LoadP(cp, FieldMemOperand(r3, JSGeneratorObject::kContextOffset));
+ __ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
+
+ // Load receiver and store as the first argument.
+ __ LoadP(r4, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
+ __ push(r4);
+
+ // Push holes for arguments to generator function. Since the parser forced
+ // context allocation for any variables in generators, the actual argument
+ // values have already been copied into the context and these dummy values
+ // will never be used.
+ __ LoadP(r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadW(
+ r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
+ Label argument_loop, push_frame;
+#if V8_TARGET_ARCH_S390X
+ __ CmpP(r5, Operand::Zero());
+ __ beq(&push_frame, Label::kNear);
+#else
+ __ SmiUntag(r5);
+ __ beq(&push_frame, Label::kNear);
+#endif
+ __ LoadRR(r0, r5);
+ __ bind(&argument_loop);
+ __ push(r4);
+ __ SubP(r0, Operand(1));
+ __ bne(&argument_loop);
+
+ // Enter a new JavaScript frame, and initialize its slots as they were when
+ // the generator was suspended.
+ Label resume_frame, done;
+ __ bind(&push_frame);
+ __ b(r14, &resume_frame); // brasl
+ __ b(&done);
+ __ bind(&resume_frame);
+ // lr = return address.
+ // fp = caller's frame pointer.
+ // cp = callee's context,
+ // r6 = callee's JS function.
+ __ PushStandardFrame(r6);
+
+ // Load the operand stack size.
+ __ LoadP(r5, FieldMemOperand(r3, JSGeneratorObject::kOperandStackOffset));
+ __ LoadP(r5, FieldMemOperand(r5, FixedArray::kLengthOffset));
+ __ SmiUntag(r5);
+
+ // If we are sending a value and there is no operand stack, we can jump back
+ // in directly.
+ Label call_resume;
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ Label slow_resume;
+ __ bne(&slow_resume, Label::kNear);
+ __ LoadP(ip, FieldMemOperand(r6, JSFunction::kCodeEntryOffset));
+ __ LoadP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset));
+ __ SmiUntag(r4);
+ __ AddP(ip, ip, r4);
+ __ LoadSmiLiteral(r4, Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
+ __ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset));
+ __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
+ __ Jump(ip);
+ __ bind(&slow_resume);
+ } else {
+ __ beq(&call_resume);
+ }
+
+ // Otherwise, we push holes for the operand stack and call the runtime to fix
+ // up the stack and the handlers.
+ Label operand_loop;
+ __ LoadRR(r0, r5);
+ __ bind(&operand_loop);
+ __ push(r4);
+ __ SubP(r0, Operand(1));
+ __ bne(&operand_loop);
+
+ __ bind(&call_resume);
+ __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
+ DCHECK(!result_register().is(r3));
+ __ Push(r3, result_register());
+ __ Push(Smi::FromInt(resume_mode));
+ __ CallRuntime(Runtime::kResumeJSGeneratorObject);
+ // Not reached: the runtime call returns elsewhere.
+ __ stop("not-reached");
+
+ __ bind(&done);
+ context()->Plug(result_register());
+}
+
+void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
+ OperandStackDepthIncrement(2);
+ __ Push(reg1, reg2);
+}
+
+void FullCodeGenerator::PushOperands(Register reg1, Register reg2,
+ Register reg3) {
+ OperandStackDepthIncrement(3);
+ __ Push(reg1, reg2, reg3);
+}
+
+void FullCodeGenerator::PushOperands(Register reg1, Register reg2,
+ Register reg3, Register reg4) {
+ OperandStackDepthIncrement(4);
+ __ Push(reg1, reg2, reg3, reg4);
+}
+
+void FullCodeGenerator::PopOperands(Register reg1, Register reg2) {
+ OperandStackDepthDecrement(2);
+ __ Pop(reg1, reg2);
+}
+
+void FullCodeGenerator::EmitOperandStackDepthCheck() {
+ if (FLAG_debug_code) {
+ int expected_diff = StandardFrameConstants::kFixedFrameSizeFromFp +
+ operand_stack_depth_ * kPointerSize;
+ __ SubP(r2, fp, sp);
+ __ CmpP(r2, Operand(expected_diff));
+ __ Assert(eq, kUnexpectedStackDepth);
+ }
+}
+
+void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
+ Label allocate, done_allocate;
+
+ __ Allocate(JSIteratorResult::kSize, r2, r4, r5, &allocate, TAG_OBJECT);
+ __ b(&done_allocate);
+
+ __ bind(&allocate);
+ __ Push(Smi::FromInt(JSIteratorResult::kSize));
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+
+ __ bind(&done_allocate);
+ __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r3);
+ PopOperand(r4);
+ __ LoadRoot(r5,
+ done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
+ __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r3, FieldMemOperand(r2, HeapObject::kMapOffset), r0);
+ __ StoreP(r6, FieldMemOperand(r2, JSObject::kPropertiesOffset), r0);
+ __ StoreP(r6, FieldMemOperand(r2, JSObject::kElementsOffset), r0);
+ __ StoreP(r4, FieldMemOperand(r2, JSIteratorResult::kValueOffset), r0);
+ __ StoreP(r5, FieldMemOperand(r2, JSIteratorResult::kDoneOffset), r0);
+}
+
+void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
+ Token::Value op,
+ Expression* left_expr,
+ Expression* right_expr) {
+ Label done, smi_case, stub_call;
+
+ Register scratch1 = r4;
+ Register scratch2 = r5;
+
+ // Get the arguments.
+ Register left = r3;
+ Register right = r2;
+ PopOperand(left);
+
+ // Perform combined smi check on both operands.
+ __ LoadRR(scratch1, right);
+ __ OrP(scratch1, left);
+ STATIC_ASSERT(kSmiTag == 0);
+ JumpPatchSite patch_site(masm_);
+ patch_site.EmitJumpIfSmi(scratch1, &smi_case);
+
+ __ bind(&stub_call);
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
+ CallIC(code, expr->BinaryOperationFeedbackId());
+ patch_site.EmitPatchInfo();
+ __ b(&done);
+
+ __ bind(&smi_case);
+ // Smi case. This code works the same way as the smi-smi case in the type
+ // recording binary operation stub.
+ switch (op) {
+ case Token::SAR:
+ __ GetLeastBitsFromSmi(scratch1, right, 5);
+ __ ShiftRightArithP(right, left, scratch1);
+ __ ClearRightImm(right, right, Operand(kSmiTagSize + kSmiShiftSize));
+ break;
+ case Token::SHL: {
+ __ GetLeastBitsFromSmi(scratch2, right, 5);
+#if V8_TARGET_ARCH_S390X
+ __ ShiftLeftP(right, left, scratch2);
+#else
+ __ SmiUntag(scratch1, left);
+ __ ShiftLeftP(scratch1, scratch1, scratch2);
+ // Check that the *signed* result fits in a smi
+ __ JumpIfNotSmiCandidate(scratch1, scratch2, &stub_call);
+ __ SmiTag(right, scratch1);
+#endif
+ break;
+ }
+ case Token::SHR: {
+ __ SmiUntag(scratch1, left);
+ __ GetLeastBitsFromSmi(scratch2, right, 5);
+ __ srl(scratch1, scratch2);
+ // Unsigned shift is not allowed to produce a negative number.
+ __ JumpIfNotUnsignedSmiCandidate(scratch1, r0, &stub_call);
+ __ SmiTag(right, scratch1);
+ break;
+ }
+ case Token::ADD: {
+ __ AddAndCheckForOverflow(scratch1, left, right, scratch2, r0);
+ __ BranchOnOverflow(&stub_call);
+ __ LoadRR(right, scratch1);
+ break;
+ }
+ case Token::SUB: {
+ __ SubAndCheckForOverflow(scratch1, left, right, scratch2, r0);
+ __ BranchOnOverflow(&stub_call);
+ __ LoadRR(right, scratch1);
+ break;
+ }
+ case Token::MUL: {
+ Label mul_zero;
+#if V8_TARGET_ARCH_S390X
+ // Remove tag from both operands.
+ __ SmiUntag(ip, right);
+ __ SmiUntag(scratch2, left);
+ __ mr_z(scratch1, ip);
+ // Check for overflowing the smi range - no overflow if higher 33 bits of
+ // the result are identical.
+ __ lr(ip, scratch2); // 32 bit load
+ __ sra(ip, Operand(31));
+ __ cr_z(ip, scratch1); // 32 bit compare
+ __ bne(&stub_call);
+#else
+ __ SmiUntag(ip, right);
+ __ LoadRR(scratch2, left); // load into low order of reg pair
+ __ mr_z(scratch1, ip); // R4:R5 = R5 * ip
+ // Check for overflowing the smi range - no overflow if higher 33 bits of
+ // the result are identical.
+ __ TestIfInt32(scratch1, scratch2, ip);
+ __ bne(&stub_call);
+#endif
+ // Go slow on zero result to handle -0.
+ __ chi(scratch2, Operand::Zero());
+ __ beq(&mul_zero, Label::kNear);
+#if V8_TARGET_ARCH_S390X
+ __ SmiTag(right, scratch2);
+#else
+ __ LoadRR(right, scratch2);
+#endif
+ __ b(&done);
+ // We need -0 if we were multiplying a negative number with 0 to get 0.
+ // We know one of them was zero.
+ __ bind(&mul_zero);
+ __ AddP(scratch2, right, left);
+ __ CmpP(scratch2, Operand::Zero());
+ __ blt(&stub_call);
+ __ LoadSmiLiteral(right, Smi::FromInt(0));
+ break;
+ }
+ case Token::BIT_OR:
+ __ OrP(right, left);
+ break;
+ case Token::BIT_AND:
+ __ AndP(right, left);
+ break;
+ case Token::BIT_XOR:
+ __ XorP(right, left);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ __ bind(&done);
+ context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
+ for (int i = 0; i < lit->properties()->length(); i++) {
+ ObjectLiteral::Property* property = lit->properties()->at(i);
+ Expression* value = property->value();
+
+ Register scratch = r3;
+ if (property->is_static()) {
+ __ LoadP(scratch, MemOperand(sp, kPointerSize)); // constructor
+ } else {
+ __ LoadP(scratch, MemOperand(sp, 0)); // prototype
+ }
+ PushOperand(scratch);
+ EmitPropertyKey(property, lit->GetIdForProperty(i));
+
+ // The static prototype property is read only. We handle the non computed
+ // property name case in the parser. Since this is the only case where we
+ // need to check for an own read only property we special case this so we do
+ // not need to do this for every property.
+ if (property->is_static() && property->is_computed_name()) {
+ __ CallRuntime(Runtime::kThrowIfStaticPrototype);
+ __ push(r2);
+ }
+
+ VisitForStackValue(value);
+ if (NeedsHomeObject(value)) {
+ EmitSetHomeObject(value, 2, property->GetSlot());
+ }
+
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ case ObjectLiteral::Property::PROTOTYPE:
+ UNREACHABLE();
+ case ObjectLiteral::Property::COMPUTED:
+ PushOperand(Smi::FromInt(DONT_ENUM));
+ PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+ CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
+ break;
+
+ case ObjectLiteral::Property::GETTER:
+ PushOperand(Smi::FromInt(DONT_ENUM));
+ CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
+ break;
+
+ case ObjectLiteral::Property::SETTER:
+ PushOperand(Smi::FromInt(DONT_ENUM));
+ CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
+ PopOperand(r3);
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
+ JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
+ CallIC(code, expr->BinaryOperationFeedbackId());
+ patch_site.EmitPatchInfo();
+ context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitAssignment(Expression* expr,
+ FeedbackVectorSlot slot) {
+ DCHECK(expr->IsValidReferenceExpressionOrThis());
+
+ Property* prop = expr->AsProperty();
+ LhsKind assign_type = Property::GetAssignType(prop);
+
+ switch (assign_type) {
+ case VARIABLE: {
+ Variable* var = expr->AsVariableProxy()->var();
+ EffectContext context(this);
+ EmitVariableAssignment(var, Token::ASSIGN, slot);
+ break;
+ }
+ case NAMED_PROPERTY: {
+ PushOperand(r2); // Preserve value.
+ VisitForAccumulatorValue(prop->obj());
+ __ Move(StoreDescriptor::ReceiverRegister(), r2);
+ PopOperand(StoreDescriptor::ValueRegister()); // Restore value.
+ __ mov(StoreDescriptor::NameRegister(),
+ Operand(prop->key()->AsLiteral()->value()));
+ EmitLoadStoreICSlot(slot);
+ CallStoreIC();
+ break;
+ }
+ case NAMED_SUPER_PROPERTY: {
+ PushOperand(r2);
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
+ // stack: value, this; r2: home_object
+ Register scratch = r4;
+ Register scratch2 = r5;
+ __ LoadRR(scratch, result_register()); // home_object
+ __ LoadP(r2, MemOperand(sp, kPointerSize)); // value
+ __ LoadP(scratch2, MemOperand(sp, 0)); // this
+ __ StoreP(scratch2, MemOperand(sp, kPointerSize)); // this
+ __ StoreP(scratch, MemOperand(sp, 0)); // home_object
+ // stack: this, home_object; r2: value
+ EmitNamedSuperPropertyStore(prop);
+ break;
+ }
+ case KEYED_SUPER_PROPERTY: {
+ PushOperand(r2);
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
+ VisitForAccumulatorValue(prop->key());
+ Register scratch = r4;
+ Register scratch2 = r5;
+ __ LoadP(scratch2, MemOperand(sp, 2 * kPointerSize)); // value
+ // stack: value, this, home_object; r3: key, r6: value
+ __ LoadP(scratch, MemOperand(sp, kPointerSize)); // this
+ __ StoreP(scratch, MemOperand(sp, 2 * kPointerSize));
+ __ LoadP(scratch, MemOperand(sp, 0)); // home_object
+ __ StoreP(scratch, MemOperand(sp, kPointerSize));
+ __ StoreP(r2, MemOperand(sp, 0));
+ __ Move(r2, scratch2);
+ // stack: this, home_object, key; r2: value.
+ EmitKeyedSuperPropertyStore(prop);
+ break;
+ }
+ case KEYED_PROPERTY: {
+ PushOperand(r2); // Preserve value.
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ __ Move(StoreDescriptor::NameRegister(), r2);
+ PopOperands(StoreDescriptor::ValueRegister(),
+ StoreDescriptor::ReceiverRegister());
+ EmitLoadStoreICSlot(slot);
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+ CallIC(ic);
+ break;
+ }
+ }
+ context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+ Variable* var, MemOperand location) {
+ __ StoreP(result_register(), location);
+ if (var->IsContextSlot()) {
+ // RecordWrite may destroy all its register arguments.
+ __ LoadRR(r5, result_register());
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(r3, offset, r5, r4, kLRHasBeenSaved,
+ kDontSaveFPRegs);
+ }
+}
+
+void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
+ FeedbackVectorSlot slot) {
+ if (var->IsUnallocated()) {
+ // Global var, const, or let.
+ __ mov(StoreDescriptor::NameRegister(), Operand(var->name()));
+ __ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
+ EmitLoadStoreICSlot(slot);
+ CallStoreIC();
+
+ } else if (var->mode() == LET && op != Token::INIT) {
+ // Non-initializing assignment to let variable needs a write barrier.
+ DCHECK(!var->IsLookupSlot());
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ Label assign;
+ MemOperand location = VarOperand(var, r3);
+ __ LoadP(r5, location);
+ __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
+ __ bne(&assign);
+ __ mov(r5, Operand(var->name()));
+ __ push(r5);
+ __ CallRuntime(Runtime::kThrowReferenceError);
+ // Perform the assignment.
+ __ bind(&assign);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+
+ } else if (var->mode() == CONST && op != Token::INIT) {
+ // Assignment to const variable needs a write barrier.
+ DCHECK(!var->IsLookupSlot());
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ Label const_error;
+ MemOperand location = VarOperand(var, r3);
+ __ LoadP(r5, location);
+ __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
+ __ bne(&const_error, Label::kNear);
+ __ mov(r5, Operand(var->name()));
+ __ push(r5);
+ __ CallRuntime(Runtime::kThrowReferenceError);
+ __ bind(&const_error);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
+
+ } else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
+ // Initializing assignment to const {this} needs a write barrier.
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ Label uninitialized_this;
+ MemOperand location = VarOperand(var, r3);
+ __ LoadP(r5, location);
+ __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
+ __ beq(&uninitialized_this);
+ __ mov(r3, Operand(var->name()));
+ __ push(r3);
+ __ CallRuntime(Runtime::kThrowReferenceError);
+ __ bind(&uninitialized_this);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+
+ } else if (!var->is_const_mode() ||
+ (var->mode() == CONST && op == Token::INIT)) {
+ if (var->IsLookupSlot()) {
+ // Assignment to var.
+ __ Push(var->name());
+ __ Push(r2);
+ __ CallRuntime(is_strict(language_mode())
+ ? Runtime::kStoreLookupSlot_Strict
+ : Runtime::kStoreLookupSlot_Sloppy);
+ } else {
+ // Assignment to var or initializing assignment to let/const in harmony
+ // mode.
+ DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
+ MemOperand location = VarOperand(var, r3);
+ if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
+ // Check for an uninitialized let binding.
+ __ LoadP(r4, location);
+ __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
+ __ Check(eq, kLetBindingReInitialization);
+ }
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ }
+ } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
+ // Const initializers need a write barrier.
+ DCHECK(!var->IsParameter()); // No const parameters.
+ if (var->IsLookupSlot()) {
+ __ push(r2);
+ __ mov(r2, Operand(var->name()));
+ __ Push(cp, r2); // Context and name.
+ __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
+ } else {
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ Label skip;
+ MemOperand location = VarOperand(var, r3);
+ __ LoadP(r4, location);
+ __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
+ __ bne(&skip);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ __ bind(&skip);
+ }
+
+ } else {
+ DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
+ if (is_strict(language_mode())) {
+ __ CallRuntime(Runtime::kThrowConstAssignError);
+ }
+ // Silently ignore store in sloppy mode.
+ }
+}
+
+void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a named store IC.
+ Property* prop = expr->target()->AsProperty();
+ DCHECK(prop != NULL);
+ DCHECK(prop->key()->IsLiteral());
+
+ __ mov(StoreDescriptor::NameRegister(),
+ Operand(prop->key()->AsLiteral()->value()));
+ PopOperand(StoreDescriptor::ReceiverRegister());
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallStoreIC();
+
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
+ // Assignment to named property of super.
+ // r2 : value
+ // stack : receiver ('this'), home_object
+ DCHECK(prop != NULL);
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(key != NULL);
+
+ PushOperand(key->value());
+ PushOperand(r2);
+ CallRuntimeWithOperands((is_strict(language_mode())
+ ? Runtime::kStoreToSuper_Strict
+ : Runtime::kStoreToSuper_Sloppy));
+}
+
+void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
+ // Assignment to named property of super.
+ // r2 : value
+ // stack : receiver ('this'), home_object, key
+ DCHECK(prop != NULL);
+
+ PushOperand(r2);
+ CallRuntimeWithOperands((is_strict(language_mode())
+ ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy));
+}
+
+void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a keyed store IC.
+ PopOperands(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister());
+ DCHECK(StoreDescriptor::ValueRegister().is(r2));
+
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallIC(ic);
+
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(r2);
+}
+
+void FullCodeGenerator::VisitProperty(Property* expr) {
+ Comment cmnt(masm_, "[ Property");
+ SetExpressionPosition(expr);
+
+ Expression* key = expr->key();
+
+ if (key->IsPropertyName()) {
+ if (!expr->IsSuperAccess()) {
+ VisitForAccumulatorValue(expr->obj());
+ __ Move(LoadDescriptor::ReceiverRegister(), r2);
+ EmitNamedPropertyLoad(expr);
+ } else {
+ VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ expr->obj()->AsSuperPropertyReference()->home_object());
+ EmitNamedSuperPropertyLoad(expr);
+ }
+ } else {
+ if (!expr->IsSuperAccess()) {
+ VisitForStackValue(expr->obj());
+ VisitForAccumulatorValue(expr->key());
+ __ Move(LoadDescriptor::NameRegister(), r2);
+ PopOperand(LoadDescriptor::ReceiverRegister());
+ EmitKeyedPropertyLoad(expr);
+ } else {
+ VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ expr->obj()->AsSuperPropertyReference()->home_object());
+ VisitForStackValue(expr->key());
+ EmitKeyedSuperPropertyLoad(expr);
+ }
+ }
+ PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+ context()->Plug(r2);
+}
+
+void FullCodeGenerator::CallIC(Handle<Code> code, TypeFeedbackId ast_id) {
+ ic_total_count_++;
+ __ Call(code, RelocInfo::CODE_TARGET, ast_id);
+}
+
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
+ Expression* callee = expr->expression();
+
+ // Get the target function.
+ ConvertReceiverMode convert_mode;
+ if (callee->IsVariableProxy()) {
+ {
+ StackValueContext context(this);
+ EmitVariableLoad(callee->AsVariableProxy());
+ PrepareForBailout(callee, NO_REGISTERS);
+ }
+ // Push undefined as receiver. This is patched in the method prologue if it
+ // is a sloppy mode method.
+ __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+ PushOperand(r1);
+ convert_mode = ConvertReceiverMode::kNullOrUndefined;
+ } else {
+ // Load the function from the receiver.
+ DCHECK(callee->IsProperty());
+ DCHECK(!callee->AsProperty()->IsSuperAccess());
+ __ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ EmitNamedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ // Push the target function under the receiver.
+ __ LoadP(r1, MemOperand(sp, 0));
+ PushOperand(r1);
+ __ StoreP(r2, MemOperand(sp, kPointerSize));
+ convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
+ }
+
+ EmitCall(expr, convert_mode);
+}
+
+void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
+ Expression* callee = expr->expression();
+ DCHECK(callee->IsProperty());
+ Property* prop = callee->AsProperty();
+ DCHECK(prop->IsSuperAccess());
+ SetExpressionPosition(prop);
+
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(!key->value()->IsSmi());
+ // Load the function from the receiver.
+ const Register scratch = r3;
+ SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
+ VisitForAccumulatorValue(super_ref->home_object());
+ __ LoadRR(scratch, r2);
+ VisitForAccumulatorValue(super_ref->this_var());
+ PushOperands(scratch, r2, r2, scratch);
+ PushOperand(key->value());
+
+ // Stack here:
+ // - home_object
+ // - this (receiver)
+ // - this (receiver) <-- LoadFromSuper will pop here and below.
+ // - home_object
+ // - key
+ CallRuntimeWithOperands(Runtime::kLoadFromSuper);
+
+ // Replace home_object with target function.
+ __ StoreP(r2, MemOperand(sp, kPointerSize));
+
+ // Stack here:
+ // - target function
+ // - this (receiver)
+ EmitCall(expr);
+}
+
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr, Expression* key) {
+ // Load the key.
+ VisitForAccumulatorValue(key);
+
+ Expression* callee = expr->expression();
+
+ // Load the function from the receiver.
+ DCHECK(callee->IsProperty());
+ __ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ __ Move(LoadDescriptor::NameRegister(), r2);
+ EmitKeyedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+
+ // Push the target function under the receiver.
+ __ LoadP(ip, MemOperand(sp, 0));
+ PushOperand(ip);
+ __ StoreP(r2, MemOperand(sp, kPointerSize));
+
+ EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
+}
+
+void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
+ Expression* callee = expr->expression();
+ DCHECK(callee->IsProperty());
+ Property* prop = callee->AsProperty();
+ DCHECK(prop->IsSuperAccess());
+
+ SetExpressionPosition(prop);
+ // Load the function from the receiver.
+ const Register scratch = r3;
+ SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
+ VisitForAccumulatorValue(super_ref->home_object());
+ __ LoadRR(scratch, r2);
+ VisitForAccumulatorValue(super_ref->this_var());
+ PushOperands(scratch, r2, r2, scratch);
+ VisitForStackValue(prop->key());
+
+ // Stack here:
+ // - home_object
+ // - this (receiver)
+ // - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
+ // - home_object
+ // - key
+ CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
+
+ // Replace home_object with target function.
+ __ StoreP(r2, MemOperand(sp, kPointerSize));
+
+ // Stack here:
+ // - target function
+ // - this (receiver)
+ EmitCall(expr);
+}
+
+void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
+ // Load the arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+ SetCallPosition(expr, expr->tail_call_mode());
+ if (expr->tail_call_mode() == TailCallMode::kAllow) {
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceTailCall);
+ }
+ // Update profiling counters before the tail call since we will
+ // not return to this function.
+ EmitProfilingCounterHandlingForReturnSequence(true);
+ }
+ Handle<Code> ic =
+ CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
+ .code();
+ __ LoadSmiLiteral(r5, SmiFromSlot(expr->CallFeedbackICSlot()));
+ __ LoadP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+ // Don't assign a type feedback id to the IC, since type feedback is provided
+ // by the vector above.
+ CallIC(ic);
+ OperandStackDepthDecrement(arg_count + 1);
+
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, r2);
+}
+
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+ // r6: copy of the first argument or undefined if it doesn't exist.
+ if (arg_count > 0) {
+ __ LoadP(r6, MemOperand(sp, arg_count * kPointerSize), r0);
+ } else {
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+ }
+
+ // r5: the receiver of the enclosing function.
+ __ LoadP(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+
+ // r4: language mode.
+ __ LoadSmiLiteral(r4, Smi::FromInt(language_mode()));
+
+ // r3: the start position of the scope the calls resides in.
+ __ LoadSmiLiteral(r3, Smi::FromInt(scope()->start_position()));
+
+ // Do the runtime call.
+ __ Push(r6, r5, r4, r3);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
+}
+
+// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
+void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
+ VariableProxy* callee = expr->expression()->AsVariableProxy();
+ if (callee->var()->IsLookupSlot()) {
+ Label slow, done;
+ SetExpressionPosition(callee);
+ // Generate code for loading from variables potentially shadowed by
+ // eval-introduced variables.
+ EmitDynamicLookupFastCase(callee, NOT_INSIDE_TYPEOF, &slow, &done);
+
+ __ bind(&slow);
+ // Call the runtime to find the function to call (returned in r2) and
+ // the object holding it (returned in r3).
+ __ Push(callee->name());
+ __ CallRuntime(Runtime::kLoadLookupSlotForCall);
+ PushOperands(r2, r3); // Function, receiver.
+ PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
+
+ // If fast case code has been generated, emit code to push the function
+ // and receiver and have the slow path jump around this code.
+ if (done.is_linked()) {
+ Label call;
+ __ b(&call);
+ __ bind(&done);
+ // Push function.
+ __ push(r2);
+ // Pass undefined as the receiver, which is the WithBaseObject of a
+ // non-object environment record. If the callee is sloppy, it will patch
+ // it up to be the global receiver.
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ push(r3);
+ __ bind(&call);
+ }
+ } else {
+ VisitForStackValue(callee);
+ // refEnv.WithBaseObject()
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ PushOperand(r4); // Reserved receiver slot.
+ }
+}
+
+void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call. Then we call the resolved
+ // function using the given arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+
+ PushCalleeAndWithBaseObject(expr);
+
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Push a copy of the function (found below the arguments) and
+ // resolve eval.
+ __ LoadP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+ __ push(r3);
+ EmitResolvePossiblyDirectEval(arg_count);
+
+ // Touch up the stack with the resolved function.
+ __ StoreP(r2, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+
+ PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+
+ // Record source position for debugger.
+ SetCallPosition(expr);
+ __ LoadP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+ __ mov(r2, Operand(arg_count));
+ __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ expr->tail_call_mode()),
+ RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, r2);
+}
+
+void FullCodeGenerator::VisitCallNew(CallNew* expr) {
+ Comment cmnt(masm_, "[ CallNew");
+ // According to ECMA-262, section 11.2.2, page 44, the function
+ // expression in new calls must be evaluated before the
+ // arguments.
+
+ // Push constructor on the stack. If it's not a function it's used as
+ // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
+ // ignored.
+ DCHECK(!expr->expression()->IsSuperPropertyReference());
+ VisitForStackValue(expr->expression());
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ SetConstructCallPosition(expr);
+
+ // Load function and argument count into r3 and r2.
+ __ mov(r2, Operand(arg_count));
+ __ LoadP(r3, MemOperand(sp, arg_count * kPointerSize), r0);
+
+ // Record call targets in unoptimized code.
+ __ EmitLoadTypeFeedbackVector(r4);
+ __ LoadSmiLiteral(r5, SmiFromSlot(expr->CallNewFeedbackSlot()));
+
+ CallConstructStub stub(isolate());
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
+ PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
+ // Restore context register.
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
+ SuperCallReference* super_call_ref =
+ expr->expression()->AsSuperCallReference();
+ DCHECK_NOT_NULL(super_call_ref);
+
+ // Push the super constructor target on the stack (may be null,
+ // but the Construct builtin can deal with that properly).
+ VisitForAccumulatorValue(super_call_ref->this_function_var());
+ __ AssertFunction(result_register());
+ __ LoadP(result_register(),
+ FieldMemOperand(result_register(), HeapObject::kMapOffset));
+ __ LoadP(result_register(),
+ FieldMemOperand(result_register(), Map::kPrototypeOffset));
+ PushOperand(result_register());
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ SetConstructCallPosition(expr);
+
+ // Load new target into r5.
+ VisitForAccumulatorValue(super_call_ref->new_target_var());
+ __ LoadRR(r5, result_register());
+
+ // Load function and argument count into r1 and r0.
+ __ mov(r2, Operand(arg_count));
+ __ LoadP(r3, MemOperand(sp, arg_count * kPointerSize));
+
+ __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
+
+ RecordJSReturnSite(expr);
+
+ // Restore context register.
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false, skip_lookup;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ __ TestIfSmi(r2);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ JumpIfSmi(r2, if_false);
+ __ CompareObjectType(r2, r3, r3, FIRST_JS_RECEIVER_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(ge, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ JumpIfSmi(r2, if_false);
+ __ CompareObjectType(r2, r3, r3, JS_ARRAY_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+void FullCodeGenerator::EmitIsTypedArray(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ JumpIfSmi(r2, if_false);
+ __ CompareObjectType(r2, r3, r3, JS_TYPED_ARRAY_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ JumpIfSmi(r2, if_false);
+ __ CompareObjectType(r2, r3, r3, JS_REGEXP_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ JumpIfSmi(r2, if_false);
+ __ CompareObjectType(r2, r3, r3, JS_PROXY_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+ Label done, null, function, non_function_constructor;
+
+ VisitForAccumulatorValue(args->at(0));
+
+ // If the object is not a JSReceiver, we return null.
+ __ JumpIfSmi(r2, &null);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(r2, r2, r3, FIRST_JS_RECEIVER_TYPE);
+ // Map is now in r2.
+ __ blt(&null);
+
+ // Return 'Function' for JSFunction and JSBoundFunction objects.
+ __ CmpLogicalP(r3, Operand(FIRST_FUNCTION_TYPE));
+ STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
+ __ bge(&function);
+
+ // Check if the constructor in the map is a JS function.
+ Register instance_type = r4;
+ __ GetMapConstructor(r2, r2, r3, instance_type);
+ __ CmpP(instance_type, Operand(JS_FUNCTION_TYPE));
+ __ bne(&non_function_constructor, Label::kNear);
+
+ // r2 now contains the constructor function. Grab the
+ // instance class name from there.
+ __ LoadP(r2, FieldMemOperand(r2, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r2,
+ FieldMemOperand(r2, SharedFunctionInfo::kInstanceClassNameOffset));
+ __ b(&done, Label::kNear);
+
+ // Functions have class 'Function'.
+ __ bind(&function);
+ __ LoadRoot(r2, Heap::kFunction_stringRootIndex);
+ __ b(&done, Label::kNear);
+
+ // Objects with a non-function constructor have class 'Object'.
+ __ bind(&non_function_constructor);
+ __ LoadRoot(r2, Heap::kObject_stringRootIndex);
+ __ b(&done, Label::kNear);
+
+ // Non-JS objects have class null.
+ __ bind(&null);
+ __ LoadRoot(r2, Heap::kNullValueRootIndex);
+
+ // All done.
+ __ bind(&done);
+
+ context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+ VisitForAccumulatorValue(args->at(0)); // Load the object.
+
+ Label done;
+ // If the object is a smi return the object.
+ __ JumpIfSmi(r2, &done);
+ // If the object is not a value type, return the object.
+ __ CompareObjectType(r2, r3, r3, JS_VALUE_TYPE);
+ __ bne(&done, Label::kNear);
+ __ LoadP(r2, FieldMemOperand(r2, JSValue::kValueOffset));
+
+ __ bind(&done);
+ context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(3, args->length());
+
+ Register string = r2;
+ Register index = r3;
+ Register value = r4;
+
+ VisitForStackValue(args->at(0)); // index
+ VisitForStackValue(args->at(1)); // value
+ VisitForAccumulatorValue(args->at(2)); // string
+ PopOperands(index, value);
+
+ if (FLAG_debug_code) {
+ __ TestIfSmi(value);
+ __ Check(eq, kNonSmiValue, cr0);
+ __ TestIfSmi(index);
+ __ Check(eq, kNonSmiIndex, cr0);
+ __ SmiUntag(index);
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ __ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
+ __ SmiTag(index);
+ }
+
+ __ SmiUntag(value);
+ __ AddP(ip, string, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ SmiToByteArrayOffset(r1, index);
+ __ StoreByte(value, MemOperand(ip, r1));
+ context()->Plug(string);
+}
+
+void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(3, args->length());
+
+ Register string = r2;
+ Register index = r3;
+ Register value = r4;
+
+ VisitForStackValue(args->at(0)); // index
+ VisitForStackValue(args->at(1)); // value
+ VisitForAccumulatorValue(args->at(2)); // string
+ PopOperands(index, value);
+
+ if (FLAG_debug_code) {
+ __ TestIfSmi(value);
+ __ Check(eq, kNonSmiValue, cr0);
+ __ TestIfSmi(index);
+ __ Check(eq, kNonSmiIndex, cr0);
+ __ SmiUntag(index, index);
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
+ __ SmiTag(index, index);
+ }
+
+ __ SmiUntag(value);
+ __ SmiToShortArrayOffset(r1, index);
+ __ StoreHalfWord(value, MemOperand(r1, string, SeqTwoByteString::kHeaderSize -
+ kHeapObjectTag));
+ context()->Plug(string);
+}
+
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+ VisitForAccumulatorValue(args->at(0));
+
+ Label done;
+ StringCharFromCodeGenerator generator(r2, r3);
+ generator.GenerateFast(masm_);
+ __ b(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ bind(&done);
+ context()->Plug(r3);
+}
+
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 2);
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ Register object = r3;
+ Register index = r2;
+ Register result = r5;
+
+ PopOperand(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharCodeAtGenerator generator(object, index, result, &need_conversion,
+ &need_conversion, &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
+ __ b(&done);
+
+ __ bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // NaN.
+ __ LoadRoot(result, Heap::kNanValueRootIndex);
+ __ b(&done);
+
+ __ bind(&need_conversion);
+ // Load the undefined value into the result register, which will
+ // trigger conversion.
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ b(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
+
+ __ bind(&done);
+ context()->Plug(result);
+}
+
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 2);
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ Register object = r3;
+ Register index = r2;
+ Register scratch = r5;
+ Register result = r2;
+
+ PopOperand(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharAtGenerator generator(object, index, scratch, result,
+ &need_conversion, &need_conversion,
+ &index_out_of_range, STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
+ __ b(&done);
+
+ __ bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // the empty string.
+ __ LoadRoot(result, Heap::kempty_stringRootIndex);
+ __ b(&done);
+
+ __ bind(&need_conversion);
+ // Move smi zero into the result register, which will trigger
+ // conversion.
+ __ LoadSmiLiteral(result, Smi::FromInt(0));
+ __ b(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
+
+ __ bind(&done);
+ context()->Plug(result);
+}
+
+void FullCodeGenerator::EmitCall(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_LE(2, args->length());
+ // Push target, receiver and arguments onto the stack.
+ for (Expression* const arg : *args) {
+ VisitForStackValue(arg);
+ }
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+ // Move target to r3.
+ int const argc = args->length() - 2;
+ __ LoadP(r3, MemOperand(sp, (argc + 1) * kPointerSize));
+ // Call the target.
+ __ mov(r2, Operand(argc));
+ __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(argc + 1);
+ // Restore context register.
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ context()->DropAndPlug(1, r2);
+}
+
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ LoadlW(r2, FieldMemOperand(r2, String::kHashFieldOffset));
+ __ AndP(r0, r2, Operand(String::kContainsCachedArrayIndexMask));
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+ VisitForAccumulatorValue(args->at(0));
+
+ __ AssertString(r2);
+
+ __ LoadlW(r2, FieldMemOperand(r2, String::kHashFieldOffset));
+ __ IndexFromHash(r2, r2);
+
+ context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+ VisitForAccumulatorValue(args->at(0));
+ __ AssertFunction(r2);
+ __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ LoadP(r2, FieldMemOperand(r2, Map::kPrototypeOffset));
+ context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
+ DCHECK_EQ(0, expr->arguments()->length());
+ __ LoadNativeContextSlot(Context::ORDINARY_HAS_INSTANCE_INDEX, r2);
+ context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
+ DCHECK(expr->arguments()->length() == 0);
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(isolate());
+ __ mov(ip, Operand(debug_is_active));
+ __ LoadlB(r2, MemOperand(ip));
+ __ SmiTag(r2);
+ context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(2, args->length());
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+
+ Label runtime, done;
+
+ __ Allocate(JSIteratorResult::kSize, r2, r4, r5, &runtime, TAG_OBJECT);
+ __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r3);
+ __ Pop(r4, r5);
+ __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r3, FieldMemOperand(r2, HeapObject::kMapOffset), r0);
+ __ StoreP(r6, FieldMemOperand(r2, JSObject::kPropertiesOffset), r0);
+ __ StoreP(r6, FieldMemOperand(r2, JSObject::kElementsOffset), r0);
+ __ StoreP(r4, FieldMemOperand(r2, JSIteratorResult::kValueOffset), r0);
+ __ StoreP(r5, FieldMemOperand(r2, JSIteratorResult::kDoneOffset), r0);
+ STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
+ __ b(&done);
+
+ __ bind(&runtime);
+ CallRuntimeWithOperands(Runtime::kCreateIterResultObject);
+
+ __ bind(&done);
+ context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
+ // Push function.
+ __ LoadNativeContextSlot(expr->context_index(), r2);
+ PushOperand(r2);
+
+ // Push undefined as the receiver.
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ PushOperand(r2);
+}
+
+void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+
+ SetCallPosition(expr);
+ __ LoadP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+ __ mov(r2, Operand(arg_count));
+ __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
+ RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
+
+ // Restore context register.
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::DELETE: {
+ Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
+ Property* property = expr->expression()->AsProperty();
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+
+ if (property != NULL) {
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
+ CallRuntimeWithOperands(is_strict(language_mode())
+ ? Runtime::kDeleteProperty_Strict
+ : Runtime::kDeleteProperty_Sloppy);
+ context()->Plug(r2);
+ } else if (proxy != NULL) {
+ Variable* var = proxy->var();
+ // Delete of an unqualified identifier is disallowed in strict mode but
+ // "delete this" is allowed.
+ bool is_this = var->HasThisName(isolate());
+ DCHECK(is_sloppy(language_mode()) || is_this);
+ if (var->IsUnallocatedOrGlobalSlot()) {
+ __ LoadGlobalObject(r4);
+ __ mov(r3, Operand(var->name()));
+ __ Push(r4, r3);
+ __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
+ context()->Plug(r2);
+ } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+ // Result of deleting non-global, non-dynamic variables is false.
+ // The subexpression does not have side effects.
+ context()->Plug(is_this);
+ } else {
+ // Non-global variable. Call the runtime to try to delete from the
+ // context where the variable was introduced.
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kDeleteLookupSlot);
+ context()->Plug(r2);
+ }
+ } else {
+ // Result of deleting non-property, non-variable reference is true.
+ // The subexpression may have side effects.
+ VisitForEffect(expr->expression());
+ context()->Plug(true);
+ }
+ break;
+ }
+
+ case Token::VOID: {
+ Comment cmnt(masm_, "[ UnaryOperation (VOID)");
+ VisitForEffect(expr->expression());
+ context()->Plug(Heap::kUndefinedValueRootIndex);
+ break;
+ }
+
+ case Token::NOT: {
+ Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+ if (context()->IsEffect()) {
+ // Unary NOT has no side effects so it's only necessary to visit the
+ // subexpression. Match the optimizing compiler by not branching.
+ VisitForEffect(expr->expression());
+ } else if (context()->IsTest()) {
+ const TestContext* test = TestContext::cast(context());
+ // The labels are swapped for the recursive call.
+ VisitForControl(expr->expression(), test->false_label(),
+ test->true_label(), test->fall_through());
+ context()->Plug(test->true_label(), test->false_label());
+ } else {
+ // We handle value contexts explicitly rather than simply visiting
+ // for control and plugging the control flow into the context,
+ // because we need to prepare a pair of extra administrative AST ids
+ // for the optimizing compiler.
+ DCHECK(context()->IsAccumulatorValue() || context()->IsStackValue());
+ Label materialize_true, materialize_false, done;
+ VisitForControl(expr->expression(), &materialize_false,
+ &materialize_true, &materialize_true);
+ if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
+ __ bind(&materialize_true);
+ PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+ __ LoadRoot(r2, Heap::kTrueValueRootIndex);
+ if (context()->IsStackValue()) __ push(r2);
+ __ b(&done);
+ __ bind(&materialize_false);
+ PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+ __ LoadRoot(r2, Heap::kFalseValueRootIndex);
+ if (context()->IsStackValue()) __ push(r2);
+ __ bind(&done);
+ }
+ break;
+ }
+
+ case Token::TYPEOF: {
+ Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
+ {
+ AccumulatorValueContext context(this);
+ VisitForTypeofValue(expr->expression());
+ }
+ __ LoadRR(r5, r2);
+ TypeofStub typeof_stub(isolate());
+ __ CallStub(&typeof_stub);
+ context()->Plug(r2);
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ DCHECK(expr->expression()->IsValidReferenceExpressionOrThis());
+
+ Comment cmnt(masm_, "[ CountOperation");
+
+ Property* prop = expr->expression()->AsProperty();
+ LhsKind assign_type = Property::GetAssignType(prop);
+
+ // Evaluate expression and get value.
+ if (assign_type == VARIABLE) {
+ DCHECK(expr->expression()->AsVariableProxy()->var() != NULL);
+ AccumulatorValueContext context(this);
+ EmitVariableLoad(expr->expression()->AsVariableProxy());
+ } else {
+ // Reserve space for result of postfix operation.
+ if (expr->is_postfix() && !context()->IsEffect()) {
+ __ LoadSmiLiteral(ip, Smi::FromInt(0));
+ PushOperand(ip);
+ }
+ switch (assign_type) {
+ case NAMED_PROPERTY: {
+ // Put the object both on the stack and in the register.
+ VisitForStackValue(prop->obj());
+ __ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ EmitNamedPropertyLoad(prop);
+ break;
+ }
+
+ case NAMED_SUPER_PROPERTY: {
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
+ PushOperand(result_register());
+ const Register scratch = r3;
+ __ LoadP(scratch, MemOperand(sp, kPointerSize));
+ PushOperands(scratch, result_register());
+ EmitNamedSuperPropertyLoad(prop);
+ break;
+ }
+
+ case KEYED_SUPER_PROPERTY: {
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
+ const Register scratch = r3;
+ const Register scratch1 = r4;
+ __ LoadRR(scratch, result_register());
+ VisitForAccumulatorValue(prop->key());
+ PushOperands(scratch, result_register());
+ __ LoadP(scratch1, MemOperand(sp, 2 * kPointerSize));
+ PushOperands(scratch1, scratch, result_register());
+ EmitKeyedSuperPropertyLoad(prop);
+ break;
+ }
+
+ case KEYED_PROPERTY: {
+ VisitForStackValue(prop->obj());
+ VisitForStackValue(prop->key());
+ __ LoadP(LoadDescriptor::ReceiverRegister(),
+ MemOperand(sp, 1 * kPointerSize));
+ __ LoadP(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
+ EmitKeyedPropertyLoad(prop);
+ break;
+ }
+
+ case VARIABLE:
+ UNREACHABLE();
+ }
+ }
+
+ // We need a second deoptimization point after loading the value
+ // in case evaluating the property load my have a side effect.
+ if (assign_type == VARIABLE) {
+ PrepareForBailout(expr->expression(), TOS_REG);
+ } else {
+ PrepareForBailoutForId(prop->LoadId(), TOS_REG);
+ }
+
+ // Inline smi case if we are in a loop.
+ Label stub_call, done;
+ JumpPatchSite patch_site(masm_);
+
+ int count_value = expr->op() == Token::INC ? 1 : -1;
+ if (ShouldInlineSmiCase(expr->op())) {
+ Label slow;
+ patch_site.EmitJumpIfNotSmi(r2, &slow);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(r2);
+ break;
+ case NAMED_PROPERTY:
+ __ StoreP(r2, MemOperand(sp, kPointerSize));
+ break;
+ case NAMED_SUPER_PROPERTY:
+ __ StoreP(r2, MemOperand(sp, 2 * kPointerSize));
+ break;
+ case KEYED_PROPERTY:
+ __ StoreP(r2, MemOperand(sp, 2 * kPointerSize));
+ break;
+ case KEYED_SUPER_PROPERTY:
+ __ StoreP(r2, MemOperand(sp, 3 * kPointerSize));
+ break;
+ }
+ }
+ }
+
+ Register scratch1 = r3;
+ Register scratch2 = r4;
+ __ LoadSmiLiteral(scratch1, Smi::FromInt(count_value));
+ __ AddAndCheckForOverflow(r2, r2, scratch1, scratch2, r0);
+ __ BranchOnNoOverflow(&done);
+ // Call stub. Undo operation first.
+ __ SubP(r2, r2, scratch1);
+ __ b(&stub_call);
+ __ bind(&slow);
+ }
+
+ // Convert old value into a number.
+ ToNumberStub convert_stub(isolate());
+ __ CallStub(&convert_stub);
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ PushOperand(r2);
+ break;
+ case NAMED_PROPERTY:
+ __ StoreP(r2, MemOperand(sp, kPointerSize));
+ break;
+ case NAMED_SUPER_PROPERTY:
+ __ StoreP(r2, MemOperand(sp, 2 * kPointerSize));
+ break;
+ case KEYED_PROPERTY:
+ __ StoreP(r2, MemOperand(sp, 2 * kPointerSize));
+ break;
+ case KEYED_SUPER_PROPERTY:
+ __ StoreP(r2, MemOperand(sp, 3 * kPointerSize));
+ break;
+ }
+ }
+ }
+
+ __ bind(&stub_call);
+ __ LoadRR(r3, r2);
+ __ LoadSmiLiteral(r2, Smi::FromInt(count_value));
+
+ SetExpressionPosition(expr);
+
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD).code();
+ CallIC(code, expr->CountBinOpFeedbackId());
+ patch_site.EmitPatchInfo();
+ __ bind(&done);
+
+ // Store the value returned in r2.
+ switch (assign_type) {
+ case VARIABLE:
+ if (expr->is_postfix()) {
+ {
+ EffectContext context(this);
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Token::ASSIGN, expr->CountSlot());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context.Plug(r2);
+ }
+ // For all contexts except EffectConstant We have the result on
+ // top of the stack.
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Token::ASSIGN, expr->CountSlot());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(r2);
+ }
+ break;
+ case NAMED_PROPERTY: {
+ __ mov(StoreDescriptor::NameRegister(),
+ Operand(prop->key()->AsLiteral()->value()));
+ PopOperand(StoreDescriptor::ReceiverRegister());
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallStoreIC();
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(r2);
+ }
+ break;
+ }
+ case NAMED_SUPER_PROPERTY: {
+ EmitNamedSuperPropertyStore(prop);
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(r2);
+ }
+ break;
+ }
+ case KEYED_SUPER_PROPERTY: {
+ EmitKeyedSuperPropertyStore(prop);
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(r2);
+ }
+ break;
+ }
+ case KEYED_PROPERTY: {
+ PopOperands(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallIC(ic);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(r2);
+ }
+ break;
+ }
+ }
+}
+
+void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+ Expression* sub_expr,
+ Handle<String> check) {
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ {
+ AccumulatorValueContext context(this);
+ VisitForTypeofValue(sub_expr);
+ }
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+
+ Factory* factory = isolate()->factory();
+ if (String::Equals(check, factory->number_string())) {
+ __ JumpIfSmi(r2, if_true);
+ __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ CompareRoot(r2, Heap::kHeapNumberMapRootIndex);
+ Split(eq, if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->string_string())) {
+ __ JumpIfSmi(r2, if_false);
+ __ CompareObjectType(r2, r2, r3, FIRST_NONSTRING_TYPE);
+ Split(lt, if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->symbol_string())) {
+ __ JumpIfSmi(r2, if_false);
+ __ CompareObjectType(r2, r2, r3, SYMBOL_TYPE);
+ Split(eq, if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->boolean_string())) {
+ __ CompareRoot(r2, Heap::kTrueValueRootIndex);
+ __ beq(if_true);
+ __ CompareRoot(r2, Heap::kFalseValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->undefined_string())) {
+ __ CompareRoot(r2, Heap::kNullValueRootIndex);
+ __ beq(if_false);
+ __ JumpIfSmi(r2, if_false);
+ // Check for undetectable objects => true.
+ __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ tm(FieldMemOperand(r2, Map::kBitFieldOffset),
+ Operand(1 << Map::kIsUndetectable));
+ Split(ne, if_true, if_false, fall_through);
+
+ } else if (String::Equals(check, factory->function_string())) {
+ __ JumpIfSmi(r2, if_false);
+ __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ LoadlB(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
+ __ AndP(r3, r3,
+ Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+ __ CmpP(r3, Operand(1 << Map::kIsCallable));
+ Split(eq, if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->object_string())) {
+ __ JumpIfSmi(r2, if_false);
+ __ CompareRoot(r2, Heap::kNullValueRootIndex);
+ __ beq(if_true);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(r2, r2, r3, FIRST_JS_RECEIVER_TYPE);
+ __ blt(if_false);
+ __ tm(FieldMemOperand(r2, Map::kBitFieldOffset),
+ Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+ Split(eq, if_true, if_false, fall_through);
+// clang-format off
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
+ } else if (String::Equals(check, factory->type##_string())) { \
+ __ JumpIfSmi(r2, if_false); \
+ __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset)); \
+ __ CompareRoot(r2, Heap::k##Type##MapRootIndex); \
+ Split(eq, if_true, if_false, fall_through);
+ SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+ // clang-format on
+ } else {
+ if (if_false != fall_through) __ b(if_false);
+ }
+ context()->Plug(if_true, if_false);
+}
+
+void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+ Comment cmnt(masm_, "[ CompareOperation");
+ SetExpressionPosition(expr);
+
+ // First we try a fast inlined version of the compare when one of
+ // the operands is a literal.
+ if (TryLiteralCompare(expr)) return;
+
+ // Always perform the comparison for its control flow. Pack the result
+ // into the expression's context after the comparison is performed.
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ Token::Value op = expr->op();
+ VisitForStackValue(expr->left());
+ switch (op) {
+ case Token::IN:
+ VisitForStackValue(expr->right());
+ CallRuntimeWithOperands(Runtime::kHasProperty);
+ PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
+ __ CompareRoot(r2, Heap::kTrueValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
+ break;
+
+ case Token::INSTANCEOF: {
+ VisitForAccumulatorValue(expr->right());
+ PopOperand(r3);
+ InstanceOfStub stub(isolate());
+ __ CallStub(&stub);
+ PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
+ __ CompareRoot(r2, Heap::kTrueValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
+ break;
+ }
+
+ default: {
+ VisitForAccumulatorValue(expr->right());
+ Condition cond = CompareIC::ComputeCondition(op);
+ PopOperand(r3);
+
+ bool inline_smi_code = ShouldInlineSmiCase(op);
+ JumpPatchSite patch_site(masm_);
+ if (inline_smi_code) {
+ Label slow_case;
+ __ LoadRR(r4, r3);
+ __ OrP(r4, r2);
+ patch_site.EmitJumpIfNotSmi(r4, &slow_case);
+ __ CmpP(r3, r2);
+ Split(cond, if_true, if_false, NULL);
+ __ bind(&slow_case);
+ }
+
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+ CallIC(ic, expr->CompareOperationFeedbackId());
+ patch_site.EmitPatchInfo();
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ __ CmpP(r2, Operand::Zero());
+ Split(cond, if_true, if_false, fall_through);
+ }
+ }
+
+ // Convert the result of the comparison into one expected for this
+ // expression's context.
+ context()->Plug(if_true, if_false);
+}
+
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+ Expression* sub_expr,
+ NilValue nil) {
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ VisitForAccumulatorValue(sub_expr);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ if (expr->op() == Token::EQ_STRICT) {
+ Heap::RootListIndex nil_value = nil == kNullValue
+ ? Heap::kNullValueRootIndex
+ : Heap::kUndefinedValueRootIndex;
+ __ CompareRoot(r2, nil_value);
+ Split(eq, if_true, if_false, fall_through);
+ } else {
+ __ JumpIfSmi(r2, if_false);
+ __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ LoadlB(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
+ __ AndP(r0, r3, Operand(1 << Map::kIsUndetectable));
+ Split(ne, if_true, if_false, fall_through);
+ }
+ context()->Plug(if_true, if_false);
+}
+Register FullCodeGenerator::result_register() { return r2; }
+
+Register FullCodeGenerator::context_register() { return cp; }
+
+void FullCodeGenerator::LoadFromFrameField(int frame_offset, Register value) {
+ DCHECK_EQ(static_cast<int>(POINTER_SIZE_ALIGN(frame_offset)), frame_offset);
+ __ LoadP(value, MemOperand(fp, frame_offset));
+}
+
+void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+ DCHECK_EQ(static_cast<int>(POINTER_SIZE_ALIGN(frame_offset)), frame_offset);
+ __ StoreP(value, MemOperand(fp, frame_offset));
+}
+
+void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
+ __ LoadP(dst, ContextMemOperand(cp, context_index), r0);
+}
+
+void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
+ Scope* closure_scope = scope()->ClosureScope();
+ if (closure_scope->is_script_scope() || closure_scope->is_module_scope()) {
+ // Contexts nested in the native context have a canonical empty function
+ // as their closure, not the anonymous closure containing the global
+ // code.
+ __ LoadNativeContextSlot(Context::CLOSURE_INDEX, ip);
+ } else if (closure_scope->is_eval_scope()) {
+ // Contexts created by a call to eval have the same closure as the
+ // context calling eval, not the anonymous closure containing the eval
+ // code. Fetch it from the context.
+ __ LoadP(ip, ContextMemOperand(cp, Context::CLOSURE_INDEX));
+ } else {
+ DCHECK(closure_scope->is_function_scope());
+ __ LoadP(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+ PushOperand(ip);
+}
+
+// ----------------------------------------------------------------------------
+// Non-local control flow support.
+
+void FullCodeGenerator::EnterFinallyBlock() {
+ DCHECK(!result_register().is(r3));
+ // Store pending message while executing finally block.
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ mov(ip, Operand(pending_message_obj));
+ __ LoadP(r3, MemOperand(ip));
+ PushOperand(r3);
+
+ ClearPendingMessage();
+}
+
+void FullCodeGenerator::ExitFinallyBlock() {
+ DCHECK(!result_register().is(r3));
+ // Restore pending message from stack.
+ PopOperand(r3);
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ mov(ip, Operand(pending_message_obj));
+ __ StoreP(r3, MemOperand(ip));
+}
+
+void FullCodeGenerator::ClearPendingMessage() {
+ DCHECK(!result_register().is(r3));
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ mov(ip, Operand(pending_message_obj));
+ __ StoreP(r3, MemOperand(ip));
+}
+
+void FullCodeGenerator::DeferredCommands::EmitCommands() {
+ DCHECK(!result_register().is(r3));
+ // Restore the accumulator (r2) and token (r3).
+ __ Pop(r3, result_register());
+ for (DeferredCommand cmd : commands_) {
+ Label skip;
+ __ CmpSmiLiteral(r3, Smi::FromInt(cmd.token), r0);
+ __ bne(&skip);
+ switch (cmd.command) {
+ case kReturn:
+ codegen_->EmitUnwindAndReturn();
+ break;
+ case kThrow:
+ __ Push(result_register());
+ __ CallRuntime(Runtime::kReThrow);
+ break;
+ case kContinue:
+ codegen_->EmitContinue(cmd.target);
+ break;
+ case kBreak:
+ codegen_->EmitBreak(cmd.target);
+ break;
+ }
+ __ bind(&skip);
+ }
+}
+
+#undef __
+
+#if V8_TARGET_ARCH_S390X
+static const FourByteInstr kInterruptBranchInstruction = 0xA7A40011;
+static const FourByteInstr kOSRBranchInstruction = 0xA7040011;
+static const int16_t kBackEdgeBranchOffset = 0x11 * 2;
+#else
+static const FourByteInstr kInterruptBranchInstruction = 0xA7A4000D;
+static const FourByteInstr kOSRBranchInstruction = 0xA704000D;
+static const int16_t kBackEdgeBranchOffset = 0xD * 2;
+#endif
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code, Address pc,
+ BackEdgeState target_state,
+ Code* replacement_code) {
+ Address call_address = Assembler::target_address_from_return_address(pc);
+ Address branch_address = call_address - 4;
+ Isolate* isolate = unoptimized_code->GetIsolate();
+ CodePatcher patcher(isolate, branch_address, 4);
+
+ switch (target_state) {
+ case INTERRUPT: {
+ // <decrement profiling counter>
+ // bge <ok> ;; patched to GE BRC
+ // brasrl r14, <interrupt stub address>
+ // <reset profiling counter>
+ // ok-label
+ patcher.masm()->brc(ge, Operand(kBackEdgeBranchOffset));
+ break;
+ }
+ case ON_STACK_REPLACEMENT:
+ // <decrement profiling counter>
+ // brc 0x0, <ok> ;; patched to NOP BRC
+ // brasrl r14, <interrupt stub address>
+ // <reset profiling counter>
+ // ok-label ----- pc_after points here
+ patcher.masm()->brc(CC_NOP, Operand(kBackEdgeBranchOffset));
+ break;
+ }
+
+ // Replace the stack check address in the mov sequence with the
+ // entry address of the replacement code.
+ Assembler::set_target_address_at(isolate, call_address, unoptimized_code,
+ replacement_code->entry());
+
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, call_address, replacement_code);
+}
+
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+ Isolate* isolate, Code* unoptimized_code, Address pc) {
+ Address call_address = Assembler::target_address_from_return_address(pc);
+ Address branch_address = call_address - 4;
+#ifdef DEBUG
+ Address interrupt_address =
+ Assembler::target_address_at(call_address, unoptimized_code);
+#endif
+
+ DCHECK(BRC == Instruction::S390OpcodeValue(branch_address));
+ // For interrupt, we expect a branch greater than or equal
+ // i.e. BRC 0xa, +XXXX (0xA7A4XXXX)
+ FourByteInstr br_instr = Instruction::InstructionBits(
+ reinterpret_cast<const byte*>(branch_address));
+ if (kInterruptBranchInstruction == br_instr) {
+ DCHECK(interrupt_address == isolate->builtins()->InterruptCheck()->entry());
+ return INTERRUPT;
+ }
+
+ // Expect BRC to be patched to NOP branch.
+ // i.e. BRC 0x0, +XXXX (0xA704XXXX)
+ USE(kOSRBranchInstruction);
+ DCHECK(kOSRBranchInstruction == br_instr);
+
+ DCHECK(interrupt_address ==
+ isolate->builtins()->OnStackReplacement()->entry());
+ return ON_STACK_REPLACEMENT;
+}
+
+} // namespace internal
+} // namespace v8
+#endif // V8_TARGET_ARCH_S390
diff --git a/deps/v8/src/full-codegen/x64/full-codegen-x64.cc b/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
index 2f7788d0c7..992e7fe4f7 100644
--- a/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
+++ b/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
@@ -286,35 +286,32 @@ void FullCodeGenerator::Generate() {
// Visit the declarations and body unless there is an illegal
// redeclaration.
- if (scope()->HasIllegalRedeclaration()) {
+ PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+ {
Comment cmnt(masm_, "[ Declarations");
- VisitForEffect(scope()->GetIllegalRedeclaration());
+ VisitDeclarations(scope()->declarations());
+ }
- } else {
- PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
- { Comment cmnt(masm_, "[ Declarations");
- VisitDeclarations(scope()->declarations());
- }
+ // Assert that the declarations do not use ICs. Otherwise the debugger
+ // won't be able to redirect a PC at an IC to the correct IC in newly
+ // recompiled code.
+ DCHECK_EQ(0, ic_total_count_);
- // Assert that the declarations do not use ICs. Otherwise the debugger
- // won't be able to redirect a PC at an IC to the correct IC in newly
- // recompiled code.
- DCHECK_EQ(0, ic_total_count_);
-
- { Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
- Label ok;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &ok, Label::kNear);
- __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
- __ bind(&ok);
- }
+ {
+ Comment cmnt(masm_, "[ Stack check");
+ PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+ Label ok;
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+ __ j(above_equal, &ok, Label::kNear);
+ __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+ __ bind(&ok);
+ }
- { Comment cmnt(masm_, "[ Body");
- DCHECK(loop_depth() == 0);
- VisitStatements(literal()->body());
- DCHECK(loop_depth() == 0);
- }
+ {
+ Comment cmnt(masm_, "[ Body");
+ DCHECK(loop_depth() == 0);
+ VisitStatements(literal()->body());
+ DCHECK(loop_depth() == 0);
}
// Always emit a 'return undefined' in case control fell off the end of
@@ -500,7 +497,7 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
true,
true_label_,
false_label_);
- DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
+ DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
if (false_label_ != fall_through_) __ jmp(false_label_);
} else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -606,7 +603,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_true,
Label* if_false,
Label* fall_through) {
- Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+ Handle<Code> ic = ToBooleanICStub::GetUninitialized(isolate());
CallIC(ic, condition->test_id());
__ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
Split(equal, if_true, if_false, fall_through);
@@ -947,14 +944,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
- Label loop, exit;
- ForIn loop_statement(this, stmt);
- increment_loop_depth();
-
// Get the object to enumerate over.
SetExpressionAsStatementPosition(stmt->enumerable());
VisitForAccumulatorValue(stmt->enumerable());
- OperandStackDepthIncrement(ForIn::kElementCount);
+ OperandStackDepthIncrement(5);
+
+ Label loop, exit;
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
// If the object is null or undefined, skip over the loop, otherwise convert
// it to a JS receiver. See ECMA-262 version 5, section 12.6.4.
@@ -1029,11 +1026,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// We got a fixed array in register rax. Iterate through that.
__ bind(&fixed_array);
- // No need for a write barrier, we are storing a Smi in the feedback vector.
- int const vector_index = SmiFromSlot(slot)->value();
- __ EmitLoadTypeFeedbackVector(rbx);
- __ Move(FieldOperand(rbx, FixedArray::OffsetOfElementAt(vector_index)),
- TypeFeedbackVector::MegamorphicSentinel(isolate()));
__ movp(rcx, Operand(rsp, 0 * kPointerSize)); // Get enumerated object
__ Push(Smi::FromInt(1)); // Smi(1) indicates slow check
__ Push(rax); // Array
@@ -1069,12 +1061,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmpp(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
__ j(equal, &update_each, Label::kNear);
- // We might get here from TurboFan or Crankshaft when something in the
- // for-in loop body deopts and only now notice in fullcodegen, that we
- // can now longer use the enum cache, i.e. left fast mode. So better record
- // this information here, in case we later OSR back into this loop or
- // reoptimize the whole function w/o rerunning the loop with the slow
- // mode object in fullcodegen (which would result in a deopt loop).
+ // We need to filter the key, record slow-path here.
+ int const vector_index = SmiFromSlot(slot)->value();
__ EmitLoadTypeFeedbackVector(rdx);
__ Move(FieldOperand(rdx, FixedArray::OffsetOfElementAt(vector_index)),
TypeFeedbackVector::MegamorphicSentinel(isolate()));
@@ -1115,8 +1103,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Remove the pointers stored on the stack.
__ bind(loop_statement.break_label());
- __ addp(rsp, Immediate(5 * kPointerSize));
- OperandStackDepthDecrement(ForIn::kElementCount);
+ DropOperands(5);
// Exit and decrement the loop depth.
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
@@ -1125,31 +1112,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
}
-void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
- bool pretenure) {
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning. If
- // we're running with the --always-opt or the --prepare-always-opt
- // flag, we need to use the runtime function so that the new function
- // we are creating here gets a chance to have its code optimized and
- // doesn't just get a copy of the existing unoptimized code.
- if (!FLAG_always_opt &&
- !FLAG_prepare_always_opt &&
- !pretenure &&
- scope()->is_function_scope() &&
- info->num_literals() == 0) {
- FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
- __ Move(rbx, info);
- __ CallStub(&stub);
- } else {
- __ Push(info);
- __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
- : Runtime::kNewClosure);
- }
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
@@ -1578,12 +1540,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
}
- if (expr->has_function()) {
- DCHECK(result_saved);
- __ Push(Operand(rsp, 0));
- __ CallRuntime(Runtime::kToFastProperties);
- }
-
if (result_saved) {
context()->PlugTOS();
} else {
@@ -1824,65 +1780,45 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// this. It stays on the stack while we update the iterator.
VisitForStackValue(expr->expression());
- switch (expr->yield_kind()) {
- case Yield::kSuspend:
- // Pop value from top-of-stack slot; box result into result register.
- EmitCreateIteratorResult(false);
- __ Push(result_register());
- // Fall through.
- case Yield::kInitial: {
- Label suspend, continuation, post_runtime, resume;
-
- __ jmp(&suspend);
- __ bind(&continuation);
- // When we arrive here, the stack top is the resume mode and
- // result_register() holds the input value (the argument given to the
- // respective resume operation).
- __ RecordGeneratorContinuation();
- __ Pop(rbx);
- __ SmiCompare(rbx, Smi::FromInt(JSGeneratorObject::RETURN));
- __ j(not_equal, &resume);
- __ Push(result_register());
- EmitCreateIteratorResult(true);
- EmitUnwindAndReturn();
-
- __ bind(&suspend);
- VisitForAccumulatorValue(expr->generator_object());
- DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
- __ Move(FieldOperand(rax, JSGeneratorObject::kContinuationOffset),
- Smi::FromInt(continuation.pos()));
- __ movp(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi);
- __ movp(rcx, rsi);
- __ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx,
- kDontSaveFPRegs);
- __ leap(rbx, Operand(rbp, StandardFrameConstants::kExpressionsOffset));
- __ cmpp(rsp, rbx);
- __ j(equal, &post_runtime);
- __ Push(rax); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ movp(context_register(),
- Operand(rbp, StandardFrameConstants::kContextOffset));
- __ bind(&post_runtime);
-
- PopOperand(result_register());
- EmitReturnSequence();
-
- __ bind(&resume);
- context()->Plug(result_register());
- break;
- }
-
- case Yield::kFinal: {
- // Pop value from top-of-stack slot, box result into result register.
- OperandStackDepthDecrement(1);
- EmitCreateIteratorResult(true);
- EmitUnwindAndReturn();
- break;
- }
-
- case Yield::kDelegating:
- UNREACHABLE();
- }
+ Label suspend, continuation, post_runtime, resume;
+
+ __ jmp(&suspend);
+ __ bind(&continuation);
+ // When we arrive here, the stack top is the resume mode and
+ // result_register() holds the input value (the argument given to the
+ // respective resume operation).
+ __ RecordGeneratorContinuation();
+ __ Pop(rbx);
+ __ SmiCompare(rbx, Smi::FromInt(JSGeneratorObject::RETURN));
+ __ j(not_equal, &resume);
+ __ Push(result_register());
+ EmitCreateIteratorResult(true);
+ EmitUnwindAndReturn();
+
+ __ bind(&suspend);
+ OperandStackDepthIncrement(1); // Not popped on this path.
+ VisitForAccumulatorValue(expr->generator_object());
+ DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+ __ Move(FieldOperand(rax, JSGeneratorObject::kContinuationOffset),
+ Smi::FromInt(continuation.pos()));
+ __ movp(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi);
+ __ movp(rcx, rsi);
+ __ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx,
+ kDontSaveFPRegs);
+ __ leap(rbx, Operand(rbp, StandardFrameConstants::kExpressionsOffset));
+ __ cmpp(rsp, rbx);
+ __ j(equal, &post_runtime);
+ __ Push(rax); // generator object
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ movp(context_register(),
+ Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ bind(&post_runtime);
+
+ PopOperand(result_register());
+ EmitReturnSequence();
+
+ __ bind(&resume);
+ context()->Plug(result_register());
}
@@ -1911,7 +1847,10 @@ void FullCodeGenerator::EmitGeneratorResume(
// Push receiver.
__ Push(FieldOperand(rbx, JSGeneratorObject::kReceiverOffset));
- // Push holes for arguments to generator function.
+ // Push holes for arguments to generator function. Since the parser forced
+ // context allocation for any variables in generators, the actual argument
+ // values have already been copied into the context and these dummy values
+ // will never be used.
__ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ LoadSharedFunctionInfoSpecialField(rdx, rdx,
SharedFunctionInfo::kFormalParameterCountOffset);
@@ -2014,19 +1953,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ LoadRoot(FieldOperand(rax, JSIteratorResult::kDoneOffset),
done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
- SetExpressionPosition(prop);
- Literal* key = prop->key()->AsLiteral();
- DCHECK(!key->value()->IsSmi());
- DCHECK(!prop->IsSuperAccess());
-
- __ Move(LoadDescriptor::NameRegister(), key->value());
- __ Move(LoadDescriptor::SlotRegister(),
- SmiFromSlot(prop->PropertyFeedbackSlot()));
- CallLoadIC(NOT_INSIDE_TYPEOF);
+ OperandStackDepthDecrement(1);
}
@@ -2586,7 +2513,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
}
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
- SetCallPosition(expr);
+ SetCallPosition(expr, expr->tail_call_mode());
if (expr->tail_call_mode() == TailCallMode::kAllow) {
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceTailCall);
@@ -3066,23 +2993,6 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into rax and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- // Convert the object to an integer.
- Label done_convert;
- __ JumpIfSmi(rax, &done_convert, Label::kNear);
- __ Push(rax);
- __ CallRuntime(Runtime::kToInteger);
- __ bind(&done_convert);
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3266,6 +3176,11 @@ void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
context()->Plug(rax);
}
+void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
+ DCHECK_EQ(0, expr->arguments()->length());
+ __ LoadNativeContextSlot(Context::ORDINARY_HAS_INSTANCE_INDEX, rax);
+ context()->Plug(rax);
+}
void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
DCHECK(expr->arguments()->length() == 0);
@@ -3306,11 +3221,13 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
- // Push the builtins object as receiver.
+ // Push function.
+ __ LoadNativeContextSlot(expr->context_index(), rax);
+ PushOperand(rax);
+
+ // Push undefined as receiver.
OperandStackDepthIncrement(1);
__ PushRoot(Heap::kUndefinedValueRootIndex);
-
- __ LoadNativeContextSlot(expr->context_index(), rax);
}
@@ -3324,59 +3241,9 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
-}
-
-
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- if (expr->is_jsruntime()) {
- Comment cmnt(masm_, "[ CallRuntime");
-
- EmitLoadJSRuntimeFunction(expr);
-
- // Push the target function under the receiver.
- PushOperand(Operand(rsp, 0));
- __ movp(Operand(rsp, kPointerSize), rax);
-
- // Push the arguments ("left-to-right").
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
- EmitCallJSRuntimeFunction(expr);
- // Restore context register.
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, rax);
-
- } else {
- const Runtime::Function* function = expr->function();
- switch (function->function_id) {
-#define CALL_INTRINSIC_GENERATOR(Name) \
- case Runtime::kInline##Name: { \
- Comment cmnt(masm_, "[ Inline" #Name); \
- return Emit##Name(expr); \
- }
- FOR_EACH_FULL_CODE_INTRINSIC(CALL_INTRINSIC_GENERATOR)
-#undef CALL_INTRINSIC_GENERATOR
- default: {
- Comment cmnt(masm_, "[ CallRuntime for unhandled intrinsic");
- // Push the arguments ("left-to-right").
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the C runtime.
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
- __ CallRuntime(function, arg_count);
- OperandStackDepthDecrement(arg_count);
- context()->Plug(rax);
- }
- }
- }
+ // Restore context register.
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
@@ -3619,11 +3486,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ jmp(&stub_call, Label::kNear);
__ bind(&slow);
}
- if (!is_strong(language_mode())) {
- ToNumberStub convert_stub(isolate());
- __ CallStub(&convert_stub);
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
- }
+
+ // Convert old value into a number.
+ ToNumberStub convert_stub(isolate());
+ __ CallStub(&convert_stub);
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -3663,9 +3530,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
patch_site.EmitPatchInfo();
__ bind(&done);
- if (is_strong(language_mode())) {
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
- }
// Store the value returned in rax.
switch (assign_type) {
case VARIABLE:
@@ -3920,21 +3784,16 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
__ CompareRoot(rax, nil_value);
Split(equal, if_true, if_false, fall_through);
} else {
- Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, expr->CompareOperationFeedbackId());
- __ CompareRoot(rax, Heap::kTrueValueRootIndex);
- Split(equal, if_true, if_false, fall_through);
+ __ JumpIfSmi(rax, if_false);
+ __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
+ __ testb(FieldOperand(rax, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ Split(not_zero, if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- context()->Plug(rax);
-}
-
-
Register FullCodeGenerator::result_register() {
return rax;
}
@@ -3944,6 +3803,10 @@ Register FullCodeGenerator::context_register() {
return rsi;
}
+void FullCodeGenerator::LoadFromFrameField(int frame_offset, Register value) {
+ DCHECK(IsAligned(frame_offset, kPointerSize));
+ __ movp(value, Operand(rbp, frame_offset));
+}
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
DCHECK(IsAligned(frame_offset, kPointerSize));
@@ -4013,11 +3876,6 @@ void FullCodeGenerator::ClearPendingMessage() {
}
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
- DCHECK(!slot.IsInvalid());
- __ Move(VectorStoreICTrampolineDescriptor::SlotRegister(), SmiFromSlot(slot));
-}
-
void FullCodeGenerator::DeferredCommands::EmitCommands() {
__ Pop(result_register()); // Restore the accumulator.
__ Pop(rdx); // Get the token.
@@ -4073,7 +3931,6 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
*jns_offset_address = kJnsOffset;
break;
case ON_STACK_REPLACEMENT:
- case OSR_AFTER_STACK_CHECK:
// sub <profiling_counter>, <delta> ;; Not changed
// nop
// nop
@@ -4111,16 +3968,10 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
DCHECK_EQ(kNopByteOne, *jns_instr_address);
DCHECK_EQ(kNopByteTwo, *(call_target_address - 2));
- if (Assembler::target_address_at(call_target_address,
- unoptimized_code) ==
- isolate->builtins()->OnStackReplacement()->entry()) {
- return ON_STACK_REPLACEMENT;
- }
-
- DCHECK_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(),
- Assembler::target_address_at(call_target_address,
- unoptimized_code));
- return OSR_AFTER_STACK_CHECK;
+ DCHECK_EQ(
+ isolate->builtins()->OnStackReplacement()->entry(),
+ Assembler::target_address_at(call_target_address, unoptimized_code));
+ return ON_STACK_REPLACEMENT;
}
} // namespace internal
diff --git a/deps/v8/src/full-codegen/x87/full-codegen-x87.cc b/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
index 1fecf499a6..f14aaf69b0 100644
--- a/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
+++ b/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
@@ -285,39 +285,35 @@ void FullCodeGenerator::Generate() {
__ CallRuntime(Runtime::kTraceEnter);
}
- // Visit the declarations and body unless there is an illegal
- // redeclaration.
- if (scope()->HasIllegalRedeclaration()) {
+ // Visit the declarations and body.
+ PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+ {
Comment cmnt(masm_, "[ Declarations");
- VisitForEffect(scope()->GetIllegalRedeclaration());
+ VisitDeclarations(scope()->declarations());
+ }
- } else {
- PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
- { Comment cmnt(masm_, "[ Declarations");
- VisitDeclarations(scope()->declarations());
- }
+ // Assert that the declarations do not use ICs. Otherwise the debugger
+ // won't be able to redirect a PC at an IC to the correct IC in newly
+ // recompiled code.
+ DCHECK_EQ(0, ic_total_count_);
- // Assert that the declarations do not use ICs. Otherwise the debugger
- // won't be able to redirect a PC at an IC to the correct IC in newly
- // recompiled code.
- DCHECK_EQ(0, ic_total_count_);
-
- { Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
- Label ok;
- ExternalReference stack_limit
- = ExternalReference::address_of_stack_limit(isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, Label::kNear);
- __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
- __ bind(&ok);
- }
+ {
+ Comment cmnt(masm_, "[ Stack check");
+ PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+ Label ok;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &ok, Label::kNear);
+ __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+ __ bind(&ok);
+ }
- { Comment cmnt(masm_, "[ Body");
- DCHECK(loop_depth() == 0);
- VisitStatements(literal()->body());
- DCHECK(loop_depth() == 0);
- }
+ {
+ Comment cmnt(masm_, "[ Body");
+ DCHECK(loop_depth() == 0);
+ VisitStatements(literal()->body());
+ DCHECK(loop_depth() == 0);
}
// Always emit a 'return undefined' in case control fell off the end of
@@ -484,7 +480,7 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
true,
true_label_,
false_label_);
- DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
+ DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
if (false_label_ != fall_through_) __ jmp(false_label_);
} else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -592,7 +588,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_true,
Label* if_false,
Label* fall_through) {
- Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+ Handle<Code> ic = ToBooleanICStub::GetUninitialized(isolate());
CallIC(ic, condition->test_id());
__ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
Split(equal, if_true, if_false, fall_through);
@@ -926,14 +922,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
- Label loop, exit;
- ForIn loop_statement(this, stmt);
- increment_loop_depth();
-
// Get the object to enumerate over.
SetExpressionAsStatementPosition(stmt->enumerable());
VisitForAccumulatorValue(stmt->enumerable());
- OperandStackDepthIncrement(ForIn::kElementCount);
+ OperandStackDepthIncrement(5);
+
+ Label loop, exit;
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
// If the object is null or undefined, skip over the loop, otherwise convert
// it to a JS receiver. See ECMA-262 version 5, section 12.6.4.
@@ -1000,11 +996,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// We got a fixed array in register eax. Iterate through that.
__ bind(&fixed_array);
- // No need for a write barrier, we are storing a Smi in the feedback vector.
- int const vector_index = SmiFromSlot(slot)->value();
- __ EmitLoadTypeFeedbackVector(ebx);
- __ mov(FieldOperand(ebx, FixedArray::OffsetOfElementAt(vector_index)),
- Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
__ push(Immediate(Smi::FromInt(1))); // Smi(1) undicates slow check
__ push(eax); // Array
__ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
@@ -1035,12 +1026,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset));
__ j(equal, &update_each, Label::kNear);
- // We might get here from TurboFan or Crankshaft when something in the
- // for-in loop body deopts and only now notice in fullcodegen, that we
- // can now longer use the enum cache, i.e. left fast mode. So better record
- // this information here, in case we later OSR back into this loop or
- // reoptimize the whole function w/o rerunning the loop with the slow
- // mode object in fullcodegen (which would result in a deopt loop).
+ // We need to filter the key, record slow-path here.
+ int const vector_index = SmiFromSlot(slot)->value();
__ EmitLoadTypeFeedbackVector(edx);
__ mov(FieldOperand(edx, FixedArray::OffsetOfElementAt(vector_index)),
Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
@@ -1081,8 +1068,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Remove the pointers stored on the stack.
__ bind(loop_statement.break_label());
- __ add(esp, Immediate(5 * kPointerSize));
- OperandStackDepthDecrement(ForIn::kElementCount);
+ DropOperands(5);
// Exit and decrement the loop depth.
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
@@ -1091,31 +1077,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
}
-void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
- bool pretenure) {
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning. If
- // we're running with the --always-opt or the --prepare-always-opt
- // flag, we need to use the runtime function so that the new function
- // we are creating here gets a chance to have its code optimized and
- // doesn't just get a copy of the existing unoptimized code.
- if (!FLAG_always_opt &&
- !FLAG_prepare_always_opt &&
- !pretenure &&
- scope()->is_function_scope() &&
- info->num_literals() == 0) {
- FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
- __ mov(ebx, Immediate(info));
- __ CallStub(&stub);
- } else {
- __ push(Immediate(info));
- __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
- : Runtime::kNewClosure);
- }
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
@@ -1546,12 +1507,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
}
- if (expr->has_function()) {
- DCHECK(result_saved);
- __ push(Operand(esp, 0));
- __ CallRuntime(Runtime::kToFastProperties);
- }
-
if (result_saved) {
context()->PlugTOS();
} else {
@@ -1793,64 +1748,44 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// this. It stays on the stack while we update the iterator.
VisitForStackValue(expr->expression());
- switch (expr->yield_kind()) {
- case Yield::kSuspend:
- // Pop value from top-of-stack slot; box result into result register.
- EmitCreateIteratorResult(false);
- __ push(result_register());
- // Fall through.
- case Yield::kInitial: {
- Label suspend, continuation, post_runtime, resume;
-
- __ jmp(&suspend);
- __ bind(&continuation);
- // When we arrive here, the stack top is the resume mode and
- // result_register() holds the input value (the argument given to the
- // respective resume operation).
- __ RecordGeneratorContinuation();
- __ pop(ebx);
- __ cmp(ebx, Immediate(Smi::FromInt(JSGeneratorObject::RETURN)));
- __ j(not_equal, &resume);
- __ push(result_register());
- EmitCreateIteratorResult(true);
- EmitUnwindAndReturn();
-
- __ bind(&suspend);
- VisitForAccumulatorValue(expr->generator_object());
- DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
- __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
- Immediate(Smi::FromInt(continuation.pos())));
- __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
- __ mov(ecx, esi);
- __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
- kDontSaveFPRegs);
- __ lea(ebx, Operand(ebp, StandardFrameConstants::kExpressionsOffset));
- __ cmp(esp, ebx);
- __ j(equal, &post_runtime);
- __ push(eax); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ mov(context_register(),
- Operand(ebp, StandardFrameConstants::kContextOffset));
- __ bind(&post_runtime);
- PopOperand(result_register());
- EmitReturnSequence();
-
- __ bind(&resume);
- context()->Plug(result_register());
- break;
- }
-
- case Yield::kFinal: {
- // Pop value from top-of-stack slot, box result into result register.
- OperandStackDepthDecrement(1);
- EmitCreateIteratorResult(true);
- EmitUnwindAndReturn();
- break;
- }
-
- case Yield::kDelegating:
- UNREACHABLE();
- }
+ Label suspend, continuation, post_runtime, resume;
+
+ __ jmp(&suspend);
+ __ bind(&continuation);
+ // When we arrive here, the stack top is the resume mode and
+ // result_register() holds the input value (the argument given to the
+ // respective resume operation).
+ __ RecordGeneratorContinuation();
+ __ pop(ebx);
+ __ cmp(ebx, Immediate(Smi::FromInt(JSGeneratorObject::RETURN)));
+ __ j(not_equal, &resume);
+ __ push(result_register());
+ EmitCreateIteratorResult(true);
+ EmitUnwindAndReturn();
+
+ __ bind(&suspend);
+ OperandStackDepthIncrement(1); // Not popped on this path.
+ VisitForAccumulatorValue(expr->generator_object());
+ DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+ __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
+ Immediate(Smi::FromInt(continuation.pos())));
+ __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
+ __ mov(ecx, esi);
+ __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
+ kDontSaveFPRegs);
+ __ lea(ebx, Operand(ebp, StandardFrameConstants::kExpressionsOffset));
+ __ cmp(esp, ebx);
+ __ j(equal, &post_runtime);
+ __ push(eax); // generator object
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ mov(context_register(),
+ Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ bind(&post_runtime);
+ PopOperand(result_register());
+ EmitReturnSequence();
+
+ __ bind(&resume);
+ context()->Plug(result_register());
}
@@ -1878,7 +1813,10 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// Push receiver.
__ push(FieldOperand(ebx, JSGeneratorObject::kReceiverOffset));
- // Push holes for arguments to generator function.
+ // Push holes for arguments to generator function. Since the parser forced
+ // context allocation for any variables in generators, the actual argument
+ // values have already been copied into the context and these dummy values
+ // will never be used.
__ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(edx,
FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -1983,19 +1921,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ mov(FieldOperand(eax, JSIteratorResult::kDoneOffset),
isolate()->factory()->ToBoolean(done));
STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
- SetExpressionPosition(prop);
- Literal* key = prop->key()->AsLiteral();
- DCHECK(!key->value()->IsSmi());
- DCHECK(!prop->IsSuperAccess());
-
- __ mov(LoadDescriptor::NameRegister(), Immediate(key->value()));
- __ mov(LoadDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallLoadIC(NOT_INSIDE_TYPEOF);
+ OperandStackDepthDecrement(1);
}
@@ -2589,7 +2515,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
}
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
- SetCallPosition(expr);
+ SetCallPosition(expr, expr->tail_call_mode());
if (expr->tail_call_mode() == TailCallMode::kAllow) {
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceTailCall);
@@ -3070,23 +2996,6 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into eax and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- // Convert the object to an integer.
- Label done_convert;
- __ JumpIfSmi(eax, &done_convert, Label::kNear);
- __ Push(eax);
- __ CallRuntime(Runtime::kToInteger);
- __ bind(&done_convert);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3270,6 +3179,12 @@ void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
context()->Plug(eax);
}
+void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
+ DCHECK_EQ(0, expr->arguments()->length());
+ __ mov(eax, NativeContextOperand());
+ __ mov(eax, ContextOperand(eax, Context::ORDINARY_HAS_INSTANCE_INDEX));
+ context()->Plug(eax);
+}
void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
DCHECK(expr->arguments()->length() == 0);
@@ -3311,10 +3226,12 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
+ // Push function.
+ __ LoadGlobalFunction(expr->context_index(), eax);
+ PushOperand(eax);
+
// Push undefined as receiver.
PushOperand(isolate()->factory()->undefined_value());
-
- __ LoadGlobalFunction(expr->context_index(), eax);
}
@@ -3328,58 +3245,9 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
-}
-
-
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- if (expr->is_jsruntime()) {
- Comment cmnt(masm_, "[ CallRuntime");
- EmitLoadJSRuntimeFunction(expr);
-
- // Push the target function under the receiver.
- PushOperand(Operand(esp, 0));
- __ mov(Operand(esp, kPointerSize), eax);
-
- // Push the arguments ("left-to-right").
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
- EmitCallJSRuntimeFunction(expr);
-
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, eax);
-
- } else {
- const Runtime::Function* function = expr->function();
- switch (function->function_id) {
-#define CALL_INTRINSIC_GENERATOR(Name) \
- case Runtime::kInline##Name: { \
- Comment cmnt(masm_, "[ Inline" #Name); \
- return Emit##Name(expr); \
- }
- FOR_EACH_FULL_CODE_INTRINSIC(CALL_INTRINSIC_GENERATOR)
-#undef CALL_INTRINSIC_GENERATOR
- default: {
- Comment cmnt(masm_, "[ CallRuntime for unhandled intrinsic");
- // Push the arguments ("left-to-right").
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- // Call the C runtime function.
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
- __ CallRuntime(expr->function(), arg_count);
- OperandStackDepthDecrement(arg_count);
- context()->Plug(eax);
- }
- }
- }
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}
@@ -3624,11 +3492,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ jmp(&stub_call, Label::kNear);
__ bind(&slow);
}
- if (!is_strong(language_mode())) {
- ToNumberStub convert_stub(isolate());
- __ CallStub(&convert_stub);
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
- }
+
+ // Convert old value into a number.
+ ToNumberStub convert_stub(isolate());
+ __ CallStub(&convert_stub);
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -3668,9 +3536,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
patch_site.EmitPatchInfo();
__ bind(&done);
- if (is_strong(language_mode())) {
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
- }
// Store the value returned in eax.
switch (assign_type) {
case VARIABLE:
@@ -3796,7 +3661,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
// Check for undetectable objects => true.
__ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
__ test_b(FieldOperand(edx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
+ Immediate(1 << Map::kIsUndetectable));
Split(not_zero, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->function_string())) {
__ JumpIfSmi(eax, if_false);
@@ -3815,7 +3680,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ j(below, if_false);
// Check for callable or undetectable objects => false.
__ test_b(FieldOperand(edx, Map::kBitFieldOffset),
- (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
+ Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
Split(zero, if_true, if_false, fall_through);
// clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
@@ -3926,21 +3791,16 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
__ cmp(eax, nil_value);
Split(equal, if_true, if_false, fall_through);
} else {
- Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, expr->CompareOperationFeedbackId());
- __ cmp(eax, isolate()->factory()->true_value());
- Split(equal, if_true, if_false, fall_through);
+ __ JumpIfSmi(eax, if_false);
+ __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(eax, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ Split(not_zero, if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- context()->Plug(eax);
-}
-
-
Register FullCodeGenerator::result_register() {
return eax;
}
@@ -3950,6 +3810,10 @@ Register FullCodeGenerator::context_register() {
return esi;
}
+void FullCodeGenerator::LoadFromFrameField(int frame_offset, Register value) {
+ DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
+ __ mov(value, Operand(ebp, frame_offset));
+}
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
@@ -4016,12 +3880,6 @@ void FullCodeGenerator::ClearPendingMessage() {
}
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
- DCHECK(!slot.IsInvalid());
- __ mov(VectorStoreICTrampolineDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(slot)));
-}
-
void FullCodeGenerator::DeferredCommands::EmitCommands() {
DCHECK(!result_register().is(edx));
__ Pop(result_register()); // Restore the accumulator.
@@ -4079,7 +3937,6 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
*jns_offset_address = kJnsOffset;
break;
case ON_STACK_REPLACEMENT:
- case OSR_AFTER_STACK_CHECK:
// sub <profiling_counter>, <delta> ;; Not changed
// nop
// nop
@@ -4117,15 +3974,10 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
DCHECK_EQ(kNopByteOne, *jns_instr_address);
DCHECK_EQ(kNopByteTwo, *(call_target_address - 2));
- if (Assembler::target_address_at(call_target_address, unoptimized_code) ==
- isolate->builtins()->OnStackReplacement()->entry()) {
- return ON_STACK_REPLACEMENT;
- }
-
- DCHECK_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(),
- Assembler::target_address_at(call_target_address,
- unoptimized_code));
- return OSR_AFTER_STACK_CHECK;
+ DCHECK_EQ(
+ isolate->builtins()->OnStackReplacement()->entry(),
+ Assembler::target_address_at(call_target_address, unoptimized_code));
+ return ON_STACK_REPLACEMENT;
}
diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc
index 819bd69e07..0df5975b54 100644
--- a/deps/v8/src/gdb-jit.cc
+++ b/deps/v8/src/gdb-jit.cc
@@ -656,6 +656,12 @@ class ELF BASE_EMBEDDED {
#elif V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN && V8_OS_LINUX
const uint8_t ident[16] = {0x7f, 'E', 'L', 'F', 2, 2, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0};
+#elif V8_TARGET_ARCH_S390X
+ const uint8_t ident[16] = {0x7f, 'E', 'L', 'F', 2, 2, 1, 3,
+ 0, 0, 0, 0, 0, 0, 0, 0};
+#elif V8_TARGET_ARCH_S390
+ const uint8_t ident[16] = {0x7f, 'E', 'L', 'F', 1, 2, 1, 3,
+ 0, 0, 0, 0, 0, 0, 0, 0};
#else
#error Unsupported target architecture.
#endif
@@ -680,6 +686,11 @@ class ELF BASE_EMBEDDED {
// id=B81AEC1A37F5DAF185257C3E004E8845&linkid=1n0000&c_t=
// c9xw7v5dzsj7gt1ifgf4cjbcnskqptmr
header->machine = 21;
+#elif V8_TARGET_ARCH_S390
+ // Processor identification value is 22 (EM_S390) as defined in the ABI:
+ // http://refspecs.linuxbase.org/ELF/zSeries/lzsabi0_s390.html#AEN1691
+ // http://refspecs.linuxbase.org/ELF/zSeries/lzsabi0_zSeries.html#AEN1599
+ header->machine = 22;
#else
#error Unsupported target architecture.
#endif
@@ -772,7 +783,8 @@ class ELFSymbol BASE_EMBEDDED {
return static_cast<Binding>(info >> 4);
}
#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X87 || \
- (V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT))
+ (V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT) || \
+ (V8_TARGET_ARCH_S390 && V8_TARGET_ARCH_32_BIT))
struct SerializedLayout {
SerializedLayout(uint32_t name,
uintptr_t value,
@@ -796,7 +808,7 @@ class ELFSymbol BASE_EMBEDDED {
uint16_t section;
};
#elif(V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_64_BIT) || \
- (V8_TARGET_ARCH_PPC64 && V8_OS_LINUX)
+ (V8_TARGET_ARCH_PPC64 && V8_OS_LINUX) || V8_TARGET_ARCH_S390X
struct SerializedLayout {
SerializedLayout(uint32_t name,
uintptr_t value,
@@ -1145,6 +1157,8 @@ class DebugInfoSection : public DebugSection {
UNIMPLEMENTED();
#elif V8_TARGET_ARCH_PPC64 && V8_OS_LINUX
w->Write<uint8_t>(DW_OP_reg31); // The frame pointer is here on PPC64.
+#elif V8_TARGET_ARCH_S390
+ w->Write<uint8_t>(DW_OP_reg11); // The frame pointer's here on S390.
#else
#error Unsupported target architecture.
#endif
@@ -1927,7 +1941,7 @@ static void UnregisterCodeEntry(JITCodeEntry* entry) {
static JITCodeEntry* CreateELFObject(CodeDescription* desc, Isolate* isolate) {
#ifdef __MACH_O
- Zone zone;
+ Zone zone(isolate->allocator());
MachO mach_o(&zone);
Writer w(&mach_o);
@@ -1939,7 +1953,7 @@ static JITCodeEntry* CreateELFObject(CodeDescription* desc, Isolate* isolate) {
mach_o.Write(&w, desc->CodeStart(), desc->CodeSize());
#else
- Zone zone;
+ Zone zone(isolate->allocator());
ELF elf(&zone);
Writer w(&elf);
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index edd52b0ceb..ed9caa92a9 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -644,7 +644,6 @@ bool GlobalHandles::IsWeak(Object** location) {
return Node::FromLocation(location)->IsWeak();
}
-
void GlobalHandles::IterateWeakRoots(ObjectVisitor* v) {
for (NodeIterator it(this); !it.done(); it.Advance()) {
Node* node = it.node();
@@ -811,6 +810,111 @@ bool GlobalHandles::IterateObjectGroups(ObjectVisitor* v,
return any_group_was_visited;
}
+namespace {
+// Traces the information about object groups and implicit ref groups given by
+// the embedder to the V8 during each gc prologue.
+class ObjectGroupsTracer {
+ public:
+ explicit ObjectGroupsTracer(Isolate* isolate);
+ void Print();
+
+ private:
+ void PrintObjectGroup(ObjectGroup* group);
+ void PrintImplicitRefGroup(ImplicitRefGroup* group);
+ void PrintObject(Object* object);
+ void PrintConstructor(JSObject* js_object);
+ void PrintInternalFields(JSObject* js_object);
+ Isolate* isolate_;
+ DISALLOW_COPY_AND_ASSIGN(ObjectGroupsTracer);
+};
+
+ObjectGroupsTracer::ObjectGroupsTracer(Isolate* isolate) : isolate_(isolate) {}
+
+void ObjectGroupsTracer::Print() {
+ GlobalHandles* global_handles = isolate_->global_handles();
+
+ PrintIsolate(isolate_, "### Tracing object groups:\n");
+
+ for (auto group : *(global_handles->object_groups())) {
+ PrintObjectGroup(group);
+ }
+ for (auto group : *(global_handles->implicit_ref_groups())) {
+ PrintImplicitRefGroup(group);
+ }
+
+ PrintIsolate(isolate_, "### Tracing object groups finished.\n");
+}
+
+void ObjectGroupsTracer::PrintObject(Object* object) {
+ if (object->IsJSObject()) {
+ JSObject* js_object = JSObject::cast(object);
+
+ PrintF("{ constructor_name: ");
+ PrintConstructor(js_object);
+ PrintF(", hidden_fields: [ ");
+ PrintInternalFields(js_object);
+ PrintF(" ] }\n");
+ } else {
+ PrintF("object of unexpected type: %p\n", object);
+ }
+}
+
+void ObjectGroupsTracer::PrintConstructor(JSObject* js_object) {
+ Object* maybe_constructor = js_object->map()->GetConstructor();
+ if (maybe_constructor->IsJSFunction()) {
+ JSFunction* constructor = JSFunction::cast(maybe_constructor);
+ String* name = String::cast(constructor->shared()->name());
+ if (name->length() == 0) name = constructor->shared()->inferred_name();
+
+ PrintF("%s", name->ToCString().get());
+ } else if (maybe_constructor->IsNull()) {
+ if (js_object->IsOddball()) {
+ PrintF("<oddball>");
+ } else {
+ PrintF("<null>");
+ }
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void ObjectGroupsTracer::PrintInternalFields(JSObject* js_object) {
+ for (int i = 0; i < js_object->GetInternalFieldCount(); ++i) {
+ if (i != 0) {
+ PrintF(", ");
+ }
+ PrintF("%p", js_object->GetInternalField(i));
+ }
+}
+
+void ObjectGroupsTracer::PrintObjectGroup(ObjectGroup* group) {
+ PrintIsolate(isolate_, "ObjectGroup (size: %lu)\n", group->length);
+ Object*** objects = group->objects;
+
+ for (size_t i = 0; i < group->length; ++i) {
+ PrintIsolate(isolate_, " - Member: ");
+ PrintObject(*objects[i]);
+ }
+}
+
+void ObjectGroupsTracer::PrintImplicitRefGroup(ImplicitRefGroup* group) {
+ PrintIsolate(isolate_, "ImplicitRefGroup (children count: %lu)\n",
+ group->length);
+ PrintIsolate(isolate_, " - Parent: ");
+ PrintObject(*(group->parent));
+
+ Object*** children = group->children;
+ for (size_t i = 0; i < group->length; ++i) {
+ PrintIsolate(isolate_, " - Child: ");
+ PrintObject(*children[i]);
+ }
+}
+
+} // namespace
+
+void GlobalHandles::PrintObjectGroups() {
+ ObjectGroupsTracer(isolate_).Print();
+}
void GlobalHandles::InvokeSecondPassPhantomCallbacks(
List<PendingPhantomCallback>* callbacks, Isolate* isolate) {
@@ -1119,7 +1223,8 @@ void GlobalHandles::PrintStats() {
}
PrintF("Global Handle Statistics:\n");
- PrintF(" allocated memory = %" V8_PTR_PREFIX "dB\n", sizeof(Node) * total);
+ PrintF(" allocated memory = %" V8_SIZET_PREFIX V8_PTR_PREFIX "dB\n",
+ total * sizeof(Node));
PrintF(" # weak = %d\n", weak);
PrintF(" # pending = %d\n", pending);
PrintF(" # near_death = %d\n", near_death);
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h
index 7047d8ca01..ac8487b19f 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/global-handles.h
@@ -165,7 +165,7 @@ class GlobalHandles {
// Clear the weakness of a global handle.
static void* ClearWeakness(Object** location);
- // Clear the weakness of a global handle.
+ // Mark the reference to this object independent of any object group.
static void MarkIndependent(Object** location);
// Mark the reference to this object externaly unreachable.
@@ -242,6 +242,9 @@ class GlobalHandles {
// can be skipped and false otherwise.
bool IterateObjectGroups(ObjectVisitor* v, WeakSlotCallbackWithHeap can_skip);
+ // Print all objects in object groups
+ void PrintObjectGroups();
+
// Add an object group.
// Should be only used in GC callback function before a collection.
// All groups are destroyed after a garbage collection.
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index be401a62ec..e7ac2b9f7f 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -59,6 +59,9 @@ namespace internal {
#if (V8_TARGET_ARCH_MIPS64 && !V8_HOST_ARCH_MIPS64)
#define USE_SIMULATOR 1
#endif
+#if (V8_TARGET_ARCH_S390 && !V8_HOST_ARCH_S390)
+#define USE_SIMULATOR 1
+#endif
#endif
// Determine whether the architecture uses an embedded constant pool
@@ -110,6 +113,7 @@ const int kMaxUInt16 = (1 << 16) - 1;
const int kMinUInt16 = 0;
const uint32_t kMaxUInt32 = 0xFFFFFFFFu;
+const int kMinUInt32 = 0;
const int kCharSize = sizeof(char); // NOLINT
const int kShortSize = sizeof(short); // NOLINT
@@ -120,6 +124,11 @@ const int kFloatSize = sizeof(float); // NOLINT
const int kDoubleSize = sizeof(double); // NOLINT
const int kIntptrSize = sizeof(intptr_t); // NOLINT
const int kPointerSize = sizeof(void*); // NOLINT
+#if V8_TARGET_ARCH_ARM64
+const int kFrameAlignmentInBytes = 2 * kPointerSize;
+#else
+const int kFrameAlignmentInBytes = kPointerSize;
+#endif
#if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
const int kRegisterSize = kPointerSize + kPointerSize;
#else
@@ -128,6 +137,12 @@ const int kRegisterSize = kPointerSize;
const int kPCOnStackSize = kRegisterSize;
const int kFPOnStackSize = kRegisterSize;
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
+const int kElidedFrameSlots = kPCOnStackSize / kPointerSize;
+#else
+const int kElidedFrameSlots = 0;
+#endif
+
const int kDoubleSizeLog2 = 3;
#if V8_HOST_ARCH_64_BIT
@@ -243,89 +258,36 @@ template <typename T, class P = FreeStoreAllocationPolicy> class List;
// The Strict Mode (ECMA-262 5th edition, 4.2.2).
-enum LanguageMode {
- // LanguageMode is expressed as a bitmask. Descriptions of the bits:
- STRICT_BIT = 1 << 0,
- STRONG_BIT = 1 << 1,
- LANGUAGE_END,
-
- // Shorthands for some common language modes.
- SLOPPY = 0,
- STRICT = STRICT_BIT,
- STRONG = STRICT_BIT | STRONG_BIT
-};
+enum LanguageMode { SLOPPY, STRICT, LANGUAGE_END = 3 };
inline std::ostream& operator<<(std::ostream& os, const LanguageMode& mode) {
switch (mode) {
- case SLOPPY:
- return os << "sloppy";
- case STRICT:
- return os << "strict";
- case STRONG:
- return os << "strong";
- default:
- return os << "unknown";
+ case SLOPPY: return os << "sloppy";
+ case STRICT: return os << "strict";
+ default: UNREACHABLE();
}
+ return os;
}
inline bool is_sloppy(LanguageMode language_mode) {
- return (language_mode & STRICT_BIT) == 0;
+ return language_mode == SLOPPY;
}
inline bool is_strict(LanguageMode language_mode) {
- return language_mode & STRICT_BIT;
-}
-
-
-inline bool is_strong(LanguageMode language_mode) {
- return language_mode & STRONG_BIT;
+ return language_mode != SLOPPY;
}
inline bool is_valid_language_mode(int language_mode) {
- return language_mode == SLOPPY || language_mode == STRICT ||
- language_mode == STRONG;
-}
-
-
-inline LanguageMode construct_language_mode(bool strict_bit, bool strong_bit) {
- int language_mode = 0;
- if (strict_bit) language_mode |= STRICT_BIT;
- if (strong_bit) language_mode |= STRONG_BIT;
- DCHECK(is_valid_language_mode(language_mode));
- return static_cast<LanguageMode>(language_mode);
+ return language_mode == SLOPPY || language_mode == STRICT;
}
-// Strong mode behaviour must sometimes be signalled by a two valued enum where
-// caching is involved, to prevent sloppy and strict mode from being incorrectly
-// differentiated.
-enum class Strength : bool {
- WEAK, // sloppy, strict behaviour
- STRONG // strong behaviour
-};
-
-
-inline bool is_strong(Strength strength) {
- return strength == Strength::STRONG;
-}
-
-
-inline std::ostream& operator<<(std::ostream& os, const Strength& strength) {
- return os << (is_strong(strength) ? "strong" : "weak");
-}
-
-
-inline Strength strength(LanguageMode language_mode) {
- return is_strong(language_mode) ? Strength::STRONG : Strength::WEAK;
-}
-
-
-inline size_t hash_value(Strength strength) {
- return static_cast<size_t>(strength);
+inline LanguageMode construct_language_mode(bool strict_bit) {
+ return static_cast<LanguageMode>(strict_bit);
}
@@ -525,7 +487,9 @@ enum VisitMode {
VISIT_ALL,
VISIT_ALL_IN_SCAVENGE,
VISIT_ALL_IN_SWEEP_NEWSPACE,
- VISIT_ONLY_STRONG
+ VISIT_ONLY_STRONG,
+ VISIT_ONLY_STRONG_FOR_SERIALIZATION,
+ VISIT_ONLY_STRONG_ROOT_LIST,
};
// Flag indicating whether code is built into the VM (one of the natives files).
@@ -726,10 +690,13 @@ enum CpuFeature {
FPR_GPR_MOV,
LWSYNC,
ISELECT,
+ // S390
+ DISTINCT_OPS,
+ GENERAL_INSTR_EXT,
+ FLOATING_POINT_EXT,
NUMBER_OF_CPU_FEATURES
};
-
// Defines hints about receiver values based on structural knowledge.
enum class ConvertReceiverMode : unsigned {
kNullOrUndefined, // Guaranteed to be null or undefined.
@@ -959,12 +926,6 @@ enum MaybeAssignedFlag { kNotAssigned, kMaybeAssigned };
enum ParseErrorType { kSyntaxError = 0, kReferenceError = 1 };
-enum ClearExceptionFlag {
- KEEP_EXCEPTION,
- CLEAR_EXCEPTION
-};
-
-
enum MinusZeroMode {
TREAT_MINUS_ZERO_AS_ZERO,
FAIL_ON_MINUS_ZERO
@@ -1069,7 +1030,6 @@ inline bool IsConstructable(FunctionKind kind, LanguageMode mode) {
if (IsConciseMethod(kind)) return false;
if (IsArrowFunction(kind)) return false;
if (IsGeneratorFunction(kind)) return false;
- if (is_strong(mode)) return IsClassConstructor(kind);
return true;
}
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index b162ba8645..6331c79fac 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -118,9 +118,8 @@ Address HandleScope::current_limit_address(Isolate* isolate) {
return reinterpret_cast<Address>(&isolate->handle_scope_data()->limit);
}
-
CanonicalHandleScope::CanonicalHandleScope(Isolate* isolate)
- : isolate_(isolate) {
+ : isolate_(isolate), zone_(isolate->allocator()) {
HandleScopeData* handle_scope_data = isolate_->handle_scope_data();
prev_canonical_scope_ = handle_scope_data->canonical_scope;
handle_scope_data->canonical_scope = this;
diff --git a/deps/v8/src/heap-symbols.h b/deps/v8/src/heap-symbols.h
index 4a772ebbac..f019acecb2 100644
--- a/deps/v8/src/heap-symbols.h
+++ b/deps/v8/src/heap-symbols.h
@@ -45,7 +45,6 @@
V(dot_string, ".") \
V(entries_string, "entries") \
V(enumerable_string, "enumerable") \
- V(enumerate_string, "enumerate") \
V(Error_string, "Error") \
V(eval_string, "eval") \
V(false_string, "false") \
@@ -178,7 +177,6 @@
V(strict_function_transition_symbol) \
V(string_iterator_iterated_string_symbol) \
V(string_iterator_next_index_symbol) \
- V(strong_function_transition_symbol) \
V(uninitialized_symbol)
#define PUBLIC_SYMBOL_LIST(V) \
diff --git a/deps/v8/src/heap/gc-idle-time-handler.cc b/deps/v8/src/heap/gc-idle-time-handler.cc
index 4e6e6081d7..972dfa6e5c 100644
--- a/deps/v8/src/heap/gc-idle-time-handler.cc
+++ b/deps/v8/src/heap/gc-idle-time-handler.cc
@@ -12,7 +12,6 @@ namespace v8 {
namespace internal {
const double GCIdleTimeHandler::kConservativeTimeRatio = 0.9;
-const size_t GCIdleTimeHandler::kMaxMarkCompactTimeInMs = 1000;
const size_t GCIdleTimeHandler::kMaxFinalIncrementalMarkCompactTimeInMs = 1000;
const double GCIdleTimeHandler::kHighContextDisposalRate = 100;
const size_t GCIdleTimeHandler::kMinTimeForOverApproximatingWeakClosureInMs = 1;
@@ -42,86 +41,55 @@ void GCIdleTimeAction::Print() {
void GCIdleTimeHeapState::Print() {
PrintF("contexts_disposed=%d ", contexts_disposed);
PrintF("contexts_disposal_rate=%f ", contexts_disposal_rate);
- PrintF("size_of_objects=%" V8_PTR_PREFIX "d ", size_of_objects);
+ PrintF("size_of_objects=%" V8_SIZET_PREFIX V8_PTR_PREFIX "d ",
+ size_of_objects);
PrintF("incremental_marking_stopped=%d ", incremental_marking_stopped);
}
-
size_t GCIdleTimeHandler::EstimateMarkingStepSize(
- size_t idle_time_in_ms, size_t marking_speed_in_bytes_per_ms) {
+ double idle_time_in_ms, double marking_speed_in_bytes_per_ms) {
DCHECK(idle_time_in_ms > 0);
if (marking_speed_in_bytes_per_ms == 0) {
marking_speed_in_bytes_per_ms = kInitialConservativeMarkingSpeed;
}
- size_t marking_step_size = marking_speed_in_bytes_per_ms * idle_time_in_ms;
- if (marking_step_size / marking_speed_in_bytes_per_ms != idle_time_in_ms) {
- // In the case of an overflow we return maximum marking step size.
+ double marking_step_size = marking_speed_in_bytes_per_ms * idle_time_in_ms;
+ if (marking_step_size >= kMaximumMarkingStepSize) {
return kMaximumMarkingStepSize;
}
-
- if (marking_step_size > kMaximumMarkingStepSize)
- return kMaximumMarkingStepSize;
-
return static_cast<size_t>(marking_step_size * kConservativeTimeRatio);
}
-
-size_t GCIdleTimeHandler::EstimateMarkCompactTime(
- size_t size_of_objects, size_t mark_compact_speed_in_bytes_per_ms) {
- // TODO(hpayer): Be more precise about the type of mark-compact event. It
- // makes a huge difference if compaction is happening.
- if (mark_compact_speed_in_bytes_per_ms == 0) {
- mark_compact_speed_in_bytes_per_ms = kInitialConservativeMarkCompactSpeed;
- }
- size_t result = size_of_objects / mark_compact_speed_in_bytes_per_ms;
- return Min(result, kMaxMarkCompactTimeInMs);
-}
-
-
-size_t GCIdleTimeHandler::EstimateFinalIncrementalMarkCompactTime(
+double GCIdleTimeHandler::EstimateFinalIncrementalMarkCompactTime(
size_t size_of_objects,
- size_t final_incremental_mark_compact_speed_in_bytes_per_ms) {
+ double final_incremental_mark_compact_speed_in_bytes_per_ms) {
if (final_incremental_mark_compact_speed_in_bytes_per_ms == 0) {
final_incremental_mark_compact_speed_in_bytes_per_ms =
kInitialConservativeFinalIncrementalMarkCompactSpeed;
}
- size_t result =
+ double result =
size_of_objects / final_incremental_mark_compact_speed_in_bytes_per_ms;
- return Min(result, kMaxFinalIncrementalMarkCompactTimeInMs);
-}
-
-
-bool GCIdleTimeHandler::ShouldDoMarkCompact(
- size_t idle_time_in_ms, size_t size_of_objects,
- size_t mark_compact_speed_in_bytes_per_ms) {
- return idle_time_in_ms >= kMaxScheduledIdleTime &&
- idle_time_in_ms >=
- EstimateMarkCompactTime(size_of_objects,
- mark_compact_speed_in_bytes_per_ms);
+ return Min<double>(result, kMaxFinalIncrementalMarkCompactTimeInMs);
}
-
bool GCIdleTimeHandler::ShouldDoContextDisposalMarkCompact(
int contexts_disposed, double contexts_disposal_rate) {
return contexts_disposed > 0 && contexts_disposal_rate > 0 &&
contexts_disposal_rate < kHighContextDisposalRate;
}
-
bool GCIdleTimeHandler::ShouldDoFinalIncrementalMarkCompact(
- size_t idle_time_in_ms, size_t size_of_objects,
- size_t final_incremental_mark_compact_speed_in_bytes_per_ms) {
+ double idle_time_in_ms, size_t size_of_objects,
+ double final_incremental_mark_compact_speed_in_bytes_per_ms) {
return idle_time_in_ms >=
EstimateFinalIncrementalMarkCompactTime(
size_of_objects,
final_incremental_mark_compact_speed_in_bytes_per_ms);
}
-
bool GCIdleTimeHandler::ShouldDoOverApproximateWeakClosure(
- size_t idle_time_in_ms) {
+ double idle_time_in_ms) {
// TODO(jochen): Estimate the time it will take to build the object groups.
return idle_time_in_ms >= kMinTimeForOverApproximatingWeakClosureInMs;
}
diff --git a/deps/v8/src/heap/gc-idle-time-handler.h b/deps/v8/src/heap/gc-idle-time-handler.h
index 74ef1b1e87..39dea7e1ff 100644
--- a/deps/v8/src/heap/gc-idle-time-handler.h
+++ b/deps/v8/src/heap/gc-idle-time-handler.h
@@ -90,9 +90,6 @@ class GCIdleTimeHandler {
static const size_t kInitialConservativeFinalIncrementalMarkCompactSpeed =
2 * MB;
- // Maximum mark-compact time returned by EstimateMarkCompactTime.
- static const size_t kMaxMarkCompactTimeInMs;
-
// Maximum final incremental mark-compact time returned by
// EstimateFinalIncrementalMarkCompactTime.
static const size_t kMaxFinalIncrementalMarkCompactTimeInMs;
@@ -130,27 +127,20 @@ class GCIdleTimeHandler {
void ResetNoProgressCounter() { idle_times_which_made_no_progress_ = 0; }
- static size_t EstimateMarkingStepSize(size_t idle_time_in_ms,
- size_t marking_speed_in_bytes_per_ms);
-
- static size_t EstimateMarkCompactTime(
- size_t size_of_objects, size_t mark_compact_speed_in_bytes_per_ms);
-
- static size_t EstimateFinalIncrementalMarkCompactTime(
- size_t size_of_objects, size_t mark_compact_speed_in_bytes_per_ms);
+ static size_t EstimateMarkingStepSize(double idle_time_in_ms,
+ double marking_speed_in_bytes_per_ms);
- static bool ShouldDoMarkCompact(size_t idle_time_in_ms,
- size_t size_of_objects,
- size_t mark_compact_speed_in_bytes_per_ms);
+ static double EstimateFinalIncrementalMarkCompactTime(
+ size_t size_of_objects, double mark_compact_speed_in_bytes_per_ms);
static bool ShouldDoContextDisposalMarkCompact(int context_disposed,
double contexts_disposal_rate);
static bool ShouldDoFinalIncrementalMarkCompact(
- size_t idle_time_in_ms, size_t size_of_objects,
- size_t final_incremental_mark_compact_speed_in_bytes_per_ms);
+ double idle_time_in_ms, size_t size_of_objects,
+ double final_incremental_mark_compact_speed_in_bytes_per_ms);
- static bool ShouldDoOverApproximateWeakClosure(size_t idle_time_in_ms);
+ static bool ShouldDoOverApproximateWeakClosure(double idle_time_in_ms);
private:
GCIdleTimeAction NothingOrDone(double idle_time_in_ms);
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index ec1ad65391..3c46f5292d 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -24,6 +24,13 @@ static intptr_t CountTotalHolesSize(Heap* heap) {
GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope)
: tracer_(tracer), scope_(scope) {
start_time_ = tracer_->heap_->MonotonicallyIncreasingTimeInMs();
+ // TODO(cbruni): remove once we fully moved to a trace-based system.
+ if (FLAG_runtime_call_stats) {
+ RuntimeCallStats* stats =
+ tracer_->heap_->isolate()->counters()->runtime_call_stats();
+ timer_.Initialize(&stats->GC, stats->current_timer());
+ stats->Enter(&timer_);
+ }
}
@@ -31,26 +38,25 @@ GCTracer::Scope::~Scope() {
DCHECK(scope_ < NUMBER_OF_SCOPES); // scope_ is unsigned.
tracer_->current_.scopes[scope_] +=
tracer_->heap_->MonotonicallyIncreasingTimeInMs() - start_time_;
+ // TODO(cbruni): remove once we fully moved to a trace-based system.
+ if (FLAG_runtime_call_stats) {
+ tracer_->heap_->isolate()->counters()->runtime_call_stats()->Leave(&timer_);
+ }
}
-
-GCTracer::AllocationEvent::AllocationEvent(double duration,
- size_t allocation_in_bytes) {
- duration_ = duration;
- allocation_in_bytes_ = allocation_in_bytes;
-}
-
-
-GCTracer::ContextDisposalEvent::ContextDisposalEvent(double time) {
- time_ = time;
-}
-
-
-GCTracer::SurvivalEvent::SurvivalEvent(double promotion_ratio) {
- promotion_ratio_ = promotion_ratio;
+const char* GCTracer::Scope::Name(ScopeId id) {
+#define CASE(scope) \
+ case Scope::scope: \
+ return "V8.GC_" #scope;
+ switch (id) {
+ TRACER_SCOPES(CASE)
+ case Scope::NUMBER_OF_SCOPES:
+ break;
+ }
+#undef CASE
+ return "(unknown)";
}
-
GCTracer::Event::Event(Type type, const char* gc_reason,
const char* collector_reason)
: type(type),
@@ -182,9 +188,15 @@ void GCTracer::Start(GarbageCollector collector, const char* gc_reason,
start_time, committed_memory);
heap_->isolate()->counters()->aggregated_memory_heap_used()->AddSample(
start_time, used_memory);
+ // TODO(cbruni): remove once we fully moved to a trace-based system.
+ if (FLAG_runtime_call_stats) {
+ RuntimeCallStats* stats =
+ heap_->isolate()->counters()->runtime_call_stats();
+ timer_.Initialize(&stats->GC, stats->current_timer());
+ stats->Enter(&timer_);
+ }
}
-
void GCTracer::Stop(GarbageCollector collector) {
start_counter_--;
if (start_counter_ != 0) {
@@ -215,6 +227,7 @@ void GCTracer::Stop(GarbageCollector collector) {
heap_->isolate()->counters()->aggregated_memory_heap_used()->AddSample(
current_.end_time, used_memory);
+ double duration = current_.end_time - current_.start_time;
if (current_.type == Event::SCAVENGER) {
current_.incremental_marking_steps =
current_.cumulative_incremental_marking_steps -
@@ -228,7 +241,10 @@ void GCTracer::Stop(GarbageCollector collector) {
current_.pure_incremental_marking_duration =
current_.cumulative_pure_incremental_marking_duration -
previous_.cumulative_pure_incremental_marking_duration;
- scavenger_events_.push_front(current_);
+ recorded_scavenges_total_.Push(
+ MakeBytesAndDuration(current_.new_space_object_size, duration));
+ recorded_scavenges_survived_.Push(MakeBytesAndDuration(
+ current_.survived_new_space_object_size, duration));
} else if (current_.type == Event::INCREMENTAL_MARK_COMPACTOR) {
current_.incremental_marking_steps =
current_.cumulative_incremental_marking_steps -
@@ -247,20 +263,24 @@ void GCTracer::Stop(GarbageCollector collector) {
previous_incremental_mark_compactor_event_
.cumulative_pure_incremental_marking_duration;
longest_incremental_marking_step_ = 0.0;
- incremental_mark_compactor_events_.push_front(current_);
+ recorded_incremental_marking_steps_.Push(
+ MakeBytesAndDuration(current_.incremental_marking_bytes,
+ current_.pure_incremental_marking_duration));
+ recorded_incremental_mark_compacts_.Push(
+ MakeBytesAndDuration(current_.start_object_size, duration));
combined_mark_compact_speed_cache_ = 0.0;
} else {
DCHECK(current_.incremental_marking_bytes == 0);
DCHECK(current_.incremental_marking_duration == 0);
DCHECK(current_.pure_incremental_marking_duration == 0);
longest_incremental_marking_step_ = 0.0;
- mark_compactor_events_.push_front(current_);
+ recorded_mark_compacts_.Push(
+ MakeBytesAndDuration(current_.start_object_size, duration));
combined_mark_compact_speed_cache_ = 0.0;
}
// TODO(ernstm): move the code below out of GCTracer.
- double duration = current_.end_time - current_.start_time;
double spent_in_mutator = Max(current_.start_time - previous_.end_time, 0.0);
heap_->UpdateCumulativeGCStatistics(duration, spent_in_mutator,
@@ -281,6 +301,10 @@ void GCTracer::Stop(GarbageCollector collector) {
longest_incremental_marking_finalization_step_ = 0.0;
cumulative_incremental_marking_finalization_steps_ = 0;
cumulative_incremental_marking_finalization_duration_ = 0.0;
+ // TODO(cbruni): remove once we fully moved to a trace-based system.
+ if (FLAG_runtime_call_stats) {
+ heap_->isolate()->counters()->runtime_call_stats()->Leave(&timer_);
+ }
}
@@ -313,11 +337,14 @@ void GCTracer::SampleAllocation(double current_ms,
void GCTracer::AddAllocation(double current_ms) {
allocation_time_ms_ = current_ms;
- new_space_allocation_events_.push_front(AllocationEvent(
- allocation_duration_since_gc_, new_space_allocation_in_bytes_since_gc_));
- old_generation_allocation_events_.push_front(
- AllocationEvent(allocation_duration_since_gc_,
- old_generation_allocation_in_bytes_since_gc_));
+ if (allocation_duration_since_gc_ > 0) {
+ recorded_new_generation_allocations_.Push(
+ MakeBytesAndDuration(new_space_allocation_in_bytes_since_gc_,
+ allocation_duration_since_gc_));
+ recorded_old_generation_allocations_.Push(
+ MakeBytesAndDuration(old_generation_allocation_in_bytes_since_gc_,
+ allocation_duration_since_gc_));
+ }
allocation_duration_since_gc_ = 0;
new_space_allocation_in_bytes_since_gc_ = 0;
old_generation_allocation_in_bytes_since_gc_ = 0;
@@ -325,19 +352,19 @@ void GCTracer::AddAllocation(double current_ms) {
void GCTracer::AddContextDisposalTime(double time) {
- context_disposal_events_.push_front(ContextDisposalEvent(time));
+ recorded_context_disposal_times_.Push(time);
}
void GCTracer::AddCompactionEvent(double duration,
intptr_t live_bytes_compacted) {
- compaction_events_.push_front(
- CompactionEvent(duration, live_bytes_compacted));
+ recorded_compactions_.Push(
+ MakeBytesAndDuration(live_bytes_compacted, duration));
}
void GCTracer::AddSurvivalRatio(double promotion_ratio) {
- survival_events_.push_front(SurvivalEvent(promotion_ratio));
+ recorded_survival_ratios_.Push(promotion_ratio);
}
@@ -394,9 +421,8 @@ void GCTracer::Print() const {
static_cast<double>(current_.end_object_size) / MB,
static_cast<double>(current_.end_memory_size) / MB);
- int external_time = static_cast<int>(current_.scopes[Scope::EXTERNAL]);
double duration = current_.end_time - current_.start_time;
- Output("%.1f / %d ms", duration, external_time);
+ Output("%.1f / %.1f ms", duration, TotalExternalTime());
if (current_.type == Event::SCAVENGER) {
if (current_.incremental_marking_steps > 0) {
@@ -448,10 +474,12 @@ void GCTracer::PrintNVP() const {
"code=%.2f "
"semispace=%.2f "
"object_groups=%.2f "
+ "external_prologue=%.2f "
+ "external_epilogue=%.2f "
+ "external_weak_global_handles=%.2f "
"steps_count=%d "
"steps_took=%.1f "
- "scavenge_throughput=%" V8_PTR_PREFIX
- "d "
+ "scavenge_throughput=%.f "
"total_size_before=%" V8_PTR_PREFIX
"d "
"total_size_after=%" V8_PTR_PREFIX
@@ -473,8 +501,7 @@ void GCTracer::PrintNVP() const {
"average_survival_ratio=%.1f%% "
"promotion_rate=%.1f%% "
"semi_space_copy_rate=%.1f%% "
- "new_space_allocation_throughput=%" V8_PTR_PREFIX
- "d "
+ "new_space_allocation_throughput=%.1f "
"context_disposal_rate=%.1f\n",
heap_->isolate()->time_millis_since_init(), duration,
spent_in_mutator, current_.TypeName(true),
@@ -486,6 +513,9 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::SCAVENGER_CODE_FLUSH_CANDIDATES],
current_.scopes[Scope::SCAVENGER_SEMISPACE],
current_.scopes[Scope::SCAVENGER_OBJECT_GROUPS],
+ current_.scopes[Scope::SCAVENGER_EXTERNAL_PROLOGUE],
+ current_.scopes[Scope::SCAVENGER_EXTERNAL_EPILOGUE],
+ current_.scopes[Scope::EXTERNAL_WEAK_GLOBAL_HANDLES],
current_.incremental_marking_steps,
current_.incremental_marking_duration,
ScavengeSpeedInBytesPerMillisecond(),
@@ -509,7 +539,6 @@ void GCTracer::PrintNVP() const {
"mutator=%.1f "
"gc=%s "
"reduce_memory=%d "
- "external=%.1f "
"clear=%1.f "
"clear.code_flush=%.1f "
"clear.dependent_code=%.1f "
@@ -524,18 +553,27 @@ void GCTracer::PrintNVP() const {
"evacuate=%.1f "
"evacuate.candidates=%.1f "
"evacuate.clean_up=%.1f "
- "evacuate.new_space=%.1f "
+ "evacuate.copy=%.1f "
"evacuate.update_pointers=%.1f "
"evacuate.update_pointers.between_evacuated=%.1f "
"evacuate.update_pointers.to_evacuated=%.1f "
"evacuate.update_pointers.to_new=%.1f "
"evacuate.update_pointers.weak=%.1f "
+ "external.mc_prologue=%.1f "
+ "external.mc_epilogue=%.1f "
+ "external.mc_incremental_prologue=%.1f "
+ "external.mc_incremental_epilogue=%.1f "
+ "external.weak_global_handles=%.1f "
"finish=%.1f "
"mark=%.1f "
"mark.finish_incremental=%.1f "
"mark.prepare_code_flush=%.1f "
"mark.roots=%.1f "
"mark.weak_closure=%.1f "
+ "mark.weak_closure.ephemeral=%.1f "
+ "mark.weak_closure.weak_handles=%.1f "
+ "mark.weak_closure.weak_roots=%.1f "
+ "mark.weak_closure.harmony=%.1f "
"sweep=%.1f "
"sweep.code=%.1f "
"sweep.map=%.1f "
@@ -547,8 +585,7 @@ void GCTracer::PrintNVP() const {
"finalization_steps_count=%d "
"finalization_steps_took=%.1f "
"finalization_longest_step=%.1f "
- "incremental_marking_throughput=%" V8_PTR_PREFIX
- "d "
+ "incremental_marking_throughput=%.f "
"total_size_before=%" V8_PTR_PREFIX
"d "
"total_size_after=%" V8_PTR_PREFIX
@@ -570,13 +607,12 @@ void GCTracer::PrintNVP() const {
"average_survival_ratio=%.1f%% "
"promotion_rate=%.1f%% "
"semi_space_copy_rate=%.1f%% "
- "new_space_allocation_throughput=%" V8_PTR_PREFIX
- "d "
+ "new_space_allocation_throughput=%.1f "
"context_disposal_rate=%.1f "
- "compaction_speed=%" V8_PTR_PREFIX "d\n",
+ "compaction_speed=%.f\n",
heap_->isolate()->time_millis_since_init(), duration,
spent_in_mutator, current_.TypeName(true), current_.reduce_memory,
- current_.scopes[Scope::EXTERNAL], current_.scopes[Scope::MC_CLEAR],
+ current_.scopes[Scope::MC_CLEAR],
current_.scopes[Scope::MC_CLEAR_CODE_FLUSH],
current_.scopes[Scope::MC_CLEAR_DEPENDENT_CODE],
current_.scopes[Scope::MC_CLEAR_GLOBAL_HANDLES],
@@ -590,17 +626,26 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MC_EVACUATE],
current_.scopes[Scope::MC_EVACUATE_CANDIDATES],
current_.scopes[Scope::MC_EVACUATE_CLEAN_UP],
- current_.scopes[Scope::MC_EVACUATE_NEW_SPACE],
+ current_.scopes[Scope::MC_EVACUATE_COPY],
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS],
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED],
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED],
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW],
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK],
+ current_.scopes[Scope::MC_EXTERNAL_PROLOGUE],
+ current_.scopes[Scope::MC_EXTERNAL_EPILOGUE],
+ current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE],
+ current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE],
+ current_.scopes[Scope::EXTERNAL_WEAK_GLOBAL_HANDLES],
current_.scopes[Scope::MC_FINISH], current_.scopes[Scope::MC_MARK],
current_.scopes[Scope::MC_MARK_FINISH_INCREMENTAL],
current_.scopes[Scope::MC_MARK_PREPARE_CODE_FLUSH],
current_.scopes[Scope::MC_MARK_ROOTS],
current_.scopes[Scope::MC_MARK_WEAK_CLOSURE],
+ current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_EPHEMERAL],
+ current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES],
+ current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS],
+ current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_HARMONY],
current_.scopes[Scope::MC_SWEEP],
current_.scopes[Scope::MC_SWEEP_CODE],
current_.scopes[Scope::MC_SWEEP_MAP],
@@ -632,181 +677,72 @@ void GCTracer::PrintNVP() const {
}
}
-
-double GCTracer::MeanDuration(const EventBuffer& events) const {
- if (events.empty()) return 0.0;
-
- double mean = 0.0;
- EventBuffer::const_iterator iter = events.begin();
- while (iter != events.end()) {
- mean += iter->end_time - iter->start_time;
- ++iter;
- }
-
- return mean / events.size();
-}
-
-
-double GCTracer::MaxDuration(const EventBuffer& events) const {
- if (events.empty()) return 0.0;
-
- double maximum = 0.0f;
- EventBuffer::const_iterator iter = events.begin();
- while (iter != events.end()) {
- maximum = Max(iter->end_time - iter->start_time, maximum);
- ++iter;
- }
-
- return maximum;
-}
-
-
-double GCTracer::MeanIncrementalMarkingDuration() const {
- if (cumulative_incremental_marking_steps_ == 0) return 0.0;
-
- // We haven't completed an entire round of incremental marking, yet.
- // Use data from GCTracer instead of data from event buffers.
- if (incremental_mark_compactor_events_.empty()) {
- return cumulative_incremental_marking_duration_ /
- cumulative_incremental_marking_steps_;
- }
-
- int steps = 0;
- double durations = 0.0;
- EventBuffer::const_iterator iter = incremental_mark_compactor_events_.begin();
- while (iter != incremental_mark_compactor_events_.end()) {
- steps += iter->incremental_marking_steps;
- durations += iter->incremental_marking_duration;
- ++iter;
- }
-
- if (steps == 0) return 0.0;
-
- return durations / steps;
+double GCTracer::AverageSpeed(const RingBuffer<BytesAndDuration>& buffer,
+ const BytesAndDuration& initial, double time_ms) {
+ BytesAndDuration sum = buffer.Sum(
+ [time_ms](BytesAndDuration a, BytesAndDuration b) {
+ if (time_ms != 0 && a.second >= time_ms) return a;
+ return std::make_pair(a.first + b.first, a.second + b.second);
+ },
+ initial);
+ uint64_t bytes = sum.first;
+ double durations = sum.second;
+ if (durations == 0.0) return 0;
+ double speed = bytes / durations;
+ const int max_speed = 1024 * MB;
+ const int min_speed = 1;
+ if (speed >= max_speed) return max_speed;
+ if (speed <= min_speed) return min_speed;
+ return speed;
}
-
-double GCTracer::MaxIncrementalMarkingDuration() const {
- // We haven't completed an entire round of incremental marking, yet.
- // Use data from GCTracer instead of data from event buffers.
- if (incremental_mark_compactor_events_.empty())
- return longest_incremental_marking_step_;
-
- double max_duration = 0.0;
- EventBuffer::const_iterator iter = incremental_mark_compactor_events_.begin();
- while (iter != incremental_mark_compactor_events_.end())
- max_duration = Max(iter->longest_incremental_marking_step, max_duration);
-
- return max_duration;
+double GCTracer::AverageSpeed(const RingBuffer<BytesAndDuration>& buffer) {
+ return AverageSpeed(buffer, MakeBytesAndDuration(0, 0), 0);
}
-
-intptr_t GCTracer::IncrementalMarkingSpeedInBytesPerMillisecond() const {
+double GCTracer::IncrementalMarkingSpeedInBytesPerMillisecond() const {
if (cumulative_incremental_marking_duration_ == 0.0) return 0;
-
// We haven't completed an entire round of incremental marking, yet.
// Use data from GCTracer instead of data from event buffers.
- if (incremental_mark_compactor_events_.empty()) {
- return static_cast<intptr_t>(cumulative_incremental_marking_bytes_ /
- cumulative_pure_incremental_marking_duration_);
- }
-
- intptr_t bytes = 0;
- double durations = 0.0;
- EventBuffer::const_iterator iter = incremental_mark_compactor_events_.begin();
- while (iter != incremental_mark_compactor_events_.end()) {
- bytes += iter->incremental_marking_bytes;
- durations += iter->pure_incremental_marking_duration;
- ++iter;
+ if (recorded_incremental_marking_steps_.Count() == 0) {
+ return cumulative_incremental_marking_bytes_ /
+ cumulative_pure_incremental_marking_duration_;
}
-
- if (durations == 0.0) return 0;
- // Make sure the result is at least 1.
- return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
+ return AverageSpeed(recorded_incremental_marking_steps_);
}
-
-intptr_t GCTracer::ScavengeSpeedInBytesPerMillisecond(
+double GCTracer::ScavengeSpeedInBytesPerMillisecond(
ScavengeSpeedMode mode) const {
- intptr_t bytes = 0;
- double durations = 0.0;
- EventBuffer::const_iterator iter = scavenger_events_.begin();
- while (iter != scavenger_events_.end()) {
- bytes += mode == kForAllObjects ? iter->new_space_object_size
- : iter->survived_new_space_object_size;
- durations += iter->end_time - iter->start_time;
- ++iter;
+ if (mode == kForAllObjects) {
+ return AverageSpeed(recorded_scavenges_total_);
+ } else {
+ return AverageSpeed(recorded_scavenges_survived_);
}
-
- if (durations == 0.0) return 0;
- // Make sure the result is at least 1.
- return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
}
-
-intptr_t GCTracer::CompactionSpeedInBytesPerMillisecond() const {
- if (compaction_events_.size() == 0) return 0;
- intptr_t bytes = 0;
- double durations = 0.0;
- CompactionEventBuffer::const_iterator iter = compaction_events_.begin();
- while (iter != compaction_events_.end()) {
- bytes += iter->live_bytes_compacted;
- durations += iter->duration;
- ++iter;
- }
-
- if (durations == 0.0) return 0;
- // Make sure the result is at least 1.
- return Max<intptr_t>(static_cast<intptr_t>(bytes / durations + 0.5), 1);
+double GCTracer::CompactionSpeedInBytesPerMillisecond() const {
+ return AverageSpeed(recorded_compactions_);
}
-
-intptr_t GCTracer::MarkCompactSpeedInBytesPerMillisecond() const {
- intptr_t bytes = 0;
- double durations = 0.0;
- EventBuffer::const_iterator iter = mark_compactor_events_.begin();
- while (iter != mark_compactor_events_.end()) {
- bytes += iter->start_object_size;
- durations += iter->end_time - iter->start_time;
- ++iter;
- }
-
- if (durations == 0.0) return 0;
- // Make sure the result is at least 1.
- return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
+double GCTracer::MarkCompactSpeedInBytesPerMillisecond() const {
+ return AverageSpeed(recorded_mark_compacts_);
}
-
-intptr_t GCTracer::FinalIncrementalMarkCompactSpeedInBytesPerMillisecond()
- const {
- intptr_t bytes = 0;
- double durations = 0.0;
- EventBuffer::const_iterator iter = incremental_mark_compactor_events_.begin();
- while (iter != incremental_mark_compactor_events_.end()) {
- bytes += iter->start_object_size;
- durations += iter->end_time - iter->start_time;
- ++iter;
- }
-
- if (durations == 0.0) return 0;
- // Make sure the result is at least 1.
- return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
+double GCTracer::FinalIncrementalMarkCompactSpeedInBytesPerMillisecond() const {
+ return AverageSpeed(recorded_incremental_mark_compacts_);
}
-
double GCTracer::CombinedMarkCompactSpeedInBytesPerMillisecond() {
if (combined_mark_compact_speed_cache_ > 0)
return combined_mark_compact_speed_cache_;
const double kMinimumMarkingSpeed = 0.5;
- double speed1 =
- static_cast<double>(IncrementalMarkingSpeedInBytesPerMillisecond());
- double speed2 = static_cast<double>(
- FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
+ double speed1 = IncrementalMarkingSpeedInBytesPerMillisecond();
+ double speed2 = FinalIncrementalMarkCompactSpeedInBytesPerMillisecond();
if (speed1 < kMinimumMarkingSpeed || speed2 < kMinimumMarkingSpeed) {
// No data for the incremental marking speed.
// Return the non-incremental mark-compact speed.
combined_mark_compact_speed_cache_ =
- static_cast<double>(MarkCompactSpeedInBytesPerMillisecond());
+ MarkCompactSpeedInBytesPerMillisecond();
} else {
// Combine the speed of incremental step and the speed of the final step.
// 1 / (1 / speed1 + 1 / speed2) = speed1 * speed2 / (speed1 + speed2).
@@ -815,101 +751,59 @@ double GCTracer::CombinedMarkCompactSpeedInBytesPerMillisecond() {
return combined_mark_compact_speed_cache_;
}
-
-size_t GCTracer::NewSpaceAllocationThroughputInBytesPerMillisecond(
+double GCTracer::NewSpaceAllocationThroughputInBytesPerMillisecond(
double time_ms) const {
size_t bytes = new_space_allocation_in_bytes_since_gc_;
double durations = allocation_duration_since_gc_;
- AllocationEventBuffer::const_iterator iter =
- new_space_allocation_events_.begin();
- const size_t max_bytes = static_cast<size_t>(-1);
- while (iter != new_space_allocation_events_.end() &&
- bytes < max_bytes - bytes && (time_ms == 0 || durations < time_ms)) {
- bytes += iter->allocation_in_bytes_;
- durations += iter->duration_;
- ++iter;
- }
-
- if (durations == 0.0) return 0;
- // Make sure the result is at least 1.
- return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
+ return AverageSpeed(recorded_new_generation_allocations_,
+ MakeBytesAndDuration(bytes, durations), time_ms);
}
-
-size_t GCTracer::OldGenerationAllocationThroughputInBytesPerMillisecond(
+double GCTracer::OldGenerationAllocationThroughputInBytesPerMillisecond(
double time_ms) const {
size_t bytes = old_generation_allocation_in_bytes_since_gc_;
double durations = allocation_duration_since_gc_;
- AllocationEventBuffer::const_iterator iter =
- old_generation_allocation_events_.begin();
- const size_t max_bytes = static_cast<size_t>(-1);
- while (iter != old_generation_allocation_events_.end() &&
- bytes < max_bytes - bytes && (time_ms == 0 || durations < time_ms)) {
- bytes += iter->allocation_in_bytes_;
- durations += iter->duration_;
- ++iter;
- }
-
- if (durations == 0.0) return 0;
- // Make sure the result is at least 1.
- return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
+ return AverageSpeed(recorded_old_generation_allocations_,
+ MakeBytesAndDuration(bytes, durations), time_ms);
}
-
-size_t GCTracer::AllocationThroughputInBytesPerMillisecond(
+double GCTracer::AllocationThroughputInBytesPerMillisecond(
double time_ms) const {
return NewSpaceAllocationThroughputInBytesPerMillisecond(time_ms) +
OldGenerationAllocationThroughputInBytesPerMillisecond(time_ms);
}
-
-size_t GCTracer::CurrentAllocationThroughputInBytesPerMillisecond() const {
+double GCTracer::CurrentAllocationThroughputInBytesPerMillisecond() const {
return AllocationThroughputInBytesPerMillisecond(kThroughputTimeFrameMs);
}
-
-size_t GCTracer::CurrentOldGenerationAllocationThroughputInBytesPerMillisecond()
+double GCTracer::CurrentOldGenerationAllocationThroughputInBytesPerMillisecond()
const {
return OldGenerationAllocationThroughputInBytesPerMillisecond(
kThroughputTimeFrameMs);
}
-
double GCTracer::ContextDisposalRateInMilliseconds() const {
- if (context_disposal_events_.size() < kRingBufferMaxSize) return 0.0;
-
+ if (recorded_context_disposal_times_.Count() <
+ recorded_context_disposal_times_.kSize)
+ return 0.0;
double begin = heap_->MonotonicallyIncreasingTimeInMs();
- double end = 0.0;
- ContextDisposalEventBuffer::const_iterator iter =
- context_disposal_events_.begin();
- while (iter != context_disposal_events_.end()) {
- end = iter->time_;
- ++iter;
- }
-
- return (begin - end) / context_disposal_events_.size();
+ double end = recorded_context_disposal_times_.Sum(
+ [](double a, double b) { return b; }, 0.0);
+ return (begin - end) / recorded_context_disposal_times_.Count();
}
-
double GCTracer::AverageSurvivalRatio() const {
- if (survival_events_.size() == 0) return 0.0;
-
- double sum_of_rates = 0.0;
- SurvivalEventBuffer::const_iterator iter = survival_events_.begin();
- while (iter != survival_events_.end()) {
- sum_of_rates += iter->promotion_ratio_;
- ++iter;
- }
-
- return sum_of_rates / static_cast<double>(survival_events_.size());
+ if (recorded_survival_ratios_.Count() == 0) return 0.0;
+ double sum = recorded_survival_ratios_.Sum(
+ [](double a, double b) { return a + b; }, 0.0);
+ return sum / recorded_survival_ratios_.Count();
}
-
bool GCTracer::SurvivalEventsRecorded() const {
- return survival_events_.size() > 0;
+ return recorded_survival_ratios_.Count() > 0;
}
-
-void GCTracer::ResetSurvivalEvents() { survival_events_.reset(); }
+void GCTracer::ResetSurvivalEvents() { recorded_survival_ratios_.Reset(); }
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index e8ec168187..9ea3cce8fa 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -6,88 +6,114 @@
#define V8_HEAP_GC_TRACER_H_
#include "src/base/platform/platform.h"
+#include "src/counters.h"
#include "src/globals.h"
namespace v8 {
namespace internal {
-// A simple ring buffer class with maximum size known at compile time.
-// The class only implements the functionality required in GCTracer.
-template <typename T, size_t MAX_SIZE>
+template <typename T>
class RingBuffer {
public:
- class const_iterator {
- public:
- const_iterator() : index_(0), elements_(NULL) {}
-
- const_iterator(size_t index, const T* elements)
- : index_(index), elements_(elements) {}
-
- bool operator==(const const_iterator& rhs) const {
- return elements_ == rhs.elements_ && index_ == rhs.index_;
- }
-
- bool operator!=(const const_iterator& rhs) const {
- return elements_ != rhs.elements_ || index_ != rhs.index_;
+ RingBuffer() { Reset(); }
+ static const int kSize = 10;
+ void Push(const T& value) {
+ if (count_ == kSize) {
+ elements_[start_++] = value;
+ if (start_ == kSize) start_ = 0;
+ } else {
+ DCHECK_EQ(start_, 0);
+ elements_[count_++] = value;
}
+ }
- operator const T*() const { return elements_ + index_; }
-
- const T* operator->() const { return elements_ + index_; }
-
- const T& operator*() const { return elements_[index_]; }
-
- const_iterator& operator++() {
- index_ = (index_ + 1) % (MAX_SIZE + 1);
- return *this;
- }
+ int Count() const { return count_; }
- const_iterator& operator--() {
- index_ = (index_ + MAX_SIZE) % (MAX_SIZE + 1);
- return *this;
+ template <typename Callback>
+ T Sum(Callback callback, const T& initial) const {
+ int j = start_ + count_ - 1;
+ if (j >= kSize) j -= kSize;
+ T result = initial;
+ for (int i = 0; i < count_; i++) {
+ result = callback(result, elements_[j]);
+ if (--j == -1) j += kSize;
}
-
- private:
- size_t index_;
- const T* elements_;
- };
-
- RingBuffer() : begin_(0), end_(0) {}
-
- bool empty() const { return begin_ == end_; }
- size_t size() const {
- return (end_ - begin_ + MAX_SIZE + 1) % (MAX_SIZE + 1);
- }
- const_iterator begin() const { return const_iterator(begin_, elements_); }
- const_iterator end() const { return const_iterator(end_, elements_); }
- const_iterator back() const { return --end(); }
- void push_back(const T& element) {
- elements_[end_] = element;
- end_ = (end_ + 1) % (MAX_SIZE + 1);
- if (end_ == begin_) begin_ = (begin_ + 1) % (MAX_SIZE + 1);
- }
- void push_front(const T& element) {
- begin_ = (begin_ + MAX_SIZE) % (MAX_SIZE + 1);
- if (begin_ == end_) end_ = (end_ + MAX_SIZE) % (MAX_SIZE + 1);
- elements_[begin_] = element;
+ return result;
}
- void reset() {
- begin_ = 0;
- end_ = 0;
- }
+ void Reset() { start_ = count_ = 0; }
private:
- T elements_[MAX_SIZE + 1];
- size_t begin_;
- size_t end_;
-
+ T elements_[kSize];
+ int start_;
+ int count_;
DISALLOW_COPY_AND_ASSIGN(RingBuffer);
};
+typedef std::pair<uint64_t, double> BytesAndDuration;
+
+inline BytesAndDuration MakeBytesAndDuration(uint64_t bytes, double duration) {
+ return std::make_pair(bytes, duration);
+}
enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
+#define TRACER_SCOPES(F) \
+ F(EXTERNAL_WEAK_GLOBAL_HANDLES) \
+ F(MC_CLEAR) \
+ F(MC_CLEAR_CODE_FLUSH) \
+ F(MC_CLEAR_DEPENDENT_CODE) \
+ F(MC_CLEAR_GLOBAL_HANDLES) \
+ F(MC_CLEAR_MAPS) \
+ F(MC_CLEAR_SLOTS_BUFFER) \
+ F(MC_CLEAR_STORE_BUFFER) \
+ F(MC_CLEAR_STRING_TABLE) \
+ F(MC_CLEAR_WEAK_CELLS) \
+ F(MC_CLEAR_WEAK_COLLECTIONS) \
+ F(MC_CLEAR_WEAK_LISTS) \
+ F(MC_EVACUATE) \
+ F(MC_EVACUATE_CANDIDATES) \
+ F(MC_EVACUATE_CLEAN_UP) \
+ F(MC_EVACUATE_COPY) \
+ F(MC_EVACUATE_UPDATE_POINTERS) \
+ F(MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED) \
+ F(MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED) \
+ F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW) \
+ F(MC_EVACUATE_UPDATE_POINTERS_WEAK) \
+ F(MC_EXTERNAL_EPILOGUE) \
+ F(MC_EXTERNAL_PROLOGUE) \
+ F(MC_FINISH) \
+ F(MC_INCREMENTAL_FINALIZE) \
+ F(MC_INCREMENTAL_EXTERNAL_EPILOGUE) \
+ F(MC_INCREMENTAL_EXTERNAL_PROLOGUE) \
+ F(MC_MARK) \
+ F(MC_MARK_FINISH_INCREMENTAL) \
+ F(MC_MARK_PREPARE_CODE_FLUSH) \
+ F(MC_MARK_ROOTS) \
+ F(MC_MARK_WEAK_CLOSURE) \
+ F(MC_MARK_WEAK_CLOSURE_EPHEMERAL) \
+ F(MC_MARK_WEAK_CLOSURE_WEAK_HANDLES) \
+ F(MC_MARK_WEAK_CLOSURE_WEAK_ROOTS) \
+ F(MC_MARK_WEAK_CLOSURE_HARMONY) \
+ F(MC_SWEEP) \
+ F(MC_SWEEP_CODE) \
+ F(MC_SWEEP_MAP) \
+ F(MC_SWEEP_OLD) \
+ F(SCAVENGER_CODE_FLUSH_CANDIDATES) \
+ F(SCAVENGER_EXTERNAL_EPILOGUE) \
+ F(SCAVENGER_EXTERNAL_PROLOGUE) \
+ F(SCAVENGER_OBJECT_GROUPS) \
+ F(SCAVENGER_OLD_TO_NEW_POINTERS) \
+ F(SCAVENGER_ROOTS) \
+ F(SCAVENGER_SCAVENGE) \
+ F(SCAVENGER_SEMISPACE) \
+ F(SCAVENGER_WEAK)
+
+#define TRACE_GC(tracer, scope_id) \
+ GCTracer::Scope::ScopeId gc_tracer_scope_id(scope_id); \
+ GCTracer::Scope gc_tracer_scope(tracer, gc_tracer_scope_id); \
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), \
+ GCTracer::Scope::Name(gc_tracer_scope_id))
// GCTracer collects and prints ONE line after each garbage collector
// invocation IFF --trace_gc is used.
@@ -97,112 +123,26 @@ class GCTracer {
class Scope {
public:
enum ScopeId {
- EXTERNAL,
- MC_CLEAR,
- MC_CLEAR_CODE_FLUSH,
- MC_CLEAR_DEPENDENT_CODE,
- MC_CLEAR_GLOBAL_HANDLES,
- MC_CLEAR_MAPS,
- MC_CLEAR_SLOTS_BUFFER,
- MC_CLEAR_STORE_BUFFER,
- MC_CLEAR_STRING_TABLE,
- MC_CLEAR_WEAK_CELLS,
- MC_CLEAR_WEAK_COLLECTIONS,
- MC_CLEAR_WEAK_LISTS,
- MC_EVACUATE,
- MC_EVACUATE_CANDIDATES,
- MC_EVACUATE_CLEAN_UP,
- MC_EVACUATE_NEW_SPACE,
- MC_EVACUATE_UPDATE_POINTERS,
- MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED,
- MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED,
- MC_EVACUATE_UPDATE_POINTERS_TO_NEW,
- MC_EVACUATE_UPDATE_POINTERS_WEAK,
- MC_FINISH,
- MC_INCREMENTAL_FINALIZE,
- MC_MARK,
- MC_MARK_FINISH_INCREMENTAL,
- MC_MARK_PREPARE_CODE_FLUSH,
- MC_MARK_ROOTS,
- MC_MARK_WEAK_CLOSURE,
- MC_SWEEP,
- MC_SWEEP_CODE,
- MC_SWEEP_MAP,
- MC_SWEEP_OLD,
- SCAVENGER_CODE_FLUSH_CANDIDATES,
- SCAVENGER_OBJECT_GROUPS,
- SCAVENGER_OLD_TO_NEW_POINTERS,
- SCAVENGER_ROOTS,
- SCAVENGER_SCAVENGE,
- SCAVENGER_SEMISPACE,
- SCAVENGER_WEAK,
- NUMBER_OF_SCOPES
+#define DEFINE_SCOPE(scope) scope,
+ TRACER_SCOPES(DEFINE_SCOPE)
+#undef DEFINE_SCOPE
+ NUMBER_OF_SCOPES
};
Scope(GCTracer* tracer, ScopeId scope);
~Scope();
+ static const char* Name(ScopeId id);
private:
GCTracer* tracer_;
ScopeId scope_;
double start_time_;
+ RuntimeCallTimer timer_;
DISALLOW_COPY_AND_ASSIGN(Scope);
};
- class AllocationEvent {
- public:
- // Default constructor leaves the event uninitialized.
- AllocationEvent() {}
-
- AllocationEvent(double duration, size_t allocation_in_bytes);
-
- // Time spent in the mutator during the end of the last sample to the
- // beginning of the next sample.
- double duration_;
-
- // Memory allocated in the new space during the end of the last sample
- // to the beginning of the next sample
- size_t allocation_in_bytes_;
- };
-
-
- class CompactionEvent {
- public:
- CompactionEvent() : duration(0), live_bytes_compacted(0) {}
-
- CompactionEvent(double duration, intptr_t live_bytes_compacted)
- : duration(duration), live_bytes_compacted(live_bytes_compacted) {}
-
- double duration;
- intptr_t live_bytes_compacted;
- };
-
-
- class ContextDisposalEvent {
- public:
- // Default constructor leaves the event uninitialized.
- ContextDisposalEvent() {}
-
- explicit ContextDisposalEvent(double time);
-
- // Time when context disposal event happened.
- double time_;
- };
-
-
- class SurvivalEvent {
- public:
- // Default constructor leaves the event uninitialized.
- SurvivalEvent() {}
-
- explicit SurvivalEvent(double survival_ratio);
-
- double promotion_ratio_;
- };
-
-
class Event {
public:
enum Type {
@@ -307,19 +247,6 @@ class GCTracer {
double scopes[Scope::NUMBER_OF_SCOPES];
};
- static const size_t kRingBufferMaxSize = 10;
-
- typedef RingBuffer<Event, kRingBufferMaxSize> EventBuffer;
-
- typedef RingBuffer<AllocationEvent, kRingBufferMaxSize> AllocationEventBuffer;
-
- typedef RingBuffer<ContextDisposalEvent, kRingBufferMaxSize>
- ContextDisposalEventBuffer;
-
- typedef RingBuffer<CompactionEvent, kRingBufferMaxSize> CompactionEventBuffer;
-
- typedef RingBuffer<SurvivalEvent, kRingBufferMaxSize> SurvivalEventBuffer;
-
static const int kThroughputTimeFrameMs = 5000;
explicit GCTracer(Heap* heap);
@@ -369,63 +296,27 @@ class GCTracer {
return cumulative_sweeping_duration_;
}
- // Compute the mean duration of the last scavenger events. Returns 0 if no
- // events have been recorded.
- double MeanScavengerDuration() const {
- return MeanDuration(scavenger_events_);
- }
-
- // Compute the max duration of the last scavenger events. Returns 0 if no
- // events have been recorded.
- double MaxScavengerDuration() const { return MaxDuration(scavenger_events_); }
-
- // Compute the mean duration of the last mark compactor events. Returns 0 if
- // no events have been recorded.
- double MeanMarkCompactorDuration() const {
- return MeanDuration(mark_compactor_events_);
- }
-
- // Compute the max duration of the last mark compactor events. Return 0 if no
- // events have been recorded.
- double MaxMarkCompactorDuration() const {
- return MaxDuration(mark_compactor_events_);
- }
-
- // Compute the mean duration of the last incremental mark compactor
- // events. Returns 0 if no events have been recorded.
- double MeanIncrementalMarkCompactorDuration() const {
- return MeanDuration(incremental_mark_compactor_events_);
- }
-
- // Compute the mean step duration of the last incremental marking round.
- // Returns 0 if no incremental marking round has been completed.
- double MeanIncrementalMarkingDuration() const;
-
- // Compute the max step duration of the last incremental marking round.
- // Returns 0 if no incremental marking round has been completed.
- double MaxIncrementalMarkingDuration() const;
-
// Compute the average incremental marking speed in bytes/millisecond.
// Returns 0 if no events have been recorded.
- intptr_t IncrementalMarkingSpeedInBytesPerMillisecond() const;
+ double IncrementalMarkingSpeedInBytesPerMillisecond() const;
// Compute the average scavenge speed in bytes/millisecond.
// Returns 0 if no events have been recorded.
- intptr_t ScavengeSpeedInBytesPerMillisecond(
+ double ScavengeSpeedInBytesPerMillisecond(
ScavengeSpeedMode mode = kForAllObjects) const;
// Compute the average compaction speed in bytes/millisecond.
// Returns 0 if not enough events have been recorded.
- intptr_t CompactionSpeedInBytesPerMillisecond() const;
+ double CompactionSpeedInBytesPerMillisecond() const;
// Compute the average mark-sweep speed in bytes/millisecond.
// Returns 0 if no events have been recorded.
- intptr_t MarkCompactSpeedInBytesPerMillisecond() const;
+ double MarkCompactSpeedInBytesPerMillisecond() const;
// Compute the average incremental mark-sweep finalize speed in
// bytes/millisecond.
// Returns 0 if no events have been recorded.
- intptr_t FinalIncrementalMarkCompactSpeedInBytesPerMillisecond() const;
+ double FinalIncrementalMarkCompactSpeedInBytesPerMillisecond() const;
// Compute the overall mark compact speed including incremental steps
// and the final mark-compact step.
@@ -433,29 +324,29 @@ class GCTracer {
// Allocation throughput in the new space in bytes/millisecond.
// Returns 0 if no allocation events have been recorded.
- size_t NewSpaceAllocationThroughputInBytesPerMillisecond(
+ double NewSpaceAllocationThroughputInBytesPerMillisecond(
double time_ms = 0) const;
// Allocation throughput in the old generation in bytes/millisecond in the
// last time_ms milliseconds.
// Returns 0 if no allocation events have been recorded.
- size_t OldGenerationAllocationThroughputInBytesPerMillisecond(
+ double OldGenerationAllocationThroughputInBytesPerMillisecond(
double time_ms = 0) const;
// Allocation throughput in heap in bytes/millisecond in the last time_ms
// milliseconds.
// Returns 0 if no allocation events have been recorded.
- size_t AllocationThroughputInBytesPerMillisecond(double time_ms) const;
+ double AllocationThroughputInBytesPerMillisecond(double time_ms) const;
// Allocation throughput in heap in bytes/milliseconds in the last
// kThroughputTimeFrameMs seconds.
// Returns 0 if no allocation events have been recorded.
- size_t CurrentAllocationThroughputInBytesPerMillisecond() const;
+ double CurrentAllocationThroughputInBytesPerMillisecond() const;
// Allocation throughput in old generation in bytes/milliseconds in the last
// kThroughputTimeFrameMs seconds.
// Returns 0 if no allocation events have been recorded.
- size_t CurrentOldGenerationAllocationThroughputInBytesPerMillisecond() const;
+ double CurrentOldGenerationAllocationThroughputInBytesPerMillisecond() const;
// Computes the context disposal rate in milliseconds. It takes the time
// frame of the first recorded context disposal to the current time and
@@ -474,6 +365,13 @@ class GCTracer {
// Discard all recorded survival events.
void ResetSurvivalEvents();
+ // Returns the average speed of the events in the buffer.
+ // If the buffer is empty, the result is 0.
+ // Otherwise, the result is between 1 byte/ms and 1 GB/ms.
+ static double AverageSpeed(const RingBuffer<BytesAndDuration>& buffer);
+ static double AverageSpeed(const RingBuffer<BytesAndDuration>& buffer,
+ const BytesAndDuration& initial, double time_ms);
+
private:
// Print one detailed trace line in name=value format.
// TODO(ernstm): Move to Heap.
@@ -487,12 +385,6 @@ class GCTracer {
// it can be included in later crash dumps.
void Output(const char* format, ...) const;
- // Compute the mean duration of the events in the given ring buffer.
- double MeanDuration(const EventBuffer& events) const;
-
- // Compute the max duration of the events in the given ring buffer.
- double MaxDuration(const EventBuffer& events) const;
-
void ClearMarkCompactStatistics() {
cumulative_incremental_marking_steps_ = 0;
cumulative_incremental_marking_bytes_ = 0;
@@ -506,6 +398,16 @@ class GCTracer {
cumulative_sweeping_duration_ = 0;
}
+ double TotalExternalTime() const {
+ return current_.scopes[Scope::EXTERNAL_WEAK_GLOBAL_HANDLES] +
+ current_.scopes[Scope::MC_EXTERNAL_EPILOGUE] +
+ current_.scopes[Scope::MC_EXTERNAL_PROLOGUE] +
+ current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE] +
+ current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE] +
+ current_.scopes[Scope::SCAVENGER_EXTERNAL_EPILOGUE] +
+ current_.scopes[Scope::SCAVENGER_EXTERNAL_PROLOGUE];
+ }
+
// Pointer to the heap that owns this tracer.
Heap* heap_;
@@ -519,28 +421,6 @@ class GCTracer {
// Previous INCREMENTAL_MARK_COMPACTOR event.
Event previous_incremental_mark_compactor_event_;
- // RingBuffers for SCAVENGER events.
- EventBuffer scavenger_events_;
-
- // RingBuffers for MARK_COMPACTOR events.
- EventBuffer mark_compactor_events_;
-
- // RingBuffers for INCREMENTAL_MARK_COMPACTOR events.
- EventBuffer incremental_mark_compactor_events_;
-
- // RingBuffer for allocation events.
- AllocationEventBuffer new_space_allocation_events_;
- AllocationEventBuffer old_generation_allocation_events_;
-
- // RingBuffer for context disposal events.
- ContextDisposalEventBuffer context_disposal_events_;
-
- // RingBuffer for compaction events.
- CompactionEventBuffer compaction_events_;
-
- // RingBuffer for survival events.
- SurvivalEventBuffer survival_events_;
-
// Cumulative number of incremental marking steps since creation of tracer.
int cumulative_incremental_marking_steps_;
@@ -597,6 +477,20 @@ class GCTracer {
// Counts how many tracers were started without stopping.
int start_counter_;
+ // Separate timer used for --runtime_call_stats
+ RuntimeCallTimer timer_;
+
+ RingBuffer<BytesAndDuration> recorded_incremental_marking_steps_;
+ RingBuffer<BytesAndDuration> recorded_scavenges_total_;
+ RingBuffer<BytesAndDuration> recorded_scavenges_survived_;
+ RingBuffer<BytesAndDuration> recorded_compactions_;
+ RingBuffer<BytesAndDuration> recorded_mark_compacts_;
+ RingBuffer<BytesAndDuration> recorded_incremental_mark_compacts_;
+ RingBuffer<BytesAndDuration> recorded_new_generation_allocations_;
+ RingBuffer<BytesAndDuration> recorded_old_generation_allocations_;
+ RingBuffer<double> recorded_context_disposal_times_;
+ RingBuffer<double> recorded_survival_ratios_;
+
DISALLOW_COPY_AND_ASSIGN(GCTracer);
};
} // namespace internal
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index 57e6cc4c93..e31d3d6859 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -12,9 +12,9 @@
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/mark-compact.h"
+#include "src/heap/remembered-set.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/store-buffer.h"
-#include "src/heap/store-buffer-inl.h"
#include "src/isolate.h"
#include "src/list-inl.h"
#include "src/log.h"
@@ -25,20 +25,24 @@
namespace v8 {
namespace internal {
-void PromotionQueue::insert(HeapObject* target, int size) {
+void PromotionQueue::insert(HeapObject* target, int32_t size,
+ bool was_marked_black) {
if (emergency_stack_ != NULL) {
- emergency_stack_->Add(Entry(target, size));
+ emergency_stack_->Add(Entry(target, size, was_marked_black));
return;
}
- if ((rear_ - 2) < limit_) {
+ if ((rear_ - 1) < limit_) {
RelocateQueueHead();
- emergency_stack_->Add(Entry(target, size));
+ emergency_stack_->Add(Entry(target, size, was_marked_black));
return;
}
- *(--rear_) = reinterpret_cast<intptr_t>(target);
- *(--rear_) = size;
+ struct Entry* entry = reinterpret_cast<struct Entry*>(--rear_);
+ entry->obj_ = target;
+ entry->size_ = size;
+ entry->was_marked_black_ = was_marked_black;
+
// Assert no overflow into live objects.
#ifdef DEBUG
SemiSpace::AssertValidRange(target->GetIsolate()->heap()->new_space()->top(),
@@ -247,6 +251,12 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
} else {
old_gen_exhausted_ = true;
}
+
+ if (!old_gen_exhausted_ && incremental_marking()->black_allocation() &&
+ space != OLD_SPACE) {
+ Marking::MarkBlack(Marking::MarkBitFrom(object));
+ MemoryChunk::IncrementLiveBytesFromGC(object, size_in_bytes);
+ }
return allocation;
}
@@ -434,31 +444,11 @@ bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
return false;
}
-
void Heap::CopyBlock(Address dst, Address src, int byte_size) {
CopyWords(reinterpret_cast<Object**>(dst), reinterpret_cast<Object**>(src),
static_cast<size_t>(byte_size / kPointerSize));
}
-
-void Heap::MoveBlock(Address dst, Address src, int byte_size) {
- DCHECK(IsAligned(byte_size, kPointerSize));
-
- int size_in_words = byte_size / kPointerSize;
-
- if ((dst < src) || (dst >= (src + byte_size))) {
- Object** src_slot = reinterpret_cast<Object**>(src);
- Object** dst_slot = reinterpret_cast<Object**>(dst);
- Object** end_slot = src_slot + size_in_words;
-
- while (src_slot != end_slot) {
- *dst_slot++ = *src_slot++;
- }
- } else {
- MemMove(dst, src, static_cast<size_t>(byte_size));
- }
-}
-
template <Heap::FindMementoMode mode>
AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
// Check if there is potentially a memento behind the object. If
@@ -655,8 +645,7 @@ void Heap::ClearInstanceofCache() {
set_instanceof_cache_function(Smi::FromInt(0));
}
-
-Object* Heap::ToBoolean(bool condition) {
+Oddball* Heap::ToBoolean(bool condition) {
return condition ? true_value() : false_value();
}
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index f5110f8f8c..c3f56ac4c5 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -36,7 +36,7 @@
#include "src/regexp/jsregexp.h"
#include "src/runtime-profiler.h"
#include "src/snapshot/natives.h"
-#include "src/snapshot/serialize.h"
+#include "src/snapshot/serializer-common.h"
#include "src/snapshot/snapshot.h"
#include "src/tracing/trace-event.h"
#include "src/type-feedback-vector.h"
@@ -68,7 +68,6 @@ class IdleScavengeObserver : public AllocationObserver {
Heap& heap_;
};
-
Heap::Heap()
: amount_of_external_allocated_memory_(0),
amount_of_external_allocated_memory_at_last_global_gc_(0),
@@ -76,7 +75,6 @@ Heap::Heap()
code_range_size_(0),
// semispace_size_ should be a power of 2 and old_generation_size_ should
// be a multiple of Page::kPageSize.
- reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
max_semi_space_size_(8 * (kPointerSize / 4) * MB),
initial_semispace_size_(Page::kPageSize),
max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
@@ -92,6 +90,7 @@ Heap::Heap()
survived_since_last_expansion_(0),
survived_last_scavenge_(0),
always_allocate_scope_count_(0),
+ memory_pressure_level_(MemoryPressureLevel::kNone),
contexts_disposed_(0),
number_of_disposed_maps_(0),
global_ic_age_(0),
@@ -116,6 +115,7 @@ Heap::Heap()
inline_allocation_disabled_(false),
total_regexp_code_generated_(0),
tracer_(nullptr),
+ embedder_heap_tracer_(nullptr),
high_survival_rate_period_length_(0),
promoted_objects_size_(0),
promotion_ratio_(0),
@@ -453,10 +453,6 @@ void Heap::GarbageCollectionPrologue() {
ReportStatisticsBeforeGC();
#endif // DEBUG
- if (isolate()->concurrent_osr_enabled()) {
- isolate()->optimizing_compile_dispatcher()->AgeBufferedOsrJobs();
- }
-
if (new_space_.IsAtMaximumCapacity()) {
maximum_size_scavenges_++;
} else {
@@ -794,12 +790,19 @@ class GCCallbacksScope {
void Heap::HandleGCRequest() {
- if (incremental_marking()->request_type() ==
- IncrementalMarking::COMPLETE_MARKING) {
+ if (HighMemoryPressure()) {
+ incremental_marking()->reset_request_type();
+ CheckMemoryPressure();
+ } else if (incremental_marking()->request_type() ==
+ IncrementalMarking::COMPLETE_MARKING) {
+ incremental_marking()->reset_request_type();
CollectAllGarbage(current_gc_flags_, "GC interrupt",
current_gc_callback_flags_);
- } else if (incremental_marking()->IsMarking() &&
+ } else if (incremental_marking()->request_type() ==
+ IncrementalMarking::FINALIZATION &&
+ incremental_marking()->IsMarking() &&
!incremental_marking()->finalize_marking_completed()) {
+ incremental_marking()->reset_request_type();
FinalizeIncrementalMarking("GC interrupt: finalize incremental marking");
}
}
@@ -815,7 +818,7 @@ void Heap::FinalizeIncrementalMarking(const char* gc_reason) {
PrintF("[IncrementalMarking] (%s).\n", gc_reason);
}
- GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);
+ TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);
HistogramTimerScope incremental_marking_scope(
isolate()->counters()->gc_incremental_marking_finalize());
TRACE_EVENT0("v8", "V8.GCIncrementalMarkingFinalize");
@@ -824,7 +827,7 @@ void Heap::FinalizeIncrementalMarking(const char* gc_reason) {
GCCallbacksScope scope(this);
if (scope.CheckReenter()) {
AllowHeapAllocation allow_allocation;
- GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
+ TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE);
VMState<EXTERNAL> state(isolate_);
HandleScope handle_scope(isolate_);
CallGCPrologueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
@@ -835,7 +838,7 @@ void Heap::FinalizeIncrementalMarking(const char* gc_reason) {
GCCallbacksScope scope(this);
if (scope.CheckReenter()) {
AllowHeapAllocation allow_allocation;
- GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
+ TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE);
VMState<EXTERNAL> state(isolate_);
HandleScope handle_scope(isolate_);
CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
@@ -924,7 +927,7 @@ void Heap::ReportExternalMemoryPressure(const char* gc_reason) {
double deadline = MonotonicallyIncreasingTimeInMs() +
FLAG_external_allocation_limit_incremental_time;
incremental_marking()->AdvanceIncrementalMarking(
- 0, deadline,
+ deadline,
IncrementalMarking::StepActions(IncrementalMarking::GC_VIA_STACK_GUARD,
IncrementalMarking::FORCE_MARKING,
IncrementalMarking::FORCE_COMPLETION));
@@ -933,20 +936,15 @@ void Heap::ReportExternalMemoryPressure(const char* gc_reason) {
void Heap::EnsureFillerObjectAtTop() {
- // There may be an allocation memento behind every object in new space.
- // If we evacuate a not full new space or if we are on the last page of
- // the new space, then there may be uninitialized memory behind the top
- // pointer of the new space page. We store a filler object there to
- // identify the unused space.
- Address from_top = new_space_.top();
- // Check that from_top is inside its page (i.e., not at the end).
- Address space_end = new_space_.ToSpaceEnd();
- if (from_top < space_end) {
- Page* page = Page::FromAddress(from_top);
- if (page->Contains(from_top)) {
- int remaining_in_page = static_cast<int>(page->area_end() - from_top);
- CreateFillerObjectAt(from_top, remaining_in_page);
- }
+ // There may be an allocation memento behind objects in new space. Upon
+ // evacuation of a non-full new space (or if we are on the last page) there
+ // may be uninitialized memory behind top. We fill the remainder of the page
+ // with a filler.
+ Address to_top = new_space_.top();
+ NewSpacePage* page = NewSpacePage::FromAddress(to_top - kPointerSize);
+ if (page->Contains(to_top)) {
+ int remaining_in_page = static_cast<int>(page->area_end() - to_top);
+ CreateFillerObjectAt(to_top, remaining_in_page, ClearRecordedSlots::kNo);
}
}
@@ -1037,6 +1035,7 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
if (deserialization_complete_) {
memory_reducer_->NotifyMarkCompact(event);
}
+ memory_pressure_level_.SetValue(MemoryPressureLevel::kNone);
}
tracer()->Stop(collector);
@@ -1109,7 +1108,7 @@ void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
dst_objects[i]);
}
}
- incremental_marking()->RecordWrites(array);
+ incremental_marking()->IterateBlackObject(array);
}
@@ -1143,7 +1142,8 @@ bool Heap::ReserveSpace(Reservation* reservations) {
static const int kThreshold = 20;
while (gc_performed && counter++ < kThreshold) {
gc_performed = false;
- for (int space = NEW_SPACE; space < Serializer::kNumberOfSpaces; space++) {
+ for (int space = NEW_SPACE; space < SerializerDeserializer::kNumberOfSpaces;
+ space++) {
Reservation* reservation = &reservations[space];
DCHECK_LE(1, reservation->length());
if (reservation->at(0).size == 0) continue;
@@ -1160,15 +1160,18 @@ bool Heap::ReserveSpace(Reservation* reservations) {
if (space == NEW_SPACE) {
allocation = new_space()->AllocateRawUnaligned(size);
} else {
- allocation = paged_space(space)->AllocateRawUnaligned(size);
+ // The deserializer will update the skip list.
+ allocation = paged_space(space)->AllocateRawUnaligned(
+ size, PagedSpace::IGNORE_SKIP_LIST);
}
HeapObject* free_space = nullptr;
if (allocation.To(&free_space)) {
// Mark with a free list node, in case we have a GC before
// deserializing.
Address free_space_address = free_space->address();
- CreateFillerObjectAt(free_space_address, size);
- DCHECK(space < Serializer::kNumberOfPreallocatedSpaces);
+ CreateFillerObjectAt(free_space_address, size,
+ ClearRecordedSlots::kNo);
+ DCHECK(space < SerializerDeserializer::kNumberOfPreallocatedSpaces);
chunk.start = free_space_address;
chunk.end = free_space_address + size;
} else {
@@ -1279,7 +1282,9 @@ bool Heap::PerformGarbageCollection(
GCCallbacksScope scope(this);
if (scope.CheckReenter()) {
AllowHeapAllocation allow_allocation;
- GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
+ TRACE_GC(tracer(), collector == MARK_COMPACTOR
+ ? GCTracer::Scope::MC_EXTERNAL_PROLOGUE
+ : GCTracer::Scope::SCAVENGER_EXTERNAL_PROLOGUE);
VMState<EXTERNAL> state(isolate_);
HandleScope handle_scope(isolate_);
CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
@@ -1323,22 +1328,10 @@ bool Heap::PerformGarbageCollection(
isolate_->counters()->objs_since_last_young()->Set(0);
- if (collector != SCAVENGER) {
- // Callbacks that fire after this point might trigger nested GCs and
- // restart incremental marking, the assertion can't be moved down.
- DCHECK(incremental_marking()->IsStopped());
-
- // We finished a marking cycle. We can uncommit the marking deque until
- // we start marking again.
- mark_compact_collector()->marking_deque()->Uninitialize();
- mark_compact_collector()->EnsureMarkingDequeIsCommitted(
- MarkCompactCollector::kMinMarkingDequeSize);
- }
-
gc_post_processing_depth_++;
{
AllowHeapAllocation allow_allocation;
- GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
+ TRACE_GC(tracer(), GCTracer::Scope::EXTERNAL_WEAK_GLOBAL_HANDLES);
freed_global_handles =
isolate_->global_handles()->PostGarbageCollectionProcessing(
collector, gc_callback_flags);
@@ -1351,9 +1344,8 @@ bool Heap::PerformGarbageCollection(
Relocatable::PostGarbageCollectionProcessing(isolate_);
double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
- double mutator_speed = static_cast<double>(
- tracer()
- ->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond());
+ double mutator_speed =
+ tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond();
intptr_t old_gen_size = PromotedSpaceSizeOfObjects();
if (collector == MARK_COMPACTOR) {
// Register the amount of external allocated memory.
@@ -1369,7 +1361,9 @@ bool Heap::PerformGarbageCollection(
GCCallbacksScope scope(this);
if (scope.CheckReenter()) {
AllowHeapAllocation allow_allocation;
- GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
+ TRACE_GC(tracer(), collector == MARK_COMPACTOR
+ ? GCTracer::Scope::MC_EXTERNAL_EPILOGUE
+ : GCTracer::Scope::SCAVENGER_EXTERNAL_EPILOGUE);
VMState<EXTERNAL> state(isolate_);
HandleScope handle_scope(isolate_);
CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
@@ -1399,6 +1393,10 @@ void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
}
}
}
+ if (FLAG_trace_object_groups && (gc_type == kGCTypeIncrementalMarking ||
+ gc_type == kGCTypeMarkSweepCompact)) {
+ isolate_->global_handles()->PrintObjectGroups();
+ }
}
@@ -1453,6 +1451,13 @@ void Heap::MarkCompactEpilogue() {
incremental_marking()->Epilogue();
PreprocessStackTraces();
+ DCHECK(incremental_marking()->IsStopped());
+
+ // We finished a marking cycle. We can uncommit the marking deque until
+ // we start marking again.
+ mark_compact_collector()->marking_deque()->Uninitialize();
+ mark_compact_collector()->EnsureMarkingDequeIsCommitted(
+ MarkCompactCollector::kMinMarkingDequeSize);
}
@@ -1558,8 +1563,8 @@ void PromotionQueue::Initialize() {
DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) ==
0);
front_ = rear_ =
- reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
- limit_ = reinterpret_cast<intptr_t*>(
+ reinterpret_cast<struct Entry*>(heap_->new_space()->ToSpaceEnd());
+ limit_ = reinterpret_cast<struct Entry*>(
Page::FromAllocationTop(reinterpret_cast<Address>(rear_))->area_start());
emergency_stack_ = NULL;
}
@@ -1569,22 +1574,21 @@ void PromotionQueue::RelocateQueueHead() {
DCHECK(emergency_stack_ == NULL);
Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
- intptr_t* head_start = rear_;
- intptr_t* head_end = Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
+ struct Entry* head_start = rear_;
+ struct Entry* head_end =
+ Min(front_, reinterpret_cast<struct Entry*>(p->area_end()));
int entries_count =
- static_cast<int>(head_end - head_start) / kEntrySizeInWords;
+ static_cast<int>(head_end - head_start) / sizeof(struct Entry);
emergency_stack_ = new List<Entry>(2 * entries_count);
while (head_start != head_end) {
- int size = static_cast<int>(*(head_start++));
- HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
+ struct Entry* entry = head_start++;
// New space allocation in SemiSpaceCopyObject marked the region
// overlapping with promotion queue as uninitialized.
- MSAN_MEMORY_IS_INITIALIZED(&size, sizeof(size));
- MSAN_MEMORY_IS_INITIALIZED(&obj, sizeof(obj));
- emergency_stack_->Add(Entry(obj, size));
+ MSAN_MEMORY_IS_INITIALIZED(entry, sizeof(struct Entry));
+ emergency_stack_->Add(*entry);
}
rear_ = head_end;
}
@@ -1612,7 +1616,7 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
void Heap::Scavenge() {
- GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
+ TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
RelocationLock relocation_lock(this);
// There are soft limits in the allocation code, designed to trigger a mark
// sweep collection by failing allocations. There is no sense in trying to
@@ -1673,20 +1677,19 @@ void Heap::Scavenge() {
{
// Copy roots.
- GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::SCAVENGER_ROOTS);
+ TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_ROOTS);
IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
}
{
// Copy objects reachable from the old generation.
- GCTracer::Scope gc_scope(tracer(),
- GCTracer::Scope::SCAVENGER_OLD_TO_NEW_POINTERS);
+ TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_OLD_TO_NEW_POINTERS);
RememberedSet<OLD_TO_NEW>::IterateWithWrapper(this,
Scavenger::ScavengeObject);
}
{
- GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::SCAVENGER_WEAK);
+ TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_WEAK);
// Copy objects reachable from the encountered weak collections list.
scavenge_visitor.VisitPointer(&encountered_weak_collections_);
// Copy objects reachable from the encountered weak cells.
@@ -1695,8 +1698,7 @@ void Heap::Scavenge() {
{
// Copy objects reachable from the code flushing candidates list.
- GCTracer::Scope gc_scope(tracer(),
- GCTracer::Scope::SCAVENGER_CODE_FLUSH_CANDIDATES);
+ TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_CODE_FLUSH_CANDIDATES);
MarkCompactCollector* collector = mark_compact_collector();
if (collector->is_code_flushing_enabled()) {
collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
@@ -1704,7 +1706,7 @@ void Heap::Scavenge() {
}
{
- GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::SCAVENGER_SEMISPACE);
+ TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SEMISPACE);
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
}
@@ -1716,8 +1718,7 @@ void Heap::Scavenge() {
&scavenge_visitor);
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
} else {
- GCTracer::Scope gc_scope(tracer(),
- GCTracer::Scope::SCAVENGER_OBJECT_GROUPS);
+ TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_OBJECT_GROUPS);
while (isolate()->global_handles()->IterateObjectGroups(
&scavenge_visitor, &IsUnscavengedHeapObject)) {
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
@@ -1849,6 +1850,10 @@ void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
set_allocation_sites_list(allocation_site_obj);
}
+void Heap::ProcessWeakListRoots(WeakObjectRetainer* retainer) {
+ set_native_contexts_list(retainer->RetainAs(native_contexts_list()));
+ set_allocation_sites_list(retainer->RetainAs(allocation_sites_list()));
+}
void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
DisallowHeapAllocation no_allocation_scope;
@@ -1938,8 +1943,9 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
{
while (!promotion_queue()->is_empty()) {
HeapObject* target;
- int size;
- promotion_queue()->remove(&target, &size);
+ int32_t size;
+ bool was_marked_black;
+ promotion_queue()->remove(&target, &size, &was_marked_black);
// Promoted object might be already partially visited
// during old space pointer iteration. Thus we search specifically
@@ -1947,7 +1953,8 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
// to new space.
DCHECK(!target->IsMap());
- IteratePointersToFromSpace(target, size, &Scavenger::ScavengeObject);
+ IteratePromotedObject(target, static_cast<int>(size), was_marked_black,
+ &Scavenger::ScavengeObject);
}
}
@@ -2000,7 +2007,7 @@ int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
HeapObject* Heap::PrecedeWithFiller(HeapObject* object, int filler_size) {
- CreateFillerObjectAt(object->address(), filler_size);
+ CreateFillerObjectAt(object->address(), filler_size, ClearRecordedSlots::kNo);
return HeapObject::FromAddress(object->address() + filler_size);
}
@@ -2016,7 +2023,8 @@ HeapObject* Heap::AlignWithFiller(HeapObject* object, int object_size,
filler_size -= pre_filler;
}
if (filler_size)
- CreateFillerObjectAt(object->address() + object_size, filler_size);
+ CreateFillerObjectAt(object->address() + object_size, filler_size,
+ ClearRecordedSlots::kNo);
return object;
}
@@ -2134,7 +2142,7 @@ AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
DCHECK(chunk->owner()->identity() == space);
#endif
- CreateFillerObjectAt(obj->address(), size);
+ CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
return obj;
}
@@ -2320,6 +2328,7 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, no_interceptor_result_sentinel);
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
+ ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, optimized_out);
for (unsigned i = 0; i < arraysize(string_type_table); i++) {
const StringTypeTable& entry = string_type_table[i];
@@ -2380,6 +2389,7 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, debug_evaluate_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context)
@@ -2401,6 +2411,20 @@ bool Heap::CreateInitialMaps() {
#undef ALLOCATE_MAP
}
+ {
+ AllocationResult allocation = Allocate(boolean_map(), OLD_SPACE);
+ if (!allocation.To(&obj)) return false;
+ }
+ set_true_value(Oddball::cast(obj));
+ Oddball::cast(obj)->set_kind(Oddball::kTrue);
+
+ {
+ AllocationResult allocation = Allocate(boolean_map(), OLD_SPACE);
+ if (!allocation.To(&obj)) return false;
+ }
+ set_false_value(Oddball::cast(obj));
+ Oddball::cast(obj)->set_kind(Oddball::kFalse);
+
{ // Empty arrays
{
ByteArray* byte_array;
@@ -2530,6 +2554,15 @@ AllocationResult Heap::AllocateTransitionArray(int capacity) {
TransitionArray* array = TransitionArray::cast(raw_array);
array->set_length(capacity);
MemsetPointer(array->data_start(), undefined_value(), capacity);
+ // Transition arrays are tenured. When black allocation is on we have to
+ // add the transition array to the list of encountered_transition_arrays.
+ if (incremental_marking()->black_allocation()) {
+ array->set_next_link(encountered_transition_arrays(),
+ UPDATE_WEAK_WRITE_BARRIER);
+ set_encountered_transition_arrays(array);
+ } else {
+ array->set_next_link(undefined_value(), SKIP_WRITE_BARRIER);
+ }
return array;
}
@@ -2618,50 +2651,61 @@ void Heap::CreateInitialObjects() {
// Allocate initial string table.
set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
+ // Allocate
+
// Finish initializing oddballs after creating the string table.
Oddball::Initialize(isolate(), factory->undefined_value(), "undefined",
- factory->nan_value(), "undefined", Oddball::kUndefined);
+ factory->nan_value(), false, "undefined",
+ Oddball::kUndefined);
// Initialize the null_value.
Oddball::Initialize(isolate(), factory->null_value(), "null",
- handle(Smi::FromInt(0), isolate()), "object",
+ handle(Smi::FromInt(0), isolate()), false, "object",
Oddball::kNull);
- set_true_value(*factory->NewOddball(factory->boolean_map(), "true",
- handle(Smi::FromInt(1), isolate()),
- "boolean", Oddball::kTrue));
+ // Initialize the true_value.
+ Oddball::Initialize(isolate(), factory->true_value(), "true",
+ handle(Smi::FromInt(1), isolate()), true, "boolean",
+ Oddball::kTrue);
- set_false_value(*factory->NewOddball(factory->boolean_map(), "false",
- handle(Smi::FromInt(0), isolate()),
- "boolean", Oddball::kFalse));
+ // Initialize the false_value.
+ Oddball::Initialize(isolate(), factory->false_value(), "false",
+ handle(Smi::FromInt(0), isolate()), false, "boolean",
+ Oddball::kFalse);
- set_the_hole_value(*factory->NewOddball(factory->the_hole_map(), "hole",
- handle(Smi::FromInt(-1), isolate()),
- "undefined", Oddball::kTheHole));
+ set_the_hole_value(*factory->NewOddball(
+ factory->the_hole_map(), "hole", handle(Smi::FromInt(-1), isolate()),
+ false, "undefined", Oddball::kTheHole));
set_uninitialized_value(
*factory->NewOddball(factory->uninitialized_map(), "uninitialized",
- handle(Smi::FromInt(-1), isolate()), "undefined",
- Oddball::kUninitialized));
+ handle(Smi::FromInt(-1), isolate()), false,
+ "undefined", Oddball::kUninitialized));
set_arguments_marker(
*factory->NewOddball(factory->arguments_marker_map(), "arguments_marker",
- handle(Smi::FromInt(-4), isolate()), "undefined",
- Oddball::kArgumentsMarker));
+ handle(Smi::FromInt(-4), isolate()), false,
+ "undefined", Oddball::kArgumentsMarker));
set_no_interceptor_result_sentinel(*factory->NewOddball(
factory->no_interceptor_result_sentinel_map(),
"no_interceptor_result_sentinel", handle(Smi::FromInt(-2), isolate()),
- "undefined", Oddball::kOther));
+ false, "undefined", Oddball::kOther));
set_termination_exception(*factory->NewOddball(
factory->termination_exception_map(), "termination_exception",
- handle(Smi::FromInt(-3), isolate()), "undefined", Oddball::kOther));
+ handle(Smi::FromInt(-3), isolate()), false, "undefined",
+ Oddball::kOther));
set_exception(*factory->NewOddball(factory->exception_map(), "exception",
- handle(Smi::FromInt(-5), isolate()),
+ handle(Smi::FromInt(-5), isolate()), false,
"undefined", Oddball::kException));
+ set_optimized_out(
+ *factory->NewOddball(factory->optimized_out_map(), "optimized_out",
+ handle(Smi::FromInt(-6), isolate()), false,
+ "undefined", Oddball::kOptimizedOut));
+
for (unsigned i = 0; i < arraysize(constant_string_table); i++) {
Handle<String> str =
factory->InternalizeUtf8String(constant_string_table[i].contents);
@@ -2876,7 +2920,6 @@ void Heap::CreateInitialObjects() {
bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
switch (root_index) {
- case kStoreBufferTopRootIndex:
case kNumberStringCacheRootIndex:
case kInstanceofCacheFunctionRootIndex:
case kInstanceofCacheMapRootIndex:
@@ -3037,14 +3080,14 @@ AllocationResult Heap::AllocateBytecodeArray(int length,
instance->set_interrupt_budget(interpreter::Interpreter::InterruptBudget());
instance->set_constant_pool(constant_pool);
instance->set_handler_table(empty_fixed_array());
- instance->set_source_position_table(empty_fixed_array());
+ instance->set_source_position_table(empty_byte_array());
CopyBytes(instance->GetFirstBytecodeAddress(), raw_bytecodes, length);
return result;
}
-
-void Heap::CreateFillerObjectAt(Address addr, int size) {
+void Heap::CreateFillerObjectAt(Address addr, int size,
+ ClearRecordedSlots mode) {
if (size == 0) return;
HeapObject* filler = HeapObject::FromAddress(addr);
if (size == kPointerSize) {
@@ -3059,6 +3102,9 @@ void Heap::CreateFillerObjectAt(Address addr, int size) {
reinterpret_cast<Map*>(root(kFreeSpaceMapRootIndex)));
FreeSpace::cast(filler)->nobarrier_set_size(size);
}
+ if (mode == ClearRecordedSlots::kYes) {
+ ClearRecordedSlotRange(addr, addr + size);
+ }
// At this point, we may be deserializing the heap from a snapshot, and
// none of the maps have been created yet and are NULL.
DCHECK((filler->map() == NULL && !deserialization_complete_) ||
@@ -3092,9 +3138,11 @@ void Heap::AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode) {
// the heap using HeapIterator, we can update the live byte count. We cannot
// update while using HeapIterator because the iterator is temporarily
// marking the whole object graph, without updating live bytes.
- if (!in_heap_iterator() &&
- !mark_compact_collector()->sweeping_in_progress() &&
- Marking::IsBlack(Marking::MarkBitFrom(object->address()))) {
+ if (lo_space()->Contains(object)) {
+ lo_space()->AdjustLiveBytes(by);
+ } else if (!in_heap_iterator() &&
+ !mark_compact_collector()->sweeping_in_progress() &&
+ Marking::IsBlack(Marking::MarkBitFrom(object->address()))) {
if (mode == SEQUENTIAL_TO_SWEEPER) {
MemoryChunk::IncrementLiveBytesFromGC(object, by);
} else {
@@ -3135,7 +3183,8 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
// Technically in new space this write might be omitted (except for
// debug mode which iterates through the heap), but to play safer
// we still do it.
- CreateFillerObjectAt(object->address(), bytes_to_trim);
+ CreateFillerObjectAt(object->address(), bytes_to_trim,
+ ClearRecordedSlots::kYes);
// Initialize header of the trimmed array. Since left trimming is only
// performed on pages which are not concurrently swept creating a filler
@@ -3150,11 +3199,6 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
// Maintain consistency of live bytes during incremental marking
Marking::TransferMark(this, object->address(), new_start);
- if (mark_compact_collector()->sweeping_in_progress()) {
- // Array trimming during sweeping can add invalid slots in free list.
- ClearRecordedSlotRange(object, former_start,
- HeapObject::RawField(new_object, 0));
- }
AdjustLiveBytes(new_object, -bytes_to_trim, Heap::CONCURRENT_TO_SWEEPER);
// Notify the heap profiler of change in object layout.
@@ -3214,12 +3258,7 @@ void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
// TODO(hpayer): We should shrink the large object page if the size
// of the object changed significantly.
if (!lo_space()->Contains(object)) {
- CreateFillerObjectAt(new_end, bytes_to_trim);
- if (mark_compact_collector()->sweeping_in_progress()) {
- // Array trimming during sweeping can add invalid slots in free list.
- ClearRecordedSlotRange(object, reinterpret_cast<Object**>(new_end),
- reinterpret_cast<Object**>(old_end));
- }
+ CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kYes);
}
// Initialize header of the trimmed array. We are storing the new length
@@ -3313,7 +3352,6 @@ AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
HeapObject* result = nullptr;
if (!allocation.To(&result)) return allocation;
-
if (immovable) {
Address address = result->address();
// Code objects which should stay at a fixed address are allocated either
@@ -3323,7 +3361,8 @@ AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) {
// Discard the first code allocation, which was on a page where it could
// be moved.
- CreateFillerObjectAt(result->address(), object_size);
+ CreateFillerObjectAt(result->address(), object_size,
+ ClearRecordedSlots::kNo);
allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
if (!allocation.To(&result)) return allocation;
OnAllocationEvent(result, object_size);
@@ -3363,6 +3402,9 @@ AllocationResult Heap::CopyCode(Code* code) {
isolate_->code_range()->contains(code->address()) ||
obj_size <= code_space()->AreaSize());
new_code->Relocate(new_addr - old_addr);
+ // We have to iterate over the object and process its pointers when black
+ // allocation is on.
+ incremental_marking()->IterateBlackObject(new_code);
return new_code;
}
@@ -3382,6 +3424,7 @@ AllocationResult Heap::CopyBytecodeArray(BytecodeArray* bytecode_array) {
copy->set_constant_pool(bytecode_array->constant_pool());
copy->set_handler_table(bytecode_array->handler_table());
copy->set_source_position_table(bytecode_array->source_position_table());
+ copy->set_interrupt_budget(bytecode_array->interrupt_budget());
bytecode_array->CopyBytecodesTo(copy);
return copy;
}
@@ -3429,7 +3472,9 @@ AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
new_obj_size <= code_space()->AreaSize());
new_code->Relocate(new_addr - old_addr);
-
+ // We have to iterate over over the object and process its pointers when
+ // black allocation is on.
+ incremental_marking()->IterateBlackObject(new_code);
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) code->ObjectVerify();
#endif
@@ -3560,11 +3605,12 @@ AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
// Make the clone.
Map* map = source->map();
- // We can only clone regexps, normal objects or arrays. Copying anything else
- // will break invariants.
+ // We can only clone regexps, normal objects, api objects or arrays. Copying
+ // anything else will break invariants.
CHECK(map->instance_type() == JS_REGEXP_TYPE ||
map->instance_type() == JS_OBJECT_TYPE ||
- map->instance_type() == JS_ARRAY_TYPE);
+ map->instance_type() == JS_ARRAY_TYPE ||
+ map->instance_type() == JS_SPECIAL_API_OBJECT_TYPE);
int object_size = map->instance_size();
HeapObject* clone = nullptr;
@@ -3972,7 +4018,8 @@ AllocationResult Heap::AllocateUninitializedFixedDoubleArray(
AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
PretenureFlag pretenure) {
if (length < 0 || length > FixedDoubleArray::kMaxLength) {
- v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
+ v8::internal::Heap::FatalProcessOutOfMemory("invalid array length",
+ kDoubleAligned);
}
int size = FixedDoubleArray::SizeFor(length);
AllocationSpace space = SelectSpace(pretenure);
@@ -4078,8 +4125,8 @@ static double ComputeMutatorUtilization(double mutator_speed, double gc_speed) {
double Heap::YoungGenerationMutatorUtilization() {
double mutator_speed = static_cast<double>(
tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
- double gc_speed = static_cast<double>(
- tracer()->ScavengeSpeedInBytesPerMillisecond(kForSurvivedObjects));
+ double gc_speed =
+ tracer()->ScavengeSpeedInBytesPerMillisecond(kForSurvivedObjects);
double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
if (FLAG_trace_mutator_utilization) {
PrintIsolate(isolate(),
@@ -4158,7 +4205,7 @@ void Heap::ReduceNewSpaceSize() {
// TODO(ulan): Unify this constant with the similar constant in
// GCIdleTimeHandler once the change is merged to 4.5.
static const size_t kLowAllocationThroughput = 1000;
- const size_t allocation_throughput =
+ const double allocation_throughput =
tracer()->CurrentAllocationThroughputInBytesPerMillisecond();
if (FLAG_predictable) return;
@@ -4187,21 +4234,20 @@ void Heap::FinalizeIncrementalMarkingIfComplete(const char* comment) {
bool Heap::TryFinalizeIdleIncrementalMarking(double idle_time_in_ms) {
size_t size_of_objects = static_cast<size_t>(SizeOfObjects());
- size_t final_incremental_mark_compact_speed_in_bytes_per_ms =
- static_cast<size_t>(
- tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
+ double final_incremental_mark_compact_speed_in_bytes_per_ms =
+ tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond();
if (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
(!incremental_marking()->finalize_marking_completed() &&
mark_compact_collector()->marking_deque()->IsEmpty() &&
gc_idle_time_handler_->ShouldDoOverApproximateWeakClosure(
- static_cast<size_t>(idle_time_in_ms)))) {
+ idle_time_in_ms))) {
FinalizeIncrementalMarking(
"Idle notification: finalize incremental marking");
return true;
} else if (incremental_marking()->IsComplete() ||
(mark_compact_collector()->marking_deque()->IsEmpty() &&
gc_idle_time_handler_->ShouldDoFinalIncrementalMarkCompact(
- static_cast<size_t>(idle_time_in_ms), size_of_objects,
+ idle_time_in_ms, size_of_objects,
final_incremental_mark_compact_speed_in_bytes_per_ms))) {
CollectAllGarbage(current_gc_flags_,
"idle notification: finalize incremental marking");
@@ -4210,6 +4256,40 @@ bool Heap::TryFinalizeIdleIncrementalMarking(double idle_time_in_ms) {
return false;
}
+void Heap::RegisterReservationsForBlackAllocation(Reservation* reservations) {
+ // TODO(hpayer): We do not have to iterate reservations on black objects
+ // for marking. We just have to execute the special visiting side effect
+ // code that adds objects to global data structures, e.g. for array buffers.
+
+ // Code space, map space, and large object space do not use black pages.
+ // Hence we have to color all objects of the reservation first black to avoid
+ // unnecessary marking deque load.
+ if (incremental_marking()->black_allocation()) {
+ for (int i = CODE_SPACE; i < Serializer::kNumberOfSpaces; i++) {
+ const Heap::Reservation& res = reservations[i];
+ for (auto& chunk : res) {
+ Address addr = chunk.start;
+ while (addr < chunk.end) {
+ HeapObject* obj = HeapObject::FromAddress(addr);
+ Marking::MarkBlack(Marking::MarkBitFrom(obj));
+ MemoryChunk::IncrementLiveBytesFromGC(obj, obj->Size());
+ addr += obj->Size();
+ }
+ }
+ }
+ for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
+ const Heap::Reservation& res = reservations[i];
+ for (auto& chunk : res) {
+ Address addr = chunk.start;
+ while (addr < chunk.end) {
+ HeapObject* obj = HeapObject::FromAddress(addr);
+ incremental_marking()->IterateBlackObject(obj);
+ addr += obj->Size();
+ }
+ }
+ }
+ }
+}
GCIdleTimeHeapState Heap::ComputeHeapState() {
GCIdleTimeHeapState heap_state;
@@ -4356,6 +4436,59 @@ bool Heap::RecentIdleNotificationHappened() {
MonotonicallyIncreasingTimeInMs();
}
+class MemoryPressureInterruptTask : public CancelableTask {
+ public:
+ explicit MemoryPressureInterruptTask(Heap* heap)
+ : CancelableTask(heap->isolate()), heap_(heap) {}
+
+ virtual ~MemoryPressureInterruptTask() {}
+
+ private:
+ // v8::internal::CancelableTask overrides.
+ void RunInternal() override { heap_->CheckMemoryPressure(); }
+
+ Heap* heap_;
+ DISALLOW_COPY_AND_ASSIGN(MemoryPressureInterruptTask);
+};
+
+void Heap::CheckMemoryPressure() {
+ if (memory_pressure_level_.Value() == MemoryPressureLevel::kCritical) {
+ CollectGarbageOnMemoryPressure("memory pressure");
+ } else if (memory_pressure_level_.Value() == MemoryPressureLevel::kModerate) {
+ if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
+ StartIdleIncrementalMarking();
+ }
+ }
+ MemoryReducer::Event event;
+ event.type = MemoryReducer::kPossibleGarbage;
+ event.time_ms = MonotonicallyIncreasingTimeInMs();
+ memory_reducer_->NotifyPossibleGarbage(event);
+}
+
+void Heap::CollectGarbageOnMemoryPressure(const char* source) {
+ CollectAllGarbage(kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
+ source);
+}
+
+void Heap::MemoryPressureNotification(MemoryPressureLevel level,
+ bool is_isolate_locked) {
+ MemoryPressureLevel previous = memory_pressure_level_.Value();
+ memory_pressure_level_.SetValue(level);
+ if ((previous != MemoryPressureLevel::kCritical &&
+ level == MemoryPressureLevel::kCritical) ||
+ (previous == MemoryPressureLevel::kNone &&
+ level == MemoryPressureLevel::kModerate)) {
+ if (is_isolate_locked) {
+ CheckMemoryPressure();
+ } else {
+ ExecutionAccess access(isolate());
+ isolate()->stack_guard()->RequestGC();
+ V8::GetCurrentPlatform()->CallOnForegroundThread(
+ reinterpret_cast<v8::Isolate*>(isolate()),
+ new MemoryPressureInterruptTask(this));
+ }
+ }
+}
#ifdef DEBUG
@@ -4556,10 +4689,9 @@ void Heap::ZapFromSpace() {
}
}
-
-void Heap::IterateAndMarkPointersToFromSpace(HeapObject* object, Address start,
- Address end, bool record_slots,
- ObjectSlotCallback callback) {
+void Heap::IteratePromotedObjectPointers(HeapObject* object, Address start,
+ Address end, bool record_slots,
+ ObjectSlotCallback callback) {
Address slot_address = start;
Page* page = Page::FromAddress(start);
@@ -4586,24 +4718,29 @@ void Heap::IterateAndMarkPointersToFromSpace(HeapObject* object, Address start,
}
}
-
-class IteratePointersToFromSpaceVisitor final : public ObjectVisitor {
+class IteratePromotedObjectsVisitor final : public ObjectVisitor {
public:
- IteratePointersToFromSpaceVisitor(Heap* heap, HeapObject* target,
- bool record_slots,
- ObjectSlotCallback callback)
+ IteratePromotedObjectsVisitor(Heap* heap, HeapObject* target,
+ bool record_slots, ObjectSlotCallback callback)
: heap_(heap),
target_(target),
record_slots_(record_slots),
callback_(callback) {}
V8_INLINE void VisitPointers(Object** start, Object** end) override {
- heap_->IterateAndMarkPointersToFromSpace(
+ heap_->IteratePromotedObjectPointers(
target_, reinterpret_cast<Address>(start),
reinterpret_cast<Address>(end), record_slots_, callback_);
}
- V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {}
+ V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {
+ // Black allocation requires us to process objects referenced by
+ // promoted objects.
+ if (heap_->incremental_marking()->black_allocation()) {
+ Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
+ IncrementalMarking::MarkObject(heap_, code);
+ }
+ }
private:
Heap* heap_;
@@ -4612,9 +4749,9 @@ class IteratePointersToFromSpaceVisitor final : public ObjectVisitor {
ObjectSlotCallback callback_;
};
-
-void Heap::IteratePointersToFromSpace(HeapObject* target, int size,
- ObjectSlotCallback callback) {
+void Heap::IteratePromotedObject(HeapObject* target, int size,
+ bool was_marked_black,
+ ObjectSlotCallback callback) {
// We are not collecting slots on new space objects during mutation
// thus we have to scan for pointers to evacuation candidates when we
// promote objects. But we should not record any slots in non-black
@@ -4627,9 +4764,20 @@ void Heap::IteratePointersToFromSpace(HeapObject* target, int size,
record_slots = Marking::IsBlack(mark_bit);
}
- IteratePointersToFromSpaceVisitor visitor(this, target, record_slots,
- callback);
+ IteratePromotedObjectsVisitor visitor(this, target, record_slots, callback);
target->IterateBody(target->map()->instance_type(), size, &visitor);
+
+ // When black allocations is on, we have to visit not already marked black
+ // objects (in new space) promoted to black pages to keep their references
+ // alive.
+ // TODO(hpayer): Implement a special promotion visitor that incorporates
+ // regular visiting and IteratePromotedObjectPointers.
+ if (!was_marked_black) {
+ if (incremental_marking()->black_allocation()) {
+ IncrementalMarking::MarkObject(this, target->map());
+ incremental_marking()->IterateBlackObject(target);
+ }
+ }
}
@@ -4661,6 +4809,10 @@ void Heap::IterateSmiRoots(ObjectVisitor* v) {
void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
v->Synchronize(VisitorSynchronization::kStrongRootList);
+ // The serializer/deserializer iterates the root list twice, first to pick
+ // off immortal immovable roots to make sure they end up on the first page,
+ // and then again for the rest.
+ if (mode == VISIT_ONLY_STRONG_ROOT_LIST) return;
isolate_->bootstrapper()->Iterate(v);
v->Synchronize(VisitorSynchronization::kBootstrapper);
@@ -4689,7 +4841,11 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
// Iterate over global handles.
switch (mode) {
+ case VISIT_ONLY_STRONG_ROOT_LIST:
+ UNREACHABLE();
+ break;
case VISIT_ONLY_STRONG:
+ case VISIT_ONLY_STRONG_FOR_SERIALIZATION:
isolate_->global_handles()->IterateStrongRoots(v);
break;
case VISIT_ALL_IN_SCAVENGE:
@@ -4720,15 +4876,10 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
}
v->Synchronize(VisitorSynchronization::kStrongRoots);
- // Iterate over the pointers the Serialization/Deserialization code is
- // holding.
- // During garbage collection this keeps the partial snapshot cache alive.
- // During deserialization of the startup snapshot this creates the partial
- // snapshot cache and deserializes the objects it refers to. During
- // serialization this does nothing, since the partial snapshot cache is
- // empty. However the next thing we do is create the partial snapshot,
- // filling up the partial snapshot cache with objects it needs as we go.
- SerializerDeserializer::Iterate(isolate_, v);
+ // Iterate over the partial snapshot cache unless serializing.
+ if (mode != VISIT_ONLY_STRONG_FOR_SERIALIZATION) {
+ SerializerDeserializer::Iterate(isolate_, v);
+ }
// We don't do a v->Synchronize call here, because in debug mode that will
// output a flag to the snapshot. However at this point the serializer and
// deserializer are deliberately a little unsynchronized (see above) so the
@@ -4778,32 +4929,10 @@ bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
max_semi_space_size_ = Page::kPageSize;
}
- if (isolate()->snapshot_available()) {
- // If we are using a snapshot we always reserve the default amount
- // of memory for each semispace because code in the snapshot has
- // write-barrier code that relies on the size and alignment of new
- // space. We therefore cannot use a larger max semispace size
- // than the default reserved semispace size.
- if (max_semi_space_size_ > reserved_semispace_size_) {
- max_semi_space_size_ = reserved_semispace_size_;
- if (FLAG_trace_gc) {
- PrintIsolate(isolate_,
- "Max semi-space size cannot be more than %d kbytes\n",
- reserved_semispace_size_ >> 10);
- }
- }
- } else {
- // If we are not using snapshots we reserve space for the actual
- // max semispace size.
- reserved_semispace_size_ = max_semi_space_size_;
- }
-
// The new space size must be a power of two to support single-bit testing
// for containment.
max_semi_space_size_ =
base::bits::RoundUpToPowerOfTwo32(max_semi_space_size_);
- reserved_semispace_size_ =
- base::bits::RoundUpToPowerOfTwo32(reserved_semispace_size_);
if (FLAG_min_semi_space_size > 0) {
int initial_semispace_size = FLAG_min_semi_space_size * MB;
@@ -5147,7 +5276,7 @@ bool Heap::SetUp() {
incremental_marking_ = new IncrementalMarking(this);
// Set up new space.
- if (!new_space_.SetUp(reserved_semispace_size_, max_semi_space_size_)) {
+ if (!new_space_.SetUp(initial_semispace_size_, max_semi_space_size_)) {
return false;
}
new_space_top_after_last_gc_ = new_space()->top();
@@ -5255,6 +5384,10 @@ void Heap::SetStackLimits() {
(isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
}
+void Heap::ClearStackLimits() {
+ roots_[kStackLimitRootIndex] = Smi::FromInt(0);
+ roots_[kRealStackLimitRootIndex] = Smi::FromInt(0);
+}
void Heap::PrintAlloctionsHash() {
uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
@@ -5274,6 +5407,13 @@ void Heap::NotifyDeserializationComplete() {
#endif // DEBUG
}
+void Heap::RegisterExternallyReferencedObject(Object** object) {
+ DCHECK(mark_compact_collector()->in_use());
+ HeapObject* heap_object = HeapObject::cast(*object);
+ DCHECK(Contains(heap_object));
+ MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
+ mark_compact_collector()->MarkObject(heap_object, mark_bit);
+}
void Heap::TearDown() {
#ifdef VERIFY_HEAP
@@ -5439,6 +5579,11 @@ void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCCallback callback) {
UNREACHABLE();
}
+void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
+ DCHECK_NOT_NULL(tracer);
+ CHECK_NULL(embedder_heap_tracer_);
+ embedder_heap_tracer_ = tracer;
+}
// TODO(ishell): Find a better place for this.
void Heap::AddWeakObjectToCodeDependency(Handle<HeapObject> obj,
@@ -5503,8 +5648,9 @@ void Heap::CompactRetainedMaps(ArrayList* retained_maps) {
if (new_length != length) retained_maps->SetLength(new_length);
}
-void Heap::FatalProcessOutOfMemory(const char* location, bool is_heap_oom) {
- v8::internal::V8::FatalProcessOutOfMemory(location, is_heap_oom);
+
+void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
+ v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot);
}
#ifdef DEBUG
@@ -5580,18 +5726,17 @@ void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) {
Page* page = Page::FromAddress(slot_addr);
DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
RememberedSet<OLD_TO_NEW>::Remove(page, slot_addr);
+ RememberedSet<OLD_TO_OLD>::Remove(page, slot_addr);
}
}
-void Heap::ClearRecordedSlotRange(HeapObject* object, Object** start,
- Object** end) {
- if (!InNewSpace(object)) {
+void Heap::ClearRecordedSlotRange(Address start, Address end) {
+ Page* page = Page::FromAddress(start);
+ if (!page->InNewSpace()) {
store_buffer()->MoveEntriesToRememberedSet();
- Address start_addr = reinterpret_cast<Address>(start);
- Address end_addr = reinterpret_cast<Address>(end);
- Page* page = Page::FromAddress(start_addr);
DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
- RememberedSet<OLD_TO_NEW>::RemoveRange(page, start_addr, end_addr);
+ RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end);
+ RememberedSet<OLD_TO_OLD>::RemoveRange(page, start, end);
}
}
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index f8467ba8f9..9457453561 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -10,6 +10,7 @@
// Clients of this interface shouldn't depend on lots of heap internals.
// Do not include anything from src/heap here!
+#include "include/v8.h"
#include "src/allocation.h"
#include "src/assert-scope.h"
#include "src/atomic-utils.h"
@@ -23,6 +24,8 @@
namespace v8 {
namespace internal {
+using v8::MemoryPressureLevel;
+
// Defines all the roots in Heap.
#define STRONG_ROOT_LIST(V) \
V(Map, byte_array_map, ByteArrayMap) \
@@ -30,7 +33,6 @@ namespace internal {
V(Map, one_pointer_filler_map, OnePointerFillerMap) \
V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
/* Cluster the most popular ones in a few cache lines here at the top. */ \
- V(Smi, store_buffer_top, StoreBufferTop) \
V(Oddball, undefined_value, UndefinedValue) \
V(Oddball, the_hole_value, TheHoleValue) \
V(Oddball, null_value, NullValue) \
@@ -75,6 +77,7 @@ namespace internal {
V(Oddball, arguments_marker, ArgumentsMarker) \
V(Oddball, exception, Exception) \
V(Oddball, termination_exception, TerminationException) \
+ V(Oddball, optimized_out, OptimizedOut) \
V(FixedArray, number_string_cache, NumberStringCache) \
V(Object, instanceof_cache_function, InstanceofCacheFunction) \
V(Object, instanceof_cache_map, InstanceofCacheMap) \
@@ -134,6 +137,7 @@ namespace internal {
V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap) \
V(Map, catch_context_map, CatchContextMap) \
V(Map, with_context_map, WithContextMap) \
+ V(Map, debug_evaluate_context_map, DebugEvaluateContextMap) \
V(Map, block_context_map, BlockContextMap) \
V(Map, module_context_map, ModuleContextMap) \
V(Map, script_context_map, ScriptContextMap) \
@@ -147,6 +151,7 @@ namespace internal {
V(Map, no_interceptor_result_sentinel_map, NoInterceptorResultSentinelMap) \
V(Map, exception_map, ExceptionMap) \
V(Map, termination_exception_map, TerminationExceptionMap) \
+ V(Map, optimized_out_map, OptimizedOutMap) \
V(Map, message_object_map, JSMessageObjectMap) \
V(Map, foreign_map, ForeignMap) \
V(Map, neander_map, NeanderMap) \
@@ -270,6 +275,10 @@ namespace internal {
V(JSMessageObjectMap) \
V(ForeignMap) \
V(NeanderMap) \
+ V(NanValue) \
+ V(InfinityValue) \
+ V(MinusZeroValue) \
+ V(MinusInfinityValue) \
V(EmptyWeakCell) \
V(empty_string) \
PRIVATE_SYMBOL_LIST(V)
@@ -326,7 +335,7 @@ class PromotionQueue {
// If the limit is not on the same page, we can ignore it.
if (Page::FromAllocationTop(limit) != GetHeadPage()) return;
- limit_ = reinterpret_cast<intptr_t*>(limit);
+ limit_ = reinterpret_cast<struct Entry*>(limit);
if (limit_ <= rear_) {
return;
@@ -348,7 +357,7 @@ class PromotionQueue {
}
// If the to space top pointer is smaller or equal than the promotion
// queue head, then the to-space objects are below the promotion queue.
- return reinterpret_cast<intptr_t*>(to_space_top) <= rear_;
+ return reinterpret_cast<struct Entry*>(to_space_top) <= rear_;
}
bool is_empty() {
@@ -356,44 +365,49 @@ class PromotionQueue {
(emergency_stack_ == NULL || emergency_stack_->length() == 0);
}
- inline void insert(HeapObject* target, int size);
+ inline void insert(HeapObject* target, int32_t size, bool was_marked_black);
- void remove(HeapObject** target, int* size) {
+ void remove(HeapObject** target, int32_t* size, bool* was_marked_black) {
DCHECK(!is_empty());
if (front_ == rear_) {
Entry e = emergency_stack_->RemoveLast();
*target = e.obj_;
*size = e.size_;
+ *was_marked_black = e.was_marked_black_;
return;
}
- *target = reinterpret_cast<HeapObject*>(*(--front_));
- *size = static_cast<int>(*(--front_));
+ struct Entry* entry = reinterpret_cast<struct Entry*>(--front_);
+ *target = entry->obj_;
+ *size = entry->size_;
+ *was_marked_black = entry->was_marked_black_;
+
// Assert no underflow.
SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
reinterpret_cast<Address>(front_));
}
private:
- // The front of the queue is higher in the memory page chain than the rear.
- intptr_t* front_;
- intptr_t* rear_;
- intptr_t* limit_;
-
- static const int kEntrySizeInWords = 2;
-
struct Entry {
- Entry(HeapObject* obj, int size) : obj_(obj), size_(size) {}
+ Entry(HeapObject* obj, int32_t size, bool was_marked_black)
+ : obj_(obj), size_(size), was_marked_black_(was_marked_black) {}
HeapObject* obj_;
- int size_;
+ int32_t size_ : 31;
+ bool was_marked_black_ : 1;
};
+
+ void RelocateQueueHead();
+
+ // The front of the queue is higher in the memory page chain than the rear.
+ struct Entry* front_;
+ struct Entry* rear_;
+ struct Entry* limit_;
+
List<Entry>* emergency_stack_;
Heap* heap_;
- void RelocateQueueHead();
-
DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
};
@@ -403,6 +417,7 @@ enum ArrayStorageAllocationMode {
INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
};
+enum class ClearRecordedSlots { kYes, kNo };
class Heap {
public:
@@ -536,6 +551,7 @@ class Heap {
STATIC_ASSERT(kUndefinedValueRootIndex ==
Internals::kUndefinedValueRootIndex);
+ STATIC_ASSERT(kTheHoleValueRootIndex == Internals::kTheHoleValueRootIndex);
STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex);
STATIC_ASSERT(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
@@ -552,7 +568,7 @@ class Heap {
static inline bool IsOneByte(T t, int chars);
static void FatalProcessOutOfMemory(const char* location,
- bool is_heap_oom = false);
+ bool take_snapshot = false);
static bool RootIsImmortalImmovable(int root_index);
@@ -582,10 +598,6 @@ class Heap {
// by pointer size.
static inline void CopyBlock(Address dst, Address src, int byte_size);
- // Optimized version of memmove for blocks with pointer size aligned sizes and
- // pointer size aligned addresses.
- static inline void MoveBlock(Address dst, Address src, int byte_size);
-
// Determines a static visitor id based on the given {map} that can then be
// stored on the map to facilitate fast dispatch for {StaticVisitorBase}.
static int GetStaticVisitorIdForMap(Map* map);
@@ -632,8 +644,10 @@ class Heap {
void MoveElements(FixedArray* array, int dst_index, int src_index, int len);
// Initialize a filler object to keep the ability to iterate over the heap
- // when introducing gaps within pages.
- void CreateFillerObjectAt(Address addr, int size);
+ // when introducing gaps within pages. If slots could have been recorded in
+ // the freed area, then pass ClearRecordedSlots::kYes as the mode. Otherwise,
+ // pass ClearRecordedSlots::kNo.
+ void CreateFillerObjectAt(Address addr, int size, ClearRecordedSlots mode);
bool CanMoveObjectStart(HeapObject* object);
@@ -649,7 +663,7 @@ class Heap {
void RightTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);
// Converts the given boolean condition to JavaScript boolean value.
- inline Object* ToBoolean(bool condition);
+ inline Oddball* ToBoolean(bool condition);
// Check whether the heap is currently iterable.
bool IsHeapIterable();
@@ -726,6 +740,10 @@ class Heap {
bool IdleNotification(double deadline_in_seconds);
bool IdleNotification(int idle_time_in_ms);
+ void MemoryPressureNotification(MemoryPressureLevel level,
+ bool is_isolate_locked);
+ void CheckMemoryPressure();
+
double MonotonicallyIncreasingTimeInMs();
void RecordStats(HeapStats* stats, bool take_snapshot = false);
@@ -740,6 +758,8 @@ class Heap {
if (PromotedTotalSize() >= adjusted_allocation_limit) return true;
+ if (HighMemoryPressure()) return true;
+
return false;
}
@@ -823,7 +843,12 @@ class Heap {
void SetOptimizeForLatency() { optimize_for_memory_usage_ = false; }
void SetOptimizeForMemoryUsage();
- bool ShouldOptimizeForMemoryUsage() { return optimize_for_memory_usage_; }
+ bool ShouldOptimizeForMemoryUsage() {
+ return optimize_for_memory_usage_ || HighMemoryPressure();
+ }
+ bool HighMemoryPressure() {
+ return memory_pressure_level_.Value() != MemoryPressureLevel::kNone;
+ }
// ===========================================================================
// Initialization. ===========================================================
@@ -853,10 +878,6 @@ class Heap {
// Getters for spaces. =======================================================
// ===========================================================================
- // Return the starting address and a mask for the new space. And-masking an
- // address with the mask will result in the start address of the new space
- // for all addresses in either semispace.
- Address NewSpaceStart() { return new_space_.start(); }
Address NewSpaceTop() { return new_space_.top(); }
NewSpace* new_space() { return &new_space_; }
@@ -895,11 +916,21 @@ class Heap {
const char* GetSpaceName(int idx);
// ===========================================================================
+ // API. ======================================================================
+ // ===========================================================================
+
+ void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
+
+ void RegisterExternallyReferencedObject(Object** object);
+
+ // ===========================================================================
// Getters to other components. ==============================================
// ===========================================================================
GCTracer* tracer() { return tracer_; }
+ EmbedderHeapTracer* embedder_heap_tracer() { return embedder_heap_tracer_; }
+
PromotionQueue* promotion_queue() { return &promotion_queue_; }
inline Isolate* isolate();
@@ -974,6 +1005,10 @@ class Heap {
// jslimit_/real_jslimit_ variable in the StackGuard.
void SetStackLimits();
+ // The stack limit is thread-dependent. To be able to reproduce the same
+ // snapshot blob, we need to reset it before serializing.
+ void ClearStackLimits();
+
// Generated code can treat direct references to this root as constant.
bool RootCanBeTreatedAsConstant(RootListIndex root_index);
@@ -1039,14 +1074,14 @@ class Heap {
// Iterates over all the other roots in the heap.
void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
- // Iterate pointers to from semispace of new space found in memory interval
- // from start to end within |object|.
- void IteratePointersToFromSpace(HeapObject* target, int size,
- ObjectSlotCallback callback);
+ // Iterate pointers of promoted objects.
+ void IteratePromotedObject(HeapObject* target, int size,
+ bool was_marked_black,
+ ObjectSlotCallback callback);
- void IterateAndMarkPointersToFromSpace(HeapObject* object, Address start,
- Address end, bool record_slots,
- ObjectSlotCallback callback);
+ void IteratePromotedObjectPointers(HeapObject* object, Address start,
+ Address end, bool record_slots,
+ ObjectSlotCallback callback);
// ===========================================================================
// Store buffer API. =========================================================
@@ -1055,12 +1090,10 @@ class Heap {
// Write barrier support for object[offset] = o;
inline void RecordWrite(Object* object, int offset, Object* o);
- Address* store_buffer_top_address() {
- return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
- }
+ Address* store_buffer_top_address() { return store_buffer()->top_address(); }
void ClearRecordedSlot(HeapObject* object, Object** slot);
- void ClearRecordedSlotRange(HeapObject* object, Object** start, Object** end);
+ void ClearRecordedSlotRange(Address start, Address end);
// ===========================================================================
// Incremental marking API. ==================================================
@@ -1081,6 +1114,8 @@ class Heap {
bool TryFinalizeIdleIncrementalMarking(double idle_time_in_ms);
+ void RegisterReservationsForBlackAllocation(Reservation* reservations);
+
IncrementalMarking* incremental_marking() { return incremental_marking_; }
// ===========================================================================
@@ -1144,16 +1179,11 @@ class Heap {
// GC statistics. ============================================================
// ===========================================================================
- // Returns the maximum amount of memory reserved for the heap. For
- // the young generation, we reserve 4 times the amount needed for a
- // semi space. The young generation consists of two semi spaces and
- // we reserve twice the amount needed for those in order to ensure
- // that new space can be aligned to its size.
+ // Returns the maximum amount of memory reserved for the heap.
intptr_t MaxReserved() {
- return 4 * reserved_semispace_size_ + max_old_generation_size_;
+ return 2 * max_semi_space_size_ + max_old_generation_size_;
}
int MaxSemiSpaceSize() { return max_semi_space_size_; }
- int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
int InitialSemiSpaceSize() { return initial_semispace_size_; }
intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
intptr_t MaxExecutableSize() { return max_executable_size_; }
@@ -1618,6 +1648,8 @@ class Heap {
void CompactRetainedMaps(ArrayList* retained_maps);
+ void CollectGarbageOnMemoryPressure(const char* source);
+
// Attempt to over-approximate the weak closure by marking object groups and
// implicit references from global handles, but don't atomically complete
// marking. If we continue to mark incrementally, we might have marked
@@ -1672,6 +1704,7 @@ class Heap {
void ProcessYoungWeakReferences(WeakObjectRetainer* retainer);
void ProcessNativeContexts(WeakObjectRetainer* retainer);
void ProcessAllocationSites(WeakObjectRetainer* retainer);
+ void ProcessWeakListRoots(WeakObjectRetainer* retainer);
// ===========================================================================
// GC statistics. ============================================================
@@ -1962,10 +1995,8 @@ class Heap {
Object* roots_[kRootListLength];
size_t code_range_size_;
- int reserved_semispace_size_;
int max_semi_space_size_;
int initial_semispace_size_;
- int target_semispace_size_;
intptr_t max_old_generation_size_;
intptr_t initial_old_generation_size_;
bool old_generation_size_configured_;
@@ -1983,6 +2014,10 @@ class Heap {
// count, as scopes can be acquired from multiple tasks (read: threads).
AtomicNumber<size_t> always_allocate_scope_count_;
+ // Stores the memory pressure level that set by MemoryPressureNotification
+ // and reset by a mark-compact garbage collection.
+ AtomicValue<MemoryPressureLevel> memory_pressure_level_;
+
// For keeping track of context disposals.
int contexts_disposed_;
@@ -2069,6 +2104,7 @@ class Heap {
int deferred_counters_[v8::Isolate::kUseCounterFeatureCount];
GCTracer* tracer_;
+ EmbedderHeapTracer* embedder_heap_tracer_;
int high_survival_rate_period_length_;
intptr_t promoted_objects_size_;
@@ -2210,7 +2246,7 @@ class Heap {
friend class HeapIterator;
friend class IdleScavengeObserver;
friend class IncrementalMarking;
- friend class IteratePointersToFromSpaceVisitor;
+ friend class IteratePromotedObjectsVisitor;
friend class MarkCompactCollector;
friend class MarkCompactMarkingVisitor;
friend class NewSpace;
diff --git a/deps/v8/src/heap/incremental-marking-inl.h b/deps/v8/src/heap/incremental-marking-inl.h
index 0d55b83a9d..fa22da6d41 100644
--- a/deps/v8/src/heap/incremental-marking-inl.h
+++ b/deps/v8/src/heap/incremental-marking-inl.h
@@ -26,11 +26,10 @@ void IncrementalMarking::RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
}
}
-
-void IncrementalMarking::RecordWriteIntoCode(HeapObject* obj, RelocInfo* rinfo,
+void IncrementalMarking::RecordWriteIntoCode(Code* host, RelocInfo* rinfo,
Object* value) {
if (IsMarking() && value->IsHeapObject()) {
- RecordWriteIntoCodeSlow(obj, rinfo, value);
+ RecordWriteIntoCodeSlow(host, rinfo, value);
}
}
diff --git a/deps/v8/src/heap/incremental-marking-job.cc b/deps/v8/src/heap/incremental-marking-job.cc
index a69dfac2fa..3ccbec23d6 100644
--- a/deps/v8/src/heap/incremental-marking-job.cc
+++ b/deps/v8/src/heap/incremental-marking-job.cc
@@ -14,6 +14,8 @@
namespace v8 {
namespace internal {
+const double IncrementalMarkingJob::kLongDelayInSeconds = 5;
+const double IncrementalMarkingJob::kShortDelayInSeconds = 0.5;
void IncrementalMarkingJob::Start(Heap* heap) {
DCHECK(!heap->incremental_marking()->IsStopped());
@@ -58,8 +60,10 @@ void IncrementalMarkingJob::ScheduleDelayedTask(Heap* heap) {
delayed_task_pending_ = true;
made_progress_since_last_delayed_task_ = false;
auto task = new DelayedTask(heap->isolate(), this);
+ double delay =
+ heap->HighMemoryPressure() ? kShortDelayInSeconds : kLongDelayInSeconds;
V8::GetCurrentPlatform()->CallDelayedOnForegroundThread(isolate, task,
- kDelayInSeconds);
+ delay);
}
}
@@ -79,7 +83,7 @@ IncrementalMarkingJob::IdleTask::Progress IncrementalMarkingJob::IdleTask::Step(
}
const double remaining_idle_time_in_ms =
incremental_marking->AdvanceIncrementalMarking(
- 0, deadline_in_ms, IncrementalMarking::IdleStepActions());
+ deadline_in_ms, IncrementalMarking::IdleStepActions());
if (remaining_idle_time_in_ms > 0.0) {
heap->TryFinalizeIdleIncrementalMarking(remaining_idle_time_in_ms);
}
@@ -117,10 +121,10 @@ void IncrementalMarkingJob::DelayedTask::Step(Heap* heap) {
double deadline =
heap->MonotonicallyIncreasingTimeInMs() + kIncrementalMarkingDelayMs;
heap->incremental_marking()->AdvanceIncrementalMarking(
- 0, deadline, i::IncrementalMarking::StepActions(
- i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- i::IncrementalMarking::FORCE_MARKING,
- i::IncrementalMarking::FORCE_COMPLETION));
+ deadline, i::IncrementalMarking::StepActions(
+ i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ i::IncrementalMarking::FORCE_MARKING,
+ i::IncrementalMarking::FORCE_COMPLETION));
heap->FinalizeIncrementalMarkingIfComplete(
"Incremental marking task: finalize incremental marking");
}
diff --git a/deps/v8/src/heap/incremental-marking-job.h b/deps/v8/src/heap/incremental-marking-job.h
index c998139a92..9c78182f2e 100644
--- a/deps/v8/src/heap/incremental-marking-job.h
+++ b/deps/v8/src/heap/incremental-marking-job.h
@@ -49,7 +49,8 @@ class IncrementalMarkingJob {
};
// Delay of the delayed task.
- static const int kDelayInSeconds = 5;
+ static const double kLongDelayInSeconds;
+ static const double kShortDelayInSeconds;
IncrementalMarkingJob()
: idle_task_pending_(false),
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 4b113dd4ae..376e8488ce 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -39,12 +39,12 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
allocated_(0),
write_barriers_invoked_since_last_step_(0),
idle_marking_delay_counter_(0),
- no_marking_scope_depth_(0),
unscanned_bytes_of_large_object_(0),
was_activated_(false),
+ black_allocation_(false),
finalize_marking_completed_(false),
incremental_marking_finalization_rounds_(0),
- request_type_(COMPLETE_MARKING) {}
+ request_type_(NONE) {}
bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
HeapObject* value_heap_obj = HeapObject::cast(value);
@@ -131,60 +131,12 @@ void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
}
}
-
-void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
- RelocInfo* rinfo,
+void IncrementalMarking::RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo,
Object* value) {
- if (BaseRecordWrite(obj, value)) {
- // Object is not going to be rescanned. We need to record the slot.
- heap_->mark_compact_collector()->RecordRelocSlot(rinfo, value);
- }
-}
-
-
-void IncrementalMarking::RecordWrites(HeapObject* obj) {
- if (IsMarking()) {
- MarkBit obj_bit = Marking::MarkBitFrom(obj);
- if (Marking::IsBlack(obj_bit)) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
- if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
- chunk->set_progress_bar(0);
- }
- BlackToGreyAndUnshift(obj, obj_bit);
- RestartIfNotMarking();
- }
- }
-}
-
-
-void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
- MarkBit mark_bit) {
- DCHECK(Marking::MarkBitFrom(obj) == mark_bit);
- DCHECK(obj->Size() >= 2 * kPointerSize);
- DCHECK(IsMarking());
- Marking::BlackToGrey(mark_bit);
- int obj_size = obj->Size();
- MemoryChunk::IncrementLiveBytesFromGC(obj, -obj_size);
- bytes_scanned_ -= obj_size;
- int64_t old_bytes_rescanned = bytes_rescanned_;
- bytes_rescanned_ = old_bytes_rescanned + obj_size;
- if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) {
- if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSizeOfObjects()) {
- // If we have queued twice the heap size for rescanning then we are
- // going around in circles, scanning the same objects again and again
- // as the program mutates the heap faster than we can incrementally
- // trace it. In this case we switch to non-incremental marking in
- // order to finish off this marking phase.
- if (FLAG_trace_incremental_marking) {
- PrintIsolate(
- heap()->isolate(),
- "Hurrying incremental marking because of lack of progress\n");
- }
- marking_speed_ = kMaxMarkingSpeed;
- }
+ if (BaseRecordWrite(host, value)) {
+ // Object is not going to be rescanned. We need to record the slot.
+ heap_->mark_compact_collector()->RecordRelocSlot(host, rinfo, value);
}
-
- heap_->mark_compact_collector()->marking_deque()->Unshift(obj);
}
@@ -323,6 +275,16 @@ class IncrementalMarkingMarkingVisitor
}
};
+void IncrementalMarking::IterateBlackObject(HeapObject* object) {
+ if (IsMarking() && Marking::IsBlack(Marking::MarkBitFrom(object))) {
+ Page* page = Page::FromAddress(object->address());
+ if ((page->owner() != nullptr) && (page->owner()->identity() == LO_SPACE)) {
+ // IterateBlackObject requires us to visit the hole object.
+ page->ResetProgressBar();
+ }
+ IncrementalMarkingMarkingVisitor::IterateBody(object->map(), object);
+ }
+}
class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
public:
@@ -404,7 +366,7 @@ void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
LargePage* lop = heap_->lo_space()->first_page();
- while (LargePage::IsValid(lop)) {
+ while (lop->is_valid()) {
SetOldSpacePageFlags(lop, false, false);
lop = lop->next_page();
}
@@ -436,7 +398,7 @@ void IncrementalMarking::ActivateIncrementalWriteBarrier() {
ActivateIncrementalWriteBarrier(heap_->new_space());
LargePage* lop = heap_->lo_space()->first_page();
- while (LargePage::IsValid(lop)) {
+ while (lop->is_valid()) {
SetOldSpacePageFlags(lop, true, is_compacting_);
lop = lop->next_page();
}
@@ -556,6 +518,15 @@ void IncrementalMarking::Start(const char* reason) {
void IncrementalMarking::StartMarking() {
+ if (heap_->isolate()->serializer_enabled()) {
+ // Black allocation currently starts when we start incremental marking,
+ // but we cannot enable black allocation while deserializing. Hence, we
+ // have to delay the start of incremental marking in that case.
+ if (FLAG_trace_incremental_marking) {
+ PrintF("[IncrementalMarking] Start delayed - serializer\n");
+ }
+ return;
+ }
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Start marking\n");
}
@@ -603,6 +574,26 @@ void IncrementalMarking::StartMarking() {
}
}
+void IncrementalMarking::StartBlackAllocation() {
+ DCHECK(FLAG_black_allocation);
+ DCHECK(IsMarking());
+ black_allocation_ = true;
+ OldSpace* old_space = heap()->old_space();
+ old_space->EmptyAllocationInfo();
+ old_space->free_list()->Reset();
+ if (FLAG_trace_incremental_marking) {
+ PrintF("[IncrementalMarking] Black allocation started\n");
+ }
+}
+
+void IncrementalMarking::FinishBlackAllocation() {
+ if (black_allocation_) {
+ black_allocation_ = false;
+ if (FLAG_trace_incremental_marking) {
+ PrintF("[IncrementalMarking] Black allocation finished\n");
+ }
+ }
+}
void IncrementalMarking::MarkRoots() {
DCHECK(!finalize_marking_completed_);
@@ -775,6 +766,13 @@ void IncrementalMarking::FinalizeIncrementally() {
FLAG_min_progress_during_incremental_marking_finalization)) {
finalize_marking_completed_ = true;
}
+
+ if (FLAG_black_allocation && !heap()->ShouldReduceMemory() &&
+ !black_allocation_) {
+ // TODO(hpayer): Move to an earlier point as soon as we make faster marking
+ // progress.
+ StartBlackAllocation();
+ }
}
@@ -805,6 +803,8 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
// them.
if (map_word.IsForwardingAddress()) {
HeapObject* dest = map_word.ToForwardingAddress();
+ if (Page::FromAddress(dest->address())->IsFlagSet(Page::BLACK_PAGE))
+ continue;
array[new_top] = dest;
new_top = ((new_top + 1) & mask);
DCHECK(new_top != marking_deque->bottom());
@@ -904,7 +904,12 @@ void IncrementalMarking::ProcessMarkingDeque() {
void IncrementalMarking::Hurry() {
- if (state() == MARKING) {
+ // A scavenge may have pushed new objects on the marking deque (due to black
+ // allocation) even in COMPLETE state. This may happen if scavenges are
+ // forced e.g. in tests. It should not happen when COMPLETE was set when
+ // incremental marking finished and a regular GC was triggered after that
+ // because should_hurry_ will force a full GC.
+ if (!heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
double start = 0.0;
if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
start = heap_->MonotonicallyIncreasingTimeInMs();
@@ -969,6 +974,7 @@ void IncrementalMarking::Stop() {
heap_->isolate()->stack_guard()->ClearGC();
state_ = STOPPED;
is_compacting_ = false;
+ FinishBlackAllocation();
}
@@ -1016,28 +1022,26 @@ void IncrementalMarking::Epilogue() {
incremental_marking_finalization_rounds_ = 0;
}
-
double IncrementalMarking::AdvanceIncrementalMarking(
- intptr_t step_size_in_bytes, double deadline_in_ms,
- IncrementalMarking::StepActions step_actions) {
+ double deadline_in_ms, IncrementalMarking::StepActions step_actions) {
DCHECK(!IsStopped());
- if (step_size_in_bytes == 0) {
- step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
- static_cast<size_t>(GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs),
- static_cast<size_t>(
- heap()
- ->tracer()
- ->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond()));
- }
-
+ intptr_t step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
+ GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs,
+ heap()
+ ->tracer()
+ ->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
double remaining_time_in_ms = 0.0;
+ intptr_t bytes_processed = 0;
+
do {
- Step(step_size_in_bytes, step_actions.completion_action,
- step_actions.force_marking, step_actions.force_completion);
+ bytes_processed =
+ Step(step_size_in_bytes, step_actions.completion_action,
+ step_actions.force_marking, step_actions.force_completion);
remaining_time_in_ms =
deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
- } while (remaining_time_in_ms >=
+ } while (bytes_processed > 0 &&
+ remaining_time_in_ms >=
2.0 * GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs &&
!IsComplete() &&
!heap()->mark_compact_collector()->marking_deque()->IsEmpty());
@@ -1152,8 +1156,6 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
return 0;
}
- if (state_ == MARKING && no_marking_scope_depth_ > 0) return 0;
-
intptr_t bytes_processed = 0;
{
HistogramTimerScope incremental_marking_scope(
@@ -1187,7 +1189,8 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
bytes_scanned_ = 0;
StartMarking();
}
- } else if (state_ == MARKING) {
+ }
+ if (state_ == MARKING) {
bytes_processed = ProcessMarkingDeque(bytes_to_process);
if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
if (completion == FORCE_COMPLETION ||
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index 387dd0c74a..f10150da34 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -29,7 +29,7 @@ class IncrementalMarking {
enum ForceCompletionAction { FORCE_COMPLETION, DO_NOT_FORCE_COMPLETION };
- enum GCRequestType { COMPLETE_MARKING, FINALIZATION };
+ enum GCRequestType { NONE, COMPLETE_MARKING, FINALIZATION };
struct StepActions {
StepActions(CompletionAction complete_action_,
@@ -80,6 +80,8 @@ class IncrementalMarking {
GCRequestType request_type() const { return request_type_; }
+ void reset_request_type() { request_type_ = NONE; }
+
bool CanBeActivated();
bool ShouldActivateEvenWithoutIdleNotification();
@@ -104,13 +106,10 @@ class IncrementalMarking {
void Epilogue();
- // Performs incremental marking steps of step_size_in_bytes as long as
- // deadline_ins_ms is not reached. step_size_in_bytes can be 0 to compute
- // an estimate increment. Returns the remaining time that cannot be used
- // for incremental marking anymore because a single step would exceed the
- // deadline.
- double AdvanceIncrementalMarking(intptr_t step_size_in_bytes,
- double deadline_in_ms,
+ // Performs incremental marking steps until deadline_in_ms is reached. It
+ // returns the remaining time that cannot be used for incremental marking
+ // anymore because a single step would exceed the deadline.
+ double AdvanceIncrementalMarking(double deadline_in_ms,
StepActions step_actions);
// It's hard to know how much work the incremental marker should do to make
@@ -165,23 +164,17 @@ class IncrementalMarking {
// the incremental cycle (stays white).
INLINE(bool BaseRecordWrite(HeapObject* obj, Object* value));
INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
- INLINE(void RecordWriteIntoCode(HeapObject* obj, RelocInfo* rinfo,
- Object* value));
+ INLINE(void RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* value));
INLINE(void RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
Code* value));
void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
- void RecordWriteIntoCodeSlow(HeapObject* obj, RelocInfo* rinfo,
- Object* value);
+ void RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo, Object* value);
void RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value);
void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
void RecordCodeTargetPatch(Address pc, HeapObject* value);
- void RecordWrites(HeapObject* obj);
-
- void BlackToGreyAndUnshift(HeapObject* obj, MarkBit mark_bit);
-
void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);
inline void SetOldSpacePageFlags(MemoryChunk* chunk) {
@@ -198,10 +191,6 @@ class IncrementalMarking {
void NotifyOfHighPromotionRate();
- void EnterNoMarkingScope() { no_marking_scope_depth_++; }
-
- void LeaveNoMarkingScope() { no_marking_scope_depth_--; }
-
void NotifyIncompleteScanOfObject(int unscanned_bytes) {
unscanned_bytes_of_large_object_ = unscanned_bytes;
}
@@ -210,7 +199,9 @@ class IncrementalMarking {
bool IsIdleMarkingDelayCounterLimitReached();
- INLINE(static void MarkObject(Heap* heap, HeapObject* object));
+ static void MarkObject(Heap* heap, HeapObject* object);
+
+ void IterateBlackObject(HeapObject* object);
Heap* heap() const { return heap_; }
@@ -218,6 +209,8 @@ class IncrementalMarking {
return &incremental_marking_job_;
}
+ bool black_allocation() { return black_allocation_; }
+
private:
class Observer : public AllocationObserver {
public:
@@ -242,6 +235,9 @@ class IncrementalMarking {
void StartMarking();
+ void StartBlackAllocation();
+ void FinishBlackAllocation();
+
void MarkRoots();
void MarkObjectGroups();
void ProcessWeakCells();
@@ -288,12 +284,12 @@ class IncrementalMarking {
intptr_t write_barriers_invoked_since_last_step_;
size_t idle_marking_delay_counter_;
- int no_marking_scope_depth_;
-
int unscanned_bytes_of_large_object_;
bool was_activated_;
+ bool black_allocation_;
+
bool finalize_marking_completed_;
int incremental_marking_finalization_rounds_;
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index f117acee9b..281ece4cc8 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -6,7 +6,7 @@
#define V8_HEAP_MARK_COMPACT_INL_H_
#include "src/heap/mark-compact.h"
-#include "src/heap/slots-buffer.h"
+#include "src/heap/remembered-set.h"
#include "src/isolate.h"
namespace v8 {
@@ -70,25 +70,12 @@ bool MarkCompactCollector::IsMarked(Object* obj) {
void MarkCompactCollector::RecordSlot(HeapObject* object, Object** slot,
Object* target) {
Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
+ Page* source_page = Page::FromAddress(reinterpret_cast<Address>(object));
if (target_page->IsEvacuationCandidate() &&
!ShouldSkipEvacuationSlotRecording(object)) {
- if (!SlotsBuffer::AddTo(slots_buffer_allocator_,
- target_page->slots_buffer_address(), slot,
- SlotsBuffer::FAIL_ON_OVERFLOW)) {
- EvictPopularEvacuationCandidate(target_page);
- }
- }
-}
-
-
-void MarkCompactCollector::ForceRecordSlot(HeapObject* object, Object** slot,
- Object* target) {
- Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
- if (target_page->IsEvacuationCandidate() &&
- !ShouldSkipEvacuationSlotRecording(object)) {
- CHECK(SlotsBuffer::AddTo(slots_buffer_allocator_,
- target_page->slots_buffer_address(), slot,
- SlotsBuffer::IGNORE_OVERFLOW));
+ DCHECK(Marking::IsBlackOrGrey(Marking::MarkBitFrom(object)));
+ RememberedSet<OLD_TO_OLD>::Insert(source_page,
+ reinterpret_cast<Address>(slot));
}
}
@@ -182,6 +169,7 @@ HeapObject* LiveObjectIterator<T>::Next() {
} else if (T == kAllLiveObjects) {
object = HeapObject::FromAddress(addr);
}
+
// Clear the second bit of the found object.
current_cell_ &= ~second_bit_index;
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 641ac7d1dc..5ffea25488 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -21,7 +21,7 @@
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
-#include "src/heap/slots-buffer.h"
+#include "src/heap/page-parallel-job.h"
#include "src/heap/spaces-inl.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
@@ -55,8 +55,6 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
marking_parity_(ODD_MARKING_PARITY),
was_marked_incrementally_(false),
evacuation_(false),
- slots_buffer_allocator_(nullptr),
- migration_slots_buffer_(nullptr),
heap_(heap),
marking_deque_memory_(NULL),
marking_deque_memory_committed_(0),
@@ -64,9 +62,9 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
have_code_to_deoptimize_(false),
compacting_(false),
sweeping_in_progress_(false),
- compaction_in_progress_(false),
pending_sweeper_tasks_semaphore_(0),
- pending_compaction_tasks_semaphore_(0) {
+ pending_compaction_tasks_semaphore_(0),
+ page_parallel_job_semaphore_(0) {
}
#ifdef VERIFY_HEAP
@@ -122,6 +120,15 @@ static void VerifyMarking(Heap* heap, Address bottom, Address top) {
}
}
+static void VerifyMarkingBlackPage(Heap* heap, Page* page) {
+ CHECK(page->IsFlagSet(Page::BLACK_PAGE));
+ VerifyMarkingVisitor visitor(heap);
+ HeapObjectIterator it(page);
+ for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+ CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+ object->Iterate(&visitor);
+ }
+}
static void VerifyMarking(NewSpace* space) {
Address end = space->top();
@@ -144,7 +151,11 @@ static void VerifyMarking(PagedSpace* space) {
while (it.has_next()) {
Page* p = it.next();
- VerifyMarking(space->heap(), p->area_start(), p->area_end());
+ if (p->IsFlagSet(Page::BLACK_PAGE)) {
+ VerifyMarkingBlackPage(space->heap(), p);
+ } else {
+ VerifyMarking(space->heap(), p->area_start(), p->area_end());
+ }
}
}
@@ -244,12 +255,8 @@ void MarkCompactCollector::SetUp() {
DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
- free_list_old_space_.Reset(new FreeList(heap_->old_space()));
- free_list_code_space_.Reset(new FreeList(heap_->code_space()));
- free_list_map_space_.Reset(new FreeList(heap_->map_space()));
EnsureMarkingDequeIsReserved();
EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize);
- slots_buffer_allocator_ = new SlotsBufferAllocator();
if (FLAG_flush_code) {
code_flusher_ = new CodeFlusher(isolate());
@@ -263,7 +270,6 @@ void MarkCompactCollector::SetUp() {
void MarkCompactCollector::TearDown() {
AbortCompaction();
delete marking_deque_memory_;
- delete slots_buffer_allocator_;
delete code_flusher_;
}
@@ -310,55 +316,25 @@ bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
return compacting_;
}
-
-void MarkCompactCollector::ClearInvalidStoreAndSlotsBufferEntries() {
+void MarkCompactCollector::ClearInvalidRememberedSetSlots() {
{
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_CLEAR_STORE_BUFFER);
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STORE_BUFFER);
RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(heap());
}
+// There is not need to filter the old to old set because
+// it is completely cleared after the mark-compact GC.
+// The slots that become invalid due to runtime transitions are
+// cleared eagerly immediately after the transition.
- {
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_CLEAR_SLOTS_BUFFER);
- for (Page* p : evacuation_candidates_) {
- SlotsBuffer::RemoveInvalidSlots(heap_, p->slots_buffer());
- }
- }
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
- VerifyValidStoreAndSlotsBufferEntries();
+ RememberedSet<OLD_TO_NEW>::VerifyValidSlots(heap());
+ RememberedSet<OLD_TO_OLD>::VerifyValidSlots(heap());
}
#endif
}
-#ifdef VERIFY_HEAP
-static void VerifyValidSlotsBufferEntries(Heap* heap, PagedSpace* space) {
- PageIterator it(space);
- while (it.has_next()) {
- Page* p = it.next();
- SlotsBuffer::VerifySlots(heap, p->slots_buffer());
- }
-}
-
-
-void MarkCompactCollector::VerifyValidStoreAndSlotsBufferEntries() {
- RememberedSet<OLD_TO_NEW>::VerifyValidSlots(heap());
-
- VerifyValidSlotsBufferEntries(heap(), heap()->old_space());
- VerifyValidSlotsBufferEntries(heap(), heap()->code_space());
- VerifyValidSlotsBufferEntries(heap(), heap()->map_space());
-
- LargeObjectIterator it(heap()->lo_space());
- for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
- SlotsBuffer::VerifySlots(heap(), chunk->slots_buffer());
- }
-}
-#endif
-
-
void MarkCompactCollector::CollectGarbage() {
// Make sure that Prepare() has been called. The individual steps below will
// update the state as they proceed.
@@ -448,7 +424,11 @@ static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
PageIterator it(space);
while (it.has_next()) {
- Bitmap::Clear(it.next());
+ Page* p = it.next();
+ Bitmap::Clear(p);
+ if (p->IsFlagSet(Page::BLACK_PAGE)) {
+ p->ClearFlag(Page::BLACK_PAGE);
+ }
}
}
@@ -471,8 +451,12 @@ void MarkCompactCollector::ClearMarkbits() {
LargeObjectIterator it(heap_->lo_space());
for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
Marking::MarkWhite(Marking::MarkBitFrom(obj));
- Page::FromAddress(obj->address())->ResetProgressBar();
- Page::FromAddress(obj->address())->ResetLiveBytes();
+ MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+ chunk->ResetProgressBar();
+ chunk->ResetLiveBytes();
+ if (chunk->IsFlagSet(Page::BLACK_PAGE)) {
+ chunk->ClearFlag(Page::BLACK_PAGE);
+ }
}
}
@@ -509,9 +493,6 @@ class MarkCompactCollector::SweeperTask : public v8::Task {
void MarkCompactCollector::StartSweeperThreads() {
- DCHECK(free_list_old_space_.get()->IsEmpty());
- DCHECK(free_list_code_space_.get()->IsEmpty());
- DCHECK(free_list_map_space_.get()->IsEmpty());
V8::GetCurrentPlatform()->CallOnBackgroundThread(
new SweeperTask(heap(), OLD_SPACE), v8::Platform::kShortRunningTask);
V8::GetCurrentPlatform()->CallOnBackgroundThread(
@@ -589,7 +570,9 @@ void Marking::TransferMark(Heap* heap, Address old_start, Address new_start) {
DCHECK(MemoryChunk::FromAddress(old_start) ==
MemoryChunk::FromAddress(new_start));
- if (!heap->incremental_marking()->IsMarking()) return;
+ if (!heap->incremental_marking()->IsMarking() ||
+ Page::FromAddress(old_start)->IsFlagSet(Page::BLACK_PAGE))
+ return;
// If the mark doesn't move, we don't check the color of the object.
// It doesn't matter whether the object is black, since it hasn't changed
@@ -661,15 +644,15 @@ void MarkCompactCollector::ComputeEvacuationHeuristics(
*target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
*max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
} else {
- const intptr_t estimated_compaction_speed =
+ const double estimated_compaction_speed =
heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
if (estimated_compaction_speed != 0) {
// Estimate the target fragmentation based on traced compaction speed
// and a goal for a single page.
- const intptr_t estimated_ms_per_area =
- 1 + static_cast<intptr_t>(area_size) / estimated_compaction_speed;
- *target_fragmentation_percent =
- 100 - 100 * kTargetMsPerArea / estimated_ms_per_area;
+ const double estimated_ms_per_area =
+ 1 + area_size / estimated_compaction_speed;
+ *target_fragmentation_percent = static_cast<int>(
+ 100 - 100 * kTargetMsPerArea / estimated_ms_per_area);
if (*target_fragmentation_percent <
kTargetFragmentationPercentForReduceMemory) {
*target_fragmentation_percent =
@@ -698,17 +681,14 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
while (it.has_next()) {
Page* p = it.next();
if (p->NeverEvacuate()) continue;
- if (p->IsFlagSet(Page::POPULAR_PAGE)) {
- // This page had slots buffer overflow on previous GC, skip it.
- p->ClearFlag(Page::POPULAR_PAGE);
- continue;
- }
+ if (p->IsFlagSet(Page::BLACK_PAGE)) continue;
// Invariant: Evacuation candidates are just created when marking is
// started. This means that sweeping has finished. Furthermore, at the end
// of a GC all evacuation candidates are cleared and their slot buffers are
// released.
CHECK(!p->IsEvacuationCandidate());
- CHECK(p->slots_buffer() == nullptr);
+ CHECK_NULL(p->old_to_old_slots());
+ CHECK_NULL(p->typed_old_to_old_slots());
CHECK(p->SweepingDone());
DCHECK(p->area_size() == area_size);
pages.push_back(std::make_pair(p->LiveBytesFromFreeList(), p));
@@ -814,10 +794,9 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
void MarkCompactCollector::AbortCompaction() {
if (compacting_) {
+ RememberedSet<OLD_TO_OLD>::ClearAll(heap());
for (Page* p : evacuation_candidates_) {
- slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
p->ClearEvacuationCandidate();
- p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
}
compacting_ = false;
evacuation_candidates_.Rewind(0);
@@ -877,7 +856,7 @@ void MarkCompactCollector::Prepare() {
void MarkCompactCollector::Finish() {
- GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_FINISH);
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
// The hashing of weak_object_to_code_table is no longer valid.
heap()->weak_object_to_code_table()->Rehash(
@@ -1031,7 +1010,7 @@ void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
// Make sure previous flushing decisions are revisited.
- isolate_->heap()->incremental_marking()->RecordWrites(shared_info);
+ isolate_->heap()->incremental_marking()->IterateBlackObject(shared_info);
if (FLAG_trace_code_flushing) {
PrintF("[code-flushing abandons function-info: ");
@@ -1067,8 +1046,9 @@ void CodeFlusher::EvictCandidate(JSFunction* function) {
Object* undefined = isolate_->heap()->undefined_value();
// Make sure previous flushing decisions are revisited.
- isolate_->heap()->incremental_marking()->RecordWrites(function);
- isolate_->heap()->incremental_marking()->RecordWrites(function->shared());
+ isolate_->heap()->incremental_marking()->IterateBlackObject(function);
+ isolate_->heap()->incremental_marking()->IterateBlackObject(
+ function->shared());
if (FLAG_trace_code_flushing) {
PrintF("[code-flushing abandons closure: ");
@@ -1231,9 +1211,11 @@ class MarkCompactMarkingVisitor
// was marked through the compilation cache before marker reached JSRegExp
// object.
FixedArray* data = FixedArray::cast(re->data());
- Object** slot =
- data->data_start() + JSRegExp::saved_code_index(is_one_byte);
- heap->mark_compact_collector()->RecordSlot(data, slot, code);
+ if (Marking::IsBlackOrGrey(Marking::MarkBitFrom(data))) {
+ Object** slot =
+ data->data_start() + JSRegExp::saved_code_index(is_one_byte);
+ heap->mark_compact_collector()->RecordSlot(data, slot, code);
+ }
// Set a number in the 0-255 range to guarantee no smi overflow.
re->SetDataAt(JSRegExp::code_index(is_one_byte),
@@ -1353,12 +1335,6 @@ void MarkCompactCollector::PrepareForCodeFlushing() {
// If code flushing is disabled, there is no need to prepare for it.
if (!is_code_flushing_enabled()) return;
- // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
- // relies on it being marked before any other descriptor array.
- HeapObject* descriptor_array = heap()->empty_descriptor_array();
- MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
- MarkObject(descriptor_array, descriptor_array_mark);
-
// Make sure we are not referencing the code from the stack.
DCHECK(this == heap()->mark_compact_collector());
PrepareThreadForCodeFlushing(heap()->isolate(),
@@ -1422,25 +1398,34 @@ class RootMarkingVisitor : public ObjectVisitor {
// Helper class for pruning the string table.
-template <bool finalize_external_strings>
+template <bool finalize_external_strings, bool record_slots>
class StringTableCleaner : public ObjectVisitor {
public:
- explicit StringTableCleaner(Heap* heap) : heap_(heap), pointers_removed_(0) {}
+ StringTableCleaner(Heap* heap, HeapObject* table)
+ : heap_(heap), pointers_removed_(0), table_(table) {
+ DCHECK(!record_slots || table != nullptr);
+ }
void VisitPointers(Object** start, Object** end) override {
// Visit all HeapObject pointers in [start, end).
+ MarkCompactCollector* collector = heap_->mark_compact_collector();
for (Object** p = start; p < end; p++) {
Object* o = *p;
- if (o->IsHeapObject() &&
- Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(o)))) {
- if (finalize_external_strings) {
- DCHECK(o->IsExternalString());
- heap_->FinalizeExternalString(String::cast(*p));
- } else {
- pointers_removed_++;
+ if (o->IsHeapObject()) {
+ if (Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(o)))) {
+ if (finalize_external_strings) {
+ DCHECK(o->IsExternalString());
+ heap_->FinalizeExternalString(String::cast(*p));
+ } else {
+ pointers_removed_++;
+ }
+ // Set the entry to the_hole_value (as deleted).
+ *p = heap_->the_hole_value();
+ } else if (record_slots) {
+ // StringTable contains only old space strings.
+ DCHECK(!heap_->InNewSpace(o));
+ collector->RecordSlot(table_, p, o);
}
- // Set the entry to the_hole_value (as deleted).
- *p = heap_->the_hole_value();
}
}
}
@@ -1453,12 +1438,11 @@ class StringTableCleaner : public ObjectVisitor {
private:
Heap* heap_;
int pointers_removed_;
+ HeapObject* table_;
};
-
-typedef StringTableCleaner<false> InternalizedStringTableCleaner;
-typedef StringTableCleaner<true> ExternalStringTableCleaner;
-
+typedef StringTableCleaner<false, true> InternalizedStringTableCleaner;
+typedef StringTableCleaner<true, false> ExternalStringTableCleaner;
// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
// are retained.
@@ -1504,7 +1488,6 @@ void MarkCompactCollector::DiscoverGreyObjectsWithIterator(T* it) {
}
}
-
void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
DCHECK(!marking_deque()->IsFull());
LiveObjectIterator<kGreyObjects> it(p);
@@ -1518,6 +1501,39 @@ void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
}
}
+class RecordMigratedSlotVisitor final : public ObjectVisitor {
+ public:
+ inline void VisitPointer(Object** p) final {
+ RecordMigratedSlot(*p, reinterpret_cast<Address>(p));
+ }
+
+ inline void VisitPointers(Object** start, Object** end) final {
+ while (start < end) {
+ RecordMigratedSlot(*start, reinterpret_cast<Address>(start));
+ ++start;
+ }
+ }
+
+ inline void VisitCodeEntry(Address code_entry_slot) final {
+ Address code_entry = Memory::Address_at(code_entry_slot);
+ if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
+ RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(code_entry_slot),
+ CODE_ENTRY_SLOT, code_entry_slot);
+ }
+ }
+
+ private:
+ inline void RecordMigratedSlot(Object* value, Address slot) {
+ if (value->IsHeapObject()) {
+ Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
+ if (p->InNewSpace()) {
+ RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
+ } else if (p->IsEvacuationCandidate()) {
+ RememberedSet<OLD_TO_OLD>::Insert(Page::FromAddress(slot), slot);
+ }
+ }
+ }
+};
class MarkCompactCollector::HeapObjectVisitor {
public:
@@ -1525,40 +1541,83 @@ class MarkCompactCollector::HeapObjectVisitor {
virtual bool Visit(HeapObject* object) = 0;
};
-
class MarkCompactCollector::EvacuateVisitorBase
: public MarkCompactCollector::HeapObjectVisitor {
- public:
- EvacuateVisitorBase(Heap* heap, CompactionSpaceCollection* compaction_spaces,
- SlotsBuffer** evacuation_slots_buffer,
- LocalStoreBuffer* local_store_buffer)
+ protected:
+ enum MigrationMode { kFast, kProfiled };
+
+ EvacuateVisitorBase(Heap* heap, CompactionSpaceCollection* compaction_spaces)
: heap_(heap),
- evacuation_slots_buffer_(evacuation_slots_buffer),
compaction_spaces_(compaction_spaces),
- local_store_buffer_(local_store_buffer) {}
+ profiling_(
+ heap->isolate()->cpu_profiler()->is_profiling() ||
+ heap->isolate()->logger()->is_logging_code_events() ||
+ heap->isolate()->heap_profiler()->is_tracking_object_moves()) {}
- bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
- HeapObject** target_object) {
+ inline bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
+ HeapObject** target_object) {
int size = object->Size();
AllocationAlignment alignment = object->RequiredAlignment();
AllocationResult allocation = target_space->AllocateRaw(size, alignment);
if (allocation.To(target_object)) {
- heap_->mark_compact_collector()->MigrateObject(
- *target_object, object, size, target_space->identity(),
- evacuation_slots_buffer_, local_store_buffer_);
+ MigrateObject(*target_object, object, size, target_space->identity());
return true;
}
return false;
}
- protected:
+ inline void MigrateObject(HeapObject* dst, HeapObject* src, int size,
+ AllocationSpace dest) {
+ if (profiling_) {
+ MigrateObject<kProfiled>(dst, src, size, dest);
+ } else {
+ MigrateObject<kFast>(dst, src, size, dest);
+ }
+ }
+
+ template <MigrationMode mode>
+ inline void MigrateObject(HeapObject* dst, HeapObject* src, int size,
+ AllocationSpace dest) {
+ Address dst_addr = dst->address();
+ Address src_addr = src->address();
+ DCHECK(heap_->AllowedToBeMigrated(src, dest));
+ DCHECK(dest != LO_SPACE);
+ if (dest == OLD_SPACE) {
+ DCHECK_OBJECT_SIZE(size);
+ DCHECK(IsAligned(size, kPointerSize));
+ heap_->CopyBlock(dst_addr, src_addr, size);
+ if ((mode == kProfiled) && FLAG_ignition && dst->IsBytecodeArray()) {
+ PROFILE(heap_->isolate(),
+ CodeMoveEvent(AbstractCode::cast(src), dst_addr));
+ }
+ RecordMigratedSlotVisitor visitor;
+ dst->IterateBodyFast(dst->map()->instance_type(), size, &visitor);
+ } else if (dest == CODE_SPACE) {
+ DCHECK_CODEOBJECT_SIZE(size, heap_->code_space());
+ if (mode == kProfiled) {
+ PROFILE(heap_->isolate(),
+ CodeMoveEvent(AbstractCode::cast(src), dst_addr));
+ }
+ heap_->CopyBlock(dst_addr, src_addr, size);
+ RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(dst_addr),
+ RELOCATED_CODE_OBJECT, dst_addr);
+ Code::cast(dst)->Relocate(dst_addr - src_addr);
+ } else {
+ DCHECK_OBJECT_SIZE(size);
+ DCHECK(dest == NEW_SPACE);
+ heap_->CopyBlock(dst_addr, src_addr, size);
+ }
+ if (mode == kProfiled) {
+ heap_->OnMoveEvent(dst, src, size);
+ }
+ Memory::Address_at(src_addr) = dst_addr;
+ }
+
Heap* heap_;
- SlotsBuffer** evacuation_slots_buffer_;
CompactionSpaceCollection* compaction_spaces_;
- LocalStoreBuffer* local_store_buffer_;
+ bool profiling_;
};
-
class MarkCompactCollector::EvacuateNewSpaceVisitor final
: public MarkCompactCollector::EvacuateVisitorBase {
public:
@@ -1567,11 +1626,8 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
explicit EvacuateNewSpaceVisitor(Heap* heap,
CompactionSpaceCollection* compaction_spaces,
- SlotsBuffer** evacuation_slots_buffer,
- LocalStoreBuffer* local_store_buffer,
HashMap* local_pretenuring_feedback)
- : EvacuateVisitorBase(heap, compaction_spaces, evacuation_slots_buffer,
- local_store_buffer),
+ : EvacuateVisitorBase(heap, compaction_spaces),
buffer_(LocalAllocationBuffer::InvalidBuffer()),
space_to_allocate_(NEW_SPACE),
promoted_size_(0),
@@ -1596,10 +1652,7 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
}
HeapObject* target = nullptr;
AllocationSpace space = AllocateTargetObject(object, &target);
- heap_->mark_compact_collector()->MigrateObject(
- HeapObject::cast(target), object, size, space,
- (space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_,
- (space == NEW_SPACE) ? nullptr : local_store_buffer_);
+ MigrateObject(HeapObject::cast(target), object, size, space);
if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
}
@@ -1675,8 +1728,8 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
compaction_spaces_->Get(OLD_SPACE)->AllocateRaw(size_in_bytes,
alignment);
if (allocation.IsRetry()) {
- v8::internal::Heap::FatalProcessOutOfMemory(
- "MarkCompactCollector: semi-space copy, fallback in old gen", true);
+ FatalProcessOutOfMemory(
+ "MarkCompactCollector: semi-space copy, fallback in old gen\n");
}
return allocation;
}
@@ -1718,11 +1771,8 @@ class MarkCompactCollector::EvacuateOldSpaceVisitor final
: public MarkCompactCollector::EvacuateVisitorBase {
public:
EvacuateOldSpaceVisitor(Heap* heap,
- CompactionSpaceCollection* compaction_spaces,
- SlotsBuffer** evacuation_slots_buffer,
- LocalStoreBuffer* local_store_buffer)
- : EvacuateVisitorBase(heap, compaction_spaces, evacuation_slots_buffer,
- local_store_buffer) {}
+ CompactionSpaceCollection* compaction_spaces)
+ : EvacuateVisitorBase(heap, compaction_spaces) {}
bool Visit(HeapObject* object) override {
CompactionSpace* target_space = compaction_spaces_->Get(
@@ -1741,7 +1791,9 @@ void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
PageIterator it(space);
while (it.has_next()) {
Page* p = it.next();
- DiscoverGreyObjectsOnPage(p);
+ if (!p->IsFlagSet(Page::BLACK_PAGE)) {
+ DiscoverGreyObjectsOnPage(p);
+ }
if (marking_deque()->IsFull()) return;
}
}
@@ -2030,7 +2082,7 @@ void MarkingDeque::Uninitialize(bool aborting) {
void MarkCompactCollector::MarkLiveObjects() {
- GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK);
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
double start_time = 0.0;
if (FLAG_print_cumulative_gc_stat) {
start_time = heap_->MonotonicallyIncreasingTimeInMs();
@@ -2041,8 +2093,7 @@ void MarkCompactCollector::MarkLiveObjects() {
PostponeInterruptsScope postpone(isolate());
{
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
IncrementalMarking* incremental_marking = heap_->incremental_marking();
if (was_marked_incrementally_) {
incremental_marking->Finalize();
@@ -2064,27 +2115,30 @@ void MarkCompactCollector::MarkLiveObjects() {
MarkCompactCollector::kMaxMarkingDequeSize);
{
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_MARK_PREPARE_CODE_FLUSH);
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_PREPARE_CODE_FLUSH);
PrepareForCodeFlushing();
}
RootMarkingVisitor root_visitor(heap());
{
- GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
MarkRoots(&root_visitor);
ProcessTopOptimizedFrame(&root_visitor);
}
{
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
// The objects reachable from the roots are marked, yet unreachable
// objects are unmarked. Mark objects reachable due to host
// application specific logic or through Harmony weak maps.
- ProcessEphemeralMarking(&root_visitor, false);
+ {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERAL);
+ ProcessEphemeralMarking(&root_visitor, false);
+ ProcessMarkingDeque();
+ }
// The objects reachable from the roots, weak maps or object groups
// are marked. Objects pointed to only by weak global handles cannot be
@@ -2093,18 +2147,32 @@ void MarkCompactCollector::MarkLiveObjects() {
//
// First we identify nonlive weak handles and mark them as pending
// destruction.
- heap()->isolate()->global_handles()->IdentifyWeakHandles(
- &IsUnmarkedHeapObject);
+ {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES);
+ heap()->isolate()->global_handles()->IdentifyWeakHandles(
+ &IsUnmarkedHeapObject);
+ ProcessMarkingDeque();
+ }
// Then we mark the objects.
- heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
- ProcessMarkingDeque();
+
+ {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS);
+ heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
+ ProcessMarkingDeque();
+ }
// Repeat Harmony weak maps marking to mark unmarked objects reachable from
// the weak roots we just marked as pending destruction.
//
// We only process harmony collections, as all object groups have been fully
// processed and no weakly reachable node can discover new objects groups.
- ProcessEphemeralMarking(&root_visitor, true);
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
+ ProcessEphemeralMarking(&root_visitor, true);
+ ProcessMarkingDeque();
+ }
}
if (FLAG_print_cumulative_gc_stat) {
@@ -2121,36 +2189,33 @@ void MarkCompactCollector::MarkLiveObjects() {
void MarkCompactCollector::ClearNonLiveReferences() {
- GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
{
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_CLEAR_STRING_TABLE);
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE);
// Prune the string table removing all strings only pointed to by the
// string table. Cannot use string_table() here because the string
// table is marked.
StringTable* string_table = heap()->string_table();
- InternalizedStringTableCleaner internalized_visitor(heap());
+ InternalizedStringTableCleaner internalized_visitor(heap(), string_table);
string_table->IterateElements(&internalized_visitor);
string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
- ExternalStringTableCleaner external_visitor(heap());
+ ExternalStringTableCleaner external_visitor(heap(), nullptr);
heap()->external_string_table_.Iterate(&external_visitor);
heap()->external_string_table_.CleanUp();
}
{
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
// Process the weak references.
MarkCompactWeakObjectRetainer mark_compact_object_retainer;
heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
}
{
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_CLEAR_GLOBAL_HANDLES);
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_GLOBAL_HANDLES);
// Remove object groups after marking phase.
heap()->isolate()->global_handles()->RemoveObjectGroups();
@@ -2159,8 +2224,7 @@ void MarkCompactCollector::ClearNonLiveReferences() {
// Flush code from collected candidates.
if (is_code_flushing_enabled()) {
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_CLEAR_CODE_FLUSH);
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_CODE_FLUSH);
code_flusher_->ProcessCandidates();
}
@@ -2170,7 +2234,7 @@ void MarkCompactCollector::ClearNonLiveReferences() {
ClearWeakCells(&non_live_map_list, &dependent_code_list);
{
- GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_CLEAR_MAPS);
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_MAPS);
ClearSimpleMapTransitions(non_live_map_list);
ClearFullMapTransitions();
}
@@ -2179,14 +2243,13 @@ void MarkCompactCollector::ClearNonLiveReferences() {
ClearWeakCollections();
- ClearInvalidStoreAndSlotsBufferEntries();
+ ClearInvalidRememberedSetSlots();
}
void MarkCompactCollector::MarkDependentCodeForDeoptimization(
DependentCode* list_head) {
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_CLEAR_DEPENDENT_CODE);
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_DEPENDENT_CODE);
Isolate* isolate = this->isolate();
DependentCode* current = list_head;
while (current->length() > 0) {
@@ -2407,8 +2470,7 @@ void MarkCompactCollector::ProcessWeakCollections() {
void MarkCompactCollector::ClearWeakCollections() {
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
Object* weak_collection_obj = heap()->encountered_weak_collections();
while (weak_collection_obj != Smi::FromInt(0)) {
JSWeakCollection* weak_collection =
@@ -2445,8 +2507,7 @@ void MarkCompactCollector::AbortWeakCollections() {
void MarkCompactCollector::ClearWeakCells(Object** non_live_map_list,
DependentCode** dependent_code_list) {
Heap* heap = this->heap();
- GCTracer::Scope gc_scope(heap->tracer(),
- GCTracer::Scope::MC_CLEAR_WEAK_CELLS);
+ TRACE_GC(heap->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_CELLS);
Object* weak_cell_obj = heap->encountered_weak_cells();
Object* the_hole_value = heap->the_hole_value();
DependentCode* dependent_code_head =
@@ -2541,215 +2602,77 @@ void MarkCompactCollector::AbortTransitionArrays() {
heap()->set_encountered_transition_arrays(Smi::FromInt(0));
}
-void MarkCompactCollector::RecordMigratedSlot(
- Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer,
- LocalStoreBuffer* local_store_buffer) {
- // When parallel compaction is in progress, store and slots buffer entries
- // require synchronization.
- if (heap_->InNewSpace(value)) {
- if (compaction_in_progress_) {
- local_store_buffer->Record(slot);
- } else {
- Page* page = Page::FromAddress(slot);
- RememberedSet<OLD_TO_NEW>::Insert(page, slot);
- }
- } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
- SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
- reinterpret_cast<Object**>(slot),
- SlotsBuffer::IGNORE_OVERFLOW);
- }
-}
-
-
-void MarkCompactCollector::RecordMigratedCodeEntrySlot(
- Address code_entry, Address code_entry_slot,
- SlotsBuffer** evacuation_slots_buffer) {
- if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
- SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
- SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
- SlotsBuffer::IGNORE_OVERFLOW);
- }
-}
-
-
-void MarkCompactCollector::RecordMigratedCodeObjectSlot(
- Address code_object, SlotsBuffer** evacuation_slots_buffer) {
- SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
- SlotsBuffer::RELOCATED_CODE_OBJECT, code_object,
- SlotsBuffer::IGNORE_OVERFLOW);
-}
-
-
-static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
+static inline SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
if (RelocInfo::IsCodeTarget(rmode)) {
- return SlotsBuffer::CODE_TARGET_SLOT;
+ return CODE_TARGET_SLOT;
} else if (RelocInfo::IsCell(rmode)) {
- return SlotsBuffer::CELL_TARGET_SLOT;
+ return CELL_TARGET_SLOT;
} else if (RelocInfo::IsEmbeddedObject(rmode)) {
- return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
+ return EMBEDDED_OBJECT_SLOT;
} else if (RelocInfo::IsDebugBreakSlot(rmode)) {
- return SlotsBuffer::DEBUG_TARGET_SLOT;
+ return DEBUG_TARGET_SLOT;
}
UNREACHABLE();
- return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
+ return NUMBER_OF_SLOT_TYPES;
}
-
-static inline SlotsBuffer::SlotType DecodeSlotType(
- SlotsBuffer::ObjectSlot slot) {
- return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
-}
-
-
-void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
+void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
+ Object* target) {
Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
+ Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
RelocInfo::Mode rmode = rinfo->rmode();
if (target_page->IsEvacuationCandidate() &&
(rinfo->host() == NULL ||
!ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
Address addr = rinfo->pc();
- SlotsBuffer::SlotType slot_type = SlotTypeForRMode(rmode);
+ SlotType slot_type = SlotTypeForRMode(rmode);
if (rinfo->IsInConstantPool()) {
addr = rinfo->constant_pool_entry_address();
if (RelocInfo::IsCodeTarget(rmode)) {
- slot_type = SlotsBuffer::CODE_ENTRY_SLOT;
+ slot_type = CODE_ENTRY_SLOT;
} else {
DCHECK(RelocInfo::IsEmbeddedObject(rmode));
- slot_type = SlotsBuffer::OBJECT_SLOT;
+ slot_type = OBJECT_SLOT;
}
}
- bool success = SlotsBuffer::AddTo(
- slots_buffer_allocator_, target_page->slots_buffer_address(), slot_type,
- addr, SlotsBuffer::FAIL_ON_OVERFLOW);
- if (!success) {
- EvictPopularEvacuationCandidate(target_page);
- }
- }
-}
-
-
-class RecordMigratedSlotVisitor final : public ObjectVisitor {
- public:
- RecordMigratedSlotVisitor(MarkCompactCollector* collector,
- SlotsBuffer** evacuation_slots_buffer,
- LocalStoreBuffer* local_store_buffer)
- : collector_(collector),
- evacuation_slots_buffer_(evacuation_slots_buffer),
- local_store_buffer_(local_store_buffer) {}
-
- V8_INLINE void VisitPointer(Object** p) override {
- collector_->RecordMigratedSlot(*p, reinterpret_cast<Address>(p),
- evacuation_slots_buffer_,
- local_store_buffer_);
- }
-
- V8_INLINE void VisitPointers(Object** start, Object** end) override {
- while (start < end) {
- collector_->RecordMigratedSlot(*start, reinterpret_cast<Address>(start),
- evacuation_slots_buffer_,
- local_store_buffer_);
- ++start;
- }
+ RememberedSet<OLD_TO_OLD>::InsertTyped(source_page, slot_type, addr);
}
-
- V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {
- if (collector_->compacting_) {
- Address code_entry = Memory::Address_at(code_entry_slot);
- collector_->RecordMigratedCodeEntrySlot(code_entry, code_entry_slot,
- evacuation_slots_buffer_);
- }
- }
-
- private:
- MarkCompactCollector* collector_;
- SlotsBuffer** evacuation_slots_buffer_;
- LocalStoreBuffer* local_store_buffer_;
-};
-
-
-// We scavenge new space simultaneously with sweeping. This is done in two
-// passes.
-//
-// The first pass migrates all alive objects from one semispace to another or
-// promotes them to old space. Forwarding address is written directly into
-// first word of object without any encoding. If object is dead we write
-// NULL as a forwarding address.
-//
-// The second pass updates pointers to new space in all spaces. It is possible
-// to encounter pointers to dead new space objects during traversal of pointers
-// to new space. We should clear them to avoid encountering them during next
-// pointer iteration. This is an issue if the store buffer overflows and we
-// have to scan the entire old space, including dead objects, looking for
-// pointers to new space.
-void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
- int size, AllocationSpace dest,
- SlotsBuffer** evacuation_slots_buffer,
- LocalStoreBuffer* local_store_buffer) {
- Address dst_addr = dst->address();
- Address src_addr = src->address();
- DCHECK(heap()->AllowedToBeMigrated(src, dest));
- DCHECK(dest != LO_SPACE);
- if (dest == OLD_SPACE) {
- DCHECK_OBJECT_SIZE(size);
- DCHECK(evacuation_slots_buffer != nullptr);
- DCHECK(IsAligned(size, kPointerSize));
-
- heap()->MoveBlock(dst->address(), src->address(), size);
- RecordMigratedSlotVisitor visitor(this, evacuation_slots_buffer,
- local_store_buffer);
- dst->IterateBody(&visitor);
- } else if (dest == CODE_SPACE) {
- DCHECK_CODEOBJECT_SIZE(size, heap()->code_space());
- DCHECK(evacuation_slots_buffer != nullptr);
- PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
- heap()->MoveBlock(dst_addr, src_addr, size);
- RecordMigratedCodeObjectSlot(dst_addr, evacuation_slots_buffer);
- Code::cast(dst)->Relocate(dst_addr - src_addr);
- } else {
- DCHECK_OBJECT_SIZE(size);
- DCHECK(evacuation_slots_buffer == nullptr);
- DCHECK(dest == NEW_SPACE);
- heap()->MoveBlock(dst_addr, src_addr, size);
- }
- heap()->OnMoveEvent(dst, src, size);
- Memory::Address_at(src_addr) = dst_addr;
}
-
-static inline void UpdateSlot(Isolate* isolate, ObjectVisitor* v,
- SlotsBuffer::SlotType slot_type, Address addr) {
+static inline void UpdateTypedSlot(Isolate* isolate, ObjectVisitor* v,
+ SlotType slot_type, Address addr) {
switch (slot_type) {
- case SlotsBuffer::CODE_TARGET_SLOT: {
+ case CODE_TARGET_SLOT: {
RelocInfo rinfo(isolate, addr, RelocInfo::CODE_TARGET, 0, NULL);
rinfo.Visit(isolate, v);
break;
}
- case SlotsBuffer::CELL_TARGET_SLOT: {
+ case CELL_TARGET_SLOT: {
RelocInfo rinfo(isolate, addr, RelocInfo::CELL, 0, NULL);
rinfo.Visit(isolate, v);
break;
}
- case SlotsBuffer::CODE_ENTRY_SLOT: {
+ case CODE_ENTRY_SLOT: {
v->VisitCodeEntry(addr);
break;
}
- case SlotsBuffer::RELOCATED_CODE_OBJECT: {
+ case RELOCATED_CODE_OBJECT: {
HeapObject* obj = HeapObject::FromAddress(addr);
Code::BodyDescriptor::IterateBody(obj, v);
break;
}
- case SlotsBuffer::DEBUG_TARGET_SLOT: {
+ case DEBUG_TARGET_SLOT: {
RelocInfo rinfo(isolate, addr, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION, 0,
NULL);
if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
break;
}
- case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
+ case EMBEDDED_OBJECT_SLOT: {
RelocInfo rinfo(isolate, addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
rinfo.Visit(isolate, v);
break;
}
- case SlotsBuffer::OBJECT_SLOT: {
+ case OBJECT_SLOT: {
v->VisitPointer(reinterpret_cast<Object**>(addr));
break;
}
@@ -2853,48 +2776,6 @@ class PointersUpdatingVisitor : public ObjectVisitor {
Heap* heap_;
};
-
-void MarkCompactCollector::UpdateSlots(SlotsBuffer* buffer) {
- PointersUpdatingVisitor v(heap_);
- size_t buffer_size = buffer->Size();
-
- for (size_t slot_idx = 0; slot_idx < buffer_size; ++slot_idx) {
- SlotsBuffer::ObjectSlot slot = buffer->Get(slot_idx);
- if (!SlotsBuffer::IsTypedSlot(slot)) {
- PointersUpdatingVisitor::UpdateSlot(heap_, slot);
- } else {
- ++slot_idx;
- DCHECK(slot_idx < buffer_size);
- UpdateSlot(heap_->isolate(), &v, DecodeSlotType(slot),
- reinterpret_cast<Address>(buffer->Get(slot_idx)));
- }
- }
-}
-
-
-void MarkCompactCollector::UpdateSlotsRecordedIn(SlotsBuffer* buffer) {
- while (buffer != NULL) {
- UpdateSlots(buffer);
- buffer = buffer->next();
- }
-}
-
-
-static void UpdatePointer(HeapObject** address, HeapObject* object) {
- MapWord map_word = object->map_word();
- // Since we only filter invalid slots in old space, the store buffer can
- // still contain stale pointers in large object and in map spaces. Ignore
- // these pointers here.
- DCHECK(map_word.IsForwardingAddress() ||
- !object->GetHeap()->old_space()->Contains(
- reinterpret_cast<Address>(address)));
- if (map_word.IsForwardingAddress()) {
- // Update the corresponding slot.
- *address = map_word.ToForwardingAddress();
- }
-}
-
-
static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
Object** p) {
MapWord map_word = HeapObject::cast(*p)->map_word();
@@ -2906,21 +2787,15 @@ static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
return String::cast(*p);
}
-
-bool MarkCompactCollector::IsSlotInBlackObject(Page* p, Address slot,
- HeapObject** out_object) {
+bool MarkCompactCollector::IsSlotInBlackObject(MemoryChunk* p, Address slot) {
Space* owner = p->owner();
- if (owner == heap_->lo_space() || owner == NULL) {
- Object* large_object = heap_->lo_space()->FindObject(slot);
- // This object has to exist, otherwise we would not have recorded a slot
- // for it.
- CHECK(large_object->IsHeapObject());
- HeapObject* large_heap_object = HeapObject::cast(large_object);
- if (IsMarked(large_heap_object)) {
- *out_object = large_heap_object;
- return true;
- }
- return false;
+ DCHECK(owner != heap_->lo_space() && owner != nullptr);
+ USE(owner);
+
+ // If we are on a black page, we cannot find the actual object start
+ // easiliy. We just return true but do not set the out_object.
+ if (p->IsFlagSet(Page::BLACK_PAGE)) {
+ return true;
}
uint32_t mark_bit_index = p->AddressToMarkbitIndex(slot);
@@ -2995,66 +2870,49 @@ bool MarkCompactCollector::IsSlotInBlackObject(Page* p, Address slot,
// in a live object.
// Slots pointing to the first word of an object are invalid and removed.
// This can happen when we move the object header while left trimming.
- *out_object = object;
return true;
}
return false;
}
-
-bool MarkCompactCollector::IsSlotInBlackObjectSlow(Page* p, Address slot) {
- // This function does not support large objects right now.
+HeapObject* MarkCompactCollector::FindBlackObjectBySlotSlow(Address slot) {
+ Page* p = Page::FromAddress(slot);
Space* owner = p->owner();
- if (owner == heap_->lo_space() || owner == NULL) {
+ if (owner == heap_->lo_space() || owner == nullptr) {
Object* large_object = heap_->lo_space()->FindObject(slot);
// This object has to exist, otherwise we would not have recorded a slot
// for it.
CHECK(large_object->IsHeapObject());
HeapObject* large_heap_object = HeapObject::cast(large_object);
+
if (IsMarked(large_heap_object)) {
- return true;
+ return large_heap_object;
}
- return false;
+ return nullptr;
}
- LiveObjectIterator<kBlackObjects> it(p);
- HeapObject* object = NULL;
- while ((object = it.Next()) != NULL) {
- int size = object->Size();
-
- if (object->address() > slot) return false;
- if (object->address() <= slot && slot < (object->address() + size)) {
- return true;
+ if (p->IsFlagSet(Page::BLACK_PAGE)) {
+ HeapObjectIterator it(p);
+ HeapObject* object = nullptr;
+ while ((object = it.Next()) != nullptr) {
+ int size = object->Size();
+ if (object->address() > slot) return nullptr;
+ if (object->address() <= slot && slot < (object->address() + size)) {
+ return object;
+ }
+ }
+ } else {
+ LiveObjectIterator<kBlackObjects> it(p);
+ HeapObject* object = nullptr;
+ while ((object = it.Next()) != nullptr) {
+ int size = object->Size();
+ if (object->address() > slot) return nullptr;
+ if (object->address() <= slot && slot < (object->address() + size)) {
+ return object;
+ }
}
}
- return false;
-}
-
-
-bool MarkCompactCollector::IsSlotInLiveObject(Address slot) {
- HeapObject* object = NULL;
- // The target object is black but we don't know if the source slot is black.
- // The source object could have died and the slot could be part of a free
- // space. Find out based on mark bits if the slot is part of a live object.
- if (!IsSlotInBlackObject(Page::FromAddress(slot), slot, &object)) {
- return false;
- }
-
- DCHECK(object != NULL);
- int offset = static_cast<int>(slot - object->address());
- return object->IsValidSlot(offset);
-}
-
-
-void MarkCompactCollector::VerifyIsSlotInLiveObject(Address slot,
- HeapObject* object) {
- // The target object has to be black.
- CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
-
- // The target object is black but we don't know if the source slot is black.
- // The source object could have died and the slot could be part of a free
- // space. Use the mark bit iterator to find out about liveness of the slot.
- CHECK(IsSlotInBlackObjectSlow(Page::FromAddress(slot), slot));
+ return nullptr;
}
@@ -3074,36 +2932,20 @@ void MarkCompactCollector::EvacuateNewSpaceEpilogue() {
}
-void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized(
- SlotsBuffer* evacuation_slots_buffer) {
- base::LockGuard<base::Mutex> lock_guard(&evacuation_slots_buffers_mutex_);
- evacuation_slots_buffers_.Add(evacuation_slots_buffer);
-}
-
class MarkCompactCollector::Evacuator : public Malloced {
public:
- Evacuator(MarkCompactCollector* collector,
- const List<Page*>& evacuation_candidates,
- const List<NewSpacePage*>& newspace_evacuation_candidates)
+ explicit Evacuator(MarkCompactCollector* collector)
: collector_(collector),
- evacuation_candidates_(evacuation_candidates),
- newspace_evacuation_candidates_(newspace_evacuation_candidates),
compaction_spaces_(collector->heap()),
- local_slots_buffer_(nullptr),
- local_store_buffer_(collector->heap()),
local_pretenuring_feedback_(HashMap::PointersMatch,
kInitialLocalPretenuringFeedbackCapacity),
new_space_visitor_(collector->heap(), &compaction_spaces_,
- &local_slots_buffer_, &local_store_buffer_,
&local_pretenuring_feedback_),
- old_space_visitor_(collector->heap(), &compaction_spaces_,
- &local_slots_buffer_, &local_store_buffer_),
+ old_space_visitor_(collector->heap(), &compaction_spaces_),
duration_(0.0),
- bytes_compacted_(0),
- task_id_(0) {}
+ bytes_compacted_(0) {}
- // Evacuate the configured set of pages in parallel.
- inline void EvacuatePages();
+ inline bool EvacuatePage(MemoryChunk* chunk);
// Merge back locally cached info sequentially. Note that this method needs
// to be called from the main thread.
@@ -3111,9 +2953,6 @@ class MarkCompactCollector::Evacuator : public Malloced {
CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; }
- uint32_t task_id() { return task_id_; }
- void set_task_id(uint32_t id) { task_id_ = id; }
-
private:
static const int kInitialLocalPretenuringFeedbackCapacity = 256;
@@ -3128,77 +2967,58 @@ class MarkCompactCollector::Evacuator : public Malloced {
MarkCompactCollector* collector_;
- // Pages to process.
- const List<Page*>& evacuation_candidates_;
- const List<NewSpacePage*>& newspace_evacuation_candidates_;
-
// Locally cached collector data.
CompactionSpaceCollection compaction_spaces_;
- SlotsBuffer* local_slots_buffer_;
- LocalStoreBuffer local_store_buffer_;
HashMap local_pretenuring_feedback_;
- // Vistors for the corresponding spaces.
+ // Visitors for the corresponding spaces.
EvacuateNewSpaceVisitor new_space_visitor_;
EvacuateOldSpaceVisitor old_space_visitor_;
// Book keeping info.
double duration_;
intptr_t bytes_compacted_;
-
- // Task id, if this evacuator is executed on a background task instead of
- // the main thread. Can be used to try to abort the task currently scheduled
- // to executed to evacuate pages.
- uint32_t task_id_;
};
bool MarkCompactCollector::Evacuator::EvacuateSinglePage(
MemoryChunk* p, HeapObjectVisitor* visitor) {
- bool success = true;
- if (p->parallel_compaction_state().TrySetValue(
- MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
- if (p->IsEvacuationCandidate() || p->InNewSpace()) {
- DCHECK_EQ(p->parallel_compaction_state().Value(),
- MemoryChunk::kCompactingInProgress);
- int saved_live_bytes = p->LiveBytes();
- double evacuation_time;
- {
- AlwaysAllocateScope always_allocate(heap()->isolate());
- TimedScope timed_scope(&evacuation_time);
- success = collector_->VisitLiveObjects(p, visitor, kClearMarkbits);
- }
- if (success) {
- ReportCompactionProgress(evacuation_time, saved_live_bytes);
- p->parallel_compaction_state().SetValue(
- MemoryChunk::kCompactingFinalize);
- } else {
- p->parallel_compaction_state().SetValue(
- MemoryChunk::kCompactingAborted);
- }
- } else {
- // There could be popular pages in the list of evacuation candidates
- // which we do not compact.
- p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
- }
+ bool success = false;
+ DCHECK(p->IsEvacuationCandidate() || p->InNewSpace());
+ int saved_live_bytes = p->LiveBytes();
+ double evacuation_time;
+ {
+ AlwaysAllocateScope always_allocate(heap()->isolate());
+ TimedScope timed_scope(&evacuation_time);
+ success = collector_->VisitLiveObjects(p, visitor, kClearMarkbits);
+ }
+ if (FLAG_trace_evacuation) {
+ PrintIsolate(heap()->isolate(),
+ "evacuation[%p]: page=%p new_space=%d executable=%d "
+ "live_bytes=%d time=%f\n",
+ this, p, p->InNewSpace(),
+ p->IsFlagSet(MemoryChunk::IS_EXECUTABLE), saved_live_bytes,
+ evacuation_time);
+ }
+ if (success) {
+ ReportCompactionProgress(evacuation_time, saved_live_bytes);
}
return success;
}
-void MarkCompactCollector::Evacuator::EvacuatePages() {
- for (NewSpacePage* p : newspace_evacuation_candidates_) {
- DCHECK(p->InNewSpace());
- DCHECK_EQ(p->concurrent_sweeping_state().Value(),
+bool MarkCompactCollector::Evacuator::EvacuatePage(MemoryChunk* chunk) {
+ bool success = false;
+ if (chunk->InNewSpace()) {
+ DCHECK_EQ(chunk->concurrent_sweeping_state().Value(),
NewSpacePage::kSweepingDone);
- bool success = EvacuateSinglePage(p, &new_space_visitor_);
+ success = EvacuateSinglePage(chunk, &new_space_visitor_);
DCHECK(success);
USE(success);
+ } else {
+ DCHECK(chunk->IsEvacuationCandidate());
+ DCHECK_EQ(chunk->concurrent_sweeping_state().Value(), Page::kSweepingDone);
+ success = EvacuateSinglePage(chunk, &old_space_visitor_);
}
- for (Page* p : evacuation_candidates_) {
- DCHECK(p->IsEvacuationCandidate() ||
- p->IsFlagSet(MemoryChunk::RESCAN_ON_EVACUATION));
- DCHECK_EQ(p->concurrent_sweeping_state().Value(), Page::kSweepingDone);
- EvacuateSinglePage(p, &old_space_visitor_);
- }
+ return success;
}
void MarkCompactCollector::Evacuator::Finalize() {
@@ -3213,33 +3033,8 @@ void MarkCompactCollector::Evacuator::Finalize() {
new_space_visitor_.promoted_size() +
new_space_visitor_.semispace_copied_size());
heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
- local_store_buffer_.Process(heap()->store_buffer());
- collector_->AddEvacuationSlotsBufferSynchronized(local_slots_buffer_);
}
-class MarkCompactCollector::CompactionTask : public CancelableTask {
- public:
- explicit CompactionTask(Heap* heap, Evacuator* evacuator)
- : CancelableTask(heap->isolate()), heap_(heap), evacuator_(evacuator) {
- evacuator->set_task_id(id());
- }
-
- virtual ~CompactionTask() {}
-
- private:
- // v8::internal::CancelableTask overrides.
- void RunInternal() override {
- evacuator_->EvacuatePages();
- heap_->mark_compact_collector()
- ->pending_compaction_tasks_semaphore_.Signal();
- }
-
- Heap* heap_;
- Evacuator* evacuator_;
-
- DISALLOW_COPY_AND_ASSIGN(CompactionTask);
-};
-
int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
intptr_t live_bytes) {
if (!FLAG_parallel_compaction) return 1;
@@ -3252,15 +3047,17 @@ int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
const double kTargetCompactionTimeInMs = 1;
const int kNumSweepingTasks = 3;
- intptr_t compaction_speed =
+ double compaction_speed =
heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
- const int available_cores =
- Max(1, base::SysInfo::NumberOfProcessors() - kNumSweepingTasks - 1);
+ const int available_cores = Max(
+ 1, static_cast<int>(
+ V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()) -
+ kNumSweepingTasks - 1);
int tasks;
if (compaction_speed > 0) {
- tasks = 1 + static_cast<int>(static_cast<double>(live_bytes) /
- compaction_speed / kTargetCompactionTimeInMs);
+ tasks = 1 + static_cast<int>(live_bytes / compaction_speed /
+ kTargetCompactionTimeInMs);
} else {
tasks = pages;
}
@@ -3268,133 +3065,98 @@ int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
return Min(available_cores, tasks_capped_pages);
}
+class EvacuationJobTraits {
+ public:
+ typedef int* PerPageData; // Pointer to number of aborted pages.
+ typedef MarkCompactCollector::Evacuator* PerTaskData;
-void MarkCompactCollector::EvacuatePagesInParallel() {
- int num_pages = 0;
- intptr_t live_bytes = 0;
- for (Page* page : evacuation_candidates_) {
- num_pages++;
- live_bytes += page->LiveBytes();
- }
- for (NewSpacePage* page : newspace_evacuation_candidates_) {
- num_pages++;
- live_bytes += page->LiveBytes();
- }
- DCHECK_GE(num_pages, 1);
-
- // Used for trace summary.
- intptr_t compaction_speed = 0;
- if (FLAG_trace_fragmentation) {
- compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
- }
-
- const int num_tasks = NumberOfParallelCompactionTasks(num_pages, live_bytes);
+ static const bool NeedSequentialFinalization = true;
- // Set up compaction spaces.
- Evacuator** evacuators = new Evacuator*[num_tasks];
- for (int i = 0; i < num_tasks; i++) {
- evacuators[i] = new Evacuator(this, evacuation_candidates_,
- newspace_evacuation_candidates_);
+ static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator,
+ MemoryChunk* chunk, PerPageData) {
+ return evacuator->EvacuatePage(chunk);
}
- // Kick off parallel tasks.
- StartParallelCompaction(evacuators, num_tasks);
- // Wait for unfinished and not-yet-started tasks.
- WaitUntilCompactionCompleted(&evacuators[1], num_tasks - 1);
-
- // Finalize local evacuators by merging back all locally cached data.
- for (int i = 0; i < num_tasks; i++) {
- evacuators[i]->Finalize();
- delete evacuators[i];
- }
- delete[] evacuators;
-
- // Finalize pages sequentially.
- for (NewSpacePage* p : newspace_evacuation_candidates_) {
- DCHECK_EQ(p->parallel_compaction_state().Value(),
- MemoryChunk::kCompactingFinalize);
- p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
- }
-
- int abandoned_pages = 0;
- for (Page* p : evacuation_candidates_) {
- switch (p->parallel_compaction_state().Value()) {
- case MemoryChunk::ParallelCompactingState::kCompactingAborted:
+ static void FinalizePageSequentially(Heap*, MemoryChunk* chunk, bool success,
+ PerPageData data) {
+ if (chunk->InNewSpace()) {
+ DCHECK(success);
+ } else {
+ Page* p = static_cast<Page*>(chunk);
+ if (success) {
+ DCHECK(p->IsEvacuationCandidate());
+ DCHECK(p->SweepingDone());
+ p->Unlink();
+ } else {
// We have partially compacted the page, i.e., some objects may have
// moved, others are still in place.
// We need to:
- // - Leave the evacuation candidate flag for later processing of
- // slots buffer entries.
+ // - Leave the evacuation candidate flag for later processing of slots
+ // buffer entries.
// - Leave the slots buffer there for processing of entries added by
// the write barrier.
// - Rescan the page as slot recording in the migration buffer only
// happens upon moving (which we potentially didn't do).
// - Leave the page in the list of pages of a space since we could not
// fully evacuate it.
- // - Mark them for rescanning for store buffer entries as we otherwise
- // might have stale store buffer entries that become "valid" again
- // after reusing the memory. Note that all existing store buffer
- // entries of such pages are filtered before rescanning.
DCHECK(p->IsEvacuationCandidate());
p->SetFlag(Page::COMPACTION_WAS_ABORTED);
- abandoned_pages++;
- break;
- case MemoryChunk::kCompactingFinalize:
- DCHECK(p->IsEvacuationCandidate());
- DCHECK(p->SweepingDone());
- p->Unlink();
- break;
- case MemoryChunk::kCompactingDone:
- DCHECK(p->IsFlagSet(Page::POPULAR_PAGE));
- DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
- break;
- default:
- // MemoryChunk::kCompactingInProgress.
- UNREACHABLE();
+ *data += 1;
+ }
}
- p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
}
- if (FLAG_trace_fragmentation) {
- PrintIsolate(isolate(),
- "%8.0f ms: compaction: parallel=%d pages=%d aborted=%d "
- "tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX
- "d compaction_speed=%" V8_PTR_PREFIX "d\n",
- isolate()->time_millis_since_init(), FLAG_parallel_compaction,
- num_pages, abandoned_pages, num_tasks,
- base::SysInfo::NumberOfProcessors(), live_bytes,
- compaction_speed);
+};
+
+void MarkCompactCollector::EvacuatePagesInParallel() {
+ PageParallelJob<EvacuationJobTraits> job(
+ heap_, heap_->isolate()->cancelable_task_manager(),
+ &page_parallel_job_semaphore_);
+
+ int abandoned_pages = 0;
+ intptr_t live_bytes = 0;
+ for (Page* page : evacuation_candidates_) {
+ live_bytes += page->LiveBytes();
+ job.AddPage(page, &abandoned_pages);
}
-}
+ for (NewSpacePage* page : newspace_evacuation_candidates_) {
+ live_bytes += page->LiveBytes();
+ job.AddPage(page, &abandoned_pages);
+ }
+ DCHECK_GE(job.NumberOfPages(), 1);
-void MarkCompactCollector::StartParallelCompaction(Evacuator** evacuators,
- int len) {
- compaction_in_progress_ = true;
- for (int i = 1; i < len; i++) {
- CompactionTask* task = new CompactionTask(heap(), evacuators[i]);
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- task, v8::Platform::kShortRunningTask);
+ // Used for trace summary.
+ double compaction_speed = 0;
+ if (FLAG_trace_evacuation) {
+ compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
}
- // Contribute on main thread.
- evacuators[0]->EvacuatePages();
-}
+ const int wanted_num_tasks =
+ NumberOfParallelCompactionTasks(job.NumberOfPages(), live_bytes);
+ Evacuator** evacuators = new Evacuator*[wanted_num_tasks];
+ for (int i = 0; i < wanted_num_tasks; i++) {
+ evacuators[i] = new Evacuator(this);
+ }
+ job.Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; });
+ for (int i = 0; i < wanted_num_tasks; i++) {
+ evacuators[i]->Finalize();
+ delete evacuators[i];
+ }
+ delete[] evacuators;
-void MarkCompactCollector::WaitUntilCompactionCompleted(Evacuator** evacuators,
- int len) {
- // Try to cancel compaction tasks that have not been run (as they might be
- // stuck in a worker queue). Tasks that cannot be canceled, have either
- // already completed or are still running, hence we need to wait for their
- // semaphore signal.
- for (int i = 0; i < len; i++) {
- if (!heap()->isolate()->cancelable_task_manager()->TryAbort(
- evacuators[i]->task_id())) {
- pending_compaction_tasks_semaphore_.Wait();
- }
+ if (FLAG_trace_evacuation) {
+ PrintIsolate(
+ isolate(),
+ "%8.0f ms: evacuation-summary: parallel=%s pages=%d aborted=%d "
+ "wanted_tasks=%d tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX
+ "d compaction_speed=%.f\n",
+ isolate()->time_millis_since_init(),
+ FLAG_parallel_compaction ? "yes" : "no", job.NumberOfPages(),
+ abandoned_pages, wanted_num_tasks, job.NumberOfTasks(),
+ V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(),
+ live_bytes, compaction_speed);
}
- compaction_in_progress_ = false;
}
-
class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
public:
virtual Object* RetainAs(Object* object) {
@@ -3409,28 +3171,12 @@ class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
}
};
-
enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
-
enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST };
-
enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
-
-template <MarkCompactCollector::SweepingParallelism mode>
-static intptr_t Free(PagedSpace* space, FreeList* free_list, Address start,
- int size) {
- if (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD) {
- DCHECK(free_list == NULL);
- return space->Free(start, size);
- } else {
- return size - free_list->Free(start, size);
- }
-}
-
-
// Sweeps a page. After sweeping the page can be iterated.
// Slots in live objects pointing into evacuation candidates are updated
// if requested.
@@ -3439,9 +3185,9 @@ template <SweepingMode sweeping_mode,
MarkCompactCollector::SweepingParallelism parallelism,
SkipListRebuildingMode skip_list_mode,
FreeSpaceTreatmentMode free_space_mode>
-static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
- ObjectVisitor* v) {
+static int Sweep(PagedSpace* space, Page* p, ObjectVisitor* v) {
DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
+ DCHECK(!p->IsFlagSet(Page::BLACK_PAGE));
DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
space->identity() == CODE_SPACE);
DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
@@ -3473,7 +3219,7 @@ static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
if (free_space_mode == ZAP_FREE_SPACE) {
memset(free_start, 0xcc, size);
}
- freed_bytes = Free<parallelism>(space, free_list, free_start, size);
+ freed_bytes = space->UnaccountedFree(free_start, size);
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
}
Map* map = object->synchronized_map();
@@ -3501,7 +3247,7 @@ static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
if (free_space_mode == ZAP_FREE_SPACE) {
memset(free_start, 0xcc, size);
}
- freed_bytes = Free<parallelism>(space, free_list, free_start, size);
+ freed_bytes = space->UnaccountedFree(free_start, size);
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
}
p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
@@ -3521,8 +3267,10 @@ void MarkCompactCollector::InvalidateCode(Code* code) {
// Ignore all slots that might have been recorded in the body of the
// deoptimized code object. Assumption: no slots will be recorded for
// this object after invalidating it.
- RemoveObjectSlots(code->instruction_start(),
- code->address() + code->Size());
+ Page* page = Page::FromAddress(code->address());
+ Address start = code->instruction_start();
+ Address end = code->address() + code->Size();
+ RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(page, start, end);
}
}
@@ -3533,21 +3281,6 @@ bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
}
-void MarkCompactCollector::RemoveObjectSlots(Address start_slot,
- Address end_slot) {
- // Remove entries by replacing them with an old-space slot containing a smi
- // that is located in an unmovable page.
- for (Page* p : evacuation_candidates_) {
- DCHECK(p->IsEvacuationCandidate() ||
- p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
- if (p->IsEvacuationCandidate()) {
- SlotsBuffer::RemoveObjectSlots(heap_, p->slots_buffer(), start_slot,
- end_slot);
- }
- }
-}
-
-
#ifdef VERIFY_HEAP
static void VerifyAllBlackObjects(MemoryChunk* page) {
LiveObjectIterator<kAllLiveObjects> it(page);
@@ -3629,33 +3362,36 @@ void MarkCompactCollector::SweepAbortedPages() {
switch (space->identity()) {
case OLD_SPACE:
Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
- IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
+ IGNORE_FREE_SPACE>(space, p, nullptr);
break;
case CODE_SPACE:
if (FLAG_zap_code_space) {
Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
- ZAP_FREE_SPACE>(space, NULL, p, nullptr);
+ ZAP_FREE_SPACE>(space, p, nullptr);
} else {
Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
- IGNORE_FREE_SPACE>(space, NULL, p, nullptr);
+ IGNORE_FREE_SPACE>(space, p, nullptr);
}
break;
default:
UNREACHABLE();
break;
}
+ {
+ base::LockGuard<base::Mutex> guard(&swept_pages_mutex_);
+ swept_pages(space->identity())->Add(p);
+ }
}
}
}
void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
- GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
Heap::RelocationLock relocation_lock(heap());
{
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_EVACUATE_NEW_SPACE);
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY);
EvacuationScope evacuation_scope(this);
EvacuateNewSpacePrologue();
@@ -3673,8 +3409,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
heap()->FreeQueuedChunks();
{
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
// After updating all pointers, we can finally sweep the aborted pages,
// effectively overriding any forward pointers.
SweepAbortedPages();
@@ -3695,127 +3430,170 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
#endif
}
+template <PointerDirection direction>
+class PointerUpdateJobTraits {
+ public:
+ typedef int PerPageData; // Per page data is not used in this job.
+ typedef PointersUpdatingVisitor* PerTaskData;
-void MarkCompactCollector::UpdatePointersAfterEvacuation() {
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
- {
- GCTracer::Scope gc_scope(
- heap()->tracer(),
- GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
- UpdateSlotsRecordedIn(migration_slots_buffer_);
- if (FLAG_trace_fragmentation_verbose) {
- PrintF(" migration slots buffer: %d\n",
- SlotsBuffer::SizeOfChain(migration_slots_buffer_));
+ static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor,
+ MemoryChunk* chunk, PerPageData) {
+ UpdateUntypedPointers(heap, chunk);
+ UpdateTypedPointers(heap, chunk, visitor);
+ return true;
+ }
+ static const bool NeedSequentialFinalization = false;
+ static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
+ }
+
+ private:
+ static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) {
+ if (direction == OLD_TO_NEW) {
+ RememberedSet<OLD_TO_NEW>::IterateWithWrapper(heap, chunk,
+ UpdateOldToNewSlot);
+ } else {
+ RememberedSet<OLD_TO_OLD>::Iterate(chunk, [heap](Address slot) {
+ PointersUpdatingVisitor::UpdateSlot(heap,
+ reinterpret_cast<Object**>(slot));
+ return REMOVE_SLOT;
+ });
+ }
+ }
+
+ static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk,
+ PointersUpdatingVisitor* visitor) {
+ if (direction == OLD_TO_OLD) {
+ Isolate* isolate = heap->isolate();
+ RememberedSet<OLD_TO_OLD>::IterateTyped(
+ chunk, [isolate, visitor](SlotType type, Address slot) {
+ UpdateTypedSlot(isolate, visitor, type, slot);
+ return REMOVE_SLOT;
+ });
+ }
+ }
+
+ static void UpdateOldToNewSlot(HeapObject** address, HeapObject* object) {
+ MapWord map_word = object->map_word();
+ // Since we only filter invalid slots in old space, the store buffer can
+ // still contain stale pointers in large object and in map spaces. Ignore
+ // these pointers here.
+ DCHECK(map_word.IsForwardingAddress() ||
+ !object->GetHeap()->old_space()->Contains(
+ reinterpret_cast<Address>(address)));
+ if (map_word.IsForwardingAddress()) {
+ // Update the corresponding slot.
+ *address = map_word.ToForwardingAddress();
}
- slots_buffer_allocator_->DeallocateChain(&migration_slots_buffer_);
- DCHECK(migration_slots_buffer_ == NULL);
+ }
+};
+
+int NumberOfPointerUpdateTasks(int pages) {
+ if (!FLAG_parallel_pointer_update) return 1;
+ const int kMaxTasks = 4;
+ const int kPagesPerTask = 4;
+ return Min(kMaxTasks, (pages + kPagesPerTask - 1) / kPagesPerTask);
+}
+
+template <PointerDirection direction>
+void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
+ PageParallelJob<PointerUpdateJobTraits<direction> > job(
+ heap, heap->isolate()->cancelable_task_manager(), semaphore);
+ RememberedSet<direction>::IterateMemoryChunks(
+ heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); });
+ PointersUpdatingVisitor visitor(heap);
+ int num_pages = job.NumberOfPages();
+ int num_tasks = NumberOfPointerUpdateTasks(num_pages);
+ job.Run(num_tasks, [&visitor](int i) { return &visitor; });
+}
+
+class ToSpacePointerUpdateJobTraits {
+ public:
+ typedef std::pair<Address, Address> PerPageData;
+ typedef PointersUpdatingVisitor* PerTaskData;
- // TODO(hpayer): Process the slots buffers in parallel. This has to be done
- // after evacuation of all pages finishes.
- int buffers = evacuation_slots_buffers_.length();
- for (int i = 0; i < buffers; i++) {
- SlotsBuffer* buffer = evacuation_slots_buffers_[i];
- UpdateSlotsRecordedIn(buffer);
- slots_buffer_allocator_->DeallocateChain(&buffer);
+ static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor,
+ MemoryChunk* chunk, PerPageData limits) {
+ for (Address cur = limits.first; cur < limits.second;) {
+ HeapObject* object = HeapObject::FromAddress(cur);
+ Map* map = object->map();
+ int size = object->SizeFromMap(map);
+ object->IterateBody(map->instance_type(), size, visitor);
+ cur += size;
}
- evacuation_slots_buffers_.Rewind(0);
+ return true;
+ }
+ static const bool NeedSequentialFinalization = false;
+ static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
+ }
+};
+
+void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
+ PageParallelJob<ToSpacePointerUpdateJobTraits> job(
+ heap, heap->isolate()->cancelable_task_manager(), semaphore);
+ Address space_start = heap->new_space()->bottom();
+ Address space_end = heap->new_space()->top();
+ NewSpacePageIterator it(space_start, space_end);
+ while (it.has_next()) {
+ NewSpacePage* page = it.next();
+ Address start =
+ page->Contains(space_start) ? space_start : page->area_start();
+ Address end = page->Contains(space_end) ? space_end : page->area_end();
+ job.AddPage(page, std::make_pair(start, end));
}
+ PointersUpdatingVisitor visitor(heap);
+ int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1;
+ job.Run(num_tasks, [&visitor](int i) { return &visitor; });
+}
+
+void MarkCompactCollector::UpdatePointersAfterEvacuation() {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
- // Second pass: find pointers to new space and update them.
PointersUpdatingVisitor updating_visitor(heap());
{
- GCTracer::Scope gc_scope(
- heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
- // Update pointers in to space.
- SemiSpaceIterator to_it(heap()->new_space());
- for (HeapObject* object = to_it.Next(); object != NULL;
- object = to_it.Next()) {
- Map* map = object->map();
- object->IterateBody(map->instance_type(), object->SizeFromMap(map),
- &updating_visitor);
- }
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
+ UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_);
// Update roots.
heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
+ UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_);
+ }
- RememberedSet<OLD_TO_NEW>::IterateWithWrapper(heap_, UpdatePointer);
+ {
+ Heap* heap = this->heap();
+ TRACE_GC(heap->tracer(),
+ GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
+ UpdatePointersInParallel<OLD_TO_OLD>(heap_, &page_parallel_job_semaphore_);
}
{
- GCTracer::Scope gc_scope(
- heap()->tracer(),
- GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED);
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED);
for (Page* p : evacuation_candidates_) {
- DCHECK(p->IsEvacuationCandidate() ||
- p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
-
- if (p->IsEvacuationCandidate()) {
- UpdateSlotsRecordedIn(p->slots_buffer());
- if (FLAG_trace_fragmentation_verbose) {
- PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
- SlotsBuffer::SizeOfChain(p->slots_buffer()));
- }
- slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
-
- // Important: skip list should be cleared only after roots were updated
- // because root iteration traverses the stack and might have to find
- // code objects from non-updated pc pointing into evacuation candidate.
- SkipList* list = p->skip_list();
- if (list != NULL) list->Clear();
-
- // First pass on aborted pages, fixing up all live objects.
- if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
- p->ClearEvacuationCandidate();
- VisitLiveObjectsBody(p, &updating_visitor);
- }
- }
-
- if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
- reinterpret_cast<intptr_t>(p));
- }
- PagedSpace* space = static_cast<PagedSpace*>(p->owner());
- p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
- p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
-
- switch (space->identity()) {
- case OLD_SPACE:
- Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
- IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
- &updating_visitor);
- break;
- case CODE_SPACE:
- if (FLAG_zap_code_space) {
- Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
- REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(space, NULL, p,
- &updating_visitor);
- } else {
- Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
- REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
- &updating_visitor);
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
+ DCHECK(p->IsEvacuationCandidate());
+ // Important: skip list should be cleared only after roots were updated
+ // because root iteration traverses the stack and might have to find
+ // code objects from non-updated pc pointing into evacuation candidate.
+ SkipList* list = p->skip_list();
+ if (list != NULL) list->Clear();
+
+ // First pass on aborted pages, fixing up all live objects.
+ if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
+ p->ClearEvacuationCandidate();
+ VisitLiveObjectsBody(p, &updating_visitor);
}
}
}
{
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
- heap_->string_table()->Iterate(&updating_visitor);
-
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
// Update pointers from external string table.
heap_->UpdateReferencesInExternalStringTable(
&UpdateReferenceInExternalStringTableEntry);
EvacuationWeakObjectRetainer evacuation_object_retainer;
- heap()->ProcessAllWeakReferences(&evacuation_object_retainer);
+ heap()->ProcessWeakListRoots(&evacuation_object_retainer);
}
}
@@ -3824,10 +3602,9 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
for (Page* p : evacuation_candidates_) {
if (!p->IsEvacuationCandidate()) continue;
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
- space->Free(p->area_start(), p->area_size());
p->ResetLiveBytes();
CHECK(p->SweepingDone());
- space->ReleasePage(p, true);
+ space->ReleasePage(p);
}
evacuation_candidates_.Rewind(0);
compacting_ = false;
@@ -3866,25 +3643,20 @@ int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
return 0;
}
page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
- FreeList* free_list;
- FreeList private_free_list(space);
if (space->identity() == OLD_SPACE) {
- free_list = free_list_old_space_.get();
- max_freed =
- Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
- IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
+ max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
+ IGNORE_FREE_SPACE>(space, page, NULL);
} else if (space->identity() == CODE_SPACE) {
- free_list = free_list_code_space_.get();
- max_freed =
- Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST,
- IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
+ max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST,
+ IGNORE_FREE_SPACE>(space, page, NULL);
} else {
- free_list = free_list_map_space_.get();
- max_freed =
- Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
- IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
+ max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
+ IGNORE_FREE_SPACE>(space, page, NULL);
+ }
+ {
+ base::LockGuard<base::Mutex> guard(&swept_pages_mutex_);
+ swept_pages(space->identity())->Add(page);
}
- free_list->Concatenate(&private_free_list);
page->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
page->mutex()->Unlock();
}
@@ -3904,13 +3676,22 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
Page* p = it.next();
DCHECK(p->SweepingDone());
- if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) ||
- p->IsEvacuationCandidate()) {
+ if (p->IsEvacuationCandidate()) {
// Will be processed in EvacuateNewSpaceAndCandidates.
DCHECK(evacuation_candidates_.length() > 0);
continue;
}
+ // We can not sweep black pages, since all mark bits are set for these
+ // pages.
+ if (p->IsFlagSet(Page::BLACK_PAGE)) {
+ Bitmap::Clear(p);
+ p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
+ p->ClearFlag(Page::BLACK_PAGE);
+ // TODO(hpayer): Free unused memory of last black page.
+ continue;
+ }
+
if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
// We need to sweep the page to get it into an iterable state again. Note
// that this adds unusable memory into the free list that is later on
@@ -3918,7 +3699,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
// testing this is fine.
p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
- IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
+ IGNORE_FREE_SPACE>(space, p, nullptr);
continue;
}
@@ -3928,7 +3709,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
if (FLAG_gc_verbose) {
PrintIsolate(isolate(), "sweeping: released page: %p", p);
}
- space->ReleasePage(p, false);
+ space->ReleasePage(p);
continue;
}
unused_page_present = true;
@@ -3951,7 +3732,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
void MarkCompactCollector::SweepSpaces() {
- GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
double start_time = 0.0;
if (FLAG_print_cumulative_gc_stat) {
start_time = heap_->MonotonicallyIncreasingTimeInMs();
@@ -3999,18 +3780,6 @@ void MarkCompactCollector::ParallelSweepSpacesComplete() {
sweeping_list(heap()->map_space()).clear();
}
-
-// TODO(1466) ReportDeleteIfNeeded is not called currently.
-// Our profiling tools do not expect intersections between
-// code objects. We should either reenable it or change our tools.
-void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
- Isolate* isolate) {
- if (obj->IsCode()) {
- PROFILE(isolate, CodeDeleteEvent(obj->address()));
- }
-}
-
-
Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); }
@@ -4019,41 +3788,13 @@ void MarkCompactCollector::Initialize() {
IncrementalMarking::Initialize();
}
-
-void MarkCompactCollector::EvictPopularEvacuationCandidate(Page* page) {
- if (FLAG_trace_fragmentation) {
- PrintF("Page %p is too popular. Disabling evacuation.\n",
- reinterpret_cast<void*>(page));
- }
-
- isolate()->CountUsage(v8::Isolate::UseCounterFeature::kSlotsBufferOverflow);
-
- // TODO(gc) If all evacuation candidates are too popular we
- // should stop slots recording entirely.
- page->ClearEvacuationCandidate();
-
- DCHECK(!page->IsFlagSet(Page::POPULAR_PAGE));
- page->SetFlag(Page::POPULAR_PAGE);
-
- // We were not collecting slots on this page that point
- // to other evacuation candidates thus we have to
- // rescan the page after evacuation to discover and update all
- // pointers to evacuated objects.
- page->SetFlag(Page::RESCAN_ON_EVACUATION);
-}
-
-
-void MarkCompactCollector::RecordCodeEntrySlot(HeapObject* object, Address slot,
+void MarkCompactCollector::RecordCodeEntrySlot(HeapObject* host, Address slot,
Code* target) {
Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
+ Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
if (target_page->IsEvacuationCandidate() &&
- !ShouldSkipEvacuationSlotRecording(object)) {
- if (!SlotsBuffer::AddTo(slots_buffer_allocator_,
- target_page->slots_buffer_address(),
- SlotsBuffer::CODE_ENTRY_SLOT, slot,
- SlotsBuffer::FAIL_ON_OVERFLOW)) {
- EvictPopularEvacuationCandidate(target_page);
- }
+ !ShouldSkipEvacuationSlotRecording(host)) {
+ RememberedSet<OLD_TO_OLD>::InsertTyped(source_page, CODE_ENTRY_SLOT, slot);
}
}
@@ -4067,7 +3808,7 @@ void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
MarkBit mark_bit = Marking::MarkBitFrom(host);
if (Marking::IsBlack(mark_bit)) {
RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
- RecordRelocSlot(&rinfo, target);
+ RecordRelocSlot(host, &rinfo, target);
}
}
}
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index cc5449f977..9fee8269d5 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -25,9 +25,6 @@ class CodeFlusher;
class MarkCompactCollector;
class MarkingVisitor;
class RootMarkingVisitor;
-class SlotsBuffer;
-class SlotsBufferAllocator;
-
class Marking : public AllStatic {
public:
@@ -160,6 +157,8 @@ class Marking : public AllStatic {
// Returns true if the transferred color is black.
INLINE(static bool TransferColor(HeapObject* from, HeapObject* to)) {
+ if (Page::FromAddress(to->address())->IsFlagSet(Page::BLACK_PAGE))
+ return true;
MarkBit from_mark_bit = MarkBitFrom(from);
MarkBit to_mark_bit = MarkBitFrom(to);
DCHECK(Marking::IsWhite(to_mark_bit));
@@ -318,11 +317,89 @@ class CodeFlusher {
// Defined in isolate.h.
class ThreadLocalTop;
+class MarkBitCellIterator BASE_EMBEDDED {
+ public:
+ explicit MarkBitCellIterator(MemoryChunk* chunk) : chunk_(chunk) {
+ last_cell_index_ = Bitmap::IndexToCell(Bitmap::CellAlignIndex(
+ chunk_->AddressToMarkbitIndex(chunk_->area_end())));
+ cell_base_ = chunk_->area_start();
+ cell_index_ = Bitmap::IndexToCell(
+ Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(cell_base_)));
+ cells_ = chunk_->markbits()->cells();
+ }
+
+ inline bool Done() { return cell_index_ == last_cell_index_; }
+
+ inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }
+
+ inline MarkBit::CellType* CurrentCell() {
+ DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
+ chunk_->AddressToMarkbitIndex(cell_base_))));
+ return &cells_[cell_index_];
+ }
+
+ inline Address CurrentCellBase() {
+ DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
+ chunk_->AddressToMarkbitIndex(cell_base_))));
+ return cell_base_;
+ }
+
+ inline void Advance() {
+ cell_index_++;
+ cell_base_ += 32 * kPointerSize;
+ }
+
+ // Return the next mark bit cell. If there is no next it returns 0;
+ inline MarkBit::CellType PeekNext() {
+ if (HasNext()) {
+ return cells_[cell_index_ + 1];
+ }
+ return 0;
+ }
+
+ private:
+ MemoryChunk* chunk_;
+ MarkBit::CellType* cells_;
+ unsigned int last_cell_index_;
+ unsigned int cell_index_;
+ Address cell_base_;
+};
+
+// Grey objects can happen on black pages when black objects transition to
+// grey e.g. when calling RecordWrites on them.
+enum LiveObjectIterationMode {
+ kBlackObjects,
+ kGreyObjects,
+ kAllLiveObjects
+};
+
+template <LiveObjectIterationMode T>
+class LiveObjectIterator BASE_EMBEDDED {
+ public:
+ explicit LiveObjectIterator(MemoryChunk* chunk)
+ : chunk_(chunk),
+ it_(chunk_),
+ cell_base_(it_.CurrentCellBase()),
+ current_cell_(*it_.CurrentCell()) {
+ // Black pages can not be iterated.
+ DCHECK(!chunk->IsFlagSet(Page::BLACK_PAGE));
+ }
+
+ HeapObject* Next();
+
+ private:
+ MemoryChunk* chunk_;
+ MarkBitCellIterator it_;
+ Address cell_base_;
+ MarkBit::CellType current_cell_;
+};
// -------------------------------------------------------------------------
// Mark-Compact collector
class MarkCompactCollector {
public:
+ class Evacuator;
+
enum IterationMode {
kKeepMarking,
kClearMarkbits,
@@ -395,8 +472,8 @@ class MarkCompactCollector {
->IsEvacuationCandidate();
}
- void RecordRelocSlot(RelocInfo* rinfo, Object* target);
- void RecordCodeEntrySlot(HeapObject* object, Address slot, Code* target);
+ void RecordRelocSlot(Code* host, RelocInfo* rinfo, Object* target);
+ void RecordCodeEntrySlot(HeapObject* host, Address slot, Code* target);
void RecordCodeTargetPatch(Address pc, Code* target);
INLINE(void RecordSlot(HeapObject* object, Object** slot, Object* target));
INLINE(void ForceRecordSlot(HeapObject* object, Object** slot,
@@ -405,11 +482,6 @@ class MarkCompactCollector {
void UpdateSlots(SlotsBuffer* buffer);
void UpdateSlotsRecordedIn(SlotsBuffer* buffer);
- void MigrateObject(HeapObject* dst, HeapObject* src, int size,
- AllocationSpace to_old_space,
- SlotsBuffer** evacuation_slots_buffer,
- LocalStoreBuffer* local_store_buffer);
-
void InvalidateCode(Code* code);
void ClearMarkbits();
@@ -480,38 +552,35 @@ class MarkCompactCollector {
void InitializeMarkingDeque();
- // The following four methods can just be called after marking, when the
+ // The following two methods can just be called after marking, when the
// whole transitive closure is known. They must be called before sweeping
// when mark bits are still intact.
- bool IsSlotInBlackObject(Page* p, Address slot, HeapObject** out_object);
- bool IsSlotInBlackObjectSlow(Page* p, Address slot);
- bool IsSlotInLiveObject(Address slot);
- void VerifyIsSlotInLiveObject(Address slot, HeapObject* object);
+ bool IsSlotInBlackObject(MemoryChunk* p, Address slot);
+ HeapObject* FindBlackObjectBySlotSlow(Address slot);
// Removes all the slots in the slot buffers that are within the given
// address range.
void RemoveObjectSlots(Address start_slot, Address end_slot);
- //
- // Free lists filled by sweeper and consumed by corresponding spaces
- // (including compaction spaces).
- //
- base::SmartPointer<FreeList>& free_list_old_space() {
- return free_list_old_space_;
- }
- base::SmartPointer<FreeList>& free_list_code_space() {
- return free_list_code_space_;
- }
- base::SmartPointer<FreeList>& free_list_map_space() {
- return free_list_map_space_;
+ base::Mutex* swept_pages_mutex() { return &swept_pages_mutex_; }
+ List<Page*>* swept_pages(AllocationSpace id) {
+ switch (id) {
+ case OLD_SPACE:
+ return &swept_old_space_pages_;
+ case CODE_SPACE:
+ return &swept_code_space_pages_;
+ case MAP_SPACE:
+ return &swept_map_space_pages_;
+ default:
+ UNREACHABLE();
+ }
+ return nullptr;
}
private:
- class CompactionTask;
class EvacuateNewSpaceVisitor;
class EvacuateOldSpaceVisitor;
class EvacuateVisitorBase;
- class Evacuator;
class HeapObjectVisitor;
class SweeperTask;
@@ -520,8 +589,7 @@ class MarkCompactCollector {
explicit MarkCompactCollector(Heap* heap);
bool WillBeDeoptimized(Code* code);
- void EvictPopularEvacuationCandidate(Page* page);
- void ClearInvalidStoreAndSlotsBufferEntries();
+ void ClearInvalidRememberedSetSlots();
void StartSweeperThreads();
@@ -550,10 +618,6 @@ class MarkCompactCollector {
bool evacuation_;
- SlotsBufferAllocator* slots_buffer_allocator_;
-
- SlotsBuffer* migration_slots_buffer_;
-
// Finishes GC, performs heap verification if enabled.
void Finish();
@@ -707,17 +771,11 @@ class MarkCompactCollector {
void EvacuateNewSpacePrologue();
void EvacuateNewSpaceEpilogue();
- void AddEvacuationSlotsBufferSynchronized(
- SlotsBuffer* evacuation_slots_buffer);
-
void EvacuatePagesInParallel();
// The number of parallel compaction tasks, including the main thread.
int NumberOfParallelCompactionTasks(int pages, intptr_t live_bytes);
- void StartParallelCompaction(Evacuator** evacuators, int len);
- void WaitUntilCompactionCompleted(Evacuator** evacuators, int len);
-
void EvacuateNewSpaceAndCandidates();
void UpdatePointersAfterEvacuation();
@@ -743,19 +801,6 @@ class MarkCompactCollector {
// swept in parallel.
void ParallelSweepSpacesComplete();
- // Updates store buffer and slot buffer for a pointer in a migrating object.
- void RecordMigratedSlot(Object* value, Address slot,
- SlotsBuffer** evacuation_slots_buffer,
- LocalStoreBuffer* local_store_buffer);
-
- // Adds the code entry slot to the slots buffer.
- void RecordMigratedCodeEntrySlot(Address code_entry, Address code_entry_slot,
- SlotsBuffer** evacuation_slots_buffer);
-
- // Adds the slot of a moved code object.
- void RecordMigratedCodeObjectSlot(Address code_object,
- SlotsBuffer** evacuation_slots_buffer);
-
#ifdef DEBUG
friend class MarkObjectVisitor;
static void VisitObject(HeapObject* obj);
@@ -774,17 +819,10 @@ class MarkCompactCollector {
List<Page*> evacuation_candidates_;
List<NewSpacePage*> newspace_evacuation_candidates_;
- // The evacuation_slots_buffers_ are used by the compaction threads.
- // When a compaction task finishes, it uses
- // AddEvacuationSlotsbufferSynchronized to adds its slots buffer to the
- // evacuation_slots_buffers_ list using the evacuation_slots_buffers_mutex_
- // lock.
- base::Mutex evacuation_slots_buffers_mutex_;
- List<SlotsBuffer*> evacuation_slots_buffers_;
-
- base::SmartPointer<FreeList> free_list_old_space_;
- base::SmartPointer<FreeList> free_list_code_space_;
- base::SmartPointer<FreeList> free_list_map_space_;
+ base::Mutex swept_pages_mutex_;
+ List<Page*> swept_old_space_pages_;
+ List<Page*> swept_code_space_pages_;
+ List<Page*> swept_map_space_pages_;
SweepingList sweeping_list_old_space_;
SweepingList sweeping_list_code_space_;
@@ -797,86 +835,18 @@ class MarkCompactCollector {
// True if concurrent or parallel sweeping is currently in progress.
bool sweeping_in_progress_;
- // True if parallel compaction is currently in progress.
- bool compaction_in_progress_;
-
// Semaphore used to synchronize sweeper tasks.
base::Semaphore pending_sweeper_tasks_semaphore_;
// Semaphore used to synchronize compaction tasks.
base::Semaphore pending_compaction_tasks_semaphore_;
- friend class Heap;
- friend class StoreBuffer;
-};
-
-
-class MarkBitCellIterator BASE_EMBEDDED {
- public:
- explicit MarkBitCellIterator(MemoryChunk* chunk) : chunk_(chunk) {
- last_cell_index_ = Bitmap::IndexToCell(Bitmap::CellAlignIndex(
- chunk_->AddressToMarkbitIndex(chunk_->area_end())));
- cell_base_ = chunk_->area_start();
- cell_index_ = Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(cell_base_)));
- cells_ = chunk_->markbits()->cells();
- }
-
- inline bool Done() { return cell_index_ == last_cell_index_; }
-
- inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }
-
- inline MarkBit::CellType* CurrentCell() {
- DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
- chunk_->AddressToMarkbitIndex(cell_base_))));
- return &cells_[cell_index_];
- }
-
- inline Address CurrentCellBase() {
- DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
- chunk_->AddressToMarkbitIndex(cell_base_))));
- return cell_base_;
- }
-
- inline void Advance() {
- cell_index_++;
- cell_base_ += 32 * kPointerSize;
- }
+ base::Semaphore page_parallel_job_semaphore_;
- // Return the next mark bit cell. If there is no next it returns 0;
- inline MarkBit::CellType PeekNext() {
- if (HasNext()) {
- return cells_[cell_index_ + 1];
- }
- return 0;
- }
+ bool black_allocation_;
- private:
- MemoryChunk* chunk_;
- MarkBit::CellType* cells_;
- unsigned int last_cell_index_;
- unsigned int cell_index_;
- Address cell_base_;
-};
-
-enum LiveObjectIterationMode { kBlackObjects, kGreyObjects, kAllLiveObjects };
-
-template <LiveObjectIterationMode T>
-class LiveObjectIterator BASE_EMBEDDED {
- public:
- explicit LiveObjectIterator(MemoryChunk* chunk)
- : chunk_(chunk),
- it_(chunk_),
- cell_base_(it_.CurrentCellBase()),
- current_cell_(*it_.CurrentCell()) {}
-
- HeapObject* Next();
-
- private:
- MemoryChunk* chunk_;
- MarkBitCellIterator it_;
- Address cell_base_;
- MarkBit::CellType current_cell_;
+ friend class Heap;
+ friend class StoreBuffer;
};
diff --git a/deps/v8/src/heap/memory-reducer.cc b/deps/v8/src/heap/memory-reducer.cc
index f53730785a..699e10e603 100644
--- a/deps/v8/src/heap/memory-reducer.cc
+++ b/deps/v8/src/heap/memory-reducer.cc
@@ -84,10 +84,10 @@ void MemoryReducer::NotifyTimer(const Event& event) {
double deadline = heap()->MonotonicallyIncreasingTimeInMs() +
kIncrementalMarkingDelayMs;
heap()->incremental_marking()->AdvanceIncrementalMarking(
- 0, deadline, i::IncrementalMarking::StepActions(
- i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- i::IncrementalMarking::FORCE_MARKING,
- i::IncrementalMarking::FORCE_COMPLETION));
+ deadline, i::IncrementalMarking::StepActions(
+ i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ i::IncrementalMarking::FORCE_MARKING,
+ i::IncrementalMarking::FORCE_COMPLETION));
heap()->FinalizeIncrementalMarkingIfComplete(
"Memory reducer: finalize incremental marking");
}
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index d71c879a73..c415713ee3 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -220,11 +220,12 @@ void StaticMarkingVisitor<StaticVisitor>::VisitEmbeddedPointer(
Heap* heap, RelocInfo* rinfo) {
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
HeapObject* object = HeapObject::cast(rinfo->target_object());
- heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
+ Code* host = rinfo->host();
+ heap->mark_compact_collector()->RecordRelocSlot(host, rinfo, object);
// TODO(ulan): It could be better to record slots only for strongly embedded
// objects here and record slots for weakly embedded object during clearing
// of non-live references in mark-compact.
- if (!rinfo->host()->IsWeakObject(object)) {
+ if (!host->IsWeakObject(object)) {
StaticVisitor::MarkObject(heap, object);
}
}
@@ -235,8 +236,9 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCell(Heap* heap,
RelocInfo* rinfo) {
DCHECK(rinfo->rmode() == RelocInfo::CELL);
Cell* cell = rinfo->target_cell();
- heap->mark_compact_collector()->RecordRelocSlot(rinfo, cell);
- if (!rinfo->host()->IsWeakObject(cell)) {
+ Code* host = rinfo->host();
+ heap->mark_compact_collector()->RecordRelocSlot(host, rinfo, cell);
+ if (!host->IsWeakObject(cell)) {
StaticVisitor::MarkObject(heap, cell);
}
}
@@ -248,7 +250,8 @@ void StaticMarkingVisitor<StaticVisitor>::VisitDebugTarget(Heap* heap,
DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
rinfo->IsPatchedDebugBreakSlotSequence());
Code* target = Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
- heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
+ Code* host = rinfo->host();
+ heap->mark_compact_collector()->RecordRelocSlot(host, rinfo, target);
StaticVisitor::MarkObject(heap, target);
}
@@ -268,7 +271,8 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget(Heap* heap,
rinfo->host()->constant_pool());
target = Code::GetCodeFromTargetAddress(rinfo->target_address());
}
- heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
+ Code* host = rinfo->host();
+ heap->mark_compact_collector()->RecordRelocSlot(host, rinfo, target);
StaticVisitor::MarkObject(heap, target);
}
@@ -279,7 +283,8 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCodeAgeSequence(
DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
Code* target = rinfo->code_age_stub();
DCHECK(target != NULL);
- heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
+ Code* host = rinfo->host();
+ heap->mark_compact_collector()->RecordRelocSlot(host, rinfo, target);
StaticVisitor::MarkObject(heap, target);
}
diff --git a/deps/v8/src/heap/objects-visiting.cc b/deps/v8/src/heap/objects-visiting.cc
index 0003a0702d..0b857dc423 100644
--- a/deps/v8/src/heap/objects-visiting.cc
+++ b/deps/v8/src/heap/objects-visiting.cc
@@ -111,6 +111,7 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case JS_ARRAY_TYPE:
case JS_GLOBAL_PROXY_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
+ case JS_SPECIAL_API_OBJECT_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
case JS_TYPED_ARRAY_TYPE:
case JS_DATA_VIEW_TYPE:
diff --git a/deps/v8/src/heap/page-parallel-job.h b/deps/v8/src/heap/page-parallel-job.h
new file mode 100644
index 0000000000..02583c7818
--- /dev/null
+++ b/deps/v8/src/heap/page-parallel-job.h
@@ -0,0 +1,194 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_PAGE_PARALLEL_JOB_
+#define V8_HEAP_PAGE_PARALLEL_JOB_
+
+#include "src/allocation.h"
+#include "src/cancelable-task.h"
+#include "src/utils.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+class Isolate;
+
+// This class manages background tasks that process set of pages in parallel.
+// The JobTraits class needs to define:
+// - PerPageData type - state associated with each page.
+// - PerTaskData type - state associated with each task.
+// - static bool ProcessPageInParallel(Heap* heap,
+// PerTaskData task_data,
+// MemoryChunk* page,
+// PerPageData page_data)
+// The function should return true iff processing succeeded.
+// - static const bool NeedSequentialFinalization
+// - static void FinalizePageSequentially(Heap* heap,
+// bool processing_succeeded,
+// MemoryChunk* page,
+// PerPageData page_data)
+template <typename JobTraits>
+class PageParallelJob {
+ public:
+ // PageParallelJob cannot dynamically create a semaphore because of a bug in
+ // glibc. See http://crbug.com/609249 and
+ // https://sourceware.org/bugzilla/show_bug.cgi?id=12674.
+ // The caller must provide a semaphore with value 0 and ensure that
+ // the lifetime of the semaphore is the same as the lifetime of the Isolate
+ // It is guaranteed that the semaphore value will be 0 after Run() call.
+ PageParallelJob(Heap* heap, CancelableTaskManager* cancelable_task_manager,
+ base::Semaphore* semaphore)
+ : heap_(heap),
+ cancelable_task_manager_(cancelable_task_manager),
+ items_(nullptr),
+ num_items_(0),
+ num_tasks_(0),
+ pending_tasks_(semaphore) {}
+
+ ~PageParallelJob() {
+ Item* item = items_;
+ while (item != nullptr) {
+ Item* next = item->next;
+ delete item;
+ item = next;
+ }
+ }
+
+ void AddPage(MemoryChunk* chunk, typename JobTraits::PerPageData data) {
+ Item* item = new Item(chunk, data, items_);
+ items_ = item;
+ ++num_items_;
+ }
+
+ int NumberOfPages() { return num_items_; }
+
+ // Returns the number of tasks that were spawned when running the job.
+ int NumberOfTasks() { return num_tasks_; }
+
+ // Runs the given number of tasks in parallel and processes the previously
+ // added pages. This function blocks until all tasks finish.
+ // The callback takes the index of a task and returns data for that task.
+ template <typename Callback>
+ void Run(int num_tasks, Callback per_task_data_callback) {
+ if (num_items_ == 0) return;
+ DCHECK_GE(num_tasks, 1);
+ uint32_t task_ids[kMaxNumberOfTasks];
+ const int max_num_tasks = Min(
+ kMaxNumberOfTasks,
+ static_cast<int>(
+ V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
+ num_tasks_ = Max(1, Min(num_tasks, max_num_tasks));
+ int items_per_task = (num_items_ + num_tasks_ - 1) / num_tasks_;
+ int start_index = 0;
+ Task* main_task = nullptr;
+ for (int i = 0; i < num_tasks_; i++, start_index += items_per_task) {
+ if (start_index >= num_items_) {
+ start_index -= num_items_;
+ }
+ Task* task = new Task(heap_, items_, num_items_, start_index,
+ pending_tasks_, per_task_data_callback(i));
+ task_ids[i] = task->id();
+ if (i > 0) {
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ task, v8::Platform::kShortRunningTask);
+ } else {
+ main_task = task;
+ }
+ }
+ // Contribute on main thread.
+ main_task->Run();
+ delete main_task;
+ // Wait for background tasks.
+ for (int i = 0; i < num_tasks_; i++) {
+ if (!cancelable_task_manager_->TryAbort(task_ids[i])) {
+ pending_tasks_->Wait();
+ }
+ }
+ if (JobTraits::NeedSequentialFinalization) {
+ Item* item = items_;
+ while (item != nullptr) {
+ bool success = (item->state.Value() == kFinished);
+ JobTraits::FinalizePageSequentially(heap_, item->chunk, success,
+ item->data);
+ item = item->next;
+ }
+ }
+ }
+
+ private:
+ static const int kMaxNumberOfTasks = 10;
+
+ enum ProcessingState { kAvailable, kProcessing, kFinished, kFailed };
+
+ struct Item : public Malloced {
+ Item(MemoryChunk* chunk, typename JobTraits::PerPageData data, Item* next)
+ : chunk(chunk), state(kAvailable), data(data), next(next) {}
+ MemoryChunk* chunk;
+ AtomicValue<ProcessingState> state;
+ typename JobTraits::PerPageData data;
+ Item* next;
+ };
+
+ class Task : public CancelableTask {
+ public:
+ Task(Heap* heap, Item* items, int num_items, int start_index,
+ base::Semaphore* on_finish, typename JobTraits::PerTaskData data)
+ : CancelableTask(heap->isolate()),
+ heap_(heap),
+ items_(items),
+ num_items_(num_items),
+ start_index_(start_index),
+ on_finish_(on_finish),
+ data_(data) {}
+
+ virtual ~Task() {}
+
+ private:
+ // v8::internal::CancelableTask overrides.
+ void RunInternal() override {
+ // Each task starts at a different index to improve parallelization.
+ Item* current = items_;
+ int skip = start_index_;
+ while (skip-- > 0) {
+ current = current->next;
+ }
+ for (int i = 0; i < num_items_; i++) {
+ if (current->state.TrySetValue(kAvailable, kProcessing)) {
+ bool success = JobTraits::ProcessPageInParallel(
+ heap_, data_, current->chunk, current->data);
+ current->state.SetValue(success ? kFinished : kFailed);
+ }
+ current = current->next;
+ // Wrap around if needed.
+ if (current == nullptr) {
+ current = items_;
+ }
+ }
+ on_finish_->Signal();
+ }
+
+ Heap* heap_;
+ Item* items_;
+ int num_items_;
+ int start_index_;
+ base::Semaphore* on_finish_;
+ typename JobTraits::PerTaskData data_;
+ DISALLOW_COPY_AND_ASSIGN(Task);
+ };
+
+ Heap* heap_;
+ CancelableTaskManager* cancelable_task_manager_;
+ Item* items_;
+ int num_items_;
+ int num_tasks_;
+ base::Semaphore* pending_tasks_;
+ DISALLOW_COPY_AND_ASSIGN(PageParallelJob);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_PAGE_PARALLEL_JOB_
diff --git a/deps/v8/src/heap/remembered-set.cc b/deps/v8/src/heap/remembered-set.cc
index d9d5914273..403c99b057 100644
--- a/deps/v8/src/heap/remembered-set.cc
+++ b/deps/v8/src/heap/remembered-set.cc
@@ -22,10 +22,9 @@ void RememberedSet<direction>::ClearInvalidSlots(Heap* heap) {
chunk = it.next();
SlotSet* slots = GetSlotSet(chunk);
if (slots != nullptr) {
- slots->Iterate([heap](Address addr) {
+ slots->Iterate([heap, chunk](Address addr) {
Object** slot = reinterpret_cast<Object**>(addr);
- return IsValidSlot(heap, slot) ? SlotSet::KEEP_SLOT
- : SlotSet::REMOVE_SLOT;
+ return IsValidSlot(heap, chunk, slot) ? KEEP_SLOT : REMOVE_SLOT;
});
}
}
@@ -33,22 +32,30 @@ void RememberedSet<direction>::ClearInvalidSlots(Heap* heap) {
template <PointerDirection direction>
void RememberedSet<direction>::VerifyValidSlots(Heap* heap) {
- STATIC_ASSERT(direction == OLD_TO_NEW);
Iterate(heap, [heap](Address addr) {
- Object** slot = reinterpret_cast<Object**>(addr);
- Object* object = *slot;
- if (Page::FromAddress(addr)->owner() != nullptr &&
- Page::FromAddress(addr)->owner()->identity() == OLD_SPACE) {
- CHECK(IsValidSlot(heap, slot));
- heap->mark_compact_collector()->VerifyIsSlotInLiveObject(
- reinterpret_cast<Address>(slot), HeapObject::cast(object));
+ HeapObject* obj =
+ heap->mark_compact_collector()->FindBlackObjectBySlotSlow(addr);
+ if (obj == nullptr) {
+ // The slot is in dead object.
+ MemoryChunk* chunk = MemoryChunk::FromAnyPointerAddress(heap, addr);
+ AllocationSpace owner = chunk->owner()->identity();
+ // The old to old remembered set should not have dead slots.
+ CHECK_NE(direction, OLD_TO_OLD);
+ // The old to new remembered set is allowed to have slots in dead
+ // objects only in map and large object space because these space
+ // cannot have raw untagged pointers.
+ CHECK(owner == MAP_SPACE || owner == LO_SPACE);
+ } else {
+ int offset = static_cast<int>(addr - obj->address());
+ CHECK(obj->IsValidSlot(offset));
}
- return SlotSet::KEEP_SLOT;
+ return KEEP_SLOT;
});
}
template <PointerDirection direction>
-bool RememberedSet<direction>::IsValidSlot(Heap* heap, Object** slot) {
+bool RememberedSet<direction>::IsValidSlot(Heap* heap, MemoryChunk* chunk,
+ Object** slot) {
STATIC_ASSERT(direction == OLD_TO_NEW);
Object* object = *slot;
if (!heap->InNewSpace(object)) {
@@ -58,12 +65,13 @@ bool RememberedSet<direction>::IsValidSlot(Heap* heap, Object** slot) {
// If the target object is not black, the source slot must be part
// of a non-black (dead) object.
return Marking::IsBlack(Marking::MarkBitFrom(heap_object)) &&
- heap->mark_compact_collector()->IsSlotInLiveObject(
- reinterpret_cast<Address>(slot));
+ heap->mark_compact_collector()->IsSlotInBlackObject(
+ chunk, reinterpret_cast<Address>(slot));
}
template void RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(Heap* heap);
template void RememberedSet<OLD_TO_NEW>::VerifyValidSlots(Heap* heap);
+template void RememberedSet<OLD_TO_OLD>::VerifyValidSlots(Heap* heap);
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index 351d76edb8..45408bf1e9 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -56,22 +56,44 @@ class RememberedSet {
}
// Iterates and filters the remembered set with the given callback.
- // The callback should take (Address slot) and return SlotSet::CallbackResult.
+ // The callback should take (Address slot) and return SlotCallbackResult.
template <typename Callback>
static void Iterate(Heap* heap, Callback callback) {
- PointerChunkIterator it(heap);
+ IterateMemoryChunks(
+ heap, [callback](MemoryChunk* chunk) { Iterate(chunk, callback); });
+ }
+
+ // Iterates over all memory chunks that contains non-empty slot sets.
+ // The callback should take (MemoryChunk* chunk) and return void.
+ template <typename Callback>
+ static void IterateMemoryChunks(Heap* heap, Callback callback) {
+ MemoryChunkIterator it(heap, direction == OLD_TO_OLD
+ ? MemoryChunkIterator::ALL
+ : MemoryChunkIterator::ALL_BUT_CODE_SPACE);
MemoryChunk* chunk;
while ((chunk = it.next()) != nullptr) {
SlotSet* slots = GetSlotSet(chunk);
- if (slots != nullptr) {
- size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
- int new_count = 0;
- for (size_t page = 0; page < pages; page++) {
- new_count += slots[page].Iterate(callback);
- }
- if (new_count == 0) {
- ReleaseSlotSet(chunk);
- }
+ TypedSlotSet* typed_slots = GetTypedSlotSet(chunk);
+ if (slots != nullptr || typed_slots != nullptr) {
+ callback(chunk);
+ }
+ }
+ }
+
+ // Iterates and filters the remembered set in the given memory chunk with
+ // the given callback. The callback should take (Address slot) and return
+ // SlotCallbackResult.
+ template <typename Callback>
+ static void Iterate(MemoryChunk* chunk, Callback callback) {
+ SlotSet* slots = GetSlotSet(chunk);
+ if (slots != nullptr) {
+ size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
+ int new_count = 0;
+ for (size_t page = 0; page < pages; page++) {
+ new_count += slots[page].Iterate(callback);
+ }
+ if (new_count == 0) {
+ ReleaseSlotSet(chunk);
}
}
}
@@ -89,6 +111,64 @@ class RememberedSet {
});
}
+ template <typename Callback>
+ static void IterateWithWrapper(Heap* heap, MemoryChunk* chunk,
+ Callback callback) {
+ Iterate(chunk, [heap, callback](Address addr) {
+ return Wrapper(heap, addr, callback);
+ });
+ }
+
+ // Given a page and a typed slot in that page, this function adds the slot
+ // to the remembered set.
+ static void InsertTyped(Page* page, SlotType slot_type, Address slot_addr) {
+ STATIC_ASSERT(direction == OLD_TO_OLD);
+ TypedSlotSet* slot_set = page->typed_old_to_old_slots();
+ if (slot_set == nullptr) {
+ page->AllocateTypedOldToOldSlots();
+ slot_set = page->typed_old_to_old_slots();
+ }
+ uintptr_t offset = slot_addr - page->address();
+ DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
+ slot_set->Insert(slot_type, static_cast<uint32_t>(offset));
+ }
+
+ // Given a page and a range of typed slots in that page, this function removes
+ // the slots from the remembered set.
+ static void RemoveRangeTyped(Page* page, Address start, Address end) {
+ TypedSlotSet* slots = page->typed_old_to_old_slots();
+ if (slots != nullptr) {
+ slots->Iterate([start, end](SlotType slot_type, Address slot_addr) {
+ return start <= slot_addr && slot_addr < end ? REMOVE_SLOT : KEEP_SLOT;
+ });
+ }
+ }
+
+ // Iterates and filters typed old to old pointers in the given memory chunk
+ // with the given callback. The callback should take (SlotType slot_type,
+ // Address slot_addr) and return SlotCallbackResult.
+ template <typename Callback>
+ static void IterateTyped(MemoryChunk* chunk, Callback callback) {
+ TypedSlotSet* slots = chunk->typed_old_to_old_slots();
+ if (slots != nullptr) {
+ int new_count = slots->Iterate(callback);
+ if (new_count == 0) {
+ chunk->ReleaseTypedOldToOldSlots();
+ }
+ }
+ }
+
+ // Clear all old to old slots from the remembered set.
+ static void ClearAll(Heap* heap) {
+ STATIC_ASSERT(direction == OLD_TO_OLD);
+ MemoryChunkIterator it(heap, MemoryChunkIterator::ALL);
+ MemoryChunk* chunk;
+ while ((chunk = it.next()) != nullptr) {
+ chunk->ReleaseOldToOldSlots();
+ chunk->ReleaseTypedOldToOldSlots();
+ }
+ }
+
// Eliminates all stale slots from the remembered set, i.e.
// slots that are not part of live objects anymore. This method must be
// called after marking, when the whole transitive closure is known and
@@ -106,6 +186,14 @@ class RememberedSet {
}
}
+ static TypedSlotSet* GetTypedSlotSet(MemoryChunk* chunk) {
+ if (direction == OLD_TO_OLD) {
+ return chunk->typed_old_to_old_slots();
+ } else {
+ return nullptr;
+ }
+ }
+
static void ReleaseSlotSet(MemoryChunk* chunk) {
if (direction == OLD_TO_OLD) {
chunk->ReleaseOldToOldSlots();
@@ -125,8 +213,8 @@ class RememberedSet {
}
template <typename Callback>
- static SlotSet::CallbackResult Wrapper(Heap* heap, Address slot_address,
- Callback slot_callback) {
+ static SlotCallbackResult Wrapper(Heap* heap, Address slot_address,
+ Callback slot_callback) {
STATIC_ASSERT(direction == OLD_TO_NEW);
Object** slot = reinterpret_cast<Object**>(slot_address);
Object* object = *slot;
@@ -140,15 +228,15 @@ class RememberedSet {
// Unfortunately, we do not know about the slot. It could be in a
// just freed free space object.
if (heap->InToSpace(object)) {
- return SlotSet::KEEP_SLOT;
+ return KEEP_SLOT;
}
} else {
DCHECK(!heap->InNewSpace(object));
}
- return SlotSet::REMOVE_SLOT;
+ return REMOVE_SLOT;
}
- static bool IsValidSlot(Heap* heap, Object** slot);
+ static bool IsValidSlot(Heap* heap, MemoryChunk* chunk, Object** slot);
};
} // namespace internal
diff --git a/deps/v8/src/heap/scavenge-job.cc b/deps/v8/src/heap/scavenge-job.cc
index 52ba97a9c7..d89c9453c5 100644
--- a/deps/v8/src/heap/scavenge-job.cc
+++ b/deps/v8/src/heap/scavenge-job.cc
@@ -23,8 +23,8 @@ void ScavengeJob::IdleTask::RunInternal(double deadline_in_seconds) {
static_cast<double>(base::Time::kMillisecondsPerSecond);
double start_ms = heap->MonotonicallyIncreasingTimeInMs();
double idle_time_in_ms = deadline_in_ms - start_ms;
- size_t scavenge_speed_in_bytes_per_ms =
- static_cast<size_t>(heap->tracer()->ScavengeSpeedInBytesPerMillisecond());
+ double scavenge_speed_in_bytes_per_ms =
+ heap->tracer()->ScavengeSpeedInBytesPerMillisecond();
size_t new_space_size = heap->new_space()->Size();
size_t new_space_capacity = heap->new_space()->Capacity();
@@ -42,9 +42,8 @@ void ScavengeJob::IdleTask::RunInternal(double deadline_in_seconds) {
}
}
-
bool ScavengeJob::ReachedIdleAllocationLimit(
- size_t scavenge_speed_in_bytes_per_ms, size_t new_space_size,
+ double scavenge_speed_in_bytes_per_ms, size_t new_space_size,
size_t new_space_capacity) {
if (scavenge_speed_in_bytes_per_ms == 0) {
scavenge_speed_in_bytes_per_ms = kInitialScavengeSpeedInBytesPerMs;
@@ -52,27 +51,24 @@ bool ScavengeJob::ReachedIdleAllocationLimit(
// Set the allocation limit to the number of bytes we can scavenge in an
// average idle task.
- size_t allocation_limit = kAverageIdleTimeMs * scavenge_speed_in_bytes_per_ms;
+ double allocation_limit = kAverageIdleTimeMs * scavenge_speed_in_bytes_per_ms;
// Keep the limit smaller than the new space capacity.
allocation_limit =
- Min(allocation_limit,
- static_cast<size_t>(new_space_capacity *
- kMaxAllocationLimitAsFractionOfNewSpace));
+ Min<double>(allocation_limit,
+ new_space_capacity * kMaxAllocationLimitAsFractionOfNewSpace);
// Adjust the limit to take into account bytes that will be allocated until
- // the next check.
- allocation_limit = allocation_limit < kBytesAllocatedBeforeNextIdleTask
- ? 0
- : allocation_limit - kBytesAllocatedBeforeNextIdleTask;
- // Keep the limit large enough to avoid scavenges in tiny new space.
- allocation_limit = Max(allocation_limit, kMinAllocationLimit);
+ // the next check and keep the limit large enough to avoid scavenges in tiny
+ // new space.
+ allocation_limit =
+ Max<double>(allocation_limit - kBytesAllocatedBeforeNextIdleTask,
+ kMinAllocationLimit);
return allocation_limit <= new_space_size;
}
-
bool ScavengeJob::EnoughIdleTimeForScavenge(
- double idle_time_in_ms, size_t scavenge_speed_in_bytes_per_ms,
+ double idle_time_in_ms, double scavenge_speed_in_bytes_per_ms,
size_t new_space_size) {
if (scavenge_speed_in_bytes_per_ms == 0) {
scavenge_speed_in_bytes_per_ms = kInitialScavengeSpeedInBytesPerMs;
diff --git a/deps/v8/src/heap/scavenge-job.h b/deps/v8/src/heap/scavenge-job.h
index 56299a154b..fadfccdcc4 100644
--- a/deps/v8/src/heap/scavenge-job.h
+++ b/deps/v8/src/heap/scavenge-job.h
@@ -47,12 +47,12 @@ class ScavengeJob {
void NotifyIdleTask() { idle_task_pending_ = false; }
bool IdleTaskRescheduled() { return idle_task_rescheduled_; }
- static bool ReachedIdleAllocationLimit(size_t scavenge_speed_in_bytes_per_ms,
+ static bool ReachedIdleAllocationLimit(double scavenge_speed_in_bytes_per_ms,
size_t new_space_size,
size_t new_space_capacity);
static bool EnoughIdleTimeForScavenge(double idle_time_ms,
- size_t scavenge_speed_in_bytes_per_ms,
+ double scavenge_speed_in_bytes_per_ms,
size_t new_space_size);
// If we haven't recorded any scavenger events yet, we use a conservative
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index 40aeb74aa9..3f532ead62 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -186,7 +186,9 @@ class ScavengingVisitor : public StaticVisitorBase {
*slot = target;
if (object_contents == POINTER_OBJECT) {
- heap->promotion_queue()->insert(target, object_size);
+ heap->promotion_queue()->insert(
+ target, object_size,
+ Marking::IsBlack(Marking::MarkBitFrom(object)));
}
heap->IncrementPromotedObjectsSize(object_size);
return true;
@@ -236,7 +238,7 @@ class ScavengingVisitor : public StaticVisitorBase {
if (Marking::IsBlack(mark_bit)) {
// This object is black and it might not be rescanned by marker.
// We should explicitly record code entry slot for compaction because
- // promotion queue processing (IterateAndMarkPointersToFromSpace) will
+ // promotion queue processing (IteratePromotedObjectPointers) will
// miss it as it is not HeapObject-tagged.
Address code_entry_slot =
target->address() + JSFunction::kCodeEntryOffset;
diff --git a/deps/v8/src/heap/slot-set.h b/deps/v8/src/heap/slot-set.h
index 6144706f71..e55ffe98e6 100644
--- a/deps/v8/src/heap/slot-set.h
+++ b/deps/v8/src/heap/slot-set.h
@@ -7,10 +7,13 @@
#include "src/allocation.h"
#include "src/base/bits.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
+enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };
+
// Data structure for maintaining a set of slots in a standard (non-large)
// page. The base address of the page must be set with SetPageStart before any
// operation.
@@ -19,8 +22,6 @@ namespace internal {
// Each bucket is a bitmap with a bit corresponding to a single slot offset.
class SlotSet : public Malloced {
public:
- enum CallbackResult { KEEP_SLOT, REMOVE_SLOT };
-
SlotSet() {
for (int i = 0; i < kBuckets; i++) {
bucket[i] = nullptr;
@@ -213,6 +214,124 @@ class SlotSet : public Malloced {
Address page_start_;
};
+enum SlotType {
+ EMBEDDED_OBJECT_SLOT,
+ OBJECT_SLOT,
+ RELOCATED_CODE_OBJECT,
+ CELL_TARGET_SLOT,
+ CODE_TARGET_SLOT,
+ CODE_ENTRY_SLOT,
+ DEBUG_TARGET_SLOT,
+ NUMBER_OF_SLOT_TYPES
+};
+
+// Data structure for maintaining a multiset of typed slots in a page.
+// Typed slots can only appear in Code and JSFunction objects, so
+// the maximum possible offset is limited by the LargePage::kMaxCodePageSize.
+// The implementation is a chain of chunks, where each chunks is an array of
+// encoded (slot type, slot offset) pairs.
+// There is no duplicate detection and we do not expect many duplicates because
+// typed slots contain V8 internal pointers that are not directly exposed to JS.
+class TypedSlotSet {
+ public:
+ typedef uint32_t TypedSlot;
+ static const int kMaxOffset = 1 << 29;
+
+ explicit TypedSlotSet(Address page_start) : page_start_(page_start) {
+ chunk_ = new Chunk(nullptr, kInitialBufferSize);
+ }
+
+ ~TypedSlotSet() {
+ Chunk* chunk = chunk_;
+ while (chunk != nullptr) {
+ Chunk* next = chunk->next;
+ delete chunk;
+ chunk = next;
+ }
+ }
+
+ // The slot offset specifies a slot at address page_start_ + offset.
+ void Insert(SlotType type, int offset) {
+ TypedSlot slot = ToTypedSlot(type, offset);
+ if (!chunk_->AddSlot(slot)) {
+ chunk_ = new Chunk(chunk_, NextCapacity(chunk_->capacity));
+ bool added = chunk_->AddSlot(slot);
+ DCHECK(added);
+ USE(added);
+ }
+ }
+
+ // Iterate over all slots in the set and for each slot invoke the callback.
+ // If the callback returns REMOVE_SLOT then the slot is removed from the set.
+ // Returns the new number of slots.
+ //
+ // Sample usage:
+ // Iterate([](SlotType slot_type, Address slot_address) {
+ // if (good(slot_type, slot_address)) return KEEP_SLOT;
+ // else return REMOVE_SLOT;
+ // });
+ template <typename Callback>
+ int Iterate(Callback callback) {
+ STATIC_ASSERT(NUMBER_OF_SLOT_TYPES < 8);
+ const TypedSlot kRemovedSlot = TypeField::encode(NUMBER_OF_SLOT_TYPES);
+ Chunk* chunk = chunk_;
+ int new_count = 0;
+ while (chunk != nullptr) {
+ TypedSlot* buffer = chunk->buffer;
+ int count = chunk->count;
+ for (int i = 0; i < count; i++) {
+ TypedSlot slot = buffer[i];
+ if (slot != kRemovedSlot) {
+ SlotType type = TypeField::decode(slot);
+ Address addr = page_start_ + OffsetField::decode(slot);
+ if (callback(type, addr) == KEEP_SLOT) {
+ new_count++;
+ } else {
+ buffer[i] = kRemovedSlot;
+ }
+ }
+ }
+ chunk = chunk->next;
+ }
+ return new_count;
+ }
+
+ private:
+ static const int kInitialBufferSize = 100;
+ static const int kMaxBufferSize = 16 * KB;
+
+ static int NextCapacity(int capacity) {
+ return Min(kMaxBufferSize, capacity * 2);
+ }
+
+ static TypedSlot ToTypedSlot(SlotType type, int offset) {
+ return TypeField::encode(type) | OffsetField::encode(offset);
+ }
+
+ class OffsetField : public BitField<int, 0, 29> {};
+ class TypeField : public BitField<SlotType, 29, 3> {};
+
+ struct Chunk : Malloced {
+ explicit Chunk(Chunk* next_chunk, int capacity)
+ : next(next_chunk), count(0), capacity(capacity) {
+ buffer = NewArray<TypedSlot>(capacity);
+ }
+ bool AddSlot(TypedSlot slot) {
+ if (count == capacity) return false;
+ buffer[count++] = slot;
+ return true;
+ }
+ ~Chunk() { DeleteArray(buffer); }
+ Chunk* next;
+ int count;
+ int capacity;
+ TypedSlot* buffer;
+ };
+
+ Address page_start_;
+ Chunk* chunk_;
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/slots-buffer.cc b/deps/v8/src/heap/slots-buffer.cc
deleted file mode 100644
index 5a3db281fd..0000000000
--- a/deps/v8/src/heap/slots-buffer.cc
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/heap/slots-buffer.h"
-
-#include "src/assembler.h"
-#include "src/heap/heap.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
- return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
-}
-
-
-bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
- SlotsBuffer** buffer_address, SlotType type,
- Address addr, AdditionMode mode) {
- SlotsBuffer* buffer = *buffer_address;
- if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
- if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
- allocator->DeallocateChain(buffer_address);
- return false;
- }
- buffer = allocator->AllocateBuffer(buffer);
- *buffer_address = buffer;
- }
- DCHECK(buffer->HasSpaceForTypedSlot());
- buffer->Add(reinterpret_cast<ObjectSlot>(type));
- buffer->Add(reinterpret_cast<ObjectSlot>(addr));
- return true;
-}
-
-
-void SlotsBuffer::RemoveInvalidSlots(Heap* heap, SlotsBuffer* buffer) {
- // Remove entries by replacing them with an old-space slot containing a smi
- // that is located in an unmovable page.
- const ObjectSlot kRemovedEntry = HeapObject::RawField(
- heap->empty_fixed_array(), FixedArrayBase::kLengthOffset);
- DCHECK(Page::FromAddress(reinterpret_cast<Address>(kRemovedEntry))
- ->NeverEvacuate());
-
- while (buffer != NULL) {
- SlotsBuffer::ObjectSlot* slots = buffer->slots_;
- intptr_t slots_count = buffer->idx_;
-
- for (int slot_idx = 0; slot_idx < slots_count; ++slot_idx) {
- ObjectSlot slot = slots[slot_idx];
- if (!IsTypedSlot(slot)) {
- Object* object = *slot;
- // Slots are invalid when they currently:
- // - do not point to a heap object (SMI)
- // - point to a heap object in new space
- // - are not within a live heap object on a valid pointer slot
- // - point to a heap object not on an evacuation candidate
- // TODO(mlippautz): Move InNewSpace check above IsSlotInLiveObject once
- // we filter out unboxed double slots eagerly.
- if (!object->IsHeapObject() ||
- !heap->mark_compact_collector()->IsSlotInLiveObject(
- reinterpret_cast<Address>(slot)) ||
- heap->InNewSpace(object) ||
- !Page::FromAddress(reinterpret_cast<Address>(object))
- ->IsEvacuationCandidate()) {
- // TODO(hpayer): Instead of replacing slots with kRemovedEntry we
- // could shrink the slots buffer in-place.
- slots[slot_idx] = kRemovedEntry;
- }
- } else {
- ++slot_idx;
- DCHECK(slot_idx < slots_count);
- }
- }
- buffer = buffer->next();
- }
-}
-
-
-void SlotsBuffer::RemoveObjectSlots(Heap* heap, SlotsBuffer* buffer,
- Address start_slot, Address end_slot) {
- // Remove entries by replacing them with an old-space slot containing a smi
- // that is located in an unmovable page.
- const ObjectSlot kRemovedEntry = HeapObject::RawField(
- heap->empty_fixed_array(), FixedArrayBase::kLengthOffset);
- DCHECK(Page::FromAddress(reinterpret_cast<Address>(kRemovedEntry))
- ->NeverEvacuate());
-
- while (buffer != NULL) {
- SlotsBuffer::ObjectSlot* slots = buffer->slots_;
- intptr_t slots_count = buffer->idx_;
- bool is_typed_slot = false;
-
- for (int slot_idx = 0; slot_idx < slots_count; ++slot_idx) {
- ObjectSlot slot = slots[slot_idx];
- if (!IsTypedSlot(slot)) {
- Address slot_address = reinterpret_cast<Address>(slot);
- if (slot_address >= start_slot && slot_address < end_slot) {
- // TODO(hpayer): Instead of replacing slots with kRemovedEntry we
- // could shrink the slots buffer in-place.
- slots[slot_idx] = kRemovedEntry;
- if (is_typed_slot) {
- slots[slot_idx - 1] = kRemovedEntry;
- }
- }
- is_typed_slot = false;
- } else {
- is_typed_slot = true;
- DCHECK(slot_idx < slots_count);
- }
- }
- buffer = buffer->next();
- }
-}
-
-
-void SlotsBuffer::VerifySlots(Heap* heap, SlotsBuffer* buffer) {
- while (buffer != NULL) {
- SlotsBuffer::ObjectSlot* slots = buffer->slots_;
- intptr_t slots_count = buffer->idx_;
-
- for (int slot_idx = 0; slot_idx < slots_count; ++slot_idx) {
- ObjectSlot slot = slots[slot_idx];
- if (!IsTypedSlot(slot)) {
- Object* object = *slot;
- if (object->IsHeapObject()) {
- HeapObject* heap_object = HeapObject::cast(object);
- CHECK(!heap->InNewSpace(object));
- heap->mark_compact_collector()->VerifyIsSlotInLiveObject(
- reinterpret_cast<Address>(slot), heap_object);
- }
- } else {
- ++slot_idx;
- DCHECK(slot_idx < slots_count);
- }
- }
- buffer = buffer->next();
- }
-}
-
-
-SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
- return new SlotsBuffer(next_buffer);
-}
-
-
-void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
- delete buffer;
-}
-
-
-void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
- SlotsBuffer* buffer = *buffer_address;
- while (buffer != NULL) {
- SlotsBuffer* next_buffer = buffer->next();
- DeallocateBuffer(buffer);
- buffer = next_buffer;
- }
- *buffer_address = NULL;
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/heap/slots-buffer.h b/deps/v8/src/heap/slots-buffer.h
deleted file mode 100644
index dc6c922963..0000000000
--- a/deps/v8/src/heap/slots-buffer.h
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_SLOTS_BUFFER_H_
-#define V8_HEAP_SLOTS_BUFFER_H_
-
-#include "src/objects.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class SlotsBuffer;
-
-
-// SlotsBufferAllocator manages the allocation and deallocation of slots buffer
-// chunks and links them together. Slots buffer chunks are always created by the
-// SlotsBufferAllocator.
-class SlotsBufferAllocator {
- public:
- SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
- void DeallocateBuffer(SlotsBuffer* buffer);
-
- void DeallocateChain(SlotsBuffer** buffer_address);
-};
-
-
-// SlotsBuffer records a sequence of slots that has to be updated
-// after live objects were relocated from evacuation candidates.
-// All slots are either untyped or typed:
-// - Untyped slots are expected to contain a tagged object pointer.
-// They are recorded by an address.
-// - Typed slots are expected to contain an encoded pointer to a heap
-// object where the way of encoding depends on the type of the slot.
-// They are recorded as a pair (SlotType, slot address).
-// We assume that zero-page is never mapped this allows us to distinguish
-// untyped slots from typed slots during iteration by a simple comparison:
-// if element of slots buffer is less than NUMBER_OF_SLOT_TYPES then it
-// is the first element of typed slot's pair.
-class SlotsBuffer {
- public:
- typedef Object** ObjectSlot;
-
- explicit SlotsBuffer(SlotsBuffer* next_buffer)
- : idx_(0), chain_length_(1), next_(next_buffer) {
- if (next_ != NULL) {
- chain_length_ = next_->chain_length_ + 1;
- }
- }
-
- ~SlotsBuffer() {}
-
- void Add(ObjectSlot slot) {
- DCHECK(0 <= idx_ && idx_ < kNumberOfElements);
-#ifdef DEBUG
- if (slot >= reinterpret_cast<ObjectSlot>(NUMBER_OF_SLOT_TYPES)) {
- DCHECK_NOT_NULL(*slot);
- }
-#endif
- slots_[idx_++] = slot;
- }
-
- ObjectSlot Get(intptr_t i) {
- DCHECK(i >= 0 && i < kNumberOfElements);
- return slots_[i];
- }
-
- size_t Size() {
- DCHECK(idx_ <= kNumberOfElements);
- return idx_;
- }
-
- enum SlotType {
- EMBEDDED_OBJECT_SLOT,
- OBJECT_SLOT,
- RELOCATED_CODE_OBJECT,
- CELL_TARGET_SLOT,
- CODE_TARGET_SLOT,
- CODE_ENTRY_SLOT,
- DEBUG_TARGET_SLOT,
- NUMBER_OF_SLOT_TYPES
- };
-
- static const char* SlotTypeToString(SlotType type) {
- switch (type) {
- case EMBEDDED_OBJECT_SLOT:
- return "EMBEDDED_OBJECT_SLOT";
- case OBJECT_SLOT:
- return "OBJECT_SLOT";
- case RELOCATED_CODE_OBJECT:
- return "RELOCATED_CODE_OBJECT";
- case CELL_TARGET_SLOT:
- return "CELL_TARGET_SLOT";
- case CODE_TARGET_SLOT:
- return "CODE_TARGET_SLOT";
- case CODE_ENTRY_SLOT:
- return "CODE_ENTRY_SLOT";
- case DEBUG_TARGET_SLOT:
- return "DEBUG_TARGET_SLOT";
- case NUMBER_OF_SLOT_TYPES:
- return "NUMBER_OF_SLOT_TYPES";
- }
- return "UNKNOWN SlotType";
- }
-
- SlotsBuffer* next() { return next_; }
-
- static int SizeOfChain(SlotsBuffer* buffer) {
- if (buffer == NULL) return 0;
- return static_cast<int>(buffer->idx_ +
- (buffer->chain_length_ - 1) * kNumberOfElements);
- }
-
- inline bool IsFull() { return idx_ == kNumberOfElements; }
-
- inline bool HasSpaceForTypedSlot() { return idx_ < kNumberOfElements - 1; }
-
- enum AdditionMode { FAIL_ON_OVERFLOW, IGNORE_OVERFLOW };
-
- static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
- return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
- }
-
- INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
- SlotsBuffer** buffer_address, ObjectSlot slot,
- AdditionMode mode)) {
- SlotsBuffer* buffer = *buffer_address;
- if (buffer == NULL || buffer->IsFull()) {
- if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
- allocator->DeallocateChain(buffer_address);
- return false;
- }
- buffer = allocator->AllocateBuffer(buffer);
- *buffer_address = buffer;
- }
- buffer->Add(slot);
- return true;
- }
-
- static bool IsTypedSlot(ObjectSlot slot);
-
- static bool AddTo(SlotsBufferAllocator* allocator,
- SlotsBuffer** buffer_address, SlotType type, Address addr,
- AdditionMode mode);
-
- // Eliminates all stale entries from the slots buffer, i.e., slots that
- // are not part of live objects anymore. This method must be called after
- // marking, when the whole transitive closure is known and must be called
- // before sweeping when mark bits are still intact.
- static void RemoveInvalidSlots(Heap* heap, SlotsBuffer* buffer);
-
- // Eliminate all slots that are within the given address range.
- static void RemoveObjectSlots(Heap* heap, SlotsBuffer* buffer,
- Address start_slot, Address end_slot);
-
- // Ensures that there are no invalid slots in the chain of slots buffers.
- static void VerifySlots(Heap* heap, SlotsBuffer* buffer);
-
- static const int kNumberOfElements = 1021;
-
- private:
- static const int kChainLengthThreshold = 15;
-
- intptr_t idx_;
- intptr_t chain_length_;
- SlotsBuffer* next_;
- ObjectSlot slots_[kNumberOfElements];
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_HEAP_SLOTS_BUFFER_H_
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index c89df597ec..135498f69d 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -25,6 +25,11 @@ void Bitmap::Clear(MemoryChunk* chunk) {
chunk->ResetLiveBytes();
}
+void Bitmap::SetAllBits(MemoryChunk* chunk) {
+ Bitmap* bitmap = chunk->markbits();
+ for (int i = 0; i < bitmap->CellsCount(); i++)
+ bitmap->cells()[i] = 0xffffffff;
+}
// -----------------------------------------------------------------------------
// PageIterator
@@ -147,6 +152,19 @@ HeapObject* HeapObjectIterator::FromCurrentPage() {
return NULL;
}
+// -----------------------------------------------------------------------------
+// LargePageIterator
+
+LargePageIterator::LargePageIterator(LargeObjectSpace* space)
+ : next_page_(space->first_page()) {}
+
+LargePage* LargePageIterator::next() {
+ LargePage* result = next_page_;
+ if (next_page_ != nullptr) {
+ next_page_ = next_page_->next_page();
+ }
+ return result;
+}
// -----------------------------------------------------------------------------
// MemoryAllocator
@@ -233,6 +251,19 @@ AllocationSpace AllocationResult::RetrySpace() {
return static_cast<AllocationSpace>(Smi::cast(object_)->value());
}
+NewSpacePage* NewSpacePage::Initialize(Heap* heap, MemoryChunk* chunk,
+ Executability executable,
+ SemiSpace* owner) {
+ DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
+ bool in_to_space = (owner->id() != kFromSpace);
+ chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
+ : MemoryChunk::IN_FROM_SPACE);
+ DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
+ : MemoryChunk::IN_TO_SPACE));
+ NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
+ heap->incremental_marking()->SetNewSpacePageFlags(page);
+ return page;
+}
// --------------------------------------------------------------------------
// PagedSpace
@@ -243,14 +274,23 @@ Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
page->mutex_ = new base::Mutex();
DCHECK(page->area_size() <= kAllocatableMemory);
DCHECK(chunk->owner() == owner);
- owner->IncreaseCapacity(page->area_size());
- owner->Free(page->area_start(), page->area_size());
+ owner->IncreaseCapacity(page->area_size());
heap->incremental_marking()->SetOldSpacePageFlags(chunk);
+ // Make sure that categories are initialized before freeing the area.
+ page->InitializeFreeListCategories();
+ owner->Free(page->area_start(), page->area_size());
+
return page;
}
+void Page::InitializeFreeListCategories() {
+ for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ categories_[i].Initialize(static_cast<FreeListCategoryType>(i));
+ }
+}
+
void MemoryChunk::IncrementLiveBytesFromGC(HeapObject* object, int by) {
MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by);
}
@@ -264,6 +304,7 @@ void MemoryChunk::ResetLiveBytes() {
}
void MemoryChunk::IncrementLiveBytes(int by) {
+ if (IsFlagSet(BLACK_PAGE)) return;
if (FLAG_trace_live_bytes) {
PrintIsolate(heap()->isolate(),
"live-bytes: update page=%p delta=%d %d->%d\n", this, by,
@@ -284,17 +325,35 @@ void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
bool PagedSpace::Contains(Address addr) {
Page* p = Page::FromAddress(addr);
- if (!Page::IsValid(p)) return false;
+ if (!p->is_valid()) return false;
return p->owner() == this;
}
bool PagedSpace::Contains(Object* o) {
if (!o->IsHeapObject()) return false;
Page* p = Page::FromAddress(HeapObject::cast(o)->address());
- if (!Page::IsValid(p)) return false;
+ if (!p->is_valid()) return false;
return p->owner() == this;
}
+void PagedSpace::UnlinkFreeListCategories(Page* page) {
+ DCHECK_EQ(this, page->owner());
+ page->ForAllFreeListCategories([this](FreeListCategory* category) {
+ DCHECK_EQ(free_list(), category->owner());
+ free_list()->RemoveCategory(category);
+ });
+}
+
+intptr_t PagedSpace::RelinkFreeListCategories(Page* page) {
+ DCHECK_EQ(this, page->owner());
+ intptr_t added = 0;
+ page->ForAllFreeListCategories([&added](FreeListCategory* category) {
+ added += category->available();
+ category->Relink();
+ });
+ return added;
+}
+
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
uintptr_t offset = addr - chunk->address();
@@ -308,15 +367,36 @@ Page* Page::FromAnyPointerAddress(Heap* heap, Address addr) {
return static_cast<Page*>(MemoryChunk::FromAnyPointerAddress(heap, addr));
}
+void Page::MarkNeverAllocateForTesting() {
+ DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
+ SetFlag(NEVER_ALLOCATE_ON_PAGE);
+ reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this);
+}
+
+void Page::MarkEvacuationCandidate() {
+ DCHECK(!IsFlagSet(NEVER_EVACUATE));
+ DCHECK_NULL(old_to_old_slots_);
+ DCHECK_NULL(typed_old_to_old_slots_);
+ SetFlag(EVACUATION_CANDIDATE);
+ reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this);
+}
+
+void Page::ClearEvacuationCandidate() {
+ DCHECK_NULL(old_to_old_slots_);
+ DCHECK_NULL(typed_old_to_old_slots_);
+ ClearFlag(EVACUATION_CANDIDATE);
+ InitializeFreeListCategories();
+}
-PointerChunkIterator::PointerChunkIterator(Heap* heap)
+MemoryChunkIterator::MemoryChunkIterator(Heap* heap, Mode mode)
: state_(kOldSpaceState),
+ mode_(mode),
old_iterator_(heap->old_space()),
+ code_iterator_(heap->code_space()),
map_iterator_(heap->map_space()),
lo_iterator_(heap->lo_space()) {}
-
-MemoryChunk* PointerChunkIterator::next() {
+MemoryChunk* MemoryChunkIterator::next() {
switch (state_) {
case kOldSpaceState: {
if (old_iterator_.has_next()) {
@@ -326,47 +406,59 @@ MemoryChunk* PointerChunkIterator::next() {
// Fall through.
}
case kMapState: {
- if (map_iterator_.has_next()) {
+ if (mode_ != ALL_BUT_MAP_SPACE && map_iterator_.has_next()) {
return map_iterator_.next();
}
+ state_ = kCodeState;
+ // Fall through.
+ }
+ case kCodeState: {
+ if (mode_ != ALL_BUT_CODE_SPACE && code_iterator_.has_next()) {
+ return code_iterator_.next();
+ }
state_ = kLargeObjectState;
// Fall through.
}
case kLargeObjectState: {
- HeapObject* heap_object;
- do {
- heap_object = lo_iterator_.Next();
- if (heap_object == NULL) {
- state_ = kFinishedState;
- return NULL;
- }
- // Fixed arrays are the only pointer-containing objects in large
- // object space.
- } while (!heap_object->IsFixedArray());
- MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
- return answer;
+ MemoryChunk* answer = lo_iterator_.next();
+ if (answer != nullptr) {
+ return answer;
+ }
+ state_ = kFinishedState;
+ // Fall through;
}
case kFinishedState:
- return NULL;
+ return nullptr;
default:
break;
}
UNREACHABLE();
- return NULL;
+ return nullptr;
}
-
void Page::set_next_page(Page* page) {
DCHECK(page->owner() == owner());
set_next_chunk(page);
}
-
void Page::set_prev_page(Page* page) {
DCHECK(page->owner() == owner());
set_prev_chunk(page);
}
+Page* FreeListCategory::page() {
+ return Page::FromAddress(reinterpret_cast<Address>(this));
+}
+
+FreeList* FreeListCategory::owner() {
+ return reinterpret_cast<PagedSpace*>(
+ Page::FromAddress(reinterpret_cast<Address>(this))->owner())
+ ->free_list();
+}
+
+bool FreeListCategory::is_linked() {
+ return prev_ != nullptr || next_ != nullptr || owner()->top(type_) == this;
+}
// Try linear allocation in the page of alloc_info's allocation top. Does
// not contain slow case logic (e.g. move to the next page or try free list
@@ -420,7 +512,8 @@ HeapObject* PagedSpace::AllocateLinearlyAligned(int* size_in_bytes,
// Raw allocation.
-AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
+AllocationResult PagedSpace::AllocateRawUnaligned(
+ int size_in_bytes, UpdateSkipList update_skip_list) {
HeapObject* object = AllocateLinearly(size_in_bytes);
if (object == NULL) {
@@ -431,7 +524,7 @@ AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
}
if (object != NULL) {
- if (identity() == CODE_SPACE) {
+ if (update_skip_list == UPDATE_SKIP_LIST && identity() == CODE_SPACE) {
SkipList::Update(object->address(), size_in_bytes);
}
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index e3b851d97a..a0a37523b2 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -8,7 +8,6 @@
#include "src/base/platform/platform.h"
#include "src/full-codegen/full-codegen.h"
#include "src/heap/slot-set.h"
-#include "src/heap/slots-buffer.h"
#include "src/macro-assembler.h"
#include "src/msan.h"
#include "src/snapshot/snapshot.h"
@@ -316,15 +315,18 @@ bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
void MemoryAllocator::TearDown() {
+ for (MemoryChunk* chunk : chunk_pool_) {
+ FreeMemory(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize,
+ NOT_EXECUTABLE);
+ }
// Check that spaces were torn down before MemoryAllocator.
- DCHECK(size_.Value() == 0);
+ DCHECK_EQ(size_.Value(), 0);
// TODO(gc) this will be true again when we fix FreeMemory.
// DCHECK(size_executable_ == 0);
capacity_ = 0;
capacity_executable_ = 0;
}
-
bool MemoryAllocator::CommitMemory(Address base, size_t size,
Executability executable) {
if (!base::VirtualMemory::CommitRegion(base, size,
@@ -336,20 +338,6 @@ bool MemoryAllocator::CommitMemory(Address base, size_t size,
}
-void MemoryAllocator::FreeNewSpaceMemory(Address addr,
- base::VirtualMemory* reservation,
- Executability executable) {
- LOG(isolate_, DeleteEvent("NewSpace", addr));
-
- DCHECK(reservation->IsReserved());
- const intptr_t size = static_cast<intptr_t>(reservation->size());
- DCHECK(size_.Value() >= size);
- size_.Increment(-size);
- isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
- FreeMemory(reservation, NOT_EXECUTABLE);
-}
-
-
void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
Executability executable) {
// TODO(gc) make code_range part of memory allocator?
@@ -434,26 +422,6 @@ void Page::InitializeAsAnchor(PagedSpace* owner) {
set_next_page(this);
}
-
-NewSpacePage* NewSpacePage::Initialize(Heap* heap, Address start,
- SemiSpace* semi_space) {
- Address area_start = start + NewSpacePage::kObjectStartOffset;
- Address area_end = start + Page::kPageSize;
-
- MemoryChunk* chunk =
- MemoryChunk::Initialize(heap, start, Page::kPageSize, area_start,
- area_end, NOT_EXECUTABLE, semi_space, nullptr);
- bool in_to_space = (semi_space->id() != kFromSpace);
- chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
- : MemoryChunk::IN_FROM_SPACE);
- DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
- : MemoryChunk::IN_TO_SPACE));
- NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
- heap->incremental_marking()->SetNewSpacePageFlags(page);
- return page;
-}
-
-
void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
set_owner(semi_space);
set_next_chunk(this);
@@ -478,15 +446,14 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->flags_ = 0;
chunk->set_owner(owner);
chunk->InitializeReservedMemory();
- chunk->slots_buffer_ = nullptr;
chunk->old_to_new_slots_ = nullptr;
chunk->old_to_old_slots_ = nullptr;
+ chunk->typed_old_to_old_slots_ = nullptr;
chunk->skip_list_ = nullptr;
chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
chunk->progress_bar_ = 0;
chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
- chunk->parallel_compaction_state().SetValue(kCompactingDone);
chunk->mutex_ = nullptr;
chunk->available_in_free_list_ = 0;
chunk->wasted_memory_ = 0;
@@ -717,21 +684,16 @@ void Page::ResetFreeListStatistics() {
available_in_free_list_ = 0;
}
-
-Page* MemoryAllocator::AllocatePage(intptr_t size, PagedSpace* owner,
- Executability executable) {
- MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
- if (chunk == NULL) return NULL;
- return Page::Initialize(isolate_->heap(), chunk, executable, owner);
-}
-
-
LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
Space* owner,
Executability executable) {
MemoryChunk* chunk =
AllocateChunk(object_size, object_size, executable, owner);
if (chunk == NULL) return NULL;
+ if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
+ STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
+ FATAL("Code page is too large.");
+ }
return LargePage::Initialize(isolate_->heap(), chunk);
}
@@ -780,12 +742,77 @@ void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
}
}
-
+template <MemoryAllocator::AllocationMode mode>
void MemoryAllocator::Free(MemoryChunk* chunk) {
- PreFreeMemory(chunk);
- PerformFreeMemory(chunk);
+ if (mode == kRegular) {
+ PreFreeMemory(chunk);
+ PerformFreeMemory(chunk);
+ } else {
+ DCHECK_EQ(mode, kPooled);
+ FreePooled(chunk);
+ }
}
+template void MemoryAllocator::Free<MemoryAllocator::kRegular>(
+ MemoryChunk* chunk);
+
+template void MemoryAllocator::Free<MemoryAllocator::kPooled>(
+ MemoryChunk* chunk);
+
+template <typename PageType, MemoryAllocator::AllocationMode mode,
+ typename SpaceType>
+PageType* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner,
+ Executability executable) {
+ MemoryChunk* chunk = nullptr;
+ if (mode == kPooled) {
+ DCHECK_EQ(size, static_cast<intptr_t>(MemoryChunk::kAllocatableMemory));
+ DCHECK_EQ(executable, NOT_EXECUTABLE);
+ chunk = AllocatePagePooled(owner);
+ }
+ if (chunk == nullptr) {
+ chunk = AllocateChunk(size, size, executable, owner);
+ }
+ if (chunk == nullptr) return nullptr;
+ return PageType::Initialize(isolate_->heap(), chunk, executable, owner);
+}
+
+template Page* MemoryAllocator::AllocatePage<Page, MemoryAllocator::kRegular,
+ PagedSpace>(intptr_t, PagedSpace*,
+ Executability);
+
+template NewSpacePage* MemoryAllocator::AllocatePage<
+ NewSpacePage, MemoryAllocator::kPooled, SemiSpace>(intptr_t, SemiSpace*,
+ Executability);
+
+template <typename SpaceType>
+MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
+ if (chunk_pool_.is_empty()) return nullptr;
+ const int size = MemoryChunk::kPageSize;
+ MemoryChunk* chunk = chunk_pool_.RemoveLast();
+ const Address start = reinterpret_cast<Address>(chunk);
+ const Address area_start = start + MemoryChunk::kObjectStartOffset;
+ const Address area_end = start + size;
+ if (!CommitBlock(reinterpret_cast<Address>(chunk), size, NOT_EXECUTABLE)) {
+ return nullptr;
+ }
+ base::VirtualMemory reservation(start, size);
+ MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
+ NOT_EXECUTABLE, owner, &reservation);
+ size_.Increment(size);
+ return chunk;
+}
+
+void MemoryAllocator::FreePooled(MemoryChunk* chunk) {
+ DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
+ DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
+ chunk_pool_.Add(chunk);
+ intptr_t chunk_size = static_cast<intptr_t>(chunk->size());
+ if (chunk->executable() == EXECUTABLE) {
+ size_executable_.Increment(-chunk_size);
+ }
+ size_.Increment(-chunk_size);
+ UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
+}
bool MemoryAllocator::CommitBlock(Address start, size_t size,
Executability executable) {
@@ -932,8 +959,6 @@ bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
// MemoryChunk implementation
void MemoryChunk::ReleaseAllocatedMemory() {
- delete slots_buffer_;
- slots_buffer_ = nullptr;
delete skip_list_;
skip_list_ = nullptr;
delete mutex_;
@@ -972,6 +997,15 @@ void MemoryChunk::ReleaseOldToOldSlots() {
old_to_old_slots_ = nullptr;
}
+void MemoryChunk::AllocateTypedOldToOldSlots() {
+ DCHECK(nullptr == typed_old_to_old_slots_);
+ typed_old_to_old_slots_ = new TypedSlotSet(address());
+}
+
+void MemoryChunk::ReleaseTypedOldToOldSlots() {
+ delete typed_old_to_old_slots_;
+ typed_old_to_old_slots_ = nullptr;
+}
// -----------------------------------------------------------------------------
// PagedSpace implementation
@@ -1021,79 +1055,46 @@ void PagedSpace::TearDown() {
accounting_stats_.Clear();
}
-
-void PagedSpace::AddMemory(Address start, intptr_t size) {
- accounting_stats_.ExpandSpace(static_cast<int>(size));
- Free(start, static_cast<int>(size));
-}
-
-
void PagedSpace::RefillFreeList() {
- MarkCompactCollector* collector = heap()->mark_compact_collector();
- FreeList* free_list = nullptr;
- if (this == heap()->old_space()) {
- free_list = collector->free_list_old_space().get();
- } else if (this == heap()->code_space()) {
- free_list = collector->free_list_code_space().get();
- } else if (this == heap()->map_space()) {
- free_list = collector->free_list_map_space().get();
- } else {
- // Any PagedSpace might invoke RefillFreeList. We filter all but our old
- // generation spaces out.
+ // Any PagedSpace might invoke RefillFreeList. We filter all but our old
+ // generation spaces out.
+ if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
+ identity() != MAP_SPACE) {
return;
}
- DCHECK(free_list != nullptr);
- intptr_t added = free_list_.Concatenate(free_list);
- accounting_stats_.IncreaseCapacity(added);
-}
-
-
-void CompactionSpace::RefillFreeList() {
MarkCompactCollector* collector = heap()->mark_compact_collector();
- FreeList* free_list = nullptr;
- if (identity() == OLD_SPACE) {
- free_list = collector->free_list_old_space().get();
- } else if (identity() == CODE_SPACE) {
- free_list = collector->free_list_code_space().get();
- } else {
- // Compaction spaces only represent old or code space.
- UNREACHABLE();
- }
- DCHECK(free_list != nullptr);
- intptr_t refilled = 0;
- while (refilled < kCompactionMemoryWanted) {
- FreeSpace* node =
- free_list->TryRemoveMemory(kCompactionMemoryWanted - refilled);
- if (node == nullptr) return;
- refilled += node->size();
- AddMemory(node->address(), node->size());
+ List<Page*>* swept_pages = collector->swept_pages(identity());
+ intptr_t added = 0;
+ {
+ base::LockGuard<base::Mutex> guard(collector->swept_pages_mutex());
+ for (int i = swept_pages->length() - 1; i >= 0; --i) {
+ Page* p = (*swept_pages)[i];
+ // Only during compaction pages can actually change ownership. This is
+ // safe because there exists no other competing action on the page links
+ // during compaction.
+ if (is_local() && (p->owner() != this)) {
+ if (added > kCompactionMemoryWanted) break;
+ base::LockGuard<base::Mutex> guard(
+ reinterpret_cast<PagedSpace*>(p->owner())->mutex());
+ p->Unlink();
+ p->set_owner(this);
+ p->InsertAfter(anchor_.prev_page());
+ }
+ added += RelinkFreeListCategories(p);
+ added += p->wasted_memory();
+ swept_pages->Remove(i);
+ }
}
-}
-
-void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
- DCHECK(identity() == other->identity());
- // Destroy the linear allocation space of {other}. This is needed to
- // (a) not waste the memory and
- // (b) keep the rest of the chunk in an iterable state (filler is needed).
- other->EmptyAllocationInfo();
-
- // Move over the free list. Concatenate makes sure that the source free list
- // gets properly reset after moving over all nodes.
- intptr_t added = free_list_.Concatenate(other->free_list());
-
- // Moved memory is not recorded as allocated memory, but rather increases and
- // decreases capacity of the corresponding spaces.
- other->accounting_stats_.DecreaseCapacity(added);
accounting_stats_.IncreaseCapacity(added);
}
-
void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
+ DCHECK(identity() == other->identity());
// Unmerged fields:
// area_size_
// anchor_
- MoveOverFreeMemory(other);
+ other->EmptyAllocationInfo();
// Update and clear accounting statistics.
accounting_stats_.Merge(other->accounting_stats_);
@@ -1110,9 +1111,14 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
Page* p = nullptr;
while (it.has_next()) {
p = it.next();
+
+ // Relinking requires the category to be unlinked.
+ other->UnlinkFreeListCategories(p);
+
p->Unlink();
p->set_owner(this);
p->InsertAfter(anchor_.prev_page());
+ RelinkFreeListCategories(p);
}
}
@@ -1178,8 +1184,8 @@ bool PagedSpace::Expand() {
if (!CanExpand(size)) return false;
- Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this,
- executable());
+ Page* p = heap()->isolate()->memory_allocator()->AllocatePage<Page>(
+ size, this, executable());
if (p == NULL) return false;
AccountCommitted(static_cast<intptr_t>(p->size()));
@@ -1187,6 +1193,17 @@ bool PagedSpace::Expand() {
// Pages created during bootstrapping may contain immortal immovable objects.
if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
+ // When incremental marking was activated, old space pages are allocated
+ // black.
+ if (heap()->incremental_marking()->black_allocation() &&
+ identity() == OLD_SPACE) {
+ Bitmap::SetAllBits(p);
+ p->SetFlag(Page::BLACK_PAGE);
+ if (FLAG_trace_incremental_marking) {
+ PrintIsolate(heap()->isolate(), "Added black page %p\n", p);
+ }
+ }
+
DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
p->InsertAfter(anchor_.prev_page());
@@ -1219,17 +1236,12 @@ void PagedSpace::IncreaseCapacity(int size) {
accounting_stats_.ExpandSpace(size);
}
+void PagedSpace::ReleasePage(Page* page) {
+ DCHECK_EQ(page->LiveBytes(), 0);
+ DCHECK_EQ(AreaSize(), page->area_size());
+ DCHECK_EQ(page->owner(), this);
-void PagedSpace::ReleasePage(Page* page, bool evict_free_list_items) {
- DCHECK(page->LiveBytes() == 0);
- DCHECK(AreaSize() == page->area_size());
-
- if (evict_free_list_items) {
- intptr_t size = free_list_.EvictFreeListItems(page);
- accounting_stats_.AllocateBytes(size);
- DCHECK_EQ(AreaSize(), static_cast<int>(size));
- }
-
+ free_list_.EvictFreeListItems(page);
DCHECK(!free_list_.ContainsPageFreeListItems(page));
if (Page::FromAllocationTop(allocation_info_.top()) == page) {
@@ -1249,7 +1261,6 @@ void PagedSpace::ReleasePage(Page* page, bool evict_free_list_items) {
accounting_stats_.ShrinkSpace(AreaSize());
}
-
#ifdef DEBUG
void PagedSpace::Print() {}
#endif
@@ -1288,7 +1299,8 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
// All the interior pointers should be contained in the heap.
int size = object->Size();
object->IterateBody(map->instance_type(), size, visitor);
- if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
+ if (!page->IsFlagSet(Page::BLACK_PAGE) &&
+ Marking::IsBlack(Marking::MarkBitFrom(object))) {
black_size += size;
}
@@ -1304,55 +1316,28 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
// -----------------------------------------------------------------------------
// NewSpace implementation
-
-bool NewSpace::SetUp(int reserved_semispace_capacity,
+bool NewSpace::SetUp(int initial_semispace_capacity,
int maximum_semispace_capacity) {
- // Set up new space based on the preallocated memory block defined by
- // start and size. The provided space is divided into two semi-spaces.
- // To support fast containment testing in the new space, the size of
- // this chunk must be a power of two and it must be aligned to its size.
- int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
-
- size_t size = 2 * reserved_semispace_capacity;
- Address base = heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
- size, size, &reservation_);
- if (base == NULL) return false;
-
- chunk_base_ = base;
- chunk_size_ = static_cast<uintptr_t>(size);
- LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
-
DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
DCHECK(base::bits::IsPowerOfTwo32(maximum_semispace_capacity));
+ to_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
+ from_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
+ if (!to_space_.Commit()) {
+ return false;
+ }
+ DCHECK(!from_space_.is_committed()); // No need to use memory yet.
+ ResetAllocationInfo();
+
// Allocate and set up the histogram arrays if necessary.
allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
-
#define SET_NAME(name) \
allocated_histogram_[name].set_name(#name); \
promoted_histogram_[name].set_name(#name);
INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME
- DCHECK(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
- DCHECK(static_cast<intptr_t>(chunk_size_) >=
- 2 * heap()->ReservedSemiSpaceSize());
- DCHECK(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
-
- to_space_.SetUp(chunk_base_, initial_semispace_capacity,
- maximum_semispace_capacity);
- from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
- initial_semispace_capacity, maximum_semispace_capacity);
- if (!to_space_.Commit()) {
- return false;
- }
- DCHECK(!from_space_.is_committed()); // No need to use memory yet.
-
- start_ = chunk_base_;
-
- ResetAllocationInfo();
-
return true;
}
@@ -1367,18 +1352,10 @@ void NewSpace::TearDown() {
promoted_histogram_ = NULL;
}
- start_ = NULL;
allocation_info_.Reset(nullptr, nullptr);
-
to_space_.TearDown();
from_space_.TearDown();
-
- heap()->isolate()->memory_allocator()->FreeNewSpaceMemory(
- chunk_base_, &reservation_, NOT_EXECUTABLE);
-
- chunk_base_ = NULL;
- chunk_size_ = 0;
}
@@ -1432,7 +1409,8 @@ void LocalAllocationBuffer::Close() {
if (IsValid()) {
heap_->CreateFillerObjectAt(
allocation_info_.top(),
- static_cast<int>(allocation_info_.limit() - allocation_info_.top()));
+ static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
+ ClearRecordedSlots::kNo);
}
}
@@ -1443,7 +1421,8 @@ LocalAllocationBuffer::LocalAllocationBuffer(Heap* heap,
if (IsValid()) {
heap_->CreateFillerObjectAt(
allocation_info_.top(),
- static_cast<int>(allocation_info_.limit() - allocation_info_.top()));
+ static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
+ ClearRecordedSlots::kNo);
}
}
@@ -1526,7 +1505,7 @@ bool NewSpace::AddFreshPage() {
}
int remaining_in_page = static_cast<int>(limit - top);
- heap()->CreateFillerObjectAt(top, remaining_in_page);
+ heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
pages_used_++;
UpdateAllocationInfo();
@@ -1662,7 +1641,7 @@ void NewSpace::Verify() {
// The object should not be code or a map.
CHECK(!object->IsMap());
- CHECK(!object->IsCode());
+ CHECK(!object->IsAbstractCode());
// The object itself should look OK.
object->ObjectVerify();
@@ -1693,43 +1672,45 @@ void NewSpace::Verify() {
// -----------------------------------------------------------------------------
// SemiSpace implementation
-void SemiSpace::SetUp(Address start, int initial_capacity,
- int maximum_capacity) {
+void SemiSpace::SetUp(int initial_capacity, int maximum_capacity) {
DCHECK_GE(maximum_capacity, Page::kPageSize);
minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
current_capacity_ = minimum_capacity_;
maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
committed_ = false;
- start_ = start;
- age_mark_ = start_ + NewSpacePage::kObjectStartOffset;
}
void SemiSpace::TearDown() {
- start_ = nullptr;
- current_capacity_ = 0;
+ // Properly uncommit memory to keep the allocator counters in sync.
+ if (is_committed()) Uncommit();
+ current_capacity_ = maximum_capacity_ = 0;
}
bool SemiSpace::Commit() {
DCHECK(!is_committed());
- if (!heap()->isolate()->memory_allocator()->CommitBlock(
- start_, current_capacity_, executable())) {
- return false;
- }
- AccountCommitted(current_capacity_);
-
NewSpacePage* current = anchor();
const int num_pages = current_capacity_ / Page::kPageSize;
- for (int i = 0; i < num_pages; i++) {
+ for (int pages_added = 0; pages_added < num_pages; pages_added++) {
NewSpacePage* new_page =
- NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
+ heap()
+ ->isolate()
+ ->memory_allocator()
+ ->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>(
+ NewSpacePage::kAllocatableMemory, this, executable());
+ if (new_page == nullptr) {
+ RewindPages(current, pages_added);
+ return false;
+ }
new_page->InsertAfter(current);
current = new_page;
}
Reset();
-
- set_current_capacity(current_capacity_);
+ AccountCommitted(current_capacity_);
+ if (age_mark_ == nullptr) {
+ age_mark_ = first_page()->area_start();
+ }
committed_ = true;
return true;
}
@@ -1737,16 +1718,14 @@ bool SemiSpace::Commit() {
bool SemiSpace::Uncommit() {
DCHECK(is_committed());
- Address start = start_ + maximum_capacity_ - current_capacity_;
- if (!heap()->isolate()->memory_allocator()->UncommitBlock(
- start, current_capacity_)) {
- return false;
+ NewSpacePageIterator it(this);
+ while (it.has_next()) {
+ heap()->isolate()->memory_allocator()->Free<MemoryAllocator::kPooled>(
+ it.next());
}
- AccountUncommitted(current_capacity_);
-
anchor()->set_next_page(anchor());
anchor()->set_prev_page(anchor());
-
+ AccountUncommitted(current_capacity_);
committed_ = false;
return true;
}
@@ -1767,27 +1746,25 @@ bool SemiSpace::GrowTo(int new_capacity) {
if (!is_committed()) {
if (!Commit()) return false;
}
- DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
+ DCHECK_EQ(new_capacity & NewSpacePage::kPageAlignmentMask, 0);
DCHECK_LE(new_capacity, maximum_capacity_);
DCHECK_GT(new_capacity, current_capacity_);
- int pages_before = current_capacity_ / Page::kPageSize;
- int pages_after = new_capacity / Page::kPageSize;
-
- size_t delta = new_capacity - current_capacity_;
-
+ const int delta = new_capacity - current_capacity_;
DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
- if (!heap()->isolate()->memory_allocator()->CommitBlock(
- start_ + current_capacity_, delta, executable())) {
- return false;
- }
- AccountCommitted(static_cast<intptr_t>(delta));
- set_current_capacity(new_capacity);
+ int delta_pages = delta / NewSpacePage::kPageSize;
NewSpacePage* last_page = anchor()->prev_page();
DCHECK_NE(last_page, anchor());
- for (int i = pages_before; i < pages_after; i++) {
- Address page_address = start_ + i * Page::kPageSize;
+ for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
NewSpacePage* new_page =
- NewSpacePage::Initialize(heap(), page_address, this);
+ heap()
+ ->isolate()
+ ->memory_allocator()
+ ->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>(
+ NewSpacePage::kAllocatableMemory, this, executable());
+ if (new_page == nullptr) {
+ RewindPages(last_page, pages_added);
+ return false;
+ }
new_page->InsertAfter(last_page);
Bitmap::Clear(new_page);
// Duplicate the flags that was set on the old page.
@@ -1795,34 +1772,46 @@ bool SemiSpace::GrowTo(int new_capacity) {
NewSpacePage::kCopyOnFlipFlagsMask);
last_page = new_page;
}
+ AccountCommitted(static_cast<intptr_t>(delta));
+ current_capacity_ = new_capacity;
return true;
}
+void SemiSpace::RewindPages(NewSpacePage* start, int num_pages) {
+ NewSpacePage* new_last_page = nullptr;
+ NewSpacePage* last_page = start;
+ while (num_pages > 0) {
+ DCHECK_NE(last_page, anchor());
+ new_last_page = last_page->prev_page();
+ last_page->prev_page()->set_next_page(last_page->next_page());
+ last_page->next_page()->set_prev_page(last_page->prev_page());
+ last_page = new_last_page;
+ num_pages--;
+ }
+}
bool SemiSpace::ShrinkTo(int new_capacity) {
- DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
+ DCHECK_EQ(new_capacity & NewSpacePage::kPageAlignmentMask, 0);
DCHECK_GE(new_capacity, minimum_capacity_);
DCHECK_LT(new_capacity, current_capacity_);
if (is_committed()) {
- size_t delta = current_capacity_ - new_capacity;
+ const int delta = current_capacity_ - new_capacity;
DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
-
- MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
- if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
- return false;
+ int delta_pages = delta / NewSpacePage::kPageSize;
+ NewSpacePage* new_last_page;
+ NewSpacePage* last_page;
+ while (delta_pages > 0) {
+ last_page = anchor()->prev_page();
+ new_last_page = last_page->prev_page();
+ new_last_page->set_next_page(anchor());
+ anchor()->set_prev_page(new_last_page);
+ heap()->isolate()->memory_allocator()->Free<MemoryAllocator::kPooled>(
+ last_page);
+ delta_pages--;
}
AccountUncommitted(static_cast<intptr_t>(delta));
-
- int pages_after = new_capacity / Page::kPageSize;
- NewSpacePage* new_last_page =
- NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize);
- new_last_page->set_next_page(anchor());
- anchor()->set_prev_page(new_last_page);
- DCHECK((current_page_ >= first_page()) && (current_page_ <= new_last_page));
}
-
- set_current_capacity(new_capacity);
-
+ current_capacity_ = new_capacity;
return true;
}
@@ -1869,7 +1858,6 @@ void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
std::swap(from->current_capacity_, to->current_capacity_);
std::swap(from->maximum_capacity_, to->maximum_capacity_);
std::swap(from->minimum_capacity_, to->minimum_capacity_);
- std::swap(from->start_, to->start_);
std::swap(from->age_mark_, to->age_mark_);
std::swap(from->committed_, to->committed_);
std::swap(from->anchor_, to->anchor_);
@@ -2153,137 +2141,54 @@ size_t NewSpace::CommittedPhysicalMemory() {
// -----------------------------------------------------------------------------
// Free lists for old object spaces implementation
-intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
- intptr_t free_bytes = 0;
- if (category->top() != NULL) {
- DCHECK(category->end_ != NULL);
- free_bytes = category->available();
- if (end_ == NULL) {
- end_ = category->end();
- } else {
- category->end()->set_next(top());
- }
- set_top(category->top());
- available_ += category->available();
- category->Reset();
- }
- return free_bytes;
-}
-
void FreeListCategory::Reset() {
set_top(nullptr);
- set_end(nullptr);
+ set_prev(nullptr);
+ set_next(nullptr);
available_ = 0;
}
-
-intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
- intptr_t sum = 0;
- FreeSpace* prev_node = nullptr;
- for (FreeSpace* cur_node = top(); cur_node != nullptr;
- cur_node = cur_node->next()) {
- Page* page_for_node = Page::FromAddress(cur_node->address());
- if (page_for_node == p) {
- // FreeSpace node on eviction page found, unlink it.
- int size = cur_node->size();
- sum += size;
- DCHECK((prev_node != nullptr) || (top() == cur_node));
- if (cur_node == top()) {
- set_top(cur_node->next());
- }
- if (cur_node == end()) {
- set_end(prev_node);
- }
- if (prev_node != nullptr) {
- prev_node->set_next(cur_node->next());
- }
- continue;
- }
- prev_node = cur_node;
- }
- p->add_available_in_free_list(-sum);
- available_ -= sum;
- return sum;
-}
-
-
-bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
- FreeSpace* node = top();
- while (node != NULL) {
- if (Page::FromAddress(node->address()) == p) return true;
- node = node->next();
- }
- return false;
-}
-
-
FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
+ DCHECK(page()->CanAllocate());
+
FreeSpace* node = top();
if (node == nullptr) return nullptr;
-
- Page* page = Page::FromAddress(node->address());
- while ((node != nullptr) && !page->CanAllocate()) {
- available_ -= node->size();
- page->add_available_in_free_list(-(node->Size()));
- node = node->next();
- }
-
- if (node != nullptr) {
- set_top(node->next());
- *node_size = node->Size();
- available_ -= *node_size;
- } else {
- set_top(nullptr);
- }
-
- if (top() == nullptr) {
- set_end(nullptr);
- }
-
+ set_top(node->next());
+ *node_size = node->Size();
+ available_ -= *node_size;
return node;
}
+FreeSpace* FreeListCategory::TryPickNodeFromList(int minimum_size,
+ int* node_size) {
+ DCHECK(page()->CanAllocate());
-FreeSpace* FreeListCategory::PickNodeFromList(int size_in_bytes,
- int* node_size) {
FreeSpace* node = PickNodeFromList(node_size);
- if ((node != nullptr) && (*node_size < size_in_bytes)) {
- Free(node, *node_size);
+ if ((node != nullptr) && (*node_size < minimum_size)) {
+ Free(node, *node_size, kLinkCategory);
*node_size = 0;
return nullptr;
}
return node;
}
-
-FreeSpace* FreeListCategory::SearchForNodeInList(int size_in_bytes,
+FreeSpace* FreeListCategory::SearchForNodeInList(int minimum_size,
int* node_size) {
+ DCHECK(page()->CanAllocate());
+
FreeSpace* prev_non_evac_node = nullptr;
for (FreeSpace* cur_node = top(); cur_node != nullptr;
cur_node = cur_node->next()) {
int size = cur_node->size();
- Page* page_for_node = Page::FromAddress(cur_node->address());
-
- if ((size >= size_in_bytes) || !page_for_node->CanAllocate()) {
- // The node is either large enough or contained in an evacuation
- // candidate. In both cases we need to unlink it from the list.
+ if (size >= minimum_size) {
available_ -= size;
if (cur_node == top()) {
set_top(cur_node->next());
}
- if (cur_node == end()) {
- set_end(prev_non_evac_node);
- }
if (prev_non_evac_node != nullptr) {
prev_non_evac_node->set_next(cur_node->next());
}
- // For evacuation candidates we continue.
- if (!page_for_node->CanAllocate()) {
- page_for_node->add_available_in_free_list(-size);
- continue;
- }
- // Otherwise we have a large enough node and can return.
*node_size = size;
return cur_node;
}
@@ -2293,14 +2198,17 @@ FreeSpace* FreeListCategory::SearchForNodeInList(int size_in_bytes,
return nullptr;
}
+bool FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes,
+ FreeMode mode) {
+ if (!page()->CanAllocate()) return false;
-void FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes) {
free_space->set_next(top());
set_top(free_space);
- if (end_ == NULL) {
- end_ = free_space;
- }
available_ += size_in_bytes;
+ if ((mode == kLinkCategory) && (prev() == nullptr) && (next() == nullptr)) {
+ owner()->AddCategory(this);
+ }
+ return true;
}
@@ -2317,59 +2225,46 @@ void FreeListCategory::RepairFreeList(Heap* heap) {
}
}
-FreeList::FreeList(PagedSpace* owner) : owner_(owner), wasted_bytes_(0) {
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
- category_[i].Initialize(this, static_cast<FreeListCategoryType>(i));
- }
- Reset();
+void FreeListCategory::Relink() {
+ DCHECK(!is_linked());
+ owner()->AddCategory(this);
}
+void FreeListCategory::Invalidate() {
+ page()->add_available_in_free_list(-available());
+ Reset();
+ type_ = kInvalidCategory;
+}
-intptr_t FreeList::Concatenate(FreeList* other) {
- intptr_t usable_bytes = 0;
- intptr_t wasted_bytes = 0;
-
- // This is safe (not going to deadlock) since Concatenate operations
- // are never performed on the same free lists at the same time in
- // reverse order. Furthermore, we only lock if the PagedSpace containing
- // the free list is know to be globally available, i.e., not local.
- if (!owner()->is_local()) mutex_.Lock();
- if (!other->owner()->is_local()) other->mutex()->Lock();
-
- wasted_bytes = other->wasted_bytes_;
- wasted_bytes_ += wasted_bytes;
- other->wasted_bytes_ = 0;
-
+FreeList::FreeList(PagedSpace* owner) : owner_(owner), wasted_bytes_(0) {
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
- usable_bytes += category_[i].Concatenate(
- other->GetFreeListCategory(static_cast<FreeListCategoryType>(i)));
+ categories_[i] = nullptr;
}
-
- if (!other->owner()->is_local()) other->mutex()->Unlock();
- if (!owner()->is_local()) mutex_.Unlock();
- return usable_bytes + wasted_bytes;
+ Reset();
}
void FreeList::Reset() {
+ ForAllFreeListCategories(
+ [](FreeListCategory* category) { category->Reset(); });
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
- category_[i].Reset();
+ categories_[i] = nullptr;
}
ResetStats();
}
-
-int FreeList::Free(Address start, int size_in_bytes) {
+int FreeList::Free(Address start, int size_in_bytes, FreeMode mode) {
if (size_in_bytes == 0) return 0;
- owner()->heap()->CreateFillerObjectAt(start, size_in_bytes);
+ owner()->heap()->CreateFillerObjectAt(start, size_in_bytes,
+ ClearRecordedSlots::kNo);
Page* page = Page::FromAddress(start);
- // Early return to drop too-small blocks on the floor.
- if (size_in_bytes <= kSmallListMin) {
+ // Blocks have to be a minimum size to hold free list items.
+ if (size_in_bytes < kMinBlockSize) {
page->add_wasted_memory(size_in_bytes);
- wasted_bytes_ += size_in_bytes;
+ wasted_bytes_.Increment(size_in_bytes);
return size_in_bytes;
}
@@ -2377,16 +2272,34 @@ int FreeList::Free(Address start, int size_in_bytes) {
// Insert other blocks at the head of a free list of the appropriate
// magnitude.
FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
- category_[type].Free(free_space, size_in_bytes);
- page->add_available_in_free_list(size_in_bytes);
-
- DCHECK(IsVeryLong() || Available() == SumFreeLists());
+ if (page->free_list_category(type)->Free(free_space, size_in_bytes, mode)) {
+ page->add_available_in_free_list(size_in_bytes);
+ }
return 0;
}
+FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, int* node_size) {
+ FreeListCategoryIterator it(this, type);
+ FreeSpace* node = nullptr;
+ while (it.HasNext()) {
+ FreeListCategory* current = it.Next();
+ node = current->PickNodeFromList(node_size);
+ if (node != nullptr) {
+ Page::FromAddress(node->address())
+ ->add_available_in_free_list(-(*node_size));
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
+ return node;
+ }
+ RemoveCategory(current);
+ }
+ return node;
+}
-FreeSpace* FreeList::FindNodeIn(FreeListCategoryType category, int* node_size) {
- FreeSpace* node = GetFreeListCategory(category)->PickNodeFromList(node_size);
+FreeSpace* FreeList::TryFindNodeIn(FreeListCategoryType type, int* node_size,
+ int minimum_size) {
+ if (categories_[type] == nullptr) return nullptr;
+ FreeSpace* node =
+ categories_[type]->TryPickNodeFromList(minimum_size, node_size);
if (node != nullptr) {
Page::FromAddress(node->address())
->add_available_in_free_list(-(*node_size));
@@ -2395,10 +2308,25 @@ FreeSpace* FreeList::FindNodeIn(FreeListCategoryType category, int* node_size) {
return node;
}
+FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
+ int* node_size, int minimum_size) {
+ FreeListCategoryIterator it(this, type);
+ FreeSpace* node = nullptr;
+ while (it.HasNext()) {
+ FreeListCategory* current = it.Next();
+ node = current->SearchForNodeInList(minimum_size, node_size);
+ if (node != nullptr) {
+ Page::FromAddress(node->address())
+ ->add_available_in_free_list(-(*node_size));
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
+ return node;
+ }
+ }
+ return node;
+}
FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
FreeSpace* node = nullptr;
- Page* page = nullptr;
// First try the allocation fast path: try to allocate the minimum element
// size of a free list category. This operation is constant time.
@@ -2411,10 +2339,8 @@ FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
// Next search the huge list for free list nodes. This takes linear time in
// the number of huge elements.
- node = category_[kHuge].SearchForNodeInList(size_in_bytes, node_size);
+ node = SearchForNodeInList(kHuge, node_size, size_in_bytes);
if (node != nullptr) {
- page = Page::FromAddress(node->address());
- page->add_available_in_free_list(-(*node_size));
DCHECK(IsVeryLong() || Available() == SumFreeLists());
return node;
}
@@ -2424,51 +2350,14 @@ FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
if (type == kHuge) return nullptr;
// Now search the best fitting free list for a node that has at least the
- // requested size. This takes linear time in the number of elements.
+ // requested size.
type = SelectFreeListCategoryType(size_in_bytes);
- node = category_[type].PickNodeFromList(size_in_bytes, node_size);
- if (node != nullptr) {
- DCHECK(size_in_bytes <= *node_size);
- page = Page::FromAddress(node->address());
- page->add_available_in_free_list(-(*node_size));
- }
+ node = TryFindNodeIn(type, node_size, size_in_bytes);
DCHECK(IsVeryLong() || Available() == SumFreeLists());
return node;
}
-
-FreeSpace* FreeList::TryRemoveMemory(intptr_t hint_size_in_bytes) {
- hint_size_in_bytes = RoundDown(hint_size_in_bytes, kPointerSize);
- base::LockGuard<base::Mutex> guard(&mutex_);
- FreeSpace* node = nullptr;
- int node_size = 0;
- // Try to find a node that fits exactly.
- node = FindNodeFor(static_cast<int>(hint_size_in_bytes), &node_size);
- // If no node could be found get as much memory as possible.
- if (node == nullptr) node = FindNodeIn(kHuge, &node_size);
- if (node == nullptr) node = FindNodeIn(kLarge, &node_size);
- if (node != nullptr) {
- // We round up the size to (kSmallListMin + kPointerSize) to (a) have a
- // size larger then the minimum size required for FreeSpace, and (b) to get
- // a block that can actually be freed into some FreeList later on.
- if (hint_size_in_bytes <= kSmallListMin) {
- hint_size_in_bytes = kSmallListMin + kPointerSize;
- }
- // Give back left overs that were not required by {size_in_bytes}.
- intptr_t left_over = node_size - hint_size_in_bytes;
-
- // Do not bother to return anything below {kSmallListMin} as it would be
- // immediately discarded anyways.
- if (left_over > kSmallListMin) {
- Free(node->address() + hint_size_in_bytes, static_cast<int>(left_over));
- node->set_size(static_cast<int>(hint_size_in_bytes));
- }
- }
- return node;
-}
-
-
// Allocation on the old space free list. If it succeeds then a new linear
// allocation space has been set up with the top and limit of the space. If
// the allocation fails then NULL is returned, and the caller can perform a GC
@@ -2542,32 +2431,76 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
return new_node;
}
-
-intptr_t FreeList::EvictFreeListItems(Page* p) {
- intptr_t sum = category_[kHuge].EvictFreeListItemsInList(p);
- if (sum < p->area_size()) {
- for (int i = kFirstCategory; i <= kLarge; i++) {
- sum += category_[i].EvictFreeListItemsInList(p);
- }
- }
+intptr_t FreeList::EvictFreeListItems(Page* page) {
+ intptr_t sum = 0;
+ page->ForAllFreeListCategories(
+ [this, &sum, page](FreeListCategory* category) {
+ DCHECK_EQ(this, category->owner());
+ sum += category->available();
+ RemoveCategory(category);
+ category->Invalidate();
+ });
return sum;
}
+bool FreeList::ContainsPageFreeListItems(Page* page) {
+ bool contained = false;
+ page->ForAllFreeListCategories(
+ [this, &contained](FreeListCategory* category) {
+ if (category->owner() == this && category->is_linked()) {
+ contained = true;
+ }
+ });
+ return contained;
+}
+
+void FreeList::RepairLists(Heap* heap) {
+ ForAllFreeListCategories(
+ [heap](FreeListCategory* category) { category->RepairFreeList(heap); });
+}
-bool FreeList::ContainsPageFreeListItems(Page* p) {
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
- if (category_[i].EvictFreeListItemsInList(p)) {
- return true;
- }
+bool FreeList::AddCategory(FreeListCategory* category) {
+ FreeListCategoryType type = category->type_;
+ FreeListCategory* top = categories_[type];
+
+ if (category->is_empty()) return false;
+ if (top == category) return false;
+
+ // Common double-linked list insertion.
+ if (top != nullptr) {
+ top->set_prev(category);
}
- return false;
+ category->set_next(top);
+ categories_[type] = category;
+ return true;
}
+void FreeList::RemoveCategory(FreeListCategory* category) {
+ FreeListCategoryType type = category->type_;
+ FreeListCategory* top = categories_[type];
-void FreeList::RepairLists(Heap* heap) {
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
- category_[i].RepairFreeList(heap);
+ // Common double-linked list removal.
+ if (top == category) {
+ categories_[type] = category->next();
}
+ if (category->prev() != nullptr) {
+ category->prev()->set_next(category->next());
+ }
+ if (category->next() != nullptr) {
+ category->next()->set_prev(category->prev());
+ }
+ category->set_next(nullptr);
+ category->set_prev(nullptr);
+}
+
+void FreeList::PrintCategories(FreeListCategoryType type) {
+ FreeListCategoryIterator it(this, type);
+ PrintF("FreeList[%p, top=%p, %d] ", this, categories_[type], type);
+ while (it.HasNext()) {
+ FreeListCategory* current = it.Next();
+ PrintF("%p -> ", current);
+ }
+ PrintF("null\n");
}
@@ -2583,7 +2516,6 @@ intptr_t FreeListCategory::SumFreeList() {
return sum;
}
-
int FreeListCategory::FreeListLength() {
int length = 0;
FreeSpace* cur = top();
@@ -2595,16 +2527,13 @@ int FreeListCategory::FreeListLength() {
return length;
}
-
-bool FreeListCategory::IsVeryLong() {
- return FreeListLength() == kVeryLongFreeList;
-}
-
-
bool FreeList::IsVeryLong() {
+ int len = 0;
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
- if (category_[i].IsVeryLong()) {
- return true;
+ FreeListCategoryIterator it(this, static_cast<FreeListCategoryType>(i));
+ while (it.HasNext()) {
+ len += it.Next()->FreeListLength();
+ if (len >= FreeListCategory::kVeryLongFreeList) return true;
}
}
return false;
@@ -2616,9 +2545,8 @@ bool FreeList::IsVeryLong() {
// kVeryLongFreeList.
intptr_t FreeList::SumFreeLists() {
intptr_t sum = 0;
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
- sum += category_[i].SumFreeList();
- }
+ ForAllFreeListCategories(
+ [&sum](FreeListCategory* category) { sum += category->SumFreeList(); });
return sum;
}
#endif
@@ -2660,7 +2588,7 @@ void PagedSpace::RepairFreeListsAfterDeserialization() {
int size = static_cast<int>(page->wasted_memory());
if (size == 0) continue;
Address address = page->OffsetToAddress(Page::kPageSize - size);
- heap()->CreateFillerObjectAt(address, size);
+ heap()->CreateFillerObjectAt(address, size, ClearRecordedSlots::kNo);
}
}
@@ -2672,7 +2600,8 @@ void PagedSpace::EvictEvacuationCandidatesFromLinearAllocationArea() {
// Create filler object to keep page iterable if it was iterable.
int remaining =
static_cast<int>(allocation_info_.limit() - allocation_info_.top());
- heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
+ heap()->CreateFillerObjectAt(allocation_info_.top(), remaining,
+ ClearRecordedSlots::kNo);
allocation_info_.Reset(nullptr, nullptr);
}
}
@@ -2855,9 +2784,14 @@ void PagedSpace::CollectCodeStatistics() {
Isolate* isolate = heap()->isolate();
HeapObjectIterator obj_it(this);
for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
+ if (obj->IsAbstractCode()) {
+ AbstractCode* code = AbstractCode::cast(obj);
+ isolate->code_kind_statistics()[code->kind()] += code->Size();
+ }
if (obj->IsCode()) {
+ // TODO(mythria): Also enable this for BytecodeArray when it supports
+ // RelocInformation.
Code* code = Code::cast(obj);
- isolate->code_kind_statistics()[code->kind()] += code->Size();
RelocIterator it(code);
int delta = 0;
const byte* prev_pc = code->instruction_start();
@@ -2998,7 +2932,6 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
}
HeapObject* object = page->GetObject();
-
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size);
if (Heap::ShouldZapGarbage()) {
@@ -3044,7 +2977,7 @@ LargePage* LargeObjectSpace::FindPage(Address a) {
if (e != NULL) {
DCHECK(e->value != NULL);
LargePage* page = reinterpret_cast<LargePage*>(e->value);
- DCHECK(LargePage::IsValid(page));
+ DCHECK(page->is_valid());
if (page->Contains(a)) {
return page;
}
@@ -3088,8 +3021,6 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
}
// Free the chunk.
- heap()->mark_compact_collector()->ReportDeleteIfNeeded(object,
- heap()->isolate());
size_ -= static_cast<int>(page->size());
AccountUncommitted(static_cast<intptr_t>(page->size()));
objects_size_ -= object->Size();
@@ -3146,7 +3077,7 @@ void LargeObjectSpace::Verify() {
// (sequential strings that have been morphed into external
// strings), fixed arrays, byte arrays, and constant pool arrays in the
// large object space.
- CHECK(object->IsCode() || object->IsSeqString() ||
+ CHECK(object->IsAbstractCode() || object->IsSeqString() ||
object->IsExternalString() || object->IsFixedArray() ||
object->IsFixedDoubleArray() || object->IsByteArray());
@@ -3154,7 +3085,7 @@ void LargeObjectSpace::Verify() {
object->ObjectVerify();
// Byte arrays and strings don't have interior pointers.
- if (object->IsCode()) {
+ if (object->IsAbstractCode()) {
VerifyPointersVisitor code_visitor;
object->IterateBody(map->instance_type(), object->Size(), &code_visitor);
} else if (object->IsFixedArray()) {
@@ -3205,8 +3136,8 @@ void LargeObjectSpace::CollectCodeStatistics() {
Isolate* isolate = heap()->isolate();
LargeObjectIterator obj_it(this);
for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
- if (obj->IsCode()) {
- Code* code = Code::cast(obj);
+ if (obj->IsAbstractCode()) {
+ AbstractCode* code = AbstractCode::cast(obj);
isolate->code_kind_statistics()[code->kind()] += code->Size();
}
}
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 4e3c0002cf..93a81cc933 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -27,11 +27,14 @@ class FreeList;
class Isolate;
class MemoryAllocator;
class MemoryChunk;
+class NewSpacePage;
+class Page;
class PagedSpace;
class SemiSpace;
class SkipList;
class SlotsBuffer;
class SlotSet;
+class TypedSlotSet;
class Space;
// -----------------------------------------------------------------------------
@@ -105,10 +108,6 @@ class Space;
#define DCHECK_PAGE_OFFSET(offset) \
DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))
-#define DCHECK_MAP_PAGE_INDEX(index) \
- DCHECK((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
-
-
class MarkBit {
public:
typedef uint32_t CellType;
@@ -204,6 +203,8 @@ class Bitmap {
static inline void Clear(MemoryChunk* chunk);
+ static inline void SetAllBits(MemoryChunk* chunk);
+
static void PrintWord(uint32_t word, uint32_t himask = 0) {
for (uint32_t mask = 1; mask != 0; mask <<= 1) {
if ((mask & himask) != 0) PrintF("[");
@@ -288,6 +289,113 @@ class Bitmap {
}
};
+enum FreeListCategoryType {
+ kTiniest,
+ kTiny,
+ kSmall,
+ kMedium,
+ kLarge,
+ kHuge,
+
+ kFirstCategory = kTiniest,
+ kLastCategory = kHuge,
+ kNumberOfCategories = kLastCategory + 1,
+ kInvalidCategory
+};
+
+enum FreeMode { kLinkCategory, kDoNotLinkCategory };
+
+// A free list category maintains a linked list of free memory blocks.
+class FreeListCategory {
+ public:
+ static const int kSize = kIntSize + // FreeListCategoryType type_
+ kIntSize + // int available_
+ kPointerSize + // FreeSpace* top_
+ kPointerSize + // FreeListCategory* prev_
+ kPointerSize; // FreeListCategory* next_
+
+ FreeListCategory()
+ : type_(kInvalidCategory),
+ available_(0),
+ top_(nullptr),
+ prev_(nullptr),
+ next_(nullptr) {}
+
+ void Initialize(FreeListCategoryType type) {
+ type_ = type;
+ available_ = 0;
+ top_ = nullptr;
+ prev_ = nullptr;
+ next_ = nullptr;
+ }
+
+ void Invalidate();
+
+ void Reset();
+
+ void ResetStats() { Reset(); }
+
+ void RepairFreeList(Heap* heap);
+
+ // Relinks the category into the currently owning free list. Requires that the
+ // category is currently unlinked.
+ void Relink();
+
+ bool Free(FreeSpace* node, int size_in_bytes, FreeMode mode);
+
+ // Picks a node from the list and stores its size in |node_size|. Returns
+ // nullptr if the category is empty.
+ FreeSpace* PickNodeFromList(int* node_size);
+
+ // Performs a single try to pick a node of at least |minimum_size| from the
+ // category. Stores the actual size in |node_size|. Returns nullptr if no
+ // node is found.
+ FreeSpace* TryPickNodeFromList(int minimum_size, int* node_size);
+
+ // Picks a node of at least |minimum_size| from the category. Stores the
+ // actual size in |node_size|. Returns nullptr if no node is found.
+ FreeSpace* SearchForNodeInList(int minimum_size, int* node_size);
+
+ inline FreeList* owner();
+ inline bool is_linked();
+ bool is_empty() { return top() == nullptr; }
+ int available() const { return available_; }
+
+#ifdef DEBUG
+ intptr_t SumFreeList();
+ int FreeListLength();
+#endif
+
+ private:
+ // For debug builds we accurately compute free lists lengths up until
+ // {kVeryLongFreeList} by manually walking the list.
+ static const int kVeryLongFreeList = 500;
+
+ inline Page* page();
+
+ FreeSpace* top() { return top_; }
+ void set_top(FreeSpace* top) { top_ = top; }
+ FreeListCategory* prev() { return prev_; }
+ void set_prev(FreeListCategory* prev) { prev_ = prev; }
+ FreeListCategory* next() { return next_; }
+ void set_next(FreeListCategory* next) { next_ = next; }
+
+ // |type_|: The type of this free list category.
+ FreeListCategoryType type_;
+
+ // |available_|: Total available bytes in all blocks of this free list
+ // category.
+ int available_;
+
+ // |top_|: Points to the top FreeSpace* in the free list category.
+ FreeSpace* top_;
+
+ FreeListCategory* prev_;
+ FreeListCategory* next_;
+
+ friend class FreeList;
+ friend class PagedSpace;
+};
// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
@@ -303,9 +411,7 @@ class MemoryChunk {
IN_TO_SPACE, // All pages in new space has one of these two set.
NEW_SPACE_BELOW_AGE_MARK,
EVACUATION_CANDIDATE,
- RESCAN_ON_EVACUATION,
NEVER_EVACUATE, // May contain immortal immutables.
- POPULAR_PAGE, // Slots buffer of this page overflowed on the previous GC.
// Large objects can have a progress bar in their page header. These object
// are scanned in increments and will be kept black while being scanned.
@@ -313,6 +419,11 @@ class MemoryChunk {
// to grey transition is performed in the value.
HAS_PROGRESS_BAR,
+ // A black page has all mark bits set to 1 (black). A black page currently
+ // cannot be iterated because it is not swept. Moreover live bytes are also
+ // not updated.
+ BLACK_PAGE,
+
// This flag is intended to be used for testing. Works only when both
// FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
// are set. It forces the page to become an evacuation candidate at next
@@ -334,19 +445,6 @@ class MemoryChunk {
NUM_MEMORY_CHUNK_FLAGS
};
- // |kCompactionDone|: Initial compaction state of a |MemoryChunk|.
- // |kCompactingInProgress|: Parallel compaction is currently in progress.
- // |kCompactingFinalize|: Parallel compaction is done but the chunk needs to
- // be finalized.
- // |kCompactingAborted|: Parallel compaction has been aborted, which should
- // for now only happen in OOM scenarios.
- enum ParallelCompactingState {
- kCompactingDone,
- kCompactingInProgress,
- kCompactingFinalize,
- kCompactingAborted,
- };
-
// |kSweepingDone|: The page state when sweeping is complete or sweeping must
// not be performed on that page. Sweeper threads that are done with their
// work will set this value and not touch the page anymore.
@@ -372,8 +470,7 @@ class MemoryChunk {
static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE;
static const int kSkipEvacuationSlotsRecordingMask =
- (1 << EVACUATION_CANDIDATE) | (1 << RESCAN_ON_EVACUATION) |
- (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);
+ (1 << EVACUATION_CANDIDATE) | (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);
static const intptr_t kAlignment =
(static_cast<uintptr_t>(1) << kPageSizeBits);
@@ -382,6 +479,8 @@ class MemoryChunk {
static const intptr_t kSizeOffset = 0;
+ static const intptr_t kFlagsOffset = kSizeOffset + kPointerSize;
+
static const intptr_t kLiveBytesOffset =
kSizeOffset + kPointerSize // size_t size
+ kIntptrSize // intptr_t flags_
@@ -392,25 +491,26 @@ class MemoryChunk {
+ kPointerSize // Heap* heap_
+ kIntSize; // int progress_bar_
- static const size_t kSlotsBufferOffset =
+ static const size_t kOldToNewSlotsOffset =
kLiveBytesOffset + kIntSize; // int live_byte_count_
static const size_t kWriteBarrierCounterOffset =
- kSlotsBufferOffset + kPointerSize // SlotsBuffer* slots_buffer_;
- + kPointerSize // SlotSet* old_to_new_slots_;
- + kPointerSize // SlotSet* old_to_old_slots_;
- + kPointerSize; // SkipList* skip_list_;
+ kOldToNewSlotsOffset + kPointerSize // SlotSet* old_to_new_slots_;
+ + kPointerSize // SlotSet* old_to_old_slots_;
+ + kPointerSize // TypedSlotSet* typed_old_to_old_slots_;
+ + kPointerSize; // SkipList* skip_list_;
static const size_t kMinHeaderSize =
kWriteBarrierCounterOffset +
kIntptrSize // intptr_t write_barrier_counter_
+ kPointerSize // AtomicValue high_water_mark_
+ kPointerSize // base::Mutex* mutex_
- + kPointerSize // base::AtomicWord parallel_sweeping_
- + kPointerSize // AtomicValue parallel_compaction_
+ + kPointerSize // base::AtomicWord concurrent_sweeping_
+ 2 * kPointerSize // AtomicNumber free-list statistics
+ kPointerSize // AtomicValue next_chunk_
- + kPointerSize; // AtomicValue prev_chunk_
+ + kPointerSize // AtomicValue prev_chunk_
+ // FreeListCategory categories_[kNumberOfCategories]
+ + FreeListCategory::kSize * kNumberOfCategories;
// We add some more space to the computed header size to amount for missing
// alignment requirements in our computation.
@@ -428,7 +528,11 @@ class MemoryChunk {
kBodyOffset - 1 +
(kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
- static const int kFlagsOffset = kPointerSize;
+ // Page size in bytes. This must be a multiple of the OS page size.
+ static const int kPageSize = 1 << kPageSizeBits;
+ static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
+
+ static const int kAllocatableMemory = kPageSize - kObjectStartOffset;
static inline void IncrementLiveBytesFromMutator(HeapObject* object, int by);
static inline void IncrementLiveBytesFromGC(HeapObject* object, int by);
@@ -454,10 +558,10 @@ class MemoryChunk {
!chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
}
- static bool IsValid(MemoryChunk* chunk) { return chunk != nullptr; }
-
Address address() { return reinterpret_cast<Address>(this); }
+ bool is_valid() { return address() != NULL; }
+
base::Mutex* mutex() { return mutex_; }
bool Contains(Address addr) {
@@ -474,20 +578,18 @@ class MemoryChunk {
return concurrent_sweeping_;
}
- AtomicValue<ParallelCompactingState>& parallel_compaction_state() {
- return parallel_compaction_;
- }
-
// Manage live byte count, i.e., count of bytes in black objects.
inline void ResetLiveBytes();
inline void IncrementLiveBytes(int by);
int LiveBytes() {
- DCHECK_LE(static_cast<size_t>(live_byte_count_), size_);
+ DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
+ DCHECK(!IsFlagSet(BLACK_PAGE) || live_byte_count_ == 0);
return live_byte_count_;
}
void SetLiveBytes(int live_bytes) {
+ if (IsFlagSet(BLACK_PAGE)) return;
DCHECK_GE(live_bytes, 0);
DCHECK_LE(static_cast<size_t>(live_bytes), size_);
live_byte_count_ = live_bytes;
@@ -509,17 +611,18 @@ class MemoryChunk {
inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
- inline SlotsBuffer* slots_buffer() { return slots_buffer_; }
-
- inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; }
-
inline SlotSet* old_to_new_slots() { return old_to_new_slots_; }
inline SlotSet* old_to_old_slots() { return old_to_old_slots_; }
+ inline TypedSlotSet* typed_old_to_old_slots() {
+ return typed_old_to_old_slots_;
+ }
void AllocateOldToNewSlots();
void ReleaseOldToNewSlots();
void AllocateOldToOldSlots();
void ReleaseOldToOldSlots();
+ void AllocateTypedOldToOldSlots();
+ void ReleaseTypedOldToOldSlots();
Address area_start() { return area_start_; }
Address area_end() { return area_end_; }
@@ -591,17 +694,6 @@ class MemoryChunk {
return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
}
- void MarkEvacuationCandidate() {
- DCHECK(!IsFlagSet(NEVER_EVACUATE));
- DCHECK_NULL(slots_buffer_);
- SetFlag(EVACUATION_CANDIDATE);
- }
-
- void ClearEvacuationCandidate() {
- DCHECK(slots_buffer_ == NULL);
- ClearFlag(EVACUATION_CANDIDATE);
- }
-
bool ShouldSkipEvacuationSlotRecording() {
return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
}
@@ -683,13 +775,12 @@ class MemoryChunk {
// Count of bytes marked black on page.
int live_byte_count_;
- SlotsBuffer* slots_buffer_;
-
// A single slot set for small pages (of size kPageSize) or an array of slot
// set for large pages. In the latter case the number of entries in the array
// is ceil(size() / kPageSize).
SlotSet* old_to_new_slots_;
SlotSet* old_to_old_slots_;
+ TypedSlotSet* typed_old_to_old_slots_;
SkipList* skip_list_;
@@ -702,7 +793,6 @@ class MemoryChunk {
base::Mutex* mutex_;
AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;
- AtomicValue<ParallelCompactingState> parallel_compaction_;
// PagedSpace free-list statistics.
AtomicNumber<intptr_t> available_in_free_list_;
@@ -713,6 +803,8 @@ class MemoryChunk {
// prev_chunk_ holds a pointer of type MemoryChunk
AtomicValue<MemoryChunk*> prev_chunk_;
+ FreeListCategory categories_[kNumberOfCategories];
+
private:
void InitializeReservedMemory() { reservation_.Reset(); }
@@ -720,17 +812,6 @@ class MemoryChunk {
friend class MemoryChunkValidator;
};
-enum FreeListCategoryType {
- kSmall,
- kMedium,
- kLarge,
- kHuge,
-
- kFirstCategory = kSmall,
- kLastCategory = kHuge,
- kNumberOfCategories = kLastCategory + 1
-};
-
// -----------------------------------------------------------------------------
// A page is a memory chunk of a size 1MB. Large object pages may be larger.
//
@@ -790,9 +871,6 @@ class Page : public MemoryChunk {
// ---------------------------------------------------------------------
- // Page size in bytes. This must be a multiple of the OS page size.
- static const int kPageSize = 1 << kPageSizeBits;
-
// Maximum object size that gets allocated into regular pages. Objects larger
// than that size are allocated in large object space and are never moved in
// memory. This also applies to new space allocation, since objects are never
@@ -802,11 +880,6 @@ class Page : public MemoryChunk {
// short living objects >256K.
static const int kMaxRegularHeapObjectSize = 600 * KB;
- static const int kAllocatableMemory = kPageSize - kObjectStartOffset;
-
- // Page size mask.
- static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
-
inline void ClearGCFields();
static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
@@ -834,6 +907,17 @@ class Page : public MemoryChunk {
available_in_free_list());
}
+ template <typename Callback>
+ inline void ForAllFreeListCategories(Callback callback) {
+ for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ callback(&categories_[i]);
+ }
+ }
+
+ FreeListCategory* free_list_category(FreeListCategoryType type) {
+ return &categories_[type];
+ }
+
#define FRAGMENTATION_STATS_ACCESSORS(type, name) \
type name() { return name##_.Value(); } \
void set_##name(type name) { name##_.SetValue(name); } \
@@ -848,6 +932,13 @@ class Page : public MemoryChunk {
void Print();
#endif // DEBUG
+ inline void MarkNeverAllocateForTesting();
+ inline void MarkEvacuationCandidate();
+ inline void ClearEvacuationCandidate();
+
+ private:
+ inline void InitializeFreeListCategories();
+
friend class MemoryAllocator;
};
@@ -862,6 +953,12 @@ class LargePage : public MemoryChunk {
inline void set_next_page(LargePage* page) { set_next_chunk(page); }
+ // A limit to guarantee that we do not overflow typed slot offset in
+ // the old to old remembered set.
+ // Note that this limit is higher than what assembler already imposes on
+ // x64 and ia32 architectures.
+ static const int kMaxCodePageSize = 512 * MB;
+
private:
static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);
@@ -977,8 +1074,8 @@ class MemoryChunkValidator {
STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
STATIC_ASSERT(MemoryChunk::kLiveBytesOffset ==
offsetof(MemoryChunk, live_byte_count_));
- STATIC_ASSERT(MemoryChunk::kSlotsBufferOffset ==
- offsetof(MemoryChunk, slots_buffer_));
+ STATIC_ASSERT(MemoryChunk::kOldToNewSlotsOffset ==
+ offsetof(MemoryChunk, old_to_new_slots_));
STATIC_ASSERT(MemoryChunk::kWriteBarrierCounterOffset ==
offsetof(MemoryChunk, write_barrier_counter_));
@@ -1104,7 +1201,14 @@ class SkipList {
int start_region = RegionNumber(addr);
int end_region = RegionNumber(addr + size - kPointerSize);
for (int idx = start_region; idx <= end_region; idx++) {
- if (starts_[idx] > addr) starts_[idx] = addr;
+ if (starts_[idx] > addr) {
+ starts_[idx] = addr;
+ } else {
+ // In the first region, there may already be an object closer to the
+ // start of the region. Do not change the start in that case. If this
+ // is not the first region, you probably added overlapping objects.
+ DCHECK_EQ(start_region, idx);
+ }
}
}
@@ -1143,6 +1247,11 @@ class SkipList {
//
class MemoryAllocator {
public:
+ enum AllocationMode {
+ kRegular,
+ kPooled,
+ };
+
explicit MemoryAllocator(Isolate* isolate);
// Initializes its internal bookkeeping structures.
@@ -1151,8 +1260,13 @@ class MemoryAllocator {
void TearDown();
- Page* AllocatePage(intptr_t size, PagedSpace* owner,
- Executability executable);
+ // Allocates either Page or NewSpacePage from the allocator. AllocationMode
+ // is used to indicate whether pooled allocation, which only works for
+ // MemoryChunk::kPageSize, should be tried first.
+ template <typename PageType, MemoryAllocator::AllocationMode mode = kRegular,
+ typename SpaceType>
+ PageType* AllocatePage(intptr_t size, SpaceType* owner,
+ Executability executable);
LargePage* AllocateLargePage(intptr_t object_size, Space* owner,
Executability executable);
@@ -1164,8 +1278,9 @@ class MemoryAllocator {
// FreeMemory can be called concurrently when PreFree was executed before.
void PerformFreeMemory(MemoryChunk* chunk);
- // Free is a wrapper method, which calls PreFree and PerformFreeMemory
- // together.
+ // Free is a wrapper method. For kRegular AllocationMode it calls PreFree and
+ // PerformFreeMemory together. For kPooled it will dispatch to pooled free.
+ template <MemoryAllocator::AllocationMode mode = kRegular>
void Free(MemoryChunk* chunk);
// Returns allocated spaces in bytes.
@@ -1219,8 +1334,6 @@ class MemoryAllocator {
bool CommitMemory(Address addr, size_t size, Executability executable);
- void FreeNewSpaceMemory(Address addr, base::VirtualMemory* reservation,
- Executability executable);
void FreeMemory(base::VirtualMemory* reservation, Executability executable);
void FreeMemory(Address addr, size_t size, Executability executable);
@@ -1273,6 +1386,14 @@ class MemoryAllocator {
size_t reserved_size);
private:
+ // See AllocatePage for public interface. Note that currently we only support
+ // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
+ template <typename SpaceType>
+ MemoryChunk* AllocatePagePooled(SpaceType* owner);
+
+ // Free that chunk into the pool.
+ void FreePooled(MemoryChunk* chunk);
+
Isolate* isolate_;
// Maximum space size in bytes.
@@ -1326,6 +1447,8 @@ class MemoryAllocator {
} while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
}
+ List<MemoryChunk*> chunk_pool_;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};
@@ -1481,12 +1604,6 @@ class AllocationStats BASE_EMBEDDED {
void ClearSize() { size_ = capacity_; }
- // Reset the allocation statistics (i.e., available = capacity with no wasted
- // or allocated bytes).
- void Reset() {
- size_ = 0;
- }
-
// Accessors for the allocation statistics.
intptr_t Capacity() { return capacity_; }
intptr_t MaxCapacity() { return max_capacity_; }
@@ -1513,13 +1630,13 @@ class AllocationStats BASE_EMBEDDED {
void ShrinkSpace(int size_in_bytes) {
capacity_ -= size_in_bytes;
size_ -= size_in_bytes;
- CHECK(size_ >= 0);
+ CHECK_GE(size_, 0);
}
// Allocate from available bytes (available -> size).
void AllocateBytes(intptr_t size_in_bytes) {
size_ += size_in_bytes;
- CHECK(size_ >= 0);
+ CHECK_GE(size_, 0);
}
// Free allocated bytes, making them available (size -> available).
@@ -1558,80 +1675,6 @@ class AllocationStats BASE_EMBEDDED {
intptr_t size_;
};
-
-// A free list category maintains a linked list of free memory blocks.
-class FreeListCategory {
- public:
- FreeListCategory() : top_(nullptr), end_(nullptr), available_(0) {}
-
- void Initialize(FreeList* owner, FreeListCategoryType type) {
- owner_ = owner;
- type_ = type;
- }
-
- // Concatenates {category} into {this}.
- //
- // Note: Thread-safe.
- intptr_t Concatenate(FreeListCategory* category);
-
- void Reset();
-
- void Free(FreeSpace* node, int size_in_bytes);
-
- // Pick a node from the list.
- FreeSpace* PickNodeFromList(int* node_size);
-
- // Pick a node from the list and compare it against {size_in_bytes}. If the
- // node's size is greater or equal return the node and null otherwise.
- FreeSpace* PickNodeFromList(int size_in_bytes, int* node_size);
-
- // Search for a node of size {size_in_bytes}.
- FreeSpace* SearchForNodeInList(int size_in_bytes, int* node_size);
-
- intptr_t EvictFreeListItemsInList(Page* p);
- bool ContainsPageFreeListItemsInList(Page* p);
-
- void RepairFreeList(Heap* heap);
-
- bool IsEmpty() { return top() == nullptr; }
-
- FreeList* owner() { return owner_; }
- int available() const { return available_; }
-
-#ifdef DEBUG
- intptr_t SumFreeList();
- int FreeListLength();
- bool IsVeryLong();
-#endif
-
- private:
- // For debug builds we accurately compute free lists lengths up until
- // {kVeryLongFreeList} by manually walking the list.
- static const int kVeryLongFreeList = 500;
-
- FreeSpace* top() { return top_.Value(); }
- void set_top(FreeSpace* top) { top_.SetValue(top); }
-
- FreeSpace* end() const { return end_; }
- void set_end(FreeSpace* end) { end_ = end; }
-
- // |type_|: The type of this free list category.
- FreeListCategoryType type_;
-
- // |top_|: Points to the top FreeSpace* in the free list category.
- AtomicValue<FreeSpace*> top_;
-
- // |end_|: Points to the end FreeSpace* in the free list category.
- FreeSpace* end_;
-
- // |available_|: Total available bytes in all blocks of this free list
- // category.
- int available_;
-
- // |owner_|: The owning free list of this category.
- FreeList* owner_;
-};
-
// A free list maintaining free blocks of memory. The free list is organized in
// a way to encourage objects allocated around the same time to be near each
// other. The normal way to allocate is intended to be by bumping a 'top'
@@ -1641,9 +1684,10 @@ class FreeListCategory {
// categories would scatter allocation more.
// The free list is organized in categories as follows:
-// 1-31 words (too small): Such small free areas are discarded for efficiency
-// reasons. They can be reclaimed by the compactor. However the distance
-// between top and limit may be this small.
+// kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for
+// allocation, when categories >= small do not have entries anymore.
+// 11-31 words (tiny): The tiny blocks are only used for allocation, when
+// categories >= small do not have entries anymore.
// 32-255 words (small): Used for allocating free space between 1-31 words in
// size.
// 256-2047 words (medium): Used for allocating free space between 32-255 words
@@ -1657,8 +1701,12 @@ class FreeList {
// This method returns how much memory can be allocated after freeing
// maximum_freed memory.
static inline int GuaranteedAllocatable(int maximum_freed) {
- if (maximum_freed <= kSmallListMin) {
+ if (maximum_freed <= kTiniestListMax) {
+ // Since we are not iterating over all list entries, we cannot guarantee
+ // that we can find the maximum freed block in that free list.
return 0;
+ } else if (maximum_freed <= kTinyListMax) {
+ return kTinyAllocationMax;
} else if (maximum_freed <= kSmallListMax) {
return kSmallAllocationMax;
} else if (maximum_freed <= kMediumListMax) {
@@ -1671,19 +1719,13 @@ class FreeList {
explicit FreeList(PagedSpace* owner);
- // The method concatenates {other} into {this} and returns the added bytes,
- // including waste.
- //
- // Note: Thread-safe.
- intptr_t Concatenate(FreeList* other);
-
// Adds a node on the free list. The block of size {size_in_bytes} starting
// at {start} is placed on the free list. The return value is the number of
// bytes that were not added to the free list, because they freed memory block
// was too small. Bookkeeping information will be written to the block, i.e.,
// its contents will be destroyed. The start address should be word aligned,
// and the size should be a non-zero multiple of the word size.
- int Free(Address start, int size_in_bytes);
+ int Free(Address start, int size_in_bytes, FreeMode mode);
// Allocate a block of size {size_in_bytes} from the free list. The block is
// unitialized. A failure is returned if no block is available. The size
@@ -1693,70 +1735,118 @@ class FreeList {
// Clear the free list.
void Reset();
- void ResetStats() { wasted_bytes_ = 0; }
+ void ResetStats() {
+ wasted_bytes_.SetValue(0);
+ ForAllFreeListCategories(
+ [](FreeListCategory* category) { category->ResetStats(); });
+ }
// Return the number of bytes available on the free list.
intptr_t Available() {
intptr_t available = 0;
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
- available += category_[i].available();
- }
+ ForAllFreeListCategories([&available](FreeListCategory* category) {
+ available += category->available();
+ });
return available;
}
- // The method tries to find a {FreeSpace} node of at least {size_in_bytes}
- // size in the free list category exactly matching the size. If no suitable
- // node could be found, the method falls back to retrieving a {FreeSpace}
- // from the large or huge free list category.
- //
- // Can be used concurrently.
- MUST_USE_RESULT FreeSpace* TryRemoveMemory(intptr_t hint_size_in_bytes);
-
bool IsEmpty() {
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
- if (!category_[i].IsEmpty()) return false;
- }
- return true;
+ bool empty = true;
+ ForAllFreeListCategories([&empty](FreeListCategory* category) {
+ if (!category->is_empty()) empty = false;
+ });
+ return empty;
}
// Used after booting the VM.
void RepairLists(Heap* heap);
- intptr_t EvictFreeListItems(Page* p);
- bool ContainsPageFreeListItems(Page* p);
+ intptr_t EvictFreeListItems(Page* page);
+ bool ContainsPageFreeListItems(Page* page);
PagedSpace* owner() { return owner_; }
- intptr_t wasted_bytes() { return wasted_bytes_; }
- base::Mutex* mutex() { return &mutex_; }
+ intptr_t wasted_bytes() { return wasted_bytes_.Value(); }
+
+ template <typename Callback>
+ void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
+ FreeListCategory* current = categories_[type];
+ while (current != nullptr) {
+ FreeListCategory* next = current->next();
+ callback(current);
+ current = next;
+ }
+ }
+
+ template <typename Callback>
+ void ForAllFreeListCategories(Callback callback) {
+ for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
+ }
+ }
+
+ bool AddCategory(FreeListCategory* category);
+ void RemoveCategory(FreeListCategory* category);
+ void PrintCategories(FreeListCategoryType type);
#ifdef DEBUG
- void Zap();
intptr_t SumFreeLists();
bool IsVeryLong();
#endif
private:
+ class FreeListCategoryIterator {
+ public:
+ FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
+ : current_(free_list->categories_[type]) {}
+
+ bool HasNext() { return current_ != nullptr; }
+
+ FreeListCategory* Next() {
+ DCHECK(HasNext());
+ FreeListCategory* tmp = current_;
+ current_ = current_->next();
+ return tmp;
+ }
+
+ private:
+ FreeListCategory* current_;
+ };
+
// The size range of blocks, in bytes.
static const int kMinBlockSize = 3 * kPointerSize;
static const int kMaxBlockSize = Page::kAllocatableMemory;
- static const int kSmallListMin = 0x1f * kPointerSize;
+ static const int kTiniestListMax = 0xa * kPointerSize;
+ static const int kTinyListMax = 0x1f * kPointerSize;
static const int kSmallListMax = 0xff * kPointerSize;
static const int kMediumListMax = 0x7ff * kPointerSize;
static const int kLargeListMax = 0x3fff * kPointerSize;
- static const int kSmallAllocationMax = kSmallListMin;
+ static const int kTinyAllocationMax = kTiniestListMax;
+ static const int kSmallAllocationMax = kTinyListMax;
static const int kMediumAllocationMax = kSmallListMax;
static const int kLargeAllocationMax = kMediumListMax;
FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
- FreeSpace* FindNodeIn(FreeListCategoryType category, int* node_size);
- FreeListCategory* GetFreeListCategory(FreeListCategoryType category) {
- return &category_[category];
- }
+ // Walks all available categories for a given |type| and tries to retrieve
+ // a node. Returns nullptr if the category is empty.
+ FreeSpace* FindNodeIn(FreeListCategoryType type, int* node_size);
+
+ // Tries to retrieve a node from the first category in a given |type|.
+ // Returns nullptr if the category is empty.
+ FreeSpace* TryFindNodeIn(FreeListCategoryType type, int* node_size,
+ int minimum_size);
+
+ // Searches a given |type| for a node of at least |minimum_size|.
+ FreeSpace* SearchForNodeInList(FreeListCategoryType type, int* node_size,
+ int minimum_size);
FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
- if (size_in_bytes <= kSmallListMax) {
+ if (size_in_bytes <= kTiniestListMax) {
+ return kTiniest;
+ } else if (size_in_bytes <= kTinyListMax) {
+ return kTiny;
+ } else if (size_in_bytes <= kSmallListMax) {
return kSmall;
} else if (size_in_bytes <= kMediumListMax) {
return kMedium;
@@ -1766,6 +1856,7 @@ class FreeList {
return kHuge;
}
+ // The tiny categories are not used for fast allocation.
FreeListCategoryType SelectFastAllocationFreeListCategoryType(
size_t size_in_bytes) {
if (size_in_bytes <= kSmallAllocationMax) {
@@ -1778,10 +1869,13 @@ class FreeList {
return kHuge;
}
+ FreeListCategory* top(FreeListCategoryType type) { return categories_[type]; }
+
PagedSpace* owner_;
- base::Mutex mutex_;
- intptr_t wasted_bytes_;
- FreeListCategory category_[kNumberOfCategories];
+ AtomicNumber<intptr_t> wasted_bytes_;
+ FreeListCategory* categories_[kNumberOfCategories];
+
+ friend class FreeListCategory;
DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
};
@@ -1883,7 +1977,6 @@ class LocalAllocationBuffer {
AllocationInfo allocation_info_;
};
-
class PagedSpace : public Space {
public:
static const intptr_t kCompactionMemoryWanted = 500 * KB;
@@ -1940,11 +2033,6 @@ class PagedSpace : public Space {
ResetFreeListStatistics();
}
- // Increases the number of available bytes of that space.
- void AddToAccountingStats(intptr_t bytes) {
- accounting_stats_.DeallocateBytes(bytes);
- }
-
// Available bytes without growing. These are the bytes on the free list.
// The bytes in the linear allocation area are not included in this total
// because updating the stats would slow down allocation. New pages are
@@ -1977,10 +2065,13 @@ class PagedSpace : public Space {
return allocation_info_.limit_address();
}
+ enum UpdateSkipList { UPDATE_SKIP_LIST, IGNORE_SKIP_LIST };
+
// Allocate the requested number of bytes in the space if possible, return a
- // failure object if not.
+ // failure object if not. Only use IGNORE_SKIP_LIST if the skip list is going
+ // to be manually updated later.
MUST_USE_RESULT inline AllocationResult AllocateRawUnaligned(
- int size_in_bytes);
+ int size_in_bytes, UpdateSkipList update_skip_list = UPDATE_SKIP_LIST);
MUST_USE_RESULT inline AllocationResult AllocateRawUnalignedSynchronized(
int size_in_bytes);
@@ -2000,11 +2091,16 @@ class PagedSpace : public Space {
// If add_to_freelist is false then just accounting stats are updated and
// no attempt to add area to free list is made.
int Free(Address start, int size_in_bytes) {
- int wasted = free_list_.Free(start, size_in_bytes);
+ int wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
accounting_stats_.DeallocateBytes(size_in_bytes);
return size_in_bytes - wasted;
}
+ int UnaccountedFree(Address start, int size_in_bytes) {
+ int wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory);
+ return size_in_bytes - wasted;
+ }
+
void ResetFreeList() { free_list_.Reset(); }
// Set space allocation info.
@@ -2029,7 +2125,7 @@ class PagedSpace : public Space {
void IncreaseCapacity(int size);
// Releases an unused page and shrinks the space.
- void ReleasePage(Page* page, bool evict_free_list_items);
+ void ReleasePage(Page* page);
// The dummy page that anchors the linked list of pages.
Page* anchor() { return &anchor_; }
@@ -2085,17 +2181,18 @@ class PagedSpace : public Space {
// sweeper.
virtual void RefillFreeList();
- protected:
- void AddMemory(Address start, intptr_t size);
+ FreeList* free_list() { return &free_list_; }
- void MoveOverFreeMemory(PagedSpace* other);
+ base::Mutex* mutex() { return &space_mutex_; }
+ inline void UnlinkFreeListCategories(Page* page);
+ inline intptr_t RelinkFreeListCategories(Page* page);
+
+ protected:
// PagedSpaces that should be included in snapshots have different, i.e.,
// smaller, initial pages.
virtual bool snapshotable() { return true; }
- FreeList* free_list() { return &free_list_; }
-
bool HasPages() { return anchor_.next_page() != &anchor_; }
// Cleans up the space, frees all pages in this space except those belonging
@@ -2143,6 +2240,7 @@ class PagedSpace : public Space {
// Mutex guarding any concurrent access to the space.
base::Mutex space_mutex_;
+ friend class IncrementalMarking;
friend class MarkCompactCollector;
friend class PageIterator;
@@ -2191,29 +2289,9 @@ enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
class NewSpacePage : public MemoryChunk {
public:
- // GC related flags copied from from-space to to-space when
- // flipping semispaces.
- static const intptr_t kCopyOnFlipFlagsMask =
- (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
- (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
-
- static const int kAreaSize = Page::kAllocatableMemory;
-
- inline NewSpacePage* next_page() {
- return static_cast<NewSpacePage*>(next_chunk());
- }
-
- inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); }
-
- inline NewSpacePage* prev_page() {
- return static_cast<NewSpacePage*>(prev_chunk());
- }
-
- inline void set_prev_page(NewSpacePage* page) { set_prev_chunk(page); }
-
- SemiSpace* semi_space() { return reinterpret_cast<SemiSpace*>(owner()); }
-
- bool is_anchor() { return !this->InNewSpace(); }
+ static inline NewSpacePage* Initialize(Heap* heap, MemoryChunk* chunk,
+ Executability executable,
+ SemiSpace* owner);
static bool IsAtStart(Address addr) {
return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) ==
@@ -2224,8 +2302,6 @@ class NewSpacePage : public MemoryChunk {
return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
}
- Address address() { return reinterpret_cast<Address>(this); }
-
// Finds the NewSpacePage containing the given address.
static inline NewSpacePage* FromAddress(Address address_in_page) {
Address page_start =
@@ -2247,14 +2323,33 @@ class NewSpacePage : public MemoryChunk {
NewSpacePage::FromAddress(address2);
}
+ inline NewSpacePage* next_page() {
+ return static_cast<NewSpacePage*>(next_chunk());
+ }
+
+ inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); }
+
+ inline NewSpacePage* prev_page() {
+ return static_cast<NewSpacePage*>(prev_chunk());
+ }
+
+ inline void set_prev_page(NewSpacePage* page) { set_prev_chunk(page); }
+
+ SemiSpace* semi_space() { return reinterpret_cast<SemiSpace*>(owner()); }
+
+ bool is_anchor() { return !this->InNewSpace(); }
+
private:
+ // GC related flags copied from from-space to to-space when
+ // flipping semispaces.
+ static const intptr_t kCopyOnFlipFlagsMask =
+ (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
+ (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+
// Create a NewSpacePage object that is only used as anchor
// for the doubly-linked list of real pages.
explicit NewSpacePage(SemiSpace* owner) { InitializeAsAnchor(owner); }
- static NewSpacePage* Initialize(Heap* heap, Address start,
- SemiSpace* semi_space);
-
// Intialize a fake NewSpacePage used as sentinel at the ends
// of a doubly-linked list of real NewSpacePages.
// Only uses the prev/next links, and sets flags to not be in new-space.
@@ -2280,7 +2375,6 @@ class SemiSpace : public Space {
current_capacity_(0),
maximum_capacity_(0),
minimum_capacity_(0),
- start_(nullptr),
age_mark_(nullptr),
committed_(false),
id_(semispace),
@@ -2291,39 +2385,38 @@ class SemiSpace : public Space {
inline bool Contains(Object* o);
inline bool ContainsSlow(Address a);
- // Creates a space in the young generation. The constructor does not
- // allocate memory from the OS.
- void SetUp(Address start, int initial_capacity, int maximum_capacity);
-
- // Tear down the space. Heap memory was not allocated by the space, so it
- // is not deallocated here.
+ void SetUp(int initial_capacity, int maximum_capacity);
void TearDown();
+ bool HasBeenSetUp() { return maximum_capacity_ != 0; }
- // True if the space has been set up but not torn down.
- bool HasBeenSetUp() { return start_ != nullptr; }
+ bool Commit();
+ bool Uncommit();
+ bool is_committed() { return committed_; }
- // Grow the semispace to the new capacity. The new capacity
- // requested must be larger than the current capacity and less than
- // the maximum capacity.
+ // Grow the semispace to the new capacity. The new capacity requested must
+ // be larger than the current capacity and less than the maximum capacity.
bool GrowTo(int new_capacity);
- // Shrinks the semispace to the new capacity. The new capacity
- // requested must be more than the amount of used memory in the
- // semispace and less than the current capacity.
+ // Shrinks the semispace to the new capacity. The new capacity requested
+ // must be more than the amount of used memory in the semispace and less
+ // than the current capacity.
bool ShrinkTo(int new_capacity);
// Returns the start address of the first page of the space.
Address space_start() {
- DCHECK_NE(anchor_.next_page(), &anchor_);
+ DCHECK_NE(anchor_.next_page(), anchor());
return anchor_.next_page()->area_start();
}
- // Returns the start address of the current page of the space.
- Address page_low() { return current_page_->area_start(); }
+ NewSpacePage* first_page() { return anchor_.next_page(); }
+ NewSpacePage* current_page() { return current_page_; }
// Returns one past the end address of the space.
Address space_end() { return anchor_.prev_page()->area_end(); }
+ // Returns the start address of the current page of the space.
+ Address page_low() { return current_page_->area_start(); }
+
// Returns one past the end address of the current page of the space.
Address page_high() { return current_page_->area_end(); }
@@ -2341,17 +2434,10 @@ class SemiSpace : public Space {
Address age_mark() { return age_mark_; }
void set_age_mark(Address mark);
- bool is_committed() { return committed_; }
- bool Commit();
- bool Uncommit();
-
- NewSpacePage* first_page() { return anchor_.next_page(); }
- NewSpacePage* current_page() { return current_page_; }
-
- // Returns the current total capacity of the semispace.
+ // Returns the current capacity of the semispace.
int current_capacity() { return current_capacity_; }
- // Returns the maximum total capacity of the semispace.
+ // Returns the maximum capacity of the semispace.
int maximum_capacity() { return maximum_capacity_; }
// Returns the initial capacity of the semispace.
@@ -2393,11 +2479,9 @@ class SemiSpace : public Space {
#endif
private:
- NewSpacePage* anchor() { return &anchor_; }
+ void RewindPages(NewSpacePage* start, int num_pages);
- void set_current_capacity(int new_capacity) {
- current_capacity_ = new_capacity;
- }
+ inline NewSpacePage* anchor() { return &anchor_; }
// Copies the flags into the masked positions on all pages in the space.
void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
@@ -2408,11 +2492,9 @@ class SemiSpace : public Space {
// The maximum capacity that can be used by this space.
int maximum_capacity_;
- // The mimnimum capacity for the space. A space cannot shrink below this size.
+ // The minimum capacity for the space. A space cannot shrink below this size.
int minimum_capacity_;
- // The start address of the space.
- Address start_;
// Used to govern object promotion during mark-compact collection.
Address age_mark_;
@@ -2488,20 +2570,21 @@ class NewSpacePageIterator BASE_EMBEDDED {
class NewSpace : public Space {
public:
- // Constructor.
explicit NewSpace(Heap* heap)
: Space(heap, NEW_SPACE, NOT_EXECUTABLE),
to_space_(heap, kToSpace),
from_space_(heap, kFromSpace),
reservation_(),
- top_on_previous_step_(0) {}
+ pages_used_(0),
+ top_on_previous_step_(0),
+ allocated_histogram_(nullptr),
+ promoted_histogram_(nullptr) {}
inline bool Contains(HeapObject* o);
inline bool ContainsSlow(Address a);
inline bool Contains(Object* o);
- // Sets up the new space using the given chunk.
- bool SetUp(int reserved_semispace_size_, int max_semi_space_size);
+ bool SetUp(int initial_semispace_capacity, int max_semispace_capacity);
// Tears down the space. Heap memory was not allocated by the space, so it
// is not deallocated here.
@@ -2524,7 +2607,7 @@ class NewSpace : public Space {
// Return the allocated bytes in the active semispace.
intptr_t Size() override {
- return pages_used_ * NewSpacePage::kAreaSize +
+ return pages_used_ * NewSpacePage::kAllocatableMemory +
static_cast<int>(top() - to_space_.page_low());
}
@@ -2537,7 +2620,7 @@ class NewSpace : public Space {
intptr_t Capacity() {
SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
return (to_space_.current_capacity() / Page::kPageSize) *
- NewSpacePage::kAreaSize;
+ NewSpacePage::kAllocatableMemory;
}
// Return the current size of a semispace, allocatable and non-allocatable
@@ -2564,22 +2647,40 @@ class NewSpace : public Space {
// Return the available bytes without growing.
intptr_t Available() override { return Capacity() - Size(); }
- intptr_t PagesFromStart(Address addr) {
- return static_cast<intptr_t>(addr - bottom()) / Page::kPageSize;
- }
-
size_t AllocatedSinceLastGC() {
- intptr_t allocated = top() - to_space_.age_mark();
- if (allocated < 0) {
- // Runtime has lowered the top below the age mark.
+ bool seen_age_mark = false;
+ Address age_mark = to_space_.age_mark();
+ NewSpacePage* current_page = to_space_.first_page();
+ NewSpacePage* age_mark_page = NewSpacePage::FromAddress(age_mark);
+ NewSpacePage* last_page = NewSpacePage::FromAddress(top() - kPointerSize);
+ if (age_mark_page == last_page) {
+ if (top() - age_mark >= 0) {
+ return top() - age_mark;
+ }
+ // Top was reset at some point, invalidating this metric.
return 0;
}
- // Correctly account for non-allocatable regions at the beginning of
- // each page from the age_mark() to the top().
- intptr_t pages =
- PagesFromStart(top()) - PagesFromStart(to_space_.age_mark());
- allocated -= pages * (NewSpacePage::kObjectStartOffset);
- DCHECK(0 <= allocated && allocated <= Size());
+ while (current_page != last_page) {
+ if (current_page == age_mark_page) {
+ seen_age_mark = true;
+ break;
+ }
+ current_page = current_page->next_page();
+ }
+ if (!seen_age_mark) {
+ // Top was reset at some point, invalidating this metric.
+ return 0;
+ }
+ intptr_t allocated = age_mark_page->area_end() - age_mark;
+ DCHECK_EQ(current_page, age_mark_page);
+ current_page = age_mark_page->next_page();
+ while (current_page != last_page) {
+ allocated += NewSpacePage::kAllocatableMemory;
+ current_page = current_page->next_page();
+ }
+ allocated += top() - current_page->area_start();
+ DCHECK_LE(0, allocated);
+ DCHECK_LE(allocated, Size());
return static_cast<size_t>(allocated);
}
@@ -2617,10 +2718,6 @@ class NewSpace : public Space {
// Set the age mark in the active semispace.
void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
- // The start address of the space and a bit mask. Anding an address in the
- // new space with the mask will result in the start address.
- Address start() { return start_; }
-
// The allocation top and limit address.
Address* allocation_top_address() { return allocation_info_.top_address(); }
@@ -2735,18 +2832,12 @@ class NewSpace : public Space {
base::Mutex mutex_;
- Address chunk_base_;
- uintptr_t chunk_size_;
-
// The semispaces.
SemiSpace to_space_;
SemiSpace from_space_;
base::VirtualMemory reservation_;
int pages_used_;
- // Start address and bit mask for containment testing.
- Address start_;
-
// Allocation pointer and limit for normal allocation and allocation during
// mark-compact collection.
AllocationInfo allocation_info_;
@@ -2792,8 +2883,6 @@ class CompactionSpace : public PagedSpace {
bool is_local() override { return true; }
- void RefillFreeList() override;
-
protected:
// The space is temporary and not included in any snapshots.
bool snapshotable() override { return false; }
@@ -2856,12 +2945,7 @@ class MapSpace : public PagedSpace {
public:
// Creates a map space object.
MapSpace(Heap* heap, AllocationSpace id)
- : PagedSpace(heap, id, NOT_EXECUTABLE),
- max_map_space_pages_(kMaxMapPageIndex - 1) {}
-
- // Given an index, returns the page address.
- // TODO(1600): this limit is artifical just to keep code compilable
- static const int kMaxMapPageIndex = 1 << 16;
+ : PagedSpace(heap, id, NOT_EXECUTABLE) {}
int RoundSizeDownToObjectAlignment(int size) override {
if (base::bits::IsPowerOfTwo32(Map::kSize)) {
@@ -2874,16 +2958,6 @@ class MapSpace : public PagedSpace {
#ifdef VERIFY_HEAP
void VerifyObject(HeapObject* obj) override;
#endif
-
- private:
- static const int kMapsPerPage = Page::kAllocatableMemory / Map::kSize;
-
- // Do map space compaction if there is a page gap.
- int CompactionThreshold() {
- return kMapsPerPage * (max_map_space_pages_ - 1);
- }
-
- const int max_map_space_pages_;
};
@@ -2950,6 +3024,8 @@ class LargeObjectSpace : public Space {
// Checks whether the space is empty.
bool IsEmpty() { return first_page_ == NULL; }
+ void AdjustLiveBytes(int by) { objects_size_ += by; }
+
LargePage* first_page() { return first_page_; }
#ifdef VERIFY_HEAP
@@ -2988,25 +3064,42 @@ class LargeObjectIterator : public ObjectIterator {
LargePage* current_;
};
+class LargePageIterator BASE_EMBEDDED {
+ public:
+ explicit inline LargePageIterator(LargeObjectSpace* space);
+
+ inline LargePage* next();
+
+ private:
+ LargePage* next_page_;
+};
// Iterates over the chunks (pages and large object pages) that can contain
-// pointers to new space.
-class PointerChunkIterator BASE_EMBEDDED {
+// pointers to new space or to evacuation candidates.
+class MemoryChunkIterator BASE_EMBEDDED {
public:
- inline explicit PointerChunkIterator(Heap* heap);
+ enum Mode { ALL, ALL_BUT_MAP_SPACE, ALL_BUT_CODE_SPACE };
+ inline explicit MemoryChunkIterator(Heap* heap, Mode mode);
// Return NULL when the iterator is done.
inline MemoryChunk* next();
private:
- enum State { kOldSpaceState, kMapState, kLargeObjectState, kFinishedState };
+ enum State {
+ kOldSpaceState,
+ kMapState,
+ kCodeState,
+ kLargeObjectState,
+ kFinishedState
+ };
State state_;
+ const Mode mode_;
PageIterator old_iterator_;
+ PageIterator code_iterator_;
PageIterator map_iterator_;
- LargeObjectIterator lo_iterator_;
+ LargePageIterator lo_iterator_;
};
-
#ifdef DEBUG
struct CommentStatistic {
const char* comment;
diff --git a/deps/v8/src/heap/store-buffer-inl.h b/deps/v8/src/heap/store-buffer-inl.h
deleted file mode 100644
index 920ec3411d..0000000000
--- a/deps/v8/src/heap/store-buffer-inl.h
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_STORE_BUFFER_INL_H_
-#define V8_STORE_BUFFER_INL_H_
-
-#include "src/heap/heap.h"
-#include "src/heap/remembered-set.h"
-#include "src/heap/spaces-inl.h"
-#include "src/heap/store-buffer.h"
-
-namespace v8 {
-namespace internal {
-
-void LocalStoreBuffer::Record(Address addr) {
- if (top_->is_full()) top_ = new Node(top_);
- top_->buffer[top_->count++] = addr;
-}
-
-void LocalStoreBuffer::Process(StoreBuffer* store_buffer) {
- Node* current = top_;
- while (current != nullptr) {
- for (int i = 0; i < current->count; i++) {
- Address slot = current->buffer[i];
- Page* page = Page::FromAnyPointerAddress(heap_, slot);
- RememberedSet<OLD_TO_NEW>::Insert(page, slot);
- }
- current = current->next;
- }
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_STORE_BUFFER_INL_H_
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
index 21f375b195..a982eb3c40 100644
--- a/deps/v8/src/heap/store-buffer.cc
+++ b/deps/v8/src/heap/store-buffer.cc
@@ -8,7 +8,6 @@
#include "src/counters.h"
#include "src/heap/incremental-marking.h"
-#include "src/heap/store-buffer-inl.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/v8.h"
@@ -17,17 +16,20 @@ namespace v8 {
namespace internal {
StoreBuffer::StoreBuffer(Heap* heap)
- : heap_(heap), start_(nullptr), limit_(nullptr), virtual_memory_(nullptr) {}
+ : heap_(heap),
+ top_(nullptr),
+ start_(nullptr),
+ limit_(nullptr),
+ virtual_memory_(nullptr) {}
void StoreBuffer::SetUp() {
// Allocate 3x the buffer size, so that we can start the new store buffer
// aligned to 2x the size. This lets us use a bit test to detect the end of
// the area.
- virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 3);
+ virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 2);
uintptr_t start_as_int =
reinterpret_cast<uintptr_t>(virtual_memory_->address());
- start_ =
- reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
+ start_ = reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize));
limit_ = start_ + (kStoreBufferSize / kPointerSize);
DCHECK(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
@@ -38,23 +40,20 @@ void StoreBuffer::SetUp() {
DCHECK(start_ <= vm_limit);
DCHECK(limit_ <= vm_limit);
USE(vm_limit);
- DCHECK((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
- DCHECK((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
- 0);
+ DCHECK((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferMask) == 0);
if (!virtual_memory_->Commit(reinterpret_cast<Address>(start_),
kStoreBufferSize,
false)) { // Not executable.
V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
}
- heap_->set_store_buffer_top(reinterpret_cast<Smi*>(start_));
+ top_ = start_;
}
void StoreBuffer::TearDown() {
delete virtual_memory_;
- start_ = limit_ = NULL;
- heap_->set_store_buffer_top(reinterpret_cast<Smi*>(start_));
+ top_ = start_ = limit_ = nullptr;
}
@@ -64,16 +63,15 @@ void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
}
void StoreBuffer::MoveEntriesToRememberedSet() {
- Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
- if (top == start_) return;
- DCHECK(top <= limit_);
- heap_->set_store_buffer_top(reinterpret_cast<Smi*>(start_));
- for (Address* current = start_; current < top; current++) {
+ if (top_ == start_) return;
+ DCHECK(top_ <= limit_);
+ for (Address* current = start_; current < top_; current++) {
DCHECK(!heap_->code_space()->Contains(*current));
Address addr = *current;
Page* page = Page::FromAnyPointerAddress(heap_, addr);
RememberedSet<OLD_TO_NEW>::Insert(page, addr);
}
+ top_ = start_;
}
} // namespace internal
diff --git a/deps/v8/src/heap/store-buffer.h b/deps/v8/src/heap/store-buffer.h
index e7e9c985eb..1b3fcb0a98 100644
--- a/deps/v8/src/heap/store-buffer.h
+++ b/deps/v8/src/heap/store-buffer.h
@@ -18,20 +18,25 @@ namespace internal {
// code. On buffer overflow the slots are moved to the remembered set.
class StoreBuffer {
public:
- explicit StoreBuffer(Heap* heap);
+ static const int kStoreBufferSize = 1 << (14 + kPointerSizeLog2);
+ static const int kStoreBufferMask = kStoreBufferSize - 1;
+
static void StoreBufferOverflow(Isolate* isolate);
+
+ explicit StoreBuffer(Heap* heap);
void SetUp();
void TearDown();
- static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
- static const int kStoreBufferSize = kStoreBufferOverflowBit;
- static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
+ // Used to add entries from generated code.
+ inline Address* top_address() { return reinterpret_cast<Address*>(&top_); }
void MoveEntriesToRememberedSet();
private:
Heap* heap_;
+ Address* top_;
+
// The start and the limit of the buffer that contains store slots
// added from the generated code.
Address* start_;
@@ -40,41 +45,6 @@ class StoreBuffer {
base::VirtualMemory* virtual_memory_;
};
-
-class LocalStoreBuffer BASE_EMBEDDED {
- public:
- explicit LocalStoreBuffer(Heap* heap)
- : top_(new Node(nullptr)), heap_(heap) {}
-
- ~LocalStoreBuffer() {
- Node* current = top_;
- while (current != nullptr) {
- Node* tmp = current->next;
- delete current;
- current = tmp;
- }
- }
-
- inline void Record(Address addr);
- inline void Process(StoreBuffer* store_buffer);
-
- private:
- static const int kBufferSize = 16 * KB;
-
- struct Node : Malloced {
- explicit Node(Node* next_node) : next(next_node), count(0) {}
-
- inline bool is_full() { return count == kBufferSize; }
-
- Node* next;
- Address buffer[kBufferSize];
- int count;
- };
-
- Node* top_;
- Heap* heap_;
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/i18n.cc b/deps/v8/src/i18n.cc
index 8de2d2998a..623de50157 100644
--- a/deps/v8/src/i18n.cc
+++ b/deps/v8/src/i18n.cc
@@ -39,7 +39,8 @@ bool ExtractStringSetting(Isolate* isolate,
const char* key,
icu::UnicodeString* setting) {
Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(key);
- Handle<Object> object = Object::GetProperty(options, str).ToHandleChecked();
+ Handle<Object> object =
+ JSReceiver::GetProperty(options, str).ToHandleChecked();
if (object->IsString()) {
v8::String::Utf8Value utf8_string(
v8::Utils::ToLocal(Handle<String>::cast(object)));
@@ -55,7 +56,8 @@ bool ExtractIntegerSetting(Isolate* isolate,
const char* key,
int32_t* value) {
Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(key);
- Handle<Object> object = Object::GetProperty(options, str).ToHandleChecked();
+ Handle<Object> object =
+ JSReceiver::GetProperty(options, str).ToHandleChecked();
if (object->IsNumber()) {
object->ToInt32(value);
return true;
@@ -69,7 +71,8 @@ bool ExtractBooleanSetting(Isolate* isolate,
const char* key,
bool* value) {
Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(key);
- Handle<Object> object = Object::GetProperty(options, str).ToHandleChecked();
+ Handle<Object> object =
+ JSReceiver::GetProperty(options, str).ToHandleChecked();
if (object->IsBoolean()) {
*value = object->BooleanValue();
return true;
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index cb6bad8a20..cafa6763fa 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -81,6 +81,10 @@ Address RelocInfo::target_address() {
return Assembler::target_address_at(pc_, host_);
}
+Address RelocInfo::wasm_memory_reference() {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ return Memory::Address_at(pc_);
+}
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
@@ -115,6 +119,20 @@ void RelocInfo::set_target_address(Address target,
}
}
+void RelocInfo::update_wasm_memory_reference(
+ Address old_base, Address new_base, size_t old_size, size_t new_size,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ DCHECK(old_base <= wasm_memory_reference() &&
+ wasm_memory_reference() < old_base + old_size);
+ Address updated_reference = new_base + (wasm_memory_reference() - old_base);
+ DCHECK(new_base <= updated_reference &&
+ updated_reference < new_base + new_size);
+ Memory::Address_at(pc_) = updated_reference;
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ Assembler::FlushICache(isolate_, pc_, sizeof(int32_t));
+ }
+}
Object* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
@@ -139,8 +157,8 @@ void RelocInfo::set_target_object(Object* target,
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target));
}
}
@@ -321,6 +339,10 @@ Immediate::Immediate(int x) {
rmode_ = RelocInfo::NONE32;
}
+Immediate::Immediate(Address x, RelocInfo::Mode rmode) {
+ x_ = reinterpret_cast<int32_t>(x);
+ rmode_ = rmode;
+}
Immediate::Immediate(const ExternalReference& ext) {
x_ = reinterpret_cast<int32_t>(ext.address());
@@ -429,6 +451,11 @@ void Assembler::emit_code_relative_offset(Label* label) {
}
}
+void Assembler::emit_b(Immediate x) {
+ DCHECK(x.is_int8() || x.is_uint8());
+ uint8_t value = static_cast<uint8_t>(x.x_);
+ *pc_++ = value;
+}
void Assembler::emit_w(const Immediate& x) {
DCHECK(RelocInfo::IsNone(x.rmode_));
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 2ac3088020..150131cdbc 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -746,8 +746,8 @@ void Assembler::and_(const Operand& dst, Register src) {
emit_operand(src, dst);
}
-
-void Assembler::cmpb(const Operand& op, int8_t imm8) {
+void Assembler::cmpb(const Operand& op, Immediate imm8) {
+ DCHECK(imm8.is_int8() || imm8.is_uint8());
EnsureSpace ensure_space(this);
if (op.is_reg(eax)) {
EMIT(0x3C);
@@ -755,7 +755,7 @@ void Assembler::cmpb(const Operand& op, int8_t imm8) {
EMIT(0x80);
emit_operand(edi, op); // edi == 7
}
- EMIT(imm8);
+ emit_b(imm8);
}
@@ -784,6 +784,19 @@ void Assembler::cmpw(const Operand& op, Immediate imm16) {
emit_w(imm16);
}
+void Assembler::cmpw(Register reg, const Operand& op) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x39);
+ emit_operand(reg, op);
+}
+
+void Assembler::cmpw(const Operand& op, Register reg) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x3B);
+ emit_operand(reg, op);
+}
void Assembler::cmp(Register reg, int32_t imm32) {
EnsureSpace ensure_space(this);
@@ -1068,19 +1081,26 @@ void Assembler::sar_cl(const Operand& dst) {
emit_operand(edi, dst);
}
-
void Assembler::sbb(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
EMIT(0x1B);
emit_operand(dst, src);
}
+void Assembler::shld(Register dst, Register src, uint8_t shift) {
+ DCHECK(is_uint5(shift));
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xA4);
+ emit_operand(src, Operand(dst));
+ EMIT(shift);
+}
-void Assembler::shld(Register dst, const Operand& src) {
+void Assembler::shld_cl(Register dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xA5);
- emit_operand(dst, src);
+ emit_operand(src, Operand(dst));
}
@@ -1104,15 +1124,6 @@ void Assembler::shl_cl(const Operand& dst) {
emit_operand(esp, dst);
}
-
-void Assembler::shrd(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xAD);
- emit_operand(dst, src);
-}
-
-
void Assembler::shr(const Operand& dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
DCHECK(is_uint5(imm8)); // illegal shift count
@@ -1133,6 +1144,21 @@ void Assembler::shr_cl(const Operand& dst) {
emit_operand(ebp, dst);
}
+void Assembler::shrd(Register dst, Register src, uint8_t shift) {
+ DCHECK(is_uint5(shift));
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xAC);
+ emit_operand(dst, Operand(src));
+ EMIT(shift);
+}
+
+void Assembler::shrd_cl(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xAD);
+ emit_operand(src, dst);
+}
void Assembler::sub(const Operand& dst, const Immediate& x) {
EnsureSpace ensure_space(this);
@@ -1155,8 +1181,8 @@ void Assembler::sub(const Operand& dst, Register src) {
void Assembler::test(Register reg, const Immediate& imm) {
- if (RelocInfo::IsNone(imm.rmode_) && is_uint8(imm.x_)) {
- test_b(reg, imm.x_);
+ if (imm.is_uint8()) {
+ test_b(reg, imm);
return;
}
@@ -1193,8 +1219,8 @@ void Assembler::test(const Operand& op, const Immediate& imm) {
test(op.reg(), imm);
return;
}
- if (RelocInfo::IsNone(imm.rmode_) && is_uint8(imm.x_)) {
- return test_b(op, imm.x_);
+ if (imm.is_uint8()) {
+ return test_b(op, imm);
}
EnsureSpace ensure_space(this);
EMIT(0xF7);
@@ -1202,25 +1228,25 @@ void Assembler::test(const Operand& op, const Immediate& imm) {
emit(imm);
}
-
-void Assembler::test_b(Register reg, uint8_t imm8) {
+void Assembler::test_b(Register reg, Immediate imm8) {
+ DCHECK(imm8.is_uint8());
EnsureSpace ensure_space(this);
// Only use test against byte for registers that have a byte
// variant: eax, ebx, ecx, and edx.
if (reg.is(eax)) {
EMIT(0xA8);
- EMIT(imm8);
+ emit_b(imm8);
} else if (reg.is_byte_register()) {
- emit_arith_b(0xF6, 0xC0, reg, imm8);
+ emit_arith_b(0xF6, 0xC0, reg, static_cast<uint8_t>(imm8.x_));
} else {
+ EMIT(0x66);
EMIT(0xF7);
EMIT(0xC0 | reg.code());
- emit(imm8);
+ emit_w(imm8);
}
}
-
-void Assembler::test_b(const Operand& op, uint8_t imm8) {
+void Assembler::test_b(const Operand& op, Immediate imm8) {
if (op.is_reg_only()) {
test_b(op.reg(), imm8);
return;
@@ -1228,9 +1254,42 @@ void Assembler::test_b(const Operand& op, uint8_t imm8) {
EnsureSpace ensure_space(this);
EMIT(0xF6);
emit_operand(eax, op);
- EMIT(imm8);
+ emit_b(imm8);
+}
+
+void Assembler::test_w(Register reg, Immediate imm16) {
+ DCHECK(imm16.is_int16() || imm16.is_uint16());
+ EnsureSpace ensure_space(this);
+ if (reg.is(eax)) {
+ EMIT(0xA9);
+ emit_w(imm16);
+ } else {
+ EMIT(0x66);
+ EMIT(0xF7);
+ EMIT(0xc0 | reg.code());
+ emit_w(imm16);
+ }
+}
+
+void Assembler::test_w(Register reg, const Operand& op) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x85);
+ emit_operand(reg, op);
}
+void Assembler::test_w(const Operand& op, Immediate imm16) {
+ DCHECK(imm16.is_int16() || imm16.is_uint16());
+ if (op.is_reg_only()) {
+ test_w(op.reg(), imm16);
+ return;
+ }
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0xF7);
+ emit_operand(eax, op);
+ emit_w(imm16);
+}
void Assembler::xor_(Register dst, int32_t imm32) {
EnsureSpace ensure_space(this);
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index f517c9878e..5105ff5a4e 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -271,6 +271,7 @@ class Immediate BASE_EMBEDDED {
inline explicit Immediate(Handle<Object> handle);
inline explicit Immediate(Smi* value);
inline explicit Immediate(Address addr);
+ inline explicit Immediate(Address x, RelocInfo::Mode rmode);
static Immediate CodeRelativeOffset(Label* label) {
return Immediate(label);
@@ -280,9 +281,15 @@ class Immediate BASE_EMBEDDED {
bool is_int8() const {
return -128 <= x_ && x_ < 128 && RelocInfo::IsNone(rmode_);
}
+ bool is_uint8() const {
+ return v8::internal::is_uint8(x_) && RelocInfo::IsNone(rmode_);
+ }
bool is_int16() const {
return -32768 <= x_ && x_ < 32768 && RelocInfo::IsNone(rmode_);
}
+ bool is_uint16() const {
+ return v8::internal::is_uint16(x_) && RelocInfo::IsNone(rmode_);
+ }
private:
inline explicit Immediate(Label* value);
@@ -666,13 +673,18 @@ class Assembler : public AssemblerBase {
void and_(const Operand& dst, Register src);
void and_(const Operand& dst, const Immediate& x);
- void cmpb(Register reg, int8_t imm8) { cmpb(Operand(reg), imm8); }
- void cmpb(const Operand& op, int8_t imm8);
+ void cmpb(Register reg, Immediate imm8) { cmpb(Operand(reg), imm8); }
+ void cmpb(const Operand& op, Immediate imm8);
void cmpb(Register reg, const Operand& op);
void cmpb(const Operand& op, Register reg);
+ void cmpb(Register dst, Register src) { cmpb(Operand(dst), src); }
void cmpb_al(const Operand& op);
void cmpw_ax(const Operand& op);
- void cmpw(const Operand& op, Immediate imm16);
+ void cmpw(const Operand& dst, Immediate src);
+ void cmpw(Register dst, Immediate src) { cmpw(Operand(dst), src); }
+ void cmpw(Register dst, const Operand& src);
+ void cmpw(Register dst, Register src) { cmpw(Operand(dst), src); }
+ void cmpw(const Operand& dst, Register src);
void cmp(Register reg, int32_t imm32);
void cmp(Register reg, Handle<Object> handle);
void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); }
@@ -738,21 +750,20 @@ class Assembler : public AssemblerBase {
void sbb(Register dst, const Operand& src);
- void shld(Register dst, Register src) { shld(dst, Operand(src)); }
- void shld(Register dst, const Operand& src);
-
void shl(Register dst, uint8_t imm8) { shl(Operand(dst), imm8); }
void shl(const Operand& dst, uint8_t imm8);
void shl_cl(Register dst) { shl_cl(Operand(dst)); }
void shl_cl(const Operand& dst);
-
- void shrd(Register dst, Register src) { shrd(dst, Operand(src)); }
- void shrd(Register dst, const Operand& src);
+ void shld(Register dst, Register src, uint8_t shift);
+ void shld_cl(Register dst, Register src);
void shr(Register dst, uint8_t imm8) { shr(Operand(dst), imm8); }
void shr(const Operand& dst, uint8_t imm8);
void shr_cl(Register dst) { shr_cl(Operand(dst)); }
void shr_cl(const Operand& dst);
+ void shrd(Register dst, Register src, uint8_t shift);
+ void shrd_cl(Register dst, Register src) { shrd_cl(Operand(dst), src); }
+ void shrd_cl(const Operand& dst, Register src);
void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); }
void sub(const Operand& dst, const Immediate& x);
@@ -763,10 +774,18 @@ class Assembler : public AssemblerBase {
void test(Register reg, const Immediate& imm);
void test(Register reg0, Register reg1) { test(reg0, Operand(reg1)); }
void test(Register reg, const Operand& op);
- void test_b(Register reg, const Operand& op);
void test(const Operand& op, const Immediate& imm);
- void test_b(Register reg, uint8_t imm8);
- void test_b(const Operand& op, uint8_t imm8);
+ void test(const Operand& op, Register reg) { test(reg, op); }
+ void test_b(Register reg, const Operand& op);
+ void test_b(Register reg, Immediate imm8);
+ void test_b(const Operand& op, Immediate imm8);
+ void test_b(const Operand& op, Register reg) { test_b(reg, op); }
+ void test_b(Register dst, Register src) { test_b(dst, Operand(src)); }
+ void test_w(Register reg, const Operand& op);
+ void test_w(Register reg, Immediate imm16);
+ void test_w(const Operand& op, Immediate imm16);
+ void test_w(const Operand& op, Register reg) { test_w(reg, op); }
+ void test_w(Register dst, Register src) { test_w(dst, Operand(src)); }
void xor_(Register dst, int32_t imm32);
void xor_(Register dst, Register src) { xor_(dst, Operand(src)); }
@@ -1435,7 +1454,9 @@ class Assembler : public AssemblerBase {
static bool IsNop(Address addr);
- PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+ AssemblerPositionsRecorder* positions_recorder() {
+ return &positions_recorder_;
+ }
int relocation_writer_size() {
return (buffer_ + buffer_size_) - reloc_info_writer.pos();
@@ -1482,6 +1503,7 @@ class Assembler : public AssemblerBase {
RelocInfo::Mode rmode,
TypeFeedbackId id = TypeFeedbackId::None());
inline void emit(const Immediate& x);
+ inline void emit_b(Immediate x);
inline void emit_w(const Immediate& x);
inline void emit_q(uint64_t x);
@@ -1542,8 +1564,8 @@ class Assembler : public AssemblerBase {
// code generation
RelocInfoWriter reloc_info_writer;
- PositionsRecorder positions_recorder_;
- friend class PositionsRecorder;
+ AssemblerPositionsRecorder positions_recorder_;
+ friend class AssemblerPositionsRecorder;
};
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index c48c74abad..b7e33d9a74 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -123,6 +123,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool check_derived_construct) {
// ----------- S t a t e -------------
// -- eax: number of arguments
+ // -- esi: context
// -- edi: constructor function
// -- ebx: allocation site or undefined
// -- edx: new target
@@ -134,6 +135,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Preserve the incoming parameters on the stack.
__ AssertUndefinedOrAllocationSite(ebx);
+ __ push(esi);
__ push(ebx);
__ SmiTag(eax);
__ push(eax);
@@ -201,7 +203,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Restore context from the frame.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ mov(esi, Operand(ebp, ConstructFrameConstants::kContextOffset));
if (create_implicit_receiver) {
// If the result is an object (in the ECMA sense), we should get rid
@@ -324,9 +326,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
- // Clear the context before we push it when entering the internal frame.
- __ Move(esi, Immediate(0));
-
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -602,27 +601,24 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
// they are to be pushed onto the stack.
// -----------------------------------
- // Save number of arguments on the stack below where arguments are going
- // to be pushed.
- __ mov(ecx, eax);
- __ neg(ecx);
- __ mov(Operand(esp, ecx, times_pointer_size, -kPointerSize), eax);
- __ mov(eax, ecx);
-
// Pop return address to allow tail-call after pushing arguments.
__ Pop(ecx);
- // Find the address of the last argument.
- __ shl(eax, kPointerSizeLog2);
- __ add(eax, ebx);
+ // Push edi in the slot meant for receiver. We need an extra register
+ // so store edi temporarily on stack.
+ __ Push(edi);
- // Push padding for receiver.
- __ Push(Immediate(0));
+ // Find the address of the last argument.
+ __ mov(edi, eax);
+ __ neg(edi);
+ __ shl(edi, kPointerSizeLog2);
+ __ add(edi, ebx);
- Generate_InterpreterPushArgs(masm, eax);
+ Generate_InterpreterPushArgs(masm, edi);
- // Restore number of arguments from slot on stack.
- __ mov(eax, Operand(esp, -kPointerSize));
+ // Restore the constructor from slot on stack. It was pushed at the slot
+ // meant for receiver.
+ __ mov(edi, Operand(esp, eax, times_pointer_size, 0));
// Re-push return address.
__ Push(ecx);
@@ -960,6 +956,28 @@ void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
}
}
+// static
+void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argc
+ // -- esp[0] : return address
+ // -- esp[4] : first argument (left-hand side)
+ // -- esp[8] : receiver (right-hand side)
+ // -----------------------------------
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ mov(InstanceOfDescriptor::LeftRegister(),
+ Operand(ebp, 2 * kPointerSize)); // Load left-hand side.
+ __ mov(InstanceOfDescriptor::RightRegister(),
+ Operand(ebp, 3 * kPointerSize)); // Load right-hand side.
+ InstanceOfStub stub(masm->isolate(), true);
+ __ CallStub(&stub);
+ }
+
+ // Pop the argument and the receiver.
+ __ ret(2 * kPointerSize);
+}
// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
@@ -1007,7 +1025,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
Label receiver_not_callable;
__ JumpIfSmi(edi, &receiver_not_callable, Label::kNear);
__ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsCallable);
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsCallable));
__ j(zero, &receiver_not_callable, Label::kNear);
// 3. Tail call with no arguments if argArray is null or undefined.
@@ -1130,7 +1149,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
Label target_not_callable;
__ JumpIfSmi(edi, &target_not_callable, Label::kNear);
__ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsCallable);
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsCallable));
__ j(zero, &target_not_callable, Label::kNear);
// 3a. Apply the target to the given argumentsList (passing undefined for
@@ -1146,7 +1166,6 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
}
}
-
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
@@ -1195,14 +1214,16 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
Label target_not_constructor;
__ JumpIfSmi(edi, &target_not_constructor, Label::kNear);
__ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsConstructor);
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsConstructor));
__ j(zero, &target_not_constructor, Label::kNear);
// 3. Make sure the target is actually a constructor.
Label new_target_not_constructor;
__ JumpIfSmi(edx, &new_target_not_constructor, Label::kNear);
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsConstructor);
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsConstructor));
__ j(zero, &new_target_not_constructor, Label::kNear);
// 4a. Construct the target with the given new.target and argumentsList.
@@ -1865,18 +1886,20 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
Comment cmnt(masm, "[ PrepareForTailCall");
- // Prepare for tail call only if the debugger is not active.
+ // Prepare for tail call only if ES2015 tail call elimination is enabled.
Label done;
- ExternalReference debug_is_active =
- ExternalReference::debug_is_active_address(masm->isolate());
- __ movzx_b(scratch1, Operand::StaticVariable(debug_is_active));
+ ExternalReference is_tail_call_elimination_enabled =
+ ExternalReference::is_tail_call_elimination_enabled_address(
+ masm->isolate());
+ __ movzx_b(scratch1,
+ Operand::StaticVariable(is_tail_call_elimination_enabled));
__ cmp(scratch1, Immediate(0));
- __ j(not_equal, &done, Label::kNear);
+ __ j(equal, &done, Label::kNear);
// Drop possible interpreter handler/stub frame.
{
Label no_interpreter_frame;
- __ cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
+ __ cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
Immediate(Smi::FromInt(StackFrame::STUB)));
__ j(not_equal, &no_interpreter_frame, Label::kNear);
__ mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
@@ -1884,16 +1907,18 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
}
// Check if next frame is an arguments adaptor frame.
+ Register caller_args_count_reg = scratch1;
Label no_arguments_adaptor, formal_parameter_count_loaded;
__ mov(scratch2, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ cmp(Operand(scratch2, StandardFrameConstants::kContextOffset),
+ __ cmp(Operand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset),
Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(not_equal, &no_arguments_adaptor, Label::kNear);
- // Drop arguments adaptor frame and load arguments count.
+ // Drop current frame and load arguments count from arguments adaptor frame.
__ mov(ebp, scratch2);
- __ mov(scratch1, Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(scratch1);
+ __ mov(caller_args_count_reg,
+ Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(caller_args_count_reg);
__ jmp(&formal_parameter_count_loaded, Label::kNear);
__ bind(&no_arguments_adaptor);
@@ -1902,57 +1927,15 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
__ mov(scratch1,
FieldOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
__ mov(
- scratch1,
+ caller_args_count_reg,
FieldOperand(scratch1, SharedFunctionInfo::kFormalParameterCountOffset));
- __ SmiUntag(scratch1);
+ __ SmiUntag(caller_args_count_reg);
__ bind(&formal_parameter_count_loaded);
- // Calculate the destination address where we will put the return address
- // after we drop current frame.
- Register new_sp_reg = scratch2;
- __ sub(scratch1, args_reg);
- __ lea(new_sp_reg, Operand(ebp, scratch1, times_pointer_size,
- StandardFrameConstants::kCallerPCOffset));
-
- if (FLAG_debug_code) {
- __ cmp(esp, new_sp_reg);
- __ Check(below, kStackAccessBelowStackPointer);
- }
-
- // Copy receiver and return address as well.
- Register count_reg = scratch1;
- __ lea(count_reg, Operand(args_reg, 2));
-
- // Copy return address from caller's frame to current frame's return address
- // to avoid its trashing and let the following loop copy it to the right
- // place.
- Register tmp_reg = scratch3;
- __ mov(tmp_reg, Operand(ebp, StandardFrameConstants::kCallerPCOffset));
- __ mov(Operand(esp, 0), tmp_reg);
-
- // Restore caller's frame pointer now as it could be overwritten by
- // the copying loop.
- __ mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-
- Operand src(esp, count_reg, times_pointer_size, 0);
- Operand dst(new_sp_reg, count_reg, times_pointer_size, 0);
-
- // Now copy callee arguments to the caller frame going backwards to avoid
- // callee arguments corruption (source and destination areas could overlap).
- Label loop, entry;
- __ jmp(&entry, Label::kNear);
- __ bind(&loop);
- __ dec(count_reg);
- __ mov(tmp_reg, src);
- __ mov(dst, tmp_reg);
- __ bind(&entry);
- __ cmp(count_reg, Immediate(0));
- __ j(not_equal, &loop, Label::kNear);
-
- // Leave current frame.
- __ mov(esp, new_sp_reg);
-
+ ParameterCount callee_args_count(args_reg);
+ __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+ scratch3, ReturnAddressState::kOnStack, 0);
__ bind(&done);
}
} // namespace
@@ -1972,7 +1955,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ test_b(FieldOperand(edx, SharedFunctionInfo::kFunctionKindByteOffset),
- SharedFunctionInfo::kClassConstructorBitsWithinByte);
+ Immediate(SharedFunctionInfo::kClassConstructorBitsWithinByte));
__ j(not_zero, &class_constructor);
// Enter the context of the function; ToObject has to run in the function
@@ -1984,8 +1967,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
__ test_b(FieldOperand(edx, SharedFunctionInfo::kNativeByteOffset),
- (1 << SharedFunctionInfo::kNativeBitWithinByte) |
- (1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+ Immediate((1 << SharedFunctionInfo::kNativeBitWithinByte) |
+ (1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
__ j(not_zero, &done_convert);
{
// ----------- S t a t e -------------
@@ -2207,7 +2190,8 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
RelocInfo::CODE_TARGET);
// Check if target has a [[Call]] internal method.
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsCallable);
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsCallable));
__ j(zero, &non_callable);
__ CmpInstanceType(ecx, JS_PROXY_TYPE);
@@ -2343,7 +2327,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
// Check if target has a [[Construct]] internal method.
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsConstructor);
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsConstructor));
__ j(zero, &non_constructor, Label::kNear);
// Only dispatch to bound functions after checking whether they are
@@ -2415,27 +2400,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected.
__ bind(&too_few);
-
- // If the function is strong we need to throw an error.
- Label no_strong_error;
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrongModeByteOffset),
- 1 << SharedFunctionInfo::kStrongModeBitWithinByte);
- __ j(equal, &no_strong_error, Label::kNear);
-
- // What we really care about is the required number of arguments.
- __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kLengthOffset));
- __ SmiUntag(ecx);
- __ cmp(eax, ecx);
- __ j(greater_equal, &no_strong_error, Label::kNear);
-
- {
- FrameScope frame(masm, StackFrame::MANUAL);
- EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
- }
-
- __ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
ArgumentsAdaptorStackCheck(masm, &stack_overflow);
@@ -2474,7 +2438,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Call the entry point.
__ bind(&invoke);
// Restore function pointer.
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ mov(edi, Operand(ebp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
// eax : expected number of arguments
// edx : new target (passed through to callee)
// edi : function (passed through to callee)
@@ -2649,24 +2613,6 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
}
-void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
- // We check the stack limit as indicator that recompilation might be done.
- Label ok;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm->isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, Label::kNear);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard);
- }
- __ jmp(masm->isolate()->builtins()->OnStackReplacement(),
- RelocInfo::CODE_TARGET);
-
- __ bind(&ok);
- __ ret(0);
-}
-
#undef __
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 510b58e723..53b35a3a84 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -4,9 +4,10 @@
#if V8_TARGET_ARCH_IA32
+#include "src/code-stubs.h"
+#include "src/api-arguments.h"
#include "src/base/bits.h"
#include "src/bootstrapper.h"
-#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/ia32/code-stubs-ia32.h"
#include "src/ia32/frames-ia32.h"
@@ -84,6 +85,10 @@ void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
}
+void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+ Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
+ descriptor->Initialize(eax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
+}
void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
@@ -275,7 +280,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
Immediate(static_cast<uint32_t>(Double::kSignificandMask >> 32)));
__ add(result_reg,
Immediate(static_cast<uint32_t>(Double::kHiddenBit >> 32)));
- __ shrd(result_reg, scratch1);
+ __ shrd_cl(scratch1, result_reg);
__ shr_cl(result_reg);
__ test(ecx, Immediate(32));
__ cmov(not_equal, scratch1, result_reg);
@@ -553,7 +558,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ fstp(1); // 2^X
// Bail out to runtime in case of exceptions in the status word.
__ fnstsw_ax();
- __ test_b(eax, 0x5F); // We check for all but precision exception.
+ __ test_b(eax,
+ Immediate(0x5F)); // We check for all but precision exception.
__ j(not_zero, &fast_power_failed, Label::kNear);
__ fstp_d(Operand(esp, 0));
__ movsd(double_result, Operand(esp, 0));
@@ -665,34 +671,6 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
}
-void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
- // Return address is on the stack.
- Label slow;
-
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
- Register scratch = eax;
- DCHECK(!scratch.is(receiver) && !scratch.is(key));
-
- // Check that the key is an array index, that is Uint32.
- __ test(key, Immediate(kSmiTagMask | kSmiSignMask));
- __ j(not_zero, &slow);
-
- // Everything is fine, call runtime.
- __ pop(scratch);
- __ push(receiver); // receiver
- __ push(key); // key
- __ push(scratch); // return address
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
-
- __ bind(&slow);
- PropertyAccessCompiler::TailCallBuiltin(
- masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
-
void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
// Return address is on the stack.
Label miss;
@@ -1103,7 +1081,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
- __ test_b(ebx, kIsIndirectStringMask);
+ __ test_b(ebx, Immediate(kIsIndirectStringMask));
__ Assert(zero, kExternalStringExpectedButNotFound);
}
__ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
@@ -1112,7 +1090,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ sub(eax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
STATIC_ASSERT(kTwoByteStringTag == 0);
// (8) Is the external string one byte? If yes, go to (5).
- __ test_b(ebx, kStringEncodingMask);
+ __ test_b(ebx, Immediate(kStringEncodingMask));
__ j(not_zero, &seq_one_byte_string); // Go to (5).
// eax: sequential subject string (or look-alike, external string)
@@ -1237,13 +1215,13 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
// Call runtime on identical JSObjects. Otherwise return equal.
- __ cmpb(ecx, static_cast<uint8_t>(FIRST_JS_RECEIVER_TYPE));
+ __ cmpb(ecx, Immediate(FIRST_JS_RECEIVER_TYPE));
__ j(above_equal, &runtime_call, Label::kFar);
// Call runtime on identical symbols since we need to throw a TypeError.
- __ cmpb(ecx, static_cast<uint8_t>(SYMBOL_TYPE));
+ __ cmpb(ecx, Immediate(SYMBOL_TYPE));
__ j(equal, &runtime_call, Label::kFar);
// Call runtime on identical SIMD values since we must throw a TypeError.
- __ cmpb(ecx, static_cast<uint8_t>(SIMD128_VALUE_TYPE));
+ __ cmpb(ecx, Immediate(SIMD128_VALUE_TYPE));
__ j(equal, &runtime_call, Label::kFar);
}
__ Move(eax, Immediate(Smi::FromInt(EQUAL)));
@@ -1389,7 +1367,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Non-strict equality. Objects are unequal if
// they are both JSObjects and not undetectable,
// and their pointers are different.
- Label return_unequal, undetectable;
+ Label return_equal, return_unequal, undetectable;
// At most one is a smi, so we can test for smi by adding the two.
// A smi plus a heap object has the low bit set, a heap object plus
// a heap object has the low bit clear.
@@ -1397,16 +1375,16 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTagMask == 1);
__ lea(ecx, Operand(eax, edx, times_1, 0));
__ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &runtime_call, Label::kNear);
+ __ j(not_zero, &runtime_call);
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
__ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
+ Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, &undetectable, Label::kNear);
__ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
+ Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, &return_unequal, Label::kNear);
__ CmpInstanceType(ebx, FIRST_JS_RECEIVER_TYPE);
@@ -1420,8 +1398,18 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ bind(&undetectable);
__ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
+ Immediate(1 << Map::kIsUndetectable));
__ j(zero, &return_unequal, Label::kNear);
+
+ // If both sides are JSReceivers, then the result is false according to
+ // the HTML specification, which says that only comparisons with null or
+ // undefined are affected by special casing for document.all.
+ __ CmpInstanceType(ebx, ODDBALL_TYPE);
+ __ j(zero, &return_equal, Label::kNear);
+ __ CmpInstanceType(ecx, ODDBALL_TYPE);
+ __ j(not_zero, &return_unequal, Label::kNear);
+
+ __ bind(&return_equal);
__ Move(eax, Immediate(EQUAL));
__ ret(0); // eax, edx were pushed
}
@@ -1980,8 +1968,9 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Push marker in two places.
int marker = type();
- __ push(Immediate(Smi::FromInt(marker))); // context slot
- __ push(Immediate(Smi::FromInt(marker))); // function slot
+ __ push(Immediate(Smi::FromInt(marker))); // marker
+ ExternalReference context_address(Isolate::kContextAddress, isolate());
+ __ push(Operand::StaticVariable(context_address)); // context
// Save callee-saved registers (C calling conventions).
__ push(edi);
__ push(esi);
@@ -2110,9 +2099,14 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
__ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
__ j(not_equal, &slow_case);
+ // Go to the runtime if the function is not a constructor.
+ __ test_b(FieldOperand(function_map, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsConstructor));
+ __ j(zero, &slow_case);
+
// Ensure that {function} has an instance prototype.
__ test_b(FieldOperand(function_map, Map::kBitFieldOffset),
- static_cast<uint8_t>(1 << Map::kHasNonInstancePrototype));
+ Immediate(1 << Map::kHasNonInstancePrototype));
__ j(not_zero, &slow_case);
// Get the "prototype" (or initial map) of the {function}.
@@ -2146,7 +2140,7 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
// Check if the object needs to be access checked.
__ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
- 1 << Map::kIsAccessCheckNeeded);
+ Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &fast_runtime_fallback, Label::kNear);
// Check if the current object is a Proxy.
__ CmpInstanceType(object_map, JS_PROXY_TYPE);
@@ -2181,7 +2175,8 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
__ Push(object);
__ Push(function);
__ PushReturnAddressFrom(scratch);
- __ TailCallRuntime(Runtime::kInstanceOf);
+ __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
+ : Runtime::kInstanceOf);
}
@@ -2493,13 +2488,13 @@ void SubStringStub::Generate(MacroAssembler* masm) {
Label two_byte_sequential, runtime_drop_two, sequential_string;
STATIC_ASSERT(kExternalStringTag != 0);
STATIC_ASSERT(kSeqStringTag == 0);
- __ test_b(ebx, kExternalStringTag);
+ __ test_b(ebx, Immediate(kExternalStringTag));
__ j(zero, &sequential_string);
// Handle external string.
// Rule out short external strings.
STATIC_ASSERT(kShortExternalStringTag != 0);
- __ test_b(ebx, kShortExternalStringMask);
+ __ test_b(ebx, Immediate(kShortExternalStringMask));
__ j(not_zero, &runtime);
__ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
@@ -2512,7 +2507,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ push(edi);
__ SmiUntag(ecx);
STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
- __ test_b(ebx, kStringEncodingMask);
+ __ test_b(ebx, Immediate(kStringEncodingMask));
__ j(zero, &two_byte_sequential);
// Sequential one byte string. Allocate the result.
@@ -2601,23 +2596,21 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ Ret();
__ bind(&not_heap_number);
- Label not_string, slow_string;
+ NonNumberToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+}
+
+void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
+ // The NonNumberToNumber stub takes one argument in eax.
+ __ AssertNotNumber(eax);
+
+ Label not_string;
__ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edi);
// eax: object
// edi: object map
__ j(above_equal, &not_string, Label::kNear);
- // Check if string has a cached array index.
- __ test(FieldOperand(eax, String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- __ j(not_zero, &slow_string, Label::kNear);
- __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
- __ IndexFromHash(eax, eax);
- __ Ret();
- __ bind(&slow_string);
- __ pop(ecx); // Pop return address.
- __ push(eax); // Push argument.
- __ push(ecx); // Push return address.
- __ TailCallRuntime(Runtime::kStringToNumber);
+ StringToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
__ bind(&not_string);
Label not_oddball;
@@ -2633,26 +2626,26 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kToNumber);
}
+void StringToNumberStub::Generate(MacroAssembler* masm) {
+ // The StringToNumber stub takes one argument in eax.
+ __ AssertString(eax);
-void ToLengthStub::Generate(MacroAssembler* masm) {
- // The ToLength stub takes on argument in eax.
- Label not_smi, positive_smi;
- __ JumpIfNotSmi(eax, &not_smi, Label::kNear);
- STATIC_ASSERT(kSmiTag == 0);
- __ test(eax, eax);
- __ j(greater_equal, &positive_smi, Label::kNear);
- __ xor_(eax, eax);
- __ bind(&positive_smi);
+ // Check if string has a cached array index.
+ Label runtime;
+ __ test(FieldOperand(eax, String::kHashFieldOffset),
+ Immediate(String::kContainsCachedArrayIndexMask));
+ __ j(not_zero, &runtime, Label::kNear);
+ __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
+ __ IndexFromHash(eax, eax);
__ Ret();
- __ bind(&not_smi);
- __ pop(ecx); // Pop return address.
- __ push(eax); // Push argument.
- __ push(ecx); // Push return address.
- __ TailCallRuntime(Runtime::kToLength);
+ __ bind(&runtime);
+ __ PopReturnAddressTo(ecx); // Pop return address.
+ __ Push(eax); // Push argument.
+ __ PushReturnAddressFrom(ecx); // Push return address.
+ __ TailCallRuntime(Runtime::kStringToNumber);
}
-
void ToStringStub::Generate(MacroAssembler* masm) {
// The ToString stub takes one argument in eax.
Label is_number;
@@ -2849,44 +2842,6 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
}
-void StringCompareStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- edx : left string
- // -- eax : right string
- // -- esp[0] : return address
- // -----------------------------------
- __ AssertString(edx);
- __ AssertString(eax);
-
- Label not_same;
- __ cmp(edx, eax);
- __ j(not_equal, &not_same, Label::kNear);
- __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
- __ IncrementCounter(isolate()->counters()->string_compare_native(), 1);
- __ Ret();
-
- __ bind(&not_same);
-
- // Check that both objects are sequential one-byte strings.
- Label runtime;
- __ JumpIfNotBothSequentialOneByteStrings(edx, eax, ecx, ebx, &runtime);
-
- // Compare flat one-byte strings.
- __ IncrementCounter(isolate()->counters()->string_compare_native(), 1);
- StringHelper::GenerateCompareFlatOneByteStrings(masm, edx, eax, ecx, ebx,
- edi);
-
- // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ bind(&runtime);
- __ PopReturnAddressTo(ecx);
- __ Push(edx);
- __ Push(eax);
- __ PushReturnAddressFrom(ecx);
- __ TailCallRuntime(Runtime::kStringCompare);
-}
-
-
void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- edx : left
@@ -3217,13 +3172,20 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
// Handle more complex cases in runtime.
__ bind(&runtime);
- __ pop(tmp1); // Return address.
- __ push(left);
- __ push(right);
- __ push(tmp1);
if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(left);
+ __ Push(right);
+ __ CallRuntime(Runtime::kStringEqual);
+ }
+ __ sub(eax, Immediate(masm->isolate()->factory()->true_value()));
+ __ Ret();
} else {
+ __ pop(tmp1); // Return address.
+ __ push(left);
+ __ push(right);
+ __ push(tmp1);
__ TailCallRuntime(Runtime::kStringCompare);
}
@@ -3708,7 +3670,7 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ call(ces.GetCode(), RelocInfo::CODE_TARGET);
int parameter_count_offset =
- StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
__ mov(ebx, MemOperand(ebp, parameter_count_offset));
masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
__ pop(ecx);
@@ -4426,7 +4388,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
// is the low bit set? If so, we are holey and that is good.
- __ test_b(edx, 1);
+ __ test_b(edx, Immediate(1));
__ j(not_zero, &normal_sequence);
}
@@ -4875,7 +4837,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
__ bind(&loop);
__ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
__ bind(&loop_entry);
- __ cmp(edi, Operand(edx, StandardFrameConstants::kMarkerOffset));
+ __ cmp(edi, Operand(edx, StandardFrameConstants::kFunctionOffset));
__ j(not_equal, &loop);
}
@@ -4883,7 +4845,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// arguments adaptor frame below the function frame).
Label no_rest_parameters;
__ mov(ebx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
- __ cmp(Operand(ebx, StandardFrameConstants::kContextOffset),
+ __ cmp(Operand(ebx, CommonFrameConstants::kContextOrFrameTypeOffset),
Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(not_equal, &no_rest_parameters, Label::kNear);
@@ -5026,7 +4988,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
__ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(eax, Operand(ebx, StandardFrameConstants::kContextOffset));
+ __ mov(eax, Operand(ebx, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adaptor_frame, Label::kNear);
@@ -5263,14 +5225,14 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ bind(&loop);
__ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
__ bind(&loop_entry);
- __ cmp(edi, Operand(edx, StandardFrameConstants::kMarkerOffset));
+ __ cmp(edi, Operand(edx, StandardFrameConstants::kFunctionOffset));
__ j(not_equal, &loop);
}
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
__ mov(ebx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
- __ cmp(Operand(ebx, StandardFrameConstants::kContextOffset),
+ __ cmp(Operand(ebx, CommonFrameConstants::kContextOrFrameTypeOffset),
Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &arguments_adaptor, Label::kNear);
{
@@ -5570,7 +5532,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
Label profiler_disabled;
Label end_profiler_check;
__ mov(eax, Immediate(ExternalReference::is_profiling_address(isolate)));
- __ cmpb(Operand(eax, 0), 0);
+ __ cmpb(Operand(eax, 0), Immediate(0));
__ j(zero, &profiler_disabled);
// Additional parameter is the address of the actual getter function.
@@ -5693,17 +5655,13 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ jmp(&leave_exit_frame);
}
-static void CallApiFunctionStubHelper(MacroAssembler* masm,
- const ParameterCount& argc,
- bool return_first_arg,
- bool call_data_undefined, bool is_lazy) {
+void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- edi : callee
// -- ebx : call_data
// -- ecx : holder
// -- edx : api_function_address
// -- esi : context
- // -- eax : number of arguments if argc is a register
// --
// -- esp[0] : return address
// -- esp[4] : last argument
@@ -5730,17 +5688,9 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
STATIC_ASSERT(FCA::kHolderIndex == 0);
STATIC_ASSERT(FCA::kArgsLength == 7);
- DCHECK(argc.is_immediate() || eax.is(argc.reg()));
-
- if (argc.is_immediate()) {
- __ pop(return_address);
- // context save.
- __ push(context);
- } else {
- // pop return address and save context
- __ xchg(context, Operand(esp, 0));
- return_address = context;
- }
+ __ pop(return_address);
+ // context save.
+ __ push(context);
// callee
__ push(callee);
@@ -5749,7 +5699,7 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
__ push(call_data);
Register scratch = call_data;
- if (!call_data_undefined) {
+ if (!call_data_undefined()) {
// return value
__ push(Immediate(masm->isolate()->factory()->undefined_value()));
// return value default
@@ -5770,7 +5720,7 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
// push return address
__ push(return_address);
- if (!is_lazy) {
+ if (!is_lazy()) {
// load context from callee
__ mov(context, FieldOperand(callee, JSFunction::kContextOffset));
}
@@ -5789,27 +5739,13 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
// FunctionCallbackInfo::implicit_args_.
__ mov(ApiParameterOperand(2), scratch);
- if (argc.is_immediate()) {
- __ add(scratch,
- Immediate((argc.immediate() + FCA::kArgsLength - 1) * kPointerSize));
- // FunctionCallbackInfo::values_.
- __ mov(ApiParameterOperand(3), scratch);
- // FunctionCallbackInfo::length_.
- __ Move(ApiParameterOperand(4), Immediate(argc.immediate()));
- // FunctionCallbackInfo::is_construct_call_.
- __ Move(ApiParameterOperand(5), Immediate(0));
- } else {
- __ lea(scratch, Operand(scratch, argc.reg(), times_pointer_size,
- (FCA::kArgsLength - 1) * kPointerSize));
- // FunctionCallbackInfo::values_.
- __ mov(ApiParameterOperand(3), scratch);
- // FunctionCallbackInfo::length_.
- __ mov(ApiParameterOperand(4), argc.reg());
- // FunctionCallbackInfo::is_construct_call_.
- __ lea(argc.reg(), Operand(argc.reg(), times_pointer_size,
- (FCA::kArgsLength + 1) * kPointerSize));
- __ mov(ApiParameterOperand(5), argc.reg());
- }
+ __ add(scratch, Immediate((argc() + FCA::kArgsLength - 1) * kPointerSize));
+ // FunctionCallbackInfo::values_.
+ __ mov(ApiParameterOperand(3), scratch);
+ // FunctionCallbackInfo::length_.
+ __ Move(ApiParameterOperand(4), Immediate(argc()));
+ // FunctionCallbackInfo::is_construct_call_.
+ __ Move(ApiParameterOperand(5), Immediate(0));
// v8::InvocationCallback's argument.
__ lea(scratch, ApiParameterOperand(2));
@@ -5822,7 +5758,7 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
(2 + FCA::kContextSaveIndex) * kPointerSize);
// Stores return the first js argument
int return_value_offset = 0;
- if (return_first_arg) {
+ if (is_store()) {
return_value_offset = 2 + FCA::kArgsLength;
} else {
return_value_offset = 2 + FCA::kReturnValueOffset;
@@ -5831,10 +5767,8 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
int stack_space = 0;
Operand is_construct_call_operand = ApiParameterOperand(5);
Operand* stack_space_operand = &is_construct_call_operand;
- if (argc.is_immediate()) {
- stack_space = argc.immediate() + FCA::kArgsLength + 1;
- stack_space_operand = nullptr;
- }
+ stack_space = argc() + FCA::kArgsLength + 1;
+ stack_space_operand = nullptr;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
ApiParameterOperand(1), stack_space,
stack_space_operand, return_value_operand,
@@ -5842,23 +5776,6 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
}
-void CallApiFunctionStub::Generate(MacroAssembler* masm) {
- bool call_data_undefined = this->call_data_undefined();
- CallApiFunctionStubHelper(masm, ParameterCount(eax), false,
- call_data_undefined, false);
-}
-
-
-void CallApiAccessorStub::Generate(MacroAssembler* masm) {
- bool is_store = this->is_store();
- int argc = this->argc();
- bool call_data_undefined = this->call_data_undefined();
- bool is_lazy = this->is_lazy();
- CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
- call_data_undefined, is_lazy);
-}
-
-
void CallApiGetterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- esp[0] : return address
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 2f94f35665..2190531b43 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -704,6 +704,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
__ j(equal, &only_change_map);
+ __ push(esi);
__ push(eax);
__ push(edx);
__ push(ebx);
@@ -753,10 +754,10 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Call into runtime if GC is required.
__ bind(&gc_required);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ pop(ebx);
__ pop(edx);
__ pop(eax);
+ __ pop(esi);
__ jmp(fail);
// Box doubles into heap numbers.
@@ -818,7 +819,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Restore registers.
__ pop(eax);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ pop(esi);
__ bind(&success);
}
@@ -886,11 +887,11 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
}
// Rule out short external strings.
STATIC_ASSERT(kShortExternalStringTag != 0);
- __ test_b(result, kShortExternalStringMask);
+ __ test_b(result, Immediate(kShortExternalStringMask));
__ j(not_zero, call_runtime);
// Check encoding.
STATIC_ASSERT(kTwoByteStringTag == 0);
- __ test_b(result, kStringEncodingMask);
+ __ test_b(result, Immediate(kStringEncodingMask));
__ mov(result, FieldOperand(string, ExternalString::kResourceDataOffset));
__ j(not_equal, &one_byte_external, Label::kNear);
// Two-byte string.
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index a3756ae443..656d3e97c3 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -186,20 +186,6 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
}
}
-bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
- int parameter_count = shared->internal_formal_parameter_count() + 1;
- unsigned input_frame_size = input_->GetFrameSize();
- unsigned alignment_state_offset =
- input_frame_size - parameter_count * kPointerSize -
- StandardFrameConstants::kFixedFrameSize -
- kPointerSize;
- DCHECK(JavaScriptFrameConstants::kDynamicAlignmentStateOffset ==
- JavaScriptFrameConstants::kLocal0Offset);
- int32_t alignment_state = input_->GetFrameSlot(alignment_state_offset);
- return (alignment_state == kAlignmentPaddingPushed);
-}
-
-
#define __ masm()->
void Deoptimizer::TableEntryGenerator::Generate() {
@@ -240,7 +226,12 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Allocate a new deoptimizer object.
__ PrepareCallCFunction(6, eax);
+ __ mov(eax, Immediate(0));
+ Label context_check;
+ __ mov(edi, Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ JumpIfSmi(edi, &context_check);
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ bind(&context_check);
__ mov(Operand(esp, 0 * kPointerSize), eax); // Function.
__ mov(Operand(esp, 1 * kPointerSize), Immediate(type())); // Bailout type.
__ mov(Operand(esp, 2 * kPointerSize), ebx); // Bailout id.
@@ -311,19 +302,9 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
__ pop(eax);
- // If frame was dynamically aligned, pop padding.
- Label no_padding;
- __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
- Immediate(0));
- __ j(equal, &no_padding);
- __ pop(ecx);
- if (FLAG_debug_code) {
- __ cmp(ecx, Immediate(kAlignmentZapValue));
- __ Assert(equal, kAlignmentMarkerExpected);
- }
- __ bind(&no_padding);
+ __ mov(esp, Operand(eax, Deoptimizer::caller_frame_top_offset()));
- // Replace the current frame with the output frames.
+ // Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop,
outer_loop_header, inner_loop_header;
// Outer loop state: eax = current FrameDescription**, edx = one past the
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index b11ff97752..3cd0ac6e52 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -29,29 +29,18 @@ struct ByteMnemonic {
};
static const ByteMnemonic two_operands_instr[] = {
- {0x01, "add", OPER_REG_OP_ORDER},
- {0x03, "add", REG_OPER_OP_ORDER},
- {0x09, "or", OPER_REG_OP_ORDER},
- {0x0B, "or", REG_OPER_OP_ORDER},
- {0x1B, "sbb", REG_OPER_OP_ORDER},
- {0x21, "and", OPER_REG_OP_ORDER},
- {0x23, "and", REG_OPER_OP_ORDER},
- {0x29, "sub", OPER_REG_OP_ORDER},
- {0x2A, "subb", REG_OPER_OP_ORDER},
- {0x2B, "sub", REG_OPER_OP_ORDER},
- {0x31, "xor", OPER_REG_OP_ORDER},
- {0x33, "xor", REG_OPER_OP_ORDER},
- {0x38, "cmpb", OPER_REG_OP_ORDER},
- {0x39, "cmp", OPER_REG_OP_ORDER},
- {0x3A, "cmpb", REG_OPER_OP_ORDER},
- {0x3B, "cmp", REG_OPER_OP_ORDER},
- {0x84, "test_b", REG_OPER_OP_ORDER},
- {0x85, "test", REG_OPER_OP_ORDER},
- {0x87, "xchg", REG_OPER_OP_ORDER},
- {0x8A, "mov_b", REG_OPER_OP_ORDER},
- {0x8B, "mov", REG_OPER_OP_ORDER},
- {0x8D, "lea", REG_OPER_OP_ORDER},
- {-1, "", UNSET_OP_ORDER}};
+ {0x01, "add", OPER_REG_OP_ORDER}, {0x03, "add", REG_OPER_OP_ORDER},
+ {0x09, "or", OPER_REG_OP_ORDER}, {0x0B, "or", REG_OPER_OP_ORDER},
+ {0x13, "adc", REG_OPER_OP_ORDER}, {0x1B, "sbb", REG_OPER_OP_ORDER},
+ {0x21, "and", OPER_REG_OP_ORDER}, {0x23, "and", REG_OPER_OP_ORDER},
+ {0x29, "sub", OPER_REG_OP_ORDER}, {0x2A, "subb", REG_OPER_OP_ORDER},
+ {0x2B, "sub", REG_OPER_OP_ORDER}, {0x31, "xor", OPER_REG_OP_ORDER},
+ {0x33, "xor", REG_OPER_OP_ORDER}, {0x38, "cmpb", OPER_REG_OP_ORDER},
+ {0x39, "cmp", OPER_REG_OP_ORDER}, {0x3A, "cmpb", REG_OPER_OP_ORDER},
+ {0x3B, "cmp", REG_OPER_OP_ORDER}, {0x84, "test_b", REG_OPER_OP_ORDER},
+ {0x85, "test", REG_OPER_OP_ORDER}, {0x87, "xchg", REG_OPER_OP_ORDER},
+ {0x8A, "mov_b", REG_OPER_OP_ORDER}, {0x8B, "mov", REG_OPER_OP_ORDER},
+ {0x8D, "lea", REG_OPER_OP_ORDER}, {-1, "", UNSET_OP_ORDER}};
static const ByteMnemonic zero_operands_instr[] = {
{0xC3, "ret", UNSET_OP_ORDER},
@@ -1218,20 +1207,34 @@ static const char* F0Mnem(byte f0byte) {
switch (f0byte) {
case 0x0B:
return "ud2";
- case 0x18: return "prefetch";
- case 0xA2: return "cpuid";
- case 0xBE: return "movsx_b";
- case 0xBF: return "movsx_w";
- case 0xB6: return "movzx_b";
- case 0xB7: return "movzx_w";
- case 0xAF: return "imul";
- case 0xA5: return "shld";
- case 0xAD: return "shrd";
- case 0xAC: return "shrd"; // 3-operand version.
- case 0xAB: return "bts";
+ case 0x18:
+ return "prefetch";
+ case 0xA2:
+ return "cpuid";
+ case 0xBE:
+ return "movsx_b";
+ case 0xBF:
+ return "movsx_w";
+ case 0xB6:
+ return "movzx_b";
+ case 0xB7:
+ return "movzx_w";
+ case 0xAF:
+ return "imul";
+ case 0xA4:
+ return "shld";
+ case 0xA5:
+ return "shld";
+ case 0xAD:
+ return "shrd";
+ case 0xAC:
+ return "shrd"; // 3-operand version.
+ case 0xAB:
+ return "bts";
case 0xBC:
return "bsf";
- case 0xBD: return "bsr";
+ case 0xBD:
+ return "bsr";
default: return NULL;
}
}
@@ -1470,8 +1473,18 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += SetCC(data);
} else if ((f0byte & 0xF0) == 0x40) {
data += CMov(data);
+ } else if (f0byte == 0xA4 || f0byte == 0xAC) {
+ // shld, shrd
+ data += 2;
+ AppendToBuffer("%s ", f0mnem);
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ data += 2;
+ AppendToBuffer("%s,%s,%d", NameOfCPURegister(rm),
+ NameOfCPURegister(regop), static_cast<int>(imm8));
} else if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) {
- // shrd, shld, bts
+ // shrd_cl, shld_cl, bts
data += 2;
AppendToBuffer("%s ", f0mnem);
int mod, regop, rm;
@@ -1608,6 +1621,13 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
int imm = *reinterpret_cast<int16_t*>(data);
AppendToBuffer(",0x%x", imm);
data += 2;
+ } else if (*data == 0xF7) {
+ data++;
+ AppendToBuffer("%s ", "test_w");
+ data += PrintRightOperand(data);
+ int imm = *reinterpret_cast<int16_t*>(data);
+ AppendToBuffer(",0x%x", imm);
+ data += 2;
} else if (*data == 0x0F) {
data++;
if (*data == 0x38) {
diff --git a/deps/v8/src/ia32/frames-ia32.h b/deps/v8/src/ia32/frames-ia32.h
index 609dfec7b6..2d7cd02a42 100644
--- a/deps/v8/src/ia32/frames-ia32.h
+++ b/deps/v8/src/ia32/frames-ia32.h
@@ -28,10 +28,6 @@ const int kNumJSCallerSaved = 5;
// Number of registers for which space is reserved in safepoints.
const int kNumSafepointRegisters = 8;
-const int kNoAlignmentPadding = 0;
-const int kAlignmentPaddingPushed = 2;
-const int kAlignmentZapValue = 0x12345678; // Not heap object tagged.
-
// ----------------------------------------------------
@@ -46,13 +42,11 @@ class EntryFrameConstants : public AllStatic {
static const int kArgvOffset = +6 * kPointerSize;
};
-
-class ExitFrameConstants : public AllStatic {
+class ExitFrameConstants : public TypedFrameConstants {
public:
- static const int kFrameSize = 2 * kPointerSize;
-
- static const int kCodeOffset = -2 * kPointerSize;
- static const int kSPOffset = -1 * kPointerSize;
+ static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ DEFINE_TYPED_FRAME_SIZES(2);
static const int kCallerFPOffset = 0 * kPointerSize;
static const int kCallerPCOffset = +1 * kPointerSize;
@@ -70,13 +64,11 @@ class JavaScriptFrameConstants : public AllStatic {
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
static const int kLastParameterOffset = +2 * kPointerSize;
- static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+ static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
// Caller SP-relative.
static const int kParam0Offset = -2 * kPointerSize;
static const int kReceiverOffset = -1 * kPointerSize;
-
- static const int kDynamicAlignmentStateOffset = kLocal0Offset;
};
diff --git a/deps/v8/src/ia32/interface-descriptors-ia32.cc b/deps/v8/src/ia32/interface-descriptors-ia32.cc
index b36cf63b87..2748f907ac 100644
--- a/deps/v8/src/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/ia32/interface-descriptors-ia32.cc
@@ -114,36 +114,8 @@ void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
}
-void ToNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // ToNumberStub invokes a function, and therefore needs a context.
- Register registers[] = {eax};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
// static
-const Register ToLengthDescriptor::ReceiverRegister() { return eax; }
-
-
-// static
-const Register ToStringDescriptor::ReceiverRegister() { return eax; }
-
-
-// static
-const Register ToNameDescriptor::ReceiverRegister() { return eax; }
-
-
-// static
-const Register ToObjectDescriptor::ReceiverRegister() { return eax; }
-
-
-void NumberToStringDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {eax};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
+const Register TypeConversionDescriptor::ArgumentRegister() { return eax; }
void TypeofDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -272,6 +244,13 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(0, nullptr, nullptr);
}
+#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
+ void Allocate##Type##Descriptor::InitializePlatformSpecific( \
+ CallInterfaceDescriptorData* data) { \
+ data->InitializePlatformSpecific(0, nullptr, nullptr); \
+ }
+SIMD128_TYPES(SIMD128_ALLOC_DESC)
+#undef SIMD128_ALLOC_DESC
void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -316,24 +295,16 @@ void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void CompareDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {edx, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
-void CompareNilDescriptor::InitializePlatformSpecific(
+void FastArrayPushDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
+ // stack param count needs (arg count)
Register registers[] = {eax};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void ToBooleanDescriptor::InitializePlatformSpecific(
+void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {eax};
+ Register registers[] = {edx, eax};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
@@ -397,21 +368,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void ApiFunctionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- edi, // callee
- ebx, // call_data
- ecx, // holder
- edx, // api_function_address
- eax, // actual number of arguments
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ApiAccessorDescriptor::InitializePlatformSpecific(
+void ApiCallbackDescriptorBase::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
edi, // callee
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 12daec8285..f9fd8d6a40 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -204,15 +204,15 @@ void MacroAssembler::RememberedSetHelper(
mov(Operand::StaticVariable(store_buffer), scratch);
// Call stub on end of buffer.
// Check for end of buffer.
- test(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
+ test(scratch, Immediate(StoreBuffer::kStoreBufferMask));
if (and_then == kReturnAtEnd) {
Label buffer_overflowed;
- j(not_equal, &buffer_overflowed, Label::kNear);
+ j(equal, &buffer_overflowed, Label::kNear);
ret(0);
bind(&buffer_overflowed);
} else {
DCHECK(and_then == kFallThroughAtEnd);
- j(equal, &done, Label::kNear);
+ j(not_equal, &done, Label::kNear);
}
StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
CallStub(&store_buffer_overflow);
@@ -454,7 +454,7 @@ void MacroAssembler::RecordWriteField(
lea(dst, FieldOperand(object, offset));
if (emit_debug_code()) {
Label ok;
- test_b(dst, (1 << kPointerSizeLog2) - 1);
+ test_b(dst, Immediate((1 << kPointerSizeLog2) - 1));
j(zero, &ok, Label::kNear);
int3();
bind(&ok);
@@ -487,7 +487,7 @@ void MacroAssembler::RecordWriteForMap(
if (emit_debug_code()) {
Label ok;
lea(address, FieldOperand(object, HeapObject::kMapOffset));
- test_b(address, (1 << kPointerSizeLog2) - 1);
+ test_b(address, Immediate((1 << kPointerSizeLog2) - 1));
j(zero, &ok, Label::kNear);
int3();
bind(&ok);
@@ -682,7 +682,6 @@ void MacroAssembler::DebugBreak() {
call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
}
-
void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
xorps(dst, dst);
cvtsi2sd(dst, src);
@@ -707,6 +706,71 @@ void MacroAssembler::Cvtui2ss(XMMRegister dst, Register src, Register tmp) {
bind(&jmp_return);
}
+void MacroAssembler::ShlPair(Register high, Register low, uint8_t shift) {
+ if (shift >= 32) {
+ mov(high, low);
+ shl(high, shift - 32);
+ xor_(low, low);
+ } else {
+ shld(high, low, shift);
+ shl(low, shift);
+ }
+}
+
+void MacroAssembler::ShlPair_cl(Register high, Register low) {
+ shld_cl(high, low);
+ shl_cl(low);
+ Label done;
+ test(ecx, Immediate(0x20));
+ j(equal, &done, Label::kNear);
+ mov(high, low);
+ xor_(low, low);
+ bind(&done);
+}
+
+void MacroAssembler::ShrPair(Register high, Register low, uint8_t shift) {
+ if (shift >= 32) {
+ mov(low, high);
+ shr(low, shift - 32);
+ xor_(high, high);
+ } else {
+ shrd(high, low, shift);
+ shr(high, shift);
+ }
+}
+
+void MacroAssembler::ShrPair_cl(Register high, Register low) {
+ shrd_cl(low, high);
+ shr_cl(high);
+ Label done;
+ test(ecx, Immediate(0x20));
+ j(equal, &done, Label::kNear);
+ mov(low, high);
+ xor_(high, high);
+ bind(&done);
+}
+
+void MacroAssembler::SarPair(Register high, Register low, uint8_t shift) {
+ if (shift >= 32) {
+ mov(low, high);
+ sar(low, shift - 32);
+ sar(high, 31);
+ } else {
+ shrd(high, low, shift);
+ sar(high, shift);
+ }
+}
+
+void MacroAssembler::SarPair_cl(Register high, Register low) {
+ shrd_cl(low, high);
+ sar_cl(high);
+ Label done;
+ test(ecx, Immediate(0x20));
+ j(equal, &done, Label::kNear);
+ mov(low, high);
+ sar(high, 31);
+ bind(&done);
+}
bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
static const int kMaxImmediateBits = 17;
@@ -744,8 +808,7 @@ void MacroAssembler::CmpObjectType(Register heap_object,
void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
- cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
- static_cast<int8_t>(type));
+ cmpb(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
}
@@ -757,7 +820,7 @@ void MacroAssembler::CheckFastElements(Register map,
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
cmpb(FieldOperand(map, Map::kBitField2Offset),
- Map::kMaximumBitField2FastHoleyElementValue);
+ Immediate(Map::kMaximumBitField2FastHoleyElementValue));
j(above, fail, distance);
}
@@ -770,10 +833,10 @@ void MacroAssembler::CheckFastObjectElements(Register map,
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
cmpb(FieldOperand(map, Map::kBitField2Offset),
- Map::kMaximumBitField2FastHoleySmiElementValue);
+ Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
j(below_equal, fail, distance);
cmpb(FieldOperand(map, Map::kBitField2Offset),
- Map::kMaximumBitField2FastHoleyElementValue);
+ Immediate(Map::kMaximumBitField2FastHoleyElementValue));
j(above, fail, distance);
}
@@ -784,7 +847,7 @@ void MacroAssembler::CheckFastSmiElements(Register map,
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
cmpb(FieldOperand(map, Map::kBitField2Offset),
- Map::kMaximumBitField2FastHoleySmiElementValue);
+ Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
j(above, fail, distance);
}
@@ -873,7 +936,7 @@ Condition MacroAssembler::IsObjectNameType(Register heap_object,
Register instance_type) {
mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
- cmpb(instance_type, static_cast<uint8_t>(LAST_NAME_TYPE));
+ cmpb(instance_type, Immediate(LAST_NAME_TYPE));
return below_equal;
}
@@ -895,6 +958,15 @@ void MacroAssembler::AssertNumber(Register object) {
}
}
+void MacroAssembler::AssertNotNumber(Register object) {
+ if (emit_debug_code()) {
+ test(object, Immediate(kSmiTagMask));
+ Check(not_equal, kOperandIsANumber);
+ cmp(FieldOperand(object, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ Check(not_equal, kOperandIsANumber);
+ }
+}
void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
@@ -988,15 +1060,12 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
}
-
-void MacroAssembler::StubPrologue() {
+void MacroAssembler::StubPrologue(StackFrame::Type type) {
push(ebp); // Caller's frame pointer.
mov(ebp, esp);
- push(esi); // Callee's context.
- push(Immediate(Smi::FromInt(StackFrame::STUB)));
+ push(Immediate(Smi::FromInt(type)));
}
-
void MacroAssembler::Prologue(bool code_pre_aging) {
PredictableCodeSizeScope predictible_code_size_scope(this,
kNoCodeAgeSequenceLength);
@@ -1031,9 +1100,10 @@ void MacroAssembler::EnterFrame(StackFrame::Type type,
void MacroAssembler::EnterFrame(StackFrame::Type type) {
push(ebp);
mov(ebp, esp);
- push(esi);
push(Immediate(Smi::FromInt(type)));
- push(Immediate(CodeObject()));
+ if (type == StackFrame::INTERNAL) {
+ push(Immediate(CodeObject()));
+ }
if (emit_debug_code()) {
cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value()));
Check(not_equal, kCodeObjectNotProperlyPatched);
@@ -1043,7 +1113,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
if (emit_debug_code()) {
- cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
+ cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
Immediate(Smi::FromInt(type)));
Check(equal, kStackFrameTypesMustMatch);
}
@@ -1053,15 +1123,17 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
void MacroAssembler::EnterExitFramePrologue() {
// Set up the frame structure on the stack.
- DCHECK(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
- DCHECK(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
- DCHECK(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
+ DCHECK_EQ(+2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
+ DCHECK_EQ(+1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
+ DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
push(ebp);
mov(ebp, esp);
// Reserve room for entry stack pointer and push the code object.
- DCHECK(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
+ push(Immediate(Smi::FromInt(StackFrame::EXIT)));
+ DCHECK_EQ(-2 * kPointerSize, ExitFrameConstants::kSPOffset);
push(Immediate(0)); // Saved entry sp, patched before call.
+ DCHECK_EQ(-3 * kPointerSize, ExitFrameConstants::kCodeOffset);
push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot.
// Save the frame pointer and the context in top.
@@ -1080,7 +1152,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
int space = XMMRegister::kMaxNumRegisters * kDoubleSize +
argc * kPointerSize;
sub(esp, Immediate(space));
- const int offset = -2 * kPointerSize;
+ const int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
@@ -1123,7 +1195,7 @@ void MacroAssembler::EnterApiExitFrame(int argc) {
void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
// Optionally restore all XMM registers.
if (save_doubles) {
- const int offset = -2 * kPointerSize;
+ const int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
@@ -1206,8 +1278,18 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
DCHECK(!holder_reg.is(scratch2));
DCHECK(!scratch1.is(scratch2));
- // Load current lexical context from the stack frame.
- mov(scratch1, Operand(ebp, StandardFrameConstants::kContextOffset));
+ // Load current lexical context from the active StandardFrame, which
+ // may require crawling past STUB frames.
+ Label load_context;
+ Label has_context;
+ mov(scratch2, ebp);
+ bind(&load_context);
+ mov(scratch1,
+ MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
+ JumpIfNotSmi(scratch1, &has_context);
+ mov(scratch2, MemOperand(scratch2, CommonFrameConstants::kCallerFPOffset));
+ jmp(&load_context);
+ bind(&has_context);
// When generating debug code, make sure the lexical context is set.
if (emit_debug_code()) {
@@ -1920,7 +2002,7 @@ void MacroAssembler::BooleanBitTest(Register object,
int byte_index = bit_index / kBitsPerByte;
int byte_bit_index = bit_index & (kBitsPerByte - 1);
test_b(FieldOperand(object, field_offset + byte_index),
- static_cast<byte>(1 << byte_bit_index));
+ Immediate(1 << byte_bit_index));
}
@@ -2086,6 +2168,87 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}
+void MacroAssembler::PrepareForTailCall(
+ const ParameterCount& callee_args_count, Register caller_args_count_reg,
+ Register scratch0, Register scratch1, ReturnAddressState ra_state,
+ int number_of_temp_values_after_return_address) {
+#if DEBUG
+ if (callee_args_count.is_reg()) {
+ DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
+ scratch1));
+ } else {
+ DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
+ }
+ DCHECK(ra_state != ReturnAddressState::kNotOnStack ||
+ number_of_temp_values_after_return_address == 0);
+#endif
+
+ // Calculate the destination address where we will put the return address
+ // after we drop current frame.
+ Register new_sp_reg = scratch0;
+ if (callee_args_count.is_reg()) {
+ sub(caller_args_count_reg, callee_args_count.reg());
+ lea(new_sp_reg,
+ Operand(ebp, caller_args_count_reg, times_pointer_size,
+ StandardFrameConstants::kCallerPCOffset -
+ number_of_temp_values_after_return_address * kPointerSize));
+ } else {
+ lea(new_sp_reg, Operand(ebp, caller_args_count_reg, times_pointer_size,
+ StandardFrameConstants::kCallerPCOffset -
+ (callee_args_count.immediate() +
+ number_of_temp_values_after_return_address) *
+ kPointerSize));
+ }
+
+ if (FLAG_debug_code) {
+ cmp(esp, new_sp_reg);
+ Check(below, kStackAccessBelowStackPointer);
+ }
+
+ // Copy return address from caller's frame to current frame's return address
+ // to avoid its trashing and let the following loop copy it to the right
+ // place.
+ Register tmp_reg = scratch1;
+ if (ra_state == ReturnAddressState::kOnStack) {
+ mov(tmp_reg, Operand(ebp, StandardFrameConstants::kCallerPCOffset));
+ mov(Operand(esp, number_of_temp_values_after_return_address * kPointerSize),
+ tmp_reg);
+ } else {
+ DCHECK(ReturnAddressState::kNotOnStack == ra_state);
+ DCHECK_EQ(0, number_of_temp_values_after_return_address);
+ Push(Operand(ebp, StandardFrameConstants::kCallerPCOffset));
+ }
+
+ // Restore caller's frame pointer now as it could be overwritten by
+ // the copying loop.
+ mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+
+ // +2 here is to copy both receiver and return address.
+ Register count_reg = caller_args_count_reg;
+ if (callee_args_count.is_reg()) {
+ lea(count_reg, Operand(callee_args_count.reg(),
+ 2 + number_of_temp_values_after_return_address));
+ } else {
+ mov(count_reg, Immediate(callee_args_count.immediate() + 2 +
+ number_of_temp_values_after_return_address));
+ // TODO(ishell): Unroll copying loop for small immediate values.
+ }
+
+ // Now copy callee arguments to the caller frame going backwards to avoid
+ // callee arguments corruption (source and destination areas could overlap).
+ Label loop, entry;
+ jmp(&entry, Label::kNear);
+ bind(&loop);
+ dec(count_reg);
+ mov(tmp_reg, Operand(esp, count_reg, times_pointer_size, 0));
+ mov(Operand(new_sp_reg, count_reg, times_pointer_size, 0), tmp_reg);
+ bind(&entry);
+ cmp(count_reg, Immediate(0));
+ j(not_equal, &loop, Label::kNear);
+
+ // Leave current frame.
+ mov(esp, new_sp_reg);
+}
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
@@ -2160,7 +2323,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
Label skip_flooding;
ExternalReference step_in_enabled =
ExternalReference::debug_step_in_enabled_address(isolate());
- cmpb(Operand::StaticVariable(step_in_enabled), 0);
+ cmpb(Operand::StaticVariable(step_in_enabled), Immediate(0));
j(equal, &skip_flooding);
{
FrameScope frame(this,
@@ -2850,7 +3013,7 @@ void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
Label succeed;
test(operand, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
j(zero, &succeed);
- cmpb(operand, static_cast<uint8_t>(SYMBOL_TYPE));
+ cmpb(operand, Immediate(SYMBOL_TYPE));
j(not_equal, not_unique_name, distance);
bind(&succeed);
@@ -2998,8 +3161,7 @@ void MacroAssembler::CheckPageFlag(
and_(scratch, object);
}
if (mask < (1 << kBitsPerByte)) {
- test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
- static_cast<uint8_t>(mask));
+ test_b(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
} else {
test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
}
@@ -3022,7 +3184,7 @@ void MacroAssembler::CheckPageFlagForMap(
DCHECK(!isolate()->heap()->mark_compact_collector()->
IsOnEvacuationCandidate(*map));
if (mask < (1 << kBitsPerByte)) {
- test_b(Operand::StaticVariable(reference), static_cast<uint8_t>(mask));
+ test_b(Operand::StaticVariable(reference), Immediate(mask));
} else {
test(Operand::StaticVariable(reference), Immediate(mask));
}
@@ -3062,7 +3224,8 @@ void MacroAssembler::HasColor(Register object,
jmp(&other_color, Label::kNear);
bind(&word_boundary);
- test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize), 1);
+ test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize),
+ Immediate(1));
j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
bind(&other_color);
@@ -3164,19 +3327,40 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
Register receiver_reg,
Register scratch_reg,
Label* no_memento_found) {
- ExternalReference new_space_start =
- ExternalReference::new_space_start(isolate());
+ Label map_check;
+ Label top_check;
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
-
- lea(scratch_reg, Operand(receiver_reg,
- JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
- cmp(scratch_reg, Immediate(new_space_start));
- j(less, no_memento_found);
+ const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
+ const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+
+ // Bail out if the object is not in new space.
+ JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
+ // If the object is in new space, we need to check whether it is on the same
+ // page as the current top.
+ lea(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
+ xor_(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
+ test(scratch_reg, Immediate(~Page::kPageAlignmentMask));
+ j(zero, &top_check);
+ // The object is on a different page than allocation top. Bail out if the
+ // object sits on the page boundary as no memento can follow and we cannot
+ // touch the memory following it.
+ lea(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
+ xor_(scratch_reg, receiver_reg);
+ test(scratch_reg, Immediate(~Page::kPageAlignmentMask));
+ j(not_zero, no_memento_found);
+ // Continue with the actual map check.
+ jmp(&map_check);
+ // If top is on the same page as the current object, we need to check whether
+ // we are below top.
+ bind(&top_check);
+ lea(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
j(greater, no_memento_found);
- cmp(MemOperand(scratch_reg, -AllocationMemento::kSize),
- Immediate(isolate()->factory()->allocation_memento_map()));
+ // Memento map check.
+ bind(&map_check);
+ mov(scratch_reg, Operand(receiver_reg, kMementoMapOffset));
+ cmp(scratch_reg, Immediate(isolate()->factory()->allocation_memento_map()));
}
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 9ebae1f463..be11f66202 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -44,6 +44,8 @@ enum PointersToHereCheck {
enum RegisterValueType { REGISTER_VALUE_IS_SMI, REGISTER_VALUE_IS_INT32 };
+enum class ReturnAddressState { kOnStack, kNotOnStack };
+
#ifdef DEBUG
bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
Register reg4 = no_reg, Register reg5 = no_reg,
@@ -234,7 +236,7 @@ class MacroAssembler: public Assembler {
void DebugBreak();
// Generates function and stub prologue code.
- void StubPrologue();
+ void StubPrologue(StackFrame::Type type);
void Prologue(bool code_pre_aging);
// Enter specific kind of exit frame. Expects the number of
@@ -323,6 +325,20 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// JavaScript invokes
+ // Removes current frame and its arguments from the stack preserving
+ // the arguments and a return address pushed to the stack for the next call.
+ // |ra_state| defines whether return address is already pushed to stack or
+ // not. Both |callee_args_count| and |caller_args_count_reg| do not include
+ // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
+ // is trashed. |number_of_temp_values_after_return_address| specifies
+ // the number of words pushed to the stack after the return address. This is
+ // to allow "allocation" of scratch registers that this function requires
+ // by saving their values on the stack.
+ void PrepareForTailCall(const ParameterCount& callee_args_count,
+ Register caller_args_count_reg, Register scratch0,
+ Register scratch1, ReturnAddressState ra_state,
+ int number_of_temp_values_after_return_address);
+
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
@@ -358,6 +374,13 @@ class MacroAssembler: public Assembler {
void Cvtui2ss(XMMRegister dst, Register src, Register tmp);
+ void ShlPair(Register high, Register low, uint8_t imm8);
+ void ShlPair_cl(Register high, Register low);
+ void ShrPair(Register high, Register low, uint8_t imm8);
+ void ShrPair_cl(Register high, Register src);
+ void SarPair(Register high, Register low, uint8_t imm8);
+ void SarPair_cl(Register high, Register low);
+
// Support for constant splitting.
bool IsUnsafeImmediate(const Immediate& x);
void SafeMove(Register dst, const Immediate& x);
@@ -522,6 +545,7 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a number, enabled via --debug-code.
void AssertNumber(Register object);
+ void AssertNotNumber(Register object);
// Abort execution if argument is not a smi, enabled via --debug-code.
void AssertSmi(Register object);
@@ -777,12 +801,6 @@ class MacroAssembler: public Assembler {
void Popcnt(Register dst, Register src) { Popcnt(dst, Operand(src)); }
void Popcnt(Register dst, const Operand& src);
- // Emit call to the code we are currently generating.
- void CallSelf() {
- Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
- call(self, RelocInfo::CODE_TARGET);
- }
-
// Move if the registers are not identical.
void Move(Register target, Register source);
@@ -796,6 +814,7 @@ class MacroAssembler: public Assembler {
void Move(XMMRegister dst, float src) { Move(dst, bit_cast<uint32_t>(src)); }
void Move(XMMRegister dst, double src) { Move(dst, bit_cast<uint64_t>(src)); }
+ void Move(Register dst, Handle<Object> handle) { LoadObject(dst, handle); }
void Move(Register dst, Smi* source) { Move(dst, Immediate(source)); }
// Push a handle value.
diff --git a/deps/v8/src/ic/access-compiler.cc b/deps/v8/src/ic/access-compiler.cc
index 0f1b7b9bf1..c99219201a 100644
--- a/deps/v8/src/ic/access-compiler.cc
+++ b/deps/v8/src/ic/access-compiler.cc
@@ -18,7 +18,8 @@ Handle<Code> PropertyAccessCompiler::GetCodeWithFlags(Code::Flags flags,
if (code->IsCodeStubOrIC()) code->set_stub_key(CodeStub::NoCacheKey());
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_code_stubs) {
- OFStream os(stdout);
+ CodeTracer::Scope trace_scope(isolate()->GetCodeTracer());
+ OFStream os(trace_scope.file());
code->Disassemble(name, os);
}
#endif
diff --git a/deps/v8/src/ic/arm/handler-compiler-arm.cc b/deps/v8/src/ic/arm/handler-compiler-arm.cc
index 5f2b861d08..a3f23d3f22 100644
--- a/deps/v8/src/ic/arm/handler-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/handler-compiler-arm.cc
@@ -6,6 +6,7 @@
#include "src/ic/handler-compiler.h"
+#include "src/api-arguments.h"
#include "src/field-type.h"
#include "src/ic/call-optimization.h"
#include "src/ic/ic.h"
@@ -28,6 +29,9 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ // Save context register
+ __ push(cp);
+
if (accessor_index >= 0) {
DCHECK(!holder.is(scratch));
DCHECK(!receiver.is(scratch));
@@ -39,11 +43,11 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
receiver = scratch;
}
__ push(receiver);
- ParameterCount actual(0);
- ParameterCount expected(expected_arguments);
__ LoadAccessor(r1, holder, accessor_index, ACCESSOR_GETTER);
- __ InvokeFunction(r1, expected, actual, CALL_FUNCTION,
- CheckDebugStepCallWrapper());
+ __ mov(r0, Operand(0));
+ __ Call(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -51,7 +55,7 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
}
// Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ pop(cp);
}
__ Ret();
}
@@ -66,6 +70,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ // Save context register
+ __ push(cp);
// Save value register, so we can restore it later.
__ push(value());
@@ -81,11 +87,11 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
receiver = scratch;
}
__ Push(receiver, value());
- ParameterCount actual(1);
- ParameterCount expected(expected_arguments);
__ LoadAccessor(r1, holder, accessor_index, ACCESSOR_SETTER);
- __ InvokeFunction(r1, expected, actual, CALL_FUNCTION,
- CheckDebugStepCallWrapper());
+ __ mov(r0, Operand(1));
+ __ Call(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -96,7 +102,7 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
__ pop(r0);
// Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ pop(cp);
}
__ Ret();
}
@@ -248,7 +254,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
}
DCHECK(optimization.is_simple_api_call());
- // Abi for CallApiFunctionStub.
+ // Abi for CallApiCallbackStub.
Register callee = r0;
Register data = r4;
Register holder = r2;
@@ -317,7 +323,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ mov(api_function_address, Operand(ref));
// Jump to stub.
- CallApiAccessorStub stub(isolate, is_store, call_data_undefined,
+ CallApiCallbackStub stub(isolate, is_store, call_data_undefined,
!optimization.is_constant_call());
__ TailCallStub(&stub);
}
@@ -742,18 +748,6 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
}
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
- Handle<Name> name) {
- __ Push(receiver(), this->name(), value());
-
- // Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
Register NamedStoreHandlerCompiler::value() {
return StoreDescriptor::ValueRegister();
}
diff --git a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
index 51ae3b50cf..a704492550 100644
--- a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
@@ -6,6 +6,7 @@
#include "src/ic/handler-compiler.h"
+#include "src/api-arguments.h"
#include "src/field-type.h"
#include "src/ic/call-optimization.h"
#include "src/ic/ic.h"
@@ -160,7 +161,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
DCHECK(optimization.is_simple_api_call());
- // Abi for CallApiFunctionStub.
+ // Abi for CallApiCallbackStub.
Register callee = x0;
Register data = x4;
Register holder = x2;
@@ -229,7 +230,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ Mov(api_function_address, ref);
// Jump to stub.
- CallApiAccessorStub stub(isolate, is_store, call_data_undefined,
+ CallApiCallbackStub stub(isolate, is_store, call_data_undefined,
!optimization.is_constant_call());
__ TailCallStub(&stub);
}
@@ -245,6 +246,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Save context register
+ __ Push(cp);
// Save value register, so we can restore it later.
__ Push(value());
@@ -260,11 +263,11 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
receiver = scratch;
}
__ Push(receiver, value());
- ParameterCount actual(1);
- ParameterCount expected(expected_arguments);
__ LoadAccessor(x1, holder, accessor_index, ACCESSOR_SETTER);
- __ InvokeFunction(x1, expected, actual, CALL_FUNCTION,
- CheckDebugStepCallWrapper());
+ __ Mov(x0, 1);
+ __ Call(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -275,7 +278,7 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
__ Pop(x0);
// Restore context register.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Pop(cp);
}
__ Ret();
}
@@ -287,6 +290,9 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Save context register
+ __ Push(cp);
+
if (accessor_index >= 0) {
DCHECK(!AreAliased(holder, scratch));
DCHECK(!AreAliased(receiver, scratch));
@@ -298,11 +304,11 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
receiver = scratch;
}
__ Push(receiver);
- ParameterCount actual(0);
- ParameterCount expected(expected_arguments);
__ LoadAccessor(x1, holder, accessor_index, ACCESSOR_GETTER);
- __ InvokeFunction(x1, expected, actual, CALL_FUNCTION,
- CheckDebugStepCallWrapper());
+ __ Mov(x0, 0);
+ __ Call(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -310,7 +316,7 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
}
// Restore context register.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Pop(cp);
}
__ Ret();
}
@@ -380,22 +386,6 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
}
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
- Handle<Name> name) {
- Label miss;
-
- ASM_LOCATION("NamedStoreHandlerCompiler::CompileStoreInterceptor");
-
- __ Push(receiver(), this->name(), value());
-
- // Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
Register NamedStoreHandlerCompiler::value() {
return StoreDescriptor::ValueRegister();
}
diff --git a/deps/v8/src/ic/handler-compiler.cc b/deps/v8/src/ic/handler-compiler.cc
index 803281e24d..714888c8b3 100644
--- a/deps/v8/src/ic/handler-compiler.cc
+++ b/deps/v8/src/ic/handler-compiler.cc
@@ -81,7 +81,8 @@ Handle<Code> PropertyHandlerCompiler::GetCode(Code::Kind kind,
Handle<Name> name) {
Code::Flags flags = Code::ComputeHandlerFlags(kind, type, cache_holder());
Handle<Code> code = GetCodeWithFlags(flags, name);
- PROFILE(isolate(), CodeCreateEvent(Logger::HANDLER_TAG, *code, *name));
+ PROFILE(isolate(), CodeCreateEvent(Logger::HANDLER_TAG,
+ AbstractCode::cast(*code), *name));
#ifdef DEBUG
code->VerifyEmbeddedObjects();
#endif
@@ -581,7 +582,9 @@ void ElementHandlerCompiler::CompileElementHandlers(
(is_js_array && elements_kind == FAST_HOLEY_ELEMENTS &&
*receiver_map == isolate()->get_initial_js_array_map(elements_kind));
- if (receiver_map->has_indexed_interceptor()) {
+ if (receiver_map->has_indexed_interceptor() &&
+ !receiver_map->GetIndexedInterceptor()->getter()->IsUndefined() &&
+ !receiver_map->GetIndexedInterceptor()->non_masking()) {
cached_stub = LoadIndexedInterceptorStub(isolate()).GetCode();
} else if (IsSloppyArgumentsElements(elements_kind)) {
cached_stub = KeyedLoadSloppyArgumentsStub(isolate()).GetCode();
diff --git a/deps/v8/src/ic/handler-compiler.h b/deps/v8/src/ic/handler-compiler.h
index 45d7d73089..76036a260f 100644
--- a/deps/v8/src/ic/handler-compiler.h
+++ b/deps/v8/src/ic/handler-compiler.h
@@ -231,7 +231,6 @@ class NamedStoreHandlerCompiler : public PropertyHandlerCompiler {
Handle<Code> CompileStoreViaSetter(Handle<JSObject> object, Handle<Name> name,
int accessor_index,
int expected_arguments);
- Handle<Code> CompileStoreInterceptor(Handle<Name> name);
static void GenerateStoreViaSetter(MacroAssembler* masm, Handle<Map> map,
Register receiver, Register holder,
diff --git a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
index 3bdddf9b6d..132090dc8e 100644
--- a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
@@ -6,6 +6,7 @@
#include "src/ic/handler-compiler.h"
+#include "src/api-arguments.h"
#include "src/field-type.h"
#include "src/ic/call-optimization.h"
#include "src/ic/ic.h"
@@ -23,6 +24,9 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Save context register
+ __ push(esi);
+
if (accessor_index >= 0) {
DCHECK(!holder.is(scratch));
DCHECK(!receiver.is(scratch));
@@ -34,11 +38,11 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
receiver = scratch;
}
__ push(receiver);
- ParameterCount actual(0);
- ParameterCount expected(expected_arguments);
__ LoadAccessor(edi, holder, accessor_index, ACCESSOR_GETTER);
- __ InvokeFunction(edi, expected, actual, CALL_FUNCTION,
- CheckDebugStepCallWrapper());
+ __ Set(eax, 0);
+ __ Call(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -46,7 +50,7 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
}
// Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ pop(esi);
}
__ ret(0);
}
@@ -90,7 +94,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
// Bail out if the receiver has a named interceptor or requires access checks.
__ test_b(FieldOperand(scratch0, Map::kBitFieldOffset),
- kInterceptorOrAccessCheckNeededMask);
+ Immediate(kInterceptorOrAccessCheckNeededMask));
__ j(not_zero, miss_label);
// Check that receiver is a JSObject.
@@ -158,7 +162,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
// Stack now matches JSFunction abi.
DCHECK(optimization.is_simple_api_call());
- // Abi for CallApiFunctionStub.
+ // Abi for CallApiCallbackStub.
Register callee = edi;
Register data = ebx;
Register holder = ecx;
@@ -220,7 +224,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ mov(api_function_address, Immediate(function_address));
// Jump to stub.
- CallApiAccessorStub stub(isolate, is_store, call_data_undefined,
+ CallApiCallbackStub stub(isolate, is_store, call_data_undefined,
!optimization.is_constant_call());
__ TailCallStub(&stub);
}
@@ -252,6 +256,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Save context register
+ __ push(esi);
// Save value register, so we can restore it later.
__ push(value());
@@ -267,11 +273,11 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
}
__ push(receiver);
__ push(value());
- ParameterCount actual(1);
- ParameterCount expected(expected_arguments);
__ LoadAccessor(edi, holder, accessor_index, ACCESSOR_SETTER);
- __ InvokeFunction(edi, expected, actual, CALL_FUNCTION,
- CheckDebugStepCallWrapper());
+ __ Set(eax, 1);
+ __ Call(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -280,9 +286,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
// We have to return the passed value, not the return value of the setter.
__ pop(eax);
-
// Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ pop(esi);
}
__ ret(0);
}
@@ -758,22 +763,6 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
}
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
- Handle<Name> name) {
- __ pop(scratch1()); // remove the return address
- __ push(receiver());
- __ push(this->name());
- __ push(value());
- __ push(scratch1()); // restore return address
-
- // Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
Register NamedStoreHandlerCompiler::value() {
return StoreDescriptor::ValueRegister();
}
diff --git a/deps/v8/src/ic/ia32/ic-ia32.cc b/deps/v8/src/ic/ia32/ic-ia32.cc
index 0eba42720d..e66716f6cb 100644
--- a/deps/v8/src/ic/ia32/ic-ia32.cc
+++ b/deps/v8/src/ic/ia32/ic-ia32.cc
@@ -150,8 +150,9 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
__ mov(map, FieldOperand(receiver, HeapObject::kMapOffset));
// Check bit field.
- __ test_b(FieldOperand(map, Map::kBitFieldOffset),
- (1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit));
+ __ test_b(
+ FieldOperand(map, Map::kBitFieldOffset),
+ Immediate((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
__ j(not_zero, slow);
// Check that the object is some kind of JS object EXCEPT JS Value type. In
// the case that the object is a value-wrapper object, we enter the runtime
@@ -202,9 +203,9 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
// scratch2: map of current prototype
__ CmpInstanceType(scratch2, JS_OBJECT_TYPE);
__ j(below, slow);
- __ test_b(
- FieldOperand(scratch2, Map::kBitFieldOffset),
- (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor));
+ __ test_b(FieldOperand(scratch2, Map::kBitFieldOffset),
+ Immediate((1 << Map::kIsAccessCheckNeeded) |
+ (1 << Map::kHasIndexedInterceptor)));
__ j(not_zero, slow);
__ cmp(scratch, masm->isolate()->factory()->empty_fixed_array());
__ j(not_equal, slow);
@@ -251,7 +252,7 @@ static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
// bit test is enough.
STATIC_ASSERT(kNotInternalizedTag != 0);
__ test_b(FieldOperand(map, Map::kInstanceTypeOffset),
- kIsNotInternalizedMask);
+ Immediate(kIsNotInternalizedMask));
__ j(not_zero, not_unique);
__ bind(&unique);
@@ -521,7 +522,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
// Check that the receiver does not require access checks and is not observed.
// The generic stub does not perform map checks or handle observed objects.
__ test_b(FieldOperand(edi, Map::kBitFieldOffset),
- 1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved);
+ Immediate(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
__ j(not_zero, &slow);
// Check that the key is a smi.
__ JumpIfNotSmi(key, &maybe_name_key);
diff --git a/deps/v8/src/ic/ic-compiler.cc b/deps/v8/src/ic/ic-compiler.cc
index f74c69e50d..d1e9416d41 100644
--- a/deps/v8/src/ic/ic-compiler.cc
+++ b/deps/v8/src/ic/ic-compiler.cc
@@ -132,29 +132,6 @@ Handle<Code> PropertyICCompiler::ComputeStore(Isolate* isolate,
}
-Handle<Code> PropertyICCompiler::ComputeCompareNil(Handle<Map> receiver_map,
- CompareNilICStub* stub) {
- Isolate* isolate = receiver_map->GetIsolate();
- Handle<String> name(isolate->heap()->empty_string());
- if (!receiver_map->is_dictionary_map()) {
- Handle<Code> cached_ic =
- Find(name, receiver_map, Code::COMPARE_NIL_IC, stub->GetExtraICState());
- if (!cached_ic.is_null()) return cached_ic;
- }
-
- Code::FindAndReplacePattern pattern;
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
- pattern.Add(isolate->factory()->meta_map(), cell);
- Handle<Code> ic = stub->GetCodeCopy(pattern);
-
- if (!receiver_map->is_dictionary_map()) {
- Map::UpdateCodeCache(receiver_map, name, ic);
- }
-
- return ic;
-}
-
-
void PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers(
MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
CodeHandleList* handlers, KeyedAccessStoreMode store_mode,
@@ -175,7 +152,8 @@ void PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers(
Handle<Code> PropertyICCompiler::CompileLoadInitialize(Code::Flags flags) {
LoadIC::GenerateInitialize(masm());
Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadInitialize");
- PROFILE(isolate(), CodeCreateEvent(Logger::LOAD_INITIALIZE_TAG, *code, 0));
+ PROFILE(isolate(), CodeCreateEvent(Logger::LOAD_INITIALIZE_TAG,
+ AbstractCode::cast(*code), 0));
return code;
}
@@ -183,7 +161,8 @@ Handle<Code> PropertyICCompiler::CompileLoadInitialize(Code::Flags flags) {
Handle<Code> PropertyICCompiler::CompileStoreInitialize(Code::Flags flags) {
StoreIC::GenerateInitialize(masm());
Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreInitialize");
- PROFILE(isolate(), CodeCreateEvent(Logger::STORE_INITIALIZE_TAG, *code, 0));
+ PROFILE(isolate(), CodeCreateEvent(Logger::STORE_INITIALIZE_TAG,
+ AbstractCode::cast(*code), 0));
return code;
}
@@ -191,8 +170,8 @@ Handle<Code> PropertyICCompiler::CompileStoreInitialize(Code::Flags flags) {
Handle<Code> PropertyICCompiler::CompileStorePreMonomorphic(Code::Flags flags) {
StoreIC::GeneratePreMonomorphic(masm());
Handle<Code> code = GetCodeWithFlags(flags, "CompileStorePreMonomorphic");
- PROFILE(isolate(),
- CodeCreateEvent(Logger::STORE_PREMONOMORPHIC_TAG, *code, 0));
+ PROFILE(isolate(), CodeCreateEvent(Logger::STORE_PREMONOMORPHIC_TAG,
+ AbstractCode::cast(*code), 0));
return code;
}
@@ -202,7 +181,8 @@ Handle<Code> PropertyICCompiler::CompileStoreGeneric(Code::Flags flags) {
LanguageMode language_mode = StoreICState::GetLanguageMode(extra_state);
GenerateRuntimeSetProperty(masm(), language_mode);
Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreGeneric");
- PROFILE(isolate(), CodeCreateEvent(Logger::STORE_GENERIC_TAG, *code, 0));
+ PROFILE(isolate(), CodeCreateEvent(Logger::STORE_GENERIC_TAG,
+ AbstractCode::cast(*code), 0));
return code;
}
@@ -210,7 +190,8 @@ Handle<Code> PropertyICCompiler::CompileStoreGeneric(Code::Flags flags) {
Handle<Code> PropertyICCompiler::CompileStoreMegamorphic(Code::Flags flags) {
StoreIC::GenerateMegamorphic(masm());
Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreMegamorphic");
- PROFILE(isolate(), CodeCreateEvent(Logger::STORE_MEGAMORPHIC_TAG, *code, 0));
+ PROFILE(isolate(), CodeCreateEvent(Logger::STORE_MEGAMORPHIC_TAG,
+ AbstractCode::cast(*code), 0));
return code;
}
@@ -221,7 +202,8 @@ Handle<Code> PropertyICCompiler::GetCode(Code::Kind kind, Code::StubType type,
Code::Flags flags =
Code::ComputeFlags(kind, state, extra_ic_state_, type, cache_holder());
Handle<Code> code = GetCodeWithFlags(flags, name);
- PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name));
+ PROFILE(isolate(),
+ CodeCreateEvent(log_kind(code), AbstractCode::cast(*code), *name));
#ifdef DEBUG
code->VerifyEmbeddedObjects();
#endif
@@ -235,8 +217,11 @@ void PropertyICCompiler::CompileKeyedStorePolymorphicHandlers(
for (int i = 0; i < receiver_maps->length(); ++i) {
Handle<Map> receiver_map(receiver_maps->at(i));
Handle<Code> cached_stub;
- Handle<Map> transitioned_map =
- Map::FindTransitionedMap(receiver_map, receiver_maps);
+ Handle<Map> transitioned_map;
+ {
+ Map* tmap = receiver_map->FindElementsKindTransitionedMap(receiver_maps);
+ if (tmap != nullptr) transitioned_map = handle(tmap);
+ }
// TODO(mvstanton): The code below is doing pessimistic elements
// transitions. I would like to stop doing that and rely on Allocation Site
diff --git a/deps/v8/src/ic/ic-compiler.h b/deps/v8/src/ic/ic-compiler.h
index 08444df654..3a5aecccbb 100644
--- a/deps/v8/src/ic/ic-compiler.h
+++ b/deps/v8/src/ic/ic-compiler.h
@@ -33,10 +33,6 @@ class PropertyICCompiler : public PropertyAccessCompiler {
CodeHandleList* handlers, KeyedAccessStoreMode store_mode,
LanguageMode language_mode);
- // Compare nil
- static Handle<Code> ComputeCompareNil(Handle<Map> receiver_map,
- CompareNilICStub* stub);
-
// Helpers
// TODO(verwaest): Move all uses of these helpers to the PropertyICCompiler
// and make the helpers private.
diff --git a/deps/v8/src/ic/ic-state.cc b/deps/v8/src/ic/ic-state.cc
index 933803c653..bf1e45fb50 100644
--- a/deps/v8/src/ic/ic-state.cc
+++ b/deps/v8/src/ic/ic-state.cc
@@ -404,7 +404,9 @@ CompareICState::State CompareICState::NewInputState(State old_state,
if (value->IsInternalizedString()) return INTERNALIZED_STRING;
if (value->IsString()) return STRING;
if (value->IsSymbol()) return UNIQUE_NAME;
- if (value->IsJSReceiver()) return RECEIVER;
+ if (value->IsJSReceiver() && !value->IsUndetectable()) {
+ return RECEIVER;
+ }
break;
case BOOLEAN:
if (value->IsBoolean()) return BOOLEAN;
@@ -428,7 +430,9 @@ CompareICState::State CompareICState::NewInputState(State old_state,
if (value->IsUniqueName()) return UNIQUE_NAME;
break;
case RECEIVER:
- if (value->IsJSReceiver()) return RECEIVER;
+ if (value->IsJSReceiver() && !value->IsUndetectable()) {
+ return RECEIVER;
+ }
break;
case GENERIC:
break;
@@ -464,6 +468,9 @@ CompareICState::State CompareICState::TargetState(
}
if (x->IsString() && y->IsString()) return STRING;
if (x->IsJSReceiver() && y->IsJSReceiver()) {
+ if (x->IsUndetectable() || y->IsUndetectable()) {
+ return GENERIC;
+ }
if (Handle<JSReceiver>::cast(x)->map() ==
Handle<JSReceiver>::cast(y)->map()) {
return KNOWN_RECEIVER;
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index c0b3e49338..49bbc6ed8d 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -6,8 +6,10 @@
#include "src/accessors.h"
#include "src/api.h"
+#include "src/api-arguments.h"
#include "src/arguments.h"
#include "src/base/bits.h"
+#include "src/code-factory.h"
#include "src/codegen.h"
#include "src/conversions.h"
#include "src/execution.h"
@@ -465,8 +467,6 @@ void IC::Clear(Isolate* isolate, Address address, Address constant_pool) {
return;
case Code::COMPARE_IC:
return CompareIC::Clear(isolate, address, target, constant_pool);
- case Code::COMPARE_NIL_IC:
- return CompareNilIC::Clear(address, target, constant_pool);
case Code::CALL_IC: // CallICs are vector-based and cleared differently.
case Code::BINARY_OP_IC:
case Code::TO_BOOLEAN_IC:
@@ -836,11 +836,12 @@ bool IC::IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map) {
ElementsKind target_elements_kind = target_map->elements_kind();
bool more_general_transition = IsMoreGeneralElementsKindTransition(
source_map->elements_kind(), target_elements_kind);
- Map* transitioned_map =
- more_general_transition
- ? source_map->LookupElementsTransitionMap(target_elements_kind)
- : NULL;
-
+ Map* transitioned_map = nullptr;
+ if (more_general_transition) {
+ MapHandleList map_list;
+ map_list.Add(handle(target_map));
+ transitioned_map = source_map->FindElementsKindTransitionedMap(&map_list);
+ }
return transitioned_map == target_map;
}
@@ -1106,9 +1107,8 @@ Handle<Code> IC::ComputeHandler(LookupIterator* lookup, Handle<Object> value) {
// TODO(mvstanton): we'd only like to cache code on the map when it's custom
// code compiled for this map, otherwise it's already cached in the global
- // code
- // cache. We are also guarding against installing code with flags that don't
- // match the desired CacheHolderFlag computed above, which would lead to
+ // code cache. We are also guarding against installing code with flags that
+ // don't match the desired CacheHolderFlag computed above, which would lead to
// invalid lookups later.
if (code->type() != Code::NORMAL &&
Code::ExtractCacheHolderFromFlags(code->flags()) == flag) {
@@ -1208,6 +1208,7 @@ Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
break;
}
if (!holder->HasFastProperties()) break;
+ if (info->is_sloppy() && !receiver->IsJSReceiver()) break;
NamedLoadHandlerCompiler compiler(isolate(), map, holder,
cache_holder);
return compiler.CompileLoadCallback(lookup->name(), info);
@@ -1297,10 +1298,10 @@ static Handle<Object> TryConvertKey(Handle<Object> key, Isolate* isolate) {
Handle<Code> KeyedLoadIC::LoadElementStub(Handle<HeapObject> receiver) {
Handle<Code> null_handle;
Handle<Map> receiver_map(receiver->map(), isolate());
+ DCHECK(receiver_map->instance_type() != JS_VALUE_TYPE); // Checked by caller.
MapHandleList target_receiver_maps;
TargetMaps(&target_receiver_maps);
-
if (target_receiver_maps.length() == 0) {
Handle<Code> handler =
PropertyICCompiler::ComputeKeyedLoadMonomorphicHandler(
@@ -1309,6 +1310,14 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<HeapObject> receiver) {
return null_handle;
}
+ for (int i = 0; i < target_receiver_maps.length(); i++) {
+ if (!target_receiver_maps.at(i).is_null() &&
+ target_receiver_maps.at(i)->instance_type() == JS_VALUE_TYPE) {
+ TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "JSValue");
+ return megamorphic_stub();
+ }
+ }
+
// The first time a receiver is seen that is a transitioned version of the
// previous monomorphic receiver type, assume the new ElementsKind is the
// monomorphic type. This benefits global arrays that only transition
@@ -1422,7 +1431,8 @@ bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
Handle<JSObject> holder = it->GetHolder<JSObject>();
InterceptorInfo* info = holder->GetNamedInterceptor();
if (it->HolderIsReceiverOrHiddenPrototype()) {
- if (!info->setter()->IsUndefined()) return true;
+ return !info->non_masking() && receiver.is_identical_to(holder) &&
+ !info->setter()->IsUndefined();
} else if (!info->getter()->IsUndefined() ||
!info->query()->IsUndefined()) {
return false;
@@ -1722,8 +1732,7 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
case LookupIterator::INTERCEPTOR: {
DCHECK(!holder->GetNamedInterceptor()->setter()->IsUndefined());
- NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
- return compiler.CompileStoreInterceptor(lookup->name());
+ return CodeFactory::StoreInterceptor(isolate()).code();
}
case LookupIterator::ACCESSOR: {
@@ -1749,6 +1758,7 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
TRACE_GENERIC_IC(isolate(), "StoreIC", "incompatible receiver type");
break;
}
+ if (info->is_sloppy() && !receiver->IsJSReceiver()) break;
NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
return compiler.CompileStoreCallback(receiver, lookup->name(), info,
language_mode());
@@ -2722,57 +2732,6 @@ RUNTIME_FUNCTION(Runtime_CompareIC_Miss) {
}
-void CompareNilIC::Clear(Address address, Code* target, Address constant_pool) {
- if (IsCleared(target)) return;
- ExtraICState state = target->extra_ic_state();
-
- CompareNilICStub stub(target->GetIsolate(), state,
- HydrogenCodeStub::UNINITIALIZED);
- stub.ClearState();
-
- Code* code = NULL;
- CHECK(stub.FindCodeInCache(&code));
-
- SetTargetAtAddress(address, code, constant_pool);
-}
-
-
-Handle<Object> CompareNilIC::CompareNil(Handle<Object> object) {
- ExtraICState extra_ic_state = target()->extra_ic_state();
-
- CompareNilICStub stub(isolate(), extra_ic_state);
-
- // Extract the current supported types from the patched IC and calculate what
- // types must be supported as a result of the miss.
- bool already_monomorphic = stub.IsMonomorphic();
-
- stub.UpdateStatus(object);
-
- // Find or create the specialized stub to support the new set of types.
- Handle<Code> code;
- if (stub.IsMonomorphic()) {
- Handle<Map> monomorphic_map(already_monomorphic && FirstTargetMap() != NULL
- ? FirstTargetMap()
- : HeapObject::cast(*object)->map());
- code = PropertyICCompiler::ComputeCompareNil(monomorphic_map, &stub);
- } else {
- code = stub.GetCode();
- }
- set_target(*code);
- return isolate()->factory()->ToBoolean(object->IsUndetectableObject());
-}
-
-
-RUNTIME_FUNCTION(Runtime_CompareNilIC_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
- HandleScope scope(isolate);
- Handle<Object> object = args.at<Object>(0);
- CompareNilIC ic(isolate);
- return *ic.CompareNil(object);
-}
-
-
RUNTIME_FUNCTION(Runtime_Unreachable) {
UNREACHABLE();
CHECK(false);
@@ -2781,7 +2740,7 @@ RUNTIME_FUNCTION(Runtime_Unreachable) {
Handle<Object> ToBooleanIC::ToBoolean(Handle<Object> object) {
- ToBooleanStub stub(isolate(), target()->extra_ic_state());
+ ToBooleanICStub stub(isolate(), target()->extra_ic_state());
bool to_boolean_value = stub.UpdateStatus(object);
Handle<Code> code = stub.GetCode();
set_target(*code);
@@ -2821,12 +2780,11 @@ RUNTIME_FUNCTION(Runtime_StoreCallbackProperty) {
FUNCTION_CAST<v8::AccessorNameSetterCallback>(setter_address);
DCHECK(fun != NULL);
- LOG(isolate, ApiNamedPropertyAccess("store", *receiver, *name));
Object::ShouldThrow should_throw =
is_sloppy(language_mode) ? Object::DONT_THROW : Object::THROW_ON_ERROR;
PropertyCallbackArguments custom_args(isolate, callback->data(), *receiver,
*holder, should_throw);
- custom_args.Call(fun, v8::Utils::ToLocal(name), v8::Utils::ToLocal(value));
+ custom_args.Call(fun, name, value);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
return *value;
}
@@ -2843,17 +2801,29 @@ RUNTIME_FUNCTION(Runtime_LoadPropertyWithInterceptorOnly) {
DCHECK(args.length() == NamedLoadHandlerCompiler::kInterceptorArgsLength);
Handle<Name> name =
args.at<Name>(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex);
- Handle<JSObject> receiver =
- args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex);
+ Handle<Object> receiver =
+ args.at<Object>(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex);
Handle<JSObject> holder =
args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex);
HandleScope scope(isolate);
- LookupIterator it(receiver, name, holder, LookupIterator::OWN);
- bool done;
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, JSObject::GetPropertyWithInterceptor(&it, &done));
- if (done) return *result;
+
+ if (!receiver->IsJSReceiver()) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, receiver, Object::ConvertReceiver(isolate, receiver));
+ }
+
+ InterceptorInfo* interceptor = holder->GetNamedInterceptor();
+ PropertyCallbackArguments arguments(isolate, interceptor->data(), *receiver,
+ *holder, Object::DONT_THROW);
+
+ v8::GenericNamedPropertyGetterCallback getter =
+ v8::ToCData<v8::GenericNamedPropertyGetterCallback>(
+ interceptor->getter());
+ Handle<Object> result = arguments.Call(getter, name);
+
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+
+ if (!result.is_null()) return *result;
return isolate->heap()->no_interceptor_result_sentinel();
}
@@ -2867,21 +2837,42 @@ RUNTIME_FUNCTION(Runtime_LoadPropertyWithInterceptor) {
DCHECK(args.length() == NamedLoadHandlerCompiler::kInterceptorArgsLength);
Handle<Name> name =
args.at<Name>(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex);
- Handle<JSObject> receiver =
- args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex);
+ Handle<Object> receiver =
+ args.at<Object>(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex);
Handle<JSObject> holder =
args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex);
- Handle<Object> result;
+ if (!receiver->IsJSReceiver()) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, receiver, Object::ConvertReceiver(isolate, receiver));
+ }
+
+ InterceptorInfo* interceptor = holder->GetNamedInterceptor();
+ PropertyCallbackArguments arguments(isolate, interceptor->data(), *receiver,
+ *holder, Object::DONT_THROW);
+
+ v8::GenericNamedPropertyGetterCallback getter =
+ v8::ToCData<v8::GenericNamedPropertyGetterCallback>(
+ interceptor->getter());
+ Handle<Object> result = arguments.Call(getter, name);
+
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+
+ if (!result.is_null()) return *result;
+
LookupIterator it(receiver, name, holder);
- // TODO(conradw): Investigate strong mode semantics for this.
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- JSObject::GetProperty(&it));
+ // Skip any lookup work until we hit the (possibly non-masking) interceptor.
+ while (it.state() != LookupIterator::INTERCEPTOR ||
+ !it.GetHolder<JSObject>().is_identical_to(holder)) {
+ DCHECK(it.state() != LookupIterator::ACCESS_CHECK || it.HasAccess());
+ it.Next();
+ }
+ // Skip past the interceptor.
+ it.Next();
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, Object::GetProperty(&it));
if (it.IsFound()) return *result;
- // Return the undefined result if the reference error should not be thrown.
- // Note that both keyed and non-keyed loads may end up here.
LoadICNexus nexus(isolate);
LoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
if (!ic.ShouldThrowReferenceError(it.GetReceiver())) {
@@ -2902,26 +2893,34 @@ RUNTIME_FUNCTION(Runtime_StorePropertyWithInterceptor) {
Handle<JSObject> receiver = args.at<JSObject>(0);
Handle<Name> name = args.at<Name>(1);
Handle<Object> value = args.at<Object>(2);
-#ifdef DEBUG
- PrototypeIterator iter(isolate, receiver,
- PrototypeIterator::START_AT_RECEIVER,
- PrototypeIterator::END_AT_NON_HIDDEN);
- bool found = false;
- for (; !iter.IsAtEnd(); iter.Advance()) {
- Handle<Object> current = PrototypeIterator::GetCurrent(iter);
- if (current->IsJSObject() &&
- Handle<JSObject>::cast(current)->HasNamedInterceptor()) {
- found = true;
- break;
- }
+
+ DCHECK(receiver->HasNamedInterceptor());
+ InterceptorInfo* interceptor = receiver->GetNamedInterceptor();
+ DCHECK(!interceptor->non_masking());
+ PropertyCallbackArguments arguments(isolate, interceptor->data(), *receiver,
+ *receiver, Object::DONT_THROW);
+
+ v8::GenericNamedPropertySetterCallback setter =
+ v8::ToCData<v8::GenericNamedPropertySetterCallback>(
+ interceptor->setter());
+ Handle<Object> result = arguments.Call(setter, name, value);
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+ if (!result.is_null()) return *value;
+
+ LookupIterator it(receiver, name, receiver);
+ // Skip past any access check on the receiver.
+ if (it.state() == LookupIterator::ACCESS_CHECK) {
+ DCHECK(it.HasAccess());
+ it.Next();
}
- DCHECK(found);
-#endif
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- JSObject::SetProperty(receiver, name, value, ic.language_mode()));
- return *result;
+ // Skip past the interceptor on the receiver.
+ DCHECK_EQ(LookupIterator::INTERCEPTOR, it.state());
+ it.Next();
+
+ MAYBE_RETURN(Object::SetProperty(&it, value, ic.language_mode(),
+ JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED),
+ isolate->heap()->exception());
+ return *value;
}
@@ -2931,9 +2930,25 @@ RUNTIME_FUNCTION(Runtime_LoadElementWithInterceptor) {
Handle<JSObject> receiver = args.at<JSObject>(0);
DCHECK(args.smi_at(1) >= 0);
uint32_t index = args.smi_at(1);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, Object::GetElement(isolate, receiver, index));
+
+ InterceptorInfo* interceptor = receiver->GetIndexedInterceptor();
+ PropertyCallbackArguments arguments(isolate, interceptor->data(), *receiver,
+ *receiver, Object::DONT_THROW);
+
+ v8::IndexedPropertyGetterCallback getter =
+ v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
+ Handle<Object> result = arguments.Call(getter, index);
+
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+
+ if (result.is_null()) {
+ LookupIterator it(isolate, receiver, index, receiver);
+ DCHECK_EQ(LookupIterator::INTERCEPTOR, it.state());
+ it.Next();
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ Object::GetProperty(&it));
+ }
+
return *result;
}
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index 0a324a8205..8bd2f447b8 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -606,18 +606,6 @@ class CompareIC : public IC {
};
-class CompareNilIC : public IC {
- public:
- explicit CompareNilIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {}
-
- Handle<Object> CompareNil(Handle<Object> object);
-
- static Handle<Code> GetUninitialized();
-
- static void Clear(Address address, Code* target, Address constant_pool);
-};
-
-
class ToBooleanIC : public IC {
public:
explicit ToBooleanIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {}
diff --git a/deps/v8/src/ic/mips/handler-compiler-mips.cc b/deps/v8/src/ic/mips/handler-compiler-mips.cc
index f3af1cf537..b924bdad78 100644
--- a/deps/v8/src/ic/mips/handler-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/handler-compiler-mips.cc
@@ -6,6 +6,7 @@
#include "src/ic/handler-compiler.h"
+#include "src/api-arguments.h"
#include "src/field-type.h"
#include "src/ic/call-optimization.h"
#include "src/ic/ic.h"
@@ -28,6 +29,9 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Save context register
+ __ push(cp);
+
if (accessor_index >= 0) {
DCHECK(!holder.is(scratch));
DCHECK(!receiver.is(scratch));
@@ -39,11 +43,11 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
receiver = scratch;
}
__ push(receiver);
- ParameterCount actual(0);
- ParameterCount expected(expected_arguments);
__ LoadAccessor(a1, holder, accessor_index, ACCESSOR_GETTER);
- __ InvokeFunction(a1, expected, actual, CALL_FUNCTION,
- CheckDebugStepCallWrapper());
+ __ li(a0, Operand(0));
+ __ Call(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -51,7 +55,7 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
}
// Restore context register.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ pop(cp);
}
__ Ret();
}
@@ -66,8 +70,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Save value register, so we can restore it later.
- __ push(value());
+ // Save context and value registers, so we can restore them later.
+ __ Push(cp, value());
if (accessor_index >= 0) {
DCHECK(!holder.is(scratch));
@@ -81,11 +85,11 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
receiver = scratch;
}
__ Push(receiver, value());
- ParameterCount actual(1);
- ParameterCount expected(expected_arguments);
__ LoadAccessor(a1, holder, accessor_index, ACCESSOR_SETTER);
- __ InvokeFunction(a1, expected, actual, CALL_FUNCTION,
- CheckDebugStepCallWrapper());
+ __ li(a0, Operand(1));
+ __ Call(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -93,10 +97,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
}
// We have to return the passed value, not the return value of the setter.
- __ pop(v0);
-
// Restore context register.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Pop(cp, v0);
}
__ Ret();
}
@@ -241,7 +243,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
}
DCHECK(optimization.is_simple_api_call());
- // Abi for CallApiFunctionStub.
+ // Abi for CallApiCallbackStub.
Register callee = a0;
Register data = t0;
Register holder = a2;
@@ -308,7 +310,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ li(api_function_address, Operand(ref));
// Jump to stub.
- CallApiAccessorStub stub(isolate, is_store, call_data_undefined,
+ CallApiCallbackStub stub(isolate, is_store, call_data_undefined,
!optimization.is_constant_call());
__ TailCallStub(&stub);
}
@@ -739,18 +741,6 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
}
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
- Handle<Name> name) {
- __ Push(receiver(), this->name(), value());
-
- // Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
Register NamedStoreHandlerCompiler::value() {
return StoreDescriptor::ValueRegister();
}
diff --git a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
index 968effdd7f..52260ee754 100644
--- a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
@@ -6,6 +6,7 @@
#include "src/ic/handler-compiler.h"
+#include "src/api-arguments.h"
#include "src/field-type.h"
#include "src/ic/call-optimization.h"
#include "src/ic/ic.h"
@@ -28,6 +29,9 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Save context register
+ __ push(cp);
+
if (accessor_index >= 0) {
DCHECK(!holder.is(scratch));
DCHECK(!receiver.is(scratch));
@@ -39,11 +43,11 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
receiver = scratch;
}
__ push(receiver);
- ParameterCount actual(0);
- ParameterCount expected(expected_arguments);
__ LoadAccessor(a1, holder, accessor_index, ACCESSOR_GETTER);
- __ InvokeFunction(a1, expected, actual, CALL_FUNCTION,
- CheckDebugStepCallWrapper());
+ __ li(a0, Operand(V8_INT64_C(0)));
+ __ Call(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -51,7 +55,7 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
}
// Restore context register.
- __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ pop(cp);
}
__ Ret();
}
@@ -66,8 +70,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Save value register, so we can restore it later.
- __ push(value());
+ // Save context and value registers, so we can restore them later.
+ __ Push(cp, value());
if (accessor_index >= 0) {
DCHECK(!holder.is(scratch));
@@ -81,11 +85,11 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
receiver = scratch;
}
__ Push(receiver, value());
- ParameterCount actual(1);
- ParameterCount expected(expected_arguments);
__ LoadAccessor(a1, holder, accessor_index, ACCESSOR_SETTER);
- __ InvokeFunction(a1, expected, actual, CALL_FUNCTION,
- CheckDebugStepCallWrapper());
+ __ li(a0, Operand(1));
+ __ Call(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -93,10 +97,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
}
// We have to return the passed value, not the return value of the setter.
- __ pop(v0);
-
// Restore context register.
- __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Pop(cp, v0);
}
__ Ret();
}
@@ -241,7 +243,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
}
DCHECK(optimization.is_simple_api_call());
- // Abi for CallApiFunctionStub.
+ // Abi for CallApiCallbackStub.
Register callee = a0;
Register data = a4;
Register holder = a2;
@@ -308,7 +310,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ li(api_function_address, Operand(ref));
// Jump to stub.
- CallApiAccessorStub stub(isolate, is_store, call_data_undefined,
+ CallApiCallbackStub stub(isolate, is_store, call_data_undefined,
!optimization.is_constant_call());
__ TailCallStub(&stub);
}
@@ -739,18 +741,6 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
}
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
- Handle<Name> name) {
- __ Push(receiver(), this->name(), value());
-
- // Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
Register NamedStoreHandlerCompiler::value() {
return StoreDescriptor::ValueRegister();
}
diff --git a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
index 6e7d78afd3..832c25ae48 100644
--- a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
@@ -6,6 +6,7 @@
#include "src/ic/handler-compiler.h"
+#include "src/api-arguments.h"
#include "src/field-type.h"
#include "src/ic/call-optimization.h"
#include "src/ic/ic.h"
@@ -28,6 +29,9 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ // Save context register
+ __ push(cp);
+
if (accessor_index >= 0) {
DCHECK(!holder.is(scratch));
DCHECK(!receiver.is(scratch));
@@ -39,11 +43,11 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
receiver = scratch;
}
__ push(receiver);
- ParameterCount actual(0);
- ParameterCount expected(expected_arguments);
__ LoadAccessor(r4, holder, accessor_index, ACCESSOR_GETTER);
- __ InvokeFunction(r4, expected, actual, CALL_FUNCTION,
- CheckDebugStepCallWrapper());
+ __ li(r3, Operand::Zero());
+ __ Call(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -51,7 +55,7 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
}
// Restore context register.
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ pop(cp);
}
__ Ret();
}
@@ -66,8 +70,9 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ // Save context register
// Save value register, so we can restore it later.
- __ push(value());
+ __ Push(cp, value());
if (accessor_index >= 0) {
DCHECK(!holder.is(scratch));
@@ -81,11 +86,11 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
receiver = scratch;
}
__ Push(receiver, value());
- ParameterCount actual(1);
- ParameterCount expected(expected_arguments);
__ LoadAccessor(r4, holder, accessor_index, ACCESSOR_SETTER);
- __ InvokeFunction(r4, expected, actual, CALL_FUNCTION,
- CheckDebugStepCallWrapper());
+ __ li(r3, Operand(1));
+ __ Call(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -93,10 +98,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
}
// We have to return the passed value, not the return value of the setter.
- __ pop(r3);
-
// Restore context register.
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Pop(cp, r3);
}
__ Ret();
}
@@ -246,7 +249,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
}
DCHECK(optimization.is_simple_api_call());
- // Abi for CallApiFunctionStub.
+ // Abi for CallApiCallbackStub.
Register callee = r3;
Register data = r7;
Register holder = r5;
@@ -315,7 +318,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ mov(api_function_address, Operand(ref));
// Jump to stub.
- CallApiAccessorStub stub(isolate, is_store, call_data_undefined,
+ CallApiCallbackStub stub(isolate, is_store, call_data_undefined,
!optimization.is_constant_call());
__ TailCallStub(&stub);
}
@@ -738,18 +741,6 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
}
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
- Handle<Name> name) {
- __ Push(receiver(), this->name(), value());
-
- // Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
Register NamedStoreHandlerCompiler::value() {
return StoreDescriptor::ValueRegister();
}
diff --git a/deps/v8/src/ic/s390/OWNERS b/deps/v8/src/ic/s390/OWNERS
new file mode 100644
index 0000000000..eb007cb908
--- /dev/null
+++ b/deps/v8/src/ic/s390/OWNERS
@@ -0,0 +1,5 @@
+jyan@ca.ibm.com
+dstence@us.ibm.com
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
diff --git a/deps/v8/src/ic/s390/access-compiler-s390.cc b/deps/v8/src/ic/s390/access-compiler-s390.cc
new file mode 100644
index 0000000000..316be715c2
--- /dev/null
+++ b/deps/v8/src/ic/s390/access-compiler-s390.cc
@@ -0,0 +1,41 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/ic/access-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
+ Handle<Code> code) {
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+Register* PropertyAccessCompiler::load_calling_convention() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
+ static Register registers[] = {receiver, name, r5, r2, r6, r7};
+ return registers;
+}
+
+Register* PropertyAccessCompiler::store_calling_convention() {
+ // receiver, name, scratch1, scratch2, scratch3.
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ static Register registers[] = {receiver, name, r5, r6, r7};
+ return registers;
+}
+
+#undef __
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_S390
diff --git a/deps/v8/src/ic/s390/handler-compiler-s390.cc b/deps/v8/src/ic/s390/handler-compiler-s390.cc
new file mode 100644
index 0000000000..1b39782c28
--- /dev/null
+++ b/deps/v8/src/ic/s390/handler-compiler-s390.cc
@@ -0,0 +1,750 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/ic/handler-compiler.h"
+
+#include "src/api-arguments.h"
+#include "src/field-type.h"
+#include "src/ic/call-optimization.h"
+#include "src/ic/ic.h"
+#include "src/isolate-inl.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
+ MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
+ int accessor_index, int expected_arguments, Register scratch) {
+ // ----------- S t a t e -------------
+ // -- r2 : receiver
+ // -- r4 : name
+ // -- lr : return address
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Save context register
+ __ push(cp);
+
+ if (accessor_index >= 0) {
+ DCHECK(!holder.is(scratch));
+ DCHECK(!receiver.is(scratch));
+ // Call the JavaScript getter with the receiver on the stack.
+ if (map->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ LoadP(scratch,
+ FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ receiver = scratch;
+ }
+ __ Push(receiver);
+ __ LoadAccessor(r3, holder, accessor_index, ACCESSOR_GETTER);
+ __ LoadImmP(r2, Operand::Zero());
+ __ Call(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore context register.
+ __ pop(cp);
+ }
+ __ Ret();
+}
+
+void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
+ MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
+ int accessor_index, int expected_arguments, Register scratch) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Save context register
+ // Save value register, so we can restore it later.
+ __ Push(cp, value());
+
+ if (accessor_index >= 0) {
+ DCHECK(!holder.is(scratch));
+ DCHECK(!receiver.is(scratch));
+ DCHECK(!value().is(scratch));
+ // Call the JavaScript setter with receiver and value on the stack.
+ if (map->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ LoadP(scratch,
+ FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ receiver = scratch;
+ }
+ __ Push(receiver, value());
+ __ LoadAccessor(r3, holder, accessor_index, ACCESSOR_SETTER);
+ __ LoadImmP(r2, Operand(1));
+ __ Call(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // We have to return the passed value, not the return value of the setter.
+ // Restore context register.
+ __ Pop(cp, r2);
+ }
+ __ Ret();
+}
+
+void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
+ Register slot) {
+ MacroAssembler* masm = this->masm();
+ __ Push(vector, slot);
+}
+
+void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
+ MacroAssembler* masm = this->masm();
+ __ Pop(vector, slot);
+}
+
+void PropertyHandlerCompiler::DiscardVectorAndSlot() {
+ MacroAssembler* masm = this->masm();
+ // Remove vector and slot.
+ __ la(sp, MemOperand(sp, 2 * kPointerSize));
+}
+
+void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
+ MacroAssembler* masm, Label* miss_label, Register receiver,
+ Handle<Name> name, Register scratch0, Register scratch1) {
+ DCHECK(name->IsUniqueName());
+ DCHECK(!receiver.is(scratch0));
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
+ __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+
+ Label done;
+
+ const int kInterceptorOrAccessCheckNeededMask =
+ (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
+ // Bail out if the receiver has a named interceptor or requires access checks.
+ Register map = scratch1;
+ __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ LoadlB(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ AndP(r0, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
+ __ bne(miss_label);
+
+ // Check that receiver is a JSObject.
+ // TODO(joransiu): Merge into SI compare
+ __ LoadlB(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ CmpP(scratch0, Operand(FIRST_JS_RECEIVER_TYPE));
+ __ blt(miss_label);
+
+ // Load properties array.
+ Register properties = scratch0;
+ __ LoadP(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ // Check that the properties array is a dictionary.
+ __ LoadP(map, FieldMemOperand(properties, HeapObject::kMapOffset));
+ __ CompareRoot(map, Heap::kHashTableMapRootIndex);
+ __ bne(miss_label);
+
+ // Restore the temporarily used register.
+ __ LoadP(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+
+ NameDictionaryLookupStub::GenerateNegativeLookup(
+ masm, miss_label, &done, receiver, properties, name, scratch1);
+ __ bind(&done);
+ __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+}
+
+void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
+ MacroAssembler* masm, int index, Register result, Label* miss) {
+ __ LoadNativeContextSlot(index, result);
+ // Load its initial map. The global functions all have initial maps.
+ __ LoadP(result,
+ FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
+ // Load the prototype from the initial map.
+ __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
+}
+
+void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
+ MacroAssembler* masm, Register receiver, Register scratch1,
+ Register scratch2, Label* miss_label) {
+ __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
+ __ LoadRR(r2, scratch1);
+ __ Ret();
+}
+
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+void PropertyHandlerCompiler::GenerateCheckPropertyCell(
+ MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
+ Register scratch, Label* miss) {
+ Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
+ DCHECK(cell->value()->IsTheHole());
+ Handle<WeakCell> weak_cell = masm->isolate()->factory()->NewWeakCell(cell);
+ __ LoadWeakValue(scratch, weak_cell, miss);
+ __ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+ __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
+ __ bne(miss);
+}
+
+static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
+ Register holder, Register name,
+ Handle<JSObject> holder_obj) {
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+ __ Push(name);
+ __ Push(receiver);
+ __ Push(holder);
+}
+
+static void CompileCallLoadPropertyWithInterceptor(
+ MacroAssembler* masm, Register receiver, Register holder, Register name,
+ Handle<JSObject> holder_obj, Runtime::FunctionId id) {
+ DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
+ Runtime::FunctionForId(id)->nargs);
+ PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+ __ CallRuntime(id);
+}
+
+// Generate call to api function.
+void PropertyHandlerCompiler::GenerateApiAccessorCall(
+ MacroAssembler* masm, const CallOptimization& optimization,
+ Handle<Map> receiver_map, Register receiver, Register scratch_in,
+ bool is_store, Register store_parameter, Register accessor_holder,
+ int accessor_index) {
+ DCHECK(!accessor_holder.is(scratch_in));
+ DCHECK(!receiver.is(scratch_in));
+ __ Push(receiver);
+ // Write the arguments to stack frame.
+ if (is_store) {
+ DCHECK(!receiver.is(store_parameter));
+ DCHECK(!scratch_in.is(store_parameter));
+ __ Push(store_parameter);
+ }
+ DCHECK(optimization.is_simple_api_call());
+
+ // Abi for CallApiCallbackStub.
+ Register callee = r2;
+ Register data = r6;
+ Register holder = r4;
+ Register api_function_address = r3;
+
+ // Put callee in place.
+ __ LoadAccessor(callee, accessor_holder, accessor_index,
+ is_store ? ACCESSOR_SETTER : ACCESSOR_GETTER);
+
+ // Put holder in place.
+ CallOptimization::HolderLookup holder_lookup;
+ int holder_depth = 0;
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup,
+ &holder_depth);
+ switch (holder_lookup) {
+ case CallOptimization::kHolderIsReceiver:
+ __ Move(holder, receiver);
+ break;
+ case CallOptimization::kHolderFound:
+ __ LoadP(holder, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ LoadP(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
+ for (int i = 1; i < holder_depth; i++) {
+ __ LoadP(holder, FieldMemOperand(holder, HeapObject::kMapOffset));
+ __ LoadP(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
+ }
+ break;
+ case CallOptimization::kHolderNotFound:
+ UNREACHABLE();
+ break;
+ }
+
+ Isolate* isolate = masm->isolate();
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+ bool call_data_undefined = false;
+ // Put call data in place.
+ if (api_call_info->data()->IsUndefined()) {
+ call_data_undefined = true;
+ __ LoadRoot(data, Heap::kUndefinedValueRootIndex);
+ } else {
+ if (optimization.is_constant_call()) {
+ __ LoadP(data,
+ FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(data,
+ FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
+ __ LoadP(data,
+ FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+ } else {
+ __ LoadP(data,
+ FieldMemOperand(callee, FunctionTemplateInfo::kCallCodeOffset));
+ }
+ __ LoadP(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
+ }
+
+ if (api_call_info->fast_handler()->IsCode()) {
+ // Just tail call into the fast handler if present.
+ __ Jump(handle(Code::cast(api_call_info->fast_handler())),
+ RelocInfo::CODE_TARGET);
+ return;
+ }
+
+ // Put api_function_address in place.
+ Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ ApiFunction fun(function_address);
+ ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
+ ExternalReference ref = ExternalReference(&fun, type, masm->isolate());
+ __ mov(api_function_address, Operand(ref));
+
+ // Jump to stub.
+ CallApiCallbackStub stub(isolate, is_store, call_data_undefined,
+ !optimization.is_constant_call());
+ __ TailCallStub(&stub);
+}
+
+static void StoreIC_PushArgs(MacroAssembler* masm) {
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
+}
+
+void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
+ StoreIC_PushArgs(masm);
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ __ TailCallRuntime(Runtime::kStoreIC_Slow);
+}
+
+void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
+ StoreIC_PushArgs(masm);
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
+}
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
+ Handle<Name> name) {
+ if (!label->is_unused()) {
+ __ bind(label);
+ __ mov(this->name(), Operand(name));
+ }
+}
+
+void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
+ __ mov(this->name(), Operand(name));
+}
+
+void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
+ Register current_map, Register destination_map) {
+ DCHECK(false); // Not implemented.
+}
+
+void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
+ Register map_reg,
+ Register scratch,
+ Label* miss) {
+ Handle<WeakCell> cell = Map::WeakCellForMap(transition);
+ DCHECK(!map_reg.is(scratch));
+ __ LoadWeakValue(map_reg, cell, miss);
+ if (transition->CanBeDeprecated()) {
+ __ LoadlW(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
+ __ DecodeField<Map::Deprecated>(r0, scratch);
+ __ bne(miss);
+ }
+}
+
+void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
+ int descriptor,
+ Register value_reg,
+ Register scratch,
+ Label* miss_label) {
+ DCHECK(!map_reg.is(scratch));
+ DCHECK(!map_reg.is(value_reg));
+ DCHECK(!value_reg.is(scratch));
+ __ LoadInstanceDescriptors(map_reg, scratch);
+ __ CmpP(value_reg, FieldMemOperand(
+ scratch, DescriptorArray::GetValueOffset(descriptor)));
+ __ bne(miss_label);
+}
+
+void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
+ Register value_reg,
+ Label* miss_label) {
+ Register map_reg = scratch1();
+ Register scratch = scratch2();
+ DCHECK(!value_reg.is(map_reg));
+ DCHECK(!value_reg.is(scratch));
+ __ JumpIfSmi(value_reg, miss_label);
+ if (field_type->IsClass()) {
+ __ LoadP(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
+ __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
+ scratch);
+ __ bne(miss_label);
+ }
+}
+
+Register PropertyHandlerCompiler::CheckPrototypes(
+ Register object_reg, Register holder_reg, Register scratch1,
+ Register scratch2, Handle<Name> name, Label* miss, PrototypeCheckType check,
+ ReturnHolder return_what) {
+ Handle<Map> receiver_map = map();
+
+ // Make sure there's no overlap between holder and object registers.
+ DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
+ DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
+ !scratch2.is(scratch1));
+
+ if (FLAG_eliminate_prototype_chain_checks) {
+ Handle<Cell> validity_cell =
+ Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+ if (!validity_cell.is_null()) {
+ DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid),
+ validity_cell->value());
+ __ mov(scratch1, Operand(validity_cell));
+ __ LoadP(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
+ __ CmpSmiLiteral(scratch1, Smi::FromInt(Map::kPrototypeChainValid), r0);
+ __ bne(miss);
+ }
+
+ // The prototype chain of primitives (and their JSValue wrappers) depends
+ // on the native context, which can't be guarded by validity cells.
+ // |object_reg| holds the native context specific prototype in this case;
+ // we need to check its map.
+ if (check == CHECK_ALL_MAPS) {
+ __ LoadP(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+ Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
+ __ CmpWeakValue(scratch1, cell, scratch2);
+ __ b(ne, miss);
+ }
+ }
+
+ // Keep track of the current object in register reg.
+ Register reg = object_reg;
+ int depth = 0;
+
+ Handle<JSObject> current = Handle<JSObject>::null();
+ if (receiver_map->IsJSGlobalObjectMap()) {
+ current = isolate()->global_object();
+ }
+ // Check access rights to the global object. This has to happen after
+ // the map check so that we know that the object is actually a global
+ // object.
+ // This allows us to install generated handlers for accesses to the
+ // global proxy (as opposed to using slow ICs). See corresponding code
+ // in LookupForRead().
+ if (receiver_map->IsJSGlobalProxyMap()) {
+ __ CheckAccessGlobalProxy(reg, scratch2, miss);
+ }
+
+ Handle<JSObject> prototype = Handle<JSObject>::null();
+ Handle<Map> current_map = receiver_map;
+ Handle<Map> holder_map(holder()->map());
+ // Traverse the prototype chain and check the maps in the prototype chain for
+ // fast and global objects or do negative lookup for normal objects.
+ while (!current_map.is_identical_to(holder_map)) {
+ ++depth;
+
+ // Only global objects and objects that do not require access
+ // checks are allowed in stubs.
+ DCHECK(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+
+ prototype = handle(JSObject::cast(current_map->prototype()));
+ if (current_map->is_dictionary_map() &&
+ !current_map->IsJSGlobalObjectMap()) {
+ DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
+ if (!name->IsUniqueName()) {
+ DCHECK(name->IsString());
+ name = factory()->InternalizeString(Handle<String>::cast(name));
+ }
+ DCHECK(current.is_null() ||
+ current->property_dictionary()->FindEntry(name) ==
+ NameDictionary::kNotFound);
+
+ if (FLAG_eliminate_prototype_chain_checks && depth > 1) {
+ // TODO(jkummerow): Cache and re-use weak cell.
+ __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ }
+ GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
+ scratch2);
+ if (!FLAG_eliminate_prototype_chain_checks) {
+ __ LoadP(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ __ LoadP(holder_reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+ }
+ } else {
+ Register map_reg = scratch1;
+ if (!FLAG_eliminate_prototype_chain_checks) {
+ __ LoadP(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+ }
+ if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+ name, scratch2, miss);
+ } else if (!FLAG_eliminate_prototype_chain_checks &&
+ (depth != 1 || check == CHECK_ALL_MAPS)) {
+ Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+ __ CmpWeakValue(map_reg, cell, scratch2);
+ __ bne(miss);
+ }
+ if (!FLAG_eliminate_prototype_chain_checks) {
+ __ LoadP(holder_reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
+ }
+ }
+
+ reg = holder_reg; // From now on the object will be in holder_reg.
+ // Go to the next object in the prototype chain.
+ current = prototype;
+ current_map = handle(current->map());
+ }
+
+ DCHECK(!current_map->IsJSGlobalProxyMap());
+
+ // Log the check depth.
+ LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
+
+ if (!FLAG_eliminate_prototype_chain_checks &&
+ (depth != 0 || check == CHECK_ALL_MAPS)) {
+ // Check the holder map.
+ __ LoadP(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+ __ CmpWeakValue(scratch1, cell, scratch2);
+ __ bne(miss);
+ }
+
+ bool return_holder = return_what == RETURN_HOLDER;
+ if (FLAG_eliminate_prototype_chain_checks && return_holder && depth != 0) {
+ __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ }
+
+ // Return the register containing the holder.
+ return return_holder ? reg : no_reg;
+}
+
+void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
+ if (!miss->is_unused()) {
+ Label success;
+ __ b(&success);
+ __ bind(miss);
+ if (IC::ICUseVector(kind())) {
+ DCHECK(kind() == Code::LOAD_IC);
+ PopVectorAndSlot();
+ }
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
+ }
+}
+
+void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
+ if (!miss->is_unused()) {
+ Label success;
+ __ b(&success);
+ GenerateRestoreName(miss, name);
+ if (IC::ICUseVector(kind())) PopVectorAndSlot();
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
+ }
+}
+
+void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
+ // Return the constant value.
+ __ Move(r2, value);
+ __ Ret();
+}
+
+void NamedLoadHandlerCompiler::GenerateLoadCallback(
+ Register reg, Handle<AccessorInfo> callback) {
+ DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), receiver()));
+ DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
+
+ // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+ // name below the exit frame to make GC aware of them.
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+ __ Push(receiver());
+ // Push data from AccessorInfo.
+ Handle<Object> data(callback->data(), isolate());
+ if (data->IsUndefined() || data->IsSmi()) {
+ __ Move(scratch2(), data);
+ } else {
+ Handle<WeakCell> cell =
+ isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
+ // The callback is alive if this instruction is executed,
+ // so the weak cell is not cleared and points to data.
+ __ GetWeakValue(scratch2(), cell);
+ }
+ __ push(scratch2());
+ __ LoadRoot(scratch2(), Heap::kUndefinedValueRootIndex);
+ __ Push(scratch2(), scratch2());
+ __ mov(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
+ // should_throw_on_error -> false
+ __ mov(scratch3(), Operand(Smi::FromInt(0)));
+ __ Push(scratch2(), reg, scratch3(), name());
+
+ // Abi for CallApiGetter
+ Register getter_address_reg = ApiGetterDescriptor::function_address();
+
+ Address getter_address = v8::ToCData<Address>(callback->getter());
+ ApiFunction fun(getter_address);
+ ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
+ ExternalReference ref = ExternalReference(&fun, type, isolate());
+ __ mov(getter_address_reg, Operand(ref));
+
+ CallApiGetterStub stub(isolate());
+ __ TailCallStub(&stub);
+}
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
+ LookupIterator* it, Register holder_reg) {
+ DCHECK(holder()->HasNamedInterceptor());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+
+ // Compile the interceptor call, followed by inline code to load the
+ // property from further up the prototype chain if the call fails.
+ // Check that the maps haven't changed.
+ DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
+
+ // Preserve the receiver register explicitly whenever it is different from the
+ // holder and it is needed should the interceptor return without any result.
+ // The ACCESSOR case needs the receiver to be passed into C++ code, the FIELD
+ // case might cause a miss during the prototype check.
+ bool must_perform_prototype_check =
+ !holder().is_identical_to(it->GetHolder<JSObject>());
+ bool must_preserve_receiver_reg =
+ !receiver().is(holder_reg) &&
+ (it->state() == LookupIterator::ACCESSOR || must_perform_prototype_check);
+
+ // Save necessary data before invoking an interceptor.
+ // Requires a frame to make GC aware of pushed pointers.
+ {
+ FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+ if (must_preserve_receiver_reg) {
+ __ Push(receiver(), holder_reg, this->name());
+ } else {
+ __ Push(holder_reg, this->name());
+ }
+ InterceptorVectorSlotPush(holder_reg);
+ // Invoke an interceptor. Note: map checks from receiver to
+ // interceptor's holder has been compiled before (see a caller
+ // of this method.)
+ CompileCallLoadPropertyWithInterceptor(
+ masm(), receiver(), holder_reg, this->name(), holder(),
+ Runtime::kLoadPropertyWithInterceptorOnly);
+
+ // Check if interceptor provided a value for property. If it's
+ // the case, return immediately.
+ Label interceptor_failed;
+ __ CompareRoot(r2, Heap::kNoInterceptorResultSentinelRootIndex);
+ __ beq(&interceptor_failed, Label::kNear);
+ frame_scope.GenerateLeaveFrame();
+ __ Ret();
+
+ __ bind(&interceptor_failed);
+ InterceptorVectorSlotPop(holder_reg);
+ __ Pop(this->name());
+ __ Pop(holder_reg);
+ if (must_preserve_receiver_reg) {
+ __ Pop(receiver());
+ }
+ // Leave the internal frame.
+ }
+
+ GenerateLoadPostInterceptor(it, holder_reg);
+}
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
+ // Call the runtime system to load the interceptor.
+ DCHECK(holder()->HasNamedInterceptor());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+ PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
+ holder());
+
+ __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
+}
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
+ Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
+ LanguageMode language_mode) {
+ Register holder_reg = Frontend(name);
+
+ __ Push(receiver(), holder_reg); // receiver
+
+ // If the callback cannot leak, then push the callback directly,
+ // otherwise wrap it in a weak cell.
+ if (callback->data()->IsUndefined() || callback->data()->IsSmi()) {
+ __ mov(ip, Operand(callback));
+ } else {
+ Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
+ __ mov(ip, Operand(cell));
+ }
+ __ Push(ip);
+ __ mov(ip, Operand(name));
+ __ Push(ip, value());
+ __ Push(Smi::FromInt(language_mode));
+
+ // Do tail-call to the runtime system.
+ __ TailCallRuntime(Runtime::kStoreCallbackProperty);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+Register NamedStoreHandlerCompiler::value() {
+ return StoreDescriptor::ValueRegister();
+}
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
+ Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
+ Label miss;
+ if (IC::ICUseVector(kind())) {
+ PushVectorAndSlot();
+ }
+ FrontendHeader(receiver(), name, &miss, DONT_RETURN_ANYTHING);
+
+ // Get the value from the cell.
+ Register result = StoreDescriptor::ValueRegister();
+ Handle<WeakCell> weak_cell = factory()->NewWeakCell(cell);
+ __ LoadWeakValue(result, weak_cell, &miss);
+ __ LoadP(result, FieldMemOperand(result, PropertyCell::kValueOffset));
+
+ // Check for deleted property if property can actually be deleted.
+ if (is_configurable) {
+ __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+ __ beq(&miss);
+ }
+
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->ic_named_load_global_stub(), 1, r3, r5);
+ if (IC::ICUseVector(kind())) {
+ DiscardVectorAndSlot();
+ }
+ __ Ret();
+
+ FrontendFooter(name, &miss);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::NORMAL, name);
+}
+
+#undef __
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/ic/s390/ic-compiler-s390.cc b/deps/v8/src/ic/s390/ic-compiler-s390.cc
new file mode 100644
index 0000000000..a7691d83c5
--- /dev/null
+++ b/deps/v8/src/ic/s390/ic-compiler-s390.cc
@@ -0,0 +1,29 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void PropertyICCompiler::GenerateRuntimeSetProperty(
+ MacroAssembler* masm, LanguageMode language_mode) {
+ __ mov(r0, Operand(Smi::FromInt(language_mode)));
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister(), r0);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kSetProperty);
+}
+
+#undef __
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_S390
diff --git a/deps/v8/src/ic/s390/ic-s390.cc b/deps/v8/src/ic/s390/ic-s390.cc
new file mode 100644
index 0000000000..d4f28868e7
--- /dev/null
+++ b/deps/v8/src/ic/s390/ic-s390.cc
@@ -0,0 +1,897 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/ic/ic.h"
+#include "src/codegen.h"
+#include "src/ic/ic-compiler.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// Static IC stub generators.
+//
+
+#define __ ACCESS_MASM(masm)
+
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
+ Label* global_object) {
+ // Register usage:
+ // type: holds the receiver instance type on entry.
+ __ CmpP(type, Operand(JS_GLOBAL_OBJECT_TYPE));
+ __ beq(global_object);
+ __ CmpP(type, Operand(JS_GLOBAL_PROXY_TYPE));
+ __ beq(global_object);
+}
+
+// Helper function used from LoadIC GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+// label is done.
+// name: Property name. It is not clobbered if a jump to the miss label is
+// done
+// result: Register for the result. It is only updated if a jump to the miss
+// label is not done. Can be the same as elements or name clobbering
+// one of these in the case of not jumping to the miss label.
+// The two scratch registers need to be different from elements, name and
+// result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
+ Register elements, Register name,
+ Register result, Register scratch1,
+ Register scratch2) {
+ // Main use of the scratch registers.
+ // scratch1: Used as temporary and to hold the capacity of the property
+ // dictionary.
+ // scratch2: Used as temporary.
+ Label done;
+
+ // Probe the dictionary.
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
+ name, scratch1, scratch2);
+
+ // If probing finds an entry check that the value is a normal
+ // property.
+ __ bind(&done); // scratch2 == elements + 4 * index
+ const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+ __ LoadRR(r0, scratch2);
+ __ LoadSmiLiteral(scratch2, Smi::FromInt(PropertyDetails::TypeField::kMask));
+ __ AndP(scratch2, scratch1);
+ __ bne(miss);
+ __ LoadRR(scratch2, r0);
+
+ // Get the value at the masked, scaled index and return.
+ __ LoadP(result,
+ FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
+}
+
+// Helper function used from StoreIC::GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+// label is done.
+// name: Property name. It is not clobbered if a jump to the miss label is
+// done
+// value: The value to store.
+// The two scratch registers need to be different from elements, name and
+// result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
+ Register elements, Register name,
+ Register value, Register scratch1,
+ Register scratch2) {
+ // Main use of the scratch registers.
+ // scratch1: Used as temporary and to hold the capacity of the property
+ // dictionary.
+ // scratch2: Used as temporary.
+ Label done;
+
+ // Probe the dictionary.
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
+ name, scratch1, scratch2);
+
+ // If probing finds an entry in the dictionary check that the value
+ // is a normal property that is not read only.
+ __ bind(&done); // scratch2 == elements + 4 * index
+ const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ int kTypeAndReadOnlyMask =
+ PropertyDetails::TypeField::kMask |
+ PropertyDetails::AttributesField::encode(READ_ONLY);
+ __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+ __ LoadRR(r0, scratch2);
+ __ LoadSmiLiteral(scratch2, Smi::FromInt(kTypeAndReadOnlyMask));
+ __ AndP(scratch2, scratch1);
+ __ bne(miss /*, cr0*/);
+ __ LoadRR(scratch2, r0);
+
+ // Store the value at the masked, scaled index and return.
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ AddP(scratch2, Operand(kValueOffset - kHeapObjectTag));
+ __ StoreP(value, MemOperand(scratch2));
+
+ // Update the write barrier. Make sure not to clobber the value.
+ __ LoadRR(scratch1, value);
+ __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+}
+
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for regular JS object.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+ Register receiver, Register map,
+ Register scratch,
+ int interceptor_bit, Label* slow) {
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(receiver, slow);
+ // Get the map of the receiver.
+ __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ // Check bit field.
+ __ LoadlB(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+ DCHECK(((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)) < 0x8000);
+ __ mov(r0,
+ Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
+ __ AndP(r0, scratch);
+ __ bne(slow /*, cr0*/);
+ // Check that the object is some kind of JS object EXCEPT JS Value type.
+ // In the case that the object is a value-wrapper object,
+ // we enter the runtime system to make sure that indexing into string
+ // objects work as intended.
+ DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+ __ LoadlB(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ CmpP(scratch, Operand(JS_OBJECT_TYPE));
+ __ blt(slow);
+}
+
+// Loads an indexed element from a fast case array.
+static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
+ Register key, Register elements,
+ Register scratch1, Register scratch2,
+ Register result, Label* slow) {
+ // Register use:
+ //
+ // receiver - holds the receiver on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // result - holds the result on exit if the load succeeded.
+ // Allowed to be the the same as 'receiver' or 'key'.
+ // Unchanged on bailout so 'receiver' and 'key' can be safely
+ // used by further computation.
+ //
+ // Scratch registers:
+ //
+ // elements - holds the elements of the receiver and its protoypes.
+ //
+ // scratch1 - used to hold elements length, bit fields, base addresses.
+ //
+ // scratch2 - used to hold maps, prototypes, and the loaded value.
+ Label check_prototypes, check_next_prototype;
+ Label done, in_bounds, absent;
+
+ __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ AssertFastElements(elements);
+
+ // Check that the key (index) is within bounds.
+ __ LoadP(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ CmpLogicalP(key, scratch1);
+ __ blt(&in_bounds, Label::kNear);
+ // Out-of-bounds. Check the prototype chain to see if we can just return
+ // 'undefined'.
+ __ CmpP(key, Operand::Zero());
+ __ blt(slow); // Negative keys can't take the fast OOB path.
+ __ bind(&check_prototypes);
+ __ LoadP(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ bind(&check_next_prototype);
+ __ LoadP(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
+ // scratch2: current prototype
+ __ CompareRoot(scratch2, Heap::kNullValueRootIndex);
+ __ beq(&absent, Label::kNear);
+ __ LoadP(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
+ __ LoadP(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
+ // elements: elements of current prototype
+ // scratch2: map of current prototype
+ __ CompareInstanceType(scratch2, scratch1, JS_OBJECT_TYPE);
+ __ blt(slow);
+ __ LoadlB(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
+ __ AndP(r0, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
+ (1 << Map::kHasIndexedInterceptor)));
+ __ bne(slow);
+ __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
+ __ bne(slow);
+ __ jmp(&check_next_prototype);
+
+ __ bind(&absent);
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ jmp(&done);
+
+ __ bind(&in_bounds);
+ // Fast case: Do the load.
+ __ AddP(scratch1, elements,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ // The key is a smi.
+ __ SmiToPtrArrayOffset(scratch2, key);
+ __ LoadP(scratch2, MemOperand(scratch2, scratch1));
+ __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
+ // In case the loaded value is the_hole we have to check the prototype chain.
+ __ beq(&check_prototypes);
+ __ LoadRR(result, scratch2);
+ __ bind(&done);
+}
+
+// Checks whether a key is an array index string or a unique name.
+// Falls through if a key is a unique name.
+static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
+ Register map, Register hash,
+ Label* index_string, Label* not_unique) {
+ // The key is not a smi.
+ Label unique;
+ // Is it a name?
+ __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE);
+ __ bgt(not_unique);
+ STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
+ __ beq(&unique, Label::kNear);
+
+ // Is the string an array index, with cached numeric value?
+ __ LoadlW(hash, FieldMemOperand(key, Name::kHashFieldOffset));
+ __ mov(r7, Operand(Name::kContainsCachedArrayIndexMask));
+ __ AndP(r0, hash, r7);
+ __ beq(index_string);
+
+ // Is the string internalized? We know it's a string, so a single
+ // bit test is enough.
+ // map: key map
+ __ LoadlB(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kInternalizedTag == 0);
+ __ tmll(hash, Operand(kIsNotInternalizedMask));
+ __ bne(not_unique);
+
+ __ bind(&unique);
+}
+
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+ Register dictionary = r2;
+ DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
+
+ Label slow;
+
+ __ LoadP(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
+ JSObject::kPropertiesOffset));
+ GenerateDictionaryLoad(masm, &slow, dictionary,
+ LoadDescriptor::NameRegister(), r2, r5, r6);
+ __ Ret();
+
+ // Dictionary load failed, go slow (but don't miss).
+ __ bind(&slow);
+ GenerateRuntimeGetProperty(masm);
+}
+
+// A register that isn't one of the parameters to the load ic.
+static const Register LoadIC_TempRegister() { return r5; }
+
+static void LoadIC_PushArgs(MacroAssembler* masm) {
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
+ Register slot = LoadDescriptor::SlotRegister();
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
+
+ __ Push(receiver, name, slot, vector);
+}
+
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+ // The return address is in lr.
+ Isolate* isolate = masm->isolate();
+
+ DCHECK(!AreAliased(r6, r7, LoadWithVectorDescriptor::SlotRegister(),
+ LoadWithVectorDescriptor::VectorRegister()));
+ __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, r6, r7);
+
+ LoadIC_PushArgs(masm);
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(Runtime::kLoadIC_Miss);
+}
+
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // The return address is in lr.
+
+ __ LoadRR(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
+ __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kGetProperty);
+}
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+ // The return address is in lr.
+ Isolate* isolate = masm->isolate();
+
+ DCHECK(!AreAliased(r6, r7, LoadWithVectorDescriptor::SlotRegister(),
+ LoadWithVectorDescriptor::VectorRegister()));
+ __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, r6, r7);
+
+ LoadIC_PushArgs(masm);
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
+}
+
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // The return address is in lr.
+
+ __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kKeyedGetProperty);
+}
+
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+ // The return address is in lr.
+ Label slow, check_name, index_smi, index_name, property_array_property;
+ Label probe_dictionary, check_number_dictionary;
+
+ Register key = LoadDescriptor::NameRegister();
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ DCHECK(key.is(r4));
+ DCHECK(receiver.is(r3));
+
+ Isolate* isolate = masm->isolate();
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(key, &check_name);
+ __ bind(&index_smi);
+ // Now the key is known to be a smi. This place is also jumped to from below
+ // where a numeric string is converted to a smi.
+
+ GenerateKeyedLoadReceiverCheck(masm, receiver, r2, r5,
+ Map::kHasIndexedInterceptor, &slow);
+
+ // Check the receiver's map to see if it has fast elements.
+ __ CheckFastElements(r2, r5, &check_number_dictionary);
+
+ GenerateFastArrayLoad(masm, receiver, key, r2, r5, r6, r2, &slow);
+ __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, r6,
+ r5);
+ __ Ret();
+
+ __ bind(&check_number_dictionary);
+ __ LoadP(r6, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ LoadP(r5, FieldMemOperand(r6, JSObject::kMapOffset));
+
+ // Check whether the elements is a number dictionary.
+ // r5: elements map
+ // r6: elements
+ __ CompareRoot(r5, Heap::kHashTableMapRootIndex);
+ __ bne(&slow, Label::kNear);
+ __ SmiUntag(r2, key);
+ __ LoadFromNumberDictionary(&slow, r6, key, r2, r2, r5, r7);
+ __ Ret();
+
+ // Slow case, key and receiver still in r2 and r3.
+ __ bind(&slow);
+ __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, r6,
+ r5);
+ GenerateRuntimeGetProperty(masm);
+
+ __ bind(&check_name);
+ GenerateKeyNameCheck(masm, key, r2, r5, &index_name, &slow);
+
+ GenerateKeyedLoadReceiverCheck(masm, receiver, r2, r5,
+ Map::kHasNamedInterceptor, &slow);
+
+ // If the receiver is a fast-case object, check the stub cache. Otherwise
+ // probe the dictionary.
+ __ LoadP(r5, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ LoadP(r6, FieldMemOperand(r5, HeapObject::kMapOffset));
+ __ CompareRoot(r6, Heap::kHashTableMapRootIndex);
+ __ beq(&probe_dictionary);
+
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
+ Register slot = LoadWithVectorDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, r6, r7, r8, r9));
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(masm->isolate());
+ int slot_index = dummy_vector->GetIndex(
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
+ __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
+ __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
+
+ Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::LOAD_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
+ receiver, key, r6, r7, r8, r9);
+ // Cache miss.
+ GenerateMiss(masm);
+
+ // Do a quick inline probe of the receiver's dictionary, if it
+ // exists.
+ __ bind(&probe_dictionary);
+ // r5: elements
+ __ LoadP(r2, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ LoadlB(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+ GenerateGlobalInstanceTypeCheck(masm, r2, &slow);
+ // Load the property to r2.
+ GenerateDictionaryLoad(masm, &slow, r5, key, r2, r7, r6);
+ __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
+ r6, r5);
+ __ Ret();
+
+ __ bind(&index_name);
+ __ IndexFromHash(r5, key);
+ // Now jump to the place where smi keys are handled.
+ __ b(&index_smi);
+}
+
+static void StoreIC_PushArgs(MacroAssembler* masm) {
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
+}
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+ StoreIC_PushArgs(masm);
+
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
+}
+
+static void KeyedStoreGenerateMegamorphicHelper(
+ MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
+ KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
+ Register value, Register key, Register receiver, Register receiver_map,
+ Register elements_map, Register elements) {
+ Label transition_smi_elements;
+ Label finish_object_store, non_double_value, transition_double_elements;
+ Label fast_double_without_map_check;
+
+ // Fast case: Do the store, could be either Object or double.
+ __ bind(fast_object);
+ Register scratch = r6;
+ Register address = r7;
+ DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
+ scratch, address));
+
+ if (check_map == kCheckMap) {
+ __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ CmpP(elements_map,
+ Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ bne(fast_double);
+ }
+
+ // HOLECHECK: guards "A[i] = V"
+ // We have to go to the runtime if the current value is the hole because
+ // there may be a callback on the element
+ Label holecheck_passed1;
+ // @TODO(joransiu) : Fold AddP into memref of LoadP
+ __ AddP(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ SmiToPtrArrayOffset(scratch, key);
+ __ LoadP(scratch, MemOperand(address, scratch));
+ __ CmpP(scratch, Operand(masm->isolate()->factory()->the_hole_value()));
+ __ bne(&holecheck_passed1, Label::kNear);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
+
+ __ bind(&holecheck_passed1);
+
+ // Smi stores don't require further checks.
+ Label non_smi_value;
+ __ JumpIfNotSmi(value, &non_smi_value);
+
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
+ __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ }
+ // It's irrelevant whether array is smi-only or not when writing a smi.
+ __ AddP(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ SmiToPtrArrayOffset(scratch, key);
+ __ StoreP(value, MemOperand(address, scratch));
+ __ Ret();
+
+ __ bind(&non_smi_value);
+ // Escape to elements kind transition case.
+ __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);
+
+ // Fast elements array, store the value to the elements backing store.
+ __ bind(&finish_object_store);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
+ __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ }
+ __ AddP(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ SmiToPtrArrayOffset(scratch, key);
+ __ StoreP(value, MemOperand(address, scratch));
+ __ la(address, MemOperand(address, scratch));
+ // Update write barrier for the elements array address.
+ __ LoadRR(scratch, value); // Preserve the value which is returned.
+ __ RecordWrite(elements, address, scratch, kLRHasNotBeenSaved,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Ret();
+
+ __ bind(fast_double);
+ if (check_map == kCheckMap) {
+ // Check for fast double array case. If this fails, call through to the
+ // runtime.
+ __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
+ __ bne(slow);
+ }
+
+ // HOLECHECK: guards "A[i] double hole?"
+ // We have to see if the double version of the hole is present. If so
+ // go to the runtime.
+ // @TODO(joransiu) : Fold AddP Operand into LoadlW
+ __ AddP(address, elements,
+ Operand((FixedDoubleArray::kHeaderSize + Register::kExponentOffset -
+ kHeapObjectTag)));
+ __ SmiToDoubleArrayOffset(scratch, key);
+ __ LoadlW(scratch, MemOperand(address, scratch));
+ __ CmpP(scratch, Operand(kHoleNanUpper32));
+ __ bne(&fast_double_without_map_check, Label::kNear);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
+
+ __ bind(&fast_double_without_map_check);
+ __ StoreNumberToDoubleElements(value, key, elements, scratch, d0,
+ &transition_double_elements);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
+ __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ }
+ __ Ret();
+
+ __ bind(&transition_smi_elements);
+ // Transition the array appropriately depending on the value type.
+ __ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
+ __ bne(&non_double_value);
+
+ // Value is a double. Transition FAST_SMI_ELEMENTS ->
+ // FAST_DOUBLE_ELEMENTS and complete the store.
+ __ LoadTransitionedArrayMapConditional(
+ FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
+ AllocationSiteMode mode =
+ AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
+ receiver_map, mode, slow);
+ __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ b(&fast_double_without_map_check);
+
+ __ bind(&non_double_value);
+ // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
+ receiver_map, scratch, slow);
+ mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+ masm, receiver, key, value, receiver_map, mode, slow);
+ __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ b(&finish_object_store);
+
+ __ bind(&transition_double_elements);
+ // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
+ // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
+ // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
+ __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
+ receiver_map, scratch, slow);
+ mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateDoubleToObject(
+ masm, receiver, key, value, receiver_map, mode, slow);
+ __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ b(&finish_object_store);
+}
+
+void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
+ LanguageMode language_mode) {
+ // ---------- S t a t e --------------
+ // -- r2 : value
+ // -- r3 : key
+ // -- r4 : receiver
+ // -- lr : return address
+ // -----------------------------------
+ Label slow, fast_object, fast_object_grow;
+ Label fast_double, fast_double_grow;
+ Label array, extra, check_if_double_array, maybe_name_key, miss;
+
+ // Register usage.
+ Register value = StoreDescriptor::ValueRegister();
+ Register key = StoreDescriptor::NameRegister();
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ DCHECK(receiver.is(r3));
+ DCHECK(key.is(r4));
+ DCHECK(value.is(r2));
+ Register receiver_map = r5;
+ Register elements_map = r8;
+ Register elements = r9; // Elements array of the receiver.
+ // r6 and r7 are used as general scratch registers.
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(key, &maybe_name_key);
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(receiver, &slow);
+ // Get the map of the object.
+ __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ // Check that the receiver does not require access checks and is not observed.
+ // The generic stub does not perform map checks or handle observed objects.
+ __ LoadlB(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
+ __ AndP(r0, ip,
+ Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
+ __ bne(&slow, Label::kNear);
+ // Check if the object is a JS array or not.
+ __ LoadlB(r6, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
+ __ CmpP(r6, Operand(JS_ARRAY_TYPE));
+ __ beq(&array);
+ // Check that the object is some kind of JSObject.
+ __ CmpP(r6, Operand(FIRST_JS_OBJECT_TYPE));
+ __ blt(&slow, Label::kNear);
+
+ // Object case: Check key against length in the elements array.
+ __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ // Check array bounds. Both the key and the length of FixedArray are smis.
+ __ CmpLogicalP(key, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ blt(&fast_object);
+
+ // Slow case, handle jump to runtime.
+ __ bind(&slow);
+ // Entry registers are intact.
+ // r2: value.
+ // r3: key.
+ // r4: receiver.
+ PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
+ // Never returns to here.
+
+ __ bind(&maybe_name_key);
+ __ LoadP(r6, FieldMemOperand(key, HeapObject::kMapOffset));
+ __ LoadlB(r6, FieldMemOperand(r6, Map::kInstanceTypeOffset));
+ __ JumpIfNotUniqueNameInstanceType(r6, &slow);
+
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, r7, r8, r9, ip));
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(masm->isolate());
+ int slot_index = dummy_vector->GetIndex(
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+ __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
+ __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
+
+ Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::STORE_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
+ receiver, key, r7, r8, r9, ip);
+ // Cache miss.
+ __ b(&miss);
+
+ // Extra capacity case: Check if there is extra capacity to
+ // perform the store and update the length. Used for adding one
+ // element to the array by writing to array[array.length].
+ __ bind(&extra);
+ // Condition code from comparing key and array length is still available.
+ __ bne(&slow); // Only support writing to writing to array[array.length].
+ // Check for room in the elements backing store.
+ // Both the key and the length of FixedArray are smis.
+ __ CmpLogicalP(key, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ bge(&slow);
+ __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ CmpP(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ bne(&check_if_double_array, Label::kNear);
+ __ b(&fast_object_grow);
+
+ __ bind(&check_if_double_array);
+ __ CmpP(elements_map,
+ Operand(masm->isolate()->factory()->fixed_double_array_map()));
+ __ bne(&slow);
+ __ b(&fast_double_grow);
+
+ // Array case: Get the length and the elements array from the JS
+ // array. Check that the array is in fast mode (and writable); if it
+ // is the length is always a smi.
+ __ bind(&array);
+ __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+
+ // Check the key against the length in the array.
+ __ CmpLogicalP(key, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ bge(&extra);
+
+ KeyedStoreGenerateMegamorphicHelper(
+ masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
+ value, key, receiver, receiver_map, elements_map, elements);
+ KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
+ &fast_double_grow, &slow, kDontCheckMap,
+ kIncrementLength, value, key, receiver,
+ receiver_map, elements_map, elements);
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ DCHECK(receiver.is(r3));
+ DCHECK(name.is(r4));
+ DCHECK(StoreDescriptor::ValueRegister().is(r2));
+
+ // Get the receiver from the stack and probe the stub cache.
+ Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::STORE_IC));
+
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
+ receiver, name, r5, r6, r7, r8);
+
+ // Cache miss: Jump to runtime.
+ GenerateMiss(masm);
+}
+
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
+ StoreIC_PushArgs(masm);
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(Runtime::kStoreIC_Miss);
+}
+
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+ Label miss;
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
+ Register dictionary = r7;
+ DCHECK(receiver.is(r3));
+ DCHECK(name.is(r4));
+ DCHECK(value.is(r2));
+ DCHECK(VectorStoreICDescriptor::VectorRegister().is(r5));
+ DCHECK(VectorStoreICDescriptor::SlotRegister().is(r6));
+
+ __ LoadP(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+
+ GenerateDictionaryStore(masm, &miss, dictionary, name, value, r8, r9);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->ic_store_normal_hit(), 1, r8, r9);
+ __ Ret();
+
+ __ bind(&miss);
+ __ IncrementCounter(counters->ic_store_normal_miss(), 1, r8, r9);
+ GenerateMiss(masm);
+}
+
+#undef __
+
+Condition CompareIC::ComputeCondition(Token::Value op) {
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ return eq;
+ case Token::LT:
+ return lt;
+ case Token::GT:
+ return gt;
+ case Token::LTE:
+ return le;
+ case Token::GTE:
+ return ge;
+ default:
+ UNREACHABLE();
+ return kNoCondition;
+ }
+}
+
+bool CompareIC::HasInlinedSmiCode(Address address) {
+ // The address of the instruction following the call.
+ Address cmp_instruction_address =
+ Assembler::return_address_from_call_start(address);
+
+ // If the instruction following the call is not a CHI, nothing
+ // was inlined.
+ return (Instruction::S390OpcodeValue(cmp_instruction_address) == CHI);
+}
+
+//
+// This code is paired with the JumpPatchSite class in full-codegen-s390.cc
+//
+void PatchInlinedSmiCode(Isolate* isolate, Address address,
+ InlinedSmiCheck check) {
+ Address cmp_instruction_address =
+ Assembler::return_address_from_call_start(address);
+
+ // If the instruction following the call is not a cmp rx, #yyy, nothing
+ // was inlined.
+ Instr instr = Assembler::instr_at(cmp_instruction_address);
+ if (Instruction::S390OpcodeValue(cmp_instruction_address) != CHI) {
+ return;
+ }
+
+ if (Instruction::S390OpcodeValue(address) != BRASL) {
+ return;
+ }
+ // The delta to the start of the map check instruction and the
+ // condition code uses at the patched jump.
+ int delta = instr & 0x0000ffff;
+
+ // If the delta is 0 the instruction is cmp r0, #0 which also signals that
+ // nothing was inlined.
+ if (delta == 0) {
+ return;
+ }
+
+ if (FLAG_trace_ic) {
+ PrintF("[ patching ic at %p, cmp=%p, delta=%d\n", address,
+ cmp_instruction_address, delta);
+ }
+
+ // Expected sequence to enable by changing the following
+ // CR/CGR Rx, Rx // 2 / 4 bytes
+ // LR R0, R0 // 2 bytes // 31-bit only!
+ // BRC/BRCL // 4 / 6 bytes
+ // into
+ // TMLL Rx, XXX // 4 bytes
+ // BRC/BRCL // 4 / 6 bytes
+ // And vice versa to disable.
+
+ // The following constant is the size of the CR/CGR + LR + LR
+ const int kPatchAreaSizeNoBranch = 4;
+ Address patch_address = cmp_instruction_address - delta;
+ Address branch_address = patch_address + kPatchAreaSizeNoBranch;
+
+ Instr instr_at_patch = Assembler::instr_at(patch_address);
+ SixByteInstr branch_instr = Assembler::instr_at(branch_address);
+
+ // This is patching a conditional "jump if not smi/jump if smi" site.
+ size_t patch_size = 0;
+ if (Instruction::S390OpcodeValue(branch_address) == BRC) {
+ patch_size = kPatchAreaSizeNoBranch + 4;
+ } else if (Instruction::S390OpcodeValue(branch_address) == BRCL) {
+ patch_size = kPatchAreaSizeNoBranch + 6;
+ } else {
+ DCHECK(false);
+ }
+ CodePatcher patcher(isolate, patch_address, patch_size);
+ Register reg;
+ reg.reg_code = instr_at_patch & 0xf;
+ if (check == ENABLE_INLINED_SMI_CHECK) {
+ patcher.masm()->TestIfSmi(reg);
+ } else {
+ // Emit the NOP to ensure sufficient place for patching
+ // (replaced by LR + NILL)
+ DCHECK(check == DISABLE_INLINED_SMI_CHECK);
+ patcher.masm()->CmpP(reg, reg);
+#ifndef V8_TARGET_ARCH_S390X
+ patcher.masm()->nop();
+#endif
+ }
+
+ Condition cc = al;
+ if (Instruction::S390OpcodeValue(branch_address) == BRC) {
+ cc = static_cast<Condition>((branch_instr & 0x00f00000) >> 20);
+ DCHECK((cc == ne) || (cc == eq));
+ cc = (cc == ne) ? eq : ne;
+ patcher.masm()->brc(cc, Operand((branch_instr & 0xffff) << 1));
+ } else if (Instruction::S390OpcodeValue(branch_address) == BRCL) {
+ cc = static_cast<Condition>(
+ (branch_instr & (static_cast<uint64_t>(0x00f0) << 32)) >> 36);
+ DCHECK((cc == ne) || (cc == eq));
+ cc = (cc == ne) ? eq : ne;
+ patcher.masm()->brcl(cc, Operand((branch_instr & 0xffffffff) << 1));
+ } else {
+ DCHECK(false);
+ }
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_S390
diff --git a/deps/v8/src/ic/s390/stub-cache-s390.cc b/deps/v8/src/ic/s390/stub-cache-s390.cc
new file mode 100644
index 0000000000..054b946df8
--- /dev/null
+++ b/deps/v8/src/ic/s390/stub-cache-s390.cc
@@ -0,0 +1,187 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/ic/stub-cache.h"
+#include "src/codegen.h"
+#include "src/ic/ic.h"
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
+ Code::Kind ic_kind, Code::Flags flags,
+ StubCache::Table table, Register receiver, Register name,
+ // Number of the cache entry, not scaled.
+ Register offset, Register scratch, Register scratch2,
+ Register offset_scratch) {
+ ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+ ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+ ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+
+ uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
+ uintptr_t value_off_addr =
+ reinterpret_cast<uintptr_t>(value_offset.address());
+ uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());
+
+ // Check the relative positions of the address fields.
+ DCHECK(value_off_addr > key_off_addr);
+ DCHECK((value_off_addr - key_off_addr) % 4 == 0);
+ DCHECK((value_off_addr - key_off_addr) < (256 * 4));
+ DCHECK(map_off_addr > key_off_addr);
+ DCHECK((map_off_addr - key_off_addr) % 4 == 0);
+ DCHECK((map_off_addr - key_off_addr) < (256 * 4));
+
+ Label miss;
+ Register base_addr = scratch;
+ scratch = no_reg;
+
+ // Multiply by 3 because there are 3 fields per entry (name, code, map).
+ __ ShiftLeftP(offset_scratch, offset, Operand(1));
+ __ AddP(offset_scratch, offset, offset_scratch);
+
+ // Calculate the base address of the entry.
+ __ mov(base_addr, Operand(key_offset));
+#if V8_TARGET_ARCH_S390X
+ DCHECK(kPointerSizeLog2 > StubCache::kCacheIndexShift);
+ __ ShiftLeftP(offset_scratch, offset_scratch,
+ Operand(kPointerSizeLog2 - StubCache::kCacheIndexShift));
+#else
+ DCHECK(kPointerSizeLog2 == StubCache::kCacheIndexShift);
+#endif
+ __ AddP(base_addr, base_addr, offset_scratch);
+
+ // Check that the key in the entry matches the name.
+ __ CmpP(name, MemOperand(base_addr, 0));
+ __ bne(&miss, Label::kNear);
+
+ // Check the map matches.
+ __ LoadP(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
+ __ CmpP(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ bne(&miss, Label::kNear);
+
+ // Get the code entry from the cache.
+ Register code = scratch2;
+ scratch2 = no_reg;
+ __ LoadP(code, MemOperand(base_addr, value_off_addr - key_off_addr));
+
+ // Check that the flags match what we're looking for.
+ Register flags_reg = base_addr;
+ base_addr = no_reg;
+ __ LoadlW(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
+
+ DCHECK(!r0.is(flags_reg));
+ __ AndP(flags_reg, flags_reg, Operand(~Code::kFlagsNotUsedInLookup));
+ __ CmpLogicalP(flags_reg, Operand(flags));
+ __ bne(&miss, Label::kNear);
+
+#ifdef DEBUG
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+ __ b(&miss, Label::kNear);
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+ __ b(&miss, Label::kNear);
+ }
+#endif
+
+ // Jump to the first instruction in the code stub.
+ // TODO(joransiu): Combine into indirect branch
+ __ la(code, MemOperand(code, Code::kHeaderSize - kHeapObjectTag));
+ __ b(code);
+
+ // Miss: fall through.
+ __ bind(&miss);
+}
+
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
+ Code::Flags flags, Register receiver,
+ Register name, Register scratch, Register extra,
+ Register extra2, Register extra3) {
+ Isolate* isolate = masm->isolate();
+ Label miss;
+
+#if V8_TARGET_ARCH_S390X
+ // Make sure that code is valid. The multiplying code relies on the
+ // entry size being 24.
+ DCHECK(sizeof(Entry) == 24);
+#else
+ // Make sure that code is valid. The multiplying code relies on the
+ // entry size being 12.
+ DCHECK(sizeof(Entry) == 12);
+#endif
+
+ // Make sure the flags does not name a specific type.
+ DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
+
+ // Make sure that there are no register conflicts.
+ DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
+
+ // Check scratch, extra and extra2 registers are valid.
+ DCHECK(!scratch.is(no_reg));
+ DCHECK(!extra.is(no_reg));
+ DCHECK(!extra2.is(no_reg));
+ DCHECK(!extra3.is(no_reg));
+
+#ifdef DEBUG
+ // If vector-based ics are in use, ensure that scratch, extra, extra2 and
+ // extra3 don't conflict with the vector and slot registers, which need
+ // to be preserved for a handler call or miss.
+ if (IC::ICUseVector(ic_kind)) {
+ Register vector, slot;
+ if (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC) {
+ vector = VectorStoreICDescriptor::VectorRegister();
+ slot = VectorStoreICDescriptor::SlotRegister();
+ } else {
+ vector = LoadWithVectorDescriptor::VectorRegister();
+ slot = LoadWithVectorDescriptor::SlotRegister();
+ }
+ DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
+ }
+#endif
+
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
+ extra3);
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Get the map of the receiver and compute the hash.
+ __ LoadlW(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
+ __ LoadP(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ AddP(scratch, scratch, ip);
+ __ XorP(scratch, scratch, Operand(flags));
+ // The mask omits the last two bits because they are not part of the hash.
+ __ AndP(scratch, scratch,
+ Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
+
+ // Probe the primary table.
+ ProbeTable(isolate, masm, ic_kind, flags, kPrimary, receiver, name, scratch,
+ extra, extra2, extra3);
+
+ // Primary miss: Compute hash for secondary probe.
+ __ SubP(scratch, scratch, name);
+ __ AddP(scratch, scratch, Operand(flags));
+ __ AndP(scratch, scratch,
+ Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
+
+ // Probe the secondary table.
+ ProbeTable(isolate, masm, ic_kind, flags, kSecondary, receiver, name, scratch,
+ extra, extra2, extra3);
+
+ // Cache miss: Fall-through and let caller handle the miss by
+ // entering the runtime system.
+ __ bind(&miss);
+ __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
+ extra3);
+}
+
+#undef __
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_S390
diff --git a/deps/v8/src/ic/x64/handler-compiler-x64.cc b/deps/v8/src/ic/x64/handler-compiler-x64.cc
index ac3dd9a367..dde61691d5 100644
--- a/deps/v8/src/ic/x64/handler-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/handler-compiler-x64.cc
@@ -6,6 +6,7 @@
#include "src/ic/handler-compiler.h"
+#include "src/api-arguments.h"
#include "src/field-type.h"
#include "src/ic/call-optimization.h"
#include "src/ic/ic.h"
@@ -142,7 +143,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ PushReturnAddressFrom(scratch);
// Stack now matches JSFunction abi.
- // Abi for CallApiFunctionStub.
+ // Abi for CallApiCallbackStub.
Register callee = rdi;
Register data = rbx;
Register holder = rcx;
@@ -209,7 +210,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
RelocInfo::EXTERNAL_REFERENCE);
// Jump to stub.
- CallApiAccessorStub stub(isolate, is_store, call_data_undefined,
+ CallApiCallbackStub stub(isolate, is_store, call_data_undefined,
!optimization.is_constant_call());
__ TailCallStub(&stub);
}
@@ -238,6 +239,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Save context register
+ __ pushq(rsi);
// Save value register, so we can restore it later.
__ Push(value());
@@ -254,11 +257,11 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
}
__ Push(receiver);
__ Push(value());
- ParameterCount actual(1);
- ParameterCount expected(expected_arguments);
__ LoadAccessor(rdi, holder, accessor_index, ACCESSOR_SETTER);
- __ InvokeFunction(rdi, no_reg, expected, actual, CALL_FUNCTION,
- CheckDebugStepCallWrapper());
+ __ Set(rax, 1);
+ __ Call(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -269,7 +272,7 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
__ Pop(rax);
// Restore context register.
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ popq(rsi);
}
__ ret(0);
}
@@ -286,6 +289,9 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Save context register
+ __ pushq(rsi);
+
if (accessor_index >= 0) {
DCHECK(!holder.is(scratch));
DCHECK(!receiver.is(scratch));
@@ -297,11 +303,11 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
receiver = scratch;
}
__ Push(receiver);
- ParameterCount actual(0);
- ParameterCount expected(expected_arguments);
__ LoadAccessor(rdi, holder, accessor_index, ACCESSOR_GETTER);
- __ InvokeFunction(rdi, no_reg, expected, actual, CALL_FUNCTION,
- CheckDebugStepCallWrapper());
+ __ Set(rax, 0);
+ __ Call(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -309,7 +315,7 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
}
// Restore context register.
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ popq(rsi);
}
__ ret(0);
}
@@ -754,22 +760,6 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
}
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
- Handle<Name> name) {
- __ PopReturnAddressTo(scratch1());
- __ Push(receiver());
- __ Push(this->name());
- __ Push(value());
- __ PushReturnAddressFrom(scratch1());
-
- // Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
Register NamedStoreHandlerCompiler::value() {
return StoreDescriptor::ValueRegister();
}
diff --git a/deps/v8/src/ic/x87/handler-compiler-x87.cc b/deps/v8/src/ic/x87/handler-compiler-x87.cc
index 1b25f06347..281faba3c7 100644
--- a/deps/v8/src/ic/x87/handler-compiler-x87.cc
+++ b/deps/v8/src/ic/x87/handler-compiler-x87.cc
@@ -6,6 +6,7 @@
#include "src/ic/handler-compiler.h"
+#include "src/api-arguments.h"
#include "src/field-type.h"
#include "src/ic/call-optimization.h"
#include "src/ic/ic.h"
@@ -23,6 +24,9 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Save context register
+ __ push(esi);
+
if (accessor_index >= 0) {
DCHECK(!holder.is(scratch));
DCHECK(!receiver.is(scratch));
@@ -34,11 +38,11 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
receiver = scratch;
}
__ push(receiver);
- ParameterCount actual(0);
- ParameterCount expected(expected_arguments);
__ LoadAccessor(edi, holder, accessor_index, ACCESSOR_GETTER);
- __ InvokeFunction(edi, expected, actual, CALL_FUNCTION,
- CheckDebugStepCallWrapper());
+ __ Set(eax, 0);
+ __ Call(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -46,7 +50,7 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
}
// Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ pop(esi);
}
__ ret(0);
}
@@ -90,7 +94,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
// Bail out if the receiver has a named interceptor or requires access checks.
__ test_b(FieldOperand(scratch0, Map::kBitFieldOffset),
- kInterceptorOrAccessCheckNeededMask);
+ Immediate(kInterceptorOrAccessCheckNeededMask));
__ j(not_zero, miss_label);
// Check that receiver is a JSObject.
@@ -158,7 +162,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
// Stack now matches JSFunction abi.
DCHECK(optimization.is_simple_api_call());
- // Abi for CallApiFunctionStub.
+ // Abi for CallApiCallbackStub.
Register callee = edi;
Register data = ebx;
Register holder = ecx;
@@ -220,7 +224,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ mov(api_function_address, Immediate(function_address));
// Jump to stub.
- CallApiAccessorStub stub(isolate, is_store, call_data_undefined,
+ CallApiCallbackStub stub(isolate, is_store, call_data_undefined,
!optimization.is_constant_call());
__ TailCallStub(&stub);
}
@@ -252,6 +256,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Save context register
+ __ push(esi);
// Save value register, so we can restore it later.
__ push(value());
@@ -267,11 +273,11 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
}
__ push(receiver);
__ push(value());
- ParameterCount actual(1);
- ParameterCount expected(expected_arguments);
__ LoadAccessor(edi, holder, accessor_index, ACCESSOR_SETTER);
- __ InvokeFunction(edi, expected, actual, CALL_FUNCTION,
- CheckDebugStepCallWrapper());
+ __ Set(eax, 1);
+ __ Call(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -280,9 +286,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
// We have to return the passed value, not the return value of the setter.
__ pop(eax);
-
// Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ pop(esi);
}
__ ret(0);
}
@@ -758,22 +763,6 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
}
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
- Handle<Name> name) {
- __ pop(scratch1()); // remove the return address
- __ push(receiver());
- __ push(this->name());
- __ push(value());
- __ push(scratch1()); // restore return address
-
- // Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
Register NamedStoreHandlerCompiler::value() {
return StoreDescriptor::ValueRegister();
}
diff --git a/deps/v8/src/ic/x87/ic-x87.cc b/deps/v8/src/ic/x87/ic-x87.cc
index 5bbd9c5814..b51045bee8 100644
--- a/deps/v8/src/ic/x87/ic-x87.cc
+++ b/deps/v8/src/ic/x87/ic-x87.cc
@@ -150,8 +150,9 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
__ mov(map, FieldOperand(receiver, HeapObject::kMapOffset));
// Check bit field.
- __ test_b(FieldOperand(map, Map::kBitFieldOffset),
- (1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit));
+ __ test_b(
+ FieldOperand(map, Map::kBitFieldOffset),
+ Immediate((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
__ j(not_zero, slow);
// Check that the object is some kind of JS object EXCEPT JS Value type. In
// the case that the object is a value-wrapper object, we enter the runtime
@@ -202,9 +203,9 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
// scratch2: map of current prototype
__ CmpInstanceType(scratch2, JS_OBJECT_TYPE);
__ j(below, slow);
- __ test_b(
- FieldOperand(scratch2, Map::kBitFieldOffset),
- (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor));
+ __ test_b(FieldOperand(scratch2, Map::kBitFieldOffset),
+ Immediate((1 << Map::kIsAccessCheckNeeded) |
+ (1 << Map::kHasIndexedInterceptor)));
__ j(not_zero, slow);
__ cmp(scratch, masm->isolate()->factory()->empty_fixed_array());
__ j(not_equal, slow);
@@ -251,7 +252,7 @@ static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
// bit test is enough.
STATIC_ASSERT(kNotInternalizedTag != 0);
__ test_b(FieldOperand(map, Map::kInstanceTypeOffset),
- kIsNotInternalizedMask);
+ Immediate(kIsNotInternalizedMask));
__ j(not_zero, not_unique);
__ bind(&unique);
@@ -521,7 +522,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
// Check that the receiver does not require access checks and is not observed.
// The generic stub does not perform map checks or handle observed objects.
__ test_b(FieldOperand(edi, Map::kBitFieldOffset),
- 1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved);
+ Immediate(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
__ j(not_zero, &slow);
// Check that the key is a smi.
__ JumpIfNotSmi(key, &maybe_name_key);
diff --git a/deps/v8/src/interface-descriptors.cc b/deps/v8/src/interface-descriptors.cc
index cc46a56d94..9ee4269d3e 100644
--- a/deps/v8/src/interface-descriptors.cc
+++ b/deps/v8/src/interface-descriptors.cc
@@ -197,31 +197,9 @@ void StringCompareDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void ToLengthDescriptor::InitializePlatformSpecific(
+void TypeConversionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ToStringDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ToNameDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ToObjectDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister()};
+ Register registers[] = {ArgumentRegister()};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -352,6 +330,15 @@ void GrowArrayElementsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+FunctionType* FastArrayPushDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int paramater_count) {
+ Zone* zone = isolate->interface_descriptor_zone();
+ FunctionType* function =
+ Type::Function(AnyTagged(zone), AnyTagged(zone), 1, zone)->AsFunction();
+ function->InitParameter(0, UntaggedIntegral32(zone)); // actual #arguments
+ return function;
+}
+
FunctionType*
FastCloneRegExpDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int paramater_count) {
@@ -494,28 +481,45 @@ ArgumentAdaptorDescriptor::BuildCallInterfaceDescriptorFunctionType(
return function;
}
-FunctionType* ApiFunctionDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int paramater_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), 5, zone)->AsFunction();
- function->InitParameter(0, AnyTagged(zone)); // callee
- function->InitParameter(1, AnyTagged(zone)); // call_data
- function->InitParameter(2, AnyTagged(zone)); // holder
- function->InitParameter(3, ExternalPointer(zone)); // api_function_address
- function->InitParameter(4, UntaggedIntegral32(zone)); // actual #arguments
- return function;
+CallInterfaceDescriptor ApiCallbackDescriptorBase::ForArgs(Isolate* isolate,
+ int argc) {
+ switch (argc) {
+ case 0:
+ return ApiCallbackWith0ArgsDescriptor(isolate);
+ case 1:
+ return ApiCallbackWith1ArgsDescriptor(isolate);
+ case 2:
+ return ApiCallbackWith2ArgsDescriptor(isolate);
+ case 3:
+ return ApiCallbackWith3ArgsDescriptor(isolate);
+ case 4:
+ return ApiCallbackWith4ArgsDescriptor(isolate);
+ case 5:
+ return ApiCallbackWith5ArgsDescriptor(isolate);
+ case 6:
+ return ApiCallbackWith6ArgsDescriptor(isolate);
+ case 7:
+ return ApiCallbackWith7ArgsDescriptor(isolate);
+ default:
+ UNREACHABLE();
+ return VoidDescriptor(isolate);
+ }
}
-FunctionType* ApiAccessorDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int paramater_count) {
+FunctionType*
+ApiCallbackDescriptorBase::BuildCallInterfaceDescriptorFunctionTypeWithArg(
+ Isolate* isolate, int parameter_count, int argc) {
Zone* zone = isolate->interface_descriptor_zone();
FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), 4, zone)->AsFunction();
+ Type::Function(AnyTagged(zone), Type::Undefined(), 4 + argc, zone)
+ ->AsFunction();
function->InitParameter(0, AnyTagged(zone)); // callee
function->InitParameter(1, AnyTagged(zone)); // call_data
function->InitParameter(2, AnyTagged(zone)); // holder
function->InitParameter(3, ExternalPointer(zone)); // api_function_address
+ for (int i = 0; i < argc; i++) {
+ function->InitParameter(i, AnyTagged(zone));
+ }
return function;
}
diff --git a/deps/v8/src/interface-descriptors.h b/deps/v8/src/interface-descriptors.h
index fb1969d8ef..dcce0afe5c 100644
--- a/deps/v8/src/interface-descriptors.h
+++ b/deps/v8/src/interface-descriptors.h
@@ -23,18 +23,14 @@ class PlatformInterfaceDescriptor;
V(VectorStoreIC) \
V(InstanceOf) \
V(LoadWithVector) \
+ V(FastArrayPush) \
V(FastNewClosure) \
V(FastNewContext) \
V(FastNewObject) \
V(FastNewRestParameter) \
V(FastNewSloppyArguments) \
V(FastNewStrictArguments) \
- V(ToNumber) \
- V(ToLength) \
- V(ToString) \
- V(ToName) \
- V(ToObject) \
- V(NumberToString) \
+ V(TypeConversion) \
V(Typeof) \
V(FastCloneRegExp) \
V(FastCloneShallowArray) \
@@ -52,14 +48,22 @@ class PlatformInterfaceDescriptor;
V(TransitionElementsKind) \
V(AllocateHeapNumber) \
V(AllocateMutableHeapNumber) \
+ V(AllocateFloat32x4) \
+ V(AllocateInt32x4) \
+ V(AllocateUint32x4) \
+ V(AllocateBool32x4) \
+ V(AllocateInt16x8) \
+ V(AllocateUint16x8) \
+ V(AllocateBool16x8) \
+ V(AllocateInt8x16) \
+ V(AllocateUint8x16) \
+ V(AllocateBool8x16) \
V(AllocateInNewSpace) \
V(ArrayConstructorConstantArgCount) \
V(ArrayConstructor) \
V(InternalArrayConstructorConstantArgCount) \
V(InternalArrayConstructor) \
V(Compare) \
- V(CompareNil) \
- V(ToBoolean) \
V(BinaryOp) \
V(BinaryOpWithAllocationSite) \
V(StringAdd) \
@@ -68,8 +72,14 @@ class PlatformInterfaceDescriptor;
V(Named) \
V(CallHandler) \
V(ArgumentAdaptor) \
- V(ApiFunction) \
- V(ApiAccessor) \
+ V(ApiCallbackWith0Args) \
+ V(ApiCallbackWith1Args) \
+ V(ApiCallbackWith2Args) \
+ V(ApiCallbackWith3Args) \
+ V(ApiCallbackWith4Args) \
+ V(ApiCallbackWith5Args) \
+ V(ApiCallbackWith6Args) \
+ V(ApiCallbackWith7Args) \
V(ApiGetter) \
V(LoadGlobalViaContext) \
V(StoreGlobalViaContext) \
@@ -200,6 +210,7 @@ class CallInterfaceDescriptor {
void Initialize(Isolate* isolate, CallDescriptors::Key key) {
if (!data()->IsInitialized()) {
CallInterfaceDescriptorData* d = isolate->call_descriptor_data(key);
+ DCHECK(d == data()); // d should be a modifiable pointer to data().
InitializePlatformSpecific(d);
FunctionType* function_type = BuildCallInterfaceDescriptorFunctionType(
isolate, d->register_param_count());
@@ -211,18 +222,20 @@ class CallInterfaceDescriptor {
const CallInterfaceDescriptorData* data_;
};
+#define DECLARE_DESCRIPTOR_WITH_BASE(name, base) \
+ public: \
+ explicit name(Isolate* isolate) : base(isolate, key()) { \
+ Initialize(isolate, key()); \
+ } \
+ static inline CallDescriptors::Key key();
#define DECLARE_DESCRIPTOR(name, base) \
- explicit name(Isolate* isolate) : base(isolate, key()) { \
- Initialize(isolate, key()); \
- } \
- \
+ DECLARE_DESCRIPTOR_WITH_BASE(name, base) \
protected: \
void InitializePlatformSpecific(CallInterfaceDescriptorData* data) override; \
name(Isolate* isolate, CallDescriptors::Key key) : base(isolate, key) {} \
\
- public: \
- static inline CallDescriptors::Key key();
+ public:
#define DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(name, base) \
DECLARE_DESCRIPTOR(name, base) \
@@ -232,6 +245,17 @@ class CallInterfaceDescriptor {
\
public:
+#define DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(name, base, arg) \
+ DECLARE_DESCRIPTOR_WITH_BASE(name, base) \
+ protected: \
+ FunctionType* BuildCallInterfaceDescriptorFunctionType( \
+ Isolate* isolate, int register_param_count) override { \
+ return BuildCallInterfaceDescriptorFunctionTypeWithArg( \
+ isolate, register_param_count, arg); \
+ } \
+ \
+ public:
+
class VoidDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(VoidDescriptor, CallInterfaceDescriptor)
@@ -397,56 +421,13 @@ class FastNewStrictArgumentsDescriptor : public CallInterfaceDescriptor {
CallInterfaceDescriptor)
};
-
-class ToNumberDescriptor : public CallInterfaceDescriptor {
+class TypeConversionDescriptor final : public CallInterfaceDescriptor {
public:
- DECLARE_DESCRIPTOR(ToNumberDescriptor, CallInterfaceDescriptor)
-};
+ enum ParameterIndices { kArgumentIndex };
+ DECLARE_DESCRIPTOR(TypeConversionDescriptor, CallInterfaceDescriptor)
-class ToLengthDescriptor : public CallInterfaceDescriptor {
- public:
- enum ParameterIndices { kReceiverIndex };
-
- DECLARE_DESCRIPTOR(ToLengthDescriptor, CallInterfaceDescriptor)
-
- static const Register ReceiverRegister();
-};
-
-
-class ToStringDescriptor : public CallInterfaceDescriptor {
- public:
- enum ParameterIndices { kReceiverIndex };
-
- DECLARE_DESCRIPTOR(ToStringDescriptor, CallInterfaceDescriptor)
-
- static const Register ReceiverRegister();
-};
-
-
-class ToNameDescriptor : public CallInterfaceDescriptor {
- public:
- enum ParameterIndices { kReceiverIndex };
-
- DECLARE_DESCRIPTOR(ToNameDescriptor, CallInterfaceDescriptor)
-
- static const Register ReceiverRegister();
-};
-
-
-class ToObjectDescriptor : public CallInterfaceDescriptor {
- public:
- enum ParameterIndices { kReceiverIndex };
-
- DECLARE_DESCRIPTOR(ToObjectDescriptor, CallInterfaceDescriptor)
-
- static const Register ReceiverRegister();
-};
-
-
-class NumberToStringDescriptor : public CallInterfaceDescriptor {
- public:
- DECLARE_DESCRIPTOR(NumberToStringDescriptor, CallInterfaceDescriptor)
+ static const Register ArgumentRegister();
};
@@ -581,6 +562,13 @@ class AllocateHeapNumberDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(AllocateHeapNumberDescriptor, CallInterfaceDescriptor)
};
+#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
+ class Allocate##Type##Descriptor : public CallInterfaceDescriptor { \
+ public: \
+ DECLARE_DESCRIPTOR(Allocate##Type##Descriptor, CallInterfaceDescriptor) \
+ };
+SIMD128_TYPES(SIMD128_ALLOC_DESC)
+#undef SIMD128_ALLOC_DESC
class AllocateMutableHeapNumberDescriptor : public CallInterfaceDescriptor {
public:
@@ -631,18 +619,6 @@ class CompareDescriptor : public CallInterfaceDescriptor {
};
-class CompareNilDescriptor : public CallInterfaceDescriptor {
- public:
- DECLARE_DESCRIPTOR(CompareNilDescriptor, CallInterfaceDescriptor)
-};
-
-
-class ToBooleanDescriptor : public CallInterfaceDescriptor {
- public:
- DECLARE_DESCRIPTOR(ToBooleanDescriptor, CallInterfaceDescriptor)
-};
-
-
class BinaryOpDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(BinaryOpDescriptor, CallInterfaceDescriptor)
@@ -696,18 +672,75 @@ class ArgumentAdaptorDescriptor : public CallInterfaceDescriptor {
CallInterfaceDescriptor)
};
+// The ApiCallback*Descriptors have a lot of boilerplate. The superclass
+// ApiCallbackDescriptorBase contains all the logic, and the
+// ApiCallbackWith*ArgsDescriptor merely instantiate these with a
+// parameter for the number of args.
+//
+// The base class is not meant to be instantiated directly and has no
+// public constructors to ensure this is so.
+//
+// The simplest usage for all the ApiCallback*Descriptors is probably
+// ApiCallbackDescriptorBase::ForArgs(isolate, argc)
+//
+class ApiCallbackDescriptorBase : public CallInterfaceDescriptor {
+ public:
+ static CallInterfaceDescriptor ForArgs(Isolate* isolate, int argc);
-class ApiFunctionDescriptor : public CallInterfaceDescriptor {
+ protected:
+ ApiCallbackDescriptorBase(Isolate* isolate, CallDescriptors::Key key)
+ : CallInterfaceDescriptor(isolate, key) {}
+ void InitializePlatformSpecific(CallInterfaceDescriptorData* data) override;
+ FunctionType* BuildCallInterfaceDescriptorFunctionTypeWithArg(
+ Isolate* isolate, int parameter_count, int argc);
+};
+
+class ApiCallbackWith0ArgsDescriptor : public ApiCallbackDescriptorBase {
public:
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ApiFunctionDescriptor,
- CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(
+ ApiCallbackWith0ArgsDescriptor, ApiCallbackDescriptorBase, 0)
};
+class ApiCallbackWith1ArgsDescriptor : public ApiCallbackDescriptorBase {
+ public:
+ DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(
+ ApiCallbackWith1ArgsDescriptor, ApiCallbackDescriptorBase, 1)
+};
-class ApiAccessorDescriptor : public CallInterfaceDescriptor {
+class ApiCallbackWith2ArgsDescriptor : public ApiCallbackDescriptorBase {
public:
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ApiAccessorDescriptor,
- CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(
+ ApiCallbackWith2ArgsDescriptor, ApiCallbackDescriptorBase, 2)
+};
+
+class ApiCallbackWith3ArgsDescriptor : public ApiCallbackDescriptorBase {
+ public:
+ DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(
+ ApiCallbackWith3ArgsDescriptor, ApiCallbackDescriptorBase, 3)
+};
+
+class ApiCallbackWith4ArgsDescriptor : public ApiCallbackDescriptorBase {
+ public:
+ DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(
+ ApiCallbackWith4ArgsDescriptor, ApiCallbackDescriptorBase, 4)
+};
+
+class ApiCallbackWith5ArgsDescriptor : public ApiCallbackDescriptorBase {
+ public:
+ DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(
+ ApiCallbackWith5ArgsDescriptor, ApiCallbackDescriptorBase, 5)
+};
+
+class ApiCallbackWith6ArgsDescriptor : public ApiCallbackDescriptorBase {
+ public:
+ DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(
+ ApiCallbackWith6ArgsDescriptor, ApiCallbackDescriptorBase, 6)
+};
+
+class ApiCallbackWith7ArgsDescriptor : public ApiCallbackDescriptorBase {
+ public:
+ DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(
+ ApiCallbackWith7ArgsDescriptor, ApiCallbackDescriptorBase, 7)
};
@@ -741,6 +774,11 @@ class ContextOnlyDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(ContextOnlyDescriptor, CallInterfaceDescriptor)
};
+class FastArrayPushDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(FastArrayPushDescriptor,
+ CallInterfaceDescriptor)
+};
class GrowArrayElementsDescriptor : public CallInterfaceDescriptor {
public:
@@ -751,7 +789,7 @@ class GrowArrayElementsDescriptor : public CallInterfaceDescriptor {
static const Register KeyRegister();
};
-class InterpreterDispatchDescriptor : public CallInterfaceDescriptor {
+class InterpreterDispatchDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(InterpreterDispatchDescriptor,
CallInterfaceDescriptor)
@@ -784,8 +822,10 @@ class InterpreterCEntryDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(InterpreterCEntryDescriptor, CallInterfaceDescriptor)
};
+#undef DECLARE_DESCRIPTOR_WITH_BASE
#undef DECLARE_DESCRIPTOR
-
+#undef DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE
+#undef DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG
// We define the association between CallDescriptors::Key and the specialized
// descriptor here to reduce boilerplate and mistakes.
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index 7103c72178..109b01eab3 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -4,6 +4,7 @@
#include "src/interpreter/bytecode-array-builder.h"
#include "src/compiler.h"
+#include "src/interpreter/interpreter-intrinsics.h"
namespace v8 {
namespace internal {
@@ -17,49 +18,75 @@ class BytecodeArrayBuilder::PreviousBytecodeHelper BASE_EMBEDDED {
// This helper is expected to be instantiated only when the last bytecode is
// in the same basic block.
DCHECK(array_builder_.LastBytecodeInSameBlock());
+ bytecode_ = Bytecodes::FromByte(
+ array_builder_.bytecodes()->at(previous_bytecode_start_));
+ operand_scale_ = OperandScale::kSingle;
+ if (Bytecodes::IsPrefixScalingBytecode(bytecode_)) {
+ operand_scale_ = Bytecodes::PrefixBytecodeToOperandScale(bytecode_);
+ bytecode_ = Bytecodes::FromByte(
+ array_builder_.bytecodes()->at(previous_bytecode_start_ + 1));
+ }
}
// Returns the previous bytecode in the same basic block.
MUST_USE_RESULT Bytecode GetBytecode() const {
DCHECK_EQ(array_builder_.last_bytecode_start_, previous_bytecode_start_);
- return Bytecodes::FromByte(
- array_builder_.bytecodes()->at(previous_bytecode_start_));
+ return bytecode_;
}
- // Returns the operand at operand_index for the previous bytecode in the
- // same basic block.
- MUST_USE_RESULT uint32_t GetOperand(int operand_index) const {
- DCHECK_EQ(array_builder_.last_bytecode_start_, previous_bytecode_start_);
- Bytecode bytecode = GetBytecode();
- DCHECK_GE(operand_index, 0);
- DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode));
- size_t operand_offset =
- previous_bytecode_start_ +
- Bytecodes::GetOperandOffset(bytecode, operand_index);
- OperandSize size = Bytecodes::GetOperandSize(bytecode, operand_index);
- switch (size) {
- case OperandSize::kNone:
- UNREACHABLE();
- break;
- case OperandSize::kByte:
- return static_cast<uint32_t>(
- array_builder_.bytecodes()->at(operand_offset));
- case OperandSize::kShort:
- uint16_t operand =
- (array_builder_.bytecodes()->at(operand_offset) << 8) +
- array_builder_.bytecodes()->at(operand_offset + 1);
- return static_cast<uint32_t>(operand);
- }
- return 0;
+ MUST_USE_RESULT Register GetRegisterOperand(int operand_index) const {
+ return Register::FromOperand(GetSignedOperand(operand_index));
+ }
+
+ MUST_USE_RESULT uint32_t GetIndexOperand(int operand_index) const {
+ return GetUnsignedOperand(operand_index);
}
Handle<Object> GetConstantForIndexOperand(int operand_index) const {
return array_builder_.constant_array_builder()->At(
- GetOperand(operand_index));
+ GetIndexOperand(operand_index));
}
private:
+ // Returns the signed operand at operand_index for the previous
+ // bytecode in the same basic block.
+ MUST_USE_RESULT int32_t GetSignedOperand(int operand_index) const {
+ DCHECK_EQ(array_builder_.last_bytecode_start_, previous_bytecode_start_);
+ OperandType operand_type =
+ Bytecodes::GetOperandType(bytecode_, operand_index);
+ DCHECK(!Bytecodes::IsUnsignedOperandType(operand_type));
+ const uint8_t* operand_start = GetOperandStart(operand_index);
+ return Bytecodes::DecodeSignedOperand(operand_start, operand_type,
+ operand_scale_);
+ }
+
+ // Returns the unsigned operand at operand_index for the previous
+ // bytecode in the same basic block.
+ MUST_USE_RESULT uint32_t GetUnsignedOperand(int operand_index) const {
+ DCHECK_EQ(array_builder_.last_bytecode_start_, previous_bytecode_start_);
+ OperandType operand_type =
+ Bytecodes::GetOperandType(bytecode_, operand_index);
+ DCHECK(Bytecodes::IsUnsignedOperandType(operand_type));
+ const uint8_t* operand_start = GetOperandStart(operand_index);
+ return Bytecodes::DecodeUnsignedOperand(operand_start, operand_type,
+ operand_scale_);
+ }
+
+ const uint8_t* GetOperandStart(int operand_index) const {
+ size_t operand_offset =
+ previous_bytecode_start_ + prefix_offset() +
+ Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale_);
+ return &(*array_builder_.bytecodes())[0] + operand_offset;
+ }
+
+ int prefix_offset() const {
+ return Bytecodes::OperandScaleRequiresPrefixBytecode(operand_scale_) ? 1
+ : 0;
+ }
+
const BytecodeArrayBuilder& array_builder_;
+ OperandScale operand_scale_;
+ Bytecode bytecode_;
size_t previous_bytecode_start_;
DISALLOW_COPY_AND_ASSIGN(PreviousBytecodeHelper);
@@ -67,7 +94,8 @@ class BytecodeArrayBuilder::PreviousBytecodeHelper BASE_EMBEDDED {
BytecodeArrayBuilder::BytecodeArrayBuilder(Isolate* isolate, Zone* zone,
int parameter_count,
- int context_count, int locals_count)
+ int context_count, int locals_count,
+ FunctionLiteral* literal)
: isolate_(isolate),
zone_(zone),
bytecodes_(zone),
@@ -82,11 +110,15 @@ BytecodeArrayBuilder::BytecodeArrayBuilder(Isolate* isolate, Zone* zone,
parameter_count_(parameter_count),
local_register_count_(locals_count),
context_register_count_(context_count),
- temporary_allocator_(zone, fixed_register_count()),
- register_translator_(this) {
+ temporary_allocator_(zone, fixed_register_count()) {
DCHECK_GE(parameter_count_, 0);
DCHECK_GE(context_register_count_, 0);
DCHECK_GE(local_register_count_, 0);
+ return_position_ =
+ literal ? std::max(literal->start_position(), literal->end_position() - 1)
+ : RelocInfo::kNoPosition;
+ LOG_CODE_EVENT(isolate_, CodeStartLinePosInfoRecordEvent(
+ source_position_table_builder()));
}
BytecodeArrayBuilder::~BytecodeArrayBuilder() { DCHECK_EQ(0, unbound_jumps_); }
@@ -119,45 +151,49 @@ Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray() {
DCHECK(exit_seen_in_block_);
int bytecode_size = static_cast<int>(bytecodes_.size());
- int register_count =
- fixed_and_temporary_register_count() + translation_register_count();
+ int register_count = fixed_and_temporary_register_count();
int frame_size = register_count * kPointerSize;
Handle<FixedArray> constant_pool = constant_array_builder()->ToFixedArray();
Handle<FixedArray> handler_table = handler_table_builder()->ToHandlerTable();
- Handle<FixedArray> source_position_table =
- source_position_table_builder()->ToFixedArray();
- Handle<BytecodeArray> output = isolate_->factory()->NewBytecodeArray(
+ Handle<ByteArray> source_position_table =
+ source_position_table_builder()->ToSourcePositionTable();
+ Handle<BytecodeArray> bytecode_array = isolate_->factory()->NewBytecodeArray(
bytecode_size, &bytecodes_.front(), frame_size, parameter_count(),
constant_pool);
- output->set_handler_table(*handler_table);
- output->set_source_position_table(*source_position_table);
+ bytecode_array->set_handler_table(*handler_table);
+ bytecode_array->set_source_position_table(*source_position_table);
+
+ void* line_info = source_position_table_builder()->DetachJITHandlerData();
+ LOG_CODE_EVENT(isolate_, CodeEndLinePosInfoRecordEvent(
+ AbstractCode::cast(*bytecode_array), line_info));
+
bytecode_generated_ = true;
- return output;
+ return bytecode_array;
}
-
template <size_t N>
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t(&operands)[N]) {
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t (&operands)[N],
+ OperandScale operand_scale) {
// Don't output dead code.
- if (exit_seen_in_block_) {
- source_position_table_builder_.RevertPosition(bytecodes()->size());
- return;
- }
+ if (exit_seen_in_block_) return;
int operand_count = static_cast<int>(N);
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count);
- int register_operand_count = Bytecodes::NumberOfRegisterOperands(bytecode);
- if (register_operand_count > 0) {
- register_translator()->TranslateInputRegisters(bytecode, operands,
- operand_count);
+ last_bytecode_start_ = bytecodes()->size();
+ // Emit prefix bytecode for scale if required.
+ if (Bytecodes::OperandScaleRequiresPrefixBytecode(operand_scale)) {
+ bytecodes()->push_back(Bytecodes::ToByte(
+ Bytecodes::OperandScaleToPrefixBytecode(operand_scale)));
}
- last_bytecode_start_ = bytecodes()->size();
+ // Emit bytecode.
bytecodes()->push_back(Bytecodes::ToByte(bytecode));
+
+ // Emit operands.
for (int i = 0; i < operand_count; i++) {
- DCHECK(OperandIsValid(bytecode, i, operands[i]));
- switch (Bytecodes::GetOperandSize(bytecode, i)) {
+ DCHECK(OperandIsValid(bytecode, operand_scale, i, operands[i]));
+ switch (Bytecodes::GetOperandSize(bytecode, i, operand_scale)) {
case OperandSize::kNone:
UNREACHABLE();
break;
@@ -171,58 +207,61 @@ void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t(&operands)[N]) {
operand_bytes + 2);
break;
}
+ case OperandSize::kQuad: {
+ uint8_t operand_bytes[4];
+ WriteUnalignedUInt32(operand_bytes, operands[i]);
+ bytecodes()->insert(bytecodes()->end(), operand_bytes,
+ operand_bytes + 4);
+ break;
+ }
}
}
-
- if (register_operand_count > 0) {
- register_translator()->TranslateOutputRegisters();
- }
}
+void BytecodeArrayBuilder::Output(Bytecode bytecode) {
+ // Don't output dead code.
+ if (exit_seen_in_block_) return;
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
- uint32_t operand1, uint32_t operand2,
- uint32_t operand3) {
- uint32_t operands[] = {operand0, operand1, operand2, operand3};
- Output(bytecode, operands);
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
+ last_bytecode_start_ = bytecodes()->size();
+ bytecodes()->push_back(Bytecodes::ToByte(bytecode));
}
+void BytecodeArrayBuilder::OutputScaled(Bytecode bytecode,
+ OperandScale operand_scale,
+ uint32_t operand0, uint32_t operand1,
+ uint32_t operand2, uint32_t operand3) {
+ uint32_t operands[] = {operand0, operand1, operand2, operand3};
+ Output(bytecode, operands, operand_scale);
+}
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
- uint32_t operand1, uint32_t operand2) {
+void BytecodeArrayBuilder::OutputScaled(Bytecode bytecode,
+ OperandScale operand_scale,
+ uint32_t operand0, uint32_t operand1,
+ uint32_t operand2) {
uint32_t operands[] = {operand0, operand1, operand2};
- Output(bytecode, operands);
+ Output(bytecode, operands, operand_scale);
}
-
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
- uint32_t operand1) {
+void BytecodeArrayBuilder::OutputScaled(Bytecode bytecode,
+ OperandScale operand_scale,
+ uint32_t operand0, uint32_t operand1) {
uint32_t operands[] = {operand0, operand1};
- Output(bytecode, operands);
+ Output(bytecode, operands, operand_scale);
}
-
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0) {
+void BytecodeArrayBuilder::OutputScaled(Bytecode bytecode,
+ OperandScale operand_scale,
+ uint32_t operand0) {
uint32_t operands[] = {operand0};
- Output(bytecode, operands);
-}
-
-
-void BytecodeArrayBuilder::Output(Bytecode bytecode) {
- // Don't output dead code.
- if (exit_seen_in_block_) {
- source_position_table_builder_.RevertPosition(bytecodes()->size());
- return;
- }
-
- DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
- last_bytecode_start_ = bytecodes()->size();
- bytecodes()->push_back(Bytecodes::ToByte(bytecode));
+ Output(bytecode, operands, operand_scale);
}
BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperation(Token::Value op,
Register reg) {
- Output(BytecodeForBinaryOperation(op), reg.ToRawOperand());
+ OperandScale operand_scale = OperandSizesToScale(SizeForRegisterOperand(reg));
+ OutputScaled(BytecodeForBinaryOperation(op), operand_scale,
+ RegisterOperand(reg));
return *this;
}
@@ -245,7 +284,9 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::TypeOf() {
BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(Token::Value op,
Register reg) {
- Output(BytecodeForCompareOperation(op), reg.ToRawOperand());
+ OperandScale operand_scale = OperandSizesToScale(SizeForRegisterOperand(reg));
+ OutputScaled(BytecodeForCompareOperation(op), operand_scale,
+ RegisterOperand(reg));
return *this;
}
@@ -255,10 +296,11 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(
int32_t raw_smi = smi->value();
if (raw_smi == 0) {
Output(Bytecode::kLdaZero);
- } else if (raw_smi >= -128 && raw_smi <= 127) {
- Output(Bytecode::kLdaSmi8, static_cast<uint8_t>(raw_smi));
} else {
- LoadLiteral(Handle<Object>(smi, isolate_));
+ OperandSize operand_size = SizeForSignedOperand(raw_smi);
+ OperandScale operand_scale = OperandSizesToScale(operand_size);
+ OutputScaled(Bytecode::kLdaSmi, operand_scale,
+ SignedOperand(raw_smi, operand_size));
}
return *this;
}
@@ -266,13 +308,9 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(Handle<Object> object) {
size_t entry = GetConstantPoolEntry(object);
- if (FitsInIdx8Operand(entry)) {
- Output(Bytecode::kLdaConstant, static_cast<uint8_t>(entry));
- } else if (FitsInIdx16Operand(entry)) {
- Output(Bytecode::kLdaConstantWide, static_cast<uint16_t>(entry));
- } else {
- UNIMPLEMENTED();
- }
+ OperandScale operand_scale =
+ OperandSizesToScale(SizeForUnsignedOperand(entry));
+ OutputScaled(Bytecode::kLdaConstant, operand_scale, UnsignedOperand(entry));
return *this;
}
@@ -306,20 +344,12 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadFalse() {
return *this;
}
-
-BytecodeArrayBuilder& BytecodeArrayBuilder::LoadBooleanConstant(bool value) {
- if (value) {
- LoadTrue();
- } else {
- LoadFalse();
- }
- return *this;
-}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadAccumulatorWithRegister(
Register reg) {
if (!IsRegisterInAccumulator(reg)) {
- Output(Bytecode::kLdar, reg.ToRawOperand());
+ OperandScale operand_scale =
+ OperandSizesToScale(SizeForRegisterOperand(reg));
+ OutputScaled(Bytecode::kLdar, operand_scale, RegisterOperand(reg));
}
return *this;
}
@@ -328,7 +358,9 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadAccumulatorWithRegister(
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreAccumulatorInRegister(
Register reg) {
if (!IsRegisterInAccumulator(reg)) {
- Output(Bytecode::kStar, reg.ToRawOperand());
+ OperandScale operand_scale =
+ OperandSizesToScale(SizeForRegisterOperand(reg));
+ OutputScaled(Bytecode::kStar, operand_scale, RegisterOperand(reg));
}
return *this;
}
@@ -337,164 +369,98 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreAccumulatorInRegister(
BytecodeArrayBuilder& BytecodeArrayBuilder::MoveRegister(Register from,
Register to) {
DCHECK(from != to);
- if (FitsInReg8Operand(from) && FitsInReg8Operand(to)) {
- Output(Bytecode::kMov, from.ToRawOperand(), to.ToRawOperand());
- } else if (FitsInReg16Operand(from) && FitsInReg16Operand(to)) {
- Output(Bytecode::kMovWide, from.ToRawOperand(), to.ToRawOperand());
- } else {
- UNIMPLEMENTED();
- }
+ OperandScale operand_scale = OperandSizesToScale(SizeForRegisterOperand(from),
+ SizeForRegisterOperand(to));
+ OutputScaled(Bytecode::kMov, operand_scale, RegisterOperand(from),
+ RegisterOperand(to));
return *this;
}
-void BytecodeArrayBuilder::MoveRegisterUntranslated(Register from,
- Register to) {
- // Move bytecodes modify the stack. Checking validity is an
- // essential mitigation against corrupting the stack.
- if (FitsInReg8OperandUntranslated(from)) {
- CHECK(RegisterIsValid(from, OperandType::kReg8) &&
- RegisterIsValid(to, OperandType::kReg16));
- } else if (FitsInReg8OperandUntranslated(to)) {
- CHECK(RegisterIsValid(from, OperandType::kReg16) &&
- RegisterIsValid(to, OperandType::kReg8));
- } else {
- UNIMPLEMENTED();
- }
- Output(Bytecode::kMovWide, from.ToRawOperand(), to.ToRawOperand());
-}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(
const Handle<String> name, int feedback_slot, TypeofMode typeof_mode) {
// TODO(rmcilroy): Potentially store typeof information in an
// operand rather than having extra bytecodes.
Bytecode bytecode = BytecodeForLoadGlobal(typeof_mode);
size_t name_index = GetConstantPoolEntry(name);
- if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
- Output(bytecode, static_cast<uint8_t>(name_index),
- static_cast<uint8_t>(feedback_slot));
- } else if (FitsInIdx16Operand(name_index) &&
- FitsInIdx16Operand(feedback_slot)) {
- Output(BytecodeForWideOperands(bytecode), static_cast<uint16_t>(name_index),
- static_cast<uint16_t>(feedback_slot));
- } else {
- UNIMPLEMENTED();
- }
+ OperandScale operand_scale =
+ OperandSizesToScale(SizeForUnsignedOperand(name_index),
+ SizeForUnsignedOperand(feedback_slot));
+ OutputScaled(bytecode, operand_scale, UnsignedOperand(name_index),
+ UnsignedOperand(feedback_slot));
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreGlobal(
const Handle<String> name, int feedback_slot, LanguageMode language_mode) {
Bytecode bytecode = BytecodeForStoreGlobal(language_mode);
size_t name_index = GetConstantPoolEntry(name);
- if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
- Output(bytecode, static_cast<uint8_t>(name_index),
- static_cast<uint8_t>(feedback_slot));
- } else if (FitsInIdx16Operand(name_index) &&
- FitsInIdx16Operand(feedback_slot)) {
- Output(BytecodeForWideOperands(bytecode), static_cast<uint16_t>(name_index),
- static_cast<uint16_t>(feedback_slot));
- } else {
- UNIMPLEMENTED();
- }
+ OperandScale operand_scale =
+ OperandSizesToScale(SizeForUnsignedOperand(name_index),
+ SizeForUnsignedOperand(feedback_slot));
+ OutputScaled(bytecode, operand_scale, UnsignedOperand(name_index),
+ UnsignedOperand(feedback_slot));
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadContextSlot(Register context,
int slot_index) {
- DCHECK(slot_index >= 0);
- if (FitsInIdx8Operand(slot_index)) {
- Output(Bytecode::kLdaContextSlot, context.ToRawOperand(),
- static_cast<uint8_t>(slot_index));
- } else if (FitsInIdx16Operand(slot_index)) {
- Output(Bytecode::kLdaContextSlotWide, context.ToRawOperand(),
- static_cast<uint16_t>(slot_index));
- } else {
- UNIMPLEMENTED();
- }
+ OperandScale operand_scale = OperandSizesToScale(
+ SizeForRegisterOperand(context), SizeForUnsignedOperand(slot_index));
+ OutputScaled(Bytecode::kLdaContextSlot, operand_scale,
+ RegisterOperand(context), UnsignedOperand(slot_index));
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreContextSlot(Register context,
int slot_index) {
- DCHECK(slot_index >= 0);
- if (FitsInIdx8Operand(slot_index)) {
- Output(Bytecode::kStaContextSlot, context.ToRawOperand(),
- static_cast<uint8_t>(slot_index));
- } else if (FitsInIdx16Operand(slot_index)) {
- Output(Bytecode::kStaContextSlotWide, context.ToRawOperand(),
- static_cast<uint16_t>(slot_index));
- } else {
- UNIMPLEMENTED();
- }
+ OperandScale operand_scale = OperandSizesToScale(
+ SizeForRegisterOperand(context), SizeForUnsignedOperand(slot_index));
+ OutputScaled(Bytecode::kStaContextSlot, operand_scale,
+ RegisterOperand(context), UnsignedOperand(slot_index));
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupSlot(
const Handle<String> name, TypeofMode typeof_mode) {
Bytecode bytecode = (typeof_mode == INSIDE_TYPEOF)
? Bytecode::kLdaLookupSlotInsideTypeof
: Bytecode::kLdaLookupSlot;
size_t name_index = GetConstantPoolEntry(name);
- if (FitsInIdx8Operand(name_index)) {
- Output(bytecode, static_cast<uint8_t>(name_index));
- } else if (FitsInIdx16Operand(name_index)) {
- Output(BytecodeForWideOperands(bytecode),
- static_cast<uint16_t>(name_index));
- } else {
- UNIMPLEMENTED();
- }
+ OperandScale operand_scale =
+ OperandSizesToScale(SizeForUnsignedOperand(name_index));
+ OutputScaled(bytecode, operand_scale, UnsignedOperand(name_index));
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreLookupSlot(
const Handle<String> name, LanguageMode language_mode) {
Bytecode bytecode = BytecodeForStoreLookupSlot(language_mode);
size_t name_index = GetConstantPoolEntry(name);
- if (FitsInIdx8Operand(name_index)) {
- Output(bytecode, static_cast<uint8_t>(name_index));
- } else if (FitsInIdx16Operand(name_index)) {
- Output(BytecodeForWideOperands(bytecode),
- static_cast<uint16_t>(name_index));
- } else {
- UNIMPLEMENTED();
- }
+ OperandScale operand_scale =
+ OperandSizesToScale(SizeForUnsignedOperand(name_index));
+ OutputScaled(bytecode, operand_scale, UnsignedOperand(name_index));
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNamedProperty(
Register object, const Handle<Name> name, int feedback_slot) {
size_t name_index = GetConstantPoolEntry(name);
- if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
- Output(Bytecode::kLoadIC, object.ToRawOperand(),
- static_cast<uint8_t>(name_index),
- static_cast<uint8_t>(feedback_slot));
- } else if (FitsInIdx16Operand(name_index) &&
- FitsInIdx16Operand(feedback_slot)) {
- Output(Bytecode::kLoadICWide, object.ToRawOperand(),
- static_cast<uint16_t>(name_index),
- static_cast<uint16_t>(feedback_slot));
- } else {
- UNIMPLEMENTED();
- }
+ OperandScale operand_scale = OperandSizesToScale(
+ SizeForRegisterOperand(object), SizeForUnsignedOperand(name_index),
+ SizeForUnsignedOperand(feedback_slot));
+ OutputScaled(Bytecode::kLoadIC, operand_scale, RegisterOperand(object),
+ UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty(
Register object, int feedback_slot) {
- if (FitsInIdx8Operand(feedback_slot)) {
- Output(Bytecode::kKeyedLoadIC, object.ToRawOperand(),
- static_cast<uint8_t>(feedback_slot));
- } else if (FitsInIdx16Operand(feedback_slot)) {
- Output(Bytecode::kKeyedLoadICWide, object.ToRawOperand(),
- static_cast<uint16_t>(feedback_slot));
- } else {
- UNIMPLEMENTED();
- }
+ OperandScale operand_scale = OperandSizesToScale(
+ SizeForRegisterOperand(object), SizeForUnsignedOperand(feedback_slot));
+ OutputScaled(Bytecode::kKeyedLoadIC, operand_scale, RegisterOperand(object),
+ UnsignedOperand(feedback_slot));
return *this;
}
@@ -503,17 +469,11 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
LanguageMode language_mode) {
Bytecode bytecode = BytecodeForStoreIC(language_mode);
size_t name_index = GetConstantPoolEntry(name);
- if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
- Output(bytecode, object.ToRawOperand(), static_cast<uint8_t>(name_index),
- static_cast<uint8_t>(feedback_slot));
- } else if (FitsInIdx16Operand(name_index) &&
- FitsInIdx16Operand(feedback_slot)) {
- Output(BytecodeForWideOperands(bytecode), object.ToRawOperand(),
- static_cast<uint16_t>(name_index),
- static_cast<uint16_t>(feedback_slot));
- } else {
- UNIMPLEMENTED();
- }
+ OperandScale operand_scale = OperandSizesToScale(
+ SizeForRegisterOperand(object), SizeForUnsignedOperand(name_index),
+ SizeForUnsignedOperand(feedback_slot));
+ OutputScaled(bytecode, operand_scale, RegisterOperand(object),
+ UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
return *this;
}
@@ -522,15 +482,11 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreKeyedProperty(
Register object, Register key, int feedback_slot,
LanguageMode language_mode) {
Bytecode bytecode = BytecodeForKeyedStoreIC(language_mode);
- if (FitsInIdx8Operand(feedback_slot)) {
- Output(bytecode, object.ToRawOperand(), key.ToRawOperand(),
- static_cast<uint8_t>(feedback_slot));
- } else if (FitsInIdx16Operand(feedback_slot)) {
- Output(BytecodeForWideOperands(bytecode), object.ToRawOperand(),
- key.ToRawOperand(), static_cast<uint16_t>(feedback_slot));
- } else {
- UNIMPLEMENTED();
- }
+ OperandScale operand_scale = OperandSizesToScale(
+ SizeForRegisterOperand(object), SizeForRegisterOperand(key),
+ SizeForUnsignedOperand(feedback_slot));
+ OutputScaled(bytecode, operand_scale, RegisterOperand(object),
+ RegisterOperand(key), UnsignedOperand(feedback_slot));
return *this;
}
@@ -538,16 +494,10 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreKeyedProperty(
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateClosure(
Handle<SharedFunctionInfo> shared_info, PretenureFlag tenured) {
size_t entry = GetConstantPoolEntry(shared_info);
- DCHECK(FitsInImm8Operand(tenured));
- if (FitsInIdx8Operand(entry)) {
- Output(Bytecode::kCreateClosure, static_cast<uint8_t>(entry),
- static_cast<uint8_t>(tenured));
- } else if (FitsInIdx16Operand(entry)) {
- Output(Bytecode::kCreateClosureWide, static_cast<uint16_t>(entry),
- static_cast<uint8_t>(tenured));
- } else {
- UNIMPLEMENTED();
- }
+ OperandScale operand_scale =
+ OperandSizesToScale(SizeForUnsignedOperand(entry));
+ OutputScaled(Bytecode::kCreateClosure, operand_scale, UnsignedOperand(entry),
+ UnsignedOperand(static_cast<size_t>(tenured)));
return *this;
}
@@ -565,73 +515,55 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArguments(
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateRegExpLiteral(
Handle<String> pattern, int literal_index, int flags) {
- DCHECK(FitsInImm8Operand(flags)); // Flags should fit in 8 bits.
size_t pattern_entry = GetConstantPoolEntry(pattern);
- if (FitsInIdx8Operand(literal_index) && FitsInIdx8Operand(pattern_entry)) {
- Output(Bytecode::kCreateRegExpLiteral, static_cast<uint8_t>(pattern_entry),
- static_cast<uint8_t>(literal_index), static_cast<uint8_t>(flags));
- } else if (FitsInIdx16Operand(literal_index) &&
- FitsInIdx16Operand(pattern_entry)) {
- Output(Bytecode::kCreateRegExpLiteralWide,
- static_cast<uint16_t>(pattern_entry),
- static_cast<uint16_t>(literal_index), static_cast<uint8_t>(flags));
- } else {
- UNIMPLEMENTED();
- }
+ OperandScale operand_scale = OperandSizesToScale(
+ SizeForUnsignedOperand(pattern_entry),
+ SizeForUnsignedOperand(literal_index), SizeForUnsignedOperand(flags));
+ OutputScaled(Bytecode::kCreateRegExpLiteral, operand_scale,
+ UnsignedOperand(pattern_entry), UnsignedOperand(literal_index),
+ UnsignedOperand(flags));
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArrayLiteral(
Handle<FixedArray> constant_elements, int literal_index, int flags) {
- DCHECK(FitsInImm8Operand(flags)); // Flags should fit in 8 bits.
size_t constant_elements_entry = GetConstantPoolEntry(constant_elements);
- if (FitsInIdx8Operand(literal_index) &&
- FitsInIdx8Operand(constant_elements_entry)) {
- Output(Bytecode::kCreateArrayLiteral,
- static_cast<uint8_t>(constant_elements_entry),
- static_cast<uint8_t>(literal_index), static_cast<uint8_t>(flags));
- } else if (FitsInIdx16Operand(literal_index) &&
- FitsInIdx16Operand(constant_elements_entry)) {
- Output(Bytecode::kCreateArrayLiteralWide,
- static_cast<uint16_t>(constant_elements_entry),
- static_cast<uint16_t>(literal_index), static_cast<uint8_t>(flags));
- } else {
- UNIMPLEMENTED();
- }
+ OperandScale operand_scale = OperandSizesToScale(
+ SizeForUnsignedOperand(constant_elements_entry),
+ SizeForUnsignedOperand(literal_index), SizeForUnsignedOperand(flags));
+ OutputScaled(Bytecode::kCreateArrayLiteral, operand_scale,
+ UnsignedOperand(constant_elements_entry),
+ UnsignedOperand(literal_index), UnsignedOperand(flags));
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateObjectLiteral(
Handle<FixedArray> constant_properties, int literal_index, int flags) {
- DCHECK(FitsInImm8Operand(flags)); // Flags should fit in 8 bits.
size_t constant_properties_entry = GetConstantPoolEntry(constant_properties);
- if (FitsInIdx8Operand(literal_index) &&
- FitsInIdx8Operand(constant_properties_entry)) {
- Output(Bytecode::kCreateObjectLiteral,
- static_cast<uint8_t>(constant_properties_entry),
- static_cast<uint8_t>(literal_index), static_cast<uint8_t>(flags));
- } else if (FitsInIdx16Operand(literal_index) &&
- FitsInIdx16Operand(constant_properties_entry)) {
- Output(Bytecode::kCreateObjectLiteralWide,
- static_cast<uint16_t>(constant_properties_entry),
- static_cast<uint16_t>(literal_index), static_cast<uint8_t>(flags));
- } else {
- UNIMPLEMENTED();
- }
+ OperandScale operand_scale = OperandSizesToScale(
+ SizeForUnsignedOperand(constant_properties_entry),
+ SizeForUnsignedOperand(literal_index), SizeForUnsignedOperand(flags));
+ OutputScaled(Bytecode::kCreateObjectLiteral, operand_scale,
+ UnsignedOperand(constant_properties_entry),
+ UnsignedOperand(literal_index), UnsignedOperand(flags));
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::PushContext(Register context) {
- Output(Bytecode::kPushContext, context.ToRawOperand());
+ OperandScale operand_scale =
+ OperandSizesToScale(SizeForRegisterOperand(context));
+ OutputScaled(Bytecode::kPushContext, operand_scale, RegisterOperand(context));
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::PopContext(Register context) {
- Output(Bytecode::kPopContext, context.ToRawOperand());
+ OperandScale operand_scale =
+ OperandSizesToScale(SizeForRegisterOperand(context));
+ OutputScaled(Bytecode::kPopContext, operand_scale, RegisterOperand(context));
return *this;
}
@@ -649,7 +581,6 @@ bool BytecodeArrayBuilder::NeedToBooleanCast() {
case Bytecode::kTestEqual:
case Bytecode::kTestNotEqual:
case Bytecode::kTestEqualStrict:
- case Bytecode::kTestNotEqualStrict:
case Bytecode::kTestLessThan:
case Bytecode::kTestLessThanOrEqual:
case Bytecode::kTestGreaterThan:
@@ -677,7 +608,6 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToName() {
case Bytecode::kToName:
case Bytecode::kTypeOf:
return *this;
- case Bytecode::kLdaConstantWide:
case Bytecode::kLdaConstant: {
Handle<Object> object = previous_bytecode.GetConstantForIndexOperand(0);
if (object->IsName()) return *this;
@@ -716,8 +646,12 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(const BytecodeLabel& target,
BytecodeLabel* label) {
DCHECK(!label->is_bound());
DCHECK(target.is_bound());
- PatchJump(bytecodes()->begin() + target.offset(),
- bytecodes()->begin() + label->offset());
+ if (label->is_forward_target()) {
+ // An earlier jump instruction refers to this label. Update it's location.
+ PatchJump(bytecodes()->begin() + target.offset(),
+ bytecodes()->begin() + label->offset());
+ // Now treat as if the label will only be back referred to.
+ }
label->bind_to(target.offset());
LeaveBasicBlock();
return *this;
@@ -746,38 +680,10 @@ Bytecode BytecodeArrayBuilder::GetJumpWithConstantOperand(
return Bytecode::kJumpIfUndefinedConstant;
default:
UNREACHABLE();
- return static_cast<Bytecode>(-1);
- }
-}
-
-
-// static
-Bytecode BytecodeArrayBuilder::GetJumpWithConstantWideOperand(
- Bytecode jump_bytecode) {
- switch (jump_bytecode) {
- case Bytecode::kJump:
- return Bytecode::kJumpConstantWide;
- case Bytecode::kJumpIfTrue:
- return Bytecode::kJumpIfTrueConstantWide;
- case Bytecode::kJumpIfFalse:
- return Bytecode::kJumpIfFalseConstantWide;
- case Bytecode::kJumpIfToBooleanTrue:
- return Bytecode::kJumpIfToBooleanTrueConstantWide;
- case Bytecode::kJumpIfToBooleanFalse:
- return Bytecode::kJumpIfToBooleanFalseConstantWide;
- case Bytecode::kJumpIfNotHole:
- return Bytecode::kJumpIfNotHoleConstantWide;
- case Bytecode::kJumpIfNull:
- return Bytecode::kJumpIfNullConstantWide;
- case Bytecode::kJumpIfUndefined:
- return Bytecode::kJumpIfUndefinedConstantWide;
- default:
- UNREACHABLE();
- return static_cast<Bytecode>(-1);
+ return Bytecode::kIllegal;
}
}
-
// static
Bytecode BytecodeArrayBuilder::GetJumpWithToBoolean(Bytecode jump_bytecode) {
switch (jump_bytecode) {
@@ -793,7 +699,7 @@ Bytecode BytecodeArrayBuilder::GetJumpWithToBoolean(Bytecode jump_bytecode) {
default:
UNREACHABLE();
}
- return static_cast<Bytecode>(-1);
+ return Bytecode::kIllegal;
}
@@ -803,54 +709,88 @@ void BytecodeArrayBuilder::PatchIndirectJumpWith8BitOperand(
DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
ZoneVector<uint8_t>::iterator operand_location = jump_location + 1;
DCHECK_EQ(*operand_location, 0);
- if (FitsInImm8Operand(delta)) {
- // The jump fits within the range of an Imm8 operand, so cancel
+ if (SizeForSignedOperand(delta) == OperandSize::kByte) {
+ // The jump fits within the range of an Imm operand, so cancel
// the reservation and jump directly.
constant_array_builder()->DiscardReservedEntry(OperandSize::kByte);
*operand_location = static_cast<uint8_t>(delta);
} else {
- // The jump does not fit within the range of an Imm8 operand, so
+ // The jump does not fit within the range of an Imm operand, so
// commit reservation putting the offset into the constant pool,
// and update the jump instruction and operand.
size_t entry = constant_array_builder()->CommitReservedEntry(
OperandSize::kByte, handle(Smi::FromInt(delta), isolate()));
- DCHECK(FitsInIdx8Operand(entry));
+ DCHECK(SizeForUnsignedOperand(entry) == OperandSize::kByte);
jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
*jump_location = Bytecodes::ToByte(jump_bytecode);
*operand_location = static_cast<uint8_t>(entry);
}
}
-
void BytecodeArrayBuilder::PatchIndirectJumpWith16BitOperand(
const ZoneVector<uint8_t>::iterator& jump_location, int delta) {
- DCHECK(Bytecodes::IsJumpConstantWide(Bytecodes::FromByte(*jump_location)));
+ Bytecode jump_bytecode = Bytecodes::FromByte(*jump_location);
+ DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
ZoneVector<uint8_t>::iterator operand_location = jump_location + 1;
- size_t entry = constant_array_builder()->CommitReservedEntry(
- OperandSize::kShort, handle(Smi::FromInt(delta), isolate()));
- DCHECK(FitsInIdx16Operand(entry));
uint8_t operand_bytes[2];
- WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(entry));
+ if (SizeForSignedOperand(delta) <= OperandSize::kShort) {
+ constant_array_builder()->DiscardReservedEntry(OperandSize::kShort);
+ WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(delta));
+ } else {
+ jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
+ *jump_location = Bytecodes::ToByte(jump_bytecode);
+ size_t entry = constant_array_builder()->CommitReservedEntry(
+ OperandSize::kShort, handle(Smi::FromInt(delta), isolate()));
+ WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(entry));
+ }
DCHECK(*operand_location == 0 && *(operand_location + 1) == 0);
*operand_location++ = operand_bytes[0];
*operand_location = operand_bytes[1];
}
+void BytecodeArrayBuilder::PatchIndirectJumpWith32BitOperand(
+ const ZoneVector<uint8_t>::iterator& jump_location, int delta) {
+ DCHECK(Bytecodes::IsJumpImmediate(Bytecodes::FromByte(*jump_location)));
+ constant_array_builder()->DiscardReservedEntry(OperandSize::kQuad);
+ ZoneVector<uint8_t>::iterator operand_location = jump_location + 1;
+ uint8_t operand_bytes[4];
+ WriteUnalignedUInt32(operand_bytes, static_cast<uint32_t>(delta));
+ DCHECK(*operand_location == 0 && *(operand_location + 1) == 0 &&
+ *(operand_location + 2) == 0 && *(operand_location + 3) == 0);
+ *operand_location++ = operand_bytes[0];
+ *operand_location++ = operand_bytes[1];
+ *operand_location++ = operand_bytes[2];
+ *operand_location = operand_bytes[3];
+}
void BytecodeArrayBuilder::PatchJump(
const ZoneVector<uint8_t>::iterator& jump_target,
const ZoneVector<uint8_t>::iterator& jump_location) {
- Bytecode jump_bytecode = Bytecodes::FromByte(*jump_location);
int delta = static_cast<int>(jump_target - jump_location);
+ Bytecode jump_bytecode = Bytecodes::FromByte(*jump_location);
+ int prefix_offset = 0;
+ OperandScale operand_scale = OperandScale::kSingle;
+ if (Bytecodes::IsPrefixScalingBytecode(jump_bytecode)) {
+ // If a prefix scaling bytecode is emitted the target offset is one
+ // less than the case of no prefix scaling bytecode.
+ delta -= 1;
+ prefix_offset = 1;
+ operand_scale = Bytecodes::PrefixBytecodeToOperandScale(jump_bytecode);
+ jump_bytecode = Bytecodes::FromByte(*(jump_location + prefix_offset));
+ }
+
DCHECK(Bytecodes::IsJump(jump_bytecode));
- switch (Bytecodes::GetOperandSize(jump_bytecode, 0)) {
- case OperandSize::kByte:
+ switch (operand_scale) {
+ case OperandScale::kSingle:
PatchIndirectJumpWith8BitOperand(jump_location, delta);
break;
- case OperandSize::kShort:
- PatchIndirectJumpWith16BitOperand(jump_location, delta);
+ case OperandScale::kDouble:
+ PatchIndirectJumpWith16BitOperand(jump_location + prefix_offset, delta);
break;
- case OperandSize::kNone:
+ case OperandScale::kQuadruple:
+ PatchIndirectJumpWith32BitOperand(jump_location + prefix_offset, delta);
+ break;
+ default:
UNREACHABLE();
}
unbound_jumps_--;
@@ -860,10 +800,7 @@ void BytecodeArrayBuilder::PatchJump(
BytecodeArrayBuilder& BytecodeArrayBuilder::OutputJump(Bytecode jump_bytecode,
BytecodeLabel* label) {
// Don't emit dead code.
- if (exit_seen_in_block_) {
- source_position_table_builder_.RevertPosition(bytecodes()->size());
- return *this;
- }
+ if (exit_seen_in_block_) return *this;
// Check if the value in accumulator is boolean, if not choose an
// appropriate JumpIfToBoolean bytecode.
@@ -877,22 +814,14 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::OutputJump(Bytecode jump_bytecode,
CHECK_LE(bytecodes()->size(), static_cast<size_t>(kMaxInt));
size_t abs_delta = bytecodes()->size() - label->offset();
int delta = -static_cast<int>(abs_delta);
-
- if (FitsInImm8Operand(delta)) {
- Output(jump_bytecode, static_cast<uint8_t>(delta));
- } else {
- size_t entry =
- GetConstantPoolEntry(handle(Smi::FromInt(delta), isolate()));
- if (FitsInIdx8Operand(entry)) {
- Output(GetJumpWithConstantOperand(jump_bytecode),
- static_cast<uint8_t>(entry));
- } else if (FitsInIdx16Operand(entry)) {
- Output(GetJumpWithConstantWideOperand(jump_bytecode),
- static_cast<uint16_t>(entry));
- } else {
- UNREACHABLE();
- }
+ OperandSize operand_size = SizeForSignedOperand(delta);
+ if (operand_size > OperandSize::kByte) {
+ // Adjust for scaling byte prefix for wide jump offset.
+ DCHECK_LE(delta, 0);
+ delta -= 1;
}
+ OutputScaled(jump_bytecode, OperandSizesToScale(operand_size),
+ SignedOperand(delta, operand_size));
} else {
// The label has not yet been bound so this is a forward reference
// that will be patched when the label is bound. We create a
@@ -904,16 +833,7 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::OutputJump(Bytecode jump_bytecode,
unbound_jumps_++;
OperandSize reserved_operand_size =
constant_array_builder()->CreateReservedEntry();
- switch (reserved_operand_size) {
- case OperandSize::kByte:
- Output(jump_bytecode, 0);
- break;
- case OperandSize::kShort:
- Output(GetJumpWithConstantWideOperand(jump_bytecode), 0);
- break;
- case OperandSize::kNone:
- UNREACHABLE();
- }
+ OutputScaled(jump_bytecode, OperandSizesToScale(reserved_operand_size), 0);
}
LeaveBasicBlock();
return *this;
@@ -970,6 +890,7 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::ReThrow() {
BytecodeArrayBuilder& BytecodeArrayBuilder::Return() {
+ SetReturnPosition();
Output(Bytecode::kReturn);
exit_seen_in_block_ = true;
return *this;
@@ -982,44 +903,40 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::Debugger() {
BytecodeArrayBuilder& BytecodeArrayBuilder::ForInPrepare(
Register cache_info_triple) {
- if (FitsInReg8Operand(cache_info_triple)) {
- Output(Bytecode::kForInPrepare, cache_info_triple.ToRawOperand());
- } else if (FitsInReg16Operand(cache_info_triple)) {
- Output(Bytecode::kForInPrepareWide, cache_info_triple.ToRawOperand());
- } else {
- UNIMPLEMENTED();
- }
+ OperandScale operand_scale =
+ OperandSizesToScale(SizeForRegisterOperand(cache_info_triple));
+ OutputScaled(Bytecode::kForInPrepare, operand_scale,
+ RegisterOperand(cache_info_triple));
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::ForInDone(Register index,
Register cache_length) {
- Output(Bytecode::kForInDone, index.ToRawOperand(),
- cache_length.ToRawOperand());
+ OperandScale operand_scale = OperandSizesToScale(
+ SizeForRegisterOperand(index), SizeForRegisterOperand(cache_length));
+ OutputScaled(Bytecode::kForInDone, operand_scale, RegisterOperand(index),
+ RegisterOperand(cache_length));
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::ForInNext(
- Register receiver, Register index, Register cache_type_array_pair) {
- if (FitsInReg8Operand(receiver) && FitsInReg8Operand(index) &&
- FitsInReg8Operand(cache_type_array_pair)) {
- Output(Bytecode::kForInNext, receiver.ToRawOperand(), index.ToRawOperand(),
- cache_type_array_pair.ToRawOperand());
- } else if (FitsInReg16Operand(receiver) && FitsInReg16Operand(index) &&
- FitsInReg16Operand(cache_type_array_pair)) {
- Output(Bytecode::kForInNextWide, receiver.ToRawOperand(),
- index.ToRawOperand(), cache_type_array_pair.ToRawOperand());
- } else {
- UNIMPLEMENTED();
- }
+ Register receiver, Register index, Register cache_type_array_pair,
+ int feedback_slot) {
+ OperandScale operand_scale = OperandSizesToScale(
+ SizeForRegisterOperand(receiver), SizeForRegisterOperand(index),
+ SizeForRegisterOperand(cache_type_array_pair),
+ SizeForUnsignedOperand(feedback_slot));
+ OutputScaled(Bytecode::kForInNext, operand_scale, RegisterOperand(receiver),
+ RegisterOperand(index), RegisterOperand(cache_type_array_pair),
+ UnsignedOperand(feedback_slot));
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::ForInStep(Register index) {
- Output(Bytecode::kForInStep, index.ToRawOperand());
+ OperandScale operand_scale =
+ OperandSizesToScale(SizeForRegisterOperand(index));
+ OutputScaled(Bytecode::kForInStep, operand_scale, RegisterOperand(index));
return *this;
}
@@ -1051,12 +968,12 @@ void BytecodeArrayBuilder::LeaveBasicBlock() {
exit_seen_in_block_ = false;
}
-void BytecodeArrayBuilder::EnsureReturn(FunctionLiteral* literal) {
+void BytecodeArrayBuilder::EnsureReturn() {
if (!exit_seen_in_block_) {
LoadUndefined();
- SetReturnPosition(literal);
Return();
}
+ DCHECK(exit_seen_in_block_);
}
BytecodeArrayBuilder& BytecodeArrayBuilder::Call(Register callable,
@@ -1065,23 +982,14 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::Call(Register callable,
int feedback_slot,
TailCallMode tail_call_mode) {
Bytecode bytecode = BytecodeForCall(tail_call_mode);
- if (FitsInReg8Operand(callable) && FitsInReg8Operand(receiver_args) &&
- FitsInIdx8Operand(receiver_args_count) &&
- FitsInIdx8Operand(feedback_slot)) {
- Output(bytecode, callable.ToRawOperand(), receiver_args.ToRawOperand(),
- static_cast<uint8_t>(receiver_args_count),
- static_cast<uint8_t>(feedback_slot));
- } else if (FitsInReg16Operand(callable) &&
- FitsInReg16Operand(receiver_args) &&
- FitsInIdx16Operand(receiver_args_count) &&
- FitsInIdx16Operand(feedback_slot)) {
- bytecode = BytecodeForWideOperands(bytecode);
- Output(bytecode, callable.ToRawOperand(), receiver_args.ToRawOperand(),
- static_cast<uint16_t>(receiver_args_count),
- static_cast<uint16_t>(feedback_slot));
- } else {
- UNIMPLEMENTED();
- }
+ OperandScale operand_scale = OperandSizesToScale(
+ SizeForRegisterOperand(callable), SizeForRegisterOperand(receiver_args),
+ SizeForUnsignedOperand(receiver_args_count),
+ SizeForUnsignedOperand(feedback_slot));
+ OutputScaled(bytecode, operand_scale, RegisterOperand(callable),
+ RegisterOperand(receiver_args),
+ UnsignedOperand(receiver_args_count),
+ UnsignedOperand(feedback_slot));
return *this;
}
@@ -1092,17 +1000,11 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::New(Register constructor,
DCHECK_EQ(0u, arg_count);
first_arg = Register(0);
}
- if (FitsInReg8Operand(constructor) && FitsInReg8Operand(first_arg) &&
- FitsInIdx8Operand(arg_count)) {
- Output(Bytecode::kNew, constructor.ToRawOperand(), first_arg.ToRawOperand(),
- static_cast<uint8_t>(arg_count));
- } else if (FitsInReg16Operand(constructor) && FitsInReg16Operand(first_arg) &&
- FitsInIdx16Operand(arg_count)) {
- Output(Bytecode::kNewWide, constructor.ToRawOperand(),
- first_arg.ToRawOperand(), static_cast<uint16_t>(arg_count));
- } else {
- UNIMPLEMENTED();
- }
+ OperandScale operand_scale = OperandSizesToScale(
+ SizeForRegisterOperand(constructor), SizeForRegisterOperand(first_arg),
+ SizeForUnsignedOperand(arg_count));
+ OutputScaled(Bytecode::kNew, operand_scale, RegisterOperand(constructor),
+ RegisterOperand(first_arg), UnsignedOperand(arg_count));
return *this;
}
@@ -1110,20 +1012,18 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::New(Register constructor,
BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntime(
Runtime::FunctionId function_id, Register first_arg, size_t arg_count) {
DCHECK_EQ(1, Runtime::FunctionForId(function_id)->result_size);
- DCHECK(FitsInIdx16Operand(function_id));
+ DCHECK(SizeForUnsignedOperand(function_id) <= OperandSize::kShort);
if (!first_arg.is_valid()) {
DCHECK_EQ(0u, arg_count);
first_arg = Register(0);
}
- if (FitsInReg8Operand(first_arg) && FitsInIdx8Operand(arg_count)) {
- Output(Bytecode::kCallRuntime, static_cast<uint16_t>(function_id),
- first_arg.ToRawOperand(), static_cast<uint8_t>(arg_count));
- } else if (FitsInReg16Operand(first_arg) && FitsInIdx16Operand(arg_count)) {
- Output(Bytecode::kCallRuntimeWide, static_cast<uint16_t>(function_id),
- first_arg.ToRawOperand(), static_cast<uint16_t>(arg_count));
- } else {
- UNIMPLEMENTED();
- }
+ Bytecode bytecode = IntrinsicsHelper::IsSupported(function_id)
+ ? Bytecode::kInvokeIntrinsic
+ : Bytecode::kCallRuntime;
+ OperandScale operand_scale = OperandSizesToScale(
+ SizeForRegisterOperand(first_arg), SizeForUnsignedOperand(arg_count));
+ OutputScaled(bytecode, operand_scale, static_cast<uint16_t>(function_id),
+ RegisterOperand(first_arg), UnsignedOperand(arg_count));
return *this;
}
@@ -1132,180 +1032,145 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntimeForPair(
Runtime::FunctionId function_id, Register first_arg, size_t arg_count,
Register first_return) {
DCHECK_EQ(2, Runtime::FunctionForId(function_id)->result_size);
- DCHECK(FitsInIdx16Operand(function_id));
+ DCHECK(SizeForUnsignedOperand(function_id) <= OperandSize::kShort);
if (!first_arg.is_valid()) {
DCHECK_EQ(0u, arg_count);
first_arg = Register(0);
}
- if (FitsInReg8Operand(first_arg) && FitsInIdx8Operand(arg_count) &&
- FitsInReg8Operand(first_return)) {
- Output(Bytecode::kCallRuntimeForPair, static_cast<uint16_t>(function_id),
- first_arg.ToRawOperand(), static_cast<uint8_t>(arg_count),
- first_return.ToRawOperand());
- } else if (FitsInReg16Operand(first_arg) && FitsInIdx16Operand(arg_count) &&
- FitsInReg16Operand(first_return)) {
- Output(Bytecode::kCallRuntimeForPairWide,
- static_cast<uint16_t>(function_id), first_arg.ToRawOperand(),
- static_cast<uint16_t>(arg_count), first_return.ToRawOperand());
- } else {
- UNIMPLEMENTED();
- }
+ OperandScale operand_scale = OperandSizesToScale(
+ SizeForRegisterOperand(first_arg), SizeForUnsignedOperand(arg_count),
+ SizeForRegisterOperand(first_return));
+ OutputScaled(Bytecode::kCallRuntimeForPair, operand_scale,
+ static_cast<uint16_t>(function_id), RegisterOperand(first_arg),
+ UnsignedOperand(arg_count), RegisterOperand(first_return));
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CallJSRuntime(
int context_index, Register receiver_args, size_t receiver_args_count) {
- DCHECK(FitsInIdx16Operand(context_index));
- if (FitsInReg8Operand(receiver_args) &&
- FitsInIdx8Operand(receiver_args_count)) {
- Output(Bytecode::kCallJSRuntime, static_cast<uint16_t>(context_index),
- receiver_args.ToRawOperand(),
- static_cast<uint8_t>(receiver_args_count));
- } else if (FitsInReg16Operand(receiver_args) &&
- FitsInIdx16Operand(receiver_args_count)) {
- Output(Bytecode::kCallJSRuntimeWide, static_cast<uint16_t>(context_index),
- receiver_args.ToRawOperand(),
- static_cast<uint16_t>(receiver_args_count));
- } else {
- UNIMPLEMENTED();
- }
+ OperandScale operand_scale =
+ OperandSizesToScale(SizeForUnsignedOperand(context_index),
+ SizeForRegisterOperand(receiver_args),
+ SizeForUnsignedOperand(receiver_args_count));
+ OutputScaled(Bytecode::kCallJSRuntime, operand_scale,
+ UnsignedOperand(context_index), RegisterOperand(receiver_args),
+ UnsignedOperand(receiver_args_count));
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::Delete(Register object,
LanguageMode language_mode) {
- Output(BytecodeForDelete(language_mode), object.ToRawOperand());
+ OperandScale operand_scale =
+ OperandSizesToScale(SizeForRegisterOperand(object));
+ OutputScaled(BytecodeForDelete(language_mode), operand_scale,
+ RegisterOperand(object));
return *this;
}
-
size_t BytecodeArrayBuilder::GetConstantPoolEntry(Handle<Object> object) {
return constant_array_builder()->Insert(object);
}
-void BytecodeArrayBuilder::SetReturnPosition(FunctionLiteral* fun) {
- int pos = std::max(fun->start_position(), fun->end_position() - 1);
- source_position_table_builder_.AddStatementPosition(bytecodes_.size(), pos);
+void BytecodeArrayBuilder::SetReturnPosition() {
+ if (return_position_ == RelocInfo::kNoPosition) return;
+ if (exit_seen_in_block_) return;
+ source_position_table_builder_.AddStatementPosition(bytecodes_.size(),
+ return_position_);
}
void BytecodeArrayBuilder::SetStatementPosition(Statement* stmt) {
if (stmt->position() == RelocInfo::kNoPosition) return;
+ if (exit_seen_in_block_) return;
source_position_table_builder_.AddStatementPosition(bytecodes_.size(),
stmt->position());
}
void BytecodeArrayBuilder::SetExpressionPosition(Expression* expr) {
if (expr->position() == RelocInfo::kNoPosition) return;
+ if (exit_seen_in_block_) return;
source_position_table_builder_.AddExpressionPosition(bytecodes_.size(),
expr->position());
}
+void BytecodeArrayBuilder::SetExpressionAsStatementPosition(Expression* expr) {
+ if (expr->position() == RelocInfo::kNoPosition) return;
+ if (exit_seen_in_block_) return;
+ source_position_table_builder_.AddStatementPosition(bytecodes_.size(),
+ expr->position());
+}
+
bool BytecodeArrayBuilder::TemporaryRegisterIsLive(Register reg) const {
return temporary_register_allocator()->RegisterIsLive(reg);
}
-bool BytecodeArrayBuilder::OperandIsValid(Bytecode bytecode, int operand_index,
+bool BytecodeArrayBuilder::OperandIsValid(Bytecode bytecode,
+ OperandScale operand_scale,
+ int operand_index,
uint32_t operand_value) const {
+ OperandSize operand_size =
+ Bytecodes::GetOperandSize(bytecode, operand_index, operand_scale);
OperandType operand_type = Bytecodes::GetOperandType(bytecode, operand_index);
switch (operand_type) {
case OperandType::kNone:
return false;
- case OperandType::kRegCount16: {
- // Expect kRegCount16 is part of a range previous operand is a
- // valid operand to start a range.
- if (operand_index > 0) {
- OperandType previous_operand_type =
- Bytecodes::GetOperandType(bytecode, operand_index - 1);
- return ((previous_operand_type == OperandType::kMaybeReg16 ||
- previous_operand_type == OperandType::kReg16) &&
- static_cast<uint16_t>(operand_value) == operand_value);
- } else {
- return false;
- }
- }
- case OperandType::kRegCount8: {
- // Expect kRegCount8 is part of a range previous operand is a
- // valid operand to start a range.
+ case OperandType::kRegCount: {
if (operand_index > 0) {
OperandType previous_operand_type =
Bytecodes::GetOperandType(bytecode, operand_index - 1);
- return ((previous_operand_type == OperandType::kMaybeReg8 ||
- previous_operand_type == OperandType::kReg8 ||
- previous_operand_type == OperandType::kMaybeReg16) &&
- static_cast<uint8_t>(operand_value) == operand_value);
- } else {
- return false;
+ if (previous_operand_type != OperandType::kMaybeReg &&
+ previous_operand_type != OperandType::kReg) {
+ return false;
+ }
}
+ } // Fall-through
+ case OperandType::kFlag8:
+ case OperandType::kIdx:
+ case OperandType::kRuntimeId:
+ case OperandType::kImm: {
+ size_t unsigned_value = static_cast<size_t>(operand_value);
+ return SizeForUnsignedOperand(unsigned_value) <= operand_size;
}
- case OperandType::kIdx16:
- return static_cast<uint16_t>(operand_value) == operand_value;
- case OperandType::kImm8:
- case OperandType::kIdx8:
- return static_cast<uint8_t>(operand_value) == operand_value;
- case OperandType::kMaybeReg8:
+ case OperandType::kMaybeReg:
if (operand_value == 0) {
return true;
}
- // Fall-through to kReg8 case.
- case OperandType::kReg8:
- case OperandType::kRegOut8:
- return RegisterIsValid(Register::FromRawOperand(operand_value),
- operand_type);
- case OperandType::kRegOutPair8:
- case OperandType::kRegOutPair16:
- case OperandType::kRegPair8:
- case OperandType::kRegPair16: {
- Register reg0 = Register::FromRawOperand(operand_value);
+ // Fall-through to kReg case.
+ case OperandType::kReg:
+ case OperandType::kRegOut: {
+ Register reg = RegisterFromOperand(operand_value);
+ return RegisterIsValid(reg, operand_size);
+ }
+ case OperandType::kRegOutPair:
+ case OperandType::kRegPair: {
+ Register reg0 = RegisterFromOperand(operand_value);
Register reg1 = Register(reg0.index() + 1);
- return RegisterIsValid(reg0, operand_type) &&
- RegisterIsValid(reg1, operand_type);
+ // The size of reg1 is immaterial.
+ return RegisterIsValid(reg0, operand_size) &&
+ RegisterIsValid(reg1, OperandSize::kQuad);
}
- case OperandType::kRegOutTriple8:
- case OperandType::kRegOutTriple16: {
- Register reg0 = Register::FromRawOperand(operand_value);
+ case OperandType::kRegOutTriple: {
+ Register reg0 = RegisterFromOperand(operand_value);
Register reg1 = Register(reg0.index() + 1);
Register reg2 = Register(reg0.index() + 2);
- return RegisterIsValid(reg0, operand_type) &&
- RegisterIsValid(reg1, operand_type) &&
- RegisterIsValid(reg2, operand_type);
- }
- case OperandType::kMaybeReg16:
- if (operand_value == 0) {
- return true;
- }
- // Fall-through to kReg16 case.
- case OperandType::kReg16:
- case OperandType::kRegOut16: {
- Register reg = Register::FromRawOperand(operand_value);
- return RegisterIsValid(reg, operand_type);
+ // The size of reg1 and reg2 is immaterial.
+ return RegisterIsValid(reg0, operand_size) &&
+ RegisterIsValid(reg1, OperandSize::kQuad) &&
+ RegisterIsValid(reg2, OperandSize::kQuad);
}
}
UNREACHABLE();
return false;
}
-
bool BytecodeArrayBuilder::RegisterIsValid(Register reg,
- OperandType reg_type) const {
+ OperandSize reg_size) const {
if (!reg.is_valid()) {
return false;
}
- switch (Bytecodes::SizeOfOperand(reg_type)) {
- case OperandSize::kByte:
- if (!FitsInReg8OperandUntranslated(reg)) {
- return false;
- }
- break;
- case OperandSize::kShort:
- if (!FitsInReg16OperandUntranslated(reg)) {
- return false;
- }
- break;
- case OperandSize::kNone:
- UNREACHABLE();
- return false;
+ if (SizeForRegisterOperand(reg) > reg_size) {
+ return false;
}
if (reg.is_current_context() || reg.is_function_closure() ||
@@ -1314,15 +1179,10 @@ bool BytecodeArrayBuilder::RegisterIsValid(Register reg,
} else if (reg.is_parameter()) {
int parameter_index = reg.ToParameterIndex(parameter_count());
return parameter_index >= 0 && parameter_index < parameter_count();
- } else if (RegisterTranslator::InTranslationWindow(reg)) {
- return translation_register_count() > 0;
+ } else if (reg.index() < fixed_register_count()) {
+ return true;
} else {
- reg = RegisterTranslator::UntranslateRegister(reg);
- if (reg.index() < fixed_register_count()) {
- return true;
- } else {
- return TemporaryRegisterIsLive(reg);
- }
+ return TemporaryRegisterIsLive(reg);
}
}
@@ -1338,9 +1198,7 @@ bool BytecodeArrayBuilder::IsRegisterInAccumulator(Register reg) {
PreviousBytecodeHelper previous_bytecode(*this);
Bytecode bytecode = previous_bytecode.GetBytecode();
if (bytecode == Bytecode::kLdar || bytecode == Bytecode::kStar) {
- Register previous_reg =
- Register::FromOperand(previous_bytecode.GetOperand(0));
- return previous_reg == reg;
+ return previous_bytecode.GetRegisterOperand(0) == reg;
}
}
return false;
@@ -1374,7 +1232,7 @@ Bytecode BytecodeArrayBuilder::BytecodeForBinaryOperation(Token::Value op) {
return Bytecode::kShiftRightLogical;
default:
UNREACHABLE();
- return static_cast<Bytecode>(-1);
+ return Bytecode::kIllegal;
}
}
@@ -1388,7 +1246,7 @@ Bytecode BytecodeArrayBuilder::BytecodeForCountOperation(Token::Value op) {
return Bytecode::kDec;
default:
UNREACHABLE();
- return static_cast<Bytecode>(-1);
+ return Bytecode::kIllegal;
}
}
@@ -1402,8 +1260,6 @@ Bytecode BytecodeArrayBuilder::BytecodeForCompareOperation(Token::Value op) {
return Bytecode::kTestNotEqual;
case Token::Value::EQ_STRICT:
return Bytecode::kTestEqualStrict;
- case Token::Value::NE_STRICT:
- return Bytecode::kTestNotEqualStrict;
case Token::Value::LT:
return Bytecode::kTestLessThan;
case Token::Value::GT:
@@ -1418,49 +1274,7 @@ Bytecode BytecodeArrayBuilder::BytecodeForCompareOperation(Token::Value op) {
return Bytecode::kTestIn;
default:
UNREACHABLE();
- return static_cast<Bytecode>(-1);
- }
-}
-
-
-// static
-Bytecode BytecodeArrayBuilder::BytecodeForWideOperands(Bytecode bytecode) {
- switch (bytecode) {
- case Bytecode::kCall:
- return Bytecode::kCallWide;
- case Bytecode::kTailCall:
- return Bytecode::kTailCallWide;
- case Bytecode::kLoadIC:
- return Bytecode::kLoadICWide;
- case Bytecode::kKeyedLoadIC:
- return Bytecode::kKeyedLoadICWide;
- case Bytecode::kStoreICSloppy:
- return Bytecode::kStoreICSloppyWide;
- case Bytecode::kStoreICStrict:
- return Bytecode::kStoreICStrictWide;
- case Bytecode::kKeyedStoreICSloppy:
- return Bytecode::kKeyedStoreICSloppyWide;
- case Bytecode::kKeyedStoreICStrict:
- return Bytecode::kKeyedStoreICStrictWide;
- case Bytecode::kLdaGlobal:
- return Bytecode::kLdaGlobalWide;
- case Bytecode::kLdaGlobalInsideTypeof:
- return Bytecode::kLdaGlobalInsideTypeofWide;
- case Bytecode::kStaGlobalSloppy:
- return Bytecode::kStaGlobalSloppyWide;
- case Bytecode::kStaGlobalStrict:
- return Bytecode::kStaGlobalStrictWide;
- case Bytecode::kLdaLookupSlot:
- return Bytecode::kLdaLookupSlotWide;
- case Bytecode::kLdaLookupSlotInsideTypeof:
- return Bytecode::kLdaLookupSlotInsideTypeofWide;
- case Bytecode::kStaLookupSlotStrict:
- return Bytecode::kStaLookupSlotStrictWide;
- case Bytecode::kStaLookupSlotSloppy:
- return Bytecode::kStaLookupSlotSloppyWide;
- default:
- UNREACHABLE();
- return static_cast<Bytecode>(-1);
+ return Bytecode::kIllegal;
}
}
@@ -1472,12 +1286,10 @@ Bytecode BytecodeArrayBuilder::BytecodeForStoreIC(LanguageMode language_mode) {
return Bytecode::kStoreICSloppy;
case STRICT:
return Bytecode::kStoreICStrict;
- case STRONG:
- UNIMPLEMENTED();
default:
UNREACHABLE();
}
- return static_cast<Bytecode>(-1);
+ return Bytecode::kIllegal;
}
@@ -1489,12 +1301,10 @@ Bytecode BytecodeArrayBuilder::BytecodeForKeyedStoreIC(
return Bytecode::kKeyedStoreICSloppy;
case STRICT:
return Bytecode::kKeyedStoreICStrict;
- case STRONG:
- UNIMPLEMENTED();
default:
UNREACHABLE();
}
- return static_cast<Bytecode>(-1);
+ return Bytecode::kIllegal;
}
@@ -1513,12 +1323,10 @@ Bytecode BytecodeArrayBuilder::BytecodeForStoreGlobal(
return Bytecode::kStaGlobalSloppy;
case STRICT:
return Bytecode::kStaGlobalStrict;
- case STRONG:
- UNIMPLEMENTED();
default:
UNREACHABLE();
}
- return static_cast<Bytecode>(-1);
+ return Bytecode::kIllegal;
}
@@ -1530,12 +1338,10 @@ Bytecode BytecodeArrayBuilder::BytecodeForStoreLookupSlot(
return Bytecode::kStaLookupSlotSloppy;
case STRICT:
return Bytecode::kStaLookupSlotStrict;
- case STRONG:
- UNIMPLEMENTED();
default:
UNREACHABLE();
}
- return static_cast<Bytecode>(-1);
+ return Bytecode::kIllegal;
}
// static
@@ -1550,7 +1356,7 @@ Bytecode BytecodeArrayBuilder::BytecodeForCreateArguments(
return Bytecode::kCreateRestParameter;
}
UNREACHABLE();
- return static_cast<Bytecode>(-1);
+ return Bytecode::kIllegal;
}
@@ -1561,12 +1367,10 @@ Bytecode BytecodeArrayBuilder::BytecodeForDelete(LanguageMode language_mode) {
return Bytecode::kDeletePropertySloppy;
case STRICT:
return Bytecode::kDeletePropertyStrict;
- case STRONG:
- UNIMPLEMENTED();
default:
UNREACHABLE();
}
- return static_cast<Bytecode>(-1);
+ return Bytecode::kIllegal;
}
// static
@@ -1579,58 +1383,109 @@ Bytecode BytecodeArrayBuilder::BytecodeForCall(TailCallMode tail_call_mode) {
default:
UNREACHABLE();
}
- return static_cast<Bytecode>(-1);
+ return Bytecode::kIllegal;
}
// static
-bool BytecodeArrayBuilder::FitsInIdx8Operand(int value) {
- return kMinUInt8 <= value && value <= kMaxUInt8;
+OperandSize BytecodeArrayBuilder::SizeForRegisterOperand(Register value) {
+ if (value.is_byte_operand()) {
+ return OperandSize::kByte;
+ } else if (value.is_short_operand()) {
+ return OperandSize::kShort;
+ } else {
+ return OperandSize::kQuad;
+ }
}
-
// static
-bool BytecodeArrayBuilder::FitsInIdx8Operand(size_t value) {
- return value <= static_cast<size_t>(kMaxUInt8);
+OperandSize BytecodeArrayBuilder::SizeForSignedOperand(int value) {
+ if (kMinInt8 <= value && value <= kMaxInt8) {
+ return OperandSize::kByte;
+ } else if (kMinInt16 <= value && value <= kMaxInt16) {
+ return OperandSize::kShort;
+ } else {
+ return OperandSize::kQuad;
+ }
}
-
// static
-bool BytecodeArrayBuilder::FitsInImm8Operand(int value) {
- return kMinInt8 <= value && value <= kMaxInt8;
+OperandSize BytecodeArrayBuilder::SizeForUnsignedOperand(int value) {
+ DCHECK_GE(value, 0);
+ if (value <= kMaxUInt8) {
+ return OperandSize::kByte;
+ } else if (value <= kMaxUInt16) {
+ return OperandSize::kShort;
+ } else {
+ return OperandSize::kQuad;
+ }
}
-
-// static
-bool BytecodeArrayBuilder::FitsInIdx16Operand(int value) {
- return kMinUInt16 <= value && value <= kMaxUInt16;
+OperandSize BytecodeArrayBuilder::SizeForUnsignedOperand(size_t value) {
+ if (value <= static_cast<size_t>(kMaxUInt8)) {
+ return OperandSize::kByte;
+ } else if (value <= static_cast<size_t>(kMaxUInt16)) {
+ return OperandSize::kShort;
+ } else if (value <= kMaxUInt32) {
+ return OperandSize::kQuad;
+ } else {
+ UNREACHABLE();
+ return OperandSize::kQuad;
+ }
}
-
-// static
-bool BytecodeArrayBuilder::FitsInIdx16Operand(size_t value) {
- return value <= static_cast<size_t>(kMaxUInt16);
+OperandScale BytecodeArrayBuilder::OperandSizesToScale(OperandSize size0,
+ OperandSize size1,
+ OperandSize size2,
+ OperandSize size3) {
+ OperandSize upper = std::max(size0, size1);
+ OperandSize lower = std::max(size2, size3);
+ OperandSize result = std::max(upper, lower);
+ // Operand sizes have been scaled before calling this function.
+ // Currently all scalable operands are byte sized at
+ // OperandScale::kSingle.
+ STATIC_ASSERT(static_cast<int>(OperandSize::kByte) ==
+ static_cast<int>(OperandScale::kSingle) &&
+ static_cast<int>(OperandSize::kShort) ==
+ static_cast<int>(OperandScale::kDouble) &&
+ static_cast<int>(OperandSize::kQuad) ==
+ static_cast<int>(OperandScale::kQuadruple));
+ OperandScale operand_scale = static_cast<OperandScale>(result);
+ DCHECK(operand_scale == OperandScale::kSingle ||
+ operand_scale == OperandScale::kDouble ||
+ operand_scale == OperandScale::kQuadruple);
+ return operand_scale;
}
-
-// static
-bool BytecodeArrayBuilder::FitsInReg8Operand(Register value) {
- return RegisterTranslator::FitsInReg8Operand(value);
+uint32_t BytecodeArrayBuilder::RegisterOperand(Register reg) {
+ return static_cast<uint32_t>(reg.ToOperand());
}
-// static
-bool BytecodeArrayBuilder::FitsInReg8OperandUntranslated(Register value) {
- return value.is_byte_operand();
+Register BytecodeArrayBuilder::RegisterFromOperand(uint32_t operand) {
+ return Register::FromOperand(static_cast<int32_t>(operand));
}
+uint32_t BytecodeArrayBuilder::SignedOperand(int value, OperandSize size) {
+ switch (size) {
+ case OperandSize::kByte:
+ return static_cast<uint8_t>(value & 0xff);
+ case OperandSize::kShort:
+ return static_cast<uint16_t>(value & 0xffff);
+ case OperandSize::kQuad:
+ return static_cast<uint32_t>(value);
+ case OperandSize::kNone:
+ UNREACHABLE();
+ }
+ return 0;
+}
-// static
-bool BytecodeArrayBuilder::FitsInReg16Operand(Register value) {
- return RegisterTranslator::FitsInReg16Operand(value);
+uint32_t BytecodeArrayBuilder::UnsignedOperand(int value) {
+ DCHECK_GE(value, 0);
+ return static_cast<uint32_t>(value);
}
-// static
-bool BytecodeArrayBuilder::FitsInReg16OperandUntranslated(Register value) {
- return value.is_short_operand();
+uint32_t BytecodeArrayBuilder::UnsignedOperand(size_t value) {
+ DCHECK_LE(value, kMaxUInt32);
+ return static_cast<uint32_t>(value);
}
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index fe69337184..4446a63596 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -10,7 +10,6 @@
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/constant-array-builder.h"
#include "src/interpreter/handler-table-builder.h"
-#include "src/interpreter/register-translator.h"
#include "src/interpreter/source-position-table.h"
#include "src/zone-containers.h"
@@ -24,10 +23,11 @@ namespace interpreter {
class BytecodeLabel;
class Register;
-class BytecodeArrayBuilder final : public ZoneObject, private RegisterMover {
+class BytecodeArrayBuilder final : public ZoneObject {
public:
BytecodeArrayBuilder(Isolate* isolate, Zone* zone, int parameter_count,
- int context_count, int locals_count);
+ int context_count, int locals_count,
+ FunctionLiteral* literal = nullptr);
~BytecodeArrayBuilder();
Handle<BytecodeArray> ToBytecodeArray();
@@ -65,13 +65,6 @@ class BytecodeArrayBuilder final : public ZoneObject, private RegisterMover {
return temporary_register_allocator()->allocation_count();
}
- // Returns the number of registers used for translating wide
- // register operands into byte sized register operands.
- int translation_register_count() const {
- return RegisterTranslator::RegisterCountAdjustment(
- fixed_and_temporary_register_count(), parameter_count());
- }
-
Register Parameter(int parameter_index) const;
// Return true if the register |reg| represents a parameter or a
@@ -89,7 +82,6 @@ class BytecodeArrayBuilder final : public ZoneObject, private RegisterMover {
BytecodeArrayBuilder& LoadTheHole();
BytecodeArrayBuilder& LoadTrue();
BytecodeArrayBuilder& LoadFalse();
- BytecodeArrayBuilder& LoadBooleanConstant(bool value);
// Global loads to the accumulator and stores from the accumulator.
BytecodeArrayBuilder& LoadGlobal(const Handle<String> name, int feedback_slot,
@@ -245,7 +237,8 @@ class BytecodeArrayBuilder final : public ZoneObject, private RegisterMover {
BytecodeArrayBuilder& ForInPrepare(Register cache_info_triple);
BytecodeArrayBuilder& ForInDone(Register index, Register cache_length);
BytecodeArrayBuilder& ForInNext(Register receiver, Register index,
- Register cache_type_array_pair);
+ Register cache_type_array_pair,
+ int feedback_slot);
BytecodeArrayBuilder& ForInStep(Register index);
// Exception handling.
@@ -257,8 +250,11 @@ class BytecodeArrayBuilder final : public ZoneObject, private RegisterMover {
// entry, so that it can be referenced by above exception handling support.
int NewHandlerEntry() { return handler_table_builder()->NewHandlerEntry(); }
+ void InitializeReturnPosition(FunctionLiteral* literal);
+
void SetStatementPosition(Statement* stmt);
void SetExpressionPosition(Expression* expr);
+ void SetExpressionAsStatementPosition(Expression* expr);
// Accessors
Zone* zone() const { return zone_; }
@@ -269,7 +265,23 @@ class BytecodeArrayBuilder final : public ZoneObject, private RegisterMover {
return &temporary_allocator_;
}
- void EnsureReturn(FunctionLiteral* literal);
+ void EnsureReturn();
+
+ static OperandScale OperandSizesToScale(
+ OperandSize size0, OperandSize size1 = OperandSize::kByte,
+ OperandSize size2 = OperandSize::kByte,
+ OperandSize size3 = OperandSize::kByte);
+
+ static OperandSize SizeForRegisterOperand(Register reg);
+ static OperandSize SizeForSignedOperand(int value);
+ static OperandSize SizeForUnsignedOperand(int value);
+ static OperandSize SizeForUnsignedOperand(size_t value);
+
+ static uint32_t RegisterOperand(Register reg);
+ static Register RegisterFromOperand(uint32_t operand);
+ static uint32_t SignedOperand(int value, OperandSize size);
+ static uint32_t UnsignedOperand(int value);
+ static uint32_t UnsignedOperand(size_t value);
private:
class PreviousBytecodeHelper;
@@ -278,7 +290,6 @@ class BytecodeArrayBuilder final : public ZoneObject, private RegisterMover {
static Bytecode BytecodeForBinaryOperation(Token::Value op);
static Bytecode BytecodeForCountOperation(Token::Value op);
static Bytecode BytecodeForCompareOperation(Token::Value op);
- static Bytecode BytecodeForWideOperands(Bytecode bytecode);
static Bytecode BytecodeForStoreIC(LanguageMode language_mode);
static Bytecode BytecodeForKeyedStoreIC(LanguageMode language_mode);
static Bytecode BytecodeForLoadGlobal(TypeofMode typeof_mode);
@@ -288,32 +299,22 @@ class BytecodeArrayBuilder final : public ZoneObject, private RegisterMover {
static Bytecode BytecodeForDelete(LanguageMode language_mode);
static Bytecode BytecodeForCall(TailCallMode tail_call_mode);
- static bool FitsInIdx8Operand(int value);
- static bool FitsInIdx8Operand(size_t value);
- static bool FitsInImm8Operand(int value);
- static bool FitsInIdx16Operand(int value);
- static bool FitsInIdx16Operand(size_t value);
- static bool FitsInReg8Operand(Register value);
- static bool FitsInReg8OperandUntranslated(Register value);
- static bool FitsInReg16Operand(Register value);
- static bool FitsInReg16OperandUntranslated(Register value);
-
- // RegisterMover interface.
- void MoveRegisterUntranslated(Register from, Register to) override;
-
static Bytecode GetJumpWithConstantOperand(Bytecode jump_smi8_operand);
- static Bytecode GetJumpWithConstantWideOperand(Bytecode jump_smi8_operand);
static Bytecode GetJumpWithToBoolean(Bytecode jump_smi8_operand);
template <size_t N>
- INLINE(void Output(Bytecode bytecode, uint32_t(&operands)[N]));
- void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
- uint32_t operand2, uint32_t operand3);
- void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
- uint32_t operand2);
- void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1);
- void Output(Bytecode bytecode, uint32_t operand0);
+ INLINE(void Output(Bytecode bytecode, uint32_t (&operands)[N],
+ OperandScale operand_scale = OperandScale::kSingle));
void Output(Bytecode bytecode);
+ void OutputScaled(Bytecode bytecode, OperandScale operand_scale,
+ uint32_t operand0, uint32_t operand1, uint32_t operand2,
+ uint32_t operand3);
+ void OutputScaled(Bytecode bytecode, OperandScale operand_scale,
+ uint32_t operand0, uint32_t operand1, uint32_t operand2);
+ void OutputScaled(Bytecode bytecode, OperandScale operand_scale,
+ uint32_t operand0, uint32_t operand1);
+ void OutputScaled(Bytecode bytecode, OperandScale operand_scale,
+ uint32_t operand0);
BytecodeArrayBuilder& OutputJump(Bytecode jump_bytecode,
BytecodeLabel* label);
@@ -323,19 +324,21 @@ class BytecodeArrayBuilder final : public ZoneObject, private RegisterMover {
const ZoneVector<uint8_t>::iterator& jump_location, int delta);
void PatchIndirectJumpWith16BitOperand(
const ZoneVector<uint8_t>::iterator& jump_location, int delta);
+ void PatchIndirectJumpWith32BitOperand(
+ const ZoneVector<uint8_t>::iterator& jump_location, int delta);
void LeaveBasicBlock();
- bool OperandIsValid(Bytecode bytecode, int operand_index,
- uint32_t operand_value) const;
- bool RegisterIsValid(Register reg, OperandType reg_type) const;
+ bool OperandIsValid(Bytecode bytecode, OperandScale operand_scale,
+ int operand_index, uint32_t operand_value) const;
+ bool RegisterIsValid(Register reg, OperandSize reg_size) const;
bool LastBytecodeInSameBlock() const;
bool NeedToBooleanCast();
bool IsRegisterInAccumulator(Register reg);
- // Set position for implicit return.
- void SetReturnPosition(FunctionLiteral* fun);
+ // Set position for return.
+ void SetReturnPosition();
// Gets a constant pool entry for the |object|.
size_t GetConstantPoolEntry(Handle<Object> object);
@@ -355,7 +358,6 @@ class BytecodeArrayBuilder final : public ZoneObject, private RegisterMover {
SourcePositionTableBuilder* source_position_table_builder() {
return &source_position_table_builder_;
}
- RegisterTranslator* register_translator() { return &register_translator_; }
Isolate* isolate_;
Zone* zone_;
@@ -371,8 +373,8 @@ class BytecodeArrayBuilder final : public ZoneObject, private RegisterMover {
int parameter_count_;
int local_register_count_;
int context_register_count_;
+ int return_position_;
TemporaryRegisterAllocator temporary_allocator_;
- RegisterTranslator register_translator_;
DISALLOW_COPY_AND_ASSIGN(BytecodeArrayBuilder);
};
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.cc b/deps/v8/src/interpreter/bytecode-array-iterator.cc
index 0fea985efe..a17efcb6ca 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.cc
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.cc
@@ -12,103 +12,119 @@ namespace interpreter {
BytecodeArrayIterator::BytecodeArrayIterator(
Handle<BytecodeArray> bytecode_array)
- : bytecode_array_(bytecode_array), bytecode_offset_(0) {}
-
+ : bytecode_array_(bytecode_array),
+ bytecode_offset_(0),
+ operand_scale_(OperandScale::kSingle),
+ prefix_offset_(0) {
+ UpdateOperandScale();
+}
void BytecodeArrayIterator::Advance() {
- bytecode_offset_ += Bytecodes::Size(current_bytecode());
+ bytecode_offset_ += current_bytecode_size();
+ UpdateOperandScale();
}
+void BytecodeArrayIterator::UpdateOperandScale() {
+ if (!done()) {
+ uint8_t current_byte = bytecode_array()->get(bytecode_offset_);
+ Bytecode current_bytecode = Bytecodes::FromByte(current_byte);
+ if (Bytecodes::IsPrefixScalingBytecode(current_bytecode)) {
+ operand_scale_ =
+ Bytecodes::PrefixBytecodeToOperandScale(current_bytecode);
+ prefix_offset_ = 1;
+ } else {
+ operand_scale_ = OperandScale::kSingle;
+ prefix_offset_ = 0;
+ }
+ }
+}
bool BytecodeArrayIterator::done() const {
return bytecode_offset_ >= bytecode_array()->length();
}
-
Bytecode BytecodeArrayIterator::current_bytecode() const {
DCHECK(!done());
- uint8_t current_byte = bytecode_array()->get(bytecode_offset_);
- return interpreter::Bytecodes::FromByte(current_byte);
+ uint8_t current_byte =
+ bytecode_array()->get(bytecode_offset_ + current_prefix_offset());
+ Bytecode current_bytecode = Bytecodes::FromByte(current_byte);
+ DCHECK(!Bytecodes::IsPrefixScalingBytecode(current_bytecode));
+ return current_bytecode;
}
-
int BytecodeArrayIterator::current_bytecode_size() const {
- return Bytecodes::Size(current_bytecode());
+ return current_prefix_offset() +
+ Bytecodes::Size(current_bytecode(), current_operand_scale());
}
-
-uint32_t BytecodeArrayIterator::GetRawOperand(int operand_index,
- OperandType operand_type) const {
+uint32_t BytecodeArrayIterator::GetUnsignedOperand(
+ int operand_index, OperandType operand_type) const {
DCHECK_GE(operand_index, 0);
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
DCHECK_EQ(operand_type,
Bytecodes::GetOperandType(current_bytecode(), operand_index));
- uint8_t* operand_start =
+ DCHECK(Bytecodes::IsUnsignedOperandType(operand_type));
+ const uint8_t* operand_start =
bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
- Bytecodes::GetOperandOffset(current_bytecode(), operand_index);
- switch (Bytecodes::SizeOfOperand(operand_type)) {
- case OperandSize::kByte:
- return static_cast<uint32_t>(*operand_start);
- case OperandSize::kShort:
- return ReadUnalignedUInt16(operand_start);
- case OperandSize::kNone:
- UNREACHABLE();
- }
- return 0;
+ current_prefix_offset() +
+ Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
+ current_operand_scale());
+ return Bytecodes::DecodeUnsignedOperand(operand_start, operand_type,
+ current_operand_scale());
}
+int32_t BytecodeArrayIterator::GetSignedOperand(
+ int operand_index, OperandType operand_type) const {
+ DCHECK_GE(operand_index, 0);
+ DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
+ DCHECK_EQ(operand_type,
+ Bytecodes::GetOperandType(current_bytecode(), operand_index));
+ DCHECK(!Bytecodes::IsUnsignedOperandType(operand_type));
+ const uint8_t* operand_start =
+ bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
+ current_prefix_offset() +
+ Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
+ current_operand_scale());
+ return Bytecodes::DecodeSignedOperand(operand_start, operand_type,
+ current_operand_scale());
+}
-int8_t BytecodeArrayIterator::GetImmediateOperand(int operand_index) const {
- uint32_t operand = GetRawOperand(operand_index, OperandType::kImm8);
- return static_cast<int8_t>(operand);
+uint32_t BytecodeArrayIterator::GetFlagOperand(int operand_index) const {
+ DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
+ OperandType::kFlag8);
+ return GetUnsignedOperand(operand_index, OperandType::kFlag8);
}
-int BytecodeArrayIterator::GetRegisterCountOperand(int operand_index) const {
- OperandSize size =
- Bytecodes::GetOperandSize(current_bytecode(), operand_index);
- OperandType type = (size == OperandSize::kByte) ? OperandType::kRegCount8
- : OperandType::kRegCount16;
- uint32_t operand = GetRawOperand(operand_index, type);
- return static_cast<int>(operand);
+int32_t BytecodeArrayIterator::GetImmediateOperand(int operand_index) const {
+ DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
+ OperandType::kImm);
+ return GetSignedOperand(operand_index, OperandType::kImm);
}
+uint32_t BytecodeArrayIterator::GetRegisterCountOperand(
+ int operand_index) const {
+ DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
+ OperandType::kRegCount);
+ return GetUnsignedOperand(operand_index, OperandType::kRegCount);
+}
-int BytecodeArrayIterator::GetIndexOperand(int operand_index) const {
+uint32_t BytecodeArrayIterator::GetIndexOperand(int operand_index) const {
OperandType operand_type =
Bytecodes::GetOperandType(current_bytecode(), operand_index);
- DCHECK(operand_type == OperandType::kIdx8 ||
- operand_type == OperandType::kIdx16);
- uint32_t operand = GetRawOperand(operand_index, operand_type);
- return static_cast<int>(operand);
+ DCHECK_EQ(operand_type, OperandType::kIdx);
+ return GetUnsignedOperand(operand_index, operand_type);
}
-
Register BytecodeArrayIterator::GetRegisterOperand(int operand_index) const {
OperandType operand_type =
Bytecodes::GetOperandType(current_bytecode(), operand_index);
- DCHECK(Bytecodes::IsRegisterOperandType(operand_type));
- uint32_t operand = GetRawOperand(operand_index, operand_type);
- Register reg;
- switch (Bytecodes::GetOperandSize(current_bytecode(), operand_index)) {
- case OperandSize::kByte:
- reg = Register::FromOperand(static_cast<uint8_t>(operand));
- break;
- case OperandSize::kShort:
- reg = Register::FromWideOperand(static_cast<uint16_t>(operand));
- break;
- case OperandSize::kNone:
- UNREACHABLE();
- reg = Register::invalid_value();
- break;
- }
- DCHECK_GE(reg.index(),
- Register::FromParameterIndex(0, bytecode_array()->parameter_count())
- .index());
- DCHECK(reg.index() < bytecode_array()->register_count() ||
- (reg.index() == 0 &&
- Bytecodes::IsMaybeRegisterOperandType(
- Bytecodes::GetOperandType(current_bytecode(), operand_index))));
- return reg;
+ const uint8_t* operand_start =
+ bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
+ current_prefix_offset() +
+ Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
+ current_operand_scale());
+ return Bytecodes::DecodeRegisterOperand(operand_start, operand_type,
+ current_operand_scale());
}
int BytecodeArrayIterator::GetRegisterOperandRange(int operand_index) const {
@@ -116,20 +132,17 @@ int BytecodeArrayIterator::GetRegisterOperandRange(int operand_index) const {
Bytecodes::GetOperandType(current_bytecode(), operand_index);
DCHECK(Bytecodes::IsRegisterOperandType(operand_type));
switch (operand_type) {
- case OperandType::kRegPair8:
- case OperandType::kRegPair16:
- case OperandType::kRegOutPair8:
- case OperandType::kRegOutPair16:
+ case OperandType::kRegPair:
+ case OperandType::kRegOutPair:
return 2;
- case OperandType::kRegOutTriple8:
- case OperandType::kRegOutTriple16:
+ case OperandType::kRegOutTriple:
return 3;
default: {
if (operand_index + 1 !=
Bytecodes::NumberOfOperands(current_bytecode())) {
OperandType next_operand_type =
Bytecodes::GetOperandType(current_bytecode(), operand_index + 1);
- if (Bytecodes::IsRegisterCountOperandType(next_operand_type)) {
+ if (OperandType::kRegCount == next_operand_type) {
return GetRegisterCountOperand(operand_index + 1);
}
}
@@ -138,6 +151,13 @@ int BytecodeArrayIterator::GetRegisterOperandRange(int operand_index) const {
}
}
+uint32_t BytecodeArrayIterator::GetRuntimeIdOperand(int operand_index) const {
+ OperandType operand_type =
+ Bytecodes::GetOperandType(current_bytecode(), operand_index);
+ DCHECK(operand_type == OperandType::kRuntimeId);
+ return GetUnsignedOperand(operand_index, operand_type);
+}
+
Handle<Object> BytecodeArrayIterator::GetConstantForIndexOperand(
int operand_index) const {
return FixedArray::get(bytecode_array()->constant_pool(),
@@ -150,11 +170,10 @@ int BytecodeArrayIterator::GetJumpTargetOffset() const {
Bytecode bytecode = current_bytecode();
if (interpreter::Bytecodes::IsJumpImmediate(bytecode)) {
int relative_offset = GetImmediateOperand(0);
- return current_offset() + relative_offset;
- } else if (interpreter::Bytecodes::IsJumpConstant(bytecode) ||
- interpreter::Bytecodes::IsJumpConstantWide(bytecode)) {
+ return current_offset() + relative_offset + current_prefix_offset();
+ } else if (interpreter::Bytecodes::IsJumpConstant(bytecode)) {
Smi* smi = Smi::cast(*GetConstantForIndexOperand(0));
- return current_offset() + smi->value();
+ return current_offset() + smi->value() + current_prefix_offset();
} else {
UNREACHABLE();
return kMinInt;
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.h b/deps/v8/src/interpreter/bytecode-array-iterator.h
index 5379bbf028..b372894fd8 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.h
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.h
@@ -21,31 +21,38 @@ class BytecodeArrayIterator {
bool done() const;
Bytecode current_bytecode() const;
int current_bytecode_size() const;
- void set_current_offset(int offset) { bytecode_offset_ = offset; }
int current_offset() const { return bytecode_offset_; }
+ OperandScale current_operand_scale() const { return operand_scale_; }
+ int current_prefix_offset() const { return prefix_offset_; }
const Handle<BytecodeArray>& bytecode_array() const {
return bytecode_array_;
}
- int8_t GetImmediateOperand(int operand_index) const;
- int GetIndexOperand(int operand_index) const;
- int GetRegisterCountOperand(int operand_index) const;
+ uint32_t GetFlagOperand(int operand_index) const;
+ int32_t GetImmediateOperand(int operand_index) const;
+ uint32_t GetIndexOperand(int operand_index) const;
+ uint32_t GetRegisterCountOperand(int operand_index) const;
Register GetRegisterOperand(int operand_index) const;
int GetRegisterOperandRange(int operand_index) const;
+ uint32_t GetRuntimeIdOperand(int operand_index) const;
Handle<Object> GetConstantForIndexOperand(int operand_index) const;
- // Get the raw byte for the given operand. Note: you should prefer using the
- // typed versions above which cast the return to an appropriate type.
- uint32_t GetRawOperand(int operand_index, OperandType operand_type) const;
-
// Returns the absolute offset of the branch target at the current
// bytecode. It is an error to call this method if the bytecode is
// not for a jump or conditional jump.
int GetJumpTargetOffset() const;
private:
+ uint32_t GetUnsignedOperand(int operand_index,
+ OperandType operand_type) const;
+ int32_t GetSignedOperand(int operand_index, OperandType operand_type) const;
+
+ void UpdateOperandScale();
+
Handle<BytecodeArray> bytecode_array_;
int bytecode_offset_;
+ OperandScale operand_scale_;
+ int prefix_offset_;
DISALLOW_COPY_AND_ASSIGN(BytecodeArrayIterator);
};
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 6f4dc275c1..b0fa245e18 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -72,6 +72,7 @@ class BytecodeGenerator::ContextScope BASE_EMBEDDED {
Scope* scope() const { return scope_; }
Register reg() const { return register_; }
+ bool ShouldPopContext() { return should_pop_context_; }
private:
const BytecodeArrayBuilder* builder() const { return generator_->builder(); }
@@ -212,9 +213,9 @@ class BytecodeGenerator::ControlScopeForTopLevel final
protected:
bool Execute(Command command, Statement* statement) override {
switch (command) {
- case CMD_BREAK:
+ case CMD_BREAK: // We should never see break/continue in top-level.
case CMD_CONTINUE:
- break;
+ UNREACHABLE();
case CMD_RETURN:
generator()->builder()->Return();
return true;
@@ -362,15 +363,20 @@ class BytecodeGenerator::ControlScopeForTryFinally final
void BytecodeGenerator::ControlScope::PerformCommand(Command command,
Statement* statement) {
ControlScope* current = this;
- ContextScope* context = this->context();
+ ContextScope* context = generator()->execution_context();
+ // Pop context to the expected depth but do not pop the outermost context.
+ if (context != current->context() && context->ShouldPopContext()) {
+ generator()->builder()->PopContext(current->context()->reg());
+ }
do {
- if (current->Execute(command, statement)) { return; }
+ if (current->Execute(command, statement)) {
+ return;
+ }
current = current->outer();
if (current->context() != context) {
// Pop context to the expected depth.
// TODO(rmcilroy): Only emit a single context pop.
generator()->builder()->PopContext(current->context()->reg());
- context = current->context();
}
} while (current != nullptr);
UNREACHABLE();
@@ -450,7 +456,7 @@ class BytecodeGenerator::ExpressionResultScope {
virtual ~ExpressionResultScope() {
generator_->set_execution_result(outer_);
- DCHECK(result_identified());
+ DCHECK(result_identified() || generator_->HasStackOverflow());
}
bool IsEffect() const { return kind_ == Expression::kEffect; }
@@ -462,6 +468,7 @@ class BytecodeGenerator::ExpressionResultScope {
protected:
ExpressionResultScope* outer() const { return outer_; }
BytecodeArrayBuilder* builder() const { return generator_->builder(); }
+ BytecodeGenerator* generator() const { return generator_; }
const RegisterAllocationScope* allocator() const { return &allocator_; }
void set_result_identified() {
@@ -536,7 +543,12 @@ class BytecodeGenerator::RegisterResultScope final
set_result_identified();
}
- Register ResultRegister() const { return result_register_; }
+ Register ResultRegister() {
+ if (generator()->HasStackOverflow() && !result_identified()) {
+ SetResultInAccumulator();
+ }
+ return result_register_;
+ }
private:
Register result_register_;
@@ -565,7 +577,8 @@ Handle<BytecodeArray> BytecodeGenerator::MakeBytecode(CompilationInfo* info) {
// Initialize bytecode array builder.
set_builder(new (zone()) BytecodeArrayBuilder(
isolate(), zone(), info->num_parameters_including_this(),
- scope()->MaxNestedContextChainLength(), scope()->num_stack_slots()));
+ scope()->MaxNestedContextChainLength(), scope()->num_stack_slots(),
+ info->literal()));
// Initialize the incoming context.
ContextScope incoming_context(this, scope(), false);
@@ -584,7 +597,7 @@ Handle<BytecodeArray> BytecodeGenerator::MakeBytecode(CompilationInfo* info) {
MakeBytecodeBody();
}
- builder()->EnsureReturn(info->literal());
+ builder()->EnsureReturn();
set_scope(nullptr);
set_info(nullptr);
return builder()->ToBytecodeArray();
@@ -611,12 +624,6 @@ void BytecodeGenerator::MakeBytecodeBody() {
UNIMPLEMENTED();
}
- // Visit illegal re-declaration and bail out if it exists.
- if (scope()->HasIllegalRedeclaration()) {
- VisitForEffect(scope()->GetIllegalRedeclaration());
- return;
- }
-
// Visit declarations within the function scope.
VisitDeclarations(scope()->declarations());
@@ -826,6 +833,7 @@ void BytecodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
void BytecodeGenerator::VisitIfStatement(IfStatement* stmt) {
+ builder()->SetStatementPosition(stmt);
BytecodeLabel else_label, end_label;
if (stmt->condition()->ToBooleanIsTrue()) {
// Generate then block unconditionally as always true.
@@ -861,23 +869,26 @@ void BytecodeGenerator::VisitSloppyBlockFunctionStatement(
void BytecodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
+ builder()->SetStatementPosition(stmt);
execution_control()->Continue(stmt->target());
}
void BytecodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
+ builder()->SetStatementPosition(stmt);
execution_control()->Break(stmt->target());
}
void BytecodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
- VisitForAccumulatorValue(stmt->expression());
builder()->SetStatementPosition(stmt);
+ VisitForAccumulatorValue(stmt->expression());
execution_control()->ReturnAccumulator();
}
void BytecodeGenerator::VisitWithStatement(WithStatement* stmt) {
+ builder()->SetStatementPosition(stmt);
VisitForAccumulatorValue(stmt->expression());
builder()->CastAccumulatorToJSObject();
VisitNewLocalWithContext();
@@ -893,6 +904,8 @@ void BytecodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
ControlScopeForBreakable scope(this, stmt, &switch_builder);
int default_index = -1;
+ builder()->SetStatementPosition(stmt);
+
// Keep the switch value in a register until a case matches.
Register tag = VisitForRegisterValue(stmt->tag());
@@ -959,6 +972,7 @@ void BytecodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
} else {
VisitIterationBody(stmt, &loop_builder);
loop_builder.Condition();
+ builder()->SetExpressionAsStatementPosition(stmt->cond());
VisitForAccumulatorValue(stmt->cond());
loop_builder.JumpToHeaderIfTrue();
}
@@ -975,6 +989,7 @@ void BytecodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
loop_builder.LoopHeader();
loop_builder.Condition();
if (!stmt->cond()->ToBooleanIsTrue()) {
+ builder()->SetExpressionAsStatementPosition(stmt->cond());
VisitForAccumulatorValue(stmt->cond());
loop_builder.BreakIfFalse();
}
@@ -998,12 +1013,14 @@ void BytecodeGenerator::VisitForStatement(ForStatement* stmt) {
loop_builder.LoopHeader();
loop_builder.Condition();
if (stmt->cond() && !stmt->cond()->ToBooleanIsTrue()) {
+ builder()->SetExpressionAsStatementPosition(stmt->cond());
VisitForAccumulatorValue(stmt->cond());
loop_builder.BreakIfFalse();
}
VisitIterationBody(stmt, &loop_builder);
if (stmt->next() != nullptr) {
loop_builder.Next();
+ builder()->SetStatementPosition(stmt->next());
Visit(stmt->next());
}
loop_builder.JumpToHeader();
@@ -1087,28 +1104,28 @@ void BytecodeGenerator::VisitForInAssignment(Expression* expr,
void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
if (stmt->subject()->IsNullLiteral() ||
- stmt->subject()->IsUndefinedLiteral(isolate())) {
+ stmt->subject()->IsUndefinedLiteral()) {
// ForIn generates lots of code, skip if it wouldn't produce any effects.
return;
}
LoopBuilder loop_builder(builder());
- BytecodeLabel subject_null_label, subject_undefined_label, not_object_label;
+ BytecodeLabel subject_null_label, subject_undefined_label;
// Prepare the state for executing ForIn.
+ builder()->SetExpressionAsStatementPosition(stmt->subject());
VisitForAccumulatorValue(stmt->subject());
builder()->JumpIfUndefined(&subject_undefined_label);
builder()->JumpIfNull(&subject_null_label);
Register receiver = register_allocator()->NewRegister();
builder()->CastAccumulatorToJSObject();
- builder()->JumpIfNull(&not_object_label);
builder()->StoreAccumulatorInRegister(receiver);
register_allocator()->PrepareForConsecutiveAllocations(3);
Register cache_type = register_allocator()->NextConsecutiveRegister();
Register cache_array = register_allocator()->NextConsecutiveRegister();
Register cache_length = register_allocator()->NextConsecutiveRegister();
- // Used as kRegTriple8 and kRegPair8 in ForInPrepare and ForInNext.
+ // Used as kRegTriple and kRegPair in ForInPrepare and ForInNext.
USE(cache_array);
builder()->ForInPrepare(cache_type);
@@ -1119,11 +1136,13 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// The loop
loop_builder.LoopHeader();
+ builder()->SetExpressionAsStatementPosition(stmt->each());
loop_builder.Condition();
builder()->ForInDone(index, cache_length);
loop_builder.BreakIfTrue();
DCHECK(Register::AreContiguous(cache_type, cache_array));
- builder()->ForInNext(receiver, index, cache_type);
+ FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
+ builder()->ForInNext(receiver, index, cache_type, feedback_index(slot));
loop_builder.ContinueIfUndefined();
VisitForInAssignment(stmt->each(), stmt->EachFeedbackSlot());
VisitIterationBody(stmt, &loop_builder);
@@ -1132,7 +1151,6 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
builder()->StoreAccumulatorInRegister(index);
loop_builder.JumpToHeader();
loop_builder.EndLoop();
- builder()->Bind(&not_object_label);
builder()->Bind(&subject_null_label);
builder()->Bind(&subject_undefined_label);
}
@@ -1146,6 +1164,7 @@ void BytecodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
loop_builder.LoopHeader();
loop_builder.Next();
+ builder()->SetExpressionAsStatementPosition(stmt->next_result());
VisitForEffect(stmt->next_result());
VisitForAccumulatorValue(stmt->result_done());
loop_builder.BreakIfTrue();
@@ -1180,8 +1199,10 @@ void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
VisitNewLocalCatchContext(stmt->variable());
builder()->StoreAccumulatorInRegister(context);
- // Clear message object as we enter the catch block.
- builder()->CallRuntime(Runtime::kInterpreterClearPendingMessage, no_reg, 0);
+ // If requested, clear message object as we enter the catch block.
+ if (stmt->clear_pending_message()) {
+ builder()->CallRuntime(Runtime::kInterpreterClearPendingMessage, no_reg, 0);
+ }
// Load the catch context into the accumulator.
builder()->LoadAccumulatorWithRegister(context);
@@ -1267,7 +1288,9 @@ void BytecodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
// Find or build a shared function info.
Handle<SharedFunctionInfo> shared_info =
Compiler::GetSharedFunctionInfo(expr, info()->script(), info());
- CHECK(!shared_info.is_null()); // TODO(rmcilroy): Set stack overflow?
+ if (shared_info.is_null()) {
+ return SetStackOverflow();
+ }
builder()->CreateClosure(shared_info,
expr->pretenure() ? TENURED : NOT_TENURED);
execution_result()->SetResultInAccumulator();
@@ -1679,11 +1702,6 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
}
- // Transform literals that contain functions to fast properties.
- if (expr->has_function()) {
- builder()->CallRuntime(Runtime::kToFastProperties, literal, 1);
- }
-
execution_result()->SetResultInRegister(literal);
}
@@ -1729,6 +1747,7 @@ void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void BytecodeGenerator::VisitVariableProxy(VariableProxy* proxy) {
+ builder()->SetExpressionPosition(proxy);
VisitVariableLoad(proxy->var(), proxy->VariableFeedbackSlot());
}
@@ -2173,6 +2192,7 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
}
// Store the value.
+ builder()->SetExpressionPosition(expr);
FeedbackVectorSlot slot = expr->AssignmentSlot();
switch (assign_type) {
case VARIABLE: {
@@ -2210,6 +2230,7 @@ void BytecodeGenerator::VisitYield(Yield* expr) { UNIMPLEMENTED(); }
void BytecodeGenerator::VisitThrow(Throw* expr) {
VisitForAccumulatorValue(expr->exception());
+ builder()->SetExpressionPosition(expr);
builder()->Throw();
// Throw statments are modeled as expression instead of statments. These are
// converted from assignment statements in Rewriter::ReWrite pass. An
@@ -2222,6 +2243,7 @@ void BytecodeGenerator::VisitThrow(Throw* expr) {
void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* expr) {
LhsKind property_kind = Property::GetAssignType(expr);
FeedbackVectorSlot slot = expr->PropertyFeedbackSlot();
+ builder()->SetExpressionPosition(expr);
switch (property_kind) {
case VARIABLE:
UNREACHABLE();
@@ -2718,9 +2740,7 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
}
// Convert old value into a number.
- if (!is_strong(language_mode())) {
- builder()->CastAccumulatorToNumber();
- }
+ builder()->CastAccumulatorToNumber();
// Save result for postfix expressions.
if (is_postfix) {
@@ -2732,6 +2752,7 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
builder()->CountOperation(expr->binary_op());
// Store the value.
+ builder()->SetExpressionPosition(expr);
FeedbackVectorSlot feedback_slot = expr->CountSlot();
switch (assign_type) {
case VARIABLE: {
@@ -2791,6 +2812,7 @@ void BytecodeGenerator::VisitBinaryOperation(BinaryOperation* binop) {
void BytecodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Register lhs = VisitForRegisterValue(expr->left());
VisitForAccumulatorValue(expr->right());
+ builder()->SetExpressionPosition(expr);
builder()->CompareOperation(expr->op(), lhs);
execution_result()->SetResultInAccumulator();
}
@@ -3129,12 +3151,12 @@ void BytecodeGenerator::VisitInScope(Statement* stmt, Scope* scope) {
LanguageMode BytecodeGenerator::language_mode() const {
- return info()->language_mode();
+ return execution_context()->scope()->language_mode();
}
int BytecodeGenerator::feedback_index(FeedbackVectorSlot slot) const {
- return info()->feedback_vector()->GetIndex(slot);
+ return info()->shared_info()->feedback_vector()->GetIndex(slot);
}
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-register-allocator.cc b/deps/v8/src/interpreter/bytecode-register-allocator.cc
index 0a617c048a..9bdde9a470 100644
--- a/deps/v8/src/interpreter/bytecode-register-allocator.cc
+++ b/deps/v8/src/interpreter/bytecode-register-allocator.cc
@@ -95,17 +95,6 @@ int TemporaryRegisterAllocator::PrepareForConsecutiveTemporaryRegisters(
start = run_end;
run_length = 0;
}
- Register reg_start(*start);
- Register reg_expected(expected);
- if (RegisterTranslator::DistanceToTranslationWindow(reg_start) > 0 &&
- RegisterTranslator::DistanceToTranslationWindow(reg_expected) <= 0) {
- // Run straddles the lower edge of the translation window. Registers
- // after the start of this boundary are displaced by the register
- // translator to provide a hole for translation. Runs either side
- // of the boundary are fine.
- start = run_end;
- run_length = 0;
- }
if (++run_length == count) {
return *start;
}
@@ -121,16 +110,6 @@ int TemporaryRegisterAllocator::PrepareForConsecutiveTemporaryRegisters(
// Pad temporaries if extended run would cross translation boundary.
Register reg_first(*start);
Register reg_last(*start + static_cast<int>(count) - 1);
- DCHECK_GT(RegisterTranslator::DistanceToTranslationWindow(reg_first),
- RegisterTranslator::DistanceToTranslationWindow(reg_last));
- while (RegisterTranslator::DistanceToTranslationWindow(reg_first) > 0 &&
- RegisterTranslator::DistanceToTranslationWindow(reg_last) <= 0) {
- auto pos_insert_pair =
- free_temporaries_.insert(AllocateTemporaryRegister());
- reg_first = Register(*pos_insert_pair.first);
- reg_last = Register(reg_first.index() + static_cast<int>(count) - 1);
- run_length = 0;
- }
// Ensure enough registers for run.
while (run_length++ < count) {
@@ -139,10 +118,6 @@ int TemporaryRegisterAllocator::PrepareForConsecutiveTemporaryRegisters(
int run_start =
last_temporary_register().index() - static_cast<int>(count) + 1;
- DCHECK(RegisterTranslator::DistanceToTranslationWindow(Register(run_start)) <=
- 0 ||
- RegisterTranslator::DistanceToTranslationWindow(
- Register(run_start + static_cast<int>(count) - 1)) > 0);
return run_start;
}
diff --git a/deps/v8/src/interpreter/bytecode-traits.h b/deps/v8/src/interpreter/bytecode-traits.h
index b8136051bb..c724827356 100644
--- a/deps/v8/src/interpreter/bytecode-traits.h
+++ b/deps/v8/src/interpreter/bytecode-traits.h
@@ -11,22 +11,35 @@ namespace v8 {
namespace internal {
namespace interpreter {
-// TODO(rmcilroy): consider simplifying this to avoid the template magic.
+template <OperandTypeInfo>
+struct OperandTypeInfoTraits {
+ static const bool kIsScalable = false;
+ static const bool kIsUnsigned = false;
+ static const OperandSize kUnscaledSize = OperandSize::kNone;
+};
-// Template helpers to deduce the number of operands each bytecode has.
-#define OPERAND_TERM OperandType::kNone, OperandType::kNone, OperandType::kNone
+#define DECLARE_OPERAND_TYPE_INFO(Name, Scalable, Unsigned, BaseSize) \
+ template <> \
+ struct OperandTypeInfoTraits<OperandTypeInfo::k##Name> { \
+ static const bool kIsScalable = Scalable; \
+ static const bool kIsUnsigned = Unsigned; \
+ static const OperandSize kUnscaledSize = BaseSize; \
+ };
+OPERAND_TYPE_INFO_LIST(DECLARE_OPERAND_TYPE_INFO)
+#undef DECLARE_OPERAND_TYPE_INFO
template <OperandType>
-struct OperandTraits {};
+struct OperandTraits {
+ typedef OperandTypeInfoTraits<OperandTypeInfo::kNone> TypeInfo;
+};
-#define DECLARE_OPERAND_SIZE(Name, Size) \
- template <> \
- struct OperandTraits<OperandType::k##Name> { \
- static const OperandSize kSizeType = Size; \
- static const int kSize = static_cast<int>(Size); \
+#define DECLARE_OPERAND_TYPE_TRAITS(Name, InfoType) \
+ template <> \
+ struct OperandTraits<OperandType::k##Name> { \
+ typedef OperandTypeInfoTraits<InfoType> TypeInfo; \
};
-OPERAND_TYPE_LIST(DECLARE_OPERAND_SIZE)
-#undef DECLARE_OPERAND_SIZE
+OPERAND_TYPE_LIST(DECLARE_OPERAND_TYPE_TRAITS)
+#undef DECLARE_OPERAND_TYPE_TRAITS
template <OperandType>
struct RegisterOperandTraits {
@@ -41,13 +54,13 @@ struct RegisterOperandTraits {
REGISTER_OPERAND_TYPE_LIST(DECLARE_REGISTER_OPERAND)
#undef DECLARE_REGISTER_OPERAND
-template <OperandType... Args>
+template <AccumulatorUse, OperandType...>
struct BytecodeTraits {};
-template <OperandType operand_0, OperandType operand_1, OperandType operand_2,
- OperandType operand_3>
-struct BytecodeTraits<operand_0, operand_1, operand_2, operand_3,
- OPERAND_TERM> {
+template <AccumulatorUse accumulator_use, OperandType operand_0,
+ OperandType operand_1, OperandType operand_2, OperandType operand_3>
+struct BytecodeTraits<accumulator_use, operand_0, operand_1, operand_2,
+ operand_3> {
static OperandType GetOperandType(int i) {
DCHECK(0 <= i && i < kOperandCount);
const OperandType kOperands[] = {operand_0, operand_1, operand_2,
@@ -55,32 +68,20 @@ struct BytecodeTraits<operand_0, operand_1, operand_2, operand_3,
return kOperands[i];
}
- static inline OperandSize GetOperandSize(int i) {
- DCHECK(0 <= i && i < kOperandCount);
- const OperandSize kOperandSizes[] =
- {OperandTraits<operand_0>::kSizeType,
- OperandTraits<operand_1>::kSizeType,
- OperandTraits<operand_2>::kSizeType,
- OperandTraits<operand_3>::kSizeType};
- return kOperandSizes[i];
- }
-
- static inline int GetOperandOffset(int i) {
- DCHECK(0 <= i && i < kOperandCount);
- const int kOffset0 = 1;
- const int kOffset1 = kOffset0 + OperandTraits<operand_0>::kSize;
- const int kOffset2 = kOffset1 + OperandTraits<operand_1>::kSize;
- const int kOffset3 = kOffset2 + OperandTraits<operand_2>::kSize;
- const int kOperandOffsets[] = {kOffset0, kOffset1, kOffset2, kOffset3};
- return kOperandOffsets[i];
- }
-
template <OperandType ot>
static inline bool HasAnyOperandsOfType() {
return operand_0 == ot || operand_1 == ot || operand_2 == ot ||
operand_3 == ot;
}
+ static inline bool IsScalable() {
+ return (OperandTraits<operand_0>::TypeInfo::kIsScalable |
+ OperandTraits<operand_1>::TypeInfo::kIsScalable |
+ OperandTraits<operand_2>::TypeInfo::kIsScalable |
+ OperandTraits<operand_3>::TypeInfo::kIsScalable);
+ }
+
+ static const AccumulatorUse kAccumulatorUse = accumulator_use;
static const int kOperandCount = 4;
static const int kRegisterOperandCount =
RegisterOperandTraits<operand_0>::kIsRegisterOperand +
@@ -92,42 +93,29 @@ struct BytecodeTraits<operand_0, operand_1, operand_2, operand_3,
(RegisterOperandTraits<operand_1>::kIsRegisterOperand << 1) +
(RegisterOperandTraits<operand_2>::kIsRegisterOperand << 2) +
(RegisterOperandTraits<operand_3>::kIsRegisterOperand << 3);
- static const int kSize =
- 1 + OperandTraits<operand_0>::kSize + OperandTraits<operand_1>::kSize +
- OperandTraits<operand_2>::kSize + OperandTraits<operand_3>::kSize;
};
-template <OperandType operand_0, OperandType operand_1, OperandType operand_2>
-struct BytecodeTraits<operand_0, operand_1, operand_2, OPERAND_TERM> {
+template <AccumulatorUse accumulator_use, OperandType operand_0,
+ OperandType operand_1, OperandType operand_2>
+struct BytecodeTraits<accumulator_use, operand_0, operand_1, operand_2> {
static inline OperandType GetOperandType(int i) {
DCHECK(0 <= i && i <= 2);
const OperandType kOperands[] = {operand_0, operand_1, operand_2};
return kOperands[i];
}
- static inline OperandSize GetOperandSize(int i) {
- DCHECK(0 <= i && i < kOperandCount);
- const OperandSize kOperandSizes[] =
- {OperandTraits<operand_0>::kSizeType,
- OperandTraits<operand_1>::kSizeType,
- OperandTraits<operand_2>::kSizeType};
- return kOperandSizes[i];
- }
-
- static inline int GetOperandOffset(int i) {
- DCHECK(0 <= i && i < kOperandCount);
- const int kOffset0 = 1;
- const int kOffset1 = kOffset0 + OperandTraits<operand_0>::kSize;
- const int kOffset2 = kOffset1 + OperandTraits<operand_1>::kSize;
- const int kOperandOffsets[] = {kOffset0, kOffset1, kOffset2};
- return kOperandOffsets[i];
- }
-
template <OperandType ot>
static inline bool HasAnyOperandsOfType() {
return operand_0 == ot || operand_1 == ot || operand_2 == ot;
}
+ static inline bool IsScalable() {
+ return (OperandTraits<operand_0>::TypeInfo::kIsScalable |
+ OperandTraits<operand_1>::TypeInfo::kIsScalable |
+ OperandTraits<operand_2>::TypeInfo::kIsScalable);
+ }
+
+ static const AccumulatorUse kAccumulatorUse = accumulator_use;
static const int kOperandCount = 3;
static const int kRegisterOperandCount =
RegisterOperandTraits<operand_0>::kIsRegisterOperand +
@@ -137,40 +125,28 @@ struct BytecodeTraits<operand_0, operand_1, operand_2, OPERAND_TERM> {
RegisterOperandTraits<operand_0>::kIsRegisterOperand +
(RegisterOperandTraits<operand_1>::kIsRegisterOperand << 1) +
(RegisterOperandTraits<operand_2>::kIsRegisterOperand << 2);
- static const int kSize =
- 1 + OperandTraits<operand_0>::kSize + OperandTraits<operand_1>::kSize +
- OperandTraits<operand_2>::kSize;
};
-template <OperandType operand_0, OperandType operand_1>
-struct BytecodeTraits<operand_0, operand_1, OPERAND_TERM> {
+template <AccumulatorUse accumulator_use, OperandType operand_0,
+ OperandType operand_1>
+struct BytecodeTraits<accumulator_use, operand_0, operand_1> {
static inline OperandType GetOperandType(int i) {
DCHECK(0 <= i && i < kOperandCount);
const OperandType kOperands[] = {operand_0, operand_1};
return kOperands[i];
}
- static inline OperandSize GetOperandSize(int i) {
- DCHECK(0 <= i && i < kOperandCount);
- const OperandSize kOperandSizes[] =
- {OperandTraits<operand_0>::kSizeType,
- OperandTraits<operand_1>::kSizeType};
- return kOperandSizes[i];
- }
-
- static inline int GetOperandOffset(int i) {
- DCHECK(0 <= i && i < kOperandCount);
- const int kOffset0 = 1;
- const int kOffset1 = kOffset0 + OperandTraits<operand_0>::kSize;
- const int kOperandOffsets[] = {kOffset0, kOffset1};
- return kOperandOffsets[i];
- }
-
template <OperandType ot>
static inline bool HasAnyOperandsOfType() {
return operand_0 == ot || operand_1 == ot;
}
+ static inline bool IsScalable() {
+ return (OperandTraits<operand_0>::TypeInfo::kIsScalable |
+ OperandTraits<operand_1>::TypeInfo::kIsScalable);
+ }
+
+ static const AccumulatorUse kAccumulatorUse = accumulator_use;
static const int kOperandCount = 2;
static const int kRegisterOperandCount =
RegisterOperandTraits<operand_0>::kIsRegisterOperand +
@@ -178,68 +154,91 @@ struct BytecodeTraits<operand_0, operand_1, OPERAND_TERM> {
static const int kRegisterOperandBitmap =
RegisterOperandTraits<operand_0>::kIsRegisterOperand +
(RegisterOperandTraits<operand_1>::kIsRegisterOperand << 1);
- static const int kSize =
- 1 + OperandTraits<operand_0>::kSize + OperandTraits<operand_1>::kSize;
};
-template <OperandType operand_0>
-struct BytecodeTraits<operand_0, OPERAND_TERM> {
+template <AccumulatorUse accumulator_use, OperandType operand_0>
+struct BytecodeTraits<accumulator_use, operand_0> {
static inline OperandType GetOperandType(int i) {
DCHECK(i == 0);
return operand_0;
}
- static inline OperandSize GetOperandSize(int i) {
- DCHECK(i == 0);
- return OperandTraits<operand_0>::kSizeType;
- }
-
- static inline int GetOperandOffset(int i) {
- DCHECK(i == 0);
- return 1;
- }
-
template <OperandType ot>
static inline bool HasAnyOperandsOfType() {
return operand_0 == ot;
}
+ static inline bool IsScalable() {
+ return OperandTraits<operand_0>::TypeInfo::kIsScalable;
+ }
+
+ static const AccumulatorUse kAccumulatorUse = accumulator_use;
static const int kOperandCount = 1;
static const int kRegisterOperandCount =
RegisterOperandTraits<operand_0>::kIsRegisterOperand;
static const int kRegisterOperandBitmap =
RegisterOperandTraits<operand_0>::kIsRegisterOperand;
- static const int kSize = 1 + OperandTraits<operand_0>::kSize;
};
-template <>
-struct BytecodeTraits<OperandType::kNone, OPERAND_TERM> {
+template <AccumulatorUse accumulator_use>
+struct BytecodeTraits<accumulator_use> {
static inline OperandType GetOperandType(int i) {
UNREACHABLE();
return OperandType::kNone;
}
- static inline OperandSize GetOperandSize(int i) {
- UNREACHABLE();
- return OperandSize::kNone;
- }
-
- static inline int GetOperandOffset(int i) {
- UNREACHABLE();
- return 1;
- }
-
template <OperandType ot>
static inline bool HasAnyOperandsOfType() {
return false;
}
+ static inline bool IsScalable() { return false; }
+
+ static const AccumulatorUse kAccumulatorUse = accumulator_use;
static const int kOperandCount = 0;
static const int kRegisterOperandCount = 0;
static const int kRegisterOperandBitmap = 0;
- static const int kSize = 1 + OperandTraits<OperandType::kNone>::kSize;
};
+template <bool>
+struct OperandScaler {
+ static int Multiply(int size, int operand_scale) { return 0; }
+};
+
+template <>
+struct OperandScaler<false> {
+ static int Multiply(int size, int operand_scale) { return size; }
+};
+
+template <>
+struct OperandScaler<true> {
+ static int Multiply(int size, int operand_scale) {
+ return size * operand_scale;
+ }
+};
+
+static OperandSize ScaledOperandSize(OperandType operand_type,
+ OperandScale operand_scale) {
+ switch (operand_type) {
+#define CASE(Name, TypeInfo) \
+ case OperandType::k##Name: { \
+ OperandSize base_size = OperandTypeInfoTraits<TypeInfo>::kUnscaledSize; \
+ int size = \
+ OperandScaler<OperandTypeInfoTraits<TypeInfo>::kIsScalable>::Multiply( \
+ static_cast<int>(base_size), static_cast<int>(operand_scale)); \
+ OperandSize operand_size = static_cast<OperandSize>(size); \
+ DCHECK(operand_size == OperandSize::kByte || \
+ operand_size == OperandSize::kShort || \
+ operand_size == OperandSize::kQuad); \
+ return operand_size; \
+ }
+ OPERAND_TYPE_LIST(CASE)
+#undef CASE
+ }
+ UNREACHABLE();
+ return OperandSize::kNone;
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecodes.cc b/deps/v8/src/interpreter/bytecodes.cc
index c3b17c7b10..fd27f391aa 100644
--- a/deps/v8/src/interpreter/bytecodes.cc
+++ b/deps/v8/src/interpreter/bytecodes.cc
@@ -4,8 +4,11 @@
#include "src/interpreter/bytecodes.h"
+#include <iomanip>
+
#include "src/frames.h"
#include "src/interpreter/bytecode-traits.h"
+#include "src/interpreter/interpreter.h"
namespace v8 {
namespace internal {
@@ -25,6 +28,35 @@ const char* Bytecodes::ToString(Bytecode bytecode) {
return "";
}
+// static
+std::string Bytecodes::ToString(Bytecode bytecode, OperandScale operand_scale) {
+ static const char kSeparator = '.';
+
+ std::string value(ToString(bytecode));
+ if (operand_scale > OperandScale::kSingle) {
+ Bytecode prefix_bytecode = OperandScaleToPrefixBytecode(operand_scale);
+ std::string suffix = ToString(prefix_bytecode);
+ return value.append(1, kSeparator).append(suffix);
+ } else {
+ return value;
+ }
+}
+
+// static
+const char* Bytecodes::AccumulatorUseToString(AccumulatorUse accumulator_use) {
+ switch (accumulator_use) {
+ case AccumulatorUse::kNone:
+ return "None";
+ case AccumulatorUse::kRead:
+ return "Read";
+ case AccumulatorUse::kWrite:
+ return "Write";
+ case AccumulatorUse::kReadWrite:
+ return "ReadWrite";
+ }
+ UNREACHABLE();
+ return "";
+}
// static
const char* Bytecodes::OperandTypeToString(OperandType operand_type) {
@@ -39,6 +71,20 @@ const char* Bytecodes::OperandTypeToString(OperandType operand_type) {
return "";
}
+// static
+const char* Bytecodes::OperandScaleToString(OperandScale operand_scale) {
+ switch (operand_scale) {
+ case OperandScale::kSingle:
+ return "Single";
+ case OperandScale::kDouble:
+ return "Double";
+ case OperandScale::kQuadruple:
+ return "Quadruple";
+ case OperandScale::kInvalid:
+ UNREACHABLE();
+ }
+ return "";
+}
// static
const char* Bytecodes::OperandSizeToString(OperandSize operand_size) {
@@ -49,6 +95,8 @@ const char* Bytecodes::OperandSizeToString(OperandSize operand_size) {
return "Byte";
case OperandSize::kShort:
return "Short";
+ case OperandSize::kQuad:
+ return "Quad";
}
UNREACHABLE();
return "";
@@ -72,31 +120,34 @@ Bytecode Bytecodes::FromByte(uint8_t value) {
// static
Bytecode Bytecodes::GetDebugBreak(Bytecode bytecode) {
- switch (Size(bytecode)) {
-#define CASE(Name, ...) \
- case BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::kSize: \
- return Bytecode::k##Name;
- DEBUG_BREAK_BYTECODE_LIST(CASE)
-#undef CASE
- default:
- break;
+ DCHECK(!IsDebugBreak(bytecode));
+ if (bytecode == Bytecode::kWide) {
+ return Bytecode::kDebugBreakWide;
+ }
+ if (bytecode == Bytecode::kExtraWide) {
+ return Bytecode::kDebugBreakExtraWide;
}
+ int bytecode_size = Size(bytecode, OperandScale::kSingle);
+#define RETURN_IF_DEBUG_BREAK_SIZE_MATCHES(Name, ...) \
+ if (bytecode_size == Size(Bytecode::k##Name, OperandScale::kSingle)) { \
+ return Bytecode::k##Name; \
+ }
+ DEBUG_BREAK_PLAIN_BYTECODE_LIST(RETURN_IF_DEBUG_BREAK_SIZE_MATCHES)
+#undef RETURN_IF_DEBUG_BREAK_SIZE_MATCHES
UNREACHABLE();
- return static_cast<Bytecode>(-1);
+ return Bytecode::kIllegal;
}
// static
-int Bytecodes::Size(Bytecode bytecode) {
- DCHECK(bytecode <= Bytecode::kLast);
- switch (bytecode) {
-#define CASE(Name, ...) \
- case Bytecode::k##Name: \
- return BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::kSize;
- BYTECODE_LIST(CASE)
-#undef CASE
+int Bytecodes::Size(Bytecode bytecode, OperandScale operand_scale) {
+ int size = 1;
+ for (int i = 0; i < NumberOfOperands(bytecode); i++) {
+ OperandSize operand_size = GetOperandSize(bytecode, i, operand_scale);
+ int delta = static_cast<int>(operand_size);
+ DCHECK(base::bits::IsPowerOfTwo32(static_cast<uint32_t>(delta)));
+ size += delta;
}
- UNREACHABLE();
- return 0;
+ return size;
}
@@ -106,7 +157,7 @@ int Bytecodes::NumberOfOperands(Bytecode bytecode) {
switch (bytecode) {
#define CASE(Name, ...) \
case Bytecode::k##Name: \
- return BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::kOperandCount;
+ return BytecodeTraits<__VA_ARGS__>::kOperandCount;
BYTECODE_LIST(CASE)
#undef CASE
}
@@ -119,9 +170,9 @@ int Bytecodes::NumberOfOperands(Bytecode bytecode) {
int Bytecodes::NumberOfRegisterOperands(Bytecode bytecode) {
DCHECK(bytecode <= Bytecode::kLast);
switch (bytecode) {
-#define CASE(Name, ...) \
- case Bytecode::k##Name: \
- typedef BytecodeTraits<__VA_ARGS__, OPERAND_TERM> Name##Trait; \
+#define CASE(Name, ...) \
+ case Bytecode::k##Name: \
+ typedef BytecodeTraits<__VA_ARGS__> Name##Trait; \
return Name##Trait::kRegisterOperandCount;
BYTECODE_LIST(CASE)
#undef CASE
@@ -131,42 +182,92 @@ int Bytecodes::NumberOfRegisterOperands(Bytecode bytecode) {
}
// static
-OperandType Bytecodes::GetOperandType(Bytecode bytecode, int i) {
+Bytecode Bytecodes::OperandScaleToPrefixBytecode(OperandScale operand_scale) {
+ switch (operand_scale) {
+ case OperandScale::kQuadruple:
+ return Bytecode::kExtraWide;
+ case OperandScale::kDouble:
+ return Bytecode::kWide;
+ default:
+ UNREACHABLE();
+ return Bytecode::kIllegal;
+ }
+}
+
+// static
+bool Bytecodes::OperandScaleRequiresPrefixBytecode(OperandScale operand_scale) {
+ return operand_scale != OperandScale::kSingle;
+}
+
+// static
+OperandScale Bytecodes::PrefixBytecodeToOperandScale(Bytecode bytecode) {
+ switch (bytecode) {
+ case Bytecode::kExtraWide:
+ case Bytecode::kDebugBreakExtraWide:
+ return OperandScale::kQuadruple;
+ case Bytecode::kWide:
+ case Bytecode::kDebugBreakWide:
+ return OperandScale::kDouble;
+ default:
+ UNREACHABLE();
+ return OperandScale::kSingle;
+ }
+}
+
+// static
+AccumulatorUse Bytecodes::GetAccumulatorUse(Bytecode bytecode) {
DCHECK(bytecode <= Bytecode::kLast);
switch (bytecode) {
#define CASE(Name, ...) \
case Bytecode::k##Name: \
- return BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::GetOperandType(i);
+ return BytecodeTraits<__VA_ARGS__>::kAccumulatorUse;
BYTECODE_LIST(CASE)
#undef CASE
}
UNREACHABLE();
- return OperandType::kNone;
+ return AccumulatorUse::kNone;
}
+// static
+bool Bytecodes::ReadsAccumulator(Bytecode bytecode) {
+ return (GetAccumulatorUse(bytecode) & AccumulatorUse::kRead) ==
+ AccumulatorUse::kRead;
+}
// static
-OperandSize Bytecodes::GetOperandSize(Bytecode bytecode, int i) {
+bool Bytecodes::WritesAccumulator(Bytecode bytecode) {
+ return (GetAccumulatorUse(bytecode) & AccumulatorUse::kWrite) ==
+ AccumulatorUse::kWrite;
+}
+
+// static
+OperandType Bytecodes::GetOperandType(Bytecode bytecode, int i) {
DCHECK(bytecode <= Bytecode::kLast);
switch (bytecode) {
#define CASE(Name, ...) \
case Bytecode::k##Name: \
- return BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::GetOperandSize(i);
+ return BytecodeTraits<__VA_ARGS__>::GetOperandType(i);
BYTECODE_LIST(CASE)
#undef CASE
}
UNREACHABLE();
- return OperandSize::kNone;
+ return OperandType::kNone;
}
+// static
+OperandSize Bytecodes::GetOperandSize(Bytecode bytecode, int i,
+ OperandScale operand_scale) {
+ OperandType op_type = GetOperandType(bytecode, i);
+ return ScaledOperandSize(op_type, operand_scale);
+}
// static
int Bytecodes::GetRegisterOperandBitmap(Bytecode bytecode) {
DCHECK(bytecode <= Bytecode::kLast);
switch (bytecode) {
-#define CASE(Name, ...) \
- case Bytecode::k##Name: \
- typedef BytecodeTraits<__VA_ARGS__, OPERAND_TERM> Name##Trait; \
+#define CASE(Name, ...) \
+ case Bytecode::k##Name: \
+ typedef BytecodeTraits<__VA_ARGS__> Name##Trait; \
return Name##Trait::kRegisterOperandBitmap;
BYTECODE_LIST(CASE)
#undef CASE
@@ -176,34 +277,25 @@ int Bytecodes::GetRegisterOperandBitmap(Bytecode bytecode) {
}
// static
-int Bytecodes::GetOperandOffset(Bytecode bytecode, int i) {
- DCHECK(bytecode <= Bytecode::kLast);
- switch (bytecode) {
-#define CASE(Name, ...) \
- case Bytecode::k##Name: \
- return BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::GetOperandOffset(i);
- BYTECODE_LIST(CASE)
-#undef CASE
+int Bytecodes::GetOperandOffset(Bytecode bytecode, int i,
+ OperandScale operand_scale) {
+ // TODO(oth): restore this to a statically determined constant.
+ int offset = 1;
+ for (int operand_index = 0; operand_index < i; ++operand_index) {
+ OperandSize operand_size =
+ GetOperandSize(bytecode, operand_index, operand_scale);
+ offset += static_cast<int>(operand_size);
}
- UNREACHABLE();
- return 0;
+ return offset;
}
-
// static
-OperandSize Bytecodes::SizeOfOperand(OperandType operand_type) {
- switch (operand_type) {
-#define CASE(Name, Size) \
- case OperandType::k##Name: \
- return Size;
- OPERAND_TYPE_LIST(CASE)
-#undef CASE
- }
- UNREACHABLE();
- return OperandSize::kNone;
+OperandSize Bytecodes::SizeOfOperand(OperandType operand_type,
+ OperandScale operand_scale) {
+ return static_cast<OperandSize>(
+ ScaledOperandSize(operand_type, operand_scale));
}
-
// static
bool Bytecodes::IsConditionalJumpImmediate(Bytecode bytecode) {
return bytecode == Bytecode::kJumpIfTrue ||
@@ -227,24 +319,10 @@ bool Bytecodes::IsConditionalJumpConstant(Bytecode bytecode) {
bytecode == Bytecode::kJumpIfUndefinedConstant;
}
-
-// static
-bool Bytecodes::IsConditionalJumpConstantWide(Bytecode bytecode) {
- return bytecode == Bytecode::kJumpIfTrueConstantWide ||
- bytecode == Bytecode::kJumpIfFalseConstantWide ||
- bytecode == Bytecode::kJumpIfToBooleanTrueConstantWide ||
- bytecode == Bytecode::kJumpIfToBooleanFalseConstantWide ||
- bytecode == Bytecode::kJumpIfNotHoleConstantWide ||
- bytecode == Bytecode::kJumpIfNullConstantWide ||
- bytecode == Bytecode::kJumpIfUndefinedConstantWide;
-}
-
-
// static
bool Bytecodes::IsConditionalJump(Bytecode bytecode) {
return IsConditionalJumpImmediate(bytecode) ||
- IsConditionalJumpConstant(bytecode) ||
- IsConditionalJumpConstantWide(bytecode);
+ IsConditionalJumpConstant(bytecode);
}
@@ -260,26 +338,23 @@ bool Bytecodes::IsJumpConstant(Bytecode bytecode) {
IsConditionalJumpConstant(bytecode);
}
-
-// static
-bool Bytecodes::IsJumpConstantWide(Bytecode bytecode) {
- return bytecode == Bytecode::kJumpConstantWide ||
- IsConditionalJumpConstantWide(bytecode);
-}
-
-
// static
bool Bytecodes::IsJump(Bytecode bytecode) {
- return IsJumpImmediate(bytecode) || IsJumpConstant(bytecode) ||
- IsJumpConstantWide(bytecode);
+ return IsJumpImmediate(bytecode) || IsJumpConstant(bytecode);
}
// static
bool Bytecodes::IsCallOrNew(Bytecode bytecode) {
return bytecode == Bytecode::kCall || bytecode == Bytecode::kTailCall ||
- bytecode == Bytecode::kNew || bytecode == Bytecode::kCallWide ||
- bytecode == Bytecode::kTailCallWide || bytecode == Bytecode::kNewWide;
+ bytecode == Bytecode::kNew;
+}
+
+// static
+bool Bytecodes::IsCallRuntime(Bytecode bytecode) {
+ return bytecode == Bytecode::kCallRuntime ||
+ bytecode == Bytecode::kCallRuntimeForPair ||
+ bytecode == Bytecode::kInvokeIntrinsic;
}
// static
@@ -296,31 +371,40 @@ bool Bytecodes::IsDebugBreak(Bytecode bytecode) {
}
// static
-bool Bytecodes::IsJumpOrReturn(Bytecode bytecode) {
- return bytecode == Bytecode::kReturn || IsJump(bytecode);
-}
-
-// static
-bool Bytecodes::IsIndexOperandType(OperandType operand_type) {
- return operand_type == OperandType::kIdx8 ||
- operand_type == OperandType::kIdx16;
+bool Bytecodes::IsBytecodeWithScalableOperands(Bytecode bytecode) {
+ switch (bytecode) {
+#define CASE(Name, ...) \
+ case Bytecode::k##Name: \
+ typedef BytecodeTraits<__VA_ARGS__> Name##Trait; \
+ return Name##Trait::IsScalable();
+ BYTECODE_LIST(CASE)
+#undef CASE
+ }
+ UNREACHABLE();
+ return false;
}
// static
-bool Bytecodes::IsImmediateOperandType(OperandType operand_type) {
- return operand_type == OperandType::kImm8;
+bool Bytecodes::IsPrefixScalingBytecode(Bytecode bytecode) {
+ switch (bytecode) {
+ case Bytecode::kExtraWide:
+ case Bytecode::kDebugBreakExtraWide:
+ case Bytecode::kWide:
+ case Bytecode::kDebugBreakWide:
+ return true;
+ default:
+ return false;
+ }
}
// static
-bool Bytecodes::IsRegisterCountOperandType(OperandType operand_type) {
- return (operand_type == OperandType::kRegCount8 ||
- operand_type == OperandType::kRegCount16);
+bool Bytecodes::IsJumpOrReturn(Bytecode bytecode) {
+ return bytecode == Bytecode::kReturn || IsJump(bytecode);
}
// static
bool Bytecodes::IsMaybeRegisterOperandType(OperandType operand_type) {
- return (operand_type == OperandType::kMaybeReg8 ||
- operand_type == OperandType::kMaybeReg16);
+ return operand_type == OperandType::kMaybeReg;
}
// static
@@ -376,41 +460,102 @@ bool Bytecodes::IsRegisterOutputOperandType(OperandType operand_type) {
return false;
}
-namespace {
-static Register DecodeRegister(const uint8_t* operand_start,
- OperandType operand_type) {
- switch (Bytecodes::SizeOfOperand(operand_type)) {
+// static
+bool Bytecodes::IsUnsignedOperandType(OperandType operand_type) {
+ switch (operand_type) {
+#define CASE(Name, _) \
+ case OperandType::k##Name: \
+ return OperandTraits<OperandType::k##Name>::TypeInfo::kIsUnsigned;
+ OPERAND_TYPE_LIST(CASE)
+#undef CASE
+ }
+ UNREACHABLE();
+ return false;
+}
+
+// static
+OperandScale Bytecodes::NextOperandScale(OperandScale operand_scale) {
+ DCHECK(operand_scale >= OperandScale::kSingle &&
+ operand_scale <= OperandScale::kMaxValid);
+ return static_cast<OperandScale>(2 * static_cast<int>(operand_scale));
+}
+
+// static
+Register Bytecodes::DecodeRegisterOperand(const uint8_t* operand_start,
+ OperandType operand_type,
+ OperandScale operand_scale) {
+ DCHECK(Bytecodes::IsRegisterOperandType(operand_type));
+ int32_t operand =
+ DecodeSignedOperand(operand_start, operand_type, operand_scale);
+ return Register::FromOperand(operand);
+}
+
+// static
+int32_t Bytecodes::DecodeSignedOperand(const uint8_t* operand_start,
+ OperandType operand_type,
+ OperandScale operand_scale) {
+ DCHECK(!Bytecodes::IsUnsignedOperandType(operand_type));
+ switch (Bytecodes::SizeOfOperand(operand_type, operand_scale)) {
case OperandSize::kByte:
- return Register::FromOperand(*operand_start);
+ return static_cast<int8_t>(*operand_start);
case OperandSize::kShort:
- return Register::FromWideOperand(ReadUnalignedUInt16(operand_start));
- case OperandSize::kNone: {
+ return static_cast<int16_t>(ReadUnalignedUInt16(operand_start));
+ case OperandSize::kQuad:
+ return static_cast<int32_t>(ReadUnalignedUInt32(operand_start));
+ case OperandSize::kNone:
UNREACHABLE();
- }
}
- return Register();
+ return 0;
}
-} // namespace
+// static
+uint32_t Bytecodes::DecodeUnsignedOperand(const uint8_t* operand_start,
+ OperandType operand_type,
+ OperandScale operand_scale) {
+ DCHECK(Bytecodes::IsUnsignedOperandType(operand_type));
+ switch (Bytecodes::SizeOfOperand(operand_type, operand_scale)) {
+ case OperandSize::kByte:
+ return *operand_start;
+ case OperandSize::kShort:
+ return ReadUnalignedUInt16(operand_start);
+ case OperandSize::kQuad:
+ return ReadUnalignedUInt32(operand_start);
+ case OperandSize::kNone:
+ UNREACHABLE();
+ }
+ return 0;
+}
// static
std::ostream& Bytecodes::Decode(std::ostream& os, const uint8_t* bytecode_start,
int parameter_count) {
- Vector<char> buf = Vector<char>::New(50);
-
Bytecode bytecode = Bytecodes::FromByte(bytecode_start[0]);
- int bytecode_size = Bytecodes::Size(bytecode);
+ int prefix_offset = 0;
+ OperandScale operand_scale = OperandScale::kSingle;
+ if (IsPrefixScalingBytecode(bytecode)) {
+ prefix_offset = 1;
+ operand_scale = Bytecodes::PrefixBytecodeToOperandScale(bytecode);
+ bytecode = Bytecodes::FromByte(bytecode_start[1]);
+ }
+
+ // Prepare to print bytecode and operands as hex digits.
+ std::ios saved_format(nullptr);
+ saved_format.copyfmt(saved_format);
+ os.fill('0');
+ os.flags(std::ios::hex);
- for (int i = 0; i < bytecode_size; i++) {
- SNPrintF(buf, "%02x ", bytecode_start[i]);
- os << buf.start();
+ int bytecode_size = Bytecodes::Size(bytecode, operand_scale);
+ for (int i = 0; i < prefix_offset + bytecode_size; i++) {
+ os << std::setw(2) << static_cast<uint32_t>(bytecode_start[i]) << ' ';
}
+ os.copyfmt(saved_format);
+
const int kBytecodeColumnSize = 6;
- for (int i = bytecode_size; i < kBytecodeColumnSize; i++) {
+ for (int i = prefix_offset + bytecode_size; i < kBytecodeColumnSize; i++) {
os << " ";
}
- os << bytecode << " ";
+ os << Bytecodes::ToString(bytecode, operand_scale) << " ";
// Operands for the debug break are from the original instruction.
if (IsDebugBreak(bytecode)) return os;
@@ -420,42 +565,42 @@ std::ostream& Bytecodes::Decode(std::ostream& os, const uint8_t* bytecode_start,
for (int i = 0; i < number_of_operands; i++) {
OperandType op_type = GetOperandType(bytecode, i);
const uint8_t* operand_start =
- &bytecode_start[GetOperandOffset(bytecode, i)];
+ &bytecode_start[prefix_offset +
+ GetOperandOffset(bytecode, i, operand_scale)];
switch (op_type) {
- case interpreter::OperandType::kRegCount8:
- os << "#" << static_cast<unsigned int>(*operand_start);
- break;
- case interpreter::OperandType::kRegCount16:
- os << '#' << ReadUnalignedUInt16(operand_start);
+ case interpreter::OperandType::kRegCount:
+ os << "#"
+ << DecodeUnsignedOperand(operand_start, op_type, operand_scale);
break;
- case interpreter::OperandType::kIdx8:
- os << "[" << static_cast<unsigned int>(*operand_start) << "]";
+ case interpreter::OperandType::kIdx:
+ case interpreter::OperandType::kRuntimeId:
+ os << "["
+ << DecodeUnsignedOperand(operand_start, op_type, operand_scale)
+ << "]";
break;
- case interpreter::OperandType::kIdx16:
- os << "[" << ReadUnalignedUInt16(operand_start) << "]";
+ case interpreter::OperandType::kImm:
+ os << "[" << DecodeSignedOperand(operand_start, op_type, operand_scale)
+ << "]";
break;
- case interpreter::OperandType::kImm8:
- os << "#" << static_cast<int>(static_cast<int8_t>(*operand_start));
+ case interpreter::OperandType::kFlag8:
+ os << "#"
+ << DecodeUnsignedOperand(operand_start, op_type, operand_scale);
break;
- case interpreter::OperandType::kMaybeReg8:
- case interpreter::OperandType::kMaybeReg16:
- case interpreter::OperandType::kReg8:
- case interpreter::OperandType::kReg16:
- case interpreter::OperandType::kRegOut8:
- case interpreter::OperandType::kRegOut16: {
- Register reg = DecodeRegister(operand_start, op_type);
+ case interpreter::OperandType::kMaybeReg:
+ case interpreter::OperandType::kReg:
+ case interpreter::OperandType::kRegOut: {
+ Register reg =
+ DecodeRegisterOperand(operand_start, op_type, operand_scale);
os << reg.ToString(parameter_count);
break;
}
- case interpreter::OperandType::kRegOutTriple8:
- case interpreter::OperandType::kRegOutTriple16:
+ case interpreter::OperandType::kRegOutTriple:
range += 1;
- case interpreter::OperandType::kRegOutPair8:
- case interpreter::OperandType::kRegOutPair16:
- case interpreter::OperandType::kRegPair8:
- case interpreter::OperandType::kRegPair16: {
+ case interpreter::OperandType::kRegOutPair:
+ case interpreter::OperandType::kRegPair: {
range += 1;
- Register first_reg = DecodeRegister(operand_start, op_type);
+ Register first_reg =
+ DecodeRegisterOperand(operand_start, op_type, operand_scale);
Register last_reg = Register(first_reg.index() + range);
os << first_reg.ToString(parameter_count) << "-"
<< last_reg.ToString(parameter_count);
@@ -472,20 +617,33 @@ std::ostream& Bytecodes::Decode(std::ostream& os, const uint8_t* bytecode_start,
return os;
}
+// static
+bool Bytecodes::BytecodeHasHandler(Bytecode bytecode,
+ OperandScale operand_scale) {
+ return operand_scale == OperandScale::kSingle ||
+ Bytecodes::IsBytecodeWithScalableOperands(bytecode);
+}
+
std::ostream& operator<<(std::ostream& os, const Bytecode& bytecode) {
return os << Bytecodes::ToString(bytecode);
}
-
-std::ostream& operator<<(std::ostream& os, const OperandType& operand_type) {
- return os << Bytecodes::OperandTypeToString(operand_type);
+std::ostream& operator<<(std::ostream& os, const AccumulatorUse& use) {
+ return os << Bytecodes::AccumulatorUseToString(use);
}
-
std::ostream& operator<<(std::ostream& os, const OperandSize& operand_size) {
return os << Bytecodes::OperandSizeToString(operand_size);
}
+std::ostream& operator<<(std::ostream& os, const OperandScale& operand_scale) {
+ return os << Bytecodes::OperandScaleToString(operand_scale);
+}
+
+std::ostream& operator<<(std::ostream& os, const OperandType& operand_type) {
+ return os << Bytecodes::OperandTypeToString(operand_type);
+}
+
static const int kLastParamRegisterIndex =
-InterpreterFrameConstants::kLastParamFromRegisterPointer / kPointerSize;
static const int kFunctionClosureRegisterIndex =
@@ -495,29 +653,17 @@ static const int kCurrentContextRegisterIndex =
static const int kNewTargetRegisterIndex =
-InterpreterFrameConstants::kNewTargetFromRegisterPointer / kPointerSize;
-// The register space is a signed 16-bit space. Register operands
-// occupy range above 0. Parameter indices are biased with the
-// negative value kLastParamRegisterIndex for ease of access in the
-// interpreter.
-static const int kMaxParameterIndex = kMaxInt16 + kLastParamRegisterIndex;
-static const int kMaxRegisterIndex = -kMinInt16;
-static const int kMaxReg8Index = -kMinInt8;
-static const int kMinReg8Index = -kMaxInt8;
-static const int kMaxReg16Index = -kMinInt16;
-static const int kMinReg16Index = -kMaxInt16;
-
bool Register::is_byte_operand() const {
- return index_ >= kMinReg8Index && index_ <= kMaxReg8Index;
+ return index_ >= -kMaxInt8 && index_ <= -kMinInt8;
}
bool Register::is_short_operand() const {
- return index_ >= kMinReg16Index && index_ <= kMaxReg16Index;
+ return index_ >= -kMaxInt16 && index_ <= -kMinInt16;
}
Register Register::FromParameterIndex(int index, int parameter_count) {
DCHECK_GE(index, 0);
DCHECK_LT(index, parameter_count);
- DCHECK_LE(parameter_count, kMaxParameterIndex + 1);
int register_index = kLastParamRegisterIndex - parameter_count + index + 1;
DCHECK_LT(register_index, 0);
return Register(register_index);
@@ -557,44 +703,6 @@ bool Register::is_new_target() const {
return index() == kNewTargetRegisterIndex;
}
-int Register::MaxParameterIndex() { return kMaxParameterIndex; }
-
-int Register::MaxRegisterIndex() { return kMaxRegisterIndex; }
-
-int Register::MaxRegisterIndexForByteOperand() { return kMaxReg8Index; }
-
-uint8_t Register::ToOperand() const {
- DCHECK(is_byte_operand());
- return static_cast<uint8_t>(-index_);
-}
-
-
-Register Register::FromOperand(uint8_t operand) {
- return Register(-static_cast<int8_t>(operand));
-}
-
-
-uint16_t Register::ToWideOperand() const {
- DCHECK(is_short_operand());
- return static_cast<uint16_t>(-index_);
-}
-
-
-Register Register::FromWideOperand(uint16_t operand) {
- return Register(-static_cast<int16_t>(operand));
-}
-
-
-uint32_t Register::ToRawOperand() const {
- return static_cast<uint32_t>(-index_);
-}
-
-
-Register Register::FromRawOperand(uint32_t operand) {
- return Register(-static_cast<int32_t>(operand));
-}
-
-
bool Register::AreContiguous(Register reg1, Register reg2, Register reg3,
Register reg4, Register reg5) {
if (reg1.index() + 1 != reg2.index()) {
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index d4863b1662..23612713aa 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -15,37 +15,24 @@ namespace v8 {
namespace internal {
namespace interpreter {
-#define INVALID_OPERAND_TYPE_LIST(V) \
- V(None, OperandSize::kNone)
-
-#define REGISTER_INPUT_OPERAND_TYPE_LIST(V) \
- /* Byte operands. */ \
- V(MaybeReg8, OperandSize::kByte) \
- V(Reg8, OperandSize::kByte) \
- V(RegPair8, OperandSize::kByte) \
- /* Short operands. */ \
- V(MaybeReg16, OperandSize::kShort) \
- V(Reg16, OperandSize::kShort) \
- V(RegPair16, OperandSize::kShort)
-
-#define REGISTER_OUTPUT_OPERAND_TYPE_LIST(V) \
- /* Byte operands. */ \
- V(RegOut8, OperandSize::kByte) \
- V(RegOutPair8, OperandSize::kByte) \
- V(RegOutTriple8, OperandSize::kByte) \
- /* Short operands. */ \
- V(RegOut16, OperandSize::kShort) \
- V(RegOutPair16, OperandSize::kShort) \
- V(RegOutTriple16, OperandSize::kShort)
-
-#define SCALAR_OPERAND_TYPE_LIST(V) \
- /* Byte operands. */ \
- V(Idx8, OperandSize::kByte) \
- V(Imm8, OperandSize::kByte) \
- V(RegCount8, OperandSize::kByte) \
- /* Short operands. */ \
- V(Idx16, OperandSize::kShort) \
- V(RegCount16, OperandSize::kShort)
+#define INVALID_OPERAND_TYPE_LIST(V) V(None, OperandTypeInfo::kNone)
+
+#define REGISTER_INPUT_OPERAND_TYPE_LIST(V) \
+ V(MaybeReg, OperandTypeInfo::kScalableSignedByte) \
+ V(Reg, OperandTypeInfo::kScalableSignedByte) \
+ V(RegPair, OperandTypeInfo::kScalableSignedByte)
+
+#define REGISTER_OUTPUT_OPERAND_TYPE_LIST(V) \
+ V(RegOut, OperandTypeInfo::kScalableSignedByte) \
+ V(RegOutPair, OperandTypeInfo::kScalableSignedByte) \
+ V(RegOutTriple, OperandTypeInfo::kScalableSignedByte)
+
+#define SCALAR_OPERAND_TYPE_LIST(V) \
+ V(Flag8, OperandTypeInfo::kFixedUnsignedByte) \
+ V(Idx, OperandTypeInfo::kScalableUnsignedByte) \
+ V(Imm, OperandTypeInfo::kScalableSignedByte) \
+ V(RegCount, OperandTypeInfo::kScalableUnsignedByte) \
+ V(RuntimeId, OperandTypeInfo::kFixedUnsignedShort)
#define REGISTER_OPERAND_TYPE_LIST(V) \
REGISTER_INPUT_OPERAND_TYPE_LIST(V) \
@@ -60,235 +47,258 @@ namespace interpreter {
NON_REGISTER_OPERAND_TYPE_LIST(V) \
REGISTER_OPERAND_TYPE_LIST(V)
-// Define one debug break bytecode for each operands size.
-#define DEBUG_BREAK_BYTECODE_LIST(V) \
- V(DebugBreak0, OperandType::kNone) \
- V(DebugBreak1, OperandType::kReg8) \
- V(DebugBreak2, OperandType::kReg16) \
- V(DebugBreak3, OperandType::kReg16, OperandType::kReg8) \
- V(DebugBreak4, OperandType::kReg16, OperandType::kReg16) \
- V(DebugBreak5, OperandType::kReg16, OperandType::kReg16, OperandType::kReg8) \
- V(DebugBreak6, OperandType::kReg16, OperandType::kReg16, \
- OperandType::kReg16) \
- V(DebugBreak7, OperandType::kReg16, OperandType::kReg16, \
- OperandType::kReg16, OperandType::kReg8) \
- V(DebugBreak8, OperandType::kReg16, OperandType::kReg16, \
- OperandType::kReg16, OperandType::kReg16)
+// Define one debug break bytecode for each possible size of unscaled
+// bytecodes. Format is V(<bytecode>, <accumulator_use>, <operands>).
+#define DEBUG_BREAK_PLAIN_BYTECODE_LIST(V) \
+ V(DebugBreak0, AccumulatorUse::kRead) \
+ V(DebugBreak1, AccumulatorUse::kRead, OperandType::kReg) \
+ V(DebugBreak2, AccumulatorUse::kRead, OperandType::kReg, OperandType::kReg) \
+ V(DebugBreak3, AccumulatorUse::kRead, OperandType::kReg, OperandType::kReg, \
+ OperandType::kReg) \
+ V(DebugBreak4, AccumulatorUse::kRead, OperandType::kReg, OperandType::kReg, \
+ OperandType::kReg, OperandType::kReg) \
+ V(DebugBreak5, AccumulatorUse::kRead, OperandType::kRuntimeId, \
+ OperandType::kReg, OperandType::kReg) \
+ V(DebugBreak6, AccumulatorUse::kRead, OperandType::kRuntimeId, \
+ OperandType::kReg, OperandType::kReg, OperandType::kReg)
+
+// Define one debug break for each widening prefix.
+#define DEBUG_BREAK_PREFIX_BYTECODE_LIST(V) \
+ V(DebugBreakWide, AccumulatorUse::kRead) \
+ V(DebugBreakExtraWide, AccumulatorUse::kRead)
+
+#define DEBUG_BREAK_BYTECODE_LIST(V) \
+ DEBUG_BREAK_PLAIN_BYTECODE_LIST(V) \
+ DEBUG_BREAK_PREFIX_BYTECODE_LIST(V)
// The list of bytecodes which are interpreted by the interpreter.
-#define BYTECODE_LIST(V) \
- \
- /* Loading the accumulator */ \
- V(LdaZero, OperandType::kNone) \
- V(LdaSmi8, OperandType::kImm8) \
- V(LdaUndefined, OperandType::kNone) \
- V(LdaNull, OperandType::kNone) \
- V(LdaTheHole, OperandType::kNone) \
- V(LdaTrue, OperandType::kNone) \
- V(LdaFalse, OperandType::kNone) \
- V(LdaConstant, OperandType::kIdx8) \
- V(LdaConstantWide, OperandType::kIdx16) \
- \
- /* Globals */ \
- V(LdaGlobal, OperandType::kIdx8, OperandType::kIdx8) \
- V(LdaGlobalInsideTypeof, OperandType::kIdx8, OperandType::kIdx8) \
- V(LdaGlobalWide, OperandType::kIdx16, OperandType::kIdx16) \
- V(LdaGlobalInsideTypeofWide, OperandType::kIdx16, OperandType::kIdx16) \
- V(StaGlobalSloppy, OperandType::kIdx8, OperandType::kIdx8) \
- V(StaGlobalStrict, OperandType::kIdx8, OperandType::kIdx8) \
- V(StaGlobalSloppyWide, OperandType::kIdx16, OperandType::kIdx16) \
- V(StaGlobalStrictWide, OperandType::kIdx16, OperandType::kIdx16) \
- \
- /* Context operations */ \
- V(PushContext, OperandType::kReg8) \
- V(PopContext, OperandType::kReg8) \
- V(LdaContextSlot, OperandType::kReg8, OperandType::kIdx8) \
- V(StaContextSlot, OperandType::kReg8, OperandType::kIdx8) \
- V(LdaContextSlotWide, OperandType::kReg8, OperandType::kIdx16) \
- V(StaContextSlotWide, OperandType::kReg8, OperandType::kIdx16) \
- \
- /* Load-Store lookup slots */ \
- V(LdaLookupSlot, OperandType::kIdx8) \
- V(LdaLookupSlotInsideTypeof, OperandType::kIdx8) \
- V(LdaLookupSlotWide, OperandType::kIdx16) \
- V(LdaLookupSlotInsideTypeofWide, OperandType::kIdx16) \
- V(StaLookupSlotSloppy, OperandType::kIdx8) \
- V(StaLookupSlotStrict, OperandType::kIdx8) \
- V(StaLookupSlotSloppyWide, OperandType::kIdx16) \
- V(StaLookupSlotStrictWide, OperandType::kIdx16) \
- \
- /* Register-accumulator transfers */ \
- V(Ldar, OperandType::kReg8) \
- V(Star, OperandType::kRegOut8) \
- \
- /* Register-register transfers */ \
- V(Mov, OperandType::kReg8, OperandType::kRegOut8) \
- V(MovWide, OperandType::kReg16, OperandType::kRegOut16) \
- \
- /* LoadIC operations */ \
- V(LoadIC, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8) \
- V(KeyedLoadIC, OperandType::kReg8, OperandType::kIdx8) \
- V(LoadICWide, OperandType::kReg8, OperandType::kIdx16, OperandType::kIdx16) \
- V(KeyedLoadICWide, OperandType::kReg8, OperandType::kIdx16) \
- \
- /* StoreIC operations */ \
- V(StoreICSloppy, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8) \
- V(StoreICStrict, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8) \
- V(KeyedStoreICSloppy, OperandType::kReg8, OperandType::kReg8, \
- OperandType::kIdx8) \
- V(KeyedStoreICStrict, OperandType::kReg8, OperandType::kReg8, \
- OperandType::kIdx8) \
- V(StoreICSloppyWide, OperandType::kReg8, OperandType::kIdx16, \
- OperandType::kIdx16) \
- V(StoreICStrictWide, OperandType::kReg8, OperandType::kIdx16, \
- OperandType::kIdx16) \
- V(KeyedStoreICSloppyWide, OperandType::kReg8, OperandType::kReg8, \
- OperandType::kIdx16) \
- V(KeyedStoreICStrictWide, OperandType::kReg8, OperandType::kReg8, \
- OperandType::kIdx16) \
- \
- /* Binary Operators */ \
- V(Add, OperandType::kReg8) \
- V(Sub, OperandType::kReg8) \
- V(Mul, OperandType::kReg8) \
- V(Div, OperandType::kReg8) \
- V(Mod, OperandType::kReg8) \
- V(BitwiseOr, OperandType::kReg8) \
- V(BitwiseXor, OperandType::kReg8) \
- V(BitwiseAnd, OperandType::kReg8) \
- V(ShiftLeft, OperandType::kReg8) \
- V(ShiftRight, OperandType::kReg8) \
- V(ShiftRightLogical, OperandType::kReg8) \
- \
- /* Unary Operators */ \
- V(Inc, OperandType::kNone) \
- V(Dec, OperandType::kNone) \
- V(LogicalNot, OperandType::kNone) \
- V(TypeOf, OperandType::kNone) \
- V(DeletePropertyStrict, OperandType::kReg8) \
- V(DeletePropertySloppy, OperandType::kReg8) \
- \
- /* Call operations */ \
- V(Call, OperandType::kReg8, OperandType::kReg8, OperandType::kRegCount8, \
- OperandType::kIdx8) \
- V(CallWide, OperandType::kReg16, OperandType::kReg16, \
- OperandType::kRegCount16, OperandType::kIdx16) \
- V(TailCall, OperandType::kReg8, OperandType::kReg8, OperandType::kRegCount8, \
- OperandType::kIdx8) \
- V(TailCallWide, OperandType::kReg16, OperandType::kReg16, \
- OperandType::kRegCount16, OperandType::kIdx16) \
- V(CallRuntime, OperandType::kIdx16, OperandType::kMaybeReg8, \
- OperandType::kRegCount8) \
- V(CallRuntimeWide, OperandType::kIdx16, OperandType::kMaybeReg16, \
- OperandType::kRegCount8) \
- V(CallRuntimeForPair, OperandType::kIdx16, OperandType::kMaybeReg8, \
- OperandType::kRegCount8, OperandType::kRegOutPair8) \
- V(CallRuntimeForPairWide, OperandType::kIdx16, OperandType::kMaybeReg16, \
- OperandType::kRegCount8, OperandType::kRegOutPair16) \
- V(CallJSRuntime, OperandType::kIdx16, OperandType::kReg8, \
- OperandType::kRegCount8) \
- V(CallJSRuntimeWide, OperandType::kIdx16, OperandType::kReg16, \
- OperandType::kRegCount16) \
- \
- /* New operator */ \
- V(New, OperandType::kReg8, OperandType::kMaybeReg8, OperandType::kRegCount8) \
- V(NewWide, OperandType::kReg16, OperandType::kMaybeReg16, \
- OperandType::kRegCount16) \
- \
- /* Test Operators */ \
- V(TestEqual, OperandType::kReg8) \
- V(TestNotEqual, OperandType::kReg8) \
- V(TestEqualStrict, OperandType::kReg8) \
- V(TestNotEqualStrict, OperandType::kReg8) \
- V(TestLessThan, OperandType::kReg8) \
- V(TestGreaterThan, OperandType::kReg8) \
- V(TestLessThanOrEqual, OperandType::kReg8) \
- V(TestGreaterThanOrEqual, OperandType::kReg8) \
- V(TestInstanceOf, OperandType::kReg8) \
- V(TestIn, OperandType::kReg8) \
- \
- /* Cast operators */ \
- V(ToName, OperandType::kNone) \
- V(ToNumber, OperandType::kNone) \
- V(ToObject, OperandType::kNone) \
- \
- /* Literals */ \
- V(CreateRegExpLiteral, OperandType::kIdx8, OperandType::kIdx8, \
- OperandType::kImm8) \
- V(CreateArrayLiteral, OperandType::kIdx8, OperandType::kIdx8, \
- OperandType::kImm8) \
- V(CreateObjectLiteral, OperandType::kIdx8, OperandType::kIdx8, \
- OperandType::kImm8) \
- V(CreateRegExpLiteralWide, OperandType::kIdx16, OperandType::kIdx16, \
- OperandType::kImm8) \
- V(CreateArrayLiteralWide, OperandType::kIdx16, OperandType::kIdx16, \
- OperandType::kImm8) \
- V(CreateObjectLiteralWide, OperandType::kIdx16, OperandType::kIdx16, \
- OperandType::kImm8) \
- \
- /* Closure allocation */ \
- V(CreateClosure, OperandType::kIdx8, OperandType::kImm8) \
- V(CreateClosureWide, OperandType::kIdx16, OperandType::kImm8) \
- \
- /* Arguments allocation */ \
- V(CreateMappedArguments, OperandType::kNone) \
- V(CreateUnmappedArguments, OperandType::kNone) \
- V(CreateRestParameter, OperandType::kNone) \
- \
- /* Control Flow */ \
- V(Jump, OperandType::kImm8) \
- V(JumpConstant, OperandType::kIdx8) \
- V(JumpConstantWide, OperandType::kIdx16) \
- V(JumpIfTrue, OperandType::kImm8) \
- V(JumpIfTrueConstant, OperandType::kIdx8) \
- V(JumpIfTrueConstantWide, OperandType::kIdx16) \
- V(JumpIfFalse, OperandType::kImm8) \
- V(JumpIfFalseConstant, OperandType::kIdx8) \
- V(JumpIfFalseConstantWide, OperandType::kIdx16) \
- V(JumpIfToBooleanTrue, OperandType::kImm8) \
- V(JumpIfToBooleanTrueConstant, OperandType::kIdx8) \
- V(JumpIfToBooleanTrueConstantWide, OperandType::kIdx16) \
- V(JumpIfToBooleanFalse, OperandType::kImm8) \
- V(JumpIfToBooleanFalseConstant, OperandType::kIdx8) \
- V(JumpIfToBooleanFalseConstantWide, OperandType::kIdx16) \
- V(JumpIfNull, OperandType::kImm8) \
- V(JumpIfNullConstant, OperandType::kIdx8) \
- V(JumpIfNullConstantWide, OperandType::kIdx16) \
- V(JumpIfUndefined, OperandType::kImm8) \
- V(JumpIfUndefinedConstant, OperandType::kIdx8) \
- V(JumpIfUndefinedConstantWide, OperandType::kIdx16) \
- V(JumpIfNotHole, OperandType::kImm8) \
- V(JumpIfNotHoleConstant, OperandType::kIdx8) \
- V(JumpIfNotHoleConstantWide, OperandType::kIdx16) \
- \
- /* Complex flow control For..in */ \
- V(ForInPrepare, OperandType::kRegOutTriple8) \
- V(ForInPrepareWide, OperandType::kRegOutTriple16) \
- V(ForInDone, OperandType::kReg8, OperandType::kReg8) \
- V(ForInNext, OperandType::kReg8, OperandType::kReg8, OperandType::kRegPair8) \
- V(ForInNextWide, OperandType::kReg16, OperandType::kReg16, \
- OperandType::kRegPair16) \
- V(ForInStep, OperandType::kReg8) \
- \
- /* Perform a stack guard check */ \
- V(StackCheck, OperandType::kNone) \
- \
- /* Non-local flow control */ \
- V(Throw, OperandType::kNone) \
- V(ReThrow, OperandType::kNone) \
- V(Return, OperandType::kNone) \
- \
- /* Debugger */ \
- V(Debugger, OperandType::kNone) \
- DEBUG_BREAK_BYTECODE_LIST(V)
-
-// Enumeration of the size classes of operand types used by bytecodes.
+#define BYTECODE_LIST(V) \
+ /* Extended width operands */ \
+ V(Wide, AccumulatorUse::kNone) \
+ V(ExtraWide, AccumulatorUse::kNone) \
+ \
+ /* Loading the accumulator */ \
+ V(LdaZero, AccumulatorUse::kWrite) \
+ V(LdaSmi, AccumulatorUse::kWrite, OperandType::kImm) \
+ V(LdaUndefined, AccumulatorUse::kWrite) \
+ V(LdaNull, AccumulatorUse::kWrite) \
+ V(LdaTheHole, AccumulatorUse::kWrite) \
+ V(LdaTrue, AccumulatorUse::kWrite) \
+ V(LdaFalse, AccumulatorUse::kWrite) \
+ V(LdaConstant, AccumulatorUse::kWrite, OperandType::kIdx) \
+ \
+ /* Globals */ \
+ V(LdaGlobal, AccumulatorUse::kWrite, OperandType::kIdx, OperandType::kIdx) \
+ V(LdaGlobalInsideTypeof, AccumulatorUse::kWrite, OperandType::kIdx, \
+ OperandType::kIdx) \
+ V(StaGlobalSloppy, AccumulatorUse::kRead, OperandType::kIdx, \
+ OperandType::kIdx) \
+ V(StaGlobalStrict, AccumulatorUse::kRead, OperandType::kIdx, \
+ OperandType::kIdx) \
+ \
+ /* Context operations */ \
+ V(PushContext, AccumulatorUse::kRead, OperandType::kReg) \
+ V(PopContext, AccumulatorUse::kNone, OperandType::kReg) \
+ V(LdaContextSlot, AccumulatorUse::kWrite, OperandType::kReg, \
+ OperandType::kIdx) \
+ V(StaContextSlot, AccumulatorUse::kRead, OperandType::kReg, \
+ OperandType::kIdx) \
+ \
+ /* Load-Store lookup slots */ \
+ V(LdaLookupSlot, AccumulatorUse::kWrite, OperandType::kIdx) \
+ V(LdaLookupSlotInsideTypeof, AccumulatorUse::kWrite, OperandType::kIdx) \
+ V(StaLookupSlotSloppy, AccumulatorUse::kReadWrite, OperandType::kIdx) \
+ V(StaLookupSlotStrict, AccumulatorUse::kReadWrite, OperandType::kIdx) \
+ \
+ /* Register-accumulator transfers */ \
+ V(Ldar, AccumulatorUse::kWrite, OperandType::kReg) \
+ V(Star, AccumulatorUse::kRead, OperandType::kRegOut) \
+ \
+ /* Register-register transfers */ \
+ V(Mov, AccumulatorUse::kNone, OperandType::kReg, OperandType::kRegOut) \
+ \
+ /* LoadIC operations */ \
+ V(LoadIC, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kIdx, \
+ OperandType::kIdx) \
+ V(KeyedLoadIC, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ OperandType::kIdx) \
+ \
+ /* StoreIC operations */ \
+ V(StoreICSloppy, AccumulatorUse::kRead, OperandType::kReg, \
+ OperandType::kIdx, OperandType::kIdx) \
+ V(StoreICStrict, AccumulatorUse::kRead, OperandType::kReg, \
+ OperandType::kIdx, OperandType::kIdx) \
+ V(KeyedStoreICSloppy, AccumulatorUse::kRead, OperandType::kReg, \
+ OperandType::kReg, OperandType::kIdx) \
+ V(KeyedStoreICStrict, AccumulatorUse::kRead, OperandType::kReg, \
+ OperandType::kReg, OperandType::kIdx) \
+ \
+ /* Binary Operators */ \
+ V(Add, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(Sub, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(Mul, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(Div, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(Mod, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(BitwiseOr, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(BitwiseXor, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(BitwiseAnd, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(ShiftLeft, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(ShiftRight, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(ShiftRightLogical, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ \
+ /* Unary Operators */ \
+ V(Inc, AccumulatorUse::kReadWrite) \
+ V(Dec, AccumulatorUse::kReadWrite) \
+ V(LogicalNot, AccumulatorUse::kReadWrite) \
+ V(TypeOf, AccumulatorUse::kReadWrite) \
+ V(DeletePropertyStrict, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(DeletePropertySloppy, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ \
+ /* Call operations */ \
+ V(Call, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg, \
+ OperandType::kRegCount, OperandType::kIdx) \
+ V(TailCall, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg, \
+ OperandType::kRegCount, OperandType::kIdx) \
+ V(CallRuntime, AccumulatorUse::kWrite, OperandType::kRuntimeId, \
+ OperandType::kMaybeReg, OperandType::kRegCount) \
+ V(CallRuntimeForPair, AccumulatorUse::kNone, OperandType::kRuntimeId, \
+ OperandType::kMaybeReg, OperandType::kRegCount, OperandType::kRegOutPair) \
+ V(CallJSRuntime, AccumulatorUse::kWrite, OperandType::kIdx, \
+ OperandType::kReg, OperandType::kRegCount) \
+ \
+ /* Intrinsics */ \
+ V(InvokeIntrinsic, AccumulatorUse::kWrite, OperandType::kRuntimeId, \
+ OperandType::kMaybeReg, OperandType::kRegCount) \
+ \
+ /* New operator */ \
+ V(New, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ OperandType::kMaybeReg, OperandType::kRegCount) \
+ \
+ /* Test Operators */ \
+ V(TestEqual, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(TestNotEqual, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(TestEqualStrict, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(TestLessThan, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(TestGreaterThan, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(TestLessThanOrEqual, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(TestGreaterThanOrEqual, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(TestInstanceOf, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(TestIn, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ \
+ /* Cast operators */ \
+ V(ToName, AccumulatorUse::kReadWrite) \
+ V(ToNumber, AccumulatorUse::kReadWrite) \
+ V(ToObject, AccumulatorUse::kReadWrite) \
+ \
+ /* Literals */ \
+ V(CreateRegExpLiteral, AccumulatorUse::kWrite, OperandType::kIdx, \
+ OperandType::kIdx, OperandType::kFlag8) \
+ V(CreateArrayLiteral, AccumulatorUse::kWrite, OperandType::kIdx, \
+ OperandType::kIdx, OperandType::kFlag8) \
+ V(CreateObjectLiteral, AccumulatorUse::kWrite, OperandType::kIdx, \
+ OperandType::kIdx, OperandType::kFlag8) \
+ \
+ /* Closure allocation */ \
+ V(CreateClosure, AccumulatorUse::kWrite, OperandType::kIdx, \
+ OperandType::kFlag8) \
+ \
+ /* Arguments allocation */ \
+ V(CreateMappedArguments, AccumulatorUse::kWrite) \
+ V(CreateUnmappedArguments, AccumulatorUse::kWrite) \
+ V(CreateRestParameter, AccumulatorUse::kWrite) \
+ \
+ /* Control Flow */ \
+ V(Jump, AccumulatorUse::kNone, OperandType::kImm) \
+ V(JumpConstant, AccumulatorUse::kNone, OperandType::kIdx) \
+ V(JumpIfTrue, AccumulatorUse::kRead, OperandType::kImm) \
+ V(JumpIfTrueConstant, AccumulatorUse::kRead, OperandType::kIdx) \
+ V(JumpIfFalse, AccumulatorUse::kRead, OperandType::kImm) \
+ V(JumpIfFalseConstant, AccumulatorUse::kRead, OperandType::kIdx) \
+ V(JumpIfToBooleanTrue, AccumulatorUse::kRead, OperandType::kImm) \
+ V(JumpIfToBooleanTrueConstant, AccumulatorUse::kRead, OperandType::kIdx) \
+ V(JumpIfToBooleanFalse, AccumulatorUse::kRead, OperandType::kImm) \
+ V(JumpIfToBooleanFalseConstant, AccumulatorUse::kRead, OperandType::kIdx) \
+ V(JumpIfNull, AccumulatorUse::kRead, OperandType::kImm) \
+ V(JumpIfNullConstant, AccumulatorUse::kRead, OperandType::kIdx) \
+ V(JumpIfUndefined, AccumulatorUse::kRead, OperandType::kImm) \
+ V(JumpIfUndefinedConstant, AccumulatorUse::kRead, OperandType::kIdx) \
+ V(JumpIfNotHole, AccumulatorUse::kRead, OperandType::kImm) \
+ V(JumpIfNotHoleConstant, AccumulatorUse::kRead, OperandType::kIdx) \
+ \
+ /* Complex flow control For..in */ \
+ V(ForInPrepare, AccumulatorUse::kRead, OperandType::kRegOutTriple) \
+ V(ForInDone, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg) \
+ V(ForInNext, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg, \
+ OperandType::kRegPair, OperandType::kIdx) \
+ V(ForInStep, AccumulatorUse::kWrite, OperandType::kReg) \
+ \
+ /* Perform a stack guard check */ \
+ V(StackCheck, AccumulatorUse::kNone) \
+ \
+ /* Non-local flow control */ \
+ V(Throw, AccumulatorUse::kRead) \
+ V(ReThrow, AccumulatorUse::kRead) \
+ V(Return, AccumulatorUse::kNone) \
+ \
+ /* Debugger */ \
+ V(Debugger, AccumulatorUse::kNone) \
+ DEBUG_BREAK_BYTECODE_LIST(V) \
+ \
+ /* Illegal bytecode (terminates execution) */ \
+ V(Illegal, AccumulatorUse::kNone)
+
+enum class AccumulatorUse : uint8_t {
+ kNone = 0,
+ kRead = 1 << 0,
+ kWrite = 1 << 1,
+ kReadWrite = kRead | kWrite
+};
+
+V8_INLINE AccumulatorUse operator&(AccumulatorUse lhs, AccumulatorUse rhs) {
+ int result = static_cast<int>(lhs) & static_cast<int>(rhs);
+ return static_cast<AccumulatorUse>(result);
+}
+
+V8_INLINE AccumulatorUse operator|(AccumulatorUse lhs, AccumulatorUse rhs) {
+ int result = static_cast<int>(lhs) | static_cast<int>(rhs);
+ return static_cast<AccumulatorUse>(result);
+}
+
+// Enumeration of scaling factors applicable to scalable operands. Code
+// relies on being able to cast values to integer scaling values.
+enum class OperandScale : uint8_t {
+ kSingle = 1,
+ kDouble = 2,
+ kQuadruple = 4,
+ kMaxValid = kQuadruple,
+ kInvalid = 8,
+};
+
+// Enumeration of the size classes of operand types used by
+// bytecodes. Code relies on being able to cast values to integer
+// types to get the size in bytes.
enum class OperandSize : uint8_t {
kNone = 0,
kByte = 1,
kShort = 2,
+ kQuad = 4,
+ kLast = kQuad
};
+// Primitive operand info used that summarize properties of operands.
+// Columns are Name, IsScalable, IsUnsigned, UnscaledSize.
+#define OPERAND_TYPE_INFO_LIST(V) \
+ V(None, false, false, OperandSize::kNone) \
+ V(ScalableSignedByte, true, false, OperandSize::kByte) \
+ V(ScalableUnsignedByte, true, true, OperandSize::kByte) \
+ V(FixedUnsignedByte, false, true, OperandSize::kByte) \
+ V(FixedUnsignedShort, false, true, OperandSize::kShort)
+
+enum class OperandTypeInfo : uint8_t {
+#define DECLARE_OPERAND_TYPE_INFO(Name, ...) k##Name,
+ OPERAND_TYPE_INFO_LIST(DECLARE_OPERAND_TYPE_INFO)
+#undef DECLARE_OPERAND_TYPE_INFO
+};
// Enumeration of operand types used by bytecodes.
enum class OperandType : uint8_t {
@@ -330,9 +340,6 @@ class Register {
static Register FromParameterIndex(int index, int parameter_count);
int ToParameterIndex(int parameter_count) const;
- static int MaxParameterIndex();
- static int MaxRegisterIndex();
- static int MaxRegisterIndexForByteOperand();
// Returns an invalid register.
static Register invalid_value() { return Register(); }
@@ -349,14 +356,8 @@ class Register {
static Register new_target();
bool is_new_target() const;
- static Register FromOperand(uint8_t operand);
- uint8_t ToOperand() const;
-
- static Register FromWideOperand(uint16_t operand);
- uint16_t ToWideOperand() const;
-
- static Register FromRawOperand(uint32_t raw_operand);
- uint32_t ToRawOperand() const;
+ int32_t ToOperand() const { return -index_; }
+ static Register FromOperand(int32_t operand) { return Register(-operand); }
static bool AreContiguous(Register reg1, Register reg2,
Register reg3 = Register(),
@@ -399,9 +400,18 @@ class Bytecodes {
// Returns string representation of |bytecode|.
static const char* ToString(Bytecode bytecode);
+ // Returns string representation of |bytecode|.
+ static std::string ToString(Bytecode bytecode, OperandScale operand_scale);
+
+ // Returns string representation of |accumulator_use|.
+ static const char* AccumulatorUseToString(AccumulatorUse accumulator_use);
+
// Returns string representation of |operand_type|.
static const char* OperandTypeToString(OperandType operand_type);
+ // Returns string representation of |operand_scale|.
+ static const char* OperandScaleToString(OperandScale operand_scale);
+
// Returns string representation of |operand_size|.
static const char* OperandSizeToString(OperandSize operand_size);
@@ -417,57 +427,72 @@ class Bytecodes {
// Returns the number of register operands expected by |bytecode|.
static int NumberOfRegisterOperands(Bytecode bytecode);
+ // Returns the prefix bytecode representing an operand scale to be
+ // applied to a a bytecode.
+ static Bytecode OperandScaleToPrefixBytecode(OperandScale operand_scale);
+
+ // Returns true if the operand scale requires a prefix bytecode.
+ static bool OperandScaleRequiresPrefixBytecode(OperandScale operand_scale);
+
+ // Returns the scaling applied to scalable operands if bytecode is
+ // is a scaling prefix.
+ static OperandScale PrefixBytecodeToOperandScale(Bytecode bytecode);
+
+ // Returns how accumulator is used by |bytecode|.
+ static AccumulatorUse GetAccumulatorUse(Bytecode bytecode);
+
+ // Returns true if |bytecode| reads the accumulator.
+ static bool ReadsAccumulator(Bytecode bytecode);
+
+ // Returns true if |bytecode| writes the accumulator.
+ static bool WritesAccumulator(Bytecode bytecode);
+
// Returns the i-th operand of |bytecode|.
static OperandType GetOperandType(Bytecode bytecode, int i);
// Returns the size of the i-th operand of |bytecode|.
- static OperandSize GetOperandSize(Bytecode bytecode, int i);
+ static OperandSize GetOperandSize(Bytecode bytecode, int i,
+ OperandScale operand_scale);
// Returns the offset of the i-th operand of |bytecode| relative to the start
// of the bytecode.
- static int GetOperandOffset(Bytecode bytecode, int i);
+ static int GetOperandOffset(Bytecode bytecode, int i,
+ OperandScale operand_scale);
// Returns a zero-based bitmap of the register operand positions of
// |bytecode|.
static int GetRegisterOperandBitmap(Bytecode bytecode);
- // Returns a debug break bytecode with a matching operand size.
+ // Returns a debug break bytecode to replace |bytecode|.
static Bytecode GetDebugBreak(Bytecode bytecode);
- // Returns the size of the bytecode including its operands.
- static int Size(Bytecode bytecode);
+ // Returns the size of the bytecode including its operands for the
+ // given |operand_scale|.
+ static int Size(Bytecode bytecode, OperandScale operand_scale);
// Returns the size of |operand|.
- static OperandSize SizeOfOperand(OperandType operand);
+ static OperandSize SizeOfOperand(OperandType operand, OperandScale scale);
// Returns true if the bytecode is a conditional jump taking
- // an immediate byte operand (OperandType::kImm8).
+ // an immediate byte operand (OperandType::kImm).
static bool IsConditionalJumpImmediate(Bytecode bytecode);
// Returns true if the bytecode is a conditional jump taking
- // a constant pool entry (OperandType::kIdx8).
+ // a constant pool entry (OperandType::kIdx).
static bool IsConditionalJumpConstant(Bytecode bytecode);
// Returns true if the bytecode is a conditional jump taking
- // a constant pool entry (OperandType::kIdx16).
- static bool IsConditionalJumpConstantWide(Bytecode bytecode);
-
- // Returns true if the bytecode is a conditional jump taking
// any kind of operand.
static bool IsConditionalJump(Bytecode bytecode);
// Returns true if the bytecode is a jump or a conditional jump taking
- // an immediate byte operand (OperandType::kImm8).
+ // an immediate byte operand (OperandType::kImm).
static bool IsJumpImmediate(Bytecode bytecode);
// Returns true if the bytecode is a jump or conditional jump taking a
- // constant pool entry (OperandType::kIdx8).
+ // constant pool entry (OperandType::kIdx).
static bool IsJumpConstant(Bytecode bytecode);
- // Returns true if the bytecode is a jump or conditional jump taking a
- // constant pool entry (OperandType::kIdx16).
- static bool IsJumpConstantWide(Bytecode bytecode);
-
// Returns true if the bytecode is a jump or conditional jump taking
// any kind of operand.
static bool IsJump(Bytecode bytecode);
@@ -478,18 +503,17 @@ class Bytecodes {
// Returns true if the bytecode is a call or a constructor call.
static bool IsCallOrNew(Bytecode bytecode);
+ // Returns true if the bytecode is a call to the runtime.
+ static bool IsCallRuntime(Bytecode bytecode);
+
// Returns true if the bytecode is a debug break.
static bool IsDebugBreak(Bytecode bytecode);
- // Returns true if |operand_type| is a register index operand (kIdx8/kIdx16).
- static bool IsIndexOperandType(OperandType operand_type);
+ // Returns true if the bytecode has wider operand forms.
+ static bool IsBytecodeWithScalableOperands(Bytecode bytecode);
- // Returns true if |operand_type| represents an immediate.
- static bool IsImmediateOperandType(OperandType operand_type);
-
- // Returns true if |operand_type| is a register count operand
- // (kRegCount8/kRegCount16).
- static bool IsRegisterCountOperandType(OperandType operand_type);
+ // Returns true if the bytecode is a scaling prefix bytecode.
+ static bool IsPrefixScalingBytecode(Bytecode bytecode);
// Returns true if |operand_type| is any type of register operand.
static bool IsRegisterOperandType(OperandType operand_type);
@@ -501,20 +525,52 @@ class Bytecodes {
static bool IsRegisterOutputOperandType(OperandType operand_type);
// Returns true if |operand_type| is a maybe register operand
- // (kMaybeReg8/kMaybeReg16).
+ // (kMaybeReg).
static bool IsMaybeRegisterOperandType(OperandType operand_type);
+ // Returns true if |operand_type| is a runtime-id operand (kRuntimeId).
+ static bool IsRuntimeIdOperandType(OperandType operand_type);
+
+ // Returns true if |operand_type| is unsigned, false if signed.
+ static bool IsUnsignedOperandType(OperandType operand_type);
+
+ // Decodes a register operand in a byte array.
+ static Register DecodeRegisterOperand(const uint8_t* operand_start,
+ OperandType operand_type,
+ OperandScale operand_scale);
+
+ // Decodes a signed operand in a byte array.
+ static int32_t DecodeSignedOperand(const uint8_t* operand_start,
+ OperandType operand_type,
+ OperandScale operand_scale);
+
+ // Decodes an unsigned operand in a byte array.
+ static uint32_t DecodeUnsignedOperand(const uint8_t* operand_start,
+ OperandType operand_type,
+ OperandScale operand_scale);
+
// Decode a single bytecode and operands to |os|.
static std::ostream& Decode(std::ostream& os, const uint8_t* bytecode_start,
int number_of_parameters);
+ // Returns true if a handler is generated for a bytecode at a given
+ // operand scale. All bytecodes have handlers at OperandScale::kSingle,
+ // but only bytecodes with scalable operands have handlers with larger
+ // OperandScale values.
+ static bool BytecodeHasHandler(Bytecode bytecode, OperandScale operand_scale);
+
+ // Return the next larger operand scale.
+ static OperandScale NextOperandScale(OperandScale operand_scale);
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Bytecodes);
};
std::ostream& operator<<(std::ostream& os, const Bytecode& bytecode);
+std::ostream& operator<<(std::ostream& os, const AccumulatorUse& use);
+std::ostream& operator<<(std::ostream& os, const OperandScale& operand_scale);
+std::ostream& operator<<(std::ostream& os, const OperandSize& operand_size);
std::ostream& operator<<(std::ostream& os, const OperandType& operand_type);
-std::ostream& operator<<(std::ostream& os, const OperandSize& operand_type);
} // namespace interpreter
} // namespace internal
diff --git a/deps/v8/src/interpreter/constant-array-builder.cc b/deps/v8/src/interpreter/constant-array-builder.cc
index e8b1281b5a..7ce50b580e 100644
--- a/deps/v8/src/interpreter/constant-array-builder.cc
+++ b/deps/v8/src/interpreter/constant-array-builder.cc
@@ -11,28 +11,25 @@ namespace v8 {
namespace internal {
namespace interpreter {
-ConstantArrayBuilder::ConstantArraySlice::ConstantArraySlice(Zone* zone,
- size_t start_index,
- size_t capacity)
+ConstantArrayBuilder::ConstantArraySlice::ConstantArraySlice(
+ Zone* zone, size_t start_index, size_t capacity, OperandSize operand_size)
: start_index_(start_index),
capacity_(capacity),
reserved_(0),
+ operand_size_(operand_size),
constants_(zone) {}
-
void ConstantArrayBuilder::ConstantArraySlice::Reserve() {
DCHECK_GT(available(), 0u);
reserved_++;
DCHECK_LE(reserved_, capacity() - constants_.size());
}
-
void ConstantArrayBuilder::ConstantArraySlice::Unreserve() {
DCHECK_GT(reserved_, 0u);
reserved_--;
}
-
size_t ConstantArrayBuilder::ConstantArraySlice::Allocate(
Handle<Object> object) {
DCHECK_GT(available(), 0u);
@@ -42,45 +39,57 @@ size_t ConstantArrayBuilder::ConstantArraySlice::Allocate(
return index + start_index();
}
-
Handle<Object> ConstantArrayBuilder::ConstantArraySlice::At(
size_t index) const {
+ DCHECK_GE(index, start_index());
+ DCHECK_LT(index, start_index() + size());
return constants_[index - start_index()];
}
-
-STATIC_CONST_MEMBER_DEFINITION const size_t ConstantArrayBuilder::kMaxCapacity;
-STATIC_CONST_MEMBER_DEFINITION const size_t ConstantArrayBuilder::kLowCapacity;
-
+STATIC_CONST_MEMBER_DEFINITION const size_t ConstantArrayBuilder::k8BitCapacity;
+STATIC_CONST_MEMBER_DEFINITION const size_t
+ ConstantArrayBuilder::k16BitCapacity;
+STATIC_CONST_MEMBER_DEFINITION const size_t
+ ConstantArrayBuilder::k32BitCapacity;
ConstantArrayBuilder::ConstantArrayBuilder(Isolate* isolate, Zone* zone)
- : isolate_(isolate),
- idx8_slice_(zone, 0, kLowCapacity),
- idx16_slice_(zone, kLowCapacity, kHighCapacity),
- constants_map_(isolate->heap(), zone) {
- STATIC_ASSERT(kMaxCapacity == static_cast<size_t>(kMaxUInt16 + 1));
- DCHECK_EQ(idx8_slice_.start_index(), 0u);
- DCHECK_EQ(idx8_slice_.capacity(), kLowCapacity);
- DCHECK_EQ(idx16_slice_.start_index(), kLowCapacity);
- DCHECK_EQ(idx16_slice_.capacity(), kMaxCapacity - kLowCapacity);
+ : isolate_(isolate), constants_map_(isolate->heap(), zone) {
+ idx_slice_[0] =
+ new (zone) ConstantArraySlice(zone, 0, k8BitCapacity, OperandSize::kByte);
+ idx_slice_[1] = new (zone) ConstantArraySlice(
+ zone, k8BitCapacity, k16BitCapacity, OperandSize::kShort);
+ idx_slice_[2] = new (zone) ConstantArraySlice(
+ zone, k8BitCapacity + k16BitCapacity, k32BitCapacity, OperandSize::kQuad);
}
-
size_t ConstantArrayBuilder::size() const {
- if (idx16_slice_.size() > 0) {
- return idx16_slice_.start_index() + idx16_slice_.size();
- } else {
- return idx8_slice_.size();
+ size_t i = arraysize(idx_slice_);
+ while (i > 0) {
+ ConstantArraySlice* slice = idx_slice_[--i];
+ if (slice->size() > 0) {
+ return slice->start_index() + slice->size();
+ }
}
+ return idx_slice_[0]->size();
}
+const ConstantArrayBuilder::ConstantArraySlice*
+ConstantArrayBuilder::IndexToSlice(size_t index) const {
+ for (const ConstantArraySlice* slice : idx_slice_) {
+ if (index <= slice->max_index()) {
+ return slice;
+ }
+ }
+ UNREACHABLE();
+ return nullptr;
+}
Handle<Object> ConstantArrayBuilder::At(size_t index) const {
- if (index >= idx16_slice_.start_index()) {
- return idx16_slice_.At(index);
- } else if (index < idx8_slice_.size()) {
- return idx8_slice_.At(index);
+ const ConstantArraySlice* slice = IndexToSlice(index);
+ if (index < slice->start_index() + slice->size()) {
+ return slice->At(index);
} else {
+ DCHECK_LT(index, slice->capacity());
return isolate_->factory()->the_hole_value();
}
}
@@ -88,49 +97,82 @@ Handle<Object> ConstantArrayBuilder::At(size_t index) const {
Handle<FixedArray> ConstantArrayBuilder::ToFixedArray() {
Handle<FixedArray> fixed_array = isolate_->factory()->NewFixedArray(
static_cast<int>(size()), PretenureFlag::TENURED);
- for (int i = 0; i < fixed_array->length(); i++) {
- fixed_array->set(i, *At(static_cast<size_t>(i)));
+ int array_index = 0;
+ for (const ConstantArraySlice* slice : idx_slice_) {
+ if (array_index == fixed_array->length()) {
+ break;
+ }
+ DCHECK(array_index == 0 ||
+ base::bits::IsPowerOfTwo32(static_cast<uint32_t>(array_index)));
+ // Copy objects from slice into array.
+ for (size_t i = 0; i < slice->size(); ++i) {
+ fixed_array->set(array_index++, *slice->At(slice->start_index() + i));
+ }
+ // Insert holes where reservations led to unused slots.
+ size_t padding =
+ std::min(static_cast<size_t>(fixed_array->length() - array_index),
+ slice->capacity() - slice->size());
+ for (size_t i = 0; i < padding; i++) {
+ fixed_array->set(array_index++, *isolate_->factory()->the_hole_value());
+ }
}
+ DCHECK_EQ(array_index, fixed_array->length());
constants_map()->Clear();
return fixed_array;
}
-
size_t ConstantArrayBuilder::Insert(Handle<Object> object) {
index_t* entry = constants_map()->Find(object);
return (entry == nullptr) ? AllocateEntry(object) : *entry;
}
-
ConstantArrayBuilder::index_t ConstantArrayBuilder::AllocateEntry(
Handle<Object> object) {
DCHECK(!object->IsOddball());
- size_t index;
index_t* entry = constants_map()->Get(object);
- if (idx8_slice_.available() > 0) {
- index = idx8_slice_.Allocate(object);
- } else {
- index = idx16_slice_.Allocate(object);
+ for (size_t i = 0; i < arraysize(idx_slice_); ++i) {
+ if (idx_slice_[i]->available() > 0) {
+ size_t index = idx_slice_[i]->Allocate(object);
+ *entry = static_cast<index_t>(index);
+ return *entry;
+ break;
+ }
}
- CHECK_LT(index, kMaxCapacity);
- *entry = static_cast<index_t>(index);
- return *entry;
+ UNREACHABLE();
+ return kMaxUInt32;
}
-
OperandSize ConstantArrayBuilder::CreateReservedEntry() {
- if (idx8_slice_.available() > 0) {
- idx8_slice_.Reserve();
- return OperandSize::kByte;
- } else if (idx16_slice_.available() > 0) {
- idx16_slice_.Reserve();
- return OperandSize::kShort;
- } else {
- UNREACHABLE();
- return OperandSize::kNone;
+ for (size_t i = 0; i < arraysize(idx_slice_); ++i) {
+ if (idx_slice_[i]->available() > 0) {
+ idx_slice_[i]->Reserve();
+ return idx_slice_[i]->operand_size();
+ }
}
+ UNREACHABLE();
+ return OperandSize::kNone;
}
+ConstantArrayBuilder::ConstantArraySlice*
+ConstantArrayBuilder::OperandSizeToSlice(OperandSize operand_size) const {
+ ConstantArraySlice* slice = nullptr;
+ switch (operand_size) {
+ case OperandSize::kNone:
+ UNREACHABLE();
+ break;
+ case OperandSize::kByte:
+ slice = idx_slice_[0];
+ break;
+ case OperandSize::kShort:
+ slice = idx_slice_[1];
+ break;
+ case OperandSize::kQuad:
+ slice = idx_slice_[2];
+ break;
+ }
+ DCHECK(slice->operand_size() == operand_size);
+ return slice;
+}
size_t ConstantArrayBuilder::CommitReservedEntry(OperandSize operand_size,
Handle<Object> object) {
@@ -140,33 +182,20 @@ size_t ConstantArrayBuilder::CommitReservedEntry(OperandSize operand_size,
if (nullptr == entry) {
index = AllocateEntry(object);
} else {
- if (operand_size == OperandSize::kByte &&
- *entry >= idx8_slice_.capacity()) {
- // The object is already in the constant array, but has an index
- // outside the range of an idx8 operand so we need to create a
- // duplicate entry in the idx8 operand range to satisfy the
- // commitment.
- *entry = static_cast<index_t>(idx8_slice_.Allocate(object));
+ ConstantArraySlice* slice = OperandSizeToSlice(operand_size);
+ if (*entry > slice->max_index()) {
+ // The object is already in the constant array, but may have an
+ // index too big for the reserved operand_size. So, duplicate
+ // entry with the smaller operand size.
+ *entry = static_cast<index_t>(slice->Allocate(object));
}
index = *entry;
}
- DCHECK(operand_size == OperandSize::kShort || index < idx8_slice_.capacity());
- DCHECK_LT(index, kMaxCapacity);
return index;
}
-
void ConstantArrayBuilder::DiscardReservedEntry(OperandSize operand_size) {
- switch (operand_size) {
- case OperandSize::kByte:
- idx8_slice_.Unreserve();
- return;
- case OperandSize::kShort:
- idx16_slice_.Unreserve();
- return;
- default:
- UNREACHABLE();
- }
+ OperandSizeToSlice(operand_size)->Unreserve();
}
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/constant-array-builder.h b/deps/v8/src/interpreter/constant-array-builder.h
index d7e41e3771..1a68646251 100644
--- a/deps/v8/src/interpreter/constant-array-builder.h
+++ b/deps/v8/src/interpreter/constant-array-builder.h
@@ -23,13 +23,14 @@ namespace interpreter {
class ConstantArrayBuilder final BASE_EMBEDDED {
public:
// Capacity of the 8-bit operand slice.
- static const size_t kLowCapacity = 1u << kBitsPerByte;
-
- // Capacity of the combined 8-bit and 16-bit operand slices.
- static const size_t kMaxCapacity = 1u << (2 * kBitsPerByte);
+ static const size_t k8BitCapacity = 1u << kBitsPerByte;
// Capacity of the 16-bit operand slice.
- static const size_t kHighCapacity = kMaxCapacity - kLowCapacity;
+ static const size_t k16BitCapacity = (1u << 2 * kBitsPerByte) - k8BitCapacity;
+
+ // Capacity of the 32-bit operand slice.
+ static const size_t k32BitCapacity =
+ kMaxUInt32 - k16BitCapacity - k8BitCapacity + 1;
ConstantArrayBuilder(Isolate* isolate, Zone* zone);
@@ -60,12 +61,13 @@ class ConstantArrayBuilder final BASE_EMBEDDED {
void DiscardReservedEntry(OperandSize operand_size);
private:
- typedef uint16_t index_t;
+ typedef uint32_t index_t;
index_t AllocateEntry(Handle<Object> object);
- struct ConstantArraySlice final {
- ConstantArraySlice(Zone* zone, size_t start_index, size_t capacity);
+ struct ConstantArraySlice final : public ZoneObject {
+ ConstantArraySlice(Zone* zone, size_t start_index, size_t capacity,
+ OperandSize operand_size);
void Reserve();
void Unreserve();
size_t Allocate(Handle<Object> object);
@@ -76,21 +78,26 @@ class ConstantArrayBuilder final BASE_EMBEDDED {
inline size_t capacity() const { return capacity_; }
inline size_t size() const { return constants_.size(); }
inline size_t start_index() const { return start_index_; }
+ inline size_t max_index() const { return start_index_ + capacity() - 1; }
+ inline OperandSize operand_size() const { return operand_size_; }
private:
const size_t start_index_;
const size_t capacity_;
size_t reserved_;
+ OperandSize operand_size_;
ZoneVector<Handle<Object>> constants_;
DISALLOW_COPY_AND_ASSIGN(ConstantArraySlice);
};
+ const ConstantArraySlice* IndexToSlice(size_t index) const;
+ ConstantArraySlice* OperandSizeToSlice(OperandSize operand_size) const;
+
IdentityMap<index_t>* constants_map() { return &constants_map_; }
Isolate* isolate_;
- ConstantArraySlice idx8_slice_;
- ConstantArraySlice idx16_slice_;
+ ConstantArraySlice* idx_slice_[3];
IdentityMap<index_t> constants_map_;
};
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index 440e879c48..2663e4a876 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -22,12 +22,16 @@ namespace interpreter {
using compiler::Node;
InterpreterAssembler::InterpreterAssembler(Isolate* isolate, Zone* zone,
- Bytecode bytecode)
- : compiler::CodeStubAssembler(
- isolate, zone, InterpreterDispatchDescriptor(isolate),
- Code::ComputeFlags(Code::STUB), Bytecodes::ToString(bytecode), 0),
+ Bytecode bytecode,
+ OperandScale operand_scale)
+ : compiler::CodeStubAssembler(isolate, zone,
+ InterpreterDispatchDescriptor(isolate),
+ Code::ComputeFlags(Code::BYTECODE_HANDLER),
+ Bytecodes::ToString(bytecode), 0),
bytecode_(bytecode),
+ operand_scale_(operand_scale),
accumulator_(this, MachineRepresentation::kTagged),
+ accumulator_use_(AccumulatorUse::kNone),
context_(this, MachineRepresentation::kTagged),
bytecode_array_(this, MachineRepresentation::kTagged),
disable_stack_check_across_call_(false),
@@ -42,11 +46,26 @@ InterpreterAssembler::InterpreterAssembler(Isolate* isolate, Zone* zone,
}
}
-InterpreterAssembler::~InterpreterAssembler() {}
+InterpreterAssembler::~InterpreterAssembler() {
+ // If the following check fails the handler does not use the
+ // accumulator in the way described in the bytecode definitions in
+ // bytecodes.h.
+ DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
+}
+
+Node* InterpreterAssembler::GetAccumulatorUnchecked() {
+ return accumulator_.value();
+}
-Node* InterpreterAssembler::GetAccumulator() { return accumulator_.value(); }
+Node* InterpreterAssembler::GetAccumulator() {
+ DCHECK(Bytecodes::ReadsAccumulator(bytecode_));
+ accumulator_use_ = accumulator_use_ | AccumulatorUse::kRead;
+ return GetAccumulatorUnchecked();
+}
void InterpreterAssembler::SetAccumulator(Node* value) {
+ DCHECK(Bytecodes::WritesAccumulator(bytecode_));
+ accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
accumulator_.Bind(value);
}
@@ -79,11 +98,11 @@ Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
Node* InterpreterAssembler::LoadRegister(int offset) {
return Load(MachineType::AnyTagged(), RegisterFileRawPointer(),
- Int32Constant(offset));
+ IntPtrConstant(offset));
}
Node* InterpreterAssembler::LoadRegister(Register reg) {
- return LoadRegister(reg.ToOperand() << kPointerSizeLog2);
+ return LoadRegister(IntPtrConstant(-reg.index()));
}
Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
@@ -97,12 +116,12 @@ Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
Node* InterpreterAssembler::StoreRegister(Node* value, int offset) {
return StoreNoWriteBarrier(MachineRepresentation::kTagged,
- RegisterFileRawPointer(), Int32Constant(offset),
+ RegisterFileRawPointer(), IntPtrConstant(offset),
value);
}
Node* InterpreterAssembler::StoreRegister(Node* value, Register reg) {
- return StoreRegister(value, reg.ToOperand() << kPointerSizeLog2);
+ return StoreRegister(value, IntPtrConstant(-reg.index()));
}
Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
@@ -113,27 +132,31 @@ Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
Node* InterpreterAssembler::NextRegister(Node* reg_index) {
// Register indexes are negative, so the next index is minus one.
- return IntPtrAdd(reg_index, Int32Constant(-1));
+ return IntPtrAdd(reg_index, IntPtrConstant(-1));
+}
+
+Node* InterpreterAssembler::OperandOffset(int operand_index) {
+ return IntPtrConstant(
+ Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()));
}
-Node* InterpreterAssembler::BytecodeOperand(int operand_index) {
+Node* InterpreterAssembler::BytecodeOperandUnsignedByte(int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
- DCHECK_EQ(OperandSize::kByte,
- Bytecodes::GetOperandSize(bytecode_, operand_index));
- return Load(
- MachineType::Uint8(), BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), Int32Constant(Bytecodes::GetOperandOffset(
- bytecode_, operand_index))));
+ DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
+ bytecode_, operand_index, operand_scale()));
+ Node* operand_offset = OperandOffset(operand_index);
+ return Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(), operand_offset));
}
-Node* InterpreterAssembler::BytecodeOperandSignExtended(int operand_index) {
+Node* InterpreterAssembler::BytecodeOperandSignedByte(int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
- DCHECK_EQ(OperandSize::kByte,
- Bytecodes::GetOperandSize(bytecode_, operand_index));
- Node* load = Load(
- MachineType::Int8(), BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), Int32Constant(Bytecodes::GetOperandOffset(
- bytecode_, operand_index))));
+ DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
+ bytecode_, operand_index, operand_scale()));
+ Node* operand_offset = OperandOffset(operand_index);
+ Node* load = Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(), operand_offset));
+
// Ensure that we sign extend to full pointer size
if (kPointerSize == 8) {
load = ChangeInt32ToInt64(load);
@@ -141,58 +164,85 @@ Node* InterpreterAssembler::BytecodeOperandSignExtended(int operand_index) {
return load;
}
-Node* InterpreterAssembler::BytecodeOperandShort(int operand_index) {
- DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
- DCHECK_EQ(OperandSize::kShort,
- Bytecodes::GetOperandSize(bytecode_, operand_index));
- if (TargetSupportsUnalignedAccess()) {
- return Load(
- MachineType::Uint16(), BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), Int32Constant(Bytecodes::GetOperandOffset(
- bytecode_, operand_index))));
- } else {
- int offset = Bytecodes::GetOperandOffset(bytecode_, operand_index);
- Node* first_byte = Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), Int32Constant(offset)));
- Node* second_byte =
- Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), Int32Constant(offset + 1)));
+compiler::Node* InterpreterAssembler::BytecodeOperandReadUnaligned(
+ int relative_offset, MachineType result_type) {
+ static const int kMaxCount = 4;
+ DCHECK(!TargetSupportsUnalignedAccess());
+
+ int count;
+ switch (result_type.representation()) {
+ case MachineRepresentation::kWord16:
+ count = 2;
+ break;
+ case MachineRepresentation::kWord32:
+ count = 4;
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ MachineType msb_type =
+ result_type.IsSigned() ? MachineType::Int8() : MachineType::Uint8();
+
#if V8_TARGET_LITTLE_ENDIAN
- return WordOr(WordShl(second_byte, kBitsPerByte), first_byte);
+ const int kStep = -1;
+ int msb_offset = count - 1;
#elif V8_TARGET_BIG_ENDIAN
- return WordOr(WordShl(first_byte, kBitsPerByte), second_byte);
+ const int kStep = 1;
+ int msb_offset = 0;
#else
#error "Unknown Architecture"
#endif
+
+ // Read the most signicant bytecode into bytes[0] and then in order
+ // down to least significant in bytes[count - 1].
+ DCHECK(count <= kMaxCount);
+ compiler::Node* bytes[kMaxCount];
+ for (int i = 0; i < count; i++) {
+ MachineType machine_type = (i == 0) ? msb_type : MachineType::Uint8();
+ Node* offset = IntPtrConstant(relative_offset + msb_offset + i * kStep);
+ Node* array_offset = IntPtrAdd(BytecodeOffset(), offset);
+ bytes[i] = Load(machine_type, BytecodeArrayTaggedPointer(), array_offset);
+ }
+
+ // Pack LSB to MSB.
+ Node* result = bytes[--count];
+ for (int i = 1; --count >= 0; i++) {
+ Node* shift = Int32Constant(i * kBitsPerByte);
+ Node* value = Word32Shl(bytes[count], shift);
+ result = Word32Or(value, result);
}
+ return result;
}
-Node* InterpreterAssembler::BytecodeOperandShortSignExtended(
- int operand_index) {
+Node* InterpreterAssembler::BytecodeOperandUnsignedShort(int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
- DCHECK_EQ(OperandSize::kShort,
- Bytecodes::GetOperandSize(bytecode_, operand_index));
- int operand_offset = Bytecodes::GetOperandOffset(bytecode_, operand_index);
+ DCHECK_EQ(
+ OperandSize::kShort,
+ Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
+ int operand_offset =
+ Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
+ if (TargetSupportsUnalignedAccess()) {
+ return Load(MachineType::Uint16(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
+ } else {
+ return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint16());
+ }
+}
+
+Node* InterpreterAssembler::BytecodeOperandSignedShort(int operand_index) {
+ DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
+ DCHECK_EQ(
+ OperandSize::kShort,
+ Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
+ int operand_offset =
+ Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
Node* load;
if (TargetSupportsUnalignedAccess()) {
load = Load(MachineType::Int16(), BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), Int32Constant(operand_offset)));
+ IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
} else {
-#if V8_TARGET_LITTLE_ENDIAN
- Node* hi_byte_offset = Int32Constant(operand_offset + 1);
- Node* lo_byte_offset = Int32Constant(operand_offset);
-#elif V8_TARGET_BIG_ENDIAN
- Node* hi_byte_offset = Int32Constant(operand_offset);
- Node* lo_byte_offset = Int32Constant(operand_offset + 1);
-#else
-#error "Unknown Architecture"
-#endif
- Node* hi_byte = Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), hi_byte_offset));
- Node* lo_byte = Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), lo_byte_offset));
- hi_byte = Word32Shl(hi_byte, Int32Constant(kBitsPerByte));
- load = Word32Or(hi_byte, lo_byte);
+ load = BytecodeOperandReadUnaligned(operand_offset, MachineType::Int16());
}
// Ensure that we sign extend to full pointer size
@@ -202,57 +252,123 @@ Node* InterpreterAssembler::BytecodeOperandShortSignExtended(
return load;
}
-Node* InterpreterAssembler::BytecodeOperandCount(int operand_index) {
- switch (Bytecodes::GetOperandSize(bytecode_, operand_index)) {
+Node* InterpreterAssembler::BytecodeOperandUnsignedQuad(int operand_index) {
+ DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
+ DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
+ bytecode_, operand_index, operand_scale()));
+ int operand_offset =
+ Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
+ if (TargetSupportsUnalignedAccess()) {
+ return Load(MachineType::Uint32(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
+ } else {
+ return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint32());
+ }
+}
+
+Node* InterpreterAssembler::BytecodeOperandSignedQuad(int operand_index) {
+ DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
+ DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
+ bytecode_, operand_index, operand_scale()));
+ int operand_offset =
+ Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
+ Node* load;
+ if (TargetSupportsUnalignedAccess()) {
+ load = Load(MachineType::Int32(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
+ } else {
+ load = BytecodeOperandReadUnaligned(operand_offset, MachineType::Int32());
+ }
+
+ // Ensure that we sign extend to full pointer size
+ if (kPointerSize == 8) {
+ load = ChangeInt32ToInt64(load);
+ }
+ return load;
+}
+
+Node* InterpreterAssembler::BytecodeSignedOperand(int operand_index,
+ OperandSize operand_size) {
+ DCHECK(!Bytecodes::IsUnsignedOperandType(
+ Bytecodes::GetOperandType(bytecode_, operand_index)));
+ switch (operand_size) {
case OperandSize::kByte:
- DCHECK_EQ(OperandType::kRegCount8,
- Bytecodes::GetOperandType(bytecode_, operand_index));
- return BytecodeOperand(operand_index);
+ return BytecodeOperandSignedByte(operand_index);
case OperandSize::kShort:
- DCHECK_EQ(OperandType::kRegCount16,
- Bytecodes::GetOperandType(bytecode_, operand_index));
- return BytecodeOperandShort(operand_index);
+ return BytecodeOperandSignedShort(operand_index);
+ case OperandSize::kQuad:
+ return BytecodeOperandSignedQuad(operand_index);
case OperandSize::kNone:
UNREACHABLE();
}
return nullptr;
}
-Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
- DCHECK_EQ(OperandType::kImm8,
- Bytecodes::GetOperandType(bytecode_, operand_index));
- return BytecodeOperandSignExtended(operand_index);
-}
-
-Node* InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
- switch (Bytecodes::GetOperandSize(bytecode_, operand_index)) {
+Node* InterpreterAssembler::BytecodeUnsignedOperand(int operand_index,
+ OperandSize operand_size) {
+ DCHECK(Bytecodes::IsUnsignedOperandType(
+ Bytecodes::GetOperandType(bytecode_, operand_index)));
+ switch (operand_size) {
case OperandSize::kByte:
- DCHECK_EQ(OperandType::kIdx8,
- Bytecodes::GetOperandType(bytecode_, operand_index));
- return BytecodeOperand(operand_index);
+ return BytecodeOperandUnsignedByte(operand_index);
case OperandSize::kShort:
- DCHECK_EQ(OperandType::kIdx16,
- Bytecodes::GetOperandType(bytecode_, operand_index));
- return BytecodeOperandShort(operand_index);
+ return BytecodeOperandUnsignedShort(operand_index);
+ case OperandSize::kQuad:
+ return BytecodeOperandUnsignedQuad(operand_index);
case OperandSize::kNone:
UNREACHABLE();
}
return nullptr;
}
+Node* InterpreterAssembler::BytecodeOperandCount(int operand_index) {
+ DCHECK_EQ(OperandType::kRegCount,
+ Bytecodes::GetOperandType(bytecode_, operand_index));
+ OperandSize operand_size =
+ Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
+ return BytecodeUnsignedOperand(operand_index, operand_size);
+}
+
+Node* InterpreterAssembler::BytecodeOperandFlag(int operand_index) {
+ DCHECK_EQ(OperandType::kFlag8,
+ Bytecodes::GetOperandType(bytecode_, operand_index));
+ OperandSize operand_size =
+ Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
+ DCHECK_EQ(operand_size, OperandSize::kByte);
+ return BytecodeUnsignedOperand(operand_index, operand_size);
+}
+
+Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
+ DCHECK_EQ(OperandType::kImm,
+ Bytecodes::GetOperandType(bytecode_, operand_index));
+ OperandSize operand_size =
+ Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
+ return BytecodeSignedOperand(operand_index, operand_size);
+}
+
+Node* InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
+ DCHECK(OperandType::kIdx ==
+ Bytecodes::GetOperandType(bytecode_, operand_index));
+ OperandSize operand_size =
+ Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
+ return BytecodeUnsignedOperand(operand_index, operand_size);
+}
+
Node* InterpreterAssembler::BytecodeOperandReg(int operand_index) {
- OperandType operand_type =
- Bytecodes::GetOperandType(bytecode_, operand_index);
- if (Bytecodes::IsRegisterOperandType(operand_type)) {
- OperandSize operand_size = Bytecodes::SizeOfOperand(operand_type);
- if (operand_size == OperandSize::kByte) {
- return BytecodeOperandSignExtended(operand_index);
- } else if (operand_size == OperandSize::kShort) {
- return BytecodeOperandShortSignExtended(operand_index);
- }
- }
- UNREACHABLE();
- return nullptr;
+ DCHECK(Bytecodes::IsRegisterOperandType(
+ Bytecodes::GetOperandType(bytecode_, operand_index)));
+ OperandSize operand_size =
+ Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
+ return BytecodeSignedOperand(operand_index, operand_size);
+}
+
+Node* InterpreterAssembler::BytecodeOperandRuntimeId(int operand_index) {
+ DCHECK(OperandType::kRuntimeId ==
+ Bytecodes::GetOperandType(bytecode_, operand_index));
+ OperandSize operand_size =
+ Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
+ DCHECK_EQ(operand_size, OperandSize::kShort);
+ return BytecodeUnsignedOperand(operand_index, operand_size);
}
Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
@@ -264,14 +380,6 @@ Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
return Load(MachineType::AnyTagged(), constant_pool, entry_offset);
}
-Node* InterpreterAssembler::LoadFixedArrayElement(Node* fixed_array,
- int index) {
- Node* entry_offset =
- IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
- WordShl(Int32Constant(index), kPointerSizeLog2));
- return Load(MachineType::AnyTagged(), fixed_array, entry_offset);
-}
-
Node* InterpreterAssembler::LoadObjectField(Node* object, int offset) {
return Load(MachineType::AnyTagged(), object,
IntPtrConstant(offset - kHeapObjectTag));
@@ -285,7 +393,7 @@ Node* InterpreterAssembler::LoadContextSlot(Node* context, int slot_index) {
Node* InterpreterAssembler::LoadContextSlot(Node* context, Node* slot_index) {
Node* offset =
IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
- Int32Constant(Context::kHeaderSize - kHeapObjectTag));
+ IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
return Load(MachineType::AnyTagged(), context, offset);
}
@@ -293,7 +401,7 @@ Node* InterpreterAssembler::StoreContextSlot(Node* context, Node* slot_index,
Node* value) {
Node* offset =
IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
- Int32Constant(Context::kHeaderSize - kHeapObjectTag));
+ IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
return Store(MachineRepresentation::kTagged, context, offset, value);
}
@@ -311,8 +419,6 @@ Node* InterpreterAssembler::LoadTypeFeedbackVector() {
void InterpreterAssembler::CallPrologue() {
StoreRegister(SmiTag(BytecodeOffset()),
InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer);
- StoreRegister(BytecodeArrayTaggedPointer(),
- InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer);
if (FLAG_debug_code && !disable_stack_check_across_call_) {
DCHECK(stack_pointer_before_call_ == nullptr);
@@ -368,7 +474,7 @@ Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
Node* function = IntPtrAdd(function_table, function_offset);
Node* function_entry =
Load(MachineType::Pointer(), function,
- Int32Constant(offsetof(Runtime::Function, entry)));
+ IntPtrConstant(offsetof(Runtime::Function, entry)));
return CallStub(callable.descriptor(), code_target, context, arg_count,
first_arg, function_entry, result_size);
@@ -405,7 +511,7 @@ void InterpreterAssembler::UpdateInterruptBudget(Node* weight) {
}
Node* InterpreterAssembler::Advance(int delta) {
- return IntPtrAdd(BytecodeOffset(), Int32Constant(delta));
+ return IntPtrAdd(BytecodeOffset(), IntPtrConstant(delta));
}
Node* InterpreterAssembler::Advance(Node* delta) {
@@ -438,18 +544,21 @@ void InterpreterAssembler::JumpIfWordNotEqual(Node* lhs, Node* rhs,
}
void InterpreterAssembler::Dispatch() {
- DispatchTo(Advance(Bytecodes::Size(bytecode_)));
+ DispatchTo(Advance(Bytecodes::Size(bytecode_, operand_scale_)));
}
void InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) {
Node* target_bytecode = Load(
MachineType::Uint8(), BytecodeArrayTaggedPointer(), new_bytecode_offset);
+ if (kPointerSize == 8) {
+ target_bytecode = ChangeUint32ToUint64(target_bytecode);
+ }
// TODO(rmcilroy): Create a code target dispatch table to avoid conversion
// from code object on every dispatch.
Node* target_code_object =
Load(MachineType::Pointer(), DispatchTableRawPointer(),
- Word32Shl(target_bytecode, Int32Constant(kPointerSizeLog2)));
+ WordShl(target_bytecode, IntPtrConstant(kPointerSizeLog2)));
DispatchToBytecodeHandler(target_code_object, new_bytecode_offset);
}
@@ -461,12 +570,46 @@ void InterpreterAssembler::DispatchToBytecodeHandler(Node* handler,
}
InterpreterDispatchDescriptor descriptor(isolate());
- Node* args[] = {GetAccumulator(), RegisterFileRawPointer(),
+ Node* args[] = {GetAccumulatorUnchecked(), RegisterFileRawPointer(),
bytecode_offset, BytecodeArrayTaggedPointer(),
DispatchTableRawPointer(), GetContext()};
TailCall(descriptor, handler, args, 0);
}
+void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
+ // Dispatching a wide bytecode requires treating the prefix
+ // bytecode a base pointer into the dispatch table and dispatching
+ // the bytecode that follows relative to this base.
+ //
+ // Indices 0-255 correspond to bytecodes with operand_scale == 0
+ // Indices 256-511 correspond to bytecodes with operand_scale == 1
+ // Indices 512-767 correspond to bytecodes with operand_scale == 2
+ Node* next_bytecode_offset = Advance(1);
+ Node* next_bytecode = Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
+ next_bytecode_offset);
+ if (kPointerSize == 8) {
+ next_bytecode = ChangeUint32ToUint64(next_bytecode);
+ }
+ Node* base_index;
+ switch (operand_scale) {
+ case OperandScale::kDouble:
+ base_index = IntPtrConstant(1 << kBitsPerByte);
+ break;
+ case OperandScale::kQuadruple:
+ base_index = IntPtrConstant(2 << kBitsPerByte);
+ break;
+ default:
+ UNREACHABLE();
+ base_index = nullptr;
+ }
+ Node* target_index = IntPtrAdd(base_index, next_bytecode);
+ Node* target_code_object =
+ Load(MachineType::Pointer(), DispatchTableRawPointer(),
+ WordShl(target_index, kPointerSizeLog2));
+
+ DispatchToBytecodeHandler(target_code_object, next_bytecode_offset);
+}
+
void InterpreterAssembler::InterpreterReturn() {
// TODO(rmcilroy): Investigate whether it is worth supporting self
// optimization of primitive functions like FullCodegen.
@@ -505,27 +648,29 @@ void InterpreterAssembler::StackCheck() {
void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
disable_stack_check_across_call_ = true;
Node* abort_id = SmiTag(Int32Constant(bailout_reason));
- Node* ret_value = CallRuntime(Runtime::kAbort, GetContext(), abort_id);
+ CallRuntime(Runtime::kAbort, GetContext(), abort_id);
disable_stack_check_across_call_ = false;
- // Unreached, but keeps turbofan happy.
- Return(ret_value);
}
void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
BailoutReason bailout_reason) {
CodeStubAssembler::Label match(this);
CodeStubAssembler::Label no_match(this);
+ CodeStubAssembler::Label end(this);
Node* condition = WordEqual(lhs, rhs);
Branch(condition, &match, &no_match);
Bind(&no_match);
Abort(bailout_reason);
+ Goto(&end);
Bind(&match);
+ Goto(&end);
+ Bind(&end);
}
void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
CallRuntime(function_id, GetContext(), BytecodeArrayTaggedPointer(),
- SmiTag(BytecodeOffset()), GetAccumulator());
+ SmiTag(BytecodeOffset()), GetAccumulatorUnchecked());
}
// static
@@ -534,7 +679,8 @@ bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
return false;
#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC
return CpuFeatures::IsSupported(UNALIGNED_ACCESSES);
-#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_X87
+#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_X87 || \
+ V8_TARGET_ARCH_S390
return true;
#else
#error "Unknown Architecture"
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index 9600dfb6c5..86ecea54dd 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -19,12 +19,16 @@ namespace interpreter {
class InterpreterAssembler : public compiler::CodeStubAssembler {
public:
- InterpreterAssembler(Isolate* isolate, Zone* zone, Bytecode bytecode);
+ InterpreterAssembler(Isolate* isolate, Zone* zone, Bytecode bytecode,
+ OperandScale operand_scale);
virtual ~InterpreterAssembler();
// Returns the count immediate for bytecode operand |operand_index| in the
// current bytecode.
compiler::Node* BytecodeOperandCount(int operand_index);
+ // Returns the 8-bit flag for bytecode operand |operand_index| in the
+ // current bytecode.
+ compiler::Node* BytecodeOperandFlag(int operand_index);
// Returns the index immediate for bytecode operand |operand_index| in the
// current bytecode.
compiler::Node* BytecodeOperandIdx(int operand_index);
@@ -34,6 +38,9 @@ class InterpreterAssembler : public compiler::CodeStubAssembler {
// Returns the register index for bytecode operand |operand_index| in the
// current bytecode.
compiler::Node* BytecodeOperandReg(int operand_index);
+ // Returns the runtime id immediate for bytecode operand
+ // |operand_index| in the current bytecode.
+ compiler::Node* BytecodeOperandRuntimeId(int operand_index);
// Accumulator.
compiler::Node* GetAccumulator();
@@ -62,9 +69,6 @@ class InterpreterAssembler : public compiler::CodeStubAssembler {
// Load constant at |index| in the constant pool.
compiler::Node* LoadConstantPoolEntry(compiler::Node* index);
- // Load an element from a fixed array on the heap.
- compiler::Node* LoadFixedArrayElement(compiler::Node* fixed_array, int index);
-
// Load a field from an object on the heap.
compiler::Node* LoadObjectField(compiler::Node* object, int offset);
@@ -139,10 +143,14 @@ class InterpreterAssembler : public compiler::CodeStubAssembler {
DispatchToBytecodeHandler(handler, BytecodeOffset());
}
+ // Dispatch bytecode as wide operand variant.
+ void DispatchWide(OperandScale operand_scale);
+
// Abort with the given bailout reason.
void Abort(BailoutReason bailout_reason);
protected:
+ Bytecode bytecode() const { return bytecode_; }
static bool TargetSupportsUnalignedAccess();
private:
@@ -155,6 +163,11 @@ class InterpreterAssembler : public compiler::CodeStubAssembler {
// Returns a raw pointer to first entry in the interpreter dispatch table.
compiler::Node* DispatchTableRawPointer();
+ // Returns the accumulator value without checking whether bytecode
+ // uses it. This is intended to be used only in dispatch and in
+ // tracing as these need to bypass accumulator use validity checks.
+ compiler::Node* GetAccumulatorUnchecked();
+
// Saves and restores interpreter bytecode offset to the interpreter stack
// frame when performing a call.
void CallPrologue() override;
@@ -170,10 +183,28 @@ class InterpreterAssembler : public compiler::CodeStubAssembler {
// Returns the offset of register |index| relative to RegisterFilePointer().
compiler::Node* RegisterFrameOffset(compiler::Node* index);
- compiler::Node* BytecodeOperand(int operand_index);
- compiler::Node* BytecodeOperandSignExtended(int operand_index);
- compiler::Node* BytecodeOperandShort(int operand_index);
- compiler::Node* BytecodeOperandShortSignExtended(int operand_index);
+ // Returns the offset of an operand relative to the current bytecode offset.
+ compiler::Node* OperandOffset(int operand_index);
+
+ // Returns a value built from an sequence of bytes in the bytecode
+ // array starting at |relative_offset| from the current bytecode.
+ // The |result_type| determines the size and signedness. of the
+ // value read. This method should only be used on architectures that
+ // do not support unaligned memory accesses.
+ compiler::Node* BytecodeOperandReadUnaligned(int relative_offset,
+ MachineType result_type);
+
+ compiler::Node* BytecodeOperandUnsignedByte(int operand_index);
+ compiler::Node* BytecodeOperandSignedByte(int operand_index);
+ compiler::Node* BytecodeOperandUnsignedShort(int operand_index);
+ compiler::Node* BytecodeOperandSignedShort(int operand_index);
+ compiler::Node* BytecodeOperandUnsignedQuad(int operand_index);
+ compiler::Node* BytecodeOperandSignedQuad(int operand_index);
+
+ compiler::Node* BytecodeSignedOperand(int operand_index,
+ OperandSize operand_size);
+ compiler::Node* BytecodeUnsignedOperand(int operand_index,
+ OperandSize operand_size);
// Returns BytecodeOffset() advanced by delta bytecodes. Note: this does not
// update BytecodeOffset() itself.
@@ -187,8 +218,12 @@ class InterpreterAssembler : public compiler::CodeStubAssembler {
void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
BailoutReason bailout_reason);
+ OperandScale operand_scale() const { return operand_scale_; }
+
Bytecode bytecode_;
+ OperandScale operand_scale_;
CodeStubAssembler::Variable accumulator_;
+ AccumulatorUse accumulator_use_;
CodeStubAssembler::Variable context_;
CodeStubAssembler::Variable bytecode_array_;
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics.cc b/deps/v8/src/interpreter/interpreter-intrinsics.cc
new file mode 100644
index 0000000000..6d9917de4f
--- /dev/null
+++ b/deps/v8/src/interpreter/interpreter-intrinsics.cc
@@ -0,0 +1,159 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/interpreter-intrinsics.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+using compiler::Node;
+
+#define __ assembler_->
+
+IntrinsicsHelper::IntrinsicsHelper(InterpreterAssembler* assembler)
+ : assembler_(assembler) {}
+
+bool IntrinsicsHelper::IsSupported(Runtime::FunctionId function_id) {
+ switch (function_id) {
+#define SUPPORTED(name, lower_case, count) case Runtime::kInline##name:
+ INTRINSICS_LIST(SUPPORTED)
+ return true;
+#undef SUPPORTED
+ default:
+ return false;
+ }
+}
+
+Node* IntrinsicsHelper::InvokeIntrinsic(Node* function_id, Node* context,
+ Node* first_arg_reg, Node* arg_count) {
+ InterpreterAssembler::Label abort(assembler_), end(assembler_);
+ InterpreterAssembler::Variable result(assembler_,
+ MachineRepresentation::kTagged);
+
+#define MAKE_LABEL(name, lower_case, count) \
+ InterpreterAssembler::Label lower_case(assembler_);
+ INTRINSICS_LIST(MAKE_LABEL)
+#undef MAKE_LABEL
+
+#define LABEL_POINTER(name, lower_case, count) &lower_case,
+ InterpreterAssembler::Label* labels[] = {INTRINSICS_LIST(LABEL_POINTER)};
+#undef LABEL_POINTER
+
+#define CASE(name, lower_case, count) \
+ static_cast<int32_t>(Runtime::kInline##name),
+ int32_t cases[] = {INTRINSICS_LIST(CASE)};
+#undef CASE
+
+ __ Switch(function_id, &abort, cases, labels, arraysize(cases));
+#define HANDLE_CASE(name, lower_case, expected_arg_count) \
+ __ Bind(&lower_case); \
+ if (FLAG_debug_code) { \
+ AbortIfArgCountMismatch(expected_arg_count, arg_count); \
+ } \
+ result.Bind(name(first_arg_reg)); \
+ __ Goto(&end);
+ INTRINSICS_LIST(HANDLE_CASE)
+#undef HANDLE_CASE
+
+ __ Bind(&abort);
+ __ Abort(BailoutReason::kUnexpectedFunctionIDForInvokeIntrinsic);
+ result.Bind(__ UndefinedConstant());
+ __ Goto(&end);
+
+ __ Bind(&end);
+ return result.value();
+}
+
+Node* IntrinsicsHelper::CompareInstanceType(Node* map, int type,
+ InstanceTypeCompareMode mode) {
+ InterpreterAssembler::Variable return_value(assembler_,
+ MachineRepresentation::kTagged);
+ Node* instance_type = __ LoadInstanceType(map);
+
+ InterpreterAssembler::Label if_true(assembler_), if_false(assembler_),
+ end(assembler_);
+ Node* condition;
+ if (mode == kInstanceTypeEqual) {
+ condition = __ Word32Equal(instance_type, __ Int32Constant(type));
+ } else {
+ DCHECK(mode == kInstanceTypeGreaterThanOrEqual);
+ condition =
+ __ Int32GreaterThanOrEqual(instance_type, __ Int32Constant(type));
+ }
+ __ Branch(condition, &if_true, &if_false);
+
+ __ Bind(&if_true);
+ return_value.Bind(__ BooleanConstant(true));
+ __ Goto(&end);
+
+ __ Bind(&if_false);
+ return_value.Bind(__ BooleanConstant(false));
+ __ Goto(&end);
+
+ __ Bind(&end);
+ return return_value.value();
+}
+
+Node* IntrinsicsHelper::IsJSReceiver(Node* input) {
+ InterpreterAssembler::Variable return_value(assembler_,
+ MachineRepresentation::kTagged);
+
+ InterpreterAssembler::Label if_smi(assembler_), if_not_smi(assembler_),
+ end(assembler_);
+ Node* arg = __ LoadRegister(input);
+
+ __ Branch(__ WordIsSmi(arg), &if_smi, &if_not_smi);
+ __ Bind(&if_smi);
+ return_value.Bind(__ BooleanConstant(false));
+ __ Goto(&end);
+
+ __ Bind(&if_not_smi);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ return_value.Bind(CompareInstanceType(arg, FIRST_JS_RECEIVER_TYPE,
+ kInstanceTypeGreaterThanOrEqual));
+ __ Goto(&end);
+
+ __ Bind(&end);
+ return return_value.value();
+}
+
+Node* IntrinsicsHelper::IsArray(Node* input) {
+ InterpreterAssembler::Variable return_value(assembler_,
+ MachineRepresentation::kTagged);
+
+ InterpreterAssembler::Label if_smi(assembler_), if_not_smi(assembler_),
+ end(assembler_);
+ Node* arg = __ LoadRegister(input);
+
+ __ Branch(__ WordIsSmi(arg), &if_smi, &if_not_smi);
+ __ Bind(&if_smi);
+ return_value.Bind(__ BooleanConstant(false));
+ __ Goto(&end);
+
+ __ Bind(&if_not_smi);
+ return_value.Bind(
+ CompareInstanceType(arg, JS_ARRAY_TYPE, kInstanceTypeEqual));
+ __ Goto(&end);
+
+ __ Bind(&end);
+ return return_value.value();
+}
+
+void IntrinsicsHelper::AbortIfArgCountMismatch(int expected, Node* actual) {
+ InterpreterAssembler::Label match(assembler_), mismatch(assembler_),
+ end(assembler_);
+ Node* comparison = __ Word32Equal(actual, __ Int32Constant(expected));
+ __ Branch(comparison, &match, &mismatch);
+ __ Bind(&mismatch);
+ __ Abort(kWrongArgumentCountForInvokeIntrinsic);
+ __ Goto(&end);
+ __ Bind(&match);
+ __ Goto(&end);
+ __ Bind(&end);
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics.h b/deps/v8/src/interpreter/interpreter-intrinsics.h
new file mode 100644
index 0000000000..e27c678e25
--- /dev/null
+++ b/deps/v8/src/interpreter/interpreter-intrinsics.h
@@ -0,0 +1,62 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_INTERPRETER_INTRINSICS_H_
+#define V8_INTERPRETER_INTERPRETER_INTRINSICS_H_
+
+#include "src/allocation.h"
+#include "src/base/smart-pointers.h"
+#include "src/builtins.h"
+#include "src/frames.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/interpreter/interpreter-assembler.h"
+#include "src/runtime/runtime.h"
+
+namespace v8 {
+namespace internal {
+
+namespace compiler {
+class Node;
+} // namespace compiler
+
+#define INTRINSICS_LIST(V) \
+ V(IsJSReceiver, is_js_receiver, 1) \
+ V(IsArray, is_array, 1)
+
+namespace interpreter {
+
+class IntrinsicsHelper {
+ public:
+ explicit IntrinsicsHelper(InterpreterAssembler* assembler);
+
+ compiler::Node* InvokeIntrinsic(compiler::Node* function_id,
+ compiler::Node* context,
+ compiler::Node* first_arg_reg,
+ compiler::Node* arg_count);
+
+ static bool IsSupported(Runtime::FunctionId function_id);
+
+ private:
+ enum InstanceTypeCompareMode {
+ kInstanceTypeEqual,
+ kInstanceTypeGreaterThanOrEqual
+ };
+ compiler::Node* CompareInstanceType(compiler::Node* map, int type,
+ InstanceTypeCompareMode mode);
+ void AbortIfArgCountMismatch(int expected, compiler::Node* actual);
+ InterpreterAssembler* assembler_;
+
+#define DECLARE_INTRINSIC_HELPER(name, lower_case, count) \
+ compiler::Node* name(compiler::Node* input);
+ INTRINSICS_LIST(DECLARE_INTRINSIC_HELPER)
+#undef DECLARE_INTRINSIC_HELPER
+
+ DISALLOW_COPY_AND_ASSIGN(IntrinsicsHelper);
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index eb883427bb..5084300dfe 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -11,6 +11,8 @@
#include "src/interpreter/bytecode-generator.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter-assembler.h"
+#include "src/interpreter/interpreter-intrinsics.h"
+#include "src/log.h"
#include "src/zone.h"
namespace v8 {
@@ -22,30 +24,69 @@ using compiler::Node;
#define __ assembler->
Interpreter::Interpreter(Isolate* isolate) : isolate_(isolate) {
- memset(&dispatch_table_, 0, sizeof(dispatch_table_));
+ memset(dispatch_table_, 0, sizeof(dispatch_table_));
}
void Interpreter::Initialize() {
DCHECK(FLAG_ignition);
if (IsDispatchTableInitialized()) return;
- Zone zone;
+ Zone zone(isolate_->allocator());
HandleScope scope(isolate_);
-#define GENERATE_CODE(Name, ...) \
- { \
- InterpreterAssembler assembler(isolate_, &zone, Bytecode::k##Name); \
- Do##Name(&assembler); \
- Handle<Code> code = assembler.GenerateCode(); \
- TraceCodegen(code, #Name); \
- dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] = *code; \
+ // Generate bytecode handlers for all bytecodes and scales.
+ for (OperandScale operand_scale = OperandScale::kSingle;
+ operand_scale <= OperandScale::kMaxValid;
+ operand_scale = Bytecodes::NextOperandScale(operand_scale)) {
+#define GENERATE_CODE(Name, ...) \
+ { \
+ if (Bytecodes::BytecodeHasHandler(Bytecode::k##Name, operand_scale)) { \
+ InterpreterAssembler assembler(isolate_, &zone, Bytecode::k##Name, \
+ operand_scale); \
+ Do##Name(&assembler); \
+ Handle<Code> code = assembler.GenerateCode(); \
+ size_t index = GetDispatchTableIndex(Bytecode::k##Name, operand_scale); \
+ dispatch_table_[index] = *code; \
+ TraceCodegen(code); \
+ LOG_CODE_EVENT( \
+ isolate_, \
+ CodeCreateEvent( \
+ Logger::BYTECODE_HANDLER_TAG, AbstractCode::cast(*code), \
+ Bytecodes::ToString(Bytecode::k##Name, operand_scale).c_str())); \
+ } \
}
- BYTECODE_LIST(GENERATE_CODE)
+ BYTECODE_LIST(GENERATE_CODE)
#undef GENERATE_CODE
+ }
+
+ // Fill unused entries will the illegal bytecode handler.
+ size_t illegal_index =
+ GetDispatchTableIndex(Bytecode::kIllegal, OperandScale::kSingle);
+ for (size_t index = 0; index < arraysize(dispatch_table_); ++index) {
+ if (dispatch_table_[index] == nullptr) {
+ dispatch_table_[index] = dispatch_table_[illegal_index];
+ }
+ }
}
-Code* Interpreter::GetBytecodeHandler(Bytecode bytecode) {
+Code* Interpreter::GetBytecodeHandler(Bytecode bytecode,
+ OperandScale operand_scale) {
DCHECK(IsDispatchTableInitialized());
- return dispatch_table_[Bytecodes::ToByte(bytecode)];
+ DCHECK(Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
+ size_t index = GetDispatchTableIndex(bytecode, operand_scale);
+ return dispatch_table_[index];
+}
+
+// static
+size_t Interpreter::GetDispatchTableIndex(Bytecode bytecode,
+ OperandScale operand_scale) {
+ static const size_t kEntriesPerOperandScale = 1u << kBitsPerByte;
+ size_t index = static_cast<size_t>(bytecode);
+ OperandScale current_scale = OperandScale::kSingle;
+ while (current_scale != operand_scale) {
+ index += kEntriesPerOperandScale;
+ current_scale = Bytecodes::NextOperandScale(current_scale);
+ }
+ return index;
}
void Interpreter::IterateDispatchTable(ObjectVisitor* v) {
@@ -62,6 +103,9 @@ int Interpreter::InterruptBudget() {
}
bool Interpreter::MakeBytecode(CompilationInfo* info) {
+ TimerEventScope<TimerEventCompileIgnition> timer(info->isolate());
+ TRACE_EVENT0("v8", "V8.CompileIgnition");
+
if (FLAG_print_bytecode || FLAG_print_source || FLAG_print_ast) {
OFStream os(stdout);
base::SmartArrayPointer<char> name = info->GetDebugName();
@@ -88,8 +132,10 @@ bool Interpreter::MakeBytecode(CompilationInfo* info) {
#endif // DEBUG
BytecodeGenerator generator(info->isolate(), info->zone());
- info->EnsureFeedbackVector();
Handle<BytecodeArray> bytecodes = generator.MakeBytecode(info);
+
+ if (generator.HasStackOverflow()) return false;
+
if (FLAG_print_bytecode) {
OFStream os(stdout);
bytecodes->Print(os);
@@ -102,23 +148,36 @@ bool Interpreter::MakeBytecode(CompilationInfo* info) {
}
bool Interpreter::IsDispatchTableInitialized() {
- if (FLAG_trace_ignition) {
- // Regenerate table to add bytecode tracing operations.
+ if (FLAG_trace_ignition || FLAG_trace_ignition_codegen) {
+ // Regenerate table to add bytecode tracing operations
+ // or to print the assembly code generated by TurboFan.
return false;
}
return dispatch_table_[0] != nullptr;
}
-void Interpreter::TraceCodegen(Handle<Code> code, const char* name) {
+void Interpreter::TraceCodegen(Handle<Code> code) {
#ifdef ENABLE_DISASSEMBLER
if (FLAG_trace_ignition_codegen) {
OFStream os(stdout);
- code->Disassemble(name, os);
+ code->Disassemble(nullptr, os);
os << std::flush;
}
#endif // ENABLE_DISASSEMBLER
}
+const char* Interpreter::LookupNameOfBytecodeHandler(Code* code) {
+#ifdef ENABLE_DISASSEMBLER
+#define RETURN_NAME(Name, ...) \
+ if (dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] == code) { \
+ return #Name; \
+ }
+ BYTECODE_LIST(RETURN_NAME)
+#undef RETURN_NAME
+#endif // ENABLE_DISASSEMBLER
+ return nullptr;
+}
+
// LdaZero
//
// Load literal '0' into the accumulator.
@@ -128,11 +187,10 @@ void Interpreter::DoLdaZero(InterpreterAssembler* assembler) {
__ Dispatch();
}
-
-// LdaSmi8 <imm8>
+// LdaSmi <imm>
//
-// Load an 8-bit integer literal into the accumulator as a Smi.
-void Interpreter::DoLdaSmi8(InterpreterAssembler* assembler) {
+// Load an integer literal into the accumulator as a Smi.
+void Interpreter::DoLdaSmi(InterpreterAssembler* assembler) {
Node* raw_int = __ BytecodeOperandImm(0);
Node* smi_int = __ SmiTag(raw_int);
__ SetAccumulator(smi_int);
@@ -154,15 +212,6 @@ void Interpreter::DoLdaConstant(InterpreterAssembler* assembler) {
DoLoadConstant(assembler);
}
-
-// LdaConstantWide <idx>
-//
-// Load constant literal at |idx| in the constant pool into the accumulator.
-void Interpreter::DoLdaConstantWide(InterpreterAssembler* assembler) {
- DoLoadConstant(assembler);
-}
-
-
// LdaUndefined
//
// Load Undefined into the accumulator.
@@ -248,13 +297,6 @@ void Interpreter::DoMov(InterpreterAssembler* assembler) {
}
-// MovWide <src> <dst>
-//
-// Stores the value of register <src> to register <dst>.
-void Interpreter::DoMovWide(InterpreterAssembler* assembler) {
- DoMov(assembler);
-}
-
void Interpreter::DoLoadGlobal(Callable ic, InterpreterAssembler* assembler) {
// Get the global object.
Node* context = __ GetContext();
@@ -295,27 +337,6 @@ void Interpreter::DoLdaGlobalInsideTypeof(InterpreterAssembler* assembler) {
DoLoadGlobal(ic, assembler);
}
-// LdaGlobalWide <name_index> <slot>
-//
-// Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> outside of a typeof.
-void Interpreter::DoLdaGlobalWide(InterpreterAssembler* assembler) {
- Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
- UNINITIALIZED);
- DoLoadGlobal(ic, assembler);
-}
-
-// LdaGlobalInsideTypeofWide <name_index> <slot>
-//
-// Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> inside of a typeof.
-void Interpreter::DoLdaGlobalInsideTypeofWide(InterpreterAssembler* assembler) {
- Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
- UNINITIALIZED);
- DoLoadGlobal(ic, assembler);
-}
-
-
void Interpreter::DoStoreGlobal(Callable ic, InterpreterAssembler* assembler) {
// Get the global object.
Node* context = __ GetContext();
@@ -333,7 +354,6 @@ void Interpreter::DoStoreGlobal(Callable ic, InterpreterAssembler* assembler) {
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
__ CallStub(ic.descriptor(), code_target, context, global, name, value,
smi_slot, type_feedback_vector);
-
__ Dispatch();
}
@@ -359,29 +379,6 @@ void Interpreter::DoStaGlobalStrict(InterpreterAssembler* assembler) {
DoStoreGlobal(ic, assembler);
}
-
-// StaGlobalSloppyWide <name_index> <slot>
-//
-// Store the value in the accumulator into the global with name in constant pool
-// entry <name_index> using FeedBackVector slot <slot> in sloppy mode.
-void Interpreter::DoStaGlobalSloppyWide(InterpreterAssembler* assembler) {
- Callable ic =
- CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
- DoStoreGlobal(ic, assembler);
-}
-
-
-// StaGlobalStrictWide <name_index> <slot>
-//
-// Store the value in the accumulator into the global with name in constant pool
-// entry <name_index> using FeedBackVector slot <slot> in strict mode.
-void Interpreter::DoStaGlobalStrictWide(InterpreterAssembler* assembler) {
- Callable ic =
- CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
- DoStoreGlobal(ic, assembler);
-}
-
-
// LdaContextSlot <context> <slot_index>
//
// Load the object in |slot_index| of |context| into the accumulator.
@@ -394,15 +391,6 @@ void Interpreter::DoLdaContextSlot(InterpreterAssembler* assembler) {
__ Dispatch();
}
-
-// LdaContextSlotWide <context> <slot_index>
-//
-// Load the object in |slot_index| of |context| into the accumulator.
-void Interpreter::DoLdaContextSlotWide(InterpreterAssembler* assembler) {
- DoLdaContextSlot(assembler);
-}
-
-
// StaContextSlot <context> <slot_index>
//
// Stores the object in the accumulator into |slot_index| of |context|.
@@ -415,14 +403,6 @@ void Interpreter::DoStaContextSlot(InterpreterAssembler* assembler) {
__ Dispatch();
}
-
-// StaContextSlot <context> <slot_index>
-//
-// Stores the object in the accumulator into |slot_index| of |context|.
-void Interpreter::DoStaContextSlotWide(InterpreterAssembler* assembler) {
- DoStaContextSlot(assembler);
-}
-
void Interpreter::DoLoadLookupSlot(Runtime::FunctionId function_id,
InterpreterAssembler* assembler) {
Node* index = __ BytecodeOperandIdx(0);
@@ -433,7 +413,6 @@ void Interpreter::DoLoadLookupSlot(Runtime::FunctionId function_id,
__ Dispatch();
}
-
// LdaLookupSlot <name_index>
//
// Lookup the object with the name in constant pool entry |name_index|
@@ -442,7 +421,6 @@ void Interpreter::DoLdaLookupSlot(InterpreterAssembler* assembler) {
DoLoadLookupSlot(Runtime::kLoadLookupSlot, assembler);
}
-
// LdaLookupSlotInsideTypeof <name_index>
//
// Lookup the object with the name in constant pool entry |name_index|
@@ -451,25 +429,6 @@ void Interpreter::DoLdaLookupSlotInsideTypeof(InterpreterAssembler* assembler) {
DoLoadLookupSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
}
-
-// LdaLookupSlotWide <name_index>
-//
-// Lookup the object with the name in constant pool entry |name_index|
-// dynamically.
-void Interpreter::DoLdaLookupSlotWide(InterpreterAssembler* assembler) {
- DoLdaLookupSlot(assembler);
-}
-
-
-// LdaLookupSlotInsideTypeofWide <name_index>
-//
-// Lookup the object with the name in constant pool entry |name_index|
-// dynamically without causing a NoReferenceError.
-void Interpreter::DoLdaLookupSlotInsideTypeofWide(
- InterpreterAssembler* assembler) {
- DoLdaLookupSlotInsideTypeof(assembler);
-}
-
void Interpreter::DoStoreLookupSlot(LanguageMode language_mode,
InterpreterAssembler* assembler) {
Node* value = __ GetAccumulator();
@@ -484,7 +443,6 @@ void Interpreter::DoStoreLookupSlot(LanguageMode language_mode,
__ Dispatch();
}
-
// StaLookupSlotSloppy <name_index>
//
// Store the object in accumulator to the object with the name in constant
@@ -502,24 +460,6 @@ void Interpreter::DoStaLookupSlotStrict(InterpreterAssembler* assembler) {
DoStoreLookupSlot(LanguageMode::STRICT, assembler);
}
-
-// StaLookupSlotSloppyWide <name_index>
-//
-// Store the object in accumulator to the object with the name in constant
-// pool entry |name_index| in sloppy mode.
-void Interpreter::DoStaLookupSlotSloppyWide(InterpreterAssembler* assembler) {
- DoStaLookupSlotSloppy(assembler);
-}
-
-
-// StaLookupSlotStrictWide <name_index>
-//
-// Store the object in accumulator to the object with the name in constant
-// pool entry |name_index| in strict mode.
-void Interpreter::DoStaLookupSlotStrictWide(InterpreterAssembler* assembler) {
- DoStaLookupSlotStrict(assembler);
-}
-
void Interpreter::DoLoadIC(Callable ic, InterpreterAssembler* assembler) {
Node* code_target = __ HeapConstant(ic.code());
Node* register_index = __ BytecodeOperandReg(0);
@@ -546,17 +486,6 @@ void Interpreter::DoLoadIC(InterpreterAssembler* assembler) {
DoLoadIC(ic, assembler);
}
-// LoadICWide <object> <name_index> <slot>
-//
-// Calls the LoadIC at FeedBackVector slot <slot> for <object> and the name at
-// constant pool entry <name_index>.
-void Interpreter::DoLoadICWide(InterpreterAssembler* assembler) {
- Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
- UNINITIALIZED);
- DoLoadIC(ic, assembler);
-}
-
-
void Interpreter::DoKeyedLoadIC(Callable ic, InterpreterAssembler* assembler) {
Node* code_target = __ HeapConstant(ic.code());
Node* reg_index = __ BytecodeOperandReg(0);
@@ -582,17 +511,6 @@ void Interpreter::DoKeyedLoadIC(InterpreterAssembler* assembler) {
DoKeyedLoadIC(ic, assembler);
}
-// KeyedLoadICWide <object> <slot>
-//
-// Calls the KeyedLoadIC at FeedBackVector slot <slot> for <object> and the key
-// in the accumulator.
-void Interpreter::DoKeyedLoadICWide(InterpreterAssembler* assembler) {
- Callable ic =
- CodeFactory::KeyedLoadICInOptimizedCode(isolate_, UNINITIALIZED);
- DoKeyedLoadIC(ic, assembler);
-}
-
-
void Interpreter::DoStoreIC(Callable ic, InterpreterAssembler* assembler) {
Node* code_target = __ HeapConstant(ic.code());
Node* object_reg_index = __ BytecodeOperandReg(0);
@@ -633,30 +551,6 @@ void Interpreter::DoStoreICStrict(InterpreterAssembler* assembler) {
DoStoreIC(ic, assembler);
}
-
-// StoreICSloppyWide <object> <name_index> <slot>
-//
-// Calls the sloppy mode StoreIC at FeedBackVector slot <slot> for <object> and
-// the name in constant pool entry <name_index> with the value in the
-// accumulator.
-void Interpreter::DoStoreICSloppyWide(InterpreterAssembler* assembler) {
- Callable ic =
- CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
- DoStoreIC(ic, assembler);
-}
-
-
-// StoreICStrictWide <object> <name_index> <slot>
-//
-// Calls the strict mode StoreIC at FeedBackVector slot <slot> for <object> and
-// the name in constant pool entry <name_index> with the value in the
-// accumulator.
-void Interpreter::DoStoreICStrictWide(InterpreterAssembler* assembler) {
- Callable ic =
- CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
- DoStoreIC(ic, assembler);
-}
-
void Interpreter::DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler) {
Node* code_target = __ HeapConstant(ic.code());
Node* object_reg_index = __ BytecodeOperandReg(0);
@@ -695,28 +589,6 @@ void Interpreter::DoKeyedStoreICStrict(InterpreterAssembler* assembler) {
DoKeyedStoreIC(ic, assembler);
}
-
-// KeyedStoreICSloppyWide <object> <key> <slot>
-//
-// Calls the sloppy mode KeyStoreIC at FeedBackVector slot <slot> for <object>
-// and the key <key> with the value in the accumulator.
-void Interpreter::DoKeyedStoreICSloppyWide(InterpreterAssembler* assembler) {
- Callable ic =
- CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
- DoKeyedStoreIC(ic, assembler);
-}
-
-
-// KeyedStoreICStoreWide <object> <key> <slot>
-//
-// Calls the strict mode KeyStoreIC at FeedBackVector slot <slot> for <object>
-// and the key <key> with the value in the accumulator.
-void Interpreter::DoKeyedStoreICStrictWide(InterpreterAssembler* assembler) {
- Callable ic =
- CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
- DoKeyedStoreIC(ic, assembler);
-}
-
// PushContext <context>
//
// Saves the current context in <context>, and pushes the accumulator as the
@@ -741,6 +613,20 @@ void Interpreter::DoPopContext(InterpreterAssembler* assembler) {
__ Dispatch();
}
+void Interpreter::DoBinaryOp(Callable callable,
+ InterpreterAssembler* assembler) {
+ // TODO(bmeurer): Collect definition side type feedback for various
+ // binary operations.
+ Node* target = __ HeapConstant(callable.code());
+ Node* reg_index = __ BytecodeOperandReg(0);
+ Node* lhs = __ LoadRegister(reg_index);
+ Node* rhs = __ GetAccumulator();
+ Node* context = __ GetContext();
+ Node* result = __ CallStub(callable.descriptor(), target, context, lhs, rhs);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
void Interpreter::DoBinaryOp(Runtime::FunctionId function_id,
InterpreterAssembler* assembler) {
// TODO(rmcilroy): Call ICs which back-patch bytecode with type specialized
@@ -759,7 +645,7 @@ void Interpreter::DoBinaryOp(Runtime::FunctionId function_id,
//
// Add register <src> to accumulator.
void Interpreter::DoAdd(InterpreterAssembler* assembler) {
- DoBinaryOp(Runtime::kAdd, assembler);
+ DoBinaryOp(CodeFactory::Add(isolate_), assembler);
}
@@ -767,7 +653,7 @@ void Interpreter::DoAdd(InterpreterAssembler* assembler) {
//
// Subtract register <src> from accumulator.
void Interpreter::DoSub(InterpreterAssembler* assembler) {
- DoBinaryOp(Runtime::kSubtract, assembler);
+ DoBinaryOp(CodeFactory::Subtract(isolate_), assembler);
}
@@ -799,7 +685,7 @@ void Interpreter::DoMod(InterpreterAssembler* assembler) {
//
// BitwiseOr register <src> to accumulator.
void Interpreter::DoBitwiseOr(InterpreterAssembler* assembler) {
- DoBinaryOp(Runtime::kBitwiseOr, assembler);
+ DoBinaryOp(CodeFactory::BitwiseOr(isolate_), assembler);
}
@@ -807,7 +693,7 @@ void Interpreter::DoBitwiseOr(InterpreterAssembler* assembler) {
//
// BitwiseXor register <src> to accumulator.
void Interpreter::DoBitwiseXor(InterpreterAssembler* assembler) {
- DoBinaryOp(Runtime::kBitwiseXor, assembler);
+ DoBinaryOp(CodeFactory::BitwiseXor(isolate_), assembler);
}
@@ -815,7 +701,7 @@ void Interpreter::DoBitwiseXor(InterpreterAssembler* assembler) {
//
// BitwiseAnd register <src> to accumulator.
void Interpreter::DoBitwiseAnd(InterpreterAssembler* assembler) {
- DoBinaryOp(Runtime::kBitwiseAnd, assembler);
+ DoBinaryOp(CodeFactory::BitwiseAnd(isolate_), assembler);
}
@@ -883,24 +769,40 @@ void Interpreter::DoDec(InterpreterAssembler* assembler) {
// Perform logical-not on the accumulator, first casting the
// accumulator to a boolean value if required.
void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
+ Callable callable = CodeFactory::ToBoolean(isolate_);
+ Node* target = __ HeapConstant(callable.code());
Node* accumulator = __ GetAccumulator();
Node* context = __ GetContext();
- Node* result =
- __ CallRuntime(Runtime::kInterpreterLogicalNot, context, accumulator);
- __ SetAccumulator(result);
- __ Dispatch();
+ Node* to_boolean_value =
+ __ CallStub(callable.descriptor(), target, context, accumulator);
+ InterpreterAssembler::Label if_true(assembler), if_false(assembler);
+ Node* true_value = __ BooleanConstant(true);
+ Node* false_value = __ BooleanConstant(false);
+ Node* condition = __ WordEqual(to_boolean_value, true_value);
+ __ Branch(condition, &if_true, &if_false);
+ __ Bind(&if_true);
+ {
+ __ SetAccumulator(false_value);
+ __ Dispatch();
+ }
+ __ Bind(&if_false);
+ {
+ __ SetAccumulator(true_value);
+ __ Dispatch();
+ }
}
-
// TypeOf
//
// Load the accumulator with the string representating type of the
// object in the accumulator.
void Interpreter::DoTypeOf(InterpreterAssembler* assembler) {
+ Callable callable = CodeFactory::Typeof(isolate_);
+ Node* target = __ HeapConstant(callable.code());
Node* accumulator = __ GetAccumulator();
Node* context = __ GetContext();
Node* result =
- __ CallRuntime(Runtime::kInterpreterTypeOf, context, accumulator);
+ __ CallStub(callable.descriptor(), target, context, accumulator);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -960,15 +862,6 @@ void Interpreter::DoCall(InterpreterAssembler* assembler) {
DoJSCall(assembler, TailCallMode::kDisallow);
}
-
-// CallWide <callable> <receiver> <arg_count>
-//
-// Call a JSfunction or Callable in |callable| with the |receiver| and
-// |arg_count| arguments in subsequent registers.
-void Interpreter::DoCallWide(InterpreterAssembler* assembler) {
- DoJSCall(assembler, TailCallMode::kDisallow);
-}
-
// TailCall <callable> <receiver> <arg_count>
//
// Tail call a JSfunction or Callable in |callable| with the |receiver| and
@@ -977,16 +870,8 @@ void Interpreter::DoTailCall(InterpreterAssembler* assembler) {
DoJSCall(assembler, TailCallMode::kAllow);
}
-// TailCallWide <callable> <receiver> <arg_count>
-//
-// Tail call a JSfunction or Callable in |callable| with the |receiver| and
-// |arg_count| arguments in subsequent registers.
-void Interpreter::DoTailCallWide(InterpreterAssembler* assembler) {
- DoJSCall(assembler, TailCallMode::kAllow);
-}
-
void Interpreter::DoCallRuntimeCommon(InterpreterAssembler* assembler) {
- Node* function_id = __ BytecodeOperandIdx(0);
+ Node* function_id = __ BytecodeOperandRuntimeId(0);
Node* first_arg_reg = __ BytecodeOperandReg(1);
Node* first_arg = __ RegisterLocation(first_arg_reg);
Node* args_count = __ BytecodeOperandCount(2);
@@ -1006,19 +891,26 @@ void Interpreter::DoCallRuntime(InterpreterAssembler* assembler) {
DoCallRuntimeCommon(assembler);
}
-
-// CallRuntime <function_id> <first_arg> <arg_count>
+// InvokeIntrinsic <function_id> <first_arg> <arg_count>
//
-// Call the runtime function |function_id| with the first argument in
-// register |first_arg| and |arg_count| arguments in subsequent
-// registers.
-void Interpreter::DoCallRuntimeWide(InterpreterAssembler* assembler) {
- DoCallRuntimeCommon(assembler);
+// Implements the semantic equivalent of calling the runtime function
+// |function_id| with the first argument in |first_arg| and |arg_count|
+// arguments in subsequent registers.
+void Interpreter::DoInvokeIntrinsic(InterpreterAssembler* assembler) {
+ Node* function_id = __ BytecodeOperandRuntimeId(0);
+ Node* first_arg_reg = __ BytecodeOperandReg(1);
+ Node* arg_count = __ BytecodeOperandCount(2);
+ Node* context = __ GetContext();
+ IntrinsicsHelper helper(assembler);
+ Node* result =
+ helper.InvokeIntrinsic(function_id, context, first_arg_reg, arg_count);
+ __ SetAccumulator(result);
+ __ Dispatch();
}
void Interpreter::DoCallRuntimeForPairCommon(InterpreterAssembler* assembler) {
// Call the runtime function.
- Node* function_id = __ BytecodeOperandIdx(0);
+ Node* function_id = __ BytecodeOperandRuntimeId(0);
Node* first_arg_reg = __ BytecodeOperandReg(1);
Node* first_arg = __ RegisterLocation(first_arg_reg);
Node* args_count = __ BytecodeOperandCount(2);
@@ -1047,17 +939,6 @@ void Interpreter::DoCallRuntimeForPair(InterpreterAssembler* assembler) {
DoCallRuntimeForPairCommon(assembler);
}
-
-// CallRuntimeForPairWide <function_id> <first_arg> <arg_count> <first_return>
-//
-// Call the runtime function |function_id| which returns a pair, with the
-// first argument in register |first_arg| and |arg_count| arguments in
-// subsequent registers. Returns the result in <first_return> and
-// <first_return + 1>
-void Interpreter::DoCallRuntimeForPairWide(InterpreterAssembler* assembler) {
- DoCallRuntimeForPairCommon(assembler);
-}
-
void Interpreter::DoCallJSRuntimeCommon(InterpreterAssembler* assembler) {
Node* context_index = __ BytecodeOperandIdx(0);
Node* receiver_reg = __ BytecodeOperandReg(1);
@@ -1088,15 +969,6 @@ void Interpreter::DoCallJSRuntime(InterpreterAssembler* assembler) {
DoCallJSRuntimeCommon(assembler);
}
-
-// CallJSRuntimeWide <context_index> <receiver> <arg_count>
-//
-// Call the JS runtime function that has the |context_index| with the receiver
-// in register |receiver| and |arg_count| arguments in subsequent registers.
-void Interpreter::DoCallJSRuntimeWide(InterpreterAssembler* assembler) {
- DoCallJSRuntimeCommon(assembler);
-}
-
void Interpreter::DoCallConstruct(InterpreterAssembler* assembler) {
Callable ic = CodeFactory::InterpreterPushArgsAndConstruct(isolate_);
Node* new_target = __ GetAccumulator();
@@ -1123,23 +995,11 @@ void Interpreter::DoNew(InterpreterAssembler* assembler) {
DoCallConstruct(assembler);
}
-
-// NewWide <constructor> <first_arg> <arg_count>
-//
-// Call operator new with |constructor| and the first argument in
-// register |first_arg| and |arg_count| arguments in subsequent
-// registers. The new.target is in the accumulator.
-//
-void Interpreter::DoNewWide(InterpreterAssembler* assembler) {
- DoCallConstruct(assembler);
-}
-
-
// TestEqual <src>
//
// Test if the value in the <src> register equals the accumulator.
void Interpreter::DoTestEqual(InterpreterAssembler* assembler) {
- DoBinaryOp(Runtime::kEqual, assembler);
+ DoBinaryOp(CodeFactory::Equal(isolate_), assembler);
}
@@ -1147,7 +1007,7 @@ void Interpreter::DoTestEqual(InterpreterAssembler* assembler) {
//
// Test if the value in the <src> register is not equal to the accumulator.
void Interpreter::DoTestNotEqual(InterpreterAssembler* assembler) {
- DoBinaryOp(Runtime::kNotEqual, assembler);
+ DoBinaryOp(CodeFactory::NotEqual(isolate_), assembler);
}
@@ -1155,16 +1015,7 @@ void Interpreter::DoTestNotEqual(InterpreterAssembler* assembler) {
//
// Test if the value in the <src> register is strictly equal to the accumulator.
void Interpreter::DoTestEqualStrict(InterpreterAssembler* assembler) {
- DoBinaryOp(Runtime::kStrictEqual, assembler);
-}
-
-
-// TestNotEqualStrict <src>
-//
-// Test if the value in the <src> register is not strictly equal to the
-// accumulator.
-void Interpreter::DoTestNotEqualStrict(InterpreterAssembler* assembler) {
- DoBinaryOp(Runtime::kStrictNotEqual, assembler);
+ DoBinaryOp(CodeFactory::StrictEqual(isolate_), assembler);
}
@@ -1172,7 +1023,7 @@ void Interpreter::DoTestNotEqualStrict(InterpreterAssembler* assembler) {
//
// Test if the value in the <src> register is less than the accumulator.
void Interpreter::DoTestLessThan(InterpreterAssembler* assembler) {
- DoBinaryOp(Runtime::kLessThan, assembler);
+ DoBinaryOp(CodeFactory::LessThan(isolate_), assembler);
}
@@ -1180,7 +1031,7 @@ void Interpreter::DoTestLessThan(InterpreterAssembler* assembler) {
//
// Test if the value in the <src> register is greater than the accumulator.
void Interpreter::DoTestGreaterThan(InterpreterAssembler* assembler) {
- DoBinaryOp(Runtime::kGreaterThan, assembler);
+ DoBinaryOp(CodeFactory::GreaterThan(isolate_), assembler);
}
@@ -1189,7 +1040,7 @@ void Interpreter::DoTestGreaterThan(InterpreterAssembler* assembler) {
// Test if the value in the <src> register is less than or equal to the
// accumulator.
void Interpreter::DoTestLessThanOrEqual(InterpreterAssembler* assembler) {
- DoBinaryOp(Runtime::kLessThanOrEqual, assembler);
+ DoBinaryOp(CodeFactory::LessThanOrEqual(isolate_), assembler);
}
@@ -1198,7 +1049,7 @@ void Interpreter::DoTestLessThanOrEqual(InterpreterAssembler* assembler) {
// Test if the value in the <src> register is greater than or equal to the
// accumulator.
void Interpreter::DoTestGreaterThanOrEqual(InterpreterAssembler* assembler) {
- DoBinaryOp(Runtime::kGreaterThanOrEqual, assembler);
+ DoBinaryOp(CodeFactory::GreaterThanOrEqual(isolate_), assembler);
}
@@ -1219,16 +1070,22 @@ void Interpreter::DoTestInstanceOf(InterpreterAssembler* assembler) {
DoBinaryOp(Runtime::kInstanceOf, assembler);
}
+void Interpreter::DoTypeConversionOp(Callable callable,
+ InterpreterAssembler* assembler) {
+ Node* target = __ HeapConstant(callable.code());
+ Node* accumulator = __ GetAccumulator();
+ Node* context = __ GetContext();
+ Node* result =
+ __ CallStub(callable.descriptor(), target, context, accumulator);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
// ToName
//
// Cast the object referenced by the accumulator to a name.
void Interpreter::DoToName(InterpreterAssembler* assembler) {
- Node* accumulator = __ GetAccumulator();
- Node* context = __ GetContext();
- Node* result = __ CallRuntime(Runtime::kToName, context, accumulator);
- __ SetAccumulator(result);
- __ Dispatch();
+ DoTypeConversionOp(CodeFactory::ToName(isolate_), assembler);
}
@@ -1236,11 +1093,7 @@ void Interpreter::DoToName(InterpreterAssembler* assembler) {
//
// Cast the object referenced by the accumulator to a number.
void Interpreter::DoToNumber(InterpreterAssembler* assembler) {
- Node* accumulator = __ GetAccumulator();
- Node* context = __ GetContext();
- Node* result = __ CallRuntime(Runtime::kToNumber, context, accumulator);
- __ SetAccumulator(result);
- __ Dispatch();
+ DoTypeConversionOp(CodeFactory::ToNumber(isolate_), assembler);
}
@@ -1248,26 +1101,20 @@ void Interpreter::DoToNumber(InterpreterAssembler* assembler) {
//
// Cast the object referenced by the accumulator to a JSObject.
void Interpreter::DoToObject(InterpreterAssembler* assembler) {
- Node* accumulator = __ GetAccumulator();
- Node* context = __ GetContext();
- Node* result = __ CallRuntime(Runtime::kToObject, context, accumulator);
- __ SetAccumulator(result);
- __ Dispatch();
+ DoTypeConversionOp(CodeFactory::ToObject(isolate_), assembler);
}
-
-// Jump <imm8>
+// Jump <imm>
//
-// Jump by number of bytes represented by the immediate operand |imm8|.
+// Jump by number of bytes represented by the immediate operand |imm|.
void Interpreter::DoJump(InterpreterAssembler* assembler) {
Node* relative_jump = __ BytecodeOperandImm(0);
__ Jump(relative_jump);
}
-
-// JumpConstant <idx8>
+// JumpConstant <idx>
//
-// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool.
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool.
void Interpreter::DoJumpConstant(InterpreterAssembler* assembler) {
Node* index = __ BytecodeOperandIdx(0);
Node* constant = __ LoadConstantPoolEntry(index);
@@ -1275,17 +1122,7 @@ void Interpreter::DoJumpConstant(InterpreterAssembler* assembler) {
__ Jump(relative_jump);
}
-
-// JumpConstantWide <idx16>
-//
-// Jump by number of bytes in the Smi in the |idx16| entry in the
-// constant pool.
-void Interpreter::DoJumpConstantWide(InterpreterAssembler* assembler) {
- DoJumpConstant(assembler);
-}
-
-
-// JumpIfTrue <imm8>
+// JumpIfTrue <imm>
//
// Jump by number of bytes represented by an immediate operand if the
// accumulator contains true.
@@ -1296,10 +1133,9 @@ void Interpreter::DoJumpIfTrue(InterpreterAssembler* assembler) {
__ JumpIfWordEqual(accumulator, true_value, relative_jump);
}
-
-// JumpIfTrueConstant <idx8>
+// JumpIfTrueConstant <idx>
//
-// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the accumulator contains true.
void Interpreter::DoJumpIfTrueConstant(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
@@ -1310,17 +1146,7 @@ void Interpreter::DoJumpIfTrueConstant(InterpreterAssembler* assembler) {
__ JumpIfWordEqual(accumulator, true_value, relative_jump);
}
-
-// JumpIfTrueConstantWide <idx16>
-//
-// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
-// if the accumulator contains true.
-void Interpreter::DoJumpIfTrueConstantWide(InterpreterAssembler* assembler) {
- DoJumpIfTrueConstant(assembler);
-}
-
-
-// JumpIfFalse <imm8>
+// JumpIfFalse <imm>
//
// Jump by number of bytes represented by an immediate operand if the
// accumulator contains false.
@@ -1331,10 +1157,9 @@ void Interpreter::DoJumpIfFalse(InterpreterAssembler* assembler) {
__ JumpIfWordEqual(accumulator, false_value, relative_jump);
}
-
-// JumpIfFalseConstant <idx8>
+// JumpIfFalseConstant <idx>
//
-// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the accumulator contains false.
void Interpreter::DoJumpIfFalseConstant(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
@@ -1345,42 +1170,35 @@ void Interpreter::DoJumpIfFalseConstant(InterpreterAssembler* assembler) {
__ JumpIfWordEqual(accumulator, false_value, relative_jump);
}
-
-// JumpIfFalseConstant <idx16>
-//
-// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
-// if the accumulator contains false.
-void Interpreter::DoJumpIfFalseConstantWide(InterpreterAssembler* assembler) {
- DoJumpIfFalseConstant(assembler);
-}
-
-
-// JumpIfToBooleanTrue <imm8>
+// JumpIfToBooleanTrue <imm>
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is true when the object is cast to boolean.
void Interpreter::DoJumpIfToBooleanTrue(InterpreterAssembler* assembler) {
+ Callable callable = CodeFactory::ToBoolean(isolate_);
+ Node* target = __ HeapConstant(callable.code());
Node* accumulator = __ GetAccumulator();
Node* context = __ GetContext();
Node* to_boolean_value =
- __ CallRuntime(Runtime::kInterpreterToBoolean, context, accumulator);
+ __ CallStub(callable.descriptor(), target, context, accumulator);
Node* relative_jump = __ BytecodeOperandImm(0);
Node* true_value = __ BooleanConstant(true);
__ JumpIfWordEqual(to_boolean_value, true_value, relative_jump);
}
-
-// JumpIfToBooleanTrueConstant <idx8>
+// JumpIfToBooleanTrueConstant <idx>
//
-// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the object referenced by the accumulator is true when the object is cast
// to boolean.
void Interpreter::DoJumpIfToBooleanTrueConstant(
InterpreterAssembler* assembler) {
+ Callable callable = CodeFactory::ToBoolean(isolate_);
+ Node* target = __ HeapConstant(callable.code());
Node* accumulator = __ GetAccumulator();
Node* context = __ GetContext();
Node* to_boolean_value =
- __ CallRuntime(Runtime::kInterpreterToBoolean, context, accumulator);
+ __ CallStub(callable.descriptor(), target, context, accumulator);
Node* index = __ BytecodeOperandIdx(0);
Node* constant = __ LoadConstantPoolEntry(index);
Node* relative_jump = __ SmiUntag(constant);
@@ -1388,44 +1206,35 @@ void Interpreter::DoJumpIfToBooleanTrueConstant(
__ JumpIfWordEqual(to_boolean_value, true_value, relative_jump);
}
-
-// JumpIfToBooleanTrueConstantWide <idx16>
-//
-// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
-// if the object referenced by the accumulator is true when the object is cast
-// to boolean.
-void Interpreter::DoJumpIfToBooleanTrueConstantWide(
- InterpreterAssembler* assembler) {
- DoJumpIfToBooleanTrueConstant(assembler);
-}
-
-
-// JumpIfToBooleanFalse <imm8>
+// JumpIfToBooleanFalse <imm>
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is false when the object is cast to boolean.
void Interpreter::DoJumpIfToBooleanFalse(InterpreterAssembler* assembler) {
+ Callable callable = CodeFactory::ToBoolean(isolate_);
+ Node* target = __ HeapConstant(callable.code());
Node* accumulator = __ GetAccumulator();
Node* context = __ GetContext();
Node* to_boolean_value =
- __ CallRuntime(Runtime::kInterpreterToBoolean, context, accumulator);
+ __ CallStub(callable.descriptor(), target, context, accumulator);
Node* relative_jump = __ BytecodeOperandImm(0);
Node* false_value = __ BooleanConstant(false);
__ JumpIfWordEqual(to_boolean_value, false_value, relative_jump);
}
-
-// JumpIfToBooleanFalseConstant <idx8>
+// JumpIfToBooleanFalseConstant <idx>
//
-// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the object referenced by the accumulator is false when the object is cast
// to boolean.
void Interpreter::DoJumpIfToBooleanFalseConstant(
InterpreterAssembler* assembler) {
+ Callable callable = CodeFactory::ToBoolean(isolate_);
+ Node* target = __ HeapConstant(callable.code());
Node* accumulator = __ GetAccumulator();
Node* context = __ GetContext();
Node* to_boolean_value =
- __ CallRuntime(Runtime::kInterpreterToBoolean, context, accumulator);
+ __ CallStub(callable.descriptor(), target, context, accumulator);
Node* index = __ BytecodeOperandIdx(0);
Node* constant = __ LoadConstantPoolEntry(index);
Node* relative_jump = __ SmiUntag(constant);
@@ -1433,19 +1242,7 @@ void Interpreter::DoJumpIfToBooleanFalseConstant(
__ JumpIfWordEqual(to_boolean_value, false_value, relative_jump);
}
-
-// JumpIfToBooleanFalseConstantWide <idx16>
-//
-// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
-// if the object referenced by the accumulator is false when the object is cast
-// to boolean.
-void Interpreter::DoJumpIfToBooleanFalseConstantWide(
- InterpreterAssembler* assembler) {
- DoJumpIfToBooleanFalseConstant(assembler);
-}
-
-
-// JumpIfNull <imm8>
+// JumpIfNull <imm>
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is the null constant.
@@ -1456,10 +1253,9 @@ void Interpreter::DoJumpIfNull(InterpreterAssembler* assembler) {
__ JumpIfWordEqual(accumulator, null_value, relative_jump);
}
-
-// JumpIfNullConstant <idx8>
+// JumpIfNullConstant <idx>
//
-// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the object referenced by the accumulator is the null constant.
void Interpreter::DoJumpIfNullConstant(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
@@ -1470,16 +1266,7 @@ void Interpreter::DoJumpIfNullConstant(InterpreterAssembler* assembler) {
__ JumpIfWordEqual(accumulator, null_value, relative_jump);
}
-
-// JumpIfNullConstantWide <idx16>
-//
-// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
-// if the object referenced by the accumulator is the null constant.
-void Interpreter::DoJumpIfNullConstantWide(InterpreterAssembler* assembler) {
- DoJumpIfNullConstant(assembler);
-}
-
-// JumpIfUndefined <imm8>
+// JumpIfUndefined <imm>
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is the undefined constant.
@@ -1491,10 +1278,9 @@ void Interpreter::DoJumpIfUndefined(InterpreterAssembler* assembler) {
__ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
}
-
-// JumpIfUndefinedConstant <idx8>
+// JumpIfUndefinedConstant <idx>
//
-// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the object referenced by the accumulator is the undefined constant.
void Interpreter::DoJumpIfUndefinedConstant(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
@@ -1506,17 +1292,7 @@ void Interpreter::DoJumpIfUndefinedConstant(InterpreterAssembler* assembler) {
__ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
}
-
-// JumpIfUndefinedConstantWide <idx16>
-//
-// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
-// if the object referenced by the accumulator is the undefined constant.
-void Interpreter::DoJumpIfUndefinedConstantWide(
- InterpreterAssembler* assembler) {
- DoJumpIfUndefinedConstant(assembler);
-}
-
-// JumpIfNotHole <imm8>
+// JumpIfNotHole <imm>
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is the hole.
@@ -1527,9 +1303,9 @@ void Interpreter::DoJumpIfNotHole(InterpreterAssembler* assembler) {
__ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
}
-// JumpIfNotHoleConstant <idx8>
+// JumpIfNotHoleConstant <idx>
//
-// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the object referenced by the accumulator is the hole constant.
void Interpreter::DoJumpIfNotHoleConstant(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
@@ -1540,21 +1316,13 @@ void Interpreter::DoJumpIfNotHoleConstant(InterpreterAssembler* assembler) {
__ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
}
-// JumpIfNotHoleConstantWide <idx16>
-//
-// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
-// if the object referenced by the accumulator is the hole constant.
-void Interpreter::DoJumpIfNotHoleConstantWide(InterpreterAssembler* assembler) {
- DoJumpIfNotHoleConstant(assembler);
-}
-
void Interpreter::DoCreateLiteral(Runtime::FunctionId function_id,
InterpreterAssembler* assembler) {
Node* index = __ BytecodeOperandIdx(0);
Node* constant_elements = __ LoadConstantPoolEntry(index);
Node* literal_index_raw = __ BytecodeOperandIdx(1);
Node* literal_index = __ SmiTag(literal_index_raw);
- Node* flags_raw = __ BytecodeOperandImm(2);
+ Node* flags_raw = __ BytecodeOperandFlag(2);
Node* flags = __ SmiTag(flags_raw);
Node* closure = __ LoadRegister(Register::function_closure());
Node* context = __ GetContext();
@@ -1570,19 +1338,22 @@ void Interpreter::DoCreateLiteral(Runtime::FunctionId function_id,
// Creates a regular expression literal for literal index <literal_idx> with
// <flags> and the pattern in <pattern_idx>.
void Interpreter::DoCreateRegExpLiteral(InterpreterAssembler* assembler) {
- DoCreateLiteral(Runtime::kCreateRegExpLiteral, assembler);
-}
-
-
-// CreateRegExpLiteralWide <pattern_idx> <literal_idx> <flags>
-//
-// Creates a regular expression literal for literal index <literal_idx> with
-// <flags> and the pattern in <pattern_idx>.
-void Interpreter::DoCreateRegExpLiteralWide(InterpreterAssembler* assembler) {
- DoCreateLiteral(Runtime::kCreateRegExpLiteral, assembler);
+ Callable callable = CodeFactory::FastCloneRegExp(isolate_);
+ Node* target = __ HeapConstant(callable.code());
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* pattern = __ LoadConstantPoolEntry(index);
+ Node* literal_index_raw = __ BytecodeOperandIdx(1);
+ Node* literal_index = __ SmiTag(literal_index_raw);
+ Node* flags_raw = __ BytecodeOperandFlag(2);
+ Node* flags = __ SmiTag(flags_raw);
+ Node* closure = __ LoadRegister(Register::function_closure());
+ Node* context = __ GetContext();
+ Node* result = __ CallStub(callable.descriptor(), target, context, closure,
+ literal_index, pattern, flags);
+ __ SetAccumulator(result);
+ __ Dispatch();
}
-
// CreateArrayLiteral <element_idx> <literal_idx> <flags>
//
// Creates an array literal for literal index <literal_idx> with flags <flags>
@@ -1591,16 +1362,6 @@ void Interpreter::DoCreateArrayLiteral(InterpreterAssembler* assembler) {
DoCreateLiteral(Runtime::kCreateArrayLiteral, assembler);
}
-
-// CreateArrayLiteralWide <element_idx> <literal_idx> <flags>
-//
-// Creates an array literal for literal index <literal_idx> with flags <flags>
-// and constant elements in <element_idx>.
-void Interpreter::DoCreateArrayLiteralWide(InterpreterAssembler* assembler) {
- DoCreateLiteral(Runtime::kCreateArrayLiteral, assembler);
-}
-
-
// CreateObjectLiteral <element_idx> <literal_idx> <flags>
//
// Creates an object literal for literal index <literal_idx> with flags <flags>
@@ -1609,16 +1370,6 @@ void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
DoCreateLiteral(Runtime::kCreateObjectLiteral, assembler);
}
-
-// CreateObjectLiteralWide <element_idx> <literal_idx> <flags>
-//
-// Creates an object literal for literal index <literal_idx> with flags <flags>
-// and constant elements in <element_idx>.
-void Interpreter::DoCreateObjectLiteralWide(InterpreterAssembler* assembler) {
- DoCreateLiteral(Runtime::kCreateObjectLiteral, assembler);
-}
-
-
// CreateClosure <index> <tenured>
//
// Creates a new closure for SharedFunctionInfo at position |index| in the
@@ -1628,7 +1379,7 @@ void Interpreter::DoCreateClosure(InterpreterAssembler* assembler) {
// calling into the runtime.
Node* index = __ BytecodeOperandIdx(0);
Node* shared = __ LoadConstantPoolEntry(index);
- Node* tenured_raw = __ BytecodeOperandImm(1);
+ Node* tenured_raw = __ BytecodeOperandFlag(1);
Node* tenured = __ SmiTag(tenured_raw);
Node* context = __ GetContext();
Node* result =
@@ -1637,16 +1388,6 @@ void Interpreter::DoCreateClosure(InterpreterAssembler* assembler) {
__ Dispatch();
}
-
-// CreateClosureWide <index> <tenured>
-//
-// Creates a new closure for SharedFunctionInfo at position |index| in the
-// constant pool and with the PretenureFlag <tenured>.
-void Interpreter::DoCreateClosureWide(InterpreterAssembler* assembler) {
- return DoCreateClosure(assembler);
-}
-
-
// CreateMappedArguments
//
// Creates a new mapped arguments object.
@@ -1737,11 +1478,13 @@ void Interpreter::DoDebugger(InterpreterAssembler* assembler) {
// DebugBreak
//
// Call runtime to handle a debug break.
-#define DEBUG_BREAK(Name, ...) \
- void Interpreter::Do##Name(InterpreterAssembler* assembler) { \
- Node* context = __ GetContext(); \
- Node* original_handler = __ CallRuntime(Runtime::kDebugBreak, context); \
- __ DispatchToBytecodeHandler(original_handler); \
+#define DEBUG_BREAK(Name, ...) \
+ void Interpreter::Do##Name(InterpreterAssembler* assembler) { \
+ Node* context = __ GetContext(); \
+ Node* accumulator = __ GetAccumulator(); \
+ Node* original_handler = \
+ __ CallRuntime(Runtime::kDebugBreakOnBytecode, context, accumulator); \
+ __ DispatchToBytecodeHandler(original_handler); \
}
DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
#undef DEBUG_BREAK
@@ -1768,18 +1511,6 @@ void Interpreter::DoForInPrepare(InterpreterAssembler* assembler) {
__ Dispatch();
}
-
-// ForInPrepareWide <cache_info_triple>
-//
-// Returns state for for..in loop execution based on the object in the
-// accumulator. The result is output in registers |cache_info_triple| to
-// |cache_info_triple + 2|, with the registers holding cache_type, cache_array,
-// and cache_length respectively.
-void Interpreter::DoForInPrepareWide(InterpreterAssembler* assembler) {
- DoForInPrepare(assembler);
-}
-
-
// ForInNext <receiver> <index> <cache_info_pair>
//
// Returns the next enumerable property in the the accumulator.
@@ -1792,53 +1523,101 @@ void Interpreter::DoForInNext(InterpreterAssembler* assembler) {
Node* cache_type = __ LoadRegister(cache_type_reg);
Node* cache_array_reg = __ NextRegister(cache_type_reg);
Node* cache_array = __ LoadRegister(cache_array_reg);
- Node* context = __ GetContext();
- Node* result = __ CallRuntime(Runtime::kForInNext, context, receiver,
- cache_array, cache_type, index);
- __ SetAccumulator(result);
- __ Dispatch();
-}
-
-// ForInNextWide <receiver> <index> <cache_info_pair>
-//
-// Returns the next enumerable property in the the accumulator.
-void Interpreter::DoForInNextWide(InterpreterAssembler* assembler) {
- return DoForInNext(assembler);
+ // Load the next key from the enumeration array.
+ Node* key = __ LoadFixedArrayElementSmiIndex(cache_array, index);
+
+ // Check if we can use the for-in fast path potentially using the enum cache.
+ InterpreterAssembler::Label if_fast(assembler), if_slow(assembler);
+ Node* receiver_map = __ LoadObjectField(receiver, HeapObject::kMapOffset);
+ Node* condition = __ WordEqual(receiver_map, cache_type);
+ __ Branch(condition, &if_fast, &if_slow);
+ __ Bind(&if_fast);
+ {
+ // Enum cache in use for {receiver}, the {key} is definitely valid.
+ __ SetAccumulator(key);
+ __ Dispatch();
+ }
+ __ Bind(&if_slow);
+ {
+ // Record the fact that we hit the for-in slow path.
+ Node* vector_index = __ BytecodeOperandIdx(3);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+ Node* megamorphic_sentinel =
+ __ HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate_));
+ __ StoreFixedArrayElementNoWriteBarrier(type_feedback_vector, vector_index,
+ megamorphic_sentinel);
+
+ // Need to filter the {key} for the {receiver}.
+ Node* context = __ GetContext();
+ Node* result =
+ __ CallRuntime(Runtime::kForInFilter, context, receiver, key);
+ __ SetAccumulator(result);
+ __ Dispatch();
+ }
}
-
// ForInDone <index> <cache_length>
//
// Returns true if the end of the enumerable properties has been reached.
void Interpreter::DoForInDone(InterpreterAssembler* assembler) {
- // TODO(oth): Implement directly rather than making a runtime call.
Node* index_reg = __ BytecodeOperandReg(0);
Node* index = __ LoadRegister(index_reg);
Node* cache_length_reg = __ BytecodeOperandReg(1);
Node* cache_length = __ LoadRegister(cache_length_reg);
- Node* context = __ GetContext();
- Node* result =
- __ CallRuntime(Runtime::kForInDone, context, index, cache_length);
- __ SetAccumulator(result);
- __ Dispatch();
-}
+ // Check if {index} is at {cache_length} already.
+ InterpreterAssembler::Label if_true(assembler), if_false(assembler);
+ Node* condition = __ WordEqual(index, cache_length);
+ __ Branch(condition, &if_true, &if_false);
+ __ Bind(&if_true);
+ {
+ Node* result = __ BooleanConstant(true);
+ __ SetAccumulator(result);
+ __ Dispatch();
+ }
+ __ Bind(&if_false);
+ {
+ Node* result = __ BooleanConstant(false);
+ __ SetAccumulator(result);
+ __ Dispatch();
+ }
+}
// ForInStep <index>
//
// Increments the loop counter in register |index| and stores the result
// in the accumulator.
void Interpreter::DoForInStep(InterpreterAssembler* assembler) {
- // TODO(oth): Implement directly rather than making a runtime call.
Node* index_reg = __ BytecodeOperandReg(0);
Node* index = __ LoadRegister(index_reg);
- Node* context = __ GetContext();
- Node* result = __ CallRuntime(Runtime::kForInStep, context, index);
+ Node* one = __ SmiConstant(Smi::FromInt(1));
+ Node* result = __ SmiAdd(index, one);
__ SetAccumulator(result);
__ Dispatch();
}
+// Wide
+//
+// Prefix bytecode indicating next bytecode has wide (16-bit) operands.
+void Interpreter::DoWide(InterpreterAssembler* assembler) {
+ __ DispatchWide(OperandScale::kDouble);
+}
+
+// ExtraWide
+//
+// Prefix bytecode indicating next bytecode has extra-wide (32-bit) operands.
+void Interpreter::DoExtraWide(InterpreterAssembler* assembler) {
+ __ DispatchWide(OperandScale::kQuadruple);
+}
+
+// Illegal
+//
+// An invalid bytecode aborting execution if dispatched.
+void Interpreter::DoIllegal(InterpreterAssembler* assembler) {
+ __ Abort(kInvalidBytecode);
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index e02e9142b3..ea50faa02d 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -40,12 +40,14 @@ class Interpreter {
static bool MakeBytecode(CompilationInfo* info);
// Return bytecode handler for |bytecode|.
- Code* GetBytecodeHandler(Bytecode bytecode);
+ Code* GetBytecodeHandler(Bytecode bytecode, OperandScale operand_scale);
// GC support.
void IterateDispatchTable(ObjectVisitor* v);
- void TraceCodegen(Handle<Code> code, const char* name);
+ // Disassembler support (only useful with ENABLE_DISASSEMBLER defined).
+ void TraceCodegen(Handle<Code> code);
+ const char* LookupNameOfBytecodeHandler(Code* code);
Address dispatch_table_address() {
return reinterpret_cast<Address>(&dispatch_table_[0]);
@@ -58,6 +60,9 @@ class Interpreter {
BYTECODE_LIST(DECLARE_BYTECODE_HANDLER_GENERATOR)
#undef DECLARE_BYTECODE_HANDLER_GENERATOR
+ // Generates code to perform the binary operations via |callable|.
+ void DoBinaryOp(Callable callable, InterpreterAssembler* assembler);
+
// Generates code to perform the binary operations via |function_id|.
void DoBinaryOp(Runtime::FunctionId function_id,
InterpreterAssembler* assembler);
@@ -103,9 +108,12 @@ class Interpreter {
// Generates code to perform a JS runtime call.
void DoCallJSRuntimeCommon(InterpreterAssembler* assembler);
- // Generates code to perform a constructor call..
+ // Generates code to perform a constructor call.
void DoCallConstruct(InterpreterAssembler* assembler);
+ // Generates code to perform a type conversion.
+ void DoTypeConversionOp(Callable callable, InterpreterAssembler* assembler);
+
// Generates code ro create a literal via |function_id|.
void DoCreateLiteral(Runtime::FunctionId function_id,
InterpreterAssembler* assembler);
@@ -122,9 +130,14 @@ class Interpreter {
void DoStoreLookupSlot(LanguageMode language_mode,
InterpreterAssembler* assembler);
+ // Get dispatch table index of bytecode.
+ static size_t GetDispatchTableIndex(Bytecode bytecode,
+ OperandScale operand_scale);
+
bool IsDispatchTableInitialized();
- static const int kDispatchTableSize = static_cast<int>(Bytecode::kLast) + 1;
+ static const int kNumberOfWideVariants = 3;
+ static const int kDispatchTableSize = kNumberOfWideVariants * (kMaxUInt8 + 1);
Isolate* isolate_;
Code* dispatch_table_[kDispatchTableSize];
diff --git a/deps/v8/src/interpreter/register-translator.cc b/deps/v8/src/interpreter/register-translator.cc
deleted file mode 100644
index 3eba42f0dc..0000000000
--- a/deps/v8/src/interpreter/register-translator.cc
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/interpreter/register-translator.h"
-
-#include "src/interpreter/bytecode-array-builder.h"
-
-namespace v8 {
-namespace internal {
-namespace interpreter {
-
-RegisterTranslator::RegisterTranslator(RegisterMover* mover)
- : mover_(mover),
- emitting_moves_(false),
- window_registers_count_(0),
- output_moves_count_(0) {}
-
-void RegisterTranslator::TranslateInputRegisters(Bytecode bytecode,
- uint32_t* raw_operands,
- int raw_operand_count) {
- DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), raw_operand_count);
- if (!emitting_moves_) {
- emitting_moves_ = true;
- DCHECK_EQ(window_registers_count_, 0);
- int register_bitmap = Bytecodes::GetRegisterOperandBitmap(bytecode);
- for (int i = 0; i < raw_operand_count; i++) {
- if ((register_bitmap & (1 << i)) == 0) {
- continue;
- }
- Register in_reg = Register::FromRawOperand(raw_operands[i]);
- Register out_reg = TranslateAndMove(bytecode, i, in_reg);
- raw_operands[i] = out_reg.ToRawOperand();
- }
- window_registers_count_ = 0;
- emitting_moves_ = false;
- } else {
- // When the register translator is translating registers, it will
- // cause the bytecode generator to emit moves on it's behalf. This
- // path is reached by these moves.
- DCHECK(bytecode == Bytecode::kMovWide && raw_operand_count == 2 &&
- Register::FromRawOperand(raw_operands[0]).is_valid() &&
- Register::FromRawOperand(raw_operands[1]).is_valid());
- }
-}
-
-Register RegisterTranslator::TranslateAndMove(Bytecode bytecode,
- int operand_index, Register reg) {
- if (FitsInReg8Operand(reg)) {
- return reg;
- }
-
- OperandType operand_type = Bytecodes::GetOperandType(bytecode, operand_index);
- OperandSize operand_size = Bytecodes::SizeOfOperand(operand_type);
- if (operand_size == OperandSize::kShort) {
- CHECK(FitsInReg16Operand(reg));
- return Translate(reg);
- }
-
- CHECK((operand_type == OperandType::kReg8 ||
- operand_type == OperandType::kRegOut8) &&
- RegisterIsMovableToWindow(bytecode, operand_index));
- Register translated_reg = Translate(reg);
- Register window_reg(kTranslationWindowStart + window_registers_count_);
- window_registers_count_ += 1;
- if (Bytecodes::IsRegisterInputOperandType(operand_type)) {
- DCHECK(!Bytecodes::IsRegisterOutputOperandType(operand_type));
- mover()->MoveRegisterUntranslated(translated_reg, window_reg);
- } else if (Bytecodes::IsRegisterOutputOperandType(operand_type)) {
- DCHECK_LT(output_moves_count_, kTranslationWindowLength);
- output_moves_[output_moves_count_] =
- std::make_pair(window_reg, translated_reg);
- output_moves_count_ += 1;
- } else {
- UNREACHABLE();
- }
- return window_reg;
-}
-
-// static
-bool RegisterTranslator::RegisterIsMovableToWindow(Bytecode bytecode,
- int operand_index) {
- // By design, we only support moving individual registers. There
- // should be wide variants of such bytecodes instead to avoid the
- // need for a large translation window.
- OperandType operand_type = Bytecodes::GetOperandType(bytecode, operand_index);
- if (operand_type != OperandType::kReg8 &&
- operand_type != OperandType::kRegOut8) {
- return false;
- } else if (operand_index + 1 == Bytecodes::NumberOfOperands(bytecode)) {
- return true;
- } else {
- OperandType next_operand_type =
- Bytecodes::GetOperandType(bytecode, operand_index + 1);
- return (next_operand_type != OperandType::kRegCount8 &&
- next_operand_type != OperandType::kRegCount16);
- }
-}
-
-void RegisterTranslator::TranslateOutputRegisters() {
- if (!emitting_moves_) {
- emitting_moves_ = true;
- while (output_moves_count_ > 0) {
- output_moves_count_ -= 1;
- mover()->MoveRegisterUntranslated(
- output_moves_[output_moves_count_].first,
- output_moves_[output_moves_count_].second);
- }
- emitting_moves_ = false;
- }
-}
-
-// static
-Register RegisterTranslator::Translate(Register reg) {
- if (reg.index() >= kTranslationWindowStart) {
- return Register(reg.index() + kTranslationWindowLength);
- } else {
- return reg;
- }
-}
-
-// static
-bool RegisterTranslator::InTranslationWindow(Register reg) {
- return (reg.index() >= kTranslationWindowStart &&
- reg.index() <= kTranslationWindowLimit);
-}
-
-// static
-Register RegisterTranslator::UntranslateRegister(Register reg) {
- if (reg.index() >= kTranslationWindowStart) {
- return Register(reg.index() - kTranslationWindowLength);
- } else {
- return reg;
- }
-}
-
-// static
-int RegisterTranslator::DistanceToTranslationWindow(Register reg) {
- return kTranslationWindowStart - reg.index();
-}
-
-// static
-bool RegisterTranslator::FitsInReg8Operand(Register reg) {
- return reg.is_byte_operand() && reg.index() < kTranslationWindowStart;
-}
-
-// static
-bool RegisterTranslator::FitsInReg16Operand(Register reg) {
- int max_index = Register::MaxRegisterIndex() - kTranslationWindowLength + 1;
- return reg.is_short_operand() && reg.index() < max_index;
-}
-
-// static
-int RegisterTranslator::RegisterCountAdjustment(int register_count,
- int parameter_count) {
- if (register_count > kTranslationWindowStart) {
- return kTranslationWindowLength;
- } else if (parameter_count > 0) {
- Register param0 = Register::FromParameterIndex(0, parameter_count);
- if (!param0.is_byte_operand()) {
- // TODO(oth): Number of parameters means translation is
- // required, but the translation window location is such that
- // some space is wasted. Hopefully a rare corner case, but could
- // relocate window to limit waste.
- return kTranslationWindowLimit + 1 - register_count;
- }
- }
- return 0;
-}
-
-} // namespace interpreter
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/interpreter/register-translator.h b/deps/v8/src/interpreter/register-translator.h
deleted file mode 100644
index b683a899e2..0000000000
--- a/deps/v8/src/interpreter/register-translator.h
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_INTERPRETER_REGISTER_TRANSLATOR_H_
-#define V8_INTERPRETER_REGISTER_TRANSLATOR_H_
-
-#include "src/interpreter/bytecodes.h"
-
-namespace v8 {
-namespace internal {
-namespace interpreter {
-
-class RegisterMover;
-
-// A class that enables bytecodes having only byte sized register operands
-// to access all registers in the two byte space. Most bytecode uses few
-// registers so space can be saved if most bytecodes with register operands
-// just take byte operands.
-//
-// To reach the wider register space, a translation window is reserved in
-// the byte addressable space specifically for copying registers into and
-// out of before a bytecode is emitted. The translation window occupies
-// the last register slots at the top of the byte addressable range.
-//
-// Because of the translation window any registers which naturally lie
-// at above the translation window have to have their register index
-// incremented by the window width before they are emitted.
-//
-// This class does not support moving ranges of registers to and from
-// the translation window. It would be straightforward to add support
-// for constrained ranges, e.g. kRegPair8, kRegTriple8 operands, but
-// these would have two negative effects. The translation window would
-// need to be wider, further limiting the space for byte operands. And
-// every register in a range would need to be moved consuming more
-// space in the bytecode array.
-class RegisterTranslator final {
- public:
- explicit RegisterTranslator(RegisterMover* mover);
-
- // Translate and re-write the register operands that are inputs
- // to |bytecode| when it is about to be emitted.
- void TranslateInputRegisters(Bytecode bytecode, uint32_t* raw_operands,
- int raw_operand_count);
-
- // Translate and re-write the register operands that are outputs
- // from |bytecode| when it has just been output.
- void TranslateOutputRegisters();
-
- // Returns true if |reg| is in the translation window.
- static bool InTranslationWindow(Register reg);
-
- // Return register value as if it had been translated.
- static Register UntranslateRegister(Register reg);
-
- // Returns the distance in registers between the translation window
- // start and |reg|. The result is negative when |reg| is above the
- // start of the translation window.
- static int DistanceToTranslationWindow(Register reg);
-
- // Returns true if |reg| can be represented as an 8-bit operand
- // after translation.
- static bool FitsInReg8Operand(Register reg);
-
- // Returns true if |reg| can be represented as an 16-bit operand
- // after translation.
- static bool FitsInReg16Operand(Register reg);
-
- // Returns the increment to the register count necessary if the
- // value indicates the translation window is required.
- static int RegisterCountAdjustment(int register_count, int parameter_count);
-
- private:
- static const int kTranslationWindowLength = 4;
- static const int kTranslationWindowLimit = -kMinInt8;
- static const int kTranslationWindowStart =
- kTranslationWindowLimit - kTranslationWindowLength + 1;
-
- Register TranslateAndMove(Bytecode bytecode, int operand_index, Register reg);
- static bool RegisterIsMovableToWindow(Bytecode bytecode, int operand_index);
-
- static Register Translate(Register reg);
-
- RegisterMover* mover() const { return mover_; }
-
- // Entity to perform register moves necessary to translate registers
- // and ensure reachability.
- RegisterMover* mover_;
-
- // Flag to avoid re-entrancy when emitting move bytecodes for
- // translation.
- bool emitting_moves_;
-
- // Number of window registers in use.
- int window_registers_count_;
-
- // State for restoring register moves emitted by TranslateOutputRegisters.
- std::pair<Register, Register> output_moves_[kTranslationWindowLength];
- int output_moves_count_;
-};
-
-// Interface for RegisterTranslator helper class that will emit
-// register move bytecodes at the translator's behest.
-class RegisterMover {
- public:
- virtual ~RegisterMover() {}
-
- // Move register |from| to register |to| with no translation.
- // returns false if either register operand is invalid. Implementations
- // of this method must be aware that register moves with bad
- // register values are a security hole.
- virtual void MoveRegisterUntranslated(Register from, Register to) = 0;
-};
-
-} // namespace interpreter
-} // namespace internal
-} // namespace v8
-
-#endif // V8_INTERPRETER_REGISTER_TRANSLATOR_H_
diff --git a/deps/v8/src/interpreter/source-position-table.cc b/deps/v8/src/interpreter/source-position-table.cc
index 0b7c44e2d9..99a865b84e 100644
--- a/deps/v8/src/interpreter/source-position-table.cc
+++ b/deps/v8/src/interpreter/source-position-table.cc
@@ -4,7 +4,6 @@
#include "src/interpreter/source-position-table.h"
-#include "src/assembler.h"
#include "src/objects-inl.h"
#include "src/objects.h"
@@ -12,71 +11,196 @@ namespace v8 {
namespace internal {
namespace interpreter {
-class IsStatementField : public BitField<bool, 0, 1> {};
-class SourcePositionField : public BitField<int, 1, 30> {};
+// We'll use a simple encoding scheme to record the source positions.
+// Conceptually, each position consists of:
+// - bytecode_offset: An integer index into the BytecodeArray
+// - source_position: An integer index into the source string.
+// - position type: Each position is either a statement or an expression.
+//
+// The basic idea for the encoding is to use a variable-length integer coding,
+// where each byte contains 7 bits of payload data, and 1 'more' bit that
+// determines whether additional bytes follow. Additionally:
+// - we record the difference from the previous position,
+// - we just stuff one bit for the type into the bytecode offset,
+// - we write least-significant bits first,
+// - negative numbers occur only rarely, so we use a denormalized
+// most-significant byte (a byte with all zeros, which normally wouldn't
+// make any sense) to encode a negative sign, so that we 'pay' nothing for
+// positive numbers, but have to pay a full byte for negative integers.
+
+namespace {
+
+// A zero-value in the most-significant byte is used to mark negative numbers.
+const int kNegativeSignMarker = 0;
+
+// Each byte is encoded as MoreBit | ValueBits.
+class MoreBit : public BitField8<bool, 7, 1> {};
+class ValueBits : public BitField8<int, 0, 7> {};
+
+// Helper: Add the offsets from 'other' to 'value'. Also set is_statement.
+void AddAndSetEntry(PositionTableEntry& value,
+ const PositionTableEntry& other) {
+ value.bytecode_offset += other.bytecode_offset;
+ value.source_position += other.source_position;
+ value.is_statement = other.is_statement;
+}
+
+// Helper: Substract the offsets from 'other' from 'value'.
+void SubtractFromEntry(PositionTableEntry& value,
+ const PositionTableEntry& other) {
+ value.bytecode_offset -= other.bytecode_offset;
+ value.source_position -= other.source_position;
+}
+
+// Helper: Encode an integer.
+void EncodeInt(ZoneVector<byte>& bytes, int value) {
+ bool sign = false;
+ if (value < 0) {
+ sign = true;
+ value = -value;
+ }
+
+ bool more;
+ do {
+ more = value > ValueBits::kMax;
+ bytes.push_back(MoreBit::encode(more || sign) |
+ ValueBits::encode(value & ValueBits::kMax));
+ value >>= ValueBits::kSize;
+ } while (more);
+
+ if (sign) {
+ bytes.push_back(MoreBit::encode(false) |
+ ValueBits::encode(kNegativeSignMarker));
+ }
+}
+
+// Encode a PositionTableEntry.
+void EncodeEntry(ZoneVector<byte>& bytes, const PositionTableEntry& entry) {
+ // 1 bit for sign + is_statement each, which leaves 30b for the value.
+ DCHECK(abs(entry.bytecode_offset) < (1 << 30));
+ EncodeInt(bytes, (entry.is_statement ? 1 : 0) | (entry.bytecode_offset << 1));
+ EncodeInt(bytes, entry.source_position);
+}
+
+// Helper: Decode an integer.
+void DecodeInt(ByteArray* bytes, int* index, int* v) {
+ byte current;
+ int n = 0;
+ int value = 0;
+ bool more;
+ do {
+ current = bytes->get((*index)++);
+ value |= ValueBits::decode(current) << (n * ValueBits::kSize);
+ n++;
+ more = MoreBit::decode(current);
+ } while (more);
+
+ if (ValueBits::decode(current) == kNegativeSignMarker) {
+ value = -value;
+ }
+ *v = value;
+}
+
+void DecodeEntry(ByteArray* bytes, int* index, PositionTableEntry* entry) {
+ int tmp;
+ DecodeInt(bytes, index, &tmp);
+ entry->is_statement = (tmp & 1);
+
+ // Note that '>>' needs to be arithmetic shift in order to handle negative
+ // numbers properly.
+ entry->bytecode_offset = (tmp >> 1);
+
+ DecodeInt(bytes, index, &entry->source_position);
+}
+
+} // namespace
void SourcePositionTableBuilder::AddStatementPosition(size_t bytecode_offset,
int source_position) {
int offset = static_cast<int>(bytecode_offset);
- // If a position has already been assigned to this bytecode offset,
- // do not reassign a new statement position.
- if (CodeOffsetHasPosition(offset)) return;
- uint32_t encoded = IsStatementField::encode(true) |
- SourcePositionField::encode(source_position);
- entries_.push_back({offset, encoded});
+ AddEntry({offset, source_position, true});
}
void SourcePositionTableBuilder::AddExpressionPosition(size_t bytecode_offset,
int source_position) {
int offset = static_cast<int>(bytecode_offset);
- // If a position has already been assigned to this bytecode offset,
- // do not reassign a new statement position.
- if (CodeOffsetHasPosition(offset)) return;
- uint32_t encoded = IsStatementField::encode(false) |
- SourcePositionField::encode(source_position);
- entries_.push_back({offset, encoded});
+ AddEntry({offset, source_position, false});
}
-void SourcePositionTableBuilder::RevertPosition(size_t bytecode_offset) {
- int offset = static_cast<int>(bytecode_offset);
- // If we already added a source position table entry, but the bytecode array
- // builder ended up not outputting a bytecode for the corresponding bytecode
- // offset, we have to remove that entry.
- if (CodeOffsetHasPosition(offset)) entries_.pop_back();
+void SourcePositionTableBuilder::AddEntry(const PositionTableEntry& entry) {
+ // Don't encode a new entry if this bytecode already has a source position
+ // assigned.
+ if (candidate_.bytecode_offset == entry.bytecode_offset) {
+ if (entry.is_statement) candidate_ = entry;
+ return;
+ }
+
+ CommitEntry();
+ candidate_ = entry;
}
-Handle<FixedArray> SourcePositionTableBuilder::ToFixedArray() {
- int length = static_cast<int>(entries_.size());
- Handle<FixedArray> table =
- isolate_->factory()->NewFixedArray(length * 2, TENURED);
- for (int i = 0; i < length; i++) {
- table->set(i * 2, Smi::FromInt(entries_[i].bytecode_offset));
- table->set(i * 2 + 1, Smi::FromInt(entries_[i].source_position_and_type));
+void SourcePositionTableBuilder::CommitEntry() {
+ if (candidate_.bytecode_offset == kUninitializedCandidateOffset) return;
+ PositionTableEntry tmp(candidate_);
+ SubtractFromEntry(tmp, previous_);
+ EncodeEntry(bytes_, tmp);
+ previous_ = candidate_;
+
+ if (candidate_.is_statement) {
+ LOG_CODE_EVENT(isolate_, CodeLinePosInfoAddStatementPositionEvent(
+ jit_handler_data_, candidate_.bytecode_offset,
+ candidate_.source_position));
}
+ LOG_CODE_EVENT(isolate_, CodeLinePosInfoAddPositionEvent(
+ jit_handler_data_, candidate_.bytecode_offset,
+ candidate_.source_position));
+
+#ifdef ENABLE_SLOW_DCHECKS
+ raw_entries_.push_back(candidate_);
+#endif
+}
+
+Handle<ByteArray> SourcePositionTableBuilder::ToSourcePositionTable() {
+ CommitEntry();
+ if (bytes_.empty()) return isolate_->factory()->empty_byte_array();
+
+ Handle<ByteArray> table = isolate_->factory()->NewByteArray(
+ static_cast<int>(bytes_.size()), TENURED);
+
+ MemCopy(table->GetDataStartAddress(), &*bytes_.begin(), bytes_.size());
+
+#ifdef ENABLE_SLOW_DCHECKS
+ // Brute force testing: Record all positions and decode
+ // the entire table to verify they are identical.
+ auto raw = raw_entries_.begin();
+ for (SourcePositionTableIterator encoded(*table); !encoded.done();
+ encoded.Advance(), raw++) {
+ DCHECK(raw != raw_entries_.end());
+ DCHECK_EQ(encoded.bytecode_offset(), raw->bytecode_offset);
+ DCHECK_EQ(encoded.source_position(), raw->source_position);
+ DCHECK_EQ(encoded.is_statement(), raw->is_statement);
+ }
+ DCHECK(raw == raw_entries_.end());
+#endif
+
return table;
}
-SourcePositionTableIterator::SourcePositionTableIterator(
- BytecodeArray* bytecode_array)
- : table_(bytecode_array->source_position_table()),
- index_(0),
- length_(table_->length()) {
- DCHECK(table_->length() % 2 == 0);
+SourcePositionTableIterator::SourcePositionTableIterator(ByteArray* byte_array)
+ : table_(byte_array), index_(0), current_() {
Advance();
}
void SourcePositionTableIterator::Advance() {
- if (index_ < length_) {
- int new_bytecode_offset = Smi::cast(table_->get(index_))->value();
- // Bytecode offsets are in ascending order.
- DCHECK(bytecode_offset_ < new_bytecode_offset || index_ == 0);
- bytecode_offset_ = new_bytecode_offset;
- uint32_t source_position_and_type =
- static_cast<uint32_t>(Smi::cast(table_->get(index_ + 1))->value());
- is_statement_ = IsStatementField::decode(source_position_and_type);
- source_position_ = SourcePositionField::decode(source_position_and_type);
+ DCHECK(!done());
+ DCHECK(index_ >= 0 && index_ <= table_->length());
+ if (index_ == table_->length()) {
+ index_ = kDone;
+ } else {
+ PositionTableEntry tmp;
+ DecodeEntry(table_, &index_, &tmp);
+ AddAndSetEntry(current_, tmp);
}
- index_ += 2;
}
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/source-position-table.h b/deps/v8/src/interpreter/source-position-table.h
index 336cf42bc2..3ac58d6217 100644
--- a/deps/v8/src/interpreter/source-position-table.h
+++ b/deps/v8/src/interpreter/source-position-table.h
@@ -6,72 +6,90 @@
#define V8_INTERPRETER_SOURCE_POSITION_TABLE_H_
#include "src/assert-scope.h"
+#include "src/checks.h"
#include "src/handles.h"
-#include "src/zone.h"
+#include "src/log.h"
#include "src/zone-containers.h"
namespace v8 {
namespace internal {
class BytecodeArray;
-class FixedArray;
+class ByteArray;
class Isolate;
+class Zone;
namespace interpreter {
-class SourcePositionTableBuilder {
+struct PositionTableEntry {
+ PositionTableEntry()
+ : bytecode_offset(0), source_position(0), is_statement(false) {}
+ PositionTableEntry(int bytecode, int source, bool statement)
+ : bytecode_offset(bytecode),
+ source_position(source),
+ is_statement(statement) {}
+
+ int bytecode_offset;
+ int source_position;
+ bool is_statement;
+};
+
+class SourcePositionTableBuilder : public PositionsRecorder {
public:
- explicit SourcePositionTableBuilder(Isolate* isolate, Zone* zone)
- : isolate_(isolate), entries_(zone) {}
+ SourcePositionTableBuilder(Isolate* isolate, Zone* zone)
+ : isolate_(isolate),
+ bytes_(zone),
+#ifdef ENABLE_SLOW_DCHECKS
+ raw_entries_(zone),
+#endif
+ candidate_(kUninitializedCandidateOffset, 0, false) {
+ }
void AddStatementPosition(size_t bytecode_offset, int source_position);
void AddExpressionPosition(size_t bytecode_offset, int source_position);
- void RevertPosition(size_t bytecode_offset);
- Handle<FixedArray> ToFixedArray();
+ Handle<ByteArray> ToSourcePositionTable();
private:
- struct Entry {
- int bytecode_offset;
- uint32_t source_position_and_type;
- };
-
- bool CodeOffsetHasPosition(int bytecode_offset) {
- // Return whether bytecode offset already has a position assigned.
- return entries_.size() > 0 &&
- entries_.back().bytecode_offset == bytecode_offset;
- }
+ static const int kUninitializedCandidateOffset = -1;
+
+ void AddEntry(const PositionTableEntry& entry);
+ void CommitEntry();
Isolate* isolate_;
- ZoneVector<Entry> entries_;
+ ZoneVector<byte> bytes_;
+#ifdef ENABLE_SLOW_DCHECKS
+ ZoneVector<PositionTableEntry> raw_entries_;
+#endif
+ PositionTableEntry candidate_; // Next entry to be written, if initialized.
+ PositionTableEntry previous_; // Previously written entry, to compute delta.
};
class SourcePositionTableIterator {
public:
- explicit SourcePositionTableIterator(BytecodeArray* bytecode_array);
+ explicit SourcePositionTableIterator(ByteArray* byte_array);
void Advance();
int bytecode_offset() const {
DCHECK(!done());
- return bytecode_offset_;
+ return current_.bytecode_offset;
}
int source_position() const {
DCHECK(!done());
- return source_position_;
+ return current_.source_position;
}
bool is_statement() const {
DCHECK(!done());
- return is_statement_;
+ return current_.is_statement;
}
- bool done() const { return index_ > length_; }
+ bool done() const { return index_ == kDone; }
private:
- FixedArray* table_;
+ static const int kDone = -1;
+
+ ByteArray* table_;
int index_;
- int length_;
- bool is_statement_;
- int bytecode_offset_;
- int source_position_;
+ PositionTableEntry current_;
DisallowHeapAllocation no_gc;
};
diff --git a/deps/v8/src/isolate-inl.h b/deps/v8/src/isolate-inl.h
index c27b7a700d..da36f769a0 100644
--- a/deps/v8/src/isolate-inl.h
+++ b/deps/v8/src/isolate-inl.h
@@ -97,6 +97,24 @@ Isolate::ExceptionScope::~ExceptionScope() {
NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
#undef NATIVE_CONTEXT_FIELD_ACCESSOR
+bool Isolate::IsArraySpeciesLookupChainIntact() {
+ if (!FLAG_harmony_species) return true;
+ // Note: It would be nice to have debug checks to make sure that the
+ // species protector is accurate, but this would be hard to do for most of
+ // what the protector stands for:
+ // - You'd need to traverse the heap to check that no Array instance has
+ // a constructor property
+ // - To check that Array[Symbol.species] == Array, JS code has to execute,
+ // but JS cannot be invoked in callstack overflow situations
+ // All that could be checked reliably is that
+ // Array.prototype.constructor == Array. Given that limitation, no check is
+ // done here. In place, there are mjsunit tests harmony/array-species* which
+ // ensure that behavior is correct in various invalid protector cases.
+
+ PropertyCell* species_cell = heap()->species_protector();
+ return species_cell->value()->IsSmi() &&
+ Smi::cast(species_cell->value())->value() == kArrayProtectorValid;
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 8116f14d30..c9f01118c5 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -22,6 +22,7 @@
#include "src/crankshaft/hydrogen.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
+#include "src/external-reference-table.h"
#include "src/frames-inl.h"
#include "src/ic/stub-cache.h"
#include "src/interpreter/interpreter.h"
@@ -34,7 +35,7 @@
#include "src/regexp/regexp-stack.h"
#include "src/runtime-profiler.h"
#include "src/simulator.h"
-#include "src/snapshot/serialize.h"
+#include "src/snapshot/deserializer.h"
#include "src/v8.h"
#include "src/version.h"
#include "src/vm-state-inl.h"
@@ -338,8 +339,23 @@ static bool IsVisibleInStackTrace(JSFunction* fun,
return true;
}
+static Handle<FixedArray> MaybeGrow(Isolate* isolate,
+ Handle<FixedArray> elements,
+ int cur_position, int new_size) {
+ if (new_size > elements->length()) {
+ int new_capacity = JSObject::NewElementsCapacity(elements->length());
+ Handle<FixedArray> new_elements =
+ isolate->factory()->NewFixedArrayWithHoles(new_capacity);
+ for (int i = 0; i < cur_position; i++) {
+ new_elements->set(i, elements->get(i));
+ }
+ elements = new_elements;
+ }
+ DCHECK(new_size <= elements->length());
+ return elements;
+}
-Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSObject> error_object,
+Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
Handle<Object> caller) {
// Get stack trace limit.
Handle<JSObject> error = error_function();
@@ -364,51 +380,72 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSObject> error_object,
int frames_seen = 0;
int sloppy_frames = 0;
bool encountered_strict_function = false;
- for (JavaScriptFrameIterator iter(this);
- !iter.done() && frames_seen < limit;
+ for (StackFrameIterator iter(this); !iter.done() && frames_seen < limit;
iter.Advance()) {
- JavaScriptFrame* frame = iter.frame();
- // Set initial size to the maximum inlining level + 1 for the outermost
- // function.
- List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
- frame->Summarize(&frames);
- for (int i = frames.length() - 1; i >= 0; i--) {
- Handle<JSFunction> fun = frames[i].function();
- Handle<Object> recv = frames[i].receiver();
- // Filter out internal frames that we do not want to show.
- if (!IsVisibleInStackTrace(*fun, *caller, *recv, &seen_caller)) continue;
- // Filter out frames from other security contexts.
- if (!this->context()->HasSameSecurityTokenAs(fun->context())) continue;
- if (cursor + 4 > elements->length()) {
- int new_capacity = JSObject::NewElementsCapacity(elements->length());
- Handle<FixedArray> new_elements =
- factory()->NewFixedArrayWithHoles(new_capacity);
- for (int i = 0; i < cursor; i++) {
- new_elements->set(i, elements->get(i));
- }
- elements = new_elements;
- }
- DCHECK(cursor + 4 <= elements->length());
-
- Handle<AbstractCode> abstract_code = frames[i].abstract_code();
-
- Handle<Smi> offset(Smi::FromInt(frames[i].code_offset()), this);
- // The stack trace API should not expose receivers and function
- // objects on frames deeper than the top-most one with a strict
- // mode function. The number of sloppy frames is stored as
- // first element in the result array.
- if (!encountered_strict_function) {
- if (is_strict(fun->shared()->language_mode())) {
- encountered_strict_function = true;
- } else {
- sloppy_frames++;
+ StackFrame* frame = iter.frame();
+
+ switch (frame->type()) {
+ case StackFrame::JAVA_SCRIPT:
+ case StackFrame::OPTIMIZED:
+ case StackFrame::INTERPRETED: {
+ JavaScriptFrame* js_frame = JavaScriptFrame::cast(frame);
+ // Set initial size to the maximum inlining level + 1 for the outermost
+ // function.
+ List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
+ js_frame->Summarize(&frames);
+ for (int i = frames.length() - 1; i >= 0; i--) {
+ Handle<JSFunction> fun = frames[i].function();
+ Handle<Object> recv = frames[i].receiver();
+ // Filter out internal frames that we do not want to show.
+ if (!IsVisibleInStackTrace(*fun, *caller, *recv, &seen_caller)) {
+ continue;
+ }
+ // Filter out frames from other security contexts.
+ if (!this->context()->HasSameSecurityTokenAs(fun->context())) {
+ continue;
+ }
+ elements = MaybeGrow(this, elements, cursor, cursor + 4);
+
+ Handle<AbstractCode> abstract_code = frames[i].abstract_code();
+
+ Handle<Smi> offset(Smi::FromInt(frames[i].code_offset()), this);
+ // The stack trace API should not expose receivers and function
+ // objects on frames deeper than the top-most one with a strict mode
+ // function. The number of sloppy frames is stored as first element in
+ // the result array.
+ if (!encountered_strict_function) {
+ if (is_strict(fun->shared()->language_mode())) {
+ encountered_strict_function = true;
+ } else {
+ sloppy_frames++;
+ }
+ }
+ elements->set(cursor++, *recv);
+ elements->set(cursor++, *fun);
+ elements->set(cursor++, *abstract_code);
+ elements->set(cursor++, *offset);
+ frames_seen++;
}
- }
- elements->set(cursor++, *recv);
- elements->set(cursor++, *fun);
- elements->set(cursor++, *abstract_code);
- elements->set(cursor++, *offset);
- frames_seen++;
+ } break;
+
+ case StackFrame::WASM: {
+ WasmFrame* wasm_frame = WasmFrame::cast(frame);
+ Code* code = wasm_frame->unchecked_code();
+ Handle<AbstractCode> abstract_code =
+ Handle<AbstractCode>(AbstractCode::cast(code));
+ Handle<JSFunction> fun = factory()->NewFunction(
+ factory()->NewStringFromAsciiChecked("<WASM>"));
+ elements = MaybeGrow(this, elements, cursor, cursor + 4);
+ // TODO(jfb) Pass module object.
+ elements->set(cursor++, *factory()->undefined_value());
+ elements->set(cursor++, *fun);
+ elements->set(cursor++, *abstract_code);
+ elements->set(cursor++, Internals::IntToSmi(0));
+ frames_seen++;
+ } break;
+
+ default:
+ break;
}
}
elements->set(0, Smi::FromInt(sloppy_frames));
@@ -419,9 +456,8 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSObject> error_object,
return result;
}
-
-MaybeHandle<JSObject> Isolate::CaptureAndSetDetailedStackTrace(
- Handle<JSObject> error_object) {
+MaybeHandle<JSReceiver> Isolate::CaptureAndSetDetailedStackTrace(
+ Handle<JSReceiver> error_object) {
if (capture_stack_trace_for_uncaught_exceptions_) {
// Capture stack trace for a detailed exception message.
Handle<Name> key = factory()->detailed_stack_trace_symbol();
@@ -429,21 +465,20 @@ MaybeHandle<JSObject> Isolate::CaptureAndSetDetailedStackTrace(
stack_trace_for_uncaught_exceptions_frame_limit_,
stack_trace_for_uncaught_exceptions_options_);
RETURN_ON_EXCEPTION(
- this, JSObject::SetProperty(error_object, key, stack_trace, STRICT),
- JSObject);
+ this, JSReceiver::SetProperty(error_object, key, stack_trace, STRICT),
+ JSReceiver);
}
return error_object;
}
-
-MaybeHandle<JSObject> Isolate::CaptureAndSetSimpleStackTrace(
- Handle<JSObject> error_object, Handle<Object> caller) {
+MaybeHandle<JSReceiver> Isolate::CaptureAndSetSimpleStackTrace(
+ Handle<JSReceiver> error_object, Handle<Object> caller) {
// Capture stack trace for simple stack trace string formatting.
Handle<Name> key = factory()->stack_trace_symbol();
Handle<Object> stack_trace = CaptureSimpleStackTrace(error_object, caller);
RETURN_ON_EXCEPTION(
- this, JSObject::SetProperty(error_object, key, stack_trace, STRICT),
- JSObject);
+ this, JSReceiver::SetProperty(error_object, key, stack_trace, STRICT),
+ JSReceiver);
return error_object;
}
@@ -868,7 +903,7 @@ Object* Isolate::StackOverflow() {
#ifdef VERIFY_HEAP
if (FLAG_verify_heap && FLAG_stress_compaction) {
- heap()->CollectAllAvailableGarbage("trigger compaction");
+ heap()->CollectAllGarbage(Heap::kNoGCFlags, "trigger compaction");
}
#endif // VERIFY_HEAP
@@ -1780,6 +1815,8 @@ Isolate::Isolate(bool enable_serializer)
descriptor_lookup_cache_(NULL),
handle_scope_implementer_(NULL),
unicode_cache_(NULL),
+ runtime_zone_(&allocator_),
+ interface_descriptor_zone_(&allocator_),
inner_pointer_to_code_cache_(NULL),
global_handles_(NULL),
eternal_handles_(NULL),
@@ -1794,6 +1831,7 @@ Isolate::Isolate(bool enable_serializer)
serializer_enabled_(enable_serializer),
has_fatal_error_(false),
initialized_from_snapshot_(false),
+ is_tail_call_elimination_enabled_(true),
cpu_profiler_(NULL),
heap_profiler_(NULL),
function_entry_hook_(NULL),
@@ -2165,7 +2203,7 @@ bool Isolate::Init(Deserializer* des) {
// Initialize other runtime facilities
#if defined(USE_SIMULATOR)
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
- V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC
+ V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390
Simulator::Initialize(this);
#endif
#endif
@@ -2196,7 +2234,7 @@ bool Isolate::Init(Deserializer* des) {
}
if (create_heap_objects) {
- // Terminate the cache array with the sentinel so we can iterate.
+ // Terminate the partial snapshot cache so we can iterate.
partial_snapshot_cache_.Add(heap_.undefined_value());
}
@@ -2268,9 +2306,8 @@ bool Isolate::Init(Deserializer* des) {
// the snapshot.
HandleScope scope(this);
Deoptimizer::EnsureCodeForDeoptimizationEntry(
- this,
- Deoptimizer::LAZY,
- kDeoptTableSerializeEntryCount - 1);
+ this, Deoptimizer::LAZY,
+ ExternalReferenceTable::kDeoptTableSerializeEntryCount - 1);
}
if (!serializer_enabled()) {
@@ -2431,12 +2468,11 @@ CodeTracer* Isolate::GetCodeTracer() {
return code_tracer();
}
-
-Map* Isolate::get_initial_js_array_map(ElementsKind kind, Strength strength) {
+Map* Isolate::get_initial_js_array_map(ElementsKind kind) {
if (IsFastElementsKind(kind)) {
DisallowHeapAllocation no_gc;
- Object* const initial_js_array_map = context()->native_context()->get(
- Context::ArrayMapIndex(kind, strength));
+ Object* const initial_js_array_map =
+ context()->native_context()->get(Context::ArrayMapIndex(kind));
if (!initial_js_array_map->IsUndefined()) {
return Map::cast(initial_js_array_map);
}
@@ -2511,25 +2547,6 @@ bool Isolate::IsFastArrayConstructorPrototypeChainIntact() {
return cell_reports_intact;
}
-bool Isolate::IsArraySpeciesLookupChainIntact() {
- if (!FLAG_harmony_species) return true;
- // Note: It would be nice to have debug checks to make sure that the
- // species protector is accurate, but this would be hard to do for most of
- // what the protector stands for:
- // - You'd need to traverse the heap to check that no Array instance has
- // a constructor property or a modified __proto__
- // - To check that Array[Symbol.species] == Array, JS code has to execute,
- // but JS cannot be invoked in callstack overflow situations
- // All that could be checked reliably is that
- // Array.prototype.constructor == Array. Given that limitation, no check is
- // done here. In place, there are mjsunit tests harmony/array-species* which
- // ensure that behavior is correct in various invalid protector cases.
-
- PropertyCell* species_cell = heap()->species_protector();
- return species_cell->value()->IsSmi() &&
- Smi::cast(species_cell->value())->value() == kArrayProtectorValid;
-}
-
void Isolate::InvalidateArraySpeciesProtector() {
if (!FLAG_harmony_species) return;
DCHECK(factory()->species_protector()->value()->IsSmi());
@@ -2682,7 +2699,11 @@ void Isolate::RemoveCallCompletedCallback(CallCompletedCallback callback) {
void Isolate::FireCallCompletedCallback() {
bool has_call_completed_callbacks = !call_completed_callbacks_.is_empty();
- bool run_microtasks = autorun_microtasks() && pending_microtask_count();
+ bool run_microtasks =
+ pending_microtask_count() &&
+ !handle_scope_implementer()->HasMicrotasksSuppressions() &&
+ handle_scope_implementer()->microtasks_policy() ==
+ v8::MicrotasksPolicy::kAuto;
if (!has_call_completed_callbacks && !run_microtasks) return;
if (!handle_scope_implementer()->CallDepthIsZero()) return;
@@ -2737,7 +2758,12 @@ void Isolate::RunMicrotasks() {
// Increase call depth to prevent recursive callbacks.
v8::Isolate::SuppressMicrotaskExecutionScope suppress(
reinterpret_cast<v8::Isolate*>(this));
+ RunMicrotasksInternal();
+ FireMicrotasksCompletedCallback();
+}
+
+void Isolate::RunMicrotasksInternal() {
while (pending_microtask_count() > 0) {
HandleScope scope(this);
int num_tasks = pending_microtask_count();
@@ -2746,8 +2772,8 @@ void Isolate::RunMicrotasks() {
set_pending_microtask_count(0);
heap()->set_microtask_queue(heap()->empty_fixed_array());
- for (int i = 0; i < num_tasks; i++) {
- HandleScope scope(this);
+ Isolate* isolate = this;
+ FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < num_tasks, i++, {
Handle<Object> microtask(queue->get(i), this);
if (microtask->IsJSFunction()) {
Handle<JSFunction> microtask_function =
@@ -2774,11 +2800,37 @@ void Isolate::RunMicrotasks() {
void* data = v8::ToCData<void*>(callback_info->data());
callback(data);
}
+ });
+ }
+}
+
+
+void Isolate::AddMicrotasksCompletedCallback(
+ MicrotasksCompletedCallback callback) {
+ for (int i = 0; i < microtasks_completed_callbacks_.length(); i++) {
+ if (callback == microtasks_completed_callbacks_.at(i)) return;
+ }
+ microtasks_completed_callbacks_.Add(callback);
+}
+
+
+void Isolate::RemoveMicrotasksCompletedCallback(
+ MicrotasksCompletedCallback callback) {
+ for (int i = 0; i < microtasks_completed_callbacks_.length(); i++) {
+ if (callback == microtasks_completed_callbacks_.at(i)) {
+ microtasks_completed_callbacks_.Remove(i);
}
}
}
+void Isolate::FireMicrotasksCompletedCallback() {
+ for (int i = 0; i < microtasks_completed_callbacks_.length(); i++) {
+ microtasks_completed_callbacks_.at(i)(reinterpret_cast<v8::Isolate*>(this));
+ }
+}
+
+
void Isolate::SetUseCounterCallback(v8::Isolate::UseCounterCallback callback) {
DCHECK(!use_counter_callback_);
use_counter_callback_ = callback;
@@ -2817,6 +2869,14 @@ std::string Isolate::GetTurboCfgFileName() {
}
}
+void Isolate::SetTailCallEliminationEnabled(bool enabled) {
+ if (is_tail_call_elimination_enabled_ == enabled) return;
+ is_tail_call_elimination_enabled_ = enabled;
+ // TODO(ishell): Introduce DependencyGroup::kTailCallChangedGroup to
+ // deoptimize only those functions that are affected by the change of this
+ // flag.
+ internal::Deoptimizer::DeoptimizeAll(this);
+}
// Heap::detached_contexts tracks detached contexts as pairs
// (number of GC since the context was detached, the context).
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index 2d74dc4a63..8847164012 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -11,6 +11,7 @@
#include "include/v8-debug.h"
#include "src/allocation.h"
#include "src/assert-scope.h"
+#include "src/base/accounting-allocator.h"
#include "src/base/atomicops.h"
#include "src/builtins.h"
#include "src/cancelable-task.h"
@@ -26,8 +27,8 @@
#include "src/messages.h"
#include "src/optimizing-compile-dispatcher.h"
#include "src/regexp/regexp-stack.h"
-#include "src/runtime/runtime.h"
#include "src/runtime-profiler.h"
+#include "src/runtime/runtime.h"
#include "src/zone.h"
namespace v8 {
@@ -178,6 +179,20 @@ typedef ZoneList<Handle<Object> > ZoneObjectList;
C(ExternalCaughtException, external_caught_exception) \
C(JSEntrySP, js_entry_sp)
+#define FOR_WITH_HANDLE_SCOPE(isolate, loop_var_type, init, loop_var, \
+ limit_check, increment, body) \
+ do { \
+ loop_var_type init; \
+ loop_var_type for_with_handle_limit = loop_var; \
+ Isolate* for_with_handle_isolate = isolate; \
+ while (limit_check) { \
+ for_with_handle_limit += 1024; \
+ HandleScope loop_scope(for_with_handle_isolate); \
+ for (; limit_check && loop_var < for_with_handle_limit; increment) { \
+ body \
+ } \
+ } \
+ } while (false)
// Platform-independent, reliable thread identifier.
class ThreadId {
@@ -378,7 +393,6 @@ typedef List<HeapObject*> DebugObjectCache;
V(HashMap*, external_reference_map, NULL) \
V(HashMap*, root_index_map, NULL) \
V(int, pending_microtask_count, 0) \
- V(bool, autorun_microtasks, true) \
V(HStatistics*, hstatistics, NULL) \
V(CompilationStatistics*, turbo_statistics, NULL) \
V(HTracer*, htracer, NULL) \
@@ -668,12 +682,12 @@ class Isolate {
Handle<JSArray> CaptureCurrentStackTrace(
int frame_limit,
StackTrace::StackTraceOptions options);
- Handle<Object> CaptureSimpleStackTrace(Handle<JSObject> error_object,
+ Handle<Object> CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
Handle<Object> caller);
- MaybeHandle<JSObject> CaptureAndSetDetailedStackTrace(
- Handle<JSObject> error_object);
- MaybeHandle<JSObject> CaptureAndSetSimpleStackTrace(
- Handle<JSObject> error_object, Handle<Object> caller);
+ MaybeHandle<JSReceiver> CaptureAndSetDetailedStackTrace(
+ Handle<JSReceiver> error_object);
+ MaybeHandle<JSReceiver> CaptureAndSetSimpleStackTrace(
+ Handle<JSReceiver> error_object, Handle<Object> caller);
Handle<JSArray> GetDetailedStackTrace(Handle<JSObject> error_object);
Handle<JSArray> GetDetailedFromSimpleStackTrace(
Handle<JSObject> error_object);
@@ -953,14 +967,13 @@ class Isolate {
date_cache_ = date_cache;
}
- Map* get_initial_js_array_map(ElementsKind kind,
- Strength strength = Strength::WEAK);
+ Map* get_initial_js_array_map(ElementsKind kind);
static const int kArrayProtectorValid = 1;
static const int kArrayProtectorInvalid = 0;
bool IsFastArrayConstructorPrototypeChainIntact();
- bool IsArraySpeciesLookupChainIntact();
+ inline bool IsArraySpeciesLookupChainIntact();
// On intent to set an element in object, make sure that appropriate
// notifications occur if the set is on the elements of the array or
@@ -998,13 +1011,6 @@ class Isolate {
return optimizing_compile_dispatcher_ != NULL;
}
- bool concurrent_osr_enabled() const {
- // Thread is only available with flag enabled.
- DCHECK(optimizing_compile_dispatcher_ == NULL ||
- FLAG_concurrent_recompilation);
- return optimizing_compile_dispatcher_ != NULL && FLAG_concurrent_osr;
- }
-
OptimizingCompileDispatcher* optimizing_compile_dispatcher() {
return optimizing_compile_dispatcher_;
}
@@ -1061,6 +1067,10 @@ class Isolate {
void RemoveBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
void FireBeforeCallEnteredCallback();
+ void AddMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
+ void RemoveMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
+ void FireMicrotasksCompletedCallback();
+
void SetPromiseRejectCallback(PromiseRejectCallback callback);
void ReportPromiseReject(Handle<JSObject> promise, Handle<Object> value,
v8::PromiseRejectEvent event);
@@ -1080,6 +1090,14 @@ class Isolate {
int GetNextUniqueSharedFunctionInfoId() { return next_unique_sfi_id_++; }
#endif
+ // Support for dynamically disabling tail call elimination.
+ Address is_tail_call_elimination_enabled_address() {
+ return reinterpret_cast<Address>(&is_tail_call_elimination_enabled_);
+ }
+ bool is_tail_call_elimination_enabled() const {
+ return is_tail_call_elimination_enabled_;
+ }
+ void SetTailCallEliminationEnabled(bool enabled);
void AddDetachedContext(Handle<Context> context);
void CheckDetachedContextsAfterGC();
@@ -1101,6 +1119,8 @@ class Isolate {
interpreter::Interpreter* interpreter() const { return interpreter_; }
+ base::AccountingAllocator* allocator() { return &allocator_; }
+
protected:
explicit Isolate(bool enable_serializer);
@@ -1209,6 +1229,8 @@ class Isolate {
// the frame.
void RemoveMaterializedObjectsOnUnwind(StackFrame* frame);
+ void RunMicrotasksInternal();
+
base::Atomic32 id_;
EntryStackItem* entry_stack_;
int stack_trace_nesting_level_;
@@ -1239,6 +1261,7 @@ class Isolate {
HandleScopeData handle_scope_data_;
HandleScopeImplementer* handle_scope_implementer_;
UnicodeCache* unicode_cache_;
+ base::AccountingAllocator allocator_;
Zone runtime_zone_;
Zone interface_descriptor_zone_;
InnerPointerToCodeCache* inner_pointer_to_code_cache_;
@@ -1266,6 +1289,9 @@ class Isolate {
// True if this isolate was initialized from a snapshot.
bool initialized_from_snapshot_;
+ // True if ES2015 tail call elimination feature is enabled.
+ bool is_tail_call_elimination_enabled_;
+
// Time stamp at initialization.
double time_millis_at_init_;
@@ -1330,6 +1356,9 @@ class Isolate {
// List of callbacks when a Call completes.
List<CallCompletedCallback> call_completed_callbacks_;
+ // List of callbacks after microtasks were run.
+ List<MicrotasksCompletedCallback> microtasks_completed_callbacks_;
+
v8::Isolate::UseCounterCallback use_counter_callback_;
BasicBlockProfiler* basic_block_profiler_;
@@ -1358,6 +1387,8 @@ class Isolate {
friend class v8::Locker;
friend class v8::Unlocker;
friend v8::StartupData v8::V8::CreateSnapshotDataBlob(const char*);
+ friend v8::StartupData v8::V8::WarmUpSnapshotDataBlob(v8::StartupData,
+ const char*);
DISALLOW_COPY_AND_ASSIGN(Isolate);
};
diff --git a/deps/v8/src/js/array-iterator.js b/deps/v8/src/js/array-iterator.js
index 2609ebdd73..b3e25e9adb 100644
--- a/deps/v8/src/js/array-iterator.js
+++ b/deps/v8/src/js/array-iterator.js
@@ -109,6 +109,24 @@ function ArrayKeys() {
return CreateArrayIterator(this, ITERATOR_KIND_KEYS);
}
+// TODO(littledan): Check for detached TypedArray in these three methods
+function TypedArrayEntries() {
+ if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
+ return %_Call(ArrayEntries, this);
+}
+
+
+function TypedArrayValues() {
+ if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
+ return %_Call(ArrayValues, this);
+}
+
+
+function TypedArrayKeys() {
+ if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
+ return %_Call(ArrayKeys, this);
+}
+
%FunctionSetPrototype(ArrayIterator, {__proto__: IteratorPrototype});
%FunctionSetInstanceClassName(ArrayIterator, 'Array Iterator');
@@ -117,8 +135,6 @@ utils.InstallFunctions(ArrayIterator.prototype, DONT_ENUM, [
'next', ArrayIteratorNext
]);
utils.SetFunctionName(ArrayIteratorIterator, iteratorSymbol);
-%AddNamedProperty(ArrayIterator.prototype, iteratorSymbol,
- ArrayIteratorIterator, DONT_ENUM);
%AddNamedProperty(ArrayIterator.prototype, toStringTagSymbol,
"Array Iterator", READ_ONLY | DONT_ENUM);
@@ -135,12 +151,13 @@ utils.SetFunctionName(ArrayValues, 'values');
%AddNamedProperty(GlobalArray.prototype, iteratorSymbol, ArrayValues,
DONT_ENUM);
+utils.InstallFunctions(GlobalTypedArray.prototype, DONT_ENUM, [
+ 'entries', TypedArrayEntries,
+ 'keys', TypedArrayKeys,
+ 'values', TypedArrayValues
+]);
%AddNamedProperty(GlobalTypedArray.prototype,
- 'entries', ArrayEntries, DONT_ENUM);
-%AddNamedProperty(GlobalTypedArray.prototype, 'values', ArrayValues, DONT_ENUM);
-%AddNamedProperty(GlobalTypedArray.prototype, 'keys', ArrayKeys, DONT_ENUM);
-%AddNamedProperty(GlobalTypedArray.prototype,
- iteratorSymbol, ArrayValues, DONT_ENUM);
+ iteratorSymbol, TypedArrayValues, DONT_ENUM);
// -------------------------------------------------------------------
// Exports
diff --git a/deps/v8/src/js/array.js b/deps/v8/src/js/array.js
index 0a5e2839ef..1406df336d 100644
--- a/deps/v8/src/js/array.js
+++ b/deps/v8/src/js/array.js
@@ -73,17 +73,13 @@ function DefineIndexedProperty(array, i, value) {
}
}
+function KeySortCompare(a, b) {
+ return a - b;
+}
-// Global list of arrays visited during toString, toLocaleString and
-// join invocations.
-var visited_arrays = new InternalArray();
-
-
-// Gets a sorted array of array keys. Useful for operations on sparse
-// arrays. Dupes have not been removed.
function GetSortedArrayKeys(array, indices) {
- var keys = new InternalArray();
if (IS_NUMBER(indices)) {
+ var keys = new InternalArray();
// It's an interval
var limit = indices;
for (var i = 0; i < limit; ++i) {
@@ -92,61 +88,34 @@ function GetSortedArrayKeys(array, indices) {
keys.push(i);
}
}
- } else {
- var length = indices.length;
- for (var k = 0; k < length; ++k) {
- var key = indices[k];
- if (!IS_UNDEFINED(key)) {
- var e = array[key];
- if (!IS_UNDEFINED(e) || key in array) {
- keys.push(key);
- }
- }
- }
- keys.sort(function(a, b) { return a - b; });
+ return keys;
}
- return keys;
+ return InnerArraySort(indices, indices.length, KeySortCompare);
}
-function SparseJoinWithSeparatorJS(array, len, convert, separator) {
- var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, len));
- var totalLength = 0;
- var elements = new InternalArray(keys.length * 2);
- var previousKey = -1;
- for (var i = 0; i < keys.length; i++) {
+function SparseJoinWithSeparatorJS(array, keys, length, convert, separator) {
+ var keys_length = keys.length;
+ var elements = new InternalArray(keys_length * 2);
+ for (var i = 0; i < keys_length; i++) {
var key = keys[i];
- if (key != previousKey) { // keys may contain duplicates.
- var e = array[key];
- if (!IS_STRING(e)) e = convert(e);
- elements[i * 2] = key;
- elements[i * 2 + 1] = e;
- previousKey = key;
- }
+ var e = array[key];
+ elements[i * 2] = key;
+ elements[i * 2 + 1] = IS_STRING(e) ? e : convert(e);
}
- return %SparseJoinWithSeparator(elements, len, separator);
+ return %SparseJoinWithSeparator(elements, length, separator);
}
// Optimized for sparse arrays if separator is ''.
-function SparseJoin(array, len, convert) {
- var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, len));
- var last_key = -1;
+function SparseJoin(array, keys, convert) {
var keys_length = keys.length;
-
var elements = new InternalArray(keys_length);
- var elements_length = 0;
-
for (var i = 0; i < keys_length; i++) {
- var key = keys[i];
- if (key != last_key) {
- var e = array[key];
- if (!IS_STRING(e)) e = convert(e);
- elements[elements_length++] = e;
- last_key = key;
- }
+ var e = array[keys[i]];
+ elements[i] = IS_STRING(e) ? e : convert(e);
}
- return %StringBuilderConcat(elements, elements_length, '');
+ return %StringBuilderConcat(elements, keys_length, '');
}
@@ -167,94 +136,122 @@ function UseSparseVariant(array, length, is_array, touched) {
(touched > estimated_elements * 4);
}
+function Stack() {
+ this.length = 0;
+ this.values = new InternalArray();
+}
-function Join(array, length, separator, convert) {
- if (length == 0) return '';
+// Predeclare the instance variables on the prototype. Otherwise setting them in
+// the constructor will leak the instance through settings on Object.prototype.
+Stack.prototype.length = null;
+Stack.prototype.values = null;
- var is_array = IS_ARRAY(array);
+function StackPush(stack, value) {
+ stack.values[stack.length++] = value;
+}
- if (is_array) {
- // If the array is cyclic, return the empty string for already
- // visited arrays.
- if (!%PushIfAbsent(visited_arrays, array)) return '';
+function StackPop(stack) {
+ stack.values[--stack.length] = null
+}
+
+function StackHas(stack, v) {
+ var length = stack.length;
+ var values = stack.values;
+ for (var i = 0; i < length; i++) {
+ if (values[i] === v) return true;
}
+ return false;
+}
- // Attempt to convert the elements.
- try {
- if (UseSparseVariant(array, length, is_array, length)) {
- %NormalizeElements(array);
- if (separator.length == 0) {
- return SparseJoin(array, length, convert);
- } else {
- return SparseJoinWithSeparatorJS(array, length, convert, separator);
- }
- }
+// Global list of arrays visited during toString, toLocaleString and
+// join invocations.
+var visited_arrays = new Stack();
- // Fast case for one-element arrays.
- if (length == 1) {
- var e = array[0];
- if (IS_STRING(e)) return e;
- return convert(e);
+function DoJoin(array, length, is_array, separator, convert) {
+ if (UseSparseVariant(array, length, is_array, length)) {
+ %NormalizeElements(array);
+ var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, length));
+ if (separator === '') {
+ if (keys.length === 0) return '';
+ return SparseJoin(array, keys, convert);
+ } else {
+ return SparseJoinWithSeparatorJS(array, keys, length, convert, separator);
}
+ }
- // Construct an array for the elements.
- var elements = new InternalArray(length);
+ // Fast case for one-element arrays.
+ if (length === 1) {
+ var e = array[0];
+ return IS_STRING(e) ? e : convert(e);
+ }
- // We pull the empty separator check outside the loop for speed!
- if (separator.length == 0) {
- var elements_length = 0;
- for (var i = 0; i < length; i++) {
- var e = array[i];
- if (!IS_STRING(e)) e = convert(e);
- elements[elements_length++] = e;
- }
- elements.length = elements_length;
- return %StringBuilderConcat(elements, elements_length, '');
+ // Construct an array for the elements.
+ var elements = new InternalArray(length);
+
+ // We pull the empty separator check outside the loop for speed!
+ if (separator === '') {
+ for (var i = 0; i < length; i++) {
+ var e = array[i];
+ elements[i] = IS_STRING(e) ? e : convert(e);
}
- // Non-empty separator case.
- // If the first element is a number then use the heuristic that the
- // remaining elements are also likely to be numbers.
- if (!IS_NUMBER(array[0])) {
- for (var i = 0; i < length; i++) {
- var e = array[i];
- if (!IS_STRING(e)) e = convert(e);
- elements[i] = e;
- }
- } else {
- for (var i = 0; i < length; i++) {
- var e = array[i];
- if (IS_NUMBER(e)) {
- e = %_NumberToString(e);
- } else if (!IS_STRING(e)) {
- e = convert(e);
- }
- elements[i] = e;
+ return %StringBuilderConcat(elements, length, '');
+ }
+ // Non-empty separator case.
+ // If the first element is a number then use the heuristic that the
+ // remaining elements are also likely to be numbers.
+ var e = array[0];
+ if (IS_NUMBER(e)) {
+ elements[0] = %_NumberToString(e);
+ for (var i = 1; i < length; i++) {
+ e = array[i];
+ if (IS_NUMBER(e)) {
+ elements[i] = %_NumberToString(e);
+ } else {
+ elements[i] = IS_STRING(e) ? e : convert(e);
}
}
- return %StringBuilderJoin(elements, length, separator);
+ } else {
+ elements[0] = IS_STRING(e) ? e : convert(e);
+ for (var i = 1; i < length; i++) {
+ e = array[i];
+ elements[i] = IS_STRING(e) ? e : convert(e);
+ }
+ }
+ return %StringBuilderJoin(elements, length, separator);
+}
+
+function Join(array, length, separator, convert) {
+ if (length === 0) return '';
+
+ var is_array = IS_ARRAY(array);
+
+ if (is_array) {
+ // If the array is cyclic, return the empty string for already
+ // visited arrays.
+ if (StackHas(visited_arrays, array)) return '';
+ StackPush(visited_arrays, array);
+ }
+
+ // Attempt to convert the elements.
+ try {
+ return DoJoin(array, length, is_array, separator, convert);
} finally {
// Make sure to remove the last element of the visited array no
// matter what happens.
- if (is_array) visited_arrays.length = visited_arrays.length - 1;
+ if (is_array) StackPop(visited_arrays);
}
}
function ConvertToString(x) {
- if (IS_NULL_OR_UNDEFINED(x)) {
- return '';
- } else {
- return TO_STRING(x);
- }
+ if (IS_NULL_OR_UNDEFINED(x)) return '';
+ return TO_STRING(x);
}
function ConvertToLocaleString(e) {
- if (IS_NULL_OR_UNDEFINED(e)) {
- return '';
- } else {
- return TO_STRING(e.toLocaleString());
- }
+ if (IS_NULL_OR_UNDEFINED(e)) return '';
+ return TO_STRING(e.toLocaleString());
}
@@ -275,12 +272,10 @@ function SparseSlice(array, start_i, del_count, len, deleted_elements) {
var length = indices.length;
for (var k = 0; k < length; ++k) {
var key = indices[k];
- if (!IS_UNDEFINED(key)) {
- if (key >= start_i) {
- var current = array[key];
- if (!IS_UNDEFINED(current) || key in array) {
- DefineIndexedProperty(deleted_elements, key - start_i, current);
- }
+ if (key >= start_i) {
+ var current = array[key];
+ if (!IS_UNDEFINED(current) || key in array) {
+ DefineIndexedProperty(deleted_elements, key - start_i, current);
}
}
}
@@ -317,21 +312,19 @@ function SparseMove(array, start_i, del_count, len, num_additional_args) {
var length = indices.length;
for (var k = 0; k < length; ++k) {
var key = indices[k];
- if (!IS_UNDEFINED(key)) {
- if (key < start_i) {
- var current = array[key];
- if (!IS_UNDEFINED(current) || key in array) {
- new_array[key] = current;
- }
- } else if (key >= start_i + del_count) {
- var current = array[key];
- if (!IS_UNDEFINED(current) || key in array) {
- var new_key = key - del_count + num_additional_args;
- new_array[new_key] = current;
- if (new_key > 0xfffffffe) {
- big_indices = big_indices || new InternalArray();
- big_indices.push(new_key);
- }
+ if (key < start_i) {
+ var current = array[key];
+ if (!IS_UNDEFINED(current) || key in array) {
+ new_array[key] = current;
+ }
+ } else if (key >= start_i + del_count) {
+ var current = array[key];
+ if (!IS_UNDEFINED(current) || key in array) {
+ var new_key = key - del_count + num_additional_args;
+ new_array[new_key] = current;
+ if (new_key > 0xfffffffe) {
+ big_indices = big_indices || new InternalArray();
+ big_indices.push(new_key);
}
}
}
@@ -1069,8 +1062,7 @@ function InnerArraySort(array, length, comparefn) {
} else {
for (var i = 0; i < indices.length; i++) {
var index = indices[i];
- if (!IS_UNDEFINED(index) && !HAS_OWN_PROPERTY(obj, index)
- && HAS_OWN_PROPERTY(proto, index)) {
+ if (!HAS_OWN_PROPERTY(obj, index) && HAS_OWN_PROPERTY(proto, index)) {
obj[index] = proto[index];
if (index >= max) { max = index + 1; }
}
@@ -1097,8 +1089,7 @@ function InnerArraySort(array, length, comparefn) {
} else {
for (var i = 0; i < indices.length; i++) {
var index = indices[i];
- if (!IS_UNDEFINED(index) && from <= index &&
- HAS_OWN_PROPERTY(proto, index)) {
+ if (from <= index && HAS_OWN_PROPERTY(proto, index)) {
obj[index] = UNDEFINED;
}
}
@@ -1247,10 +1238,19 @@ function InnerArrayForEach(f, receiver, array, length) {
if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
var is_array = IS_ARRAY(array);
- for (var i = 0; i < length; i++) {
- if (HAS_INDEX(array, i, is_array)) {
- var element = array[i];
- %_Call(f, receiver, element, i, array);
+ if (IS_UNDEFINED(receiver)) {
+ for (var i = 0; i < length; i++) {
+ if (HAS_INDEX(array, i, is_array)) {
+ var element = array[i];
+ f(element, i, array);
+ }
+ }
+ } else {
+ for (var i = 0; i < length; i++) {
+ if (HAS_INDEX(array, i, is_array)) {
+ var element = array[i];
+ %_Call(f, receiver, element, i, array);
+ }
}
}
}
@@ -1347,7 +1347,7 @@ function InnerArrayIndexOf(array, element, index, length) {
if (IS_UNDEFINED(index)) {
index = 0;
} else {
- index = TO_INTEGER(index);
+ index = TO_INTEGER(index) + 0; // Add 0 to convert -0 to 0
// If index is negative, index from the end of the array.
if (index < 0) {
index = length + index;
@@ -1373,7 +1373,7 @@ function InnerArrayIndexOf(array, element, index, length) {
while (i < n && sortedKeys[i] < index) i++;
while (i < n) {
var key = sortedKeys[i];
- if (!IS_UNDEFINED(key) && array[key] === element) return key;
+ if (array[key] === element) return key;
i++;
}
return -1;
@@ -1409,7 +1409,7 @@ function InnerArrayLastIndexOf(array, element, index, length, argumentsLength) {
if (argumentsLength < 2) {
index = length - 1;
} else {
- index = TO_INTEGER(index);
+ index = TO_INTEGER(index) + 0; // Add 0 to convert -0 to 0
// If index is negative, index from end of the array.
if (index < 0) index += length;
// If index is still negative, do not search the array.
@@ -1432,7 +1432,7 @@ function InnerArrayLastIndexOf(array, element, index, length, argumentsLength) {
var i = sortedKeys.length - 1;
while (i >= 0) {
var key = sortedKeys[i];
- if (!IS_UNDEFINED(key) && array[key] === element) return key;
+ if (array[key] === element) return key;
i--;
}
return -1;
@@ -1946,6 +1946,10 @@ utils.Export(function(to) {
to.InnerArraySort = InnerArraySort;
to.InnerArrayToLocaleString = InnerArrayToLocaleString;
to.PackedArrayReverse = PackedArrayReverse;
+ to.Stack = Stack;
+ to.StackHas = StackHas;
+ to.StackPush = StackPush;
+ to.StackPop = StackPop;
});
%InstallToContext([
diff --git a/deps/v8/src/js/harmony-atomics.js b/deps/v8/src/js/harmony-atomics.js
index b861a2a471..9f80227426 100644
--- a/deps/v8/src/js/harmony-atomics.js
+++ b/deps/v8/src/js/harmony-atomics.js
@@ -12,12 +12,14 @@
// Imports
var GlobalObject = global.Object;
+var MakeRangeError;
var MakeTypeError;
var MaxSimple;
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
MakeTypeError = from.MakeTypeError;
+ MakeRangeError = from.MakeRangeError;
MaxSimple = from.MaxSimple;
});
@@ -37,14 +39,24 @@ function CheckSharedInteger32TypedArray(ia) {
}
}
+// https://tc39.github.io/ecmascript_sharedmem/shmem.html#Atomics.ValidateAtomicAccess
+function ValidateIndex(index, length) {
+ var numberIndex = TO_NUMBER(index);
+ var accessIndex = TO_INTEGER(numberIndex);
+ if (numberIndex !== accessIndex) {
+ throw MakeRangeError(kInvalidAtomicAccessIndex);
+ }
+ if (accessIndex < 0 || accessIndex >= length) {
+ throw MakeRangeError(kInvalidAtomicAccessIndex);
+ }
+ return accessIndex;
+}
+
//-------------------------------------------------------------------
function AtomicsCompareExchangeJS(sta, index, oldValue, newValue) {
CheckSharedIntegerTypedArray(sta);
- index = TO_INTEGER(index);
- if (index < 0 || index >= %_TypedArrayGetLength(sta)) {
- return UNDEFINED;
- }
+ index = ValidateIndex(index, %_TypedArrayGetLength(sta));
oldValue = TO_NUMBER(oldValue);
newValue = TO_NUMBER(newValue);
return %_AtomicsCompareExchange(sta, index, oldValue, newValue);
@@ -52,79 +64,55 @@ function AtomicsCompareExchangeJS(sta, index, oldValue, newValue) {
function AtomicsLoadJS(sta, index) {
CheckSharedIntegerTypedArray(sta);
- index = TO_INTEGER(index);
- if (index < 0 || index >= %_TypedArrayGetLength(sta)) {
- return UNDEFINED;
- }
+ index = ValidateIndex(index, %_TypedArrayGetLength(sta));
return %_AtomicsLoad(sta, index);
}
function AtomicsStoreJS(sta, index, value) {
CheckSharedIntegerTypedArray(sta);
- index = TO_INTEGER(index);
- if (index < 0 || index >= %_TypedArrayGetLength(sta)) {
- return UNDEFINED;
- }
+ index = ValidateIndex(index, %_TypedArrayGetLength(sta));
value = TO_NUMBER(value);
return %_AtomicsStore(sta, index, value);
}
function AtomicsAddJS(ia, index, value) {
CheckSharedIntegerTypedArray(ia);
- index = TO_INTEGER(index);
- if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
- return UNDEFINED;
- }
+ index = ValidateIndex(index, %_TypedArrayGetLength(ia));
value = TO_NUMBER(value);
return %_AtomicsAdd(ia, index, value);
}
function AtomicsSubJS(ia, index, value) {
CheckSharedIntegerTypedArray(ia);
- index = TO_INTEGER(index);
- if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
- return UNDEFINED;
- }
+ index = ValidateIndex(index, %_TypedArrayGetLength(ia));
value = TO_NUMBER(value);
return %_AtomicsSub(ia, index, value);
}
function AtomicsAndJS(ia, index, value) {
CheckSharedIntegerTypedArray(ia);
- index = TO_INTEGER(index);
- if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
- return UNDEFINED;
- }
+ index = ValidateIndex(index, %_TypedArrayGetLength(ia));
value = TO_NUMBER(value);
return %_AtomicsAnd(ia, index, value);
}
function AtomicsOrJS(ia, index, value) {
CheckSharedIntegerTypedArray(ia);
- index = TO_INTEGER(index);
- if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
- return UNDEFINED;
- }
+ index = ValidateIndex(index, %_TypedArrayGetLength(ia));
value = TO_NUMBER(value);
return %_AtomicsOr(ia, index, value);
}
function AtomicsXorJS(ia, index, value) {
CheckSharedIntegerTypedArray(ia);
- index = TO_INTEGER(index);
- if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
- return UNDEFINED;
- }
+ index = ValidateIndex(index, %_TypedArrayGetLength(ia));
value = TO_NUMBER(value);
return %_AtomicsXor(ia, index, value);
}
function AtomicsExchangeJS(ia, index, value) {
CheckSharedIntegerTypedArray(ia);
- index = TO_INTEGER(index);
- if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
- return UNDEFINED;
- }
+ index = ValidateIndex(index, %_TypedArrayGetLength(ia));
value = TO_NUMBER(value);
return %_AtomicsExchange(ia, index, value);
}
@@ -137,10 +125,7 @@ function AtomicsIsLockFreeJS(size) {
function AtomicsFutexWaitJS(ia, index, value, timeout) {
CheckSharedInteger32TypedArray(ia);
- index = TO_INTEGER(index);
- if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
- return UNDEFINED;
- }
+ index = ValidateIndex(index, %_TypedArrayGetLength(ia));
if (IS_UNDEFINED(timeout)) {
timeout = INFINITY;
} else {
@@ -156,20 +141,17 @@ function AtomicsFutexWaitJS(ia, index, value, timeout) {
function AtomicsFutexWakeJS(ia, index, count) {
CheckSharedInteger32TypedArray(ia);
- index = TO_INTEGER(index);
- if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
- return UNDEFINED;
- }
+ index = ValidateIndex(index, %_TypedArrayGetLength(ia));
count = MaxSimple(0, TO_INTEGER(count));
return %AtomicsFutexWake(ia, index, count);
}
function AtomicsFutexWakeOrRequeueJS(ia, index1, count, value, index2) {
CheckSharedInteger32TypedArray(ia);
- index1 = TO_INTEGER(index1);
+ index1 = ValidateIndex(index1, %_TypedArrayGetLength(ia));
count = MaxSimple(0, TO_INTEGER(count));
value = TO_INT32(value);
- index2 = TO_INTEGER(index2);
+ index2 = ValidateIndex(index2, %_TypedArrayGetLength(ia));
if (index1 < 0 || index1 >= %_TypedArrayGetLength(ia) ||
index2 < 0 || index2 >= %_TypedArrayGetLength(ia)) {
return UNDEFINED;
diff --git a/deps/v8/src/js/harmony-regexp-exec.js b/deps/v8/src/js/harmony-regexp-exec.js
new file mode 100644
index 0000000000..e2eece98aa
--- /dev/null
+++ b/deps/v8/src/js/harmony-regexp-exec.js
@@ -0,0 +1,37 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils) {
+
+%CheckIsBootstrapping();
+
+// -------------------------------------------------------------------
+// Imports
+
+var GlobalRegExp = global.RegExp;
+var RegExpSubclassExecJS = utils.ImportNow("RegExpSubclassExecJS");
+var RegExpSubclassMatch = utils.ImportNow("RegExpSubclassMatch");
+var RegExpSubclassReplace = utils.ImportNow("RegExpSubclassReplace");
+var RegExpSubclassSearch = utils.ImportNow("RegExpSubclassSearch");
+var RegExpSubclassSplit = utils.ImportNow("RegExpSubclassSplit");
+var RegExpSubclassTest = utils.ImportNow("RegExpSubclassTest");
+var matchSymbol = utils.ImportNow("match_symbol");
+var replaceSymbol = utils.ImportNow("replace_symbol");
+var searchSymbol = utils.ImportNow("search_symbol");
+var splitSymbol = utils.ImportNow("split_symbol");
+
+utils.OverrideFunction(GlobalRegExp.prototype, "exec",
+ RegExpSubclassExecJS, true);
+utils.OverrideFunction(GlobalRegExp.prototype, matchSymbol,
+ RegExpSubclassMatch, true);
+utils.OverrideFunction(GlobalRegExp.prototype, replaceSymbol,
+ RegExpSubclassReplace, true);
+utils.OverrideFunction(GlobalRegExp.prototype, searchSymbol,
+ RegExpSubclassSearch, true);
+utils.OverrideFunction(GlobalRegExp.prototype, splitSymbol,
+ RegExpSubclassSplit, true);
+utils.OverrideFunction(GlobalRegExp.prototype, "test",
+ RegExpSubclassTest, true);
+
+})
diff --git a/deps/v8/src/js/harmony-regexp.js b/deps/v8/src/js/harmony-regexp.js
deleted file mode 100644
index f76ef86ec7..0000000000
--- a/deps/v8/src/js/harmony-regexp.js
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-'use strict';
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GlobalRegExp = global.RegExp;
-var GlobalRegExpPrototype = GlobalRegExp.prototype;
-var MakeTypeError;
-var regExpFlagsSymbol = utils.ImportNow("regexp_flags_symbol");
-
-utils.Import(function(from) {
- MakeTypeError = from.MakeTypeError;
-});
-
-// -------------------------------------------------------------------
-
-// ES6 draft 12-06-13, section 21.2.5.3
-// + https://bugs.ecmascript.org/show_bug.cgi?id=3423
-function RegExpGetFlags() {
- if (!IS_RECEIVER(this)) {
- throw MakeTypeError(
- kRegExpNonObject, "RegExp.prototype.flags", TO_STRING(this));
- }
- var result = '';
- if (this.global) result += 'g';
- if (this.ignoreCase) result += 'i';
- if (this.multiline) result += 'm';
- if (this.unicode) result += 'u';
- if (this.sticky) result += 'y';
- return result;
-}
-
-// ES6 21.2.5.12.
-function RegExpGetSticky() {
- if (!IS_REGEXP(this)) {
- // Compat fix: RegExp.prototype.sticky == undefined; UseCounter tracks it
- // TODO(littledan): Remove this workaround or standardize it
- if (this === GlobalRegExpPrototype) {
- %IncrementUseCounter(kRegExpPrototypeStickyGetter);
- return UNDEFINED;
- }
- throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.sticky");
- }
- return !!REGEXP_STICKY(this);
-}
-%FunctionSetName(RegExpGetSticky, "RegExp.prototype.sticky");
-%SetNativeFlag(RegExpGetSticky);
-
-utils.InstallGetter(GlobalRegExp.prototype, 'flags', RegExpGetFlags);
-utils.InstallGetter(GlobalRegExp.prototype, 'sticky', RegExpGetSticky);
-
-})
diff --git a/deps/v8/src/js/harmony-string-padding.js b/deps/v8/src/js/harmony-string-padding.js
new file mode 100644
index 0000000000..a6c6c474de
--- /dev/null
+++ b/deps/v8/src/js/harmony-string-padding.js
@@ -0,0 +1,77 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils) {
+
+%CheckIsBootstrapping();
+
+// -------------------------------------------------------------------
+// Imports
+
+var GlobalString = global.String;
+var MakeTypeError;
+
+utils.Import(function(from) {
+ MakeTypeError = from.MakeTypeError;
+});
+
+// -------------------------------------------------------------------
+// http://tc39.github.io/proposal-string-pad-start-end/
+
+function StringPad(thisString, maxLength, fillString) {
+ maxLength = TO_LENGTH(maxLength);
+ var stringLength = thisString.length;
+
+ if (maxLength <= stringLength) return "";
+
+ if (IS_UNDEFINED(fillString)) {
+ fillString = " ";
+ } else {
+ fillString = TO_STRING(fillString);
+ if (fillString === "") {
+ fillString = " ";
+ }
+ }
+
+ var fillLength = maxLength - stringLength;
+ var repetitions = (fillLength / fillString.length) | 0;
+ var remainingChars = (fillLength - fillString.length * repetitions) | 0;
+
+ var filler = "";
+ while (true) {
+ if (repetitions & 1) filler += fillString;
+ repetitions >>= 1;
+ if (repetitions === 0) break;
+ fillString += fillString;
+ }
+
+ if (remainingChars) {
+ filler += %_SubString(fillString, 0, remainingChars);
+ }
+
+ return filler;
+}
+
+function StringPadStart(maxLength, fillString) {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.padStart")
+ var thisString = TO_STRING(this);
+
+ return StringPad(thisString, maxLength, fillString) + thisString;
+}
+%FunctionSetLength(StringPadStart, 1);
+
+function StringPadEnd(maxLength, fillString) {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.padEnd")
+ var thisString = TO_STRING(this);
+
+ return thisString + StringPad(thisString, maxLength, fillString);
+}
+%FunctionSetLength(StringPadEnd, 1);
+
+utils.InstallFunctions(GlobalString.prototype, DONT_ENUM, [
+ "padStart", StringPadStart,
+ "padEnd", StringPadEnd
+]);
+
+});
diff --git a/deps/v8/src/js/harmony-unicode-regexps.js b/deps/v8/src/js/harmony-unicode-regexps.js
index b24bbdf2c5..16d06ba7e3 100644
--- a/deps/v8/src/js/harmony-unicode-regexps.js
+++ b/deps/v8/src/js/harmony-unicode-regexps.js
@@ -31,10 +31,9 @@ function RegExpGetUnicode() {
}
throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.unicode");
}
- return !!REGEXP_UNICODE(this);
+ return TO_BOOLEAN(REGEXP_UNICODE(this));
}
-%FunctionSetName(RegExpGetUnicode, "RegExp.prototype.unicode");
-%SetNativeFlag(RegExpGetUnicode);
+%SetForceInlineFlag(RegExpGetUnicode);
utils.InstallGetter(GlobalRegExp.prototype, 'unicode', RegExpGetUnicode);
diff --git a/deps/v8/src/js/i18n.js b/deps/v8/src/js/i18n.js
index 7b2f5a1a12..845289a91f 100644
--- a/deps/v8/src/js/i18n.js
+++ b/deps/v8/src/js/i18n.js
@@ -20,26 +20,30 @@
var ArrayIndexOf;
var ArrayJoin;
var ArrayPush;
-var IsFinite;
-var IsNaN;
var GlobalBoolean = global.Boolean;
var GlobalDate = global.Date;
var GlobalNumber = global.Number;
var GlobalRegExp = global.RegExp;
var GlobalString = global.String;
+var InstallFunctions = utils.InstallFunctions;
+var InstallGetter = utils.InstallGetter;
+var InternalPackedArray = utils.InternalPackedArray;
+var InternalRegExpMatch;
+var InternalRegExpReplace
+var IsFinite;
+var IsNaN;
var MakeError;
var MakeRangeError;
var MakeTypeError;
-var MathFloor;
var ObjectDefineProperties = utils.ImportNow("ObjectDefineProperties");
var ObjectDefineProperty = utils.ImportNow("ObjectDefineProperty");
+var ObjectHasOwnProperty = utils.ImportNow("ObjectHasOwnProperty");
+var OverrideFunction = utils.OverrideFunction;
var patternSymbol = utils.ImportNow("intl_pattern_symbol");
-var RegExpTest;
var resolvedSymbol = utils.ImportNow("intl_resolved_symbol");
+var SetFunctionName = utils.SetFunctionName;
var StringIndexOf;
var StringLastIndexOf;
-var StringMatch;
-var StringReplace;
var StringSplit;
var StringSubstr;
var StringSubstring;
@@ -53,17 +57,72 @@ utils.Import(function(from) {
MakeError = from.MakeError;
MakeRangeError = from.MakeRangeError;
MakeTypeError = from.MakeTypeError;
- MathFloor = from.MathFloor;
- RegExpTest = from.RegExpTest;
+ InternalRegExpMatch = from.InternalRegExpMatch;
+ InternalRegExpReplace = from.InternalRegExpReplace;
StringIndexOf = from.StringIndexOf;
StringLastIndexOf = from.StringLastIndexOf;
- StringMatch = from.StringMatch;
- StringReplace = from.StringReplace;
StringSplit = from.StringSplit;
StringSubstr = from.StringSubstr;
StringSubstring = from.StringSubstring;
});
+// Utilities for definitions
+
+function InstallFunction(object, name, func) {
+ InstallFunctions(object, DONT_ENUM, [name, func]);
+}
+
+
+function InstallConstructor(object, name, func) {
+ %CheckIsBootstrapping();
+ SetFunctionName(func, name);
+ %AddNamedProperty(object, name, func, DONT_ENUM);
+ %SetNativeFlag(func);
+ %ToFastProperties(object);
+}
+
+/**
+ * Adds bound method to the prototype of the given object.
+ */
+function AddBoundMethod(obj, methodName, implementation, length) {
+ %CheckIsBootstrapping();
+ var internalName = %CreatePrivateSymbol(methodName);
+ var getter = function() {
+ if (!%IsInitializedIntlObject(this)) {
+ throw MakeTypeError(kMethodCalledOnWrongObject, methodName);
+ }
+ if (IS_UNDEFINED(this[internalName])) {
+ var boundMethod;
+ if (IS_UNDEFINED(length) || length === 2) {
+ boundMethod = (x, y) => implementation(this, x, y);
+ } else if (length === 1) {
+ boundMethod = x => implementation(this, x);
+ } else {
+ boundMethod = (...args) => {
+ // DateTimeFormat.format needs to be 0 arg method, but can stil
+ // receive optional dateValue param. If one was provided, pass it
+ // along.
+ if (args.length > 0) {
+ return implementation(this, args[0]);
+ } else {
+ return implementation(this);
+ }
+ }
+ }
+ // TODO(littledan): Once function name reform is shipped, remove the
+ // following line and wrap the boundMethod definition in an anonymous
+ // function macro.
+ %FunctionSetName(boundMethod, '__bound' + methodName + '__');
+ %FunctionRemovePrototype(boundMethod);
+ %SetNativeFlag(boundMethod);
+ this[internalName] = boundMethod;
+ }
+ return this[internalName];
+ };
+
+ InstallGetter(obj.prototype, methodName, getter, DONT_ENUM);
+}
+
// -------------------------------------------------------------------
var Intl = {};
@@ -197,74 +256,13 @@ function GetTimezoneNameLocationPartRE() {
return TIMEZONE_NAME_LOCATION_PART_RE;
}
-/**
- * Adds bound method to the prototype of the given object.
- */
-function addBoundMethod(obj, methodName, implementation, length) {
- %CheckIsBootstrapping();
- function getter() {
- if (!%IsInitializedIntlObject(this)) {
- throw MakeTypeError(kMethodCalledOnWrongObject, methodName);
- }
- var internalName = '__bound' + methodName + '__';
- if (IS_UNDEFINED(this[internalName])) {
- var that = this;
- var boundMethod;
- if (IS_UNDEFINED(length) || length === 2) {
- boundMethod = function(x, y) {
- if (!IS_UNDEFINED(new.target)) {
- throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
- }
- return implementation(that, x, y);
- }
- } else if (length === 1) {
- boundMethod = function(x) {
- if (!IS_UNDEFINED(new.target)) {
- throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
- }
- return implementation(that, x);
- }
- } else {
- boundMethod = function() {
- if (!IS_UNDEFINED(new.target)) {
- throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
- }
- // DateTimeFormat.format needs to be 0 arg method, but can stil
- // receive optional dateValue param. If one was provided, pass it
- // along.
- if (arguments.length > 0) {
- return implementation(that, arguments[0]);
- } else {
- return implementation(that);
- }
- }
- }
- %FunctionSetName(boundMethod, internalName);
- %FunctionRemovePrototype(boundMethod);
- %SetNativeFlag(boundMethod);
- this[internalName] = boundMethod;
- }
- return this[internalName];
- }
-
- %FunctionSetName(getter, methodName);
- %FunctionRemovePrototype(getter);
- %SetNativeFlag(getter);
-
- ObjectDefineProperty(obj.prototype, methodName, {
- get: getter,
- enumerable: false,
- configurable: true
- });
-}
-
/**
* Returns an intersection of locales and service supported locales.
* Parameter locales is treated as a priority list.
*/
function supportedLocalesOf(service, locales, options) {
- if (IS_NULL(%_Call(StringMatch, service, GetServiceRE()))) {
+ if (IS_NULL(InternalRegExpMatch(GetServiceRE(), service))) {
throw MakeError(kWrongServiceType, service);
}
@@ -312,10 +310,8 @@ function lookupSupportedLocalesOf(requestedLocales, availableLocales) {
var matchedLocales = [];
for (var i = 0; i < requestedLocales.length; ++i) {
// Remove -u- extension.
- var locale = %_Call(StringReplace,
- requestedLocales[i],
- GetUnicodeExtensionRE(),
- '');
+ var locale = InternalRegExpReplace(
+ GetUnicodeExtensionRE(), requestedLocales[i], '');
do {
if (!IS_UNDEFINED(availableLocales[locale])) {
// Push requested locale not the resolved one.
@@ -421,7 +417,7 @@ function resolveLocale(service, requestedLocales, options) {
* lookup algorithm.
*/
function lookupMatcher(service, requestedLocales) {
- if (IS_NULL(%_Call(StringMatch, service, GetServiceRE()))) {
+ if (IS_NULL(InternalRegExpMatch(GetServiceRE(), service))) {
throw MakeError(kWrongServiceType, service);
}
@@ -432,13 +428,13 @@ function lookupMatcher(service, requestedLocales) {
for (var i = 0; i < requestedLocales.length; ++i) {
// Remove all extensions.
- var locale = %_Call(StringReplace, requestedLocales[i],
- GetAnyExtensionRE(), '');
+ var locale = InternalRegExpReplace(
+ GetAnyExtensionRE(), requestedLocales[i], '');
do {
if (!IS_UNDEFINED(AVAILABLE_LOCALES[service][locale])) {
// Return the resolved locale and extension.
- var extensionMatch =
- %_Call(StringMatch, requestedLocales[i], GetUnicodeExtensionRE());
+ var extensionMatch = InternalRegExpMatch(
+ GetUnicodeExtensionRE(), requestedLocales[i]);
var extension = IS_NULL(extensionMatch) ? '' : extensionMatch[0];
return {'locale': locale, 'extension': extension, 'position': i};
}
@@ -535,7 +531,7 @@ function setOptions(inOptions, extensionMap, keyValues, getOption, outOptions) {
}
for (var key in keyValues) {
- if (%HasOwnProperty(keyValues, key)) {
+ if (HAS_OWN_PROPERTY(keyValues, key)) {
var value = UNDEFINED;
var map = keyValues[key];
if (!IS_UNDEFINED(map.property)) {
@@ -551,7 +547,7 @@ function setOptions(inOptions, extensionMap, keyValues, getOption, outOptions) {
// User options didn't have it, check Unicode extension.
// Here we want to convert strings 'true', 'false' into proper Boolean
// values (not a user error).
- if (%HasOwnProperty(extensionMap, key)) {
+ if (HAS_OWN_PROPERTY(extensionMap, key)) {
value = extensionMap[key];
if (!IS_UNDEFINED(value)) {
updateProperty(map.property, map.type, value);
@@ -612,8 +608,8 @@ function getOptimalLanguageTag(original, resolved) {
}
// Preserve extensions of resolved locale, but swap base tags with original.
- var resolvedBase = new GlobalRegExp('^' + locales[1].base);
- return %_Call(StringReplace, resolved, resolvedBase, locales[0].base);
+ var resolvedBase = new GlobalRegExp('^' + locales[1].base, 'g');
+ return InternalRegExpReplace(resolvedBase, resolved, locales[0].base);
}
@@ -627,10 +623,10 @@ function getAvailableLocalesOf(service) {
var available = %AvailableLocalesOf(service);
for (var i in available) {
- if (%HasOwnProperty(available, i)) {
- var parts =
- %_Call(StringMatch, i, /^([a-z]{2,3})-([A-Z][a-z]{3})-([A-Z]{2})$/);
- if (parts !== null) {
+ if (HAS_OWN_PROPERTY(available, i)) {
+ var parts = InternalRegExpMatch(
+ /^([a-z]{2,3})-([A-Z][a-z]{3})-([A-Z]{2})$/, i);
+ if (!IS_NULL(parts)) {
// Build xx-ZZ. We don't care about the actual value,
// as long it's not undefined.
available[parts[1] + '-' + parts[3]] = null;
@@ -700,7 +696,7 @@ function toTitleCaseWord(word) {
* 'of', 'au' and 'es' are special-cased and lowercased.
*/
function toTitleCaseTimezoneLocation(location) {
- var match = %_Call(StringMatch, location, GetTimezoneNameLocationPartRE());
+ var match = InternalRegExpMatch(GetTimezoneNameLocationPartRE(), location)
if (IS_NULL(match)) throw MakeRangeError(kExpectedLocation, location);
var result = toTitleCaseWord(match[1]);
@@ -797,7 +793,7 @@ function initializeLocaleList(locales) {
*/
function isValidLanguageTag(locale) {
// Check if it's well-formed, including grandfadered tags.
- if (!%_Call(RegExpTest, GetLanguageTagRE(), locale)) {
+ if (IS_NULL(InternalRegExpMatch(GetLanguageTagRE(), locale))) {
return false;
}
@@ -809,17 +805,17 @@ function isValidLanguageTag(locale) {
// Check if there are any duplicate variants or singletons (extensions).
// Remove private use section.
- locale = %_Call(StringSplit, locale, /-x-/)[0];
+ locale = %_Call(StringSplit, locale, '-x-')[0];
// Skip language since it can match variant regex, so we start from 1.
// We are matching i-klingon here, but that's ok, since i-klingon-klingon
// is not valid and would fail LANGUAGE_TAG_RE test.
var variants = [];
var extensions = [];
- var parts = %_Call(StringSplit, locale, /-/);
+ var parts = %_Call(StringSplit, locale, '-');
for (var i = 1; i < parts.length; i++) {
var value = parts[i];
- if (%_Call(RegExpTest, GetLanguageVariantRE(), value) &&
+ if (!IS_NULL(InternalRegExpMatch(GetLanguageVariantRE(), value)) &&
extensions.length === 0) {
if (%_Call(ArrayIndexOf, variants, value) === -1) {
%_Call(ArrayPush, variants, value);
@@ -828,7 +824,7 @@ function isValidLanguageTag(locale) {
}
}
- if (%_Call(RegExpTest, GetLanguageSingletonRE(), value)) {
+ if (!IS_NULL(InternalRegExpMatch(GetLanguageSingletonRE(), value))) {
if (%_Call(ArrayIndexOf, extensions, value) === -1) {
%_Call(ArrayPush, extensions, value);
} else {
@@ -943,7 +939,7 @@ function initializeCollator(collator, locales, options) {
var collation = 'default';
var extension = '';
- if (%HasOwnProperty(extensionMap, 'co') && internalOptions.usage === 'sort') {
+ if (HAS_OWN_PROPERTY(extensionMap, 'co') && internalOptions.usage === 'sort') {
/**
* Allowed -u-co- values. List taken from:
@@ -1001,7 +997,7 @@ function initializeCollator(collator, locales, options) {
*
* @constructor
*/
-%AddNamedProperty(Intl, 'Collator', function() {
+InstallConstructor(Intl, 'Collator', function() {
var locales = arguments[0];
var options = arguments[1];
@@ -1011,15 +1007,14 @@ function initializeCollator(collator, locales, options) {
}
return initializeCollator(TO_OBJECT(this), locales, options);
- },
- DONT_ENUM
+ }
);
/**
* Collator resolvedOptions method.
*/
-%AddNamedProperty(Intl.Collator.prototype, 'resolvedOptions', function() {
+InstallFunction(Intl.Collator.prototype, 'resolvedOptions', function() {
if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -1041,12 +1036,8 @@ function initializeCollator(collator, locales, options) {
caseFirst: coll[resolvedSymbol].caseFirst,
collation: coll[resolvedSymbol].collation
};
- },
- DONT_ENUM
+ }
);
-%FunctionSetName(Intl.Collator.prototype.resolvedOptions, 'resolvedOptions');
-%FunctionRemovePrototype(Intl.Collator.prototype.resolvedOptions);
-%SetNativeFlag(Intl.Collator.prototype.resolvedOptions);
/**
@@ -1055,18 +1046,14 @@ function initializeCollator(collator, locales, options) {
* order in the returned list as in the input list.
* Options are optional parameter.
*/
-%AddNamedProperty(Intl.Collator, 'supportedLocalesOf', function(locales) {
+InstallFunction(Intl.Collator, 'supportedLocalesOf', function(locales) {
if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
return supportedLocalesOf('collator', locales, arguments[1]);
- },
- DONT_ENUM
+ }
);
-%FunctionSetName(Intl.Collator.supportedLocalesOf, 'supportedLocalesOf');
-%FunctionRemovePrototype(Intl.Collator.supportedLocalesOf);
-%SetNativeFlag(Intl.Collator.supportedLocalesOf);
/**
@@ -1085,7 +1072,7 @@ function compare(collator, x, y) {
};
-addBoundMethod(Intl.Collator, 'compare', compare, 2);
+AddBoundMethod(Intl.Collator, 'compare', compare, 2);
/**
* Verifies that the input is a well-formed ISO 4217 currency code.
@@ -1093,9 +1080,8 @@ addBoundMethod(Intl.Collator, 'compare', compare, 2);
* For example \u00DFP (Eszett+P) becomes SSP.
*/
function isWellFormedCurrencyCode(currency) {
- return typeof currency == "string" &&
- currency.length == 3 &&
- %_Call(StringMatch, currency, /[^A-Za-z]/) == null;
+ return typeof currency == "string" && currency.length == 3 &&
+ IS_NULL(InternalRegExpMatch(/[^A-Za-z]/, currency));
}
@@ -1110,7 +1096,7 @@ function getNumberOption(options, property, min, max, fallback) {
if (IsNaN(value) || value < min || value > max) {
throw MakeRangeError(kPropertyValueOutOfRange, property);
}
- return MathFloor(value);
+ return %math_floor(value);
}
return fallback;
@@ -1225,10 +1211,10 @@ function initializeNumberFormat(numberFormat, locales, options) {
style: {value: internalOptions.style, writable: true},
useGrouping: {writable: true}
});
- if (%HasOwnProperty(internalOptions, 'minimumSignificantDigits')) {
+ if (HAS_OWN_PROPERTY(internalOptions, 'minimumSignificantDigits')) {
defineWEProperty(resolved, 'minimumSignificantDigits', UNDEFINED);
}
- if (%HasOwnProperty(internalOptions, 'maximumSignificantDigits')) {
+ if (HAS_OWN_PROPERTY(internalOptions, 'maximumSignificantDigits')) {
defineWEProperty(resolved, 'maximumSignificantDigits', UNDEFINED);
}
var formatter = %CreateNumberFormat(requestedLocale,
@@ -1254,7 +1240,7 @@ function initializeNumberFormat(numberFormat, locales, options) {
*
* @constructor
*/
-%AddNamedProperty(Intl, 'NumberFormat', function() {
+InstallConstructor(Intl, 'NumberFormat', function() {
var locales = arguments[0];
var options = arguments[1];
@@ -1264,15 +1250,14 @@ function initializeNumberFormat(numberFormat, locales, options) {
}
return initializeNumberFormat(TO_OBJECT(this), locales, options);
- },
- DONT_ENUM
+ }
);
/**
* NumberFormat resolvedOptions method.
*/
-%AddNamedProperty(Intl.NumberFormat.prototype, 'resolvedOptions', function() {
+InstallFunction(Intl.NumberFormat.prototype, 'resolvedOptions', function() {
if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -1301,24 +1286,19 @@ function initializeNumberFormat(numberFormat, locales, options) {
format[resolvedSymbol].currencyDisplay);
}
- if (%HasOwnProperty(format[resolvedSymbol], 'minimumSignificantDigits')) {
+ if (HAS_OWN_PROPERTY(format[resolvedSymbol], 'minimumSignificantDigits')) {
defineWECProperty(result, 'minimumSignificantDigits',
format[resolvedSymbol].minimumSignificantDigits);
}
- if (%HasOwnProperty(format[resolvedSymbol], 'maximumSignificantDigits')) {
+ if (HAS_OWN_PROPERTY(format[resolvedSymbol], 'maximumSignificantDigits')) {
defineWECProperty(result, 'maximumSignificantDigits',
format[resolvedSymbol].maximumSignificantDigits);
}
return result;
- },
- DONT_ENUM
+ }
);
-%FunctionSetName(Intl.NumberFormat.prototype.resolvedOptions,
- 'resolvedOptions');
-%FunctionRemovePrototype(Intl.NumberFormat.prototype.resolvedOptions);
-%SetNativeFlag(Intl.NumberFormat.prototype.resolvedOptions);
/**
@@ -1327,18 +1307,14 @@ function initializeNumberFormat(numberFormat, locales, options) {
* order in the returned list as in the input list.
* Options are optional parameter.
*/
-%AddNamedProperty(Intl.NumberFormat, 'supportedLocalesOf', function(locales) {
+InstallFunction(Intl.NumberFormat, 'supportedLocalesOf', function(locales) {
if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
return supportedLocalesOf('numberformat', locales, arguments[1]);
- },
- DONT_ENUM
+ }
);
-%FunctionSetName(Intl.NumberFormat.supportedLocalesOf, 'supportedLocalesOf');
-%FunctionRemovePrototype(Intl.NumberFormat.supportedLocalesOf);
-%SetNativeFlag(Intl.NumberFormat.supportedLocalesOf);
/**
@@ -1364,8 +1340,8 @@ function parseNumber(formatter, value) {
}
-addBoundMethod(Intl.NumberFormat, 'format', formatNumber, 1);
-addBoundMethod(Intl.NumberFormat, 'v8Parse', parseNumber, 1);
+AddBoundMethod(Intl.NumberFormat, 'format', formatNumber, 1);
+AddBoundMethod(Intl.NumberFormat, 'v8Parse', parseNumber, 1);
/**
* Returns a string that matches LDML representation of the options object.
@@ -1435,57 +1411,57 @@ function appendToLDMLString(option, pairs) {
*/
function fromLDMLString(ldmlString) {
// First remove '' quoted text, so we lose 'Uhr' strings.
- ldmlString = %_Call(StringReplace, ldmlString, GetQuotedStringRE(), '');
+ ldmlString = InternalRegExpReplace(GetQuotedStringRE(), ldmlString, '');
var options = {};
- var match = %_Call(StringMatch, ldmlString, /E{3,5}/g);
+ var match = InternalRegExpMatch(/E{3,5}/, ldmlString);
options = appendToDateTimeObject(
options, 'weekday', match, {EEEEE: 'narrow', EEE: 'short', EEEE: 'long'});
- match = %_Call(StringMatch, ldmlString, /G{3,5}/g);
+ match = InternalRegExpMatch(/G{3,5}/, ldmlString);
options = appendToDateTimeObject(
options, 'era', match, {GGGGG: 'narrow', GGG: 'short', GGGG: 'long'});
- match = %_Call(StringMatch, ldmlString, /y{1,2}/g);
+ match = InternalRegExpMatch(/y{1,2}/, ldmlString);
options = appendToDateTimeObject(
options, 'year', match, {y: 'numeric', yy: '2-digit'});
- match = %_Call(StringMatch, ldmlString, /M{1,5}/g);
+ match = InternalRegExpMatch(/M{1,5}/, ldmlString);
options = appendToDateTimeObject(options, 'month', match, {MM: '2-digit',
M: 'numeric', MMMMM: 'narrow', MMM: 'short', MMMM: 'long'});
// Sometimes we get L instead of M for month - standalone name.
- match = %_Call(StringMatch, ldmlString, /L{1,5}/g);
+ match = InternalRegExpMatch(/L{1,5}/, ldmlString);
options = appendToDateTimeObject(options, 'month', match, {LL: '2-digit',
L: 'numeric', LLLLL: 'narrow', LLL: 'short', LLLL: 'long'});
- match = %_Call(StringMatch, ldmlString, /d{1,2}/g);
+ match = InternalRegExpMatch(/d{1,2}/, ldmlString);
options = appendToDateTimeObject(
options, 'day', match, {d: 'numeric', dd: '2-digit'});
- match = %_Call(StringMatch, ldmlString, /h{1,2}/g);
+ match = InternalRegExpMatch(/h{1,2}/, ldmlString);
if (match !== null) {
options['hour12'] = true;
}
options = appendToDateTimeObject(
options, 'hour', match, {h: 'numeric', hh: '2-digit'});
- match = %_Call(StringMatch, ldmlString, /H{1,2}/g);
+ match = InternalRegExpMatch(/H{1,2}/, ldmlString);
if (match !== null) {
options['hour12'] = false;
}
options = appendToDateTimeObject(
options, 'hour', match, {H: 'numeric', HH: '2-digit'});
- match = %_Call(StringMatch, ldmlString, /m{1,2}/g);
+ match = InternalRegExpMatch(/m{1,2}/, ldmlString);
options = appendToDateTimeObject(
options, 'minute', match, {m: 'numeric', mm: '2-digit'});
- match = %_Call(StringMatch, ldmlString, /s{1,2}/g);
+ match = InternalRegExpMatch(/s{1,2}/, ldmlString);
options = appendToDateTimeObject(
options, 'second', match, {s: 'numeric', ss: '2-digit'});
- match = %_Call(StringMatch, ldmlString, /z|zzzz/g);
+ match = InternalRegExpMatch(/z|zzzz/, ldmlString);
options = appendToDateTimeObject(
options, 'timeZoneName', match, {z: 'short', zzzz: 'long'});
@@ -1495,7 +1471,7 @@ function fromLDMLString(ldmlString) {
function appendToDateTimeObject(options, option, match, pairs) {
if (IS_NULL(match)) {
- if (!%HasOwnProperty(options, option)) {
+ if (!HAS_OWN_PROPERTY(options, option)) {
defineWEProperty(options, option, UNDEFINED);
}
return options;
@@ -1658,7 +1634,7 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
*
* @constructor
*/
-%AddNamedProperty(Intl, 'DateTimeFormat', function() {
+InstallConstructor(Intl, 'DateTimeFormat', function() {
var locales = arguments[0];
var options = arguments[1];
@@ -1668,15 +1644,14 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
}
return initializeDateTimeFormat(TO_OBJECT(this), locales, options);
- },
- DONT_ENUM
+ }
);
/**
* DateTimeFormat resolvedOptions method.
*/
-%AddNamedProperty(Intl.DateTimeFormat.prototype, 'resolvedOptions', function() {
+InstallFunction(Intl.DateTimeFormat.prototype, 'resolvedOptions', function() {
if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -1735,13 +1710,8 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
addWECPropertyIfDefined(result, 'second', fromPattern.second);
return result;
- },
- DONT_ENUM
+ }
);
-%FunctionSetName(Intl.DateTimeFormat.prototype.resolvedOptions,
- 'resolvedOptions');
-%FunctionRemovePrototype(Intl.DateTimeFormat.prototype.resolvedOptions);
-%SetNativeFlag(Intl.DateTimeFormat.prototype.resolvedOptions);
/**
@@ -1750,18 +1720,14 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
* order in the returned list as in the input list.
* Options are optional parameter.
*/
-%AddNamedProperty(Intl.DateTimeFormat, 'supportedLocalesOf', function(locales) {
+InstallFunction(Intl.DateTimeFormat, 'supportedLocalesOf', function(locales) {
if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
return supportedLocalesOf('dateformat', locales, arguments[1]);
- },
- DONT_ENUM
+ }
);
-%FunctionSetName(Intl.DateTimeFormat.supportedLocalesOf, 'supportedLocalesOf');
-%FunctionRemovePrototype(Intl.DateTimeFormat.supportedLocalesOf);
-%SetNativeFlag(Intl.DateTimeFormat.supportedLocalesOf);
/**
@@ -1797,8 +1763,8 @@ function parseDate(formatter, value) {
// 0 because date is optional argument.
-addBoundMethod(Intl.DateTimeFormat, 'format', formatDate, 0);
-addBoundMethod(Intl.DateTimeFormat, 'v8Parse', parseDate, 1);
+AddBoundMethod(Intl.DateTimeFormat, 'format', formatDate, 0);
+AddBoundMethod(Intl.DateTimeFormat, 'v8Parse', parseDate, 1);
/**
@@ -1822,7 +1788,7 @@ function canonicalizeTimeZoneID(tzID) {
// We expect only _, '-' and / beside ASCII letters.
// All inputs should conform to Area/Location(/Location)* from now on.
- var match = %_Call(StringMatch, tzID, GetTimezoneNameCheckRE());
+ var match = InternalRegExpMatch(GetTimezoneNameCheckRE(), tzID);
if (IS_NULL(match)) throw MakeRangeError(kExpectedTimezoneID, tzID);
var result = toTitleCaseTimezoneLocation(match[1]) + '/' +
@@ -1885,7 +1851,7 @@ function initializeBreakIterator(iterator, locales, options) {
*
* @constructor
*/
-%AddNamedProperty(Intl, 'v8BreakIterator', function() {
+InstallConstructor(Intl, 'v8BreakIterator', function() {
var locales = arguments[0];
var options = arguments[1];
@@ -1895,15 +1861,14 @@ function initializeBreakIterator(iterator, locales, options) {
}
return initializeBreakIterator(TO_OBJECT(this), locales, options);
- },
- DONT_ENUM
+ }
);
/**
* BreakIterator resolvedOptions method.
*/
-%AddNamedProperty(Intl.v8BreakIterator.prototype, 'resolvedOptions',
+InstallFunction(Intl.v8BreakIterator.prototype, 'resolvedOptions',
function() {
if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
@@ -1922,13 +1887,8 @@ function initializeBreakIterator(iterator, locales, options) {
locale: locale,
type: segmenter[resolvedSymbol].type
};
- },
- DONT_ENUM
+ }
);
-%FunctionSetName(Intl.v8BreakIterator.prototype.resolvedOptions,
- 'resolvedOptions');
-%FunctionRemovePrototype(Intl.v8BreakIterator.prototype.resolvedOptions);
-%SetNativeFlag(Intl.v8BreakIterator.prototype.resolvedOptions);
/**
@@ -1937,19 +1897,15 @@ function initializeBreakIterator(iterator, locales, options) {
* order in the returned list as in the input list.
* Options are optional parameter.
*/
-%AddNamedProperty(Intl.v8BreakIterator, 'supportedLocalesOf',
+InstallFunction(Intl.v8BreakIterator, 'supportedLocalesOf',
function(locales) {
if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
return supportedLocalesOf('breakiterator', locales, arguments[1]);
- },
- DONT_ENUM
+ }
);
-%FunctionSetName(Intl.v8BreakIterator.supportedLocalesOf, 'supportedLocalesOf');
-%FunctionRemovePrototype(Intl.v8BreakIterator.supportedLocalesOf);
-%SetNativeFlag(Intl.v8BreakIterator.supportedLocalesOf);
/**
@@ -1994,11 +1950,11 @@ function breakType(iterator) {
}
-addBoundMethod(Intl.v8BreakIterator, 'adoptText', adoptText, 1);
-addBoundMethod(Intl.v8BreakIterator, 'first', first, 0);
-addBoundMethod(Intl.v8BreakIterator, 'next', next, 0);
-addBoundMethod(Intl.v8BreakIterator, 'current', current, 0);
-addBoundMethod(Intl.v8BreakIterator, 'breakType', breakType, 0);
+AddBoundMethod(Intl.v8BreakIterator, 'adoptText', adoptText, 1);
+AddBoundMethod(Intl.v8BreakIterator, 'first', first, 0);
+AddBoundMethod(Intl.v8BreakIterator, 'next', next, 0);
+AddBoundMethod(Intl.v8BreakIterator, 'current', current, 0);
+AddBoundMethod(Intl.v8BreakIterator, 'breakType', breakType, 0);
// Save references to Intl objects and methods we use, for added security.
var savedObjects = {
@@ -2036,18 +1992,6 @@ function cachedOrNewService(service, locales, options, defaults) {
return new savedObjects[service](locales, useOptions);
}
-
-function OverrideFunction(object, name, f) {
- %CheckIsBootstrapping();
- ObjectDefineProperty(object, name, { value: f,
- writeable: true,
- configurable: true,
- enumerable: false });
- %FunctionSetName(f, name);
- %FunctionRemovePrototype(f);
- %SetNativeFlag(f);
-}
-
/**
* Compares this and that, and returns less than 0, 0 or greater than 0 value.
* Overrides the built-in method.
diff --git a/deps/v8/src/js/json.js b/deps/v8/src/js/json.js
index 73d7802be9..c6dbed9cbb 100644
--- a/deps/v8/src/js/json.js
+++ b/deps/v8/src/js/json.js
@@ -19,6 +19,10 @@ var MakeTypeError;
var MaxSimple;
var MinSimple;
var ObjectHasOwnProperty;
+var Stack;
+var StackHas;
+var StackPop;
+var StackPush;
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
@@ -26,6 +30,10 @@ utils.Import(function(from) {
MaxSimple = from.MaxSimple;
MinSimple = from.MinSimple;
ObjectHasOwnProperty = from.ObjectHasOwnProperty;
+ Stack = from.Stack;
+ StackHas = from.StackHas;
+ StackPop = from.StackPop;
+ StackPush = from.StackPush;
});
// -------------------------------------------------------------------
@@ -51,7 +59,9 @@ function InternalizeJSONProperty(holder, name, reviver) {
}
}
} else {
- for (var p of %object_keys(val)) {
+ var keys = %object_keys(val);
+ for (var i = 0; i < keys.length; i++) {
+ var p = keys[i];
var newElement = InternalizeJSONProperty(val, p, reviver);
if (IS_UNDEFINED(newElement)) {
%reflect_delete_property(val, p);
@@ -76,7 +86,8 @@ function JSONParse(text, reviver) {
function SerializeArray(value, replacer, stack, indent, gap) {
- if (!%PushIfAbsent(stack, value)) throw MakeTypeError(kCircularStructure);
+ if (StackHas(stack, value)) throw MakeTypeError(kCircularStructure);
+ StackPush(stack, value);
var stepback = indent;
indent += gap;
var partial = new InternalArray();
@@ -99,13 +110,14 @@ function SerializeArray(value, replacer, stack, indent, gap) {
} else {
final = "[]";
}
- stack.pop();
+ StackPop(stack);
return final;
}
function SerializeObject(value, replacer, stack, indent, gap) {
- if (!%PushIfAbsent(stack, value)) throw MakeTypeError(kCircularStructure);
+ if (StackHas(stack, value)) throw MakeTypeError(kCircularStructure);
+ StackPush(stack, value);
var stepback = indent;
indent += gap;
var partial = new InternalArray();
@@ -122,7 +134,9 @@ function SerializeObject(value, replacer, stack, indent, gap) {
}
}
} else {
- for (var p of %object_keys(value)) {
+ var keys = %object_keys(value);
+ for (var i = 0; i < keys.length; i++) {
+ var p = keys[i];
var strP = JSONSerialize(p, value, replacer, stack, indent, gap);
if (!IS_UNDEFINED(strP)) {
var member = %QuoteJSONString(p) + ":";
@@ -142,7 +156,7 @@ function SerializeObject(value, replacer, stack, indent, gap) {
} else {
final = "{}";
}
- stack.pop();
+ StackPop(stack);
return final;
}
@@ -237,7 +251,7 @@ function JSONStringify(value, replacer, space) {
if (!IS_CALLABLE(replacer) && !property_list && !gap && !IS_PROXY(value)) {
return %BasicJSONStringify(value);
}
- return JSONSerialize('', {'': value}, replacer, new InternalArray(), "", gap);
+ return JSONSerialize('', {'': value}, replacer, new Stack(), "", gap);
}
// -------------------------------------------------------------------
@@ -275,7 +289,7 @@ function JsonSerializeAdapter(key, object) {
var holder = {};
holder[key] = object;
// No need to pass the actual holder since there is no replacer function.
- return JSONSerialize(key, holder, UNDEFINED, new InternalArray(), "", "");
+ return JSONSerialize(key, holder, UNDEFINED, new Stack(), "", "");
}
%InstallToContext(["json_serialize_adapter", JsonSerializeAdapter]);
diff --git a/deps/v8/src/js/macros.py b/deps/v8/src/js/macros.py
index b2a785697b..a4c7f53293 100644
--- a/deps/v8/src/js/macros.py
+++ b/deps/v8/src/js/macros.py
@@ -88,9 +88,9 @@ macro IS_SHAREDARRAYBUFFER(arg) = (%_ClassOf(arg) === 'SharedArrayBuffer');
macro IS_SIMD_VALUE(arg) = (%IsSimdValue(arg));
macro IS_STRING(arg) = (typeof(arg) === 'string');
macro IS_STRING_WRAPPER(arg) = (%_ClassOf(arg) === 'String');
-macro IS_STRONG(arg) = (%IsStrong(arg));
macro IS_SYMBOL(arg) = (typeof(arg) === 'symbol');
macro IS_SYMBOL_WRAPPER(arg) = (%_ClassOf(arg) === 'Symbol');
+macro IS_TYPEDARRAY(arg) = (%_IsTypedArray(arg));
macro IS_UNDEFINED(arg) = (arg === (void 0));
macro IS_WEAKMAP(arg) = (%_ClassOf(arg) === 'WeakMap');
macro IS_WEAKSET(arg) = (%_ClassOf(arg) === 'WeakSet');
@@ -122,12 +122,12 @@ macro TO_PRIMITIVE_NUMBER(arg) = (%_ToPrimitive_Number(arg));
macro TO_PRIMITIVE_STRING(arg) = (%_ToPrimitive_String(arg));
macro TO_NAME(arg) = (%_ToName(arg));
macro JSON_NUMBER_TO_STRING(arg) = ((%_IsSmi(%IS_VAR(arg)) || arg - arg == 0) ? %_NumberToString(arg) : "null");
-macro HAS_OWN_PROPERTY(arg, index) = (%_Call(ObjectHasOwnProperty, arg, index));
-macro HAS_INDEX(array, index, is_array) = ((is_array && %_HasFastPackedElements(%IS_VAR(array))) ? (index < array.length) : (index in array));
+macro HAS_OWN_PROPERTY(obj, key) = (%_Call(ObjectHasOwnProperty, obj, key));
+macro HAS_INDEX(array, index, is_array) = ((is_array && %_HasFastPackedElements(%IS_VAR(array)) && (index < array.length)) || (index in array));
# Private names.
macro IS_PRIVATE(sym) = (%SymbolIsPrivate(sym));
-macro HAS_PRIVATE(obj, sym) = (%HasOwnProperty(obj, sym));
+macro HAS_PRIVATE(obj, key) = HAS_OWN_PROPERTY(obj, key);
macro HAS_DEFINED_PRIVATE(obj, sym) = (!IS_UNDEFINED(obj[sym]));
macro GET_PRIVATE(obj, sym) = (obj[sym]);
macro SET_PRIVATE(obj, sym, val) = (obj[sym] = val);
@@ -255,7 +255,6 @@ define kObjectObserve = 6;
define kForcedGC = 7;
define kSloppyMode = 8;
define kStrictMode = 9;
-define kStrongMode = 10;
define kRegExpPrototypeStickyGetter = 11;
define kRegExpPrototypeToString = 12;
define kRegExpPrototypeUnicodeGetter = 13;
@@ -265,3 +264,15 @@ define kIntlResolved = 16;
define kPromiseChain = 17;
define kPromiseAccept = 18;
define kPromiseDefer = 19;
+define kHtmlCommentInExternalScript = 20;
+define kHtmlComment = 21;
+define kSloppyModeBlockScopedFunctionRedefinition = 22;
+define kForInInitializer = 23;
+define kArrayProtectorDirtied = 24;
+define kArraySpeciesModified = 25;
+define kArrayPrototypeConstructorModified = 26;
+define kArrayInstanceProtoModified = 27;
+define kArrayInstanceConstructorModified = 28;
+define kLegacyFunctionDeclaration = 29;
+define kRegExpPrototypeSourceGetter = 30;
+define kRegExpPrototypeOldFlagGetter = 31;
diff --git a/deps/v8/src/js/math.js b/deps/v8/src/js/math.js
index a698fd4285..f8ad6b1fe6 100644
--- a/deps/v8/src/js/math.js
+++ b/deps/v8/src/js/math.js
@@ -10,7 +10,6 @@
// -------------------------------------------------------------------
// Imports
-define kRandomBatchSize = 64;
// The first two slots are reserved to persist PRNG state.
define kRandomNumberStart = 2;
@@ -19,7 +18,7 @@ var GlobalMath = global.Math;
var GlobalObject = global.Object;
var InternalArray = utils.InternalArray;
var NaN = %GetRootNaN();
-var nextRandomIndex = kRandomBatchSize;
+var nextRandomIndex = 0;
var randomNumbers = UNDEFINED;
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
@@ -31,33 +30,13 @@ function MathAbs(x) {
return (x > 0) ? x : 0 - x;
}
-// ECMA 262 - 15.8.2.2
-function MathAcosJS(x) {
- return %_MathAcos(+x);
-}
-
-// ECMA 262 - 15.8.2.3
-function MathAsinJS(x) {
- return %_MathAsin(+x);
-}
-
-// ECMA 262 - 15.8.2.4
-function MathAtanJS(x) {
- return %_MathAtan(+x);
-}
-
// ECMA 262 - 15.8.2.5
// The naming of y and x matches the spec, as does the order in which
// ToNumber (valueOf) is called.
function MathAtan2JS(y, x) {
y = +y;
x = +x;
- return %_MathAtan2(y, x);
-}
-
-// ECMA 262 - 15.8.2.6
-function MathCeil(x) {
- return -%_MathFloor(-x);
+ return %MathAtan2(y, x);
}
// ECMA 262 - 15.8.2.8
@@ -65,11 +44,6 @@ function MathExp(x) {
return %MathExpRT(TO_NUMBER(x));
}
-// ECMA 262 - 15.8.2.9
-function MathFloorJS(x) {
- return %_MathFloor(+x);
-}
-
// ECMA 262 - 15.8.2.10
function MathLog(x) {
return %_MathLogRT(TO_NUMBER(x));
@@ -82,34 +56,24 @@ function MathPowJS(x, y) {
// ECMA 262 - 15.8.2.14
function MathRandom() {
- if (nextRandomIndex >= kRandomBatchSize) {
+ // While creating a startup snapshot, %GenerateRandomNumbers returns a
+ // normal array containing a single random number, and has to be called for
+ // every new random number.
+ // Otherwise, it returns a pre-populated typed array of random numbers. The
+ // first two elements are reserved for the PRNG state.
+ if (nextRandomIndex <= kRandomNumberStart) {
randomNumbers = %GenerateRandomNumbers(randomNumbers);
- nextRandomIndex = kRandomNumberStart;
+ nextRandomIndex = randomNumbers.length;
}
- return randomNumbers[nextRandomIndex++];
+ return randomNumbers[--nextRandomIndex];
}
function MathRandomRaw() {
- if (nextRandomIndex >= kRandomBatchSize) {
+ if (nextRandomIndex <= kRandomNumberStart) {
randomNumbers = %GenerateRandomNumbers(randomNumbers);
- nextRandomIndex = kRandomNumberStart;
+ nextRandomIndex = randomNumbers.length;
}
- return %_DoubleLo(randomNumbers[nextRandomIndex++]) & 0x3FFFFFFF;
-}
-
-// ECMA 262 - 15.8.2.15
-function MathRound(x) {
- return %RoundNumber(TO_NUMBER(x));
-}
-
-// ECMA 262 - 15.8.2.17
-function MathSqrtJS(x) {
- return %_MathSqrt(+x);
-}
-
-// Non-standard extension.
-function MathImul(x, y) {
- return %NumberImul(TO_NUMBER(x), TO_NUMBER(y));
+ return %_DoubleLo(randomNumbers[--nextRandomIndex]) & 0x3FFFFFFF;
}
// ES6 draft 09-27-13, section 20.2.2.28.
@@ -121,23 +85,14 @@ function MathSign(x) {
return x;
}
-// ES6 draft 09-27-13, section 20.2.2.34.
-function MathTrunc(x) {
- x = +x;
- if (x > 0) return %_MathFloor(x);
- if (x < 0) return -%_MathFloor(-x);
- // -0, 0 or NaN.
- return x;
-}
-
// ES6 draft 09-27-13, section 20.2.2.5.
function MathAsinh(x) {
x = TO_NUMBER(x);
// Idempotent for NaN, +/-0 and +/-Infinity.
if (x === 0 || !NUMBER_IS_FINITE(x)) return x;
- if (x > 0) return MathLog(x + %_MathSqrt(x * x + 1));
+ if (x > 0) return MathLog(x + %math_sqrt(x * x + 1));
// This is to prevent numerical errors caused by large negative x.
- return -MathLog(-x + %_MathSqrt(x * x + 1));
+ return -MathLog(-x + %math_sqrt(x * x + 1));
}
// ES6 draft 09-27-13, section 20.2.2.3.
@@ -146,7 +101,7 @@ function MathAcosh(x) {
if (x < 1) return NaN;
// Idempotent for NaN and +Infinity.
if (!NUMBER_IS_FINITE(x)) return x;
- return MathLog(x + %_MathSqrt(x + 1) * %_MathSqrt(x - 1));
+ return MathLog(x + %math_sqrt(x + 1) * %math_sqrt(x - 1));
}
// ES6 draft 09-27-13, section 20.2.2.7.
@@ -185,17 +140,7 @@ function MathHypot(x, y) { // Function length is 2.
compensation = (preliminary - sum) - summand;
sum = preliminary;
}
- return %_MathSqrt(sum) * max;
-}
-
-// ES6 draft 09-27-13, section 20.2.2.16.
-function MathFroundJS(x) {
- return %MathFround(TO_NUMBER(x));
-}
-
-// ES6 draft 07-18-14, section 20.2.2.11
-function MathClz32JS(x) {
- return %_MathClz32(x >>> 0);
+ return %math_sqrt(sum) * max;
}
// ES6 draft 09-27-13, section 20.2.2.9.
@@ -213,7 +158,7 @@ macro NEWTON_ITERATION_CBRT(x, approx)
endmacro
function CubeRoot(x) {
- var approx_hi = MathFloorJS(%_DoubleHi(x) / 3) + 0x2A9F7893;
+ var approx_hi = %math_floor(%_DoubleHi(x) / 3) + 0x2A9F7893;
var approx = %_ConstructDouble(approx_hi | 0, 0);
approx = NEWTON_ITERATION_CBRT(x, approx);
approx = NEWTON_ITERATION_CBRT(x, approx);
@@ -223,6 +168,10 @@ function CubeRoot(x) {
// -------------------------------------------------------------------
+%InstallToContext([
+ "math_pow", MathPowJS,
+]);
+
%AddNamedProperty(GlobalMath, toStringTagSymbol, "Math", READ_ONLY | DONT_ENUM);
// Set up math constants.
@@ -246,41 +195,22 @@ utils.InstallConstants(GlobalMath, [
utils.InstallFunctions(GlobalMath, DONT_ENUM, [
"random", MathRandom,
"abs", MathAbs,
- "acos", MathAcosJS,
- "asin", MathAsinJS,
- "atan", MathAtanJS,
- "ceil", MathCeil,
"exp", MathExp,
- "floor", MathFloorJS,
"log", MathLog,
- "round", MathRound,
- "sqrt", MathSqrtJS,
"atan2", MathAtan2JS,
"pow", MathPowJS,
- "imul", MathImul,
"sign", MathSign,
- "trunc", MathTrunc,
"asinh", MathAsinh,
"acosh", MathAcosh,
"atanh", MathAtanh,
"hypot", MathHypot,
- "fround", MathFroundJS,
- "clz32", MathClz32JS,
"cbrt", MathCbrt
]);
%SetForceInlineFlag(MathAbs);
-%SetForceInlineFlag(MathAcosJS);
-%SetForceInlineFlag(MathAsinJS);
-%SetForceInlineFlag(MathAtanJS);
%SetForceInlineFlag(MathAtan2JS);
-%SetForceInlineFlag(MathCeil);
-%SetForceInlineFlag(MathClz32JS);
-%SetForceInlineFlag(MathFloorJS);
%SetForceInlineFlag(MathRandom);
%SetForceInlineFlag(MathSign);
-%SetForceInlineFlag(MathSqrtJS);
-%SetForceInlineFlag(MathTrunc);
// -------------------------------------------------------------------
// Exports
@@ -288,7 +218,6 @@ utils.InstallFunctions(GlobalMath, DONT_ENUM, [
utils.Export(function(to) {
to.MathAbs = MathAbs;
to.MathExp = MathExp;
- to.MathFloor = MathFloorJS;
to.IntRandom = MathRandomRaw;
});
diff --git a/deps/v8/src/js/messages.js b/deps/v8/src/js/messages.js
index feb14d3788..4529981c30 100644
--- a/deps/v8/src/js/messages.js
+++ b/deps/v8/src/js/messages.js
@@ -23,7 +23,6 @@ var callSitePositionSymbol =
utils.ImportNow("call_site_position_symbol");
var callSiteStrictSymbol =
utils.ImportNow("call_site_strict_symbol");
-var FLAG_harmony_tostring;
var Float32x4ToString;
var formattedStackTraceSymbol =
utils.ImportNow("formatted_stack_trace_symbol");
@@ -34,6 +33,7 @@ var Int8x16ToString;
var InternalArray = utils.InternalArray;
var internalErrorSymbol = utils.ImportNow("internal_error_symbol");
var ObjectDefineProperty;
+var ObjectHasOwnProperty;
var ObjectToString = utils.ImportNow("object_to_string");
var Script = utils.ImportNow("Script");
var stackTraceSymbol = utils.ImportNow("stack_trace_symbol");
@@ -56,6 +56,7 @@ utils.Import(function(from) {
Int32x4ToString = from.Int32x4ToString;
Int8x16ToString = from.Int8x16ToString;
ObjectDefineProperty = from.ObjectDefineProperty;
+ ObjectHasOwnProperty = from.ObjectHasOwnProperty;
StringCharAt = from.StringCharAt;
StringIndexOf = from.StringIndexOf;
StringSubstring = from.StringSubstring;
@@ -65,10 +66,6 @@ utils.Import(function(from) {
Uint8x16ToString = from.Uint8x16ToString;
});
-utils.ImportFromExperimental(function(from) {
- FLAG_harmony_tostring = from.FLAG_harmony_tostring;
-});
-
// -------------------------------------------------------------------
var GlobalError;
@@ -85,13 +82,8 @@ function NoSideEffectsObjectToString() {
if (IS_NULL(this)) return "[object Null]";
var O = TO_OBJECT(this);
var builtinTag = %_ClassOf(O);
- var tag;
- if (FLAG_harmony_tostring) {
- tag = %GetDataProperty(O, toStringTagSymbol);
- if (!IS_STRING(tag)) {
- tag = builtinTag;
- }
- } else {
+ var tag = %GetDataProperty(O, toStringTagSymbol);
+ if (!IS_STRING(tag)) {
tag = builtinTag;
}
return `[object ${tag}]`;
@@ -578,69 +570,90 @@ function CallSite(receiver, fun, pos, strict_mode) {
SET_PRIVATE(this, callSiteStrictSymbol, TO_BOOLEAN(strict_mode));
}
+function CheckCallSite(obj, name) {
+ if (!IS_RECEIVER(obj) || !HAS_PRIVATE(obj, callSiteFunctionSymbol)) {
+ throw MakeTypeError(kCallSiteMethod, name);
+ }
+}
+
function CallSiteGetThis() {
+ CheckCallSite(this, "getThis");
return GET_PRIVATE(this, callSiteStrictSymbol)
? UNDEFINED : GET_PRIVATE(this, callSiteReceiverSymbol);
}
function CallSiteGetFunction() {
+ CheckCallSite(this, "getFunction");
return GET_PRIVATE(this, callSiteStrictSymbol)
? UNDEFINED : GET_PRIVATE(this, callSiteFunctionSymbol);
}
function CallSiteGetPosition() {
+ CheckCallSite(this, "getPosition");
return GET_PRIVATE(this, callSitePositionSymbol);
}
function CallSiteGetTypeName() {
+ CheckCallSite(this, "getTypeName");
return GetTypeName(GET_PRIVATE(this, callSiteReceiverSymbol), false);
}
function CallSiteIsToplevel() {
+ CheckCallSite(this, "isTopLevel");
return %CallSiteIsToplevelRT(this);
}
function CallSiteIsEval() {
+ CheckCallSite(this, "isEval");
return %CallSiteIsEvalRT(this);
}
function CallSiteGetEvalOrigin() {
+ CheckCallSite(this, "getEvalOrigin");
var script = %FunctionGetScript(GET_PRIVATE(this, callSiteFunctionSymbol));
return FormatEvalOrigin(script);
}
function CallSiteGetScriptNameOrSourceURL() {
+ CheckCallSite(this, "getScriptNameOrSourceURL");
return %CallSiteGetScriptNameOrSourceUrlRT(this);
}
function CallSiteGetFunctionName() {
// See if the function knows its own name
+ CheckCallSite(this, "getFunctionName");
return %CallSiteGetFunctionNameRT(this);
}
function CallSiteGetMethodName() {
// See if we can find a unique property on the receiver that holds
// this function.
+ CheckCallSite(this, "getMethodName");
return %CallSiteGetMethodNameRT(this);
}
function CallSiteGetFileName() {
+ CheckCallSite(this, "getFileName");
return %CallSiteGetFileNameRT(this);
}
function CallSiteGetLineNumber() {
+ CheckCallSite(this, "getLineNumber");
return %CallSiteGetLineNumberRT(this);
}
function CallSiteGetColumnNumber() {
+ CheckCallSite(this, "getColumnNumber");
return %CallSiteGetColumnNumberRT(this);
}
function CallSiteIsNative() {
+ CheckCallSite(this, "isNative");
return %CallSiteIsNativeRT(this);
}
function CallSiteIsConstructor() {
+ CheckCallSite(this, "isConstructor");
return %CallSiteIsConstructorRT(this);
}
@@ -782,13 +795,15 @@ function FormatErrorString(error) {
function GetStackFrames(raw_stack) {
+ var internal_raw_stack = new InternalArray();
+ %MoveArrayContents(raw_stack, internal_raw_stack);
var frames = new InternalArray();
- var sloppy_frames = raw_stack[0];
- for (var i = 1; i < raw_stack.length; i += 4) {
- var recv = raw_stack[i];
- var fun = raw_stack[i + 1];
- var code = raw_stack[i + 2];
- var pc = raw_stack[i + 3];
+ var sloppy_frames = internal_raw_stack[0];
+ for (var i = 1; i < internal_raw_stack.length; i += 4) {
+ var recv = internal_raw_stack[i];
+ var fun = internal_raw_stack[i + 1];
+ var code = internal_raw_stack[i + 2];
+ var pc = internal_raw_stack[i + 3];
var pos = %_IsSmi(code) ? code : %FunctionGetPositionForOffset(code, pc);
sloppy_frames--;
frames.push(new CallSite(recv, fun, pos, (sloppy_frames < 0)));
diff --git a/deps/v8/src/js/prologue.js b/deps/v8/src/js/prologue.js
index 24225a0a00..f9589a51c2 100644
--- a/deps/v8/src/js/prologue.js
+++ b/deps/v8/src/js/prologue.js
@@ -126,6 +126,18 @@ function InstallGetterSetter(object, name, getter, setter, attributes) {
}
+function OverrideFunction(object, name, f, afterInitialBootstrap) {
+ %CheckIsBootstrapping();
+ %ObjectDefineProperty(object, name, { value: f,
+ writeable: true,
+ configurable: true,
+ enumerable: false });
+ SetFunctionName(f, name);
+ if (!afterInitialBootstrap) %FunctionRemovePrototype(f);
+ %SetNativeFlag(f);
+}
+
+
// Prevents changes to the prototype of a built-in function.
// The "prototype" property of the function object is made non-configurable,
// and the prototype object is made non-extensible. The latter prevents
@@ -175,18 +187,26 @@ function PostNatives(utils) {
"GetMethod",
"IsNaN",
"MakeError",
+ "MakeRangeError",
"MakeTypeError",
"MapEntries",
"MapIterator",
"MapIteratorNext",
"MaxSimple",
"MinSimple",
+ "NumberIsInteger",
"ObjectDefineProperty",
"ObserveArrayMethods",
"ObserveObjectMethods",
"PromiseChain",
"PromiseDeferred",
"PromiseResolved",
+ "RegExpSubclassExecJS",
+ "RegExpSubclassMatch",
+ "RegExpSubclassReplace",
+ "RegExpSubclassSearch",
+ "RegExpSubclassSplit",
+ "RegExpSubclassTest",
"SetIterator",
"SetIteratorNext",
"SetValues",
@@ -206,6 +226,10 @@ function PostNatives(utils) {
"to_string_tag_symbol",
"object_to_string",
"species_symbol",
+ "match_symbol",
+ "replace_symbol",
+ "search_symbol",
+ "split_symbol",
];
var filtered_exports = {};
@@ -284,6 +308,7 @@ utils.InstallConstants = InstallConstants;
utils.InstallFunctions = InstallFunctions;
utils.InstallGetter = InstallGetter;
utils.InstallGetterSetter = InstallGetterSetter;
+utils.OverrideFunction = OverrideFunction;
utils.SetUpLockedPrototype = SetUpLockedPrototype;
utils.PostNatives = PostNatives;
utils.PostExperimentals = PostExperimentals;
@@ -323,14 +348,14 @@ extrasUtils.createPrivateSymbol = function createPrivateSymbol(name) {
// indirection and slowness given how un-optimized bind is.
extrasUtils.simpleBind = function simpleBind(func, thisArg) {
- return function() {
- return %Apply(func, thisArg, arguments, 0, arguments.length);
+ return function(...args) {
+ return %reflect_apply(func, thisArg, args);
};
};
extrasUtils.uncurryThis = function uncurryThis(func) {
- return function(thisArg) {
- return %Apply(func, thisArg, arguments, 1, arguments.length - 1);
+ return function(thisArg, ...args) {
+ return %reflect_apply(func, thisArg, args);
};
};
diff --git a/deps/v8/src/js/promise.js b/deps/v8/src/js/promise.js
index 8cf6a36cef..bcf826a101 100644
--- a/deps/v8/src/js/promise.js
+++ b/deps/v8/src/js/promise.js
@@ -61,13 +61,13 @@ function CreateResolvingFunctions(promise) {
var GlobalPromise = function Promise(resolver) {
if (resolver === promiseRawSymbol) {
- return %NewObject(GlobalPromise, new.target);
+ return %_NewObject(GlobalPromise, new.target);
}
if (IS_UNDEFINED(new.target)) throw MakeTypeError(kNotAPromise, this);
if (!IS_CALLABLE(resolver))
throw MakeTypeError(kResolverNotAFunction, resolver);
- var promise = PromiseInit(%NewObject(GlobalPromise, new.target));
+ var promise = PromiseInit(%_NewObject(GlobalPromise, new.target));
var callbacks = CreateResolvingFunctions(promise);
try {
@@ -89,9 +89,6 @@ function PromiseSet(promise, status, value, onResolve, onReject) {
SET_PRIVATE(promise, promiseValueSymbol, value);
SET_PRIVATE(promise, promiseOnResolveSymbol, onResolve);
SET_PRIVATE(promise, promiseOnRejectSymbol, onReject);
- if (DEBUG_IS_ACTIVE) {
- %DebugPromiseEvent({ promise: promise, status: status, value: value });
- }
return promise;
}
@@ -217,8 +214,6 @@ function PromiseReject(promise, r) {
PromiseDone(promise, -1, r, promiseOnRejectSymbol)
}
-// Convenience.
-
function NewPromiseCapability(C) {
if (C === GlobalPromise) {
// Optimized case, avoid extra closure.
@@ -239,6 +234,9 @@ function NewPromiseCapability(C) {
result.reject = reject;
});
+ if (!IS_CALLABLE(result.resolve) || !IS_CALLABLE(result.reject))
+ throw MakeTypeError(kPromiseNonCallable);
+
return result;
}
@@ -305,9 +303,6 @@ function PromiseThen(onResolve, onReject) {
}
// Mark this promise as having handler.
SET_PRIVATE(this, promiseHasHandlerSymbol, true);
- if (DEBUG_IS_ACTIVE) {
- %DebugPromiseEvent({ promise: deferred.promise, parentPromise: this });
- }
return deferred.promise;
}
diff --git a/deps/v8/src/js/regexp.js b/deps/v8/src/js/regexp.js
index e80d0190f4..cc8cb41de1 100644
--- a/deps/v8/src/js/regexp.js
+++ b/deps/v8/src/js/regexp.js
@@ -4,26 +4,37 @@
(function(global, utils) {
+'use strict';
+
%CheckIsBootstrapping();
// -------------------------------------------------------------------
// Imports
+var AddIndexedProperty;
var ExpandReplacement;
+var GlobalArray = global.Array;
var GlobalObject = global.Object;
var GlobalRegExp = global.RegExp;
var GlobalRegExpPrototype;
var InternalArray = utils.InternalArray;
var InternalPackedArray = utils.InternalPackedArray;
var MakeTypeError;
+var MaxSimple;
+var MinSimple;
var matchSymbol = utils.ImportNow("match_symbol");
var replaceSymbol = utils.ImportNow("replace_symbol");
var searchSymbol = utils.ImportNow("search_symbol");
var splitSymbol = utils.ImportNow("split_symbol");
+var SpeciesConstructor;
utils.Import(function(from) {
+ AddIndexedProperty = from.AddIndexedProperty;
ExpandReplacement = from.ExpandReplacement;
MakeTypeError = from.MakeTypeError;
+ MaxSimple = from.MaxSimple;
+ MinSimple = from.MinSimple;
+ SpeciesConstructor = from.SpeciesConstructor;
});
// -------------------------------------------------------------------
@@ -44,6 +55,7 @@ var RegExpLastMatchInfo = new InternalPackedArray(
// -------------------------------------------------------------------
+// ES#sec-isregexp IsRegExp ( argument )
function IsRegExp(o) {
if (!IS_RECEIVER(o)) return false;
var is_regexp = o[matchSymbol];
@@ -52,7 +64,8 @@ function IsRegExp(o) {
}
-// ES6 section 21.2.3.2.2
+// ES#sec-regexpinitialize
+// Runtime Semantics: RegExpInitialize ( obj, pattern, flags )
function RegExpInitialize(object, pattern, flags) {
pattern = IS_UNDEFINED(pattern) ? '' : TO_STRING(pattern);
flags = IS_UNDEFINED(flags) ? '' : TO_STRING(flags);
@@ -70,6 +83,8 @@ function PatternFlags(pattern) {
}
+// ES#sec-regexp-pattern-flags
+// RegExp ( pattern, flags )
function RegExpConstructor(pattern, flags) {
var newtarget = new.target;
var pattern_is_regexp = IsRegExp(pattern);
@@ -94,11 +109,12 @@ function RegExpConstructor(pattern, flags) {
if (IS_UNDEFINED(flags)) flags = input_pattern.flags;
}
- var object = %NewObject(GlobalRegExp, newtarget);
+ var object = %_NewObject(GlobalRegExp, newtarget);
return RegExpInitialize(object, pattern, flags);
}
+// ES#sec-regexp.prototype.compile RegExp.prototype.compile (pattern, flags)
function RegExpCompileJS(pattern, flags) {
if (!IS_REGEXP(this)) {
throw MakeTypeError(kIncompatibleMethodReceiver,
@@ -163,6 +179,54 @@ function RegExpExecNoTests(regexp, string, start) {
}
+// ES#sec-regexp.prototype.exec
+// RegExp.prototype.exec ( string )
+function RegExpSubclassExecJS(string) {
+ if (!IS_REGEXP(this)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ 'RegExp.prototype.exec', this);
+ }
+
+ string = TO_STRING(string);
+ var lastIndex = this.lastIndex;
+
+ // Conversion is required by the ES2015 specification (RegExpBuiltinExec
+ // algorithm, step 4) even if the value is discarded for non-global RegExps.
+ var i = TO_LENGTH(lastIndex);
+
+ var global = TO_BOOLEAN(REGEXP_GLOBAL(this));
+ var sticky = TO_BOOLEAN(REGEXP_STICKY(this));
+ var updateLastIndex = global || sticky;
+ if (updateLastIndex) {
+ if (i > string.length) {
+ this.lastIndex = 0;
+ return null;
+ }
+ } else {
+ i = 0;
+ }
+
+ // matchIndices is either null or the RegExpLastMatchInfo array.
+ // TODO(littledan): Whether a RegExp is sticky is compiled into the RegExp
+ // itself, but ES2015 allows monkey-patching this property to differ from
+ // the internal flags. If it differs, recompile a different RegExp?
+ var matchIndices = %_RegExpExec(this, string, i, RegExpLastMatchInfo);
+
+ if (IS_NULL(matchIndices)) {
+ this.lastIndex = 0;
+ return null;
+ }
+
+ // Successful match.
+ if (updateLastIndex) {
+ this.lastIndex = RegExpLastMatchInfo[CAPTURE1];
+ }
+ RETURN_NEW_RESULT_FROM_MATCH_INFO(matchIndices, string);
+}
+%FunctionRemovePrototype(RegExpSubclassExecJS);
+
+
+// Legacy implementation of RegExp.prototype.exec
function RegExpExecJS(string) {
if (!IS_REGEXP(this)) {
throw MakeTypeError(kIncompatibleMethodReceiver,
@@ -202,10 +266,30 @@ function RegExpExecJS(string) {
}
+// ES#sec-regexpexec Runtime Semantics: RegExpExec ( R, S )
+// Also takes an optional exec method in case our caller
+// has already fetched exec.
+function RegExpSubclassExec(regexp, string, exec) {
+ if (IS_UNDEFINED(exec)) {
+ exec = regexp.exec;
+ }
+ if (IS_CALLABLE(exec)) {
+ var result = %_Call(exec, regexp, string);
+ if (!IS_RECEIVER(result) && !IS_NULL(result)) {
+ throw MakeTypeError(kInvalidRegExpExecResult);
+ }
+ return result;
+ }
+ return %_Call(RegExpExecJS, regexp, string);
+}
+%SetForceInlineFlag(RegExpSubclassExec);
+
+
// One-element cache for the simplified test regexp.
var regexp_key;
var regexp_val;
+// Legacy implementation of RegExp.prototype.test
// Section 15.10.6.3 doesn't actually make sense, but the intention seems to be
// that test is defined in terms of String.prototype.exec. However, it probably
// means the original value of String.prototype.exec, which is what everybody
@@ -259,6 +343,19 @@ function RegExpTest(string) {
}
}
+
+// ES#sec-regexp.prototype.test RegExp.prototype.test ( S )
+function RegExpSubclassTest(string) {
+ if (!IS_RECEIVER(this)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ 'RegExp.prototype.test', this);
+ }
+ string = TO_STRING(string);
+ var match = RegExpSubclassExec(this, string);
+ return !IS_NULL(match);
+}
+%FunctionRemovePrototype(RegExpSubclassTest);
+
function TrimRegExp(regexp) {
if (regexp_key !== regexp) {
regexp_key = regexp;
@@ -273,27 +370,14 @@ function TrimRegExp(regexp) {
function RegExpToString() {
- if (!IS_REGEXP(this)) {
- // RegExp.prototype.toString() returns '/(?:)/' as a compatibility fix;
- // a UseCounter is incremented to track it.
- // TODO(littledan): Remove this workaround or standardize it
- if (this === GlobalRegExpPrototype) {
- %IncrementUseCounter(kRegExpPrototypeToString);
- return '/(?:)/';
- }
- if (!IS_RECEIVER(this)) {
- throw MakeTypeError(
- kIncompatibleMethodReceiver, 'RegExp.prototype.toString', this);
- }
- return '/' + TO_STRING(this.source) + '/' + TO_STRING(this.flags);
+ if (!IS_RECEIVER(this)) {
+ throw MakeTypeError(
+ kIncompatibleMethodReceiver, 'RegExp.prototype.toString', this);
}
- var result = '/' + REGEXP_SOURCE(this) + '/';
- if (REGEXP_GLOBAL(this)) result += 'g';
- if (REGEXP_IGNORE_CASE(this)) result += 'i';
- if (REGEXP_MULTILINE(this)) result += 'm';
- if (REGEXP_UNICODE(this)) result += 'u';
- if (REGEXP_STICKY(this)) result += 'y';
- return result;
+ if (this === GlobalRegExpPrototype) {
+ %IncrementUseCounter(kRegExpPrototypeToString);
+ }
+ return '/' + TO_STRING(this.source) + '/' + TO_STRING(this.flags);
}
@@ -306,7 +390,8 @@ function AtSurrogatePair(subject, index) {
}
-// ES6 21.2.5.11.
+// Legacy implementation of RegExp.prototype[Symbol.split] which
+// doesn't properly call the underlying exec, @@species methods
function RegExpSplit(string, limit) {
// TODO(yangguo): allow non-regexp receivers.
if (!IS_REGEXP(this)) {
@@ -380,9 +465,85 @@ function RegExpSplit(string, limit) {
}
-// ES6 21.2.5.6.
+// ES#sec-regexp.prototype-@@split
+// RegExp.prototype [ @@split ] ( string, limit )
+function RegExpSubclassSplit(string, limit) {
+ if (!IS_RECEIVER(this)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ "RegExp.prototype.@@split", this);
+ }
+ string = TO_STRING(string);
+ var constructor = SpeciesConstructor(this, GlobalRegExp);
+ var flags = TO_STRING(this.flags);
+
+ // TODO(adamk): this fast path is wrong with respect to this.global
+ // and this.sticky, but hopefully the spec will remove those gets
+ // and thus make the assumption of 'exec' having no side-effects
+ // more correct. Also, we doesn't ensure that 'exec' is actually
+ // a data property on RegExp.prototype.
+ var exec;
+ if (IS_REGEXP(this) && constructor === GlobalRegExp) {
+ exec = this.exec;
+ if (exec === RegExpSubclassExecJS) {
+ return %_Call(RegExpSplit, this, string, limit);
+ }
+ }
+
+ var unicode = %StringIndexOf(flags, 'u', 0) >= 0;
+ var sticky = %StringIndexOf(flags, 'y', 0) >= 0;
+ var newFlags = sticky ? flags : flags + "y";
+ var splitter = new constructor(this, newFlags);
+ var array = new GlobalArray();
+ var arrayIndex = 0;
+ var lim = (IS_UNDEFINED(limit)) ? kMaxUint32 : TO_UINT32(limit);
+ var size = string.length;
+ var prevStringIndex = 0;
+ if (lim === 0) return array;
+ var result;
+ if (size === 0) {
+ result = RegExpSubclassExec(splitter, string);
+ if (IS_NULL(result)) AddIndexedProperty(array, 0, string);
+ return array;
+ }
+ var stringIndex = prevStringIndex;
+ while (stringIndex < size) {
+ splitter.lastIndex = stringIndex;
+ result = RegExpSubclassExec(splitter, string, exec);
+ // Ensure exec will be read again on the next loop through.
+ exec = UNDEFINED;
+ if (IS_NULL(result)) {
+ stringIndex += AdvanceStringIndex(string, stringIndex, unicode);
+ } else {
+ var end = MinSimple(TO_LENGTH(splitter.lastIndex), size);
+ if (end === stringIndex) {
+ stringIndex += AdvanceStringIndex(string, stringIndex, unicode);
+ } else {
+ AddIndexedProperty(
+ array, arrayIndex,
+ %_SubString(string, prevStringIndex, stringIndex));
+ arrayIndex++;
+ if (arrayIndex === lim) return array;
+ prevStringIndex = end;
+ var numberOfCaptures = MaxSimple(TO_LENGTH(result.length), 0);
+ for (var i = 1; i < numberOfCaptures; i++) {
+ AddIndexedProperty(array, arrayIndex, result[i]);
+ arrayIndex++;
+ if (arrayIndex === lim) return array;
+ }
+ stringIndex = prevStringIndex;
+ }
+ }
+ }
+ AddIndexedProperty(array, arrayIndex,
+ %_SubString(string, prevStringIndex, size));
+ return array;
+}
+%FunctionRemovePrototype(RegExpSubclassSplit);
+
+
+// Legacy implementation of RegExp.prototype[Symbol.match] which
+// doesn't properly call the underlying exec method
function RegExpMatch(string) {
- // TODO(yangguo): allow non-regexp receivers.
if (!IS_REGEXP(this)) {
throw MakeTypeError(kIncompatibleMethodReceiver,
"RegExp.prototype.@@match", this);
@@ -396,7 +557,41 @@ function RegExpMatch(string) {
}
-// ES6 21.2.5.8.
+// ES#sec-regexp.prototype-@@match
+// RegExp.prototype [ @@match ] ( string )
+function RegExpSubclassMatch(string) {
+ if (!IS_RECEIVER(this)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ "RegExp.prototype.@@match", this);
+ }
+ string = TO_STRING(string);
+ var global = this.global;
+ if (!global) return RegExpSubclassExec(this, string);
+ var unicode = this.unicode;
+ this.lastIndex = 0;
+ var array = new InternalArray();
+ var n = 0;
+ var result;
+ while (true) {
+ result = RegExpSubclassExec(this, string);
+ if (IS_NULL(result)) {
+ if (n === 0) return null;
+ break;
+ }
+ var matchStr = TO_STRING(result[0]);
+ array[n] = matchStr;
+ if (matchStr === "") SetAdvancedStringIndex(this, string, unicode);
+ n++;
+ }
+ var resultArray = [];
+ %MoveArrayContents(array, resultArray);
+ return resultArray;
+}
+%FunctionRemovePrototype(RegExpSubclassMatch);
+
+
+// Legacy implementation of RegExp.prototype[Symbol.replace] which
+// doesn't properly call the underlying exec method.
// TODO(lrn): This array will survive indefinitely if replace is never
// called again. However, it will be empty, since the contents are cleared
@@ -458,7 +653,7 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
if (!%_IsSmi(elem)) {
// elem must be an Array.
// Use the apply argument as backing for global RegExp properties.
- var func_result = %Apply(replace, UNDEFINED, elem, 0, elem.length);
+ var func_result = %reflect_apply(replace, UNDEFINED, elem);
// Overwrite the i'th element in the results with the string we got
// back from the callback function.
res[i] = TO_STRING(func_result);
@@ -512,7 +707,7 @@ function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
parameters[j] = index;
parameters[j + 1] = subject;
- replacement = %Apply(replace, UNDEFINED, parameters, 0, j + 2);
+ replacement = %reflect_apply(replace, UNDEFINED, parameters);
}
result += replacement; // The add method converts to string if necessary.
@@ -523,7 +718,6 @@ function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
function RegExpReplace(string, replace) {
- // TODO(littledan): allow non-regexp receivers.
if (!IS_REGEXP(this)) {
throw MakeTypeError(kIncompatibleMethodReceiver,
"RegExp.prototype.@@replace", this);
@@ -565,9 +759,206 @@ function RegExpReplace(string, replace) {
}
-// ES6 21.2.5.9.
+// ES#sec-getsubstitution
+// GetSubstitution(matched, str, position, captures, replacement)
+// Expand the $-expressions in the string and return a new string with
+// the result.
+// TODO(littledan): Call this function from String.prototype.replace instead
+// of the very similar ExpandReplacement in src/js/string.js
+function GetSubstitution(matched, string, position, captures, replacement) {
+ var matchLength = matched.length;
+ var stringLength = string.length;
+ var capturesLength = captures.length;
+ var tailPos = position + matchLength;
+ var result = "";
+ var pos, expansion, peek, next, scaledIndex, advance, newScaledIndex;
+
+ var next = %StringIndexOf(replacement, '$', 0);
+ if (next < 0) {
+ result += replacement;
+ return result;
+ }
+
+ if (next > 0) result += %_SubString(replacement, 0, next);
+
+ while (true) {
+ expansion = '$';
+ pos = next + 1;
+ if (pos < replacement.length) {
+ peek = %_StringCharCodeAt(replacement, pos);
+ if (peek == 36) { // $$
+ ++pos;
+ result += '$';
+ } else if (peek == 38) { // $& - match
+ ++pos;
+ result += matched;
+ } else if (peek == 96) { // $` - prefix
+ ++pos;
+ result += %_SubString(string, 0, position);
+ } else if (peek == 39) { // $' - suffix
+ ++pos;
+ result += %_SubString(string, tailPos, stringLength);
+ } else if (peek >= 48 && peek <= 57) {
+ // Valid indices are $1 .. $9, $01 .. $09 and $10 .. $99
+ scaledIndex = (peek - 48);
+ advance = 1;
+ if (pos + 1 < replacement.length) {
+ next = %_StringCharCodeAt(replacement, pos + 1);
+ if (next >= 48 && next <= 57) {
+ newScaledIndex = scaledIndex * 10 + ((next - 48));
+ if (newScaledIndex < capturesLength) {
+ scaledIndex = newScaledIndex;
+ advance = 2;
+ }
+ }
+ }
+ if (scaledIndex != 0 && scaledIndex < capturesLength) {
+ var capture = captures[scaledIndex];
+ if (!IS_UNDEFINED(capture)) result += capture;
+ pos += advance;
+ } else {
+ result += '$';
+ }
+ } else {
+ result += '$';
+ }
+ } else {
+ result += '$';
+ }
+
+ // Go the the next $ in the replacement.
+ next = %StringIndexOf(replacement, '$', pos);
+
+ // Return if there are no more $ characters in the replacement. If we
+ // haven't reached the end, we need to append the suffix.
+ if (next < 0) {
+ if (pos < replacement.length) {
+ result += %_SubString(replacement, pos, replacement.length);
+ }
+ return result;
+ }
+
+ // Append substring between the previous and the next $ character.
+ if (next > pos) {
+ result += %_SubString(replacement, pos, next);
+ }
+ }
+ return result;
+}
+
+
+// ES#sec-advancestringindex
+// AdvanceStringIndex ( S, index, unicode )
+function AdvanceStringIndex(string, index, unicode) {
+ var increment = 1;
+ if (unicode) {
+ var first = %_StringCharCodeAt(string, index);
+ if (first >= 0xD800 && first <= 0xDBFF && string.length > index + 1) {
+ var second = %_StringCharCodeAt(string, index + 1);
+ if (second >= 0xDC00 && second <= 0xDFFF) {
+ increment = 2;
+ }
+ }
+ }
+ return increment;
+}
+
+
+function SetAdvancedStringIndex(regexp, string, unicode) {
+ var lastIndex = regexp.lastIndex;
+ regexp.lastIndex = lastIndex +
+ AdvanceStringIndex(string, lastIndex, unicode);
+}
+
+
+// ES#sec-regexp.prototype-@@replace
+// RegExp.prototype [ @@replace ] ( string, replaceValue )
+function RegExpSubclassReplace(string, replace) {
+ if (!IS_RECEIVER(this)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ "RegExp.prototype.@@replace", this);
+ }
+ string = TO_STRING(string);
+ var length = string.length;
+ var functionalReplace = IS_CALLABLE(replace);
+ if (!functionalReplace) replace = TO_STRING(replace);
+ var global = TO_BOOLEAN(this.global);
+ if (global) {
+ var unicode = TO_BOOLEAN(this.unicode);
+ this.lastIndex = 0;
+ }
+
+ // TODO(adamk): this fast path is wrong with respect to this.global
+ // and this.sticky, but hopefully the spec will remove those gets
+ // and thus make the assumption of 'exec' having no side-effects
+ // more correct. Also, we doesn't ensure that 'exec' is actually
+ // a data property on RegExp.prototype, nor does the fast path
+ // correctly handle lastIndex setting.
+ var exec;
+ if (IS_REGEXP(this)) {
+ exec = this.exec;
+ if (exec === RegExpSubclassExecJS) {
+ return %_Call(RegExpReplace, this, string, replace);
+ }
+ }
+
+ var results = new InternalArray();
+ var result, replacement;
+ while (true) {
+ result = RegExpSubclassExec(this, string, exec);
+ // Ensure exec will be read again on the next loop through.
+ exec = UNDEFINED;
+ if (IS_NULL(result)) {
+ break;
+ } else {
+ results.push(result);
+ if (!global) break;
+ var matchStr = TO_STRING(result[0]);
+ if (matchStr === "") SetAdvancedStringIndex(this, string, unicode);
+ }
+ }
+ var accumulatedResult = "";
+ var nextSourcePosition = 0;
+ for (var i = 0; i < results.length; i++) {
+ result = results[i];
+ var capturesLength = MaxSimple(TO_LENGTH(result.length), 0);
+ var matched = TO_STRING(result[0]);
+ var matchedLength = matched.length;
+ var position = MaxSimple(MinSimple(TO_INTEGER(result.index), length), 0);
+ var captures = new InternalArray();
+ for (var n = 0; n < capturesLength; n++) {
+ var capture = result[n];
+ if (!IS_UNDEFINED(capture)) capture = TO_STRING(capture);
+ captures[n] = capture;
+ }
+ if (functionalReplace) {
+ var parameters = new InternalArray(capturesLength + 2);
+ for (var j = 0; j < capturesLength; j++) {
+ parameters[j] = captures[j];
+ }
+ parameters[j] = position;
+ parameters[j + 1] = string;
+ replacement = %reflect_apply(replace, UNDEFINED, parameters, 0,
+ parameters.length);
+ } else {
+ replacement = GetSubstitution(matched, string, position, captures,
+ replace);
+ }
+ if (position >= nextSourcePosition) {
+ accumulatedResult +=
+ %_SubString(string, nextSourcePosition, position) + replacement;
+ nextSourcePosition = position + matchedLength;
+ }
+ }
+ if (nextSourcePosition >= length) return accumulatedResult;
+ return accumulatedResult + %_SubString(string, nextSourcePosition, length);
+}
+%FunctionRemovePrototype(RegExpSubclassReplace);
+
+
+// Legacy implementation of RegExp.prototype[Symbol.search] which
+// doesn't properly use the overridden exec method
function RegExpSearch(string) {
- // TODO(yangguo): allow non-regexp receivers.
if (!IS_REGEXP(this)) {
throw MakeTypeError(kIncompatibleMethodReceiver,
"RegExp.prototype.@@search", this);
@@ -578,6 +969,24 @@ function RegExpSearch(string) {
}
+// ES#sec-regexp.prototype-@@search
+// RegExp.prototype [ @@search ] ( string )
+function RegExpSubclassSearch(string) {
+ if (!IS_RECEIVER(this)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ "RegExp.prototype.@@search", this);
+ }
+ string = TO_STRING(string);
+ var previousLastIndex = this.lastIndex;
+ this.lastIndex = 0;
+ var result = RegExpSubclassExec(this, string);
+ this.lastIndex = previousLastIndex;
+ if (IS_NULL(result)) return -1;
+ return result.index;
+}
+%FunctionRemovePrototype(RegExpSubclassSearch);
+
+
// Getters for the static properties lastMatch, lastParen, leftContext, and
// rightContext of the RegExp constructor. The properties are computed based
// on the captures array of the last successful match and the subject string
@@ -639,19 +1048,35 @@ function RegExpMakeCaptureGetter(n) {
}
+// ES6 21.2.5.3.
+function RegExpGetFlags() {
+ if (!IS_RECEIVER(this)) {
+ throw MakeTypeError(
+ kRegExpNonObject, "RegExp.prototype.flags", TO_STRING(this));
+ }
+ var result = '';
+ if (this.global) result += 'g';
+ if (this.ignoreCase) result += 'i';
+ if (this.multiline) result += 'm';
+ if (this.unicode) result += 'u';
+ if (this.sticky) result += 'y';
+ return result;
+}
+
+
// ES6 21.2.5.4.
function RegExpGetGlobal() {
if (!IS_REGEXP(this)) {
// TODO(littledan): Remove this RegExp compat workaround
if (this === GlobalRegExpPrototype) {
+ %IncrementUseCounter(kRegExpPrototypeOldFlagGetter);
return UNDEFINED;
}
throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.global");
}
- return !!REGEXP_GLOBAL(this);
+ return TO_BOOLEAN(REGEXP_GLOBAL(this));
}
-%FunctionSetName(RegExpGetGlobal, "RegExp.prototype.global");
-%SetNativeFlag(RegExpGetGlobal);
+%SetForceInlineFlag(RegExpGetGlobal);
// ES6 21.2.5.5.
@@ -659,14 +1084,13 @@ function RegExpGetIgnoreCase() {
if (!IS_REGEXP(this)) {
// TODO(littledan): Remove this RegExp compat workaround
if (this === GlobalRegExpPrototype) {
+ %IncrementUseCounter(kRegExpPrototypeOldFlagGetter);
return UNDEFINED;
}
throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.ignoreCase");
}
- return !!REGEXP_IGNORE_CASE(this);
+ return TO_BOOLEAN(REGEXP_IGNORE_CASE(this));
}
-%FunctionSetName(RegExpGetIgnoreCase, "RegExp.prototype.ignoreCase");
-%SetNativeFlag(RegExpGetIgnoreCase);
// ES6 21.2.5.7.
@@ -674,14 +1098,13 @@ function RegExpGetMultiline() {
if (!IS_REGEXP(this)) {
// TODO(littledan): Remove this RegExp compat workaround
if (this === GlobalRegExpPrototype) {
+ %IncrementUseCounter(kRegExpPrototypeOldFlagGetter);
return UNDEFINED;
}
throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.multiline");
}
- return !!REGEXP_MULTILINE(this);
+ return TO_BOOLEAN(REGEXP_MULTILINE(this));
}
-%FunctionSetName(RegExpGetMultiline, "RegExp.prototype.multiline");
-%SetNativeFlag(RegExpGetMultiline);
// ES6 21.2.5.10.
@@ -689,14 +1112,29 @@ function RegExpGetSource() {
if (!IS_REGEXP(this)) {
// TODO(littledan): Remove this RegExp compat workaround
if (this === GlobalRegExpPrototype) {
- return UNDEFINED;
+ %IncrementUseCounter(kRegExpPrototypeSourceGetter);
+ return "(?:)";
}
throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.source");
}
return REGEXP_SOURCE(this);
}
-%FunctionSetName(RegExpGetSource, "RegExp.prototype.source");
-%SetNativeFlag(RegExpGetSource);
+
+
+// ES6 21.2.5.12.
+function RegExpGetSticky() {
+ if (!IS_REGEXP(this)) {
+ // Compat fix: RegExp.prototype.sticky == undefined; UseCounter tracks it
+ // TODO(littledan): Remove this workaround or standardize it
+ if (this === GlobalRegExpPrototype) {
+ %IncrementUseCounter(kRegExpPrototypeStickyGetter);
+ return UNDEFINED;
+ }
+ throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.sticky");
+ }
+ return TO_BOOLEAN(REGEXP_STICKY(this));
+}
+%SetForceInlineFlag(RegExpGetSticky);
// -------------------------------------------------------------------
@@ -718,10 +1156,12 @@ utils.InstallFunctions(GlobalRegExp.prototype, DONT_ENUM, [
splitSymbol, RegExpSplit,
]);
+utils.InstallGetter(GlobalRegExp.prototype, 'flags', RegExpGetFlags);
utils.InstallGetter(GlobalRegExp.prototype, 'global', RegExpGetGlobal);
utils.InstallGetter(GlobalRegExp.prototype, 'ignoreCase', RegExpGetIgnoreCase);
utils.InstallGetter(GlobalRegExp.prototype, 'multiline', RegExpGetMultiline);
utils.InstallGetter(GlobalRegExp.prototype, 'source', RegExpGetSource);
+utils.InstallGetter(GlobalRegExp.prototype, 'sticky', RegExpGetSticky);
// The properties `input` and `$_` are aliases for each other. When this
// value is set the value it is set to is coerced to a string.
@@ -769,12 +1209,39 @@ for (var i = 1; i < 10; ++i) {
%ToFastProperties(GlobalRegExp);
// -------------------------------------------------------------------
+// Internal
+
+var InternalRegExpMatchInfo = new InternalPackedArray(2, "", UNDEFINED, 0, 0);
+
+function InternalRegExpMatch(regexp, subject) {
+ var matchInfo = %_RegExpExec(regexp, subject, 0, InternalRegExpMatchInfo);
+ if (!IS_NULL(matchInfo)) {
+ RETURN_NEW_RESULT_FROM_MATCH_INFO(matchInfo, subject);
+ }
+ return null;
+}
+
+function InternalRegExpReplace(regexp, subject, replacement) {
+ return %StringReplaceGlobalRegExpWithString(
+ subject, regexp, replacement, InternalRegExpMatchInfo);
+}
+
+// -------------------------------------------------------------------
// Exports
utils.Export(function(to) {
+ to.InternalRegExpMatch = InternalRegExpMatch;
+ to.InternalRegExpReplace = InternalRegExpReplace;
+ to.IsRegExp = IsRegExp;
to.RegExpExec = DoRegExpExec;
- to.RegExpExecNoTests = RegExpExecNoTests;
+ to.RegExpInitialize = RegExpInitialize;
to.RegExpLastMatchInfo = RegExpLastMatchInfo;
+ to.RegExpSubclassExecJS = RegExpSubclassExecJS;
+ to.RegExpSubclassMatch = RegExpSubclassMatch;
+ to.RegExpSubclassReplace = RegExpSubclassReplace;
+ to.RegExpSubclassSearch = RegExpSubclassSearch;
+ to.RegExpSubclassSplit = RegExpSubclassSplit;
+ to.RegExpSubclassTest = RegExpSubclassTest;
to.RegExpTest = RegExpTest;
});
diff --git a/deps/v8/src/js/runtime.js b/deps/v8/src/js/runtime.js
index 7a61094da6..8e4f283256 100644
--- a/deps/v8/src/js/runtime.js
+++ b/deps/v8/src/js/runtime.js
@@ -42,14 +42,6 @@ utils.ImportFromExperimental(function(from) {
---------------------------------
*/
-function ConcatIterableToArray(target, iterable) {
- var index = target.length;
- for (var element of iterable) {
- AddIndexedProperty(target, index++, element);
- }
- return target;
-}
-
// This function should be called rather than %AddElement in contexts where the
// argument might not be less than 2**32-1. ES2015 ToLength semantics mean that
@@ -137,8 +129,4 @@ utils.Export(function(to) {
to.SpeciesConstructor = SpeciesConstructor;
});
-%InstallToContext([
- "concat_iterable_to_array", ConcatIterableToArray,
-]);
-
})
diff --git a/deps/v8/src/js/string-iterator.js b/deps/v8/src/js/string-iterator.js
index 3c331dd1a2..af9af31efd 100644
--- a/deps/v8/src/js/string-iterator.js
+++ b/deps/v8/src/js/string-iterator.js
@@ -32,6 +32,7 @@ function StringIterator() {}
// 21.1.5.1 CreateStringIterator Abstract Operation
function CreateStringIterator(string) {
+ CHECK_OBJECT_COERCIBLE(string, 'String.prototype[Symbol.iterator]');
var s = TO_STRING(string);
var iterator = new StringIterator;
SET_PRIVATE(iterator, stringIteratorIteratedStringSymbol, s);
diff --git a/deps/v8/src/js/string.js b/deps/v8/src/js/string.js
index a4019784e8..0eb394e173 100644
--- a/deps/v8/src/js/string.js
+++ b/deps/v8/src/js/string.js
@@ -15,12 +15,13 @@ var GlobalRegExp = global.RegExp;
var GlobalString = global.String;
var InternalArray = utils.InternalArray;
var InternalPackedArray = utils.InternalPackedArray;
+var IsRegExp;
var MakeRangeError;
var MakeTypeError;
var MaxSimple;
var MinSimple;
+var RegExpInitialize;
var matchSymbol = utils.ImportNow("match_symbol");
-var RegExpExecNoTests;
var replaceSymbol = utils.ImportNow("replace_symbol");
var searchSymbol = utils.ImportNow("search_symbol");
var splitSymbol = utils.ImportNow("split_symbol");
@@ -28,11 +29,12 @@ var splitSymbol = utils.ImportNow("split_symbol");
utils.Import(function(from) {
ArrayIndexOf = from.ArrayIndexOf;
ArrayJoin = from.ArrayJoin;
+ IsRegExp = from.IsRegExp;
MakeRangeError = from.MakeRangeError;
MakeTypeError = from.MakeTypeError;
MaxSimple = from.MaxSimple;
MinSimple = from.MinSimple;
- RegExpExecNoTests = from.RegExpExecNoTests;
+ RegExpInitialize = from.RegExpInitialize;
});
//-------------------------------------------------------------------
@@ -159,9 +161,10 @@ function StringMatchJS(pattern) {
var subject = TO_STRING(this);
- // Non-regexp argument.
- var regexp = new GlobalRegExp(pattern);
- return RegExpExecNoTests(regexp, subject, 0);
+ // Equivalent to RegExpCreate (ES#sec-regexpcreate)
+ var regexp = %_NewObject(GlobalRegExp, GlobalRegExp);
+ RegExpInitialize(regexp, pattern);
+ return regexp[matchSymbol](subject);
}
@@ -355,7 +358,10 @@ function StringSearch(pattern) {
}
var subject = TO_STRING(this);
- var regexp = new GlobalRegExp(pattern);
+
+ // Equivalent to RegExpCreate (ES#sec-regexpcreate)
+ var regexp = %_NewObject(GlobalRegExp, GlobalRegExp);
+ RegExpInitialize(regexp, pattern);
return %_Call(regexp[searchSymbol], regexp, subject);
}
@@ -558,18 +564,6 @@ function StringTrimRight() {
}
-// ECMA-262, section 15.5.3.2
-function StringFromCharCode(_) { // length == 1
- "use strict";
- var s = "";
- var n = arguments.length;
- for (var i = 0; i < n; ++i) {
- s += %_StringCharFromCode(arguments[i] & 0xffff);
- }
- return s;
-}
-
-
// ES6 draft, revision 26 (2014-07-18), section B.2.3.2.1
function HtmlEscape(str) {
return %_Call(StringReplace, TO_STRING(str), /"/g, "&quot;");
@@ -701,7 +695,7 @@ function StringStartsWith(searchString, position) { // length == 1
var s = TO_STRING(this);
- if (IS_REGEXP(searchString)) {
+ if (IsRegExp(searchString)) {
throw MakeTypeError(kFirstArgumentNotRegExp, "String.prototype.startsWith");
}
@@ -727,7 +721,7 @@ function StringEndsWith(searchString, position) { // length == 1
var s = TO_STRING(this);
- if (IS_REGEXP(searchString)) {
+ if (IsRegExp(searchString)) {
throw MakeTypeError(kFirstArgumentNotRegExp, "String.prototype.endsWith");
}
@@ -754,7 +748,7 @@ function StringIncludes(searchString, position) { // length == 1
var string = TO_STRING(this);
- if (IS_REGEXP(searchString)) {
+ if (IsRegExp(searchString)) {
throw MakeTypeError(kFirstArgumentNotRegExp, "String.prototype.includes");
}
@@ -860,7 +854,6 @@ function StringRaw(callSite) {
// Set up the non-enumerable functions on the String object.
utils.InstallFunctions(GlobalString, DONT_ENUM, [
- "fromCharCode", StringFromCharCode,
"fromCodePoint", StringFromCodePoint,
"raw", StringRaw
]);
diff --git a/deps/v8/src/js/symbol.js b/deps/v8/src/js/symbol.js
index ae543691c2..7365655e24 100644
--- a/deps/v8/src/js/symbol.js
+++ b/deps/v8/src/js/symbol.js
@@ -84,9 +84,7 @@ utils.InstallConstants(GlobalSymbol, [
// "search", searchSymbol,
// "split, splitSymbol,
"toPrimitive", toPrimitiveSymbol,
- // TODO(dslomov, caitp): Currently defined in harmony-tostring.js ---
- // Move here when shipping
- // "toStringTag", toStringTagSymbol,
+ "toStringTag", toStringTagSymbol,
"unscopables", unscopablesSymbol,
]);
diff --git a/deps/v8/src/js/typedarray.js b/deps/v8/src/js/typedarray.js
index 3d500a379e..4fb174bc57 100644
--- a/deps/v8/src/js/typedarray.js
+++ b/deps/v8/src/js/typedarray.js
@@ -11,11 +11,15 @@
// -------------------------------------------------------------------
// Imports
-var ArrayFrom;
-var ArrayToString;
+var AddIndexedProperty;
+// array.js has to come before typedarray.js for this to work
+var ArrayToString = utils.ImportNow("ArrayToString");
var ArrayValues;
+var GetIterator;
+var GetMethod;
var GlobalArray = global.Array;
var GlobalArrayBuffer = global.ArrayBuffer;
+var GlobalArrayBufferPrototype = GlobalArrayBuffer.prototype;
var GlobalDataView = global.DataView;
var GlobalObject = global.Object;
var InternalArray = utils.InternalArray;
@@ -67,9 +71,10 @@ endmacro
TYPED_ARRAYS(DECLARE_GLOBALS)
utils.Import(function(from) {
- ArrayFrom = from.ArrayFrom;
- ArrayToString = from.ArrayToString;
+ AddIndexedProperty = from.AddIndexedProperty;
ArrayValues = from.ArrayValues;
+ GetIterator = from.GetIterator;
+ GetMethod = from.GetMethod;
InnerArrayCopyWithin = from.InnerArrayCopyWithin;
InnerArrayEvery = from.InnerArrayEvery;
InnerArrayFill = from.InnerArrayFill;
@@ -118,7 +123,7 @@ function TypedArrayCreate(constructor, arg0, arg1, arg2) {
} else {
var newTypedArray = new constructor(arg0, arg1, arg2);
}
- if (!%_IsTypedArray(newTypedArray)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(newTypedArray)) throw MakeTypeError(kNotTypedArray);
// TODO(littledan): Check for being detached, here and elsewhere
// All callers where the first argument is a Number have no additional
// arguments.
@@ -195,8 +200,7 @@ function NAMEConstructByLength(obj, length) {
}
}
-function NAMEConstructByArrayLike(obj, arrayLike) {
- var length = arrayLike.length;
+function NAMEConstructByArrayLike(obj, arrayLike, length) {
var l = ToPositiveInteger(length, kInvalidTypedArrayLength);
if (l > %_MaxSmi()) {
@@ -236,7 +240,23 @@ function NAMEConstructByIterable(obj, iterable, iteratorFn) {
for (var value of newIterable) {
list.push(value);
}
- NAMEConstructByArrayLike(obj, list);
+ NAMEConstructByArrayLike(obj, list, list.length);
+}
+
+// ES#sec-typedarray-typedarray TypedArray ( typedArray )
+function NAMEConstructByTypedArray(obj, typedArray) {
+ // TODO(littledan): Throw on detached typedArray
+ var srcData = %TypedArrayGetBuffer(typedArray);
+ var length = %_TypedArrayGetLength(typedArray);
+ var byteLength = %_ArrayBufferViewGetByteLength(typedArray);
+ var newByteLength = length * ELEMENT_SIZE;
+ NAMEConstructByArrayLike(obj, typedArray, length);
+ var bufferConstructor = SpeciesConstructor(srcData, GlobalArrayBuffer);
+ var prototype = bufferConstructor.prototype;
+ // TODO(littledan): Use the right prototype based on bufferConstructor's realm
+ if (IS_RECEIVER(prototype) && prototype !== GlobalArrayBufferPrototype) {
+ %InternalSetPrototype(%TypedArrayGetBuffer(obj), prototype);
+ }
}
function NAMEConstructor(arg1, arg2, arg3) {
@@ -246,14 +266,12 @@ function NAMEConstructor(arg1, arg2, arg3) {
} else if (IS_NUMBER(arg1) || IS_STRING(arg1) ||
IS_BOOLEAN(arg1) || IS_UNDEFINED(arg1)) {
NAMEConstructByLength(this, arg1);
+ } else if (IS_TYPEDARRAY(arg1)) {
+ NAMEConstructByTypedArray(this, arg1);
} else {
- // TODO(littledan): If arg1 is a TypedArray, follow the constructor
- // path in ES2015 22.2.4.3, and call SpeciesConstructor, in a
- // path that seems to be an optimized version of what's below, but
- // in an observably different way.
var iteratorFn = arg1[iteratorSymbol];
if (IS_UNDEFINED(iteratorFn) || iteratorFn === ArrayValues) {
- NAMEConstructByArrayLike(this, arg1);
+ NAMEConstructByArrayLike(this, arg1, arg1.length);
} else {
NAMEConstructByIterable(this, arg1, iteratorFn);
}
@@ -263,14 +281,6 @@ function NAMEConstructor(arg1, arg2, arg3) {
}
}
-// TODO(littledan): Remove this performance workaround BUG(chromium:579905)
-function NAME_GetLength() {
- if (!(%_ClassOf(this) === 'NAME')) {
- throw MakeTypeError(kIncompatibleMethodReceiver, "NAME.length", this);
- }
- return %_TypedArrayGetLength(this);
-}
-
function NAMESubArray(begin, end) {
var beginInt = TO_INTEGER(begin);
if (!IS_UNDEFINED(end)) {
@@ -323,7 +333,7 @@ TYPED_ARRAYS(TYPED_ARRAY_SUBARRAY_CASE)
%SetForceInlineFlag(TypedArraySubArray);
function TypedArrayGetBuffer() {
- if (!%_IsTypedArray(this)) {
+ if (!IS_TYPEDARRAY(this)) {
throw MakeTypeError(kIncompatibleMethodReceiver,
"get TypedArray.prototype.buffer", this);
}
@@ -332,7 +342,7 @@ function TypedArrayGetBuffer() {
%SetForceInlineFlag(TypedArrayGetBuffer);
function TypedArrayGetByteLength() {
- if (!%_IsTypedArray(this)) {
+ if (!IS_TYPEDARRAY(this)) {
throw MakeTypeError(kIncompatibleMethodReceiver,
"get TypedArray.prototype.byteLength", this);
}
@@ -341,7 +351,7 @@ function TypedArrayGetByteLength() {
%SetForceInlineFlag(TypedArrayGetByteLength);
function TypedArrayGetByteOffset() {
- if (!%_IsTypedArray(this)) {
+ if (!IS_TYPEDARRAY(this)) {
throw MakeTypeError(kIncompatibleMethodReceiver,
"get TypedArray.prototype.byteOffset", this);
}
@@ -350,7 +360,7 @@ function TypedArrayGetByteOffset() {
%SetForceInlineFlag(TypedArrayGetByteOffset);
function TypedArrayGetLength() {
- if (!%_IsTypedArray(this)) {
+ if (!IS_TYPEDARRAY(this)) {
throw MakeTypeError(kIncompatibleMethodReceiver,
"get TypedArray.prototype.length", this);
}
@@ -465,7 +475,7 @@ function TypedArraySet(obj, offset) {
%FunctionSetLength(TypedArraySet, 1);
function TypedArrayGetToStringTag() {
- if (!%_IsTypedArray(this)) return;
+ if (!IS_TYPEDARRAY(this)) return;
var name = %_ClassOf(this);
if (IS_UNDEFINED(name)) return;
return name;
@@ -473,7 +483,7 @@ function TypedArrayGetToStringTag() {
function TypedArrayCopyWithin(target, start, end) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
@@ -485,7 +495,7 @@ function TypedArrayCopyWithin(target, start, end) {
// ES6 draft 05-05-15, section 22.2.3.7
function TypedArrayEvery(f, receiver) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
@@ -496,7 +506,7 @@ function TypedArrayEvery(f, receiver) {
// ES6 draft 08-24-14, section 22.2.3.12
function TypedArrayForEach(f, receiver) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
@@ -507,7 +517,7 @@ function TypedArrayForEach(f, receiver) {
// ES6 draft 04-05-14 section 22.2.3.8
function TypedArrayFill(value, start, end) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
@@ -518,7 +528,7 @@ function TypedArrayFill(value, start, end) {
// ES6 draft 07-15-13, section 22.2.3.9
function TypedArrayFilter(f, thisArg) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
@@ -536,7 +546,7 @@ function TypedArrayFilter(f, thisArg) {
// ES6 draft 07-15-13, section 22.2.3.10
function TypedArrayFind(predicate, thisArg) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
@@ -547,7 +557,7 @@ function TypedArrayFind(predicate, thisArg) {
// ES6 draft 07-15-13, section 22.2.3.11
function TypedArrayFindIndex(predicate, thisArg) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
@@ -558,7 +568,7 @@ function TypedArrayFindIndex(predicate, thisArg) {
// ES6 draft 05-18-15, section 22.2.3.21
function TypedArrayReverse() {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
@@ -586,7 +596,7 @@ function TypedArrayComparefn(x, y) {
// ES6 draft 05-18-15, section 22.2.3.25
function TypedArraySort(comparefn) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
@@ -600,7 +610,7 @@ function TypedArraySort(comparefn) {
// ES6 section 22.2.3.13
function TypedArrayIndexOf(element, index) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
return InnerArrayIndexOf(this, element, index, length);
@@ -610,7 +620,7 @@ function TypedArrayIndexOf(element, index) {
// ES6 section 22.2.3.16
function TypedArrayLastIndexOf(element, index) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
@@ -622,7 +632,7 @@ function TypedArrayLastIndexOf(element, index) {
// ES6 draft 07-15-13, section 22.2.3.18
function TypedArrayMap(f, thisArg) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
var result = TypedArraySpeciesCreate(this, length);
@@ -638,7 +648,7 @@ function TypedArrayMap(f, thisArg) {
// ES6 draft 05-05-15, section 22.2.3.24
function TypedArraySome(f, receiver) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
@@ -649,7 +659,7 @@ function TypedArraySome(f, receiver) {
// ES6 section 22.2.3.27
function TypedArrayToLocaleString() {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
@@ -657,15 +667,9 @@ function TypedArrayToLocaleString() {
}
-// ES6 section 22.2.3.28
-function TypedArrayToString() {
- return %_Call(ArrayToString, this);
-}
-
-
// ES6 section 22.2.3.14
function TypedArrayJoin(separator) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
@@ -675,7 +679,7 @@ function TypedArrayJoin(separator) {
// ES6 draft 07-15-13, section 22.2.3.19
function TypedArrayReduce(callback, current) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
return InnerArrayReduce(callback, current, this, length,
@@ -686,7 +690,7 @@ function TypedArrayReduce(callback, current) {
// ES6 draft 07-15-13, section 22.2.3.19
function TypedArrayReduceRight(callback, current) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
return InnerArrayReduceRight(callback, current, this, length,
@@ -696,7 +700,7 @@ function TypedArrayReduceRight(callback, current) {
function TypedArraySlice(start, end) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
var len = %_TypedArrayGetLength(this);
var relativeStart = TO_INTEGER(start);
@@ -740,7 +744,7 @@ function TypedArraySlice(start, end) {
// ES2016 draft, section 22.2.3.14
function TypedArrayIncludes(searchElement, fromIndex) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
@@ -760,14 +764,50 @@ function TypedArrayOf() {
}
+// ES#sec-iterabletoarraylike Runtime Semantics: IterableToArrayLike( items )
+function IterableToArrayLike(items) {
+ var iterable = GetMethod(items, iteratorSymbol);
+ if (!IS_UNDEFINED(iterable)) {
+ var internal_array = new InternalArray();
+ var i = 0;
+ for (var value of
+ { [iteratorSymbol]() { return GetIterator(items, iterable) } }) {
+ internal_array[i] = value;
+ i++;
+ }
+ var array = [];
+ %MoveArrayContents(internal_array, array);
+ return array;
+ }
+ return TO_OBJECT(items);
+}
+
+
+// ES#sec-%typedarray%.from
+// %TypedArray%.from ( source [ , mapfn [ , thisArg ] ] )
function TypedArrayFrom(source, mapfn, thisArg) {
- // TODO(littledan): Investigate if there is a receiver which could be
- // faster to accumulate on than Array, e.g., a TypedVector.
- // TODO(littledan): Rewrite this code to ensure that things happen
- // in the right order, e.g., the constructor needs to be called before
- // the mapping function on array-likes.
- var array = %_Call(ArrayFrom, GlobalArray, source, mapfn, thisArg);
- return TypedArrayCreate(this, array);
+ if (!%IsConstructor(this)) throw MakeTypeError(kNotConstructor, this);
+ var mapping;
+ if (!IS_UNDEFINED(mapfn)) {
+ if (!IS_CALLABLE(mapfn)) throw MakeTypeError(kCalledNonCallable, this);
+ mapping = true;
+ } else {
+ mapping = false;
+ }
+ var arrayLike = IterableToArrayLike(source);
+ var length = TO_LENGTH(arrayLike.length);
+ var targetObject = TypedArrayCreate(this, length);
+ var value, mappedValue;
+ for (var i = 0; i < length; i++) {
+ value = arrayLike[i];
+ if (mapping) {
+ mappedValue = %_Call(mapfn, thisArg, value, i);
+ } else {
+ mappedValue = value;
+ }
+ targetObject[i] = mappedValue;
+ }
+ return targetObject;
}
%FunctionSetLength(TypedArrayFrom, 1);
@@ -785,7 +825,7 @@ function TypedArray() {
%FunctionSetPrototype(TypedArray, new GlobalObject());
%AddNamedProperty(TypedArray.prototype,
"constructor", TypedArray, DONT_ENUM);
-utils.InstallFunctions(TypedArray, DONT_ENUM | DONT_DELETE | READ_ONLY, [
+utils.InstallFunctions(TypedArray, DONT_ENUM, [
"from", TypedArrayFrom,
"of", TypedArrayOf
]);
@@ -819,10 +859,12 @@ utils.InstallFunctions(TypedArray.prototype, DONT_ENUM, [
"slice", TypedArraySlice,
"some", TypedArraySome,
"sort", TypedArraySort,
- "toString", TypedArrayToString,
"toLocaleString", TypedArrayToLocaleString
]);
+%AddNamedProperty(TypedArray.prototype, "toString", ArrayToString,
+ DONT_ENUM);
+
macro SETUP_TYPED_ARRAY(ARRAY_ID, NAME, ELEMENT_SIZE)
%SetCode(GlobalNAME, NAMEConstructor);
@@ -838,9 +880,6 @@ macro SETUP_TYPED_ARRAY(ARRAY_ID, NAME, ELEMENT_SIZE)
%AddNamedProperty(GlobalNAME.prototype,
"BYTES_PER_ELEMENT", ELEMENT_SIZE,
READ_ONLY | DONT_ENUM | DONT_DELETE);
- // TODO(littledan): Remove this performance workaround BUG(chromium:579905)
- utils.InstallGetter(GlobalNAME.prototype, "length", NAME_GetLength,
- DONT_ENUM | DONT_DELETE);
endmacro
TYPED_ARRAYS(SETUP_TYPED_ARRAY)
diff --git a/deps/v8/src/js/uri.js b/deps/v8/src/js/uri.js
index 712d7e60f3..dca83c9b23 100644
--- a/deps/v8/src/js/uri.js
+++ b/deps/v8/src/js/uri.js
@@ -15,7 +15,6 @@
// Imports
var GlobalObject = global.Object;
-var GlobalArray = global.Array;
var InternalArray = utils.InternalArray;
var MakeURIError;
@@ -76,7 +75,7 @@ function URIEncodeSingle(cc, result, index) {
var x = (cc >> 12) & 0xF;
var y = (cc >> 6) & 63;
var z = cc & 63;
- var octets = new GlobalArray(3);
+ var octets = new InternalArray(3);
if (cc <= 0x007F) {
octets[0] = cc;
} else if (cc <= 0x07FF) {
@@ -96,7 +95,7 @@ function URIEncodePair(cc1 , cc2, result, index) {
var x = cc1 & 3;
var y = (cc2 >> 6) & 0xF;
var z = cc2 & 63;
- var octets = new GlobalArray(4);
+ var octets = new InternalArray(4);
octets[0] = (u >> 2) + 240;
octets[1] = (((u & 3) << 4) | w) + 128;
octets[2] = ((x << 4) | y) + 128;
@@ -248,7 +247,7 @@ function Decode(uri, reserved) {
var n = 0;
while (((cc << ++n) & 0x80) != 0) { }
if (n == 1 || n > 4) throw MakeURIError();
- var octets = new GlobalArray(n);
+ var octets = new InternalArray(n);
octets[0] = cc;
if (k + 3 * (n - 1) >= uriLength) throw MakeURIError();
for (var i = 1; i < n; i++) {
diff --git a/deps/v8/src/js/v8natives.js b/deps/v8/src/js/v8natives.js
index 5e1a8256ee..5185c620b3 100644
--- a/deps/v8/src/js/v8natives.js
+++ b/deps/v8/src/js/v8natives.js
@@ -134,14 +134,6 @@ function ObjectValueOf() {
}
-// ES6 7.3.11
-function ObjectHasOwnProperty(value) {
- var name = TO_NAME(value);
- var object = TO_OBJECT(this);
- return %HasOwnProperty(object, name);
-}
-
-
// ES6 19.1.3.3 Object.prototype.isPrototypeOf(V)
function ObjectIsPrototypeOf(V) {
if (!IS_RECEIVER(V)) return false;
@@ -581,11 +573,9 @@ function DefineObjectProperty(obj, p, desc, should_throw) {
if (IsDataDescriptor(current) && IsDataDescriptor(desc)) {
var currentIsWritable = current.isWritable();
if (currentIsWritable != desc.isWritable()) {
- if (!currentIsWritable || IS_STRONG(obj)) {
+ if (!currentIsWritable) {
if (should_throw) {
- throw currentIsWritable
- ? MakeTypeError(kStrongRedefineDisallowed, obj, p)
- : MakeTypeError(kRedefineDisallowed, p);
+ throw MakeTypeError(kRedefineDisallowed, p);
} else {
return false;
}
@@ -850,7 +840,6 @@ utils.InstallFunctions(GlobalObject.prototype, DONT_ENUM, [
"toString", ObjectToString,
"toLocaleString", ObjectToLocaleString,
"valueOf", ObjectValueOf,
- "hasOwnProperty", ObjectHasOwnProperty,
"isPrototypeOf", ObjectIsPrototypeOf,
"propertyIsEnumerable", ObjectPropertyIsEnumerable,
"__defineGetter__", ObjectDefineGetter,
@@ -1106,9 +1095,10 @@ utils.Export(function(to) {
to.IsFinite = GlobalIsFinite;
to.IsNaN = GlobalIsNaN;
to.NumberIsNaN = NumberIsNaN;
+ to.NumberIsInteger = NumberIsInteger;
to.ObjectDefineProperties = ObjectDefineProperties;
to.ObjectDefineProperty = ObjectDefineProperty;
- to.ObjectHasOwnProperty = ObjectHasOwnProperty;
+ to.ObjectHasOwnProperty = GlobalObject.prototype.hasOwnProperty;
});
%InstallToContext([
diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h
index efd3c04b98..1b9829fa40 100644
--- a/deps/v8/src/json-parser.h
+++ b/deps/v8/src/json-parser.h
@@ -37,6 +37,7 @@ class JsonParser BASE_EMBEDDED {
source_length_(source->length()),
isolate_(source->map()->GetHeap()->isolate()),
factory_(isolate_->factory()),
+ zone_(isolate_->allocator()),
object_constructor_(isolate_->native_context()->object_function(),
isolate_),
position_(-1) {
@@ -536,7 +537,7 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonArray() {
fast_elements->set(i, *elements[i]);
}
Handle<Object> json_array = factory()->NewJSArrayWithElements(
- fast_elements, FAST_ELEMENTS, Strength::WEAK, pretenure_);
+ fast_elements, FAST_ELEMENTS, pretenure_);
return scope.CloseAndEscape(json_array);
}
diff --git a/deps/v8/src/json-stringifier.h b/deps/v8/src/json-stringifier.h
index d97ca2ba73..b40a78249f 100644
--- a/deps/v8/src/json-stringifier.h
+++ b/deps/v8/src/json-stringifier.h
@@ -245,7 +245,7 @@ MaybeHandle<Object> BasicJsonStringifier::ApplyToJsonFunction(
LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
Handle<Object> fun;
ASSIGN_RETURN_ON_EXCEPTION(isolate_, fun, Object::GetProperty(&it), Object);
- if (!fun->IsJSFunction()) return object;
+ if (!fun->IsCallable()) return object;
// Call toJSON function.
if (key->IsSmi()) key = factory()->NumberToString(key);
@@ -501,8 +501,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArraySlow(
if (i > 0) builder_.AppendCharacter(',');
Handle<Object> element;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate_, element,
- Object::GetElement(isolate_, object, i),
+ isolate_, element, JSReceiver::GetElement(isolate_, object, i),
EXCEPTION);
if (element->IsUndefined()) {
builder_.AppendCString("null");
@@ -580,8 +579,8 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject(
DCHECK(key->IsNumber());
key_handle = factory()->NumberToString(Handle<Object>(key, isolate_));
if (key->IsSmi()) {
- maybe_property = Object::GetElement(
- isolate_, object, Smi::cast(key)->value());
+ maybe_property =
+ JSReceiver::GetElement(isolate_, object, Smi::cast(key)->value());
} else {
maybe_property = Object::GetPropertyOrElement(object, key_handle);
}
diff --git a/deps/v8/src/key-accumulator.cc b/deps/v8/src/keys.cc
index c2c4996922..f8b606ca4b 100644
--- a/deps/v8/src/key-accumulator.cc
+++ b/deps/v8/src/keys.cc
@@ -2,26 +2,24 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/key-accumulator.h"
+#include "src/keys.h"
#include "src/elements.h"
#include "src/factory.h"
#include "src/isolate-inl.h"
#include "src/objects-inl.h"
#include "src/property-descriptor.h"
-
+#include "src/prototype.h"
namespace v8 {
namespace internal {
-
KeyAccumulator::~KeyAccumulator() {
for (size_t i = 0; i < elements_.size(); i++) {
delete elements_[i];
}
}
-
Handle<FixedArray> KeyAccumulator::GetKeys(GetKeysConversion convert) {
if (length_ == 0) {
return isolate_->factory()->empty_fixed_array();
@@ -46,6 +44,7 @@ Handle<FixedArray> KeyAccumulator::GetKeys(GetKeysConversion convert) {
for (size_t level = 0; level < max_level; level++) {
int num_string_properties = level_lengths_[level * 2];
int num_symbol_properties = level_lengths_[level * 2 + 1];
+ int num_elements = 0;
if (num_string_properties < 0) {
// If the |num_string_properties| is negative, the current level contains
// properties from a proxy, hence we skip the integer keys in |elements_|
@@ -54,7 +53,7 @@ Handle<FixedArray> KeyAccumulator::GetKeys(GetKeysConversion convert) {
} else if (level < elements_.size()) {
// Add the element indices for this prototype level.
std::vector<uint32_t>* elements = elements_[level];
- int num_elements = static_cast<int>(elements->size());
+ num_elements = static_cast<int>(elements->size());
for (int i = 0; i < num_elements; i++) {
Handle<Object> key;
if (convert == KEEP_NUMBERS) {
@@ -80,13 +79,19 @@ Handle<FixedArray> KeyAccumulator::GetKeys(GetKeysConversion convert) {
insertion_index++;
symbol_properties_index++;
}
+ if (FLAG_trace_for_in_enumerate) {
+ PrintF("| strings=%d symbols=%d elements=%i ", num_string_properties,
+ num_symbol_properties, num_elements);
+ }
+ }
+ if (FLAG_trace_for_in_enumerate) {
+ PrintF("|| prototypes=%zu ||\n", max_level);
}
DCHECK_EQ(insertion_index, length_);
return result;
}
-
namespace {
bool AccumulatorHasKey(std::vector<uint32_t>* sub_elements, uint32_t key) {
@@ -99,7 +104,6 @@ bool KeyAccumulator::AddKey(Object* key, AddKeyConversion convert) {
return AddKey(handle(key, isolate_), convert);
}
-
bool KeyAccumulator::AddKey(Handle<Object> key, AddKeyConversion convert) {
if (key->IsSymbol()) {
if (filter_ & SKIP_SYMBOLS) return false;
@@ -136,10 +140,8 @@ bool KeyAccumulator::AddKey(Handle<Object> key, AddKeyConversion convert) {
return AddStringKey(key, convert);
}
-
bool KeyAccumulator::AddKey(uint32_t key) { return AddIntegerKey(key); }
-
bool KeyAccumulator::AddIntegerKey(uint32_t key) {
// Make sure we do not add keys to a proxy-level (see AddKeysFromProxy).
// We mark proxy-levels with a negative length
@@ -154,7 +156,6 @@ bool KeyAccumulator::AddIntegerKey(uint32_t key) {
return true;
}
-
bool KeyAccumulator::AddStringKey(Handle<Object> key,
AddKeyConversion convert) {
if (string_properties_.is_null()) {
@@ -176,7 +177,6 @@ bool KeyAccumulator::AddStringKey(Handle<Object> key,
}
}
-
bool KeyAccumulator::AddSymbolKey(Handle<Object> key) {
if (symbol_properties_.is_null()) {
symbol_properties_ = OrderedHashSet::Allocate(isolate_, 16);
@@ -192,7 +192,6 @@ bool KeyAccumulator::AddSymbolKey(Handle<Object> key) {
}
}
-
void KeyAccumulator::AddKeys(Handle<FixedArray> array,
AddKeyConversion convert) {
int add_length = array->length();
@@ -203,7 +202,6 @@ void KeyAccumulator::AddKeys(Handle<FixedArray> array,
}
}
-
void KeyAccumulator::AddKeys(Handle<JSObject> array_like,
AddKeyConversion convert) {
DCHECK(array_like->IsJSArray() || array_like->HasSloppyArgumentsElements());
@@ -211,7 +209,6 @@ void KeyAccumulator::AddKeys(Handle<JSObject> array_like,
accessor->AddElementsToKeyAccumulator(array_like, this, convert);
}
-
void KeyAccumulator::AddKeysFromProxy(Handle<JSObject> array_like) {
// Proxies define a complete list of keys with no distinction of
// elements and properties, which breaks the normal assumption for the
@@ -223,7 +220,6 @@ void KeyAccumulator::AddKeysFromProxy(Handle<JSObject> array_like) {
level_string_length_ = -level_string_length_;
}
-
MaybeHandle<FixedArray> FilterProxyKeys(Isolate* isolate, Handle<JSProxy> owner,
Handle<FixedArray> keys,
PropertyFilter filter) {
@@ -253,13 +249,14 @@ MaybeHandle<FixedArray> FilterProxyKeys(Isolate* isolate, Handle<JSProxy> owner,
return keys;
}
-
// Returns "nothing" in case of exception, "true" on success.
Maybe<bool> KeyAccumulator::AddKeysFromProxy(Handle<JSProxy> proxy,
Handle<FixedArray> keys) {
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate_, keys, FilterProxyKeys(isolate_, proxy, keys, filter_),
- Nothing<bool>());
+ if (filter_proxy_keys_) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, keys, FilterProxyKeys(isolate_, proxy, keys, filter_),
+ Nothing<bool>());
+ }
// Proxies define a complete list of keys with no distinction of
// elements and properties, which breaks the normal assumption for the
// KeyAccumulator.
@@ -277,7 +274,6 @@ Maybe<bool> KeyAccumulator::AddKeysFromProxy(Handle<JSProxy> proxy,
return Just(true);
}
-
void KeyAccumulator::AddElementKeysFromInterceptor(
Handle<JSObject> array_like) {
AddKeys(array_like, CONVERT_TO_ARRAY_INDEX);
@@ -286,7 +282,6 @@ void KeyAccumulator::AddElementKeysFromInterceptor(
SortCurrentElementsListRemoveDuplicates();
}
-
void KeyAccumulator::SortCurrentElementsListRemoveDuplicates() {
// Sort and remove duplicates from the current elements level and adjust.
// the lengths accordingly.
@@ -300,14 +295,12 @@ void KeyAccumulator::SortCurrentElementsListRemoveDuplicates() {
length_ -= static_cast<int>(nof_removed_keys);
}
-
void KeyAccumulator::SortCurrentElementsList() {
if (elements_.empty()) return;
auto element_keys = elements_.back();
std::sort(element_keys->begin(), element_keys->end());
}
-
void KeyAccumulator::NextPrototype() {
// Store the protoLength on the first call of this method.
if (!elements_.empty()) {
@@ -319,6 +312,154 @@ void KeyAccumulator::NextPrototype() {
level_symbol_length_ = 0;
}
+namespace {
+
+void TrySettingEmptyEnumCache(JSReceiver* object) {
+ Map* map = object->map();
+ DCHECK_EQ(kInvalidEnumCacheSentinel, map->EnumLength());
+ if (!map->OnlyHasSimpleProperties()) return;
+ if (map->IsJSProxyMap()) return;
+ if (map->NumberOfOwnDescriptors() > 0) {
+ int number_of_enumerable_own_properties =
+ map->NumberOfDescribedProperties(OWN_DESCRIPTORS, ENUMERABLE_STRINGS);
+ if (number_of_enumerable_own_properties > 0) return;
+ }
+ DCHECK(object->IsJSObject());
+ map->SetEnumLength(0);
+}
+
+bool CheckAndInitalizeSimpleEnumCache(JSReceiver* object) {
+ if (object->map()->EnumLength() == kInvalidEnumCacheSentinel) {
+ TrySettingEmptyEnumCache(object);
+ }
+ if (object->map()->EnumLength() != 0) return false;
+ DCHECK(object->IsJSObject());
+ return !JSObject::cast(object)->HasEnumerableElements();
+}
+} // namespace
+
+void FastKeyAccumulator::Prepare() {
+ DisallowHeapAllocation no_gc;
+ // Directly go for the fast path for OWN_ONLY keys.
+ if (type_ == OWN_ONLY) return;
+ // Fully walk the prototype chain and find the last prototype with keys.
+ is_receiver_simple_enum_ = false;
+ has_empty_prototype_ = true;
+ JSReceiver* first_non_empty_prototype;
+ for (PrototypeIterator iter(isolate_, *receiver_); !iter.IsAtEnd();
+ iter.Advance()) {
+ JSReceiver* current = iter.GetCurrent<JSReceiver>();
+ if (CheckAndInitalizeSimpleEnumCache(current)) continue;
+ has_empty_prototype_ = false;
+ first_non_empty_prototype = current;
+ // TODO(cbruni): use the first non-empty prototype.
+ USE(first_non_empty_prototype);
+ return;
+ }
+ DCHECK(has_empty_prototype_);
+ is_receiver_simple_enum_ =
+ receiver_->map()->EnumLength() != kInvalidEnumCacheSentinel &&
+ !JSObject::cast(*receiver_)->HasEnumerableElements();
+}
+
+namespace {
+
+template <bool fast_properties>
+Handle<FixedArray> GetOwnKeysWithElements(Isolate* isolate,
+ Handle<JSObject> object,
+ GetKeysConversion convert) {
+ Handle<FixedArray> keys;
+ ElementsAccessor* accessor = object->GetElementsAccessor();
+ if (fast_properties) {
+ keys = JSObject::GetFastEnumPropertyKeys(isolate, object);
+ } else {
+ // TODO(cbruni): preallocate big enough array to also hold elements.
+ keys = JSObject::GetEnumPropertyKeys(object);
+ }
+ Handle<FixedArray> result =
+ accessor->PrependElementIndices(object, keys, convert, ONLY_ENUMERABLE);
+
+ if (FLAG_trace_for_in_enumerate) {
+ PrintF("| strings=%d symbols=0 elements=%u || prototypes>=1 ||\n",
+ keys->length(), result->length() - keys->length());
+ }
+ return result;
+}
+
+MaybeHandle<FixedArray> GetOwnKeysWithUninitializedEnumCache(
+ Isolate* isolate, Handle<JSObject> object) {
+ // Uninitalized enum cache
+ Map* map = object->map();
+ if (object->elements() != isolate->heap()->empty_fixed_array() ||
+ object->elements() != isolate->heap()->empty_slow_element_dictionary()) {
+ // Assume that there are elements.
+ return MaybeHandle<FixedArray>();
+ }
+ int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ if (number_of_own_descriptors == 0) {
+ map->SetEnumLength(0);
+ return isolate->factory()->empty_fixed_array();
+ }
+ // We have no elements but possibly enumerable property keys, hence we can
+ // directly initialize the enum cache.
+ return JSObject::GetFastEnumPropertyKeys(isolate, object);
+}
+
+bool OnlyHasSimpleProperties(Map* map) {
+ return map->instance_type() > LAST_CUSTOM_ELEMENTS_RECEIVER;
+}
+
+} // namespace
+
+MaybeHandle<FixedArray> FastKeyAccumulator::GetKeys(GetKeysConversion convert) {
+ Handle<FixedArray> keys;
+ if (GetKeysFast(convert).ToHandle(&keys)) {
+ return keys;
+ }
+ return GetKeysSlow(convert);
+}
+
+MaybeHandle<FixedArray> FastKeyAccumulator::GetKeysFast(
+ GetKeysConversion convert) {
+ bool own_only = has_empty_prototype_ || type_ == OWN_ONLY;
+ Map* map = receiver_->map();
+ if (!own_only || !OnlyHasSimpleProperties(map)) {
+ return MaybeHandle<FixedArray>();
+ }
+
+ // From this point on we are certiain to only collect own keys.
+ DCHECK(receiver_->IsJSObject());
+ Handle<JSObject> object = Handle<JSObject>::cast(receiver_);
+
+ // Do not try to use the enum-cache for dict-mode objects.
+ if (map->is_dictionary_map()) {
+ return GetOwnKeysWithElements<false>(isolate_, object, convert);
+ }
+ int enum_length = receiver_->map()->EnumLength();
+ if (enum_length == kInvalidEnumCacheSentinel) {
+ Handle<FixedArray> keys;
+ // Try initializing the enum cache and return own properties.
+ if (GetOwnKeysWithUninitializedEnumCache(isolate_, object)
+ .ToHandle(&keys)) {
+ if (FLAG_trace_for_in_enumerate) {
+ PrintF("| strings=%d symbols=0 elements=0 || prototypes>=1 ||\n",
+ keys->length());
+ }
+ is_receiver_simple_enum_ =
+ object->map()->EnumLength() != kInvalidEnumCacheSentinel;
+ return keys;
+ }
+ }
+ // The properties-only case failed because there were probably elements on the
+ // receiver.
+ return GetOwnKeysWithElements<true>(isolate_, object, convert);
+}
+
+MaybeHandle<FixedArray> FastKeyAccumulator::GetKeysSlow(
+ GetKeysConversion convert) {
+ return JSReceiver::GetKeys(receiver_, type_, ENUMERABLE_STRINGS, KEEP_NUMBERS,
+ filter_proxy_keys_);
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/key-accumulator.h b/deps/v8/src/keys.h
index 9daee10cd3..1fd3fc02b0 100644
--- a/deps/v8/src/key-accumulator.h
+++ b/deps/v8/src/keys.h
@@ -52,6 +52,7 @@ class KeyAccumulator final BASE_EMBEDDED {
Handle<FixedArray> GetKeys(GetKeysConversion convert = KEEP_NUMBERS);
int length() { return length_; }
Isolate* isolate() { return isolate_; }
+ void set_filter_proxy_keys(bool filter) { filter_proxy_keys_ = filter; }
private:
bool AddIntegerKey(uint32_t key);
@@ -62,6 +63,7 @@ class KeyAccumulator final BASE_EMBEDDED {
Isolate* isolate_;
KeyCollectionType type_;
PropertyFilter filter_;
+ bool filter_proxy_keys_ = true;
// |elements_| contains the sorted element keys (indices) per level.
std::vector<std::vector<uint32_t>*> elements_;
// |protoLengths_| contains the total number of keys (elements + properties)
@@ -86,9 +88,42 @@ class KeyAccumulator final BASE_EMBEDDED {
DISALLOW_COPY_AND_ASSIGN(KeyAccumulator);
};
+// The FastKeyAccumulator handles the cases where there are no elements on the
+// prototype chain and forwords the complex/slow cases to the normal
+// KeyAccumulator.
+class FastKeyAccumulator {
+ public:
+ FastKeyAccumulator(Isolate* isolate, Handle<JSReceiver> receiver,
+ KeyCollectionType type, PropertyFilter filter)
+ : isolate_(isolate), receiver_(receiver), type_(type), filter_(filter) {
+ Prepare();
+ // TODO(cbruni): pass filter_ directly to the KeyAccumulator.
+ USE(filter_);
+ }
+
+ bool is_receiver_simple_enum() { return is_receiver_simple_enum_; }
+ bool has_empty_prototype() { return has_empty_prototype_; }
+ void set_filter_proxy_keys(bool filter) { filter_proxy_keys_ = filter; }
+
+ MaybeHandle<FixedArray> GetKeys(GetKeysConversion convert = KEEP_NUMBERS);
+
+ private:
+ void Prepare();
+ MaybeHandle<FixedArray> GetKeysFast(GetKeysConversion convert);
+ MaybeHandle<FixedArray> GetKeysSlow(GetKeysConversion convert);
+
+ Isolate* isolate_;
+ Handle<JSReceiver> receiver_;
+ KeyCollectionType type_;
+ PropertyFilter filter_;
+ bool is_receiver_simple_enum_ = false;
+ bool has_empty_prototype_ = false;
+ bool filter_proxy_keys_ = true;
+
+ DISALLOW_COPY_AND_ASSIGN(FastKeyAccumulator);
+};
} // namespace internal
} // namespace v8
-
#endif // V8_KEY_ACCUMULATOR_H_
diff --git a/deps/v8/src/libplatform/default-platform.cc b/deps/v8/src/libplatform/default-platform.cc
index 6902504d10..71ee6bee98 100644
--- a/deps/v8/src/libplatform/default-platform.cc
+++ b/deps/v8/src/libplatform/default-platform.cc
@@ -29,9 +29,7 @@ bool PumpMessageLoop(v8::Platform* platform, v8::Isolate* isolate) {
return reinterpret_cast<DefaultPlatform*>(platform)->PumpMessageLoop(isolate);
}
-
-const int DefaultPlatform::kMaxThreadPoolSize = 4;
-
+const int DefaultPlatform::kMaxThreadPoolSize = 8;
DefaultPlatform::DefaultPlatform()
: initialized_(false), thread_pool_size_(0) {}
@@ -66,7 +64,7 @@ void DefaultPlatform::SetThreadPoolSize(int thread_pool_size) {
base::LockGuard<base::Mutex> guard(&lock_);
DCHECK(thread_pool_size >= 0);
if (thread_pool_size < 1) {
- thread_pool_size = base::SysInfo::NumberOfProcessors();
+ thread_pool_size = base::SysInfo::NumberOfProcessors() - 1;
}
thread_pool_size_ =
std::max(std::min(thread_pool_size, kMaxThreadPoolSize), 1);
@@ -172,8 +170,9 @@ double DefaultPlatform::MonotonicallyIncreasingTime() {
uint64_t DefaultPlatform::AddTraceEvent(
char phase, const uint8_t* category_enabled_flag, const char* name,
- uint64_t id, uint64_t bind_id, int num_args, const char** arg_names,
- const uint8_t* arg_types, const uint64_t* arg_values, unsigned int flags) {
+ const char* scope, uint64_t id, uint64_t bind_id, int num_args,
+ const char** arg_names, const uint8_t* arg_types,
+ const uint64_t* arg_values, unsigned int flags) {
return 0;
}
@@ -194,6 +193,7 @@ const char* DefaultPlatform::GetCategoryGroupName(
return dummy;
}
+
size_t DefaultPlatform::NumberOfAvailableBackgroundThreads() {
return static_cast<size_t>(thread_pool_size_);
}
diff --git a/deps/v8/src/libplatform/default-platform.h b/deps/v8/src/libplatform/default-platform.h
index 2c428ee77e..ea39abc28b 100644
--- a/deps/v8/src/libplatform/default-platform.h
+++ b/deps/v8/src/libplatform/default-platform.h
@@ -47,9 +47,10 @@ class DefaultPlatform : public Platform {
const char* GetCategoryGroupName(
const uint8_t* category_enabled_flag) override;
uint64_t AddTraceEvent(char phase, const uint8_t* category_enabled_flag,
- const char* name, uint64_t id, uint64_t bind_id,
- int32_t num_args, const char** arg_names,
- const uint8_t* arg_types, const uint64_t* arg_values,
+ const char* name, const char* scope, uint64_t id,
+ uint64_t bind_id, int32_t num_args,
+ const char** arg_names, const uint8_t* arg_types,
+ const uint64_t* arg_values,
unsigned int flags) override;
void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
const char* name, uint64_t handle) override;
diff --git a/deps/v8/src/log-utils.h b/deps/v8/src/log-utils.h
index 7621668552..3e70a96c8f 100644
--- a/deps/v8/src/log-utils.h
+++ b/deps/v8/src/log-utils.h
@@ -30,7 +30,7 @@ class Log {
static bool InitLogAtStart() {
return FLAG_log || FLAG_log_api || FLAG_log_code || FLAG_log_gc ||
FLAG_log_handles || FLAG_log_suspect || FLAG_log_regexp ||
- FLAG_ll_prof || FLAG_perf_basic_prof ||
+ FLAG_ll_prof || FLAG_perf_basic_prof || FLAG_perf_prof ||
FLAG_log_internal_timer_events || FLAG_prof_cpp;
}
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index cbdd9dd106..93111a2e7e 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -13,9 +13,12 @@
#include "src/code-stubs.h"
#include "src/deoptimizer.h"
#include "src/global-handles.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/interpreter/interpreter.h"
#include "src/log-inl.h"
#include "src/log-utils.h"
#include "src/macro-assembler.h"
+#include "src/perf-jit.h"
#include "src/profiler/cpu-profiler.h"
#include "src/runtime-profiler.h"
#include "src/string-stream.h"
@@ -45,11 +48,13 @@ for (int i = 0; i < listeners_.length(); ++i) { \
} \
} while (false);
-static const char* ComputeMarker(SharedFunctionInfo* shared, Code* code) {
+static const char* ComputeMarker(SharedFunctionInfo* shared,
+ AbstractCode* code) {
switch (code->kind()) {
- case Code::FUNCTION:
+ case AbstractCode::FUNCTION:
+ case AbstractCode::INTERPRETED_FUNCTION:
return shared->optimization_disabled() ? "" : "~";
- case Code::OPTIMIZED_FUNCTION:
+ case AbstractCode::OPTIMIZED_FUNCTION:
return "*";
default:
return "";
@@ -159,42 +164,35 @@ CodeEventLogger::CodeEventLogger() : name_buffer_(new NameBuffer) { }
CodeEventLogger::~CodeEventLogger() { delete name_buffer_; }
-
void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code,
- const char* comment) {
+ AbstractCode* code, const char* comment) {
name_buffer_->Init(tag);
name_buffer_->AppendBytes(comment);
LogRecordedBuffer(code, NULL, name_buffer_->get(), name_buffer_->size());
}
-
void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code,
- Name* name) {
+ AbstractCode* code, Name* name) {
name_buffer_->Init(tag);
name_buffer_->AppendName(name);
LogRecordedBuffer(code, NULL, name_buffer_->get(), name_buffer_->size());
}
-
void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code,
+ AbstractCode* code,
SharedFunctionInfo* shared,
- CompilationInfo* info,
- Name* name) {
+ CompilationInfo* info, Name* name) {
name_buffer_->Init(tag);
name_buffer_->AppendBytes(ComputeMarker(shared, code));
name_buffer_->AppendName(name);
LogRecordedBuffer(code, shared, name_buffer_->get(), name_buffer_->size());
}
-
void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code,
+ AbstractCode* code,
SharedFunctionInfo* shared,
- CompilationInfo* info,
- Name* source, int line, int column) {
+ CompilationInfo* info, Name* source,
+ int line, int column) {
name_buffer_->Init(tag);
name_buffer_->AppendBytes(ComputeMarker(shared, code));
name_buffer_->AppendString(shared->DebugName());
@@ -211,17 +209,15 @@ void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
LogRecordedBuffer(code, shared, name_buffer_->get(), name_buffer_->size());
}
-
void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code,
- int args_count) {
+ AbstractCode* code, int args_count) {
name_buffer_->Init(tag);
name_buffer_->AppendInt(args_count);
LogRecordedBuffer(code, NULL, name_buffer_->get(), name_buffer_->size());
}
-
-void CodeEventLogger::RegExpCodeCreateEvent(Code* code, String* source) {
+void CodeEventLogger::RegExpCodeCreateEvent(AbstractCode* code,
+ String* source) {
name_buffer_->Init(Logger::REG_EXP_TAG);
name_buffer_->AppendString(source);
LogRecordedBuffer(code, NULL, name_buffer_->get(), name_buffer_->size());
@@ -232,17 +228,15 @@ void CodeEventLogger::RegExpCodeCreateEvent(Code* code, String* source) {
class PerfBasicLogger : public CodeEventLogger {
public:
PerfBasicLogger();
- virtual ~PerfBasicLogger();
+ ~PerfBasicLogger() override;
- virtual void CodeMoveEvent(Address from, Address to) { }
- virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) { }
- virtual void CodeDeleteEvent(Address from) { }
+ void CodeMoveEvent(AbstractCode* from, Address to) override {}
+ void CodeDisableOptEvent(AbstractCode* code,
+ SharedFunctionInfo* shared) override {}
private:
- virtual void LogRecordedBuffer(Code* code,
- SharedFunctionInfo* shared,
- const char* name,
- int length);
+ void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
+ const char* name, int length) override;
// Extension added to V8 log file name to get the low-level log name.
static const char kFilenameFormatString[];
@@ -281,16 +275,12 @@ PerfBasicLogger::~PerfBasicLogger() {
perf_output_handle_ = NULL;
}
-
-void PerfBasicLogger::LogRecordedBuffer(Code* code,
- SharedFunctionInfo*,
- const char* name,
- int length) {
- DCHECK(code->instruction_start() == code->address() + Code::kHeaderSize);
-
+void PerfBasicLogger::LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo*,
+ const char* name, int length) {
if (FLAG_perf_basic_prof_only_functions &&
- (code->kind() != Code::FUNCTION &&
- code->kind() != Code::OPTIMIZED_FUNCTION)) {
+ (code->kind() != AbstractCode::FUNCTION &&
+ code->kind() != AbstractCode::INTERPRETED_FUNCTION &&
+ code->kind() != AbstractCode::OPTIMIZED_FUNCTION)) {
return;
}
@@ -306,19 +296,17 @@ void PerfBasicLogger::LogRecordedBuffer(Code* code,
class LowLevelLogger : public CodeEventLogger {
public:
explicit LowLevelLogger(const char* file_name);
- virtual ~LowLevelLogger();
+ ~LowLevelLogger() override;
- virtual void CodeMoveEvent(Address from, Address to);
- virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) { }
- virtual void CodeDeleteEvent(Address from);
- virtual void SnapshotPositionEvent(Address addr, int pos);
- virtual void CodeMovingGCEvent();
+ void CodeMoveEvent(AbstractCode* from, Address to) override;
+ void CodeDisableOptEvent(AbstractCode* code,
+ SharedFunctionInfo* shared) override {}
+ void SnapshotPositionEvent(HeapObject* obj, int pos);
+ void CodeMovingGCEvent() override;
private:
- virtual void LogRecordedBuffer(Code* code,
- SharedFunctionInfo* shared,
- const char* name,
- int length);
+ void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
+ const char* name, int length) override;
// Low-level profiling event structures.
struct CodeCreateStruct {
@@ -338,21 +326,6 @@ class LowLevelLogger : public CodeEventLogger {
};
- struct CodeDeleteStruct {
- static const char kTag = 'D';
-
- Address address;
- };
-
-
- struct SnapshotPositionStruct {
- static const char kTag = 'P';
-
- Address address;
- int32_t position;
- };
-
-
static const char kCodeMovingGCTag = 'G';
@@ -416,21 +389,19 @@ void LowLevelLogger::LogCodeInfo() {
const char arch[] = "x87";
#elif V8_TARGET_ARCH_ARM64
const char arch[] = "arm64";
+#elif V8_TARGET_ARCH_S390
+ const char arch[] = "s390";
#else
const char arch[] = "unknown";
#endif
LogWriteBytes(arch, sizeof(arch));
}
-
-void LowLevelLogger::LogRecordedBuffer(Code* code,
- SharedFunctionInfo*,
- const char* name,
- int length) {
+void LowLevelLogger::LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo*,
+ const char* name, int length) {
CodeCreateStruct event;
event.name_size = length;
event.code_address = code->instruction_start();
- DCHECK(event.code_address == code->address() + Code::kHeaderSize);
event.code_size = code->instruction_size();
LogWriteStruct(event);
LogWriteBytes(name, length);
@@ -439,26 +410,11 @@ void LowLevelLogger::LogRecordedBuffer(Code* code,
code->instruction_size());
}
-
-void LowLevelLogger::CodeMoveEvent(Address from, Address to) {
+void LowLevelLogger::CodeMoveEvent(AbstractCode* from, Address to) {
CodeMoveStruct event;
- event.from_address = from + Code::kHeaderSize;
- event.to_address = to + Code::kHeaderSize;
- LogWriteStruct(event);
-}
-
-
-void LowLevelLogger::CodeDeleteEvent(Address from) {
- CodeDeleteStruct event;
- event.address = from + Code::kHeaderSize;
- LogWriteStruct(event);
-}
-
-
-void LowLevelLogger::SnapshotPositionEvent(Address addr, int pos) {
- SnapshotPositionStruct event;
- event.address = addr + Code::kHeaderSize;
- event.position = pos;
+ event.from_address = from->instruction_start();
+ size_t header_size = from->instruction_start() - from->address();
+ event.to_address = to + header_size;
LogWriteStruct(event);
}
@@ -484,23 +440,19 @@ class JitLogger : public CodeEventLogger {
public:
explicit JitLogger(JitCodeEventHandler code_event_handler);
- virtual void CodeMoveEvent(Address from, Address to);
- virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) { }
- virtual void CodeDeleteEvent(Address from);
- virtual void AddCodeLinePosInfoEvent(
- void* jit_handler_data,
- int pc_offset,
- int position,
- JitCodeEvent::PositionType position_type);
+ void CodeMoveEvent(AbstractCode* from, Address to) override;
+ void CodeDisableOptEvent(AbstractCode* code,
+ SharedFunctionInfo* shared) override {}
+ void AddCodeLinePosInfoEvent(void* jit_handler_data, int pc_offset,
+ int position,
+ JitCodeEvent::PositionType position_type);
void* StartCodePosInfoEvent();
- void EndCodePosInfoEvent(Code* code, void* jit_handler_data);
+ void EndCodePosInfoEvent(AbstractCode* code, void* jit_handler_data);
private:
- virtual void LogRecordedBuffer(Code* code,
- SharedFunctionInfo* shared,
- const char* name,
- int length);
+ void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
+ const char* name, int length) override;
JitCodeEventHandler code_event_handler_;
base::Mutex logger_mutex_;
@@ -511,10 +463,8 @@ JitLogger::JitLogger(JitCodeEventHandler code_event_handler)
: code_event_handler_(code_event_handler) {
}
-
-void JitLogger::LogRecordedBuffer(Code* code,
- SharedFunctionInfo* shared,
- const char* name,
+void JitLogger::LogRecordedBuffer(AbstractCode* code,
+ SharedFunctionInfo* shared, const char* name,
int length) {
JitCodeEvent event;
memset(&event, 0, sizeof(event));
@@ -531,35 +481,19 @@ void JitLogger::LogRecordedBuffer(Code* code,
code_event_handler_(&event);
}
-
-void JitLogger::CodeMoveEvent(Address from, Address to) {
+void JitLogger::CodeMoveEvent(AbstractCode* from, Address to) {
base::LockGuard<base::Mutex> guard(&logger_mutex_);
- Code* from_code = Code::cast(HeapObject::FromAddress(from));
JitCodeEvent event;
event.type = JitCodeEvent::CODE_MOVED;
- event.code_start = from_code->instruction_start();
- event.code_len = from_code->instruction_size();
+ event.code_start = from->instruction_start();
+ event.code_len = from->instruction_size();
// Calculate the header size.
- const size_t header_size =
- from_code->instruction_start() - reinterpret_cast<byte*>(from_code);
+ const size_t header_size = from->instruction_start() - from->address();
// Calculate the new start address of the instructions.
- event.new_code_start =
- reinterpret_cast<byte*>(HeapObject::FromAddress(to)) + header_size;
-
- code_event_handler_(&event);
-}
-
-
-void JitLogger::CodeDeleteEvent(Address from) {
- Code* from_code = Code::cast(HeapObject::FromAddress(from));
-
- JitCodeEvent event;
- event.type = JitCodeEvent::CODE_REMOVED;
- event.code_start = from_code->instruction_start();
- event.code_len = from_code->instruction_size();
+ event.new_code_start = to + header_size;
code_event_handler_(&event);
}
@@ -590,8 +524,8 @@ void* JitLogger::StartCodePosInfoEvent() {
return event.user_data;
}
-
-void JitLogger::EndCodePosInfoEvent(Code* code, void* jit_handler_data) {
+void JitLogger::EndCodePosInfoEvent(AbstractCode* code,
+ void* jit_handler_data) {
JitCodeEvent event;
memset(&event, 0, sizeof(event));
event.type = JitCodeEvent::CODE_END_LINE_INFO_RECORDING;
@@ -778,19 +712,18 @@ void Profiler::Run() {
//
Logger::Logger(Isolate* isolate)
- : isolate_(isolate),
- ticker_(NULL),
- profiler_(NULL),
- log_events_(NULL),
- is_logging_(false),
- log_(new Log(this)),
- perf_basic_logger_(NULL),
- ll_logger_(NULL),
- jit_logger_(NULL),
- listeners_(5),
- is_initialized_(false) {
-}
-
+ : isolate_(isolate),
+ ticker_(NULL),
+ profiler_(NULL),
+ log_events_(NULL),
+ is_logging_(false),
+ log_(new Log(this)),
+ perf_basic_logger_(NULL),
+ perf_jit_logger_(NULL),
+ ll_logger_(NULL),
+ jit_logger_(NULL),
+ listeners_(5),
+ is_initialized_(false) {}
Logger::~Logger() {
delete log_;
@@ -934,7 +867,6 @@ void Logger::TimerEvent(Logger::StartEnd se, const char* name) {
void Logger::EnterExternal(Isolate* isolate) {
LOG(isolate, TimerEvent(START, TimerEventExternal::name()));
- TRACE_EVENT_BEGIN0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.External");
DCHECK(isolate->current_vm_state() == JS);
isolate->set_current_vm_state(EXTERNAL);
}
@@ -942,7 +874,6 @@ void Logger::EnterExternal(Isolate* isolate) {
void Logger::LeaveExternal(Isolate* isolate) {
LOG(isolate, TimerEvent(END, TimerEventExternal::name()));
- TRACE_EVENT_END0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.External");
DCHECK(isolate->current_vm_state() == EXTERNAL);
isolate->set_current_vm_state(JS);
}
@@ -971,7 +902,7 @@ void LogRegExpSource(Handle<JSRegExp> regexp, Isolate* isolate,
// (re.global?"g":"") + (re.ignorecase?"i":"") + (re.multiline?"m":"")
Handle<Object> source =
- Object::GetProperty(isolate, regexp, "source").ToHandleChecked();
+ JSReceiver::GetProperty(isolate, regexp, "source").ToHandleChecked();
if (!source->IsString()) {
msg->Append("no source");
return;
@@ -990,19 +921,19 @@ void LogRegExpSource(Handle<JSRegExp> regexp, Isolate* isolate,
// global flag
Handle<Object> global =
- Object::GetProperty(isolate, regexp, "global").ToHandleChecked();
+ JSReceiver::GetProperty(isolate, regexp, "global").ToHandleChecked();
if (global->IsTrue()) {
msg->Append('g');
}
// ignorecase flag
Handle<Object> ignorecase =
- Object::GetProperty(isolate, regexp, "ignoreCase").ToHandleChecked();
+ JSReceiver::GetProperty(isolate, regexp, "ignoreCase").ToHandleChecked();
if (ignorecase->IsTrue()) {
msg->Append('i');
}
// multiline flag
Handle<Object> multiline =
- Object::GetProperty(isolate, regexp, "multiline").ToHandleChecked();
+ JSReceiver::GetProperty(isolate, regexp, "multiline").ToHandleChecked();
if (multiline->IsTrue()) {
msg->Append('m');
}
@@ -1136,10 +1067,9 @@ void Logger::SetterCallbackEvent(Name* name, Address entry_point) {
CallbackEventInternal("set ", name, entry_point);
}
-
static void AppendCodeCreateHeader(Log::MessageBuilder* msg,
Logger::LogEventsAndTags tag,
- Code* code) {
+ AbstractCode* code) {
DCHECK(msg);
msg->Append("%s,%s,%d,",
kLogEventsNames[Logger::CODE_CREATION_EVENT],
@@ -1149,9 +1079,7 @@ static void AppendCodeCreateHeader(Log::MessageBuilder* msg,
msg->Append(",%d,", code->ExecutableSize());
}
-
-void Logger::CodeCreateEvent(LogEventsAndTags tag,
- Code* code,
+void Logger::CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
const char* comment) {
PROFILER_LOG(CodeCreateEvent(tag, code, comment));
@@ -1165,9 +1093,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
msg.WriteToLogFile();
}
-
-void Logger::CodeCreateEvent(LogEventsAndTags tag,
- Code* code,
+void Logger::CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
Name* name) {
PROFILER_LOG(CodeCreateEvent(tag, code, name));
@@ -1187,11 +1113,8 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
msg.WriteToLogFile();
}
-
-void Logger::CodeCreateEvent(LogEventsAndTags tag,
- Code* code,
- SharedFunctionInfo* shared,
- CompilationInfo* info,
+void Logger::CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+ SharedFunctionInfo* shared, CompilationInfo* info,
Name* name) {
PROFILER_LOG(CodeCreateEvent(tag, code, shared, info, name));
@@ -1199,7 +1122,10 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
CALL_LISTENERS(CodeCreateEvent(tag, code, shared, info, name));
if (!FLAG_log_code || !log_->IsEnabled()) return;
- if (code == isolate_->builtins()->builtin(Builtins::kCompileLazy)) return;
+ if (code == AbstractCode::cast(
+ isolate_->builtins()->builtin(Builtins::kCompileLazy))) {
+ return;
+ }
Log::MessageBuilder msg(log_);
AppendCodeCreateHeader(&msg, tag, code);
@@ -1220,10 +1146,8 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
// Although, it is possible to extract source and line from
// the SharedFunctionInfo object, we left it to caller
// to leave logging functions free from heap allocations.
-void Logger::CodeCreateEvent(LogEventsAndTags tag,
- Code* code,
- SharedFunctionInfo* shared,
- CompilationInfo* info,
+void Logger::CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+ SharedFunctionInfo* shared, CompilationInfo* info,
Name* source, int line, int column) {
PROFILER_LOG(CodeCreateEvent(tag, code, shared, info, source, line, column));
@@ -1250,9 +1174,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
msg.WriteToLogFile();
}
-
-void Logger::CodeCreateEvent(LogEventsAndTags tag,
- Code* code,
+void Logger::CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
int args_count) {
PROFILER_LOG(CodeCreateEvent(tag, code, args_count));
@@ -1266,8 +1188,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
msg.WriteToLogFile();
}
-
-void Logger::CodeDisableOptEvent(Code* code,
+void Logger::CodeDisableOptEvent(AbstractCode* code,
SharedFunctionInfo* shared) {
PROFILER_LOG(CodeDisableOptEvent(code, shared));
@@ -1294,8 +1215,7 @@ void Logger::CodeMovingGCEvent() {
base::OS::SignalCodeMovingGC();
}
-
-void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
+void Logger::RegExpCodeCreateEvent(AbstractCode* code, String* source) {
PROFILER_LOG(RegExpCodeCreateEvent(code, source));
if (!is_logging_code_events()) return;
@@ -1310,33 +1230,16 @@ void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
msg.WriteToLogFile();
}
-
-void Logger::CodeMoveEvent(Address from, Address to) {
+void Logger::CodeMoveEvent(AbstractCode* from, Address to) {
PROFILER_LOG(CodeMoveEvent(from, to));
if (!is_logging_code_events()) return;
CALL_LISTENERS(CodeMoveEvent(from, to));
- MoveEventInternal(CODE_MOVE_EVENT, from, to);
-}
-
-
-void Logger::CodeDeleteEvent(Address from) {
- PROFILER_LOG(CodeDeleteEvent(from));
-
- if (!is_logging_code_events()) return;
- CALL_LISTENERS(CodeDeleteEvent(from));
-
- if (!FLAG_log_code || !log_->IsEnabled()) return;
- Log::MessageBuilder msg(log_);
- msg.Append("%s,", kLogEventsNames[CODE_DELETE_EVENT]);
- msg.AppendAddress(from);
- msg.WriteToLogFile();
+ MoveEventInternal(CODE_MOVE_EVENT, from->address(), to);
}
-
void Logger::CodeLinePosInfoAddPositionEvent(void* jit_handler_data,
- int pc_offset,
- int position) {
+ int pc_offset, int position) {
JIT_LOG(AddCodeLinePosInfoEvent(jit_handler_data,
pc_offset,
position,
@@ -1360,8 +1263,7 @@ void Logger::CodeStartLinePosInfoRecordEvent(PositionsRecorder* pos_recorder) {
}
}
-
-void Logger::CodeEndLinePosInfoRecordEvent(Code* code,
+void Logger::CodeEndLinePosInfoRecordEvent(AbstractCode* code,
void* jit_handler_data) {
JIT_LOG(EndCodePosInfoEvent(code, jit_handler_data));
}
@@ -1376,18 +1278,6 @@ void Logger::CodeNameEvent(Address addr, int pos, const char* code_name) {
}
-void Logger::SnapshotPositionEvent(Address addr, int pos) {
- if (!log_->IsEnabled()) return;
- LL_LOG(SnapshotPositionEvent(addr, pos));
- if (!FLAG_log_snapshot_positions) return;
- Log::MessageBuilder msg(log_);
- msg.Append("%s,", kLogEventsNames[SNAPSHOT_POSITION_EVENT]);
- msg.AppendAddress(addr);
- msg.Append(",%d", pos);
- msg.WriteToLogFile();
-}
-
-
void Logger::SharedFunctionInfoMoveEvent(Address from, Address to) {
if (!is_logging_code_events()) return;
MoveEventInternal(SHARED_FUNC_MOVE_EVENT, from, to);
@@ -1535,9 +1425,9 @@ void Logger::LogFailure() {
class EnumerateOptimizedFunctionsVisitor: public OptimizedFunctionVisitor {
public:
EnumerateOptimizedFunctionsVisitor(Handle<SharedFunctionInfo>* sfis,
- Handle<Code>* code_objects,
+ Handle<AbstractCode>* code_objects,
int* count)
- : sfis_(sfis), code_objects_(code_objects), count_(count) { }
+ : sfis_(sfis), code_objects_(code_objects), count_(count) {}
virtual void EnterContext(Context* context) {}
virtual void LeaveContext(Context* context) {}
@@ -1551,22 +1441,22 @@ class EnumerateOptimizedFunctionsVisitor: public OptimizedFunctionVisitor {
sfis_[*count_] = Handle<SharedFunctionInfo>(sfi);
}
if (code_objects_ != NULL) {
- DCHECK(function->code()->kind() == Code::OPTIMIZED_FUNCTION);
- code_objects_[*count_] = Handle<Code>(function->code());
+ DCHECK(function->abstract_code()->kind() ==
+ AbstractCode::OPTIMIZED_FUNCTION);
+ code_objects_[*count_] = Handle<AbstractCode>(function->abstract_code());
}
*count_ = *count_ + 1;
}
private:
Handle<SharedFunctionInfo>* sfis_;
- Handle<Code>* code_objects_;
+ Handle<AbstractCode>* code_objects_;
int* count_;
};
-
static int EnumerateCompiledFunctions(Heap* heap,
Handle<SharedFunctionInfo>* sfis,
- Handle<Code>* code_objects) {
+ Handle<AbstractCode>* code_objects) {
HeapIterator iterator(heap);
DisallowHeapAllocation no_gc;
int compiled_funcs_count = 0;
@@ -1583,7 +1473,8 @@ static int EnumerateCompiledFunctions(Heap* heap,
sfis[compiled_funcs_count] = Handle<SharedFunctionInfo>(sfi);
}
if (code_objects != NULL) {
- code_objects[compiled_funcs_count] = Handle<Code>(sfi->code());
+ code_objects[compiled_funcs_count] =
+ Handle<AbstractCode>(sfi->abstract_code());
}
++compiled_funcs_count;
}
@@ -1600,60 +1491,71 @@ static int EnumerateCompiledFunctions(Heap* heap,
void Logger::LogCodeObject(Object* object) {
- Code* code_object = Code::cast(object);
+ AbstractCode* code_object = AbstractCode::cast(object);
LogEventsAndTags tag = Logger::STUB_TAG;
const char* description = "Unknown code from the snapshot";
switch (code_object->kind()) {
- case Code::FUNCTION:
- case Code::OPTIMIZED_FUNCTION:
+ case AbstractCode::FUNCTION:
+ case AbstractCode::INTERPRETED_FUNCTION:
+ case AbstractCode::OPTIMIZED_FUNCTION:
return; // We log this later using LogCompiledFunctions.
- case Code::BINARY_OP_IC:
- case Code::COMPARE_IC: // fall through
- case Code::COMPARE_NIL_IC: // fall through
- case Code::TO_BOOLEAN_IC: // fall through
- case Code::STUB:
- description = CodeStub::MajorName(CodeStub::GetMajorKey(code_object));
+ case AbstractCode::BYTECODE_HANDLER:
+ return; // We log it later by walking the dispatch table.
+ case AbstractCode::BINARY_OP_IC: // fall through
+ case AbstractCode::COMPARE_IC: // fall through
+ case AbstractCode::TO_BOOLEAN_IC: // fall through
+
+ case AbstractCode::STUB:
+ description =
+ CodeStub::MajorName(CodeStub::GetMajorKey(code_object->GetCode()));
if (description == NULL)
description = "A stub from the snapshot";
tag = Logger::STUB_TAG;
break;
- case Code::REGEXP:
+ case AbstractCode::REGEXP:
description = "Regular expression code";
tag = Logger::REG_EXP_TAG;
break;
- case Code::BUILTIN:
- description = isolate_->builtins()->name(code_object->builtin_index());
+ case AbstractCode::BUILTIN:
+ description =
+ isolate_->builtins()->name(code_object->GetCode()->builtin_index());
tag = Logger::BUILTIN_TAG;
break;
- case Code::HANDLER:
+ case AbstractCode::HANDLER:
description = "An IC handler from the snapshot";
tag = Logger::HANDLER_TAG;
break;
- case Code::KEYED_LOAD_IC:
+ case AbstractCode::KEYED_LOAD_IC:
description = "A keyed load IC from the snapshot";
tag = Logger::KEYED_LOAD_IC_TAG;
break;
- case Code::LOAD_IC:
+ case AbstractCode::LOAD_IC:
description = "A load IC from the snapshot";
tag = Logger::LOAD_IC_TAG;
break;
- case Code::CALL_IC:
+ case AbstractCode::CALL_IC:
description = "A call IC from the snapshot";
tag = Logger::CALL_IC_TAG;
break;
- case Code::STORE_IC:
+ case AbstractCode::STORE_IC:
description = "A store IC from the snapshot";
tag = Logger::STORE_IC_TAG;
break;
- case Code::KEYED_STORE_IC:
+ case AbstractCode::KEYED_STORE_IC:
description = "A keyed store IC from the snapshot";
tag = Logger::KEYED_STORE_IC_TAG;
break;
- case Code::WASM_FUNCTION:
- description = "A wasm function";
+ case AbstractCode::WASM_FUNCTION:
+ description = "A Wasm function";
tag = Logger::STUB_TAG;
break;
- case Code::NUMBER_OF_KINDS:
+ case AbstractCode::JS_TO_WASM_FUNCTION:
+ description = "A JavaScript to Wasm adapter";
+ tag = Logger::STUB_TAG;
+ break;
+ case AbstractCode::WASM_TO_JS_FUNCTION:
+ description = "A Wasm to JavaScript adapter";
+ tag = Logger::STUB_TAG;
break;
}
PROFILE(isolate_, CodeCreateEvent(tag, code_object, description));
@@ -1668,12 +1570,34 @@ void Logger::LogCodeObjects() {
DisallowHeapAllocation no_gc;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (obj->IsCode()) LogCodeObject(obj);
+ if (obj->IsBytecodeArray()) LogCodeObject(obj);
}
}
+void Logger::LogBytecodeHandlers() {
+ if (!FLAG_ignition) return;
+
+ interpreter::Interpreter* interpreter = isolate_->interpreter();
+ const int last_index = static_cast<int>(interpreter::Bytecode::kLast);
+ for (auto operand_scale = interpreter::OperandScale::kSingle;
+ operand_scale <= interpreter::OperandScale::kMaxValid;
+ operand_scale =
+ interpreter::Bytecodes::NextOperandScale(operand_scale)) {
+ for (int index = 0; index <= last_index; ++index) {
+ interpreter::Bytecode bytecode = interpreter::Bytecodes::FromByte(index);
+ if (interpreter::Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
+ Code* code = interpreter->GetBytecodeHandler(bytecode, operand_scale);
+ std::string bytecode_name =
+ interpreter::Bytecodes::ToString(bytecode, operand_scale);
+ CodeCreateEvent(Logger::BYTECODE_HANDLER_TAG, AbstractCode::cast(code),
+ bytecode_name.c_str());
+ }
+ }
+ }
+}
void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
- Handle<Code> code) {
+ Handle<AbstractCode> code) {
Handle<String> func_name(shared->DebugName());
if (shared->script()->IsScript()) {
Handle<Script> script(Script::cast(shared->script()));
@@ -1730,7 +1654,7 @@ void Logger::LogCompiledFunctions() {
HandleScope scope(isolate_);
const int compiled_funcs_count = EnumerateCompiledFunctions(heap, NULL, NULL);
ScopedVector< Handle<SharedFunctionInfo> > sfis(compiled_funcs_count);
- ScopedVector< Handle<Code> > code_objects(compiled_funcs_count);
+ ScopedVector<Handle<AbstractCode> > code_objects(compiled_funcs_count);
EnumerateCompiledFunctions(heap, sfis.start(), code_objects.start());
// During iteration, there can be heap allocation due to
@@ -1827,11 +1751,6 @@ bool Logger::SetUp(Isolate* isolate) {
if (is_initialized_) return true;
is_initialized_ = true;
- // --ll-prof implies --log-code and --log-snapshot-positions.
- if (FLAG_ll_prof) {
- FLAG_log_snapshot_positions = true;
- }
-
std::ostringstream log_file_name;
PrepareLogFileName(log_file_name, isolate, FLAG_logfile);
log_->Initialize(log_file_name.str().c_str());
@@ -1842,6 +1761,11 @@ bool Logger::SetUp(Isolate* isolate) {
addCodeEventListener(perf_basic_logger_);
}
+ if (FLAG_perf_prof) {
+ perf_jit_logger_ = new PerfJitLogger();
+ addCodeEventListener(perf_jit_logger_);
+ }
+
if (FLAG_ll_prof) {
ll_logger_ = new LowLevelLogger(log_file_name.str().c_str());
addCodeEventListener(ll_logger_);
@@ -1910,6 +1834,12 @@ FILE* Logger::TearDown() {
perf_basic_logger_ = NULL;
}
+ if (perf_jit_logger_) {
+ removeCodeEventListener(perf_jit_logger_);
+ delete perf_jit_logger_;
+ perf_jit_logger_ = NULL;
+ }
+
if (ll_logger_) {
removeCodeEventListener(ll_logger_);
delete ll_logger_;
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index 1a454dad26..fdc50471b4 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -82,63 +82,61 @@ struct TickSample;
logger->Call; \
} while (false)
-
-#define LOG_EVENTS_AND_TAGS_LIST(V) \
- V(CODE_CREATION_EVENT, "code-creation") \
- V(CODE_DISABLE_OPT_EVENT, "code-disable-optimization") \
- V(CODE_MOVE_EVENT, "code-move") \
- V(CODE_DELETE_EVENT, "code-delete") \
- V(CODE_MOVING_GC, "code-moving-gc") \
- V(SHARED_FUNC_MOVE_EVENT, "sfi-move") \
- V(SNAPSHOT_POSITION_EVENT, "snapshot-pos") \
- V(SNAPSHOT_CODE_NAME_EVENT, "snapshot-code-name") \
- V(TICK_EVENT, "tick") \
- V(REPEAT_META_EVENT, "repeat") \
- V(BUILTIN_TAG, "Builtin") \
- V(CALL_DEBUG_BREAK_TAG, "CallDebugBreak") \
- V(CALL_DEBUG_PREPARE_STEP_IN_TAG, "CallDebugPrepareStepIn") \
- V(CALL_INITIALIZE_TAG, "CallInitialize") \
- V(CALL_MEGAMORPHIC_TAG, "CallMegamorphic") \
- V(CALL_MISS_TAG, "CallMiss") \
- V(CALL_NORMAL_TAG, "CallNormal") \
- V(CALL_PRE_MONOMORPHIC_TAG, "CallPreMonomorphic") \
- V(LOAD_INITIALIZE_TAG, "LoadInitialize") \
- V(LOAD_PREMONOMORPHIC_TAG, "LoadPreMonomorphic") \
- V(LOAD_MEGAMORPHIC_TAG, "LoadMegamorphic") \
- V(STORE_INITIALIZE_TAG, "StoreInitialize") \
- V(STORE_PREMONOMORPHIC_TAG, "StorePreMonomorphic") \
- V(STORE_GENERIC_TAG, "StoreGeneric") \
- V(STORE_MEGAMORPHIC_TAG, "StoreMegamorphic") \
- V(KEYED_CALL_DEBUG_BREAK_TAG, "KeyedCallDebugBreak") \
- V(KEYED_CALL_DEBUG_PREPARE_STEP_IN_TAG, \
- "KeyedCallDebugPrepareStepIn") \
- V(KEYED_CALL_INITIALIZE_TAG, "KeyedCallInitialize") \
- V(KEYED_CALL_MEGAMORPHIC_TAG, "KeyedCallMegamorphic") \
- V(KEYED_CALL_MISS_TAG, "KeyedCallMiss") \
- V(KEYED_CALL_NORMAL_TAG, "KeyedCallNormal") \
- V(KEYED_CALL_PRE_MONOMORPHIC_TAG, "KeyedCallPreMonomorphic") \
- V(CALLBACK_TAG, "Callback") \
- V(EVAL_TAG, "Eval") \
- V(FUNCTION_TAG, "Function") \
- V(HANDLER_TAG, "Handler") \
- V(KEYED_LOAD_IC_TAG, "KeyedLoadIC") \
- V(KEYED_LOAD_POLYMORPHIC_IC_TAG, "KeyedLoadPolymorphicIC") \
- V(KEYED_EXTERNAL_ARRAY_LOAD_IC_TAG, "KeyedExternalArrayLoadIC") \
- V(KEYED_STORE_IC_TAG, "KeyedStoreIC") \
- V(KEYED_STORE_POLYMORPHIC_IC_TAG, "KeyedStorePolymorphicIC") \
- V(KEYED_EXTERNAL_ARRAY_STORE_IC_TAG, "KeyedExternalArrayStoreIC") \
- V(LAZY_COMPILE_TAG, "LazyCompile") \
- V(CALL_IC_TAG, "CallIC") \
- V(LOAD_IC_TAG, "LoadIC") \
- V(LOAD_POLYMORPHIC_IC_TAG, "LoadPolymorphicIC") \
- V(REG_EXP_TAG, "RegExp") \
- V(SCRIPT_TAG, "Script") \
- V(STORE_IC_TAG, "StoreIC") \
- V(STORE_POLYMORPHIC_IC_TAG, "StorePolymorphicIC") \
- V(STUB_TAG, "Stub") \
- V(NATIVE_FUNCTION_TAG, "Function") \
- V(NATIVE_LAZY_COMPILE_TAG, "LazyCompile") \
- V(NATIVE_SCRIPT_TAG, "Script")
+#define LOG_EVENTS_AND_TAGS_LIST(V) \
+ V(CODE_CREATION_EVENT, "code-creation") \
+ V(CODE_DISABLE_OPT_EVENT, "code-disable-optimization") \
+ V(CODE_MOVE_EVENT, "code-move") \
+ V(CODE_DELETE_EVENT, "code-delete") \
+ V(CODE_MOVING_GC, "code-moving-gc") \
+ V(SHARED_FUNC_MOVE_EVENT, "sfi-move") \
+ V(SNAPSHOT_CODE_NAME_EVENT, "snapshot-code-name") \
+ V(TICK_EVENT, "tick") \
+ V(REPEAT_META_EVENT, "repeat") \
+ V(BUILTIN_TAG, "Builtin") \
+ V(CALL_DEBUG_BREAK_TAG, "CallDebugBreak") \
+ V(CALL_DEBUG_PREPARE_STEP_IN_TAG, "CallDebugPrepareStepIn") \
+ V(CALL_INITIALIZE_TAG, "CallInitialize") \
+ V(CALL_MEGAMORPHIC_TAG, "CallMegamorphic") \
+ V(CALL_MISS_TAG, "CallMiss") \
+ V(CALL_NORMAL_TAG, "CallNormal") \
+ V(CALL_PRE_MONOMORPHIC_TAG, "CallPreMonomorphic") \
+ V(LOAD_INITIALIZE_TAG, "LoadInitialize") \
+ V(LOAD_PREMONOMORPHIC_TAG, "LoadPreMonomorphic") \
+ V(LOAD_MEGAMORPHIC_TAG, "LoadMegamorphic") \
+ V(STORE_INITIALIZE_TAG, "StoreInitialize") \
+ V(STORE_PREMONOMORPHIC_TAG, "StorePreMonomorphic") \
+ V(STORE_GENERIC_TAG, "StoreGeneric") \
+ V(STORE_MEGAMORPHIC_TAG, "StoreMegamorphic") \
+ V(KEYED_CALL_DEBUG_BREAK_TAG, "KeyedCallDebugBreak") \
+ V(KEYED_CALL_DEBUG_PREPARE_STEP_IN_TAG, "KeyedCallDebugPrepareStepIn") \
+ V(KEYED_CALL_INITIALIZE_TAG, "KeyedCallInitialize") \
+ V(KEYED_CALL_MEGAMORPHIC_TAG, "KeyedCallMegamorphic") \
+ V(KEYED_CALL_MISS_TAG, "KeyedCallMiss") \
+ V(KEYED_CALL_NORMAL_TAG, "KeyedCallNormal") \
+ V(KEYED_CALL_PRE_MONOMORPHIC_TAG, "KeyedCallPreMonomorphic") \
+ V(CALLBACK_TAG, "Callback") \
+ V(EVAL_TAG, "Eval") \
+ V(FUNCTION_TAG, "Function") \
+ V(HANDLER_TAG, "Handler") \
+ V(BYTECODE_HANDLER_TAG, "BytecodeHandler") \
+ V(KEYED_LOAD_IC_TAG, "KeyedLoadIC") \
+ V(KEYED_LOAD_POLYMORPHIC_IC_TAG, "KeyedLoadPolymorphicIC") \
+ V(KEYED_EXTERNAL_ARRAY_LOAD_IC_TAG, "KeyedExternalArrayLoadIC") \
+ V(KEYED_STORE_IC_TAG, "KeyedStoreIC") \
+ V(KEYED_STORE_POLYMORPHIC_IC_TAG, "KeyedStorePolymorphicIC") \
+ V(KEYED_EXTERNAL_ARRAY_STORE_IC_TAG, "KeyedExternalArrayStoreIC") \
+ V(LAZY_COMPILE_TAG, "LazyCompile") \
+ V(CALL_IC_TAG, "CallIC") \
+ V(LOAD_IC_TAG, "LoadIC") \
+ V(LOAD_POLYMORPHIC_IC_TAG, "LoadPolymorphicIC") \
+ V(REG_EXP_TAG, "RegExp") \
+ V(SCRIPT_TAG, "Script") \
+ V(STORE_IC_TAG, "StoreIC") \
+ V(STORE_POLYMORPHIC_IC_TAG, "StorePolymorphicIC") \
+ V(STUB_TAG, "Stub") \
+ V(NATIVE_FUNCTION_TAG, "Function") \
+ V(NATIVE_LAZY_COMPILE_TAG, "LazyCompile") \
+ V(NATIVE_SCRIPT_TAG, "Script")
// Note that 'NATIVE_' cases for functions and scripts are mapped onto
// original tags when writing to the log.
@@ -146,6 +144,7 @@ struct TickSample;
class JitLogger;
class PerfBasicLogger;
class LowLevelLogger;
+class PerfJitLogger;
class Sampler;
class Logger {
@@ -224,30 +223,24 @@ class Logger {
void GetterCallbackEvent(Name* name, Address entry_point);
void SetterCallbackEvent(Name* name, Address entry_point);
// Emits a code create event.
- void CodeCreateEvent(LogEventsAndTags tag,
- Code* code, const char* source);
- void CodeCreateEvent(LogEventsAndTags tag,
- Code* code, Name* name);
- void CodeCreateEvent(LogEventsAndTags tag,
- Code* code,
- SharedFunctionInfo* shared,
- CompilationInfo* info,
+ void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+ const char* source);
+ void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code, Name* name);
+ void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+ SharedFunctionInfo* shared, CompilationInfo* info,
Name* name);
- void CodeCreateEvent(LogEventsAndTags tag,
- Code* code,
- SharedFunctionInfo* shared,
- CompilationInfo* info,
+ void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+ SharedFunctionInfo* shared, CompilationInfo* info,
Name* source, int line, int column);
- void CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count);
+ void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+ int args_count);
// Emits a code deoptimization event.
- void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared);
+ void CodeDisableOptEvent(AbstractCode* code, SharedFunctionInfo* shared);
void CodeMovingGCEvent();
// Emits a code create event for a RegExp.
- void RegExpCodeCreateEvent(Code* code, String* source);
+ void RegExpCodeCreateEvent(AbstractCode* code, String* source);
// Emits a code move event.
- void CodeMoveEvent(Address from, Address to);
- // Emits a code delete event.
- void CodeDeleteEvent(Address from);
+ void CodeMoveEvent(AbstractCode* from, Address to);
// Emits a code line info add event with Postion type.
void CodeLinePosInfoAddPositionEvent(void* jit_handler_data,
int pc_offset,
@@ -260,12 +253,12 @@ class Logger {
void CodeStartLinePosInfoRecordEvent(PositionsRecorder* pos_recorder);
// Emits a code line info finish record event.
// It's the callee's responsibility to dispose the parameter jit_handler_data.
- void CodeEndLinePosInfoRecordEvent(Code* code, void* jit_handler_data);
+ void CodeEndLinePosInfoRecordEvent(AbstractCode* code,
+ void* jit_handler_data);
void SharedFunctionInfoMoveEvent(Address from, Address to);
void CodeNameEvent(Address addr, int pos, const char* code_name);
- void SnapshotPositionEvent(Address addr, int pos);
// ==== Events logged by --log-gc. ====
// Heap sampling events: start, end, and individual types.
@@ -316,13 +309,15 @@ class Logger {
void StopProfiler();
void LogExistingFunction(Handle<SharedFunctionInfo> shared,
- Handle<Code> code);
+ Handle<AbstractCode> code);
// Logs all compiled functions found in the heap.
void LogCompiledFunctions();
// Logs all accessor callbacks found in the heap.
void LogAccessorCallbacks();
// Used for logging stubs found in the snapshot.
void LogCodeObjects();
+ // Used for logging bytecode handlers found in the snapshot.
+ void LogBytecodeHandlers();
// Converts tag to a corresponding NATIVE_... if the script is native.
INLINE(static LogEventsAndTags ToNativeByScript(LogEventsAndTags, Script*));
@@ -397,6 +392,7 @@ class Logger {
bool is_logging_;
Log* log_;
PerfBasicLogger* perf_basic_logger_;
+ PerfJitLogger* perf_jit_logger_;
LowLevelLogger* ll_logger_;
JitLogger* jit_logger_;
List<CodeEventListener*> listeners_;
@@ -413,6 +409,7 @@ class Logger {
#define TIMER_EVENTS_LIST(V) \
V(RecompileSynchronous, true) \
V(RecompileConcurrent, true) \
+ V(CompileIgnition, true) \
V(CompileFullCode, true) \
V(OptimizeCode, true) \
V(CompileCode, true) \
@@ -446,83 +443,86 @@ class TimerEventScope {
Isolate* isolate_;
};
+class PositionsRecorder BASE_EMBEDDED {
+ public:
+ PositionsRecorder() { jit_handler_data_ = NULL; }
+
+ void AttachJITHandlerData(void* user_data) { jit_handler_data_ = user_data; }
+
+ void* DetachJITHandlerData() {
+ void* old_data = jit_handler_data_;
+ jit_handler_data_ = NULL;
+ return old_data;
+ }
+
+ protected:
+ // Currently jit_handler_data_ is used to store JITHandler-specific data
+ // over the lifetime of a PositionsRecorder
+ void* jit_handler_data_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(PositionsRecorder);
+};
class CodeEventListener {
public:
virtual ~CodeEventListener() {}
- virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code,
+ virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
const char* comment) = 0;
- virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code,
+ virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
Name* name) = 0;
- virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code,
+ virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
SharedFunctionInfo* shared,
- CompilationInfo* info,
- Name* name) = 0;
- virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code,
+ CompilationInfo* info, Name* name) = 0;
+ virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
SharedFunctionInfo* shared,
- CompilationInfo* info,
- Name* source,
- int line, int column) = 0;
- virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code,
+ CompilationInfo* info, Name* source, int line,
+ int column) = 0;
+ virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
int args_count) = 0;
virtual void CallbackEvent(Name* name, Address entry_point) = 0;
virtual void GetterCallbackEvent(Name* name, Address entry_point) = 0;
virtual void SetterCallbackEvent(Name* name, Address entry_point) = 0;
- virtual void RegExpCodeCreateEvent(Code* code, String* source) = 0;
- virtual void CodeMoveEvent(Address from, Address to) = 0;
- virtual void CodeDeleteEvent(Address from) = 0;
+ virtual void RegExpCodeCreateEvent(AbstractCode* code, String* source) = 0;
+ virtual void CodeMoveEvent(AbstractCode* from, Address to) = 0;
virtual void SharedFunctionInfoMoveEvent(Address from, Address to) = 0;
virtual void CodeMovingGCEvent() = 0;
- virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) = 0;
+ virtual void CodeDisableOptEvent(AbstractCode* code,
+ SharedFunctionInfo* shared) = 0;
};
class CodeEventLogger : public CodeEventListener {
public:
CodeEventLogger();
- virtual ~CodeEventLogger();
-
- virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code,
- const char* comment);
- virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code,
- Name* name);
- virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code,
- int args_count);
- virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code,
- SharedFunctionInfo* shared,
- CompilationInfo* info,
- Name* name);
- virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code,
- SharedFunctionInfo* shared,
- CompilationInfo* info,
- Name* source,
- int line, int column);
- virtual void RegExpCodeCreateEvent(Code* code, String* source);
-
- virtual void CallbackEvent(Name* name, Address entry_point) { }
- virtual void GetterCallbackEvent(Name* name, Address entry_point) { }
- virtual void SetterCallbackEvent(Name* name, Address entry_point) { }
- virtual void SharedFunctionInfoMoveEvent(Address from, Address to) { }
- virtual void CodeMovingGCEvent() { }
+ ~CodeEventLogger() override;
+
+ void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
+ const char* comment) override;
+ void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
+ Name* name) override;
+ void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
+ int args_count) override;
+ void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
+ SharedFunctionInfo* shared, CompilationInfo* info,
+ Name* name) override;
+ void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
+ SharedFunctionInfo* shared, CompilationInfo* info,
+ Name* source, int line, int column) override;
+ void RegExpCodeCreateEvent(AbstractCode* code, String* source) override;
+
+ void CallbackEvent(Name* name, Address entry_point) override {}
+ void GetterCallbackEvent(Name* name, Address entry_point) override {}
+ void SetterCallbackEvent(Name* name, Address entry_point) override {}
+ void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
+ void CodeMovingGCEvent() override {}
private:
class NameBuffer;
- virtual void LogRecordedBuffer(Code* code,
- SharedFunctionInfo* shared,
- const char* name,
- int length) = 0;
+ virtual void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
+ const char* name, int length) = 0;
NameBuffer* name_buffer_;
};
diff --git a/deps/v8/src/lookup.cc b/deps/v8/src/lookup.cc
index bad5a20df5..3df8752c01 100644
--- a/deps/v8/src/lookup.cc
+++ b/deps/v8/src/lookup.cc
@@ -45,6 +45,25 @@ LookupIterator LookupIterator::PropertyOrElement(Isolate* isolate,
return LookupIterator(receiver, name, configuration);
}
+template <bool is_element>
+void LookupIterator::Start() {
+ DisallowHeapAllocation no_gc;
+
+ has_property_ = false;
+ state_ = NOT_FOUND;
+ holder_ = initial_holder_;
+
+ JSReceiver* holder = *holder_;
+ Map* map = holder->map();
+
+ state_ = LookupInHolder<is_element>(map, holder);
+ if (IsFound()) return;
+
+ NextInternal<is_element>(map, holder);
+}
+
+template void LookupIterator::Start<true>();
+template void LookupIterator::Start<false>();
void LookupIterator::Next() {
DCHECK_NE(JSPROXY, state_);
@@ -55,38 +74,47 @@ void LookupIterator::Next() {
JSReceiver* holder = *holder_;
Map* map = holder->map();
- // Perform lookup on current holder.
- state_ = LookupInHolder(map, holder);
- if (IsFound()) return;
+ if (map->instance_type() <= LAST_SPECIAL_RECEIVER_TYPE) {
+ state_ = IsElement() ? LookupInSpecialHolder<true>(map, holder)
+ : LookupInSpecialHolder<false>(map, holder);
+ if (IsFound()) return;
+ }
- // Continue lookup if lookup on current holder failed.
+ IsElement() ? NextInternal<true>(map, holder)
+ : NextInternal<false>(map, holder);
+}
+
+template <bool is_element>
+void LookupIterator::NextInternal(Map* map, JSReceiver* holder) {
do {
JSReceiver* maybe_holder = NextHolder(map);
if (maybe_holder == nullptr) {
if (interceptor_state_ == InterceptorState::kSkipNonMasking) {
- RestartLookupForNonMaskingInterceptors();
+ RestartLookupForNonMaskingInterceptors<is_element>();
return;
}
- break;
+ state_ = NOT_FOUND;
+ if (holder != *holder_) holder_ = handle(holder, isolate_);
+ return;
}
holder = maybe_holder;
map = holder->map();
- state_ = LookupInHolder(map, holder);
+ state_ = LookupInHolder<is_element>(map, holder);
} while (!IsFound());
- if (holder != *holder_) holder_ = handle(holder, isolate_);
+ holder_ = handle(holder, isolate_);
}
-
+template <bool is_element>
void LookupIterator::RestartInternal(InterceptorState interceptor_state) {
- state_ = NOT_FOUND;
interceptor_state_ = interceptor_state;
property_details_ = PropertyDetails::Empty();
- holder_ = initial_holder_;
number_ = DescriptorArray::kNotFound;
- Next();
+ Start<is_element>();
}
+template void LookupIterator::RestartInternal<true>(InterceptorState);
+template void LookupIterator::RestartInternal<false>(InterceptorState);
// static
Handle<JSReceiver> LookupIterator::GetRootForNonJSReceiver(
@@ -116,29 +144,17 @@ Handle<Map> LookupIterator::GetReceiverMap() const {
return handle(Handle<HeapObject>::cast(receiver_)->map(), isolate_);
}
-
-Handle<JSObject> LookupIterator::GetStoreTarget() const {
- if (receiver_->IsJSGlobalProxy()) {
- Object* prototype = JSGlobalProxy::cast(*receiver_)->map()->prototype();
- if (!prototype->IsNull()) {
- return handle(JSGlobalObject::cast(prototype), isolate_);
- }
- }
- return Handle<JSObject>::cast(receiver_);
-}
-
-
bool LookupIterator::HasAccess() const {
DCHECK_EQ(ACCESS_CHECK, state_);
return isolate_->MayAccess(handle(isolate_->context()),
GetHolder<JSObject>());
}
-
+template <bool is_element>
void LookupIterator::ReloadPropertyInformation() {
state_ = BEFORE_PROPERTY;
interceptor_state_ = InterceptorState::kUninitialized;
- state_ = LookupInHolder(holder_->map(), *holder_);
+ state_ = LookupInHolder<is_element>(holder_->map(), *holder_);
DCHECK(IsFound() || !holder_->HasFastProperties());
}
@@ -156,14 +172,11 @@ bool LookupIterator::HolderIsInContextIndex(uint32_t index) const {
return false;
}
-void LookupIterator::UpdateProtector() {
- if (!FLAG_harmony_species) return;
-
- if (IsElement()) return;
+void LookupIterator::InternalUpdateProtector() {
if (isolate_->bootstrapper()->IsActive()) return;
if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
- if (*name_ == *isolate_->factory()->constructor_string()) {
+ if (*name_ == heap()->constructor_string()) {
// Setting the constructor property could change an instance's @@species
if (holder_->IsJSArray()) {
isolate_->CountUsage(
@@ -178,7 +191,7 @@ void LookupIterator::UpdateProtector() {
isolate_->InvalidateArraySpeciesProtector();
}
}
- } else if (*name_ == *isolate_->factory()->species_symbol()) {
+ } else if (*name_ == heap()->species_symbol()) {
// Setting the Symbol.species property of any Array constructor invalidates
// the species protector
if (HolderIsInContextIndex(Context::ARRAY_FUNCTION_INDEX)) {
@@ -228,7 +241,7 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
}
JSObject::MigrateToMap(holder, new_map);
- ReloadPropertyInformation();
+ ReloadPropertyInformation<false>();
}
@@ -243,19 +256,23 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
Handle<FixedArrayBase> elements(holder->elements());
holder->GetElementsAccessor()->Reconfigure(holder, elements, number_, value,
attributes);
- } else if (!holder->HasFastProperties()) {
- PropertyDetails details(attributes, v8::internal::DATA, 0,
- PropertyCellType::kMutable);
- JSObject::SetNormalizedProperty(holder, name(), value, details);
+ ReloadPropertyInformation<true>();
} else {
- Handle<Map> old_map(holder->map(), isolate_);
- Handle<Map> new_map = Map::ReconfigureExistingProperty(
- old_map, descriptor_number(), i::kData, attributes);
- new_map = Map::PrepareForDataProperty(new_map, descriptor_number(), value);
- JSObject::MigrateToMap(holder, new_map);
+ if (!holder->HasFastProperties()) {
+ PropertyDetails details(attributes, v8::internal::DATA, 0,
+ PropertyCellType::kMutable);
+ JSObject::SetNormalizedProperty(holder, name(), value, details);
+ } else {
+ Handle<Map> old_map(holder->map(), isolate_);
+ Handle<Map> new_map = Map::ReconfigureExistingProperty(
+ old_map, descriptor_number(), i::kData, attributes);
+ new_map =
+ Map::PrepareForDataProperty(new_map, descriptor_number(), value);
+ JSObject::MigrateToMap(holder, new_map);
+ }
+ ReloadPropertyInformation<false>();
}
- ReloadPropertyInformation();
WriteDataValue(value);
#if VERIFY_HEAP
@@ -323,7 +340,7 @@ void LookupIterator::ApplyTransitionToDataProperty(Handle<JSObject> receiver) {
property_details_ = transition->GetLastDescriptorDetails();
state_ = DATA;
} else {
- ReloadPropertyInformation();
+ ReloadPropertyInformation<false>();
}
}
@@ -342,7 +359,7 @@ void LookupIterator::Delete() {
if (holder->HasFastProperties()) {
JSObject::NormalizeProperties(Handle<JSObject>::cast(holder), mode, 0,
"DeletingProperty");
- ReloadPropertyInformation();
+ ReloadPropertyInformation<false>();
}
// TODO(verwaest): Get rid of the name_ argument.
JSReceiver::DeleteNormalizedProperty(holder, name_, number_);
@@ -364,14 +381,31 @@ void LookupIterator::TransitionToAccessorProperty(
Handle<JSObject> receiver = GetStoreTarget();
if (!IsElement() && !receiver->map()->is_dictionary_map()) {
- holder_ = receiver;
Handle<Map> old_map(receiver->map(), isolate_);
+
+ if (!holder_.is_identical_to(receiver)) {
+ holder_ = receiver;
+ state_ = NOT_FOUND;
+ } else if (state_ == INTERCEPTOR) {
+ LookupInRegularHolder<false>(*old_map, *holder_);
+ }
+ int descriptor =
+ IsFound() ? static_cast<int>(number_) : DescriptorArray::kNotFound;
+
Handle<Map> new_map = Map::TransitionToAccessorProperty(
- old_map, name_, component, accessor, attributes);
+ old_map, name_, descriptor, component, accessor, attributes);
+ bool simple_transition = new_map->GetBackPointer() == receiver->map();
JSObject::MigrateToMap(receiver, new_map);
- ReloadPropertyInformation();
+ if (simple_transition) {
+ int number = new_map->LastAdded();
+ number_ = static_cast<uint32_t>(number);
+ property_details_ = new_map->GetLastDescriptorDetails();
+ state_ = ACCESSOR;
+ return;
+ }
+ ReloadPropertyInformation<false>();
if (!new_map->is_dictionary_map()) return;
}
@@ -430,6 +464,8 @@ void LookupIterator::TransitionToAccessorPair(Handle<Object> pair,
} else {
receiver->set_elements(*dictionary);
}
+
+ ReloadPropertyInformation<true>();
} else {
PropertyNormalizationMode mode = receiver->map()->is_prototype_map()
? KEEP_INOBJECT_PROPERTIES
@@ -440,9 +476,9 @@ void LookupIterator::TransitionToAccessorPair(Handle<Object> pair,
JSObject::SetNormalizedProperty(receiver, name_, pair, details);
JSObject::ReoptimizeIfPrototype(receiver);
- }
- ReloadPropertyInformation();
+ ReloadPropertyInformation<false>();
+ }
}
@@ -451,13 +487,13 @@ bool LookupIterator::HolderIsReceiverOrHiddenPrototype() const {
// Optimization that only works if configuration_ is not mutable.
if (!check_prototype_chain()) return true;
DisallowHeapAllocation no_gc;
+ if (*receiver_ == *holder_) return true;
if (!receiver_->IsJSReceiver()) return false;
JSReceiver* current = JSReceiver::cast(*receiver_);
JSReceiver* object = *holder_;
- if (current == object) return true;
if (!current->map()->has_hidden_prototype()) return false;
// JSProxy do not occur as hidden prototypes.
- if (current->IsJSProxy()) return false;
+ if (object->IsJSProxy()) return false;
PrototypeIterator iter(isolate(), current,
PrototypeIterator::START_AT_PROTOTYPE,
PrototypeIterator::END_AT_NON_HIDDEN);
@@ -581,15 +617,9 @@ void LookupIterator::WriteDataValue(Handle<Object> value) {
}
}
-
-bool LookupIterator::HasInterceptor(Map* map) const {
- if (IsElement()) return map->has_indexed_interceptor();
- return map->has_named_interceptor();
-}
-
-
+template <bool is_element>
bool LookupIterator::SkipInterceptor(JSObject* holder) {
- auto info = GetInterceptor(holder);
+ auto info = GetInterceptor<is_element>(holder);
// TODO(dcarney): check for symbol/can_intercept_symbols here as well.
if (info->non_masking()) {
switch (interceptor_state_) {
@@ -605,10 +635,9 @@ bool LookupIterator::SkipInterceptor(JSObject* holder) {
return interceptor_state_ == InterceptorState::kProcessNonMasking;
}
-
JSReceiver* LookupIterator::NextHolder(Map* map) {
DisallowHeapAllocation no_gc;
- if (!map->prototype()->IsJSReceiver()) return NULL;
+ if (map->prototype() == heap()->null_value()) return NULL;
DCHECK(!map->IsJSGlobalProxyMap() || map->has_hidden_prototype());
@@ -635,45 +664,37 @@ LookupIterator::State LookupIterator::NotFound(JSReceiver* const holder) const {
: NOT_FOUND;
}
-LookupIterator::State LookupIterator::LookupInHolder(Map* const map,
- JSReceiver* const holder) {
+namespace {
+
+template <bool is_element>
+bool HasInterceptor(Map* map) {
+ return is_element ? map->has_indexed_interceptor()
+ : map->has_named_interceptor();
+}
+
+} // namespace
+
+template <bool is_element>
+LookupIterator::State LookupIterator::LookupInSpecialHolder(
+ Map* const map, JSReceiver* const holder) {
STATIC_ASSERT(INTERCEPTOR == BEFORE_PROPERTY);
- DisallowHeapAllocation no_gc;
- if (interceptor_state_ == InterceptorState::kProcessNonMasking) {
- return LookupNonMaskingInterceptorInHolder(map, holder);
- }
switch (state_) {
case NOT_FOUND:
if (map->IsJSProxyMap()) {
- if (IsElement() || !name_->IsPrivate()) return JSPROXY;
+ if (is_element || !name_->IsPrivate()) return JSPROXY;
}
if (map->is_access_check_needed()) {
- if (IsElement() || !name_->IsPrivate()) return ACCESS_CHECK;
+ if (is_element || !name_->IsPrivate()) return ACCESS_CHECK;
}
// Fall through.
case ACCESS_CHECK:
- if (check_interceptor() && HasInterceptor(map) &&
- !SkipInterceptor(JSObject::cast(holder))) {
- if (IsElement() || !name_->IsPrivate()) return INTERCEPTOR;
+ if (check_interceptor() && HasInterceptor<is_element>(map) &&
+ !SkipInterceptor<is_element>(JSObject::cast(holder))) {
+ if (is_element || !name_->IsPrivate()) return INTERCEPTOR;
}
// Fall through.
case INTERCEPTOR:
- if (IsElement()) {
- JSObject* js_object = JSObject::cast(holder);
- ElementsAccessor* accessor = js_object->GetElementsAccessor();
- FixedArrayBase* backing_store = js_object->elements();
- number_ = accessor->GetEntryForIndex(js_object, backing_store, index_);
- if (number_ == kMaxUInt32) {
- return holder->IsJSTypedArray() ? INTEGER_INDEXED_EXOTIC : NOT_FOUND;
- }
- property_details_ = accessor->GetDetails(js_object, number_);
- } else if (!map->is_dictionary_map()) {
- DescriptorArray* descriptors = map->instance_descriptors();
- int number = descriptors->SearchWithCache(isolate_, *name_, map);
- if (number == DescriptorArray::kNotFound) return NotFound(holder);
- number_ = static_cast<uint32_t>(number);
- property_details_ = descriptors->GetDetails(number_);
- } else if (map->IsJSGlobalObjectMap()) {
+ if (!is_element && map->IsJSGlobalObjectMap()) {
GlobalDictionary* dict = JSObject::cast(holder)->global_dictionary();
int number = dict->FindEntry(name_);
if (number == GlobalDictionary::kNotFound) return NOT_FOUND;
@@ -682,20 +703,15 @@ LookupIterator::State LookupIterator::LookupInHolder(Map* const map,
PropertyCell* cell = PropertyCell::cast(dict->ValueAt(number_));
if (cell->value()->IsTheHole()) return NOT_FOUND;
property_details_ = cell->property_details();
- } else {
- NameDictionary* dict = holder->property_dictionary();
- int number = dict->FindEntry(name_);
- if (number == NameDictionary::kNotFound) return NotFound(holder);
- number_ = static_cast<uint32_t>(number);
- property_details_ = dict->DetailsAt(number_);
- }
- has_property_ = true;
- switch (property_details_.kind()) {
- case v8::internal::kData:
- return DATA;
- case v8::internal::kAccessor:
- return ACCESSOR;
+ has_property_ = true;
+ switch (property_details_.kind()) {
+ case v8::internal::kData:
+ return DATA;
+ case v8::internal::kAccessor:
+ return ACCESSOR;
+ }
}
+ return LookupInRegularHolder<is_element>(map, holder);
case ACCESSOR:
case DATA:
return NOT_FOUND;
@@ -705,22 +721,47 @@ LookupIterator::State LookupIterator::LookupInHolder(Map* const map,
UNREACHABLE();
}
UNREACHABLE();
- return state_;
+ return NOT_FOUND;
}
-
-LookupIterator::State LookupIterator::LookupNonMaskingInterceptorInHolder(
+template <bool is_element>
+LookupIterator::State LookupIterator::LookupInRegularHolder(
Map* const map, JSReceiver* const holder) {
- switch (state_) {
- case NOT_FOUND:
- if (check_interceptor() && HasInterceptor(map) &&
- !SkipInterceptor(JSObject::cast(holder))) {
- return INTERCEPTOR;
- }
- // Fall through.
- default:
- return NOT_FOUND;
+ DisallowHeapAllocation no_gc;
+ if (interceptor_state_ == InterceptorState::kProcessNonMasking) {
+ return NOT_FOUND;
}
+
+ if (is_element) {
+ JSObject* js_object = JSObject::cast(holder);
+ ElementsAccessor* accessor = js_object->GetElementsAccessor();
+ FixedArrayBase* backing_store = js_object->elements();
+ number_ = accessor->GetEntryForIndex(js_object, backing_store, index_);
+ if (number_ == kMaxUInt32) {
+ return holder->IsJSTypedArray() ? INTEGER_INDEXED_EXOTIC : NOT_FOUND;
+ }
+ property_details_ = accessor->GetDetails(js_object, number_);
+ } else if (!map->is_dictionary_map()) {
+ DescriptorArray* descriptors = map->instance_descriptors();
+ int number = descriptors->SearchWithCache(isolate_, *name_, map);
+ if (number == DescriptorArray::kNotFound) return NotFound(holder);
+ number_ = static_cast<uint32_t>(number);
+ property_details_ = descriptors->GetDetails(number_);
+ } else {
+ NameDictionary* dict = holder->property_dictionary();
+ int number = dict->FindEntry(name_);
+ if (number == NameDictionary::kNotFound) return NotFound(holder);
+ number_ = static_cast<uint32_t>(number);
+ property_details_ = dict->DetailsAt(number_);
+ }
+ has_property_ = true;
+ switch (property_details_.kind()) {
+ case v8::internal::kData:
+ return DATA;
+ case v8::internal::kAccessor:
+ return ACCESSOR;
+ }
+
UNREACHABLE();
return state_;
}
diff --git a/deps/v8/src/lookup.h b/deps/v8/src/lookup.h
index 0c298d99bf..8005f32eb9 100644
--- a/deps/v8/src/lookup.h
+++ b/deps/v8/src/lookup.h
@@ -47,81 +47,71 @@ class LookupIterator final BASE_EMBEDDED {
LookupIterator(Handle<Object> receiver, Handle<Name> name,
Configuration configuration = DEFAULT)
: configuration_(ComputeConfiguration(configuration, name)),
- state_(NOT_FOUND),
interceptor_state_(InterceptorState::kUninitialized),
property_details_(PropertyDetails::Empty()),
isolate_(name->GetIsolate()),
name_(isolate_->factory()->InternalizeName(name)),
+ receiver_(receiver),
+ initial_holder_(GetRoot(isolate_, receiver)),
// kMaxUInt32 isn't a valid index.
index_(kMaxUInt32),
- receiver_(receiver),
- holder_(GetRoot(isolate_, receiver)),
- initial_holder_(holder_),
number_(DescriptorArray::kNotFound) {
#ifdef DEBUG
uint32_t index; // Assert that the name is not an array index.
DCHECK(!name->AsArrayIndex(&index));
#endif // DEBUG
- Next();
+ Start<false>();
}
LookupIterator(Handle<Object> receiver, Handle<Name> name,
Handle<JSReceiver> holder,
Configuration configuration = DEFAULT)
: configuration_(ComputeConfiguration(configuration, name)),
- state_(NOT_FOUND),
interceptor_state_(InterceptorState::kUninitialized),
property_details_(PropertyDetails::Empty()),
isolate_(name->GetIsolate()),
name_(isolate_->factory()->InternalizeName(name)),
+ receiver_(receiver),
+ initial_holder_(holder),
// kMaxUInt32 isn't a valid index.
index_(kMaxUInt32),
- receiver_(receiver),
- holder_(holder),
- initial_holder_(holder_),
number_(DescriptorArray::kNotFound) {
#ifdef DEBUG
uint32_t index; // Assert that the name is not an array index.
DCHECK(!name->AsArrayIndex(&index));
#endif // DEBUG
- Next();
+ Start<false>();
}
LookupIterator(Isolate* isolate, Handle<Object> receiver, uint32_t index,
Configuration configuration = DEFAULT)
: configuration_(configuration),
- state_(NOT_FOUND),
interceptor_state_(InterceptorState::kUninitialized),
property_details_(PropertyDetails::Empty()),
isolate_(isolate),
- name_(),
- index_(index),
receiver_(receiver),
- holder_(GetRoot(isolate, receiver, index)),
- initial_holder_(holder_),
+ initial_holder_(GetRoot(isolate, receiver, index)),
+ index_(index),
number_(DescriptorArray::kNotFound) {
// kMaxUInt32 isn't a valid index.
DCHECK_NE(kMaxUInt32, index_);
- Next();
+ Start<true>();
}
LookupIterator(Isolate* isolate, Handle<Object> receiver, uint32_t index,
Handle<JSReceiver> holder,
Configuration configuration = DEFAULT)
: configuration_(configuration),
- state_(NOT_FOUND),
interceptor_state_(InterceptorState::kUninitialized),
property_details_(PropertyDetails::Empty()),
isolate_(isolate),
- name_(),
- index_(index),
receiver_(receiver),
- holder_(holder),
- initial_holder_(holder_),
+ initial_holder_(holder),
+ index_(index),
number_(DescriptorArray::kNotFound) {
// kMaxUInt32 isn't a valid index.
DCHECK_NE(kMaxUInt32, index_);
- Next();
+ Start<true>();
}
static LookupIterator PropertyOrElement(
@@ -154,7 +144,10 @@ class LookupIterator final BASE_EMBEDDED {
Isolate* isolate, Handle<Object> receiver, Handle<Object> key,
bool* success, Configuration configuration = DEFAULT);
- void Restart() { RestartInternal(InterceptorState::kUninitialized); }
+ void Restart() {
+ InterceptorState state = InterceptorState::kUninitialized;
+ IsElement() ? RestartInternal<true>(state) : RestartInternal<false>(state);
+ }
Isolate* isolate() const { return isolate_; }
State state() const { return state_; }
@@ -184,7 +177,17 @@ class LookupIterator final BASE_EMBEDDED {
Heap* heap() const { return isolate_->heap(); }
Factory* factory() const { return isolate_->factory(); }
Handle<Object> GetReceiver() const { return receiver_; }
- Handle<JSObject> GetStoreTarget() const;
+
+ Handle<JSObject> GetStoreTarget() const {
+ if (receiver_->IsJSGlobalProxy()) {
+ Map* map = JSGlobalProxy::cast(*receiver_)->map();
+ if (map->has_hidden_prototype()) {
+ return handle(JSGlobalObject::cast(map->prototype()), isolate_);
+ }
+ }
+ return Handle<JSObject>::cast(receiver_);
+ }
+
bool is_dictionary_holder() const { return !holder_->HasFastProperties(); }
Handle<Map> transition_map() const {
DCHECK_EQ(TRANSITION, state_);
@@ -252,13 +255,24 @@ class LookupIterator final BASE_EMBEDDED {
Handle<Object> GetAccessors() const;
inline Handle<InterceptorInfo> GetInterceptor() const {
DCHECK_EQ(INTERCEPTOR, state_);
- return handle(GetInterceptor(JSObject::cast(*holder_)), isolate_);
+ InterceptorInfo* result =
+ IsElement() ? GetInterceptor<true>(JSObject::cast(*holder_))
+ : GetInterceptor<false>(JSObject::cast(*holder_));
+ return handle(result, isolate_);
}
Handle<Object> GetDataValue() const;
void WriteDataValue(Handle<Object> value);
- void UpdateProtector();
+ inline void UpdateProtector() {
+ if (FLAG_harmony_species && !IsElement() &&
+ (*name_ == heap()->constructor_string() ||
+ *name_ == heap()->species_symbol())) {
+ InternalUpdateProtector();
+ }
+ }
private:
+ void InternalUpdateProtector();
+
enum class InterceptorState {
kUninitialized,
kSkipNonMasking,
@@ -268,19 +282,37 @@ class LookupIterator final BASE_EMBEDDED {
Handle<Map> GetReceiverMap() const;
MUST_USE_RESULT inline JSReceiver* NextHolder(Map* map);
- inline State LookupInHolder(Map* map, JSReceiver* holder);
+
+ template <bool is_element>
+ void Start();
+ template <bool is_element>
+ void NextInternal(Map* map, JSReceiver* holder);
+ template <bool is_element>
+ inline State LookupInHolder(Map* map, JSReceiver* holder) {
+ return map->instance_type() <= LAST_SPECIAL_RECEIVER_TYPE
+ ? LookupInSpecialHolder<is_element>(map, holder)
+ : LookupInRegularHolder<is_element>(map, holder);
+ }
+ template <bool is_element>
+ State LookupInRegularHolder(Map* map, JSReceiver* holder);
+ template <bool is_element>
+ State LookupInSpecialHolder(Map* map, JSReceiver* holder);
+ template <bool is_element>
void RestartLookupForNonMaskingInterceptors() {
- RestartInternal(InterceptorState::kProcessNonMasking);
+ RestartInternal<is_element>(InterceptorState::kProcessNonMasking);
}
+ template <bool is_element>
void RestartInternal(InterceptorState interceptor_state);
- State LookupNonMaskingInterceptorInHolder(Map* map, JSReceiver* holder);
Handle<Object> FetchValue() const;
+ template <bool is_element>
void ReloadPropertyInformation();
- inline bool SkipInterceptor(JSObject* holder);
- bool HasInterceptor(Map* map) const;
+
+ template <bool is_element>
+ bool SkipInterceptor(JSObject* holder);
+ template <bool is_element>
inline InterceptorInfo* GetInterceptor(JSObject* holder) const {
- if (IsElement()) return holder->GetIndexedInterceptor();
- return holder->GetNamedInterceptor();
+ return is_element ? holder->GetIndexedInterceptor()
+ : holder->GetNamedInterceptor();
}
bool check_hidden() const { return (configuration_ & kHidden) != 0; }
@@ -332,11 +364,11 @@ class LookupIterator final BASE_EMBEDDED {
PropertyDetails property_details_;
Isolate* const isolate_;
Handle<Name> name_;
- uint32_t index_;
Handle<Object> transition_;
const Handle<Object> receiver_;
Handle<JSReceiver> holder_;
const Handle<JSReceiver> initial_holder_;
+ const uint32_t index_;
uint32_t number_;
};
diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h
index fd2aa7c314..6338b2c1c1 100644
--- a/deps/v8/src/macro-assembler.h
+++ b/deps/v8/src/macro-assembler.h
@@ -68,6 +68,11 @@ enum AllocationFlags {
#include "src/mips64/assembler-mips64-inl.h"
#include "src/mips64/constants-mips64.h"
#include "src/mips64/macro-assembler-mips64.h"
+#elif V8_TARGET_ARCH_S390
+#include "src/s390/assembler-s390.h"
+#include "src/s390/assembler-s390-inl.h"
+#include "src/s390/constants-s390.h"
+#include "src/s390/macro-assembler-s390.h"
#elif V8_TARGET_ARCH_X87
#include "src/x87/assembler-x87.h"
#include "src/x87/assembler-x87-inl.h"
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index 072ac1d5a6..67ab36f6cf 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -236,9 +236,20 @@ Handle<Object> CallSite::GetMethodName() {
Handle<Object> function_name(fun_->shared()->name(), isolate_);
if (function_name->IsName()) {
Handle<Name> name = Handle<Name>::cast(function_name);
+ // ES2015 gives getters and setters name prefixes which must
+ // be stripped to find the property name.
+ if (name->IsString() && FLAG_harmony_function_name) {
+ Handle<String> name_string = Handle<String>::cast(name);
+ if (name_string->IsUtf8EqualTo(CStrVector("get "), true) ||
+ name_string->IsUtf8EqualTo(CStrVector("set "), true)) {
+ name = isolate_->factory()->NewProperSubString(name_string, 4,
+ name_string->length());
+ }
+ }
if (CheckMethodName(isolate_, obj, name, fun_,
- LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR))
+ LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR)) {
return name;
+ }
}
HandleScope outer_scope(isolate_);
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index c71e11ba24..4aa0b73e71 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -94,11 +94,13 @@ class CallSite {
T(ArrayFunctionsOnSealed, "Cannot add/remove sealed array elements") \
T(ArrayNotSubclassable, "Subclassing Arrays is not currently supported.") \
T(CalledNonCallable, "% is not a function") \
- T(CalledNonCallableInstanceOf, "right-hand side is not a function") \
+ T(CalledNonCallableInstanceOf, \
+ "Right-hand side of 'instanceof' is not callable") \
T(CalledOnNonObject, "% called on non-object") \
T(CalledOnNullOrUndefined, "% called on null or undefined") \
T(CallSiteExpectsFunction, \
"CallSite expects function as second argument, got %") \
+ T(CallSiteMethod, "CallSite method % expects CallSite as receiver") \
T(CannotConvertToPrimitive, "Cannot convert object to primitive value") \
T(CannotPreventExt, "Cannot prevent extensions") \
T(CannotFreezeArrayBufferView, \
@@ -135,6 +137,8 @@ class CallSite {
"Function has non-object prototype '%' in instanceof check") \
T(InvalidArgument, "invalid_argument") \
T(InvalidInOperatorUse, "Cannot use 'in' operator to search for '%' in %") \
+ T(InvalidRegExpExecResult, \
+ "RegExp exec method returned something other than an Object or null") \
T(InvalidSimdOperation, "% is not a valid type for this SIMD operation.") \
T(IteratorResultNotAnObject, "Iterator result % is not an object") \
T(IteratorValueNotAnObject, "Iterator value % is not an entry object") \
@@ -147,6 +151,8 @@ class CallSite {
T(NoAccess, "no access") \
T(NonCoercible, "Cannot match against 'undefined' or 'null'.") \
T(NonExtensibleProto, "% is not extensible") \
+ T(NonObjectInInstanceOfCheck, \
+ "Right-hand side of 'instanceof' is not an object") \
T(NonObjectPropertyLoad, "Cannot read property '%' of %") \
T(NonObjectPropertyStore, "Cannot set property '%' of %") \
T(NoSetterInCallback, "Cannot set property % of % which has only a getter") \
@@ -187,6 +193,7 @@ class CallSite {
T(PromiseCyclic, "Chaining cycle detected for promise %") \
T(PromiseExecutorAlreadyInvoked, \
"Promise executor has already been invoked with non-undefined arguments") \
+ T(PromiseNonCallable, "Promise resolve or reject function is not callable") \
T(PropertyDescObject, "Property description must be an object: %") \
T(PropertyNotFunction, \
"'%' returned for property '%' of object '%' is not a function") \
@@ -208,9 +215,6 @@ class CallSite {
T(ProxyDeletePropertyNonConfigurable, \
"'deleteProperty' on proxy: trap returned truish for property '%' which " \
"is non-configurable in the proxy target") \
- T(ProxyEnumerateNonObject, "'enumerate' on proxy: trap returned non-object") \
- T(ProxyEnumerateNonString, \
- "'enumerate' on proxy: trap result includes non-string") \
T(ProxyGetNonConfigurableData, \
"'get' on proxy: property '%' is a read-only and " \
"non-configurable data property on the proxy target but the proxy " \
@@ -305,18 +309,6 @@ class CallSite {
T(StrictReadOnlyProperty, \
"Cannot assign to read only property '%' of % '%'") \
T(StrictCannotCreateProperty, "Cannot create property '%' on % '%'") \
- T(StrongArity, \
- "In strong mode, calling a function with too few arguments is deprecated") \
- T(StrongDeleteProperty, \
- "Deleting property '%' of strong object '%' is deprecated") \
- T(StrongExtendNull, "In strong mode, classes extending null are deprecated") \
- T(StrongImplicitConversion, \
- "In strong mode, implicit conversions are deprecated") \
- T(StrongRedefineDisallowed, \
- "On strong object %, redefining writable, non-configurable property '%' " \
- "to be non-writable is deprecated") \
- T(StrongSetProto, \
- "On strong object %, redefining the internal prototype is deprecated") \
T(SymbolIteratorInvalid, \
"Result of the Symbol.iterator method is not an object") \
T(SymbolKeyFor, "% is not a symbol") \
@@ -333,9 +325,6 @@ class CallSite {
/* ReferenceError */ \
T(NonMethod, "'super' is referenced from non-method") \
T(NotDefined, "% is not defined") \
- T(StrongSuperCallMissing, \
- "In strong mode, invoking the super constructor in a subclass is " \
- "required") \
T(UnsupportedSuper, "Unsupported reference to 'super'") \
/* RangeError */ \
T(DateRange, "Provided date is not in valid range.") \
@@ -347,6 +336,7 @@ class CallSite {
T(InvalidArrayBufferLength, "Invalid array buffer length") \
T(ArrayBufferAllocationFailed, "Array buffer allocation failed") \
T(InvalidArrayLength, "Invalid array length") \
+ T(InvalidAtomicAccessIndex, "Invalid atomic access index") \
T(InvalidCodePoint, "Invalid code point %") \
T(InvalidCountValue, "Invalid count value") \
T(InvalidCurrencyCode, "Invalid currency code: %") \
@@ -390,7 +380,6 @@ class CallSite {
"% loop variable declaration may not have an initializer.") \
T(ForInOfLoopMultiBindings, \
"Invalid left-hand side in % loop: Must have a single binding.") \
- T(IllegalAccess, "Illegal access") \
T(IllegalBreak, "Illegal break statement") \
T(IllegalContinue, "Illegal continue statement") \
T(IllegalLanguageModeDirective, \
@@ -407,11 +396,15 @@ class CallSite {
T(InvalidLhsInPrefixOp, \
"Invalid left-hand side expression in prefix operation") \
T(InvalidRegExpFlags, "Invalid flags supplied to RegExp constructor '%'") \
+ T(InvalidOrUnexpectedToken, "Invalid or unexpected token") \
T(JsonParseUnexpectedEOS, "Unexpected end of JSON input") \
T(JsonParseUnexpectedToken, "Unexpected token % in JSON at position %") \
T(JsonParseUnexpectedTokenNumber, "Unexpected number in JSON at position %") \
T(JsonParseUnexpectedTokenString, "Unexpected string in JSON at position %") \
T(LabelRedeclaration, "Label '%' has already been declared") \
+ T(LabelledFunctionDeclaration, \
+ "Labelled function declaration not allowed as the body of a control flow " \
+ "structure") \
T(MalformedArrowFunParamList, "Malformed arrow function parameter list") \
T(MalformedRegExp, "Invalid regular expression: /%/: %") \
T(MalformedRegExpFlags, "Invalid regular expression flags") \
@@ -432,7 +425,11 @@ class CallSite {
"Setter function argument must not be a rest parameter") \
T(ParamDupe, "Duplicate parameter name not allowed in this context") \
T(ParenthesisInArgString, "Function arg string contains parenthesis") \
+ T(RuntimeWrongNumArgs, "Runtime function given wrong number of arguments") \
T(SingleFunctionLiteral, "Single function literal required") \
+ T(SloppyFunction, \
+ "In non-strict mode code, functions can only be declared at top level, " \
+ "inside a block, or as the body of an if statement.") \
T(SloppyLexical, \
"Block-scoped declarations (let, const, function, class) not yet " \
"supported outside strict mode") \
@@ -442,53 +439,9 @@ class CallSite {
T(StrictEvalArguments, "Unexpected eval or arguments in strict mode") \
T(StrictFunction, \
"In strict mode code, functions can only be declared at top level or " \
- "immediately within another function.") \
+ "inside a block.") \
T(StrictOctalLiteral, "Octal literals are not allowed in strict mode.") \
T(StrictWith, "Strict mode code may not include a with statement") \
- T(StrongArguments, \
- "In strong mode, 'arguments' is deprecated, use '...args' instead") \
- T(StrongConstructorDirective, \
- "\"use strong\" directive is disallowed in class constructor body") \
- T(StrongConstructorReturnMisplaced, \
- "In strong mode, returning from a constructor before its super " \
- "constructor invocation or all assignments to 'this' is deprecated") \
- T(StrongConstructorReturnValue, \
- "In strong mode, returning a value from a constructor is deprecated") \
- T(StrongConstructorSuper, \
- "In strong mode, 'super' can only be used to invoke the super " \
- "constructor, and cannot be nested inside another statement or " \
- "expression") \
- T(StrongConstructorThis, \
- "In strong mode, 'this' can only be used to initialize properties, and " \
- "cannot be nested inside another statement or expression") \
- T(StrongDelete, \
- "In strong mode, 'delete' is deprecated, use maps or sets instead") \
- T(StrongDirectEval, "In strong mode, direct calls to eval are deprecated") \
- T(StrongEllision, \
- "In strong mode, arrays with holes are deprecated, use maps instead") \
- T(StrongEmpty, \
- "In strong mode, empty sub-statements are deprecated, make them explicit " \
- "with '{}' instead") \
- T(StrongEqual, \
- "In strong mode, '==' and '!=' are deprecated, use '===' and '!==' " \
- "instead") \
- T(StrongForIn, \
- "In strong mode, 'for'-'in' loops are deprecated, use 'for'-'of' instead") \
- T(StrongPropertyAccess, \
- "In strong mode, accessing missing property '%' of % is deprecated") \
- T(StrongSuperCallDuplicate, \
- "In strong mode, invoking the super constructor multiple times is " \
- "deprecated") \
- T(StrongSuperCallMisplaced, \
- "In strong mode, the super constructor must be invoked before any " \
- "assignment to 'this'") \
- T(StrongSwitchFallthrough, \
- "In strong mode, switch fall-through is deprecated, terminate each case " \
- "with 'break', 'continue', 'return' or 'throw'") \
- T(StrongUndefined, \
- "In strong mode, binding or assigning to 'undefined' is deprecated") \
- T(StrongVar, \
- "In strong mode, 'var' is deprecated, use 'let' or 'const' instead") \
T(TemplateOctalLiteral, \
"Octal literals are not allowed in template strings.") \
T(ThisFormalParameter, "'this' is not a valid formal parameter name") \
@@ -518,7 +471,10 @@ class CallSite {
T(UnterminatedTemplate, "Unterminated template literal") \
T(UnterminatedTemplateExpr, "Missing } in template expression") \
T(FoundNonCallableHasInstance, "Found non-callable @@hasInstance") \
- T(NonObjectInInstanceOfCheck, "Expecting an object in instanceof check") \
+ T(InvalidHexEscapeSequence, "Invalid hexadecimal escape sequence") \
+ T(InvalidUnicodeEscapeSequence, "Invalid Unicode escape sequence") \
+ T(UndefinedUnicodeCodePoint, "Undefined Unicode code-point") \
+ T(YieldInParameter, "Yield expression not allowed in formal parameter") \
/* EvalError */ \
T(CodeGenFromStrings, "%") \
/* URIError */ \
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index 5e27f4545b..517d4adab0 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -102,6 +102,10 @@ Address RelocInfo::target_address() {
return Assembler::target_address_at(pc_, host_);
}
+Address RelocInfo::wasm_memory_reference() {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ return Assembler::target_address_at(pc_, host_);
+}
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) ||
@@ -152,6 +156,18 @@ void RelocInfo::set_target_address(Address target,
}
}
+void RelocInfo::update_wasm_memory_reference(
+ Address old_base, Address new_base, size_t old_size, size_t new_size,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ DCHECK(old_base <= wasm_memory_reference() &&
+ wasm_memory_reference() < old_base + old_size);
+ Address updated_reference = new_base + (wasm_memory_reference() - old_base);
+ DCHECK(new_base <= updated_reference &&
+ updated_reference < new_base + new_size);
+ Assembler::set_target_address_at(isolate_, pc_, host_, updated_reference,
+ icache_flush_mode);
+}
Address Assembler::target_address_from_return_address(Address pc) {
return pc - kCallTargetAddressOffset;
@@ -160,19 +176,30 @@ Address Assembler::target_address_from_return_address(Address pc) {
void Assembler::set_target_internal_reference_encoded_at(Address pc,
Address target) {
- // Encoded internal references are lui/ori load of 32-bit abolute address.
- Instr instr_lui = Assembler::instr_at(pc + 0 * Assembler::kInstrSize);
- Instr instr_ori = Assembler::instr_at(pc + 1 * Assembler::kInstrSize);
- DCHECK(Assembler::IsLui(instr_lui));
- DCHECK(Assembler::IsOri(instr_ori));
- instr_lui &= ~kImm16Mask;
- instr_ori &= ~kImm16Mask;
+ Instr instr1 = Assembler::instr_at(pc + 0 * Assembler::kInstrSize);
+ Instr instr2 = Assembler::instr_at(pc + 1 * Assembler::kInstrSize);
+ DCHECK(Assembler::IsLui(instr1));
+ DCHECK(Assembler::IsOri(instr2) || Assembler::IsJicOrJialc(instr2));
+ instr1 &= ~kImm16Mask;
+ instr2 &= ~kImm16Mask;
int32_t imm = reinterpret_cast<int32_t>(target);
DCHECK((imm & 3) == 0);
- Assembler::instr_at_put(pc + 0 * Assembler::kInstrSize,
- instr_lui | ((imm >> kLuiShift) & kImm16Mask));
- Assembler::instr_at_put(pc + 1 * Assembler::kInstrSize,
- instr_ori | (imm & kImm16Mask));
+ if (Assembler::IsJicOrJialc(instr2)) {
+ // Encoded internal references are lui/jic load of 32-bit absolute address.
+ uint32_t lui_offset_u, jic_offset_u;
+ Assembler::UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
+
+ Assembler::instr_at_put(pc + 0 * Assembler::kInstrSize,
+ instr1 | lui_offset_u);
+ Assembler::instr_at_put(pc + 1 * Assembler::kInstrSize,
+ instr2 | jic_offset_u);
+ } else {
+ // Encoded internal references are lui/ori load of 32-bit absolute address.
+ Assembler::instr_at_put(pc + 0 * Assembler::kInstrSize,
+ instr1 | ((imm >> kLuiShift) & kImm16Mask));
+ Assembler::instr_at_put(pc + 1 * Assembler::kInstrSize,
+ instr2 | (imm & kImm16Mask));
+ }
// Currently used only by deserializer, and all code will be flushed
// after complete deserialization, no need to flush on each reference.
@@ -230,14 +257,19 @@ Address RelocInfo::target_internal_reference() {
if (rmode_ == INTERNAL_REFERENCE) {
return Memory::Address_at(pc_);
} else {
- // Encoded internal references are lui/ori load of 32-bit abolute address.
+ // Encoded internal references are lui/ori or lui/jic load of 32-bit
+ // absolute address.
DCHECK(rmode_ == INTERNAL_REFERENCE_ENCODED);
- Instr instr_lui = Assembler::instr_at(pc_ + 0 * Assembler::kInstrSize);
- Instr instr_ori = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);
- DCHECK(Assembler::IsLui(instr_lui));
- DCHECK(Assembler::IsOri(instr_ori));
- int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
- imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
+ Instr instr1 = Assembler::instr_at(pc_ + 0 * Assembler::kInstrSize);
+ Instr instr2 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);
+ DCHECK(Assembler::IsLui(instr1));
+ DCHECK(Assembler::IsOri(instr2) || Assembler::IsJicOrJialc(instr2));
+ if (Assembler::IsJicOrJialc(instr2)) {
+ return reinterpret_cast<Address>(
+ Assembler::CreateTargetAddress(instr1, instr2));
+ }
+ int32_t imm = (instr1 & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
+ imm |= (instr2 & static_cast<int32_t>(kImm16Mask));
return reinterpret_cast<Address>(imm);
}
}
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index e50a239a4a..bfa232892a 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -534,6 +534,11 @@ bool Assembler::IsBnec(Instr instr) {
return opcode == POP30 && rs != 0 && rs < rt; // && rt != 0
}
+bool Assembler::IsJicOrJialc(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ uint32_t rs = GetRsField(instr);
+ return (opcode == POP66 || opcode == POP76) && rs == 0;
+}
bool Assembler::IsJump(Instr instr) {
uint32_t opcode = GetOpcodeField(instr);
@@ -546,7 +551,6 @@ bool Assembler::IsJump(Instr instr) {
((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
}
-
bool Assembler::IsJ(Instr instr) {
uint32_t opcode = GetOpcodeField(instr);
// Checks if the instruction is a jump.
@@ -697,6 +701,47 @@ static inline int32_t AddBranchOffset(int pos, Instr instr) {
}
}
+uint32_t Assembler::CreateTargetAddress(Instr instr_lui, Instr instr_jic) {
+ DCHECK(IsLui(instr_lui) && IsJicOrJialc(instr_jic));
+ int16_t jic_offset = GetImmediate16(instr_jic);
+ int16_t lui_offset = GetImmediate16(instr_lui);
+
+ if (jic_offset < 0) {
+ lui_offset += kImm16Mask;
+ }
+ uint32_t lui_offset_u = (static_cast<uint32_t>(lui_offset)) << kLuiShift;
+ uint32_t jic_offset_u = static_cast<uint32_t>(jic_offset) & kImm16Mask;
+
+ return lui_offset_u | jic_offset_u;
+}
+
+// Use just lui and jic instructions. Insert lower part of the target address in
+// jic offset part. Since jic sign-extends offset and then add it with register,
+// before that addition, difference between upper part of the target address and
+// upper part of the sign-extended offset (0xffff or 0x0000), will be inserted
+// in jic register with lui instruction.
+void Assembler::UnpackTargetAddress(uint32_t address, int16_t& lui_offset,
+ int16_t& jic_offset) {
+ lui_offset = (address & kHiMask) >> kLuiShift;
+ jic_offset = address & kLoMask;
+
+ if (jic_offset < 0) {
+ lui_offset -= kImm16Mask;
+ }
+}
+
+void Assembler::UnpackTargetAddressUnsigned(uint32_t address,
+ uint32_t& lui_offset,
+ uint32_t& jic_offset) {
+ int16_t lui_offset16 = (address & kHiMask) >> kLuiShift;
+ int16_t jic_offset16 = address & kLoMask;
+
+ if (jic_offset16 < 0) {
+ lui_offset16 -= kImm16Mask;
+ }
+ lui_offset = static_cast<uint32_t>(lui_offset16) & kImm16Mask;
+ jic_offset = static_cast<uint32_t>(jic_offset16) & kImm16Mask;
+}
int Assembler::target_at(int pos, bool is_internal) {
Instr instr = instr_at(pos);
@@ -724,11 +769,16 @@ int Assembler::target_at(int pos, bool is_internal) {
if (IsBranch(instr)) {
return AddBranchOffset(pos, instr);
} else {
- Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
- Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
- DCHECK(IsOri(instr_ori));
- int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
- imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
+ Instr instr1 = instr_at(pos + 0 * Assembler::kInstrSize);
+ Instr instr2 = instr_at(pos + 1 * Assembler::kInstrSize);
+ DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
+ int32_t imm;
+ if (IsJicOrJialc(instr2)) {
+ imm = CreateTargetAddress(instr1, instr2);
+ } else {
+ imm = (instr1 & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
+ imm |= (instr2 & static_cast<int32_t>(kImm16Mask));
+ }
if (imm == kEndOfJumpChain) {
// EndOfChain sentinel is returned directly, not relative to pc or pos.
@@ -781,19 +831,26 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos,
instr = SetBranchOffset(pos, target_pos, instr);
instr_at_put(pos, instr);
} else {
- Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
- Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
- DCHECK(IsOri(instr_ori));
+ Instr instr1 = instr_at(pos + 0 * Assembler::kInstrSize);
+ Instr instr2 = instr_at(pos + 1 * Assembler::kInstrSize);
+ DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
DCHECK((imm & 3) == 0);
-
- instr_lui &= ~kImm16Mask;
- instr_ori &= ~kImm16Mask;
-
- instr_at_put(pos + 0 * Assembler::kInstrSize,
- instr_lui | ((imm & kHiMask) >> kLuiShift));
- instr_at_put(pos + 1 * Assembler::kInstrSize,
- instr_ori | (imm & kImm16Mask));
+ DCHECK(IsLui(instr1) && (IsJicOrJialc(instr2) || IsOri(instr2)));
+ instr1 &= ~kImm16Mask;
+ instr2 &= ~kImm16Mask;
+
+ if (IsJicOrJialc(instr2)) {
+ uint32_t lui_offset_u, jic_offset_u;
+ UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
+ instr_at_put(pos + 0 * Assembler::kInstrSize, instr1 | lui_offset_u);
+ instr_at_put(pos + 1 * Assembler::kInstrSize, instr2 | jic_offset_u);
+ } else {
+ instr_at_put(pos + 0 * Assembler::kInstrSize,
+ instr1 | ((imm & kHiMask) >> kLuiShift));
+ instr_at_put(pos + 1 * Assembler::kInstrSize,
+ instr2 | (imm & kImm16Mask));
+ }
}
}
@@ -1330,7 +1387,6 @@ void Assembler::bne(Register rs, Register rt, int16_t offset) {
void Assembler::bovc(Register rs, Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!rs.is(zero_reg));
if (rs.code() >= rt.code()) {
GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
} else {
@@ -1341,7 +1397,6 @@ void Assembler::bovc(Register rs, Register rt, int16_t offset) {
void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!rs.is(zero_reg));
if (rs.code() >= rt.code()) {
GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
} else {
@@ -1704,10 +1759,10 @@ void Assembler::rotrv(Register rd, Register rt, Register rs) {
void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
- DCHECK(sa < 5 && sa > 0);
+ DCHECK(sa <= 3);
DCHECK(IsMipsArchVariant(kMips32r6));
- Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
- (rd.code() << kRdShift) | (sa - 1) << kSaShift | LSA;
+ Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
+ rd.code() << kRdShift | sa << kSaShift | LSA;
emit(instr);
}
@@ -2085,7 +2140,6 @@ void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
// Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
// load to two 32-bit loads.
- DCHECK(!src.rm().is(at));
if (IsFp32Mode()) { // fp32 mode.
if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
GenInstrImmediate(LWC1, src.rm(), fd,
@@ -2790,24 +2844,36 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
} else {
DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
if (IsLui(instr)) {
- Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
- Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
- DCHECK(IsOri(instr_ori));
- int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
- imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
+ Instr instr1 = instr_at(pc + 0 * Assembler::kInstrSize);
+ Instr instr2 = instr_at(pc + 1 * Assembler::kInstrSize);
+ DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
+ int32_t imm;
+ if (IsJicOrJialc(instr2)) {
+ imm = CreateTargetAddress(instr1, instr2);
+ } else {
+ imm = (instr1 & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
+ imm |= (instr2 & static_cast<int32_t>(kImm16Mask));
+ }
+
if (imm == kEndOfJumpChain) {
return 0; // Number of instructions patched.
}
imm += pc_delta;
DCHECK((imm & 3) == 0);
-
- instr_lui &= ~kImm16Mask;
- instr_ori &= ~kImm16Mask;
-
- instr_at_put(pc + 0 * Assembler::kInstrSize,
- instr_lui | ((imm >> kLuiShift) & kImm16Mask));
- instr_at_put(pc + 1 * Assembler::kInstrSize,
- instr_ori | (imm & kImm16Mask));
+ instr1 &= ~kImm16Mask;
+ instr2 &= ~kImm16Mask;
+
+ if (IsJicOrJialc(instr2)) {
+ uint32_t lui_offset_u, jic_offset_u;
+ Assembler::UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
+ instr_at_put(pc + 0 * Assembler::kInstrSize, instr1 | lui_offset_u);
+ instr_at_put(pc + 1 * Assembler::kInstrSize, instr2 | jic_offset_u);
+ } else {
+ instr_at_put(pc + 0 * Assembler::kInstrSize,
+ instr1 | ((imm >> kLuiShift) & kImm16Mask));
+ instr_at_put(pc + 1 * Assembler::kInstrSize,
+ instr2 | (imm & kImm16Mask));
+ }
return 2; // Number of instructions patched.
} else {
UNREACHABLE();
@@ -2900,7 +2966,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// We do not try to reuse pool constants.
RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
if (rmode >= RelocInfo::COMMENT &&
- rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_CALL) {
+ rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL) {
// Adjust code for new modes.
DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
|| RelocInfo::IsComment(rmode)
@@ -2964,19 +3030,40 @@ void Assembler::CheckTrampolinePool() {
}
int pool_start = pc_offset();
- for (int i = 0; i < unbound_labels_count_; i++) {
- uint32_t imm32;
- imm32 = jump_address(&after_pool);
- { BlockGrowBufferScope block_buf_growth(this);
- // Buffer growth (and relocation) must be blocked for internal
- // references until associated instructions are emitted and available
- // to be patched.
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
- lui(at, (imm32 & kHiMask) >> kLuiShift);
- ori(at, at, (imm32 & kImm16Mask));
+ if (IsMipsArchVariant(kMips32r6)) {
+ for (int i = 0; i < unbound_labels_count_; i++) {
+ uint32_t imm32;
+ imm32 = jump_address(&after_pool);
+ uint32_t lui_offset, jic_offset;
+ UnpackTargetAddressUnsigned(imm32, lui_offset, jic_offset);
+ {
+ BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal
+ // references until associated instructions are emitted and
+ // available to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ lui(at, lui_offset);
+ jic(at, jic_offset);
+ }
+ CheckBuffer();
+ }
+ } else {
+ for (int i = 0; i < unbound_labels_count_; i++) {
+ uint32_t imm32;
+ imm32 = jump_address(&after_pool);
+ {
+ BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal
+ // references until associated instructions are emitted and
+ // available to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ lui(at, (imm32 & kHiMask) >> kLuiShift);
+ ori(at, at, (imm32 & kImm16Mask));
+ }
+ CheckBuffer();
+ jr(at);
+ nop();
}
- jr(at);
- nop();
}
bind(&after_pool);
trampoline_ = Trampoline(pool_start, unbound_labels_count_);
@@ -3000,10 +3087,10 @@ Address Assembler::target_address_at(Address pc) {
Instr instr1 = instr_at(pc);
Instr instr2 = instr_at(pc + kInstrSize);
// Interpret 2 instructions generated by li: lui/ori
- if ((GetOpcodeField(instr1) == LUI) && (GetOpcodeField(instr2) == ORI)) {
+ if (IsLui(instr1) && IsOri(instr2)) {
// Assemble the 32 bit value.
- return reinterpret_cast<Address>(
- (GetImmediate16(instr1) << 16) | GetImmediate16(instr2));
+ return reinterpret_cast<Address>((GetImmediate16(instr1) << kLuiShift) |
+ GetImmediate16(instr2));
}
// We should never get here, force a bad address if we do.
@@ -3024,6 +3111,8 @@ void Assembler::QuietNaN(HeapObject* object) {
// On Mips, a target address is stored in a lui/ori instruction pair, each
// of which load 16 bits of the 32-bit address to a register.
// Patching the address must replace both instr, and flush the i-cache.
+// On r6, target address is stored in a lui/jic pair, and both instr have to be
+// patched.
//
// There is an optimization below, which emits a nop when the address
// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
@@ -3039,15 +3128,27 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
#ifdef DEBUG
// Check we have the result from a li macro-instruction, using instr pair.
Instr instr1 = instr_at(pc);
- CHECK((GetOpcodeField(instr1) == LUI && GetOpcodeField(instr2) == ORI));
+ CHECK(IsLui(instr1) && (IsOri(instr2) || IsJicOrJialc(instr2)));
#endif
- // Must use 2 instructions to insure patchable code => just use lui and ori.
- // lui rt, upper-16.
- // ori rt rt, lower-16.
- *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
- *(p + 1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
+ if (IsJicOrJialc(instr2)) {
+ // Must use 2 instructions to insure patchable code => use lui and jic
+ uint32_t lui_offset, jic_offset;
+ Assembler::UnpackTargetAddressUnsigned(itarget, lui_offset, jic_offset);
+
+ *p &= ~kImm16Mask;
+ *(p + 1) &= ~kImm16Mask;
+
+ *p |= lui_offset;
+ *(p + 1) |= jic_offset;
+ } else {
+ // Must use 2 instructions to insure patchable code => just use lui and ori.
+ // lui rt, upper-16.
+ // ori rt rt, lower-16.
+ *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
+ *(p + 1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
+ }
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(isolate, pc, 2 * sizeof(int32_t));
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index b708ef7700..886ac6c052 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -528,7 +528,11 @@ class Assembler : public AssemblerBase {
// Distance between the instruction referring to the address of the call
// target and the return address.
+#ifdef _MIPS_ARCH_MIPS32R6
+ static const int kCallTargetAddressOffset = 3 * kInstrSize;
+#else
static const int kCallTargetAddressOffset = 4 * kInstrSize;
+#endif
// Distance between start of patched debug break slot and the emitted address
// to jump to.
@@ -538,7 +542,11 @@ class Assembler : public AssemblerBase {
// register.
static const int kPcLoadDelta = 4;
+#ifdef _MIPS_ARCH_MIPS32R6
+ static const int kDebugBreakSlotInstructions = 3;
+#else
static const int kDebugBreakSlotInstructions = 4;
+#endif
static const int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstrSize;
@@ -750,9 +758,6 @@ class Assembler : public AssemblerBase {
void rotr(Register rd, Register rt, uint16_t sa);
void rotrv(Register rd, Register rt, Register rs);
- // Address computing instructions with shift.
- void lsa(Register rd, Register rt, Register rs, uint8_t sa);
-
// ------------Memory-instructions-------------
void lb(Register rd, const MemOperand& rs);
@@ -1048,7 +1053,9 @@ class Assembler : public AssemblerBase {
void dp(uintptr_t data) { dd(data); }
void dd(Label* label);
- PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+ AssemblerPositionsRecorder* positions_recorder() {
+ return &positions_recorder_;
+ }
// Postpone the generation of the trampoline pool for the specified number of
// instructions.
@@ -1082,6 +1089,7 @@ class Assembler : public AssemblerBase {
static bool IsBnezc(Instr instr);
static bool IsBeqc(Instr instr);
static bool IsBnec(Instr instr);
+ static bool IsJicOrJialc(Instr instr);
static bool IsJump(Instr instr);
static bool IsJ(Instr instr);
@@ -1121,12 +1129,20 @@ class Assembler : public AssemblerBase {
static int32_t GetBranchOffset(Instr instr);
static bool IsLw(Instr instr);
static int16_t GetLwOffset(Instr instr);
+ static int16_t GetJicOrJialcOffset(Instr instr);
+ static int16_t GetLuiOffset(Instr instr);
static Instr SetLwOffset(Instr instr, int16_t offset);
static bool IsSw(Instr instr);
static Instr SetSwOffset(Instr instr, int16_t offset);
static bool IsAddImmediate(Instr instr);
static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
+ static uint32_t CreateTargetAddress(Instr instr_lui, Instr instr_jic);
+ static void UnpackTargetAddress(uint32_t address, int16_t& lui_offset,
+ int16_t& jic_offset);
+ static void UnpackTargetAddressUnsigned(uint32_t address,
+ uint32_t& lui_offset,
+ uint32_t& jic_offset);
static bool IsAndImmediate(Instr instr);
static bool IsEmittedConstant(Instr instr);
@@ -1143,6 +1159,9 @@ class Assembler : public AssemblerBase {
bool IsPrevInstrCompactBranch() { return prev_instr_compact_branch_; }
protected:
+ // Load Scaled Address instruction.
+ void lsa(Register rd, Register rt, Register rs, uint8_t sa);
+
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
// the relocation info.
@@ -1213,6 +1232,8 @@ class Assembler : public AssemblerBase {
inline void CheckTrampolinePoolQuick(int extra_instructions = 0);
+ inline void CheckBuffer();
+
private:
inline static void set_target_internal_reference_encoded_at(Address pc,
Address target);
@@ -1259,7 +1280,6 @@ class Assembler : public AssemblerBase {
enum class CompactBranchType : bool { NO = false, COMPACT_BRANCH = true };
// Code emission.
- inline void CheckBuffer();
void GrowBuffer();
inline void emit(Instr x,
CompactBranchType is_compact_branch = CompactBranchType::NO);
@@ -1406,7 +1426,11 @@ class Assembler : public AssemblerBase {
// branch instruction generation, where we use jump instructions rather
// than regular branch instructions.
bool trampoline_emitted_;
+#ifdef _MIPS_ARCH_MIPS32R6
+ static const int kTrampolineSlotsSize = 2 * kInstrSize;
+#else
static const int kTrampolineSlotsSize = 4 * kInstrSize;
+#endif
static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
static const int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1;
static const int kInvalidSlotPos = -1;
@@ -1427,8 +1451,8 @@ class Assembler : public AssemblerBase {
friend class CodePatcher;
friend class BlockTrampolinePoolScope;
- PositionsRecorder positions_recorder_;
- friend class PositionsRecorder;
+ AssemblerPositionsRecorder positions_recorder_;
+ friend class AssemblerPositionsRecorder;
friend class EnsureSpace;
};
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index 09f4d59e35..9693a52697 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -149,17 +149,15 @@ void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
// -- sp[(argc - n) * 8] : arg[n] (zero-based)
// -- sp[(argc + 1) * 8] : receiver
// -----------------------------------
- Condition const cc = (kind == MathMaxMinKind::kMin) ? ge : le;
Heap::RootListIndex const root_index =
(kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
: Heap::kMinusInfinityValueRootIndex;
- DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? f2 : f0;
// Load the accumulator with the default return value (either -Infinity or
// +Infinity), with the tagged value in a1 and the double value in f0.
__ LoadRoot(a1, root_index);
__ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
- __ mov(a3, a0);
+ __ Addu(a3, a0, Operand(1));
Label done_loop, loop;
__ bind(&loop);
@@ -211,21 +209,24 @@ void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
__ SmiToDoubleFPURegister(a2, f2, t0);
__ bind(&done_convert);
- // Perform the actual comparison with the accumulator value on the left hand
- // side (f0) and the next parameter value on the right hand side (f2).
- Label compare_equal, compare_nan, compare_swap;
- __ BranchF(&compare_equal, &compare_nan, eq, f0, f2);
- __ BranchF(&compare_swap, nullptr, cc, f0, f2);
- __ Branch(&loop);
-
- // Left and right hand side are equal, check for -0 vs. +0.
- __ bind(&compare_equal);
- __ FmoveHigh(t0, reg);
- __ Branch(&loop, ne, t0, Operand(0x80000000));
-
- // Result is on the right hand side.
- __ bind(&compare_swap);
- __ mov_d(f0, f2);
+ // Perform the actual comparison with using Min/Max macro instructions the
+ // accumulator value on the left hand side (f0) and the next parameter value
+ // on the right hand side (f2).
+ // We need to work out which HeapNumber (or smi) the result came from.
+ Label compare_nan, set_value;
+ __ BranchF(nullptr, &compare_nan, eq, f0, f2);
+ __ Move(t0, t1, f0);
+ if (kind == MathMaxMinKind::kMin) {
+ __ MinNaNCheck_d(f0, f0, f2);
+ } else {
+ DCHECK(kind == MathMaxMinKind::kMax);
+ __ MaxNaNCheck_d(f0, f0, f2);
+ }
+ __ Move(at, t8, f0);
+ __ Branch(&set_value, ne, t0, Operand(at));
+ __ Branch(&set_value, ne, t1, Operand(t8));
+ __ jmp(&loop);
+ __ bind(&set_value);
__ mov(a1, a2);
__ jmp(&loop);
@@ -238,8 +239,8 @@ void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
__ bind(&done_loop);
__ Lsa(sp, sp, a3, kPointerSizeLog2);
- __ mov(v0, a1);
- __ DropAndRet(1);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a1); // In delay slot.
}
// static
@@ -530,6 +531,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -- a1 : constructor function
// -- a2 : allocation site or undefined
// -- a3 : new target
+ // -- cp : context
// -- ra : return address
// -- sp[...]: constructor arguments
// -----------------------------------
@@ -543,7 +545,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Preserve the incoming parameters on the stack.
__ AssertUndefinedOrAllocationSite(a2, t0);
__ SmiTag(a0);
- __ Push(a2, a0);
+ __ Push(cp, a2, a0);
if (create_implicit_receiver) {
// Allocate the new receiver object.
@@ -618,7 +620,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Restore context from the frame.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ lw(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
if (create_implicit_receiver) {
// If the result is an object (in the ECMA sense), we should get rid
@@ -750,9 +752,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// -----------------------------------
ProfileEntryHookStub::MaybeCallEntryHook(masm);
- // Clear the context before we push it when entering the JS frame.
- __ mov(cp, zero_reg);
-
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -847,9 +846,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
-
- __ Push(ra, fp, cp, a1);
- __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ PushStandardFrame(a1);
// Get the bytecode array from the function object and load the pointer to the
// first entry into kInterpreterBytecodeRegister.
@@ -1205,8 +1202,7 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
__ MultiPop(saved_regs);
// Perform prologue operations usually performed by the young code stub.
- __ Push(ra, fp, cp, a1);
- __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ PushStandardFrame(a1);
// Jump to point after the code-age stub.
__ Addu(a0, a0, Operand(kNoCodeAgeSequenceLength));
@@ -1435,23 +1431,6 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
}
-void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
- // We check the stack limit as indicator that recompilation might be done.
- Label ok;
- __ LoadRoot(at, Heap::kStackLimitRootIndex);
- __ Branch(&ok, hs, sp, Operand(at));
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard);
- }
- __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
- RelocInfo::CODE_TARGET);
-
- __ bind(&ok);
- __ Ret();
-}
-
-
// static
void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
int field_index) {
@@ -1498,6 +1477,27 @@ void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
__ TailCallRuntime(Runtime::kThrowNotDateError);
}
+// static
+void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argc
+ // -- sp[0] : first argument (left-hand side)
+ // -- sp[4] : receiver (right-hand side)
+ // -----------------------------------
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ lw(InstanceOfDescriptor::LeftRegister(),
+ MemOperand(fp, 2 * kPointerSize)); // Load left-hand side.
+ __ lw(InstanceOfDescriptor::RightRegister(),
+ MemOperand(fp, 3 * kPointerSize)); // Load right-hand side.
+ InstanceOfStub stub(masm->isolate(), true);
+ __ CallStub(&stub);
+ }
+
+ // Pop the argument and the receiver.
+ __ DropAndRet(2);
+}
// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
@@ -1961,18 +1961,20 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
Comment cmnt(masm, "[ PrepareForTailCall");
- // Prepare for tail call only if the debugger is not active.
+ // Prepare for tail call only if ES2015 tail call elimination is enabled.
Label done;
- ExternalReference debug_is_active =
- ExternalReference::debug_is_active_address(masm->isolate());
- __ li(at, Operand(debug_is_active));
+ ExternalReference is_tail_call_elimination_enabled =
+ ExternalReference::is_tail_call_elimination_enabled_address(
+ masm->isolate());
+ __ li(at, Operand(is_tail_call_elimination_enabled));
__ lb(scratch1, MemOperand(at));
- __ Branch(&done, ne, scratch1, Operand(zero_reg));
+ __ Branch(&done, eq, scratch1, Operand(zero_reg));
// Drop possible interpreter handler/stub frame.
{
Label no_interpreter_frame;
- __ lw(scratch3, MemOperand(fp, StandardFrameConstants::kMarkerOffset));
+ __ lw(scratch3,
+ MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Branch(&no_interpreter_frame, ne, scratch3,
Operand(Smi::FromInt(StackFrame::STUB)));
__ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -1980,72 +1982,37 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
}
// Check if next frame is an arguments adaptor frame.
+ Register caller_args_count_reg = scratch1;
Label no_arguments_adaptor, formal_parameter_count_loaded;
__ lw(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+ __ lw(scratch3,
+ MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Branch(&no_arguments_adaptor, ne, scratch3,
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- // Drop arguments adaptor frame and load arguments count.
+ // Drop current frame and load arguments count from arguments adaptor frame.
__ mov(fp, scratch2);
- __ lw(scratch1,
+ __ lw(caller_args_count_reg,
MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(scratch1);
+ __ SmiUntag(caller_args_count_reg);
__ Branch(&formal_parameter_count_loaded);
__ bind(&no_arguments_adaptor);
// Load caller's formal parameter count
- __ lw(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ lw(scratch1,
- FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
__ lw(scratch1,
+ FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(caller_args_count_reg,
FieldMemOperand(scratch1,
SharedFunctionInfo::kFormalParameterCountOffset));
- __ SmiUntag(scratch1);
+ __ SmiUntag(caller_args_count_reg);
__ bind(&formal_parameter_count_loaded);
- // Calculate the end of destination area where we will put the arguments
- // after we drop current frame. We add kPointerSize to count the receiver
- // argument which is not included into formal parameters count.
- Register dst_reg = scratch2;
- __ Lsa(dst_reg, fp, scratch1, kPointerSizeLog2);
- __ Addu(dst_reg, dst_reg,
- Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
-
- Register src_reg = scratch1;
- __ Lsa(src_reg, sp, args_reg, kPointerSizeLog2);
- // Count receiver argument as well (not included in args_reg).
- __ Addu(src_reg, src_reg, Operand(kPointerSize));
-
- if (FLAG_debug_code) {
- __ Check(lo, kStackAccessBelowStackPointer, src_reg, Operand(dst_reg));
- }
-
- // Restore caller's frame pointer and return address now as they will be
- // overwritten by the copying loop.
- __ lw(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
- __ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Now copy callee arguments to the caller frame going backwards to avoid
- // callee arguments corruption (source and destination areas could overlap).
-
- // Both src_reg and dst_reg are pointing to the word after the one to copy,
- // so they must be pre-decremented in the loop.
- Register tmp_reg = scratch3;
- Label loop, entry;
- __ Branch(&entry);
- __ bind(&loop);
- __ Subu(src_reg, src_reg, Operand(kPointerSize));
- __ Subu(dst_reg, dst_reg, Operand(kPointerSize));
- __ lw(tmp_reg, MemOperand(src_reg));
- __ sw(tmp_reg, MemOperand(dst_reg));
- __ bind(&entry);
- __ Branch(&loop, ne, sp, Operand(src_reg));
-
- // Leave current frame.
- __ mov(sp, dst_reg);
-
+ ParameterCount callee_args_count(args_reg);
+ __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+ scratch3);
__ bind(&done);
}
} // namespace
@@ -2556,27 +2523,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected.
__ bind(&too_few);
-
- // If the function is strong we need to throw an error.
- Label no_strong_error;
- __ lw(t1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(t2, FieldMemOperand(t1, SharedFunctionInfo::kCompilerHintsOffset));
- __ And(t3, t2, Operand(1 << (SharedFunctionInfo::kStrongModeFunction +
- kSmiTagSize)));
- __ Branch(&no_strong_error, eq, t3, Operand(zero_reg));
-
- // What we really care about is the required number of arguments.
- __ lw(t2, FieldMemOperand(t1, SharedFunctionInfo::kLengthOffset));
- __ SmiUntag(t2);
- __ Branch(&no_strong_error, ge, a0, Operand(t2));
-
- {
- FrameScope frame(masm, StackFrame::MANUAL);
- EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
- }
-
- __ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
ArgumentAdaptorStackCheck(masm, &stack_overflow);
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 541e73e2fa..fd286fbb77 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -4,9 +4,10 @@
#if V8_TARGET_ARCH_MIPS
+#include "src/code-stubs.h"
+#include "src/api-arguments.h"
#include "src/base/bits.h"
#include "src/bootstrapper.h"
-#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
@@ -76,6 +77,10 @@ void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
}
+void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+ Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
+ descriptor->Initialize(a0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
+}
void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
@@ -506,7 +511,7 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
(lhs.is(a1) && rhs.is(a0)));
// a2 is object type of rhs.
- Label object_test, return_unequal, undetectable;
+ Label object_test, return_equal, return_unequal, undetectable;
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
__ And(at, a2, Operand(kIsNotStringMask));
__ Branch(&object_test, ne, at, Operand(zero_reg));
@@ -546,6 +551,16 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
__ bind(&undetectable);
__ And(at, t1, Operand(1 << Map::kIsUndetectable));
__ Branch(&return_unequal, eq, at, Operand(zero_reg));
+
+ // If both sides are JSReceivers, then the result is false according to
+ // the HTML specification, which says that only comparisons with null or
+ // undefined are affected by special casing for document.all.
+ __ GetInstanceType(a2, a2);
+ __ Branch(&return_equal, eq, a2, Operand(ODDBALL_TYPE));
+ __ GetInstanceType(a3, a3);
+ __ Branch(&return_unequal, ne, a3, Operand(ODDBALL_TYPE));
+
+ __ bind(&return_equal);
__ Ret(USE_DELAY_SLOT);
__ li(v0, Operand(EQUAL)); // In delay slot.
}
@@ -1492,8 +1507,12 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
__ GetObjectType(function, function_map, scratch);
__ Branch(&slow_case, ne, scratch, Operand(JS_FUNCTION_TYPE));
- // Ensure that {function} has an instance prototype.
+ // Go to the runtime if the function is not a constructor.
__ lbu(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
+ __ And(at, scratch, Operand(1 << Map::kIsConstructor));
+ __ Branch(&slow_case, eq, at, Operand(zero_reg));
+
+ // Ensure that {function} has an instance prototype.
__ And(at, scratch, Operand(1 << Map::kHasNonInstancePrototype));
__ Branch(&slow_case, ne, at, Operand(zero_reg));
@@ -1563,7 +1582,8 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
// Slow-case: Call the %InstanceOf runtime function.
__ bind(&slow_case);
__ Push(object, function);
- __ TailCallRuntime(Runtime::kInstanceOf);
+ __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
+ : Runtime::kInstanceOf);
}
@@ -1582,29 +1602,6 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
}
-
-void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
- // Return address is in ra.
- Label slow;
-
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
-
- // Check that the key is an array index, that is Uint32.
- __ And(t0, key, Operand(kSmiTagMask | kSmiSignMask));
- __ Branch(&slow, ne, t0, Operand(zero_reg));
-
- // Everything is fine, call runtime.
- __ Push(receiver, key); // Receiver, key.
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
-
- __ bind(&slow);
- PropertyAccessCompiler::TailCallBuiltin(
- masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -2769,57 +2766,58 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ bind(&not_smi);
Label not_heap_number;
- __ lw(a1, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
- // a0: object
- // a1: instance type.
+ __ GetObjectType(a0, a1, a1);
+ // a0: receiver
+ // a1: receiver instance type
__ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
__ bind(&not_heap_number);
- Label not_string, slow_string;
+ NonNumberToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+}
+
+void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
+ // The NonNumberToNumber stub takes on argument in a0.
+ __ AssertNotNumber(a0);
+
+ Label not_string;
+ __ GetObjectType(a0, a1, a1);
+ // a0: receiver
+ // a1: receiver instance type
__ Branch(&not_string, hs, a1, Operand(FIRST_NONSTRING_TYPE));
- // Check if string has a cached array index.
- __ lw(a2, FieldMemOperand(a0, String::kHashFieldOffset));
- __ And(at, a2, Operand(String::kContainsCachedArrayIndexMask));
- __ Branch(&slow_string, ne, at, Operand(zero_reg));
- __ IndexFromHash(a2, a0);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
- __ bind(&slow_string);
- __ push(a0); // Push argument.
- __ TailCallRuntime(Runtime::kStringToNumber);
+ StringToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
__ bind(&not_string);
Label not_oddball;
__ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
__ Ret(USE_DELAY_SLOT);
- __ lw(v0, FieldMemOperand(a0, Oddball::kToNumberOffset));
+ __ lw(v0, FieldMemOperand(a0, Oddball::kToNumberOffset)); // In delay slot.
__ bind(&not_oddball);
- __ push(a0); // Push argument.
+ __ Push(a0); // Push argument.
__ TailCallRuntime(Runtime::kToNumber);
}
+void StringToNumberStub::Generate(MacroAssembler* masm) {
+ // The StringToNumber stub takes on argument in a0.
+ __ AssertString(a0);
-void ToLengthStub::Generate(MacroAssembler* masm) {
- // The ToLength stub takes on argument in a0.
- Label not_smi, positive_smi;
- __ JumpIfNotSmi(a0, &not_smi);
- STATIC_ASSERT(kSmiTag == 0);
- __ Branch(&positive_smi, ge, a0, Operand(zero_reg));
- __ mov(a0, zero_reg);
- __ bind(&positive_smi);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
- __ bind(&not_smi);
+ // Check if string has a cached array index.
+ Label runtime;
+ __ lw(a2, FieldMemOperand(a0, String::kHashFieldOffset));
+ __ And(at, a2, Operand(String::kContainsCachedArrayIndexMask));
+ __ Branch(&runtime, ne, at, Operand(zero_reg));
+ __ IndexFromHash(a2, v0);
+ __ Ret();
- __ push(a0); // Push argument.
- __ TailCallRuntime(Runtime::kToLength);
+ __ bind(&runtime);
+ __ Push(a0); // Push argument.
+ __ TailCallRuntime(Runtime::kStringToNumber);
}
-
void ToStringStub::Generate(MacroAssembler* masm) {
// The ToString stub takes on argument in a0.
Label is_number;
@@ -2990,39 +2988,6 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
}
-void StringCompareStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a1 : left
- // -- a0 : right
- // -- ra : return address
- // -----------------------------------
- __ AssertString(a1);
- __ AssertString(a0);
-
- Label not_same;
- __ Branch(&not_same, ne, a0, Operand(a1));
- __ li(v0, Operand(Smi::FromInt(EQUAL)));
- __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a1,
- a2);
- __ Ret();
-
- __ bind(&not_same);
-
- // Check that both objects are sequential one-byte strings.
- Label runtime;
- __ JumpIfNotBothSequentialOneByteStrings(a1, a0, a2, a3, &runtime);
-
- // Compare flat ASCII strings natively.
- __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
- a3);
- StringHelper::GenerateCompareFlatOneByteStrings(masm, a1, a0, a2, a3, t0, t1);
-
- __ bind(&runtime);
- __ Push(a1, a0);
- __ TailCallRuntime(Runtime::kStringCompare);
-}
-
-
void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a1 : left
@@ -3345,10 +3310,17 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
// Handle more complex cases in runtime.
__ bind(&runtime);
- __ Push(left, right);
if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(left, right);
+ __ CallRuntime(Runtime::kStringEqual);
+ }
+ __ LoadRoot(a0, Heap::kTrueValueRootIndex);
+ __ Ret(USE_DELAY_SLOT);
+ __ Subu(v0, v0, a0); // In delay slot.
} else {
+ __ Push(left, right);
__ TailCallRuntime(Runtime::kStringCompare);
}
@@ -3907,7 +3879,7 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
int parameter_count_offset =
- StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
__ lw(a1, MemOperand(fp, parameter_count_offset));
if (function_mode() == JS_FUNCTION_STUB_MODE) {
__ Addu(a1, a1, Operand(1));
@@ -4889,7 +4861,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
__ bind(&loop);
__ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
__ bind(&loop_entry);
- __ lw(a3, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+ __ lw(a3, MemOperand(a2, StandardFrameConstants::kFunctionOffset));
__ Branch(&loop, ne, a1, Operand(a3));
}
@@ -4897,7 +4869,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// arguments adaptor frame below the function frame).
Label no_rest_parameters;
__ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
- __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+ __ lw(a3, MemOperand(a2, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Branch(&no_rest_parameters, ne, a3,
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
@@ -5036,7 +5008,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
__ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(a0, MemOperand(t0, StandardFrameConstants::kContextOffset));
+ __ lw(a0, MemOperand(t0, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Branch(&adaptor_frame, eq, a0,
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
@@ -5244,14 +5216,14 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ bind(&loop);
__ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
__ bind(&loop_entry);
- __ lw(a3, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+ __ lw(a3, MemOperand(a2, StandardFrameConstants::kFunctionOffset));
__ Branch(&loop, ne, a1, Operand(a3));
}
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
__ lw(a3, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
- __ lw(a0, MemOperand(a3, StandardFrameConstants::kContextOffset));
+ __ lw(a0, MemOperand(a3, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Branch(&arguments_adaptor, eq, a0,
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
{
@@ -5607,16 +5579,12 @@ static void CallApiFunctionAndReturn(
__ jmp(&leave_exit_frame);
}
-static void CallApiFunctionStubHelper(MacroAssembler* masm,
- const ParameterCount& argc,
- bool return_first_arg,
- bool call_data_undefined, bool is_lazy) {
+void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : callee
// -- t0 : call_data
// -- a2 : holder
// -- a1 : api_function_address
- // -- a3 : number of arguments if argc is a register
// -- cp : context
// --
// -- sp[0] : last argument
@@ -5642,17 +5610,15 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
STATIC_ASSERT(FCA::kHolderIndex == 0);
STATIC_ASSERT(FCA::kArgsLength == 7);
- DCHECK(argc.is_immediate() || a3.is(argc.reg()));
-
// Save context, callee and call data.
__ Push(context, callee, call_data);
- if (!is_lazy) {
+ if (!is_lazy()) {
// Load context from callee.
__ lw(context, FieldMemOperand(callee, JSFunction::kContextOffset));
}
Register scratch = call_data;
- if (!call_data_undefined) {
+ if (!call_data_undefined()) {
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
}
// Push return value and default return value.
@@ -5677,29 +5643,14 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
__ Addu(a0, sp, Operand(1 * kPointerSize));
// FunctionCallbackInfo::implicit_args_
__ sw(scratch, MemOperand(a0, 0 * kPointerSize));
- if (argc.is_immediate()) {
- // FunctionCallbackInfo::values_
- __ Addu(at, scratch,
- Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
- __ sw(at, MemOperand(a0, 1 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc
- __ li(at, Operand(argc.immediate()));
- __ sw(at, MemOperand(a0, 2 * kPointerSize));
- // FunctionCallbackInfo::is_construct_call_ = 0
- __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));
- } else {
- // FunctionCallbackInfo::values_
- __ sll(at, argc.reg(), kPointerSizeLog2);
- __ Addu(at, at, scratch);
- __ Addu(at, at, Operand((FCA::kArgsLength - 1) * kPointerSize));
- __ sw(at, MemOperand(a0, 1 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc
- __ sw(argc.reg(), MemOperand(a0, 2 * kPointerSize));
- // FunctionCallbackInfo::is_construct_call_
- __ Addu(argc.reg(), argc.reg(), Operand(FCA::kArgsLength + 1));
- __ sll(at, argc.reg(), kPointerSizeLog2);
- __ sw(at, MemOperand(a0, 3 * kPointerSize));
- }
+ // FunctionCallbackInfo::values_
+ __ Addu(at, scratch, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
+ __ sw(at, MemOperand(a0, 1 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc
+ __ li(at, Operand(argc()));
+ __ sw(at, MemOperand(a0, 2 * kPointerSize));
+ // FunctionCallbackInfo::is_construct_call_ = 0
+ __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));
ExternalReference thunk_ref =
ExternalReference::invoke_function_callback(masm->isolate());
@@ -5709,7 +5660,7 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
// Stores return the first js argument.
int return_value_offset = 0;
- if (return_first_arg) {
+ if (is_store()) {
return_value_offset = 2 + FCA::kArgsLength;
} else {
return_value_offset = 2 + FCA::kReturnValueOffset;
@@ -5717,33 +5668,14 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
int stack_space = 0;
int32_t stack_space_offset = 4 * kPointerSize;
- if (argc.is_immediate()) {
- stack_space = argc.immediate() + FCA::kArgsLength + 1;
- stack_space_offset = kInvalidStackOffset;
- }
+ stack_space = argc() + FCA::kArgsLength + 1;
+ stack_space_offset = kInvalidStackOffset;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
stack_space_offset, return_value_operand,
&context_restore_operand);
}
-void CallApiFunctionStub::Generate(MacroAssembler* masm) {
- bool call_data_undefined = this->call_data_undefined();
- CallApiFunctionStubHelper(masm, ParameterCount(a3), false,
- call_data_undefined, false);
-}
-
-
-void CallApiAccessorStub::Generate(MacroAssembler* masm) {
- bool is_store = this->is_store();
- int argc = this->argc();
- bool call_data_undefined = this->call_data_undefined();
- bool is_lazy = this->is_lazy();
- CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
- call_data_undefined, is_lazy);
-}
-
-
void CallApiGetterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- sp[0] : name
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index 878ba3489a..1c6c1692ae 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -1197,10 +1197,8 @@ CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
young_sequence_.length() / Assembler::kInstrSize,
CodePatcher::DONT_FLUSH));
PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
- patcher->masm()->Push(ra, fp, cp, a1);
+ patcher->masm()->PushStandardFrame(a1);
patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
- patcher->masm()->Addu(
- fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 0caaa4c9d4..e9caaadadb 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -98,12 +98,6 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
}
}
-bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
- // There is no dynamic alignment padding on MIPS in the input frame.
- return false;
-}
-
-
#define __ masm()->
@@ -160,10 +154,15 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Subu(t0, fp, t0);
// Allocate a new deoptimizer object.
- // Pass four arguments in a0 to a3 and fifth & sixth arguments on stack.
__ PrepareCallCFunction(6, t1);
+ // Pass four arguments in a0 to a3 and fifth & sixth arguments on stack.
+ __ mov(a0, zero_reg);
+ Label context_check;
+ __ lw(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ JumpIfSmi(a1, &context_check);
__ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ li(a1, Operand(type())); // bailout type,
+ __ bind(&context_check);
+ __ li(a1, Operand(type())); // Bailout type.
// a2: bailout id already loaded.
// a3: code address or 0 already loaded.
__ sw(t0, CFunctionArgumentOperand(5)); // Fp-to-sp delta.
@@ -239,6 +238,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
__ pop(a0); // Restore deoptimizer object (class Deoptimizer).
+ __ lw(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
+
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop,
outer_loop_header, inner_loop_header;
diff --git a/deps/v8/src/mips/frames-mips.h b/deps/v8/src/mips/frames-mips.h
index 849dea2841..3511679708 100644
--- a/deps/v8/src/mips/frames-mips.h
+++ b/deps/v8/src/mips/frames-mips.h
@@ -133,13 +133,11 @@ class EntryFrameConstants : public AllStatic {
-(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};
-
-class ExitFrameConstants : public AllStatic {
+class ExitFrameConstants : public TypedFrameConstants {
public:
- static const int kFrameSize = 2 * kPointerSize;
-
- static const int kCodeOffset = -2 * kPointerSize;
- static const int kSPOffset = -1 * kPointerSize;
+ static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ DEFINE_TYPED_FRAME_SIZES(2);
// The caller fields are below the frame pointer on the stack.
static const int kCallerFPOffset = +0 * kPointerSize;
@@ -161,7 +159,7 @@ class JavaScriptFrameConstants : public AllStatic {
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
static const int kLastParameterOffset = +2 * kPointerSize;
- static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+ static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
// Caller SP-relative.
static const int kParam0Offset = -2 * kPointerSize;
diff --git a/deps/v8/src/mips/interface-descriptors-mips.cc b/deps/v8/src/mips/interface-descriptors-mips.cc
index fdb43f325c..06e3b77aea 100644
--- a/deps/v8/src/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/mips/interface-descriptors-mips.cc
@@ -109,35 +109,8 @@ void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
}
-void ToNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
-// static
-const Register ToLengthDescriptor::ReceiverRegister() { return a0; }
-
-
-// static
-const Register ToStringDescriptor::ReceiverRegister() { return a0; }
-
-
// static
-const Register ToNameDescriptor::ReceiverRegister() { return a0; }
-
-
-// static
-const Register ToObjectDescriptor::ReceiverRegister() { return a0; }
-
-
-void NumberToStringDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
+const Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
void TypeofDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -266,6 +239,13 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(0, nullptr, nullptr);
}
+#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
+ void Allocate##Type##Descriptor::InitializePlatformSpecific( \
+ CallInterfaceDescriptorData* data) { \
+ data->InitializePlatformSpecific(0, nullptr, nullptr); \
+ }
+SIMD128_TYPES(SIMD128_ALLOC_DESC)
+#undef SIMD128_ALLOC_DESC
void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -310,24 +290,16 @@ void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void CompareDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
-void CompareNilDescriptor::InitializePlatformSpecific(
+void FastArrayPushDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
+ // stack param count needs (arg count)
Register registers[] = {a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void ToBooleanDescriptor::InitializePlatformSpecific(
+void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {a0};
+ Register registers[] = {a1, a0};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
@@ -391,21 +363,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void ApiFunctionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a0, // callee
- t0, // call_data
- a2, // holder
- a1, // api_function_address
- a3, // actual number of arguments
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ApiAccessorDescriptor::InitializePlatformSpecific(
+void ApiCallbackDescriptorBase::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
a0, // callee
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index e3544c5eec..7cbbd3ae2f 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -1,4 +1,3 @@
-
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -100,6 +99,34 @@ void MacroAssembler::StoreRoot(Register source,
sw(source, MemOperand(s6, index << kPointerSizeLog2));
}
+void MacroAssembler::PushCommonFrame(Register marker_reg) {
+ if (marker_reg.is_valid()) {
+ Push(ra, fp, marker_reg);
+ Addu(fp, sp, Operand(kPointerSize));
+ } else {
+ Push(ra, fp);
+ mov(fp, sp);
+ }
+}
+
+void MacroAssembler::PopCommonFrame(Register marker_reg) {
+ if (marker_reg.is_valid()) {
+ Pop(ra, fp, marker_reg);
+ } else {
+ Pop(ra, fp);
+ }
+}
+
+void MacroAssembler::PushStandardFrame(Register function_reg) {
+ int offset = -StandardFrameConstants::kContextOffset;
+ if (function_reg.is_valid()) {
+ Push(ra, fp, cp, function_reg);
+ offset += kPointerSize;
+ } else {
+ Push(ra, fp, cp);
+ }
+ Addu(fp, sp, Operand(offset));
+}
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
@@ -455,12 +482,12 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
sw(scratch, MemOperand(t8));
// Call stub on end of buffer.
// Check for end of buffer.
- And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
+ And(t8, scratch, Operand(StoreBuffer::kStoreBufferMask));
if (and_then == kFallThroughAtEnd) {
- Branch(&done, eq, t8, Operand(zero_reg));
+ Branch(&done, ne, t8, Operand(zero_reg));
} else {
DCHECK(and_then == kReturnAtEnd);
- Ret(eq, t8, Operand(zero_reg));
+ Ret(ne, t8, Operand(zero_reg));
}
push(ra);
StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
@@ -481,13 +508,25 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Register scratch,
Label* miss) {
Label same_contexts;
+ Register temporary = t8;
DCHECK(!holder_reg.is(scratch));
DCHECK(!holder_reg.is(at));
DCHECK(!scratch.is(at));
- // Load current lexical context from the stack frame.
- lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Load current lexical context from the active StandardFrame, which
+ // may require crawling past STUB frames.
+ Label load_context;
+ Label has_context;
+ mov(at, fp);
+ bind(&load_context);
+ lw(scratch, MemOperand(at, CommonFrameConstants::kContextOrFrameTypeOffset));
+ // Passing temporary register, otherwise JumpIfNotSmi modifies register at.
+ JumpIfNotSmi(scratch, &has_context, temporary);
+ lw(at, MemOperand(at, CommonFrameConstants::kCallerFPOffset));
+ Branch(&load_context);
+ bind(&has_context);
+
// In debug mode, make sure the lexical context is set.
#ifdef DEBUG
Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
@@ -764,6 +803,34 @@ void MacroAssembler::Mul(Register rd_hi, Register rd_lo,
}
}
+void MacroAssembler::Mulu(Register rd_hi, Register rd_lo, Register rs,
+ const Operand& rt) {
+ Register reg;
+ if (rt.is_reg()) {
+ reg = rt.rm();
+ } else {
+ DCHECK(!rs.is(at));
+ reg = at;
+ li(reg, rt);
+ }
+
+ if (!IsMipsArchVariant(kMips32r6)) {
+ multu(rs, reg);
+ mflo(rd_lo);
+ mfhi(rd_hi);
+ } else {
+ if (rd_lo.is(rs)) {
+ DCHECK(!rd_hi.is(rs));
+ DCHECK(!rd_hi.is(reg) && !rd_lo.is(reg));
+ muhu(rd_hi, rs, reg);
+ mulu(rd_lo, rs, reg);
+ } else {
+ DCHECK(!rd_hi.is(reg) && !rd_lo.is(reg));
+ mulu(rd_lo, rs, reg);
+ muhu(rd_hi, rs, reg);
+ }
+ }
+}
void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
@@ -1078,7 +1145,7 @@ void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
rotrv(rd, rs, rt.rm());
} else {
- rotr(rd, rs, rt.imm32_);
+ rotr(rd, rs, rt.imm32_ & 0x1f);
}
} else {
if (rt.is_reg()) {
@@ -1090,8 +1157,8 @@ void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
if (rt.imm32_ == 0) {
srl(rd, rs, 0);
} else {
- srl(at, rs, rt.imm32_);
- sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
+ srl(at, rs, rt.imm32_ & 0x1f);
+ sll(rd, rs, (0x20 - (rt.imm32_ & 0x1f)) & 0x1f);
or_(rd, rd, at);
}
}
@@ -1110,8 +1177,9 @@ void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
Register scratch) {
+ DCHECK(sa >= 1 && sa <= 31);
if (IsMipsArchVariant(kMips32r6) && sa <= 4) {
- lsa(rd, rt, rs, sa);
+ lsa(rd, rt, rs, sa - 1);
} else {
Register tmp = rd.is(rt) ? scratch : rd;
DCHECK(!tmp.is(rt));
@@ -1840,6 +1908,185 @@ void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
}
}
+#define __ masm->
+
+static bool ZeroHelper_d(MacroAssembler* masm, MaxMinKind kind, FPURegister dst,
+ FPURegister src1, FPURegister src2, Label* equal) {
+ if (src1.is(src2)) {
+ __ Move(dst, src1);
+ return true;
+ }
+
+ Label other, compare_not_equal;
+ FPURegister left, right;
+ if (kind == MaxMinKind::kMin) {
+ left = src1;
+ right = src2;
+ } else {
+ left = src2;
+ right = src1;
+ }
+
+ __ BranchF64(&compare_not_equal, nullptr, ne, src1, src2);
+ // Left and right hand side are equal, check for -0 vs. +0.
+ __ FmoveHigh(t8, src1);
+ __ Branch(&other, eq, t8, Operand(0x80000000));
+ __ Move_d(dst, right);
+ __ Branch(equal);
+ __ bind(&other);
+ __ Move_d(dst, left);
+ __ Branch(equal);
+ __ bind(&compare_not_equal);
+ return false;
+}
+
+static bool ZeroHelper_s(MacroAssembler* masm, MaxMinKind kind, FPURegister dst,
+ FPURegister src1, FPURegister src2, Label* equal) {
+ if (src1.is(src2)) {
+ __ Move(dst, src1);
+ return true;
+ }
+
+ Label other, compare_not_equal;
+ FPURegister left, right;
+ if (kind == MaxMinKind::kMin) {
+ left = src1;
+ right = src2;
+ } else {
+ left = src2;
+ right = src1;
+ }
+
+ __ BranchF32(&compare_not_equal, nullptr, ne, src1, src2);
+ // Left and right hand side are equal, check for -0 vs. +0.
+ __ FmoveLow(t8, src1);
+ __ Branch(&other, eq, t8, Operand(0x80000000));
+ __ Move_s(dst, right);
+ __ Branch(equal);
+ __ bind(&other);
+ __ Move_s(dst, left);
+ __ Branch(equal);
+ __ bind(&compare_not_equal);
+ return false;
+}
+
+#undef __
+
+void MacroAssembler::MinNaNCheck_d(FPURegister dst, FPURegister src1,
+ FPURegister src2, Label* nan) {
+ if (nan) {
+ BranchF64(nullptr, nan, eq, src1, src2);
+ }
+ if (IsMipsArchVariant(kMips32r6)) {
+ min_d(dst, src1, src2);
+ } else {
+ Label skip;
+ if (!ZeroHelper_d(this, MaxMinKind::kMin, dst, src1, src2, &skip)) {
+ if (dst.is(src1)) {
+ BranchF64(&skip, nullptr, le, src1, src2);
+ Move_d(dst, src2);
+ } else if (dst.is(src2)) {
+ BranchF64(&skip, nullptr, ge, src1, src2);
+ Move_d(dst, src1);
+ } else {
+ Label right;
+ BranchF64(&right, nullptr, gt, src1, src2);
+ Move_d(dst, src1);
+ Branch(&skip);
+ bind(&right);
+ Move_d(dst, src2);
+ }
+ }
+ bind(&skip);
+ }
+}
+
+void MacroAssembler::MaxNaNCheck_d(FPURegister dst, FPURegister src1,
+ FPURegister src2, Label* nan) {
+ if (nan) {
+ BranchF64(nullptr, nan, eq, src1, src2);
+ }
+ if (IsMipsArchVariant(kMips32r6)) {
+ max_d(dst, src1, src2);
+ } else {
+ Label skip;
+ if (!ZeroHelper_d(this, MaxMinKind::kMax, dst, src1, src2, &skip)) {
+ if (dst.is(src1)) {
+ BranchF64(&skip, nullptr, ge, src1, src2);
+ Move_d(dst, src2);
+ } else if (dst.is(src2)) {
+ BranchF64(&skip, nullptr, le, src1, src2);
+ Move_d(dst, src1);
+ } else {
+ Label right;
+ BranchF64(&right, nullptr, lt, src1, src2);
+ Move_d(dst, src1);
+ Branch(&skip);
+ bind(&right);
+ Move_d(dst, src2);
+ }
+ }
+ bind(&skip);
+ }
+}
+
+void MacroAssembler::MinNaNCheck_s(FPURegister dst, FPURegister src1,
+ FPURegister src2, Label* nan) {
+ if (nan) {
+ BranchF32(nullptr, nan, eq, src1, src2);
+ }
+ if (IsMipsArchVariant(kMips32r6)) {
+ min_s(dst, src1, src2);
+ } else {
+ Label skip;
+ if (!ZeroHelper_s(this, MaxMinKind::kMin, dst, src1, src2, &skip)) {
+ if (dst.is(src1)) {
+ BranchF32(&skip, nullptr, le, src1, src2);
+ Move_s(dst, src2);
+ } else if (dst.is(src2)) {
+ BranchF32(&skip, nullptr, ge, src1, src2);
+ Move_s(dst, src1);
+ } else {
+ Label right;
+ BranchF32(&right, nullptr, gt, src1, src2);
+ Move_s(dst, src1);
+ Branch(&skip);
+ bind(&right);
+ Move_s(dst, src2);
+ }
+ }
+ bind(&skip);
+ }
+}
+
+void MacroAssembler::MaxNaNCheck_s(FPURegister dst, FPURegister src1,
+ FPURegister src2, Label* nan) {
+ if (nan) {
+ BranchF32(nullptr, nan, eq, src1, src2);
+ }
+ if (IsMipsArchVariant(kMips32r6)) {
+ max_s(dst, src1, src2);
+ } else {
+ Label skip;
+ if (!ZeroHelper_s(this, MaxMinKind::kMax, dst, src1, src2, &skip)) {
+ if (dst.is(src1)) {
+ BranchF32(&skip, nullptr, ge, src1, src2);
+ Move_s(dst, src2);
+ } else if (dst.is(src2)) {
+ BranchF32(&skip, nullptr, le, src1, src2);
+ Move_s(dst, src1);
+ } else {
+ Label right;
+ BranchF32(&right, nullptr, lt, src1, src2);
+ Move_s(dst, src1);
+ Branch(&skip);
+ bind(&right);
+ Move_s(dst, src2);
+ }
+ }
+ bind(&skip);
+ }
+}
void MacroAssembler::Clz(Register rd, Register rs) {
if (IsMipsArchVariant(kLoongson)) {
@@ -3011,16 +3258,25 @@ void MacroAssembler::Jump(Register target,
const Operand& rt,
BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
- if (cond == cc_always) {
- jr(target);
+ if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
+ if (cond == cc_always) {
+ jic(target, 0);
+ } else {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
+ jic(target, 0);
+ }
} else {
- BRANCH_ARGS_CHECK(cond, rs, rt);
- Branch(2, NegateCondition(cond), rs, rt);
- jr(target);
+ if (cond == cc_always) {
+ jr(target);
+ } else {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
+ jr(target);
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bd == PROTECT) nop();
}
- // Emit a nop in the branch delay slot if required.
- if (bd == PROTECT)
- nop();
}
@@ -3078,8 +3334,7 @@ int MacroAssembler::CallSize(Register target,
size += 3;
}
- if (bd == PROTECT)
- size += 1;
+ if (bd == PROTECT && !IsMipsArchVariant(kMips32r6)) size += 1;
return size * kInstrSize;
}
@@ -3098,16 +3353,25 @@ void MacroAssembler::Call(Register target,
BlockTrampolinePoolScope block_trampoline_pool(this);
Label start;
bind(&start);
- if (cond == cc_always) {
- jalr(target);
+ if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
+ if (cond == cc_always) {
+ jialc(target, 0);
+ } else {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
+ jialc(target, 0);
+ }
} else {
- BRANCH_ARGS_CHECK(cond, rs, rt);
- Branch(2, NegateCondition(cond), rs, rt);
- jalr(target);
+ if (cond == cc_always) {
+ jalr(target);
+ } else {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
+ jalr(target);
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bd == PROTECT) nop();
}
- // Emit a nop in the branch delay slot if required.
- if (bd == PROTECT)
- nop();
#ifdef DEBUG
CHECK_EQ(size + CallSize(target, cond, rs, rt, bd),
@@ -3198,18 +3462,35 @@ void MacroAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
BlockTrampolinePoolScope block_trampoline_pool(this);
uint32_t imm32;
imm32 = jump_address(L);
- {
- BlockGrowBufferScope block_buf_growth(this);
- // Buffer growth (and relocation) must be blocked for internal references
- // until associated instructions are emitted and available to be patched.
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
- lui(at, (imm32 & kHiMask) >> kLuiShift);
- ori(at, at, (imm32 & kImm16Mask));
+ if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
+ uint32_t lui_offset, jic_offset;
+ UnpackTargetAddressUnsigned(imm32, lui_offset, jic_offset);
+ {
+ BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal
+ // references until associated instructions are emitted and
+ // available to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ lui(at, lui_offset);
+ jic(at, jic_offset);
+ }
+ CheckBuffer();
+ } else {
+ {
+ BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal
+ // references
+ // until associated instructions are emitted and available to be
+ // patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ lui(at, (imm32 & kHiMask) >> kLuiShift);
+ ori(at, at, (imm32 & kImm16Mask));
+ }
+ CheckBuffer();
+ jr(at);
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT) nop();
}
- jr(at);
-
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT) nop();
}
}
@@ -3222,18 +3503,35 @@ void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
BlockTrampolinePoolScope block_trampoline_pool(this);
uint32_t imm32;
imm32 = jump_address(L);
- {
- BlockGrowBufferScope block_buf_growth(this);
- // Buffer growth (and relocation) must be blocked for internal references
- // until associated instructions are emitted and available to be patched.
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
- lui(at, (imm32 & kHiMask) >> kLuiShift);
- ori(at, at, (imm32 & kImm16Mask));
+ if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
+ uint32_t lui_offset, jic_offset;
+ UnpackTargetAddressUnsigned(imm32, lui_offset, jic_offset);
+ {
+ BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal
+ // references until associated instructions are emitted and
+ // available to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ lui(at, lui_offset);
+ jialc(at, jic_offset);
+ }
+ CheckBuffer();
+ } else {
+ {
+ BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal
+ // references
+ // until associated instructions are emitted and available to be
+ // patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ lui(at, (imm32 & kHiMask) >> kLuiShift);
+ ori(at, at, (imm32 & kImm16Mask));
+ }
+ CheckBuffer();
+ jalr(at);
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT) nop();
}
- jalr(at);
-
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT) nop();
}
}
@@ -4062,6 +4360,65 @@ void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
// -----------------------------------------------------------------------------
// JavaScript invokes.
+void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
+ Register caller_args_count_reg,
+ Register scratch0, Register scratch1) {
+#if DEBUG
+ if (callee_args_count.is_reg()) {
+ DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
+ scratch1));
+ } else {
+ DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
+ }
+#endif
+
+ // Calculate the end of destination area where we will put the arguments
+ // after we drop current frame. We add kPointerSize to count the receiver
+ // argument which is not included into formal parameters count.
+ Register dst_reg = scratch0;
+ Lsa(dst_reg, fp, caller_args_count_reg, kPointerSizeLog2);
+ Addu(dst_reg, dst_reg,
+ Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+
+ Register src_reg = caller_args_count_reg;
+ // Calculate the end of source area. +kPointerSize is for the receiver.
+ if (callee_args_count.is_reg()) {
+ Lsa(src_reg, sp, callee_args_count.reg(), kPointerSizeLog2);
+ Addu(src_reg, src_reg, Operand(kPointerSize));
+ } else {
+ Addu(src_reg, sp,
+ Operand((callee_args_count.immediate() + 1) * kPointerSize));
+ }
+
+ if (FLAG_debug_code) {
+ Check(lo, kStackAccessBelowStackPointer, src_reg, Operand(dst_reg));
+ }
+
+ // Restore caller's frame pointer and return address now as they will be
+ // overwritten by the copying loop.
+ lw(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Now copy callee arguments to the caller frame going backwards to avoid
+ // callee arguments corruption (source and destination areas could overlap).
+
+ // Both src_reg and dst_reg are pointing to the word after the one to copy,
+ // so they must be pre-decremented in the loop.
+ Register tmp_reg = scratch1;
+ Label loop, entry;
+ Branch(&entry);
+ bind(&loop);
+ Subu(src_reg, src_reg, Operand(kPointerSize));
+ Subu(dst_reg, dst_reg, Operand(kPointerSize));
+ lw(tmp_reg, MemOperand(src_reg));
+ sw(tmp_reg, MemOperand(dst_reg));
+ bind(&entry);
+ Branch(&loop, ne, sp, Operand(src_reg));
+
+ // Leave current frame.
+ mov(sp, dst_reg);
+}
+
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
Label* done,
@@ -4846,12 +5203,9 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
}
}
-
-void MacroAssembler::StubPrologue() {
- Push(ra, fp, cp);
- Push(Smi::FromInt(StackFrame::STUB));
- // Adjust FP to point to saved FP.
- Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+void MacroAssembler::StubPrologue(StackFrame::Type type) {
+ li(at, Operand(Smi::FromInt(type)));
+ PushCommonFrame(at);
}
@@ -4874,10 +5228,8 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
nop(); // Branch delay slot nop.
nop(); // Pad the empty space.
} else {
- Push(ra, fp, cp, a1);
+ PushStandardFrame(a1);
nop(Assembler::CODE_AGE_SEQUENCE_NOP);
- // Adjust fp to point to caller's fp.
- Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}
}
@@ -4898,30 +5250,41 @@ void MacroAssembler::EnterFrame(StackFrame::Type type,
void MacroAssembler::EnterFrame(StackFrame::Type type) {
- addiu(sp, sp, -5 * kPointerSize);
- li(t8, Operand(Smi::FromInt(type)));
- li(t9, Operand(CodeObject()), CONSTANT_SIZE);
- sw(ra, MemOperand(sp, 4 * kPointerSize));
- sw(fp, MemOperand(sp, 3 * kPointerSize));
- sw(cp, MemOperand(sp, 2 * kPointerSize));
- sw(t8, MemOperand(sp, 1 * kPointerSize));
- sw(t9, MemOperand(sp, 0 * kPointerSize));
+ int stack_offset, fp_offset;
+ if (type == StackFrame::INTERNAL) {
+ stack_offset = -4 * kPointerSize;
+ fp_offset = 2 * kPointerSize;
+ } else {
+ stack_offset = -3 * kPointerSize;
+ fp_offset = 1 * kPointerSize;
+ }
+ addiu(sp, sp, stack_offset);
+ stack_offset = -stack_offset - kPointerSize;
+ sw(ra, MemOperand(sp, stack_offset));
+ stack_offset -= kPointerSize;
+ sw(fp, MemOperand(sp, stack_offset));
+ stack_offset -= kPointerSize;
+ li(t9, Operand(Smi::FromInt(type)));
+ sw(t9, MemOperand(sp, stack_offset));
+ if (type == StackFrame::INTERNAL) {
+ DCHECK_EQ(stack_offset, kPointerSize);
+ li(t9, Operand(CodeObject()));
+ sw(t9, MemOperand(sp, 0));
+ } else {
+ DCHECK_EQ(stack_offset, 0);
+ }
// Adjust FP to point to saved FP.
- Addu(fp, sp,
- Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
+ Addu(fp, sp, Operand(fp_offset));
}
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
- mov(sp, fp);
- lw(fp, MemOperand(sp, 0 * kPointerSize));
- lw(ra, MemOperand(sp, 1 * kPointerSize));
- addiu(sp, sp, 2 * kPointerSize);
+ addiu(sp, fp, 2 * kPointerSize);
+ lw(ra, MemOperand(fp, 1 * kPointerSize));
+ lw(fp, MemOperand(fp, 0 * kPointerSize));
}
-
-void MacroAssembler::EnterExitFrame(bool save_doubles,
- int stack_space) {
+void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
// Set up the frame structure on the stack.
STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
@@ -4931,16 +5294,20 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
// fp + 2 (==kCallerSPDisplacement) - old stack's end
// [fp + 1 (==kCallerPCOffset)] - saved old ra
// [fp + 0 (==kCallerFPOffset)] - saved old fp
- // [fp - 1 (==kSPOffset)] - sp of the called function
- // [fp - 2 (==kCodeOffset)] - CodeObject
+ // [fp - 1 StackFrame::EXIT Smi
+ // [fp - 2 (==kSPOffset)] - sp of the called function
+ // [fp - 3 (==kCodeOffset)] - CodeObject
// fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
// new stack (will contain saved ra)
- // Save registers.
- addiu(sp, sp, -4 * kPointerSize);
- sw(ra, MemOperand(sp, 3 * kPointerSize));
- sw(fp, MemOperand(sp, 2 * kPointerSize));
- addiu(fp, sp, 2 * kPointerSize); // Set up new frame pointer.
+ // Save registers and reserve room for saved entry sp and code object.
+ addiu(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
+ sw(ra, MemOperand(sp, 4 * kPointerSize));
+ sw(fp, MemOperand(sp, 3 * kPointerSize));
+ li(at, Operand(Smi::FromInt(StackFrame::EXIT)));
+ sw(at, MemOperand(sp, 2 * kPointerSize));
+ // Set up new frame pointer.
+ addiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
if (emit_debug_code()) {
sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
@@ -5177,6 +5544,15 @@ void MacroAssembler::JumpIfEitherSmi(Register reg1,
JumpIfSmi(at, on_either_smi);
}
+void MacroAssembler::AssertNotNumber(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ andi(at, object, kSmiTagMask);
+ Check(ne, kOperandIsANumber, at, Operand(zero_reg));
+ GetObjectType(object, t8, t8);
+ Check(ne, kOperandIsNotANumber, t8, Operand(HEAP_NUMBER_TYPE));
+ }
+}
void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
@@ -5708,28 +6084,45 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
bind(&done);
}
-
-void MacroAssembler::TestJSArrayForAllocationMemento(
- Register receiver_reg,
- Register scratch_reg,
- Label* no_memento_found,
- Condition cond,
- Label* allocation_memento_present) {
- ExternalReference new_space_start =
- ExternalReference::new_space_start(isolate());
+void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Label* no_memento_found) {
+ Label map_check;
+ Label top_check;
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
- Addu(scratch_reg, receiver_reg,
- Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
- Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
+ const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
+ const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+
+ // Bail out if the object is not in new space.
+ JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
+ // If the object is in new space, we need to check whether it is on the same
+ // page as the current top.
+ Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+ Xor(scratch_reg, scratch_reg, Operand(new_space_allocation_top));
+ And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
+ Branch(&top_check, eq, scratch_reg, Operand(zero_reg));
+ // The object is on a different page than allocation top. Bail out if the
+ // object sits on the page boundary as no memento can follow and we cannot
+ // touch the memory following it.
+ Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+ Xor(scratch_reg, scratch_reg, Operand(receiver_reg));
+ And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
+ Branch(no_memento_found, ne, scratch_reg, Operand(zero_reg));
+ // Continue with the actual map check.
+ jmp(&map_check);
+ // If top is on the same page as the current object, we need to check whether
+ // we are below top.
+ bind(&top_check);
+ Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
li(at, Operand(new_space_allocation_top));
lw(at, MemOperand(at));
Branch(no_memento_found, gt, scratch_reg, Operand(at));
- lw(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
- if (allocation_memento_present) {
- Branch(allocation_memento_present, cond, scratch_reg,
- Operand(isolate()->factory()->allocation_memento_map()));
- }
+ // Memento map check.
+ bind(&map_check);
+ lw(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
+ Branch(no_memento_found, ne, scratch_reg,
+ Operand(isolate()->factory()->allocation_memento_map()));
}
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 05a8fec644..2f028658f4 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -237,7 +237,8 @@ class MacroAssembler: public Assembler {
void Call(Label* target);
- void Move(Register dst, Smi* smi) { li(dst, Operand(smi)); }
+ inline void Move(Register dst, Handle<Object> handle) { li(dst, handle); }
+ inline void Move(Register dst, Smi* smi) { li(dst, Operand(smi)); }
inline void Move(Register dst, Register src) {
if (!dst.is(src)) {
@@ -245,12 +246,20 @@ class MacroAssembler: public Assembler {
}
}
- inline void Move(FPURegister dst, FPURegister src) {
+ inline void Move_d(FPURegister dst, FPURegister src) {
if (!dst.is(src)) {
mov_d(dst, src);
}
}
+ inline void Move_s(FPURegister dst, FPURegister src) {
+ if (!dst.is(src)) {
+ mov_s(dst, src);
+ }
+ }
+
+ inline void Move(FPURegister dst, FPURegister src) { Move_d(dst, src); }
+
inline void Move(Register dst_low, Register dst_high, FPURegister src) {
mfc1(dst_low, src);
Mfhc1(dst_high, src);
@@ -284,6 +293,17 @@ class MacroAssembler: public Assembler {
void Movt(Register rd, Register rs, uint16_t cc = 0);
void Movf(Register rd, Register rs, uint16_t cc = 0);
+ // Min, Max macros.
+ // On pre-r6 these functions may modify at and t8 registers.
+ void MinNaNCheck_d(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* nan = nullptr);
+ void MaxNaNCheck_d(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* nan = nullptr);
+ void MinNaNCheck_s(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* nan = nullptr);
+ void MaxNaNCheck_s(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* nan = nullptr);
+
void Clz(Register rd, Register rs);
// Jump unconditionally to given label.
@@ -629,6 +649,7 @@ class MacroAssembler: public Assembler {
DEFINE_INSTRUCTION3(Div);
DEFINE_INSTRUCTION3(Mul);
+ DEFINE_INSTRUCTION3(Mulu);
DEFINE_INSTRUCTION(And);
DEFINE_INSTRUCTION(Or);
@@ -646,8 +667,12 @@ class MacroAssembler: public Assembler {
#undef DEFINE_INSTRUCTION2
#undef DEFINE_INSTRUCTION3
+ // Load Scaled Address instructions. Parameter sa (shift argument) must be
+ // between [1, 31] (inclusive). On pre-r6 architectures the scratch register
+ // may be clobbered.
void Lsa(Register rd, Register rs, Register rt, uint8_t sa,
Register scratch = at);
+
void Pref(int32_t hint, const MemOperand& rs);
@@ -761,6 +786,14 @@ class MacroAssembler: public Assembler {
Addu(sp, sp, Operand(count * kPointerSize));
}
+ // Push a fixed frame, consisting of ra, fp.
+ void PushCommonFrame(Register marker_reg = no_reg);
+
+ // Push a standard frame, consisting of ra, fp, context and JS function.
+ void PushStandardFrame(Register function_reg);
+
+ void PopCommonFrame(Register marker_reg = no_reg);
+
// Push and pop the registers that can hold pointers, as defined by the
// RegList constant kSafepointSavedRegisters.
void PushSafepointRegisters();
@@ -981,8 +1014,16 @@ class MacroAssembler: public Assembler {
// -------------------------------------------------------------------------
// JavaScript invokes.
- // Invoke the JavaScript function code by either calling or jumping.
+ // Removes current frame and its arguments from the stack preserving
+ // the arguments and a return address pushed to the stack for the next call.
+ // Both |callee_args_count| and |caller_args_count_reg| do not include
+ // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
+ // is trashed.
+ void PrepareForTailCall(const ParameterCount& callee_args_count,
+ Register caller_args_count_reg, Register scratch0,
+ Register scratch1);
+ // Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag,
@@ -1485,6 +1526,9 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Jump if either of the registers contain a smi.
void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
+ // Abort execution if argument is a number, enabled via --debug-code.
+ void AssertNotNumber(Register object);
+
// Abort execution if argument is a smi, enabled via --debug-code.
void AssertNotSmi(Register object);
void AssertSmi(Register object);
@@ -1604,7 +1648,7 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
}
// Generates function and stub prologue code.
- void StubPrologue();
+ void StubPrologue(StackFrame::Type type);
void Prologue(bool code_pre_aging);
// Load the type feedback vector from a JavaScript frame.
@@ -1619,25 +1663,22 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// in a0. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Label* call_runtime);
- // AllocationMemento support. Arrays may have an associated
- // AllocationMemento object that can be checked for in order to pretransition
- // to another type.
- // On entry, receiver_reg should point to the array object.
- // scratch_reg gets clobbered.
- // If allocation info is present, jump to allocation_memento_present.
- void TestJSArrayForAllocationMemento(
- Register receiver_reg,
- Register scratch_reg,
- Label* no_memento_found,
- Condition cond = al,
- Label* allocation_memento_present = NULL);
+ // AllocationMemento support. Arrays may have an associated AllocationMemento
+ // object that can be checked for in order to pretransition to another type.
+ // On entry, receiver_reg should point to the array object. scratch_reg gets
+ // clobbered. If no info is present jump to no_memento_found, otherwise fall
+ // through.
+ void TestJSArrayForAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Label* no_memento_found);
void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
Register scratch_reg,
Label* memento_found) {
Label no_memento_found;
TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
- &no_memento_found, eq, memento_found);
+ &no_memento_found);
+ Branch(memento_found);
bind(&no_memento_found);
}
@@ -1773,16 +1814,18 @@ void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
if (kArchVariant >= kMips32r6) {
BlockTrampolinePoolFor(case_count + 5);
addiupc(at, 5);
- lsa(at, at, index, kPointerSizeLog2);
+ Lsa(at, at, index, kPointerSizeLog2);
lw(at, MemOperand(at));
} else {
Label here;
- BlockTrampolinePoolFor(case_count + 6);
+ BlockTrampolinePoolFor(case_count + 10);
+ push(ra);
bal(&here);
sll(at, index, kPointerSizeLog2); // Branch delay slot.
bind(&here);
addu(at, at, ra);
- lw(at, MemOperand(at, 4 * v8::internal::Assembler::kInstrSize));
+ pop(ra);
+ lw(at, MemOperand(at, 6 * v8::internal::Assembler::kInstrSize));
}
jr(at);
nop(); // Branch delay slot nop.
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index 0c91cb5512..e37b6e12f9 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -4483,7 +4483,7 @@ void Simulator::CallInternal(byte* entry) {
// Set up the callee-saved registers with a known value. To be able to check
// that they are preserved properly across JS execution.
- int32_t callee_saved_value = icount_;
+ int32_t callee_saved_value = static_cast<int32_t>(icount_);
set_register(s0, callee_saved_value);
set_register(s1, callee_saved_value);
set_register(s2, callee_saved_value);
diff --git a/deps/v8/src/mips64/assembler-mips64-inl.h b/deps/v8/src/mips64/assembler-mips64-inl.h
index 37ee3a6807..dec58e895c 100644
--- a/deps/v8/src/mips64/assembler-mips64-inl.h
+++ b/deps/v8/src/mips64/assembler-mips64-inl.h
@@ -102,6 +102,10 @@ Address RelocInfo::target_address() {
return Assembler::target_address_at(pc_, host_);
}
+Address RelocInfo::wasm_memory_reference() {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ return Assembler::target_address_at(pc_, host_);
+}
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) ||
@@ -154,6 +158,18 @@ void RelocInfo::set_target_address(Address target,
}
}
+void RelocInfo::update_wasm_memory_reference(
+ Address old_base, Address new_base, size_t old_size, size_t new_size,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ DCHECK(old_base <= wasm_memory_reference() &&
+ wasm_memory_reference() < old_base + old_size);
+ Address updated_reference = new_base + (wasm_memory_reference() - old_base);
+ DCHECK(new_base <= updated_reference &&
+ updated_reference < new_base + new_size);
+ Assembler::set_target_address_at(isolate_, pc_, host_, updated_reference,
+ icache_flush_mode);
+}
Address Assembler::target_address_from_return_address(Address pc) {
return pc - kCallTargetAddressOffset;
diff --git a/deps/v8/src/mips64/assembler-mips64.cc b/deps/v8/src/mips64/assembler-mips64.cc
index f0d3eba6b6..5a8dd2cd37 100644
--- a/deps/v8/src/mips64/assembler-mips64.cc
+++ b/deps/v8/src/mips64/assembler-mips64.cc
@@ -1372,17 +1372,21 @@ void Assembler::bne(Register rs, Register rt, int16_t offset) {
void Assembler::bovc(Register rs, Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
- DCHECK(!(rs.is(zero_reg)));
- DCHECK(rs.code() >= rt.code());
- GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
+ if (rs.code() >= rt.code()) {
+ GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
+ } else {
+ GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
+ }
}
void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
- DCHECK(!(rs.is(zero_reg)));
- DCHECK(rs.code() >= rt.code());
- GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
+ if (rs.code() >= rt.code()) {
+ GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
+ } else {
+ GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
+ }
}
@@ -1863,6 +1867,12 @@ void Assembler::drotr(Register rd, Register rt, uint16_t sa) {
emit(instr);
}
+void Assembler::drotr32(Register rd, Register rt, uint16_t sa) {
+ DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
+ Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift) |
+ (rd.code() << kRdShift) | (sa << kSaShift) | DSRL32;
+ emit(instr);
+}
void Assembler::drotrv(Register rd, Register rt, Register rs) {
DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid() );
@@ -1899,20 +1909,20 @@ void Assembler::dsra32(Register rd, Register rt, uint16_t sa) {
void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
- DCHECK(sa < 5 && sa > 0);
+ DCHECK(sa <= 3);
DCHECK(kArchVariant == kMips64r6);
- Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
- (rd.code() << kRdShift) | (sa - 1) << kSaShift | LSA;
+ Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
+ rd.code() << kRdShift | sa << kSaShift | LSA;
emit(instr);
}
void Assembler::dlsa(Register rd, Register rt, Register rs, uint8_t sa) {
DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
- DCHECK(sa < 5 && sa > 0);
+ DCHECK(sa <= 3);
DCHECK(kArchVariant == kMips64r6);
- Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
- (rd.code() << kRdShift) | (sa - 1) << kSaShift | DLSA;
+ Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
+ rd.code() << kRdShift | sa << kSaShift | DLSA;
emit(instr);
}
@@ -2493,7 +2503,6 @@ void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
- DCHECK(!src.rm().is(at));
if (is_int16(src.offset_)) {
GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
@@ -3210,7 +3219,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// We do not try to reuse pool constants.
RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
if (rmode >= RelocInfo::COMMENT &&
- rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_CALL) {
+ rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL) {
// Adjust code for new modes.
DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
|| RelocInfo::IsComment(rmode)
diff --git a/deps/v8/src/mips64/assembler-mips64.h b/deps/v8/src/mips64/assembler-mips64.h
index bf2285a2d5..de09366b42 100644
--- a/deps/v8/src/mips64/assembler-mips64.h
+++ b/deps/v8/src/mips64/assembler-mips64.h
@@ -535,7 +535,11 @@ class Assembler : public AssemblerBase {
// Distance between the instruction referring to the address of the call
// target and the return address.
+#ifdef _MIPS_ARCH_MIPS64R6
+ static const int kCallTargetAddressOffset = 5 * kInstrSize;
+#else
static const int kCallTargetAddressOffset = 6 * kInstrSize;
+#endif
// Distance between start of patched debug break slot and the emitted address
// to jump to.
@@ -545,7 +549,11 @@ class Assembler : public AssemblerBase {
// register.
static const int kPcLoadDelta = 4;
+#ifdef _MIPS_ARCH_MIPS64R6
+ static const int kDebugBreakSlotInstructions = 5;
+#else
static const int kDebugBreakSlotInstructions = 6;
+#endif
static const int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstrSize;
@@ -783,6 +791,7 @@ class Assembler : public AssemblerBase {
void dsrl(Register rd, Register rt, uint16_t sa);
void dsrlv(Register rd, Register rt, Register rs);
void drotr(Register rd, Register rt, uint16_t sa);
+ void drotr32(Register rd, Register rt, uint16_t sa);
void drotrv(Register rd, Register rt, Register rs);
void dsra(Register rt, Register rd, uint16_t sa);
void dsrav(Register rd, Register rt, Register rs);
@@ -790,10 +799,6 @@ class Assembler : public AssemblerBase {
void dsrl32(Register rt, Register rd, uint16_t sa);
void dsra32(Register rt, Register rd, uint16_t sa);
- // Address computing instructions with shift.
- void lsa(Register rd, Register rt, Register rs, uint8_t sa);
- void dlsa(Register rd, Register rt, Register rs, uint8_t sa);
-
// ------------Memory-instructions-------------
void lb(Register rd, const MemOperand& rs);
@@ -1107,7 +1112,9 @@ class Assembler : public AssemblerBase {
void dp(uintptr_t data) { dq(data); }
void dd(Label* label);
- PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+ AssemblerPositionsRecorder* positions_recorder() {
+ return &positions_recorder_;
+ }
// Postpone the generation of the trampoline pool for the specified number of
// instructions.
@@ -1206,6 +1213,10 @@ class Assembler : public AssemblerBase {
bool IsPrevInstrCompactBranch() { return prev_instr_compact_branch_; }
protected:
+ // Load Scaled Address instructions.
+ void lsa(Register rd, Register rt, Register rs, uint8_t sa);
+ void dlsa(Register rd, Register rt, Register rs, uint8_t sa);
+
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
// the relocation info.
@@ -1490,8 +1501,8 @@ class Assembler : public AssemblerBase {
friend class CodePatcher;
friend class BlockTrampolinePoolScope;
- PositionsRecorder positions_recorder_;
- friend class PositionsRecorder;
+ AssemblerPositionsRecorder positions_recorder_;
+ friend class AssemblerPositionsRecorder;
friend class EnsureSpace;
};
diff --git a/deps/v8/src/mips64/builtins-mips64.cc b/deps/v8/src/mips64/builtins-mips64.cc
index 1d8d5d3599..b55b77c511 100644
--- a/deps/v8/src/mips64/builtins-mips64.cc
+++ b/deps/v8/src/mips64/builtins-mips64.cc
@@ -148,17 +148,15 @@ void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
// -- sp[(argc - n) * 8] : arg[n] (zero-based)
// -- sp[(argc + 1) * 8] : receiver
// -----------------------------------
- Condition const cc = (kind == MathMaxMinKind::kMin) ? ge : le;
Heap::RootListIndex const root_index =
(kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
: Heap::kMinusInfinityValueRootIndex;
- DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? f2 : f0;
// Load the accumulator with the default return value (either -Infinity or
// +Infinity), with the tagged value in a1 and the double value in f0.
__ LoadRoot(a1, root_index);
__ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
- __ mov(a3, a0);
+ __ Addu(a3, a0, 1);
Label done_loop, loop;
__ bind(&loop);
@@ -210,23 +208,21 @@ void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
__ SmiToDoubleFPURegister(a2, f2, a4);
__ bind(&done_convert);
- // Perform the actual comparison with the accumulator value on the left hand
- // side (f0) and the next parameter value on the right hand side (f2).
- Label compare_equal, compare_nan, compare_swap;
- __ BranchF(&compare_equal, &compare_nan, eq, f0, f2);
- __ BranchF(&compare_swap, nullptr, cc, f0, f2);
- __ Branch(&loop);
-
- // Left and right hand side are equal, check for -0 vs. +0.
- __ bind(&compare_equal);
- __ FmoveHigh(a4, reg);
- // Make a4 unsigned.
- __ dsll32(a4, a4, 0);
- __ Branch(&loop, ne, a4, Operand(0x8000000000000000));
-
- // Result is on the right hand side.
- __ bind(&compare_swap);
- __ mov_d(f0, f2);
+ // Perform the actual comparison with using Min/Max macro instructions the
+ // accumulator value on the left hand side (f0) and the next parameter value
+ // on the right hand side (f2).
+ // We need to work out which HeapNumber (or smi) the result came from.
+ Label compare_nan;
+ __ BranchF(nullptr, &compare_nan, eq, f0, f2);
+ __ Move(a4, f0);
+ if (kind == MathMaxMinKind::kMin) {
+ __ MinNaNCheck_d(f0, f0, f2);
+ } else {
+ DCHECK(kind == MathMaxMinKind::kMax);
+ __ MaxNaNCheck_d(f0, f0, f2);
+ }
+ __ Move(at, f0);
+ __ Branch(&loop, eq, a4, Operand(at));
__ mov(a1, a2);
__ jmp(&loop);
@@ -239,8 +235,8 @@ void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
__ bind(&done_loop);
__ Dlsa(sp, sp, a3, kPointerSizeLog2);
- __ mov(v0, a1);
- __ DropAndRet(1);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a1); // In delay slot.
}
// static
@@ -528,6 +524,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -- a1 : constructor function
// -- a2 : allocation site or undefined
// -- a3 : new target
+ // -- cp : context
// -- ra : return address
// -- sp[...]: constructor arguments
// -----------------------------------
@@ -541,7 +538,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Preserve the incoming parameters on the stack.
__ AssertUndefinedOrAllocationSite(a2, t0);
__ SmiTag(a0);
- __ Push(a2, a0);
+ __ Push(cp, a2, a0);
if (create_implicit_receiver) {
__ Push(a1, a3);
@@ -612,7 +609,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Restore context from the frame.
- __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
if (create_implicit_receiver) {
// If the result is an object (in the ECMA sense), we should get rid
@@ -743,8 +740,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// -- s0: argv
// -----------------------------------
ProfileEntryHookStub::MaybeCallEntryHook(masm);
- // Clear the context before we push it when entering the JS frame.
- __ mov(cp, zero_reg);
// Enter an internal frame.
{
@@ -839,9 +834,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
-
- __ Push(ra, fp, cp, a1);
- __ Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ PushStandardFrame(a1);
// Get the bytecode array from the function object and load the pointer to the
// first entry into kInterpreterBytecodeRegister.
@@ -1197,8 +1190,7 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
__ MultiPop(saved_regs);
// Perform prologue operations usually performed by the young code stub.
- __ Push(ra, fp, cp, a1);
- __ Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ PushStandardFrame(a1);
// Jump to point after the code-age stub.
__ Daddu(a0, a0, Operand((kNoCodeAgeSequenceLength)));
@@ -1428,23 +1420,6 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
}
-void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
- // We check the stack limit as indicator that recompilation might be done.
- Label ok;
- __ LoadRoot(at, Heap::kStackLimitRootIndex);
- __ Branch(&ok, hs, sp, Operand(at));
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard);
- }
- __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
- RelocInfo::CODE_TARGET);
-
- __ bind(&ok);
- __ Ret();
-}
-
-
// static
void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
int field_index) {
@@ -1491,6 +1466,27 @@ void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
__ TailCallRuntime(Runtime::kThrowNotDateError);
}
+// static
+void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argc
+ // -- sp[0] : first argument (left-hand side)
+ // -- sp[8] : receiver (right-hand side)
+ // -----------------------------------
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ ld(InstanceOfDescriptor::LeftRegister(),
+ MemOperand(fp, 2 * kPointerSize)); // Load left-hand side.
+ __ ld(InstanceOfDescriptor::RightRegister(),
+ MemOperand(fp, 3 * kPointerSize)); // Load right-hand side.
+ InstanceOfStub stub(masm->isolate(), true);
+ __ CallStub(&stub);
+ }
+
+ // Pop the argument and the receiver.
+ __ DropAndRet(2);
+}
// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
@@ -1956,18 +1952,20 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
Comment cmnt(masm, "[ PrepareForTailCall");
- // Prepare for tail call only if the debugger is not active.
+ // Prepare for tail call only if ES2015 tail call elimination is enabled.
Label done;
- ExternalReference debug_is_active =
- ExternalReference::debug_is_active_address(masm->isolate());
- __ li(at, Operand(debug_is_active));
+ ExternalReference is_tail_call_elimination_enabled =
+ ExternalReference::is_tail_call_elimination_enabled_address(
+ masm->isolate());
+ __ li(at, Operand(is_tail_call_elimination_enabled));
__ lb(scratch1, MemOperand(at));
- __ Branch(&done, ne, scratch1, Operand(zero_reg));
+ __ Branch(&done, eq, scratch1, Operand(zero_reg));
// Drop possible interpreter handler/stub frame.
{
Label no_interpreter_frame;
- __ ld(scratch3, MemOperand(fp, StandardFrameConstants::kMarkerOffset));
+ __ ld(scratch3,
+ MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Branch(&no_interpreter_frame, ne, scratch3,
Operand(Smi::FromInt(StackFrame::STUB)));
__ ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -1975,71 +1973,36 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
}
// Check if next frame is an arguments adaptor frame.
+ Register caller_args_count_reg = scratch1;
Label no_arguments_adaptor, formal_parameter_count_loaded;
__ ld(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+ __ ld(scratch3,
+ MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Branch(&no_arguments_adaptor, ne, scratch3,
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- // Drop arguments adaptor frame and load arguments count.
+ // Drop current frame and load arguments count from arguments adaptor frame.
__ mov(fp, scratch2);
- __ ld(scratch1,
+ __ ld(caller_args_count_reg,
MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(scratch1);
+ __ SmiUntag(caller_args_count_reg);
__ Branch(&formal_parameter_count_loaded);
__ bind(&no_arguments_adaptor);
// Load caller's formal parameter count
- __ ld(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ld(scratch1,
+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
__ ld(scratch1,
FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(scratch1,
+ __ lw(caller_args_count_reg,
FieldMemOperand(scratch1,
SharedFunctionInfo::kFormalParameterCountOffset));
__ bind(&formal_parameter_count_loaded);
- // Calculate the end of destination area where we will put the arguments
- // after we drop current frame. We add kPointerSize to count the receiver
- // argument which is not included into formal parameters count.
- Register dst_reg = scratch2;
- __ Dlsa(dst_reg, fp, scratch1, kPointerSizeLog2);
- __ Daddu(dst_reg, dst_reg,
- Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
-
- Register src_reg = scratch1;
- __ Dlsa(src_reg, sp, args_reg, kPointerSizeLog2);
- // Count receiver argument as well (not included in args_reg).
- __ Daddu(src_reg, src_reg, Operand(kPointerSize));
-
- if (FLAG_debug_code) {
- __ Check(lo, kStackAccessBelowStackPointer, src_reg, Operand(dst_reg));
- }
-
- // Restore caller's frame pointer and return address now as they will be
- // overwritten by the copying loop.
- __ ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
- __ ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Now copy callee arguments to the caller frame going backwards to avoid
- // callee arguments corruption (source and destination areas could overlap).
-
- // Both src_reg and dst_reg are pointing to the word after the one to copy,
- // so they must be pre-decremented in the loop.
- Register tmp_reg = scratch3;
- Label loop, entry;
- __ Branch(&entry);
- __ bind(&loop);
- __ Dsubu(src_reg, src_reg, Operand(kPointerSize));
- __ Dsubu(dst_reg, dst_reg, Operand(kPointerSize));
- __ ld(tmp_reg, MemOperand(src_reg));
- __ sd(tmp_reg, MemOperand(dst_reg));
- __ bind(&entry);
- __ Branch(&loop, ne, sp, Operand(src_reg));
-
- // Leave current frame.
- __ mov(sp, dst_reg);
-
+ ParameterCount callee_args_count(args_reg);
+ __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+ scratch3);
__ bind(&done);
}
} // namespace
@@ -2549,27 +2512,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected.
__ bind(&too_few);
-
- // If the function is strong we need to throw an error.
- Label no_strong_error;
- __ ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lbu(a5, FieldMemOperand(a4, SharedFunctionInfo::kStrongModeByteOffset));
- __ And(a5, a5, Operand(1 << SharedFunctionInfo::kStrongModeBitWithinByte));
- __ Branch(&no_strong_error, eq, a5, Operand(zero_reg));
-
- // What we really care about is the required number of arguments.
- DCHECK_EQ(kPointerSize, kInt64Size);
- __ lw(a5, FieldMemOperand(a4, SharedFunctionInfo::kLengthOffset));
- __ srl(a5, a5, 1);
- __ Branch(&no_strong_error, ge, a0, Operand(a5));
-
- {
- FrameScope frame(masm, StackFrame::MANUAL);
- EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
- }
-
- __ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
ArgumentAdaptorStackCheck(masm, &stack_overflow);
diff --git a/deps/v8/src/mips64/code-stubs-mips64.cc b/deps/v8/src/mips64/code-stubs-mips64.cc
index 28812ad997..fdb6c81d2e 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.cc
+++ b/deps/v8/src/mips64/code-stubs-mips64.cc
@@ -4,8 +4,9 @@
#if V8_TARGET_ARCH_MIPS64
-#include "src/bootstrapper.h"
#include "src/code-stubs.h"
+#include "src/api-arguments.h"
+#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
@@ -75,6 +76,10 @@ void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
}
+void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+ Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
+ descriptor->Initialize(a0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
+}
void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
@@ -502,7 +507,7 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
(lhs.is(a1) && rhs.is(a0)));
// a2 is object type of rhs.
- Label object_test, return_unequal, undetectable;
+ Label object_test, return_equal, return_unequal, undetectable;
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
__ And(at, a2, Operand(kIsNotStringMask));
__ Branch(&object_test, ne, at, Operand(zero_reg));
@@ -542,6 +547,16 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
__ bind(&undetectable);
__ And(at, t1, Operand(1 << Map::kIsUndetectable));
__ Branch(&return_unequal, eq, at, Operand(zero_reg));
+
+ // If both sides are JSReceivers, then the result is false according to
+ // the HTML specification, which says that only comparisons with null or
+ // undefined are affected by special casing for document.all.
+ __ GetInstanceType(a2, a2);
+ __ Branch(&return_equal, eq, a2, Operand(ODDBALL_TYPE));
+ __ GetInstanceType(a3, a3);
+ __ Branch(&return_unequal, ne, a3, Operand(ODDBALL_TYPE));
+
+ __ bind(&return_equal);
__ Ret(USE_DELAY_SLOT);
__ li(v0, Operand(EQUAL)); // In delay slot.
}
@@ -1488,8 +1503,12 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
__ GetObjectType(function, function_map, scratch);
__ Branch(&slow_case, ne, scratch, Operand(JS_FUNCTION_TYPE));
- // Ensure that {function} has an instance prototype.
+ // Go to the runtime if the function is not a constructor.
__ lbu(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
+ __ And(at, scratch, Operand(1 << Map::kIsConstructor));
+ __ Branch(&slow_case, eq, at, Operand(zero_reg));
+
+ // Ensure that {function} has an instance prototype.
__ And(at, scratch, Operand(1 << Map::kHasNonInstancePrototype));
__ Branch(&slow_case, ne, at, Operand(zero_reg));
@@ -1559,7 +1578,8 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
// Slow-case: Call the %InstanceOf runtime function.
__ bind(&slow_case);
__ Push(object, function);
- __ TailCallRuntime(Runtime::kInstanceOf);
+ __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
+ : Runtime::kInstanceOf);
}
@@ -1579,29 +1599,6 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
}
-void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
- // Return address is in ra.
- Label slow;
-
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
-
- // Check that the key is an array index, that is Uint32.
- __ And(t0, key, Operand(kSmiTagMask | kSmiSignMask));
- __ Branch(&slow, ne, t0, Operand(zero_reg));
-
- // Everything is fine, call runtime.
- __ Push(receiver, key); // Receiver, key.
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
-
- __ bind(&slow);
- PropertyAccessCompiler::TailCallBuiltin(
- masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -2777,57 +2774,58 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ bind(&not_smi);
Label not_heap_number;
- __ ld(a1, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
- // a0: object
- // a1: instance type.
+ __ GetObjectType(a0, a1, a1);
+ // a0: receiver
+ // a1: receiver instance type
__ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
__ bind(&not_heap_number);
- Label not_string, slow_string;
+ NonNumberToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+}
+
+void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
+ // The NonNumberToNumber stub takes on argument in a0.
+ __ AssertNotNumber(a0);
+
+ Label not_string;
+ __ GetObjectType(a0, a1, a1);
+ // a0: receiver
+ // a1: receiver instance type
__ Branch(&not_string, hs, a1, Operand(FIRST_NONSTRING_TYPE));
- // Check if string has a cached array index.
- __ lwu(a2, FieldMemOperand(a0, String::kHashFieldOffset));
- __ And(at, a2, Operand(String::kContainsCachedArrayIndexMask));
- __ Branch(&slow_string, ne, at, Operand(zero_reg));
- __ IndexFromHash(a2, a0);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
- __ bind(&slow_string);
- __ push(a0); // Push argument.
- __ TailCallRuntime(Runtime::kStringToNumber);
+ StringToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
__ bind(&not_string);
Label not_oddball;
__ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
__ Ret(USE_DELAY_SLOT);
- __ ld(v0, FieldMemOperand(a0, Oddball::kToNumberOffset));
+ __ ld(v0, FieldMemOperand(a0, Oddball::kToNumberOffset)); // In delay slot.
__ bind(&not_oddball);
- __ push(a0); // Push argument.
+ __ Push(a0); // Push argument.
__ TailCallRuntime(Runtime::kToNumber);
}
+void StringToNumberStub::Generate(MacroAssembler* masm) {
+ // The StringToNumber stub takes on argument in a0.
+ __ AssertString(a0);
-void ToLengthStub::Generate(MacroAssembler* masm) {
- // The ToLength stub takes on argument in a0.
- Label not_smi, positive_smi;
- __ JumpIfNotSmi(a0, &not_smi);
- STATIC_ASSERT(kSmiTag == 0);
- __ Branch(&positive_smi, ge, a0, Operand(zero_reg));
- __ mov(a0, zero_reg);
- __ bind(&positive_smi);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
- __ bind(&not_smi);
+ // Check if string has a cached array index.
+ Label runtime;
+ __ lwu(a2, FieldMemOperand(a0, String::kHashFieldOffset));
+ __ And(at, a2, Operand(String::kContainsCachedArrayIndexMask));
+ __ Branch(&runtime, ne, at, Operand(zero_reg));
+ __ IndexFromHash(a2, v0);
+ __ Ret();
- __ push(a0); // Push argument.
- __ TailCallRuntime(Runtime::kToLength);
+ __ bind(&runtime);
+ __ Push(a0); // Push argument.
+ __ TailCallRuntime(Runtime::kStringToNumber);
}
-
void ToStringStub::Generate(MacroAssembler* masm) {
// The ToString stub takes on argument in a0.
Label is_number;
@@ -2998,39 +2996,6 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
}
-void StringCompareStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a1 : left
- // -- a0 : right
- // -- ra : return address
- // -----------------------------------
- __ AssertString(a1);
- __ AssertString(a0);
-
- Label not_same;
- __ Branch(&not_same, ne, a0, Operand(a1));
- __ li(v0, Operand(Smi::FromInt(EQUAL)));
- __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a1,
- a2);
- __ Ret();
-
- __ bind(&not_same);
-
- // Check that both objects are sequential one-byte strings.
- Label runtime;
- __ JumpIfNotBothSequentialOneByteStrings(a1, a0, a2, a3, &runtime);
-
- // Compare flat ASCII strings natively.
- __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
- a3);
- StringHelper::GenerateCompareFlatOneByteStrings(masm, a1, a0, a2, a3, t0, t1);
-
- __ bind(&runtime);
- __ Push(a1, a0);
- __ TailCallRuntime(Runtime::kStringCompare);
-}
-
-
void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a1 : left
@@ -3353,10 +3318,17 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
// Handle more complex cases in runtime.
__ bind(&runtime);
- __ Push(left, right);
if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(left, right);
+ __ CallRuntime(Runtime::kStringEqual);
+ }
+ __ LoadRoot(a0, Heap::kTrueValueRootIndex);
+ __ Ret(USE_DELAY_SLOT);
+ __ Subu(v0, v0, a0); // In delay slot.
} else {
+ __ Push(left, right);
__ TailCallRuntime(Runtime::kStringCompare);
}
@@ -3915,7 +3887,7 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
int parameter_count_offset =
- StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
__ ld(a1, MemOperand(fp, parameter_count_offset));
if (function_mode() == JS_FUNCTION_STUB_MODE) {
__ Daddu(a1, a1, Operand(1));
@@ -4900,7 +4872,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
__ bind(&loop);
__ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
__ bind(&loop_entry);
- __ ld(a3, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+ __ ld(a3, MemOperand(a2, StandardFrameConstants::kFunctionOffset));
__ Branch(&loop, ne, a1, Operand(a3));
}
@@ -4908,7 +4880,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// arguments adaptor frame below the function frame).
Label no_rest_parameters;
__ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
- __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+ __ ld(a3, MemOperand(a2, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Branch(&no_rest_parameters, ne, a3,
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
@@ -5053,7 +5025,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
__ ld(a4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(a0, MemOperand(a4, StandardFrameConstants::kContextOffset));
+ __ ld(a0, MemOperand(a4, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Branch(&adaptor_frame, eq, a0,
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
@@ -5266,14 +5238,14 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ bind(&loop);
__ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
__ bind(&loop_entry);
- __ ld(a3, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+ __ ld(a3, MemOperand(a2, StandardFrameConstants::kFunctionOffset));
__ Branch(&loop, ne, a1, Operand(a3));
}
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
__ ld(a3, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
- __ ld(a0, MemOperand(a3, StandardFrameConstants::kContextOffset));
+ __ ld(a0, MemOperand(a3, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Branch(&arguments_adaptor, eq, a0,
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
{
@@ -5635,16 +5607,12 @@ static void CallApiFunctionAndReturn(
__ jmp(&leave_exit_frame);
}
-static void CallApiFunctionStubHelper(MacroAssembler* masm,
- const ParameterCount& argc,
- bool return_first_arg,
- bool call_data_undefined, bool is_lazy) {
+void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : callee
// -- a4 : call_data
// -- a2 : holder
// -- a1 : api_function_address
- // -- a3 : number of arguments if argc is a register
// -- cp : context
// --
// -- sp[0] : last argument
@@ -5670,17 +5638,15 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
STATIC_ASSERT(FCA::kHolderIndex == 0);
STATIC_ASSERT(FCA::kArgsLength == 7);
- DCHECK(argc.is_immediate() || a3.is(argc.reg()));
-
// Save context, callee and call data.
__ Push(context, callee, call_data);
- if (!is_lazy) {
+ if (!is_lazy()) {
// Load context from callee.
__ ld(context, FieldMemOperand(callee, JSFunction::kContextOffset));
}
Register scratch = call_data;
- if (!call_data_undefined) {
+ if (!call_data_undefined()) {
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
}
// Push return value and default return value.
@@ -5705,33 +5671,17 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
__ Daddu(a0, sp, Operand(1 * kPointerSize));
// FunctionCallbackInfo::implicit_args_
__ sd(scratch, MemOperand(a0, 0 * kPointerSize));
- if (argc.is_immediate()) {
- // FunctionCallbackInfo::values_
- __ Daddu(at, scratch,
- Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
- __ sd(at, MemOperand(a0, 1 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc
- // Stored as int field, 32-bit integers within struct on stack always left
- // justified by n64 ABI.
- __ li(at, Operand(argc.immediate()));
- __ sw(at, MemOperand(a0, 2 * kPointerSize));
- // FunctionCallbackInfo::is_construct_call_ = 0
- __ sw(zero_reg, MemOperand(a0, 2 * kPointerSize + kIntSize));
- } else {
- // FunctionCallbackInfo::values_
- __ dsll(at, argc.reg(), kPointerSizeLog2);
- __ Daddu(at, at, scratch);
- __ Daddu(at, at, Operand((FCA::kArgsLength - 1) * kPointerSize));
- __ sd(at, MemOperand(a0, 1 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc
- // Stored as int field, 32-bit integers within struct on stack always left
- // justified by n64 ABI.
- __ sw(argc.reg(), MemOperand(a0, 2 * kPointerSize));
- // FunctionCallbackInfo::is_construct_call_
- __ Daddu(argc.reg(), argc.reg(), Operand(FCA::kArgsLength + 1));
- __ dsll(at, argc.reg(), kPointerSizeLog2);
- __ sw(at, MemOperand(a0, 2 * kPointerSize + kIntSize));
- }
+ // FunctionCallbackInfo::values_
+ __ Daddu(at, scratch,
+ Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
+ __ sd(at, MemOperand(a0, 1 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc
+ // Stored as int field, 32-bit integers within struct on stack always left
+ // justified by n64 ABI.
+ __ li(at, Operand(argc()));
+ __ sw(at, MemOperand(a0, 2 * kPointerSize));
+ // FunctionCallbackInfo::is_construct_call_ = 0
+ __ sw(zero_reg, MemOperand(a0, 2 * kPointerSize + kIntSize));
ExternalReference thunk_ref =
ExternalReference::invoke_function_callback(masm->isolate());
@@ -5741,7 +5691,7 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
// Stores return the first js argument.
int return_value_offset = 0;
- if (return_first_arg) {
+ if (is_store()) {
return_value_offset = 2 + FCA::kArgsLength;
} else {
return_value_offset = 2 + FCA::kReturnValueOffset;
@@ -5749,33 +5699,14 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
int stack_space = 0;
int32_t stack_space_offset = 4 * kPointerSize;
- if (argc.is_immediate()) {
- stack_space = argc.immediate() + FCA::kArgsLength + 1;
- stack_space_offset = kInvalidStackOffset;
- }
+ stack_space = argc() + FCA::kArgsLength + 1;
+ stack_space_offset = kInvalidStackOffset;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
stack_space_offset, return_value_operand,
&context_restore_operand);
}
-void CallApiFunctionStub::Generate(MacroAssembler* masm) {
- bool call_data_undefined = this->call_data_undefined();
- CallApiFunctionStubHelper(masm, ParameterCount(a3), false,
- call_data_undefined, false);
-}
-
-
-void CallApiAccessorStub::Generate(MacroAssembler* masm) {
- bool is_store = this->is_store();
- int argc = this->argc();
- bool call_data_undefined = this->call_data_undefined();
- bool is_lazy = this->is_lazy();
- CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
- call_data_undefined, is_lazy);
-}
-
-
void CallApiGetterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- sp[0] : name
diff --git a/deps/v8/src/mips64/codegen-mips64.cc b/deps/v8/src/mips64/codegen-mips64.cc
index c8cde97883..44d822b615 100644
--- a/deps/v8/src/mips64/codegen-mips64.cc
+++ b/deps/v8/src/mips64/codegen-mips64.cc
@@ -1194,12 +1194,10 @@ CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
young_sequence_.length() / Assembler::kInstrSize,
CodePatcher::DONT_FLUSH));
PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
- patcher->masm()->Push(ra, fp, cp, a1);
+ patcher->masm()->PushStandardFrame(a1);
patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
- patcher->masm()->Daddu(
- fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}
diff --git a/deps/v8/src/mips64/deoptimizer-mips64.cc b/deps/v8/src/mips64/deoptimizer-mips64.cc
index ec610f0281..90bd11e5e3 100644
--- a/deps/v8/src/mips64/deoptimizer-mips64.cc
+++ b/deps/v8/src/mips64/deoptimizer-mips64.cc
@@ -98,12 +98,6 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
}
}
-bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
- // There is no dynamic alignment padding on MIPS in the input frame.
- return false;
-}
-
-
#define __ masm()->
@@ -161,9 +155,14 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Allocate a new deoptimizer object.
__ PrepareCallCFunction(6, a5);
- // Pass six arguments, according to O32 or n64 ABI. a0..a3 are same for both.
- __ li(a1, Operand(type())); // bailout type,
+ // Pass six arguments, according to n64 ABI.
+ __ mov(a0, zero_reg);
+ Label context_check;
+ __ ld(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ JumpIfSmi(a1, &context_check);
__ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ bind(&context_check);
+ __ li(a1, Operand(type())); // Bailout type.
// a2: bailout id already loaded.
// a3: code address or 0 already loaded.
// a4: already has fp-to-sp delta.
@@ -238,6 +237,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
__ pop(a0); // Restore deoptimizer object (class Deoptimizer).
+ __ ld(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
+
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop,
outer_loop_header, inner_loop_header;
diff --git a/deps/v8/src/mips64/frames-mips64.h b/deps/v8/src/mips64/frames-mips64.h
index 9c42d8d95c..d6d3e5c0fb 100644
--- a/deps/v8/src/mips64/frames-mips64.h
+++ b/deps/v8/src/mips64/frames-mips64.h
@@ -133,13 +133,11 @@ class EntryFrameConstants : public AllStatic {
-(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};
-
-class ExitFrameConstants : public AllStatic {
+class ExitFrameConstants : public TypedFrameConstants {
public:
- static const int kFrameSize = 2 * kPointerSize;
-
- static const int kCodeOffset = -2 * kPointerSize;
- static const int kSPOffset = -1 * kPointerSize;
+ static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ DEFINE_TYPED_FRAME_SIZES(2);
// The caller fields are below the frame pointer on the stack.
static const int kCallerFPOffset = +0 * kPointerSize;
@@ -161,7 +159,7 @@ class JavaScriptFrameConstants : public AllStatic {
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
static const int kLastParameterOffset = +2 * kPointerSize;
- static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+ static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
// Caller SP-relative.
static const int kParam0Offset = -2 * kPointerSize;
diff --git a/deps/v8/src/mips64/interface-descriptors-mips64.cc b/deps/v8/src/mips64/interface-descriptors-mips64.cc
index 73df66ea8e..7695d0b694 100644
--- a/deps/v8/src/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/mips64/interface-descriptors-mips64.cc
@@ -109,35 +109,8 @@ void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
}
-void ToNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
// static
-const Register ToLengthDescriptor::ReceiverRegister() { return a0; }
-
-
-// static
-const Register ToStringDescriptor::ReceiverRegister() { return a0; }
-
-
-// static
-const Register ToNameDescriptor::ReceiverRegister() { return a0; }
-
-
-// static
-const Register ToObjectDescriptor::ReceiverRegister() { return a0; }
-
-
-void NumberToStringDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
+const Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
void TypeofDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -266,6 +239,13 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(0, nullptr, nullptr);
}
+#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
+ void Allocate##Type##Descriptor::InitializePlatformSpecific( \
+ CallInterfaceDescriptorData* data) { \
+ data->InitializePlatformSpecific(0, nullptr, nullptr); \
+ }
+SIMD128_TYPES(SIMD128_ALLOC_DESC)
+#undef SIMD128_ALLOC_DESC
void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -310,24 +290,15 @@ void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void CompareDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
-void CompareNilDescriptor::InitializePlatformSpecific(
+void FastArrayPushDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
+ // stack param count needs (arg count)
Register registers[] = {a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-
-void ToBooleanDescriptor::InitializePlatformSpecific(
+void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {a0};
+ Register registers[] = {a1, a0};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
@@ -391,21 +362,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void ApiFunctionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a0, // callee
- a4, // call_data
- a2, // holder
- a1, // api_function_address
- a3, // actual number of arguments
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ApiAccessorDescriptor::InitializePlatformSpecific(
+void ApiCallbackDescriptorBase::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
a0, // callee
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/mips64/macro-assembler-mips64.cc
index b49fa76e06..fb83fe9b76 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/macro-assembler-mips64.cc
@@ -102,6 +102,34 @@ void MacroAssembler::StoreRoot(Register source,
sd(source, MemOperand(s6, index << kPointerSizeLog2));
}
+void MacroAssembler::PushCommonFrame(Register marker_reg) {
+ if (marker_reg.is_valid()) {
+ Push(ra, fp, marker_reg);
+ Daddu(fp, sp, Operand(kPointerSize));
+ } else {
+ Push(ra, fp);
+ mov(fp, sp);
+ }
+}
+
+void MacroAssembler::PopCommonFrame(Register marker_reg) {
+ if (marker_reg.is_valid()) {
+ Pop(ra, fp, marker_reg);
+ } else {
+ Pop(ra, fp);
+ }
+}
+
+void MacroAssembler::PushStandardFrame(Register function_reg) {
+ int offset = -StandardFrameConstants::kContextOffset;
+ if (function_reg.is_valid()) {
+ Push(ra, fp, cp, function_reg);
+ offset += kPointerSize;
+ } else {
+ Push(ra, fp, cp);
+ }
+ Daddu(fp, sp, Operand(offset));
+}
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
@@ -457,13 +485,13 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
sd(scratch, MemOperand(t8));
// Call stub on end of buffer.
// Check for end of buffer.
- And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
+ And(t8, scratch, Operand(StoreBuffer::kStoreBufferMask));
DCHECK(!scratch.is(t8));
if (and_then == kFallThroughAtEnd) {
- Branch(&done, eq, t8, Operand(zero_reg));
+ Branch(&done, ne, t8, Operand(zero_reg));
} else {
DCHECK(and_then == kReturnAtEnd);
- Ret(eq, t8, Operand(zero_reg));
+ Ret(ne, t8, Operand(zero_reg));
}
push(ra);
StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
@@ -484,13 +512,25 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Register scratch,
Label* miss) {
Label same_contexts;
+ Register temporary = t8;
DCHECK(!holder_reg.is(scratch));
DCHECK(!holder_reg.is(at));
DCHECK(!scratch.is(at));
- // Load current lexical context from the stack frame.
- ld(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Load current lexical context from the active StandardFrame, which
+ // may require crawling past STUB frames.
+ Label load_context;
+ Label has_context;
+ mov(at, fp);
+ bind(&load_context);
+ ld(scratch, MemOperand(at, CommonFrameConstants::kContextOrFrameTypeOffset));
+ // Passing temporary register, otherwise JumpIfNotSmi modifies register at.
+ JumpIfNotSmi(scratch, &has_context, temporary);
+ ld(at, MemOperand(at, CommonFrameConstants::kCallerFPOffset));
+ Branch(&load_context);
+ bind(&has_context);
+
// In debug mode, make sure the lexical context is set.
#ifdef DEBUG
Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
@@ -1225,7 +1265,11 @@ void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
rotrv(rd, rs, rt.rm());
} else {
- rotr(rd, rs, rt.imm64_);
+ int64_t ror_value = rt.imm64_ % 32;
+ if (ror_value < 0) {
+ ror_value += 32;
+ }
+ rotr(rd, rs, ror_value);
}
}
@@ -1234,7 +1278,13 @@ void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
drotrv(rd, rs, rt.rm());
} else {
- drotr(rd, rs, rt.imm64_);
+ int64_t dror_value = rt.imm64_ % 64;
+ if (dror_value < 0) dror_value += 64;
+ if (dror_value <= 31) {
+ drotr(rd, rs, dror_value);
+ } else {
+ drotr32(rd, rs, dror_value - 32);
+ }
}
}
@@ -1246,8 +1296,9 @@ void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
Register scratch) {
+ DCHECK(sa >= 1 && sa <= 31);
if (kArchVariant == kMips64r6 && sa <= 4) {
- lsa(rd, rt, rs, sa);
+ lsa(rd, rt, rs, sa - 1);
} else {
Register tmp = rd.is(rt) ? scratch : rd;
DCHECK(!tmp.is(rt));
@@ -1259,8 +1310,9 @@ void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
void MacroAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa,
Register scratch) {
+ DCHECK(sa >= 1 && sa <= 31);
if (kArchVariant == kMips64r6 && sa <= 4) {
- dlsa(rd, rt, rs, sa);
+ dlsa(rd, rt, rs, sa - 1);
} else {
Register tmp = rd.is(rt) ? scratch : rd;
DCHECK(!tmp.is(rt));
@@ -2310,6 +2362,186 @@ void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
movf(rd, rs, cc);
}
+#define __ masm->
+
+static bool ZeroHelper_d(MacroAssembler* masm, MaxMinKind kind, FPURegister dst,
+ FPURegister src1, FPURegister src2, Label* equal) {
+ if (src1.is(src2)) {
+ __ Move(dst, src1);
+ return true;
+ }
+
+ Label other, compare_not_equal;
+ FPURegister left, right;
+ if (kind == MaxMinKind::kMin) {
+ left = src1;
+ right = src2;
+ } else {
+ left = src2;
+ right = src1;
+ }
+
+ __ BranchF64(&compare_not_equal, nullptr, ne, src1, src2);
+ // Left and right hand side are equal, check for -0 vs. +0.
+ __ dmfc1(t8, src1);
+ __ Branch(&other, eq, t8, Operand(0x8000000000000000));
+ __ Move_d(dst, right);
+ __ Branch(equal);
+ __ bind(&other);
+ __ Move_d(dst, left);
+ __ Branch(equal);
+ __ bind(&compare_not_equal);
+ return false;
+}
+
+static bool ZeroHelper_s(MacroAssembler* masm, MaxMinKind kind, FPURegister dst,
+ FPURegister src1, FPURegister src2, Label* equal) {
+ if (src1.is(src2)) {
+ __ Move(dst, src1);
+ return true;
+ }
+
+ Label other, compare_not_equal;
+ FPURegister left, right;
+ if (kind == MaxMinKind::kMin) {
+ left = src1;
+ right = src2;
+ } else {
+ left = src2;
+ right = src1;
+ }
+
+ __ BranchF32(&compare_not_equal, nullptr, ne, src1, src2);
+ // Left and right hand side are equal, check for -0 vs. +0.
+ __ FmoveLow(t8, src1);
+ __ dsll32(t8, t8, 0);
+ __ Branch(&other, eq, t8, Operand(0x8000000000000000));
+ __ Move_s(dst, right);
+ __ Branch(equal);
+ __ bind(&other);
+ __ Move_s(dst, left);
+ __ Branch(equal);
+ __ bind(&compare_not_equal);
+ return false;
+}
+
+#undef __
+
+void MacroAssembler::MinNaNCheck_d(FPURegister dst, FPURegister src1,
+ FPURegister src2, Label* nan) {
+ if (nan) {
+ BranchF64(nullptr, nan, eq, src1, src2);
+ }
+ if (kArchVariant >= kMips64r6) {
+ min_d(dst, src1, src2);
+ } else {
+ Label skip;
+ if (!ZeroHelper_d(this, MaxMinKind::kMin, dst, src1, src2, &skip)) {
+ if (dst.is(src1)) {
+ BranchF64(&skip, nullptr, le, src1, src2);
+ Move_d(dst, src2);
+ } else if (dst.is(src2)) {
+ BranchF64(&skip, nullptr, ge, src1, src2);
+ Move_d(dst, src1);
+ } else {
+ Label right;
+ BranchF64(&right, nullptr, gt, src1, src2);
+ Move_d(dst, src1);
+ Branch(&skip);
+ bind(&right);
+ Move_d(dst, src2);
+ }
+ }
+ bind(&skip);
+ }
+}
+
+void MacroAssembler::MaxNaNCheck_d(FPURegister dst, FPURegister src1,
+ FPURegister src2, Label* nan) {
+ if (nan) {
+ BranchF64(nullptr, nan, eq, src1, src2);
+ }
+ if (kArchVariant >= kMips64r6) {
+ max_d(dst, src1, src2);
+ } else {
+ Label skip;
+ if (!ZeroHelper_d(this, MaxMinKind::kMax, dst, src1, src2, &skip)) {
+ if (dst.is(src1)) {
+ BranchF64(&skip, nullptr, ge, src1, src2);
+ Move_d(dst, src2);
+ } else if (dst.is(src2)) {
+ BranchF64(&skip, nullptr, le, src1, src2);
+ Move_d(dst, src1);
+ } else {
+ Label right;
+ BranchF64(&right, nullptr, lt, src1, src2);
+ Move_d(dst, src1);
+ Branch(&skip);
+ bind(&right);
+ Move_d(dst, src2);
+ }
+ }
+ bind(&skip);
+ }
+}
+
+void MacroAssembler::MinNaNCheck_s(FPURegister dst, FPURegister src1,
+ FPURegister src2, Label* nan) {
+ if (nan) {
+ BranchF32(nullptr, nan, eq, src1, src2);
+ }
+ if (kArchVariant >= kMips64r6) {
+ min_s(dst, src1, src2);
+ } else {
+ Label skip;
+ if (!ZeroHelper_s(this, MaxMinKind::kMin, dst, src1, src2, &skip)) {
+ if (dst.is(src1)) {
+ BranchF32(&skip, nullptr, le, src1, src2);
+ Move_s(dst, src2);
+ } else if (dst.is(src2)) {
+ BranchF32(&skip, nullptr, ge, src1, src2);
+ Move_s(dst, src1);
+ } else {
+ Label right;
+ BranchF32(&right, nullptr, gt, src1, src2);
+ Move_s(dst, src1);
+ Branch(&skip);
+ bind(&right);
+ Move_s(dst, src2);
+ }
+ }
+ bind(&skip);
+ }
+}
+
+void MacroAssembler::MaxNaNCheck_s(FPURegister dst, FPURegister src1,
+ FPURegister src2, Label* nan) {
+ if (nan) {
+ BranchF32(nullptr, nan, eq, src1, src2);
+ }
+ if (kArchVariant >= kMips64r6) {
+ max_s(dst, src1, src2);
+ } else {
+ Label skip;
+ if (!ZeroHelper_s(this, MaxMinKind::kMax, dst, src1, src2, &skip)) {
+ if (dst.is(src1)) {
+ BranchF32(&skip, nullptr, ge, src1, src2);
+ Move_s(dst, src2);
+ } else if (dst.is(src2)) {
+ BranchF32(&skip, nullptr, le, src1, src2);
+ Move_s(dst, src1);
+ } else {
+ Label right;
+ BranchF32(&right, nullptr, lt, src1, src2);
+ Move_s(dst, src1);
+ Branch(&skip);
+ bind(&right);
+ Move_s(dst, src2);
+ }
+ }
+ bind(&skip);
+ }
+}
void MacroAssembler::Clz(Register rd, Register rs) {
clz(rd, rs);
@@ -3468,16 +3700,25 @@ void MacroAssembler::Jump(Register target,
const Operand& rt,
BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
- if (cond == cc_always) {
- jr(target);
+ if (kArchVariant == kMips64r6 && bd == PROTECT) {
+ if (cond == cc_always) {
+ jic(target, 0);
+ } else {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
+ jic(target, 0);
+ }
} else {
- BRANCH_ARGS_CHECK(cond, rs, rt);
- Branch(2, NegateCondition(cond), rs, rt);
- jr(target);
+ if (cond == cc_always) {
+ jr(target);
+ } else {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
+ jr(target);
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bd == PROTECT) nop();
}
- // Emit a nop in the branch delay slot if required.
- if (bd == PROTECT)
- nop();
}
@@ -3535,8 +3776,7 @@ int MacroAssembler::CallSize(Register target,
size += 3;
}
- if (bd == PROTECT)
- size += 1;
+ if (bd == PROTECT && kArchVariant != kMips64r6) size += 1;
return size * kInstrSize;
}
@@ -3555,16 +3795,25 @@ void MacroAssembler::Call(Register target,
BlockTrampolinePoolScope block_trampoline_pool(this);
Label start;
bind(&start);
- if (cond == cc_always) {
- jalr(target);
+ if (kArchVariant == kMips64r6 && bd == PROTECT) {
+ if (cond == cc_always) {
+ jialc(target, 0);
+ } else {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
+ jialc(target, 0);
+ }
} else {
- BRANCH_ARGS_CHECK(cond, rs, rt);
- Branch(2, NegateCondition(cond), rs, rt);
- jalr(target);
+ if (cond == cc_always) {
+ jalr(target);
+ } else {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
+ jalr(target);
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bd == PROTECT) nop();
}
- // Emit a nop in the branch delay slot if required.
- if (bd == PROTECT)
- nop();
#ifdef DEBUG
CHECK_EQ(size + CallSize(target, cond, rs, rt, bd),
@@ -3687,44 +3936,6 @@ void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
}
-void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
-
- uint64_t imm64;
- imm64 = jump_address(L);
- { BlockGrowBufferScope block_buf_growth(this);
- // Buffer growth (and relocation) must be blocked for internal references
- // until associated instructions are emitted and available to be patched.
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
- li(at, Operand(imm64), ADDRESS_LOAD);
- }
- jr(at);
-
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
-}
-
-
-void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
-
- uint64_t imm64;
- imm64 = jump_address(L);
- { BlockGrowBufferScope block_buf_growth(this);
- // Buffer growth (and relocation) must be blocked for internal references
- // until associated instructions are emitted and available to be patched.
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
- li(at, Operand(imm64), ADDRESS_LOAD);
- }
- jalr(at);
-
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
-}
-
-
void MacroAssembler::DropAndRet(int drop) {
DCHECK(is_int16(drop * kPointerSize));
Ret(USE_DELAY_SLOT);
@@ -4551,6 +4762,65 @@ void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
// -----------------------------------------------------------------------------
// JavaScript invokes.
+void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
+ Register caller_args_count_reg,
+ Register scratch0, Register scratch1) {
+#if DEBUG
+ if (callee_args_count.is_reg()) {
+ DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
+ scratch1));
+ } else {
+ DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
+ }
+#endif
+
+ // Calculate the end of destination area where we will put the arguments
+ // after we drop current frame. We add kPointerSize to count the receiver
+ // argument which is not included into formal parameters count.
+ Register dst_reg = scratch0;
+ Dlsa(dst_reg, fp, caller_args_count_reg, kPointerSizeLog2);
+ Daddu(dst_reg, dst_reg,
+ Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+
+ Register src_reg = caller_args_count_reg;
+ // Calculate the end of source area. +kPointerSize is for the receiver.
+ if (callee_args_count.is_reg()) {
+ Dlsa(src_reg, sp, callee_args_count.reg(), kPointerSizeLog2);
+ Daddu(src_reg, src_reg, Operand(kPointerSize));
+ } else {
+ Daddu(src_reg, sp,
+ Operand((callee_args_count.immediate() + 1) * kPointerSize));
+ }
+
+ if (FLAG_debug_code) {
+ Check(lo, kStackAccessBelowStackPointer, src_reg, Operand(dst_reg));
+ }
+
+ // Restore caller's frame pointer and return address now as they will be
+ // overwritten by the copying loop.
+ ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Now copy callee arguments to the caller frame going backwards to avoid
+ // callee arguments corruption (source and destination areas could overlap).
+
+ // Both src_reg and dst_reg are pointing to the word after the one to copy,
+ // so they must be pre-decremented in the loop.
+ Register tmp_reg = scratch1;
+ Label loop, entry;
+ Branch(&entry);
+ bind(&loop);
+ Dsubu(src_reg, src_reg, Operand(kPointerSize));
+ Dsubu(dst_reg, dst_reg, Operand(kPointerSize));
+ ld(tmp_reg, MemOperand(src_reg));
+ sd(tmp_reg, MemOperand(dst_reg));
+ bind(&entry);
+ Branch(&loop, ne, sp, Operand(src_reg));
+
+ // Leave current frame.
+ mov(sp, dst_reg);
+}
+
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
Label* done,
@@ -4913,153 +5183,194 @@ void MacroAssembler::SmiToDoubleFPURegister(Register smi,
cvt_d_w(value, value);
}
+static inline void BranchOvfHelper(MacroAssembler* masm, Register overflow_dst,
+ Label* overflow_label,
+ Label* no_overflow_label) {
+ DCHECK(overflow_label || no_overflow_label);
+ if (!overflow_label) {
+ DCHECK(no_overflow_label);
+ masm->Branch(no_overflow_label, ge, overflow_dst, Operand(zero_reg));
+ } else {
+ masm->Branch(overflow_label, lt, overflow_dst, Operand(zero_reg));
+ if (no_overflow_label) masm->Branch(no_overflow_label);
+ }
+}
-void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
- const Operand& right,
- Register overflow_dst,
- Register scratch) {
+void MacroAssembler::AddBranchOvf(Register dst, Register left,
+ const Operand& right, Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
if (right.is_reg()) {
- AdduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
+ AddBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
+ scratch);
} else {
- if (dst.is(left)) {
- li(t9, right); // Load right.
- mov(scratch, left); // Preserve left.
- addu(dst, left, t9); // Left is overwritten.
- xor_(scratch, dst, scratch); // Original left.
- xor_(overflow_dst, dst, t9);
- and_(overflow_dst, overflow_dst, scratch);
+ if (kArchVariant == kMips64r6) {
+ Register right_reg = t9;
+ DCHECK(!left.is(right_reg));
+ li(right_reg, Operand(right));
+ AddBranchOvf(dst, left, right_reg, overflow_label, no_overflow_label);
} else {
- li(t9, right);
- addu(dst, left, t9);
- xor_(overflow_dst, dst, left);
- xor_(scratch, dst, t9);
- and_(overflow_dst, scratch, overflow_dst);
+ Register overflow_dst = t9;
+ DCHECK(!dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
+ DCHECK(!left.is(overflow_dst));
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ // Left is overwritten.
+ Addu(dst, left, static_cast<int32_t>(right.immediate()));
+ xor_(scratch, dst, scratch); // Original left.
+ // Load right since xori takes uint16 as immediate.
+ Addu(overflow_dst, zero_reg, right);
+ xor_(overflow_dst, dst, overflow_dst);
+ and_(overflow_dst, overflow_dst, scratch);
+ } else {
+ Addu(dst, left, static_cast<int32_t>(right.immediate()));
+ xor_(overflow_dst, dst, left);
+ // Load right since xori takes uint16 as immediate.
+ Addu(scratch, zero_reg, right);
+ xor_(scratch, dst, scratch);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+ BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
}
}
}
-
-void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
- Register right,
- Register overflow_dst,
- Register scratch) {
- DCHECK(!dst.is(overflow_dst));
- DCHECK(!dst.is(scratch));
- DCHECK(!overflow_dst.is(scratch));
- DCHECK(!overflow_dst.is(left));
- DCHECK(!overflow_dst.is(right));
-
- if (left.is(right) && dst.is(left)) {
- DCHECK(!dst.is(t9));
- DCHECK(!scratch.is(t9));
- DCHECK(!left.is(t9));
- DCHECK(!right.is(t9));
- DCHECK(!overflow_dst.is(t9));
- mov(t9, right);
- right = t9;
- }
-
- if (dst.is(left)) {
- mov(scratch, left); // Preserve left.
- addu(dst, left, right); // Left is overwritten.
- xor_(scratch, dst, scratch); // Original left.
- xor_(overflow_dst, dst, right);
- and_(overflow_dst, overflow_dst, scratch);
- } else if (dst.is(right)) {
- mov(scratch, right); // Preserve right.
- addu(dst, left, right); // Right is overwritten.
- xor_(scratch, dst, scratch); // Original right.
- xor_(overflow_dst, dst, left);
- and_(overflow_dst, overflow_dst, scratch);
+void MacroAssembler::AddBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
+ if (kArchVariant == kMips64r6) {
+ if (!overflow_label) {
+ DCHECK(no_overflow_label);
+ DCHECK(!dst.is(scratch));
+ Register left_reg = left.is(dst) ? scratch : left;
+ Register right_reg = right.is(dst) ? t9 : right;
+ DCHECK(!dst.is(left_reg));
+ DCHECK(!dst.is(right_reg));
+ Move(left_reg, left);
+ Move(right_reg, right);
+ addu(dst, left, right);
+ bnvc(left_reg, right_reg, no_overflow_label);
+ } else {
+ bovc(left, right, overflow_label);
+ addu(dst, left, right);
+ if (no_overflow_label) bc(no_overflow_label);
+ }
} else {
- addu(dst, left, right);
- xor_(overflow_dst, dst, left);
- xor_(scratch, dst, right);
- and_(overflow_dst, scratch, overflow_dst);
- }
-}
+ Register overflow_dst = t9;
+ DCHECK(!dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
+ DCHECK(!left.is(overflow_dst));
+ DCHECK(!right.is(overflow_dst));
+ DCHECK(!left.is(scratch));
+ DCHECK(!right.is(scratch));
+ if (left.is(right) && dst.is(left)) {
+ mov(overflow_dst, right);
+ right = overflow_dst;
+ }
-void MacroAssembler::DadduAndCheckForOverflow(Register dst, Register left,
- const Operand& right,
- Register overflow_dst,
- Register scratch) {
- if (right.is_reg()) {
- DadduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
- } else {
if (dst.is(left)) {
- li(t9, right); // Load right.
mov(scratch, left); // Preserve left.
- daddu(dst, left, t9); // Left is overwritten.
+ addu(dst, left, right); // Left is overwritten.
xor_(scratch, dst, scratch); // Original left.
- xor_(overflow_dst, dst, t9);
+ xor_(overflow_dst, dst, right);
+ and_(overflow_dst, overflow_dst, scratch);
+ } else if (dst.is(right)) {
+ mov(scratch, right); // Preserve right.
+ addu(dst, left, right); // Right is overwritten.
+ xor_(scratch, dst, scratch); // Original right.
+ xor_(overflow_dst, dst, left);
and_(overflow_dst, overflow_dst, scratch);
} else {
- li(t9, right); // Load right.
- Daddu(dst, left, t9);
+ addu(dst, left, right);
xor_(overflow_dst, dst, left);
- xor_(scratch, dst, t9);
+ xor_(scratch, dst, right);
and_(overflow_dst, scratch, overflow_dst);
}
+ BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
}
}
+void MacroAssembler::SubBranchOvf(Register dst, Register left,
+ const Operand& right, Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
+ DCHECK(overflow_label || no_overflow_label);
+ if (right.is_reg()) {
+ SubBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
+ scratch);
+ } else {
+ Register overflow_dst = t9;
+ DCHECK(!dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
+ DCHECK(!left.is(overflow_dst));
+ DCHECK(!left.is(scratch));
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ // Left is overwritten.
+ Subu(dst, left, static_cast<int32_t>(right.immediate()));
+ // Load right since xori takes uint16 as immediate.
+ Addu(overflow_dst, zero_reg, right);
+ xor_(overflow_dst, scratch, overflow_dst); // scratch is original left.
+ xor_(scratch, dst, scratch); // scratch is original left.
+ and_(overflow_dst, scratch, overflow_dst);
+ } else {
+ Subu(dst, left, right);
+ xor_(overflow_dst, dst, left);
+ // Load right since xori takes uint16 as immediate.
+ Addu(scratch, zero_reg, right);
+ xor_(scratch, left, scratch);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+ BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
+ }
+}
-void MacroAssembler::DadduAndCheckForOverflow(Register dst, Register left,
- Register right,
- Register overflow_dst,
- Register scratch) {
- DCHECK(!dst.is(overflow_dst));
+void MacroAssembler::SubBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
+ DCHECK(overflow_label || no_overflow_label);
+ Register overflow_dst = t9;
DCHECK(!dst.is(scratch));
- DCHECK(!overflow_dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
DCHECK(!overflow_dst.is(left));
DCHECK(!overflow_dst.is(right));
+ DCHECK(!scratch.is(left));
+ DCHECK(!scratch.is(right));
- if (left.is(right) && dst.is(left)) {
- DCHECK(!dst.is(t9));
- DCHECK(!scratch.is(t9));
- DCHECK(!left.is(t9));
- DCHECK(!right.is(t9));
- DCHECK(!overflow_dst.is(t9));
- mov(t9, right);
- right = t9;
+ // This happens with some crankshaft code. Since Subu works fine if
+ // left == right, let's not make that restriction here.
+ if (left.is(right)) {
+ mov(dst, zero_reg);
+ if (no_overflow_label) {
+ Branch(no_overflow_label);
+ }
}
if (dst.is(left)) {
mov(scratch, left); // Preserve left.
- daddu(dst, left, right); // Left is overwritten.
- xor_(scratch, dst, scratch); // Original left.
- xor_(overflow_dst, dst, right);
- and_(overflow_dst, overflow_dst, scratch);
+ subu(dst, left, right); // Left is overwritten.
+ xor_(overflow_dst, dst, scratch); // scratch is original left.
+ xor_(scratch, scratch, right); // scratch is original left.
+ and_(overflow_dst, scratch, overflow_dst);
} else if (dst.is(right)) {
mov(scratch, right); // Preserve right.
- daddu(dst, left, right); // Right is overwritten.
- xor_(scratch, dst, scratch); // Original right.
+ subu(dst, left, right); // Right is overwritten.
xor_(overflow_dst, dst, left);
- and_(overflow_dst, overflow_dst, scratch);
+ xor_(scratch, left, scratch); // Original right.
+ and_(overflow_dst, scratch, overflow_dst);
} else {
- daddu(dst, left, right);
+ subu(dst, left, right);
xor_(overflow_dst, dst, left);
- xor_(scratch, dst, right);
+ xor_(scratch, left, right);
and_(overflow_dst, scratch, overflow_dst);
}
+ BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
}
-
-static inline void BranchOvfHelper(MacroAssembler* masm, Register overflow_dst,
- Label* overflow_label,
- Label* no_overflow_label) {
- DCHECK(overflow_label || no_overflow_label);
- if (!overflow_label) {
- DCHECK(no_overflow_label);
- masm->Branch(no_overflow_label, ge, overflow_dst, Operand(zero_reg));
- } else {
- masm->Branch(overflow_label, lt, overflow_dst, Operand(zero_reg));
- if (no_overflow_label) masm->Branch(no_overflow_label);
- }
-}
-
-
void MacroAssembler::DaddBranchOvf(Register dst, Register left,
const Operand& right, Label* overflow_label,
Label* no_overflow_label, Register scratch) {
@@ -5129,138 +5440,6 @@ void MacroAssembler::DaddBranchOvf(Register dst, Register left, Register right,
}
-void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
- const Operand& right,
- Register overflow_dst,
- Register scratch) {
- if (right.is_reg()) {
- SubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
- } else {
- if (dst.is(left)) {
- li(t9, right); // Load right.
- mov(scratch, left); // Preserve left.
- Subu(dst, left, t9); // Left is overwritten.
- xor_(overflow_dst, dst, scratch); // scratch is original left.
- xor_(scratch, scratch, t9); // scratch is original left.
- and_(overflow_dst, scratch, overflow_dst);
- } else {
- li(t9, right);
- subu(dst, left, t9);
- xor_(overflow_dst, dst, left);
- xor_(scratch, left, t9);
- and_(overflow_dst, scratch, overflow_dst);
- }
- }
-}
-
-
-void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
- Register right,
- Register overflow_dst,
- Register scratch) {
- DCHECK(!dst.is(overflow_dst));
- DCHECK(!dst.is(scratch));
- DCHECK(!overflow_dst.is(scratch));
- DCHECK(!overflow_dst.is(left));
- DCHECK(!overflow_dst.is(right));
- DCHECK(!scratch.is(left));
- DCHECK(!scratch.is(right));
-
- // This happens with some crankshaft code. Since Subu works fine if
- // left == right, let's not make that restriction here.
- if (left.is(right)) {
- mov(dst, zero_reg);
- mov(overflow_dst, zero_reg);
- return;
- }
-
- if (dst.is(left)) {
- mov(scratch, left); // Preserve left.
- subu(dst, left, right); // Left is overwritten.
- xor_(overflow_dst, dst, scratch); // scratch is original left.
- xor_(scratch, scratch, right); // scratch is original left.
- and_(overflow_dst, scratch, overflow_dst);
- } else if (dst.is(right)) {
- mov(scratch, right); // Preserve right.
- subu(dst, left, right); // Right is overwritten.
- xor_(overflow_dst, dst, left);
- xor_(scratch, left, scratch); // Original right.
- and_(overflow_dst, scratch, overflow_dst);
- } else {
- subu(dst, left, right);
- xor_(overflow_dst, dst, left);
- xor_(scratch, left, right);
- and_(overflow_dst, scratch, overflow_dst);
- }
-}
-
-
-void MacroAssembler::DsubuAndCheckForOverflow(Register dst, Register left,
- const Operand& right,
- Register overflow_dst,
- Register scratch) {
- if (right.is_reg()) {
- DsubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
- } else {
- if (dst.is(left)) {
- li(t9, right); // Load right.
- mov(scratch, left); // Preserve left.
- dsubu(dst, left, t9); // Left is overwritten.
- xor_(overflow_dst, dst, scratch); // scratch is original left.
- xor_(scratch, scratch, t9); // scratch is original left.
- and_(overflow_dst, scratch, overflow_dst);
- } else {
- li(t9, right);
- dsubu(dst, left, t9);
- xor_(overflow_dst, dst, left);
- xor_(scratch, left, t9);
- and_(overflow_dst, scratch, overflow_dst);
- }
- }
-}
-
-
-void MacroAssembler::DsubuAndCheckForOverflow(Register dst, Register left,
- Register right,
- Register overflow_dst,
- Register scratch) {
- DCHECK(!dst.is(overflow_dst));
- DCHECK(!dst.is(scratch));
- DCHECK(!overflow_dst.is(scratch));
- DCHECK(!overflow_dst.is(left));
- DCHECK(!overflow_dst.is(right));
- DCHECK(!scratch.is(left));
- DCHECK(!scratch.is(right));
-
- // This happens with some crankshaft code. Since Subu works fine if
- // left == right, let's not make that restriction here.
- if (left.is(right)) {
- mov(dst, zero_reg);
- mov(overflow_dst, zero_reg);
- return;
- }
-
- if (dst.is(left)) {
- mov(scratch, left); // Preserve left.
- dsubu(dst, left, right); // Left is overwritten.
- xor_(overflow_dst, dst, scratch); // scratch is original left.
- xor_(scratch, scratch, right); // scratch is original left.
- and_(overflow_dst, scratch, overflow_dst);
- } else if (dst.is(right)) {
- mov(scratch, right); // Preserve right.
- dsubu(dst, left, right); // Right is overwritten.
- xor_(overflow_dst, dst, left);
- xor_(scratch, left, scratch); // Original right.
- and_(overflow_dst, scratch, overflow_dst);
- } else {
- dsubu(dst, left, right);
- xor_(overflow_dst, dst, left);
- xor_(scratch, left, right);
- and_(overflow_dst, scratch, overflow_dst);
- }
-}
-
-
void MacroAssembler::DsubBranchOvf(Register dst, Register left,
const Operand& right, Label* overflow_label,
Label* no_overflow_label, Register scratch) {
@@ -5566,12 +5745,9 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
}
}
-
-void MacroAssembler::StubPrologue() {
- Push(ra, fp, cp);
- Push(Smi::FromInt(StackFrame::STUB));
- // Adjust FP to point to saved FP.
- Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+void MacroAssembler::StubPrologue(StackFrame::Type type) {
+ li(at, Operand(Smi::FromInt(type)));
+ PushCommonFrame(at);
}
@@ -5594,16 +5770,13 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
nop(); // Branch delay slot nop.
nop(); // Pad the empty space.
} else {
- Push(ra, fp, cp, a1);
+ PushStandardFrame(a1);
nop(Assembler::CODE_AGE_SEQUENCE_NOP);
nop(Assembler::CODE_AGE_SEQUENCE_NOP);
nop(Assembler::CODE_AGE_SEQUENCE_NOP);
- // Adjust fp to point to caller's fp.
- Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}
}
-
void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
ld(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
ld(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
@@ -5620,30 +5793,41 @@ void MacroAssembler::EnterFrame(StackFrame::Type type,
void MacroAssembler::EnterFrame(StackFrame::Type type) {
- daddiu(sp, sp, -5 * kPointerSize);
- li(t8, Operand(Smi::FromInt(type)));
- li(t9, Operand(CodeObject()), CONSTANT_SIZE);
- sd(ra, MemOperand(sp, 4 * kPointerSize));
- sd(fp, MemOperand(sp, 3 * kPointerSize));
- sd(cp, MemOperand(sp, 2 * kPointerSize));
- sd(t8, MemOperand(sp, 1 * kPointerSize));
- sd(t9, MemOperand(sp, 0 * kPointerSize));
+ int stack_offset, fp_offset;
+ if (type == StackFrame::INTERNAL) {
+ stack_offset = -4 * kPointerSize;
+ fp_offset = 2 * kPointerSize;
+ } else {
+ stack_offset = -3 * kPointerSize;
+ fp_offset = 1 * kPointerSize;
+ }
+ daddiu(sp, sp, stack_offset);
+ stack_offset = -stack_offset - kPointerSize;
+ sd(ra, MemOperand(sp, stack_offset));
+ stack_offset -= kPointerSize;
+ sd(fp, MemOperand(sp, stack_offset));
+ stack_offset -= kPointerSize;
+ li(t9, Operand(Smi::FromInt(type)));
+ sd(t9, MemOperand(sp, stack_offset));
+ if (type == StackFrame::INTERNAL) {
+ DCHECK_EQ(stack_offset, kPointerSize);
+ li(t9, Operand(CodeObject()));
+ sd(t9, MemOperand(sp, 0));
+ } else {
+ DCHECK_EQ(stack_offset, 0);
+ }
// Adjust FP to point to saved FP.
- Daddu(fp, sp,
- Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
+ Daddu(fp, sp, Operand(fp_offset));
}
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
- mov(sp, fp);
- ld(fp, MemOperand(sp, 0 * kPointerSize));
- ld(ra, MemOperand(sp, 1 * kPointerSize));
- daddiu(sp, sp, 2 * kPointerSize);
+ daddiu(sp, fp, 2 * kPointerSize);
+ ld(ra, MemOperand(fp, 1 * kPointerSize));
+ ld(fp, MemOperand(fp, 0 * kPointerSize));
}
-
-void MacroAssembler::EnterExitFrame(bool save_doubles,
- int stack_space) {
+void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
// Set up the frame structure on the stack.
STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
@@ -5653,16 +5837,20 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
// fp + 2 (==kCallerSPDisplacement) - old stack's end
// [fp + 1 (==kCallerPCOffset)] - saved old ra
// [fp + 0 (==kCallerFPOffset)] - saved old fp
- // [fp - 1 (==kSPOffset)] - sp of the called function
- // [fp - 2 (==kCodeOffset)] - CodeObject
+ // [fp - 1 StackFrame::EXIT Smi
+ // [fp - 2 (==kSPOffset)] - sp of the called function
+ // [fp - 3 (==kCodeOffset)] - CodeObject
// fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
// new stack (will contain saved ra)
- // Save registers.
- daddiu(sp, sp, -4 * kPointerSize);
- sd(ra, MemOperand(sp, 3 * kPointerSize));
- sd(fp, MemOperand(sp, 2 * kPointerSize));
- daddiu(fp, sp, 2 * kPointerSize); // Set up new frame pointer.
+ // Save registers and reserve room for saved entry sp and code object.
+ daddiu(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
+ sd(ra, MemOperand(sp, 4 * kPointerSize));
+ sd(fp, MemOperand(sp, 3 * kPointerSize));
+ li(at, Operand(Smi::FromInt(StackFrame::EXIT)));
+ sd(at, MemOperand(sp, 2 * kPointerSize));
+ // Set up new frame pointer.
+ daddiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
if (emit_debug_code()) {
sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
@@ -5715,8 +5903,8 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
if (save_doubles) {
// Remember: we only need to restore every 2nd double FPU value.
int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
- Dsubu(t8, fp, Operand(ExitFrameConstants::kFrameSize +
- kNumOfSavedRegisters * kDoubleSize));
+ Dsubu(t8, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp +
+ kNumOfSavedRegisters * kDoubleSize));
for (int i = 0; i < kNumOfSavedRegisters; i++) {
FPURegister reg = FPURegister::from_code(2 * i);
ldc1(reg, MemOperand(t8, i * kDoubleSize));
@@ -5966,6 +6154,15 @@ void MacroAssembler::JumpIfEitherSmi(Register reg1,
JumpIfSmi(at, on_either_smi);
}
+void MacroAssembler::AssertNotNumber(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ andi(at, object, kSmiTagMask);
+ Check(ne, kOperandIsANumber, at, Operand(zero_reg));
+ GetObjectType(object, t8, t8);
+ Check(ne, kOperandIsNotANumber, t8, Operand(HEAP_NUMBER_TYPE));
+ }
+}
void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
@@ -6494,28 +6691,45 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
bind(&done);
}
-
-void MacroAssembler::TestJSArrayForAllocationMemento(
- Register receiver_reg,
- Register scratch_reg,
- Label* no_memento_found,
- Condition cond,
- Label* allocation_memento_present) {
- ExternalReference new_space_start =
- ExternalReference::new_space_start(isolate());
+void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Label* no_memento_found) {
+ Label map_check;
+ Label top_check;
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
- Daddu(scratch_reg, receiver_reg,
- Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
- Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
+ const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
+ const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+
+ // Bail out if the object is not in new space.
+ JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
+ // If the object is in new space, we need to check whether it is on the same
+ // page as the current top.
+ Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+ Xor(scratch_reg, scratch_reg, Operand(new_space_allocation_top));
+ And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
+ Branch(&top_check, eq, scratch_reg, Operand(zero_reg));
+ // The object is on a different page than allocation top. Bail out if the
+ // object sits on the page boundary as no memento can follow and we cannot
+ // touch the memory following it.
+ Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+ Xor(scratch_reg, scratch_reg, Operand(receiver_reg));
+ And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
+ Branch(no_memento_found, ne, scratch_reg, Operand(zero_reg));
+ // Continue with the actual map check.
+ jmp(&map_check);
+ // If top is on the same page as the current object, we need to check whether
+ // we are below top.
+ bind(&top_check);
+ Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
li(at, Operand(new_space_allocation_top));
- ld(at, MemOperand(at));
+ lw(at, MemOperand(at));
Branch(no_memento_found, gt, scratch_reg, Operand(at));
- ld(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
- if (allocation_memento_present) {
- Branch(allocation_memento_present, cond, scratch_reg,
- Operand(isolate()->factory()->allocation_memento_map()));
- }
+ // Memento map check.
+ bind(&map_check);
+ lw(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
+ Branch(no_memento_found, ne, scratch_reg,
+ Operand(isolate()->factory()->allocation_memento_map()));
}
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.h b/deps/v8/src/mips64/macro-assembler-mips64.h
index 7f44ab9cc5..401112d100 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/mips64/macro-assembler-mips64.h
@@ -265,7 +265,8 @@ class MacroAssembler: public Assembler {
void Call(Label* target);
- void Move(Register dst, Smi* smi) { li(dst, Operand(smi)); }
+ inline void Move(Register dst, Handle<Object> handle) { li(dst, handle); }
+ inline void Move(Register dst, Smi* smi) { li(dst, Operand(smi)); }
inline void Move(Register dst, Register src) {
if (!dst.is(src)) {
@@ -273,17 +274,29 @@ class MacroAssembler: public Assembler {
}
}
- inline void Move(FPURegister dst, FPURegister src) {
+ inline void Move_d(FPURegister dst, FPURegister src) {
if (!dst.is(src)) {
mov_d(dst, src);
}
}
+ inline void Move_s(FPURegister dst, FPURegister src) {
+ if (!dst.is(src)) {
+ mov_s(dst, src);
+ }
+ }
+
+ inline void Move(FPURegister dst, FPURegister src) { Move_d(dst, src); }
+
inline void Move(Register dst_low, Register dst_high, FPURegister src) {
mfc1(dst_low, src);
mfhc1(dst_high, src);
}
+ inline void Move(Register dst, FPURegister src) { dmfc1(dst, src); }
+
+ inline void Move(FPURegister dst, Register src) { dmtc1(src, dst); }
+
inline void FmoveHigh(Register dst_high, FPURegister src) {
mfhc1(dst_high, src);
}
@@ -312,6 +325,17 @@ class MacroAssembler: public Assembler {
void Movt(Register rd, Register rs, uint16_t cc = 0);
void Movf(Register rd, Register rs, uint16_t cc = 0);
+ // Min, Max macros.
+ // On pre-r6 these functions may modify at and t8 registers.
+ void MinNaNCheck_d(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* nan = nullptr);
+ void MaxNaNCheck_d(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* nan = nullptr);
+ void MinNaNCheck_s(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* nan = nullptr);
+ void MaxNaNCheck_s(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* nan = nullptr);
+
void Clz(Register rd, Register rs);
// Jump unconditionally to given label.
@@ -676,6 +700,9 @@ class MacroAssembler: public Assembler {
#undef DEFINE_INSTRUCTION2
#undef DEFINE_INSTRUCTION3
+ // Load Scaled Address instructions. Parameter sa (shift argument) must be
+ // between [1, 31] (inclusive). On pre-r6 architectures the scratch register
+ // may be clobbered.
void Lsa(Register rd, Register rs, Register rt, uint8_t sa,
Register scratch = at);
void Dlsa(Register rd, Register rs, Register rt, uint8_t sa,
@@ -803,6 +830,14 @@ class MacroAssembler: public Assembler {
Daddu(sp, sp, Operand(count * kPointerSize));
}
+ // Push a fixed frame, consisting of ra, fp.
+ void PushCommonFrame(Register marker_reg = no_reg);
+
+ // Push a standard frame, consisting of ra, fp, context and JS function.
+ void PushStandardFrame(Register function_reg);
+
+ void PopCommonFrame(Register marker_reg = no_reg);
+
// Push and pop the registers that can hold pointers, as defined by the
// RegList constant kSafepointSavedRegisters.
void PushSafepointRegisters();
@@ -1053,6 +1088,15 @@ class MacroAssembler: public Assembler {
// -------------------------------------------------------------------------
// JavaScript invokes.
+ // Removes current frame and its arguments from the stack preserving
+ // the arguments and a return address pushed to the stack for the next call.
+ // Both |callee_args_count| and |caller_args_count_reg| do not include
+ // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
+ // is trashed.
+ void PrepareForTailCall(const ParameterCount& callee_args_count,
+ Register caller_args_count_reg, Register scratch0,
+ Register scratch1);
+
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
@@ -1277,32 +1321,41 @@ class MacroAssembler: public Assembler {
// Usage: first call the appropriate arithmetic function, then call one of the
// jump functions with the overflow_dst register as the second parameter.
- void AdduAndCheckForOverflow(Register dst,
- Register left,
- Register right,
- Register overflow_dst,
- Register scratch = at);
+ inline void AddBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Register scratch = at) {
+ AddBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
+ }
+
+ inline void AddBranchNoOvf(Register dst, Register left, const Operand& right,
+ Label* no_overflow_label, Register scratch = at) {
+ AddBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
+ }
- void AdduAndCheckForOverflow(Register dst, Register left,
- const Operand& right, Register overflow_dst,
- Register scratch);
+ void AddBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
- void SubuAndCheckForOverflow(Register dst,
- Register left,
- Register right,
- Register overflow_dst,
- Register scratch = at);
+ void AddBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
- void SubuAndCheckForOverflow(Register dst, Register left,
- const Operand& right, Register overflow_dst,
- Register scratch);
+ inline void SubBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Register scratch = at) {
+ SubBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
+ }
- void DadduAndCheckForOverflow(Register dst, Register left, Register right,
- Register overflow_dst, Register scratch = at);
+ inline void SubBranchNoOvf(Register dst, Register left, const Operand& right,
+ Label* no_overflow_label, Register scratch = at) {
+ SubBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
+ }
- void DadduAndCheckForOverflow(Register dst, Register left,
- const Operand& right, Register overflow_dst,
- Register scratch);
+ void SubBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
+
+ void SubBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
inline void DaddBranchOvf(Register dst, Register left, const Operand& right,
Label* overflow_label, Register scratch = at) {
@@ -1322,13 +1375,6 @@ class MacroAssembler: public Assembler {
Label* overflow_label, Label* no_overflow_label,
Register scratch = at);
- void DsubuAndCheckForOverflow(Register dst, Register left, Register right,
- Register overflow_dst, Register scratch = at);
-
- void DsubuAndCheckForOverflow(Register dst, Register left,
- const Operand& right, Register overflow_dst,
- Register scratch);
-
inline void DsubBranchOvf(Register dst, Register left, const Operand& right,
Label* overflow_label, Register scratch = at) {
DsubBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
@@ -1648,6 +1694,9 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Jump if either of the registers contain a smi.
void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
+ // Abort execution if argument is a number, enabled via --debug-code.
+ void AssertNotNumber(Register object);
+
// Abort execution if argument is a smi, enabled via --debug-code.
void AssertNotSmi(Register object);
void AssertSmi(Register object);
@@ -1758,7 +1807,7 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
DecodeField<Field>(reg, reg);
}
// Generates function and stub prologue code.
- void StubPrologue();
+ void StubPrologue(StackFrame::Type type);
void Prologue(bool code_pre_aging);
// Load the type feedback vector from a JavaScript frame.
@@ -1773,25 +1822,22 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// in a0. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Label* call_runtime);
- // AllocationMemento support. Arrays may have an associated
- // AllocationMemento object that can be checked for in order to pretransition
- // to another type.
- // On entry, receiver_reg should point to the array object.
- // scratch_reg gets clobbered.
- // If allocation info is present, jump to allocation_memento_present.
- void TestJSArrayForAllocationMemento(
- Register receiver_reg,
- Register scratch_reg,
- Label* no_memento_found,
- Condition cond = al,
- Label* allocation_memento_present = NULL);
+ // AllocationMemento support. Arrays may have an associated AllocationMemento
+ // object that can be checked for in order to pretransition to another type.
+ // On entry, receiver_reg should point to the array object. scratch_reg gets
+ // clobbered. If no info is present jump to no_memento_found, otherwise fall
+ // through.
+ void TestJSArrayForAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Label* no_memento_found);
void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
Register scratch_reg,
Label* memento_found) {
Label no_memento_found;
TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
- &no_memento_found, eq, memento_found);
+ &no_memento_found);
+ Branch(memento_found);
bind(&no_memento_found);
}
@@ -1832,8 +1878,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
BranchDelaySlot bdslot);
void BranchLong(Label* L, BranchDelaySlot bdslot);
void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot);
- void Jr(Label* L, BranchDelaySlot bdslot);
- void Jalr(Label* L, BranchDelaySlot bdslot);
// Common implementation of BranchF functions for the different formats.
void BranchFCommon(SecondaryField sizeField, Label* target, Label* nan,
@@ -1936,17 +1980,19 @@ void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
nop();
}
addiupc(at, 5);
- dlsa(at, at, index, kPointerSizeLog2);
+ Dlsa(at, at, index, kPointerSizeLog2);
ld(at, MemOperand(at));
} else {
Label here;
- BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 + 7);
+ BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 + 11);
Align(8);
+ push(ra);
bal(&here);
dsll(at, index, kPointerSizeLog2); // Branch delay slot.
bind(&here);
daddu(at, at, ra);
- ld(at, MemOperand(at, 4 * v8::internal::Assembler::kInstrSize));
+ pop(ra);
+ ld(at, MemOperand(at, 6 * v8::internal::Assembler::kInstrSize));
}
jr(at);
nop(); // Branch delay slot nop.
diff --git a/deps/v8/src/mips64/simulator-mips64.cc b/deps/v8/src/mips64/simulator-mips64.cc
index 70c06c885f..9519865c82 100644
--- a/deps/v8/src/mips64/simulator-mips64.cc
+++ b/deps/v8/src/mips64/simulator-mips64.cc
@@ -24,11 +24,8 @@
namespace v8 {
namespace internal {
-// Utils functions.
-bool HaveSameSign(int64_t a, int64_t b) {
- return ((a ^ b) >= 0);
-}
-
+// Util functions.
+inline bool HaveSameSign(int64_t a, int64_t b) { return ((a ^ b) >= 0); }
uint32_t get_fcsr_condition_bit(uint32_t cc) {
if (cc == 0) {
@@ -3478,9 +3475,7 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
// Logical right-rotate of a word by a variable number of bits.
// This is special case od SRLV instruction, added in MIPS32
// Release 2. SA field is equal to 00001.
- alu_out =
- base::bits::RotateRight32(static_cast<const uint32_t>(rt_u()),
- static_cast<const uint32_t>(rs_u()));
+ alu_out = base::bits::RotateRight64(rt_u(), rs_u());
}
SetResult(rd_reg(), alu_out);
break;
@@ -4331,13 +4326,8 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
case POP10: // BOVC, BEQZALC, BEQC / ADDI (pre-r6)
if (kArchVariant == kMips64r6) {
if (rs_reg >= rt_reg) { // BOVC
- if (HaveSameSign(rs, rt)) {
- if (rs > 0) {
- BranchCompactHelper(rs > Registers::kMaxValue - rt, 16);
- } else if (rs < 0) {
- BranchCompactHelper(rs < Registers::kMinValue - rt, 16);
- }
- }
+ bool condition = !is_int32(rs) || !is_int32(rt) || !is_int32(rs + rt);
+ BranchCompactHelper(condition, 16);
} else {
if (rs_reg == 0) { // BEQZALC
BranchAndLinkCompactHelper(rt == 0, 16);
@@ -4363,15 +4353,8 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
case POP30: // BNVC, BNEZALC, BNEC / DADDI (pre-r6)
if (kArchVariant == kMips64r6) {
if (rs_reg >= rt_reg) { // BNVC
- if (!HaveSameSign(rs, rt) || rs == 0 || rt == 0) {
- BranchCompactHelper(true, 16);
- } else {
- if (rs > 0) {
- BranchCompactHelper(rs <= Registers::kMaxValue - rt, 16);
- } else if (rs < 0) {
- BranchCompactHelper(rs >= Registers::kMinValue - rt, 16);
- }
- }
+ bool condition = is_int32(rs) && is_int32(rt) && is_int32(rs + rt);
+ BranchCompactHelper(condition, 16);
} else {
if (rs_reg == 0) { // BNEZALC
BranchAndLinkCompactHelper(rt != 0, 16);
diff --git a/deps/v8/src/objects-body-descriptors-inl.h b/deps/v8/src/objects-body-descriptors-inl.h
index cd4be13967..bfc1895aef 100644
--- a/deps/v8/src/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects-body-descriptors-inl.h
@@ -472,6 +472,7 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
case JS_REGEXP_TYPE:
case JS_GLOBAL_PROXY_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
+ case JS_SPECIAL_API_OBJECT_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
case JS_BOUND_FUNCTION_TYPE:
return Op::template apply<JSObject::BodyDescriptor>(p1, p2, p3);
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 0d01ec2f5b..b023b03aea 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -99,6 +99,7 @@ void HeapObject::HeapObjectVerify() {
Oddball::cast(this)->OddballVerify();
break;
case JS_OBJECT_TYPE:
+ case JS_SPECIAL_API_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_PROMISE_TYPE:
JSObject::cast(this)->JSObjectVerify();
@@ -228,6 +229,10 @@ void ByteArray::ByteArrayVerify() {
void BytecodeArray::BytecodeArrayVerify() {
// TODO(oth): Walk bytecodes and immediate values to validate sanity.
+ // - All bytecodes are known and well formed.
+ // - Jumps must go to new instructions starts.
+ // - No Illegal bytecodes.
+ // - No consecutive sequences of prefix Wide / ExtraWide.
CHECK(IsBytecodeArray());
CHECK(constant_pool()->IsFixedArray());
VerifyHeapPointer(constant_pool());
@@ -618,7 +623,7 @@ void Oddball::OddballVerify() {
CHECK(number->IsSmi());
int value = Smi::cast(number)->value();
// Hidden oddballs have negative smis.
- const int kLeastHiddenOddballNumber = -5;
+ const int kLeastHiddenOddballNumber = -6;
CHECK_LE(value, 1);
CHECK(value >= kLeastHiddenOddballNumber);
}
@@ -641,6 +646,8 @@ void Oddball::OddballVerify() {
CHECK(this == heap->termination_exception());
} else if (map() == heap->exception_map()) {
CHECK(this == heap->exception());
+ } else if (map() == heap->optimized_out_map()) {
+ CHECK(this == heap->optimized_out());
} else {
UNREACHABLE();
}
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index e00478a6af..b75dd1c969 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -18,10 +18,12 @@
#include "src/conversions-inl.h"
#include "src/factory.h"
#include "src/field-index-inl.h"
+#include "src/field-type.h"
#include "src/handles-inl.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/isolate.h"
+#include "src/isolate-inl.h"
#include "src/layout-descriptor-inl.h"
#include "src/lookup.h"
#include "src/objects.h"
@@ -738,13 +740,12 @@ bool HeapObject::IsDependentCode() const {
bool HeapObject::IsContext() const {
Map* map = this->map();
Heap* heap = GetHeap();
- return (map == heap->function_context_map() ||
- map == heap->catch_context_map() ||
- map == heap->with_context_map() ||
- map == heap->native_context_map() ||
- map == heap->block_context_map() ||
- map == heap->module_context_map() ||
- map == heap->script_context_map());
+ return (
+ map == heap->function_context_map() || map == heap->catch_context_map() ||
+ map == heap->with_context_map() || map == heap->native_context_map() ||
+ map == heap->block_context_map() || map == heap->module_context_map() ||
+ map == heap->script_context_map() ||
+ map == heap->debug_evaluate_context_map());
}
bool HeapObject::IsNativeContext() const {
@@ -845,6 +846,8 @@ bool Object::IsUnseededNumberDictionary() const {
bool HeapObject::IsStringTable() const { return IsHashTable(); }
+bool HeapObject::IsStringSet() const { return IsHashTable(); }
+
bool HeapObject::IsNormalizedMapCache() const {
return NormalizedMapCache::IsNormalizedMapCache(this);
}
@@ -908,9 +911,7 @@ bool HeapObject::IsJSGlobalProxy() const {
TYPE_CHECKER(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE)
-bool HeapObject::IsUndetectableObject() const {
- return map()->is_undetectable();
-}
+bool HeapObject::IsUndetectable() const { return map()->is_undetectable(); }
bool HeapObject::IsAccessCheckNeeded() const {
if (IsJSGlobalProxy()) {
@@ -1003,6 +1004,24 @@ bool Object::FitsRepresentation(Representation representation) {
return true;
}
+bool Object::ToUint32(uint32_t* value) {
+ if (IsSmi()) {
+ int num = Smi::cast(this)->value();
+ if (num < 0) return false;
+ *value = static_cast<uint32_t>(num);
+ return true;
+ }
+ if (IsHeapNumber()) {
+ double num = HeapNumber::cast(this)->value();
+ if (num < 0) return false;
+ uint32_t uint_value = FastD2UI(num);
+ if (FastUI2D(uint_value) == num) {
+ *value = uint_value;
+ return true;
+ }
+ }
+ return false;
+}
// static
MaybeHandle<JSReceiver> Object::ToObject(Isolate* isolate,
@@ -1013,6 +1032,12 @@ MaybeHandle<JSReceiver> Object::ToObject(Isolate* isolate,
// static
+MaybeHandle<Name> Object::ToName(Isolate* isolate, Handle<Object> input) {
+ if (input->IsName()) return Handle<Name>::cast(input);
+ return ConvertToName(isolate, input);
+}
+
+// static
MaybeHandle<Object> Object::ToPrimitive(Handle<Object> input,
ToPrimitiveHint hint) {
if (input->IsPrimitive()) return input;
@@ -1027,15 +1052,39 @@ bool Object::HasSpecificClassOf(String* name) {
MaybeHandle<Object> Object::GetProperty(Handle<Object> object,
Handle<Name> name) {
LookupIterator it(object, name);
+ if (!it.IsFound()) return it.factory()->undefined_value();
return GetProperty(&it);
}
+MaybeHandle<Object> JSReceiver::GetProperty(Handle<JSReceiver> receiver,
+ Handle<Name> name) {
+ LookupIterator it(receiver, name, receiver);
+ if (!it.IsFound()) return it.factory()->undefined_value();
+ return Object::GetProperty(&it);
+}
+
MaybeHandle<Object> Object::GetElement(Isolate* isolate, Handle<Object> object,
uint32_t index) {
LookupIterator it(isolate, object, index);
+ if (!it.IsFound()) return it.factory()->undefined_value();
return GetProperty(&it);
}
+MaybeHandle<Object> JSReceiver::GetElement(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ uint32_t index) {
+ LookupIterator it(isolate, receiver, index, receiver);
+ if (!it.IsFound()) return it.factory()->undefined_value();
+ return Object::GetProperty(&it);
+}
+
+Handle<Object> JSReceiver::GetDataProperty(Handle<JSReceiver> object,
+ Handle<Name> name) {
+ LookupIterator it(object, name, object,
+ LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+ if (!it.IsFound()) return it.factory()->undefined_value();
+ return GetDataProperty(&it);
+}
MaybeHandle<Object> Object::SetElement(Isolate* isolate, Handle<Object> object,
uint32_t index, Handle<Object> value,
@@ -1059,10 +1108,11 @@ MaybeHandle<Object> JSReceiver::GetPrototype(Isolate* isolate,
return PrototypeIterator::GetCurrent(iter);
}
-MaybeHandle<Object> Object::GetProperty(Isolate* isolate, Handle<Object> object,
- const char* name) {
+MaybeHandle<Object> JSReceiver::GetProperty(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ const char* name) {
Handle<String> str = isolate->factory()->InternalizeUtf8String(name);
- return GetProperty(object, str);
+ return GetProperty(receiver, str);
}
@@ -1836,17 +1886,33 @@ void JSObject::initialize_elements() {
InterceptorInfo* JSObject::GetIndexedInterceptor() {
- DCHECK(map()->has_indexed_interceptor());
- JSFunction* constructor = JSFunction::cast(map()->GetConstructor());
+ return map()->GetIndexedInterceptor();
+}
+
+InterceptorInfo* JSObject::GetNamedInterceptor() {
+ return map()->GetNamedInterceptor();
+}
+
+InterceptorInfo* Map::GetNamedInterceptor() {
+ DCHECK(has_named_interceptor());
+ JSFunction* constructor = JSFunction::cast(GetConstructor());
+ DCHECK(constructor->shared()->IsApiFunction());
+ return InterceptorInfo::cast(
+ constructor->shared()->get_api_func_data()->named_property_handler());
+}
+
+InterceptorInfo* Map::GetIndexedInterceptor() {
+ DCHECK(has_indexed_interceptor());
+ JSFunction* constructor = JSFunction::cast(GetConstructor());
DCHECK(constructor->shared()->IsApiFunction());
- Object* result =
- constructor->shared()->get_api_func_data()->indexed_property_handler();
- return InterceptorInfo::cast(result);
+ return InterceptorInfo::cast(
+ constructor->shared()->get_api_func_data()->indexed_property_handler());
}
ACCESSORS(Oddball, to_string, String, kToStringOffset)
ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
+ACCESSORS(Oddball, to_boolean, Oddball, kToBooleanOffset)
ACCESSORS(Oddball, type_of, String, kTypeOfOffset)
@@ -1896,11 +1962,14 @@ void WeakCell::clear() {
void WeakCell::initialize(HeapObject* val) {
WRITE_FIELD(this, kValueOffset, val);
- Heap* heap = GetHeap();
// We just have to execute the generational barrier here because we never
// mark through a weak cell and collect evacuation candidates when we process
// all weak cells.
- heap->RecordWrite(this, kValueOffset, val);
+ WriteBarrierMode mode =
+ Page::FromAddress(this->address())->IsFlagSet(Page::BLACK_PAGE)
+ ? UPDATE_WRITE_BARRIER
+ : UPDATE_WEAK_WRITE_BARRIER;
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kValueOffset, val, mode);
}
@@ -1936,6 +2005,8 @@ int JSObject::GetHeaderSize(InstanceType type) {
// field operations considerably on average.
if (type == JS_OBJECT_TYPE) return JSObject::kHeaderSize;
switch (type) {
+ case JS_SPECIAL_API_OBJECT_TYPE:
+ return JSObject::kHeaderSize;
case JS_GENERATOR_OBJECT_TYPE:
return JSGeneratorObject::kSize;
case JS_MODULE_TYPE:
@@ -2182,7 +2253,6 @@ void Struct::InitializeBody(int object_size) {
}
}
-
bool Object::ToArrayLength(uint32_t* index) { return Object::ToUint32(index); }
@@ -2724,6 +2794,25 @@ FixedArrayBase* Map::GetInitialElements() {
return NULL;
}
+// static
+Handle<Map> Map::ReconfigureProperty(Handle<Map> map, int modify_index,
+ PropertyKind new_kind,
+ PropertyAttributes new_attributes,
+ Representation new_representation,
+ Handle<FieldType> new_field_type,
+ StoreMode store_mode) {
+ return Reconfigure(map, map->elements_kind(), modify_index, new_kind,
+ new_attributes, new_representation, new_field_type,
+ store_mode);
+}
+
+// static
+Handle<Map> Map::ReconfigureElementsKind(Handle<Map> map,
+ ElementsKind new_elements_kind) {
+ return Reconfigure(map, new_elements_kind, -1, kData, NONE,
+ Representation::None(), FieldType::None(map->GetIsolate()),
+ ALLOW_IN_DESCRIPTOR);
+}
Object** DescriptorArray::GetKeySlot(int descriptor_number) {
DCHECK(descriptor_number < number_of_descriptors());
@@ -2929,6 +3018,9 @@ int HashTableBase::ComputeCapacity(int at_least_space_for) {
return Max(capacity, kMinCapacity);
}
+bool HashTableBase::IsKey(Heap* heap, Object* k) {
+ return k != heap->the_hole_value() && k != heap->undefined_value();
+}
bool HashTableBase::IsKey(Object* k) {
return !k->IsTheHole() && !k->IsUndefined();
@@ -2977,6 +3069,15 @@ int HashTable<Derived, Shape, Key>::FindEntry(Isolate* isolate, Key key,
return kNotFound;
}
+bool StringSetShape::IsMatch(String* key, Object* value) {
+ return value->IsString() && key->Equals(String::cast(value));
+}
+
+uint32_t StringSetShape::Hash(String* key) { return key->Hash(); }
+
+uint32_t StringSetShape::HashForObject(String* key, Object* object) {
+ return object->IsString() ? String::cast(object)->Hash() : 0;
+}
bool SeededNumberDictionary::requires_slow_elements() {
Object* max_index_object = get(kMaxNumberKeyIndex);
@@ -3080,6 +3181,7 @@ CAST_ACCESSOR(Simd128Value)
CAST_ACCESSOR(SlicedString)
CAST_ACCESSOR(Smi)
CAST_ACCESSOR(String)
+CAST_ACCESSOR(StringSet)
CAST_ACCESSOR(StringTable)
CAST_ACCESSOR(Struct)
CAST_ACCESSOR(Symbol)
@@ -3902,10 +4004,9 @@ int BytecodeArray::parameter_count() const {
ACCESSORS(BytecodeArray, constant_pool, FixedArray, kConstantPoolOffset)
ACCESSORS(BytecodeArray, handler_table, FixedArray, kHandlerTableOffset)
-ACCESSORS(BytecodeArray, source_position_table, FixedArray,
+ACCESSORS(BytecodeArray, source_position_table, ByteArray,
kSourcePositionTableOffset)
-
Address BytecodeArray::GetFirstBytecodeAddress() {
return reinterpret_cast<Address>(this) - kHeapObjectTag + kHeaderSize;
}
@@ -4514,16 +4615,6 @@ bool Map::is_migration_target() {
}
-void Map::set_is_strong() {
- set_bit_field3(IsStrong::update(bit_field3(), true));
-}
-
-
-bool Map::is_strong() {
- return IsStrong::decode(bit_field3());
-}
-
-
void Map::set_new_target_is_base(bool value) {
set_bit_field3(NewTargetIsBase::update(bit_field3(), value));
}
@@ -4690,8 +4781,7 @@ bool Code::IsCodeStubOrIC() {
return kind() == STUB || kind() == HANDLER || kind() == LOAD_IC ||
kind() == KEYED_LOAD_IC || kind() == CALL_IC || kind() == STORE_IC ||
kind() == KEYED_STORE_IC || kind() == BINARY_OP_IC ||
- kind() == COMPARE_IC || kind() == COMPARE_NIL_IC ||
- kind() == TO_BOOLEAN_IC;
+ kind() == COMPARE_IC || kind() == TO_BOOLEAN_IC;
}
@@ -4723,7 +4813,6 @@ Code::StubType Code::type() {
return ExtractTypeFromFlags(flags());
}
-
// For initialization.
void Code::set_raw_kind_specific_flags1(int value) {
WRITE_INT_FIELD(this, kKindSpecificFlags1Offset, value);
@@ -4870,14 +4959,10 @@ void Code::set_profiler_ticks(int ticks) {
}
}
-
-int Code::builtin_index() {
- return READ_INT32_FIELD(this, kKindSpecificFlags1Offset);
-}
-
+int Code::builtin_index() { return READ_INT_FIELD(this, kBuiltinIndexOffset); }
void Code::set_builtin_index(int index) {
- WRITE_INT32_FIELD(this, kKindSpecificFlags1Offset, index);
+ WRITE_INT_FIELD(this, kBuiltinIndexOffset, index);
}
@@ -4981,15 +5066,14 @@ bool Code::is_keyed_store_stub() { return kind() == KEYED_STORE_IC; }
bool Code::is_call_stub() { return kind() == CALL_IC; }
bool Code::is_binary_op_stub() { return kind() == BINARY_OP_IC; }
bool Code::is_compare_ic_stub() { return kind() == COMPARE_IC; }
-bool Code::is_compare_nil_ic_stub() { return kind() == COMPARE_NIL_IC; }
bool Code::is_to_boolean_ic_stub() { return kind() == TO_BOOLEAN_IC; }
bool Code::is_optimized_code() { return kind() == OPTIMIZED_FUNCTION; }
-
+bool Code::is_wasm_code() { return kind() == WASM_FUNCTION; }
bool Code::embeds_maps_weakly() {
Kind k = kind();
return (k == LOAD_IC || k == STORE_IC || k == KEYED_LOAD_IC ||
- k == KEYED_STORE_IC || k == COMPARE_NIL_IC) &&
+ k == KEYED_STORE_IC) &&
ic_state() == MONOMORPHIC;
}
@@ -5005,20 +5089,17 @@ Address Code::constant_pool() {
return constant_pool;
}
-
Code::Flags Code::ComputeFlags(Kind kind, InlineCacheState ic_state,
ExtraICState extra_ic_state, StubType type,
CacheHolderFlag holder) {
// Compute the bit mask.
- unsigned int bits = KindField::encode(kind)
- | ICStateField::encode(ic_state)
- | TypeField::encode(type)
- | ExtraICStateField::encode(extra_ic_state)
- | CacheHolderField::encode(holder);
+ unsigned int bits = KindField::encode(kind) | ICStateField::encode(ic_state) |
+ TypeField::encode(type) |
+ ExtraICStateField::encode(extra_ic_state) |
+ CacheHolderField::encode(holder);
return static_cast<Flags>(bits);
}
-
Code::Flags Code::ComputeMonomorphicFlags(Kind kind,
ExtraICState extra_ic_state,
CacheHolderFlag holder,
@@ -5052,7 +5133,6 @@ Code::StubType Code::ExtractTypeFromFlags(Flags flags) {
return TypeField::decode(flags);
}
-
CacheHolderFlag Code::ExtractCacheHolderFromFlags(Flags flags) {
return CacheHolderField::decode(flags);
}
@@ -5135,7 +5215,7 @@ class Code::FindAndReplacePattern {
friend class Code;
};
-int AbstractCode::Size() {
+int AbstractCode::instruction_size() {
if (IsCode()) {
return GetCode()->instruction_size();
} else {
@@ -5143,6 +5223,45 @@ int AbstractCode::Size() {
}
}
+int AbstractCode::ExecutableSize() {
+ if (IsCode()) {
+ return GetCode()->ExecutableSize();
+ } else {
+ return GetBytecodeArray()->BytecodeArraySize();
+ }
+}
+
+Address AbstractCode::instruction_start() {
+ if (IsCode()) {
+ return GetCode()->instruction_start();
+ } else {
+ return GetBytecodeArray()->GetFirstBytecodeAddress();
+ }
+}
+
+Address AbstractCode::instruction_end() {
+ if (IsCode()) {
+ return GetCode()->instruction_end();
+ } else {
+ return GetBytecodeArray()->GetFirstBytecodeAddress() +
+ GetBytecodeArray()->length();
+ }
+}
+
+bool AbstractCode::contains(byte* inner_pointer) {
+ return (address() <= inner_pointer) && (inner_pointer <= address() + Size());
+}
+
+AbstractCode::Kind AbstractCode::kind() {
+ if (IsCode()) {
+ STATIC_ASSERT(AbstractCode::FUNCTION ==
+ static_cast<AbstractCode::Kind>(Code::FUNCTION));
+ return static_cast<AbstractCode::Kind>(GetCode()->kind());
+ } else {
+ return INTERPRETED_FUNCTION;
+ }
+}
+
Code* AbstractCode::GetCode() { return Code::cast(this); }
BytecodeArray* AbstractCode::GetBytecodeArray() {
@@ -5500,8 +5619,8 @@ ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)
ACCESSORS(SharedFunctionInfo, script, Object, kScriptOffset)
ACCESSORS(SharedFunctionInfo, debug_info, Object, kDebugInfoOffset)
-ACCESSORS(SharedFunctionInfo, inferred_name, String, kInferredNameOffset)
-
+ACCESSORS(SharedFunctionInfo, function_identifier, Object,
+ kFunctionIdentifierOffset)
SMI_ACCESSORS(FunctionTemplateInfo, length, kLengthOffset)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, hidden_prototype,
@@ -5634,6 +5753,13 @@ BOOL_GETTER(SharedFunctionInfo,
optimization_disabled,
kOptimizationDisabled)
+AbstractCode* SharedFunctionInfo::abstract_code() {
+ if (HasBytecodeArray()) {
+ return AbstractCode::cast(bytecode_array());
+ } else {
+ return AbstractCode::cast(code());
+ }
+}
void SharedFunctionInfo::set_optimization_disabled(bool disable) {
set_compiler_hints(BooleanBit::set(compiler_hints(),
@@ -5645,8 +5771,7 @@ void SharedFunctionInfo::set_optimization_disabled(bool disable) {
LanguageMode SharedFunctionInfo::language_mode() {
STATIC_ASSERT(LANGUAGE_END == 3);
return construct_language_mode(
- BooleanBit::get(compiler_hints(), kStrictModeFunction),
- BooleanBit::get(compiler_hints(), kStrongModeFunction));
+ BooleanBit::get(compiler_hints(), kStrictModeFunction));
}
@@ -5657,7 +5782,6 @@ void SharedFunctionInfo::set_language_mode(LanguageMode language_mode) {
DCHECK(is_sloppy(this->language_mode()) || is_strict(language_mode));
int hints = compiler_hints();
hints = BooleanBit::set(hints, kStrictModeFunction, is_strict(language_mode));
- hints = BooleanBit::set(hints, kStrongModeFunction, is_strong(language_mode));
set_compiler_hints(hints);
}
@@ -5719,7 +5843,7 @@ bool Script::HasValidSource() {
void SharedFunctionInfo::DontAdaptArguments() {
- DCHECK(code()->kind() == Code::BUILTIN);
+ DCHECK(code()->kind() == Code::BUILTIN || code()->kind() == Code::STUB);
set_internal_formal_parameter_count(kDontAdaptArgumentsSentinel);
}
@@ -5824,28 +5948,61 @@ FunctionTemplateInfo* SharedFunctionInfo::get_api_func_data() {
return FunctionTemplateInfo::cast(function_data());
}
+void SharedFunctionInfo::set_api_func_data(FunctionTemplateInfo* data) {
+ DCHECK(function_data()->IsUndefined());
+ set_function_data(data);
+}
-bool SharedFunctionInfo::HasBuiltinFunctionId() {
- return function_data()->IsSmi();
+bool SharedFunctionInfo::HasBytecodeArray() {
+ return function_data()->IsBytecodeArray();
}
+BytecodeArray* SharedFunctionInfo::bytecode_array() {
+ DCHECK(HasBytecodeArray());
+ return BytecodeArray::cast(function_data());
+}
+
+void SharedFunctionInfo::set_bytecode_array(BytecodeArray* bytecode) {
+ DCHECK(function_data()->IsUndefined());
+ set_function_data(bytecode);
+}
+
+void SharedFunctionInfo::ClearBytecodeArray() {
+ DCHECK(function_data()->IsUndefined() || HasBytecodeArray());
+ set_function_data(GetHeap()->undefined_value());
+}
+
+bool SharedFunctionInfo::HasBuiltinFunctionId() {
+ return function_identifier()->IsSmi();
+}
+
BuiltinFunctionId SharedFunctionInfo::builtin_function_id() {
DCHECK(HasBuiltinFunctionId());
- return static_cast<BuiltinFunctionId>(Smi::cast(function_data())->value());
+ return static_cast<BuiltinFunctionId>(
+ Smi::cast(function_identifier())->value());
}
-
-bool SharedFunctionInfo::HasBytecodeArray() {
- return function_data()->IsBytecodeArray();
+void SharedFunctionInfo::set_builtin_function_id(BuiltinFunctionId id) {
+ set_function_identifier(Smi::FromInt(id));
}
+bool SharedFunctionInfo::HasInferredName() {
+ return function_identifier()->IsString();
+}
-BytecodeArray* SharedFunctionInfo::bytecode_array() {
- DCHECK(HasBytecodeArray());
- return BytecodeArray::cast(function_data());
+String* SharedFunctionInfo::inferred_name() {
+ if (HasInferredName()) {
+ return String::cast(function_identifier());
+ }
+ DCHECK(function_identifier()->IsUndefined() || HasBuiltinFunctionId());
+ return GetIsolate()->heap()->empty_string();
}
+void SharedFunctionInfo::set_inferred_name(String* inferred_name) {
+ DCHECK(function_identifier()->IsUndefined() || HasInferredName());
+ set_function_identifier(inferred_name);
+}
int SharedFunctionInfo::ic_age() {
return ICAgeBits::decode(counters());
@@ -5944,26 +6101,6 @@ bool SharedFunctionInfo::OptimizedCodeMapIsCleared() const {
}
-// static
-void SharedFunctionInfo::AddToOptimizedCodeMap(
- Handle<SharedFunctionInfo> shared, Handle<Context> native_context,
- Handle<Code> code, Handle<LiteralsArray> literals, BailoutId osr_ast_id) {
- AddToOptimizedCodeMapInternal(shared, native_context, code, literals,
- osr_ast_id);
-}
-
-
-// static
-void SharedFunctionInfo::AddLiteralsToOptimizedCodeMap(
- Handle<SharedFunctionInfo> shared, Handle<Context> native_context,
- Handle<LiteralsArray> literals) {
- Isolate* isolate = shared->GetIsolate();
- Handle<Oddball> undefined = isolate->factory()->undefined_value();
- AddToOptimizedCodeMapInternal(shared, native_context, undefined, literals,
- BailoutId::None());
-}
-
-
bool JSFunction::IsOptimized() {
return code()->kind() == Code::OPTIMIZED_FUNCTION;
}
@@ -6008,6 +6145,14 @@ void Map::InobjectSlackTrackingStep() {
}
}
+AbstractCode* JSFunction::abstract_code() {
+ Code* code = this->code();
+ if (code->is_interpreter_entry_trampoline()) {
+ return AbstractCode::cast(shared()->bytecode_array());
+ } else {
+ return AbstractCode::cast(code);
+ }
+}
Code* JSFunction::code() {
return Code::cast(
@@ -6939,6 +7084,17 @@ MaybeHandle<Object> Object::GetPropertyOrElement(Handle<Object> object,
return GetProperty(&it);
}
+MaybeHandle<Object> Object::SetPropertyOrElement(Handle<Object> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ LanguageMode language_mode,
+ StoreFromKeyed store_mode) {
+ LookupIterator it =
+ LookupIterator::PropertyOrElement(name->GetIsolate(), object, name);
+ MAYBE_RETURN_NULL(SetProperty(&it, value, language_mode, store_mode));
+ return value;
+}
+
MaybeHandle<Object> Object::GetPropertyOrElement(Handle<Object> receiver,
Handle<Name> name,
Handle<JSReceiver> holder) {
@@ -6972,11 +7128,10 @@ NameDictionary* JSReceiver::property_dictionary() {
return NameDictionary::cast(properties());
}
-
Maybe<bool> JSReceiver::HasProperty(Handle<JSReceiver> object,
Handle<Name> name) {
- LookupIterator it =
- LookupIterator::PropertyOrElement(object->GetIsolate(), object, name);
+ LookupIterator it = LookupIterator::PropertyOrElement(object->GetIsolate(),
+ object, name, object);
return HasProperty(&it);
}
@@ -6985,7 +7140,7 @@ Maybe<bool> JSReceiver::HasOwnProperty(Handle<JSReceiver> object,
Handle<Name> name) {
if (object->IsJSObject()) { // Shortcut
LookupIterator it = LookupIterator::PropertyOrElement(
- object->GetIsolate(), object, name, LookupIterator::HIDDEN);
+ object->GetIsolate(), object, name, object, LookupIterator::HIDDEN);
return HasProperty(&it);
}
@@ -6998,8 +7153,8 @@ Maybe<bool> JSReceiver::HasOwnProperty(Handle<JSReceiver> object,
Maybe<PropertyAttributes> JSReceiver::GetPropertyAttributes(
Handle<JSReceiver> object, Handle<Name> name) {
- LookupIterator it =
- LookupIterator::PropertyOrElement(name->GetIsolate(), object, name);
+ LookupIterator it = LookupIterator::PropertyOrElement(name->GetIsolate(),
+ object, name, object);
return GetPropertyAttributes(&it);
}
@@ -7007,13 +7162,13 @@ Maybe<PropertyAttributes> JSReceiver::GetPropertyAttributes(
Maybe<PropertyAttributes> JSReceiver::GetOwnPropertyAttributes(
Handle<JSReceiver> object, Handle<Name> name) {
LookupIterator it = LookupIterator::PropertyOrElement(
- name->GetIsolate(), object, name, LookupIterator::HIDDEN);
+ name->GetIsolate(), object, name, object, LookupIterator::HIDDEN);
return GetPropertyAttributes(&it);
}
Maybe<bool> JSReceiver::HasElement(Handle<JSReceiver> object, uint32_t index) {
- LookupIterator it(object->GetIsolate(), object, index);
+ LookupIterator it(object->GetIsolate(), object, index, object);
return HasProperty(&it);
}
@@ -7021,7 +7176,7 @@ Maybe<bool> JSReceiver::HasElement(Handle<JSReceiver> object, uint32_t index) {
Maybe<PropertyAttributes> JSReceiver::GetElementAttributes(
Handle<JSReceiver> object, uint32_t index) {
Isolate* isolate = object->GetIsolate();
- LookupIterator it(isolate, object, index);
+ LookupIterator it(isolate, object, index, object);
return GetPropertyAttributes(&it);
}
@@ -7029,7 +7184,7 @@ Maybe<PropertyAttributes> JSReceiver::GetElementAttributes(
Maybe<PropertyAttributes> JSReceiver::GetOwnElementAttributes(
Handle<JSReceiver> object, uint32_t index) {
Isolate* isolate = object->GetIsolate();
- LookupIterator it(isolate, object, index, LookupIterator::HIDDEN);
+ LookupIterator it(isolate, object, index, object, LookupIterator::HIDDEN);
return GetPropertyAttributes(&it);
}
@@ -7052,11 +7207,12 @@ Handle<Smi> JSReceiver::GetOrCreateIdentityHash(Handle<JSReceiver> object) {
: JSObject::GetOrCreateIdentityHash(Handle<JSObject>::cast(object));
}
-
-Object* JSReceiver::GetIdentityHash() {
- return IsJSProxy()
- ? JSProxy::cast(this)->GetIdentityHash()
- : JSObject::cast(this)->GetIdentityHash();
+Handle<Object> JSReceiver::GetIdentityHash(Isolate* isolate,
+ Handle<JSReceiver> receiver) {
+ return receiver->IsJSProxy() ? JSProxy::GetIdentityHash(
+ isolate, Handle<JSProxy>::cast(receiver))
+ : JSObject::GetIdentityHash(
+ isolate, Handle<JSObject>::cast(receiver));
}
@@ -7089,6 +7245,11 @@ void AccessorInfo::set_is_special_data_property(bool value) {
set_flag(BooleanBit::set(flag(), kSpecialDataProperty, value));
}
+bool AccessorInfo::is_sloppy() { return BooleanBit::get(flag(), kIsSloppy); }
+
+void AccessorInfo::set_is_sloppy(bool value) {
+ set_flag(BooleanBit::set(flag(), kIsSloppy, value));
+}
PropertyAttributes AccessorInfo::property_attributes() {
return AttributesField::decode(static_cast<uint32_t>(flag()));
@@ -7448,6 +7609,11 @@ void JSArray::SetContent(Handle<JSArray> array,
}
+bool JSArray::HasArrayPrototype(Isolate* isolate) {
+ return map()->prototype() == *isolate->initial_array_prototype();
+}
+
+
int TypeFeedbackInfo::ic_total_count() {
int current = Smi::cast(READ_FIELD(this, kStorage1Offset))->value();
return ICTotalCountField::decode(current);
@@ -7645,6 +7811,30 @@ static inline uint32_t ObjectAddressForHashing(void* object) {
return value & MemoryChunk::kAlignmentMask;
}
+static inline Handle<Object> MakeEntryPair(Isolate* isolate, uint32_t index,
+ Handle<Object> value) {
+ Handle<Object> key = isolate->factory()->Uint32ToString(index);
+ Handle<FixedArray> entry_storage =
+ isolate->factory()->NewUninitializedFixedArray(2);
+ {
+ entry_storage->set(0, *key, SKIP_WRITE_BARRIER);
+ entry_storage->set(1, *value, SKIP_WRITE_BARRIER);
+ }
+ return isolate->factory()->NewJSArrayWithElements(entry_storage,
+ FAST_ELEMENTS, 2);
+}
+
+static inline Handle<Object> MakeEntryPair(Isolate* isolate, Handle<Name> key,
+ Handle<Object> value) {
+ Handle<FixedArray> entry_storage =
+ isolate->factory()->NewUninitializedFixedArray(2);
+ {
+ entry_storage->set(0, *key, SKIP_WRITE_BARRIER);
+ entry_storage->set(1, *value, SKIP_WRITE_BARRIER);
+ }
+ return isolate->factory()->NewJSArrayWithElements(entry_storage,
+ FAST_ELEMENTS, 2);
+}
#undef TYPE_CHECKER
#undef CAST_ACCESSOR
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 67bc62e7e2..58092a49ba 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -95,6 +95,7 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
os << "filler";
break;
case JS_OBJECT_TYPE: // fall through
+ case JS_SPECIAL_API_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_ARRAY_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
@@ -395,10 +396,16 @@ static void JSObjectPrintHeader(std::ostream& os, JSObject* obj,
obj->PrintHeader(os, id);
// Don't call GetElementsKind, its validation code can cause the printer to
// fail when debugging.
- PrototypeIterator iter(obj->GetIsolate(), obj);
os << "\n - map = " << reinterpret_cast<void*>(obj->map()) << " ["
- << ElementsKindToString(obj->map()->elements_kind())
- << "]\n - prototype = " << reinterpret_cast<void*>(iter.GetCurrent());
+ << ElementsKindToString(obj->map()->elements_kind());
+ if (obj->elements()->map() == obj->GetHeap()->fixed_cow_array_map()) {
+ os << " (COW)";
+ }
+ PrototypeIterator iter(obj->GetIsolate(), obj);
+ os << "]\n - prototype = " << reinterpret_cast<void*>(iter.GetCurrent());
+ if (obj->elements()->length() > 0) {
+ os << "\n - elements = " << Brief(obj->elements());
+ }
}
@@ -454,6 +461,12 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
}
os << "\n - elements kind: " << ElementsKindToString(elements_kind());
os << "\n - unused property fields: " << unused_property_fields();
+ os << "\n - enum length: ";
+ if (EnumLength() == kInvalidEnumCacheSentinel) {
+ os << "invalid";
+ } else {
+ os << EnumLength();
+ }
if (is_deprecated()) os << "\n - deprecated_map";
if (is_stable()) os << "\n - stable_map";
if (is_dictionary_map()) os << "\n - dictionary_map";
@@ -466,7 +479,6 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
if (is_access_check_needed()) os << "\n - access_check_needed";
if (!is_extensible()) os << "\n - non-extensible";
if (is_observed()) os << "\n - observed";
- if (is_strong()) os << "\n - strong_map";
if (is_prototype_map()) {
os << "\n - prototype_map";
os << "\n - prototype info: " << Brief(prototype_info());
@@ -856,6 +868,8 @@ void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
if (has_initial_map()) os << Brief(initial_map());
os << "\n - shared_info = " << Brief(shared());
os << "\n - name = " << Brief(shared()->name());
+ os << "\n - formal_parameter_count = "
+ << shared()->internal_formal_parameter_count();
if (shared()->is_generator()) {
os << "\n - generator";
}
@@ -868,9 +882,10 @@ void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "SharedFunctionInfo");
- os << "\n - name: " << Brief(name());
- os << "\n - expected_nof_properties: " << expected_nof_properties();
- os << "\n - ast_node_count: " << ast_node_count();
+ os << "\n - name = " << Brief(name());
+ os << "\n - formal_parameter_count = " << internal_formal_parameter_count();
+ os << "\n - expected_nof_properties = " << expected_nof_properties();
+ os << "\n - ast_node_count = " << ast_node_count();
os << "\n - instance class name = ";
instance_class_name()->Print(os);
os << "\n - code = " << Brief(code());
@@ -1297,8 +1312,6 @@ void TransitionArray::PrintTransitions(std::ostream& os, Object* transitions,
<< ")";
} else if (key == heap->strict_function_transition_symbol()) {
os << " (transition to strict function)";
- } else if (key == heap->strong_function_transition_symbol()) {
- os << " (transition to strong function)";
} else if (key == heap->observed_symbol()) {
os << " (transition to Object.observe)";
} else {
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index cc0712a324..74b5d85aa2 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -8,11 +8,13 @@
#include <iomanip>
#include <sstream>
+#include "src/objects-inl.h"
+
#include "src/accessors.h"
#include "src/allocation-site-scopes.h"
#include "src/api.h"
+#include "src/api-arguments.h"
#include "src/api-natives.h"
-#include "src/arguments.h"
#include "src/base/bits.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
@@ -31,16 +33,16 @@
#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
#include "src/identity-map.h"
-#include "src/interpreter/bytecodes.h"
+#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/interpreter/interpreter.h"
#include "src/interpreter/source-position-table.h"
#include "src/isolate-inl.h"
-#include "src/key-accumulator.h"
+#include "src/keys.h"
#include "src/list.h"
#include "src/log.h"
#include "src/lookup.h"
#include "src/macro-assembler.h"
#include "src/messages.h"
-#include "src/objects-inl.h"
#include "src/objects-body-descriptors-inl.h"
#include "src/profiler/cpu-profiler.h"
#include "src/property-descriptor.h"
@@ -113,17 +115,18 @@ MaybeHandle<JSReceiver> Object::ToObject(Isolate* isolate,
return result;
}
-
+// ES6 section 9.2.1.2, OrdinaryCallBindThis for sloppy callee.
// static
-MaybeHandle<Name> Object::ToName(Isolate* isolate, Handle<Object> input) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, input, Object::ToPrimitive(input, ToPrimitiveHint::kString),
- Name);
- if (input->IsName()) return Handle<Name>::cast(input);
- return ToString(isolate, input);
+MaybeHandle<JSReceiver> Object::ConvertReceiver(Isolate* isolate,
+ Handle<Object> object) {
+ if (object->IsJSReceiver()) return Handle<JSReceiver>::cast(object);
+ if (*object == isolate->heap()->null_value() ||
+ *object == isolate->heap()->undefined_value()) {
+ return handle(isolate->global_proxy(), isolate);
+ }
+ return Object::ToObject(isolate, object);
}
-
// static
MaybeHandle<Object> Object::ToNumber(Handle<Object> input) {
while (true) {
@@ -175,6 +178,16 @@ MaybeHandle<Object> Object::ToUint32(Isolate* isolate, Handle<Object> input) {
// static
+MaybeHandle<Name> Object::ConvertToName(Isolate* isolate,
+ Handle<Object> input) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, input, Object::ToPrimitive(input, ToPrimitiveHint::kString),
+ Name);
+ if (input->IsName()) return Handle<Name>::cast(input);
+ return ToString(isolate, input);
+}
+
+// static
MaybeHandle<String> Object::ToString(Isolate* isolate, Handle<Object> input) {
while (true) {
if (input->IsString()) {
@@ -218,7 +231,7 @@ bool Object::BooleanValue() {
if (IsBoolean()) return IsTrue();
if (IsSmi()) return Smi::cast(this)->value() != 0;
if (IsUndefined() || IsNull()) return false;
- if (IsUndetectableObject()) return false; // Undetectable object is false.
+ if (IsUndetectable()) return false; // Undetectable object is false.
if (IsString()) return String::cast(this)->length() != 0;
if (IsHeapNumber()) return HeapNumber::cast(this)->HeapNumberBooleanValue();
return true;
@@ -284,6 +297,10 @@ Maybe<ComparisonResult> Object::Compare(Handle<Object> x, Handle<Object> y) {
// static
Maybe<bool> Object::Equals(Handle<Object> x, Handle<Object> y) {
+ // This is the generic version of Abstract Equality Comparison; a version in
+ // JavaScript land is available in the EqualStub and NotEqualStub. Whenever
+ // you change something functionality wise in here, remember to update the
+ // TurboFan code stubs as well.
while (true) {
if (x->IsNumber()) {
if (y->IsNumber()) {
@@ -292,7 +309,7 @@ Maybe<bool> Object::Equals(Handle<Object> x, Handle<Object> y) {
return Just(NumberEquals(*x, Handle<Oddball>::cast(y)->to_number()));
} else if (y->IsString()) {
return Just(NumberEquals(x, String::ToNumber(Handle<String>::cast(y))));
- } else if (y->IsJSReceiver() && !y->IsUndetectableObject()) {
+ } else if (y->IsJSReceiver()) {
if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(y))
.ToHandle(&y)) {
return Nothing<bool>();
@@ -310,7 +327,7 @@ Maybe<bool> Object::Equals(Handle<Object> x, Handle<Object> y) {
} else if (y->IsBoolean()) {
x = String::ToNumber(Handle<String>::cast(x));
return Just(NumberEquals(*x, Handle<Oddball>::cast(y)->to_number()));
- } else if (y->IsJSReceiver() && !y->IsUndetectableObject()) {
+ } else if (y->IsJSReceiver()) {
if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(y))
.ToHandle(&y)) {
return Nothing<bool>();
@@ -326,7 +343,7 @@ Maybe<bool> Object::Equals(Handle<Object> x, Handle<Object> y) {
} else if (y->IsString()) {
y = String::ToNumber(Handle<String>::cast(y));
return Just(NumberEquals(Handle<Oddball>::cast(x)->to_number(), *y));
- } else if (y->IsJSReceiver() && !y->IsUndetectableObject()) {
+ } else if (y->IsJSReceiver()) {
if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(y))
.ToHandle(&y)) {
return Nothing<bool>();
@@ -338,7 +355,7 @@ Maybe<bool> Object::Equals(Handle<Object> x, Handle<Object> y) {
} else if (x->IsSymbol()) {
if (y->IsSymbol()) {
return Just(x.is_identical_to(y));
- } else if (y->IsJSReceiver() && !y->IsUndetectableObject()) {
+ } else if (y->IsJSReceiver()) {
if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(y))
.ToHandle(&y)) {
return Nothing<bool>();
@@ -350,7 +367,7 @@ Maybe<bool> Object::Equals(Handle<Object> x, Handle<Object> y) {
if (y->IsSimd128Value()) {
return Just(Simd128Value::Equals(Handle<Simd128Value>::cast(x),
Handle<Simd128Value>::cast(y)));
- } else if (y->IsJSReceiver() && !y->IsUndetectableObject()) {
+ } else if (y->IsJSReceiver()) {
if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(y))
.ToHandle(&y)) {
return Nothing<bool>();
@@ -358,11 +375,11 @@ Maybe<bool> Object::Equals(Handle<Object> x, Handle<Object> y) {
} else {
return Just(false);
}
- } else if (x->IsJSReceiver() && !x->IsUndetectableObject()) {
+ } else if (x->IsJSReceiver()) {
if (y->IsJSReceiver()) {
return Just(x.is_identical_to(y));
- } else if (y->IsNull() || y->IsUndefined()) {
- return Just(false);
+ } else if (y->IsUndetectable()) {
+ return Just(x->IsUndetectable());
} else if (y->IsBoolean()) {
y = Oddball::ToNumber(Handle<Oddball>::cast(y));
} else if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(x))
@@ -370,9 +387,7 @@ Maybe<bool> Object::Equals(Handle<Object> x, Handle<Object> y) {
return Nothing<bool>();
}
} else {
- return Just(
- (x->IsNull() || x->IsUndefined() || x->IsUndetectableObject()) &&
- (y->IsNull() || y->IsUndefined() || y->IsUndetectableObject()));
+ return Just(x->IsUndetectable() && y->IsUndetectable());
}
}
}
@@ -397,7 +412,7 @@ bool Object::StrictEquals(Object* that) {
Handle<String> Object::TypeOf(Isolate* isolate, Handle<Object> object) {
if (object->IsNumber()) return isolate->factory()->number_string();
if (object->IsOddball()) return handle(Oddball::cast(*object)->type_of());
- if (object->IsUndetectableObject()) {
+ if (object->IsUndetectable()) {
return isolate->factory()->undefined_string();
}
if (object->IsString()) return isolate->factory()->string_string();
@@ -618,10 +633,11 @@ MaybeHandle<FixedArray> Object::CreateListFromArrayLike(
FixedArray);
}
// 4. Let len be ? ToLength(? Get(obj, "length")).
+ Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
Handle<Object> raw_length_obj;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, raw_length_obj,
- JSReceiver::GetProperty(object, isolate->factory()->length_string()),
+ JSReceiver::GetProperty(receiver, isolate->factory()->length_string()),
FixedArray);
Handle<Object> raw_length_number;
ASSIGN_RETURN_ON_EXCEPTION(isolate, raw_length_number,
@@ -642,8 +658,9 @@ MaybeHandle<FixedArray> Object::CreateListFromArrayLike(
// 7a. Let indexName be ToString(index).
// 7b. Let next be ? Get(obj, indexName).
Handle<Object> next;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, next, Object::GetElement(isolate, object, index), FixedArray);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, next,
+ JSReceiver::GetElement(isolate, receiver, index),
+ FixedArray);
switch (element_types) {
case ElementTypes::kAll:
// Nothing to do.
@@ -678,13 +695,12 @@ Maybe<bool> JSReceiver::HasProperty(LookupIterator* it) {
case LookupIterator::TRANSITION:
UNREACHABLE();
case LookupIterator::JSPROXY:
- // Call the "has" trap on proxies.
return JSProxy::HasProperty(it->isolate(), it->GetHolder<JSProxy>(),
it->GetName());
case LookupIterator::INTERCEPTOR: {
Maybe<PropertyAttributes> result =
JSObject::GetPropertyAttributesWithInterceptor(it);
- if (!result.IsJust()) return Nothing<bool>();
+ if (result.IsNothing()) return Nothing<bool>();
if (result.FromJust() != ABSENT) return Just(true);
break;
}
@@ -692,7 +708,7 @@ Maybe<bool> JSReceiver::HasProperty(LookupIterator* it) {
if (it->HasAccess()) break;
Maybe<PropertyAttributes> result =
JSObject::GetPropertyAttributesWithFailedAccessCheck(it);
- if (!result.IsJust()) return Nothing<bool>();
+ if (result.IsNothing()) return Nothing<bool>();
return Just(result.FromJust() != ABSENT);
}
case LookupIterator::INTEGER_INDEXED_EXOTIC:
@@ -847,14 +863,6 @@ MaybeHandle<Object> JSProxy::GetProperty(Isolate* isolate,
}
-Handle<Object> JSReceiver::GetDataProperty(Handle<JSReceiver> object,
- Handle<Name> name) {
- LookupIterator it(object, name,
- LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
- return GetDataProperty(&it);
-}
-
-
Handle<Object> JSReceiver::GetDataProperty(LookupIterator* it) {
for (; it->IsFound(); it->Next()) {
switch (it->state()) {
@@ -901,26 +909,6 @@ bool Object::ToInt32(int32_t* value) {
}
-bool Object::ToUint32(uint32_t* value) {
- if (IsSmi()) {
- int num = Smi::cast(this)->value();
- if (num < 0) return false;
- *value = static_cast<uint32_t>(num);
- return true;
- }
- if (IsHeapNumber()) {
- double num = HeapNumber::cast(this)->value();
- if (num < 0) return false;
- uint32_t uint_value = FastD2UI(num);
- if (FastUI2D(uint_value) == num) {
- *value = uint_value;
- return true;
- }
- }
- return false;
-}
-
-
bool FunctionTemplateInfo::IsTemplateFor(Object* object) {
if (!object->IsHeapObject()) return false;
return IsTemplateFor(HeapObject::cast(object)->map());
@@ -992,19 +980,18 @@ MaybeHandle<JSObject> JSObject::New(Handle<JSFunction> constructor,
return result;
}
-
-Handle<FixedArray> JSObject::EnsureWritableFastElements(
- Handle<JSObject> object) {
+void JSObject::EnsureWritableFastElements(Handle<JSObject> object) {
DCHECK(object->HasFastSmiOrObjectElements() ||
object->HasFastStringWrapperElements());
- Isolate* isolate = object->GetIsolate();
- Handle<FixedArray> elems(FixedArray::cast(object->elements()), isolate);
- if (elems->map() != isolate->heap()->fixed_cow_array_map()) return elems;
+ FixedArray* raw_elems = FixedArray::cast(object->elements());
+ Heap* heap = object->GetHeap();
+ if (raw_elems->map() != heap->fixed_cow_array_map()) return;
+ Isolate* isolate = heap->isolate();
+ Handle<FixedArray> elems(raw_elems, isolate);
Handle<FixedArray> writable_elems = isolate->factory()->CopyFixedArrayWithMap(
elems, isolate->factory()->fixed_array_map());
object->set_elements(*writable_elems);
isolate->counters()->cow_arrays_converted()->Increment();
- return writable_elems;
}
@@ -1093,18 +1080,19 @@ MaybeHandle<Object> Object::GetPropertyWithAccessor(LookupIterator* it) {
v8::ToCData<v8::AccessorNameGetterCallback>(info->getter());
if (call_fun == nullptr) return isolate->factory()->undefined_value();
- LOG(isolate, ApiNamedPropertyAccess("load", *holder, *name));
+ if (info->is_sloppy() && !receiver->IsJSReceiver()) {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, receiver,
+ Object::ConvertReceiver(isolate, receiver),
+ Object);
+ }
+
PropertyCallbackArguments args(isolate, info->data(), *receiver, *holder,
Object::DONT_THROW);
- v8::Local<v8::Value> result = args.Call(call_fun, v8::Utils::ToLocal(name));
+ Handle<Object> result = args.Call(call_fun, name);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- if (result.IsEmpty()) {
- return ReadAbsentProperty(isolate, receiver, name);
- }
- Handle<Object> return_value = v8::Utils::OpenHandle(*result);
- return_value->VerifyApiCallResultType();
+ if (result.is_null()) return ReadAbsentProperty(isolate, receiver, name);
// Rebox handle before return.
- return handle(*return_value, isolate);
+ return handle(*result, isolate);
}
// Regular accessor.
@@ -1168,10 +1156,15 @@ Maybe<bool> Object::SetPropertyWithAccessor(LookupIterator* it,
// have a setter.
if (call_fun == nullptr) return Just(true);
- LOG(isolate, ApiNamedPropertyAccess("store", *holder, *name));
+ if (info->is_sloppy() && !receiver->IsJSReceiver()) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, receiver, Object::ConvertReceiver(isolate, receiver),
+ Nothing<bool>());
+ }
+
PropertyCallbackArguments args(isolate, info->data(), *receiver, *holder,
should_throw);
- args.Call(call_fun, v8::Utils::ToLocal(name), v8::Utils::ToLocal(value));
+ args.Call(call_fun, name, value);
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
return Just(true);
}
@@ -1433,8 +1426,11 @@ Object* Object::GetHash() {
Object* hash = GetSimpleHash();
if (hash->IsSmi()) return hash;
+ DisallowHeapAllocation no_gc;
DCHECK(IsJSReceiver());
- return JSReceiver::cast(this)->GetIdentityHash();
+ JSReceiver* receiver = JSReceiver::cast(this);
+ Isolate* isolate = receiver->GetIsolate();
+ return *JSReceiver::GetIdentityHash(isolate, handle(receiver, isolate));
}
@@ -1565,7 +1561,7 @@ MaybeHandle<Object> Object::ArraySpeciesConstructor(
return default_species;
}
if (original_array->IsJSArray() &&
- Handle<JSReceiver>::cast(original_array)->map()->new_target_is_base() &&
+ Handle<JSArray>::cast(original_array)->HasArrayPrototype(isolate) &&
isolate->IsArraySpeciesLookupChainIntact()) {
return default_species;
}
@@ -1592,8 +1588,8 @@ MaybeHandle<Object> Object::ArraySpeciesConstructor(
if (constructor->IsJSReceiver()) {
ASSIGN_RETURN_ON_EXCEPTION(
isolate, constructor,
- Object::GetProperty(constructor,
- isolate->factory()->species_symbol()),
+ JSReceiver::GetProperty(Handle<JSReceiver>::cast(constructor),
+ isolate->factory()->species_symbol()),
Object);
if (constructor->IsNull()) {
constructor = isolate->factory()->undefined_value();
@@ -1748,7 +1744,8 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// Byte size of the external String object.
int new_size = this->SizeFromMap(new_map);
- heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
+ heap->CreateFillerObjectAt(this->address() + new_size, size - new_size,
+ ClearRecordedSlots::kNo);
// We are storing the new map using release store after creating a filler for
// the left-over space to avoid races with the sweeper thread.
@@ -1809,7 +1806,8 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
// Byte size of the external String object.
int new_size = this->SizeFromMap(new_map);
- heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
+ heap->CreateFillerObjectAt(this->address() + new_size, size - new_size,
+ ClearRecordedSlots::kNo);
// We are storing the new map using release store after creating a filler for
// the left-over space to avoid races with the sweeper thread.
@@ -1962,7 +1960,7 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
break;
}
// All other JSObjects are rather similar to each other (JSObject,
- // JSGlobalProxy, JSGlobalObject, JSUndetectableObject, JSValue).
+ // JSGlobalProxy, JSGlobalObject, JSUndetectable, JSValue).
default: {
Map* map_of_this = map();
Heap* heap = GetHeap();
@@ -2501,11 +2499,9 @@ Handle<String> JSReceiver::GetConstructorName(Handle<JSReceiver> receiver) {
}
}
- if (FLAG_harmony_tostring) {
- Handle<Object> maybe_tag = JSReceiver::GetDataProperty(
- receiver, isolate->factory()->to_string_tag_symbol());
- if (maybe_tag->IsString()) return Handle<String>::cast(maybe_tag);
- }
+ Handle<Object> maybe_tag = JSReceiver::GetDataProperty(
+ receiver, isolate->factory()->to_string_tag_symbol());
+ if (maybe_tag->IsString()) return Handle<String>::cast(maybe_tag);
PrototypeIterator iter(isolate, receiver);
if (iter.IsAtEnd()) return handle(receiver->class_name());
@@ -2681,6 +2677,16 @@ const char* Representation::Mnemonic() const {
}
}
+bool Map::InstancesNeedRewriting(Map* target) {
+ int target_number_of_fields = target->NumberOfFields();
+ int target_inobject = target->GetInObjectProperties();
+ int target_unused = target->unused_property_fields();
+ int old_number_of_fields;
+
+ return InstancesNeedRewriting(target, target_number_of_fields,
+ target_inobject, target_unused,
+ &old_number_of_fields);
+}
bool Map::InstancesNeedRewriting(Map* target, int target_number_of_fields,
int target_inobject, int target_unused,
@@ -2953,8 +2959,8 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
if (instance_size_delta > 0) {
Address address = object->address();
- heap->CreateFillerObjectAt(
- address + new_instance_size, instance_size_delta);
+ heap->CreateFillerObjectAt(address + new_instance_size, instance_size_delta,
+ ClearRecordedSlots::kYes);
heap->AdjustLiveBytes(*object, -instance_size_delta,
Heap::CONCURRENT_TO_SWEEPER);
}
@@ -3049,7 +3055,7 @@ void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
if (instance_size_delta > 0) {
Heap* heap = isolate->heap();
heap->CreateFillerObjectAt(object->address() + new_instance_size,
- instance_size_delta);
+ instance_size_delta, ClearRecordedSlots::kYes);
heap->AdjustLiveBytes(*object, -instance_size_delta,
Heap::CONCURRENT_TO_SWEEPER);
}
@@ -3141,10 +3147,10 @@ int Map::NumberOfFields() {
return result;
}
-
Handle<Map> Map::CopyGeneralizeAllRepresentations(
- Handle<Map> map, int modify_index, StoreMode store_mode, PropertyKind kind,
- PropertyAttributes attributes, const char* reason) {
+ Handle<Map> map, ElementsKind elements_kind, int modify_index,
+ StoreMode store_mode, PropertyKind kind, PropertyAttributes attributes,
+ const char* reason) {
Isolate* isolate = map->GetIsolate();
Handle<DescriptorArray> old_descriptors(map->instance_descriptors(), isolate);
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
@@ -3200,6 +3206,7 @@ Handle<Map> Map::CopyGeneralizeAllRepresentations(
MaybeHandle<Object>());
}
}
+ new_map->set_elements_kind(elements_kind);
return new_map;
}
@@ -3235,7 +3242,7 @@ void Map::ReplaceDescriptors(DescriptorArray* new_descriptors,
}
DescriptorArray* to_replace = instance_descriptors();
- GetHeap()->incremental_marking()->RecordWrites(to_replace);
+ GetHeap()->incremental_marking()->IterateBlackObject(to_replace);
Map* current = this;
while (current->instance_descriptors() == to_replace) {
Object* next = current->GetBackPointer();
@@ -3325,25 +3332,40 @@ void Map::UpdateFieldType(int descriptor, Handle<Name> name,
Representation new_representation,
Handle<Object> new_wrapped_type) {
DCHECK(new_wrapped_type->IsSmi() || new_wrapped_type->IsWeakCell());
+ // We store raw pointers in the queue, so no allocations are allowed.
DisallowHeapAllocation no_allocation;
PropertyDetails details = instance_descriptors()->GetDetails(descriptor);
if (details.type() != DATA) return;
- Object* transitions = raw_transitions();
- int num_transitions = TransitionArray::NumberOfTransitions(transitions);
- for (int i = 0; i < num_transitions; ++i) {
- Map* target = TransitionArray::GetTarget(transitions, i);
- target->UpdateFieldType(descriptor, name, new_representation,
- new_wrapped_type);
- }
- // It is allowed to change representation here only from None to something.
- DCHECK(details.representation().Equals(new_representation) ||
- details.representation().IsNone());
- // Skip if already updated the shared descriptor.
- if (instance_descriptors()->GetValue(descriptor) == *new_wrapped_type) return;
- DataDescriptor d(name, instance_descriptors()->GetFieldIndex(descriptor),
- new_wrapped_type, details.attributes(), new_representation);
- instance_descriptors()->Replace(descriptor, &d);
+ Zone zone(GetIsolate()->allocator());
+ ZoneQueue<Map*> backlog(&zone);
+ backlog.push(this);
+
+ while (!backlog.empty()) {
+ Map* current = backlog.front();
+ backlog.pop();
+
+ Object* transitions = current->raw_transitions();
+ int num_transitions = TransitionArray::NumberOfTransitions(transitions);
+ for (int i = 0; i < num_transitions; ++i) {
+ Map* target = TransitionArray::GetTarget(transitions, i);
+ backlog.push(target);
+ }
+ DescriptorArray* descriptors = current->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+
+ // It is allowed to change representation here only from None to something.
+ DCHECK(details.representation().Equals(new_representation) ||
+ details.representation().IsNone());
+
+ // Skip if already updated the shared descriptor.
+ if (descriptors->GetValue(descriptor) != *new_wrapped_type) {
+ DataDescriptor d(name, descriptors->GetFieldIndex(descriptor),
+ new_wrapped_type, details.attributes(),
+ new_representation);
+ descriptors->Replace(descriptor, &d);
+ }
+ }
}
bool FieldTypeIsCleared(Representation rep, FieldType* type) {
@@ -3437,9 +3459,9 @@ static inline Handle<FieldType> GetFieldType(
}
}
-
-// Reconfigures property at |modify_index| with |new_kind|, |new_attributes|,
-// |store_mode| and/or |new_representation|/|new_field_type|.
+// Reconfigures elements kind to |new_elements_kind| and/or property at
+// |modify_index| with |new_kind|, |new_attributes|, |store_mode| and/or
+// |new_representation|/|new_field_type|.
// If |modify_index| is negative then no properties are reconfigured but the
// map is migrated to the up-to-date non-deprecated state.
//
@@ -3449,6 +3471,7 @@ static inline Handle<FieldType> GetFieldType(
// any potential new (partial) version of the type in the transition tree.
// To do this, on each rewrite:
// - Search the root of the transition tree using FindRootMap.
+// - Find/create a |root_map| with requested |new_elements_kind|.
// - Find |target_map|, the newest matching version of this map using the
// virtually "enhanced" |old_map|'s descriptor array (i.e. whose entry at
// |modify_index| is considered to be of |new_kind| and having
@@ -3464,12 +3487,13 @@ static inline Handle<FieldType> GetFieldType(
// Return it.
// - Otherwise, invalidate the outdated transition target from |target_map|, and
// replace its transition tree with a new branch for the updated descriptors.
-Handle<Map> Map::ReconfigureProperty(Handle<Map> old_map, int modify_index,
- PropertyKind new_kind,
- PropertyAttributes new_attributes,
- Representation new_representation,
- Handle<FieldType> new_field_type,
- StoreMode store_mode) {
+Handle<Map> Map::Reconfigure(Handle<Map> old_map,
+ ElementsKind new_elements_kind, int modify_index,
+ PropertyKind new_kind,
+ PropertyAttributes new_attributes,
+ Representation new_representation,
+ Handle<FieldType> new_field_type,
+ StoreMode store_mode) {
DCHECK_NE(kAccessor, new_kind); // TODO(ishell): not supported yet.
DCHECK(store_mode != FORCE_FIELD || modify_index >= 0);
Isolate* isolate = old_map->GetIsolate();
@@ -3484,7 +3508,8 @@ Handle<Map> Map::ReconfigureProperty(Handle<Map> old_map, int modify_index,
// uninitialized value for representation None can be overwritten by both
// smi and tagged values. Doubles, however, would require a box allocation.
if (modify_index >= 0 && !new_representation.IsNone() &&
- !new_representation.IsDouble()) {
+ !new_representation.IsDouble() &&
+ old_map->elements_kind() == new_elements_kind) {
PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
Representation old_representation = old_details.representation();
@@ -3517,38 +3542,39 @@ Handle<Map> Map::ReconfigureProperty(Handle<Map> old_map, int modify_index,
// Check the state of the root map.
Handle<Map> root_map(old_map->FindRootMap(), isolate);
if (!old_map->EquivalentToForTransition(*root_map)) {
- return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
- new_kind, new_attributes,
- "GenAll_NotEquivalent");
+ return CopyGeneralizeAllRepresentations(
+ old_map, new_elements_kind, modify_index, store_mode, new_kind,
+ new_attributes, "GenAll_NotEquivalent");
}
ElementsKind from_kind = root_map->elements_kind();
- ElementsKind to_kind = old_map->elements_kind();
+ ElementsKind to_kind = new_elements_kind;
// TODO(ishell): Add a test for SLOW_SLOPPY_ARGUMENTS_ELEMENTS.
if (from_kind != to_kind && to_kind != DICTIONARY_ELEMENTS &&
+ to_kind != SLOW_STRING_WRAPPER_ELEMENTS &&
to_kind != SLOW_SLOPPY_ARGUMENTS_ELEMENTS &&
!(IsTransitionableFastElementsKind(from_kind) &&
IsMoreGeneralElementsKindTransition(from_kind, to_kind))) {
- return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
- new_kind, new_attributes,
- "GenAll_InvalidElementsTransition");
+ return CopyGeneralizeAllRepresentations(
+ old_map, to_kind, modify_index, store_mode, new_kind, new_attributes,
+ "GenAll_InvalidElementsTransition");
}
int root_nof = root_map->NumberOfOwnDescriptors();
if (modify_index >= 0 && modify_index < root_nof) {
PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
if (old_details.kind() != new_kind ||
old_details.attributes() != new_attributes) {
- return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
- new_kind, new_attributes,
- "GenAll_RootModification1");
+ return CopyGeneralizeAllRepresentations(
+ old_map, to_kind, modify_index, store_mode, new_kind, new_attributes,
+ "GenAll_RootModification1");
}
if ((old_details.type() != DATA && store_mode == FORCE_FIELD) ||
(old_details.type() == DATA &&
(!new_field_type->NowIs(old_descriptors->GetFieldType(modify_index)) ||
!new_representation.fits_into(old_details.representation())))) {
- return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
- new_kind, new_attributes,
- "GenAll_RootModification2");
+ return CopyGeneralizeAllRepresentations(
+ old_map, to_kind, modify_index, store_mode, new_kind, new_attributes,
+ "GenAll_RootModification2");
}
}
@@ -3602,9 +3628,9 @@ Handle<Map> Map::ReconfigureProperty(Handle<Map> old_map, int modify_index,
if (next_kind == kAccessor &&
!EqualImmutableValues(old_descriptors->GetValue(i),
tmp_descriptors->GetValue(i))) {
- return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
- new_kind, new_attributes,
- "GenAll_Incompatible");
+ return CopyGeneralizeAllRepresentations(
+ old_map, to_kind, modify_index, store_mode, new_kind, new_attributes,
+ "GenAll_Incompatible");
}
if (next_location == kField && tmp_details.location() == kDescriptor) break;
@@ -3697,9 +3723,9 @@ Handle<Map> Map::ReconfigureProperty(Handle<Map> old_map, int modify_index,
if (next_kind == kAccessor &&
!EqualImmutableValues(old_descriptors->GetValue(i),
tmp_descriptors->GetValue(i))) {
- return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
- new_kind, new_attributes,
- "GenAll_Incompatible");
+ return CopyGeneralizeAllRepresentations(
+ old_map, to_kind, modify_index, store_mode, new_kind, new_attributes,
+ "GenAll_Incompatible");
}
DCHECK(!tmp_map->is_deprecated());
target_map = tmp_map;
@@ -3930,9 +3956,9 @@ Handle<Map> Map::ReconfigureProperty(Handle<Map> old_map, int modify_index,
// could be inserted regardless of whether transitions array is full or not.
if (maybe_transition == NULL &&
!TransitionArray::CanHaveMoreTransitions(split_map)) {
- return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
- new_kind, new_attributes,
- "GenAll_CantHaveMoreTransitions");
+ return CopyGeneralizeAllRepresentations(
+ old_map, to_kind, modify_index, store_mode, new_kind, new_attributes,
+ "GenAll_CantHaveMoreTransitions");
}
old_map->NotifyLeafMapLayoutChange();
@@ -4013,18 +4039,27 @@ MaybeHandle<Map> Map::TryUpdate(Handle<Map> old_map) {
if (root_map == NULL) return MaybeHandle<Map>();
// From here on, use the map with correct elements kind as root map.
}
- int root_nof = root_map->NumberOfOwnDescriptors();
+ Map* new_map = root_map->TryReplayPropertyTransitions(*old_map);
+ if (new_map == nullptr) return MaybeHandle<Map>();
+ return handle(new_map);
+}
+
+Map* Map::TryReplayPropertyTransitions(Map* old_map) {
+ DisallowHeapAllocation no_allocation;
+ DisallowDeoptimization no_deoptimization(GetIsolate());
+
+ int root_nof = NumberOfOwnDescriptors();
int old_nof = old_map->NumberOfOwnDescriptors();
DescriptorArray* old_descriptors = old_map->instance_descriptors();
- Map* new_map = root_map;
+ Map* new_map = this;
for (int i = root_nof; i < old_nof; ++i) {
PropertyDetails old_details = old_descriptors->GetDetails(i);
Map* transition = TransitionArray::SearchTransition(
new_map, old_details.kind(), old_descriptors->GetKey(i),
old_details.attributes());
- if (transition == NULL) return MaybeHandle<Map>();
+ if (transition == NULL) return nullptr;
new_map = transition;
DescriptorArray* new_descriptors = new_map->instance_descriptors();
@@ -4032,7 +4067,7 @@ MaybeHandle<Map> Map::TryUpdate(Handle<Map> old_map) {
DCHECK_EQ(old_details.kind(), new_details.kind());
DCHECK_EQ(old_details.attributes(), new_details.attributes());
if (!old_details.representation().fits_into(new_details.representation())) {
- return MaybeHandle<Map>();
+ return nullptr;
}
switch (new_details.type()) {
case DATA: {
@@ -4040,20 +4075,20 @@ MaybeHandle<Map> Map::TryUpdate(Handle<Map> old_map) {
// Cleared field types need special treatment. They represent lost
// knowledge, so we must first generalize the new_type to "Any".
if (FieldTypeIsCleared(new_details.representation(), new_type)) {
- return MaybeHandle<Map>();
+ return nullptr;
}
PropertyType old_property_type = old_details.type();
if (old_property_type == DATA) {
FieldType* old_type = old_descriptors->GetFieldType(i);
if (FieldTypeIsCleared(old_details.representation(), old_type) ||
!old_type->NowIs(new_type)) {
- return MaybeHandle<Map>();
+ return nullptr;
}
} else {
DCHECK(old_property_type == DATA_CONSTANT);
Object* old_value = old_descriptors->GetValue(i);
if (!new_type->NowContains(old_value)) {
- return MaybeHandle<Map>();
+ return nullptr;
}
}
break;
@@ -4071,14 +4106,14 @@ MaybeHandle<Map> Map::TryUpdate(Handle<Map> old_map) {
Object* old_value = old_descriptors->GetValue(i);
Object* new_value = new_descriptors->GetValue(i);
if (old_details.location() == kField || old_value != new_value) {
- return MaybeHandle<Map>();
+ return nullptr;
}
break;
}
}
}
- if (new_map->NumberOfOwnDescriptors() != old_nof) return MaybeHandle<Map>();
- return handle(new_map);
+ if (new_map->NumberOfOwnDescriptors() != old_nof) return nullptr;
+ return new_map;
}
@@ -4104,17 +4139,23 @@ Maybe<bool> JSObject::SetPropertyWithInterceptor(LookupIterator* it,
if (interceptor->setter()->IsUndefined()) return Just(false);
Handle<JSObject> holder = it->GetHolder<JSObject>();
- v8::Local<v8::Value> result;
- PropertyCallbackArguments args(isolate, interceptor->data(),
- *it->GetReceiver(), *holder, should_throw);
+ bool result;
+ Handle<Object> receiver = it->GetReceiver();
+ if (!receiver->IsJSReceiver()) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, receiver,
+ Object::ConvertReceiver(isolate, receiver),
+ Nothing<bool>());
+ }
+ PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
+ *holder, should_throw);
if (it->IsElement()) {
uint32_t index = it->index();
v8::IndexedPropertySetterCallback setter =
v8::ToCData<v8::IndexedPropertySetterCallback>(interceptor->setter());
- LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-set", *holder, index));
- result = args.Call(setter, index, v8::Utils::ToLocal(value));
+ // TODO(neis): In the future, we may want to actually return the
+ // interceptor's result, which then should be a boolean.
+ result = !args.Call(setter, index, value).is_null();
} else {
Handle<Name> name = it->name();
DCHECK(!name->IsPrivate());
@@ -4126,21 +4167,11 @@ Maybe<bool> JSObject::SetPropertyWithInterceptor(LookupIterator* it,
v8::GenericNamedPropertySetterCallback setter =
v8::ToCData<v8::GenericNamedPropertySetterCallback>(
interceptor->setter());
- LOG(it->isolate(),
- ApiNamedPropertyAccess("interceptor-named-set", *holder, *name));
- result =
- args.Call(setter, v8::Utils::ToLocal(name), v8::Utils::ToLocal(value));
+ result = !args.Call(setter, name, value).is_null();
}
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(it->isolate(), Nothing<bool>());
- if (result.IsEmpty()) return Just(false);
-#ifdef DEBUG
- Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
- result_internal->VerifyApiCallResultType();
-#endif
- return Just(true);
- // TODO(neis): In the future, we may want to actually return the interceptor's
- // result, which then should be a boolean.
+ return Just(result);
}
@@ -4159,7 +4190,7 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
LanguageMode language_mode,
StoreFromKeyed store_mode,
bool* found) {
- it->UpdateProtector();
+ DCHECK(it->IsFound());
ShouldThrow should_throw =
is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
@@ -4167,10 +4198,7 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
// interceptor calls.
AssertNoContextChange ncc(it->isolate());
- *found = true;
-
- bool done = false;
- for (; it->IsFound(); it->Next()) {
+ do {
switch (it->state()) {
case LookupIterator::NOT_FOUND:
UNREACHABLE();
@@ -4195,10 +4223,12 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
Maybe<PropertyAttributes> maybe_attributes =
JSObject::GetPropertyAttributesWithInterceptor(it);
if (!maybe_attributes.IsJust()) return Nothing<bool>();
- done = maybe_attributes.FromJust() != ABSENT;
- if (done && (maybe_attributes.FromJust() & READ_ONLY) != 0) {
+ if (maybe_attributes.FromJust() == ABSENT) break;
+ if ((maybe_attributes.FromJust() & READ_ONLY) != 0) {
return WriteToReadOnlyProperty(it, value, should_throw);
}
+ *found = false;
+ return Nothing<bool>();
}
break;
@@ -4210,13 +4240,13 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
if (accessors->IsAccessorInfo() &&
!it->HolderIsReceiverOrHiddenPrototype() &&
AccessorInfo::cast(*accessors)->is_special_data_property()) {
- done = true;
- break;
+ *found = false;
+ return Nothing<bool>();
}
return SetPropertyWithAccessor(it, value, should_throw);
}
case LookupIterator::INTEGER_INDEXED_EXOTIC:
- // TODO(verwaest): We should throw an exception.
+ // TODO(verwaest): We should throw an exception if holder is receiver.
return Just(true);
case LookupIterator::DATA:
@@ -4226,38 +4256,39 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
if (it->HolderIsReceiverOrHiddenPrototype()) {
return SetDataProperty(it, value);
}
- done = true;
- break;
-
+ // Fall through.
case LookupIterator::TRANSITION:
- done = true;
- break;
+ *found = false;
+ return Nothing<bool>();
}
+ it->Next();
+ } while (it->IsFound());
+
+ *found = false;
+ return Nothing<bool>();
+}
+
- if (done) break;
+Maybe<bool> Object::SetProperty(LookupIterator* it, Handle<Object> value,
+ LanguageMode language_mode,
+ StoreFromKeyed store_mode) {
+ it->UpdateProtector();
+ if (it->IsFound()) {
+ bool found = true;
+ Maybe<bool> result =
+ SetPropertyInternal(it, value, language_mode, store_mode, &found);
+ if (found) return result;
}
// If the receiver is the JSGlobalObject, the store was contextual. In case
// the property did not exist yet on the global object itself, we have to
// throw a reference error in strict mode. In sloppy mode, we continue.
- if (it->GetReceiver()->IsJSGlobalObject() && is_strict(language_mode)) {
+ if (is_strict(language_mode) && it->GetReceiver()->IsJSGlobalObject()) {
it->isolate()->Throw(*it->isolate()->factory()->NewReferenceError(
MessageTemplate::kNotDefined, it->name()));
return Nothing<bool>();
}
- *found = false;
- return Nothing<bool>();
-}
-
-
-Maybe<bool> Object::SetProperty(LookupIterator* it, Handle<Object> value,
- LanguageMode language_mode,
- StoreFromKeyed store_mode) {
- bool found = false;
- Maybe<bool> result =
- SetPropertyInternal(it, value, language_mode, store_mode, &found);
- if (found) return result;
ShouldThrow should_throw =
is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
return AddDataProperty(it, value, NONE, should_throw, store_mode);
@@ -4269,10 +4300,13 @@ Maybe<bool> Object::SetSuperProperty(LookupIterator* it, Handle<Object> value,
StoreFromKeyed store_mode) {
Isolate* isolate = it->isolate();
- bool found = false;
- Maybe<bool> result =
- SetPropertyInternal(it, value, language_mode, store_mode, &found);
- if (found) return result;
+ it->UpdateProtector();
+ if (it->IsFound()) {
+ bool found = true;
+ Maybe<bool> result =
+ SetPropertyInternal(it, value, language_mode, store_mode, &found);
+ if (found) return result;
+ }
// The property either doesn't exist on the holder or exists there as a data
// property.
@@ -4285,7 +4319,7 @@ Maybe<bool> Object::SetSuperProperty(LookupIterator* it, Handle<Object> value,
}
Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(it->GetReceiver());
- LookupIterator::Configuration c = LookupIterator::OWN;
+ LookupIterator::Configuration c = LookupIterator::HIDDEN;
LookupIterator own_lookup =
it->IsElement() ? LookupIterator(isolate, receiver, it->index(), c)
: LookupIterator(receiver, it->name(), c);
@@ -4347,8 +4381,7 @@ Maybe<bool> Object::SetSuperProperty(LookupIterator* it, Handle<Object> value,
}
}
- return JSObject::AddDataProperty(&own_lookup, value, NONE, should_throw,
- store_mode);
+ return AddDataProperty(&own_lookup, value, NONE, should_throw, store_mode);
}
MaybeHandle<Object> Object::ReadAbsentProperty(LookupIterator* it) {
@@ -4617,7 +4650,7 @@ void Map::EnsureDescriptorSlack(Handle<Map> map, int slack) {
}
// Replace descriptors by new_descriptors in all maps that share it.
- map->GetHeap()->incremental_marking()->RecordWrites(*descriptors);
+ map->GetHeap()->incremental_marking()->IterateBlackObject(*descriptors);
Map* current = *map;
while (current->instance_descriptors() == *descriptors) {
@@ -4736,17 +4769,30 @@ static bool ContainsMap(MapHandleList* maps, Map* map) {
return false;
}
+Map* Map::FindElementsKindTransitionedMap(MapHandleList* candidates) {
+ DisallowHeapAllocation no_allocation;
+ DisallowDeoptimization no_deoptimization(GetIsolate());
-Handle<Map> Map::FindTransitionedMap(Handle<Map> map,
- MapHandleList* candidates) {
- ElementsKind kind = map->elements_kind();
+ ElementsKind kind = elements_kind();
bool packed = IsFastPackedElementsKind(kind);
Map* transition = nullptr;
if (IsTransitionableFastElementsKind(kind)) {
- for (Map* current = map->ElementsTransitionMap();
- current != nullptr && current->has_fast_elements();
- current = current->ElementsTransitionMap()) {
+ // Check the state of the root map.
+ Map* root_map = FindRootMap();
+ if (!EquivalentToForTransition(root_map)) return nullptr;
+ root_map = root_map->LookupElementsTransitionMap(kind);
+ DCHECK_NOT_NULL(root_map);
+ // Starting from the next existing elements kind transition try to
+ // replay the property transitions that does not involve instance rewriting
+ // (ElementsTransitionAndStoreStub does not support that).
+ for (root_map = root_map->ElementsTransitionMap();
+ root_map != nullptr && root_map->has_fast_elements();
+ root_map = root_map->ElementsTransitionMap()) {
+ Map* current = root_map->TryReplayPropertyTransitions(this);
+ if (current == nullptr) continue;
+ if (InstancesNeedRewriting(current)) continue;
+
if (ContainsMap(candidates, current) &&
(packed || !IsFastPackedElementsKind(current->elements_kind()))) {
transition = current;
@@ -4754,11 +4800,14 @@ Handle<Map> Map::FindTransitionedMap(Handle<Map> map,
}
}
}
- return transition == nullptr ? Handle<Map>() : handle(transition);
+ return transition;
}
static Map* FindClosestElementsTransition(Map* map, ElementsKind to_kind) {
+ // Ensure we are requested to search elements kind transition "near the root".
+ DCHECK_EQ(map->FindRootMap()->NumberOfOwnDescriptors(),
+ map->NumberOfOwnDescriptors());
Map* current_map = map;
ElementsKind kind = map->elements_kind();
@@ -4857,11 +4906,9 @@ Handle<Map> Map::TransitionElementsTo(Handle<Map> map,
} else if (IsFastElementsKind(from_kind) && IsFastElementsKind(to_kind)) {
// Reuse map transitions for JSArrays.
DisallowHeapAllocation no_gc;
- Strength strength = map->is_strong() ? Strength::STRONG : Strength::WEAK;
- if (native_context->get(Context::ArrayMapIndex(from_kind, strength)) ==
- *map) {
+ if (native_context->get(Context::ArrayMapIndex(from_kind)) == *map) {
Object* maybe_transitioned_map =
- native_context->get(Context::ArrayMapIndex(to_kind, strength));
+ native_context->get(Context::ArrayMapIndex(to_kind));
if (maybe_transitioned_map->IsMap()) {
return handle(Map::cast(maybe_transitioned_map), isolate);
}
@@ -4889,7 +4936,7 @@ Handle<Map> Map::TransitionElementsTo(Handle<Map> map,
return Map::CopyAsElementsKind(map, to_kind, OMIT_TRANSITION);
}
- return Map::AsElementsKind(map, to_kind);
+ return Map::ReconfigureElementsKind(map, to_kind);
}
@@ -5189,11 +5236,9 @@ MaybeHandle<Context> JSReceiver::GetFunctionRealm(Handle<JSReceiver> receiver) {
Maybe<PropertyAttributes> JSProxy::GetPropertyAttributes(LookupIterator* it) {
- Isolate* isolate = it->isolate();
- HandleScope scope(isolate);
PropertyDescriptor desc;
Maybe<bool> found = JSProxy::GetOwnPropertyDescriptor(
- isolate, it->GetHolder<JSProxy>(), it->GetName(), &desc);
+ it->isolate(), it->GetHolder<JSProxy>(), it->GetName(), &desc);
MAYBE_RETURN(found, Nothing<PropertyAttributes>());
if (!found.FromJust()) return Just(ABSENT);
return Just(desc.ToAttributes());
@@ -5215,7 +5260,7 @@ void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
} else {
TransitionElementsKind(object, to_kind);
}
- map = Map::AsElementsKind(map, to_kind);
+ map = Map::ReconfigureElementsKind(map, to_kind);
}
JSObject::MigrateToMap(object, map);
}
@@ -5262,7 +5307,7 @@ bool JSObject::TryMigrateInstance(Handle<JSObject> object) {
void JSObject::AddProperty(Handle<JSObject> object, Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes) {
- LookupIterator it(object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ LookupIterator it(object, name, object, LookupIterator::OWN_SKIP_INTERCEPTOR);
CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
#ifdef DEBUG
uint32_t index;
@@ -5373,7 +5418,6 @@ Maybe<bool> JSObject::DefineOwnPropertyIgnoreAttributes(
should_throw);
case LookupIterator::DATA: {
- Handle<Object> old_value = it->factory()->the_hole_value();
// Regular property update if the attributes match.
if (it->property_attributes() == attributes) {
return SetDataProperty(it, value);
@@ -5387,6 +5431,7 @@ Maybe<bool> JSObject::DefineOwnPropertyIgnoreAttributes(
}
// Reconfigure the data property if the attributes mismatch.
+ Handle<Object> old_value = it->factory()->the_hole_value();
if (is_observed) old_value = it->GetDataValue();
it->ReconfigureDataProperty(value, attributes);
@@ -5413,7 +5458,7 @@ MaybeHandle<Object> JSObject::SetOwnPropertyIgnoreAttributes(
Handle<JSObject> object, Handle<Name> name, Handle<Object> value,
PropertyAttributes attributes) {
DCHECK(!value->IsTheHole());
- LookupIterator it(object, name, LookupIterator::OWN);
+ LookupIterator it(object, name, object, LookupIterator::OWN);
return DefineOwnPropertyIgnoreAttributes(&it, value, attributes);
}
@@ -5421,7 +5466,7 @@ MaybeHandle<Object> JSObject::SetOwnElementIgnoreAttributes(
Handle<JSObject> object, uint32_t index, Handle<Object> value,
PropertyAttributes attributes) {
Isolate* isolate = object->GetIsolate();
- LookupIterator it(isolate, object, index, LookupIterator::OWN);
+ LookupIterator it(isolate, object, index, object, LookupIterator::OWN);
return DefineOwnPropertyIgnoreAttributes(&it, value, attributes);
}
@@ -5429,8 +5474,8 @@ MaybeHandle<Object> JSObject::DefinePropertyOrElementIgnoreAttributes(
Handle<JSObject> object, Handle<Name> name, Handle<Object> value,
PropertyAttributes attributes) {
Isolate* isolate = object->GetIsolate();
- LookupIterator it = LookupIterator::PropertyOrElement(isolate, object, name,
- LookupIterator::OWN);
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, object, name, object, LookupIterator::OWN);
return DefineOwnPropertyIgnoreAttributes(&it, value, attributes);
}
@@ -5449,17 +5494,20 @@ Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithInterceptor(
!interceptor->can_intercept_symbols()) {
return Just(ABSENT);
}
- PropertyCallbackArguments args(isolate, interceptor->data(),
- *it->GetReceiver(), *holder,
- Object::DONT_THROW);
+ Handle<Object> receiver = it->GetReceiver();
+ if (!receiver->IsJSReceiver()) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, receiver,
+ Object::ConvertReceiver(isolate, receiver),
+ Nothing<PropertyAttributes>());
+ }
+ PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
+ *holder, Object::DONT_THROW);
if (!interceptor->query()->IsUndefined()) {
- v8::Local<v8::Integer> result;
+ Handle<Object> result;
if (it->IsElement()) {
uint32_t index = it->index();
v8::IndexedPropertyQueryCallback query =
v8::ToCData<v8::IndexedPropertyQueryCallback>(interceptor->query());
- LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-has", *holder, index));
result = args.Call(query, index);
} else {
Handle<Name> name = it->name();
@@ -5467,25 +5515,20 @@ Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithInterceptor(
v8::GenericNamedPropertyQueryCallback query =
v8::ToCData<v8::GenericNamedPropertyQueryCallback>(
interceptor->query());
- LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-has", *holder, *name));
- result = args.Call(query, v8::Utils::ToLocal(name));
+ result = args.Call(query, name);
}
- if (!result.IsEmpty()) {
- DCHECK(result->IsInt32());
- return Just(static_cast<PropertyAttributes>(
- result->Int32Value(reinterpret_cast<v8::Isolate*>(isolate)
- ->GetCurrentContext()).FromJust()));
+ if (!result.is_null()) {
+ int32_t value;
+ CHECK(result->ToInt32(&value));
+ return Just(static_cast<PropertyAttributes>(value));
}
} else if (!interceptor->getter()->IsUndefined()) {
// TODO(verwaest): Use GetPropertyWithInterceptor?
- v8::Local<v8::Value> result;
+ Handle<Object> result;
if (it->IsElement()) {
uint32_t index = it->index();
v8::IndexedPropertyGetterCallback getter =
v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
- LOG(isolate, ApiIndexedPropertyAccess("interceptor-indexed-get-has",
- *holder, index));
result = args.Call(getter, index);
} else {
Handle<Name> name = it->name();
@@ -5493,11 +5536,9 @@ Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithInterceptor(
v8::GenericNamedPropertyGetterCallback getter =
v8::ToCData<v8::GenericNamedPropertyGetterCallback>(
interceptor->getter());
- LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-get-has", *holder, *name));
- result = args.Call(getter, v8::Utils::ToLocal(name));
+ result = args.Call(getter, name);
}
- if (!result.IsEmpty()) return Just(DONT_ENUM);
+ if (!result.is_null()) return Just(DONT_ENUM);
}
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<PropertyAttributes>());
@@ -5751,35 +5792,6 @@ void JSObject::ResetElements(Handle<JSObject> object) {
}
-static Handle<SeededNumberDictionary> CopyFastElementsToDictionary(
- Handle<FixedArrayBase> array, int length,
- Handle<SeededNumberDictionary> dictionary, bool used_as_prototype) {
- Isolate* isolate = array->GetIsolate();
- Factory* factory = isolate->factory();
- bool has_double_elements = array->IsFixedDoubleArray();
- for (int i = 0; i < length; i++) {
- Handle<Object> value;
- if (has_double_elements) {
- Handle<FixedDoubleArray> double_array =
- Handle<FixedDoubleArray>::cast(array);
- if (double_array->is_the_hole(i)) {
- value = factory->the_hole_value();
- } else {
- value = factory->NewHeapNumber(double_array->get_scalar(i));
- }
- } else {
- value = handle(Handle<FixedArray>::cast(array)->get(i), isolate);
- }
- if (!value->IsTheHole()) {
- PropertyDetails details = PropertyDetails::Empty();
- dictionary = SeededNumberDictionary::AddNumberEntry(
- dictionary, i, value, details, used_as_prototype);
- }
- }
- return dictionary;
-}
-
-
void JSObject::RequireSlowElements(SeededNumberDictionary* dictionary) {
if (dictionary->requires_slow_elements()) return;
dictionary->set_requires_slow_elements();
@@ -5790,40 +5802,23 @@ void JSObject::RequireSlowElements(SeededNumberDictionary* dictionary) {
}
-Handle<SeededNumberDictionary> JSObject::GetNormalizedElementDictionary(
- Handle<JSObject> object, Handle<FixedArrayBase> elements) {
- DCHECK(!object->HasDictionaryElements());
- DCHECK(!object->HasSlowArgumentsElements());
- Isolate* isolate = object->GetIsolate();
- // Ensure that notifications fire if the array or object prototypes are
- // normalizing.
- isolate->UpdateArrayProtectorOnNormalizeElements(object);
- int length = object->IsJSArray()
- ? Smi::cast(Handle<JSArray>::cast(object)->length())->value()
- : elements->length();
- int used = object->GetFastElementsUsage();
- Handle<SeededNumberDictionary> dictionary =
- SeededNumberDictionary::New(isolate, used);
- return CopyFastElementsToDictionary(elements, length, dictionary,
- object->map()->is_prototype_map());
-}
-
-
Handle<SeededNumberDictionary> JSObject::NormalizeElements(
Handle<JSObject> object) {
DCHECK(!object->HasFixedTypedArrayElements());
Isolate* isolate = object->GetIsolate();
-
- // Find the backing store.
- Handle<FixedArrayBase> elements(object->elements(), isolate);
bool is_arguments = object->HasSloppyArgumentsElements();
- if (is_arguments) {
- FixedArray* parameter_map = FixedArray::cast(*elements);
- elements = handle(FixedArrayBase::cast(parameter_map->get(1)), isolate);
- }
+ {
+ DisallowHeapAllocation no_gc;
+ FixedArrayBase* elements = object->elements();
- if (elements->IsDictionary()) {
- return Handle<SeededNumberDictionary>::cast(elements);
+ if (is_arguments) {
+ FixedArray* parameter_map = FixedArray::cast(elements);
+ elements = FixedArrayBase::cast(parameter_map->get(1));
+ }
+
+ if (elements->IsDictionary()) {
+ return handle(SeededNumberDictionary::cast(elements), isolate);
+ }
}
DCHECK(object->HasFastSmiOrObjectElements() ||
@@ -5832,7 +5827,7 @@ Handle<SeededNumberDictionary> JSObject::NormalizeElements(
object->HasFastStringWrapperElements());
Handle<SeededNumberDictionary> dictionary =
- GetNormalizedElementDictionary(object, elements);
+ object->GetElementsAccessor()->Normalize(object);
// Switch to using the dictionary as the backing storage for elements.
ElementsKind target_kind = is_arguments
@@ -5882,14 +5877,6 @@ static Smi* GenerateIdentityHash(Isolate* isolate) {
}
-void JSObject::SetIdentityHash(Handle<JSObject> object, Handle<Smi> hash) {
- DCHECK(!object->IsJSGlobalProxy());
- Isolate* isolate = object->GetIsolate();
- Handle<Name> hash_code_symbol(isolate->heap()->hash_code_symbol());
- JSObject::AddProperty(object, hash_code_symbol, hash, NONE);
-}
-
-
template<typename ProxyType>
static Handle<Smi> GetOrCreateIdentityHashHelper(Handle<ProxyType> proxy) {
Isolate* isolate = proxy->GetIsolate();
@@ -5902,40 +5889,42 @@ static Handle<Smi> GetOrCreateIdentityHashHelper(Handle<ProxyType> proxy) {
return hash;
}
-
-Object* JSObject::GetIdentityHash() {
- DisallowHeapAllocation no_gc;
- Isolate* isolate = GetIsolate();
- if (IsJSGlobalProxy()) {
- return JSGlobalProxy::cast(this)->hash();
+// static
+Handle<Object> JSObject::GetIdentityHash(Isolate* isolate,
+ Handle<JSObject> object) {
+ if (object->IsJSGlobalProxy()) {
+ return handle(JSGlobalProxy::cast(*object)->hash(), isolate);
}
- Handle<Name> hash_code_symbol(isolate->heap()->hash_code_symbol());
- Handle<Object> stored_value =
- Object::GetPropertyOrElement(Handle<Object>(this, isolate),
- hash_code_symbol).ToHandleChecked();
- return stored_value->IsSmi() ? *stored_value
- : isolate->heap()->undefined_value();
+ Handle<Name> hash_code_symbol = isolate->factory()->hash_code_symbol();
+ return JSReceiver::GetDataProperty(object, hash_code_symbol);
}
-
+// static
Handle<Smi> JSObject::GetOrCreateIdentityHash(Handle<JSObject> object) {
if (object->IsJSGlobalProxy()) {
return GetOrCreateIdentityHashHelper(Handle<JSGlobalProxy>::cast(object));
}
Isolate* isolate = object->GetIsolate();
- Handle<Object> maybe_hash(object->GetIdentityHash(), isolate);
- if (maybe_hash->IsSmi()) return Handle<Smi>::cast(maybe_hash);
+ Handle<Name> hash_code_symbol = isolate->factory()->hash_code_symbol();
+ LookupIterator it(object, hash_code_symbol, object, LookupIterator::OWN);
+ if (it.IsFound()) {
+ DCHECK_EQ(LookupIterator::DATA, it.state());
+ Handle<Object> maybe_hash = it.GetDataValue();
+ if (maybe_hash->IsSmi()) return Handle<Smi>::cast(maybe_hash);
+ }
Handle<Smi> hash(GenerateIdentityHash(isolate), isolate);
- Handle<Name> hash_code_symbol(isolate->heap()->hash_code_symbol());
- JSObject::AddProperty(object, hash_code_symbol, hash, NONE);
+ CHECK(AddDataProperty(&it, hash, NONE, THROW_ON_ERROR,
+ CERTAINLY_NOT_STORE_FROM_KEYED)
+ .IsJust());
return hash;
}
-
-Object* JSProxy::GetIdentityHash() {
- return this->hash();
+// static
+Handle<Object> JSProxy::GetIdentityHash(Isolate* isolate,
+ Handle<JSProxy> proxy) {
+ return handle(proxy->hash(), isolate);
}
@@ -6027,7 +6016,7 @@ void JSObject::DeleteHiddenProperty(Handle<JSObject> object, Handle<Name> key) {
bool JSObject::HasHiddenProperties(Handle<JSObject> object) {
Isolate* isolate = object->GetIsolate();
Handle<Symbol> hidden = isolate->factory()->hidden_properties_symbol();
- LookupIterator it(object, hidden);
+ LookupIterator it(object, hidden, object);
Maybe<PropertyAttributes> maybe = GetPropertyAttributes(&it);
// Cannot get an exception since the hidden_properties_symbol isn't exposed to
// JS.
@@ -6062,8 +6051,10 @@ Object* JSObject::GetHiddenPropertiesHashTable() {
return GetHeap()->undefined_value();
}
} else {
- Handle<Symbol> hidden = GetIsolate()->factory()->hidden_properties_symbol();
- LookupIterator it(handle(this), hidden);
+ Isolate* isolate = GetIsolate();
+ Handle<Symbol> hidden = isolate->factory()->hidden_properties_symbol();
+ Handle<JSObject> receiver(this, isolate);
+ LookupIterator it(receiver, hidden, receiver);
// Access check is always skipped for the hidden string anyways.
return *GetDataProperty(&it);
}
@@ -6110,16 +6101,20 @@ Maybe<bool> JSObject::DeletePropertyWithInterceptor(LookupIterator* it,
if (interceptor->deleter()->IsUndefined()) return Nothing<bool>();
Handle<JSObject> holder = it->GetHolder<JSObject>();
+ Handle<Object> receiver = it->GetReceiver();
+ if (!receiver->IsJSReceiver()) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, receiver,
+ Object::ConvertReceiver(isolate, receiver),
+ Nothing<bool>());
+ }
- PropertyCallbackArguments args(isolate, interceptor->data(),
- *it->GetReceiver(), *holder, should_throw);
- v8::Local<v8::Boolean> result;
+ PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
+ *holder, should_throw);
+ Handle<Object> result;
if (it->IsElement()) {
uint32_t index = it->index();
v8::IndexedPropertyDeleterCallback deleter =
v8::ToCData<v8::IndexedPropertyDeleterCallback>(interceptor->deleter());
- LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-delete", *holder, index));
result = args.Call(deleter, index);
} else if (it->name()->IsSymbol() && !interceptor->can_intercept_symbols()) {
return Nothing<bool>();
@@ -6129,19 +6124,15 @@ Maybe<bool> JSObject::DeletePropertyWithInterceptor(LookupIterator* it,
v8::GenericNamedPropertyDeleterCallback deleter =
v8::ToCData<v8::GenericNamedPropertyDeleterCallback>(
interceptor->deleter());
- LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-delete", *holder, *name));
- result = args.Call(deleter, v8::Utils::ToLocal(name));
+ result = args.Call(deleter, name);
}
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
- if (result.IsEmpty()) return Nothing<bool>();
+ if (result.is_null()) return Nothing<bool>();
DCHECK(result->IsBoolean());
- Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
- result_internal->VerifyApiCallResultType();
// Rebox CustomArguments::kReturnValueOffset before returning.
- return Just(result_internal->BooleanValue());
+ return Just(result->IsTrue());
}
@@ -6231,15 +6222,12 @@ Maybe<bool> JSReceiver::DeleteProperty(LookupIterator* it,
}
// Fall through.
case LookupIterator::ACCESSOR: {
- if (!it->IsConfigurable() || receiver->map()->is_strong()) {
- // Fail if the property is not configurable, or on a strong object.
+ if (!it->IsConfigurable()) {
+ // Fail if the property is not configurable.
if (is_strict(language_mode)) {
- MessageTemplate::Template templ =
- receiver->map()->is_strong()
- ? MessageTemplate::kStrongDeleteProperty
- : MessageTemplate::kStrictDeleteProperty;
isolate->Throw(*isolate->factory()->NewTypeError(
- templ, it->GetName(), receiver));
+ MessageTemplate::kStrictDeleteProperty, it->GetName(),
+ receiver));
return Nothing<bool>();
}
return Just(false);
@@ -6265,7 +6253,7 @@ Maybe<bool> JSReceiver::DeleteProperty(LookupIterator* it,
Maybe<bool> JSReceiver::DeleteElement(Handle<JSReceiver> object, uint32_t index,
LanguageMode language_mode) {
- LookupIterator it(object->GetIsolate(), object, index,
+ LookupIterator it(object->GetIsolate(), object, index, object,
LookupIterator::HIDDEN);
return DeleteProperty(&it, language_mode);
}
@@ -6274,7 +6262,7 @@ Maybe<bool> JSReceiver::DeleteElement(Handle<JSReceiver> object, uint32_t index,
Maybe<bool> JSReceiver::DeleteProperty(Handle<JSReceiver> object,
Handle<Name> name,
LanguageMode language_mode) {
- LookupIterator it(object, name, LookupIterator::HIDDEN);
+ LookupIterator it(object, name, object, LookupIterator::HIDDEN);
return DeleteProperty(&it, language_mode);
}
@@ -6283,7 +6271,7 @@ Maybe<bool> JSReceiver::DeletePropertyOrElement(Handle<JSReceiver> object,
Handle<Name> name,
LanguageMode language_mode) {
LookupIterator it = LookupIterator::PropertyOrElement(
- name->GetIsolate(), object, name, LookupIterator::HIDDEN);
+ name->GetIsolate(), object, name, object, LookupIterator::HIDDEN);
return DeleteProperty(&it, language_mode);
}
@@ -6659,14 +6647,6 @@ Maybe<bool> JSReceiver::ValidateAndApplyPropertyDescriptor(
} else if (current_is_data_descriptor && desc_is_data_descriptor) {
// 8a. If the [[Configurable]] field of current is false, then:
if (!current->configurable()) {
- // [Strong mode] Disallow changing writable -> readonly for
- // non-configurable properties.
- if (it != NULL && current->writable() && desc->has_writable() &&
- !desc->writable() && object->map()->is_strong()) {
- RETURN_FAILURE(isolate, should_throw,
- NewTypeError(MessageTemplate::kStrongRedefineDisallowed,
- object, it->GetName()));
- }
// 8a i. Return false, if the [[Writable]] field of current is false and
// the [[Writable]] field of Desc is true.
if (!current->writable() && desc->has_writable() && desc->writable()) {
@@ -6787,7 +6767,7 @@ Maybe<bool> JSReceiver::CreateDataProperty(LookupIterator* it,
Isolate* isolate = receiver->GetIsolate();
if (receiver->IsJSObject()) {
- return JSObject::CreateDataProperty(it, value); // Shortcut.
+ return JSObject::CreateDataProperty(it, value, should_throw); // Shortcut.
}
PropertyDescriptor new_desc;
@@ -6800,17 +6780,28 @@ Maybe<bool> JSReceiver::CreateDataProperty(LookupIterator* it,
&new_desc, should_throw);
}
-
Maybe<bool> JSObject::CreateDataProperty(LookupIterator* it,
- Handle<Object> value) {
+ Handle<Object> value,
+ ShouldThrow should_throw) {
DCHECK(it->GetReceiver()->IsJSObject());
MAYBE_RETURN(JSReceiver::GetPropertyAttributes(it), Nothing<bool>());
+ Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(it->GetReceiver());
+ Isolate* isolate = receiver->GetIsolate();
if (it->IsFound()) {
- if (!it->IsConfigurable()) return Just(false);
+ Maybe<PropertyAttributes> attributes = GetPropertyAttributes(it);
+ MAYBE_RETURN(attributes, Nothing<bool>());
+ if ((attributes.FromJust() & DONT_DELETE) != 0) {
+ RETURN_FAILURE(
+ isolate, should_throw,
+ NewTypeError(MessageTemplate::kRedefineDisallowed, it->GetName()));
+ }
} else {
- if (!JSObject::IsExtensible(Handle<JSObject>::cast(it->GetReceiver())))
- return Just(false);
+ if (!JSObject::IsExtensible(Handle<JSObject>::cast(it->GetReceiver()))) {
+ RETURN_FAILURE(
+ isolate, should_throw,
+ NewTypeError(MessageTemplate::kDefineDisallowed, it->GetName()));
+ }
}
RETURN_ON_EXCEPTION_VALUE(it->isolate(),
@@ -7158,7 +7149,7 @@ Maybe<bool> JSProxy::SetPrivateProperty(Isolate* isolate, Handle<JSProxy> proxy,
? desc->value()
: Handle<Object>::cast(isolate->factory()->undefined_value());
- LookupIterator it(proxy, private_name);
+ LookupIterator it(proxy, private_name, proxy);
if (it.IsFound()) {
DCHECK_EQ(LookupIterator::DATA, it.state());
@@ -7220,7 +7211,7 @@ Maybe<bool> JSReceiver::GetOwnPropertyDescriptor(LookupIterator* it,
if (!is_accessor_pair) {
// 5a. Set D.[[Value]] to the value of X's [[Value]] attribute.
Handle<Object> value;
- if (!JSObject::GetProperty(it).ToHandle(&value)) {
+ if (!Object::GetProperty(it).ToHandle(&value)) {
DCHECK(isolate->has_pending_exception());
return Nothing<bool>();
}
@@ -7510,8 +7501,7 @@ Maybe<bool> JSReceiver::SetIntegrityLevel(Handle<JSReceiver> receiver,
if (receiver->IsJSObject()) {
Handle<JSObject> object = Handle<JSObject>::cast(receiver);
if (!object->HasSloppyArgumentsElements() &&
- !object->map()->is_observed() &&
- (!object->map()->is_strong() || level == SEALED)) { // Fast path.
+ !object->map()->is_observed()) { // Fast path.
if (level == SEALED) {
return JSObject::PreventExtensionsWithTransition<SEALED>(object,
should_throw);
@@ -7841,8 +7831,7 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
: object->elements()->length();
new_element_dictionary =
length == 0 ? isolate->factory()->empty_slow_element_dictionary()
- : GetNormalizedElementDictionary(
- object, handle(object->elements()));
+ : object->GetElementsAccessor()->Normalize(object);
}
Handle<Symbol> transition_marker;
@@ -7964,9 +7953,7 @@ Handle<Object> JSObject::FastPropertyAt(Handle<JSObject> object,
return Object::WrapForRead(isolate, raw_value, representation);
}
-enum class BoilerplateKind { kNormalBoilerplate, kApiBoilerplate };
-
-template <class ContextObject, BoilerplateKind boilerplate_kind>
+template <class ContextObject>
class JSObjectWalkVisitor {
public:
JSObjectWalkVisitor(ContextObject* site_context, bool copying,
@@ -7998,9 +7985,9 @@ class JSObjectWalkVisitor {
const JSObject::DeepCopyHints hints_;
};
-template <class ContextObject, BoilerplateKind boilerplate_kind>
-MaybeHandle<JSObject> JSObjectWalkVisitor<
- ContextObject, boilerplate_kind>::StructureWalk(Handle<JSObject> object) {
+template <class ContextObject>
+MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
+ Handle<JSObject> object) {
Isolate* isolate = this->isolate();
bool copying = this->copying();
bool shallow = hints_ == JSObject::kObjectIsShallow;
@@ -8020,26 +8007,8 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<
Handle<JSObject> copy;
if (copying) {
- if (boilerplate_kind == BoilerplateKind::kApiBoilerplate) {
- if (object->IsJSFunction()) {
-#ifdef DEBUG
- // Ensure that it is an Api function and template_instantiations_cache
- // contains an entry for function's FunctionTemplateInfo.
- JSFunction* function = JSFunction::cast(*object);
- CHECK(function->shared()->IsApiFunction());
- FunctionTemplateInfo* data = function->shared()->get_api_func_data();
- auto serial_number = handle(Smi::cast(data->serial_number()), isolate);
- CHECK(serial_number->value());
- auto cache = isolate->template_instantiations_cache();
- Object* element = cache->Lookup(serial_number);
- CHECK_EQ(function, element);
-#endif
- return object;
- }
- } else {
- // JSFunction objects are not allowed to be in normal boilerplates at all.
- DCHECK(!object->IsJSFunction());
- }
+ // JSFunction objects are not allowed to be in normal boilerplates at all.
+ DCHECK(!object->IsJSFunction());
Handle<AllocationSite> site_to_pass;
if (site_context()->ShouldCreateMemento(object)) {
site_to_pass = site_context()->current();
@@ -8108,7 +8077,7 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<
DCHECK(names->get(i)->IsName());
Handle<Name> name(Name::cast(names->get(i)));
Handle<Object> value =
- Object::GetProperty(copy, name).ToHandleChecked();
+ JSObject::GetProperty(copy, name).ToHandleChecked();
if (value->IsJSObject()) {
Handle<JSObject> result;
ASSIGN_RETURN_ON_EXCEPTION(
@@ -8208,9 +8177,8 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<
MaybeHandle<JSObject> JSObject::DeepWalk(
Handle<JSObject> object,
AllocationSiteCreationContext* site_context) {
- JSObjectWalkVisitor<AllocationSiteCreationContext,
- BoilerplateKind::kNormalBoilerplate> v(site_context,
- false, kNoHints);
+ JSObjectWalkVisitor<AllocationSiteCreationContext> v(site_context, false,
+ kNoHints);
MaybeHandle<JSObject> result = v.StructureWalk(object);
Handle<JSObject> for_assert;
DCHECK(!result.ToHandle(&for_assert) || for_assert.is_identical_to(object));
@@ -8222,30 +8190,7 @@ MaybeHandle<JSObject> JSObject::DeepCopy(
Handle<JSObject> object,
AllocationSiteUsageContext* site_context,
DeepCopyHints hints) {
- JSObjectWalkVisitor<AllocationSiteUsageContext,
- BoilerplateKind::kNormalBoilerplate> v(site_context, true,
- hints);
- MaybeHandle<JSObject> copy = v.StructureWalk(object);
- Handle<JSObject> for_assert;
- DCHECK(!copy.ToHandle(&for_assert) || !for_assert.is_identical_to(object));
- return copy;
-}
-
-class DummyContextObject : public AllocationSiteContext {
- public:
- explicit DummyContextObject(Isolate* isolate)
- : AllocationSiteContext(isolate) {}
-
- bool ShouldCreateMemento(Handle<JSObject> object) { return false; }
- Handle<AllocationSite> EnterNewScope() { return Handle<AllocationSite>(); }
- void ExitScope(Handle<AllocationSite> site, Handle<JSObject> object) {}
-};
-
-MaybeHandle<JSObject> JSObject::DeepCopyApiBoilerplate(
- Handle<JSObject> object) {
- DummyContextObject dummy_context_object(object->GetIsolate());
- JSObjectWalkVisitor<DummyContextObject, BoilerplateKind::kApiBoilerplate> v(
- &dummy_context_object, true, kNoHints);
+ JSObjectWalkVisitor<AllocationSiteUsageContext> v(site_context, true, hints);
MaybeHandle<JSObject> copy = v.StructureWalk(object);
Handle<JSObject> for_assert;
DCHECK(!copy.ToHandle(&for_assert) || !for_assert.is_identical_to(object));
@@ -8323,7 +8268,9 @@ MaybeHandle<Object> JSReceiver::OrdinaryToPrimitive(
// TODO(cbruni/jkummerow): Consider moving this into elements.cc.
-bool HasEnumerableElements(JSObject* object) {
+bool JSObject::HasEnumerableElements() {
+ // TODO(cbruni): cleanup
+ JSObject* object = this;
switch (object->GetElementsKind()) {
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
@@ -8388,7 +8335,6 @@ bool HasEnumerableElements(JSObject* object) {
return true;
}
-
// Tests for the fast common case for property enumeration:
// - This object and all prototypes has an enum cache (which means that
// it is no proxy, has no interceptors and needs no access checks).
@@ -8405,7 +8351,7 @@ bool JSReceiver::IsSimpleEnum() {
if (current->IsAccessCheckNeeded()) return false;
DCHECK(!current->HasNamedInterceptor());
DCHECK(!current->HasIndexedInterceptor());
- if (HasEnumerableElements(current)) return false;
+ if (current->HasEnumerableElements()) return false;
if (current != this && enum_length != 0) return false;
}
return true;
@@ -8465,15 +8411,13 @@ bool Map::OnlyHasSimpleProperties() {
// Wrapped string elements aren't explicitly stored in the elements backing
// store, but are loaded indirectly from the underlying string.
return !IsStringWrapperElementsKind(elements_kind()) &&
- !is_access_check_needed() && !has_named_interceptor() &&
- !has_indexed_interceptor() && !has_hidden_prototype() &&
- !is_dictionary_map();
+ instance_type() > LAST_SPECIAL_RECEIVER_TYPE &&
+ !has_hidden_prototype() && !is_dictionary_map();
}
-namespace {
-
-Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
- Handle<JSObject> object) {
+// static
+Handle<FixedArray> JSObject::GetFastEnumPropertyKeys(Isolate* isolate,
+ Handle<JSObject> object) {
Handle<Map> map(object->map());
bool cache_enum_length = map->OnlyHasSimpleProperties();
@@ -8524,8 +8468,9 @@ Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
for (int i = 0; i < size; i++) {
PropertyDetails details = descs->GetDetails(i);
+ if (details.IsDontEnum()) continue;
Object* key = descs->GetKey(i);
- if (details.IsDontEnum() || key->IsSymbol()) continue;
+ if (key->IsSymbol()) continue;
storage->set(index, key);
if (!indices.is_null()) {
if (details.type() != DATA) {
@@ -8547,7 +8492,6 @@ Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
return storage;
}
-} // namespace
Handle<FixedArray> JSObject::GetEnumPropertyKeys(Handle<JSObject> object) {
Isolate* isolate = object->GetIsolate();
@@ -8557,7 +8501,7 @@ Handle<FixedArray> JSObject::GetEnumPropertyKeys(Handle<JSObject> object) {
Handle<GlobalDictionary> dictionary(object->global_dictionary());
int length = dictionary->NumberOfEnumElements();
if (length == 0) {
- return Handle<FixedArray>(isolate->heap()->empty_fixed_array());
+ return isolate->factory()->empty_fixed_array();
}
Handle<FixedArray> storage = isolate->factory()->NewFixedArray(length);
dictionary->CopyEnumKeysTo(*storage);
@@ -8566,7 +8510,7 @@ Handle<FixedArray> JSObject::GetEnumPropertyKeys(Handle<JSObject> object) {
Handle<NameDictionary> dictionary(object->property_dictionary());
int length = dictionary->NumberOfEnumElements();
if (length == 0) {
- return Handle<FixedArray>(isolate->heap()->empty_fixed_array());
+ return isolate->factory()->empty_fixed_array();
}
Handle<FixedArray> storage = isolate->factory()->NewFixedArray(length);
dictionary->CopyEnumKeysTo(*storage);
@@ -8599,7 +8543,7 @@ static Maybe<bool> GetKeysFromInterceptor(Isolate* isolate,
}
PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
*object, Object::DONT_THROW);
- v8::Local<v8::Object> result;
+ Handle<JSObject> result;
if (!interceptor->enumerator()->IsUndefined()) {
Callback enum_fun = v8::ToCData<Callback>(interceptor->enumerator());
const char* log_tag = type == kIndexed ? "interceptor-indexed-enum"
@@ -8608,18 +8552,13 @@ static Maybe<bool> GetKeysFromInterceptor(Isolate* isolate,
result = args.Call(enum_fun);
}
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
- if (result.IsEmpty()) return Just(true);
- DCHECK(v8::Utils::OpenHandle(*result)->IsJSArray() ||
- (v8::Utils::OpenHandle(*result)->IsJSObject() &&
- Handle<JSObject>::cast(v8::Utils::OpenHandle(*result))
- ->HasSloppyArgumentsElements()));
+ if (result.is_null()) return Just(true);
+ DCHECK(result->IsJSArray() || result->HasSloppyArgumentsElements());
// The accumulator takes care of string/symbol filtering.
if (type == kIndexed) {
- accumulator->AddElementKeysFromInterceptor(
- Handle<JSObject>::cast(v8::Utils::OpenHandle(*result)));
+ accumulator->AddElementKeysFromInterceptor(result);
} else {
- accumulator->AddKeys(Handle<JSObject>::cast(v8::Utils::OpenHandle(*result)),
- DO_NOT_CONVERT);
+ accumulator->AddKeys(result, DO_NOT_CONVERT);
}
return Just(true);
}
@@ -8679,12 +8618,23 @@ static Maybe<bool> GetKeys_Internal(Isolate* isolate,
KeyCollectionType type,
PropertyFilter filter,
KeyAccumulator* accumulator) {
+ // Proxies have no hidden prototype and we should not trigger the
+ // [[GetPrototypeOf]] trap on the last iteration when using
+ // AdvanceFollowingProxies.
+ if (type == OWN_ONLY && object->IsJSProxy()) {
+ MAYBE_RETURN(JSProxy::OwnPropertyKeys(isolate, receiver,
+ Handle<JSProxy>::cast(object), filter,
+ accumulator),
+ Nothing<bool>());
+ return Just(true);
+ }
+
PrototypeIterator::WhereToEnd end = type == OWN_ONLY
? PrototypeIterator::END_AT_NON_HIDDEN
: PrototypeIterator::END_AT_NULL;
for (PrototypeIterator iter(isolate, object,
PrototypeIterator::START_AT_RECEIVER, end);
- !iter.IsAtEnd(); iter.Advance()) {
+ !iter.IsAtEnd();) {
Handle<JSReceiver> current =
PrototypeIterator::GetCurrent<JSReceiver>(iter);
Maybe<bool> result = Just(false); // Dummy initialization.
@@ -8700,6 +8650,11 @@ static Maybe<bool> GetKeys_Internal(Isolate* isolate,
}
MAYBE_RETURN(result, Nothing<bool>());
if (!result.FromJust()) break; // |false| means "stop iterating".
+ // Iterate through proxies but ignore access checks for the ALL_CAN_READ
+ // case on API objects for OWN_ONLY keys handlede in GgetKeysFromJSObject.
+ if (!iter.AdvanceFollowingProxiesIgnoringAccessChecks()) {
+ return Nothing<bool>();
+ }
}
return Just(true);
}
@@ -8798,7 +8753,7 @@ Maybe<bool> JSProxy::OwnPropertyKeys(Isolate* isolate,
return accumulator->AddKeysFromProxy(proxy, trap_result);
}
// 16. Let uncheckedResultKeys be a new List which is a copy of trapResult.
- Zone set_zone;
+ Zone set_zone(isolate->allocator());
const int kPresent = 1;
const int kGone = 0;
IdentityMap<int> unchecked_result_keys(isolate->heap(), &set_zone);
@@ -8858,14 +8813,15 @@ Maybe<bool> JSProxy::OwnPropertyKeys(Isolate* isolate,
return accumulator->AddKeysFromProxy(proxy, trap_result);
}
-
MaybeHandle<FixedArray> JSReceiver::GetKeys(Handle<JSReceiver> object,
KeyCollectionType type,
PropertyFilter filter,
- GetKeysConversion keys_conversion) {
+ GetKeysConversion keys_conversion,
+ bool filter_proxy_keys) {
USE(ContainsOnlyValidKeys);
Isolate* isolate = object->GetIsolate();
KeyAccumulator accumulator(isolate, type, filter);
+ accumulator.set_filter_proxy_keys(filter_proxy_keys);
MAYBE_RETURN(
GetKeys_Internal(isolate, object, object, type, filter, &accumulator),
MaybeHandle<FixedArray>());
@@ -8874,10 +8830,94 @@ MaybeHandle<FixedArray> JSReceiver::GetKeys(Handle<JSReceiver> object,
return keys;
}
+MUST_USE_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
+ Isolate* isolate, Handle<JSReceiver> receiver, bool get_entries,
+ Handle<FixedArray>* result) {
+ Handle<Map> map(JSReceiver::cast(*receiver)->map(), isolate);
+
+ if (!map->IsJSObjectMap()) return Just(false);
+ if (!map->OnlyHasSimpleProperties()) return Just(false);
+
+ Handle<JSObject> object(JSObject::cast(*receiver));
+
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
+ int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ int number_of_own_elements =
+ object->GetElementsAccessor()->GetCapacity(*object, object->elements());
+ Handle<FixedArray> values_or_entries = isolate->factory()->NewFixedArray(
+ number_of_own_descriptors + number_of_own_elements);
+ int count = 0;
+
+ if (object->elements() != isolate->heap()->empty_fixed_array()) {
+ MAYBE_RETURN(object->GetElementsAccessor()->CollectValuesOrEntries(
+ isolate, object, values_or_entries, get_entries, &count,
+ ENUMERABLE_STRINGS),
+ Nothing<bool>());
+ }
+
+ bool stable = object->map() == *map;
+
+ for (int index = 0; index < number_of_own_descriptors; index++) {
+ Handle<Name> next_key(descriptors->GetKey(index), isolate);
+ if (!next_key->IsString()) continue;
+ Handle<Object> prop_value;
+
+ // Directly decode from the descriptor array if |from| did not change shape.
+ if (stable) {
+ PropertyDetails details = descriptors->GetDetails(index);
+ if (!details.IsEnumerable()) continue;
+ if (details.kind() == kData) {
+ if (details.location() == kDescriptor) {
+ prop_value = handle(descriptors->GetValue(index), isolate);
+ } else {
+ Representation representation = details.representation();
+ FieldIndex field_index = FieldIndex::ForDescriptor(*map, index);
+ prop_value =
+ JSObject::FastPropertyAt(object, representation, field_index);
+ }
+ } else {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, prop_value, JSReceiver::GetProperty(object, next_key),
+ Nothing<bool>());
+ stable = object->map() == *map;
+ }
+ } else {
+ // If the map did change, do a slower lookup. We are still guaranteed that
+ // the object has a simple shape, and that the key is a name.
+ LookupIterator it(object, next_key, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ if (!it.IsFound()) continue;
+ DCHECK(it.state() == LookupIterator::DATA ||
+ it.state() == LookupIterator::ACCESSOR);
+ if (!it.IsEnumerable()) continue;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, prop_value, Object::GetProperty(&it), Nothing<bool>());
+ }
+
+ if (get_entries) {
+ prop_value = MakeEntryPair(isolate, next_key, prop_value);
+ }
+
+ values_or_entries->set(count, *prop_value);
+ count++;
+ }
+
+ if (count < values_or_entries->length()) values_or_entries->Shrink(count);
+ *result = values_or_entries;
+ return Just(true);
+}
+
MaybeHandle<FixedArray> GetOwnValuesOrEntries(Isolate* isolate,
Handle<JSReceiver> object,
PropertyFilter filter,
bool get_entries) {
+ Handle<FixedArray> values_or_entries;
+ if (filter == ENUMERABLE_STRINGS) {
+ Maybe<bool> fast_values_or_entries = FastGetOwnValuesOrEntries(
+ isolate, object, get_entries, &values_or_entries);
+ if (fast_values_or_entries.IsNothing()) return MaybeHandle<FixedArray>();
+ if (fast_values_or_entries.FromJust()) return values_or_entries;
+ }
+
PropertyFilter key_filter =
static_cast<PropertyFilter>(filter & ~ONLY_ENUMERABLE);
KeyAccumulator accumulator(isolate, OWN_ONLY, key_filter);
@@ -8887,8 +8927,7 @@ MaybeHandle<FixedArray> GetOwnValuesOrEntries(Isolate* isolate,
Handle<FixedArray> keys = accumulator.GetKeys(CONVERT_TO_STRING);
DCHECK(ContainsOnlyValidKeys(keys));
- Handle<FixedArray> values_or_entries =
- isolate->factory()->NewFixedArray(keys->length());
+ values_or_entries = isolate->factory()->NewFixedArray(keys->length());
int length = 0;
for (int i = 0; i < keys->length(); ++i) {
@@ -8982,6 +9021,8 @@ MaybeHandle<Object> JSObject::DefineAccessor(LookupIterator* it,
PropertyAttributes attributes) {
Isolate* isolate = it->isolate();
+ it->UpdateProtector();
+
if (it->state() == LookupIterator::ACCESS_CHECK) {
if (!it->HasAccess()) {
isolate->ReportFailedAccessCheck(it->GetHolder<JSObject>());
@@ -9006,7 +9047,7 @@ MaybeHandle<Object> JSObject::DefineAccessor(LookupIterator* it,
preexists = it->IsFound();
if (preexists && (it->state() == LookupIterator::DATA ||
it->GetAccessors()->IsAccessorInfo())) {
- old_value = GetProperty(it).ToHandleChecked();
+ old_value = Object::GetProperty(it).ToHandleChecked();
}
}
@@ -9283,16 +9324,14 @@ Handle<Map> Map::CopyInitialMap(Handle<Map> map, int instance_size,
int unused_property_fields) {
#ifdef DEBUG
Isolate* isolate = map->GetIsolate();
- // Strict and strong function maps have Function as a constructor but the
+ // Strict function maps have Function as a constructor but the
// Function's initial map is a sloppy function map. Same holds for
// GeneratorFunction and its initial map.
Object* constructor = map->GetConstructor();
DCHECK(constructor->IsJSFunction());
DCHECK(*map == JSFunction::cast(constructor)->initial_map() ||
*map == *isolate->strict_function_map() ||
- *map == *isolate->strong_function_map() ||
- *map == *isolate->strict_generator_function_map() ||
- *map == *isolate->strong_generator_function_map());
+ *map == *isolate->strict_generator_function_map());
#endif
// Initial maps must always own their descriptors and it's descriptor array
// does not contain descriptors that do not belong to the map.
@@ -9563,6 +9602,10 @@ Handle<Map> Map::CopyAsElementsKind(Handle<Map> map, ElementsKind kind,
TransitionFlag flag) {
Map* maybe_elements_transition_map = NULL;
if (flag == INSERT_TRANSITION) {
+ // Ensure we are requested to add elements kind transition "near the root".
+ DCHECK_EQ(map->FindRootMap()->NumberOfOwnDescriptors(),
+ map->NumberOfOwnDescriptors());
+
maybe_elements_transition_map = map->ElementsTransitionMap();
DCHECK(maybe_elements_transition_map == NULL ||
(maybe_elements_transition_map->elements_kind() ==
@@ -9598,9 +9641,8 @@ Handle<Map> Map::AsLanguageMode(Handle<Map> initial_map,
LanguageMode language_mode, FunctionKind kind) {
DCHECK_EQ(JS_FUNCTION_TYPE, initial_map->instance_type());
// Initial map for sloppy mode function is stored in the function
- // constructor. Initial maps for strict and strong modes are cached as
- // special transitions using |strict_function_transition_symbol| and
- // |strong_function_transition_symbol| respectively as a key.
+ // constructor. Initial maps for strict mode are cached as special transitions
+ // using |strict_function_transition_symbol| as a key.
if (language_mode == SLOPPY) return initial_map;
Isolate* isolate = initial_map->GetIsolate();
Factory* factory = isolate->factory();
@@ -9615,9 +9657,6 @@ Handle<Map> Map::AsLanguageMode(Handle<Map> initial_map,
case STRICT:
transition_symbol = factory->strict_function_transition_symbol();
break;
- case STRONG:
- transition_symbol = factory->strong_function_transition_symbol();
- break;
default:
UNREACHABLE();
break;
@@ -9890,7 +9929,7 @@ Handle<Map> Map::ReconfigureExistingProperty(Handle<Map> map, int descriptor,
// There is no benefit from reconstructing transition tree for maps without
// back pointers.
return CopyGeneralizeAllRepresentations(
- map, descriptor, FORCE_FIELD, kind, attributes,
+ map, map->elements_kind(), descriptor, FORCE_FIELD, kind, attributes,
"GenAll_AttributesMismatchProtoMap");
}
@@ -9905,9 +9944,8 @@ Handle<Map> Map::ReconfigureExistingProperty(Handle<Map> map, int descriptor,
return new_map;
}
-
Handle<Map> Map::TransitionToAccessorProperty(Handle<Map> map,
- Handle<Name> name,
+ Handle<Name> name, int descriptor,
AccessorComponent component,
Handle<Object> accessor,
PropertyAttributes attributes) {
@@ -9950,7 +9988,6 @@ Handle<Map> Map::TransitionToAccessorProperty(Handle<Map> map,
Handle<AccessorPair> pair;
DescriptorArray* old_descriptors = map->instance_descriptors();
- int descriptor = old_descriptors->SearchWithCache(isolate, *name, *map);
if (descriptor != DescriptorArray::kNotFound) {
if (descriptor != map->LastAdded()) {
return Map::Normalize(map, mode, "AccessorsOverwritingNonLast");
@@ -10196,6 +10233,7 @@ void CodeCache::Update(
void CodeCache::UpdateDefaultCache(
Handle<CodeCache> code_cache, Handle<Name> name, Handle<Code> code) {
+ Isolate* isolate = code_cache->GetIsolate();
// When updating the default code cache we disregard the type encoded in the
// flags. This allows call constant stubs to overwrite call field
// stubs, etc.
@@ -10208,19 +10246,23 @@ void CodeCache::UpdateDefaultCache(
{
DisallowHeapAllocation no_alloc;
int deleted_index = -1;
+ Object* null = isolate->heap()->null_value();
+ Object* undefined = isolate->heap()->undefined_value();
+ DCHECK(name->IsUniqueName());
for (int i = 0; i < length; i += kCodeCacheEntrySize) {
Object* key = cache->get(i);
- if (key->IsNull()) {
+ if (key == null) {
if (deleted_index < 0) deleted_index = i;
continue;
}
- if (key->IsUndefined()) {
+ if (key == undefined) {
if (deleted_index >= 0) i = deleted_index;
cache->set(i + kCodeCacheEntryNameOffset, *name);
cache->set(i + kCodeCacheEntryCodeOffset, *code);
return;
}
- if (name->Equals(Name::cast(key))) {
+ DCHECK(key->IsUniqueName());
+ if (*name == key) {
Code::Flags found =
Code::cast(cache->get(i + kCodeCacheEntryCodeOffset))->flags();
if (Code::RemoveTypeFromFlags(found) == flags) {
@@ -10241,7 +10283,6 @@ void CodeCache::UpdateDefaultCache(
// Extend the code cache with some new entries (at least one). Must be a
// multiple of the entry size.
- Isolate* isolate = cache->GetIsolate();
int new_length = length + (length >> 1) + kCodeCacheEntrySize;
new_length = new_length - new_length % kCodeCacheEntrySize;
DCHECK((new_length % kCodeCacheEntrySize) == 0);
@@ -10276,13 +10317,18 @@ Object* CodeCache::Lookup(Name* name, Code::Flags flags) {
Object* CodeCache::LookupDefaultCache(Name* name, Code::Flags flags) {
FixedArray* cache = default_cache();
+ Heap* heap = GetHeap();
+ Object* null = heap->null_value();
+ Object* undefined = heap->undefined_value();
int length = cache->length();
+ DCHECK(name->IsUniqueName());
for (int i = 0; i < length; i += kCodeCacheEntrySize) {
Object* key = cache->get(i + kCodeCacheEntryNameOffset);
// Skip deleted elements.
- if (key->IsNull()) continue;
- if (key->IsUndefined()) return key;
- if (name->Equals(Name::cast(key))) {
+ if (key == null) continue;
+ if (key == undefined) return key;
+ DCHECK(key->IsUniqueName());
+ if (name == key) {
Code* code = Code::cast(cache->get(i + kCodeCacheEntryCodeOffset));
if (Code::RemoveTypeFromFlags(code->flags()) == flags) {
return code;
@@ -10767,7 +10813,6 @@ Handle<ArrayList> ArrayList::Add(Handle<ArrayList> array, Handle<Object> obj,
return array;
}
-
Handle<ArrayList> ArrayList::Add(Handle<ArrayList> array, Handle<Object> obj1,
Handle<Object> obj2, AddMode mode) {
int length = array->Length();
@@ -11595,6 +11640,10 @@ void String::WriteToFlat(String* src,
// Right hand side is longer. Recurse over left.
if (from < boundary) {
WriteToFlat(first, sink, from, boundary);
+ if (from == 0 && cons_string->second() == first) {
+ CopyChars(sink + boundary, sink, boundary);
+ return;
+ }
sink += boundary - from;
from = 0;
} else {
@@ -12125,7 +12174,8 @@ Handle<String> SeqString::Truncate(Handle<SeqString> string, int new_length) {
Heap* heap = string->GetHeap();
// Sizes are pointer size aligned, so that we can use filler objects
// that are a multiple of pointer size.
- heap->CreateFillerObjectAt(start_of_string + new_size, delta);
+ heap->CreateFillerObjectAt(start_of_string + new_size, delta,
+ ClearRecordedSlots::kNo);
heap->AdjustLiveBytes(*string, -delta, Heap::CONCURRENT_TO_SWEEPER);
// We are storing the new length using release store after creating a filler
@@ -12280,7 +12330,6 @@ bool CheckEquivalent(Map* first, Map* second) {
first->instance_type() == second->instance_type() &&
first->bit_field() == second->bit_field() &&
first->is_extensible() == second->is_extensible() &&
- first->is_strong() == second->is_strong() &&
first->new_target_is_base() == second->new_target_is_base() &&
first->has_hidden_prototype() == second->has_hidden_prototype();
}
@@ -12349,14 +12398,6 @@ void JSFunction::AttemptConcurrentOptimization() {
MarkForOptimization();
return;
}
- if (isolate->concurrent_osr_enabled() &&
- isolate->optimizing_compile_dispatcher()->IsQueuedForOSR(this)) {
- // Do not attempt regular recompilation if we already queued this for OSR.
- // TODO(yangguo): This is necessary so that we don't install optimized
- // code on a function that is already optimized, since OSR and regular
- // recompilation race. This goes away as soon as OSR becomes one-shot.
- return;
- }
DCHECK(!IsInOptimizationQueue());
DCHECK(!IsOptimized());
DCHECK(shared()->allows_lazy_compilation() ||
@@ -12389,17 +12430,15 @@ void SharedFunctionInfo::AddSharedCodeToOptimizedCodeMap(
}
}
-
-void SharedFunctionInfo::AddToOptimizedCodeMapInternal(
+// static
+void SharedFunctionInfo::AddToOptimizedCodeMap(
Handle<SharedFunctionInfo> shared, Handle<Context> native_context,
- Handle<HeapObject> code, Handle<LiteralsArray> literals,
+ MaybeHandle<Code> code, Handle<LiteralsArray> literals,
BailoutId osr_ast_id) {
Isolate* isolate = shared->GetIsolate();
if (isolate->serializer_enabled()) return;
- DCHECK(*code == isolate->heap()->undefined_value() ||
- !shared->SearchOptimizedCodeMap(*native_context, osr_ast_id).code);
- DCHECK(*code == isolate->heap()->undefined_value() ||
- Code::cast(*code)->kind() == Code::OPTIMIZED_FUNCTION);
+ DCHECK(code.is_null() ||
+ code.ToHandleChecked()->kind() == Code::OPTIMIZED_FUNCTION);
DCHECK(native_context->IsNativeContext());
STATIC_ASSERT(kEntryLength == 4);
Handle<FixedArray> new_code_map;
@@ -12414,15 +12453,10 @@ void SharedFunctionInfo::AddToOptimizedCodeMapInternal(
Handle<FixedArray> old_code_map(shared->optimized_code_map(), isolate);
entry = shared->SearchOptimizedCodeMapEntry(*native_context, osr_ast_id);
if (entry > kSharedCodeIndex) {
- // Found an existing context-specific entry. If the user provided valid
- // code, it must not contain any code.
- DCHECK(code->IsUndefined() ||
- WeakCell::cast(old_code_map->get(entry + kCachedCodeOffset))
- ->cleared());
-
- // Just set the code and literals to the entry.
- if (!code->IsUndefined()) {
- Handle<WeakCell> code_cell = isolate->factory()->NewWeakCell(code);
+ // Just set the code and literals of the entry.
+ if (!code.is_null()) {
+ Handle<WeakCell> code_cell =
+ isolate->factory()->NewWeakCell(code.ToHandleChecked());
old_code_map->set(entry + kCachedCodeOffset, *code_cell);
}
Handle<WeakCell> literals_cell =
@@ -12455,9 +12489,9 @@ void SharedFunctionInfo::AddToOptimizedCodeMapInternal(
}
}
- Handle<WeakCell> code_cell = code->IsUndefined()
- ? isolate->factory()->empty_weak_cell()
- : isolate->factory()->NewWeakCell(code);
+ Handle<WeakCell> code_cell =
+ code.is_null() ? isolate->factory()->empty_weak_cell()
+ : isolate->factory()->NewWeakCell(code.ToHandleChecked());
Handle<WeakCell> literals_cell = isolate->factory()->NewWeakCell(literals);
WeakCell* context_cell = native_context->self_weak_cell();
@@ -12875,12 +12909,10 @@ Handle<Object> CacheInitialJSArrayMaps(
Handle<Context> native_context, Handle<Map> initial_map) {
// Replace all of the cached initial array maps in the native context with
// the appropriate transitioned elements kind maps.
- Strength strength =
- initial_map->is_strong() ? Strength::STRONG : Strength::WEAK;
Handle<Map> current_map = initial_map;
ElementsKind kind = current_map->elements_kind();
DCHECK_EQ(GetInitialFastElementsKind(), kind);
- native_context->set(Context::ArrayMapIndex(kind, strength), *current_map);
+ native_context->set(Context::ArrayMapIndex(kind), *current_map);
for (int i = GetSequenceIndexFromFastElementsKind(kind) + 1;
i < kFastElementsKindCount; ++i) {
Handle<Map> new_map;
@@ -12892,7 +12924,7 @@ Handle<Object> CacheInitialJSArrayMaps(
current_map, next_kind, INSERT_TRANSITION);
}
DCHECK_EQ(next_kind, new_map->elements_kind());
- native_context->set(Context::ArrayMapIndex(next_kind, strength), *new_map);
+ native_context->set(Context::ArrayMapIndex(next_kind), *new_map);
current_map = new_map;
}
return initial_map;
@@ -12924,9 +12956,6 @@ void JSFunction::SetInstancePrototype(Handle<JSFunction> function,
function->set_prototype_or_initial_map(*value);
} else {
Handle<Map> new_map = Map::Copy(initial_map, "SetInstancePrototype");
- if (function->map()->is_strong()) {
- new_map->set_is_strong();
- }
JSFunction::SetInitialMap(function, new_map, value);
// If the function is used as the global Array function, cache the
@@ -12938,9 +12967,6 @@ void JSFunction::SetInstancePrototype(Handle<JSFunction> function,
if (array_function->IsJSFunction() &&
*function == JSFunction::cast(*array_function)) {
CacheInitialJSArrayMaps(native_context, new_map);
- Handle<Map> new_strong_map = Map::Copy(new_map, "SetInstancePrototype");
- new_strong_map->set_is_strong();
- CacheInitialJSArrayMaps(native_context, new_strong_map);
}
}
@@ -13109,7 +13135,7 @@ void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
// The constructor should be compiled for the optimization hints to be
// available.
- Compiler::Compile(function, CLEAR_EXCEPTION);
+ Compiler::Compile(function, Compiler::CLEAR_EXCEPTION);
// First create a new map with the size and number of in-object properties
// suggested by the function.
@@ -13125,9 +13151,6 @@ void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
&in_object_properties);
Handle<Map> map = isolate->factory()->NewMap(instance_type, instance_size);
- if (function->map()->is_strong()) {
- map->set_is_strong();
- }
// Fetch or allocate prototype.
Handle<Object> prototype;
@@ -13256,43 +13279,6 @@ void JSFunction::PrintName(FILE* out) {
}
-// The filter is a pattern that matches function names in this way:
-// "*" all; the default
-// "-" all but the top-level function
-// "-name" all but the function "name"
-// "" only the top-level function
-// "name" only the function "name"
-// "name*" only functions starting with "name"
-// "~" none; the tilde is not an identifier
-bool JSFunction::PassesFilter(const char* raw_filter) {
- if (*raw_filter == '*') return true;
- String* name = shared()->DebugName();
- Vector<const char> filter = CStrVector(raw_filter);
- if (filter.length() == 0) return name->length() == 0;
- if (filter[0] == '-') {
- // Negative filter.
- if (filter.length() == 1) {
- return (name->length() != 0);
- } else if (name->IsUtf8EqualTo(filter.SubVector(1, filter.length()))) {
- return false;
- }
- if (filter[filter.length() - 1] == '*' &&
- name->IsUtf8EqualTo(filter.SubVector(1, filter.length() - 1), true)) {
- return false;
- }
- return true;
-
- } else if (name->IsUtf8EqualTo(filter)) {
- return true;
- }
- if (filter[filter.length() - 1] == '*' &&
- name->IsUtf8EqualTo(filter.SubVector(0, filter.length() - 1), true)) {
- return true;
- }
- return false;
-}
-
-
Handle<String> JSFunction::GetName(Handle<JSFunction> function) {
Isolate* isolate = function->GetIsolate();
Handle<Object> name =
@@ -13417,14 +13403,14 @@ Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
return builder.Finish().ToHandleChecked();
}
-
void Oddball::Initialize(Isolate* isolate, Handle<Oddball> oddball,
const char* to_string, Handle<Object> to_number,
- const char* type_of, byte kind) {
+ bool to_boolean, const char* type_of, byte kind) {
Handle<String> internalized_to_string =
isolate->factory()->InternalizeUtf8String(to_string);
Handle<String> internalized_type_of =
isolate->factory()->InternalizeUtf8String(type_of);
+ oddball->set_to_boolean(isolate->heap()->ToBoolean(to_boolean));
oddball->set_to_number(*to_number);
oddball->set_to_string(*internalized_to_string);
oddball->set_type_of(*internalized_type_of);
@@ -13526,8 +13512,9 @@ Handle<Object> Script::GetNameOrSourceURL(Handle<Script> script) {
isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("nameOrSourceURL"));
Handle<JSObject> script_wrapper = Script::GetWrapper(script);
- Handle<Object> property = Object::GetProperty(
- script_wrapper, name_or_source_url_key).ToHandleChecked();
+ Handle<Object> property =
+ JSReceiver::GetProperty(script_wrapper, name_or_source_url_key)
+ .ToHandleChecked();
DCHECK(property->IsJSFunction());
Handle<Object> result;
// Do not check against pending exception, since this function may be called
@@ -13627,7 +13614,7 @@ void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
}
#ifdef DEBUG
- {
+ if (FLAG_enable_slow_asserts) {
WeakFixedArray::Iterator iterator(*list);
SharedFunctionInfo* next;
while ((next = iterator.Next<SharedFunctionInfo>())) {
@@ -13669,6 +13656,41 @@ String* SharedFunctionInfo::DebugName() {
return String::cast(n);
}
+// The filter is a pattern that matches function names in this way:
+// "*" all; the default
+// "-" all but the top-level function
+// "-name" all but the function "name"
+// "" only the top-level function
+// "name" only the function "name"
+// "name*" only functions starting with "name"
+// "~" none; the tilde is not an identifier
+bool SharedFunctionInfo::PassesFilter(const char* raw_filter) {
+ if (*raw_filter == '*') return true;
+ String* name = DebugName();
+ Vector<const char> filter = CStrVector(raw_filter);
+ if (filter.length() == 0) return name->length() == 0;
+ if (filter[0] == '-') {
+ // Negative filter.
+ if (filter.length() == 1) {
+ return (name->length() != 0);
+ } else if (name->IsUtf8EqualTo(filter.SubVector(1, filter.length()))) {
+ return false;
+ }
+ if (filter[filter.length() - 1] == '*' &&
+ name->IsUtf8EqualTo(filter.SubVector(1, filter.length() - 1), true)) {
+ return false;
+ }
+ return true;
+
+ } else if (name->IsUtf8EqualTo(filter)) {
+ return true;
+ }
+ if (filter[filter.length() - 1] == '*' &&
+ name->IsUtf8EqualTo(filter.SubVector(0, filter.length() - 1), true)) {
+ return true;
+ }
+ return false;
+}
bool SharedFunctionInfo::HasSourceCode() const {
return !script()->IsUndefined() &&
@@ -13695,14 +13717,11 @@ int SharedFunctionInfo::SourceSize() {
return end_position() - start_position();
}
-
-namespace {
-
-void CalculateInstanceSizeHelper(InstanceType instance_type,
- int requested_internal_fields,
- int requested_in_object_properties,
- int* instance_size,
- int* in_object_properties) {
+void JSFunction::CalculateInstanceSizeHelper(InstanceType instance_type,
+ int requested_internal_fields,
+ int requested_in_object_properties,
+ int* instance_size,
+ int* in_object_properties) {
int header_size = JSObject::GetHeaderSize(instance_type);
DCHECK_LE(requested_internal_fields,
(JSObject::kMaxInstanceSize - header_size) >> kPointerSizeLog2);
@@ -13715,8 +13734,6 @@ void CalculateInstanceSizeHelper(InstanceType instance_type,
requested_internal_fields;
}
-} // namespace
-
void JSFunction::CalculateInstanceSize(InstanceType instance_type,
int requested_internal_fields,
@@ -13829,8 +13846,10 @@ void SharedFunctionInfo::DisableOptimization(BailoutReason reason) {
set_optimization_disabled(true);
set_disable_optimization_reason(reason);
// Code should be the lazy compilation stub or else unoptimized.
- DCHECK(code()->kind() == Code::FUNCTION || code()->kind() == Code::BUILTIN);
- PROFILE(GetIsolate(), CodeDisableOptEvent(code(), this));
+ DCHECK(abstract_code()->kind() == AbstractCode::FUNCTION ||
+ abstract_code()->kind() == AbstractCode::INTERPRETED_FUNCTION ||
+ abstract_code()->kind() == AbstractCode::BUILTIN);
+ PROFILE(GetIsolate(), CodeDisableOptEvent(abstract_code(), this));
if (FLAG_trace_opt) {
PrintF("[disabled optimization for ");
ShortPrint();
@@ -14085,8 +14104,6 @@ void Code::Relocate(intptr_t delta) {
void Code::CopyFrom(const CodeDesc& desc) {
- DCHECK(Marking::Color(this) == Marking::WHITE_OBJECT);
-
// copy code
CopyBytes(instruction_start(), desc.buffer,
static_cast<size_t>(desc.instr_size));
@@ -14110,21 +14127,22 @@ void Code::CopyFrom(const CodeDesc& desc) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
Handle<Object> p = it.rinfo()->target_object_handle(origin);
- it.rinfo()->set_target_object(*p, SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+ it.rinfo()->set_target_object(*p, UPDATE_WRITE_BARRIER,
+ SKIP_ICACHE_FLUSH);
} else if (mode == RelocInfo::CELL) {
Handle<Cell> cell = it.rinfo()->target_cell_handle();
- it.rinfo()->set_target_cell(*cell, SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+ it.rinfo()->set_target_cell(*cell, UPDATE_WRITE_BARRIER,
+ SKIP_ICACHE_FLUSH);
} else if (RelocInfo::IsCodeTarget(mode)) {
// rewrite code handles in inline cache targets to direct
// pointers to the first instruction in the code object
Handle<Object> p = it.rinfo()->target_object_handle(origin);
Code* code = Code::cast(*p);
it.rinfo()->set_target_address(code->instruction_start(),
- SKIP_WRITE_BARRIER,
- SKIP_ICACHE_FLUSH);
+ UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
Address p = it.rinfo()->target_runtime_entry(origin);
- it.rinfo()->set_target_runtime_entry(p, SKIP_WRITE_BARRIER,
+ it.rinfo()->set_target_runtime_entry(p, UPDATE_WRITE_BARRIER,
SKIP_ICACHE_FLUSH);
} else if (mode == RelocInfo::CODE_AGE_SEQUENCE) {
Handle<Object> p = it.rinfo()->code_age_stub_handle(origin);
@@ -14168,6 +14186,8 @@ int Code::SourcePosition(int code_offset) {
}
it.next();
}
+ DCHECK(kind() == FUNCTION || (is_optimized_code() && is_turbofanned()) ||
+ is_wasm_code() || position == RelocInfo::kNoPosition);
return position;
}
@@ -14459,6 +14479,12 @@ void Code::MakeYoung(Isolate* isolate) {
if (sequence != NULL) MakeCodeAgeSequenceYoung(sequence, isolate);
}
+void Code::PreAge(Isolate* isolate) {
+ byte* sequence = FindCodeAgeSequence();
+ if (sequence != NULL) {
+ PatchPlatformCodeAge(isolate, sequence, kPreAgedCodeAge, NO_MARKING_PARITY);
+ }
+}
void Code::MarkToBeExecutedOnce(Isolate* isolate) {
byte* sequence = FindCodeAgeSequence();
@@ -14468,7 +14494,6 @@ void Code::MarkToBeExecutedOnce(Isolate* isolate) {
}
}
-
void Code::MakeOlder(MarkingParity current_parity) {
byte* sequence = FindCodeAgeSequence();
if (sequence != NULL) {
@@ -14741,6 +14766,15 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(
break;
}
+ case Translation::TAIL_CALLER_FRAME: {
+ int shared_info_id = iterator.Next();
+ Object* shared_info = LiteralArray()->get(shared_info_id);
+ os << "{function="
+ << Brief(SharedFunctionInfo::cast(shared_info)->DebugName())
+ << "}";
+ break;
+ }
+
case Translation::GETTER_STUB_FRAME:
case Translation::SETTER_STUB_FRAME: {
int shared_info_id = iterator.Next();
@@ -14946,11 +14980,16 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
os << "compare_operation = " << Token::Name(stub.op()) << "\n";
}
}
- if ((name != NULL) && (name[0] != '\0')) {
+ if ((name != nullptr) && (name[0] != '\0')) {
os << "name = " << name << "\n";
} else if (kind() == BUILTIN) {
name = GetIsolate()->builtins()->Lookup(instruction_start());
- if (name != NULL) {
+ if (name != nullptr) {
+ os << "name = " << name << "\n";
+ }
+ } else if (kind() == BYTECODE_HANDLER) {
+ name = GetIsolate()->interpreter()->LookupNameOfBytecodeHandler(this);
+ if (name != nullptr) {
os << "name = " << name << "\n";
}
}
@@ -15051,7 +15090,6 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
}
#ifdef OBJECT_PRINT
if (!type_feedback_info()->IsUndefined()) {
- OFStream os(stdout);
TypeFeedbackInfo::cast(type_feedback_info())->TypeFeedbackInfoPrint(os);
os << "\n";
}
@@ -15078,7 +15116,8 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
int BytecodeArray::SourcePosition(int offset) {
int last_position = 0;
- for (interpreter::SourcePositionTableIterator iterator(this);
+ for (interpreter::SourcePositionTableIterator iterator(
+ source_position_table());
!iterator.done() && iterator.bytecode_offset() <= offset;
iterator.Advance()) {
last_position = iterator.source_position();
@@ -15092,7 +15131,7 @@ int BytecodeArray::SourceStatementPosition(int offset) {
int position = SourcePosition(offset);
// Now find the closest statement position before the position.
int statement_position = 0;
- interpreter::SourcePositionTableIterator iterator(this);
+ interpreter::SourcePositionTableIterator iterator(source_position_table());
while (!iterator.done()) {
if (iterator.is_statement()) {
int p = iterator.source_position();
@@ -15110,49 +15149,30 @@ void BytecodeArray::Disassemble(std::ostream& os) {
os << "Frame size " << frame_size() << "\n";
Vector<char> buf = Vector<char>::New(50);
- const uint8_t* first_bytecode_address = GetFirstBytecodeAddress();
- int bytecode_size = 0;
-
- interpreter::SourcePositionTableIterator source_positions(this);
+ const uint8_t* base_address = GetFirstBytecodeAddress();
+ interpreter::SourcePositionTableIterator source_positions(
+ source_position_table());
- for (int i = 0; i < this->length(); i += bytecode_size) {
- const uint8_t* bytecode_start = &first_bytecode_address[i];
- interpreter::Bytecode bytecode =
- interpreter::Bytecodes::FromByte(bytecode_start[0]);
- bytecode_size = interpreter::Bytecodes::Size(bytecode);
-
- if (!source_positions.done() && i == source_positions.bytecode_offset()) {
+ interpreter::BytecodeArrayIterator iterator(handle(this));
+ while (!iterator.done()) {
+ if (!source_positions.done() &&
+ iterator.current_offset() == source_positions.bytecode_offset()) {
os << std::setw(5) << source_positions.source_position();
os << (source_positions.is_statement() ? " S> " : " E> ");
source_positions.Advance();
} else {
os << " ";
}
-
- SNPrintF(buf, "%p", bytecode_start);
+ const uint8_t* current_address = base_address + iterator.current_offset();
+ SNPrintF(buf, "%p", current_address);
os << buf.start() << " : ";
- interpreter::Bytecodes::Decode(os, bytecode_start, parameter_count());
-
- if (interpreter::Bytecodes::IsJumpConstantWide(bytecode)) {
- DCHECK_EQ(bytecode_size, 3);
- int index = static_cast<int>(ReadUnalignedUInt16(bytecode_start + 1));
- int offset = Smi::cast(constant_pool()->get(index))->value();
- SNPrintF(buf, " (%p)", bytecode_start + offset);
- os << buf.start();
- } else if (interpreter::Bytecodes::IsJumpConstant(bytecode)) {
- DCHECK_EQ(bytecode_size, 2);
- int index = static_cast<int>(bytecode_start[1]);
- int offset = Smi::cast(constant_pool()->get(index))->value();
- SNPrintF(buf, " (%p)", bytecode_start + offset);
- os << buf.start();
- } else if (interpreter::Bytecodes::IsJump(bytecode)) {
- DCHECK_EQ(bytecode_size, 2);
- int offset = static_cast<int8_t>(bytecode_start[1]);
- SNPrintF(buf, " (%p)", bytecode_start + offset);
+ interpreter::Bytecodes::Decode(os, current_address, parameter_count());
+ if (interpreter::Bytecodes::IsJump(iterator.current_bytecode())) {
+ SNPrintF(buf, " (%p)", base_address + iterator.GetJumpTargetOffset());
os << buf.start();
}
-
os << std::endl;
+ iterator.Advance();
}
if (constant_pool()->length() > 0) {
@@ -15191,7 +15211,7 @@ static bool GetOldValue(Isolate* isolate,
uint32_t index,
List<Handle<Object> >* old_values,
List<uint32_t>* indices) {
- LookupIterator it(isolate, object, index, LookupIterator::HIDDEN);
+ LookupIterator it(isolate, object, index, object, LookupIterator::HIDDEN);
CHECK(JSReceiver::GetPropertyAttributes(&it).IsJust());
DCHECK(it.IsFound());
if (!it.IsConfigurable()) return false;
@@ -15698,16 +15718,6 @@ Maybe<bool> JSObject::SetPrototype(Handle<JSObject> object,
ShouldThrow should_throw) {
Isolate* isolate = object->GetIsolate();
- // Setting the prototype of an Array instance invalidates the species
- // protector
- // because it could change the constructor property of the instance, which
- // could change the @@species constructor.
- if (object->IsJSArray() && isolate->IsArraySpeciesLookupChainIntact()) {
- isolate->CountUsage(
- v8::Isolate::UseCounterFeature::kArrayInstanceProtoModified);
- isolate->InvalidateArraySpeciesProtector();
- }
-
const bool observed = from_javascript && object->map()->is_observed();
Handle<Object> old_value;
if (observed) {
@@ -15760,12 +15770,6 @@ Maybe<bool> JSObject::SetPrototypeUnobserved(Handle<JSObject> object,
DCHECK(!object->IsAccessCheckNeeded());
}
- // Strong objects may not have their prototype set via __proto__ or
- // setPrototypeOf.
- if (from_javascript && object->map()->is_strong()) {
- RETURN_FAILURE(isolate, should_throw,
- NewTypeError(MessageTemplate::kStrongSetProto, object));
- }
Heap* heap = isolate->heap();
// Silently ignore the change if value is not a JSObject or null.
// SpiderMonkey behaves this way.
@@ -16256,10 +16260,18 @@ bool Map::IsValidElementsTransition(ElementsKind from_kind,
bool JSArray::HasReadOnlyLength(Handle<JSArray> array) {
- LookupIterator it(array, array->GetIsolate()->factory()->length_string(),
+ Map* map = array->map();
+ // Fast path: "length" is the first fast property of arrays. Since it's not
+ // configurable, it's guaranteed to be the first in the descriptor array.
+ if (!map->is_dictionary_map()) {
+ DCHECK(map->instance_descriptors()->GetKey(0) ==
+ array->GetHeap()->length_string());
+ return map->instance_descriptors()->GetDetails(0).IsReadOnly();
+ }
+
+ Isolate* isolate = array->GetIsolate();
+ LookupIterator it(array, isolate->factory()->length_string(), array,
LookupIterator::OWN_SKIP_INTERCEPTOR);
- CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
- CHECK(it.IsFound());
CHECK_EQ(LookupIterator::ACCESSOR, it.state());
return it.IsReadOnly();
}
@@ -16362,16 +16374,6 @@ void Dictionary<Derived, Shape, Key>::CopyValuesTo(FixedArray* elements) {
}
-InterceptorInfo* JSObject::GetNamedInterceptor() {
- DCHECK(map()->has_named_interceptor());
- JSFunction* constructor = JSFunction::cast(map()->GetConstructor());
- DCHECK(constructor->shared()->IsApiFunction());
- Object* result =
- constructor->shared()->get_api_func_data()->named_property_handler();
- return InterceptorInfo::cast(result);
-}
-
-
MaybeHandle<Object> JSObject::GetPropertyWithInterceptor(LookupIterator* it,
bool* done) {
*done = false;
@@ -16387,17 +16389,19 @@ MaybeHandle<Object> JSObject::GetPropertyWithInterceptor(LookupIterator* it,
}
Handle<JSObject> holder = it->GetHolder<JSObject>();
- v8::Local<v8::Value> result;
- PropertyCallbackArguments args(isolate, interceptor->data(),
- *it->GetReceiver(), *holder,
- Object::DONT_THROW);
+ Handle<Object> result;
+ Handle<Object> receiver = it->GetReceiver();
+ if (!receiver->IsJSReceiver()) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, receiver, Object::ConvertReceiver(isolate, receiver), Object);
+ }
+ PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
+ *holder, Object::DONT_THROW);
if (it->IsElement()) {
uint32_t index = it->index();
v8::IndexedPropertyGetterCallback getter =
v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
- LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-get", *holder, index));
result = args.Call(getter, index);
} else {
Handle<Name> name = it->name();
@@ -16410,18 +16414,14 @@ MaybeHandle<Object> JSObject::GetPropertyWithInterceptor(LookupIterator* it,
v8::GenericNamedPropertyGetterCallback getter =
v8::ToCData<v8::GenericNamedPropertyGetterCallback>(
interceptor->getter());
- LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-get", *holder, *name));
- result = args.Call(getter, v8::Utils::ToLocal(name));
+ result = args.Call(getter, name);
}
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- if (result.IsEmpty()) return isolate->factory()->undefined_value();
- Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
- result_internal->VerifyApiCallResultType();
+ if (result.is_null()) return isolate->factory()->undefined_value();
*done = true;
// Rebox handle before return
- return handle(*result_internal, isolate);
+ return handle(*result, isolate);
}
@@ -16436,7 +16436,7 @@ Maybe<bool> JSObject::HasRealNamedProperty(Handle<JSObject> object,
Maybe<bool> JSObject::HasRealElementProperty(Handle<JSObject> object,
uint32_t index) {
Isolate* isolate = object->GetIsolate();
- LookupIterator it(isolate, object, index,
+ LookupIterator it(isolate, object, index, object,
LookupIterator::OWN_SKIP_INTERCEPTOR);
return HasProperty(&it);
}
@@ -16564,7 +16564,6 @@ void FixedArray::SortPairs(FixedArray* numbers, uint32_t len) {
}
}
-
void JSObject::CollectOwnPropertyNames(KeyAccumulator* keys,
PropertyFilter filter) {
if (HasFastProperties()) {
@@ -16605,7 +16604,6 @@ int JSObject::NumberOfOwnElements(PropertyFilter filter) {
return GetOwnElementKeys(NULL, filter);
}
-
void JSObject::CollectOwnElementKeys(Handle<JSObject> object,
KeyAccumulator* keys,
PropertyFilter filter) {
@@ -16757,15 +16755,14 @@ MaybeHandle<String> Object::ObjectProtoToString(Isolate* isolate,
Object::ToObject(isolate, object).ToHandleChecked();
Handle<String> tag;
- if (FLAG_harmony_tostring) {
- Handle<Object> to_string_tag;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, to_string_tag,
- GetProperty(receiver, isolate->factory()->to_string_tag_symbol()),
- String);
- if (to_string_tag->IsString()) {
- tag = Handle<String>::cast(to_string_tag);
- }
+ Handle<Object> to_string_tag;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, to_string_tag,
+ JSReceiver::GetProperty(receiver,
+ isolate->factory()->to_string_tag_symbol()),
+ String);
+ if (to_string_tag->IsString()) {
+ tag = Handle<String>::cast(to_string_tag);
}
if (tag.is_null()) {
@@ -16851,7 +16848,6 @@ class StringSharedKey : public HashTableKey {
hash ^= String::cast(script->source())->Hash();
STATIC_ASSERT(LANGUAGE_END == 3);
if (is_strict(language_mode)) hash ^= 0x8000;
- if (is_strong(language_mode)) hash ^= 0x10000;
hash += scope_position;
}
return hash;
@@ -16920,7 +16916,6 @@ JSRegExp::Flags RegExpFlagsFromString(Handle<String> flags, bool* success) {
flag = JSRegExp::kUnicode;
break;
case 'y':
- if (!FLAG_harmony_regexps) return JSRegExp::Flags(0);
flag = JSRegExp::kSticky;
break;
default:
@@ -17151,7 +17146,7 @@ bool SeqOneByteSubStringKey::IsMatch(Object* string) {
class InternalizedStringKey : public HashTableKey {
public:
explicit InternalizedStringKey(Handle<String> string)
- : string_(string) { }
+ : string_(String::Flatten(string)) {}
bool IsMatch(Object* string) override {
return String::cast(string)->Equals(*string_);
@@ -17541,6 +17536,10 @@ template Handle<Object>
Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape,
uint32_t>::DeleteProperty(Handle<SeededNumberDictionary>, int);
+template Handle<Object>
+Dictionary<UnseededNumberDictionary, UnseededNumberDictionaryShape,
+ uint32_t>::DeleteProperty(Handle<UnseededNumberDictionary>, int);
+
template Handle<NameDictionary>
HashTable<NameDictionary, NameDictionaryShape, Handle<Name> >::
New(Isolate*, int, MinimumCapacity, PretenureFlag);
@@ -17553,6 +17552,10 @@ template Handle<SeededNumberDictionary>
HashTable<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::
Shrink(Handle<SeededNumberDictionary>, uint32_t);
+template Handle<UnseededNumberDictionary>
+ HashTable<UnseededNumberDictionary, UnseededNumberDictionaryShape,
+ uint32_t>::Shrink(Handle<UnseededNumberDictionary>, uint32_t);
+
template Handle<NameDictionary>
Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::Add(
Handle<NameDictionary>, Handle<Name>, Handle<Object>, PropertyDetails);
@@ -17599,9 +17602,6 @@ template Handle<NameDictionary>
Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::
EnsureCapacity(Handle<NameDictionary>, int, Handle<Name>);
-template bool Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape,
- uint32_t>::HasComplexElements();
-
template int HashTable<SeededNumberDictionary, SeededNumberDictionaryShape,
uint32_t>::FindEntry(uint32_t);
@@ -18015,7 +18015,7 @@ void StringTable::EnsureCapacityForDeserialization(Isolate* isolate,
int expected) {
Handle<StringTable> table = isolate->factory()->string_table();
// We need a key instance for the virtual hash function.
- InternalizedStringKey dummy_key(Handle<String>::null());
+ InternalizedStringKey dummy_key(isolate->factory()->empty_string());
table = StringTable::EnsureCapacity(table, expected, &dummy_key);
isolate->heap()->SetRootStringTable(*table);
}
@@ -18023,8 +18023,20 @@ void StringTable::EnsureCapacityForDeserialization(Isolate* isolate,
Handle<String> StringTable::LookupString(Isolate* isolate,
Handle<String> string) {
+ if (string->IsConsString() && string->IsFlat()) {
+ string = String::Flatten(string);
+ if (string->IsInternalizedString()) return string;
+ }
+
InternalizedStringKey key(string);
- return LookupKey(isolate, &key);
+ Handle<String> result = LookupKey(isolate, &key);
+
+ if (string->IsConsString()) {
+ Handle<ConsString> cons = Handle<ConsString>::cast(string);
+ cons->set_first(*result);
+ cons->set_second(isolate->heap()->empty_string());
+ }
+ return result;
}
@@ -18063,6 +18075,25 @@ String* StringTable::LookupKeyIfExists(Isolate* isolate, HashTableKey* key) {
return NULL;
}
+Handle<StringSet> StringSet::New(Isolate* isolate) {
+ return HashTable::New(isolate, 0);
+}
+
+Handle<StringSet> StringSet::Add(Handle<StringSet> stringset,
+ Handle<String> name) {
+ if (!stringset->Has(name)) {
+ stringset = EnsureCapacity(stringset, 1, *name);
+ uint32_t hash = StringSetShape::Hash(*name);
+ int entry = stringset->FindInsertionEntry(hash);
+ stringset->set(EntryToIndex(entry), *name);
+ stringset->ElementAdded();
+ }
+ return stringset;
+}
+
+bool StringSet::Has(Handle<String> name) {
+ return FindEntry(*name) != kNotFound;
+}
Handle<Object> CompilationCacheTable::Lookup(Handle<String> src,
Handle<Context> context,
@@ -18220,40 +18251,6 @@ void CompilationCacheTable::Remove(Object* value) {
}
-// StringsKey used for HashTable where key is array of internalized strings.
-class StringsKey : public HashTableKey {
- public:
- explicit StringsKey(Handle<FixedArray> strings) : strings_(strings) { }
-
- bool IsMatch(Object* strings) override {
- FixedArray* o = FixedArray::cast(strings);
- int len = strings_->length();
- if (o->length() != len) return false;
- for (int i = 0; i < len; i++) {
- if (o->get(i) != strings_->get(i)) return false;
- }
- return true;
- }
-
- uint32_t Hash() override { return HashForObject(*strings_); }
-
- uint32_t HashForObject(Object* obj) override {
- FixedArray* strings = FixedArray::cast(obj);
- int len = strings->length();
- uint32_t hash = 0;
- for (int i = 0; i < len; i++) {
- hash ^= String::cast(strings->get(i))->Hash();
- }
- return hash;
- }
-
- Handle<Object> AsHandle(Isolate* isolate) override { return strings_; }
-
- private:
- Handle<FixedArray> strings_;
-};
-
-
template<typename Derived, typename Shape, typename Key>
Handle<Derived> Dictionary<Derived, Shape, Key>::New(
Isolate* isolate,
@@ -18430,6 +18427,21 @@ void Dictionary<Derived, Shape, Key>::AddEntry(
dictionary->ElementAdded();
}
+bool SeededNumberDictionary::HasComplexElements() {
+ if (!requires_slow_elements()) return false;
+ int capacity = this->Capacity();
+ for (int i = 0; i < capacity; i++) {
+ Object* k = this->KeyAt(i);
+ if (this->IsKey(k)) {
+ DCHECK(!IsDeleted(i));
+ PropertyDetails details = this->DetailsAt(i);
+ if (details.type() == ACCESSOR_CONSTANT) return true;
+ PropertyAttributes attr = details.attributes();
+ if (attr & ALL_ATTRIBUTES_MASK) return true;
+ }
+ }
+ return false;
+}
void SeededNumberDictionary::UpdateMaxNumberKey(uint32_t key,
bool used_as_prototype) {
@@ -18537,23 +18549,6 @@ int Dictionary<Derived, Shape, Key>::NumberOfElementsFilterAttributes(
}
-template <typename Derived, typename Shape, typename Key>
-bool Dictionary<Derived, Shape, Key>::HasComplexElements() {
- int capacity = this->Capacity();
- for (int i = 0; i < capacity; i++) {
- Object* k = this->KeyAt(i);
- if (this->IsKey(k) && !k->FilterKey(ALL_PROPERTIES)) {
- if (this->IsDeleted(i)) continue;
- PropertyDetails details = this->DetailsAt(i);
- if (details.type() == ACCESSOR_CONSTANT) return true;
- PropertyAttributes attr = details.attributes();
- if (attr & ALL_ATTRIBUTES_MASK) return true;
- }
- }
- return false;
-}
-
-
template <typename Dictionary>
struct EnumIndexComparator {
explicit EnumIndexComparator(Dictionary* dict) : dict(dict) {}
@@ -18615,7 +18610,6 @@ int Dictionary<Derived, Shape, Key>::CopyKeysTo(
return index - start_index;
}
-
template <typename Derived, typename Shape, typename Key>
void Dictionary<Derived, Shape, Key>::CollectKeysTo(
Handle<Dictionary<Derived, Shape, Key> > dictionary, KeyAccumulator* keys,
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index f5e35c3596..e441546180 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -33,6 +33,8 @@
#include "src/mips64/constants-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
#include "src/ppc/constants-ppc.h" // NOLINT
+#elif V8_TARGET_ARCH_S390
+#include "src/s390/constants-s390.h" // NOLINT
#endif
@@ -78,6 +80,7 @@
// - HashTable
// - Dictionary
// - StringTable
+// - StringSet
// - CompilationCacheTable
// - CodeCacheHashTable
// - MapCache
@@ -419,6 +422,7 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(JS_MODULE_TYPE) \
V(JS_GLOBAL_OBJECT_TYPE) \
V(JS_GLOBAL_PROXY_TYPE) \
+ V(JS_SPECIAL_API_OBJECT_TYPE) \
V(JS_ARRAY_TYPE) \
V(JS_ARRAY_BUFFER_TYPE) \
V(JS_TYPED_ARRAY_TYPE) \
@@ -438,7 +442,6 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(DEBUG_INFO_TYPE) \
V(BREAK_POINT_INFO_TYPE)
-
// Since string types are not consecutive, this macro is used to
// iterate over them.
#define STRING_TYPE_LIST(V) \
@@ -591,7 +594,6 @@ static inline bool IsShortcutCandidate(int type) {
return ((type & kShortcutTypeMask) == kShortcutTypeTag);
}
-
enum InstanceType {
// String types.
INTERNALIZED_STRING_TYPE = kTwoByteStringTag | kSeqStringTag |
@@ -703,16 +705,18 @@ enum InstanceType {
// objects in the JS sense. The first and the last type in this range are
// the two forms of function. This organization enables using the same
// compares for checking the JS_RECEIVER and the NONCALLABLE_JS_OBJECT range.
- JS_PROXY_TYPE, // FIRST_JS_RECEIVER_TYPE
- JS_VALUE_TYPE, // FIRST_JS_OBJECT_TYPE
+ JS_PROXY_TYPE, // FIRST_JS_RECEIVER_TYPE
+ JS_GLOBAL_OBJECT_TYPE, // FIRST_JS_OBJECT_TYPE
+ JS_GLOBAL_PROXY_TYPE,
+ // Like JS_OBJECT_TYPE, but requires access checks and/or has interceptors.
+ JS_SPECIAL_API_OBJECT_TYPE, // LAST_SPECIAL_RECEIVER_TYPE
+ JS_VALUE_TYPE, // LAST_CUSTOM_ELEMENTS_RECEIVER
JS_MESSAGE_OBJECT_TYPE,
JS_DATE_TYPE,
JS_OBJECT_TYPE,
JS_CONTEXT_EXTENSION_OBJECT_TYPE,
JS_GENERATOR_OBJECT_TYPE,
JS_MODULE_TYPE,
- JS_GLOBAL_OBJECT_TYPE,
- JS_GLOBAL_PROXY_TYPE,
JS_ARRAY_TYPE,
JS_ARRAY_BUFFER_TYPE,
JS_TYPED_ARRAY_TYPE,
@@ -753,8 +757,14 @@ enum InstanceType {
FIRST_JS_RECEIVER_TYPE = JS_PROXY_TYPE,
LAST_JS_RECEIVER_TYPE = LAST_TYPE,
// Boundaries for testing the types represented as JSObject
- FIRST_JS_OBJECT_TYPE = JS_VALUE_TYPE,
+ FIRST_JS_OBJECT_TYPE = JS_GLOBAL_OBJECT_TYPE,
LAST_JS_OBJECT_TYPE = LAST_TYPE,
+ // Boundary for testing JSReceivers that need special property lookup handling
+ LAST_SPECIAL_RECEIVER_TYPE = JS_SPECIAL_API_OBJECT_TYPE,
+ // Boundary case for testing JSReceivers that may have elements while having
+ // an empty fixed array as elements backing store. This is true for string
+ // wrappers.
+ LAST_CUSTOM_ELEMENTS_RECEIVER = JS_VALUE_TYPE,
};
STATIC_ASSERT(JS_OBJECT_TYPE == Internals::kJSObjectType);
@@ -967,6 +977,7 @@ template <class C> inline bool Is(Object* obj);
V(HashTable) \
V(Dictionary) \
V(StringTable) \
+ V(StringSet) \
V(NormalizedMapCache) \
V(CompilationCacheTable) \
V(CodeCacheHashTable) \
@@ -974,7 +985,7 @@ template <class C> inline bool Is(Object* obj);
V(MapCache) \
V(JSGlobalObject) \
V(JSGlobalProxy) \
- V(UndetectableObject) \
+ V(Undetectable) \
V(AccessCheckNeeded) \
V(Callable) \
V(Function) \
@@ -999,7 +1010,8 @@ template <class C> inline bool Is(Object* obj);
V(Uninitialized) \
V(True) \
V(False) \
- V(ArgumentsMarker)
+ V(ArgumentsMarker) \
+ V(OptimizedOut)
// The element types selection for CreateListFromArrayLike.
enum class ElementTypes { kAll, kStringAndSymbol };
@@ -1068,7 +1080,7 @@ class Object {
INLINE(bool IsNaN() const);
INLINE(bool IsMinusZero() const);
bool ToInt32(int32_t* value);
- bool ToUint32(uint32_t* value);
+ inline bool ToUint32(uint32_t* value);
inline Representation OptimalRepresentation();
@@ -1119,9 +1131,13 @@ class Object {
MUST_USE_RESULT static MaybeHandle<JSReceiver> ToObject(
Isolate* isolate, Handle<Object> object, Handle<Context> context);
+ // ES6 section 9.2.1.2, OrdinaryCallBindThis for sloppy callee.
+ MUST_USE_RESULT static MaybeHandle<JSReceiver> ConvertReceiver(
+ Isolate* isolate, Handle<Object> object);
+
// ES6 section 7.1.14 ToPropertyKey
- MUST_USE_RESULT static MaybeHandle<Name> ToName(Isolate* isolate,
- Handle<Object> input);
+ MUST_USE_RESULT static inline MaybeHandle<Name> ToName(Isolate* isolate,
+ Handle<Object> input);
// ES6 section 7.1.1 ToPrimitive
MUST_USE_RESULT static inline MaybeHandle<Object> ToPrimitive(
@@ -1231,6 +1247,10 @@ class Object {
Handle<Object> object, Handle<Name> name, Handle<Object> value,
LanguageMode language_mode,
StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
+ MUST_USE_RESULT static inline MaybeHandle<Object> SetPropertyOrElement(
+ Handle<Object> object, Handle<Name> name, Handle<Object> value,
+ LanguageMode language_mode,
+ StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
MUST_USE_RESULT static Maybe<bool> SetSuperProperty(
LookupIterator* it, Handle<Object> value, LanguageMode language_mode,
@@ -1261,8 +1281,6 @@ class Object {
MUST_USE_RESULT static inline MaybeHandle<Object> GetPropertyOrElement(
Handle<Object> receiver, Handle<Name> name, Handle<JSReceiver> holder);
MUST_USE_RESULT static inline MaybeHandle<Object> GetProperty(
- Isolate* isolate, Handle<Object> object, const char* key);
- MUST_USE_RESULT static inline MaybeHandle<Object> GetProperty(
Handle<Object> object, Handle<Name> name);
MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithAccessor(
@@ -1371,6 +1389,9 @@ class Object {
LookupIterator* it, Handle<Object> value, LanguageMode language_mode,
StoreFromKeyed store_mode, bool* found);
+ MUST_USE_RESULT static MaybeHandle<Name> ConvertToName(Isolate* isolate,
+ Handle<Object> input);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(Object);
};
@@ -1812,6 +1833,13 @@ class JSReceiver: public HeapObject {
MUST_USE_RESULT static inline Maybe<bool> HasOwnProperty(
Handle<JSReceiver> object, Handle<Name> name);
+ MUST_USE_RESULT static inline MaybeHandle<Object> GetProperty(
+ Isolate* isolate, Handle<JSReceiver> receiver, const char* key);
+ MUST_USE_RESULT static inline MaybeHandle<Object> GetProperty(
+ Handle<JSReceiver> receiver, Handle<Name> name);
+ MUST_USE_RESULT static inline MaybeHandle<Object> GetElement(
+ Isolate* isolate, Handle<JSReceiver> receiver, uint32_t index);
+
// Implementation of ES6 [[Delete]]
MUST_USE_RESULT static Maybe<bool> DeletePropertyOrElement(
Handle<JSReceiver> object, Handle<Name> name,
@@ -1919,15 +1947,15 @@ class JSReceiver: public HeapObject {
bool from_javascript,
ShouldThrow should_throw);
-
- static Handle<Object> GetDataProperty(Handle<JSReceiver> object,
- Handle<Name> name);
+ inline static Handle<Object> GetDataProperty(Handle<JSReceiver> object,
+ Handle<Name> name);
static Handle<Object> GetDataProperty(LookupIterator* it);
// Retrieves a permanent object identity hash code. The undefined value might
// be returned in case no hash was created yet.
- inline Object* GetIdentityHash();
+ static inline Handle<Object> GetIdentityHash(Isolate* isolate,
+ Handle<JSReceiver> object);
// Retrieves a permanent object identity hash code. May create and store a
// hash code if needed and none exists.
@@ -1944,7 +1972,8 @@ class JSReceiver: public HeapObject {
// "for (n in object) { }".
MUST_USE_RESULT static MaybeHandle<FixedArray> GetKeys(
Handle<JSReceiver> object, KeyCollectionType type, PropertyFilter filter,
- GetKeysConversion keys_conversion = KEEP_NUMBERS);
+ GetKeysConversion keys_conversion = KEEP_NUMBERS,
+ bool filter_proxy_keys_ = true);
MUST_USE_RESULT static MaybeHandle<FixedArray> GetOwnValues(
Handle<JSReceiver> object, PropertyFilter filter);
@@ -2037,11 +2066,12 @@ class JSObject: public JSReceiver {
inline bool HasSlowArgumentsElements();
inline bool HasFastStringWrapperElements();
inline bool HasSlowStringWrapperElements();
+ bool HasEnumerableElements();
+
inline SeededNumberDictionary* element_dictionary(); // Gets slow elements.
// Requires: HasFastElements().
- static Handle<FixedArray> EnsureWritableFastElements(
- Handle<JSObject> object);
+ static void EnsureWritableFastElements(Handle<JSObject> object);
// Collects elements starting at index 0.
// Undefined values are placed after non-undefined values.
@@ -2089,8 +2119,9 @@ class JSObject: public JSReceiver {
// Adds or reconfigures a property to attributes NONE. It will fail when it
// cannot.
- MUST_USE_RESULT static Maybe<bool> CreateDataProperty(LookupIterator* it,
- Handle<Object> value);
+ MUST_USE_RESULT static Maybe<bool> CreateDataProperty(
+ LookupIterator* it, Handle<Object> value,
+ ShouldThrow should_throw = DONT_THROW);
static void AddProperty(Handle<JSObject> object, Handle<Name> name,
Handle<Object> value, PropertyAttributes attributes);
@@ -2144,7 +2175,7 @@ class JSObject: public JSReceiver {
};
// Retrieve interceptors.
- InterceptorInfo* GetNamedInterceptor();
+ inline InterceptorInfo* GetNamedInterceptor();
inline InterceptorInfo* GetIndexedInterceptor();
// Used from JSReceiver.
@@ -2207,8 +2238,6 @@ class JSObject: public JSReceiver {
// Returns true if the object has a property with the hidden string as name.
static bool HasHiddenProperties(Handle<JSObject> object);
- static void SetIdentityHash(Handle<JSObject> object, Handle<Smi> hash);
-
static void ValidateElements(Handle<JSObject> object);
// Makes sure that this object can contain HeapObject as elements.
@@ -2289,6 +2318,9 @@ class JSObject: public JSReceiver {
static Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object);
+ static Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
+ Handle<JSObject> object);
+
// Returns a new map with all transitions dropped from the object's current
// map and the ElementsKind set.
static Handle<Map> GetElementsTransitionMap(Handle<JSObject> object,
@@ -2378,10 +2410,6 @@ class JSObject: public JSReceiver {
Handle<JSObject> object,
AllocationSiteUsageContext* site_context,
DeepCopyHints hints = kNoHints);
- // Deep copies given object with special handling for JSFunctions which
- // 1) must be Api functions and 2) are not copied but left as is.
- MUST_USE_RESULT static MaybeHandle<JSObject> DeepCopyApiBoilerplate(
- Handle<JSObject> object);
MUST_USE_RESULT static MaybeHandle<JSObject> DeepWalk(
Handle<JSObject> object,
AllocationSiteCreationContext* site_context);
@@ -2525,13 +2553,11 @@ class JSObject: public JSReceiver {
Handle<JSObject> object,
Handle<Object> value);
- MUST_USE_RESULT Object* GetIdentityHash();
+ static Handle<Object> GetIdentityHash(Isolate* isolate,
+ Handle<JSObject> object);
static Handle<Smi> GetOrCreateIdentityHash(Handle<JSObject> object);
- static Handle<SeededNumberDictionary> GetNormalizedElementDictionary(
- Handle<JSObject> object, Handle<FixedArrayBase> elements);
-
// Helper for fast versions of preventExtensions, seal, and freeze.
// attrs is one of NONE, SEALED, or FROZEN (depending on the operation).
template <PropertyAttributes attrs>
@@ -3065,6 +3091,16 @@ class DescriptorArray: public FixedArray {
return ToKeyIndex(number_of_descriptors);
}
+ static int ToDetailsIndex(int descriptor_number) {
+ return kFirstIndex + (descriptor_number * kDescriptorSize) +
+ kDescriptorDetails;
+ }
+
+ // Conversion from descriptor number to array indices.
+ static int ToKeyIndex(int descriptor_number) {
+ return kFirstIndex + (descriptor_number * kDescriptorSize) + kDescriptorKey;
+ }
+
private:
// An entry in a DescriptorArray, represented as an (array, index) pair.
class Entry {
@@ -3080,19 +3116,6 @@ class DescriptorArray: public FixedArray {
int index_;
};
- // Conversion from descriptor number to array indices.
- static int ToKeyIndex(int descriptor_number) {
- return kFirstIndex +
- (descriptor_number * kDescriptorSize) +
- kDescriptorKey;
- }
-
- static int ToDetailsIndex(int descriptor_number) {
- return kFirstIndex +
- (descriptor_number * kDescriptorSize) +
- kDescriptorDetails;
- }
-
static int ToValueIndex(int descriptor_number) {
return kFirstIndex +
(descriptor_number * kDescriptorSize) +
@@ -3196,6 +3219,7 @@ class HashTableBase : public FixedArray {
// Tells whether k is a real key. The hole and undefined are not allowed
// as keys and can be used to indicate missing or deleted elements.
inline bool IsKey(Object* k);
+ inline bool IsKey(Heap* heap, Object* k);
// Compute the probe offset (quadratic probing).
INLINE(static uint32_t GetProbeOffset(uint32_t n)) {
@@ -3417,6 +3441,25 @@ class StringTable: public HashTable<StringTable,
DISALLOW_IMPLICIT_CONSTRUCTORS(StringTable);
};
+class StringSetShape : public BaseShape<String*> {
+ public:
+ static inline bool IsMatch(String* key, Object* value);
+ static inline uint32_t Hash(String* key);
+ static inline uint32_t HashForObject(String* key, Object* object);
+
+ static const int kPrefixSize = 0;
+ static const int kEntrySize = 1;
+};
+
+class StringSet : public HashTable<StringSet, StringSetShape, String*> {
+ public:
+ static Handle<StringSet> New(Isolate* isolate);
+ static Handle<StringSet> Add(Handle<StringSet> blacklist,
+ Handle<String> name);
+ bool Has(Handle<String> name);
+
+ DECLARE_CAST(StringSet)
+};
template <typename Derived, typename Shape, typename Key>
class Dictionary: public HashTable<Derived, Shape, Key> {
@@ -3473,10 +3516,6 @@ class Dictionary: public HashTable<Derived, Shape, Key> {
return NumberOfElementsFilterAttributes(ENUMERABLE_STRINGS);
}
- // Returns true if the dictionary contains any elements that are non-writable,
- // non-configurable, non-enumerable, or have getters/setters.
- bool HasComplexElements();
-
enum SortMode { UNSORTED, SORTED };
// Fill in details for properties into storage.
@@ -3709,6 +3748,10 @@ class SeededNumberDictionary
void UpdateMaxNumberKey(uint32_t key, bool used_as_prototype);
+ // Returns true if the dictionary contains any elements that are non-writable,
+ // non-configurable, non-enumerable, or have getters/setters.
+ bool HasComplexElements();
+
// If slow elements are required we will never go back to fast-case
// for the elements kept in this dictionary. We require slow
// elements if an element has been added at an index larger than
@@ -4462,7 +4505,7 @@ class BytecodeArray : public FixedArrayBase {
// Accessors for source position table containing mappings between byte code
// offset and source position.
- DECL_ACCESSORS(source_position_table, FixedArray)
+ DECL_ACCESSORS(source_position_table, ByteArray)
DECLARE_CAST(BytecodeArray)
@@ -4870,11 +4913,14 @@ class Code: public HeapObject {
#define NON_IC_KIND_LIST(V) \
V(FUNCTION) \
V(OPTIMIZED_FUNCTION) \
+ V(BYTECODE_HANDLER) \
V(STUB) \
V(HANDLER) \
V(BUILTIN) \
V(REGEXP) \
- V(WASM_FUNCTION)
+ V(WASM_FUNCTION) \
+ V(WASM_TO_JS_FUNCTION) \
+ V(JS_TO_WASM_FUNCTION)
#define IC_KIND_LIST(V) \
V(LOAD_IC) \
@@ -4884,7 +4930,6 @@ class Code: public HeapObject {
V(KEYED_STORE_IC) \
V(BINARY_OP_IC) \
V(COMPARE_IC) \
- V(COMPARE_NIL_IC) \
V(TO_BOOLEAN_IC)
#define CODE_KIND_LIST(V) \
@@ -4998,10 +5043,10 @@ class Code: public HeapObject {
inline bool is_call_stub();
inline bool is_binary_op_stub();
inline bool is_compare_ic_stub();
- inline bool is_compare_nil_ic_stub();
inline bool is_to_boolean_ic_stub();
inline bool is_keyed_stub();
inline bool is_optimized_code();
+ inline bool is_wasm_code();
inline bool embeds_maps_weakly();
inline bool IsCodeStubOrIC();
@@ -5244,6 +5289,7 @@ class Code: public HeapObject {
static void MakeCodeAgeSequenceYoung(byte* sequence, Isolate* isolate);
static void MarkCodeAsExecuted(byte* sequence, Isolate* isolate);
void MakeYoung(Isolate* isolate);
+ void PreAge(Isolate* isolate);
void MarkToBeExecutedOnce(Isolate* isolate);
void MakeOlder(MarkingParity);
static bool IsYoungSequence(Isolate* isolate, byte* sequence);
@@ -5301,8 +5347,9 @@ class Code: public HeapObject {
// Note: We might be able to squeeze this into the flags above.
static const int kPrologueOffset = kKindSpecificFlags2Offset + kIntSize;
static const int kConstantPoolOffset = kPrologueOffset + kIntSize;
- static const int kHeaderPaddingStart =
+ static const int kBuiltinIndexOffset =
kConstantPoolOffset + kConstantPoolSize;
+ static const int kHeaderPaddingStart = kBuiltinIndexOffset + kIntSize;
// Add padding to align the instruction start following right after
// the Code object header.
@@ -5326,10 +5373,11 @@ class Code: public HeapObject {
class TypeField : public BitField<StubType, 3, 1> {};
class CacheHolderField : public BitField<CacheHolderFlag, 4, 2> {};
class KindField : public BitField<Kind, 6, 5> {};
- class ExtraICStateField: public BitField<ExtraICState, 11,
- PlatformSmiTagging::kSmiValueSize - 11 + 1> {}; // NOLINT
+ class ExtraICStateField
+ : public BitField<ExtraICState, 11, PlatformSmiTagging::kSmiValueSize -
+ 11 + 1> {}; // NOLINT
- // KindSpecificFlags1 layout (STUB and OPTIMIZED_FUNCTION)
+ // KindSpecificFlags1 layout (STUB, BUILTIN and OPTIMIZED_FUNCTION)
static const int kStackSlotsFirstBit = 0;
static const int kStackSlotsBitCount = 24;
static const int kMarkedForDeoptimizationBit =
@@ -5404,11 +5452,37 @@ class Code: public HeapObject {
class AbstractCode : public HeapObject {
public:
+ // All code kinds and INTERPRETED_FUNCTION.
+ enum Kind {
+#define DEFINE_CODE_KIND_ENUM(name) name,
+ CODE_KIND_LIST(DEFINE_CODE_KIND_ENUM)
+#undef DEFINE_CODE_KIND_ENUM
+ INTERPRETED_FUNCTION,
+ };
+
int SourcePosition(int offset);
int SourceStatementPosition(int offset);
+ // Returns the address of the first instruction.
+ inline Address instruction_start();
+
+ // Returns the address right after the last instruction.
+ inline Address instruction_end();
+
+ // Returns the of the code instructions.
+ inline int instruction_size();
+
+ // Returns true if pc is inside this object's instructions.
+ inline bool contains(byte* pc);
+
+ // Returns the AbstractCode::Kind of the code.
+ inline Kind kind();
+
+ // Calculate the size of the code object to report for log events. This takes
+ // the layout of the code object into account.
+ inline int ExecutableSize();
+
DECLARE_CAST(AbstractCode)
- inline int Size();
inline Code* GetCode();
inline BytecodeArray* GetBytecodeArray();
};
@@ -5565,6 +5639,10 @@ class Map: public HeapObject {
static MaybeHandle<JSFunction> GetConstructorFunction(
Handle<Map> map, Handle<Context> native_context);
+ // Retrieve interceptors.
+ inline InterceptorInfo* GetNamedInterceptor();
+ inline InterceptorInfo* GetIndexedInterceptor();
+
// Instance type.
inline InstanceType instance_type();
inline void set_instance_type(InstanceType value);
@@ -5597,7 +5675,7 @@ class Map: public HeapObject {
class Deprecated : public BitField<bool, 23, 1> {};
class IsUnstable : public BitField<bool, 24, 1> {};
class IsMigrationTarget : public BitField<bool, 25, 1> {};
- class IsStrong : public BitField<bool, 26, 1> {};
+ // Bit 26 is free.
class NewTargetIsBase : public BitField<bool, 27, 1> {};
// Bit 28 is free.
@@ -5706,8 +5784,6 @@ class Map: public HeapObject {
inline void set_is_callable();
inline bool is_callable() const;
- inline void set_is_strong();
- inline bool is_strong();
inline void set_new_target_is_base(bool value);
inline bool new_target_is_base();
inline void set_is_extensible(bool value);
@@ -5773,6 +5849,7 @@ class Map: public HeapObject {
int NumberOfFields();
// TODO(ishell): candidate with JSObject::MigrateToMap().
+ bool InstancesNeedRewriting(Map* target);
bool InstancesNeedRewriting(Map* target, int target_number_of_fields,
int target_inobject, int target_unused,
int* old_number_of_fields);
@@ -5784,15 +5861,14 @@ class Map: public HeapObject {
static void GeneralizeFieldType(Handle<Map> map, int modify_index,
Representation new_representation,
Handle<FieldType> new_field_type);
- static Handle<Map> ReconfigureProperty(Handle<Map> map, int modify_index,
- PropertyKind new_kind,
- PropertyAttributes new_attributes,
- Representation new_representation,
- Handle<FieldType> new_field_type,
- StoreMode store_mode);
- static Handle<Map> CopyGeneralizeAllRepresentations(
- Handle<Map> map, int modify_index, StoreMode store_mode,
- PropertyKind kind, PropertyAttributes attributes, const char* reason);
+
+ static inline Handle<Map> ReconfigureProperty(
+ Handle<Map> map, int modify_index, PropertyKind new_kind,
+ PropertyAttributes new_attributes, Representation new_representation,
+ Handle<FieldType> new_field_type, StoreMode store_mode);
+
+ static inline Handle<Map> ReconfigureElementsKind(
+ Handle<Map> map, ElementsKind new_elements_kind);
static Handle<Map> PrepareForDataProperty(Handle<Map> old_map,
int descriptor_number,
@@ -5961,8 +6037,9 @@ class Map: public HeapObject {
PropertyAttributes attributes,
StoreFromKeyed store_mode);
static Handle<Map> TransitionToAccessorProperty(
- Handle<Map> map, Handle<Name> name, AccessorComponent component,
- Handle<Object> accessor, PropertyAttributes attributes);
+ Handle<Map> map, Handle<Name> name, int descriptor,
+ AccessorComponent component, Handle<Object> accessor,
+ PropertyAttributes attributes);
static Handle<Map> ReconfigureExistingProperty(Handle<Map> map,
int descriptor,
PropertyKind kind,
@@ -6022,17 +6099,10 @@ class Map: public HeapObject {
// Computes a hash value for this map, to be used in HashTables and such.
int Hash();
- // Returns the map that this map transitions to if its elements_kind
- // is changed to |elements_kind|, or NULL if no such map is cached yet.
- // |safe_to_add_transitions| is set to false if adding transitions is not
- // allowed.
- Map* LookupElementsTransitionMap(ElementsKind elements_kind);
-
// Returns the transitioned map for this map with the most generic
- // elements_kind that's found in |candidates|, or null handle if no match is
+ // elements_kind that's found in |candidates|, or |nullptr| if no match is
// found at all.
- static Handle<Map> FindTransitionedMap(Handle<Map> map,
- MapHandleList* candidates);
+ Map* FindElementsKindTransitionedMap(MapHandleList* candidates);
inline bool CanTransition();
@@ -6191,6 +6261,17 @@ class Map: public HeapObject {
Handle<LayoutDescriptor> full_layout_descriptor);
private:
+ // Returns the map that this (root) map transitions to if its elements_kind
+ // is changed to |elements_kind|, or |nullptr| if no such map is cached yet.
+ Map* LookupElementsTransitionMap(ElementsKind elements_kind);
+
+ // Tries to replay property transitions starting from this (root) map using
+ // the descriptor array of the |map|. The |root_map| is expected to have
+ // proper elements kind and therefore elements kinds transitions are not
+ // taken by this function. Returns |nullptr| if matching transition map is
+ // not found.
+ Map* TryReplayPropertyTransitions(Map* map);
+
static void ConnectTransition(Handle<Map> parent, Handle<Map> child,
Handle<Name> name, SimpleTransitionFlag flag);
@@ -6227,6 +6308,19 @@ class Map: public HeapObject {
static Handle<Map> CopyNormalized(Handle<Map> map,
PropertyNormalizationMode mode);
+ static Handle<Map> Reconfigure(Handle<Map> map,
+ ElementsKind new_elements_kind,
+ int modify_index, PropertyKind new_kind,
+ PropertyAttributes new_attributes,
+ Representation new_representation,
+ Handle<FieldType> new_field_type,
+ StoreMode store_mode);
+
+ static Handle<Map> CopyGeneralizeAllRepresentations(
+ Handle<Map> map, ElementsKind elements_kind, int modify_index,
+ StoreMode store_mode, PropertyKind kind, PropertyAttributes attributes,
+ const char* reason);
+
// Fires when the layout of an object with a leaf map changes.
// This includes adding transitions to the leaf map or changing
// the descriptor array.
@@ -6535,41 +6629,43 @@ class Script: public Struct {
//
// Installation of ids for the selected builtin functions is handled
// by the bootstrapper.
-#define FUNCTIONS_WITH_ID_LIST(V) \
- V(Array.prototype, indexOf, ArrayIndexOf) \
- V(Array.prototype, lastIndexOf, ArrayLastIndexOf) \
- V(Array.prototype, push, ArrayPush) \
- V(Array.prototype, pop, ArrayPop) \
- V(Array.prototype, shift, ArrayShift) \
- V(Function.prototype, apply, FunctionApply) \
- V(Function.prototype, call, FunctionCall) \
- V(String.prototype, charCodeAt, StringCharCodeAt) \
- V(String.prototype, charAt, StringCharAt) \
- V(String.prototype, concat, StringConcat) \
- V(String.prototype, toLowerCase, StringToLowerCase) \
- V(String.prototype, toUpperCase, StringToUpperCase) \
- V(String, fromCharCode, StringFromCharCode) \
- V(Math, random, MathRandom) \
- V(Math, floor, MathFloor) \
- V(Math, round, MathRound) \
- V(Math, ceil, MathCeil) \
- V(Math, abs, MathAbs) \
- V(Math, log, MathLog) \
- V(Math, exp, MathExp) \
- V(Math, sqrt, MathSqrt) \
- V(Math, pow, MathPow) \
- V(Math, max, MathMax) \
- V(Math, min, MathMin) \
- V(Math, cos, MathCos) \
- V(Math, sin, MathSin) \
- V(Math, tan, MathTan) \
- V(Math, acos, MathAcos) \
- V(Math, asin, MathAsin) \
- V(Math, atan, MathAtan) \
- V(Math, atan2, MathAtan2) \
- V(Math, imul, MathImul) \
- V(Math, clz32, MathClz32) \
- V(Math, fround, MathFround)
+#define FUNCTIONS_WITH_ID_LIST(V) \
+ V(Array.prototype, indexOf, ArrayIndexOf) \
+ V(Array.prototype, lastIndexOf, ArrayLastIndexOf) \
+ V(Array.prototype, push, ArrayPush) \
+ V(Array.prototype, pop, ArrayPop) \
+ V(Array.prototype, shift, ArrayShift) \
+ V(Function.prototype, apply, FunctionApply) \
+ V(Function.prototype, call, FunctionCall) \
+ V(Object.prototype, hasOwnProperty, ObjectHasOwnProperty) \
+ V(String.prototype, charCodeAt, StringCharCodeAt) \
+ V(String.prototype, charAt, StringCharAt) \
+ V(String.prototype, concat, StringConcat) \
+ V(String.prototype, toLowerCase, StringToLowerCase) \
+ V(String.prototype, toUpperCase, StringToUpperCase) \
+ V(String, fromCharCode, StringFromCharCode) \
+ V(Math, random, MathRandom) \
+ V(Math, floor, MathFloor) \
+ V(Math, round, MathRound) \
+ V(Math, ceil, MathCeil) \
+ V(Math, abs, MathAbs) \
+ V(Math, log, MathLog) \
+ V(Math, exp, MathExp) \
+ V(Math, sqrt, MathSqrt) \
+ V(Math, pow, MathPow) \
+ V(Math, max, MathMax) \
+ V(Math, min, MathMin) \
+ V(Math, cos, MathCos) \
+ V(Math, sin, MathSin) \
+ V(Math, tan, MathTan) \
+ V(Math, acos, MathAcos) \
+ V(Math, asin, MathAsin) \
+ V(Math, atan, MathAtan) \
+ V(Math, atan2, MathAtan2) \
+ V(Math, imul, MathImul) \
+ V(Math, clz32, MathClz32) \
+ V(Math, fround, MathFround) \
+ V(Math, trunc, MathTrunc)
#define ATOMIC_FUNCTIONS_WITH_ID_LIST(V) \
V(Atomics, load, AtomicsLoad) \
@@ -6577,6 +6673,9 @@ class Script: public Struct {
enum BuiltinFunctionId {
kArrayCode,
+ kGeneratorObjectNext,
+ kGeneratorObjectReturn,
+ kGeneratorObjectThrow,
#define DECLARE_FUNCTION_ID(ignored1, ignore2, name) \
k##name,
FUNCTIONS_WITH_ID_LIST(DECLARE_FUNCTION_ID)
@@ -6606,6 +6705,10 @@ class SharedFunctionInfo: public HeapObject {
// [code]: Function code.
DECL_ACCESSORS(code, Code)
+ // Get the abstract code associated with the function, which will either be
+ // a Code object or a BytecodeArray.
+ inline AbstractCode* abstract_code();
+
inline void ReplaceCode(Code* code);
// [optimized_code_map]: Map from native context to optimized code
@@ -6635,22 +6738,17 @@ class SharedFunctionInfo: public HeapObject {
// Trims the optimized code map after entries have been removed.
void TrimOptimizedCodeMap(int shrink_by);
- // Add a new entry to the optimized code map for context-independent code.
+ // Add or update entry in the optimized code map for context-independent code.
static void AddSharedCodeToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
Handle<Code> code);
- // Add a new entry to the optimized code map for context-dependent code.
- inline static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
- Handle<Context> native_context,
- Handle<Code> code,
- Handle<LiteralsArray> literals,
- BailoutId osr_ast_id);
-
- // We may already have cached the code, but want to store literals in the
- // cache.
- inline static void AddLiteralsToOptimizedCodeMap(
- Handle<SharedFunctionInfo> shared, Handle<Context> native_context,
- Handle<LiteralsArray> literals);
+ // Add or update entry in the optimized code map for context-dependent code.
+ // If {code} is not given, then an existing entry's code won't be overwritten.
+ static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
+ Handle<Context> native_context,
+ MaybeHandle<Code> code,
+ Handle<LiteralsArray> literals,
+ BailoutId osr_ast_id);
// Set up the link between shared function info and the script. The shared
// function info is added to the list on the script.
@@ -6721,19 +6819,34 @@ class SharedFunctionInfo: public HeapObject {
// [function data]: This field holds some additional data for function.
// Currently it has one of:
// - a FunctionTemplateInfo to make benefit the API [IsApiFunction()].
- // - a Smi identifying a builtin function [HasBuiltinFunctionId()].
// - a BytecodeArray for the interpreter [HasBytecodeArray()].
- // In the long run we don't want all functions to have this field but
- // we can fix that when we have a better model for storing hidden data
- // on objects.
DECL_ACCESSORS(function_data, Object)
inline bool IsApiFunction();
inline FunctionTemplateInfo* get_api_func_data();
- inline bool HasBuiltinFunctionId();
- inline BuiltinFunctionId builtin_function_id();
+ inline void set_api_func_data(FunctionTemplateInfo* data);
inline bool HasBytecodeArray();
inline BytecodeArray* bytecode_array();
+ inline void set_bytecode_array(BytecodeArray* bytecode);
+ inline void ClearBytecodeArray();
+
+ // [function identifier]: This field holds an additional identifier for the
+ // function.
+ // - a Smi identifying a builtin function [HasBuiltinFunctionId()].
+ // - a String identifying the function's inferred name [HasInferredName()].
+ // The inferred_name is inferred from variable or property
+ // assignment of this function. It is used to facilitate debugging and
+ // profiling of JavaScript code written in OO style, where almost
+ // all functions are anonymous but are assigned to object
+ // properties.
+ DECL_ACCESSORS(function_identifier, Object)
+
+ inline bool HasBuiltinFunctionId();
+ inline BuiltinFunctionId builtin_function_id();
+ inline void set_builtin_function_id(BuiltinFunctionId id);
+ inline bool HasInferredName();
+ inline String* inferred_name();
+ inline void set_inferred_name(String* inferred_name);
// [script info]: Script from which the function originates.
DECL_ACCESSORS(script, Object)
@@ -6760,16 +6873,12 @@ class SharedFunctionInfo: public HeapObject {
// [debug info]: Debug information.
DECL_ACCESSORS(debug_info, Object)
- // [inferred name]: Name inferred from variable or property
- // assignment of this function. Used to facilitate debugging and
- // profiling of JavaScript code written in OO style, where almost
- // all functions are anonymous but are assigned to object
- // properties.
- DECL_ACCESSORS(inferred_name, String)
-
// The function's name if it is non-empty, otherwise the inferred name.
String* DebugName();
+ // Used for flags such as --hydrogen-filter.
+ bool PassesFilter(const char* raw_filter);
+
// Position of the 'function' token in the script source.
inline int function_token_position() const;
inline void set_function_token_position(int function_token_position);
@@ -7006,9 +7115,9 @@ class SharedFunctionInfo: public HeapObject {
kInstanceClassNameOffset + kPointerSize;
static const int kScriptOffset = kFunctionDataOffset + kPointerSize;
static const int kDebugInfoOffset = kScriptOffset + kPointerSize;
- static const int kInferredNameOffset = kDebugInfoOffset + kPointerSize;
+ static const int kFunctionIdentifierOffset = kDebugInfoOffset + kPointerSize;
static const int kFeedbackVectorOffset =
- kInferredNameOffset + kPointerSize;
+ kFunctionIdentifierOffset + kPointerSize;
#if TRACE_MAPS
static const int kUniqueIdOffset = kFeedbackVectorOffset + kPointerSize;
static const int kLastPointerFieldOffset = kUniqueIdOffset;
@@ -7139,9 +7248,9 @@ class SharedFunctionInfo: public HeapObject {
kAllowLazyCompilation,
kAllowLazyCompilationWithoutContext,
kOptimizationDisabled,
+ kNeverCompiled,
kNative,
kStrictModeFunction,
- kStrongModeFunction,
kUsesArguments,
kNeedsHomeObject,
// byte 1
@@ -7165,7 +7274,6 @@ class SharedFunctionInfo: public HeapObject {
kIsSetterFunction,
// byte 3
kDeserialized,
- kNeverCompiled,
kIsDeclaration,
kCompilerHintsCount, // Pseudo entry
};
@@ -7215,8 +7323,6 @@ class SharedFunctionInfo: public HeapObject {
// native tests when using integer-width instructions.
static const int kStrictModeBit =
kStrictModeFunction + kCompilerHintsSmiTagSize;
- static const int kStrongModeBit =
- kStrongModeFunction + kCompilerHintsSmiTagSize;
static const int kNativeBit = kNative + kCompilerHintsSmiTagSize;
static const int kClassConstructorBits =
@@ -7227,7 +7333,6 @@ class SharedFunctionInfo: public HeapObject {
// native tests.
// Allows to use byte-width instructions.
static const int kStrictModeBitWithinByte = kStrictModeBit % kBitsPerByte;
- static const int kStrongModeBitWithinByte = kStrongModeBit % kBitsPerByte;
static const int kNativeBitWithinByte = kNativeBit % kBitsPerByte;
static const int kClassConstructorBitsWithinByte =
@@ -7246,7 +7351,6 @@ class SharedFunctionInfo: public HeapObject {
#error Unknown byte ordering
#endif
static const int kStrictModeByteOffset = BYTE_OFFSET(kStrictModeFunction);
- static const int kStrongModeByteOffset = BYTE_OFFSET(kStrongModeFunction);
static const int kNativeByteOffset = BYTE_OFFSET(kNative);
static const int kFunctionKindByteOffset = BYTE_OFFSET(kFunctionKind);
#undef BYTE_OFFSET
@@ -7258,13 +7362,6 @@ class SharedFunctionInfo: public HeapObject {
int SearchOptimizedCodeMapEntry(Context* native_context,
BailoutId osr_ast_id);
- // If code is undefined, then existing code won't be overwritten.
- static void AddToOptimizedCodeMapInternal(Handle<SharedFunctionInfo> shared,
- Handle<Context> native_context,
- Handle<HeapObject> code,
- Handle<LiteralsArray> literals,
- BailoutId osr_ast_id);
-
DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
};
@@ -7441,6 +7538,10 @@ class JSFunction: public JSObject {
inline void set_code_no_write_barrier(Code* code);
inline void ReplaceCode(Code* code);
+ // Get the abstract code associated with the function, which will either be
+ // a Code object or a BytecodeArray.
+ inline AbstractCode* abstract_code();
+
// Tells whether this function inlines the given shared function info.
bool Inlines(SharedFunctionInfo* candidate);
@@ -7527,7 +7628,11 @@ class JSFunction: public JSObject {
int requested_internal_fields,
int* instance_size,
int* in_object_properties);
-
+ static void CalculateInstanceSizeHelper(InstanceType instance_type,
+ int requested_internal_fields,
+ int requested_in_object_properties,
+ int* instance_size,
+ int* in_object_properties);
// Visiting policy flags define whether the code entry or next function
// should be visited or not.
enum BodyVisitingPolicy {
@@ -7556,9 +7661,6 @@ class JSFunction: public JSObject {
// Returns the number of allocated literals.
inline int NumberOfLiterals();
- // Used for flags such as --hydrogen-filter.
- bool PassesFilter(const char* raw_filter);
-
// The function's name if it is configured, otherwise shared function info
// debug name.
static Handle<String> GetName(Handle<JSFunction> function);
@@ -9510,6 +9612,9 @@ class Oddball: public HeapObject {
// [to_number]: Cached to_number computed at startup.
DECL_ACCESSORS(to_number, Object)
+ // [to_number]: Cached to_boolean computed at startup.
+ DECL_ACCESSORS(to_boolean, Oddball)
+
// [typeof]: Cached type_of computed at startup.
DECL_ACCESSORS(type_of, String)
@@ -9527,12 +9632,13 @@ class Oddball: public HeapObject {
// Initialize the fields.
static void Initialize(Isolate* isolate, Handle<Oddball> oddball,
const char* to_string, Handle<Object> to_number,
- const char* type_of, byte kind);
+ bool to_boolean, const char* type_of, byte kind);
// Layout description.
static const int kToStringOffset = HeapObject::kHeaderSize;
static const int kToNumberOffset = kToStringOffset + kPointerSize;
- static const int kTypeOfOffset = kToNumberOffset + kPointerSize;
+ static const int kToBooleanOffset = kToNumberOffset + kPointerSize;
+ static const int kTypeOfOffset = kToBooleanOffset + kPointerSize;
static const int kKindOffset = kTypeOfOffset + kPointerSize;
static const int kSize = kKindOffset + kPointerSize;
@@ -9546,6 +9652,7 @@ class Oddball: public HeapObject {
static const byte kUninitialized = 6;
static const byte kOther = 7;
static const byte kException = 8;
+ static const byte kOptimizedOut = 9;
typedef FixedBodyDescriptor<kToStringOffset, kTypeOfOffset + kPointerSize,
kSize> BodyDescriptor;
@@ -9768,7 +9875,8 @@ class JSProxy: public JSReceiver {
typedef FixedBodyDescriptor<JSReceiver::kPropertiesOffset, kSize, kSize>
BodyDescriptor;
- MUST_USE_RESULT Object* GetIdentityHash();
+ static Handle<Object> GetIdentityHash(Isolate* isolate,
+ Handle<JSProxy> receiver);
static Handle<Smi> GetOrCreateIdentityHash(Handle<JSProxy> proxy);
@@ -10235,6 +10343,12 @@ class JSArray: public JSObject {
PropertyDescriptor* desc,
ShouldThrow should_throw);
+ // Checks whether the Array has the current realm's Array.prototype as its
+ // prototype. This function is best-effort and only gives a conservative
+ // approximation, erring on the side of false, in particular with respect
+ // to Proxies and objects with a hidden prototype.
+ inline bool HasArrayPrototype(Isolate* isolate);
+
DECLARE_CAST(JSArray)
// Dispatched behavior.
@@ -10315,6 +10429,9 @@ class AccessorInfo: public Struct {
inline bool is_special_data_property();
inline void set_is_special_data_property(bool value);
+ inline bool is_sloppy();
+ inline void set_is_sloppy(bool value);
+
inline PropertyAttributes property_attributes();
inline void set_property_attributes(PropertyAttributes attributes);
@@ -10351,7 +10468,8 @@ class AccessorInfo: public Struct {
static const int kAllCanReadBit = 0;
static const int kAllCanWriteBit = 1;
static const int kSpecialDataProperty = 2;
- class AttributesField : public BitField<PropertyAttributes, 3, 3> {};
+ static const int kIsSloppy = 3;
+ class AttributesField : public BitField<PropertyAttributes, 4, 3> {};
DISALLOW_IMPLICIT_CONSTRUCTORS(AccessorInfo);
};
diff --git a/deps/v8/src/optimizing-compile-dispatcher.cc b/deps/v8/src/optimizing-compile-dispatcher.cc
index 4836b9bebb..ed202242ba 100644
--- a/deps/v8/src/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/optimizing-compile-dispatcher.cc
@@ -20,17 +20,8 @@ void DisposeOptimizedCompileJob(OptimizedCompileJob* job,
// The recompile job is allocated in the CompilationInfo's zone.
CompilationInfo* info = job->info();
if (restore_function_code) {
- if (info->is_osr()) {
- if (!job->IsWaitingForInstall()) {
- // Remove stack check that guards OSR entry on original code.
- Handle<Code> code = info->unoptimized_code();
- uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
- BackEdgeTable::RemoveStackCheck(code, offset);
- }
- } else {
- Handle<JSFunction> function = info->closure();
- function->ReplaceCode(function->shared()->code());
- }
+ Handle<JSFunction> function = info->closure();
+ function->ReplaceCode(function->shared()->code());
}
delete info;
}
@@ -92,14 +83,6 @@ OptimizingCompileDispatcher::~OptimizingCompileDispatcher() {
#endif
DCHECK_EQ(0, input_queue_length_);
DeleteArray(input_queue_);
- if (FLAG_concurrent_osr) {
-#ifdef DEBUG
- for (int i = 0; i < osr_buffer_capacity_; i++) {
- CHECK_NULL(osr_buffer_[i]);
- }
-#endif
- DeleteArray(osr_buffer_);
- }
}
@@ -159,16 +142,6 @@ void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
}
-void OptimizingCompileDispatcher::FlushOsrBuffer(bool restore_function_code) {
- for (int i = 0; i < osr_buffer_capacity_; i++) {
- if (osr_buffer_[i] != NULL) {
- DisposeOptimizedCompileJob(osr_buffer_[i], restore_function_code);
- osr_buffer_[i] = NULL;
- }
- }
-}
-
-
void OptimizingCompileDispatcher::Flush() {
base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
if (FLAG_block_concurrent_recompilation) Unblock();
@@ -178,7 +151,6 @@ void OptimizingCompileDispatcher::Flush() {
base::Release_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
}
FlushOutputQueue(true);
- if (FLAG_concurrent_osr) FlushOsrBuffer(true);
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Flushed concurrent recompilation queues.\n");
}
@@ -202,13 +174,6 @@ void OptimizingCompileDispatcher::Stop() {
} else {
FlushOutputQueue(false);
}
-
- if (FLAG_concurrent_osr) FlushOsrBuffer(false);
-
- if ((FLAG_trace_osr || FLAG_trace_concurrent_recompilation) &&
- FLAG_concurrent_osr) {
- PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_);
- }
}
@@ -225,31 +190,15 @@ void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
}
CompilationInfo* info = job->info();
Handle<JSFunction> function(*info->closure());
- if (info->is_osr()) {
- if (FLAG_trace_osr) {
- PrintF("[COSR - ");
+ if (function->IsOptimized()) {
+ if (FLAG_trace_concurrent_recompilation) {
+ PrintF(" ** Aborting compilation for ");
function->ShortPrint();
- PrintF(" is ready for install and entry at AST id %d]\n",
- info->osr_ast_id().ToInt());
+ PrintF(" as it has already been optimized.\n");
}
- job->WaitForInstall();
- // Remove stack check that guards OSR entry on original code.
- Handle<Code> code = info->unoptimized_code();
- uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
- BackEdgeTable::RemoveStackCheck(code, offset);
+ DisposeOptimizedCompileJob(job, false);
} else {
- if (function->IsOptimized()) {
- if (FLAG_trace_concurrent_recompilation) {
- PrintF(" ** Aborting compilation for ");
- function->ShortPrint();
- PrintF(" as it has already been optimized.\n");
- }
- DisposeOptimizedCompileJob(job, false);
- } else {
- MaybeHandle<Code> code = Compiler::GetConcurrentlyOptimizedCode(job);
- function->ReplaceCode(code.is_null() ? function->shared()->code()
- : *code.ToHandleChecked());
- }
+ Compiler::FinalizeOptimizedCompileJob(job);
}
}
}
@@ -258,18 +207,7 @@ void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
void OptimizingCompileDispatcher::QueueForOptimization(
OptimizedCompileJob* job) {
DCHECK(IsQueueAvailable());
- CompilationInfo* info = job->info();
- if (info->is_osr()) {
- osr_attempts_++;
- AddToOsrBuffer(job);
- // Add job to the front of the input queue.
- base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
- DCHECK_LT(input_queue_length_, input_queue_capacity_);
- // Move shift_ back by one.
- input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1);
- input_queue_[InputQueueIndex(0)] = job;
- input_queue_length_++;
- } else {
+ {
// Add job to the back of the input queue.
base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
DCHECK_LT(input_queue_length_, input_queue_capacity_);
@@ -294,67 +232,5 @@ void OptimizingCompileDispatcher::Unblock() {
}
-OptimizedCompileJob* OptimizingCompileDispatcher::FindReadyOSRCandidate(
- Handle<JSFunction> function, BailoutId osr_ast_id) {
- for (int i = 0; i < osr_buffer_capacity_; i++) {
- OptimizedCompileJob* current = osr_buffer_[i];
- if (current != NULL && current->IsWaitingForInstall() &&
- current->info()->HasSameOsrEntry(function, osr_ast_id)) {
- osr_hits_++;
- osr_buffer_[i] = NULL;
- return current;
- }
- }
- return NULL;
-}
-
-
-bool OptimizingCompileDispatcher::IsQueuedForOSR(Handle<JSFunction> function,
- BailoutId osr_ast_id) {
- for (int i = 0; i < osr_buffer_capacity_; i++) {
- OptimizedCompileJob* current = osr_buffer_[i];
- if (current != NULL &&
- current->info()->HasSameOsrEntry(function, osr_ast_id)) {
- return !current->IsWaitingForInstall();
- }
- }
- return false;
-}
-
-
-bool OptimizingCompileDispatcher::IsQueuedForOSR(JSFunction* function) {
- for (int i = 0; i < osr_buffer_capacity_; i++) {
- OptimizedCompileJob* current = osr_buffer_[i];
- if (current != NULL && *current->info()->closure() == function) {
- return !current->IsWaitingForInstall();
- }
- }
- return false;
-}
-
-
-void OptimizingCompileDispatcher::AddToOsrBuffer(OptimizedCompileJob* job) {
- // Find the next slot that is empty or has a stale job.
- OptimizedCompileJob* stale = NULL;
- while (true) {
- stale = osr_buffer_[osr_buffer_cursor_];
- if (stale == NULL || stale->IsWaitingForInstall()) break;
- osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
- }
-
- // Add to found slot and dispose the evicted job.
- if (stale != NULL) {
- DCHECK(stale->IsWaitingForInstall());
- CompilationInfo* info = stale->info();
- if (FLAG_trace_osr) {
- PrintF("[COSR - Discarded ");
- info->closure()->PrintName();
- PrintF(", AST id %d]\n", info->osr_ast_id().ToInt());
- }
- DisposeOptimizedCompileJob(stale, false);
- }
- osr_buffer_[osr_buffer_cursor_] = job;
- osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
-}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/optimizing-compile-dispatcher.h b/deps/v8/src/optimizing-compile-dispatcher.h
index 9c4e4cb8df..e14e8aafbc 100644
--- a/deps/v8/src/optimizing-compile-dispatcher.h
+++ b/deps/v8/src/optimizing-compile-dispatcher.h
@@ -28,20 +28,11 @@ class OptimizingCompileDispatcher {
input_queue_capacity_(FLAG_concurrent_recompilation_queue_length),
input_queue_length_(0),
input_queue_shift_(0),
- osr_buffer_capacity_(FLAG_concurrent_recompilation_queue_length + 4),
- osr_buffer_cursor_(0),
- osr_hits_(0),
- osr_attempts_(0),
blocked_jobs_(0),
ref_count_(0),
recompilation_delay_(FLAG_concurrent_recompilation_delay) {
base::NoBarrier_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
input_queue_ = NewArray<OptimizedCompileJob*>(input_queue_capacity_);
- if (FLAG_concurrent_osr) {
- // Allocate and mark OSR buffer slots as empty.
- osr_buffer_ = NewArray<OptimizedCompileJob*>(osr_buffer_capacity_);
- for (int i = 0; i < osr_buffer_capacity_; i++) osr_buffer_[i] = NULL;
- }
}
~OptimizingCompileDispatcher();
@@ -52,24 +43,12 @@ class OptimizingCompileDispatcher {
void QueueForOptimization(OptimizedCompileJob* optimizing_compiler);
void Unblock();
void InstallOptimizedFunctions();
- OptimizedCompileJob* FindReadyOSRCandidate(Handle<JSFunction> function,
- BailoutId osr_ast_id);
- bool IsQueuedForOSR(Handle<JSFunction> function, BailoutId osr_ast_id);
-
- bool IsQueuedForOSR(JSFunction* function);
inline bool IsQueueAvailable() {
base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
return input_queue_length_ < input_queue_capacity_;
}
- inline void AgeBufferedOsrJobs() {
- // Advance cursor of the cyclic buffer to next empty slot or stale OSR job.
- // Dispose said OSR job in the latter case. Calling this on every GC
- // should make sure that we do not hold onto stale jobs indefinitely.
- AddToOsrBuffer(NULL);
- }
-
static bool Enabled() { return FLAG_concurrent_recompilation; }
private:
@@ -78,14 +57,9 @@ class OptimizingCompileDispatcher {
enum ModeFlag { COMPILE, FLUSH };
void FlushOutputQueue(bool restore_function_code);
- void FlushOsrBuffer(bool restore_function_code);
void CompileNext(OptimizedCompileJob* job);
OptimizedCompileJob* NextInput(bool check_if_flushing = false);
- // Add a recompilation task for OSR to the cyclic buffer, awaiting OSR entry.
- // Tasks evicted from the cyclic buffer are discarded.
- void AddToOsrBuffer(OptimizedCompileJob* compiler);
-
inline int InputQueueIndex(int i) {
int result = (i + input_queue_shift_) % input_queue_capacity_;
DCHECK_LE(0, result);
@@ -108,16 +82,8 @@ class OptimizingCompileDispatcher {
// different threads.
base::Mutex output_queue_mutex_;
- // Cyclic buffer of recompilation tasks for OSR.
- OptimizedCompileJob** osr_buffer_;
- int osr_buffer_capacity_;
- int osr_buffer_cursor_;
-
volatile base::AtomicWord mode_;
- int osr_hits_;
- int osr_attempts_;
-
int blocked_jobs_;
int ref_count_;
diff --git a/deps/v8/src/parsing/expression-classifier.h b/deps/v8/src/parsing/expression-classifier.h
index fa1a2f97a4..71fa3d3e89 100644
--- a/deps/v8/src/parsing/expression-classifier.h
+++ b/deps/v8/src/parsing/expression-classifier.h
@@ -36,18 +36,16 @@ class ExpressionClassifier {
AssignmentPatternProduction = 1 << 3,
DistinctFormalParametersProduction = 1 << 4,
StrictModeFormalParametersProduction = 1 << 5,
- StrongModeFormalParametersProduction = 1 << 6,
- ArrowFormalParametersProduction = 1 << 7,
- LetPatternProduction = 1 << 8,
- CoverInitializedNameProduction = 1 << 9,
+ ArrowFormalParametersProduction = 1 << 6,
+ LetPatternProduction = 1 << 7,
+ CoverInitializedNameProduction = 1 << 8,
ExpressionProductions =
(ExpressionProduction | FormalParameterInitializerProduction),
PatternProductions = (BindingPatternProduction |
AssignmentPatternProduction | LetPatternProduction),
FormalParametersProductions = (DistinctFormalParametersProduction |
- StrictModeFormalParametersProduction |
- StrongModeFormalParametersProduction),
+ StrictModeFormalParametersProduction),
StandardProductions = ExpressionProductions | PatternProductions,
AllProductions =
(StandardProductions | FormalParametersProductions |
@@ -110,12 +108,6 @@ class ExpressionClassifier {
return is_valid(StrictModeFormalParametersProduction);
}
- // Note: callers should also check is_valid_strict_mode_formal_parameters()
- // and is_valid_formal_parameter_list_without_duplicates().
- bool is_valid_strong_mode_formal_parameters() const {
- return is_valid(StrongModeFormalParametersProduction);
- }
-
bool is_valid_let_pattern() const { return is_valid(LetPatternProduction); }
const Error& expression_error() const { return expression_error_; }
@@ -142,10 +134,6 @@ class ExpressionClassifier {
return strict_mode_formal_parameter_error_;
}
- const Error& strong_mode_formal_parameter_error() const {
- return strong_mode_formal_parameter_error_;
- }
-
const Error& let_pattern_error() const { return let_pattern_error_; }
bool has_cover_initialized_name() const {
@@ -252,16 +240,6 @@ class ExpressionClassifier {
strict_mode_formal_parameter_error_.arg = arg;
}
- void RecordStrongModeFormalParameterError(const Scanner::Location& loc,
- MessageTemplate::Template message,
- const char* arg = nullptr) {
- if (!is_valid_strong_mode_formal_parameters()) return;
- invalid_productions_ |= StrongModeFormalParametersProduction;
- strong_mode_formal_parameter_error_.location = loc;
- strong_mode_formal_parameter_error_.message = message;
- strong_mode_formal_parameter_error_.arg = arg;
- }
-
void RecordLetPatternError(const Scanner::Location& loc,
MessageTemplate::Template message,
const char* arg = nullptr) {
@@ -323,9 +301,6 @@ class ExpressionClassifier {
if (errors & StrictModeFormalParametersProduction)
strict_mode_formal_parameter_error_ =
inner->strict_mode_formal_parameter_error_;
- if (errors & StrongModeFormalParametersProduction)
- strong_mode_formal_parameter_error_ =
- inner->strong_mode_formal_parameter_error_;
if (errors & LetPatternProduction)
let_pattern_error_ = inner->let_pattern_error_;
if (errors & CoverInitializedNameProduction)
@@ -372,7 +347,6 @@ class ExpressionClassifier {
Error arrow_formal_parameters_error_;
Error duplicate_formal_parameter_error_;
Error strict_mode_formal_parameter_error_;
- Error strong_mode_formal_parameter_error_;
Error let_pattern_error_;
Error cover_initialized_name_error_;
DuplicateFinder* duplicate_finder_;
diff --git a/deps/v8/src/parsing/parameter-initializer-rewriter.cc b/deps/v8/src/parsing/parameter-initializer-rewriter.cc
index 003bbebae0..3e3587b2bd 100644
--- a/deps/v8/src/parsing/parameter-initializer-rewriter.cc
+++ b/deps/v8/src/parsing/parameter-initializer-rewriter.cc
@@ -62,7 +62,7 @@ void Rewriter::VisitClassLiteral(ClassLiteral* class_literal) {
void Rewriter::VisitVariableProxy(VariableProxy* proxy) {
if (proxy->is_resolved()) {
Variable* var = proxy->var();
- DCHECK_EQ(var->mode(), TEMPORARY);
+ if (var->mode() != TEMPORARY) return;
if (old_scope_->RemoveTemporary(var)) {
var->set_scope(new_scope_);
new_scope_->AddTemporary(var);
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index 6be19b397c..dde6b1dd86 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -24,6 +24,10 @@ enum FunctionNameValidity {
kFunctionNameValidityUnknown
};
+enum AllowLabelledFunctionStatement {
+ kAllowLabelledFunctionStatement,
+ kDisallowLabelledFunctionStatement,
+};
struct FormalParametersBase {
explicit FormalParametersBase(Scope* scope) : scope(scope) {}
@@ -108,14 +112,11 @@ class ParserBase : public Traits {
stack_overflow_(false),
allow_lazy_(false),
allow_natives_(false),
+ allow_tailcalls_(false),
allow_harmony_sloppy_(false),
allow_harmony_sloppy_function_(false),
allow_harmony_sloppy_let_(false),
- allow_harmony_default_parameters_(false),
- allow_harmony_destructuring_bind_(false),
- allow_harmony_destructuring_assignment_(false),
- allow_strong_mode_(false),
- allow_legacy_const_(true),
+ allow_harmony_restrictive_declarations_(false),
allow_harmony_do_expressions_(false),
allow_harmony_function_name_(false),
allow_harmony_function_sent_(false) {}
@@ -124,19 +125,25 @@ class ParserBase : public Traits {
bool allow_##name() const { return allow_##name##_; } \
void set_allow_##name(bool allow) { allow_##name##_ = allow; }
+#define SCANNER_ACCESSORS(name) \
+ bool allow_##name() const { return scanner_->allow_##name(); } \
+ void set_allow_##name(bool allow) { \
+ return scanner_->set_allow_##name(allow); \
+ }
+
ALLOW_ACCESSORS(lazy);
ALLOW_ACCESSORS(natives);
+ ALLOW_ACCESSORS(tailcalls);
ALLOW_ACCESSORS(harmony_sloppy);
ALLOW_ACCESSORS(harmony_sloppy_function);
ALLOW_ACCESSORS(harmony_sloppy_let);
- ALLOW_ACCESSORS(harmony_default_parameters);
- ALLOW_ACCESSORS(harmony_destructuring_bind);
- ALLOW_ACCESSORS(harmony_destructuring_assignment);
- ALLOW_ACCESSORS(strong_mode);
- ALLOW_ACCESSORS(legacy_const);
+ ALLOW_ACCESSORS(harmony_restrictive_declarations);
ALLOW_ACCESSORS(harmony_do_expressions);
ALLOW_ACCESSORS(harmony_function_name);
ALLOW_ACCESSORS(harmony_function_sent);
+ SCANNER_ACCESSORS(harmony_exponentiation_operator);
+
+#undef SCANNER_ACCESSORS
#undef ALLOW_ACCESSORS
uintptr_t stack_limit() const { return stack_limit_; }
@@ -368,7 +375,6 @@ class ParserBase : public Traits {
Scope* NewScope(Scope* parent, ScopeType scope_type, FunctionKind kind) {
DCHECK(ast_value_factory());
- DCHECK(scope_type != MODULE_SCOPE || FLAG_harmony_modules);
Scope* result = new (zone())
Scope(zone(), parent, scope_type, ast_value_factory(), kind);
result->Initialize();
@@ -481,12 +487,7 @@ class ParserBase : public Traits {
bool CheckInOrOf(ForEachStatement::VisitMode* visit_mode, bool* ok) {
if (Check(Token::IN)) {
- if (is_strong(language_mode())) {
- ReportMessageAt(scanner()->location(), MessageTemplate::kStrongForIn);
- *ok = false;
- } else {
- *visit_mode = ForEachStatement::ENUMERATE;
- }
+ *visit_mode = ForEachStatement::ENUMERATE;
return true;
} else if (CheckContextualKeyword(CStrVector("of"))) {
*visit_mode = ForEachStatement::ITERATE;
@@ -547,12 +548,6 @@ class ParserBase : public Traits {
*ok = false;
return;
}
- if (is_strong(language_mode) && this->IsUndefined(function_name)) {
- Traits::ReportMessageAt(function_name_loc,
- MessageTemplate::kStrongUndefined);
- *ok = false;
- return;
- }
}
// Determine precedence of given token.
@@ -570,8 +565,7 @@ class ParserBase : public Traits {
bool is_generator() const { return function_state_->is_generator(); }
bool allow_const() {
- return is_strict(language_mode()) || allow_harmony_sloppy() ||
- allow_legacy_const();
+ return is_strict(language_mode()) || allow_harmony_sloppy();
}
bool allow_let() {
@@ -593,7 +587,8 @@ class ParserBase : public Traits {
}
void GetUnexpectedTokenMessage(
- Token::Value token, MessageTemplate::Template* message, const char** arg,
+ Token::Value token, MessageTemplate::Template* message,
+ Scanner::Location* location, const char** arg,
MessageTemplate::Template default_ = MessageTemplate::kUnexpectedToken);
void ReportUnexpectedToken(Token::Value token);
@@ -657,10 +652,6 @@ class ParserBase : public Traits {
!classifier->is_valid_strict_mode_formal_parameters()) {
ReportClassifierError(classifier->strict_mode_formal_parameter_error());
*ok = false;
- } else if (is_strong(language_mode) &&
- !classifier->is_valid_strong_mode_formal_parameters()) {
- ReportClassifierError(classifier->strong_mode_formal_parameter_error());
- *ok = false;
}
}
@@ -698,33 +689,25 @@ class ParserBase : public Traits {
void ExpressionUnexpectedToken(ExpressionClassifier* classifier) {
MessageTemplate::Template message = MessageTemplate::kUnexpectedToken;
const char* arg;
- GetUnexpectedTokenMessage(peek(), &message, &arg);
- classifier->RecordExpressionError(scanner()->peek_location(), message, arg);
+ Scanner::Location location = scanner()->peek_location();
+ GetUnexpectedTokenMessage(peek(), &message, &location, &arg);
+ classifier->RecordExpressionError(location, message, arg);
}
void BindingPatternUnexpectedToken(ExpressionClassifier* classifier) {
MessageTemplate::Template message = MessageTemplate::kUnexpectedToken;
const char* arg;
- GetUnexpectedTokenMessage(peek(), &message, &arg);
- classifier->RecordBindingPatternError(scanner()->peek_location(), message,
- arg);
+ Scanner::Location location = scanner()->peek_location();
+ GetUnexpectedTokenMessage(peek(), &message, &location, &arg);
+ classifier->RecordBindingPatternError(location, message, arg);
}
void ArrowFormalParametersUnexpectedToken(ExpressionClassifier* classifier) {
MessageTemplate::Template message = MessageTemplate::kUnexpectedToken;
const char* arg;
- GetUnexpectedTokenMessage(peek(), &message, &arg);
- classifier->RecordArrowFormalParametersError(scanner()->peek_location(),
- message, arg);
- }
-
- void FormalParameterInitializerUnexpectedToken(
- ExpressionClassifier* classifier) {
- MessageTemplate::Template message = MessageTemplate::kUnexpectedToken;
- const char* arg;
- GetUnexpectedTokenMessage(peek(), &message, &arg);
- classifier->RecordFormalParameterInitializerError(
- scanner()->peek_location(), message, arg);
+ Scanner::Location location = scanner()->peek_location();
+ GetUnexpectedTokenMessage(peek(), &message, &location, &arg);
+ classifier->RecordArrowFormalParametersError(location, message, arg);
}
// Recursive descent functions:
@@ -804,10 +787,6 @@ class ParserBase : public Traits {
ExpressionT ParseSuperExpression(bool is_new,
ExpressionClassifier* classifier, bool* ok);
ExpressionT ParseNewTargetExpression(bool* ok);
- ExpressionT ParseStrongInitializationExpression(
- ExpressionClassifier* classifier, bool* ok);
- ExpressionT ParseStrongSuperCallExpression(ExpressionClassifier* classifier,
- bool* ok);
void ParseFormalParameter(FormalParametersT* parameters,
ExpressionClassifier* classifier, bool* ok);
@@ -825,10 +804,6 @@ class ParserBase : public Traits {
ExpressionT CheckAndRewriteReferenceExpression(
ExpressionT expression, int beg_pos, int end_pos,
MessageTemplate::Template message, bool* ok);
- ExpressionT ClassifyAndRewriteReferenceExpression(
- ExpressionClassifier* classifier, ExpressionT expression, int beg_pos,
- int end_pos, MessageTemplate::Template message,
- ParseErrorType type = kSyntaxError);
ExpressionT CheckAndRewriteReferenceExpression(
ExpressionT expression, int beg_pos, int end_pos,
MessageTemplate::Template message, ParseErrorType type, bool* ok);
@@ -841,10 +816,6 @@ class ParserBase : public Traits {
Traits::IsEvalOrArguments(Traits::AsIdentifier(expression))) {
return false;
}
- if (is_strong(language_mode()) &&
- Traits::IsUndefined(Traits::AsIdentifier(expression))) {
- return false;
- }
return true;
}
@@ -858,8 +829,12 @@ class ParserBase : public Traits {
void CheckPossibleEvalCall(ExpressionT expression, Scope* scope) {
if (Traits::IsIdentifier(expression) &&
Traits::IsEval(Traits::AsIdentifier(expression))) {
- scope->DeclarationScope()->RecordEvalCall();
scope->RecordEvalCall();
+ if (is_sloppy(scope->language_mode())) {
+ // For sloppy scopes we also have to record the call at function level,
+ // in case it includes declarations that will be hoisted.
+ scope->DeclarationScope()->RecordEvalCall();
+ }
}
}
@@ -945,14 +920,11 @@ class ParserBase : public Traits {
bool allow_lazy_;
bool allow_natives_;
+ bool allow_tailcalls_;
bool allow_harmony_sloppy_;
bool allow_harmony_sloppy_function_;
bool allow_harmony_sloppy_let_;
- bool allow_harmony_default_parameters_;
- bool allow_harmony_destructuring_bind_;
- bool allow_harmony_destructuring_assignment_;
- bool allow_strong_mode_;
- bool allow_legacy_const_;
+ bool allow_harmony_restrictive_declarations_;
bool allow_harmony_do_expressions_;
bool allow_harmony_function_name_;
bool allow_harmony_function_sent_;
@@ -987,32 +959,28 @@ ParserBase<Traits>::FunctionState::~FunctionState() {
*function_state_stack_ = outer_function_state_;
}
-
template <class Traits>
void ParserBase<Traits>::GetUnexpectedTokenMessage(
- Token::Value token, MessageTemplate::Template* message, const char** arg,
+ Token::Value token, MessageTemplate::Template* message,
+ Scanner::Location* location, const char** arg,
MessageTemplate::Template default_) {
+ *arg = nullptr;
switch (token) {
case Token::EOS:
*message = MessageTemplate::kUnexpectedEOS;
- *arg = nullptr;
break;
case Token::SMI:
case Token::NUMBER:
*message = MessageTemplate::kUnexpectedTokenNumber;
- *arg = nullptr;
break;
case Token::STRING:
*message = MessageTemplate::kUnexpectedTokenString;
- *arg = nullptr;
break;
case Token::IDENTIFIER:
*message = MessageTemplate::kUnexpectedTokenIdentifier;
- *arg = nullptr;
break;
case Token::FUTURE_RESERVED_WORD:
*message = MessageTemplate::kUnexpectedReserved;
- *arg = nullptr;
break;
case Token::LET:
case Token::STATIC:
@@ -1021,17 +989,22 @@ void ParserBase<Traits>::GetUnexpectedTokenMessage(
*message = is_strict(language_mode())
? MessageTemplate::kUnexpectedStrictReserved
: MessageTemplate::kUnexpectedTokenIdentifier;
- *arg = nullptr;
break;
case Token::TEMPLATE_SPAN:
case Token::TEMPLATE_TAIL:
*message = MessageTemplate::kUnexpectedTemplateString;
- *arg = nullptr;
break;
case Token::ESCAPED_STRICT_RESERVED_WORD:
case Token::ESCAPED_KEYWORD:
*message = MessageTemplate::kInvalidEscapedReservedWord;
- *arg = nullptr;
+ break;
+ case Token::ILLEGAL:
+ if (scanner()->has_error()) {
+ *message = scanner()->error();
+ *location = scanner()->error_location();
+ } else {
+ *message = MessageTemplate::kInvalidOrUnexpectedToken;
+ }
break;
default:
const char* name = Token::String(token);
@@ -1053,7 +1026,7 @@ void ParserBase<Traits>::ReportUnexpectedTokenAt(
Scanner::Location source_location, Token::Value token,
MessageTemplate::Template message) {
const char* arg;
- GetUnexpectedTokenMessage(token, &message, &arg);
+ GetUnexpectedTokenMessage(token, &message, &source_location, &arg);
Traits::ReportMessageAt(source_location, message, arg);
}
@@ -1105,19 +1078,6 @@ ParserBase<Traits>::ParseAndClassifyIdentifier(ExpressionClassifier* classifier,
classifier->RecordBindingPatternError(
scanner()->location(), MessageTemplate::kStrictEvalArguments);
}
- if (is_strong(language_mode())) {
- classifier->RecordExpressionError(scanner()->location(),
- MessageTemplate::kStrongArguments);
- }
- }
- if (this->IsUndefined(name)) {
- classifier->RecordStrongModeFormalParameterError(
- scanner()->location(), MessageTemplate::kStrongUndefined);
- if (is_strong(language_mode())) {
- // TODO(dslomov): allow 'undefined' in nested patterns.
- classifier->RecordPatternError(scanner()->location(),
- MessageTemplate::kStrongUndefined);
- }
}
if (classifier->duplicate_finder() != nullptr &&
@@ -1218,8 +1178,7 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseRegExpLiteral(
}
int js_flags = flags.FromJust();
Next();
- return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index,
- is_strong(language_mode()), pos);
+ return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index, pos);
}
@@ -1262,15 +1221,6 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
case Token::THIS: {
BindingPatternUnexpectedToken(classifier);
Consume(Token::THIS);
- if (FLAG_strong_this && is_strong(language_mode())) {
- // Constructors' usages of 'this' in strong mode are parsed separately.
- // TODO(rossberg): this does not work with arrow functions yet.
- if (IsClassConstructor(function_state_->kind())) {
- ReportMessage(MessageTemplate::kStrongConstructorThis);
- *ok = false;
- return this->EmptyExpression();
- }
- }
return this->ThisExpression(scope_, factory(), beg_pos);
}
@@ -1313,15 +1263,9 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
return this->ParseRegExpLiteral(false, classifier, ok);
case Token::LBRACK:
- if (!allow_harmony_destructuring_bind()) {
- BindingPatternUnexpectedToken(classifier);
- }
return this->ParseArrayLiteral(classifier, ok);
case Token::LBRACE:
- if (!allow_harmony_destructuring_bind()) {
- BindingPatternUnexpectedToken(classifier);
- }
return this->ParseObjectLiteral(classifier, ok);
case Token::LPAREN: {
@@ -1394,7 +1338,7 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
CHECK_OK);
class_name_location = scanner()->location();
}
- return this->ParseClassLiteral(name, class_name_location,
+ return this->ParseClassLiteral(classifier, name, class_name_location,
is_strict_reserved_name,
class_token_position, ok);
}
@@ -1510,12 +1454,6 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseArrayLiteral(
while (peek() != Token::RBRACK) {
ExpressionT elem = this->EmptyExpression();
if (peek() == Token::COMMA) {
- if (is_strong(language_mode())) {
- ReportMessageAt(scanner()->peek_location(),
- MessageTemplate::kStrongEllision);
- *ok = false;
- return this->EmptyExpression();
- }
elem = this->GetLiteralTheHole(peek_position(), factory());
} else if (peek() == Token::ELLIPSIS) {
int start_pos = peek_position();
@@ -1559,9 +1497,8 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseArrayLiteral(
// Update the scope information before the pre-parsing bailout.
int literal_index = function_state_->NextMaterializedLiteralIndex();
- ExpressionT result =
- factory()->NewArrayLiteral(values, first_spread_index, literal_index,
- is_strong(language_mode()), pos);
+ ExpressionT result = factory()->NewArrayLiteral(values, first_spread_index,
+ literal_index, pos);
if (first_spread_index >= 0) {
result = factory()->NewRewritableExpression(result);
Traits::QueueNonPatternForRewriting(result);
@@ -1823,7 +1760,6 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseObjectLiteral(
typename Traits::Type::PropertyList properties =
this->NewPropertyList(4, zone_);
int number_of_boilerplate_properties = 0;
- bool has_function = false;
bool has_computed_names = false;
ObjectLiteralChecker checker(this);
@@ -1845,12 +1781,6 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseObjectLiteral(
has_computed_names = true;
}
- // Mark top-level object literals that contain function literals and
- // pretenure the literal so it can be added as a constant function
- // property. (Parser only.)
- this->CheckFunctionLiteralInsideTopLevelObjectLiteral(scope_, property,
- &has_function);
-
// Count CONSTANT or COMPUTED properties to maintain the enumeration order.
if (!has_computed_names && this->IsBoilerplateProperty(property)) {
number_of_boilerplate_properties++;
@@ -1876,8 +1806,6 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseObjectLiteral(
return factory()->NewObjectLiteral(properties,
literal_index,
number_of_boilerplate_properties,
- has_function,
- is_strong(language_mode()),
pos);
}
@@ -1984,6 +1912,13 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN,
Token::String(Token::ARROW));
ValidateArrowFormalParameters(&arrow_formals_classifier, expression,
parenthesized_formals, CHECK_OK);
+ // This reads strangely, but is correct: it checks whether any
+ // sub-expression of the parameter list failed to be a valid formal
+ // parameter initializer. Since YieldExpressions are banned anywhere
+ // in an arrow parameter list, this is correct.
+ // TODO(adamk): Rename "FormalParameterInitializerError" to refer to
+ // "YieldExpression", which is its only use.
+ ValidateFormalParameterInitializer(&arrow_formals_classifier, ok);
Scanner::Location loc(lhs_beg_pos, scanner()->location().end_pos);
Scope* scope =
this->NewScope(scope_, FUNCTION_SCOPE, FunctionKind::kArrowFunction);
@@ -2039,23 +1974,10 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN,
// Now pending non-pattern expressions must be discarded.
arrow_formals_classifier.Discard();
- if (!(allow_harmony_destructuring_bind() ||
- allow_harmony_default_parameters())) {
- BindingPatternUnexpectedToken(classifier);
- }
-
- if (allow_harmony_destructuring_assignment() && IsValidPattern(expression) &&
- peek() == Token::ASSIGN) {
+ if (IsValidPattern(expression) && peek() == Token::ASSIGN) {
classifier->ForgiveCoverInitializedNameError();
ValidateAssignmentPattern(classifier, CHECK_OK);
is_destructuring_assignment = true;
- } else if (allow_harmony_default_parameters() &&
- !allow_harmony_destructuring_assignment()) {
- // TODO(adamk): This branch should be removed once the destructuring
- // assignment and default parameter flags are removed.
- expression = this->ClassifyAndRewriteReferenceExpression(
- classifier, expression, lhs_beg_pos, scanner()->location().end_pos,
- MessageTemplate::kInvalidLhsInAssignment);
} else {
expression = this->CheckAndRewriteReferenceExpression(
expression, lhs_beg_pos, scanner()->location().end_pos,
@@ -2108,6 +2030,11 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN,
Traits::SetFunctionNameFromIdentifierRef(right, expression);
}
+ if (op == Token::ASSIGN_EXP) {
+ DCHECK(!is_destructuring_assignment);
+ return Traits::RewriteAssignExponentiation(expression, right, pos);
+ }
+
ExpressionT result = factory()->NewAssignment(op, expression, right, pos);
if (is_destructuring_assignment) {
@@ -2127,14 +2054,15 @@ ParserBase<Traits>::ParseYieldExpression(ExpressionClassifier* classifier,
int pos = peek_position();
classifier->RecordPatternError(scanner()->peek_location(),
MessageTemplate::kInvalidDestructuringTarget);
- FormalParameterInitializerUnexpectedToken(classifier);
+ classifier->RecordFormalParameterInitializerError(
+ scanner()->peek_location(), MessageTemplate::kYieldInParameter);
Expect(Token::YIELD, CHECK_OK);
ExpressionT generator_object =
factory()->NewVariableProxy(function_state_->generator_object_variable());
ExpressionT expression = Traits::EmptyExpression();
- Yield::Kind kind = Yield::kSuspend;
+ bool delegating = false; // yield*
if (!scanner()->HasAnyLineTerminatorBeforeNext()) {
- if (Check(Token::MUL)) kind = Yield::kDelegating;
+ if (Check(Token::MUL)) delegating = true;
switch (peek()) {
case Token::EOS:
case Token::SEMICOLON:
@@ -2146,10 +2074,8 @@ ParserBase<Traits>::ParseYieldExpression(ExpressionClassifier* classifier,
// The above set of tokens is the complete set of tokens that can appear
// after an AssignmentExpression, and none of them can start an
// AssignmentExpression. This allows us to avoid looking for an RHS for
- // a Yield::kSuspend operation, given only one look-ahead token.
- if (kind == Yield::kSuspend)
- break;
- DCHECK_EQ(Yield::kDelegating, kind);
+ // a regular yield, given only one look-ahead token.
+ if (!delegating) break;
// Delegating yields require an RHS; fall through.
default:
expression = ParseAssignmentExpression(false, classifier, CHECK_OK);
@@ -2157,13 +2083,16 @@ ParserBase<Traits>::ParseYieldExpression(ExpressionClassifier* classifier,
break;
}
}
- if (kind == Yield::kDelegating) {
+
+ if (delegating) {
return Traits::RewriteYieldStar(generator_object, expression, pos);
}
+
+ expression = Traits::BuildIteratorResult(expression, false);
// Hackily disambiguate o from o.next and o [Symbol.iterator]().
// TODO(verwaest): Come up with a better solution.
typename Traits::Type::YieldExpression yield =
- factory()->NewYield(generator_object, expression, kind, pos);
+ factory()->NewYield(generator_object, expression, pos);
return yield;
}
@@ -2215,10 +2144,12 @@ ParserBase<Traits>::ParseBinaryExpression(int prec, bool accept_IN,
BindingPatternUnexpectedToken(classifier);
ArrowFormalParametersUnexpectedToken(classifier);
Token::Value op = Next();
- Scanner::Location op_location = scanner()->location();
int pos = position();
+
+ const bool is_right_associative = op == Token::EXP;
+ const int next_prec = is_right_associative ? prec1 : prec1 + 1;
ExpressionT y =
- ParseBinaryExpression(prec1 + 1, accept_IN, classifier, CHECK_OK);
+ ParseBinaryExpression(next_prec, accept_IN, classifier, CHECK_OK);
Traits::RewriteNonPattern(classifier, CHECK_OK);
if (this->ShortcutNumericLiteralBinaryExpression(&x, y, op, pos,
@@ -2237,11 +2168,7 @@ ParserBase<Traits>::ParseBinaryExpression(int prec, bool accept_IN,
case Token::NE_STRICT: cmp = Token::EQ_STRICT; break;
default: break;
}
- if (cmp == Token::EQ && is_strong(language_mode())) {
- ReportMessageAt(op_location, MessageTemplate::kStrongEqual);
- *ok = false;
- return this->EmptyExpression();
- } else if (FLAG_harmony_instanceof && cmp == Token::INSTANCEOF) {
+ if (FLAG_harmony_instanceof && cmp == Token::INSTANCEOF) {
x = Traits::RewriteInstanceof(x, y, pos);
} else {
x = factory()->NewCompareOperation(cmp, x, y, pos);
@@ -2250,6 +2177,9 @@ ParserBase<Traits>::ParseBinaryExpression(int prec, bool accept_IN,
x = factory()->NewUnaryOperation(Token::NOT, x, pos);
}
}
+
+ } else if (op == Token::EXP) {
+ x = Traits::RewriteExponentiation(x, y, pos);
} else {
// We have a "normal" binary operation.
x = factory()->NewBinaryOperation(op, x, y, pos);
@@ -2287,11 +2217,7 @@ ParserBase<Traits>::ParseUnaryExpression(ExpressionClassifier* classifier,
Traits::RewriteNonPattern(classifier, CHECK_OK);
if (op == Token::DELETE && is_strict(language_mode())) {
- if (is_strong(language_mode())) {
- ReportMessage(MessageTemplate::kStrongDelete);
- *ok = false;
- return this->EmptyExpression();
- } else if (this->IsIdentifier(expression)) {
+ if (this->IsIdentifier(expression)) {
// "delete identifier" is a syntax error in strict mode.
ReportMessage(MessageTemplate::kStrictDelete);
*ok = false;
@@ -2299,6 +2225,12 @@ ParserBase<Traits>::ParseUnaryExpression(ExpressionClassifier* classifier,
}
}
+ if (peek() == Token::EXP) {
+ ReportUnexpectedToken(Next());
+ *ok = false;
+ return this->EmptyExpression();
+ }
+
// Allow Traits do rewrite the expression.
return this->BuildUnaryExpression(expression, op, pos, factory());
} else if (Token::IsCountOp(op)) {
@@ -2386,12 +2318,6 @@ ParserBase<Traits>::ParseLeftHandSideExpression(
BindingPatternUnexpectedToken(classifier);
ArrowFormalParametersUnexpectedToken(classifier);
- if (is_strong(language_mode()) && this->IsIdentifier(result) &&
- this->IsEval(this->AsIdentifier(result))) {
- ReportMessage(MessageTemplate::kStrongDirectEval);
- *ok = false;
- return this->EmptyExpression();
- }
int pos;
if (scanner()->current_token() == Token::IDENTIFIER ||
scanner()->current_token() == Token::SUPER) {
@@ -2609,148 +2535,6 @@ ParserBase<Traits>::ParseMemberExpression(ExpressionClassifier* classifier,
template <class Traits>
typename ParserBase<Traits>::ExpressionT
-ParserBase<Traits>::ParseStrongInitializationExpression(
- ExpressionClassifier* classifier, bool* ok) {
- // InitializationExpression :: (strong mode)
- // 'this' '.' IdentifierName '=' AssignmentExpression
- // 'this' '[' Expression ']' '=' AssignmentExpression
-
- FuncNameInferrer::State fni_state(fni_);
-
- Consume(Token::THIS);
- int pos = position();
- function_state_->set_this_location(scanner()->location());
- ExpressionT this_expr = this->ThisExpression(scope_, factory(), pos);
-
- ExpressionT left = this->EmptyExpression();
- switch (peek()) {
- case Token::LBRACK: {
- Consume(Token::LBRACK);
- int pos = position();
- ExpressionT index = this->ParseExpression(true, classifier, CHECK_OK);
- Traits::RewriteNonPattern(classifier, CHECK_OK);
- left = factory()->NewProperty(this_expr, index, pos);
- if (fni_ != NULL) {
- this->PushPropertyName(fni_, index);
- }
- Expect(Token::RBRACK, CHECK_OK);
- break;
- }
- case Token::PERIOD: {
- Consume(Token::PERIOD);
- int pos = position();
- IdentifierT name = ParseIdentifierName(CHECK_OK);
- left = factory()->NewProperty(
- this_expr, factory()->NewStringLiteral(name, pos), pos);
- if (fni_ != NULL) {
- this->PushLiteralName(fni_, name);
- }
- break;
- }
- default:
- ReportMessage(MessageTemplate::kStrongConstructorThis);
- *ok = false;
- return this->EmptyExpression();
- }
-
- if (peek() != Token::ASSIGN) {
- ReportMessageAt(function_state_->this_location(),
- MessageTemplate::kStrongConstructorThis);
- *ok = false;
- return this->EmptyExpression();
- }
- Consume(Token::ASSIGN);
- left = this->MarkExpressionAsAssigned(left);
-
- ExpressionT right =
- this->ParseAssignmentExpression(true, classifier, CHECK_OK);
- Traits::RewriteNonPattern(classifier, CHECK_OK);
- this->CheckAssigningFunctionLiteralToProperty(left, right);
- function_state_->AddProperty();
- if (fni_ != NULL) {
- // Check if the right hand side is a call to avoid inferring a
- // name if we're dealing with "this.a = function(){...}();"-like
- // expression.
- if (!right->IsCall() && !right->IsCallNew()) {
- fni_->Infer();
- } else {
- fni_->RemoveLastFunction();
- }
- }
-
- if (function_state_->return_location().IsValid()) {
- ReportMessageAt(function_state_->return_location(),
- MessageTemplate::kStrongConstructorReturnMisplaced);
- *ok = false;
- return this->EmptyExpression();
- }
-
- return factory()->NewAssignment(Token::ASSIGN, left, right, pos);
-}
-
-
-template <class Traits>
-typename ParserBase<Traits>::ExpressionT
-ParserBase<Traits>::ParseStrongSuperCallExpression(
- ExpressionClassifier* classifier, bool* ok) {
- // SuperCallExpression :: (strong mode)
- // 'super' '(' ExpressionList ')'
- BindingPatternUnexpectedToken(classifier);
-
- Consume(Token::SUPER);
- int pos = position();
- Scanner::Location super_loc = scanner()->location();
- ExpressionT expr = this->SuperCallReference(scope_, factory(), pos);
-
- if (peek() != Token::LPAREN) {
- ReportMessage(MessageTemplate::kStrongConstructorSuper);
- *ok = false;
- return this->EmptyExpression();
- }
-
- Scanner::Location spread_pos;
- typename Traits::Type::ExpressionList args =
- ParseArguments(&spread_pos, classifier, CHECK_OK);
-
- // TODO(rossberg): This doesn't work with arrow functions yet.
- if (!IsSubclassConstructor(function_state_->kind())) {
- ReportMessage(MessageTemplate::kUnexpectedSuper);
- *ok = false;
- return this->EmptyExpression();
- } else if (function_state_->super_location().IsValid()) {
- ReportMessageAt(scanner()->location(),
- MessageTemplate::kStrongSuperCallDuplicate);
- *ok = false;
- return this->EmptyExpression();
- } else if (function_state_->this_location().IsValid()) {
- ReportMessageAt(scanner()->location(),
- MessageTemplate::kStrongSuperCallMisplaced);
- *ok = false;
- return this->EmptyExpression();
- } else if (function_state_->return_location().IsValid()) {
- ReportMessageAt(function_state_->return_location(),
- MessageTemplate::kStrongConstructorReturnMisplaced);
- *ok = false;
- return this->EmptyExpression();
- }
-
- function_state_->set_super_location(super_loc);
- if (spread_pos.IsValid()) {
- args = Traits::PrepareSpreadArguments(args);
- expr = Traits::SpreadCall(expr, args, pos);
- } else {
- expr = factory()->NewCall(expr, args, pos);
- }
-
- // Explicit calls to the super constructor using super() perform an implicit
- // binding assignment to the 'this' variable.
- ExpressionT this_expr = this->ThisExpression(scope_, factory(), pos);
- return factory()->NewAssignment(Token::INIT, this_expr, expr, pos);
-}
-
-
-template <class Traits>
-typename ParserBase<Traits>::ExpressionT
ParserBase<Traits>::ParseSuperExpression(bool is_new,
ExpressionClassifier* classifier,
bool* ok) {
@@ -2768,13 +2552,6 @@ ParserBase<Traits>::ParseSuperExpression(bool is_new,
// new super() is never allowed.
// super() is only allowed in derived constructor
if (!is_new && peek() == Token::LPAREN && IsSubclassConstructor(kind)) {
- if (is_strong(language_mode())) {
- // Super calls in strong mode are parsed separately.
- ReportMessageAt(scanner()->location(),
- MessageTemplate::kStrongConstructorSuper);
- *ok = false;
- return this->EmptyExpression();
- }
// TODO(rossberg): This might not be the correct FunctionState for the
// method here.
function_state_->set_super_location(scanner()->location());
@@ -2878,6 +2655,11 @@ ParserBase<Traits>::ParseMemberExpressionContinuation(
ParseTemplateLiteral(expression, pos, classifier, CHECK_OK);
break;
}
+ case Token::ILLEGAL: {
+ ReportUnexpectedTokenAt(scanner()->peek_location(), Token::ILLEGAL);
+ *ok = false;
+ return this->EmptyExpression();
+ }
default:
return expression;
}
@@ -2894,7 +2676,6 @@ void ParserBase<Traits>::ParseFormalParameter(
// BindingElement[?Yield, ?GeneratorParameter]
bool is_rest = parameters->has_rest;
- Token::Value next = peek();
ExpressionT pattern = ParsePrimaryExpression(classifier, ok);
if (!*ok) return;
@@ -2902,11 +2683,6 @@ void ParserBase<Traits>::ParseFormalParameter(
if (!*ok) return;
if (!Traits::IsIdentifier(pattern)) {
- if (!allow_harmony_destructuring_bind()) {
- ReportUnexpectedToken(next);
- *ok = false;
- return;
- }
parameters->is_simple = false;
ValidateFormalParameterInitializer(classifier, ok);
if (!*ok) return;
@@ -2914,7 +2690,7 @@ void ParserBase<Traits>::ParseFormalParameter(
}
ExpressionT initializer = Traits::EmptyExpression();
- if (!is_rest && allow_harmony_default_parameters() && Check(Token::ASSIGN)) {
+ if (!is_rest && Check(Token::ASSIGN)) {
ExpressionClassifier init_classifier(this);
initializer = ParseAssignmentExpression(true, &init_classifier, ok);
if (!*ok) return;
@@ -3099,6 +2875,10 @@ ParserBase<Traits>::ParseArrowFunctionLiteral(
body->Add(factory()->NewReturnStatement(expression, pos), zone());
materialized_literal_count = function_state.materialized_literal_count();
expected_property_count = function_state.expected_property_count();
+ // ES6 14.6.1 Static Semantics: IsInTailPosition
+ if (allow_tailcalls() && !is_sloppy(language_mode())) {
+ this->MarkTailPosition(expression);
+ }
}
super_loc = function_state.super_location();
@@ -3253,47 +3033,25 @@ typename ParserBase<Traits>::ExpressionT
ParserBase<Traits>::CheckAndRewriteReferenceExpression(
ExpressionT expression, int beg_pos, int end_pos,
MessageTemplate::Template message, ParseErrorType type, bool* ok) {
- ExpressionClassifier classifier(this);
- ExpressionT result = ClassifyAndRewriteReferenceExpression(
- &classifier, expression, beg_pos, end_pos, message, type);
- ValidateExpression(&classifier, ok);
- if (!*ok) return this->EmptyExpression();
- return result;
-}
-
-
-template <typename Traits>
-typename ParserBase<Traits>::ExpressionT
-ParserBase<Traits>::ClassifyAndRewriteReferenceExpression(
- ExpressionClassifier* classifier, ExpressionT expression, int beg_pos,
- int end_pos, MessageTemplate::Template message, ParseErrorType type) {
- Scanner::Location location(beg_pos, end_pos);
- if (this->IsIdentifier(expression)) {
- if (is_strict(language_mode()) &&
- this->IsEvalOrArguments(this->AsIdentifier(expression))) {
- classifier->RecordExpressionError(
- location, MessageTemplate::kStrictEvalArguments, kSyntaxError);
- return expression;
- }
- if (is_strong(language_mode()) &&
- this->IsUndefined(this->AsIdentifier(expression))) {
- classifier->RecordExpressionError(
- location, MessageTemplate::kStrongUndefined, kSyntaxError);
- return expression;
- }
+ if (this->IsIdentifier(expression) && is_strict(language_mode()) &&
+ this->IsEvalOrArguments(this->AsIdentifier(expression))) {
+ ReportMessageAt(Scanner::Location(beg_pos, end_pos),
+ MessageTemplate::kStrictEvalArguments, kSyntaxError);
+ *ok = false;
+ return this->EmptyExpression();
}
if (expression->IsValidReferenceExpression()) {
return expression;
- } else if (expression->IsCall()) {
+ }
+ if (expression->IsCall()) {
// If it is a call, make it a runtime error for legacy web compatibility.
// Rewrite `expr' to `expr[throw ReferenceError]'.
- int pos = location.beg_pos;
- ExpressionT error = this->NewThrowReferenceError(message, pos);
- return factory()->NewProperty(expression, error, pos);
- } else {
- classifier->RecordExpressionError(location, message, type);
- return expression;
+ ExpressionT error = this->NewThrowReferenceError(message, beg_pos);
+ return factory()->NewProperty(expression, error, beg_pos);
}
+ ReportMessageAt(Scanner::Location(beg_pos, end_pos), message, type);
+ *ok = false;
+ return this->EmptyExpression();
}
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index 968e8ed4ff..c9897cdd92 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -439,6 +439,14 @@ bool ParserTraits::ShortcutNumericLiteralBinaryExpression(
*x = factory->NewNumberLiteral(value, pos, has_dot);
return true;
}
+ case Token::EXP: {
+ double value = Pow(x_val, y_val);
+ int int_value = static_cast<int>(value);
+ *x = factory->NewNumberLiteral(
+ int_value == value && value != -0.0 ? int_value : value, pos,
+ has_dot);
+ return true;
+ }
default:
break;
}
@@ -491,6 +499,20 @@ Expression* ParserTraits::BuildUnaryExpression(Expression* expression,
return factory->NewUnaryOperation(op, expression, pos);
}
+Expression* ParserTraits::BuildIteratorResult(Expression* value, bool done) {
+ int pos = RelocInfo::kNoPosition;
+ AstNodeFactory* factory = parser_->factory();
+ Zone* zone = parser_->zone();
+
+ if (value == nullptr) value = factory->NewUndefinedLiteral(pos);
+
+ auto args = new (zone) ZoneList<Expression*>(2, zone);
+ args->Add(value, zone);
+ args->Add(factory->NewBooleanLiteral(done, pos), zone);
+
+ return factory->NewCallRuntime(Runtime::kInlineCreateIterResultObject, args,
+ pos);
+}
Expression* ParserTraits::NewThrowReferenceError(
MessageTemplate::Template message, int pos) {
@@ -734,14 +756,17 @@ FunctionLiteral* ParserTraits::ParseFunctionLiteral(
function_token_position, type, language_mode, ok);
}
-
ClassLiteral* ParserTraits::ParseClassLiteral(
- const AstRawString* name, Scanner::Location class_name_location,
- bool name_is_strict_reserved, int pos, bool* ok) {
- return parser_->ParseClassLiteral(name, class_name_location,
+ Type::ExpressionClassifier* classifier, const AstRawString* name,
+ Scanner::Location class_name_location, bool name_is_strict_reserved,
+ int pos, bool* ok) {
+ return parser_->ParseClassLiteral(classifier, name, class_name_location,
name_is_strict_reserved, pos, ok);
}
+void ParserTraits::MarkTailPosition(Expression* expression) {
+ expression->MarkTail();
+}
Parser::Parser(ParseInfo* info)
: ParserBase<ParserTraits>(info->zone(), &scanner_, info->stack_limit(),
@@ -762,18 +787,18 @@ Parser::Parser(ParseInfo* info)
DCHECK(!info->script().is_null() || info->source_stream() != NULL);
set_allow_lazy(info->allow_lazy_parsing());
set_allow_natives(FLAG_allow_natives_syntax || info->is_native());
+ set_allow_tailcalls(FLAG_harmony_tailcalls && !info->is_native() &&
+ info->isolate()->is_tail_call_elimination_enabled());
set_allow_harmony_sloppy(FLAG_harmony_sloppy);
set_allow_harmony_sloppy_function(FLAG_harmony_sloppy_function);
set_allow_harmony_sloppy_let(FLAG_harmony_sloppy_let);
- set_allow_harmony_default_parameters(FLAG_harmony_default_parameters);
- set_allow_harmony_destructuring_bind(FLAG_harmony_destructuring_bind);
- set_allow_harmony_destructuring_assignment(
- FLAG_harmony_destructuring_assignment);
- set_allow_strong_mode(FLAG_strong_mode);
- set_allow_legacy_const(FLAG_legacy_const);
set_allow_harmony_do_expressions(FLAG_harmony_do_expressions);
set_allow_harmony_function_name(FLAG_harmony_function_name);
set_allow_harmony_function_sent(FLAG_harmony_function_sent);
+ set_allow_harmony_restrictive_declarations(
+ FLAG_harmony_restrictive_declarations);
+ set_allow_harmony_exponentiation_operator(
+ FLAG_harmony_exponentiation_operator);
for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
++feature) {
use_counts_[feature] = 0;
@@ -901,7 +926,7 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
kNormalFunction, &function_factory);
// Don't count the mode in the use counters--give the program a chance
- // to enable script/module-wide strict/strong mode below.
+ // to enable script/module-wide strict mode below.
scope_->SetLanguageMode(info->language_mode());
ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone());
bool ok = true;
@@ -926,8 +951,7 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
// unchanged if the property already exists.
InsertSloppyBlockFunctionVarBindings(scope, &ok);
}
- if (ok && (is_strict(language_mode()) || allow_harmony_sloppy() ||
- allow_harmony_destructuring_bind())) {
+ if (ok) {
CheckConflictingVarDeclarations(scope_, &ok);
}
@@ -1154,28 +1178,7 @@ void* Parser::ParseStatementList(ZoneList<Statement*>* body, int end_token,
}
Scanner::Location token_loc = scanner()->peek_location();
- Scanner::Location old_this_loc = function_state_->this_location();
- Scanner::Location old_super_loc = function_state_->super_location();
Statement* stat = ParseStatementListItem(CHECK_OK);
-
- if (is_strong(language_mode()) && scope_->is_function_scope() &&
- IsClassConstructor(function_state_->kind())) {
- Scanner::Location this_loc = function_state_->this_location();
- Scanner::Location super_loc = function_state_->super_location();
- if (this_loc.beg_pos != old_this_loc.beg_pos &&
- this_loc.beg_pos != token_loc.beg_pos) {
- ReportMessageAt(this_loc, MessageTemplate::kStrongConstructorThis);
- *ok = false;
- return nullptr;
- }
- if (super_loc.beg_pos != old_super_loc.beg_pos &&
- super_loc.beg_pos != token_loc.beg_pos) {
- ReportMessageAt(super_loc, MessageTemplate::kStrongConstructorSuper);
- *ok = false;
- return nullptr;
- }
- }
-
if (stat == NULL || stat->IsEmpty()) {
directive_prologue = false; // End of directive prologue.
continue;
@@ -1189,43 +1192,21 @@ void* Parser::ParseStatementList(ZoneList<Statement*>* body, int end_token,
if ((e_stat = stat->AsExpressionStatement()) != NULL &&
(literal = e_stat->expression()->AsLiteral()) != NULL &&
literal->raw_value()->IsString()) {
- // Check "use strict" directive (ES5 14.1), "use asm" directive, and
- // "use strong" directive (experimental).
+ // Check "use strict" directive (ES5 14.1), "use asm" directive.
bool use_strict_found =
literal->raw_value()->AsString() ==
ast_value_factory()->use_strict_string() &&
token_loc.end_pos - token_loc.beg_pos ==
ast_value_factory()->use_strict_string()->length() + 2;
- bool use_strong_found =
- allow_strong_mode() &&
- literal->raw_value()->AsString() ==
- ast_value_factory()->use_strong_string() &&
- token_loc.end_pos - token_loc.beg_pos ==
- ast_value_factory()->use_strong_string()->length() + 2;
- if (use_strict_found || use_strong_found) {
- // Strong mode implies strict mode. If there are several "use strict"
- // / "use strong" directives, do the strict mode changes only once.
+ if (use_strict_found) {
if (is_sloppy(scope_->language_mode())) {
RaiseLanguageMode(STRICT);
}
- if (use_strong_found) {
- RaiseLanguageMode(STRONG);
- if (IsClassConstructor(function_state_->kind())) {
- // "use strong" cannot occur in a class constructor body, to avoid
- // unintuitive strong class object semantics.
- ParserTraits::ReportMessageAt(
- token_loc, MessageTemplate::kStrongConstructorDirective);
- *ok = false;
- return nullptr;
- }
- }
if (!scope_->HasSimpleParameters()) {
// TC39 deemed "use strict" directives to be an error when occurring
// in the body of a function with non-simple parameter list, on
// 29/7/2015. https://goo.gl/ueA7Ln
- //
- // In V8, this also applies to "use strong " directives.
const AstRawString* string = literal->raw_value()->AsString();
ParserTraits::ReportMessageAt(
token_loc, MessageTemplate::kIllegalLanguageModeDirective,
@@ -1294,7 +1275,7 @@ Statement* Parser::ParseStatementListItem(bool* ok) {
default:
break;
}
- return ParseStatement(NULL, ok);
+ return ParseStatement(NULL, kAllowLabelledFunctionStatement, ok);
}
@@ -1445,10 +1426,6 @@ ZoneList<ImportDeclaration*>* Parser::ParseNamedImports(int pos, bool* ok) {
*ok = false;
ReportMessage(MessageTemplate::kStrictEvalArguments);
return NULL;
- } else if (is_strong(language_mode()) && IsUndefined(local_name)) {
- *ok = false;
- ReportMessage(MessageTemplate::kStrongUndefined);
- return NULL;
}
VariableProxy* proxy = NewUnresolved(local_name, IMPORT);
ImportDeclaration* declaration =
@@ -1594,9 +1571,9 @@ Statement* Parser::ParseExportDefault(bool* ok) {
if (peek() == Token::EXTENDS || peek() == Token::LBRACE) {
// ClassDeclaration[+Default] ::
// 'class' ('extends' LeftHandExpression)? '{' ClassBody '}'
- default_export =
- ParseClassLiteral(default_string, Scanner::Location::invalid(),
- false, position(), CHECK_OK);
+ default_export = ParseClassLiteral(nullptr, default_string,
+ Scanner::Location::invalid(), false,
+ position(), CHECK_OK);
result = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
} else {
result = ParseClassDeclaration(&names, CHECK_OK);
@@ -1748,8 +1725,8 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
return result;
}
-
Statement* Parser::ParseStatement(ZoneList<const AstRawString*>* labels,
+ AllowLabelledFunctionStatement allow_function,
bool* ok) {
// Statement ::
// EmptyStatement
@@ -1759,12 +1736,12 @@ Statement* Parser::ParseStatement(ZoneList<const AstRawString*>* labels,
Next();
return factory()->NewEmptyStatement(RelocInfo::kNoPosition);
}
- return ParseSubStatement(labels, ok);
+ return ParseSubStatement(labels, allow_function, ok);
}
-
-Statement* Parser::ParseSubStatement(ZoneList<const AstRawString*>* labels,
- bool* ok) {
+Statement* Parser::ParseSubStatement(
+ ZoneList<const AstRawString*>* labels,
+ AllowLabelledFunctionStatement allow_function, bool* ok) {
// Statement ::
// Block
// VariableStatement
@@ -1793,12 +1770,6 @@ Statement* Parser::ParseSubStatement(ZoneList<const AstRawString*>* labels,
return ParseBlock(labels, ok);
case Token::SEMICOLON:
- if (is_strong(language_mode())) {
- ReportMessageAt(scanner()->peek_location(),
- MessageTemplate::kStrongEmpty);
- *ok = false;
- return NULL;
- }
Next();
return factory()->NewEmptyStatement(RelocInfo::kNoPosition);
@@ -1839,26 +1810,18 @@ Statement* Parser::ParseSubStatement(ZoneList<const AstRawString*>* labels,
case Token::SWITCH:
return ParseSwitchStatement(labels, ok);
- case Token::FUNCTION: {
- // FunctionDeclaration is only allowed in the context of SourceElements
- // (Ecma 262 5th Edition, clause 14):
- // SourceElement:
- // Statement
- // FunctionDeclaration
- // Common language extension is to allow function declaration in place
- // of any statement. This language extension is disabled in strict mode.
- //
- // In Harmony mode, this case also handles the extension:
- // Statement:
- // GeneratorDeclaration
- if (is_strict(language_mode())) {
- ReportMessageAt(scanner()->peek_location(),
- MessageTemplate::kStrictFunction);
- *ok = false;
- return NULL;
- }
- return ParseFunctionDeclaration(NULL, ok);
- }
+ case Token::FUNCTION:
+ // FunctionDeclaration only allowed as a StatementListItem, not in
+ // an arbitrary Statement position. Exceptions such as
+ // ES#sec-functiondeclarations-in-ifstatement-statement-clauses
+ // are handled by calling ParseScopedStatement rather than
+ // ParseSubStatement directly.
+ ReportMessageAt(scanner()->peek_location(),
+ is_strict(language_mode())
+ ? MessageTemplate::kStrictFunction
+ : MessageTemplate::kSloppyFunction);
+ *ok = false;
+ return nullptr;
case Token::DEBUGGER:
return ParseDebuggerStatement(ok);
@@ -1866,17 +1829,8 @@ Statement* Parser::ParseSubStatement(ZoneList<const AstRawString*>* labels,
case Token::VAR:
return ParseVariableStatement(kStatement, NULL, ok);
- case Token::CONST:
- // In ES6 CONST is not allowed as a Statement, only as a
- // LexicalDeclaration, however we continue to allow it in sloppy mode for
- // backwards compatibility.
- if (is_sloppy(language_mode()) && allow_legacy_const()) {
- return ParseVariableStatement(kStatement, NULL, ok);
- }
-
- // Fall through.
default:
- return ParseExpressionOrLabelledStatement(labels, ok);
+ return ParseExpressionOrLabelledStatement(labels, allow_function, ok);
}
}
@@ -1958,13 +1912,6 @@ Variable* Parser::Declare(Declaration* declaration,
}
var = declaration_scope->DeclareLocal(
name, mode, declaration->initialization(), kind, kNotAssigned);
- } else if ((mode == CONST_LEGACY || var->mode() == CONST_LEGACY) &&
- !declaration_scope->is_script_scope()) {
- // Duplicate legacy const definitions throw at runtime.
- DCHECK(is_sloppy(language_mode()));
- Expression* expression = NewThrowSyntaxError(
- MessageTemplate::kVarRedeclaration, name, declaration->position());
- declaration_scope->SetIllegalRedeclaration(expression);
} else if ((IsLexicalVariableMode(mode) ||
IsLexicalVariableMode(var->mode())) &&
// Lexical bindings may appear for some parameters in sloppy
@@ -2160,12 +2107,10 @@ Statement* Parser::ParseFunctionDeclaration(
// In ES6, a function behaves as a lexical binding, except in
// a script scope, or the initial scope of eval or another function.
VariableMode mode =
- is_strong(language_mode())
- ? CONST
- : (is_strict(language_mode()) || allow_harmony_sloppy_function()) &&
- !scope_->is_declaration_scope()
- ? LET
- : VAR;
+ (is_strict(language_mode()) || allow_harmony_sloppy_function()) &&
+ !scope_->is_declaration_scope()
+ ? LET
+ : VAR;
VariableProxy* proxy = NewUnresolved(name, mode);
Declaration* declaration =
factory()->NewFunctionDeclaration(proxy, mode, fun, scope_, pos);
@@ -2211,13 +2156,12 @@ Statement* Parser::ParseClassDeclaration(ZoneList<const AstRawString*>* names,
bool is_strict_reserved = false;
const AstRawString* name =
ParseIdentifierOrStrictReservedWord(&is_strict_reserved, CHECK_OK);
- ClassLiteral* value = ParseClassLiteral(name, scanner()->location(),
+ ClassLiteral* value = ParseClassLiteral(nullptr, name, scanner()->location(),
is_strict_reserved, pos, CHECK_OK);
- VariableMode mode = is_strong(language_mode()) ? CONST : LET;
- VariableProxy* proxy = NewUnresolved(name, mode);
+ VariableProxy* proxy = NewUnresolved(name, LET);
Declaration* declaration =
- factory()->NewVariableDeclaration(proxy, mode, scope_, pos);
+ factory()->NewVariableDeclaration(proxy, LET, scope_, pos);
Declare(declaration, DeclarationDescriptor::NORMAL, true, CHECK_OK);
proxy->var()->set_initializer_position(position());
Assignment* assignment =
@@ -2337,23 +2281,12 @@ Block* Parser::ParseVariableDeclarations(
}
if (peek() == Token::VAR) {
- if (is_strong(language_mode())) {
- Scanner::Location location = scanner()->peek_location();
- ReportMessageAt(location, MessageTemplate::kStrongVar);
- *ok = false;
- return nullptr;
- }
Consume(Token::VAR);
} else if (peek() == Token::CONST && allow_const()) {
Consume(Token::CONST);
- if (is_sloppy(language_mode()) && allow_legacy_const()) {
- parsing_result->descriptor.mode = CONST_LEGACY;
- ++use_counts_[v8::Isolate::kLegacyConst];
- } else {
- DCHECK(is_strict(language_mode()) || allow_harmony_sloppy());
- DCHECK(var_context != kStatement);
- parsing_result->descriptor.mode = CONST;
- }
+ DCHECK(is_strict(language_mode()) || allow_harmony_sloppy());
+ DCHECK(var_context != kStatement);
+ parsing_result->descriptor.mode = CONST;
} else if (peek() == Token::LET && allow_let()) {
Consume(Token::LET);
DCHECK(var_context != kStatement);
@@ -2378,17 +2311,11 @@ Block* Parser::ParseVariableDeclarations(
int decl_pos = peek_position();
{
ExpressionClassifier pattern_classifier(this);
- Token::Value next = peek();
pattern = ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
ValidateBindingPattern(&pattern_classifier, CHECK_OK);
if (IsLexicalVariableMode(parsing_result->descriptor.mode)) {
ValidateLetPattern(&pattern_classifier, CHECK_OK);
}
- if (!allow_harmony_destructuring_bind() && !pattern->IsVariableProxy()) {
- ReportUnexpectedToken(next);
- *ok = false;
- return nullptr;
- }
}
Scanner::Location variable_loc = scanner()->location();
@@ -2492,9 +2419,9 @@ static bool ContainsLabel(ZoneList<const AstRawString*>* labels,
return false;
}
-
Statement* Parser::ParseExpressionOrLabelledStatement(
- ZoneList<const AstRawString*>* labels, bool* ok) {
+ ZoneList<const AstRawString*>* labels,
+ AllowLabelledFunctionStatement allow_function, bool* ok) {
// ExpressionStatement | LabelledStatement ::
// Expression ';'
// Identifier ':' Statement
@@ -2513,42 +2440,6 @@ Statement* Parser::ParseExpressionOrLabelledStatement(
*ok = false;
return nullptr;
- case Token::THIS:
- if (!FLAG_strong_this) break;
- // Fall through.
- case Token::SUPER:
- if (is_strong(language_mode()) &&
- IsClassConstructor(function_state_->kind())) {
- bool is_this = peek() == Token::THIS;
- Expression* expr;
- ExpressionClassifier classifier(this);
- if (is_this) {
- expr = ParseStrongInitializationExpression(&classifier, CHECK_OK);
- } else {
- expr = ParseStrongSuperCallExpression(&classifier, CHECK_OK);
- }
- RewriteNonPattern(&classifier, CHECK_OK);
- switch (peek()) {
- case Token::SEMICOLON:
- Consume(Token::SEMICOLON);
- break;
- case Token::RBRACE:
- case Token::EOS:
- break;
- default:
- if (!scanner()->HasAnyLineTerminatorBeforeNext()) {
- ReportMessageAt(function_state_->this_location(),
- is_this
- ? MessageTemplate::kStrongConstructorThis
- : MessageTemplate::kStrongConstructorSuper);
- *ok = false;
- return nullptr;
- }
- }
- return factory()->NewExpressionStatement(expr, pos);
- }
- break;
-
default:
break;
}
@@ -2581,7 +2472,15 @@ Statement* Parser::ParseExpressionOrLabelledStatement(
// during the scope processing.
scope_->RemoveUnresolved(var);
Expect(Token::COLON, CHECK_OK);
- return ParseStatement(labels, ok);
+ // ES#sec-labelled-function-declarations Labelled Function Declarations
+ if (peek() == Token::FUNCTION && is_sloppy(language_mode())) {
+ if (allow_function == kAllowLabelledFunctionStatement) {
+ return ParseFunctionDeclaration(labels, ok);
+ } else {
+ return ParseScopedStatement(labels, true, ok);
+ }
+ }
+ return ParseStatement(labels, kDisallowLabelledFunctionStatement, ok);
}
// If we have an extension, we allow a native function declaration.
@@ -2621,11 +2520,11 @@ IfStatement* Parser::ParseIfStatement(ZoneList<const AstRawString*>* labels,
Expect(Token::LPAREN, CHECK_OK);
Expression* condition = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- Statement* then_statement = ParseSubStatement(labels, CHECK_OK);
+ Statement* then_statement = ParseScopedStatement(labels, false, CHECK_OK);
Statement* else_statement = NULL;
if (peek() == Token::ELSE) {
Next();
- else_statement = ParseSubStatement(labels, CHECK_OK);
+ else_statement = ParseScopedStatement(labels, false, CHECK_OK);
} else {
else_statement = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
}
@@ -2724,15 +2623,6 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
return_value = GetLiteralUndefined(position());
}
} else {
- if (is_strong(language_mode()) &&
- IsClassConstructor(function_state_->kind())) {
- int pos = peek_position();
- ReportMessageAt(Scanner::Location(pos, pos + 1),
- MessageTemplate::kStrongConstructorReturnValue);
- *ok = false;
- return NULL;
- }
-
int pos = peek_position();
return_value = ParseExpression(true, CHECK_OK);
@@ -2778,22 +2668,18 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
}
// ES6 14.6.1 Static Semantics: IsInTailPosition
- if (FLAG_harmony_tailcalls && !is_sloppy(language_mode())) {
+ if (allow_tailcalls() && !is_sloppy(language_mode())) {
function_state_->AddExpressionInTailPosition(return_value);
}
}
ExpectSemicolon(CHECK_OK);
if (is_generator()) {
- Expression* generator = factory()->NewVariableProxy(
- function_state_->generator_object_variable());
- Expression* yield = factory()->NewYield(
- generator, return_value, Yield::kFinal, loc.beg_pos);
- result = factory()->NewExpressionStatement(yield, loc.beg_pos);
- } else {
- result = factory()->NewReturnStatement(return_value, loc.beg_pos);
+ return_value = BuildIteratorResult(return_value, true);
}
+ result = factory()->NewReturnStatement(return_value, loc.beg_pos);
+
Scope* decl_scope = scope_->DeclarationScope();
if (decl_scope->is_script_scope() || decl_scope->is_eval_scope()) {
ReportMessageAt(loc, MessageTemplate::kIllegalReturn);
@@ -2822,27 +2708,11 @@ Statement* Parser::ParseWithStatement(ZoneList<const AstRawString*>* labels,
Expression* expr = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- scope_->DeclarationScope()->RecordWithStatement();
Scope* with_scope = NewScope(scope_, WITH_SCOPE);
- Block* body;
+ Statement* body;
{ BlockState block_state(&scope_, with_scope);
with_scope->set_start_position(scanner()->peek_location().beg_pos);
-
- // The body of the with statement must be enclosed in an additional
- // lexical scope in case the body is a FunctionDeclaration.
- body = factory()->NewBlock(labels, 1, false, RelocInfo::kNoPosition);
- Scope* block_scope = NewScope(scope_, BLOCK_SCOPE);
- block_scope->set_start_position(scanner()->location().beg_pos);
- {
- BlockState block_state(&scope_, block_scope);
- Target target(&this->target_stack_, body);
- Statement* stmt = ParseSubStatement(labels, CHECK_OK);
- body->statements()->Add(stmt, zone());
- block_scope->set_end_position(scanner()->location().end_pos);
- block_scope = block_scope->FinalizeBlockScope();
- body->set_scope(block_scope);
- }
-
+ body = ParseScopedStatement(labels, true, CHECK_OK);
with_scope->set_end_position(scanner()->location().end_pos);
}
return factory()->NewWithStatement(with_scope, expr, body, pos);
@@ -2878,13 +2748,6 @@ CaseClause* Parser::ParseCaseClause(bool* default_seen_ptr, bool* ok) {
stat = ParseStatementListItem(CHECK_OK);
statements->Add(stat, zone());
}
- if (is_strong(language_mode()) && stat != NULL && !stat->IsJump() &&
- peek() != Token::RBRACE) {
- ReportMessageAt(scanner()->location(),
- MessageTemplate::kStrongSwitchFallthrough);
- *ok = false;
- return NULL;
- }
return factory()->NewCaseClause(label, statements, pos);
}
@@ -3108,8 +2971,11 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
pattern, pattern->position(),
factory()->NewVariableProxy(catch_variable));
+ Block* init_block =
+ factory()->NewBlock(nullptr, 8, true, RelocInfo::kNoPosition);
PatternRewriter::DeclareAndInitializeVariables(
- catch_block, &descriptor, &decl, nullptr, CHECK_OK);
+ init_block, &descriptor, &decl, nullptr, CHECK_OK);
+ catch_block->statements()->Add(init_block, zone());
}
Expect(Token::LBRACE, CHECK_OK);
@@ -3183,7 +3049,7 @@ DoWhileStatement* Parser::ParseDoWhileStatement(
Target target(&this->target_stack_, loop);
Expect(Token::DO, CHECK_OK);
- Statement* body = ParseSubStatement(NULL, CHECK_OK);
+ Statement* body = ParseScopedStatement(NULL, true, CHECK_OK);
Expect(Token::WHILE, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
@@ -3213,7 +3079,7 @@ WhileStatement* Parser::ParseWhileStatement(
Expect(Token::LPAREN, CHECK_OK);
Expression* cond = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- Statement* body = ParseSubStatement(NULL, CHECK_OK);
+ Statement* body = ParseScopedStatement(NULL, true, CHECK_OK);
if (loop != NULL) loop->Initialize(cond, body);
return loop;
@@ -3257,76 +3123,15 @@ Expression* Parser::BuildIteratorNextResult(Expression* iterator,
throw_call, pos);
}
-
void Parser::InitializeForEachStatement(ForEachStatement* stmt,
Expression* each, Expression* subject,
- Statement* body,
- bool is_destructuring) {
- DCHECK(!is_destructuring || allow_harmony_destructuring_assignment());
+ Statement* body) {
ForOfStatement* for_of = stmt->AsForOfStatement();
-
if (for_of != NULL) {
- Variable* iterator = scope_->NewTemporary(
- ast_value_factory()->dot_iterator_string());
- Variable* result = scope_->NewTemporary(
- ast_value_factory()->dot_result_string());
-
- Expression* assign_iterator;
- Expression* next_result;
- Expression* result_done;
- Expression* assign_each;
-
- // iterator = subject[Symbol.iterator]()
- // Hackily disambiguate o from o.next and o [Symbol.iterator]().
- // TODO(verwaest): Come up with a better solution.
- assign_iterator = factory()->NewAssignment(
- Token::ASSIGN, factory()->NewVariableProxy(iterator),
- GetIterator(subject, factory(), subject->position() - 2),
- subject->position());
-
- // !%_IsJSReceiver(result = iterator.next()) &&
- // %ThrowIteratorResultNotAnObject(result)
- {
- // result = iterator.next()
- Expression* iterator_proxy = factory()->NewVariableProxy(iterator);
- // Hackily disambiguate o from o.next and o [Symbol.iterator]().
- // TODO(verwaest): Come up with a better solution.
- next_result = BuildIteratorNextResult(iterator_proxy, result,
- subject->position() - 1);
- }
-
- // result.done
- {
- Expression* done_literal = factory()->NewStringLiteral(
- ast_value_factory()->done_string(), RelocInfo::kNoPosition);
- Expression* result_proxy = factory()->NewVariableProxy(result);
- result_done = factory()->NewProperty(
- result_proxy, done_literal, RelocInfo::kNoPosition);
- }
-
- // each = result.value
- {
- Expression* value_literal = factory()->NewStringLiteral(
- ast_value_factory()->value_string(), RelocInfo::kNoPosition);
- Expression* result_proxy = factory()->NewVariableProxy(result);
- Expression* result_value = factory()->NewProperty(
- result_proxy, value_literal, RelocInfo::kNoPosition);
- assign_each = factory()->NewAssignment(Token::ASSIGN, each, result_value,
- RelocInfo::kNoPosition);
- if (is_destructuring) {
- assign_each = PatternRewriter::RewriteDestructuringAssignment(
- this, assign_each->AsAssignment(), scope_);
- }
- }
-
- for_of->Initialize(each, subject, body,
- iterator,
- assign_iterator,
- next_result,
- result_done,
- assign_each);
+ InitializeForOfStatement(for_of, each, subject, body,
+ RelocInfo::kNoPosition);
} else {
- if (is_destructuring) {
+ if (each->IsArrayLiteral() || each->IsObjectLiteral()) {
Variable* temp =
scope_->NewTemporary(ast_value_factory()->empty_string());
VariableProxy* temp_proxy = factory()->NewVariableProxy(temp);
@@ -3347,6 +3152,70 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
}
}
+void Parser::InitializeForOfStatement(ForOfStatement* for_of, Expression* each,
+ Expression* iterable, Statement* body,
+ int iterable_pos) {
+ Variable* iterator =
+ scope_->NewTemporary(ast_value_factory()->dot_iterator_string());
+ Variable* result =
+ scope_->NewTemporary(ast_value_factory()->dot_result_string());
+
+ Expression* assign_iterator;
+ Expression* next_result;
+ Expression* result_done;
+ Expression* assign_each;
+
+ // Hackily disambiguate o from o.next and o [Symbol.iterator]().
+ // TODO(verwaest): Come up with a better solution.
+ int get_iterator_pos = iterable_pos != RelocInfo::kNoPosition
+ ? iterable_pos
+ : iterable->position() - 2;
+ int next_result_pos = iterable_pos != RelocInfo::kNoPosition
+ ? iterable_pos
+ : iterable->position() - 1;
+
+ // iterator = iterable[Symbol.iterator]()
+ assign_iterator = factory()->NewAssignment(
+ Token::ASSIGN, factory()->NewVariableProxy(iterator),
+ GetIterator(iterable, factory(), get_iterator_pos), iterable->position());
+
+ // !%_IsJSReceiver(result = iterator.next()) &&
+ // %ThrowIteratorResultNotAnObject(result)
+ {
+ // result = iterator.next()
+ Expression* iterator_proxy = factory()->NewVariableProxy(iterator);
+ next_result =
+ BuildIteratorNextResult(iterator_proxy, result, next_result_pos);
+ }
+
+ // result.done
+ {
+ Expression* done_literal = factory()->NewStringLiteral(
+ ast_value_factory()->done_string(), RelocInfo::kNoPosition);
+ Expression* result_proxy = factory()->NewVariableProxy(result);
+ result_done = factory()->NewProperty(result_proxy, done_literal,
+ RelocInfo::kNoPosition);
+ }
+
+ // each = result.value
+ {
+ Expression* value_literal = factory()->NewStringLiteral(
+ ast_value_factory()->value_string(), RelocInfo::kNoPosition);
+ Expression* result_proxy = factory()->NewVariableProxy(result);
+ Expression* result_value = factory()->NewProperty(
+ result_proxy, value_literal, RelocInfo::kNoPosition);
+ assign_each = factory()->NewAssignment(Token::ASSIGN, each, result_value,
+ RelocInfo::kNoPosition);
+ if (each->IsArrayLiteral() || each->IsObjectLiteral()) {
+ assign_each = PatternRewriter::RewriteDestructuringAssignment(
+ this, assign_each->AsAssignment(), scope_);
+ }
+ }
+
+ for_of->Initialize(each, iterable, body, iterator, assign_iterator,
+ next_result, result_done, assign_each);
+}
+
Statement* Parser::DesugarLexicalBindingsInForStatement(
Scope* inner_scope, VariableMode mode, ZoneList<const AstRawString*>* names,
ForStatement* loop, Statement* init, Expression* cond, Statement* next,
@@ -3595,6 +3464,28 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
return outer_block;
}
+Statement* Parser::ParseScopedStatement(ZoneList<const AstRawString*>* labels,
+ bool legacy, bool* ok) {
+ if (is_strict(language_mode()) || peek() != Token::FUNCTION ||
+ (legacy && allow_harmony_restrictive_declarations())) {
+ return ParseSubStatement(labels, kDisallowLabelledFunctionStatement, ok);
+ } else {
+ if (legacy) {
+ ++use_counts_[v8::Isolate::kLegacyFunctionDeclaration];
+ }
+ // Make a block around the statement for a lexical binding
+ // is introduced by a FunctionDeclaration.
+ Scope* body_scope = NewScope(scope_, BLOCK_SCOPE);
+ BlockState block_state(&scope_, body_scope);
+ Block* block = factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition);
+ Statement* body = ParseFunctionDeclaration(NULL, CHECK_OK);
+ block->statements()->Add(body, zone());
+ body_scope->set_end_position(scanner()->location().end_pos);
+ body_scope = body_scope->FinalizeBlockScope();
+ block->set_scope(body_scope);
+ return block;
+ }
+}
Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
bool* ok) {
@@ -3617,7 +3508,7 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
ParseVariableDeclarations(kForStatement, &parsing_result, nullptr,
CHECK_OK);
- ForEachStatement::VisitMode mode;
+ ForEachStatement::VisitMode mode = ForEachStatement::ENUMERATE;
int each_beg_pos = scanner()->location().beg_pos;
int each_end_pos = scanner()->location().end_pos;
@@ -3706,9 +3597,11 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
factory()->NewBlock(NULL, 3, false, RelocInfo::kNoPosition);
{
+ DontCollectExpressionsInTailPositionScope no_tail_calls(
+ function_state_);
BlockState block_state(&scope_, body_scope);
- Statement* body = ParseSubStatement(NULL, CHECK_OK);
+ Statement* body = ParseScopedStatement(NULL, true, CHECK_OK);
auto each_initialization_block =
factory()->NewBlock(nullptr, 1, true, RelocInfo::kNoPosition);
@@ -3729,8 +3622,7 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
body_block->statements()->Add(body, zone());
VariableProxy* temp_proxy =
factory()->NewVariableProxy(temp, each_beg_pos, each_end_pos);
- InitializeForEachStatement(loop, temp_proxy, enumerable, body_block,
- false);
+ InitializeForEachStatement(loop, temp_proxy, enumerable, body_block);
}
body_scope->set_end_position(scanner()->location().end_pos);
body_scope = body_scope->FinalizeBlockScope();
@@ -3785,7 +3677,7 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
ExpressionClassifier classifier(this);
Expression* expression = ParseExpression(false, &classifier, CHECK_OK);
int lhs_end_pos = scanner()->location().end_pos;
- ForEachStatement::VisitMode mode;
+ ForEachStatement::VisitMode mode = ForEachStatement::ENUMERATE;
is_let_identifier_expression =
expression->IsVariableProxy() &&
expression->AsVariableProxy()->raw_name() ==
@@ -3793,9 +3685,8 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
bool is_for_each = CheckInOrOf(&mode, ok);
if (!*ok) return nullptr;
- bool is_destructuring =
- is_for_each && allow_harmony_destructuring_assignment() &&
- (expression->IsArrayLiteral() || expression->IsObjectLiteral());
+ bool is_destructuring = is_for_each && (expression->IsArrayLiteral() ||
+ expression->IsObjectLiteral());
if (is_destructuring) {
ValidateAssignmentPattern(&classifier, CHECK_OK);
@@ -3825,25 +3716,10 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
Expect(Token::RPAREN, CHECK_OK);
- // Make a block around the statement in case a lexical binding
- // is introduced, e.g. by a FunctionDeclaration.
- // This block must not use for_scope as its scope because if a
- // lexical binding is introduced which overlaps with the for-in/of,
- // expressions in head of the loop should actually have variables
- // resolved in the outer scope.
- Scope* body_scope = NewScope(for_scope, BLOCK_SCOPE);
- {
- BlockState block_state(&scope_, body_scope);
- Block* block =
- factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition);
- Statement* body = ParseSubStatement(NULL, CHECK_OK);
- block->statements()->Add(body, zone());
- InitializeForEachStatement(loop, expression, enumerable, block,
- is_destructuring);
- body_scope->set_end_position(scanner()->location().end_pos);
- body_scope = body_scope->FinalizeBlockScope();
- block->set_scope(body_scope);
- }
+ // For legacy compat reasons, give for loops similar treatment to
+ // if statements in allowing a function declaration for a body
+ Statement* body = ParseScopedStatement(NULL, true, CHECK_OK);
+ InitializeForEachStatement(loop, expression, enumerable, body);
Statement* final_loop = loop->IsForOfStatement()
? FinalizeForOfStatement(
@@ -3900,7 +3776,7 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
}
Expect(Token::RPAREN, CHECK_OK);
- body = ParseSubStatement(NULL, CHECK_OK);
+ body = ParseScopedStatement(NULL, true, CHECK_OK);
}
Statement* result = NULL;
@@ -4064,7 +3940,6 @@ void ParserTraits::ParseArrowFunctionFormalParameters(
parser_->scope_->RemoveUnresolved(expr->AsVariableProxy());
} else if (expr->IsAssignment()) {
Assignment* assignment = expr->AsAssignment();
- DCHECK(parser_->allow_harmony_default_parameters());
DCHECK(!assignment->is_compound());
initializer = assignment->value();
expr = assignment->target();
@@ -4227,7 +4102,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
if (is_generator) {
// For generators, allocating variables in contexts is currently a win
// because it minimizes the work needed to suspend and resume an
- // activation.
+ // activation. The machine code produced for generators (by full-codegen)
+ // relies on this forced context allocation, but not in an essential way.
scope_->ForceContextAllocation();
// Calling a generator returns a generator object. That object is stored
@@ -4347,7 +4223,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// temp_zone is deallocated. These objects are instead allocated in a
// parser-persistent zone (see parser_zone_ in AstNodeFactory).
{
- Zone temp_zone;
+ Zone temp_zone(zone()->allocator());
AstNodeFactory::BodyScope inner(factory(), &temp_zone, use_temp_zone);
body = ParseEagerFunctionBody(function_name, pos, formals, kind,
@@ -4365,16 +4241,6 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// Parsing the body may change the language mode in our scope.
language_mode = scope->language_mode();
- if (is_strong(language_mode) && IsSubclassConstructor(kind)) {
- if (!function_state.super_location().IsValid()) {
- ReportMessageAt(function_name_location,
- MessageTemplate::kStrongSuperCallMissing,
- kReferenceError);
- *ok = false;
- return nullptr;
- }
- }
-
// Validate name and parameter names. We can do this only after parsing the
// function, since the function can declare itself strict.
CheckFunctionName(language_mode, function_name, function_name_validity,
@@ -4391,10 +4257,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
if (is_sloppy(language_mode) && allow_harmony_sloppy_function()) {
InsertSloppyBlockFunctionVarBindings(scope, CHECK_OK);
}
- if (is_strict(language_mode) || allow_harmony_sloppy() ||
- allow_harmony_destructuring_bind()) {
- CheckConflictingVarDeclarations(scope, CHECK_OK);
- }
+ CheckConflictingVarDeclarations(scope, CHECK_OK);
if (body) {
// If body can be inspected, rewrite queued destructuring assignments
@@ -4677,15 +4540,12 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
if (IsGeneratorFunction(kind)) {
// We produce:
//
- // try { InitialYield; ...body...; FinalYield }
+ // try { InitialYield; ...body...; return {value: undefined, done: true} }
// finally { %GeneratorClose(generator) }
//
// - InitialYield yields the actual generator object.
- // - FinalYield yields {value: foo, done: true} where foo is the
- // completion value of body. (This is needed here in case the body
- // falls through without an explicit return.)
- // - Any return statement inside the body will be converted into a similar
- // FinalYield.
+ // - Any return statement inside the body will have its argument wrapped
+ // in a "done" iterator result object.
// - If the generator terminates for whatever reason, we must close it.
// Hence the finally clause.
@@ -4703,8 +4563,8 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
Token::INIT, init_proxy, allocation, RelocInfo::kNoPosition);
VariableProxy* get_proxy = factory()->NewVariableProxy(
function_state_->generator_object_variable());
- Yield* yield = factory()->NewYield(
- get_proxy, assignment, Yield::kInitial, RelocInfo::kNoPosition);
+ Yield* yield =
+ factory()->NewYield(get_proxy, assignment, RelocInfo::kNoPosition);
try_block->statements()->Add(
factory()->NewExpressionStatement(yield, RelocInfo::kNoPosition),
zone());
@@ -4712,15 +4572,9 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
ParseStatementList(try_block->statements(), Token::RBRACE, CHECK_OK);
- VariableProxy* get_proxy = factory()->NewVariableProxy(
- function_state_->generator_object_variable());
- Expression* undefined =
- factory()->NewUndefinedLiteral(RelocInfo::kNoPosition);
- Yield* yield = factory()->NewYield(get_proxy, undefined, Yield::kFinal,
- RelocInfo::kNoPosition);
- try_block->statements()->Add(
- factory()->NewExpressionStatement(yield, RelocInfo::kNoPosition),
- zone());
+ Statement* final_return = factory()->NewReturnStatement(
+ BuildIteratorResult(nullptr, true), RelocInfo::kNoPosition);
+ try_block->statements()->Add(final_return, zone());
Block* finally_block =
factory()->NewBlock(nullptr, 1, false, RelocInfo::kNoPosition);
@@ -4801,7 +4655,7 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
const List<Expression*>& expressions_in_tail_position =
function_state_->expressions_in_tail_position();
for (int i = 0; i < expressions_in_tail_position.length(); ++i) {
- expressions_in_tail_position[i]->MarkTail();
+ MarkTailPosition(expressions_in_tail_position[i]);
}
return result;
}
@@ -4825,14 +4679,13 @@ PreParser::PreParseResult Parser::ParseLazyFunctionBodyWithPreParser(
#define SET_ALLOW(name) reusable_preparser_->set_allow_##name(allow_##name());
SET_ALLOW(natives);
SET_ALLOW(harmony_sloppy);
+ SET_ALLOW(harmony_sloppy_function);
SET_ALLOW(harmony_sloppy_let);
- SET_ALLOW(harmony_default_parameters);
- SET_ALLOW(harmony_destructuring_bind);
- SET_ALLOW(harmony_destructuring_assignment);
- SET_ALLOW(strong_mode);
SET_ALLOW(harmony_do_expressions);
SET_ALLOW(harmony_function_name);
SET_ALLOW(harmony_function_sent);
+ SET_ALLOW(harmony_exponentiation_operator);
+ SET_ALLOW(harmony_restrictive_declarations);
#undef SET_ALLOW
}
PreParser::PreParseResult result = reusable_preparser_->PreParseLazyFunction(
@@ -4844,8 +4697,8 @@ PreParser::PreParseResult Parser::ParseLazyFunctionBodyWithPreParser(
return result;
}
-
-ClassLiteral* Parser::ParseClassLiteral(const AstRawString* name,
+ClassLiteral* Parser::ParseClassLiteral(ExpressionClassifier* classifier,
+ const AstRawString* name,
Scanner::Location class_name_location,
bool name_is_strict_reserved, int pos,
bool* ok) {
@@ -4861,11 +4714,6 @@ ClassLiteral* Parser::ParseClassLiteral(const AstRawString* name,
*ok = false;
return NULL;
}
- if (is_strong(language_mode()) && IsUndefined(name)) {
- ReportMessageAt(class_name_location, MessageTemplate::kStrongUndefined);
- *ok = false;
- return NULL;
- }
Scope* block_scope = NewScope(scope_, BLOCK_SCOPE);
BlockState block_state(&scope_, block_scope);
@@ -4883,9 +4731,13 @@ ClassLiteral* Parser::ParseClassLiteral(const AstRawString* name,
Expression* extends = NULL;
if (Check(Token::EXTENDS)) {
block_scope->set_start_position(scanner()->location().end_pos);
- ExpressionClassifier classifier(this);
- extends = ParseLeftHandSideExpression(&classifier, CHECK_OK);
- RewriteNonPattern(&classifier, CHECK_OK);
+ ExpressionClassifier extends_classifier(this);
+ extends = ParseLeftHandSideExpression(&extends_classifier, CHECK_OK);
+ RewriteNonPattern(&extends_classifier, CHECK_OK);
+ if (classifier != nullptr) {
+ classifier->Accumulate(&extends_classifier,
+ ExpressionClassifier::ExpressionProductions);
+ }
} else {
block_scope->set_start_position(scanner()->location().end_pos);
}
@@ -4906,12 +4758,16 @@ ClassLiteral* Parser::ParseClassLiteral(const AstRawString* name,
const bool is_static = false;
bool is_computed_name = false; // Classes do not care about computed
// property names here.
- ExpressionClassifier classifier(this);
+ ExpressionClassifier property_classifier(this);
const AstRawString* property_name = nullptr;
ObjectLiteral::Property* property = ParsePropertyDefinition(
&checker, in_class, has_extends, is_static, &is_computed_name,
- &has_seen_constructor, &classifier, &property_name, CHECK_OK);
- RewriteNonPattern(&classifier, CHECK_OK);
+ &has_seen_constructor, &property_classifier, &property_name, CHECK_OK);
+ RewriteNonPattern(&property_classifier, CHECK_OK);
+ if (classifier != nullptr) {
+ classifier->Accumulate(&property_classifier,
+ ExpressionClassifier::ExpressionProductions);
+ }
if (has_seen_constructor && constructor == NULL) {
constructor = GetPropertyValue(property)->AsFunctionLiteral();
@@ -4938,8 +4794,8 @@ ClassLiteral* Parser::ParseClassLiteral(const AstRawString* name,
end_pos, block_scope->language_mode());
}
- // Note that we do not finalize this block scope because strong
- // mode uses it as a sentinel value indicating an anonymous class.
+ // Note that we do not finalize this block scope because it is
+ // used as a sentinel value indicating an anonymous class.
block_scope->set_end_position(end_pos);
if (name != NULL) {
@@ -4997,7 +4853,7 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
// Check that the expected number of arguments are being passed.
if (function->nargs != -1 && function->nargs != args->length()) {
- ReportMessage(MessageTemplate::kIllegalAccess);
+ ReportMessage(MessageTemplate::kRuntimeWrongNumArgs);
*ok = false;
return NULL;
}
@@ -5331,12 +5187,11 @@ Expression* Parser::CloseTemplateLiteral(TemplateLiteralState* state, int start,
ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(4, zone());
args->Add(factory()->NewArrayLiteral(
const_cast<ZoneList<Expression*>*>(cooked_strings),
- cooked_idx, is_strong(language_mode()), pos),
+ cooked_idx, pos),
zone());
args->Add(
factory()->NewArrayLiteral(
- const_cast<ZoneList<Expression*>*>(raw_strings), raw_idx,
- is_strong(language_mode()), pos),
+ const_cast<ZoneList<Expression*>*>(raw_strings), raw_idx, pos),
zone());
// Ensure hash is suitable as a Smi value
@@ -5425,7 +5280,6 @@ ZoneList<v8::internal::Expression*>* Parser::PrepareSpreadArguments(
}
int literal_index = function_state_->NextMaterializedLiteralIndex();
args->Add(factory()->NewArrayLiteral(unspread, literal_index,
- is_strong(language_mode()),
RelocInfo::kNoPosition),
zone());
@@ -5511,8 +5365,6 @@ void Parser::SetLanguageMode(Scope* scope, LanguageMode mode) {
v8::Isolate::UseCounterFeature feature;
if (is_sloppy(mode))
feature = v8::Isolate::kSloppyMode;
- else if (is_strong(mode))
- feature = v8::Isolate::kStrongMode;
else if (is_strict(mode))
feature = v8::Isolate::kStrictMode;
else
@@ -5523,8 +5375,8 @@ void Parser::SetLanguageMode(Scope* scope, LanguageMode mode) {
void Parser::RaiseLanguageMode(LanguageMode mode) {
- SetLanguageMode(scope_,
- static_cast<LanguageMode>(scope_->language_mode() | mode));
+ LanguageMode old = scope_->language_mode();
+ SetLanguageMode(scope_, old > mode ? old : mode);
}
@@ -5532,6 +5384,16 @@ void ParserTraits::RewriteDestructuringAssignments() {
parser_->RewriteDestructuringAssignments();
}
+Expression* ParserTraits::RewriteExponentiation(Expression* left,
+ Expression* right, int pos) {
+ return parser_->RewriteExponentiation(left, right, pos);
+}
+
+Expression* ParserTraits::RewriteAssignExponentiation(Expression* left,
+ Expression* right,
+ int pos) {
+ return parser_->RewriteAssignExponentiation(left, right, pos);
+}
void ParserTraits::RewriteNonPattern(Type::ExpressionClassifier* classifier,
bool* ok) {
@@ -5605,7 +5467,6 @@ void Parser::RewriteNonPattern(ExpressionClassifier* classifier, bool* ok) {
void Parser::RewriteDestructuringAssignments() {
- if (!allow_harmony_destructuring_assignment()) return;
const auto& assignments =
function_state_->destructuring_assignments_to_rewrite();
for (int i = assignments.length() - 1; i >= 0; --i) {
@@ -5622,6 +5483,60 @@ void Parser::RewriteDestructuringAssignments() {
}
}
+Expression* Parser::RewriteExponentiation(Expression* left, Expression* right,
+ int pos) {
+ ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
+ args->Add(left, zone());
+ args->Add(right, zone());
+ return factory()->NewCallRuntime(Context::MATH_POW_METHOD_INDEX, args, pos);
+}
+
+Expression* Parser::RewriteAssignExponentiation(Expression* left,
+ Expression* right, int pos) {
+ ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
+ if (left->IsVariableProxy()) {
+ VariableProxy* lhs = left->AsVariableProxy();
+
+ Expression* result;
+ DCHECK_NOT_NULL(lhs->raw_name());
+ result =
+ this->ExpressionFromIdentifier(lhs->raw_name(), lhs->position(),
+ lhs->end_position(), scope_, factory());
+ args->Add(left, zone());
+ args->Add(right, zone());
+ Expression* call =
+ factory()->NewCallRuntime(Context::MATH_POW_METHOD_INDEX, args, pos);
+ return factory()->NewAssignment(Token::ASSIGN, result, call, pos);
+ } else if (left->IsProperty()) {
+ Property* prop = left->AsProperty();
+ auto temp_obj = scope_->NewTemporary(ast_value_factory()->empty_string());
+ auto temp_key = scope_->NewTemporary(ast_value_factory()->empty_string());
+ Expression* assign_obj = factory()->NewAssignment(
+ Token::ASSIGN, factory()->NewVariableProxy(temp_obj), prop->obj(),
+ RelocInfo::kNoPosition);
+ Expression* assign_key = factory()->NewAssignment(
+ Token::ASSIGN, factory()->NewVariableProxy(temp_key), prop->key(),
+ RelocInfo::kNoPosition);
+ args->Add(factory()->NewProperty(factory()->NewVariableProxy(temp_obj),
+ factory()->NewVariableProxy(temp_key),
+ left->position()),
+ zone());
+ args->Add(right, zone());
+ Expression* call =
+ factory()->NewCallRuntime(Context::MATH_POW_METHOD_INDEX, args, pos);
+ Expression* target = factory()->NewProperty(
+ factory()->NewVariableProxy(temp_obj),
+ factory()->NewVariableProxy(temp_key), RelocInfo::kNoPosition);
+ Expression* assign =
+ factory()->NewAssignment(Token::ASSIGN, target, call, pos);
+ return factory()->NewBinaryOperation(
+ Token::COMMA, assign_obj,
+ factory()->NewBinaryOperation(Token::COMMA, assign_key, assign, pos),
+ pos);
+ }
+ UNREACHABLE();
+ return nullptr;
+}
Expression* Parser::RewriteSpreads(ArrayLiteral* lit) {
// Array literals containing spreads are rewritten using do expressions, e.g.
@@ -5673,45 +5588,6 @@ Expression* Parser::RewriteSpreads(ArrayLiteral* lit) {
Variable* each =
scope_->NewTemporary(ast_value_factory()->dot_for_string());
Expression* subject = spread->expression();
- Variable* iterator =
- scope_->NewTemporary(ast_value_factory()->dot_iterator_string());
- Variable* element =
- scope_->NewTemporary(ast_value_factory()->dot_result_string());
- // iterator = subject[Symbol.iterator]()
- Expression* assign_iterator = factory()->NewAssignment(
- Token::ASSIGN, factory()->NewVariableProxy(iterator),
- GetIterator(subject, factory(), spread->expression_position()),
- subject->position());
- // !%_IsJSReceiver(element = iterator.next()) &&
- // %ThrowIteratorResultNotAnObject(element)
- Expression* next_element;
- {
- // element = iterator.next()
- Expression* iterator_proxy = factory()->NewVariableProxy(iterator);
- next_element = BuildIteratorNextResult(iterator_proxy, element,
- spread->expression_position());
- }
- // element.done
- Expression* element_done;
- {
- Expression* done_literal = factory()->NewStringLiteral(
- ast_value_factory()->done_string(), RelocInfo::kNoPosition);
- Expression* element_proxy = factory()->NewVariableProxy(element);
- element_done = factory()->NewProperty(element_proxy, done_literal,
- RelocInfo::kNoPosition);
- }
- // each = element.value
- Expression* assign_each;
- {
- Expression* value_literal = factory()->NewStringLiteral(
- ast_value_factory()->value_string(), RelocInfo::kNoPosition);
- Expression* element_proxy = factory()->NewVariableProxy(element);
- Expression* element_value = factory()->NewProperty(
- element_proxy, value_literal, RelocInfo::kNoPosition);
- assign_each = factory()->NewAssignment(
- Token::ASSIGN, factory()->NewVariableProxy(each), element_value,
- RelocInfo::kNoPosition);
- }
// %AppendElement($R, each)
Statement* append_body;
{
@@ -5728,11 +5604,10 @@ Expression* Parser::RewriteSpreads(ArrayLiteral* lit) {
// for (each of spread) %AppendElement($R, each)
ForEachStatement* loop = factory()->NewForEachStatement(
ForEachStatement::ITERATE, nullptr, RelocInfo::kNoPosition);
- ForOfStatement* for_of = loop->AsForOfStatement();
- for_of->Initialize(factory()->NewVariableProxy(each), subject,
- append_body, iterator, assign_iterator, next_element,
- element_done, assign_each);
- do_block->statements()->Add(for_of, zone());
+ InitializeForOfStatement(loop->AsForOfStatement(),
+ factory()->NewVariableProxy(each), subject,
+ append_body, spread->expression_position());
+ do_block->statements()->Add(loop, zone());
}
}
// Now, rewind the original array literal to truncate everything from the
@@ -6055,9 +5930,8 @@ Expression* ParserTraits::RewriteYieldStar(
Block* then = factory->NewBlock(nullptr, 4+1, false, nopos);
Variable* var_tmp = scope->NewTemporary(avfactory->empty_string());
- BuildIteratorClose(
- then->statements(), var_iterator, factory->NewUndefinedLiteral(nopos),
- var_tmp);
+ BuildIteratorClose(then->statements(), var_iterator, Nothing<Variable*>(),
+ var_tmp);
then->statements()->Add(throw_call, zone);
check_throw = factory->NewIfStatement(
condition, then, factory->NewEmptyStatement(nopos), nopos);
@@ -6128,13 +6002,11 @@ Expression* ParserTraits::RewriteYieldStar(
set_mode_return = factory->NewExpressionStatement(assignment, nopos);
}
-
- // RawYield(output);
+ // Yield(output);
Statement* yield_output;
{
Expression* output_proxy = factory->NewVariableProxy(var_output);
- Yield* yield = factory->NewYield(
- generator, output_proxy, Yield::kInitial, nopos);
+ Yield* yield = factory->NewYield(generator, output_proxy, nopos);
yield_output = factory->NewExpressionStatement(yield, nopos);
}
@@ -6232,8 +6104,7 @@ Expression* ParserTraits::RewriteYieldStar(
case_next->Add(factory->NewBreakStatement(switch_mode, nopos), zone);
auto case_return = new (zone) ZoneList<Statement*>(5, zone);
- BuildIteratorClose(case_return, var_iterator,
- factory->NewVariableProxy(var_input, nopos), var_output);
+ BuildIteratorClose(case_return, var_iterator, Just(var_input), var_output);
case_return->Add(factory->NewBreakStatement(switch_mode, nopos), zone);
auto case_throw = new (zone) ZoneList<Statement*>(5, zone);
@@ -6311,7 +6182,8 @@ Expression* ParserTraits::RewriteYieldStar(
// if (!IS_CALLABLE(C)) {
// throw MakeTypeError(kCalledNonCallableInstanceOf);
// }
-// handler_result = %ordinary_has_instance(C, O);
+// handler_result = %_GetOrdinaryHasInstance()
+// handler_result = %_Call(handler_result, C, O);
// } else {
// handler_result = !!(%_Call(handler_result, C, O));
// }
@@ -6356,8 +6228,8 @@ Expression* ParserTraits::RewriteInstanceof(Expression* lhs, Expression* rhs,
factory->NewCallRuntime(Runtime::kInlineIsJSReceiver, args, nopos);
Expression* call =
NewThrowTypeError(MessageTemplate::kNonObjectInInstanceOfCheck,
- avfactory->empty_string(), nopos);
- Statement* throw_call = factory->NewExpressionStatement(call, nopos);
+ avfactory->empty_string(), pos);
+ Statement* throw_call = factory->NewExpressionStatement(call, pos);
validate_C =
factory->NewIfStatement(is_receiver_call,
@@ -6384,7 +6256,8 @@ Expression* ParserTraits::RewriteInstanceof(Expression* lhs, Expression* rhs,
// if (!IS_CALLABLE(C)) {
// throw MakeTypeError(kCalledNonCallableInstanceOf);
// }
- // result = %ordinary_has_instance(C, O);
+ // handler_result = %_GetOrdinaryHasInstance()
+ // handler_result = %_Call(handler_result, C, O);
// } else {
// handler_result = !!%_Call(handler_result, C, O);
// }
@@ -6394,17 +6267,29 @@ Expression* ParserTraits::RewriteInstanceof(Expression* lhs, Expression* rhs,
Token::EQ_STRICT, factory->NewVariableProxy(var_handler_result),
factory->NewUndefinedLiteral(nopos), nopos);
- Block* then_side = factory->NewBlock(nullptr, 2, false, nopos);
+ Block* then_side = factory->NewBlock(nullptr, 3, false, nopos);
{
Expression* throw_expr =
NewThrowTypeError(MessageTemplate::kCalledNonCallableInstanceOf,
- avfactory->empty_string(), nopos);
- Statement* validate_C = CheckCallable(var_C, throw_expr);
- ZoneList<Expression*>* args = new (zone) ZoneList<Expression*>(2, zone);
+ avfactory->empty_string(), pos);
+ Statement* validate_C = CheckCallable(var_C, throw_expr, pos);
+
+ ZoneList<Expression*>* empty_args =
+ new (zone) ZoneList<Expression*>(0, zone);
+ Expression* ordinary_has_instance = factory->NewCallRuntime(
+ Runtime::kInlineGetOrdinaryHasInstance, empty_args, pos);
+ Expression* handler_proxy = factory->NewVariableProxy(var_handler_result);
+ Expression* assignment_handler = factory->NewAssignment(
+ Token::ASSIGN, handler_proxy, ordinary_has_instance, nopos);
+ Statement* assignment_get_handler =
+ factory->NewExpressionStatement(assignment_handler, nopos);
+
+ ZoneList<Expression*>* args = new (zone) ZoneList<Expression*>(3, zone);
+ args->Add(factory->NewVariableProxy(var_handler_result), zone);
args->Add(factory->NewVariableProxy(var_C), zone);
args->Add(factory->NewVariableProxy(var_O), zone);
- CallRuntime* call = factory->NewCallRuntime(
- Context::ORDINARY_HAS_INSTANCE_INDEX, args, pos);
+ Expression* call =
+ factory->NewCallRuntime(Runtime::kInlineCall, args, pos);
Expression* result_proxy = factory->NewVariableProxy(var_handler_result);
Expression* assignment =
factory->NewAssignment(Token::ASSIGN, result_proxy, call, nopos);
@@ -6412,6 +6297,7 @@ Expression* ParserTraits::RewriteInstanceof(Expression* lhs, Expression* rhs,
factory->NewExpressionStatement(assignment, nopos);
then_side->statements()->Add(validate_C, zone);
+ then_side->statements()->Add(assignment_get_handler, zone);
then_side->statements()->Add(assignment_return, zone);
}
@@ -6455,7 +6341,8 @@ Expression* ParserTraits::RewriteInstanceof(Expression* lhs, Expression* rhs,
return instanceof;
}
-Statement* ParserTraits::CheckCallable(Variable* var, Expression* error) {
+Statement* ParserTraits::CheckCallable(Variable* var, Expression* error,
+ int pos) {
auto factory = parser_->factory();
auto avfactory = parser_->ast_value_factory();
const int nopos = RelocInfo::kNoPosition;
@@ -6468,7 +6355,7 @@ Statement* ParserTraits::CheckCallable(Variable* var, Expression* error) {
Expression* condition = factory->NewCompareOperation(
Token::EQ_STRICT, type_of, function_literal, nopos);
- Statement* throw_call = factory->NewExpressionStatement(error, nopos);
+ Statement* throw_call = factory->NewExpressionStatement(error, pos);
validate_var = factory->NewIfStatement(
condition, factory->NewEmptyStatement(nopos), throw_call, nopos);
@@ -6478,17 +6365,27 @@ Statement* ParserTraits::CheckCallable(Variable* var, Expression* error) {
void ParserTraits::BuildIteratorClose(ZoneList<Statement*>* statements,
Variable* iterator,
- Expression* input,
+ Maybe<Variable*> input,
Variable* var_output) {
//
// This function adds four statements to [statements], corresponding to the
// following code:
//
// let iteratorReturn = iterator.return;
- // if (IS_NULL_OR_UNDEFINED(iteratorReturn) return input;
- // output = %_Call(iteratorReturn, iterator);
+ // if (IS_NULL_OR_UNDEFINED(iteratorReturn) {
+ // return {value: input, done: true};
+ // }
+ // output = %_Call(iteratorReturn, iterator, input);
// if (!IS_RECEIVER(output)) %ThrowIterResultNotAnObject(output);
//
+ // When the input variable is not given, the return statement becomes
+ // return {value: undefined, done: true};
+ // and %_Call has only two arguments:
+ // output = %_Call(iteratorReturn, iterator);
+ //
+ // The reason for allowing input is that BuildIteratorClose
+ // can then be reused to handle the return case in yield*.
+ //
const int nopos = RelocInfo::kNoPosition;
auto factory = parser_->factory();
@@ -6510,25 +6407,36 @@ void ParserTraits::BuildIteratorClose(ZoneList<Statement*>* statements,
get_return = factory->NewExpressionStatement(assignment, nopos);
}
- // if (IS_NULL_OR_UNDEFINED(iteratorReturn) return input;
+ // if (IS_NULL_OR_UNDEFINED(iteratorReturn) {
+ // return {value: input, done: true};
+ // }
Statement* check_return;
{
Expression* condition = factory->NewCompareOperation(
Token::EQ, factory->NewVariableProxy(var_return),
factory->NewNullLiteral(nopos), nopos);
- Statement* return_input = factory->NewReturnStatement(input, nopos);
+ Expression* value = input.IsJust()
+ ? static_cast<Expression*>(
+ factory->NewVariableProxy(input.FromJust()))
+ : factory->NewUndefinedLiteral(nopos);
+
+ Statement* return_input =
+ factory->NewReturnStatement(BuildIteratorResult(value, true), nopos);
check_return = factory->NewIfStatement(
condition, return_input, factory->NewEmptyStatement(nopos), nopos);
}
- // output = %_Call(iteratorReturn, iterator);
+ // output = %_Call(iteratorReturn, iterator, input);
Statement* call_return;
{
auto args = new (zone) ZoneList<Expression*>(3, zone);
args->Add(factory->NewVariableProxy(var_return), zone);
args->Add(factory->NewVariableProxy(iterator), zone);
+ if (input.IsJust()) {
+ args->Add(factory->NewVariableProxy(input.FromJust()), zone);
+ }
Expression* call =
factory->NewCallRuntime(Runtime::kInlineCall, args, nopos);
@@ -6568,9 +6476,124 @@ void ParserTraits::BuildIteratorClose(ZoneList<Statement*>* statements,
statements->Add(validate_output, zone);
}
+void ParserTraits::FinalizeIteratorUse(Variable* completion,
+ Expression* condition, Variable* iter,
+ Block* iterator_use, Block* target) {
+ if (!FLAG_harmony_iterator_close) return;
+
+ //
+ // This function adds two statements to [target], corresponding to the
+ // following code:
+ //
+ // completion = kNormalCompletion;
+ // try {
+ // try {
+ // iterator_use
+ // } catch(e) {
+ // if (completion === kAbruptCompletion) completion = kThrowCompletion;
+ // %ReThrow(e);
+ // }
+ // } finally {
+ // if (condition) {
+ // #BuildIteratorCloseForCompletion(iter, completion)
+ // }
+ // }
+ //
+
+ const int nopos = RelocInfo::kNoPosition;
+ auto factory = parser_->factory();
+ auto avfactory = parser_->ast_value_factory();
+ auto scope = parser_->scope_;
+ auto zone = parser_->zone();
+
+ // completion = kNormalCompletion;
+ Statement* initialize_completion;
+ {
+ Expression* proxy = factory->NewVariableProxy(completion);
+ Expression* assignment = factory->NewAssignment(
+ Token::ASSIGN, proxy,
+ factory->NewSmiLiteral(Parser::kNormalCompletion, nopos), nopos);
+ initialize_completion = factory->NewExpressionStatement(assignment, nopos);
+ }
+
+ // if (completion === kAbruptCompletion) completion = kThrowCompletion;
+ Statement* set_completion_throw;
+ {
+ Expression* condition = factory->NewCompareOperation(
+ Token::EQ_STRICT, factory->NewVariableProxy(completion),
+ factory->NewSmiLiteral(Parser::kAbruptCompletion, nopos), nopos);
+
+ Expression* proxy = factory->NewVariableProxy(completion);
+ Expression* assignment = factory->NewAssignment(
+ Token::ASSIGN, proxy,
+ factory->NewSmiLiteral(Parser::kThrowCompletion, nopos), nopos);
+ Statement* statement = factory->NewExpressionStatement(assignment, nopos);
+ set_completion_throw = factory->NewIfStatement(
+ condition, statement, factory->NewEmptyStatement(nopos), nopos);
+ }
+
+ // if (condition) {
+ // #BuildIteratorCloseForCompletion(iter, completion)
+ // }
+ Block* maybe_close;
+ {
+ Block* block = factory->NewBlock(nullptr, 2, true, nopos);
+ parser_->BuildIteratorCloseForCompletion(block->statements(), iter,
+ completion);
+ DCHECK(block->statements()->length() == 2);
-// Runtime encoding of different completion modes.
-enum ForOfLoopBodyCompletion { BODY_COMPLETED, BODY_ABORTED, BODY_THREW };
+ maybe_close = factory->NewBlock(nullptr, 1, true, nopos);
+ maybe_close->statements()->Add(
+ factory->NewIfStatement(condition, block,
+ factory->NewEmptyStatement(nopos), nopos),
+ zone);
+ }
+
+ // try { #try_block }
+ // catch(e) {
+ // #set_completion_throw;
+ // %ReThrow(e);
+ // }
+ Statement* try_catch;
+ {
+ Scope* catch_scope = parser_->NewScope(scope, CATCH_SCOPE);
+ Variable* catch_variable =
+ catch_scope->DeclareLocal(avfactory->dot_catch_string(), VAR,
+ kCreatedInitialized, Variable::NORMAL);
+
+ Statement* rethrow;
+ // We use %ReThrow rather than the ordinary throw because we want to
+ // preserve the original exception message. This is also why we create a
+ // TryCatchStatementForReThrow below (which does not clear the pending
+ // message), rather than a TryCatchStatement.
+ {
+ auto args = new (zone) ZoneList<Expression*>(1, zone);
+ args->Add(factory->NewVariableProxy(catch_variable), zone);
+ rethrow = factory->NewExpressionStatement(
+ factory->NewCallRuntime(Runtime::kReThrow, args, nopos), nopos);
+ }
+
+ Block* catch_block = factory->NewBlock(nullptr, 2, false, nopos);
+ catch_block->statements()->Add(set_completion_throw, zone);
+ catch_block->statements()->Add(rethrow, zone);
+
+ try_catch = factory->NewTryCatchStatementForReThrow(
+ iterator_use, catch_scope, catch_variable, catch_block, nopos);
+ }
+
+ // try { #try_catch } finally { #maybe_close }
+ Statement* try_finally;
+ {
+ Block* try_block = factory->NewBlock(nullptr, 1, false, nopos);
+ try_block->statements()->Add(try_catch, zone);
+
+ try_finally =
+ factory->NewTryFinallyStatement(try_block, maybe_close, nopos);
+ }
+
+ target->statements()->Add(initialize_completion, zone);
+ target->statements()->Add(try_finally, zone);
+}
void ParserTraits::BuildIteratorCloseForCompletion(
ZoneList<Statement*>* statements, Variable* iterator,
@@ -6581,16 +6604,17 @@ void ParserTraits::BuildIteratorCloseForCompletion(
//
// let iteratorReturn = iterator.return;
// if (!IS_NULL_OR_UNDEFINED(iteratorReturn)) {
- // let output;
- // if (completion === BODY_THREW) {
+ // if (completion === kThrowCompletion) {
// if (!IS_CALLABLE(iteratorReturn)) {
// throw MakeTypeError(kReturnMethodNotCallable);
// }
- // try { output = %_Call(iteratorReturn, iterator) } catch (_) { }
+ // try { %_Call(iteratorReturn, iterator) } catch (_) { }
// } else {
- // output = %_Call(iteratorReturn, iterator);
+ // let output = %_Call(iteratorReturn, iterator);
+ // if (!IS_RECEIVER(output)) {
+ // %ThrowIterResultNotAnObject(output);
+ // }
// }
- // if (!IS_RECEIVER(output)) %ThrowIterResultNotAnObject(output);
// }
//
@@ -6600,11 +6624,9 @@ void ParserTraits::BuildIteratorCloseForCompletion(
auto scope = parser_->scope_;
auto zone = parser_->zone();
- // let output;
- Variable* var_output = scope->NewTemporary(avfactory->empty_string());
// let iteratorReturn = iterator.return;
- Variable* var_return = var_output; // Reusing the output variable.
+ Variable* var_return = scope->NewTemporary(avfactory->empty_string());
Statement* get_return;
{
Expression* iterator_proxy = factory->NewVariableProxy(iterator);
@@ -6626,25 +6648,10 @@ void ParserTraits::BuildIteratorCloseForCompletion(
Expression* throw_expr = NewThrowTypeError(
MessageTemplate::kReturnMethodNotCallable,
avfactory->empty_string(), nopos);
- check_return_callable = CheckCallable(var_return, throw_expr);
+ check_return_callable = CheckCallable(var_return, throw_expr, nopos);
}
- // output = %_Call(iteratorReturn, iterator);
- Statement* call_return;
- {
- auto args = new (zone) ZoneList<Expression*>(2, zone);
- args->Add(factory->NewVariableProxy(var_return), zone);
- args->Add(factory->NewVariableProxy(iterator), zone);
- Expression* call =
- factory->NewCallRuntime(Runtime::kInlineCall, args, nopos);
-
- Expression* output_proxy = factory->NewVariableProxy(var_output);
- Expression* assignment = factory->NewAssignment(
- Token::ASSIGN, output_proxy, call, nopos);
- call_return = factory->NewExpressionStatement(assignment, nopos);
- }
-
- // try { output = %_Call(iteratorReturn, iterator) } catch (_) { }
+ // try { %_Call(iteratorReturn, iterator) } catch (_) { }
Statement* try_call_return;
{
auto args = new (zone) ZoneList<Expression*>(2, zone);
@@ -6653,12 +6660,10 @@ void ParserTraits::BuildIteratorCloseForCompletion(
Expression* call =
factory->NewCallRuntime(Runtime::kInlineCall, args, nopos);
- Expression* assignment = factory->NewAssignment(
- Token::ASSIGN, factory->NewVariableProxy(var_output), call, nopos);
Block* try_block = factory->NewBlock(nullptr, 1, false, nopos);
- try_block->statements()->Add(
- factory->NewExpressionStatement(assignment, nopos), zone);
+ try_block->statements()->Add(factory->NewExpressionStatement(call, nopos),
+ zone);
Block* catch_block = factory->NewBlock(nullptr, 0, false, nopos);
@@ -6671,29 +6676,27 @@ void ParserTraits::BuildIteratorCloseForCompletion(
try_block, catch_scope, catch_variable, catch_block, nopos);
}
- // if (completion === ABRUPT_THROW) {
- // #check_return_callable;
- // #try_call_return;
- // } else {
- // #call_return;
+ // let output = %_Call(iteratorReturn, iterator);
+ // if (!IS_RECEIVER(output)) {
+ // %ThrowIteratorResultNotAnObject(output);
// }
- Statement* call_return_carefully;
+ Block* validate_return;
{
- Expression* condition = factory->NewCompareOperation(
- Token::EQ_STRICT, factory->NewVariableProxy(completion),
- factory->NewSmiLiteral(BODY_THREW, nopos), nopos);
-
- Block* then_block = factory->NewBlock(nullptr, 2, false, nopos);
- then_block->statements()->Add(check_return_callable, zone);
- then_block->statements()->Add(try_call_return, zone);
+ Variable* var_output = scope->NewTemporary(avfactory->empty_string());
+ Statement* call_return;
+ {
+ auto args = new (zone) ZoneList<Expression*>(2, zone);
+ args->Add(factory->NewVariableProxy(var_return), zone);
+ args->Add(factory->NewVariableProxy(iterator), zone);
+ Expression* call =
+ factory->NewCallRuntime(Runtime::kInlineCall, args, nopos);
- call_return_carefully =
- factory->NewIfStatement(condition, then_block, call_return, nopos);
- }
+ Expression* output_proxy = factory->NewVariableProxy(var_output);
+ Expression* assignment =
+ factory->NewAssignment(Token::ASSIGN, output_proxy, call, nopos);
+ call_return = factory->NewExpressionStatement(assignment, nopos);
+ }
- // if (!IS_RECEIVER(output)) %ThrowIteratorResultNotAnObject(output);
- Statement* validate_output;
- {
Expression* is_receiver_call;
{
auto args = new (zone) ZoneList<Expression*>(1, zone);
@@ -6711,8 +6714,32 @@ void ParserTraits::BuildIteratorCloseForCompletion(
throw_call = factory->NewExpressionStatement(call, nopos);
}
- validate_output = factory->NewIfStatement(
+ Statement* check_return = factory->NewIfStatement(
is_receiver_call, factory->NewEmptyStatement(nopos), throw_call, nopos);
+
+ validate_return = factory->NewBlock(nullptr, 2, false, nopos);
+ validate_return->statements()->Add(call_return, zone);
+ validate_return->statements()->Add(check_return, zone);
+ }
+
+ // if (completion === kThrowCompletion) {
+ // #check_return_callable;
+ // #try_call_return;
+ // } else {
+ // #validate_return;
+ // }
+ Statement* call_return_carefully;
+ {
+ Expression* condition = factory->NewCompareOperation(
+ Token::EQ_STRICT, factory->NewVariableProxy(completion),
+ factory->NewSmiLiteral(Parser::kThrowCompletion, nopos), nopos);
+
+ Block* then_block = factory->NewBlock(nullptr, 2, false, nopos);
+ then_block->statements()->Add(check_return_callable, zone);
+ then_block->statements()->Add(try_call_return, zone);
+
+ call_return_carefully =
+ factory->NewIfStatement(condition, then_block, validate_return, nopos);
}
// if (!IS_NULL_OR_UNDEFINED(iteratorReturn)) { ... }
@@ -6722,12 +6749,9 @@ void ParserTraits::BuildIteratorCloseForCompletion(
Token::EQ, factory->NewVariableProxy(var_return),
factory->NewNullLiteral(nopos), nopos);
- Block* block = factory->NewBlock(nullptr, 2, false, nopos);
- block->statements()->Add(call_return_carefully, zone);
- block->statements()->Add(validate_output, zone);
-
- maybe_call_return = factory->NewIfStatement(
- condition, factory->NewEmptyStatement(nopos), block, nopos);
+ maybe_call_return =
+ factory->NewIfStatement(condition, factory->NewEmptyStatement(nopos),
+ call_return_carefully, nopos);
}
@@ -6742,25 +6766,35 @@ Statement* ParserTraits::FinalizeForOfStatement(ForOfStatement* loop, int pos) {
//
// This function replaces the loop with the following wrapping:
//
- // let completion = BODY_COMPLETED;
+ // let each;
+ // let completion = kNormalCompletion;
// try {
- // #loop;
- // } catch(e) {
- // if (completion === BODY_ABORTED) completion = BODY_THREW;
- // throw e;
+ // try {
+ // #loop;
+ // } catch(e) {
+ // if (completion === kAbruptCompletion) completion = kThrowCompletion;
+ // %ReThrow(e);
+ // }
// } finally {
- // if (!(completion === BODY_COMPLETED || IS_UNDEFINED(#iterator))) {
- // #BuildIteratorClose(#iterator, completion) // See above.
+ // if (!(completion === kNormalCompletion || IS_UNDEFINED(#iterator))) {
+ // #BuildIteratorCloseForCompletion(#iterator, completion)
// }
// }
//
// where the loop's body is wrapped as follows:
//
// {
- // {{completion = BODY_ABORTED;}}
// #loop-body
- // {{completion = BODY_COMPLETED;}}
+ // {{completion = kNormalCompletion;}}
// }
+ //
+ // and the loop's assign_each is wrapped as follows
+ //
+ // do {
+ // {{completion = kAbruptCompletion;}}
+ // #assign-each
+ // }
+ //
const int nopos = RelocInfo::kNoPosition;
auto factory = parser_->factory();
@@ -6768,143 +6802,96 @@ Statement* ParserTraits::FinalizeForOfStatement(ForOfStatement* loop, int pos) {
auto scope = parser_->scope_;
auto zone = parser_->zone();
- // let completion = BODY_COMPLETED;
Variable* var_completion = scope->NewTemporary(avfactory->empty_string());
- Statement* initialize_completion;
+
+ // let each;
+ Variable* var_each = scope->NewTemporary(avfactory->empty_string());
+ Statement* initialize_each;
{
- Expression* proxy = factory->NewVariableProxy(var_completion);
+ Expression* proxy = factory->NewVariableProxy(var_each);
Expression* assignment = factory->NewAssignment(
Token::ASSIGN, proxy,
- factory->NewSmiLiteral(BODY_COMPLETED, nopos), nopos);
- initialize_completion =
+ factory->NewUndefinedLiteral(nopos), nopos);
+ initialize_each =
factory->NewExpressionStatement(assignment, nopos);
}
- // if (completion === BODY_ABORTED) completion = BODY_THREW;
- Statement* set_completion_throw;
+ // !(completion === kNormalCompletion || IS_UNDEFINED(#iterator))
+ Expression* closing_condition;
{
- Expression* condition = factory->NewCompareOperation(
+ Expression* lhs = factory->NewCompareOperation(
Token::EQ_STRICT, factory->NewVariableProxy(var_completion),
- factory->NewSmiLiteral(BODY_ABORTED, nopos), nopos);
-
- Expression* proxy = factory->NewVariableProxy(var_completion);
- Expression* assignment = factory->NewAssignment(
- Token::ASSIGN, proxy, factory->NewSmiLiteral(BODY_THREW, nopos),
- nopos);
- Statement* statement = factory->NewExpressionStatement(assignment, nopos);
- set_completion_throw = factory->NewIfStatement(
- condition, statement, factory->NewEmptyStatement(nopos), nopos);
- }
-
- // if (!(completion === BODY_COMPLETED || IS_UNDEFINED(#iterator))) {
- // #BuildIteratorClose(#iterator, completion)
- // }
- Block* maybe_close;
- {
- Expression* condition1 = factory->NewCompareOperation(
- Token::EQ_STRICT, factory->NewVariableProxy(var_completion),
- factory->NewSmiLiteral(BODY_COMPLETED, nopos), nopos);
- Expression* condition2 = factory->NewCompareOperation(
+ factory->NewSmiLiteral(Parser::kNormalCompletion, nopos), nopos);
+ Expression* rhs = factory->NewCompareOperation(
Token::EQ_STRICT, factory->NewVariableProxy(loop->iterator()),
factory->NewUndefinedLiteral(nopos), nopos);
- Expression* condition = factory->NewBinaryOperation(
- Token::OR, condition1, condition2, nopos);
-
- Block* block = factory->NewBlock(nullptr, 2, false, nopos);
- BuildIteratorCloseForCompletion(
- block->statements(), loop->iterator(), var_completion);
- DCHECK(block->statements()->length() == 2);
-
- maybe_close = factory->NewBlock(nullptr, 1, false, nopos);
- maybe_close->statements()->Add(factory->NewIfStatement(
- condition, factory->NewEmptyStatement(nopos), block, nopos), zone);
- }
-
- // try { #try_block }
- // catch(e) {
- // #set_completion_throw;
- // throw e;
- // }
- Statement* try_catch;
- {
- Scope* catch_scope = NewScope(scope, CATCH_SCOPE);
- Variable* catch_variable = catch_scope->DeclareLocal(
- avfactory->dot_catch_string(), VAR, kCreatedInitialized,
- Variable::NORMAL);
-
- Statement* rethrow;
- {
- Expression* proxy = factory->NewVariableProxy(catch_variable);
- rethrow = factory->NewExpressionStatement(
- factory->NewThrow(proxy, nopos), nopos);
- }
-
- Block* try_block = factory->NewBlock(nullptr, 1, false, nopos);
- try_block->statements()->Add(loop, zone);
-
- Block* catch_block = factory->NewBlock(nullptr, 2, false, nopos);
- catch_block->statements()->Add(set_completion_throw, zone);
- catch_block->statements()->Add(rethrow, zone);
-
- try_catch = factory->NewTryCatchStatement(
- try_block, catch_scope, catch_variable, catch_block, nopos);
- }
-
- // try { #try_catch } finally { #maybe_close }
- Statement* try_finally;
- {
- Block* try_block = factory->NewBlock(nullptr, 1, false, nopos);
- try_block->statements()->Add(try_catch, zone);
-
- try_finally =
- factory->NewTryFinallyStatement(try_block, maybe_close, nopos);
- }
-
- // #initialize_completion;
- // #try_finally;
- Statement* final_loop;
- {
- Block* block = factory->NewBlock(nullptr, 2, false, nopos);
- block->statements()->Add(initialize_completion, zone);
- block->statements()->Add(try_finally, zone);
- final_loop = block;
+ closing_condition = factory->NewUnaryOperation(
+ Token::NOT, factory->NewBinaryOperation(Token::OR, lhs, rhs, nopos),
+ nopos);
}
- // {{completion = BODY_ABORTED;}}
- Statement* set_completion_break;
+ // {{completion = kNormalCompletion;}}
+ Statement* set_completion_normal;
{
Expression* proxy = factory->NewVariableProxy(var_completion);
Expression* assignment = factory->NewAssignment(
Token::ASSIGN, proxy,
- factory->NewSmiLiteral(BODY_ABORTED, nopos), nopos);
+ factory->NewSmiLiteral(Parser::kNormalCompletion, nopos), nopos);
Block* block = factory->NewBlock(nullptr, 1, true, nopos);
block->statements()->Add(
factory->NewExpressionStatement(assignment, nopos), zone);
- set_completion_break = block;
+ set_completion_normal = block;
}
- // {{completion = BODY_COMPLETED;}}
- Statement* set_completion_normal;
+ // {{completion = kAbruptCompletion;}}
+ Statement* set_completion_abrupt;
{
Expression* proxy = factory->NewVariableProxy(var_completion);
Expression* assignment = factory->NewAssignment(
- Token::ASSIGN, proxy, factory->NewSmiLiteral(BODY_COMPLETED, nopos),
- nopos);
+ Token::ASSIGN, proxy,
+ factory->NewSmiLiteral(Parser::kAbruptCompletion, nopos), nopos);
Block* block = factory->NewBlock(nullptr, 1, true, nopos);
- block->statements()->Add(
- factory->NewExpressionStatement(assignment, nopos), zone);
- set_completion_normal = block;
+ block->statements()->Add(factory->NewExpressionStatement(assignment, nopos),
+ zone);
+ set_completion_abrupt = block;
}
- // { #set_completion_break; #loop-body; #set_completion_normal }
+ // { #loop-body; #set_completion_normal }
Block* new_body = factory->NewBlock(nullptr, 2, false, nopos);
- new_body->statements()->Add(set_completion_break, zone);
- new_body->statements()->Add(loop->body(), zone);
- new_body->statements()->Add(set_completion_normal, zone);
+ {
+ new_body->statements()->Add(loop->body(), zone);
+ new_body->statements()->Add(set_completion_normal, zone);
+ }
+
+ // { #set_completion_abrupt; #assign-each }
+ Block* new_assign_each = factory->NewBlock(nullptr, 2, false, nopos);
+ {
+ new_assign_each->statements()->Add(set_completion_abrupt, zone);
+ new_assign_each->statements()->Add(
+ factory->NewExpressionStatement(loop->assign_each(), nopos), zone);
+ }
+
+ // Now put things together.
loop->set_body(new_body);
+ loop->set_assign_each(
+ factory->NewDoExpression(new_assign_each, var_each, nopos));
+
+ Statement* final_loop;
+ {
+ Block* target = factory->NewBlock(nullptr, 3, false, nopos);
+ target->statements()->Add(initialize_each, zone);
+
+ Block* try_block = factory->NewBlock(nullptr, 1, false, nopos);
+ try_block->statements()->Add(loop, zone);
+
+ FinalizeIteratorUse(var_completion, closing_condition, loop->iterator(),
+ try_block, target);
+ final_loop = target;
+ }
+
return final_loop;
}
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index d4fb62f02c..c82682e323 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -53,7 +53,6 @@ class ParseInfo {
FLAG_ACCESSOR(kEval, is_eval, set_eval)
FLAG_ACCESSOR(kGlobal, is_global, set_global)
FLAG_ACCESSOR(kStrictMode, is_strict_mode, set_strict_mode)
- FLAG_ACCESSOR(kStrongMode, is_strong_mode, set_strong_mode)
FLAG_ACCESSOR(kNative, is_native, set_native)
FLAG_ACCESSOR(kModule, is_module, set_module)
FLAG_ACCESSOR(kAllowLazyParsing, allow_lazy_parsing, set_allow_lazy_parsing)
@@ -132,17 +131,17 @@ class ParseInfo {
Handle<Context> context() { return context_; }
void clear_script() { script_ = Handle<Script>::null(); }
void set_isolate(Isolate* isolate) { isolate_ = isolate; }
+ void set_shared_info(Handle<SharedFunctionInfo> shared) { shared_ = shared; }
void set_context(Handle<Context> context) { context_ = context; }
void set_script(Handle<Script> script) { script_ = script; }
//--------------------------------------------------------------------------
LanguageMode language_mode() {
- return construct_language_mode(is_strict_mode(), is_strong_mode());
+ return construct_language_mode(is_strict_mode());
}
void set_language_mode(LanguageMode language_mode) {
STATIC_ASSERT(LANGUAGE_END == 3);
- set_strict_mode(language_mode & STRICT_BIT);
- set_strong_mode(language_mode & STRONG_BIT);
+ set_strict_mode(is_strict(language_mode));
}
void ReopenHandlesInNewHandleScope() {
@@ -165,13 +164,12 @@ class ParseInfo {
kEval = 1 << 2,
kGlobal = 1 << 3,
kStrictMode = 1 << 4,
- kStrongMode = 1 << 5,
- kNative = 1 << 6,
- kParseRestriction = 1 << 7,
- kModule = 1 << 8,
- kAllowLazyParsing = 1 << 9,
+ kNative = 1 << 5,
+ kParseRestriction = 1 << 6,
+ kModule = 1 << 7,
+ kAllowLazyParsing = 1 << 8,
// ---------- Output flags --------------------------
- kAstValueFactoryOwned = 1 << 10
+ kAstValueFactoryOwned = 1 << 9
};
//------------- Inputs to parsing and scope analysis -----------------------
@@ -205,7 +203,6 @@ class ParseInfo {
void SetFlag(Flag f, bool v) { flags_ = v ? flags_ | f : flags_ & ~f; }
bool GetFlag(Flag f) const { return (flags_ & f) != 0; }
- void set_shared_info(Handle<SharedFunctionInfo> shared) { shared_ = shared; }
void set_closure(Handle<JSFunction> closure) { closure_ = closure; }
};
@@ -404,16 +401,6 @@ class ParserTraits {
fni->AddFunction(func_to_infer);
}
- static void CheckFunctionLiteralInsideTopLevelObjectLiteral(
- Scope* scope, ObjectLiteralProperty* property, bool* has_function) {
- Expression* value = property->value();
- if (scope->DeclarationScope()->is_script_scope() &&
- value->AsFunctionLiteral() != NULL) {
- *has_function = true;
- value->AsFunctionLiteral()->set_pretenure();
- }
- }
-
// If we assign a function literal to a property we pretenure the
// literal so it can be added as a constant function property.
static void CheckAssigningFunctionLiteralToProperty(Expression* left,
@@ -444,6 +431,8 @@ class ParserTraits {
Expression* BuildUnaryExpression(Expression* expression, Token::Value op,
int pos, AstNodeFactory* factory);
+ Expression* BuildIteratorResult(Expression* value, bool done);
+
// Generate AST node that throws a ReferenceError with the given type.
Expression* NewThrowReferenceError(MessageTemplate::Template message,
int pos);
@@ -464,6 +453,9 @@ class ParserTraits {
MessageTemplate::Template message,
const AstRawString* arg, int pos);
+ void FinalizeIteratorUse(Variable* completion, Expression* condition,
+ Variable* iter, Block* iterator_use, Block* result);
+
Statement* FinalizeForOfStatement(ForOfStatement* loop, int pos);
// Reporting errors.
@@ -581,11 +573,14 @@ class ParserTraits {
const ParserFormalParameters& parameters, FunctionKind kind,
FunctionLiteral::FunctionType function_type, bool* ok);
- ClassLiteral* ParseClassLiteral(const AstRawString* name,
+ ClassLiteral* ParseClassLiteral(Type::ExpressionClassifier* classifier,
+ const AstRawString* name,
Scanner::Location class_name_location,
bool name_is_strict_reserved, int pos,
bool* ok);
+ V8_INLINE void MarkTailPosition(Expression* expression);
+
V8_INLINE void CheckConflictingVarDeclarations(v8::internal::Scope* scope,
bool* ok);
@@ -644,6 +639,11 @@ class ParserTraits {
// Rewrite all DestructuringAssignments in the current FunctionState.
V8_INLINE void RewriteDestructuringAssignments();
+ V8_INLINE Expression* RewriteExponentiation(Expression* left,
+ Expression* right, int pos);
+ V8_INLINE Expression* RewriteAssignExponentiation(Expression* left,
+ Expression* right, int pos);
+
V8_INLINE void QueueDestructuringAssignmentForRewriting(
Expression* assignment);
V8_INLINE void QueueNonPatternForRewriting(Expression* expr);
@@ -670,13 +670,12 @@ class ParserTraits {
private:
Parser* parser_;
- void BuildIteratorClose(
- ZoneList<Statement*>* statements, Variable* iterator,
- Expression* input, Variable* output);
+ void BuildIteratorClose(ZoneList<Statement*>* statements, Variable* iterator,
+ Maybe<Variable*> input, Variable* output);
void BuildIteratorCloseForCompletion(
ZoneList<Statement*>* statements, Variable* iterator,
Variable* body_threw);
- Statement* CheckCallable(Variable* var, Expression* error);
+ Statement* CheckCallable(Variable* var, Expression* error, int pos);
};
@@ -705,6 +704,13 @@ class Parser : public ParserBase<ParserTraits> {
private:
friend class ParserTraits;
+ // Runtime encoding of different completion modes.
+ enum CompletionKind {
+ kNormalCompletion,
+ kThrowCompletion,
+ kAbruptCompletion
+ };
+
// Limit the allowed number of local variables in a function. The hard limit
// is that offsets computed by FullCodeGenerator::StackOperand and similar
// functions are ints, and they should not overflow. In addition, accessing
@@ -754,8 +760,12 @@ class Parser : public ParserBase<ParserTraits> {
ZoneList<const AstRawString*>* local_names,
Scanner::Location* reserved_loc, bool* ok);
ZoneList<ImportDeclaration*>* ParseNamedImports(int pos, bool* ok);
- Statement* ParseStatement(ZoneList<const AstRawString*>* labels, bool* ok);
- Statement* ParseSubStatement(ZoneList<const AstRawString*>* labels, bool* ok);
+ Statement* ParseStatement(ZoneList<const AstRawString*>* labels,
+ AllowLabelledFunctionStatement allow_function,
+ bool* ok);
+ Statement* ParseSubStatement(ZoneList<const AstRawString*>* labels,
+ AllowLabelledFunctionStatement allow_function,
+ bool* ok);
Statement* ParseStatementAsUnlabelled(ZoneList<const AstRawString*>* labels,
bool* ok);
Statement* ParseFunctionDeclaration(ZoneList<const AstRawString*>* names,
@@ -897,7 +907,8 @@ class Parser : public ParserBase<ParserTraits> {
ZoneList<const AstRawString*>* names,
bool* ok);
Statement* ParseExpressionOrLabelledStatement(
- ZoneList<const AstRawString*>* labels, bool* ok);
+ ZoneList<const AstRawString*>* labels,
+ AllowLabelledFunctionStatement allow_function, bool* ok);
IfStatement* ParseIfStatement(ZoneList<const AstRawString*>* labels,
bool* ok);
Statement* ParseContinueStatement(bool* ok);
@@ -920,6 +931,14 @@ class Parser : public ParserBase<ParserTraits> {
class CollectExpressionsInTailPositionToListScope;
TryStatement* ParseTryStatement(bool* ok);
DebuggerStatement* ParseDebuggerStatement(bool* ok);
+ // Parse a SubStatement in strict mode, or with an extra block scope in
+ // sloppy mode to handle
+ // ES#sec-functiondeclarations-in-ifstatement-statement-clauses
+ // The legacy parameter indicates whether function declarations are
+ // banned by the ES2015 specification in this location, and they are being
+ // permitted here to match previous V8 behavior.
+ Statement* ParseScopedStatement(ZoneList<const AstRawString*>* labels,
+ bool legacy, bool* ok);
// !%_IsJSReceiver(result = iterator.next()) &&
// %ThrowIteratorResultNotAnObject(result)
@@ -929,8 +948,10 @@ class Parser : public ParserBase<ParserTraits> {
// Initialize the components of a for-in / for-of statement.
void InitializeForEachStatement(ForEachStatement* stmt, Expression* each,
- Expression* subject, Statement* body,
- bool is_destructuring);
+ Expression* subject, Statement* body);
+ void InitializeForOfStatement(ForOfStatement* stmt, Expression* each,
+ Expression* iterable, Statement* body,
+ int iterable_pos);
Statement* DesugarLexicalBindingsInForStatement(
Scope* inner_scope, VariableMode mode,
ZoneList<const AstRawString*>* names, ForStatement* loop, Statement* init,
@@ -944,8 +965,8 @@ class Parser : public ParserBase<ParserTraits> {
int function_token_position, FunctionLiteral::FunctionType type,
LanguageMode language_mode, bool* ok);
-
- ClassLiteral* ParseClassLiteral(const AstRawString* name,
+ ClassLiteral* ParseClassLiteral(ExpressionClassifier* classifier,
+ const AstRawString* name,
Scanner::Location class_name_location,
bool name_is_strict_reserved, int pos,
bool* ok);
@@ -1035,6 +1056,11 @@ class Parser : public ParserBase<ParserTraits> {
V8_INLINE void RewriteDestructuringAssignments();
+ V8_INLINE Expression* RewriteExponentiation(Expression* left,
+ Expression* right, int pos);
+ V8_INLINE Expression* RewriteAssignExponentiation(Expression* left,
+ Expression* right, int pos);
+
friend class NonPatternRewriter;
V8_INLINE Expression* RewriteSpreads(ArrayLiteral* lit);
diff --git a/deps/v8/src/parsing/pattern-rewriter.cc b/deps/v8/src/parsing/pattern-rewriter.cc
index 768a948863..e699255cdb 100644
--- a/deps/v8/src/parsing/pattern-rewriter.cc
+++ b/deps/v8/src/parsing/pattern-rewriter.cc
@@ -17,6 +17,8 @@ void Parser::PatternRewriter::DeclareAndInitializeVariables(
ZoneList<const AstRawString*>* names, bool* ok) {
PatternRewriter rewriter;
+ DCHECK(block->ignore_completion_value());
+
rewriter.scope_ = declaration_descriptor->scope;
rewriter.parser_ = declaration_descriptor->parser;
rewriter.context_ = BINDING;
@@ -234,22 +236,20 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
if (IsImmutableVariableMode(descriptor_->mode)) {
arguments->Add(value, zone());
- value = NULL; // zap the value to avoid the unnecessary assignment
-
// Construct the call to Runtime_InitializeConstGlobal
// and add it to the initialization statement block.
// Note that the function does different things depending on
// the number of arguments (1 or 2).
- initialize =
- factory()->NewCallRuntime(Runtime::kInitializeConstGlobal, arguments,
- descriptor_->initialization_pos);
+ initialize = factory()->NewCallRuntime(Runtime::kInitializeConstGlobal,
+ arguments, value->position());
+ value = NULL; // zap the value to avoid the unnecessary assignment
} else {
// Add language mode.
// We may want to pass singleton to avoid Literal allocations.
LanguageMode language_mode = initialization_scope->language_mode();
- arguments->Add(factory()->NewNumberLiteral(language_mode,
- descriptor_->declaration_pos),
- zone());
+ arguments->Add(
+ factory()->NewNumberLiteral(language_mode, RelocInfo::kNoPosition),
+ zone());
// Be careful not to assign a value to the global variable if
// we're in a with. The initialization value should not
@@ -257,12 +257,11 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
// which is why we need to generate a separate assignment node.
if (value != NULL && !descriptor_->scope->inside_with()) {
arguments->Add(value, zone());
- value = NULL; // zap the value to avoid the unnecessary assignment
// Construct the call to Runtime_InitializeVarGlobal
// and add it to the initialization statement block.
- initialize =
- factory()->NewCallRuntime(Runtime::kInitializeVarGlobal, arguments,
- descriptor_->declaration_pos);
+ initialize = factory()->NewCallRuntime(Runtime::kInitializeVarGlobal,
+ arguments, value->position());
+ value = NULL; // zap the value to avoid the unnecessary assignment
} else {
initialize = NULL;
}
@@ -270,7 +269,7 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
if (initialize != NULL) {
block_->statements()->Add(
- factory()->NewExpressionStatement(initialize, RelocInfo::kNoPosition),
+ factory()->NewExpressionStatement(initialize, initialize->position()),
zone());
}
} else if (value != nullptr && (descriptor_->mode == CONST_LEGACY ||
@@ -286,7 +285,7 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
DCHECK_NOT_NULL(proxy->var());
DCHECK_NOT_NULL(value);
// Add break location for destructured sub-pattern.
- int pos = IsSubPattern() ? pattern->position() : RelocInfo::kNoPosition;
+ int pos = IsSubPattern() ? pattern->position() : value->position();
Assignment* assignment =
factory()->NewAssignment(Token::INIT, proxy, value, pos);
block_->statements()->Add(
@@ -303,7 +302,7 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
// property).
VariableProxy* proxy = initialization_scope->NewUnresolved(factory(), name);
// Add break location for destructured sub-pattern.
- int pos = IsSubPattern() ? pattern->position() : RelocInfo::kNoPosition;
+ int pos = IsSubPattern() ? pattern->position() : value->position();
Assignment* assignment =
factory()->NewAssignment(Token::INIT, proxy, value, pos);
block_->statements()->Add(
@@ -365,7 +364,7 @@ void Parser::PatternRewriter::VisitRewritableExpression(
PatternContext old_context = SetAssignmentContextIfNeeded(initializer);
int pos = assign->position();
Block* old_block = block_;
- block_ = factory()->NewBlock(nullptr, 8, false, pos);
+ block_ = factory()->NewBlock(nullptr, 8, true, pos);
Variable* temp = nullptr;
Expression* pattern = assign->target();
Expression* old_value = current_value_;
@@ -414,16 +413,27 @@ void Parser::PatternRewriter::VisitObjectLiteral(ObjectLiteral* node) {
void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
Variable** temp_var) {
- auto temp = *temp_var = CreateTempVar(current_value_);
-
- block_->statements()->Add(parser_->BuildAssertIsCoercible(temp), zone());
+ DCHECK(block_->ignore_completion_value());
+ auto temp = *temp_var = CreateTempVar(current_value_);
auto iterator = CreateTempVar(parser_->GetIterator(
factory()->NewVariableProxy(temp), factory(), RelocInfo::kNoPosition));
auto done = CreateTempVar(
factory()->NewBooleanLiteral(false, RelocInfo::kNoPosition));
auto result = CreateTempVar();
auto v = CreateTempVar();
+ auto completion = CreateTempVar();
+ auto nopos = RelocInfo::kNoPosition;
+
+ // For the purpose of iterator finalization, we temporarily set block_ to a
+ // new block. In the main body of this function, we write to block_ (both
+ // explicitly and implicitly via recursion). At the end of the function, we
+ // wrap this new block in a try-finally statement, restore block_ to its
+ // original value, and add the try-finally statement to block_.
+ auto target = block_;
+ if (FLAG_harmony_iterator_close) {
+ block_ = factory()->NewBlock(nullptr, 8, true, nopos);
+ }
Spread* spread = nullptr;
for (Expression* value : *node->values()) {
@@ -433,88 +443,201 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
}
PatternContext context = SetInitializerContextIfNeeded(value);
+
// if (!done) {
+ // done = true; // If .next, .done or .value throws, don't close.
// result = IteratorNext(iterator);
- // v = (done = result.done) ? undefined : result.value;
+ // if (result.done) {
+ // v = undefined;
+ // } else {
+ // v = result.value;
+ // done = false;
+ // }
// }
- auto next_block =
- factory()->NewBlock(nullptr, 2, true, RelocInfo::kNoPosition);
- next_block->statements()->Add(factory()->NewExpressionStatement(
- parser_->BuildIteratorNextResult(
- factory()->NewVariableProxy(iterator),
- result, RelocInfo::kNoPosition),
+ Statement* if_not_done;
+ {
+ auto result_done = factory()->NewProperty(
+ factory()->NewVariableProxy(result),
+ factory()->NewStringLiteral(ast_value_factory()->done_string(),
RelocInfo::kNoPosition),
- zone());
-
- auto assign_to_done = factory()->NewAssignment(
- Token::ASSIGN, factory()->NewVariableProxy(done),
- factory()->NewProperty(
- factory()->NewVariableProxy(result),
- factory()->NewStringLiteral(ast_value_factory()->done_string(),
- RelocInfo::kNoPosition),
- RelocInfo::kNoPosition),
- RelocInfo::kNoPosition);
- auto next_value = factory()->NewConditional(
- assign_to_done, factory()->NewUndefinedLiteral(RelocInfo::kNoPosition),
- factory()->NewProperty(
- factory()->NewVariableProxy(result),
- factory()->NewStringLiteral(ast_value_factory()->value_string(),
- RelocInfo::kNoPosition),
- RelocInfo::kNoPosition),
- RelocInfo::kNoPosition);
- next_block->statements()->Add(
- factory()->NewExpressionStatement(
- factory()->NewAssignment(Token::ASSIGN,
- factory()->NewVariableProxy(v), next_value,
- RelocInfo::kNoPosition),
- RelocInfo::kNoPosition),
- zone());
-
- auto if_statement = factory()->NewIfStatement(
- factory()->NewUnaryOperation(Token::NOT,
- factory()->NewVariableProxy(done),
- RelocInfo::kNoPosition),
- next_block, factory()->NewEmptyStatement(RelocInfo::kNoPosition),
- RelocInfo::kNoPosition);
- block_->statements()->Add(if_statement, zone());
+ RelocInfo::kNoPosition);
+
+ auto assign_undefined = factory()->NewAssignment(
+ Token::ASSIGN, factory()->NewVariableProxy(v),
+ factory()->NewUndefinedLiteral(RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition);
+
+ auto assign_value = factory()->NewAssignment(
+ Token::ASSIGN, factory()->NewVariableProxy(v),
+ factory()->NewProperty(
+ factory()->NewVariableProxy(result),
+ factory()->NewStringLiteral(ast_value_factory()->value_string(),
+ RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition);
+
+ auto unset_done = factory()->NewAssignment(
+ Token::ASSIGN, factory()->NewVariableProxy(done),
+ factory()->NewBooleanLiteral(false, RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition);
+
+ auto inner_else =
+ factory()->NewBlock(nullptr, 2, true, RelocInfo::kNoPosition);
+ inner_else->statements()->Add(
+ factory()->NewExpressionStatement(assign_value, nopos), zone());
+ inner_else->statements()->Add(
+ factory()->NewExpressionStatement(unset_done, nopos), zone());
+
+ auto inner_if = factory()->NewIfStatement(
+ result_done,
+ factory()->NewExpressionStatement(assign_undefined, nopos),
+ inner_else, nopos);
+
+ auto next_block =
+ factory()->NewBlock(nullptr, 3, true, RelocInfo::kNoPosition);
+ next_block->statements()->Add(
+ factory()->NewExpressionStatement(
+ factory()->NewAssignment(
+ Token::ASSIGN, factory()->NewVariableProxy(done),
+ factory()->NewBooleanLiteral(true, nopos), nopos),
+ nopos),
+ zone());
+ next_block->statements()->Add(
+ factory()->NewExpressionStatement(
+ parser_->BuildIteratorNextResult(
+ factory()->NewVariableProxy(iterator), result,
+ RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition),
+ zone());
+ next_block->statements()->Add(inner_if, zone());
+
+ if_not_done = factory()->NewIfStatement(
+ factory()->NewUnaryOperation(Token::NOT,
+ factory()->NewVariableProxy(done),
+ RelocInfo::kNoPosition),
+ next_block, factory()->NewEmptyStatement(RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition);
+ }
+ block_->statements()->Add(if_not_done, zone());
if (!(value->IsLiteral() && value->AsLiteral()->raw_value()->IsTheHole())) {
+ if (FLAG_harmony_iterator_close) {
+ // completion = kAbruptCompletion;
+ Expression* proxy = factory()->NewVariableProxy(completion);
+ Expression* assignment = factory()->NewAssignment(
+ Token::ASSIGN, proxy,
+ factory()->NewSmiLiteral(kAbruptCompletion, nopos), nopos);
+ block_->statements()->Add(
+ factory()->NewExpressionStatement(assignment, nopos), zone());
+ }
+
RecurseIntoSubpattern(value, factory()->NewVariableProxy(v));
+
+ if (FLAG_harmony_iterator_close) {
+ // completion = kNormalCompletion;
+ Expression* proxy = factory()->NewVariableProxy(completion);
+ Expression* assignment = factory()->NewAssignment(
+ Token::ASSIGN, proxy,
+ factory()->NewSmiLiteral(kNormalCompletion, nopos), nopos);
+ block_->statements()->Add(
+ factory()->NewExpressionStatement(assignment, nopos), zone());
+ }
}
set_context(context);
}
if (spread != nullptr) {
- // array = [];
- // if (!done) %concat_iterable_to_array(array, iterator);
- auto empty_exprs = new (zone()) ZoneList<Expression*>(0, zone());
- auto array = CreateTempVar(factory()->NewArrayLiteral(
- empty_exprs,
- // Reuse pattern's literal index - it is unused since there is no
- // actual literal allocated.
- node->literal_index(), is_strong(scope()->language_mode()),
- RelocInfo::kNoPosition));
-
- auto arguments = new (zone()) ZoneList<Expression*>(2, zone());
- arguments->Add(factory()->NewVariableProxy(array), zone());
- arguments->Add(factory()->NewVariableProxy(iterator), zone());
- auto spread_into_array_call =
- factory()->NewCallRuntime(Context::CONCAT_ITERABLE_TO_ARRAY_INDEX,
- arguments, RelocInfo::kNoPosition);
-
- auto if_statement = factory()->NewIfStatement(
- factory()->NewUnaryOperation(Token::NOT,
- factory()->NewVariableProxy(done),
- RelocInfo::kNoPosition),
- factory()->NewExpressionStatement(spread_into_array_call,
- RelocInfo::kNoPosition),
- factory()->NewEmptyStatement(RelocInfo::kNoPosition),
- RelocInfo::kNoPosition);
- block_->statements()->Add(if_statement, zone());
+ // A spread can only occur as the last component. It is not handled by
+ // RecurseIntoSubpattern above.
+ // let array = [];
+ // while (!done) {
+ // result = IteratorNext(iterator);
+ // if (result.done) {
+ // done = true;
+ // } else {
+ // %AppendElement(array, result.value);
+ // }
+ // }
+
+ // let array = [];
+ Variable* array;
+ {
+ auto empty_exprs = new (zone()) ZoneList<Expression*>(0, zone());
+ array = CreateTempVar(factory()->NewArrayLiteral(
+ empty_exprs,
+ // Reuse pattern's literal index - it is unused since there is no
+ // actual literal allocated.
+ node->literal_index(), RelocInfo::kNoPosition));
+ }
+
+ // result = IteratorNext(iterator);
+ Statement* get_next = factory()->NewExpressionStatement(
+ parser_->BuildIteratorNextResult(factory()->NewVariableProxy(iterator),
+ result, nopos),
+ nopos);
+
+ // done = true;
+ Statement* set_done = factory()->NewExpressionStatement(
+ factory()->NewAssignment(
+ Token::ASSIGN, factory()->NewVariableProxy(done),
+ factory()->NewBooleanLiteral(true, nopos), nopos),
+ nopos);
+
+ // %AppendElement(array, result.value);
+ Statement* append_element;
+ {
+ auto args = new (zone()) ZoneList<Expression*>(2, zone());
+ args->Add(factory()->NewVariableProxy(array), zone());
+ args->Add(factory()->NewProperty(
+ factory()->NewVariableProxy(result),
+ factory()->NewStringLiteral(
+ ast_value_factory()->value_string(), nopos),
+ nopos),
+ zone());
+ append_element = factory()->NewExpressionStatement(
+ factory()->NewCallRuntime(Runtime::kAppendElement, args, nopos),
+ nopos);
+ }
+
+ // if (result.done) { #set_done } else { #append_element }
+ Statement* set_done_or_append;
+ {
+ Expression* result_done =
+ factory()->NewProperty(factory()->NewVariableProxy(result),
+ factory()->NewStringLiteral(
+ ast_value_factory()->done_string(), nopos),
+ nopos);
+ set_done_or_append = factory()->NewIfStatement(result_done, set_done,
+ append_element, nopos);
+ }
+
+ // while (!done) {
+ // #get_next;
+ // #set_done_or_append;
+ // }
+ WhileStatement* loop = factory()->NewWhileStatement(nullptr, nopos);
+ {
+ Expression* condition = factory()->NewUnaryOperation(
+ Token::NOT, factory()->NewVariableProxy(done), nopos);
+ Block* body = factory()->NewBlock(nullptr, 2, true, nopos);
+ body->statements()->Add(get_next, zone());
+ body->statements()->Add(set_done_or_append, zone());
+ loop->Initialize(condition, body);
+ }
+
+ block_->statements()->Add(loop, zone());
RecurseIntoSubpattern(spread->expression(),
factory()->NewVariableProxy(array));
}
+
+ if (FLAG_harmony_iterator_close) {
+ Expression* closing_condition = factory()->NewUnaryOperation(
+ Token::NOT, factory()->NewVariableProxy(done), nopos);
+ parser_->FinalizeIteratorUse(completion, closing_condition, iterator,
+ block_, target);
+ block_ = target;
+ }
}
diff --git a/deps/v8/src/parsing/preparse-data.h b/deps/v8/src/parsing/preparse-data.h
index dbe1022d1e..1c99450810 100644
--- a/deps/v8/src/parsing/preparse-data.h
+++ b/deps/v8/src/parsing/preparse-data.h
@@ -6,6 +6,7 @@
#define V8_PARSING_PREPARSE_DATA_H_
#include "src/allocation.h"
+#include "src/collector.h"
#include "src/hashmap.h"
#include "src/messages.h"
#include "src/parsing/preparse-data-format.h"
diff --git a/deps/v8/src/parsing/preparser.cc b/deps/v8/src/parsing/preparser.cc
index d335c8bdcd..da1c35bcc0 100644
--- a/deps/v8/src/parsing/preparser.cc
+++ b/deps/v8/src/parsing/preparser.cc
@@ -78,8 +78,6 @@ PreParserExpression PreParserTraits::ExpressionFromString(
int pos, Scanner* scanner, PreParserFactory* factory) {
if (scanner->UnescapedLiteralMatches("use strict", 10)) {
return PreParserExpression::UseStrictStringLiteral();
- } else if (scanner->UnescapedLiteralMatches("use strong", 10)) {
- return PreParserExpression::UseStrongStringLiteral();
}
return PreParserExpression::StringLiteral();
}
@@ -132,25 +130,16 @@ PreParser::PreParseResult PreParser::PreParseLazyFunction(
int end_pos = scanner()->location().end_pos;
CheckStrictOctalLiteral(start_position, end_pos, &ok);
if (!ok) return kPreParseSuccess;
-
- if (is_strong(scope_->language_mode()) && IsSubclassConstructor(kind)) {
- if (!function_state.super_location().IsValid()) {
- ReportMessageAt(Scanner::Location(start_position, start_position + 1),
- MessageTemplate::kStrongSuperCallMissing,
- kReferenceError);
- return kPreParseSuccess;
- }
- }
}
}
return kPreParseSuccess;
}
-
PreParserExpression PreParserTraits::ParseClassLiteral(
- PreParserIdentifier name, Scanner::Location class_name_location,
- bool name_is_strict_reserved, int pos, bool* ok) {
- return pre_parser_->ParseClassLiteral(name, class_name_location,
+ Type::ExpressionClassifier* classifier, PreParserIdentifier name,
+ Scanner::Location class_name_location, bool name_is_strict_reserved,
+ int pos, bool* ok) {
+ return pre_parser_->ParseClassLiteral(classifier, name, class_name_location,
name_is_strict_reserved, pos, ok);
}
@@ -205,7 +194,7 @@ PreParser::Statement PreParser::ParseStatementListItem(bool* ok) {
default:
break;
}
- return ParseStatement(ok);
+ return ParseStatement(kAllowLabelledFunctionStatement, ok);
}
@@ -226,62 +215,26 @@ void PreParser::ParseStatementList(int end_token, bool* ok,
}
bool starts_with_identifier = peek() == Token::IDENTIFIER;
Scanner::Location token_loc = scanner()->peek_location();
- Scanner::Location old_this_loc = function_state_->this_location();
- Scanner::Location old_super_loc = function_state_->super_location();
Statement statement = ParseStatementListItem(ok);
if (!*ok) return;
- if (is_strong(language_mode()) && scope_->is_function_scope() &&
- IsClassConstructor(function_state_->kind())) {
- Scanner::Location this_loc = function_state_->this_location();
- Scanner::Location super_loc = function_state_->super_location();
- if (this_loc.beg_pos != old_this_loc.beg_pos &&
- this_loc.beg_pos != token_loc.beg_pos) {
- ReportMessageAt(this_loc, MessageTemplate::kStrongConstructorThis);
- *ok = false;
- return;
- }
- if (super_loc.beg_pos != old_super_loc.beg_pos &&
- super_loc.beg_pos != token_loc.beg_pos) {
- ReportMessageAt(super_loc, MessageTemplate::kStrongConstructorSuper);
- *ok = false;
- return;
- }
- }
-
if (directive_prologue) {
bool use_strict_found = statement.IsUseStrictLiteral();
- bool use_strong_found =
- statement.IsUseStrongLiteral() && allow_strong_mode();
if (use_strict_found) {
scope_->SetLanguageMode(
static_cast<LanguageMode>(scope_->language_mode() | STRICT));
- } else if (use_strong_found) {
- scope_->SetLanguageMode(static_cast<LanguageMode>(
- scope_->language_mode() | STRONG));
- if (IsClassConstructor(function_state_->kind())) {
- // "use strong" cannot occur in a class constructor body, to avoid
- // unintuitive strong class object semantics.
- PreParserTraits::ReportMessageAt(
- token_loc, MessageTemplate::kStrongConstructorDirective);
- *ok = false;
- return;
- }
} else if (!statement.IsStringLiteral()) {
directive_prologue = false;
}
- if ((use_strict_found || use_strong_found) &&
- !scope_->HasSimpleParameters()) {
+ if (use_strict_found && !scope_->HasSimpleParameters()) {
// TC39 deemed "use strict" directives to be an error when occurring
// in the body of a function with non-simple parameter list, on
// 29/7/2015. https://goo.gl/ueA7Ln
- //
- // In V8, this also applies to "use strong " directives.
PreParserTraits::ReportMessageAt(
token_loc, MessageTemplate::kIllegalLanguageModeDirective,
- use_strict_found ? "use strict" : "use strong");
+ "use strict");
*ok = false;
return;
}
@@ -310,8 +263,8 @@ void PreParser::ParseStatementList(int end_token, bool* ok,
#define DUMMY ) // to make indentation work
#undef DUMMY
-
-PreParser::Statement PreParser::ParseStatement(bool* ok) {
+PreParser::Statement PreParser::ParseStatement(
+ AllowLabelledFunctionStatement allow_function, bool* ok) {
// Statement ::
// EmptyStatement
// ...
@@ -320,11 +273,20 @@ PreParser::Statement PreParser::ParseStatement(bool* ok) {
Next();
return Statement::Default();
}
- return ParseSubStatement(ok);
+ return ParseSubStatement(allow_function, ok);
}
+PreParser::Statement PreParser::ParseScopedStatement(bool legacy, bool* ok) {
+ if (is_strict(language_mode()) || peek() != Token::FUNCTION ||
+ (legacy && allow_harmony_restrictive_declarations())) {
+ return ParseSubStatement(kDisallowLabelledFunctionStatement, ok);
+ } else {
+ return ParseFunctionDeclaration(CHECK_OK);
+ }
+}
-PreParser::Statement PreParser::ParseSubStatement(bool* ok) {
+PreParser::Statement PreParser::ParseSubStatement(
+ AllowLabelledFunctionStatement allow_function, bool* ok) {
// Statement ::
// Block
// VariableStatement
@@ -355,12 +317,6 @@ PreParser::Statement PreParser::ParseSubStatement(bool* ok) {
return ParseBlock(ok);
case Token::SEMICOLON:
- if (is_strong(language_mode())) {
- PreParserTraits::ReportMessageAt(scanner()->peek_location(),
- MessageTemplate::kStrongEmpty);
- *ok = false;
- return Statement::Default();
- }
Next();
return Statement::Default();
@@ -397,20 +353,18 @@ PreParser::Statement PreParser::ParseSubStatement(bool* ok) {
case Token::TRY:
return ParseTryStatement(ok);
- case Token::FUNCTION: {
- Scanner::Location start_location = scanner()->peek_location();
- Statement statement = ParseFunctionDeclaration(CHECK_OK);
- Scanner::Location end_location = scanner()->location();
- if (is_strict(language_mode())) {
- PreParserTraits::ReportMessageAt(start_location.beg_pos,
- end_location.end_pos,
- MessageTemplate::kStrictFunction);
- *ok = false;
- return Statement::Default();
- } else {
- return statement;
- }
- }
+ case Token::FUNCTION:
+ // FunctionDeclaration only allowed as a StatementListItem, not in
+ // an arbitrary Statement position. Exceptions such as
+ // ES#sec-functiondeclarations-in-ifstatement-statement-clauses
+ // are handled by calling ParseScopedStatement rather than
+ // ParseSubStatement directly.
+ ReportMessageAt(scanner()->peek_location(),
+ is_strict(language_mode())
+ ? MessageTemplate::kStrictFunction
+ : MessageTemplate::kSloppyFunction);
+ *ok = false;
+ return Statement::Default();
case Token::DEBUGGER:
return ParseDebuggerStatement(ok);
@@ -418,17 +372,8 @@ PreParser::Statement PreParser::ParseSubStatement(bool* ok) {
case Token::VAR:
return ParseVariableStatement(kStatement, ok);
- case Token::CONST:
- // In ES6 CONST is not allowed as a Statement, only as a
- // LexicalDeclaration, however we continue to allow it in sloppy mode for
- // backwards compatibility.
- if (is_sloppy(language_mode()) && allow_legacy_const()) {
- return ParseVariableStatement(kStatement, ok);
- }
-
- // Fall through.
default:
- return ParseExpressionOrLabelledStatement(ok);
+ return ParseExpressionOrLabelledStatement(allow_function, ok);
}
}
@@ -468,8 +413,8 @@ PreParser::Statement PreParser::ParseClassDeclaration(bool* ok) {
bool is_strict_reserved = false;
Identifier name =
ParseIdentifierOrStrictReservedWord(&is_strict_reserved, CHECK_OK);
- ParseClassLiteral(name, scanner()->location(), is_strict_reserved, pos,
- CHECK_OK);
+ ParseClassLiteral(nullptr, name, scanner()->location(), is_strict_reserved,
+ pos, CHECK_OK);
return Statement::Default();
}
@@ -527,12 +472,6 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
bool lexical = false;
bool is_pattern = false;
if (peek() == Token::VAR) {
- if (is_strong(language_mode())) {
- Scanner::Location location = scanner()->peek_location();
- ReportMessageAt(location, MessageTemplate::kStrongVar);
- *ok = false;
- return Statement::Default();
- }
Consume(Token::VAR);
} else if (peek() == Token::CONST && allow_const()) {
// TODO(ES6): The ES6 Draft Rev4 section 12.2.2 reads:
@@ -546,8 +485,7 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
// existing pages. Therefore we keep allowing const with the old
// non-harmony semantics in sloppy mode.
Consume(Token::CONST);
- if (is_strict(language_mode()) ||
- (allow_harmony_sloppy() && !allow_legacy_const())) {
+ if (is_strict(language_mode()) || allow_harmony_sloppy()) {
DCHECK(var_context != kStatement);
require_initializer = true;
lexical = true;
@@ -574,19 +512,12 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
PreParserExpression pattern = PreParserExpression::Default();
{
ExpressionClassifier pattern_classifier(this);
- Token::Value next = peek();
pattern = ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
ValidateBindingPattern(&pattern_classifier, CHECK_OK);
if (lexical) {
ValidateLetPattern(&pattern_classifier, CHECK_OK);
}
-
- if (!allow_harmony_destructuring_bind() && !pattern.IsIdentifier()) {
- ReportUnexpectedToken(next);
- *ok = false;
- return Statement::Default();
- }
}
is_pattern = pattern.IsObjectLiteral() || pattern.IsArrayLiteral();
@@ -625,8 +556,8 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
return Statement::Default();
}
-
-PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(bool* ok) {
+PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(
+ AllowLabelledFunctionStatement allow_function, bool* ok) {
// ExpressionStatement | LabelledStatement ::
// Expression ';'
// Identifier ':' Statement
@@ -640,45 +571,6 @@ PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(bool* ok) {
*ok = false;
return Statement::Default();
- case Token::THIS:
- if (!FLAG_strong_this) break;
- // Fall through.
- case Token::SUPER:
- if (is_strong(language_mode()) &&
- IsClassConstructor(function_state_->kind())) {
- bool is_this = peek() == Token::THIS;
- Expression expr = Expression::Default();
- ExpressionClassifier classifier(this);
- if (is_this) {
- expr = ParseStrongInitializationExpression(&classifier, CHECK_OK);
- } else {
- expr = ParseStrongSuperCallExpression(&classifier, CHECK_OK);
- }
- ValidateExpression(&classifier, CHECK_OK);
- switch (peek()) {
- case Token::SEMICOLON:
- Consume(Token::SEMICOLON);
- break;
- case Token::RBRACE:
- case Token::EOS:
- break;
- default:
- if (!scanner()->HasAnyLineTerminatorBeforeNext()) {
- ReportMessageAt(function_state_->this_location(),
- is_this
- ? MessageTemplate::kStrongConstructorThis
- : MessageTemplate::kStrongConstructorSuper);
- *ok = false;
- return Statement::Default();
- }
- }
- return Statement::ExpressionStatement(expr);
- }
- break;
-
- // TODO(arv): Handle `let [`
- // https://code.google.com/p/v8/issues/detail?id=3847
-
default:
break;
}
@@ -698,7 +590,16 @@ PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(bool* ok) {
DCHECK(is_sloppy(language_mode()) ||
!IsFutureStrictReserved(expr.AsIdentifier()));
Consume(Token::COLON);
- Statement statement = ParseStatement(ok);
+ // ES#sec-labelled-function-declarations Labelled Function Declarations
+ if (peek() == Token::FUNCTION && is_sloppy(language_mode())) {
+ if (allow_function == kAllowLabelledFunctionStatement) {
+ return ParseFunctionDeclaration(ok);
+ } else {
+ return ParseScopedStatement(true, ok);
+ }
+ }
+ Statement statement =
+ ParseStatement(kDisallowLabelledFunctionStatement, ok);
return statement.IsJumpStatement() ? Statement::Default() : statement;
// Preparsing is disabled for extensions (because the extension details
// aren't passed to lazily compiled functions), so we don't
@@ -726,10 +627,10 @@ PreParser::Statement PreParser::ParseIfStatement(bool* ok) {
Expect(Token::LPAREN, CHECK_OK);
ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- Statement stat = ParseSubStatement(CHECK_OK);
+ Statement stat = ParseScopedStatement(false, CHECK_OK);
if (peek() == Token::ELSE) {
Next();
- Statement else_stat = ParseSubStatement(CHECK_OK);
+ Statement else_stat = ParseScopedStatement(false, CHECK_OK);
stat = (stat.IsJumpStatement() && else_stat.IsJumpStatement()) ?
Statement::Jump() : Statement::Default();
} else {
@@ -795,14 +696,6 @@ PreParser::Statement PreParser::ParseReturnStatement(bool* ok) {
tok != Token::SEMICOLON &&
tok != Token::RBRACE &&
tok != Token::EOS) {
- if (is_strong(language_mode()) &&
- IsClassConstructor(function_state_->kind())) {
- int pos = peek_position();
- ReportMessageAt(Scanner::Location(pos, pos + 1),
- MessageTemplate::kStrongConstructorReturnValue);
- *ok = false;
- return Statement::Default();
- }
ParseExpression(true, CHECK_OK);
}
ExpectSemicolon(CHECK_OK);
@@ -825,7 +718,7 @@ PreParser::Statement PreParser::ParseWithStatement(bool* ok) {
Scope* with_scope = NewScope(scope_, WITH_SCOPE);
BlockState block_state(&scope_, with_scope);
- ParseSubStatement(CHECK_OK);
+ ParseScopedStatement(true, CHECK_OK);
return Statement::Default();
}
@@ -857,13 +750,6 @@ PreParser::Statement PreParser::ParseSwitchStatement(bool* ok) {
statement = ParseStatementListItem(CHECK_OK);
token = peek();
}
- if (is_strong(language_mode()) && !statement.IsJumpStatement() &&
- token != Token::RBRACE) {
- ReportMessageAt(scanner()->location(),
- MessageTemplate::kStrongSwitchFallthrough);
- *ok = false;
- return Statement::Default();
- }
}
Expect(Token::RBRACE, ok);
return Statement::Default();
@@ -875,7 +761,7 @@ PreParser::Statement PreParser::ParseDoWhileStatement(bool* ok) {
// 'do' Statement 'while' '(' Expression ')' ';'
Expect(Token::DO, CHECK_OK);
- ParseSubStatement(CHECK_OK);
+ ParseScopedStatement(true, CHECK_OK);
Expect(Token::WHILE, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
ParseExpression(true, CHECK_OK);
@@ -893,7 +779,7 @@ PreParser::Statement PreParser::ParseWhileStatement(bool* ok) {
Expect(Token::LPAREN, CHECK_OK);
ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- ParseSubStatement(ok);
+ ParseScopedStatement(true, ok);
return Statement::Default();
}
@@ -945,7 +831,7 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
}
Expect(Token::RPAREN, CHECK_OK);
- ParseSubStatement(CHECK_OK);
+ ParseScopedStatement(true, CHECK_OK);
return Statement::Default();
}
} else {
@@ -958,7 +844,6 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
bool is_for_each = CheckInOrOf(&mode, ok);
if (!*ok) return Statement::Default();
bool is_destructuring = is_for_each &&
- allow_harmony_destructuring_assignment() &&
(lhs->IsArrayLiteral() || lhs->IsObjectLiteral());
if (is_destructuring) {
@@ -983,7 +868,7 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
}
Expect(Token::RPAREN, CHECK_OK);
- ParseSubStatement(CHECK_OK);
+ ParseScopedStatement(true, CHECK_OK);
return Statement::Default();
}
}
@@ -1009,7 +894,7 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
}
Expect(Token::RPAREN, CHECK_OK);
- ParseSubStatement(ok);
+ ParseScopedStatement(true, ok);
return Statement::Default();
}
@@ -1156,16 +1041,6 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
CheckStrictOctalLiteral(start_position, end_position, CHECK_OK);
}
- if (is_strong(language_mode) && IsSubclassConstructor(kind)) {
- if (!function_state.super_location().IsValid()) {
- ReportMessageAt(function_name_location,
- MessageTemplate::kStrongSuperCallMissing,
- kReferenceError);
- *ok = false;
- return Expression::Default();
- }
- }
-
return Expression::Default();
}
@@ -1186,10 +1061,10 @@ void PreParser::ParseLazyFunctionLiteralBody(bool* ok,
scope_->uses_super_property(), scope_->calls_eval());
}
-
PreParserExpression PreParser::ParseClassLiteral(
- PreParserIdentifier name, Scanner::Location class_name_location,
- bool name_is_strict_reserved, int pos, bool* ok) {
+ ExpressionClassifier* classifier, PreParserIdentifier name,
+ Scanner::Location class_name_location, bool name_is_strict_reserved,
+ int pos, bool* ok) {
// All parts of a ClassDeclaration and ClassExpression are strict code.
if (name_is_strict_reserved) {
ReportMessageAt(class_name_location,
@@ -1202,13 +1077,8 @@ PreParserExpression PreParser::ParseClassLiteral(
*ok = false;
return EmptyExpression();
}
- LanguageMode class_language_mode = language_mode();
- if (is_strong(class_language_mode) && IsUndefined(name)) {
- ReportMessageAt(class_name_location, MessageTemplate::kStrongUndefined);
- *ok = false;
- return EmptyExpression();
- }
+ LanguageMode class_language_mode = language_mode();
Scope* scope = NewScope(scope_, BLOCK_SCOPE);
BlockState block_state(&scope_, scope);
scope_->SetLanguageMode(
@@ -1218,9 +1088,13 @@ PreParserExpression PreParser::ParseClassLiteral(
bool has_extends = Check(Token::EXTENDS);
if (has_extends) {
- ExpressionClassifier classifier(this);
- ParseLeftHandSideExpression(&classifier, CHECK_OK);
- ValidateExpression(&classifier, CHECK_OK);
+ ExpressionClassifier extends_classifier(this);
+ ParseLeftHandSideExpression(&extends_classifier, CHECK_OK);
+ ValidateExpression(&extends_classifier, CHECK_OK);
+ if (classifier != nullptr) {
+ classifier->Accumulate(&extends_classifier,
+ ExpressionClassifier::ExpressionProductions);
+ }
}
ClassLiteralChecker checker(this);
@@ -1234,11 +1108,15 @@ PreParserExpression PreParser::ParseClassLiteral(
bool is_computed_name = false; // Classes do not care about computed
// property names here.
Identifier name;
- ExpressionClassifier classifier(this);
+ ExpressionClassifier property_classifier(this);
ParsePropertyDefinition(&checker, in_class, has_extends, is_static,
&is_computed_name, &has_seen_constructor,
- &classifier, &name, CHECK_OK);
- ValidateExpression(&classifier, CHECK_OK);
+ &property_classifier, &name, CHECK_OK);
+ ValidateExpression(&property_classifier, CHECK_OK);
+ if (classifier != nullptr) {
+ classifier->Accumulate(&property_classifier,
+ ExpressionClassifier::ExpressionProductions);
+ }
}
Expect(Token::RBRACE, CHECK_OK);
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
index 253251c073..f2f69517b2 100644
--- a/deps/v8/src/parsing/preparser.h
+++ b/deps/v8/src/parsing/preparser.h
@@ -144,11 +144,6 @@ class PreParserExpression {
IsUseStrictField::encode(true));
}
- static PreParserExpression UseStrongStringLiteral() {
- return PreParserExpression(TypeField::encode(kStringLiteralExpression) |
- IsUseStrongField::encode(true));
- }
-
static PreParserExpression This() {
return PreParserExpression(TypeField::encode(kExpression) |
ExpressionTypeField::encode(kThisExpression));
@@ -214,11 +209,6 @@ class PreParserExpression {
IsUseStrictField::decode(code_);
}
- bool IsUseStrongLiteral() const {
- return TypeField::decode(code_) == kStringLiteralExpression &&
- IsUseStrongField::decode(code_);
- }
-
bool IsThis() const {
return TypeField::decode(code_) == kExpression &&
ExpressionTypeField::decode(code_) == kThisExpression;
@@ -317,7 +307,6 @@ class PreParserExpression {
// of the Type field, so they can share the storage.
typedef BitField<ExpressionType, TypeField::kNext, 3> ExpressionTypeField;
typedef BitField<bool, TypeField::kNext, 1> IsUseStrictField;
- typedef BitField<bool, IsUseStrictField::kNext, 1> IsUseStrongField;
typedef BitField<PreParserIdentifier::Type, TypeField::kNext, 10>
IdentifierTypeField;
typedef BitField<bool, TypeField::kNext, 1> HasCoverInitializedNameField;
@@ -366,9 +355,6 @@ class PreParserStatement {
if (expression.IsUseStrictLiteral()) {
return PreParserStatement(kUseStrictExpressionStatement);
}
- if (expression.IsUseStrongLiteral()) {
- return PreParserStatement(kUseStrongExpressionStatement);
- }
if (expression.IsStringLiteral()) {
return PreParserStatement(kStringLiteralExpressionStatement);
}
@@ -376,15 +362,13 @@ class PreParserStatement {
}
bool IsStringLiteral() {
- return code_ == kStringLiteralExpressionStatement;
+ return code_ == kStringLiteralExpressionStatement || IsUseStrictLiteral();
}
bool IsUseStrictLiteral() {
return code_ == kUseStrictExpressionStatement;
}
- bool IsUseStrongLiteral() { return code_ == kUseStrongExpressionStatement; }
-
bool IsFunctionDeclaration() {
return code_ == kFunctionDeclaration;
}
@@ -399,7 +383,6 @@ class PreParserStatement {
kJumpStatement,
kStringLiteralExpressionStatement,
kUseStrictExpressionStatement,
- kUseStrongExpressionStatement,
kFunctionDeclaration
};
@@ -424,18 +407,17 @@ class PreParserFactory {
}
PreParserExpression NewRegExpLiteral(PreParserIdentifier js_pattern,
int js_flags, int literal_index,
- bool is_strong, int pos) {
+ int pos) {
return PreParserExpression::Default();
}
PreParserExpression NewArrayLiteral(PreParserExpressionList values,
int literal_index,
- bool is_strong,
int pos) {
return PreParserExpression::ArrayLiteral();
}
PreParserExpression NewArrayLiteral(PreParserExpressionList values,
int first_spread_index, int literal_index,
- bool is_strong, int pos) {
+ int pos) {
return PreParserExpression::ArrayLiteral();
}
PreParserExpression NewObjectLiteralProperty(PreParserExpression key,
@@ -454,8 +436,6 @@ class PreParserFactory {
PreParserExpression NewObjectLiteral(PreParserExpressionList properties,
int literal_index,
int boilerplate_properties,
- bool has_function,
- bool is_strong,
int pos) {
return PreParserExpression::ObjectLiteral();
}
@@ -496,7 +476,6 @@ class PreParserFactory {
}
PreParserExpression NewYield(PreParserExpression generator_object,
PreParserExpression expression,
- Yield::Kind yield_kind,
int pos) {
return PreParserExpression::Default();
}
@@ -683,9 +662,6 @@ class PreParserTraits {
UNREACHABLE();
}
- static void CheckFunctionLiteralInsideTopLevelObjectLiteral(
- Scope* scope, PreParserExpression property, bool* has_function) {}
-
static void CheckAssigningFunctionLiteralToProperty(
PreParserExpression left, PreParserExpression right) {}
@@ -710,6 +686,10 @@ class PreParserTraits {
return PreParserExpression::Default();
}
+ PreParserExpression BuildIteratorResult(PreParserExpression value,
+ bool done) {
+ return PreParserExpression::Default();
+ }
PreParserExpression NewThrowReferenceError(MessageTemplate::Template message,
int pos) {
return PreParserExpression::Default();
@@ -902,11 +882,14 @@ class PreParserTraits {
int function_token_position, FunctionLiteral::FunctionType type,
LanguageMode language_mode, bool* ok);
- PreParserExpression ParseClassLiteral(PreParserIdentifier name,
+ PreParserExpression ParseClassLiteral(Type::ExpressionClassifier* classifier,
+ PreParserIdentifier name,
Scanner::Location class_name_location,
bool name_is_strict_reserved, int pos,
bool* ok);
+ V8_INLINE void MarkTailPosition(PreParserExpression) {}
+
PreParserExpressionList PrepareSpreadArguments(PreParserExpressionList list) {
return list;
}
@@ -922,6 +905,16 @@ class PreParserTraits {
inline void RewriteDestructuringAssignments() {}
+ inline PreParserExpression RewriteExponentiation(PreParserExpression left,
+ PreParserExpression right,
+ int pos) {
+ return left;
+ }
+ inline PreParserExpression RewriteAssignExponentiation(
+ PreParserExpression left, PreParserExpression right, int pos) {
+ return left;
+ }
+
inline void QueueDestructuringAssignmentForRewriting(PreParserExpression) {}
inline void QueueNonPatternForRewriting(PreParserExpression) {}
@@ -1029,8 +1022,11 @@ class PreParser : public ParserBase<PreParserTraits> {
Statement ParseStatementListItem(bool* ok);
void ParseStatementList(int end_token, bool* ok,
Scanner::BookmarkScope* bookmark = nullptr);
- Statement ParseStatement(bool* ok);
- Statement ParseSubStatement(bool* ok);
+ Statement ParseStatement(AllowLabelledFunctionStatement allow_function,
+ bool* ok);
+ Statement ParseSubStatement(AllowLabelledFunctionStatement allow_function,
+ bool* ok);
+ Statement ParseScopedStatement(bool legacy, bool* ok);
Statement ParseFunctionDeclaration(bool* ok);
Statement ParseClassDeclaration(bool* ok);
Statement ParseBlock(bool* ok);
@@ -1042,7 +1038,8 @@ class PreParser : public ParserBase<PreParserTraits> {
Scanner::Location* first_initializer_loc,
Scanner::Location* bindings_loc,
bool* ok);
- Statement ParseExpressionOrLabelledStatement(bool* ok);
+ Statement ParseExpressionOrLabelledStatement(
+ AllowLabelledFunctionStatement allow_function, bool* ok);
Statement ParseIfStatement(bool* ok);
Statement ParseContinueStatement(bool* ok);
Statement ParseBreakStatement(bool* ok);
@@ -1075,7 +1072,8 @@ class PreParser : public ParserBase<PreParserTraits> {
void ParseLazyFunctionLiteralBody(bool* ok,
Scanner::BookmarkScope* bookmark = nullptr);
- PreParserExpression ParseClassLiteral(PreParserIdentifier name,
+ PreParserExpression ParseClassLiteral(ExpressionClassifier* classifier,
+ PreParserIdentifier name,
Scanner::Location class_name_location,
bool name_is_strict_reserved, int pos,
bool* ok);
@@ -1140,8 +1138,7 @@ ZoneList<PreParserExpression>* PreParserTraits::GetNonPatternList() const {
PreParserExpression PreParserTraits::RewriteYieldStar(
PreParserExpression generator, PreParserExpression expression, int pos) {
- return pre_parser_->factory()->NewYield(
- generator, expression, Yield::kDelegating, pos);
+ return PreParserExpression::Default();
}
PreParserExpression PreParserTraits::RewriteInstanceof(PreParserExpression lhs,
diff --git a/deps/v8/src/parsing/rewriter.cc b/deps/v8/src/parsing/rewriter.cc
index c8e8fedc23..915a464bf4 100644
--- a/deps/v8/src/parsing/rewriter.cc
+++ b/deps/v8/src/parsing/rewriter.cc
@@ -355,14 +355,7 @@ bool Rewriter::Rewrite(ParseInfo* info) {
if (processor.HasStackOverflow()) return false;
if (processor.result_assigned()) {
- DCHECK(function->end_position() != RelocInfo::kNoPosition);
- // Set the position of the assignment statement one character past the
- // source code, such that it definitely is not in the source code range
- // of an immediate inner scope. For example in
- // eval('with ({x:1}) x = 1');
- // the end position of the function generated for executing the eval code
- // coincides with the end of the with scope which is the position of '1'.
- int pos = function->end_position();
+ int pos = RelocInfo::kNoPosition;
VariableProxy* result_proxy =
processor.factory()->NewVariableProxy(result, pos);
Statement* result_statement =
diff --git a/deps/v8/src/parsing/scanner.cc b/deps/v8/src/parsing/scanner.cc
index 2d5a579583..698cb5e905 100644
--- a/deps/v8/src/parsing/scanner.cc
+++ b/deps/v8/src/parsing/scanner.cc
@@ -40,7 +40,8 @@ Scanner::Scanner(UnicodeCache* unicode_cache)
: unicode_cache_(unicode_cache),
bookmark_c0_(kNoBookmark),
octal_pos_(Location::invalid()),
- found_html_comment_(false) {
+ found_html_comment_(false),
+ allow_harmony_exponentiation_operator_(false) {
bookmark_current_.literal_chars = &bookmark_current_literal_;
bookmark_current_.raw_literal_chars = &bookmark_current_raw_literal_;
bookmark_next_.literal_chars = &bookmark_next_literal_;
@@ -60,15 +61,19 @@ void Scanner::Initialize(Utf16CharacterStream* source) {
Scan();
}
-
-template <bool capture_raw>
+template <bool capture_raw, bool unicode>
uc32 Scanner::ScanHexNumber(int expected_length) {
DCHECK(expected_length <= 4); // prevent overflow
+ int begin = source_pos() - 2;
uc32 x = 0;
for (int i = 0; i < expected_length; i++) {
int d = HexValue(c0_);
if (d < 0) {
+ ReportScannerError(Location(begin, begin + expected_length + 2),
+ unicode
+ ? MessageTemplate::kInvalidUnicodeEscapeSequence
+ : MessageTemplate::kInvalidHexEscapeSequence);
return -1;
}
x = x * 16 + d;
@@ -78,20 +83,23 @@ uc32 Scanner::ScanHexNumber(int expected_length) {
return x;
}
-
template <bool capture_raw>
-uc32 Scanner::ScanUnlimitedLengthHexNumber(int max_value) {
+uc32 Scanner::ScanUnlimitedLengthHexNumber(int max_value, int beg_pos) {
uc32 x = 0;
int d = HexValue(c0_);
- if (d < 0) {
- return -1;
- }
+ if (d < 0) return -1;
+
while (d >= 0) {
x = x * 16 + d;
- if (x > max_value) return -1;
+ if (x > max_value) {
+ ReportScannerError(Location(beg_pos, source_pos() + 1),
+ MessageTemplate::kUndefinedUnicodeCodePoint);
+ return -1;
+ }
Advance<capture_raw>();
d = HexValue(c0_);
}
+
return x;
}
@@ -565,7 +573,14 @@ void Scanner::Scan() {
case '*':
// * *=
- token = Select('=', Token::ASSIGN_MUL, Token::MUL);
+ Advance();
+ if (c0_ == '*' && allow_harmony_exponentiation_operator()) {
+ token = Select('=', Token::ASSIGN_EXP, Token::EXP);
+ } else if (c0_ == '=') {
+ token = Select(Token::ASSIGN_MUL);
+ } else {
+ token = Token::MUL;
+ }
break;
case '%':
@@ -847,7 +862,9 @@ Token::Value Scanner::ScanString() {
uc32 c = c0_;
Advance();
if (c == '\\') {
- if (c0_ < 0 || !ScanEscape<false, false>()) return Token::ILLEGAL;
+ if (c0_ < 0 || !ScanEscape<false, false>()) {
+ return Token::ILLEGAL;
+ }
} else {
AddLiteralChar(c);
}
@@ -879,7 +896,6 @@ Token::Value Scanner::ScanTemplateSpan() {
StartRawLiteral();
const bool capture_raw = true;
const bool in_template_literal = true;
-
while (true) {
uc32 c = c0_;
Advance<capture_raw>();
@@ -1099,18 +1115,19 @@ uc32 Scanner::ScanUnicodeEscape() {
// Accept both \uxxxx and \u{xxxxxx}. In the latter case, the number of
// hex digits between { } is arbitrary. \ and u have already been read.
if (c0_ == '{') {
+ int begin = source_pos() - 2;
Advance<capture_raw>();
- uc32 cp = ScanUnlimitedLengthHexNumber<capture_raw>(0x10ffff);
- if (cp < 0) {
- return -1;
- }
- if (c0_ != '}') {
+ uc32 cp = ScanUnlimitedLengthHexNumber<capture_raw>(0x10ffff, begin);
+ if (cp < 0 || c0_ != '}') {
+ ReportScannerError(source_pos(),
+ MessageTemplate::kInvalidUnicodeEscapeSequence);
return -1;
}
Advance<capture_raw>();
return cp;
}
- return ScanHexNumber<capture_raw>(4);
+ const bool unicode = true;
+ return ScanHexNumber<capture_raw, unicode>(4);
}
@@ -1420,7 +1437,6 @@ Maybe<RegExp::Flags> Scanner::ScanRegExpFlags() {
flag = RegExp::kUnicode;
break;
case 'y':
- if (!FLAG_harmony_regexps) return Nothing<RegExp::Flags>();
flag = RegExp::kSticky;
break;
default:
diff --git a/deps/v8/src/parsing/scanner.h b/deps/v8/src/parsing/scanner.h
index 3f9bbb54a4..22c504c98e 100644
--- a/deps/v8/src/parsing/scanner.h
+++ b/deps/v8/src/parsing/scanner.h
@@ -10,13 +10,14 @@
#include "src/allocation.h"
#include "src/base/logging.h"
#include "src/char-predicates.h"
+#include "src/collector.h"
#include "src/globals.h"
#include "src/hashmap.h"
#include "src/list.h"
+#include "src/messages.h"
#include "src/parsing/token.h"
#include "src/unicode.h"
#include "src/unicode-decoder.h"
-#include "src/utils.h"
namespace v8 {
namespace internal {
@@ -354,6 +355,10 @@ class Scanner {
// (the token last returned by Next()).
Location location() const { return current_.location; }
+ bool has_error() const { return scanner_error_ != MessageTemplate::kNone; }
+ MessageTemplate::Template error() const { return scanner_error_; }
+ Location error_location() const { return scanner_error_location_; }
+
// Similar functions for the upcoming token.
// One token look-ahead (past the token returned by Next()).
@@ -450,6 +455,12 @@ class Scanner {
bool FoundHtmlComment() const { return found_html_comment_; }
+#define DECLARE_ACCESSORS(name) \
+ inline bool allow_##name() const { return allow_##name##_; } \
+ inline void set_allow_##name(bool allow) { allow_##name##_ = allow; }
+ DECLARE_ACCESSORS(harmony_exponentiation_operator)
+#undef ACCESSOR
+
private:
// The current and look-ahead token.
struct TokenDesc {
@@ -476,6 +487,7 @@ class Scanner {
current_.raw_literal_chars = NULL;
next_next_.token = Token::UNINITIALIZED;
found_html_comment_ = false;
+ scanner_error_ = MessageTemplate::kNone;
}
// Support BookmarkScope functionality.
@@ -486,6 +498,19 @@ class Scanner {
void DropBookmark();
static void CopyTokenDesc(TokenDesc* to, TokenDesc* from);
+ void ReportScannerError(const Location& location,
+ MessageTemplate::Template error) {
+ if (has_error()) return;
+ scanner_error_ = error;
+ scanner_error_location_ = location;
+ }
+
+ void ReportScannerError(int pos, MessageTemplate::Template error) {
+ if (has_error()) return;
+ scanner_error_ = error;
+ scanner_error_location_ = Location(pos, pos + 1);
+ }
+
// Literal buffer support
inline void StartLiteral() {
LiteralBuffer* free_buffer =
@@ -631,13 +656,13 @@ class Scanner {
return current_.raw_literal_chars->is_one_byte();
}
- template <bool capture_raw>
+ template <bool capture_raw, bool unicode = false>
uc32 ScanHexNumber(int expected_length);
// Scan a number of any length but not bigger than max_value. For example, the
// number can be 000000001, so it's very long in characters but its value is
// small.
template <bool capture_raw>
- uc32 ScanUnlimitedLengthHexNumber(int max_value);
+ uc32 ScanUnlimitedLengthHexNumber(int max_value, int beg_pos);
// Scans a single JavaScript token.
void Scan();
@@ -758,6 +783,11 @@ class Scanner {
// Whether this scanner encountered an HTML comment.
bool found_html_comment_;
+
+ bool allow_harmony_exponentiation_operator_;
+
+ MessageTemplate::Template scanner_error_;
+ Location scanner_error_location_;
};
} // namespace internal
diff --git a/deps/v8/src/parsing/token.h b/deps/v8/src/parsing/token.h
index 7a62b4d915..fae9ea8bff 100644
--- a/deps/v8/src/parsing/token.h
+++ b/deps/v8/src/parsing/token.h
@@ -63,6 +63,7 @@ namespace internal {
T(ASSIGN_MUL, "*=", 2) \
T(ASSIGN_DIV, "/=", 2) \
T(ASSIGN_MOD, "%=", 2) \
+ T(ASSIGN_EXP, "**=", 2) \
\
/* Binary operators sorted by precedence. */ \
/* IsBinaryOp() relies on this block of enum values */ \
@@ -82,6 +83,7 @@ namespace internal {
T(MUL, "*", 13) \
T(DIV, "/", 13) \
T(MOD, "%", 13) \
+ T(EXP, "**", 14) \
\
/* Compare operators sorted by precedence. */ \
/* IsCompareOp() relies on this block of enum values */ \
@@ -214,12 +216,10 @@ class Token {
}
static bool IsAssignmentOp(Value tok) {
- return INIT <= tok && tok <= ASSIGN_MOD;
+ return INIT <= tok && tok <= ASSIGN_EXP;
}
- static bool IsBinaryOp(Value op) {
- return COMMA <= op && op <= MOD;
- }
+ static bool IsBinaryOp(Value op) { return COMMA <= op && op <= EXP; }
static bool IsTruncatingBinaryOp(Value op) {
return BIT_OR <= op && op <= ROR;
diff --git a/deps/v8/src/perf-jit.cc b/deps/v8/src/perf-jit.cc
new file mode 100644
index 0000000000..6f3551468a
--- /dev/null
+++ b/deps/v8/src/perf-jit.cc
@@ -0,0 +1,336 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "src/perf-jit.h"
+
+#include "src/assembler.h"
+#include "src/objects-inl.h"
+
+#if V8_OS_LINUX
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#endif // V8_OS_LINUX
+
+namespace v8 {
+namespace internal {
+
+#if V8_OS_LINUX
+
+struct PerfJitHeader {
+ uint32_t magic_;
+ uint32_t version_;
+ uint32_t size_;
+ uint32_t elf_mach_target_;
+ uint32_t reserved_;
+ uint32_t process_id_;
+ uint64_t time_stamp_;
+ uint64_t flags_;
+
+ static const uint32_t kMagic = 0x4A695444;
+ static const uint32_t kVersion = 1;
+};
+
+struct PerfJitBase {
+ enum PerfJitEvent { kLoad = 0, kMove = 1, kDebugInfo = 2, kClose = 3 };
+
+ uint32_t event_;
+ uint32_t size_;
+ uint64_t time_stamp_;
+};
+
+struct PerfJitCodeLoad : PerfJitBase {
+ uint32_t process_id_;
+ uint32_t thread_id_;
+ uint64_t vma_;
+ uint64_t code_address_;
+ uint64_t code_size_;
+ uint64_t code_id_;
+};
+
+struct PerfJitDebugEntry {
+ uint64_t address_;
+ int line_number_;
+ int column_;
+ // Followed by null-terminated name or \0xff\0 if same as previous.
+};
+
+struct PerfJitCodeDebugInfo : PerfJitBase {
+ uint64_t address_;
+ uint64_t entry_count_;
+ // Followed by entry_count_ instances of PerfJitDebugEntry.
+};
+
+const char PerfJitLogger::kFilenameFormatString[] = "./jit-%d.dump";
+
+// Extra padding for the PID in the filename
+const int PerfJitLogger::kFilenameBufferPadding = 16;
+
+base::LazyRecursiveMutex PerfJitLogger::file_mutex_;
+// The following static variables are protected by PerfJitLogger::file_mutex_.
+uint64_t PerfJitLogger::reference_count_ = 0;
+void* PerfJitLogger::marker_address_ = nullptr;
+uint64_t PerfJitLogger::code_index_ = 0;
+FILE* PerfJitLogger::perf_output_handle_ = nullptr;
+
+void PerfJitLogger::OpenJitDumpFile() {
+ // Open the perf JIT dump file.
+ perf_output_handle_ = nullptr;
+
+ int bufferSize = sizeof(kFilenameFormatString) + kFilenameBufferPadding;
+ ScopedVector<char> perf_dump_name(bufferSize);
+ int size = SNPrintF(perf_dump_name, kFilenameFormatString,
+ base::OS::GetCurrentProcessId());
+ CHECK_NE(size, -1);
+
+ int fd = open(perf_dump_name.start(), O_CREAT | O_TRUNC | O_RDWR, 0666);
+ if (fd == -1) return;
+
+ marker_address_ = OpenMarkerFile(fd);
+ if (marker_address_ == nullptr) return;
+
+ perf_output_handle_ = fdopen(fd, "w+");
+ if (perf_output_handle_ == nullptr) return;
+
+ setvbuf(perf_output_handle_, NULL, _IOFBF, kLogBufferSize);
+}
+
+void PerfJitLogger::CloseJitDumpFile() {
+ if (perf_output_handle_ == nullptr) return;
+ fclose(perf_output_handle_);
+ perf_output_handle_ = nullptr;
+}
+
+void* PerfJitLogger::OpenMarkerFile(int fd) {
+ long page_size = sysconf(_SC_PAGESIZE); // NOLINT(runtime/int)
+ if (page_size == -1) return nullptr;
+
+ // Mmap the file so that there is a mmap record in the perf_data file.
+ //
+ // The map must be PROT_EXEC to ensure it is not ignored by perf record.
+ void* marker_address =
+ mmap(nullptr, page_size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0);
+ return (marker_address == MAP_FAILED) ? nullptr : marker_address;
+}
+
+void PerfJitLogger::CloseMarkerFile(void* marker_address) {
+ if (marker_address == nullptr) return;
+ long page_size = sysconf(_SC_PAGESIZE); // NOLINT(runtime/int)
+ if (page_size == -1) return;
+ munmap(marker_address, page_size);
+}
+
+PerfJitLogger::PerfJitLogger() {
+ base::LockGuard<base::RecursiveMutex> guard_file(file_mutex_.Pointer());
+
+ reference_count_++;
+ // If this is the first logger, open the file and write the header.
+ if (reference_count_ == 1) {
+ OpenJitDumpFile();
+ if (perf_output_handle_ == nullptr) return;
+ LogWriteHeader();
+ }
+}
+
+PerfJitLogger::~PerfJitLogger() {
+ base::LockGuard<base::RecursiveMutex> guard_file(file_mutex_.Pointer());
+
+ reference_count_--;
+ // If this was the last logger, close the file.
+ if (reference_count_ == 0) {
+ CloseJitDumpFile();
+ }
+}
+
+uint64_t PerfJitLogger::GetTimestamp() {
+ struct timespec ts;
+ int result = clock_gettime(CLOCK_MONOTONIC, &ts);
+ DCHECK_EQ(0, result);
+ USE(result);
+ static const uint64_t kNsecPerSec = 1000000000;
+ return (ts.tv_sec * kNsecPerSec) + ts.tv_nsec;
+}
+
+void PerfJitLogger::LogRecordedBuffer(AbstractCode* abstract_code,
+ SharedFunctionInfo* shared,
+ const char* name, int length) {
+ if (FLAG_perf_basic_prof_only_functions &&
+ (abstract_code->kind() != AbstractCode::FUNCTION &&
+ abstract_code->kind() != AbstractCode::INTERPRETED_FUNCTION &&
+ abstract_code->kind() != AbstractCode::OPTIMIZED_FUNCTION)) {
+ return;
+ }
+
+ base::LockGuard<base::RecursiveMutex> guard_file(file_mutex_.Pointer());
+
+ if (perf_output_handle_ == nullptr) return;
+
+ // We only support non-interpreted functions.
+ if (!abstract_code->IsCode()) return;
+ Code* code = abstract_code->GetCode();
+ DCHECK(code->instruction_start() == code->address() + Code::kHeaderSize);
+
+ // Debug info has to be emitted first.
+ if (FLAG_perf_prof_debug_info && shared != nullptr) {
+ LogWriteDebugInfo(code, shared);
+ }
+
+ const char* code_name = name;
+ uint8_t* code_pointer = reinterpret_cast<uint8_t*>(code->instruction_start());
+ uint32_t code_size = code->is_crankshafted() ? code->safepoint_table_offset()
+ : code->instruction_size();
+
+ static const char string_terminator[] = "\0";
+
+ PerfJitCodeLoad code_load;
+ code_load.event_ = PerfJitCodeLoad::kLoad;
+ code_load.size_ = sizeof(code_load) + length + 1 + code_size;
+ code_load.time_stamp_ = GetTimestamp();
+ code_load.process_id_ =
+ static_cast<uint32_t>(base::OS::GetCurrentProcessId());
+ code_load.thread_id_ = static_cast<uint32_t>(base::OS::GetCurrentThreadId());
+ code_load.vma_ = 0x0; // Our addresses are absolute.
+ code_load.code_address_ = reinterpret_cast<uint64_t>(code_pointer);
+ code_load.code_size_ = code_size;
+ code_load.code_id_ = code_index_;
+
+ code_index_++;
+
+ LogWriteBytes(reinterpret_cast<const char*>(&code_load), sizeof(code_load));
+ LogWriteBytes(code_name, length);
+ LogWriteBytes(string_terminator, 1);
+ LogWriteBytes(reinterpret_cast<const char*>(code_pointer), code_size);
+}
+
+void PerfJitLogger::LogWriteDebugInfo(Code* code, SharedFunctionInfo* shared) {
+ // Compute the entry count and get the name of the script.
+ uint32_t entry_count = 0;
+ for (RelocIterator it(code, RelocInfo::kPositionMask); !it.done();
+ it.next()) {
+ entry_count++;
+ }
+ if (entry_count == 0) return;
+ Handle<Script> script(Script::cast(shared->script()));
+ Handle<Object> name_or_url(Script::GetNameOrSourceURL(script));
+
+ int name_length = 0;
+ base::SmartArrayPointer<char> name_string;
+ if (name_or_url->IsString()) {
+ name_string =
+ Handle<String>::cast(name_or_url)
+ ->ToCString(DISALLOW_NULLS, FAST_STRING_TRAVERSAL, &name_length);
+ DCHECK_EQ(0, name_string.get()[name_length]);
+ } else {
+ const char unknown[] = "<unknown>";
+ name_length = static_cast<int>(strlen(unknown));
+ char* buffer = NewArray<char>(name_length);
+ base::OS::StrNCpy(buffer, name_length + 1, unknown,
+ static_cast<size_t>(name_length));
+ name_string = base::SmartArrayPointer<char>(buffer);
+ }
+ DCHECK_EQ(name_length, strlen(name_string.get()));
+
+ PerfJitCodeDebugInfo debug_info;
+
+ debug_info.event_ = PerfJitCodeLoad::kDebugInfo;
+ debug_info.time_stamp_ = GetTimestamp();
+ debug_info.address_ = reinterpret_cast<uint64_t>(code->instruction_start());
+ debug_info.entry_count_ = entry_count;
+
+ uint32_t size = sizeof(debug_info);
+ // Add the sizes of fixed parts of entries.
+ size += entry_count * sizeof(PerfJitDebugEntry);
+ // Add the size of the name after the first entry.
+ size += (static_cast<uint32_t>(name_length) + 1) * entry_count;
+
+ int padding = ((size + 7) & (~7)) - size;
+
+ debug_info.size_ = size + padding;
+
+ LogWriteBytes(reinterpret_cast<const char*>(&debug_info), sizeof(debug_info));
+
+ int script_line_offset = script->line_offset();
+ Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
+
+ for (RelocIterator it(code, RelocInfo::kPositionMask); !it.done();
+ it.next()) {
+ int position = static_cast<int>(it.rinfo()->data());
+ int line_number = Script::GetLineNumber(script, position);
+ // Compute column.
+ int relative_line_number = line_number - script_line_offset;
+ int start =
+ (relative_line_number == 0)
+ ? 0
+ : Smi::cast(line_ends->get(relative_line_number - 1))->value() + 1;
+ int column_offset = position - start;
+ if (relative_line_number == 0) {
+ // For the case where the code is on the same line as the script tag.
+ column_offset += script->column_offset();
+ }
+
+ PerfJitDebugEntry entry;
+ entry.address_ = reinterpret_cast<uint64_t>(it.rinfo()->pc());
+ entry.line_number_ = line_number;
+ entry.column_ = column_offset;
+ LogWriteBytes(reinterpret_cast<const char*>(&entry), sizeof(entry));
+ LogWriteBytes(name_string.get(), name_length + 1);
+ }
+ char padding_bytes[] = "\0\0\0\0\0\0\0\0";
+ LogWriteBytes(padding_bytes, padding);
+}
+
+void PerfJitLogger::CodeMoveEvent(AbstractCode* from, Address to) {
+ // Code relocation not supported.
+ UNREACHABLE();
+}
+
+void PerfJitLogger::LogWriteBytes(const char* bytes, int size) {
+ size_t rv = fwrite(bytes, 1, size, perf_output_handle_);
+ DCHECK(static_cast<size_t>(size) == rv);
+ USE(rv);
+}
+
+void PerfJitLogger::LogWriteHeader() {
+ DCHECK(perf_output_handle_ != NULL);
+ PerfJitHeader header;
+
+ header.magic_ = PerfJitHeader::kMagic;
+ header.version_ = PerfJitHeader::kVersion;
+ header.size_ = sizeof(header);
+ header.elf_mach_target_ = GetElfMach();
+ header.reserved_ = 0xdeadbeef;
+ header.process_id_ = base::OS::GetCurrentProcessId();
+ header.time_stamp_ =
+ static_cast<uint64_t>(base::OS::TimeCurrentMillis() * 1000.0);
+ header.flags_ = 0;
+
+ LogWriteBytes(reinterpret_cast<const char*>(&header), sizeof(header));
+}
+
+#endif // V8_OS_LINUX
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/perf-jit.h b/deps/v8/src/perf-jit.h
new file mode 100644
index 0000000000..25cc3b3686
--- /dev/null
+++ b/deps/v8/src/perf-jit.h
@@ -0,0 +1,122 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PERF_JIT_H_
+#define V8_PERF_JIT_H_
+
+#include "src/log.h"
+
+namespace v8 {
+namespace internal {
+
+#if V8_OS_LINUX
+
+// Linux perf tool logging support
+class PerfJitLogger : public CodeEventLogger {
+ public:
+ PerfJitLogger();
+ virtual ~PerfJitLogger();
+
+ void CodeMoveEvent(AbstractCode* from, Address to) override;
+ void CodeDisableOptEvent(AbstractCode* code,
+ SharedFunctionInfo* shared) override {}
+
+ private:
+ void OpenJitDumpFile();
+ void CloseJitDumpFile();
+ void* OpenMarkerFile(int fd);
+ void CloseMarkerFile(void* marker_address);
+
+ uint64_t GetTimestamp();
+ void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
+ const char* name, int length) override;
+
+ // Extension added to V8 log file name to get the low-level log name.
+ static const char kFilenameFormatString[];
+ static const int kFilenameBufferPadding;
+
+ // File buffer size of the low-level log. We don't use the default to
+ // minimize the associated overhead.
+ static const int kLogBufferSize = 2 * MB;
+
+ void LogWriteBytes(const char* bytes, int size);
+ void LogWriteHeader();
+ void LogWriteDebugInfo(Code* code, SharedFunctionInfo* shared);
+
+ static const uint32_t kElfMachIA32 = 3;
+ static const uint32_t kElfMachX64 = 62;
+ static const uint32_t kElfMachARM = 40;
+ static const uint32_t kElfMachMIPS = 10;
+
+ uint32_t GetElfMach() {
+#if V8_TARGET_ARCH_IA32
+ return kElfMachIA32;
+#elif V8_TARGET_ARCH_X64
+ return kElfMachX64;
+#elif V8_TARGET_ARCH_ARM
+ return kElfMachARM;
+#elif V8_TARGET_ARCH_MIPS
+ return kElfMachMIPS;
+#else
+ UNIMPLEMENTED();
+ return 0;
+#endif
+ }
+
+ // Per-process singleton file. We assume that there is one main isolate;
+ // to determine when it goes away, we keep reference count.
+ static base::LazyRecursiveMutex file_mutex_;
+ static FILE* perf_output_handle_;
+ static uint64_t reference_count_;
+ static void* marker_address_;
+ static uint64_t code_index_;
+};
+
+#else
+
+// PerfJitLogger is only implemented on Linux
+class PerfJitLogger : public CodeEventLogger {
+ public:
+ void CodeMoveEvent(AbstractCode* from, Address to) override {
+ UNIMPLEMENTED();
+ }
+
+ void CodeDisableOptEvent(AbstractCode* code,
+ SharedFunctionInfo* shared) override {
+ UNIMPLEMENTED();
+ }
+
+ void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
+ const char* name, int length) override {
+ UNIMPLEMENTED();
+ }
+};
+
+#endif // V8_OS_LINUX
+} // namespace internal
+} // namespace v8
+#endif
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/ppc/assembler-ppc-inl.h
index 42e220809f..c495fee182 100644
--- a/deps/v8/src/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/ppc/assembler-ppc-inl.h
@@ -89,6 +89,10 @@ Address RelocInfo::target_address() {
return Assembler::target_address_at(pc_, host_);
}
+Address RelocInfo::wasm_memory_reference() {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ return Assembler::target_address_at(pc_, host_);
+}
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) ||
@@ -179,6 +183,18 @@ Address Assembler::return_address_from_call_start(Address pc) {
return pc + (len + 2) * kInstrSize;
}
+void RelocInfo::update_wasm_memory_reference(
+ Address old_base, Address new_base, size_t old_size, size_t new_size,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ DCHECK(old_base <= wasm_memory_reference() &&
+ wasm_memory_reference() < old_base + old_size);
+ Address updated_reference = new_base + (wasm_memory_reference() - old_base);
+ DCHECK(new_base <= updated_reference &&
+ updated_reference < new_base + new_size);
+ Assembler::set_target_address_at(isolate_, pc_, host_, updated_reference,
+ icache_flush_mode);
+}
Object* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index aed149bcab..507eec11af 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -849,6 +849,10 @@ void Assembler::addc(Register dst, Register src1, Register src2, OEBit o,
xo_form(EXT2 | ADDCX, dst, src1, src2, o, r);
}
+void Assembler::adde(Register dst, Register src1, Register src2, OEBit o,
+ RCBit r) {
+ xo_form(EXT2 | ADDEX, dst, src1, src2, o, r);
+}
void Assembler::addze(Register dst, Register src1, OEBit o, RCBit r) {
// a special xo_form
@@ -861,12 +865,15 @@ void Assembler::sub(Register dst, Register src1, Register src2, OEBit o,
xo_form(EXT2 | SUBFX, dst, src2, src1, o, r);
}
-
-void Assembler::subfc(Register dst, Register src1, Register src2, OEBit o,
- RCBit r) {
+void Assembler::subc(Register dst, Register src1, Register src2, OEBit o,
+ RCBit r) {
xo_form(EXT2 | SUBFCX, dst, src2, src1, o, r);
}
+void Assembler::sube(Register dst, Register src1, Register src2, OEBit o,
+ RCBit r) {
+ xo_form(EXT2 | SUBFEX, dst, src2, src1, o, r);
+}
void Assembler::subfic(Register dst, Register src, const Operand& imm) {
d_form(SUBFIC, dst, src, imm.imm_, true);
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index 58c6c94dc6..3e8be7d75a 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -808,18 +808,21 @@ class Assembler : public AssemblerBase {
void sub(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
RCBit r = LeaveRC);
- void subfic(Register dst, Register src, const Operand& imm);
+ void subc(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
+ RCBit r = LeaveRC);
+ void sube(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
+ RCBit r = LeaveRC);
- void subfc(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
- RCBit r = LeaveRC);
+ void subfic(Register dst, Register src, const Operand& imm);
void add(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
RCBit r = LeaveRC);
void addc(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
RCBit r = LeaveRC);
-
- void addze(Register dst, Register src1, OEBit o, RCBit r);
+ void adde(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
+ RCBit r = LeaveRC);
+ void addze(Register dst, Register src1, OEBit o = LeaveOE, RCBit r = LeaveRC);
void mullw(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
RCBit r = LeaveRC);
@@ -1216,7 +1219,9 @@ class Assembler : public AssemblerBase {
void dq(uint64_t data);
void dp(uintptr_t data);
- PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+ AssemblerPositionsRecorder* positions_recorder() {
+ return &positions_recorder_;
+ }
// Read/patch instructions
Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
@@ -1463,8 +1468,8 @@ class Assembler : public AssemblerBase {
friend class RelocInfo;
friend class CodePatcher;
friend class BlockTrampolinePoolScope;
- PositionsRecorder positions_recorder_;
- friend class PositionsRecorder;
+ AssemblerPositionsRecorder positions_recorder_;
+ friend class AssemblerPositionsRecorder;
friend class EnsureSpace;
};
diff --git a/deps/v8/src/ppc/builtins-ppc.cc b/deps/v8/src/ppc/builtins-ppc.cc
index f0b76ccc39..884afedb21 100644
--- a/deps/v8/src/ppc/builtins-ppc.cc
+++ b/deps/v8/src/ppc/builtins-ppc.cc
@@ -532,6 +532,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -- r4 : constructor function
// -- r5 : allocation site or undefined
// -- r6 : new target
+ // -- cp : context
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
@@ -547,11 +548,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
if (!create_implicit_receiver) {
__ SmiTag(r7, r3, SetRC);
- __ Push(r5, r7);
+ __ Push(cp, r5, r7);
__ PushRoot(Heap::kTheHoleValueRootIndex);
} else {
__ SmiTag(r3);
- __ Push(r5, r3);
+ __ Push(cp, r5, r3);
// Allocate the new receiver object.
__ Push(r4, r6);
@@ -623,7 +624,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: result
// sp[0]: receiver
// sp[1]: number of arguments (smi-tagged)
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ LoadP(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
if (create_implicit_receiver) {
// If the result is an object (in the ECMA sense), we should get rid
@@ -754,9 +755,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r0,r8-r9, cp may be clobbered
ProfileEntryHookStub::MaybeCallEntryHook(masm);
- // Clear the context before we push it when entering the internal frame.
- __ li(cp, Operand::Zero());
-
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -853,8 +851,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ PushFixedFrame(r4);
- __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ PushStandardFrame(r4);
// Get the bytecode array from the function object and load the pointer to the
// first entry into kInterpreterBytecodeRegister.
@@ -1208,8 +1205,7 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
__ mr(ip, r3);
// Perform prologue operations usually performed by the young code stub.
- __ PushFixedFrame(r4);
- __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ PushStandardFrame(r4);
// Jump to point after the code-age stub.
__ addi(r3, ip, Operand(kNoCodeAgeSequenceLength));
@@ -1454,24 +1450,6 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
}
-void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
- // We check the stack limit as indicator that recompilation might be done.
- Label ok;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmpl(sp, ip);
- __ bge(&ok);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard);
- }
- __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
- RelocInfo::CODE_TARGET);
-
- __ bind(&ok);
- __ Ret();
-}
-
-
// static
void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
int field_index) {
@@ -1518,6 +1496,27 @@ void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
__ TailCallRuntime(Runtime::kThrowNotDateError);
}
+// static
+void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : argc
+ // -- sp[0] : first argument (left-hand side)
+ // -- sp[4] : receiver (right-hand side)
+ // -----------------------------------
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ LoadP(InstanceOfDescriptor::LeftRegister(),
+ MemOperand(fp, 2 * kPointerSize)); // Load left-hand side.
+ __ LoadP(InstanceOfDescriptor::RightRegister(),
+ MemOperand(fp, 3 * kPointerSize)); // Load right-hand side.
+ InstanceOfStub stub(masm->isolate(), true);
+ __ CallStub(&stub);
+ }
+
+ // Pop the argument and the receiver.
+ __ Ret(2);
+}
// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
@@ -1988,19 +1987,21 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
Comment cmnt(masm, "[ PrepareForTailCall");
- // Prepare for tail call only if the debugger is not active.
+ // Prepare for tail call only if ES2015 tail call elimination is enabled.
Label done;
- ExternalReference debug_is_active =
- ExternalReference::debug_is_active_address(masm->isolate());
- __ mov(scratch1, Operand(debug_is_active));
+ ExternalReference is_tail_call_elimination_enabled =
+ ExternalReference::is_tail_call_elimination_enabled_address(
+ masm->isolate());
+ __ mov(scratch1, Operand(is_tail_call_elimination_enabled));
__ lbz(scratch1, MemOperand(scratch1));
__ cmpi(scratch1, Operand::Zero());
- __ bne(&done);
+ __ beq(&done);
// Drop possible interpreter handler/stub frame.
{
Label no_interpreter_frame;
- __ LoadP(scratch3, MemOperand(fp, StandardFrameConstants::kMarkerOffset));
+ __ LoadP(scratch3,
+ MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::STUB), r0);
__ bne(&no_interpreter_frame);
__ LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -2008,75 +2009,41 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
}
// Check if next frame is an arguments adaptor frame.
+ Register caller_args_count_reg = scratch1;
Label no_arguments_adaptor, formal_parameter_count_loaded;
__ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(scratch3,
- MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+ __ LoadP(
+ scratch3,
+ MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
__ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
__ bne(&no_arguments_adaptor);
- // Drop arguments adaptor frame and load arguments count.
+ // Drop current frame and load arguments count from arguments adaptor frame.
__ mr(fp, scratch2);
- __ LoadP(scratch1,
+ __ LoadP(caller_args_count_reg,
MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(scratch1);
+ __ SmiUntag(caller_args_count_reg);
__ b(&formal_parameter_count_loaded);
__ bind(&no_arguments_adaptor);
// Load caller's formal parameter count
- __ LoadP(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ LoadP(scratch1,
+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
__ LoadP(scratch1,
FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
__ LoadWordArith(
- scratch1, FieldMemOperand(
- scratch1, SharedFunctionInfo::kFormalParameterCountOffset));
+ caller_args_count_reg,
+ FieldMemOperand(scratch1,
+ SharedFunctionInfo::kFormalParameterCountOffset));
#if !V8_TARGET_ARCH_PPC64
- __ SmiUntag(scratch1);
+ __ SmiUntag(caller_args_count_reg);
#endif
__ bind(&formal_parameter_count_loaded);
- // Calculate the end of destination area where we will put the arguments
- // after we drop current frame. We add kPointerSize to count the receiver
- // argument which is not included into formal parameters count.
- Register dst_reg = scratch2;
- __ ShiftLeftImm(dst_reg, scratch1, Operand(kPointerSizeLog2));
- __ add(dst_reg, fp, dst_reg);
- __ addi(dst_reg, dst_reg,
- Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
-
- Register src_reg = scratch1;
- __ ShiftLeftImm(src_reg, args_reg, Operand(kPointerSizeLog2));
- __ add(src_reg, sp, src_reg);
- // Count receiver argument as well (not included in args_reg).
- __ addi(src_reg, src_reg, Operand(kPointerSize));
-
- if (FLAG_debug_code) {
- __ cmpl(src_reg, dst_reg);
- __ Check(lt, kStackAccessBelowStackPointer);
- }
-
- // Restore caller's frame pointer and return address now as they will be
- // overwritten by the copying loop.
- __ RestoreFrameStateForTailCall();
-
- // Now copy callee arguments to the caller frame going backwards to avoid
- // callee arguments corruption (source and destination areas could overlap).
-
- // Both src_reg and dst_reg are pointing to the word after the one to copy,
- // so they must be pre-decremented in the loop.
- Register tmp_reg = scratch3;
- Label loop;
- __ addi(tmp_reg, args_reg, Operand(1)); // +1 for receiver
- __ mtctr(tmp_reg);
- __ bind(&loop);
- __ LoadPU(tmp_reg, MemOperand(src_reg, -kPointerSize));
- __ StorePU(tmp_reg, MemOperand(dst_reg, -kPointerSize));
- __ bdnz(&loop);
-
- // Leave current frame.
- __ mr(sp, dst_reg);
-
+ ParameterCount callee_args_count(args_reg);
+ __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+ scratch3);
__ bind(&done);
}
} // namespace
@@ -2551,31 +2518,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected
__ bind(&too_few);
- // If the function is strong we need to throw an error.
- Label no_strong_error;
- __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ lwz(r8, FieldMemOperand(r7, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBit(r8, SharedFunctionInfo::kStrongModeBit, r0);
- __ beq(&no_strong_error, cr0);
-
- // What we really care about is the required number of arguments.
- __ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kLengthOffset));
-#if V8_TARGET_ARCH_PPC64
- // See commment near kLenghtOffset in src/objects.h
- __ srawi(r7, r7, kSmiTagSize);
-#else
- __ SmiUntag(r7);
-#endif
- __ cmp(r3, r7);
- __ bge(&no_strong_error);
-
- {
- FrameScope frame(masm, StackFrame::MANUAL);
- EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
- }
-
- __ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
ArgumentAdaptorStackCheck(masm, &stack_overflow);
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index 03c73af503..0671f990e8 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -4,9 +4,10 @@
#if V8_TARGET_ARCH_PPC
+#include "src/code-stubs.h"
+#include "src/api-arguments.h"
#include "src/base/bits.h"
#include "src/bootstrapper.h"
-#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
@@ -76,6 +77,10 @@ void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
}
+void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+ Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
+ descriptor->Initialize(r3, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
+}
void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
@@ -494,8 +499,9 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, Register lhs,
__ b(both_loaded_as_doubles);
}
-
-// Fast negative check for internalized-to-internalized equality.
+// Fast negative check for internalized-to-internalized equality or receiver
+// equality. Also handles the undetectable receiver to null/undefined
+// comparison.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
Register lhs, Register rhs,
Label* possible_strings,
@@ -503,7 +509,7 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3)));
// r5 is object type of rhs.
- Label object_test, return_unequal, undetectable;
+ Label object_test, return_equal, return_unequal, undetectable;
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
__ andi(r0, r5, Operand(kIsNotStringMask));
__ bne(&object_test, cr0);
@@ -541,6 +547,16 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
__ bind(&undetectable);
__ andi(r0, r8, Operand(1 << Map::kIsUndetectable));
__ beq(&return_unequal, cr0);
+
+ // If both sides are JSReceivers, then the result is false according to
+ // the HTML specification, which says that only comparisons with null or
+ // undefined are affected by special casing for document.all.
+ __ CompareInstanceType(r5, r5, ODDBALL_TYPE);
+ __ beq(&return_equal);
+ __ CompareInstanceType(r6, r6, ODDBALL_TYPE);
+ __ bne(&return_unequal);
+
+ __ bind(&return_equal);
__ li(r3, Operand(EQUAL));
__ Ret();
}
@@ -1409,8 +1425,12 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
__ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
__ bne(&slow_case);
- // Ensure that {function} has an instance prototype.
+ // Go to the runtime if the function is not a constructor.
__ lbz(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
+ __ TestBit(scratch, Map::kIsConstructor, r0);
+ __ beq(&slow_case, cr0);
+
+ // Ensure that {function} has an instance prototype.
__ TestBit(scratch, Map::kHasNonInstancePrototype, r0);
__ bne(&slow_case, cr0);
@@ -1478,7 +1498,8 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
// Slow-case: Call the %InstanceOf runtime function.
__ bind(&slow_case);
__ Push(object, function);
- __ TailCallRuntime(Runtime::kInstanceOf);
+ __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
+ : Runtime::kInstanceOf);
}
@@ -1531,29 +1552,6 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
}
-void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
- // Return address is in lr.
- Label slow;
-
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
-
- // Check that the key is an array index, that is Uint32.
- __ TestIfPositiveSmi(key, r0);
- __ bne(&slow, cr0);
-
- // Everything is fine, call runtime.
- __ Push(receiver, key); // Receiver, key.
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
-
- __ bind(&slow);
- PropertyAccessCompiler::TailCallBuiltin(
- masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -2695,29 +2693,28 @@ void SubStringStub::Generate(MacroAssembler* masm) {
void ToNumberStub::Generate(MacroAssembler* masm) {
// The ToNumber stub takes one argument in r3.
- Label not_smi;
- __ JumpIfNotSmi(r3, &not_smi);
- __ blr();
- __ bind(&not_smi);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ TestIfSmi(r3, r0);
+ __ Ret(eq, cr0);
__ CompareObjectType(r3, r4, r4, HEAP_NUMBER_TYPE);
// r3: receiver
// r4: receiver instance type
__ Ret(eq);
- Label not_string, slow_string;
- __ cmpli(r4, Operand(FIRST_NONSTRING_TYPE));
- __ bge(&not_string);
- // Check if string has a cached array index.
- __ lwz(r5, FieldMemOperand(r3, String::kHashFieldOffset));
- __ And(r0, r5, Operand(String::kContainsCachedArrayIndexMask), SetRC);
- __ bne(&slow_string, cr0);
- __ IndexFromHash(r5, r3);
- __ blr();
- __ bind(&slow_string);
- __ push(r3); // Push argument.
- __ TailCallRuntime(Runtime::kStringToNumber);
- __ bind(&not_string);
+ NonNumberToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+}
+
+void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
+ // The NonNumberToNumber stub takes one argument in r3.
+ __ AssertNotNumber(r3);
+
+ __ CompareObjectType(r3, r4, r4, FIRST_NONSTRING_TYPE);
+ // r3: receiver
+ // r4: receiver instance type
+ StringToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub, lt);
Label not_oddball;
__ cmpi(r4, Operand(ODDBALL_TYPE));
@@ -2730,29 +2727,23 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kToNumber);
}
+void StringToNumberStub::Generate(MacroAssembler* masm) {
+ // The StringToNumber stub takes one argument in r3.
+ __ AssertString(r3);
-void ToLengthStub::Generate(MacroAssembler* masm) {
- // The ToLength stub takes one argument in r3.
- Label not_smi;
- __ JumpIfNotSmi(r3, &not_smi);
- STATIC_ASSERT(kSmiTag == 0);
- __ cmpi(r3, Operand::Zero());
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ isel(lt, r3, r0, r3);
- } else {
- Label positive;
- __ bgt(&positive);
- __ li(r3, Operand::Zero());
- __ bind(&positive);
- }
- __ Ret();
- __ bind(&not_smi);
+ // Check if string has a cached array index.
+ Label runtime;
+ __ lwz(r5, FieldMemOperand(r3, String::kHashFieldOffset));
+ __ And(r0, r5, Operand(String::kContainsCachedArrayIndexMask), SetRC);
+ __ bne(&runtime, cr0);
+ __ IndexFromHash(r5, r3);
+ __ blr();
+ __ bind(&runtime);
__ push(r3); // Push argument.
- __ TailCallRuntime(Runtime::kToLength);
+ __ TailCallRuntime(Runtime::kStringToNumber);
}
-
void ToStringStub::Generate(MacroAssembler* masm) {
// The ToString stub takes one argument in r3.
Label is_number;
@@ -2933,42 +2924,6 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
}
-void StringCompareStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r4 : left
- // -- r3 : right
- // -- lr : return address
- // -----------------------------------
- __ AssertString(r4);
- __ AssertString(r3);
-
- Label not_same;
- __ cmp(r3, r4);
- __ bne(&not_same);
- __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
- __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r4,
- r5);
- __ Ret();
-
- __ bind(&not_same);
-
- // Check that both objects are sequential one-byte strings.
- Label runtime;
- __ JumpIfNotBothSequentialOneByteStrings(r4, r3, r5, r6, &runtime);
-
- // Compare flat one-byte strings natively.
- __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r5,
- r6);
- StringHelper::GenerateCompareFlatOneByteStrings(masm, r4, r3, r5, r6, r7);
-
- // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ bind(&runtime);
- __ Push(r4, r3);
- __ TailCallRuntime(Runtime::kStringCompare);
-}
-
-
void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r4 : left
@@ -3285,10 +3240,17 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
// Handle more complex cases in runtime.
__ bind(&runtime);
- __ Push(left, right);
if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(left, right);
+ __ CallRuntime(Runtime::kStringEqual);
+ }
+ __ LoadRoot(r4, Heap::kTrueValueRootIndex);
+ __ sub(r3, r3, r4);
+ __ Ret();
} else {
+ __ Push(left, right);
__ TailCallRuntime(Runtime::kStringCompare);
}
@@ -3832,7 +3794,7 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
int parameter_count_offset =
- StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
__ LoadP(r4, MemOperand(fp, parameter_count_offset));
if (function_mode() == JS_FUNCTION_STUB_MODE) {
__ addi(r4, r4, Operand(1));
@@ -4844,7 +4806,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
__ bind(&loop);
__ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
__ bind(&loop_entry);
- __ LoadP(ip, MemOperand(r5, StandardFrameConstants::kMarkerOffset));
+ __ LoadP(ip, MemOperand(r5, StandardFrameConstants::kFunctionOffset));
__ cmp(ip, r4);
__ bne(&loop);
}
@@ -4853,7 +4815,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// arguments adaptor frame below the function frame).
Label no_rest_parameters;
__ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(ip, MemOperand(r5, StandardFrameConstants::kContextOffset));
+ __ LoadP(ip, MemOperand(r5, CommonFrameConstants::kContextOrFrameTypeOffset));
__ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
__ bne(&no_rest_parameters);
@@ -4998,7 +4960,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
__ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(r3, MemOperand(r7, StandardFrameConstants::kContextOffset));
+ __ LoadP(r3, MemOperand(r7, CommonFrameConstants::kContextOrFrameTypeOffset));
__ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
__ beq(&adaptor_frame);
@@ -5240,7 +5202,7 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ bind(&loop);
__ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
__ bind(&loop_entry);
- __ LoadP(ip, MemOperand(r5, StandardFrameConstants::kMarkerOffset));
+ __ LoadP(ip, MemOperand(r5, StandardFrameConstants::kFunctionOffset));
__ cmp(ip, r4);
__ bne(&loop);
}
@@ -5248,7 +5210,7 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
__ LoadP(r6, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(ip, MemOperand(r6, StandardFrameConstants::kContextOffset));
+ __ LoadP(ip, MemOperand(r6, CommonFrameConstants::kContextOrFrameTypeOffset));
__ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
__ beq(&arguments_adaptor);
{
@@ -5632,16 +5594,12 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ b(&leave_exit_frame);
}
-static void CallApiFunctionStubHelper(MacroAssembler* masm,
- const ParameterCount& argc,
- bool return_first_arg,
- bool call_data_undefined, bool is_lazy) {
+void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : callee
// -- r7 : call_data
// -- r5 : holder
// -- r4 : api_function_address
- // -- r6 : number of arguments if argc is a register
// -- cp : context
// --
// -- sp[0] : last argument
@@ -5667,11 +5625,9 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
STATIC_ASSERT(FCA::kHolderIndex == 0);
STATIC_ASSERT(FCA::kArgsLength == 7);
- DCHECK(argc.is_immediate() || r6.is(argc.reg()));
-
// context save
__ push(context);
- if (!is_lazy) {
+ if (!is_lazy()) {
// load context from callee
__ LoadP(context, FieldMemOperand(callee, JSFunction::kContextOffset));
}
@@ -5683,7 +5639,7 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
__ push(call_data);
Register scratch = call_data;
- if (!call_data_undefined) {
+ if (!call_data_undefined()) {
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
}
// return value
@@ -5719,28 +5675,15 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
__ addi(r3, sp, Operand(kFunctionCallbackInfoOffset));
// FunctionCallbackInfo::implicit_args_
__ StoreP(scratch, MemOperand(r3, 0 * kPointerSize));
- if (argc.is_immediate()) {
- // FunctionCallbackInfo::values_
- __ addi(ip, scratch,
- Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
- __ StoreP(ip, MemOperand(r3, 1 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc
- __ li(ip, Operand(argc.immediate()));
- __ stw(ip, MemOperand(r3, 2 * kPointerSize));
- // FunctionCallbackInfo::is_construct_call_ = 0
- __ li(ip, Operand::Zero());
- __ stw(ip, MemOperand(r3, 2 * kPointerSize + kIntSize));
- } else {
- __ ShiftLeftImm(ip, argc.reg(), Operand(kPointerSizeLog2));
- __ addi(ip, ip, Operand((FCA::kArgsLength - 1) * kPointerSize));
- // FunctionCallbackInfo::values_
- __ add(r0, scratch, ip);
- __ StoreP(r0, MemOperand(r3, 1 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc
- __ stw(argc.reg(), MemOperand(r3, 2 * kPointerSize));
- // FunctionCallbackInfo::is_construct_call_
- __ stw(ip, MemOperand(r3, 2 * kPointerSize + kIntSize));
- }
+ // FunctionCallbackInfo::values_
+ __ addi(ip, scratch, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
+ __ StoreP(ip, MemOperand(r3, 1 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc
+ __ li(ip, Operand(argc()));
+ __ stw(ip, MemOperand(r3, 2 * kPointerSize));
+ // FunctionCallbackInfo::is_construct_call_ = 0
+ __ li(ip, Operand::Zero());
+ __ stw(ip, MemOperand(r3, 2 * kPointerSize + kIntSize));
ExternalReference thunk_ref =
ExternalReference::invoke_function_callback(masm->isolate());
@@ -5750,7 +5693,7 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
// Stores return the first js argument
int return_value_offset = 0;
- if (return_first_arg) {
+ if (is_store()) {
return_value_offset = 2 + FCA::kArgsLength;
} else {
return_value_offset = 2 + FCA::kReturnValueOffset;
@@ -5760,33 +5703,14 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
MemOperand is_construct_call_operand =
MemOperand(sp, kFunctionCallbackInfoOffset + 2 * kPointerSize + kIntSize);
MemOperand* stack_space_operand = &is_construct_call_operand;
- if (argc.is_immediate()) {
- stack_space = argc.immediate() + FCA::kArgsLength + 1;
- stack_space_operand = NULL;
- }
+ stack_space = argc() + FCA::kArgsLength + 1;
+ stack_space_operand = NULL;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
stack_space_operand, return_value_operand,
&context_restore_operand);
}
-void CallApiFunctionStub::Generate(MacroAssembler* masm) {
- bool call_data_undefined = this->call_data_undefined();
- CallApiFunctionStubHelper(masm, ParameterCount(r6), false,
- call_data_undefined, false);
-}
-
-
-void CallApiAccessorStub::Generate(MacroAssembler* masm) {
- bool is_store = this->is_store();
- int argc = this->argc();
- bool call_data_undefined = this->call_data_undefined();
- bool is_lazy = this->is_lazy();
- CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
- call_data_undefined, is_lazy);
-}
-
-
void CallApiGetterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- sp[0] : name
diff --git a/deps/v8/src/ppc/codegen-ppc.cc b/deps/v8/src/ppc/codegen-ppc.cc
index d6d86b0fcc..5642e91f9d 100644
--- a/deps/v8/src/ppc/codegen-ppc.cc
+++ b/deps/v8/src/ppc/codegen-ppc.cc
@@ -136,7 +136,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
if (mode == TRACK_ALLOCATION_SITE) {
DCHECK(allocation_memento_found != NULL);
- __ JumpIfJSArrayHasAllocationMemento(receiver, scratch_elements,
+ __ JumpIfJSArrayHasAllocationMemento(receiver, scratch_elements, r11,
allocation_memento_found);
}
@@ -169,7 +169,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
scratch2));
if (mode == TRACK_ALLOCATION_SITE) {
- __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
+ __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch3, fail);
}
// Check for empty arrays, which only require a map transition and no changes
@@ -289,7 +289,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
scratch));
if (mode == TRACK_ALLOCATION_SITE) {
- __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
+ __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch3, fail);
}
// Check for empty arrays, which only require a map transition and no changes
@@ -616,9 +616,7 @@ CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
young_sequence_.length() / Assembler::kInstrSize,
CodePatcher::DONT_FLUSH));
PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
- patcher->masm()->PushFixedFrame(r4);
- patcher->masm()->addi(fp, sp,
- Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ patcher->masm()->PushStandardFrame(r4);
for (int i = 0; i < kNoCodeAgeSequenceNops; i++) {
patcher->masm()->nop();
}
diff --git a/deps/v8/src/ppc/deoptimizer-ppc.cc b/deps/v8/src/ppc/deoptimizer-ppc.cc
index 9ec5cdd11a..ead877e149 100644
--- a/deps/v8/src/ppc/deoptimizer-ppc.cc
+++ b/deps/v8/src/ppc/deoptimizer-ppc.cc
@@ -106,12 +106,6 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
}
}
-bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
- // There is no dynamic alignment padding on PPC in the input frame.
- return false;
-}
-
-
#define __ masm()->
// This code tries to be close to ia32 code so that any changes can be
@@ -168,7 +162,12 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Allocate a new deoptimizer object.
// Pass six arguments in r3 to r8.
__ PrepareCallCFunction(6, r8);
+ __ li(r3, Operand::Zero());
+ Label context_check;
+ __ LoadP(r4, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ JumpIfSmi(r4, &context_check);
__ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ bind(&context_check);
__ li(r4, Operand(type())); // bailout type,
// r5: bailout id already loaded.
// r6: code address or 0 already loaded.
@@ -238,6 +237,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
__ pop(r3); // Restore deoptimizer object (class Deoptimizer).
+ __ LoadP(sp, MemOperand(r3, Deoptimizer::caller_frame_top_offset()));
+
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
// Outer loop state: r7 = current "FrameDescription** output_",
diff --git a/deps/v8/src/ppc/disasm-ppc.cc b/deps/v8/src/ppc/disasm-ppc.cc
index e72658fba7..baba14643f 100644
--- a/deps/v8/src/ppc/disasm-ppc.cc
+++ b/deps/v8/src/ppc/disasm-ppc.cc
@@ -657,10 +657,18 @@ void Decoder::DecodeExt2(Instruction* instr) {
Format(instr, "subfc'. 'rt, 'ra, 'rb");
return;
}
+ case SUBFEX: {
+ Format(instr, "subfe'. 'rt, 'ra, 'rb");
+ return;
+ }
case ADDCX: {
Format(instr, "addc'. 'rt, 'ra, 'rb");
return;
}
+ case ADDEX: {
+ Format(instr, "adde'. 'rt, 'ra, 'rb");
+ return;
+ }
case CNTLZWX: {
Format(instr, "cntlzw'. 'ra, 'rs");
return;
diff --git a/deps/v8/src/ppc/frames-ppc.h b/deps/v8/src/ppc/frames-ppc.h
index b1de9f50ff..fd4abe2e4d 100644
--- a/deps/v8/src/ppc/frames-ppc.h
+++ b/deps/v8/src/ppc/frames-ppc.h
@@ -152,16 +152,11 @@ class EntryFrameConstants : public AllStatic {
-(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};
-
-class ExitFrameConstants : public AllStatic {
+class ExitFrameConstants : public TypedFrameConstants {
public:
- static const int kFrameSize =
- FLAG_enable_embedded_constant_pool ? 3 * kPointerSize : 2 * kPointerSize;
-
- static const int kConstantPoolOffset =
- FLAG_enable_embedded_constant_pool ? -3 * kPointerSize : 0;
- static const int kCodeOffset = -2 * kPointerSize;
- static const int kSPOffset = -1 * kPointerSize;
+ static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ DEFINE_TYPED_FRAME_SIZES(2);
// The caller fields are below the frame pointer on the stack.
static const int kCallerFPOffset = 0 * kPointerSize;
@@ -179,7 +174,7 @@ class JavaScriptFrameConstants : public AllStatic {
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
static const int kLastParameterOffset = +2 * kPointerSize;
- static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+ static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
// Caller SP-relative.
static const int kParam0Offset = -2 * kPointerSize;
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index 3db7bd5c17..48b6cdcf0c 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -106,35 +106,9 @@ void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ToNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-// static
-const Register ToLengthDescriptor::ReceiverRegister() { return r3; }
-
-
-// static
-const Register ToStringDescriptor::ReceiverRegister() { return r3; }
-
// static
-const Register ToNameDescriptor::ReceiverRegister() { return r3; }
-
-
-// static
-const Register ToObjectDescriptor::ReceiverRegister() { return r3; }
-
-
-void NumberToStringDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
+const Register TypeConversionDescriptor::ArgumentRegister() { return r3; }
void TypeofDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -262,6 +236,13 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(0, nullptr, nullptr);
}
+#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
+ void Allocate##Type##Descriptor::InitializePlatformSpecific( \
+ CallInterfaceDescriptorData* data) { \
+ data->InitializePlatformSpecific(0, nullptr, nullptr); \
+ }
+SIMD128_TYPES(SIMD128_ALLOC_DESC)
+#undef SIMD128_ALLOC_DESC
void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -306,24 +287,16 @@ void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void CompareDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r4, r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void CompareNilDescriptor::InitializePlatformSpecific(
+void FastArrayPushDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
+ // stack param count needs (arg count)
Register registers[] = {r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void ToBooleanDescriptor::InitializePlatformSpecific(
+void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {r3};
+ Register registers[] = {r4, r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -387,21 +360,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void ApiFunctionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r3, // callee
- r7, // call_data
- r5, // holder
- r4, // api_function_address
- r6, // actual number of arguments
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ApiAccessorDescriptor::InitializePlatformSpecific(
+void ApiCallbackDescriptorBase::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r3, // callee
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index 14759de0da..42e5a13157 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -570,14 +570,13 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
StoreP(scratch, MemOperand(ip));
// Call stub on end of buffer.
// Check for end of buffer.
- mov(r0, Operand(StoreBuffer::kStoreBufferOverflowBit));
- and_(r0, scratch, r0, SetRC);
+ TestBitMask(scratch, StoreBuffer::kStoreBufferMask, r0);
if (and_then == kFallThroughAtEnd) {
- beq(&done, cr0);
+ bne(&done, cr0);
} else {
DCHECK(and_then == kReturnAtEnd);
- Ret(eq, cr0);
+ Ret(ne, cr0);
}
mflr(r0);
push(r0);
@@ -591,42 +590,69 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
}
}
-
-void MacroAssembler::PushFixedFrame(Register marker_reg) {
+void MacroAssembler::PushCommonFrame(Register marker_reg) {
+ int fp_delta = 0;
mflr(r0);
if (FLAG_enable_embedded_constant_pool) {
if (marker_reg.is_valid()) {
- Push(r0, fp, kConstantPoolRegister, cp, marker_reg);
+ Push(r0, fp, kConstantPoolRegister, marker_reg);
+ fp_delta = 2;
} else {
- Push(r0, fp, kConstantPoolRegister, cp);
+ Push(r0, fp, kConstantPoolRegister);
+ fp_delta = 1;
}
} else {
if (marker_reg.is_valid()) {
- Push(r0, fp, cp, marker_reg);
+ Push(r0, fp, marker_reg);
+ fp_delta = 1;
} else {
- Push(r0, fp, cp);
+ Push(r0, fp);
+ fp_delta = 0;
}
}
+ addi(fp, sp, Operand(fp_delta * kPointerSize));
}
-
-void MacroAssembler::PopFixedFrame(Register marker_reg) {
+void MacroAssembler::PopCommonFrame(Register marker_reg) {
if (FLAG_enable_embedded_constant_pool) {
if (marker_reg.is_valid()) {
- Pop(r0, fp, kConstantPoolRegister, cp, marker_reg);
+ Pop(r0, fp, kConstantPoolRegister, marker_reg);
} else {
- Pop(r0, fp, kConstantPoolRegister, cp);
+ Pop(r0, fp, kConstantPoolRegister);
}
} else {
if (marker_reg.is_valid()) {
- Pop(r0, fp, cp, marker_reg);
+ Pop(r0, fp, marker_reg);
} else {
- Pop(r0, fp, cp);
+ Pop(r0, fp);
}
}
mtlr(r0);
}
+void MacroAssembler::PushStandardFrame(Register function_reg) {
+ int fp_delta = 0;
+ mflr(r0);
+ if (FLAG_enable_embedded_constant_pool) {
+ if (function_reg.is_valid()) {
+ Push(r0, fp, kConstantPoolRegister, cp, function_reg);
+ fp_delta = 3;
+ } else {
+ Push(r0, fp, kConstantPoolRegister, cp);
+ fp_delta = 2;
+ }
+ } else {
+ if (function_reg.is_valid()) {
+ Push(r0, fp, cp, function_reg);
+ fp_delta = 2;
+ } else {
+ Push(r0, fp, cp);
+ fp_delta = 1;
+ }
+ }
+ addi(fp, sp, Operand(fp_delta * kPointerSize));
+}
+
void MacroAssembler::RestoreFrameStateForTailCall() {
if (FLAG_enable_embedded_constant_pool) {
LoadP(kConstantPoolRegister,
@@ -803,6 +829,145 @@ void MacroAssembler::ConvertDoubleToUnsignedInt64(
}
#endif
+#if !V8_TARGET_ARCH_PPC64
+void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
+ Register src_low, Register src_high,
+ Register scratch, Register shift) {
+ DCHECK(!AreAliased(dst_low, src_high, shift));
+ DCHECK(!AreAliased(dst_high, src_low, shift));
+ Label less_than_32;
+ Label done;
+ cmpi(shift, Operand(32));
+ blt(&less_than_32);
+ // If shift >= 32
+ andi(scratch, shift, Operand(0x1f));
+ slw(dst_high, src_low, scratch);
+ li(dst_low, Operand::Zero());
+ b(&done);
+ bind(&less_than_32);
+ // If shift < 32
+ subfic(scratch, shift, Operand(32));
+ slw(dst_high, src_high, shift);
+ srw(scratch, src_low, scratch);
+ orx(dst_high, dst_high, scratch);
+ slw(dst_low, src_low, shift);
+ bind(&done);
+}
+
+void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
+ Register src_low, Register src_high,
+ uint32_t shift) {
+ DCHECK(!AreAliased(dst_low, src_high));
+ DCHECK(!AreAliased(dst_high, src_low));
+ if (shift == 32) {
+ Move(dst_high, src_low);
+ li(dst_low, Operand::Zero());
+ } else if (shift > 32) {
+ shift &= 0x1f;
+ slwi(dst_high, src_low, Operand(shift));
+ li(dst_low, Operand::Zero());
+ } else if (shift == 0) {
+ Move(dst_low, src_low);
+ Move(dst_high, src_high);
+ } else {
+ slwi(dst_high, src_high, Operand(shift));
+ rlwimi(dst_high, src_low, shift, 32 - shift, 31);
+ slwi(dst_low, src_low, Operand(shift));
+ }
+}
+
+void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
+ Register src_low, Register src_high,
+ Register scratch, Register shift) {
+ DCHECK(!AreAliased(dst_low, src_high, shift));
+ DCHECK(!AreAliased(dst_high, src_low, shift));
+ Label less_than_32;
+ Label done;
+ cmpi(shift, Operand(32));
+ blt(&less_than_32);
+ // If shift >= 32
+ andi(scratch, shift, Operand(0x1f));
+ srw(dst_low, src_high, scratch);
+ li(dst_high, Operand::Zero());
+ b(&done);
+ bind(&less_than_32);
+ // If shift < 32
+ subfic(scratch, shift, Operand(32));
+ srw(dst_low, src_low, shift);
+ slw(scratch, src_high, scratch);
+ orx(dst_low, dst_low, scratch);
+ srw(dst_high, src_high, shift);
+ bind(&done);
+}
+
+void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
+ Register src_low, Register src_high,
+ uint32_t shift) {
+ DCHECK(!AreAliased(dst_low, src_high));
+ DCHECK(!AreAliased(dst_high, src_low));
+ if (shift == 32) {
+ Move(dst_low, src_high);
+ li(dst_high, Operand::Zero());
+ } else if (shift > 32) {
+ shift &= 0x1f;
+ srwi(dst_low, src_high, Operand(shift));
+ li(dst_high, Operand::Zero());
+ } else if (shift == 0) {
+ Move(dst_low, src_low);
+ Move(dst_high, src_high);
+ } else {
+ srwi(dst_low, src_low, Operand(shift));
+ rlwimi(dst_low, src_high, 32 - shift, 0, shift - 1);
+ srwi(dst_high, src_high, Operand(shift));
+ }
+}
+
+void MacroAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
+ Register src_low, Register src_high,
+ Register scratch, Register shift) {
+ DCHECK(!AreAliased(dst_low, src_high, shift));
+ DCHECK(!AreAliased(dst_high, src_low, shift));
+ Label less_than_32;
+ Label done;
+ cmpi(shift, Operand(32));
+ blt(&less_than_32);
+ // If shift >= 32
+ andi(scratch, shift, Operand(0x1f));
+ sraw(dst_low, src_high, scratch);
+ srawi(dst_high, src_high, 31);
+ b(&done);
+ bind(&less_than_32);
+ // If shift < 32
+ subfic(scratch, shift, Operand(32));
+ srw(dst_low, src_low, shift);
+ slw(scratch, src_high, scratch);
+ orx(dst_low, dst_low, scratch);
+ sraw(dst_high, src_high, shift);
+ bind(&done);
+}
+
+void MacroAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
+ Register src_low, Register src_high,
+ uint32_t shift) {
+ DCHECK(!AreAliased(dst_low, src_high));
+ DCHECK(!AreAliased(dst_high, src_low));
+ if (shift == 32) {
+ Move(dst_low, src_high);
+ srawi(dst_high, src_high, 31);
+ } else if (shift > 32) {
+ shift &= 0x1f;
+ srawi(dst_low, src_high, shift);
+ srawi(dst_high, src_high, 31);
+ } else if (shift == 0) {
+ Move(dst_low, src_low);
+ Move(dst_high, src_high);
+ } else {
+ srwi(dst_low, src_low, Operand(shift));
+ rlwimi(dst_low, src_high, 32 - shift, 0, shift - 1);
+ srawi(dst_high, src_high, shift);
+ }
+}
+#endif
void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
Register code_target_address) {
@@ -824,12 +989,13 @@ void MacroAssembler::LoadConstantPoolPointerRegister() {
mov_label_addr(kConstantPoolRegister, ConstantPoolPosition());
}
-
-void MacroAssembler::StubPrologue(Register base, int prologue_offset) {
- LoadSmiLiteral(r11, Smi::FromInt(StackFrame::STUB));
- PushFixedFrame(r11);
- // Adjust FP to point to saved FP.
- addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+void MacroAssembler::StubPrologue(StackFrame::Type type, Register base,
+ int prologue_offset) {
+ {
+ ConstantPoolUnavailableScope constant_pool_unavailable(this);
+ LoadSmiLiteral(r11, Smi::FromInt(type));
+ PushCommonFrame(r11);
+ }
if (FLAG_enable_embedded_constant_pool) {
if (!base.is(no_reg)) {
// base contains prologue address
@@ -865,9 +1031,7 @@ void MacroAssembler::Prologue(bool code_pre_aging, Register base,
}
} else {
// This matches the code found in GetNoCodeAgeSequence()
- PushFixedFrame(r4);
- // Adjust fp to point to saved fp.
- addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ PushStandardFrame(r4);
for (int i = 0; i < kNoCodeAgeSequenceNops; i++) {
nop();
}
@@ -892,20 +1056,20 @@ void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
void MacroAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
- PushFixedFrame();
- // This path should not rely on ip containing code entry.
+ // Push type explicitly so we can leverage the constant pool.
+ // This path cannot rely on ip containing code entry.
+ PushCommonFrame();
LoadConstantPoolPointerRegister();
LoadSmiLiteral(ip, Smi::FromInt(type));
push(ip);
} else {
LoadSmiLiteral(ip, Smi::FromInt(type));
- PushFixedFrame(ip);
+ PushCommonFrame(ip);
+ }
+ if (type == StackFrame::INTERNAL) {
+ mov(r0, Operand(CodeObject()));
+ push(r0);
}
- // Adjust FP to point to saved FP.
- addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
-
- mov(r0, Operand(CodeObject()));
- push(r0);
}
@@ -921,11 +1085,8 @@ int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
LoadP(ip, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
if (FLAG_enable_embedded_constant_pool) {
- const int exitOffset = ExitFrameConstants::kConstantPoolOffset;
- const int standardOffset = StandardFrameConstants::kConstantPoolOffset;
- const int offset =
- ((type == StackFrame::EXIT) ? exitOffset : standardOffset);
- LoadP(kConstantPoolRegister, MemOperand(fp, offset));
+ LoadP(kConstantPoolRegister,
+ MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
}
mtlr(r0);
frame_ends = pc_offset();
@@ -962,12 +1123,10 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
// all of the pushes that have happened inside of V8
// since we were called from C code
- // replicate ARM frame - TODO make this more closely follow PPC ABI
- mflr(r0);
- Push(r0, fp);
- mr(fp, sp);
+ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::EXIT));
+ PushCommonFrame(ip);
// Reserve room for saved entry sp and code object.
- subi(sp, sp, Operand(ExitFrameConstants::kFrameSize));
+ subi(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
if (emit_debug_code()) {
li(r8, Operand::Zero());
@@ -1052,7 +1211,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
// Calculate the stack location of the saved doubles and restore them.
const int kNumRegs = kNumCallerSavedDoubles;
const int offset =
- (ExitFrameConstants::kFrameSize + kNumRegs * kDoubleSize);
+ (ExitFrameConstants::kFixedFrameSizeFromFp + kNumRegs * kDoubleSize);
addi(r6, fp, Operand(-offset));
MultiPopDoubles(kCallerSavedDoubles, r6);
}
@@ -1093,6 +1252,67 @@ void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
Move(dst, d1);
}
+void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
+ Register caller_args_count_reg,
+ Register scratch0, Register scratch1) {
+#if DEBUG
+ if (callee_args_count.is_reg()) {
+ DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
+ scratch1));
+ } else {
+ DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
+ }
+#endif
+
+ // Calculate the end of destination area where we will put the arguments
+ // after we drop current frame. We add kPointerSize to count the receiver
+ // argument which is not included into formal parameters count.
+ Register dst_reg = scratch0;
+ ShiftLeftImm(dst_reg, caller_args_count_reg, Operand(kPointerSizeLog2));
+ add(dst_reg, fp, dst_reg);
+ addi(dst_reg, dst_reg,
+ Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+
+ Register src_reg = caller_args_count_reg;
+ // Calculate the end of source area. +kPointerSize is for the receiver.
+ if (callee_args_count.is_reg()) {
+ ShiftLeftImm(src_reg, callee_args_count.reg(), Operand(kPointerSizeLog2));
+ add(src_reg, sp, src_reg);
+ addi(src_reg, src_reg, Operand(kPointerSize));
+ } else {
+ Add(src_reg, sp, (callee_args_count.immediate() + 1) * kPointerSize, r0);
+ }
+
+ if (FLAG_debug_code) {
+ cmpl(src_reg, dst_reg);
+ Check(lt, kStackAccessBelowStackPointer);
+ }
+
+ // Restore caller's frame pointer and return address now as they will be
+ // overwritten by the copying loop.
+ RestoreFrameStateForTailCall();
+
+ // Now copy callee arguments to the caller frame going backwards to avoid
+ // callee arguments corruption (source and destination areas could overlap).
+
+ // Both src_reg and dst_reg are pointing to the word after the one to copy,
+ // so they must be pre-decremented in the loop.
+ Register tmp_reg = scratch1;
+ Label loop;
+ if (callee_args_count.is_reg()) {
+ addi(tmp_reg, callee_args_count.reg(), Operand(1)); // +1 for receiver
+ } else {
+ mov(tmp_reg, Operand(callee_args_count.immediate() + 1));
+ }
+ mtctr(tmp_reg);
+ bind(&loop);
+ LoadPU(tmp_reg, MemOperand(src_reg, -kPointerSize));
+ StorePU(tmp_reg, MemOperand(dst_reg, -kPointerSize));
+ bdnz(&loop);
+
+ // Leave current frame.
+ mr(sp, dst_reg);
+}
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual, Label* done,
@@ -1370,8 +1590,20 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
DCHECK(!holder_reg.is(ip));
DCHECK(!scratch.is(ip));
- // Load current lexical context from the stack frame.
- LoadP(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Load current lexical context from the active StandardFrame, which
+ // may require crawling past STUB frames.
+ Label load_context;
+ Label has_context;
+ DCHECK(!ip.is(scratch));
+ mr(ip, fp);
+ bind(&load_context);
+ LoadP(scratch,
+ MemOperand(ip, CommonFrameConstants::kContextOrFrameTypeOffset));
+ JumpIfNotSmi(scratch, &has_context);
+ LoadP(ip, MemOperand(ip, CommonFrameConstants::kCallerFPOffset));
+ b(&load_context);
+ bind(&has_context);
+
// In debug mode, make sure the lexical context is set.
#ifdef DEBUG
cmpi(scratch, Operand::Zero());
@@ -2679,6 +2911,17 @@ void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
JumpIfSmi(reg2, on_either_smi);
}
+void MacroAssembler::AssertNotNumber(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ TestIfSmi(object, r0);
+ Check(ne, kOperandIsANumber, cr0);
+ push(object);
+ CompareObjectType(object, object, object, HEAP_NUMBER_TYPE);
+ pop(object);
+ Check(ne, kOperandIsANumber);
+ }
+}
void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
@@ -4228,28 +4471,52 @@ void MacroAssembler::StoreDouble(DoubleRegister src, const MemOperand& mem,
}
}
-
void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
Register scratch_reg,
+ Register scratch2_reg,
Label* no_memento_found) {
- ExternalReference new_space_start =
- ExternalReference::new_space_start(isolate());
+ Label map_check;
+ Label top_check;
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
- addi(scratch_reg, receiver_reg,
- Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
- Cmpi(scratch_reg, Operand(new_space_start), r0);
- blt(no_memento_found);
- mov(ip, Operand(new_space_allocation_top));
- LoadP(ip, MemOperand(ip));
- cmp(scratch_reg, ip);
+ const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
+ const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+ Register mask = scratch2_reg;
+
+ DCHECK(!AreAliased(receiver_reg, scratch_reg, mask));
+
+ // Bail out if the object is not in new space.
+ JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
+
+ DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
+ lis(mask, Operand((~Page::kPageAlignmentMask >> 16)));
+ addi(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+
+ // If the object is in new space, we need to check whether it is on the same
+ // page as the current top.
+ Xor(r0, scratch_reg, Operand(new_space_allocation_top));
+ and_(r0, r0, mask, SetRC);
+ beq(&top_check, cr0);
+ // The object is on a different page than allocation top. Bail out if the
+ // object sits on the page boundary as no memento can follow and we cannot
+ // touch the memory following it.
+ xor_(r0, scratch_reg, receiver_reg);
+ and_(r0, r0, mask, SetRC);
+ bne(no_memento_found, cr0);
+ // Continue with the actual map check.
+ b(&map_check);
+ // If top is on the same page as the current object, we need to check whether
+ // we are below top.
+ bind(&top_check);
+ Cmpi(scratch_reg, Operand(new_space_allocation_top), r0);
bgt(no_memento_found);
- LoadP(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
+ // Memento map check.
+ bind(&map_check);
+ LoadP(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
Cmpi(scratch_reg, Operand(isolate()->factory()->allocation_memento_map()),
r0);
}
-
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
Register reg4, Register reg5,
Register reg6) {
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index d9dbd56827..a529b627f2 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -156,12 +156,6 @@ class MacroAssembler : public Assembler {
void Call(Label* target);
- // Emit call to the code we are currently generating.
- void CallSelf() {
- Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
- Call(self, RelocInfo::CODE_TARGET);
- }
-
// Register move. May do nothing if the registers are identical.
void Move(Register dst, Smi* smi) { LoadSmiLiteral(dst, smi); }
void Move(Register dst, Handle<Object> value);
@@ -344,10 +338,14 @@ class MacroAssembler : public Assembler {
addi(sp, sp, Operand(5 * kPointerSize));
}
- // Push a fixed frame, consisting of lr, fp, context and
- // JS function / marker id if marker_reg is a valid register.
- void PushFixedFrame(Register marker_reg = no_reg);
- void PopFixedFrame(Register marker_reg = no_reg);
+ // Push a fixed frame, consisting of lr, fp, constant pool.
+ void PushCommonFrame(Register marker_reg = no_reg);
+
+ // Push a standard frame, consisting of lr, fp, constant pool,
+ // context and JS function
+ void PushStandardFrame(Register function_reg);
+
+ void PopCommonFrame(Register marker_reg = no_reg);
// Restore caller's frame pointer and return address prior to being
// overwritten by tail call stack preparation.
@@ -416,8 +414,24 @@ class MacroAssembler : public Assembler {
FPRoundingMode rounding_mode = kRoundToZero);
#endif
+#if !V8_TARGET_ARCH_PPC64
+ void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, Register scratch, Register shift);
+ void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, uint32_t shift);
+ void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, Register scratch, Register shift);
+ void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, uint32_t shift);
+ void ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, Register scratch, Register shift);
+ void ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, uint32_t shift);
+#endif
+
// Generates function and stub prologue code.
- void StubPrologue(Register base = no_reg, int prologue_offset = 0);
+ void StubPrologue(StackFrame::Type type, Register base = no_reg,
+ int prologue_offset = 0);
void Prologue(bool code_pre_aging, Register base, int prologue_offset = 0);
// Enter exit frame.
@@ -564,6 +578,15 @@ class MacroAssembler : public Assembler {
// ---------------------------------------------------------------------------
// JavaScript invokes
+ // Removes current frame and its arguments from the stack preserving
+ // the arguments and a return address pushed to the stack for the next call.
+ // Both |callee_args_count| and |caller_args_count_reg| do not include
+ // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
+ // is trashed.
+ void PrepareForTailCall(const ParameterCount& callee_args_count,
+ Register caller_args_count_reg, Register scratch0,
+ Register scratch1);
+
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
@@ -1096,14 +1119,16 @@ class MacroAssembler : public Assembler {
// (for consistency between 32/64-bit).
// Extract consecutive bits (defined by rangeStart - rangeEnd) from src
- // and place them into the least significant bits of dst.
+ // and, if !test, shift them into the least significant bits of dst.
inline void ExtractBitRange(Register dst, Register src, int rangeStart,
- int rangeEnd, RCBit rc = LeaveRC) {
+ int rangeEnd, RCBit rc = LeaveRC,
+ bool test = false) {
DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
int rotate = (rangeEnd == 0) ? 0 : kBitsPerPointer - rangeEnd;
int width = rangeStart - rangeEnd + 1;
- if (rc == SetRC && rangeEnd == 0 && width <= 16) {
- andi(dst, src, Operand((1 << width) - 1));
+ if (rc == SetRC && rangeStart < 16 && (rangeEnd == 0 || test)) {
+ // Prefer faster andi when applicable.
+ andi(dst, src, Operand(((1 << width) - 1) << rangeEnd));
} else {
#if V8_TARGET_ARCH_PPC64
rldicl(dst, src, rotate, kBitsPerPointer - width, rc);
@@ -1115,14 +1140,14 @@ class MacroAssembler : public Assembler {
}
inline void ExtractBit(Register dst, Register src, uint32_t bitNumber,
- RCBit rc = LeaveRC) {
- ExtractBitRange(dst, src, bitNumber, bitNumber, rc);
+ RCBit rc = LeaveRC, bool test = false) {
+ ExtractBitRange(dst, src, bitNumber, bitNumber, rc, test);
}
// Extract consecutive bits (defined by mask) from src and place them
// into the least significant bits of dst.
inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
- RCBit rc = LeaveRC) {
+ RCBit rc = LeaveRC, bool test = false) {
int start = kBitsPerPointer - 1;
int end;
uintptr_t bit = (1L << start);
@@ -1142,25 +1167,25 @@ class MacroAssembler : public Assembler {
// 1-bits in mask must be contiguous
DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);
- ExtractBitRange(dst, src, start, end, rc);
+ ExtractBitRange(dst, src, start, end, rc, test);
}
// Test single bit in value.
inline void TestBit(Register value, int bitNumber, Register scratch = r0) {
- ExtractBitRange(scratch, value, bitNumber, bitNumber, SetRC);
+ ExtractBitRange(scratch, value, bitNumber, bitNumber, SetRC, true);
}
// Test consecutive bit range in value. Range is defined by
// rangeStart - rangeEnd.
inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
Register scratch = r0) {
- ExtractBitRange(scratch, value, rangeStart, rangeEnd, SetRC);
+ ExtractBitRange(scratch, value, rangeStart, rangeEnd, SetRC, true);
}
// Test consecutive bit range in value. Range is defined by mask.
inline void TestBitMask(Register value, uintptr_t mask,
Register scratch = r0) {
- ExtractBitMask(scratch, value, mask, SetRC);
+ ExtractBitMask(scratch, value, mask, SetRC, true);
}
@@ -1307,6 +1332,9 @@ class MacroAssembler : public Assembler {
// Jump if either of the registers contain a smi.
void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
+ // Abort execution if argument is a number, enabled via --debug-code.
+ void AssertNotNumber(Register object);
+
// Abort execution if argument is a smi, enabled via --debug-code.
void AssertNotSmi(Register object);
void AssertSmi(Register object);
@@ -1481,13 +1509,15 @@ class MacroAssembler : public Assembler {
// If allocation info is present, condition flags are set to eq.
void TestJSArrayForAllocationMemento(Register receiver_reg,
Register scratch_reg,
+ Register scratch2_reg,
Label* no_memento_found);
void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
Register scratch_reg,
+ Register scratch2_reg,
Label* memento_found) {
Label no_memento_found;
- TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
+ TestJSArrayForAllocationMemento(receiver_reg, scratch_reg, scratch2_reg,
&no_memento_found);
beq(memento_found);
bind(&no_memento_found);
diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/ppc/simulator-ppc.cc
index 9a1f9e035f..79dc8252b7 100644
--- a/deps/v8/src/ppc/simulator-ppc.cc
+++ b/deps/v8/src/ppc/simulator-ppc.cc
@@ -1733,8 +1733,8 @@ bool Simulator::ExecuteExt2_10bit(Instruction* instr) {
int ra = instr->RAValue();
int rb = instr->RBValue();
uint32_t rs_val = get_register(rs);
- uintptr_t rb_val = get_register(rb);
- intptr_t result = rs_val >> (rb_val & 0x3f);
+ uintptr_t rb_val = get_register(rb) & 0x3f;
+ intptr_t result = (rb_val > 31) ? 0 : rs_val >> rb_val;
set_register(ra, result);
if (instr->Bit(0)) { // RC bit set
SetCR0(result);
@@ -1747,8 +1747,8 @@ bool Simulator::ExecuteExt2_10bit(Instruction* instr) {
int ra = instr->RAValue();
int rb = instr->RBValue();
uintptr_t rs_val = get_register(rs);
- uintptr_t rb_val = get_register(rb);
- intptr_t result = rs_val >> (rb_val & 0x7f);
+ uintptr_t rb_val = get_register(rb) & 0x7f;
+ intptr_t result = (rb_val > 63) ? 0 : rs_val >> rb_val;
set_register(ra, result);
if (instr->Bit(0)) { // RC bit set
SetCR0(result);
@@ -1761,8 +1761,8 @@ bool Simulator::ExecuteExt2_10bit(Instruction* instr) {
int ra = instr->RAValue();
int rb = instr->RBValue();
int32_t rs_val = get_register(rs);
- intptr_t rb_val = get_register(rb);
- intptr_t result = rs_val >> (rb_val & 0x3f);
+ intptr_t rb_val = get_register(rb) & 0x3f;
+ intptr_t result = (rb_val > 31) ? rs_val >> 31 : rs_val >> rb_val;
set_register(ra, result);
if (instr->Bit(0)) { // RC bit set
SetCR0(result);
@@ -1775,8 +1775,8 @@ bool Simulator::ExecuteExt2_10bit(Instruction* instr) {
int ra = instr->RAValue();
int rb = instr->RBValue();
intptr_t rs_val = get_register(rs);
- intptr_t rb_val = get_register(rb);
- intptr_t result = rs_val >> (rb_val & 0x7f);
+ intptr_t rb_val = get_register(rb) & 0x7f;
+ intptr_t result = (rb_val > 63) ? rs_val >> 63 : rs_val >> rb_val;
set_register(ra, result);
if (instr->Bit(0)) { // RC bit set
SetCR0(result);
@@ -2025,19 +2025,37 @@ bool Simulator::ExecuteExt2_9bit_part1(Instruction* instr) {
uintptr_t ra_val = get_register(ra);
uintptr_t rb_val = get_register(rb);
uintptr_t alu_out = ~ra_val + rb_val + 1;
- set_register(rt, alu_out);
- // If the sign of rb and alu_out don't match, carry = 0
- if ((alu_out ^ rb_val) & 0x80000000) {
- special_reg_xer_ &= ~0xF0000000;
- } else {
+ // Set carry
+ if (ra_val <= rb_val) {
special_reg_xer_ = (special_reg_xer_ & ~0xF0000000) | 0x20000000;
+ } else {
+ special_reg_xer_ &= ~0xF0000000;
}
+ set_register(rt, alu_out);
if (instr->Bit(0)) { // RC bit set
SetCR0(alu_out);
}
// todo - handle OE bit
break;
}
+ case SUBFEX: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ // int oe = instr->Bit(10);
+ uintptr_t ra_val = get_register(ra);
+ uintptr_t rb_val = get_register(rb);
+ uintptr_t alu_out = ~ra_val + rb_val;
+ if (special_reg_xer_ & 0x20000000) {
+ alu_out += 1;
+ }
+ set_register(rt, alu_out);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(static_cast<intptr_t>(alu_out));
+ }
+ // todo - handle OE bit
+ break;
+ }
case ADDCX: {
int rt = instr->RTValue();
int ra = instr->RAValue();
@@ -2046,7 +2064,7 @@ bool Simulator::ExecuteExt2_9bit_part1(Instruction* instr) {
uintptr_t ra_val = get_register(ra);
uintptr_t rb_val = get_register(rb);
uintptr_t alu_out = ra_val + rb_val;
- // Check overflow
+ // Set carry
if (~ra_val < rb_val) {
special_reg_xer_ = (special_reg_xer_ & ~0xF0000000) | 0x20000000;
} else {
@@ -2059,6 +2077,24 @@ bool Simulator::ExecuteExt2_9bit_part1(Instruction* instr) {
// todo - handle OE bit
break;
}
+ case ADDEX: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ // int oe = instr->Bit(10);
+ uintptr_t ra_val = get_register(ra);
+ uintptr_t rb_val = get_register(rb);
+ uintptr_t alu_out = ra_val + rb_val;
+ if (special_reg_xer_ & 0x20000000) {
+ alu_out += 1;
+ }
+ set_register(rt, alu_out);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(static_cast<intptr_t>(alu_out));
+ }
+ // todo - handle OE bit
+ break;
+ }
case MULHWX: {
int rt = instr->RTValue();
int ra = instr->RAValue();
@@ -2117,8 +2153,8 @@ bool Simulator::ExecuteExt2_9bit_part1(Instruction* instr) {
int ra = instr->RAValue();
int rb = instr->RBValue();
uint32_t rs_val = get_register(rs);
- uintptr_t rb_val = get_register(rb);
- uint32_t result = rs_val << (rb_val & 0x3f);
+ uintptr_t rb_val = get_register(rb) & 0x3f;
+ uint32_t result = (rb_val > 31) ? 0 : rs_val << rb_val;
set_register(ra, result);
if (instr->Bit(0)) { // RC bit set
SetCR0(result);
@@ -2131,8 +2167,8 @@ bool Simulator::ExecuteExt2_9bit_part1(Instruction* instr) {
int ra = instr->RAValue();
int rb = instr->RBValue();
uintptr_t rs_val = get_register(rs);
- uintptr_t rb_val = get_register(rb);
- uintptr_t result = rs_val << (rb_val & 0x7f);
+ uintptr_t rb_val = get_register(rb) & 0x7f;
+ uintptr_t result = (rb_val > 63) ? 0 : rs_val << rb_val;
set_register(ra, result);
if (instr->Bit(0)) { // RC bit set
SetCR0(result);
diff --git a/deps/v8/src/profiler/allocation-tracker.cc b/deps/v8/src/profiler/allocation-tracker.cc
index 942068ea08..791cdf03f0 100644
--- a/deps/v8/src/profiler/allocation-tracker.cc
+++ b/deps/v8/src/profiler/allocation-tracker.cc
@@ -149,7 +149,8 @@ void AddressToTraceMap::Clear() {
void AddressToTraceMap::Print() {
- PrintF("[AddressToTraceMap (%" V8PRIuPTR "): \n", ranges_.size());
+ PrintF("[AddressToTraceMap (%" V8_SIZET_PREFIX V8PRIuPTR "): \n",
+ ranges_.size());
for (RangeMap::iterator it = ranges_.begin(); it != ranges_.end(); ++it) {
PrintF("[%p - %p] => %u\n", it->second.start, it->first,
it->second.trace_node_id);
@@ -226,7 +227,7 @@ void AllocationTracker::AllocationEvent(Address addr, int size) {
// Mark the new block as FreeSpace to make sure the heap is iterable
// while we are capturing stack trace.
- heap->CreateFillerObjectAt(addr, size);
+ heap->CreateFillerObjectAt(addr, size, ClearRecordedSlots::kNo);
Isolate* isolate = heap->isolate();
int length = 0;
diff --git a/deps/v8/src/profiler/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index b6c7945797..47585b7b08 100644
--- a/deps/v8/src/profiler/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -211,10 +211,8 @@ void CpuProfiler::CallbackEvent(Name* name, Address entry_point) {
processor_->Enqueue(evt_rec);
}
-
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code,
- const char* name) {
+ AbstractCode* code, const char* name) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->start = code->address();
@@ -222,14 +220,13 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
tag, profiles_->GetFunctionName(name), CodeEntry::kEmptyNamePrefix,
CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
+ RecordInliningInfo(rec->entry, code);
rec->size = code->ExecutableSize();
processor_->Enqueue(evt_rec);
}
-
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code,
- Name* name) {
+ AbstractCode* code, Name* name) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->start = code->address();
@@ -237,12 +234,13 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
tag, profiles_->GetFunctionName(name), CodeEntry::kEmptyNamePrefix,
CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
+ RecordInliningInfo(rec->entry, code);
rec->size = code->ExecutableSize();
processor_->Enqueue(evt_rec);
}
-
-void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code,
+void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
+ AbstractCode* code,
SharedFunctionInfo* shared,
CompilationInfo* info, Name* script_name) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
@@ -250,9 +248,11 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code,
rec->start = code->address();
rec->entry = profiles_->NewCodeEntry(
tag, profiles_->GetFunctionName(shared->DebugName()),
- CodeEntry::kEmptyNamePrefix, profiles_->GetName(script_name),
+ CodeEntry::kEmptyNamePrefix,
+ profiles_->GetName(InferScriptName(script_name, shared)),
CpuProfileNode::kNoLineNumberInfo, CpuProfileNode::kNoColumnNumberInfo,
NULL, code->instruction_start());
+ RecordInliningInfo(rec->entry, code);
if (info) {
rec->entry->set_inlined_function_infos(info->inlined_function_infos());
}
@@ -261,46 +261,65 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code,
processor_->Enqueue(evt_rec);
}
-
-void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code,
+void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
+ AbstractCode* abstract_code,
SharedFunctionInfo* shared,
CompilationInfo* info, Name* script_name,
int line, int column) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->start = code->address();
+ rec->start = abstract_code->address();
Script* script = Script::cast(shared->script());
JITLineInfoTable* line_table = NULL;
if (script) {
- line_table = new JITLineInfoTable();
- for (RelocIterator it(code); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (RelocInfo::IsPosition(mode)) {
- int position = static_cast<int>(it.rinfo()->data());
- if (position >= 0) {
- int pc_offset = static_cast<int>(it.rinfo()->pc() - code->address());
- int line_number = script->GetLineNumber(position) + 1;
- line_table->SetPosition(pc_offset, line_number);
- }
+ if (abstract_code->IsCode()) {
+ Code* code = abstract_code->GetCode();
+ int start_position = shared->start_position();
+ int end_position = shared->end_position();
+ line_table = new JITLineInfoTable();
+ for (RelocIterator it(code); !it.done(); it.next()) {
+ RelocInfo* reloc_info = it.rinfo();
+ if (!RelocInfo::IsPosition(reloc_info->rmode())) continue;
+ int position = static_cast<int>(reloc_info->data());
+ // TODO(alph): in case of inlining the position may correspond
+ // to an inlined function source code. Do not collect positions
+ // that fall beyond the function source code. There's however a
+ // chance the inlined function has similar positions but in another
+ // script. So the proper fix is to store script_id in some form
+ // along with the inlined function positions.
+ if (position < start_position || position >= end_position) continue;
+ int pc_offset = static_cast<int>(reloc_info->pc() - code->address());
+ int line_number = script->GetLineNumber(position) + 1;
+ line_table->SetPosition(pc_offset, line_number);
+ }
+ } else {
+ BytecodeArray* bytecode = abstract_code->GetBytecodeArray();
+ line_table = new JITLineInfoTable();
+ interpreter::SourcePositionTableIterator it(
+ bytecode->source_position_table());
+ for (; !it.done(); it.Advance()) {
+ int line_number = script->GetLineNumber(it.source_position()) + 1;
+ int pc_offset = it.bytecode_offset() + BytecodeArray::kHeaderSize;
+ line_table->SetPosition(pc_offset, line_number);
}
}
}
rec->entry = profiles_->NewCodeEntry(
tag, profiles_->GetFunctionName(shared->DebugName()),
- CodeEntry::kEmptyNamePrefix, profiles_->GetName(script_name), line,
- column, line_table, code->instruction_start());
+ CodeEntry::kEmptyNamePrefix,
+ profiles_->GetName(InferScriptName(script_name, shared)), line, column,
+ line_table, abstract_code->instruction_start());
+ RecordInliningInfo(rec->entry, abstract_code);
if (info) {
rec->entry->set_inlined_function_infos(info->inlined_function_infos());
}
rec->entry->FillFunctionInfo(shared);
- rec->size = code->ExecutableSize();
+ rec->size = abstract_code->ExecutableSize();
processor_->Enqueue(evt_rec);
}
-
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code,
- int args_count) {
+ AbstractCode* code, int args_count) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->start = code->address();
@@ -308,21 +327,21 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
tag, profiles_->GetName(args_count), "args_count: ",
CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
+ RecordInliningInfo(rec->entry, code);
rec->size = code->ExecutableSize();
processor_->Enqueue(evt_rec);
}
-
-void CpuProfiler::CodeMoveEvent(Address from, Address to) {
+void CpuProfiler::CodeMoveEvent(AbstractCode* from, Address to) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_MOVE);
CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
- rec->from = from;
+ rec->from = from->address();
rec->to = to;
processor_->Enqueue(evt_rec);
}
-
-void CpuProfiler::CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) {
+void CpuProfiler::CodeDisableOptEvent(AbstractCode* code,
+ SharedFunctionInfo* shared) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_DISABLE_OPT);
CodeDisableOptEventRecord* rec = &evt_rec.CodeDisableOptEventRecord_;
rec->start = code->address();
@@ -330,7 +349,6 @@ void CpuProfiler::CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) {
processor_->Enqueue(evt_rec);
}
-
void CpuProfiler::CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_DEOPT);
CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_;
@@ -343,11 +361,6 @@ void CpuProfiler::CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta) {
processor_->AddDeoptStack(isolate_, pc, fp_to_sp_delta);
}
-
-void CpuProfiler::CodeDeleteEvent(Address from) {
-}
-
-
void CpuProfiler::GetterCallbackEvent(Name* name, Address entry_point) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
@@ -360,8 +373,7 @@ void CpuProfiler::GetterCallbackEvent(Name* name, Address entry_point) {
processor_->Enqueue(evt_rec);
}
-
-void CpuProfiler::RegExpCodeCreateEvent(Code* code, String* source) {
+void CpuProfiler::RegExpCodeCreateEvent(AbstractCode* code, String* source) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->start = code->address();
@@ -386,6 +398,60 @@ void CpuProfiler::SetterCallbackEvent(Name* name, Address entry_point) {
processor_->Enqueue(evt_rec);
}
+Name* CpuProfiler::InferScriptName(Name* name, SharedFunctionInfo* info) {
+ if (name->IsString() && String::cast(name)->length()) return name;
+ if (!info->script()->IsScript()) return name;
+ Object* source_url = Script::cast(info->script())->source_url();
+ return source_url->IsName() ? Name::cast(source_url) : name;
+}
+
+void CpuProfiler::RecordInliningInfo(CodeEntry* entry,
+ AbstractCode* abstract_code) {
+ if (!abstract_code->IsCode()) return;
+ Code* code = abstract_code->GetCode();
+ if (code->kind() != Code::OPTIMIZED_FUNCTION) return;
+ DeoptimizationInputData* deopt_input_data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int deopt_count = deopt_input_data->DeoptCount();
+ for (int i = 0; i < deopt_count; i++) {
+ int pc_offset = deopt_input_data->Pc(i)->value();
+ if (pc_offset == -1) continue;
+ int translation_index = deopt_input_data->TranslationIndex(i)->value();
+ TranslationIterator it(deopt_input_data->TranslationByteArray(),
+ translation_index);
+ Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
+ DCHECK_EQ(Translation::BEGIN, opcode);
+ it.Skip(Translation::NumberOfOperandsFor(opcode));
+ int depth = 0;
+ std::vector<CodeEntry*> inline_stack;
+ while (it.HasNext() &&
+ Translation::BEGIN !=
+ (opcode = static_cast<Translation::Opcode>(it.Next()))) {
+ if (opcode != Translation::JS_FRAME &&
+ opcode != Translation::INTERPRETED_FRAME) {
+ it.Skip(Translation::NumberOfOperandsFor(opcode));
+ continue;
+ }
+ it.Next(); // Skip ast_id
+ int shared_info_id = it.Next();
+ it.Next(); // Skip height
+ SharedFunctionInfo* shared_info = SharedFunctionInfo::cast(
+ deopt_input_data->LiteralArray()->get(shared_info_id));
+ if (!depth++) continue; // Skip the current function itself.
+ CodeEntry* inline_entry = new CodeEntry(
+ entry->tag(), profiles_->GetFunctionName(shared_info->DebugName()),
+ CodeEntry::kEmptyNamePrefix, entry->resource_name(),
+ CpuProfileNode::kNoLineNumberInfo,
+ CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
+ inline_entry->FillFunctionInfo(shared_info);
+ inline_stack.push_back(inline_entry);
+ }
+ if (!inline_stack.empty()) {
+ entry->AddInlineStack(pc_offset, inline_stack);
+ DCHECK(inline_stack.empty());
+ }
+ }
+}
CpuProfiler::CpuProfiler(Isolate* isolate)
: isolate_(isolate),
diff --git a/deps/v8/src/profiler/cpu-profiler.h b/deps/v8/src/profiler/cpu-profiler.h
index 1a1249c8b2..a04ee3c3a8 100644
--- a/deps/v8/src/profiler/cpu-profiler.h
+++ b/deps/v8/src/profiler/cpu-profiler.h
@@ -201,7 +201,7 @@ class CpuProfiler : public CodeEventListener {
ProfileGenerator* test_generator,
ProfilerEventsProcessor* test_processor);
- virtual ~CpuProfiler();
+ ~CpuProfiler() override;
void set_sampling_interval(base::TimeDelta value);
void CollectSample();
@@ -220,29 +220,28 @@ class CpuProfiler : public CodeEventListener {
// Must be called via PROFILE macro, otherwise will crash when
// profiling is not enabled.
- virtual void CallbackEvent(Name* name, Address entry_point);
- virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code, const char* comment);
- virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code, Name* name);
- virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code,
- SharedFunctionInfo* shared,
- CompilationInfo* info, Name* script_name);
- virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code,
- SharedFunctionInfo* shared,
- CompilationInfo* info, Name* script_name,
- int line, int column);
- virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code, int args_count);
- virtual void CodeMovingGCEvent() {}
- virtual void CodeMoveEvent(Address from, Address to);
- virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared);
- virtual void CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta);
- virtual void CodeDeleteEvent(Address from);
- virtual void GetterCallbackEvent(Name* name, Address entry_point);
- virtual void RegExpCodeCreateEvent(Code* code, String* source);
- virtual void SetterCallbackEvent(Name* name, Address entry_point);
- virtual void SharedFunctionInfoMoveEvent(Address from, Address to) {}
+ void CallbackEvent(Name* name, Address entry_point) override;
+ void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
+ const char* comment) override;
+ void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
+ Name* name) override;
+ void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
+ SharedFunctionInfo* shared, CompilationInfo* info,
+ Name* script_name) override;
+ void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
+ SharedFunctionInfo* shared, CompilationInfo* info,
+ Name* script_name, int line, int column) override;
+ void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
+ int args_count) override;
+ void CodeMovingGCEvent() override {}
+ void CodeMoveEvent(AbstractCode* from, Address to) override;
+ void CodeDisableOptEvent(AbstractCode* code,
+ SharedFunctionInfo* shared) override;
+ void CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta);
+ void GetterCallbackEvent(Name* name, Address entry_point) override;
+ void RegExpCodeCreateEvent(AbstractCode* code, String* source) override;
+ void SetterCallbackEvent(Name* name, Address entry_point) override;
+ void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
INLINE(bool is_profiling() const) { return is_profiling_; }
bool* is_profiling_address() {
@@ -259,6 +258,8 @@ class CpuProfiler : public CodeEventListener {
void StopProcessor();
void ResetProfiles();
void LogBuiltins();
+ void RecordInliningInfo(CodeEntry* entry, AbstractCode* abstract_code);
+ Name* InferScriptName(Name* name, SharedFunctionInfo* info);
Isolate* isolate_;
base::TimeDelta sampling_interval_;
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index fc43f9f471..748f3074a1 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -1383,9 +1383,9 @@ void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
SetInternalReference(obj, entry,
"debug_info", shared->debug_info(),
SharedFunctionInfo::kDebugInfoOffset);
- SetInternalReference(obj, entry,
- "inferred_name", shared->inferred_name(),
- SharedFunctionInfo::kInferredNameOffset);
+ SetInternalReference(obj, entry, "function_identifier",
+ shared->function_identifier(),
+ SharedFunctionInfo::kFunctionIdentifierOffset);
SetInternalReference(obj, entry,
"optimized_code_map", shared->optimized_code_map(),
SharedFunctionInfo::kOptimizedCodeMapOffset);
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index 58d06c9db2..abcd9e5d88 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -50,6 +50,11 @@ const char* const CodeEntry::kNoDeoptReason = "";
CodeEntry::~CodeEntry() {
delete line_info_;
+ for (auto location : inline_locations_) {
+ for (auto entry : location.second) {
+ delete entry;
+ }
+ }
}
@@ -100,6 +105,18 @@ int CodeEntry::GetSourceLine(int pc_offset) const {
return v8::CpuProfileNode::kNoLineNumberInfo;
}
+void CodeEntry::AddInlineStack(int pc_offset,
+ std::vector<CodeEntry*>& inline_stack) {
+ // It's better to use std::move to place the vector into the map,
+ // but it's not supported by the current stdlibc++ on MacOS.
+ inline_locations_.insert(std::make_pair(pc_offset, std::vector<CodeEntry*>()))
+ .first->second.swap(inline_stack);
+}
+
+const std::vector<CodeEntry*>* CodeEntry::GetInlineStack(int pc_offset) const {
+ auto it = inline_locations_.find(pc_offset);
+ return it != inline_locations_.end() ? &it->second : NULL;
+}
void CodeEntry::FillFunctionInfo(SharedFunctionInfo* shared) {
if (!shared->script()->IsScript()) return;
@@ -109,7 +126,6 @@ void CodeEntry::FillFunctionInfo(SharedFunctionInfo* shared) {
set_bailout_reason(GetBailoutReason(shared->disable_optimization_reason()));
}
-
CpuProfileDeoptInfo CodeEntry::GetDeoptInfo() {
DCHECK(has_deopt_info());
@@ -274,17 +290,14 @@ unsigned ProfileTree::GetFunctionId(const ProfileNode* node) {
return static_cast<unsigned>(reinterpret_cast<uintptr_t>(entry->value));
}
-ProfileNode* ProfileTree::AddPathFromEnd(const Vector<CodeEntry*>& path,
+ProfileNode* ProfileTree::AddPathFromEnd(const std::vector<CodeEntry*>& path,
int src_line, bool update_stats) {
ProfileNode* node = root_;
CodeEntry* last_entry = NULL;
- for (CodeEntry** entry = path.start() + path.length() - 1;
- entry != path.start() - 1;
- --entry) {
- if (*entry != NULL) {
- node = node->FindOrAddChild(*entry);
- last_entry = *entry;
- }
+ for (auto it = path.rbegin(); it != path.rend(); ++it) {
+ if (*it == NULL) continue;
+ last_entry = *it;
+ node = node->FindOrAddChild(*it);
}
if (last_entry && last_entry->has_deopt_info()) {
node->CollectDeoptInfo(last_entry);
@@ -356,7 +369,7 @@ CpuProfile::CpuProfile(Isolate* isolate, const char* title, bool record_samples)
top_down_(isolate) {}
void CpuProfile::AddPath(base::TimeTicks timestamp,
- const Vector<CodeEntry*>& path, int src_line,
+ const std::vector<CodeEntry*>& path, int src_line,
bool update_stats) {
ProfileNode* top_frame_node =
top_down_.AddPathFromEnd(path, src_line, update_stats);
@@ -525,8 +538,8 @@ void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) {
}
void CpuProfilesCollection::AddPathToCurrentProfiles(
- base::TimeTicks timestamp, const Vector<CodeEntry*>& path, int src_line,
- bool update_stats) {
+ base::TimeTicks timestamp, const std::vector<CodeEntry*>& path,
+ int src_line, bool update_stats) {
// As starting / stopping profiles is rare relatively to this
// method, we don't bother minimizing the duration of lock holding,
// e.g. copying contents of the list to a local vector.
@@ -576,12 +589,10 @@ ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
void ProfileGenerator::RecordTickSample(const TickSample& sample) {
- // Allocate space for stack frames + pc + function + vm-state.
- ScopedVector<CodeEntry*> entries(sample.frames_count + 3);
- // As actual number of decoded code entries may vary, initialize
- // entries vector with NULL values.
- CodeEntry** entry = entries.start();
- memset(entry, 0, entries.length() * sizeof(*entry));
+ std::vector<CodeEntry*> entries;
+ // Conservatively reserve space for stack frames + pc + function + vm-state.
+ // There could in fact be more of them because of inlined entries.
+ entries.reserve(sample.frames_count + 3);
// The ProfileNode knows nothing about all versions of generated code for
// the same JS function. The line number information associated with
@@ -597,13 +608,14 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
// Don't use PC when in external callback code, as it can point
// inside callback's code, and we will erroneously report
// that a callback calls itself.
- *entry++ = code_map_.FindEntry(sample.external_callback_entry);
+ entries.push_back(code_map_.FindEntry(sample.external_callback_entry));
} else {
CodeEntry* pc_entry = code_map_.FindEntry(sample.pc);
// If there is no pc_entry we're likely in native code.
// Find out, if top of stack was pointing inside a JS function
// meaning that we have encountered a frameless invocation.
if (!pc_entry && (sample.top_frame_type == StackFrame::JAVA_SCRIPT ||
+ sample.top_frame_type == StackFrame::INTERPRETED ||
sample.top_frame_type == StackFrame::OPTIMIZED)) {
pc_entry = code_map_.FindEntry(sample.tos);
}
@@ -619,7 +631,7 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
src_line = pc_entry->line_number();
}
src_line_not_found = false;
- *entry++ = pc_entry;
+ entries.push_back(pc_entry);
if (pc_entry->builtin_id() == Builtins::kFunctionPrototypeApply ||
pc_entry->builtin_id() == Builtins::kFunctionPrototypeCall) {
@@ -630,7 +642,7 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
// former case we don't so we simply replace the frame with
// 'unresolved' entry.
if (sample.top_frame_type == StackFrame::JAVA_SCRIPT) {
- *entry++ = unresolved_entry_;
+ entries.push_back(unresolved_entry_);
}
}
}
@@ -639,35 +651,43 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
for (const Address *stack_pos = sample.stack,
*stack_end = stack_pos + sample.frames_count;
stack_pos != stack_end; ++stack_pos) {
- *entry = code_map_.FindEntry(*stack_pos);
+ CodeEntry* entry = code_map_.FindEntry(*stack_pos);
- // Skip unresolved frames (e.g. internal frame) and get source line of
- // the first JS caller.
- if (src_line_not_found && *entry) {
+ if (entry) {
+ // Find out if the entry has an inlining stack associated.
int pc_offset =
- static_cast<int>(*stack_pos - (*entry)->instruction_start());
- src_line = (*entry)->GetSourceLine(pc_offset);
- if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
- src_line = (*entry)->line_number();
+ static_cast<int>(*stack_pos - entry->instruction_start());
+ const std::vector<CodeEntry*>* inline_stack =
+ entry->GetInlineStack(pc_offset);
+ if (inline_stack) {
+ entries.insert(entries.end(), inline_stack->rbegin(),
+ inline_stack->rend());
+ }
+ // Skip unresolved frames (e.g. internal frame) and get source line of
+ // the first JS caller.
+ if (src_line_not_found) {
+ src_line = entry->GetSourceLine(pc_offset);
+ if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
+ src_line = entry->line_number();
+ }
+ src_line_not_found = false;
}
- src_line_not_found = false;
}
-
- entry++;
+ entries.push_back(entry);
}
}
if (FLAG_prof_browser_mode) {
bool no_symbolized_entries = true;
- for (CodeEntry** e = entries.start(); e != entry; ++e) {
- if (*e != NULL) {
+ for (auto e : entries) {
+ if (e != NULL) {
no_symbolized_entries = false;
break;
}
}
// If no frames were symbolized, put the VM state entry in.
if (no_symbolized_entries) {
- *entry++ = EntryForVMState(sample.state);
+ entries.push_back(EntryForVMState(sample.state));
}
}
diff --git a/deps/v8/src/profiler/profile-generator.h b/deps/v8/src/profiler/profile-generator.h
index 3c976d6292..194b490929 100644
--- a/deps/v8/src/profiler/profile-generator.h
+++ b/deps/v8/src/profiler/profile-generator.h
@@ -99,7 +99,11 @@ class CodeEntry {
int GetSourceLine(int pc_offset) const;
+ void AddInlineStack(int pc_offset, std::vector<CodeEntry*>& inline_stack);
+ const std::vector<CodeEntry*>* GetInlineStack(int pc_offset) const;
+
Address instruction_start() const { return instruction_start_; }
+ Logger::LogEventsAndTags tag() const { return TagField::decode(bit_field_); }
static const char* const kEmptyNamePrefix;
static const char* const kEmptyResourceName;
@@ -109,7 +113,6 @@ class CodeEntry {
private:
class TagField : public BitField<Logger::LogEventsAndTags, 0, 8> {};
class BuiltinIdField : public BitField<Builtins::Name, 8, 8> {};
- Logger::LogEventsAndTags tag() const { return TagField::decode(bit_field_); }
uint32_t bit_field_;
const char* name_prefix_;
@@ -125,6 +128,8 @@ class CodeEntry {
size_t pc_offset_;
JITLineInfoTable* line_info_;
Address instruction_start_;
+ // Should be an unordered_map, but it doesn't currently work on Win & MacOS.
+ std::map<int, std::vector<CodeEntry*>> inline_locations_;
std::vector<InlinedFunctionInfo> inlined_function_infos_;
@@ -191,7 +196,7 @@ class ProfileTree {
~ProfileTree();
ProfileNode* AddPathFromEnd(
- const Vector<CodeEntry*>& path,
+ const std::vector<CodeEntry*>& path,
int src_line = v8::CpuProfileNode::kNoLineNumberInfo,
bool update_stats = true);
ProfileNode* root() const { return root_; }
@@ -225,7 +230,7 @@ class CpuProfile {
CpuProfile(Isolate* isolate, const char* title, bool record_samples);
// Add pc -> ... -> main() call path to the profile.
- void AddPath(base::TimeTicks timestamp, const Vector<CodeEntry*>& path,
+ void AddPath(base::TimeTicks timestamp, const std::vector<CodeEntry*>& path,
int src_line, bool update_stats);
void CalculateTotalTicksAndSamplingRate();
@@ -334,8 +339,8 @@ class CpuProfilesCollection {
// Called from profile generator thread.
void AddPathToCurrentProfiles(base::TimeTicks timestamp,
- const Vector<CodeEntry*>& path, int src_line,
- bool update_stats);
+ const std::vector<CodeEntry*>& path,
+ int src_line, bool update_stats);
// Limits the number of profiles that can be simultaneously collected.
static const int kMaxSimultaneousProfiles = 100;
diff --git a/deps/v8/src/profiler/sampler.cc b/deps/v8/src/profiler/sampler.cc
index e331db9dcf..a34042453c 100644
--- a/deps/v8/src/profiler/sampler.cc
+++ b/deps/v8/src/profiler/sampler.cc
@@ -336,6 +336,14 @@ class SimulatorHelper {
reinterpret_cast<Address>(simulator_->get_register(Simulator::sp));
state->fp =
reinterpret_cast<Address>(simulator_->get_register(Simulator::fp));
+#elif V8_TARGET_ARCH_S390
+ if (!simulator_->has_bad_pc()) {
+ state->pc = reinterpret_cast<Address>(simulator_->get_pc());
+ }
+ state->sp =
+ reinterpret_cast<Address>(simulator_->get_register(Simulator::sp));
+ state->fp =
+ reinterpret_cast<Address>(simulator_->get_register(Simulator::fp));
#endif
}
@@ -441,7 +449,7 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
#else
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
-#if !(V8_OS_OPENBSD || (V8_OS_LINUX && V8_HOST_ARCH_PPC))
+#if !(V8_OS_OPENBSD || (V8_OS_LINUX && (V8_HOST_ARCH_PPC || V8_HOST_ARCH_S390)))
mcontext_t& mcontext = ucontext->uc_mcontext;
#endif
#if V8_OS_LINUX
@@ -482,6 +490,17 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
state.pc = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->nip);
state.sp = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->gpr[PT_R1]);
state.fp = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->gpr[PT_R31]);
+#elif V8_HOST_ARCH_S390
+#if V8_TARGET_ARCH_32_BIT
+ // 31-bit target will have bit 0 (MSB) of the PSW set to denote addressing
+ // mode. This bit needs to be masked out to resolve actual address.
+ state.pc =
+ reinterpret_cast<Address>(ucontext->uc_mcontext.psw.addr & 0x7FFFFFFF);
+#else
+ state.pc = reinterpret_cast<Address>(ucontext->uc_mcontext.psw.addr);
+#endif // V8_TARGET_ARCH_32_BIT
+ state.sp = reinterpret_cast<Address>(ucontext->uc_mcontext.gregs[15]);
+ state.fp = reinterpret_cast<Address>(ucontext->uc_mcontext.gregs[11]);
#endif // V8_HOST_ARCH_*
#elif V8_OS_MACOSX
#if V8_HOST_ARCH_X64
@@ -731,7 +750,18 @@ void TickSample::GetStackSample(Isolate* isolate, const v8::RegisterState& regs,
frames[i++] = isolate->c_function();
}
while (!it.done() && i < frames_limit) {
- frames[i++] = it.frame()->pc();
+ if (it.frame()->is_interpreted()) {
+ // For interpreted frames use the bytecode array pointer as the pc.
+ InterpretedFrame* frame = static_cast<InterpretedFrame*>(it.frame());
+ // Since the sampler can interrupt execution at any point the
+ // bytecode_array might be garbage, so don't dereference it.
+ Address bytecode_array =
+ reinterpret_cast<Address>(frame->GetBytecodeArray()) - kHeapObjectTag;
+ frames[i++] = bytecode_array + BytecodeArray::kHeaderSize +
+ frame->GetBytecodeOffset();
+ } else {
+ frames[i++] = it.frame()->pc();
+ }
it.Advance();
}
sample_info->frames_count = i;
diff --git a/deps/v8/src/profiler/sampler.h b/deps/v8/src/profiler/sampler.h
index 8e8ef1cfc3..dcd1255d75 100644
--- a/deps/v8/src/profiler/sampler.h
+++ b/deps/v8/src/profiler/sampler.h
@@ -58,7 +58,7 @@ struct TickSample {
unsigned frames_count : kMaxFramesCountLog2; // Number of captured frames.
bool has_external_callback : 1;
bool update_stats : 1; // Whether the sample should update aggregated stats.
- StackFrame::Type top_frame_type : 4;
+ StackFrame::Type top_frame_type : 5;
};
class Sampler {
diff --git a/deps/v8/src/profiler/sampling-heap-profiler.cc b/deps/v8/src/profiler/sampling-heap-profiler.cc
index c13538c356..a32cae3ef9 100644
--- a/deps/v8/src/profiler/sampling-heap-profiler.cc
+++ b/deps/v8/src/profiler/sampling-heap-profiler.cc
@@ -99,7 +99,8 @@ void SamplingHeapProfiler::SampleObject(Address soon_object, size_t size) {
// Mark the new block as FreeSpace to make sure the heap is iterable while we
// are taking the sample.
- heap()->CreateFillerObjectAt(soon_object, static_cast<int>(size));
+ heap()->CreateFillerObjectAt(soon_object, static_cast<int>(size),
+ ClearRecordedSlots::kNo);
Local<v8::Value> loc = v8::Utils::ToLocal(obj);
@@ -199,19 +200,22 @@ v8::AllocationProfile::Node* SamplingHeapProfiler::TranslateAllocationNode(
int column = v8::AllocationProfile::kNoColumnNumberInfo;
std::vector<v8::AllocationProfile::Allocation> allocations;
allocations.reserve(node->allocations_.size());
- if (node->script_id_ != v8::UnboundScript::kNoScriptId) {
+ if (node->script_id_ != v8::UnboundScript::kNoScriptId &&
+ scripts.find(node->script_id_) != scripts.end()) {
// Cannot use std::map<T>::at because it is not available on android.
auto non_const_scripts = const_cast<std::map<int, Script*>&>(scripts);
Script* script = non_const_scripts[node->script_id_];
- if (script->name()->IsName()) {
- Name* name = Name::cast(script->name());
- script_name = ToApiHandle<v8::String>(
- isolate_->factory()->InternalizeUtf8String(names_->GetName(name)));
+ if (script) {
+ if (script->name()->IsName()) {
+ Name* name = Name::cast(script->name());
+ script_name = ToApiHandle<v8::String>(
+ isolate_->factory()->InternalizeUtf8String(names_->GetName(name)));
+ }
+ Handle<Script> script_handle(script);
+ line = 1 + Script::GetLineNumber(script_handle, node->script_position_);
+ column =
+ 1 + Script::GetColumnNumber(script_handle, node->script_position_);
}
- Handle<Script> script_handle(script);
-
- line = 1 + Script::GetLineNumber(script_handle, node->script_position_);
- column = 1 + Script::GetColumnNumber(script_handle, node->script_position_);
for (auto alloc : node->allocations_) {
allocations.push_back(ScaleSample(alloc.first, alloc.second));
}
diff --git a/deps/v8/src/property-descriptor.cc b/deps/v8/src/property-descriptor.cc
index 750f948adb..31efb413b6 100644
--- a/deps/v8/src/property-descriptor.cc
+++ b/deps/v8/src/property-descriptor.cc
@@ -18,9 +18,9 @@ namespace {
// Helper function for ToPropertyDescriptor. Comments describe steps for
// "enumerable", other properties are handled the same way.
// Returns false if an exception was thrown.
-bool GetPropertyIfPresent(Handle<Object> obj, Handle<String> name,
+bool GetPropertyIfPresent(Handle<JSReceiver> receiver, Handle<String> name,
Handle<Object>* value) {
- LookupIterator it(obj, name);
+ LookupIterator it(receiver, name, receiver);
// 4. Let hasEnumerable be HasProperty(Obj, "enumerable").
Maybe<bool> has_property = JSReceiver::HasProperty(&it);
// 5. ReturnIfAbrupt(hasEnumerable).
@@ -29,7 +29,7 @@ bool GetPropertyIfPresent(Handle<Object> obj, Handle<String> name,
if (has_property.FromJust() == true) {
// 6a. Let enum be ToBoolean(Get(Obj, "enumerable")).
// 6b. ReturnIfAbrupt(enum).
- if (!JSObject::GetProperty(&it).ToHandle(value)) return false;
+ if (!Object::GetProperty(&it).ToHandle(value)) return false;
}
return true;
}
@@ -39,7 +39,7 @@ bool GetPropertyIfPresent(Handle<Object> obj, Handle<String> name,
// objects: nothing on the prototype chain, just own fast data properties.
// Must not have observable side effects, because the slow path will restart
// the entire conversion!
-bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<Object> obj,
+bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<JSReceiver> obj,
PropertyDescriptor* desc) {
if (!obj->IsJSObject()) return false;
Map* map = Handle<JSObject>::cast(obj)->map();
@@ -105,7 +105,7 @@ bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<Object> obj,
void CreateDataProperty(Isolate* isolate, Handle<JSObject> object,
Handle<String> name, Handle<Object> value) {
- LookupIterator it(object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ LookupIterator it(object, name, object, LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<bool> result = JSObject::CreateDataProperty(&it, value);
CHECK(result.IsJust() && result.FromJust());
}
@@ -190,14 +190,15 @@ bool PropertyDescriptor::ToPropertyDescriptor(Isolate* isolate,
// 3. Let desc be a new Property Descriptor that initially has no fields.
DCHECK(desc->is_empty());
- if (ToPropertyDescriptorFastPath(isolate, obj, desc)) {
+ Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(obj);
+ if (ToPropertyDescriptorFastPath(isolate, receiver, desc)) {
return true;
}
// enumerable?
Handle<Object> enumerable;
// 4 through 6b.
- if (!GetPropertyIfPresent(obj, isolate->factory()->enumerable_string(),
+ if (!GetPropertyIfPresent(receiver, isolate->factory()->enumerable_string(),
&enumerable)) {
return false;
}
@@ -209,7 +210,7 @@ bool PropertyDescriptor::ToPropertyDescriptor(Isolate* isolate,
// configurable?
Handle<Object> configurable;
// 7 through 9b.
- if (!GetPropertyIfPresent(obj, isolate->factory()->configurable_string(),
+ if (!GetPropertyIfPresent(receiver, isolate->factory()->configurable_string(),
&configurable)) {
return false;
}
@@ -221,7 +222,8 @@ bool PropertyDescriptor::ToPropertyDescriptor(Isolate* isolate,
// value?
Handle<Object> value;
// 10 through 12b.
- if (!GetPropertyIfPresent(obj, isolate->factory()->value_string(), &value)) {
+ if (!GetPropertyIfPresent(receiver, isolate->factory()->value_string(),
+ &value)) {
return false;
}
// 12c. Set the [[Value]] field of desc to value.
@@ -230,7 +232,7 @@ bool PropertyDescriptor::ToPropertyDescriptor(Isolate* isolate,
// writable?
Handle<Object> writable;
// 13 through 15b.
- if (!GetPropertyIfPresent(obj, isolate->factory()->writable_string(),
+ if (!GetPropertyIfPresent(receiver, isolate->factory()->writable_string(),
&writable)) {
return false;
}
@@ -240,7 +242,8 @@ bool PropertyDescriptor::ToPropertyDescriptor(Isolate* isolate,
// getter?
Handle<Object> getter;
// 16 through 18b.
- if (!GetPropertyIfPresent(obj, isolate->factory()->get_string(), &getter)) {
+ if (!GetPropertyIfPresent(receiver, isolate->factory()->get_string(),
+ &getter)) {
return false;
}
if (!getter.is_null()) {
@@ -257,7 +260,8 @@ bool PropertyDescriptor::ToPropertyDescriptor(Isolate* isolate,
// setter?
Handle<Object> setter;
// 19 through 21b.
- if (!GetPropertyIfPresent(obj, isolate->factory()->set_string(), &setter)) {
+ if (!GetPropertyIfPresent(receiver, isolate->factory()->set_string(),
+ &setter)) {
return false;
}
if (!setter.is_null()) {
diff --git a/deps/v8/src/prototype.h b/deps/v8/src/prototype.h
index c5e954554c..e09ff0ff0c 100644
--- a/deps/v8/src/prototype.h
+++ b/deps/v8/src/prototype.h
@@ -125,7 +125,7 @@ class PrototypeIterator {
// Returns false iff a call to JSProxy::GetPrototype throws.
// TODO(neis): This should probably replace Advance().
- bool AdvanceFollowingProxies() {
+ MUST_USE_RESULT bool AdvanceFollowingProxies() {
DCHECK(!(handle_.is_null() && object_->IsJSProxy()));
if (!HasAccess()) {
// Abort the lookup if we do not have access to the current object.
@@ -133,10 +133,15 @@ class PrototypeIterator {
is_at_end_ = true;
return true;
}
+ return AdvanceFollowingProxiesIgnoringAccessChecks();
+ }
+
+ MUST_USE_RESULT bool AdvanceFollowingProxiesIgnoringAccessChecks() {
if (handle_.is_null() || !handle_->IsJSProxy()) {
AdvanceIgnoringProxies();
return true;
}
+
// Due to possible __proto__ recursion limit the number of Proxies
// we visit to an arbitrarily chosen large number.
seen_proxies_++;
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
index ce72188ae1..f8dfc97c9c 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
@@ -891,7 +891,8 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
masm_->GetCode(&code_desc);
Handle<Code> code = isolate()->factory()->NewCode(
code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
- PROFILE(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
+ PROFILE(masm_->isolate(),
+ RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
return Handle<HeapObject>::cast(code);
}
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index 941cceaa59..e8bdad8e14 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -1088,7 +1088,8 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
masm_->GetCode(&code_desc);
Handle<Code> code = isolate()->factory()->NewCode(
code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
- PROFILE(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
+ PROFILE(masm_->isolate(),
+ RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
return Handle<HeapObject>::cast(code);
}
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
index 4c22b43f77..9c55af6645 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
@@ -511,7 +511,8 @@ void RegExpMacroAssemblerIA32::CheckBitInTable(
__ and_(ebx, current_character());
index = ebx;
}
- __ cmpb(FieldOperand(eax, index, times_1, ByteArray::kHeaderSize), 0);
+ __ cmpb(FieldOperand(eax, index, times_1, ByteArray::kHeaderSize),
+ Immediate(0));
BranchOrBacktrack(not_equal, on_bit_set);
}
@@ -936,7 +937,8 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
isolate()->factory()->NewCode(code_desc,
Code::ComputeFlags(Code::REGEXP),
masm_->CodeObject());
- PROFILE(isolate(), RegExpCodeCreateEvent(*code, *source));
+ PROFILE(masm_->isolate(),
+ RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
return Handle<HeapObject>::cast(code);
}
diff --git a/deps/v8/src/regexp/jsregexp.cc b/deps/v8/src/regexp/jsregexp.cc
index 80f48ca1a9..ddb4a16caf 100644
--- a/deps/v8/src/regexp/jsregexp.cc
+++ b/deps/v8/src/regexp/jsregexp.cc
@@ -41,6 +41,8 @@
#include "src/regexp/arm/regexp-macro-assembler-arm.h"
#elif V8_TARGET_ARCH_PPC
#include "src/regexp/ppc/regexp-macro-assembler-ppc.h"
+#elif V8_TARGET_ARCH_S390
+#include "src/regexp/s390/regexp-macro-assembler-s390.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/regexp/mips/regexp-macro-assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64
@@ -133,7 +135,7 @@ MaybeHandle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
Handle<String> pattern,
JSRegExp::Flags flags) {
Isolate* isolate = re->GetIsolate();
- Zone zone;
+ Zone zone(isolate->allocator());
CompilationCache* compilation_cache = isolate->compilation_cache();
MaybeHandle<FixedArray> maybe_cached =
compilation_cache->LookupRegExp(pattern, flags);
@@ -344,7 +346,7 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
bool is_one_byte) {
// Compile the RegExp.
Isolate* isolate = re->GetIsolate();
- Zone zone;
+ Zone zone(isolate->allocator());
PostponeInterruptsScope postpone(isolate);
// If we had a compilation error the last time this is saved at the
// saved code index.
@@ -6703,6 +6705,9 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
#elif V8_TARGET_ARCH_ARM64
RegExpMacroAssemblerARM64 macro_assembler(isolate, zone, mode,
(data->capture_count + 1) * 2);
+#elif V8_TARGET_ARCH_S390
+ RegExpMacroAssemblerS390 macro_assembler(isolate, zone, mode,
+ (data->capture_count + 1) * 2);
#elif V8_TARGET_ARCH_PPC
RegExpMacroAssemblerPPC macro_assembler(isolate, zone, mode,
(data->capture_count + 1) * 2);
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
index 6197f45452..062d6618e9 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
@@ -905,7 +905,8 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
masm_->GetCode(&code_desc);
Handle<Code> code = isolate()->factory()->NewCode(
code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
- LOG(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
+ LOG(masm_->isolate(),
+ RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
return Handle<HeapObject>::cast(code);
}
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
index bf95a9c97f..e0317dec8a 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
@@ -942,7 +942,8 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
masm_->GetCode(&code_desc);
Handle<Code> code = isolate()->factory()->NewCode(
code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
- LOG(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
+ LOG(masm_->isolate(),
+ RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
return Handle<HeapObject>::cast(code);
}
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index c05c580e86..70842f5a2c 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -940,7 +940,8 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
masm_->GetCode(&code_desc);
Handle<Code> code = isolate()->factory()->NewCode(
code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
- PROFILE(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
+ PROFILE(masm_->isolate(),
+ RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
return Handle<HeapObject>::cast(code);
}
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.h b/deps/v8/src/regexp/regexp-macro-assembler.h
index 6f79a16540..2aa439eceb 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler.h
@@ -40,6 +40,7 @@ class RegExpMacroAssembler {
kARMImplementation,
kARM64Implementation,
kMIPSImplementation,
+ kS390Implementation,
kPPCImplementation,
kX64Implementation,
kX87Implementation,
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index 46c593c264..d433fc8578 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -359,14 +359,17 @@ RegExpTree* RegExpParser::ParseDisjunction() {
Advance(2);
if (unicode()) {
if (FLAG_harmony_regexp_property) {
- ZoneList<CharacterRange>* ranges = ParsePropertyClass();
- if (ranges == nullptr) {
+ ZoneList<CharacterRange>* ranges =
+ new (zone()) ZoneList<CharacterRange>(2, zone());
+ if (!ParsePropertyClass(ranges)) {
return ReportError(CStrVector("Invalid property name"));
}
RegExpCharacterClass* cc =
new (zone()) RegExpCharacterClass(ranges, p == 'P');
builder->AddCharacterClass(cc);
} else {
+ // With /u, no identity escapes except for syntax characters
+ // are allowed. Otherwise, all identity escapes are allowed.
return ReportError(CStrVector("Invalid escape"));
}
} else {
@@ -841,54 +844,95 @@ bool RegExpParser::ParseUnicodeEscape(uc32* value) {
return result;
}
-ZoneList<CharacterRange>* RegExpParser::ParsePropertyClass() {
#ifdef V8_I18N_SUPPORT
- char property_name[3];
- memset(property_name, 0, sizeof(property_name));
- if (current() == '{') {
- Advance();
- if (current() < 'A' || current() > 'Z') return nullptr;
- property_name[0] = static_cast<char>(current());
- Advance();
- if (current() >= 'a' && current() <= 'z') {
- property_name[1] = static_cast<char>(current());
- Advance();
- }
- if (current() != '}') return nullptr;
- } else if (current() >= 'A' && current() <= 'Z') {
- property_name[0] = static_cast<char>(current());
- } else {
- return nullptr;
+bool IsExactPropertyValueAlias(const char* property_name, UProperty property,
+ int32_t property_value) {
+ const char* short_name =
+ u_getPropertyValueName(property, property_value, U_SHORT_PROPERTY_NAME);
+ if (short_name != NULL && strcmp(property_name, short_name) == 0) return true;
+ for (int i = 0;; i++) {
+ const char* long_name = u_getPropertyValueName(
+ property, property_value,
+ static_cast<UPropertyNameChoice>(U_LONG_PROPERTY_NAME + i));
+ if (long_name == NULL) break;
+ if (strcmp(property_name, long_name) == 0) return true;
}
- Advance();
+ return false;
+}
- int32_t category =
- u_getPropertyValueEnum(UCHAR_GENERAL_CATEGORY_MASK, property_name);
- if (category == UCHAR_INVALID_CODE) return nullptr;
+bool LookupPropertyClass(UProperty property, const char* property_name,
+ ZoneList<CharacterRange>* result, Zone* zone) {
+ int32_t property_value = u_getPropertyValueEnum(property, property_name);
+ if (property_value == UCHAR_INVALID_CODE) return false;
+
+ // We require the property name to match exactly to one of the property value
+ // aliases. However, u_getPropertyValueEnum uses loose matching.
+ if (!IsExactPropertyValueAlias(property_name, property, property_value)) {
+ return false;
+ }
USet* set = uset_openEmpty();
UErrorCode ec = U_ZERO_ERROR;
- uset_applyIntPropertyValue(set, UCHAR_GENERAL_CATEGORY_MASK, category, &ec);
- ZoneList<CharacterRange>* ranges = nullptr;
- if (ec == U_ZERO_ERROR && !uset_isEmpty(set)) {
+ uset_applyIntPropertyValue(set, property, property_value, &ec);
+ bool success = ec == U_ZERO_ERROR && !uset_isEmpty(set);
+
+ if (success) {
uset_removeAllStrings(set);
int item_count = uset_getItemCount(set);
- ranges = new (zone()) ZoneList<CharacterRange>(item_count, zone());
int item_result = 0;
for (int i = 0; i < item_count; i++) {
uc32 start = 0;
uc32 end = 0;
item_result += uset_getItem(set, i, &start, &end, nullptr, 0, &ec);
- ranges->Add(CharacterRange::Range(start, end), zone());
+ result->Add(CharacterRange::Range(start, end), zone);
}
DCHECK_EQ(U_ZERO_ERROR, ec);
DCHECK_EQ(0, item_result);
}
uset_close(set);
- return ranges;
-#else // V8_I18N_SUPPORT
- return nullptr;
+ return success;
+}
+#endif // V8_I18N_SUPPORT
+
+bool RegExpParser::ParsePropertyClass(ZoneList<CharacterRange>* result) {
+#ifdef V8_I18N_SUPPORT
+ List<char> property_name_list;
+ if (current() == '{') {
+ for (Advance(); current() != '}'; Advance()) {
+ if (!has_next()) return false;
+ property_name_list.Add(static_cast<char>(current()));
+ }
+ } else if (current() != kEndMarker) {
+ property_name_list.Add(static_cast<char>(current()));
+ } else {
+ return false;
+ }
+ Advance();
+ property_name_list.Add(0); // null-terminate string.
+
+ const char* property_name = property_name_list.ToConstVector().start();
+
+#define PROPERTY_NAME_LOOKUP(PROPERTY) \
+ do { \
+ if (LookupPropertyClass(PROPERTY, property_name, result, zone())) { \
+ return true; \
+ } \
+ } while (false)
+
+ // General_Category (gc) found in PropertyValueAliases.txt
+ PROPERTY_NAME_LOOKUP(UCHAR_GENERAL_CATEGORY_MASK);
+ // Script (sc) found in Scripts.txt
+ PROPERTY_NAME_LOOKUP(UCHAR_SCRIPT);
+ // To disambiguate from script names, block names have an "In"-prefix.
+ if (property_name_list.length() > 3 && property_name[0] == 'I' &&
+ property_name[1] == 'n') {
+ // Block (blk) found in Blocks.txt
+ property_name += 2;
+ PROPERTY_NAME_LOOKUP(UCHAR_BLOCK);
+ }
+#undef PROPERTY_NAME_LOOKUP
#endif // V8_I18N_SUPPORT
+ return false;
}
bool RegExpParser::ParseUnlimitedLengthHexNumber(int max_value, uc32* value) {
@@ -1068,6 +1112,34 @@ static inline void AddRangeOrEscape(ZoneList<CharacterRange>* ranges,
}
}
+bool RegExpParser::ParseClassProperty(ZoneList<CharacterRange>* ranges) {
+ if (!FLAG_harmony_regexp_property) return false;
+ if (!unicode()) return false;
+ if (current() != '\\') return false;
+ uc32 next = Next();
+ bool parse_success = false;
+ if (next == 'p') {
+ Advance(2);
+ parse_success = ParsePropertyClass(ranges);
+ } else if (next == 'P') {
+ Advance(2);
+ ZoneList<CharacterRange>* property_class =
+ new (zone()) ZoneList<CharacterRange>(2, zone());
+ parse_success = ParsePropertyClass(property_class);
+ if (parse_success) {
+ ZoneList<CharacterRange>* negated =
+ new (zone()) ZoneList<CharacterRange>(2, zone());
+ CharacterRange::Negate(property_class, negated, zone());
+ const Vector<CharacterRange> negated_vector = negated->ToVector();
+ ranges->AddAll(negated_vector, zone());
+ }
+ } else {
+ return false;
+ }
+ if (!parse_success)
+ ReportError(CStrVector("Invalid property name in character class"));
+ return parse_success;
+}
RegExpTree* RegExpParser::ParseCharacterClass() {
static const char* kUnterminated = "Unterminated character class";
@@ -1084,6 +1156,8 @@ RegExpTree* RegExpParser::ParseCharacterClass() {
ZoneList<CharacterRange>* ranges =
new (zone()) ZoneList<CharacterRange>(2, zone());
while (has_more() && current() != ']') {
+ bool parsed_property = ParseClassProperty(ranges CHECK_FAILED);
+ if (parsed_property) continue;
uc16 char_class = kNoCharClass;
CharacterRange first = ParseClassAtom(&char_class CHECK_FAILED);
if (current() == '-') {
@@ -1356,14 +1430,10 @@ void RegExpBuilder::FlushTerms() {
bool RegExpBuilder::NeedsDesugaringForUnicode(RegExpCharacterClass* cc) {
if (!unicode()) return false;
- switch (cc->standard_type()) {
- case 's': // white space
- case 'w': // ASCII word character
- case 'd': // ASCII digit
- return false; // These characters do not need desugaring.
- default:
- break;
- }
+ // TODO(yangguo): we could be smarter than this. Case-insensitivity does not
+ // necessarily mean that we need to desugar. It's probably nicer to have a
+ // separate pass to figure out unicode desugarings.
+ if (ignore_case()) return true;
ZoneList<CharacterRange>* ranges = cc->ranges(zone());
CharacterRange::Canonicalize(ranges);
for (int i = ranges->length() - 1; i >= 0; i--) {
diff --git a/deps/v8/src/regexp/regexp-parser.h b/deps/v8/src/regexp/regexp-parser.h
index acf783cc41..6142a9ea53 100644
--- a/deps/v8/src/regexp/regexp-parser.h
+++ b/deps/v8/src/regexp/regexp-parser.h
@@ -174,7 +174,7 @@ class RegExpParser BASE_EMBEDDED {
bool ParseHexEscape(int length, uc32* value);
bool ParseUnicodeEscape(uc32* value);
bool ParseUnlimitedLengthHexNumber(int max_value, uc32* value);
- ZoneList<CharacterRange>* ParsePropertyClass();
+ bool ParsePropertyClass(ZoneList<CharacterRange>* result);
uc32 ParseOctalLiteral();
@@ -184,6 +184,7 @@ class RegExpParser BASE_EMBEDDED {
// can be reparsed.
bool ParseBackReferenceIndex(int* index_out);
+ bool ParseClassProperty(ZoneList<CharacterRange>* result);
CharacterRange ParseClassAtom(uc16* char_class);
RegExpTree* ReportError(Vector<const char> message);
void Advance();
diff --git a/deps/v8/src/regexp/s390/OWNERS b/deps/v8/src/regexp/s390/OWNERS
new file mode 100644
index 0000000000..eb007cb908
--- /dev/null
+++ b/deps/v8/src/regexp/s390/OWNERS
@@ -0,0 +1,5 @@
+jyan@ca.ibm.com
+dstence@us.ibm.com
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
new file mode 100644
index 0000000000..9dac534636
--- /dev/null
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
@@ -0,0 +1,1256 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/base/bits.h"
+#include "src/code-stubs.h"
+#include "src/log.h"
+#include "src/macro-assembler.h"
+#include "src/profiler/cpu-profiler.h"
+#include "src/regexp/regexp-macro-assembler.h"
+#include "src/regexp/regexp-stack.h"
+#include "src/regexp/s390/regexp-macro-assembler-s390.h"
+#include "src/unicode.h"
+
+namespace v8 {
+namespace internal {
+
+#ifndef V8_INTERPRETED_REGEXP
+/*
+ * This assembler uses the following register assignment convention
+ * - r6: Temporarily stores the index of capture start after a matching pass
+ * for a global regexp.
+ * - r7: Pointer to current code object (Code*) including heap object tag.
+ * - r8: Current position in input, as negative offset from end of string.
+ * Please notice that this is the byte offset, not the character offset!
+ * - r9: Currently loaded character. Must be loaded using
+ * LoadCurrentCharacter before using any of the dispatch methods.
+ * - r13: Points to tip of backtrack stack
+ * - r10: End of input (points to byte after last character in input).
+ * - r11: Frame pointer. Used to access arguments, local variables and
+ * RegExp registers.
+ * - r12: IP register, used by assembler. Very volatile.
+ * - r15/sp : Points to tip of C stack.
+ *
+ * The remaining registers are free for computations.
+ * Each call to a public method should retain this convention.
+ *
+ * The stack will have the following structure:
+ * - fp[112] Isolate* isolate (address of the current isolate)
+ * - fp[108] secondary link/return address used by native call.
+ * - fp[104] direct_call (if 1, direct call from JavaScript code,
+ * if 0, call through the runtime system).
+ * - fp[100] stack_area_base (high end of the memory area to use as
+ * backtracking stack).
+ * - fp[96] capture array size (may fit multiple sets of matches)
+ * - fp[0..96] zLinux ABI register saving area
+ * --- sp when called ---
+ * --- frame pointer ----
+ * - fp[-4] direct_call (if 1, direct call from JavaScript code,
+ * if 0, call through the runtime system).
+ * - fp[-8] stack_area_base (high end of the memory area to use as
+ * backtracking stack).
+ * - fp[-12] capture array size (may fit multiple sets of matches)
+ * - fp[-16] int* capture_array (int[num_saved_registers_], for output).
+ * - fp[-20] end of input (address of end of string).
+ * - fp[-24] start of input (address of first character in string).
+ * - fp[-28] start index (character index of start).
+ * - fp[-32] void* input_string (location of a handle containing the string).
+ * - fp[-36] success counter (only for global regexps to count matches).
+ * - fp[-40] Offset of location before start of input (effectively character
+ * string start - 1). Used to initialize capture registers to a
+ * non-position.
+ * - fp[-44] At start (if 1, we are starting at the start of the
+ * string, otherwise 0)
+ * - fp[-48] register 0 (Only positions must be stored in the first
+ * - register 1 num_saved_registers_ registers)
+ * - ...
+ * - register num_registers-1
+ * --- sp ---
+ *
+ * The first num_saved_registers_ registers are initialized to point to
+ * "character -1" in the string (i.e., char_size() bytes before the first
+ * character of the string). The remaining registers start out as garbage.
+ *
+ * The data up to the return address must be placed there by the calling
+ * code and the remaining arguments are passed in registers, e.g. by calling the
+ * code entry as cast to a function with the signature:
+ * int (*match)(String* input_string,
+ * int start_index,
+ * Address start,
+ * Address end,
+ * int* capture_output_array,
+ * byte* stack_area_base,
+ * Address secondary_return_address, // Only used by native call.
+ * bool direct_call = false)
+ * The call is performed by NativeRegExpMacroAssembler::Execute()
+ * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
+ * in s390/simulator-s390.h.
+ * When calling as a non-direct call (i.e., from C++ code), the return address
+ * area is overwritten with the LR register by the RegExp code. When doing a
+ * direct call from generated code, the return address is placed there by
+ * the calling code, as in a normal exit frame.
+ */
+
+#define __ ACCESS_MASM(masm_)
+
+RegExpMacroAssemblerS390::RegExpMacroAssemblerS390(Isolate* isolate, Zone* zone,
+ Mode mode,
+ int registers_to_save)
+ : NativeRegExpMacroAssembler(isolate, zone),
+ masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize,
+ CodeObjectRequired::kYes)),
+ mode_(mode),
+ num_registers_(registers_to_save),
+ num_saved_registers_(registers_to_save),
+ entry_label_(),
+ start_label_(),
+ success_label_(),
+ backtrack_label_(),
+ exit_label_(),
+ internal_failure_label_() {
+ DCHECK_EQ(0, registers_to_save % 2);
+
+ __ b(&entry_label_); // We'll write the entry code later.
+ // If the code gets too big or corrupted, an internal exception will be
+ // raised, and we will exit right away.
+ __ bind(&internal_failure_label_);
+ __ LoadImmP(r2, Operand(FAILURE));
+ __ Ret();
+ __ bind(&start_label_); // And then continue from here.
+}
+
+RegExpMacroAssemblerS390::~RegExpMacroAssemblerS390() {
+ delete masm_;
+ // Unuse labels in case we throw away the assembler without calling GetCode.
+ entry_label_.Unuse();
+ start_label_.Unuse();
+ success_label_.Unuse();
+ backtrack_label_.Unuse();
+ exit_label_.Unuse();
+ check_preempt_label_.Unuse();
+ stack_overflow_label_.Unuse();
+ internal_failure_label_.Unuse();
+}
+
+int RegExpMacroAssemblerS390::stack_limit_slack() {
+ return RegExpStack::kStackLimitSlack;
+}
+
+void RegExpMacroAssemblerS390::AdvanceCurrentPosition(int by) {
+ if (by != 0) {
+ __ AddP(current_input_offset(), Operand(by * char_size()));
+ }
+}
+
+void RegExpMacroAssemblerS390::AdvanceRegister(int reg, int by) {
+ DCHECK(reg >= 0);
+ DCHECK(reg < num_registers_);
+ if (by != 0) {
+ if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_int8(by)) {
+ __ AddMI(register_location(reg), Operand(by));
+ } else {
+ __ LoadP(r2, register_location(reg), r0);
+ __ mov(r0, Operand(by));
+ __ AddRR(r2, r0);
+ __ StoreP(r2, register_location(reg));
+ }
+ }
+}
+
+void RegExpMacroAssemblerS390::Backtrack() {
+ CheckPreemption();
+ // Pop Code* offset from backtrack stack, add Code* and jump to location.
+ Pop(r2);
+ __ AddP(r2, code_pointer());
+ __ b(r2);
+}
+
+void RegExpMacroAssemblerS390::Bind(Label* label) { __ bind(label); }
+
+void RegExpMacroAssemblerS390::CheckCharacter(uint32_t c, Label* on_equal) {
+ __ CmpLogicalP(current_character(), Operand(c));
+ BranchOrBacktrack(eq, on_equal);
+}
+
+void RegExpMacroAssemblerS390::CheckCharacterGT(uc16 limit, Label* on_greater) {
+ __ CmpLogicalP(current_character(), Operand(limit));
+ BranchOrBacktrack(gt, on_greater);
+}
+
+void RegExpMacroAssemblerS390::CheckAtStart(Label* on_at_start) {
+ __ LoadP(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ AddP(r2, current_input_offset(), Operand(-char_size()));
+ __ CmpP(r2, r3);
+ BranchOrBacktrack(eq, on_at_start);
+}
+
+void RegExpMacroAssemblerS390::CheckNotAtStart(int cp_offset,
+ Label* on_not_at_start) {
+ __ LoadP(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ AddP(r2, current_input_offset(),
+ Operand(-char_size() + cp_offset * char_size()));
+ __ CmpP(r2, r3);
+ BranchOrBacktrack(ne, on_not_at_start);
+}
+
+void RegExpMacroAssemblerS390::CheckCharacterLT(uc16 limit, Label* on_less) {
+ __ CmpLogicalP(current_character(), Operand(limit));
+ BranchOrBacktrack(lt, on_less);
+}
+
+void RegExpMacroAssemblerS390::CheckGreedyLoop(Label* on_equal) {
+ Label backtrack_non_equal;
+ __ CmpP(current_input_offset(), MemOperand(backtrack_stackpointer(), 0));
+ __ bne(&backtrack_non_equal);
+ __ AddP(backtrack_stackpointer(), Operand(kPointerSize));
+
+ BranchOrBacktrack(al, on_equal);
+ __ bind(&backtrack_non_equal);
+}
+
+void RegExpMacroAssemblerS390::CheckNotBackReferenceIgnoreCase(
+ int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
+ Label fallthrough;
+ __ LoadP(r2, register_location(start_reg)); // Index of start of
+ // capture
+ __ LoadP(r3, register_location(start_reg + 1)); // Index of end
+ __ SubP(r3, r3, r2);
+
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
+ __ beq(&fallthrough);
+
+ // Check that there are enough characters left in the input.
+ if (read_backward) {
+ __ LoadP(r5, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ AddP(r5, r5, r3);
+ __ CmpP(current_input_offset(), r5);
+ BranchOrBacktrack(le, on_no_match);
+ } else {
+ __ AddP(r0, r3, current_input_offset());
+ BranchOrBacktrack(gt, on_no_match);
+ }
+
+ if (mode_ == LATIN1) {
+ Label success;
+ Label fail;
+ Label loop_check;
+
+ // r2 - offset of start of capture
+ // r3 - length of capture
+ __ AddP(r2, end_of_input_address());
+ __ AddP(r4, current_input_offset(), end_of_input_address());
+ if (read_backward) {
+ __ SubP(r4, r4, r3); // Offset by length when matching backwards.
+ }
+ __ mov(r1, Operand::Zero());
+
+ // r1 - Loop index
+ // r2 - Address of start of capture.
+ // r4 - Address of current input position.
+
+ Label loop;
+ __ bind(&loop);
+ __ LoadlB(r5, MemOperand(r2, r1));
+ __ LoadlB(r6, MemOperand(r4, r1));
+
+ __ CmpP(r6, r5);
+ __ beq(&loop_check);
+
+ // Mismatch, try case-insensitive match (converting letters to lower-case).
+ __ Or(r5, Operand(0x20)); // Convert capture character to lower-case.
+ __ Or(r6, Operand(0x20)); // Also convert input character.
+ __ CmpP(r6, r5);
+ __ bne(&fail);
+ __ SubP(r5, Operand('a'));
+ __ CmpLogicalP(r5, Operand('z' - 'a')); // Is r5 a lowercase letter?
+ __ ble(&loop_check); // In range 'a'-'z'.
+ // Latin-1: Check for values in range [224,254] but not 247.
+ __ SubP(r5, Operand(224 - 'a'));
+ __ CmpLogicalP(r5, Operand(254 - 224));
+ __ bgt(&fail); // Weren't Latin-1 letters.
+ __ CmpLogicalP(r5, Operand(247 - 224)); // Check for 247.
+ __ beq(&fail);
+
+ __ bind(&loop_check);
+ __ la(r1, MemOperand(r1, char_size()));
+ __ CmpP(r1, r3);
+ __ blt(&loop);
+ __ b(&success);
+
+ __ bind(&fail);
+ BranchOrBacktrack(al, on_no_match);
+
+ __ bind(&success);
+ // Compute new value of character position after the matched part.
+ __ SubP(current_input_offset(), r4, end_of_input_address());
+ if (read_backward) {
+ __ LoadP(r2, register_location(start_reg)); // Index of start of capture
+ __ LoadP(r3,
+ register_location(start_reg + 1)); // Index of end of capture
+ __ AddP(current_input_offset(), current_input_offset(), r2);
+ __ SubP(current_input_offset(), current_input_offset(), r3);
+ }
+ __ AddP(current_input_offset(), r1);
+ } else {
+ DCHECK(mode_ == UC16);
+ int argument_count = 4;
+ __ PrepareCallCFunction(argument_count, r4);
+
+ // r2 - offset of start of capture
+ // r3 - length of capture
+
+ // Put arguments into arguments registers.
+ // Parameters are
+ // r2: Address byte_offset1 - Address captured substring's start.
+ // r3: Address byte_offset2 - Address of current character position.
+ // r4: size_t byte_length - length of capture in bytes(!)
+ // r5: Isolate* isolate or 0 if unicode flag.
+
+ // Address of start of capture.
+ __ AddP(r2, end_of_input_address());
+ // Length of capture.
+ __ LoadRR(r4, r3);
+ // Save length in callee-save register for use on return.
+ __ LoadRR(r6, r3);
+ // Address of current input position.
+ __ AddP(r3, current_input_offset(), end_of_input_address());
+ if (read_backward) {
+ __ SubP(r3, r3, r6);
+ }
+// Isolate.
+#ifdef V8_I18N_SUPPORT
+ if (unicode) {
+ __ LoadImmP(r5, Operand::Zero());
+ } else // NOLINT
+#endif // V8_I18N_SUPPORT
+ {
+ __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
+ }
+
+ {
+ AllowExternalCallThatCantCauseGC scope(masm_);
+ ExternalReference function =
+ ExternalReference::re_case_insensitive_compare_uc16(isolate());
+ __ CallCFunction(function, argument_count);
+ }
+
+ // Check if function returned non-zero for success or zero for failure.
+ __ CmpP(r2, Operand::Zero());
+ BranchOrBacktrack(eq, on_no_match);
+
+ // On success, advance position by length of capture.
+ if (read_backward) {
+ __ SubP(current_input_offset(), current_input_offset(), r6);
+ } else {
+ __ AddP(current_input_offset(), current_input_offset(), r6);
+ }
+ }
+
+ __ bind(&fallthrough);
+}
+
+void RegExpMacroAssemblerS390::CheckNotBackReference(int start_reg,
+ bool read_backward,
+ Label* on_no_match) {
+ Label fallthrough;
+ Label success;
+
+ // Find length of back-referenced capture.
+ __ LoadP(r2, register_location(start_reg));
+ __ LoadP(r3, register_location(start_reg + 1));
+ __ SubP(r3, r3, r2); // Length to check.
+
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
+ __ beq(&fallthrough);
+
+ // Check that there are enough characters left in the input.
+ if (read_backward) {
+ __ LoadP(r5, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ AddP(r5, r5, r3);
+ __ CmpP(current_input_offset(), r5);
+ BranchOrBacktrack(lt, on_no_match);
+ } else {
+ __ AddP(r0, r3, current_input_offset());
+ BranchOrBacktrack(gt, on_no_match, cr0);
+ }
+
+ // r2 - offset of start of capture
+ // r3 - length of capture
+ __ la(r2, MemOperand(r2, end_of_input_address()));
+ __ la(r4, MemOperand(current_input_offset(), end_of_input_address()));
+ if (read_backward) {
+ __ SubP(r4, r4, r3); // Offset by length when matching backwards.
+ }
+ __ mov(r1, Operand::Zero());
+
+ Label loop;
+ __ bind(&loop);
+ if (mode_ == LATIN1) {
+ __ LoadlB(r5, MemOperand(r2, r1));
+ __ LoadlB(r6, MemOperand(r4, r1));
+ } else {
+ DCHECK(mode_ == UC16);
+ __ LoadLogicalHalfWordP(r5, MemOperand(r2, r1));
+ __ LoadLogicalHalfWordP(r6, MemOperand(r4, r1));
+ }
+ __ la(r1, MemOperand(r1, char_size()));
+ __ CmpP(r5, r6);
+ BranchOrBacktrack(ne, on_no_match);
+ __ CmpP(r1, r3);
+ __ blt(&loop);
+
+ // Move current character position to position after match.
+ __ SubP(current_input_offset(), r4, end_of_input_address());
+ if (read_backward) {
+ __ LoadP(r2, register_location(start_reg)); // Index of start of capture
+ __ LoadP(r3, register_location(start_reg + 1)); // Index of end of capture
+ __ AddP(current_input_offset(), current_input_offset(), r2);
+ __ SubP(current_input_offset(), current_input_offset(), r3);
+ }
+ __ AddP(current_input_offset(), r1);
+
+ __ bind(&fallthrough);
+}
+
+void RegExpMacroAssemblerS390::CheckNotCharacter(unsigned c,
+ Label* on_not_equal) {
+ __ CmpLogicalP(current_character(), Operand(c));
+ BranchOrBacktrack(ne, on_not_equal);
+}
+
+void RegExpMacroAssemblerS390::CheckCharacterAfterAnd(uint32_t c, uint32_t mask,
+ Label* on_equal) {
+ __ AndP(r2, current_character(), Operand(mask));
+ if (c != 0) {
+ __ CmpLogicalP(r2, Operand(c));
+ }
+ BranchOrBacktrack(eq, on_equal);
+}
+
+void RegExpMacroAssemblerS390::CheckNotCharacterAfterAnd(unsigned c,
+ unsigned mask,
+ Label* on_not_equal) {
+ __ AndP(r2, current_character(), Operand(mask));
+ if (c != 0) {
+ __ CmpLogicalP(r2, Operand(c));
+ }
+ BranchOrBacktrack(ne, on_not_equal);
+}
+
+void RegExpMacroAssemblerS390::CheckNotCharacterAfterMinusAnd(
+ uc16 c, uc16 minus, uc16 mask, Label* on_not_equal) {
+ DCHECK(minus < String::kMaxUtf16CodeUnit);
+ __ lay(r2, MemOperand(current_character(), -minus));
+ __ And(r2, Operand(mask));
+ if (c != 0) {
+ __ CmpLogicalP(r2, Operand(c));
+ }
+ BranchOrBacktrack(ne, on_not_equal);
+}
+
+void RegExpMacroAssemblerS390::CheckCharacterInRange(uc16 from, uc16 to,
+ Label* on_in_range) {
+ __ lay(r2, MemOperand(current_character(), -from));
+ __ CmpLogicalP(r2, Operand(to - from));
+ BranchOrBacktrack(le, on_in_range); // Unsigned lower-or-same condition.
+}
+
+void RegExpMacroAssemblerS390::CheckCharacterNotInRange(
+ uc16 from, uc16 to, Label* on_not_in_range) {
+ __ lay(r2, MemOperand(current_character(), -from));
+ __ CmpLogicalP(r2, Operand(to - from));
+ BranchOrBacktrack(gt, on_not_in_range); // Unsigned higher condition.
+}
+
+void RegExpMacroAssemblerS390::CheckBitInTable(Handle<ByteArray> table,
+ Label* on_bit_set) {
+ __ mov(r2, Operand(table));
+ Register index = current_character();
+ if (mode_ != LATIN1 || kTableMask != String::kMaxOneByteCharCode) {
+ __ AndP(r3, current_character(), Operand(kTableSize - 1));
+ index = r3;
+ }
+ __ LoadlB(r2,
+ MemOperand(r2, index, (ByteArray::kHeaderSize - kHeapObjectTag)));
+ __ CmpP(r2, Operand::Zero());
+ BranchOrBacktrack(ne, on_bit_set);
+}
+
+bool RegExpMacroAssemblerS390::CheckSpecialCharacterClass(uc16 type,
+ Label* on_no_match) {
+ // Range checks (c in min..max) are generally implemented by an unsigned
+ // (c - min) <= (max - min) check
+ switch (type) {
+ case 's':
+ // Match space-characters
+ if (mode_ == LATIN1) {
+ // One byte space characters are '\t'..'\r', ' ' and \u00a0.
+ Label success;
+ __ CmpP(current_character(), Operand(' '));
+ __ beq(&success);
+ // Check range 0x09..0x0d
+ __ SubP(r2, current_character(), Operand('\t'));
+ __ CmpLogicalP(r2, Operand('\r' - '\t'));
+ __ ble(&success);
+ // \u00a0 (NBSP).
+ __ CmpLogicalP(r2, Operand(0x00a0 - '\t'));
+ BranchOrBacktrack(ne, on_no_match);
+ __ bind(&success);
+ return true;
+ }
+ return false;
+ case 'S':
+ // The emitted code for generic character classes is good enough.
+ return false;
+ case 'd':
+ // Match ASCII digits ('0'..'9')
+ __ SubP(r2, current_character(), Operand('0'));
+ __ CmpLogicalP(r2, Operand('9' - '0'));
+ BranchOrBacktrack(gt, on_no_match);
+ return true;
+ case 'D':
+ // Match non ASCII-digits
+ __ SubP(r2, current_character(), Operand('0'));
+ __ CmpLogicalP(r2, Operand('9' - '0'));
+ BranchOrBacktrack(le, on_no_match);
+ return true;
+ case '.': {
+ // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ __ XorP(r2, current_character(), Operand(0x01));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
+ __ SubP(r2, Operand(0x0b));
+ __ CmpLogicalP(r2, Operand(0x0c - 0x0b));
+ BranchOrBacktrack(le, on_no_match);
+ if (mode_ == UC16) {
+ // Compare original value to 0x2028 and 0x2029, using the already
+ // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+ // 0x201d (0x2028 - 0x0b) or 0x201e.
+ __ SubP(r2, Operand(0x2028 - 0x0b));
+ __ CmpLogicalP(r2, Operand(1));
+ BranchOrBacktrack(le, on_no_match);
+ }
+ return true;
+ }
+ case 'n': {
+ // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ __ XorP(r2, current_character(), Operand(0x01));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
+ __ SubP(r2, Operand(0x0b));
+ __ CmpLogicalP(r2, Operand(0x0c - 0x0b));
+ if (mode_ == LATIN1) {
+ BranchOrBacktrack(gt, on_no_match);
+ } else {
+ Label done;
+ __ ble(&done);
+ // Compare original value to 0x2028 and 0x2029, using the already
+ // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+ // 0x201d (0x2028 - 0x0b) or 0x201e.
+ __ SubP(r2, Operand(0x2028 - 0x0b));
+ __ CmpLogicalP(r2, Operand(1));
+ BranchOrBacktrack(gt, on_no_match);
+ __ bind(&done);
+ }
+ return true;
+ }
+ case 'w': {
+ if (mode_ != LATIN1) {
+ // Table is 1256 entries, so all LATIN1 characters can be tested.
+ __ CmpP(current_character(), Operand('z'));
+ BranchOrBacktrack(gt, on_no_match);
+ }
+ ExternalReference map = ExternalReference::re_word_character_map();
+ __ mov(r2, Operand(map));
+ __ LoadlB(r2, MemOperand(r2, current_character()));
+ __ CmpLogicalP(r2, Operand::Zero());
+ BranchOrBacktrack(eq, on_no_match);
+ return true;
+ }
+ case 'W': {
+ Label done;
+ if (mode_ != LATIN1) {
+ // Table is 256 entries, so all LATIN characters can be tested.
+ __ CmpLogicalP(current_character(), Operand('z'));
+ __ bgt(&done);
+ }
+ ExternalReference map = ExternalReference::re_word_character_map();
+ __ mov(r2, Operand(map));
+ __ LoadlB(r2, MemOperand(r2, current_character()));
+ __ CmpLogicalP(r2, Operand::Zero());
+ BranchOrBacktrack(ne, on_no_match);
+ if (mode_ != LATIN1) {
+ __ bind(&done);
+ }
+ return true;
+ }
+ case '*':
+ // Match any character.
+ return true;
+ // No custom implementation (yet): s(UC16), S(UC16).
+ default:
+ return false;
+ }
+}
+
+void RegExpMacroAssemblerS390::Fail() {
+ __ LoadImmP(r2, Operand(FAILURE));
+ __ b(&exit_label_);
+}
+
+Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
+ Label return_r2;
+
+ // Finalize code - write the entry point code now we know how many
+ // registers we need.
+
+ // Entry code:
+ __ bind(&entry_label_);
+
+ // Tell the system that we have a stack frame. Because the type
+ // is MANUAL, no is generated.
+ FrameScope scope(masm_, StackFrame::MANUAL);
+
+ // Ensure register assigments are consistent with callee save mask
+ DCHECK(r6.bit() & kRegExpCalleeSaved);
+ DCHECK(code_pointer().bit() & kRegExpCalleeSaved);
+ DCHECK(current_input_offset().bit() & kRegExpCalleeSaved);
+ DCHECK(current_character().bit() & kRegExpCalleeSaved);
+ DCHECK(backtrack_stackpointer().bit() & kRegExpCalleeSaved);
+ DCHECK(end_of_input_address().bit() & kRegExpCalleeSaved);
+ DCHECK(frame_pointer().bit() & kRegExpCalleeSaved);
+
+ // zLinux ABI
+ // Incoming parameters:
+ // r2: input_string
+ // r3: start_index
+ // r4: start addr
+ // r5: end addr
+ // r6: capture output arrray
+ // Requires us to save the callee-preserved registers r6-r13
+ // General convention is to also save r14 (return addr) and
+ // sp/r15 as well in a single STM/STMG
+ __ StoreMultipleP(r6, sp, MemOperand(sp, 6 * kPointerSize));
+
+ // Load stack parameters from caller stack frame
+ __ LoadMultipleP(r7, r9,
+ MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
+ // r7 = capture array size
+ // r8 = stack area base
+ // r9 = direct call
+
+ // Actually emit code to start a new stack frame.
+ // Push arguments
+ // Save callee-save registers.
+ // Start new stack frame.
+ // Store link register in existing stack-cell.
+ // Order here should correspond to order of offset constants in header file.
+ //
+ // Set frame pointer in space for it if this is not a direct call
+ // from generated code.
+ __ LoadRR(frame_pointer(), sp);
+ __ lay(sp, MemOperand(sp, -10 * kPointerSize));
+ __ mov(r1, Operand::Zero()); // success counter
+ __ LoadRR(r0, r1); // offset of location
+ __ StoreMultipleP(r0, r9, MemOperand(sp, 0));
+
+ // Check if we have space on the stack for registers.
+ Label stack_limit_hit;
+ Label stack_ok;
+
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ mov(r2, Operand(stack_limit));
+ __ LoadP(r2, MemOperand(r2));
+ __ SubP(r2, sp, r2);
+ // Handle it if the stack pointer is already below the stack limit.
+ __ ble(&stack_limit_hit);
+ // Check if there is room for the variable number of registers above
+ // the stack limit.
+ __ CmpLogicalP(r2, Operand(num_registers_ * kPointerSize));
+ __ bge(&stack_ok);
+ // Exit with OutOfMemory exception. There is not enough space on the stack
+ // for our working registers.
+ __ mov(r2, Operand(EXCEPTION));
+ __ b(&return_r2);
+
+ __ bind(&stack_limit_hit);
+ CallCheckStackGuardState(r2);
+ __ CmpP(r2, Operand::Zero());
+ // If returned value is non-zero, we exit with the returned value as result.
+ __ bne(&return_r2);
+
+ __ bind(&stack_ok);
+
+ // Allocate space on stack for registers.
+ __ lay(sp, MemOperand(sp, (-num_registers_ * kPointerSize)));
+ // Load string end.
+ __ LoadP(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ // Load input start.
+ __ LoadP(r4, MemOperand(frame_pointer(), kInputStart));
+ // Find negative length (offset of start relative to end).
+ __ SubP(current_input_offset(), r4, end_of_input_address());
+ __ LoadP(r3, MemOperand(frame_pointer(), kStartIndex));
+ // Set r1 to address of char before start of the input string
+ // (effectively string position -1).
+ __ LoadRR(r1, r4);
+ __ SubP(r1, current_input_offset(), Operand(char_size()));
+ if (mode_ == UC16) {
+ __ ShiftLeftP(r0, r3, Operand(1));
+ __ SubP(r1, r1, r0);
+ } else {
+ __ SubP(r1, r1, r3);
+ }
+ // Store this value in a local variable, for use when clearing
+ // position registers.
+ __ StoreP(r1, MemOperand(frame_pointer(), kStringStartMinusOne));
+
+ // Initialize code pointer register
+ __ mov(code_pointer(), Operand(masm_->CodeObject()));
+
+ Label load_char_start_regexp, start_regexp;
+ // Load newline if index is at start, previous character otherwise.
+ __ CmpP(r3, Operand::Zero());
+ __ bne(&load_char_start_regexp);
+ __ mov(current_character(), Operand('\n'));
+ __ b(&start_regexp);
+
+ // Global regexp restarts matching here.
+ __ bind(&load_char_start_regexp);
+ // Load previous char as initial value of current character register.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&start_regexp);
+
+ // Initialize on-stack registers.
+ if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
+ // Fill saved registers with initial value = start offset - 1
+ if (num_saved_registers_ > 8) {
+ // One slot beyond address of register 0.
+ __ lay(r3, MemOperand(frame_pointer(), kRegisterZero + kPointerSize));
+ __ LoadImmP(r4, Operand(num_saved_registers_));
+ Label init_loop;
+ __ bind(&init_loop);
+ __ StoreP(r1, MemOperand(r3, -kPointerSize));
+ __ lay(r3, MemOperand(r3, -kPointerSize));
+ __ BranchOnCount(r4, &init_loop);
+ } else {
+ for (int i = 0; i < num_saved_registers_; i++) {
+ __ StoreP(r1, register_location(i));
+ }
+ }
+ }
+
+ // Initialize backtrack stack pointer.
+ __ LoadP(backtrack_stackpointer(),
+ MemOperand(frame_pointer(), kStackHighEnd));
+
+ __ b(&start_label_);
+
+ // Exit code:
+ if (success_label_.is_linked()) {
+ // Save captures when successful.
+ __ bind(&success_label_);
+ if (num_saved_registers_ > 0) {
+ // copy captures to output
+ __ LoadP(r0, MemOperand(frame_pointer(), kInputStart));
+ __ LoadP(r2, MemOperand(frame_pointer(), kRegisterOutput));
+ __ LoadP(r4, MemOperand(frame_pointer(), kStartIndex));
+ __ SubP(r0, end_of_input_address(), r0);
+ // r0 is length of input in bytes.
+ if (mode_ == UC16) {
+ __ ShiftRightP(r0, r0, Operand(1));
+ }
+ // r0 is length of input in characters.
+ __ AddP(r0, r4);
+ // r0 is length of string in characters.
+
+ DCHECK_EQ(0, num_saved_registers_ % 2);
+ // Always an even number of capture registers. This allows us to
+ // unroll the loop once to add an operation between a load of a register
+ // and the following use of that register.
+ __ lay(r2, MemOperand(r2, num_saved_registers_ * kIntSize));
+ for (int i = 0; i < num_saved_registers_;) {
+ if (false && i < num_saved_registers_ - 4) {
+ // TODO(john.yan): Can be optimized by SIMD instructions
+ __ LoadMultipleP(r3, r6, register_location(i + 3));
+ if (mode_ == UC16) {
+ __ ShiftRightArithP(r3, r3, Operand(1));
+ __ ShiftRightArithP(r4, r4, Operand(1));
+ __ ShiftRightArithP(r5, r5, Operand(1));
+ __ ShiftRightArithP(r6, r6, Operand(1));
+ }
+ __ AddP(r3, r0);
+ __ AddP(r4, r0);
+ __ AddP(r5, r0);
+ __ AddP(r6, r0);
+ __ StoreW(r3,
+ MemOperand(r2, -(num_saved_registers_ - i - 3) * kIntSize));
+ __ StoreW(r4,
+ MemOperand(r2, -(num_saved_registers_ - i - 2) * kIntSize));
+ __ StoreW(r5,
+ MemOperand(r2, -(num_saved_registers_ - i - 1) * kIntSize));
+ __ StoreW(r6, MemOperand(r2, -(num_saved_registers_ - i) * kIntSize));
+ i += 4;
+ } else {
+ __ LoadMultipleP(r3, r4, register_location(i + 1));
+ if (mode_ == UC16) {
+ __ ShiftRightArithP(r3, r3, Operand(1));
+ __ ShiftRightArithP(r4, r4, Operand(1));
+ }
+ __ AddP(r3, r0);
+ __ AddP(r4, r0);
+ __ StoreW(r3,
+ MemOperand(r2, -(num_saved_registers_ - i - 1) * kIntSize));
+ __ StoreW(r4, MemOperand(r2, -(num_saved_registers_ - i) * kIntSize));
+ i += 2;
+ }
+ }
+ if (global_with_zero_length_check()) {
+ // Keep capture start in r6 for the zero-length check later.
+ __ LoadP(r6, register_location(0));
+ }
+ }
+
+ if (global()) {
+ // Restart matching if the regular expression is flagged as global.
+ __ LoadP(r2, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ __ LoadP(r3, MemOperand(frame_pointer(), kNumOutputRegisters));
+ __ LoadP(r4, MemOperand(frame_pointer(), kRegisterOutput));
+ // Increment success counter.
+ __ AddP(r2, Operand(1));
+ __ StoreP(r2, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ // Capture results have been stored, so the number of remaining global
+ // output registers is reduced by the number of stored captures.
+ __ SubP(r3, Operand(num_saved_registers_));
+ // Check whether we have enough room for another set of capture results.
+ __ CmpP(r3, Operand(num_saved_registers_));
+ __ blt(&return_r2);
+
+ __ StoreP(r3, MemOperand(frame_pointer(), kNumOutputRegisters));
+ // Advance the location for output.
+ __ AddP(r4, Operand(num_saved_registers_ * kIntSize));
+ __ StoreP(r4, MemOperand(frame_pointer(), kRegisterOutput));
+
+ // Prepare r2 to initialize registers with its value in the next run.
+ __ LoadP(r2, MemOperand(frame_pointer(), kStringStartMinusOne));
+
+ if (global_with_zero_length_check()) {
+ // Special case for zero-length matches.
+ // r6: capture start index
+ __ CmpP(current_input_offset(), r6);
+ // Not a zero-length match, restart.
+ __ bne(&load_char_start_regexp);
+ // Offset from the end is zero if we already reached the end.
+ __ CmpP(current_input_offset(), Operand::Zero());
+ __ beq(&exit_label_);
+ // Advance current position after a zero-length match.
+ Label advance;
+ __ bind(&advance);
+ __ AddP(current_input_offset(), Operand((mode_ == UC16) ? 2 : 1));
+ if (global_unicode()) CheckNotInSurrogatePair(0, &advance);
+ }
+
+ __ b(&load_char_start_regexp);
+ } else {
+ __ LoadImmP(r2, Operand(SUCCESS));
+ }
+ }
+
+ // Exit and return r2
+ __ bind(&exit_label_);
+ if (global()) {
+ __ LoadP(r2, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ }
+
+ __ bind(&return_r2);
+ // Skip sp past regexp registers and local variables..
+ __ LoadRR(sp, frame_pointer());
+ // Restore registers r6..r15.
+ __ LoadMultipleP(r6, sp, MemOperand(sp, 6 * kPointerSize));
+
+ __ b(r14);
+
+ // Backtrack code (branch target for conditional backtracks).
+ if (backtrack_label_.is_linked()) {
+ __ bind(&backtrack_label_);
+ Backtrack();
+ }
+
+ Label exit_with_exception;
+
+ // Preempt-code
+ if (check_preempt_label_.is_linked()) {
+ SafeCallTarget(&check_preempt_label_);
+
+ CallCheckStackGuardState(r2);
+ __ CmpP(r2, Operand::Zero());
+ // If returning non-zero, we should end execution with the given
+ // result as return value.
+ __ bne(&return_r2);
+
+ // String might have moved: Reload end of string from frame.
+ __ LoadP(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ SafeReturn();
+ }
+
+ // Backtrack stack overflow code.
+ if (stack_overflow_label_.is_linked()) {
+ SafeCallTarget(&stack_overflow_label_);
+ // Reached if the backtrack-stack limit has been hit.
+ Label grow_failed;
+
+ // Call GrowStack(backtrack_stackpointer(), &stack_base)
+ static const int num_arguments = 3;
+ __ PrepareCallCFunction(num_arguments, r2);
+ __ LoadRR(r2, backtrack_stackpointer());
+ __ AddP(r3, frame_pointer(), Operand(kStackHighEnd));
+ __ mov(r4, Operand(ExternalReference::isolate_address(isolate())));
+ ExternalReference grow_stack = ExternalReference::re_grow_stack(isolate());
+ __ CallCFunction(grow_stack, num_arguments);
+ // If return NULL, we have failed to grow the stack, and
+ // must exit with a stack-overflow exception.
+ __ CmpP(r2, Operand::Zero());
+ __ beq(&exit_with_exception);
+ // Otherwise use return value as new stack pointer.
+ __ LoadRR(backtrack_stackpointer(), r2);
+ // Restore saved registers and continue.
+ SafeReturn();
+ }
+
+ if (exit_with_exception.is_linked()) {
+ // If any of the code above needed to exit with an exception.
+ __ bind(&exit_with_exception);
+ // Exit with Result EXCEPTION(-1) to signal thrown exception.
+ __ LoadImmP(r2, Operand(EXCEPTION));
+ __ b(&return_r2);
+ }
+
+ CodeDesc code_desc;
+ masm_->GetCode(&code_desc);
+ Handle<Code> code = isolate()->factory()->NewCode(
+ code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
+ PROFILE(masm_->isolate(),
+ RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
+ return Handle<HeapObject>::cast(code);
+}
+
+void RegExpMacroAssemblerS390::GoTo(Label* to) { BranchOrBacktrack(al, to); }
+
+void RegExpMacroAssemblerS390::IfRegisterGE(int reg, int comparand,
+ Label* if_ge) {
+ __ LoadP(r2, register_location(reg), r0);
+ __ CmpP(r2, Operand(comparand));
+ BranchOrBacktrack(ge, if_ge);
+}
+
+void RegExpMacroAssemblerS390::IfRegisterLT(int reg, int comparand,
+ Label* if_lt) {
+ __ LoadP(r2, register_location(reg), r0);
+ __ CmpP(r2, Operand(comparand));
+ BranchOrBacktrack(lt, if_lt);
+}
+
+void RegExpMacroAssemblerS390::IfRegisterEqPos(int reg, Label* if_eq) {
+ __ LoadP(r2, register_location(reg), r0);
+ __ CmpP(r2, current_input_offset());
+ BranchOrBacktrack(eq, if_eq);
+}
+
+RegExpMacroAssembler::IrregexpImplementation
+RegExpMacroAssemblerS390::Implementation() {
+ return kS390Implementation;
+}
+
+void RegExpMacroAssemblerS390::LoadCurrentCharacter(int cp_offset,
+ Label* on_end_of_input,
+ bool check_bounds,
+ int characters) {
+ DCHECK(cp_offset < (1 << 30)); // Be sane! (And ensure negation works)
+ if (check_bounds) {
+ if (cp_offset >= 0) {
+ CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ } else {
+ CheckPosition(cp_offset, on_end_of_input);
+ }
+ }
+ LoadCurrentCharacterUnchecked(cp_offset, characters);
+}
+
+void RegExpMacroAssemblerS390::PopCurrentPosition() {
+ Pop(current_input_offset());
+}
+
+void RegExpMacroAssemblerS390::PopRegister(int register_index) {
+ Pop(r2);
+ __ StoreP(r2, register_location(register_index));
+}
+
+void RegExpMacroAssemblerS390::PushBacktrack(Label* label) {
+ if (label->is_bound()) {
+ int target = label->pos();
+ __ mov(r2, Operand(target + Code::kHeaderSize - kHeapObjectTag));
+ } else {
+ masm_->load_label_offset(r2, label);
+ }
+ Push(r2);
+ CheckStackLimit();
+}
+
+void RegExpMacroAssemblerS390::PushCurrentPosition() {
+ Push(current_input_offset());
+}
+
+void RegExpMacroAssemblerS390::PushRegister(int register_index,
+ StackCheckFlag check_stack_limit) {
+ __ LoadP(r2, register_location(register_index), r0);
+ Push(r2);
+ if (check_stack_limit) CheckStackLimit();
+}
+
+void RegExpMacroAssemblerS390::ReadCurrentPositionFromRegister(int reg) {
+ __ LoadP(current_input_offset(), register_location(reg), r0);
+}
+
+void RegExpMacroAssemblerS390::ReadStackPointerFromRegister(int reg) {
+ __ LoadP(backtrack_stackpointer(), register_location(reg), r0);
+ __ LoadP(r2, MemOperand(frame_pointer(), kStackHighEnd));
+ __ AddP(backtrack_stackpointer(), r2);
+}
+
+void RegExpMacroAssemblerS390::SetCurrentPositionFromEnd(int by) {
+ Label after_position;
+ __ CmpP(current_input_offset(), Operand(-by * char_size()));
+ __ bge(&after_position);
+ __ mov(current_input_offset(), Operand(-by * char_size()));
+ // On RegExp code entry (where this operation is used), the character before
+ // the current position is expected to be already loaded.
+ // We have advanced the position, so it's safe to read backwards.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&after_position);
+}
+
+void RegExpMacroAssemblerS390::SetRegister(int register_index, int to) {
+ DCHECK(register_index >= num_saved_registers_); // Reserved for positions!
+ __ mov(r2, Operand(to));
+ __ StoreP(r2, register_location(register_index));
+}
+
+bool RegExpMacroAssemblerS390::Succeed() {
+ __ b(&success_label_);
+ return global();
+}
+
+void RegExpMacroAssemblerS390::WriteCurrentPositionToRegister(int reg,
+ int cp_offset) {
+ if (cp_offset == 0) {
+ __ StoreP(current_input_offset(), register_location(reg));
+ } else {
+ __ AddP(r2, current_input_offset(), Operand(cp_offset * char_size()));
+ __ StoreP(r2, register_location(reg));
+ }
+}
+
+void RegExpMacroAssemblerS390::ClearRegisters(int reg_from, int reg_to) {
+ DCHECK(reg_from <= reg_to);
+ __ LoadP(r2, MemOperand(frame_pointer(), kStringStartMinusOne));
+ for (int reg = reg_from; reg <= reg_to; reg++) {
+ __ StoreP(r2, register_location(reg));
+ }
+}
+
+void RegExpMacroAssemblerS390::WriteStackPointerToRegister(int reg) {
+ __ LoadP(r3, MemOperand(frame_pointer(), kStackHighEnd));
+ __ SubP(r2, backtrack_stackpointer(), r3);
+ __ StoreP(r2, register_location(reg));
+}
+
+// Private methods:
+
+void RegExpMacroAssemblerS390::CallCheckStackGuardState(Register scratch) {
+ static const int num_arguments = 3;
+ __ PrepareCallCFunction(num_arguments, scratch);
+ // RegExp code frame pointer.
+ __ LoadRR(r4, frame_pointer());
+ // Code* of self.
+ __ mov(r3, Operand(masm_->CodeObject()));
+ // r2 becomes return address pointer.
+ __ lay(r2, MemOperand(sp, kStackFrameRASlot * kPointerSize));
+ ExternalReference stack_guard_check =
+ ExternalReference::re_check_stack_guard_state(isolate());
+ CallCFunctionUsingStub(stack_guard_check, num_arguments);
+}
+
+// Helper function for reading a value out of a stack frame.
+template <typename T>
+static T& frame_entry(Address re_frame, int frame_offset) {
+ DCHECK(sizeof(T) == kPointerSize);
+#ifdef V8_TARGET_ARCH_S390X
+ return reinterpret_cast<T&>(Memory::uint64_at(re_frame + frame_offset));
+#else
+ return reinterpret_cast<T&>(Memory::uint32_at(re_frame + frame_offset));
+#endif
+}
+
+template <typename T>
+static T* frame_entry_address(Address re_frame, int frame_offset) {
+ return reinterpret_cast<T*>(re_frame + frame_offset);
+}
+
+int RegExpMacroAssemblerS390::CheckStackGuardState(Address* return_address,
+ Code* re_code,
+ Address re_frame) {
+ return NativeRegExpMacroAssembler::CheckStackGuardState(
+ frame_entry<Isolate*>(re_frame, kIsolate),
+ frame_entry<intptr_t>(re_frame, kStartIndex),
+ frame_entry<intptr_t>(re_frame, kDirectCall) == 1, return_address,
+ re_code, frame_entry_address<String*>(re_frame, kInputString),
+ frame_entry_address<const byte*>(re_frame, kInputStart),
+ frame_entry_address<const byte*>(re_frame, kInputEnd));
+}
+
+MemOperand RegExpMacroAssemblerS390::register_location(int register_index) {
+ DCHECK(register_index < (1 << 30));
+ if (num_registers_ <= register_index) {
+ num_registers_ = register_index + 1;
+ }
+ return MemOperand(frame_pointer(),
+ kRegisterZero - register_index * kPointerSize);
+}
+
+void RegExpMacroAssemblerS390::CheckPosition(int cp_offset,
+ Label* on_outside_input) {
+ if (cp_offset >= 0) {
+ __ CmpP(current_input_offset(), Operand(-cp_offset * char_size()));
+ BranchOrBacktrack(ge, on_outside_input);
+ } else {
+ __ LoadP(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ AddP(r2, current_input_offset(), Operand(cp_offset * char_size()));
+ __ CmpP(r2, r3);
+ BranchOrBacktrack(le, on_outside_input);
+ }
+}
+
+void RegExpMacroAssemblerS390::BranchOrBacktrack(Condition condition, Label* to,
+ CRegister cr) {
+ if (condition == al) { // Unconditional.
+ if (to == NULL) {
+ Backtrack();
+ return;
+ }
+ __ b(to);
+ return;
+ }
+ if (to == NULL) {
+ __ b(condition, &backtrack_label_);
+ return;
+ }
+ __ b(condition, to);
+}
+
+void RegExpMacroAssemblerS390::SafeCall(Label* to, Condition cond,
+ CRegister cr) {
+ Label skip;
+ __ b(NegateCondition(cond), &skip);
+ __ b(r14, to);
+ __ bind(&skip);
+}
+
+void RegExpMacroAssemblerS390::SafeReturn() {
+ __ pop(r14);
+ __ mov(ip, Operand(masm_->CodeObject()));
+ __ AddP(r14, ip);
+ __ Ret();
+}
+
+void RegExpMacroAssemblerS390::SafeCallTarget(Label* name) {
+ __ bind(name);
+ __ CleanseP(r14);
+ __ LoadRR(r0, r14);
+ __ mov(ip, Operand(masm_->CodeObject()));
+ __ SubP(r0, r0, ip);
+ __ push(r0);
+}
+
+void RegExpMacroAssemblerS390::Push(Register source) {
+ DCHECK(!source.is(backtrack_stackpointer()));
+ __ lay(backtrack_stackpointer(),
+ MemOperand(backtrack_stackpointer(), -kPointerSize));
+ __ StoreP(source, MemOperand(backtrack_stackpointer()));
+}
+
+void RegExpMacroAssemblerS390::Pop(Register target) {
+ DCHECK(!target.is(backtrack_stackpointer()));
+ __ LoadP(target, MemOperand(backtrack_stackpointer()));
+ __ la(backtrack_stackpointer(),
+ MemOperand(backtrack_stackpointer(), kPointerSize));
+}
+
+void RegExpMacroAssemblerS390::CheckPreemption() {
+ // Check for preemption.
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ mov(r2, Operand(stack_limit));
+ __ CmpLogicalP(sp, MemOperand(r2));
+ SafeCall(&check_preempt_label_, le);
+}
+
+void RegExpMacroAssemblerS390::CheckStackLimit() {
+ ExternalReference stack_limit =
+ ExternalReference::address_of_regexp_stack_limit(isolate());
+ __ mov(r2, Operand(stack_limit));
+ __ CmpLogicalP(backtrack_stackpointer(), MemOperand(r2));
+ SafeCall(&stack_overflow_label_, le);
+}
+
+void RegExpMacroAssemblerS390::CallCFunctionUsingStub(
+ ExternalReference function, int num_arguments) {
+ // Must pass all arguments in registers. The stub pushes on the stack.
+ DCHECK(num_arguments <= 8);
+ __ mov(code_pointer(), Operand(function));
+ Label ret;
+ __ larl(r14, &ret);
+ __ StoreP(r14, MemOperand(sp, kStackFrameRASlot * kPointerSize));
+ __ b(code_pointer());
+ __ bind(&ret);
+ if (base::OS::ActivationFrameAlignment() > kPointerSize) {
+ __ LoadP(sp, MemOperand(sp, (kNumRequiredStackFrameSlots * kPointerSize)));
+ } else {
+ __ la(sp, MemOperand(sp, (kNumRequiredStackFrameSlots * kPointerSize)));
+ }
+ __ mov(code_pointer(), Operand(masm_->CodeObject()));
+}
+
+bool RegExpMacroAssemblerS390::CanReadUnaligned() {
+ return CpuFeatures::IsSupported(UNALIGNED_ACCESSES) && !slow_safe();
+}
+
+void RegExpMacroAssemblerS390::LoadCurrentCharacterUnchecked(int cp_offset,
+ int characters) {
+ DCHECK(characters == 1);
+ if (mode_ == LATIN1) {
+ __ LoadlB(current_character(),
+ MemOperand(current_input_offset(), end_of_input_address(),
+ cp_offset * char_size()));
+ } else {
+ DCHECK(mode_ == UC16);
+ __ LoadLogicalHalfWordP(
+ current_character(),
+ MemOperand(current_input_offset(), end_of_input_address(),
+ cp_offset * char_size()));
+ }
+}
+
+#undef __
+
+#endif // V8_INTERPRETED_REGEXP
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_S390
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
new file mode 100644
index 0000000000..60ca890f12
--- /dev/null
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
@@ -0,0 +1,216 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_S390_REGEXP_MACRO_ASSEMBLER_S390_H_
+#define V8_S390_REGEXP_MACRO_ASSEMBLER_S390_H_
+
+#include "src/macro-assembler.h"
+#include "src/regexp/regexp-macro-assembler.h"
+#include "src/s390/assembler-s390.h"
+#include "src/s390/frames-s390.h"
+
+namespace v8 {
+namespace internal {
+
+#ifndef V8_INTERPRETED_REGEXP
+class RegExpMacroAssemblerS390 : public NativeRegExpMacroAssembler {
+ public:
+ RegExpMacroAssemblerS390(Isolate* isolate, Zone* zone, Mode mode,
+ int registers_to_save);
+ virtual ~RegExpMacroAssemblerS390();
+ virtual int stack_limit_slack();
+ virtual void AdvanceCurrentPosition(int by);
+ virtual void AdvanceRegister(int reg, int by);
+ virtual void Backtrack();
+ virtual void Bind(Label* label);
+ virtual void CheckAtStart(Label* on_at_start);
+ virtual void CheckCharacter(unsigned c, Label* on_equal);
+ virtual void CheckCharacterAfterAnd(unsigned c, unsigned mask,
+ Label* on_equal);
+ virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
+ virtual void CheckCharacterLT(uc16 limit, Label* on_less);
+ // A "greedy loop" is a loop that is both greedy and with a simple
+ // body. It has a particularly simple implementation.
+ virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
+ virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, bool read_backward,
+ Label* on_no_match);
+ virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ bool read_backward, bool unicode,
+ Label* on_no_match);
+ virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
+ virtual void CheckNotCharacterAfterAnd(unsigned c, unsigned mask,
+ Label* on_not_equal);
+ virtual void CheckNotCharacterAfterMinusAnd(uc16 c, uc16 minus, uc16 mask,
+ Label* on_not_equal);
+ virtual void CheckCharacterInRange(uc16 from, uc16 to, Label* on_in_range);
+ virtual void CheckCharacterNotInRange(uc16 from, uc16 to,
+ Label* on_not_in_range);
+ virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
+
+ // Checks whether the given offset from the current position is before
+ // the end of the string.
+ virtual void CheckPosition(int cp_offset, Label* on_outside_input);
+ virtual bool CheckSpecialCharacterClass(uc16 type, Label* on_no_match);
+ virtual void Fail();
+ virtual Handle<HeapObject> GetCode(Handle<String> source);
+ virtual void GoTo(Label* label);
+ virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
+ virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
+ virtual void IfRegisterEqPos(int reg, Label* if_eq);
+ virtual IrregexpImplementation Implementation();
+ virtual void LoadCurrentCharacter(int cp_offset, Label* on_end_of_input,
+ bool check_bounds = true,
+ int characters = 1);
+ virtual void PopCurrentPosition();
+ virtual void PopRegister(int register_index);
+ virtual void PushBacktrack(Label* label);
+ virtual void PushCurrentPosition();
+ virtual void PushRegister(int register_index,
+ StackCheckFlag check_stack_limit);
+ virtual void ReadCurrentPositionFromRegister(int reg);
+ virtual void ReadStackPointerFromRegister(int reg);
+ virtual void SetCurrentPositionFromEnd(int by);
+ virtual void SetRegister(int register_index, int to);
+ virtual bool Succeed();
+ virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
+ virtual void ClearRegisters(int reg_from, int reg_to);
+ virtual void WriteStackPointerToRegister(int reg);
+ virtual bool CanReadUnaligned();
+
+ // Called from RegExp if the stack-guard is triggered.
+ // If the code object is relocated, the return address is fixed before
+ // returning.
+ static int CheckStackGuardState(Address* return_address, Code* re_code,
+ Address re_frame);
+
+ private:
+ // Offsets from frame_pointer() of function parameters and stored registers.
+ static const int kFramePointer = 0;
+
+ // Above the frame pointer - Stored registers and stack passed parameters.
+ // Register 6-15(sp)
+ static const int kStoredRegisters = kFramePointer;
+ static const int kCallerFrame =
+ kStoredRegisters + kCalleeRegisterSaveAreaSize;
+ // Stack parameters placed by caller.
+ static const int kCaptureArraySize = kCallerFrame;
+ static const int kStackAreaBase = kCallerFrame + kPointerSize;
+ // kDirectCall again
+ static const int kSecondaryReturnAddress = kStackAreaBase + 2 * kPointerSize;
+ static const int kIsolate = kSecondaryReturnAddress + kPointerSize;
+
+ // Below the frame pointer.
+ // Register parameters stored by setup code.
+ static const int kDirectCall = kFramePointer - kPointerSize;
+ static const int kStackHighEnd = kDirectCall - kPointerSize;
+ static const int kNumOutputRegisters = kStackHighEnd - kPointerSize;
+ static const int kRegisterOutput = kNumOutputRegisters - kPointerSize;
+ static const int kInputEnd = kRegisterOutput - kPointerSize;
+ static const int kInputStart = kInputEnd - kPointerSize;
+ static const int kStartIndex = kInputStart - kPointerSize;
+ static const int kInputString = kStartIndex - kPointerSize;
+ // When adding local variables remember to push space for them in
+ // the frame in GetCode.
+ static const int kSuccessfulCaptures = kInputString - kPointerSize;
+ static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ // First register address. Following registers are below it on the stack.
+ static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
+
+ // Initial size of code buffer.
+ static const size_t kRegExpCodeSize = 1024;
+
+ // Load a number of characters at the given offset from the
+ // current position, into the current-character register.
+ void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
+
+ // Check whether preemption has been requested.
+ void CheckPreemption();
+
+ // Check whether we are exceeding the stack limit on the backtrack stack.
+ void CheckStackLimit();
+ void CallCFunctionUsingStub(ExternalReference function, int num_arguments);
+
+ // Generate a call to CheckStackGuardState.
+ void CallCheckStackGuardState(Register scratch);
+
+ // The ebp-relative location of a regexp register.
+ MemOperand register_location(int register_index);
+
+ // Register holding the current input position as negative offset from
+ // the end of the string.
+ inline Register current_input_offset() { return r8; }
+
+ // The register containing the current character after LoadCurrentCharacter.
+ inline Register current_character() { return r9; }
+
+ // Register holding address of the end of the input string.
+ inline Register end_of_input_address() { return r10; }
+
+ // Register holding the frame address. Local variables, parameters and
+ // regexp registers are addressed relative to this.
+ inline Register frame_pointer() { return fp; }
+
+ // The register containing the backtrack stack top. Provides a meaningful
+ // name to the register.
+ inline Register backtrack_stackpointer() { return r13; }
+
+ // Register holding pointer to the current code object.
+ inline Register code_pointer() { return r7; }
+
+ // Byte size of chars in the string to match (decided by the Mode argument)
+ inline int char_size() { return static_cast<int>(mode_); }
+
+ // Equivalent to a conditional branch to the label, unless the label
+ // is NULL, in which case it is a conditional Backtrack.
+ void BranchOrBacktrack(Condition condition, Label* to, CRegister cr = cr7);
+
+ // Call and return internally in the generated code in a way that
+ // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
+ inline void SafeCall(Label* to, Condition cond = al, CRegister cr = cr7);
+ inline void SafeReturn();
+ inline void SafeCallTarget(Label* name);
+
+ // Pushes the value of a register on the backtrack stack. Decrements the
+ // stack pointer by a word size and stores the register's value there.
+ inline void Push(Register source);
+
+ // Pops a value from the backtrack stack. Reads the word at the stack pointer
+ // and increments it by a word size.
+ inline void Pop(Register target);
+
+ Isolate* isolate() const { return masm_->isolate(); }
+
+ MacroAssembler* masm_;
+
+ // Which mode to generate code for (Latin1 or UC16).
+ Mode mode_;
+
+ // One greater than maximal register index actually used.
+ int num_registers_;
+
+ // Number of registers to output at the end (the saved registers
+ // are always 0..num_saved_registers_-1)
+ int num_saved_registers_;
+
+ // Labels used internally.
+ Label entry_label_;
+ Label start_label_;
+ Label success_label_;
+ Label backtrack_label_;
+ Label exit_label_;
+ Label check_preempt_label_;
+ Label stack_overflow_label_;
+ Label internal_failure_label_;
+};
+
+// Set of non-volatile registers saved/restored by generated regexp code.
+const RegList kRegExpCalleeSaved =
+ 1 << 6 | 1 << 7 | 1 << 8 | 1 << 9 | 1 << 10 | 1 << 11 | 1 << 13;
+
+#endif // V8_INTERPRETED_REGEXP
+} // namespace internal
+} // namespace v8
+
+#endif // V8_S390_REGEXP_MACRO_ASSEMBLER_S390_H_
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
index 952034fb0c..5d73b436f8 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -1008,7 +1008,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
Handle<Code> code = isolate->factory()->NewCode(
code_desc, Code::ComputeFlags(Code::REGEXP),
masm_.CodeObject());
- PROFILE(isolate, RegExpCodeCreateEvent(*code, *source));
+ PROFILE(isolate, RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
return Handle<HeapObject>::cast(code);
}
diff --git a/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc b/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc
index 6e6209282f..9f15b1c952 100644
--- a/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc
+++ b/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc
@@ -510,7 +510,8 @@ void RegExpMacroAssemblerX87::CheckBitInTable(
__ and_(ebx, current_character());
index = ebx;
}
- __ cmpb(FieldOperand(eax, index, times_1, ByteArray::kHeaderSize), 0);
+ __ cmpb(FieldOperand(eax, index, times_1, ByteArray::kHeaderSize),
+ Immediate(0));
BranchOrBacktrack(not_equal, on_bit_set);
}
@@ -935,7 +936,8 @@ Handle<HeapObject> RegExpMacroAssemblerX87::GetCode(Handle<String> source) {
isolate()->factory()->NewCode(code_desc,
Code::ComputeFlags(Code::REGEXP),
masm_->CodeObject());
- PROFILE(isolate(), RegExpCodeCreateEvent(*code, *source));
+ PROFILE(masm_->isolate(),
+ RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
return Handle<HeapObject>::cast(code);
}
diff --git a/deps/v8/src/register-configuration.cc b/deps/v8/src/register-configuration.cc
index 6b1655a81b..2df825afc0 100644
--- a/deps/v8/src/register-configuration.cc
+++ b/deps/v8/src/register-configuration.cc
@@ -91,6 +91,10 @@ class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
kMaxAllocatableGeneralRegisterCount,
kMaxAllocatableDoubleRegisterCount,
kMaxAllocatableDoubleRegisterCount,
+#elif V8_TARGET_ARCH_S390
+ kMaxAllocatableGeneralRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
#else
#error Unsupported target architecture.
#endif
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index e17cbb1d6b..b76785deeb 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -90,7 +90,8 @@ static void GetICCounts(SharedFunctionInfo* shared,
void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
- if (FLAG_trace_opt && function->PassesFilter(FLAG_hydrogen_filter)) {
+ if (FLAG_trace_opt &&
+ function->shared()->PassesFilter(FLAG_hydrogen_filter)) {
PrintF("[marking ");
function->ShortPrint();
PrintF(" for recompilation, reason: %s", reason);
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index f651ed40e1..ab436c2237 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -5,11 +5,12 @@
#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
+#include "src/code-stubs.h"
#include "src/conversions-inl.h"
#include "src/elements.h"
#include "src/factory.h"
#include "src/isolate-inl.h"
-#include "src/key-accumulator.h"
+#include "src/keys.h"
#include "src/messages.h"
#include "src/prototype.h"
@@ -29,17 +30,20 @@ RUNTIME_FUNCTION(Runtime_FinishArrayPrototypeSetup) {
return Smi::FromInt(0);
}
-
-static void InstallBuiltin(Isolate* isolate, Handle<JSObject> holder,
- const char* name, Builtins::Name builtin_name) {
+static void InstallCode(Isolate* isolate, Handle<JSObject> holder,
+ const char* name, Handle<Code> code) {
Handle<String> key = isolate->factory()->InternalizeUtf8String(name);
- Handle<Code> code(isolate->builtins()->builtin(builtin_name));
Handle<JSFunction> optimized =
isolate->factory()->NewFunctionWithoutPrototype(key, code);
optimized->shared()->DontAdaptArguments();
JSObject::AddProperty(holder, key, optimized, NONE);
}
+static void InstallBuiltin(Isolate* isolate, Handle<JSObject> holder,
+ const char* name, Builtins::Name builtin_name) {
+ InstallCode(isolate, holder, name,
+ handle(isolate->builtins()->builtin(builtin_name), isolate));
+}
RUNTIME_FUNCTION(Runtime_SpecialArrayFunctions) {
HandleScope scope(isolate);
@@ -48,7 +52,8 @@ RUNTIME_FUNCTION(Runtime_SpecialArrayFunctions) {
isolate->factory()->NewJSObject(isolate->object_function());
InstallBuiltin(isolate, holder, "pop", Builtins::kArrayPop);
- InstallBuiltin(isolate, holder, "push", Builtins::kArrayPush);
+ FastArrayPushStub stub(isolate);
+ InstallCode(isolate, holder, "push", stub.GetCode());
InstallBuiltin(isolate, holder, "shift", Builtins::kArrayShift);
InstallBuiltin(isolate, holder, "unshift", Builtins::kArrayUnshift);
InstallBuiltin(isolate, holder, "slice", Builtins::kArraySlice);
@@ -88,29 +93,6 @@ RUNTIME_FUNCTION(Runtime_TransitionElementsKind) {
}
-// Push an object unto an array of objects if it is not already in the
-// array. Returns true if the element was pushed on the stack and
-// false otherwise.
-RUNTIME_FUNCTION(Runtime_PushIfAbsent) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, element, 1);
- RUNTIME_ASSERT(array->HasFastSmiOrObjectElements());
- int length = Smi::cast(array->length())->value();
- FixedArray* elements = FixedArray::cast(array->elements());
- for (int i = 0; i < length; i++) {
- if (elements->get(i) == *element) return isolate->heap()->false_value();
- }
-
- // Strict not needed. Used for cycle detection in Array join implementation.
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, JSObject::AddDataElement(array, length, element, NONE));
- JSObject::ValidateElements(array);
- return isolate->heap()->true_value();
-}
-
-
// Moves all own elements of an object, that are below a limit, to positions
// starting at zero. All undefined values are placed after non-undefined values,
// and are followed by non-existing element. Does not change the length
@@ -234,12 +216,19 @@ RUNTIME_FUNCTION(Runtime_GetArrayKeys) {
JSObject::CollectOwnElementKeys(current, &accumulator, ALL_PROPERTIES);
}
// Erase any keys >= length.
- // TODO(adamk): Remove this step when the contract of %GetArrayKeys
- // is changed to let this happen on the JS side.
Handle<FixedArray> keys = accumulator.GetKeys(KEEP_NUMBERS);
+ int j = 0;
for (int i = 0; i < keys->length(); i++) {
- if (NumberToUint32(keys->get(i)) >= length) keys->set_undefined(i);
+ if (NumberToUint32(keys->get(i)) >= length) continue;
+ if (i != j) keys->set(j, keys->get(i));
+ j++;
}
+
+ if (j != keys->length()) {
+ isolate->heap()->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
+ *keys, keys->length() - j);
+ }
+
return *isolate->factory()->NewJSArrayWithElements(keys);
}
@@ -383,7 +372,6 @@ RUNTIME_FUNCTION(Runtime_ArrayConstructor) {
caller_args);
}
-
RUNTIME_FUNCTION(Runtime_InternalArrayConstructor) {
HandleScope scope(isolate);
Arguments empty_args(0, NULL);
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index e27685dd3f..3f102256bf 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -124,14 +124,6 @@ static MaybeHandle<Object> DefineClass(Isolate* isolate,
Handle<Map> map =
isolate->factory()->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
map->set_is_prototype_map(true);
- if (constructor->map()->is_strong()) {
- map->set_is_strong();
- if (super_class->IsNull()) {
- // Strong class is not permitted to extend null.
- THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kStrongExtendNull),
- Object);
- }
- }
Map::SetPrototype(map, prototype_parent);
map->SetConstructor(*constructor);
Handle<JSObject> prototype = isolate->factory()->NewJSObjectFromMap(map);
@@ -206,19 +198,7 @@ RUNTIME_FUNCTION(Runtime_FinalizeClassDefinition) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSObject, constructor, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, prototype, 1);
-
JSObject::MigrateSlowToFast(constructor, 0, "RuntimeToFastProperties");
-
- if (constructor->map()->is_strong()) {
- DCHECK(prototype->map()->is_strong());
- MAYBE_RETURN(JSReceiver::SetIntegrityLevel(prototype, FROZEN,
- Object::THROW_ON_ERROR),
- isolate->heap()->exception());
- MAYBE_RETURN(JSReceiver::SetIntegrityLevel(constructor, FROZEN,
- Object::THROW_ON_ERROR),
- isolate->heap()->exception());
- }
return *constructor;
}
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index 263c4f9e77..89a6fa15d2 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -21,6 +21,7 @@ RUNTIME_FUNCTION(Runtime_CompileLazy) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+
#ifdef DEBUG
if (FLAG_trace_lazy && !function->shared()->is_compiled()) {
PrintF("[unoptimized: ");
@@ -28,63 +29,28 @@ RUNTIME_FUNCTION(Runtime_CompileLazy) {
PrintF("]\n");
}
#endif
- StackLimitCheck check(isolate);
- if (check.JsHasOverflowed(1 * KB)) return isolate->StackOverflow();
- // Compile the target function.
- DCHECK(function->shared()->allows_lazy_compilation());
-
- Handle<Code> code;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, code,
- Compiler::GetLazyCode(function));
- DCHECK(code->IsJavaScriptCode());
-
- function->ReplaceCode(*code);
- return *code;
-}
-
-
-namespace {
-
-Object* CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
- Compiler::ConcurrencyMode mode) {
StackLimitCheck check(isolate);
if (check.JsHasOverflowed(1 * KB)) return isolate->StackOverflow();
-
- Handle<Code> code;
- if (Compiler::GetOptimizedCode(function, mode).ToHandle(&code)) {
- // Optimization succeeded, return optimized code.
- function->ReplaceCode(*code);
- } else {
- // Optimization failed, get unoptimized code.
- if (isolate->has_pending_exception()) { // Possible stack overflow.
- return isolate->heap()->exception();
- }
- code = Handle<Code>(function->shared()->code(), isolate);
- if (code->kind() != Code::FUNCTION &&
- code->kind() != Code::OPTIMIZED_FUNCTION) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, code, Compiler::GetUnoptimizedCode(function));
- }
- function->ReplaceCode(*code);
+ if (!Compiler::Compile(function, Compiler::KEEP_EXCEPTION)) {
+ return isolate->heap()->exception();
}
-
- DCHECK(function->code()->kind() == Code::FUNCTION ||
- function->code()->kind() == Code::OPTIMIZED_FUNCTION ||
- (function->code()->is_interpreter_entry_trampoline() &&
- function->shared()->HasBytecodeArray()) ||
- function->IsInOptimizationQueue());
+ DCHECK(function->is_compiled());
return function->code();
}
-} // namespace
-
RUNTIME_FUNCTION(Runtime_CompileOptimized_Concurrent) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- return CompileOptimized(isolate, function, Compiler::CONCURRENT);
+ StackLimitCheck check(isolate);
+ if (check.JsHasOverflowed(1 * KB)) return isolate->StackOverflow();
+ if (!Compiler::CompileOptimized(function, Compiler::CONCURRENT)) {
+ return isolate->heap()->exception();
+ }
+ DCHECK(function->is_compiled());
+ return function->code();
}
@@ -92,7 +58,13 @@ RUNTIME_FUNCTION(Runtime_CompileOptimized_NotConcurrent) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- return CompileOptimized(isolate, function, Compiler::NOT_CONCURRENT);
+ StackLimitCheck check(isolate);
+ if (check.JsHasOverflowed(1 * KB)) return isolate->StackOverflow();
+ if (!Compiler::CompileOptimized(function, Compiler::NOT_CONCURRENT)) {
+ return isolate->heap()->exception();
+ }
+ DCHECK(function->is_compiled());
+ return function->code();
}
@@ -150,10 +122,6 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
deoptimizer->MaterializeHeapObjects(&it);
delete deoptimizer;
- JavaScriptFrame* frame = it.frame();
- RUNTIME_ASSERT(frame->function()->IsJSFunction());
- DCHECK(frame->function() == *function);
-
// Ensure the context register is updated for materialized objects.
JavaScriptFrameIterator top_it(isolate);
JavaScriptFrame* top_frame = top_it.frame();
@@ -163,7 +131,10 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
return isolate->heap()->undefined_value();
}
- // Search for other activations of the same function and code.
+ // Search for other activations of the same optimized code.
+ // At this point {it} is at the topmost frame of all the frames materialized
+ // by the deoptimizer. Note that this frame does not necessarily represent
+ // an activation of {function} because of potential inlined tail-calls.
ActivationsFinder activations_finder(*optimized_code);
activations_finder.VisitFrames(&it);
isolate->thread_manager()->IterateArchivedThreads(&activations_finder);
@@ -240,59 +211,17 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
DCHECK(caller_code->contains(frame->pc()));
#endif // DEBUG
-
BailoutId ast_id = caller_code->TranslatePcOffsetToAstId(pc_offset);
DCHECK(!ast_id.IsNone());
- // Disable concurrent OSR for asm.js, to enable frame specialization.
- Compiler::ConcurrencyMode mode = (isolate->concurrent_osr_enabled() &&
- !function->shared()->asm_function() &&
- function->shared()->ast_node_count() > 512)
- ? Compiler::CONCURRENT
- : Compiler::NOT_CONCURRENT;
-
- OptimizedCompileJob* job = NULL;
- if (mode == Compiler::CONCURRENT) {
- // Gate the OSR entry with a stack check.
- BackEdgeTable::AddStackCheck(caller_code, pc_offset);
- // Poll already queued compilation jobs.
- OptimizingCompileDispatcher* dispatcher =
- isolate->optimizing_compile_dispatcher();
- if (dispatcher->IsQueuedForOSR(function, ast_id)) {
- if (FLAG_trace_osr) {
- PrintF("[OSR - Still waiting for queued: ");
- function->PrintName();
- PrintF(" at AST id %d]\n", ast_id.ToInt());
- }
- return NULL;
- }
-
- job = dispatcher->FindReadyOSRCandidate(function, ast_id);
- }
-
MaybeHandle<Code> maybe_result;
- if (job != NULL) {
- if (FLAG_trace_osr) {
- PrintF("[OSR - Found ready: ");
- function->PrintName();
- PrintF(" at AST id %d]\n", ast_id.ToInt());
- }
- maybe_result = Compiler::GetConcurrentlyOptimizedCode(job);
- } else if (IsSuitableForOnStackReplacement(isolate, function)) {
+ if (IsSuitableForOnStackReplacement(isolate, function)) {
if (FLAG_trace_osr) {
PrintF("[OSR - Compiling: ");
function->PrintName();
PrintF(" at AST id %d]\n", ast_id.ToInt());
}
- maybe_result = Compiler::GetOptimizedCode(
- function, mode, ast_id,
- (mode == Compiler::NOT_CONCURRENT) ? frame : nullptr);
- Handle<Code> result;
- if (maybe_result.ToHandle(&result) &&
- result.is_identical_to(isolate->builtins()->InOptimizationQueue())) {
- // Optimization is queued. Return to check later.
- return NULL;
- }
+ maybe_result = Compiler::GetOptimizedCodeForOSR(function, ast_id, frame);
}
// Revert the patched back edge table, regardless of whether OSR succeeds.
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index c29ea9a35d..ad8375a8d4 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -5,11 +5,13 @@
#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
-#include "src/debug/debug.h"
#include "src/debug/debug-evaluate.h"
#include "src/debug/debug-frames.h"
#include "src/debug/debug-scopes.h"
+#include "src/debug/debug.h"
#include "src/frames-inl.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
#include "src/runtime/runtime.h"
@@ -18,11 +20,39 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_DebugBreak) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 0);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
+ isolate->debug()->set_return_value(value);
+
// Get the top-most JavaScript frame.
JavaScriptFrameIterator it(isolate);
- isolate->debug()->Break(args, it.frame());
- return isolate->debug()->SetAfterBreakTarget(it.frame());
+ isolate->debug()->Break(it.frame());
+
+ isolate->debug()->SetAfterBreakTarget(it.frame());
+ return *isolate->debug()->return_value();
+}
+
+RUNTIME_FUNCTION(Runtime_DebugBreakOnBytecode) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
+ isolate->debug()->set_return_value(value);
+
+ // Get the top-most JavaScript frame.
+ JavaScriptFrameIterator it(isolate);
+ isolate->debug()->Break(it.frame());
+
+ // Return the handler from the original bytecode array.
+ DCHECK(it.frame()->is_interpreted());
+ InterpretedFrame* interpreted_frame =
+ reinterpret_cast<InterpretedFrame*>(it.frame());
+ SharedFunctionInfo* shared = interpreted_frame->function()->shared();
+ BytecodeArray* bytecode_array = shared->bytecode_array();
+ int bytecode_offset = interpreted_frame->GetBytecodeOffset();
+ interpreter::Bytecode bytecode =
+ interpreter::Bytecodes::FromByte(bytecode_array->get(bytecode_offset));
+ return isolate->interpreter()->GetBytecodeHandler(
+ bytecode, interpreter::OperandScale::kSingle);
}
@@ -302,8 +332,8 @@ RUNTIME_FUNCTION(Runtime_DebugGetPropertyDetails) {
if (name->AsArrayIndex(&index)) {
Handle<FixedArray> details = isolate->factory()->NewFixedArray(2);
Handle<Object> element_or_char;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, element_or_char,
- Object::GetElement(isolate, obj, index));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, element_or_char, JSReceiver::GetElement(isolate, obj, index));
details->set(0, *element_or_char);
details->set(1, PropertyDetails::Empty().AsSmi());
return *isolate->factory()->NewJSArrayWithElements(details);
@@ -418,8 +448,8 @@ RUNTIME_FUNCTION(Runtime_DebugIndexedInterceptorElementValue) {
RUNTIME_ASSERT(obj->HasIndexedInterceptor());
CONVERT_NUMBER_CHECKED(uint32_t, index, Uint32, args[1]);
Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- Object::GetElement(isolate, obj, index));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, JSReceiver::GetElement(isolate, obj, index));
return *result;
}
@@ -554,7 +584,11 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
// Use the value from the stack.
if (scope_info->LocalIsSynthetic(i)) continue;
locals->set(local * 2, scope_info->LocalName(i));
- locals->set(local * 2 + 1, *(frame_inspector.GetExpression(i)));
+ Handle<Object> value = frame_inspector.GetExpression(i);
+ // TODO(yangguo): We convert optimized out values to {undefined} when they
+ // are passed to the debugger. Eventually we should handle them somehow.
+ if (value->IsOptimizedOut()) value = isolate->factory()->undefined_value();
+ locals->set(local * 2 + 1, *value);
local++;
}
if (local < local_count) {
@@ -587,31 +621,7 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
// to the frame information.
Handle<Object> return_value = isolate->factory()->undefined_value();
if (at_return) {
- StackFrameIterator it2(isolate);
- Address internal_frame_sp = NULL;
- while (!it2.done()) {
- if (it2.frame()->is_internal()) {
- internal_frame_sp = it2.frame()->sp();
- } else {
- if (it2.frame()->is_java_script()) {
- if (it2.frame()->id() == it.frame()->id()) {
- // The internal frame just before the JavaScript frame contains the
- // value to return on top. A debug break at return will create an
- // internal frame to store the return value (eax/rax/r0) before
- // entering the debug break exit frame.
- if (internal_frame_sp != NULL) {
- return_value =
- Handle<Object>(Memory::Object_at(internal_frame_sp), isolate);
- break;
- }
- }
- }
-
- // Indicate that the previous frame was not an internal frame.
- internal_frame_sp = NULL;
- }
- it2.Advance();
- }
+ return_value = isolate->debug()->return_value();
}
// Now advance to the arguments adapter frame (if any). It contains all
@@ -740,33 +750,6 @@ RUNTIME_FUNCTION(Runtime_GetScopeCount) {
}
-// Returns the list of step-in positions (text offset) in a function of the
-// stack frame in a range from the current debug break position to the end
-// of the corresponding statement.
-RUNTIME_FUNCTION(Runtime_GetStepInPositions) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
- RUNTIME_ASSERT(isolate->debug()->CheckExecutionState(break_id));
-
- CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
-
- // Get the frame where the debugging is performed.
- StackFrame::Id id = DebugFrameHelper::UnwrapFrameId(wrapped_id);
- JavaScriptFrameIterator frame_it(isolate, id);
- RUNTIME_ASSERT(!frame_it.done());
-
- List<int> positions;
- isolate->debug()->GetStepinPositions(frame_it.frame(), id, &positions);
- Factory* factory = isolate->factory();
- Handle<FixedArray> array = factory->NewFixedArray(positions.length());
- for (int i = 0; i < positions.length(); ++i) {
- array->set(i, Smi::FromInt(positions[i]));
- }
- return *factory->NewJSArrayWithElements(array, FAST_SMI_ELEMENTS);
-}
-
-
// Return an array with scope details
// args[0]: number: break id
// args[1]: number: frame index
@@ -1652,15 +1635,6 @@ RUNTIME_FUNCTION(Runtime_DebugPopPromise) {
}
-RUNTIME_FUNCTION(Runtime_DebugPromiseEvent) {
- DCHECK(args.length() == 1);
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, data, 0);
- isolate->debug()->OnPromiseEvent(data);
- return isolate->heap()->undefined_value();
-}
-
-
RUNTIME_FUNCTION(Runtime_DebugAsyncTaskEvent) {
DCHECK(args.length() == 1);
HandleScope scope(isolate);
diff --git a/deps/v8/src/runtime/runtime-forin.cc b/deps/v8/src/runtime/runtime-forin.cc
index c44945c94c..4b558d124f 100644
--- a/deps/v8/src/runtime/runtime-forin.cc
+++ b/deps/v8/src/runtime/runtime-forin.cc
@@ -5,8 +5,10 @@
#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
+#include "src/elements.h"
#include "src/factory.h"
#include "src/isolate-inl.h"
+#include "src/keys.h"
#include "src/objects-inl.h"
namespace v8 {
@@ -20,30 +22,82 @@ namespace {
// deletions during a for-in.
MaybeHandle<HeapObject> Enumerate(Handle<JSReceiver> receiver) {
Isolate* const isolate = receiver->GetIsolate();
+ FastKeyAccumulator accumulator(isolate, receiver, INCLUDE_PROTOS,
+ ENUMERABLE_STRINGS);
+ accumulator.set_filter_proxy_keys(false);
// Test if we have an enum cache for {receiver}.
- if (!receiver->IsSimpleEnum()) {
+ if (!accumulator.is_receiver_simple_enum()) {
Handle<FixedArray> keys;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, keys,
- JSReceiver::GetKeys(receiver, INCLUDE_PROTOS, ENUMERABLE_STRINGS),
- HeapObject);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, keys, accumulator.GetKeys(KEEP_NUMBERS),
+ HeapObject);
// Test again, since cache may have been built by GetKeys() calls above.
- if (!receiver->IsSimpleEnum()) return keys;
+ if (!accumulator.is_receiver_simple_enum()) return keys;
}
return handle(receiver->map(), isolate);
}
+// This is a slight modifcation of JSReceiver::HasProperty, dealing with
+// the oddities of JSProxy in for-in filter.
+MaybeHandle<Object> HasEnumerableProperty(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ Handle<Object> key) {
+ bool success = false;
+ Maybe<PropertyAttributes> result = Just(ABSENT);
+ LookupIterator it =
+ LookupIterator::PropertyOrElement(isolate, receiver, key, &success);
+ if (!success) return isolate->factory()->undefined_value();
+ for (; it.IsFound(); it.Next()) {
+ switch (it.state()) {
+ case LookupIterator::NOT_FOUND:
+ case LookupIterator::TRANSITION:
+ UNREACHABLE();
+ case LookupIterator::JSPROXY: {
+ // For proxies we have to invoke the [[GetOwnProperty]] trap.
+ result = JSProxy::GetPropertyAttributes(&it);
+ if (result.IsNothing()) return MaybeHandle<Object>();
+ if (result.FromJust() == ABSENT) {
+ // Continue lookup on the proxy's prototype.
+ Handle<JSProxy> proxy = it.GetHolder<JSProxy>();
+ Handle<Object> prototype;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, prototype,
+ JSProxy::GetPrototype(proxy), Object);
+ if (prototype->IsNull()) break;
+ // We already have a stack-check in JSProxy::GetPrototype.
+ return HasEnumerableProperty(
+ isolate, Handle<JSReceiver>::cast(prototype), key);
+ } else if (result.FromJust() & DONT_ENUM) {
+ return isolate->factory()->undefined_value();
+ } else {
+ return it.GetName();
+ }
+ }
+ case LookupIterator::INTERCEPTOR: {
+ result = JSObject::GetPropertyAttributesWithInterceptor(&it);
+ if (result.IsNothing()) return MaybeHandle<Object>();
+ if (result.FromJust() != ABSENT) return it.GetName();
+ continue;
+ }
+ case LookupIterator::ACCESS_CHECK: {
+ if (it.HasAccess()) continue;
+ result = JSObject::GetPropertyAttributesWithFailedAccessCheck(&it);
+ if (result.IsNothing()) return MaybeHandle<Object>();
+ if (result.FromJust() != ABSENT) return it.GetName();
+ return isolate->factory()->undefined_value();
+ }
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ // TypedArray out-of-bounds access.
+ return isolate->factory()->undefined_value();
+ case LookupIterator::ACCESSOR:
+ case LookupIterator::DATA:
+ return it.GetName();
+ }
+ }
+ return isolate->factory()->undefined_value();
+}
MaybeHandle<Object> Filter(Handle<JSReceiver> receiver, Handle<Object> key) {
Isolate* const isolate = receiver->GetIsolate();
- // TODO(turbofan): Fast case for array indices.
- Handle<Name> name;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, name, Object::ToName(isolate, key),
- Object);
- Maybe<bool> result = JSReceiver::HasProperty(receiver, name);
- MAYBE_RETURN_NULL(result);
- if (result.FromJust()) return name;
- return isolate->factory()->undefined_value();
+ return HasEnumerableProperty(isolate, receiver, key);
}
} // namespace
diff --git a/deps/v8/src/runtime/runtime-function.cc b/deps/v8/src/runtime/runtime-function.cc
index 47a21f8f9b..011f9ff820 100644
--- a/deps/v8/src/runtime/runtime-function.cc
+++ b/deps/v8/src/runtime/runtime-function.cc
@@ -105,6 +105,14 @@ RUNTIME_FUNCTION(Runtime_FunctionGetPositionForOffset) {
return Smi::FromInt(abstract_code->SourcePosition(offset));
}
+RUNTIME_FUNCTION(Runtime_FunctionGetContextData) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 1);
+
+ CONVERT_ARG_CHECKED(JSFunction, fun, 0);
+ FixedArray* array = fun->native_context()->embedder_data();
+ return array->get(v8::Context::kDebugIdIndex);
+}
RUNTIME_FUNCTION(Runtime_FunctionSetInstanceClassName) {
SealHandleScope shs(isolate);
@@ -162,7 +170,7 @@ RUNTIME_FUNCTION(Runtime_SetCode) {
Handle<SharedFunctionInfo> target_shared(target->shared());
Handle<SharedFunctionInfo> source_shared(source->shared());
- if (!Compiler::Compile(source, KEEP_EXCEPTION)) {
+ if (!Compiler::Compile(source, Compiler::KEEP_EXCEPTION)) {
return isolate->heap()->exception();
}
@@ -177,7 +185,7 @@ RUNTIME_FUNCTION(Runtime_SetCode) {
// of the target shared function info.
target_shared->ReplaceCode(source_shared->code());
if (source_shared->HasBytecodeArray()) {
- target_shared->set_function_data(source_shared->bytecode_array());
+ target_shared->set_bytecode_array(source_shared->bytecode_array());
}
target_shared->set_scope_info(source_shared->scope_info());
target_shared->set_length(source_shared->length());
@@ -213,8 +221,8 @@ RUNTIME_FUNCTION(Runtime_SetCode) {
if (isolate->logger()->is_logging_code_events() ||
isolate->cpu_profiler()->is_profiling()) {
- isolate->logger()->LogExistingFunction(source_shared,
- Handle<Code>(source_shared->code()));
+ isolate->logger()->LogExistingFunction(
+ source_shared, Handle<AbstractCode>(source_shared->abstract_code()));
}
return *target;
@@ -262,7 +270,7 @@ RUNTIME_FUNCTION(Runtime_Call) {
HandleScope scope(isolate);
DCHECK_LE(2, args.length());
int const argc = args.length() - 2;
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, target, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, target, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 1);
ScopedVector<Handle<Object>> argv(argc);
for (int i = 0; i < argc; ++i) {
@@ -276,61 +284,6 @@ RUNTIME_FUNCTION(Runtime_Call) {
}
-RUNTIME_FUNCTION(Runtime_TailCall) {
- HandleScope scope(isolate);
- DCHECK_LE(2, args.length());
- int const argc = args.length() - 2;
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, target, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 1);
- ScopedVector<Handle<Object>> argv(argc);
- for (int i = 0; i < argc; ++i) {
- argv[i] = args.at<Object>(2 + i);
- }
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Execution::Call(isolate, target, receiver, argc, argv.start()));
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_Apply) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 5);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, fun, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, arguments, 2);
- CONVERT_INT32_ARG_CHECKED(offset, 3);
- CONVERT_INT32_ARG_CHECKED(argc, 4);
- RUNTIME_ASSERT(offset >= 0);
- // Loose upper bound to allow fuzzing. We'll most likely run out of
- // stack space before hitting this limit.
- static int kMaxArgc = 1000000;
- RUNTIME_ASSERT(argc >= 0 && argc <= kMaxArgc);
-
- // If there are too many arguments, allocate argv via malloc.
- const int argv_small_size = 10;
- Handle<Object> argv_small_buffer[argv_small_size];
- base::SmartArrayPointer<Handle<Object> > argv_large_buffer;
- Handle<Object>* argv = argv_small_buffer;
- if (argc > argv_small_size) {
- argv = new Handle<Object>[argc];
- if (argv == NULL) return isolate->StackOverflow();
- argv_large_buffer = base::SmartArrayPointer<Handle<Object> >(argv);
- }
-
- for (int i = 0; i < argc; ++i) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, argv[i], Object::GetElement(isolate, arguments, offset + i));
- }
-
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, Execution::Call(isolate, fun, receiver, argc, argv));
- return *result;
-}
-
-
// ES6 section 9.2.1.2, OrdinaryCallBindThis for sloppy callee.
RUNTIME_FUNCTION(Runtime_ConvertReceiver) {
HandleScope scope(isolate);
@@ -351,14 +304,6 @@ RUNTIME_FUNCTION(Runtime_IsFunction) {
}
-RUNTIME_FUNCTION(Runtime_ThrowStrongModeTooFewArguments) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 0);
- THROW_NEW_ERROR_RETURN_FAILURE(isolate,
- NewTypeError(MessageTemplate::kStrongArity));
-}
-
-
RUNTIME_FUNCTION(Runtime_FunctionToString) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
diff --git a/deps/v8/src/runtime/runtime-generator.cc b/deps/v8/src/runtime/runtime-generator.cc
index dab0621592..181b5f9540 100644
--- a/deps/v8/src/runtime/runtime-generator.cc
+++ b/deps/v8/src/runtime/runtime-generator.cc
@@ -43,6 +43,8 @@ RUNTIME_FUNCTION(Runtime_SuspendJSGeneratorObject) {
JavaScriptFrame* frame = stack_iterator.frame();
RUNTIME_ASSERT(frame->function()->shared()->is_generator());
DCHECK_EQ(frame->function(), generator_object->function());
+ DCHECK(frame->function()->shared()->is_compiled());
+ DCHECK(!frame->function()->IsOptimized());
// The caller should have saved the context and continuation already.
DCHECK_EQ(generator_object->context(), Context::cast(frame->context()));
@@ -88,18 +90,18 @@ RUNTIME_FUNCTION(Runtime_ResumeJSGeneratorObject) {
JavaScriptFrame* frame = stack_iterator.frame();
DCHECK_EQ(frame->function(), generator_object->function());
- DCHECK(frame->function()->is_compiled());
+ DCHECK(frame->function()->shared()->is_compiled());
+ DCHECK(!frame->function()->IsOptimized());
STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
- Address pc = generator_object->function()->code()->instruction_start();
+ Code* code = generator_object->function()->shared()->code();
int offset = generator_object->continuation();
- DCHECK(offset > 0);
- frame->set_pc(pc + offset);
+ DCHECK_GT(offset, 0);
+ frame->set_pc(code->instruction_start() + offset);
if (FLAG_enable_embedded_constant_pool) {
- frame->set_constant_pool(
- generator_object->function()->code()->constant_pool());
+ frame->set_constant_pool(code->constant_pool());
}
generator_object->set_continuation(JSGeneratorObject::kGeneratorExecuting);
@@ -148,16 +150,6 @@ RUNTIME_FUNCTION(Runtime_GeneratorGetFunction) {
}
-// Returns context of generator activation.
-RUNTIME_FUNCTION(Runtime_GeneratorGetContext) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
-
- return generator->context();
-}
-
-
// Returns receiver of generator activation.
RUNTIME_FUNCTION(Runtime_GeneratorGetReceiver) {
HandleScope scope(isolate);
@@ -203,26 +195,23 @@ RUNTIME_FUNCTION(Runtime_GeneratorGetSourcePosition) {
return isolate->heap()->undefined_value();
}
-
-// Optimization for the following three functions is disabled in
-// js/generator.js and compiler/ast-graph-builder.cc.
-
+// Optimization for builtins calling any of the following three functions is
+// disabled in js/generator.js and compiler.cc, hence they are unreachable.
RUNTIME_FUNCTION(Runtime_GeneratorNext) {
UNREACHABLE();
return nullptr;
}
-
RUNTIME_FUNCTION(Runtime_GeneratorReturn) {
UNREACHABLE();
return nullptr;
}
-
RUNTIME_FUNCTION(Runtime_GeneratorThrow) {
UNREACHABLE();
return nullptr;
}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-i18n.cc b/deps/v8/src/runtime/runtime-i18n.cc
index e57f8d3626..27f970bdb4 100644
--- a/deps/v8/src/runtime/runtime-i18n.cc
+++ b/deps/v8/src/runtime/runtime-i18n.cc
@@ -158,8 +158,8 @@ RUNTIME_FUNCTION(Runtime_GetLanguageTagVariants) {
Handle<Name> base = factory->NewStringFromStaticChars("base");
for (unsigned int i = 0; i < length; ++i) {
Handle<Object> locale_id;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, locale_id,
- Object::GetElement(isolate, input, i));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, locale_id, JSReceiver::GetElement(isolate, input, i));
if (!locale_id->IsString()) {
return isolate->Throw(*factory->illegal_argument_string());
}
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index 0ca2e84d3d..d871fc7f5a 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -171,14 +171,6 @@ RUNTIME_FUNCTION(Runtime_ThrowIteratorResultNotAnObject) {
}
-RUNTIME_FUNCTION(Runtime_ThrowStrongModeImplicitConversion) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 0);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kStrongImplicitConversion));
-}
-
-
RUNTIME_FUNCTION(Runtime_ThrowApplyNonFunction) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -271,7 +263,7 @@ RUNTIME_FUNCTION(Runtime_AllocateInTargetSpace) {
RUNTIME_FUNCTION(Runtime_CollectStackTrace) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, error_object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, error_object, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, caller, 1);
if (!isolate->bootstrapper()->IsActive()) {
@@ -317,7 +309,6 @@ RUNTIME_FUNCTION(Runtime_FormatMessageString) {
return *result;
}
-
#define CALLSITE_GET(NAME, RETURN) \
RUNTIME_FUNCTION(Runtime_CallSite##NAME##RT) { \
HandleScope scope(isolate); \
@@ -325,7 +316,7 @@ RUNTIME_FUNCTION(Runtime_FormatMessageString) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, call_site_obj, 0); \
Handle<String> result; \
CallSite call_site(isolate, call_site_obj); \
- RUNTIME_ASSERT(call_site.IsValid()) \
+ RUNTIME_ASSERT(call_site.IsValid()); \
return RETURN(call_site.NAME(), isolate); \
}
@@ -366,18 +357,6 @@ RUNTIME_FUNCTION(Runtime_IS_VAR) {
}
-RUNTIME_FUNCTION(Runtime_IncrementStatsCounter) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(String, name, 0);
-
- if (FLAG_native_code_counters) {
- StatsCounter(isolate, name->ToCString().get()).Increment();
- }
- return isolate->heap()->undefined_value();
-}
-
-
namespace {
bool ComputeLocation(Isolate* isolate, MessageLocation* target) {
@@ -407,7 +386,7 @@ bool ComputeLocation(Isolate* isolate, MessageLocation* target) {
Handle<String> RenderCallSite(Isolate* isolate, Handle<Object> object) {
MessageLocation location;
if (ComputeLocation(isolate, &location)) {
- Zone zone;
+ Zone zone(isolate->allocator());
base::SmartPointer<ParseInfo> info(
location.function()->shared()->is_function()
? new ParseInfo(&zone, location.function())
@@ -477,6 +456,12 @@ RUNTIME_FUNCTION(Runtime_IncrementUseCounter) {
return isolate->heap()->undefined_value();
}
+RUNTIME_FUNCTION(Runtime_GetOrdinaryHasInstance) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(0, args.length());
+
+ return isolate->native_context()->ordinary_has_instance();
+}
RUNTIME_FUNCTION(Runtime_GetAndResetRuntimeCallStats) {
HandleScope scope(isolate);
diff --git a/deps/v8/src/runtime/runtime-interpreter.cc b/deps/v8/src/runtime/runtime-interpreter.cc
index 7150a8b287..22ae9113d8 100644
--- a/deps/v8/src/runtime/runtime-interpreter.cc
+++ b/deps/v8/src/runtime/runtime-interpreter.cc
@@ -16,30 +16,6 @@
namespace v8 {
namespace internal {
-RUNTIME_FUNCTION(Runtime_InterpreterToBoolean) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Object, x, 0);
- return isolate->heap()->ToBoolean(x->BooleanValue());
-}
-
-
-RUNTIME_FUNCTION(Runtime_InterpreterLogicalNot) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Object, x, 0);
- return isolate->heap()->ToBoolean(!x->BooleanValue());
-}
-
-
-RUNTIME_FUNCTION(Runtime_InterpreterTypeOf) {
- HandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
- return Object::cast(*Object::TypeOf(isolate, x));
-}
-
-
RUNTIME_FUNCTION(Runtime_InterpreterNewClosure) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -52,10 +28,24 @@ RUNTIME_FUNCTION(Runtime_InterpreterNewClosure) {
namespace {
+void AdvanceToOffsetForTracing(
+ interpreter::BytecodeArrayIterator& bytecode_iterator, int offset) {
+ while (bytecode_iterator.current_offset() +
+ bytecode_iterator.current_bytecode_size() <=
+ offset) {
+ bytecode_iterator.Advance();
+ }
+ DCHECK(bytecode_iterator.current_offset() == offset ||
+ ((bytecode_iterator.current_offset() + 1) == offset &&
+ bytecode_iterator.current_operand_scale() >
+ interpreter::OperandScale::kSingle));
+}
+
void PrintRegisters(std::ostream& os, bool is_input,
- Handle<BytecodeArray> bytecode_array, int bytecode_offset,
+ interpreter::BytecodeArrayIterator& bytecode_iterator,
Handle<Object> accumulator) {
- static const int kRegFieldWidth = static_cast<int>(strlen("accumulator"));
+ static const char kAccumulator[] = "accumulator";
+ static const int kRegFieldWidth = static_cast<int>(sizeof(kAccumulator) - 1);
static const char* kInputColourCode = "\033[0;36m";
static const char* kOutputColourCode = "\033[0;35m";
static const char* kNormalColourCode = "\033[0;m";
@@ -64,22 +54,24 @@ void PrintRegisters(std::ostream& os, bool is_input,
os << (is_input ? kInputColourCode : kOutputColourCode);
}
+ interpreter::Bytecode bytecode = bytecode_iterator.current_bytecode();
+
// Print accumulator.
- os << " [ accumulator" << kArrowDirection;
- accumulator->ShortPrint();
- os << " ]" << std::endl;
+ if ((is_input && interpreter::Bytecodes::ReadsAccumulator(bytecode)) ||
+ (!is_input && interpreter::Bytecodes::WritesAccumulator(bytecode))) {
+ os << " [ " << kAccumulator << kArrowDirection;
+ accumulator->ShortPrint();
+ os << " ]" << std::endl;
+ }
// Find the location of the register file.
- JavaScriptFrameIterator frame_iterator(bytecode_array->GetIsolate());
+ JavaScriptFrameIterator frame_iterator(
+ bytecode_iterator.bytecode_array()->GetIsolate());
JavaScriptFrame* frame = frame_iterator.frame();
Address register_file =
frame->fp() + InterpreterFrameConstants::kRegisterFilePointerFromFp;
// Print the registers.
- interpreter::BytecodeArrayIterator bytecode_iterator(bytecode_array);
- bytecode_iterator.set_current_offset(
- bytecode_offset - BytecodeArray::kHeaderSize + kHeapObjectTag);
- interpreter::Bytecode bytecode = bytecode_iterator.current_bytecode();
int operand_count = interpreter::Bytecodes::NumberOfOperands(bytecode);
for (int operand_index = 0; operand_index < operand_count; operand_index++) {
interpreter::OperandType operand_type =
@@ -98,7 +90,7 @@ void PrintRegisters(std::ostream& os, bool is_input,
Object* reg_object = Memory::Object_at(reg_location);
os << " [ " << std::setw(kRegFieldWidth)
<< interpreter::Register(reg_index).ToString(
- bytecode_array->parameter_count())
+ bytecode_iterator.bytecode_array()->parameter_count())
<< kArrowDirection;
reg_object->ShortPrint(os);
os << " ]" << std::endl;
@@ -120,20 +112,23 @@ RUNTIME_FUNCTION(Runtime_InterpreterTraceBytecodeEntry) {
CONVERT_ARG_HANDLE_CHECKED(Object, accumulator, 2);
OFStream os(stdout);
- // Print bytecode.
- const uint8_t* bytecode_address =
- reinterpret_cast<const uint8_t*>(*bytecode_array) + bytecode_offset;
- Vector<char> buf = Vector<char>::New(50);
- SNPrintF(buf, "%p", bytecode_address);
- os << " -> " << buf.start() << " (" << bytecode_offset << ") : ";
- interpreter::Bytecodes::Decode(os, bytecode_address,
- bytecode_array->parameter_count());
- os << std::endl;
-
- // Print all input registers and accumulator.
- PrintRegisters(os, true, bytecode_array, bytecode_offset, accumulator);
-
- os << std::flush;
+ int offset = bytecode_offset - BytecodeArray::kHeaderSize + kHeapObjectTag;
+ interpreter::BytecodeArrayIterator bytecode_iterator(bytecode_array);
+ AdvanceToOffsetForTracing(bytecode_iterator, offset);
+ if (offset == bytecode_iterator.current_offset()) {
+ // Print bytecode.
+ const uint8_t* bytecode_address =
+ reinterpret_cast<const uint8_t*>(*bytecode_array) + bytecode_offset;
+ os << " -> " << static_cast<const void*>(bytecode_address)
+ << " (" << bytecode_offset << ") : ";
+ interpreter::Bytecodes::Decode(os, bytecode_address,
+ bytecode_array->parameter_count());
+ os << std::endl;
+ // Print all input registers and accumulator.
+ PrintRegisters(os, true, bytecode_iterator, accumulator);
+
+ os << std::flush;
+ }
return isolate->heap()->undefined_value();
}
@@ -143,11 +138,21 @@ RUNTIME_FUNCTION(Runtime_InterpreterTraceBytecodeExit) {
CONVERT_ARG_HANDLE_CHECKED(BytecodeArray, bytecode_array, 0);
CONVERT_SMI_ARG_CHECKED(bytecode_offset, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, accumulator, 2);
- OFStream os(stdout);
- // Print all output registers and accumulator.
- PrintRegisters(os, false, bytecode_array, bytecode_offset, accumulator);
- os << std::flush;
+ int offset = bytecode_offset - BytecodeArray::kHeaderSize + kHeapObjectTag;
+ interpreter::BytecodeArrayIterator bytecode_iterator(bytecode_array);
+ AdvanceToOffsetForTracing(bytecode_iterator, offset);
+ // The offset comparison here ensures registers only printed when the
+ // (potentially) widened bytecode has completed. The iterator reports
+ // the offset as the offset of the prefix bytecode.
+ if (bytecode_iterator.current_operand_scale() ==
+ interpreter::OperandScale::kSingle ||
+ offset > bytecode_iterator.current_offset()) {
+ OFStream os(stdout);
+ // Print all output registers and accumulator.
+ PrintRegisters(os, false, bytecode_iterator, accumulator);
+ os << std::flush;
+ }
return isolate->heap()->undefined_value();
}
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index e73095720e..f14a7cfd84 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -16,7 +16,7 @@ namespace internal {
static Handle<Map> ComputeObjectLiteralMap(
Handle<Context> context, Handle<FixedArray> constant_properties,
- bool is_strong, bool* is_result_from_cache) {
+ bool* is_result_from_cache) {
int properties_length = constant_properties->length();
int number_of_properties = properties_length / 2;
@@ -30,18 +30,16 @@ static Handle<Map> ComputeObjectLiteralMap(
}
Isolate* isolate = context->GetIsolate();
return isolate->factory()->ObjectLiteralMapFromCache(
- context, number_of_properties, is_strong, is_result_from_cache);
+ context, number_of_properties, is_result_from_cache);
}
MUST_USE_RESULT static MaybeHandle<Object> CreateLiteralBoilerplate(
Isolate* isolate, Handle<LiteralsArray> literals,
- Handle<FixedArray> constant_properties, bool is_strong);
-
+ Handle<FixedArray> constant_properties);
MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
Isolate* isolate, Handle<LiteralsArray> literals,
- Handle<FixedArray> constant_properties, bool should_have_fast_elements,
- bool has_function_literal, bool is_strong) {
+ Handle<FixedArray> constant_properties, bool should_have_fast_elements) {
Handle<Context> context = isolate->native_context();
// In case we have function literals, we want the object to be in
@@ -49,12 +47,8 @@ MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
// maps with constant functions can't be shared if the functions are
// not the same (which is the common case).
bool is_result_from_cache = false;
- Handle<Map> map = has_function_literal
- ? Handle<Map>(is_strong
- ? context->js_object_strong_map()
- : context->object_function()->initial_map())
- : ComputeObjectLiteralMap(context, constant_properties, is_strong,
- &is_result_from_cache);
+ Handle<Map> map = ComputeObjectLiteralMap(context, constant_properties,
+ &is_result_from_cache);
PretenureFlag pretenure_flag =
isolate->heap()->InNewSpace(*literals) ? NOT_TENURED : TENURED;
@@ -69,7 +63,7 @@ MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
int length = constant_properties->length();
bool should_transform =
!is_result_from_cache && boilerplate->HasFastProperties();
- bool should_normalize = should_transform || has_function_literal;
+ bool should_normalize = should_transform;
if (should_normalize) {
// TODO(verwaest): We might not want to ever normalize here.
JSObject::NormalizeProperties(boilerplate, KEEP_INOBJECT_PROPERTIES,
@@ -84,44 +78,22 @@ MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
// simple object or array literal.
Handle<FixedArray> array = Handle<FixedArray>::cast(value);
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, value,
- CreateLiteralBoilerplate(isolate, literals, array, is_strong),
+ isolate, value, CreateLiteralBoilerplate(isolate, literals, array),
Object);
}
MaybeHandle<Object> maybe_result;
uint32_t element_index = 0;
- if (key->IsInternalizedString()) {
- if (Handle<String>::cast(key)->AsArrayIndex(&element_index)) {
- // Array index as string (uint32).
- if (value->IsUninitialized()) value = handle(Smi::FromInt(0), isolate);
- maybe_result = JSObject::SetOwnElementIgnoreAttributes(
- boilerplate, element_index, value, NONE);
- } else {
- Handle<String> name(String::cast(*key));
- DCHECK(!name->AsArrayIndex(&element_index));
- maybe_result = JSObject::SetOwnPropertyIgnoreAttributes(
- boilerplate, name, value, NONE);
- }
- } else if (key->ToArrayIndex(&element_index)) {
+ if (key->ToArrayIndex(&element_index)) {
// Array index (uint32).
if (value->IsUninitialized()) value = handle(Smi::FromInt(0), isolate);
maybe_result = JSObject::SetOwnElementIgnoreAttributes(
boilerplate, element_index, value, NONE);
} else {
- // Non-uint32 number.
- DCHECK(key->IsNumber());
- double num = key->Number();
- char arr[100];
- Vector<char> buffer(arr, arraysize(arr));
- const char* str = DoubleToCString(num, buffer);
- Handle<String> name = isolate->factory()->NewStringFromAsciiChecked(str);
+ Handle<String> name = Handle<String>::cast(key);
+ DCHECK(!name->AsArrayIndex(&element_index));
maybe_result = JSObject::SetOwnPropertyIgnoreAttributes(boilerplate, name,
value, NONE);
}
- // If setting the property on the boilerplate throws an
- // exception, the exception is converted to an empty handle in
- // the handle based operations. In that case, we need to
- // convert back to an exception.
RETURN_ON_EXCEPTION(isolate, maybe_result, Object);
}
@@ -129,7 +101,7 @@ MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
// containing function literals we defer this operation until after all
// computed properties have been assigned so that we can generate
// constant function properties.
- if (should_transform && !has_function_literal) {
+ if (should_transform) {
JSObject::MigrateSlowToFast(boilerplate,
boilerplate->map()->unused_property_fields(),
"FastLiteral");
@@ -137,10 +109,9 @@ MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
return boilerplate;
}
-
MaybeHandle<Object> Runtime::CreateArrayLiteralBoilerplate(
Isolate* isolate, Handle<LiteralsArray> literals,
- Handle<FixedArray> elements, bool is_strong) {
+ Handle<FixedArray> elements) {
// Create the JSArray.
Handle<JSFunction> constructor = isolate->array_function();
@@ -159,9 +130,8 @@ MaybeHandle<Object> Runtime::CreateArrayLiteralBoilerplate(
DisallowHeapAllocation no_gc;
DCHECK(IsFastElementsKind(constant_elements_kind));
Context* native_context = isolate->context()->native_context();
- Strength strength = is_strong ? Strength::STRONG : Strength::WEAK;
- Object* map = native_context->get(
- Context::ArrayMapIndex(constant_elements_kind, strength));
+ Object* map =
+ native_context->get(Context::ArrayMapIndex(constant_elements_kind));
object->set_map(Map::cast(map));
}
@@ -188,20 +158,20 @@ MaybeHandle<Object> Runtime::CreateArrayLiteralBoilerplate(
Handle<FixedArray> fixed_array_values_copy =
isolate->factory()->CopyFixedArray(fixed_array_values);
copied_elements_values = fixed_array_values_copy;
- for (int i = 0; i < fixed_array_values->length(); i++) {
- HandleScope scope(isolate);
- if (fixed_array_values->get(i)->IsFixedArray()) {
- // The value contains the constant_properties of a
- // simple object or array literal.
- Handle<FixedArray> fa(FixedArray::cast(fixed_array_values->get(i)));
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- CreateLiteralBoilerplate(isolate, literals, fa, is_strong),
- Object);
- fixed_array_values_copy->set(i, *result);
- }
- }
+ FOR_WITH_HANDLE_SCOPE(
+ isolate, int, i = 0, i, i < fixed_array_values->length(), i++, {
+ if (fixed_array_values->get(i)->IsFixedArray()) {
+ // The value contains the constant_properties of a
+ // simple object or array literal.
+ Handle<FixedArray> fa(
+ FixedArray::cast(fixed_array_values->get(i)));
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ CreateLiteralBoilerplate(isolate, literals, fa), Object);
+ fixed_array_values_copy->set(i, *result);
+ }
+ });
}
}
object->set_elements(*copied_elements_values);
@@ -211,22 +181,18 @@ MaybeHandle<Object> Runtime::CreateArrayLiteralBoilerplate(
return object;
}
-
MUST_USE_RESULT static MaybeHandle<Object> CreateLiteralBoilerplate(
- Isolate* isolate, Handle<LiteralsArray> literals, Handle<FixedArray> array,
- bool is_strong) {
+ Isolate* isolate, Handle<LiteralsArray> literals,
+ Handle<FixedArray> array) {
Handle<FixedArray> elements = CompileTimeValue::GetElements(array);
- const bool kHasNoFunctionLiteral = false;
switch (CompileTimeValue::GetLiteralType(array)) {
case CompileTimeValue::OBJECT_LITERAL_FAST_ELEMENTS:
- return CreateObjectLiteralBoilerplate(isolate, literals, elements, true,
- kHasNoFunctionLiteral, is_strong);
+ return CreateObjectLiteralBoilerplate(isolate, literals, elements, true);
case CompileTimeValue::OBJECT_LITERAL_SLOW_ELEMENTS:
- return CreateObjectLiteralBoilerplate(isolate, literals, elements, false,
- kHasNoFunctionLiteral, is_strong);
+ return CreateObjectLiteralBoilerplate(isolate, literals, elements, false);
case CompileTimeValue::ARRAY_LITERAL:
return Runtime::CreateArrayLiteralBoilerplate(isolate, literals,
- elements, is_strong);
+ elements);
default:
UNREACHABLE();
return MaybeHandle<Object>();
@@ -262,9 +228,7 @@ RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
CONVERT_SMI_ARG_CHECKED(flags, 3);
Handle<LiteralsArray> literals(closure->literals(), isolate);
bool should_have_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
- bool has_function_literal = (flags & ObjectLiteral::kHasFunction) != 0;
bool enable_mementos = (flags & ObjectLiteral::kDisableMementos) == 0;
- bool is_strong = (flags & ObjectLiteral::kIsStrong) != 0;
RUNTIME_ASSERT(literals_index >= 0 &&
literals_index < literals->literals_count());
@@ -278,8 +242,7 @@ RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, raw_boilerplate,
CreateObjectLiteralBoilerplate(isolate, literals, constant_properties,
- should_have_fast_elements,
- has_function_literal, is_strong));
+ should_have_fast_elements));
boilerplate = Handle<JSObject>::cast(raw_boilerplate);
AllocationSiteCreationContext creation_context(isolate);
@@ -306,10 +269,9 @@ RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
return *copy;
}
-
MUST_USE_RESULT static MaybeHandle<AllocationSite> GetLiteralAllocationSite(
Isolate* isolate, Handle<LiteralsArray> literals, int literals_index,
- Handle<FixedArray> elements, bool is_strong) {
+ Handle<FixedArray> elements) {
// Check if boilerplate exists. If not, create it first.
Handle<Object> literal_site(literals->literal(literals_index), isolate);
Handle<AllocationSite> site;
@@ -318,8 +280,7 @@ MUST_USE_RESULT static MaybeHandle<AllocationSite> GetLiteralAllocationSite(
Handle<Object> boilerplate;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, boilerplate,
- Runtime::CreateArrayLiteralBoilerplate(isolate, literals, elements,
- is_strong),
+ Runtime::CreateArrayLiteralBoilerplate(isolate, literals, elements),
AllocationSite);
AllocationSiteCreationContext creation_context(isolate);
@@ -346,11 +307,9 @@ static MaybeHandle<JSObject> CreateArrayLiteralImpl(
literals_index >= 0 && literals_index < literals->literals_count(),
JSObject);
Handle<AllocationSite> site;
- bool is_strong = (flags & ArrayLiteral::kIsStrong) != 0;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, site,
- GetLiteralAllocationSite(isolate, literals, literals_index, elements,
- is_strong),
+ GetLiteralAllocationSite(isolate, literals, literals_index, elements),
JSObject);
bool enable_mementos = (flags & ArrayLiteral::kDisableMementos) == 0;
diff --git a/deps/v8/src/runtime/runtime-liveedit.cc b/deps/v8/src/runtime/runtime-liveedit.cc
index 189ec08d33..da342de9d8 100644
--- a/deps/v8/src/runtime/runtime-liveedit.cc
+++ b/deps/v8/src/runtime/runtime-liveedit.cc
@@ -186,7 +186,7 @@ RUNTIME_FUNCTION(Runtime_LiveEditPatchFunctionPositions) {
DCHECK(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_array, 0);
CONVERT_ARG_HANDLE_CHECKED(JSArray, position_change_array, 1);
- RUNTIME_ASSERT(SharedInfoWrapper::IsInstance(shared_array))
+ RUNTIME_ASSERT(SharedInfoWrapper::IsInstance(shared_array));
LiveEdit::PatchFunctionPositions(shared_array, position_change_array);
return isolate->heap()->undefined_value();
@@ -207,19 +207,21 @@ RUNTIME_FUNCTION(Runtime_LiveEditCheckAndDropActivations) {
USE(new_shared_array);
RUNTIME_ASSERT(old_shared_array->length()->IsSmi());
RUNTIME_ASSERT(new_shared_array->length() == old_shared_array->length());
- RUNTIME_ASSERT(old_shared_array->HasFastElements())
- RUNTIME_ASSERT(new_shared_array->HasFastElements())
+ RUNTIME_ASSERT(old_shared_array->HasFastElements());
+ RUNTIME_ASSERT(new_shared_array->HasFastElements());
int array_length = Smi::cast(old_shared_array->length())->value();
for (int i = 0; i < array_length; i++) {
Handle<Object> old_element;
Handle<Object> new_element;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, old_element, Object::GetElement(isolate, old_shared_array, i));
+ isolate, old_element,
+ JSReceiver::GetElement(isolate, old_shared_array, i));
RUNTIME_ASSERT(
old_element->IsJSValue() &&
Handle<JSValue>::cast(old_element)->value()->IsSharedFunctionInfo());
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, new_element, Object::GetElement(isolate, new_shared_array, i));
+ isolate, new_element,
+ JSReceiver::GetElement(isolate, new_shared_array, i));
RUNTIME_ASSERT(
new_element->IsUndefined() ||
(new_element->IsJSValue() &&
@@ -242,7 +244,7 @@ RUNTIME_FUNCTION(Runtime_LiveEditCompareStrings) {
CONVERT_ARG_HANDLE_CHECKED(String, s2, 1);
Handle<JSArray> result = LiveEdit::CompareStrings(s1, s2);
- uint32_t array_length;
+ uint32_t array_length = 0;
CHECK(result->length()->ToArrayLength(&array_length));
if (array_length > 0) {
isolate->debug()->feature_tracker()->Track(DebugFeatureTracker::kLiveEdit);
diff --git a/deps/v8/src/runtime/runtime-maths.cc b/deps/v8/src/runtime/runtime-maths.cc
index 9c4fde1cef..91b6181ab7 100644
--- a/deps/v8/src/runtime/runtime-maths.cc
+++ b/deps/v8/src/runtime/runtime-maths.cc
@@ -23,9 +23,6 @@ namespace internal {
return *isolate->factory()->NewHeapNumber(std::name(x)); \
}
-RUNTIME_UNARY_MATH(Acos, acos)
-RUNTIME_UNARY_MATH(Asin, asin)
-RUNTIME_UNARY_MATH(Atan, atan)
RUNTIME_UNARY_MATH(LogRT, log)
#undef RUNTIME_UNARY_MATH
@@ -111,27 +108,6 @@ RUNTIME_FUNCTION(Runtime_MathExpRT) {
}
-RUNTIME_FUNCTION(Runtime_MathClz32) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- isolate->counters()->math_clz32_runtime()->Increment();
-
- CONVERT_NUMBER_CHECKED(uint32_t, x, Uint32, args[0]);
- return *isolate->factory()->NewNumberFromUint(
- base::bits::CountLeadingZeros32(x));
-}
-
-
-RUNTIME_FUNCTION(Runtime_MathFloor) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- isolate->counters()->math_floor_runtime()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return *isolate->factory()->NewNumber(Floor(x));
-}
-
-
// Slow version of Math.pow. We check for fast paths for special cases.
// Used if VFP3 is not available.
RUNTIME_FUNCTION(Runtime_MathPow) {
@@ -174,75 +150,21 @@ RUNTIME_FUNCTION(Runtime_MathPowRT) {
}
-RUNTIME_FUNCTION(Runtime_RoundNumber) {
+RUNTIME_FUNCTION(Runtime_GenerateRandomNumbers) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(input, 0);
- isolate->counters()->math_round_runtime()->Increment();
-
- if (!input->IsHeapNumber()) {
- DCHECK(input->IsSmi());
- return *input;
- }
-
- Handle<HeapNumber> number = Handle<HeapNumber>::cast(input);
-
- double value = number->value();
- int exponent = number->get_exponent();
- int sign = number->get_sign();
-
- if (exponent < -1) {
- // Number in range ]-0.5..0.5[. These always round to +/-zero.
- if (sign) return isolate->heap()->minus_zero_value();
- return Smi::FromInt(0);
- }
-
- // We compare with kSmiValueSize - 2 because (2^30 - 0.1) has exponent 29 and
- // should be rounded to 2^30, which is not smi (for 31-bit smis, similar
- // argument holds for 32-bit smis).
- if (!sign && exponent < kSmiValueSize - 2) {
- return Smi::FromInt(static_cast<int>(value + 0.5));
- }
-
- // If the magnitude is big enough, there's no place for fraction part. If we
- // try to add 0.5 to this number, 1.0 will be added instead.
- if (exponent >= 52) {
- return *number;
+ if (isolate->serializer_enabled()) {
+ // Random numbers in the snapshot are not really that random. And we cannot
+ // return a typed array as it cannot be serialized. To make calling
+ // Math.random possible when creating a custom startup snapshot, we simply
+ // return a normal array with a single random number.
+ Handle<HeapNumber> random_number = isolate->factory()->NewHeapNumber(
+ isolate->random_number_generator()->NextDouble());
+ Handle<FixedArray> array_backing = isolate->factory()->NewFixedArray(1);
+ array_backing->set(0, *random_number);
+ return *isolate->factory()->NewJSArrayWithElements(array_backing);
}
- if (sign && value >= -0.5) return isolate->heap()->minus_zero_value();
-
- // Do not call NumberFromDouble() to avoid extra checks.
- return *isolate->factory()->NewNumber(Floor(value + 0.5));
-}
-
-
-RUNTIME_FUNCTION(Runtime_MathSqrt) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- isolate->counters()->math_sqrt_runtime()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- lazily_initialize_fast_sqrt(isolate);
- return *isolate->factory()->NewNumber(fast_sqrt(x, isolate));
-}
-
-
-RUNTIME_FUNCTION(Runtime_MathFround) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- float xf = DoubleToFloat32(x);
- return *isolate->factory()->NewNumber(xf);
-}
-
-
-RUNTIME_FUNCTION(Runtime_GenerateRandomNumbers) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- // Random numbers in the snapshot are not really that random.
- DCHECK(!isolate->bootstrapper()->IsActive());
static const int kState0Offset = 0;
static const int kState1Offset = 1;
static const int kRandomBatchSize = 64;
diff --git a/deps/v8/src/runtime/runtime-numbers.cc b/deps/v8/src/runtime/runtime-numbers.cc
index 46fbff3463..efbdeb2f33 100644
--- a/deps/v8/src/runtime/runtime-numbers.cc
+++ b/deps/v8/src/runtime/runtime-numbers.cc
@@ -208,19 +208,6 @@ RUNTIME_FUNCTION(Runtime_NumberToSmi) {
}
-RUNTIME_FUNCTION(Runtime_NumberImul) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- // We rely on implementation-defined behavior below, but at least not on
- // undefined behavior.
- CONVERT_NUMBER_CHECKED(uint32_t, x, Int32, args[0]);
- CONVERT_NUMBER_CHECKED(uint32_t, y, Int32, args[1]);
- int32_t product = static_cast<int32_t>(x * y);
- return *isolate->factory()->NewNumberFromInt(product);
-}
-
-
// Compare two Smis as if they were converted to strings and then
// compared lexicographically.
RUNTIME_FUNCTION(Runtime_SmiLexicographicCompare) {
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 45a49925bd..5bdb08541f 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -125,6 +125,82 @@ Maybe<bool> Runtime::DeleteObjectProperty(Isolate* isolate,
return JSReceiver::DeleteProperty(&it, language_mode);
}
+// ES6 19.1.3.2
+RUNTIME_FUNCTION(Runtime_ObjectHasOwnProperty) {
+ HandleScope scope(isolate);
+ Handle<Object> property = args.at<Object>(1);
+
+ Handle<Name> key;
+ uint32_t index;
+ bool key_is_array_index = property->ToArrayIndex(&index);
+
+ if (!key_is_array_index) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, key,
+ Object::ToName(isolate, property));
+ key_is_array_index = key->AsArrayIndex(&index);
+ }
+
+ Handle<Object> object = args.at<Object>(0);
+
+ if (object->IsJSObject()) {
+ Handle<JSObject> js_obj = Handle<JSObject>::cast(object);
+ // Fast case: either the key is a real named property or it is not
+ // an array index and there are no interceptors or hidden
+ // prototypes.
+ // TODO(jkummerow): Make JSReceiver::HasOwnProperty fast enough to
+ // handle all cases directly (without this custom fast path).
+ {
+ LookupIterator::Configuration c = LookupIterator::OWN_SKIP_INTERCEPTOR;
+ LookupIterator it =
+ key_is_array_index ? LookupIterator(isolate, js_obj, index, js_obj, c)
+ : LookupIterator(js_obj, key, js_obj, c);
+ Maybe<bool> maybe = JSReceiver::HasProperty(&it);
+ if (maybe.IsNothing()) return isolate->heap()->exception();
+ DCHECK(!isolate->has_pending_exception());
+ if (maybe.FromJust()) return isolate->heap()->true_value();
+ }
+
+ Map* map = js_obj->map();
+ if (!map->has_hidden_prototype() &&
+ (key_is_array_index ? !map->has_indexed_interceptor()
+ : !map->has_named_interceptor())) {
+ return isolate->heap()->false_value();
+ }
+
+ // Slow case.
+ LookupIterator::Configuration c = LookupIterator::HIDDEN;
+ LookupIterator it = key_is_array_index
+ ? LookupIterator(isolate, js_obj, index, js_obj, c)
+ : LookupIterator(js_obj, key, js_obj, c);
+
+ Maybe<bool> maybe = JSReceiver::HasProperty(&it);
+ if (maybe.IsNothing()) return isolate->heap()->exception();
+ DCHECK(!isolate->has_pending_exception());
+ return isolate->heap()->ToBoolean(maybe.FromJust());
+
+ } else if (object->IsJSProxy()) {
+ if (key.is_null()) {
+ DCHECK(key_is_array_index);
+ key = isolate->factory()->Uint32ToString(index);
+ }
+
+ Maybe<bool> result =
+ JSReceiver::HasOwnProperty(Handle<JSProxy>::cast(object), key);
+ if (!result.IsJust()) return isolate->heap()->exception();
+ return isolate->heap()->ToBoolean(result.FromJust());
+
+ } else if (object->IsString()) {
+ return isolate->heap()->ToBoolean(
+ key_is_array_index
+ ? index < static_cast<uint32_t>(String::cast(*object)->length())
+ : key->Equals(isolate->heap()->length_string()));
+ } else if (object->IsNull() || object->IsUndefined()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kUndefinedOrNullToObject));
+ }
+
+ return isolate->heap()->false_value();
+}
MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
Handle<Object> object,
@@ -205,7 +281,7 @@ MUST_USE_RESULT static MaybeHandle<Object> GetOwnProperty(Isolate* isolate,
Factory* factory = isolate->factory();
// Get attributes.
- LookupIterator it = LookupIterator::PropertyOrElement(isolate, obj, name,
+ LookupIterator it = LookupIterator::PropertyOrElement(isolate, obj, name, obj,
LookupIterator::HIDDEN);
Maybe<PropertyAttributes> maybe = JSObject::GetPropertyAttributes(&it);
@@ -293,7 +369,7 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalViaContext) {
Handle<Name> name(scope_info->ContextSlotName(slot), isolate);
Handle<JSGlobalObject> global_object(script_context->global_object(),
isolate);
- LookupIterator it(global_object, name, LookupIterator::HIDDEN);
+ LookupIterator it(global_object, name, global_object, LookupIterator::HIDDEN);
// Switch to fast mode only if there is a data property and it's not on
// a hidden prototype.
@@ -328,7 +404,7 @@ Object* StoreGlobalViaContext(Isolate* isolate, int slot, Handle<Object> value,
Handle<Name> name(scope_info->ContextSlotName(slot), isolate);
Handle<JSGlobalObject> global_object(script_context->global_object(),
isolate);
- LookupIterator it(global_object, name, LookupIterator::HIDDEN);
+ LookupIterator it(global_object, name, global_object, LookupIterator::HIDDEN);
// Switch to fast mode only if there is a data property and it's not on
// a hidden prototype.
@@ -413,7 +489,7 @@ RUNTIME_FUNCTION(Runtime_AddNamedProperty) {
#ifdef DEBUG
uint32_t index = 0;
DCHECK(!name->ToArrayIndex(&index));
- LookupIterator it(object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ LookupIterator it(object, name, object, LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
if (!maybe.IsJust()) return isolate->heap()->exception();
RUNTIME_ASSERT(!it.IsFound());
@@ -441,7 +517,7 @@ RUNTIME_FUNCTION(Runtime_AddElement) {
CHECK(key->ToArrayIndex(&index));
#ifdef DEBUG
- LookupIterator it(isolate, object, index,
+ LookupIterator it(isolate, object, index, object,
LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
if (!maybe.IsJust()) return isolate->heap()->exception();
@@ -532,85 +608,6 @@ RUNTIME_FUNCTION(Runtime_DeleteProperty_Strict) {
}
-static Object* HasOwnPropertyImplementation(Isolate* isolate,
- Handle<JSObject> object,
- Handle<Name> key) {
- Maybe<bool> maybe = JSReceiver::HasOwnProperty(object, key);
- if (!maybe.IsJust()) return isolate->heap()->exception();
- if (maybe.FromJust()) return isolate->heap()->true_value();
- // Handle hidden prototypes. If there's a hidden prototype above this thing
- // then we have to check it for properties, because they are supposed to
- // look like they are on this object.
- if (object->map()->has_hidden_prototype()) {
- PrototypeIterator iter(isolate, object);
- DCHECK(!iter.IsAtEnd());
-
- // TODO(verwaest): The recursion is not necessary for keys that are array
- // indices. Removing this.
- // Casting to JSObject is fine because JSProxies are never used as
- // hidden prototypes.
- return HasOwnPropertyImplementation(
- isolate, PrototypeIterator::GetCurrent<JSObject>(iter), key);
- }
- RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
- return isolate->heap()->false_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_HasOwnProperty) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0)
- CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
-
- uint32_t index;
- const bool key_is_array_index = key->AsArrayIndex(&index);
-
- // Only JS objects can have properties.
- if (object->IsJSObject()) {
- Handle<JSObject> js_obj = Handle<JSObject>::cast(object);
- // Fast case: either the key is a real named property or it is not
- // an array index and there are no interceptors or hidden
- // prototypes.
- // TODO(jkummerow): Make JSReceiver::HasOwnProperty fast enough to
- // handle all cases directly (without this custom fast path).
- Maybe<bool> maybe = Nothing<bool>();
- if (key_is_array_index) {
- LookupIterator it(js_obj->GetIsolate(), js_obj, index,
- LookupIterator::HIDDEN);
- maybe = JSReceiver::HasProperty(&it);
- } else {
- maybe = JSObject::HasRealNamedProperty(js_obj, key);
- }
- if (!maybe.IsJust()) return isolate->heap()->exception();
- DCHECK(!isolate->has_pending_exception());
- if (maybe.FromJust()) {
- return isolate->heap()->true_value();
- }
- Map* map = js_obj->map();
- if (!key_is_array_index && !map->has_named_interceptor() &&
- !map->has_hidden_prototype()) {
- return isolate->heap()->false_value();
- }
- // Slow case.
- return HasOwnPropertyImplementation(isolate, Handle<JSObject>(js_obj),
- Handle<Name>(key));
- } else if (object->IsString() && key_is_array_index) {
- // Well, there is one exception: Handle [] on strings.
- Handle<String> string = Handle<String>::cast(object);
- if (index < static_cast<uint32_t>(string->length())) {
- return isolate->heap()->true_value();
- }
- } else if (object->IsJSProxy()) {
- Maybe<bool> result =
- JSReceiver::HasOwnProperty(Handle<JSProxy>::cast(object), key);
- if (!result.IsJust()) return isolate->heap()->exception();
- return isolate->heap()->ToBoolean(result.FromJust());
- }
- return isolate->heap()->false_value();
-}
-
-
// ES6 section 12.9.3, operator in.
RUNTIME_FUNCTION(Runtime_HasProperty) {
HandleScope scope(isolate);
@@ -840,8 +837,8 @@ RUNTIME_FUNCTION(Runtime_DefineDataPropertyUnchecked) {
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
- LookupIterator it = LookupIterator::PropertyOrElement(isolate, object, name,
- LookupIterator::OWN);
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, object, name, object, LookupIterator::OWN);
if (it.state() == LookupIterator::ACCESS_CHECK && !it.HasAccess()) {
return isolate->heap()->undefined_value();
}
@@ -869,8 +866,8 @@ RUNTIME_FUNCTION(Runtime_DefineDataPropertyInLiteral) {
isolate->factory()->empty_string());
}
- LookupIterator it = LookupIterator::PropertyOrElement(isolate, object, name,
- LookupIterator::OWN);
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, object, name, object, LookupIterator::OWN);
// Cannot fail since this should only be called when
// creating an object literal.
CHECK(JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, attrs,
@@ -915,15 +912,6 @@ RUNTIME_FUNCTION(Runtime_IsJSReceiver) {
}
-RUNTIME_FUNCTION(Runtime_IsStrong) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(Object, obj, 0);
- return isolate->heap()->ToBoolean(obj->IsJSReceiver() &&
- JSReceiver::cast(obj)->map()->is_strong());
-}
-
-
RUNTIME_FUNCTION(Runtime_ClassOf) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
@@ -1118,6 +1106,9 @@ RUNTIME_FUNCTION(Runtime_Compare) {
RUNTIME_FUNCTION(Runtime_InstanceOf) {
+ // TODO(4447): Remove this function when ES6 instanceof ships for good.
+ DCHECK(!FLAG_harmony_instanceof);
+
// ECMA-262, section 11.8.6, page 54.
HandleScope shs(isolate);
DCHECK_EQ(2, args.length());
@@ -1146,7 +1137,50 @@ RUNTIME_FUNCTION(Runtime_InstanceOf) {
Handle<Object> prototype;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, prototype,
- Object::GetProperty(callable, isolate->factory()->prototype_string()));
+ JSReceiver::GetProperty(Handle<JSReceiver>::cast(callable),
+ isolate->factory()->prototype_string()));
+ if (!prototype->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(MessageTemplate::kInstanceofNonobjectProto, prototype));
+ }
+ // Return whether or not {prototype} is in the prototype chain of {object}.
+ Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
+ Maybe<bool> result =
+ JSReceiver::HasInPrototypeChain(isolate, receiver, prototype);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return isolate->heap()->ToBoolean(result.FromJust());
+}
+
+RUNTIME_FUNCTION(Runtime_OrdinaryHasInstance) {
+ // ES6 section 19.2.3.6 Function.prototype[@@hasInstance](V)
+ HandleScope shs(isolate);
+ DCHECK_EQ(2, args.length());
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, callable, 1);
+ // {callable} must have a [[Call]] internal method.
+ if (!callable->IsCallable()) {
+ return isolate->heap()->false_value();
+ }
+ // If {object} is not a receiver, return false.
+ if (!object->IsJSReceiver()) {
+ return isolate->heap()->false_value();
+ }
+ // Check if {callable} is bound, if so, get [[BoundTargetFunction]] from it
+ // and use that instead of {callable}.
+ while (callable->IsJSBoundFunction()) {
+ callable =
+ handle(Handle<JSBoundFunction>::cast(callable)->bound_target_function(),
+ isolate);
+ }
+ DCHECK(callable->IsCallable());
+ // Get the "prototype" of {callable}; raise an error if it's not a receiver.
+ Handle<Object> prototype;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, prototype,
+ JSReceiver::GetProperty(Handle<JSReceiver>::cast(callable),
+ isolate->factory()->prototype_string()));
if (!prototype->IsJSReceiver()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate,
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index df86aa870c..aead0171ce 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -642,7 +642,7 @@ MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithEmptyString(
// TODO(hpayer): We should shrink the large object page if the size
// of the object changed significantly.
if (!heap->lo_space()->Contains(*answer)) {
- heap->CreateFillerObjectAt(end_of_string, delta);
+ heap->CreateFillerObjectAt(end_of_string, delta, ClearRecordedSlots::kNo);
}
heap->AdjustLiveBytes(*answer, -delta, Heap::CONCURRENT_TO_SWEEPER);
return *answer;
@@ -734,9 +734,9 @@ RUNTIME_FUNCTION(Runtime_StringSplit) {
// Create JSArray of substrings separated by separator.
int part_count = indices.length();
- Handle<JSArray> result = isolate->factory()->NewJSArray(part_count);
- JSObject::EnsureCanContainHeapObjectElements(result);
- result->set_length(Smi::FromInt(part_count));
+ Handle<JSArray> result =
+ isolate->factory()->NewJSArray(FAST_ELEMENTS, part_count, part_count,
+ INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
DCHECK(result->HasFastObjectElements());
@@ -746,14 +746,13 @@ RUNTIME_FUNCTION(Runtime_StringSplit) {
elements->set(0, *subject);
} else {
int part_start = 0;
- for (int i = 0; i < part_count; i++) {
- HandleScope local_loop_handle(isolate);
+ FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < part_count, i++, {
int part_end = indices.at(i);
Handle<String> substring =
isolate->factory()->NewProperSubString(subject, part_start, part_end);
elements->set(i, *substring);
part_start = part_end + pattern_length;
- }
+ });
}
if (limit == 0xffffffffu) {
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index a8f3a74918..de0d66a74e 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -44,7 +44,8 @@ static Object* DeclareGlobals(Isolate* isolate, Handle<JSGlobalObject> global,
}
// Do the lookup own properties only, see ES5 erratum.
- LookupIterator it(global, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
+ LookupIterator it(global, name, global,
+ LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
if (!maybe.IsJust()) return isolate->heap()->exception();
@@ -102,8 +103,7 @@ RUNTIME_FUNCTION(Runtime_DeclareGlobals) {
// Traverse the name/value pairs and set the properties.
int length = pairs->length();
- for (int i = 0; i < length; i += 2) {
- HandleScope scope(isolate);
+ FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < length, i += 2, {
Handle<String> name(String::cast(pairs->get(i)));
Handle<Object> initial_value(pairs->get(i + 1), isolate);
@@ -142,7 +142,7 @@ RUNTIME_FUNCTION(Runtime_DeclareGlobals) {
static_cast<PropertyAttributes>(attr),
is_var, is_const, is_function);
if (isolate->has_pending_exception()) return result;
- }
+ });
return isolate->heap()->undefined_value();
}
@@ -182,7 +182,8 @@ RUNTIME_FUNCTION(Runtime_InitializeConstGlobal) {
Handle<JSGlobalObject> global = isolate->global_object();
// Lookup the property as own on the global object.
- LookupIterator it(global, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
+ LookupIterator it(global, name, global,
+ LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
DCHECK(maybe.IsJust());
PropertyAttributes old_attributes = maybe.FromJust();
@@ -394,7 +395,8 @@ RUNTIME_FUNCTION(Runtime_InitializeLegacyConstLookupSlot) {
// code can run in between that modifies the declared property.
DCHECK(holder->IsJSGlobalObject() || holder->IsJSContextExtensionObject());
- LookupIterator it(holder, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
+ LookupIterator it(holder, name, Handle<JSReceiver>::cast(holder),
+ LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
if (!maybe.IsJust()) return isolate->heap()->exception();
PropertyAttributes old_attributes = maybe.FromJust();
@@ -640,9 +642,9 @@ RUNTIME_FUNCTION(Runtime_NewRestParameter) {
base::SmartArrayPointer<Handle<Object>> arguments =
GetCallerArguments(isolate, &argument_count);
int num_elements = std::max(0, argument_count - start_index);
- Handle<JSObject> result = isolate->factory()->NewJSArray(
- FAST_ELEMENTS, num_elements, num_elements, Strength::WEAK,
- DONT_INITIALIZE_ARRAY_ELEMENTS);
+ Handle<JSObject> result =
+ isolate->factory()->NewJSArray(FAST_ELEMENTS, num_elements, num_elements,
+ DONT_INITIALIZE_ARRAY_ELEMENTS);
{
DisallowHeapAllocation no_gc;
FixedArray* elements = FixedArray::cast(result->elements());
@@ -708,7 +710,7 @@ static Object* FindNameClash(Handle<ScopeInfo> scope_info,
}
if (IsLexicalVariableMode(mode)) {
- LookupIterator it(global_object, name,
+ LookupIterator it(global_object, name, global_object,
LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
if (!maybe.IsJust()) return isolate->heap()->exception();
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
index fcec47ddab..6786fa99fb 100644
--- a/deps/v8/src/runtime/runtime-strings.cc
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -371,14 +371,13 @@ RUNTIME_FUNCTION(Runtime_StringMatch) {
Handle<String> substring =
isolate->factory()->NewSubString(subject, offsets.at(0), offsets.at(1));
elements->set(0, *substring);
- for (int i = 1; i < matches; i++) {
- HandleScope temp_scope(isolate);
+ FOR_WITH_HANDLE_SCOPE(isolate, int, i = 1, i, i < matches, i++, {
int from = offsets.at(i * 2);
int to = offsets.at(i * 2 + 1);
Handle<String> substring =
isolate->factory()->NewProperSubString(subject, from, to);
elements->set(i, *substring);
- }
+ });
Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(elements);
result->set_length(Smi::FromInt(matches));
return *result;
@@ -557,6 +556,7 @@ RUNTIME_FUNCTION(Runtime_StringBuilderJoin) {
RUNTIME_ASSERT(fixed_array->get(0)->IsString());
String* first = String::cast(fixed_array->get(0));
String* separator_raw = *separator;
+
int first_length = first->length();
String::WriteToFlat(first, sink, 0, first_length);
sink += first_length;
@@ -580,6 +580,26 @@ RUNTIME_FUNCTION(Runtime_StringBuilderJoin) {
return *answer;
}
+template <typename sinkchar>
+static void WriteRepeatToFlat(String* src, Vector<sinkchar> buffer, int cursor,
+ int repeat, int length) {
+ if (repeat == 0) return;
+
+ sinkchar* start = &buffer[cursor];
+ String::WriteToFlat<sinkchar>(src, start, 0, length);
+
+ int done = 1;
+ sinkchar* next = start + length;
+
+ while (done < repeat) {
+ int block = Min(done, repeat - done);
+ int block_chars = block * length;
+ CopyChars(next, start, block_chars);
+ next += block_chars;
+ done += block;
+ }
+}
+
template <typename Char>
static void JoinSparseArrayWithSeparator(FixedArray* elements,
int elements_length,
@@ -589,34 +609,30 @@ static void JoinSparseArrayWithSeparator(FixedArray* elements,
DisallowHeapAllocation no_gc;
int previous_separator_position = 0;
int separator_length = separator->length();
+ DCHECK_LT(0, separator_length);
int cursor = 0;
for (int i = 0; i < elements_length; i += 2) {
int position = NumberToInt32(elements->get(i));
String* string = String::cast(elements->get(i + 1));
int string_length = string->length();
if (string->length() > 0) {
- while (previous_separator_position < position) {
- String::WriteToFlat<Char>(separator, &buffer[cursor], 0,
- separator_length);
- cursor += separator_length;
- previous_separator_position++;
- }
+ int repeat = position - previous_separator_position;
+ WriteRepeatToFlat<Char>(separator, buffer, cursor, repeat,
+ separator_length);
+ cursor += repeat * separator_length;
+ previous_separator_position = position;
String::WriteToFlat<Char>(string, &buffer[cursor], 0, string_length);
cursor += string->length();
}
}
- if (separator_length > 0) {
- // Array length must be representable as a signed 32-bit number,
- // otherwise the total string length would have been too large.
- DCHECK(array_length <= 0x7fffffff); // Is int32_t.
- int last_array_index = static_cast<int>(array_length - 1);
- while (previous_separator_position < last_array_index) {
- String::WriteToFlat<Char>(separator, &buffer[cursor], 0,
- separator_length);
- cursor += separator_length;
- previous_separator_position++;
- }
- }
+
+ int last_array_index = static_cast<int>(array_length - 1);
+ // Array length must be representable as a signed 32-bit number,
+ // otherwise the total string length would have been too large.
+ DCHECK(array_length <= 0x7fffffff); // Is int32_t.
+ int repeat = last_array_index - previous_separator_position;
+ WriteRepeatToFlat<Char>(separator, buffer, cursor, repeat, separator_length);
+ cursor += repeat * separator_length;
DCHECK(cursor <= buffer.length());
}
@@ -642,13 +658,6 @@ RUNTIME_FUNCTION(Runtime_SparseJoinWithSeparator) {
RUNTIME_ASSERT(elements_length <= elements_array->elements()->length());
RUNTIME_ASSERT((elements_length & 1) == 0); // Even length.
FixedArray* elements = FixedArray::cast(elements_array->elements());
- for (int i = 0; i < elements_length; i += 2) {
- RUNTIME_ASSERT(elements->get(i)->IsNumber());
- CONVERT_NUMBER_CHECKED(uint32_t, position, Uint32, elements->get(i));
- RUNTIME_ASSERT(position < array_length);
- RUNTIME_ASSERT(elements->get(i + 1)->IsString());
- }
-
{
DisallowHeapAllocation no_gc;
for (int i = 0; i < elements_length; i += 2) {
@@ -1145,24 +1154,93 @@ RUNTIME_FUNCTION(Runtime_NewString) {
return *result;
}
+RUNTIME_FUNCTION(Runtime_StringLessThan) {
+ HandleScope handle_scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
+ switch (String::Compare(x, y)) {
+ case ComparisonResult::kLessThan:
+ return isolate->heap()->true_value();
+ case ComparisonResult::kEqual:
+ case ComparisonResult::kGreaterThan:
+ return isolate->heap()->false_value();
+ case ComparisonResult::kUndefined:
+ break;
+ }
+ UNREACHABLE();
+ return Smi::FromInt(0);
+}
-RUNTIME_FUNCTION(Runtime_StringEquals) {
+RUNTIME_FUNCTION(Runtime_StringLessThanOrEqual) {
HandleScope handle_scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
+ switch (String::Compare(x, y)) {
+ case ComparisonResult::kEqual:
+ case ComparisonResult::kLessThan:
+ return isolate->heap()->true_value();
+ case ComparisonResult::kGreaterThan:
+ return isolate->heap()->false_value();
+ case ComparisonResult::kUndefined:
+ break;
+ }
+ UNREACHABLE();
+ return Smi::FromInt(0);
+}
+RUNTIME_FUNCTION(Runtime_StringGreaterThan) {
+ HandleScope handle_scope(isolate);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
+ switch (String::Compare(x, y)) {
+ case ComparisonResult::kGreaterThan:
+ return isolate->heap()->true_value();
+ case ComparisonResult::kEqual:
+ case ComparisonResult::kLessThan:
+ return isolate->heap()->false_value();
+ case ComparisonResult::kUndefined:
+ break;
+ }
+ UNREACHABLE();
+ return Smi::FromInt(0);
+}
- bool not_equal = !String::Equals(x, y);
- // This is slightly convoluted because the value that signifies
- // equality is 0 and inequality is 1 so we have to negate the result
- // from String::Equals.
- DCHECK(not_equal == 0 || not_equal == 1);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(NOT_EQUAL == 1);
- return Smi::FromInt(not_equal);
+RUNTIME_FUNCTION(Runtime_StringGreaterThanOrEqual) {
+ HandleScope handle_scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
+ switch (String::Compare(x, y)) {
+ case ComparisonResult::kEqual:
+ case ComparisonResult::kGreaterThan:
+ return isolate->heap()->true_value();
+ case ComparisonResult::kLessThan:
+ return isolate->heap()->false_value();
+ case ComparisonResult::kUndefined:
+ break;
+ }
+ UNREACHABLE();
+ return Smi::FromInt(0);
}
+RUNTIME_FUNCTION(Runtime_StringEqual) {
+ HandleScope handle_scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
+ return isolate->heap()->ToBoolean(String::Equals(x, y));
+}
+
+RUNTIME_FUNCTION(Runtime_StringNotEqual) {
+ HandleScope handle_scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
+ return isolate->heap()->ToBoolean(!String::Equals(x, y));
+}
RUNTIME_FUNCTION(Runtime_FlattenString) {
HandleScope scope(isolate);
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index 5f27a609a6..a0f05665a3 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -398,7 +398,7 @@ RUNTIME_FUNCTION(Runtime_DisassembleFunction) {
DCHECK(args.length() == 1);
// Get the function and make sure it is compiled.
CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
- if (!Compiler::Compile(func, KEEP_EXCEPTION)) {
+ if (!Compiler::Compile(func, Compiler::KEEP_EXCEPTION)) {
return isolate->heap()->exception();
}
OFStream os(stdout);
@@ -503,5 +503,14 @@ ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastProperties)
TYPED_ARRAYS(FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION)
#undef FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION
+
+
+RUNTIME_FUNCTION(Runtime_SpeciesProtector) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(0, args.length());
+ return isolate->heap()->ToBoolean(isolate->IsArraySpeciesLookupChainIntact());
+}
+
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-utils.h b/deps/v8/src/runtime/runtime-utils.h
index c673b5a155..17c78d5a0b 100644
--- a/deps/v8/src/runtime/runtime-utils.h
+++ b/deps/v8/src/runtime/runtime-utils.h
@@ -5,19 +5,49 @@
#ifndef V8_RUNTIME_RUNTIME_UTILS_H_
#define V8_RUNTIME_RUNTIME_UTILS_H_
+#include "src/base/logging.h"
#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
-#define RUNTIME_ASSERT(value) \
- if (!(value)) return isolate->ThrowIllegalOperation();
+#ifdef DEBUG
+
+#define RUNTIME_ASSERT(value) \
+ do { \
+ if (!(value)) { \
+ V8_RuntimeError(__FILE__, __LINE__, #value); \
+ return isolate->ThrowIllegalOperation(); \
+ } \
+ } while (0)
+
+#define RUNTIME_ASSERT_HANDLIFIED(value, T) \
+ do { \
+ if (!(value)) { \
+ V8_RuntimeError(__FILE__, __LINE__, #value); \
+ isolate->ThrowIllegalOperation(); \
+ return MaybeHandle<T>(); \
+ } \
+ } while (0)
+
+#else
+
+#define RUNTIME_ASSERT(value) \
+ do { \
+ if (!(value)) { \
+ return isolate->ThrowIllegalOperation(); \
+ } \
+ } while (0)
#define RUNTIME_ASSERT_HANDLIFIED(value, T) \
- if (!(value)) { \
- isolate->ThrowIllegalOperation(); \
- return MaybeHandle<T>(); \
- }
+ do { \
+ if (!(value)) { \
+ isolate->ThrowIllegalOperation(); \
+ return MaybeHandle<T>(); \
+ } \
+ } while (0)
+
+#endif
// Cast the given object to a value of the specified type and store
// it in a variable with the given name. If the object is not of the
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index 7019c3bf04..dc1678bb73 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -35,7 +35,6 @@ namespace internal {
F(FinishArrayPrototypeSetup, 1, 1) \
F(SpecialArrayFunctions, 0, 1) \
F(TransitionElementsKind, 2, 1) \
- F(PushIfAbsent, 2, 1) \
F(RemoveArrayHoles, 2, 1) \
F(MoveArrayContents, 2, 1) \
F(EstimateNumberOfElements, 1, 1) \
@@ -43,6 +42,7 @@ namespace internal {
F(ArrayConstructor, -1, 1) \
F(NewArray, -1 /* >= 3 */, 1) \
F(InternalArrayConstructor, -1, 1) \
+ F(ArrayPush, -1, 1) \
F(NormalizeElements, 1, 1) \
F(GrowArrayElements, 2, 1) \
F(HasComplexElements, 1, 1) \
@@ -53,7 +53,6 @@ namespace internal {
F(FixedArraySet, 3, 1) \
F(ArraySpeciesConstructor, 1, 1)
-
#define FOR_EACH_INTRINSIC_ATOMICS(F) \
F(AtomicsCompareExchange, 4, 1) \
F(AtomicsLoad, 2, 1) \
@@ -138,10 +137,10 @@ namespace internal {
F(DateCurrentTime, 0, 1) \
F(ThrowNotDateError, 0, 1)
-
#define FOR_EACH_INTRINSIC_DEBUG(F) \
F(HandleDebuggerStatement, 0, 1) \
- F(DebugBreak, 0, 1) \
+ F(DebugBreak, 1, 1) \
+ F(DebugBreakOnBytecode, 1, 1) \
F(SetDebugEventListener, 2, 1) \
F(ScheduleBreak, 0, 1) \
F(DebugGetInternalProperties, 1, 1) \
@@ -156,7 +155,6 @@ namespace internal {
F(GetFrameCount, 1, 1) \
F(GetFrameDetails, 2, 1) \
F(GetScopeCount, 2, 1) \
- F(GetStepInPositions, 2, 1) \
F(GetScopeDetails, 4, 1) \
F(GetAllScopesDetails, 4, 1) \
F(GetFunctionScopeCount, 1, 1) \
@@ -192,12 +190,10 @@ namespace internal {
F(DebugPrepareStepInIfStepping, 1, 1) \
F(DebugPushPromise, 2, 1) \
F(DebugPopPromise, 0, 1) \
- F(DebugPromiseEvent, 1, 1) \
F(DebugAsyncTaskEvent, 1, 1) \
F(DebugIsActive, 0, 1) \
F(DebugBreakInOptimizedCode, 0, 1)
-
#define FOR_EACH_INTRINSIC_FORIN(F) \
F(ForInDone, 2, 1) \
F(ForInEnumerate, 1, 1) \
@@ -206,9 +202,6 @@ namespace internal {
F(ForInStep, 1, 1)
#define FOR_EACH_INTRINSIC_INTERPRETER(F) \
- F(InterpreterToBoolean, 1, 1) \
- F(InterpreterLogicalNot, 1, 1) \
- F(InterpreterTypeOf, 1, 1) \
F(InterpreterNewClosure, 2, 1) \
F(InterpreterTraceBytecodeEntry, 3, 1) \
F(InterpreterTraceBytecodeExit, 3, 1) \
@@ -223,18 +216,16 @@ namespace internal {
F(FunctionGetSourceCode, 1, 1) \
F(FunctionGetScriptSourcePosition, 1, 1) \
F(FunctionGetPositionForOffset, 2, 1) \
+ F(FunctionGetContextData, 1, 1) \
F(FunctionSetInstanceClassName, 2, 1) \
F(FunctionSetLength, 2, 1) \
F(FunctionSetPrototype, 2, 1) \
F(FunctionIsAPIFunction, 1, 1) \
F(SetCode, 2, 1) \
F(SetNativeFlag, 1, 1) \
- F(ThrowStrongModeTooFewArguments, 0, 1) \
F(IsConstructor, 1, 1) \
F(SetForceInlineFlag, 1, 1) \
F(Call, -1 /* >= 2 */, 1) \
- F(TailCall, -1 /* >= 2 */, 1) \
- F(Apply, 5, 1) \
F(ConvertReceiver, 1, 1) \
F(IsFunction, 1, 1) \
F(FunctionToString, 1, 1)
@@ -245,7 +236,6 @@ namespace internal {
F(ResumeJSGeneratorObject, 3, 1) \
F(GeneratorClose, 1, 1) \
F(GeneratorGetFunction, 1, 1) \
- F(GeneratorGetContext, 1, 1) \
F(GeneratorGetReceiver, 1, 1) \
F(GeneratorGetInput, 1, 1) \
F(GeneratorGetContinuation, 1, 1) \
@@ -284,7 +274,6 @@ namespace internal {
#define FOR_EACH_INTRINSIC_I18N(F)
#endif
-
#define FOR_EACH_INTRINSIC_INTERNAL(F) \
F(CheckIsBootstrapping, 0, 1) \
F(ExportFromRuntime, 1, 1) \
@@ -302,7 +291,6 @@ namespace internal {
F(ThrowIllegalInvocation, 0, 1) \
F(ThrowIteratorResultNotAnObject, 1, 1) \
F(ThrowStackOverflow, 0, 1) \
- F(ThrowStrongModeImplicitConversion, 0, 1) \
F(PromiseRejectEvent, 3, 1) \
F(PromiseRevokeReject, 1, 1) \
F(StackGuard, 0, 1) \
@@ -324,15 +312,14 @@ namespace internal {
F(CallSiteIsEvalRT, 1, 1) \
F(CallSiteIsConstructorRT, 1, 1) \
F(IS_VAR, 1, 1) \
- F(IncrementStatsCounter, 1, 1) \
F(ThrowConstructedNonConstructable, 1, 1) \
F(ThrowDerivedConstructorReturnedNonObject, 0, 1) \
F(ThrowCalledNonCallable, 1, 1) \
F(CreateListFromArrayLike, 1, 1) \
F(IncrementUseCounter, 1, 1) \
+ F(GetOrdinaryHasInstance, 0, 1) \
F(GetAndResetRuntimeCallStats, 0, 1)
-
#define FOR_EACH_INTRINSIC_JSON(F) \
F(QuoteJSONString, 1, 1) \
F(BasicJSONStringify, 1, 1) \
@@ -361,9 +348,6 @@ namespace internal {
#define FOR_EACH_INTRINSIC_MATHS(F) \
- F(MathAcos, 1, 1) \
- F(MathAsin, 1, 1) \
- F(MathAtan, 1, 1) \
F(MathLogRT, 1, 1) \
F(DoubleHi, 1, 1) \
F(DoubleLo, 1, 1) \
@@ -371,13 +355,8 @@ namespace internal {
F(RemPiO2, 2, 1) \
F(MathAtan2, 2, 1) \
F(MathExpRT, 1, 1) \
- F(MathClz32, 1, 1) \
- F(MathFloor, 1, 1) \
F(MathPow, 2, 1) \
F(MathPowRT, 2, 1) \
- F(RoundNumber, 1, 1) \
- F(MathSqrt, 1, 1) \
- F(MathFround, 1, 1) \
F(GenerateRandomNumbers, 1, 1)
@@ -394,7 +373,6 @@ namespace internal {
F(NumberToStringSkipCache, 1, 1) \
F(NumberToIntegerMapMinusZero, 1, 1) \
F(NumberToSmi, 1, 1) \
- F(NumberImul, 2, 1) \
F(SmiLexicographicCompare, 2, 1) \
F(MaxSmi, 0, 1) \
F(IsSmi, 1, 1) \
@@ -404,6 +382,7 @@ namespace internal {
#define FOR_EACH_INTRINSIC_OBJECT(F) \
F(GetPrototype, 1, 1) \
+ F(ObjectHasOwnProperty, 2, 1) \
F(InternalSetPrototype, 2, 1) \
F(SetPrototype, 2, 1) \
F(GetOwnProperty_Legacy, 2, 1) \
@@ -419,7 +398,6 @@ namespace internal {
F(AppendElement, 2, 1) \
F(DeleteProperty_Sloppy, 2, 1) \
F(DeleteProperty_Strict, 2, 1) \
- F(HasOwnProperty, 2, 1) \
F(HasProperty, 2, 1) \
F(PropertyIsEnumerable, 2, 1) \
F(GetOwnPropertyKeys, 2, 1) \
@@ -440,7 +418,6 @@ namespace internal {
F(HasFastPackedElements, 1, 1) \
F(ValueOf, 1, 1) \
F(IsJSReceiver, 1, 1) \
- F(IsStrong, 1, 1) \
F(ClassOf, 1, 1) \
F(DefineGetterPropertyUnchecked, 4, 1) \
F(DefineSetterPropertyUnchecked, 4, 1) \
@@ -457,6 +434,7 @@ namespace internal {
F(SameValueZero, 2, 1) \
F(Compare, 3, 1) \
F(InstanceOf, 2, 1) \
+ F(OrdinaryHasInstance, 2, 1) \
F(HasInPrototypeChain, 2, 1) \
F(CreateIterResultObject, 2, 1) \
F(IsAccessCheckNeeded, 1, 1) \
@@ -854,7 +832,6 @@ namespace internal {
F(Bool8x16Equal, 2, 1) \
F(Bool8x16NotEqual, 2, 1)
-
#define FOR_EACH_INTRINSIC_STRINGS(F) \
F(StringReplaceOneCharWithString, 3, 1) \
F(StringIndexOf, 3, 1) \
@@ -875,7 +852,12 @@ namespace internal {
F(StringTrim, 3, 1) \
F(TruncateString, 2, 1) \
F(NewString, 2, 1) \
- F(StringEquals, 2, 1) \
+ F(StringLessThan, 2, 1) \
+ F(StringLessThanOrEqual, 2, 1) \
+ F(StringGreaterThan, 2, 1) \
+ F(StringGreaterThanOrEqual, 2, 1) \
+ F(StringEqual, 2, 1) \
+ F(StringNotEqual, 2, 1) \
F(FlattenString, 1, 1) \
F(StringCharFromCode, 1, 1) \
F(StringCharAt, 2, 1) \
@@ -885,7 +867,6 @@ namespace internal {
F(TwoByteSeqStringSetChar, 3, 1) \
F(StringCharCodeAt, 2, 1)
-
#define FOR_EACH_INTRINSIC_SYMBOL(F) \
F(CreateSymbol, 1, 1) \
F(CreatePrivateSymbol, 1, 1) \
@@ -941,7 +922,8 @@ namespace internal {
F(HasFixedInt32Elements, 1, 1) \
F(HasFixedFloat32Elements, 1, 1) \
F(HasFixedFloat64Elements, 1, 1) \
- F(HasFixedUint8ClampedElements, 1, 1)
+ F(HasFixedUint8ClampedElements, 1, 1) \
+ F(SpeciesProtector, 0, 1)
#define FOR_EACH_INTRINSIC_TYPEDARRAY(F) \
F(ArrayBufferGetByteLength, 1, 1) \
@@ -995,7 +977,6 @@ namespace internal {
F(BinaryOpIC_MissWithAllocationSite, 3, 1) \
F(CallIC_Miss, 3, 1) \
F(CompareIC_Miss, 3, 1) \
- F(CompareNilIC_Miss, 1, 1) \
F(ElementsTransitionAndStoreIC_Miss, 5, 1) \
F(KeyedLoadIC_Miss, 4, 1) \
F(KeyedLoadIC_MissFromStubFailure, 4, 1) \
@@ -1151,7 +1132,7 @@ class Runtime : public AllStatic {
// Used in runtime.cc and hydrogen's VisitArrayLiteral.
MUST_USE_RESULT static MaybeHandle<Object> CreateArrayLiteralBoilerplate(
Isolate* isolate, Handle<LiteralsArray> literals,
- Handle<FixedArray> elements, bool is_strong);
+ Handle<FixedArray> elements);
static MaybeHandle<JSArray> GetInternalProperties(Isolate* isolate,
Handle<Object>);
diff --git a/deps/v8/src/s390/OWNERS b/deps/v8/src/s390/OWNERS
new file mode 100644
index 0000000000..eb007cb908
--- /dev/null
+++ b/deps/v8/src/s390/OWNERS
@@ -0,0 +1,5 @@
+jyan@ca.ibm.com
+dstence@us.ibm.com
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
diff --git a/deps/v8/src/s390/assembler-s390-inl.h b/deps/v8/src/s390/assembler-s390-inl.h
new file mode 100644
index 0000000000..400d5436a1
--- /dev/null
+++ b/deps/v8/src/s390/assembler-s390-inl.h
@@ -0,0 +1,593 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license above has been modified
+// significantly by Google Inc.
+// Copyright 2014 the V8 project authors. All rights reserved.
+
+#ifndef V8_S390_ASSEMBLER_S390_INL_H_
+#define V8_S390_ASSEMBLER_S390_INL_H_
+
+#include "src/s390/assembler-s390.h"
+
+#include "src/assembler.h"
+#include "src/debug/debug.h"
+
+namespace v8 {
+namespace internal {
+
+bool CpuFeatures::SupportsCrankshaft() { return true; }
+
+void RelocInfo::apply(intptr_t delta) {
+ // Absolute code pointer inside code object moves with the code object.
+ if (IsInternalReference(rmode_)) {
+ // Jump table entry
+ Address target = Memory::Address_at(pc_);
+ Memory::Address_at(pc_) = target + delta;
+ } else if (IsCodeTarget(rmode_)) {
+ SixByteInstr instr =
+ Instruction::InstructionBits(reinterpret_cast<const byte*>(pc_));
+ int32_t dis = static_cast<int32_t>(instr & 0xFFFFFFFF) * 2 // halfwords
+ - static_cast<int32_t>(delta);
+ instr >>= 32; // Clear the 4-byte displacement field.
+ instr <<= 32;
+ instr |= static_cast<uint32_t>(dis / 2);
+ Instruction::SetInstructionBits<SixByteInstr>(reinterpret_cast<byte*>(pc_),
+ instr);
+ } else {
+ // mov sequence
+ DCHECK(IsInternalReferenceEncoded(rmode_));
+ Address target = Assembler::target_address_at(pc_, host_);
+ Assembler::set_target_address_at(isolate_, pc_, host_, target + delta,
+ SKIP_ICACHE_FLUSH);
+ }
+}
+
+Address RelocInfo::target_internal_reference() {
+ if (IsInternalReference(rmode_)) {
+ // Jump table entry
+ return Memory::Address_at(pc_);
+ } else {
+ // mov sequence
+ DCHECK(IsInternalReferenceEncoded(rmode_));
+ return Assembler::target_address_at(pc_, host_);
+ }
+}
+
+Address RelocInfo::target_internal_reference_address() {
+ DCHECK(IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
+ return reinterpret_cast<Address>(pc_);
+}
+
+Address RelocInfo::target_address() {
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ return Assembler::target_address_at(pc_, host_);
+}
+
+Address RelocInfo::wasm_memory_reference() {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ return Assembler::target_address_at(pc_, host_);
+}
+
+Address RelocInfo::target_address_address() {
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) ||
+ rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);
+
+ // Read the address of the word containing the target_address in an
+ // instruction stream.
+ // The only architecture-independent user of this function is the serializer.
+ // The serializer uses it to find out how many raw bytes of instruction to
+ // output before the next target.
+ // For an instruction like LIS/ORI where the target bits are mixed into the
+ // instruction bits, the size of the target will be zero, indicating that the
+ // serializer should not step forward in memory after a target is resolved
+ // and written.
+ return reinterpret_cast<Address>(pc_);
+}
+
+Address RelocInfo::constant_pool_entry_address() {
+ UNREACHABLE();
+ return NULL;
+}
+
+int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
+
+void RelocInfo::set_target_address(Address target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ Assembler::set_target_address_at(isolate_, pc_, host_, target,
+ icache_flush_mode);
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
+ IsCodeTarget(rmode_)) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
+}
+
+Address Assembler::target_address_from_return_address(Address pc) {
+ // Returns the address of the call target from the return address that will
+ // be returned to after a call.
+ // Sequence is:
+ // BRASL r14, RI
+ return pc - kCallTargetAddressOffset;
+}
+
+Address Assembler::return_address_from_call_start(Address pc) {
+ // Sequence is:
+ // BRASL r14, RI
+ return pc + kCallTargetAddressOffset;
+}
+
+Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
+ SixByteInstr instr =
+ Instruction::InstructionBits(reinterpret_cast<const byte*>(pc));
+ int index = instr & 0xFFFFFFFF;
+ return code_targets_[index];
+}
+
+void RelocInfo::update_wasm_memory_reference(
+ Address old_base, Address new_base, size_t old_size, size_t new_size,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ DCHECK(old_base <= wasm_memory_reference() &&
+ wasm_memory_reference() < old_base + old_size);
+ Address updated_reference = new_base + (wasm_memory_reference() - old_base);
+ DCHECK(new_base <= updated_reference &&
+ updated_reference < new_base + new_size);
+ Assembler::set_target_address_at(isolate_, pc_, host_, updated_reference,
+ icache_flush_mode);
+}
+
+Object* RelocInfo::target_object() {
+ DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
+}
+
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
+ DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ if (rmode_ == EMBEDDED_OBJECT) {
+ return Handle<Object>(
+ reinterpret_cast<Object**>(Assembler::target_address_at(pc_, host_)));
+ } else {
+ return origin->code_target_object_handle_at(pc_);
+ }
+}
+
+void RelocInfo::set_target_object(Object* target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ Assembler::set_target_address_at(isolate_, pc_, host_,
+ reinterpret_cast<Address>(target),
+ icache_flush_mode);
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
+ target->IsHeapObject()) {
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target));
+ }
+}
+
+Address RelocInfo::target_external_reference() {
+ DCHECK(rmode_ == EXTERNAL_REFERENCE);
+ return Assembler::target_address_at(pc_, host_);
+}
+
+Address RelocInfo::target_runtime_entry(Assembler* origin) {
+ DCHECK(IsRuntimeEntry(rmode_));
+ return target_address();
+}
+
+void RelocInfo::set_target_runtime_entry(Address target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsRuntimeEntry(rmode_));
+ if (target_address() != target)
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
+}
+
+Handle<Cell> RelocInfo::target_cell_handle() {
+ DCHECK(rmode_ == RelocInfo::CELL);
+ Address address = Memory::Address_at(pc_);
+ return Handle<Cell>(reinterpret_cast<Cell**>(address));
+}
+
+Cell* RelocInfo::target_cell() {
+ DCHECK(rmode_ == RelocInfo::CELL);
+ return Cell::FromValueAddress(Memory::Address_at(pc_));
+}
+
+void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(rmode_ == RelocInfo::CELL);
+ Address address = cell->address() + Cell::kValueOffset;
+ Memory::Address_at(pc_) = address;
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
+ cell);
+ }
+}
+
+#if V8_TARGET_ARCH_S390X
+// NOP(2byte) + PUSH + MOV + BASR =
+// NOP + LAY + STG + IIHF + IILF + BASR
+static const int kCodeAgingSequenceLength = 28;
+static const int kCodeAgingTargetDelta = 14; // Jump past NOP + PUSH to IIHF
+ // LAY + 4 * STG + LA
+static const int kNoCodeAgeSequenceLength = 34;
+#else
+#if (V8_HOST_ARCH_S390)
+// NOP + NILH + LAY + ST + IILF + BASR
+static const int kCodeAgingSequenceLength = 24;
+static const int kCodeAgingTargetDelta = 16; // Jump past NOP to IILF
+// NILH + LAY + 4 * ST + LA
+static const int kNoCodeAgeSequenceLength = 30;
+#else
+// NOP + LAY + ST + IILF + BASR
+static const int kCodeAgingSequenceLength = 20;
+static const int kCodeAgingTargetDelta = 12; // Jump past NOP to IILF
+// LAY + 4 * ST + LA
+static const int kNoCodeAgeSequenceLength = 26;
+#endif
+#endif
+
+Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+ UNREACHABLE(); // This should never be reached on S390.
+ return Handle<Object>();
+}
+
+Code* RelocInfo::code_age_stub() {
+ DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ return Code::GetCodeFromTargetAddress(
+ Assembler::target_address_at(pc_ + kCodeAgingTargetDelta, host_));
+}
+
+void RelocInfo::set_code_age_stub(Code* stub,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ Assembler::set_target_address_at(isolate_, pc_ + kCodeAgingTargetDelta, host_,
+ stub->instruction_start(),
+ icache_flush_mode);
+}
+
+Address RelocInfo::debug_call_address() {
+ DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
+ return Assembler::target_address_at(pc_, host_);
+}
+
+void RelocInfo::set_debug_call_address(Address target) {
+ DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
+ Assembler::set_target_address_at(isolate_, pc_, host_, target);
+ if (host() != NULL) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
+}
+
+void RelocInfo::WipeOut() {
+ DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
+ IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
+ IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
+ if (IsInternalReference(rmode_)) {
+ // Jump table entry
+ Memory::Address_at(pc_) = NULL;
+ } else if (IsInternalReferenceEncoded(rmode_)) {
+ // mov sequence
+ // Currently used only by deserializer, no need to flush.
+ Assembler::set_target_address_at(isolate_, pc_, host_, NULL,
+ SKIP_ICACHE_FLUSH);
+ } else {
+ Assembler::set_target_address_at(isolate_, pc_, host_, NULL);
+ }
+}
+
+void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
+ RelocInfo::Mode mode = rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ visitor->VisitEmbeddedPointer(this);
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ visitor->VisitCodeTarget(this);
+ } else if (mode == RelocInfo::CELL) {
+ visitor->VisitCell(this);
+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ visitor->VisitExternalReference(this);
+ } else if (mode == RelocInfo::INTERNAL_REFERENCE) {
+ visitor->VisitInternalReference(this);
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ visitor->VisitCodeAgeSequence(this);
+ } else if (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()) {
+ visitor->VisitDebugTarget(this);
+ } else if (IsRuntimeEntry(mode)) {
+ visitor->VisitRuntimeEntry(this);
+ }
+}
+
+template <typename StaticVisitor>
+void RelocInfo::Visit(Heap* heap) {
+ RelocInfo::Mode mode = rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ StaticVisitor::VisitEmbeddedPointer(heap, this);
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ StaticVisitor::VisitCodeTarget(heap, this);
+ } else if (mode == RelocInfo::CELL) {
+ StaticVisitor::VisitCell(heap, this);
+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ StaticVisitor::VisitExternalReference(this);
+ } else if (mode == RelocInfo::INTERNAL_REFERENCE) {
+ StaticVisitor::VisitInternalReference(this);
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ StaticVisitor::VisitCodeAgeSequence(heap, this);
+ } else if (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()) {
+ StaticVisitor::VisitDebugTarget(heap, this);
+ } else if (IsRuntimeEntry(mode)) {
+ StaticVisitor::VisitRuntimeEntry(this);
+ }
+}
+
+// Operand constructors
+Operand::Operand(intptr_t immediate, RelocInfo::Mode rmode) {
+ rm_ = no_reg;
+ imm_ = immediate;
+ rmode_ = rmode;
+}
+
+Operand::Operand(const ExternalReference& f) {
+ rm_ = no_reg;
+ imm_ = reinterpret_cast<intptr_t>(f.address());
+ rmode_ = RelocInfo::EXTERNAL_REFERENCE;
+}
+
+Operand::Operand(Smi* value) {
+ rm_ = no_reg;
+ imm_ = reinterpret_cast<intptr_t>(value);
+ rmode_ = kRelocInfo_NONEPTR;
+}
+
+Operand::Operand(Register rm) {
+ rm_ = rm;
+ rmode_ = kRelocInfo_NONEPTR; // S390 -why doesn't ARM do this?
+}
+
+void Assembler::CheckBuffer() {
+ if (buffer_space() <= kGap) {
+ GrowBuffer();
+ }
+}
+
+int32_t Assembler::emit_code_target(Handle<Code> target, RelocInfo::Mode rmode,
+ TypeFeedbackId ast_id) {
+ DCHECK(RelocInfo::IsCodeTarget(rmode));
+ if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
+ SetRecordedAstId(ast_id);
+ RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID);
+ } else {
+ RecordRelocInfo(rmode);
+ }
+
+ int current = code_targets_.length();
+ if (current > 0 && code_targets_.last().is_identical_to(target)) {
+ // Optimization if we keep jumping to the same code target.
+ current--;
+ } else {
+ code_targets_.Add(target);
+ }
+ return current;
+}
+
+// Helper to emit the binary encoding of a 2 byte instruction
+void Assembler::emit2bytes(uint16_t x) {
+ CheckBuffer();
+#if V8_TARGET_LITTLE_ENDIAN
+ // We need to emit instructions in big endian format as disassembler /
+ // simulator require the first byte of the instruction in order to decode
+ // the instruction length. Swap the bytes.
+ x = ((x & 0x00FF) << 8) | ((x & 0xFF00) >> 8);
+#endif
+ *reinterpret_cast<uint16_t*>(pc_) = x;
+ pc_ += 2;
+}
+
+// Helper to emit the binary encoding of a 4 byte instruction
+void Assembler::emit4bytes(uint32_t x) {
+ CheckBuffer();
+#if V8_TARGET_LITTLE_ENDIAN
+ // We need to emit instructions in big endian format as disassembler /
+ // simulator require the first byte of the instruction in order to decode
+ // the instruction length. Swap the bytes.
+ x = ((x & 0x000000FF) << 24) | ((x & 0x0000FF00) << 8) |
+ ((x & 0x00FF0000) >> 8) | ((x & 0xFF000000) >> 24);
+#endif
+ *reinterpret_cast<uint32_t*>(pc_) = x;
+ pc_ += 4;
+}
+
+// Helper to emit the binary encoding of a 6 byte instruction
+void Assembler::emit6bytes(uint64_t x) {
+ CheckBuffer();
+#if V8_TARGET_LITTLE_ENDIAN
+ // We need to emit instructions in big endian format as disassembler /
+ // simulator require the first byte of the instruction in order to decode
+ // the instruction length. Swap the bytes.
+ x = (static_cast<uint64_t>(x & 0xFF) << 40) |
+ (static_cast<uint64_t>((x >> 8) & 0xFF) << 32) |
+ (static_cast<uint64_t>((x >> 16) & 0xFF) << 24) |
+ (static_cast<uint64_t>((x >> 24) & 0xFF) << 16) |
+ (static_cast<uint64_t>((x >> 32) & 0xFF) << 8) |
+ (static_cast<uint64_t>((x >> 40) & 0xFF));
+ x |= (*reinterpret_cast<uint64_t*>(pc_) >> 48) << 48;
+#else
+ // We need to pad two bytes of zeros in order to get the 6-bytes
+ // stored from low address.
+ x = x << 16;
+ x |= *reinterpret_cast<uint64_t*>(pc_) & 0xFFFF;
+#endif
+ // It is safe to store 8-bytes, as CheckBuffer() guarantees we have kGap
+ // space left over.
+ *reinterpret_cast<uint64_t*>(pc_) = x;
+ pc_ += 6;
+}
+
+bool Operand::is_reg() const { return rm_.is_valid(); }
+
+// Fetch the 32bit value from the FIXED_SEQUENCE IIHF / IILF
+Address Assembler::target_address_at(Address pc, Address constant_pool) {
+ // S390 Instruction!
+ // We want to check for instructions generated by Asm::mov()
+ Opcode op1 = Instruction::S390OpcodeValue(reinterpret_cast<const byte*>(pc));
+ SixByteInstr instr_1 =
+ Instruction::InstructionBits(reinterpret_cast<const byte*>(pc));
+
+ if (BRASL == op1 || BRCL == op1) {
+ int32_t dis = static_cast<int32_t>(instr_1 & 0xFFFFFFFF) * 2;
+ return reinterpret_cast<Address>(reinterpret_cast<uint64_t>(pc) + dis);
+ }
+
+#if V8_TARGET_ARCH_S390X
+ int instr1_length =
+ Instruction::InstructionLength(reinterpret_cast<const byte*>(pc));
+ Opcode op2 = Instruction::S390OpcodeValue(
+ reinterpret_cast<const byte*>(pc + instr1_length));
+ SixByteInstr instr_2 = Instruction::InstructionBits(
+ reinterpret_cast<const byte*>(pc + instr1_length));
+ // IIHF for hi_32, IILF for lo_32
+ if (IIHF == op1 && IILF == op2) {
+ return reinterpret_cast<Address>(((instr_1 & 0xFFFFFFFF) << 32) |
+ ((instr_2 & 0xFFFFFFFF)));
+ }
+#else
+ // IILF loads 32-bits
+ if (IILF == op1 || CFI == op1) {
+ return reinterpret_cast<Address>((instr_1 & 0xFFFFFFFF));
+ }
+#endif
+
+ UNIMPLEMENTED();
+ return (Address)0;
+}
+
+// This sets the branch destination (which gets loaded at the call address).
+// This is for calls and branches within generated code. The serializer
+// has already deserialized the mov instructions etc.
+// There is a FIXED_SEQUENCE assumption here
+void Assembler::deserialization_set_special_target_at(
+ Isolate* isolate, Address instruction_payload, Code* code, Address target) {
+ set_target_address_at(isolate, instruction_payload, code, target);
+}
+
+void Assembler::deserialization_set_target_internal_reference_at(
+ Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
+ if (RelocInfo::IsInternalReferenceEncoded(mode)) {
+ Code* code = NULL;
+ set_target_address_at(isolate, pc, code, target, SKIP_ICACHE_FLUSH);
+ } else {
+ Memory::Address_at(pc) = target;
+ }
+}
+
+// This code assumes the FIXED_SEQUENCE of IIHF/IILF
+void Assembler::set_target_address_at(Isolate* isolate, Address pc,
+ Address constant_pool, Address target,
+ ICacheFlushMode icache_flush_mode) {
+ // Check for instructions generated by Asm::mov()
+ Opcode op1 = Instruction::S390OpcodeValue(reinterpret_cast<const byte*>(pc));
+ SixByteInstr instr_1 =
+ Instruction::InstructionBits(reinterpret_cast<const byte*>(pc));
+ bool patched = false;
+
+ if (BRASL == op1 || BRCL == op1) {
+ instr_1 >>= 32; // Zero out the lower 32-bits
+ instr_1 <<= 32;
+ int32_t halfwords = (target - pc) / 2; // number of halfwords
+ instr_1 |= static_cast<uint32_t>(halfwords);
+ Instruction::SetInstructionBits<SixByteInstr>(reinterpret_cast<byte*>(pc),
+ instr_1);
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ Assembler::FlushICache(isolate, pc, 6);
+ }
+ patched = true;
+ } else {
+#if V8_TARGET_ARCH_S390X
+ int instr1_length =
+ Instruction::InstructionLength(reinterpret_cast<const byte*>(pc));
+ Opcode op2 = Instruction::S390OpcodeValue(
+ reinterpret_cast<const byte*>(pc + instr1_length));
+ SixByteInstr instr_2 = Instruction::InstructionBits(
+ reinterpret_cast<const byte*>(pc + instr1_length));
+ // IIHF for hi_32, IILF for lo_32
+ if (IIHF == op1 && IILF == op2) {
+ // IIHF
+ instr_1 >>= 32; // Zero out the lower 32-bits
+ instr_1 <<= 32;
+ instr_1 |= reinterpret_cast<uint64_t>(target) >> 32;
+
+ Instruction::SetInstructionBits<SixByteInstr>(reinterpret_cast<byte*>(pc),
+ instr_1);
+
+ // IILF
+ instr_2 >>= 32;
+ instr_2 <<= 32;
+ instr_2 |= reinterpret_cast<uint64_t>(target) & 0xFFFFFFFF;
+
+ Instruction::SetInstructionBits<SixByteInstr>(
+ reinterpret_cast<byte*>(pc + instr1_length), instr_2);
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ Assembler::FlushICache(isolate, pc, 12);
+ }
+ patched = true;
+ }
+#else
+ // IILF loads 32-bits
+ if (IILF == op1 || CFI == op1) {
+ instr_1 >>= 32; // Zero out the lower 32-bits
+ instr_1 <<= 32;
+ instr_1 |= reinterpret_cast<uint32_t>(target);
+
+ Instruction::SetInstructionBits<SixByteInstr>(reinterpret_cast<byte*>(pc),
+ instr_1);
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ Assembler::FlushICache(isolate, pc, 6);
+ }
+ patched = true;
+ }
+#endif
+ }
+ if (!patched) UNREACHABLE();
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_S390_ASSEMBLER_S390_INL_H_
diff --git a/deps/v8/src/s390/assembler-s390.cc b/deps/v8/src/s390/assembler-s390.cc
new file mode 100644
index 0000000000..35ba4315db
--- /dev/null
+++ b/deps/v8/src/s390/assembler-s390.cc
@@ -0,0 +1,3061 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2014 the V8 project authors. All rights reserved.
+
+#include "src/s390/assembler-s390.h"
+
+#if V8_TARGET_ARCH_S390
+
+#if V8_HOST_ARCH_S390
+#include <elf.h> // Required for auxv checks for STFLE support
+#endif
+
+#include "src/base/bits.h"
+#include "src/base/cpu.h"
+#include "src/s390/assembler-s390-inl.h"
+
+#include "src/macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+// Get the CPU features enabled by the build.
+static unsigned CpuFeaturesImpliedByCompiler() {
+ unsigned answer = 0;
+ return answer;
+}
+
+// Check whether Store Facility STFLE instruction is available on the platform.
+// Instruction returns a bit vector of the enabled hardware facilities.
+static bool supportsSTFLE() {
+#if V8_HOST_ARCH_S390
+ static bool read_tried = false;
+ static uint32_t auxv_hwcap = 0;
+
+ if (!read_tried) {
+ // Open the AUXV (auxilliary vector) psuedo-file
+ int fd = open("/proc/self/auxv", O_RDONLY);
+
+ read_tried = true;
+ if (fd != -1) {
+#if V8_TARGET_ARCH_S390X
+ static Elf64_auxv_t buffer[16];
+ Elf64_auxv_t* auxv_element;
+#else
+ static Elf32_auxv_t buffer[16];
+ Elf32_auxv_t* auxv_element;
+#endif
+ int bytes_read = 0;
+ while (bytes_read >= 0) {
+ // Read a chunk of the AUXV
+ bytes_read = read(fd, buffer, sizeof(buffer));
+ // Locate and read the platform field of AUXV if it is in the chunk
+ for (auxv_element = buffer;
+ auxv_element + sizeof(auxv_element) <= buffer + bytes_read &&
+ auxv_element->a_type != AT_NULL;
+ auxv_element++) {
+ // We are looking for HWCAP entry in AUXV to search for STFLE support
+ if (auxv_element->a_type == AT_HWCAP) {
+ /* Note: Both auxv_hwcap and buffer are static */
+ auxv_hwcap = auxv_element->a_un.a_val;
+ goto done_reading;
+ }
+ }
+ }
+ done_reading:
+ close(fd);
+ }
+ }
+
+ // Did not find result
+ if (0 == auxv_hwcap) {
+ return false;
+ }
+
+ // HWCAP_S390_STFLE is defined to be 4 in include/asm/elf.h. Currently
+ // hardcoded in case that include file does not exist.
+ const uint32_t HWCAP_S390_STFLE = 4;
+ return (auxv_hwcap & HWCAP_S390_STFLE);
+#else
+ // STFLE is not available on non-s390 hosts
+ return false;
+#endif
+}
+
+void CpuFeatures::ProbeImpl(bool cross_compile) {
+ supported_ |= CpuFeaturesImpliedByCompiler();
+ icache_line_size_ = 256;
+
+ // Only use statically determined features for cross compile (snapshot).
+ if (cross_compile) return;
+
+#ifdef DEBUG
+ initialized_ = true;
+#endif
+
+ static bool performSTFLE = supportsSTFLE();
+
+// Need to define host, as we are generating inlined S390 assembly to test
+// for facilities.
+#if V8_HOST_ARCH_S390
+ if (performSTFLE) {
+ // STFLE D(B) requires:
+ // GPR0 to specify # of double words to update minus 1.
+ // i.e. GPR0 = 0 for 1 doubleword
+ // D(B) to specify to memory location to store the facilities bits
+ // The facilities we are checking for are:
+ // Bit 45 - Distinct Operands for instructions like ARK, SRK, etc.
+ // As such, we require only 1 double word
+ int64_t facilities[1];
+ facilities[0] = 0;
+ // LHI sets up GPR0
+ // STFLE is specified as .insn, as opcode is not recognized.
+ // We register the instructions kill r0 (LHI) and the CC (STFLE).
+ asm volatile(
+ "lhi 0,0\n"
+ ".insn s,0xb2b00000,%0\n"
+ : "=Q"(facilities)
+ :
+ : "cc", "r0");
+
+ // Test for Distinct Operands Facility - Bit 45
+ if (facilities[0] & (1lu << (63 - 45))) {
+ supported_ |= (1u << DISTINCT_OPS);
+ }
+ // Test for General Instruction Extension Facility - Bit 34
+ if (facilities[0] & (1lu << (63 - 34))) {
+ supported_ |= (1u << GENERAL_INSTR_EXT);
+ }
+ // Test for Floating Point Extension Facility - Bit 37
+ if (facilities[0] & (1lu << (63 - 37))) {
+ supported_ |= (1u << FLOATING_POINT_EXT);
+ }
+ }
+#else
+ // All distinct ops instructions can be simulated
+ supported_ |= (1u << DISTINCT_OPS);
+ // RISBG can be simulated
+ supported_ |= (1u << GENERAL_INSTR_EXT);
+
+ supported_ |= (1u << FLOATING_POINT_EXT);
+ USE(performSTFLE); // To avoid assert
+#endif
+ supported_ |= (1u << FPU);
+}
+
+void CpuFeatures::PrintTarget() {
+ const char* s390_arch = NULL;
+
+#if V8_TARGET_ARCH_S390X
+ s390_arch = "s390x";
+#else
+ s390_arch = "s390";
+#endif
+
+ printf("target %s\n", s390_arch);
+}
+
+void CpuFeatures::PrintFeatures() {
+ printf("FPU=%d\n", CpuFeatures::IsSupported(FPU));
+ printf("FPU_EXT=%d\n", CpuFeatures::IsSupported(FLOATING_POINT_EXT));
+ printf("GENERAL_INSTR=%d\n", CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
+ printf("DISTINCT_OPS=%d\n", CpuFeatures::IsSupported(DISTINCT_OPS));
+}
+
+Register ToRegister(int num) {
+ DCHECK(num >= 0 && num < kNumRegisters);
+ const Register kRegisters[] = {r0, r1, r2, r3, r4, r5, r6, r7,
+ r8, r9, r10, fp, ip, r13, r14, sp};
+ return kRegisters[num];
+}
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+const int RelocInfo::kApplyMask =
+ RelocInfo::kCodeTargetMask | 1 << RelocInfo::INTERNAL_REFERENCE;
+
+bool RelocInfo::IsCodedSpecially() {
+ // The deserializer needs to know whether a pointer is specially
+ // coded. Being specially coded on S390 means that it is an iihf/iilf
+ // instruction sequence, and that is always the case inside code
+ // objects.
+ return true;
+}
+
+bool RelocInfo::IsInConstantPool() { return false; }
+
+// -----------------------------------------------------------------------------
+// Implementation of Operand and MemOperand
+// See assembler-s390-inl.h for inlined constructors
+
+Operand::Operand(Handle<Object> handle) {
+ AllowDeferredHandleDereference using_raw_address;
+ rm_ = no_reg;
+ // Verify all Objects referred by code are NOT in new space.
+ Object* obj = *handle;
+ if (obj->IsHeapObject()) {
+ DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
+ imm_ = reinterpret_cast<intptr_t>(handle.location());
+ rmode_ = RelocInfo::EMBEDDED_OBJECT;
+ } else {
+ // no relocation needed
+ imm_ = reinterpret_cast<intptr_t>(obj);
+ rmode_ = kRelocInfo_NONEPTR;
+ }
+}
+
+MemOperand::MemOperand(Register rn, int32_t offset) {
+ baseRegister = rn;
+ indexRegister = r0;
+ offset_ = offset;
+}
+
+MemOperand::MemOperand(Register rx, Register rb, int32_t offset) {
+ baseRegister = rb;
+ indexRegister = rx;
+ offset_ = offset;
+}
+
+// -----------------------------------------------------------------------------
+// Specific instructions, constants, and masks.
+
+Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
+ : AssemblerBase(isolate, buffer, buffer_size),
+ recorded_ast_id_(TypeFeedbackId::None()),
+ code_targets_(100),
+ positions_recorder_(this) {
+ reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
+
+ last_bound_pos_ = 0;
+ ClearRecordedAstId();
+ relocations_.reserve(128);
+}
+
+void Assembler::GetCode(CodeDesc* desc) {
+ EmitRelocations();
+
+ // Set up code descriptor.
+ desc->buffer = buffer_;
+ desc->buffer_size = buffer_size_;
+ desc->instr_size = pc_offset();
+ desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+ desc->origin = this;
+}
+
+void Assembler::Align(int m) {
+ DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
+ while ((pc_offset() & (m - 1)) != 0) {
+ nop(0);
+ }
+}
+
+void Assembler::CodeTargetAlign() { Align(8); }
+
+Condition Assembler::GetCondition(Instr instr) {
+ switch (instr & kCondMask) {
+ case BT:
+ return eq;
+ case BF:
+ return ne;
+ default:
+ UNIMPLEMENTED();
+ }
+ return al;
+}
+
+#if V8_TARGET_ARCH_S390X
+// This code assumes a FIXED_SEQUENCE for 64bit loads (iihf/iilf)
+bool Assembler::Is64BitLoadIntoIP(SixByteInstr instr1, SixByteInstr instr2) {
+ // Check the instructions are the iihf/iilf load into ip
+ return (((instr1 >> 32) == 0xC0C8) && ((instr2 >> 32) == 0xC0C9));
+}
+#else
+// This code assumes a FIXED_SEQUENCE for 32bit loads (iilf)
+bool Assembler::Is32BitLoadIntoIP(SixByteInstr instr) {
+ // Check the instruction is an iilf load into ip/r12.
+ return ((instr >> 32) == 0xC0C9);
+}
+#endif
+
+// Labels refer to positions in the (to be) generated code.
+// There are bound, linked, and unused labels.
+//
+// Bound labels refer to known positions in the already
+// generated code. pos() is the position the label refers to.
+//
+// Linked labels refer to unknown positions in the code
+// to be generated; pos() is the position of the last
+// instruction using the label.
+
+// The link chain is terminated by a negative code position (must be aligned)
+const int kEndOfChain = -4;
+
+// Returns the target address of the relative instructions, typically
+// of the form: pos + imm (where immediate is in # of halfwords for
+// BR* and LARL).
+int Assembler::target_at(int pos) {
+ SixByteInstr instr = instr_at(pos);
+ // check which type of branch this is 16 or 26 bit offset
+ Opcode opcode = Instruction::S390OpcodeValue(buffer_ + pos);
+
+ if (BRC == opcode || BRCT == opcode || BRCTG == opcode) {
+ int16_t imm16 = SIGN_EXT_IMM16((instr & kImm16Mask));
+ imm16 <<= 1; // BRC immediate is in # of halfwords
+ if (imm16 == 0) return kEndOfChain;
+ return pos + imm16;
+ } else if (LLILF == opcode || BRCL == opcode || LARL == opcode ||
+ BRASL == opcode) {
+ int32_t imm32 =
+ static_cast<int32_t>(instr & (static_cast<uint64_t>(0xffffffff)));
+ if (LLILF != opcode)
+ imm32 <<= 1; // BR* + LARL treat immediate in # of halfwords
+ if (imm32 == 0) return kEndOfChain;
+ return pos + imm32;
+ }
+
+ // Unknown condition
+ DCHECK(false);
+ return -1;
+}
+
+// Update the target address of the current relative instruction.
+void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
+ SixByteInstr instr = instr_at(pos);
+ Opcode opcode = Instruction::S390OpcodeValue(buffer_ + pos);
+
+ if (is_branch != nullptr) {
+ *is_branch = (opcode == BRC || opcode == BRCT || opcode == BRCTG ||
+ opcode == BRCL || opcode == BRASL);
+ }
+
+ if (BRC == opcode || BRCT == opcode || BRCTG == opcode) {
+ int16_t imm16 = target_pos - pos;
+ instr &= (~0xffff);
+ DCHECK(is_int16(imm16));
+ instr_at_put<FourByteInstr>(pos, instr | (imm16 >> 1));
+ return;
+ } else if (BRCL == opcode || LARL == opcode || BRASL == opcode) {
+ // Immediate is in # of halfwords
+ int32_t imm32 = target_pos - pos;
+ instr &= (~static_cast<uint64_t>(0xffffffff));
+ instr_at_put<SixByteInstr>(pos, instr | (imm32 >> 1));
+ return;
+ } else if (LLILF == opcode) {
+ DCHECK(target_pos == kEndOfChain || target_pos >= 0);
+ // Emitted label constant, not part of a branch.
+ // Make label relative to Code* of generated Code object.
+ int32_t imm32 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
+ instr &= (~static_cast<uint64_t>(0xffffffff));
+ instr_at_put<SixByteInstr>(pos, instr | imm32);
+ return;
+ }
+ DCHECK(false);
+}
+
+// Returns the maximum number of bits given instruction can address.
+int Assembler::max_reach_from(int pos) {
+ Opcode opcode = Instruction::S390OpcodeValue(buffer_ + pos);
+
+ // Check which type of instr. In theory, we can return
+ // the values below + 1, given offset is # of halfwords
+ if (BRC == opcode || BRCT == opcode || BRCTG == opcode) {
+ return 16;
+ } else if (LLILF == opcode || BRCL == opcode || LARL == opcode ||
+ BRASL == opcode) {
+ return 31; // Using 31 as workaround instead of 32 as
+ // is_intn(x,32) doesn't work on 32-bit platforms.
+ // llilf: Emitted label constant, not part of
+ // a branch (regexp PushBacktrack).
+ }
+ DCHECK(false);
+ return 16;
+}
+
+void Assembler::bind_to(Label* L, int pos) {
+ DCHECK(0 <= pos && pos <= pc_offset()); // must have a valid binding position
+ bool is_branch = false;
+ while (L->is_linked()) {
+ int fixup_pos = L->pos();
+#ifdef DEBUG
+ int32_t offset = pos - fixup_pos;
+ int maxReach = max_reach_from(fixup_pos);
+#endif
+ next(L); // call next before overwriting link with target at fixup_pos
+ DCHECK(is_intn(offset, maxReach));
+ target_at_put(fixup_pos, pos, &is_branch);
+ }
+ L->bind_to(pos);
+
+ // Keep track of the last bound label so we don't eliminate any instructions
+ // before a bound label.
+ if (pos > last_bound_pos_) last_bound_pos_ = pos;
+}
+
+void Assembler::bind(Label* L) {
+ DCHECK(!L->is_bound()); // label can only be bound once
+ bind_to(L, pc_offset());
+}
+
+void Assembler::next(Label* L) {
+ DCHECK(L->is_linked());
+ int link = target_at(L->pos());
+ if (link == kEndOfChain) {
+ L->Unuse();
+ } else {
+ DCHECK(link >= 0);
+ L->link_to(link);
+ }
+}
+
+bool Assembler::is_near(Label* L, Condition cond) {
+ DCHECK(L->is_bound());
+ if (L->is_bound() == false) return false;
+
+ int maxReach = ((cond == al) ? 26 : 16);
+ int offset = L->pos() - pc_offset();
+
+ return is_intn(offset, maxReach);
+}
+
+int Assembler::link(Label* L) {
+ int position;
+ if (L->is_bound()) {
+ position = L->pos();
+ } else {
+ if (L->is_linked()) {
+ position = L->pos(); // L's link
+ } else {
+ // was: target_pos = kEndOfChain;
+ // However, using self to mark the first reference
+ // should avoid most instances of branch offset overflow. See
+ // target_at() for where this is converted back to kEndOfChain.
+ position = pc_offset();
+ }
+ L->link_to(pc_offset());
+ }
+
+ return position;
+}
+
+void Assembler::load_label_offset(Register r1, Label* L) {
+ int target_pos;
+ int constant;
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ constant = target_pos + (Code::kHeaderSize - kHeapObjectTag);
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos(); // L's link
+ } else {
+ // was: target_pos = kEndOfChain;
+ // However, using branch to self to mark the first reference
+ // should avoid most instances of branch offset overflow. See
+ // target_at() for where this is converted back to kEndOfChain.
+ target_pos = pc_offset();
+ }
+ L->link_to(pc_offset());
+
+ constant = target_pos - pc_offset();
+ }
+ llilf(r1, Operand(constant));
+}
+
+// Pseudo op - branch on condition
+void Assembler::branchOnCond(Condition c, int branch_offset, bool is_bound) {
+ int offset = branch_offset;
+ if (is_bound && is_int16(offset)) {
+ brc(c, Operand(offset & 0xFFFF)); // short jump
+ } else {
+ brcl(c, Operand(offset)); // long jump
+ }
+}
+
+// 32-bit Store Multiple - short displacement (12-bits unsigned)
+void Assembler::stm(Register r1, Register r2, const MemOperand& src) {
+ rs_form(STM, r1, r2, src.rb(), src.offset());
+}
+
+// 32-bit Store Multiple - long displacement (20-bits signed)
+void Assembler::stmy(Register r1, Register r2, const MemOperand& src) {
+ rsy_form(STMY, r1, r2, src.rb(), src.offset());
+}
+
+// 64-bit Store Multiple - long displacement (20-bits signed)
+void Assembler::stmg(Register r1, Register r2, const MemOperand& src) {
+ rsy_form(STMG, r1, r2, src.rb(), src.offset());
+}
+
+// Exception-generating instructions and debugging support.
+// Stops with a non-negative code less than kNumOfWatchedStops support
+// enabling/disabling and a counter feature. See simulator-s390.h .
+void Assembler::stop(const char* msg, Condition cond, int32_t code,
+ CRegister cr) {
+ if (cond != al) {
+ Label skip;
+ b(NegateCondition(cond), &skip, Label::kNear);
+ bkpt(0);
+ bind(&skip);
+ } else {
+ bkpt(0);
+ }
+}
+
+void Assembler::bkpt(uint32_t imm16) {
+ // GDB software breakpoint instruction
+ emit2bytes(0x0001);
+}
+
+// Pseudo instructions.
+void Assembler::nop(int type) {
+ switch (type) {
+ case 0:
+ lr(r0, r0);
+ break;
+ case DEBUG_BREAK_NOP:
+ // TODO(john.yan): Use a better NOP break
+ oill(r3, Operand::Zero());
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+// RR format: <insn> R1,R2
+// +--------+----+----+
+// | OpCode | R1 | R2 |
+// +--------+----+----+
+// 0 8 12 15
+#define RR_FORM_EMIT(name, op) \
+ void Assembler::name(Register r1, Register r2) { rr_form(op, r1, r2); }
+
+void Assembler::rr_form(Opcode op, Register r1, Register r2) {
+ DCHECK(is_uint8(op));
+ emit2bytes(op * B8 | r1.code() * B4 | r2.code());
+}
+
+void Assembler::rr_form(Opcode op, DoubleRegister r1, DoubleRegister r2) {
+ DCHECK(is_uint8(op));
+ emit2bytes(op * B8 | r1.code() * B4 | r2.code());
+}
+
+// RR2 format: <insn> M1,R2
+// +--------+----+----+
+// | OpCode | M1 | R2 |
+// +--------+----+----+
+// 0 8 12 15
+#define RR2_FORM_EMIT(name, op) \
+ void Assembler::name(Condition m1, Register r2) { rr_form(op, m1, r2); }
+
+void Assembler::rr_form(Opcode op, Condition m1, Register r2) {
+ DCHECK(is_uint8(op));
+ DCHECK(is_uint4(m1));
+ emit2bytes(op * B8 | m1 * B4 | r2.code());
+}
+
+// RX format: <insn> R1,D2(X2,B2)
+// +--------+----+----+----+-------------+
+// | OpCode | R1 | X2 | B2 | D2 |
+// +--------+----+----+----+-------------+
+// 0 8 12 16 20 31
+#define RX_FORM_EMIT(name, op) \
+ void Assembler::name(Register r, const MemOperand& opnd) { \
+ name(r, opnd.getIndexRegister(), opnd.getBaseRegister(), \
+ opnd.getDisplacement()); \
+ } \
+ void Assembler::name(Register r1, Register x2, Register b2, Disp d2) { \
+ rx_form(op, r1, x2, b2, d2); \
+ }
+void Assembler::rx_form(Opcode op, Register r1, Register x2, Register b2,
+ Disp d2) {
+ DCHECK(is_uint8(op));
+ DCHECK(is_uint12(d2));
+ emit4bytes(op * B24 | r1.code() * B20 | x2.code() * B16 | b2.code() * B12 |
+ d2);
+}
+
+void Assembler::rx_form(Opcode op, DoubleRegister r1, Register x2, Register b2,
+ Disp d2) {
+ DCHECK(is_uint8(op));
+ DCHECK(is_uint12(d2));
+ emit4bytes(op * B24 | r1.code() * B20 | x2.code() * B16 | b2.code() * B12 |
+ d2);
+}
+
+// RI1 format: <insn> R1,I2
+// +--------+----+----+------------------+
+// | OpCode | R1 |OpCd| I2 |
+// +--------+----+----+------------------+
+// 0 8 12 16 31
+#define RI1_FORM_EMIT(name, op) \
+ void Assembler::name(Register r, const Operand& i2) { ri_form(op, r, i2); }
+
+void Assembler::ri_form(Opcode op, Register r1, const Operand& i2) {
+ DCHECK(is_uint12(op));
+ DCHECK(is_uint16(i2.imm_) || is_int16(i2.imm_));
+ emit4bytes((op & 0xFF0) * B20 | r1.code() * B20 | (op & 0xF) * B16 |
+ (i2.imm_ & 0xFFFF));
+}
+
+// RI2 format: <insn> M1,I2
+// +--------+----+----+------------------+
+// | OpCode | M1 |OpCd| I2 |
+// +--------+----+----+------------------+
+// 0 8 12 16 31
+#define RI2_FORM_EMIT(name, op) \
+ void Assembler::name(Condition m, const Operand& i2) { ri_form(op, m, i2); }
+
+void Assembler::ri_form(Opcode op, Condition m1, const Operand& i2) {
+ DCHECK(is_uint12(op));
+ DCHECK(is_uint4(m1));
+ DCHECK(is_uint16(i2.imm_));
+ emit4bytes((op & 0xFF0) * B20 | m1 * B20 | (op & 0xF) * B16 |
+ (i2.imm_ & 0xFFFF));
+}
+
+// RIE-f format: <insn> R1,R2,I3,I4,I5
+// +--------+----+----+------------------+--------+--------+
+// | OpCode | R1 | R2 | I3 | I4 | I5 | OpCode |
+// +--------+----+----+------------------+--------+--------+
+// 0 8 12 16 24 32 40 47
+void Assembler::rie_f_form(Opcode op, Register r1, Register r2,
+ const Operand& i3, const Operand& i4,
+ const Operand& i5) {
+ DCHECK(is_uint16(op));
+ DCHECK(is_uint8(i3.imm_));
+ DCHECK(is_uint8(i4.imm_));
+ DCHECK(is_uint8(i5.imm_));
+ uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
+ (static_cast<uint64_t>(r1.code())) * B36 |
+ (static_cast<uint64_t>(r2.code())) * B32 |
+ (static_cast<uint64_t>(i3.imm_)) * B24 |
+ (static_cast<uint64_t>(i4.imm_)) * B16 |
+ (static_cast<uint64_t>(i5.imm_)) * B8 |
+ (static_cast<uint64_t>(op & 0x00FF));
+ emit6bytes(code);
+}
+
+// RIE format: <insn> R1,R3,I2
+// +--------+----+----+------------------+--------+--------+
+// | OpCode | R1 | R3 | I2 |////////| OpCode |
+// +--------+----+----+------------------+--------+--------+
+// 0 8 12 16 32 40 47
+#define RIE_FORM_EMIT(name, op) \
+ void Assembler::name(Register r1, Register r3, const Operand& i2) { \
+ rie_form(op, r1, r3, i2); \
+ }
+
+void Assembler::rie_form(Opcode op, Register r1, Register r3,
+ const Operand& i2) {
+ DCHECK(is_uint16(op));
+ DCHECK(is_int16(i2.imm_));
+ uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
+ (static_cast<uint64_t>(r1.code())) * B36 |
+ (static_cast<uint64_t>(r3.code())) * B32 |
+ (static_cast<uint64_t>(i2.imm_ & 0xFFFF)) * B16 |
+ (static_cast<uint64_t>(op & 0x00FF));
+ emit6bytes(code);
+}
+
+// RIL1 format: <insn> R1,I2
+// +--------+----+----+------------------------------------+
+// | OpCode | R1 |OpCd| I2 |
+// +--------+----+----+------------------------------------+
+// 0 8 12 16 47
+#define RIL1_FORM_EMIT(name, op) \
+ void Assembler::name(Register r, const Operand& i2) { ril_form(op, r, i2); }
+
+void Assembler::ril_form(Opcode op, Register r1, const Operand& i2) {
+ DCHECK(is_uint12(op));
+ uint64_t code = (static_cast<uint64_t>(op & 0xFF0)) * B36 |
+ (static_cast<uint64_t>(r1.code())) * B36 |
+ (static_cast<uint64_t>(op & 0x00F)) * B32 |
+ (static_cast<uint64_t>(i2.imm_) & 0xFFFFFFFF);
+ emit6bytes(code);
+}
+
+// RIL2 format: <insn> M1,I2
+// +--------+----+----+------------------------------------+
+// | OpCode | M1 |OpCd| I2 |
+// +--------+----+----+------------------------------------+
+// 0 8 12 16 47
+#define RIL2_FORM_EMIT(name, op) \
+ void Assembler::name(Condition m1, const Operand& i2) { \
+ ril_form(op, m1, i2); \
+ }
+
+void Assembler::ril_form(Opcode op, Condition m1, const Operand& i2) {
+ DCHECK(is_uint12(op));
+ DCHECK(is_uint4(m1));
+ uint64_t code = (static_cast<uint64_t>(op & 0xFF0)) * B36 |
+ (static_cast<uint64_t>(m1)) * B36 |
+ (static_cast<uint64_t>(op & 0x00F)) * B32 |
+ (static_cast<uint64_t>(i2.imm_ & 0xFFFFFFFF));
+ emit6bytes(code);
+}
+
+// RRE format: <insn> R1,R2
+// +------------------+--------+----+----+
+// | OpCode |////////| R1 | R2 |
+// +------------------+--------+----+----+
+// 0 16 24 28 31
+#define RRE_FORM_EMIT(name, op) \
+ void Assembler::name(Register r1, Register r2) { rre_form(op, r1, r2); }
+
+void Assembler::rre_form(Opcode op, Register r1, Register r2) {
+ DCHECK(is_uint16(op));
+ emit4bytes(op << 16 | r1.code() * B4 | r2.code());
+}
+
+void Assembler::rre_form(Opcode op, DoubleRegister r1, DoubleRegister r2) {
+ DCHECK(is_uint16(op));
+ emit4bytes(op << 16 | r1.code() * B4 | r2.code());
+}
+
+// RRD format: <insn> R1,R3, R2
+// +------------------+----+----+----+----+
+// | OpCode | R1 |////| R3 | R2 |
+// +------------------+----+----+----+----+
+// 0 16 20 24 28 31
+#define RRD_FORM_EMIT(name, op) \
+ void Assembler::name(Register r1, Register r3, Register r2) { \
+ rrd_form(op, r1, r3, r2); \
+ }
+
+void Assembler::rrd_form(Opcode op, Register r1, Register r3, Register r2) {
+ emit4bytes(op << 16 | r1.code() * B12 | r3.code() * B4 | r2.code());
+}
+
+// RS1 format: <insn> R1,R3,D2(B2)
+// +--------+----+----+----+-------------+
+// | OpCode | R1 | R3 | B2 | D2 |
+// +--------+----+----+----+-------------+
+// 0 8 12 16 20 31
+#define RS1_FORM_EMIT(name, op) \
+ void Assembler::name(Register r1, Register r3, Register b2, Disp d2) { \
+ rs_form(op, r1, r3, b2, d2); \
+ } \
+ void Assembler::name(Register r1, Register r3, const MemOperand& opnd) { \
+ name(r1, r3, opnd.getBaseRegister(), opnd.getDisplacement()); \
+ }
+
+void Assembler::rs_form(Opcode op, Register r1, Register r3, Register b2,
+ const Disp d2) {
+ DCHECK(is_uint12(d2));
+ emit4bytes(op * B24 | r1.code() * B20 | r3.code() * B16 | b2.code() * B12 |
+ d2);
+}
+
+// RS2 format: <insn> R1,M3,D2(B2)
+// +--------+----+----+----+-------------+
+// | OpCode | R1 | M3 | B2 | D2 |
+// +--------+----+----+----+-------------+
+// 0 8 12 16 20 31
+#define RS2_FORM_EMIT(name, op) \
+ void Assembler::name(Register r1, Condition m3, Register b2, Disp d2) { \
+ rs_form(op, r1, m3, b2, d2); \
+ } \
+ void Assembler::name(Register r1, Condition m3, const MemOperand& opnd) { \
+ name(r1, m3, opnd.getBaseRegister(), opnd.getDisplacement()); \
+ }
+
+void Assembler::rs_form(Opcode op, Register r1, Condition m3, Register b2,
+ const Disp d2) {
+ DCHECK(is_uint12(d2));
+ emit4bytes(op * B24 | r1.code() * B20 | m3 * B16 | b2.code() * B12 | d2);
+}
+
+// RSI format: <insn> R1,R3,I2
+// +--------+----+----+------------------+
+// | OpCode | R1 | R3 | RI2 |
+// +--------+----+----+------------------+
+// 0 8 12 16 31
+#define RSI_FORM_EMIT(name, op) \
+ void Assembler::name(Register r1, Register r3, const Operand& i2) { \
+ rsi_form(op, r1, r3, i2); \
+ }
+
+void Assembler::rsi_form(Opcode op, Register r1, Register r3,
+ const Operand& i2) {
+ DCHECK(is_uint8(op));
+ DCHECK(is_uint16(i2.imm_));
+ emit4bytes(op * B24 | r1.code() * B20 | r3.code() * B16 | (i2.imm_ & 0xFFFF));
+}
+
+// RSL format: <insn> R1,R3,D2(B2)
+// +--------+----+----+----+-------------+--------+--------+
+// | OpCode | L1 | | B2 | D2 | | OpCode |
+// +--------+----+----+----+-------------+--------+--------+
+// 0 8 12 16 20 32 40 47
+#define RSL_FORM_EMIT(name, op) \
+ void Assembler::name(Length l1, Register b2, Disp d2) { \
+ rsl_form(op, l1, b2, d2); \
+ }
+
+void Assembler::rsl_form(Opcode op, Length l1, Register b2, Disp d2) {
+ DCHECK(is_uint16(op));
+ uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
+ (static_cast<uint64_t>(l1)) * B36 |
+ (static_cast<uint64_t>(b2.code())) * B28 |
+ (static_cast<uint64_t>(d2)) * B16 |
+ (static_cast<uint64_t>(op & 0x00FF));
+ emit6bytes(code);
+}
+
+// RSY1 format: <insn> R1,R3,D2(B2)
+// +--------+----+----+----+-------------+--------+--------+
+// | OpCode | R1 | R3 | B2 | DL2 | DH2 | OpCode |
+// +--------+----+----+----+-------------+--------+--------+
+// 0 8 12 16 20 32 40 47
+#define RSY1_FORM_EMIT(name, op) \
+ void Assembler::name(Register r1, Register r3, Register b2, Disp d2) { \
+ rsy_form(op, r1, r3, b2, d2); \
+ } \
+ void Assembler::name(Register r1, Register r3, const MemOperand& opnd) { \
+ name(r1, r3, opnd.getBaseRegister(), opnd.getDisplacement()); \
+ }
+
+void Assembler::rsy_form(Opcode op, Register r1, Register r3, Register b2,
+ const Disp d2) {
+ DCHECK(is_int20(d2));
+ DCHECK(is_uint16(op));
+ uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
+ (static_cast<uint64_t>(r1.code())) * B36 |
+ (static_cast<uint64_t>(r3.code())) * B32 |
+ (static_cast<uint64_t>(b2.code())) * B28 |
+ (static_cast<uint64_t>(d2 & 0x0FFF)) * B16 |
+ (static_cast<uint64_t>(d2 & 0x0FF000)) >> 4 |
+ (static_cast<uint64_t>(op & 0x00FF));
+ emit6bytes(code);
+}
+
+// RSY2 format: <insn> R1,M3,D2(B2)
+// +--------+----+----+----+-------------+--------+--------+
+// | OpCode | R1 | M3 | B2 | DL2 | DH2 | OpCode |
+// +--------+----+----+----+-------------+--------+--------+
+// 0 8 12 16 20 32 40 47
+#define RSY2_FORM_EMIT(name, op) \
+ void Assembler::name(Register r1, Condition m3, Register b2, Disp d2) { \
+ rsy_form(op, r1, m3, b2, d2); \
+ } \
+ void Assembler::name(Register r1, Condition m3, const MemOperand& opnd) { \
+ name(r1, m3, opnd.getBaseRegister(), opnd.getDisplacement()); \
+ }
+
+void Assembler::rsy_form(Opcode op, Register r1, Condition m3, Register b2,
+ const Disp d2) {
+ DCHECK(is_int20(d2));
+ DCHECK(is_uint16(op));
+ uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
+ (static_cast<uint64_t>(r1.code())) * B36 |
+ (static_cast<uint64_t>(m3)) * B32 |
+ (static_cast<uint64_t>(b2.code())) * B28 |
+ (static_cast<uint64_t>(d2 & 0x0FFF)) * B16 |
+ (static_cast<uint64_t>(d2 & 0x0FF000)) >> 4 |
+ (static_cast<uint64_t>(op & 0x00FF));
+ emit6bytes(code);
+}
+
+// RXE format: <insn> R1,D2(X2,B2)
+// +--------+----+----+----+-------------+--------+--------+
+// | OpCode | R1 | X2 | B2 | D2 |////////| OpCode |
+// +--------+----+----+----+-------------+--------+--------+
+// 0 8 12 16 20 32 40 47
+#define RXE_FORM_EMIT(name, op) \
+ void Assembler::name(Register r1, Register x2, Register b2, Disp d2) { \
+ rxe_form(op, r1, x2, b2, d2); \
+ } \
+ void Assembler::name(Register r1, const MemOperand& opnd) { \
+ name(r1, opnd.getIndexRegister(), opnd.getBaseRegister(), \
+ opnd.getDisplacement()); \
+ }
+
+void Assembler::rxe_form(Opcode op, Register r1, Register x2, Register b2,
+ Disp d2) {
+ DCHECK(is_uint12(d2));
+ DCHECK(is_uint16(op));
+ uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
+ (static_cast<uint64_t>(r1.code())) * B36 |
+ (static_cast<uint64_t>(x2.code())) * B32 |
+ (static_cast<uint64_t>(b2.code())) * B28 |
+ (static_cast<uint64_t>(d2 & 0x0FFF)) * B16 |
+ (static_cast<uint64_t>(op & 0x00FF));
+ emit6bytes(code);
+}
+
+// RXY format: <insn> R1,D2(X2,B2)
+// +--------+----+----+----+-------------+--------+--------+
+// | OpCode | R1 | X2 | B2 | DL2 | DH2 | OpCode |
+// +--------+----+----+----+-------------+--------+--------+
+// 0 8 12 16 20 32 36 40 47
+#define RXY_FORM_EMIT(name, op) \
+ void Assembler::name(Register r1, Register x2, Register b2, Disp d2) { \
+ rxy_form(op, r1, x2, b2, d2); \
+ } \
+ void Assembler::name(Register r1, const MemOperand& opnd) { \
+ name(r1, opnd.getIndexRegister(), opnd.getBaseRegister(), \
+ opnd.getDisplacement()); \
+ }
+
+void Assembler::rxy_form(Opcode op, Register r1, Register x2, Register b2,
+ Disp d2) {
+ DCHECK(is_int20(d2));
+ DCHECK(is_uint16(op));
+ uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
+ (static_cast<uint64_t>(r1.code())) * B36 |
+ (static_cast<uint64_t>(x2.code())) * B32 |
+ (static_cast<uint64_t>(b2.code())) * B28 |
+ (static_cast<uint64_t>(d2 & 0x0FFF)) * B16 |
+ (static_cast<uint64_t>(d2 & 0x0FF000)) >> 4 |
+ (static_cast<uint64_t>(op & 0x00FF));
+ emit6bytes(code);
+}
+
+void Assembler::rxy_form(Opcode op, DoubleRegister r1, Register x2, Register b2,
+ Disp d2) {
+ DCHECK(is_int20(d2));
+ DCHECK(is_uint16(op));
+ uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
+ (static_cast<uint64_t>(r1.code())) * B36 |
+ (static_cast<uint64_t>(x2.code())) * B32 |
+ (static_cast<uint64_t>(b2.code())) * B28 |
+ (static_cast<uint64_t>(d2 & 0x0FFF)) * B16 |
+ (static_cast<uint64_t>(d2 & 0x0FF000)) >> 4 |
+ (static_cast<uint64_t>(op & 0x00FF));
+ emit6bytes(code);
+}
+
+// RRS format: <insn> R1,R2,M3,D4(B4)
+// +--------+----+----+----+-------------+----+---+--------+
+// | OpCode | R1 | R2 | B4 | D4 | M3 |///| OpCode |
+// +--------+----+----+----+-------------+----+---+--------+
+// 0 8 12 16 20 32 36 40 47
+#define RRS_FORM_EMIT(name, op) \
+ void Assembler::name(Register r1, Register r2, Register b4, Disp d4, \
+ Condition m3) { \
+ rrs_form(op, r1, r2, b4, d4, m3); \
+ } \
+ void Assembler::name(Register r1, Register r2, Condition m3, \
+ const MemOperand& opnd) { \
+ name(r1, r2, opnd.getBaseRegister(), opnd.getDisplacement(), m3); \
+ }
+
+void Assembler::rrs_form(Opcode op, Register r1, Register r2, Register b4,
+ Disp d4, Condition m3) {
+ DCHECK(is_uint12(d4));
+ DCHECK(is_uint16(op));
+ uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
+ (static_cast<uint64_t>(r1.code())) * B36 |
+ (static_cast<uint64_t>(r2.code())) * B32 |
+ (static_cast<uint64_t>(b4.code())) * B28 |
+ (static_cast<uint64_t>(d4)) * B16 |
+ (static_cast<uint64_t>(m3)) << 12 |
+ (static_cast<uint64_t>(op & 0x00FF));
+ emit6bytes(code);
+}
+
+// RIS format: <insn> R1,I2,M3,D4(B4)
+// +--------+----+----+----+-------------+--------+--------+
+// | OpCode | R1 | M3 | B4 | D4 | I2 | OpCode |
+// +--------+----+----+----+-------------+--------+--------+
+// 0 8 12 16 20 32 40 47
+#define RIS_FORM_EMIT(name, op) \
+ void Assembler::name(Register r1, Condition m3, Register b4, Disp d4, \
+ const Operand& i2) { \
+ ris_form(op, r1, m3, b4, d4, i2); \
+ } \
+ void Assembler::name(Register r1, const Operand& i2, Condition m3, \
+ const MemOperand& opnd) { \
+ name(r1, m3, opnd.getBaseRegister(), opnd.getDisplacement(), i2); \
+ }
+
+void Assembler::ris_form(Opcode op, Register r1, Condition m3, Register b4,
+ Disp d4, const Operand& i2) {
+ DCHECK(is_uint12(d4));
+ DCHECK(is_uint16(op));
+ DCHECK(is_uint8(i2.imm_));
+ uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
+ (static_cast<uint64_t>(r1.code())) * B36 |
+ (static_cast<uint64_t>(m3)) * B32 |
+ (static_cast<uint64_t>(b4.code())) * B28 |
+ (static_cast<uint64_t>(d4)) * B16 |
+ (static_cast<uint64_t>(i2.imm_)) << 8 |
+ (static_cast<uint64_t>(op & 0x00FF));
+ emit6bytes(code);
+}
+
+// S format: <insn> D2(B2)
+// +------------------+----+-------------+
+// | OpCode | B2 | D2 |
+// +------------------+----+-------------+
+// 0 16 20 31
+#define S_FORM_EMIT(name, op) \
+ void Assembler::name(Register b1, Disp d2) { s_form(op, b1, d2); } \
+ void Assembler::name(const MemOperand& opnd) { \
+ name(opnd.getBaseRegister(), opnd.getDisplacement()); \
+ }
+
+void Assembler::s_form(Opcode op, Register b1, Disp d2) {
+ DCHECK(is_uint12(d2));
+ emit4bytes(op << 16 | b1.code() * B12 | d2);
+}
+
+// SI format: <insn> D1(B1),I2
+// +--------+---------+----+-------------+
+// | OpCode | I2 | B1 | D1 |
+// +--------+---------+----+-------------+
+// 0 8 16 20 31
+#define SI_FORM_EMIT(name, op) \
+ void Assembler::name(const Operand& i2, Register b1, Disp d1) { \
+ si_form(op, i2, b1, d1); \
+ } \
+ void Assembler::name(const MemOperand& opnd, const Operand& i2) { \
+ name(i2, opnd.getBaseRegister(), opnd.getDisplacement()); \
+ }
+
+void Assembler::si_form(Opcode op, const Operand& i2, Register b1, Disp d1) {
+ emit4bytes((op & 0x00FF) << 24 | i2.imm_ * B16 | b1.code() * B12 | d1);
+}
+
+// SIY format: <insn> D1(B1),I2
+// +--------+---------+----+-------------+--------+--------+
+// | OpCode | I2 | B1 | DL1 | DH1 | OpCode |
+// +--------+---------+----+-------------+--------+--------+
+// 0 8 16 20 32 36 40 47
+#define SIY_FORM_EMIT(name, op) \
+ void Assembler::name(const Operand& i2, Register b1, Disp d1) { \
+ siy_form(op, i2, b1, d1); \
+ } \
+ void Assembler::name(const MemOperand& opnd, const Operand& i2) { \
+ name(i2, opnd.getBaseRegister(), opnd.getDisplacement()); \
+ }
+
+void Assembler::siy_form(Opcode op, const Operand& i2, Register b1, Disp d1) {
+ DCHECK(is_uint20(d1));
+ DCHECK(is_uint16(op));
+ DCHECK(is_uint8(i2.imm_));
+ uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
+ (static_cast<uint64_t>(i2.imm_)) * B32 |
+ (static_cast<uint64_t>(b1.code())) * B28 |
+ (static_cast<uint64_t>(d1 & 0x0FFF)) * B16 |
+ (static_cast<uint64_t>(d1 & 0x0FF000)) >> 4 |
+ (static_cast<uint64_t>(op & 0x00FF));
+ emit6bytes(code);
+}
+
+// SIL format: <insn> D1(B1),I2
+// +------------------+----+-------------+-----------------+
+// | OpCode | B1 | D1 | I2 |
+// +------------------+----+-------------+-----------------+
+// 0 16 20 32 47
+#define SIL_FORM_EMIT(name, op) \
+ void Assembler::name(Register b1, Disp d1, const Operand& i2) { \
+ sil_form(op, b1, d1, i2); \
+ } \
+ void Assembler::name(const MemOperand& opnd, const Operand& i2) { \
+ name(opnd.getBaseRegister(), opnd.getDisplacement(), i2); \
+ }
+
+void Assembler::sil_form(Opcode op, Register b1, Disp d1, const Operand& i2) {
+ DCHECK(is_uint12(d1));
+ DCHECK(is_uint16(op));
+ DCHECK(is_uint16(i2.imm_));
+ uint64_t code = (static_cast<uint64_t>(op)) * B32 |
+ (static_cast<uint64_t>(b1.code())) * B28 |
+ (static_cast<uint64_t>(d1)) * B16 |
+ (static_cast<uint64_t>(i2.imm_));
+ emit6bytes(code);
+}
+
+// RXF format: <insn> R1,R3,D2(X2,B2)
+// +--------+----+----+----+-------------+----+---+--------+
+// | OpCode | R3 | X2 | B2 | D2 | R1 |///| OpCode |
+// +--------+----+----+----+-------------+----+---+--------+
+// 0 8 12 16 20 32 36 40 47
+#define RXF_FORM_EMIT(name, op) \
+ void Assembler::name(Register r1, Register r3, Register b2, Register x2, \
+ Disp d2) { \
+ rxf_form(op, r1, r3, b2, x2, d2); \
+ } \
+ void Assembler::name(Register r1, Register r3, const MemOperand& opnd) { \
+ name(r1, r3, opnd.getBaseRegister(), opnd.getIndexRegister(), \
+ opnd.getDisplacement()); \
+ }
+
+void Assembler::rxf_form(Opcode op, Register r1, Register r3, Register b2,
+ Register x2, Disp d2) {
+ DCHECK(is_uint12(d2));
+ DCHECK(is_uint16(op));
+ uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
+ (static_cast<uint64_t>(r3.code())) * B36 |
+ (static_cast<uint64_t>(x2.code())) * B32 |
+ (static_cast<uint64_t>(b2.code())) * B28 |
+ (static_cast<uint64_t>(d2)) * B16 |
+ (static_cast<uint64_t>(r1.code())) * B12 |
+ (static_cast<uint64_t>(op & 0x00FF));
+ emit6bytes(code);
+}
+
+// SS1 format: <insn> D1(L,B1),D2(B3)
+// +--------+----+----+----+-------------+----+------------+
+// | OpCode | L | B1 | D1 | B2 | D2 |
+// +--------+----+----+----+-------------+----+------------+
+// 0 8 12 16 20 32 36 47
+#define SS1_FORM_EMIT(name, op) \
+ void Assembler::name(Register b1, Disp d1, Register b2, Disp d2, Length l) { \
+ ss_form(op, l, b1, d1, b2, d2); \
+ } \
+ void Assembler::name(const MemOperand& opnd1, const MemOperand& opnd2, \
+ Length length) { \
+ name(opnd1.getBaseRegister(), opnd1.getDisplacement(), \
+ opnd2.getBaseRegister(), opnd2.getDisplacement(), length); \
+ }
+
+void Assembler::ss_form(Opcode op, Length l, Register b1, Disp d1, Register b2,
+ Disp d2) {
+ DCHECK(is_uint12(d2));
+ DCHECK(is_uint12(d1));
+ DCHECK(is_uint8(op));
+ DCHECK(is_uint8(l));
+ uint64_t code =
+ (static_cast<uint64_t>(op)) * B40 | (static_cast<uint64_t>(l)) * B32 |
+ (static_cast<uint64_t>(b1.code())) * B28 |
+ (static_cast<uint64_t>(d1)) * B16 |
+ (static_cast<uint64_t>(b2.code())) * B12 | (static_cast<uint64_t>(d2));
+ emit6bytes(code);
+}
+
+// SS2 format: <insn> D1(L1,B1), D2(L3,B3)
+// +--------+----+----+----+-------------+----+------------+
+// | OpCode | L1 | L2 | B1 | D1 | B2 | D2 |
+// +--------+----+----+----+-------------+----+------------+
+// 0 8 12 16 20 32 36 47
+#define SS2_FORM_EMIT(name, op) \
+ void Assembler::name(Register b1, Disp d1, Register b2, Disp d2, Length l1, \
+ Length l2) { \
+ ss_form(op, l1, l2, b1, d1, b2, d2); \
+ } \
+ void Assembler::name(const MemOperand& opnd1, const MemOperand& opnd2, \
+ Length length1, Length length2) { \
+ name(opnd1.getBaseRegister(), opnd1.getDisplacement(), \
+ opnd2.getBaseRegister(), opnd2.getDisplacement(), length1, length2); \
+ }
+
+void Assembler::ss_form(Opcode op, Length l1, Length l2, Register b1, Disp d1,
+ Register b2, Disp d2) {
+ DCHECK(is_uint12(d2));
+ DCHECK(is_uint12(d1));
+ DCHECK(is_uint8(op));
+ DCHECK(is_uint4(l2));
+ DCHECK(is_uint4(l1));
+ uint64_t code =
+ (static_cast<uint64_t>(op)) * B40 | (static_cast<uint64_t>(l1)) * B36 |
+ (static_cast<uint64_t>(l2)) * B32 |
+ (static_cast<uint64_t>(b1.code())) * B28 |
+ (static_cast<uint64_t>(d1)) * B16 |
+ (static_cast<uint64_t>(b2.code())) * B12 | (static_cast<uint64_t>(d2));
+ emit6bytes(code);
+}
+
+// SS3 format: <insn> D1(L1,B1), D2(I3,B2)
+// +--------+----+----+----+-------------+----+------------+
+// | OpCode | L1 | I3 | B1 | D1 | B2 | D2 |
+// +--------+----+----+----+-------------+----+------------+
+// 0 8 12 16 20 32 36 47
+#define SS3_FORM_EMIT(name, op) \
+ void Assembler::name(const Operand& i3, Register b1, Disp d1, Register b2, \
+ Disp d2, Length l1) { \
+ ss_form(op, l1, i3, b1, d1, b2, d2); \
+ } \
+ void Assembler::name(const MemOperand& opnd1, const MemOperand& opnd2, \
+ Length length) { \
+ DCHECK(false); \
+ }
+void Assembler::ss_form(Opcode op, Length l1, const Operand& i3, Register b1,
+ Disp d1, Register b2, Disp d2) {
+ DCHECK(is_uint12(d2));
+ DCHECK(is_uint12(d1));
+ DCHECK(is_uint8(op));
+ DCHECK(is_uint4(l1));
+ DCHECK(is_uint4(i3.imm_));
+ uint64_t code =
+ (static_cast<uint64_t>(op)) * B40 | (static_cast<uint64_t>(l1)) * B36 |
+ (static_cast<uint64_t>(i3.imm_)) * B32 |
+ (static_cast<uint64_t>(b1.code())) * B28 |
+ (static_cast<uint64_t>(d1)) * B16 |
+ (static_cast<uint64_t>(b2.code())) * B12 | (static_cast<uint64_t>(d2));
+ emit6bytes(code);
+}
+
+// SS4 format: <insn> D1(R1,B1), D2(R3,B2)
+// +--------+----+----+----+-------------+----+------------+
+// | OpCode | R1 | R3 | B1 | D1 | B2 | D2 |
+// +--------+----+----+----+-------------+----+------------+
+// 0 8 12 16 20 32 36 47
+#define SS4_FORM_EMIT(name, op) \
+ void Assembler::name(Register r1, Register r3, Register b1, Disp d1, \
+ Register b2, Disp d2) { \
+ ss_form(op, r1, r3, b1, d1, b2, d2); \
+ } \
+ void Assembler::name(const MemOperand& opnd1, const MemOperand& opnd2) { \
+ DCHECK(false); \
+ }
+void Assembler::ss_form(Opcode op, Register r1, Register r3, Register b1,
+ Disp d1, Register b2, Disp d2) {
+ DCHECK(is_uint12(d2));
+ DCHECK(is_uint12(d1));
+ DCHECK(is_uint8(op));
+ uint64_t code = (static_cast<uint64_t>(op)) * B40 |
+ (static_cast<uint64_t>(r1.code())) * B36 |
+ (static_cast<uint64_t>(r3.code())) * B32 |
+ (static_cast<uint64_t>(b1.code())) * B28 |
+ (static_cast<uint64_t>(d1)) * B16 |
+ (static_cast<uint64_t>(b2.code())) * B12 |
+ (static_cast<uint64_t>(d2));
+ emit6bytes(code);
+}
+
+// SS5 format: <insn> D1(R1,B1), D2(R3,B2)
+// +--------+----+----+----+-------------+----+------------+
+// | OpCode | R1 | R3 | B2 | D2 | B4 | D4 |
+// +--------+----+----+----+-------------+----+------------+
+// 0 8 12 16 20 32 36 47
+#define SS5_FORM_EMIT(name, op) \
+ void Assembler::name(Register r1, Register r3, Register b2, Disp d2, \
+ Register b4, Disp d4) { \
+ ss_form(op, r1, r3, b2, d2, b4, d4); /*SS5 use the same form as SS4*/ \
+ } \
+ void Assembler::name(const MemOperand& opnd1, const MemOperand& opnd2) { \
+ DCHECK(false); \
+ }
+
+#define SS6_FORM_EMIT(name, op) SS1_FORM_EMIT(name, op)
+
+// SSE format: <insn> D1(B1),D2(B2)
+// +------------------+----+-------------+----+------------+
+// | OpCode | B1 | D1 | B2 | D2 |
+// +------------------+----+-------------+----+------------+
+// 0 8 12 16 20 32 36 47
+#define SSE_FORM_EMIT(name, op) \
+ void Assembler::name(Register b1, Disp d1, Register b2, Disp d2) { \
+ sse_form(op, b1, d1, b2, d2); \
+ } \
+ void Assembler::name(const MemOperand& opnd1, const MemOperand& opnd2) { \
+ name(opnd1.getBaseRegister(), opnd1.getDisplacement(), \
+ opnd2.getBaseRegister(), opnd2.getDisplacement()); \
+ }
+void Assembler::sse_form(Opcode op, Register b1, Disp d1, Register b2,
+ Disp d2) {
+ DCHECK(is_uint12(d2));
+ DCHECK(is_uint12(d1));
+ DCHECK(is_uint16(op));
+ uint64_t code = (static_cast<uint64_t>(op)) * B32 |
+ (static_cast<uint64_t>(b1.code())) * B28 |
+ (static_cast<uint64_t>(d1)) * B16 |
+ (static_cast<uint64_t>(b2.code())) * B12 |
+ (static_cast<uint64_t>(d2));
+ emit6bytes(code);
+}
+
+// SSF format: <insn> R3, D1(B1),D2(B2),R3
+// +--------+----+----+----+-------------+----+------------+
+// | OpCode | R3 |OpCd| B1 | D1 | B2 | D2 |
+// +--------+----+----+----+-------------+----+------------+
+// 0 8 12 16 20 32 36 47
+#define SSF_FORM_EMIT(name, op) \
+ void Assembler::name(Register r3, Register b1, Disp d1, Register b2, \
+ Disp d2) { \
+ ssf_form(op, r3, b1, d1, b2, d2); \
+ } \
+ void Assembler::name(Register r3, const MemOperand& opnd1, \
+ const MemOperand& opnd2) { \
+ name(r3, opnd1.getBaseRegister(), opnd1.getDisplacement(), \
+ opnd2.getBaseRegister(), opnd2.getDisplacement()); \
+ }
+
+void Assembler::ssf_form(Opcode op, Register r3, Register b1, Disp d1,
+ Register b2, Disp d2) {
+ DCHECK(is_uint12(d2));
+ DCHECK(is_uint12(d1));
+ DCHECK(is_uint12(op));
+ uint64_t code = (static_cast<uint64_t>(op & 0xFF0)) * B36 |
+ (static_cast<uint64_t>(r3.code())) * B36 |
+ (static_cast<uint64_t>(op & 0x00F)) * B32 |
+ (static_cast<uint64_t>(b1.code())) * B28 |
+ (static_cast<uint64_t>(d1)) * B16 |
+ (static_cast<uint64_t>(b2.code())) * B12 |
+ (static_cast<uint64_t>(d2));
+ emit6bytes(code);
+}
+
+// RRF1 format: <insn> R1,R2,R3
+// +------------------+----+----+----+----+
+// | OpCode | R3 | | R1 | R2 |
+// +------------------+----+----+----+----+
+// 0 16 20 24 28 31
+#define RRF1_FORM_EMIT(name, op) \
+ void Assembler::name(Register r1, Register r2, Register r3) { \
+ rrf1_form(op << 16 | r3.code() * B12 | r1.code() * B4 | r2.code()); \
+ }
+
+void Assembler::rrf1_form(Opcode op, Register r1, Register r2, Register r3) {
+ uint32_t code = op << 16 | r3.code() * B12 | r1.code() * B4 | r2.code();
+ emit4bytes(code);
+}
+
+void Assembler::rrf1_form(uint32_t code) { emit4bytes(code); }
+
+// RRF2 format: <insn> R1,R2,M3
+// +------------------+----+----+----+----+
+// | OpCode | M3 | | R1 | R2 |
+// +------------------+----+----+----+----+
+// 0 16 20 24 28 31
+#define RRF2_FORM_EMIT(name, op) \
+ void Assembler::name(Condition m3, Register r1, Register r2) { \
+ rrf2_form(op << 16 | m3 * B12 | r1.code() * B4 | r2.code()); \
+ }
+
+void Assembler::rrf2_form(uint32_t code) { emit4bytes(code); }
+
+// RRF3 format: <insn> R1,R2,R3,M4
+// +------------------+----+----+----+----+
+// | OpCode | R3 | M4 | R1 | R2 |
+// +------------------+----+----+----+----+
+// 0 16 20 24 28 31
+#define RRF3_FORM_EMIT(name, op) \
+ void Assembler::name(Register r3, Conition m4, Register r1, Register r2) { \
+ rrf3_form(op << 16 | r3.code() * B12 | m4 * B8 | r1.code() * B4 | \
+ r2.code()); \
+ }
+
+void Assembler::rrf3_form(uint32_t code) { emit4bytes(code); }
+
+// RRF-e format: <insn> R1,M3,R2,M4
+// +------------------+----+----+----+----+
+// | OpCode | M3 | M4 | R1 | R2 |
+// +------------------+----+----+----+----+
+// 0 16 20 24 28 31
+void Assembler::rrfe_form(Opcode op, Condition m3, Condition m4, Register r1,
+ Register r2) {
+ uint32_t code = op << 16 | m3 * B12 | m4 * B8 | r1.code() * B4 | r2.code();
+ emit4bytes(code);
+}
+
+// end of S390 Instruction generation
+
+// start of S390 instruction
+RX_FORM_EMIT(bc, BC)
+RR_FORM_EMIT(bctr, BCTR)
+RXE_FORM_EMIT(ceb, CEB)
+RRE_FORM_EMIT(cefbr, CEFBR)
+SS1_FORM_EMIT(ed, ED)
+RX_FORM_EMIT(ex, EX)
+RRE_FORM_EMIT(flogr, FLOGR)
+RRE_FORM_EMIT(lcgr, LCGR)
+RR_FORM_EMIT(lcr, LCR)
+RX_FORM_EMIT(le_z, LE)
+RXY_FORM_EMIT(ley, LEY)
+RIL1_FORM_EMIT(llihf, LLIHF)
+RIL1_FORM_EMIT(llilf, LLILF)
+RRE_FORM_EMIT(lngr, LNGR)
+RR_FORM_EMIT(lnr, LNR)
+RSY1_FORM_EMIT(loc, LOC)
+RXY_FORM_EMIT(lrv, LRV)
+RXY_FORM_EMIT(lrvh, LRVH)
+SS1_FORM_EMIT(mvn, MVN)
+SS1_FORM_EMIT(nc, NC)
+SI_FORM_EMIT(ni, NI)
+RIL1_FORM_EMIT(nihf, NIHF)
+RIL1_FORM_EMIT(nilf, NILF)
+RI1_FORM_EMIT(nilh, NILH)
+RI1_FORM_EMIT(nill, NILL)
+RIL1_FORM_EMIT(oihf, OIHF)
+RIL1_FORM_EMIT(oilf, OILF)
+RI1_FORM_EMIT(oill, OILL)
+RRE_FORM_EMIT(popcnt, POPCNT_Z)
+RIL1_FORM_EMIT(slfi, SLFI)
+RXY_FORM_EMIT(slgf, SLGF)
+RIL1_FORM_EMIT(slgfi, SLGFI)
+RXY_FORM_EMIT(strv, STRV)
+RI1_FORM_EMIT(tmll, TMLL)
+SS1_FORM_EMIT(tr, TR)
+S_FORM_EMIT(ts, TS)
+RIL1_FORM_EMIT(xihf, XIHF)
+RIL1_FORM_EMIT(xilf, XILF)
+
+// -------------------------
+// Load Address Instructions
+// -------------------------
+// Load Address Register-Storage
+void Assembler::la(Register r1, const MemOperand& opnd) {
+ rx_form(LA, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Load Address Register-Storage
+void Assembler::lay(Register r1, const MemOperand& opnd) {
+ rxy_form(LAY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Load Address Relative Long
+void Assembler::larl(Register r1, const Operand& opnd) {
+ ril_form(LARL, r1, opnd);
+}
+
+// Load Address Relative Long
+void Assembler::larl(Register r1, Label* l) {
+ larl(r1, Operand(branch_offset(l)));
+}
+
+// -----------------
+// Load Instructions
+// -----------------
+// Load Byte Register-Storage (32<-8)
+void Assembler::lb(Register r, const MemOperand& src) {
+ rxy_form(LB, r, src.rx(), src.rb(), src.offset());
+}
+
+// Load Byte Register-Register (32<-8)
+void Assembler::lbr(Register r1, Register r2) { rre_form(LBR, r1, r2); }
+
+// Load Byte Register-Storage (64<-8)
+void Assembler::lgb(Register r, const MemOperand& src) {
+ rxy_form(LGB, r, src.rx(), src.rb(), src.offset());
+}
+
+// Load Byte Register-Register (64<-8)
+void Assembler::lgbr(Register r1, Register r2) { rre_form(LGBR, r1, r2); }
+
+// Load Halfword Register-Storage (32<-16)
+void Assembler::lh(Register r, const MemOperand& src) {
+ rx_form(LH, r, src.rx(), src.rb(), src.offset());
+}
+
+// Load Halfword Register-Storage (32<-16)
+void Assembler::lhy(Register r, const MemOperand& src) {
+ rxy_form(LHY, r, src.rx(), src.rb(), src.offset());
+}
+
+// Load Halfword Register-Register (32<-16)
+void Assembler::lhr(Register r1, Register r2) { rre_form(LHR, r1, r2); }
+
+// Load Halfword Register-Storage (64<-16)
+void Assembler::lgh(Register r, const MemOperand& src) {
+ rxy_form(LGH, r, src.rx(), src.rb(), src.offset());
+}
+
+// Load Halfword Register-Register (64<-16)
+void Assembler::lghr(Register r1, Register r2) { rre_form(LGHR, r1, r2); }
+
+// Load Register-Storage (32)
+void Assembler::l(Register r, const MemOperand& src) {
+ rx_form(L, r, src.rx(), src.rb(), src.offset());
+}
+
+// Load Register-Storage (32)
+void Assembler::ly(Register r, const MemOperand& src) {
+ rxy_form(LY, r, src.rx(), src.rb(), src.offset());
+}
+
+// Load Register-Register (32)
+void Assembler::lr(Register r1, Register r2) { rr_form(LR, r1, r2); }
+
+// Load Register-Storage (64)
+void Assembler::lg(Register r, const MemOperand& src) {
+ rxy_form(LG, r, src.rx(), src.rb(), src.offset());
+}
+
+// Load Register-Register (64)
+void Assembler::lgr(Register r1, Register r2) { rre_form(LGR, r1, r2); }
+
+// Load Register-Storage (64<-32)
+void Assembler::lgf(Register r, const MemOperand& src) {
+ rxy_form(LGF, r, src.rx(), src.rb(), src.offset());
+}
+
+// Load Sign Extended Register-Register (64<-32)
+void Assembler::lgfr(Register r1, Register r2) { rre_form(LGFR, r1, r2); }
+
+// Load Halfword Immediate (32)
+void Assembler::lhi(Register r, const Operand& imm) { ri_form(LHI, r, imm); }
+
+// Load Halfword Immediate (64)
+void Assembler::lghi(Register r, const Operand& imm) { ri_form(LGHI, r, imm); }
+
+// --------------------------
+// Load And Test Instructions
+// --------------------------
+// Load and Test Register-Storage (32)
+void Assembler::lt_z(Register r1, const MemOperand& opnd) {
+ rxy_form(LT, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Load and Test Register-Storage (64)
+void Assembler::ltg(Register r1, const MemOperand& opnd) {
+ rxy_form(LTG, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Load and Test Register-Register (32)
+void Assembler::ltr(Register r1, Register r2) { rr_form(LTR, r1, r2); }
+
+// Load and Test Register-Register (64)
+void Assembler::ltgr(Register r1, Register r2) { rre_form(LTGR, r1, r2); }
+
+// Load and Test Register-Register (64<-32)
+void Assembler::ltgfr(Register r1, Register r2) { rre_form(LTGFR, r1, r2); }
+
+// -------------------------
+// Load Logical Instructions
+// -------------------------
+// Load Logical Character (32) - loads a byte and zero ext.
+void Assembler::llc(Register r1, const MemOperand& opnd) {
+ rxy_form(LLC, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Load Logical Character (64) - loads a byte and zero ext.
+void Assembler::llgc(Register r1, const MemOperand& opnd) {
+ rxy_form(LLGC, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Load Logical halfword Register-Storage (64<-32)
+void Assembler::llgf(Register r1, const MemOperand& opnd) {
+ rxy_form(LLGF, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Load Logical Register-Register (64<-32)
+void Assembler::llgfr(Register r1, Register r2) { rre_form(LLGFR, r1, r2); }
+
+// Load Logical halfword Register-Storage (32)
+void Assembler::llh(Register r1, const MemOperand& opnd) {
+ rxy_form(LLH, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Load Logical halfword Register-Storage (64)
+void Assembler::llgh(Register r1, const MemOperand& opnd) {
+ rxy_form(LLGH, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Load Logical halfword Register-Register (32)
+void Assembler::llhr(Register r1, Register r2) { rre_form(LLHR, r1, r2); }
+
+// Load Logical halfword Register-Register (64)
+void Assembler::llghr(Register r1, Register r2) { rre_form(LLGHR, r1, r2); }
+
+// -------------------
+// Branch Instructions
+// -------------------
+// Branch and Save
+void Assembler::basr(Register r1, Register r2) { rr_form(BASR, r1, r2); }
+
+// Indirect Conditional Branch via register
+void Assembler::bcr(Condition m, Register target) { rr_form(BCR, m, target); }
+
+// Branch on Count (32)
+void Assembler::bct(Register r, const MemOperand& opnd) {
+ rx_form(BCT, r, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Branch on Count (64)
+void Assembler::bctg(Register r, const MemOperand& opnd) {
+ rxy_form(BCTG, r, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Branch Relative and Save (32)
+void Assembler::bras(Register r, const Operand& opnd) {
+ ri_form(BRAS, r, opnd);
+}
+
+// Branch Relative and Save (64)
+void Assembler::brasl(Register r, const Operand& opnd) {
+ ril_form(BRASL, r, opnd);
+}
+
+// Branch relative on Condition (32)
+void Assembler::brc(Condition c, const Operand& opnd) {
+ // BRC actually encodes # of halfwords, so divide by 2.
+ int16_t numHalfwords = static_cast<int16_t>(opnd.immediate()) / 2;
+ Operand halfwordOp = Operand(numHalfwords);
+ halfwordOp.setBits(16);
+ ri_form(BRC, c, halfwordOp);
+}
+
+// Branch Relative on Condition (64)
+void Assembler::brcl(Condition c, const Operand& opnd, bool isCodeTarget) {
+ Operand halfwordOp = opnd;
+ // Operand for code targets will be index to code_targets_
+ if (!isCodeTarget) {
+ // BRCL actually encodes # of halfwords, so divide by 2.
+ int32_t numHalfwords = static_cast<int32_t>(opnd.immediate()) / 2;
+ halfwordOp = Operand(numHalfwords);
+ }
+ ril_form(BRCL, c, halfwordOp);
+}
+
+// Branch On Count (32)
+void Assembler::brct(Register r1, const Operand& imm) {
+ // BRCT encodes # of halfwords, so divide by 2.
+ int16_t numHalfwords = static_cast<int16_t>(imm.immediate()) / 2;
+ Operand halfwordOp = Operand(numHalfwords);
+ halfwordOp.setBits(16);
+ ri_form(BRCT, r1, halfwordOp);
+}
+
+// Branch On Count (32)
+void Assembler::brctg(Register r1, const Operand& imm) {
+ // BRCTG encodes # of halfwords, so divide by 2.
+ int16_t numHalfwords = static_cast<int16_t>(imm.immediate()) / 2;
+ Operand halfwordOp = Operand(numHalfwords);
+ halfwordOp.setBits(16);
+ ri_form(BRCTG, r1, halfwordOp);
+}
+
+// --------------------
+// Compare Instructions
+// --------------------
+// Compare Register-Storage (32)
+void Assembler::c(Register r, const MemOperand& opnd) {
+ rx_form(C, r, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Compare Register-Storage (32)
+void Assembler::cy(Register r, const MemOperand& opnd) {
+ rxy_form(CY, r, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Compare Register-Register (32)
+void Assembler::cr_z(Register r1, Register r2) { rr_form(CR, r1, r2); }
+
+// Compare Register-Storage (64)
+void Assembler::cg(Register r, const MemOperand& opnd) {
+ rxy_form(CG, r, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Compare Register-Register (64)
+void Assembler::cgr(Register r1, Register r2) { rre_form(CGR, r1, r2); }
+
+// Compare Halfword Register-Storage (32)
+void Assembler::ch(Register r, const MemOperand& opnd) {
+ rx_form(CH, r, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Compare Halfword Register-Storage (32)
+void Assembler::chy(Register r, const MemOperand& opnd) {
+ rxy_form(CHY, r, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Compare Halfword Immediate (32)
+void Assembler::chi(Register r, const Operand& opnd) { ri_form(CHI, r, opnd); }
+
+// Compare Halfword Immediate (64)
+void Assembler::cghi(Register r, const Operand& opnd) {
+ ri_form(CGHI, r, opnd);
+}
+
+// Compare Immediate (32)
+void Assembler::cfi(Register r, const Operand& opnd) { ril_form(CFI, r, opnd); }
+
+// Compare Immediate (64)
+void Assembler::cgfi(Register r, const Operand& opnd) {
+ ril_form(CGFI, r, opnd);
+}
+
+// ----------------------------
+// Compare Logical Instructions
+// ----------------------------
+// Compare Logical Register-Storage (32)
+void Assembler::cl(Register r, const MemOperand& opnd) {
+ rx_form(CL, r, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Compare Logical Register-Storage (32)
+void Assembler::cly(Register r, const MemOperand& opnd) {
+ rxy_form(CLY, r, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Compare Logical Register-Register (32)
+void Assembler::clr(Register r1, Register r2) { rr_form(CLR, r1, r2); }
+
+// Compare Logical Register-Storage (64)
+void Assembler::clg(Register r, const MemOperand& opnd) {
+ rxy_form(CLG, r, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Compare Logical Register-Register (64)
+void Assembler::clgr(Register r1, Register r2) { rre_form(CLGR, r1, r2); }
+
+// Compare Logical Immediate (32)
+void Assembler::clfi(Register r1, const Operand& i2) { ril_form(CLFI, r1, i2); }
+
+// Compare Logical Immediate (64<32)
+void Assembler::clgfi(Register r1, const Operand& i2) {
+ ril_form(CLGFI, r1, i2);
+}
+
+// Compare Immediate (Mem - Imm) (8)
+void Assembler::cli(const MemOperand& opnd, const Operand& imm) {
+ si_form(CLI, imm, opnd.rb(), opnd.offset());
+}
+
+// Compare Immediate (Mem - Imm) (8)
+void Assembler::cliy(const MemOperand& opnd, const Operand& imm) {
+ siy_form(CLIY, imm, opnd.rb(), opnd.offset());
+}
+
+// Compare logical - mem to mem operation
+void Assembler::clc(const MemOperand& opnd1, const MemOperand& opnd2,
+ Length length) {
+ ss_form(CLC, length - 1, opnd1.getBaseRegister(), opnd1.getDisplacement(),
+ opnd2.getBaseRegister(), opnd2.getDisplacement());
+}
+
+// ----------------------------
+// Test Under Mask Instructions
+// ----------------------------
+// Test Under Mask (Mem - Imm) (8)
+void Assembler::tm(const MemOperand& opnd, const Operand& imm) {
+ si_form(TM, imm, opnd.rb(), opnd.offset());
+}
+
+// Test Under Mask (Mem - Imm) (8)
+void Assembler::tmy(const MemOperand& opnd, const Operand& imm) {
+ siy_form(TMY, imm, opnd.rb(), opnd.offset());
+}
+
+// -------------------------------
+// Rotate and Insert Selected Bits
+// -------------------------------
+// Rotate-And-Insert-Selected-Bits
+void Assembler::risbg(Register dst, Register src, const Operand& startBit,
+ const Operand& endBit, const Operand& shiftAmt,
+ bool zeroBits) {
+ // High tag the top bit of I4/EndBit to zero out any unselected bits
+ if (zeroBits)
+ rie_f_form(RISBG, dst, src, startBit, Operand(endBit.imm_ | 0x80),
+ shiftAmt);
+ else
+ rie_f_form(RISBG, dst, src, startBit, endBit, shiftAmt);
+}
+
+// Rotate-And-Insert-Selected-Bits
+void Assembler::risbgn(Register dst, Register src, const Operand& startBit,
+ const Operand& endBit, const Operand& shiftAmt,
+ bool zeroBits) {
+ // High tag the top bit of I4/EndBit to zero out any unselected bits
+ if (zeroBits)
+ rie_f_form(RISBGN, dst, src, startBit, Operand(endBit.imm_ | 0x80),
+ shiftAmt);
+ else
+ rie_f_form(RISBGN, dst, src, startBit, endBit, shiftAmt);
+}
+
+// ---------------------------
+// Move Character Instructions
+// ---------------------------
+// Move charactor - mem to mem operation
+void Assembler::mvc(const MemOperand& opnd1, const MemOperand& opnd2,
+ uint32_t length) {
+ ss_form(MVC, length - 1, opnd1.getBaseRegister(), opnd1.getDisplacement(),
+ opnd2.getBaseRegister(), opnd2.getDisplacement());
+}
+
+// -----------------------
+// 32-bit Add Instructions
+// -----------------------
+// Add Register-Storage (32)
+void Assembler::a(Register r1, const MemOperand& opnd) {
+ rx_form(A, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Add Register-Storage (32)
+void Assembler::ay(Register r1, const MemOperand& opnd) {
+ rxy_form(AY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Add Immediate (32)
+void Assembler::afi(Register r1, const Operand& opnd) {
+ ril_form(AFI, r1, opnd);
+}
+
+// Add Halfword Register-Storage (32)
+void Assembler::ah(Register r1, const MemOperand& opnd) {
+ rx_form(AH, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Add Halfword Register-Storage (32)
+void Assembler::ahy(Register r1, const MemOperand& opnd) {
+ rxy_form(AHY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Add Halfword Immediate (32)
+void Assembler::ahi(Register r1, const Operand& i2) { ri_form(AHI, r1, i2); }
+
+// Add Halfword Immediate (32)
+void Assembler::ahik(Register r1, Register r3, const Operand& i2) {
+ rie_form(AHIK, r1, r3, i2);
+}
+
+// Add Register (32)
+void Assembler::ar(Register r1, Register r2) { rr_form(AR, r1, r2); }
+
+// Add Register-Register-Register (32)
+void Assembler::ark(Register r1, Register r2, Register r3) {
+ rrf1_form(ARK, r1, r2, r3);
+}
+
+// Add Storage-Imm (32)
+void Assembler::asi(const MemOperand& opnd, const Operand& imm) {
+ DCHECK(is_int8(imm.imm_));
+ DCHECK(is_int20(opnd.offset()));
+ siy_form(ASI, Operand(0xff & imm.imm_), opnd.rb(), 0xfffff & opnd.offset());
+}
+
+// -----------------------
+// 64-bit Add Instructions
+// -----------------------
+// Add Register-Storage (64)
+void Assembler::ag(Register r1, const MemOperand& opnd) {
+ rxy_form(AG, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Add Register-Storage (64<-32)
+void Assembler::agf(Register r1, const MemOperand& opnd) {
+ rxy_form(AGF, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Add Immediate (64)
+void Assembler::agfi(Register r1, const Operand& opnd) {
+ ril_form(ALFI, r1, opnd);
+}
+
+// Add Register-Register (64<-32)
+void Assembler::agfr(Register r1, Register r2) { rre_form(AGFR, r1, r2); }
+
+// Add Halfword Immediate (64)
+void Assembler::aghi(Register r1, const Operand& i2) { ri_form(AGHI, r1, i2); }
+
+// Add Halfword Immediate (64)
+void Assembler::aghik(Register r1, Register r3, const Operand& i2) {
+ rie_form(AGHIK, r1, r3, i2);
+}
+
+// Add Register (64)
+void Assembler::agr(Register r1, Register r2) { rre_form(AGR, r1, r2); }
+
+// Add Register-Register-Register (64)
+void Assembler::agrk(Register r1, Register r2, Register r3) {
+ rrf1_form(AGRK, r1, r2, r3);
+}
+
+// Add Storage-Imm (64)
+void Assembler::agsi(const MemOperand& opnd, const Operand& imm) {
+ DCHECK(is_int8(imm.imm_));
+ DCHECK(is_int20(opnd.offset()));
+ siy_form(AGSI, Operand(0xff & imm.imm_), opnd.rb(), 0xfffff & opnd.offset());
+}
+
+// -------------------------------
+// 32-bit Add Logical Instructions
+// -------------------------------
+// Add Logical Register-Storage (32)
+void Assembler::al_z(Register r1, const MemOperand& opnd) {
+ rx_form(AL, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Add Logical Register-Storage (32)
+void Assembler::aly(Register r1, const MemOperand& opnd) {
+ rxy_form(ALY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Add Logical Immediate (32)
+void Assembler::alfi(Register r1, const Operand& opnd) {
+ ril_form(ALFI, r1, opnd);
+}
+
+// Add Logical Register-Register (32)
+void Assembler::alr(Register r1, Register r2) { rr_form(ALR, r1, r2); }
+
+// Add Logical With Carry Register-Register (32)
+void Assembler::alcr(Register r1, Register r2) { rre_form(ALCR, r1, r2); }
+
+// Add Logical Register-Register-Register (32)
+void Assembler::alrk(Register r1, Register r2, Register r3) {
+ rrf1_form(ALRK, r1, r2, r3);
+}
+
+// -------------------------------
+// 64-bit Add Logical Instructions
+// -------------------------------
+// Add Logical Register-Storage (64)
+void Assembler::alg(Register r1, const MemOperand& opnd) {
+ rxy_form(ALG, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Add Logical Immediate (64)
+void Assembler::algfi(Register r1, const Operand& opnd) {
+ ril_form(ALGFI, r1, opnd);
+}
+
+// Add Logical Register-Register (64)
+void Assembler::algr(Register r1, Register r2) { rre_form(ALGR, r1, r2); }
+
+// Add Logical Register-Register-Register (64)
+void Assembler::algrk(Register r1, Register r2, Register r3) {
+ rrf1_form(ALGRK, r1, r2, r3);
+}
+
+// ----------------------------
+// 32-bit Subtract Instructions
+// ----------------------------
+// Subtract Register-Storage (32)
+void Assembler::s(Register r1, const MemOperand& opnd) {
+ rx_form(S, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Subtract Register-Storage (32)
+void Assembler::sy(Register r1, const MemOperand& opnd) {
+ rxy_form(SY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Subtract Halfword Register-Storage (32)
+void Assembler::sh(Register r1, const MemOperand& opnd) {
+ rx_form(SH, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Subtract Halfword Register-Storage (32)
+void Assembler::shy(Register r1, const MemOperand& opnd) {
+ rxy_form(SHY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Subtract Register (32)
+void Assembler::sr(Register r1, Register r2) { rr_form(SR, r1, r2); }
+
+// Subtract Register-Register-Register (32)
+void Assembler::srk(Register r1, Register r2, Register r3) {
+ rrf1_form(SRK, r1, r2, r3);
+}
+
+// ----------------------------
+// 64-bit Subtract Instructions
+// ----------------------------
+// Subtract Register-Storage (64)
+void Assembler::sg(Register r1, const MemOperand& opnd) {
+ rxy_form(SG, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Subtract Register-Storage (64<-32)
+void Assembler::sgf(Register r1, const MemOperand& opnd) {
+ rxy_form(SGF, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Subtract Register (64)
+void Assembler::sgr(Register r1, Register r2) { rre_form(SGR, r1, r2); }
+
+// Subtract Register (64<-32)
+void Assembler::sgfr(Register r1, Register r2) { rre_form(SGFR, r1, r2); }
+
+// Subtract Register-Register-Register (64)
+void Assembler::sgrk(Register r1, Register r2, Register r3) {
+ rrf1_form(SGRK, r1, r2, r3);
+}
+
+// ------------------------------------
+// 32-bit Subtract Logical Instructions
+// ------------------------------------
+// Subtract Logical Register-Storage (32)
+void Assembler::sl(Register r1, const MemOperand& opnd) {
+ rx_form(SL, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Subtract Logical Register-Storage (32)
+void Assembler::sly(Register r1, const MemOperand& opnd) {
+ rxy_form(SLY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Subtract Logical Register-Register (32)
+void Assembler::slr(Register r1, Register r2) { rr_form(SLR, r1, r2); }
+
+// Subtract Logical With Borrow Register-Register (32)
+void Assembler::slbr(Register r1, Register r2) { rre_form(SLBR, r1, r2); }
+
+// Subtract Logical Register-Register-Register (32)
+void Assembler::slrk(Register r1, Register r2, Register r3) {
+ rrf1_form(SLRK, r1, r2, r3);
+}
+
+// ------------------------------------
+// 64-bit Subtract Logical Instructions
+// ------------------------------------
+// Subtract Logical Register-Storage (64)
+void Assembler::slg(Register r1, const MemOperand& opnd) {
+ rxy_form(SLG, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Subtract Logical Register-Register (64)
+void Assembler::slgr(Register r1, Register r2) { rre_form(SLGR, r1, r2); }
+
+// Subtract Logical Register-Register-Register (64)
+void Assembler::slgrk(Register r1, Register r2, Register r3) {
+ rrf1_form(SLGRK, r1, r2, r3);
+}
+
+// ----------------------------
+// 32-bit Multiply Instructions
+// ----------------------------
+// Multiply Register-Storage (64<32)
+void Assembler::m(Register r1, const MemOperand& opnd) {
+ rx_form(M, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Multiply Register (64<32)
+void Assembler::mr_z(Register r1, Register r2) {
+ DCHECK(r1.code() % 2 == 0);
+ rr_form(MR, r1, r2);
+}
+
+// Multiply Logical Register-Storage (64<32)
+void Assembler::ml(Register r1, const MemOperand& opnd) {
+ rxy_form(ML, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Multiply Logical Register (64<32)
+void Assembler::mlr(Register r1, Register r2) {
+ DCHECK(r1.code() % 2 == 0);
+ rre_form(MLR, r1, r2);
+}
+
+// Multiply Single Register-Storage (32)
+void Assembler::ms(Register r1, const MemOperand& opnd) {
+ rx_form(MS, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Multiply Single Register-Storage (32)
+void Assembler::msy(Register r1, const MemOperand& opnd) {
+ rxy_form(MSY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Multiply Single Immediate (32)
+void Assembler::msfi(Register r1, const Operand& opnd) {
+ ril_form(MSFI, r1, opnd);
+}
+
+// Multiply Single Register (64<32)
+void Assembler::msr(Register r1, Register r2) { rre_form(MSR, r1, r2); }
+
+// Multiply Halfword Register-Storage (32)
+void Assembler::mh(Register r1, const MemOperand& opnd) {
+ rx_form(MH, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Multiply Halfword Register-Storage (32)
+void Assembler::mhy(Register r1, const MemOperand& opnd) {
+ rxy_form(MHY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Multiply Halfword Immediate (32)
+void Assembler::mhi(Register r1, const Operand& opnd) {
+ ri_form(MHI, r1, opnd);
+}
+
+// ----------------------------
+// 64-bit Multiply Instructions
+// ----------------------------
+// Multiply Logical Register-Storage (128<64)
+void Assembler::mlg(Register r1, const MemOperand& opnd) {
+ rxy_form(MLG, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Multiply Register (128<64)
+void Assembler::mlgr(Register r1, Register r2) { rre_form(MLGR, r1, r2); }
+
+// Multiply Halfword Immediate (64)
+void Assembler::mghi(Register r1, const Operand& opnd) {
+ ri_form(MGHI, r1, opnd);
+}
+
+// Multiply Single Immediate (64)
+void Assembler::msgfi(Register r1, const Operand& opnd) {
+ ril_form(MSGFI, r1, opnd);
+}
+
+// Multiply Single Register-Storage (64)
+void Assembler::msg(Register r1, const MemOperand& opnd) {
+ rxy_form(MSG, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Multiply Single Register-Register (64)
+void Assembler::msgr(Register r1, Register r2) { rre_form(MSGR, r1, r2); }
+
+// --------------------------
+// 32-bit Divide Instructions
+// --------------------------
+// Divide Register-Storage (32<-64)
+void Assembler::d(Register r1, const MemOperand& opnd) {
+ rx_form(D, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Divide Register (32<-64)
+void Assembler::dr(Register r1, Register r2) {
+ DCHECK(r1.code() % 2 == 0);
+ rr_form(DR, r1, r2);
+}
+
+// Divide Logical Register-Storage (32<-64)
+void Assembler::dl(Register r1, const MemOperand& opnd) {
+ rx_form(DL, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Divide Logical Register (32<-64)
+void Assembler::dlr(Register r1, Register r2) { rre_form(DLR, r1, r2); }
+
+// --------------------------
+// 64-bit Divide Instructions
+// --------------------------
+// Divide Logical Register (64<-128)
+void Assembler::dlgr(Register r1, Register r2) { rre_form(DLGR, r1, r2); }
+
+// Divide Single Register (64<-32)
+void Assembler::dsgr(Register r1, Register r2) { rre_form(DSGR, r1, r2); }
+
+// --------------------
+// Bitwise Instructions
+// --------------------
+// AND Register-Storage (32)
+void Assembler::n(Register r1, const MemOperand& opnd) {
+ rx_form(N, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// AND Register-Storage (32)
+void Assembler::ny(Register r1, const MemOperand& opnd) {
+ rxy_form(NY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// AND Register (32)
+void Assembler::nr(Register r1, Register r2) { rr_form(NR, r1, r2); }
+
+// AND Register-Register-Register (32)
+void Assembler::nrk(Register r1, Register r2, Register r3) {
+ rrf1_form(NRK, r1, r2, r3);
+}
+
+// AND Register-Storage (64)
+void Assembler::ng(Register r1, const MemOperand& opnd) {
+ rxy_form(NG, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// AND Register (64)
+void Assembler::ngr(Register r1, Register r2) { rre_form(NGR, r1, r2); }
+
+// AND Register-Register-Register (64)
+void Assembler::ngrk(Register r1, Register r2, Register r3) {
+ rrf1_form(NGRK, r1, r2, r3);
+}
+
+// OR Register-Storage (32)
+void Assembler::o(Register r1, const MemOperand& opnd) {
+ rx_form(O, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// OR Register-Storage (32)
+void Assembler::oy(Register r1, const MemOperand& opnd) {
+ rxy_form(OY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// OR Register (32)
+void Assembler::or_z(Register r1, Register r2) { rr_form(OR, r1, r2); }
+
+// OR Register-Register-Register (32)
+void Assembler::ork(Register r1, Register r2, Register r3) {
+ rrf1_form(ORK, r1, r2, r3);
+}
+
+// OR Register-Storage (64)
+void Assembler::og(Register r1, const MemOperand& opnd) {
+ rxy_form(OG, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// OR Register (64)
+void Assembler::ogr(Register r1, Register r2) { rre_form(OGR, r1, r2); }
+
+// OR Register-Register-Register (64)
+void Assembler::ogrk(Register r1, Register r2, Register r3) {
+ rrf1_form(OGRK, r1, r2, r3);
+}
+
+// XOR Register-Storage (32)
+void Assembler::x(Register r1, const MemOperand& opnd) {
+ rx_form(X, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// XOR Register-Storage (32)
+void Assembler::xy(Register r1, const MemOperand& opnd) {
+ rxy_form(XY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// XOR Register (32)
+void Assembler::xr(Register r1, Register r2) { rr_form(XR, r1, r2); }
+
+// XOR Register-Register-Register (32)
+void Assembler::xrk(Register r1, Register r2, Register r3) {
+ rrf1_form(XRK, r1, r2, r3);
+}
+
+// XOR Register-Storage (64)
+void Assembler::xg(Register r1, const MemOperand& opnd) {
+ rxy_form(XG, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// XOR Register (64)
+void Assembler::xgr(Register r1, Register r2) { rre_form(XGR, r1, r2); }
+
+// XOR Register-Register-Register (64)
+void Assembler::xgrk(Register r1, Register r2, Register r3) {
+ rrf1_form(XGRK, r1, r2, r3);
+}
+
+// XOR Storage-Storage
+void Assembler::xc(const MemOperand& opnd1, const MemOperand& opnd2,
+ Length length) {
+ ss_form(XC, length - 1, opnd1.getBaseRegister(), opnd1.getDisplacement(),
+ opnd2.getBaseRegister(), opnd2.getDisplacement());
+}
+
+// -------------------------------------------
+// Bitwise GPR <-> FPR Conversion Instructions
+// -------------------------------------------
+// Load GR from FPR (64 <- L)
+void Assembler::lgdr(Register r1, DoubleRegister f2) {
+ rre_form(LGDR, r1, Register::from_code(f2.code()));
+}
+
+// Load FPR from FR (L <- 64)
+void Assembler::ldgr(DoubleRegister f1, Register r2) {
+ rre_form(LDGR, Register::from_code(f1.code()), r2);
+}
+
+void Assembler::EnsureSpaceFor(int space_needed) {
+ if (buffer_space() <= (kGap + space_needed)) {
+ GrowBuffer(space_needed);
+ }
+}
+
+// Rotate Left Single Logical (32)
+void Assembler::rll(Register r1, Register r3, Register opnd) {
+ DCHECK(!opnd.is(r0));
+ rsy_form(RLL, r1, r3, opnd, 0);
+}
+
+// Rotate Left Single Logical (32)
+void Assembler::rll(Register r1, Register r3, const Operand& opnd) {
+ rsy_form(RLL, r1, r3, r0, opnd.immediate());
+}
+
+// Rotate Left Single Logical (32)
+void Assembler::rll(Register r1, Register r3, Register r2,
+ const Operand& opnd) {
+ rsy_form(RLL, r1, r3, r2, opnd.immediate());
+}
+
+// Rotate Left Single Logical (64)
+void Assembler::rllg(Register r1, Register r3, Register opnd) {
+ DCHECK(!opnd.is(r0));
+ rsy_form(RLLG, r1, r3, opnd, 0);
+}
+
+// Rotate Left Single Logical (64)
+void Assembler::rllg(Register r1, Register r3, const Operand& opnd) {
+ rsy_form(RLLG, r1, r3, r0, opnd.immediate());
+}
+
+// Rotate Left Single Logical (64)
+void Assembler::rllg(Register r1, Register r3, Register r2,
+ const Operand& opnd) {
+ rsy_form(RLLG, r1, r3, r2, opnd.immediate());
+}
+
+// Shift Left Single Logical (32)
+void Assembler::sll(Register r1, Register opnd) {
+ DCHECK(!opnd.is(r0));
+ rs_form(SLL, r1, r0, opnd, 0);
+}
+
+// Shift Left Single Logical (32)
+void Assembler::sll(Register r1, const Operand& opnd) {
+ rs_form(SLL, r1, r0, r0, opnd.immediate());
+}
+
+// Shift Left Single Logical (32)
+void Assembler::sllk(Register r1, Register r3, Register opnd) {
+ DCHECK(!opnd.is(r0));
+ rsy_form(SLLK, r1, r3, opnd, 0);
+}
+
+// Shift Left Single Logical (32)
+void Assembler::sllk(Register r1, Register r3, const Operand& opnd) {
+ rsy_form(SLLK, r1, r3, r0, opnd.immediate());
+}
+
+// Shift Left Single Logical (64)
+void Assembler::sllg(Register r1, Register r3, Register opnd) {
+ DCHECK(!opnd.is(r0));
+ rsy_form(SLLG, r1, r3, opnd, 0);
+}
+
+// Shift Left Single Logical (64)
+void Assembler::sllg(Register r1, Register r3, const Operand& opnd) {
+ rsy_form(SLLG, r1, r3, r0, opnd.immediate());
+}
+
+// Shift Left Double Logical (64)
+void Assembler::sldl(Register r1, Register b2, const Operand& opnd) {
+ DCHECK(r1.code() % 2 == 0);
+ rs_form(SLDL, r1, r0, b2, opnd.immediate());
+}
+
+// Shift Right Single Logical (32)
+void Assembler::srl(Register r1, Register opnd) {
+ DCHECK(!opnd.is(r0));
+ rs_form(SRL, r1, r0, opnd, 0);
+}
+
+// Shift Right Double Arith (64)
+void Assembler::srda(Register r1, Register b2, const Operand& opnd) {
+ DCHECK(r1.code() % 2 == 0);
+ rs_form(SRDA, r1, r0, b2, opnd.immediate());
+}
+
+// Shift Right Double Logical (64)
+void Assembler::srdl(Register r1, Register b2, const Operand& opnd) {
+ DCHECK(r1.code() % 2 == 0);
+ rs_form(SRDL, r1, r0, b2, opnd.immediate());
+}
+
+// Shift Right Single Logical (32)
+void Assembler::srl(Register r1, const Operand& opnd) {
+ rs_form(SRL, r1, r0, r0, opnd.immediate());
+}
+
+// Shift Right Single Logical (32)
+void Assembler::srlk(Register r1, Register r3, Register opnd) {
+ DCHECK(!opnd.is(r0));
+ rsy_form(SRLK, r1, r3, opnd, 0);
+}
+
+// Shift Right Single Logical (32)
+void Assembler::srlk(Register r1, Register r3, const Operand& opnd) {
+ rsy_form(SRLK, r1, r3, r0, opnd.immediate());
+}
+
+// Shift Right Single Logical (64)
+void Assembler::srlg(Register r1, Register r3, Register opnd) {
+ DCHECK(!opnd.is(r0));
+ rsy_form(SRLG, r1, r3, opnd, 0);
+}
+
+// Shift Right Single Logical (64)
+void Assembler::srlg(Register r1, Register r3, const Operand& opnd) {
+ rsy_form(SRLG, r1, r3, r0, opnd.immediate());
+}
+
+// Shift Left Single (32)
+void Assembler::sla(Register r1, Register opnd) {
+ DCHECK(!opnd.is(r0));
+ rs_form(SLA, r1, r0, opnd, 0);
+}
+
+// Shift Left Single (32)
+void Assembler::sla(Register r1, const Operand& opnd) {
+ rs_form(SLA, r1, r0, r0, opnd.immediate());
+}
+
+// Shift Left Single (32)
+void Assembler::slak(Register r1, Register r3, Register opnd) {
+ DCHECK(!opnd.is(r0));
+ rsy_form(SLAK, r1, r3, opnd, 0);
+}
+
+// Shift Left Single (32)
+void Assembler::slak(Register r1, Register r3, const Operand& opnd) {
+ rsy_form(SLAK, r1, r3, r0, opnd.immediate());
+}
+
+// Shift Left Single (64)
+void Assembler::slag(Register r1, Register r3, Register opnd) {
+ DCHECK(!opnd.is(r0));
+ rsy_form(SLAG, r1, r3, opnd, 0);
+}
+
+// Shift Left Single (64)
+void Assembler::slag(Register r1, Register r3, const Operand& opnd) {
+ rsy_form(SLAG, r1, r3, r0, opnd.immediate());
+}
+
+// Shift Right Single (32)
+void Assembler::sra(Register r1, Register opnd) {
+ DCHECK(!opnd.is(r0));
+ rs_form(SRA, r1, r0, opnd, 0);
+}
+
+// Shift Right Single (32)
+void Assembler::sra(Register r1, const Operand& opnd) {
+ rs_form(SRA, r1, r0, r0, opnd.immediate());
+}
+
+// Shift Right Single (32)
+void Assembler::srak(Register r1, Register r3, Register opnd) {
+ DCHECK(!opnd.is(r0));
+ rsy_form(SRAK, r1, r3, opnd, 0);
+}
+
+// Shift Right Single (32)
+void Assembler::srak(Register r1, Register r3, const Operand& opnd) {
+ rsy_form(SRAK, r1, r3, r0, opnd.immediate());
+}
+
+// Shift Right Single (64)
+void Assembler::srag(Register r1, Register r3, Register opnd) {
+ DCHECK(!opnd.is(r0));
+ rsy_form(SRAG, r1, r3, opnd, 0);
+}
+
+void Assembler::srag(Register r1, Register r3, const Operand& opnd) {
+ rsy_form(SRAG, r1, r3, r0, opnd.immediate());
+}
+
+// Shift Right Double
+void Assembler::srda(Register r1, const Operand& opnd) {
+ DCHECK(r1.code() % 2 == 0);
+ rs_form(SRDA, r1, r0, r0, opnd.immediate());
+}
+
+// Shift Right Double Logical
+void Assembler::srdl(Register r1, const Operand& opnd) {
+ DCHECK(r1.code() % 2 == 0);
+ rs_form(SRDL, r1, r0, r0, opnd.immediate());
+}
+
+void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode,
+ TypeFeedbackId ast_id) {
+ positions_recorder()->WriteRecordedPositions();
+ EnsureSpace ensure_space(this);
+
+ int32_t target_index = emit_code_target(target, rmode, ast_id);
+ brasl(r14, Operand(target_index));
+}
+
+void Assembler::jump(Handle<Code> target, RelocInfo::Mode rmode,
+ Condition cond) {
+ EnsureSpace ensure_space(this);
+
+ int32_t target_index = emit_code_target(target, rmode);
+ brcl(cond, Operand(target_index), true);
+}
+
+// Store (32)
+void Assembler::st(Register src, const MemOperand& dst) {
+ rx_form(ST, src, dst.rx(), dst.rb(), dst.offset());
+}
+
+// Store (32)
+void Assembler::sty(Register src, const MemOperand& dst) {
+ rxy_form(STY, src, dst.rx(), dst.rb(), dst.offset());
+}
+
+// Store Halfword
+void Assembler::sth(Register src, const MemOperand& dst) {
+ rx_form(STH, src, dst.rx(), dst.rb(), dst.offset());
+}
+
+// Store Halfword
+void Assembler::sthy(Register src, const MemOperand& dst) {
+ rxy_form(STHY, src, dst.rx(), dst.rb(), dst.offset());
+}
+
+// Store Character
+void Assembler::stc(Register src, const MemOperand& dst) {
+ rx_form(STC, src, dst.rx(), dst.rb(), dst.offset());
+}
+
+// Store Character
+void Assembler::stcy(Register src, const MemOperand& dst) {
+ rxy_form(STCY, src, dst.rx(), dst.rb(), dst.offset());
+}
+
+// 32-bit Load Multiple - short displacement (12-bits unsigned)
+void Assembler::lm(Register r1, Register r2, const MemOperand& src) {
+ rs_form(LM, r1, r2, src.rb(), src.offset());
+}
+
+// 32-bit Load Multiple - long displacement (20-bits signed)
+void Assembler::lmy(Register r1, Register r2, const MemOperand& src) {
+ rsy_form(LMY, r1, r2, src.rb(), src.offset());
+}
+
+// 64-bit Load Multiple - long displacement (20-bits signed)
+void Assembler::lmg(Register r1, Register r2, const MemOperand& src) {
+ rsy_form(LMG, r1, r2, src.rb(), src.offset());
+}
+
+// Move integer (32)
+void Assembler::mvhi(const MemOperand& opnd1, const Operand& i2) {
+ sil_form(MVHI, opnd1.getBaseRegister(), opnd1.getDisplacement(), i2);
+}
+
+// Move integer (64)
+void Assembler::mvghi(const MemOperand& opnd1, const Operand& i2) {
+ sil_form(MVGHI, opnd1.getBaseRegister(), opnd1.getDisplacement(), i2);
+}
+
+// Store Register (64)
+void Assembler::stg(Register src, const MemOperand& dst) {
+ DCHECK(!(dst.rb().code() == 15 && dst.offset() < 0));
+ rxy_form(STG, src, dst.rx(), dst.rb(), dst.offset());
+}
+
+// Insert Character
+void Assembler::ic_z(Register r1, const MemOperand& opnd) {
+ rx_form(IC_z, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Insert Character
+void Assembler::icy(Register r1, const MemOperand& opnd) {
+ rxy_form(ICY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Insert Immediate (High)
+void Assembler::iihf(Register r1, const Operand& opnd) {
+ ril_form(IIHF, r1, opnd);
+}
+
+// Insert Immediate (low)
+void Assembler::iilf(Register r1, const Operand& opnd) {
+ ril_form(IILF, r1, opnd);
+}
+
+// Insert Immediate (high high)
+void Assembler::iihh(Register r1, const Operand& opnd) {
+ ri_form(IIHH, r1, opnd);
+}
+
+// Insert Immediate (high low)
+void Assembler::iihl(Register r1, const Operand& opnd) {
+ ri_form(IIHL, r1, opnd);
+}
+
+// Insert Immediate (low high)
+void Assembler::iilh(Register r1, const Operand& opnd) {
+ ri_form(IILH, r1, opnd);
+}
+
+// Insert Immediate (low low)
+void Assembler::iill(Register r1, const Operand& opnd) {
+ ri_form(IILL, r1, opnd);
+}
+
+// GPR <-> FPR Instructions
+
+// Floating point instructions
+//
+// Load zero Register (64)
+void Assembler::lzdr(DoubleRegister r1) {
+ rre_form(LZDR, Register::from_code(r1.code()), Register::from_code(0));
+}
+
+// Add Register-Register (LB)
+void Assembler::aebr(DoubleRegister r1, DoubleRegister r2) {
+ rre_form(AEBR, Register::from_code(r1.code()),
+ Register::from_code(r2.code()));
+}
+
+// Add Register-Storage (LB)
+void Assembler::adb(DoubleRegister r1, const MemOperand& opnd) {
+ rxe_form(ADB, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
+ opnd.offset());
+}
+
+// Add Register-Register (LB)
+void Assembler::adbr(DoubleRegister r1, DoubleRegister r2) {
+ rre_form(ADBR, Register::from_code(r1.code()),
+ Register::from_code(r2.code()));
+}
+
+// Compare Register-Register (LB)
+void Assembler::cebr(DoubleRegister r1, DoubleRegister r2) {
+ rre_form(CEBR, Register::from_code(r1.code()),
+ Register::from_code(r2.code()));
+}
+
+// Compare Register-Storage (LB)
+void Assembler::cdb(DoubleRegister r1, const MemOperand& opnd) {
+ rx_form(CD, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
+ opnd.offset());
+}
+
+// Compare Register-Register (LB)
+void Assembler::cdbr(DoubleRegister r1, DoubleRegister r2) {
+ rre_form(CDBR, Register::from_code(r1.code()),
+ Register::from_code(r2.code()));
+}
+
+// Divide Register-Register (LB)
+void Assembler::debr(DoubleRegister r1, DoubleRegister r2) {
+ rre_form(DEBR, Register::from_code(r1.code()),
+ Register::from_code(r2.code()));
+}
+
+// Divide Register-Storage (LB)
+void Assembler::ddb(DoubleRegister r1, const MemOperand& opnd) {
+ rxe_form(DDB, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
+ opnd.offset());
+}
+
+// Divide Register-Register (LB)
+void Assembler::ddbr(DoubleRegister r1, DoubleRegister r2) {
+ rre_form(DDBR, Register::from_code(r1.code()),
+ Register::from_code(r2.code()));
+}
+
+// Multiply Register-Register (LB)
+void Assembler::meebr(DoubleRegister r1, DoubleRegister r2) {
+ rre_form(MEEBR, Register::from_code(r1.code()),
+ Register::from_code(r2.code()));
+}
+
+// Multiply Register-Storage (LB)
+void Assembler::mdb(DoubleRegister r1, const MemOperand& opnd) {
+ rxe_form(MDB, Register::from_code(r1.code()), opnd.rb(), opnd.rx(),
+ opnd.offset());
+}
+
+// Multiply Register-Register (LB)
+void Assembler::mdbr(DoubleRegister r1, DoubleRegister r2) {
+ rre_form(MDBR, Register::from_code(r1.code()),
+ Register::from_code(r2.code()));
+}
+
+// Subtract Register-Register (LB)
+void Assembler::sebr(DoubleRegister r1, DoubleRegister r2) {
+ rre_form(SEBR, Register::from_code(r1.code()),
+ Register::from_code(r2.code()));
+}
+
+// Subtract Register-Storage (LB)
+void Assembler::sdb(DoubleRegister r1, const MemOperand& opnd) {
+ rxe_form(SDB, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
+ opnd.offset());
+}
+
+// Subtract Register-Register (LB)
+void Assembler::sdbr(DoubleRegister r1, DoubleRegister r2) {
+ rre_form(SDBR, Register::from_code(r1.code()),
+ Register::from_code(r2.code()));
+}
+
+// Square Root (LB)
+void Assembler::sqdb(DoubleRegister r1, const MemOperand& opnd) {
+ rxe_form(SQDB, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
+ opnd.offset());
+}
+
+// Square Root Register-Register (LB)
+void Assembler::sqebr(DoubleRegister r1, DoubleRegister r2) {
+ rre_form(SQEBR, Register::from_code(r1.code()),
+ Register::from_code(r2.code()));
+}
+
+// Square Root Register-Register (LB)
+void Assembler::sqdbr(DoubleRegister r1, DoubleRegister r2) {
+ rre_form(SQDBR, Register::from_code(r1.code()),
+ Register::from_code(r2.code()));
+}
+
+// Load Rounded (double -> float)
+void Assembler::ledbr(DoubleRegister r1, DoubleRegister r2) {
+ rre_form(LEDBR, Register::from_code(r1.code()),
+ Register::from_code(r2.code()));
+}
+
+// Load Lengthen (float -> double)
+void Assembler::ldebr(DoubleRegister r1, DoubleRegister r2) {
+ rre_form(LDEBR, Register::from_code(r1.code()),
+ Register::from_code(r2.code()));
+}
+
+// Load Complement Register-Register (LB)
+void Assembler::lcdbr(DoubleRegister r1, DoubleRegister r2) {
+ rre_form(LCDBR, Register::from_code(r1.code()),
+ Register::from_code(r2.code()));
+}
+
+// Load Positive Register-Register (LB)
+void Assembler::lpebr(DoubleRegister r1, DoubleRegister r2) {
+ rre_form(LPEBR, Register::from_code(r1.code()),
+ Register::from_code(r2.code()));
+}
+
+// Load Positive Register-Register (LB)
+void Assembler::lpdbr(DoubleRegister r1, DoubleRegister r2) {
+ rre_form(LPDBR, Register::from_code(r1.code()),
+ Register::from_code(r2.code()));
+}
+
+// Store Double (64)
+void Assembler::std(DoubleRegister r1, const MemOperand& opnd) {
+ rx_form(STD, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Store Double (64)
+void Assembler::stdy(DoubleRegister r1, const MemOperand& opnd) {
+ DCHECK(!(opnd.rb().code() == 15 && opnd.offset() < 0));
+ rxy_form(STDY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Store Float (32)
+void Assembler::ste(DoubleRegister r1, const MemOperand& opnd) {
+ rx_form(STE, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Store Float (32)
+void Assembler::stey(DoubleRegister r1, const MemOperand& opnd) {
+ DCHECK(!(opnd.rb().code() == 15 && opnd.offset() < 0));
+ rxy_form(STEY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Load Double (64)
+void Assembler::ld(DoubleRegister r1, const MemOperand& opnd) {
+ DCHECK(is_uint12(opnd.offset()));
+ rx_form(LD, r1, opnd.rx(), opnd.rb(), opnd.offset() & 0xfff);
+}
+
+// Load Double (64)
+void Assembler::ldy(DoubleRegister r1, const MemOperand& opnd) {
+ DCHECK(is_int20(opnd.offset()));
+ rxy_form(LDY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Load Float (32)
+void Assembler::le_z(DoubleRegister r1, const MemOperand& opnd) {
+ DCHECK(is_uint12(opnd.offset()));
+ rx_form(LE, r1, opnd.rx(), opnd.rb(), opnd.offset() & 0xfff);
+}
+
+// Load Float (32)
+void Assembler::ley(DoubleRegister r1, const MemOperand& opnd) {
+ DCHECK(is_int20(opnd.offset()));
+ rxy_form(LEY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Load Double Register-Register (64)
+void Assembler::ldr(DoubleRegister r1, DoubleRegister r2) {
+ rr_form(LDR, r1, r2);
+}
+
+// Load And Test Register-Register (L)
+void Assembler::ltebr(DoubleRegister r1, DoubleRegister r2) {
+ rre_form(LTEBR, r1, r2);
+}
+
+// Load And Test Register-Register (L)
+void Assembler::ltdbr(DoubleRegister r1, DoubleRegister r2) {
+ rre_form(LTDBR, r1, r2);
+}
+
+// Convert to Fixed point (64<-S)
+void Assembler::cgebr(Condition m, Register r1, DoubleRegister r2) {
+ rrfe_form(CGEBR, m, Condition(0), r1, Register::from_code(r2.code()));
+}
+
+// Convert to Fixed point (64<-L)
+void Assembler::cgdbr(Condition m, Register r1, DoubleRegister r2) {
+ rrfe_form(CGDBR, m, Condition(0), r1, Register::from_code(r2.code()));
+}
+
+// Convert to Fixed point (32<-L)
+void Assembler::cfdbr(Condition m, Register r1, DoubleRegister r2) {
+ rrfe_form(CFDBR, m, Condition(0), r1, Register::from_code(r2.code()));
+}
+
+// Convert from Fixed point (L<-64)
+void Assembler::cegbr(DoubleRegister r1, Register r2) {
+ rre_form(CEGBR, Register::from_code(r1.code()), r2);
+}
+
+// Convert from Fixed point (L<-64)
+void Assembler::cdgbr(DoubleRegister r1, Register r2) {
+ rre_form(CDGBR, Register::from_code(r1.code()), r2);
+}
+
+// Convert from Fixed point (L<-32)
+void Assembler::cdfbr(DoubleRegister r1, Register r2) {
+ rre_form(CDFBR, Register::from_code(r1.code()), r2);
+}
+
+// Convert to Fixed Logical (64<-L)
+void Assembler::clgdbr(Condition m3, Condition m4, Register r1,
+ DoubleRegister r2) {
+ DCHECK_EQ(m4, Condition(0));
+ rrfe_form(CLGDBR, m3, m4, r1, Register::from_code(r2.code()));
+}
+
+// Convert to Fixed Logical (64<-F32)
+void Assembler::clgebr(Condition m3, Condition m4, Register r1,
+ DoubleRegister r2) {
+ DCHECK_EQ(m4, Condition(0));
+ rrfe_form(CLGEBR, m3, m4, r1, Register::from_code(r2.code()));
+}
+
+// Convert to Fixed Logical (32<-F64)
+void Assembler::clfdbr(Condition m3, Condition m4, Register r1,
+ DoubleRegister r2) {
+ DCHECK_EQ(m3, Condition(0));
+ DCHECK_EQ(m4, Condition(0));
+ rrfe_form(CLFDBR, Condition(0), Condition(0), r1,
+ Register::from_code(r2.code()));
+}
+
+// Convert to Fixed Logical (32<-F32)
+void Assembler::clfebr(Condition m3, Condition m4, Register r1,
+ DoubleRegister r2) {
+ DCHECK_EQ(m4, Condition(0));
+ rrfe_form(CLFEBR, m3, Condition(0), r1, Register::from_code(r2.code()));
+}
+
+// Convert from Fixed Logical (L<-64)
+void Assembler::celgbr(Condition m3, Condition m4, DoubleRegister r1,
+ Register r2) {
+ DCHECK_EQ(m3, Condition(0));
+ DCHECK_EQ(m4, Condition(0));
+ rrfe_form(CELGBR, Condition(0), Condition(0), Register::from_code(r1.code()),
+ r2);
+}
+
+// Convert from Fixed Logical (F32<-32)
+void Assembler::celfbr(Condition m3, Condition m4, DoubleRegister r1,
+ Register r2) {
+ DCHECK_EQ(m3, Condition(0));
+ DCHECK_EQ(m4, Condition(0));
+ rrfe_form(CELFBR, Condition(0), Condition(0), Register::from_code(r1.code()),
+ r2);
+}
+
+// Convert from Fixed Logical (L<-64)
+void Assembler::cdlgbr(Condition m3, Condition m4, DoubleRegister r1,
+ Register r2) {
+ DCHECK_EQ(m3, Condition(0));
+ DCHECK_EQ(m4, Condition(0));
+ rrfe_form(CDLGBR, Condition(0), Condition(0), Register::from_code(r1.code()),
+ r2);
+}
+
+// Convert from Fixed Logical (L<-32)
+void Assembler::cdlfbr(Condition m3, Condition m4, DoubleRegister r1,
+ Register r2) {
+ DCHECK_EQ(m4, Condition(0));
+ rrfe_form(CDLFBR, m3, Condition(0), Register::from_code(r1.code()), r2);
+}
+
+// Convert from Fixed point (S<-32)
+void Assembler::cefbr(DoubleRegister r1, Register r2) {
+ rre_form(CEFBR, Register::from_code(r1.code()), r2);
+}
+
+// Convert to Fixed point (32<-S)
+void Assembler::cfebr(Condition m3, Register r1, DoubleRegister r2) {
+ rrfe_form(CFEBR, m3, Condition(0), r1, Register::from_code(r2.code()));
+}
+
+// Load (L <- S)
+void Assembler::ldeb(DoubleRegister d1, const MemOperand& opnd) {
+ rxe_form(LDEB, Register::from_code(d1.code()), opnd.rx(), opnd.rb(),
+ opnd.offset());
+}
+
+// Load FP Integer
+void Assembler::fiebra(DoubleRegister d1, DoubleRegister d2, FIDBRA_MASK3 m3) {
+ rrf2_form(FIEBRA << 16 | m3 * B12 | d1.code() * B4 | d2.code());
+}
+
+// Load FP Integer
+void Assembler::fidbra(DoubleRegister d1, DoubleRegister d2, FIDBRA_MASK3 m3) {
+ rrf2_form(FIDBRA << 16 | m3 * B12 | d1.code() * B4 | d2.code());
+}
+
+// Multiply and Add - MADBR R1, R3, R2
+// R1 = R3 * R2 + R1
+void Assembler::madbr(DoubleRegister d1, DoubleRegister d3, DoubleRegister d2) {
+ rrd_form(MADBR, Register::from_code(d1.code()),
+ Register::from_code(d3.code()), Register::from_code(d2.code()));
+}
+
+// Multiply and Subtract - MSDBR R1, R3, R2
+// R1 = R3 * R2 - R1
+void Assembler::msdbr(DoubleRegister d1, DoubleRegister d3, DoubleRegister d2) {
+ rrd_form(MSDBR, Register::from_code(d1.code()),
+ Register::from_code(d3.code()), Register::from_code(d2.code()));
+}
+
+// end of S390instructions
+
+bool Assembler::IsNop(SixByteInstr instr, int type) {
+ DCHECK((0 == type) || (DEBUG_BREAK_NOP == type));
+ if (DEBUG_BREAK_NOP == type) {
+ return ((instr & 0xffffffff) == 0xa53b0000); // oill r3, 0
+ }
+ return ((instr & 0xffff) == 0x1800); // lr r0,r0
+}
+
+void Assembler::GrowBuffer(int needed) {
+ if (!own_buffer_) FATAL("external code buffer is too small");
+
+ // Compute new buffer size.
+ CodeDesc desc; // the new buffer
+ if (buffer_size_ < 4 * KB) {
+ desc.buffer_size = 4 * KB;
+ } else if (buffer_size_ < 1 * MB) {
+ desc.buffer_size = 2 * buffer_size_;
+ } else {
+ desc.buffer_size = buffer_size_ + 1 * MB;
+ }
+ int space = buffer_space() + (desc.buffer_size - buffer_size_);
+ if (space < needed) {
+ desc.buffer_size += needed - space;
+ }
+ CHECK_GT(desc.buffer_size, 0); // no overflow
+
+ // Set up new buffer.
+ desc.buffer = NewArray<byte>(desc.buffer_size);
+ desc.origin = this;
+
+ desc.instr_size = pc_offset();
+ desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+
+ // Copy the data.
+ intptr_t pc_delta = desc.buffer - buffer_;
+ intptr_t rc_delta =
+ (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
+ memmove(desc.buffer, buffer_, desc.instr_size);
+ memmove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
+ desc.reloc_size);
+
+ // Switch buffers.
+ DeleteArray(buffer_);
+ buffer_ = desc.buffer;
+ buffer_size_ = desc.buffer_size;
+ pc_ += pc_delta;
+ reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.last_pc() + pc_delta);
+
+ // None of our relocation types are pc relative pointing outside the code
+ // buffer nor pc absolute pointing inside the code buffer, so there is no need
+ // to relocate any emitted relocation entries.
+}
+
+void Assembler::db(uint8_t data) {
+ CheckBuffer();
+ *reinterpret_cast<uint8_t*>(pc_) = data;
+ pc_ += sizeof(uint8_t);
+}
+
+void Assembler::dd(uint32_t data) {
+ CheckBuffer();
+ *reinterpret_cast<uint32_t*>(pc_) = data;
+ pc_ += sizeof(uint32_t);
+}
+
+void Assembler::dq(uint64_t value) {
+ CheckBuffer();
+ *reinterpret_cast<uint64_t*>(pc_) = value;
+ pc_ += sizeof(uint64_t);
+}
+
+void Assembler::dp(uintptr_t data) {
+ CheckBuffer();
+ *reinterpret_cast<uintptr_t*>(pc_) = data;
+ pc_ += sizeof(uintptr_t);
+}
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+ if (RelocInfo::IsNone(rmode) ||
+ // Don't record external references unless the heap will be serialized.
+ (rmode == RelocInfo::EXTERNAL_REFERENCE && !serializer_enabled() &&
+ !emit_debug_code())) {
+ return;
+ }
+ if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
+ data = RecordedAstId().ToInt();
+ ClearRecordedAstId();
+ }
+ DeferredRelocInfo rinfo(pc_offset(), rmode, data);
+ relocations_.push_back(rinfo);
+}
+
+void Assembler::emit_label_addr(Label* label) {
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ int position = link(label);
+ DCHECK(label->is_bound());
+ // Keep internal references relative until EmitRelocations.
+ dp(position);
+}
+
+void Assembler::EmitRelocations() {
+ EnsureSpaceFor(relocations_.size() * kMaxRelocSize);
+
+ for (std::vector<DeferredRelocInfo>::iterator it = relocations_.begin();
+ it != relocations_.end(); it++) {
+ RelocInfo::Mode rmode = it->rmode();
+ Address pc = buffer_ + it->position();
+ Code* code = NULL;
+ RelocInfo rinfo(isolate(), pc, rmode, it->data(), code);
+
+ // Fix up internal references now that they are guaranteed to be bound.
+ if (RelocInfo::IsInternalReference(rmode)) {
+ // Jump table entry
+ intptr_t pos = reinterpret_cast<intptr_t>(Memory::Address_at(pc));
+ Memory::Address_at(pc) = buffer_ + pos;
+ } else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
+ // mov sequence
+ intptr_t pos = reinterpret_cast<intptr_t>(target_address_at(pc, code));
+ set_target_address_at(isolate(), pc, code, buffer_ + pos,
+ SKIP_ICACHE_FLUSH);
+ }
+
+ reloc_info_writer.Write(&rinfo);
+ }
+
+ reloc_info_writer.Finish();
+}
+
+} // namespace internal
+} // namespace v8
+#endif // V8_TARGET_ARCH_S390
diff --git a/deps/v8/src/s390/assembler-s390.h b/deps/v8/src/s390/assembler-s390.h
new file mode 100644
index 0000000000..0b9fa38539
--- /dev/null
+++ b/deps/v8/src/s390/assembler-s390.h
@@ -0,0 +1,1466 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2014 the V8 project authors. All rights reserved.
+
+// A light-weight S390 Assembler
+// Generates user mode instructions for z/Architecture
+
+#ifndef V8_S390_ASSEMBLER_S390_H_
+#define V8_S390_ASSEMBLER_S390_H_
+#include <stdio.h>
+#if V8_HOST_ARCH_S390
+// elf.h include is required for auxv check for STFLE facility used
+// for hardware detection, which is sensible only on s390 hosts.
+#include <elf.h>
+#endif
+
+#include <fcntl.h>
+#include <unistd.h>
+#include "src/assembler.h"
+#include "src/s390/constants-s390.h"
+
+#define ABI_USES_FUNCTION_DESCRIPTORS 0
+
+#define ABI_PASSES_HANDLES_IN_REGS 1
+
+// ObjectPair is defined under runtime/runtime-util.h.
+// On 31-bit, ObjectPair == uint64_t. ABI dictates long long
+// be returned with the lower addressed half in r2
+// and the higher addressed half in r3. (Returns in Regs)
+// On 64-bit, ObjectPair is a Struct. ABI dictaes Structs be
+// returned in a storage buffer allocated by the caller,
+// with the address of this buffer passed as a hidden
+// argument in r2. (Does NOT return in Regs)
+// For x86 linux, ObjectPair is returned in registers.
+#if V8_TARGET_ARCH_S390X
+#define ABI_RETURNS_OBJECTPAIR_IN_REGS 0
+#else
+#define ABI_RETURNS_OBJECTPAIR_IN_REGS 1
+#endif
+
+#define ABI_CALL_VIA_IP 1
+
+#define INSTR_AND_DATA_CACHE_COHERENCY LWSYNC
+
+namespace v8 {
+namespace internal {
+
+// clang-format off
+#define GENERAL_REGISTERS(V) \
+ V(r0) V(r1) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) \
+ V(r8) V(r9) V(r10) V(fp) V(ip) V(r13) V(r14) V(sp)
+
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) \
+ V(r8) V(r9) V(r13)
+
+#define DOUBLE_REGISTERS(V) \
+ V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
+ V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) V(d14) V(d15)
+
+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+ V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
+ V(d8) V(d9) V(d10) V(d11) V(d12) V(d15) V(d0)
+// clang-format on
+
+// CPU Registers.
+//
+// 1) We would prefer to use an enum, but enum values are assignment-
+// compatible with int, which has caused code-generation bugs.
+//
+// 2) We would prefer to use a class instead of a struct but we don't like
+// the register initialization to depend on the particular initialization
+// order (which appears to be different on OS X, Linux, and Windows for the
+// installed versions of C++ we tried). Using a struct permits C-style
+// "initialization". Also, the Register objects cannot be const as this
+// forces initialization stubs in MSVC, making us dependent on initialization
+// order.
+//
+// 3) By not using an enum, we are possibly preventing the compiler from
+// doing certain constant folds, which may significantly reduce the
+// code generated for some assembly instructions (because they boil down
+// to a few constants). If this is a problem, we could change the code
+// such that we use an enum in optimized mode, and the struct in debug
+// mode. This way we get the compile-time error checking in debug mode
+// and best performance in optimized code.
+
+struct Register {
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
+ static const int kNumRegisters = Code::kAfterLast;
+
+#define REGISTER_COUNT(R) 1 +
+ static const int kNumAllocatable =
+ ALLOCATABLE_GENERAL_REGISTERS(REGISTER_COUNT) 0;
+#undef REGISTER_COUNT
+
+#define REGISTER_BIT(R) 1 << kCode_##R |
+ static const RegList kAllocatable =
+ ALLOCATABLE_GENERAL_REGISTERS(REGISTER_BIT) 0;
+#undef REGISTER_BIT
+
+ static Register from_code(int code) {
+ DCHECK(code >= 0);
+ DCHECK(code < kNumRegisters);
+ Register r = {code};
+ return r;
+ }
+
+ const char* ToString();
+ bool IsAllocatable() const;
+ bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
+ bool is(Register reg) const { return reg_code == reg.reg_code; }
+ int code() const {
+ DCHECK(is_valid());
+ return reg_code;
+ }
+ int bit() const {
+ DCHECK(is_valid());
+ return 1 << reg_code;
+ }
+
+ void set_code(int code) {
+ reg_code = code;
+ DCHECK(is_valid());
+ }
+
+#if V8_TARGET_LITTLE_ENDIAN
+ static const int kMantissaOffset = 0;
+ static const int kExponentOffset = 4;
+#else
+ static const int kMantissaOffset = 4;
+ static const int kExponentOffset = 0;
+#endif
+
+ // Unfortunately we can't make this private in a struct.
+ int reg_code;
+};
+
+typedef struct Register Register;
+
+#define DECLARE_REGISTER(R) const Register R = {Register::kCode_##R};
+GENERAL_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+const Register no_reg = {Register::kCode_no_reg};
+
+// Register aliases
+const Register kLithiumScratch = r1; // lithium scratch.
+const Register kRootRegister = r10; // Roots array pointer.
+const Register cp = r13; // JavaScript context pointer.
+
+// Double word FP register.
+struct DoubleRegister {
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
+
+ static const int kNumRegisters = Code::kAfterLast;
+ static const int kMaxNumRegisters = kNumRegisters;
+
+ const char* ToString();
+ bool IsAllocatable() const;
+ bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
+ bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
+
+ int code() const {
+ DCHECK(is_valid());
+ return reg_code;
+ }
+
+ int bit() const {
+ DCHECK(is_valid());
+ return 1 << reg_code;
+ }
+
+ static DoubleRegister from_code(int code) {
+ DoubleRegister r = {code};
+ return r;
+ }
+
+ int reg_code;
+};
+
+typedef DoubleRegister DoubleRegister;
+
+#define DECLARE_REGISTER(R) \
+ const DoubleRegister R = {DoubleRegister::kCode_##R};
+DOUBLE_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+const Register no_dreg = {Register::kCode_no_reg};
+
+// Aliases for double registers. Defined using #define instead of
+// "static const DoubleRegister&" because Clang complains otherwise when a
+// compilation unit that includes this header doesn't use the variables.
+#define kDoubleRegZero d14
+#define kScratchDoubleReg d13
+
+Register ToRegister(int num);
+
+// Coprocessor register
+struct CRegister {
+ bool is_valid() const { return 0 <= reg_code && reg_code < 8; }
+ bool is(CRegister creg) const { return reg_code == creg.reg_code; }
+ int code() const {
+ DCHECK(is_valid());
+ return reg_code;
+ }
+ int bit() const {
+ DCHECK(is_valid());
+ return 1 << reg_code;
+ }
+
+ // Unfortunately we can't make this private in a struct.
+ int reg_code;
+};
+
+const CRegister no_creg = {-1};
+
+const CRegister cr0 = {0};
+const CRegister cr1 = {1};
+const CRegister cr2 = {2};
+const CRegister cr3 = {3};
+const CRegister cr4 = {4};
+const CRegister cr5 = {5};
+const CRegister cr6 = {6};
+const CRegister cr7 = {7};
+
+// TODO(john.yan) Define SIMD registers.
+typedef DoubleRegister Simd128Register;
+
+// -----------------------------------------------------------------------------
+// Machine instruction Operands
+
+#if V8_TARGET_ARCH_S390X
+const RelocInfo::Mode kRelocInfo_NONEPTR = RelocInfo::NONE64;
+#else
+const RelocInfo::Mode kRelocInfo_NONEPTR = RelocInfo::NONE32;
+#endif
+
+// Class Operand represents a shifter operand in data processing instructions
+// defining immediate numbers and masks
+typedef uint8_t Length;
+
+struct Mask {
+ uint8_t mask;
+ uint8_t value() { return mask; }
+ static Mask from_value(uint8_t input) {
+ DCHECK(input <= 0x0F);
+ Mask m = {input};
+ return m;
+ }
+};
+
+class Operand BASE_EMBEDDED {
+ public:
+ // immediate
+ INLINE(explicit Operand(intptr_t immediate,
+ RelocInfo::Mode rmode = kRelocInfo_NONEPTR));
+ INLINE(static Operand Zero()) { return Operand(static_cast<intptr_t>(0)); }
+ INLINE(explicit Operand(const ExternalReference& f));
+ explicit Operand(Handle<Object> handle);
+ INLINE(explicit Operand(Smi* value));
+
+ // rm
+ INLINE(explicit Operand(Register rm));
+
+ // Return true if this is a register operand.
+ INLINE(bool is_reg() const);
+
+ bool must_output_reloc_info(const Assembler* assembler) const;
+
+ inline intptr_t immediate() const {
+ DCHECK(!rm_.is_valid());
+ return imm_;
+ }
+
+ inline void setBits(int n) {
+ imm_ = (static_cast<uint32_t>(imm_) << (32 - n)) >> (32 - n);
+ }
+
+ Register rm() const { return rm_; }
+
+ private:
+ Register rm_;
+ intptr_t imm_; // valid if rm_ == no_reg
+ RelocInfo::Mode rmode_;
+
+ friend class Assembler;
+ friend class MacroAssembler;
+};
+
+typedef int32_t Disp;
+
+// Class MemOperand represents a memory operand in load and store instructions
+// On S390, we have various flavours of memory operands:
+// 1) a base register + 16 bit unsigned displacement
+// 2) a base register + index register + 16 bit unsigned displacement
+// 3) a base register + index register + 20 bit signed displacement
+class MemOperand BASE_EMBEDDED {
+ public:
+ explicit MemOperand(Register rx, Disp offset = 0);
+ explicit MemOperand(Register rx, Register rb, Disp offset = 0);
+
+ int32_t offset() const { return offset_; }
+ uint32_t getDisplacement() const { return offset(); }
+
+ // Base register
+ Register rb() const {
+ DCHECK(!baseRegister.is(no_reg));
+ return baseRegister;
+ }
+
+ Register getBaseRegister() const { return rb(); }
+
+ // Index Register
+ Register rx() const {
+ DCHECK(!indexRegister.is(no_reg));
+ return indexRegister;
+ }
+ Register getIndexRegister() const { return rx(); }
+
+ private:
+ Register baseRegister; // base
+ Register indexRegister; // index
+ int32_t offset_; // offset
+
+ friend class Assembler;
+};
+
+class DeferredRelocInfo {
+ public:
+ DeferredRelocInfo() {}
+ DeferredRelocInfo(int position, RelocInfo::Mode rmode, intptr_t data)
+ : position_(position), rmode_(rmode), data_(data) {}
+
+ int position() const { return position_; }
+ RelocInfo::Mode rmode() const { return rmode_; }
+ intptr_t data() const { return data_; }
+
+ private:
+ int position_;
+ RelocInfo::Mode rmode_;
+ intptr_t data_;
+};
+
+class Assembler : public AssemblerBase {
+ public:
+ // Create an assembler. Instructions and relocation information are emitted
+ // into a buffer, with the instructions starting from the beginning and the
+ // relocation information starting from the end of the buffer. See CodeDesc
+ // for a detailed comment on the layout (globals.h).
+ //
+ // If the provided buffer is NULL, the assembler allocates and grows its own
+ // buffer, and buffer_size determines the initial buffer size. The buffer is
+ // owned by the assembler and deallocated upon destruction of the assembler.
+ //
+ // If the provided buffer is not NULL, the assembler uses the provided buffer
+ // for code generation and assumes its size to be buffer_size. If the buffer
+ // is too small, a fatal error occurs. No deallocation of the buffer is done
+ // upon destruction of the assembler.
+ Assembler(Isolate* isolate, void* buffer, int buffer_size);
+ virtual ~Assembler() {}
+
+ // GetCode emits any pending (non-emitted) code and fills the descriptor
+ // desc. GetCode() is idempotent; it returns the same result if no other
+ // Assembler functions are invoked in between GetCode() calls.
+ void GetCode(CodeDesc* desc);
+
+ // Label operations & relative jumps (PPUM Appendix D)
+ //
+ // Takes a branch opcode (cc) and a label (L) and generates
+ // either a backward branch or a forward branch and links it
+ // to the label fixup chain. Usage:
+ //
+ // Label L; // unbound label
+ // j(cc, &L); // forward branch to unbound label
+ // bind(&L); // bind label to the current pc
+ // j(cc, &L); // backward branch to bound label
+ // bind(&L); // illegal: a label may be bound only once
+ //
+ // Note: The same Label can be used for forward and backward branches
+ // but it may be bound only once.
+
+ void bind(Label* L); // binds an unbound label L to the current code position
+
+ // Links a label at the current pc_offset(). If already bound, returns the
+ // bound position. If already linked, returns the position of the prior link.
+ // Otherwise, returns the current pc_offset().
+ int link(Label* L);
+
+ // Determines if Label is bound and near enough so that a single
+ // branch instruction can be used to reach it.
+ bool is_near(Label* L, Condition cond);
+
+ // Returns the branch offset to the given label from the current code position
+ // Links the label to the current position if it is still unbound
+ int branch_offset(Label* L) { return link(L) - pc_offset(); }
+
+ // Puts a labels target address at the given position.
+ // The high 8 bits are set to zero.
+ void label_at_put(Label* L, int at_offset);
+ void load_label_offset(Register r1, Label* L);
+
+ // Read/Modify the code target address in the branch/call instruction at pc.
+ INLINE(static Address target_address_at(Address pc, Address constant_pool));
+ INLINE(static void set_target_address_at(
+ Isolate* isolate, Address pc, Address constant_pool, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
+ INLINE(static Address target_address_at(Address pc, Code* code)) {
+ Address constant_pool = NULL;
+ return target_address_at(pc, constant_pool);
+ }
+ INLINE(static void set_target_address_at(
+ Isolate* isolate, Address pc, Code* code, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
+ Address constant_pool = NULL;
+ set_target_address_at(isolate, pc, constant_pool, target,
+ icache_flush_mode);
+ }
+
+ // Return the code target address at a call site from the return address
+ // of that call in the instruction stream.
+ inline static Address target_address_from_return_address(Address pc);
+
+ // Given the address of the beginning of a call, return the address
+ // in the instruction stream that the call will return to.
+ INLINE(static Address return_address_from_call_start(Address pc));
+
+ inline Handle<Object> code_target_object_handle_at(Address pc);
+ // This sets the branch destination.
+ // This is for calls and branches within generated code.
+ inline static void deserialization_set_special_target_at(
+ Isolate* isolate, Address instruction_payload, Code* code,
+ Address target);
+
+ // This sets the internal reference at the pc.
+ inline static void deserialization_set_target_internal_reference_at(
+ Isolate* isolate, Address pc, Address target,
+ RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
+
+ // Here we are patching the address in the IIHF/IILF instruction pair.
+ // These values are used in the serialization process and must be zero for
+ // S390 platform, as Code, Embedded Object or External-reference pointers
+ // are split across two consecutive instructions and don't exist separately
+ // in the code, so the serializer should not step forwards in memory after
+ // a target is resolved and written.
+ static const int kSpecialTargetSize = 0;
+
+// Number of bytes for instructions used to store pointer sized constant.
+#if V8_TARGET_ARCH_S390X
+ static const int kBytesForPtrConstant = 12; // IIHF + IILF
+#else
+ static const int kBytesForPtrConstant = 6; // IILF
+#endif
+
+ // Distance between the instruction referring to the address of the call
+ // target and the return address.
+
+ // Offset between call target address and return address
+ // for BRASL calls
+ // Patch will be appiled to other FIXED_SEQUENCE call
+ static const int kCallTargetAddressOffset = 6;
+
+// The length of FIXED_SEQUENCE call
+// iihf r8, <address_hi> // <64-bit only>
+// iilf r8, <address_lo>
+// basr r14, r8
+#if V8_TARGET_ARCH_S390X
+ static const int kCallSequenceLength = 14;
+#else
+ static const int kCallSequenceLength = 8;
+#endif
+
+ // This is the length of the BreakLocationIterator::SetDebugBreakAtReturn()
+ // code patch FIXED_SEQUENCE in bytes!
+ // JS Return Sequence = Call Sequence + BKPT
+ // static const int kJSReturnSequenceLength = kCallSequenceLength + 2;
+
+ // This is the length of the code sequence from SetDebugBreakAtSlot()
+ // FIXED_SEQUENCE in bytes!
+ static const int kDebugBreakSlotLength = kCallSequenceLength;
+ static const int kPatchDebugBreakSlotReturnOffset = kCallTargetAddressOffset;
+
+ // Length to patch between the start of the JS return sequence
+ // from SetDebugBreakAtReturn and the address from
+ // break_address_from_return_address.
+ //
+ // frame->pc() in Debug::SetAfterBreakTarget will point to BKPT in
+ // JS return sequence, so the length to patch will not include BKPT
+ // instruction length.
+ // static const int kPatchReturnSequenceAddressOffset =
+ // kCallSequenceLength - kPatchDebugBreakSlotReturnOffset;
+
+ // Length to patch between the start of the FIXED call sequence from
+ // SetDebugBreakAtSlot() and the the address from
+ // break_address_from_return_address.
+ static const int kPatchDebugBreakSlotAddressOffset =
+ kDebugBreakSlotLength - kPatchDebugBreakSlotReturnOffset;
+
+ static inline int encode_crbit(const CRegister& cr, enum CRBit crbit) {
+ return ((cr.code() * CRWIDTH) + crbit);
+ }
+
+ // ---------------------------------------------------------------------------
+ // Code generation
+
+ // Helper for unconditional branch to Label with update to save register
+ void b(Register r, Label* l) {
+ positions_recorder()->WriteRecordedPositions();
+ int32_t halfwords = branch_offset(l) / 2;
+ brasl(r, Operand(halfwords));
+ }
+
+ // Conditional Branch Instruction - Generates either BRC / BRCL
+ void branchOnCond(Condition c, int branch_offset, bool is_bound = false);
+
+ // Helpers for conditional branch to Label
+ void b(Condition cond, Label* l, Label::Distance dist = Label::kFar) {
+ branchOnCond(cond, branch_offset(l),
+ l->is_bound() || (dist == Label::kNear));
+ }
+
+ void bc_short(Condition cond, Label* l, Label::Distance dist = Label::kFar) {
+ b(cond, l, Label::kNear);
+ }
+ // Helpers for conditional branch to Label
+ void beq(Label* l, Label::Distance dist = Label::kFar) { b(eq, l, dist); }
+ void bne(Label* l, Label::Distance dist = Label::kFar) { b(ne, l, dist); }
+ void blt(Label* l, Label::Distance dist = Label::kFar) { b(lt, l, dist); }
+ void ble(Label* l, Label::Distance dist = Label::kFar) { b(le, l, dist); }
+ void bgt(Label* l, Label::Distance dist = Label::kFar) { b(gt, l, dist); }
+ void bge(Label* l, Label::Distance dist = Label::kFar) { b(ge, l, dist); }
+ void b(Label* l, Label::Distance dist = Label::kFar) { b(al, l, dist); }
+ void jmp(Label* l, Label::Distance dist = Label::kFar) { b(al, l, dist); }
+ void bunordered(Label* l, Label::Distance dist = Label::kFar) {
+ b(unordered, l, dist);
+ }
+ void bordered(Label* l, Label::Distance dist = Label::kFar) {
+ b(ordered, l, dist);
+ }
+
+ // Helpers for conditional indirect branch off register
+ void b(Condition cond, Register r) { bcr(cond, r); }
+ void beq(Register r) { b(eq, r); }
+ void bne(Register r) { b(ne, r); }
+ void blt(Register r) { b(lt, r); }
+ void ble(Register r) { b(le, r); }
+ void bgt(Register r) { b(gt, r); }
+ void bge(Register r) { b(ge, r); }
+ void b(Register r) { b(al, r); }
+ void jmp(Register r) { b(al, r); }
+ void bunordered(Register r) { b(unordered, r); }
+ void bordered(Register r) { b(ordered, r); }
+
+ // ---------------------------------------------------------------------------
+ // Code generation
+
+ // Insert the smallest number of nop instructions
+ // possible to align the pc offset to a multiple
+ // of m. m must be a power of 2 (>= 4).
+ void Align(int m);
+ // Insert the smallest number of zero bytes possible to align the pc offset
+ // to a mulitple of m. m must be a power of 2 (>= 2).
+ void DataAlign(int m);
+ // Aligns code to something that's optimal for a jump target for the platform.
+ void CodeTargetAlign();
+
+ void breakpoint(bool do_print) {
+ if (do_print) {
+ printf("DebugBreak is inserted to %p\n", pc_);
+ }
+#if V8_HOST_ARCH_64_BIT
+ int64_t value = reinterpret_cast<uint64_t>(&v8::base::OS::DebugBreak);
+ int32_t hi_32 = static_cast<int64_t>(value) >> 32;
+ int32_t lo_32 = static_cast<int32_t>(value);
+
+ iihf(r1, Operand(hi_32));
+ iilf(r1, Operand(lo_32));
+#else
+ iilf(r1, Operand(reinterpret_cast<uint32_t>(&v8::base::OS::DebugBreak)));
+#endif
+ basr(r14, r1);
+ }
+
+ void call(Handle<Code> target, RelocInfo::Mode rmode,
+ TypeFeedbackId ast_id = TypeFeedbackId::None());
+ void jump(Handle<Code> target, RelocInfo::Mode rmode, Condition cond);
+
+// S390 instruction generation
+#define I_FORM(name) void name(const Operand& i)
+
+#define RR_FORM(name) void name(Register r1, Register r2)
+
+#define RR2_FORM(name) void name(Condition m1, Register r2)
+
+#define RX_FORM(name) \
+ void name(Register r1, Register x2, Register b2, Disp d2); \
+ void name(Register r1, const MemOperand& opnd)
+
+#define RI1_FORM(name) void name(Register r, const Operand& i)
+
+#define RI2_FORM(name) void name(Condition m, const Operand& i)
+
+#define RIE_FORM(name) void name(Register r1, Register R3, const Operand& i)
+
+#define RIE_F_FORM(name) \
+ void name(Register r1, Register r2, const Operand& i3, const Operand& i4, \
+ const Operand& i5)
+
+#define RIL1_FORM(name) void name(Register r1, const Operand& i2)
+
+#define RIL2_FORM(name) void name(Condition m1, const Operand& i2)
+
+#define RXE_FORM(name) \
+ void name(Register r1, const MemOperand& opnd); \
+ void name(Register r1, Register b2, Register x2, Disp d2)
+
+#define RXF_FORM(name) \
+ void name(Register r1, Register r3, const MemOperand& opnd); \
+ void name(Register r1, Register r3, Register b2, Register x2, Disp d2)
+
+#define RXY_FORM(name) \
+ void name(Register r1, Register x2, Register b2, Disp d2); \
+ void name(Register r1, const MemOperand& opnd)
+
+#define RSI_FORM(name) void name(Register r1, Register r3, const Operand& i)
+
+#define RIS_FORM(name) \
+ void name(Register r1, Condition m3, Register b4, Disp d4, \
+ const Operand& i2); \
+ void name(Register r1, const Operand& i2, Condition m3, \
+ const MemOperand& opnd)
+
+#define SI_FORM(name) \
+ void name(const MemOperand& opnd, const Operand& i); \
+ void name(const Operand& i2, Register b1, Disp d1)
+
+#define SIL_FORM(name) \
+ void name(Register b1, Disp d1, const Operand& i2); \
+ void name(const MemOperand& opnd, const Operand& i2)
+
+#define RRE_FORM(name) void name(Register r1, Register r2)
+
+#define RRF1_FORM(name) void name(Register r1, Register r2, Register r3)
+
+#define RRF2_FORM(name) void name(Condition m1, Register r1, Register r2)
+
+#define RRF3_FORM(name) \
+ void name(Register r3, Condition m4, Register r1, Register r2)
+
+#define RS1_FORM(name) \
+ void name(Register r1, Register r3, const MemOperand& opnd); \
+ void name(Register r1, Register r3, Register b2, Disp d2)
+
+#define RS2_FORM(name) \
+ void name(Register r1, Condition m3, const MemOperand& opnd); \
+ void name(Register r1, Condition m3, Register b2, Disp d2)
+
+#define RSE_FORM(name) \
+ void name(Register r1, Register r3, const MemOperand& opnd); \
+ void name(Register r1, Register r3, Register b2, Disp d2)
+
+#define RSL_FORM(name) \
+ void name(Length l, Register b2, Disp d2); \
+ void name(const MemOperand& opnd)
+
+#define RSY1_FORM(name) \
+ void name(Register r1, Register r3, Register b2, Disp d2); \
+ void name(Register r1, Register r3, const MemOperand& opnd)
+
+#define RSY2_FORM(name) \
+ void name(Register r1, Condition m3, Register b2, Disp d2); \
+ void name(Register r1, Condition m3, const MemOperand& opnd)
+
+#define RRD_FORM(name) void name(Register r1, Register r3, Register r2)
+
+#define RRS_FORM(name) \
+ void name(Register r1, Register r2, Register b4, Disp d4, Condition m3); \
+ void name(Register r1, Register r2, Condition m3, const MemOperand& opnd)
+
+#define S_FORM(name) \
+ void name(Register b2, Disp d2); \
+ void name(const MemOperand& opnd)
+
+#define SIY_FORM(name) \
+ void name(const Operand& i2, Register b1, Disp d1); \
+ void name(const MemOperand& opnd, const Operand& i)
+
+#define SS1_FORM(name) \
+ void name(Register b1, Disp d1, Register b3, Disp d2, Length length); \
+ void name(const MemOperand& opnd1, const MemOperand& opnd2, Length length)
+
+#define SS2_FORM(name) \
+ void name(const MemOperand& opnd1, const MemOperand& opnd2, Length length1, \
+ Length length2); \
+ void name(Register b1, Disp d1, Register b2, Disp d2, Length l1, Length l2)
+
+#define SS3_FORM(name) \
+ void name(const MemOperand& opnd1, const MemOperand& opnd2, Length length); \
+ void name(const Operand& i3, Register b1, Disp d1, Register b2, Disp d2, \
+ Length l1)
+
+#define SS4_FORM(name) \
+ void name(const MemOperand& opnd1, const MemOperand& opnd2); \
+ void name(Register r1, Register r3, Register b1, Disp d1, Register b2, \
+ Disp d2)
+
+#define SS5_FORM(name) \
+ void name(const MemOperand& opnd1, const MemOperand& opnd2); \
+ void name(Register r1, Register r3, Register b3, Disp d2, Register b4, \
+ Disp d4)
+
+#define SSE_FORM(name) \
+ void name(Register b1, Disp d1, Register b2, Disp d2); \
+ void name(const MemOperand& opnd1, const MemOperand& opnd2)
+
+#define SSF_FORM(name) \
+ void name(Register r3, Register b1, Disp d1, Register b2, Disp d2); \
+ void name(Register r3, const MemOperand& opnd1, const MemOperand& opnd2)
+
+ // S390 instruction sets
+ RX_FORM(bc);
+ RR_FORM(bctr);
+ RX_FORM(cd);
+ RRE_FORM(cdr);
+ RXE_FORM(cdb);
+ RXE_FORM(ceb);
+ RRE_FORM(cefbr);
+ RXE_FORM(ddb);
+ RRE_FORM(ddbr);
+ SS1_FORM(ed);
+ RRE_FORM(epair);
+ RX_FORM(ex);
+ RRF2_FORM(fidbr);
+ RRE_FORM(flogr);
+ RX_FORM(ic_z);
+ RXY_FORM(icy);
+ RIL1_FORM(iihf);
+ RI1_FORM(iihh);
+ RI1_FORM(iihl);
+ RIL1_FORM(iilf);
+ RI1_FORM(iilh);
+ RI1_FORM(iill);
+ RRE_FORM(lcgr);
+ RR_FORM(lcr);
+ RX_FORM(le_z);
+ RXY_FORM(ley);
+ RIL1_FORM(llihf);
+ RIL1_FORM(llilf);
+ RRE_FORM(lngr);
+ RR_FORM(lnr);
+ RSY1_FORM(loc);
+ RXY_FORM(lrv);
+ RXY_FORM(lrvh);
+ RXE_FORM(mdb);
+ RRE_FORM(mdbr);
+ SS4_FORM(mvck);
+ SSF_FORM(mvcos);
+ SS4_FORM(mvcs);
+ SS1_FORM(mvn);
+ SS1_FORM(nc);
+ SI_FORM(ni);
+ RIL1_FORM(nihf);
+ RIL1_FORM(nilf);
+ RI1_FORM(nilh);
+ RI1_FORM(nill);
+ RIL1_FORM(oihf);
+ RIL1_FORM(oilf);
+ RI1_FORM(oill);
+ RRE_FORM(popcnt);
+ RXE_FORM(sdb);
+ RRE_FORM(sdbr);
+ RIL1_FORM(slfi);
+ RXY_FORM(slgf);
+ RIL1_FORM(slgfi);
+ RS1_FORM(srdl);
+ RX_FORM(ste);
+ RXY_FORM(stey);
+ RXY_FORM(strv);
+ RI1_FORM(tmll);
+ SS1_FORM(tr);
+ S_FORM(ts);
+ RIL1_FORM(xihf);
+ RIL1_FORM(xilf);
+
+ // Load Address Instructions
+ void la(Register r, const MemOperand& opnd);
+ void lay(Register r, const MemOperand& opnd);
+ void larl(Register r1, const Operand& opnd);
+ void larl(Register r, Label* l);
+
+ // Load Instructions
+ void lb(Register r, const MemOperand& src);
+ void lbr(Register r1, Register r2);
+ void lgb(Register r, const MemOperand& src);
+ void lgbr(Register r1, Register r2);
+ void lh(Register r, const MemOperand& src);
+ void lhy(Register r, const MemOperand& src);
+ void lhr(Register r1, Register r2);
+ void lgh(Register r, const MemOperand& src);
+ void lghr(Register r1, Register r2);
+ void l(Register r, const MemOperand& src);
+ void ly(Register r, const MemOperand& src);
+ void lr(Register r1, Register r2);
+ void lg(Register r, const MemOperand& src);
+ void lgr(Register r1, Register r2);
+ void lgf(Register r, const MemOperand& src);
+ void lgfr(Register r1, Register r2);
+ void lhi(Register r, const Operand& imm);
+ void lghi(Register r, const Operand& imm);
+
+ // Load And Test Instructions
+ void lt_z(Register r, const MemOperand& src);
+ void ltg(Register r, const MemOperand& src);
+ void ltr(Register r1, Register r2);
+ void ltgr(Register r1, Register r2);
+ void ltgfr(Register r1, Register r2);
+
+ // Load Logical Instructions
+ void llc(Register r, const MemOperand& src);
+ void llgc(Register r, const MemOperand& src);
+ void llgf(Register r, const MemOperand& src);
+ void llgfr(Register r1, Register r2);
+ void llh(Register r, const MemOperand& src);
+ void llgh(Register r, const MemOperand& src);
+ void llhr(Register r1, Register r2);
+ void llghr(Register r1, Register r2);
+
+ // Load Multiple Instructions
+ void lm(Register r1, Register r2, const MemOperand& src);
+ void lmy(Register r1, Register r2, const MemOperand& src);
+ void lmg(Register r1, Register r2, const MemOperand& src);
+
+ // Store Instructions
+ void st(Register r, const MemOperand& src);
+ void stc(Register r, const MemOperand& src);
+ void stcy(Register r, const MemOperand& src);
+ void stg(Register r, const MemOperand& src);
+ void sth(Register r, const MemOperand& src);
+ void sthy(Register r, const MemOperand& src);
+ void sty(Register r, const MemOperand& src);
+
+ // Store Multiple Instructions
+ void stm(Register r1, Register r2, const MemOperand& src);
+ void stmy(Register r1, Register r2, const MemOperand& src);
+ void stmg(Register r1, Register r2, const MemOperand& src);
+
+ // Compare Instructions
+ void c(Register r, const MemOperand& opnd);
+ void cy(Register r, const MemOperand& opnd);
+ void cr_z(Register r1, Register r2);
+ void cg(Register r, const MemOperand& opnd);
+ void cgr(Register r1, Register r2);
+ void ch(Register r, const MemOperand& opnd);
+ void chy(Register r, const MemOperand& opnd);
+ void chi(Register r, const Operand& opnd);
+ void cghi(Register r, const Operand& opnd);
+ void cfi(Register r, const Operand& opnd);
+ void cgfi(Register r, const Operand& opnd);
+
+ // Compare Logical Instructions
+ void cl(Register r, const MemOperand& opnd);
+ void cly(Register r, const MemOperand& opnd);
+ void clr(Register r1, Register r2);
+ void clg(Register r, const MemOperand& opnd);
+ void clgr(Register r1, Register r2);
+ void clfi(Register r, const Operand& opnd);
+ void clgfi(Register r, const Operand& opnd);
+ void cli(const MemOperand& mem, const Operand& imm);
+ void cliy(const MemOperand& mem, const Operand& imm);
+ void clc(const MemOperand& opnd1, const MemOperand& opnd2, Length length);
+
+ // Test Under Mask Instructions
+ void tm(const MemOperand& mem, const Operand& imm);
+ void tmy(const MemOperand& mem, const Operand& imm);
+
+ // Rotate Instructions
+ void rll(Register r1, Register r3, Register opnd);
+ void rll(Register r1, Register r3, const Operand& opnd);
+ void rll(Register r1, Register r3, Register r2, const Operand& opnd);
+ void rllg(Register r1, Register r3, const Operand& opnd);
+ void rllg(Register r1, Register r3, const Register opnd);
+ void rllg(Register r1, Register r3, Register r2, const Operand& opnd);
+
+ // Shift Instructions (32)
+ void sll(Register r1, Register opnd);
+ void sll(Register r1, const Operand& opnd);
+ void sllk(Register r1, Register r3, Register opnd);
+ void sllk(Register r1, Register r3, const Operand& opnd);
+ void srl(Register r1, Register opnd);
+ void srl(Register r1, const Operand& opnd);
+ void srlk(Register r1, Register r3, Register opnd);
+ void srlk(Register r1, Register r3, const Operand& opnd);
+ void sra(Register r1, Register opnd);
+ void sra(Register r1, const Operand& opnd);
+ void srak(Register r1, Register r3, Register opnd);
+ void srak(Register r1, Register r3, const Operand& opnd);
+ void sla(Register r1, Register opnd);
+ void sla(Register r1, const Operand& opnd);
+ void slak(Register r1, Register r3, Register opnd);
+ void slak(Register r1, Register r3, const Operand& opnd);
+
+ // Shift Instructions (64)
+ void sllg(Register r1, Register r3, const Operand& opnd);
+ void sllg(Register r1, Register r3, const Register opnd);
+ void srlg(Register r1, Register r3, const Operand& opnd);
+ void srlg(Register r1, Register r3, const Register opnd);
+ void srag(Register r1, Register r3, const Operand& opnd);
+ void srag(Register r1, Register r3, const Register opnd);
+ void srda(Register r1, const Operand& opnd);
+ void srdl(Register r1, const Operand& opnd);
+ void slag(Register r1, Register r3, const Operand& opnd);
+ void slag(Register r1, Register r3, const Register opnd);
+ void sldl(Register r1, Register b2, const Operand& opnd);
+ void srdl(Register r1, Register b2, const Operand& opnd);
+ void srda(Register r1, Register b2, const Operand& opnd);
+
+ // Rotate and Insert Selected Bits
+ void risbg(Register dst, Register src, const Operand& startBit,
+ const Operand& endBit, const Operand& shiftAmt,
+ bool zeroBits = true);
+ void risbgn(Register dst, Register src, const Operand& startBit,
+ const Operand& endBit, const Operand& shiftAmt,
+ bool zeroBits = true);
+
+ // Move Character (Mem to Mem)
+ void mvc(const MemOperand& opnd1, const MemOperand& opnd2, uint32_t length);
+
+ // Branch Instructions
+ void basr(Register r1, Register r2);
+ void bcr(Condition m, Register target);
+ void bct(Register r, const MemOperand& opnd);
+ void bctg(Register r, const MemOperand& opnd);
+ void bras(Register r, const Operand& opnd);
+ void brasl(Register r, const Operand& opnd);
+ void brc(Condition c, const Operand& opnd);
+ void brcl(Condition m, const Operand& opnd, bool isCodeTarget = false);
+ void brct(Register r1, const Operand& opnd);
+ void brctg(Register r1, const Operand& opnd);
+
+ // 32-bit Add Instructions
+ void a(Register r1, const MemOperand& opnd);
+ void ay(Register r1, const MemOperand& opnd);
+ void afi(Register r1, const Operand& opnd);
+ void ah(Register r1, const MemOperand& opnd);
+ void ahy(Register r1, const MemOperand& opnd);
+ void ahi(Register r1, const Operand& opnd);
+ void ahik(Register r1, Register r3, const Operand& opnd);
+ void ar(Register r1, Register r2);
+ void ark(Register r1, Register r2, Register r3);
+ void asi(const MemOperand&, const Operand&);
+
+ // 64-bit Add Instructions
+ void ag(Register r1, const MemOperand& opnd);
+ void agf(Register r1, const MemOperand& opnd);
+ void agfi(Register r1, const Operand& opnd);
+ void agfr(Register r1, Register r2);
+ void aghi(Register r1, const Operand& opnd);
+ void aghik(Register r1, Register r3, const Operand& opnd);
+ void agr(Register r1, Register r2);
+ void agrk(Register r1, Register r2, Register r3);
+ void agsi(const MemOperand&, const Operand&);
+
+ // 32-bit Add Logical Instructions
+ void al_z(Register r1, const MemOperand& opnd);
+ void aly(Register r1, const MemOperand& opnd);
+ void alfi(Register r1, const Operand& opnd);
+ void alr(Register r1, Register r2);
+ void alcr(Register r1, Register r2);
+ void alrk(Register r1, Register r2, Register r3);
+
+ // 64-bit Add Logical Instructions
+ void alg(Register r1, const MemOperand& opnd);
+ void algfi(Register r1, const Operand& opnd);
+ void algr(Register r1, Register r2);
+ void algrk(Register r1, Register r2, Register r3);
+
+ // 32-bit Subtract Instructions
+ void s(Register r1, const MemOperand& opnd);
+ void sy(Register r1, const MemOperand& opnd);
+ void sh(Register r1, const MemOperand& opnd);
+ void shy(Register r1, const MemOperand& opnd);
+ void sr(Register r1, Register r2);
+ void srk(Register r1, Register r2, Register r3);
+
+ // 64-bit Subtract Instructions
+ void sg(Register r1, const MemOperand& opnd);
+ void sgf(Register r1, const MemOperand& opnd);
+ void sgr(Register r1, Register r2);
+ void sgfr(Register r1, Register r2);
+ void sgrk(Register r1, Register r2, Register r3);
+
+ // 32-bit Subtract Logical Instructions
+ void sl(Register r1, const MemOperand& opnd);
+ void sly(Register r1, const MemOperand& opnd);
+ void slr(Register r1, Register r2);
+ void slrk(Register r1, Register r2, Register r3);
+ void slbr(Register r1, Register r2);
+
+ // 64-bit Subtract Logical Instructions
+ void slg(Register r1, const MemOperand& opnd);
+ void slgr(Register r1, Register r2);
+ void slgrk(Register r1, Register r2, Register r3);
+
+ // 32-bit Multiply Instructions
+ void m(Register r1, const MemOperand& opnd);
+ void mr_z(Register r1, Register r2);
+ void ml(Register r1, const MemOperand& opnd);
+ void mlr(Register r1, Register r2);
+ void ms(Register r1, const MemOperand& opnd);
+ void msy(Register r1, const MemOperand& opnd);
+ void msfi(Register r1, const Operand& opnd);
+ void msr(Register r1, Register r2);
+ void mh(Register r1, const MemOperand& opnd);
+ void mhy(Register r1, const MemOperand& opnd);
+ void mhi(Register r1, const Operand& opnd);
+
+ // 64-bit Multiply Instructions
+ void mlg(Register r1, const MemOperand& opnd);
+ void mlgr(Register r1, Register r2);
+ void mghi(Register r1, const Operand& opnd);
+ void msgfi(Register r1, const Operand& opnd);
+ void msg(Register r1, const MemOperand& opnd);
+ void msgr(Register r1, Register r2);
+
+ // 32-bit Divide Instructions
+ void d(Register r1, const MemOperand& opnd);
+ void dr(Register r1, Register r2);
+ void dl(Register r1, const MemOperand& opnd);
+ void dlr(Register r1, Register r2);
+
+ // 64-bit Divide Instructions
+ void dlgr(Register r1, Register r2);
+ void dsgr(Register r1, Register r2);
+
+ // Bitwise Instructions (AND / OR / XOR)
+ void n(Register r1, const MemOperand& opnd);
+ void ny(Register r1, const MemOperand& opnd);
+ void nr(Register r1, Register r2);
+ void nrk(Register r1, Register r2, Register r3);
+ void ng(Register r1, const MemOperand& opnd);
+ void ngr(Register r1, Register r2);
+ void ngrk(Register r1, Register r2, Register r3);
+ void o(Register r1, const MemOperand& opnd);
+ void oy(Register r1, const MemOperand& opnd);
+ void or_z(Register r1, Register r2);
+ void ork(Register r1, Register r2, Register r3);
+ void og(Register r1, const MemOperand& opnd);
+ void ogr(Register r1, Register r2);
+ void ogrk(Register r1, Register r2, Register r3);
+ void x(Register r1, const MemOperand& opnd);
+ void xy(Register r1, const MemOperand& opnd);
+ void xr(Register r1, Register r2);
+ void xrk(Register r1, Register r2, Register r3);
+ void xg(Register r1, const MemOperand& opnd);
+ void xgr(Register r1, Register r2);
+ void xgrk(Register r1, Register r2, Register r3);
+ void xc(const MemOperand& opnd1, const MemOperand& opnd2, Length length);
+
+ // Bitwise GPR <-> FPR Conversion Instructions
+ void lgdr(Register r1, DoubleRegister f2);
+ void ldgr(DoubleRegister f1, Register r2);
+
+ // Floating Point Load / Store Instructions
+ void ld(DoubleRegister r1, const MemOperand& opnd);
+ void ldy(DoubleRegister r1, const MemOperand& opnd);
+ void le_z(DoubleRegister r1, const MemOperand& opnd);
+ void ley(DoubleRegister r1, const MemOperand& opnd);
+ void ldr(DoubleRegister r1, DoubleRegister r2);
+ void ltdbr(DoubleRegister r1, DoubleRegister r2);
+ void ltebr(DoubleRegister r1, DoubleRegister r2);
+ void std(DoubleRegister r1, const MemOperand& opnd);
+ void stdy(DoubleRegister r1, const MemOperand& opnd);
+ void ste(DoubleRegister r1, const MemOperand& opnd);
+ void stey(DoubleRegister r1, const MemOperand& opnd);
+
+ // Floating Point Load Rounded/Positive Instructions
+ void ledbr(DoubleRegister r1, DoubleRegister r2);
+ void ldebr(DoubleRegister r1, DoubleRegister r2);
+ void lpebr(DoubleRegister r1, DoubleRegister r2);
+ void lpdbr(DoubleRegister r1, DoubleRegister r2);
+
+ // Floating <-> Fixed Point Conversion Instructions
+ void cdlfbr(Condition m3, Condition m4, DoubleRegister fltReg,
+ Register fixReg);
+ void cdlgbr(Condition m3, Condition m4, DoubleRegister fltReg,
+ Register fixReg);
+ void celgbr(Condition m3, Condition m4, DoubleRegister fltReg,
+ Register fixReg);
+ void celfbr(Condition m3, Condition m4, DoubleRegister fltReg,
+ Register fixReg);
+ void clfdbr(Condition m3, Condition m4, Register fixReg,
+ DoubleRegister fltReg);
+ void clfebr(Condition m3, Condition m4, Register fixReg,
+ DoubleRegister fltReg);
+ void clgdbr(Condition m3, Condition m4, Register fixReg,
+ DoubleRegister fltReg);
+ void clgebr(Condition m3, Condition m4, Register fixReg,
+ DoubleRegister fltReg);
+ void cfdbr(Condition m, Register fixReg, DoubleRegister fltReg);
+ void cdfbr(DoubleRegister fltReg, Register fixReg);
+ void cgebr(Condition m, Register fixReg, DoubleRegister fltReg);
+ void cgdbr(Condition m, Register fixReg, DoubleRegister fltReg);
+ void cegbr(DoubleRegister fltReg, Register fixReg);
+ void cdgbr(DoubleRegister fltReg, Register fixReg);
+ void cfebr(Condition m3, Register fixReg, DoubleRegister fltReg);
+ void cefbr(DoubleRegister fltReg, Register fixReg);
+
+ // Floating Point Compare Instructions
+ void cebr(DoubleRegister r1, DoubleRegister r2);
+ void cdb(DoubleRegister r1, const MemOperand& opnd);
+ void cdbr(DoubleRegister r1, DoubleRegister r2);
+
+ // Floating Point Arithmetic Instructions
+ void aebr(DoubleRegister r1, DoubleRegister r2);
+ void adb(DoubleRegister r1, const MemOperand& opnd);
+ void adbr(DoubleRegister r1, DoubleRegister r2);
+ void lzdr(DoubleRegister r1);
+ void sebr(DoubleRegister r1, DoubleRegister r2);
+ void sdb(DoubleRegister r1, const MemOperand& opnd);
+ void sdbr(DoubleRegister r1, DoubleRegister r2);
+ void meebr(DoubleRegister r1, DoubleRegister r2);
+ void mdb(DoubleRegister r1, const MemOperand& opnd);
+ void mdbr(DoubleRegister r1, DoubleRegister r2);
+ void debr(DoubleRegister r1, DoubleRegister r2);
+ void ddb(DoubleRegister r1, const MemOperand& opnd);
+ void ddbr(DoubleRegister r1, DoubleRegister r2);
+ void madbr(DoubleRegister r1, DoubleRegister r2, DoubleRegister r3);
+ void msdbr(DoubleRegister r1, DoubleRegister r2, DoubleRegister r3);
+ void sqebr(DoubleRegister r1, DoubleRegister r2);
+ void sqdb(DoubleRegister r1, const MemOperand& opnd);
+ void sqdbr(DoubleRegister r1, DoubleRegister r2);
+ void lcdbr(DoubleRegister r1, DoubleRegister r2);
+ void ldeb(DoubleRegister r1, const MemOperand& opnd);
+
+ enum FIDBRA_MASK3 {
+ FIDBRA_CURRENT_ROUNDING_MODE = 0,
+ FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0 = 1,
+ // ...
+ FIDBRA_ROUND_TOWARD_0 = 5,
+ FIDBRA_ROUND_TOWARD_POS_INF = 6,
+ FIDBRA_ROUND_TOWARD_NEG_INF = 7
+ };
+ void fiebra(DoubleRegister d1, DoubleRegister d2, FIDBRA_MASK3 m3);
+ void fidbra(DoubleRegister d1, DoubleRegister d2, FIDBRA_MASK3 m3);
+
+ // Move integer
+ void mvhi(const MemOperand& opnd1, const Operand& i2);
+ void mvghi(const MemOperand& opnd1, const Operand& i2);
+
+ // Exception-generating instructions and debugging support
+ void stop(const char* msg, Condition cond = al,
+ int32_t code = kDefaultStopCode, CRegister cr = cr7);
+
+ void bkpt(uint32_t imm16); // v5 and above
+
+ // Different nop operations are used by the code generator to detect certain
+ // states of the generated code.
+ enum NopMarkerTypes {
+ NON_MARKING_NOP = 0,
+ GROUP_ENDING_NOP,
+ DEBUG_BREAK_NOP,
+ // IC markers.
+ PROPERTY_ACCESS_INLINED,
+ PROPERTY_ACCESS_INLINED_CONTEXT,
+ PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
+ // Helper values.
+ LAST_CODE_MARKER,
+ FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
+ };
+
+ void nop(int type = 0); // 0 is the default non-marking type.
+
+ // Check the code size generated from label to here.
+ int SizeOfCodeGeneratedSince(Label* label) {
+ return pc_offset() - label->pos();
+ }
+
+ // Debugging
+
+ // Mark generator continuation.
+ void RecordGeneratorContinuation();
+
+ // Mark address of a debug break slot.
+ void RecordDebugBreakSlot(RelocInfo::Mode mode);
+
+ // Record the AST id of the CallIC being compiled, so that it can be placed
+ // in the relocation information.
+ void SetRecordedAstId(TypeFeedbackId ast_id) { recorded_ast_id_ = ast_id; }
+
+ TypeFeedbackId RecordedAstId() {
+ // roohack - another issue??? DCHECK(!recorded_ast_id_.IsNone());
+ return recorded_ast_id_;
+ }
+
+ void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); }
+
+ // Record a comment relocation entry that can be used by a disassembler.
+ // Use --code-comments to enable.
+ void RecordComment(const char* msg);
+
+ // Record a deoptimization reason that can be used by a log or cpu profiler.
+ // Use --trace-deopt to enable.
+ void RecordDeoptReason(const int reason, int raw_position);
+
+ // Writes a single byte or word of data in the code stream. Used
+ // for inline tables, e.g., jump-tables.
+ void db(uint8_t data);
+ void dd(uint32_t data);
+ void dq(uint64_t data);
+ void dp(uintptr_t data);
+
+ AssemblerPositionsRecorder* positions_recorder() {
+ return &positions_recorder_;
+ }
+
+ void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
+ ConstantPoolEntry::Access access,
+ ConstantPoolEntry::Type type) {
+ // No embedded constant pool support.
+ UNREACHABLE();
+ }
+
+ // Read/patch instructions
+ SixByteInstr instr_at(int pos) {
+ return Instruction::InstructionBits(buffer_ + pos);
+ }
+ template <typename T>
+ void instr_at_put(int pos, T instr) {
+ Instruction::SetInstructionBits<T>(buffer_ + pos, instr);
+ }
+
+ // Decodes instruction at pos, and returns its length
+ int32_t instr_length_at(int pos) {
+ return Instruction::InstructionLength(buffer_ + pos);
+ }
+
+ static SixByteInstr instr_at(byte* pc) {
+ return Instruction::InstructionBits(pc);
+ }
+
+ static Condition GetCondition(Instr instr);
+
+ static bool IsBranch(Instr instr);
+#if V8_TARGET_ARCH_S390X
+ static bool Is64BitLoadIntoIP(SixByteInstr instr1, SixByteInstr instr2);
+#else
+ static bool Is32BitLoadIntoIP(SixByteInstr instr);
+#endif
+
+ static bool IsCmpRegister(Instr instr);
+ static bool IsCmpImmediate(Instr instr);
+ static bool IsNop(SixByteInstr instr, int type = NON_MARKING_NOP);
+
+ // The code currently calls CheckBuffer() too often. This has the side
+ // effect of randomly growing the buffer in the middle of multi-instruction
+ // sequences.
+ //
+ // This function allows outside callers to check and grow the buffer
+ void EnsureSpaceFor(int space_needed);
+
+ void EmitRelocations();
+ void emit_label_addr(Label* label);
+
+ public:
+ byte* buffer_pos() const { return buffer_; }
+
+ protected:
+ // Relocation for a type-recording IC has the AST id added to it. This
+ // member variable is a way to pass the information from the call site to
+ // the relocation info.
+ TypeFeedbackId recorded_ast_id_;
+
+ int buffer_space() const { return reloc_info_writer.pos() - pc_; }
+
+ // Decode instruction(s) at pos and return backchain to previous
+ // label reference or kEndOfChain.
+ int target_at(int pos);
+
+ // Patch instruction(s) at pos to target target_pos (e.g. branch)
+ void target_at_put(int pos, int target_pos, bool* is_branch = nullptr);
+
+ // Record reloc info for current pc_
+ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+
+ private:
+ // Code generation
+ // The relocation writer's position is at least kGap bytes below the end of
+ // the generated instructions. This is so that multi-instruction sequences do
+ // not have to check for overflow. The same is true for writes of large
+ // relocation info entries.
+ static const int kGap = 32;
+
+ // Relocation info generation
+ // Each relocation is encoded as a variable size value
+ static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
+ RelocInfoWriter reloc_info_writer;
+ std::vector<DeferredRelocInfo> relocations_;
+
+ // The bound position, before this we cannot do instruction elimination.
+ int last_bound_pos_;
+
+ // Code emission
+ inline void CheckBuffer();
+ void GrowBuffer(int needed = 0);
+ inline void TrackBranch();
+ inline void UntrackBranch();
+
+ inline int32_t emit_code_target(
+ Handle<Code> target, RelocInfo::Mode rmode,
+ TypeFeedbackId ast_id = TypeFeedbackId::None());
+
+ // Helpers to emit binary encoding of 2/4/6 byte instructions.
+ inline void emit2bytes(uint16_t x);
+ inline void emit4bytes(uint32_t x);
+ inline void emit6bytes(uint64_t x);
+
+ // Helpers to emit binary encoding for various instruction formats.
+
+ inline void rr_form(Opcode op, Register r1, Register r2);
+ inline void rr_form(Opcode op, DoubleRegister r1, DoubleRegister r2);
+ inline void rr_form(Opcode op, Condition m1, Register r2);
+ inline void rr2_form(uint8_t op, Condition m1, Register r2);
+
+ inline void rx_form(Opcode op, Register r1, Register x2, Register b2,
+ Disp d2);
+ inline void rx_form(Opcode op, DoubleRegister r1, Register x2, Register b2,
+ Disp d2);
+
+ inline void ri_form(Opcode op, Register r1, const Operand& i2);
+ inline void ri_form(Opcode op, Condition m1, const Operand& i2);
+
+ inline void rie_form(Opcode op, Register r1, Register r3, const Operand& i2);
+ inline void rie_f_form(Opcode op, Register r1, Register r2, const Operand& i3,
+ const Operand& i4, const Operand& i5);
+
+ inline void ril_form(Opcode op, Register r1, const Operand& i2);
+ inline void ril_form(Opcode op, Condition m1, const Operand& i2);
+
+ inline void ris_form(Opcode op, Register r1, Condition m3, Register b4,
+ Disp d4, const Operand& i2);
+
+ inline void rrd_form(Opcode op, Register r1, Register r3, Register r2);
+
+ inline void rre_form(Opcode op, Register r1, Register r2);
+ inline void rre_form(Opcode op, DoubleRegister r1, DoubleRegister r2);
+
+ inline void rrf1_form(Opcode op, Register r1, Register r2, Register r3);
+ inline void rrf1_form(uint32_t x);
+ inline void rrf2_form(uint32_t x);
+ inline void rrf3_form(uint32_t x);
+ inline void rrfe_form(Opcode op, Condition m3, Condition m4, Register r1,
+ Register r2);
+
+ inline void rrs_form(Opcode op, Register r1, Register r2, Register b4,
+ Disp d4, Condition m3);
+
+ inline void rs_form(Opcode op, Register r1, Condition m3, Register b2,
+ const Disp d2);
+ inline void rs_form(Opcode op, Register r1, Register r3, Register b2,
+ const Disp d2);
+
+ inline void rsi_form(Opcode op, Register r1, Register r3, const Operand& i2);
+ inline void rsl_form(Opcode op, Length l1, Register b2, Disp d2);
+
+ inline void rsy_form(Opcode op, Register r1, Register r3, Register b2,
+ const Disp d2);
+ inline void rsy_form(Opcode op, Register r1, Condition m3, Register b2,
+ const Disp d2);
+
+ inline void rxe_form(Opcode op, Register r1, Register x2, Register b2,
+ Disp d2);
+
+ inline void rxf_form(Opcode op, Register r1, Register r3, Register b2,
+ Register x2, Disp d2);
+
+ inline void rxy_form(Opcode op, Register r1, Register x2, Register b2,
+ Disp d2);
+ inline void rxy_form(Opcode op, DoubleRegister r1, Register x2, Register b2,
+ Disp d2);
+
+ inline void s_form(Opcode op, Register b1, Disp d2);
+
+ inline void si_form(Opcode op, const Operand& i2, Register b1, Disp d1);
+ inline void siy_form(Opcode op, const Operand& i2, Register b1, Disp d1);
+
+ inline void sil_form(Opcode op, Register b1, Disp d1, const Operand& i2);
+
+ inline void ss_form(Opcode op, Length l, Register b1, Disp d1, Register b2,
+ Disp d2);
+ inline void ss_form(Opcode op, Length l1, Length l2, Register b1, Disp d1,
+ Register b2, Disp d2);
+ inline void ss_form(Opcode op, Length l1, const Operand& i3, Register b1,
+ Disp d1, Register b2, Disp d2);
+ inline void ss_form(Opcode op, Register r1, Register r2, Register b1, Disp d1,
+ Register b2, Disp d2);
+ inline void sse_form(Opcode op, Register b1, Disp d1, Register b2, Disp d2);
+ inline void ssf_form(Opcode op, Register r3, Register b1, Disp d1,
+ Register b2, Disp d2);
+
+ // Labels
+ void print(Label* L);
+ int max_reach_from(int pos);
+ void bind_to(Label* L, int pos);
+ void next(Label* L);
+
+ friend class RegExpMacroAssemblerS390;
+ friend class RelocInfo;
+ friend class CodePatcher;
+
+ List<Handle<Code> > code_targets_;
+
+ AssemblerPositionsRecorder positions_recorder_;
+ friend class AssemblerPositionsRecorder;
+ friend class EnsureSpace;
+};
+
+class EnsureSpace BASE_EMBEDDED {
+ public:
+ explicit EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_S390_ASSEMBLER_S390_H_
diff --git a/deps/v8/src/s390/builtins-s390.cc b/deps/v8/src/s390/builtins-s390.cc
new file mode 100644
index 0000000000..12b52c123c
--- /dev/null
+++ b/deps/v8/src/s390/builtins-s390.cc
@@ -0,0 +1,2555 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/codegen.h"
+#include "src/debug/debug.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen/full-codegen.h"
+#include "src/runtime/runtime.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id,
+ BuiltinExtraArguments extra_args) {
+ // ----------- S t a t e -------------
+ // -- r2 : number of arguments excluding receiver
+ // -- r3 : target
+ // -- r5 : new.target
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[4 * (argc - 1)] : first argument
+ // -- sp[4 * argc] : receiver
+ // -----------------------------------
+ __ AssertFunction(r3);
+
+ // Make sure we operate in the context of the called function (for example
+ // ConstructStubs implemented in C++ will be run in the context of the caller
+ // instead of the callee, due to the way that [[Construct]] is defined for
+ // ordinary functions).
+ __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
+
+ // Insert extra arguments.
+ int num_extra_args = 0;
+ switch (extra_args) {
+ case BuiltinExtraArguments::kTarget:
+ __ Push(r3);
+ ++num_extra_args;
+ break;
+ case BuiltinExtraArguments::kNewTarget:
+ __ Push(r5);
+ ++num_extra_args;
+ break;
+ case BuiltinExtraArguments::kTargetAndNewTarget:
+ __ Push(r3, r5);
+ num_extra_args += 2;
+ break;
+ case BuiltinExtraArguments::kNone:
+ break;
+ }
+
+ // JumpToExternalReference expects r2 to contain the number of arguments
+ // including the receiver and the extra arguments.
+ __ AddP(r2, r2, Operand(num_extra_args + 1));
+
+ __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
+}
+
+// Load the built-in InternalArray function from the current context.
+static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
+ Register result) {
+ // Load the InternalArray function from the current native context.
+ __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
+}
+
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+ // Load the Array function from the current native context.
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
+}
+
+void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : number of arguments
+ // -- lr : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+ Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+ // Get the InternalArray function.
+ GenerateLoadInternalArrayFunction(masm, r3);
+
+ if (FLAG_debug_code) {
+ // Initial map for the builtin InternalArray functions should be maps.
+ __ LoadP(r4, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
+ __ TestIfSmi(r4);
+ __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction, cr0);
+ __ CompareObjectType(r4, r5, r6, MAP_TYPE);
+ __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction);
+ }
+
+ // Run the native code for the InternalArray function called as a normal
+ // function.
+ // tail call a stub
+ InternalArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+}
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : number of arguments
+ // -- lr : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+ Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+ // Get the Array function.
+ GenerateLoadArrayFunction(masm, r3);
+
+ if (FLAG_debug_code) {
+ // Initial map for the builtin Array functions should be maps.
+ __ LoadP(r4, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
+ __ TestIfSmi(r4);
+ __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
+ __ CompareObjectType(r4, r5, r6, MAP_TYPE);
+ __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+ }
+
+ __ LoadRR(r5, r3);
+ // Run the native code for the Array function called as a normal function.
+ // tail call a stub
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ ArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+}
+
+// static
+void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
+ // ----------- S t a t e -------------
+ // -- r2 : number of arguments
+ // -- lr : return address
+ // -- sp[(argc - n) * 8] : arg[n] (zero-based)
+ // -- sp[(argc + 1) * 8] : receiver
+ // -----------------------------------
+ Condition const cond_done = (kind == MathMaxMinKind::kMin) ? lt : gt;
+ Heap::RootListIndex const root_index =
+ (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
+ : Heap::kMinusInfinityValueRootIndex;
+ DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? d2 : d1;
+
+ // Load the accumulator with the default return value (either -Infinity or
+ // +Infinity), with the tagged value in r3 and the double value in d1.
+ __ LoadRoot(r3, root_index);
+ __ LoadDouble(d1, FieldMemOperand(r3, HeapNumber::kValueOffset));
+
+ // Setup state for loop
+ // r4: address of arg[0] + kPointerSize
+ // r5: number of slots to drop at exit (arguments + receiver)
+ __ ShiftLeftP(r4, r2, Operand(kPointerSizeLog2));
+ __ AddP(r4, sp, r4);
+ __ AddP(r5, r2, Operand(1));
+
+ Label done_loop, loop;
+ __ bind(&loop);
+ {
+ // Check if all parameters done.
+ __ CmpLogicalP(r4, sp);
+ __ ble(&done_loop);
+
+ // Load the next parameter tagged value into r2.
+ __ lay(r4, MemOperand(r4, -kPointerSize));
+ __ LoadP(r2, MemOperand(r4));
+
+ // Load the double value of the parameter into d2, maybe converting the
+ // parameter to a number first using the ToNumberStub if necessary.
+ Label convert, convert_smi, convert_number, done_convert;
+ __ bind(&convert);
+ __ JumpIfSmi(r2, &convert_smi);
+ __ LoadP(r6, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ JumpIfRoot(r6, Heap::kHeapNumberMapRootIndex, &convert_number);
+ {
+ // Parameter is not a Number, use the ToNumberStub to convert it.
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(r5);
+ __ Push(r3, r4, r5);
+ ToNumberStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ Pop(r3, r4, r5);
+ __ SmiUntag(r5);
+ {
+ // Restore the double accumulator value (d1).
+ Label done_restore;
+ __ SmiToDouble(d1, r3);
+ __ JumpIfSmi(r3, &done_restore);
+ __ LoadDouble(d1, FieldMemOperand(r3, HeapNumber::kValueOffset));
+ __ bind(&done_restore);
+ }
+ }
+ __ b(&convert);
+ __ bind(&convert_number);
+ __ LoadDouble(d2, FieldMemOperand(r2, HeapNumber::kValueOffset));
+ __ b(&done_convert);
+ __ bind(&convert_smi);
+ __ SmiToDouble(d2, r2);
+ __ bind(&done_convert);
+
+ // Perform the actual comparison with the accumulator value on the left hand
+ // side (d1) and the next parameter value on the right hand side (d2).
+ Label compare_nan, compare_swap;
+ __ cdbr(d1, d2);
+ __ bunordered(&compare_nan);
+ __ b(cond_done, &loop);
+ __ b(CommuteCondition(cond_done), &compare_swap);
+
+ // Left and right hand side are equal, check for -0 vs. +0.
+ __ TestDoubleIsMinusZero(reg, r6, r7);
+ __ bne(&loop);
+
+ // Update accumulator. Result is on the right hand side.
+ __ bind(&compare_swap);
+ __ ldr(d1, d2);
+ __ LoadRR(r3, r2);
+ __ b(&loop);
+
+ // At least one side is NaN, which means that the result will be NaN too.
+ // We still need to visit the rest of the arguments.
+ __ bind(&compare_nan);
+ __ LoadRoot(r3, Heap::kNanValueRootIndex);
+ __ LoadDouble(d1, FieldMemOperand(r3, HeapNumber::kValueOffset));
+ __ b(&loop);
+ }
+
+ __ bind(&done_loop);
+ __ LoadRR(r2, r3);
+ __ Drop(r5);
+ __ Ret();
+}
+
+// static
+void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : number of arguments
+ // -- r3 : constructor function
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ // 1. Load the first argument into r2 and get rid of the rest (including the
+ // receiver).
+ Label no_arguments;
+ {
+ __ CmpP(r2, Operand::Zero());
+ __ beq(&no_arguments);
+ __ SubP(r2, r2, Operand(1));
+ __ ShiftLeftP(r2, r2, Operand(kPointerSizeLog2));
+ __ la(sp, MemOperand(sp, r2));
+ __ LoadP(r2, MemOperand(sp));
+ __ Drop(2);
+ }
+
+ // 2a. Convert the first argument to a number.
+ ToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+
+ // 2b. No arguments, return +0.
+ __ bind(&no_arguments);
+ __ LoadSmiLiteral(r2, Smi::FromInt(0));
+ __ Ret(1);
+}
+
+// static
+void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : number of arguments
+ // -- r3 : constructor function
+ // -- r5 : new target
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ // 1. Make sure we operate in the context of the called function.
+ __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into r4 and get rid of the rest (including the
+ // receiver).
+ {
+ Label no_arguments, done;
+ __ CmpP(r2, Operand::Zero());
+ __ beq(&no_arguments);
+ __ SubP(r2, r2, Operand(1));
+ __ ShiftLeftP(r4, r2, Operand(kPointerSizeLog2));
+ __ la(sp, MemOperand(sp, r4));
+ __ LoadP(r4, MemOperand(sp));
+ __ Drop(2);
+ __ b(&done);
+ __ bind(&no_arguments);
+ __ LoadSmiLiteral(r4, Smi::FromInt(0));
+ __ Drop(1);
+ __ bind(&done);
+ }
+
+ // 3. Make sure r4 is a number.
+ {
+ Label done_convert;
+ __ JumpIfSmi(r4, &done_convert);
+ __ CompareObjectType(r4, r6, r6, HEAP_NUMBER_TYPE);
+ __ beq(&done_convert);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r3, r5);
+ __ LoadRR(r2, r4);
+ ToNumberStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ LoadRR(r4, r2);
+ __ Pop(r3, r5);
+ }
+ __ bind(&done_convert);
+ }
+
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ CmpP(r3, r5);
+ __ bne(&new_object);
+
+ // 5. Allocate a JSValue wrapper for the number.
+ __ AllocateJSValue(r2, r3, r4, r6, r7, &new_object);
+ __ Ret();
+
+ // 6. Fallback to the runtime to create new object.
+ __ bind(&new_object);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r4); // first argument
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ Pop(r4);
+ }
+ __ StoreP(r4, FieldMemOperand(r2, JSValue::kValueOffset), r0);
+ __ Ret();
+}
+
+// static
+void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : number of arguments
+ // -- r3 : constructor function
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+ // 1. Load the first argument into r2 and get rid of the rest (including the
+ // receiver).
+ Label no_arguments;
+ {
+ __ CmpP(r2, Operand::Zero());
+ __ beq(&no_arguments);
+ __ SubP(r2, r2, Operand(1));
+ __ ShiftLeftP(r2, r2, Operand(kPointerSizeLog2));
+ __ lay(sp, MemOperand(sp, r2));
+ __ LoadP(r2, MemOperand(sp));
+ __ Drop(2);
+ }
+
+ // 2a. At least one argument, return r2 if it's a string, otherwise
+ // dispatch to appropriate conversion.
+ Label to_string, symbol_descriptive_string;
+ {
+ __ JumpIfSmi(r2, &to_string);
+ STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
+ __ CompareObjectType(r2, r3, r3, FIRST_NONSTRING_TYPE);
+ __ bgt(&to_string);
+ __ beq(&symbol_descriptive_string);
+ __ Ret();
+ }
+
+ // 2b. No arguments, return the empty string (and pop the receiver).
+ __ bind(&no_arguments);
+ {
+ __ LoadRoot(r2, Heap::kempty_stringRootIndex);
+ __ Ret(1);
+ }
+
+ // 3a. Convert r2 to a string.
+ __ bind(&to_string);
+ {
+ ToStringStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+ }
+ // 3b. Convert symbol in r2 to a string.
+ __ bind(&symbol_descriptive_string);
+ {
+ __ Push(r2);
+ __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
+ }
+}
+
+// static
+void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : number of arguments
+ // -- r3 : constructor function
+ // -- r5 : new target
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ // 1. Make sure we operate in the context of the called function.
+ __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into r4 and get rid of the rest (including the
+ // receiver).
+ {
+ Label no_arguments, done;
+ __ CmpP(r2, Operand::Zero());
+ __ beq(&no_arguments);
+ __ SubP(r2, r2, Operand(1));
+ __ ShiftLeftP(r4, r2, Operand(kPointerSizeLog2));
+ __ lay(sp, MemOperand(sp, r4));
+ __ LoadP(r4, MemOperand(sp));
+ __ Drop(2);
+ __ b(&done);
+ __ bind(&no_arguments);
+ __ LoadRoot(r4, Heap::kempty_stringRootIndex);
+ __ Drop(1);
+ __ bind(&done);
+ }
+
+ // 3. Make sure r4 is a string.
+ {
+ Label convert, done_convert;
+ __ JumpIfSmi(r4, &convert);
+ __ CompareObjectType(r4, r6, r6, FIRST_NONSTRING_TYPE);
+ __ blt(&done_convert);
+ __ bind(&convert);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ ToStringStub stub(masm->isolate());
+ __ Push(r3, r5);
+ __ LoadRR(r2, r4);
+ __ CallStub(&stub);
+ __ LoadRR(r4, r2);
+ __ Pop(r3, r5);
+ }
+ __ bind(&done_convert);
+ }
+
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ CmpP(r3, r5);
+ __ bne(&new_object);
+
+ // 5. Allocate a JSValue wrapper for the string.
+ __ AllocateJSValue(r2, r3, r4, r6, r7, &new_object);
+ __ Ret();
+
+ // 6. Fallback to the runtime to create new object.
+ __ bind(&new_object);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r4); // first argument
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ Pop(r4);
+ }
+ __ StoreP(r4, FieldMemOperand(r2, JSValue::kValueOffset), r0);
+ __ Ret();
+}
+
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+ __ LoadP(ip, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
+ __ AddP(ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(ip);
+}
+
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
+ // ----------- S t a t e -------------
+ // -- r2 : argument count (preserved for callee)
+ // -- r3 : target function (preserved for callee)
+ // -- r5 : new target (preserved for callee)
+ // -----------------------------------
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ // Push the number of arguments to the callee.
+ // Push a copy of the target function and the new target.
+ // Push function as parameter to the runtime call.
+ __ SmiTag(r2);
+ __ Push(r2, r3, r5, r3);
+
+ __ CallRuntime(function_id, 1);
+ __ LoadRR(r4, r2);
+
+ // Restore target function and new target.
+ __ Pop(r2, r3, r5);
+ __ SmiUntag(r2);
+ }
+ __ AddP(ip, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(ip);
+}
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+ // stack limit as a cue for an interrupt signal.
+ Label ok;
+ __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
+ __ bge(&ok, Label::kNear);
+
+ GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+
+ __ bind(&ok);
+ GenerateTailCallToSharedCode(masm);
+}
+
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+ bool is_api_function,
+ bool create_implicit_receiver,
+ bool check_derived_construct) {
+ // ----------- S t a t e -------------
+ // -- r2 : number of arguments
+ // -- r3 : constructor function
+ // -- r4 : allocation site or undefined
+ // -- r5 : new target
+ // -- cp : context
+ // -- lr : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ Isolate* isolate = masm->isolate();
+
+ // Enter a construct frame.
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
+
+ // Preserve the incoming parameters on the stack.
+ __ AssertUndefinedOrAllocationSite(r4, r6);
+
+ if (!create_implicit_receiver) {
+ __ SmiTag(r6, r2);
+ __ LoadAndTestP(r6, r6);
+ __ Push(cp, r4, r6);
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ } else {
+ __ SmiTag(r2);
+ __ Push(cp, r4, r2);
+
+ // Allocate the new receiver object.
+ __ Push(r3, r5);
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ LoadRR(r6, r2);
+ __ Pop(r3, r5);
+
+ // ----------- S t a t e -------------
+ // -- r3: constructor function
+ // -- r5: new target
+ // -- r6: newly allocated object
+ // -----------------------------------
+
+ // Retrieve smi-tagged arguments count from the stack.
+ __ LoadP(r2, MemOperand(sp));
+ __ SmiUntag(r2);
+ __ LoadAndTestP(r2, r2);
+
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ Push(r6, r6);
+ }
+
+ // Set up pointer to last argument.
+ __ la(r4, MemOperand(fp, StandardFrameConstants::kCallerSPOffset));
+
+ // Copy arguments and receiver to the expression stack.
+ // r2: number of arguments
+ // r3: constructor function
+ // r4: address of last argument (caller sp)
+ // r5: new target
+ // cr0: condition indicating whether r2 is zero
+ // sp[0]: receiver
+ // sp[1]: receiver
+ // sp[2]: number of arguments (smi-tagged)
+ Label loop, no_args;
+ __ beq(&no_args);
+ __ ShiftLeftP(ip, r2, Operand(kPointerSizeLog2));
+ __ SubP(sp, sp, ip);
+ __ LoadRR(r1, r2);
+ __ bind(&loop);
+ __ lay(ip, MemOperand(ip, -kPointerSize));
+ __ LoadP(r0, MemOperand(ip, r4));
+ __ StoreP(r0, MemOperand(ip, sp));
+ __ BranchOnCount(r1, &loop);
+ __ bind(&no_args);
+
+ // Call the function.
+ // r2: number of arguments
+ // r3: constructor function
+ // r5: new target
+ if (is_api_function) {
+ __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
+ Handle<Code> code = masm->isolate()->builtins()->HandleApiCallConstruct();
+ __ Call(code, RelocInfo::CODE_TARGET);
+ } else {
+ ParameterCount actual(r2);
+ __ InvokeFunction(r3, r5, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
+ }
+
+ // Store offset of return address for deoptimizer.
+ if (create_implicit_receiver && !is_api_function) {
+ masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore context from the frame.
+ // r2: result
+ // sp[0]: receiver
+ // sp[1]: number of arguments (smi-tagged)
+ __ LoadP(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+
+ if (create_implicit_receiver) {
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ // r2: result
+ // sp[0]: receiver
+ // sp[1]: new.target
+ // sp[2]: number of arguments (smi-tagged)
+ __ JumpIfSmi(r2, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ __ CompareObjectType(r2, r3, r5, FIRST_JS_RECEIVER_TYPE);
+ __ bge(&exit);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ LoadP(r2, MemOperand(sp));
+
+ // Remove receiver from the stack, remove caller arguments, and
+ // return.
+ __ bind(&exit);
+ // r2: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: number of arguments (smi-tagged)
+ __ LoadP(r3, MemOperand(sp, 1 * kPointerSize));
+ } else {
+ __ LoadP(r3, MemOperand(sp));
+ }
+
+ // Leave construct frame.
+ }
+
+ // ES6 9.2.2. Step 13+
+ // Check that the result is not a Smi, indicating that the constructor result
+ // from a derived class is neither undefined nor an Object.
+ if (check_derived_construct) {
+ Label dont_throw;
+ __ JumpIfNotSmi(r2, &dont_throw);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
+ }
+ __ bind(&dont_throw);
+ }
+
+ __ SmiToPtrArrayOffset(r3, r3);
+ __ AddP(sp, sp, r3);
+ __ AddP(sp, sp, Operand(kPointerSize));
+ if (create_implicit_receiver) {
+ __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r3, r4);
+ }
+ __ Ret();
+}
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, true, false);
+}
+
+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, true, false, false);
+}
+
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, false, false);
+}
+
+void Builtins::Generate_JSBuiltinsConstructStubForDerived(
+ MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, false, true);
+}
+
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ push(r3);
+ __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
+}
+
+enum IsTagged { kArgcIsSmiTagged, kArgcIsUntaggedInt };
+
+// Clobbers r4; preserves all other registers.
+static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
+ IsTagged argc_is_tagged) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ __ LoadRoot(r4, Heap::kRealStackLimitRootIndex);
+ // Make r4 the space we have left. The stack might already be overflowed
+ // here which will cause r4 to become negative.
+ __ SubP(r4, sp, r4);
+ // Check if the arguments will overflow the stack.
+ if (argc_is_tagged == kArgcIsSmiTagged) {
+ __ SmiToPtrArrayOffset(r0, argc);
+ } else {
+ DCHECK(argc_is_tagged == kArgcIsUntaggedInt);
+ __ ShiftLeftP(r0, argc, Operand(kPointerSizeLog2));
+ }
+ __ CmpP(r4, r0);
+ __ bgt(&okay); // Signed comparison.
+
+ // Out of stack space.
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+
+ __ bind(&okay);
+}
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+ bool is_construct) {
+ // Called from Generate_JS_Entry
+ // r2: new.target
+ // r3: function
+ // r4: receiver
+ // r5: argc
+ // r6: argv
+ // r0,r7-r9, cp may be clobbered
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+ // Enter an internal frame.
+ {
+ // FrameScope ends up calling MacroAssembler::EnterFrame here
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Setup the context (we need to use the caller context from the isolate).
+ ExternalReference context_address(Isolate::kContextAddress,
+ masm->isolate());
+ __ mov(cp, Operand(context_address));
+ __ LoadP(cp, MemOperand(cp));
+
+ __ InitializeRootRegister();
+
+ // Push the function and the receiver onto the stack.
+ __ Push(r3, r4);
+
+ // Check if we have enough stack space to push all arguments.
+ // Clobbers r4.
+ Generate_CheckStackOverflow(masm, r5, kArgcIsUntaggedInt);
+
+ // Copy arguments to the stack in a loop from argv to sp.
+ // The arguments are actually placed in reverse order on sp
+ // compared to argv (i.e. arg1 is highest memory in sp).
+ // r3: function
+ // r5: argc
+ // r6: argv, i.e. points to first arg
+ // r7: scratch reg to hold scaled argc
+ // r8: scratch reg to hold arg handle
+ // r9: scratch reg to hold index into argv
+ Label argLoop, argExit;
+ intptr_t zero = 0;
+ __ ShiftLeftP(r7, r5, Operand(kPointerSizeLog2));
+ __ SubRR(sp, r7); // Buy the stack frame to fit args
+ __ LoadImmP(r9, Operand(zero)); // Initialize argv index
+ __ bind(&argLoop);
+ __ CmpPH(r7, Operand(zero));
+ __ beq(&argExit, Label::kNear);
+ __ lay(r7, MemOperand(r7, -kPointerSize));
+ __ LoadP(r8, MemOperand(r9, r6)); // read next parameter
+ __ la(r9, MemOperand(r9, kPointerSize)); // r9++;
+ __ LoadP(r0, MemOperand(r8)); // dereference handle
+ __ StoreP(r0, MemOperand(r7, sp)); // push parameter
+ __ b(&argLoop);
+ __ bind(&argExit);
+
+ // Setup new.target and argc.
+ __ LoadRR(r6, r2);
+ __ LoadRR(r2, r5);
+ __ LoadRR(r5, r6);
+
+ // Initialize all JavaScript callee-saved registers, since they will be seen
+ // by the garbage collector as part of handlers.
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+ __ LoadRR(r7, r6);
+ __ LoadRR(r8, r6);
+ __ LoadRR(r9, r6);
+
+ // Invoke the code.
+ Handle<Code> builtin = is_construct
+ ? masm->isolate()->builtins()->Construct()
+ : masm->isolate()->builtins()->Call();
+ __ Call(builtin, RelocInfo::CODE_TARGET);
+
+ // Exit the JS frame and remove the parameters (except function), and
+ // return.
+ }
+ __ b(r14);
+
+ // r2: result
+}
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+// Generate code for entering a JS function with the interpreter.
+// On entry to the function the receiver and arguments have been pushed on the
+// stack left to right. The actual argument count matches the formal parameter
+// count expected by the function.
+//
+// The live registers are:
+// o r3: the JS function object being called.
+// o r5: the new target
+// o cp: our context
+// o pp: the caller's constant pool pointer (if enabled)
+// o fp: the caller's frame pointer
+// o sp: stack pointer
+// o lr: return address
+//
+// The function builds an interpreter frame. See InterpreterFrameConstants in
+// frames.h for its layout.
+void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ PushStandardFrame(r3);
+
+ // Get the bytecode array from the function object and load the pointer to the
+ // first entry into kInterpreterBytecodeRegister.
+ __ LoadP(r2, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ Label array_done;
+ Register debug_info = r4;
+ DCHECK(!debug_info.is(r2));
+ __ LoadP(debug_info,
+ FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset));
+ // Load original bytecode array or the debug copy.
+ __ LoadP(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(r2, SharedFunctionInfo::kFunctionDataOffset));
+ __ CmpSmiLiteral(debug_info, DebugInfo::uninitialized(), r0);
+ __ beq(&array_done);
+ __ LoadP(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(debug_info, DebugInfo::kAbstractCodeIndex));
+ __ bind(&array_done);
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ TestIfSmi(kInterpreterBytecodeArrayRegister);
+ __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ CompareObjectType(kInterpreterBytecodeArrayRegister, r2, no_reg,
+ BYTECODE_ARRAY_TYPE);
+ __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ }
+
+ // Push new.target, bytecode array and zero for bytecode array offset.
+ __ LoadImmP(r2, Operand::Zero());
+ __ Push(r5, kInterpreterBytecodeArrayRegister, r2);
+
+ // Allocate the local and temporary register file on the stack.
+ {
+ // Load frame size (word) from the BytecodeArray object.
+ __ LoadlW(r4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kFrameSizeOffset));
+
+ // Do a stack check to ensure we don't go over the limit.
+ Label ok;
+ __ SubP(r5, sp, r4);
+ __ LoadRoot(r0, Heap::kRealStackLimitRootIndex);
+ __ CmpLogicalP(r5, r0);
+ __ bge(&ok);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&ok);
+
+ // If ok, push undefined as the initial value for all register file entries.
+ // TODO(rmcilroy): Consider doing more than one push per loop iteration.
+ Label loop, no_args;
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ ShiftRightP(r4, r4, Operand(kPointerSizeLog2));
+ __ LoadAndTestP(r4, r4);
+ __ beq(&no_args);
+ __ LoadRR(r1, r4);
+ __ bind(&loop);
+ __ push(r5);
+ __ SubP(r1, Operand(1));
+ __ bne(&loop);
+ __ bind(&no_args);
+ }
+
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's prologue:
+ // - Call ProfileEntryHookStub when isolate has a function_entry_hook.
+ // - Code aging of the BytecodeArray object.
+
+ // Load accumulator, register file, bytecode offset, dispatch table into
+ // registers.
+ __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
+ __ AddP(kInterpreterRegisterFileRegister, fp,
+ Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ mov(kInterpreterDispatchTableRegister,
+ Operand(ExternalReference::interpreter_dispatch_table_address(
+ masm->isolate())));
+
+ // Dispatch to the first bytecode handler for the function.
+ __ LoadlB(r3, MemOperand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister));
+ __ ShiftLeftP(ip, r3, Operand(kPointerSizeLog2));
+ __ LoadP(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
+ // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
+ // and header removal.
+ __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(ip);
+
+ // Even though the first bytecode handler was called, we will never return.
+ __ Abort(kUnexpectedReturnFromBytecodeHandler);
+}
+
+void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's EmitReturnSequence.
+ // - Supporting FLAG_trace for Runtime::TraceExit.
+ // - Support profiler (specifically decrementing profiling_counter
+ // appropriately and calling out to HandleInterrupts if necessary).
+
+ // The return value is in accumulator, which is already in r2.
+
+ // Leave the frame (also dropping the register file).
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+
+ // Drop receiver + arguments and return.
+ __ LoadlW(r0, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kParameterSizeOffset));
+ __ AddP(sp, sp, r0);
+ __ Ret();
+}
+
+static void Generate_InterpreterPushArgs(MacroAssembler* masm, Register index,
+ Register count, Register scratch) {
+ Label loop;
+ __ AddP(index, index, Operand(kPointerSize)); // Bias up for LoadPU
+ __ LoadRR(r0, count);
+ __ bind(&loop);
+ __ LoadP(scratch, MemOperand(index, -kPointerSize));
+ __ lay(index, MemOperand(index, -kPointerSize));
+ __ push(scratch);
+ __ SubP(r0, Operand(1));
+ __ bne(&loop);
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndCallImpl(
+ MacroAssembler* masm, TailCallMode tail_call_mode) {
+ // ----------- S t a t e -------------
+ // -- r2 : the number of arguments (not including the receiver)
+ // -- r4 : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -- r3 : the target to call (can be any Object).
+ // -----------------------------------
+
+ // Calculate number of arguments (AddP one for receiver).
+ __ AddP(r5, r2, Operand(1));
+
+ // Push the arguments.
+ Generate_InterpreterPushArgs(masm, r4, r5, r6);
+
+ // Call the target.
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ tail_call_mode),
+ RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : argument count (not including receiver)
+ // -- r5 : new target
+ // -- r3 : constructor to call
+ // -- r4 : address of the first argument
+ // -----------------------------------
+
+ // Push a slot for the receiver to be constructed.
+ __ LoadImmP(r0, Operand::Zero());
+ __ push(r0);
+
+ // Push the arguments (skip if none).
+ Label skip;
+ __ CmpP(r2, Operand::Zero());
+ __ beq(&skip);
+ Generate_InterpreterPushArgs(masm, r4, r2, r6);
+ __ bind(&skip);
+
+ // Call the constructor with r2, r3, and r5 unmodified.
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
+static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
+ // Initialize register file register and dispatch table register.
+ __ AddP(kInterpreterRegisterFileRegister, fp,
+ Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+ __ mov(kInterpreterDispatchTableRegister,
+ Operand(ExternalReference::interpreter_dispatch_table_address(
+ masm->isolate())));
+
+ // Get the context from the frame.
+ __ LoadP(kContextRegister,
+ MemOperand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kContextFromRegisterPointer));
+
+ // Get the bytecode array pointer from the frame.
+ __ LoadP(
+ kInterpreterBytecodeArrayRegister,
+ MemOperand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ TestIfSmi(kInterpreterBytecodeArrayRegister);
+ __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ CompareObjectType(kInterpreterBytecodeArrayRegister, r3, no_reg,
+ BYTECODE_ARRAY_TYPE);
+ __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ }
+
+ // Get the target bytecode offset from the frame.
+ __ LoadP(kInterpreterBytecodeOffsetRegister,
+ MemOperand(
+ kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+ // Dispatch to the target bytecode.
+ __ LoadlB(r3, MemOperand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister));
+ __ ShiftLeftP(ip, r3, Operand(kPointerSizeLog2));
+ __ LoadP(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
+ __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(ip);
+}
+
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+ MacroAssembler* masm, Deoptimizer::BailoutType type) {
+ // Enter an internal frame.
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+ // Pass the deoptimization type to the runtime system.
+ __ LoadSmiLiteral(r3, Smi::FromInt(static_cast<int>(type)));
+ __ Push(r3);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+ // Tear down internal frame.
+ }
+
+ // Drop state (we don't use these for interpreter deopts) and and pop the
+ // accumulator value into the accumulator register.
+ __ Drop(1);
+ __ Pop(kInterpreterAccumulatorRegister);
+
+ // Enter the bytecode dispatch.
+ Generate_EnterBytecodeDispatch(masm);
+}
+
+void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ // Set the address of the interpreter entry trampoline as a return address.
+ // This simulates the initial call to bytecode handlers in interpreter entry
+ // trampoline. The return will never actually be taken, but our stack walker
+ // uses this address to determine whether a frame is interpreted.
+ __ mov(r14,
+ Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
+
+ Generate_EnterBytecodeDispatch(masm);
+}
+
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+}
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+ GenerateTailCallToReturnedCode(masm,
+ Runtime::kCompileOptimized_NotConcurrent);
+}
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
+}
+
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+ // For now, we are relying on the fact that make_code_young doesn't do any
+ // garbage collection which allows us to save/restore the registers without
+ // worrying about which of them contain pointers. We also don't build an
+ // internal frame to make the code faster, since we shouldn't have to do stack
+ // crawls in MakeCodeYoung. This seems a bit fragile.
+
+ // Point r2 at the start of the PlatformCodeAge sequence.
+ __ CleanseP(r14);
+ __ SubP(r14, Operand(kCodeAgingSequenceLength));
+ __ LoadRR(r2, r14);
+
+ __ pop(r14);
+
+ // The following registers must be saved and restored when calling through to
+ // the runtime:
+ // r2 - contains return address (beginning of patch sequence)
+ // r3 - isolate
+ // r5 - new target
+ // lr - return address
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ MultiPush(r14.bit() | r2.bit() | r3.bit() | r5.bit() | fp.bit());
+ __ PrepareCallCFunction(2, 0, r4);
+ __ mov(r3, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ CallCFunction(
+ ExternalReference::get_make_code_young_function(masm->isolate()), 2);
+ __ MultiPop(r14.bit() | r2.bit() | r3.bit() | r5.bit() | fp.bit());
+ __ LoadRR(ip, r2);
+ __ Jump(ip);
+}
+
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+ void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+ } \
+ void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+ }
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+ // For now, we are relying on the fact that make_code_young doesn't do any
+ // garbage collection which allows us to save/restore the registers without
+ // worrying about which of them contain pointers. We also don't build an
+ // internal frame to make the code faster, since we shouldn't have to do stack
+ // crawls in MakeCodeYoung. This seems a bit fragile.
+
+ // Point r2 at the start of the PlatformCodeAge sequence.
+ __ CleanseP(r14);
+ __ SubP(r14, Operand(kCodeAgingSequenceLength));
+ __ LoadRR(r2, r14);
+
+ __ pop(r14);
+
+ // The following registers must be saved and restored when calling through to
+ // the runtime:
+ // r2 - contains return address (beginning of patch sequence)
+ // r3 - isolate
+ // r5 - new target
+ // lr - return address
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ MultiPush(r14.bit() | r2.bit() | r3.bit() | r5.bit() | fp.bit());
+ __ PrepareCallCFunction(2, 0, r4);
+ __ mov(r3, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ CallCFunction(
+ ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+ 2);
+ __ MultiPop(r14.bit() | r2.bit() | r3.bit() | r5.bit() | fp.bit());
+ __ LoadRR(ip, r2);
+
+ // Perform prologue operations usually performed by the young code stub.
+ __ PushStandardFrame(r3);
+
+ // Jump to point after the code-age stub.
+ __ AddP(r2, ip, Operand(kNoCodeAgeSequenceLength));
+ __ Jump(r2);
+}
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+ GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
+ Generate_MarkCodeAsExecutedOnce(masm);
+}
+
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+ SaveFPRegsMode save_doubles) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Preserve registers across notification, this is important for compiled
+ // stubs that tail call the runtime on deopts passing their parameters in
+ // registers.
+ __ MultiPush(kJSCallerSaved | kCalleeSaved);
+ // Pass the function and deoptimization type to the runtime system.
+ __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
+ __ MultiPop(kJSCallerSaved | kCalleeSaved);
+ }
+
+ __ la(sp, MemOperand(sp, kPointerSize)); // Ignore state
+ __ Ret(); // Jump to miss handler
+}
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+ Deoptimizer::BailoutType type) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Pass the function and deoptimization type to the runtime system.
+ __ LoadSmiLiteral(r2, Smi::FromInt(static_cast<int>(type)));
+ __ push(r2);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+ }
+
+ // Get the full codegen state from the stack and untag it -> r8.
+ __ LoadP(r8, MemOperand(sp, 0 * kPointerSize));
+ __ SmiUntag(r8);
+ // Switch on the state.
+ Label with_tos_register, unknown_state;
+ __ CmpP(r8, Operand(FullCodeGenerator::NO_REGISTERS));
+ __ bne(&with_tos_register);
+ __ la(sp, MemOperand(sp, 1 * kPointerSize)); // Remove state.
+ __ Ret();
+
+ __ bind(&with_tos_register);
+ __ LoadP(r2, MemOperand(sp, 1 * kPointerSize));
+ __ CmpP(r8, Operand(FullCodeGenerator::TOS_REG));
+ __ bne(&unknown_state);
+ __ la(sp, MemOperand(sp, 2 * kPointerSize)); // Remove state.
+ __ Ret();
+
+ __ bind(&unknown_state);
+ __ stop("no cases left");
+}
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+// Clobbers registers {r6, r7, r8, r9}.
+void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
+ Register function_template_info,
+ Label* receiver_check_failed) {
+ Register signature = r6;
+ Register map = r7;
+ Register constructor = r8;
+ Register scratch = r9;
+
+ // If there is no signature, return the holder.
+ __ LoadP(signature, FieldMemOperand(function_template_info,
+ FunctionTemplateInfo::kSignatureOffset));
+ Label receiver_check_passed;
+ __ JumpIfRoot(signature, Heap::kUndefinedValueRootIndex,
+ &receiver_check_passed);
+
+ // Walk the prototype chain.
+ __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ Label prototype_loop_start;
+ __ bind(&prototype_loop_start);
+
+ // Get the constructor, if any.
+ __ GetMapConstructor(constructor, map, scratch, scratch);
+ __ CmpP(scratch, Operand(JS_FUNCTION_TYPE));
+ Label next_prototype;
+ __ bne(&next_prototype);
+ Register type = constructor;
+ __ LoadP(type,
+ FieldMemOperand(constructor, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(type,
+ FieldMemOperand(type, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Loop through the chain of inheriting function templates.
+ Label function_template_loop;
+ __ bind(&function_template_loop);
+
+ // If the signatures match, we have a compatible receiver.
+ __ CmpP(signature, type);
+ __ beq(&receiver_check_passed);
+
+ // If the current type is not a FunctionTemplateInfo, load the next prototype
+ // in the chain.
+ __ JumpIfSmi(type, &next_prototype);
+ __ CompareObjectType(type, scratch, scratch, FUNCTION_TEMPLATE_INFO_TYPE);
+ __ bne(&next_prototype);
+
+ // Otherwise load the parent function template and iterate.
+ __ LoadP(type,
+ FieldMemOperand(type, FunctionTemplateInfo::kParentTemplateOffset));
+ __ b(&function_template_loop);
+
+ // Load the next prototype.
+ __ bind(&next_prototype);
+ __ LoadlW(scratch, FieldMemOperand(map, Map::kBitField3Offset));
+ __ DecodeField<Map::HasHiddenPrototype>(scratch);
+ __ beq(receiver_check_failed);
+
+ __ LoadP(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
+ __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ // Iterate.
+ __ b(&prototype_loop_start);
+
+ __ bind(&receiver_check_passed);
+}
+
+void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : number of arguments excluding receiver
+ // -- r3 : callee
+ // -- lr : return address
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[4 * (argc - 1)] : first argument
+ // -- sp[4 * argc] : receiver
+ // -----------------------------------
+
+ // Load the FunctionTemplateInfo.
+ __ LoadP(r5, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Do the compatible receiver check.
+ Label receiver_check_failed;
+ __ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2));
+ __ LoadP(r4, MemOperand(sp, r1));
+ CompatibleReceiverCheck(masm, r4, r5, &receiver_check_failed);
+
+ // Get the callback offset from the FunctionTemplateInfo, and jump to the
+ // beginning of the code.
+ __ LoadP(r6, FieldMemOperand(r5, FunctionTemplateInfo::kCallCodeOffset));
+ __ LoadP(r6, FieldMemOperand(r6, CallHandlerInfo::kFastHandlerOffset));
+ __ AddP(ip, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(ip);
+
+ // Compatible receiver check failed: throw an Illegal Invocation exception.
+ __ bind(&receiver_check_failed);
+ // Drop the arguments (including the receiver);
+ __ AddP(r1, r1, Operand(kPointerSize));
+ __ AddP(sp, sp, r1);
+ __ TailCallRuntime(Runtime::kThrowIllegalInvocation);
+}
+
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+ // Lookup the function in the JavaScript frame.
+ __ LoadP(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Pass function as argument.
+ __ push(r2);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+ }
+
+ // If the code object is null, just return to the unoptimized code.
+ Label skip;
+ __ CmpSmiLiteral(r2, Smi::FromInt(0), r0);
+ __ bne(&skip);
+ __ Ret();
+
+ __ bind(&skip);
+
+ // Load deoptimization data from the code object.
+ // <deopt_data> = <code>[#deoptimization_data_offset]
+ __ LoadP(r3, FieldMemOperand(r2, Code::kDeoptimizationDataOffset));
+
+ // Load the OSR entrypoint offset from the deoptimization data.
+ // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+ __ LoadP(
+ r3, FieldMemOperand(r3, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex)));
+ __ SmiUntag(r3);
+
+ // Compute the target address = code_obj + header_size + osr_offset
+ // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+ __ AddP(r2, r3);
+ __ AddP(r0, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ LoadRR(r14, r0);
+
+ // And "return" to the OSR entry point of the function.
+ __ Ret();
+}
+
+// static
+void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
+ int field_index) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- sp[0] : receiver
+ // -----------------------------------
+
+ // 1. Pop receiver into r2 and check that it's actually a JSDate object.
+ Label receiver_not_date;
+ {
+ __ Pop(r2);
+ __ JumpIfSmi(r2, &receiver_not_date);
+ __ CompareObjectType(r2, r3, r4, JS_DATE_TYPE);
+ __ bne(&receiver_not_date);
+ }
+
+ // 2. Load the specified date field, falling back to the runtime as necessary.
+ if (field_index == JSDate::kDateValue) {
+ __ LoadP(r2, FieldMemOperand(r2, JSDate::kValueOffset));
+ } else {
+ if (field_index < JSDate::kFirstUncachedField) {
+ Label stamp_mismatch;
+ __ mov(r3, Operand(ExternalReference::date_cache_stamp(masm->isolate())));
+ __ LoadP(r3, MemOperand(r3));
+ __ LoadP(ip, FieldMemOperand(r2, JSDate::kCacheStampOffset));
+ __ CmpP(r3, ip);
+ __ bne(&stamp_mismatch);
+ __ LoadP(r2, FieldMemOperand(
+ r2, JSDate::kValueOffset + field_index * kPointerSize));
+ __ Ret();
+ __ bind(&stamp_mismatch);
+ }
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ PrepareCallCFunction(2, r3);
+ __ LoadSmiLiteral(r3, Smi::FromInt(field_index));
+ __ CallCFunction(
+ ExternalReference::get_date_field_function(masm->isolate()), 2);
+ }
+ __ Ret();
+
+ // 3. Raise a TypeError if the receiver is not a date.
+ __ bind(&receiver_not_date);
+ __ TailCallRuntime(Runtime::kThrowNotDateError);
+}
+
+// static
+void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : argc
+ // -- sp[0] : first argument (left-hand side)
+ // -- sp[4] : receiver (right-hand side)
+ // -----------------------------------
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ LoadP(InstanceOfDescriptor::LeftRegister(),
+ MemOperand(fp, 2 * kPointerSize)); // Load left-hand side.
+ __ LoadP(InstanceOfDescriptor::RightRegister(),
+ MemOperand(fp, 3 * kPointerSize)); // Load right-hand side.
+ InstanceOfStub stub(masm->isolate(), true);
+ __ CallStub(&stub);
+ }
+
+ // Pop the argument and the receiver.
+ __ Ret(2);
+}
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : argc
+ // -- sp[0] : argArray
+ // -- sp[4] : thisArg
+ // -- sp[8] : receiver
+ // -----------------------------------
+
+ // 1. Load receiver into r3, argArray into r2 (if present), remove all
+ // arguments from the stack (including the receiver), and push thisArg (if
+ // present) instead.
+ {
+ Label skip;
+ Register arg_size = r4;
+ Register new_sp = r5;
+ Register scratch = r6;
+ __ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2));
+ __ AddP(new_sp, sp, arg_size);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ LoadRR(scratch, r2);
+ __ LoadP(r3, MemOperand(new_sp, 0)); // receiver
+ __ CmpP(arg_size, Operand(kPointerSize));
+ __ blt(&skip);
+ __ LoadP(scratch, MemOperand(new_sp, 1 * -kPointerSize)); // thisArg
+ __ beq(&skip);
+ __ LoadP(r2, MemOperand(new_sp, 2 * -kPointerSize)); // argArray
+ __ bind(&skip);
+ __ LoadRR(sp, new_sp);
+ __ StoreP(scratch, MemOperand(sp, 0));
+ }
+
+ // ----------- S t a t e -------------
+ // -- r2 : argArray
+ // -- r3 : receiver
+ // -- sp[0] : thisArg
+ // -----------------------------------
+
+ // 2. Make sure the receiver is actually callable.
+ Label receiver_not_callable;
+ __ JumpIfSmi(r3, &receiver_not_callable);
+ __ LoadP(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
+ __ TestBit(r6, Map::kIsCallable);
+ __ beq(&receiver_not_callable);
+
+ // 3. Tail call with no arguments if argArray is null or undefined.
+ Label no_arguments;
+ __ JumpIfRoot(r2, Heap::kNullValueRootIndex, &no_arguments);
+ __ JumpIfRoot(r2, Heap::kUndefinedValueRootIndex, &no_arguments);
+
+ // 4a. Apply the receiver to the given argArray (passing undefined for
+ // new.target).
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+ // 4b. The argArray is either null or undefined, so we tail call without any
+ // arguments to the receiver.
+ __ bind(&no_arguments);
+ {
+ __ LoadImmP(r2, Operand::Zero());
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ }
+
+ // 4c. The receiver is not callable, throw an appropriate TypeError.
+ __ bind(&receiver_not_callable);
+ {
+ __ StoreP(r3, MemOperand(sp, 0));
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+ }
+}
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
+ // 1. Make sure we have at least one argument.
+ // r2: actual number of arguments
+ {
+ Label done;
+ __ CmpP(r2, Operand::Zero());
+ __ bne(&done, Label::kNear);
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ AddP(r2, Operand(1));
+ __ bind(&done);
+ }
+
+ // r2: actual number of arguments
+ // 2. Get the callable to call (passed as receiver) from the stack.
+ __ ShiftLeftP(r4, r2, Operand(kPointerSizeLog2));
+ __ LoadP(r3, MemOperand(sp, r4));
+
+ // 3. Shift arguments and return address one slot down on the stack
+ // (overwriting the original receiver). Adjust argument count to make
+ // the original first argument the new receiver.
+ // r2: actual number of arguments
+ // r3: callable
+ {
+ Label loop;
+ // Calculate the copy start address (destination). Copy end address is sp.
+ __ AddP(r4, sp, r4);
+
+ __ bind(&loop);
+ __ LoadP(ip, MemOperand(r4, -kPointerSize));
+ __ StoreP(ip, MemOperand(r4));
+ __ SubP(r4, Operand(kPointerSize));
+ __ CmpP(r4, sp);
+ __ bne(&loop);
+ // Adjust the actual number of arguments and remove the top element
+ // (which is a copy of the last argument).
+ __ SubP(r2, Operand(1));
+ __ pop();
+ }
+
+ // 4. Call the callable.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : argc
+ // -- sp[0] : argumentsList
+ // -- sp[4] : thisArgument
+ // -- sp[8] : target
+ // -- sp[12] : receiver
+ // -----------------------------------
+
+ // 1. Load target into r3 (if present), argumentsList into r2 (if present),
+ // remove all arguments from the stack (including the receiver), and push
+ // thisArgument (if present) instead.
+ {
+ Label skip;
+ Register arg_size = r4;
+ Register new_sp = r5;
+ Register scratch = r6;
+ __ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2));
+ __ AddP(new_sp, sp, arg_size);
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ LoadRR(scratch, r3);
+ __ LoadRR(r2, r3);
+ __ CmpP(arg_size, Operand(kPointerSize));
+ __ blt(&skip);
+ __ LoadP(r3, MemOperand(new_sp, 1 * -kPointerSize)); // target
+ __ beq(&skip);
+ __ LoadP(scratch, MemOperand(new_sp, 2 * -kPointerSize)); // thisArgument
+ __ CmpP(arg_size, Operand(2 * kPointerSize));
+ __ beq(&skip);
+ __ LoadP(r2, MemOperand(new_sp, 3 * -kPointerSize)); // argumentsList
+ __ bind(&skip);
+ __ LoadRR(sp, new_sp);
+ __ StoreP(scratch, MemOperand(sp, 0));
+ }
+
+ // ----------- S t a t e -------------
+ // -- r2 : argumentsList
+ // -- r3 : target
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+
+ // 2. Make sure the target is actually callable.
+ Label target_not_callable;
+ __ JumpIfSmi(r3, &target_not_callable);
+ __ LoadP(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
+ __ TestBit(r6, Map::kIsCallable);
+ __ beq(&target_not_callable);
+
+ // 3a. Apply the target to the given argumentsList (passing undefined for
+ // new.target).
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+ // 3b. The target is not callable, throw an appropriate TypeError.
+ __ bind(&target_not_callable);
+ {
+ __ StoreP(r3, MemOperand(sp, 0));
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+ }
+}
+
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : argc
+ // -- sp[0] : new.target (optional)
+ // -- sp[4] : argumentsList
+ // -- sp[8] : target
+ // -- sp[12] : receiver
+ // -----------------------------------
+
+ // 1. Load target into r3 (if present), argumentsList into r2 (if present),
+ // new.target into r5 (if present, otherwise use target), remove all
+ // arguments from the stack (including the receiver), and push thisArgument
+ // (if present) instead.
+ {
+ Label skip;
+ Register arg_size = r4;
+ Register new_sp = r6;
+ __ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2));
+ __ AddP(new_sp, sp, arg_size);
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ LoadRR(r2, r3);
+ __ LoadRR(r5, r3);
+ __ StoreP(r3, MemOperand(new_sp, 0)); // receiver (undefined)
+ __ CmpP(arg_size, Operand(kPointerSize));
+ __ blt(&skip);
+ __ LoadP(r3, MemOperand(new_sp, 1 * -kPointerSize)); // target
+ __ LoadRR(r5, r3); // new.target defaults to target
+ __ beq(&skip);
+ __ LoadP(r2, MemOperand(new_sp, 2 * -kPointerSize)); // argumentsList
+ __ CmpP(arg_size, Operand(2 * kPointerSize));
+ __ beq(&skip);
+ __ LoadP(r5, MemOperand(new_sp, 3 * -kPointerSize)); // new.target
+ __ bind(&skip);
+ __ LoadRR(sp, new_sp);
+ }
+
+ // ----------- S t a t e -------------
+ // -- r2 : argumentsList
+ // -- r5 : new.target
+ // -- r3 : target
+ // -- sp[0] : receiver (undefined)
+ // -----------------------------------
+
+ // 2. Make sure the target is actually a constructor.
+ Label target_not_constructor;
+ __ JumpIfSmi(r3, &target_not_constructor);
+ __ LoadP(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
+ __ TestBit(r6, Map::kIsConstructor);
+ __ beq(&target_not_constructor);
+
+ // 3. Make sure the target is actually a constructor.
+ Label new_target_not_constructor;
+ __ JumpIfSmi(r5, &new_target_not_constructor);
+ __ LoadP(r6, FieldMemOperand(r5, HeapObject::kMapOffset));
+ __ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
+ __ TestBit(r6, Map::kIsConstructor);
+ __ beq(&new_target_not_constructor);
+
+ // 4a. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+ // 4b. The target is not a constructor, throw an appropriate TypeError.
+ __ bind(&target_not_constructor);
+ {
+ __ StoreP(r3, MemOperand(sp, 0));
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
+
+ // 4c. The new.target is not a constructor, throw an appropriate TypeError.
+ __ bind(&new_target_not_constructor);
+ {
+ __ StoreP(r5, MemOperand(sp, 0));
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
+}
+
+static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
+ Label* stack_overflow) {
+ // ----------- S t a t e -------------
+ // -- r2 : actual number of arguments
+ // -- r3 : function (passed through to callee)
+ // -- r4 : expected number of arguments
+ // -- r5 : new target (passed through to callee)
+ // -----------------------------------
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ __ LoadRoot(r7, Heap::kRealStackLimitRootIndex);
+ // Make r7 the space we have left. The stack might already be overflowed
+ // here which will cause r7 to become negative.
+ __ SubP(r7, sp, r7);
+ // Check if the arguments will overflow the stack.
+ __ ShiftLeftP(r0, r4, Operand(kPointerSizeLog2));
+ __ CmpP(r7, r0);
+ __ ble(stack_overflow); // Signed comparison.
+}
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+ __ SmiTag(r2);
+ __ LoadSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ // Stack updated as such:
+ // old SP --->
+ // R14 Return Addr
+ // Old FP <--- New FP
+ // Argument Adapter SMI
+ // Function
+ // ArgC as SMI <--- New SP
+ __ lay(sp, MemOperand(sp, -5 * kPointerSize));
+
+ // Cleanse the top nibble of 31-bit pointers.
+ __ CleanseP(r14);
+ __ StoreP(r14, MemOperand(sp, 4 * kPointerSize));
+ __ StoreP(fp, MemOperand(sp, 3 * kPointerSize));
+ __ StoreP(r6, MemOperand(sp, 2 * kPointerSize));
+ __ StoreP(r3, MemOperand(sp, 1 * kPointerSize));
+ __ StoreP(r2, MemOperand(sp, 0 * kPointerSize));
+ __ la(fp, MemOperand(sp, StandardFrameConstants::kFixedFrameSizeFromFp +
+ kPointerSize));
+}
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : result being passed through
+ // -----------------------------------
+ // Get the number of arguments passed (as a smi), tear down the frame and
+ // then tear down the parameters.
+ __ LoadP(r3, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
+ kPointerSize)));
+ int stack_adjustment = kPointerSize; // adjust for receiver
+ __ LeaveFrame(StackFrame::ARGUMENTS_ADAPTOR, stack_adjustment);
+ __ SmiToPtrArrayOffset(r3, r3);
+ __ lay(sp, MemOperand(sp, r3));
+}
+
+// static
+void Builtins::Generate_Apply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : argumentsList
+ // -- r3 : target
+ // -- r5 : new.target (checked to be constructor or undefined)
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+
+ // Create the list of arguments from the array-like argumentsList.
+ {
+ Label create_arguments, create_array, create_runtime, done_create;
+ __ JumpIfSmi(r2, &create_runtime);
+
+ // Load the map of argumentsList into r4.
+ __ LoadP(r4, FieldMemOperand(r2, HeapObject::kMapOffset));
+
+ // Load native context into r6.
+ __ LoadP(r6, NativeContextMemOperand());
+
+ // Check if argumentsList is an (unmodified) arguments object.
+ __ LoadP(ip, ContextMemOperand(r6, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+ __ CmpP(ip, r4);
+ __ beq(&create_arguments);
+ __ LoadP(ip, ContextMemOperand(r6, Context::STRICT_ARGUMENTS_MAP_INDEX));
+ __ CmpP(ip, r4);
+ __ beq(&create_arguments);
+
+ // Check if argumentsList is a fast JSArray.
+ __ CompareInstanceType(r4, ip, JS_ARRAY_TYPE);
+ __ beq(&create_array);
+
+ // Ask the runtime to create the list (actually a FixedArray).
+ __ bind(&create_runtime);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r3, r5, r2);
+ __ CallRuntime(Runtime::kCreateListFromArrayLike);
+ __ Pop(r3, r5);
+ __ LoadP(r4, FieldMemOperand(r2, FixedArray::kLengthOffset));
+ __ SmiUntag(r4);
+ }
+ __ b(&done_create);
+
+ // Try to create the list from an arguments object.
+ __ bind(&create_arguments);
+ __ LoadP(r4, FieldMemOperand(r2, JSArgumentsObject::kLengthOffset));
+ __ LoadP(r6, FieldMemOperand(r2, JSObject::kElementsOffset));
+ __ LoadP(ip, FieldMemOperand(r6, FixedArray::kLengthOffset));
+ __ CmpP(r4, ip);
+ __ bne(&create_runtime);
+ __ SmiUntag(r4);
+ __ LoadRR(r2, r6);
+ __ b(&done_create);
+
+ // Try to create the list from a JSArray object.
+ __ bind(&create_array);
+ __ LoadlB(r4, FieldMemOperand(r4, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(r4);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ __ CmpP(r4, Operand(FAST_ELEMENTS));
+ __ bgt(&create_runtime);
+ __ CmpP(r4, Operand(FAST_HOLEY_SMI_ELEMENTS));
+ __ beq(&create_runtime);
+ __ LoadP(r4, FieldMemOperand(r2, JSArray::kLengthOffset));
+ __ LoadP(r2, FieldMemOperand(r2, JSArray::kElementsOffset));
+ __ SmiUntag(r4);
+
+ __ bind(&done_create);
+ }
+
+ // Check for stack overflow.
+ {
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label done;
+ __ LoadRoot(ip, Heap::kRealStackLimitRootIndex);
+ // Make ip the space we have left. The stack might already be overflowed
+ // here which will cause ip to become negative.
+ __ SubP(ip, sp, ip);
+ // Check if the arguments will overflow the stack.
+ __ ShiftLeftP(r0, r4, Operand(kPointerSizeLog2));
+ __ CmpP(ip, r0); // Signed comparison.
+ __ bgt(&done);
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&done);
+ }
+
+ // ----------- S t a t e -------------
+ // -- r3 : target
+ // -- r2 : args (a FixedArray built from argumentsList)
+ // -- r4 : len (number of elements to push from args)
+ // -- r5 : new.target (checked to be constructor or undefined)
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+
+ // Push arguments onto the stack (thisArgument is already on the stack).
+ {
+ Label loop, no_args;
+ __ CmpP(r4, Operand::Zero());
+ __ beq(&no_args);
+ __ AddP(r2, r2,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+ __ LoadRR(r1, r4);
+ __ bind(&loop);
+ __ LoadP(r0, MemOperand(r2, kPointerSize));
+ __ la(r2, MemOperand(r2, kPointerSize));
+ __ push(r0);
+ __ BranchOnCount(r1, &loop);
+ __ bind(&no_args);
+ __ LoadRR(r2, r4);
+ }
+
+ // Dispatch to Call or Construct depending on whether new.target is undefined.
+ {
+ __ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET, eq);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ }
+}
+
+namespace {
+
+// Drops top JavaScript frame and an arguments adaptor frame below it (if
+// present) preserving all the arguments prepared for current call.
+// Does nothing if debugger is currently active.
+// ES6 14.6.3. PrepareForTailCall
+//
+// Stack structure for the function g() tail calling f():
+//
+// ------- Caller frame: -------
+// | ...
+// | g()'s arg M
+// | ...
+// | g()'s arg 1
+// | g()'s receiver arg
+// | g()'s caller pc
+// ------- g()'s frame: -------
+// | g()'s caller fp <- fp
+// | g()'s context
+// | function pointer: g
+// | -------------------------
+// | ...
+// | ...
+// | f()'s arg N
+// | ...
+// | f()'s arg 1
+// | f()'s receiver arg <- sp (f()'s caller pc is not on the stack yet!)
+// ----------------------
+//
+void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+ DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+ Comment cmnt(masm, "[ PrepareForTailCall");
+
+ // Prepare for tail call only if ES2015 tail call elimination is active.
+ Label done;
+ ExternalReference is_tail_call_elimination_enabled =
+ ExternalReference::is_tail_call_elimination_enabled_address(
+ masm->isolate());
+ __ mov(scratch1, Operand(is_tail_call_elimination_enabled));
+ __ LoadlB(scratch1, MemOperand(scratch1));
+ __ CmpP(scratch1, Operand::Zero());
+ __ beq(&done);
+
+ // Drop possible interpreter handler/stub frame.
+ {
+ Label no_interpreter_frame;
+ __ LoadP(scratch3,
+ MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::STUB), r0);
+ __ bne(&no_interpreter_frame);
+ __ LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&no_interpreter_frame);
+ }
+
+ // Check if next frame is an arguments adaptor frame.
+ Register caller_args_count_reg = scratch1;
+ Label no_arguments_adaptor, formal_parameter_count_loaded;
+ __ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(
+ scratch3,
+ MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ bne(&no_arguments_adaptor);
+
+ // Drop current frame and load arguments count from arguments adaptor frame.
+ __ LoadRR(fp, scratch2);
+ __ LoadP(caller_args_count_reg,
+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(caller_args_count_reg);
+ __ b(&formal_parameter_count_loaded);
+
+ __ bind(&no_arguments_adaptor);
+ // Load caller's formal parameter count
+ __ LoadP(scratch1,
+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
+ __ LoadP(scratch1,
+ FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadW(caller_args_count_reg,
+ FieldMemOperand(scratch1,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+#if !V8_TARGET_ARCH_S390X
+ __ SmiUntag(caller_args_count_reg);
+#endif
+
+ __ bind(&formal_parameter_count_loaded);
+
+ ParameterCount callee_args_count(args_reg);
+ __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+ scratch3);
+ __ bind(&done);
+}
+} // namespace
+
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm,
+ ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
+ // ----------- S t a t e -------------
+ // -- r2 : the number of arguments (not including the receiver)
+ // -- r3 : the function to call (checked to be a JSFunction)
+ // -----------------------------------
+ __ AssertFunction(r3);
+
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ // Check that the function is not a "classConstructor".
+ Label class_constructor;
+ __ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadlW(r5, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
+ __ TestBitMask(r5, SharedFunctionInfo::kClassConstructorBits, r0);
+ __ bne(&class_constructor);
+
+ // Enter the context of the function; ToObject has to run in the function
+ // context, and we also need to take the global proxy from the function
+ // context in case of conversion.
+ __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
+ // We need to convert the receiver for non-native sloppy mode functions.
+ Label done_convert;
+ __ AndP(r0, r5, Operand((1 << SharedFunctionInfo::kStrictModeBit) |
+ (1 << SharedFunctionInfo::kNativeBit)));
+ __ bne(&done_convert);
+ {
+ // ----------- S t a t e -------------
+ // -- r2 : the number of arguments (not including the receiver)
+ // -- r3 : the function to call (checked to be a JSFunction)
+ // -- r4 : the shared function info.
+ // -- cp : the function context.
+ // -----------------------------------
+
+ if (mode == ConvertReceiverMode::kNullOrUndefined) {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(r5);
+ } else {
+ Label convert_to_object, convert_receiver;
+ __ ShiftLeftP(r5, r2, Operand(kPointerSizeLog2));
+ __ LoadP(r5, MemOperand(sp, r5));
+ __ JumpIfSmi(r5, &convert_to_object);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(r5, r6, r6, FIRST_JS_RECEIVER_TYPE);
+ __ bge(&done_convert);
+ if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
+ Label convert_global_proxy;
+ __ JumpIfRoot(r5, Heap::kUndefinedValueRootIndex,
+ &convert_global_proxy);
+ __ JumpIfNotRoot(r5, Heap::kNullValueRootIndex, &convert_to_object);
+ __ bind(&convert_global_proxy);
+ {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(r5);
+ }
+ __ b(&convert_receiver);
+ }
+ __ bind(&convert_to_object);
+ {
+ // Convert receiver using ToObject.
+ // TODO(bmeurer): Inline the allocation here to avoid building the frame
+ // in the fast case? (fall back to AllocateInNewSpace?)
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(r2);
+ __ Push(r2, r3);
+ __ LoadRR(r2, r5);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ LoadRR(r5, r2);
+ __ Pop(r2, r3);
+ __ SmiUntag(r2);
+ }
+ __ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ bind(&convert_receiver);
+ }
+ __ ShiftLeftP(r6, r2, Operand(kPointerSizeLog2));
+ __ StoreP(r5, MemOperand(sp, r6));
+ }
+ __ bind(&done_convert);
+
+ // ----------- S t a t e -------------
+ // -- r2 : the number of arguments (not including the receiver)
+ // -- r3 : the function to call (checked to be a JSFunction)
+ // -- r4 : the shared function info.
+ // -- cp : the function context.
+ // -----------------------------------
+
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, r2, r5, r6, r7);
+ }
+
+ __ LoadW(
+ r4, FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset));
+#if !V8_TARGET_ARCH_S390X
+ __ SmiUntag(r4);
+#endif
+ ParameterCount actual(r2);
+ ParameterCount expected(r4);
+ __ InvokeFunctionCode(r3, no_reg, expected, actual, JUMP_FUNCTION,
+ CheckDebugStepCallWrapper());
+
+ // The function is a "classConstructor", need to raise an exception.
+ __ bind(&class_constructor);
+ {
+ FrameAndConstantPoolScope frame(masm, StackFrame::INTERNAL);
+ __ push(r3);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+ }
+}
+
+namespace {
+
+void Generate_PushBoundArguments(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : the number of arguments (not including the receiver)
+ // -- r3 : target (checked to be a JSBoundFunction)
+ // -- r5 : new.target (only in case of [[Construct]])
+ // -----------------------------------
+
+ // Load [[BoundArguments]] into r4 and length of that into r6.
+ Label no_bound_arguments;
+ __ LoadP(r4, FieldMemOperand(r3, JSBoundFunction::kBoundArgumentsOffset));
+ __ LoadP(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
+ __ SmiUntag(r6);
+ __ LoadAndTestP(r6, r6);
+ __ beq(&no_bound_arguments);
+ {
+ // ----------- S t a t e -------------
+ // -- r2 : the number of arguments (not including the receiver)
+ // -- r3 : target (checked to be a JSBoundFunction)
+ // -- r4 : the [[BoundArguments]] (implemented as FixedArray)
+ // -- r5 : new.target (only in case of [[Construct]])
+ // -- r6 : the number of [[BoundArguments]]
+ // -----------------------------------
+
+ // Reserve stack space for the [[BoundArguments]].
+ {
+ Label done;
+ __ LoadRR(r8, sp); // preserve previous stack pointer
+ __ ShiftLeftP(r9, r6, Operand(kPointerSizeLog2));
+ __ SubP(sp, sp, r9);
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack
+ // limit".
+ __ CompareRoot(sp, Heap::kRealStackLimitRootIndex);
+ __ bgt(&done); // Signed comparison.
+ // Restore the stack pointer.
+ __ LoadRR(sp, r8);
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ }
+ __ bind(&done);
+ }
+
+ // Relocate arguments down the stack.
+ // -- r2 : the number of arguments (not including the receiver)
+ // -- r8 : the previous stack pointer
+ // -- r9: the size of the [[BoundArguments]]
+ {
+ Label skip, loop;
+ __ LoadImmP(r7, Operand::Zero());
+ __ CmpP(r2, Operand::Zero());
+ __ beq(&skip);
+ __ LoadRR(r1, r2);
+ __ bind(&loop);
+ __ LoadP(r0, MemOperand(r8, r7));
+ __ StoreP(r0, MemOperand(sp, r7));
+ __ AddP(r7, r7, Operand(kPointerSize));
+ __ BranchOnCount(r1, &loop);
+ __ bind(&skip);
+ }
+
+ // Copy [[BoundArguments]] to the stack (below the arguments).
+ {
+ Label loop;
+ __ AddP(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ AddP(r4, r4, r9);
+ __ LoadRR(r1, r6);
+ __ bind(&loop);
+ __ LoadP(r0, MemOperand(r4, -kPointerSize));
+ __ lay(r4, MemOperand(r4, -kPointerSize));
+ __ StoreP(r0, MemOperand(sp, r7));
+ __ AddP(r7, r7, Operand(kPointerSize));
+ __ BranchOnCount(r1, &loop);
+ __ AddP(r2, r2, r6);
+ }
+ }
+ __ bind(&no_bound_arguments);
+}
+
+} // namespace
+
+// static
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+ TailCallMode tail_call_mode) {
+ // ----------- S t a t e -------------
+ // -- r2 : the number of arguments (not including the receiver)
+ // -- r3 : the function to call (checked to be a JSBoundFunction)
+ // -----------------------------------
+ __ AssertBoundFunction(r3);
+
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, r2, r5, r6, r7);
+ }
+
+ // Patch the receiver to [[BoundThis]].
+ __ LoadP(ip, FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset));
+ __ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2));
+ __ StoreP(ip, MemOperand(sp, r1));
+
+ // Push the [[BoundArguments]] onto the stack.
+ Generate_PushBoundArguments(masm);
+
+ // Call the [[BoundTargetFunction]] via the Call builtin.
+ __ LoadP(r3,
+ FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ mov(ip, Operand(ExternalReference(Builtins::kCall_ReceiverIsAny,
+ masm->isolate())));
+ __ LoadP(ip, MemOperand(ip));
+ __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(ip);
+}
+
+// static
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
+ // ----------- S t a t e -------------
+ // -- r2 : the number of arguments (not including the receiver)
+ // -- r3 : the target to call (can be any Object).
+ // -----------------------------------
+
+ Label non_callable, non_function, non_smi;
+ __ JumpIfSmi(r3, &non_callable);
+ __ bind(&non_smi);
+ __ CompareObjectType(r3, r6, r7, JS_FUNCTION_TYPE);
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
+ RelocInfo::CODE_TARGET, eq);
+ __ CmpP(r7, Operand(JS_BOUND_FUNCTION_TYPE));
+ __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
+ RelocInfo::CODE_TARGET, eq);
+
+ // Check if target has a [[Call]] internal method.
+ __ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
+ __ TestBit(r6, Map::kIsCallable);
+ __ beq(&non_callable);
+
+ __ CmpP(r7, Operand(JS_PROXY_TYPE));
+ __ bne(&non_function);
+
+ // 0. Prepare for tail call if necessary.
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, r2, r5, r6, r7);
+ }
+
+ // 1. Runtime fallback for Proxy [[Call]].
+ __ Push(r3);
+ // Increase the arguments size to include the pushed function and the
+ // existing receiver on the stack.
+ __ AddP(r2, r2, Operand(2));
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
+
+ // 2. Call to something else, which might have a [[Call]] internal method (if
+ // not we raise an exception).
+ __ bind(&non_function);
+ // Overwrite the original receiver the (original) target.
+ __ ShiftLeftP(r7, r2, Operand(kPointerSizeLog2));
+ __ StoreP(r3, MemOperand(sp, r7));
+ // Let the "call_as_function_delegate" take care of the rest.
+ __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r3);
+ __ Jump(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
+ RelocInfo::CODE_TARGET);
+
+ // 3. Call to something that is not callable.
+ __ bind(&non_callable);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r3);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable);
+ }
+}
+
+// static
+void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : the number of arguments (not including the receiver)
+ // -- r3 : the constructor to call (checked to be a JSFunction)
+ // -- r5 : the new target (checked to be a constructor)
+ // -----------------------------------
+ __ AssertFunction(r3);
+
+ // Calling convention for function specific ConstructStubs require
+ // r4 to contain either an AllocationSite or undefined.
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kConstructStubOffset));
+ __ AddP(ip, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(ip);
+}
+
+// static
+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : the number of arguments (not including the receiver)
+ // -- r3 : the function to call (checked to be a JSBoundFunction)
+ // -- r5 : the new target (checked to be a constructor)
+ // -----------------------------------
+ __ AssertBoundFunction(r3);
+
+ // Push the [[BoundArguments]] onto the stack.
+ Generate_PushBoundArguments(masm);
+
+ // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
+ Label skip;
+ __ CmpP(r3, r5);
+ __ bne(&skip);
+ __ LoadP(r5,
+ FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ bind(&skip);
+
+ // Construct the [[BoundTargetFunction]] via the Construct builtin.
+ __ LoadP(r3,
+ FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ mov(ip, Operand(ExternalReference(Builtins::kConstruct, masm->isolate())));
+ __ LoadP(ip, MemOperand(ip));
+ __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(ip);
+}
+
+// static
+void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : the number of arguments (not including the receiver)
+ // -- r3 : the constructor to call (checked to be a JSProxy)
+ // -- r5 : the new target (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -----------------------------------
+
+ // Call into the Runtime for Proxy [[Construct]].
+ __ Push(r3, r5);
+ // Include the pushed new_target, constructor and the receiver.
+ __ AddP(r2, r2, Operand(3));
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
+}
+
+// static
+void Builtins::Generate_Construct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : the number of arguments (not including the receiver)
+ // -- r3 : the constructor to call (can be any Object)
+ // -- r5 : the new target (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -----------------------------------
+
+ // Check if target is a Smi.
+ Label non_constructor;
+ __ JumpIfSmi(r3, &non_constructor);
+
+ // Dispatch based on instance type.
+ __ CompareObjectType(r3, r6, r7, JS_FUNCTION_TYPE);
+ __ Jump(masm->isolate()->builtins()->ConstructFunction(),
+ RelocInfo::CODE_TARGET, eq);
+
+ // Check if target has a [[Construct]] internal method.
+ __ LoadlB(r4, FieldMemOperand(r6, Map::kBitFieldOffset));
+ __ TestBit(r4, Map::kIsConstructor);
+ __ beq(&non_constructor);
+
+ // Only dispatch to bound functions after checking whether they are
+ // constructors.
+ __ CmpP(r7, Operand(JS_BOUND_FUNCTION_TYPE));
+ __ Jump(masm->isolate()->builtins()->ConstructBoundFunction(),
+ RelocInfo::CODE_TARGET, eq);
+
+ // Only dispatch to proxies after checking whether they are constructors.
+ __ CmpP(r7, Operand(JS_PROXY_TYPE));
+ __ Jump(masm->isolate()->builtins()->ConstructProxy(), RelocInfo::CODE_TARGET,
+ eq);
+
+ // Called Construct on an exotic Object with a [[Construct]] internal method.
+ {
+ // Overwrite the original receiver with the (original) target.
+ __ ShiftLeftP(r7, r2, Operand(kPointerSizeLog2));
+ __ StoreP(r3, MemOperand(sp, r7));
+ // Let the "call_as_constructor_delegate" take care of the rest.
+ __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, r3);
+ __ Jump(masm->isolate()->builtins()->CallFunction(),
+ RelocInfo::CODE_TARGET);
+ }
+
+ // Called Construct on an Object that doesn't have a [[Construct]] internal
+ // method.
+ __ bind(&non_constructor);
+ __ Jump(masm->isolate()->builtins()->ConstructedNonConstructable(),
+ RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : actual number of arguments
+ // -- r3 : function (passed through to callee)
+ // -- r4 : expected number of arguments
+ // -- r5 : new target (passed through to callee)
+ // -----------------------------------
+
+ Label invoke, dont_adapt_arguments, stack_overflow;
+
+ Label enough, too_few;
+ __ LoadP(ip, FieldMemOperand(r3, JSFunction::kCodeEntryOffset));
+ __ CmpP(r2, r4);
+ __ blt(&too_few);
+ __ CmpP(r4, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ __ beq(&dont_adapt_arguments);
+
+ { // Enough parameters: actual >= expected
+ __ bind(&enough);
+ EnterArgumentsAdaptorFrame(masm);
+ ArgumentAdaptorStackCheck(masm, &stack_overflow);
+
+ // Calculate copy start address into r2 and copy end address into r6.
+ // r2: actual number of arguments as a smi
+ // r3: function
+ // r4: expected number of arguments
+ // r5: new target (passed through to callee)
+ // ip: code entry to call
+ __ SmiToPtrArrayOffset(r2, r2);
+ __ AddP(r2, fp);
+ // adjust for return address and receiver
+ __ AddP(r2, r2, Operand(2 * kPointerSize));
+ __ ShiftLeftP(r6, r4, Operand(kPointerSizeLog2));
+ __ SubP(r6, r2, r6);
+
+ // Copy the arguments (including the receiver) to the new stack frame.
+ // r2: copy start address
+ // r3: function
+ // r4: expected number of arguments
+ // r5: new target (passed through to callee)
+ // r6: copy end address
+ // ip: code entry to call
+
+ Label copy;
+ __ bind(&copy);
+ __ LoadP(r0, MemOperand(r2, 0));
+ __ push(r0);
+ __ CmpP(r2, r6); // Compare before moving to next argument.
+ __ lay(r2, MemOperand(r2, -kPointerSize));
+ __ bne(&copy);
+
+ __ b(&invoke);
+ }
+
+ { // Too few parameters: Actual < expected
+ __ bind(&too_few);
+
+ EnterArgumentsAdaptorFrame(masm);
+ ArgumentAdaptorStackCheck(masm, &stack_overflow);
+
+ // Calculate copy start address into r0 and copy end address is fp.
+ // r2: actual number of arguments as a smi
+ // r3: function
+ // r4: expected number of arguments
+ // r5: new target (passed through to callee)
+ // ip: code entry to call
+ __ SmiToPtrArrayOffset(r2, r2);
+ __ lay(r2, MemOperand(r2, fp));
+
+ // Copy the arguments (including the receiver) to the new stack frame.
+ // r2: copy start address
+ // r3: function
+ // r4: expected number of arguments
+ // r5: new target (passed through to callee)
+ // ip: code entry to call
+ Label copy;
+ __ bind(&copy);
+ // Adjust load for return address and receiver.
+ __ LoadP(r0, MemOperand(r2, 2 * kPointerSize));
+ __ push(r0);
+ __ CmpP(r2, fp); // Compare before moving to next argument.
+ __ lay(r2, MemOperand(r2, -kPointerSize));
+ __ bne(&copy);
+
+ // Fill the remaining expected arguments with undefined.
+ // r3: function
+ // r4: expected number of argumentus
+ // ip: code entry to call
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+ __ ShiftLeftP(r6, r4, Operand(kPointerSizeLog2));
+ __ SubP(r6, fp, r6);
+ // Adjust for frame.
+ __ SubP(r6, r6, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+ 2 * kPointerSize));
+
+ Label fill;
+ __ bind(&fill);
+ __ push(r0);
+ __ CmpP(sp, r6);
+ __ bne(&fill);
+ }
+
+ // Call the entry point.
+ __ bind(&invoke);
+ __ LoadRR(r2, r4);
+ // r2 : expected number of arguments
+ // r3 : function (passed through to callee)
+ // r5 : new target (passed through to callee)
+ __ CallJSEntry(ip);
+
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+
+ // Exit frame and return.
+ LeaveArgumentsAdaptorFrame(masm);
+ __ Ret();
+
+ // -------------------------------------------
+ // Dont adapt arguments.
+ // -------------------------------------------
+ __ bind(&dont_adapt_arguments);
+ __ JumpToJSEntry(ip);
+
+ __ bind(&stack_overflow);
+ {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ __ bkpt(0);
+ }
+}
+
+#undef __
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_S390
diff --git a/deps/v8/src/s390/code-stubs-s390.cc b/deps/v8/src/s390/code-stubs-s390.cc
new file mode 100644
index 0000000000..1c7d27b5ca
--- /dev/null
+++ b/deps/v8/src/s390/code-stubs-s390.cc
@@ -0,0 +1,5695 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/code-stubs.h"
+#include "src/api-arguments.h"
+#include "src/base/bits.h"
+#include "src/bootstrapper.h"
+#include "src/codegen.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
+#include "src/isolate.h"
+#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp-macro-assembler.h"
+#include "src/runtime/runtime.h"
+#include "src/s390/code-stubs-s390.h"
+
+namespace v8 {
+namespace internal {
+
+static void InitializeArrayConstructorDescriptor(
+ Isolate* isolate, CodeStubDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ Address deopt_handler =
+ Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
+
+ if (constant_stack_parameter_count == 0) {
+ descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
+ JS_FUNCTION_STUB_MODE);
+ } else {
+ descriptor->Initialize(r2, deopt_handler, constant_stack_parameter_count,
+ JS_FUNCTION_STUB_MODE);
+ }
+}
+
+static void InitializeInternalArrayConstructorDescriptor(
+ Isolate* isolate, CodeStubDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ Address deopt_handler =
+ Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
+
+ if (constant_stack_parameter_count == 0) {
+ descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
+ JS_FUNCTION_STUB_MODE);
+ } else {
+ descriptor->Initialize(r2, deopt_handler, constant_stack_parameter_count,
+ JS_FUNCTION_STUB_MODE);
+ }
+}
+
+void ArrayNoArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
+}
+
+void ArraySingleArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
+}
+
+void ArrayNArgumentsConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
+}
+
+void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
+}
+
+void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+ Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
+ descriptor->Initialize(r2, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
+}
+
+void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
+}
+
+void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
+}
+
+#define __ ACCESS_MASM(masm)
+
+static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
+ Condition cond);
+static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
+ Register rhs, Label* lhs_not_nan,
+ Label* slow, bool strict);
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
+ Register rhs);
+
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
+ ExternalReference miss) {
+ // Update the static counter each time a new code stub is generated.
+ isolate()->counters()->code_stubs()->Increment();
+
+ CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
+ int param_count = descriptor.GetRegisterParameterCount();
+ {
+ // Call the runtime system in a fresh internal frame.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ DCHECK(param_count == 0 ||
+ r2.is(descriptor.GetRegisterParameter(param_count - 1)));
+ // Push arguments
+ for (int i = 0; i < param_count; ++i) {
+ __ push(descriptor.GetRegisterParameter(i));
+ }
+ __ CallExternalReference(miss, param_count);
+ }
+
+ __ Ret();
+}
+
+void DoubleToIStub::Generate(MacroAssembler* masm) {
+ Label out_of_range, only_low, negate, done, fastpath_done;
+ Register input_reg = source();
+ Register result_reg = destination();
+ DCHECK(is_truncating());
+
+ int double_offset = offset();
+
+ // Immediate values for this stub fit in instructions, so it's safe to use ip.
+ Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg);
+ Register scratch_low =
+ GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
+ Register scratch_high =
+ GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
+ DoubleRegister double_scratch = kScratchDoubleReg;
+
+ __ push(scratch);
+ // Account for saved regs if input is sp.
+ if (input_reg.is(sp)) double_offset += kPointerSize;
+
+ if (!skip_fastpath()) {
+ // Load double input.
+ __ LoadDouble(double_scratch, MemOperand(input_reg, double_offset));
+
+ // Do fast-path convert from double to int.
+ __ ConvertDoubleToInt64(double_scratch,
+#if !V8_TARGET_ARCH_S390X
+ scratch,
+#endif
+ result_reg, d0);
+
+// Test for overflow
+#if V8_TARGET_ARCH_S390X
+ __ TestIfInt32(result_reg, r0);
+#else
+ __ TestIfInt32(scratch, result_reg, r0);
+#endif
+ __ beq(&fastpath_done, Label::kNear);
+ }
+
+ __ Push(scratch_high, scratch_low);
+ // Account for saved regs if input is sp.
+ if (input_reg.is(sp)) double_offset += 2 * kPointerSize;
+
+ __ LoadlW(scratch_high,
+ MemOperand(input_reg, double_offset + Register::kExponentOffset));
+ __ LoadlW(scratch_low,
+ MemOperand(input_reg, double_offset + Register::kMantissaOffset));
+
+ __ ExtractBitMask(scratch, scratch_high, HeapNumber::kExponentMask);
+ // Load scratch with exponent - 1. This is faster than loading
+ // with exponent because Bias + 1 = 1024 which is a *S390* immediate value.
+ STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
+ __ SubP(scratch, Operand(HeapNumber::kExponentBias + 1));
+ // If exponent is greater than or equal to 84, the 32 less significant
+ // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits),
+ // the result is 0.
+ // Compare exponent with 84 (compare exponent - 1 with 83).
+ __ CmpP(scratch, Operand(83));
+ __ bge(&out_of_range, Label::kNear);
+
+ // If we reach this code, 31 <= exponent <= 83.
+ // So, we don't have to handle cases where 0 <= exponent <= 20 for
+ // which we would need to shift right the high part of the mantissa.
+ // Scratch contains exponent - 1.
+ // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
+ __ Load(r0, Operand(51));
+ __ SubP(scratch, r0, scratch);
+ __ CmpP(scratch, Operand::Zero());
+ __ ble(&only_low, Label::kNear);
+ // 21 <= exponent <= 51, shift scratch_low and scratch_high
+ // to generate the result.
+ __ ShiftRight(scratch_low, scratch_low, scratch);
+ // Scratch contains: 52 - exponent.
+ // We needs: exponent - 20.
+ // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
+ __ Load(r0, Operand(32));
+ __ SubP(scratch, r0, scratch);
+ __ ExtractBitMask(result_reg, scratch_high, HeapNumber::kMantissaMask);
+ // Set the implicit 1 before the mantissa part in scratch_high.
+ STATIC_ASSERT(HeapNumber::kMantissaBitsInTopWord >= 16);
+ __ Load(r0, Operand(1 << ((HeapNumber::kMantissaBitsInTopWord)-16)));
+ __ ShiftLeftP(r0, r0, Operand(16));
+ __ OrP(result_reg, result_reg, r0);
+ __ ShiftLeft(r0, result_reg, scratch);
+ __ OrP(result_reg, scratch_low, r0);
+ __ b(&negate, Label::kNear);
+
+ __ bind(&out_of_range);
+ __ mov(result_reg, Operand::Zero());
+ __ b(&done, Label::kNear);
+
+ __ bind(&only_low);
+ // 52 <= exponent <= 83, shift only scratch_low.
+ // On entry, scratch contains: 52 - exponent.
+ __ LoadComplementRR(scratch, scratch);
+ __ ShiftLeft(result_reg, scratch_low, scratch);
+
+ __ bind(&negate);
+ // If input was positive, scratch_high ASR 31 equals 0 and
+ // scratch_high LSR 31 equals zero.
+ // New result = (result eor 0) + 0 = result.
+ // If the input was negative, we have to negate the result.
+ // Input_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
+ // New result = (result eor 0xffffffff) + 1 = 0 - result.
+ __ ShiftRightArith(r0, scratch_high, Operand(31));
+#if V8_TARGET_ARCH_S390X
+ __ lgfr(r0, r0);
+ __ ShiftRightP(r0, r0, Operand(32));
+#endif
+ __ XorP(result_reg, r0);
+ __ ShiftRight(r0, scratch_high, Operand(31));
+ __ AddP(result_reg, r0);
+
+ __ bind(&done);
+ __ Pop(scratch_high, scratch_low);
+
+ __ bind(&fastpath_done);
+ __ pop(scratch);
+
+ __ Ret();
+}
+
+// Handle the case where the lhs and rhs are the same object.
+// Equality is almost reflexive (everything but NaN), so this is a test
+// for "identity and not NaN".
+static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
+ Condition cond) {
+ Label not_identical;
+ Label heap_number, return_equal;
+ __ CmpP(r2, r3);
+ __ bne(&not_identical);
+
+ // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+ // so we do the second best thing - test it ourselves.
+ // They are both equal and they are not both Smis so both of them are not
+ // Smis. If it's not a heap number, then return equal.
+ if (cond == lt || cond == gt) {
+ // Call runtime on identical JSObjects.
+ __ CompareObjectType(r2, r6, r6, FIRST_JS_RECEIVER_TYPE);
+ __ bge(slow);
+ // Call runtime on identical symbols since we need to throw a TypeError.
+ __ CmpP(r6, Operand(SYMBOL_TYPE));
+ __ beq(slow);
+ // Call runtime on identical SIMD values since we must throw a TypeError.
+ __ CmpP(r6, Operand(SIMD128_VALUE_TYPE));
+ __ beq(slow);
+ } else {
+ __ CompareObjectType(r2, r6, r6, HEAP_NUMBER_TYPE);
+ __ beq(&heap_number);
+ // Comparing JS objects with <=, >= is complicated.
+ if (cond != eq) {
+ __ CmpP(r6, Operand(FIRST_JS_RECEIVER_TYPE));
+ __ bge(slow);
+ // Call runtime on identical symbols since we need to throw a TypeError.
+ __ CmpP(r6, Operand(SYMBOL_TYPE));
+ __ beq(slow);
+ // Call runtime on identical SIMD values since we must throw a TypeError.
+ __ CmpP(r6, Operand(SIMD128_VALUE_TYPE));
+ __ beq(slow);
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but
+ // (undefined <= undefined) == false! See ECMAScript 11.8.5.
+ if (cond == le || cond == ge) {
+ __ CmpP(r6, Operand(ODDBALL_TYPE));
+ __ bne(&return_equal);
+ __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ bne(&return_equal);
+ if (cond == le) {
+ // undefined <= undefined should fail.
+ __ LoadImmP(r2, Operand(GREATER));
+ } else {
+ // undefined >= undefined should fail.
+ __ LoadImmP(r2, Operand(LESS));
+ }
+ __ Ret();
+ }
+ }
+ }
+
+ __ bind(&return_equal);
+ if (cond == lt) {
+ __ LoadImmP(r2, Operand(GREATER)); // Things aren't less than themselves.
+ } else if (cond == gt) {
+ __ LoadImmP(r2, Operand(LESS)); // Things aren't greater than themselves.
+ } else {
+ __ LoadImmP(r2, Operand(EQUAL)); // Things are <=, >=, ==, === themselves
+ }
+ __ Ret();
+
+ // For less and greater we don't have to check for NaN since the result of
+ // x < x is false regardless. For the others here is some code to check
+ // for NaN.
+ if (cond != lt && cond != gt) {
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if it's
+ // not NaN.
+
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // Read top bits of double representation (second word of value).
+ __ LoadlW(r4, FieldMemOperand(r2, HeapNumber::kExponentOffset));
+ // Test that exponent bits are all set.
+ STATIC_ASSERT(HeapNumber::kExponentMask == 0x7ff00000u);
+ __ ExtractBitMask(r5, r4, HeapNumber::kExponentMask);
+ __ CmpLogicalP(r5, Operand(0x7ff));
+ __ bne(&return_equal);
+
+ // Shift out flag and all exponent bits, retaining only mantissa.
+ __ sll(r4, Operand(HeapNumber::kNonMantissaBitsInTopWord));
+ // Or with all low-bits of mantissa.
+ __ LoadlW(r5, FieldMemOperand(r2, HeapNumber::kMantissaOffset));
+ __ OrP(r2, r5, r4);
+ __ CmpP(r2, Operand::Zero());
+ // For equal we already have the right value in r2: Return zero (equal)
+ // if all bits in mantissa are zero (it's an Infinity) and non-zero if
+ // not (it's a NaN). For <= and >= we need to load r0 with the failing
+ // value if it's a NaN.
+ if (cond != eq) {
+ Label not_equal;
+ __ bne(&not_equal, Label::kNear);
+ // All-zero means Infinity means equal.
+ __ Ret();
+ __ bind(&not_equal);
+ if (cond == le) {
+ __ LoadImmP(r2, Operand(GREATER)); // NaN <= NaN should fail.
+ } else {
+ __ LoadImmP(r2, Operand(LESS)); // NaN >= NaN should fail.
+ }
+ }
+ __ Ret();
+ }
+ // No fall through here.
+
+ __ bind(&not_identical);
+}
+
+// See comment at call site.
+static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
+ Register rhs, Label* lhs_not_nan,
+ Label* slow, bool strict) {
+ DCHECK((lhs.is(r2) && rhs.is(r3)) || (lhs.is(r3) && rhs.is(r2)));
+
+ Label rhs_is_smi;
+ __ JumpIfSmi(rhs, &rhs_is_smi);
+
+ // Lhs is a Smi. Check whether the rhs is a heap number.
+ __ CompareObjectType(rhs, r5, r6, HEAP_NUMBER_TYPE);
+ if (strict) {
+ // If rhs is not a number and lhs is a Smi then strict equality cannot
+ // succeed. Return non-equal
+ // If rhs is r2 then there is already a non zero value in it.
+ Label skip;
+ __ beq(&skip, Label::kNear);
+ if (!rhs.is(r2)) {
+ __ mov(r2, Operand(NOT_EQUAL));
+ }
+ __ Ret();
+ __ bind(&skip);
+ } else {
+ // Smi compared non-strictly with a non-Smi non-heap-number. Call
+ // the runtime.
+ __ bne(slow);
+ }
+
+ // Lhs is a smi, rhs is a number.
+ // Convert lhs to a double in d7.
+ __ SmiToDouble(d7, lhs);
+ // Load the double from rhs, tagged HeapNumber r2, to d6.
+ __ LoadDouble(d6, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+
+ // We now have both loaded as doubles but we can skip the lhs nan check
+ // since it's a smi.
+ __ b(lhs_not_nan);
+
+ __ bind(&rhs_is_smi);
+ // Rhs is a smi. Check whether the non-smi lhs is a heap number.
+ __ CompareObjectType(lhs, r6, r6, HEAP_NUMBER_TYPE);
+ if (strict) {
+ // If lhs is not a number and rhs is a smi then strict equality cannot
+ // succeed. Return non-equal.
+ // If lhs is r2 then there is already a non zero value in it.
+ Label skip;
+ __ beq(&skip, Label::kNear);
+ if (!lhs.is(r2)) {
+ __ mov(r2, Operand(NOT_EQUAL));
+ }
+ __ Ret();
+ __ bind(&skip);
+ } else {
+ // Smi compared non-strictly with a non-smi non-heap-number. Call
+ // the runtime.
+ __ bne(slow);
+ }
+
+ // Rhs is a smi, lhs is a heap number.
+ // Load the double from lhs, tagged HeapNumber r3, to d7.
+ __ LoadDouble(d7, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ // Convert rhs to a double in d6.
+ __ SmiToDouble(d6, rhs);
+ // Fall through to both_loaded_as_doubles.
+}
+
+// See comment at call site.
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
+ Register rhs) {
+ DCHECK((lhs.is(r2) && rhs.is(r3)) || (lhs.is(r3) && rhs.is(r2)));
+
+ // If either operand is a JS object or an oddball value, then they are
+ // not equal since their pointers are different.
+ // There is no test for undetectability in strict equality.
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ Label first_non_object;
+ // Get the type of the first operand into r4 and compare it with
+ // FIRST_JS_RECEIVER_TYPE.
+ __ CompareObjectType(rhs, r4, r4, FIRST_JS_RECEIVER_TYPE);
+ __ blt(&first_non_object, Label::kNear);
+
+ // Return non-zero (r2 is not zero)
+ Label return_not_equal;
+ __ bind(&return_not_equal);
+ __ Ret();
+
+ __ bind(&first_non_object);
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpP(r4, Operand(ODDBALL_TYPE));
+ __ beq(&return_not_equal);
+
+ __ CompareObjectType(lhs, r5, r5, FIRST_JS_RECEIVER_TYPE);
+ __ bge(&return_not_equal);
+
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpP(r5, Operand(ODDBALL_TYPE));
+ __ beq(&return_not_equal);
+
+ // Now that we have the types we might as well check for
+ // internalized-internalized.
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ __ OrP(r4, r4, r5);
+ __ AndP(r0, r4, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+ __ beq(&return_not_equal);
+}
+
+// See comment at call site.
+static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, Register lhs,
+ Register rhs,
+ Label* both_loaded_as_doubles,
+ Label* not_heap_numbers, Label* slow) {
+ DCHECK((lhs.is(r2) && rhs.is(r3)) || (lhs.is(r3) && rhs.is(r2)));
+
+ __ CompareObjectType(rhs, r5, r4, HEAP_NUMBER_TYPE);
+ __ bne(not_heap_numbers);
+ __ LoadP(r4, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ CmpP(r4, r5);
+ __ bne(slow); // First was a heap number, second wasn't. Go slow case.
+
+ // Both are heap numbers. Load them up then jump to the code we have
+ // for that.
+ __ LoadDouble(d6, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ __ LoadDouble(d7, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+
+ __ b(both_loaded_as_doubles);
+}
+
+// Fast negative check for internalized-to-internalized equality or receiver
+// equality. Also handles the undetectable receiver to null/undefined
+// comparison.
+static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
+ Register lhs, Register rhs,
+ Label* possible_strings,
+ Label* runtime_call) {
+ DCHECK((lhs.is(r2) && rhs.is(r3)) || (lhs.is(r3) && rhs.is(r2)));
+
+ // r4 is object type of rhs.
+ Label object_test, return_equal, return_unequal, undetectable;
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ __ mov(r0, Operand(kIsNotStringMask));
+ __ AndP(r0, r4);
+ __ bne(&object_test, Label::kNear);
+ __ mov(r0, Operand(kIsNotInternalizedMask));
+ __ AndP(r0, r4);
+ __ bne(possible_strings);
+ __ CompareObjectType(lhs, r5, r5, FIRST_NONSTRING_TYPE);
+ __ bge(runtime_call);
+ __ mov(r0, Operand(kIsNotInternalizedMask));
+ __ AndP(r0, r5);
+ __ bne(possible_strings);
+
+ // Both are internalized. We already checked they weren't the same pointer so
+ // they are not equal. Return non-equal by returning the non-zero object
+ // pointer in r2.
+ __ Ret();
+
+ __ bind(&object_test);
+ __ LoadP(r4, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ LoadP(r5, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ LoadlB(r6, FieldMemOperand(r4, Map::kBitFieldOffset));
+ __ LoadlB(r7, FieldMemOperand(r5, Map::kBitFieldOffset));
+ __ AndP(r0, r6, Operand(1 << Map::kIsUndetectable));
+ __ bne(&undetectable);
+ __ AndP(r0, r7, Operand(1 << Map::kIsUndetectable));
+ __ bne(&return_unequal);
+
+ __ CompareInstanceType(r4, r4, FIRST_JS_RECEIVER_TYPE);
+ __ blt(runtime_call);
+ __ CompareInstanceType(r5, r5, FIRST_JS_RECEIVER_TYPE);
+ __ blt(runtime_call);
+
+ __ bind(&return_unequal);
+ // Return non-equal by returning the non-zero object pointer in r2.
+ __ Ret();
+
+ __ bind(&undetectable);
+ __ AndP(r0, r7, Operand(1 << Map::kIsUndetectable));
+ __ beq(&return_unequal);
+
+ // If both sides are JSReceivers, then the result is false according to
+ // the HTML specification, which says that only comparisons with null or
+ // undefined are affected by special casing for document.all.
+ __ CompareInstanceType(r4, r4, ODDBALL_TYPE);
+ __ beq(&return_equal);
+ __ CompareInstanceType(r5, r5, ODDBALL_TYPE);
+ __ bne(&return_unequal);
+
+ __ bind(&return_equal);
+ __ LoadImmP(r2, Operand(EQUAL));
+ __ Ret();
+}
+
+static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
+ Register scratch,
+ CompareICState::State expected,
+ Label* fail) {
+ Label ok;
+ if (expected == CompareICState::SMI) {
+ __ JumpIfNotSmi(input, fail);
+ } else if (expected == CompareICState::NUMBER) {
+ __ JumpIfSmi(input, &ok);
+ __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
+ DONT_DO_SMI_CHECK);
+ }
+ // We could be strict about internalized/non-internalized here, but as long as
+ // hydrogen doesn't care, the stub doesn't have to care either.
+ __ bind(&ok);
+}
+
+// On entry r3 and r4 are the values to be compared.
+// On exit r2 is 0, positive or negative to indicate the result of
+// the comparison.
+void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
+ Register lhs = r3;
+ Register rhs = r2;
+ Condition cc = GetCondition();
+
+ Label miss;
+ CompareICStub_CheckInputType(masm, lhs, r4, left(), &miss);
+ CompareICStub_CheckInputType(masm, rhs, r5, right(), &miss);
+
+ Label slow; // Call builtin.
+ Label not_smis, both_loaded_as_doubles, lhs_not_nan;
+
+ Label not_two_smis, smi_done;
+ __ OrP(r4, r3, r2);
+ __ JumpIfNotSmi(r4, &not_two_smis);
+ __ SmiUntag(r3);
+ __ SmiUntag(r2);
+ __ SubP(r2, r3, r2);
+ __ Ret();
+ __ bind(&not_two_smis);
+
+ // NOTICE! This code is only reached after a smi-fast-case check, so
+ // it is certain that at least one operand isn't a smi.
+
+ // Handle the case where the objects are identical. Either returns the answer
+ // or goes to slow. Only falls through if the objects were not identical.
+ EmitIdenticalObjectComparison(masm, &slow, cc);
+
+ // If either is a Smi (we know that not both are), then they can only
+ // be strictly equal if the other is a HeapNumber.
+ STATIC_ASSERT(kSmiTag == 0);
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
+ __ AndP(r4, lhs, rhs);
+ __ JumpIfNotSmi(r4, &not_smis);
+ // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
+ // 1) Return the answer.
+ // 2) Go to slow.
+ // 3) Fall through to both_loaded_as_doubles.
+ // 4) Jump to lhs_not_nan.
+ // In cases 3 and 4 we have found out we were dealing with a number-number
+ // comparison. The double values of the numbers have been loaded
+ // into d7 and d6.
+ EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());
+
+ __ bind(&both_loaded_as_doubles);
+ // The arguments have been converted to doubles and stored in d6 and d7
+ __ bind(&lhs_not_nan);
+ Label no_nan;
+ __ cdbr(d7, d6);
+
+ Label nan, equal, less_than;
+ __ bunordered(&nan);
+ __ beq(&equal, Label::kNear);
+ __ blt(&less_than, Label::kNear);
+ __ LoadImmP(r2, Operand(GREATER));
+ __ Ret();
+ __ bind(&equal);
+ __ LoadImmP(r2, Operand(EQUAL));
+ __ Ret();
+ __ bind(&less_than);
+ __ LoadImmP(r2, Operand(LESS));
+ __ Ret();
+
+ __ bind(&nan);
+ // If one of the sides was a NaN then the v flag is set. Load r2 with
+ // whatever it takes to make the comparison fail, since comparisons with NaN
+ // always fail.
+ if (cc == lt || cc == le) {
+ __ LoadImmP(r2, Operand(GREATER));
+ } else {
+ __ LoadImmP(r2, Operand(LESS));
+ }
+ __ Ret();
+
+ __ bind(&not_smis);
+ // At this point we know we are dealing with two different objects,
+ // and neither of them is a Smi. The objects are in rhs_ and lhs_.
+ if (strict()) {
+ // This returns non-equal for some object types, or falls through if it
+ // was not lucky.
+ EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
+ }
+
+ Label check_for_internalized_strings;
+ Label flat_string_check;
+ // Check for heap-number-heap-number comparison. Can jump to slow case,
+ // or load both doubles into r2, r3, r4, r5 and jump to the code that handles
+ // that case. If the inputs are not doubles then jumps to
+ // check_for_internalized_strings.
+ // In this case r4 will contain the type of rhs_. Never falls through.
+ EmitCheckForTwoHeapNumbers(masm, lhs, rhs, &both_loaded_as_doubles,
+ &check_for_internalized_strings,
+ &flat_string_check);
+
+ __ bind(&check_for_internalized_strings);
+ // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
+ // internalized strings.
+ if (cc == eq && !strict()) {
+ // Returns an answer for two internalized strings or two detectable objects.
+ // Otherwise jumps to string case or not both strings case.
+ // Assumes that r4 is the type of rhs_ on entry.
+ EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, &flat_string_check,
+ &slow);
+ }
+
+ // Check for both being sequential one-byte strings,
+ // and inline if that is the case.
+ __ bind(&flat_string_check);
+
+ __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, r4, r5, &slow);
+
+ __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r4,
+ r5);
+ if (cc == eq) {
+ StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, r4, r5);
+ } else {
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, r4, r5, r6);
+ }
+ // Never falls through to here.
+
+ __ bind(&slow);
+
+ if (cc == eq) {
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(lhs, rhs);
+ __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
+ }
+ // Turn true into 0 and false into some non-zero value.
+ STATIC_ASSERT(EQUAL == 0);
+ __ LoadRoot(r3, Heap::kTrueValueRootIndex);
+ __ SubP(r2, r2, r3);
+ __ Ret();
+ } else {
+ __ Push(lhs, rhs);
+ int ncr; // NaN compare result
+ if (cc == lt || cc == le) {
+ ncr = GREATER;
+ } else {
+ DCHECK(cc == gt || cc == ge); // remaining cases
+ ncr = LESS;
+ }
+ __ LoadSmiLiteral(r2, Smi::FromInt(ncr));
+ __ push(r2);
+
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ TailCallRuntime(Runtime::kCompare);
+ }
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+ // We don't allow a GC during a store buffer overflow so there is no need to
+ // store the registers in any particular way, but we do have to store and
+ // restore them.
+ __ MultiPush(kJSCallerSaved | r14.bit());
+ if (save_doubles()) {
+ __ MultiPushDoubles(kCallerSavedDoubles);
+ }
+ const int argument_count = 1;
+ const int fp_argument_count = 0;
+ const Register scratch = r3;
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
+ __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
+ __ CallCFunction(ExternalReference::store_buffer_overflow_function(isolate()),
+ argument_count);
+ if (save_doubles()) {
+ __ MultiPopDoubles(kCallerSavedDoubles);
+ }
+ __ MultiPop(kJSCallerSaved | r14.bit());
+ __ Ret();
+}
+
+void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
+ __ PushSafepointRegisters();
+ __ b(r14);
+}
+
+void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
+ __ PopSafepointRegisters();
+ __ b(r14);
+}
+
+void MathPowStub::Generate(MacroAssembler* masm) {
+ const Register base = r3;
+ const Register exponent = MathPowTaggedDescriptor::exponent();
+ DCHECK(exponent.is(r4));
+ const Register heapnumbermap = r7;
+ const Register heapnumber = r2;
+ const DoubleRegister double_base = d1;
+ const DoubleRegister double_exponent = d2;
+ const DoubleRegister double_result = d3;
+ const DoubleRegister double_scratch = d0;
+ const Register scratch = r1;
+ const Register scratch2 = r9;
+
+ Label call_runtime, done, int_exponent;
+ if (exponent_type() == ON_STACK) {
+ Label base_is_smi, unpack_exponent;
+ // The exponent and base are supplied as arguments on the stack.
+ // This can only happen if the stub is called from non-optimized code.
+ // Load input parameters from stack to double registers.
+ __ LoadP(base, MemOperand(sp, 1 * kPointerSize));
+ __ LoadP(exponent, MemOperand(sp, 0 * kPointerSize));
+
+ __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
+
+ __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
+ __ LoadP(scratch, FieldMemOperand(base, JSObject::kMapOffset));
+ __ CmpP(scratch, heapnumbermap);
+ __ bne(&call_runtime);
+
+ __ LoadDouble(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
+ __ b(&unpack_exponent, Label::kNear);
+
+ __ bind(&base_is_smi);
+ __ ConvertIntToDouble(scratch, double_base);
+ __ bind(&unpack_exponent);
+
+ __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
+ __ LoadP(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
+ __ CmpP(scratch, heapnumbermap);
+ __ bne(&call_runtime);
+
+ __ LoadDouble(double_exponent,
+ FieldMemOperand(exponent, HeapNumber::kValueOffset));
+ } else if (exponent_type() == TAGGED) {
+ // Base is already in double_base.
+ __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
+
+ __ LoadDouble(double_exponent,
+ FieldMemOperand(exponent, HeapNumber::kValueOffset));
+ }
+
+ if (exponent_type() != INTEGER) {
+ // Detect integer exponents stored as double.
+ __ TryDoubleToInt32Exact(scratch, double_exponent, scratch2,
+ double_scratch);
+ __ beq(&int_exponent, Label::kNear);
+
+ if (exponent_type() == ON_STACK) {
+ // Detect square root case. Crankshaft detects constant +/-0.5 at
+ // compile time and uses DoMathPowHalf instead. We then skip this check
+ // for non-constant cases of +/-0.5 as these hardly occur.
+ Label not_plus_half, not_minus_inf1, not_minus_inf2;
+
+ // Test for 0.5.
+ __ LoadDoubleLiteral(double_scratch, 0.5, scratch);
+ __ cdbr(double_exponent, double_scratch);
+ __ bne(&not_plus_half, Label::kNear);
+
+ // Calculates square root of base. Check for the special case of
+ // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
+ __ LoadDoubleLiteral(double_scratch, -V8_INFINITY, scratch);
+ __ cdbr(double_base, double_scratch);
+ __ bne(&not_minus_inf1, Label::kNear);
+ __ lcdbr(double_result, double_scratch);
+ __ b(&done);
+ __ bind(&not_minus_inf1);
+
+ // Add +0 to convert -0 to +0.
+ __ ldr(double_scratch, double_base);
+ __ lzdr(kDoubleRegZero);
+ __ adbr(double_scratch, kDoubleRegZero);
+ __ sqdbr(double_result, double_scratch);
+ __ b(&done);
+
+ __ bind(&not_plus_half);
+ __ LoadDoubleLiteral(double_scratch, -0.5, scratch);
+ __ cdbr(double_exponent, double_scratch);
+ __ bne(&call_runtime);
+
+ // Calculates square root of base. Check for the special case of
+ // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
+ __ LoadDoubleLiteral(double_scratch, -V8_INFINITY, scratch);
+ __ cdbr(double_base, double_scratch);
+ __ bne(&not_minus_inf2, Label::kNear);
+ __ ldr(double_result, kDoubleRegZero);
+ __ b(&done);
+ __ bind(&not_minus_inf2);
+
+ // Add +0 to convert -0 to +0.
+ __ ldr(double_scratch, double_base);
+ __ lzdr(kDoubleRegZero);
+ __ adbr(double_scratch, kDoubleRegZero);
+ __ LoadDoubleLiteral(double_result, 1.0, scratch);
+ __ sqdbr(double_scratch, double_scratch);
+ __ ddbr(double_result, double_scratch);
+ __ b(&done);
+ }
+
+ __ push(r14);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ MovToFloatParameters(double_base, double_exponent);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(isolate()), 0, 2);
+ }
+ __ pop(r14);
+ __ MovFromFloatResult(double_result);
+ __ b(&done);
+ }
+
+ // Calculate power with integer exponent.
+ __ bind(&int_exponent);
+
+ // Get two copies of exponent in the registers scratch and exponent.
+ if (exponent_type() == INTEGER) {
+ __ LoadRR(scratch, exponent);
+ } else {
+ // Exponent has previously been stored into scratch as untagged integer.
+ __ LoadRR(exponent, scratch);
+ }
+ __ ldr(double_scratch, double_base); // Back up base.
+ __ LoadImmP(scratch2, Operand(1));
+ __ ConvertIntToDouble(scratch2, double_result);
+
+ // Get absolute value of exponent.
+ Label positive_exponent;
+ __ CmpP(scratch, Operand::Zero());
+ __ bge(&positive_exponent, Label::kNear);
+ __ LoadComplementRR(scratch, scratch);
+ __ bind(&positive_exponent);
+
+ Label while_true, no_carry, loop_end;
+ __ bind(&while_true);
+ __ mov(scratch2, Operand(1));
+ __ AndP(scratch2, scratch);
+ __ beq(&no_carry, Label::kNear);
+ __ mdbr(double_result, double_scratch);
+ __ bind(&no_carry);
+ __ ShiftRightArithP(scratch, scratch, Operand(1));
+ __ beq(&loop_end, Label::kNear);
+ __ mdbr(double_scratch, double_scratch);
+ __ b(&while_true);
+ __ bind(&loop_end);
+
+ __ CmpP(exponent, Operand::Zero());
+ __ bge(&done);
+
+ // get 1/double_result:
+ __ ldr(double_scratch, double_result);
+ __ LoadImmP(scratch2, Operand(1));
+ __ ConvertIntToDouble(scratch2, double_result);
+ __ ddbr(double_result, double_scratch);
+
+ // Test whether result is zero. Bail out to check for subnormal result.
+ // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
+ __ lzdr(kDoubleRegZero);
+ __ cdbr(double_result, kDoubleRegZero);
+ __ bne(&done, Label::kNear);
+ // double_exponent may not containe the exponent value if the input was a
+ // smi. We set it with exponent value before bailing out.
+ __ ConvertIntToDouble(exponent, double_exponent);
+
+ // Returning or bailing out.
+ if (exponent_type() == ON_STACK) {
+ // The arguments are still on the stack.
+ __ bind(&call_runtime);
+ __ TailCallRuntime(Runtime::kMathPowRT);
+
+ // The stub is called from non-optimized code, which expects the result
+ // as heap number in exponent.
+ __ bind(&done);
+ __ AllocateHeapNumber(heapnumber, scratch, scratch2, heapnumbermap,
+ &call_runtime);
+ __ StoreDouble(double_result,
+ FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
+ DCHECK(heapnumber.is(r2));
+ __ Ret(2);
+ } else {
+ __ push(r14);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ MovToFloatParameters(double_base, double_exponent);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(isolate()), 0, 2);
+ }
+ __ pop(r14);
+ __ MovFromFloatResult(double_result);
+
+ __ bind(&done);
+ __ Ret();
+ }
+}
+
+bool CEntryStub::NeedsImmovableCode() { return true; }
+
+void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
+ CEntryStub::GenerateAheadOfTime(isolate);
+ StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
+ StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
+ ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+ CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ CreateWeakCellStub::GenerateAheadOfTime(isolate);
+ BinaryOpICStub::GenerateAheadOfTime(isolate);
+ StoreRegistersStateStub::GenerateAheadOfTime(isolate);
+ RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
+ BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
+ StoreFastElementStub::GenerateAheadOfTime(isolate);
+ TypeofStub::GenerateAheadOfTime(isolate);
+}
+
+void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
+ StoreRegistersStateStub stub(isolate);
+ stub.GetCode();
+}
+
+void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
+ RestoreRegistersStateStub stub(isolate);
+ stub.GetCode();
+}
+
+void CodeStub::GenerateFPStubs(Isolate* isolate) {
+ SaveFPRegsMode mode = kSaveFPRegs;
+ CEntryStub(isolate, 1, mode).GetCode();
+ StoreBufferOverflowStub(isolate, mode).GetCode();
+ isolate->set_fp_stubs_generated(true);
+}
+
+void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
+ CEntryStub stub(isolate, 1, kDontSaveFPRegs);
+ stub.GetCode();
+}
+
+void CEntryStub::Generate(MacroAssembler* masm) {
+ // Called from JavaScript; parameters are on stack as if calling JS function.
+ // r2: number of arguments including receiver
+ // r3: pointer to builtin function
+ // fp: frame pointer (restored after C call)
+ // sp: stack pointer (restored as callee's sp after C call)
+ // cp: current context (C callee-saved)
+ //
+ // If argv_in_register():
+ // r4: pointer to the first argument
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+ __ LoadRR(r7, r3);
+
+ if (argv_in_register()) {
+ // Move argv into the correct register.
+ __ LoadRR(r3, r4);
+ } else {
+ // Compute the argv pointer.
+ __ ShiftLeftP(r3, r2, Operand(kPointerSizeLog2));
+ __ lay(r3, MemOperand(r3, sp, -kPointerSize));
+ }
+
+ // Enter the exit frame that transitions from JavaScript to C++.
+ FrameScope scope(masm, StackFrame::MANUAL);
+
+ // Need at least one extra slot for return address location.
+ int arg_stack_space = 1;
+
+ // Pass buffer for return value on stack if necessary
+ bool needs_return_buffer =
+ result_size() > 2 ||
+ (result_size() == 2 && !ABI_RETURNS_OBJECTPAIR_IN_REGS);
+ if (needs_return_buffer) {
+ arg_stack_space += result_size();
+ }
+
+#if V8_TARGET_ARCH_S390X
+ // 64-bit linux pass Argument object by reference not value
+ arg_stack_space += 2;
+#endif
+
+ __ EnterExitFrame(save_doubles(), arg_stack_space);
+
+ // Store a copy of argc, argv in callee-saved registers for later.
+ __ LoadRR(r6, r2);
+ __ LoadRR(r8, r3);
+ // r2, r6: number of arguments including receiver (C callee-saved)
+ // r3, r8: pointer to the first argument
+ // r7: pointer to builtin function (C callee-saved)
+
+ // Result returned in registers or stack, depending on result size and ABI.
+
+ Register isolate_reg = r4;
+ if (needs_return_buffer) {
+ // The return value is 16-byte non-scalar value.
+ // Use frame storage reserved by calling function to pass return
+ // buffer as implicit first argument in R2. Shfit original parameters
+ // by one register each.
+ __ LoadRR(r4, r3);
+ __ LoadRR(r3, r2);
+ __ la(r2, MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize));
+ isolate_reg = r5;
+ }
+ // Call C built-in.
+ __ mov(isolate_reg, Operand(ExternalReference::isolate_address(isolate())));
+
+ Register target = r7;
+
+ // To let the GC traverse the return address of the exit frames, we need to
+ // know where the return address is. The CEntryStub is unmovable, so
+ // we can store the address on the stack to be able to find it again and
+ // we never have to restore it, because it will not change.
+ {
+ Label return_label;
+ __ larl(r14, &return_label); // Generate the return addr of call later.
+ __ StoreP(r14, MemOperand(sp, kStackFrameRASlot * kPointerSize));
+
+ // zLinux ABI requires caller's frame to have sufficient space for callee
+ // preserved regsiter save area.
+ // __ lay(sp, MemOperand(sp, -kCalleeRegisterSaveAreaSize));
+ __ positions_recorder()->WriteRecordedPositions();
+ __ b(target);
+ __ bind(&return_label);
+ // __ la(sp, MemOperand(sp, +kCalleeRegisterSaveAreaSize));
+ }
+
+ // If return value is on the stack, pop it to registers.
+ if (needs_return_buffer) {
+ if (result_size() > 2) __ LoadP(r4, MemOperand(r2, 2 * kPointerSize));
+ __ LoadP(r3, MemOperand(r2, kPointerSize));
+ __ LoadP(r2, MemOperand(r2));
+ }
+
+ // Check result for exception sentinel.
+ Label exception_returned;
+ __ CompareRoot(r2, Heap::kExceptionRootIndex);
+ __ beq(&exception_returned, Label::kNear);
+
+ // Check that there is no pending exception, otherwise we
+ // should have returned the exception sentinel.
+ if (FLAG_debug_code) {
+ Label okay;
+ ExternalReference pending_exception_address(
+ Isolate::kPendingExceptionAddress, isolate());
+ __ mov(r1, Operand(pending_exception_address));
+ __ LoadP(r1, MemOperand(r1));
+ __ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
+ // Cannot use check here as it attempts to generate call into runtime.
+ __ beq(&okay, Label::kNear);
+ __ stop("Unexpected pending exception");
+ __ bind(&okay);
+ }
+
+ // Exit C frame and return.
+ // r2:r3: result
+ // sp: stack pointer
+ // fp: frame pointer
+ Register argc;
+ if (argv_in_register()) {
+ // We don't want to pop arguments so set argc to no_reg.
+ argc = no_reg;
+ } else {
+ // r6: still holds argc (callee-saved).
+ argc = r6;
+ }
+ __ LeaveExitFrame(save_doubles(), argc, true);
+ __ b(r14);
+
+ // Handling of exception.
+ __ bind(&exception_returned);
+
+ ExternalReference pending_handler_context_address(
+ Isolate::kPendingHandlerContextAddress, isolate());
+ ExternalReference pending_handler_code_address(
+ Isolate::kPendingHandlerCodeAddress, isolate());
+ ExternalReference pending_handler_offset_address(
+ Isolate::kPendingHandlerOffsetAddress, isolate());
+ ExternalReference pending_handler_fp_address(
+ Isolate::kPendingHandlerFPAddress, isolate());
+ ExternalReference pending_handler_sp_address(
+ Isolate::kPendingHandlerSPAddress, isolate());
+
+ // Ask the runtime for help to determine the handler. This will set r3 to
+ // contain the current pending exception, don't clobber it.
+ ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
+ isolate());
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(3, 0, r2);
+ __ LoadImmP(r2, Operand::Zero());
+ __ LoadImmP(r3, Operand::Zero());
+ __ mov(r4, Operand(ExternalReference::isolate_address(isolate())));
+ __ CallCFunction(find_handler, 3);
+ }
+
+ // Retrieve the handler context, SP and FP.
+ __ mov(cp, Operand(pending_handler_context_address));
+ __ LoadP(cp, MemOperand(cp));
+ __ mov(sp, Operand(pending_handler_sp_address));
+ __ LoadP(sp, MemOperand(sp));
+ __ mov(fp, Operand(pending_handler_fp_address));
+ __ LoadP(fp, MemOperand(fp));
+
+ // If the handler is a JS frame, restore the context to the frame. Note that
+ // the context will be set to (cp == 0) for non-JS frames.
+ Label skip;
+ __ CmpP(cp, Operand::Zero());
+ __ beq(&skip, Label::kNear);
+ __ StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ bind(&skip);
+
+ // Compute the handler entry address and jump to it.
+ __ mov(r3, Operand(pending_handler_code_address));
+ __ LoadP(r3, MemOperand(r3));
+ __ mov(r4, Operand(pending_handler_offset_address));
+ __ LoadP(r4, MemOperand(r4));
+ __ AddP(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
+ __ AddP(ip, r3, r4);
+ __ Jump(ip);
+}
+
+void JSEntryStub::Generate(MacroAssembler* masm) {
+ // r2: code entry
+ // r3: function
+ // r4: receiver
+ // r5: argc
+ // r6: argv
+
+ Label invoke, handler_entry, exit;
+
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+// saving floating point registers
+#if V8_TARGET_ARCH_S390X
+ // 64bit ABI requires f8 to f15 be saved
+ __ lay(sp, MemOperand(sp, -8 * kDoubleSize));
+ __ std(d8, MemOperand(sp));
+ __ std(d9, MemOperand(sp, 1 * kDoubleSize));
+ __ std(d10, MemOperand(sp, 2 * kDoubleSize));
+ __ std(d11, MemOperand(sp, 3 * kDoubleSize));
+ __ std(d12, MemOperand(sp, 4 * kDoubleSize));
+ __ std(d13, MemOperand(sp, 5 * kDoubleSize));
+ __ std(d14, MemOperand(sp, 6 * kDoubleSize));
+ __ std(d15, MemOperand(sp, 7 * kDoubleSize));
+#else
+ // 31bit ABI requires you to store f4 and f6:
+ // http://refspecs.linuxbase.org/ELF/zSeries/lzsabi0_s390.html#AEN417
+ __ lay(sp, MemOperand(sp, -2 * kDoubleSize));
+ __ std(d4, MemOperand(sp));
+ __ std(d6, MemOperand(sp, kDoubleSize));
+#endif
+
+ // zLinux ABI
+ // Incoming parameters:
+ // r2: code entry
+ // r3: function
+ // r4: receiver
+ // r5: argc
+ // r6: argv
+ // Requires us to save the callee-preserved registers r6-r13
+ // General convention is to also save r14 (return addr) and
+ // sp/r15 as well in a single STM/STMG
+ __ lay(sp, MemOperand(sp, -10 * kPointerSize));
+ __ StoreMultipleP(r6, sp, MemOperand(sp, 0));
+
+ // Set up the reserved register for 0.0.
+ // __ LoadDoubleLiteral(kDoubleRegZero, 0.0, r0);
+
+ // Push a frame with special values setup to mark it as an entry frame.
+ // Bad FP (-1)
+ // SMI Marker
+ // SMI Marker
+ // kCEntryFPAddress
+ // Frame type
+ __ lay(sp, MemOperand(sp, -5 * kPointerSize));
+ // Push a bad frame pointer to fail if it is used.
+ __ LoadImmP(r10, Operand(-1));
+
+ int marker = type();
+ __ LoadSmiLiteral(r9, Smi::FromInt(marker));
+ __ LoadSmiLiteral(r8, Smi::FromInt(marker));
+ // Save copies of the top frame descriptor on the stack.
+ __ mov(r7, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ __ LoadP(r7, MemOperand(r7));
+ __ StoreMultipleP(r7, r10, MemOperand(sp, kPointerSize));
+ // Set up frame pointer for the frame to be pushed.
+ // Need to add kPointerSize, because sp has one extra
+ // frame already for the frame type being pushed later.
+ __ lay(fp,
+ MemOperand(sp, -EntryFrameConstants::kCallerFPOffset + kPointerSize));
+
+ // If this is the outermost JS call, set js_entry_sp value.
+ Label non_outermost_js;
+ ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
+ __ mov(r7, Operand(ExternalReference(js_entry_sp)));
+ __ LoadAndTestP(r8, MemOperand(r7));
+ __ bne(&non_outermost_js, Label::kNear);
+ __ StoreP(fp, MemOperand(r7));
+ __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ Label cont;
+ __ b(&cont, Label::kNear);
+ __ bind(&non_outermost_js);
+ __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
+
+ __ bind(&cont);
+ __ StoreP(ip, MemOperand(sp)); // frame-type
+
+ // Jump to a faked try block that does the invoke, with a faked catch
+ // block that sets the pending exception.
+ __ b(&invoke, Label::kNear);
+
+ __ bind(&handler_entry);
+ handler_offset_ = handler_entry.pos();
+ // Caught exception: Store result (exception) in the pending exception
+ // field in the JSEnv and return a failure sentinel. Coming in here the
+ // fp will be invalid because the PushStackHandler below sets it to 0 to
+ // signal the existence of the JSEntry frame.
+ __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate())));
+
+ __ StoreP(r2, MemOperand(ip));
+ __ LoadRoot(r2, Heap::kExceptionRootIndex);
+ __ b(&exit, Label::kNear);
+
+ // Invoke: Link this frame into the handler chain.
+ __ bind(&invoke);
+ // Must preserve r2-r6.
+ __ PushStackHandler();
+ // If an exception not caught by another handler occurs, this handler
+ // returns control to the code after the b(&invoke) above, which
+ // restores all kCalleeSaved registers (including cp and fp) to their
+ // saved values before returning a failure to C.
+
+ // Clear any pending exceptions.
+ __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate())));
+ __ mov(r7, Operand(isolate()->factory()->the_hole_value()));
+ __ StoreP(r7, MemOperand(ip));
+
+ // Invoke the function by calling through JS entry trampoline builtin.
+ // Notice that we cannot store a reference to the trampoline code directly in
+ // this stub, because runtime stubs are not traversed when doing GC.
+
+ // Expected registers by Builtins::JSEntryTrampoline
+ // r2: code entry
+ // r3: function
+ // r4: receiver
+ // r5: argc
+ // r6: argv
+ if (type() == StackFrame::ENTRY_CONSTRUCT) {
+ ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
+ isolate());
+ __ mov(ip, Operand(construct_entry));
+ } else {
+ ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
+ __ mov(ip, Operand(entry));
+ }
+ __ LoadP(ip, MemOperand(ip)); // deref address
+
+ // Branch and link to JSEntryTrampoline.
+ // the address points to the start of the code object, skip the header
+ __ AddP(ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ Label return_addr;
+ // __ basr(r14, ip);
+ __ larl(r14, &return_addr);
+ __ b(ip);
+ __ bind(&return_addr);
+
+ // Unlink this frame from the handler chain.
+ __ PopStackHandler();
+
+ __ bind(&exit); // r2 holds result
+ // Check if the current stack frame is marked as the outermost JS frame.
+ Label non_outermost_js_2;
+ __ pop(r7);
+ __ CmpSmiLiteral(r7, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME), r0);
+ __ bne(&non_outermost_js_2, Label::kNear);
+ __ mov(r8, Operand::Zero());
+ __ mov(r7, Operand(ExternalReference(js_entry_sp)));
+ __ StoreP(r8, MemOperand(r7));
+ __ bind(&non_outermost_js_2);
+
+ // Restore the top frame descriptors from the stack.
+ __ pop(r5);
+ __ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ __ StoreP(r5, MemOperand(ip));
+
+ // Reset the stack to the callee saved registers.
+ __ lay(sp, MemOperand(sp, -EntryFrameConstants::kCallerFPOffset));
+
+ // Reload callee-saved preserved regs, return address reg (r14) and sp
+ __ LoadMultipleP(r6, sp, MemOperand(sp, 0));
+ __ la(sp, MemOperand(sp, 10 * kPointerSize));
+
+// saving floating point registers
+#if V8_TARGET_ARCH_S390X
+ // 64bit ABI requires f8 to f15 be saved
+ __ ld(d8, MemOperand(sp));
+ __ ld(d9, MemOperand(sp, 1 * kDoubleSize));
+ __ ld(d10, MemOperand(sp, 2 * kDoubleSize));
+ __ ld(d11, MemOperand(sp, 3 * kDoubleSize));
+ __ ld(d12, MemOperand(sp, 4 * kDoubleSize));
+ __ ld(d13, MemOperand(sp, 5 * kDoubleSize));
+ __ ld(d14, MemOperand(sp, 6 * kDoubleSize));
+ __ ld(d15, MemOperand(sp, 7 * kDoubleSize));
+ __ la(sp, MemOperand(sp, 8 * kDoubleSize));
+#else
+ // 31bit ABI requires you to store f4 and f6:
+ // http://refspecs.linuxbase.org/ELF/zSeries/lzsabi0_s390.html#AEN417
+ __ ld(d4, MemOperand(sp));
+ __ ld(d6, MemOperand(sp, kDoubleSize));
+ __ la(sp, MemOperand(sp, 2 * kDoubleSize));
+#endif
+
+ __ b(r14);
+}
+
+void InstanceOfStub::Generate(MacroAssembler* masm) {
+ Register const object = r3; // Object (lhs).
+ Register const function = r2; // Function (rhs).
+ Register const object_map = r4; // Map of {object}.
+ Register const function_map = r5; // Map of {function}.
+ Register const function_prototype = r6; // Prototype of {function}.
+ Register const scratch = r7;
+
+ DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
+ DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
+
+ // Check if {object} is a smi.
+ Label object_is_smi;
+ __ JumpIfSmi(object, &object_is_smi);
+
+ // Lookup the {function} and the {object} map in the global instanceof cache.
+ // Note: This is safe because we clear the global instanceof cache whenever
+ // we change the prototype of any object.
+ Label fast_case, slow_case;
+ __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+ __ bne(&fast_case);
+ __ CompareRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
+ __ bne(&fast_case);
+ __ LoadRoot(r2, Heap::kInstanceofCacheAnswerRootIndex);
+ __ Ret();
+
+ // If {object} is a smi we can safely return false if {function} is a JS
+ // function, otherwise we have to miss to the runtime and throw an exception.
+ __ bind(&object_is_smi);
+ __ JumpIfSmi(function, &slow_case);
+ __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
+ __ bne(&slow_case);
+ __ LoadRoot(r2, Heap::kFalseValueRootIndex);
+ __ Ret();
+
+ // Fast-case: The {function} must be a valid JSFunction.
+ __ bind(&fast_case);
+ __ JumpIfSmi(function, &slow_case);
+ __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
+ __ bne(&slow_case);
+
+ // Go to the runtime if the function is not a constructor.
+ __ LoadlB(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
+ __ TestBit(scratch, Map::kIsConstructor, r0);
+ __ beq(&slow_case);
+
+ // Ensure that {function} has an instance prototype.
+ __ TestBit(scratch, Map::kHasNonInstancePrototype, r0);
+ __ bne(&slow_case);
+
+ // Get the "prototype" (or initial map) of the {function}.
+ __ LoadP(function_prototype,
+ FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ __ AssertNotSmi(function_prototype);
+
+ // Resolve the prototype if the {function} has an initial map. Afterwards the
+ // {function_prototype} will be either the JSReceiver prototype object or the
+ // hole value, which means that no instances of the {function} were created so
+ // far and hence we should return false.
+ Label function_prototype_valid;
+ __ CompareObjectType(function_prototype, scratch, scratch, MAP_TYPE);
+ __ bne(&function_prototype_valid);
+ __ LoadP(function_prototype,
+ FieldMemOperand(function_prototype, Map::kPrototypeOffset));
+ __ bind(&function_prototype_valid);
+ __ AssertNotSmi(function_prototype);
+
+ // Update the global instanceof cache with the current {object} map and
+ // {function}. The cached answer will be set when it is known below.
+ __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
+
+ // Loop through the prototype chain looking for the {function} prototype.
+ // Assume true, and change to false if not found.
+ Register const object_instance_type = function_map;
+ Register const map_bit_field = function_map;
+ Register const null = scratch;
+ Register const result = r2;
+
+ Label done, loop, fast_runtime_fallback;
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+ __ LoadRoot(null, Heap::kNullValueRootIndex);
+ __ bind(&loop);
+
+ // Check if the object needs to be access checked.
+ __ LoadlB(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
+ __ TestBit(map_bit_field, Map::kIsAccessCheckNeeded, r0);
+ __ bne(&fast_runtime_fallback);
+ // Check if the current object is a Proxy.
+ __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
+ __ beq(&fast_runtime_fallback);
+
+ __ LoadP(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
+ __ CmpP(object, function_prototype);
+ __ beq(&done);
+ __ CmpP(object, null);
+ __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ bne(&loop);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ bind(&done);
+ __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
+ __ Ret();
+
+ // Found Proxy or access check needed: Call the runtime
+ __ bind(&fast_runtime_fallback);
+ __ Push(object, function_prototype);
+ // Invalidate the instanceof cache.
+ __ LoadSmiLiteral(scratch, Smi::FromInt(0));
+ __ StoreRoot(scratch, Heap::kInstanceofCacheFunctionRootIndex);
+ __ TailCallRuntime(Runtime::kHasInPrototypeChain);
+
+ // Slow-case: Call the %InstanceOf runtime function.
+ __ bind(&slow_case);
+ __ Push(object, function);
+ __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
+ : Runtime::kInstanceOf);
+}
+
+void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
+ Label miss;
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ // Ensure that the vector and slot registers won't be clobbered before
+ // calling the miss handler.
+ DCHECK(!AreAliased(r6, r7, LoadWithVectorDescriptor::VectorRegister(),
+ LoadWithVectorDescriptor::SlotRegister()));
+
+ NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r6,
+ r7, &miss);
+ __ bind(&miss);
+ PropertyAccessCompiler::TailCallBuiltin(
+ masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
+}
+
+void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
+ // Return address is in lr.
+ Label miss;
+
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register index = LoadDescriptor::NameRegister();
+ Register scratch = r7;
+ Register result = r2;
+ DCHECK(!scratch.is(receiver) && !scratch.is(index));
+ DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
+ result.is(LoadWithVectorDescriptor::SlotRegister()));
+
+ // StringCharAtGenerator doesn't use the result register until it's passed
+ // the different miss possibilities. If it did, we would have a conflict
+ // when FLAG_vector_ics is true.
+ StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &miss, // When index out of range.
+ STRING_INDEX_IS_ARRAY_INDEX,
+ RECEIVER_IS_STRING);
+ char_at_generator.GenerateFast(masm);
+ __ Ret();
+
+ StubRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
+
+ __ bind(&miss);
+ PropertyAccessCompiler::TailCallBuiltin(
+ masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
+}
+
+void RegExpExecStub::Generate(MacroAssembler* masm) {
+// Just jump directly to runtime if native RegExp is not selected at compile
+// time or if regexp entry in generated code is turned off runtime switch or
+// at compilation.
+#ifdef V8_INTERPRETED_REGEXP
+ __ TailCallRuntime(Runtime::kRegExpExec);
+#else // V8_INTERPRETED_REGEXP
+
+ // Stack frame on entry.
+ // sp[0]: last_match_info (expected JSArray)
+ // sp[4]: previous index
+ // sp[8]: subject string
+ // sp[12]: JSRegExp object
+
+ const int kLastMatchInfoOffset = 0 * kPointerSize;
+ const int kPreviousIndexOffset = 1 * kPointerSize;
+ const int kSubjectOffset = 2 * kPointerSize;
+ const int kJSRegExpOffset = 3 * kPointerSize;
+
+ Label runtime, br_over, encoding_type_UC16;
+
+ // Allocation of registers for this function. These are in callee save
+ // registers and will be preserved by the call to the native RegExp code, as
+ // this code is called using the normal C calling convention. When calling
+ // directly from generated code the native RegExp code will not do a GC and
+ // therefore the content of these registers are safe to use after the call.
+ Register subject = r6;
+ Register regexp_data = r7;
+ Register last_match_info_elements = r8;
+ Register code = r9;
+
+ __ CleanseP(r14);
+
+ // Ensure register assigments are consistent with callee save masks
+ DCHECK(subject.bit() & kCalleeSaved);
+ DCHECK(regexp_data.bit() & kCalleeSaved);
+ DCHECK(last_match_info_elements.bit() & kCalleeSaved);
+ DCHECK(code.bit() & kCalleeSaved);
+
+ // Ensure that a RegExp stack is allocated.
+ ExternalReference address_of_regexp_stack_memory_address =
+ ExternalReference::address_of_regexp_stack_memory_address(isolate());
+ ExternalReference address_of_regexp_stack_memory_size =
+ ExternalReference::address_of_regexp_stack_memory_size(isolate());
+ __ mov(r2, Operand(address_of_regexp_stack_memory_size));
+ __ LoadAndTestP(r2, MemOperand(r2));
+ __ beq(&runtime);
+
+ // Check that the first argument is a JSRegExp object.
+ __ LoadP(r2, MemOperand(sp, kJSRegExpOffset));
+ __ JumpIfSmi(r2, &runtime);
+ __ CompareObjectType(r2, r3, r3, JS_REGEXP_TYPE);
+ __ bne(&runtime);
+
+ // Check that the RegExp has been compiled (data contains a fixed array).
+ __ LoadP(regexp_data, FieldMemOperand(r2, JSRegExp::kDataOffset));
+ if (FLAG_debug_code) {
+ __ TestIfSmi(regexp_data);
+ __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected, cr0);
+ __ CompareObjectType(regexp_data, r2, r2, FIXED_ARRAY_TYPE);
+ __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
+ }
+
+ // regexp_data: RegExp data (FixedArray)
+ // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
+ __ LoadP(r2, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
+ // DCHECK(Smi::FromInt(JSRegExp::IRREGEXP) < (char *)0xffffu);
+ __ CmpSmiLiteral(r2, Smi::FromInt(JSRegExp::IRREGEXP), r0);
+ __ bne(&runtime);
+
+ // regexp_data: RegExp data (FixedArray)
+ // Check that the number of captures fit in the static offsets vector buffer.
+ __ LoadP(r4,
+ FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
+ // Check (number_of_captures + 1) * 2 <= offsets vector size
+ // Or number_of_captures * 2 <= offsets vector size - 2
+ // SmiToShortArrayOffset accomplishes the multiplication by 2 and
+ // SmiUntag (which is a nop for 32-bit).
+ __ SmiToShortArrayOffset(r4, r4);
+ STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
+ __ CmpLogicalP(r4, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
+ __ bgt(&runtime);
+
+ // Reset offset for possibly sliced string.
+ __ LoadImmP(ip, Operand::Zero());
+ __ LoadP(subject, MemOperand(sp, kSubjectOffset));
+ __ JumpIfSmi(subject, &runtime);
+ __ LoadRR(r5, subject); // Make a copy of the original subject string.
+ // subject: subject string
+ // r5: subject string
+ // regexp_data: RegExp data (FixedArray)
+ // Handle subject string according to its encoding and representation:
+ // (1) Sequential string? If yes, go to (4).
+ // (2) Sequential or cons? If not, go to (5).
+ // (3) Cons string. If the string is flat, replace subject with first string
+ // and go to (1). Otherwise bail out to runtime.
+ // (4) Sequential string. Load regexp code according to encoding.
+ // (E) Carry on.
+ /// [...]
+
+ // Deferred code at the end of the stub:
+ // (5) Long external string? If not, go to (7).
+ // (6) External string. Make it, offset-wise, look like a sequential string.
+ // Go to (4).
+ // (7) Short external string or not a string? If yes, bail out to runtime.
+ // (8) Sliced string. Replace subject with parent. Go to (1).
+
+ Label seq_string /* 4 */, external_string /* 6 */, check_underlying /* 1 */,
+ not_seq_nor_cons /* 5 */, not_long_external /* 7 */;
+
+ __ bind(&check_underlying);
+ __ LoadP(r2, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ LoadlB(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+
+ // (1) Sequential string? If yes, go to (4).
+
+ STATIC_ASSERT((kIsNotStringMask | kStringRepresentationMask |
+ kShortExternalStringMask) == 0x93);
+ __ mov(r3, Operand(kIsNotStringMask | kStringRepresentationMask |
+ kShortExternalStringMask));
+ __ AndP(r3, r2);
+ STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
+ __ beq(&seq_string, Label::kNear); // Go to (4).
+
+ // (2) Sequential or cons? If not, go to (5).
+ STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+ STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+ STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
+ STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
+ STATIC_ASSERT(kExternalStringTag < 0xffffu);
+ __ CmpP(r3, Operand(kExternalStringTag));
+ __ bge(&not_seq_nor_cons); // Go to (5).
+
+ // (3) Cons string. Check that it's flat.
+ // Replace subject with first string and reload instance type.
+ __ LoadP(r2, FieldMemOperand(subject, ConsString::kSecondOffset));
+ __ CompareRoot(r2, Heap::kempty_stringRootIndex);
+ __ bne(&runtime);
+ __ LoadP(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
+ __ b(&check_underlying);
+
+ // (4) Sequential string. Load regexp code according to encoding.
+ __ bind(&seq_string);
+ // subject: sequential subject string (or look-alike, external string)
+ // r5: original subject string
+ // Load previous index and check range before r5 is overwritten. We have to
+ // use r5 instead of subject here because subject might have been only made
+ // to look like a sequential string when it actually is an external string.
+ __ LoadP(r3, MemOperand(sp, kPreviousIndexOffset));
+ __ JumpIfNotSmi(r3, &runtime);
+ __ LoadP(r5, FieldMemOperand(r5, String::kLengthOffset));
+ __ CmpLogicalP(r5, r3);
+ __ ble(&runtime);
+ __ SmiUntag(r3);
+
+ STATIC_ASSERT(4 == kOneByteStringTag);
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ STATIC_ASSERT(kStringEncodingMask == 4);
+ __ ExtractBitMask(r5, r2, kStringEncodingMask, SetRC);
+ __ beq(&encoding_type_UC16, Label::kNear);
+ __ LoadP(code,
+ FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
+ __ b(&br_over, Label::kNear);
+ __ bind(&encoding_type_UC16);
+ __ LoadP(code, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
+ __ bind(&br_over);
+
+ // (E) Carry on. String handling is done.
+ // code: irregexp code
+ // Check that the irregexp code has been generated for the actual string
+ // encoding. If it has, the field contains a code object otherwise it contains
+ // a smi (code flushing support).
+ __ JumpIfSmi(code, &runtime);
+
+ // r3: previous index
+ // r5: encoding of subject string (1 if one_byte, 0 if two_byte);
+ // code: Address of generated regexp code
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // All checks done. Now push arguments for native regexp code.
+ __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, r2, r4);
+
+ // Isolates: note we add an additional parameter here (isolate pointer).
+ const int kRegExpExecuteArguments = 10;
+ const int kParameterRegisters = 5;
+ __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
+
+ // Stack pointer now points to cell where return address is to be written.
+ // Arguments are before that on the stack or in registers.
+
+ // Argument 10 (in stack parameter area): Pass current isolate address.
+ __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
+ __ StoreP(r2, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize +
+ 4 * kPointerSize));
+
+ // Argument 9 is a dummy that reserves the space used for
+ // the return address added by the ExitFrame in native calls.
+ __ mov(r2, Operand::Zero());
+ __ StoreP(r2, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize +
+ 3 * kPointerSize));
+
+ // Argument 8: Indicate that this is a direct call from JavaScript.
+ __ mov(r2, Operand(1));
+ __ StoreP(r2, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize +
+ 2 * kPointerSize));
+
+ // Argument 7: Start (high end) of backtracking stack memory area.
+ __ mov(r2, Operand(address_of_regexp_stack_memory_address));
+ __ LoadP(r2, MemOperand(r2, 0));
+ __ mov(r1, Operand(address_of_regexp_stack_memory_size));
+ __ LoadP(r1, MemOperand(r1, 0));
+ __ AddP(r2, r1);
+ __ StoreP(r2, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize +
+ 1 * kPointerSize));
+
+ // Argument 6: Set the number of capture registers to zero to force
+ // global egexps to behave as non-global. This does not affect non-global
+ // regexps.
+ __ mov(r2, Operand::Zero());
+ __ StoreP(r2, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize +
+ 0 * kPointerSize));
+
+ // Argument 1 (r2): Subject string.
+ // Load the length from the original subject string from the previous stack
+ // frame. Therefore we have to use fp, which points exactly to 15 pointer
+ // sizes below the previous sp. (Because creating a new stack frame pushes
+ // the previous fp onto the stack and moves up sp by 2 * kPointerSize and
+ // 13 registers saved on the stack previously)
+ __ LoadP(r2, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
+
+ // Argument 2 (r3): Previous index.
+ // Already there
+ __ AddP(r1, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
+
+ // Argument 5 (r6): static offsets vector buffer.
+ __ mov(
+ r6,
+ Operand(ExternalReference::address_of_static_offsets_vector(isolate())));
+
+ // For arguments 4 (r5) and 3 (r4) get string length, calculate start of data
+ // and calculate the shift of the index (0 for one-byte and 1 for two byte).
+ __ XorP(r5, Operand(1));
+ // If slice offset is not 0, load the length from the original sliced string.
+ // Argument 3, r4: Start of string data
+ // Prepare start and end index of the input.
+ __ ShiftLeftP(ip, ip, r5);
+ __ AddP(ip, r1, ip);
+ __ ShiftLeftP(r4, r3, r5);
+ __ AddP(r4, ip, r4);
+
+ // Argument 4, r5: End of string data
+ __ LoadP(r1, FieldMemOperand(r2, String::kLengthOffset));
+ __ SmiUntag(r1);
+ __ ShiftLeftP(r0, r1, r5);
+ __ AddP(r5, ip, r0);
+
+ // Locate the code entry and call it.
+ __ AddP(code, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ DirectCEntryStub stub(isolate());
+ stub.GenerateCall(masm, code);
+
+ __ LeaveExitFrame(false, no_reg, true);
+
+ // r2: result (int32)
+ // subject: subject string -- needed to reload
+ __ LoadP(subject, MemOperand(sp, kSubjectOffset));
+
+ // regexp_data: RegExp data (callee saved)
+ // last_match_info_elements: Last match info elements (callee saved)
+ // Check the result.
+ Label success;
+ __ Cmp32(r2, Operand(1));
+ // We expect exactly one result since we force the called regexp to behave
+ // as non-global.
+ __ beq(&success);
+ Label failure;
+ __ Cmp32(r2, Operand(NativeRegExpMacroAssembler::FAILURE));
+ __ beq(&failure);
+ __ Cmp32(r2, Operand(NativeRegExpMacroAssembler::EXCEPTION));
+ // If not exception it can only be retry. Handle that in the runtime system.
+ __ bne(&runtime);
+ // Result must now be exception. If there is no pending exception already a
+ // stack overflow (on the backtrack stack) was detected in RegExp code but
+ // haven't created the exception yet. Handle that in the runtime system.
+ // TODO(592): Rerunning the RegExp to get the stack overflow exception.
+ __ mov(r3, Operand(isolate()->factory()->the_hole_value()));
+ __ mov(r4, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate())));
+ __ LoadP(r2, MemOperand(r4, 0));
+ __ CmpP(r2, r3);
+ __ beq(&runtime);
+
+ // For exception, throw the exception again.
+ __ TailCallRuntime(Runtime::kRegExpExecReThrow);
+
+ __ bind(&failure);
+ // For failure and exception return null.
+ __ mov(r2, Operand(isolate()->factory()->null_value()));
+ __ la(sp, MemOperand(sp, (4 * kPointerSize)));
+ __ Ret();
+
+ // Process the result from the native regexp code.
+ __ bind(&success);
+ __ LoadP(r3,
+ FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2.
+ // SmiToShortArrayOffset accomplishes the multiplication by 2 and
+ // SmiUntag (which is a nop for 32-bit).
+ __ SmiToShortArrayOffset(r3, r3);
+ __ AddP(r3, Operand(2));
+
+ __ LoadP(r2, MemOperand(sp, kLastMatchInfoOffset));
+ __ JumpIfSmi(r2, &runtime);
+ __ CompareObjectType(r2, r4, r4, JS_ARRAY_TYPE);
+ __ bne(&runtime);
+ // Check that the JSArray is in fast case.
+ __ LoadP(last_match_info_elements,
+ FieldMemOperand(r2, JSArray::kElementsOffset));
+ __ LoadP(r2,
+ FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
+ __ CompareRoot(r2, Heap::kFixedArrayMapRootIndex);
+ __ bne(&runtime);
+ // Check that the last match info has space for the capture registers and the
+ // additional information.
+ __ LoadP(
+ r2, FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
+ __ AddP(r4, r3, Operand(RegExpImpl::kLastMatchOverhead));
+ __ SmiUntag(r0, r2);
+ __ CmpP(r4, r0);
+ __ bgt(&runtime);
+
+ // r3: number of capture registers
+ // subject: subject string
+ // Store the capture count.
+ __ SmiTag(r4, r3);
+ __ StoreP(r4, FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastCaptureCountOffset));
+ // Store last subject and last input.
+ __ StoreP(subject, FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastSubjectOffset));
+ __ LoadRR(r4, subject);
+ __ RecordWriteField(last_match_info_elements, RegExpImpl::kLastSubjectOffset,
+ subject, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ LoadRR(subject, r4);
+ __ StoreP(subject, FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastInputOffset));
+ __ RecordWriteField(last_match_info_elements, RegExpImpl::kLastInputOffset,
+ subject, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
+
+ // Get the static offsets vector filled by the native regexp code.
+ ExternalReference address_of_static_offsets_vector =
+ ExternalReference::address_of_static_offsets_vector(isolate());
+ __ mov(r4, Operand(address_of_static_offsets_vector));
+
+ // r3: number of capture registers
+ // r4: offsets vector
+ Label next_capture;
+ // Capture register counter starts from number of capture registers and
+ // counts down until wraping after zero.
+ __ AddP(
+ r2, last_match_info_elements,
+ Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag - kPointerSize));
+ __ AddP(r4, Operand(-kIntSize)); // bias down for lwzu
+ __ bind(&next_capture);
+ // Read the value from the static offsets vector buffer.
+ __ ly(r5, MemOperand(r4, kIntSize));
+ __ lay(r4, MemOperand(r4, kIntSize));
+ // Store the smi value in the last match info.
+ __ SmiTag(r5);
+ __ StoreP(r5, MemOperand(r2, kPointerSize));
+ __ lay(r2, MemOperand(r2, kPointerSize));
+ __ BranchOnCount(r3, &next_capture);
+
+ // Return last match info.
+ __ LoadP(r2, MemOperand(sp, kLastMatchInfoOffset));
+ __ la(sp, MemOperand(sp, (4 * kPointerSize)));
+ __ Ret();
+
+ // Do the runtime call to execute the regexp.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kRegExpExec);
+
+ // Deferred code for string handling.
+ // (5) Long external string? If not, go to (7).
+ __ bind(&not_seq_nor_cons);
+ // Compare flags are still set.
+ __ bgt(&not_long_external, Label::kNear); // Go to (7).
+
+ // (6) External string. Make it, offset-wise, look like a sequential string.
+ __ bind(&external_string);
+ __ LoadP(r2, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ LoadlB(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+ if (FLAG_debug_code) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ STATIC_ASSERT(kIsIndirectStringMask == 1);
+ __ tmll(r2, Operand(kIsIndirectStringMask));
+ __ Assert(eq, kExternalStringExpectedButNotFound, cr0);
+ }
+ __ LoadP(subject,
+ FieldMemOperand(subject, ExternalString::kResourceDataOffset));
+ // Move the pointer so that offset-wise, it looks like a sequential string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ SubP(subject, subject,
+ Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ b(&seq_string); // Go to (4).
+
+ // (7) Short external string or not a string? If yes, bail out to runtime.
+ __ bind(&not_long_external);
+ STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
+ __ mov(r0, Operand(kIsNotStringMask | kShortExternalStringMask));
+ __ AndP(r0, r3);
+ __ bne(&runtime);
+
+ // (8) Sliced string. Replace subject with parent. Go to (4).
+ // Load offset into ip and replace subject string with parent.
+ __ LoadP(ip, FieldMemOperand(subject, SlicedString::kOffsetOffset));
+ __ SmiUntag(ip);
+ __ LoadP(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
+ __ b(&check_underlying); // Go to (4).
+#endif // V8_INTERPRETED_REGEXP
+}
+
+static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
+ // r2 : number of arguments to the construct function
+ // r3 : the function to call
+ // r4 : feedback vector
+ // r5 : slot in feedback vector (Smi)
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Number-of-arguments register must be smi-tagged to call out.
+ __ SmiTag(r2);
+ __ Push(r5, r4, r3, r2);
+
+ __ CallStub(stub);
+
+ __ Pop(r5, r4, r3, r2);
+ __ SmiUntag(r2);
+}
+
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
+ // Cache the called function in a feedback vector slot. Cache states
+ // are uninitialized, monomorphic (indicated by a JSFunction), and
+ // megamorphic.
+ // r2 : number of arguments to the construct function
+ // r3 : the function to call
+ // r4 : feedback vector
+ // r5 : slot in feedback vector (Smi)
+ Label initialize, done, miss, megamorphic, not_array_function;
+
+ DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->megamorphic_symbol());
+ DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
+ masm->isolate()->heap()->uninitialized_symbol());
+
+ // Load the cache state into r7.
+ __ SmiToPtrArrayOffset(r7, r5);
+ __ AddP(r7, r4, r7);
+ __ LoadP(r7, FieldMemOperand(r7, FixedArray::kHeaderSize));
+
+ // A monomorphic cache hit or an already megamorphic state: invoke the
+ // function without changing the state.
+ // We don't know if r7 is a WeakCell or a Symbol, but it's harmless to read at
+ // this position in a symbol (see static asserts in type-feedback-vector.h).
+ Label check_allocation_site;
+ Register feedback_map = r8;
+ Register weak_value = r9;
+ __ LoadP(weak_value, FieldMemOperand(r7, WeakCell::kValueOffset));
+ __ CmpP(r3, weak_value);
+ __ beq(&done);
+ __ CompareRoot(r7, Heap::kmegamorphic_symbolRootIndex);
+ __ beq(&done);
+ __ LoadP(feedback_map, FieldMemOperand(r7, HeapObject::kMapOffset));
+ __ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
+ __ bne(&check_allocation_site);
+
+ // If the weak cell is cleared, we have a new chance to become monomorphic.
+ __ JumpIfSmi(weak_value, &initialize);
+ __ b(&megamorphic);
+
+ __ bind(&check_allocation_site);
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the megamorph
+ // sentinel, then we have in the slot either some other function or an
+ // AllocationSite.
+ __ CompareRoot(feedback_map, Heap::kAllocationSiteMapRootIndex);
+ __ bne(&miss);
+
+ // Make sure the function is the Array() function
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7);
+ __ CmpP(r3, r7);
+ __ bne(&megamorphic);
+ __ b(&done);
+
+ __ bind(&miss);
+
+ // A monomorphic miss (i.e, here the cache is not uninitialized) goes
+ // megamorphic.
+ __ CompareRoot(r7, Heap::kuninitialized_symbolRootIndex);
+ __ beq(&initialize);
+ // MegamorphicSentinel is an immortal immovable object (undefined) so no
+ // write-barrier is needed.
+ __ bind(&megamorphic);
+ __ SmiToPtrArrayOffset(r7, r5);
+ __ AddP(r7, r4, r7);
+ __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
+ __ StoreP(ip, FieldMemOperand(r7, FixedArray::kHeaderSize), r0);
+ __ jmp(&done);
+
+ // An uninitialized cache is patched with the function
+ __ bind(&initialize);
+
+ // Make sure the function is the Array() function.
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7);
+ __ CmpP(r3, r7);
+ __ bne(&not_array_function);
+
+ // The target function is the Array constructor,
+ // Create an AllocationSite if we don't already have it, store it in the
+ // slot.
+ CreateAllocationSiteStub create_stub(masm->isolate());
+ CallStubInRecordCallTarget(masm, &create_stub);
+ __ b(&done);
+
+ __ bind(&not_array_function);
+
+ CreateWeakCellStub weak_cell_stub(masm->isolate());
+ CallStubInRecordCallTarget(masm, &weak_cell_stub);
+ __ bind(&done);
+}
+
+void CallConstructStub::Generate(MacroAssembler* masm) {
+ // r2 : number of arguments
+ // r3 : the function to call
+ // r4 : feedback vector
+ // r5 : slot in feedback vector (Smi, for RecordCallTarget)
+
+ Label non_function;
+ // Check that the function is not a smi.
+ __ JumpIfSmi(r3, &non_function);
+ // Check that the function is a JSFunction.
+ __ CompareObjectType(r3, r7, r7, JS_FUNCTION_TYPE);
+ __ bne(&non_function);
+
+ GenerateRecordCallTarget(masm);
+
+ __ SmiToPtrArrayOffset(r7, r5);
+ __ AddP(r7, r4, r7);
+ // Put the AllocationSite from the feedback vector into r4, or undefined.
+ __ LoadP(r4, FieldMemOperand(r7, FixedArray::kHeaderSize));
+ __ LoadP(r7, FieldMemOperand(r4, AllocationSite::kMapOffset));
+ __ CompareRoot(r7, Heap::kAllocationSiteMapRootIndex);
+ Label feedback_register_initialized;
+ __ beq(&feedback_register_initialized);
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
+
+ __ AssertUndefinedOrAllocationSite(r4, r7);
+
+ // Pass function as new target.
+ __ LoadRR(r5, r3);
+
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kConstructStubOffset));
+ __ AddP(ip, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(ip);
+
+ __ bind(&non_function);
+ __ LoadRR(r5, r3);
+ __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
+void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
+ // r3 - function
+ // r5 - slot id
+ // r4 - vector
+ // r6 - allocation site (loaded from vector[slot])
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7);
+ __ CmpP(r3, r7);
+ __ bne(miss);
+
+ __ mov(r2, Operand(arg_count()));
+
+ // Increment the call count for monomorphic function calls.
+ const int count_offset = FixedArray::kHeaderSize + kPointerSize;
+ __ SmiToPtrArrayOffset(r7, r5);
+ __ AddP(r4, r4, r7);
+ __ LoadP(r5, FieldMemOperand(r4, count_offset));
+ __ AddSmiLiteral(r5, r5, Smi::FromInt(CallICNexus::kCallCountIncrement), r0);
+ __ StoreP(r5, FieldMemOperand(r4, count_offset), r0);
+
+ __ LoadRR(r4, r6);
+ __ LoadRR(r5, r3);
+ ArrayConstructorStub stub(masm->isolate(), arg_count());
+ __ TailCallStub(&stub);
+}
+
+void CallICStub::Generate(MacroAssembler* masm) {
+ // r3 - function
+ // r5 - slot id (Smi)
+ // r4 - vector
+ Label extra_checks_or_miss, call, call_function;
+ int argc = arg_count();
+ ParameterCount actual(argc);
+
+ // The checks. First, does r3 match the recorded monomorphic target?
+ __ SmiToPtrArrayOffset(r8, r5);
+ __ AddP(r8, r4, r8);
+ __ LoadP(r6, FieldMemOperand(r8, FixedArray::kHeaderSize));
+
+ // We don't know that we have a weak cell. We might have a private symbol
+ // or an AllocationSite, but the memory is safe to examine.
+ // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
+ // FixedArray.
+ // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
+ // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
+ // computed, meaning that it can't appear to be a pointer. If the low bit is
+ // 0, then hash is computed, but the 0 bit prevents the field from appearing
+ // to be a pointer.
+ STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
+ STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
+ WeakCell::kValueOffset &&
+ WeakCell::kValueOffset == Symbol::kHashFieldSlot);
+
+ __ LoadP(r7, FieldMemOperand(r6, WeakCell::kValueOffset));
+ __ CmpP(r3, r7);
+ __ bne(&extra_checks_or_miss, Label::kNear);
+
+ // The compare above could have been a SMI/SMI comparison. Guard against this
+ // convincing us that we have a monomorphic JSFunction.
+ __ JumpIfSmi(r3, &extra_checks_or_miss);
+
+ // Increment the call count for monomorphic function calls.
+ const int count_offset = FixedArray::kHeaderSize + kPointerSize;
+ __ LoadP(r5, FieldMemOperand(r8, count_offset));
+ __ AddSmiLiteral(r5, r5, Smi::FromInt(CallICNexus::kCallCountIncrement), r0);
+ __ StoreP(r5, FieldMemOperand(r8, count_offset), r0);
+
+ __ bind(&call_function);
+ __ mov(r2, Operand(argc));
+ __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
+ tail_call_mode()),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&extra_checks_or_miss);
+ Label uninitialized, miss, not_allocation_site;
+
+ __ CompareRoot(r6, Heap::kmegamorphic_symbolRootIndex);
+ __ beq(&call);
+
+ // Verify that r6 contains an AllocationSite
+ __ LoadP(r7, FieldMemOperand(r6, HeapObject::kMapOffset));
+ __ CompareRoot(r7, Heap::kAllocationSiteMapRootIndex);
+ __ bne(&not_allocation_site);
+
+ // We have an allocation site.
+ HandleArrayCase(masm, &miss);
+
+ __ bind(&not_allocation_site);
+
+ // The following cases attempt to handle MISS cases without going to the
+ // runtime.
+ if (FLAG_trace_ic) {
+ __ b(&miss);
+ }
+
+ __ CompareRoot(r6, Heap::kuninitialized_symbolRootIndex);
+ __ beq(&uninitialized);
+
+ // We are going megamorphic. If the feedback is a JSFunction, it is fine
+ // to handle it here. More complex cases are dealt with in the runtime.
+ __ AssertNotSmi(r6);
+ __ CompareObjectType(r6, r7, r7, JS_FUNCTION_TYPE);
+ __ bne(&miss);
+ __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
+ __ StoreP(ip, FieldMemOperand(r8, FixedArray::kHeaderSize), r0);
+
+ __ bind(&call);
+ __ mov(r2, Operand(argc));
+ __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&uninitialized);
+
+ // We are going monomorphic, provided we actually have a JSFunction.
+ __ JumpIfSmi(r3, &miss);
+
+ // Goto miss case if we do not have a function.
+ __ CompareObjectType(r3, r6, r6, JS_FUNCTION_TYPE);
+ __ bne(&miss);
+
+ // Make sure the function is not the Array() function, which requires special
+ // behavior on MISS.
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r6);
+ __ CmpP(r3, r6);
+ __ beq(&miss);
+
+ // Make sure the function belongs to the same native context.
+ __ LoadP(r6, FieldMemOperand(r3, JSFunction::kContextOffset));
+ __ LoadP(r6, ContextMemOperand(r6, Context::NATIVE_CONTEXT_INDEX));
+ __ LoadP(ip, NativeContextMemOperand());
+ __ CmpP(r6, ip);
+ __ bne(&miss);
+
+ // Initialize the call counter.
+ __ LoadSmiLiteral(r7, Smi::FromInt(CallICNexus::kCallCountIncrement));
+ __ StoreP(r7, FieldMemOperand(r8, count_offset), r0);
+
+ // Store the function. Use a stub since we need a frame for allocation.
+ // r4 - vector
+ // r5 - slot
+ // r3 - function
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ CreateWeakCellStub create_stub(masm->isolate());
+ __ Push(r3);
+ __ CallStub(&create_stub);
+ __ Pop(r3);
+ }
+
+ __ b(&call_function);
+
+ // We are here because tracing is on or we encountered a MISS case we can't
+ // handle here.
+ __ bind(&miss);
+ GenerateMiss(masm);
+
+ __ b(&call);
+}
+
+void CallICStub::GenerateMiss(MacroAssembler* masm) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Push the function and feedback info.
+ __ Push(r3, r4, r5);
+
+ // Call the entry.
+ __ CallRuntime(Runtime::kCallIC_Miss);
+
+ // Move result to r3 and exit the internal frame.
+ __ LoadRR(r3, r2);
+}
+
+// StringCharCodeAtGenerator
+void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+ // If the receiver is a smi trigger the non-string case.
+ if (check_mode_ == RECEIVER_IS_UNKNOWN) {
+ __ JumpIfSmi(object_, receiver_not_string_);
+
+ // Fetch the instance type of the receiver into result register.
+ __ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ LoadlB(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ // If the receiver is not a string trigger the non-string case.
+ __ mov(r0, Operand(kIsNotStringMask));
+ __ AndP(r0, result_);
+ __ bne(receiver_not_string_);
+ }
+
+ // If the index is non-smi trigger the non-smi case.
+ __ JumpIfNotSmi(index_, &index_not_smi_);
+ __ bind(&got_smi_index_);
+
+ // Check for index out of range.
+ __ LoadP(ip, FieldMemOperand(object_, String::kLengthOffset));
+ __ CmpLogicalP(ip, index_);
+ __ ble(index_out_of_range_);
+
+ __ SmiUntag(index_);
+
+ StringCharLoadGenerator::Generate(masm, object_, index_, result_,
+ &call_runtime_);
+
+ __ SmiTag(result_);
+ __ bind(&exit_);
+}
+
+void StringCharCodeAtGenerator::GenerateSlow(
+ MacroAssembler* masm, EmbedMode embed_mode,
+ const RuntimeCallHelper& call_helper) {
+ __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
+
+ // Index is not a smi.
+ __ bind(&index_not_smi_);
+ // If index is a heap number, try converting it to an integer.
+ __ CheckMap(index_, result_, Heap::kHeapNumberMapRootIndex, index_not_number_,
+ DONT_DO_SMI_CHECK);
+ call_helper.BeforeCall(masm);
+ if (embed_mode == PART_OF_IC_HANDLER) {
+ __ Push(LoadWithVectorDescriptor::VectorRegister(),
+ LoadWithVectorDescriptor::SlotRegister(), object_, index_);
+ } else {
+ // index_ is consumed by runtime conversion function.
+ __ Push(object_, index_);
+ }
+ if (index_flags_ == STRING_INDEX_IS_NUMBER) {
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
+ } else {
+ DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+ // NumberToSmi discards numbers that are not exact integers.
+ __ CallRuntime(Runtime::kNumberToSmi);
+ }
+ // Save the conversion result before the pop instructions below
+ // have a chance to overwrite it.
+ __ Move(index_, r2);
+ if (embed_mode == PART_OF_IC_HANDLER) {
+ __ Pop(LoadWithVectorDescriptor::VectorRegister(),
+ LoadWithVectorDescriptor::SlotRegister(), object_);
+ } else {
+ __ pop(object_);
+ }
+ // Reload the instance type.
+ __ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ LoadlB(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ call_helper.AfterCall(masm);
+ // If index is still not a smi, it must be out of range.
+ __ JumpIfNotSmi(index_, index_out_of_range_);
+ // Otherwise, return to the fast path.
+ __ b(&got_smi_index_);
+
+ // Call runtime. We get here when the receiver is a string and the
+ // index is a number, but the code of getting the actual character
+ // is too complex (e.g., when the string needs to be flattened).
+ __ bind(&call_runtime_);
+ call_helper.BeforeCall(masm);
+ __ SmiTag(index_);
+ __ Push(object_, index_);
+ __ CallRuntime(Runtime::kStringCharCodeAtRT);
+ __ Move(result_, r2);
+ call_helper.AfterCall(masm);
+ __ b(&exit_);
+
+ __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
+}
+
+// -------------------------------------------------------------------------
+// StringCharFromCodeGenerator
+
+void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
+ // Fast case of Heap::LookupSingleCharacterStringFromCode.
+ DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
+ __ LoadSmiLiteral(r0, Smi::FromInt(~String::kMaxOneByteCharCodeU));
+ __ OrP(r0, r0, Operand(kSmiTagMask));
+ __ AndP(r0, code_, r0);
+ __ bne(&slow_case_);
+
+ __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
+ // At this point code register contains smi tagged one-byte char code.
+ __ LoadRR(r0, code_);
+ __ SmiToPtrArrayOffset(code_, code_);
+ __ AddP(result_, code_);
+ __ LoadRR(code_, r0);
+ __ LoadP(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
+ __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
+ __ beq(&slow_case_);
+ __ bind(&exit_);
+}
+
+void StringCharFromCodeGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
+
+ __ bind(&slow_case_);
+ call_helper.BeforeCall(masm);
+ __ push(code_);
+ __ CallRuntime(Runtime::kStringCharFromCode);
+ __ Move(result_, r2);
+ call_helper.AfterCall(masm);
+ __ b(&exit_);
+
+ __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
+}
+
+enum CopyCharactersFlags { COPY_ASCII = 1, DEST_ALWAYS_ALIGNED = 2 };
+
+void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, Register dest,
+ Register src, Register count,
+ Register scratch,
+ String::Encoding encoding) {
+ if (FLAG_debug_code) {
+ // Check that destination is word aligned.
+ __ mov(r0, Operand(kPointerAlignmentMask));
+ __ AndP(r0, dest);
+ __ Check(eq, kDestinationOfCopyNotAligned, cr0);
+ }
+
+ // Nothing to do for zero characters.
+ Label done;
+ if (encoding == String::TWO_BYTE_ENCODING) {
+ // double the length
+ __ AddP(count, count, count);
+ __ beq(&done, Label::kNear);
+ } else {
+ __ CmpP(count, Operand::Zero());
+ __ beq(&done, Label::kNear);
+ }
+
+ // Copy count bytes from src to dst.
+ Label byte_loop;
+ // TODO(joransiu): Convert into MVC loop
+ __ bind(&byte_loop);
+ __ LoadlB(scratch, MemOperand(src));
+ __ la(src, MemOperand(src, 1));
+ __ stc(scratch, MemOperand(dest));
+ __ la(dest, MemOperand(dest, 1));
+ __ BranchOnCount(count, &byte_loop);
+
+ __ bind(&done);
+}
+
+void SubStringStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // lr: return address
+ // sp[0]: to
+ // sp[4]: from
+ // sp[8]: string
+
+ // This stub is called from the native-call %_SubString(...), so
+ // nothing can be assumed about the arguments. It is tested that:
+ // "string" is a sequential string,
+ // both "from" and "to" are smis, and
+ // 0 <= from <= to <= string.length.
+ // If any of these assumptions fail, we call the runtime system.
+
+ const int kToOffset = 0 * kPointerSize;
+ const int kFromOffset = 1 * kPointerSize;
+ const int kStringOffset = 2 * kPointerSize;
+
+ __ LoadP(r4, MemOperand(sp, kToOffset));
+ __ LoadP(r5, MemOperand(sp, kFromOffset));
+
+ // If either to or from had the smi tag bit set, then fail to generic runtime
+ __ JumpIfNotSmi(r4, &runtime);
+ __ JumpIfNotSmi(r5, &runtime);
+ __ SmiUntag(r4);
+ __ SmiUntag(r5);
+ // Both r4 and r5 are untagged integers.
+
+ // We want to bailout to runtime here if From is negative.
+ __ blt(&runtime); // From < 0.
+
+ __ CmpLogicalP(r5, r4);
+ __ bgt(&runtime); // Fail if from > to.
+ __ SubP(r4, r4, r5);
+
+ // Make sure first argument is a string.
+ __ LoadP(r2, MemOperand(sp, kStringOffset));
+ __ JumpIfSmi(r2, &runtime);
+ Condition is_string = masm->IsObjectStringType(r2, r3);
+ __ b(NegateCondition(is_string), &runtime);
+
+ Label single_char;
+ __ CmpP(r4, Operand(1));
+ __ b(eq, &single_char);
+
+ // Short-cut for the case of trivial substring.
+ Label return_r2;
+ // r2: original string
+ // r4: result string length
+ __ LoadP(r6, FieldMemOperand(r2, String::kLengthOffset));
+ __ SmiUntag(r0, r6);
+ __ CmpLogicalP(r4, r0);
+ // Return original string.
+ __ beq(&return_r2);
+ // Longer than original string's length or negative: unsafe arguments.
+ __ bgt(&runtime);
+ // Shorter than original string's length: an actual substring.
+
+ // Deal with different string types: update the index if necessary
+ // and put the underlying string into r7.
+ // r2: original string
+ // r3: instance type
+ // r4: length
+ // r5: from index (untagged)
+ Label underlying_unpacked, sliced_string, seq_or_external_string;
+ // If the string is not indirect, it can only be sequential or external.
+ STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+ STATIC_ASSERT(kIsIndirectStringMask != 0);
+ __ mov(r0, Operand(kIsIndirectStringMask));
+ __ AndP(r0, r3);
+ __ beq(&seq_or_external_string);
+
+ __ mov(r0, Operand(kSlicedNotConsMask));
+ __ AndP(r0, r3);
+ __ bne(&sliced_string);
+ // Cons string. Check whether it is flat, then fetch first part.
+ __ LoadP(r7, FieldMemOperand(r2, ConsString::kSecondOffset));
+ __ CompareRoot(r7, Heap::kempty_stringRootIndex);
+ __ bne(&runtime);
+ __ LoadP(r7, FieldMemOperand(r2, ConsString::kFirstOffset));
+ // Update instance type.
+ __ LoadP(r3, FieldMemOperand(r7, HeapObject::kMapOffset));
+ __ LoadlB(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
+ __ b(&underlying_unpacked);
+
+ __ bind(&sliced_string);
+ // Sliced string. Fetch parent and correct start index by offset.
+ __ LoadP(r7, FieldMemOperand(r2, SlicedString::kParentOffset));
+ __ LoadP(r6, FieldMemOperand(r2, SlicedString::kOffsetOffset));
+ __ SmiUntag(r3, r6);
+ __ AddP(r5, r3); // Add offset to index.
+ // Update instance type.
+ __ LoadP(r3, FieldMemOperand(r7, HeapObject::kMapOffset));
+ __ LoadlB(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
+ __ b(&underlying_unpacked);
+
+ __ bind(&seq_or_external_string);
+ // Sequential or external string. Just move string to the expected register.
+ __ LoadRR(r7, r2);
+
+ __ bind(&underlying_unpacked);
+
+ if (FLAG_string_slices) {
+ Label copy_routine;
+ // r7: underlying subject string
+ // r3: instance type of underlying subject string
+ // r4: length
+ // r5: adjusted start index (untagged)
+ __ CmpP(r4, Operand(SlicedString::kMinLength));
+ // Short slice. Copy instead of slicing.
+ __ blt(&copy_routine);
+ // Allocate new sliced string. At this point we do not reload the instance
+ // type including the string encoding because we simply rely on the info
+ // provided by the original string. It does not matter if the original
+ // string's encoding is wrong because we always have to recheck encoding of
+ // the newly created string's parent anyways due to externalized strings.
+ Label two_byte_slice, set_slice_header;
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+ __ mov(r0, Operand(kStringEncodingMask));
+ __ AndP(r0, r3);
+ __ beq(&two_byte_slice);
+ __ AllocateOneByteSlicedString(r2, r4, r8, r9, &runtime);
+ __ b(&set_slice_header);
+ __ bind(&two_byte_slice);
+ __ AllocateTwoByteSlicedString(r2, r4, r8, r9, &runtime);
+ __ bind(&set_slice_header);
+ __ SmiTag(r5);
+ __ StoreP(r7, FieldMemOperand(r2, SlicedString::kParentOffset));
+ __ StoreP(r5, FieldMemOperand(r2, SlicedString::kOffsetOffset));
+ __ b(&return_r2);
+
+ __ bind(&copy_routine);
+ }
+
+ // r7: underlying subject string
+ // r3: instance type of underlying subject string
+ // r4: length
+ // r5: adjusted start index (untagged)
+ Label two_byte_sequential, sequential_string, allocate_result;
+ STATIC_ASSERT(kExternalStringTag != 0);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ mov(r0, Operand(kExternalStringTag));
+ __ AndP(r0, r3);
+ __ beq(&sequential_string);
+
+ // Handle external string.
+ // Rule out short external strings.
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ __ mov(r0, Operand(kShortExternalStringTag));
+ __ AndP(r0, r3);
+ __ bne(&runtime);
+ __ LoadP(r7, FieldMemOperand(r7, ExternalString::kResourceDataOffset));
+ // r7 already points to the first character of underlying string.
+ __ b(&allocate_result);
+
+ __ bind(&sequential_string);
+ // Locate first character of underlying subject string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ AddP(r7, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+
+ __ bind(&allocate_result);
+ // Sequential acii string. Allocate the result.
+ STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
+ __ mov(r0, Operand(kStringEncodingMask));
+ __ AndP(r0, r3);
+ __ beq(&two_byte_sequential);
+
+ // Allocate and copy the resulting one-byte string.
+ __ AllocateOneByteString(r2, r4, r6, r8, r9, &runtime);
+
+ // Locate first character of substring to copy.
+ __ AddP(r7, r5);
+ // Locate first character of result.
+ __ AddP(r3, r2, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+
+ // r2: result string
+ // r3: first character of result string
+ // r4: result string length
+ // r7: first character of substring to copy
+ STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ StringHelper::GenerateCopyCharacters(masm, r3, r7, r4, r5,
+ String::ONE_BYTE_ENCODING);
+ __ b(&return_r2);
+
+ // Allocate and copy the resulting two-byte string.
+ __ bind(&two_byte_sequential);
+ __ AllocateTwoByteString(r2, r4, r6, r8, r9, &runtime);
+
+ // Locate first character of substring to copy.
+ __ ShiftLeftP(r3, r5, Operand(1));
+ __ AddP(r7, r3);
+ // Locate first character of result.
+ __ AddP(r3, r2, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+ // r2: result string.
+ // r3: first character of result.
+ // r4: result length.
+ // r7: first character of substring to copy.
+ STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ StringHelper::GenerateCopyCharacters(masm, r3, r7, r4, r5,
+ String::TWO_BYTE_ENCODING);
+
+ __ bind(&return_r2);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->sub_string_native(), 1, r5, r6);
+ __ Drop(3);
+ __ Ret();
+
+ // Just jump to runtime to create the sub string.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kSubString);
+
+ __ bind(&single_char);
+ // r2: original string
+ // r3: instance type
+ // r4: length
+ // r5: from index (untagged)
+ __ SmiTag(r5, r5);
+ StringCharAtGenerator generator(r2, r5, r4, r2, &runtime, &runtime, &runtime,
+ STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
+ generator.GenerateFast(masm);
+ __ Drop(3);
+ __ Ret();
+ generator.SkipSlow(masm, &runtime);
+}
+
+void ToNumberStub::Generate(MacroAssembler* masm) {
+ // The ToNumber stub takes one argument in r2.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ TestIfSmi(r2);
+ __ Ret(eq);
+
+ __ CompareObjectType(r2, r3, r3, HEAP_NUMBER_TYPE);
+ // r2: receiver
+ // r3: receiver instance type
+ Label not_heap_number;
+ __ bne(&not_heap_number);
+ __ Ret();
+ __ bind(&not_heap_number);
+
+ NonNumberToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+}
+
+void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
+ // The NonNumberToNumber stub takes one argument in r2.
+ __ AssertNotNumber(r2);
+
+ __ CompareObjectType(r2, r3, r3, FIRST_NONSTRING_TYPE);
+ // r2: receiver
+ // r3: receiver instance type
+ StringToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub, lt);
+
+ Label not_oddball;
+ __ CmpP(r3, Operand(ODDBALL_TYPE));
+ __ bne(&not_oddball, Label::kNear);
+ __ LoadP(r2, FieldMemOperand(r2, Oddball::kToNumberOffset));
+ __ b(r14);
+ __ bind(&not_oddball);
+
+ __ push(r2); // Push argument.
+ __ TailCallRuntime(Runtime::kToNumber);
+}
+
+void StringToNumberStub::Generate(MacroAssembler* masm) {
+ // The StringToNumber stub takes one argument in r2.
+ __ AssertString(r2);
+
+ // Check if string has a cached array index.
+ Label runtime;
+ __ LoadlW(r4, FieldMemOperand(r2, String::kHashFieldOffset));
+ __ And(r0, r4, Operand(String::kContainsCachedArrayIndexMask));
+ __ bne(&runtime);
+ __ IndexFromHash(r4, r2);
+ __ Ret();
+
+ __ bind(&runtime);
+ __ push(r2); // Push argument.
+ __ TailCallRuntime(Runtime::kStringToNumber);
+}
+
+void ToStringStub::Generate(MacroAssembler* masm) {
+ // The ToString stub takes one argument in r2.
+ Label done;
+ Label is_number;
+ __ JumpIfSmi(r2, &is_number);
+
+ __ CompareObjectType(r2, r3, r3, FIRST_NONSTRING_TYPE);
+ // r2: receiver
+ // r3: receiver instance type
+ __ blt(&done);
+
+ Label not_heap_number;
+ __ CmpP(r3, Operand(HEAP_NUMBER_TYPE));
+ __ bne(&not_heap_number);
+ __ bind(&is_number);
+ NumberToStringStub stub(isolate());
+ __ TailCallStub(&stub);
+ __ bind(&not_heap_number);
+
+ Label not_oddball;
+ __ CmpP(r3, Operand(ODDBALL_TYPE));
+ __ bne(&not_oddball);
+ __ LoadP(r2, FieldMemOperand(r2, Oddball::kToStringOffset));
+ __ Ret();
+ __ bind(&not_oddball);
+
+ __ push(r2); // Push argument.
+ __ TailCallRuntime(Runtime::kToString);
+
+ __ bind(&done);
+ __ Ret();
+}
+
+void ToNameStub::Generate(MacroAssembler* masm) {
+ // The ToName stub takes one argument in r2.
+ Label is_number;
+ __ JumpIfSmi(r2, &is_number);
+
+ STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+ __ CompareObjectType(r2, r3, r3, LAST_NAME_TYPE);
+ // r2: receiver
+ // r3: receiver instance type
+ __ Ret(le);
+
+ Label not_heap_number;
+ __ CmpP(r3, Operand(HEAP_NUMBER_TYPE));
+ __ bne(&not_heap_number);
+ __ bind(&is_number);
+ NumberToStringStub stub(isolate());
+ __ TailCallStub(&stub);
+ __ bind(&not_heap_number);
+
+ Label not_oddball;
+ __ CmpP(r3, Operand(ODDBALL_TYPE));
+ __ bne(&not_oddball);
+ __ LoadP(r2, FieldMemOperand(r2, Oddball::kToStringOffset));
+ __ Ret();
+ __ bind(&not_oddball);
+
+ __ push(r2); // Push argument.
+ __ TailCallRuntime(Runtime::kToName);
+}
+
+void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2) {
+ Register length = scratch1;
+
+ // Compare lengths.
+ Label strings_not_equal, check_zero_length;
+ __ LoadP(length, FieldMemOperand(left, String::kLengthOffset));
+ __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset));
+ __ CmpP(length, scratch2);
+ __ beq(&check_zero_length);
+ __ bind(&strings_not_equal);
+ __ LoadSmiLiteral(r2, Smi::FromInt(NOT_EQUAL));
+ __ Ret();
+
+ // Check if the length is zero.
+ Label compare_chars;
+ __ bind(&check_zero_length);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ CmpP(length, Operand::Zero());
+ __ bne(&compare_chars);
+ __ LoadSmiLiteral(r2, Smi::FromInt(EQUAL));
+ __ Ret();
+
+ // Compare characters.
+ __ bind(&compare_chars);
+ GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2,
+ &strings_not_equal);
+
+ // Characters are equal.
+ __ LoadSmiLiteral(r2, Smi::FromInt(EQUAL));
+ __ Ret();
+}
+
+void StringHelper::GenerateCompareFlatOneByteStrings(
+ MacroAssembler* masm, Register left, Register right, Register scratch1,
+ Register scratch2, Register scratch3) {
+ Label skip, result_not_equal, compare_lengths;
+ // Find minimum length and length difference.
+ __ LoadP(scratch1, FieldMemOperand(left, String::kLengthOffset));
+ __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset));
+ __ SubP(scratch3, scratch1, scratch2 /*, LeaveOE, SetRC*/);
+ // Removing RC looks okay here.
+ Register length_delta = scratch3;
+ __ ble(&skip, Label::kNear);
+ __ LoadRR(scratch1, scratch2);
+ __ bind(&skip);
+ Register min_length = scratch1;
+ STATIC_ASSERT(kSmiTag == 0);
+ __ CmpP(min_length, Operand::Zero());
+ __ beq(&compare_lengths);
+
+ // Compare loop.
+ GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
+ &result_not_equal);
+
+ // Compare lengths - strings up to min-length are equal.
+ __ bind(&compare_lengths);
+ DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+ // Use length_delta as result if it's zero.
+ __ LoadRR(r2, length_delta);
+ __ CmpP(length_delta, Operand::Zero());
+ __ bind(&result_not_equal);
+ // Conditionally update the result based either on length_delta or
+ // the last comparion performed in the loop above.
+ Label less_equal, equal;
+ __ ble(&less_equal);
+ __ LoadSmiLiteral(r2, Smi::FromInt(GREATER));
+ __ Ret();
+ __ bind(&less_equal);
+ __ beq(&equal);
+ __ LoadSmiLiteral(r2, Smi::FromInt(LESS));
+ __ bind(&equal);
+ __ Ret();
+}
+
+void StringHelper::GenerateOneByteCharsCompareLoop(
+ MacroAssembler* masm, Register left, Register right, Register length,
+ Register scratch1, Label* chars_not_equal) {
+ // Change index to run from -length to -1 by adding length to string
+ // start. This means that loop ends when index reaches zero, which
+ // doesn't need an additional compare.
+ __ SmiUntag(length);
+ __ AddP(scratch1, length,
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ AddP(left, scratch1);
+ __ AddP(right, scratch1);
+ __ LoadComplementRR(length, length);
+ Register index = length; // index = -length;
+
+ // Compare loop.
+ Label loop;
+ __ bind(&loop);
+ __ LoadlB(scratch1, MemOperand(left, index));
+ __ LoadlB(r0, MemOperand(right, index));
+ __ CmpP(scratch1, r0);
+ __ bne(chars_not_equal);
+ __ AddP(index, Operand(1));
+ __ CmpP(index, Operand::Zero());
+ __ bne(&loop);
+}
+
+void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : left
+ // -- r2 : right
+ // r3: second string
+ // -----------------------------------
+
+ // Load r4 with the allocation site. We stick an undefined dummy value here
+ // and replace it with the real allocation site later when we instantiate this
+ // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
+ __ Move(r4, handle(isolate()->heap()->undefined_value()));
+
+ // Make sure that we actually patched the allocation site.
+ if (FLAG_debug_code) {
+ __ TestIfSmi(r4);
+ __ Assert(ne, kExpectedAllocationSite, cr0);
+ __ push(r4);
+ __ LoadP(r4, FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ CompareRoot(r4, Heap::kAllocationSiteMapRootIndex);
+ __ pop(r4);
+ __ Assert(eq, kExpectedAllocationSite);
+ }
+
+ // Tail call into the stub that handles binary operations with allocation
+ // sites.
+ BinaryOpWithAllocationSiteStub stub(isolate(), state());
+ __ TailCallStub(&stub);
+}
+
+void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
+ DCHECK_EQ(CompareICState::BOOLEAN, state());
+ Label miss;
+
+ __ CheckMap(r3, r4, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
+ __ CheckMap(r2, r5, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
+ if (!Token::IsEqualityOp(op())) {
+ __ LoadP(r3, FieldMemOperand(r3, Oddball::kToNumberOffset));
+ __ AssertSmi(r3);
+ __ LoadP(r2, FieldMemOperand(r2, Oddball::kToNumberOffset));
+ __ AssertSmi(r2);
+ }
+ __ SubP(r2, r3, r2);
+ __ Ret();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+void CompareICStub::GenerateSmis(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::SMI);
+ Label miss;
+ __ OrP(r4, r3, r2);
+ __ JumpIfNotSmi(r4, &miss);
+
+ if (GetCondition() == eq) {
+ // For equality we do not care about the sign of the result.
+ // __ sub(r2, r2, r3, SetCC);
+ __ SubP(r2, r2, r3);
+ } else {
+ // Untag before subtracting to avoid handling overflow.
+ __ SmiUntag(r3);
+ __ SmiUntag(r2);
+ __ SubP(r2, r3, r2);
+ }
+ __ Ret();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::NUMBER);
+
+ Label generic_stub;
+ Label unordered, maybe_undefined1, maybe_undefined2;
+ Label miss;
+ Label equal, less_than;
+
+ if (left() == CompareICState::SMI) {
+ __ JumpIfNotSmi(r3, &miss);
+ }
+ if (right() == CompareICState::SMI) {
+ __ JumpIfNotSmi(r2, &miss);
+ }
+
+ // Inlining the double comparison and falling back to the general compare
+ // stub if NaN is involved.
+ // Load left and right operand.
+ Label done, left, left_smi, right_smi;
+ __ JumpIfSmi(r2, &right_smi);
+ __ CheckMap(r2, r4, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
+ DONT_DO_SMI_CHECK);
+ __ LoadDouble(d1, FieldMemOperand(r2, HeapNumber::kValueOffset));
+ __ b(&left);
+ __ bind(&right_smi);
+ __ SmiToDouble(d1, r2);
+
+ __ bind(&left);
+ __ JumpIfSmi(r3, &left_smi);
+ __ CheckMap(r3, r4, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
+ DONT_DO_SMI_CHECK);
+ __ LoadDouble(d0, FieldMemOperand(r3, HeapNumber::kValueOffset));
+ __ b(&done);
+ __ bind(&left_smi);
+ __ SmiToDouble(d0, r3);
+
+ __ bind(&done);
+
+ // Compare operands
+ __ cdbr(d0, d1);
+
+ // Don't base result on status bits when a NaN is involved.
+ __ bunordered(&unordered);
+
+ // Return a result of -1, 0, or 1, based on status bits.
+ __ beq(&equal);
+ __ blt(&less_than);
+ // assume greater than
+ __ LoadImmP(r2, Operand(GREATER));
+ __ Ret();
+ __ bind(&equal);
+ __ LoadImmP(r2, Operand(EQUAL));
+ __ Ret();
+ __ bind(&less_than);
+ __ LoadImmP(r2, Operand(LESS));
+ __ Ret();
+
+ __ bind(&unordered);
+ __ bind(&generic_stub);
+ CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+ CompareICState::GENERIC, CompareICState::GENERIC);
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+
+ __ bind(&maybe_undefined1);
+ if (Token::IsOrderedRelationalCompareOp(op())) {
+ __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ bne(&miss);
+ __ JumpIfSmi(r3, &unordered);
+ __ CompareObjectType(r3, r4, r4, HEAP_NUMBER_TYPE);
+ __ bne(&maybe_undefined2);
+ __ b(&unordered);
+ }
+
+ __ bind(&maybe_undefined2);
+ if (Token::IsOrderedRelationalCompareOp(op())) {
+ __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ beq(&unordered);
+ }
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::INTERNALIZED_STRING);
+ Label miss, not_equal;
+
+ // Registers containing left and right operands respectively.
+ Register left = r3;
+ Register right = r2;
+ Register tmp1 = r4;
+ Register tmp2 = r5;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(left, right, &miss);
+
+ // Check that both operands are symbols.
+ __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ LoadlB(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+ __ LoadlB(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ __ OrP(tmp1, tmp1, tmp2);
+ __ AndP(r0, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+ __ bne(&miss);
+
+ // Internalized strings are compared by identity.
+ __ CmpP(left, right);
+ __ bne(&not_equal);
+ // Make sure r2 is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ DCHECK(right.is(r2));
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ LoadSmiLiteral(r2, Smi::FromInt(EQUAL));
+ __ bind(&not_equal);
+ __ Ret();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::UNIQUE_NAME);
+ DCHECK(GetCondition() == eq);
+ Label miss;
+
+ // Registers containing left and right operands respectively.
+ Register left = r3;
+ Register right = r2;
+ Register tmp1 = r4;
+ Register tmp2 = r5;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(left, right, &miss);
+
+ // Check that both operands are unique names. This leaves the instance
+ // types loaded in tmp1 and tmp2.
+ __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ LoadlB(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+ __ LoadlB(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+
+ __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
+ __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
+
+ // Unique names are compared by identity.
+ __ CmpP(left, right);
+ __ bne(&miss);
+ // Make sure r2 is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ DCHECK(right.is(r2));
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ LoadSmiLiteral(r2, Smi::FromInt(EQUAL));
+ __ Ret();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+void CompareICStub::GenerateStrings(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::STRING);
+ Label miss, not_identical, is_symbol;
+
+ bool equality = Token::IsEqualityOp(op());
+
+ // Registers containing left and right operands respectively.
+ Register left = r3;
+ Register right = r2;
+ Register tmp1 = r4;
+ Register tmp2 = r5;
+ Register tmp3 = r6;
+ Register tmp4 = r7;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(left, right, &miss);
+
+ // Check that both operands are strings. This leaves the instance
+ // types loaded in tmp1 and tmp2.
+ __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ LoadlB(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+ __ LoadlB(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kNotStringTag != 0);
+ __ OrP(tmp3, tmp1, tmp2);
+ __ AndP(r0, tmp3, Operand(kIsNotStringMask));
+ __ bne(&miss);
+
+ // Fast check for identical strings.
+ __ CmpP(left, right);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ bne(&not_identical);
+ __ LoadSmiLiteral(r2, Smi::FromInt(EQUAL));
+ __ Ret();
+ __ bind(&not_identical);
+
+ // Handle not identical strings.
+
+ // Check that both strings are internalized strings. If they are, we're done
+ // because we already know they are not identical. We know they are both
+ // strings.
+ if (equality) {
+ DCHECK(GetCondition() == eq);
+ STATIC_ASSERT(kInternalizedTag == 0);
+ __ OrP(tmp3, tmp1, tmp2);
+ __ AndP(r0, tmp3, Operand(kIsNotInternalizedMask));
+ __ bne(&is_symbol);
+ // Make sure r2 is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ DCHECK(right.is(r2));
+ __ Ret();
+ __ bind(&is_symbol);
+ }
+
+ // Check that both strings are sequential one-byte.
+ Label runtime;
+ __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
+ &runtime);
+
+ // Compare flat one-byte strings. Returns when done.
+ if (equality) {
+ StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1,
+ tmp2);
+ } else {
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
+ tmp2, tmp3);
+ }
+
+ // Handle more complex cases in runtime.
+ __ bind(&runtime);
+ if (equality) {
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(left, right);
+ __ CallRuntime(Runtime::kStringEqual);
+ }
+ __ LoadRoot(r3, Heap::kTrueValueRootIndex);
+ __ SubP(r2, r2, r3);
+ __ Ret();
+ } else {
+ __ Push(left, right);
+ __ TailCallRuntime(Runtime::kStringCompare);
+ }
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
+ DCHECK_EQ(CompareICState::RECEIVER, state());
+ Label miss;
+ __ AndP(r4, r3, r2);
+ __ JumpIfSmi(r4, &miss);
+
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ __ CompareObjectType(r2, r4, r4, FIRST_JS_RECEIVER_TYPE);
+ __ blt(&miss);
+ __ CompareObjectType(r3, r4, r4, FIRST_JS_RECEIVER_TYPE);
+ __ blt(&miss);
+
+ DCHECK(GetCondition() == eq);
+ __ SubP(r2, r2, r3);
+ __ Ret();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
+ Label miss;
+ Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
+ __ AndP(r4, r3, r2);
+ __ JumpIfSmi(r4, &miss);
+ __ GetWeakValue(r6, cell);
+ __ LoadP(r4, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ CmpP(r4, r6);
+ __ bne(&miss);
+ __ CmpP(r5, r6);
+ __ bne(&miss);
+
+ if (Token::IsEqualityOp(op())) {
+ __ SubP(r2, r2, r3);
+ __ Ret();
+ } else {
+ if (op() == Token::LT || op() == Token::LTE) {
+ __ LoadSmiLiteral(r4, Smi::FromInt(GREATER));
+ } else {
+ __ LoadSmiLiteral(r4, Smi::FromInt(LESS));
+ }
+ __ Push(r3, r2, r4);
+ __ TailCallRuntime(Runtime::kCompare);
+ }
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+void CompareICStub::GenerateMiss(MacroAssembler* masm) {
+ {
+ // Call the runtime system in a fresh internal frame.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r3, r2);
+ __ Push(r3, r2);
+ __ LoadSmiLiteral(r0, Smi::FromInt(op()));
+ __ push(r0);
+ __ CallRuntime(Runtime::kCompareIC_Miss);
+ // Compute the entry point of the rewritten stub.
+ __ AddP(r4, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Restore registers.
+ __ Pop(r3, r2);
+ }
+
+ __ JumpToJSEntry(r4);
+}
+
+// This stub is paired with DirectCEntryStub::GenerateCall
+void DirectCEntryStub::Generate(MacroAssembler* masm) {
+ __ CleanseP(r14);
+
+ // Statement positions are expected to be recorded when the target
+ // address is loaded.
+ __ positions_recorder()->WriteRecordedPositions();
+
+ __ b(ip); // Callee will return to R14 directly
+}
+
+void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) {
+#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
+ // Native AIX/S390X Linux use a function descriptor.
+ __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(target, kPointerSize));
+ __ LoadP(target, MemOperand(target, 0)); // Instruction address
+#else
+ // ip needs to be set for DirectCEentryStub::Generate, and also
+ // for ABI_CALL_VIA_IP.
+ __ Move(ip, target);
+#endif
+
+ __ call(GetCode(), RelocInfo::CODE_TARGET); // Call the stub.
+}
+
+void NameDictionaryLookupStub::GenerateNegativeLookup(
+ MacroAssembler* masm, Label* miss, Label* done, Register receiver,
+ Register properties, Handle<Name> name, Register scratch0) {
+ DCHECK(name->IsUniqueName());
+ // If names of slots in range from 1 to kProbes - 1 for the hash value are
+ // not equal to the name and kProbes-th slot is not used (its name is the
+ // undefined value), it guarantees the hash table doesn't contain the
+ // property. It's true even if some slots represent deleted properties
+ // (their names are the hole value).
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // scratch0 points to properties hash.
+ // Compute the masked index: (hash + i + i * i) & mask.
+ Register index = scratch0;
+ // Capacity is smi 2^n.
+ __ LoadP(index, FieldMemOperand(properties, kCapacityOffset));
+ __ SubP(index, Operand(1));
+ __ LoadSmiLiteral(
+ ip, Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i)));
+ __ AndP(index, ip);
+
+ // Scale the index by multiplying by the entry size.
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
+ __ ShiftLeftP(ip, index, Operand(1));
+ __ AddP(index, ip); // index *= 3.
+
+ Register entity_name = scratch0;
+ // Having undefined at this place means the name is not contained.
+ Register tmp = properties;
+ __ SmiToPtrArrayOffset(ip, index);
+ __ AddP(tmp, properties, ip);
+ __ LoadP(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
+
+ DCHECK(!tmp.is(entity_name));
+ __ CompareRoot(entity_name, Heap::kUndefinedValueRootIndex);
+ __ beq(done);
+
+ // Stop if found the property.
+ __ CmpP(entity_name, Operand(Handle<Name>(name)));
+ __ beq(miss);
+
+ Label good;
+ __ CompareRoot(entity_name, Heap::kTheHoleValueRootIndex);
+ __ beq(&good);
+
+ // Check if the entry name is not a unique name.
+ __ LoadP(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
+ __ LoadlB(entity_name,
+ FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
+ __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
+ __ bind(&good);
+
+ // Restore the properties.
+ __ LoadP(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ }
+
+ const int spill_mask = (r0.bit() | r8.bit() | r7.bit() | r6.bit() | r5.bit() |
+ r4.bit() | r3.bit() | r2.bit());
+
+ __ LoadRR(r0, r14);
+ __ MultiPush(spill_mask);
+
+ __ LoadP(r2, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ mov(r3, Operand(Handle<Name>(name)));
+ NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
+ __ CallStub(&stub);
+ __ CmpP(r2, Operand::Zero());
+
+ __ MultiPop(spill_mask); // MultiPop does not touch condition flags
+ __ LoadRR(r14, r0);
+
+ __ beq(done);
+ __ bne(miss);
+}
+
+// Probe the name dictionary in the |elements| register. Jump to the
+// |done| label if a property with the given name is found. Jump to
+// the |miss| label otherwise.
+// If lookup was successful |scratch2| will be equal to elements + 4 * index.
+void NameDictionaryLookupStub::GeneratePositiveLookup(
+ MacroAssembler* masm, Label* miss, Label* done, Register elements,
+ Register name, Register scratch1, Register scratch2) {
+ DCHECK(!elements.is(scratch1));
+ DCHECK(!elements.is(scratch2));
+ DCHECK(!name.is(scratch1));
+ DCHECK(!name.is(scratch2));
+
+ __ AssertName(name);
+
+ // Compute the capacity mask.
+ __ LoadP(scratch1, FieldMemOperand(elements, kCapacityOffset));
+ __ SmiUntag(scratch1); // convert smi to int
+ __ SubP(scratch1, Operand(1));
+
+ // Generate an unrolled loop that performs a few probes before
+ // giving up. Measurements done on Gmail indicate that 2 probes
+ // cover ~93% of loads from dictionaries.
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ __ LoadlW(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
+ if (i > 0) {
+ // Add the probe offset (i + i * i) left shifted to avoid right shifting
+ // the hash in a separate instruction. The value hash + i + i * i is right
+ // shifted in the following and instruction.
+ DCHECK(NameDictionary::GetProbeOffset(i) <
+ 1 << (32 - Name::kHashFieldOffset));
+ __ AddP(scratch2,
+ Operand(NameDictionary::GetProbeOffset(i) << Name::kHashShift));
+ }
+ __ srl(scratch2, Operand(String::kHashShift));
+ __ AndP(scratch2, scratch1);
+
+ // Scale the index by multiplying by the entry size.
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
+ // scratch2 = scratch2 * 3.
+ __ ShiftLeftP(ip, scratch2, Operand(1));
+ __ AddP(scratch2, ip);
+
+ // Check if the key is identical to the name.
+ __ ShiftLeftP(ip, scratch2, Operand(kPointerSizeLog2));
+ __ AddP(scratch2, elements, ip);
+ __ LoadP(ip, FieldMemOperand(scratch2, kElementsStartOffset));
+ __ CmpP(name, ip);
+ __ beq(done);
+ }
+
+ const int spill_mask = (r0.bit() | r8.bit() | r7.bit() | r6.bit() | r5.bit() |
+ r4.bit() | r3.bit() | r2.bit()) &
+ ~(scratch1.bit() | scratch2.bit());
+
+ __ LoadRR(r0, r14);
+ __ MultiPush(spill_mask);
+ if (name.is(r2)) {
+ DCHECK(!elements.is(r3));
+ __ LoadRR(r3, name);
+ __ LoadRR(r2, elements);
+ } else {
+ __ LoadRR(r2, elements);
+ __ LoadRR(r3, name);
+ }
+ NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
+ __ CallStub(&stub);
+ __ LoadRR(r1, r2);
+ __ LoadRR(scratch2, r4);
+ __ MultiPop(spill_mask);
+ __ LoadRR(r14, r0);
+
+ __ CmpP(r1, Operand::Zero());
+ __ bne(done);
+ __ beq(miss);
+}
+
+void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
+ // This stub overrides SometimesSetsUpAFrame() to return false. That means
+ // we cannot call anything that could cause a GC from this stub.
+ // Registers:
+ // result: NameDictionary to probe
+ // r3: key
+ // dictionary: NameDictionary to probe.
+ // index: will hold an index of entry if lookup is successful.
+ // might alias with result_.
+ // Returns:
+ // result_ is zero if lookup failed, non zero otherwise.
+
+ Register result = r2;
+ Register dictionary = r2;
+ Register key = r3;
+ Register index = r4;
+ Register mask = r5;
+ Register hash = r6;
+ Register undefined = r7;
+ Register entry_key = r8;
+ Register scratch = r8;
+
+ Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
+
+ __ LoadP(mask, FieldMemOperand(dictionary, kCapacityOffset));
+ __ SmiUntag(mask);
+ __ SubP(mask, Operand(1));
+
+ __ LoadlW(hash, FieldMemOperand(key, String::kHashFieldOffset));
+
+ __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+ for (int i = kInlinedProbes; i < kTotalProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ // Capacity is smi 2^n.
+ if (i > 0) {
+ // Add the probe offset (i + i * i) left shifted to avoid right shifting
+ // the hash in a separate instruction. The value hash + i + i * i is right
+ // shifted in the following and instruction.
+ DCHECK(NameDictionary::GetProbeOffset(i) <
+ 1 << (32 - Name::kHashFieldOffset));
+ __ AddP(index, hash,
+ Operand(NameDictionary::GetProbeOffset(i) << Name::kHashShift));
+ } else {
+ __ LoadRR(index, hash);
+ }
+ __ ShiftRight(r0, index, Operand(String::kHashShift));
+ __ AndP(index, r0, mask);
+
+ // Scale the index by multiplying by the entry size.
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
+ __ ShiftLeftP(scratch, index, Operand(1));
+ __ AddP(index, scratch); // index *= 3.
+
+ __ ShiftLeftP(scratch, index, Operand(kPointerSizeLog2));
+ __ AddP(index, dictionary, scratch);
+ __ LoadP(entry_key, FieldMemOperand(index, kElementsStartOffset));
+
+ // Having undefined at this place means the name is not contained.
+ __ CmpP(entry_key, undefined);
+ __ beq(&not_in_dictionary);
+
+ // Stop if found the property.
+ __ CmpP(entry_key, key);
+ __ beq(&in_dictionary);
+
+ if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
+ // Check if the entry name is not a unique name.
+ __ LoadP(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
+ __ LoadlB(entry_key,
+ FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
+ __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
+ }
+ }
+
+ __ bind(&maybe_in_dictionary);
+ // If we are doing negative lookup then probing failure should be
+ // treated as a lookup success. For positive lookup probing failure
+ // should be treated as lookup failure.
+ if (mode() == POSITIVE_LOOKUP) {
+ __ LoadImmP(result, Operand::Zero());
+ __ Ret();
+ }
+
+ __ bind(&in_dictionary);
+ __ LoadImmP(result, Operand(1));
+ __ Ret();
+
+ __ bind(&not_in_dictionary);
+ __ LoadImmP(result, Operand::Zero());
+ __ Ret();
+}
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
+ Isolate* isolate) {
+ StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
+ stub1.GetCode();
+ // Hydrogen code stubs need stub2 at snapshot time.
+ StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
+ stub2.GetCode();
+}
+
+// Takes the input in 3 registers: address_ value_ and object_. A pointer to
+// the value has just been written into the object, now this stub makes sure
+// we keep the GC informed. The word in the object where the value has been
+// written is in the address register.
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+ Label skip_to_incremental_noncompacting;
+ Label skip_to_incremental_compacting;
+
+ // The first two branch instructions are generated with labels so as to
+ // get the offset fixed up correctly by the bind(Label*) call. We patch
+ // it back and forth between branch condition True and False
+ // when we start and stop incremental heap marking.
+ // See RecordWriteStub::Patch for details.
+
+ // Clear the bit, branch on True for NOP action initially
+ __ b(CC_NOP, &skip_to_incremental_noncompacting);
+ __ b(CC_NOP, &skip_to_incremental_compacting);
+
+ if (remembered_set_action() == EMIT_REMEMBERED_SET) {
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
+ MacroAssembler::kReturnAtEnd);
+ }
+ __ Ret();
+
+ __ bind(&skip_to_incremental_noncompacting);
+ GenerateIncremental(masm, INCREMENTAL);
+
+ __ bind(&skip_to_incremental_compacting);
+ GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+
+ // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
+ // Will be checked in IncrementalMarking::ActivateGeneratedStub.
+ // patching not required on S390 as the initial path is effectively NOP
+}
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+ regs_.Save(masm);
+
+ if (remembered_set_action() == EMIT_REMEMBERED_SET) {
+ Label dont_need_remembered_set;
+
+ __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0));
+ __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
+ regs_.scratch0(), &dont_need_remembered_set);
+
+ __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
+ &dont_need_remembered_set);
+
+ // First notify the incremental marker if necessary, then update the
+ // remembered set.
+ CheckNeedsToInformIncrementalMarker(
+ masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
+ InformIncrementalMarker(masm);
+ regs_.Restore(masm);
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
+ MacroAssembler::kReturnAtEnd);
+
+ __ bind(&dont_need_remembered_set);
+ }
+
+ CheckNeedsToInformIncrementalMarker(
+ masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
+ InformIncrementalMarker(masm);
+ regs_.Restore(masm);
+ __ Ret();
+}
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
+ regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
+ int argument_count = 3;
+ __ PrepareCallCFunction(argument_count, regs_.scratch0());
+ Register address =
+ r2.is(regs_.address()) ? regs_.scratch0() : regs_.address();
+ DCHECK(!address.is(regs_.object()));
+ DCHECK(!address.is(r2));
+ __ LoadRR(address, regs_.address());
+ __ LoadRR(r2, regs_.object());
+ __ LoadRR(r3, address);
+ __ mov(r4, Operand(ExternalReference::isolate_address(isolate())));
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(isolate()),
+ argument_count);
+ regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
+}
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode) {
+ Label on_black;
+ Label need_incremental;
+ Label need_incremental_pop_scratch;
+
+ DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
+ __ AndP(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
+ __ LoadP(
+ regs_.scratch1(),
+ MemOperand(regs_.scratch0(), MemoryChunk::kWriteBarrierCounterOffset));
+ __ SubP(regs_.scratch1(), regs_.scratch1(), Operand(1));
+ __ StoreP(
+ regs_.scratch1(),
+ MemOperand(regs_.scratch0(), MemoryChunk::kWriteBarrierCounterOffset));
+ __ CmpP(regs_.scratch1(), Operand::Zero()); // S390, we could do better here
+ __ blt(&need_incremental);
+
+ // Let's look at the color of the object: If it is not black we don't have
+ // to inform the incremental marker.
+ __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
+
+ regs_.Restore(masm);
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ Ret();
+ }
+
+ __ bind(&on_black);
+
+ // Get the value from the slot.
+ __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0));
+
+ if (mode == INCREMENTAL_COMPACTION) {
+ Label ensure_not_white;
+
+ __ CheckPageFlag(regs_.scratch0(), // Contains value.
+ regs_.scratch1(), // Scratch.
+ MemoryChunk::kEvacuationCandidateMask, eq,
+ &ensure_not_white);
+
+ __ CheckPageFlag(regs_.object(),
+ regs_.scratch1(), // Scratch.
+ MemoryChunk::kSkipEvacuationSlotsRecordingMask, eq,
+ &need_incremental);
+
+ __ bind(&ensure_not_white);
+ }
+
+ // We need extra registers for this, so we push the object and the address
+ // register temporarily.
+ __ Push(regs_.object(), regs_.address());
+ __ JumpIfWhite(regs_.scratch0(), // The value.
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ regs_.address(), // Scratch.
+ &need_incremental_pop_scratch);
+ __ Pop(regs_.object(), regs_.address());
+
+ regs_.Restore(masm);
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ Ret();
+ }
+
+ __ bind(&need_incremental_pop_scratch);
+ __ Pop(regs_.object(), regs_.address());
+
+ __ bind(&need_incremental);
+
+ // Fall through when we need to inform the incremental marker.
+}
+
+void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
+ CEntryStub ces(isolate(), 1, kSaveFPRegs);
+ __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
+ int parameter_count_offset =
+ StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
+ __ LoadP(r3, MemOperand(fp, parameter_count_offset));
+ if (function_mode() == JS_FUNCTION_STUB_MODE) {
+ __ AddP(r3, Operand(1));
+ }
+ masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+ __ ShiftLeftP(r3, r3, Operand(kPointerSizeLog2));
+ __ la(sp, MemOperand(r3, sp));
+ __ Ret();
+}
+
+void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
+ __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
+ LoadICStub stub(isolate(), state());
+ stub.GenerateForTrampoline(masm);
+}
+
+void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
+ __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
+ KeyedLoadICStub stub(isolate(), state());
+ stub.GenerateForTrampoline(masm);
+}
+
+void CallICTrampolineStub::Generate(MacroAssembler* masm) {
+ __ EmitLoadTypeFeedbackVector(r4);
+ CallICStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
+
+void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+static void HandleArrayCases(MacroAssembler* masm, Register feedback,
+ Register receiver_map, Register scratch1,
+ Register scratch2, bool is_polymorphic,
+ Label* miss) {
+ // feedback initially contains the feedback array
+ Label next_loop, prepare_next;
+ Label start_polymorphic;
+
+ Register cached_map = scratch1;
+
+ __ LoadP(cached_map,
+ FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
+ __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
+ __ CmpP(receiver_map, cached_map);
+ __ bne(&start_polymorphic, Label::kNear);
+ // found, now call handler.
+ Register handler = feedback;
+ __ LoadP(handler,
+ FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
+ __ AddP(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(ip);
+
+ Register length = scratch2;
+ __ bind(&start_polymorphic);
+ __ LoadP(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
+ if (!is_polymorphic) {
+ // If the IC could be monomorphic we have to make sure we don't go past the
+ // end of the feedback array.
+ __ CmpSmiLiteral(length, Smi::FromInt(2), r0);
+ __ beq(miss);
+ }
+
+ Register too_far = length;
+ Register pointer_reg = feedback;
+
+ // +-----+------+------+-----+-----+ ... ----+
+ // | map | len | wm0 | h0 | wm1 | hN |
+ // +-----+------+------+-----+-----+ ... ----+
+ // 0 1 2 len-1
+ // ^ ^
+ // | |
+ // pointer_reg too_far
+ // aka feedback scratch2
+ // also need receiver_map
+ // use cached_map (scratch1) to look in the weak map values.
+ __ SmiToPtrArrayOffset(r0, length);
+ __ AddP(too_far, feedback, r0);
+ __ AddP(too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ AddP(pointer_reg, feedback,
+ Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
+
+ __ bind(&next_loop);
+ __ LoadP(cached_map, MemOperand(pointer_reg));
+ __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
+ __ CmpP(receiver_map, cached_map);
+ __ bne(&prepare_next, Label::kNear);
+ __ LoadP(handler, MemOperand(pointer_reg, kPointerSize));
+ __ AddP(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(ip);
+
+ __ bind(&prepare_next);
+ __ AddP(pointer_reg, Operand(kPointerSize * 2));
+ __ CmpP(pointer_reg, too_far);
+ __ blt(&next_loop, Label::kNear);
+
+ // We exhausted our array of map handler pairs.
+ __ b(miss);
+}
+
+static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
+ Register receiver_map, Register feedback,
+ Register vector, Register slot,
+ Register scratch, Label* compare_map,
+ Label* load_smi_map, Label* try_array) {
+ __ JumpIfSmi(receiver, load_smi_map);
+ __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ bind(compare_map);
+ Register cached_map = scratch;
+ // Move the weak map into the weak_cell register.
+ __ LoadP(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
+ __ CmpP(cached_map, receiver_map);
+ __ bne(try_array);
+ Register handler = feedback;
+ __ SmiToPtrArrayOffset(r1, slot);
+ __ LoadP(handler,
+ FieldMemOperand(r1, vector, FixedArray::kHeaderSize + kPointerSize));
+ __ AddP(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(ip);
+}
+
+void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // r3
+ Register name = LoadWithVectorDescriptor::NameRegister(); // r4
+ Register vector = LoadWithVectorDescriptor::VectorRegister(); // r5
+ Register slot = LoadWithVectorDescriptor::SlotRegister(); // r2
+ Register feedback = r6;
+ Register receiver_map = r7;
+ Register scratch1 = r8;
+
+ __ SmiToPtrArrayOffset(r1, slot);
+ __ LoadP(feedback, FieldMemOperand(r1, vector, FixedArray::kHeaderSize));
+
+ // Try to quickly handle the monomorphic case without knowing for sure
+ // if we have a weak cell in feedback. We do know it's safe to look
+ // at WeakCell::kValueOffset.
+ Label try_array, load_smi_map, compare_map;
+ Label not_array, miss;
+ HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
+ scratch1, &compare_map, &load_smi_map, &try_array);
+
+ // Is it a fixed array?
+ __ bind(&try_array);
+ __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+ __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
+ __ bne(&not_array, Label::kNear);
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, true, &miss);
+
+ __ bind(&not_array);
+ __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
+ __ bne(&miss);
+ Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::LOAD_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
+ receiver, name, feedback,
+ receiver_map, scratch1, r9);
+
+ __ bind(&miss);
+ LoadIC::GenerateMiss(masm);
+
+ __ bind(&load_smi_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+ __ b(&compare_map);
+}
+
+void KeyedLoadICStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // r3
+ Register key = LoadWithVectorDescriptor::NameRegister(); // r4
+ Register vector = LoadWithVectorDescriptor::VectorRegister(); // r5
+ Register slot = LoadWithVectorDescriptor::SlotRegister(); // r2
+ Register feedback = r6;
+ Register receiver_map = r7;
+ Register scratch1 = r8;
+
+ __ SmiToPtrArrayOffset(r1, slot);
+ __ LoadP(feedback, FieldMemOperand(r1, vector, FixedArray::kHeaderSize));
+
+ // Try to quickly handle the monomorphic case without knowing for sure
+ // if we have a weak cell in feedback. We do know it's safe to look
+ // at WeakCell::kValueOffset.
+ Label try_array, load_smi_map, compare_map;
+ Label not_array, miss;
+ HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
+ scratch1, &compare_map, &load_smi_map, &try_array);
+
+ __ bind(&try_array);
+ // Is it a fixed array?
+ __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+ __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
+ __ bne(&not_array);
+
+ // We have a polymorphic element handler.
+ Label polymorphic, try_poly_name;
+ __ bind(&polymorphic);
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, true, &miss);
+
+ __ bind(&not_array);
+ // Is it generic?
+ __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
+ __ bne(&try_poly_name);
+ Handle<Code> megamorphic_stub =
+ KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
+ __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
+
+ __ bind(&try_poly_name);
+ // We might have a name in feedback, and a fixed array in the next slot.
+ __ CmpP(key, feedback);
+ __ bne(&miss);
+ // If the name comparison succeeded, we know we have a fixed array with
+ // at least one map/handler pair.
+ __ SmiToPtrArrayOffset(r1, slot);
+ __ LoadP(feedback,
+ FieldMemOperand(r1, vector, FixedArray::kHeaderSize + kPointerSize));
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, false, &miss);
+
+ __ bind(&miss);
+ KeyedLoadIC::GenerateMiss(masm);
+
+ __ bind(&load_smi_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+ __ b(&compare_map);
+}
+
+void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
+ VectorStoreICStub stub(isolate(), state());
+ stub.GenerateForTrampoline(masm);
+}
+
+void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
+ VectorKeyedStoreICStub stub(isolate(), state());
+ stub.GenerateForTrampoline(masm);
+}
+
+void VectorStoreICStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // r3
+ Register key = VectorStoreICDescriptor::NameRegister(); // r4
+ Register vector = VectorStoreICDescriptor::VectorRegister(); // r5
+ Register slot = VectorStoreICDescriptor::SlotRegister(); // r6
+ DCHECK(VectorStoreICDescriptor::ValueRegister().is(r2)); // r2
+ Register feedback = r7;
+ Register receiver_map = r8;
+ Register scratch1 = r9;
+
+ __ SmiToPtrArrayOffset(r0, slot);
+ __ AddP(feedback, vector, r0);
+ __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+ // Try to quickly handle the monomorphic case without knowing for sure
+ // if we have a weak cell in feedback. We do know it's safe to look
+ // at WeakCell::kValueOffset.
+ Label try_array, load_smi_map, compare_map;
+ Label not_array, miss;
+ HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
+ scratch1, &compare_map, &load_smi_map, &try_array);
+
+ // Is it a fixed array?
+ __ bind(&try_array);
+ __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+ __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
+ __ bne(&not_array);
+
+ Register scratch2 = ip;
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, true,
+ &miss);
+
+ __ bind(&not_array);
+ __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
+ __ bne(&miss);
+ Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::STORE_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, code_flags, receiver, key, feedback, receiver_map,
+ scratch1, scratch2);
+
+ __ bind(&miss);
+ StoreIC::GenerateMiss(masm);
+
+ __ bind(&load_smi_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+ __ b(&compare_map);
+}
+
+void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
+ Register receiver_map, Register scratch1,
+ Register scratch2, Label* miss) {
+ // feedback initially contains the feedback array
+ Label next_loop, prepare_next;
+ Label start_polymorphic;
+ Label transition_call;
+
+ Register cached_map = scratch1;
+ Register too_far = scratch2;
+ Register pointer_reg = feedback;
+ __ LoadP(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
+
+ // +-----+------+------+-----+-----+-----+ ... ----+
+ // | map | len | wm0 | wt0 | h0 | wm1 | hN |
+ // +-----+------+------+-----+-----+ ----+ ... ----+
+ // 0 1 2 len-1
+ // ^ ^
+ // | |
+ // pointer_reg too_far
+ // aka feedback scratch2
+ // also need receiver_map
+ // use cached_map (scratch1) to look in the weak map values.
+ __ SmiToPtrArrayOffset(r0, too_far);
+ __ AddP(too_far, feedback, r0);
+ __ AddP(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ AddP(pointer_reg, feedback,
+ Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
+
+ __ bind(&next_loop);
+ __ LoadP(cached_map, MemOperand(pointer_reg));
+ __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
+ __ CmpP(receiver_map, cached_map);
+ __ bne(&prepare_next);
+ // Is it a transitioning store?
+ __ LoadP(too_far, MemOperand(pointer_reg, kPointerSize));
+ __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
+ __ bne(&transition_call);
+ __ LoadP(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
+ __ AddP(ip, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(ip);
+
+ __ bind(&transition_call);
+ __ LoadP(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
+ __ JumpIfSmi(too_far, miss);
+
+ __ LoadP(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
+
+ // Load the map into the correct register.
+ DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
+ __ LoadRR(feedback, too_far);
+
+ __ AddP(ip, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(ip);
+
+ __ bind(&prepare_next);
+ __ AddP(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
+ __ CmpLogicalP(pointer_reg, too_far);
+ __ blt(&next_loop);
+
+ // We exhausted our array of map handler pairs.
+ __ b(miss);
+}
+
+void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // r3
+ Register key = VectorStoreICDescriptor::NameRegister(); // r4
+ Register vector = VectorStoreICDescriptor::VectorRegister(); // r5
+ Register slot = VectorStoreICDescriptor::SlotRegister(); // r6
+ DCHECK(VectorStoreICDescriptor::ValueRegister().is(r2)); // r2
+ Register feedback = r7;
+ Register receiver_map = r8;
+ Register scratch1 = r9;
+
+ __ SmiToPtrArrayOffset(r0, slot);
+ __ AddP(feedback, vector, r0);
+ __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+ // Try to quickly handle the monomorphic case without knowing for sure
+ // if we have a weak cell in feedback. We do know it's safe to look
+ // at WeakCell::kValueOffset.
+ Label try_array, load_smi_map, compare_map;
+ Label not_array, miss;
+ HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
+ scratch1, &compare_map, &load_smi_map, &try_array);
+
+ __ bind(&try_array);
+ // Is it a fixed array?
+ __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+ __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
+ __ bne(&not_array);
+
+ // We have a polymorphic element handler.
+ Label polymorphic, try_poly_name;
+ __ bind(&polymorphic);
+
+ Register scratch2 = ip;
+
+ HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
+ &miss);
+
+ __ bind(&not_array);
+ // Is it generic?
+ __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
+ __ bne(&try_poly_name);
+ Handle<Code> megamorphic_stub =
+ KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
+ __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
+
+ __ bind(&try_poly_name);
+ // We might have a name in feedback, and a fixed array in the next slot.
+ __ CmpP(key, feedback);
+ __ bne(&miss);
+ // If the name comparison succeeded, we know we have a fixed array with
+ // at least one map/handler pair.
+ __ SmiToPtrArrayOffset(r0, slot);
+ __ AddP(feedback, vector, r0);
+ __ LoadP(feedback,
+ FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
+ &miss);
+
+ __ bind(&miss);
+ KeyedStoreIC::GenerateMiss(masm);
+
+ __ bind(&load_smi_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+ __ b(&compare_map);
+}
+
+void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
+ if (masm->isolate()->function_entry_hook() != NULL) {
+ PredictableCodeSizeScope predictable(masm,
+#if V8_TARGET_ARCH_S390X
+ 40);
+#elif V8_HOST_ARCH_S390
+ 36);
+#else
+ 32);
+#endif
+ ProfileEntryHookStub stub(masm->isolate());
+ __ CleanseP(r14);
+ __ Push(r14, ip);
+ __ CallStub(&stub); // BRASL
+ __ Pop(r14, ip);
+ }
+}
+
+void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
+// The entry hook is a "push lr" instruction (LAY+ST/STG), followed by a call.
+#if V8_TARGET_ARCH_S390X
+ const int32_t kReturnAddressDistanceFromFunctionStart =
+ Assembler::kCallTargetAddressOffset + 18; // LAY + STG * 2
+#elif V8_HOST_ARCH_S390
+ const int32_t kReturnAddressDistanceFromFunctionStart =
+ Assembler::kCallTargetAddressOffset + 18; // NILH + LAY + ST * 2
+#else
+ const int32_t kReturnAddressDistanceFromFunctionStart =
+ Assembler::kCallTargetAddressOffset + 14; // LAY + ST * 2
+#endif
+
+ // This should contain all kJSCallerSaved registers.
+ const RegList kSavedRegs = kJSCallerSaved | // Caller saved registers.
+ r7.bit(); // Saved stack pointer.
+
+ // We also save r14+ip, so count here is one higher than the mask indicates.
+ const int32_t kNumSavedRegs = kNumJSCallerSaved + 3;
+
+ // Save all caller-save registers as this may be called from anywhere.
+ __ CleanseP(r14);
+ __ LoadRR(ip, r14);
+ __ MultiPush(kSavedRegs | ip.bit());
+
+ // Compute the function's address for the first argument.
+
+ __ SubP(r2, ip, Operand(kReturnAddressDistanceFromFunctionStart));
+
+ // The caller's return address is two slots above the saved temporaries.
+ // Grab that for the second argument to the hook.
+ __ lay(r3, MemOperand(sp, kNumSavedRegs * kPointerSize));
+
+ // Align the stack if necessary.
+ int frame_alignment = masm->ActivationFrameAlignment();
+ if (frame_alignment > kPointerSize) {
+ __ LoadRR(r7, sp);
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+ __ ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
+ }
+
+#if !defined(USE_SIMULATOR)
+ uintptr_t entry_hook =
+ reinterpret_cast<uintptr_t>(isolate()->function_entry_hook());
+ __ mov(ip, Operand(entry_hook));
+
+#if ABI_USES_FUNCTION_DESCRIPTORS
+ // Function descriptor
+ __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(ip, kPointerSize));
+ __ LoadP(ip, MemOperand(ip, 0));
+// ip already set.
+#endif
+#endif
+
+ // zLinux ABI requires caller's frame to have sufficient space for callee
+ // preserved regsiter save area.
+ __ LoadImmP(r0, Operand::Zero());
+ __ lay(sp, MemOperand(sp, -kCalleeRegisterSaveAreaSize -
+ kNumRequiredStackFrameSlots * kPointerSize));
+ __ StoreP(r0, MemOperand(sp));
+#if defined(USE_SIMULATOR)
+ // Under the simulator we need to indirect the entry hook through a
+ // trampoline function at a known address.
+ // It additionally takes an isolate as a third parameter
+ __ mov(r4, Operand(ExternalReference::isolate_address(isolate())));
+
+ ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
+ __ mov(ip, Operand(ExternalReference(
+ &dispatcher, ExternalReference::BUILTIN_CALL, isolate())));
+#endif
+ __ Call(ip);
+
+ // zLinux ABI requires caller's frame to have sufficient space for callee
+ // preserved regsiter save area.
+ __ la(sp, MemOperand(sp, kCalleeRegisterSaveAreaSize +
+ kNumRequiredStackFrameSlots * kPointerSize));
+
+ // Restore the stack pointer if needed.
+ if (frame_alignment > kPointerSize) {
+ __ LoadRR(sp, r7);
+ }
+
+ // Also pop lr to get Ret(0).
+ __ MultiPop(kSavedRegs | ip.bit());
+ __ LoadRR(r14, ip);
+ __ Ret();
+}
+
+template <class T>
+static void CreateArrayDispatch(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
+ __ TailCallStub(&stub);
+ } else if (mode == DONT_OVERRIDE) {
+ int last_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ CmpP(r5, Operand(kind));
+ T stub(masm->isolate(), kind);
+ __ TailCallStub(&stub, eq);
+ }
+
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ // r4 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
+ // r5 - kind (if mode != DISABLE_ALLOCATION_SITES)
+ // r2 - number of arguments
+ // r3 - constructor?
+ // sp[0] - last argument
+ Label normal_sequence;
+ if (mode == DONT_OVERRIDE) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+ STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+
+ // is the low bit set? If so, we are holey and that is good.
+ __ AndP(r0, r5, Operand(1));
+ __ bne(&normal_sequence);
+ }
+
+ // look at the first argument
+ __ LoadP(r7, MemOperand(sp, 0));
+ __ CmpP(r7, Operand::Zero());
+ __ beq(&normal_sequence);
+
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ ElementsKind initial = GetInitialFastElementsKind();
+ ElementsKind holey_initial = GetHoleyElementsKind(initial);
+
+ ArraySingleArgumentConstructorStub stub_holey(
+ masm->isolate(), holey_initial, DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub_holey);
+
+ __ bind(&normal_sequence);
+ ArraySingleArgumentConstructorStub stub(masm->isolate(), initial,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub);
+ } else if (mode == DONT_OVERRIDE) {
+ // We are going to create a holey array, but our kind is non-holey.
+ // Fix kind and retry (only if we have an allocation site in the slot).
+ __ AddP(r5, r5, Operand(1));
+ if (FLAG_debug_code) {
+ __ LoadP(r7, FieldMemOperand(r4, 0));
+ __ CompareRoot(r7, Heap::kAllocationSiteMapRootIndex);
+ __ Assert(eq, kExpectedAllocationSite);
+ }
+
+ // Save the resulting elements kind in type info. We can't just store r5
+ // in the AllocationSite::transition_info field because elements kind is
+ // restricted to a portion of the field...upper bits need to be left alone.
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ LoadP(r6, FieldMemOperand(r4, AllocationSite::kTransitionInfoOffset));
+ __ AddSmiLiteral(r6, r6, Smi::FromInt(kFastElementsKindPackedToHoley), r0);
+ __ StoreP(r6, FieldMemOperand(r4, AllocationSite::kTransitionInfoOffset));
+
+ __ bind(&normal_sequence);
+ int last_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ CmpP(r5, Operand(kind));
+ ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
+ __ TailCallStub(&stub, eq);
+ }
+
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+template <class T>
+static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
+ int to_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= to_index; ++i) {
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ T stub(isolate, kind);
+ stub.GetCode();
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
+ stub1.GetCode();
+ }
+ }
+}
+
+void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+ ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
+ isolate);
+ ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
+ isolate);
+ ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
+ isolate);
+}
+
+void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
+ Isolate* isolate) {
+ ElementsKind kinds[2] = {FAST_ELEMENTS, FAST_HOLEY_ELEMENTS};
+ for (int i = 0; i < 2; i++) {
+ // For internal arrays we only need a few things
+ InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
+ stubh1.GetCode();
+ InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
+ stubh2.GetCode();
+ InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
+ stubh3.GetCode();
+ }
+}
+
+void ArrayConstructorStub::GenerateDispatchToArrayStub(
+ MacroAssembler* masm, AllocationSiteOverrideMode mode) {
+ if (argument_count() == ANY) {
+ Label not_zero_case, not_one_case;
+ __ CmpP(r2, Operand::Zero());
+ __ bne(&not_zero_case);
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+ __ bind(&not_zero_case);
+ __ CmpP(r2, Operand(1));
+ __ bgt(&not_one_case);
+ CreateArrayDispatchOneArgument(masm, mode);
+
+ __ bind(&not_one_case);
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else if (argument_count() == NONE) {
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+ } else if (argument_count() == ONE) {
+ CreateArrayDispatchOneArgument(masm, mode);
+ } else if (argument_count() == MORE_THAN_ONE) {
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void ArrayConstructorStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : argc (only if argument_count() == ANY)
+ // -- r3 : constructor
+ // -- r4 : AllocationSite or undefined
+ // -- r5 : new target
+ // -- sp[0] : return address
+ // -- sp[4] : last argument
+ // -----------------------------------
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
+ // Initial map for the builtin Array function should be a map.
+ __ LoadP(r6, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a NULL and a Smi.
+ __ TestIfSmi(r6);
+ __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
+ __ CompareObjectType(r6, r6, r7, MAP_TYPE);
+ __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+
+ // We should either have undefined in r4 or a valid AllocationSite
+ __ AssertUndefinedOrAllocationSite(r4, r6);
+ }
+
+ // Enter the context of the Array function.
+ __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
+
+ Label subclassing;
+ __ CmpP(r5, r3);
+ __ bne(&subclassing, Label::kNear);
+
+ Label no_info;
+ // Get the elements kind and case on that.
+ __ CompareRoot(r4, Heap::kUndefinedValueRootIndex);
+ __ beq(&no_info);
+
+ __ LoadP(r5, FieldMemOperand(r4, AllocationSite::kTransitionInfoOffset));
+ __ SmiUntag(r5);
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ AndP(r5, Operand(AllocationSite::ElementsKindBits::kMask));
+ GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
+
+ __ bind(&no_info);
+ GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
+
+ __ bind(&subclassing);
+ switch (argument_count()) {
+ case ANY:
+ case MORE_THAN_ONE:
+ __ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2));
+ __ StoreP(r3, MemOperand(sp, r1));
+ __ AddP(r2, r2, Operand(3));
+ break;
+ case NONE:
+ __ StoreP(r3, MemOperand(sp, 0 * kPointerSize));
+ __ LoadImmP(r2, Operand(3));
+ break;
+ case ONE:
+ __ StoreP(r3, MemOperand(sp, 1 * kPointerSize));
+ __ LoadImmP(r2, Operand(4));
+ break;
+ }
+
+ __ Push(r5, r4);
+ __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
+}
+
+void InternalArrayConstructorStub::GenerateCase(MacroAssembler* masm,
+ ElementsKind kind) {
+ __ CmpLogicalP(r2, Operand(1));
+
+ InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
+ __ TailCallStub(&stub0, lt);
+
+ InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+ __ TailCallStub(&stubN, gt);
+
+ if (IsFastPackedElementsKind(kind)) {
+ // We might need to create a holey array
+ // look at the first argument
+ __ LoadP(r5, MemOperand(sp, 0));
+ __ CmpP(r5, Operand::Zero());
+
+ InternalArraySingleArgumentConstructorStub stub1_holey(
+ isolate(), GetHoleyElementsKind(kind));
+ __ TailCallStub(&stub1_holey, ne);
+ }
+
+ InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
+ __ TailCallStub(&stub1);
+}
+
+void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : argc
+ // -- r3 : constructor
+ // -- sp[0] : return address
+ // -- sp[4] : last argument
+ // -----------------------------------
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
+ // Initial map for the builtin Array function should be a map.
+ __ LoadP(r5, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a NULL and a Smi.
+ __ TestIfSmi(r5);
+ __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
+ __ CompareObjectType(r5, r5, r6, MAP_TYPE);
+ __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+ }
+
+ // Figure out the right elements kind
+ __ LoadP(r5, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
+ // Load the map's "bit field 2" into |result|.
+ __ LoadlB(r5, FieldMemOperand(r5, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ DecodeField<Map::ElementsKindBits>(r5);
+
+ if (FLAG_debug_code) {
+ Label done;
+ __ CmpP(r5, Operand(FAST_ELEMENTS));
+ __ beq(&done);
+ __ CmpP(r5, Operand(FAST_HOLEY_ELEMENTS));
+ __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ __ bind(&done);
+ }
+
+ Label fast_elements_case;
+ __ CmpP(r5, Operand(FAST_ELEMENTS));
+ __ beq(&fast_elements_case);
+ GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+
+ __ bind(&fast_elements_case);
+ GenerateCase(masm, FAST_ELEMENTS);
+}
+
+void FastNewObjectStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : target
+ // -- r5 : new target
+ // -- cp : context
+ // -- lr : return address
+ // -----------------------------------
+ __ AssertFunction(r3);
+ __ AssertReceiver(r5);
+
+ // Verify that the new target is a JSFunction.
+ Label new_object;
+ __ CompareObjectType(r5, r4, r4, JS_FUNCTION_TYPE);
+ __ bne(&new_object);
+
+ // Load the initial map and verify that it's in fact a map.
+ __ LoadP(r4, FieldMemOperand(r5, JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(r4, &new_object);
+ __ CompareObjectType(r4, r2, r2, MAP_TYPE);
+ __ bne(&new_object);
+
+ // Fall back to runtime if the target differs from the new target's
+ // initial map constructor.
+ __ LoadP(r2, FieldMemOperand(r4, Map::kConstructorOrBackPointerOffset));
+ __ CmpP(r2, r3);
+ __ bne(&new_object);
+
+ // Allocate the JSObject on the heap.
+ Label allocate, done_allocate;
+ __ LoadlB(r6, FieldMemOperand(r4, Map::kInstanceSizeOffset));
+ __ Allocate(r6, r2, r7, r8, &allocate, SIZE_IN_WORDS);
+ __ bind(&done_allocate);
+
+ // Initialize the JSObject fields.
+ __ StoreP(r4, MemOperand(r2, JSObject::kMapOffset));
+ __ LoadRoot(r5, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r5, MemOperand(r2, JSObject::kPropertiesOffset));
+ __ StoreP(r5, MemOperand(r2, JSObject::kElementsOffset));
+ STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+ __ AddP(r3, r2, Operand(JSObject::kHeaderSize));
+
+ // ----------- S t a t e -------------
+ // -- r2 : result (untagged)
+ // -- r3 : result fields (untagged)
+ // -- r7 : result end (untagged)
+ // -- r4 : initial map
+ // -- cp : context
+ // -- lr : return address
+ // -----------------------------------
+
+ // Perform in-object slack tracking if requested.
+ Label slack_tracking;
+ STATIC_ASSERT(Map::kNoSlackTracking == 0);
+ __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
+ __ LoadlW(r5, FieldMemOperand(r4, Map::kBitField3Offset));
+ __ DecodeField<Map::ConstructionCounter>(r9, r5);
+ __ LoadAndTestP(r9, r9);
+ __ bne(&slack_tracking);
+ {
+ // Initialize all in-object fields with undefined.
+ __ InitializeFieldsWithFiller(r3, r7, r8);
+
+ // Add the object tag to make the JSObject real.
+ __ AddP(r2, r2, Operand(kHeapObjectTag));
+ __ Ret();
+ }
+ __ bind(&slack_tracking);
+ {
+ // Decrease generous allocation count.
+ STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+ __ Add32(r5, r5, Operand(-(1 << Map::ConstructionCounter::kShift)));
+ __ StoreW(r5, FieldMemOperand(r4, Map::kBitField3Offset));
+
+ // Initialize the in-object fields with undefined.
+ __ LoadlB(r6, FieldMemOperand(r4, Map::kUnusedPropertyFieldsOffset));
+ __ ShiftLeftP(r6, r6, Operand(kPointerSizeLog2));
+ __ SubP(r6, r7, r6);
+ __ InitializeFieldsWithFiller(r3, r6, r8);
+
+ // Initialize the remaining (reserved) fields with one pointer filler map.
+ __ LoadRoot(r8, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(r3, r7, r8);
+
+ // Add the object tag to make the JSObject real.
+ __ AddP(r2, r2, Operand(kHeapObjectTag));
+
+ // Check if we can finalize the instance size.
+ __ CmpP(r9, Operand(Map::kSlackTrackingCounterEnd));
+ __ Ret(ne);
+
+ // Finalize the instance size.
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r2, r4);
+ __ CallRuntime(Runtime::kFinalizeInstanceSize);
+ __ Pop(r2);
+ }
+ __ Ret();
+ }
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ ShiftLeftP(r6, r6,
+ Operand(kPointerSizeLog2 + kSmiTagSize + kSmiShiftSize));
+ __ Push(r4, r6);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ Pop(r4);
+ }
+ __ SubP(r2, r2, Operand(kHeapObjectTag));
+ __ LoadlB(r7, FieldMemOperand(r4, Map::kInstanceSizeOffset));
+ __ ShiftLeftP(r7, r7, Operand(kPointerSizeLog2));
+ __ AddP(r7, r2, r7);
+ __ b(&done_allocate);
+
+ // Fall back to %NewObject.
+ __ bind(&new_object);
+ __ Push(r3, r5);
+ __ TailCallRuntime(Runtime::kNewObject);
+}
+
+void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : function
+ // -- cp : context
+ // -- fp : frame pointer
+ // -- lr : return address
+ // -----------------------------------
+ __ AssertFunction(r3);
+
+ // For Ignition we need to skip all possible handler/stub frames until
+ // we reach the JavaScript frame for the function (similar to what the
+ // runtime fallback implementation does). So make r4 point to that
+ // JavaScript frame.
+ {
+ Label loop, loop_entry;
+ __ LoadRR(r4, fp);
+ __ b(&loop_entry);
+ __ bind(&loop);
+ __ LoadP(r4, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&loop_entry);
+ __ LoadP(ip, MemOperand(r4, StandardFrameConstants::kFunctionOffset));
+ __ CmpP(ip, r3);
+ __ bne(&loop);
+ }
+
+ // Check if we have rest parameters (only possible if we have an
+ // arguments adaptor frame below the function frame).
+ Label no_rest_parameters;
+ __ LoadP(r4, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(ip, MemOperand(r4, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ bne(&no_rest_parameters);
+
+ // Check if the arguments adaptor frame contains more arguments than
+ // specified by the function's internal formal parameter count.
+ Label rest_parameters;
+ __ LoadP(r2, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ LoadP(r3, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadW(
+ r3, FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
+#if V8_TARGET_ARCH_S390X
+ __ SmiTag(r3);
+#endif
+ __ SubP(r2, r2, r3);
+ __ bgt(&rest_parameters);
+
+ // Return an empty rest parameter array.
+ __ bind(&no_rest_parameters);
+ {
+ // ----------- S t a t e -------------
+ // -- cp : context
+ // -- lr : return address
+ // -----------------------------------
+
+ // Allocate an empty rest parameter array.
+ Label allocate, done_allocate;
+ __ Allocate(JSArray::kSize, r2, r3, r4, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Setup the rest parameter array in r0.
+ __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r3);
+ __ StoreP(r3, FieldMemOperand(r2, JSArray::kMapOffset), r0);
+ __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r3, FieldMemOperand(r2, JSArray::kPropertiesOffset), r0);
+ __ StoreP(r3, FieldMemOperand(r2, JSArray::kElementsOffset), r0);
+ __ LoadImmP(r3, Operand::Zero());
+ __ StoreP(r3, FieldMemOperand(r2, JSArray::kLengthOffset), r0);
+ STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+ __ Ret();
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(Smi::FromInt(JSArray::kSize));
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ }
+ __ b(&done_allocate);
+ }
+
+ __ bind(&rest_parameters);
+ {
+ // Compute the pointer to the first rest parameter (skippping the receiver).
+ __ SmiToPtrArrayOffset(r8, r2);
+ __ AddP(r4, r4, r8);
+ __ AddP(r4, r4, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // ----------- S t a t e -------------
+ // -- cp : context
+ // -- r2 : number of rest parameters (tagged)
+ // -- r4 : pointer just past first rest parameters
+ // -- r8 : size of rest parameters
+ // -- lr : return address
+ // -----------------------------------
+
+ // Allocate space for the rest parameter array plus the backing store.
+ Label allocate, done_allocate;
+ __ mov(r3, Operand(JSArray::kSize + FixedArray::kHeaderSize));
+ __ AddP(r3, r3, r8);
+ __ Allocate(r3, r5, r6, r7, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Setup the elements array in r5.
+ __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
+ __ StoreP(r3, FieldMemOperand(r5, FixedArray::kMapOffset), r0);
+ __ StoreP(r2, FieldMemOperand(r5, FixedArray::kLengthOffset), r0);
+ __ AddP(r6, r5,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+ {
+ Label loop;
+ __ SmiUntag(r1, r2);
+ // __ mtctr(r0);
+ __ bind(&loop);
+ __ lay(r4, MemOperand(r4, -kPointerSize));
+ __ LoadP(ip, MemOperand(r4));
+ __ la(r6, MemOperand(r6, kPointerSize));
+ __ StoreP(ip, MemOperand(r6));
+ // __ bdnz(&loop);
+ __ BranchOnCount(r1, &loop);
+ __ AddP(r6, r6, Operand(kPointerSize));
+ }
+
+ // Setup the rest parameter array in r6.
+ __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r3);
+ __ StoreP(r3, MemOperand(r6, JSArray::kMapOffset));
+ __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r3, MemOperand(r6, JSArray::kPropertiesOffset));
+ __ StoreP(r5, MemOperand(r6, JSArray::kElementsOffset));
+ __ StoreP(r2, MemOperand(r6, JSArray::kLengthOffset));
+ STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+ __ AddP(r2, r6, Operand(kHeapObjectTag));
+ __ Ret();
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(r3);
+ __ Push(r2, r4, r3);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ LoadRR(r5, r2);
+ __ Pop(r2, r4);
+ }
+ __ b(&done_allocate);
+ }
+}
+
+void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : function
+ // -- cp : context
+ // -- fp : frame pointer
+ // -- lr : return address
+ // -----------------------------------
+ __ AssertFunction(r3);
+
+ // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
+ __ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadW(
+ r4, FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset));
+#if V8_TARGET_ARCH_S390X
+ __ SmiTag(r4);
+#endif
+ __ SmiToPtrArrayOffset(r5, r4);
+ __ AddP(r5, fp, r5);
+ __ AddP(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // r3 : function
+ // r4 : number of parameters (tagged)
+ // r5 : parameters pointer
+ // Registers used over whole function:
+ // r7 : arguments count (tagged)
+ // r8 : mapped parameter count (tagged)
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, try_allocate, runtime;
+ __ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(r2, MemOperand(r6, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ CmpSmiLiteral(r2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ beq(&adaptor_frame);
+
+ // No adaptor, parameter count = argument count.
+ __ LoadRR(r7, r4);
+ __ LoadRR(r8, r4);
+ __ b(&try_allocate);
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ LoadP(r7, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiToPtrArrayOffset(r5, r7);
+ __ AddP(r5, r5, r6);
+ __ AddP(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // r7 = argument count (tagged)
+ // r8 = parameter count (tagged)
+ // Compute the mapped parameter count = min(r4, r7) in r8.
+ __ CmpP(r4, r7);
+ Label skip;
+ __ LoadRR(r8, r4);
+ __ blt(&skip);
+ __ LoadRR(r8, r7);
+ __ bind(&skip);
+
+ __ bind(&try_allocate);
+
+ // Compute the sizes of backing store, parameter map, and arguments object.
+ // 1. Parameter map, has 2 extra words containing context and backing store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+ // If there are no mapped parameters, we do not need the parameter_map.
+ __ CmpSmiLiteral(r8, Smi::FromInt(0), r0);
+ Label skip2, skip3;
+ __ bne(&skip2);
+ __ LoadImmP(r1, Operand::Zero());
+ __ b(&skip3);
+ __ bind(&skip2);
+ __ SmiToPtrArrayOffset(r1, r8);
+ __ AddP(r1, r1, Operand(kParameterMapHeaderSize));
+ __ bind(&skip3);
+
+ // 2. Backing store.
+ __ SmiToPtrArrayOffset(r6, r7);
+ __ AddP(r1, r1, r6);
+ __ AddP(r1, r1, Operand(FixedArray::kHeaderSize));
+
+ // 3. Arguments object.
+ __ AddP(r1, r1, Operand(JSSloppyArgumentsObject::kSize));
+
+ // Do the allocation of all three objects in one go.
+ __ Allocate(r1, r2, r1, r6, &runtime, TAG_OBJECT);
+
+ // r2 = address of new object(s) (tagged)
+ // r4 = argument count (smi-tagged)
+ // Get the arguments boilerplate from the current native context into r3.
+ const int kNormalOffset =
+ Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
+ const int kAliasedOffset =
+ Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
+
+ __ LoadP(r6, NativeContextMemOperand());
+ __ CmpP(r8, Operand::Zero());
+ Label skip4, skip5;
+ __ bne(&skip4);
+ __ LoadP(r6, MemOperand(r6, kNormalOffset));
+ __ b(&skip5);
+ __ bind(&skip4);
+ __ LoadP(r6, MemOperand(r6, kAliasedOffset));
+ __ bind(&skip5);
+
+ // r2 = address of new object (tagged)
+ // r4 = argument count (smi-tagged)
+ // r6 = address of arguments map (tagged)
+ // r8 = mapped parameter count (tagged)
+ __ StoreP(r6, FieldMemOperand(r2, JSObject::kMapOffset), r0);
+ __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r1, FieldMemOperand(r2, JSObject::kPropertiesOffset), r0);
+ __ StoreP(r1, FieldMemOperand(r2, JSObject::kElementsOffset), r0);
+
+ // Set up the callee in-object property.
+ __ AssertNotSmi(r3);
+ __ StoreP(r3, FieldMemOperand(r2, JSSloppyArgumentsObject::kCalleeOffset),
+ r0);
+
+ // Use the length (smi tagged) and set that as an in-object property too.
+ __ AssertSmi(r7);
+ __ StoreP(r7, FieldMemOperand(r2, JSSloppyArgumentsObject::kLengthOffset),
+ r0);
+
+ // Set up the elements pointer in the allocated arguments object.
+ // If we allocated a parameter map, r6 will point there, otherwise
+ // it will point to the backing store.
+ __ AddP(r6, r2, Operand(JSSloppyArgumentsObject::kSize));
+ __ StoreP(r6, FieldMemOperand(r2, JSObject::kElementsOffset), r0);
+
+ // r2 = address of new object (tagged)
+ // r4 = argument count (tagged)
+ // r6 = address of parameter map or backing store (tagged)
+ // r8 = mapped parameter count (tagged)
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map;
+ __ CmpSmiLiteral(r8, Smi::FromInt(0), r0);
+ Label skip6;
+ __ bne(&skip6);
+ // Move backing store address to r3, because it is
+ // expected there when filling in the unmapped arguments.
+ __ LoadRR(r3, r6);
+ __ b(&skip_parameter_map);
+ __ bind(&skip6);
+
+ __ LoadRoot(r7, Heap::kSloppyArgumentsElementsMapRootIndex);
+ __ StoreP(r7, FieldMemOperand(r6, FixedArray::kMapOffset), r0);
+ __ AddSmiLiteral(r7, r8, Smi::FromInt(2), r0);
+ __ StoreP(r7, FieldMemOperand(r6, FixedArray::kLengthOffset), r0);
+ __ StoreP(cp, FieldMemOperand(r6, FixedArray::kHeaderSize + 0 * kPointerSize),
+ r0);
+ __ SmiToPtrArrayOffset(r7, r8);
+ __ AddP(r7, r7, r6);
+ __ AddP(r7, r7, Operand(kParameterMapHeaderSize));
+ __ StoreP(r7, FieldMemOperand(r6, FixedArray::kHeaderSize + 1 * kPointerSize),
+ r0);
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. They index the context,
+ // where parameters are stored in reverse order, at
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+ // The mapped parameter thus need to get indices
+ // MIN_CONTEXT_SLOTS+parameter_count-1 ..
+ // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+ // We loop from right to left.
+ Label parameters_loop;
+ __ LoadRR(r7, r8);
+ __ AddSmiLiteral(r1, r4, Smi::FromInt(Context::MIN_CONTEXT_SLOTS), r0);
+ __ SubP(r1, r1, r8);
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ SmiToPtrArrayOffset(r3, r7);
+ __ AddP(r3, r3, r6);
+ __ AddP(r3, r3, Operand(kParameterMapHeaderSize));
+
+ // r3 = address of backing store (tagged)
+ // r6 = address of parameter map (tagged)
+ // r7 = temporary scratch (a.o., for address calculation)
+ // r9 = temporary scratch (a.o., for address calculation)
+ // ip = the hole value
+ __ SmiUntag(r7);
+ __ push(r4);
+ __ LoadRR(r4, r7);
+ __ ShiftLeftP(r7, r7, Operand(kPointerSizeLog2));
+ __ AddP(r9, r3, r7);
+ __ AddP(r7, r6, r7);
+ __ AddP(r9, r9, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ AddP(r7, r7, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+
+ __ bind(&parameters_loop);
+ __ StoreP(r1, MemOperand(r7, -kPointerSize));
+ __ lay(r7, MemOperand(r7, -kPointerSize));
+ __ StoreP(ip, MemOperand(r9, -kPointerSize));
+ __ lay(r9, MemOperand(r9, -kPointerSize));
+ __ AddSmiLiteral(r1, r1, Smi::FromInt(1), r0);
+ __ BranchOnCount(r4, &parameters_loop);
+ __ pop(r4);
+
+ // Restore r7 = argument count (tagged).
+ __ LoadP(r7, FieldMemOperand(r2, JSSloppyArgumentsObject::kLengthOffset));
+
+ __ bind(&skip_parameter_map);
+ // r2 = address of new object (tagged)
+ // r3 = address of backing store (tagged)
+ // r7 = argument count (tagged)
+ // r8 = mapped parameter count (tagged)
+ // r1 = scratch
+ // Copy arguments header and remaining slots (if there are any).
+ __ LoadRoot(r1, Heap::kFixedArrayMapRootIndex);
+ __ StoreP(r1, FieldMemOperand(r3, FixedArray::kMapOffset), r0);
+ __ StoreP(r7, FieldMemOperand(r3, FixedArray::kLengthOffset), r0);
+ __ SubP(r1, r7, r8);
+ __ Ret(eq);
+
+ Label arguments_loop;
+ __ SmiUntag(r1);
+ __ LoadRR(r4, r1);
+
+ __ SmiToPtrArrayOffset(r0, r8);
+ __ SubP(r5, r5, r0);
+ __ AddP(r1, r3, r0);
+ __ AddP(r1, r1,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+
+ __ bind(&arguments_loop);
+ __ LoadP(r6, MemOperand(r5, -kPointerSize));
+ __ lay(r5, MemOperand(r5, -kPointerSize));
+ __ StoreP(r6, MemOperand(r1, kPointerSize));
+ __ la(r1, MemOperand(r1, kPointerSize));
+ __ BranchOnCount(r4, &arguments_loop);
+
+ // Return.
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+ // r7 = argument count (tagged)
+ __ bind(&runtime);
+ __ Push(r3, r5, r7);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
+}
+
+void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : function
+ // -- cp : context
+ // -- fp : frame pointer
+ // -- lr : return address
+ // -----------------------------------
+ __ AssertFunction(r3);
+
+ // For Ignition we need to skip all possible handler/stub frames until
+ // we reach the JavaScript frame for the function (similar to what the
+ // runtime fallback implementation does). So make r4 point to that
+ // JavaScript frame.
+ {
+ Label loop, loop_entry;
+ __ LoadRR(r4, fp);
+ __ b(&loop_entry);
+ __ bind(&loop);
+ __ LoadP(r4, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&loop_entry);
+ __ LoadP(ip, MemOperand(r4, StandardFrameConstants::kFunctionOffset));
+ __ CmpP(ip, r3);
+ __ bne(&loop);
+ }
+
+ // Check if we have an arguments adaptor frame below the function frame.
+ Label arguments_adaptor, arguments_done;
+ __ LoadP(r5, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(ip, MemOperand(r5, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ beq(&arguments_adaptor);
+ {
+ __ LoadP(r3, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadW(r2, FieldMemOperand(
+ r3, SharedFunctionInfo::kFormalParameterCountOffset));
+#if V8_TARGET_ARCH_S390X
+ __ SmiTag(r2);
+#endif
+ __ SmiToPtrArrayOffset(r8, r2);
+ __ AddP(r4, r4, r8);
+ }
+ __ b(&arguments_done);
+ __ bind(&arguments_adaptor);
+ {
+ __ LoadP(r2, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiToPtrArrayOffset(r8, r2);
+ __ AddP(r4, r5, r8);
+ }
+ __ bind(&arguments_done);
+ __ AddP(r4, r4, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // ----------- S t a t e -------------
+ // -- cp : context
+ // -- r2 : number of rest parameters (tagged)
+ // -- r4 : pointer just past first rest parameters
+ // -- r8 : size of rest parameters
+ // -- lr : return address
+ // -----------------------------------
+
+ // Allocate space for the strict arguments object plus the backing store.
+ Label allocate, done_allocate;
+ __ mov(r3, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
+ __ AddP(r3, r3, r8);
+ __ Allocate(r3, r5, r6, r7, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Setup the elements array in r5.
+ __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
+ __ StoreP(r3, FieldMemOperand(r5, FixedArray::kMapOffset), r0);
+ __ StoreP(r2, FieldMemOperand(r5, FixedArray::kLengthOffset), r0);
+ __ AddP(r6, r5,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+ {
+ Label loop, done_loop;
+ __ SmiUntag(r1, r2);
+ __ LoadAndTestP(r1, r1);
+ __ beq(&done_loop);
+ __ bind(&loop);
+ __ lay(r4, MemOperand(r4, -kPointerSize));
+ __ LoadP(ip, MemOperand(r4));
+ __ la(r6, MemOperand(r6, kPointerSize));
+ __ StoreP(ip, MemOperand(r6));
+ __ BranchOnCount(r1, &loop);
+ __ bind(&done_loop);
+ __ AddP(r6, r6, Operand(kPointerSize));
+ }
+
+ // Setup the rest parameter array in r6.
+ __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r3);
+ __ StoreP(r3, MemOperand(r6, JSStrictArgumentsObject::kMapOffset));
+ __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r3, MemOperand(r6, JSStrictArgumentsObject::kPropertiesOffset));
+ __ StoreP(r5, MemOperand(r6, JSStrictArgumentsObject::kElementsOffset));
+ __ StoreP(r2, MemOperand(r6, JSStrictArgumentsObject::kLengthOffset));
+ STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
+ __ AddP(r2, r6, Operand(kHeapObjectTag));
+ __ Ret();
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(r3);
+ __ Push(r2, r4, r3);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ LoadRR(r5, r2);
+ __ Pop(r2, r4);
+ }
+ __ b(&done_allocate);
+}
+
+void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
+ Register context = cp;
+ Register result = r2;
+ Register slot = r4;
+
+ // Go up the context chain to the script context.
+ for (int i = 0; i < depth(); ++i) {
+ __ LoadP(result, ContextMemOperand(context, Context::PREVIOUS_INDEX));
+ context = result;
+ }
+
+ // Load the PropertyCell value at the specified slot.
+ __ ShiftLeftP(r0, slot, Operand(kPointerSizeLog2));
+ __ AddP(result, context, r0);
+ __ LoadP(result, ContextMemOperand(result));
+ __ LoadP(result, FieldMemOperand(result, PropertyCell::kValueOffset));
+
+ // If the result is not the_hole, return. Otherwise, handle in the runtime.
+ __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+ Label runtime;
+ __ beq(&runtime);
+ __ Ret();
+ __ bind(&runtime);
+
+ // Fallback to runtime.
+ __ SmiTag(slot);
+ __ Push(slot);
+ __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
+}
+
+void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
+ Register value = r2;
+ Register slot = r4;
+
+ Register cell = r3;
+ Register cell_details = r5;
+ Register cell_value = r6;
+ Register cell_value_map = r7;
+ Register scratch = r8;
+
+ Register context = cp;
+ Register context_temp = cell;
+
+ Label fast_heapobject_case, fast_smi_case, slow_case;
+
+ if (FLAG_debug_code) {
+ __ CompareRoot(value, Heap::kTheHoleValueRootIndex);
+ __ Check(ne, kUnexpectedValue);
+ }
+
+ // Go up the context chain to the script context.
+ for (int i = 0; i < depth(); i++) {
+ __ LoadP(context_temp, ContextMemOperand(context, Context::PREVIOUS_INDEX));
+ context = context_temp;
+ }
+
+ // Load the PropertyCell at the specified slot.
+ __ ShiftLeftP(r0, slot, Operand(kPointerSizeLog2));
+ __ AddP(cell, context, r0);
+ __ LoadP(cell, ContextMemOperand(cell));
+
+ // Load PropertyDetails for the cell (actually only the cell_type and kind).
+ __ LoadP(cell_details, FieldMemOperand(cell, PropertyCell::kDetailsOffset));
+ __ SmiUntag(cell_details);
+ __ AndP(cell_details, cell_details,
+ Operand(PropertyDetails::PropertyCellTypeField::kMask |
+ PropertyDetails::KindField::kMask |
+ PropertyDetails::kAttributesReadOnlyMask));
+
+ // Check if PropertyCell holds mutable data.
+ Label not_mutable_data;
+ __ CmpP(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kMutable) |
+ PropertyDetails::KindField::encode(kData)));
+ __ bne(&not_mutable_data);
+ __ JumpIfSmi(value, &fast_smi_case);
+
+ __ bind(&fast_heapobject_case);
+ __ StoreP(value, FieldMemOperand(cell, PropertyCell::kValueOffset), r0);
+ // RecordWriteField clobbers the value register, so we copy it before the
+ // call.
+ __ LoadRR(r5, value);
+ __ RecordWriteField(cell, PropertyCell::kValueOffset, r5, scratch,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ Ret();
+
+ __ bind(&not_mutable_data);
+ // Check if PropertyCell value matches the new value (relevant for Constant,
+ // ConstantType and Undefined cells).
+ Label not_same_value;
+ __ LoadP(cell_value, FieldMemOperand(cell, PropertyCell::kValueOffset));
+ __ CmpP(cell_value, value);
+ __ bne(&not_same_value);
+
+ // Make sure the PropertyCell is not marked READ_ONLY.
+ __ AndP(r0, cell_details, Operand(PropertyDetails::kAttributesReadOnlyMask));
+ __ bne(&slow_case);
+
+ if (FLAG_debug_code) {
+ Label done;
+ // This can only be true for Constant, ConstantType and Undefined cells,
+ // because we never store the_hole via this stub.
+ __ CmpP(cell_details,
+ Operand(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstant) |
+ PropertyDetails::KindField::encode(kData)));
+ __ beq(&done);
+ __ CmpP(cell_details,
+ Operand(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstantType) |
+ PropertyDetails::KindField::encode(kData)));
+ __ beq(&done);
+ __ CmpP(cell_details,
+ Operand(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kUndefined) |
+ PropertyDetails::KindField::encode(kData)));
+ __ Check(eq, kUnexpectedValue);
+ __ bind(&done);
+ }
+ __ Ret();
+ __ bind(&not_same_value);
+
+ // Check if PropertyCell contains data with constant type (and is not
+ // READ_ONLY).
+ __ CmpP(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstantType) |
+ PropertyDetails::KindField::encode(kData)));
+ __ bne(&slow_case);
+
+ // Now either both old and new values must be smis or both must be heap
+ // objects with same map.
+ Label value_is_heap_object;
+ __ JumpIfNotSmi(value, &value_is_heap_object);
+ __ JumpIfNotSmi(cell_value, &slow_case);
+ // Old and new values are smis, no need for a write barrier here.
+ __ bind(&fast_smi_case);
+ __ StoreP(value, FieldMemOperand(cell, PropertyCell::kValueOffset), r0);
+ __ Ret();
+
+ __ bind(&value_is_heap_object);
+ __ JumpIfSmi(cell_value, &slow_case);
+
+ __ LoadP(cell_value_map, FieldMemOperand(cell_value, HeapObject::kMapOffset));
+ __ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ CmpP(cell_value_map, scratch);
+ __ beq(&fast_heapobject_case);
+
+ // Fallback to runtime.
+ __ bind(&slow_case);
+ __ SmiTag(slot);
+ __ Push(slot, value);
+ __ TailCallRuntime(is_strict(language_mode())
+ ? Runtime::kStoreGlobalViaContext_Strict
+ : Runtime::kStoreGlobalViaContext_Sloppy);
+}
+
+static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+ return ref0.address() - ref1.address();
+}
+
+// Calls an API function. Allocates HandleScope, extracts returned value
+// from handle and propagates exceptions. Restores context. stack_space
+// - space to be unwound on exit (includes the call JS arguments space and
+// the additional space allocated for the fast call).
+static void CallApiFunctionAndReturn(MacroAssembler* masm,
+ Register function_address,
+ ExternalReference thunk_ref,
+ int stack_space,
+ MemOperand* stack_space_operand,
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand) {
+ Isolate* isolate = masm->isolate();
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address(isolate);
+ const int kNextOffset = 0;
+ const int kLimitOffset = AddressOffset(
+ ExternalReference::handle_scope_limit_address(isolate), next_address);
+ const int kLevelOffset = AddressOffset(
+ ExternalReference::handle_scope_level_address(isolate), next_address);
+
+ // Additional parameter is the address of the actual callback.
+ DCHECK(function_address.is(r3) || function_address.is(r4));
+ Register scratch = r5;
+
+ __ mov(scratch, Operand(ExternalReference::is_profiling_address(isolate)));
+ __ LoadlB(scratch, MemOperand(scratch, 0));
+ __ CmpP(scratch, Operand::Zero());
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ __ beq(&profiler_disabled, Label::kNear);
+ __ mov(scratch, Operand(thunk_ref));
+ __ b(&end_profiler_check, Label::kNear);
+ __ bind(&profiler_disabled);
+ __ LoadRR(scratch, function_address);
+ __ bind(&end_profiler_check);
+
+ // Allocate HandleScope in callee-save registers.
+ // r9 - next_address
+ // r6 - next_address->kNextOffset
+ // r7 - next_address->kLimitOffset
+ // r8 - next_address->kLevelOffset
+ __ mov(r9, Operand(next_address));
+ __ LoadP(r6, MemOperand(r9, kNextOffset));
+ __ LoadP(r7, MemOperand(r9, kLimitOffset));
+ __ LoadlW(r8, MemOperand(r9, kLevelOffset));
+ __ AddP(r8, Operand(1));
+ __ StoreW(r8, MemOperand(r9, kLevelOffset));
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ PrepareCallCFunction(1, r2);
+ __ mov(r2, Operand(ExternalReference::isolate_address(isolate)));
+ __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
+ 1);
+ __ PopSafepointRegisters();
+ }
+
+ // Native call returns to the DirectCEntry stub which redirects to the
+ // return address pushed on stack (could have moved after GC).
+ // DirectCEntry stub itself is generated early and never moves.
+ DirectCEntryStub stub(isolate);
+ stub.GenerateCall(masm, scratch);
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ PrepareCallCFunction(1, r2);
+ __ mov(r2, Operand(ExternalReference::isolate_address(isolate)));
+ __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
+ 1);
+ __ PopSafepointRegisters();
+ }
+
+ Label promote_scheduled_exception;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+ Label return_value_loaded;
+
+ // load value from ReturnValue
+ __ LoadP(r2, return_value_operand);
+ __ bind(&return_value_loaded);
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ __ StoreP(r6, MemOperand(r9, kNextOffset));
+ if (__ emit_debug_code()) {
+ __ LoadlW(r3, MemOperand(r9, kLevelOffset));
+ __ CmpP(r3, r8);
+ __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
+ }
+ __ SubP(r8, Operand(1));
+ __ StoreW(r8, MemOperand(r9, kLevelOffset));
+ __ CmpP(r7, MemOperand(r9, kLimitOffset));
+ __ bne(&delete_allocated_handles, Label::kNear);
+
+ // Leave the API exit frame.
+ __ bind(&leave_exit_frame);
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ __ LoadP(cp, *context_restore_operand);
+ }
+ // LeaveExitFrame expects unwind space to be in a register.
+ if (stack_space_operand != NULL) {
+ __ l(r6, *stack_space_operand);
+ } else {
+ __ mov(r6, Operand(stack_space));
+ }
+ __ LeaveExitFrame(false, r6, !restore_context, stack_space_operand != NULL);
+
+ // Check if the function scheduled an exception.
+ __ mov(r7, Operand(ExternalReference::scheduled_exception_address(isolate)));
+ __ LoadP(r7, MemOperand(r7));
+ __ CompareRoot(r7, Heap::kTheHoleValueRootIndex);
+ __ bne(&promote_scheduled_exception, Label::kNear);
+
+ __ b(r14);
+
+ // Re-throw by promoting a scheduled exception.
+ __ bind(&promote_scheduled_exception);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException);
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ __ bind(&delete_allocated_handles);
+ __ StoreP(r7, MemOperand(r9, kLimitOffset));
+ __ LoadRR(r6, r2);
+ __ PrepareCallCFunction(1, r7);
+ __ mov(r2, Operand(ExternalReference::isolate_address(isolate)));
+ __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
+ 1);
+ __ LoadRR(r2, r6);
+ __ b(&leave_exit_frame, Label::kNear);
+}
+
+void CallApiCallbackStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : callee
+ // -- r6 : call_data
+ // -- r4 : holder
+ // -- r3 : api_function_address
+ // -- cp : context
+ // --
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[(argc - 1)* 4] : first argument
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ Register callee = r2;
+ Register call_data = r6;
+ Register holder = r4;
+ Register api_function_address = r3;
+ Register context = cp;
+
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kContextSaveIndex == 6);
+ STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+ STATIC_ASSERT(FCA::kArgsLength == 7);
+
+ // context save
+ __ push(context);
+ if (!is_lazy()) {
+ // load context from callee
+ __ LoadP(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+ }
+
+ // callee
+ __ push(callee);
+
+ // call data
+ __ push(call_data);
+
+ Register scratch = call_data;
+ if (!call_data_undefined()) {
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ }
+ // return value
+ __ push(scratch);
+ // return value default
+ __ push(scratch);
+ // isolate
+ __ mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ push(scratch);
+ // holder
+ __ push(holder);
+
+ // Prepare arguments.
+ __ LoadRR(scratch, sp);
+
+ // Allocate the v8::Arguments structure in the arguments' space since
+ // it's not controlled by GC.
+ // S390 LINUX ABI:
+ //
+ // Create 5 extra slots on stack:
+ // [0] space for DirectCEntryStub's LR save
+ // [1-4] FunctionCallbackInfo
+ const int kApiStackSpace = 5;
+ const int kFunctionCallbackInfoOffset =
+ (kStackFrameExtraParamSlot + 1) * kPointerSize;
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, kApiStackSpace);
+
+ DCHECK(!api_function_address.is(r2) && !scratch.is(r2));
+ // r2 = FunctionCallbackInfo&
+ // Arguments is after the return address.
+ __ AddP(r2, sp, Operand(kFunctionCallbackInfoOffset));
+ // FunctionCallbackInfo::implicit_args_
+ __ StoreP(scratch, MemOperand(r2, 0 * kPointerSize));
+ // FunctionCallbackInfo::values_
+ __ AddP(ip, scratch, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
+ __ StoreP(ip, MemOperand(r2, 1 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc
+ __ LoadImmP(ip, Operand(argc()));
+ __ StoreW(ip, MemOperand(r2, 2 * kPointerSize));
+ // FunctionCallbackInfo::is_construct_call_ = 0
+ __ LoadImmP(ip, Operand::Zero());
+ __ StoreW(ip, MemOperand(r2, 2 * kPointerSize + kIntSize));
+
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_function_callback(masm->isolate());
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ MemOperand context_restore_operand(
+ fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
+ // Stores return the first js argument
+ int return_value_offset = 0;
+ if (is_store()) {
+ return_value_offset = 2 + FCA::kArgsLength;
+ } else {
+ return_value_offset = 2 + FCA::kReturnValueOffset;
+ }
+ MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
+ int stack_space = 0;
+ MemOperand is_construct_call_operand =
+ MemOperand(sp, kFunctionCallbackInfoOffset + 2 * kPointerSize + kIntSize);
+ MemOperand* stack_space_operand = &is_construct_call_operand;
+ stack_space = argc() + FCA::kArgsLength + 1;
+ stack_space_operand = NULL;
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
+ stack_space_operand, return_value_operand,
+ &context_restore_operand);
+}
+
+void CallApiGetterStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- sp[0] : name
+ // -- sp[4 .. (4 + kArgsLength*4)] : v8::PropertyCallbackInfo::args_
+ // -- ...
+ // -- r4 : api_function_address
+ // -----------------------------------
+
+ Register api_function_address = ApiGetterDescriptor::function_address();
+ int arg0Slot = 0;
+ int accessorInfoSlot = 0;
+ int apiStackSpace = 0;
+ DCHECK(api_function_address.is(r4));
+
+ // v8::PropertyCallbackInfo::args_ array and name handle.
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
+ __ LoadRR(r2, sp); // r2 = Handle<Name>
+ __ AddP(r3, r2, Operand(1 * kPointerSize)); // r3 = v8::PCI::args_
+
+ // If ABI passes Handles (pointer-sized struct) in a register:
+ //
+ // Create 2 extra slots on stack:
+ // [0] space for DirectCEntryStub's LR save
+ // [1] AccessorInfo&
+ //
+ // Otherwise:
+ //
+ // Create 3 extra slots on stack:
+ // [0] space for DirectCEntryStub's LR save
+ // [1] copy of Handle (first arg)
+ // [2] AccessorInfo&
+ if (ABI_PASSES_HANDLES_IN_REGS) {
+ accessorInfoSlot = kStackFrameExtraParamSlot + 1;
+ apiStackSpace = 2;
+ } else {
+ arg0Slot = kStackFrameExtraParamSlot + 1;
+ accessorInfoSlot = arg0Slot + 1;
+ apiStackSpace = 3;
+ }
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, apiStackSpace);
+
+ if (!ABI_PASSES_HANDLES_IN_REGS) {
+ // pass 1st arg by reference
+ __ StoreP(r2, MemOperand(sp, arg0Slot * kPointerSize));
+ __ AddP(r2, sp, Operand(arg0Slot * kPointerSize));
+ }
+
+ // Create v8::PropertyCallbackInfo object on the stack and initialize
+ // it's args_ field.
+ __ StoreP(r3, MemOperand(sp, accessorInfoSlot * kPointerSize));
+ __ AddP(r3, sp, Operand(accessorInfoSlot * kPointerSize));
+ // r3 = v8::PropertyCallbackInfo&
+
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_accessor_getter_callback(isolate());
+
+ // +3 is to skip prolog, return address and name handle.
+ MemOperand return_value_operand(
+ fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+ kStackUnwindSpace, NULL, return_value_operand, NULL);
+}
+
+#undef __
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_S390
diff --git a/deps/v8/src/s390/code-stubs-s390.h b/deps/v8/src/s390/code-stubs-s390.h
new file mode 100644
index 0000000000..461e569023
--- /dev/null
+++ b/deps/v8/src/s390/code-stubs-s390.h
@@ -0,0 +1,467 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_S390_CODE_STUBS_S390_H_
+#define V8_S390_CODE_STUBS_S390_H_
+
+#include "src/s390/frames-s390.h"
+
+namespace v8 {
+namespace internal {
+
+void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
+
+class StringHelper : public AllStatic {
+ public:
+ // Generate code for copying a large number of characters. This function
+ // is allowed to spend extra time setting up conditions to make copying
+ // faster. Copying of overlapping regions is not supported.
+ // Dest register ends at the position after the last character written.
+ static void GenerateCopyCharacters(MacroAssembler* masm, Register dest,
+ Register src, Register count,
+ Register scratch,
+ String::Encoding encoding);
+
+ // Compares two flat one-byte strings and returns result in r0.
+ static void GenerateCompareFlatOneByteStrings(MacroAssembler* masm,
+ Register left, Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3);
+
+ // Compares two flat one-byte strings for equality and returns result in r0.
+ static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
+ Register left, Register right,
+ Register scratch1,
+ Register scratch2);
+
+ private:
+ static void GenerateOneByteCharsCompareLoop(MacroAssembler* masm,
+ Register left, Register right,
+ Register length,
+ Register scratch1,
+ Label* chars_not_equal);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
+};
+
+class StoreRegistersStateStub : public PlatformCodeStub {
+ public:
+ explicit StoreRegistersStateStub(Isolate* isolate)
+ : PlatformCodeStub(isolate) {}
+
+ static void GenerateAheadOfTime(Isolate* isolate);
+
+ private:
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(StoreRegistersState, PlatformCodeStub);
+};
+
+class RestoreRegistersStateStub : public PlatformCodeStub {
+ public:
+ explicit RestoreRegistersStateStub(Isolate* isolate)
+ : PlatformCodeStub(isolate) {}
+
+ static void GenerateAheadOfTime(Isolate* isolate);
+
+ private:
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(RestoreRegistersState, PlatformCodeStub);
+};
+
+class RecordWriteStub : public PlatformCodeStub {
+ public:
+ RecordWriteStub(Isolate* isolate, Register object, Register value,
+ Register address, RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode)
+ : PlatformCodeStub(isolate),
+ regs_(object, // An input reg.
+ address, // An input reg.
+ value) { // One scratch reg.
+ minor_key_ = ObjectBits::encode(object.code()) |
+ ValueBits::encode(value.code()) |
+ AddressBits::encode(address.code()) |
+ RememberedSetActionBits::encode(remembered_set_action) |
+ SaveFPRegsModeBits::encode(fp_mode);
+ }
+
+ RecordWriteStub(uint32_t key, Isolate* isolate)
+ : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
+
+ enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };
+
+ bool SometimesSetsUpAFrame() override { return false; }
+
+ // Patch an always taken branch into a NOP branch
+ static void PatchBranchCondMask(MacroAssembler* masm, int pos, Condition c) {
+ int32_t instrLen = masm->instr_length_at(pos);
+ DCHECK(instrLen == 4 || instrLen == 6);
+
+ if (instrLen == 4) {
+ // BRC - Branch Mask @ Bits 23-20
+ FourByteInstr updatedMask = static_cast<FourByteInstr>(c) << 20;
+ masm->instr_at_put<FourByteInstr>(
+ pos, (masm->instr_at(pos) & ~kFourByteBrCondMask) | updatedMask);
+ } else {
+ // BRCL - Branch Mask @ Bits 39-36
+ SixByteInstr updatedMask = static_cast<SixByteInstr>(c) << 36;
+ masm->instr_at_put<SixByteInstr>(
+ pos, (masm->instr_at(pos) & ~kSixByteBrCondMask) | updatedMask);
+ }
+ }
+
+ static bool isBranchNop(SixByteInstr instr, int instrLength) {
+ if ((4 == instrLength && 0 == (instr & kFourByteBrCondMask)) ||
+ // BRC - Check for 0x0 mask condition.
+ (6 == instrLength && 0 == (instr & kSixByteBrCondMask))) {
+ // BRCL - Check for 0x0 mask condition
+ return true;
+ }
+ return false;
+ }
+
+ static Mode GetMode(Code* stub) {
+ int32_t first_instr_length =
+ Instruction::InstructionLength(stub->instruction_start());
+ int32_t second_instr_length = Instruction::InstructionLength(
+ stub->instruction_start() + first_instr_length);
+
+ uint64_t first_instr = Assembler::instr_at(stub->instruction_start());
+ uint64_t second_instr =
+ Assembler::instr_at(stub->instruction_start() + first_instr_length);
+
+ DCHECK(first_instr_length == 4 || first_instr_length == 6);
+ DCHECK(second_instr_length == 4 || second_instr_length == 6);
+
+ bool isFirstInstrNOP = isBranchNop(first_instr, first_instr_length);
+ bool isSecondInstrNOP = isBranchNop(second_instr, second_instr_length);
+
+ // STORE_BUFFER_ONLY has NOP on both branches
+ if (isSecondInstrNOP && isFirstInstrNOP) return STORE_BUFFER_ONLY;
+ // INCREMENTAL_COMPACTION has NOP on second branch.
+ else if (isFirstInstrNOP && !isSecondInstrNOP)
+ return INCREMENTAL_COMPACTION;
+ // INCREMENTAL has NOP on first branch.
+ else if (!isFirstInstrNOP && isSecondInstrNOP)
+ return INCREMENTAL;
+
+ DCHECK(false);
+ return STORE_BUFFER_ONLY;
+ }
+
+ static void Patch(Code* stub, Mode mode) {
+ MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
+ stub->instruction_size(), CodeObjectRequired::kNo);
+
+ // Get instruction lengths of two branches
+ int32_t first_instr_length = masm.instr_length_at(0);
+ int32_t second_instr_length = masm.instr_length_at(first_instr_length);
+
+ switch (mode) {
+ case STORE_BUFFER_ONLY:
+ DCHECK(GetMode(stub) == INCREMENTAL ||
+ GetMode(stub) == INCREMENTAL_COMPACTION);
+
+ PatchBranchCondMask(&masm, 0, CC_NOP);
+ PatchBranchCondMask(&masm, first_instr_length, CC_NOP);
+ break;
+ case INCREMENTAL:
+ DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
+ PatchBranchCondMask(&masm, 0, CC_ALWAYS);
+ break;
+ case INCREMENTAL_COMPACTION:
+ DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
+ PatchBranchCondMask(&masm, first_instr_length, CC_ALWAYS);
+ break;
+ }
+ DCHECK(GetMode(stub) == mode);
+ Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(),
+ first_instr_length + second_instr_length);
+ }
+
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+
+ private:
+ // This is a helper class for freeing up 3 scratch registers. The input is
+ // two registers that must be preserved and one scratch register provided by
+ // the caller.
+ class RegisterAllocation {
+ public:
+ RegisterAllocation(Register object, Register address, Register scratch0)
+ : object_(object), address_(address), scratch0_(scratch0) {
+ DCHECK(!AreAliased(scratch0, object, address, no_reg));
+ scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
+ }
+
+ void Save(MacroAssembler* masm) {
+ DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
+ // We don't have to save scratch0_ because it was given to us as
+ // a scratch register.
+ masm->push(scratch1_);
+ }
+
+ void Restore(MacroAssembler* masm) { masm->pop(scratch1_); }
+
+ // If we have to call into C then we need to save and restore all caller-
+ // saved registers that were not already preserved. The scratch registers
+ // will be restored by other means so we don't bother pushing them here.
+ void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+ masm->push(r14);
+ masm->MultiPush(kJSCallerSaved & ~scratch1_.bit());
+ if (mode == kSaveFPRegs) {
+ // Save all volatile FP registers except d0.
+ masm->MultiPushDoubles(kCallerSavedDoubles & ~d0.bit());
+ }
+ }
+
+ inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
+ SaveFPRegsMode mode) {
+ if (mode == kSaveFPRegs) {
+ // Restore all volatile FP registers except d0.
+ masm->MultiPopDoubles(kCallerSavedDoubles & ~d0.bit());
+ }
+ masm->MultiPop(kJSCallerSaved & ~scratch1_.bit());
+ masm->pop(r14);
+ }
+
+ inline Register object() { return object_; }
+ inline Register address() { return address_; }
+ inline Register scratch0() { return scratch0_; }
+ inline Register scratch1() { return scratch1_; }
+
+ private:
+ Register object_;
+ Register address_;
+ Register scratch0_;
+ Register scratch1_;
+
+ friend class RecordWriteStub;
+ };
+
+ enum OnNoNeedToInformIncrementalMarker {
+ kReturnOnNoNeedToInformIncrementalMarker,
+ kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+ };
+
+ inline Major MajorKey() const final { return RecordWrite; }
+
+ void Generate(MacroAssembler* masm) override;
+ void GenerateIncremental(MacroAssembler* masm, Mode mode);
+ void CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm);
+
+ void Activate(Code* code) override {
+ code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ }
+
+ Register object() const {
+ return Register::from_code(ObjectBits::decode(minor_key_));
+ }
+
+ Register value() const {
+ return Register::from_code(ValueBits::decode(minor_key_));
+ }
+
+ Register address() const {
+ return Register::from_code(AddressBits::decode(minor_key_));
+ }
+
+ RememberedSetAction remembered_set_action() const {
+ return RememberedSetActionBits::decode(minor_key_);
+ }
+
+ SaveFPRegsMode save_fp_regs_mode() const {
+ return SaveFPRegsModeBits::decode(minor_key_);
+ }
+
+ class ObjectBits : public BitField<int, 0, 4> {};
+ class ValueBits : public BitField<int, 4, 4> {};
+ class AddressBits : public BitField<int, 8, 4> {};
+ class RememberedSetActionBits : public BitField<RememberedSetAction, 15, 1> {
+ };
+ class SaveFPRegsModeBits : public BitField<SaveFPRegsMode, 16, 1> {};
+
+ Label slow_;
+ RegisterAllocation regs_;
+
+ DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
+};
+
+// Trampoline stub to call into native code. To call safely into native code
+// in the presence of compacting GC (which can move code objects) we need to
+// keep the code which called into native pinned in the memory. Currently the
+// simplest approach is to generate such stub early enough so it can never be
+// moved by GC
+class DirectCEntryStub : public PlatformCodeStub {
+ public:
+ explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+ void GenerateCall(MacroAssembler* masm, Register target);
+
+ private:
+ bool NeedsImmovableCode() override { return true; }
+
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
+};
+
+class NameDictionaryLookupStub : public PlatformCodeStub {
+ public:
+ enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
+
+ NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
+ : PlatformCodeStub(isolate) {
+ minor_key_ = LookupModeBits::encode(mode);
+ }
+
+ static void GenerateNegativeLookup(MacroAssembler* masm, Label* miss,
+ Label* done, Register receiver,
+ Register properties, Handle<Name> name,
+ Register scratch0);
+
+ static void GeneratePositiveLookup(MacroAssembler* masm, Label* miss,
+ Label* done, Register elements,
+ Register name, Register r0, Register r1);
+
+ bool SometimesSetsUpAFrame() override { return false; }
+
+ private:
+ static const int kInlinedProbes = 4;
+ static const int kTotalProbes = 20;
+
+ static const int kCapacityOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kCapacityIndex * kPointerSize;
+
+ static const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+
+ LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
+
+ class LookupModeBits : public BitField<LookupMode, 0, 1> {};
+
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
+};
+
+class FloatingPointHelper : public AllStatic {
+ public:
+ enum Destination { kFPRegisters, kCoreRegisters };
+
+ // Loads smis from r0 and r1 (right and left in binary operations) into
+ // floating point registers. Depending on the destination the values ends up
+ // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is
+ // floating point registers VFP3 must be supported. If core registers are
+ // requested when VFP3 is supported d6 and d7 will be scratched.
+ static void LoadSmis(MacroAssembler* masm, Register scratch1,
+ Register scratch2);
+
+ // Loads objects from r0 and r1 (right and left in binary operations) into
+ // floating point registers. Depending on the destination the values ends up
+ // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is
+ // floating point registers VFP3 must be supported. If core registers are
+ // requested when VFP3 is supported d6 and d7 will still be scratched. If
+ // either r0 or r1 is not a number (not smi and not heap number object) the
+ // not_number label is jumped to with r0 and r1 intact.
+ static void LoadOperands(MacroAssembler* masm, Register heap_number_map,
+ Register scratch1, Register scratch2,
+ Label* not_number);
+
+ // Convert the smi or heap number in object to an int32 using the rules
+ // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
+ // and brought into the range -2^31 .. +2^31 - 1.
+ static void ConvertNumberToInt32(MacroAssembler* masm, Register object,
+ Register dst, Register heap_number_map,
+ Register scratch1, Register scratch2,
+ Register scratch3,
+ DoubleRegister double_scratch,
+ Label* not_int32);
+
+ // Converts the integer (untagged smi) in |src| to a double, storing
+ // the result to |double_dst|
+ static void ConvertIntToDouble(MacroAssembler* masm, Register src,
+ DoubleRegister double_dst);
+
+ // Converts the unsigned integer (untagged smi) in |src| to
+ // a double, storing the result to |double_dst|
+ static void ConvertUnsignedIntToDouble(MacroAssembler* masm, Register src,
+ DoubleRegister double_dst);
+
+ // Converts the integer (untagged smi) in |src| to
+ // a float, storing the result in |dst|
+ static void ConvertIntToFloat(MacroAssembler* masm, const DoubleRegister dst,
+ const Register src);
+
+ // Load the number from object into double_dst in the double format.
+ // Control will jump to not_int32 if the value cannot be exactly represented
+ // by a 32-bit integer.
+ // Floating point value in the 32-bit integer range that are not exact integer
+ // won't be loaded.
+ static void LoadNumberAsInt32Double(MacroAssembler* masm, Register object,
+ DoubleRegister double_dst,
+ DoubleRegister double_scratch,
+ Register heap_number_map,
+ Register scratch1, Register scratch2,
+ Label* not_int32);
+
+ // Loads the number from object into dst as a 32-bit integer.
+ // Control will jump to not_int32 if the object cannot be exactly represented
+ // by a 32-bit integer.
+ // Floating point value in the 32-bit integer range that are not exact integer
+ // won't be converted.
+ // scratch3 is not used when VFP3 is supported.
+ static void LoadNumberAsInt32(MacroAssembler* masm, Register object,
+ Register dst, Register heap_number_map,
+ Register scratch1, Register scratch2,
+ Register scratch3,
+ DoubleRegister double_scratch0,
+ DoubleRegister double_scratch1,
+ Label* not_int32);
+
+ // Generate non VFP3 code to check if a double can be exactly represented by a
+ // 32-bit integer. This does not check for 0 or -0, which need
+ // to be checked for separately.
+ // Control jumps to not_int32 if the value is not a 32-bit integer, and falls
+ // through otherwise.
+ // src1 and src2 will be cloberred.
+ //
+ // Expected input:
+ // - src1: higher (exponent) part of the double value.
+ // - src2: lower (mantissa) part of the double value.
+ // Output status:
+ // - dst: 32 higher bits of the mantissa. (mantissa[51:20])
+ // - src2: contains 1.
+ // - other registers are clobbered.
+ static void DoubleIs32BitInteger(MacroAssembler* masm, Register src1,
+ Register src2, Register dst,
+ Register scratch, Label* not_int32);
+
+ // Generates code to call a C function to do a double operation using core
+ // registers. (Used when VFP3 is not supported.)
+ // This code never falls through, but returns with a heap number containing
+ // the result in r0.
+ // Register heapnumber_result must be a heap number in which the
+ // result of the operation will be stored.
+ // Requires the following layout on entry:
+ // r0: Left value (least significant part of mantissa).
+ // r1: Left value (sign, exponent, top of mantissa).
+ // r2: Right value (least significant part of mantissa).
+ // r3: Right value (sign, exponent, top of mantissa).
+ static void CallCCodeForDoubleOperation(MacroAssembler* masm, Token::Value op,
+ Register heap_number_result,
+ Register scratch);
+
+ private:
+ static void LoadNumber(MacroAssembler* masm, Register object,
+ DoubleRegister dst, Register heap_number_map,
+ Register scratch1, Register scratch2,
+ Label* not_number);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_S390_CODE_STUBS_S390_H_
diff --git a/deps/v8/src/s390/codegen-s390.cc b/deps/v8/src/s390/codegen-s390.cc
new file mode 100644
index 0000000000..6636a7ca1d
--- /dev/null
+++ b/deps/v8/src/s390/codegen-s390.cc
@@ -0,0 +1,675 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/s390/codegen-s390.h"
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/codegen.h"
+#include "src/macro-assembler.h"
+#include "src/s390/simulator-s390.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ masm.
+
+#if defined(USE_SIMULATOR)
+byte* fast_exp_s390_machine_code = nullptr;
+double fast_exp_simulator(double x, Isolate* isolate) {
+ return Simulator::current(isolate)->CallFPReturnsDouble(
+ fast_exp_s390_machine_code, x, 0);
+}
+#endif
+
+UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
+ size_t actual_size;
+ byte* buffer =
+ static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == nullptr) return nullptr;
+ ExternalReference::InitializeMathExpData();
+
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
+
+ {
+ DoubleRegister input = d0;
+ DoubleRegister result = d2;
+ DoubleRegister double_scratch1 = d3;
+ DoubleRegister double_scratch2 = d4;
+ Register temp1 = r6;
+ Register temp2 = r7;
+ Register temp3 = r8;
+
+ __ Push(temp3, temp2, temp1);
+ MathExpGenerator::EmitMathExp(&masm, input, result, double_scratch1,
+ double_scratch2, temp1, temp2, temp3);
+ __ Pop(temp3, temp2, temp1);
+ __ ldr(d0, result);
+ __ Ret();
+ }
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ DCHECK(ABI_USES_FUNCTION_DESCRIPTORS || !RelocInfo::RequiresRelocation(desc));
+
+ Assembler::FlushICache(isolate, buffer, actual_size);
+ base::OS::ProtectCode(buffer, actual_size);
+
+#if !defined(USE_SIMULATOR)
+ return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
+#else
+ fast_exp_s390_machine_code = buffer;
+ return &fast_exp_simulator;
+#endif
+}
+
+UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
+#if defined(USE_SIMULATOR)
+ return nullptr;
+#else
+ size_t actual_size;
+ byte* buffer =
+ static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == nullptr) return nullptr;
+
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
+
+ __ MovFromFloatParameter(d0);
+ __ sqdbr(d0, d0);
+ __ MovToFloatResult(d0);
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ DCHECK(ABI_USES_FUNCTION_DESCRIPTORS || !RelocInfo::RequiresRelocation(desc));
+
+ Assembler::FlushICache(isolate, buffer, actual_size);
+ base::OS::ProtectCode(buffer, actual_size);
+ return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
+#endif
+}
+
+#undef __
+
+// -------------------------------------------------------------------------
+// Platform-specific RuntimeCallHelper functions.
+
+void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+ masm->EnterFrame(StackFrame::INTERNAL);
+ DCHECK(!masm->has_frame());
+ masm->set_has_frame(true);
+}
+
+void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+ masm->LeaveFrame(StackFrame::INTERNAL);
+ DCHECK(masm->has_frame());
+ masm->set_has_frame(false);
+}
+
+// -------------------------------------------------------------------------
+// Code generators
+
+#define __ ACCESS_MASM(masm)
+
+void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+ MacroAssembler* masm, Register receiver, Register key, Register value,
+ Register target_map, AllocationSiteMode mode,
+ Label* allocation_memento_found) {
+ Register scratch_elements = r6;
+ DCHECK(!AreAliased(receiver, key, value, target_map, scratch_elements));
+
+ if (mode == TRACK_ALLOCATION_SITE) {
+ DCHECK(allocation_memento_found != NULL);
+ __ JumpIfJSArrayHasAllocationMemento(receiver, scratch_elements, r1,
+ allocation_memento_found);
+ }
+
+ // Set transitioned map.
+ __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, r1,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+}
+
+void ElementsTransitionGenerator::GenerateSmiToDouble(
+ MacroAssembler* masm, Register receiver, Register key, Register value,
+ Register target_map, AllocationSiteMode mode, Label* fail) {
+ // lr contains the return address
+ Label loop, entry, convert_hole, gc_required, only_change_map, done;
+ Register elements = r6;
+ Register length = r7;
+ Register array = r8;
+ Register array_end = array;
+
+ // target_map parameter can be clobbered.
+ Register scratch1 = target_map;
+ Register scratch2 = r1;
+
+ // Verify input registers don't conflict with locals.
+ DCHECK(!AreAliased(receiver, key, value, target_map, elements, length, array,
+ scratch2));
+
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch2, fail);
+ }
+
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
+ __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
+ __ beq(&only_change_map, Label::kNear);
+
+ // Preserve lr and use r14 as a temporary register.
+ __ push(r14);
+
+ __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ // length: number of elements (smi-tagged)
+
+ // Allocate new FixedDoubleArray.
+ __ SmiToDoubleArrayOffset(r14, length);
+ __ AddP(r14, Operand(FixedDoubleArray::kHeaderSize));
+ __ Allocate(r14, array, r9, scratch2, &gc_required, DOUBLE_ALIGNMENT);
+
+ // Set destination FixedDoubleArray's length and map.
+ __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
+ __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
+ // Update receiver's map.
+ __ StoreP(scratch2, MemOperand(array, HeapObject::kMapOffset));
+
+ __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
+ kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ // Replace receiver's backing store with newly created FixedDoubleArray.
+ __ AddP(scratch1, array, Operand(kHeapObjectTag));
+ __ StoreP(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ RecordWriteField(receiver, JSObject::kElementsOffset, scratch1, scratch2,
+ kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ // Prepare for conversion loop.
+ __ AddP(target_map, elements,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ AddP(r9, array, Operand(FixedDoubleArray::kHeaderSize));
+ __ SmiToDoubleArrayOffset(array, length);
+ __ AddP(array_end, r9, array);
+// Repurpose registers no longer in use.
+#if V8_TARGET_ARCH_S390X
+ Register hole_int64 = elements;
+#else
+ Register hole_lower = elements;
+ Register hole_upper = length;
+#endif
+ // scratch1: begin of source FixedArray element fields, not tagged
+ // hole_lower: kHoleNanLower32 OR hol_int64
+ // hole_upper: kHoleNanUpper32
+ // array_end: end of destination FixedDoubleArray, not tagged
+ // scratch2: begin of FixedDoubleArray element fields, not tagged
+
+ __ b(&entry, Label::kNear);
+
+ __ bind(&only_change_map);
+ __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ b(&done, Label::kNear);
+
+ // Call into runtime if GC is required.
+ __ bind(&gc_required);
+ __ pop(r14);
+ __ b(fail);
+
+ // Convert and copy elements.
+ __ bind(&loop);
+ __ LoadP(r14, MemOperand(scratch1));
+ __ la(scratch1, MemOperand(scratch1, kPointerSize));
+ // r1: current element
+ __ UntagAndJumpIfNotSmi(r14, r14, &convert_hole);
+
+ // Normal smi, convert to double and store.
+ __ ConvertIntToDouble(r14, d0);
+ __ StoreDouble(d0, MemOperand(r9, 0));
+ __ la(r9, MemOperand(r9, 8));
+
+ __ b(&entry, Label::kNear);
+
+ // Hole found, store the-hole NaN.
+ __ bind(&convert_hole);
+ if (FLAG_debug_code) {
+ // Restore a "smi-untagged" heap object.
+ __ LoadP(r1, MemOperand(r5, -kPointerSize));
+ __ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
+ __ Assert(eq, kObjectFoundInSmiOnlyArray);
+ }
+#if V8_TARGET_ARCH_S390X
+ __ stg(hole_int64, MemOperand(r9, 0));
+#else
+ __ StoreW(hole_upper, MemOperand(r9, Register::kExponentOffset));
+ __ StoreW(hole_lower, MemOperand(r9, Register::kMantissaOffset));
+#endif
+ __ AddP(r9, Operand(8));
+
+ __ bind(&entry);
+ __ CmpP(r9, array_end);
+ __ blt(&loop);
+
+ __ pop(r14);
+ __ bind(&done);
+}
+
+void ElementsTransitionGenerator::GenerateDoubleToObject(
+ MacroAssembler* masm, Register receiver, Register key, Register value,
+ Register target_map, AllocationSiteMode mode, Label* fail) {
+ // Register lr contains the return address.
+ Label loop, convert_hole, gc_required, only_change_map;
+ Register elements = r6;
+ Register array = r8;
+ Register length = r7;
+ Register scratch = r1;
+ Register scratch3 = r9;
+ Register hole_value = r9;
+
+ // Verify input registers don't conflict with locals.
+ DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length,
+ scratch));
+
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch3, fail);
+ }
+
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
+ __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
+ __ beq(&only_change_map);
+
+ __ Push(target_map, receiver, key, value);
+ __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ // elements: source FixedDoubleArray
+ // length: number of elements (smi-tagged)
+
+ // Allocate new FixedArray.
+ // Re-use value and target_map registers, as they have been saved on the
+ // stack.
+ Register array_size = value;
+ Register allocate_scratch = target_map;
+ __ LoadImmP(array_size, Operand(FixedDoubleArray::kHeaderSize));
+ __ SmiToPtrArrayOffset(r0, length);
+ __ AddP(array_size, r0);
+ __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
+ NO_ALLOCATION_FLAGS);
+ // array: destination FixedArray, not tagged as heap object
+ // Set destination FixedDoubleArray's length and map.
+ __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
+ __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
+ __ StoreP(scratch, MemOperand(array, HeapObject::kMapOffset));
+ __ AddP(array, Operand(kHeapObjectTag));
+
+ // Prepare for conversion loop.
+ Register src_elements = elements;
+ Register dst_elements = target_map;
+ Register dst_end = length;
+ Register heap_number_map = scratch;
+ __ AddP(src_elements,
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ __ SmiToPtrArrayOffset(length, length);
+ __ LoadRoot(hole_value, Heap::kTheHoleValueRootIndex);
+
+ Label initialization_loop, loop_done;
+ __ ShiftRightP(scratch, length, Operand(kPointerSizeLog2));
+ __ beq(&loop_done, Label::kNear);
+
+ // Allocating heap numbers in the loop below can fail and cause a jump to
+ // gc_required. We can't leave a partly initialized FixedArray behind,
+ // so pessimistically fill it with holes now.
+ __ AddP(dst_elements, array,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+ __ bind(&initialization_loop);
+ __ StoreP(hole_value, MemOperand(dst_elements, kPointerSize));
+ __ lay(dst_elements, MemOperand(dst_elements, kPointerSize));
+ __ BranchOnCount(scratch, &initialization_loop);
+
+ __ AddP(dst_elements, array,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ AddP(dst_end, dst_elements, length);
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ // Using offsetted addresses in src_elements to fully take advantage of
+ // post-indexing.
+ // dst_elements: begin of destination FixedArray element fields, not tagged
+ // src_elements: begin of source FixedDoubleArray element fields,
+ // not tagged, +4
+ // dst_end: end of destination FixedArray, not tagged
+ // array: destination FixedArray
+ // hole_value: the-hole pointer
+ // heap_number_map: heap number map
+ __ b(&loop, Label::kNear);
+
+ // Call into runtime if GC is required.
+ __ bind(&gc_required);
+ __ Pop(target_map, receiver, key, value);
+ __ b(fail);
+
+ // Replace the-hole NaN with the-hole pointer.
+ __ bind(&convert_hole);
+ __ StoreP(hole_value, MemOperand(dst_elements));
+ __ AddP(dst_elements, Operand(kPointerSize));
+ __ CmpLogicalP(dst_elements, dst_end);
+ __ bge(&loop_done);
+
+ __ bind(&loop);
+ Register upper_bits = key;
+ __ LoadlW(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
+ __ AddP(src_elements, Operand(kDoubleSize));
+ // upper_bits: current element's upper 32 bit
+ // src_elements: address of next element's upper 32 bit
+ __ Cmp32(upper_bits, Operand(kHoleNanUpper32));
+ __ beq(&convert_hole, Label::kNear);
+
+ // Non-hole double, copy value into a heap number.
+ Register heap_number = receiver;
+ Register scratch2 = value;
+ __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
+ &gc_required);
+// heap_number: new heap number
+#if V8_TARGET_ARCH_S390X
+ __ lg(scratch2, MemOperand(src_elements, -kDoubleSize));
+ // subtract tag for std
+ __ AddP(upper_bits, heap_number, Operand(-kHeapObjectTag));
+ __ stg(scratch2, MemOperand(upper_bits, HeapNumber::kValueOffset));
+#else
+ __ LoadlW(scratch2,
+ MemOperand(src_elements, Register::kMantissaOffset - kDoubleSize));
+ __ LoadlW(upper_bits,
+ MemOperand(src_elements, Register::kExponentOffset - kDoubleSize));
+ __ StoreW(scratch2,
+ FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
+ __ StoreW(upper_bits,
+ FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
+#endif
+ __ LoadRR(scratch2, dst_elements);
+ __ StoreP(heap_number, MemOperand(dst_elements));
+ __ AddP(dst_elements, Operand(kPointerSize));
+ __ RecordWrite(array, scratch2, heap_number, kLRHasNotBeenSaved,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ CmpLogicalP(dst_elements, dst_end);
+ __ blt(&loop);
+ __ bind(&loop_done);
+
+ __ Pop(target_map, receiver, key, value);
+ // Replace receiver's backing store with newly created and filled FixedArray.
+ __ StoreP(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ RecordWriteField(receiver, JSObject::kElementsOffset, array, scratch,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ __ bind(&only_change_map);
+ // Update receiver's map.
+ __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+}
+
+// assume ip can be used as a scratch register below
+void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
+ Register index, Register result,
+ Label* call_runtime) {
+ // Fetch the instance type of the receiver into result register.
+ __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ LoadlB(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+ // We need special handling for indirect strings.
+ Label check_sequential;
+ __ mov(r0, Operand(kIsIndirectStringMask));
+ __ AndP(r0, result);
+ __ beq(&check_sequential, Label::kNear /*, cr0*/);
+
+ // Dispatch on the indirect string shape: slice or cons.
+ Label cons_string;
+ __ mov(ip, Operand(kSlicedNotConsMask));
+ __ LoadRR(r0, result);
+ __ AndP(r0, ip /*, SetRC*/); // Should be okay to remove RC
+ __ beq(&cons_string, Label::kNear /*, cr0*/);
+
+ // Handle slices.
+ Label indirect_string_loaded;
+ __ LoadP(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
+ __ LoadP(string, FieldMemOperand(string, SlicedString::kParentOffset));
+ __ SmiUntag(ip, result);
+ __ AddP(index, ip);
+ __ b(&indirect_string_loaded, Label::kNear);
+
+ // Handle cons strings.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ bind(&cons_string);
+ __ LoadP(result, FieldMemOperand(string, ConsString::kSecondOffset));
+ __ CompareRoot(result, Heap::kempty_stringRootIndex);
+ __ bne(call_runtime);
+ // Get the first of the two strings and load its instance type.
+ __ LoadP(string, FieldMemOperand(string, ConsString::kFirstOffset));
+
+ __ bind(&indirect_string_loaded);
+ __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ LoadlB(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+ // Distinguish sequential and external strings. Only these two string
+ // representations can reach here (slices and flat cons strings have been
+ // reduced to the underlying sequential or external string).
+ Label external_string, check_encoding;
+ __ bind(&check_sequential);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ mov(r0, Operand(kStringRepresentationMask));
+ __ AndP(r0, result);
+ __ bne(&external_string, Label::kNear);
+
+ // Prepare sequential strings
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ AddP(string, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ b(&check_encoding, Label::kNear);
+
+ // Handle external strings.
+ __ bind(&external_string);
+ if (FLAG_debug_code) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ __ mov(r0, Operand(kIsIndirectStringMask));
+ __ AndP(r0, result);
+ __ Assert(eq, kExternalStringExpectedButNotFound, cr0);
+ }
+ // Rule out short external strings.
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ __ mov(r0, Operand(kShortExternalStringMask));
+ __ AndP(r0, result);
+ __ bne(call_runtime /*, cr0*/);
+ __ LoadP(string,
+ FieldMemOperand(string, ExternalString::kResourceDataOffset));
+
+ Label one_byte, done;
+ __ bind(&check_encoding);
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ __ mov(r0, Operand(kStringEncodingMask));
+ __ AndP(r0, result);
+ __ bne(&one_byte, Label::kNear);
+ // Two-byte string.
+ __ ShiftLeftP(result, index, Operand(1));
+ __ LoadLogicalHalfWordP(result, MemOperand(string, result));
+ __ b(&done, Label::kNear);
+ __ bind(&one_byte);
+ // One-byte string.
+ __ LoadlB(result, MemOperand(string, index));
+ __ bind(&done);
+}
+
+static MemOperand ExpConstant(int index, Register base) {
+ return MemOperand(base, index * kDoubleSize);
+}
+
+void MathExpGenerator::EmitMathExp(MacroAssembler* masm, DoubleRegister input,
+ DoubleRegister result,
+ DoubleRegister double_scratch1,
+ DoubleRegister double_scratch2,
+ Register temp1, Register temp2,
+ Register temp3) {
+ DCHECK(!input.is(result));
+ DCHECK(!input.is(double_scratch1));
+ DCHECK(!input.is(double_scratch2));
+ DCHECK(!result.is(double_scratch1));
+ DCHECK(!result.is(double_scratch2));
+ DCHECK(!double_scratch1.is(double_scratch2));
+ DCHECK(!temp1.is(temp2));
+ DCHECK(!temp1.is(temp3));
+ DCHECK(!temp2.is(temp3));
+ DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
+ DCHECK(!masm->serializer_enabled()); // External references not serializable.
+
+ Label zero, infinity, done;
+
+ __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));
+
+ __ LoadDouble(double_scratch1, ExpConstant(0, temp3));
+ __ cdbr(double_scratch1, input);
+ __ ldr(result, input);
+ __ bunordered(&done, Label::kNear);
+ __ bge(&zero, Label::kNear);
+
+ __ LoadDouble(double_scratch2, ExpConstant(1, temp3));
+ __ cdbr(input, double_scratch2);
+ __ bge(&infinity, Label::kNear);
+
+ __ LoadDouble(double_scratch1, ExpConstant(3, temp3));
+ __ LoadDouble(result, ExpConstant(4, temp3));
+
+ // Do not generate madbr, as intermediate result are not
+ // rounded properly
+ __ mdbr(double_scratch1, input);
+ __ adbr(double_scratch1, result);
+
+ // Move low word of double_scratch1 to temp2
+ __ lgdr(temp2, double_scratch1);
+ __ nihf(temp2, Operand::Zero());
+
+ __ sdbr(double_scratch1, result);
+ __ LoadDouble(result, ExpConstant(6, temp3));
+ __ LoadDouble(double_scratch2, ExpConstant(5, temp3));
+ __ mdbr(double_scratch1, double_scratch2);
+ __ sdbr(double_scratch1, input);
+ __ sdbr(result, double_scratch1);
+ __ ldr(double_scratch2, double_scratch1);
+ __ mdbr(double_scratch2, double_scratch2);
+ __ mdbr(result, double_scratch2);
+ __ LoadDouble(double_scratch2, ExpConstant(7, temp3));
+ __ mdbr(result, double_scratch2);
+ __ sdbr(result, double_scratch1);
+ __ LoadDouble(double_scratch2, ExpConstant(8, temp3));
+ __ adbr(result, double_scratch2);
+ __ ShiftRight(temp1, temp2, Operand(11));
+ __ AndP(temp2, Operand(0x7ff));
+ __ AddP(temp1, Operand(0x3ff));
+
+ // Must not call ExpConstant() after overwriting temp3!
+ __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
+ __ ShiftLeft(temp2, temp2, Operand(3));
+
+ __ lg(temp2, MemOperand(temp2, temp3));
+ __ sllg(temp1, temp1, Operand(52));
+ __ ogr(temp2, temp1);
+ __ ldgr(double_scratch1, temp2);
+
+ __ mdbr(result, double_scratch1);
+ __ b(&done, Label::kNear);
+
+ __ bind(&zero);
+ __ lzdr(kDoubleRegZero);
+ __ ldr(result, kDoubleRegZero);
+ __ b(&done, Label::kNear);
+
+ __ bind(&infinity);
+ __ LoadDouble(result, ExpConstant(2, temp3));
+
+ __ bind(&done);
+}
+
+#undef __
+
+CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
+ USE(isolate);
+ DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
+ // Since patcher is a large object, allocate it dynamically when needed,
+ // to avoid overloading the stack in stress conditions.
+ // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
+ // the process, before ARM simulator ICache is setup.
+ base::SmartPointer<CodePatcher> patcher(
+ new CodePatcher(isolate, young_sequence_.start(),
+ young_sequence_.length(), CodePatcher::DONT_FLUSH));
+ PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
+ patcher->masm()->PushStandardFrame(r3);
+}
+
+#ifdef DEBUG
+bool CodeAgingHelper::IsOld(byte* candidate) const {
+ return Assembler::IsNop(Assembler::instr_at(candidate));
+}
+#endif
+
+bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
+ bool result = isolate->code_aging_helper()->IsYoung(sequence);
+ DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
+ return result;
+}
+
+void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
+ MarkingParity* parity) {
+ if (IsYoungSequence(isolate, sequence)) {
+ *age = kNoAgeCodeAge;
+ *parity = NO_MARKING_PARITY;
+ } else {
+ Code* code = NULL;
+ Address target_address =
+ Assembler::target_address_at(sequence + kCodeAgingTargetDelta, code);
+ Code* stub = GetCodeFromTargetAddress(target_address);
+ GetCodeAgeAndParity(stub, age, parity);
+ }
+}
+
+void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Code::Age age,
+ MarkingParity parity) {
+ uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
+ if (age == kNoAgeCodeAge) {
+ isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
+ Assembler::FlushICache(isolate, sequence, young_length);
+ } else {
+ // FIXED_SEQUENCE
+ Code* stub = GetCodeAgeStub(isolate, age, parity);
+ CodePatcher patcher(isolate, sequence, young_length);
+ intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
+ // We need to push lr on stack so that GenerateMakeCodeYoungAgainCommon
+ // knows where to pick up the return address
+ //
+ // Since we can no longer guarentee ip will hold the branch address
+ // because of BRASL, use Call so that GenerateMakeCodeYoungAgainCommon
+ // can calculate the branch address offset
+ patcher.masm()->nop(); // marker to detect sequence (see IsOld)
+ patcher.masm()->CleanseP(r14);
+ patcher.masm()->Push(r14);
+ patcher.masm()->mov(r2, Operand(target));
+ patcher.masm()->Call(r2);
+ for (int i = 0; i < kNoCodeAgeSequenceLength - kCodeAgingSequenceLength;
+ i += 2) {
+ // TODO(joransiu): Create nop function to pad
+ // (kNoCodeAgeSequenceLength - kCodeAgingSequenceLength) bytes.
+ patcher.masm()->nop(); // 2-byte nops().
+ }
+ }
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_S390
diff --git a/deps/v8/src/s390/codegen-s390.h b/deps/v8/src/s390/codegen-s390.h
new file mode 100644
index 0000000000..18cf8e29d1
--- /dev/null
+++ b/deps/v8/src/s390/codegen-s390.h
@@ -0,0 +1,44 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+//
+// Copyright IBM Corp. 2012, 2015. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_S390_CODEGEN_S390_H_
+#define V8_S390_CODEGEN_S390_H_
+
+#include "src/ast/ast.h"
+#include "src/macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+class StringCharLoadGenerator : public AllStatic {
+ public:
+ // Generates the code for handling different string types and loading the
+ // indexed character into |result|. We expect |index| as untagged input and
+ // |result| as untagged output.
+ static void Generate(MacroAssembler* masm, Register string, Register index,
+ Register result, Label* call_runtime);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
+};
+
+class MathExpGenerator : public AllStatic {
+ public:
+ // Register input isn't modified. All other registers are clobbered.
+ static void EmitMathExp(MacroAssembler* masm, DoubleRegister input,
+ DoubleRegister result, DoubleRegister double_scratch1,
+ DoubleRegister double_scratch2, Register temp1,
+ Register temp2, Register temp3);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_S390_CODEGEN_S390_H_
diff --git a/deps/v8/src/s390/constants-s390.cc b/deps/v8/src/s390/constants-s390.cc
new file mode 100644
index 0000000000..a958082a8f
--- /dev/null
+++ b/deps/v8/src/s390/constants-s390.cc
@@ -0,0 +1,48 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/s390/constants-s390.h"
+
+namespace v8 {
+namespace internal {
+
+// These register names are defined in a way to match the native disassembler
+// formatting. See for example the command "objdump -d <binary file>".
+const char* Registers::names_[kNumRegisters] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "fp", "ip", "r13", "r14", "sp"};
+
+const char* DoubleRegisters::names_[kNumDoubleRegisters] = {
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
+ "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15"};
+
+int DoubleRegisters::Number(const char* name) {
+ for (int i = 0; i < kNumDoubleRegisters; i++) {
+ if (strcmp(names_[i], name) == 0) {
+ return i;
+ }
+ }
+
+ // No register with the requested name found.
+ return kNoRegister;
+}
+
+int Registers::Number(const char* name) {
+ // Look through the canonical names.
+ for (int i = 0; i < kNumRegisters; i++) {
+ if (strcmp(names_[i], name) == 0) {
+ return i;
+ }
+ }
+
+ // No register with the requested name found.
+ return kNoRegister;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_S390
diff --git a/deps/v8/src/s390/constants-s390.h b/deps/v8/src/s390/constants-s390.h
new file mode 100644
index 0000000000..c313c929e6
--- /dev/null
+++ b/deps/v8/src/s390/constants-s390.h
@@ -0,0 +1,1561 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_S390_CONSTANTS_S390_H_
+#define V8_S390_CONSTANTS_S390_H_
+
+// Get the standard printf format macros for C99 stdint types.
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+#include <inttypes.h>
+
+#include <stdint.h>
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+// Number of registers
+const int kNumRegisters = 16;
+
+// FP support.
+const int kNumDoubleRegisters = 16;
+
+const int kNoRegister = -1;
+
+// sign-extend the least significant 16-bits of value <imm>
+#define SIGN_EXT_IMM16(imm) ((static_cast<int>(imm) << 16) >> 16)
+
+// sign-extend the least significant 26-bits of value <imm>
+#define SIGN_EXT_IMM26(imm) ((static_cast<int>(imm) << 6) >> 6)
+
+// -----------------------------------------------------------------------------
+// Conditions.
+
+// Defines constants and accessor classes to assemble, disassemble and
+// simulate z/Architecture instructions.
+//
+// Section references in the code refer to the "z/Architecture Principles
+// Of Operation" http://publibfi.boulder.ibm.com/epubs/pdf/dz9zr009.pdf
+//
+
+// Constants for specific fields are defined in their respective named enums.
+// General constants are in an anonymous enum in class Instr.
+enum Condition {
+ kNoCondition = -1,
+ eq = 0x8, // Equal.
+ ne = 0x7, // Not equal.
+ ge = 0xa, // Greater or equal.
+ lt = 0x4, // Less than.
+ gt = 0x2, // Greater than.
+ le = 0xc, // Less then or equal
+ al = 0xf, // Always.
+
+ CC_NOP = 0x0, // S390 NOP
+ CC_EQ = 0x08, // S390 condition code 0b1000
+ CC_LT = 0x04, // S390 condition code 0b0100
+ CC_LE = CC_EQ | CC_LT, // S390 condition code 0b1100
+ CC_GT = 0x02, // S390 condition code 0b0010
+ CC_GE = CC_EQ | CC_GT, // S390 condition code 0b1010
+ CC_OF = 0x01, // S390 condition code 0b0001
+ CC_NOF = 0x0E, // S390 condition code 0b1110
+ CC_ALWAYS = 0x0F, // S390 always taken branch
+ unordered = CC_OF, // Floating-point unordered
+ ordered = CC_NOF, // floating-point ordered
+ overflow = CC_OF, // Summary overflow
+ nooverflow = CC_NOF,
+
+ mask0x0 = 0, // no jumps
+ mask0x1 = 1,
+ mask0x2 = 2,
+ mask0x3 = 3,
+ mask0x4 = 4,
+ mask0x5 = 5,
+ mask0x6 = 6,
+ mask0x7 = 7,
+ mask0x8 = 8,
+ mask0x9 = 9,
+ mask0xA = 10,
+ mask0xB = 11,
+ mask0xC = 12,
+ mask0xD = 13,
+ mask0xE = 14,
+ mask0xF = 15,
+
+ // Rounding modes for floating poing facility
+ CURRENT_ROUNDING_MODE = 0,
+ ROUND_TO_NEAREST_WITH_TIES_AWAY_FROM_0 = 1,
+ ROUND_TO_PREPARE_FOR_SHORTER_PRECISION = 3,
+ ROUND_TO_NEAREST_WITH_TIES_TO_EVEN = 4,
+ ROUND_TOWARD_0 = 5,
+ ROUND_TOWARD_PLUS_INFINITE = 6,
+ ROUND_TOWARD_MINUS_INFINITE = 7
+};
+
+inline Condition NegateCondition(Condition cond) {
+ DCHECK(cond != al);
+ switch (cond) {
+ case eq:
+ return ne;
+ case ne:
+ return eq;
+ case ge:
+ return lt;
+ case gt:
+ return le;
+ case le:
+ return gt;
+ case lt:
+ return ge;
+ case lt | gt:
+ return eq;
+ case le | ge:
+ return CC_OF;
+ case CC_OF:
+ return CC_NOF;
+ default:
+ DCHECK(false);
+ }
+ return al;
+}
+
+// Commute a condition such that {a cond b == b cond' a}.
+inline Condition CommuteCondition(Condition cond) {
+ switch (cond) {
+ case lt:
+ return gt;
+ case gt:
+ return lt;
+ case ge:
+ return le;
+ case le:
+ return ge;
+ case eq:
+ return eq;
+ case ne:
+ return ne;
+ default:
+ DCHECK(false);
+ return cond;
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Instructions encoding.
+
+// Instr is merely used by the Assembler to distinguish 32bit integers
+// representing instructions from usual 32 bit values.
+// Instruction objects are pointers to 32bit values, and provide methods to
+// access the various ISA fields.
+typedef int32_t Instr;
+typedef uint16_t TwoByteInstr;
+typedef uint32_t FourByteInstr;
+typedef uint64_t SixByteInstr;
+
+// Opcodes as defined in Appendix B-2 table
+enum Opcode {
+ A = 0x5A, // Add (32)
+ ADB = 0xED1A, // Add (long BFP)
+ ADBR = 0xB31A, // Add (long BFP)
+ ADTR = 0xB3D2, // Add (long DFP)
+ ADTRA = 0xB3D2, // Add (long DFP)
+ AEB = 0xED0A, // Add (short BFP)
+ AEBR = 0xB30A, // Add (short BFP)
+ AFI = 0xC29, // Add Immediate (32)
+ AG = 0xE308, // Add (64)
+ AGF = 0xE318, // Add (64<-32)
+ AGFI = 0xC28, // Add Immediate (64<-32)
+ AGFR = 0xB918, // Add (64<-32)
+ AGHI = 0xA7B, // Add Halfword Immediate (64)
+ AGHIK = 0xECD9, // Add Immediate (64<-16)
+ AGR = 0xB908, // Add (64)
+ AGRK = 0xB9E8, // Add (64)
+ AGSI = 0xEB7A, // Add Immediate (64<-8)
+ AH = 0x4A, // Add Halfword
+ AHHHR = 0xB9C8, // Add High (32)
+ AHHLR = 0xB9D8, // Add High (32)
+ AHI = 0xA7A, // Add Halfword Immediate (32)
+ AHIK = 0xECD8, // Add Immediate (32<-16)
+ AHY = 0xE37A, // Add Halfword
+ AIH = 0xCC8, // Add Immediate High (32)
+ AL = 0x5E, // Add Logical (32)
+ ALC = 0xE398, // Add Logical With Carry (32)
+ ALCG = 0xE388, // Add Logical With Carry (64)
+ ALCGR = 0xB988, // Add Logical With Carry (64)
+ ALCR = 0xB998, // Add Logical With Carry (32)
+ ALFI = 0xC2B, // Add Logical Immediate (32)
+ ALG = 0xE30A, // Add Logical (64)
+ ALGF = 0xE31A, // Add Logical (64<-32)
+ ALGFI = 0xC2A, // Add Logical Immediate (64<-32)
+ ALGFR = 0xB91A, // Add Logical (64<-32)
+ ALGHSIK = 0xECDB, // Add Logical With Signed Immediate (64<-16)
+ ALGR = 0xB90A, // Add Logical (64)
+ ALGRK = 0xB9EA, // Add Logical (64)
+ ALGSI = 0xEB7E, // Add Logical With Signed Immediate (64<-8)
+ ALHHHR = 0xB9CA, // Add Logical High (32)
+ ALHHLR = 0xB9DA, // Add Logical High (32)
+ ALHSIK = 0xECDA, // Add Logical With Signed Immediate (32<-16)
+ ALR = 0x1E, // Add Logical (32)
+ ALRK = 0xB9FA, // Add Logical (32)
+ ALSI = 0xEB6E, // Add Logical With Signed Immediate (32<-8)
+ ALSIH = 0xCCA, // Add Logical With Signed Immediate High (32)
+ ALSIHN = 0xCCB, // Add Logical With Signed Immediate High (32)
+ ALY = 0xE35E, // Add Logical (32)
+ AP = 0xFA, // Add Decimal
+ AR = 0x1A, // Add (32)
+ ARK = 0xB9F8, // Add (32)
+ ASI = 0xEB6A, // Add Immediate (32<-8)
+ AXBR = 0xB34A, // Add (extended BFP)
+ AXTR = 0xB3DA, // Add (extended DFP)
+ AXTRA = 0xB3DA, // Add (extended DFP)
+ AY = 0xE35A, // Add (32)
+ BAL = 0x45, // Branch And Link
+ BALR = 0x05, // Branch And Link
+ BAS = 0x4D, // Branch And Save
+ BASR = 0x0D, // Branch And Save
+ BASSM = 0x0C, // Branch And Save And Set Mode
+ BC = 0x47, // Branch On Condition
+ BCR = 0x07, // Branch On Condition
+ BCT = 0x46, // Branch On Count (32)
+ BCTG = 0xE346, // Branch On Count (64)
+ BCTGR = 0xB946, // Branch On Count (64)
+ BCTR = 0x06, // Branch On Count (32)
+ BPP = 0xC7, // Branch Prediction Preload
+ BPRP = 0xC5, // Branch Prediction Relative Preload
+ BRAS = 0xA75, // Branch Relative And Save
+ BRASL = 0xC05, // Branch Relative And Save Long
+ BRC = 0xA74, // Branch Relative On Condition
+ BRCL = 0xC04, // Branch Relative On Condition Long
+ BRCT = 0xA76, // Branch Relative On Count (32)
+ BRCTG = 0xA77, // Branch Relative On Count (64)
+ BRCTH = 0xCC6, // Branch Relative On Count High (32)
+ BRXH = 0x84, // Branch Relative On Index High (32)
+ BRXHG = 0xEC44, // Branch Relative On Index High (64)
+ BRXLE = 0x85, // Branch Relative On Index Low Or Eq. (32)
+ BRXLG = 0xEC45, // Branch Relative On Index Low Or Eq. (64)
+ BSM = 0x0B, // Branch And Set Mode
+ BXH = 0x86, // Branch On Index High (32)
+ BXHG = 0xEB44, // Branch On Index High (64)
+ BXLE = 0x87, // Branch On Index Low Or Equal (32)
+ BXLEG = 0xEB45, // Branch On Index Low Or Equal (64)
+ C = 0x59, // Compare (32)
+ CDB = 0xED19, // Compare (long BFP)
+ CDBR = 0xB319, // Compare (long BFP)
+ CDFBR = 0xB395, // Convert From Fixed (32 to long BFP)
+ CDFBRA = 0xB395, // Convert From Fixed (32 to long BFP)
+ CDFTR = 0xB951, // Convert From Fixed (32 to long DFP)
+ CDGBR = 0xB3A5, // Convert From Fixed (64 to long BFP)
+ CDGBRA = 0xB3A5, // Convert From Fixed (64 to long BFP)
+ CDGTR = 0xB3F1, // Convert From Fixed (64 to long DFP)
+ CDGTRA = 0xB3F1, // Convert From Fixed (64 to long DFP)
+ CDLFBR = 0xB391, // Convert From Logical (32 to long BFP)
+ CDLFTR = 0xB953, // Convert From Logical (32 to long DFP)
+ CDLGBR = 0xB3A1, // Convert From Logical (64 to long BFP)
+ CDLGTR = 0xB952, // Convert From Logical (64 to long DFP)
+ CDS = 0xBB, // Compare Double And Swap (32)
+ CDSG = 0xEB3E, // Compare Double And Swap (64)
+ CDSTR = 0xB3F3, // Convert From Signed Packed (64 to long DFP)
+ CDSY = 0xEB31, // Compare Double And Swap (32)
+ CDTR = 0xB3E4, // Compare (long DFP)
+ CDUTR = 0xB3F2, // Convert From Unsigned Packed (64 to long DFP)
+ CDZT = 0xEDAA, // Convert From Zoned (to long DFP)
+ CEB = 0xED09, // Compare (short BFP)
+ CEBR = 0xB309, // Compare (short BFP)
+ CEDTR = 0xB3F4, // Compare Biased Exponent (long DFP)
+ CEFBR = 0xB394, // Convert From Fixed (32 to short BFP)
+ CEFBRA = 0xB394, // Convert From Fixed (32 to short BFP)
+ CEGBR = 0xB3A4, // Convert From Fixed (64 to short BFP)
+ CEGBRA = 0xB3A4, // Convert From Fixed (64 to short BFP)
+ CELFBR = 0xB390, // Convert From Logical (32 to short BFP)
+ CELGBR = 0xB3A0, // Convert From Logical (64 to short BFP)
+ CEXTR = 0xB3FC, // Compare Biased Exponent (extended DFP)
+ CFC = 0xB21A, // Compare And Form Codeword
+ CFDBR = 0xB399, // Convert To Fixed (long BFP to 32)
+ CFDBRA = 0xB399, // Convert To Fixed (long BFP to 32)
+ CFDR = 0xB3B9, // Convert To Fixed (long HFP to 32)
+ CFDTR = 0xB941, // Convert To Fixed (long DFP to 32)
+ CFEBR = 0xB398, // Convert To Fixed (short BFP to 32)
+ CFEBRA = 0xB398, // Convert To Fixed (short BFP to 32)
+ CFER = 0xB3B8, // Convert To Fixed (short HFP to 32)
+ CFI = 0xC2D, // Compare Immediate (32)
+ CFXBR = 0xB39A, // Convert To Fixed (extended BFP to 32)
+ CFXBRA = 0xB39A, // Convert To Fixed (extended BFP to 32)
+ CFXR = 0xB3BA, // Convert To Fixed (extended HFP to 32)
+ CFXTR = 0xB949, // Convert To Fixed (extended DFP to 32)
+ CG = 0xE320, // Compare (64)
+ CGDBR = 0xB3A9, // Convert To Fixed (long BFP to 64)
+ CGDBRA = 0xB3A9, // Convert To Fixed (long BFP to 64)
+ CGDR = 0xB3C9, // Convert To Fixed (long HFP to 64)
+ CGDTR = 0xB3E1, // Convert To Fixed (long DFP to 64)
+ CGDTRA = 0xB3E1, // Convert To Fixed (long DFP to 64)
+ CGEBR = 0xB3A8, // Convert To Fixed (short BFP to 64)
+ CGEBRA = 0xB3A8, // Convert To Fixed (short BFP to 64)
+ CGER = 0xB3C8, // Convert To Fixed (short HFP to 64)
+ CGF = 0xE330, // Compare (64<-32)
+ CGFI = 0xC2C, // Compare Immediate (64<-32)
+ CGFR = 0xB930, // Compare (64<-32)
+ CGFRL = 0xC6C, // Compare Relative Long (64<-32)
+ CGH = 0xE334, // Compare Halfword (64<-16)
+ CGHI = 0xA7F, // Compare Halfword Immediate (64<-16)
+ CGHRL = 0xC64, // Compare Halfword Relative Long (64<-16)
+ CGHSI = 0xE558, // Compare Halfword Immediate (64<-16)
+ CGIB = 0xECFC, // Compare Immediate And Branch (64<-8)
+ CGIJ = 0xEC7C, // Compare Immediate And Branch Relative (64<-8)
+ CGIT = 0xEC70, // Compare Immediate And Trap (64<-16)
+ CGR = 0xB920, // Compare (64)
+ CGRB = 0xECE4, // Compare And Branch (64)
+ CGRJ = 0xEC64, // Compare And Branch Relative (64)
+ CGRL = 0xC68, // Compare Relative Long (64)
+ CGRT = 0xB960, // Compare And Trap (64)
+ CGXBR = 0xB3AA, // Convert To Fixed (extended BFP to 64)
+ CGXBRA = 0xB3AA, // Convert To Fixed (extended BFP to 64)
+ CGXR = 0xB3CA, // Convert To Fixed (extended HFP to 64)
+ CGXTR = 0xB3E9, // Convert To Fixed (extended DFP to 64)
+ CGXTRA = 0xB3E9, // Convert To Fixed (extended DFP to 64)
+ CH = 0x49, // Compare Halfword (32<-16)
+ CHF = 0xE3CD, // Compare High (32)
+ CHHR = 0xB9CD, // Compare High (32)
+ CHHSI = 0xE554, // Compare Halfword Immediate (16)
+ CHI = 0xA7E, // Compare Halfword Immediate (32<-16)
+ CHLR = 0xB9DD, // Compare High (32)
+ CHRL = 0xC65, // Compare Halfword Relative Long (32<-16)
+ CHSI = 0xE55C, // Compare Halfword Immediate (32<-16)
+ CHY = 0xE379, // Compare Halfword (32<-16)
+ CIB = 0xECFE, // Compare Immediate And Branch (32<-8)
+ CIH = 0xCCD, // Compare Immediate High (32)
+ CIJ = 0xEC7E, // Compare Immediate And Branch Relative (32<-8)
+ CIT = 0xEC72, // Compare Immediate And Trap (32<-16)
+ CKSM = 0xB241, // Checksum
+ CL = 0x55, // Compare Logical (32)
+ CLC = 0xD5, // Compare Logical (character)
+ CLCL = 0x0F, // Compare Logical Long
+ CLCLE = 0xA9, // Compare Logical Long Extended
+ CLCLU = 0xEB8F, // Compare Logical Long Unicode
+ CLFDBR = 0xB39D, // Convert To Logical (long BFP to 32)
+ CLFDTR = 0xB943, // Convert To Logical (long DFP to 32)
+ CLFEBR = 0xB39C, // Convert To Logical (short BFP to 32)
+ CLFHSI = 0xE55D, // Compare Logical Immediate (32<-16)
+ CLFI = 0xC2F, // Compare Logical Immediate (32)
+ CLFIT = 0xEC73, // Compare Logical Immediate And Trap (32<-16)
+ CLFXBR = 0xB39E, // Convert To Logical (extended BFP to 32)
+ CLFXTR = 0xB94B, // Convert To Logical (extended DFP to 32)
+ CLG = 0xE321, // Compare Logical (64)
+ CLGDBR = 0xB3AD, // Convert To Logical (long BFP to 64)
+ CLGDTR = 0xB942, // Convert To Logical (long DFP to 64)
+ CLGEBR = 0xB3AC, // Convert To Logical (short BFP to 64)
+ CLGF = 0xE331, // Compare Logical (64<-32)
+ CLGFI = 0xC2E, // Compare Logical Immediate (64<-32)
+ CLGR = 0xB921, // Compare Logical (64)
+ CLI = 0x95, // Compare Logical Immediate (8)
+ CLIY = 0xEB55, // Compare Logical Immediate (8)
+ CLR = 0x15, // Compare Logical (32)
+ CLY = 0xE355, // Compare Logical (32)
+ CD = 0x69, // Compare (LH)
+ CDR = 0x29, // Compare (LH)
+ CR = 0x19, // Compare (32)
+ CSST = 0xC82, // Compare And Swap And Store
+ CSXTR = 0xB3EB, // Convert To Signed Packed (extended DFP to 128)
+ CSY = 0xEB14, // Compare And Swap (32)
+ CU12 = 0xB2A7, // Convert Utf-8 To Utf-16
+ CU14 = 0xB9B0, // Convert Utf-8 To Utf-32
+ CU21 = 0xB2A6, // Convert Utf-16 To Utf-8
+ CU24 = 0xB9B1, // Convert Utf-16 To Utf-32
+ CU41 = 0xB9B2, // Convert Utf-32 To Utf-8
+ CU42 = 0xB9B3, // Convert Utf-32 To Utf-16
+ CUDTR = 0xB3E2, // Convert To Unsigned Packed (long DFP to 64)
+ CUSE = 0xB257, // Compare Until Substring Equal
+ CUTFU = 0xB2A7, // Convert Utf-8 To Unicode
+ CUUTF = 0xB2A6, // Convert Unicode To Utf-8
+ CUXTR = 0xB3EA, // Convert To Unsigned Packed (extended DFP to 128)
+ CVB = 0x4F, // Convert To Binary (32)
+ CVBG = 0xE30E, // Convert To Binary (64)
+ CVBY = 0xE306, // Convert To Binary (32)
+ CVD = 0x4E, // Convert To Decimal (32)
+ CVDG = 0xE32E, // Convert To Decimal (64)
+ CVDY = 0xE326, // Convert To Decimal (32)
+ CXBR = 0xB349, // Compare (extended BFP)
+ CXFBR = 0xB396, // Convert From Fixed (32 to extended BFP)
+ CXFBRA = 0xB396, // Convert From Fixed (32 to extended BFP)
+ CXFTR = 0xB959, // Convert From Fixed (32 to extended DFP)
+ CXGBR = 0xB3A6, // Convert From Fixed (64 to extended BFP)
+ CXGBRA = 0xB3A6, // Convert From Fixed (64 to extended BFP)
+ CXGTR = 0xB3F9, // Convert From Fixed (64 to extended DFP)
+ CXGTRA = 0xB3F9, // Convert From Fixed (64 to extended DFP)
+ CXLFBR = 0xB392, // Convert From Logical (32 to extended BFP)
+ CXLFTR = 0xB95B, // Convert From Logical (32 to extended DFP)
+ CXLGBR = 0xB3A2, // Convert From Logical (64 to extended BFP)
+ CXLGTR = 0xB95A, // Convert From Logical (64 to extended DFP)
+ CXSTR = 0xB3FB, // Convert From Signed Packed (128 to extended DFP)
+ CXTR = 0xB3EC, // Compare (extended DFP)
+ CXUTR = 0xB3FA, // Convert From Unsigned Packed (128 to ext. DFP)
+ CXZT = 0xEDAB, // Convert From Zoned (to extended DFP)
+ CY = 0xE359, // Compare (32)
+ CZDT = 0xEDA8, // Convert To Zoned (from long DFP)
+ CZXT = 0xEDA9, // Convert To Zoned (from extended DFP)
+ D = 0x5D, // Divide (32<-64)
+ DDB = 0xED1D, // Divide (long BFP)
+ DDBR = 0xB31D, // Divide (long BFP)
+ DDTR = 0xB3D1, // Divide (long DFP)
+ DDTRA = 0xB3D1, // Divide (long DFP)
+ DEB = 0xED0D, // Divide (short BFP)
+ DEBR = 0xB30D, // Divide (short BFP)
+ DIDBR = 0xB35B, // Divide To Integer (long BFP)
+ DIEBR = 0xB353, // Divide To Integer (short BFP)
+ DL = 0xE397, // Divide Logical (32<-64)
+ DLG = 0xE387, // Divide Logical (64<-128)
+ DLGR = 0xB987, // Divide Logical (64<-128)
+ DLR = 0xB997, // Divide Logical (32<-64)
+ DP = 0xFD, // Divide Decimal
+ DR = 0x1D, // Divide (32<-64)
+ DSG = 0xE30D, // Divide Single (64)
+ DSGF = 0xE31D, // Divide Single (64<-32)
+ DSGFR = 0xB91D, // Divide Single (64<-32)
+ DSGR = 0xB90D, // Divide Single (64)
+ DXBR = 0xB34D, // Divide (extended BFP)
+ DXTR = 0xB3D9, // Divide (extended DFP)
+ DXTRA = 0xB3D9, // Divide (extended DFP)
+ EAR = 0xB24F, // Extract Access
+ ECAG = 0xEB4C, // Extract Cache Attribute
+ ECTG = 0xC81, // Extract Cpu Time
+ ED = 0xDE, // Edit
+ EDMK = 0xDF, // Edit And Mark
+ EEDTR = 0xB3E5, // Extract Biased Exponent (long DFP to 64)
+ EEXTR = 0xB3ED, // Extract Biased Exponent (extended DFP to 64)
+ EFPC = 0xB38C, // Extract Fpc
+ EPSW = 0xB98D, // Extract Psw
+ ESDTR = 0xB3E7, // Extract Significance (long DFP)
+ ESXTR = 0xB3EF, // Extract Significance (extended DFP)
+ ETND = 0xB2EC, // Extract Transaction Nesting Depth
+ EX = 0x44, // Execute
+ EXRL = 0xC60, // Execute Relative Long
+ FIDBR = 0xB35F, // Load Fp Integer (long BFP)
+ FIDBRA = 0xB35F, // Load Fp Integer (long BFP)
+ FIDTR = 0xB3D7, // Load Fp Integer (long DFP)
+ FIEBR = 0xB357, // Load Fp Integer (short BFP)
+ FIEBRA = 0xB357, // Load Fp Integer (short BFP)
+ FIXBR = 0xB347, // Load Fp Integer (extended BFP)
+ FIXBRA = 0xB347, // Load Fp Integer (extended BFP)
+ FIXTR = 0xB3DF, // Load Fp Integer (extended DFP)
+ FLOGR = 0xB983, // Find Leftmost One
+ HSCH = 0xB231, // Halt Subchannel
+ IC_z = 0x43, // Insert Character
+ ICM = 0xBF, // Insert Characters Under Mask (low)
+ ICMH = 0xEB80, // Insert Characters Under Mask (high)
+ ICMY = 0xEB81, // Insert Characters Under Mask (low)
+ ICY = 0xE373, // Insert Character
+ IEDTR = 0xB3F6, // Insert Biased Exponent (64 to long DFP)
+ IEXTR = 0xB3FE, // Insert Biased Exponent (64 to extended DFP)
+ IIHF = 0xC08, // Insert Immediate (high)
+ IIHH = 0xA50, // Insert Immediate (high high)
+ IIHL = 0xA51, // Insert Immediate (high low)
+ IILF = 0xC09, // Insert Immediate (low)
+ IILH = 0xA52, // Insert Immediate (low high)
+ IILL = 0xA53, // Insert Immediate (low low)
+ IPM = 0xB222, // Insert Program Mask
+ KDB = 0xED18, // Compare And Signal (long BFP)
+ KDBR = 0xB318, // Compare And Signal (long BFP)
+ KDTR = 0xB3E0, // Compare And Signal (long DFP)
+ KEB = 0xED08, // Compare And Signal (short BFP)
+ KEBR = 0xB308, // Compare And Signal (short BFP)
+ KIMD = 0xB93E, // Compute Intermediate Message Digest
+ KLMD = 0xB93F, // Compute Last Message Digest
+ KM = 0xB92E, // Cipher Message
+ KMAC = 0xB91E, // Compute Message Authentication Code
+ KMC = 0xB92F, // Cipher Message With Chaining
+ KMCTR = 0xB92D, // Cipher Message With Counter
+ KMF = 0xB92A, // Cipher Message With Cfb
+ KMO = 0xB92B, // Cipher Message With Ofb
+ KXBR = 0xB348, // Compare And Signal (extended BFP)
+ KXTR = 0xB3E8, // Compare And Signal (extended DFP)
+ L = 0x58, // Load (32)
+ LA = 0x41, // Load Address
+ LAA = 0xEBF8, // Load And Add (32)
+ LAAG = 0xEBE8, // Load And Add (64)
+ LAAL = 0xEBFA, // Load And Add Logical (32)
+ LAALG = 0xEBEA, // Load And Add Logical (64)
+ LAE = 0x51, // Load Address Extended
+ LAEY = 0xE375, // Load Address Extended
+ LAN = 0xEBF4, // Load And And (32)
+ LANG = 0xEBE4, // Load And And (64)
+ LAO = 0xEBF6, // Load And Or (32)
+ LAOG = 0xEBE6, // Load And Or (64)
+ LARL = 0xC00, // Load Address Relative Long
+ LAT = 0xE39F, // Load And Trap (32L<-32)
+ LAX = 0xEBF7, // Load And Exclusive Or (32)
+ LAXG = 0xEBE7, // Load And Exclusive Or (64)
+ LAY = 0xE371, // Load Address
+ LB = 0xE376, // Load Byte (32)
+ LBH = 0xE3C0, // Load Byte High (32<-8)
+ LBR = 0xB926, // Load Byte (32)
+ LCDBR = 0xB313, // Load Complement (long BFP)
+ LCDFR = 0xB373, // Load Complement (long)
+ LCEBR = 0xB303, // Load Complement (short BFP)
+ LCGFR = 0xB913, // Load Complement (64<-32)
+ LCGR = 0xB903, // Load Complement (64)
+ LCR = 0x13, // Load Complement (32)
+ LCXBR = 0xB343, // Load Complement (extended BFP)
+ LD = 0x68, // Load (long)
+ LDEB = 0xED04, // Load Lengthened (short to long BFP)
+ LDEBR = 0xB304, // Load Lengthened (short to long BFP)
+ LDETR = 0xB3D4, // Load Lengthened (short to long DFP)
+ LDGR = 0xB3C1, // Load Fpr From Gr (64 to long)
+ LDR = 0x28, // Load (long)
+ LDXBR = 0xB345, // Load Rounded (extended to long BFP)
+ LDXBRA = 0xB345, // Load Rounded (extended to long BFP)
+ LDXTR = 0xB3DD, // Load Rounded (extended to long DFP)
+ LDY = 0xED65, // Load (long)
+ LE = 0x78, // Load (short)
+ LEDBR = 0xB344, // Load Rounded (long to short BFP)
+ LEDBRA = 0xB344, // Load Rounded (long to short BFP)
+ LEDTR = 0xB3D5, // Load Rounded (long to short DFP)
+ LER = 0x38, // Load (short)
+ LEXBR = 0xB346, // Load Rounded (extended to short BFP)
+ LEXBRA = 0xB346, // Load Rounded (extended to short BFP)
+ LEY = 0xED64, // Load (short)
+ LFAS = 0xB2BD, // Load Fpc And Signal
+ LFH = 0xE3CA, // Load High (32)
+ LFHAT = 0xE3C8, // Load High And Trap (32H<-32)
+ LFPC = 0xB29D, // Load Fpc
+ LG = 0xE304, // Load (64)
+ LGAT = 0xE385, // Load And Trap (64)
+ LGB = 0xE377, // Load Byte (64)
+ LGBR = 0xB906, // Load Byte (64)
+ LGDR = 0xB3CD, // Load Gr From Fpr (long to 64)
+ LGF = 0xE314, // Load (64<-32)
+ LGFI = 0xC01, // Load Immediate (64<-32)
+ LGFR = 0xB914, // Load (64<-32)
+ LGFRL = 0xC4C, // Load Relative Long (64<-32)
+ LGH = 0xE315, // Load Halfword (64)
+ LGHI = 0xA79, // Load Halfword Immediate (64)
+ LGHR = 0xB907, // Load Halfword (64)
+ LGHRL = 0xC44, // Load Halfword Relative Long (64<-16)
+ LGR = 0xB904, // Load (64)
+ LGRL = 0xC48, // Load Relative Long (64)
+ LH = 0x48, // Load Halfword (32)
+ LHH = 0xE3C4, // Load Halfword High (32<-16)
+ LHI = 0xA78, // Load Halfword Immediate (32)
+ LHR = 0xB927, // Load Halfword (32)
+ LHRL = 0xC45, // Load Halfword Relative Long (32<-16)
+ LHY = 0xE378, // Load Halfword (32)
+ LLC = 0xE394, // Load Logical Character (32)
+ LLCH = 0xE3C2, // Load Logical Character High (32<-8)
+ LLCR = 0xB994, // Load Logical Character (32)
+ LLGC = 0xE390, // Load Logical Character (64)
+ LLGCR = 0xB984, // Load Logical Character (64)
+ LLGF = 0xE316, // Load Logical (64<-32)
+ LLGFAT = 0xE39D, // Load Logical And Trap (64<-32)
+ LLGFR = 0xB916, // Load Logical (64<-32)
+ LLGFRL = 0xC4E, // Load Logical Relative Long (64<-32)
+ LLGH = 0xE391, // Load Logical Halfword (64)
+ LLGHR = 0xB985, // Load Logical Halfword (64)
+ LLGHRL = 0xC46, // Load Logical Halfword Relative Long (64<-16)
+ LLGT = 0xE317, // Load Logical Thirty One Bits
+ LLGTAT = 0xE39C, // Load Logical Thirty One Bits And Trap (64<-31)
+ LLGTR = 0xB917, // Load Logical Thirty One Bits
+ LLH = 0xE395, // Load Logical Halfword (32)
+ LLHH = 0xE3C6, // Load Logical Halfword High (32<-16)
+ LLHR = 0xB995, // Load Logical Halfword (32)
+ LLHRL = 0xC42, // Load Logical Halfword Relative Long (32<-16)
+ LLIHF = 0xC0E, // Load Logical Immediate (high)
+ LLIHH = 0xA5C, // Load Logical Immediate (high high)
+ LLIHL = 0xA5D, // Load Logical Immediate (high low)
+ LLILF = 0xC0F, // Load Logical Immediate (low)
+ LLILH = 0xA5E, // Load Logical Immediate (low high)
+ LLILL = 0xA5F, // Load Logical Immediate (low low)
+ LM = 0x98, // Load Multiple (32)
+ LMD = 0xEF, // Load Multiple Disjoint
+ LMG = 0xEB04, // Load Multiple (64)
+ LMH = 0xEB96, // Load Multiple High
+ LMY = 0xEB98, // Load Multiple (32)
+ LNDBR = 0xB311, // Load Negative (long BFP)
+ LNDFR = 0xB371, // Load Negative (long)
+ LNEBR = 0xB301, // Load Negative (short BFP)
+ LNGFR = 0xB911, // Load Negative (64<-32)
+ LNGR = 0xB901, // Load Negative (64)
+ LNR = 0x11, // Load Negative (32)
+ LNXBR = 0xB341, // Load Negative (extended BFP)
+ LOC = 0xEBF2, // Load On Condition (32)
+ LOCG = 0xEBE2, // Load On Condition (64)
+ LOCGR = 0xB9E2, // Load On Condition (64)
+ LOCR = 0xB9F2, // Load On Condition (32)
+ LPD = 0xC84, // Load Pair Disjoint (32)
+ LPDBR = 0xB310, // Load Positive (long BFP)
+ LPDFR = 0xB370, // Load Positive (long)
+ LPDG = 0xC85, // Load Pair Disjoint (64)
+ LPEBR = 0xB300, // Load Positive (short BFP)
+ LPGFR = 0xB910, // Load Positive (64<-32)
+ LPGR = 0xB900, // Load Positive (64)
+ LPQ = 0xE38F, // Load Pair From Quadword
+ LPR = 0x10, // Load Positive (32)
+ LPXBR = 0xB340, // Load Positive (extended BFP)
+ LR = 0x18, // Load (32)
+ LRL = 0xC4D, // Load Relative Long (32)
+ LRV = 0xE31E, // Load Reversed (32)
+ LRVG = 0xE30F, // Load Reversed (64)
+ LRVGR = 0xB90F, // Load Reversed (64)
+ LRVH = 0xE31F, // Load Reversed (16)
+ LRVR = 0xB91F, // Load Reversed (32)
+ LT = 0xE312, // Load And Test (32)
+ LTDBR = 0xB312, // Load And Test (long BFP)
+ LTDTR = 0xB3D6, // Load And Test (long DFP)
+ LTEBR = 0xB302, // Load And Test (short BFP)
+ LTG = 0xE302, // Load And Test (64)
+ LTGF = 0xE332, // Load And Test (64<-32)
+ LTGFR = 0xB912, // Load And Test (64<-32)
+ LTGR = 0xB902, // Load And Test (64)
+ LTR = 0x12, // Load And Test (32)
+ LTXBR = 0xB342, // Load And Test (extended BFP)
+ LTXTR = 0xB3DE, // Load And Test (extended DFP)
+ LXDB = 0xED05, // Load Lengthened (long to extended BFP)
+ LXDBR = 0xB305, // Load Lengthened (long to extended BFP)
+ LXDTR = 0xB3DC, // Load Lengthened (long to extended DFP)
+ LXEB = 0xED06, // Load Lengthened (short to extended BFP)
+ LXEBR = 0xB306, // Load Lengthened (short to extended BFP)
+ LXR = 0xB365, // Load (extended)
+ LY = 0xE358, // Load (32)
+ LZDR = 0xB375, // Load Zero (long)
+ LZER = 0xB374, // Load Zero (short)
+ LZXR = 0xB376, // Load Zero (extended)
+ M = 0x5C, // Multiply (64<-32)
+ MADB = 0xED1E, // Multiply And Add (long BFP)
+ MADBR = 0xB31E, // Multiply And Add (long BFP)
+ MAEB = 0xED0E, // Multiply And Add (short BFP)
+ MAEBR = 0xB30E, // Multiply And Add (short BFP)
+ MC = 0xAF, // Monitor Call
+ MDB = 0xED1C, // Multiply (long BFP)
+ MDBR = 0xB31C, // Multiply (long BFP)
+ MDEB = 0xED0C, // Multiply (short to long BFP)
+ MDEBR = 0xB30C, // Multiply (short to long BFP)
+ MDTR = 0xB3D0, // Multiply (long DFP)
+ MDTRA = 0xB3D0, // Multiply (long DFP)
+ MEEB = 0xED17, // Multiply (short BFP)
+ MEEBR = 0xB317, // Multiply (short BFP)
+ MFY = 0xE35C, // Multiply (64<-32)
+ MGHI = 0xA7D, // Multiply Halfword Immediate (64)
+ MH = 0x4C, // Multiply Halfword (32)
+ MHI = 0xA7C, // Multiply Halfword Immediate (32)
+ MHY = 0xE37C, // Multiply Halfword (32)
+ ML = 0xE396, // Multiply Logical (64<-32)
+ MLG = 0xE386, // Multiply Logical (128<-64)
+ MLGR = 0xB986, // Multiply Logical (128<-64)
+ MLR = 0xB996, // Multiply Logical (64<-32)
+ MP = 0xFC, // Multiply Decimal
+ MR = 0x1C, // Multiply (64<-32)
+ MS = 0x71, // Multiply Single (32)
+ MSCH = 0xB232, // Modify Subchannel
+ MSDB = 0xED1F, // Multiply And Subtract (long BFP)
+ MSDBR = 0xB31F, // Multiply And Subtract (long BFP)
+ MSEB = 0xED0F, // Multiply And Subtract (short BFP)
+ MSEBR = 0xB30F, // Multiply And Subtract (short BFP)
+ MSFI = 0xC21, // Multiply Single Immediate (32)
+ MSG = 0xE30C, // Multiply Single (64)
+ MSGF = 0xE31C, // Multiply Single (64<-32)
+ MSGFI = 0xC20, // Multiply Single Immediate (64<-32)
+ MSGFR = 0xB91C, // Multiply Single (64<-32)
+ MSGR = 0xB90C, // Multiply Single (64)
+ MSR = 0xB252, // Multiply Single (32)
+ MSY = 0xE351, // Multiply Single (32)
+ MVC = 0xD2, // Move (character)
+ MVCP = 0xDA, // Move To Primary
+ MVCDK = 0xE50F, // Move To Primary
+ MVCIN = 0xE8, // Move Inverse
+ MVCL = 0x0E, // Move Long
+ MVCLE = 0xA8, // Move Long Extended
+ MVCLU = 0xEB8E, // Move Long Unicode
+ MVGHI = 0xE548, // Move (64<-16)
+ MVHHI = 0xE544, // Move (16<-16)
+ MVHI = 0xE54C, // Move (32<-16)
+ MVI = 0x92, // Move (immediate)
+ MVIY = 0xEB52, // Move (immediate)
+ MVN = 0xD1, // Move Numerics
+ MVO = 0xF1, // Move With Offset
+ MVST = 0xB255, // Move String
+ MVZ = 0xD3, // Move Zones
+ MXBR = 0xB34C, // Multiply (extended BFP)
+ MXDB = 0xED07, // Multiply (long to extended BFP)
+ MXDBR = 0xB307, // Multiply (long to extended BFP)
+ MXTR = 0xB3D8, // Multiply (extended DFP)
+ MXTRA = 0xB3D8, // Multiply (extended DFP)
+ N = 0x54, // And (32)
+ NC = 0xD4, // And (character)
+ NG = 0xE380, // And (64)
+ NGR = 0xB980, // And (64)
+ NGRK = 0xB9E4, // And (64)
+ NI = 0x94, // And (immediate)
+ NIAI = 0xB2FA, // Next Instruction Access Intent Ie Eh
+ NIHF = 0xC0A, // And Immediate (high)
+ NIHH = 0xA54, // And Immediate (high high)
+ NIHL = 0xA55, // And Immediate (high low)
+ NILF = 0xC0B, // And Immediate (low)
+ NILH = 0xA56, // And Immediate (low high)
+ NILL = 0xA57, // And Immediate (low low)
+ NIY = 0xEB54, // And (immediate)
+ NR = 0x14, // And (32)
+ NRK = 0xB9F4, // And (32)
+ NTSTG = 0xE325, // Nontransactional Store Rxy Tx ¤9 A Sp St B2
+ NY = 0xE354, // And (32)
+ O = 0x56, // Or (32)
+ OC = 0xD6, // Or (character)
+ OG = 0xE381, // Or (64)
+ OGR = 0xB981, // Or (64)
+ OGRK = 0xB9E6, // Or (64)
+ OI = 0x96, // Or (immediate)
+ OIHF = 0xC0C, // Or Immediate (high)
+ OIHH = 0xA58, // Or Immediate (high high)
+ OIHL = 0xA59, // Or Immediate (high low)
+ OILF = 0xC0D, // Or Immediate (low)
+ OILH = 0xA5A, // Or Immediate (low high)
+ OILL = 0xA5B, // Or Immediate (low low)
+ OIY = 0xEB56, // Or (immediate)
+ OR = 0x16, // Or (32)
+ ORK = 0xB9F6, // Or (32)
+ OY = 0xE356, // Or (32)
+ PACK = 0xF2, // Pack
+ PCC = 0xB92C, // Perform Cryptographic Computation
+ PFD = 0xE336, // Prefetch Data
+ PFDRL = 0xC62, // Prefetch Data Relative Long
+ PFPO = 0x010A, // Perform Floating-POINT Operation
+ PKA = 0xE9, // Pack Ascii
+ PKU = 0xE1, // Pack Unicode
+ PLO = 0xEE, // Perform Locked Operation
+ POPCNT_Z = 0xB9E1, // Population Count
+ PPA = 0xB2E8, // Perform Processor Assist
+ QADTR = 0xB3F5, // Quantize (long DFP)
+ QAXTR = 0xB3FD, // Quantize (extended DFP)
+ RCHP = 0xB23B, // Reset Channel Path
+ RISBG = 0xEC55, // Rotate Then Insert Selected Bits
+ RISBGN = 0xEC59, // Rotate Then Insert Selected Bits
+ RISBHG = 0xEC5D, // Rotate Then Insert Selected Bits High
+ RISBLG = 0xEC51, // Rotate Then Insert Selected Bits Low
+ RLL = 0xEB1D, // Rotate Left Single Logical (32)
+ RLLG = 0xEB1C, // Rotate Left Single Logical (64)
+ RNSBG = 0xEC54, // Rotate Then And Selected Bits
+ ROSBG = 0xEC56, // Rotate Then Or Selected Bits
+ RRDTR = 0xB3F7, // Reround (long DFP)
+ RRXTR = 0xB3FF, // Reround (extended DFP)
+ RSCH = 0xB238, // Resume Subchannel
+ RXSBG = 0xEC57, // Rotate Then Exclusive Or Selected Bits
+ S = 0x5B, // Subtract (32)
+ SAL = 0xB237, // Set Address Limit
+ SAR = 0xB24E, // Set Access
+ SCHM = 0xB23C, // Set Channel Monitor
+ SDB = 0xED1B, // Subtract (long BFP)
+ SDBR = 0xB31B, // Subtract (long BFP)
+ SDTR = 0xB3D3, // Subtract (long DFP)
+ SDTRA = 0xB3D3, // Subtract (long DFP)
+ SEB = 0xED0B, // Subtract (short BFP)
+ SEBR = 0xB30B, // Subtract (short BFP)
+ SFASR = 0xB385, // Set Fpc And Signal
+ SFPC = 0xB384, // Set Fpc
+ SG = 0xE309, // Subtract (64)
+ SGF = 0xE319, // Subtract (64<-32)
+ SGFR = 0xB919, // Subtract (64<-32)
+ SGR = 0xB909, // Subtract (64)
+ SGRK = 0xB9E9, // Subtract (64)
+ SH = 0x4B, // Subtract Halfword
+ SHHHR = 0xB9C9, // Subtract High (32)
+ SHHLR = 0xB9D9, // Subtract High (32)
+ SHY = 0xE37B, // Subtract Halfword
+ SL = 0x5F, // Subtract Logical (32)
+ SLA = 0x8B, // Shift Left Single (32)
+ SLAG = 0xEB0B, // Shift Left Single (64)
+ SLAK = 0xEBDD, // Shift Left Single (32)
+ SLB = 0xE399, // Subtract Logical With Borrow (32)
+ SLBG = 0xE389, // Subtract Logical With Borrow (64)
+ SLBGR = 0xB989, // Subtract Logical With Borrow (64)
+ SLBR = 0xB999, // Subtract Logical With Borrow (32)
+ SLDA = 0x8F, // Shift Left Double
+ SLDL = 0x8D, // Shift Left Double Logical
+ SLDT = 0xED40, // Shift Significand Left (long DFP)
+ SLFI = 0xC25, // Subtract Logical Immediate (32)
+ SLG = 0xE30B, // Subtract Logical (64)
+ SLGF = 0xE31B, // Subtract Logical (64<-32)
+ SLGFI = 0xC24, // Subtract Logical Immediate (64<-32)
+ SLGFR = 0xB91B, // Subtract Logical (64<-32)
+ SLGR = 0xB90B, // Subtract Logical (64)
+ SLGRK = 0xB9EB, // Subtract Logical (64)
+ SLHHHR = 0xB9CB, // Subtract Logical High (32)
+ SLHHLR = 0xB9DB, // Subtract Logical High (32)
+ SLL = 0x89, // Shift Left Single Logical (32)
+ SLLG = 0xEB0D, // Shift Left Single Logical (64)
+ SLLK = 0xEBDF, // Shift Left Single Logical (32)
+ SLR = 0x1F, // Subtract Logical (32)
+ SLRK = 0xB9FB, // Subtract Logical (32)
+ SLXT = 0xED48, // Shift Significand Left (extended DFP)
+ SLY = 0xE35F, // Subtract Logical (32)
+ SP = 0xFB, // Subtract Decimal
+ SPM = 0x04, // Set Program Mask
+ SQDB = 0xED15, // Square Root (long BFP)
+ SQDBR = 0xB315, // Square Root (long BFP)
+ SQEB = 0xED14, // Square Root (short BFP)
+ SQEBR = 0xB314, // Square Root (short BFP)
+ SQXBR = 0xB316, // Square Root (extended BFP)
+ SR = 0x1B, // Subtract (32)
+ SRA = 0x8A, // Shift Right Single (32)
+ SRAG = 0xEB0A, // Shift Right Single (64)
+ SRAK = 0xEBDC, // Shift Right Single (32)
+ SRDA = 0x8E, // Shift Right Double
+ SRDL = 0x8C, // Shift Right Double Logical
+ SRDT = 0xED41, // Shift Significand Right (long DFP)
+ SRK = 0xB9F9, // Subtract (32)
+ SRL = 0x88, // Shift Right Single Logical (32)
+ SRLG = 0xEB0C, // Shift Right Single Logical (64)
+ SRLK = 0xEBDE, // Shift Right Single Logical (32)
+ SRNM = 0xB299, // Set BFP Rounding Mode (2 bit)
+ SRNMB = 0xB2B8, // Set BFP Rounding Mode (3 bit)
+ SRNMT = 0xB2B9, // Set DFP Rounding Mode
+ SRP = 0xF0, // Shift And Round Decimal
+ SRST = 0xB25E, // Search String
+ SRSTU = 0xB9BE, // Search String Unicode
+ SRXT = 0xED49, // Shift Significand Right (extended DFP)
+ SSCH = 0xB233, // Start Subchannel
+ ST = 0x50, // Store (32)
+ STC = 0x42, // Store Character
+ STCH = 0xE3C3, // Store Character High (8)
+ STCK = 0xB205, // Store Clock
+ STCKE = 0xB278, // Store Clock Extended
+ STCKF = 0xB27C, // Store Clock Fast
+ STCM = 0xBE, // Store Characters Under Mask (low)
+ STCMH = 0xEB2C, // Store Characters Under Mask (high)
+ STCMY = 0xEB2D, // Store Characters Under Mask (low)
+ STCPS = 0xB23A, // Store Channel Path Status
+ STCRW = 0xB239, // Store Channel Report Word
+ STCY = 0xE372, // Store Character
+ STD = 0x60, // Store (long)
+ STDY = 0xED67, // Store (long)
+ STE = 0x70, // Store (short)
+ STEY = 0xED66, // Store (short)
+ STFH = 0xE3CB, // Store High (32)
+ STFLE = 0xB2B0, // Store Facility List Extended
+ STFPC = 0xB29C, // Store Fpc
+ STG = 0xE324, // Store (64)
+ STGRL = 0xC4B, // Store Relative Long (64)
+ STH = 0x40, // Store Halfword
+ STHH = 0xE3C7, // Store Halfword High (16)
+ STHRL = 0xC47, // Store Halfword Relative Long
+ STHY = 0xE370, // Store Halfword
+ STM = 0x90, // Store Multiple (32)
+ STMG = 0xEB24, // Store Multiple (64)
+ STMH = 0xEB26, // Store Multiple High
+ STMY = 0xEB90, // Store Multiple (32)
+ STOC = 0xEBF3, // Store On Condition (32)
+ STOCG = 0xEBE3, // Store On Condition (64)
+ STPQ = 0xE38E, // Store Pair To Quadword
+ STRL = 0xC4F, // Store Relative Long (32)
+ STRV = 0xE33E, // Store Reversed (32)
+ STRVG = 0xE32F, // Store Reversed (64)
+ STRVH = 0xE33F, // Store Reversed (16)
+ STSCH = 0xB234, // Store Subchannel
+ STY = 0xE350, // Store (32)
+ SVC = 0x0A, // Supervisor Call
+ SXBR = 0xB34B, // Subtract (extended BFP)
+ SXTR = 0xB3DB, // Subtract (extended DFP)
+ SXTRA = 0xB3DB, // Subtract (extended DFP)
+ SY = 0xE35B, // Subtract (32)
+ TABORT = 0xB2FC, // Transaction Abort
+ TBDR = 0xB351, // Convert HFP To BFP (long)
+ TBEDR = 0xB350, // Convert HFP To BFP (long to short)
+ TBEGIN = 0xE560, // Transaction Begin
+ TBEGINC = 0xE561, // Transaction Begin
+ TCDB = 0xED11, // Test Data Class (long BFP)
+ TCEB = 0xED10, // Test Data Class (short BFP)
+ TCXB = 0xED12, // Test Data Class (extended BFP)
+ TDCDT = 0xED54, // Test Data Class (long DFP)
+ TDCET = 0xED50, // Test Data Class (short DFP)
+ TDCXT = 0xED58, // Test Data Class (extended DFP)
+ TDGDT = 0xED55, // Test Data Group (long DFP)
+ TDGET = 0xED51, // Test Data Group (short DFP)
+ TDGXT = 0xED59, // Test Data Group (extended DFP)
+ TEND = 0xB2F8, // Transaction End
+ THDER = 0xB358, // Convert BFP To HFP (short to long)
+ THDR = 0xB359, // Convert BFP To HFP (long)
+ TM = 0x91, // Test Under Mask Si C A B1
+ TMH = 0xA70, // Test Under Mask High
+ TMHH = 0xA72, // Test Under Mask (high high)
+ TMHL = 0xA73, // Test Under Mask (high low)
+ TML = 0xA71, // Test Under Mask Low
+ TMLH = 0xA70, // Test Under Mask (low high)
+ TMLL = 0xA71, // Test Under Mask (low low)
+ TMY = 0xEB51, // Test Under Mask
+ TP = 0xEBC0, // Test Decimal
+ TPI = 0xB236, // Test Pending Interruption
+ TR = 0xDC, // Translate
+ TRAP4 = 0xB2FF, // Trap (4)
+ TRE = 0xB2A5, // Translate Extended
+ TROO = 0xB993, // Translate One To One
+ TROT = 0xB992, // Translate One To Two
+ TRT = 0xDD, // Translate And Test
+ TRTE = 0xB9BF, // Translate And Test Extended
+ TRTO = 0xB991, // Translate Two To One
+ TRTR = 0xD0, // Translate And Test Reverse
+ TRTRE = 0xB9BD, // Translate And Test Reverse Extended
+ TRTT = 0xB990, // Translate Two To Two
+ TS = 0x93, // Test And Set
+ TSCH = 0xB235, // Test Subchannel
+ UNPK = 0xF3, // Unpack
+ UNPKA = 0xEA, // Unpack Ascii
+ UNPKU = 0xE2, // Unpack Unicode
+ UPT = 0x0102, // Update Tree
+ X = 0x57, // Exclusive Or (32)
+ XC = 0xD7, // Exclusive Or (character)
+ XG = 0xE382, // Exclusive Or (64)
+ XGR = 0xB982, // Exclusive Or (64)
+ XGRK = 0xB9E7, // Exclusive Or (64)
+ XI = 0x97, // Exclusive Or (immediate)
+ XIHF = 0xC06, // Exclusive Or Immediate (high)
+ XILF = 0xC07, // Exclusive Or Immediate (low)
+ XIY = 0xEB57, // Exclusive Or (immediate)
+ XR = 0x17, // Exclusive Or (32)
+ XRK = 0xB9F7, // Exclusive Or (32)
+ XSCH = 0xB276, // Cancel Subchannel
+ XY = 0xE357, // Exclusive Or (32)
+ ZAP = 0xF8, // Zero And Add
+ BKPT = 0x0001 // GDB Software Breakpoint
+};
+
+// Instruction encoding bits and masks.
+enum {
+ // Instruction encoding bit
+ B1 = 1 << 1,
+ B4 = 1 << 4,
+ B5 = 1 << 5,
+ B7 = 1 << 7,
+ B8 = 1 << 8,
+ B9 = 1 << 9,
+ B12 = 1 << 12,
+ B18 = 1 << 18,
+ B19 = 1 << 19,
+ B20 = 1 << 20,
+ B22 = 1 << 22,
+ B23 = 1 << 23,
+ B24 = 1 << 24,
+ B25 = 1 << 25,
+ B26 = 1 << 26,
+ B27 = 1 << 27,
+ B28 = 1 << 28,
+
+ B6 = 1 << 6,
+ B10 = 1 << 10,
+ B11 = 1 << 11,
+ B16 = 1 << 16,
+ B17 = 1 << 17,
+ B21 = 1 << 21,
+
+ // Instruction bit masks
+ kCondMask = 0x1F << 21,
+ kOff12Mask = (1 << 12) - 1,
+ kImm24Mask = (1 << 24) - 1,
+ kOff16Mask = (1 << 16) - 1,
+ kImm16Mask = (1 << 16) - 1,
+ kImm26Mask = (1 << 26) - 1,
+ kBOfieldMask = 0x1f << 21,
+ kOpcodeMask = 0x3f << 26,
+ kExt2OpcodeMask = 0x1f << 1,
+ kExt5OpcodeMask = 0x3 << 2,
+ kBIMask = 0x1F << 16,
+ kBDMask = 0x14 << 2,
+ kAAMask = 0x01 << 1,
+ kLKMask = 0x01,
+ kRCMask = 0x01,
+ kTOMask = 0x1f << 21
+};
+
+// S390 instructions requires bigger shifts,
+// make them macros instead of enum because of the typing issue
+#define B32 ((uint64_t)1 << 32)
+#define B36 ((uint64_t)1 << 36)
+#define B40 ((uint64_t)1 << 40)
+const FourByteInstr kFourByteBrCondMask = 0xF << 20;
+const SixByteInstr kSixByteBrCondMask = static_cast<SixByteInstr>(0xF) << 36;
+
+// -----------------------------------------------------------------------------
+// Addressing modes and instruction variants.
+
+// Overflow Exception
+enum OEBit {
+ SetOE = 1 << 10, // Set overflow exception
+ LeaveOE = 0 << 10 // No overflow exception
+};
+
+// Record bit
+enum RCBit { // Bit 0
+ SetRC = 1, // LT,GT,EQ,SO
+ LeaveRC = 0 // None
+};
+
+// Link bit
+enum LKBit { // Bit 0
+ SetLK = 1, // Load effective address of next instruction
+ LeaveLK = 0 // No action
+};
+
+enum BOfield { // Bits 25-21
+ DCBNZF = 0 << 21, // Decrement CTR; branch if CTR != 0 and condition false
+ DCBEZF = 2 << 21, // Decrement CTR; branch if CTR == 0 and condition false
+ BF = 4 << 21, // Branch if condition false
+ DCBNZT = 8 << 21, // Decrement CTR; branch if CTR != 0 and condition true
+ DCBEZT = 10 << 21, // Decrement CTR; branch if CTR == 0 and condition true
+ BT = 12 << 21, // Branch if condition true
+ DCBNZ = 16 << 21, // Decrement CTR; branch if CTR != 0
+ DCBEZ = 18 << 21, // Decrement CTR; branch if CTR == 0
+ BA = 20 << 21 // Branch always
+};
+
+#ifdef _AIX
+#undef CR_LT
+#undef CR_GT
+#undef CR_EQ
+#undef CR_SO
+#endif
+
+enum CRBit { CR_LT = 0, CR_GT = 1, CR_EQ = 2, CR_SO = 3, CR_FU = 3 };
+
+#define CRWIDTH 4
+
+// -----------------------------------------------------------------------------
+// Supervisor Call (svc) specific support.
+
+// Special Software Interrupt codes when used in the presence of the S390
+// simulator.
+// SVC provides a 24bit immediate value. Use bits 22:0 for standard
+// SoftwareInterrupCode. Bit 23 is reserved for the stop feature.
+enum SoftwareInterruptCodes {
+ // Transition to C code
+ kCallRtRedirected = 0x0010,
+ // Breakpoint
+ kBreakpoint = 0x0000,
+ // Stop
+ kStopCode = 1 << 23
+};
+const uint32_t kStopCodeMask = kStopCode - 1;
+const uint32_t kMaxStopCode = kStopCode - 1;
+const int32_t kDefaultStopCode = -1;
+
+// FP rounding modes.
+enum FPRoundingMode {
+ RN = 0, // Round to Nearest.
+ RZ = 1, // Round towards zero.
+ RP = 2, // Round towards Plus Infinity.
+ RM = 3, // Round towards Minus Infinity.
+
+ // Aliases.
+ kRoundToNearest = RN,
+ kRoundToZero = RZ,
+ kRoundToPlusInf = RP,
+ kRoundToMinusInf = RM
+};
+
+const uint32_t kFPRoundingModeMask = 3;
+
+enum CheckForInexactConversion {
+ kCheckForInexactConversion,
+ kDontCheckForInexactConversion
+};
+
+// -----------------------------------------------------------------------------
+// Specific instructions, constants, and masks.
+
+// use TRAP4 to indicate redirection call for simulation mode
+const Instr rtCallRedirInstr = TRAP4;
+
+// -----------------------------------------------------------------------------
+// Instruction abstraction.
+
+// The class Instruction enables access to individual fields defined in the
+// z/Architecture instruction set encoding.
+class Instruction {
+ public:
+ // S390 Opcode Format Types
+ // Based on the first byte of the opcode, we can determine how to extract
+ // the entire opcode of the instruction. The various favours include:
+ enum OpcodeFormatType {
+ ONE_BYTE_OPCODE, // One Byte - Bits 0 to 7
+ TWO_BYTE_OPCODE, // Two Bytes - Bits 0 to 15
+ TWO_BYTE_DISJOINT_OPCODE, // Two Bytes - Bits 0 to 7, 40 to 47
+ THREE_NIBBLE_OPCODE // Three Nibbles - Bits 0 to 7, 12 to 15
+ };
+
+// Helper macro to define static accessors.
+// We use the cast to char* trick to bypass the strict anti-aliasing rules.
+#define DECLARE_STATIC_TYPED_ACCESSOR(return_type, Name) \
+ static inline return_type Name(Instr instr) { \
+ char* temp = reinterpret_cast<char*>(&instr); \
+ return reinterpret_cast<Instruction*>(temp)->Name(); \
+ }
+
+#define DECLARE_STATIC_ACCESSOR(Name) DECLARE_STATIC_TYPED_ACCESSOR(int, Name)
+
+ // Get the raw instruction bits.
+ template <typename T>
+ inline T InstructionBits() const {
+ return Instruction::InstructionBits<T>(reinterpret_cast<const byte*>(this));
+ }
+ inline Instr InstructionBits() const {
+ return *reinterpret_cast<const Instr*>(this);
+ }
+
+ // Set the raw instruction bits to value.
+ template <typename T>
+ inline void SetInstructionBits(T value) const {
+ Instruction::SetInstructionBits<T>(reinterpret_cast<const byte*>(this),
+ value);
+ }
+ inline void SetInstructionBits(Instr value) {
+ *reinterpret_cast<Instr*>(this) = value;
+ }
+
+ // Read one particular bit out of the instruction bits.
+ inline int Bit(int nr) const { return (InstructionBits() >> nr) & 1; }
+
+ // Read a bit field's value out of the instruction bits.
+ inline int Bits(int hi, int lo) const {
+ return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
+ }
+
+ // Read bits according to instruction type
+ template <typename T, typename U>
+ inline U Bits(int hi, int lo) const {
+ return (InstructionBits<T>() >> lo) & ((2 << (hi - lo)) - 1);
+ }
+
+ // Read a bit field out of the instruction bits.
+ inline int BitField(int hi, int lo) const {
+ return InstructionBits() & (((2 << (hi - lo)) - 1) << lo);
+ }
+
+ // Determine the instruction length
+ inline int InstructionLength() {
+ return Instruction::InstructionLength(reinterpret_cast<const byte*>(this));
+ }
+ // Extract the Instruction Opcode
+ inline Opcode S390OpcodeValue() {
+ return Instruction::S390OpcodeValue(reinterpret_cast<const byte*>(this));
+ }
+
+ // Static support.
+
+ // Read one particular bit out of the instruction bits.
+ static inline int Bit(Instr instr, int nr) { return (instr >> nr) & 1; }
+
+ // Read the value of a bit field out of the instruction bits.
+ static inline int Bits(Instr instr, int hi, int lo) {
+ return (instr >> lo) & ((2 << (hi - lo)) - 1);
+ }
+
+ // Read a bit field out of the instruction bits.
+ static inline int BitField(Instr instr, int hi, int lo) {
+ return instr & (((2 << (hi - lo)) - 1) << lo);
+ }
+
+ // Determine the instruction length of the given instruction
+ static inline int InstructionLength(const byte* instr) {
+ // Length can be determined by the first nibble.
+ // 0x0 to 0x3 => 2-bytes
+ // 0x4 to 0xB => 4-bytes
+ // 0xC to 0xF => 6-bytes
+ byte topNibble = (*instr >> 4) & 0xF;
+ if (topNibble <= 3)
+ return 2;
+ else if (topNibble <= 0xB)
+ return 4;
+ return 6;
+ }
+
+ // Returns the instruction bits of the given instruction
+ static inline uint64_t InstructionBits(const byte* instr) {
+ int length = InstructionLength(instr);
+ if (2 == length)
+ return static_cast<uint64_t>(InstructionBits<TwoByteInstr>(instr));
+ else if (4 == length)
+ return static_cast<uint64_t>(InstructionBits<FourByteInstr>(instr));
+ else
+ return InstructionBits<SixByteInstr>(instr);
+ }
+
+ // Extract the raw instruction bits
+ template <typename T>
+ static inline T InstructionBits(const byte* instr) {
+#if !V8_TARGET_LITTLE_ENDIAN
+ if (sizeof(T) <= 4) {
+ return *reinterpret_cast<const T*>(instr);
+ } else {
+ // We cannot read 8-byte instructon address directly, because for a
+ // six-byte instruction, the extra 2-byte address might not be
+ // allocated.
+ uint64_t fourBytes = *reinterpret_cast<const uint32_t*>(instr);
+ uint16_t twoBytes = *reinterpret_cast<const uint16_t*>(instr + 4);
+ return (fourBytes << 16 | twoBytes);
+ }
+#else
+ // Even on little endian hosts (simulation), the instructions
+ // are stored as big-endian in order to decode the opcode and
+ // instruction length.
+ T instr_bits = 0;
+
+ // 6-byte instrs are represented by uint64_t
+ uint32_t size = (sizeof(T) == 8) ? 6 : sizeof(T);
+
+ for (T i = 0; i < size; i++) {
+ instr_bits <<= 8;
+ instr_bits |= *(instr + i);
+ }
+ return instr_bits;
+#endif
+ }
+
+ // Set the Instruction Bits to value
+ template <typename T>
+ static inline void SetInstructionBits(byte* instr, T value) {
+#if V8_TARGET_LITTLE_ENDIAN
+ // The instruction bits are stored in big endian format even on little
+ // endian hosts, in order to decode instruction length and opcode.
+ // The following code will reverse the bytes so that the stores later
+ // (which are in native endianess) will effectively save the instruction
+ // in big endian.
+ if (sizeof(T) == 2) {
+ // Two Byte Instruction
+ value = ((value & 0x00FF) << 8) | ((value & 0xFF00) >> 8);
+ } else if (sizeof(T) == 4) {
+ // Four Byte Instruction
+ value = ((value & 0x000000FF) << 24) | ((value & 0x0000FF00) << 8) |
+ ((value & 0x00FF0000) >> 8) | ((value & 0xFF000000) >> 24);
+ } else if (sizeof(T) == 8) {
+ // Six Byte Instruction
+ uint64_t orig_value = static_cast<uint64_t>(value);
+ value = (static_cast<uint64_t>(orig_value & 0xFF) << 40) |
+ (static_cast<uint64_t>((orig_value >> 8) & 0xFF) << 32) |
+ (static_cast<uint64_t>((orig_value >> 16) & 0xFF) << 24) |
+ (static_cast<uint64_t>((orig_value >> 24) & 0xFF) << 16) |
+ (static_cast<uint64_t>((orig_value >> 32) & 0xFF) << 8) |
+ (static_cast<uint64_t>((orig_value >> 40) & 0xFF));
+ }
+#endif
+ if (sizeof(T) <= 4) {
+ *reinterpret_cast<T*>(instr) = value;
+ } else {
+#if V8_TARGET_LITTLE_ENDIAN
+ uint64_t orig_value = static_cast<uint64_t>(value);
+ *reinterpret_cast<uint32_t*>(instr) = static_cast<uint32_t>(value);
+ *reinterpret_cast<uint16_t*>(instr + 4) =
+ static_cast<uint16_t>((orig_value >> 32) & 0xFFFF);
+#else
+ *reinterpret_cast<uint32_t*>(instr) = static_cast<uint32_t>(value >> 16);
+ *reinterpret_cast<uint16_t*>(instr + 4) =
+ static_cast<uint16_t>(value & 0xFFFF);
+#endif
+ }
+ }
+
+ // Get Instruction Format Type
+ static OpcodeFormatType getOpcodeFormatType(const byte* instr) {
+ const byte firstByte = *instr;
+ // Based on Figure B-3 in z/Architecture Principles of
+ // Operation.
+
+ // 1-byte opcodes
+ // I, RR, RS, RSI, RX, SS Formats
+ if ((0x04 <= firstByte && 0x9B >= firstByte) ||
+ (0xA8 <= firstByte && 0xB1 >= firstByte) ||
+ (0xBA <= firstByte && 0xBF >= firstByte) || (0xC5 == firstByte) ||
+ (0xC7 == firstByte) || (0xD0 <= firstByte && 0xE2 >= firstByte) ||
+ (0xE8 <= firstByte && 0xEA >= firstByte) ||
+ (0xEE <= firstByte && 0xFD >= firstByte)) {
+ return ONE_BYTE_OPCODE;
+ }
+
+ // 2-byte opcodes
+ // E, IE, RRD, RRE, RRF, SIL, S, SSE Formats
+ if ((0x00 == firstByte) || // Software breakpoint 0x0001
+ (0x01 == firstByte) || (0xB2 == firstByte) || (0xB3 == firstByte) ||
+ (0xB9 == firstByte) || (0xE5 == firstByte)) {
+ return TWO_BYTE_OPCODE;
+ }
+
+ // 3-nibble opcodes
+ // RI, RIL, SSF Formats
+ if ((0xA5 == firstByte) || (0xA7 == firstByte) ||
+ (0xC0 <= firstByte && 0xCC >= firstByte)) { // C5,C7 handled above
+ return THREE_NIBBLE_OPCODE;
+ }
+ // Remaining ones are all TWO_BYTE_DISJOINT OPCODES.
+ DCHECK(InstructionLength(instr) == 6);
+ return TWO_BYTE_DISJOINT_OPCODE;
+ }
+
+ // Extract the full opcode from the instruction.
+ static inline Opcode S390OpcodeValue(const byte* instr) {
+ OpcodeFormatType opcodeType = getOpcodeFormatType(instr);
+
+ // The native instructions are encoded in big-endian format
+ // even if running on little-endian host. Hence, we need
+ // to ensure we use byte* based bit-wise logic.
+ switch (opcodeType) {
+ case ONE_BYTE_OPCODE:
+ // One Byte - Bits 0 to 7
+ return static_cast<Opcode>(*instr);
+ case TWO_BYTE_OPCODE:
+ // Two Bytes - Bits 0 to 15
+ return static_cast<Opcode>((*instr << 8) | (*(instr + 1)));
+ case TWO_BYTE_DISJOINT_OPCODE:
+ // Two Bytes - Bits 0 to 7, 40 to 47
+ return static_cast<Opcode>((*instr << 8) | (*(instr + 5) & 0xFF));
+ case THREE_NIBBLE_OPCODE:
+ // Three Nibbles - Bits 0 to 7, 12 to 15
+ return static_cast<Opcode>((*instr << 4) | (*(instr + 1) & 0xF));
+ default:
+ break;
+ }
+
+ UNREACHABLE();
+ return static_cast<Opcode>(-1);
+ }
+
+ // Fields used in Software interrupt instructions
+ inline SoftwareInterruptCodes SvcValue() const {
+ return static_cast<SoftwareInterruptCodes>(Bits<FourByteInstr, int>(15, 0));
+ }
+
+ // Instructions are read of out a code stream. The only way to get a
+ // reference to an instruction is to convert a pointer. There is no way
+ // to allocate or create instances of class Instruction.
+ // Use the At(pc) function to create references to Instruction.
+ static Instruction* At(byte* pc) {
+ return reinterpret_cast<Instruction*>(pc);
+ }
+
+ private:
+ // We need to prevent the creation of instances of class Instruction.
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
+};
+
+// I Instruction -- suspect this will not be used,
+// but implement for completeness
+class IInstruction : Instruction {
+ public:
+ inline int IValue() const { return Bits<TwoByteInstr, int>(7, 0); }
+
+ inline int size() const { return 2; }
+};
+
+// RR Instruction
+class RRInstruction : Instruction {
+ public:
+ inline int R1Value() const {
+ // the high and low parameters of Bits is the number of bits from
+ // rightmost place
+ return Bits<TwoByteInstr, int>(7, 4);
+ }
+ inline int R2Value() const { return Bits<TwoByteInstr, int>(3, 0); }
+ inline Condition M1Value() const {
+ return static_cast<Condition>(Bits<TwoByteInstr, int>(7, 4));
+ }
+
+ inline int size() const { return 2; }
+};
+
+// RRE Instruction
+class RREInstruction : Instruction {
+ public:
+ inline int R1Value() const { return Bits<FourByteInstr, int>(7, 4); }
+ inline int R2Value() const { return Bits<FourByteInstr, int>(3, 0); }
+ inline int M3Value() const { return Bits<FourByteInstr, int>(15, 12); }
+ inline int M4Value() const { return Bits<FourByteInstr, int>(19, 16); }
+ inline int size() const { return 4; }
+};
+
+// RRF Instruction
+class RRFInstruction : Instruction {
+ public:
+ inline int R1Value() const { return Bits<FourByteInstr, int>(7, 4); }
+ inline int R2Value() const { return Bits<FourByteInstr, int>(3, 0); }
+ inline int R3Value() const { return Bits<FourByteInstr, int>(15, 12); }
+ inline int M3Value() const { return Bits<FourByteInstr, int>(15, 12); }
+ inline int M4Value() const { return Bits<FourByteInstr, int>(11, 8); }
+ inline int size() const { return 4; }
+};
+
+// RRD Isntruction
+class RRDInstruction : Instruction {
+ public:
+ inline int R1Value() const { return Bits<FourByteInstr, int>(15, 12); }
+ inline int R2Value() const { return Bits<FourByteInstr, int>(3, 0); }
+ inline int R3Value() const { return Bits<FourByteInstr, int>(7, 4); }
+ inline int size() const { return 4; }
+};
+
+// RI Instruction
+class RIInstruction : Instruction {
+ public:
+ inline int R1Value() const { return Bits<FourByteInstr, int>(23, 20); }
+ inline int16_t I2Value() const { return Bits<FourByteInstr, int16_t>(15, 0); }
+ inline uint16_t I2UnsignedValue() const {
+ return Bits<FourByteInstr, uint16_t>(15, 0);
+ }
+ inline Condition M1Value() const {
+ return static_cast<Condition>(Bits<FourByteInstr, int>(23, 20));
+ }
+ inline int size() const { return 4; }
+};
+
+// RS Instruction
+class RSInstruction : Instruction {
+ public:
+ inline int R1Value() const { return Bits<FourByteInstr, int>(23, 20); }
+ inline int R3Value() const { return Bits<FourByteInstr, int>(19, 16); }
+ inline int B2Value() const { return Bits<FourByteInstr, int>(15, 12); }
+ inline unsigned int D2Value() const {
+ return Bits<FourByteInstr, unsigned int>(11, 0);
+ }
+ inline int size() const { return 4; }
+};
+
+// RSY Instruction
+class RSYInstruction : Instruction {
+ public:
+ inline int R1Value() const { return Bits<SixByteInstr, int>(39, 36); }
+ inline int R3Value() const { return Bits<SixByteInstr, int>(35, 32); }
+ inline int B2Value() const { return Bits<SixByteInstr, int>(31, 28); }
+ inline int32_t D2Value() const {
+ int32_t value = Bits<SixByteInstr, int32_t>(27, 16);
+ value += Bits<SixByteInstr, int8_t>(15, 8) << 12;
+ return value;
+ }
+ inline int size() const { return 6; }
+};
+
+// RX Instruction
+class RXInstruction : Instruction {
+ public:
+ inline int R1Value() const { return Bits<FourByteInstr, int>(23, 20); }
+ inline int X2Value() const { return Bits<FourByteInstr, int>(19, 16); }
+ inline int B2Value() const { return Bits<FourByteInstr, int>(15, 12); }
+ inline uint32_t D2Value() const {
+ return Bits<FourByteInstr, uint32_t>(11, 0);
+ }
+ inline int size() const { return 4; }
+};
+
+// RXY Instruction
+class RXYInstruction : Instruction {
+ public:
+ inline int R1Value() const { return Bits<SixByteInstr, int>(39, 36); }
+ inline int X2Value() const { return Bits<SixByteInstr, int>(35, 32); }
+ inline int B2Value() const { return Bits<SixByteInstr, int>(31, 28); }
+ inline int32_t D2Value() const {
+ int32_t value = Bits<SixByteInstr, uint32_t>(27, 16);
+ value += Bits<SixByteInstr, int8_t>(15, 8) << 12;
+ return value;
+ }
+ inline int size() const { return 6; }
+};
+
+// RIL Instruction
+class RILInstruction : Instruction {
+ public:
+ inline int R1Value() const { return Bits<SixByteInstr, int>(39, 36); }
+ inline int32_t I2Value() const { return Bits<SixByteInstr, int32_t>(31, 0); }
+ inline uint32_t I2UnsignedValue() const {
+ return Bits<SixByteInstr, uint32_t>(31, 0);
+ }
+ inline int size() const { return 6; }
+};
+
+// SI Instruction
+class SIInstruction : Instruction {
+ public:
+ inline int B1Value() const { return Bits<FourByteInstr, int>(15, 12); }
+ inline uint32_t D1Value() const {
+ return Bits<FourByteInstr, uint32_t>(11, 0);
+ }
+ inline uint8_t I2Value() const {
+ return Bits<FourByteInstr, uint8_t>(23, 16);
+ }
+ inline int size() const { return 4; }
+};
+
+// SIY Instruction
+class SIYInstruction : Instruction {
+ public:
+ inline int B1Value() const { return Bits<SixByteInstr, int>(31, 28); }
+ inline int32_t D1Value() const {
+ int32_t value = Bits<SixByteInstr, uint32_t>(27, 16);
+ value += Bits<SixByteInstr, int8_t>(15, 8) << 12;
+ return value;
+ }
+ inline uint8_t I2Value() const { return Bits<SixByteInstr, uint8_t>(39, 32); }
+ inline int size() const { return 6; }
+};
+
+// SIL Instruction
+class SILInstruction : Instruction {
+ public:
+ inline int B1Value() const { return Bits<SixByteInstr, int>(31, 28); }
+ inline int D1Value() const { return Bits<SixByteInstr, int>(27, 16); }
+ inline int I2Value() const { return Bits<SixByteInstr, int>(15, 0); }
+ inline int size() const { return 6; }
+};
+
+// SS Instruction
+class SSInstruction : Instruction {
+ public:
+ inline int B1Value() const { return Bits<SixByteInstr, int>(31, 28); }
+ inline int B2Value() const { return Bits<SixByteInstr, int>(15, 12); }
+ inline int D1Value() const { return Bits<SixByteInstr, int>(27, 16); }
+ inline int D2Value() const { return Bits<SixByteInstr, int>(11, 0); }
+ inline int Length() const { return Bits<SixByteInstr, int>(39, 32); }
+ inline int size() const { return 6; }
+};
+
+// RXE Instruction
+class RXEInstruction : Instruction {
+ public:
+ inline int R1Value() const { return Bits<SixByteInstr, int>(39, 36); }
+ inline int X2Value() const { return Bits<SixByteInstr, int>(35, 32); }
+ inline int B2Value() const { return Bits<SixByteInstr, int>(31, 28); }
+ inline int D2Value() const { return Bits<SixByteInstr, int>(27, 16); }
+ inline int size() const { return 6; }
+};
+
+// RIE Instruction
+class RIEInstruction : Instruction {
+ public:
+ inline int R1Value() const { return Bits<SixByteInstr, int>(39, 36); }
+ inline int R2Value() const { return Bits<SixByteInstr, int>(35, 32); }
+ inline int I3Value() const { return Bits<SixByteInstr, uint32_t>(31, 24); }
+ inline int I4Value() const { return Bits<SixByteInstr, uint32_t>(23, 16); }
+ inline int I5Value() const { return Bits<SixByteInstr, uint32_t>(15, 8); }
+ inline int I6Value() const {
+ return static_cast<int32_t>(Bits<SixByteInstr, int16_t>(31, 16));
+ }
+ inline int size() const { return 6; }
+};
+
+// Helper functions for converting between register numbers and names.
+class Registers {
+ public:
+ // Lookup the register number for the name provided.
+ static int Number(const char* name);
+
+ private:
+ static const char* names_[kNumRegisters];
+};
+
+// Helper functions for converting between FP register numbers and names.
+class DoubleRegisters {
+ public:
+ // Lookup the register number for the name provided.
+ static int Number(const char* name);
+
+ private:
+ static const char* names_[kNumDoubleRegisters];
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_S390_CONSTANTS_S390_H_
diff --git a/deps/v8/src/s390/cpu-s390.cc b/deps/v8/src/s390/cpu-s390.cc
new file mode 100644
index 0000000000..d0d54a8a6b
--- /dev/null
+++ b/deps/v8/src/s390/cpu-s390.cc
@@ -0,0 +1,25 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// CPU specific code for s390 independent of OS goes here.
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_S390
+#include "src/assembler.h"
+
+namespace v8 {
+namespace internal {
+
+void CpuFeatures::FlushICache(void* buffer, size_t size) {
+ // Given the strong memory model on z/Architecture, and the single
+ // thread nature of V8 and JavaScript, instruction cache flushing
+ // is not necessary. The architecture guarantees that if a core
+ // patches its own instruction cache, the updated instructions will be
+ // reflected automatically.
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_S390
diff --git a/deps/v8/src/s390/deoptimizer-s390.cc b/deps/v8/src/s390/deoptimizer-s390.cc
new file mode 100644
index 0000000000..44062d6e79
--- /dev/null
+++ b/deps/v8/src/s390/deoptimizer-s390.cc
@@ -0,0 +1,338 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/deoptimizer.h"
+#include "src/codegen.h"
+#include "src/full-codegen/full-codegen.h"
+#include "src/register-configuration.h"
+#include "src/safepoint-table.h"
+
+namespace v8 {
+namespace internal {
+
+// LAY + LGHI/LHI + BRCL
+const int Deoptimizer::table_entry_size_ = 16;
+
+int Deoptimizer::patch_size() {
+#if V8_TARGET_ARCH_S390X
+ const int kCallInstructionSize = 16;
+#else
+ const int kCallInstructionSize = 10;
+#endif
+ return kCallInstructionSize;
+}
+
+void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
+ // Empty because there is no need for relocation information for the code
+ // patching in Deoptimizer::PatchCodeForDeoptimization below.
+}
+
+void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
+ Address code_start_address = code->instruction_start();
+
+ // Invalidate the relocation information, as it will become invalid by the
+ // code patching below, and is not needed any more.
+ code->InvalidateRelocation();
+
+ if (FLAG_zap_code_space) {
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength;
+ } else {
+ pointer = code->instruction_start();
+ }
+ CodePatcher patcher(isolate, pointer, 2);
+ patcher.masm()->bkpt(0);
+
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
+ 2);
+ osr_patcher.masm()->bkpt(0);
+ }
+ }
+
+ DeoptimizationInputData* deopt_data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+#ifdef DEBUG
+ Address prev_call_address = NULL;
+#endif
+ // For each LLazyBailout instruction insert a call to the corresponding
+ // deoptimization entry.
+ for (int i = 0; i < deopt_data->DeoptCount(); i++) {
+ if (deopt_data->Pc(i)->value() == -1) continue;
+ Address call_address = code_start_address + deopt_data->Pc(i)->value();
+ Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
+ // We need calls to have a predictable size in the unoptimized code, but
+ // this is optimized code, so we don't have to have a predictable size.
+ int call_size_in_bytes = MacroAssembler::CallSizeNotPredictableCodeSize(
+ deopt_entry, kRelocInfo_NONEPTR);
+ DCHECK(call_size_in_bytes <= patch_size());
+ CodePatcher patcher(isolate, call_address, call_size_in_bytes);
+ patcher.masm()->Call(deopt_entry, kRelocInfo_NONEPTR);
+ DCHECK(prev_call_address == NULL ||
+ call_address >= prev_call_address + patch_size());
+ DCHECK(call_address + patch_size() <= code->instruction_end());
+#ifdef DEBUG
+ prev_call_address = call_address;
+#endif
+ }
+}
+
+void Deoptimizer::SetPlatformCompiledStubRegisters(
+ FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
+ ApiFunction function(descriptor->deoptimization_handler());
+ ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
+ intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
+ int params = descriptor->GetHandlerParameterCount();
+ output_frame->SetRegister(r2.code(), params);
+ output_frame->SetRegister(r3.code(), handler);
+}
+
+void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
+ for (int i = 0; i < DoubleRegister::kNumRegisters; ++i) {
+ double double_value = input_->GetDoubleRegister(i);
+ output_frame->SetDoubleRegister(i, double_value);
+ }
+}
+
+#define __ masm()->
+
+// This code tries to be close to ia32 code so that any changes can be
+// easily ported.
+void Deoptimizer::TableEntryGenerator::Generate() {
+ GeneratePrologue();
+
+ // Save all the registers onto the stack
+ const int kNumberOfRegisters = Register::kNumRegisters;
+
+ RegList restored_regs = kJSCallerSaved | kCalleeSaved;
+
+ const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
+
+ // Save all double registers before messing with them.
+ __ lay(sp, MemOperand(sp, -kDoubleRegsSize));
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister dreg = DoubleRegister::from_code(code);
+ int offset = code * kDoubleSize;
+ __ StoreDouble(dreg, MemOperand(sp, offset));
+ }
+
+ // Push all GPRs onto the stack
+ __ lay(sp, MemOperand(sp, -kNumberOfRegisters * kPointerSize));
+ __ StoreMultipleP(r0, sp, MemOperand(sp)); // Save all 16 registers
+
+ __ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ __ StoreP(fp, MemOperand(ip));
+
+ const int kSavedRegistersAreaSize =
+ (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
+
+ // Get the bailout id from the stack.
+ __ LoadP(r4, MemOperand(sp, kSavedRegistersAreaSize));
+
+ // Cleanse the Return address for 31-bit
+ __ CleanseP(r14);
+
+ // Get the address of the location in the code object (r5)(return
+ // address for lazy deoptimization) and compute the fp-to-sp delta in
+ // register r6.
+ __ LoadRR(r5, r14);
+ __ la(r6, MemOperand(sp, kSavedRegistersAreaSize + (1 * kPointerSize)));
+ __ SubP(r6, fp, r6);
+
+ // Allocate a new deoptimizer object.
+ // Pass six arguments in r2 to r7.
+ __ PrepareCallCFunction(6, r7);
+ __ LoadImmP(r2, Operand::Zero());
+ Label context_check;
+ __ LoadP(r3, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ JumpIfSmi(r3, &context_check);
+ __ LoadP(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ bind(&context_check);
+ __ LoadImmP(r3, Operand(type())); // bailout type,
+ // r4: bailout id already loaded.
+ // r5: code address or 0 already loaded.
+ // r6: Fp-to-sp delta.
+ // Parm6: isolate is passed on the stack.
+ __ mov(r7, Operand(ExternalReference::isolate_address(isolate())));
+ __ StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
+
+ // Call Deoptimizer::New().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
+ }
+
+ // Preserve "deoptimizer" object in register r2 and get the input
+ // frame descriptor pointer to r3 (deoptimizer->input_);
+ __ LoadP(r3, MemOperand(r2, Deoptimizer::input_offset()));
+
+ // Copy core registers into FrameDescription::registers_[kNumRegisters].
+ // DCHECK(Register::kNumRegisters == kNumberOfRegisters);
+ // __ mvc(MemOperand(r3, FrameDescription::registers_offset()),
+ // MemOperand(sp), kNumberOfRegisters * kPointerSize);
+ // Copy core registers into FrameDescription::registers_[kNumRegisters].
+ // TODO(john.yan): optimize the following code by using mvc instruction
+ DCHECK(Register::kNumRegisters == kNumberOfRegisters);
+ for (int i = 0; i < kNumberOfRegisters; i++) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ __ LoadP(r4, MemOperand(sp, i * kPointerSize));
+ __ StoreP(r4, MemOperand(r3, offset));
+ }
+
+ int double_regs_offset = FrameDescription::double_registers_offset();
+ // Copy double registers to
+ // double_registers_[DoubleRegister::kNumRegisters]
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ int dst_offset = code * kDoubleSize + double_regs_offset;
+ int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ // TODO(joransiu): MVC opportunity
+ __ LoadDouble(d0, MemOperand(sp, src_offset));
+ __ StoreDouble(d0, MemOperand(r3, dst_offset));
+ }
+
+ // Remove the bailout id and the saved registers from the stack.
+ __ la(sp, MemOperand(sp, kSavedRegistersAreaSize + (1 * kPointerSize)));
+
+ // Compute a pointer to the unwinding limit in register r4; that is
+ // the first stack slot not part of the input frame.
+ __ LoadP(r4, MemOperand(r3, FrameDescription::frame_size_offset()));
+ __ AddP(r4, sp);
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ la(r5, MemOperand(r3, FrameDescription::frame_content_offset()));
+ Label pop_loop;
+ Label pop_loop_header;
+ __ b(&pop_loop_header, Label::kNear);
+ __ bind(&pop_loop);
+ __ pop(r6);
+ __ StoreP(r6, MemOperand(r5, 0));
+ __ la(r5, MemOperand(r5, kPointerSize));
+ __ bind(&pop_loop_header);
+ __ CmpP(r4, sp);
+ __ bne(&pop_loop);
+
+ // Compute the output frame in the deoptimizer.
+ __ push(r2); // Preserve deoptimizer object across call.
+ // r2: deoptimizer object; r3: scratch.
+ __ PrepareCallCFunction(1, r3);
+ // Call Deoptimizer::ComputeOutputFrames().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(
+ ExternalReference::compute_output_frames_function(isolate()), 1);
+ }
+ __ pop(r2); // Restore deoptimizer object (class Deoptimizer).
+
+ __ LoadP(sp, MemOperand(r2, Deoptimizer::caller_frame_top_offset()));
+
+ // Replace the current (input) frame with the output frames.
+ Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
+ // Outer loop state: r6 = current "FrameDescription** output_",
+ // r3 = one past the last FrameDescription**.
+ __ LoadlW(r3, MemOperand(r2, Deoptimizer::output_count_offset()));
+ __ LoadP(r6, MemOperand(r2, Deoptimizer::output_offset())); // r6 is output_.
+ __ ShiftLeftP(r3, r3, Operand(kPointerSizeLog2));
+ __ AddP(r3, r6, r3);
+ __ b(&outer_loop_header, Label::kNear);
+
+ __ bind(&outer_push_loop);
+ // Inner loop state: r4 = current FrameDescription*, r5 = loop index.
+ __ LoadP(r4, MemOperand(r6, 0)); // output_[ix]
+ __ LoadP(r5, MemOperand(r4, FrameDescription::frame_size_offset()));
+ __ b(&inner_loop_header, Label::kNear);
+
+ __ bind(&inner_push_loop);
+ __ AddP(r5, Operand(-sizeof(intptr_t)));
+ __ AddP(r8, r4, r5);
+ __ LoadP(r8, MemOperand(r8, FrameDescription::frame_content_offset()));
+ __ push(r8);
+
+ __ bind(&inner_loop_header);
+ __ CmpP(r5, Operand::Zero());
+ __ bne(&inner_push_loop); // test for gt?
+
+ __ AddP(r6, r6, Operand(kPointerSize));
+ __ bind(&outer_loop_header);
+ __ CmpP(r6, r3);
+ __ blt(&outer_push_loop);
+
+ __ LoadP(r3, MemOperand(r2, Deoptimizer::input_offset()));
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister dreg = DoubleRegister::from_code(code);
+ int src_offset = code * kDoubleSize + double_regs_offset;
+ __ ld(dreg, MemOperand(r3, src_offset));
+ }
+
+ // Push state, pc, and continuation from the last output frame.
+ __ LoadP(r8, MemOperand(r4, FrameDescription::state_offset()));
+ __ push(r8);
+ __ LoadP(r8, MemOperand(r4, FrameDescription::pc_offset()));
+ __ push(r8);
+ __ LoadP(r8, MemOperand(r4, FrameDescription::continuation_offset()));
+ __ push(r8);
+
+ // Restore the registers from the last output frame.
+ __ LoadRR(r1, r4);
+ for (int i = kNumberOfRegisters - 1; i > 0; i--) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ if ((restored_regs & (1 << i)) != 0) {
+ __ LoadP(ToRegister(i), MemOperand(r1, offset));
+ }
+ }
+
+ __ InitializeRootRegister();
+
+ __ pop(ip); // get continuation, leave pc on stack
+ __ pop(r14);
+ __ Jump(ip);
+ __ stop("Unreachable.");
+}
+
+void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
+ // Create a sequence of deoptimization entries. Note that any
+ // registers may be still live.
+ Label done;
+ for (int i = 0; i < count(); i++) {
+ int start = masm()->pc_offset();
+ USE(start);
+ __ lay(sp, MemOperand(sp, -kPointerSize));
+ __ LoadImmP(ip, Operand(i));
+ __ b(&done);
+ int end = masm()->pc_offset();
+ USE(end);
+ DCHECK(masm()->pc_offset() - start == table_entry_size_);
+ }
+ __ bind(&done);
+ __ StoreP(ip, MemOperand(sp));
+}
+
+void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+}
+
+#undef __
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/s390/disasm-s390.cc b/deps/v8/src/s390/disasm-s390.cc
new file mode 100644
index 0000000000..5bab604b7b
--- /dev/null
+++ b/deps/v8/src/s390/disasm-s390.cc
@@ -0,0 +1,1421 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// A Disassembler object is used to disassemble a block of code instruction by
+// instruction. The default implementation of the NameConverter object can be
+// overriden to modify register names or to do symbol lookup on addresses.
+//
+// The example below will disassemble a block of code and print it to stdout.
+//
+// NameConverter converter;
+// Disassembler d(converter);
+// for (byte* pc = begin; pc < end;) {
+// v8::internal::EmbeddedVector<char, 256> buffer;
+// byte* prev_pc = pc;
+// pc += d.InstructionDecode(buffer, pc);
+// printf("%p %08x %s\n",
+// prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer);
+// }
+//
+// The Disassembler class also has a convenience method to disassemble a block
+// of code into a FILE*, meaning that the above functionality could also be
+// achieved by just calling Disassembler::Disassemble(stdout, begin, end);
+
+#include <assert.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/base/platform/platform.h"
+#include "src/disasm.h"
+#include "src/macro-assembler.h"
+#include "src/s390/constants-s390.h"
+
+namespace v8 {
+namespace internal {
+
+//------------------------------------------------------------------------------
+
+// Decoder decodes and disassembles instructions into an output buffer.
+// It uses the converter to convert register names and call destinations into
+// more informative description.
+class Decoder {
+ public:
+ Decoder(const disasm::NameConverter& converter, Vector<char> out_buffer)
+ : converter_(converter), out_buffer_(out_buffer), out_buffer_pos_(0) {
+ out_buffer_[out_buffer_pos_] = '\0';
+ }
+
+ ~Decoder() {}
+
+ // Writes one disassembled instruction into 'buffer' (0-terminated).
+ // Returns the length of the disassembled machine instruction in bytes.
+ int InstructionDecode(byte* instruction);
+
+ private:
+ // Bottleneck functions to print into the out_buffer.
+ void PrintChar(const char ch);
+ void Print(const char* str);
+
+ // Printing of common values.
+ void PrintRegister(int reg);
+ void PrintDRegister(int reg);
+ void PrintSoftwareInterrupt(SoftwareInterruptCodes svc);
+
+ // Handle formatting of instructions and their options.
+ int FormatRegister(Instruction* instr, const char* option);
+ int FormatFloatingRegister(Instruction* instr, const char* option);
+ int FormatMask(Instruction* instr, const char* option);
+ int FormatDisplacement(Instruction* instr, const char* option);
+ int FormatImmediate(Instruction* instr, const char* option);
+ int FormatOption(Instruction* instr, const char* option);
+ void Format(Instruction* instr, const char* format);
+ void Unknown(Instruction* instr);
+ void UnknownFormat(Instruction* instr, const char* opcname);
+
+ bool DecodeTwoByte(Instruction* instr);
+ bool DecodeFourByte(Instruction* instr);
+ bool DecodeSixByte(Instruction* instr);
+
+ const disasm::NameConverter& converter_;
+ Vector<char> out_buffer_;
+ int out_buffer_pos_;
+
+ DISALLOW_COPY_AND_ASSIGN(Decoder);
+};
+
+// Support for assertions in the Decoder formatting functions.
+#define STRING_STARTS_WITH(string, compare_string) \
+ (strncmp(string, compare_string, strlen(compare_string)) == 0)
+
+// Append the ch to the output buffer.
+void Decoder::PrintChar(const char ch) { out_buffer_[out_buffer_pos_++] = ch; }
+
+// Append the str to the output buffer.
+void Decoder::Print(const char* str) {
+ char cur = *str++;
+ while (cur != '\0' && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
+ PrintChar(cur);
+ cur = *str++;
+ }
+ out_buffer_[out_buffer_pos_] = 0;
+}
+
+// Print the register name according to the active name converter.
+void Decoder::PrintRegister(int reg) {
+ Print(converter_.NameOfCPURegister(reg));
+}
+
+// Print the double FP register name according to the active name converter.
+void Decoder::PrintDRegister(int reg) {
+ Print(DoubleRegister::from_code(reg).ToString());
+}
+
+// Print SoftwareInterrupt codes. Factoring this out reduces the complexity of
+// the FormatOption method.
+void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes svc) {
+ switch (svc) {
+ case kCallRtRedirected:
+ Print("call rt redirected");
+ return;
+ case kBreakpoint:
+ Print("breakpoint");
+ return;
+ default:
+ if (svc >= kStopCode) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d - 0x%x",
+ svc & kStopCodeMask, svc & kStopCodeMask);
+ } else {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", svc);
+ }
+ return;
+ }
+}
+
+// Handle all register based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatRegister(Instruction* instr, const char* format) {
+ DCHECK(format[0] == 'r');
+
+ if (format[1] == '1') { // 'r1: register resides in bit 8-11
+ RRInstruction* rrinstr = reinterpret_cast<RRInstruction*>(instr);
+ int reg = rrinstr->R1Value();
+ PrintRegister(reg);
+ return 2;
+ } else if (format[1] == '2') { // 'r2: register resides in bit 12-15
+ RRInstruction* rrinstr = reinterpret_cast<RRInstruction*>(instr);
+ int reg = rrinstr->R2Value();
+ // indicating it is a r0 for displacement, in which case the offset
+ // should be 0.
+ if (format[2] == 'd') {
+ if (reg == 0) return 4;
+ PrintRegister(reg);
+ return 3;
+ } else {
+ PrintRegister(reg);
+ return 2;
+ }
+ } else if (format[1] == '3') { // 'r3: register resides in bit 16-19
+ RSInstruction* rsinstr = reinterpret_cast<RSInstruction*>(instr);
+ int reg = rsinstr->B2Value();
+ PrintRegister(reg);
+ return 2;
+ } else if (format[1] == '4') { // 'r4: register resides in bit 20-23
+ RSInstruction* rsinstr = reinterpret_cast<RSInstruction*>(instr);
+ int reg = rsinstr->B2Value();
+ PrintRegister(reg);
+ return 2;
+ } else if (format[1] == '5') { // 'r5: register resides in bit 24-28
+ RREInstruction* rreinstr = reinterpret_cast<RREInstruction*>(instr);
+ int reg = rreinstr->R1Value();
+ PrintRegister(reg);
+ return 2;
+ } else if (format[1] == '6') { // 'r6: register resides in bit 29-32
+ RREInstruction* rreinstr = reinterpret_cast<RREInstruction*>(instr);
+ int reg = rreinstr->R2Value();
+ PrintRegister(reg);
+ return 2;
+ } else if (format[1] == '7') { // 'r6: register resides in bit 32-35
+ SSInstruction* ssinstr = reinterpret_cast<SSInstruction*>(instr);
+ int reg = ssinstr->B2Value();
+ PrintRegister(reg);
+ return 2;
+ }
+
+ UNREACHABLE();
+ return -1;
+}
+
+int Decoder::FormatFloatingRegister(Instruction* instr, const char* format) {
+ DCHECK(format[0] == 'f');
+
+ // reuse 1, 5 and 6 because it is coresponding
+ if (format[1] == '1') { // 'r1: register resides in bit 8-11
+ RRInstruction* rrinstr = reinterpret_cast<RRInstruction*>(instr);
+ int reg = rrinstr->R1Value();
+ PrintDRegister(reg);
+ return 2;
+ } else if (format[1] == '2') { // 'f2: register resides in bit 12-15
+ RRInstruction* rrinstr = reinterpret_cast<RRInstruction*>(instr);
+ int reg = rrinstr->R2Value();
+ PrintDRegister(reg);
+ return 2;
+ } else if (format[1] == '3') { // 'f3: register resides in bit 16-19
+ RRDInstruction* rrdinstr = reinterpret_cast<RRDInstruction*>(instr);
+ int reg = rrdinstr->R1Value();
+ PrintDRegister(reg);
+ return 2;
+ } else if (format[1] == '5') { // 'f5: register resides in bit 24-28
+ RREInstruction* rreinstr = reinterpret_cast<RREInstruction*>(instr);
+ int reg = rreinstr->R1Value();
+ PrintDRegister(reg);
+ return 2;
+ } else if (format[1] == '6') { // 'f6: register resides in bit 29-32
+ RREInstruction* rreinstr = reinterpret_cast<RREInstruction*>(instr);
+ int reg = rreinstr->R2Value();
+ PrintDRegister(reg);
+ return 2;
+ }
+ UNREACHABLE();
+ return -1;
+}
+
+// FormatOption takes a formatting string and interprets it based on
+// the current instructions. The format string points to the first
+// character of the option string (the option escape has already been
+// consumed by the caller.) FormatOption returns the number of
+// characters that were consumed from the formatting string.
+int Decoder::FormatOption(Instruction* instr, const char* format) {
+ switch (format[0]) {
+ case 'o': {
+ if (instr->Bit(10) == 1) {
+ Print("o");
+ }
+ return 1;
+ }
+ case '.': {
+ if (instr->Bit(0) == 1) {
+ Print(".");
+ } else {
+ Print(" "); // ensure consistent spacing
+ }
+ return 1;
+ }
+ case 'r': {
+ return FormatRegister(instr, format);
+ }
+ case 'f': {
+ return FormatFloatingRegister(instr, format);
+ }
+ case 'i': { // int16
+ return FormatImmediate(instr, format);
+ }
+ case 'u': { // uint16
+ int32_t value = instr->Bits(15, 0);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+ return 6;
+ }
+ case 'l': {
+ // Link (LK) Bit 0
+ if (instr->Bit(0) == 1) {
+ Print("l");
+ }
+ return 1;
+ }
+ case 'a': {
+ // Absolute Address Bit 1
+ if (instr->Bit(1) == 1) {
+ Print("a");
+ }
+ return 1;
+ }
+ case 't': { // 'target: target of branch instructions
+ // target26 or target16
+ DCHECK(STRING_STARTS_WITH(format, "target"));
+ if ((format[6] == '2') && (format[7] == '6')) {
+ int off = ((instr->Bits(25, 2)) << 8) >> 6;
+ out_buffer_pos_ += SNPrintF(
+ out_buffer_ + out_buffer_pos_, "%+d -> %s", off,
+ converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + off));
+ return 8;
+ } else if ((format[6] == '1') && (format[7] == '6')) {
+ int off = ((instr->Bits(15, 2)) << 18) >> 16;
+ out_buffer_pos_ += SNPrintF(
+ out_buffer_ + out_buffer_pos_, "%+d -> %s", off,
+ converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + off));
+ return 8;
+ }
+ case 'm': {
+ return FormatMask(instr, format);
+ }
+ }
+ case 'd': { // ds value for offset
+ return FormatDisplacement(instr, format);
+ }
+ default: {
+ UNREACHABLE();
+ break;
+ }
+ }
+
+ UNREACHABLE();
+ return -1;
+}
+
+int Decoder::FormatMask(Instruction* instr, const char* format) {
+ DCHECK(format[0] == 'm');
+ int32_t value = 0;
+ if ((format[1] == '1')) { // prints the mask format in bits 8-12
+ value = reinterpret_cast<RRInstruction*>(instr)->R1Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", value);
+ return 2;
+ } else if (format[1] == '2') { // mask format in bits 16-19
+ value = reinterpret_cast<RXInstruction*>(instr)->B2Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", value);
+ return 2;
+ } else if (format[1] == '3') { // mask format in bits 20-23
+ value = reinterpret_cast<RRFInstruction*>(instr)->M4Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", value);
+ return 2;
+ }
+
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+ return 2;
+}
+
+int Decoder::FormatDisplacement(Instruction* instr, const char* format) {
+ DCHECK(format[0] == 'd');
+
+ if (format[1] == '1') { // displacement in 20-31
+ RSInstruction* rsinstr = reinterpret_cast<RSInstruction*>(instr);
+ uint16_t value = rsinstr->D2Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+
+ return 2;
+ } else if (format[1] == '2') { // displacement in 20-39
+ RXYInstruction* rxyinstr = reinterpret_cast<RXYInstruction*>(instr);
+ int32_t value = rxyinstr->D2Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+ return 2;
+ } else if (format[1] == '4') { // SS displacement 2 36-47
+ SSInstruction* ssInstr = reinterpret_cast<SSInstruction*>(instr);
+ uint16_t value = ssInstr->D2Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+ return 2;
+ } else if (format[1] == '3') { // SS displacement 1 20 - 32
+ SSInstruction* ssInstr = reinterpret_cast<SSInstruction*>(instr);
+ uint16_t value = ssInstr->D1Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+ return 2;
+ } else { // s390 specific
+ int32_t value = SIGN_EXT_IMM16(instr->Bits(15, 0) & ~3);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+ return 1;
+ }
+}
+
+int Decoder::FormatImmediate(Instruction* instr, const char* format) {
+ DCHECK(format[0] == 'i');
+
+ if (format[1] == '1') { // immediate in 16-31
+ RIInstruction* riinstr = reinterpret_cast<RIInstruction*>(instr);
+ int16_t value = riinstr->I2Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+ return 2;
+ } else if (format[1] == '2') { // immediate in 16-48
+ RILInstruction* rilinstr = reinterpret_cast<RILInstruction*>(instr);
+ int32_t value = rilinstr->I2Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+ return 2;
+ } else if (format[1] == '3') { // immediate in I format
+ IInstruction* iinstr = reinterpret_cast<IInstruction*>(instr);
+ int8_t value = iinstr->IValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+ return 2;
+ } else if (format[1] == '4') { // immediate in 16-31, but outputs as offset
+ RIInstruction* riinstr = reinterpret_cast<RIInstruction*>(instr);
+ int16_t value = riinstr->I2Value() * 2;
+ if (value >= 0)
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "*+");
+ else
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "*");
+
+ out_buffer_pos_ += SNPrintF(
+ out_buffer_ + out_buffer_pos_, "%d -> %s", value,
+ converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + value));
+ return 2;
+ } else if (format[1] == '5') { // immediate in 16-31, but outputs as offset
+ RILInstruction* rilinstr = reinterpret_cast<RILInstruction*>(instr);
+ int32_t value = rilinstr->I2Value() * 2;
+ if (value >= 0)
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "*+");
+ else
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "*");
+
+ out_buffer_pos_ += SNPrintF(
+ out_buffer_ + out_buffer_pos_, "%d -> %s", value,
+ converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + value));
+ return 2;
+ } else if (format[1] == '6') { // unsigned immediate in 16-31
+ RIInstruction* riinstr = reinterpret_cast<RIInstruction*>(instr);
+ uint16_t value = riinstr->I2UnsignedValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+ return 2;
+ } else if (format[1] == '7') { // unsigned immediate in 16-47
+ RILInstruction* rilinstr = reinterpret_cast<RILInstruction*>(instr);
+ uint32_t value = rilinstr->I2UnsignedValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+ return 2;
+ } else if (format[1] == '8') { // unsigned immediate in 8-15
+ SSInstruction* ssinstr = reinterpret_cast<SSInstruction*>(instr);
+ uint8_t value = ssinstr->Length();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+ return 2;
+ } else if (format[1] == '9') { // unsigned immediate in 16-23
+ RIEInstruction* rie_instr = reinterpret_cast<RIEInstruction*>(instr);
+ uint8_t value = rie_instr->I3Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+ return 2;
+ } else if (format[1] == 'a') { // unsigned immediate in 24-31
+ RIEInstruction* rie_instr = reinterpret_cast<RIEInstruction*>(instr);
+ uint8_t value = rie_instr->I4Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+ return 2;
+ } else if (format[1] == 'b') { // unsigned immediate in 32-39
+ RIEInstruction* rie_instr = reinterpret_cast<RIEInstruction*>(instr);
+ uint8_t value = rie_instr->I5Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+ return 2;
+ } else if (format[1] == 'c') { // signed immediate in 8-15
+ SSInstruction* ssinstr = reinterpret_cast<SSInstruction*>(instr);
+ int8_t value = ssinstr->Length();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+ return 2;
+ } else if (format[1] == 'd') { // signed immediate in 32-47
+ SILInstruction* silinstr = reinterpret_cast<SILInstruction*>(instr);
+ int16_t value = silinstr->I2Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+ return 2;
+ } else if (format[1] == 'e') { // immediate in 16-47, but outputs as offset
+ RILInstruction* rilinstr = reinterpret_cast<RILInstruction*>(instr);
+ int32_t value = rilinstr->I2Value() * 2;
+ if (value >= 0)
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "*+");
+ else
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "*");
+
+ out_buffer_pos_ += SNPrintF(
+ out_buffer_ + out_buffer_pos_, "%d -> %s", value,
+ converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + value));
+ return 2;
+ }
+
+ UNREACHABLE();
+ return -1;
+}
+
+// Format takes a formatting string for a whole instruction and prints it into
+// the output buffer. All escaped options are handed to FormatOption to be
+// parsed further.
+void Decoder::Format(Instruction* instr, const char* format) {
+ char cur = *format++;
+ while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
+ if (cur == '\'') { // Single quote is used as the formatting escape.
+ format += FormatOption(instr, format);
+ } else {
+ out_buffer_[out_buffer_pos_++] = cur;
+ }
+ cur = *format++;
+ }
+ out_buffer_[out_buffer_pos_] = '\0';
+}
+
+// The disassembler may end up decoding data inlined in the code. We do not want
+// it to crash if the data does not ressemble any known instruction.
+#define VERIFY(condition) \
+ if (!(condition)) { \
+ Unknown(instr); \
+ return; \
+ }
+
+// For currently unimplemented decodings the disassembler calls Unknown(instr)
+// which will just print "unknown" of the instruction bits.
+void Decoder::Unknown(Instruction* instr) { Format(instr, "unknown"); }
+
+// For currently unimplemented decodings the disassembler calls
+// UnknownFormat(instr) which will just print opcode name of the
+// instruction bits.
+void Decoder::UnknownFormat(Instruction* instr, const char* name) {
+ char buffer[100];
+ snprintf(buffer, sizeof(buffer), "%s (unknown-format)", name);
+ Format(instr, buffer);
+}
+
+// Disassembles Two Byte S390 Instructions
+// @return true if successfully decoded
+bool Decoder::DecodeTwoByte(Instruction* instr) {
+ // Print the Instruction bits.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%04x ",
+ instr->InstructionBits<TwoByteInstr>());
+
+ Opcode opcode = instr->S390OpcodeValue();
+ switch (opcode) {
+ case AR:
+ Format(instr, "ar\t'r1,'r2");
+ break;
+ case SR:
+ Format(instr, "sr\t'r1,'r2");
+ break;
+ case MR:
+ Format(instr, "mr\t'r1,'r2");
+ break;
+ case DR:
+ Format(instr, "dr\t'r1,'r2");
+ break;
+ case OR:
+ Format(instr, "or\t'r1,'r2");
+ break;
+ case NR:
+ Format(instr, "nr\t'r1,'r2");
+ break;
+ case XR:
+ Format(instr, "xr\t'r1,'r2");
+ break;
+ case LR:
+ Format(instr, "lr\t'r1,'r2");
+ break;
+ case CR:
+ Format(instr, "cr\t'r1,'r2");
+ break;
+ case CLR:
+ Format(instr, "clr\t'r1,'r2");
+ break;
+ case BCR:
+ Format(instr, "bcr\t'm1,'r2");
+ break;
+ case LTR:
+ Format(instr, "ltr\t'r1,'r2");
+ break;
+ case ALR:
+ Format(instr, "alr\t'r1,'r2");
+ break;
+ case SLR:
+ Format(instr, "slr\t'r1,'r2");
+ break;
+ case LNR:
+ Format(instr, "lnr\t'r1,'r2");
+ break;
+ case LCR:
+ Format(instr, "lcr\t'r1,'r2");
+ break;
+ case BASR:
+ Format(instr, "basr\t'r1,'r2");
+ break;
+ case LDR:
+ Format(instr, "ldr\t'f1,'f2");
+ break;
+ case BKPT:
+ Format(instr, "bkpt");
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+// Disassembles Four Byte S390 Instructions
+// @return true if successfully decoded
+bool Decoder::DecodeFourByte(Instruction* instr) {
+ // Print the Instruction bits.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%08x ",
+ instr->InstructionBits<FourByteInstr>());
+
+ Opcode opcode = instr->S390OpcodeValue();
+ switch (opcode) {
+ case AHI:
+ Format(instr, "ahi\t'r1,'i1");
+ break;
+ case AGHI:
+ Format(instr, "aghi\t'r1,'i1");
+ break;
+ case LHI:
+ Format(instr, "lhi\t'r1,'i1");
+ break;
+ case LGHI:
+ Format(instr, "lghi\t'r1,'i1");
+ break;
+ case MHI:
+ Format(instr, "mhi\t'r1,'i1");
+ break;
+ case MGHI:
+ Format(instr, "mghi\t'r1,'i1");
+ break;
+ case CHI:
+ Format(instr, "chi\t'r1,'i1");
+ break;
+ case CGHI:
+ Format(instr, "cghi\t'r1,'i1");
+ break;
+ case BRAS:
+ Format(instr, "bras\t'r1,'i1");
+ break;
+ case BRC:
+ Format(instr, "brc\t'm1,'i4");
+ break;
+ case BRCT:
+ Format(instr, "brct\t'r1,'i4");
+ break;
+ case BRCTG:
+ Format(instr, "brctg\t'r1,'i4");
+ break;
+ case IIHH:
+ Format(instr, "iihh\t'r1,'i1");
+ break;
+ case IIHL:
+ Format(instr, "iihl\t'r1,'i1");
+ break;
+ case IILH:
+ Format(instr, "iilh\t'r1,'i1");
+ break;
+ case IILL:
+ Format(instr, "iill\t'r1,'i1");
+ break;
+ case OILL:
+ Format(instr, "oill\t'r1,'i1");
+ break;
+ case TMLL:
+ Format(instr, "tmll\t'r1,'i1");
+ break;
+ case STM:
+ Format(instr, "stm\t'r1,'r2,'d1('r3)");
+ break;
+ case LM:
+ Format(instr, "lm\t'r1,'r2,'d1('r3)");
+ break;
+ case SLL:
+ Format(instr, "sll\t'r1,'d1('r3)");
+ break;
+ case SRL:
+ Format(instr, "srl\t'r1,'d1('r3)");
+ break;
+ case SLA:
+ Format(instr, "sla\t'r1,'d1('r3)");
+ break;
+ case SRA:
+ Format(instr, "sra\t'r1,'d1('r3)");
+ break;
+ case SLDL:
+ Format(instr, "sldl\t'r1,'d1('r3)");
+ break;
+ case AGR:
+ Format(instr, "agr\t'r5,'r6");
+ break;
+ case AGFR:
+ Format(instr, "agfr\t'r5,'r6");
+ break;
+ case ARK:
+ Format(instr, "ark\t'r5,'r6,'r3");
+ break;
+ case AGRK:
+ Format(instr, "agrk\t'r5,'r6,'r3");
+ break;
+ case SGR:
+ Format(instr, "sgr\t'r5,'r6");
+ break;
+ case SGFR:
+ Format(instr, "sgfr\t'r5,'r6");
+ break;
+ case SRK:
+ Format(instr, "srk\t'r5,'r6,'r3");
+ break;
+ case SGRK:
+ Format(instr, "sgrk\t'r5,'r6,'r3");
+ break;
+ case NGR:
+ Format(instr, "ngr\t'r5,'r6");
+ break;
+ case NRK:
+ Format(instr, "nrk\t'r5,'r6,'r3");
+ break;
+ case NGRK:
+ Format(instr, "ngrk\t'r5,'r6,'r3");
+ break;
+ case NILL:
+ Format(instr, "nill\t'r1,'i1");
+ break;
+ case NILH:
+ Format(instr, "nilh\t'r1,'i1");
+ break;
+ case OGR:
+ Format(instr, "ogr\t'r5,'r6");
+ break;
+ case ORK:
+ Format(instr, "ork\t'r5,'r6,'r3");
+ break;
+ case OGRK:
+ Format(instr, "ogrk\t'r5,'r6,'r3");
+ break;
+ case XGR:
+ Format(instr, "xgr\t'r5,'r6");
+ break;
+ case XRK:
+ Format(instr, "xrk\t'r5,'r6,'r3");
+ break;
+ case XGRK:
+ Format(instr, "xgrk\t'r5,'r6,'r3");
+ break;
+ case CGR:
+ Format(instr, "cgr\t'r5,'r6");
+ break;
+ case CLGR:
+ Format(instr, "clgr\t'r5,'r6");
+ break;
+ case LLGFR:
+ Format(instr, "llgfr\t'r5,'r6");
+ break;
+ case LBR:
+ Format(instr, "lbr\t'r5,'r6");
+ break;
+ case LEDBR:
+ Format(instr, "ledbr\t'f5,'f6");
+ break;
+ case LDEBR:
+ Format(instr, "ldebr\t'f5,'f6");
+ break;
+ case LTGR:
+ Format(instr, "ltgr\t'r5,'r6");
+ break;
+ case LTDBR:
+ Format(instr, "ltdbr\t'f5,'f6");
+ break;
+ case LTEBR:
+ Format(instr, "ltebr\t'f5,'f6");
+ break;
+ case LGR:
+ Format(instr, "lgr\t'r5,'r6");
+ break;
+ case LGDR:
+ Format(instr, "lgdr\t'r5,'f6");
+ break;
+ case LGFR:
+ Format(instr, "lgfr\t'r5,'r6");
+ break;
+ case LTGFR:
+ Format(instr, "ltgfr\t'r5,'r6");
+ break;
+ case LCGR:
+ Format(instr, "lcgr\t'r5,'r6");
+ break;
+ case MSR:
+ Format(instr, "msr\t'r5,'r6");
+ break;
+ case LGBR:
+ Format(instr, "lgbr\t'r5,'r6");
+ break;
+ case LGHR:
+ Format(instr, "lghr\t'r5,'r6");
+ break;
+ case MSGR:
+ Format(instr, "msgr\t'r5,'r6");
+ break;
+ case DSGR:
+ Format(instr, "dsgr\t'r5,'r6");
+ break;
+ case LZDR:
+ Format(instr, "lzdr\t'f5");
+ break;
+ case MLR:
+ Format(instr, "mlr\t'r5,'r6");
+ break;
+ case MLGR:
+ Format(instr, "mlgr\t'r5,'r6");
+ break;
+ case ALCR:
+ Format(instr, "alcr\t'r5,'r6");
+ break;
+ case ALGR:
+ Format(instr, "algr\t'r5,'r6");
+ break;
+ case ALRK:
+ Format(instr, "alrk\t'r5,'r6,'r3");
+ break;
+ case ALGRK:
+ Format(instr, "algrk\t'r5,'r6,'r3");
+ break;
+ case SLGR:
+ Format(instr, "slgr\t'r5,'r6");
+ break;
+ case SLBR:
+ Format(instr, "slbr\t'r5,'r6");
+ break;
+ case DLR:
+ Format(instr, "dlr\t'r5,'r6");
+ break;
+ case DLGR:
+ Format(instr, "dlgr\t'r5,'r6");
+ break;
+ case SLRK:
+ Format(instr, "slrk\t'r5,'r6,'r3");
+ break;
+ case SLGRK:
+ Format(instr, "slgrk\t'r5,'r6,'r3");
+ break;
+ case LHR:
+ Format(instr, "lhr\t'r5,'r6");
+ break;
+ case LLHR:
+ Format(instr, "llhr\t'r5,'r6");
+ break;
+ case LLGHR:
+ Format(instr, "llghr\t'r5,'r6");
+ break;
+ case LNGR:
+ Format(instr, "lngr\t'r5,'r6");
+ break;
+ case A:
+ Format(instr, "a\t'r1,'d1('r2d,'r3)");
+ break;
+ case S:
+ Format(instr, "s\t'r1,'d1('r2d,'r3)");
+ break;
+ case M:
+ Format(instr, "m\t'r1,'d1('r2d,'r3)");
+ break;
+ case D:
+ Format(instr, "d\t'r1,'d1('r2d,'r3)");
+ break;
+ case O:
+ Format(instr, "o\t'r1,'d1('r2d,'r3)");
+ break;
+ case N:
+ Format(instr, "n\t'r1,'d1('r2d,'r3)");
+ break;
+ case L:
+ Format(instr, "l\t'r1,'d1('r2d,'r3)");
+ break;
+ case C:
+ Format(instr, "c\t'r1,'d1('r2d,'r3)");
+ break;
+ case AH:
+ Format(instr, "ah\t'r1,'d1('r2d,'r3)");
+ break;
+ case SH:
+ Format(instr, "sh\t'r1,'d1('r2d,'r3)");
+ break;
+ case MH:
+ Format(instr, "mh\t'r1,'d1('r2d,'r3)");
+ break;
+ case AL:
+ Format(instr, "al\t'r1,'d1('r2d,'r3)");
+ break;
+ case SL:
+ Format(instr, "sl\t'r1,'d1('r2d,'r3)");
+ break;
+ case LA:
+ Format(instr, "la\t'r1,'d1('r2d,'r3)");
+ break;
+ case CH:
+ Format(instr, "ch\t'r1,'d1('r2d,'r3)");
+ break;
+ case CL:
+ Format(instr, "cl\t'r1,'d1('r2d,'r3)");
+ break;
+ case CLI:
+ Format(instr, "cli\t'd1('r3),'i8");
+ break;
+ case TM:
+ Format(instr, "tm\t'd1('r3),'i8");
+ break;
+ case BC:
+ Format(instr, "bc\t'm1,'d1('r2d,'r3)");
+ break;
+ case BCT:
+ Format(instr, "bct\t'r1,'d1('r2d,'r3)");
+ break;
+ case ST:
+ Format(instr, "st\t'r1,'d1('r2d,'r3)");
+ break;
+ case STC:
+ Format(instr, "stc\t'r1,'d1('r2d,'r3)");
+ break;
+ case IC_z:
+ Format(instr, "ic\t'r1,'d1('r2d,'r3)");
+ break;
+ case LD:
+ Format(instr, "ld\t'f1,'d1('r2d,'r3)");
+ break;
+ case LE:
+ Format(instr, "le\t'f1,'d1('r2d,'r3)");
+ break;
+ case LDGR:
+ Format(instr, "ldgr\t'f5,'r6");
+ break;
+ case STE:
+ Format(instr, "ste\t'f1,'d1('r2d,'r3)");
+ break;
+ case STD:
+ Format(instr, "std\t'f1,'d1('r2d,'r3)");
+ break;
+ case CFDBR:
+ Format(instr, "cfdbr\t'r5,'m2,'f6");
+ break;
+ case CDFBR:
+ Format(instr, "cdfbr\t'f5,'m2,'r6");
+ break;
+ case CFEBR:
+ Format(instr, "cfebr\t'r5,'m2,'f6");
+ break;
+ case CEFBR:
+ Format(instr, "cefbr\t'f5,'m2,'r6");
+ break;
+ case CGEBR:
+ Format(instr, "cgebr\t'r5,'m2,'f6");
+ break;
+ case CGDBR:
+ Format(instr, "cgdbr\t'r5,'m2,'f6");
+ break;
+ case CEGBR:
+ Format(instr, "cegbr\t'f5,'m2,'r6");
+ break;
+ case CDGBR:
+ Format(instr, "cdgbr\t'f5,'m2,'r6");
+ break;
+ case CDLFBR:
+ Format(instr, "cdlfbr\t'f5,'m2,'r6");
+ break;
+ case CDLGBR:
+ Format(instr, "cdlgbr\t'f5,'m2,'r6");
+ break;
+ case CELGBR:
+ Format(instr, "celgbr\t'f5,'m2,'r6");
+ break;
+ case CLFDBR:
+ Format(instr, "clfdbr\t'r5,'m2,'f6");
+ break;
+ case CLGDBR:
+ Format(instr, "clgdbr\t'r5,'m2,'f6");
+ break;
+ case AEBR:
+ Format(instr, "aebr\t'f5,'f6");
+ break;
+ case SEBR:
+ Format(instr, "sebr\t'f5,'f6");
+ break;
+ case MEEBR:
+ Format(instr, "meebr\t'f5,'f6");
+ break;
+ case DEBR:
+ Format(instr, "debr\t'f5,'f6");
+ break;
+ case ADBR:
+ Format(instr, "adbr\t'f5,'f6");
+ break;
+ case SDBR:
+ Format(instr, "sdbr\t'f5,'f6");
+ break;
+ case MDBR:
+ Format(instr, "mdbr\t'f5,'f6");
+ break;
+ case DDBR:
+ Format(instr, "ddbr\t'f5,'f6");
+ break;
+ case CDBR:
+ Format(instr, "cdbr\t'f5,'f6");
+ break;
+ case CEBR:
+ Format(instr, "cebr\t'f5,'f6");
+ break;
+ case SQDBR:
+ Format(instr, "sqdbr\t'f5,'f6");
+ break;
+ case SQEBR:
+ Format(instr, "sqebr\t'f5,'f6");
+ break;
+ case LCDBR:
+ Format(instr, "lcdbr\t'f5,'f6");
+ break;
+ case STH:
+ Format(instr, "sth\t'r1,'d1('r2d,'r3)");
+ break;
+ case SRDA:
+ Format(instr, "srda\t'r1,'d1('r3)");
+ break;
+ case SRDL:
+ Format(instr, "srdl\t'r1,'d1('r3)");
+ break;
+ case MADBR:
+ Format(instr, "madbr\t'f3,'f5,'f6");
+ break;
+ case MSDBR:
+ Format(instr, "msdbr\t'f3,'f5,'f6");
+ break;
+ case FLOGR:
+ Format(instr, "flogr\t'r5,'r6");
+ break;
+ case FIEBRA:
+ Format(instr, "fiebra\t'f5,'m2,'f6,'m3");
+ break;
+ case FIDBRA:
+ Format(instr, "fidbra\t'f5,'m2,'f6,'m3");
+ break;
+ // TRAP4 is used in calling to native function. it will not be generated
+ // in native code.
+ case TRAP4: {
+ Format(instr, "trap4");
+ break;
+ }
+ default:
+ return false;
+ }
+ return true;
+}
+
+// Disassembles Six Byte S390 Instructions
+// @return true if successfully decoded
+bool Decoder::DecodeSixByte(Instruction* instr) {
+ // Print the Instruction bits.
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%012" PRIx64 " ",
+ instr->InstructionBits<SixByteInstr>());
+
+ Opcode opcode = instr->S390OpcodeValue();
+ switch (opcode) {
+ case LLILF:
+ Format(instr, "llilf\t'r1,'i7");
+ break;
+ case LLIHF:
+ Format(instr, "llihf\t'r1,'i7");
+ break;
+ case AFI:
+ Format(instr, "afi\t'r1,'i7");
+ break;
+ case ASI:
+ Format(instr, "asi\t'd2('r3),'ic");
+ break;
+ case AGSI:
+ Format(instr, "agsi\t'd2('r3),'ic");
+ break;
+ case ALFI:
+ Format(instr, "alfi\t'r1,'i7");
+ break;
+ case AHIK:
+ Format(instr, "ahik\t'r1,'r2,'i1");
+ break;
+ case AGHIK:
+ Format(instr, "aghik\t'r1,'r2,'i1");
+ break;
+ case CLGFI:
+ Format(instr, "clgfi\t'r1,'i7");
+ break;
+ case CLFI:
+ Format(instr, "clfi\t'r1,'i7");
+ break;
+ case CFI:
+ Format(instr, "cfi\t'r1,'i2");
+ break;
+ case CGFI:
+ Format(instr, "cgfi\t'r1,'i2");
+ break;
+ case BRASL:
+ Format(instr, "brasl\t'r1,'ie");
+ break;
+ case BRCL:
+ Format(instr, "brcl\t'm1,'i5");
+ break;
+ case IIHF:
+ Format(instr, "iihf\t'r1,'i7");
+ break;
+ case IILF:
+ Format(instr, "iilf\t'r1,'i7");
+ break;
+ case XIHF:
+ Format(instr, "xihf\t'r1,'i7");
+ break;
+ case XILF:
+ Format(instr, "xilf\t'r1,'i7");
+ break;
+ case SLLK:
+ Format(instr, "sllk\t'r1,'r2,'d2('r3)");
+ break;
+ case SLLG:
+ Format(instr, "sllg\t'r1,'r2,'d2('r3)");
+ break;
+ case RLL:
+ Format(instr, "rll\t'r1,'r2,'d2('r3)");
+ break;
+ case RLLG:
+ Format(instr, "rllg\t'r1,'r2,'d2('r3)");
+ break;
+ case SRLK:
+ Format(instr, "srlk\t'r1,'r2,'d2('r3)");
+ break;
+ case SRLG:
+ Format(instr, "srlg\t'r1,'r2,'d2('r3)");
+ break;
+ case SLAK:
+ Format(instr, "slak\t'r1,'r2,'d2('r3)");
+ break;
+ case SLAG:
+ Format(instr, "slag\t'r1,'r2,'d2('r3)");
+ break;
+ case SRAK:
+ Format(instr, "srak\t'r1,'r2,'d2('r3)");
+ break;
+ case SRAG:
+ Format(instr, "srag\t'r1,'r2,'d2('r3)");
+ break;
+ case RISBG:
+ Format(instr, "risbg\t'r1,'r2,'i9,'ia,'ib");
+ break;
+ case RISBGN:
+ Format(instr, "risbgn\t'r1,'r2,'i9,'ia,'ib");
+ break;
+ case LMY:
+ Format(instr, "lmy\t'r1,'r2,'d2('r3)");
+ break;
+ case LMG:
+ Format(instr, "lmg\t'r1,'r2,'d2('r3)");
+ break;
+ case STMY:
+ Format(instr, "stmy\t'r1,'r2,'d2('r3)");
+ break;
+ case STMG:
+ Format(instr, "stmg\t'r1,'r2,'d2('r3)");
+ break;
+ case LT:
+ Format(instr, "lt\t'r1,'d2('r2d,'r3)");
+ break;
+ case LTG:
+ Format(instr, "ltg\t'r1,'d2('r2d,'r3)");
+ break;
+ case ML:
+ Format(instr, "ml\t'r1,'d2('r2d,'r3)");
+ break;
+ case AY:
+ Format(instr, "ay\t'r1,'d2('r2d,'r3)");
+ break;
+ case SY:
+ Format(instr, "sy\t'r1,'d2('r2d,'r3)");
+ break;
+ case NY:
+ Format(instr, "ny\t'r1,'d2('r2d,'r3)");
+ break;
+ case OY:
+ Format(instr, "oy\t'r1,'d2('r2d,'r3)");
+ break;
+ case XY:
+ Format(instr, "xy\t'r1,'d2('r2d,'r3)");
+ break;
+ case CY:
+ Format(instr, "cy\t'r1,'d2('r2d,'r3)");
+ break;
+ case AHY:
+ Format(instr, "ahy\t'r1,'d2('r2d,'r3)");
+ break;
+ case SHY:
+ Format(instr, "shy\t'r1,'d2('r2d,'r3)");
+ break;
+ case LGH:
+ Format(instr, "lgh\t'r1,'d2('r2d,'r3)");
+ break;
+ case AG:
+ Format(instr, "ag\t'r1,'d2('r2d,'r3)");
+ break;
+ case AGF:
+ Format(instr, "agf\t'r1,'d2('r2d,'r3)");
+ break;
+ case SG:
+ Format(instr, "sg\t'r1,'d2('r2d,'r3)");
+ break;
+ case NG:
+ Format(instr, "ng\t'r1,'d2('r2d,'r3)");
+ break;
+ case OG:
+ Format(instr, "og\t'r1,'d2('r2d,'r3)");
+ break;
+ case XG:
+ Format(instr, "xg\t'r1,'d2('r2d,'r3)");
+ break;
+ case CG:
+ Format(instr, "cg\t'r1,'d2('r2d,'r3)");
+ break;
+ case LB:
+ Format(instr, "lb\t'r1,'d2('r2d,'r3)");
+ break;
+ case LG:
+ Format(instr, "lg\t'r1,'d2('r2d,'r3)");
+ break;
+ case LGF:
+ Format(instr, "lgf\t'r1,'d2('r2d,'r3)");
+ break;
+ case LLGF:
+ Format(instr, "llgf\t'r1,'d2('r2d,'r3)");
+ break;
+ case LY:
+ Format(instr, "ly\t'r1,'d2('r2d,'r3)");
+ break;
+ case ALY:
+ Format(instr, "aly\t'r1,'d2('r2d,'r3)");
+ break;
+ case ALG:
+ Format(instr, "alg\t'r1,'d2('r2d,'r3)");
+ break;
+ case SLG:
+ Format(instr, "slg\t'r1,'d2('r2d,'r3)");
+ break;
+ case SGF:
+ Format(instr, "sgf\t'r1,'d2('r2d,'r3)");
+ break;
+ case SLY:
+ Format(instr, "sly\t'r1,'d2('r2d,'r3)");
+ break;
+ case LLH:
+ Format(instr, "llh\t'r1,'d2('r2d,'r3)");
+ break;
+ case LLGH:
+ Format(instr, "llgh\t'r1,'d2('r2d,'r3)");
+ break;
+ case LLC:
+ Format(instr, "llc\t'r1,'d2('r2d,'r3)");
+ break;
+ case LLGC:
+ Format(instr, "llgc\t'r1,'d2('r2d,'r3)");
+ break;
+ case LDEB:
+ Format(instr, "ldeb\t'f1,'d2('r2d,'r3)");
+ break;
+ case LAY:
+ Format(instr, "lay\t'r1,'d2('r2d,'r3)");
+ break;
+ case LARL:
+ Format(instr, "larl\t'r1,'i5");
+ break;
+ case LGB:
+ Format(instr, "lgb\t'r1,'d2('r2d,'r3)");
+ break;
+ case CHY:
+ Format(instr, "chy\t'r1,'d2('r2d,'r3)");
+ break;
+ case CLY:
+ Format(instr, "cly\t'r1,'d2('r2d,'r3)");
+ break;
+ case CLIY:
+ Format(instr, "cliy\t'd2('r3),'i8");
+ break;
+ case TMY:
+ Format(instr, "tmy\t'd2('r3),'i8");
+ break;
+ case CLG:
+ Format(instr, "clg\t'r1,'d2('r2d,'r3)");
+ break;
+ case BCTG:
+ Format(instr, "bctg\t'r1,'d2('r2d,'r3)");
+ break;
+ case STY:
+ Format(instr, "sty\t'r1,'d2('r2d,'r3)");
+ break;
+ case STG:
+ Format(instr, "stg\t'r1,'d2('r2d,'r3)");
+ break;
+ case ICY:
+ Format(instr, "icy\t'r1,'d2('r2d,'r3)");
+ break;
+ case MVC:
+ Format(instr, "mvc\t'd3('i8,'r3),'d4('r7)");
+ break;
+ case MVHI:
+ Format(instr, "mvhi\t'd3('r3),'id");
+ break;
+ case MVGHI:
+ Format(instr, "mvghi\t'd3('r3),'id");
+ break;
+ case ALGFI:
+ Format(instr, "algfi\t'r1,'i7");
+ break;
+ case SLGFI:
+ Format(instr, "slgfi\t'r1,'i7");
+ break;
+ case SLFI:
+ Format(instr, "slfi\t'r1,'i7");
+ break;
+ case NIHF:
+ Format(instr, "nihf\t'r1,'i7");
+ break;
+ case NILF:
+ Format(instr, "nilf\t'r1,'i7");
+ break;
+ case OIHF:
+ Format(instr, "oihf\t'r1,'i7");
+ break;
+ case OILF:
+ Format(instr, "oilf\t'r1,'i7");
+ break;
+ case MSFI:
+ Format(instr, "msfi\t'r1,'i7");
+ break;
+ case MSGFI:
+ Format(instr, "msgfi\t'r1,'i7");
+ break;
+ case LDY:
+ Format(instr, "ldy\t'f1,'d2('r2d,'r3)");
+ break;
+ case LEY:
+ Format(instr, "ley\t'f1,'d2('r2d,'r3)");
+ break;
+ case STEY:
+ Format(instr, "stey\t'f1,'d2('r2d,'r3)");
+ break;
+ case STDY:
+ Format(instr, "stdy\t'f1,'d2('r2d,'r3)");
+ break;
+ case ADB:
+ Format(instr, "adb\t'r1,'d1('r2d, 'r3)");
+ break;
+ case SDB:
+ Format(instr, "sdb\t'r1,'d1('r2d, 'r3)");
+ break;
+ case MDB:
+ Format(instr, "mdb\t'r1,'d1('r2d, 'r3)");
+ break;
+ case DDB:
+ Format(instr, "ddb\t'r1,'d1('r2d, 'r3)");
+ break;
+ case SQDB:
+ Format(instr, "sqdb\t'r1,'d1('r2d, 'r3)");
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+#undef VERIFIY
+
+// Disassemble the instruction at *instr_ptr into the output buffer.
+int Decoder::InstructionDecode(byte* instr_ptr) {
+ Instruction* instr = Instruction::At(instr_ptr);
+ int instrLength = instr->InstructionLength();
+
+ if (2 == instrLength)
+ DecodeTwoByte(instr);
+ else if (4 == instrLength)
+ DecodeFourByte(instr);
+ else
+ DecodeSixByte(instr);
+
+ return instrLength;
+}
+
+} // namespace internal
+} // namespace v8
+
+//------------------------------------------------------------------------------
+
+namespace disasm {
+
+const char* NameConverter::NameOfAddress(byte* addr) const {
+ v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
+ return tmp_buffer_.start();
+}
+
+const char* NameConverter::NameOfConstant(byte* addr) const {
+ return NameOfAddress(addr);
+}
+
+const char* NameConverter::NameOfCPURegister(int reg) const {
+ return v8::internal::Register::from_code(reg).ToString();
+}
+
+const char* NameConverter::NameOfByteCPURegister(int reg) const {
+ UNREACHABLE(); // S390 does not have the concept of a byte register
+ return "nobytereg";
+}
+
+const char* NameConverter::NameOfXMMRegister(int reg) const {
+ // S390 does not have XMM register
+ // TODO(joransiu): Consider update this for Vector Regs
+ UNREACHABLE();
+ return "noxmmreg";
+}
+
+const char* NameConverter::NameInCode(byte* addr) const {
+ // The default name converter is called for unknown code. So we will not try
+ // to access any memory.
+ return "";
+}
+
+//------------------------------------------------------------------------------
+
+Disassembler::Disassembler(const NameConverter& converter)
+ : converter_(converter) {}
+
+Disassembler::~Disassembler() {}
+
+int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
+ byte* instruction) {
+ v8::internal::Decoder d(converter_, buffer);
+ return d.InstructionDecode(instruction);
+}
+
+// The S390 assembler does not currently use constant pools.
+int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; }
+
+void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
+ NameConverter converter;
+ Disassembler d(converter);
+ for (byte* pc = begin; pc < end;) {
+ v8::internal::EmbeddedVector<char, 128> buffer;
+ buffer[0] = '\0';
+ byte* prev_pc = pc;
+ pc += d.InstructionDecode(buffer, pc);
+ v8::internal::PrintF(f, "%p %08x %s\n", prev_pc,
+ *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
+ }
+}
+
+} // namespace disasm
+
+#endif // V8_TARGET_ARCH_S390
diff --git a/deps/v8/src/s390/frames-s390.cc b/deps/v8/src/s390/frames-s390.cc
new file mode 100644
index 0000000000..20506ec13c
--- /dev/null
+++ b/deps/v8/src/s390/frames-s390.cc
@@ -0,0 +1,35 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/frames.h"
+#include "src/assembler.h"
+#include "src/macro-assembler.h"
+#include "src/s390/assembler-s390-inl.h"
+#include "src/s390/assembler-s390.h"
+#include "src/s390/frames-s390.h"
+#include "src/s390/macro-assembler-s390.h"
+
+namespace v8 {
+namespace internal {
+
+Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
+Register JavaScriptFrame::context_register() { return cp; }
+Register JavaScriptFrame::constant_pool_pointer_register() {
+ UNREACHABLE();
+ return no_reg;
+}
+
+Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
+Register StubFailureTrampolineFrame::context_register() { return cp; }
+Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
+ UNREACHABLE();
+ return no_reg;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_S390
diff --git a/deps/v8/src/s390/frames-s390.h b/deps/v8/src/s390/frames-s390.h
new file mode 100644
index 0000000000..cca060adcd
--- /dev/null
+++ b/deps/v8/src/s390/frames-s390.h
@@ -0,0 +1,189 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_S390_FRAMES_S390_H_
+#define V8_S390_FRAMES_S390_H_
+
+namespace v8 {
+namespace internal {
+
+// Register list in load/store instructions
+// Note that the bit values must match those used in actual instruction encoding
+const int kNumRegs = 16;
+
+// Caller-saved/arguments registers
+const RegList kJSCallerSaved = 1 << 1 | 1 << 2 | // r2 a1
+ 1 << 3 | // r3 a2
+ 1 << 4 | // r4 a3
+ 1 << 5; // r5 a4
+
+const int kNumJSCallerSaved = 5;
+
+// Return the code of the n-th caller-saved register available to JavaScript
+// e.g. JSCallerSavedReg(0) returns r0.code() == 0
+int JSCallerSavedCode(int n);
+
+// Callee-saved registers preserved when switching from C to JavaScript
+const RegList kCalleeSaved =
+ 1 << 6 | // r6 (argument passing in CEntryStub)
+ // (HandleScope logic in MacroAssembler)
+ 1 << 7 | // r7 (argument passing in CEntryStub)
+ // (HandleScope logic in MacroAssembler)
+ 1 << 8 | // r8 (argument passing in CEntryStub)
+ // (HandleScope logic in MacroAssembler)
+ 1 << 9 | // r9 (HandleScope logic in MacroAssembler)
+ 1 << 10 | // r10 (Roots register in Javascript)
+ 1 << 11 | // r11 (fp in Javascript)
+ 1 << 12 | // r12 (ip in Javascript)
+ 1 << 13; // r13 (cp in Javascript)
+// 1 << 15; // r15 (sp in Javascript)
+
+const int kNumCalleeSaved = 8;
+
+#ifdef V8_TARGET_ARCH_S390X
+
+const RegList kCallerSavedDoubles = 1 << 0 | // d0
+ 1 << 1 | // d1
+ 1 << 2 | // d2
+ 1 << 3 | // d3
+ 1 << 4 | // d4
+ 1 << 5 | // d5
+ 1 << 6 | // d6
+ 1 << 7; // d7
+
+const int kNumCallerSavedDoubles = 8;
+
+const RegList kCalleeSavedDoubles = 1 << 8 | // d8
+ 1 << 9 | // d9
+ 1 << 10 | // d10
+ 1 << 11 | // d11
+ 1 << 12 | // d12
+ 1 << 13 | // d12
+ 1 << 14 | // d12
+ 1 << 15; // d13
+
+const int kNumCalleeSavedDoubles = 8;
+
+#else
+
+const RegList kCallerSavedDoubles = 1 << 14 | // d14
+ 1 << 15 | // d15
+ 1 << 0 | // d0
+ 1 << 1 | // d1
+ 1 << 2 | // d2
+ 1 << 3 | // d3
+ 1 << 5 | // d5
+ 1 << 7 | // d7
+ 1 << 8 | // d8
+ 1 << 9 | // d9
+ 1 << 10 | // d10
+ 1 << 11 | // d10
+ 1 << 12 | // d10
+ 1 << 13; // d11
+
+const int kNumCallerSavedDoubles = 14;
+
+const RegList kCalleeSavedDoubles = 1 << 4 | // d4
+ 1 << 6; // d6
+
+const int kNumCalleeSavedDoubles = 2;
+
+#endif
+
+// Number of registers for which space is reserved in safepoints. Must be a
+// multiple of 8.
+// TODO(regis): Only 8 registers may actually be sufficient. Revisit.
+const int kNumSafepointRegisters = 16;
+
+// Define the list of registers actually saved at safepoints.
+// Note that the number of saved registers may be smaller than the reserved
+// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
+// const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
+// const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
+
+// The following constants describe the stack frame linkage area as
+// defined by the ABI.
+
+#if V8_TARGET_ARCH_S390X
+// [0] Back Chain
+// [1] Reserved for compiler use
+// [2] GPR 2
+// [3] GPR 3
+// ...
+// [15] GPR 15
+// [16] FPR 0
+// [17] FPR 2
+// [18] FPR 4
+// [19] FPR 6
+const int kNumRequiredStackFrameSlots = 20;
+const int kStackFrameRASlot = 14;
+const int kStackFrameSPSlot = 15;
+const int kStackFrameExtraParamSlot = 20;
+#else
+// [0] Back Chain
+// [1] Reserved for compiler use
+// [2] GPR 2
+// [3] GPR 3
+// ...
+// [15] GPR 15
+// [16..17] FPR 0
+// [18..19] FPR 2
+// [20..21] FPR 4
+// [22..23] FPR 6
+const int kNumRequiredStackFrameSlots = 24;
+const int kStackFrameRASlot = 14;
+const int kStackFrameSPSlot = 15;
+const int kStackFrameExtraParamSlot = 24;
+#endif
+
+// zLinux ABI requires caller frames to include sufficient space for
+// callee preserved register save area.
+#if V8_TARGET_ARCH_S390X
+const int kCalleeRegisterSaveAreaSize = 160;
+#elif V8_TARGET_ARCH_S390
+const int kCalleeRegisterSaveAreaSize = 96;
+#else
+const int kCalleeRegisterSaveAreaSize = 0;
+#endif
+
+// ----------------------------------------------------
+
+class EntryFrameConstants : public AllStatic {
+ public:
+ static const int kCallerFPOffset =
+ -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+};
+
+class ExitFrameConstants : public TypedFrameConstants {
+ public:
+ static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ DEFINE_TYPED_FRAME_SIZES(2);
+
+ // The caller fields are below the frame pointer on the stack.
+ static const int kCallerFPOffset = 0 * kPointerSize;
+ // The calling JS function is below FP.
+ static const int kCallerPCOffset = 1 * kPointerSize;
+
+ // FP-relative displacement of the caller's SP. It points just
+ // below the saved PC.
+ static const int kCallerSPDisplacement = 2 * kPointerSize;
+};
+
+class JavaScriptFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
+ static const int kLastParameterOffset = +2 * kPointerSize;
+ static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
+
+ // Caller SP-relative.
+ static const int kParam0Offset = -2 * kPointerSize;
+ static const int kReceiverOffset = -1 * kPointerSize;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_S390_FRAMES_S390_H_
diff --git a/deps/v8/src/s390/interface-descriptors-s390.cc b/deps/v8/src/s390/interface-descriptors-s390.cc
new file mode 100644
index 0000000000..63afca85ee
--- /dev/null
+++ b/deps/v8/src/s390/interface-descriptors-s390.cc
@@ -0,0 +1,373 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
+
+const Register LoadDescriptor::ReceiverRegister() { return r3; }
+const Register LoadDescriptor::NameRegister() { return r4; }
+const Register LoadDescriptor::SlotRegister() { return r2; }
+
+const Register LoadWithVectorDescriptor::VectorRegister() { return r5; }
+
+const Register StoreDescriptor::ReceiverRegister() { return r3; }
+const Register StoreDescriptor::NameRegister() { return r4; }
+const Register StoreDescriptor::ValueRegister() { return r2; }
+
+const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return r6; }
+
+const Register VectorStoreICDescriptor::VectorRegister() { return r5; }
+
+const Register VectorStoreTransitionDescriptor::SlotRegister() { return r6; }
+const Register VectorStoreTransitionDescriptor::VectorRegister() { return r5; }
+const Register VectorStoreTransitionDescriptor::MapRegister() { return r7; }
+
+const Register StoreTransitionDescriptor::MapRegister() { return r5; }
+
+const Register LoadGlobalViaContextDescriptor::SlotRegister() { return r4; }
+
+const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r4; }
+const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r2; }
+
+const Register InstanceOfDescriptor::LeftRegister() { return r3; }
+const Register InstanceOfDescriptor::RightRegister() { return r2; }
+
+const Register StringCompareDescriptor::LeftRegister() { return r3; }
+const Register StringCompareDescriptor::RightRegister() { return r2; }
+
+const Register ApiGetterDescriptor::function_address() { return r4; }
+
+const Register MathPowTaggedDescriptor::exponent() { return r4; }
+
+const Register MathPowIntegerDescriptor::exponent() {
+ return MathPowTaggedDescriptor::exponent();
+}
+
+const Register GrowArrayElementsDescriptor::ObjectRegister() { return r2; }
+const Register GrowArrayElementsDescriptor::KeyRegister() { return r5; }
+
+void FastNewClosureDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastNewContextDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastNewObjectDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r3, r5};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastNewRestParameterDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+// static
+const Register TypeConversionDescriptor::ArgumentRegister() { return r2; }
+
+void TypeofDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r5};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastCloneRegExpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r5, r4, r3, r2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r5, r4, r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r5, r4, r3, r2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r4, r5};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CreateWeakCellDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r4, r5, r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CallFunctionDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r3, r5};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r3, r5, r4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CallConstructDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r2 : number of arguments
+ // r3 : the function to call
+ // r4 : feedback vector
+ // r5 : slot in feedback vector (Smi, for RecordCallTarget)
+ // r6 : new target (for IsSuperConstructorCall)
+ // TODO(turbofan): So far we don't gather type feedback and hence skip the
+ // slot parameter, but ArrayConstructStub needs the vector to be undefined.
+ Register registers[] = {r2, r3, r6, r4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CallTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r2 : number of arguments
+ // r3 : the target to call
+ Register registers[] = {r3, r2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructStubDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r2 : number of arguments
+ // r3 : the target to call
+ // r5 : the new target
+ // r4 : allocation site or undefined
+ Register registers[] = {r3, r5, r2, r4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r2 : number of arguments
+ // r3 : the target to call
+ // r5 : the new target
+ Register registers[] = {r3, r5, r2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void RegExpConstructResultDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r4, r3, r2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void TransitionElementsKindDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r2, r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ data->InitializePlatformSpecific(0, nullptr, nullptr);
+}
+
+#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
+ void Allocate##Type##Descriptor::InitializePlatformSpecific( \
+ CallInterfaceDescriptorData* data) { \
+ data->InitializePlatformSpecific(0, nullptr, nullptr); \
+ }
+SIMD128_TYPES(SIMD128_ALLOC_DESC)
+#undef SIMD128_ALLOC_DESC
+
+void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // register state
+ // r2 -- number of arguments
+ // r3 -- function
+ // r4 -- allocation site with elements kind
+ Register registers[] = {r3, r4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ArrayConstructorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // stack param count needs (constructor pointer, and single argument)
+ Register registers[] = {r3, r4, r2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void InternalArrayConstructorConstantArgCountDescriptor::
+ InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+ // register state
+ // r2 -- number of arguments
+ // r3 -- constructor function
+ Register registers[] = {r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // stack param count needs (constructor pointer, and single argument)
+ Register registers[] = {r3, r2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastArrayPushDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // stack param count needs (arg count)
+ Register registers[] = {r2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CompareDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r3, r2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void BinaryOpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r3, r2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r4, r3, r2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void StringAddDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r3, r2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void KeyedDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ r4, // key
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void NamedDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ r4, // name
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CallHandlerDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ r2, // receiver
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ r3, // JSFunction
+ r5, // the new target
+ r2, // actual number of arguments
+ r4, // expected number of arguments
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ApiCallbackDescriptorBase::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ r2, // callee
+ r6, // call_data
+ r4, // holder
+ r3, // api_function_address
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void InterpreterDispatchDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
+ kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
+ kInterpreterDispatchTableRegister};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ r2, // argument count (not including receiver)
+ r4, // address of first argument
+ r3 // the target callable to be call
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ r2, // argument count (not including receiver)
+ r5, // new target
+ r3, // constructor to call
+ r4 // address of the first argument
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void InterpreterCEntryDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ r2, // argument count (argc)
+ r4, // address of first argument (argv)
+ r3 // the runtime function to call
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_S390
diff --git a/deps/v8/src/s390/macro-assembler-s390.cc b/deps/v8/src/s390/macro-assembler-s390.cc
new file mode 100644
index 0000000000..21058f420f
--- /dev/null
+++ b/deps/v8/src/s390/macro-assembler-s390.cc
@@ -0,0 +1,5409 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <assert.h> // For assert
+#include <limits.h> // For LONG_MIN, LONG_MAX.
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/base/bits.h"
+#include "src/base/division-by-constant.h"
+#include "src/bootstrapper.h"
+#include "src/codegen.h"
+#include "src/debug/debug.h"
+#include "src/register-configuration.h"
+#include "src/runtime/runtime.h"
+
+#include "src/s390/macro-assembler-s390.h"
+
+namespace v8 {
+namespace internal {
+
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object)
+ : Assembler(arg_isolate, buffer, size),
+ generating_stub_(false),
+ has_frame_(false) {
+ if (create_code_object == CodeObjectRequired::kYes) {
+ code_object_ =
+ Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
+ }
+}
+
+void MacroAssembler::Jump(Register target) { b(target); }
+
+void MacroAssembler::JumpToJSEntry(Register target) {
+ Move(ip, target);
+ Jump(ip);
+}
+
+void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
+ Condition cond, CRegister) {
+ Label skip;
+
+ if (cond != al) b(NegateCondition(cond), &skip);
+
+ DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY);
+
+ mov(ip, Operand(target, rmode));
+ b(ip);
+
+ bind(&skip);
+}
+
+void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
+ CRegister cr) {
+ DCHECK(!RelocInfo::IsCodeTarget(rmode));
+ Jump(reinterpret_cast<intptr_t>(target), rmode, cond, cr);
+}
+
+void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+ Condition cond) {
+ DCHECK(RelocInfo::IsCodeTarget(rmode));
+ jump(code, rmode, cond);
+}
+
+int MacroAssembler::CallSize(Register target) { return 2; } // BASR
+
+void MacroAssembler::Call(Register target) {
+ Label start;
+ bind(&start);
+
+ // Statement positions are expected to be recorded when the target
+ // address is loaded.
+ positions_recorder()->WriteRecordedPositions();
+
+ // Branch to target via indirect branch
+ basr(r14, target);
+
+ DCHECK_EQ(CallSize(target), SizeOfCodeGeneratedSince(&start));
+}
+
+void MacroAssembler::CallJSEntry(Register target) {
+ DCHECK(target.is(ip));
+ Call(target);
+}
+
+int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode,
+ Condition cond) {
+ // S390 Assembler::move sequence is IILF / IIHF
+ int size;
+#if V8_TARGET_ARCH_S390X
+ size = 14; // IILF + IIHF + BASR
+#else
+ size = 8; // IILF + BASR
+#endif
+ return size;
+}
+
+int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
+ RelocInfo::Mode rmode,
+ Condition cond) {
+ // S390 Assembler::move sequence is IILF / IIHF
+ int size;
+#if V8_TARGET_ARCH_S390X
+ size = 14; // IILF + IIHF + BASR
+#else
+ size = 8; // IILF + BASR
+#endif
+ return size;
+}
+
+void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
+ Condition cond) {
+ DCHECK(cond == al);
+
+#ifdef DEBUG
+ // Check the expected size before generating code to ensure we assume the same
+ // constant pool availability (e.g., whether constant pool is full or not).
+ int expected_size = CallSize(target, rmode, cond);
+ Label start;
+ bind(&start);
+#endif
+
+ // Statement positions are expected to be recorded when the target
+ // address is loaded.
+ positions_recorder()->WriteRecordedPositions();
+
+ mov(ip, Operand(reinterpret_cast<intptr_t>(target), rmode));
+ basr(r14, ip);
+
+ DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
+}
+
+int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
+ TypeFeedbackId ast_id, Condition cond) {
+ return 6; // BRASL
+}
+
+void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+ TypeFeedbackId ast_id, Condition cond) {
+ DCHECK(RelocInfo::IsCodeTarget(rmode) && cond == al);
+
+#ifdef DEBUG
+ // Check the expected size before generating code to ensure we assume the same
+ // constant pool availability (e.g., whether constant pool is full or not).
+ int expected_size = CallSize(code, rmode, ast_id, cond);
+ Label start;
+ bind(&start);
+#endif
+ call(code, rmode, ast_id);
+ DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
+}
+
+void MacroAssembler::Drop(int count) {
+ if (count > 0) {
+ int total = count * kPointerSize;
+ if (is_uint12(total)) {
+ la(sp, MemOperand(sp, total));
+ } else if (is_int20(total)) {
+ lay(sp, MemOperand(sp, total));
+ } else {
+ AddP(sp, Operand(total));
+ }
+ }
+}
+
+void MacroAssembler::Drop(Register count, Register scratch) {
+ ShiftLeftP(scratch, count, Operand(kPointerSizeLog2));
+ AddP(sp, sp, scratch);
+}
+
+void MacroAssembler::Call(Label* target) { b(r14, target); }
+
+void MacroAssembler::Push(Handle<Object> handle) {
+ mov(r0, Operand(handle));
+ push(r0);
+}
+
+void MacroAssembler::Move(Register dst, Handle<Object> value) {
+ AllowDeferredHandleDereference smi_check;
+ if (value->IsSmi()) {
+ LoadSmiLiteral(dst, reinterpret_cast<Smi*>(*value));
+ } else {
+ DCHECK(value->IsHeapObject());
+ if (isolate()->heap()->InNewSpace(*value)) {
+ Handle<Cell> cell = isolate()->factory()->NewCell(value);
+ mov(dst, Operand(cell));
+ LoadP(dst, FieldMemOperand(dst, Cell::kValueOffset));
+ } else {
+ mov(dst, Operand(value));
+ }
+ }
+}
+
+void MacroAssembler::Move(Register dst, Register src, Condition cond) {
+ if (!dst.is(src)) {
+ LoadRR(dst, src);
+ }
+}
+
+void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
+ if (!dst.is(src)) {
+ ldr(dst, src);
+ }
+}
+
+void MacroAssembler::MultiPush(RegList regs, Register location) {
+ int16_t num_to_push = NumberOfBitsSet(regs);
+ int16_t stack_offset = num_to_push * kPointerSize;
+
+ SubP(location, location, Operand(stack_offset));
+ for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) {
+ if ((regs & (1 << i)) != 0) {
+ stack_offset -= kPointerSize;
+ StoreP(ToRegister(i), MemOperand(location, stack_offset));
+ }
+ }
+}
+
+void MacroAssembler::MultiPop(RegList regs, Register location) {
+ int16_t stack_offset = 0;
+
+ for (int16_t i = 0; i < Register::kNumRegisters; i++) {
+ if ((regs & (1 << i)) != 0) {
+ LoadP(ToRegister(i), MemOperand(location, stack_offset));
+ stack_offset += kPointerSize;
+ }
+ }
+ AddP(location, location, Operand(stack_offset));
+}
+
+void MacroAssembler::MultiPushDoubles(RegList dregs, Register location) {
+ int16_t num_to_push = NumberOfBitsSet(dregs);
+ int16_t stack_offset = num_to_push * kDoubleSize;
+
+ SubP(location, location, Operand(stack_offset));
+ for (int16_t i = DoubleRegister::kNumRegisters - 1; i >= 0; i--) {
+ if ((dregs & (1 << i)) != 0) {
+ DoubleRegister dreg = DoubleRegister::from_code(i);
+ stack_offset -= kDoubleSize;
+ StoreDouble(dreg, MemOperand(location, stack_offset));
+ }
+ }
+}
+
+void MacroAssembler::MultiPopDoubles(RegList dregs, Register location) {
+ int16_t stack_offset = 0;
+
+ for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) {
+ if ((dregs & (1 << i)) != 0) {
+ DoubleRegister dreg = DoubleRegister::from_code(i);
+ LoadDouble(dreg, MemOperand(location, stack_offset));
+ stack_offset += kDoubleSize;
+ }
+ }
+ AddP(location, location, Operand(stack_offset));
+}
+
+void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
+ Condition) {
+ LoadP(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
+}
+
+void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index,
+ Condition) {
+ DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
+ StoreP(source, MemOperand(kRootRegister, index << kPointerSizeLog2));
+}
+
+void MacroAssembler::InNewSpace(Register object, Register scratch,
+ Condition cond, Label* branch) {
+ DCHECK(cond == eq || cond == ne);
+ // TODO(joransiu): check if we can merge mov Operand into AndP.
+ const int mask =
+ (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
+ CheckPageFlag(object, scratch, mask, cond, branch);
+}
+
+void MacroAssembler::RecordWriteField(
+ Register object, int offset, Register value, Register dst,
+ LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action, SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis.
+ Label done;
+
+ // Skip barrier if writing a smi.
+ if (smi_check == INLINE_SMI_CHECK) {
+ JumpIfSmi(value, &done);
+ }
+
+ // Although the object register is tagged, the offset is relative to the start
+ // of the object, so so offset must be a multiple of kPointerSize.
+ DCHECK(IsAligned(offset, kPointerSize));
+
+ lay(dst, MemOperand(object, offset - kHeapObjectTag));
+ if (emit_debug_code()) {
+ Label ok;
+ AndP(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
+ beq(&ok, Label::kNear);
+ stop("Unaligned cell in write barrier");
+ bind(&ok);
+ }
+
+ RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
+ OMIT_SMI_CHECK, pointers_to_here_check_for_value);
+
+ bind(&done);
+
+ // Clobber clobbered input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ mov(value, Operand(bit_cast<intptr_t>(kZapValue + 4)));
+ mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 8)));
+ }
+}
+
+// Will clobber 4 registers: object, map, dst, ip. The
+// register 'object' contains a heap object pointer.
+void MacroAssembler::RecordWriteForMap(Register object, Register map,
+ Register dst,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode fp_mode) {
+ if (emit_debug_code()) {
+ LoadP(dst, FieldMemOperand(map, HeapObject::kMapOffset));
+ CmpP(dst, Operand(isolate()->factory()->meta_map()));
+ Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ }
+
+ if (!FLAG_incremental_marking) {
+ return;
+ }
+
+ if (emit_debug_code()) {
+ CmpP(map, FieldMemOperand(object, HeapObject::kMapOffset));
+ Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ }
+
+ Label done;
+
+ // A single check of the map's pages interesting flag suffices, since it is
+ // only set during incremental collection, and then it's also guaranteed that
+ // the from object's page's interesting flag is also set. This optimization
+ // relies on the fact that maps can never be in new space.
+ CheckPageFlag(map,
+ map, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
+
+ lay(dst, MemOperand(object, HeapObject::kMapOffset - kHeapObjectTag));
+ if (emit_debug_code()) {
+ Label ok;
+ AndP(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
+ beq(&ok, Label::kNear);
+ stop("Unaligned cell in write barrier");
+ bind(&ok);
+ }
+
+ // Record the actual write.
+ if (lr_status == kLRHasNotBeenSaved) {
+ push(r14);
+ }
+ RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
+ fp_mode);
+ CallStub(&stub);
+ if (lr_status == kLRHasNotBeenSaved) {
+ pop(r14);
+ }
+
+ bind(&done);
+
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst);
+
+ // Clobber clobbered registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 12)));
+ mov(map, Operand(bit_cast<intptr_t>(kZapValue + 16)));
+ }
+}
+
+// Will clobber 4 registers: object, address, scratch, ip. The
+// register 'object' contains a heap object pointer. The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWrite(
+ Register object, Register address, Register value,
+ LinkRegisterStatus lr_status, SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action, SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
+ DCHECK(!object.is(value));
+ if (emit_debug_code()) {
+ CmpP(value, MemOperand(address));
+ Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ }
+
+ if (remembered_set_action == OMIT_REMEMBERED_SET &&
+ !FLAG_incremental_marking) {
+ return;
+ }
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of smis and stores into the young generation.
+ Label done;
+
+ if (smi_check == INLINE_SMI_CHECK) {
+ JumpIfSmi(value, &done);
+ }
+
+ if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
+ }
+ CheckPageFlag(object,
+ value, // Used as scratch.
+ MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
+
+ // Record the actual write.
+ if (lr_status == kLRHasNotBeenSaved) {
+ push(r14);
+ }
+ RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
+ fp_mode);
+ CallStub(&stub);
+ if (lr_status == kLRHasNotBeenSaved) {
+ pop(r14);
+ }
+
+ bind(&done);
+
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip,
+ value);
+
+ // Clobber clobbered registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ mov(address, Operand(bit_cast<intptr_t>(kZapValue + 12)));
+ mov(value, Operand(bit_cast<intptr_t>(kZapValue + 16)));
+ }
+}
+
+void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
+ Register code_entry,
+ Register scratch) {
+ const int offset = JSFunction::kCodeEntryOffset;
+
+ // Since a code entry (value) is always in old space, we don't need to update
+ // remembered set. If incremental marking is off, there is nothing for us to
+ // do.
+ if (!FLAG_incremental_marking) return;
+
+ DCHECK(js_function.is(r3));
+ DCHECK(code_entry.is(r6));
+ DCHECK(scratch.is(r7));
+ AssertNotSmi(js_function);
+
+ if (emit_debug_code()) {
+ AddP(scratch, js_function, Operand(offset - kHeapObjectTag));
+ LoadP(ip, MemOperand(scratch));
+ CmpP(ip, code_entry);
+ Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ }
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis and stores into young gen.
+ Label done;
+
+ CheckPageFlag(code_entry, scratch,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
+ CheckPageFlag(js_function, scratch,
+ MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
+
+ const Register dst = scratch;
+ AddP(dst, js_function, Operand(offset - kHeapObjectTag));
+
+ // Save caller-saved registers. js_function and code_entry are in the
+ // caller-saved register list.
+ DCHECK(kJSCallerSaved & js_function.bit());
+ DCHECK(kJSCallerSaved & code_entry.bit());
+ MultiPush(kJSCallerSaved | r14.bit());
+
+ int argument_count = 3;
+ PrepareCallCFunction(argument_count, code_entry);
+
+ LoadRR(r2, js_function);
+ LoadRR(r3, dst);
+ mov(r4, Operand(ExternalReference::isolate_address(isolate())));
+
+ {
+ AllowExternalCallThatCantCauseGC scope(this);
+ CallCFunction(
+ ExternalReference::incremental_marking_record_write_code_entry_function(
+ isolate()),
+ argument_count);
+ }
+
+ // Restore caller-saved registers (including js_function and code_entry).
+ MultiPop(kJSCallerSaved | r14.bit());
+
+ bind(&done);
+}
+
+void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
+ Register address, Register scratch,
+ SaveFPRegsMode fp_mode,
+ RememberedSetFinalAction and_then) {
+ Label done;
+ if (emit_debug_code()) {
+ Label ok;
+ JumpIfNotInNewSpace(object, scratch, &ok);
+ stop("Remembered set pointer is in new space");
+ bind(&ok);
+ }
+ // Load store buffer top.
+ ExternalReference store_buffer =
+ ExternalReference::store_buffer_top(isolate());
+ mov(ip, Operand(store_buffer));
+ LoadP(scratch, MemOperand(ip));
+ // Store pointer to buffer and increment buffer top.
+ StoreP(address, MemOperand(scratch));
+ AddP(scratch, Operand(kPointerSize));
+ // Write back new top of buffer.
+ StoreP(scratch, MemOperand(ip));
+ // Call stub on end of buffer.
+ // Check for end of buffer.
+ AndP(scratch, Operand(StoreBuffer::kStoreBufferMask));
+
+ if (and_then == kFallThroughAtEnd) {
+ bne(&done, Label::kNear);
+ } else {
+ DCHECK(and_then == kReturnAtEnd);
+ bne(&done, Label::kNear);
+ }
+ push(r14);
+ StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
+ CallStub(&store_buffer_overflow);
+ pop(r14);
+ bind(&done);
+ if (and_then == kReturnAtEnd) {
+ Ret();
+ }
+}
+
+void MacroAssembler::PushCommonFrame(Register marker_reg) {
+ int fp_delta = 0;
+ CleanseP(r14);
+ if (marker_reg.is_valid()) {
+ Push(r14, fp, marker_reg);
+ fp_delta = 1;
+ } else {
+ Push(r14, fp);
+ fp_delta = 0;
+ }
+ la(fp, MemOperand(sp, fp_delta * kPointerSize));
+}
+
+void MacroAssembler::PopCommonFrame(Register marker_reg) {
+ if (marker_reg.is_valid()) {
+ Pop(r14, fp, marker_reg);
+ } else {
+ Pop(r14, fp);
+ }
+}
+
+void MacroAssembler::PushStandardFrame(Register function_reg) {
+ int fp_delta = 0;
+ CleanseP(r14);
+ if (function_reg.is_valid()) {
+ Push(r14, fp, cp, function_reg);
+ fp_delta = 2;
+ } else {
+ Push(r14, fp, cp);
+ fp_delta = 1;
+ }
+ la(fp, MemOperand(sp, fp_delta * kPointerSize));
+}
+
+void MacroAssembler::RestoreFrameStateForTailCall() {
+ // if (FLAG_enable_embedded_constant_pool) {
+ // LoadP(kConstantPoolRegister,
+ // MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
+ // set_constant_pool_available(false);
+ // }
+ DCHECK(!FLAG_enable_embedded_constant_pool);
+ LoadP(r14, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+}
+
+const RegList MacroAssembler::kSafepointSavedRegisters = Register::kAllocatable;
+const int MacroAssembler::kNumSafepointSavedRegisters =
+ Register::kNumAllocatable;
+
+// Push and pop all registers that can hold pointers.
+void MacroAssembler::PushSafepointRegisters() {
+ // Safepoints expect a block of kNumSafepointRegisters values on the
+ // stack, so adjust the stack for unsaved registers.
+ const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+ DCHECK(num_unsaved >= 0);
+ if (num_unsaved > 0) {
+ lay(sp, MemOperand(sp, -(num_unsaved * kPointerSize)));
+ }
+ MultiPush(kSafepointSavedRegisters);
+}
+
+void MacroAssembler::PopSafepointRegisters() {
+ const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+ MultiPop(kSafepointSavedRegisters);
+ if (num_unsaved > 0) {
+ la(sp, MemOperand(sp, num_unsaved * kPointerSize));
+ }
+}
+
+void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
+ StoreP(src, SafepointRegisterSlot(dst));
+}
+
+void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
+ LoadP(dst, SafepointRegisterSlot(src));
+}
+
+int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
+ // The registers are pushed starting with the highest encoding,
+ // which means that lowest encodings are closest to the stack pointer.
+ RegList regs = kSafepointSavedRegisters;
+ int index = 0;
+
+ DCHECK(reg_code >= 0 && reg_code < kNumRegisters);
+
+ for (int16_t i = 0; i < reg_code; i++) {
+ if ((regs & (1 << i)) != 0) {
+ index++;
+ }
+ }
+
+ return index;
+}
+
+MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
+ return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
+}
+
+MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
+ // General purpose registers are pushed last on the stack.
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ int doubles_size = config->num_allocatable_double_registers() * kDoubleSize;
+ int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
+ return MemOperand(sp, doubles_size + register_offset);
+}
+
+void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst,
+ const DoubleRegister src) {
+ // Turn potential sNaN into qNaN
+ if (!dst.is(src)) ldr(dst, src);
+ lzdr(kDoubleRegZero);
+ sdbr(dst, kDoubleRegZero);
+}
+
+void MacroAssembler::ConvertIntToDouble(Register src, DoubleRegister dst) {
+ cdfbr(dst, src);
+}
+
+void MacroAssembler::ConvertUnsignedIntToDouble(Register src,
+ DoubleRegister dst) {
+ if (CpuFeatures::IsSupported(FLOATING_POINT_EXT)) {
+ cdlfbr(Condition(5), Condition(0), dst, src);
+ } else {
+ // zero-extend src
+ llgfr(src, src);
+ // convert to double
+ cdgbr(dst, src);
+ }
+}
+
+void MacroAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) {
+ cefbr(dst, src);
+}
+
+void MacroAssembler::ConvertUnsignedIntToFloat(Register src,
+ DoubleRegister dst) {
+ celfbr(Condition(0), Condition(0), dst, src);
+}
+
+#if V8_TARGET_ARCH_S390X
+void MacroAssembler::ConvertInt64ToDouble(Register src,
+ DoubleRegister double_dst) {
+ cdgbr(double_dst, src);
+}
+
+void MacroAssembler::ConvertUnsignedInt64ToFloat(Register src,
+ DoubleRegister double_dst) {
+ celgbr(Condition(0), Condition(0), double_dst, src);
+}
+
+void MacroAssembler::ConvertUnsignedInt64ToDouble(Register src,
+ DoubleRegister double_dst) {
+ cdlgbr(Condition(0), Condition(0), double_dst, src);
+}
+
+void MacroAssembler::ConvertInt64ToFloat(Register src,
+ DoubleRegister double_dst) {
+ cegbr(double_dst, src);
+}
+#endif
+
+void MacroAssembler::ConvertFloat32ToInt64(const DoubleRegister double_input,
+#if !V8_TARGET_ARCH_S390X
+ const Register dst_hi,
+#endif
+ const Register dst,
+ const DoubleRegister double_dst,
+ FPRoundingMode rounding_mode) {
+ Condition m = Condition(0);
+ switch (rounding_mode) {
+ case kRoundToZero:
+ m = Condition(5);
+ break;
+ case kRoundToNearest:
+ UNIMPLEMENTED();
+ break;
+ case kRoundToPlusInf:
+ m = Condition(6);
+ break;
+ case kRoundToMinusInf:
+ m = Condition(7);
+ break;
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+ cgebr(m, dst, double_input);
+ ldgr(double_dst, dst);
+#if !V8_TARGET_ARCH_S390X
+ srlg(dst_hi, dst, Operand(32));
+#endif
+}
+
+void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
+#if !V8_TARGET_ARCH_S390X
+ const Register dst_hi,
+#endif
+ const Register dst,
+ const DoubleRegister double_dst,
+ FPRoundingMode rounding_mode) {
+ Condition m = Condition(0);
+ switch (rounding_mode) {
+ case kRoundToZero:
+ m = Condition(5);
+ break;
+ case kRoundToNearest:
+ UNIMPLEMENTED();
+ break;
+ case kRoundToPlusInf:
+ m = Condition(6);
+ break;
+ case kRoundToMinusInf:
+ m = Condition(7);
+ break;
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+ cgdbr(m, dst, double_input);
+ ldgr(double_dst, dst);
+#if !V8_TARGET_ARCH_S390X
+ srlg(dst_hi, dst, Operand(32));
+#endif
+}
+
+void MacroAssembler::ConvertFloat32ToInt32(const DoubleRegister double_input,
+ const Register dst,
+ const DoubleRegister double_dst,
+ FPRoundingMode rounding_mode) {
+ Condition m = Condition(0);
+ switch (rounding_mode) {
+ case kRoundToZero:
+ m = Condition(5);
+ break;
+ case kRoundToNearest:
+ UNIMPLEMENTED();
+ break;
+ case kRoundToPlusInf:
+ m = Condition(6);
+ break;
+ case kRoundToMinusInf:
+ m = Condition(7);
+ break;
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+ cfebr(m, dst, double_input);
+ ldgr(double_dst, dst);
+}
+
+void MacroAssembler::ConvertFloat32ToUnsignedInt32(
+ const DoubleRegister double_input, const Register dst,
+ const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
+ Condition m = Condition(0);
+ switch (rounding_mode) {
+ case kRoundToZero:
+ m = Condition(5);
+ break;
+ case kRoundToNearest:
+ UNIMPLEMENTED();
+ break;
+ case kRoundToPlusInf:
+ m = Condition(6);
+ break;
+ case kRoundToMinusInf:
+ m = Condition(7);
+ break;
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+ clfebr(m, Condition(0), dst, double_input);
+ ldgr(double_dst, dst);
+}
+
+#if V8_TARGET_ARCH_S390X
+void MacroAssembler::ConvertFloat32ToUnsignedInt64(
+ const DoubleRegister double_input, const Register dst,
+ const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
+ Condition m = Condition(0);
+ switch (rounding_mode) {
+ case kRoundToZero:
+ m = Condition(5);
+ break;
+ case kRoundToNearest:
+ UNIMPLEMENTED();
+ break;
+ case kRoundToPlusInf:
+ m = Condition(6);
+ break;
+ case kRoundToMinusInf:
+ m = Condition(7);
+ break;
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+ clgebr(m, Condition(0), dst, double_input);
+ ldgr(double_dst, dst);
+}
+
+void MacroAssembler::ConvertDoubleToUnsignedInt64(
+ const DoubleRegister double_input, const Register dst,
+ const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
+ Condition m = Condition(0);
+ switch (rounding_mode) {
+ case kRoundToZero:
+ m = Condition(5);
+ break;
+ case kRoundToNearest:
+ UNIMPLEMENTED();
+ break;
+ case kRoundToPlusInf:
+ m = Condition(6);
+ break;
+ case kRoundToMinusInf:
+ m = Condition(7);
+ break;
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+ clgdbr(m, Condition(0), dst, double_input);
+ ldgr(double_dst, dst);
+}
+
+#endif
+
+#if !V8_TARGET_ARCH_S390X
+void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
+ Register src_low, Register src_high,
+ Register scratch, Register shift) {
+ LoadRR(r0, src_high);
+ LoadRR(r1, src_low);
+ sldl(r0, shift, Operand::Zero());
+ LoadRR(dst_high, r0);
+ LoadRR(dst_low, r1);
+}
+
+void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
+ Register src_low, Register src_high,
+ uint32_t shift) {
+ LoadRR(r0, src_high);
+ LoadRR(r1, src_low);
+ sldl(r0, r0, Operand(shift));
+ LoadRR(dst_high, r0);
+ LoadRR(dst_low, r1);
+}
+
+void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
+ Register src_low, Register src_high,
+ Register scratch, Register shift) {
+ LoadRR(r0, src_high);
+ LoadRR(r1, src_low);
+ srdl(r0, shift, Operand::Zero());
+ LoadRR(dst_high, r0);
+ LoadRR(dst_low, r1);
+}
+
+void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
+ Register src_low, Register src_high,
+ uint32_t shift) {
+ LoadRR(r0, src_high);
+ LoadRR(r1, src_low);
+ srdl(r0, r0, Operand(shift));
+ LoadRR(dst_high, r0);
+ LoadRR(dst_low, r1);
+}
+
+void MacroAssembler::ShiftRightArithPair(Register dst_low, Register dst_high,
+ Register src_low, Register src_high,
+ Register scratch, Register shift) {
+ LoadRR(r0, src_high);
+ LoadRR(r1, src_low);
+ srda(r0, shift, Operand::Zero());
+ LoadRR(dst_high, r0);
+ LoadRR(dst_low, r1);
+}
+
+void MacroAssembler::ShiftRightArithPair(Register dst_low, Register dst_high,
+ Register src_low, Register src_high,
+ uint32_t shift) {
+ LoadRR(r0, src_high);
+ LoadRR(r1, src_low);
+ srda(r0, r0, Operand(shift));
+ LoadRR(dst_high, r0);
+ LoadRR(dst_low, r1);
+}
+#endif
+
+void MacroAssembler::MovDoubleToInt64(Register dst, DoubleRegister src) {
+ lgdr(dst, src);
+}
+
+void MacroAssembler::MovInt64ToDouble(DoubleRegister dst, Register src) {
+ ldgr(dst, src);
+}
+
+void MacroAssembler::StubPrologue(StackFrame::Type type, Register base,
+ int prologue_offset) {
+ {
+ ConstantPoolUnavailableScope constant_pool_unavailable(this);
+ LoadSmiLiteral(r1, Smi::FromInt(type));
+ PushCommonFrame(r1);
+ }
+}
+
+void MacroAssembler::Prologue(bool code_pre_aging, Register base,
+ int prologue_offset) {
+ DCHECK(!base.is(no_reg));
+ {
+ PredictableCodeSizeScope predictible_code_size_scope(
+ this, kNoCodeAgeSequenceLength);
+ // The following instructions must remain together and unmodified
+ // for code aging to work properly.
+ if (code_pre_aging) {
+ // Pre-age the code.
+ // This matches the code found in PatchPlatformCodeAge()
+ Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
+ intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
+ nop();
+ CleanseP(r14);
+ Push(r14);
+ mov(r2, Operand(target));
+ Call(r2);
+ for (int i = 0; i < kNoCodeAgeSequenceLength - kCodeAgingSequenceLength;
+ i += 2) {
+ // TODO(joransiu): Create nop function to pad
+ // (kNoCodeAgeSequenceLength - kCodeAgingSequenceLength) bytes.
+ nop(); // 2-byte nops().
+ }
+ } else {
+ // This matches the code found in GetNoCodeAgeSequence()
+ PushStandardFrame(r3);
+ }
+ }
+}
+
+void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
+ LoadP(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ LoadP(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
+ LoadP(vector,
+ FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+}
+
+void MacroAssembler::EnterFrame(StackFrame::Type type,
+ bool load_constant_pool_pointer_reg) {
+ // We create a stack frame with:
+ // Return Addr <-- old sp
+ // Old FP <-- new fp
+ // CP
+ // type
+ // CodeObject <-- new sp
+
+ LoadSmiLiteral(ip, Smi::FromInt(type));
+ PushCommonFrame(ip);
+
+ if (type == StackFrame::INTERNAL) {
+ mov(r0, Operand(CodeObject()));
+ push(r0);
+ }
+}
+
+int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
+ // Drop the execution stack down to the frame pointer and restore
+ // the caller frame pointer, return address and constant pool pointer.
+ LoadP(r14, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ lay(r1, MemOperand(
+ fp, StandardFrameConstants::kCallerSPOffset + stack_adjustment));
+ LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ LoadRR(sp, r1);
+ int frame_ends = pc_offset();
+ return frame_ends;
+}
+
+// ExitFrame layout (probably wrongish.. needs updating)
+//
+// SP -> previousSP
+// LK reserved
+// code
+// sp_on_exit (for debug?)
+// oldSP->prev SP
+// LK
+// <parameters on stack>
+
+// Prior to calling EnterExitFrame, we've got a bunch of parameters
+// on the stack that we need to wrap a real frame around.. so first
+// we reserve a slot for LK and push the previous SP which is captured
+// in the fp register (r11)
+// Then - we buy a new frame
+
+// r14
+// oldFP <- newFP
+// SP
+// Code
+// Floats
+// gaps
+// Args
+// ABIRes <- newSP
+void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
+ // Set up the frame structure on the stack.
+ DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
+ DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
+ DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
+ DCHECK(stack_space > 0);
+
+ // This is an opportunity to build a frame to wrap
+ // all of the pushes that have happened inside of V8
+ // since we were called from C code
+ CleanseP(r14);
+ LoadSmiLiteral(r1, Smi::FromInt(StackFrame::EXIT));
+ PushCommonFrame(r1);
+ // Reserve room for saved entry sp and code object.
+ lay(sp, MemOperand(fp, -ExitFrameConstants::kFixedFrameSizeFromFp));
+
+ if (emit_debug_code()) {
+ StoreP(MemOperand(fp, ExitFrameConstants::kSPOffset), Operand::Zero(), r1);
+ }
+ mov(r1, Operand(CodeObject()));
+ StoreP(r1, MemOperand(fp, ExitFrameConstants::kCodeOffset));
+
+ // Save the frame pointer and the context in top.
+ mov(r1, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ StoreP(fp, MemOperand(r1));
+ mov(r1, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+ StoreP(cp, MemOperand(r1));
+
+ // Optionally save all volatile double registers.
+ if (save_doubles) {
+ MultiPushDoubles(kCallerSavedDoubles);
+ // Note that d0 will be accessible at
+ // fp - ExitFrameConstants::kFrameSize -
+ // kNumCallerSavedDoubles * kDoubleSize,
+ // since the sp slot and code slot were pushed after the fp.
+ }
+
+ lay(sp, MemOperand(sp, -stack_space * kPointerSize));
+
+ // Allocate and align the frame preparing for calling the runtime
+ // function.
+ const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
+ if (frame_alignment > 0) {
+ DCHECK(frame_alignment == 8);
+ ClearRightImm(sp, sp, Operand(3)); // equivalent to &= -8
+ }
+
+ lay(sp, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));
+ StoreP(MemOperand(sp), Operand::Zero(), r0);
+ // Set the exit frame sp value to point just before the return address
+ // location.
+ lay(r1, MemOperand(sp, kStackFrameSPSlot * kPointerSize));
+ StoreP(r1, MemOperand(fp, ExitFrameConstants::kSPOffset));
+}
+
+void MacroAssembler::InitializeNewString(Register string, Register length,
+ Heap::RootListIndex map_index,
+ Register scratch1, Register scratch2) {
+ SmiTag(scratch1, length);
+ LoadRoot(scratch2, map_index);
+ StoreP(scratch1, FieldMemOperand(string, String::kLengthOffset));
+ StoreP(FieldMemOperand(string, String::kHashFieldSlot),
+ Operand(String::kEmptyHashField), scratch1);
+ StoreP(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
+}
+
+int MacroAssembler::ActivationFrameAlignment() {
+#if !defined(USE_SIMULATOR)
+ // Running on the real platform. Use the alignment as mandated by the local
+ // environment.
+ // Note: This will break if we ever start generating snapshots on one S390
+ // platform for another S390 platform with a different alignment.
+ return base::OS::ActivationFrameAlignment();
+#else // Simulated
+ // If we are using the simulator then we should always align to the expected
+ // alignment. As the simulator is used to generate snapshots we do not know
+ // if the target platform will need alignment, so this is controlled from a
+ // flag.
+ return FLAG_sim_stack_alignment;
+#endif
+}
+
+void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
+ bool restore_context,
+ bool argument_count_is_length) {
+ // Optionally restore all double registers.
+ if (save_doubles) {
+ // Calculate the stack location of the saved doubles and restore them.
+ const int kNumRegs = kNumCallerSavedDoubles;
+ lay(r5, MemOperand(fp, -(ExitFrameConstants::kFixedFrameSizeFromFp +
+ kNumRegs * kDoubleSize)));
+ MultiPopDoubles(kCallerSavedDoubles, r5);
+ }
+
+ // Clear top frame.
+ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ StoreP(MemOperand(ip), Operand(0, kRelocInfo_NONEPTR), r0);
+
+ // Restore current context from top and clear it in debug mode.
+ if (restore_context) {
+ mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+ LoadP(cp, MemOperand(ip));
+ }
+#ifdef DEBUG
+ mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+ StoreP(MemOperand(ip), Operand(0, kRelocInfo_NONEPTR), r0);
+#endif
+
+ // Tear down the exit frame, pop the arguments, and return.
+ LeaveFrame(StackFrame::EXIT);
+
+ if (argument_count.is_valid()) {
+ if (!argument_count_is_length) {
+ ShiftLeftP(argument_count, argument_count, Operand(kPointerSizeLog2));
+ }
+ la(sp, MemOperand(sp, argument_count));
+ }
+}
+
+void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
+ Move(dst, d0);
+}
+
+void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
+ Move(dst, d0);
+}
+
+void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
+ Register caller_args_count_reg,
+ Register scratch0, Register scratch1) {
+#if DEBUG
+ if (callee_args_count.is_reg()) {
+ DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
+ scratch1));
+ } else {
+ DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
+ }
+#endif
+
+ // Calculate the end of destination area where we will put the arguments
+ // after we drop current frame. We AddP kPointerSize to count the receiver
+ // argument which is not included into formal parameters count.
+ Register dst_reg = scratch0;
+ ShiftLeftP(dst_reg, caller_args_count_reg, Operand(kPointerSizeLog2));
+ AddP(dst_reg, fp, dst_reg);
+ AddP(dst_reg, dst_reg,
+ Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+
+ Register src_reg = caller_args_count_reg;
+ // Calculate the end of source area. +kPointerSize is for the receiver.
+ if (callee_args_count.is_reg()) {
+ ShiftLeftP(src_reg, callee_args_count.reg(), Operand(kPointerSizeLog2));
+ AddP(src_reg, sp, src_reg);
+ AddP(src_reg, src_reg, Operand(kPointerSize));
+ } else {
+ mov(src_reg, Operand((callee_args_count.immediate() + 1) * kPointerSize));
+ AddP(src_reg, src_reg, sp);
+ }
+
+ if (FLAG_debug_code) {
+ CmpLogicalP(src_reg, dst_reg);
+ Check(lt, kStackAccessBelowStackPointer);
+ }
+
+ // Restore caller's frame pointer and return address now as they will be
+ // overwritten by the copying loop.
+ RestoreFrameStateForTailCall();
+
+ // Now copy callee arguments to the caller frame going backwards to avoid
+ // callee arguments corruption (source and destination areas could overlap).
+
+ // Both src_reg and dst_reg are pointing to the word after the one to copy,
+ // so they must be pre-decremented in the loop.
+ Register tmp_reg = scratch1;
+ Label loop;
+ if (callee_args_count.is_reg()) {
+ AddP(tmp_reg, callee_args_count.reg(), Operand(1)); // +1 for receiver
+ } else {
+ mov(tmp_reg, Operand(callee_args_count.immediate() + 1));
+ }
+ LoadRR(r1, tmp_reg);
+ bind(&loop);
+ LoadP(tmp_reg, MemOperand(src_reg, -kPointerSize));
+ StoreP(tmp_reg, MemOperand(dst_reg, -kPointerSize));
+ lay(src_reg, MemOperand(src_reg, -kPointerSize));
+ lay(dst_reg, MemOperand(dst_reg, -kPointerSize));
+ BranchOnCount(r1, &loop);
+
+ // Leave current frame.
+ LoadRR(sp, dst_reg);
+}
+
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+ const ParameterCount& actual, Label* done,
+ bool* definitely_mismatches,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ bool definitely_matches = false;
+ *definitely_mismatches = false;
+ Label regular_invoke;
+
+ // Check whether the expected and actual arguments count match. If not,
+ // setup registers according to contract with ArgumentsAdaptorTrampoline:
+ // r2: actual arguments count
+ // r3: function (passed through to callee)
+ // r4: expected arguments count
+
+ // The code below is made a lot easier because the calling code already sets
+ // up actual and expected registers according to the contract if values are
+ // passed in registers.
+
+ // ARM has some sanity checks as per below, considering add them for S390
+ // DCHECK(actual.is_immediate() || actual.reg().is(r2));
+ // DCHECK(expected.is_immediate() || expected.reg().is(r4));
+
+ if (expected.is_immediate()) {
+ DCHECK(actual.is_immediate());
+ mov(r2, Operand(actual.immediate()));
+ if (expected.immediate() == actual.immediate()) {
+ definitely_matches = true;
+ } else {
+ const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+ if (expected.immediate() == sentinel) {
+ // Don't worry about adapting arguments for builtins that
+ // don't want that done. Skip adaption code by making it look
+ // like we have a match between expected and actual number of
+ // arguments.
+ definitely_matches = true;
+ } else {
+ *definitely_mismatches = true;
+ mov(r4, Operand(expected.immediate()));
+ }
+ }
+ } else {
+ if (actual.is_immediate()) {
+ mov(r2, Operand(actual.immediate()));
+ CmpPH(expected.reg(), Operand(actual.immediate()));
+ beq(&regular_invoke);
+ } else {
+ CmpP(expected.reg(), actual.reg());
+ beq(&regular_invoke);
+ }
+ }
+
+ if (!definitely_matches) {
+ Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(adaptor));
+ Call(adaptor);
+ call_wrapper.AfterCall();
+ if (!*definitely_mismatches) {
+ b(done);
+ }
+ } else {
+ Jump(adaptor, RelocInfo::CODE_TARGET);
+ }
+ bind(&regular_invoke);
+ }
+}
+
+void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ Label skip_flooding;
+ ExternalReference step_in_enabled =
+ ExternalReference::debug_step_in_enabled_address(isolate());
+ mov(r6, Operand(step_in_enabled));
+ LoadlB(r6, MemOperand(r6));
+ CmpP(r6, Operand::Zero());
+ beq(&skip_flooding);
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ if (expected.is_reg()) {
+ SmiTag(expected.reg());
+ Push(expected.reg());
+ }
+ if (actual.is_reg()) {
+ SmiTag(actual.reg());
+ Push(actual.reg());
+ }
+ if (new_target.is_valid()) {
+ Push(new_target);
+ }
+ Push(fun, fun);
+ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ Pop(fun);
+ if (new_target.is_valid()) {
+ Pop(new_target);
+ }
+ if (actual.is_reg()) {
+ Pop(actual.reg());
+ SmiUntag(actual.reg());
+ }
+ if (expected.is_reg()) {
+ Pop(expected.reg());
+ SmiUntag(expected.reg());
+ }
+ }
+ bind(&skip_flooding);
+}
+
+void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // You can't call a function without a valid frame.
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
+
+ DCHECK(function.is(r3));
+ DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r5));
+
+ if (call_wrapper.NeedsDebugStepCheck()) {
+ FloodFunctionIfStepping(function, new_target, expected, actual);
+ }
+
+ // Clear the new.target register if not given.
+ if (!new_target.is_valid()) {
+ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ }
+
+ Label done;
+ bool definitely_mismatches = false;
+ InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
+ call_wrapper);
+ if (!definitely_mismatches) {
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ Register code = ip;
+ LoadP(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(code));
+ CallJSEntry(code);
+ call_wrapper.AfterCall();
+ } else {
+ DCHECK(flag == JUMP_FUNCTION);
+ JumpToJSEntry(code);
+ }
+
+ // Continue here if InvokePrologue does handle the invocation due to
+ // mismatched parameter counts.
+ bind(&done);
+ }
+}
+
+void MacroAssembler::InvokeFunction(Register fun, Register new_target,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // You can't call a function without a valid frame.
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
+
+ // Contract with called JS functions requires that function is passed in r3.
+ DCHECK(fun.is(r3));
+
+ Register expected_reg = r4;
+ Register temp_reg = r6;
+ LoadP(temp_reg, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
+ LoadW(expected_reg,
+ FieldMemOperand(temp_reg,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+#if !defined(V8_TARGET_ARCH_S390X)
+ SmiUntag(expected_reg);
+#endif
+
+ ParameterCount expected(expected_reg);
+ InvokeFunctionCode(fun, new_target, expected, actual, flag, call_wrapper);
+}
+
+void MacroAssembler::InvokeFunction(Register function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // You can't call a function without a valid frame.
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
+
+ // Contract with called JS functions requires that function is passed in r3.
+ DCHECK(function.is(r3));
+
+ // Get the function and setup the context.
+ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
+
+ InvokeFunctionCode(r3, no_reg, expected, actual, flag, call_wrapper);
+}
+
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ Move(r3, function);
+ InvokeFunction(r3, expected, actual, flag, call_wrapper);
+}
+
+void MacroAssembler::IsObjectJSStringType(Register object, Register scratch,
+ Label* fail) {
+ DCHECK(kNotStringTag != 0);
+
+ LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ LoadlB(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ mov(r0, Operand(kIsNotStringMask));
+ AndP(r0, scratch);
+ bne(fail);
+}
+
+void MacroAssembler::IsObjectNameType(Register object, Register scratch,
+ Label* fail) {
+ LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ LoadlB(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ CmpP(scratch, Operand(LAST_NAME_TYPE));
+ bgt(fail);
+}
+
+void MacroAssembler::DebugBreak() {
+ LoadImmP(r2, Operand::Zero());
+ mov(r3,
+ Operand(ExternalReference(Runtime::kHandleDebuggerStatement, isolate())));
+ CEntryStub ces(isolate(), 1);
+ DCHECK(AllowThisStubCall(&ces));
+ Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
+}
+
+void MacroAssembler::PushStackHandler() {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+
+ // Link the current handler as the next handler.
+ mov(r7, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+
+ // Buy the full stack frame for 5 slots.
+ lay(sp, MemOperand(sp, -StackHandlerConstants::kSize));
+
+ // Copy the old handler into the next handler slot.
+ mvc(MemOperand(sp, StackHandlerConstants::kNextOffset), MemOperand(r7),
+ kPointerSize);
+ // Set this new handler as the current one.
+ StoreP(sp, MemOperand(r7));
+}
+
+void MacroAssembler::PopStackHandler() {
+ STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+
+ // Pop the Next Handler into r3 and store it into Handler Address reference.
+ Pop(r3);
+ mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+
+ StoreP(r3, MemOperand(ip));
+}
+
+void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
+ Register scratch, Label* miss) {
+ Label same_contexts;
+
+ DCHECK(!holder_reg.is(scratch));
+ DCHECK(!holder_reg.is(ip));
+ DCHECK(!scratch.is(ip));
+
+ // Load current lexical context from the active StandardFrame, which
+ // may require crawling past STUB frames.
+ Label load_context;
+ Label has_context;
+ DCHECK(!ip.is(scratch));
+ LoadRR(ip, fp);
+ bind(&load_context);
+ LoadP(scratch,
+ MemOperand(ip, CommonFrameConstants::kContextOrFrameTypeOffset));
+ JumpIfNotSmi(scratch, &has_context);
+ LoadP(ip, MemOperand(ip, CommonFrameConstants::kCallerFPOffset));
+ b(&load_context);
+ bind(&has_context);
+
+// In debug mode, make sure the lexical context is set.
+#ifdef DEBUG
+ CmpP(scratch, Operand::Zero());
+ Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
+#endif
+
+ // Load the native context of the current context.
+ LoadP(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
+
+ // Check the context is a native context.
+ if (emit_debug_code()) {
+ // Cannot use ip as a temporary in this verification code. Due to the fact
+ // that ip is clobbered as part of cmp with an object Operand.
+ push(holder_reg); // Temporarily save holder on the stack.
+ // Read the first word and compare to the native_context_map.
+ LoadP(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
+ CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
+ Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
+ pop(holder_reg); // Restore holder.
+ }
+
+ // Check if both contexts are the same.
+ LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
+ CmpP(scratch, ip);
+ beq(&same_contexts, Label::kNear);
+
+ // Check the context is a native context.
+ if (emit_debug_code()) {
+ // TODO(119): avoid push(holder_reg)/pop(holder_reg)
+ // Cannot use ip as a temporary in this verification code. Due to the fact
+ // that ip is clobbered as part of cmp with an object Operand.
+ push(holder_reg); // Temporarily save holder on the stack.
+ LoadRR(holder_reg, ip); // Move ip to its holding place.
+ CompareRoot(holder_reg, Heap::kNullValueRootIndex);
+ Check(ne, kJSGlobalProxyContextShouldNotBeNull);
+
+ LoadP(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
+ CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
+ Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
+ // Restore ip is not needed. ip is reloaded below.
+ pop(holder_reg); // Restore holder.
+ // Restore ip to holder's context.
+ LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
+ }
+
+ // Check that the security token in the calling global object is
+ // compatible with the security token in the receiving global
+ // object.
+ int token_offset =
+ Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
+
+ LoadP(scratch, FieldMemOperand(scratch, token_offset));
+ LoadP(ip, FieldMemOperand(ip, token_offset));
+ CmpP(scratch, ip);
+ bne(miss);
+
+ bind(&same_contexts);
+}
+
+// Compute the hash code from the untagged key. This must be kept in sync with
+// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
+// code-stub-hydrogen.cc
+void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
+ // First of all we assign the hash seed to scratch.
+ LoadRoot(scratch, Heap::kHashSeedRootIndex);
+ SmiUntag(scratch);
+
+ // Xor original key with a seed.
+ XorP(t0, scratch);
+
+ // Compute the hash code from the untagged key. This must be kept in sync
+ // with ComputeIntegerHash in utils.h.
+ //
+ // hash = ~hash + (hash << 15);
+ LoadRR(scratch, t0);
+ NotP(scratch);
+ sll(t0, Operand(15));
+ AddP(t0, scratch, t0);
+ // hash = hash ^ (hash >> 12);
+ ShiftRight(scratch, t0, Operand(12));
+ XorP(t0, scratch);
+ // hash = hash + (hash << 2);
+ ShiftLeft(scratch, t0, Operand(2));
+ AddP(t0, t0, scratch);
+ // hash = hash ^ (hash >> 4);
+ ShiftRight(scratch, t0, Operand(4));
+ XorP(t0, scratch);
+ // hash = hash * 2057;
+ LoadRR(r0, t0);
+ ShiftLeft(scratch, t0, Operand(3));
+ AddP(t0, t0, scratch);
+ ShiftLeft(scratch, r0, Operand(11));
+ AddP(t0, t0, scratch);
+ // hash = hash ^ (hash >> 16);
+ ShiftRight(scratch, t0, Operand(16));
+ XorP(t0, scratch);
+ // hash & 0x3fffffff
+ ExtractBitRange(t0, t0, 29, 0);
+}
+
+void MacroAssembler::LoadFromNumberDictionary(Label* miss, Register elements,
+ Register key, Register result,
+ Register t0, Register t1,
+ Register t2) {
+ // Register use:
+ //
+ // elements - holds the slow-case elements of the receiver on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // result - holds the result on exit if the load succeeded.
+ // Allowed to be the same as 'key' or 'result'.
+ // Unchanged on bailout so 'key' or 'result' can be used
+ // in further computation.
+ //
+ // Scratch registers:
+ //
+ // t0 - holds the untagged key on entry and holds the hash once computed.
+ //
+ // t1 - used to hold the capacity mask of the dictionary
+ //
+ // t2 - used for the index into the dictionary.
+ Label done;
+
+ GetNumberHash(t0, t1);
+
+ // Compute the capacity mask.
+ LoadP(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
+ SmiUntag(t1);
+ SubP(t1, Operand(1));
+
+ // Generate an unrolled loop that performs a few probes before giving up.
+ for (int i = 0; i < kNumberDictionaryProbes; i++) {
+ // Use t2 for index calculations and keep the hash intact in t0.
+ LoadRR(t2, t0);
+ // Compute the masked index: (hash + i + i * i) & mask.
+ if (i > 0) {
+ AddP(t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
+ }
+ AndP(t2, t1);
+
+ // Scale the index by multiplying by the element size.
+ DCHECK(SeededNumberDictionary::kEntrySize == 3);
+ LoadRR(ip, t2);
+ sll(ip, Operand(1));
+ AddP(t2, ip); // t2 = t2 * 3
+
+ // Check if the key is identical to the name.
+ sll(t2, Operand(kPointerSizeLog2));
+ AddP(t2, elements);
+ LoadP(ip,
+ FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
+ CmpP(key, ip);
+ if (i != kNumberDictionaryProbes - 1) {
+ beq(&done, Label::kNear);
+ } else {
+ bne(miss);
+ }
+ }
+
+ bind(&done);
+ // Check that the value is a field property.
+ // t2: elements + (index * kPointerSize)
+ const int kDetailsOffset =
+ SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+ LoadP(t1, FieldMemOperand(t2, kDetailsOffset));
+ LoadSmiLiteral(ip, Smi::FromInt(PropertyDetails::TypeField::kMask));
+ DCHECK_EQ(DATA, 0);
+ AndP(r0, ip, t1);
+ bne(miss);
+
+ // Get the value at the masked, scaled index and return.
+ const int kValueOffset =
+ SeededNumberDictionary::kElementsStartOffset + kPointerSize;
+ LoadP(result, FieldMemOperand(t2, kValueOffset));
+}
+
+void MacroAssembler::Allocate(int object_size, Register result,
+ Register scratch1, Register scratch2,
+ Label* gc_required, AllocationFlags flags) {
+ DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ if (!FLAG_inline_new) {
+ if (emit_debug_code()) {
+ // Trash the registers to simulate an allocation failure.
+ LoadImmP(result, Operand(0x7091));
+ LoadImmP(scratch1, Operand(0x7191));
+ LoadImmP(scratch2, Operand(0x7291));
+ }
+ b(gc_required);
+ return;
+ }
+
+ DCHECK(!AreAliased(result, scratch1, scratch2, ip));
+
+ // Make object size into bytes.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ object_size *= kPointerSize;
+ }
+ DCHECK_EQ(0, static_cast<int>(object_size & kObjectAlignmentMask));
+
+ // Check relative positions of allocation top and limit addresses.
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+
+ intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
+ intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
+ DCHECK((limit - top) == kPointerSize);
+
+ // Set up allocation top address register.
+ Register top_address = scratch1;
+ // This code stores a temporary value in ip. This is OK, as the code below
+ // does not need ip for implicit literal generation.
+ Register alloc_limit = ip;
+ Register result_end = scratch2;
+ mov(top_address, Operand(allocation_top));
+
+ if ((flags & RESULT_CONTAINS_TOP) == 0) {
+ // Load allocation top into result and allocation limit into ip.
+ LoadP(result, MemOperand(top_address));
+ LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
+ } else {
+ if (emit_debug_code()) {
+ // Assert that result actually contains top on entry.
+ LoadP(alloc_limit, MemOperand(top_address));
+ CmpP(result, alloc_limit);
+ Check(eq, kUnexpectedAllocationTop);
+ }
+ // Load allocation limit. Result already contains allocation top.
+ LoadP(alloc_limit, MemOperand(top_address, limit - top));
+ }
+
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+// Align the next allocation. Storing the filler map without checking top is
+// safe in new-space because the limit of the heap is aligned there.
+#if V8_TARGET_ARCH_S390X
+ STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+#else
+ STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ AndP(result_end, result, Operand(kDoubleAlignmentMask));
+ Label aligned;
+ beq(&aligned);
+ if ((flags & PRETENURE) != 0) {
+ CmpLogicalP(result, alloc_limit);
+ bge(gc_required);
+ }
+ mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+ StoreW(result_end, MemOperand(result));
+ AddP(result, result, Operand(kDoubleSize / 2));
+ bind(&aligned);
+#endif
+ }
+
+ // Calculate new top and bail out if new space is exhausted. Use result
+ // to calculate the new top.
+ SubP(r0, alloc_limit, result);
+ if (is_int16(object_size)) {
+ CmpP(r0, Operand(object_size));
+ blt(gc_required);
+ AddP(result_end, result, Operand(object_size));
+ } else {
+ mov(result_end, Operand(object_size));
+ CmpP(r0, result_end);
+ blt(gc_required);
+ AddP(result_end, result, result_end);
+ }
+ StoreP(result_end, MemOperand(top_address));
+
+ // Tag object if requested.
+ if ((flags & TAG_OBJECT) != 0) {
+ AddP(result, result, Operand(kHeapObjectTag));
+ }
+}
+
+void MacroAssembler::Allocate(Register object_size, Register result,
+ Register result_end, Register scratch,
+ Label* gc_required, AllocationFlags flags) {
+ if (!FLAG_inline_new) {
+ if (emit_debug_code()) {
+ // Trash the registers to simulate an allocation failure.
+ LoadImmP(result, Operand(0x7091));
+ LoadImmP(scratch, Operand(0x7191));
+ LoadImmP(result_end, Operand(0x7291));
+ }
+ b(gc_required);
+ return;
+ }
+
+ // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
+ // is not specified. Other registers must not overlap.
+ DCHECK(!AreAliased(object_size, result, scratch, ip));
+ DCHECK(!AreAliased(result_end, result, scratch, ip));
+ DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
+
+ // Check relative positions of allocation top and limit addresses.
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+ intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
+ intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
+ DCHECK((limit - top) == kPointerSize);
+
+ // Set up allocation top address and allocation limit registers.
+ Register top_address = scratch;
+ // This code stores a temporary value in ip. This is OK, as the code below
+ // does not need ip for implicit literal generation.
+ Register alloc_limit = ip;
+ mov(top_address, Operand(allocation_top));
+
+ if ((flags & RESULT_CONTAINS_TOP) == 0) {
+ // Load allocation top into result and allocation limit into alloc_limit..
+ LoadP(result, MemOperand(top_address));
+ LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
+ } else {
+ if (emit_debug_code()) {
+ // Assert that result actually contains top on entry.
+ LoadP(alloc_limit, MemOperand(top_address));
+ CmpP(result, alloc_limit);
+ Check(eq, kUnexpectedAllocationTop);
+ }
+ // Load allocation limit. Result already contains allocation top.
+ LoadP(alloc_limit, MemOperand(top_address, limit - top));
+ }
+
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+// Align the next allocation. Storing the filler map without checking top is
+// safe in new-space because the limit of the heap is aligned there.
+#if V8_TARGET_ARCH_S390X
+ STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+#else
+ STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ AndP(result_end, result, Operand(kDoubleAlignmentMask));
+ Label aligned;
+ beq(&aligned);
+ if ((flags & PRETENURE) != 0) {
+ CmpLogicalP(result, alloc_limit);
+ bge(gc_required);
+ }
+ mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+ StoreW(result_end, MemOperand(result));
+ AddP(result, result, Operand(kDoubleSize / 2));
+ bind(&aligned);
+#endif
+ }
+
+ // Calculate new top and bail out if new space is exhausted. Use result
+ // to calculate the new top. Object size may be in words so a shift is
+ // required to get the number of bytes.
+ SubP(r0, alloc_limit, result);
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ ShiftLeftP(result_end, object_size, Operand(kPointerSizeLog2));
+ CmpP(r0, result_end);
+ blt(gc_required);
+ AddP(result_end, result, result_end);
+ } else {
+ CmpP(r0, object_size);
+ blt(gc_required);
+ AddP(result_end, result, object_size);
+ }
+
+ // Update allocation top. result temporarily holds the new top.
+ if (emit_debug_code()) {
+ AndP(r0, result_end, Operand(kObjectAlignmentMask));
+ Check(eq, kUnalignedAllocationInNewSpace, cr0);
+ }
+ StoreP(result_end, MemOperand(top_address));
+
+ // Tag object if requested.
+ if ((flags & TAG_OBJECT) != 0) {
+ AddP(result, result, Operand(kHeapObjectTag));
+ }
+}
+
+void MacroAssembler::AllocateTwoByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
+ // Calculate the number of bytes needed for the characters in the string while
+ // observing object alignment.
+ DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+
+ ShiftLeftP(scratch1, length, Operand(1)); // Length in bytes, not chars.
+ AddP(scratch1, Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
+
+ AndP(scratch1, Operand(~kObjectAlignmentMask));
+
+ // Allocate two-byte string in new space.
+ Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ InitializeNewString(result, length, Heap::kStringMapRootIndex, scratch1,
+ scratch2);
+}
+
+void MacroAssembler::AllocateOneByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
+ // Calculate the number of bytes needed for the characters in the string while
+ // observing object alignment.
+ DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ DCHECK(kCharSize == 1);
+ AddP(scratch1, length,
+ Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
+ AndP(scratch1, Operand(~kObjectAlignmentMask));
+
+ // Allocate one-byte string in new space.
+ Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
+ scratch1, scratch2);
+}
+
+void MacroAssembler::AllocateTwoByteConsString(Register result, Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+
+ InitializeNewString(result, length, Heap::kConsStringMapRootIndex, scratch1,
+ scratch2);
+}
+
+void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+
+ InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
+ scratch1, scratch2);
+}
+
+void MacroAssembler::AllocateTwoByteSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+
+ InitializeNewString(result, length, Heap::kSlicedStringMapRootIndex, scratch1,
+ scratch2);
+}
+
+void MacroAssembler::AllocateOneByteSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+
+ InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
+ scratch1, scratch2);
+}
+
+void MacroAssembler::CompareObjectType(Register object, Register map,
+ Register type_reg, InstanceType type) {
+ const Register temp = type_reg.is(no_reg) ? r0 : type_reg;
+
+ LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(map, temp, type);
+}
+
+void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
+ InstanceType type) {
+ STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
+ STATIC_ASSERT(LAST_TYPE < 256);
+ LoadlB(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ CmpP(type_reg, Operand(type));
+}
+
+void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
+ CmpP(obj, MemOperand(kRootRegister, index << kPointerSizeLog2));
+}
+
+void MacroAssembler::CheckFastElements(Register map, Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(Map::kMaximumBitField2FastHoleyElementValue < 0x8000);
+ CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
+ Operand(Map::kMaximumBitField2FastHoleyElementValue));
+ bgt(fail);
+}
+
+void MacroAssembler::CheckFastObjectElements(Register map, Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
+ Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
+ ble(fail);
+ CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
+ Operand(Map::kMaximumBitField2FastHoleyElementValue));
+ bgt(fail);
+}
+
+void MacroAssembler::CheckFastSmiElements(Register map, Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
+ Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
+ bgt(fail);
+}
+
+void MacroAssembler::SmiToDouble(DoubleRegister value, Register smi) {
+ SmiUntag(ip, smi);
+ ConvertIntToDouble(ip, value);
+}
+void MacroAssembler::StoreNumberToDoubleElements(
+ Register value_reg, Register key_reg, Register elements_reg,
+ Register scratch1, DoubleRegister double_scratch, Label* fail,
+ int elements_offset) {
+ DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
+ Label smi_value, store;
+
+ // Handle smi values specially.
+ JumpIfSmi(value_reg, &smi_value);
+
+ // Ensure that the object is a heap number
+ CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(), fail,
+ DONT_DO_SMI_CHECK);
+
+ LoadDouble(double_scratch,
+ FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+ // Force a canonical NaN.
+ CanonicalizeNaN(double_scratch);
+ b(&store);
+
+ bind(&smi_value);
+ SmiToDouble(double_scratch, value_reg);
+
+ bind(&store);
+ SmiToDoubleArrayOffset(scratch1, key_reg);
+ StoreDouble(double_scratch,
+ FieldMemOperand(elements_reg, scratch1,
+ FixedDoubleArray::kHeaderSize - elements_offset));
+}
+
+void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
+ Register right,
+ Register overflow_dst,
+ Register scratch) {
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!dst.is(scratch));
+ DCHECK(!overflow_dst.is(scratch));
+ DCHECK(!overflow_dst.is(left));
+ DCHECK(!overflow_dst.is(right));
+
+ // TODO(joransiu): Optimize paths for left == right.
+ bool left_is_right = left.is(right);
+
+ // C = A+B; C overflows if A/B have same sign and C has diff sign than A
+ if (dst.is(left)) {
+ LoadRR(scratch, left); // Preserve left.
+ AddP(dst, left, right); // Left is overwritten.
+ XorP(overflow_dst, scratch, dst); // Original left.
+ if (!left_is_right) XorP(scratch, dst, right);
+ } else if (dst.is(right)) {
+ LoadRR(scratch, right); // Preserve right.
+ AddP(dst, left, right); // Right is overwritten.
+ XorP(overflow_dst, dst, left);
+ if (!left_is_right) XorP(scratch, dst, scratch);
+ } else {
+ AddP(dst, left, right);
+ XorP(overflow_dst, dst, left);
+ if (!left_is_right) XorP(scratch, dst, right);
+ }
+ if (!left_is_right) AndP(overflow_dst, scratch, overflow_dst);
+ LoadAndTestRR(overflow_dst, overflow_dst);
+}
+
+void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
+ intptr_t right,
+ Register overflow_dst,
+ Register scratch) {
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!dst.is(scratch));
+ DCHECK(!overflow_dst.is(scratch));
+ DCHECK(!overflow_dst.is(left));
+
+ mov(r1, Operand(right));
+ AddAndCheckForOverflow(dst, left, r1, overflow_dst, scratch);
+}
+
+void MacroAssembler::SubAndCheckForOverflow(Register dst, Register left,
+ Register right,
+ Register overflow_dst,
+ Register scratch) {
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!dst.is(scratch));
+ DCHECK(!overflow_dst.is(scratch));
+ DCHECK(!overflow_dst.is(left));
+ DCHECK(!overflow_dst.is(right));
+
+ // C = A-B; C overflows if A/B have diff signs and C has diff sign than A
+ if (dst.is(left)) {
+ LoadRR(scratch, left); // Preserve left.
+ SubP(dst, left, right); // Left is overwritten.
+ XorP(overflow_dst, dst, scratch);
+ XorP(scratch, right);
+ AndP(overflow_dst, scratch /*, SetRC*/);
+ LoadAndTestRR(overflow_dst, overflow_dst);
+ // Should be okay to remove rc
+ } else if (dst.is(right)) {
+ LoadRR(scratch, right); // Preserve right.
+ SubP(dst, left, right); // Right is overwritten.
+ XorP(overflow_dst, dst, left);
+ XorP(scratch, left);
+ AndP(overflow_dst, scratch /*, SetRC*/);
+ LoadAndTestRR(overflow_dst, overflow_dst);
+ // Should be okay to remove rc
+ } else {
+ SubP(dst, left, right);
+ XorP(overflow_dst, dst, left);
+ XorP(scratch, left, right);
+ AndP(overflow_dst, scratch /*, SetRC*/);
+ LoadAndTestRR(overflow_dst, overflow_dst);
+ // Should be okay to remove rc
+ }
+}
+
+void MacroAssembler::CompareMap(Register obj, Register scratch, Handle<Map> map,
+ Label* early_success) {
+ LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ CompareMap(obj, map, early_success);
+}
+
+void MacroAssembler::CompareMap(Register obj_map, Handle<Map> map,
+ Label* early_success) {
+ mov(r0, Operand(map));
+ CmpP(r0, FieldMemOperand(obj_map, HeapObject::kMapOffset));
+}
+
+void MacroAssembler::CheckMap(Register obj, Register scratch, Handle<Map> map,
+ Label* fail, SmiCheckType smi_check_type) {
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, fail);
+ }
+
+ Label success;
+ CompareMap(obj, scratch, map, &success);
+ bne(fail);
+ bind(&success);
+}
+
+void MacroAssembler::CheckMap(Register obj, Register scratch,
+ Heap::RootListIndex index, Label* fail,
+ SmiCheckType smi_check_type) {
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, fail);
+ }
+ LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ CompareRoot(scratch, index);
+ bne(fail);
+}
+
+void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
+ Register scratch2, Handle<WeakCell> cell,
+ Handle<Code> success,
+ SmiCheckType smi_check_type) {
+ Label fail;
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, &fail);
+ }
+ LoadP(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
+ CmpWeakValue(scratch1, cell, scratch2);
+ Jump(success, RelocInfo::CODE_TARGET, eq);
+ bind(&fail);
+}
+
+void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
+ Register scratch, CRegister) {
+ mov(scratch, Operand(cell));
+ CmpP(value, FieldMemOperand(scratch, WeakCell::kValueOffset));
+}
+
+void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
+ mov(value, Operand(cell));
+ LoadP(value, FieldMemOperand(value, WeakCell::kValueOffset));
+}
+
+void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
+ Label* miss) {
+ GetWeakValue(value, cell);
+ JumpIfSmi(value, miss);
+}
+
+void MacroAssembler::GetMapConstructor(Register result, Register map,
+ Register temp, Register temp2) {
+ Label done, loop;
+ LoadP(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
+ bind(&loop);
+ JumpIfSmi(result, &done);
+ CompareObjectType(result, temp, temp2, MAP_TYPE);
+ bne(&done);
+ LoadP(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
+ b(&loop);
+ bind(&done);
+}
+
+void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
+ Register scratch, Label* miss) {
+ // Get the prototype or initial map from the function.
+ LoadP(result,
+ FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // If the prototype or initial map is the hole, don't return it and
+ // simply miss the cache instead. This will allow us to allocate a
+ // prototype object on-demand in the runtime system.
+ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+ beq(miss);
+
+ // If the function does not have an initial map, we're done.
+ Label done;
+ CompareObjectType(result, scratch, scratch, MAP_TYPE);
+ bne(&done, Label::kNear);
+
+ // Get the prototype from the initial map.
+ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
+
+ // All done.
+ bind(&done);
+}
+
+void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id,
+ Condition cond) {
+ DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
+}
+
+void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
+ Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
+}
+
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+ return has_frame_ || !stub->SometimesSetsUpAFrame();
+}
+
+void MacroAssembler::IndexFromHash(Register hash, Register index) {
+ // If the hash field contains an array index pick it out. The assert checks
+ // that the constants for the maximum number of digits for an array index
+ // cached in the hash field and the number of bits reserved for it does not
+ // conflict.
+ DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << String::kArrayIndexValueBits));
+ DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
+}
+
+void MacroAssembler::TestDoubleIsInt32(DoubleRegister double_input,
+ Register scratch1, Register scratch2,
+ DoubleRegister double_scratch) {
+ TryDoubleToInt32Exact(scratch1, double_input, scratch2, double_scratch);
+}
+
+void MacroAssembler::TestDoubleIsMinusZero(DoubleRegister input,
+ Register scratch1,
+ Register scratch2) {
+ lgdr(scratch1, input);
+#if V8_TARGET_ARCH_S390X
+ llihf(scratch2, Operand(0x80000000)); // scratch2 = 0x80000000_00000000
+ CmpP(scratch1, scratch2);
+#else
+ Label done;
+ CmpP(scratch1, Operand::Zero());
+ bne(&done, Label::kNear);
+
+ srlg(scratch1, scratch1, Operand(32));
+ CmpP(scratch1, Operand(HeapNumber::kSignMask));
+ bind(&done);
+#endif
+}
+
+void MacroAssembler::TestDoubleSign(DoubleRegister input, Register scratch) {
+ lgdr(scratch, input);
+ cgfi(scratch, Operand::Zero());
+}
+
+void MacroAssembler::TestHeapNumberSign(Register input, Register scratch) {
+ LoadlW(scratch, FieldMemOperand(input, HeapNumber::kValueOffset +
+ Register::kExponentOffset));
+ Cmp32(scratch, Operand::Zero());
+}
+
+void MacroAssembler::TryDoubleToInt32Exact(Register result,
+ DoubleRegister double_input,
+ Register scratch,
+ DoubleRegister double_scratch) {
+ Label done;
+ DCHECK(!double_input.is(double_scratch));
+
+ ConvertDoubleToInt64(double_input,
+#if !V8_TARGET_ARCH_S390X
+ scratch,
+#endif
+ result, double_scratch);
+
+#if V8_TARGET_ARCH_S390X
+ TestIfInt32(result, r0);
+#else
+ TestIfInt32(scratch, result, r0);
+#endif
+ bne(&done);
+
+ // convert back and compare
+ lgdr(scratch, double_scratch);
+ cdfbr(double_scratch, scratch);
+ cdbr(double_scratch, double_input);
+ bind(&done);
+}
+
+void MacroAssembler::TryInt32Floor(Register result, DoubleRegister double_input,
+ Register input_high, Register scratch,
+ DoubleRegister double_scratch, Label* done,
+ Label* exact) {
+ DCHECK(!result.is(input_high));
+ DCHECK(!double_input.is(double_scratch));
+ Label exception;
+
+ // Move high word into input_high
+ lay(sp, MemOperand(sp, -kDoubleSize));
+ StoreDouble(double_input, MemOperand(sp));
+ LoadlW(input_high, MemOperand(sp, Register::kExponentOffset));
+ la(sp, MemOperand(sp, kDoubleSize));
+
+ // Test for NaN/Inf
+ ExtractBitMask(result, input_high, HeapNumber::kExponentMask);
+ CmpLogicalP(result, Operand(0x7ff));
+ beq(&exception);
+
+ // Convert (rounding to -Inf)
+ ConvertDoubleToInt64(double_input,
+#if !V8_TARGET_ARCH_S390X
+ scratch,
+#endif
+ result, double_scratch, kRoundToMinusInf);
+
+// Test for overflow
+#if V8_TARGET_ARCH_S390X
+ TestIfInt32(result, r0);
+#else
+ TestIfInt32(scratch, result, r0);
+#endif
+ bne(&exception);
+
+ // Test for exactness
+ lgdr(scratch, double_scratch);
+ cdfbr(double_scratch, scratch);
+ cdbr(double_scratch, double_input);
+ beq(exact);
+ b(done);
+
+ bind(&exception);
+}
+
+void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
+ DoubleRegister double_input,
+ Label* done) {
+ DoubleRegister double_scratch = kScratchDoubleReg;
+#if !V8_TARGET_ARCH_S390X
+ Register scratch = ip;
+#endif
+
+ ConvertDoubleToInt64(double_input,
+#if !V8_TARGET_ARCH_S390X
+ scratch,
+#endif
+ result, double_scratch);
+
+// Test for overflow
+#if V8_TARGET_ARCH_S390X
+ TestIfInt32(result, r0);
+#else
+ TestIfInt32(scratch, result, r0);
+#endif
+ beq(done);
+}
+
+void MacroAssembler::TruncateDoubleToI(Register result,
+ DoubleRegister double_input) {
+ Label done;
+
+ TryInlineTruncateDoubleToI(result, double_input, &done);
+
+ // If we fell through then inline version didn't succeed - call stub instead.
+ push(r14);
+ // Put input on stack.
+ lay(sp, MemOperand(sp, -kDoubleSize));
+ StoreDouble(double_input, MemOperand(sp));
+
+ DoubleToIStub stub(isolate(), sp, result, 0, true, true);
+ CallStub(&stub);
+
+ la(sp, MemOperand(sp, kDoubleSize));
+ pop(r14);
+
+ bind(&done);
+}
+
+void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
+ Label done;
+ DoubleRegister double_scratch = kScratchDoubleReg;
+ DCHECK(!result.is(object));
+
+ LoadDouble(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
+ TryInlineTruncateDoubleToI(result, double_scratch, &done);
+
+ // If we fell through then inline version didn't succeed - call stub instead.
+ push(r14);
+ DoubleToIStub stub(isolate(), object, result,
+ HeapNumber::kValueOffset - kHeapObjectTag, true, true);
+ CallStub(&stub);
+ pop(r14);
+
+ bind(&done);
+}
+
+void MacroAssembler::TruncateNumberToI(Register object, Register result,
+ Register heap_number_map,
+ Register scratch1, Label* not_number) {
+ Label done;
+ DCHECK(!result.is(object));
+
+ UntagAndJumpIfSmi(result, object, &done);
+ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
+ TruncateHeapNumberToI(result, object);
+
+ bind(&done);
+}
+
+void MacroAssembler::GetLeastBitsFromSmi(Register dst, Register src,
+ int num_least_bits) {
+ if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+ // We rotate by kSmiShift amount, and extract the num_least_bits
+ risbg(dst, src, Operand(64 - num_least_bits), Operand(63),
+ Operand(64 - kSmiShift), true);
+ } else {
+ SmiUntag(dst, src);
+ AndP(dst, Operand((1 << num_least_bits) - 1));
+ }
+}
+
+void MacroAssembler::GetLeastBitsFromInt32(Register dst, Register src,
+ int num_least_bits) {
+ AndP(dst, src, Operand((1 << num_least_bits) - 1));
+}
+
+void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
+ SaveFPRegsMode save_doubles) {
+ // All parameters are on the stack. r2 has the return value after call.
+
+ // If the expected number of arguments of the runtime function is
+ // constant, we check that the actual number of arguments match the
+ // expectation.
+ CHECK(f->nargs < 0 || f->nargs == num_arguments);
+
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ mov(r2, Operand(num_arguments));
+ mov(r3, Operand(ExternalReference(f, isolate())));
+ CEntryStub stub(isolate(),
+#if V8_TARGET_ARCH_S390X
+ f->result_size,
+#else
+ 1,
+#endif
+ save_doubles);
+ CallStub(&stub);
+}
+
+void MacroAssembler::CallExternalReference(const ExternalReference& ext,
+ int num_arguments) {
+ mov(r2, Operand(num_arguments));
+ mov(r3, Operand(ext));
+
+ CEntryStub stub(isolate(), 1);
+ CallStub(&stub);
+}
+
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ DCHECK_EQ(1, function->result_size);
+ if (function->nargs >= 0) {
+ mov(r2, Operand(function->nargs));
+ }
+ JumpToExternalReference(ExternalReference(fid, isolate()));
+}
+
+void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
+ mov(r3, Operand(builtin));
+ CEntryStub stub(isolate(), 1);
+ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+void MacroAssembler::SetCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2) {
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ mov(scratch1, Operand(value));
+ mov(scratch2, Operand(ExternalReference(counter)));
+ StoreW(scratch1, MemOperand(scratch2));
+ }
+}
+
+void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2) {
+ DCHECK(value > 0 && is_int8(value));
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ mov(scratch1, Operand(ExternalReference(counter)));
+ // @TODO(john.yan): can be optimized by asi()
+ LoadW(scratch2, MemOperand(scratch1));
+ AddP(scratch2, Operand(value));
+ StoreW(scratch2, MemOperand(scratch1));
+ }
+}
+
+void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2) {
+ DCHECK(value > 0 && is_int8(value));
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ mov(scratch1, Operand(ExternalReference(counter)));
+ // @TODO(john.yan): can be optimized by asi()
+ LoadW(scratch2, MemOperand(scratch1));
+ AddP(scratch2, Operand(-value));
+ StoreW(scratch2, MemOperand(scratch1));
+ }
+}
+
+void MacroAssembler::Assert(Condition cond, BailoutReason reason,
+ CRegister cr) {
+ if (emit_debug_code()) Check(cond, reason, cr);
+}
+
+void MacroAssembler::AssertFastElements(Register elements) {
+ if (emit_debug_code()) {
+ DCHECK(!elements.is(r0));
+ Label ok;
+ push(elements);
+ LoadP(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
+ CompareRoot(elements, Heap::kFixedArrayMapRootIndex);
+ beq(&ok, Label::kNear);
+ CompareRoot(elements, Heap::kFixedDoubleArrayMapRootIndex);
+ beq(&ok, Label::kNear);
+ CompareRoot(elements, Heap::kFixedCOWArrayMapRootIndex);
+ beq(&ok, Label::kNear);
+ Abort(kJSObjectWithFastElementsMapHasSlowElements);
+ bind(&ok);
+ pop(elements);
+ }
+}
+
+void MacroAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
+ Label L;
+ b(cond, &L);
+ Abort(reason);
+ // will not return here
+ bind(&L);
+}
+
+void MacroAssembler::Abort(BailoutReason reason) {
+ Label abort_start;
+ bind(&abort_start);
+#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
+ if (msg != NULL) {
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+ }
+
+ if (FLAG_trap_on_abort) {
+ stop(msg);
+ return;
+ }
+#endif
+
+ LoadSmiLiteral(r0, Smi::FromInt(reason));
+ push(r0);
+ // Disable stub call restrictions to always allow calls to abort.
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kAbort);
+ } else {
+ CallRuntime(Runtime::kAbort);
+ }
+ // will not return here
+}
+
+void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
+ if (context_chain_length > 0) {
+ // Move up the chain of contexts to the context containing the slot.
+ LoadP(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ for (int i = 1; i < context_chain_length; i++) {
+ LoadP(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ }
+ } else {
+ // Slot is in the current function context. Move it into the
+ // destination register in case we store into it (the write barrier
+ // cannot be allowed to destroy the context in esi).
+ LoadRR(dst, cp);
+ }
+}
+
+void MacroAssembler::LoadTransitionedArrayMapConditional(
+ ElementsKind expected_kind, ElementsKind transitioned_kind,
+ Register map_in_out, Register scratch, Label* no_map_match) {
+ DCHECK(IsFastElementsKind(expected_kind));
+ DCHECK(IsFastElementsKind(transitioned_kind));
+
+ // Check that the function's map is the same as the expected cached map.
+ LoadP(scratch, NativeContextMemOperand());
+ LoadP(ip, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
+ CmpP(map_in_out, ip);
+ bne(no_map_match);
+
+ // Use the transitioned cached map.
+ LoadP(map_in_out,
+ ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
+}
+
+void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
+ LoadP(dst, NativeContextMemOperand());
+ LoadP(dst, ContextMemOperand(dst, index));
+}
+
+void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
+ Register map,
+ Register scratch) {
+ // Load the initial map. The global functions all have initial maps.
+ LoadP(map,
+ FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ if (emit_debug_code()) {
+ Label ok, fail;
+ CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
+ b(&ok);
+ bind(&fail);
+ Abort(kGlobalFunctionsMustHaveInitialMap);
+ bind(&ok);
+ }
+}
+
+void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
+ Register reg, Register scratch, Label* not_power_of_two_or_zero) {
+ SubP(scratch, reg, Operand(1));
+ CmpP(scratch, Operand::Zero());
+ blt(not_power_of_two_or_zero);
+ AndP(r0, reg, scratch /*, SetRC*/); // Should be okay to remove rc
+ bne(not_power_of_two_or_zero /*, cr0*/);
+}
+
+void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
+ Register scratch,
+ Label* zero_and_neg,
+ Label* not_power_of_two) {
+ SubP(scratch, reg, Operand(1));
+ CmpP(scratch, Operand::Zero());
+ blt(zero_and_neg);
+ AndP(r0, reg, scratch /*, SetRC*/); // Should be okay to remove rc
+ bne(not_power_of_two /*, cr0*/);
+}
+
+#if !V8_TARGET_ARCH_S390X
+void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
+ DCHECK(!reg.is(overflow));
+ LoadRR(overflow, reg); // Save original value.
+ SmiTag(reg);
+ XorP(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
+ LoadAndTestRR(overflow, overflow);
+}
+
+void MacroAssembler::SmiTagCheckOverflow(Register dst, Register src,
+ Register overflow) {
+ if (dst.is(src)) {
+ // Fall back to slower case.
+ SmiTagCheckOverflow(dst, overflow);
+ } else {
+ DCHECK(!dst.is(src));
+ DCHECK(!dst.is(overflow));
+ DCHECK(!src.is(overflow));
+ SmiTag(dst, src);
+ XorP(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
+ LoadAndTestRR(overflow, overflow);
+ }
+}
+#endif
+
+void MacroAssembler::JumpIfNotBothSmi(Register reg1, Register reg2,
+ Label* on_not_both_smi) {
+ STATIC_ASSERT(kSmiTag == 0);
+ OrP(r0, reg1, reg2 /*, LeaveRC*/); // should be okay to remove LeaveRC
+ JumpIfNotSmi(r0, on_not_both_smi);
+}
+
+void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
+ Label* smi_case) {
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ // this won't work if src == dst
+ DCHECK(src.code() != dst.code());
+ SmiUntag(dst, src);
+ TestIfSmi(src);
+ beq(smi_case);
+}
+
+void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, Register src,
+ Label* non_smi_case) {
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+
+ // We can more optimally use TestIfSmi if dst != src
+ // otherwise, the UnTag operation will kill the CC and we cannot
+ // test the Tag bit.
+ if (src.code() != dst.code()) {
+ SmiUntag(dst, src);
+ TestIfSmi(src);
+ } else {
+ TestBit(src, 0, r0);
+ SmiUntag(dst, src);
+ LoadAndTestRR(r0, r0);
+ }
+ bne(non_smi_case);
+}
+
+void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
+ Label* on_either_smi) {
+ STATIC_ASSERT(kSmiTag == 0);
+ JumpIfSmi(reg1, on_either_smi);
+ JumpIfSmi(reg2, on_either_smi);
+}
+
+void MacroAssembler::AssertNotNumber(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ TestIfSmi(object);
+ Check(ne, kOperandIsANumber, cr0);
+ push(object);
+ CompareObjectType(object, object, object, HEAP_NUMBER_TYPE);
+ pop(object);
+ Check(ne, kOperandIsANumber);
+ }
+}
+
+void MacroAssembler::AssertNotSmi(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ TestIfSmi(object);
+ Check(ne, kOperandIsASmi, cr0);
+ }
+}
+
+void MacroAssembler::AssertSmi(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ TestIfSmi(object);
+ Check(eq, kOperandIsNotSmi, cr0);
+ }
+}
+
+void MacroAssembler::AssertString(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ TestIfSmi(object);
+ Check(ne, kOperandIsASmiAndNotAString, cr0);
+ push(object);
+ LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
+ pop(object);
+ Check(lt, kOperandIsNotAString);
+ }
+}
+
+void MacroAssembler::AssertName(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ TestIfSmi(object);
+ Check(ne, kOperandIsASmiAndNotAName, cr0);
+ push(object);
+ LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(object, object, LAST_NAME_TYPE);
+ pop(object);
+ Check(le, kOperandIsNotAName);
+ }
+}
+
+void MacroAssembler::AssertFunction(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ TestIfSmi(object);
+ Check(ne, kOperandIsASmiAndNotAFunction, cr0);
+ push(object);
+ CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
+ pop(object);
+ Check(eq, kOperandIsNotAFunction);
+ }
+}
+
+void MacroAssembler::AssertBoundFunction(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ TestIfSmi(object);
+ Check(ne, kOperandIsASmiAndNotABoundFunction, cr0);
+ push(object);
+ CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
+ pop(object);
+ Check(eq, kOperandIsNotABoundFunction);
+ }
+}
+
+void MacroAssembler::AssertReceiver(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ TestIfSmi(object);
+ Check(ne, kOperandIsASmiAndNotAReceiver, cr0);
+ push(object);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ CompareObjectType(object, object, object, FIRST_JS_RECEIVER_TYPE);
+ pop(object);
+ Check(ge, kOperandIsNotAReceiver);
+ }
+}
+
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
+ Register scratch) {
+ if (emit_debug_code()) {
+ Label done_checking;
+ AssertNotSmi(object);
+ CompareRoot(object, Heap::kUndefinedValueRootIndex);
+ beq(&done_checking, Label::kNear);
+ LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
+ Assert(eq, kExpectedUndefinedOrCell);
+ bind(&done_checking);
+ }
+}
+
+void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
+ if (emit_debug_code()) {
+ CompareRoot(reg, index);
+ Check(eq, kHeapNumberMapRegisterClobbered);
+ }
+}
+
+void MacroAssembler::JumpIfNotHeapNumber(Register object,
+ Register heap_number_map,
+ Register scratch,
+ Label* on_not_heap_number) {
+ LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ CmpP(scratch, heap_number_map);
+ bne(on_not_heap_number);
+}
+
+void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
+ Register first, Register second, Register scratch1, Register scratch2,
+ Label* failure) {
+ // Test that both first and second are sequential one-byte strings.
+ // Assume that they are non-smis.
+ LoadP(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
+ LoadP(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
+ LoadlB(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ LoadlB(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
+
+ JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
+ scratch2, failure);
+}
+
+void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ // Check that neither is a smi.
+ AndP(scratch1, first, second);
+ JumpIfSmi(scratch1, failure);
+ JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
+ scratch2, failure);
+}
+
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
+ Label* not_unique_name) {
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ Label succeed;
+ AndP(r0, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+ beq(&succeed, Label::kNear);
+ CmpP(reg, Operand(SYMBOL_TYPE));
+ bne(not_unique_name);
+
+ bind(&succeed);
+}
+
+// Allocates a heap number or jumps to the need_gc label if the young space
+// is full and a scavenge is needed.
+void MacroAssembler::AllocateHeapNumber(Register result, Register scratch1,
+ Register scratch2,
+ Register heap_number_map,
+ Label* gc_required,
+ TaggingMode tagging_mode,
+ MutableMode mode) {
+ // Allocate an object in the heap for the heap number and tag it as a heap
+ // object.
+ Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
+ tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
+
+ Heap::RootListIndex map_index = mode == MUTABLE
+ ? Heap::kMutableHeapNumberMapRootIndex
+ : Heap::kHeapNumberMapRootIndex;
+ AssertIsRoot(heap_number_map, map_index);
+
+ // Store heap number map in the allocated object.
+ if (tagging_mode == TAG_RESULT) {
+ StoreP(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+ } else {
+ StoreP(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
+ }
+}
+
+void MacroAssembler::AllocateHeapNumberWithValue(
+ Register result, DoubleRegister value, Register scratch1, Register scratch2,
+ Register heap_number_map, Label* gc_required) {
+ AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
+ StoreDouble(value, FieldMemOperand(result, HeapNumber::kValueOffset));
+}
+
+void MacroAssembler::AllocateJSValue(Register result, Register constructor,
+ Register value, Register scratch1,
+ Register scratch2, Label* gc_required) {
+ DCHECK(!result.is(constructor));
+ DCHECK(!result.is(scratch1));
+ DCHECK(!result.is(scratch2));
+ DCHECK(!result.is(value));
+
+ // Allocate JSValue in new space.
+ Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
+
+ // Initialize the JSValue.
+ LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
+ StoreP(scratch1, FieldMemOperand(result, HeapObject::kMapOffset), r0);
+ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
+ StoreP(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset), r0);
+ StoreP(scratch1, FieldMemOperand(result, JSObject::kElementsOffset), r0);
+ StoreP(value, FieldMemOperand(result, JSValue::kValueOffset), r0);
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+}
+
+void MacroAssembler::CopyBytes(Register src, Register dst, Register length,
+ Register scratch) {
+ Label big_loop, left_bytes, done, fake_call;
+
+ DCHECK(!scratch.is(r0));
+
+ // big loop moves 256 bytes at a time
+ bind(&big_loop);
+ CmpP(length, Operand(static_cast<intptr_t>(0x100)));
+ blt(&left_bytes);
+
+ mvc(MemOperand(dst), MemOperand(src), 0x100);
+
+ AddP(src, Operand(static_cast<intptr_t>(0x100)));
+ AddP(dst, Operand(static_cast<intptr_t>(0x100)));
+ SubP(length, Operand(static_cast<intptr_t>(0x100)));
+ b(&big_loop);
+
+ bind(&left_bytes);
+ CmpP(length, Operand::Zero());
+ beq(&done);
+
+ // TODO(john.yan): More optimal version is to use MVC
+ // Sequence below has some undiagnosed issue.
+ /*
+ b(scratch, &fake_call); // use brasl to Save mvc addr to scratch
+ mvc(MemOperand(dst), MemOperand(src), 1);
+ bind(&fake_call);
+ SubP(length, Operand(static_cast<intptr_t>(-1)));
+ ex(length, MemOperand(scratch)); // execute mvc instr above
+ AddP(src, length);
+ AddP(dst, length);
+ AddP(src, Operand(static_cast<intptr_t>(0x1)));
+ AddP(dst, Operand(static_cast<intptr_t>(0x1)));
+ */
+
+ mvc(MemOperand(dst), MemOperand(src), 1);
+ AddP(src, Operand(static_cast<intptr_t>(0x1)));
+ AddP(dst, Operand(static_cast<intptr_t>(0x1)));
+ SubP(length, Operand(static_cast<intptr_t>(0x1)));
+
+ b(&left_bytes);
+ bind(&done);
+}
+
+void MacroAssembler::InitializeNFieldsWithFiller(Register current_address,
+ Register count,
+ Register filler) {
+ Label loop;
+ bind(&loop);
+ StoreP(filler, MemOperand(current_address));
+ AddP(current_address, current_address, Operand(kPointerSize));
+ BranchOnCount(r1, &loop);
+}
+
+void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
+ Register end_address,
+ Register filler) {
+ Label done;
+ DCHECK(!filler.is(r1));
+ DCHECK(!current_address.is(r1));
+ DCHECK(!end_address.is(r1));
+ SubP(r1, end_address, current_address /*, LeaveOE, SetRC*/);
+ beq(&done, Label::kNear);
+ ShiftRightP(r1, r1, Operand(kPointerSizeLog2));
+ InitializeNFieldsWithFiller(current_address, r1, filler);
+ bind(&done);
+}
+
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
+ Register first, Register second, Register scratch1, Register scratch2,
+ Label* failure) {
+ const int kFlatOneByteStringMask =
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+ const int kFlatOneByteStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
+ if (!scratch1.is(first)) LoadRR(scratch1, first);
+ if (!scratch2.is(second)) LoadRR(scratch2, second);
+ nilf(scratch1, Operand(kFlatOneByteStringMask));
+ CmpP(scratch1, Operand(kFlatOneByteStringTag));
+ bne(failure);
+ nilf(scratch2, Operand(kFlatOneByteStringMask));
+ CmpP(scratch2, Operand(kFlatOneByteStringTag));
+ bne(failure);
+}
+
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
+ Register scratch,
+ Label* failure) {
+ const int kFlatOneByteStringMask =
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+ const int kFlatOneByteStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
+
+ if (!scratch.is(type)) LoadRR(scratch, type);
+ nilf(scratch, Operand(kFlatOneByteStringMask));
+ CmpP(scratch, Operand(kFlatOneByteStringTag));
+ bne(failure);
+}
+
+static const int kRegisterPassedArguments = 5;
+
+int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
+ int num_double_arguments) {
+ int stack_passed_words = 0;
+ if (num_double_arguments > DoubleRegister::kNumRegisters) {
+ stack_passed_words +=
+ 2 * (num_double_arguments - DoubleRegister::kNumRegisters);
+ }
+ // Up to five simple arguments are passed in registers r2..r6
+ if (num_reg_arguments > kRegisterPassedArguments) {
+ stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
+ }
+ return stack_passed_words;
+}
+
+void MacroAssembler::EmitSeqStringSetCharCheck(Register string, Register index,
+ Register value,
+ uint32_t encoding_mask) {
+ Label is_object;
+ TestIfSmi(string);
+ Check(ne, kNonObject, cr0);
+
+ LoadP(ip, FieldMemOperand(string, HeapObject::kMapOffset));
+ LoadlB(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
+
+ AndP(ip, Operand(kStringRepresentationMask | kStringEncodingMask));
+ CmpP(ip, Operand(encoding_mask));
+ Check(eq, kUnexpectedStringType);
+
+// The index is assumed to be untagged coming in, tag it to compare with the
+// string length without using a temp register, it is restored at the end of
+// this function.
+#if !V8_TARGET_ARCH_S390X
+ Label index_tag_ok, index_tag_bad;
+ JumpIfNotSmiCandidate(index, r0, &index_tag_bad);
+#endif
+ SmiTag(index, index);
+#if !V8_TARGET_ARCH_S390X
+ b(&index_tag_ok);
+ bind(&index_tag_bad);
+ Abort(kIndexIsTooLarge);
+ bind(&index_tag_ok);
+#endif
+
+ LoadP(ip, FieldMemOperand(string, String::kLengthOffset));
+ CmpP(index, ip);
+ Check(lt, kIndexIsTooLarge);
+
+ DCHECK(Smi::FromInt(0) == 0);
+ CmpP(index, Operand::Zero());
+ Check(ge, kIndexIsNegative);
+
+ SmiUntag(index, index);
+}
+
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+ int num_double_arguments,
+ Register scratch) {
+ int frame_alignment = ActivationFrameAlignment();
+ int stack_passed_arguments =
+ CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
+ int stack_space = kNumRequiredStackFrameSlots;
+ if (frame_alignment > kPointerSize) {
+ // Make stack end at alignment and make room for stack arguments
+ // -- preserving original value of sp.
+ LoadRR(scratch, sp);
+ lay(sp, MemOperand(sp, -(stack_passed_arguments + 1) * kPointerSize));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+ ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
+ StoreP(scratch, MemOperand(sp, (stack_passed_arguments)*kPointerSize));
+ } else {
+ stack_space += stack_passed_arguments;
+ }
+ lay(sp, MemOperand(sp, -(stack_space)*kPointerSize));
+}
+
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+ Register scratch) {
+ PrepareCallCFunction(num_reg_arguments, 0, scratch);
+}
+
+void MacroAssembler::MovToFloatParameter(DoubleRegister src) { Move(d0, src); }
+
+void MacroAssembler::MovToFloatResult(DoubleRegister src) { Move(d0, src); }
+
+void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
+ DoubleRegister src2) {
+ if (src2.is(d0)) {
+ DCHECK(!src1.is(d2));
+ Move(d2, src2);
+ Move(d0, src1);
+ } else {
+ Move(d0, src1);
+ Move(d2, src2);
+ }
+}
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_reg_arguments,
+ int num_double_arguments) {
+ mov(ip, Operand(function));
+ CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
+}
+
+void MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
+ int num_double_arguments) {
+ CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
+}
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_arguments) {
+ CallCFunction(function, num_arguments, 0);
+}
+
+void MacroAssembler::CallCFunction(Register function, int num_arguments) {
+ CallCFunction(function, num_arguments, 0);
+}
+
+void MacroAssembler::CallCFunctionHelper(Register function,
+ int num_reg_arguments,
+ int num_double_arguments) {
+ DCHECK(has_frame());
+
+ // Just call directly. The function called cannot cause a GC, or
+ // allow preemption, so the return address in the link register
+ // stays correct.
+ Register dest = function;
+ if (ABI_CALL_VIA_IP) {
+ Move(ip, function);
+ dest = ip;
+ }
+
+ Call(dest);
+
+ int stack_passed_arguments =
+ CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
+ int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
+ if (ActivationFrameAlignment() > kPointerSize) {
+ // Load the original stack pointer (pre-alignment) from the stack
+ LoadP(sp, MemOperand(sp, stack_space * kPointerSize));
+ } else {
+ la(sp, MemOperand(sp, stack_space * kPointerSize));
+ }
+}
+
+void MacroAssembler::CheckPageFlag(
+ Register object,
+ Register scratch, // scratch may be same register as object
+ int mask, Condition cc, Label* condition_met) {
+ DCHECK(cc == ne || cc == eq);
+ ClearRightImm(scratch, object, Operand(kPageSizeBits));
+
+ if (base::bits::IsPowerOfTwo32(mask)) {
+ // If it's a power of two, we can use Test-Under-Mask Memory-Imm form
+ // which allows testing of a single byte in memory.
+ int32_t byte_offset = 4;
+ uint32_t shifted_mask = mask;
+ // Determine the byte offset to be tested
+ if (mask <= 0x80) {
+ byte_offset = kPointerSize - 1;
+ } else if (mask < 0x8000) {
+ byte_offset = kPointerSize - 2;
+ shifted_mask = mask >> 8;
+ } else if (mask < 0x800000) {
+ byte_offset = kPointerSize - 3;
+ shifted_mask = mask >> 16;
+ } else {
+ byte_offset = kPointerSize - 4;
+ shifted_mask = mask >> 24;
+ }
+#if V8_TARGET_LITTLE_ENDIAN
+ // Reverse the byte_offset if emulating on little endian platform
+ byte_offset = kPointerSize - byte_offset - 1;
+#endif
+ tm(MemOperand(scratch, MemoryChunk::kFlagsOffset + byte_offset),
+ Operand(shifted_mask));
+ } else {
+ LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ AndP(r0, scratch, Operand(mask));
+ }
+ // Should be okay to remove rc
+
+ if (cc == ne) {
+ bne(condition_met);
+ }
+ if (cc == eq) {
+ beq(condition_met);
+ }
+}
+
+void MacroAssembler::JumpIfBlack(Register object, Register scratch0,
+ Register scratch1, Label* on_black) {
+ HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+}
+
+void MacroAssembler::HasColor(Register object, Register bitmap_scratch,
+ Register mask_scratch, Label* has_color,
+ int first_bit, int second_bit) {
+ DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
+
+ GetMarkBits(object, bitmap_scratch, mask_scratch);
+
+ Label other_color, word_boundary;
+ LoadlW(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ // Test the first bit
+ AndP(r0, ip, mask_scratch /*, SetRC*/); // Should be okay to remove rc
+ b(first_bit == 1 ? eq : ne, &other_color, Label::kNear);
+ // Shift left 1
+ // May need to load the next cell
+ sll(mask_scratch, Operand(1) /*, SetRC*/);
+ LoadAndTest32(mask_scratch, mask_scratch);
+ beq(&word_boundary, Label::kNear);
+ // Test the second bit
+ AndP(r0, ip, mask_scratch /*, SetRC*/); // Should be okay to remove rc
+ b(second_bit == 1 ? ne : eq, has_color);
+ b(&other_color, Label::kNear);
+
+ bind(&word_boundary);
+ LoadlW(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kIntSize));
+ AndP(r0, ip, Operand(1));
+ b(second_bit == 1 ? ne : eq, has_color);
+ bind(&other_color);
+}
+
+void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg,
+ Register mask_reg) {
+ DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
+ LoadRR(bitmap_reg, addr_reg);
+ nilf(bitmap_reg, Operand(~Page::kPageAlignmentMask));
+ const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
+ ExtractBitRange(mask_reg, addr_reg, kLowBits - 1, kPointerSizeLog2);
+ ExtractBitRange(ip, addr_reg, kPageSizeBits - 1, kLowBits);
+ ShiftLeftP(ip, ip, Operand(Bitmap::kBytesPerCellLog2));
+ AddP(bitmap_reg, ip);
+ LoadRR(ip, mask_reg); // Have to do some funky reg shuffling as
+ // 31-bit shift left clobbers on s390.
+ LoadImmP(mask_reg, Operand(1));
+ ShiftLeftP(mask_reg, mask_reg, ip);
+}
+
+void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
+ Register mask_scratch, Register load_scratch,
+ Label* value_is_white) {
+ DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
+ GetMarkBits(value, bitmap_scratch, mask_scratch);
+
+ // If the value is black or grey we don't need to do anything.
+ DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
+ DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+ // Since both black and grey have a 1 in the first position and white does
+ // not have a 1 there we only need to check one bit.
+ LoadlW(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ LoadRR(r0, load_scratch);
+ AndP(r0, mask_scratch);
+ beq(value_is_white);
+}
+
+// Saturate a value into 8-bit unsigned integer
+// if input_value < 0, output_value is 0
+// if input_value > 255, output_value is 255
+// otherwise output_value is the input_value
+void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
+ int satval = (1 << 8) - 1;
+
+ Label done, negative_label, overflow_label;
+ CmpP(input_reg, Operand::Zero());
+ blt(&negative_label);
+
+ CmpP(input_reg, Operand(satval));
+ bgt(&overflow_label);
+ if (!output_reg.is(input_reg)) {
+ LoadRR(output_reg, input_reg);
+ }
+ b(&done);
+
+ bind(&negative_label);
+ LoadImmP(output_reg, Operand::Zero()); // set to 0 if negative
+ b(&done);
+
+ bind(&overflow_label); // set to satval if > satval
+ LoadImmP(output_reg, Operand(satval));
+
+ bind(&done);
+}
+
+void MacroAssembler::ClampDoubleToUint8(Register result_reg,
+ DoubleRegister input_reg,
+ DoubleRegister double_scratch) {
+ Label above_zero;
+ Label done;
+ Label in_bounds;
+
+ LoadDoubleLiteral(double_scratch, 0.0, result_reg);
+ cdbr(input_reg, double_scratch);
+ bgt(&above_zero, Label::kNear);
+
+ // Double value is less than zero, NaN or Inf, return 0.
+ LoadIntLiteral(result_reg, 0);
+ b(&done, Label::kNear);
+
+ // Double value is >= 255, return 255.
+ bind(&above_zero);
+ LoadDoubleLiteral(double_scratch, 255.0, result_reg);
+ cdbr(input_reg, double_scratch);
+ ble(&in_bounds, Label::kNear);
+ LoadIntLiteral(result_reg, 255);
+ b(&done, Label::kNear);
+
+ // In 0-255 range, round and truncate.
+ bind(&in_bounds);
+
+ // round to nearest (default rounding mode)
+ cfdbr(ROUND_TO_NEAREST_WITH_TIES_TO_EVEN, result_reg, input_reg);
+ bind(&done);
+}
+
+void MacroAssembler::LoadInstanceDescriptors(Register map,
+ Register descriptors) {
+ LoadP(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
+}
+
+void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
+ LoadlW(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
+}
+
+void MacroAssembler::EnumLength(Register dst, Register map) {
+ STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
+ LoadW(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ And(dst, Operand(Map::EnumLengthBits::kMask));
+ SmiTag(dst);
+}
+
+void MacroAssembler::LoadAccessor(Register dst, Register holder,
+ int accessor_index,
+ AccessorComponent accessor) {
+ LoadP(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
+ LoadInstanceDescriptors(dst, dst);
+ LoadP(dst,
+ FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
+ const int getterOffset = AccessorPair::kGetterOffset;
+ const int setterOffset = AccessorPair::kSetterOffset;
+ int offset = ((accessor == ACCESSOR_GETTER) ? getterOffset : setterOffset);
+ LoadP(dst, FieldMemOperand(dst, offset));
+}
+
+void MacroAssembler::CheckEnumCache(Label* call_runtime) {
+ Register null_value = r7;
+ Register empty_fixed_array_value = r8;
+ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
+ Label next, start;
+ LoadRR(r4, r2);
+
+ // Check if the enum length field is properly initialized, indicating that
+ // there is an enum cache.
+ LoadP(r3, FieldMemOperand(r4, HeapObject::kMapOffset));
+
+ EnumLength(r5, r3);
+ CmpSmiLiteral(r5, Smi::FromInt(kInvalidEnumCacheSentinel), r0);
+ beq(call_runtime);
+
+ LoadRoot(null_value, Heap::kNullValueRootIndex);
+ b(&start, Label::kNear);
+
+ bind(&next);
+ LoadP(r3, FieldMemOperand(r4, HeapObject::kMapOffset));
+
+ // For all objects but the receiver, check that the cache is empty.
+ EnumLength(r5, r3);
+ CmpSmiLiteral(r5, Smi::FromInt(0), r0);
+ bne(call_runtime);
+
+ bind(&start);
+
+ // Check that there are no elements. Register r4 contains the current JS
+ // object we've reached through the prototype chain.
+ Label no_elements;
+ LoadP(r4, FieldMemOperand(r4, JSObject::kElementsOffset));
+ CmpP(r4, empty_fixed_array_value);
+ beq(&no_elements, Label::kNear);
+
+ // Second chance, the object may be using the empty slow element dictionary.
+ CompareRoot(r5, Heap::kEmptySlowElementDictionaryRootIndex);
+ bne(call_runtime);
+
+ bind(&no_elements);
+ LoadP(r4, FieldMemOperand(r3, Map::kPrototypeOffset));
+ CmpP(r4, null_value);
+ bne(&next);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// New MacroAssembler Interfaces added for S390
+//
+////////////////////////////////////////////////////////////////////////////////
+// Primarily used for loading constants
+// This should really move to be in macro-assembler as it
+// is really a pseudo instruction
+// Some usages of this intend for a FIXED_SEQUENCE to be used
+// @TODO - break this dependency so we can optimize mov() in general
+// and only use the generic version when we require a fixed sequence
+void MacroAssembler::LoadRepresentation(Register dst, const MemOperand& mem,
+ Representation r, Register scratch) {
+ DCHECK(!r.IsDouble());
+ if (r.IsInteger8()) {
+ LoadB(dst, mem);
+ lgbr(dst, dst);
+ } else if (r.IsUInteger8()) {
+ LoadlB(dst, mem);
+ } else if (r.IsInteger16()) {
+ LoadHalfWordP(dst, mem, scratch);
+ lghr(dst, dst);
+ } else if (r.IsUInteger16()) {
+ LoadHalfWordP(dst, mem, scratch);
+#if V8_TARGET_ARCH_S390X
+ } else if (r.IsInteger32()) {
+ LoadW(dst, mem, scratch);
+#endif
+ } else {
+ LoadP(dst, mem, scratch);
+ }
+}
+
+void MacroAssembler::StoreRepresentation(Register src, const MemOperand& mem,
+ Representation r, Register scratch) {
+ DCHECK(!r.IsDouble());
+ if (r.IsInteger8() || r.IsUInteger8()) {
+ StoreByte(src, mem, scratch);
+ } else if (r.IsInteger16() || r.IsUInteger16()) {
+ StoreHalfWord(src, mem, scratch);
+#if V8_TARGET_ARCH_S390X
+ } else if (r.IsInteger32()) {
+ StoreW(src, mem, scratch);
+#endif
+ } else {
+ if (r.IsHeapObject()) {
+ AssertNotSmi(src);
+ } else if (r.IsSmi()) {
+ AssertSmi(src);
+ }
+ StoreP(src, mem, scratch);
+ }
+}
+
+void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Register scratch2_reg,
+ Label* no_memento_found) {
+ Label map_check;
+ Label top_check;
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+ const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
+ const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+
+ DCHECK(!AreAliased(receiver_reg, scratch_reg));
+
+ // Bail out if the object is not in new space.
+ JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
+
+ DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
+ AddP(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+
+ // If the object is in new space, we need to check whether it is on the same
+ // page as the current top.
+ XorP(r0, scratch_reg, Operand(new_space_allocation_top));
+ AndP(r0, r0, Operand(~Page::kPageAlignmentMask));
+ beq(&top_check, Label::kNear);
+ // The object is on a different page than allocation top. Bail out if the
+ // object sits on the page boundary as no memento can follow and we cannot
+ // touch the memory following it.
+ XorP(r0, scratch_reg, receiver_reg);
+ AndP(r0, r0, Operand(~Page::kPageAlignmentMask));
+ bne(no_memento_found);
+ // Continue with the actual map check.
+ b(&map_check, Label::kNear);
+ // If top is on the same page as the current object, we need to check whether
+ // we are below top.
+ bind(&top_check);
+ CmpP(scratch_reg, Operand(new_space_allocation_top));
+ bgt(no_memento_found);
+ // Memento map check.
+ bind(&map_check);
+ LoadP(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
+ CmpP(scratch_reg, Operand(isolate()->factory()->allocation_memento_map()));
+}
+
+Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
+ Register reg4, Register reg5,
+ Register reg6) {
+ RegList regs = 0;
+ if (reg1.is_valid()) regs |= reg1.bit();
+ if (reg2.is_valid()) regs |= reg2.bit();
+ if (reg3.is_valid()) regs |= reg3.bit();
+ if (reg4.is_valid()) regs |= reg4.bit();
+ if (reg5.is_valid()) regs |= reg5.bit();
+ if (reg6.is_valid()) regs |= reg6.bit();
+
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ Register candidate = Register::from_code(code);
+ if (regs & candidate.bit()) continue;
+ return candidate;
+ }
+ UNREACHABLE();
+ return no_reg;
+}
+
+void MacroAssembler::JumpIfDictionaryInPrototypeChain(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* found) {
+ DCHECK(!scratch1.is(scratch0));
+ Register current = scratch0;
+ Label loop_again, end;
+
+ // scratch contained elements pointer.
+ LoadRR(current, object);
+ LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
+ LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
+ CompareRoot(current, Heap::kNullValueRootIndex);
+ beq(&end);
+
+ // Loop based on the map going up the prototype chain.
+ bind(&loop_again);
+ LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
+
+ STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
+ STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
+ LoadlB(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
+ CmpP(scratch1, Operand(JS_OBJECT_TYPE));
+ blt(found);
+
+ LoadlB(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
+ DecodeField<Map::ElementsKindBits>(scratch1);
+ CmpP(scratch1, Operand(DICTIONARY_ELEMENTS));
+ beq(found);
+ LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
+ CompareRoot(current, Heap::kNullValueRootIndex);
+ bne(&loop_again);
+
+ bind(&end);
+}
+
+void MacroAssembler::mov(Register dst, const Operand& src) {
+ if (src.rmode_ != kRelocInfo_NONEPTR) {
+ // some form of relocation needed
+ RecordRelocInfo(src.rmode_, src.imm_);
+ }
+
+#if V8_TARGET_ARCH_S390X
+ int64_t value = src.immediate();
+ int32_t hi_32 = static_cast<int64_t>(value) >> 32;
+ int32_t lo_32 = static_cast<int32_t>(value);
+
+ iihf(dst, Operand(hi_32));
+ iilf(dst, Operand(lo_32));
+#else
+ int value = src.immediate();
+ iilf(dst, Operand(value));
+#endif
+}
+
+void MacroAssembler::Mul(Register dst, Register src1, Register src2) {
+ if (dst.is(src2)) {
+ MulP(dst, src1);
+ } else if (dst.is(src1)) {
+ MulP(dst, src2);
+ } else {
+ Move(dst, src1);
+ MulP(dst, src2);
+ }
+}
+
+void MacroAssembler::DivP(Register dividend, Register divider) {
+ // have to make sure the src and dst are reg pairs
+ DCHECK(dividend.code() % 2 == 0);
+#if V8_TARGET_ARCH_S390X
+ dsgr(dividend, divider);
+#else
+ dr(dividend, divider);
+#endif
+}
+
+void MacroAssembler::MulP(Register dst, const Operand& opnd) {
+#if V8_TARGET_ARCH_S390X
+ msgfi(dst, opnd);
+#else
+ msfi(dst, opnd);
+#endif
+}
+
+void MacroAssembler::MulP(Register dst, Register src) {
+#if V8_TARGET_ARCH_S390X
+ msgr(dst, src);
+#else
+ msr(dst, src);
+#endif
+}
+
+void MacroAssembler::MulP(Register dst, const MemOperand& opnd) {
+#if V8_TARGET_ARCH_S390X
+ if (is_uint16(opnd.offset())) {
+ ms(dst, opnd);
+ } else if (is_int20(opnd.offset())) {
+ msy(dst, opnd);
+ } else {
+ UNIMPLEMENTED();
+ }
+#else
+ if (is_int20(opnd.offset())) {
+ msg(dst, opnd);
+ } else {
+ UNIMPLEMENTED();
+ }
+#endif
+}
+
+//----------------------------------------------------------------------------
+// Add Instructions
+//----------------------------------------------------------------------------
+
+// Add 32-bit (Register dst = Register dst + Immediate opnd)
+void MacroAssembler::Add32(Register dst, const Operand& opnd) {
+ if (is_int16(opnd.immediate()))
+ ahi(dst, opnd);
+ else
+ afi(dst, opnd);
+}
+
+// Add Pointer Size (Register dst = Register dst + Immediate opnd)
+void MacroAssembler::AddP(Register dst, const Operand& opnd) {
+#if V8_TARGET_ARCH_S390X
+ if (is_int16(opnd.immediate()))
+ aghi(dst, opnd);
+ else
+ agfi(dst, opnd);
+#else
+ Add32(dst, opnd);
+#endif
+}
+
+// Add 32-bit (Register dst = Register src + Immediate opnd)
+void MacroAssembler::Add32(Register dst, Register src, const Operand& opnd) {
+ if (!dst.is(src)) {
+ if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
+ ahik(dst, src, opnd);
+ return;
+ }
+ lr(dst, src);
+ }
+ Add32(dst, opnd);
+}
+
+// Add Pointer Size (Register dst = Register src + Immediate opnd)
+void MacroAssembler::AddP(Register dst, Register src, const Operand& opnd) {
+ if (!dst.is(src)) {
+ if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
+ AddPImm_RRI(dst, src, opnd);
+ return;
+ }
+ LoadRR(dst, src);
+ }
+ AddP(dst, opnd);
+}
+
+// Add 32-bit (Register dst = Register dst + Register src)
+void MacroAssembler::Add32(Register dst, Register src) { ar(dst, src); }
+
+// Add Pointer Size (Register dst = Register dst + Register src)
+void MacroAssembler::AddP(Register dst, Register src) { AddRR(dst, src); }
+
+// Add Pointer Size with src extension
+// (Register dst(ptr) = Register dst (ptr) + Register src (32 | 32->64))
+// src is treated as a 32-bit signed integer, which is sign extended to
+// 64-bit if necessary.
+void MacroAssembler::AddP_ExtendSrc(Register dst, Register src) {
+#if V8_TARGET_ARCH_S390X
+ agfr(dst, src);
+#else
+ ar(dst, src);
+#endif
+}
+
+// Add 32-bit (Register dst = Register src1 + Register src2)
+void MacroAssembler::Add32(Register dst, Register src1, Register src2) {
+ if (!dst.is(src1) && !dst.is(src2)) {
+ // We prefer to generate AR/AGR, over the non clobbering ARK/AGRK
+ // as AR is a smaller instruction
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ ark(dst, src1, src2);
+ return;
+ } else {
+ lr(dst, src1);
+ }
+ } else if (dst.is(src2)) {
+ src2 = src1;
+ }
+ ar(dst, src2);
+}
+
+// Add Pointer Size (Register dst = Register src1 + Register src2)
+void MacroAssembler::AddP(Register dst, Register src1, Register src2) {
+ if (!dst.is(src1) && !dst.is(src2)) {
+ // We prefer to generate AR/AGR, over the non clobbering ARK/AGRK
+ // as AR is a smaller instruction
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ AddP_RRR(dst, src1, src2);
+ return;
+ } else {
+ LoadRR(dst, src1);
+ }
+ } else if (dst.is(src2)) {
+ src2 = src1;
+ }
+ AddRR(dst, src2);
+}
+
+// Add Pointer Size with src extension
+// (Register dst (ptr) = Register dst (ptr) + Register src1 (ptr) +
+// Register src2 (32 | 32->64))
+// src is treated as a 32-bit signed integer, which is sign extended to
+// 64-bit if necessary.
+void MacroAssembler::AddP_ExtendSrc(Register dst, Register src1,
+ Register src2) {
+#if V8_TARGET_ARCH_S390X
+ if (dst.is(src2)) {
+ // The source we need to sign extend is the same as result.
+ lgfr(dst, src2);
+ agr(dst, src1);
+ } else {
+ if (!dst.is(src1)) LoadRR(dst, src1);
+ agfr(dst, src2);
+ }
+#else
+ AddP(dst, src1, src2);
+#endif
+}
+
+// Add 32-bit (Register-Memory)
+void MacroAssembler::Add32(Register dst, const MemOperand& opnd) {
+ DCHECK(is_int20(opnd.offset()));
+ if (is_uint12(opnd.offset()))
+ a(dst, opnd);
+ else
+ ay(dst, opnd);
+}
+
+// Add Pointer Size (Register-Memory)
+void MacroAssembler::AddP(Register dst, const MemOperand& opnd) {
+#if V8_TARGET_ARCH_S390X
+ DCHECK(is_int20(opnd.offset()));
+ ag(dst, opnd);
+#else
+ Add32(dst, opnd);
+#endif
+}
+
+// Add Pointer Size with src extension
+// (Register dst (ptr) = Register dst (ptr) + Mem opnd (32 | 32->64))
+// src is treated as a 32-bit signed integer, which is sign extended to
+// 64-bit if necessary.
+void MacroAssembler::AddP_ExtendSrc(Register dst, const MemOperand& opnd) {
+#if V8_TARGET_ARCH_S390X
+ DCHECK(is_int20(opnd.offset()));
+ agf(dst, opnd);
+#else
+ Add32(dst, opnd);
+#endif
+}
+
+// Add 32-bit (Memory - Immediate)
+void MacroAssembler::Add32(const MemOperand& opnd, const Operand& imm) {
+ DCHECK(is_int8(imm.immediate()));
+ DCHECK(is_int20(opnd.offset()));
+ DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
+ asi(opnd, imm);
+}
+
+// Add Pointer-sized (Memory - Immediate)
+void MacroAssembler::AddP(const MemOperand& opnd, const Operand& imm) {
+ DCHECK(is_int8(imm.immediate()));
+ DCHECK(is_int20(opnd.offset()));
+ DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
+#if V8_TARGET_ARCH_S390X
+ agsi(opnd, imm);
+#else
+ asi(opnd, imm);
+#endif
+}
+
+//----------------------------------------------------------------------------
+// Add Logical Instructions
+//----------------------------------------------------------------------------
+
+// Add Logical With Carry 32-bit (Register dst = Register src1 + Register src2)
+void MacroAssembler::AddLogicalWithCarry32(Register dst, Register src1,
+ Register src2) {
+ if (!dst.is(src2) && !dst.is(src1)) {
+ lr(dst, src1);
+ alcr(dst, src2);
+ } else if (!dst.is(src2)) {
+ // dst == src1
+ DCHECK(dst.is(src1));
+ alcr(dst, src2);
+ } else {
+ // dst == src2
+ DCHECK(dst.is(src2));
+ alcr(dst, src1);
+ }
+}
+
+// Add Logical 32-bit (Register dst = Register src1 + Register src2)
+void MacroAssembler::AddLogical32(Register dst, Register src1, Register src2) {
+ if (!dst.is(src2) && !dst.is(src1)) {
+ lr(dst, src1);
+ alr(dst, src2);
+ } else if (!dst.is(src2)) {
+ // dst == src1
+ DCHECK(dst.is(src1));
+ alr(dst, src2);
+ } else {
+ // dst == src2
+ DCHECK(dst.is(src2));
+ alr(dst, src1);
+ }
+}
+
+// Add Logical 32-bit (Register dst = Register dst + Immediate opnd)
+void MacroAssembler::AddLogical(Register dst, const Operand& imm) {
+ alfi(dst, imm);
+}
+
+// Add Logical Pointer Size (Register dst = Register dst + Immediate opnd)
+void MacroAssembler::AddLogicalP(Register dst, const Operand& imm) {
+#ifdef V8_TARGET_ARCH_S390X
+ algfi(dst, imm);
+#else
+ AddLogical(dst, imm);
+#endif
+}
+
+// Add Logical 32-bit (Register-Memory)
+void MacroAssembler::AddLogical(Register dst, const MemOperand& opnd) {
+ DCHECK(is_int20(opnd.offset()));
+ if (is_uint12(opnd.offset()))
+ al_z(dst, opnd);
+ else
+ aly(dst, opnd);
+}
+
+// Add Logical Pointer Size (Register-Memory)
+void MacroAssembler::AddLogicalP(Register dst, const MemOperand& opnd) {
+#if V8_TARGET_ARCH_S390X
+ DCHECK(is_int20(opnd.offset()));
+ alg(dst, opnd);
+#else
+ AddLogical(dst, opnd);
+#endif
+}
+
+//----------------------------------------------------------------------------
+// Subtract Instructions
+//----------------------------------------------------------------------------
+
+// Subtract Logical With Carry 32-bit (Register dst = Register src1 - Register
+// src2)
+void MacroAssembler::SubLogicalWithBorrow32(Register dst, Register src1,
+ Register src2) {
+ if (!dst.is(src2) && !dst.is(src1)) {
+ lr(dst, src1);
+ slbr(dst, src2);
+ } else if (!dst.is(src2)) {
+ // dst == src1
+ DCHECK(dst.is(src1));
+ slbr(dst, src2);
+ } else {
+ // dst == src2
+ DCHECK(dst.is(src2));
+ lr(r0, dst);
+ SubLogicalWithBorrow32(dst, src1, r0);
+ }
+}
+
+// Subtract Logical 32-bit (Register dst = Register src1 - Register src2)
+void MacroAssembler::SubLogical32(Register dst, Register src1, Register src2) {
+ if (!dst.is(src2) && !dst.is(src1)) {
+ lr(dst, src1);
+ slr(dst, src2);
+ } else if (!dst.is(src2)) {
+ // dst == src1
+ DCHECK(dst.is(src1));
+ slr(dst, src2);
+ } else {
+ // dst == src2
+ DCHECK(dst.is(src2));
+ lr(r0, dst);
+ SubLogical32(dst, src1, r0);
+ }
+}
+
+// Subtract 32-bit (Register dst = Register dst - Immediate opnd)
+void MacroAssembler::Sub32(Register dst, const Operand& imm) {
+ Add32(dst, Operand(-(imm.imm_)));
+}
+
+// Subtract Pointer Size (Register dst = Register dst - Immediate opnd)
+void MacroAssembler::SubP(Register dst, const Operand& imm) {
+ AddP(dst, Operand(-(imm.imm_)));
+}
+
+// Subtract 32-bit (Register dst = Register src - Immediate opnd)
+void MacroAssembler::Sub32(Register dst, Register src, const Operand& imm) {
+ Add32(dst, src, Operand(-(imm.imm_)));
+}
+
+// Subtract Pointer Sized (Register dst = Register src - Immediate opnd)
+void MacroAssembler::SubP(Register dst, Register src, const Operand& imm) {
+ AddP(dst, src, Operand(-(imm.imm_)));
+}
+
+// Subtract 32-bit (Register dst = Register dst - Register src)
+void MacroAssembler::Sub32(Register dst, Register src) { sr(dst, src); }
+
+// Subtract Pointer Size (Register dst = Register dst - Register src)
+void MacroAssembler::SubP(Register dst, Register src) { SubRR(dst, src); }
+
+// Subtract Pointer Size with src extension
+// (Register dst(ptr) = Register dst (ptr) - Register src (32 | 32->64))
+// src is treated as a 32-bit signed integer, which is sign extended to
+// 64-bit if necessary.
+void MacroAssembler::SubP_ExtendSrc(Register dst, Register src) {
+#if V8_TARGET_ARCH_S390X
+ sgfr(dst, src);
+#else
+ sr(dst, src);
+#endif
+}
+
+// Subtract 32-bit (Register = Register - Register)
+void MacroAssembler::Sub32(Register dst, Register src1, Register src2) {
+ // Use non-clobbering version if possible
+ if (CpuFeatures::IsSupported(DISTINCT_OPS) && !dst.is(src1)) {
+ srk(dst, src1, src2);
+ return;
+ }
+ if (!dst.is(src1) && !dst.is(src2)) lr(dst, src1);
+ // In scenario where we have dst = src - dst, we need to swap and negate
+ if (!dst.is(src1) && dst.is(src2)) {
+ sr(dst, src1); // dst = (dst - src)
+ lcr(dst, dst); // dst = -dst
+ } else {
+ sr(dst, src2);
+ }
+}
+
+// Subtract Pointer Sized (Register = Register - Register)
+void MacroAssembler::SubP(Register dst, Register src1, Register src2) {
+ // Use non-clobbering version if possible
+ if (CpuFeatures::IsSupported(DISTINCT_OPS) && !dst.is(src1)) {
+ SubP_RRR(dst, src1, src2);
+ return;
+ }
+ if (!dst.is(src1) && !dst.is(src2)) LoadRR(dst, src1);
+ // In scenario where we have dst = src - dst, we need to swap and negate
+ if (!dst.is(src1) && dst.is(src2)) {
+ SubP(dst, src1); // dst = (dst - src)
+ LoadComplementRR(dst, dst); // dst = -dst
+ } else {
+ SubP(dst, src2);
+ }
+}
+
+// Subtract Pointer Size with src extension
+// (Register dst(ptr) = Register dst (ptr) - Register src (32 | 32->64))
+// src is treated as a 32-bit signed integer, which is sign extended to
+// 64-bit if necessary.
+void MacroAssembler::SubP_ExtendSrc(Register dst, Register src1,
+ Register src2) {
+#if V8_TARGET_ARCH_S390X
+ if (!dst.is(src1) && !dst.is(src2)) LoadRR(dst, src1);
+
+ // In scenario where we have dst = src - dst, we need to swap and negate
+ if (!dst.is(src1) && dst.is(src2)) {
+ lgfr(dst, dst); // Sign extend this operand first.
+ SubP(dst, src1); // dst = (dst - src)
+ LoadComplementRR(dst, dst); // dst = -dst
+ } else {
+ sgfr(dst, src2);
+ }
+#else
+ SubP(dst, src1, src2);
+#endif
+}
+
+// Subtract 32-bit (Register-Memory)
+void MacroAssembler::Sub32(Register dst, const MemOperand& opnd) {
+ DCHECK(is_int20(opnd.offset()));
+ if (is_uint12(opnd.offset()))
+ s(dst, opnd);
+ else
+ sy(dst, opnd);
+}
+
+// Subtract Pointer Sized (Register - Memory)
+void MacroAssembler::SubP(Register dst, const MemOperand& opnd) {
+#if V8_TARGET_ARCH_S390X
+ sg(dst, opnd);
+#else
+ Sub32(dst, opnd);
+#endif
+}
+
+void MacroAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
+ sllg(src, src, Operand(32));
+ ldgr(dst, src);
+}
+
+void MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
+ lgdr(dst, src);
+ srlg(dst, dst, Operand(32));
+}
+
+void MacroAssembler::SubP_ExtendSrc(Register dst, const MemOperand& opnd) {
+#if V8_TARGET_ARCH_S390X
+ DCHECK(is_int20(opnd.offset()));
+ sgf(dst, opnd);
+#else
+ Sub32(dst, opnd);
+#endif
+}
+
+//----------------------------------------------------------------------------
+// Subtract Logical Instructions
+//----------------------------------------------------------------------------
+
+// Subtract Logical 32-bit (Register - Memory)
+void MacroAssembler::SubLogical(Register dst, const MemOperand& opnd) {
+ DCHECK(is_int20(opnd.offset()));
+ if (is_uint12(opnd.offset()))
+ sl(dst, opnd);
+ else
+ sly(dst, opnd);
+}
+
+// Subtract Logical Pointer Sized (Register - Memory)
+void MacroAssembler::SubLogicalP(Register dst, const MemOperand& opnd) {
+ DCHECK(is_int20(opnd.offset()));
+#if V8_TARGET_ARCH_S390X
+ slgf(dst, opnd);
+#else
+ SubLogical(dst, opnd);
+#endif
+}
+
+// Subtract Logical Pointer Size with src extension
+// (Register dst (ptr) = Register dst (ptr) - Mem opnd (32 | 32->64))
+// src is treated as a 32-bit signed integer, which is sign extended to
+// 64-bit if necessary.
+void MacroAssembler::SubLogicalP_ExtendSrc(Register dst,
+ const MemOperand& opnd) {
+#if V8_TARGET_ARCH_S390X
+ DCHECK(is_int20(opnd.offset()));
+ slgf(dst, opnd);
+#else
+ SubLogical(dst, opnd);
+#endif
+}
+
+//----------------------------------------------------------------------------
+// Bitwise Operations
+//----------------------------------------------------------------------------
+
+// AND 32-bit - dst = dst & src
+void MacroAssembler::And(Register dst, Register src) { nr(dst, src); }
+
+// AND Pointer Size - dst = dst & src
+void MacroAssembler::AndP(Register dst, Register src) { AndRR(dst, src); }
+
+// Non-clobbering AND 32-bit - dst = src1 & src1
+void MacroAssembler::And(Register dst, Register src1, Register src2) {
+ if (!dst.is(src1) && !dst.is(src2)) {
+ // We prefer to generate XR/XGR, over the non clobbering XRK/XRK
+ // as XR is a smaller instruction
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ nrk(dst, src1, src2);
+ return;
+ } else {
+ lr(dst, src1);
+ }
+ } else if (dst.is(src2)) {
+ src2 = src1;
+ }
+ And(dst, src2);
+}
+
+// Non-clobbering AND pointer size - dst = src1 & src1
+void MacroAssembler::AndP(Register dst, Register src1, Register src2) {
+ if (!dst.is(src1) && !dst.is(src2)) {
+ // We prefer to generate XR/XGR, over the non clobbering XRK/XRK
+ // as XR is a smaller instruction
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ AndP_RRR(dst, src1, src2);
+ return;
+ } else {
+ LoadRR(dst, src1);
+ }
+ } else if (dst.is(src2)) {
+ src2 = src1;
+ }
+ AndP(dst, src2);
+}
+
+// AND 32-bit (Reg - Mem)
+void MacroAssembler::And(Register dst, const MemOperand& opnd) {
+ DCHECK(is_int20(opnd.offset()));
+ if (is_uint12(opnd.offset()))
+ n(dst, opnd);
+ else
+ ny(dst, opnd);
+}
+
+// AND Pointer Size (Reg - Mem)
+void MacroAssembler::AndP(Register dst, const MemOperand& opnd) {
+ DCHECK(is_int20(opnd.offset()));
+#if V8_TARGET_ARCH_S390X
+ ng(dst, opnd);
+#else
+ And(dst, opnd);
+#endif
+}
+
+// AND 32-bit - dst = dst & imm
+void MacroAssembler::And(Register dst, const Operand& opnd) { nilf(dst, opnd); }
+
+// AND Pointer Size - dst = dst & imm
+void MacroAssembler::AndP(Register dst, const Operand& opnd) {
+#if V8_TARGET_ARCH_S390X
+ intptr_t value = opnd.imm_;
+ if (value >> 32 != -1) {
+ // this may not work b/c condition code won't be set correctly
+ nihf(dst, Operand(value >> 32));
+ }
+ nilf(dst, Operand(value & 0xFFFFFFFF));
+#else
+ And(dst, opnd);
+#endif
+}
+
+// AND 32-bit - dst = src & imm
+void MacroAssembler::And(Register dst, Register src, const Operand& opnd) {
+ if (!dst.is(src)) lr(dst, src);
+ nilf(dst, opnd);
+}
+
+// AND Pointer Size - dst = src & imm
+void MacroAssembler::AndP(Register dst, Register src, const Operand& opnd) {
+ // Try to exploit RISBG first
+ intptr_t value = opnd.imm_;
+ if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+ intptr_t shifted_value = value;
+ int trailing_zeros = 0;
+
+ // We start checking how many trailing zeros are left at the end.
+ while ((0 != shifted_value) && (0 == (shifted_value & 1))) {
+ trailing_zeros++;
+ shifted_value >>= 1;
+ }
+
+ // If temp (value with right-most set of zeros shifted out) is 1 less
+ // than power of 2, we have consecutive bits of 1.
+ // Special case: If shift_value is zero, we cannot use RISBG, as it requires
+ // selection of at least 1 bit.
+ if ((0 != shifted_value) && base::bits::IsPowerOfTwo64(shifted_value + 1)) {
+ int startBit =
+ base::bits::CountLeadingZeros64(shifted_value) - trailing_zeros;
+ int endBit = 63 - trailing_zeros;
+ // Start: startBit, End: endBit, Shift = 0, true = zero unselected bits.
+ risbg(dst, src, Operand(startBit), Operand(endBit), Operand::Zero(),
+ true);
+ return;
+ } else if (-1 == shifted_value) {
+ // A Special case in which all top bits up to MSB are 1's. In this case,
+ // we can set startBit to be 0.
+ int endBit = 63 - trailing_zeros;
+ risbg(dst, src, Operand::Zero(), Operand(endBit), Operand::Zero(), true);
+ return;
+ }
+ }
+
+ // If we are &'ing zero, we can just whack the dst register and skip copy
+ if (!dst.is(src) && (0 != value)) LoadRR(dst, src);
+ AndP(dst, opnd);
+}
+
+// OR 32-bit - dst = dst & src
+void MacroAssembler::Or(Register dst, Register src) { or_z(dst, src); }
+
+// OR Pointer Size - dst = dst & src
+void MacroAssembler::OrP(Register dst, Register src) { OrRR(dst, src); }
+
+// Non-clobbering OR 32-bit - dst = src1 & src1
+void MacroAssembler::Or(Register dst, Register src1, Register src2) {
+ if (!dst.is(src1) && !dst.is(src2)) {
+ // We prefer to generate XR/XGR, over the non clobbering XRK/XRK
+ // as XR is a smaller instruction
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ ork(dst, src1, src2);
+ return;
+ } else {
+ lr(dst, src1);
+ }
+ } else if (dst.is(src2)) {
+ src2 = src1;
+ }
+ Or(dst, src2);
+}
+
+// Non-clobbering OR pointer size - dst = src1 & src1
+void MacroAssembler::OrP(Register dst, Register src1, Register src2) {
+ if (!dst.is(src1) && !dst.is(src2)) {
+ // We prefer to generate XR/XGR, over the non clobbering XRK/XRK
+ // as XR is a smaller instruction
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ OrP_RRR(dst, src1, src2);
+ return;
+ } else {
+ LoadRR(dst, src1);
+ }
+ } else if (dst.is(src2)) {
+ src2 = src1;
+ }
+ OrP(dst, src2);
+}
+
+// OR 32-bit (Reg - Mem)
+void MacroAssembler::Or(Register dst, const MemOperand& opnd) {
+ DCHECK(is_int20(opnd.offset()));
+ if (is_uint12(opnd.offset()))
+ o(dst, opnd);
+ else
+ oy(dst, opnd);
+}
+
+// OR Pointer Size (Reg - Mem)
+void MacroAssembler::OrP(Register dst, const MemOperand& opnd) {
+ DCHECK(is_int20(opnd.offset()));
+#if V8_TARGET_ARCH_S390X
+ og(dst, opnd);
+#else
+ Or(dst, opnd);
+#endif
+}
+
+// OR 32-bit - dst = dst & imm
+void MacroAssembler::Or(Register dst, const Operand& opnd) { oilf(dst, opnd); }
+
+// OR Pointer Size - dst = dst & imm
+void MacroAssembler::OrP(Register dst, const Operand& opnd) {
+#if V8_TARGET_ARCH_S390X
+ intptr_t value = opnd.imm_;
+ if (value >> 32 != 0) {
+ // this may not work b/c condition code won't be set correctly
+ oihf(dst, Operand(value >> 32));
+ }
+ oilf(dst, Operand(value & 0xFFFFFFFF));
+#else
+ Or(dst, opnd);
+#endif
+}
+
+// OR 32-bit - dst = src & imm
+void MacroAssembler::Or(Register dst, Register src, const Operand& opnd) {
+ if (!dst.is(src)) lr(dst, src);
+ oilf(dst, opnd);
+}
+
+// OR Pointer Size - dst = src & imm
+void MacroAssembler::OrP(Register dst, Register src, const Operand& opnd) {
+ if (!dst.is(src)) LoadRR(dst, src);
+ OrP(dst, opnd);
+}
+
+// XOR 32-bit - dst = dst & src
+void MacroAssembler::Xor(Register dst, Register src) { xr(dst, src); }
+
+// XOR Pointer Size - dst = dst & src
+void MacroAssembler::XorP(Register dst, Register src) { XorRR(dst, src); }
+
+// Non-clobbering XOR 32-bit - dst = src1 & src1
+void MacroAssembler::Xor(Register dst, Register src1, Register src2) {
+ if (!dst.is(src1) && !dst.is(src2)) {
+ // We prefer to generate XR/XGR, over the non clobbering XRK/XRK
+ // as XR is a smaller instruction
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ xrk(dst, src1, src2);
+ return;
+ } else {
+ lr(dst, src1);
+ }
+ } else if (dst.is(src2)) {
+ src2 = src1;
+ }
+ Xor(dst, src2);
+}
+
+// Non-clobbering XOR pointer size - dst = src1 & src1
+void MacroAssembler::XorP(Register dst, Register src1, Register src2) {
+ if (!dst.is(src1) && !dst.is(src2)) {
+ // We prefer to generate XR/XGR, over the non clobbering XRK/XRK
+ // as XR is a smaller instruction
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ XorP_RRR(dst, src1, src2);
+ return;
+ } else {
+ LoadRR(dst, src1);
+ }
+ } else if (dst.is(src2)) {
+ src2 = src1;
+ }
+ XorP(dst, src2);
+}
+
+// XOR 32-bit (Reg - Mem)
+void MacroAssembler::Xor(Register dst, const MemOperand& opnd) {
+ DCHECK(is_int20(opnd.offset()));
+ if (is_uint12(opnd.offset()))
+ x(dst, opnd);
+ else
+ xy(dst, opnd);
+}
+
+// XOR Pointer Size (Reg - Mem)
+void MacroAssembler::XorP(Register dst, const MemOperand& opnd) {
+ DCHECK(is_int20(opnd.offset()));
+#if V8_TARGET_ARCH_S390X
+ xg(dst, opnd);
+#else
+ Xor(dst, opnd);
+#endif
+}
+
+// XOR 32-bit - dst = dst & imm
+void MacroAssembler::Xor(Register dst, const Operand& opnd) { xilf(dst, opnd); }
+
+// XOR Pointer Size - dst = dst & imm
+void MacroAssembler::XorP(Register dst, const Operand& opnd) {
+#if V8_TARGET_ARCH_S390X
+ intptr_t value = opnd.imm_;
+ xihf(dst, Operand(value >> 32));
+ xilf(dst, Operand(value & 0xFFFFFFFF));
+#else
+ Xor(dst, opnd);
+#endif
+}
+
+// XOR 32-bit - dst = src & imm
+void MacroAssembler::Xor(Register dst, Register src, const Operand& opnd) {
+ if (!dst.is(src)) lr(dst, src);
+ xilf(dst, opnd);
+}
+
+// XOR Pointer Size - dst = src & imm
+void MacroAssembler::XorP(Register dst, Register src, const Operand& opnd) {
+ if (!dst.is(src)) LoadRR(dst, src);
+ XorP(dst, opnd);
+}
+
+void MacroAssembler::NotP(Register dst) {
+#if V8_TARGET_ARCH_S390X
+ xihf(dst, Operand(0xFFFFFFFF));
+ xilf(dst, Operand(0xFFFFFFFF));
+#else
+ XorP(dst, Operand(0xFFFFFFFF));
+#endif
+}
+
+// works the same as mov
+void MacroAssembler::Load(Register dst, const Operand& opnd) {
+ intptr_t value = opnd.immediate();
+ if (is_int16(value)) {
+#if V8_TARGET_ARCH_S390X
+ lghi(dst, opnd);
+#else
+ lhi(dst, opnd);
+#endif
+ } else {
+#if V8_TARGET_ARCH_S390X
+ llilf(dst, opnd);
+#else
+ iilf(dst, opnd);
+#endif
+ }
+}
+
+void MacroAssembler::Load(Register dst, const MemOperand& opnd) {
+ DCHECK(is_int20(opnd.offset()));
+#if V8_TARGET_ARCH_S390X
+ lgf(dst, opnd); // 64<-32
+#else
+ if (is_uint12(opnd.offset())) {
+ l(dst, opnd);
+ } else {
+ ly(dst, opnd);
+ }
+#endif
+}
+
+//-----------------------------------------------------------------------------
+// Compare Helpers
+//-----------------------------------------------------------------------------
+
+// Compare 32-bit Register vs Register
+void MacroAssembler::Cmp32(Register src1, Register src2) { cr_z(src1, src2); }
+
+// Compare Pointer Sized Register vs Register
+void MacroAssembler::CmpP(Register src1, Register src2) {
+#if V8_TARGET_ARCH_S390X
+ cgr(src1, src2);
+#else
+ Cmp32(src1, src2);
+#endif
+}
+
+// Compare 32-bit Register vs Immediate
+// This helper will set up proper relocation entries if required.
+void MacroAssembler::Cmp32(Register dst, const Operand& opnd) {
+ if (opnd.rmode_ == kRelocInfo_NONEPTR) {
+ intptr_t value = opnd.immediate();
+ if (is_int16(value))
+ chi(dst, opnd);
+ else
+ cfi(dst, opnd);
+ } else {
+ // Need to generate relocation record here
+ RecordRelocInfo(opnd.rmode_, opnd.imm_);
+ cfi(dst, opnd);
+ }
+}
+
+// Compare Pointer Sized Register vs Immediate
+// This helper will set up proper relocation entries if required.
+void MacroAssembler::CmpP(Register dst, const Operand& opnd) {
+#if V8_TARGET_ARCH_S390X
+ if (opnd.rmode_ == kRelocInfo_NONEPTR) {
+ cgfi(dst, opnd);
+ } else {
+ mov(r0, opnd); // Need to generate 64-bit relocation
+ cgr(dst, r0);
+ }
+#else
+ Cmp32(dst, opnd);
+#endif
+}
+
+// Compare 32-bit Register vs Memory
+void MacroAssembler::Cmp32(Register dst, const MemOperand& opnd) {
+ // make sure offset is within 20 bit range
+ DCHECK(is_int20(opnd.offset()));
+ if (is_uint12(opnd.offset()))
+ c(dst, opnd);
+ else
+ cy(dst, opnd);
+}
+
+// Compare Pointer Size Register vs Memory
+void MacroAssembler::CmpP(Register dst, const MemOperand& opnd) {
+ // make sure offset is within 20 bit range
+ DCHECK(is_int20(opnd.offset()));
+#if V8_TARGET_ARCH_S390X
+ cg(dst, opnd);
+#else
+ Cmp32(dst, opnd);
+#endif
+}
+
+//-----------------------------------------------------------------------------
+// Compare Logical Helpers
+//-----------------------------------------------------------------------------
+
+// Compare Logical 32-bit Register vs Register
+void MacroAssembler::CmpLogical32(Register dst, Register src) { clr(dst, src); }
+
+// Compare Logical Pointer Sized Register vs Register
+void MacroAssembler::CmpLogicalP(Register dst, Register src) {
+#ifdef V8_TARGET_ARCH_S390X
+ clgr(dst, src);
+#else
+ CmpLogical32(dst, src);
+#endif
+}
+
+// Compare Logical 32-bit Register vs Immediate
+void MacroAssembler::CmpLogical32(Register dst, const Operand& opnd) {
+ clfi(dst, opnd);
+}
+
+// Compare Logical Pointer Sized Register vs Immediate
+void MacroAssembler::CmpLogicalP(Register dst, const Operand& opnd) {
+#if V8_TARGET_ARCH_S390X
+ DCHECK(static_cast<uint32_t>(opnd.immediate() >> 32) == 0);
+ clgfi(dst, opnd);
+#else
+ CmpLogical32(dst, opnd);
+#endif
+}
+
+// Compare Logical 32-bit Register vs Memory
+void MacroAssembler::CmpLogical32(Register dst, const MemOperand& opnd) {
+ // make sure offset is within 20 bit range
+ DCHECK(is_int20(opnd.offset()));
+ if (is_uint12(opnd.offset()))
+ cl(dst, opnd);
+ else
+ cly(dst, opnd);
+}
+
+// Compare Logical Pointer Sized Register vs Memory
+void MacroAssembler::CmpLogicalP(Register dst, const MemOperand& opnd) {
+ // make sure offset is within 20 bit range
+ DCHECK(is_int20(opnd.offset()));
+#if V8_TARGET_ARCH_S390X
+ clg(dst, opnd);
+#else
+ CmpLogical32(dst, opnd);
+#endif
+}
+
+// Compare Logical Byte (Mem - Imm)
+void MacroAssembler::CmpLogicalByte(const MemOperand& mem, const Operand& imm) {
+ DCHECK(is_uint8(imm.immediate()));
+ if (is_uint12(mem.offset()))
+ cli(mem, imm);
+ else
+ cliy(mem, imm);
+}
+
+void MacroAssembler::Branch(Condition c, const Operand& opnd) {
+ intptr_t value = opnd.immediate();
+ if (is_int16(value))
+ brc(c, opnd);
+ else
+ brcl(c, opnd);
+}
+
+// Branch On Count. Decrement R1, and branch if R1 != 0.
+void MacroAssembler::BranchOnCount(Register r1, Label* l) {
+ int32_t offset = branch_offset(l);
+ positions_recorder()->WriteRecordedPositions();
+ if (is_int16(offset)) {
+#if V8_TARGET_ARCH_S390X
+ brctg(r1, Operand(offset));
+#else
+ brct(r1, Operand(offset));
+#endif
+ } else {
+ AddP(r1, Operand(-1));
+ Branch(ne, Operand(offset));
+ }
+}
+
+void MacroAssembler::LoadIntLiteral(Register dst, int value) {
+ Load(dst, Operand(value));
+}
+
+void MacroAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
+ intptr_t value = reinterpret_cast<intptr_t>(smi);
+#if V8_TARGET_ARCH_S390X
+ DCHECK((value & 0xffffffff) == 0);
+ // The smi value is loaded in upper 32-bits. Lower 32-bit are zeros.
+ llihf(dst, Operand(value >> 32));
+#else
+ llilf(dst, Operand(value));
+#endif
+}
+
+void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, uint64_t value,
+ Register scratch) {
+ uint32_t hi_32 = value >> 32;
+ uint32_t lo_32 = static_cast<uint32_t>(value);
+
+ // Load the 64-bit value into a GPR, then transfer it to FPR via LDGR
+ iihf(scratch, Operand(hi_32));
+ iilf(scratch, Operand(lo_32));
+ ldgr(result, scratch);
+}
+
+void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
+ Register scratch) {
+ uint64_t int_val = bit_cast<uint64_t, double>(value);
+ LoadDoubleLiteral(result, int_val, scratch);
+}
+
+void MacroAssembler::LoadFloat32Literal(DoubleRegister result, float value,
+ Register scratch) {
+ uint32_t hi_32 = bit_cast<uint32_t>(value);
+ uint32_t lo_32 = 0;
+
+ // Load the 64-bit value into a GPR, then transfer it to FPR via LDGR
+ iihf(scratch, Operand(hi_32));
+ iilf(scratch, Operand(lo_32));
+ ldgr(result, scratch);
+}
+
+void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch) {
+#if V8_TARGET_ARCH_S390X
+ LoadSmiLiteral(scratch, smi);
+ cgr(src1, scratch);
+#else
+ // CFI takes 32-bit immediate.
+ cfi(src1, Operand(smi));
+#endif
+}
+
+void MacroAssembler::CmpLogicalSmiLiteral(Register src1, Smi* smi,
+ Register scratch) {
+#if V8_TARGET_ARCH_S390X
+ LoadSmiLiteral(scratch, smi);
+ clgr(src1, scratch);
+#else
+ // CLFI takes 32-bit immediate
+ clfi(src1, Operand(smi));
+#endif
+}
+
+void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
+ Register scratch) {
+#if V8_TARGET_ARCH_S390X
+ LoadSmiLiteral(scratch, smi);
+ AddP(dst, src, scratch);
+#else
+ AddP(dst, src, Operand(reinterpret_cast<intptr_t>(smi)));
+#endif
+}
+
+void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
+ Register scratch) {
+#if V8_TARGET_ARCH_S390X
+ LoadSmiLiteral(scratch, smi);
+ SubP(dst, src, scratch);
+#else
+ AddP(dst, src, Operand(-(reinterpret_cast<intptr_t>(smi))));
+#endif
+}
+
+void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi) {
+ if (!dst.is(src)) LoadRR(dst, src);
+#if V8_TARGET_ARCH_S390X
+ DCHECK((reinterpret_cast<intptr_t>(smi) & 0xffffffff) == 0);
+ int value = static_cast<int>(reinterpret_cast<intptr_t>(smi) >> 32);
+ nihf(dst, Operand(value));
+#else
+ nilf(dst, Operand(reinterpret_cast<int>(smi)));
+#endif
+}
+
+// Load a "pointer" sized value from the memory location
+void MacroAssembler::LoadP(Register dst, const MemOperand& mem,
+ Register scratch) {
+ int offset = mem.offset();
+
+ if (!scratch.is(no_reg) && !is_int20(offset)) {
+ /* cannot use d-form */
+ LoadIntLiteral(scratch, offset);
+#if V8_TARGET_ARCH_S390X
+ lg(dst, MemOperand(mem.rb(), scratch));
+#else
+ l(dst, MemOperand(mem.rb(), scratch));
+#endif
+ } else {
+#if V8_TARGET_ARCH_S390X
+ lg(dst, mem);
+#else
+ if (is_uint12(offset)) {
+ l(dst, mem);
+ } else {
+ ly(dst, mem);
+ }
+#endif
+ }
+}
+
+// Store a "pointer" sized value to the memory location
+void MacroAssembler::StoreP(Register src, const MemOperand& mem,
+ Register scratch) {
+ if (!is_int20(mem.offset())) {
+ DCHECK(!scratch.is(no_reg));
+ DCHECK(!scratch.is(r0));
+ LoadIntLiteral(scratch, mem.offset());
+#if V8_TARGET_ARCH_S390X
+ stg(src, MemOperand(mem.rb(), scratch));
+#else
+ st(src, MemOperand(mem.rb(), scratch));
+#endif
+ } else {
+#if V8_TARGET_ARCH_S390X
+ stg(src, mem);
+#else
+ // StoreW will try to generate ST if offset fits, otherwise
+ // it'll generate STY.
+ StoreW(src, mem);
+#endif
+ }
+}
+
+// Store a "pointer" sized constant to the memory location
+void MacroAssembler::StoreP(const MemOperand& mem, const Operand& opnd,
+ Register scratch) {
+ // Relocations not supported
+ DCHECK(opnd.rmode_ == kRelocInfo_NONEPTR);
+
+ // Try to use MVGHI/MVHI
+ if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_uint12(mem.offset()) &&
+ mem.getIndexRegister().is(r0) && is_int16(opnd.imm_)) {
+#if V8_TARGET_ARCH_S390X
+ mvghi(mem, opnd);
+#else
+ mvhi(mem, opnd);
+#endif
+ } else {
+ LoadImmP(scratch, opnd);
+ StoreP(scratch, mem);
+ }
+}
+
+void MacroAssembler::LoadMultipleP(Register dst1, Register dst2,
+ const MemOperand& mem) {
+#if V8_TARGET_ARCH_S390X
+ DCHECK(is_int20(mem.offset()));
+ lmg(dst1, dst2, mem);
+#else
+ if (is_uint12(mem.offset())) {
+ lm(dst1, dst2, mem);
+ } else {
+ DCHECK(is_int20(mem.offset()));
+ lmy(dst1, dst2, mem);
+ }
+#endif
+}
+
+void MacroAssembler::StoreMultipleP(Register src1, Register src2,
+ const MemOperand& mem) {
+#if V8_TARGET_ARCH_S390X
+ DCHECK(is_int20(mem.offset()));
+ stmg(src1, src2, mem);
+#else
+ if (is_uint12(mem.offset())) {
+ stm(src1, src2, mem);
+ } else {
+ DCHECK(is_int20(mem.offset()));
+ stmy(src1, src2, mem);
+ }
+#endif
+}
+
+void MacroAssembler::LoadMultipleW(Register dst1, Register dst2,
+ const MemOperand& mem) {
+ if (is_uint12(mem.offset())) {
+ lm(dst1, dst2, mem);
+ } else {
+ DCHECK(is_int20(mem.offset()));
+ lmy(dst1, dst2, mem);
+ }
+}
+
+void MacroAssembler::StoreMultipleW(Register src1, Register src2,
+ const MemOperand& mem) {
+ if (is_uint12(mem.offset())) {
+ stm(src1, src2, mem);
+ } else {
+ DCHECK(is_int20(mem.offset()));
+ stmy(src1, src2, mem);
+ }
+}
+
+// Load 32-bits and sign extend if necessary.
+void MacroAssembler::LoadW(Register dst, Register src) {
+#if V8_TARGET_ARCH_S390X
+ lgfr(dst, src);
+#else
+ if (!dst.is(src)) lr(dst, src);
+#endif
+}
+
+// Load 32-bits and sign extend if necessary.
+void MacroAssembler::LoadW(Register dst, const MemOperand& mem,
+ Register scratch) {
+ int offset = mem.offset();
+
+ if (!is_int20(offset)) {
+ DCHECK(!scratch.is(no_reg));
+ LoadIntLiteral(scratch, offset);
+#if V8_TARGET_ARCH_S390X
+ lgf(dst, MemOperand(mem.rb(), scratch));
+#else
+ l(dst, MemOperand(mem.rb(), scratch));
+#endif
+ } else {
+#if V8_TARGET_ARCH_S390X
+ lgf(dst, mem);
+#else
+ if (is_uint12(offset)) {
+ l(dst, mem);
+ } else {
+ ly(dst, mem);
+ }
+#endif
+ }
+}
+
+// Load 32-bits and zero extend if necessary.
+void MacroAssembler::LoadlW(Register dst, Register src) {
+#if V8_TARGET_ARCH_S390X
+ llgfr(dst, src);
+#else
+ if (!dst.is(src)) lr(dst, src);
+#endif
+}
+
+// Variable length depending on whether offset fits into immediate field
+// MemOperand of RX or RXY format
+void MacroAssembler::LoadlW(Register dst, const MemOperand& mem,
+ Register scratch) {
+ Register base = mem.rb();
+ int offset = mem.offset();
+
+#if V8_TARGET_ARCH_S390X
+ if (is_int20(offset)) {
+ llgf(dst, mem);
+ } else if (!scratch.is(no_reg)) {
+ // Materialize offset into scratch register.
+ LoadIntLiteral(scratch, offset);
+ llgf(dst, MemOperand(base, scratch));
+ } else {
+ DCHECK(false);
+ }
+#else
+ bool use_RXform = false;
+ bool use_RXYform = false;
+ if (is_uint12(offset)) {
+ // RX-format supports unsigned 12-bits offset.
+ use_RXform = true;
+ } else if (is_int20(offset)) {
+ // RXY-format supports signed 20-bits offset.
+ use_RXYform = true;
+ } else if (!scratch.is(no_reg)) {
+ // Materialize offset into scratch register.
+ LoadIntLiteral(scratch, offset);
+ } else {
+ DCHECK(false);
+ }
+
+ if (use_RXform) {
+ l(dst, mem);
+ } else if (use_RXYform) {
+ ly(dst, mem);
+ } else {
+ ly(dst, MemOperand(base, scratch));
+ }
+#endif
+}
+
+void MacroAssembler::LoadB(Register dst, const MemOperand& mem) {
+#if V8_TARGET_ARCH_S390X
+ lgb(dst, mem);
+#else
+ lb(dst, mem);
+#endif
+}
+
+void MacroAssembler::LoadlB(Register dst, const MemOperand& mem) {
+#if V8_TARGET_ARCH_S390X
+ llgc(dst, mem);
+#else
+ llc(dst, mem);
+#endif
+}
+
+// Load And Test (Reg <- Reg)
+void MacroAssembler::LoadAndTest32(Register dst, Register src) {
+ ltr(dst, src);
+}
+
+// Load And Test
+// (Register dst(ptr) = Register src (32 | 32->64))
+// src is treated as a 32-bit signed integer, which is sign extended to
+// 64-bit if necessary.
+void MacroAssembler::LoadAndTestP_ExtendSrc(Register dst, Register src) {
+#if V8_TARGET_ARCH_S390X
+ ltgfr(dst, src);
+#else
+ ltr(dst, src);
+#endif
+}
+
+// Load And Test Pointer Sized (Reg <- Reg)
+void MacroAssembler::LoadAndTestP(Register dst, Register src) {
+#if V8_TARGET_ARCH_S390X
+ ltgr(dst, src);
+#else
+ ltr(dst, src);
+#endif
+}
+
+// Load And Test 32-bit (Reg <- Mem)
+void MacroAssembler::LoadAndTest32(Register dst, const MemOperand& mem) {
+ lt_z(dst, mem);
+}
+
+// Load And Test Pointer Sized (Reg <- Mem)
+void MacroAssembler::LoadAndTestP(Register dst, const MemOperand& mem) {
+#if V8_TARGET_ARCH_S390X
+ ltg(dst, mem);
+#else
+ lt_z(dst, mem);
+#endif
+}
+
+// Load Double Precision (64-bit) Floating Point number from memory
+void MacroAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem) {
+ // for 32bit and 64bit we all use 64bit floating point regs
+ if (is_uint12(mem.offset())) {
+ ld(dst, mem);
+ } else {
+ ldy(dst, mem);
+ }
+}
+
+// Load Single Precision (32-bit) Floating Point number from memory
+void MacroAssembler::LoadFloat32(DoubleRegister dst, const MemOperand& mem) {
+ if (is_uint12(mem.offset())) {
+ le_z(dst, mem);
+ } else {
+ DCHECK(is_int20(mem.offset()));
+ ley(dst, mem);
+ }
+}
+
+// Load Single Precision (32-bit) Floating Point number from memory,
+// and convert to Double Precision (64-bit)
+void MacroAssembler::LoadFloat32ConvertToDouble(DoubleRegister dst,
+ const MemOperand& mem) {
+ LoadFloat32(dst, mem);
+ ldebr(dst, dst);
+}
+
+// Store Double Precision (64-bit) Floating Point number to memory
+void MacroAssembler::StoreDouble(DoubleRegister dst, const MemOperand& mem) {
+ if (is_uint12(mem.offset())) {
+ std(dst, mem);
+ } else {
+ stdy(dst, mem);
+ }
+}
+
+// Store Single Precision (32-bit) Floating Point number to memory
+void MacroAssembler::StoreFloat32(DoubleRegister src, const MemOperand& mem) {
+ if (is_uint12(mem.offset())) {
+ ste(src, mem);
+ } else {
+ stey(src, mem);
+ }
+}
+
+// Convert Double precision (64-bit) to Single Precision (32-bit)
+// and store resulting Float32 to memory
+void MacroAssembler::StoreDoubleAsFloat32(DoubleRegister src,
+ const MemOperand& mem,
+ DoubleRegister scratch) {
+ ledbr(scratch, src);
+ StoreFloat32(scratch, mem);
+}
+
+// Variable length depending on whether offset fits into immediate field
+// MemOperand of RX or RXY format
+void MacroAssembler::StoreW(Register src, const MemOperand& mem,
+ Register scratch) {
+ Register base = mem.rb();
+ int offset = mem.offset();
+
+ bool use_RXform = false;
+ bool use_RXYform = false;
+
+ if (is_uint12(offset)) {
+ // RX-format supports unsigned 12-bits offset.
+ use_RXform = true;
+ } else if (is_int20(offset)) {
+ // RXY-format supports signed 20-bits offset.
+ use_RXYform = true;
+ } else if (!scratch.is(no_reg)) {
+ // Materialize offset into scratch register.
+ LoadIntLiteral(scratch, offset);
+ } else {
+ // scratch is no_reg
+ DCHECK(false);
+ }
+
+ if (use_RXform) {
+ st(src, mem);
+ } else if (use_RXYform) {
+ sty(src, mem);
+ } else {
+ StoreW(src, MemOperand(base, scratch));
+ }
+}
+
+// Loads 16-bits half-word value from memory and sign extends to pointer
+// sized register
+void MacroAssembler::LoadHalfWordP(Register dst, const MemOperand& mem,
+ Register scratch) {
+ Register base = mem.rb();
+ int offset = mem.offset();
+
+ if (!is_int20(offset)) {
+ DCHECK(!scratch.is(no_reg));
+ LoadIntLiteral(scratch, offset);
+#if V8_TARGET_ARCH_S390X
+ lgh(dst, MemOperand(base, scratch));
+#else
+ lh(dst, MemOperand(base, scratch));
+#endif
+ } else {
+#if V8_TARGET_ARCH_S390X
+ lgh(dst, mem);
+#else
+ if (is_uint12(offset)) {
+ lh(dst, mem);
+ } else {
+ lhy(dst, mem);
+ }
+#endif
+ }
+}
+
+// Variable length depending on whether offset fits into immediate field
+// MemOperand current only supports d-form
+void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem,
+ Register scratch) {
+ Register base = mem.rb();
+ int offset = mem.offset();
+
+ if (is_uint12(offset)) {
+ sth(src, mem);
+ } else if (is_int20(offset)) {
+ sthy(src, mem);
+ } else {
+ DCHECK(!scratch.is(no_reg));
+ LoadIntLiteral(scratch, offset);
+ sth(src, MemOperand(base, scratch));
+ }
+}
+
+// Variable length depending on whether offset fits into immediate field
+// MemOperand current only supports d-form
+void MacroAssembler::StoreByte(Register src, const MemOperand& mem,
+ Register scratch) {
+ Register base = mem.rb();
+ int offset = mem.offset();
+
+ if (is_uint12(offset)) {
+ stc(src, mem);
+ } else if (is_int20(offset)) {
+ stcy(src, mem);
+ } else {
+ DCHECK(!scratch.is(no_reg));
+ LoadIntLiteral(scratch, offset);
+ stc(src, MemOperand(base, scratch));
+ }
+}
+
+// Shift left logical for 32-bit integer types.
+void MacroAssembler::ShiftLeft(Register dst, Register src, const Operand& val) {
+ if (dst.is(src)) {
+ sll(dst, val);
+ } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ sllk(dst, src, val);
+ } else {
+ lr(dst, src);
+ sll(dst, val);
+ }
+}
+
+// Shift left logical for 32-bit integer types.
+void MacroAssembler::ShiftLeft(Register dst, Register src, Register val) {
+ if (dst.is(src)) {
+ sll(dst, val);
+ } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ sllk(dst, src, val);
+ } else {
+ DCHECK(!dst.is(val)); // The lr/sll path clobbers val.
+ lr(dst, src);
+ sll(dst, val);
+ }
+}
+
+// Shift right logical for 32-bit integer types.
+void MacroAssembler::ShiftRight(Register dst, Register src,
+ const Operand& val) {
+ if (dst.is(src)) {
+ srl(dst, val);
+ } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ srlk(dst, src, val);
+ } else {
+ lr(dst, src);
+ srl(dst, val);
+ }
+}
+
+// Shift right logical for 32-bit integer types.
+void MacroAssembler::ShiftRight(Register dst, Register src, Register val) {
+ if (dst.is(src)) {
+ srl(dst, val);
+ } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ srlk(dst, src, val);
+ } else {
+ DCHECK(!dst.is(val)); // The lr/srl path clobbers val.
+ lr(dst, src);
+ srl(dst, val);
+ }
+}
+
+// Shift left arithmetic for 32-bit integer types.
+void MacroAssembler::ShiftLeftArith(Register dst, Register src,
+ const Operand& val) {
+ if (dst.is(src)) {
+ sla(dst, val);
+ } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ slak(dst, src, val);
+ } else {
+ lr(dst, src);
+ sla(dst, val);
+ }
+}
+
+// Shift left arithmetic for 32-bit integer types.
+void MacroAssembler::ShiftLeftArith(Register dst, Register src, Register val) {
+ if (dst.is(src)) {
+ sla(dst, val);
+ } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ slak(dst, src, val);
+ } else {
+ DCHECK(!dst.is(val)); // The lr/sla path clobbers val.
+ lr(dst, src);
+ sla(dst, val);
+ }
+}
+
+// Shift right arithmetic for 32-bit integer types.
+void MacroAssembler::ShiftRightArith(Register dst, Register src,
+ const Operand& val) {
+ if (dst.is(src)) {
+ sra(dst, val);
+ } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ srak(dst, src, val);
+ } else {
+ lr(dst, src);
+ sra(dst, val);
+ }
+}
+
+// Shift right arithmetic for 32-bit integer types.
+void MacroAssembler::ShiftRightArith(Register dst, Register src, Register val) {
+ if (dst.is(src)) {
+ sra(dst, val);
+ } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ srak(dst, src, val);
+ } else {
+ DCHECK(!dst.is(val)); // The lr/sra path clobbers val.
+ lr(dst, src);
+ sra(dst, val);
+ }
+}
+
+// Clear right most # of bits
+void MacroAssembler::ClearRightImm(Register dst, Register src,
+ const Operand& val) {
+ int numBitsToClear = val.imm_ % (kPointerSize * 8);
+
+ // Try to use RISBG if possible
+ if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+ int endBit = 63 - numBitsToClear;
+ risbg(dst, src, Operand::Zero(), Operand(endBit), Operand::Zero(), true);
+ return;
+ }
+
+ uint64_t hexMask = ~((1L << numBitsToClear) - 1);
+
+ // S390 AND instr clobbers source. Make a copy if necessary
+ if (!dst.is(src)) LoadRR(dst, src);
+
+ if (numBitsToClear <= 16) {
+ nill(dst, Operand(static_cast<uint16_t>(hexMask)));
+ } else if (numBitsToClear <= 32) {
+ nilf(dst, Operand(static_cast<uint32_t>(hexMask)));
+ } else if (numBitsToClear <= 64) {
+ nilf(dst, Operand(static_cast<intptr_t>(0)));
+ nihf(dst, Operand(hexMask >> 32));
+ }
+}
+
+void MacroAssembler::Popcnt32(Register dst, Register src) {
+ DCHECK(!src.is(r0));
+ DCHECK(!dst.is(r0));
+
+ popcnt(dst, src);
+ ShiftRight(r0, dst, Operand(16));
+ ar(dst, r0);
+ ShiftRight(r0, dst, Operand(8));
+ ar(dst, r0);
+ lbr(dst, dst);
+}
+
+#ifdef V8_TARGET_ARCH_S390X
+void MacroAssembler::Popcnt64(Register dst, Register src) {
+ DCHECK(!src.is(r0));
+ DCHECK(!dst.is(r0));
+
+ popcnt(dst, src);
+ ShiftRightP(r0, dst, Operand(32));
+ AddP(dst, r0);
+ ShiftRightP(r0, dst, Operand(16));
+ AddP(dst, r0);
+ ShiftRightP(r0, dst, Operand(8));
+ AddP(dst, r0);
+ lbr(dst, dst);
+}
+#endif
+
+#ifdef DEBUG
+bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
+ Register reg5, Register reg6, Register reg7, Register reg8,
+ Register reg9, Register reg10) {
+ int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
+ reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
+ reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
+ reg10.is_valid();
+
+ RegList regs = 0;
+ if (reg1.is_valid()) regs |= reg1.bit();
+ if (reg2.is_valid()) regs |= reg2.bit();
+ if (reg3.is_valid()) regs |= reg3.bit();
+ if (reg4.is_valid()) regs |= reg4.bit();
+ if (reg5.is_valid()) regs |= reg5.bit();
+ if (reg6.is_valid()) regs |= reg6.bit();
+ if (reg7.is_valid()) regs |= reg7.bit();
+ if (reg8.is_valid()) regs |= reg8.bit();
+ if (reg9.is_valid()) regs |= reg9.bit();
+ if (reg10.is_valid()) regs |= reg10.bit();
+ int n_of_non_aliasing_regs = NumRegs(regs);
+
+ return n_of_valid_regs != n_of_non_aliasing_regs;
+}
+#endif
+
+CodePatcher::CodePatcher(Isolate* isolate, byte* address, int size,
+ FlushICache flush_cache)
+ : address_(address),
+ size_(size),
+ masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
+ flush_cache_(flush_cache) {
+ // Create a new macro assembler pointing to the address of the code to patch.
+ // The size is adjusted with kGap on order for the assembler to generate size
+ // bytes of instructions without failing with buffer size constraints.
+ DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+CodePatcher::~CodePatcher() {
+ // Indicate that code has changed.
+ if (flush_cache_ == FLUSH) {
+ Assembler::FlushICache(masm_.isolate(), address_, size_);
+ }
+
+ // Check that the code was patched as expected.
+ DCHECK(masm_.pc_ == address_ + size_);
+ DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+void MacroAssembler::TruncatingDiv(Register result, Register dividend,
+ int32_t divisor) {
+ DCHECK(!dividend.is(result));
+ DCHECK(!dividend.is(r0));
+ DCHECK(!result.is(r0));
+ base::MagicNumbersForDivision<uint32_t> mag =
+ base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
+#ifdef V8_TARGET_ARCH_S390X
+ LoadRR(result, dividend);
+ MulP(result, Operand(mag.multiplier));
+ ShiftRightArithP(result, result, Operand(32));
+
+#else
+ lay(sp, MemOperand(sp, -kPointerSize));
+ StoreP(r1, MemOperand(sp));
+
+ mov(r1, Operand(mag.multiplier));
+ mr_z(r0, dividend); // r0:r1 = r1 * dividend
+
+ LoadRR(result, r0);
+ LoadP(r1, MemOperand(sp));
+ la(sp, MemOperand(sp, kPointerSize));
+#endif
+ bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
+ if (divisor > 0 && neg) {
+ AddP(result, dividend);
+ }
+ if (divisor < 0 && !neg && mag.multiplier > 0) {
+ SubP(result, dividend);
+ }
+ if (mag.shift > 0) ShiftRightArith(result, result, Operand(mag.shift));
+ ExtractBit(r0, dividend, 31);
+ AddP(result, r0);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_S390
diff --git a/deps/v8/src/s390/macro-assembler-s390.h b/deps/v8/src/s390/macro-assembler-s390.h
new file mode 100644
index 0000000000..77fcccb182
--- /dev/null
+++ b/deps/v8/src/s390/macro-assembler-s390.h
@@ -0,0 +1,1887 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_S390_MACRO_ASSEMBLER_S390_H_
+#define V8_S390_MACRO_ASSEMBLER_S390_H_
+
+#include "src/assembler.h"
+#include "src/bailout-reason.h"
+#include "src/frames.h"
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+// Give alias names to registers for calling conventions.
+const Register kReturnRegister0 = {Register::kCode_r2};
+const Register kReturnRegister1 = {Register::kCode_r3};
+const Register kReturnRegister2 = {Register::kCode_r4};
+const Register kJSFunctionRegister = {Register::kCode_r3};
+const Register kContextRegister = {Register::kCode_r13};
+const Register kInterpreterAccumulatorRegister = {Register::kCode_r2};
+const Register kInterpreterRegisterFileRegister = {Register::kCode_r4};
+const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r5};
+const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r6};
+const Register kInterpreterDispatchTableRegister = {Register::kCode_r8};
+const Register kJavaScriptCallArgCountRegister = {Register::kCode_r2};
+const Register kJavaScriptCallNewTargetRegister = {Register::kCode_r5};
+const Register kRuntimeCallFunctionRegister = {Register::kCode_r3};
+const Register kRuntimeCallArgCountRegister = {Register::kCode_r2};
+
+// ----------------------------------------------------------------------------
+// Static helper functions
+
+// Generate a MemOperand for loading a field from an object.
+inline MemOperand FieldMemOperand(Register object, int offset) {
+ return MemOperand(object, offset - kHeapObjectTag);
+}
+
+// Generate a MemOperand for loading a field from an object.
+inline MemOperand FieldMemOperand(Register object, Register index, int offset) {
+ return MemOperand(object, index, offset - kHeapObjectTag);
+}
+
+// Generate a MemOperand for loading a field from Root register
+inline MemOperand RootMemOperand(Heap::RootListIndex index) {
+ return MemOperand(kRootRegister, index << kPointerSizeLog2);
+}
+
+// Flags used for AllocateHeapNumber
+enum TaggingMode {
+ // Tag the result.
+ TAG_RESULT,
+ // Don't tag
+ DONT_TAG_RESULT
+};
+
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum PointersToHereCheck {
+ kPointersToHereMaybeInteresting,
+ kPointersToHereAreAlwaysInteresting
+};
+enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
+
+Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
+ Register reg3 = no_reg,
+ Register reg4 = no_reg,
+ Register reg5 = no_reg,
+ Register reg6 = no_reg);
+
+#ifdef DEBUG
+bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
+ Register reg4 = no_reg, Register reg5 = no_reg,
+ Register reg6 = no_reg, Register reg7 = no_reg,
+ Register reg8 = no_reg, Register reg9 = no_reg,
+ Register reg10 = no_reg);
+#endif
+
+// These exist to provide portability between 32 and 64bit
+#if V8_TARGET_ARCH_S390X
+#define Div divd
+
+// The length of the arithmetic operation is the length
+// of the register.
+
+// Length:
+// H = halfword
+// W = word
+
+// arithmetics and bitwise
+#define AddMI agsi
+#define AddRR agr
+#define SubRR sgr
+#define AndRR ngr
+#define OrRR ogr
+#define XorRR xgr
+#define LoadComplementRR lcgr
+#define LoadNegativeRR lngr
+
+// Distinct Operands
+#define AddP_RRR agrk
+#define AddPImm_RRI aghik
+#define AddLogicalP_RRR algrk
+#define SubP_RRR sgrk
+#define SubLogicalP_RRR slgrk
+#define AndP_RRR ngrk
+#define OrP_RRR ogrk
+#define XorP_RRR xgrk
+
+// Load / Store
+#define LoadRR lgr
+#define LoadAndTestRR ltgr
+#define LoadImmP lghi
+#define LoadLogicalHalfWordP llgh
+
+// Compare
+#define CmpPH cghi
+#define CmpLogicalPW clgfi
+
+// Shifts
+#define ShiftLeftP sllg
+#define ShiftRightP srlg
+#define ShiftLeftArithP slag
+#define ShiftRightArithP srag
+#else
+
+// arithmetics and bitwise
+// Reg2Reg
+#define AddMI asi
+#define AddRR ar
+#define SubRR sr
+#define AndRR nr
+#define OrRR or_z
+#define XorRR xr
+#define LoadComplementRR lcr
+#define LoadNegativeRR lnr
+
+// Distinct Operands
+#define AddP_RRR ark
+#define AddPImm_RRI ahik
+#define AddLogicalP_RRR alrk
+#define SubP_RRR srk
+#define SubLogicalP_RRR slrk
+#define AndP_RRR nrk
+#define OrP_RRR ork
+#define XorP_RRR xrk
+
+// Load / Store
+#define LoadRR lr
+#define LoadAndTestRR ltr
+#define LoadImmP lhi
+#define LoadLogicalHalfWordP llh
+
+// Compare
+#define CmpPH chi
+#define CmpLogicalPW clfi
+
+// Shifts
+#define ShiftLeftP ShiftLeft
+#define ShiftRightP ShiftRight
+#define ShiftLeftArithP ShiftLeftArith
+#define ShiftRightArithP ShiftRightArith
+
+#endif
+
+// MacroAssembler implements a collection of frequently used macros.
+class MacroAssembler : public Assembler {
+ public:
+ MacroAssembler(Isolate* isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object);
+
+ // Returns the size of a call in instructions.
+ static int CallSize(Register target);
+ int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
+ static int CallSizeNotPredictableCodeSize(Address target,
+ RelocInfo::Mode rmode,
+ Condition cond = al);
+
+ // Jump, Call, and Ret pseudo instructions implementing inter-working.
+ void Jump(Register target);
+ void JumpToJSEntry(Register target);
+ void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al,
+ CRegister cr = cr7);
+ void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
+ void Call(Register target);
+ void CallJSEntry(Register target);
+ void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
+ int CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ TypeFeedbackId ast_id = TypeFeedbackId::None(),
+ Condition cond = al);
+ void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ TypeFeedbackId ast_id = TypeFeedbackId::None(),
+ Condition cond = al);
+ void Ret() { b(r14); }
+ void Ret(Condition cond) { b(cond, r14); }
+
+ // Emit code to discard a non-negative number of pointer-sized elements
+ // from the stack, clobbering only the sp register.
+ void Drop(int count);
+ void Drop(Register count, Register scratch = r0);
+
+ void Ret(int drop) {
+ Drop(drop);
+ Ret();
+ }
+
+ void Call(Label* target);
+
+ // Register move. May do nothing if the registers are identical.
+ void Move(Register dst, Smi* smi) { LoadSmiLiteral(dst, smi); }
+ void Move(Register dst, Handle<Object> value);
+ void Move(Register dst, Register src, Condition cond = al);
+ void Move(DoubleRegister dst, DoubleRegister src);
+
+ void MultiPush(RegList regs, Register location = sp);
+ void MultiPop(RegList regs, Register location = sp);
+
+ void MultiPushDoubles(RegList dregs, Register location = sp);
+ void MultiPopDoubles(RegList dregs, Register location = sp);
+
+ // Load an object from the root table.
+ void LoadRoot(Register destination, Heap::RootListIndex index,
+ Condition cond = al);
+ // Store an object to the root table.
+ void StoreRoot(Register source, Heap::RootListIndex index,
+ Condition cond = al);
+
+ //--------------------------------------------------------------------------
+ // S390 Macro Assemblers for Instructions
+ //--------------------------------------------------------------------------
+
+ // Arithmetic Operations
+
+ // Add (Register - Immediate)
+ void Add32(Register dst, const Operand& imm);
+ void AddP(Register dst, const Operand& imm);
+ void Add32(Register dst, Register src, const Operand& imm);
+ void AddP(Register dst, Register src, const Operand& imm);
+
+ // Add (Register - Register)
+ void Add32(Register dst, Register src);
+ void AddP(Register dst, Register src);
+ void AddP_ExtendSrc(Register dst, Register src);
+ void Add32(Register dst, Register src1, Register src2);
+ void AddP(Register dst, Register src1, Register src2);
+ void AddP_ExtendSrc(Register dst, Register src1, Register src2);
+
+ // Add (Register - Mem)
+ void Add32(Register dst, const MemOperand& opnd);
+ void AddP(Register dst, const MemOperand& opnd);
+ void AddP_ExtendSrc(Register dst, const MemOperand& opnd);
+
+ // Add (Mem - Immediate)
+ void Add32(const MemOperand& opnd, const Operand& imm);
+ void AddP(const MemOperand& opnd, const Operand& imm);
+
+ // Add Logical (Register - Register)
+ void AddLogical32(Register dst, Register src1, Register src2);
+
+ // Add Logical With Carry (Register - Register)
+ void AddLogicalWithCarry32(Register dst, Register src1, Register src2);
+
+ // Add Logical (Register - Immediate)
+ void AddLogical(Register dst, const Operand& imm);
+ void AddLogicalP(Register dst, const Operand& imm);
+
+ // Add Logical (Register - Mem)
+ void AddLogical(Register dst, const MemOperand& opnd);
+ void AddLogicalP(Register dst, const MemOperand& opnd);
+
+ // Subtract (Register - Immediate)
+ void Sub32(Register dst, const Operand& imm);
+ void SubP(Register dst, const Operand& imm);
+ void Sub32(Register dst, Register src, const Operand& imm);
+ void SubP(Register dst, Register src, const Operand& imm);
+
+ // Subtract (Register - Register)
+ void Sub32(Register dst, Register src);
+ void SubP(Register dst, Register src);
+ void SubP_ExtendSrc(Register dst, Register src);
+ void Sub32(Register dst, Register src1, Register src2);
+ void SubP(Register dst, Register src1, Register src2);
+ void SubP_ExtendSrc(Register dst, Register src1, Register src2);
+
+ // Subtract (Register - Mem)
+ void Sub32(Register dst, const MemOperand& opnd);
+ void SubP(Register dst, const MemOperand& opnd);
+ void SubP_ExtendSrc(Register dst, const MemOperand& opnd);
+
+ // Subtract Logical (Register - Mem)
+ void SubLogical(Register dst, const MemOperand& opnd);
+ void SubLogicalP(Register dst, const MemOperand& opnd);
+ void SubLogicalP_ExtendSrc(Register dst, const MemOperand& opnd);
+ // Subtract Logical 32-bit
+ void SubLogical32(Register dst, Register src1, Register src2);
+ // Subtract Logical With Borrow 32-bit
+ void SubLogicalWithBorrow32(Register dst, Register src1, Register src2);
+
+ // Multiply
+ void MulP(Register dst, const Operand& opnd);
+ void MulP(Register dst, Register src);
+ void MulP(Register dst, const MemOperand& opnd);
+ void Mul(Register dst, Register src1, Register src2);
+
+ // Divide
+ void DivP(Register dividend, Register divider);
+
+ // Compare
+ void Cmp32(Register src1, Register src2);
+ void CmpP(Register src1, Register src2);
+ void Cmp32(Register dst, const Operand& opnd);
+ void CmpP(Register dst, const Operand& opnd);
+ void Cmp32(Register dst, const MemOperand& opnd);
+ void CmpP(Register dst, const MemOperand& opnd);
+
+ // Compare Logical
+ void CmpLogical32(Register src1, Register src2);
+ void CmpLogicalP(Register src1, Register src2);
+ void CmpLogical32(Register src1, const Operand& opnd);
+ void CmpLogicalP(Register src1, const Operand& opnd);
+ void CmpLogical32(Register dst, const MemOperand& opnd);
+ void CmpLogicalP(Register dst, const MemOperand& opnd);
+
+ // Compare Logical Byte (CLI/CLIY)
+ void CmpLogicalByte(const MemOperand& mem, const Operand& imm);
+
+ // Load 32bit
+ void Load(Register dst, const MemOperand& opnd);
+ void Load(Register dst, const Operand& opnd);
+ void LoadW(Register dst, const MemOperand& opnd, Register scratch = no_reg);
+ void LoadW(Register dst, Register src);
+ void LoadlW(Register dst, const MemOperand& opnd, Register scratch = no_reg);
+ void LoadlW(Register dst, Register src);
+ void LoadB(Register dst, const MemOperand& opnd);
+ void LoadlB(Register dst, const MemOperand& opnd);
+
+ // Load And Test
+ void LoadAndTest32(Register dst, Register src);
+ void LoadAndTestP_ExtendSrc(Register dst, Register src);
+ void LoadAndTestP(Register dst, Register src);
+
+ void LoadAndTest32(Register dst, const MemOperand& opnd);
+ void LoadAndTestP(Register dst, const MemOperand& opnd);
+
+ // Load Floating Point
+ void LoadDouble(DoubleRegister dst, const MemOperand& opnd);
+ void LoadFloat32(DoubleRegister dst, const MemOperand& opnd);
+ void LoadFloat32ConvertToDouble(DoubleRegister dst, const MemOperand& mem);
+
+ // Store Floating Point
+ void StoreDouble(DoubleRegister dst, const MemOperand& opnd);
+ void StoreFloat32(DoubleRegister dst, const MemOperand& opnd);
+ void StoreDoubleAsFloat32(DoubleRegister src, const MemOperand& mem,
+ DoubleRegister scratch);
+
+ void Branch(Condition c, const Operand& opnd);
+ void BranchOnCount(Register r1, Label* l);
+
+ // Shifts
+ void ShiftLeft(Register dst, Register src, Register val);
+ void ShiftLeft(Register dst, Register src, const Operand& val);
+ void ShiftRight(Register dst, Register src, Register val);
+ void ShiftRight(Register dst, Register src, const Operand& val);
+ void ShiftLeftArith(Register dst, Register src, Register shift);
+ void ShiftLeftArith(Register dst, Register src, const Operand& val);
+ void ShiftRightArith(Register dst, Register src, Register shift);
+ void ShiftRightArith(Register dst, Register src, const Operand& val);
+
+ void ClearRightImm(Register dst, Register src, const Operand& val);
+
+ // Bitwise operations
+ void And(Register dst, Register src);
+ void AndP(Register dst, Register src);
+ void And(Register dst, Register src1, Register src2);
+ void AndP(Register dst, Register src1, Register src2);
+ void And(Register dst, const MemOperand& opnd);
+ void AndP(Register dst, const MemOperand& opnd);
+ void And(Register dst, const Operand& opnd);
+ void AndP(Register dst, const Operand& opnd);
+ void And(Register dst, Register src, const Operand& opnd);
+ void AndP(Register dst, Register src, const Operand& opnd);
+ void Or(Register dst, Register src);
+ void OrP(Register dst, Register src);
+ void Or(Register dst, Register src1, Register src2);
+ void OrP(Register dst, Register src1, Register src2);
+ void Or(Register dst, const MemOperand& opnd);
+ void OrP(Register dst, const MemOperand& opnd);
+ void Or(Register dst, const Operand& opnd);
+ void OrP(Register dst, const Operand& opnd);
+ void Or(Register dst, Register src, const Operand& opnd);
+ void OrP(Register dst, Register src, const Operand& opnd);
+ void Xor(Register dst, Register src);
+ void XorP(Register dst, Register src);
+ void Xor(Register dst, Register src1, Register src2);
+ void XorP(Register dst, Register src1, Register src2);
+ void Xor(Register dst, const MemOperand& opnd);
+ void XorP(Register dst, const MemOperand& opnd);
+ void Xor(Register dst, const Operand& opnd);
+ void XorP(Register dst, const Operand& opnd);
+ void Xor(Register dst, Register src, const Operand& opnd);
+ void XorP(Register dst, Register src, const Operand& opnd);
+ void Popcnt32(Register dst, Register src);
+
+#ifdef V8_TARGET_ARCH_S390X
+ void Popcnt64(Register dst, Register src);
+#endif
+
+ void NotP(Register dst);
+
+ void mov(Register dst, const Operand& src);
+
+ // ---------------------------------------------------------------------------
+ // GC Support
+
+ void IncrementalMarkingRecordWriteHelper(Register object, Register value,
+ Register address);
+
+ enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
+
+ // Record in the remembered set the fact that we have a pointer to new space
+ // at the address pointed to by the addr register. Only works if addr is not
+ // in new space.
+ void RememberedSetHelper(Register object, // Used for debug code.
+ Register addr, Register scratch,
+ SaveFPRegsMode save_fp,
+ RememberedSetFinalAction and_then);
+
+ void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
+ Label* condition_met);
+
+ // Check if object is in new space. Jumps if the object is not in new space.
+ // The register scratch can be object itself, but scratch will be clobbered.
+ void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
+ InNewSpace(object, scratch, eq, branch);
+ }
+
+ // Check if object is in new space. Jumps if the object is in new space.
+ // The register scratch can be object itself, but it will be clobbered.
+ void JumpIfInNewSpace(Register object, Register scratch, Label* branch) {
+ InNewSpace(object, scratch, ne, branch);
+ }
+
+ // Check if an object has a given incremental marking color.
+ void HasColor(Register object, Register scratch0, Register scratch1,
+ Label* has_color, int first_bit, int second_bit);
+
+ void JumpIfBlack(Register object, Register scratch0, Register scratch1,
+ Label* on_black);
+
+ // Checks the color of an object. If the object is white we jump to the
+ // incremental marker.
+ void JumpIfWhite(Register value, Register scratch1, Register scratch2,
+ Register scratch3, Label* value_is_white);
+
+ // Notify the garbage collector that we wrote a pointer into an object.
+ // |object| is the object being stored into, |value| is the object being
+ // stored. value and scratch registers are clobbered by the operation.
+ // The offset is the offset from the start of the object, not the offset from
+ // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
+ void RecordWriteField(
+ Register object, int offset, Register value, Register scratch,
+ LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
+
+ // As above, but the offset has the tag presubtracted. For use with
+ // MemOperand(reg, off).
+ inline void RecordWriteContextSlot(
+ Register context, int offset, Register value, Register scratch,
+ LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting) {
+ RecordWriteField(context, offset + kHeapObjectTag, value, scratch,
+ lr_status, save_fp, remembered_set_action, smi_check,
+ pointers_to_here_check_for_value);
+ }
+
+ // Notify the garbage collector that we wrote a code entry into a
+ // JSFunction. Only scratch is clobbered by the operation.
+ void RecordWriteCodeEntryField(Register js_function, Register code_entry,
+ Register scratch);
+
+ void RecordWriteForMap(Register object, Register map, Register dst,
+ LinkRegisterStatus lr_status, SaveFPRegsMode save_fp);
+
+ // For a given |object| notify the garbage collector that the slot |address|
+ // has been written. |value| is the object being stored. The value and
+ // address registers are clobbered by the operation.
+ void RecordWrite(
+ Register object, Register address, Register value,
+ LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
+
+ void push(Register src) {
+ lay(sp, MemOperand(sp, -kPointerSize));
+ StoreP(src, MemOperand(sp));
+ }
+
+ void pop(Register dst) {
+ LoadP(dst, MemOperand(sp));
+ la(sp, MemOperand(sp, kPointerSize));
+ }
+
+ void pop() { la(sp, MemOperand(sp, kPointerSize)); }
+
+ void Push(Register src) { push(src); }
+
+ // Push a handle.
+ void Push(Handle<Object> handle);
+ void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
+
+ // Push two registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2) {
+ lay(sp, MemOperand(sp, -kPointerSize * 2));
+ StoreP(src1, MemOperand(sp, kPointerSize));
+ StoreP(src2, MemOperand(sp, 0));
+ }
+
+ // Push three registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3) {
+ lay(sp, MemOperand(sp, -kPointerSize * 3));
+ StoreP(src1, MemOperand(sp, kPointerSize * 2));
+ StoreP(src2, MemOperand(sp, kPointerSize));
+ StoreP(src3, MemOperand(sp, 0));
+ }
+
+ // Push four registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3, Register src4) {
+ lay(sp, MemOperand(sp, -kPointerSize * 4));
+ StoreP(src1, MemOperand(sp, kPointerSize * 3));
+ StoreP(src2, MemOperand(sp, kPointerSize * 2));
+ StoreP(src3, MemOperand(sp, kPointerSize));
+ StoreP(src4, MemOperand(sp, 0));
+ }
+
+ // Push five registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3, Register src4,
+ Register src5) {
+ DCHECK(!src1.is(src2));
+ DCHECK(!src1.is(src3));
+ DCHECK(!src2.is(src3));
+ DCHECK(!src1.is(src4));
+ DCHECK(!src2.is(src4));
+ DCHECK(!src3.is(src4));
+ DCHECK(!src1.is(src5));
+ DCHECK(!src2.is(src5));
+ DCHECK(!src3.is(src5));
+ DCHECK(!src4.is(src5));
+
+ lay(sp, MemOperand(sp, -kPointerSize * 5));
+ StoreP(src1, MemOperand(sp, kPointerSize * 4));
+ StoreP(src2, MemOperand(sp, kPointerSize * 3));
+ StoreP(src3, MemOperand(sp, kPointerSize * 2));
+ StoreP(src4, MemOperand(sp, kPointerSize));
+ StoreP(src5, MemOperand(sp, 0));
+ }
+
+ void Pop(Register dst) { pop(dst); }
+
+ // Pop two registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2) {
+ LoadP(src2, MemOperand(sp, 0));
+ LoadP(src1, MemOperand(sp, kPointerSize));
+ la(sp, MemOperand(sp, 2 * kPointerSize));
+ }
+
+ // Pop three registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2, Register src3) {
+ LoadP(src3, MemOperand(sp, 0));
+ LoadP(src2, MemOperand(sp, kPointerSize));
+ LoadP(src1, MemOperand(sp, 2 * kPointerSize));
+ la(sp, MemOperand(sp, 3 * kPointerSize));
+ }
+
+ // Pop four registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2, Register src3, Register src4) {
+ LoadP(src4, MemOperand(sp, 0));
+ LoadP(src3, MemOperand(sp, kPointerSize));
+ LoadP(src2, MemOperand(sp, 2 * kPointerSize));
+ LoadP(src1, MemOperand(sp, 3 * kPointerSize));
+ la(sp, MemOperand(sp, 4 * kPointerSize));
+ }
+
+ // Pop five registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2, Register src3, Register src4,
+ Register src5) {
+ LoadP(src5, MemOperand(sp, 0));
+ LoadP(src4, MemOperand(sp, kPointerSize));
+ LoadP(src3, MemOperand(sp, 2 * kPointerSize));
+ LoadP(src2, MemOperand(sp, 3 * kPointerSize));
+ LoadP(src1, MemOperand(sp, 4 * kPointerSize));
+ la(sp, MemOperand(sp, 5 * kPointerSize));
+ }
+
+ // Push a fixed frame, consisting of lr, fp, constant pool.
+ void PushCommonFrame(Register marker_reg = no_reg);
+
+ // Push a standard frame, consisting of lr, fp, constant pool,
+ // context and JS function
+ void PushStandardFrame(Register function_reg);
+
+ void PopCommonFrame(Register marker_reg = no_reg);
+
+ // Restore caller's frame pointer and return address prior to being
+ // overwritten by tail call stack preparation.
+ void RestoreFrameStateForTailCall();
+
+ // Push and pop the registers that can hold pointers, as defined by the
+ // RegList constant kSafepointSavedRegisters.
+ void PushSafepointRegisters();
+ void PopSafepointRegisters();
+ // Store value in register src in the safepoint stack slot for
+ // register dst.
+ void StoreToSafepointRegisterSlot(Register src, Register dst);
+ // Load the value of the src register from its safepoint stack slot
+ // into register dst.
+ void LoadFromSafepointRegisterSlot(Register dst, Register src);
+
+ // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
+ // from C.
+ // Does not handle errors.
+ void FlushICache(Register address, size_t size, Register scratch);
+
+ // If the value is a NaN, canonicalize the value else, do nothing.
+ void CanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
+ void CanonicalizeNaN(const DoubleRegister value) {
+ CanonicalizeNaN(value, value);
+ }
+
+ // Converts the integer (untagged smi) in |src| to a double, storing
+ // the result to |dst|
+ void ConvertIntToDouble(Register src, DoubleRegister dst);
+
+ // Converts the unsigned integer (untagged smi) in |src| to
+ // a double, storing the result to |dst|
+ void ConvertUnsignedIntToDouble(Register src, DoubleRegister dst);
+
+ // Converts the integer (untagged smi) in |src| to
+ // a float, storing the result in |dst|
+ void ConvertIntToFloat(Register src, DoubleRegister dst);
+
+ // Converts the unsigned integer (untagged smi) in |src| to
+ // a float, storing the result in |dst|
+ void ConvertUnsignedIntToFloat(Register src, DoubleRegister dst);
+
+#if V8_TARGET_ARCH_S390X
+ void ConvertInt64ToFloat(Register src, DoubleRegister double_dst);
+ void ConvertInt64ToDouble(Register src, DoubleRegister double_dst);
+ void ConvertUnsignedInt64ToFloat(Register src, DoubleRegister double_dst);
+ void ConvertUnsignedInt64ToDouble(Register src, DoubleRegister double_dst);
+#endif
+
+ void MovIntToFloat(DoubleRegister dst, Register src);
+ void MovFloatToInt(Register dst, DoubleRegister src);
+ void MovDoubleToInt64(Register dst, DoubleRegister src);
+ void MovInt64ToDouble(DoubleRegister dst, Register src);
+ // Converts the double_input to an integer. Note that, upon return,
+ // the contents of double_dst will also hold the fixed point representation.
+ void ConvertFloat32ToInt64(const DoubleRegister double_input,
+#if !V8_TARGET_ARCH_S390X
+ const Register dst_hi,
+#endif
+ const Register dst,
+ const DoubleRegister double_dst,
+ FPRoundingMode rounding_mode = kRoundToZero);
+
+ // Converts the double_input to an integer. Note that, upon return,
+ // the contents of double_dst will also hold the fixed point representation.
+ void ConvertDoubleToInt64(const DoubleRegister double_input,
+#if !V8_TARGET_ARCH_S390X
+ const Register dst_hi,
+#endif
+ const Register dst, const DoubleRegister double_dst,
+ FPRoundingMode rounding_mode = kRoundToZero);
+
+ void ConvertFloat32ToInt32(const DoubleRegister double_input,
+ const Register dst,
+ const DoubleRegister double_dst,
+ FPRoundingMode rounding_mode = kRoundToZero);
+ void ConvertFloat32ToUnsignedInt32(
+ const DoubleRegister double_input, const Register dst,
+ const DoubleRegister double_dst,
+ FPRoundingMode rounding_mode = kRoundToZero);
+#if V8_TARGET_ARCH_S390X
+ // Converts the double_input to an unsigned integer. Note that, upon return,
+ // the contents of double_dst will also hold the fixed point representation.
+ void ConvertDoubleToUnsignedInt64(
+ const DoubleRegister double_input, const Register dst,
+ const DoubleRegister double_dst,
+ FPRoundingMode rounding_mode = kRoundToZero);
+ void ConvertFloat32ToUnsignedInt64(
+ const DoubleRegister double_input, const Register dst,
+ const DoubleRegister double_dst,
+ FPRoundingMode rounding_mode = kRoundToZero);
+#endif
+
+#if !V8_TARGET_ARCH_S390X
+ void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, Register scratch, Register shift);
+ void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, uint32_t shift);
+ void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, Register scratch, Register shift);
+ void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, uint32_t shift);
+ void ShiftRightArithPair(Register dst_low, Register dst_high,
+ Register src_low, Register src_high,
+ Register scratch, Register shift);
+ void ShiftRightArithPair(Register dst_low, Register dst_high,
+ Register src_low, Register src_high, uint32_t shift);
+#endif
+
+ // Generates function and stub prologue code.
+ void StubPrologue(StackFrame::Type type, Register base = no_reg,
+ int prologue_offset = 0);
+ void Prologue(bool code_pre_aging, Register base, int prologue_offset = 0);
+
+ // Enter exit frame.
+ // stack_space - extra stack space, used for parameters before call to C.
+ // At least one slot (for the return address) should be provided.
+ void EnterExitFrame(bool save_doubles, int stack_space = 1);
+
+ // Leave the current exit frame. Expects the return value in r0.
+ // Expect the number of values, pushed prior to the exit frame, to
+ // remove in a register (or no_reg, if there is nothing to remove).
+ void LeaveExitFrame(bool save_doubles, Register argument_count,
+ bool restore_context,
+ bool argument_count_is_length = false);
+
+ // Get the actual activation frame alignment for target environment.
+ static int ActivationFrameAlignment();
+
+ void LoadContext(Register dst, int context_chain_length);
+
+ // Load the global object from the current context.
+ void LoadGlobalObject(Register dst) {
+ LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
+ }
+
+ // Load the global proxy from the current context.
+ void LoadGlobalProxy(Register dst) {
+ LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
+ }
+
+ // Conditionally load the cached Array transitioned map of type
+ // transitioned_kind from the native context if the map in register
+ // map_in_out is the cached Array map in the native context of
+ // expected_kind.
+ void LoadTransitionedArrayMapConditional(ElementsKind expected_kind,
+ ElementsKind transitioned_kind,
+ Register map_in_out,
+ Register scratch,
+ Label* no_map_match);
+
+ void LoadNativeContextSlot(int index, Register dst);
+
+ // Load the initial map from the global function. The registers
+ // function and map can be the same, function is then overwritten.
+ void LoadGlobalFunctionInitialMap(Register function, Register map,
+ Register scratch);
+
+ void InitializeRootRegister() {
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ mov(kRootRegister, Operand(roots_array_start));
+ }
+
+ // ----------------------------------------------------------------
+ // new S390 macro-assembler interfaces that are slightly higher level
+ // than assembler-s390 and may generate variable length sequences
+
+ // load a literal signed int value <value> to GPR <dst>
+ void LoadIntLiteral(Register dst, int value);
+
+ // load an SMI value <value> to GPR <dst>
+ void LoadSmiLiteral(Register dst, Smi* smi);
+
+ // load a literal double value <value> to FPR <result>
+ void LoadDoubleLiteral(DoubleRegister result, double value, Register scratch);
+ void LoadDoubleLiteral(DoubleRegister result, uint64_t value,
+ Register scratch);
+
+ void LoadFloat32Literal(DoubleRegister result, float value, Register scratch);
+
+ void StoreW(Register src, const MemOperand& mem, Register scratch = no_reg);
+
+ void LoadHalfWordP(Register dst, const MemOperand& mem,
+ Register scratch = no_reg);
+
+ void StoreHalfWord(Register src, const MemOperand& mem,
+ Register scratch = r0);
+ void StoreByte(Register src, const MemOperand& mem, Register scratch = r0);
+
+ void LoadRepresentation(Register dst, const MemOperand& mem, Representation r,
+ Register scratch = no_reg);
+ void StoreRepresentation(Register src, const MemOperand& mem,
+ Representation r, Register scratch = no_reg);
+
+ void AddSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
+ void SubSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
+ void CmpSmiLiteral(Register src1, Smi* smi, Register scratch);
+ void CmpLogicalSmiLiteral(Register src1, Smi* smi, Register scratch);
+ void AndSmiLiteral(Register dst, Register src, Smi* smi);
+
+ // Set new rounding mode RN to FPSCR
+ void SetRoundingMode(FPRoundingMode RN);
+
+ // reset rounding mode to default (kRoundToNearest)
+ void ResetRoundingMode();
+
+ // These exist to provide portability between 32 and 64bit
+ void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg);
+ void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
+ void StoreP(const MemOperand& mem, const Operand& opnd,
+ Register scratch = no_reg);
+ void LoadMultipleP(Register dst1, Register dst2, const MemOperand& mem);
+ void StoreMultipleP(Register dst1, Register dst2, const MemOperand& mem);
+ void LoadMultipleW(Register dst1, Register dst2, const MemOperand& mem);
+ void StoreMultipleW(Register dst1, Register dst2, const MemOperand& mem);
+
+ // Cleanse pointer address on 31bit by zero out top bit.
+ // This is a NOP on 64-bit.
+ void CleanseP(Register src) {
+#if (V8_HOST_ARCH_S390 && !(V8_TARGET_ARCH_S390X))
+ nilh(src, Operand(0x7FFF));
+#endif
+ }
+
+ // ---------------------------------------------------------------------------
+ // JavaScript invokes
+
+ // Set up call kind marking in ecx. The method takes ecx as an
+ // explicit first parameter to make the code more readable at the
+ // call sites.
+ // void SetCallKind(Register dst, CallKind kind);
+
+ // Removes current frame and its arguments from the stack preserving
+ // the arguments and a return address pushed to the stack for the next call.
+ // Both |callee_args_count| and |caller_args_count_reg| do not include
+ // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
+ // is trashed.
+ void PrepareForTailCall(const ParameterCount& callee_args_count,
+ Register caller_args_count_reg, Register scratch0,
+ Register scratch1);
+
+ // Invoke the JavaScript function code by either calling or jumping.
+ void InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ void FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
+
+ // Invoke the JavaScript function in the given register. Changes the
+ // current context to the context in the function before invoking.
+ void InvokeFunction(Register function, Register new_target,
+ const ParameterCount& actual, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ void InvokeFunction(Register function, const ParameterCount& expected,
+ const ParameterCount& actual, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ void InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ void IsObjectJSStringType(Register object, Register scratch, Label* fail);
+
+ void IsObjectNameType(Register object, Register scratch, Label* fail);
+
+ // ---------------------------------------------------------------------------
+ // Debugger Support
+
+ void DebugBreak();
+
+ // ---------------------------------------------------------------------------
+ // Exception handling
+
+ // Push a new stack handler and link into stack handler chain.
+ void PushStackHandler();
+
+ // Unlink the stack handler on top of the stack from the stack handler chain.
+ // Must preserve the result register.
+ void PopStackHandler();
+
+ // ---------------------------------------------------------------------------
+ // Inline caching support
+
+ // Generate code for checking access rights - used for security checks
+ // on access to global objects across environments. The holder register
+ // is left untouched, whereas both scratch registers are clobbered.
+ void CheckAccessGlobalProxy(Register holder_reg, Register scratch,
+ Label* miss);
+
+ void GetNumberHash(Register t0, Register scratch);
+
+ void LoadFromNumberDictionary(Label* miss, Register elements, Register key,
+ Register result, Register t0, Register t1,
+ Register t2);
+
+ inline void MarkCode(NopMarkerTypes type) { nop(type); }
+
+ // Check if the given instruction is a 'type' marker.
+ // i.e. check if is is a mov r<type>, r<type> (referenced as nop(type))
+ // These instructions are generated to mark special location in the code,
+ // like some special IC code.
+ static inline bool IsMarkedCode(Instr instr, int type) {
+ DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
+ return IsNop(instr, type);
+ }
+
+ static inline int GetCodeMarker(Instr instr) {
+ int dst_reg_offset = 12;
+ int dst_mask = 0xf << dst_reg_offset;
+ int src_mask = 0xf;
+ int dst_reg = (instr & dst_mask) >> dst_reg_offset;
+ int src_reg = instr & src_mask;
+ uint32_t non_register_mask = ~(dst_mask | src_mask);
+ uint32_t mov_mask = al | 13 << 21;
+
+ // Return <n> if we have a mov rn rn, else return -1.
+ int type = ((instr & non_register_mask) == mov_mask) &&
+ (dst_reg == src_reg) && (FIRST_IC_MARKER <= dst_reg) &&
+ (dst_reg < LAST_CODE_MARKER)
+ ? src_reg
+ : -1;
+ DCHECK((type == -1) ||
+ ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
+ return type;
+ }
+
+ // ---------------------------------------------------------------------------
+ // Allocation support
+
+ // Allocate an object in new space or old pointer space. The object_size is
+ // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
+ // is passed. If the space is exhausted control continues at the gc_required
+ // label. The allocated object is returned in result. If the flag
+ // tag_allocated_object is true the result is tagged as as a heap object.
+ // All registers are clobbered also when control continues at the gc_required
+ // label.
+ void Allocate(int object_size, Register result, Register scratch1,
+ Register scratch2, Label* gc_required, AllocationFlags flags);
+
+ void Allocate(Register object_size, Register result, Register result_end,
+ Register scratch, Label* gc_required, AllocationFlags flags);
+
+ void AllocateTwoByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3, Label* gc_required);
+ void AllocateOneByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3, Label* gc_required);
+ void AllocateTwoByteConsString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Label* gc_required);
+ void AllocateOneByteConsString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Label* gc_required);
+ void AllocateTwoByteSlicedString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Label* gc_required);
+ void AllocateOneByteSlicedString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Label* gc_required);
+
+ // Allocates a heap number or jumps to the gc_required label if the young
+ // space is full and a scavenge is needed. All registers are clobbered also
+ // when control continues at the gc_required label.
+ void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
+ Register heap_number_map, Label* gc_required,
+ TaggingMode tagging_mode = TAG_RESULT,
+ MutableMode mode = IMMUTABLE);
+ void AllocateHeapNumberWithValue(Register result, DoubleRegister value,
+ Register scratch1, Register scratch2,
+ Register heap_number_map,
+ Label* gc_required);
+
+ // Allocate and initialize a JSValue wrapper with the specified {constructor}
+ // and {value}.
+ void AllocateJSValue(Register result, Register constructor, Register value,
+ Register scratch1, Register scratch2,
+ Label* gc_required);
+
+ // Copies a number of bytes from src to dst. All registers are clobbered. On
+ // exit src and dst will point to the place just after where the last byte was
+ // read or written and length will be zero.
+ void CopyBytes(Register src, Register dst, Register length, Register scratch);
+
+ // Initialize fields with filler values. |count| fields starting at
+ // |current_address| are overwritten with the value in |filler|. At the end
+ // the loop, |current_address| points at the next uninitialized field.
+ // |count| is assumed to be non-zero.
+ void InitializeNFieldsWithFiller(Register current_address, Register count,
+ Register filler);
+
+ // Initialize fields with filler values. Fields starting at |current_address|
+ // not including |end_address| are overwritten with the value in |filler|. At
+ // the end the loop, |current_address| takes the value of |end_address|.
+ void InitializeFieldsWithFiller(Register current_address,
+ Register end_address, Register filler);
+
+ // ---------------------------------------------------------------------------
+ // Support functions.
+
+ // Machine code version of Map::GetConstructor().
+ // |temp| holds |result|'s map when done, and |temp2| its instance type.
+ void GetMapConstructor(Register result, Register map, Register temp,
+ Register temp2);
+
+ // Try to get function prototype of a function and puts the value in
+ // the result register. Checks that the function really is a
+ // function and jumps to the miss label if the fast checks fail. The
+ // function register will be untouched; the other registers may be
+ // clobbered.
+ void TryGetFunctionPrototype(Register function, Register result,
+ Register scratch, Label* miss);
+
+ // Compare object type for heap object. heap_object contains a non-Smi
+ // whose object type should be compared with the given type. This both
+ // sets the flags and leaves the object type in the type_reg register.
+ // It leaves the map in the map register (unless the type_reg and map register
+ // are the same register). It leaves the heap object in the heap_object
+ // register unless the heap_object register is the same register as one of the
+ // other registers.
+ // Type_reg can be no_reg. In that case ip is used.
+ void CompareObjectType(Register heap_object, Register map, Register type_reg,
+ InstanceType type);
+
+ // Compare instance type in a map. map contains a valid map object whose
+ // object type should be compared with the given type. This both
+ // sets the flags and leaves the object type in the type_reg register.
+ void CompareInstanceType(Register map, Register type_reg, InstanceType type);
+
+ // Check if a map for a JSObject indicates that the object has fast elements.
+ // Jump to the specified label if it does not.
+ void CheckFastElements(Register map, Register scratch, Label* fail);
+
+ // Check if a map for a JSObject indicates that the object can have both smi
+ // and HeapObject elements. Jump to the specified label if it does not.
+ void CheckFastObjectElements(Register map, Register scratch, Label* fail);
+
+ // Check if a map for a JSObject indicates that the object has fast smi only
+ // elements. Jump to the specified label if it does not.
+ void CheckFastSmiElements(Register map, Register scratch, Label* fail);
+
+ // Check to see if maybe_number can be stored as a double in
+ // FastDoubleElements. If it can, store it at the index specified by key in
+ // the FastDoubleElements array elements. Otherwise jump to fail.
+ void StoreNumberToDoubleElements(Register value_reg, Register key_reg,
+ Register elements_reg, Register scratch1,
+ DoubleRegister double_scratch, Label* fail,
+ int elements_offset = 0);
+
+ // Compare an object's map with the specified map and its transitioned
+ // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
+ // set with result of map compare. If multiple map compares are required, the
+ // compare sequences branches to early_success.
+ void CompareMap(Register obj, Register scratch, Handle<Map> map,
+ Label* early_success);
+
+ // As above, but the map of the object is already loaded into the register
+ // which is preserved by the code generated.
+ void CompareMap(Register obj_map, Handle<Map> map, Label* early_success);
+
+ // Check if the map of an object is equal to a specified map and branch to
+ // label if not. Skip the smi check if not required (object is known to be a
+ // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
+ // against maps that are ElementsKind transition maps of the specified map.
+ void CheckMap(Register obj, Register scratch, Handle<Map> map, Label* fail,
+ SmiCheckType smi_check_type);
+
+ void CheckMap(Register obj, Register scratch, Heap::RootListIndex index,
+ Label* fail, SmiCheckType smi_check_type);
+
+ // Check if the map of an object is equal to a specified weak map and branch
+ // to a specified target if equal. Skip the smi check if not required
+ // (object is known to be a heap object)
+ void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
+ Handle<WeakCell> cell, Handle<Code> success,
+ SmiCheckType smi_check_type);
+
+ // Compare the given value and the value of weak cell.
+ void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch,
+ CRegister cr = cr7);
+
+ void GetWeakValue(Register value, Handle<WeakCell> cell);
+
+ // Load the value of the weak cell in the value register. Branch to the given
+ // miss label if the weak cell was cleared.
+ void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
+
+ // Compare the object in a register to a value from the root list.
+ // Uses the ip register as scratch.
+ void CompareRoot(Register obj, Heap::RootListIndex index);
+ void PushRoot(Heap::RootListIndex index) {
+ LoadRoot(r0, index);
+ Push(r0);
+ }
+
+ // Compare the object in a register to a value and jump if they are equal.
+ void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
+ CompareRoot(with, index);
+ beq(if_equal);
+ }
+
+ // Compare the object in a register to a value and jump if they are not equal.
+ void JumpIfNotRoot(Register with, Heap::RootListIndex index,
+ Label* if_not_equal) {
+ CompareRoot(with, index);
+ bne(if_not_equal);
+ }
+
+ // Load and check the instance type of an object for being a string.
+ // Loads the type into the second argument register.
+ // Returns a condition that will be enabled if the object was a string.
+ Condition IsObjectStringType(Register obj, Register type) {
+ LoadP(type, FieldMemOperand(obj, HeapObject::kMapOffset));
+ LoadlB(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
+ mov(r0, Operand(kIsNotStringMask));
+ AndP(r0, type);
+ DCHECK_EQ(0u, kStringTag);
+ return eq;
+ }
+
+ // Picks out an array index from the hash field.
+ // Register use:
+ // hash - holds the index's hash. Clobbered.
+ // index - holds the overwritten index on exit.
+ void IndexFromHash(Register hash, Register index);
+
+ // Get the number of least significant bits from a register
+ void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
+ void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits);
+
+ // Load the value of a smi object into a FP double register. The register
+ // scratch1 can be the same register as smi in which case smi will hold the
+ // untagged value afterwards.
+ void SmiToDouble(DoubleRegister value, Register smi);
+
+ // Check if a double can be exactly represented as a signed 32-bit integer.
+ // CR_EQ in cr7 is set if true.
+ void TestDoubleIsInt32(DoubleRegister double_input, Register scratch1,
+ Register scratch2, DoubleRegister double_scratch);
+
+ // Check if a double is equal to -0.0.
+ // CR_EQ in cr7 holds the result.
+ void TestDoubleIsMinusZero(DoubleRegister input, Register scratch1,
+ Register scratch2);
+
+ // Check the sign of a double.
+ // CR_LT in cr7 holds the result.
+ void TestDoubleSign(DoubleRegister input, Register scratch);
+ void TestHeapNumberSign(Register input, Register scratch);
+
+ // Try to convert a double to a signed 32-bit integer.
+ // CR_EQ in cr7 is set and result assigned if the conversion is exact.
+ void TryDoubleToInt32Exact(Register result, DoubleRegister double_input,
+ Register scratch, DoubleRegister double_scratch);
+
+ // Floor a double and writes the value to the result register.
+ // Go to exact if the conversion is exact (to be able to test -0),
+ // fall through calling code if an overflow occurred, else go to done.
+ // In return, input_high is loaded with high bits of input.
+ void TryInt32Floor(Register result, DoubleRegister double_input,
+ Register input_high, Register scratch,
+ DoubleRegister double_scratch, Label* done, Label* exact);
+
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
+ // succeeds, otherwise falls through if result is saturated. On return
+ // 'result' either holds answer, or is clobbered on fall through.
+ //
+ // Only public for the test code in test-code-stubs-arm.cc.
+ void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
+ Label* done);
+
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
+ // Exits with 'result' holding the answer.
+ void TruncateDoubleToI(Register result, DoubleRegister double_input);
+
+ // Performs a truncating conversion of a heap number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
+ // must be different registers. Exits with 'result' holding the answer.
+ void TruncateHeapNumberToI(Register result, Register object);
+
+ // Converts the smi or heap number in object to an int32 using the rules
+ // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
+ // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
+ // different registers.
+ void TruncateNumberToI(Register object, Register result,
+ Register heap_number_map, Register scratch1,
+ Label* not_int32);
+
+ // Overflow handling functions.
+ // Usage: call the appropriate arithmetic function and then call one of the
+ // flow control functions with the corresponding label.
+
+ // Compute dst = left + right, setting condition codes. dst may be same as
+ // either left or right (or a unique register). left and right must not be
+ // the same register.
+ void AddAndCheckForOverflow(Register dst, Register left, Register right,
+ Register overflow_dst, Register scratch = r0);
+ void AddAndCheckForOverflow(Register dst, Register left, intptr_t right,
+ Register overflow_dst, Register scratch = r0);
+
+ // Compute dst = left - right, setting condition codes. dst may be same as
+ // either left or right (or a unique register). left and right must not be
+ // the same register.
+ void SubAndCheckForOverflow(Register dst, Register left, Register right,
+ Register overflow_dst, Register scratch = r0);
+
+ void BranchOnOverflow(Label* label) { blt(label /*, cr0*/); }
+
+ void BranchOnNoOverflow(Label* label) { bge(label /*, cr0*/); }
+
+ void RetOnOverflow(void) {
+ Label label;
+
+ blt(&label /*, cr0*/);
+ Ret();
+ bind(&label);
+ }
+
+ void RetOnNoOverflow(void) {
+ Label label;
+
+ bge(&label /*, cr0*/);
+ Ret();
+ bind(&label);
+ }
+
+ // ---------------------------------------------------------------------------
+ // Runtime calls
+
+ // Call a code stub.
+ void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None(),
+ Condition cond = al);
+
+ // Call a code stub.
+ void TailCallStub(CodeStub* stub, Condition cond = al);
+
+ // Call a runtime routine.
+ void CallRuntime(const Runtime::Function* f, int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ CallRuntime(function, function->nargs, kSaveFPRegs);
+ }
+
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId fid,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ CallRuntime(function, function->nargs, save_doubles);
+ }
+
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
+ }
+
+ // Convenience function: call an external reference.
+ void CallExternalReference(const ExternalReference& ext, int num_arguments);
+
+ // Convenience function: tail call a runtime routine (jump).
+ void TailCallRuntime(Runtime::FunctionId fid);
+
+ int CalculateStackPassedWords(int num_reg_arguments,
+ int num_double_arguments);
+
+ // Before calling a C-function from generated code, align arguments on stack.
+ // After aligning the frame, non-register arguments must be stored in
+ // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
+ // are word sized. If double arguments are used, this function assumes that
+ // all double arguments are stored before core registers; otherwise the
+ // correct alignment of the double values is not guaranteed.
+ // Some compilers/platforms require the stack to be aligned when calling
+ // C++ code.
+ // Needs a scratch register to do some arithmetic. This register will be
+ // trashed.
+ void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
+ Register scratch);
+ void PrepareCallCFunction(int num_reg_arguments, Register scratch);
+
+ // There are two ways of passing double arguments on ARM, depending on
+ // whether soft or hard floating point ABI is used. These functions
+ // abstract parameter passing for the three different ways we call
+ // C functions from generated code.
+ void MovToFloatParameter(DoubleRegister src);
+ void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
+ void MovToFloatResult(DoubleRegister src);
+
+ // Calls a C function and cleans up the space for arguments allocated
+ // by PrepareCallCFunction. The called function is not allowed to trigger a
+ // garbage collection, since that might move the code and invalidate the
+ // return address (unless this is somehow accounted for by the called
+ // function).
+ void CallCFunction(ExternalReference function, int num_arguments);
+ void CallCFunction(Register function, int num_arguments);
+ void CallCFunction(ExternalReference function, int num_reg_arguments,
+ int num_double_arguments);
+ void CallCFunction(Register function, int num_reg_arguments,
+ int num_double_arguments);
+
+ void MovFromFloatParameter(DoubleRegister dst);
+ void MovFromFloatResult(DoubleRegister dst);
+
+ // Jump to a runtime routine.
+ void JumpToExternalReference(const ExternalReference& builtin);
+
+ Handle<Object> CodeObject() {
+ DCHECK(!code_object_.is_null());
+ return code_object_;
+ }
+
+ // Emit code for a truncating division by a constant. The dividend register is
+ // unchanged and ip gets clobbered. Dividend and result must be different.
+ void TruncatingDiv(Register result, Register dividend, int32_t divisor);
+
+ // ---------------------------------------------------------------------------
+ // StatsCounter support
+
+ void SetCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+ void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+ void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+
+ // ---------------------------------------------------------------------------
+ // Debugging
+
+ // Calls Abort(msg) if the condition cond is not satisfied.
+ // Use --debug_code to enable.
+ void Assert(Condition cond, BailoutReason reason, CRegister cr = cr7);
+ void AssertFastElements(Register elements);
+
+ // Like Assert(), but always enabled.
+ void Check(Condition cond, BailoutReason reason, CRegister cr = cr7);
+
+ // Print a message to stdout and abort execution.
+ void Abort(BailoutReason reason);
+
+ // Verify restrictions about code generated in stubs.
+ void set_generating_stub(bool value) { generating_stub_ = value; }
+ bool generating_stub() { return generating_stub_; }
+ void set_has_frame(bool value) { has_frame_ = value; }
+ bool has_frame() { return has_frame_; }
+ inline bool AllowThisStubCall(CodeStub* stub);
+
+ // ---------------------------------------------------------------------------
+ // Number utilities
+
+ // Check whether the value of reg is a power of two and not zero. If not
+ // control continues at the label not_power_of_two. If reg is a power of two
+ // the register scratch contains the value of (reg - 1) when control falls
+ // through.
+ void JumpIfNotPowerOfTwoOrZero(Register reg, Register scratch,
+ Label* not_power_of_two_or_zero);
+ // Check whether the value of reg is a power of two and not zero.
+ // Control falls through if it is, with scratch containing the mask
+ // value (reg - 1).
+ // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
+ // zero or negative, or jumps to the 'not_power_of_two' label if the value is
+ // strictly positive but not a power of two.
+ void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg, Register scratch,
+ Label* zero_and_neg,
+ Label* not_power_of_two);
+
+ // ---------------------------------------------------------------------------
+ // Bit testing/extraction
+ //
+ // Bit numbering is such that the least significant bit is bit 0
+ // (for consistency between 32/64-bit).
+
+ // Extract consecutive bits (defined by rangeStart - rangeEnd) from src
+ // and place them into the least significant bits of dst.
+ inline void ExtractBitRange(Register dst, Register src, int rangeStart,
+ int rangeEnd) {
+ DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
+
+ // Try to use RISBG if possible.
+ if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+ int shiftAmount = (64 - rangeEnd) % 64; // Convert to shift left.
+ int endBit = 63; // End is always LSB after shifting.
+ int startBit = 63 - rangeStart + rangeEnd;
+ risbg(dst, src, Operand(startBit), Operand(endBit), Operand(shiftAmount),
+ true);
+ } else {
+ if (rangeEnd > 0) // Don't need to shift if rangeEnd is zero.
+ ShiftRightP(dst, src, Operand(rangeEnd));
+ else if (!dst.is(src)) // If we didn't shift, we might need to copy
+ LoadRR(dst, src);
+ int width = rangeStart - rangeEnd + 1;
+#if V8_TARGET_ARCH_S390X
+ uint64_t mask = (static_cast<uint64_t>(1) << width) - 1;
+ nihf(dst, Operand(mask >> 32));
+ nilf(dst, Operand(mask & 0xFFFFFFFF));
+ ltgr(dst, dst);
+#else
+ uint32_t mask = (1 << width) - 1;
+ AndP(dst, Operand(mask));
+#endif
+ }
+ }
+
+ inline void ExtractBit(Register dst, Register src, uint32_t bitNumber) {
+ ExtractBitRange(dst, src, bitNumber, bitNumber);
+ }
+
+ // Extract consecutive bits (defined by mask) from src and place them
+ // into the least significant bits of dst.
+ inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
+ RCBit rc = LeaveRC) {
+ int start = kBitsPerPointer - 1;
+ int end;
+ uintptr_t bit = (1L << start);
+
+ while (bit && (mask & bit) == 0) {
+ start--;
+ bit >>= 1;
+ }
+ end = start;
+ bit >>= 1;
+
+ while (bit && (mask & bit)) {
+ end--;
+ bit >>= 1;
+ }
+
+ // 1-bits in mask must be contiguous
+ DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);
+
+ ExtractBitRange(dst, src, start, end);
+ }
+
+ // Test single bit in value.
+ inline void TestBit(Register value, int bitNumber, Register scratch = r0) {
+ ExtractBitRange(scratch, value, bitNumber, bitNumber);
+ }
+
+ // Test consecutive bit range in value. Range is defined by
+ // rangeStart - rangeEnd.
+ inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
+ Register scratch = r0) {
+ ExtractBitRange(scratch, value, rangeStart, rangeEnd);
+ }
+
+ // Test consecutive bit range in value. Range is defined by mask.
+ inline void TestBitMask(Register value, uintptr_t mask,
+ Register scratch = r0) {
+ ExtractBitMask(scratch, value, mask, SetRC);
+ }
+
+ // ---------------------------------------------------------------------------
+ // Smi utilities
+
+ // Shift left by kSmiShift
+ void SmiTag(Register reg) { SmiTag(reg, reg); }
+ void SmiTag(Register dst, Register src) {
+ ShiftLeftP(dst, src, Operand(kSmiShift));
+ }
+
+#if !V8_TARGET_ARCH_S390X
+ // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
+ void SmiTagCheckOverflow(Register reg, Register overflow);
+ void SmiTagCheckOverflow(Register dst, Register src, Register overflow);
+
+ inline void JumpIfNotSmiCandidate(Register value, Register scratch,
+ Label* not_smi_label) {
+ // High bits must be identical to fit into an Smi
+ STATIC_ASSERT(kSmiShift == 1);
+ AddP(scratch, value, Operand(0x40000000u));
+ CmpP(scratch, Operand::Zero());
+ blt(not_smi_label);
+ }
+#endif
+ inline void TestUnsignedSmiCandidate(Register value, Register scratch) {
+ // The test is different for unsigned int values. Since we need
+ // the value to be in the range of a positive smi, we can't
+ // handle any of the high bits being set in the value.
+ TestBitRange(value, kBitsPerPointer - 1, kBitsPerPointer - 1 - kSmiShift,
+ scratch);
+ }
+ inline void JumpIfNotUnsignedSmiCandidate(Register value, Register scratch,
+ Label* not_smi_label) {
+ TestUnsignedSmiCandidate(value, scratch);
+ bne(not_smi_label /*, cr0*/);
+ }
+
+ void SmiUntag(Register reg) { SmiUntag(reg, reg); }
+
+ void SmiUntag(Register dst, Register src) {
+ ShiftRightArithP(dst, src, Operand(kSmiShift));
+ }
+
+ void SmiToPtrArrayOffset(Register dst, Register src) {
+#if V8_TARGET_ARCH_S390X
+ STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
+ ShiftRightArithP(dst, src, Operand(kSmiShift - kPointerSizeLog2));
+#else
+ STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2);
+ ShiftLeftP(dst, src, Operand(kPointerSizeLog2 - kSmiShift));
+#endif
+ }
+
+ void SmiToByteArrayOffset(Register dst, Register src) { SmiUntag(dst, src); }
+
+ void SmiToShortArrayOffset(Register dst, Register src) {
+#if V8_TARGET_ARCH_S390X
+ STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 1);
+ ShiftRightArithP(dst, src, Operand(kSmiShift - 1));
+#else
+ STATIC_ASSERT(kSmiTag == 0 && kSmiShift == 1);
+ if (!dst.is(src)) {
+ LoadRR(dst, src);
+ }
+#endif
+ }
+
+ void SmiToIntArrayOffset(Register dst, Register src) {
+#if V8_TARGET_ARCH_S390X
+ STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 2);
+ ShiftRightArithP(dst, src, Operand(kSmiShift - 2));
+#else
+ STATIC_ASSERT(kSmiTag == 0 && kSmiShift < 2);
+ ShiftLeftP(dst, src, Operand(2 - kSmiShift));
+#endif
+ }
+
+#define SmiToFloatArrayOffset SmiToIntArrayOffset
+
+ void SmiToDoubleArrayOffset(Register dst, Register src) {
+#if V8_TARGET_ARCH_S390X
+ STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kDoubleSizeLog2);
+ ShiftRightArithP(dst, src, Operand(kSmiShift - kDoubleSizeLog2));
+#else
+ STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kDoubleSizeLog2);
+ ShiftLeftP(dst, src, Operand(kDoubleSizeLog2 - kSmiShift));
+#endif
+ }
+
+ void SmiToArrayOffset(Register dst, Register src, int elementSizeLog2) {
+ if (kSmiShift < elementSizeLog2) {
+ ShiftLeftP(dst, src, Operand(elementSizeLog2 - kSmiShift));
+ } else if (kSmiShift > elementSizeLog2) {
+ ShiftRightArithP(dst, src, Operand(kSmiShift - elementSizeLog2));
+ } else if (!dst.is(src)) {
+ LoadRR(dst, src);
+ }
+ }
+
+ void IndexToArrayOffset(Register dst, Register src, int elementSizeLog2,
+ bool isSmi) {
+ if (isSmi) {
+ SmiToArrayOffset(dst, src, elementSizeLog2);
+ } else {
+#if V8_TARGET_ARCH_S390X
+ // src (key) is a 32-bit integer. Sign extension ensures
+ // upper 32-bit does not contain garbage before being used to
+ // reference memory.
+ lgfr(src, src);
+#endif
+ ShiftLeftP(dst, src, Operand(elementSizeLog2));
+ }
+ }
+
+ // Untag the source value into destination and jump if source is a smi.
+ // Souce and destination can be the same register.
+ void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
+
+ // Untag the source value into destination and jump if source is not a smi.
+ // Souce and destination can be the same register.
+ void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
+
+ inline void TestIfSmi(Register value) { tmll(value, Operand(1)); }
+
+ inline void TestIfPositiveSmi(Register value, Register scratch) {
+ STATIC_ASSERT((kSmiTagMask | kSmiSignMask) ==
+ (intptr_t)(1UL << (kBitsPerPointer - 1) | 1));
+ mov(scratch, Operand(kIntptrSignBit | kSmiTagMask));
+ AndP(scratch, value);
+ }
+
+ // Jump the register contains a smi.
+ inline void JumpIfSmi(Register value, Label* smi_label) {
+ TestIfSmi(value);
+ beq(smi_label /*, cr0*/); // branch if SMI
+ }
+ // Jump if either of the registers contain a non-smi.
+ inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
+ TestIfSmi(value);
+ bne(not_smi_label /*, cr0*/);
+ }
+ // Jump if either of the registers contain a non-smi.
+ void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
+ // Jump if either of the registers contain a smi.
+ void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
+
+ // Abort execution if argument is a number, enabled via --debug-code.
+ void AssertNotNumber(Register object);
+
+ // Abort execution if argument is a smi, enabled via --debug-code.
+ void AssertNotSmi(Register object);
+ void AssertSmi(Register object);
+
+#if V8_TARGET_ARCH_S390X
+ inline void TestIfInt32(Register value, Register scratch) {
+ // High bits must be identical to fit into an 32-bit integer
+ lgfr(scratch, value);
+ CmpP(scratch, value);
+ }
+#else
+ inline void TestIfInt32(Register hi_word, Register lo_word,
+ Register scratch) {
+ // High bits must be identical to fit into an 32-bit integer
+ ShiftRightArith(scratch, lo_word, Operand(31));
+ CmpP(scratch, hi_word);
+ }
+#endif
+
+#if V8_TARGET_ARCH_S390X
+ // Ensure it is permissable to read/write int value directly from
+ // upper half of the smi.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+#endif
+#if V8_TARGET_LITTLE_ENDIAN
+#define SmiWordOffset(offset) (offset + kPointerSize / 2)
+#else
+#define SmiWordOffset(offset) offset
+#endif
+
+ // Abort execution if argument is not a string, enabled via --debug-code.
+ void AssertString(Register object);
+
+ // Abort execution if argument is not a name, enabled via --debug-code.
+ void AssertName(Register object);
+
+ void AssertFunction(Register object);
+
+ // Abort execution if argument is not a JSBoundFunction,
+ // enabled via --debug-code.
+ void AssertBoundFunction(Register object);
+
+ // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
+ void AssertReceiver(Register object);
+
+ // Abort execution if argument is not undefined or an AllocationSite, enabled
+ // via --debug-code.
+ void AssertUndefinedOrAllocationSite(Register object, Register scratch);
+
+ // Abort execution if reg is not the root value with the given index,
+ // enabled via --debug-code.
+ void AssertIsRoot(Register reg, Heap::RootListIndex index);
+
+ // ---------------------------------------------------------------------------
+ // HeapNumber utilities
+
+ void JumpIfNotHeapNumber(Register object, Register heap_number_map,
+ Register scratch, Label* on_not_heap_number);
+
+ // ---------------------------------------------------------------------------
+ // String utilities
+
+ // Checks if both objects are sequential one-byte strings and jumps to label
+ // if either is not. Assumes that neither object is a smi.
+ void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1,
+ Register object2,
+ Register scratch1,
+ Register scratch2,
+ Label* failure);
+
+ // Checks if both objects are sequential one-byte strings and jumps to label
+ // if either is not.
+ void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* not_flat_one_byte_strings);
+
+ // Checks if both instance types are sequential one-byte strings and jumps to
+ // label if either is not.
+ void JumpIfBothInstanceTypesAreNotSequentialOneByte(
+ Register first_object_instance_type, Register second_object_instance_type,
+ Register scratch1, Register scratch2, Label* failure);
+
+ // Check if instance type is sequential one-byte string and jump to label if
+ // it is not.
+ void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
+ Label* failure);
+
+ void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
+
+ void EmitSeqStringSetCharCheck(Register string, Register index,
+ Register value, uint32_t encoding_mask);
+
+ // ---------------------------------------------------------------------------
+ // Patching helpers.
+
+ void ClampUint8(Register output_reg, Register input_reg);
+
+ // Saturate a value into 8-bit unsigned integer
+ // if input_value < 0, output_value is 0
+ // if input_value > 255, output_value is 255
+ // otherwise output_value is the (int)input_value (round to nearest)
+ void ClampDoubleToUint8(Register result_reg, DoubleRegister input_reg,
+ DoubleRegister temp_double_reg);
+
+ void LoadInstanceDescriptors(Register map, Register descriptors);
+ void EnumLength(Register dst, Register map);
+ void NumberOfOwnDescriptors(Register dst, Register map);
+ void LoadAccessor(Register dst, Register holder, int accessor_index,
+ AccessorComponent accessor);
+
+ template <typename Field>
+ void DecodeField(Register dst, Register src) {
+ ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift);
+ }
+
+ template <typename Field>
+ void DecodeField(Register reg) {
+ DecodeField<Field>(reg, reg);
+ }
+
+ template <typename Field>
+ void DecodeFieldToSmi(Register dst, Register src) {
+ // TODO(joransiu): Optimize into single instruction
+ DecodeField<Field>(dst, src);
+ SmiTag(dst);
+ }
+
+ template <typename Field>
+ void DecodeFieldToSmi(Register reg) {
+ DecodeFieldToSmi<Field>(reg, reg);
+ }
+
+ // Load the type feedback vector from a JavaScript frame.
+ void EmitLoadTypeFeedbackVector(Register vector);
+
+ // Activation support.
+ void EnterFrame(StackFrame::Type type,
+ bool load_constant_pool_pointer_reg = false);
+ // Returns the pc offset at which the frame ends.
+ int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);
+
+ // Expects object in r2 and returns map with validated enum cache
+ // in r2. Assumes that any other register can be used as a scratch.
+ void CheckEnumCache(Label* call_runtime);
+
+ // AllocationMemento support. Arrays may have an associated
+ // AllocationMemento object that can be checked for in order to pretransition
+ // to another type.
+ // On entry, receiver_reg should point to the array object.
+ // scratch_reg gets clobbered.
+ // If allocation info is present, condition flags are set to eq.
+ void TestJSArrayForAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Register scratch2_reg,
+ Label* no_memento_found);
+
+ void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Register scratch2_reg,
+ Label* memento_found) {
+ Label no_memento_found;
+ TestJSArrayForAllocationMemento(receiver_reg, scratch_reg, scratch2_reg,
+ &no_memento_found);
+ beq(memento_found);
+ bind(&no_memento_found);
+ }
+
+ // Jumps to found label if a prototype map has dictionary elements.
+ void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
+ Register scratch1, Label* found);
+
+ private:
+ static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
+
+ void CallCFunctionHelper(Register function, int num_reg_arguments,
+ int num_double_arguments);
+
+ void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al,
+ CRegister cr = cr7);
+
+ // Helper functions for generating invokes.
+ void InvokePrologue(const ParameterCount& expected,
+ const ParameterCount& actual, Label* done,
+ bool* definitely_mismatches, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ void InitializeNewString(Register string, Register length,
+ Heap::RootListIndex map_index, Register scratch1,
+ Register scratch2);
+
+ // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+ void InNewSpace(Register object, Register scratch,
+ Condition cond, // eq for new space, ne otherwise.
+ Label* branch);
+
+ // Helper for finding the mark bits for an address. Afterwards, the
+ // bitmap register points at the word with the mark bits and the mask
+ // the position of the first bit. Leaves addr_reg unchanged.
+ inline void GetMarkBits(Register addr_reg, Register bitmap_reg,
+ Register mask_reg);
+
+ static const RegList kSafepointSavedRegisters;
+ static const int kNumSafepointSavedRegisters;
+
+ // Compute memory operands for safepoint stack slots.
+ static int SafepointRegisterStackIndex(int reg_code);
+ MemOperand SafepointRegisterSlot(Register reg);
+ MemOperand SafepointRegistersAndDoublesSlot(Register reg);
+
+ bool generating_stub_;
+ bool has_frame_;
+ // This handle will be patched with the code object on installation.
+ Handle<Object> code_object_;
+
+ // Needs access to SafepointRegisterStackIndex for compiled frame
+ // traversal.
+ friend class StandardFrame;
+};
+
+// The code patcher is used to patch (typically) small parts of code e.g. for
+// debugging and other types of instrumentation. When using the code patcher
+// the exact number of bytes specified must be emitted. It is not legal to emit
+// relocation information. If any of these constraints are violated it causes
+// an assertion to fail.
+class CodePatcher {
+ public:
+ enum FlushICache { FLUSH, DONT_FLUSH };
+
+ CodePatcher(Isolate* isolate, byte* address, int instructions,
+ FlushICache flush_cache = FLUSH);
+ ~CodePatcher();
+
+ // Macro assembler to emit code.
+ MacroAssembler* masm() { return &masm_; }
+
+ private:
+ byte* address_; // The address of the code being patched.
+ int size_; // Number of bytes of the expected patch size.
+ MacroAssembler masm_; // Macro assembler used to generate the code.
+ FlushICache flush_cache_; // Whether to flush the I cache after patching.
+};
+
+// -----------------------------------------------------------------------------
+// Static helper functions.
+
+inline MemOperand ContextMemOperand(Register context, int index = 0) {
+ return MemOperand(context, Context::SlotOffset(index));
+}
+
+inline MemOperand NativeContextMemOperand() {
+ return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
+}
+
+#ifdef GENERATED_CODE_COVERAGE
+#define CODE_COVERAGE_STRINGIFY(x) #x
+#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
+#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
+#define ACCESS_MASM(masm) \
+ masm->stop(__FILE_LINE__); \
+ masm->
+#else
+#define ACCESS_MASM(masm) masm->
+#endif
+} // namespace internal
+} // namespace v8
+
+#endif // V8_S390_MACRO_ASSEMBLER_S390_H_
diff --git a/deps/v8/src/s390/simulator-s390.cc b/deps/v8/src/s390/simulator-s390.cc
new file mode 100644
index 0000000000..06e52a7626
--- /dev/null
+++ b/deps/v8/src/s390/simulator-s390.cc
@@ -0,0 +1,5128 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdarg.h>
+#include <stdlib.h>
+#include <cmath>
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/assembler.h"
+#include "src/base/bits.h"
+#include "src/codegen.h"
+#include "src/disasm.h"
+#include "src/runtime/runtime-utils.h"
+#include "src/s390/constants-s390.h"
+#include "src/s390/frames-s390.h"
+#include "src/s390/simulator-s390.h"
+#if defined(USE_SIMULATOR)
+
+// Only build the simulator if not compiling for real s390 hardware.
+namespace v8 {
+namespace internal {
+
+// This macro provides a platform independent use of sscanf. The reason for
+// SScanF not being implemented in a platform independent way through
+// ::v8::internal::OS in the same way as SNPrintF is that the
+// Windows C Run-Time Library does not provide vsscanf.
+#define SScanF sscanf // NOLINT
+
+// The S390Debugger class is used by the simulator while debugging simulated
+// z/Architecture code.
+class S390Debugger {
+ public:
+ explicit S390Debugger(Simulator* sim) : sim_(sim) {}
+ ~S390Debugger();
+
+ void Stop(Instruction* instr);
+ void Debug();
+
+ private:
+#if V8_TARGET_LITTLE_ENDIAN
+ static const Instr kBreakpointInstr = (0x0000FFB2); // TRAP4 0000
+ static const Instr kNopInstr = (0x00160016); // OR r0, r0 x2
+#else
+ static const Instr kBreakpointInstr = (0xB2FF0000); // TRAP4 0000
+ static const Instr kNopInstr = (0x16001600); // OR r0, r0 x2
+#endif
+
+ Simulator* sim_;
+
+ intptr_t GetRegisterValue(int regnum);
+ double GetRegisterPairDoubleValue(int regnum);
+ double GetFPDoubleRegisterValue(int regnum);
+ float GetFPFloatRegisterValue(int regnum);
+ bool GetValue(const char* desc, intptr_t* value);
+ bool GetFPDoubleValue(const char* desc, double* value);
+
+ // Set or delete a breakpoint. Returns true if successful.
+ bool SetBreakpoint(Instruction* break_pc);
+ bool DeleteBreakpoint(Instruction* break_pc);
+
+ // Undo and redo all breakpoints. This is needed to bracket disassembly and
+ // execution to skip past breakpoints when run from the debugger.
+ void UndoBreakpoints();
+ void RedoBreakpoints();
+};
+
+S390Debugger::~S390Debugger() {}
+
+#ifdef GENERATED_CODE_COVERAGE
+static FILE* coverage_log = NULL;
+
+static void InitializeCoverage() {
+ char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
+ if (file_name != NULL) {
+ coverage_log = fopen(file_name, "aw+");
+ }
+}
+
+void S390Debugger::Stop(Instruction* instr) {
+ // Get the stop code.
+ uint32_t code = instr->SvcValue() & kStopCodeMask;
+ // Retrieve the encoded address, which comes just after this stop.
+ char** msg_address =
+ reinterpret_cast<char**>(sim_->get_pc() + sizeof(FourByteInstr));
+ char* msg = *msg_address;
+ DCHECK(msg != NULL);
+
+ // Update this stop description.
+ if (isWatchedStop(code) && !watched_stops_[code].desc) {
+ watched_stops_[code].desc = msg;
+ }
+
+ if (strlen(msg) > 0) {
+ if (coverage_log != NULL) {
+ fprintf(coverage_log, "%s\n", msg);
+ fflush(coverage_log);
+ }
+ // Overwrite the instruction and address with nops.
+ instr->SetInstructionBits(kNopInstr);
+ reinterpret_cast<Instruction*>(msg_address)->SetInstructionBits(kNopInstr);
+ }
+ sim_->set_pc(sim_->get_pc() + sizeof(FourByteInstr) + kPointerSize);
+}
+
+#else // ndef GENERATED_CODE_COVERAGE
+
+static void InitializeCoverage() {}
+
+void S390Debugger::Stop(Instruction* instr) {
+ // Get the stop code.
+ // use of kStopCodeMask not right on PowerPC
+ uint32_t code = instr->SvcValue() & kStopCodeMask;
+ // Retrieve the encoded address, which comes just after this stop.
+ char* msg = *reinterpret_cast<char**>(sim_->get_pc() + sizeof(FourByteInstr));
+ // Update this stop description.
+ if (sim_->isWatchedStop(code) && !sim_->watched_stops_[code].desc) {
+ sim_->watched_stops_[code].desc = msg;
+ }
+ // Print the stop message and code if it is not the default code.
+ if (code != kMaxStopCode) {
+ PrintF("Simulator hit stop %u: %s\n", code, msg);
+ } else {
+ PrintF("Simulator hit %s\n", msg);
+ }
+ sim_->set_pc(sim_->get_pc() + sizeof(FourByteInstr) + kPointerSize);
+ Debug();
+}
+#endif
+
+intptr_t S390Debugger::GetRegisterValue(int regnum) {
+ return sim_->get_register(regnum);
+}
+
+double S390Debugger::GetRegisterPairDoubleValue(int regnum) {
+ return sim_->get_double_from_register_pair(regnum);
+}
+
+double S390Debugger::GetFPDoubleRegisterValue(int regnum) {
+ return sim_->get_double_from_d_register(regnum);
+}
+
+float S390Debugger::GetFPFloatRegisterValue(int regnum) {
+ return sim_->get_float32_from_d_register(regnum);
+}
+
+bool S390Debugger::GetValue(const char* desc, intptr_t* value) {
+ int regnum = Registers::Number(desc);
+ if (regnum != kNoRegister) {
+ *value = GetRegisterValue(regnum);
+ return true;
+ } else {
+ if (strncmp(desc, "0x", 2) == 0) {
+ return SScanF(desc + 2, "%" V8PRIxPTR,
+ reinterpret_cast<uintptr_t*>(value)) == 1;
+ } else {
+ return SScanF(desc, "%" V8PRIuPTR, reinterpret_cast<uintptr_t*>(value)) ==
+ 1;
+ }
+ }
+ return false;
+}
+
+bool S390Debugger::GetFPDoubleValue(const char* desc, double* value) {
+ int regnum = DoubleRegisters::Number(desc);
+ if (regnum != kNoRegister) {
+ *value = sim_->get_double_from_d_register(regnum);
+ return true;
+ }
+ return false;
+}
+
+bool S390Debugger::SetBreakpoint(Instruction* break_pc) {
+ // Check if a breakpoint can be set. If not return without any side-effects.
+ if (sim_->break_pc_ != NULL) {
+ return false;
+ }
+
+ // Set the breakpoint.
+ sim_->break_pc_ = break_pc;
+ sim_->break_instr_ = break_pc->InstructionBits();
+ // Not setting the breakpoint instruction in the code itself. It will be set
+ // when the debugger shell continues.
+ return true;
+}
+
+bool S390Debugger::DeleteBreakpoint(Instruction* break_pc) {
+ if (sim_->break_pc_ != NULL) {
+ sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
+ }
+
+ sim_->break_pc_ = NULL;
+ sim_->break_instr_ = 0;
+ return true;
+}
+
+void S390Debugger::UndoBreakpoints() {
+ if (sim_->break_pc_ != NULL) {
+ sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
+ }
+}
+
+void S390Debugger::RedoBreakpoints() {
+ if (sim_->break_pc_ != NULL) {
+ sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
+ }
+}
+
+void S390Debugger::Debug() {
+ intptr_t last_pc = -1;
+ bool done = false;
+
+#define COMMAND_SIZE 63
+#define ARG_SIZE 255
+
+#define STR(a) #a
+#define XSTR(a) STR(a)
+
+ char cmd[COMMAND_SIZE + 1];
+ char arg1[ARG_SIZE + 1];
+ char arg2[ARG_SIZE + 1];
+ char* argv[3] = {cmd, arg1, arg2};
+
+ // make sure to have a proper terminating character if reaching the limit
+ cmd[COMMAND_SIZE] = 0;
+ arg1[ARG_SIZE] = 0;
+ arg2[ARG_SIZE] = 0;
+
+ // Undo all set breakpoints while running in the debugger shell. This will
+ // make them invisible to all commands.
+ UndoBreakpoints();
+ // Disable tracing while simulating
+ bool trace = ::v8::internal::FLAG_trace_sim;
+ ::v8::internal::FLAG_trace_sim = false;
+
+ while (!done && !sim_->has_bad_pc()) {
+ if (last_pc != sim_->get_pc()) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // use a reasonably large buffer
+ v8::internal::EmbeddedVector<char, 256> buffer;
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(sim_->get_pc()));
+ PrintF(" 0x%08" V8PRIxPTR " %s\n", sim_->get_pc(), buffer.start());
+ last_pc = sim_->get_pc();
+ }
+ char* line = ReadLine("sim> ");
+ if (line == NULL) {
+ break;
+ } else {
+ char* last_input = sim_->last_debugger_input();
+ if (strcmp(line, "\n") == 0 && last_input != NULL) {
+ line = last_input;
+ } else {
+ // Ownership is transferred to sim_;
+ sim_->set_last_debugger_input(line);
+ }
+ // Use sscanf to parse the individual parts of the command line. At the
+ // moment no command expects more than two parameters.
+ int argc = SScanF(line,
+ "%" XSTR(COMMAND_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s",
+ cmd, arg1, arg2);
+ if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+ intptr_t value;
+
+ // If at a breakpoint, proceed past it.
+ if ((reinterpret_cast<Instruction*>(sim_->get_pc()))
+ ->InstructionBits() == 0x7d821008) {
+ sim_->set_pc(sim_->get_pc() + sizeof(FourByteInstr));
+ } else {
+ sim_->ExecuteInstruction(
+ reinterpret_cast<Instruction*>(sim_->get_pc()));
+ }
+
+ if (argc == 2 && last_pc != sim_->get_pc() && GetValue(arg1, &value)) {
+ for (int i = 1; (!sim_->has_bad_pc()) && i < value; i++) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // use a reasonably large buffer
+ v8::internal::EmbeddedVector<char, 256> buffer;
+ dasm.InstructionDecode(buffer,
+ reinterpret_cast<byte*>(sim_->get_pc()));
+ PrintF(" 0x%08" V8PRIxPTR " %s\n", sim_->get_pc(),
+ buffer.start());
+ sim_->ExecuteInstruction(
+ reinterpret_cast<Instruction*>(sim_->get_pc()));
+ }
+ }
+ } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
+ // If at a breakpoint, proceed past it.
+ if ((reinterpret_cast<Instruction*>(sim_->get_pc()))
+ ->InstructionBits() == 0x7d821008) {
+ sim_->set_pc(sim_->get_pc() + sizeof(FourByteInstr));
+ } else {
+ // Execute the one instruction we broke at with breakpoints disabled.
+ sim_->ExecuteInstruction(
+ reinterpret_cast<Instruction*>(sim_->get_pc()));
+ }
+ // Leave the debugger shell.
+ done = true;
+ } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
+ if (argc == 2 || (argc == 3 && strcmp(arg2, "fp") == 0)) {
+ intptr_t value;
+ double dvalue;
+ if (strcmp(arg1, "all") == 0) {
+ for (int i = 0; i < kNumRegisters; i++) {
+ value = GetRegisterValue(i);
+ PrintF(" %3s: %08" V8PRIxPTR,
+ Register::from_code(i).ToString(), value);
+ if ((argc == 3 && strcmp(arg2, "fp") == 0) && i < 8 &&
+ (i % 2) == 0) {
+ dvalue = GetRegisterPairDoubleValue(i);
+ PrintF(" (%f)\n", dvalue);
+ } else if (i != 0 && !((i + 1) & 3)) {
+ PrintF("\n");
+ }
+ }
+ PrintF(" pc: %08" V8PRIxPTR " cr: %08x\n", sim_->special_reg_pc_,
+ sim_->condition_reg_);
+ } else if (strcmp(arg1, "alld") == 0) {
+ for (int i = 0; i < kNumRegisters; i++) {
+ value = GetRegisterValue(i);
+ PrintF(" %3s: %08" V8PRIxPTR " %11" V8PRIdPTR,
+ Register::from_code(i).ToString(), value, value);
+ if ((argc == 3 && strcmp(arg2, "fp") == 0) && i < 8 &&
+ (i % 2) == 0) {
+ dvalue = GetRegisterPairDoubleValue(i);
+ PrintF(" (%f)\n", dvalue);
+ } else if (!((i + 1) % 2)) {
+ PrintF("\n");
+ }
+ }
+ PrintF(" pc: %08" V8PRIxPTR " cr: %08x\n", sim_->special_reg_pc_,
+ sim_->condition_reg_);
+ } else if (strcmp(arg1, "allf") == 0) {
+ for (int i = 0; i < DoubleRegister::kNumRegisters; i++) {
+ float fvalue = GetFPFloatRegisterValue(i);
+ uint32_t as_words = bit_cast<uint32_t>(fvalue);
+ PrintF("%3s: %f 0x%08x\n",
+ DoubleRegister::from_code(i).ToString(), fvalue, as_words);
+ }
+ } else if (strcmp(arg1, "alld") == 0) {
+ for (int i = 0; i < DoubleRegister::kNumRegisters; i++) {
+ dvalue = GetFPDoubleRegisterValue(i);
+ uint64_t as_words = bit_cast<uint64_t>(dvalue);
+ PrintF("%3s: %f 0x%08x %08x\n",
+ DoubleRegister::from_code(i).ToString(), dvalue,
+ static_cast<uint32_t>(as_words >> 32),
+ static_cast<uint32_t>(as_words & 0xffffffff));
+ }
+ } else if (arg1[0] == 'r' &&
+ (arg1[1] >= '0' && arg1[1] <= '2' &&
+ (arg1[2] == '\0' || (arg1[2] >= '0' && arg1[2] <= '5' &&
+ arg1[3] == '\0')))) {
+ int regnum = strtoul(&arg1[1], 0, 10);
+ if (regnum != kNoRegister) {
+ value = GetRegisterValue(regnum);
+ PrintF("%s: 0x%08" V8PRIxPTR " %" V8PRIdPTR "\n", arg1, value,
+ value);
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ } else {
+ if (GetValue(arg1, &value)) {
+ PrintF("%s: 0x%08" V8PRIxPTR " %" V8PRIdPTR "\n", arg1, value,
+ value);
+ } else if (GetFPDoubleValue(arg1, &dvalue)) {
+ uint64_t as_words = bit_cast<uint64_t>(dvalue);
+ PrintF("%s: %f 0x%08x %08x\n", arg1, dvalue,
+ static_cast<uint32_t>(as_words >> 32),
+ static_cast<uint32_t>(as_words & 0xffffffff));
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ }
+ } else {
+ PrintF("print <register>\n");
+ }
+ } else if ((strcmp(cmd, "po") == 0) ||
+ (strcmp(cmd, "printobject") == 0)) {
+ if (argc == 2) {
+ intptr_t value;
+ OFStream os(stdout);
+ if (GetValue(arg1, &value)) {
+ Object* obj = reinterpret_cast<Object*>(value);
+ os << arg1 << ": \n";
+#ifdef DEBUG
+ obj->Print(os);
+ os << "\n";
+#else
+ os << Brief(obj) << "\n";
+#endif
+ } else {
+ os << arg1 << " unrecognized\n";
+ }
+ } else {
+ PrintF("printobject <value>\n");
+ }
+ } else if (strcmp(cmd, "setpc") == 0) {
+ intptr_t value;
+
+ if (!GetValue(arg1, &value)) {
+ PrintF("%s unrecognized\n", arg1);
+ continue;
+ }
+ sim_->set_pc(value);
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+ intptr_t* cur = NULL;
+ intptr_t* end = NULL;
+ int next_arg = 1;
+
+ if (strcmp(cmd, "stack") == 0) {
+ cur = reinterpret_cast<intptr_t*>(sim_->get_register(Simulator::sp));
+ } else { // "mem"
+ intptr_t value;
+ if (!GetValue(arg1, &value)) {
+ PrintF("%s unrecognized\n", arg1);
+ continue;
+ }
+ cur = reinterpret_cast<intptr_t*>(value);
+ next_arg++;
+ }
+
+ intptr_t words; // likely inaccurate variable name for 64bit
+ if (argc == next_arg) {
+ words = 10;
+ } else {
+ if (!GetValue(argv[next_arg], &words)) {
+ words = 10;
+ }
+ }
+ end = cur + words;
+
+ while (cur < end) {
+ PrintF(" 0x%08" V8PRIxPTR ": 0x%08" V8PRIxPTR " %10" V8PRIdPTR,
+ reinterpret_cast<intptr_t>(cur), *cur, *cur);
+ HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
+ intptr_t value = *cur;
+ Heap* current_heap = sim_->isolate_->heap();
+ if (((value & 1) == 0) ||
+ current_heap->ContainsSlow(obj->address())) {
+ PrintF("(smi %d)", PlatformSmiTagging::SmiToInt(obj));
+ } else if (current_heap->Contains(obj)) {
+ PrintF(" (");
+ obj->ShortPrint();
+ PrintF(")");
+ }
+ PrintF("\n");
+ cur++;
+ }
+ } else if (strcmp(cmd, "disasm") == 0 || strcmp(cmd, "di") == 0) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // use a reasonably large buffer
+ v8::internal::EmbeddedVector<char, 256> buffer;
+
+ byte* prev = NULL;
+ byte* cur = NULL;
+ // Default number of instructions to disassemble.
+ int32_t numInstructions = 10;
+
+ if (argc == 1) {
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
+ } else if (argc == 2) {
+ int regnum = Registers::Number(arg1);
+ if (regnum != kNoRegister || strncmp(arg1, "0x", 2) == 0) {
+ // The argument is an address or a register name.
+ intptr_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte*>(value);
+ }
+ } else {
+ // The argument is the number of instructions.
+ intptr_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
+ // Disassemble <arg1> instructions.
+ numInstructions = static_cast<int32_t>(value);
+ }
+ }
+ } else {
+ intptr_t value1;
+ intptr_t value2;
+ if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
+ cur = reinterpret_cast<byte*>(value1);
+ // Disassemble <arg2> instructions.
+ numInstructions = static_cast<int32_t>(value2);
+ }
+ }
+
+ while (numInstructions > 0) {
+ prev = cur;
+ cur += dasm.InstructionDecode(buffer, cur);
+ PrintF(" 0x%08" V8PRIxPTR " %s\n", reinterpret_cast<intptr_t>(prev),
+ buffer.start());
+ numInstructions--;
+ }
+ } else if (strcmp(cmd, "gdb") == 0) {
+ PrintF("relinquishing control to gdb\n");
+ v8::base::OS::DebugBreak();
+ PrintF("regaining control from gdb\n");
+ } else if (strcmp(cmd, "break") == 0) {
+ if (argc == 2) {
+ intptr_t value;
+ if (GetValue(arg1, &value)) {
+ if (!SetBreakpoint(reinterpret_cast<Instruction*>(value))) {
+ PrintF("setting breakpoint failed\n");
+ }
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ } else {
+ PrintF("break <address>\n");
+ }
+ } else if (strcmp(cmd, "del") == 0) {
+ if (!DeleteBreakpoint(NULL)) {
+ PrintF("deleting breakpoint failed\n");
+ }
+ } else if (strcmp(cmd, "cr") == 0) {
+ PrintF("Condition reg: %08x\n", sim_->condition_reg_);
+ } else if (strcmp(cmd, "stop") == 0) {
+ intptr_t value;
+ intptr_t stop_pc =
+ sim_->get_pc() - (sizeof(FourByteInstr) + kPointerSize);
+ Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
+ Instruction* msg_address =
+ reinterpret_cast<Instruction*>(stop_pc + sizeof(FourByteInstr));
+ if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
+ // Remove the current stop.
+ if (sim_->isStopInstruction(stop_instr)) {
+ stop_instr->SetInstructionBits(kNopInstr);
+ msg_address->SetInstructionBits(kNopInstr);
+ } else {
+ PrintF("Not at debugger stop.\n");
+ }
+ } else if (argc == 3) {
+ // Print information about all/the specified breakpoint(s).
+ if (strcmp(arg1, "info") == 0) {
+ if (strcmp(arg2, "all") == 0) {
+ PrintF("Stop information:\n");
+ for (uint32_t i = 0; i < sim_->kNumOfWatchedStops; i++) {
+ sim_->PrintStopInfo(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->PrintStopInfo(value);
+ } else {
+ PrintF("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "enable") == 0) {
+ // Enable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = 0; i < sim_->kNumOfWatchedStops; i++) {
+ sim_->EnableStop(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->EnableStop(value);
+ } else {
+ PrintF("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "disable") == 0) {
+ // Disable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = 0; i < sim_->kNumOfWatchedStops; i++) {
+ sim_->DisableStop(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->DisableStop(value);
+ } else {
+ PrintF("Unrecognized argument.\n");
+ }
+ }
+ } else {
+ PrintF("Wrong usage. Use help command for more information.\n");
+ }
+ } else if ((strcmp(cmd, "t") == 0) || strcmp(cmd, "trace") == 0) {
+ ::v8::internal::FLAG_trace_sim = !::v8::internal::FLAG_trace_sim;
+ PrintF("Trace of executed instructions is %s\n",
+ ::v8::internal::FLAG_trace_sim ? "on" : "off");
+ } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
+ PrintF("cont\n");
+ PrintF(" continue execution (alias 'c')\n");
+ PrintF("stepi [num instructions]\n");
+ PrintF(" step one/num instruction(s) (alias 'si')\n");
+ PrintF("print <register>\n");
+ PrintF(" print register content (alias 'p')\n");
+ PrintF(" use register name 'all' to display all integer registers\n");
+ PrintF(
+ " use register name 'alld' to display integer registers "
+ "with decimal values\n");
+ PrintF(" use register name 'rN' to display register number 'N'\n");
+ PrintF(" add argument 'fp' to print register pair double values\n");
+ PrintF(
+ " use register name 'allf' to display floating-point "
+ "registers\n");
+ PrintF("printobject <register>\n");
+ PrintF(" print an object from a register (alias 'po')\n");
+ PrintF("cr\n");
+ PrintF(" print condition register\n");
+ PrintF("stack [<num words>]\n");
+ PrintF(" dump stack content, default dump 10 words)\n");
+ PrintF("mem <address> [<num words>]\n");
+ PrintF(" dump memory content, default dump 10 words)\n");
+ PrintF("disasm [<instructions>]\n");
+ PrintF("disasm [<address/register>]\n");
+ PrintF("disasm [[<address/register>] <instructions>]\n");
+ PrintF(" disassemble code, default is 10 instructions\n");
+ PrintF(" from pc (alias 'di')\n");
+ PrintF("gdb\n");
+ PrintF(" enter gdb\n");
+ PrintF("break <address>\n");
+ PrintF(" set a break point on the address\n");
+ PrintF("del\n");
+ PrintF(" delete the breakpoint\n");
+ PrintF("trace (alias 't')\n");
+ PrintF(" toogle the tracing of all executed statements\n");
+ PrintF("stop feature:\n");
+ PrintF(" Description:\n");
+ PrintF(" Stops are debug instructions inserted by\n");
+ PrintF(" the Assembler::stop() function.\n");
+ PrintF(" When hitting a stop, the Simulator will\n");
+ PrintF(" stop and and give control to the S390Debugger.\n");
+ PrintF(" The first %d stop codes are watched:\n",
+ Simulator::kNumOfWatchedStops);
+ PrintF(" - They can be enabled / disabled: the Simulator\n");
+ PrintF(" will / won't stop when hitting them.\n");
+ PrintF(" - The Simulator keeps track of how many times they \n");
+ PrintF(" are met. (See the info command.) Going over a\n");
+ PrintF(" disabled stop still increases its counter. \n");
+ PrintF(" Commands:\n");
+ PrintF(" stop info all/<code> : print infos about number <code>\n");
+ PrintF(" or all stop(s).\n");
+ PrintF(" stop enable/disable all/<code> : enables / disables\n");
+ PrintF(" all or number <code> stop(s)\n");
+ PrintF(" stop unstop\n");
+ PrintF(" ignore the stop instruction at the current location\n");
+ PrintF(" from now on\n");
+ } else {
+ PrintF("Unknown command: %s\n", cmd);
+ }
+ }
+ }
+
+ // Add all the breakpoints back to stop execution and enter the debugger
+ // shell when hit.
+ RedoBreakpoints();
+ // Restore tracing
+ ::v8::internal::FLAG_trace_sim = trace;
+
+#undef COMMAND_SIZE
+#undef ARG_SIZE
+
+#undef STR
+#undef XSTR
+}
+
+static bool ICacheMatch(void* one, void* two) {
+ DCHECK((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
+ DCHECK((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
+ return one == two;
+}
+
+static uint32_t ICacheHash(void* key) {
+ return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)) >> 2;
+}
+
+static bool AllOnOnePage(uintptr_t start, int size) {
+ intptr_t start_page = (start & ~CachePage::kPageMask);
+ intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
+ return start_page == end_page;
+}
+
+void Simulator::set_last_debugger_input(char* input) {
+ DeleteArray(last_debugger_input_);
+ last_debugger_input_ = input;
+}
+
+void Simulator::FlushICache(v8::internal::HashMap* i_cache, void* start_addr,
+ size_t size) {
+ intptr_t start = reinterpret_cast<intptr_t>(start_addr);
+ int intra_line = (start & CachePage::kLineMask);
+ start -= intra_line;
+ size += intra_line;
+ size = ((size - 1) | CachePage::kLineMask) + 1;
+ int offset = (start & CachePage::kPageMask);
+ while (!AllOnOnePage(start, size - 1)) {
+ int bytes_to_flush = CachePage::kPageSize - offset;
+ FlushOnePage(i_cache, start, bytes_to_flush);
+ start += bytes_to_flush;
+ size -= bytes_to_flush;
+ DCHECK_EQ(0, static_cast<int>(start & CachePage::kPageMask));
+ offset = 0;
+ }
+ if (size != 0) {
+ FlushOnePage(i_cache, start, size);
+ }
+}
+
+CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
+ v8::internal::HashMap::Entry* entry =
+ i_cache->LookupOrInsert(page, ICacheHash(page));
+ if (entry->value == NULL) {
+ CachePage* new_page = new CachePage();
+ entry->value = new_page;
+ }
+ return reinterpret_cast<CachePage*>(entry->value);
+}
+
+// Flush from start up to and not including start + size.
+void Simulator::FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
+ int size) {
+ DCHECK(size <= CachePage::kPageSize);
+ DCHECK(AllOnOnePage(start, size - 1));
+ DCHECK((start & CachePage::kLineMask) == 0);
+ DCHECK((size & CachePage::kLineMask) == 0);
+ void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
+ int offset = (start & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePage(i_cache, page);
+ char* valid_bytemap = cache_page->ValidityByte(offset);
+ memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
+}
+
+void Simulator::CheckICache(v8::internal::HashMap* i_cache,
+ Instruction* instr) {
+ intptr_t address = reinterpret_cast<intptr_t>(instr);
+ void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
+ void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
+ int offset = (address & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePage(i_cache, page);
+ char* cache_valid_byte = cache_page->ValidityByte(offset);
+ bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
+ char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
+ if (cache_hit) {
+ // Check that the data in memory matches the contents of the I-cache.
+ CHECK_EQ(memcmp(reinterpret_cast<void*>(instr),
+ cache_page->CachedData(offset), sizeof(FourByteInstr)),
+ 0);
+ } else {
+ // Cache miss. Load memory into the cache.
+ memcpy(cached_line, line, CachePage::kLineLength);
+ *cache_valid_byte = CachePage::LINE_VALID;
+ }
+}
+
+void Simulator::Initialize(Isolate* isolate) {
+ if (isolate->simulator_initialized()) return;
+ isolate->set_simulator_initialized(true);
+ ::v8::internal::ExternalReference::set_redirector(isolate,
+ &RedirectExternalReference);
+}
+
+Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
+ i_cache_ = isolate_->simulator_i_cache();
+ if (i_cache_ == NULL) {
+ i_cache_ = new v8::internal::HashMap(&ICacheMatch);
+ isolate_->set_simulator_i_cache(i_cache_);
+ }
+ Initialize(isolate);
+// Set up simulator support first. Some of this information is needed to
+// setup the architecture state.
+#if V8_TARGET_ARCH_S390X
+ size_t stack_size = FLAG_sim_stack_size * KB;
+#else
+ size_t stack_size = MB; // allocate 1MB for stack
+#endif
+ stack_size += 2 * stack_protection_size_;
+ stack_ = reinterpret_cast<char*>(malloc(stack_size));
+ pc_modified_ = false;
+ icount_ = 0;
+ break_pc_ = NULL;
+ break_instr_ = 0;
+
+// make sure our register type can hold exactly 4/8 bytes
+#ifdef V8_TARGET_ARCH_S390X
+ DCHECK(sizeof(intptr_t) == 8);
+#else
+ DCHECK(sizeof(intptr_t) == 4);
+#endif
+ // Set up architecture state.
+ // All registers are initialized to zero to start with.
+ for (int i = 0; i < kNumGPRs; i++) {
+ registers_[i] = 0;
+ }
+ condition_reg_ = 0;
+ special_reg_pc_ = 0;
+
+ // Initializing FP registers.
+ for (int i = 0; i < kNumFPRs; i++) {
+ fp_registers_[i] = 0.0;
+ }
+
+ // The sp is initialized to point to the bottom (high address) of the
+ // allocated stack area. To be safe in potential stack underflows we leave
+ // some buffer below.
+ registers_[sp] =
+ reinterpret_cast<intptr_t>(stack_) + stack_size - stack_protection_size_;
+ InitializeCoverage();
+
+ last_debugger_input_ = NULL;
+}
+
+Simulator::~Simulator() { free(stack_); }
+
+// When the generated code calls an external reference we need to catch that in
+// the simulator. The external reference will be a function compiled for the
+// host architecture. We need to call that function instead of trying to
+// execute it with the simulator. We do that by redirecting the external
+// reference to a svc (Supervisor Call) instruction that is handled by
+// the simulator. We write the original destination of the jump just at a known
+// offset from the svc instruction so the simulator knows what to call.
+class Redirection {
+ public:
+ Redirection(Isolate* isolate, void* external_function,
+ ExternalReference::Type type)
+ : external_function_(external_function),
+// we use TRAP4 here (0xBF22)
+#if V8_TARGET_LITTLE_ENDIAN
+ swi_instruction_(0x1000FFB2),
+#else
+ swi_instruction_(0xB2FF0000 | kCallRtRedirected),
+#endif
+ type_(type),
+ next_(NULL) {
+ next_ = isolate->simulator_redirection();
+ Simulator::current(isolate)->FlushICache(
+ isolate->simulator_i_cache(),
+ reinterpret_cast<void*>(&swi_instruction_), sizeof(FourByteInstr));
+ isolate->set_simulator_redirection(this);
+ if (ABI_USES_FUNCTION_DESCRIPTORS) {
+ function_descriptor_[0] = reinterpret_cast<intptr_t>(&swi_instruction_);
+ function_descriptor_[1] = 0;
+ function_descriptor_[2] = 0;
+ }
+ }
+
+ void* address() {
+ if (ABI_USES_FUNCTION_DESCRIPTORS) {
+ return reinterpret_cast<void*>(function_descriptor_);
+ } else {
+ return reinterpret_cast<void*>(&swi_instruction_);
+ }
+ }
+
+ void* external_function() { return external_function_; }
+ ExternalReference::Type type() { return type_; }
+
+ static Redirection* Get(Isolate* isolate, void* external_function,
+ ExternalReference::Type type) {
+ Redirection* current = isolate->simulator_redirection();
+ for (; current != NULL; current = current->next_) {
+ if (current->external_function_ == external_function) {
+ DCHECK_EQ(current->type(), type);
+ return current;
+ }
+ }
+ return new Redirection(isolate, external_function, type);
+ }
+
+ static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
+ char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
+ char* addr_of_redirection =
+ addr_of_swi - offsetof(Redirection, swi_instruction_);
+ return reinterpret_cast<Redirection*>(addr_of_redirection);
+ }
+
+ static Redirection* FromAddress(void* address) {
+ int delta = ABI_USES_FUNCTION_DESCRIPTORS
+ ? offsetof(Redirection, function_descriptor_)
+ : offsetof(Redirection, swi_instruction_);
+ char* addr_of_redirection = reinterpret_cast<char*>(address) - delta;
+ return reinterpret_cast<Redirection*>(addr_of_redirection);
+ }
+
+ static void* ReverseRedirection(intptr_t reg) {
+ Redirection* redirection = FromAddress(reinterpret_cast<void*>(reg));
+ return redirection->external_function();
+ }
+
+ static void DeleteChain(Redirection* redirection) {
+ while (redirection != nullptr) {
+ Redirection* next = redirection->next_;
+ delete redirection;
+ redirection = next;
+ }
+ }
+
+ private:
+ void* external_function_;
+ uint32_t swi_instruction_;
+ ExternalReference::Type type_;
+ Redirection* next_;
+ intptr_t function_descriptor_[3];
+};
+
+// static
+void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
+ Redirection::DeleteChain(first);
+ if (i_cache != nullptr) {
+ for (HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
+ entry = i_cache->Next(entry)) {
+ delete static_cast<CachePage*>(entry->value);
+ }
+ delete i_cache;
+ }
+}
+
+void* Simulator::RedirectExternalReference(Isolate* isolate,
+ void* external_function,
+ ExternalReference::Type type) {
+ Redirection* redirection = Redirection::Get(isolate, external_function, type);
+ return redirection->address();
+}
+
+// Get the active Simulator for the current thread.
+Simulator* Simulator::current(Isolate* isolate) {
+ v8::internal::Isolate::PerIsolateThreadData* isolate_data =
+ isolate->FindOrAllocatePerThreadDataForThisThread();
+ DCHECK(isolate_data != NULL);
+
+ Simulator* sim = isolate_data->simulator();
+ if (sim == NULL) {
+ // TODO(146): delete the simulator object when a thread/isolate goes away.
+ sim = new Simulator(isolate);
+ isolate_data->set_simulator(sim);
+ }
+ return sim;
+}
+
+// Sets the register in the architecture state.
+void Simulator::set_register(int reg, uint64_t value) {
+ DCHECK((reg >= 0) && (reg < kNumGPRs));
+ registers_[reg] = value;
+}
+
+// Get the register from the architecture state.
+uint64_t Simulator::get_register(int reg) const {
+ DCHECK((reg >= 0) && (reg < kNumGPRs));
+ // Stupid code added to avoid bug in GCC.
+ // See: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43949
+ if (reg >= kNumGPRs) return 0;
+ // End stupid code.
+ return registers_[reg];
+}
+
+template <typename T>
+T Simulator::get_low_register(int reg) const {
+ DCHECK((reg >= 0) && (reg < kNumGPRs));
+ // Stupid code added to avoid bug in GCC.
+ // See: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43949
+ if (reg >= kNumGPRs) return 0;
+ // End stupid code.
+ return static_cast<T>(registers_[reg] & 0xFFFFFFFF);
+}
+
+template <typename T>
+T Simulator::get_high_register(int reg) const {
+ DCHECK((reg >= 0) && (reg < kNumGPRs));
+ // Stupid code added to avoid bug in GCC.
+ // See: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43949
+ if (reg >= kNumGPRs) return 0;
+ // End stupid code.
+ return static_cast<T>(registers_[reg] >> 32);
+}
+
+void Simulator::set_low_register(int reg, uint32_t value) {
+ uint64_t shifted_val = static_cast<uint64_t>(value);
+ uint64_t orig_val = static_cast<uint64_t>(registers_[reg]);
+ uint64_t result = (orig_val >> 32 << 32) | shifted_val;
+ registers_[reg] = result;
+}
+
+void Simulator::set_high_register(int reg, uint32_t value) {
+ uint64_t shifted_val = static_cast<uint64_t>(value) << 32;
+ uint64_t orig_val = static_cast<uint64_t>(registers_[reg]);
+ uint64_t result = (orig_val & 0xFFFFFFFF) | shifted_val;
+ registers_[reg] = result;
+}
+
+double Simulator::get_double_from_register_pair(int reg) {
+ DCHECK((reg >= 0) && (reg < kNumGPRs) && ((reg % 2) == 0));
+
+ double dm_val = 0.0;
+#if 0 && !V8_TARGET_ARCH_S390X // doesn't make sense in 64bit mode
+ // Read the bits from the unsigned integer register_[] array
+ // into the double precision floating point value and return it.
+ char buffer[sizeof(fp_registers_[0])];
+ memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
+ memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
+#endif
+ return (dm_val);
+}
+
+// Raw access to the PC register.
+void Simulator::set_pc(intptr_t value) {
+ pc_modified_ = true;
+ special_reg_pc_ = value;
+}
+
+bool Simulator::has_bad_pc() const {
+ return ((special_reg_pc_ == bad_lr) || (special_reg_pc_ == end_sim_pc));
+}
+
+// Raw access to the PC register without the special adjustment when reading.
+intptr_t Simulator::get_pc() const { return special_reg_pc_; }
+
+// Runtime FP routines take:
+// - two double arguments
+// - one double argument and zero or one integer arguments.
+// All are consructed here from d1, d2 and r2.
+void Simulator::GetFpArgs(double* x, double* y, intptr_t* z) {
+ *x = get_double_from_d_register(0);
+ *y = get_double_from_d_register(2);
+ *z = get_register(2);
+}
+
+// The return value is in d0.
+void Simulator::SetFpResult(const double& result) {
+ set_d_register_from_double(0, result);
+}
+
+void Simulator::TrashCallerSaveRegisters() {
+// We don't trash the registers with the return value.
+#if 0 // A good idea to trash volatile registers, needs to be done
+ registers_[2] = 0x50Bad4U;
+ registers_[3] = 0x50Bad4U;
+ registers_[12] = 0x50Bad4U;
+#endif
+}
+
+uint32_t Simulator::ReadWU(intptr_t addr, Instruction* instr) {
+ uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
+ return *ptr;
+}
+
+int32_t Simulator::ReadW(intptr_t addr, Instruction* instr) {
+ int32_t* ptr = reinterpret_cast<int32_t*>(addr);
+ return *ptr;
+}
+
+void Simulator::WriteW(intptr_t addr, uint32_t value, Instruction* instr) {
+ uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
+ *ptr = value;
+ return;
+}
+
+void Simulator::WriteW(intptr_t addr, int32_t value, Instruction* instr) {
+ int32_t* ptr = reinterpret_cast<int32_t*>(addr);
+ *ptr = value;
+ return;
+}
+
+uint16_t Simulator::ReadHU(intptr_t addr, Instruction* instr) {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ return *ptr;
+}
+
+int16_t Simulator::ReadH(intptr_t addr, Instruction* instr) {
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ return *ptr;
+}
+
+void Simulator::WriteH(intptr_t addr, uint16_t value, Instruction* instr) {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ *ptr = value;
+ return;
+}
+
+void Simulator::WriteH(intptr_t addr, int16_t value, Instruction* instr) {
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ *ptr = value;
+ return;
+}
+
+uint8_t Simulator::ReadBU(intptr_t addr) {
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ return *ptr;
+}
+
+int8_t Simulator::ReadB(intptr_t addr) {
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ return *ptr;
+}
+
+void Simulator::WriteB(intptr_t addr, uint8_t value) {
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ *ptr = value;
+}
+
+void Simulator::WriteB(intptr_t addr, int8_t value) {
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ *ptr = value;
+}
+
+int64_t Simulator::ReadDW(intptr_t addr) {
+ int64_t* ptr = reinterpret_cast<int64_t*>(addr);
+ return *ptr;
+}
+
+void Simulator::WriteDW(intptr_t addr, int64_t value) {
+ int64_t* ptr = reinterpret_cast<int64_t*>(addr);
+ *ptr = value;
+ return;
+}
+
+/**
+ * Reads a double value from memory at given address.
+ */
+double Simulator::ReadDouble(intptr_t addr) {
+ double* ptr = reinterpret_cast<double*>(addr);
+ return *ptr;
+}
+
+// Returns the limit of the stack area to enable checking for stack overflows.
+uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
+ // The simulator uses a separate JS stack. If we have exhausted the C stack,
+ // we also drop down the JS limit to reflect the exhaustion on the JS stack.
+ if (GetCurrentStackPosition() < c_limit) {
+ return reinterpret_cast<uintptr_t>(get_sp());
+ }
+
+ // Otherwise the limit is the JS stack. Leave a safety margin to prevent
+ // overrunning the stack when pushing values.
+ return reinterpret_cast<uintptr_t>(stack_) + stack_protection_size_;
+}
+
+// Unsupported instructions use Format to print an error and stop execution.
+void Simulator::Format(Instruction* instr, const char* format) {
+ PrintF("Simulator found unsupported instruction:\n 0x%08" V8PRIxPTR ": %s\n",
+ reinterpret_cast<intptr_t>(instr), format);
+ UNIMPLEMENTED();
+}
+
+// Calculate C flag value for additions.
+bool Simulator::CarryFrom(int32_t left, int32_t right, int32_t carry) {
+ uint32_t uleft = static_cast<uint32_t>(left);
+ uint32_t uright = static_cast<uint32_t>(right);
+ uint32_t urest = 0xffffffffU - uleft;
+
+ return (uright > urest) ||
+ (carry && (((uright + 1) > urest) || (uright > (urest - 1))));
+}
+
+// Calculate C flag value for subtractions.
+bool Simulator::BorrowFrom(int32_t left, int32_t right) {
+ uint32_t uleft = static_cast<uint32_t>(left);
+ uint32_t uright = static_cast<uint32_t>(right);
+
+ return (uright > uleft);
+}
+
+// Calculate V flag value for additions and subtractions.
+template <typename T1>
+bool Simulator::OverflowFromSigned(T1 alu_out, T1 left, T1 right,
+ bool addition) {
+ bool overflow;
+ if (addition) {
+ // operands have the same sign
+ overflow = ((left >= 0 && right >= 0) || (left < 0 && right < 0))
+ // and operands and result have different sign
+ && ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
+ } else {
+ // operands have different signs
+ overflow = ((left < 0 && right >= 0) || (left >= 0 && right < 0))
+ // and first operand and result have different signs
+ && ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
+ }
+ return overflow;
+}
+
+#if V8_TARGET_ARCH_S390X
+static void decodeObjectPair(ObjectPair* pair, intptr_t* x, intptr_t* y) {
+ *x = reinterpret_cast<intptr_t>(pair->x);
+ *y = reinterpret_cast<intptr_t>(pair->y);
+}
+#else
+static void decodeObjectPair(ObjectPair* pair, intptr_t* x, intptr_t* y) {
+#if V8_TARGET_BIG_ENDIAN
+ *x = static_cast<int32_t>(*pair >> 32);
+ *y = static_cast<int32_t>(*pair);
+#else
+ *x = static_cast<int32_t>(*pair);
+ *y = static_cast<int32_t>(*pair >> 32);
+#endif
+}
+#endif
+
+// Calls into the V8 runtime.
+typedef intptr_t (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1,
+ intptr_t arg2, intptr_t arg3,
+ intptr_t arg4, intptr_t arg5);
+typedef ObjectPair (*SimulatorRuntimePairCall)(intptr_t arg0, intptr_t arg1,
+ intptr_t arg2, intptr_t arg3,
+ intptr_t arg4, intptr_t arg5);
+typedef ObjectTriple (*SimulatorRuntimeTripleCall)(intptr_t arg0, intptr_t arg1,
+ intptr_t arg2, intptr_t arg3,
+ intptr_t arg4,
+ intptr_t arg5);
+
+// These prototypes handle the four types of FP calls.
+typedef int (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
+typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1);
+typedef double (*SimulatorRuntimeFPCall)(double darg0);
+typedef double (*SimulatorRuntimeFPIntCall)(double darg0, intptr_t arg0);
+
+// This signature supports direct call in to API function native callback
+// (refer to InvocationCallback in v8.h).
+typedef void (*SimulatorRuntimeDirectApiCall)(intptr_t arg0);
+typedef void (*SimulatorRuntimeProfilingApiCall)(intptr_t arg0, void* arg1);
+
+// This signature supports direct call to accessor getter callback.
+typedef void (*SimulatorRuntimeDirectGetterCall)(intptr_t arg0, intptr_t arg1);
+typedef void (*SimulatorRuntimeProfilingGetterCall)(intptr_t arg0,
+ intptr_t arg1, void* arg2);
+
+// Software interrupt instructions are used by the simulator to call into the
+// C-based V8 runtime.
+void Simulator::SoftwareInterrupt(Instruction* instr) {
+ int svc = instr->SvcValue();
+ switch (svc) {
+ case kCallRtRedirected: {
+ // Check if stack is aligned. Error if not aligned is reported below to
+ // include information on the function called.
+ bool stack_aligned =
+ (get_register(sp) & (::v8::internal::FLAG_sim_stack_alignment - 1)) ==
+ 0;
+ Redirection* redirection = Redirection::FromSwiInstruction(instr);
+ const int kArgCount = 6;
+ int arg0_regnum = 2;
+ intptr_t result_buffer = 0;
+ bool uses_result_buffer =
+ redirection->type() == ExternalReference::BUILTIN_CALL_TRIPLE ||
+ (redirection->type() == ExternalReference::BUILTIN_CALL_PAIR &&
+ !ABI_RETURNS_OBJECTPAIR_IN_REGS);
+ if (uses_result_buffer) {
+ result_buffer = get_register(r2);
+ arg0_regnum++;
+ }
+ intptr_t arg[kArgCount];
+ for (int i = 0; i < kArgCount - 1; i++) {
+ arg[i] = get_register(arg0_regnum + i);
+ }
+ intptr_t* stack_pointer = reinterpret_cast<intptr_t*>(get_register(sp));
+ arg[5] = stack_pointer[kCalleeRegisterSaveAreaSize / kPointerSize];
+ bool fp_call =
+ (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
+
+ // Place the return address on the stack, making the call GC safe.
+ *reinterpret_cast<intptr_t*>(get_register(sp) +
+ kStackFrameRASlot * kPointerSize) =
+ get_register(r14);
+
+ intptr_t external =
+ reinterpret_cast<intptr_t>(redirection->external_function());
+ if (fp_call) {
+ double dval0, dval1; // one or two double parameters
+ intptr_t ival; // zero or one integer parameters
+ int iresult = 0; // integer return value
+ double dresult = 0; // double return value
+ GetFpArgs(&dval0, &dval1, &ival);
+ if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+ SimulatorRuntimeCall generic_target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ PrintF("Call to host function at %p with args %f, %f",
+ FUNCTION_ADDR(generic_target), dval0, dval1);
+ break;
+ case ExternalReference::BUILTIN_FP_CALL:
+ PrintF("Call to host function at %p with arg %f",
+ FUNCTION_ADDR(generic_target), dval0);
+ break;
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ PrintF("Call to host function at %p with args %f, %" V8PRIdPTR,
+ FUNCTION_ADDR(generic_target), dval0, ival);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ if (!stack_aligned) {
+ PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
+ static_cast<intptr_t>(get_register(sp)));
+ }
+ PrintF("\n");
+ }
+ CHECK(stack_aligned);
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_COMPARE_CALL: {
+ SimulatorRuntimeCompareCall target =
+ reinterpret_cast<SimulatorRuntimeCompareCall>(external);
+ iresult = target(dval0, dval1);
+ set_register(r2, iresult);
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_FP_CALL: {
+ SimulatorRuntimeFPFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
+ dresult = target(dval0, dval1);
+ SetFpResult(dresult);
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_CALL: {
+ SimulatorRuntimeFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ dresult = target(dval0);
+ SetFpResult(dresult);
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_INT_CALL: {
+ SimulatorRuntimeFPIntCall target =
+ reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
+ dresult = target(dval0, ival);
+ SetFpResult(dresult);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ PrintF("Returned %08x\n", iresult);
+ break;
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_FP_CALL:
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ PrintF("Returned %f\n", dresult);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
+ // See callers of MacroAssembler::CallApiFunctionAndReturn for
+ // explanation of register usage.
+ if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+ PrintF("Call to host function at %p args %08" V8PRIxPTR,
+ reinterpret_cast<void*>(external), arg[0]);
+ if (!stack_aligned) {
+ PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
+ static_cast<intptr_t>(get_register(sp)));
+ }
+ PrintF("\n");
+ }
+ CHECK(stack_aligned);
+ SimulatorRuntimeDirectApiCall target =
+ reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
+ target(arg[0]);
+ } else if (redirection->type() == ExternalReference::PROFILING_API_CALL) {
+ // See callers of MacroAssembler::CallApiFunctionAndReturn for
+ // explanation of register usage.
+ if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+ PrintF("Call to host function at %p args %08" V8PRIxPTR
+ " %08" V8PRIxPTR,
+ reinterpret_cast<void*>(external), arg[0], arg[1]);
+ if (!stack_aligned) {
+ PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
+ static_cast<intptr_t>(get_register(sp)));
+ }
+ PrintF("\n");
+ }
+ CHECK(stack_aligned);
+ SimulatorRuntimeProfilingApiCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
+ target(arg[0], Redirection::ReverseRedirection(arg[1]));
+ } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
+ // See callers of MacroAssembler::CallApiFunctionAndReturn for
+ // explanation of register usage.
+ if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+ PrintF("Call to host function at %p args %08" V8PRIxPTR
+ " %08" V8PRIxPTR,
+ reinterpret_cast<void*>(external), arg[0], arg[1]);
+ if (!stack_aligned) {
+ PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
+ static_cast<intptr_t>(get_register(sp)));
+ }
+ PrintF("\n");
+ }
+ CHECK(stack_aligned);
+ SimulatorRuntimeDirectGetterCall target =
+ reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+ if (!ABI_PASSES_HANDLES_IN_REGS) {
+ arg[0] = *(reinterpret_cast<intptr_t*>(arg[0]));
+ }
+ target(arg[0], arg[1]);
+ } else if (redirection->type() ==
+ ExternalReference::PROFILING_GETTER_CALL) {
+ if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+ PrintF("Call to host function at %p args %08" V8PRIxPTR
+ " %08" V8PRIxPTR " %08" V8PRIxPTR,
+ reinterpret_cast<void*>(external), arg[0], arg[1], arg[2]);
+ if (!stack_aligned) {
+ PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
+ static_cast<intptr_t>(get_register(sp)));
+ }
+ PrintF("\n");
+ }
+ CHECK(stack_aligned);
+ SimulatorRuntimeProfilingGetterCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
+ if (!ABI_PASSES_HANDLES_IN_REGS) {
+ arg[0] = *(reinterpret_cast<intptr_t*>(arg[0]));
+ }
+ target(arg[0], arg[1], Redirection::ReverseRedirection(arg[2]));
+ } else {
+ // builtin call.
+ if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+ SimulatorRuntimeCall target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
+ PrintF(
+ "Call to host function at %p,\n"
+ "\t\t\t\targs %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
+ ", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR,
+ FUNCTION_ADDR(target), arg[0], arg[1], arg[2], arg[3], arg[4],
+ arg[5]);
+ if (!stack_aligned) {
+ PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
+ static_cast<intptr_t>(get_register(sp)));
+ }
+ PrintF("\n");
+ }
+ CHECK(stack_aligned);
+ if (redirection->type() == ExternalReference::BUILTIN_CALL_TRIPLE) {
+ SimulatorRuntimeTripleCall target =
+ reinterpret_cast<SimulatorRuntimeTripleCall>(external);
+ ObjectTriple result =
+ target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned {%08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
+ "}\n",
+ reinterpret_cast<intptr_t>(result.x),
+ reinterpret_cast<intptr_t>(result.y),
+ reinterpret_cast<intptr_t>(result.z));
+ }
+ memcpy(reinterpret_cast<void*>(result_buffer), &result,
+ sizeof(ObjectTriple));
+ set_register(r2, result_buffer);
+ } else {
+ if (redirection->type() == ExternalReference::BUILTIN_CALL_PAIR) {
+ SimulatorRuntimePairCall target =
+ reinterpret_cast<SimulatorRuntimePairCall>(external);
+ ObjectPair result =
+ target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
+ intptr_t x;
+ intptr_t y;
+ decodeObjectPair(&result, &x, &y);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned {%08" V8PRIxPTR ", %08" V8PRIxPTR "}\n", x, y);
+ }
+ if (ABI_RETURNS_OBJECTPAIR_IN_REGS) {
+ set_register(r2, x);
+ set_register(r3, y);
+ } else {
+ memcpy(reinterpret_cast<void*>(result_buffer), &result,
+ sizeof(ObjectPair));
+ set_register(r2, result_buffer);
+ }
+ } else {
+ DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL);
+ SimulatorRuntimeCall target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
+ intptr_t result =
+ target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned %08" V8PRIxPTR "\n", result);
+ }
+ set_register(r2, result);
+ }
+ }
+ // #if !V8_TARGET_ARCH_S390X
+ // DCHECK(redirection->type() ==
+ // ExternalReference::BUILTIN_CALL);
+ // SimulatorRuntimeCall target =
+ // reinterpret_cast<SimulatorRuntimeCall>(external);
+ // int64_t result = target(arg[0], arg[1], arg[2], arg[3],
+ // arg[4],
+ // arg[5]);
+ // int32_t lo_res = static_cast<int32_t>(result);
+ // int32_t hi_res = static_cast<int32_t>(result >> 32);
+ // #if !V8_TARGET_LITTLE_ENDIAN
+ // if (::v8::internal::FLAG_trace_sim) {
+ // PrintF("Returned %08x\n", hi_res);
+ // }
+ // set_register(r2, hi_res);
+ // set_register(r3, lo_res);
+ // #else
+ // if (::v8::internal::FLAG_trace_sim) {
+ // PrintF("Returned %08x\n", lo_res);
+ // }
+ // set_register(r2, lo_res);
+ // set_register(r3, hi_res);
+ // #endif
+ // #else
+ // if (redirection->type() == ExternalReference::BUILTIN_CALL) {
+ // SimulatorRuntimeCall target =
+ // reinterpret_cast<SimulatorRuntimeCall>(external);
+ // intptr_t result = target(arg[0], arg[1], arg[2], arg[3],
+ // arg[4],
+ // arg[5]);
+ // if (::v8::internal::FLAG_trace_sim) {
+ // PrintF("Returned %08" V8PRIxPTR "\n", result);
+ // }
+ // set_register(r2, result);
+ // } else {
+ // DCHECK(redirection->type() ==
+ // ExternalReference::BUILTIN_CALL_PAIR);
+ // SimulatorRuntimePairCall target =
+ // reinterpret_cast<SimulatorRuntimePairCall>(external);
+ // ObjectPair result = target(arg[0], arg[1], arg[2], arg[3],
+ // arg[4], arg[5]);
+ // if (::v8::internal::FLAG_trace_sim) {
+ // PrintF("Returned %08" V8PRIxPTR ", %08" V8PRIxPTR "\n",
+ // result.x, result.y);
+ // }
+ // #if ABI_RETURNS_OBJECTPAIR_IN_REGS
+ // set_register(r2, result.x);
+ // set_register(r3, result.y);
+ // #else
+ // memcpy(reinterpret_cast<void *>(result_buffer), &result,
+ // sizeof(ObjectPair));
+ // #endif
+ // }
+ // #endif
+ }
+ int64_t saved_lr = *reinterpret_cast<intptr_t*>(
+ get_register(sp) + kStackFrameRASlot * kPointerSize);
+#if (!V8_TARGET_ARCH_S390X && V8_HOST_ARCH_S390)
+ // On zLinux-31, the saved_lr might be tagged with a high bit of 1.
+ // Cleanse it before proceeding with simulation.
+ saved_lr &= 0x7FFFFFFF;
+#endif
+ set_pc(saved_lr);
+ break;
+ }
+ case kBreakpoint: {
+ S390Debugger dbg(this);
+ dbg.Debug();
+ break;
+ }
+ // stop uses all codes greater than 1 << 23.
+ default: {
+ if (svc >= (1 << 23)) {
+ uint32_t code = svc & kStopCodeMask;
+ if (isWatchedStop(code)) {
+ IncreaseStopCounter(code);
+ }
+ // Stop if it is enabled, otherwise go on jumping over the stop
+ // and the message address.
+ if (isEnabledStop(code)) {
+ S390Debugger dbg(this);
+ dbg.Stop(instr);
+ } else {
+ set_pc(get_pc() + sizeof(FourByteInstr) + kPointerSize);
+ }
+ } else {
+ // This is not a valid svc code.
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
+}
+
+// Stop helper functions.
+bool Simulator::isStopInstruction(Instruction* instr) {
+ return (instr->Bits(27, 24) == 0xF) && (instr->SvcValue() >= kStopCode);
+}
+
+bool Simulator::isWatchedStop(uint32_t code) {
+ DCHECK(code <= kMaxStopCode);
+ return code < kNumOfWatchedStops;
+}
+
+bool Simulator::isEnabledStop(uint32_t code) {
+ DCHECK(code <= kMaxStopCode);
+ // Unwatched stops are always enabled.
+ return !isWatchedStop(code) ||
+ !(watched_stops_[code].count & kStopDisabledBit);
+}
+
+void Simulator::EnableStop(uint32_t code) {
+ DCHECK(isWatchedStop(code));
+ if (!isEnabledStop(code)) {
+ watched_stops_[code].count &= ~kStopDisabledBit;
+ }
+}
+
+void Simulator::DisableStop(uint32_t code) {
+ DCHECK(isWatchedStop(code));
+ if (isEnabledStop(code)) {
+ watched_stops_[code].count |= kStopDisabledBit;
+ }
+}
+
+void Simulator::IncreaseStopCounter(uint32_t code) {
+ DCHECK(code <= kMaxStopCode);
+ DCHECK(isWatchedStop(code));
+ if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
+ PrintF(
+ "Stop counter for code %i has overflowed.\n"
+ "Enabling this code and reseting the counter to 0.\n",
+ code);
+ watched_stops_[code].count = 0;
+ EnableStop(code);
+ } else {
+ watched_stops_[code].count++;
+ }
+}
+
+// Print a stop status.
+void Simulator::PrintStopInfo(uint32_t code) {
+ DCHECK(code <= kMaxStopCode);
+ if (!isWatchedStop(code)) {
+ PrintF("Stop not watched.");
+ } else {
+ const char* state = isEnabledStop(code) ? "Enabled" : "Disabled";
+ int32_t count = watched_stops_[code].count & ~kStopDisabledBit;
+ // Don't print the state of unused breakpoints.
+ if (count != 0) {
+ if (watched_stops_[code].desc) {
+ PrintF("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n", code, code,
+ state, count, watched_stops_[code].desc);
+ } else {
+ PrintF("stop %i - 0x%x: \t%s, \tcounter = %i\n", code, code, state,
+ count);
+ }
+ }
+ }
+}
+
+// Method for checking overflow on signed addition:
+// Test src1 and src2 have opposite sign,
+// (1) No overflow if they have opposite sign
+// (2) Test the result and one of the operands have opposite sign
+// (a) No overflow if they don't have opposite sign
+// (b) Overflow if opposite
+#define CheckOverflowForIntAdd(src1, src2, type) \
+ OverflowFromSigned<type>(src1 + src2, src1, src2, true);
+
+#define CheckOverflowForIntSub(src1, src2, type) \
+ OverflowFromSigned<type>(src1 - src2, src1, src2, false);
+
+// Method for checking overflow on unsigned addtion
+#define CheckOverflowForUIntAdd(src1, src2) \
+ ((src1) + (src2) < (src1) || (src1) + (src2) < (src2))
+
+// Method for checking overflow on unsigned subtraction
+#define CheckOverflowForUIntSub(src1, src2) ((src1) - (src2) > (src1))
+
+// Method for checking overflow on multiplication
+#define CheckOverflowForMul(src1, src2) (((src1) * (src2)) / (src2) != (src1))
+
+// Method for checking overflow on shift right
+#define CheckOverflowForShiftRight(src1, src2) \
+ (((src1) >> (src2)) << (src2) != (src1))
+
+// Method for checking overflow on shift left
+#define CheckOverflowForShiftLeft(src1, src2) \
+ (((src1) << (src2)) >> (src2) != (src1))
+
+// S390 Decode and simulate helpers
+bool Simulator::DecodeTwoByte(Instruction* instr) {
+ Opcode op = instr->S390OpcodeValue();
+
+ switch (op) {
+ // RR format instructions
+ case AR:
+ case SR:
+ case MR:
+ case DR:
+ case OR:
+ case NR:
+ case XR: {
+ RRInstruction* rrinst = reinterpret_cast<RRInstruction*>(instr);
+ int r1 = rrinst->R1Value();
+ int r2 = rrinst->R2Value();
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ bool isOF = false;
+ switch (op) {
+ case AR:
+ isOF = CheckOverflowForIntAdd(r1_val, r2_val, int32_t);
+ r1_val += r2_val;
+ SetS390ConditionCode<int32_t>(r1_val, 0);
+ SetS390OverflowCode(isOF);
+ break;
+ case SR:
+ isOF = CheckOverflowForIntSub(r1_val, r2_val, int32_t);
+ r1_val -= r2_val;
+ SetS390ConditionCode<int32_t>(r1_val, 0);
+ SetS390OverflowCode(isOF);
+ break;
+ case OR:
+ r1_val |= r2_val;
+ SetS390BitWiseConditionCode<uint32_t>(r1_val);
+ break;
+ case NR:
+ r1_val &= r2_val;
+ SetS390BitWiseConditionCode<uint32_t>(r1_val);
+ break;
+ case XR:
+ r1_val ^= r2_val;
+ SetS390BitWiseConditionCode<uint32_t>(r1_val);
+ break;
+ case MR: {
+ DCHECK(r1 % 2 == 0);
+ r1_val = get_low_register<int32_t>(r1 + 1);
+ int64_t product =
+ static_cast<int64_t>(r1_val) * static_cast<int64_t>(r2_val);
+ int32_t high_bits = product >> 32;
+ r1_val = high_bits;
+ int32_t low_bits = product & 0x00000000FFFFFFFF;
+ set_low_register(r1, high_bits);
+ set_low_register(r1 + 1, low_bits);
+ break;
+ }
+ case DR: {
+ // reg-reg pair should be even-odd pair, assert r1 is an even register
+ DCHECK(r1 % 2 == 0);
+ // leftmost 32 bits of the dividend are in r1
+ // rightmost 32 bits of the dividend are in r1+1
+ // get the signed value from r1
+ int64_t dividend = static_cast<int64_t>(r1_val) << 32;
+ // get unsigned value from r1+1
+ // avoid addition with sign-extended r1+1 value
+ dividend += get_low_register<uint32_t>(r1 + 1);
+ int32_t remainder = dividend % r2_val;
+ int32_t quotient = dividend / r2_val;
+ r1_val = remainder;
+ set_low_register(r1, remainder);
+ set_low_register(r1 + 1, quotient);
+ break; // reg pair
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ set_low_register(r1, r1_val);
+ break;
+ }
+ case LR: {
+ RRInstruction* rrinst = reinterpret_cast<RRInstruction*>(instr);
+ int r1 = rrinst->R1Value();
+ int r2 = rrinst->R2Value();
+ set_low_register(r1, get_low_register<int32_t>(r2));
+ break;
+ }
+ case LDR: {
+ RRInstruction* rrinst = reinterpret_cast<RRInstruction*>(instr);
+ int r1 = rrinst->R1Value();
+ int r2 = rrinst->R2Value();
+ int64_t r2_val = get_d_register(r2);
+ set_d_register(r1, r2_val);
+ break;
+ }
+ case CR: {
+ RRInstruction* rrinst = reinterpret_cast<RRInstruction*>(instr);
+ int r1 = rrinst->R1Value();
+ int r2 = rrinst->R2Value();
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ SetS390ConditionCode<int32_t>(r1_val, r2_val);
+ break;
+ }
+ case CLR: {
+ RRInstruction* rrinst = reinterpret_cast<RRInstruction*>(instr);
+ int r1 = rrinst->R1Value();
+ int r2 = rrinst->R2Value();
+ uint32_t r1_val = get_low_register<uint32_t>(r1);
+ uint32_t r2_val = get_low_register<uint32_t>(r2);
+ SetS390ConditionCode<uint32_t>(r1_val, r2_val);
+ break;
+ }
+ case BCR: {
+ RRInstruction* rrinst = reinterpret_cast<RRInstruction*>(instr);
+ int r1 = rrinst->R1Value();
+ int r2 = rrinst->R2Value();
+ if (TestConditionCode(Condition(r1))) {
+ intptr_t r2_val = get_register(r2);
+#if (!V8_TARGET_ARCH_S390X && V8_HOST_ARCH_S390)
+ // On 31-bit, the top most bit may be 0 or 1, but is ignored by the
+ // hardware. Cleanse the top bit before jumping to it, unless it's one
+ // of the special PCs
+ if (r2_val != bad_lr && r2_val != end_sim_pc) r2_val &= 0x7FFFFFFF;
+#endif
+ set_pc(r2_val);
+ }
+ break;
+ }
+ case LTR: {
+ RRInstruction* rrinst = reinterpret_cast<RRInstruction*>(instr);
+ int r1 = rrinst->R1Value();
+ int r2 = rrinst->R2Value();
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ SetS390ConditionCode<int32_t>(r2_val, 0);
+ set_low_register(r1, r2_val);
+ break;
+ }
+ case ALR:
+ case SLR: {
+ RRInstruction* rrinst = reinterpret_cast<RRInstruction*>(instr);
+ int r1 = rrinst->R1Value();
+ int r2 = rrinst->R2Value();
+ uint32_t r1_val = get_low_register<uint32_t>(r1);
+ uint32_t r2_val = get_low_register<uint32_t>(r2);
+ uint32_t alu_out = 0;
+ bool isOF = false;
+ if (ALR == op) {
+ alu_out = r1_val + r2_val;
+ isOF = CheckOverflowForUIntAdd(r1_val, r2_val);
+ } else if (SLR == op) {
+ alu_out = r1_val - r2_val;
+ isOF = CheckOverflowForUIntSub(r1_val, r2_val);
+ } else {
+ UNREACHABLE();
+ }
+ set_low_register(r1, alu_out);
+ SetS390ConditionCodeCarry<uint32_t>(alu_out, isOF);
+ break;
+ }
+ case LNR: {
+ // Load Negative (32)
+ RRInstruction* rrinst = reinterpret_cast<RRInstruction*>(instr);
+ int r1 = rrinst->R1Value();
+ int r2 = rrinst->R2Value();
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ r2_val = (r2_val >= 0) ? -r2_val : r2_val; // If pos, then negate it.
+ set_low_register(r1, r2_val);
+ condition_reg_ = (r2_val == 0) ? CC_EQ : CC_LT; // CC0 - result is zero
+ // CC1 - result is negative
+ break;
+ }
+ case BASR: {
+ RRInstruction* rrinst = reinterpret_cast<RRInstruction*>(instr);
+ int r1 = rrinst->R1Value();
+ int r2 = rrinst->R2Value();
+ intptr_t link_addr = get_pc() + 2;
+ // If R2 is zero, the BASR does not branch.
+ int64_t r2_val = (r2 == 0) ? link_addr : get_register(r2);
+#if (!V8_TARGET_ARCH_S390X && V8_HOST_ARCH_S390)
+ // On 31-bit, the top most bit may be 0 or 1, which can cause issues
+ // for stackwalker. The top bit should either be cleanse before being
+ // pushed onto the stack, or during stack walking when dereferenced.
+ // For simulator, we'll take the worst case scenario and always tag
+ // the high bit, to flush out more problems.
+ link_addr |= 0x80000000;
+#endif
+ set_register(r1, link_addr);
+ set_pc(r2_val);
+ break;
+ }
+ case LCR: {
+ RRInstruction* rrinst = reinterpret_cast<RRInstruction*>(instr);
+ int r1 = rrinst->R1Value();
+ int r2 = rrinst->R2Value();
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ int32_t original_r2_val = r2_val;
+ r2_val = ~r2_val;
+ r2_val = r2_val + 1;
+ set_low_register(r1, r2_val);
+ SetS390ConditionCode<int32_t>(r2_val, 0);
+ // Checks for overflow where r2_val = -2147483648.
+ // Cannot do int comparison due to GCC 4.8 bug on x86.
+ // Detect INT_MIN alternatively, as it is the only value where both
+ // original and result are negative due to overflow.
+ if (r2_val < 0 && original_r2_val < 0) {
+ SetS390OverflowCode(true);
+ }
+ break;
+ }
+ case BKPT: {
+ set_pc(get_pc() + 2);
+ S390Debugger dbg(this);
+ dbg.Debug();
+ break;
+ }
+ default:
+ UNREACHABLE();
+ return false;
+ break;
+ }
+ return true;
+}
+
+// Decode routine for four-byte instructions
+bool Simulator::DecodeFourByte(Instruction* instr) {
+ Opcode op = instr->S390OpcodeValue();
+
+ // Pre-cast instruction to various types
+ RREInstruction* rreInst = reinterpret_cast<RREInstruction*>(instr);
+ SIInstruction* siInstr = reinterpret_cast<SIInstruction*>(instr);
+
+ switch (op) {
+ case POPCNT_Z: {
+ int r1 = rreInst->R1Value();
+ int r2 = rreInst->R2Value();
+ int64_t r2_val = get_register(r2);
+ int64_t r1_val = 0;
+
+ uint8_t* r2_val_ptr = reinterpret_cast<uint8_t*>(&r2_val);
+ uint8_t* r1_val_ptr = reinterpret_cast<uint8_t*>(&r1_val);
+ for (int i = 0; i < 8; i++) {
+ uint32_t x = static_cast<uint32_t>(r2_val_ptr[i]);
+#if defined(__GNUC__)
+ r1_val_ptr[i] = __builtin_popcount(x);
+#else
+#error unsupport __builtin_popcount
+#endif
+ }
+
+ set_register(r1, static_cast<uint64_t>(r1_val));
+ break;
+ }
+ case LLGFR: {
+ int r1 = rreInst->R1Value();
+ int r2 = rreInst->R2Value();
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ uint64_t r2_finalval =
+ (static_cast<uint64_t>(r2_val) & 0x00000000ffffffff);
+ set_register(r1, r2_finalval);
+ break;
+ }
+ case EX: {
+ RXInstruction* rxinst = reinterpret_cast<RXInstruction*>(instr);
+ int r1 = rxinst->R1Value();
+ int b2 = rxinst->B2Value();
+ int x2 = rxinst->X2Value();
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t d2_val = rxinst->D2Value();
+ int32_t r1_val = get_low_register<int32_t>(r1);
+
+ SixByteInstr the_instr = Instruction::InstructionBits(
+ reinterpret_cast<const byte*>(b2_val + x2_val + d2_val));
+ int length = Instruction::InstructionLength(
+ reinterpret_cast<const byte*>(b2_val + x2_val + d2_val));
+
+ char new_instr_buf[8];
+ char* addr = reinterpret_cast<char*>(&new_instr_buf[0]);
+ the_instr |= static_cast<SixByteInstr>(r1_val & 0xff)
+ << (8 * length - 16);
+ Instruction::SetInstructionBits<SixByteInstr>(
+ reinterpret_cast<byte*>(addr), static_cast<SixByteInstr>(the_instr));
+ ExecuteInstruction(reinterpret_cast<Instruction*>(addr), false);
+ break;
+ }
+ case LGR: {
+ // Load Register (64)
+ int r1 = rreInst->R1Value();
+ int r2 = rreInst->R2Value();
+ set_register(r1, get_register(r2));
+ break;
+ }
+ case LDGR: {
+ // Load FPR from GPR (L <- 64)
+ uint64_t int_val = get_register(rreInst->R2Value());
+ // double double_val = bit_cast<double, uint64_t>(int_val);
+ // set_d_register_from_double(rreInst->R1Value(), double_val);
+ set_d_register(rreInst->R1Value(), int_val);
+ break;
+ }
+ case LGDR: {
+ // Load GPR from FPR (64 <- L)
+ int64_t double_val = get_d_register(rreInst->R2Value());
+ set_register(rreInst->R1Value(), double_val);
+ break;
+ }
+ case LTGR: {
+ // Load Register (64)
+ int r1 = rreInst->R1Value();
+ int r2 = rreInst->R2Value();
+ int64_t r2_val = get_register(r2);
+ SetS390ConditionCode<int64_t>(r2_val, 0);
+ set_register(r1, get_register(r2));
+ break;
+ }
+ case LZDR: {
+ int r1 = rreInst->R1Value();
+ set_d_register_from_double(r1, 0.0);
+ break;
+ }
+ case LTEBR: {
+ RREInstruction* rreinst = reinterpret_cast<RREInstruction*>(instr);
+ int r1 = rreinst->R1Value();
+ int r2 = rreinst->R2Value();
+ int64_t r2_val = get_d_register(r2);
+ float fr2_val = get_float32_from_d_register(r2);
+ SetS390ConditionCode<float>(fr2_val, 0.0);
+ set_d_register(r1, r2_val);
+ break;
+ }
+ case LTDBR: {
+ RREInstruction* rreinst = reinterpret_cast<RREInstruction*>(instr);
+ int r1 = rreinst->R1Value();
+ int r2 = rreinst->R2Value();
+ int64_t r2_val = get_d_register(r2);
+ SetS390ConditionCode<double>(bit_cast<double, int64_t>(r2_val), 0.0);
+ set_d_register(r1, r2_val);
+ break;
+ }
+ case CGR: {
+ // Compare (64)
+ int64_t r1_val = get_register(rreInst->R1Value());
+ int64_t r2_val = get_register(rreInst->R2Value());
+ SetS390ConditionCode<int64_t>(r1_val, r2_val);
+ break;
+ }
+ case CLGR: {
+ // Compare Logical (64)
+ uint64_t r1_val = static_cast<uint64_t>(get_register(rreInst->R1Value()));
+ uint64_t r2_val = static_cast<uint64_t>(get_register(rreInst->R2Value()));
+ SetS390ConditionCode<uint64_t>(r1_val, r2_val);
+ break;
+ }
+ case LH: {
+ // Load Halfword
+ RXInstruction* rxinst = reinterpret_cast<RXInstruction*>(instr);
+ int r1 = rxinst->R1Value();
+ int x2 = rxinst->X2Value();
+ int b2 = rxinst->B2Value();
+
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ intptr_t d2_val = rxinst->D2Value();
+ intptr_t mem_addr = x2_val + b2_val + d2_val;
+
+ int32_t result = static_cast<int32_t>(ReadH(mem_addr, instr));
+ set_low_register(r1, result);
+ break;
+ }
+ case LHI: {
+ RIInstruction* riinst = reinterpret_cast<RIInstruction*>(instr);
+ int r1 = riinst->R1Value();
+ int i = riinst->I2Value();
+ set_low_register(r1, i);
+ break;
+ }
+ case LGHI: {
+ RIInstruction* riinst = reinterpret_cast<RIInstruction*>(instr);
+ int r1 = riinst->R1Value();
+ int64_t i = riinst->I2Value();
+ set_register(r1, i);
+ break;
+ }
+ case CHI: {
+ RIInstruction* riinst = reinterpret_cast<RIInstruction*>(instr);
+ int r1 = riinst->R1Value();
+ int16_t i = riinst->I2Value();
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ SetS390ConditionCode<int32_t>(r1_val, i);
+ break;
+ }
+ case CGHI: {
+ RIInstruction* riinst = reinterpret_cast<RIInstruction*>(instr);
+ int r1 = riinst->R1Value();
+ int64_t i = static_cast<int64_t>(riinst->I2Value());
+ int64_t r1_val = get_register(r1);
+ SetS390ConditionCode<int64_t>(r1_val, i);
+ break;
+ }
+ case BRAS: {
+ // Branch Relative and Save
+ RILInstruction* rilInstr = reinterpret_cast<RILInstruction*>(instr);
+ int r1 = rilInstr->R1Value();
+ intptr_t d2 = rilInstr->I2Value();
+ intptr_t pc = get_pc();
+ // Set PC of next instruction to register
+ set_register(r1, pc + sizeof(FourByteInstr));
+ // Update PC to branch target
+ set_pc(pc + d2 * 2);
+ break;
+ }
+ case BRC: {
+ // Branch Relative on Condition
+ RIInstruction* riinst = reinterpret_cast<RIInstruction*>(instr);
+ int m1 = riinst->M1Value();
+ if (TestConditionCode((Condition)m1)) {
+ intptr_t offset = riinst->I2Value() * 2;
+ set_pc(get_pc() + offset);
+ }
+ break;
+ }
+ case BRCT:
+ case BRCTG: {
+ // Branch On Count (32/64).
+ RIInstruction* riinst = reinterpret_cast<RIInstruction*>(instr);
+ int r1 = riinst->R1Value();
+ int64_t value =
+ (op == BRCT) ? get_low_register<int32_t>(r1) : get_register(r1);
+ if (BRCT == op)
+ set_low_register(r1, --value);
+ else
+ set_register(r1, --value);
+ // Branch if value != 0
+ if (value != 0) {
+ intptr_t offset = riinst->I2Value() * 2;
+ set_pc(get_pc() + offset);
+ }
+ break;
+ }
+ case BXH: {
+ RSInstruction* rsinst = reinterpret_cast<RSInstruction*>(instr);
+ int r1 = rsinst->R1Value();
+ int r3 = rsinst->R3Value();
+ int b2 = rsinst->B2Value();
+ int d2 = rsinst->D2Value();
+
+ // r1_val is the first operand, r3_val is the increment
+ int32_t r1_val = r1 == 0 ? 0 : get_register(r1);
+ int32_t r3_val = r2 == 0 ? 0 : get_register(r3);
+ intptr_t b2_val = b2 == 0 ? 0 : get_register(b2);
+ intptr_t branch_address = b2_val + d2;
+ // increment r1_val
+ r1_val += r3_val;
+
+ // if the increment is even, then it designates a pair of registers
+ // and the contents of the even and odd registers of the pair are used as
+ // the increment and compare value respectively. If the increment is odd,
+ // the increment itself is used as both the increment and compare value
+ int32_t compare_val = r3 % 2 == 0 ? get_register(r3 + 1) : r3_val;
+ if (r1_val > compare_val) {
+ // branch to address if r1_val is greater than compare value
+ set_pc(branch_address);
+ }
+
+ // update contents of register in r1 with the new incremented value
+ set_register(r1, r1_val);
+ break;
+ }
+ case IIHH:
+ case IIHL:
+ case IILH:
+ case IILL: {
+ UNIMPLEMENTED();
+ break;
+ }
+ case STM:
+ case LM: {
+ // Store Multiple 32-bits.
+ RSInstruction* rsinstr = reinterpret_cast<RSInstruction*>(instr);
+ int r1 = rsinstr->R1Value();
+ int r3 = rsinstr->R3Value();
+ int rb = rsinstr->B2Value();
+ int offset = rsinstr->D2Value();
+
+ // Regs roll around if r3 is less than r1.
+ // Artifically increase r3 by 16 so we can calculate
+ // the number of regs stored properly.
+ if (r3 < r1) r3 += 16;
+
+ int32_t rb_val = (rb == 0) ? 0 : get_low_register<int32_t>(rb);
+
+ // Store each register in ascending order.
+ for (int i = 0; i <= r3 - r1; i++) {
+ if (op == STM) {
+ int32_t value = get_low_register<int32_t>((r1 + i) % 16);
+ WriteW(rb_val + offset + 4 * i, value, instr);
+ } else if (op == LM) {
+ int32_t value = ReadW(rb_val + offset + 4 * i, instr);
+ set_low_register((r1 + i) % 16, value);
+ }
+ }
+ break;
+ }
+ case SLL:
+ case SRL: {
+ RSInstruction* rsInstr = reinterpret_cast<RSInstruction*>(instr);
+ int r1 = rsInstr->R1Value();
+ int b2 = rsInstr->B2Value();
+ intptr_t d2 = rsInstr->D2Value();
+ // only takes rightmost 6bits
+ int64_t b2_val = b2 == 0 ? 0 : get_register(b2);
+ int shiftBits = (b2_val + d2) & 0x3F;
+ uint32_t r1_val = get_low_register<uint32_t>(r1);
+ uint32_t alu_out = 0;
+ if (SLL == op) {
+ alu_out = r1_val << shiftBits;
+ } else if (SRL == op) {
+ alu_out = r1_val >> shiftBits;
+ } else {
+ UNREACHABLE();
+ }
+ set_low_register(r1, alu_out);
+ break;
+ }
+ case SLDL: {
+ RSInstruction* rsInstr = reinterpret_cast<RSInstruction*>(instr);
+ int r1 = rsInstr->R1Value();
+ int b2 = rsInstr->B2Value();
+ intptr_t d2 = rsInstr->D2Value();
+ // only takes rightmost 6bits
+ int64_t b2_val = b2 == 0 ? 0 : get_register(b2);
+ int shiftBits = (b2_val + d2) & 0x3F;
+
+ DCHECK(r1 % 2 == 0);
+ uint32_t r1_val = get_low_register<uint32_t>(r1);
+ uint32_t r1_next_val = get_low_register<uint32_t>(r1 + 1);
+ uint64_t alu_out = (static_cast<uint64_t>(r1_val) << 32) |
+ (static_cast<uint64_t>(r1_next_val));
+ alu_out <<= shiftBits;
+ set_low_register(r1 + 1, static_cast<uint32_t>(alu_out));
+ set_low_register(r1, static_cast<uint32_t>(alu_out >> 32));
+ break;
+ }
+ case SLA:
+ case SRA: {
+ RSInstruction* rsInstr = reinterpret_cast<RSInstruction*>(instr);
+ int r1 = rsInstr->R1Value();
+ int b2 = rsInstr->B2Value();
+ intptr_t d2 = rsInstr->D2Value();
+ // only takes rightmost 6bits
+ int64_t b2_val = b2 == 0 ? 0 : get_register(b2);
+ int shiftBits = (b2_val + d2) & 0x3F;
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ int32_t alu_out = 0;
+ bool isOF = false;
+ if (op == SLA) {
+ isOF = CheckOverflowForShiftLeft(r1_val, shiftBits);
+ alu_out = r1_val << shiftBits;
+ } else if (op == SRA) {
+ alu_out = r1_val >> shiftBits;
+ }
+ set_low_register(r1, alu_out);
+ SetS390ConditionCode<int32_t>(alu_out, 0);
+ SetS390OverflowCode(isOF);
+ break;
+ }
+ case LLHR: {
+ UNIMPLEMENTED();
+ break;
+ }
+ case LLGHR: {
+ UNIMPLEMENTED();
+ break;
+ }
+ case L:
+ case LA:
+ case LD:
+ case LE: {
+ RXInstruction* rxinst = reinterpret_cast<RXInstruction*>(instr);
+ int b2 = rxinst->B2Value();
+ int x2 = rxinst->X2Value();
+ int32_t r1 = rxinst->R1Value();
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t d2_val = rxinst->D2Value();
+ intptr_t addr = b2_val + x2_val + d2_val;
+ if (op == L) {
+ int32_t mem_val = ReadW(addr, instr);
+ set_low_register(r1, mem_val);
+ } else if (op == LA) {
+ set_register(r1, addr);
+ } else if (op == LD) {
+ int64_t dbl_val = *reinterpret_cast<int64_t*>(addr);
+ set_d_register(r1, dbl_val);
+ } else if (op == LE) {
+ float float_val = *reinterpret_cast<float*>(addr);
+ set_d_register_from_float32(r1, float_val);
+ }
+ break;
+ }
+ case C:
+ case CL: {
+ RXInstruction* rxinst = reinterpret_cast<RXInstruction*>(instr);
+ int b2 = rxinst->B2Value();
+ int x2 = rxinst->X2Value();
+ int32_t r1_val = get_low_register<int32_t>(rxinst->R1Value());
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t d2_val = rxinst->D2Value();
+ intptr_t addr = b2_val + x2_val + d2_val;
+ int32_t mem_val = ReadW(addr, instr);
+ if (C == op)
+ SetS390ConditionCode<int32_t>(r1_val, mem_val);
+ else if (CL == op)
+ SetS390ConditionCode<uint32_t>(r1_val, mem_val);
+ break;
+ }
+ case CLI: {
+ // Compare Immediate (Mem - Imm) (8)
+ int b1 = siInstr->B1Value();
+ int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+ intptr_t d1_val = siInstr->D1Value();
+ intptr_t addr = b1_val + d1_val;
+ uint8_t mem_val = ReadB(addr);
+ uint8_t imm_val = siInstr->I2Value();
+ SetS390ConditionCode<uint8_t>(mem_val, imm_val);
+ break;
+ }
+ case TM: {
+ // Test Under Mask (Mem - Imm) (8)
+ int b1 = siInstr->B1Value();
+ int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+ intptr_t d1_val = siInstr->D1Value();
+ intptr_t addr = b1_val + d1_val;
+ uint8_t mem_val = ReadB(addr);
+ uint8_t imm_val = siInstr->I2Value();
+ uint8_t selected_bits = mem_val & imm_val;
+ // CC0: Selected bits are zero
+ // CC1: Selected bits mixed zeros and ones
+ // CC3: Selected bits all ones
+ if (0 == selected_bits) {
+ condition_reg_ = CC_EQ; // CC0
+ } else if (selected_bits == imm_val) {
+ condition_reg_ = 0x1; // CC3
+ } else {
+ condition_reg_ = 0x4; // CC1
+ }
+ break;
+ }
+ case ST:
+ case STE:
+ case STD: {
+ RXInstruction* rxinst = reinterpret_cast<RXInstruction*>(instr);
+ int b2 = rxinst->B2Value();
+ int x2 = rxinst->X2Value();
+ int32_t r1_val = get_low_register<int32_t>(rxinst->R1Value());
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t d2_val = rxinst->D2Value();
+ intptr_t addr = b2_val + x2_val + d2_val;
+ if (op == ST) {
+ WriteW(addr, r1_val, instr);
+ } else if (op == STD) {
+ int64_t frs_val = get_d_register(rxinst->R1Value());
+ WriteDW(addr, frs_val);
+ } else if (op == STE) {
+ int64_t frs_val = get_d_register(rxinst->R1Value()) >> 32;
+ WriteW(addr, static_cast<int32_t>(frs_val), instr);
+ }
+ break;
+ }
+ case LTGFR:
+ case LGFR: {
+ // Load and Test Register (64 <- 32) (Sign Extends 32-bit val)
+ // Load Register (64 <- 32) (Sign Extends 32-bit val)
+ RREInstruction* rreInstr = reinterpret_cast<RREInstruction*>(instr);
+ int r1 = rreInstr->R1Value();
+ int r2 = rreInstr->R2Value();
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ int64_t result = static_cast<int64_t>(r2_val);
+ set_register(r1, result);
+
+ if (LTGFR == op) SetS390ConditionCode<int64_t>(result, 0);
+ break;
+ }
+ case LNGR: {
+ // Load Negative (64)
+ int r1 = rreInst->R1Value();
+ int r2 = rreInst->R2Value();
+ int64_t r2_val = get_register(r2);
+ r2_val = (r2_val >= 0) ? -r2_val : r2_val; // If pos, then negate it.
+ set_register(r1, r2_val);
+ condition_reg_ = (r2_val == 0) ? CC_EQ : CC_LT; // CC0 - result is zero
+ // CC1 - result is negative
+ break;
+ }
+ case TRAP4: {
+ // whack the space of the caller allocated stack
+ int64_t sp_addr = get_register(sp);
+ for (int i = 0; i < kCalleeRegisterSaveAreaSize / kPointerSize; ++i) {
+ // we dont want to whack the RA (r14)
+ if (i != 14) (reinterpret_cast<intptr_t*>(sp_addr))[i] = 0xdeadbabe;
+ }
+ SoftwareInterrupt(instr);
+ break;
+ }
+ case STC: {
+ // Store Character/Byte
+ RXInstruction* rxinst = reinterpret_cast<RXInstruction*>(instr);
+ int b2 = rxinst->B2Value();
+ int x2 = rxinst->X2Value();
+ uint8_t r1_val = get_low_register<int32_t>(rxinst->R1Value());
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t d2_val = rxinst->D2Value();
+ intptr_t mem_addr = b2_val + x2_val + d2_val;
+ WriteB(mem_addr, r1_val);
+ break;
+ }
+ case STH: {
+ RXInstruction* rxinst = reinterpret_cast<RXInstruction*>(instr);
+ int b2 = rxinst->B2Value();
+ int x2 = rxinst->X2Value();
+ int16_t r1_val = get_low_register<int32_t>(rxinst->R1Value());
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t d2_val = rxinst->D2Value();
+ intptr_t mem_addr = b2_val + x2_val + d2_val;
+ WriteH(mem_addr, r1_val, instr);
+ break;
+ }
+#if V8_TARGET_ARCH_S390X
+ case LCGR: {
+ int r1 = rreInst->R1Value();
+ int r2 = rreInst->R2Value();
+ int64_t r2_val = get_register(r2);
+ r2_val = ~r2_val;
+ r2_val = r2_val + 1;
+ set_register(r1, r2_val);
+ SetS390ConditionCode<int64_t>(r2_val, 0);
+ // if the input is INT_MIN, loading its compliment would be overflowing
+ if (r2_val < 0 && (r2_val + 1) > 0) {
+ SetS390OverflowCode(true);
+ }
+ break;
+ }
+#endif
+ case SRDA: {
+ RSInstruction* rsInstr = reinterpret_cast<RSInstruction*>(instr);
+ int r1 = rsInstr->R1Value();
+ DCHECK(r1 % 2 == 0); // must be a reg pair
+ int b2 = rsInstr->B2Value();
+ intptr_t d2 = rsInstr->D2Value();
+ // only takes rightmost 6bits
+ int64_t b2_val = b2 == 0 ? 0 : get_register(b2);
+ int shiftBits = (b2_val + d2) & 0x3F;
+ int64_t opnd1 = static_cast<int64_t>(get_low_register<int32_t>(r1)) << 32;
+ int64_t opnd2 = static_cast<uint64_t>(get_low_register<uint32_t>(r1 + 1));
+ int64_t r1_val = opnd1 + opnd2;
+ int64_t alu_out = r1_val >> shiftBits;
+ set_low_register(r1, alu_out >> 32);
+ set_low_register(r1 + 1, alu_out & 0x00000000FFFFFFFF);
+ SetS390ConditionCode<int32_t>(alu_out, 0);
+ break;
+ }
+ case SRDL: {
+ RSInstruction* rsInstr = reinterpret_cast<RSInstruction*>(instr);
+ int r1 = rsInstr->R1Value();
+ DCHECK(r1 % 2 == 0); // must be a reg pair
+ int b2 = rsInstr->B2Value();
+ intptr_t d2 = rsInstr->D2Value();
+ // only takes rightmost 6bits
+ int64_t b2_val = b2 == 0 ? 0 : get_register(b2);
+ int shiftBits = (b2_val + d2) & 0x3F;
+ uint64_t opnd1 = static_cast<uint64_t>(get_low_register<uint32_t>(r1))
+ << 32;
+ uint64_t opnd2 =
+ static_cast<uint64_t>(get_low_register<uint32_t>(r1 + 1));
+ uint64_t r1_val = opnd1 | opnd2;
+ uint64_t alu_out = r1_val >> shiftBits;
+ set_low_register(r1, alu_out >> 32);
+ set_low_register(r1 + 1, alu_out & 0x00000000FFFFFFFF);
+ SetS390ConditionCode<int32_t>(alu_out, 0);
+ break;
+ }
+ default: { return DecodeFourByteArithmetic(instr); }
+ }
+ return true;
+}
+
+bool Simulator::DecodeFourByteArithmetic64Bit(Instruction* instr) {
+ Opcode op = instr->S390OpcodeValue();
+
+ RRFInstruction* rrfInst = reinterpret_cast<RRFInstruction*>(instr);
+ RREInstruction* rreInst = reinterpret_cast<RREInstruction*>(instr);
+
+ switch (op) {
+ case AGR:
+ case SGR:
+ case OGR:
+ case NGR:
+ case XGR: {
+ int r1 = rreInst->R1Value();
+ int r2 = rreInst->R2Value();
+ int64_t r1_val = get_register(r1);
+ int64_t r2_val = get_register(r2);
+ bool isOF = false;
+ switch (op) {
+ case AGR:
+ isOF = CheckOverflowForIntAdd(r1_val, r2_val, int64_t);
+ r1_val += r2_val;
+ SetS390ConditionCode<int64_t>(r1_val, 0);
+ SetS390OverflowCode(isOF);
+ break;
+ case SGR:
+ isOF = CheckOverflowForIntSub(r1_val, r2_val, int64_t);
+ r1_val -= r2_val;
+ SetS390ConditionCode<int64_t>(r1_val, 0);
+ SetS390OverflowCode(isOF);
+ break;
+ case OGR:
+ r1_val |= r2_val;
+ SetS390BitWiseConditionCode<uint64_t>(r1_val);
+ break;
+ case NGR:
+ r1_val &= r2_val;
+ SetS390BitWiseConditionCode<uint64_t>(r1_val);
+ break;
+ case XGR:
+ r1_val ^= r2_val;
+ SetS390BitWiseConditionCode<uint64_t>(r1_val);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ set_register(r1, r1_val);
+ break;
+ }
+ case AGFR: {
+ // Add Register (64 <- 32) (Sign Extends 32-bit val)
+ int r1 = rreInst->R1Value();
+ int r2 = rreInst->R2Value();
+ int64_t r1_val = get_register(r1);
+ int64_t r2_val = static_cast<int64_t>(get_low_register<int32_t>(r2));
+ bool isOF = CheckOverflowForIntAdd(r1_val, r2_val, int64_t);
+ r1_val += r2_val;
+ SetS390ConditionCode<int64_t>(r1_val, 0);
+ SetS390OverflowCode(isOF);
+ set_register(r1, r1_val);
+ break;
+ }
+ case SGFR: {
+ // Sub Reg (64 <- 32)
+ int r1 = rreInst->R1Value();
+ int r2 = rreInst->R2Value();
+ int64_t r1_val = get_register(r1);
+ int64_t r2_val = static_cast<int64_t>(get_low_register<int32_t>(r2));
+ bool isOF = false;
+ isOF = CheckOverflowForIntSub(r1_val, r2_val, int64_t);
+ r1_val -= r2_val;
+ SetS390ConditionCode<int64_t>(r1_val, 0);
+ SetS390OverflowCode(isOF);
+ set_register(r1, r1_val);
+ break;
+ }
+ case AGRK:
+ case SGRK:
+ case NGRK:
+ case OGRK:
+ case XGRK: {
+ // 64-bit Non-clobbering arithmetics / bitwise ops.
+ int r1 = rrfInst->R1Value();
+ int r2 = rrfInst->R2Value();
+ int r3 = rrfInst->R3Value();
+ int64_t r2_val = get_register(r2);
+ int64_t r3_val = get_register(r3);
+ if (AGRK == op) {
+ bool isOF = CheckOverflowForIntAdd(r2_val, r3_val, int64_t);
+ SetS390ConditionCode<int64_t>(r2_val + r3_val, 0);
+ SetS390OverflowCode(isOF);
+ set_register(r1, r2_val + r3_val);
+ } else if (SGRK == op) {
+ bool isOF = CheckOverflowForIntSub(r2_val, r3_val, int64_t);
+ SetS390ConditionCode<int64_t>(r2_val - r3_val, 0);
+ SetS390OverflowCode(isOF);
+ set_register(r1, r2_val - r3_val);
+ } else {
+ // Assume bitwise operation here
+ uint64_t bitwise_result = 0;
+ if (NGRK == op) {
+ bitwise_result = r2_val & r3_val;
+ } else if (OGRK == op) {
+ bitwise_result = r2_val | r3_val;
+ } else if (XGRK == op) {
+ bitwise_result = r2_val ^ r3_val;
+ }
+ SetS390BitWiseConditionCode<uint64_t>(bitwise_result);
+ set_register(r1, bitwise_result);
+ }
+ break;
+ }
+ case ALGRK:
+ case SLGRK: {
+ // 64-bit Non-clobbering unsigned arithmetics
+ int r1 = rrfInst->R1Value();
+ int r2 = rrfInst->R2Value();
+ int r3 = rrfInst->R3Value();
+ uint64_t r2_val = get_register(r2);
+ uint64_t r3_val = get_register(r3);
+ if (ALGRK == op) {
+ bool isOF = CheckOverflowForUIntAdd(r2_val, r3_val);
+ SetS390ConditionCode<uint64_t>(r2_val + r3_val, 0);
+ SetS390OverflowCode(isOF);
+ set_register(r1, r2_val + r3_val);
+ } else if (SLGRK == op) {
+ bool isOF = CheckOverflowForUIntSub(r2_val, r3_val);
+ SetS390ConditionCode<uint64_t>(r2_val - r3_val, 0);
+ SetS390OverflowCode(isOF);
+ set_register(r1, r2_val - r3_val);
+ }
+ }
+ case AGHI:
+ case MGHI: {
+ RIInstruction* riinst = reinterpret_cast<RIInstruction*>(instr);
+ int32_t r1 = riinst->R1Value();
+ int64_t i = static_cast<int64_t>(riinst->I2Value());
+ int64_t r1_val = get_register(r1);
+ bool isOF = false;
+ switch (op) {
+ case AGHI:
+ isOF = CheckOverflowForIntAdd(r1_val, i, int64_t);
+ r1_val += i;
+ break;
+ case MGHI:
+ isOF = CheckOverflowForMul(r1_val, i);
+ r1_val *= i;
+ break; // no overflow indication is given
+ default:
+ break;
+ }
+ set_register(r1, r1_val);
+ SetS390ConditionCode<int32_t>(r1_val, 0);
+ SetS390OverflowCode(isOF);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ return true;
+}
+
+/**
+ * Decodes and simulates four byte arithmetic instructions
+ */
+bool Simulator::DecodeFourByteArithmetic(Instruction* instr) {
+ Opcode op = instr->S390OpcodeValue();
+
+ // Pre-cast instruction to various types
+ RRFInstruction* rrfInst = reinterpret_cast<RRFInstruction*>(instr);
+
+ switch (op) {
+ case AGR:
+ case SGR:
+ case OGR:
+ case NGR:
+ case XGR:
+ case AGFR:
+ case SGFR: {
+ DecodeFourByteArithmetic64Bit(instr);
+ break;
+ }
+ case ARK:
+ case SRK:
+ case NRK:
+ case ORK:
+ case XRK: {
+ // 32-bit Non-clobbering arithmetics / bitwise ops
+ int r1 = rrfInst->R1Value();
+ int r2 = rrfInst->R2Value();
+ int r3 = rrfInst->R3Value();
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ int32_t r3_val = get_low_register<int32_t>(r3);
+ if (ARK == op) {
+ bool isOF = CheckOverflowForIntAdd(r2_val, r3_val, int32_t);
+ SetS390ConditionCode<int32_t>(r2_val + r3_val, 0);
+ SetS390OverflowCode(isOF);
+ set_low_register(r1, r2_val + r3_val);
+ } else if (SRK == op) {
+ bool isOF = CheckOverflowForIntSub(r2_val, r3_val, int32_t);
+ SetS390ConditionCode<int32_t>(r2_val - r3_val, 0);
+ SetS390OverflowCode(isOF);
+ set_low_register(r1, r2_val - r3_val);
+ } else {
+ // Assume bitwise operation here
+ uint32_t bitwise_result = 0;
+ if (NRK == op) {
+ bitwise_result = r2_val & r3_val;
+ } else if (ORK == op) {
+ bitwise_result = r2_val | r3_val;
+ } else if (XRK == op) {
+ bitwise_result = r2_val ^ r3_val;
+ }
+ SetS390BitWiseConditionCode<uint32_t>(bitwise_result);
+ set_low_register(r1, bitwise_result);
+ }
+ break;
+ }
+ case ALRK:
+ case SLRK: {
+ // 32-bit Non-clobbering unsigned arithmetics
+ int r1 = rrfInst->R1Value();
+ int r2 = rrfInst->R2Value();
+ int r3 = rrfInst->R3Value();
+ uint32_t r2_val = get_low_register<uint32_t>(r2);
+ uint32_t r3_val = get_low_register<uint32_t>(r3);
+ if (ALRK == op) {
+ bool isOF = CheckOverflowForUIntAdd(r2_val, r3_val);
+ SetS390ConditionCode<uint32_t>(r2_val + r3_val, 0);
+ SetS390OverflowCode(isOF);
+ set_low_register(r1, r2_val + r3_val);
+ } else if (SLRK == op) {
+ bool isOF = CheckOverflowForUIntSub(r2_val, r3_val);
+ SetS390ConditionCode<uint32_t>(r2_val - r3_val, 0);
+ SetS390OverflowCode(isOF);
+ set_low_register(r1, r2_val - r3_val);
+ }
+ break;
+ }
+ case AGRK:
+ case SGRK:
+ case NGRK:
+ case OGRK:
+ case XGRK: {
+ DecodeFourByteArithmetic64Bit(instr);
+ break;
+ }
+ case ALGRK:
+ case SLGRK: {
+ DecodeFourByteArithmetic64Bit(instr);
+ break;
+ }
+ case AHI:
+ case MHI: {
+ RIInstruction* riinst = reinterpret_cast<RIInstruction*>(instr);
+ int32_t r1 = riinst->R1Value();
+ int32_t i = riinst->I2Value();
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ bool isOF = false;
+ switch (op) {
+ case AHI:
+ isOF = CheckOverflowForIntAdd(r1_val, i, int32_t);
+ r1_val += i;
+ break;
+ case MHI:
+ isOF = CheckOverflowForMul(r1_val, i);
+ r1_val *= i;
+ break; // no overflow indication is given
+ default:
+ break;
+ }
+ set_low_register(r1, r1_val);
+ SetS390ConditionCode<int32_t>(r1_val, 0);
+ SetS390OverflowCode(isOF);
+ break;
+ }
+ case AGHI:
+ case MGHI: {
+ DecodeFourByteArithmetic64Bit(instr);
+ break;
+ }
+ case MLR: {
+ RREInstruction* rreinst = reinterpret_cast<RREInstruction*>(instr);
+ int r1 = rreinst->R1Value();
+ int r2 = rreinst->R2Value();
+ DCHECK(r1 % 2 == 0);
+
+ uint32_t r1_val = get_low_register<uint32_t>(r1 + 1);
+ uint32_t r2_val = get_low_register<uint32_t>(r2);
+ uint64_t product =
+ static_cast<uint64_t>(r1_val) * static_cast<uint64_t>(r2_val);
+ int32_t high_bits = product >> 32;
+ int32_t low_bits = product & 0x00000000FFFFFFFF;
+ set_low_register(r1, high_bits);
+ set_low_register(r1 + 1, low_bits);
+ break;
+ }
+ case DLGR: {
+#ifdef V8_TARGET_ARCH_S390X
+ RREInstruction* rreinst = reinterpret_cast<RREInstruction*>(instr);
+ int r1 = rreinst->R1Value();
+ int r2 = rreinst->R2Value();
+ uint64_t r1_val = get_register(r1);
+ uint64_t r2_val = get_register(r2);
+ DCHECK(r1 % 2 == 0);
+ unsigned __int128 dividend = static_cast<unsigned __int128>(r1_val) << 64;
+ dividend += get_register(r1 + 1);
+ uint64_t remainder = dividend % r2_val;
+ uint64_t quotient = dividend / r2_val;
+ r1_val = remainder;
+ set_register(r1, remainder);
+ set_register(r1 + 1, quotient);
+#else
+ UNREACHABLE();
+#endif
+ break;
+ }
+ case DLR: {
+ RREInstruction* rreinst = reinterpret_cast<RREInstruction*>(instr);
+ int r1 = rreinst->R1Value();
+ int r2 = rreinst->R2Value();
+ uint32_t r1_val = get_low_register<uint32_t>(r1);
+ uint32_t r2_val = get_low_register<uint32_t>(r2);
+ DCHECK(r1 % 2 == 0);
+ uint64_t dividend = static_cast<uint64_t>(r1_val) << 32;
+ dividend += get_low_register<uint32_t>(r1 + 1);
+ uint32_t remainder = dividend % r2_val;
+ uint32_t quotient = dividend / r2_val;
+ r1_val = remainder;
+ set_low_register(r1, remainder);
+ set_low_register(r1 + 1, quotient);
+ break;
+ }
+ case A:
+ case S:
+ case M:
+ case D:
+ case O:
+ case N:
+ case X: {
+ // 32-bit Reg-Mem instructions
+ RXInstruction* rxinst = reinterpret_cast<RXInstruction*>(instr);
+ int b2 = rxinst->B2Value();
+ int x2 = rxinst->X2Value();
+ int32_t r1_val = get_low_register<int32_t>(rxinst->R1Value());
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t d2_val = rxinst->D2Value();
+ int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+ int32_t alu_out = 0;
+ bool isOF = false;
+ switch (op) {
+ case A:
+ isOF = CheckOverflowForIntAdd(r1_val, mem_val, int32_t);
+ alu_out = r1_val + mem_val;
+ SetS390ConditionCode<int32_t>(alu_out, 0);
+ SetS390OverflowCode(isOF);
+ break;
+ case S:
+ isOF = CheckOverflowForIntSub(r1_val, mem_val, int32_t);
+ alu_out = r1_val - mem_val;
+ SetS390ConditionCode<int32_t>(alu_out, 0);
+ SetS390OverflowCode(isOF);
+ break;
+ case M:
+ case D:
+ UNIMPLEMENTED();
+ break;
+ case O:
+ alu_out = r1_val | mem_val;
+ SetS390BitWiseConditionCode<uint32_t>(alu_out);
+ break;
+ case N:
+ alu_out = r1_val & mem_val;
+ SetS390BitWiseConditionCode<uint32_t>(alu_out);
+ break;
+ case X:
+ alu_out = r1_val ^ mem_val;
+ SetS390BitWiseConditionCode<uint32_t>(alu_out);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ set_low_register(r1, alu_out);
+ break;
+ }
+ case OILL:
+ case OIHL: {
+ RIInstruction* riInst = reinterpret_cast<RIInstruction*>(instr);
+ int r1 = riInst->R1Value();
+ int i = riInst->I2Value();
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ if (OILL == op) {
+ // CC is set based on the 16 bits that are AND'd
+ SetS390BitWiseConditionCode<uint16_t>(r1_val | i);
+ } else if (OILH == op) {
+ // CC is set based on the 16 bits that are AND'd
+ SetS390BitWiseConditionCode<uint16_t>((r1_val >> 16) | i);
+ i = i << 16;
+ } else {
+ UNIMPLEMENTED();
+ }
+ set_low_register(r1, r1_val | i);
+ break;
+ }
+ case NILL:
+ case NILH: {
+ RIInstruction* riInst = reinterpret_cast<RIInstruction*>(instr);
+ int r1 = riInst->R1Value();
+ int i = riInst->I2Value();
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ if (NILL == op) {
+ // CC is set based on the 16 bits that are AND'd
+ SetS390BitWiseConditionCode<uint16_t>(r1_val & i);
+ i |= 0xFFFF0000;
+ } else if (NILH == op) {
+ // CC is set based on the 16 bits that are AND'd
+ SetS390BitWiseConditionCode<uint16_t>((r1_val >> 16) & i);
+ i = (i << 16) | 0x0000FFFF;
+ } else {
+ UNIMPLEMENTED();
+ }
+ set_low_register(r1, r1_val & i);
+ break;
+ }
+ case AH:
+ case SH:
+ case MH: {
+ RXInstruction* rxinst = reinterpret_cast<RXInstruction*>(instr);
+ int b2 = rxinst->B2Value();
+ int x2 = rxinst->X2Value();
+ int32_t r1_val = get_low_register<int32_t>(rxinst->R1Value());
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t d2_val = rxinst->D2Value();
+ intptr_t addr = b2_val + x2_val + d2_val;
+ int32_t mem_val = static_cast<int32_t>(ReadH(addr, instr));
+ int32_t alu_out = 0;
+ bool isOF = false;
+ if (AH == op) {
+ isOF = CheckOverflowForIntAdd(r1_val, mem_val, int32_t);
+ alu_out = r1_val + mem_val;
+ } else if (SH == op) {
+ isOF = CheckOverflowForIntSub(r1_val, mem_val, int32_t);
+ alu_out = r1_val - mem_val;
+ } else if (MH == op) {
+ alu_out = r1_val * mem_val;
+ } else {
+ UNREACHABLE();
+ }
+ set_low_register(r1, alu_out);
+ if (MH != op) { // MH does not change condition code
+ SetS390ConditionCode<int32_t>(alu_out, 0);
+ SetS390OverflowCode(isOF);
+ }
+ break;
+ }
+ case DSGR: {
+ RREInstruction* rreInst = reinterpret_cast<RREInstruction*>(instr);
+ int r1 = rreInst->R1Value();
+ int r2 = rreInst->R2Value();
+
+ DCHECK(r1 % 2 == 0);
+
+ int64_t dividend = get_register(r1 + 1);
+ int64_t divisor = get_register(r2);
+ set_register(r1, dividend % divisor);
+ set_register(r1 + 1, dividend / divisor);
+
+ break;
+ }
+ case FLOGR: {
+ RREInstruction* rreInst = reinterpret_cast<RREInstruction*>(instr);
+ int r1 = rreInst->R1Value();
+ int r2 = rreInst->R2Value();
+
+ DCHECK(r1 % 2 == 0);
+
+ int64_t r2_val = get_register(r2);
+
+ int i = 0;
+ for (; i < 64; i++) {
+ if (r2_val < 0) break;
+ r2_val <<= 1;
+ }
+
+ r2_val = get_register(r2);
+
+ int64_t mask = ~(1 << (63 - i));
+ set_register(r1, i);
+ set_register(r1 + 1, r2_val & mask);
+
+ break;
+ }
+ case MSR:
+ case MSGR: { // they do not set overflow code
+ RREInstruction* rreInst = reinterpret_cast<RREInstruction*>(instr);
+ int r1 = rreInst->R1Value();
+ int r2 = rreInst->R2Value();
+ if (op == MSR) {
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ set_low_register(r1, r1_val * r2_val);
+ } else if (op == MSGR) {
+ int64_t r1_val = get_register(r1);
+ int64_t r2_val = get_register(r2);
+ set_register(r1, r1_val * r2_val);
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+ case MS: {
+ RXInstruction* rxinst = reinterpret_cast<RXInstruction*>(instr);
+ int r1 = rxinst->R1Value();
+ int b2 = rxinst->B2Value();
+ int x2 = rxinst->X2Value();
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ intptr_t d2_val = rxinst->D2Value();
+ int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ set_low_register(r1, r1_val * mem_val);
+ break;
+ }
+ case LGBR:
+ case LBR: {
+ RREInstruction* rrinst = reinterpret_cast<RREInstruction*>(instr);
+ int r1 = rrinst->R1Value();
+ int r2 = rrinst->R2Value();
+#ifdef V8_TARGET_ARCH_S390X
+ int64_t r2_val = get_low_register<int64_t>(r2);
+ r2_val <<= 56;
+ r2_val >>= 56;
+ set_register(r1, r2_val);
+#else
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ r2_val <<= 24;
+ r2_val >>= 24;
+ set_low_register(r1, r2_val);
+#endif
+ break;
+ }
+ case LGHR:
+ case LHR: {
+ RREInstruction* rrinst = reinterpret_cast<RREInstruction*>(instr);
+ int r1 = rrinst->R1Value();
+ int r2 = rrinst->R2Value();
+#ifdef V8_TARGET_ARCH_S390X
+ int64_t r2_val = get_low_register<int64_t>(r2);
+ r2_val <<= 48;
+ r2_val >>= 48;
+ set_register(r1, r2_val);
+#else
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ r2_val <<= 16;
+ r2_val >>= 16;
+ set_low_register(r1, r2_val);
+#endif
+ break;
+ }
+ case ALCR: {
+ RREInstruction* rrinst = reinterpret_cast<RREInstruction*>(instr);
+ int r1 = rrinst->R1Value();
+ int r2 = rrinst->R2Value();
+ uint32_t r1_val = get_low_register<uint32_t>(r1);
+ uint32_t r2_val = get_low_register<uint32_t>(r2);
+ uint32_t alu_out = 0;
+ bool isOF = false;
+
+ alu_out = r1_val + r2_val;
+ bool isOF_original = CheckOverflowForUIntAdd(r1_val, r2_val);
+ if (TestConditionCode((Condition)2) || TestConditionCode((Condition)3)) {
+ alu_out = alu_out + 1;
+ isOF = isOF_original || CheckOverflowForUIntAdd(alu_out, 1);
+ } else {
+ isOF = isOF_original;
+ }
+ set_low_register(r1, alu_out);
+ SetS390ConditionCodeCarry<uint32_t>(alu_out, isOF);
+ break;
+ }
+ case SLBR: {
+ RREInstruction* rrinst = reinterpret_cast<RREInstruction*>(instr);
+ int r1 = rrinst->R1Value();
+ int r2 = rrinst->R2Value();
+ uint32_t r1_val = get_low_register<uint32_t>(r1);
+ uint32_t r2_val = get_low_register<uint32_t>(r2);
+ uint32_t alu_out = 0;
+ bool isOF = false;
+
+ alu_out = r1_val - r2_val;
+ bool isOF_original = CheckOverflowForUIntSub(r1_val, r2_val);
+ if (TestConditionCode((Condition)2) || TestConditionCode((Condition)3)) {
+ alu_out = alu_out - 1;
+ isOF = isOF_original || CheckOverflowForUIntSub(alu_out, 1);
+ } else {
+ isOF = isOF_original;
+ }
+ set_low_register(r1, alu_out);
+ SetS390ConditionCodeCarry<uint32_t>(alu_out, isOF);
+ break;
+ }
+ default: { return DecodeFourByteFloatingPoint(instr); }
+ }
+ return true;
+}
+
+void Simulator::DecodeFourByteFloatingPointIntConversion(Instruction* instr) {
+ Opcode op = instr->S390OpcodeValue();
+ switch (op) {
+ case CDLFBR:
+ case CDLGBR:
+ case CELGBR:
+ case CLFDBR:
+ case CLGDBR:
+ case CELFBR:
+ case CLGEBR:
+ case CLFEBR: {
+ RREInstruction* rreInstr = reinterpret_cast<RREInstruction*>(instr);
+ int r1 = rreInstr->R1Value();
+ int r2 = rreInstr->R2Value();
+ if (op == CDLFBR) {
+ uint32_t r2_val = get_low_register<uint32_t>(r2);
+ double r1_val = static_cast<double>(r2_val);
+ set_d_register_from_double(r1, r1_val);
+ } else if (op == CELFBR) {
+ uint32_t r2_val = get_low_register<uint32_t>(r2);
+ float r1_val = static_cast<float>(r2_val);
+ set_d_register_from_float32(r1, r1_val);
+ } else if (op == CDLGBR) {
+ uint64_t r2_val = get_register(r2);
+ double r1_val = static_cast<double>(r2_val);
+ set_d_register_from_double(r1, r1_val);
+ } else if (op == CELGBR) {
+ uint64_t r2_val = get_register(r2);
+ float r1_val = static_cast<float>(r2_val);
+ set_d_register_from_float32(r1, r1_val);
+ } else if (op == CLFDBR) {
+ double r2_val = get_double_from_d_register(r2);
+ uint32_t r1_val = static_cast<uint32_t>(r2_val);
+ set_low_register(r1, r1_val);
+ SetS390ConvertConditionCode<double>(r2_val, r1_val, UINT32_MAX);
+ } else if (op == CLFEBR) {
+ float r2_val = get_float32_from_d_register(r2);
+ uint32_t r1_val = static_cast<uint32_t>(r2_val);
+ set_low_register(r1, r1_val);
+ SetS390ConvertConditionCode<double>(r2_val, r1_val, UINT32_MAX);
+ } else if (op == CLGDBR) {
+ double r2_val = get_double_from_d_register(r2);
+ uint64_t r1_val = static_cast<uint64_t>(r2_val);
+ set_register(r1, r1_val);
+ SetS390ConvertConditionCode<double>(r2_val, r1_val, UINT64_MAX);
+ } else if (op == CLGEBR) {
+ float r2_val = get_float32_from_d_register(r2);
+ uint64_t r1_val = static_cast<uint64_t>(r2_val);
+ set_register(r1, r1_val);
+ SetS390ConvertConditionCode<double>(r2_val, r1_val, UINT64_MAX);
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::DecodeFourByteFloatingPointRound(Instruction* instr) {
+ Opcode op = instr->S390OpcodeValue();
+ RREInstruction* rreInstr = reinterpret_cast<RREInstruction*>(instr);
+ int r1 = rreInstr->R1Value();
+ int r2 = rreInstr->R2Value();
+ double r2_val = get_double_from_d_register(r2);
+ float r2_fval = get_float32_from_d_register(r2);
+
+ switch (op) {
+ case CFDBR: {
+ int mask_val = rreInstr->M3Value();
+ int32_t r1_val = 0;
+
+ SetS390RoundConditionCode(r2_val, INT32_MAX, INT32_MIN);
+
+ switch (mask_val) {
+ case CURRENT_ROUNDING_MODE:
+ case ROUND_TO_PREPARE_FOR_SHORTER_PRECISION: {
+ r1_val = static_cast<int32_t>(r2_val);
+ break;
+ }
+ case ROUND_TO_NEAREST_WITH_TIES_AWAY_FROM_0: {
+ double ceil_val = std::ceil(r2_val);
+ double floor_val = std::floor(r2_val);
+ double sub_val1 = std::fabs(r2_val - floor_val);
+ double sub_val2 = std::fabs(r2_val - ceil_val);
+ if (sub_val1 > sub_val2) {
+ r1_val = static_cast<int32_t>(ceil_val);
+ } else if (sub_val1 < sub_val2) {
+ r1_val = static_cast<int32_t>(floor_val);
+ } else { // round away from zero:
+ if (r2_val > 0.0) {
+ r1_val = static_cast<int32_t>(ceil_val);
+ } else {
+ r1_val = static_cast<int32_t>(floor_val);
+ }
+ }
+ break;
+ }
+ case ROUND_TO_NEAREST_WITH_TIES_TO_EVEN: {
+ double ceil_val = std::ceil(r2_val);
+ double floor_val = std::floor(r2_val);
+ double sub_val1 = std::fabs(r2_val - floor_val);
+ double sub_val2 = std::fabs(r2_val - ceil_val);
+ if (sub_val1 > sub_val2) {
+ r1_val = static_cast<int32_t>(ceil_val);
+ } else if (sub_val1 < sub_val2) {
+ r1_val = static_cast<int32_t>(floor_val);
+ } else { // check which one is even:
+ int32_t c_v = static_cast<int32_t>(ceil_val);
+ int32_t f_v = static_cast<int32_t>(floor_val);
+ if (f_v % 2 == 0)
+ r1_val = f_v;
+ else
+ r1_val = c_v;
+ }
+ break;
+ }
+ case ROUND_TOWARD_0: {
+ // check for overflow, cast r2_val to 64bit integer
+ // then check value within the range of INT_MIN and INT_MAX
+ // and set condition code accordingly
+ int64_t temp = static_cast<int64_t>(r2_val);
+ if (temp < INT_MIN || temp > INT_MAX) {
+ condition_reg_ = CC_OF;
+ }
+ r1_val = static_cast<int32_t>(r2_val);
+ break;
+ }
+ case ROUND_TOWARD_PLUS_INFINITE: {
+ r1_val = static_cast<int32_t>(std::ceil(r2_val));
+ break;
+ }
+ case ROUND_TOWARD_MINUS_INFINITE: {
+ // check for overflow, cast r2_val to 64bit integer
+ // then check value within the range of INT_MIN and INT_MAX
+ // and set condition code accordingly
+ int64_t temp = static_cast<int64_t>(std::floor(r2_val));
+ if (temp < INT_MIN || temp > INT_MAX) {
+ condition_reg_ = CC_OF;
+ }
+ r1_val = static_cast<int32_t>(std::floor(r2_val));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ set_low_register(r1, r1_val);
+ break;
+ }
+ case CGDBR: {
+ int mask_val = rreInstr->M3Value();
+ int64_t r1_val = 0;
+
+ SetS390RoundConditionCode(r2_val, INT64_MAX, INT64_MIN);
+
+ switch (mask_val) {
+ case CURRENT_ROUNDING_MODE:
+ case ROUND_TO_NEAREST_WITH_TIES_AWAY_FROM_0:
+ case ROUND_TO_PREPARE_FOR_SHORTER_PRECISION: {
+ UNIMPLEMENTED();
+ break;
+ }
+ case ROUND_TO_NEAREST_WITH_TIES_TO_EVEN: {
+ double ceil_val = std::ceil(r2_val);
+ double floor_val = std::floor(r2_val);
+ if (std::abs(r2_val - floor_val) > std::abs(r2_val - ceil_val)) {
+ r1_val = static_cast<int64_t>(ceil_val);
+ } else if (std::abs(r2_val - floor_val) <
+ std::abs(r2_val - ceil_val)) {
+ r1_val = static_cast<int64_t>(floor_val);
+ } else { // check which one is even:
+ int64_t c_v = static_cast<int64_t>(ceil_val);
+ int64_t f_v = static_cast<int64_t>(floor_val);
+ if (f_v % 2 == 0)
+ r1_val = f_v;
+ else
+ r1_val = c_v;
+ }
+ break;
+ }
+ case ROUND_TOWARD_0: {
+ r1_val = static_cast<int64_t>(r2_val);
+ break;
+ }
+ case ROUND_TOWARD_PLUS_INFINITE: {
+ r1_val = static_cast<int64_t>(std::ceil(r2_val));
+ break;
+ }
+ case ROUND_TOWARD_MINUS_INFINITE: {
+ r1_val = static_cast<int64_t>(std::floor(r2_val));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ set_register(r1, r1_val);
+ break;
+ }
+ case CGEBR: {
+ int mask_val = rreInstr->M3Value();
+ int64_t r1_val = 0;
+
+ SetS390RoundConditionCode(r2_fval, INT64_MAX, INT64_MIN);
+
+ switch (mask_val) {
+ case CURRENT_ROUNDING_MODE:
+ case ROUND_TO_NEAREST_WITH_TIES_AWAY_FROM_0:
+ case ROUND_TO_PREPARE_FOR_SHORTER_PRECISION: {
+ UNIMPLEMENTED();
+ break;
+ }
+ case ROUND_TO_NEAREST_WITH_TIES_TO_EVEN: {
+ float ceil_val = std::ceil(r2_fval);
+ float floor_val = std::floor(r2_fval);
+ if (std::abs(r2_fval - floor_val) > std::abs(r2_fval - ceil_val)) {
+ r1_val = static_cast<int64_t>(ceil_val);
+ } else if (std::abs(r2_fval - floor_val) <
+ std::abs(r2_fval - ceil_val)) {
+ r1_val = static_cast<int64_t>(floor_val);
+ } else { // check which one is even:
+ int64_t c_v = static_cast<int64_t>(ceil_val);
+ int64_t f_v = static_cast<int64_t>(floor_val);
+ if (f_v % 2 == 0)
+ r1_val = f_v;
+ else
+ r1_val = c_v;
+ }
+ break;
+ }
+ case ROUND_TOWARD_0: {
+ r1_val = static_cast<int64_t>(r2_fval);
+ break;
+ }
+ case ROUND_TOWARD_PLUS_INFINITE: {
+ r1_val = static_cast<int64_t>(std::ceil(r2_fval));
+ break;
+ }
+ case ROUND_TOWARD_MINUS_INFINITE: {
+ r1_val = static_cast<int64_t>(std::floor(r2_fval));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ set_register(r1, r1_val);
+ break;
+ }
+ case CFEBR: {
+ int mask_val = rreInstr->M3Value();
+ int32_t r1_val = 0;
+
+ SetS390RoundConditionCode(r2_fval, INT32_MAX, INT32_MIN);
+
+ switch (mask_val) {
+ case CURRENT_ROUNDING_MODE:
+ case ROUND_TO_PREPARE_FOR_SHORTER_PRECISION: {
+ r1_val = static_cast<int32_t>(r2_fval);
+ break;
+ }
+ case ROUND_TO_NEAREST_WITH_TIES_AWAY_FROM_0: {
+ float ceil_val = std::ceil(r2_fval);
+ float floor_val = std::floor(r2_fval);
+ float sub_val1 = std::fabs(r2_fval - floor_val);
+ float sub_val2 = std::fabs(r2_fval - ceil_val);
+ if (sub_val1 > sub_val2) {
+ r1_val = static_cast<int32_t>(ceil_val);
+ } else if (sub_val1 < sub_val2) {
+ r1_val = static_cast<int32_t>(floor_val);
+ } else { // round away from zero:
+ if (r2_fval > 0.0) {
+ r1_val = static_cast<int32_t>(ceil_val);
+ } else {
+ r1_val = static_cast<int32_t>(floor_val);
+ }
+ }
+ break;
+ }
+ case ROUND_TO_NEAREST_WITH_TIES_TO_EVEN: {
+ float ceil_val = std::ceil(r2_fval);
+ float floor_val = std::floor(r2_fval);
+ float sub_val1 = std::fabs(r2_fval - floor_val);
+ float sub_val2 = std::fabs(r2_fval - ceil_val);
+ if (sub_val1 > sub_val2) {
+ r1_val = static_cast<int32_t>(ceil_val);
+ } else if (sub_val1 < sub_val2) {
+ r1_val = static_cast<int32_t>(floor_val);
+ } else { // check which one is even:
+ int32_t c_v = static_cast<int32_t>(ceil_val);
+ int32_t f_v = static_cast<int32_t>(floor_val);
+ if (f_v % 2 == 0)
+ r1_val = f_v;
+ else
+ r1_val = c_v;
+ }
+ break;
+ }
+ case ROUND_TOWARD_0: {
+ // check for overflow, cast r2_fval to 64bit integer
+ // then check value within the range of INT_MIN and INT_MAX
+ // and set condition code accordingly
+ int64_t temp = static_cast<int64_t>(r2_fval);
+ if (temp < INT_MIN || temp > INT_MAX) {
+ condition_reg_ = CC_OF;
+ }
+ r1_val = static_cast<int32_t>(r2_fval);
+ break;
+ }
+ case ROUND_TOWARD_PLUS_INFINITE: {
+ r1_val = static_cast<int32_t>(std::ceil(r2_fval));
+ break;
+ }
+ case ROUND_TOWARD_MINUS_INFINITE: {
+ // check for overflow, cast r2_fval to 64bit integer
+ // then check value within the range of INT_MIN and INT_MAX
+ // and set condition code accordingly
+ int64_t temp = static_cast<int64_t>(std::floor(r2_fval));
+ if (temp < INT_MIN || temp > INT_MAX) {
+ condition_reg_ = CC_OF;
+ }
+ r1_val = static_cast<int32_t>(std::floor(r2_fval));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ set_low_register(r1, r1_val);
+
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+/**
+ * Decodes and simulates four byte floating point instructions
+ */
+bool Simulator::DecodeFourByteFloatingPoint(Instruction* instr) {
+ Opcode op = instr->S390OpcodeValue();
+
+ switch (op) {
+ case ADBR:
+ case AEBR:
+ case SDBR:
+ case SEBR:
+ case MDBR:
+ case MEEBR:
+ case MADBR:
+ case DDBR:
+ case DEBR:
+ case CDBR:
+ case CEBR:
+ case CDFBR:
+ case CDGBR:
+ case CEGBR:
+ case CGEBR:
+ case CFDBR:
+ case CGDBR:
+ case SQDBR:
+ case SQEBR:
+ case CFEBR:
+ case CEFBR:
+ case LCDBR:
+ case LPDBR:
+ case LPEBR: {
+ RREInstruction* rreInstr = reinterpret_cast<RREInstruction*>(instr);
+ int r1 = rreInstr->R1Value();
+ int r2 = rreInstr->R2Value();
+ double r1_val = get_double_from_d_register(r1);
+ double r2_val = get_double_from_d_register(r2);
+ float fr1_val = get_float32_from_d_register(r1);
+ float fr2_val = get_float32_from_d_register(r2);
+ if (op == ADBR) {
+ r1_val += r2_val;
+ set_d_register_from_double(r1, r1_val);
+ SetS390ConditionCode<double>(r1_val, 0);
+ } else if (op == AEBR) {
+ fr1_val += fr2_val;
+ set_d_register_from_float32(r1, fr1_val);
+ SetS390ConditionCode<float>(fr1_val, 0);
+ } else if (op == SDBR) {
+ r1_val -= r2_val;
+ set_d_register_from_double(r1, r1_val);
+ SetS390ConditionCode<double>(r1_val, 0);
+ } else if (op == SEBR) {
+ fr1_val -= fr2_val;
+ set_d_register_from_float32(r1, fr1_val);
+ SetS390ConditionCode<float>(fr1_val, 0);
+ } else if (op == MDBR) {
+ r1_val *= r2_val;
+ set_d_register_from_double(r1, r1_val);
+ SetS390ConditionCode<double>(r1_val, 0);
+ } else if (op == MEEBR) {
+ fr1_val *= fr2_val;
+ set_d_register_from_float32(r1, fr1_val);
+ SetS390ConditionCode<float>(fr1_val, 0);
+ } else if (op == MADBR) {
+ RRDInstruction* rrdInstr = reinterpret_cast<RRDInstruction*>(instr);
+ int r1 = rrdInstr->R1Value();
+ int r2 = rrdInstr->R2Value();
+ int r3 = rrdInstr->R3Value();
+ double r1_val = get_double_from_d_register(r1);
+ double r2_val = get_double_from_d_register(r2);
+ double r3_val = get_double_from_d_register(r3);
+ r1_val += r2_val * r3_val;
+ set_d_register_from_double(r1, r1_val);
+ SetS390ConditionCode<double>(r1_val, 0);
+ } else if (op == DDBR) {
+ r1_val /= r2_val;
+ set_d_register_from_double(r1, r1_val);
+ SetS390ConditionCode<double>(r1_val, 0);
+ } else if (op == DEBR) {
+ fr1_val /= fr2_val;
+ set_d_register_from_float32(r1, fr1_val);
+ SetS390ConditionCode<float>(fr1_val, 0);
+ } else if (op == CDBR) {
+ if (isNaN(r1_val) || isNaN(r2_val)) {
+ condition_reg_ = CC_OF;
+ } else {
+ SetS390ConditionCode<double>(r1_val, r2_val);
+ }
+ } else if (op == CEBR) {
+ if (isNaN(fr1_val) || isNaN(fr2_val)) {
+ condition_reg_ = CC_OF;
+ } else {
+ SetS390ConditionCode<float>(fr1_val, fr2_val);
+ }
+ } else if (op == CDGBR) {
+ int64_t r2_val = get_register(r2);
+ double r1_val = static_cast<double>(r2_val);
+ set_d_register_from_double(r1, r1_val);
+ } else if (op == CEGBR) {
+ int64_t fr2_val = get_register(r2);
+ float fr1_val = static_cast<float>(fr2_val);
+ set_d_register_from_float32(r1, fr1_val);
+ } else if (op == CDFBR) {
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ double r1_val = static_cast<double>(r2_val);
+ set_d_register_from_double(r1, r1_val);
+ } else if (op == CEFBR) {
+ int32_t fr2_val = get_low_register<int32_t>(r2);
+ float fr1_val = static_cast<float>(fr2_val);
+ set_d_register_from_float32(r1, fr1_val);
+ } else if (op == CFDBR) {
+ DecodeFourByteFloatingPointRound(instr);
+ } else if (op == CGDBR) {
+ DecodeFourByteFloatingPointRound(instr);
+ } else if (op == CGEBR) {
+ DecodeFourByteFloatingPointRound(instr);
+ } else if (op == SQDBR) {
+ r1_val = std::sqrt(r2_val);
+ set_d_register_from_double(r1, r1_val);
+ } else if (op == SQEBR) {
+ fr1_val = std::sqrt(fr2_val);
+ set_d_register_from_float32(r1, fr1_val);
+ } else if (op == CFEBR) {
+ DecodeFourByteFloatingPointRound(instr);
+ } else if (op == LCDBR) {
+ r1_val = -r2_val;
+ set_d_register_from_double(r1, r1_val);
+ if (r2_val != r2_val) { // input is NaN
+ condition_reg_ = CC_OF;
+ } else if (r2_val == 0) {
+ condition_reg_ = CC_EQ;
+ } else if (r2_val < 0) {
+ condition_reg_ = CC_LT;
+ } else if (r2_val > 0) {
+ condition_reg_ = CC_GT;
+ }
+ } else if (op == LPDBR) {
+ r1_val = std::fabs(r2_val);
+ set_d_register_from_double(r1, r1_val);
+ if (r2_val != r2_val) { // input is NaN
+ condition_reg_ = CC_OF;
+ } else if (r2_val == 0) {
+ condition_reg_ = CC_EQ;
+ } else {
+ condition_reg_ = CC_GT;
+ }
+ } else if (op == LPEBR) {
+ fr1_val = std::fabs(fr2_val);
+ set_d_register_from_float32(r1, fr1_val);
+ if (fr2_val != fr2_val) { // input is NaN
+ condition_reg_ = CC_OF;
+ } else if (fr2_val == 0) {
+ condition_reg_ = CC_EQ;
+ } else {
+ condition_reg_ = CC_GT;
+ }
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+ case CDLFBR:
+ case CDLGBR:
+ case CELGBR:
+ case CLFDBR:
+ case CELFBR:
+ case CLGDBR:
+ case CLGEBR:
+ case CLFEBR: {
+ DecodeFourByteFloatingPointIntConversion(instr);
+ break;
+ }
+ case TMLL: {
+ RIInstruction* riinst = reinterpret_cast<RIInstruction*>(instr);
+ int r1 = riinst->R1Value();
+ int mask = riinst->I2Value() & 0x0000FFFF;
+ if (mask == 0) {
+ condition_reg_ = 0x0;
+ break;
+ }
+ uint32_t r1_val = get_low_register<uint32_t>(r1);
+ r1_val = r1_val & 0x0000FFFF; // uses only the last 16bits
+
+ // Test if all selected bits are Zero
+ bool allSelectedBitsAreZeros = true;
+ for (int i = 0; i < 15; i++) {
+ if (mask & (1 << i)) {
+ if (r1_val & (1 << i)) {
+ allSelectedBitsAreZeros = false;
+ break;
+ }
+ }
+ }
+ if (allSelectedBitsAreZeros) {
+ condition_reg_ = 0x8;
+ break; // Done!
+ }
+
+ // Test if all selected bits are one
+ bool allSelectedBitsAreOnes = true;
+ for (int i = 0; i < 15; i++) {
+ if (mask & (1 << i)) {
+ if (!(r1_val & (1 << i))) {
+ allSelectedBitsAreOnes = false;
+ break;
+ }
+ }
+ }
+ if (allSelectedBitsAreOnes) {
+ condition_reg_ = 0x1;
+ break; // Done!
+ }
+
+ // Now we know selected bits mixed zeros and ones
+ // Test if the leftmost bit is zero or one
+ for (int i = 14; i >= 0; i--) {
+ if (mask & (1 << i)) {
+ if (r1_val & (1 << i)) {
+ // leftmost bit is one
+ condition_reg_ = 0x2;
+ } else {
+ // leftmost bit is zero
+ condition_reg_ = 0x4;
+ }
+ break; // Done!
+ }
+ }
+ break;
+ }
+ case LEDBR: {
+ RREInstruction* rreInst = reinterpret_cast<RREInstruction*>(instr);
+ int r1 = rreInst->R1Value();
+ int r2 = rreInst->R2Value();
+ double r2_val = get_double_from_d_register(r2);
+ set_d_register_from_float32(r1, static_cast<float>(r2_val));
+ break;
+ }
+ case FIDBRA: {
+ RRFInstruction* rrfInst = reinterpret_cast<RRFInstruction*>(instr);
+ int r1 = rrfInst->R1Value();
+ int r2 = rrfInst->R2Value();
+ int m3 = rrfInst->M3Value();
+ double r2_val = get_double_from_d_register(r2);
+ DCHECK(rrfInst->M4Value() == 0);
+ switch (m3) {
+ case Assembler::FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0:
+ set_d_register_from_double(r1, round(r2_val));
+ break;
+ case Assembler::FIDBRA_ROUND_TOWARD_0:
+ set_d_register_from_double(r1, trunc(r2_val));
+ break;
+ case Assembler::FIDBRA_ROUND_TOWARD_POS_INF:
+ set_d_register_from_double(r1, std::ceil(r2_val));
+ break;
+ case Assembler::FIDBRA_ROUND_TOWARD_NEG_INF:
+ set_d_register_from_double(r1, std::floor(r2_val));
+ break;
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+ break;
+ }
+ case FIEBRA: {
+ RRFInstruction* rrfInst = reinterpret_cast<RRFInstruction*>(instr);
+ int r1 = rrfInst->R1Value();
+ int r2 = rrfInst->R2Value();
+ int m3 = rrfInst->M3Value();
+ float r2_val = get_float32_from_d_register(r2);
+ DCHECK(rrfInst->M4Value() == 0);
+ switch (m3) {
+ case Assembler::FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0:
+ set_d_register_from_float32(r1, round(r2_val));
+ break;
+ case Assembler::FIDBRA_ROUND_TOWARD_0:
+ set_d_register_from_float32(r1, trunc(r2_val));
+ break;
+ case Assembler::FIDBRA_ROUND_TOWARD_POS_INF:
+ set_d_register_from_float32(r1, std::ceil(r2_val));
+ break;
+ case Assembler::FIDBRA_ROUND_TOWARD_NEG_INF:
+ set_d_register_from_float32(r1, std::floor(r2_val));
+ break;
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+ break;
+ }
+ case MSDBR: {
+ UNIMPLEMENTED();
+ break;
+ }
+ case LDEBR: {
+ RREInstruction* rreInstr = reinterpret_cast<RREInstruction*>(instr);
+ int r1 = rreInstr->R1Value();
+ int r2 = rreInstr->R2Value();
+ float fp_val = get_float32_from_d_register(r2);
+ double db_val = static_cast<double>(fp_val);
+ set_d_register_from_double(r1, db_val);
+ break;
+ }
+ default: {
+ UNREACHABLE();
+ return false;
+ }
+ }
+ return true;
+}
+
+// Decode routine for six-byte instructions
+bool Simulator::DecodeSixByte(Instruction* instr) {
+ Opcode op = instr->S390OpcodeValue();
+
+ // Pre-cast instruction to various types
+ RIEInstruction* rieInstr = reinterpret_cast<RIEInstruction*>(instr);
+ RILInstruction* rilInstr = reinterpret_cast<RILInstruction*>(instr);
+ RSYInstruction* rsyInstr = reinterpret_cast<RSYInstruction*>(instr);
+ RXEInstruction* rxeInstr = reinterpret_cast<RXEInstruction*>(instr);
+ RXYInstruction* rxyInstr = reinterpret_cast<RXYInstruction*>(instr);
+ SIYInstruction* siyInstr = reinterpret_cast<SIYInstruction*>(instr);
+ SILInstruction* silInstr = reinterpret_cast<SILInstruction*>(instr);
+ SSInstruction* ssInstr = reinterpret_cast<SSInstruction*>(instr);
+
+ switch (op) {
+ case CLIY: {
+ // Compare Immediate (Mem - Imm) (8)
+ int b1 = siyInstr->B1Value();
+ int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+ intptr_t d1_val = siyInstr->D1Value();
+ intptr_t addr = b1_val + d1_val;
+ uint8_t mem_val = ReadB(addr);
+ uint8_t imm_val = siyInstr->I2Value();
+ SetS390ConditionCode<uint8_t>(mem_val, imm_val);
+ break;
+ }
+ case TMY: {
+ // Test Under Mask (Mem - Imm) (8)
+ int b1 = siyInstr->B1Value();
+ int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+ intptr_t d1_val = siyInstr->D1Value();
+ intptr_t addr = b1_val + d1_val;
+ uint8_t mem_val = ReadB(addr);
+ uint8_t imm_val = siyInstr->I2Value();
+ uint8_t selected_bits = mem_val & imm_val;
+ // CC0: Selected bits are zero
+ // CC1: Selected bits mixed zeros and ones
+ // CC3: Selected bits all ones
+ if (0 == selected_bits) {
+ condition_reg_ = CC_EQ; // CC0
+ } else if (selected_bits == imm_val) {
+ condition_reg_ = 0x1; // CC3
+ } else {
+ condition_reg_ = 0x4; // CC1
+ }
+ break;
+ }
+ case LDEB: {
+ // Load Float
+ int r1 = rxeInstr->R1Value();
+ int rb = rxeInstr->B2Value();
+ int rx = rxeInstr->X2Value();
+ int offset = rxeInstr->D2Value();
+ int64_t rb_val = (rb == 0) ? 0 : get_register(rb);
+ int64_t rx_val = (rx == 0) ? 0 : get_register(rx);
+ double ret = static_cast<double>(
+ *reinterpret_cast<float*>(rx_val + rb_val + offset));
+ set_d_register_from_double(r1, ret);
+ break;
+ }
+ case LAY: {
+ // Load Address
+ int r1 = rxyInstr->R1Value();
+ int rb = rxyInstr->B2Value();
+ int rx = rxyInstr->X2Value();
+ int offset = rxyInstr->D2Value();
+ int64_t rb_val = (rb == 0) ? 0 : get_register(rb);
+ int64_t rx_val = (rx == 0) ? 0 : get_register(rx);
+ set_register(r1, rx_val + rb_val + offset);
+ break;
+ }
+ case LARL: {
+ // Load Addresss Relative Long
+ int r1 = rilInstr->R1Value();
+ intptr_t offset = rilInstr->I2Value() * 2;
+ set_register(r1, get_pc() + offset);
+ break;
+ }
+ case LLILF: {
+ // Load Logical into lower 32-bits (zero extend upper 32-bits)
+ int r1 = rilInstr->R1Value();
+ uint64_t imm = static_cast<uint64_t>(rilInstr->I2UnsignedValue());
+ set_register(r1, imm);
+ break;
+ }
+ case LLIHF: {
+ // Load Logical Immediate into high word
+ int r1 = rilInstr->R1Value();
+ uint64_t imm = static_cast<uint64_t>(rilInstr->I2UnsignedValue());
+ set_register(r1, imm << 32);
+ break;
+ }
+ case OILF:
+ case NILF:
+ case IILF: {
+ // Bitwise Op on lower 32-bits
+ int r1 = rilInstr->R1Value();
+ uint32_t imm = rilInstr->I2UnsignedValue();
+ uint32_t alu_out = get_low_register<uint32_t>(r1);
+ if (NILF == op) {
+ alu_out &= imm;
+ SetS390BitWiseConditionCode<uint32_t>(alu_out);
+ } else if (OILF == op) {
+ alu_out |= imm;
+ SetS390BitWiseConditionCode<uint32_t>(alu_out);
+ } else if (op == IILF) {
+ alu_out = imm;
+ } else {
+ DCHECK(false);
+ }
+ set_low_register(r1, alu_out);
+ break;
+ }
+ case OIHF:
+ case NIHF:
+ case IIHF: {
+ // Bitwise Op on upper 32-bits
+ int r1 = rilInstr->R1Value();
+ uint32_t imm = rilInstr->I2Value();
+ uint32_t alu_out = get_high_register<uint32_t>(r1);
+ if (op == NIHF) {
+ alu_out &= imm;
+ SetS390BitWiseConditionCode<uint32_t>(alu_out);
+ } else if (op == OIHF) {
+ alu_out |= imm;
+ SetS390BitWiseConditionCode<uint32_t>(alu_out);
+ } else if (op == IIHF) {
+ alu_out = imm;
+ } else {
+ DCHECK(false);
+ }
+ set_high_register(r1, alu_out);
+ break;
+ }
+ case CLFI: {
+ // Compare Logical with Immediate (32)
+ int r1 = rilInstr->R1Value();
+ uint32_t imm = rilInstr->I2UnsignedValue();
+ SetS390ConditionCode<uint32_t>(get_low_register<uint32_t>(r1), imm);
+ break;
+ }
+ case CFI: {
+ // Compare with Immediate (32)
+ int r1 = rilInstr->R1Value();
+ int32_t imm = rilInstr->I2Value();
+ SetS390ConditionCode<int32_t>(get_low_register<int32_t>(r1), imm);
+ break;
+ }
+ case CLGFI: {
+ // Compare Logical with Immediate (64)
+ int r1 = rilInstr->R1Value();
+ uint64_t imm = static_cast<uint64_t>(rilInstr->I2UnsignedValue());
+ SetS390ConditionCode<uint64_t>(get_register(r1), imm);
+ break;
+ }
+ case CGFI: {
+ // Compare with Immediate (64)
+ int r1 = rilInstr->R1Value();
+ int64_t imm = static_cast<int64_t>(rilInstr->I2Value());
+ SetS390ConditionCode<int64_t>(get_register(r1), imm);
+ break;
+ }
+ case BRASL: {
+ // Branch and Save Relative Long
+ int r1 = rilInstr->R1Value();
+ intptr_t d2 = rilInstr->I2Value();
+ intptr_t pc = get_pc();
+ set_register(r1, pc + 6); // save next instruction to register
+ set_pc(pc + d2 * 2); // update register
+ break;
+ }
+ case BRCL: {
+ // Branch on Condition Relative Long
+ Condition m1 = (Condition)rilInstr->R1Value();
+ if (TestConditionCode((Condition)m1)) {
+ intptr_t offset = rilInstr->I2Value() * 2;
+ set_pc(get_pc() + offset);
+ }
+ break;
+ }
+ case LMG:
+ case STMG: {
+ // Store Multiple 64-bits.
+ int r1 = rsyInstr->R1Value();
+ int r3 = rsyInstr->R3Value();
+ int rb = rsyInstr->B2Value();
+ int offset = rsyInstr->D2Value();
+
+ // Regs roll around if r3 is less than r1.
+ // Artifically increase r3 by 16 so we can calculate
+ // the number of regs stored properly.
+ if (r3 < r1) r3 += 16;
+
+ int64_t rb_val = (rb == 0) ? 0 : get_register(rb);
+
+ // Store each register in ascending order.
+ for (int i = 0; i <= r3 - r1; i++) {
+ if (op == LMG) {
+ int64_t value = ReadDW(rb_val + offset + 8 * i);
+ set_register((r1 + i) % 16, value);
+ } else if (op == STMG) {
+ int64_t value = get_register((r1 + i) % 16);
+ WriteDW(rb_val + offset + 8 * i, value);
+ } else {
+ DCHECK(false);
+ }
+ }
+ break;
+ }
+ case SLLK:
+ case RLL:
+ case SRLK:
+ case SLLG:
+ case RLLG:
+ case SRLG: {
+ DecodeSixByteBitShift(instr);
+ break;
+ }
+ case SLAK:
+ case SRAK: {
+ // 32-bit non-clobbering shift-left/right arithmetic
+ int r1 = rsyInstr->R1Value();
+ int r3 = rsyInstr->R3Value();
+ int b2 = rsyInstr->B2Value();
+ intptr_t d2 = rsyInstr->D2Value();
+ // only takes rightmost 6 bits
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int shiftBits = (b2_val + d2) & 0x3F;
+ int32_t r3_val = get_low_register<int32_t>(r3);
+ int32_t alu_out = 0;
+ bool isOF = false;
+ if (op == SLAK) {
+ isOF = CheckOverflowForShiftLeft(r3_val, shiftBits);
+ alu_out = r3_val << shiftBits;
+ } else if (op == SRAK) {
+ alu_out = r3_val >> shiftBits;
+ }
+ set_low_register(r1, alu_out);
+ SetS390ConditionCode<int32_t>(alu_out, 0);
+ SetS390OverflowCode(isOF);
+ break;
+ }
+ case SLAG:
+ case SRAG: {
+ // 64-bit non-clobbering shift-left/right arithmetic
+ int r1 = rsyInstr->R1Value();
+ int r3 = rsyInstr->R3Value();
+ int b2 = rsyInstr->B2Value();
+ intptr_t d2 = rsyInstr->D2Value();
+ // only takes rightmost 6 bits
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int shiftBits = (b2_val + d2) & 0x3F;
+ int64_t r3_val = get_register(r3);
+ intptr_t alu_out = 0;
+ bool isOF = false;
+ if (op == SLAG) {
+ isOF = CheckOverflowForShiftLeft(r3_val, shiftBits);
+ alu_out = r3_val << shiftBits;
+ } else if (op == SRAG) {
+ alu_out = r3_val >> shiftBits;
+ }
+ set_register(r1, alu_out);
+ SetS390ConditionCode<intptr_t>(alu_out, 0);
+ SetS390OverflowCode(isOF);
+ break;
+ }
+ case LMY:
+ case STMY: {
+ RSYInstruction* rsyInstr = reinterpret_cast<RSYInstruction*>(instr);
+ // Load/Store Multiple (32)
+ int r1 = rsyInstr->R1Value();
+ int r3 = rsyInstr->R3Value();
+ int b2 = rsyInstr->B2Value();
+ int offset = rsyInstr->D2Value();
+
+ // Regs roll around if r3 is less than r1.
+ // Artifically increase r3 by 16 so we can calculate
+ // the number of regs stored properly.
+ if (r3 < r1) r3 += 16;
+
+ int32_t b2_val = (b2 == 0) ? 0 : get_low_register<int32_t>(b2);
+
+ // Store each register in ascending order.
+ for (int i = 0; i <= r3 - r1; i++) {
+ if (op == LMY) {
+ int32_t value = ReadW(b2_val + offset + 4 * i, instr);
+ set_low_register((r1 + i) % 16, value);
+ } else {
+ int32_t value = get_low_register<int32_t>((r1 + i) % 16);
+ WriteW(b2_val + offset + 4 * i, value, instr);
+ }
+ }
+ break;
+ }
+ case LT:
+ case LTG: {
+ // Load and Test (32/64)
+ int r1 = rxyInstr->R1Value();
+ int x2 = rxyInstr->X2Value();
+ int b2 = rxyInstr->B2Value();
+ int d2 = rxyInstr->D2Value();
+
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ intptr_t addr = x2_val + b2_val + d2;
+
+ if (op == LT) {
+ int32_t value = ReadW(addr, instr);
+ set_low_register(r1, value);
+ SetS390ConditionCode<int32_t>(value, 0);
+ } else if (op == LTG) {
+ int64_t value = ReadDW(addr);
+ set_register(r1, value);
+ SetS390ConditionCode<int64_t>(value, 0);
+ }
+ break;
+ }
+ case LY:
+ case LB:
+ case LGB:
+ case LG:
+ case LGF:
+ case LGH:
+ case LLGF:
+ case STG:
+ case STY:
+ case STCY:
+ case STHY:
+ case STEY:
+ case LDY:
+ case LHY:
+ case STDY:
+ case LEY: {
+ // Miscellaneous Loads and Stores
+ int r1 = rxyInstr->R1Value();
+ int x2 = rxyInstr->X2Value();
+ int b2 = rxyInstr->B2Value();
+ int d2 = rxyInstr->D2Value();
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ intptr_t addr = x2_val + b2_val + d2;
+ if (op == LY) {
+ uint32_t mem_val = ReadWU(addr, instr);
+ set_low_register(r1, mem_val);
+ } else if (op == LB) {
+ int32_t mem_val = ReadB(addr);
+ set_low_register(r1, mem_val);
+ } else if (op == LGB) {
+ int64_t mem_val = ReadB(addr);
+ set_register(r1, mem_val);
+ } else if (op == LG) {
+ int64_t mem_val = ReadDW(addr);
+ set_register(r1, mem_val);
+ } else if (op == LGF) {
+ int64_t mem_val = static_cast<int64_t>(ReadW(addr, instr));
+ set_register(r1, mem_val);
+ } else if (op == LGH) {
+ int64_t mem_val = static_cast<int64_t>(ReadH(addr, instr));
+ set_register(r1, mem_val);
+ } else if (op == LLGF) {
+ // int r1 = rreInst->R1Value();
+ // int r2 = rreInst->R2Value();
+ // int32_t r2_val = get_low_register<int32_t>(r2);
+ // uint64_t r2_finalval = (static_cast<uint64_t>(r2_val)
+ // & 0x00000000ffffffff);
+ // set_register(r1, r2_finalval);
+ // break;
+ uint64_t mem_val = static_cast<uint64_t>(ReadWU(addr, instr));
+ set_register(r1, mem_val);
+ } else if (op == LDY) {
+ uint64_t dbl_val = *reinterpret_cast<uint64_t*>(addr);
+ set_d_register(r1, dbl_val);
+ } else if (op == STEY) {
+ int64_t frs_val = get_d_register(r1) >> 32;
+ WriteW(addr, static_cast<int32_t>(frs_val), instr);
+ } else if (op == LEY) {
+ float float_val = *reinterpret_cast<float*>(addr);
+ set_d_register_from_float32(r1, float_val);
+ } else if (op == STY) {
+ uint32_t value = get_low_register<uint32_t>(r1);
+ WriteW(addr, value, instr);
+ } else if (op == STG) {
+ uint64_t value = get_register(r1);
+ WriteDW(addr, value);
+ } else if (op == STDY) {
+ int64_t frs_val = get_d_register(r1);
+ WriteDW(addr, frs_val);
+ } else if (op == STCY) {
+ uint8_t value = get_low_register<uint32_t>(r1);
+ WriteB(addr, value);
+ } else if (op == STHY) {
+ uint16_t value = get_low_register<uint32_t>(r1);
+ WriteH(addr, value, instr);
+ } else if (op == LHY) {
+ int32_t result = static_cast<int32_t>(ReadH(addr, instr));
+ set_low_register(r1, result);
+ }
+ break;
+ }
+ case MVC: {
+ // Move Character
+ int b1 = ssInstr->B1Value();
+ intptr_t d1 = ssInstr->D1Value();
+ int b2 = ssInstr->B2Value();
+ intptr_t d2 = ssInstr->D2Value();
+ int length = ssInstr->Length();
+ int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ intptr_t src_addr = b2_val + d2;
+ intptr_t dst_addr = b1_val + d1;
+ // remember that the length is the actual length - 1
+ for (int i = 0; i < length + 1; ++i) {
+ WriteB(dst_addr++, ReadB(src_addr++));
+ }
+ break;
+ }
+ case MVHI: {
+ // Move Integer (32)
+ int b1 = silInstr->B1Value();
+ intptr_t d1 = silInstr->D1Value();
+ int16_t i2 = silInstr->I2Value();
+ int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+ intptr_t src_addr = b1_val + d1;
+ WriteW(src_addr, i2, instr);
+ break;
+ }
+ case MVGHI: {
+ // Move Integer (64)
+ int b1 = silInstr->B1Value();
+ intptr_t d1 = silInstr->D1Value();
+ int16_t i2 = silInstr->I2Value();
+ int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+ intptr_t src_addr = b1_val + d1;
+ WriteDW(src_addr, i2);
+ break;
+ }
+ case LLH:
+ case LLGH: {
+ // Load Logical Halfworld
+ int r1 = rxyInstr->R1Value();
+ int b2 = rxyInstr->B2Value();
+ int x2 = rxyInstr->X2Value();
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t d2_val = rxyInstr->D2Value();
+ uint16_t mem_val = ReadHU(b2_val + d2_val + x2_val, instr);
+ if (op == LLH) {
+ set_low_register(r1, mem_val);
+ } else if (op == LLGH) {
+ set_register(r1, mem_val);
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+ case LLC:
+ case LLGC: {
+ // Load Logical Character - loads a byte and zero extends.
+ int r1 = rxyInstr->R1Value();
+ int b2 = rxyInstr->B2Value();
+ int x2 = rxyInstr->X2Value();
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t d2_val = rxyInstr->D2Value();
+ uint8_t mem_val = ReadBU(b2_val + d2_val + x2_val);
+ if (op == LLC) {
+ set_low_register(r1, static_cast<uint32_t>(mem_val));
+ } else if (op == LLGC) {
+ set_register(r1, static_cast<uint64_t>(mem_val));
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+ case XIHF:
+ case XILF: {
+ int r1 = rilInstr->R1Value();
+ uint32_t imm = rilInstr->I2UnsignedValue();
+ uint32_t alu_out = 0;
+ if (op == XILF) {
+ alu_out = get_low_register<uint32_t>(r1);
+ alu_out = alu_out ^ imm;
+ set_low_register(r1, alu_out);
+ } else if (op == XIHF) {
+ alu_out = get_high_register<uint32_t>(r1);
+ alu_out = alu_out ^ imm;
+ set_high_register(r1, alu_out);
+ } else {
+ UNREACHABLE();
+ }
+ SetS390BitWiseConditionCode<uint32_t>(alu_out);
+ break;
+ }
+ case RISBG: {
+ // Rotate then insert selected bits
+ int r1 = rieInstr->R1Value();
+ int r2 = rieInstr->R2Value();
+ // Starting Bit Position is Bits 2-7 of I3 field
+ uint32_t start_bit = rieInstr->I3Value() & 0x3F;
+ // Ending Bit Position is Bits 2-7 of I4 field
+ uint32_t end_bit = rieInstr->I4Value() & 0x3F;
+ // Shift Amount is Bits 2-7 of I5 field
+ uint32_t shift_amount = rieInstr->I5Value() & 0x3F;
+ // Zero out Remaining (unslected) bits if Bit 0 of I4 is 1.
+ bool zero_remaining = (0 != (rieInstr->I4Value() & 0x80));
+
+ uint64_t src_val = get_register(r2);
+
+ // Rotate Left by Shift Amount first
+ uint64_t rotated_val =
+ (src_val << shift_amount) | (src_val >> (64 - shift_amount));
+ int32_t width = end_bit - start_bit + 1;
+
+ uint64_t selection_mask = 0;
+ if (width < 64) {
+ selection_mask = (static_cast<uint64_t>(1) << width) - 1;
+ } else {
+ selection_mask = static_cast<uint64_t>(static_cast<int64_t>(-1));
+ }
+ selection_mask = selection_mask << (63 - end_bit);
+
+ uint64_t selected_val = rotated_val & selection_mask;
+
+ if (!zero_remaining) {
+ // Merged the unselected bits from the original value
+ selected_val = (src_val & ~selection_mask) | selected_val;
+ }
+
+ // Condition code is set by treating result as 64-bit signed int
+ SetS390ConditionCode<int64_t>(selected_val, 0);
+ set_register(r1, selected_val);
+ break;
+ }
+ default:
+ return DecodeSixByteArithmetic(instr);
+ }
+ return true;
+}
+
+void Simulator::DecodeSixByteBitShift(Instruction* instr) {
+ Opcode op = instr->S390OpcodeValue();
+
+ // Pre-cast instruction to various types
+
+ RSYInstruction* rsyInstr = reinterpret_cast<RSYInstruction*>(instr);
+
+ switch (op) {
+ case SLLK:
+ case RLL:
+ case SRLK: {
+ // For SLLK/SRLL, the 32-bit third operand is shifted the number
+ // of bits specified by the second-operand address, and the result is
+ // placed at the first-operand location. Except for when the R1 and R3
+ // fields designate the same register, the third operand remains
+ // unchanged in general register R3.
+ int r1 = rsyInstr->R1Value();
+ int r3 = rsyInstr->R3Value();
+ int b2 = rsyInstr->B2Value();
+ intptr_t d2 = rsyInstr->D2Value();
+ // only takes rightmost 6 bits
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int shiftBits = (b2_val + d2) & 0x3F;
+ // unsigned
+ uint32_t r3_val = get_low_register<uint32_t>(r3);
+ uint32_t alu_out = 0;
+ if (SLLK == op) {
+ alu_out = r3_val << shiftBits;
+ } else if (SRLK == op) {
+ alu_out = r3_val >> shiftBits;
+ } else if (RLL == op) {
+ uint32_t rotateBits = r3_val >> (32 - shiftBits);
+ alu_out = (r3_val << shiftBits) | (rotateBits);
+ } else {
+ UNREACHABLE();
+ }
+ set_low_register(r1, alu_out);
+ break;
+ }
+ case SLLG:
+ case RLLG:
+ case SRLG: {
+ // For SLLG/SRLG, the 64-bit third operand is shifted the number
+ // of bits specified by the second-operand address, and the result is
+ // placed at the first-operand location. Except for when the R1 and R3
+ // fields designate the same register, the third operand remains
+ // unchanged in general register R3.
+ int r1 = rsyInstr->R1Value();
+ int r3 = rsyInstr->R3Value();
+ int b2 = rsyInstr->B2Value();
+ intptr_t d2 = rsyInstr->D2Value();
+ // only takes rightmost 6 bits
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int shiftBits = (b2_val + d2) & 0x3F;
+ // unsigned
+ uint64_t r3_val = get_register(r3);
+ uint64_t alu_out = 0;
+ if (op == SLLG) {
+ alu_out = r3_val << shiftBits;
+ } else if (op == SRLG) {
+ alu_out = r3_val >> shiftBits;
+ } else if (op == RLLG) {
+ uint64_t rotateBits = r3_val >> (64 - shiftBits);
+ alu_out = (r3_val << shiftBits) | (rotateBits);
+ } else {
+ UNREACHABLE();
+ }
+ set_register(r1, alu_out);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+/**
+ * Decodes and simulates six byte arithmetic instructions
+ */
+bool Simulator::DecodeSixByteArithmetic(Instruction* instr) {
+ Opcode op = instr->S390OpcodeValue();
+
+ // Pre-cast instruction to various types
+ SIYInstruction* siyInstr = reinterpret_cast<SIYInstruction*>(instr);
+
+ switch (op) {
+ case CDB:
+ case ADB:
+ case SDB:
+ case MDB:
+ case DDB:
+ case SQDB: {
+ RXEInstruction* rxeInstr = reinterpret_cast<RXEInstruction*>(instr);
+ int b2 = rxeInstr->B2Value();
+ int x2 = rxeInstr->X2Value();
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t d2_val = rxeInstr->D2Value();
+ double r1_val = get_double_from_d_register(rxeInstr->R1Value());
+ double dbl_val = ReadDouble(b2_val + x2_val + d2_val);
+
+ switch (op) {
+ case CDB:
+ SetS390ConditionCode<double>(r1_val, dbl_val);
+ break;
+ case ADB:
+ r1_val += dbl_val;
+ set_d_register_from_double(r1, r1_val);
+ SetS390ConditionCode<double>(r1_val, 0);
+ break;
+ case SDB:
+ r1_val -= dbl_val;
+ set_d_register_from_double(r1, r1_val);
+ SetS390ConditionCode<double>(r1_val, 0);
+ break;
+ case MDB:
+ r1_val *= dbl_val;
+ set_d_register_from_double(r1, r1_val);
+ SetS390ConditionCode<double>(r1_val, 0);
+ break;
+ case DDB:
+ r1_val /= dbl_val;
+ set_d_register_from_double(r1, r1_val);
+ SetS390ConditionCode<double>(r1_val, 0);
+ break;
+ case SQDB:
+ r1_val = std::sqrt(dbl_val);
+ set_d_register_from_double(r1, r1_val);
+ default:
+ UNREACHABLE();
+ break;
+ }
+ break;
+ }
+ case LRV:
+ case LRVH:
+ case STRV:
+ case STRVH: {
+ RXYInstruction* rxyInstr = reinterpret_cast<RXYInstruction*>(instr);
+ int r1 = rxyInstr->R1Value();
+ int x2 = rxyInstr->X2Value();
+ int b2 = rxyInstr->B2Value();
+ int d2 = rxyInstr->D2Value();
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ intptr_t mem_addr = b2_val + x2_val + d2;
+
+ if (op == LRVH) {
+ int16_t mem_val = ReadH(mem_addr, instr);
+ int32_t result = ByteReverse(mem_val) & 0x0000ffff;
+ result |= r1_val & 0xffff0000;
+ set_low_register(r1, result);
+ } else if (op == LRV) {
+ int32_t mem_val = ReadW(mem_addr, instr);
+ set_low_register(r1, ByteReverse(mem_val));
+ } else if (op == STRVH) {
+ int16_t result = static_cast<int16_t>(r1_val >> 16);
+ WriteH(mem_addr, ByteReverse(result), instr);
+ } else if (op == STRV) {
+ WriteW(mem_addr, ByteReverse(r1_val), instr);
+ }
+
+ break;
+ }
+ case AHIK:
+ case AGHIK: {
+ // Non-clobbering Add Halfword Immediate
+ RIEInstruction* rieInst = reinterpret_cast<RIEInstruction*>(instr);
+ int r1 = rieInst->R1Value();
+ int r2 = rieInst->R2Value();
+ bool isOF = false;
+ if (AHIK == op) {
+ // 32-bit Add
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ int32_t imm = rieInst->I6Value();
+ isOF = CheckOverflowForIntAdd(r2_val, imm, int32_t);
+ set_low_register(r1, r2_val + imm);
+ SetS390ConditionCode<int32_t>(r2_val + imm, 0);
+ } else if (AGHIK == op) {
+ // 64-bit Add
+ int64_t r2_val = get_register(r2);
+ int64_t imm = static_cast<int64_t>(rieInst->I6Value());
+ isOF = CheckOverflowForIntAdd(r2_val, imm, int64_t);
+ set_register(r1, r2_val + imm);
+ SetS390ConditionCode<int64_t>(r2_val + imm, 0);
+ }
+ SetS390OverflowCode(isOF);
+ break;
+ }
+ case ALFI:
+ case SLFI: {
+ RILInstruction* rilInstr = reinterpret_cast<RILInstruction*>(instr);
+ int r1 = rilInstr->R1Value();
+ uint32_t imm = rilInstr->I2UnsignedValue();
+ uint32_t alu_out = get_low_register<uint32_t>(r1);
+ if (op == ALFI) {
+ alu_out += imm;
+ } else if (op == SLFI) {
+ alu_out -= imm;
+ }
+ SetS390ConditionCode<uint32_t>(alu_out, 0);
+ set_low_register(r1, alu_out);
+ break;
+ }
+ case ML: {
+ UNIMPLEMENTED();
+ break;
+ }
+ case AY:
+ case SY:
+ case NY:
+ case OY:
+ case XY:
+ case CY: {
+ RXYInstruction* rxyInstr = reinterpret_cast<RXYInstruction*>(instr);
+ int r1 = rxyInstr->R1Value();
+ int x2 = rxyInstr->X2Value();
+ int b2 = rxyInstr->B2Value();
+ int d2 = rxyInstr->D2Value();
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int32_t alu_out = get_low_register<int32_t>(r1);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+ bool isOF = false;
+ if (op == AY) {
+ isOF = CheckOverflowForIntAdd(alu_out, mem_val, int32_t);
+ alu_out += mem_val;
+ SetS390ConditionCode<int32_t>(alu_out, 0);
+ SetS390OverflowCode(isOF);
+ } else if (op == SY) {
+ isOF = CheckOverflowForIntSub(alu_out, mem_val, int32_t);
+ alu_out -= mem_val;
+ SetS390ConditionCode<int32_t>(alu_out, 0);
+ SetS390OverflowCode(isOF);
+ } else if (op == NY) {
+ alu_out &= mem_val;
+ SetS390BitWiseConditionCode<uint32_t>(alu_out);
+ } else if (op == OY) {
+ alu_out |= mem_val;
+ SetS390BitWiseConditionCode<uint32_t>(alu_out);
+ } else if (op == XY) {
+ alu_out ^= mem_val;
+ SetS390BitWiseConditionCode<uint32_t>(alu_out);
+ } else if (op == CY) {
+ SetS390ConditionCode<int32_t>(alu_out, mem_val);
+ }
+ if (op != CY) {
+ set_low_register(r1, alu_out);
+ }
+ break;
+ }
+ case AHY:
+ case SHY: {
+ RXYInstruction* rxyInstr = reinterpret_cast<RXYInstruction*>(instr);
+ int32_t r1_val = get_low_register<int32_t>(rxyInstr->R1Value());
+ int b2 = rxyInstr->B2Value();
+ int x2 = rxyInstr->X2Value();
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t d2_val = rxyInstr->D2Value();
+ int32_t mem_val =
+ static_cast<int32_t>(ReadH(b2_val + d2_val + x2_val, instr));
+ int32_t alu_out = 0;
+ bool isOF = false;
+ switch (op) {
+ case AHY:
+ alu_out = r1_val + mem_val;
+ isOF = CheckOverflowForIntAdd(r1_val, mem_val, int32_t);
+ break;
+ case SHY:
+ alu_out = r1_val - mem_val;
+ isOF = CheckOverflowForIntSub(r1_val, mem_val, int64_t);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ set_low_register(r1, alu_out);
+ SetS390ConditionCode<int32_t>(alu_out, 0);
+ SetS390OverflowCode(isOF);
+ break;
+ }
+ case AG:
+ case SG:
+ case NG:
+ case OG:
+ case XG:
+ case CG:
+ case CLG: {
+ RXYInstruction* rxyInstr = reinterpret_cast<RXYInstruction*>(instr);
+ int r1 = rxyInstr->R1Value();
+ int x2 = rxyInstr->X2Value();
+ int b2 = rxyInstr->B2Value();
+ int d2 = rxyInstr->D2Value();
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t alu_out = get_register(r1);
+ int64_t mem_val = ReadDW(b2_val + x2_val + d2);
+
+ switch (op) {
+ case AG: {
+ alu_out += mem_val;
+ SetS390ConditionCode<int32_t>(alu_out, 0);
+ break;
+ }
+ case SG: {
+ alu_out -= mem_val;
+ SetS390ConditionCode<int32_t>(alu_out, 0);
+ break;
+ }
+ case NG: {
+ alu_out &= mem_val;
+ SetS390BitWiseConditionCode<uint32_t>(alu_out);
+ break;
+ }
+ case OG: {
+ alu_out |= mem_val;
+ SetS390BitWiseConditionCode<uint32_t>(alu_out);
+ break;
+ }
+ case XG: {
+ alu_out ^= mem_val;
+ SetS390BitWiseConditionCode<uint32_t>(alu_out);
+ break;
+ }
+ case CG: {
+ SetS390ConditionCode<int64_t>(alu_out, mem_val);
+ break;
+ }
+ case CLG: {
+ SetS390ConditionCode<uint64_t>(alu_out, mem_val);
+ break;
+ }
+ default: {
+ DCHECK(false);
+ break;
+ }
+ }
+
+ if (op != CG) {
+ set_register(r1, alu_out);
+ }
+ break;
+ }
+ case ALY:
+ case SLY:
+ case CLY: {
+ RXYInstruction* rxyInstr = reinterpret_cast<RXYInstruction*>(instr);
+ int r1 = rxyInstr->R1Value();
+ int x2 = rxyInstr->X2Value();
+ int b2 = rxyInstr->B2Value();
+ int d2 = rxyInstr->D2Value();
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ uint32_t alu_out = get_low_register<uint32_t>(r1);
+ uint32_t mem_val = ReadWU(b2_val + x2_val + d2, instr);
+
+ if (op == ALY) {
+ alu_out += mem_val;
+ set_low_register(r1, alu_out);
+ SetS390ConditionCode<uint32_t>(alu_out, 0);
+ } else if (op == SLY) {
+ alu_out -= mem_val;
+ set_low_register(r1, alu_out);
+ SetS390ConditionCode<uint32_t>(alu_out, 0);
+ } else if (op == CLY) {
+ SetS390ConditionCode<uint32_t>(alu_out, mem_val);
+ }
+ break;
+ }
+ case AGFI:
+ case AFI: {
+ // Clobbering Add Word Immediate
+ RILInstruction* rilInstr = reinterpret_cast<RILInstruction*>(instr);
+ int32_t r1 = rilInstr->R1Value();
+ bool isOF = false;
+ if (AFI == op) {
+ // 32-bit Add (Register + 32-bit Immediate)
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ int32_t i2 = rilInstr->I2Value();
+ isOF = CheckOverflowForIntAdd(r1_val, i2, int32_t);
+ int32_t alu_out = r1_val + i2;
+ set_low_register(r1, alu_out);
+ SetS390ConditionCode<int32_t>(alu_out, 0);
+ } else if (AGFI == op) {
+ // 64-bit Add (Register + 32-bit Imm)
+ int64_t r1_val = get_register(r1);
+ int64_t i2 = static_cast<int64_t>(rilInstr->I2Value());
+ isOF = CheckOverflowForIntAdd(r1_val, i2, int64_t);
+ int64_t alu_out = r1_val + i2;
+ set_register(r1, alu_out);
+ SetS390ConditionCode<int64_t>(alu_out, 0);
+ }
+ SetS390OverflowCode(isOF);
+ break;
+ }
+ case ASI: {
+ // TODO(bcleung): Change all fooInstr->I2Value() to template functions.
+ // The below static cast to 8 bit and then to 32 bit is necessary
+ // because siyInstr->I2Value() returns a uint8_t, which a direct
+ // cast to int32_t could incorrectly interpret.
+ int8_t i2_8bit = static_cast<int8_t>(siyInstr->I2Value());
+ int32_t i2 = static_cast<int32_t>(i2_8bit);
+ int b1 = siyInstr->B1Value();
+ intptr_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+
+ int d1_val = siyInstr->D1Value();
+ intptr_t addr = b1_val + d1_val;
+
+ int32_t mem_val = ReadW(addr, instr);
+ bool isOF = CheckOverflowForIntAdd(mem_val, i2, int32_t);
+ int32_t alu_out = mem_val + i2;
+ SetS390ConditionCode<int32_t>(alu_out, 0);
+ SetS390OverflowCode(isOF);
+ WriteW(addr, alu_out, instr);
+ break;
+ }
+ case AGSI: {
+ // TODO(bcleung): Change all fooInstr->I2Value() to template functions.
+ // The below static cast to 8 bit and then to 32 bit is necessary
+ // because siyInstr->I2Value() returns a uint8_t, which a direct
+ // cast to int32_t could incorrectly interpret.
+ int8_t i2_8bit = static_cast<int8_t>(siyInstr->I2Value());
+ int64_t i2 = static_cast<int64_t>(i2_8bit);
+ int b1 = siyInstr->B1Value();
+ intptr_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+
+ int d1_val = siyInstr->D1Value();
+ intptr_t addr = b1_val + d1_val;
+
+ int64_t mem_val = ReadDW(addr);
+ int isOF = CheckOverflowForIntAdd(mem_val, i2, int64_t);
+ int64_t alu_out = mem_val + i2;
+ SetS390ConditionCode<uint64_t>(alu_out, 0);
+ SetS390OverflowCode(isOF);
+ WriteDW(addr, alu_out);
+ break;
+ }
+ case AGF:
+ case SGF:
+ case ALG:
+ case SLG: {
+#ifndef V8_TARGET_ARCH_S390X
+ DCHECK(false);
+#endif
+ RXYInstruction* rxyInstr = reinterpret_cast<RXYInstruction*>(instr);
+ int r1 = rxyInstr->R1Value();
+ uint64_t r1_val = get_register(rxyInstr->R1Value());
+ int b2 = rxyInstr->B2Value();
+ int x2 = rxyInstr->X2Value();
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t d2_val = rxyInstr->D2Value();
+ uint64_t alu_out = r1_val;
+ if (op == ALG) {
+ uint64_t mem_val =
+ static_cast<uint64_t>(ReadDW(b2_val + d2_val + x2_val));
+ alu_out += mem_val;
+ SetS390ConditionCode<uint64_t>(alu_out, 0);
+ } else if (op == SLG) {
+ uint64_t mem_val =
+ static_cast<uint64_t>(ReadDW(b2_val + d2_val + x2_val));
+ alu_out -= mem_val;
+ SetS390ConditionCode<uint64_t>(alu_out, 0);
+ } else if (op == AGF) {
+ uint32_t mem_val = ReadW(b2_val + d2_val + x2_val, instr);
+ alu_out += mem_val;
+ SetS390ConditionCode<int64_t>(alu_out, 0);
+ } else if (op == SGF) {
+ uint32_t mem_val = ReadW(b2_val + d2_val + x2_val, instr);
+ alu_out -= mem_val;
+ SetS390ConditionCode<int64_t>(alu_out, 0);
+ } else {
+ DCHECK(false);
+ }
+ set_register(r1, alu_out);
+ break;
+ }
+ case ALGFI:
+ case SLGFI: {
+#ifndef V8_TARGET_ARCH_S390X
+ // should only be called on 64bit
+ DCHECK(false);
+#endif
+ RILInstruction* rilInstr = reinterpret_cast<RILInstruction*>(instr);
+ int r1 = rilInstr->R1Value();
+ uint32_t i2 = rilInstr->I2UnsignedValue();
+ uint64_t r1_val = (uint64_t)(get_register(r1));
+ uint64_t alu_out;
+ if (op == ALGFI)
+ alu_out = r1_val + i2;
+ else
+ alu_out = r1_val - i2;
+ set_register(r1, (intptr_t)alu_out);
+ SetS390ConditionCode<uint64_t>(alu_out, 0);
+ break;
+ }
+ case MSY:
+ case MSG: {
+ RXYInstruction* rxyInstr = reinterpret_cast<RXYInstruction*>(instr);
+ int r1 = rxyInstr->R1Value();
+ int b2 = rxyInstr->B2Value();
+ int x2 = rxyInstr->X2Value();
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t d2_val = rxyInstr->D2Value();
+ if (op == MSY) {
+ int32_t mem_val = ReadW(b2_val + d2_val + x2_val, instr);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ set_low_register(r1, mem_val * r1_val);
+ } else if (op == MSG) {
+ int64_t mem_val = ReadDW(b2_val + d2_val + x2_val);
+ int64_t r1_val = get_register(r1);
+ set_register(r1, mem_val * r1_val);
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+ case MSFI:
+ case MSGFI: {
+ RILInstruction* rilinst = reinterpret_cast<RILInstruction*>(instr);
+ int r1 = rilinst->R1Value();
+ int32_t i2 = rilinst->I2Value();
+ if (op == MSFI) {
+ int32_t alu_out = get_low_register<int32_t>(r1);
+ alu_out = alu_out * i2;
+ set_low_register(r1, alu_out);
+ } else if (op == MSGFI) {
+ int64_t alu_out = get_register(r1);
+ alu_out = alu_out * i2;
+ set_register(r1, alu_out);
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ return false;
+ }
+ return true;
+}
+
+int16_t Simulator::ByteReverse(int16_t hword) {
+ return (hword << 8) | ((hword >> 8) & 0x00ff);
+}
+
+int32_t Simulator::ByteReverse(int32_t word) {
+ int32_t result = word << 24;
+ result |= (word << 8) & 0x00ff0000;
+ result |= (word >> 8) & 0x0000ff00;
+ result |= (word >> 24) & 0x00000ff;
+ return result;
+}
+
+// Executes the current instruction.
+void Simulator::ExecuteInstruction(Instruction* instr, bool auto_incr_pc) {
+ if (v8::internal::FLAG_check_icache) {
+ CheckICache(isolate_->simulator_i_cache(), instr);
+ }
+ pc_modified_ = false;
+ if (::v8::internal::FLAG_trace_sim) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // use a reasonably large buffer
+ v8::internal::EmbeddedVector<char, 256> buffer;
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
+#ifdef V8_TARGET_ARCH_S390X
+ PrintF("%05ld %08" V8PRIxPTR " %s\n", icount_,
+ reinterpret_cast<intptr_t>(instr), buffer.start());
+#else
+ PrintF("%05lld %08" V8PRIxPTR " %s\n", icount_,
+ reinterpret_cast<intptr_t>(instr), buffer.start());
+#endif
+ // Flush stdout to prevent incomplete file output during abnormal exits
+ // This is caused by the output being buffered before being written to file
+ fflush(stdout);
+ }
+
+ // Try to simulate as S390 Instruction first.
+ bool processed = true;
+
+ int instrLength = instr->InstructionLength();
+ if (instrLength == 2)
+ processed = DecodeTwoByte(instr);
+ else if (instrLength == 4)
+ processed = DecodeFourByte(instr);
+ else if (instrLength == 6)
+ processed = DecodeSixByte(instr);
+
+ if (processed) {
+ if (!pc_modified_ && auto_incr_pc) {
+ set_pc(reinterpret_cast<intptr_t>(instr) + instrLength);
+ }
+ return;
+ }
+}
+
+void Simulator::DebugStart() {
+ S390Debugger dbg(this);
+ dbg.Debug();
+}
+
+void Simulator::Execute() {
+ // Get the PC to simulate. Cannot use the accessor here as we need the
+ // raw PC value and not the one used as input to arithmetic instructions.
+ intptr_t program_counter = get_pc();
+
+ if (::v8::internal::FLAG_stop_sim_at == 0) {
+ // Fast version of the dispatch loop without checking whether the simulator
+ // should be stopping at a particular executed instruction.
+ while (program_counter != end_sim_pc) {
+ Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
+ icount_++;
+ ExecuteInstruction(instr);
+ program_counter = get_pc();
+ }
+ } else {
+ // FLAG_stop_sim_at is at the non-default value. Stop in the debugger when
+ // we reach the particular instuction count.
+ while (program_counter != end_sim_pc) {
+ Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
+ icount_++;
+ if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
+ S390Debugger dbg(this);
+ dbg.Debug();
+ } else {
+ ExecuteInstruction(instr);
+ }
+ program_counter = get_pc();
+ }
+ }
+}
+
+void Simulator::CallInternal(byte* entry, int reg_arg_count) {
+ // Prepare to execute the code at entry
+ if (ABI_USES_FUNCTION_DESCRIPTORS) {
+ // entry is the function descriptor
+ set_pc(*(reinterpret_cast<intptr_t*>(entry)));
+ } else {
+ // entry is the instruction address
+ set_pc(reinterpret_cast<intptr_t>(entry));
+ }
+ // Remember the values of non-volatile registers.
+ int64_t r6_val = get_register(r6);
+ int64_t r7_val = get_register(r7);
+ int64_t r8_val = get_register(r8);
+ int64_t r9_val = get_register(r9);
+ int64_t r10_val = get_register(r10);
+ int64_t r11_val = get_register(r11);
+ int64_t r12_val = get_register(r12);
+ int64_t r13_val = get_register(r13);
+
+ if (ABI_CALL_VIA_IP) {
+ // Put target address in ip (for JS prologue).
+ set_register(ip, get_pc());
+ }
+
+ // Put down marker for end of simulation. The simulator will stop simulation
+ // when the PC reaches this value. By saving the "end simulation" value into
+ // the LR the simulation stops when returning to this call point.
+ registers_[14] = end_sim_pc;
+
+ // Set up the non-volatile registers with a known value. To be able to check
+ // that they are preserved properly across JS execution.
+ intptr_t callee_saved_value = icount_;
+ if (reg_arg_count < 5) {
+ set_register(r6, callee_saved_value + 6);
+ }
+ set_register(r7, callee_saved_value + 7);
+ set_register(r8, callee_saved_value + 8);
+ set_register(r9, callee_saved_value + 9);
+ set_register(r10, callee_saved_value + 10);
+ set_register(r11, callee_saved_value + 11);
+ set_register(r12, callee_saved_value + 12);
+ set_register(r13, callee_saved_value + 13);
+
+ // Start the simulation
+ Execute();
+
+// Check that the non-volatile registers have been preserved.
+#ifndef V8_TARGET_ARCH_S390X
+ if (reg_arg_count < 5) {
+ DCHECK_EQ(callee_saved_value + 6, get_low_register<int32_t>(r6));
+ }
+ DCHECK_EQ(callee_saved_value + 7, get_low_register<int32_t>(r7));
+ DCHECK_EQ(callee_saved_value + 8, get_low_register<int32_t>(r8));
+ DCHECK_EQ(callee_saved_value + 9, get_low_register<int32_t>(r9));
+ DCHECK_EQ(callee_saved_value + 10, get_low_register<int32_t>(r10));
+ DCHECK_EQ(callee_saved_value + 11, get_low_register<int32_t>(r11));
+ DCHECK_EQ(callee_saved_value + 12, get_low_register<int32_t>(r12));
+ DCHECK_EQ(callee_saved_value + 13, get_low_register<int32_t>(r13));
+#else
+ if (reg_arg_count < 5) {
+ DCHECK_EQ(callee_saved_value + 6, get_register(r6));
+ }
+ DCHECK_EQ(callee_saved_value + 7, get_register(r7));
+ DCHECK_EQ(callee_saved_value + 8, get_register(r8));
+ DCHECK_EQ(callee_saved_value + 9, get_register(r9));
+ DCHECK_EQ(callee_saved_value + 10, get_register(r10));
+ DCHECK_EQ(callee_saved_value + 11, get_register(r11));
+ DCHECK_EQ(callee_saved_value + 12, get_register(r12));
+ DCHECK_EQ(callee_saved_value + 13, get_register(r13));
+#endif
+
+ // Restore non-volatile registers with the original value.
+ set_register(r6, r6_val);
+ set_register(r7, r7_val);
+ set_register(r8, r8_val);
+ set_register(r9, r9_val);
+ set_register(r10, r10_val);
+ set_register(r11, r11_val);
+ set_register(r12, r12_val);
+ set_register(r13, r13_val);
+}
+
+intptr_t Simulator::Call(byte* entry, int argument_count, ...) {
+ // Remember the values of non-volatile registers.
+ int64_t r6_val = get_register(r6);
+ int64_t r7_val = get_register(r7);
+ int64_t r8_val = get_register(r8);
+ int64_t r9_val = get_register(r9);
+ int64_t r10_val = get_register(r10);
+ int64_t r11_val = get_register(r11);
+ int64_t r12_val = get_register(r12);
+ int64_t r13_val = get_register(r13);
+
+ va_list parameters;
+ va_start(parameters, argument_count);
+ // Set up arguments
+
+ // First 5 arguments passed in registers r2-r6.
+ int reg_arg_count = (argument_count > 5) ? 5 : argument_count;
+ int stack_arg_count = argument_count - reg_arg_count;
+ for (int i = 0; i < reg_arg_count; i++) {
+ intptr_t value = va_arg(parameters, intptr_t);
+ set_register(i + 2, value);
+ }
+
+ // Remaining arguments passed on stack.
+ int64_t original_stack = get_register(sp);
+ // Compute position of stack on entry to generated code.
+ intptr_t entry_stack =
+ (original_stack -
+ (kCalleeRegisterSaveAreaSize + stack_arg_count * sizeof(intptr_t)));
+ if (base::OS::ActivationFrameAlignment() != 0) {
+ entry_stack &= -base::OS::ActivationFrameAlignment();
+ }
+
+ // Store remaining arguments on stack, from low to high memory.
+ intptr_t* stack_argument =
+ reinterpret_cast<intptr_t*>(entry_stack + kCalleeRegisterSaveAreaSize);
+ for (int i = 0; i < stack_arg_count; i++) {
+ intptr_t value = va_arg(parameters, intptr_t);
+ stack_argument[i] = value;
+ }
+ va_end(parameters);
+ set_register(sp, entry_stack);
+
+// Prepare to execute the code at entry
+#if ABI_USES_FUNCTION_DESCRIPTORS
+ // entry is the function descriptor
+ set_pc(*(reinterpret_cast<intptr_t*>(entry)));
+#else
+ // entry is the instruction address
+ set_pc(reinterpret_cast<intptr_t>(entry));
+#endif
+
+ // Put target address in ip (for JS prologue).
+ set_register(r12, get_pc());
+
+ // Put down marker for end of simulation. The simulator will stop simulation
+ // when the PC reaches this value. By saving the "end simulation" value into
+ // the LR the simulation stops when returning to this call point.
+ registers_[14] = end_sim_pc;
+
+ // Set up the non-volatile registers with a known value. To be able to check
+ // that they are preserved properly across JS execution.
+ intptr_t callee_saved_value = icount_;
+ if (reg_arg_count < 5) {
+ set_register(r6, callee_saved_value + 6);
+ }
+ set_register(r7, callee_saved_value + 7);
+ set_register(r8, callee_saved_value + 8);
+ set_register(r9, callee_saved_value + 9);
+ set_register(r10, callee_saved_value + 10);
+ set_register(r11, callee_saved_value + 11);
+ set_register(r12, callee_saved_value + 12);
+ set_register(r13, callee_saved_value + 13);
+
+ // Start the simulation
+ Execute();
+
+// Check that the non-volatile registers have been preserved.
+#ifndef V8_TARGET_ARCH_S390X
+ if (reg_arg_count < 5) {
+ DCHECK_EQ(callee_saved_value + 6, get_low_register<int32_t>(r6));
+ }
+ DCHECK_EQ(callee_saved_value + 7, get_low_register<int32_t>(r7));
+ DCHECK_EQ(callee_saved_value + 8, get_low_register<int32_t>(r8));
+ DCHECK_EQ(callee_saved_value + 9, get_low_register<int32_t>(r9));
+ DCHECK_EQ(callee_saved_value + 10, get_low_register<int32_t>(r10));
+ DCHECK_EQ(callee_saved_value + 11, get_low_register<int32_t>(r11));
+ DCHECK_EQ(callee_saved_value + 12, get_low_register<int32_t>(r12));
+ DCHECK_EQ(callee_saved_value + 13, get_low_register<int32_t>(r13));
+#else
+ if (reg_arg_count < 5) {
+ DCHECK_EQ(callee_saved_value + 6, get_register(r6));
+ }
+ DCHECK_EQ(callee_saved_value + 7, get_register(r7));
+ DCHECK_EQ(callee_saved_value + 8, get_register(r8));
+ DCHECK_EQ(callee_saved_value + 9, get_register(r9));
+ DCHECK_EQ(callee_saved_value + 10, get_register(r10));
+ DCHECK_EQ(callee_saved_value + 11, get_register(r11));
+ DCHECK_EQ(callee_saved_value + 12, get_register(r12));
+ DCHECK_EQ(callee_saved_value + 13, get_register(r13));
+#endif
+
+ // Restore non-volatile registers with the original value.
+ set_register(r6, r6_val);
+ set_register(r7, r7_val);
+ set_register(r8, r8_val);
+ set_register(r9, r9_val);
+ set_register(r10, r10_val);
+ set_register(r11, r11_val);
+ set_register(r12, r12_val);
+ set_register(r13, r13_val);
+// Pop stack passed arguments.
+
+#ifndef V8_TARGET_ARCH_S390X
+ DCHECK_EQ(entry_stack, get_low_register<int32_t>(sp));
+#else
+ DCHECK_EQ(entry_stack, get_register(sp));
+#endif
+ set_register(sp, original_stack);
+
+ // Return value register
+ intptr_t result = get_register(r2);
+ return result;
+}
+
+void Simulator::CallFP(byte* entry, double d0, double d1) {
+ set_d_register_from_double(0, d0);
+ set_d_register_from_double(1, d1);
+ CallInternal(entry);
+}
+
+int32_t Simulator::CallFPReturnsInt(byte* entry, double d0, double d1) {
+ CallFP(entry, d0, d1);
+ int32_t result = get_register(r2);
+ return result;
+}
+
+double Simulator::CallFPReturnsDouble(byte* entry, double d0, double d1) {
+ CallFP(entry, d0, d1);
+ return get_double_from_d_register(0);
+}
+
+uintptr_t Simulator::PushAddress(uintptr_t address) {
+ uintptr_t new_sp = get_register(sp) - sizeof(uintptr_t);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
+ *stack_slot = address;
+ set_register(sp, new_sp);
+ return new_sp;
+}
+
+uintptr_t Simulator::PopAddress() {
+ uintptr_t current_sp = get_register(sp);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
+ uintptr_t address = *stack_slot;
+ set_register(sp, current_sp + sizeof(uintptr_t));
+ return address;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // USE_SIMULATOR
+#endif // V8_TARGET_ARCH_S390
diff --git a/deps/v8/src/s390/simulator-s390.h b/deps/v8/src/s390/simulator-s390.h
new file mode 100644
index 0000000000..ae3dd58209
--- /dev/null
+++ b/deps/v8/src/s390/simulator-s390.h
@@ -0,0 +1,552 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Declares a Simulator for S390 instructions if we are not generating a native
+// S390 binary. This Simulator allows us to run and debug S390 code generation
+// on regular desktop machines.
+// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
+// which will start execution in the Simulator or forwards to the real entry
+// on a S390 hardware platform.
+
+#ifndef V8_S390_SIMULATOR_S390_H_
+#define V8_S390_SIMULATOR_S390_H_
+
+#include "src/allocation.h"
+
+#if !defined(USE_SIMULATOR)
+// Running without a simulator on a native s390 platform.
+
+namespace v8 {
+namespace internal {
+
+// When running without a simulator we call the entry directly.
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
+ (entry(p0, p1, p2, p3, p4))
+
+typedef int (*s390_regexp_matcher)(String*, int, const byte*, const byte*, int*,
+ int, Address, int, void*, Isolate*);
+
+// Call the generated regexp code directly. The code at the entry address
+// should act as a function matching the type ppc_regexp_matcher.
+// The ninth argument is a dummy that reserves the space used for
+// the return address added by the ExitFrame in native calls.
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
+ (FUNCTION_CAST<s390_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, \
+ NULL, p8))
+
+// The stack limit beyond which we will throw stack overflow errors in
+// generated code. Because generated code on s390 uses the C stack, we
+// just use the C stack limit.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
+ uintptr_t c_limit) {
+ USE(isolate);
+ return c_limit;
+ }
+
+ static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
+ uintptr_t try_catch_address) {
+ USE(isolate);
+ return try_catch_address;
+ }
+
+ static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
+ USE(isolate);
+ }
+};
+} // namespace internal
+} // namespace v8
+
+#else // !defined(USE_SIMULATOR)
+// Running with a simulator.
+
+#include "src/assembler.h"
+#include "src/hashmap.h"
+#include "src/s390/constants-s390.h"
+
+namespace v8 {
+namespace internal {
+
+class CachePage {
+ public:
+ static const int LINE_VALID = 0;
+ static const int LINE_INVALID = 1;
+
+ static const int kPageShift = 12;
+ static const int kPageSize = 1 << kPageShift;
+ static const int kPageMask = kPageSize - 1;
+ static const int kLineShift = 2; // The cache line is only 4 bytes right now.
+ static const int kLineLength = 1 << kLineShift;
+ static const int kLineMask = kLineLength - 1;
+
+ CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }
+
+ char* ValidityByte(int offset) {
+ return &validity_map_[offset >> kLineShift];
+ }
+
+ char* CachedData(int offset) { return &data_[offset]; }
+
+ private:
+ char data_[kPageSize]; // The cached data.
+ static const int kValidityMapSize = kPageSize >> kLineShift;
+ char validity_map_[kValidityMapSize]; // One byte per line.
+};
+
+class Simulator {
+ public:
+ friend class S390Debugger;
+ enum Register {
+ no_reg = -1,
+ r0 = 0,
+ r1 = 1,
+ r2 = 2,
+ r3 = 3,
+ r4 = 4,
+ r5 = 5,
+ r6 = 6,
+ r7 = 7,
+ r8 = 8,
+ r9 = 9,
+ r10 = 10,
+ r11 = 11,
+ r12 = 12,
+ r13 = 13,
+ r14 = 14,
+ r15 = 15,
+ fp = r11,
+ ip = r12,
+ cp = r13,
+ ra = r14,
+ sp = r15, // name aliases
+ kNumGPRs = 16,
+ d0 = 0,
+ d1,
+ d2,
+ d3,
+ d4,
+ d5,
+ d6,
+ d7,
+ d8,
+ d9,
+ d10,
+ d11,
+ d12,
+ d13,
+ d14,
+ d15,
+ kNumFPRs = 16
+ };
+
+ explicit Simulator(Isolate* isolate);
+ ~Simulator();
+
+ // The currently executing Simulator instance. Potentially there can be one
+ // for each native thread.
+ static Simulator* current(v8::internal::Isolate* isolate);
+
+ // Accessors for register state.
+ void set_register(int reg, uint64_t value);
+ uint64_t get_register(int reg) const;
+ template <typename T>
+ T get_low_register(int reg) const;
+ template <typename T>
+ T get_high_register(int reg) const;
+ void set_low_register(int reg, uint32_t value);
+ void set_high_register(int reg, uint32_t value);
+
+ double get_double_from_register_pair(int reg);
+ void set_d_register_from_double(int dreg, const double dbl) {
+ DCHECK(dreg >= 0 && dreg < kNumFPRs);
+ *bit_cast<double*>(&fp_registers_[dreg]) = dbl;
+ }
+
+ double get_double_from_d_register(int dreg) {
+ DCHECK(dreg >= 0 && dreg < kNumFPRs);
+ return *bit_cast<double*>(&fp_registers_[dreg]);
+ }
+ void set_d_register(int dreg, int64_t value) {
+ DCHECK(dreg >= 0 && dreg < kNumFPRs);
+ fp_registers_[dreg] = value;
+ }
+ int64_t get_d_register(int dreg) {
+ DCHECK(dreg >= 0 && dreg < kNumFPRs);
+ return fp_registers_[dreg];
+ }
+
+ void set_d_register_from_float32(int dreg, const float f) {
+ DCHECK(dreg >= 0 && dreg < kNumFPRs);
+
+ int32_t f_int = *bit_cast<int32_t*>(&f);
+ int64_t finalval = static_cast<int64_t>(f_int) << 32;
+ set_d_register(dreg, finalval);
+ }
+
+ float get_float32_from_d_register(int dreg) {
+ DCHECK(dreg >= 0 && dreg < kNumFPRs);
+
+ int64_t regval = get_d_register(dreg) >> 32;
+ int32_t regval32 = static_cast<int32_t>(regval);
+ return *bit_cast<float*>(&regval32);
+ }
+
+ // Special case of set_register and get_register to access the raw PC value.
+ void set_pc(intptr_t value);
+ intptr_t get_pc() const;
+
+ Address get_sp() const {
+ return reinterpret_cast<Address>(static_cast<intptr_t>(get_register(sp)));
+ }
+
+ // Accessor to the internal simulator stack area.
+ uintptr_t StackLimit(uintptr_t c_limit) const;
+
+ // Executes S390 instructions until the PC reaches end_sim_pc.
+ void Execute();
+
+ // Call on program start.
+ static void Initialize(Isolate* isolate);
+
+ static void TearDown(HashMap* i_cache, Redirection* first);
+
+ // V8 generally calls into generated JS code with 5 parameters and into
+ // generated RegExp code with 7 parameters. This is a convenience function,
+ // which sets up the simulator state and grabs the result on return.
+ intptr_t Call(byte* entry, int argument_count, ...);
+ // Alternative: call a 2-argument double function.
+ void CallFP(byte* entry, double d0, double d1);
+ int32_t CallFPReturnsInt(byte* entry, double d0, double d1);
+ double CallFPReturnsDouble(byte* entry, double d0, double d1);
+
+ // Push an address onto the JS stack.
+ uintptr_t PushAddress(uintptr_t address);
+
+ // Pop an address from the JS stack.
+ uintptr_t PopAddress();
+
+ // Debugger input.
+ void set_last_debugger_input(char* input);
+ char* last_debugger_input() { return last_debugger_input_; }
+
+ // ICache checking.
+ static void FlushICache(v8::internal::HashMap* i_cache, void* start,
+ size_t size);
+
+ // Returns true if pc register contains one of the 'special_values' defined
+ // below (bad_lr, end_sim_pc).
+ bool has_bad_pc() const;
+
+ private:
+ enum special_values {
+ // Known bad pc value to ensure that the simulator does not execute
+ // without being properly setup.
+ bad_lr = -1,
+ // A pc value used to signal the simulator to stop execution. Generally
+ // the lr is set to this value on transition from native C code to
+ // simulated execution, so that the simulator can "return" to the native
+ // C code.
+ end_sim_pc = -2
+ };
+
+ // Unsupported instructions use Format to print an error and stop execution.
+ void Format(Instruction* instr, const char* format);
+
+ // Helper functions to set the conditional flags in the architecture state.
+ bool CarryFrom(int32_t left, int32_t right, int32_t carry = 0);
+ bool BorrowFrom(int32_t left, int32_t right);
+ template <typename T1>
+ inline bool OverflowFromSigned(T1 alu_out, T1 left, T1 right, bool addition);
+
+ // Helper functions to decode common "addressing" modes
+ int32_t GetShiftRm(Instruction* instr, bool* carry_out);
+ int32_t GetImm(Instruction* instr, bool* carry_out);
+ void ProcessPUW(Instruction* instr, int num_regs, int operand_size,
+ intptr_t* start_address, intptr_t* end_address);
+ void HandleRList(Instruction* instr, bool load);
+ void HandleVList(Instruction* inst);
+ void SoftwareInterrupt(Instruction* instr);
+
+ // Stop helper functions.
+ inline bool isStopInstruction(Instruction* instr);
+ inline bool isWatchedStop(uint32_t bkpt_code);
+ inline bool isEnabledStop(uint32_t bkpt_code);
+ inline void EnableStop(uint32_t bkpt_code);
+ inline void DisableStop(uint32_t bkpt_code);
+ inline void IncreaseStopCounter(uint32_t bkpt_code);
+ void PrintStopInfo(uint32_t code);
+
+ // Byte Reverse
+ inline int16_t ByteReverse(int16_t hword);
+ inline int32_t ByteReverse(int32_t word);
+
+ // Read and write memory.
+ inline uint8_t ReadBU(intptr_t addr);
+ inline int8_t ReadB(intptr_t addr);
+ inline void WriteB(intptr_t addr, uint8_t value);
+ inline void WriteB(intptr_t addr, int8_t value);
+
+ inline uint16_t ReadHU(intptr_t addr, Instruction* instr);
+ inline int16_t ReadH(intptr_t addr, Instruction* instr);
+ // Note: Overloaded on the sign of the value.
+ inline void WriteH(intptr_t addr, uint16_t value, Instruction* instr);
+ inline void WriteH(intptr_t addr, int16_t value, Instruction* instr);
+
+ inline uint32_t ReadWU(intptr_t addr, Instruction* instr);
+ inline int32_t ReadW(intptr_t addr, Instruction* instr);
+ inline void WriteW(intptr_t addr, uint32_t value, Instruction* instr);
+ inline void WriteW(intptr_t addr, int32_t value, Instruction* instr);
+
+ inline int64_t ReadDW(intptr_t addr);
+ inline double ReadDouble(intptr_t addr);
+ inline void WriteDW(intptr_t addr, int64_t value);
+
+ // S390
+ void Trace(Instruction* instr);
+ bool DecodeTwoByte(Instruction* instr);
+ bool DecodeFourByte(Instruction* instr);
+ bool DecodeFourByteArithmetic(Instruction* instr);
+ bool DecodeFourByteArithmetic64Bit(Instruction* instr);
+ bool DecodeFourByteFloatingPoint(Instruction* instr);
+ void DecodeFourByteFloatingPointIntConversion(Instruction* instr);
+ void DecodeFourByteFloatingPointRound(Instruction* instr);
+
+ bool DecodeSixByte(Instruction* instr);
+ bool DecodeSixByteArithmetic(Instruction* instr);
+ bool S390InstructionDecode(Instruction* instr);
+ void DecodeSixByteBitShift(Instruction* instr);
+
+ // Used by the CL**BR instructions.
+ template <typename T1, typename T2>
+ void SetS390RoundConditionCode(T1 r2_val, T2 max, T2 min) {
+ condition_reg_ = 0;
+ double r2_dval = static_cast<double>(r2_val);
+ double dbl_min = static_cast<double>(min);
+ double dbl_max = static_cast<double>(max);
+
+ if (r2_dval == 0.0)
+ condition_reg_ = 8;
+ else if (r2_dval < 0.0 && r2_dval >= dbl_min && std::isfinite(r2_dval))
+ condition_reg_ = 4;
+ else if (r2_dval > 0.0 && r2_dval <= dbl_max && std::isfinite(r2_dval))
+ condition_reg_ = 2;
+ else
+ condition_reg_ = 1;
+ }
+
+ template <typename T1>
+ void SetS390RoundConditionCode(T1 r2_val, int64_t max, int64_t min) {
+ condition_reg_ = 0;
+ double r2_dval = static_cast<double>(r2_val);
+ double dbl_min = static_cast<double>(min);
+ double dbl_max = static_cast<double>(max);
+
+ // Note that the IEEE 754 floating-point representations (both 32 and
+ // 64 bit) cannot exactly represent INT64_MAX. The closest it can get
+ // is INT64_max + 1. IEEE 754 FP can, though, represent INT64_MIN
+ // exactly.
+
+ // This is not an issue for INT32, as IEEE754 64-bit can represent
+ // INT32_MAX and INT32_MIN with exact precision.
+
+ if (r2_dval == 0.0)
+ condition_reg_ = 8;
+ else if (r2_dval < 0.0 && r2_dval >= dbl_min && std::isfinite(r2_dval))
+ condition_reg_ = 4;
+ else if (r2_dval > 0.0 && r2_dval < dbl_max && std::isfinite(r2_dval))
+ condition_reg_ = 2;
+ else
+ condition_reg_ = 1;
+ }
+
+ // Used by the CL**BR instructions.
+ template <typename T1, typename T2, typename T3>
+ void SetS390ConvertConditionCode(T1 src, T2 dst, T3 max) {
+ condition_reg_ = 0;
+ if (src == static_cast<T1>(0.0)) {
+ condition_reg_ |= 8;
+ } else if (src < static_cast<T1>(0.0) && static_cast<T2>(src) == 0 &&
+ std::isfinite(src)) {
+ condition_reg_ |= 4;
+ } else if (src > static_cast<T1>(0.0) && std::isfinite(src) &&
+ src < static_cast<T1>(max)) {
+ condition_reg_ |= 2;
+ } else {
+ condition_reg_ |= 1;
+ }
+ }
+
+ template <typename T>
+ void SetS390ConditionCode(T lhs, T rhs) {
+ condition_reg_ = 0;
+ if (lhs == rhs) {
+ condition_reg_ |= CC_EQ;
+ } else if (lhs < rhs) {
+ condition_reg_ |= CC_LT;
+ } else if (lhs > rhs) {
+ condition_reg_ |= CC_GT;
+ }
+
+ // We get down here only for floating point
+ // comparisons and the values are unordered
+ // i.e. NaN
+ if (condition_reg_ == 0) condition_reg_ = unordered;
+ }
+
+ // Used by arithmetic operations that use carry.
+ template <typename T>
+ void SetS390ConditionCodeCarry(T result, bool overflow) {
+ condition_reg_ = 0;
+ bool zero_result = (result == static_cast<T>(0));
+ if (zero_result && !overflow) {
+ condition_reg_ |= 8;
+ } else if (!zero_result && !overflow) {
+ condition_reg_ |= 4;
+ } else if (zero_result && overflow) {
+ condition_reg_ |= 2;
+ } else if (!zero_result && overflow) {
+ condition_reg_ |= 1;
+ }
+ if (condition_reg_ == 0) UNREACHABLE();
+ }
+
+ bool isNaN(double value) { return (value != value); }
+
+ // Set the condition code for bitwise operations
+ // CC0 is set if value == 0.
+ // CC1 is set if value != 0.
+ // CC2/CC3 are not set.
+ template <typename T>
+ void SetS390BitWiseConditionCode(T value) {
+ condition_reg_ = 0;
+
+ if (value == 0)
+ condition_reg_ |= CC_EQ;
+ else
+ condition_reg_ |= CC_LT;
+ }
+
+ void SetS390OverflowCode(bool isOF) {
+ if (isOF) condition_reg_ = CC_OF;
+ }
+
+ bool TestConditionCode(Condition mask) {
+ // Check for unconditional branch
+ if (mask == 0xf) return true;
+
+ return (condition_reg_ & mask) != 0;
+ }
+
+ // Executes one instruction.
+ void ExecuteInstruction(Instruction* instr, bool auto_incr_pc = true);
+
+ // ICache.
+ static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
+ static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
+ int size);
+ static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
+
+ // Runtime call support.
+ static void* RedirectExternalReference(
+ Isolate* isolate, void* external_function,
+ v8::internal::ExternalReference::Type type);
+
+ // Handle arguments and return value for runtime FP functions.
+ void GetFpArgs(double* x, double* y, intptr_t* z);
+ void SetFpResult(const double& result);
+ void TrashCallerSaveRegisters();
+
+ void CallInternal(byte* entry, int reg_arg_count = 3);
+
+ // Architecture state.
+ // On z9 and higher and supported Linux on z Systems platforms, all registers
+ // are 64-bit, even in 31-bit mode.
+ uint64_t registers_[kNumGPRs];
+ int64_t fp_registers_[kNumFPRs];
+
+ // Condition Code register. In S390, the last 4 bits are used.
+ int32_t condition_reg_;
+ // Special register to track PC.
+ intptr_t special_reg_pc_;
+
+ // Simulator support.
+ char* stack_;
+ static const size_t stack_protection_size_ = 256 * kPointerSize;
+ bool pc_modified_;
+ int64_t icount_;
+
+ // Debugger input.
+ char* last_debugger_input_;
+
+ // Icache simulation
+ v8::internal::HashMap* i_cache_;
+
+ // Registered breakpoints.
+ Instruction* break_pc_;
+ Instr break_instr_;
+
+ v8::internal::Isolate* isolate_;
+
+ // A stop is watched if its code is less than kNumOfWatchedStops.
+ // Only watched stops support enabling/disabling and the counter feature.
+ static const uint32_t kNumOfWatchedStops = 256;
+
+ // Breakpoint is disabled if bit 31 is set.
+ static const uint32_t kStopDisabledBit = 1 << 31;
+
+ // A stop is enabled, meaning the simulator will stop when meeting the
+ // instruction, if bit 31 of watched_stops_[code].count is unset.
+ // The value watched_stops_[code].count & ~(1 << 31) indicates how many times
+ // the breakpoint was hit or gone through.
+ struct StopCountAndDesc {
+ uint32_t count;
+ char* desc;
+ };
+ StopCountAndDesc watched_stops_[kNumOfWatchedStops];
+ void DebugStart();
+};
+
+// When running with the simulator transition into simulated execution at this
+// point.
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
+ reinterpret_cast<Object*>(Simulator::current(isolate)->Call( \
+ FUNCTION_ADDR(entry), 5, (intptr_t)p0, (intptr_t)p1, (intptr_t)p2, \
+ (intptr_t)p3, (intptr_t)p4))
+
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
+ Simulator::current(isolate)->Call(entry, 10, (intptr_t)p0, (intptr_t)p1, \
+ (intptr_t)p2, (intptr_t)p3, (intptr_t)p4, \
+ (intptr_t)p5, (intptr_t)p6, (intptr_t)p7, \
+ (intptr_t)NULL, (intptr_t)p8)
+
+// The simulator has its own stack. Thus it has a different stack limit from
+// the C-based native code. The JS-based limit normally points near the end of
+// the simulator stack. When the C-based limit is exhausted we reflect that by
+// lowering the JS-based limit as well, to make stack checks trigger.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
+ uintptr_t c_limit) {
+ return Simulator::current(isolate)->StackLimit(c_limit);
+ }
+
+ static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
+ uintptr_t try_catch_address) {
+ Simulator* sim = Simulator::current(isolate);
+ return sim->PushAddress(try_catch_address);
+ }
+
+ static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
+ Simulator::current(isolate)->PopAddress();
+ }
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // !defined(USE_SIMULATOR)
+#endif // V8_S390_SIMULATOR_S390_H_
diff --git a/deps/v8/src/simulator.h b/deps/v8/src/simulator.h
index d198291907..ca23889b90 100644
--- a/deps/v8/src/simulator.h
+++ b/deps/v8/src/simulator.h
@@ -19,6 +19,8 @@
#include "src/mips/simulator-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/simulator-mips64.h"
+#elif V8_TARGET_ARCH_S390
+#include "src/s390/simulator-s390.h"
#elif V8_TARGET_ARCH_X87
#include "src/x87/simulator-x87.h"
#else
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
new file mode 100644
index 0000000000..84a08c103d
--- /dev/null
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -0,0 +1,421 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/code-serializer.h"
+
+#include "src/code-stubs.h"
+#include "src/log.h"
+#include "src/macro-assembler.h"
+#include "src/profiler/cpu-profiler.h"
+#include "src/snapshot/deserializer.h"
+#include "src/version.h"
+
+namespace v8 {
+namespace internal {
+
+ScriptData* CodeSerializer::Serialize(Isolate* isolate,
+ Handle<SharedFunctionInfo> info,
+ Handle<String> source) {
+ base::ElapsedTimer timer;
+ if (FLAG_profile_deserialization) timer.Start();
+ if (FLAG_trace_serializer) {
+ PrintF("[Serializing from");
+ Object* script = info->script();
+ if (script->IsScript()) Script::cast(script)->name()->ShortPrint();
+ PrintF("]\n");
+ }
+
+ // Serialize code object.
+ SnapshotByteSink sink(info->code()->CodeSize() * 2);
+ CodeSerializer cs(isolate, &sink, *source);
+ DisallowHeapAllocation no_gc;
+ Object** location = Handle<Object>::cast(info).location();
+ cs.VisitPointer(location);
+ cs.SerializeDeferredObjects();
+ cs.Pad();
+
+ SerializedCodeData data(sink.data(), cs);
+ ScriptData* script_data = data.GetScriptData();
+
+ if (FLAG_profile_deserialization) {
+ double ms = timer.Elapsed().InMillisecondsF();
+ int length = script_data->length();
+ PrintF("[Serializing to %d bytes took %0.3f ms]\n", length, ms);
+ }
+
+ return script_data;
+}
+
+void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip) {
+ int root_index = root_index_map_.Lookup(obj);
+ if (root_index != RootIndexMap::kInvalidRootIndex) {
+ PutRoot(root_index, obj, how_to_code, where_to_point, skip);
+ return;
+ }
+
+ if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
+
+ FlushSkip(skip);
+
+ if (obj->IsCode()) {
+ Code* code_object = Code::cast(obj);
+ switch (code_object->kind()) {
+ case Code::OPTIMIZED_FUNCTION: // No optimized code compiled yet.
+ case Code::HANDLER: // No handlers patched in yet.
+ case Code::REGEXP: // No regexp literals initialized yet.
+ case Code::NUMBER_OF_KINDS: // Pseudo enum value.
+ case Code::BYTECODE_HANDLER: // No direct references to handlers.
+ CHECK(false);
+ case Code::BUILTIN:
+ SerializeBuiltin(code_object->builtin_index(), how_to_code,
+ where_to_point);
+ return;
+ case Code::STUB:
+ SerializeCodeStub(code_object->stub_key(), how_to_code, where_to_point);
+ return;
+#define IC_KIND_CASE(KIND) case Code::KIND:
+ IC_KIND_LIST(IC_KIND_CASE)
+#undef IC_KIND_CASE
+ SerializeIC(code_object, how_to_code, where_to_point);
+ return;
+ case Code::FUNCTION:
+ DCHECK(code_object->has_reloc_info_for_serialization());
+ SerializeGeneric(code_object, how_to_code, where_to_point);
+ return;
+ case Code::WASM_FUNCTION:
+ case Code::WASM_TO_JS_FUNCTION:
+ case Code::JS_TO_WASM_FUNCTION:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ }
+
+ // Past this point we should not see any (context-specific) maps anymore.
+ CHECK(!obj->IsMap());
+ // There should be no references to the global object embedded.
+ CHECK(!obj->IsJSGlobalProxy() && !obj->IsJSGlobalObject());
+ // There should be no hash table embedded. They would require rehashing.
+ CHECK(!obj->IsHashTable());
+ // We expect no instantiated function objects or contexts.
+ CHECK(!obj->IsJSFunction() && !obj->IsContext());
+
+ SerializeGeneric(obj, how_to_code, where_to_point);
+}
+
+void CodeSerializer::SerializeGeneric(HeapObject* heap_object,
+ HowToCode how_to_code,
+ WhereToPoint where_to_point) {
+ // Object has not yet been serialized. Serialize it here.
+ ObjectSerializer serializer(this, heap_object, sink_, how_to_code,
+ where_to_point);
+ serializer.Serialize();
+}
+
+void CodeSerializer::SerializeBuiltin(int builtin_index, HowToCode how_to_code,
+ WhereToPoint where_to_point) {
+ DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
+ (how_to_code == kPlain && where_to_point == kInnerPointer) ||
+ (how_to_code == kFromCode && where_to_point == kInnerPointer));
+ DCHECK_LT(builtin_index, Builtins::builtin_count);
+ DCHECK_LE(0, builtin_index);
+
+ if (FLAG_trace_serializer) {
+ PrintF(" Encoding builtin: %s\n",
+ isolate()->builtins()->name(builtin_index));
+ }
+
+ sink_->Put(kBuiltin + how_to_code + where_to_point, "Builtin");
+ sink_->PutInt(builtin_index, "builtin_index");
+}
+
+void CodeSerializer::SerializeCodeStub(uint32_t stub_key, HowToCode how_to_code,
+ WhereToPoint where_to_point) {
+ DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
+ (how_to_code == kPlain && where_to_point == kInnerPointer) ||
+ (how_to_code == kFromCode && where_to_point == kInnerPointer));
+ DCHECK(CodeStub::MajorKeyFromKey(stub_key) != CodeStub::NoCache);
+ DCHECK(!CodeStub::GetCode(isolate(), stub_key).is_null());
+
+ int index = AddCodeStubKey(stub_key) + kCodeStubsBaseIndex;
+
+ if (FLAG_trace_serializer) {
+ PrintF(" Encoding code stub %s as %d\n",
+ CodeStub::MajorName(CodeStub::MajorKeyFromKey(stub_key)), index);
+ }
+
+ sink_->Put(kAttachedReference + how_to_code + where_to_point, "CodeStub");
+ sink_->PutInt(index, "CodeStub key");
+}
+
+void CodeSerializer::SerializeIC(Code* ic, HowToCode how_to_code,
+ WhereToPoint where_to_point) {
+ // The IC may be implemented as a stub.
+ uint32_t stub_key = ic->stub_key();
+ if (stub_key != CodeStub::NoCacheKey()) {
+ if (FLAG_trace_serializer) {
+ PrintF(" %s is a code stub\n", Code::Kind2String(ic->kind()));
+ }
+ SerializeCodeStub(stub_key, how_to_code, where_to_point);
+ return;
+ }
+ // The IC may be implemented as builtin. Only real builtins have an
+ // actual builtin_index value attached (otherwise it's just garbage).
+ // Compare to make sure we are really dealing with a builtin.
+ int builtin_index = ic->builtin_index();
+ if (builtin_index < Builtins::builtin_count) {
+ Builtins::Name name = static_cast<Builtins::Name>(builtin_index);
+ Code* builtin = isolate()->builtins()->builtin(name);
+ if (builtin == ic) {
+ if (FLAG_trace_serializer) {
+ PrintF(" %s is a builtin\n", Code::Kind2String(ic->kind()));
+ }
+ DCHECK(ic->kind() == Code::KEYED_LOAD_IC ||
+ ic->kind() == Code::KEYED_STORE_IC);
+ SerializeBuiltin(builtin_index, how_to_code, where_to_point);
+ return;
+ }
+ }
+ // The IC may also just be a piece of code kept in the non_monomorphic_cache.
+ // In that case, just serialize as a normal code object.
+ if (FLAG_trace_serializer) {
+ PrintF(" %s has no special handling\n", Code::Kind2String(ic->kind()));
+ }
+ DCHECK(ic->kind() == Code::LOAD_IC || ic->kind() == Code::STORE_IC);
+ SerializeGeneric(ic, how_to_code, where_to_point);
+}
+
+int CodeSerializer::AddCodeStubKey(uint32_t stub_key) {
+ // TODO(yangguo) Maybe we need a hash table for a faster lookup than O(n^2).
+ int index = 0;
+ while (index < stub_keys_.length()) {
+ if (stub_keys_[index] == stub_key) return index;
+ index++;
+ }
+ stub_keys_.Add(stub_key);
+ return index;
+}
+
+MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
+ Isolate* isolate, ScriptData* cached_data, Handle<String> source) {
+ base::ElapsedTimer timer;
+ if (FLAG_profile_deserialization) timer.Start();
+
+ HandleScope scope(isolate);
+
+ base::SmartPointer<SerializedCodeData> scd(
+ SerializedCodeData::FromCachedData(isolate, cached_data, *source));
+ if (scd.is_empty()) {
+ if (FLAG_profile_deserialization) PrintF("[Cached code failed check]\n");
+ DCHECK(cached_data->rejected());
+ return MaybeHandle<SharedFunctionInfo>();
+ }
+
+ // Prepare and register list of attached objects.
+ Vector<const uint32_t> code_stub_keys = scd->CodeStubKeys();
+ Vector<Handle<Object> > attached_objects = Vector<Handle<Object> >::New(
+ code_stub_keys.length() + kCodeStubsBaseIndex);
+ attached_objects[kSourceObjectIndex] = source;
+ for (int i = 0; i < code_stub_keys.length(); i++) {
+ attached_objects[i + kCodeStubsBaseIndex] =
+ CodeStub::GetCode(isolate, code_stub_keys[i]).ToHandleChecked();
+ }
+
+ Deserializer deserializer(scd.get());
+ deserializer.SetAttachedObjects(attached_objects);
+
+ // Deserialize.
+ Handle<SharedFunctionInfo> result;
+ if (!deserializer.DeserializeCode(isolate).ToHandle(&result)) {
+ // Deserializing may fail if the reservations cannot be fulfilled.
+ if (FLAG_profile_deserialization) PrintF("[Deserializing failed]\n");
+ return MaybeHandle<SharedFunctionInfo>();
+ }
+
+ if (FLAG_profile_deserialization) {
+ double ms = timer.Elapsed().InMillisecondsF();
+ int length = cached_data->length();
+ PrintF("[Deserializing from %d bytes took %0.3f ms]\n", length, ms);
+ }
+ result->set_deserialized(true);
+
+ if (isolate->logger()->is_logging_code_events() ||
+ isolate->cpu_profiler()->is_profiling()) {
+ String* name = isolate->heap()->empty_string();
+ if (result->script()->IsScript()) {
+ Script* script = Script::cast(result->script());
+ if (script->name()->IsString()) name = String::cast(script->name());
+ }
+ isolate->logger()->CodeCreateEvent(
+ Logger::SCRIPT_TAG, result->abstract_code(), *result, NULL, name);
+ }
+ return scope.CloseAndEscape(result);
+}
+
+class Checksum {
+ public:
+ explicit Checksum(Vector<const byte> payload) {
+#ifdef MEMORY_SANITIZER
+ // Computing the checksum includes padding bytes for objects like strings.
+ // Mark every object as initialized in the code serializer.
+ MSAN_MEMORY_IS_INITIALIZED(payload.start(), payload.length());
+#endif // MEMORY_SANITIZER
+ // Fletcher's checksum. Modified to reduce 64-bit sums to 32-bit.
+ uintptr_t a = 1;
+ uintptr_t b = 0;
+ const uintptr_t* cur = reinterpret_cast<const uintptr_t*>(payload.start());
+ DCHECK(IsAligned(payload.length(), kIntptrSize));
+ const uintptr_t* end = cur + payload.length() / kIntptrSize;
+ while (cur < end) {
+ // Unsigned overflow expected and intended.
+ a += *cur++;
+ b += a;
+ }
+#if V8_HOST_ARCH_64_BIT
+ a ^= a >> 32;
+ b ^= b >> 32;
+#endif // V8_HOST_ARCH_64_BIT
+ a_ = static_cast<uint32_t>(a);
+ b_ = static_cast<uint32_t>(b);
+ }
+
+ bool Check(uint32_t a, uint32_t b) const { return a == a_ && b == b_; }
+
+ uint32_t a() const { return a_; }
+ uint32_t b() const { return b_; }
+
+ private:
+ uint32_t a_;
+ uint32_t b_;
+
+ DISALLOW_COPY_AND_ASSIGN(Checksum);
+};
+
+SerializedCodeData::SerializedCodeData(const List<byte>& payload,
+ const CodeSerializer& cs) {
+ DisallowHeapAllocation no_gc;
+ const List<uint32_t>* stub_keys = cs.stub_keys();
+
+ List<Reservation> reservations;
+ cs.EncodeReservations(&reservations);
+
+ // Calculate sizes.
+ int reservation_size = reservations.length() * kInt32Size;
+ int num_stub_keys = stub_keys->length();
+ int stub_keys_size = stub_keys->length() * kInt32Size;
+ int payload_offset = kHeaderSize + reservation_size + stub_keys_size;
+ int padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
+ int size = padded_payload_offset + payload.length();
+
+ // Allocate backing store and create result data.
+ AllocateData(size);
+
+ // Set header values.
+ SetMagicNumber(cs.isolate());
+ SetHeaderValue(kVersionHashOffset, Version::Hash());
+ SetHeaderValue(kSourceHashOffset, SourceHash(cs.source()));
+ SetHeaderValue(kCpuFeaturesOffset,
+ static_cast<uint32_t>(CpuFeatures::SupportedFeatures()));
+ SetHeaderValue(kFlagHashOffset, FlagList::Hash());
+ SetHeaderValue(kNumReservationsOffset, reservations.length());
+ SetHeaderValue(kNumCodeStubKeysOffset, num_stub_keys);
+ SetHeaderValue(kPayloadLengthOffset, payload.length());
+
+ Checksum checksum(payload.ToConstVector());
+ SetHeaderValue(kChecksum1Offset, checksum.a());
+ SetHeaderValue(kChecksum2Offset, checksum.b());
+
+ // Copy reservation chunk sizes.
+ CopyBytes(data_ + kHeaderSize, reinterpret_cast<byte*>(reservations.begin()),
+ reservation_size);
+
+ // Copy code stub keys.
+ CopyBytes(data_ + kHeaderSize + reservation_size,
+ reinterpret_cast<byte*>(stub_keys->begin()), stub_keys_size);
+
+ memset(data_ + payload_offset, 0, padded_payload_offset - payload_offset);
+
+ // Copy serialized data.
+ CopyBytes(data_ + padded_payload_offset, payload.begin(),
+ static_cast<size_t>(payload.length()));
+}
+
+SerializedCodeData::SanityCheckResult SerializedCodeData::SanityCheck(
+ Isolate* isolate, String* source) const {
+ uint32_t magic_number = GetMagicNumber();
+ if (magic_number != ComputeMagicNumber(isolate)) return MAGIC_NUMBER_MISMATCH;
+ uint32_t version_hash = GetHeaderValue(kVersionHashOffset);
+ uint32_t source_hash = GetHeaderValue(kSourceHashOffset);
+ uint32_t cpu_features = GetHeaderValue(kCpuFeaturesOffset);
+ uint32_t flags_hash = GetHeaderValue(kFlagHashOffset);
+ uint32_t c1 = GetHeaderValue(kChecksum1Offset);
+ uint32_t c2 = GetHeaderValue(kChecksum2Offset);
+ if (version_hash != Version::Hash()) return VERSION_MISMATCH;
+ if (source_hash != SourceHash(source)) return SOURCE_MISMATCH;
+ if (cpu_features != static_cast<uint32_t>(CpuFeatures::SupportedFeatures())) {
+ return CPU_FEATURES_MISMATCH;
+ }
+ if (flags_hash != FlagList::Hash()) return FLAGS_MISMATCH;
+ if (!Checksum(Payload()).Check(c1, c2)) return CHECKSUM_MISMATCH;
+ return CHECK_SUCCESS;
+}
+
+uint32_t SerializedCodeData::SourceHash(String* source) const {
+ return source->length();
+}
+
+// Return ScriptData object and relinquish ownership over it to the caller.
+ScriptData* SerializedCodeData::GetScriptData() {
+ DCHECK(owns_data_);
+ ScriptData* result = new ScriptData(data_, size_);
+ result->AcquireDataOwnership();
+ owns_data_ = false;
+ data_ = NULL;
+ return result;
+}
+
+Vector<const SerializedData::Reservation> SerializedCodeData::Reservations()
+ const {
+ return Vector<const Reservation>(
+ reinterpret_cast<const Reservation*>(data_ + kHeaderSize),
+ GetHeaderValue(kNumReservationsOffset));
+}
+
+Vector<const byte> SerializedCodeData::Payload() const {
+ int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size;
+ int code_stubs_size = GetHeaderValue(kNumCodeStubKeysOffset) * kInt32Size;
+ int payload_offset = kHeaderSize + reservations_size + code_stubs_size;
+ int padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
+ const byte* payload = data_ + padded_payload_offset;
+ DCHECK(IsAligned(reinterpret_cast<intptr_t>(payload), kPointerAlignment));
+ int length = GetHeaderValue(kPayloadLengthOffset);
+ DCHECK_EQ(data_ + size_, payload + length);
+ return Vector<const byte>(payload, length);
+}
+
+Vector<const uint32_t> SerializedCodeData::CodeStubKeys() const {
+ int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size;
+ const byte* start = data_ + kHeaderSize + reservations_size;
+ return Vector<const uint32_t>(reinterpret_cast<const uint32_t*>(start),
+ GetHeaderValue(kNumCodeStubKeysOffset));
+}
+
+SerializedCodeData::SerializedCodeData(ScriptData* data)
+ : SerializedData(const_cast<byte*>(data->data()), data->length()) {}
+
+SerializedCodeData* SerializedCodeData::FromCachedData(Isolate* isolate,
+ ScriptData* cached_data,
+ String* source) {
+ DisallowHeapAllocation no_gc;
+ SerializedCodeData* scd = new SerializedCodeData(cached_data);
+ SanityCheckResult r = scd->SanityCheck(isolate, source);
+ if (r == CHECK_SUCCESS) return scd;
+ cached_data->Reject();
+ source->GetIsolate()->counters()->code_cache_reject_reason()->AddSample(r);
+ delete scd;
+ return NULL;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/snapshot/code-serializer.h b/deps/v8/src/snapshot/code-serializer.h
new file mode 100644
index 0000000000..b217fff52b
--- /dev/null
+++ b/deps/v8/src/snapshot/code-serializer.h
@@ -0,0 +1,127 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_CODE_SERIALIZER_H_
+#define V8_SNAPSHOT_CODE_SERIALIZER_H_
+
+#include "src/parsing/preparse-data.h"
+#include "src/snapshot/serializer.h"
+
+namespace v8 {
+namespace internal {
+
+class CodeSerializer : public Serializer {
+ public:
+ static ScriptData* Serialize(Isolate* isolate,
+ Handle<SharedFunctionInfo> info,
+ Handle<String> source);
+
+ MUST_USE_RESULT static MaybeHandle<SharedFunctionInfo> Deserialize(
+ Isolate* isolate, ScriptData* cached_data, Handle<String> source);
+
+ static const int kSourceObjectIndex = 0;
+ STATIC_ASSERT(kSourceObjectReference == kSourceObjectIndex);
+
+ static const int kCodeStubsBaseIndex = 1;
+
+ String* source() const {
+ DCHECK(!AllowHeapAllocation::IsAllowed());
+ return source_;
+ }
+
+ const List<uint32_t>* stub_keys() const { return &stub_keys_; }
+
+ private:
+ CodeSerializer(Isolate* isolate, SnapshotByteSink* sink, String* source)
+ : Serializer(isolate, sink), source_(source) {
+ back_reference_map_.AddSourceString(source);
+ }
+
+ ~CodeSerializer() override { OutputStatistics("CodeSerializer"); }
+
+ void SerializeObject(HeapObject* o, HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip) override;
+
+ void SerializeBuiltin(int builtin_index, HowToCode how_to_code,
+ WhereToPoint where_to_point);
+ void SerializeIC(Code* ic, HowToCode how_to_code,
+ WhereToPoint where_to_point);
+ void SerializeCodeStub(uint32_t stub_key, HowToCode how_to_code,
+ WhereToPoint where_to_point);
+ void SerializeGeneric(HeapObject* heap_object, HowToCode how_to_code,
+ WhereToPoint where_to_point);
+ int AddCodeStubKey(uint32_t stub_key);
+
+ DisallowHeapAllocation no_gc_;
+ String* source_;
+ List<uint32_t> stub_keys_;
+ DISALLOW_COPY_AND_ASSIGN(CodeSerializer);
+};
+
+// Wrapper around ScriptData to provide code-serializer-specific functionality.
+class SerializedCodeData : public SerializedData {
+ public:
+ // Used when consuming.
+ static SerializedCodeData* FromCachedData(Isolate* isolate,
+ ScriptData* cached_data,
+ String* source);
+
+ // Used when producing.
+ SerializedCodeData(const List<byte>& payload, const CodeSerializer& cs);
+
+ // Return ScriptData object and relinquish ownership over it to the caller.
+ ScriptData* GetScriptData();
+
+ Vector<const Reservation> Reservations() const;
+ Vector<const byte> Payload() const;
+
+ Vector<const uint32_t> CodeStubKeys() const;
+
+ private:
+ explicit SerializedCodeData(ScriptData* data);
+
+ enum SanityCheckResult {
+ CHECK_SUCCESS = 0,
+ MAGIC_NUMBER_MISMATCH = 1,
+ VERSION_MISMATCH = 2,
+ SOURCE_MISMATCH = 3,
+ CPU_FEATURES_MISMATCH = 4,
+ FLAGS_MISMATCH = 5,
+ CHECKSUM_MISMATCH = 6
+ };
+
+ SanityCheckResult SanityCheck(Isolate* isolate, String* source) const;
+
+ uint32_t SourceHash(String* source) const;
+
+ // The data header consists of uint32_t-sized entries:
+ // [0] magic number and external reference count
+ // [1] version hash
+ // [2] source hash
+ // [3] cpu features
+ // [4] flag hash
+ // [5] number of code stub keys
+ // [6] number of reservation size entries
+ // [7] payload length
+ // [8] payload checksum part 1
+ // [9] payload checksum part 2
+ // ... reservations
+ // ... code stub keys
+ // ... serialized payload
+ static const int kVersionHashOffset = kMagicNumberOffset + kInt32Size;
+ static const int kSourceHashOffset = kVersionHashOffset + kInt32Size;
+ static const int kCpuFeaturesOffset = kSourceHashOffset + kInt32Size;
+ static const int kFlagHashOffset = kCpuFeaturesOffset + kInt32Size;
+ static const int kNumReservationsOffset = kFlagHashOffset + kInt32Size;
+ static const int kNumCodeStubKeysOffset = kNumReservationsOffset + kInt32Size;
+ static const int kPayloadLengthOffset = kNumCodeStubKeysOffset + kInt32Size;
+ static const int kChecksum1Offset = kPayloadLengthOffset + kInt32Size;
+ static const int kChecksum2Offset = kChecksum1Offset + kInt32Size;
+ static const int kHeaderSize = kChecksum2Offset + kInt32Size;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_CODE_SERIALIZER_H_
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
new file mode 100644
index 0000000000..0a21feffa1
--- /dev/null
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -0,0 +1,818 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/deserializer.h"
+
+#include "src/bootstrapper.h"
+#include "src/external-reference-table.h"
+#include "src/heap/heap.h"
+#include "src/isolate.h"
+#include "src/macro-assembler.h"
+#include "src/snapshot/natives.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+void Deserializer::DecodeReservation(
+ Vector<const SerializedData::Reservation> res) {
+ DCHECK_EQ(0, reservations_[NEW_SPACE].length());
+ STATIC_ASSERT(NEW_SPACE == 0);
+ int current_space = NEW_SPACE;
+ for (auto& r : res) {
+ reservations_[current_space].Add({r.chunk_size(), NULL, NULL});
+ if (r.is_last()) current_space++;
+ }
+ DCHECK_EQ(kNumberOfSpaces, current_space);
+ for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) current_chunk_[i] = 0;
+}
+
+void Deserializer::FlushICacheForNewIsolate() {
+ DCHECK(!deserializing_user_code_);
+ // The entire isolate is newly deserialized. Simply flush all code pages.
+ PageIterator it(isolate_->heap()->code_space());
+ while (it.has_next()) {
+ Page* p = it.next();
+ Assembler::FlushICache(isolate_, p->area_start(),
+ p->area_end() - p->area_start());
+ }
+}
+
+void Deserializer::FlushICacheForNewCodeObjects() {
+ DCHECK(deserializing_user_code_);
+ for (Code* code : new_code_objects_) {
+ if (FLAG_serialize_age_code) code->PreAge(isolate_);
+ Assembler::FlushICache(isolate_, code->instruction_start(),
+ code->instruction_size());
+ }
+}
+
+bool Deserializer::ReserveSpace() {
+#ifdef DEBUG
+ for (int i = NEW_SPACE; i < kNumberOfSpaces; ++i) {
+ CHECK(reservations_[i].length() > 0);
+ }
+#endif // DEBUG
+ if (!isolate_->heap()->ReserveSpace(reservations_)) return false;
+ for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
+ high_water_[i] = reservations_[i][0].start;
+ }
+ return true;
+}
+
+void Deserializer::Initialize(Isolate* isolate) {
+ DCHECK_NULL(isolate_);
+ DCHECK_NOT_NULL(isolate);
+ isolate_ = isolate;
+ DCHECK_NULL(external_reference_table_);
+ external_reference_table_ = ExternalReferenceTable::instance(isolate);
+ CHECK_EQ(magic_number_,
+ SerializedData::ComputeMagicNumber(external_reference_table_));
+}
+
+void Deserializer::Deserialize(Isolate* isolate) {
+ Initialize(isolate);
+ if (!ReserveSpace()) V8::FatalProcessOutOfMemory("deserializing context");
+ // No active threads.
+ DCHECK_NULL(isolate_->thread_manager()->FirstThreadStateInUse());
+ // No active handles.
+ DCHECK(isolate_->handle_scope_implementer()->blocks()->is_empty());
+ // Partial snapshot cache is not yet populated.
+ DCHECK(isolate_->partial_snapshot_cache()->is_empty());
+
+ {
+ DisallowHeapAllocation no_gc;
+ isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG_ROOT_LIST);
+ isolate_->heap()->IterateSmiRoots(this);
+ isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
+ isolate_->heap()->RepairFreeListsAfterDeserialization();
+ isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
+ DeserializeDeferredObjects();
+ FlushICacheForNewIsolate();
+ }
+
+ isolate_->heap()->set_native_contexts_list(
+ isolate_->heap()->undefined_value());
+ // The allocation site list is build during root iteration, but if no sites
+ // were encountered then it needs to be initialized to undefined.
+ if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
+ isolate_->heap()->set_allocation_sites_list(
+ isolate_->heap()->undefined_value());
+ }
+
+ // Update data pointers to the external strings containing natives sources.
+ Natives::UpdateSourceCache(isolate_->heap());
+ ExtraNatives::UpdateSourceCache(isolate_->heap());
+
+ // Issue code events for newly deserialized code objects.
+ LOG_CODE_EVENT(isolate_, LogCodeObjects());
+ LOG_CODE_EVENT(isolate_, LogBytecodeHandlers());
+ LOG_CODE_EVENT(isolate_, LogCompiledFunctions());
+}
+
+MaybeHandle<Object> Deserializer::DeserializePartial(
+ Isolate* isolate, Handle<JSGlobalProxy> global_proxy) {
+ Initialize(isolate);
+ if (!ReserveSpace()) {
+ V8::FatalProcessOutOfMemory("deserialize context");
+ return MaybeHandle<Object>();
+ }
+
+ Vector<Handle<Object> > attached_objects = Vector<Handle<Object> >::New(1);
+ attached_objects[kGlobalProxyReference] = global_proxy;
+ SetAttachedObjects(attached_objects);
+
+ DisallowHeapAllocation no_gc;
+ // Keep track of the code space start and end pointers in case new
+ // code objects were unserialized
+ OldSpace* code_space = isolate_->heap()->code_space();
+ Address start_address = code_space->top();
+ Object* root;
+ VisitPointer(&root);
+ DeserializeDeferredObjects();
+
+ isolate->heap()->RegisterReservationsForBlackAllocation(reservations_);
+
+ // There's no code deserialized here. If this assert fires then that's
+ // changed and logging should be added to notify the profiler et al of the
+ // new code, which also has to be flushed from instruction cache.
+ CHECK_EQ(start_address, code_space->top());
+ return Handle<Object>(root, isolate);
+}
+
+MaybeHandle<SharedFunctionInfo> Deserializer::DeserializeCode(
+ Isolate* isolate) {
+ Initialize(isolate);
+ if (!ReserveSpace()) {
+ return Handle<SharedFunctionInfo>();
+ } else {
+ deserializing_user_code_ = true;
+ HandleScope scope(isolate);
+ Handle<SharedFunctionInfo> result;
+ {
+ DisallowHeapAllocation no_gc;
+ Object* root;
+ VisitPointer(&root);
+ DeserializeDeferredObjects();
+ FlushICacheForNewCodeObjects();
+ result = Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(root));
+ isolate->heap()->RegisterReservationsForBlackAllocation(reservations_);
+ }
+ CommitPostProcessedObjects(isolate);
+ return scope.CloseAndEscape(result);
+ }
+}
+
+Deserializer::~Deserializer() {
+ // TODO(svenpanne) Re-enable this assertion when v8 initialization is fixed.
+ // DCHECK(source_.AtEOF());
+ attached_objects_.Dispose();
+}
+
+// This is called on the roots. It is the driver of the deserialization
+// process. It is also called on the body of each function.
+void Deserializer::VisitPointers(Object** start, Object** end) {
+ // The space must be new space. Any other space would cause ReadChunk to try
+ // to update the remembered using NULL as the address.
+ ReadData(start, end, NEW_SPACE, NULL);
+}
+
+void Deserializer::Synchronize(VisitorSynchronization::SyncTag tag) {
+ static const byte expected = kSynchronize;
+ CHECK_EQ(expected, source_.Get());
+}
+
+void Deserializer::DeserializeDeferredObjects() {
+ for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
+ switch (code) {
+ case kAlignmentPrefix:
+ case kAlignmentPrefix + 1:
+ case kAlignmentPrefix + 2:
+ SetAlignment(code);
+ break;
+ default: {
+ int space = code & kSpaceMask;
+ DCHECK(space <= kNumberOfSpaces);
+ DCHECK(code - space == kNewObject);
+ HeapObject* object = GetBackReferencedObject(space);
+ int size = source_.GetInt() << kPointerSizeLog2;
+ Address obj_address = object->address();
+ Object** start = reinterpret_cast<Object**>(obj_address + kPointerSize);
+ Object** end = reinterpret_cast<Object**>(obj_address + size);
+ bool filled = ReadData(start, end, space, obj_address);
+ CHECK(filled);
+ DCHECK(CanBeDeferred(object));
+ PostProcessNewObject(object, space);
+ }
+ }
+ }
+}
+
+// Used to insert a deserialized internalized string into the string table.
+class StringTableInsertionKey : public HashTableKey {
+ public:
+ explicit StringTableInsertionKey(String* string)
+ : string_(string), hash_(HashForObject(string)) {
+ DCHECK(string->IsInternalizedString());
+ }
+
+ bool IsMatch(Object* string) override {
+ // We know that all entries in a hash table had their hash keys created.
+ // Use that knowledge to have fast failure.
+ if (hash_ != HashForObject(string)) return false;
+ // We want to compare the content of two internalized strings here.
+ return string_->SlowEquals(String::cast(string));
+ }
+
+ uint32_t Hash() override { return hash_; }
+
+ uint32_t HashForObject(Object* key) override {
+ return String::cast(key)->Hash();
+ }
+
+ MUST_USE_RESULT Handle<Object> AsHandle(Isolate* isolate) override {
+ return handle(string_, isolate);
+ }
+
+ private:
+ String* string_;
+ uint32_t hash_;
+ DisallowHeapAllocation no_gc;
+};
+
+HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
+ if (deserializing_user_code()) {
+ if (obj->IsString()) {
+ String* string = String::cast(obj);
+ // Uninitialize hash field as the hash seed may have changed.
+ string->set_hash_field(String::kEmptyHashField);
+ if (string->IsInternalizedString()) {
+ // Canonicalize the internalized string. If it already exists in the
+ // string table, set it to forward to the existing one.
+ StringTableInsertionKey key(string);
+ String* canonical = StringTable::LookupKeyIfExists(isolate_, &key);
+ if (canonical == NULL) {
+ new_internalized_strings_.Add(handle(string));
+ return string;
+ } else {
+ string->SetForwardedInternalizedString(canonical);
+ return canonical;
+ }
+ }
+ } else if (obj->IsScript()) {
+ new_scripts_.Add(handle(Script::cast(obj)));
+ } else {
+ DCHECK(CanBeDeferred(obj));
+ }
+ }
+ if (obj->IsAllocationSite()) {
+ DCHECK(obj->IsAllocationSite());
+ // Allocation sites are present in the snapshot, and must be linked into
+ // a list at deserialization time.
+ AllocationSite* site = AllocationSite::cast(obj);
+ // TODO(mvstanton): consider treating the heap()->allocation_sites_list()
+ // as a (weak) root. If this root is relocated correctly, this becomes
+ // unnecessary.
+ if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
+ site->set_weak_next(isolate_->heap()->undefined_value());
+ } else {
+ site->set_weak_next(isolate_->heap()->allocation_sites_list());
+ }
+ isolate_->heap()->set_allocation_sites_list(site);
+ } else if (obj->IsCode()) {
+ // We flush all code pages after deserializing the startup snapshot. In that
+ // case, we only need to remember code objects in the large object space.
+ // When deserializing user code, remember each individual code object.
+ if (deserializing_user_code() || space == LO_SPACE) {
+ new_code_objects_.Add(Code::cast(obj));
+ }
+ }
+ // Check alignment.
+ DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(), obj->RequiredAlignment()));
+ return obj;
+}
+
+void Deserializer::CommitPostProcessedObjects(Isolate* isolate) {
+ StringTable::EnsureCapacityForDeserialization(
+ isolate, new_internalized_strings_.length());
+ for (Handle<String> string : new_internalized_strings_) {
+ StringTableInsertionKey key(*string);
+ DCHECK_NULL(StringTable::LookupKeyIfExists(isolate, &key));
+ StringTable::LookupKey(isolate, &key);
+ }
+
+ Heap* heap = isolate->heap();
+ Factory* factory = isolate->factory();
+ for (Handle<Script> script : new_scripts_) {
+ // Assign a new script id to avoid collision.
+ script->set_id(isolate_->heap()->NextScriptId());
+ // Add script to list.
+ Handle<Object> list = WeakFixedArray::Add(factory->script_list(), script);
+ heap->SetRootScriptList(*list);
+ }
+}
+
+HeapObject* Deserializer::GetBackReferencedObject(int space) {
+ HeapObject* obj;
+ BackReference back_reference(source_.GetInt());
+ if (space == LO_SPACE) {
+ CHECK(back_reference.chunk_index() == 0);
+ uint32_t index = back_reference.large_object_index();
+ obj = deserialized_large_objects_[index];
+ } else {
+ DCHECK(space < kNumberOfPreallocatedSpaces);
+ uint32_t chunk_index = back_reference.chunk_index();
+ DCHECK_LE(chunk_index, current_chunk_[space]);
+ uint32_t chunk_offset = back_reference.chunk_offset();
+ Address address = reservations_[space][chunk_index].start + chunk_offset;
+ if (next_alignment_ != kWordAligned) {
+ int padding = Heap::GetFillToAlign(address, next_alignment_);
+ next_alignment_ = kWordAligned;
+ DCHECK(padding == 0 || HeapObject::FromAddress(address)->IsFiller());
+ address += padding;
+ }
+ obj = HeapObject::FromAddress(address);
+ }
+ if (deserializing_user_code() && obj->IsInternalizedString()) {
+ obj = String::cast(obj)->GetForwardedInternalizedString();
+ }
+ hot_objects_.Add(obj);
+ return obj;
+}
+
+// This routine writes the new object into the pointer provided and then
+// returns true if the new object was in young space and false otherwise.
+// The reason for this strange interface is that otherwise the object is
+// written very late, which means the FreeSpace map is not set up by the
+// time we need to use it to mark the space at the end of a page free.
+void Deserializer::ReadObject(int space_number, Object** write_back) {
+ Address address;
+ HeapObject* obj;
+ int size = source_.GetInt() << kObjectAlignmentBits;
+
+ if (next_alignment_ != kWordAligned) {
+ int reserved = size + Heap::GetMaximumFillToAlign(next_alignment_);
+ address = Allocate(space_number, reserved);
+ obj = HeapObject::FromAddress(address);
+ // If one of the following assertions fails, then we are deserializing an
+ // aligned object when the filler maps have not been deserialized yet.
+ // We require filler maps as padding to align the object.
+ Heap* heap = isolate_->heap();
+ DCHECK(heap->free_space_map()->IsMap());
+ DCHECK(heap->one_pointer_filler_map()->IsMap());
+ DCHECK(heap->two_pointer_filler_map()->IsMap());
+ obj = heap->AlignWithFiller(obj, size, reserved, next_alignment_);
+ address = obj->address();
+ next_alignment_ = kWordAligned;
+ } else {
+ address = Allocate(space_number, size);
+ obj = HeapObject::FromAddress(address);
+ }
+
+ isolate_->heap()->OnAllocationEvent(obj, size);
+ Object** current = reinterpret_cast<Object**>(address);
+ Object** limit = current + (size >> kPointerSizeLog2);
+
+ if (ReadData(current, limit, space_number, address)) {
+ // Only post process if object content has not been deferred.
+ obj = PostProcessNewObject(obj, space_number);
+ }
+
+ Object* write_back_obj = obj;
+ UnalignedCopy(write_back, &write_back_obj);
+#ifdef DEBUG
+ if (obj->IsCode()) {
+ DCHECK(space_number == CODE_SPACE || space_number == LO_SPACE);
+ } else {
+ DCHECK(space_number != CODE_SPACE);
+ }
+#endif // DEBUG
+}
+
+// We know the space requirements before deserialization and can
+// pre-allocate that reserved space. During deserialization, all we need
+// to do is to bump up the pointer for each space in the reserved
+// space. This is also used for fixing back references.
+// We may have to split up the pre-allocation into several chunks
+// because it would not fit onto a single page. We do not have to keep
+// track of when to move to the next chunk. An opcode will signal this.
+// Since multiple large objects cannot be folded into one large object
+// space allocation, we have to do an actual allocation when deserializing
+// each large object. Instead of tracking offset for back references, we
+// reference large objects by index.
+Address Deserializer::Allocate(int space_index, int size) {
+ if (space_index == LO_SPACE) {
+ AlwaysAllocateScope scope(isolate_);
+ LargeObjectSpace* lo_space = isolate_->heap()->lo_space();
+ Executability exec = static_cast<Executability>(source_.Get());
+ AllocationResult result = lo_space->AllocateRaw(size, exec);
+ HeapObject* obj = HeapObject::cast(result.ToObjectChecked());
+ deserialized_large_objects_.Add(obj);
+ return obj->address();
+ } else {
+ DCHECK(space_index < kNumberOfPreallocatedSpaces);
+ Address address = high_water_[space_index];
+ DCHECK_NOT_NULL(address);
+ high_water_[space_index] += size;
+#ifdef DEBUG
+ // Assert that the current reserved chunk is still big enough.
+ const Heap::Reservation& reservation = reservations_[space_index];
+ int chunk_index = current_chunk_[space_index];
+ CHECK_LE(high_water_[space_index], reservation[chunk_index].end);
+#endif
+ if (space_index == CODE_SPACE) SkipList::Update(address, size);
+ return address;
+ }
+}
+
+Object** Deserializer::CopyInNativesSource(Vector<const char> source_vector,
+ Object** current) {
+ DCHECK(!isolate_->heap()->deserialization_complete());
+ NativesExternalStringResource* resource = new NativesExternalStringResource(
+ source_vector.start(), source_vector.length());
+ Object* resource_obj = reinterpret_cast<Object*>(resource);
+ UnalignedCopy(current++, &resource_obj);
+ return current;
+}
+
+bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
+ Address current_object_address) {
+ Isolate* const isolate = isolate_;
+ // Write barrier support costs around 1% in startup time. In fact there
+ // are no new space objects in current boot snapshots, so it's not needed,
+ // but that may change.
+ bool write_barrier_needed =
+ (current_object_address != NULL && source_space != NEW_SPACE &&
+ source_space != CODE_SPACE);
+ while (current < limit) {
+ byte data = source_.Get();
+ switch (data) {
+#define CASE_STATEMENT(where, how, within, space_number) \
+ case where + how + within + space_number: \
+ STATIC_ASSERT((where & ~kWhereMask) == 0); \
+ STATIC_ASSERT((how & ~kHowToCodeMask) == 0); \
+ STATIC_ASSERT((within & ~kWhereToPointMask) == 0); \
+ STATIC_ASSERT((space_number & ~kSpaceMask) == 0);
+
+#define CASE_BODY(where, how, within, space_number_if_any) \
+ { \
+ bool emit_write_barrier = false; \
+ bool current_was_incremented = false; \
+ int space_number = space_number_if_any == kAnyOldSpace \
+ ? (data & kSpaceMask) \
+ : space_number_if_any; \
+ if (where == kNewObject && how == kPlain && within == kStartOfObject) { \
+ ReadObject(space_number, current); \
+ emit_write_barrier = (space_number == NEW_SPACE); \
+ } else { \
+ Object* new_object = NULL; /* May not be a real Object pointer. */ \
+ if (where == kNewObject) { \
+ ReadObject(space_number, &new_object); \
+ } else if (where == kBackref) { \
+ emit_write_barrier = (space_number == NEW_SPACE); \
+ new_object = GetBackReferencedObject(data & kSpaceMask); \
+ } else if (where == kBackrefWithSkip) { \
+ int skip = source_.GetInt(); \
+ current = reinterpret_cast<Object**>( \
+ reinterpret_cast<Address>(current) + skip); \
+ emit_write_barrier = (space_number == NEW_SPACE); \
+ new_object = GetBackReferencedObject(data & kSpaceMask); \
+ } else if (where == kRootArray) { \
+ int id = source_.GetInt(); \
+ Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(id); \
+ new_object = isolate->heap()->root(root_index); \
+ emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
+ } else if (where == kPartialSnapshotCache) { \
+ int cache_index = source_.GetInt(); \
+ new_object = isolate->partial_snapshot_cache()->at(cache_index); \
+ emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
+ } else if (where == kExternalReference) { \
+ int skip = source_.GetInt(); \
+ current = reinterpret_cast<Object**>( \
+ reinterpret_cast<Address>(current) + skip); \
+ int reference_id = source_.GetInt(); \
+ Address address = external_reference_table_->address(reference_id); \
+ new_object = reinterpret_cast<Object*>(address); \
+ } else if (where == kAttachedReference) { \
+ int index = source_.GetInt(); \
+ DCHECK(deserializing_user_code() || index == kGlobalProxyReference); \
+ new_object = *attached_objects_[index]; \
+ emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
+ } else { \
+ DCHECK(where == kBuiltin); \
+ DCHECK(deserializing_user_code()); \
+ int builtin_id = source_.GetInt(); \
+ DCHECK_LE(0, builtin_id); \
+ DCHECK_LT(builtin_id, Builtins::builtin_count); \
+ Builtins::Name name = static_cast<Builtins::Name>(builtin_id); \
+ new_object = isolate->builtins()->builtin(name); \
+ emit_write_barrier = false; \
+ } \
+ if (within == kInnerPointer) { \
+ if (space_number != CODE_SPACE || new_object->IsCode()) { \
+ Code* new_code_object = reinterpret_cast<Code*>(new_object); \
+ new_object = \
+ reinterpret_cast<Object*>(new_code_object->instruction_start()); \
+ } else { \
+ DCHECK(space_number == CODE_SPACE); \
+ Cell* cell = Cell::cast(new_object); \
+ new_object = reinterpret_cast<Object*>(cell->ValueAddress()); \
+ } \
+ } \
+ if (how == kFromCode) { \
+ Address location_of_branch_data = reinterpret_cast<Address>(current); \
+ Assembler::deserialization_set_special_target_at( \
+ isolate, location_of_branch_data, \
+ Code::cast(HeapObject::FromAddress(current_object_address)), \
+ reinterpret_cast<Address>(new_object)); \
+ location_of_branch_data += Assembler::kSpecialTargetSize; \
+ current = reinterpret_cast<Object**>(location_of_branch_data); \
+ current_was_incremented = true; \
+ } else { \
+ UnalignedCopy(current, &new_object); \
+ } \
+ } \
+ if (emit_write_barrier && write_barrier_needed) { \
+ Address current_address = reinterpret_cast<Address>(current); \
+ SLOW_DCHECK(isolate->heap()->ContainsSlow(current_object_address)); \
+ isolate->heap()->RecordWrite( \
+ HeapObject::FromAddress(current_object_address), \
+ static_cast<int>(current_address - current_object_address), \
+ *reinterpret_cast<Object**>(current_address)); \
+ } \
+ if (!current_was_incremented) { \
+ current++; \
+ } \
+ break; \
+ }
+
+// This generates a case and a body for the new space (which has to do extra
+// write barrier handling) and handles the other spaces with fall-through cases
+// and one body.
+#define ALL_SPACES(where, how, within) \
+ CASE_STATEMENT(where, how, within, NEW_SPACE) \
+ CASE_BODY(where, how, within, NEW_SPACE) \
+ CASE_STATEMENT(where, how, within, OLD_SPACE) \
+ CASE_STATEMENT(where, how, within, CODE_SPACE) \
+ CASE_STATEMENT(where, how, within, MAP_SPACE) \
+ CASE_STATEMENT(where, how, within, LO_SPACE) \
+ CASE_BODY(where, how, within, kAnyOldSpace)
+
+#define FOUR_CASES(byte_code) \
+ case byte_code: \
+ case byte_code + 1: \
+ case byte_code + 2: \
+ case byte_code + 3:
+
+#define SIXTEEN_CASES(byte_code) \
+ FOUR_CASES(byte_code) \
+ FOUR_CASES(byte_code + 4) \
+ FOUR_CASES(byte_code + 8) \
+ FOUR_CASES(byte_code + 12)
+
+#define SINGLE_CASE(where, how, within, space) \
+ CASE_STATEMENT(where, how, within, space) \
+ CASE_BODY(where, how, within, space)
+
+ // Deserialize a new object and write a pointer to it to the current
+ // object.
+ ALL_SPACES(kNewObject, kPlain, kStartOfObject)
+ // Support for direct instruction pointers in functions. It's an inner
+ // pointer because it points at the entry point, not at the start of the
+ // code object.
+ SINGLE_CASE(kNewObject, kPlain, kInnerPointer, CODE_SPACE)
+ // Deserialize a new code object and write a pointer to its first
+ // instruction to the current code object.
+ ALL_SPACES(kNewObject, kFromCode, kInnerPointer)
+ // Find a recently deserialized object using its offset from the current
+ // allocation point and write a pointer to it to the current object.
+ ALL_SPACES(kBackref, kPlain, kStartOfObject)
+ ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject)
+#if V8_CODE_EMBEDS_OBJECT_POINTER
+ // Deserialize a new object from pointer found in code and write
+ // a pointer to it to the current object. Required only for MIPS, PPC, ARM
+ // or S390 with embedded constant pool, and omitted on the other
+ // architectures because it is fully unrolled and would cause bloat.
+ ALL_SPACES(kNewObject, kFromCode, kStartOfObject)
+ // Find a recently deserialized code object using its offset from the
+ // current allocation point and write a pointer to it to the current
+ // object. Required only for MIPS, PPC, ARM or S390 with embedded
+ // constant pool.
+ ALL_SPACES(kBackref, kFromCode, kStartOfObject)
+ ALL_SPACES(kBackrefWithSkip, kFromCode, kStartOfObject)
+#endif
+ // Find a recently deserialized code object using its offset from the
+ // current allocation point and write a pointer to its first instruction
+ // to the current code object or the instruction pointer in a function
+ // object.
+ ALL_SPACES(kBackref, kFromCode, kInnerPointer)
+ ALL_SPACES(kBackrefWithSkip, kFromCode, kInnerPointer)
+ ALL_SPACES(kBackref, kPlain, kInnerPointer)
+ ALL_SPACES(kBackrefWithSkip, kPlain, kInnerPointer)
+ // Find an object in the roots array and write a pointer to it to the
+ // current object.
+ SINGLE_CASE(kRootArray, kPlain, kStartOfObject, 0)
+#if V8_CODE_EMBEDS_OBJECT_POINTER
+ // Find an object in the roots array and write a pointer to it to in code.
+ SINGLE_CASE(kRootArray, kFromCode, kStartOfObject, 0)
+#endif
+ // Find an object in the partial snapshots cache and write a pointer to it
+ // to the current object.
+ SINGLE_CASE(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
+ // Find an code entry in the partial snapshots cache and
+ // write a pointer to it to the current object.
+ SINGLE_CASE(kPartialSnapshotCache, kPlain, kInnerPointer, 0)
+ // Find an external reference and write a pointer to it to the current
+ // object.
+ SINGLE_CASE(kExternalReference, kPlain, kStartOfObject, 0)
+ // Find an external reference and write a pointer to it in the current
+ // code object.
+ SINGLE_CASE(kExternalReference, kFromCode, kStartOfObject, 0)
+ // Find an object in the attached references and write a pointer to it to
+ // the current object.
+ SINGLE_CASE(kAttachedReference, kPlain, kStartOfObject, 0)
+ SINGLE_CASE(kAttachedReference, kPlain, kInnerPointer, 0)
+ SINGLE_CASE(kAttachedReference, kFromCode, kInnerPointer, 0)
+ // Find a builtin and write a pointer to it to the current object.
+ SINGLE_CASE(kBuiltin, kPlain, kStartOfObject, 0)
+ SINGLE_CASE(kBuiltin, kPlain, kInnerPointer, 0)
+ SINGLE_CASE(kBuiltin, kFromCode, kInnerPointer, 0)
+
+#undef CASE_STATEMENT
+#undef CASE_BODY
+#undef ALL_SPACES
+
+ case kSkip: {
+ int size = source_.GetInt();
+ current = reinterpret_cast<Object**>(
+ reinterpret_cast<intptr_t>(current) + size);
+ break;
+ }
+
+ case kInternalReferenceEncoded:
+ case kInternalReference: {
+ // Internal reference address is not encoded via skip, but by offset
+ // from code entry.
+ int pc_offset = source_.GetInt();
+ int target_offset = source_.GetInt();
+ Code* code =
+ Code::cast(HeapObject::FromAddress(current_object_address));
+ DCHECK(0 <= pc_offset && pc_offset <= code->instruction_size());
+ DCHECK(0 <= target_offset && target_offset <= code->instruction_size());
+ Address pc = code->entry() + pc_offset;
+ Address target = code->entry() + target_offset;
+ Assembler::deserialization_set_target_internal_reference_at(
+ isolate, pc, target, data == kInternalReference
+ ? RelocInfo::INTERNAL_REFERENCE
+ : RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ break;
+ }
+
+ case kNop:
+ break;
+
+ case kNextChunk: {
+ int space = source_.Get();
+ DCHECK(space < kNumberOfPreallocatedSpaces);
+ int chunk_index = current_chunk_[space];
+ const Heap::Reservation& reservation = reservations_[space];
+ // Make sure the current chunk is indeed exhausted.
+ CHECK_EQ(reservation[chunk_index].end, high_water_[space]);
+ // Move to next reserved chunk.
+ chunk_index = ++current_chunk_[space];
+ CHECK_LT(chunk_index, reservation.length());
+ high_water_[space] = reservation[chunk_index].start;
+ break;
+ }
+
+ case kDeferred: {
+ // Deferred can only occur right after the heap object header.
+ DCHECK(current == reinterpret_cast<Object**>(current_object_address +
+ kPointerSize));
+ HeapObject* obj = HeapObject::FromAddress(current_object_address);
+ // If the deferred object is a map, its instance type may be used
+ // during deserialization. Initialize it with a temporary value.
+ if (obj->IsMap()) Map::cast(obj)->set_instance_type(FILLER_TYPE);
+ current = limit;
+ return false;
+ }
+
+ case kSynchronize:
+ // If we get here then that indicates that you have a mismatch between
+ // the number of GC roots when serializing and deserializing.
+ CHECK(false);
+ break;
+
+ case kNativesStringResource:
+ current = CopyInNativesSource(Natives::GetScriptSource(source_.Get()),
+ current);
+ break;
+
+ case kExtraNativesStringResource:
+ current = CopyInNativesSource(
+ ExtraNatives::GetScriptSource(source_.Get()), current);
+ break;
+
+ // Deserialize raw data of variable length.
+ case kVariableRawData: {
+ int size_in_bytes = source_.GetInt();
+ byte* raw_data_out = reinterpret_cast<byte*>(current);
+ source_.CopyRaw(raw_data_out, size_in_bytes);
+ break;
+ }
+
+ case kVariableRepeat: {
+ int repeats = source_.GetInt();
+ Object* object = current[-1];
+ DCHECK(!isolate->heap()->InNewSpace(object));
+ for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
+ break;
+ }
+
+ case kAlignmentPrefix:
+ case kAlignmentPrefix + 1:
+ case kAlignmentPrefix + 2:
+ SetAlignment(data);
+ break;
+
+ STATIC_ASSERT(kNumberOfRootArrayConstants == Heap::kOldSpaceRoots);
+ STATIC_ASSERT(kNumberOfRootArrayConstants == 32);
+ SIXTEEN_CASES(kRootArrayConstantsWithSkip)
+ SIXTEEN_CASES(kRootArrayConstantsWithSkip + 16) {
+ int skip = source_.GetInt();
+ current = reinterpret_cast<Object**>(
+ reinterpret_cast<intptr_t>(current) + skip);
+ // Fall through.
+ }
+
+ SIXTEEN_CASES(kRootArrayConstants)
+ SIXTEEN_CASES(kRootArrayConstants + 16) {
+ int id = data & kRootArrayConstantsMask;
+ Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(id);
+ Object* object = isolate->heap()->root(root_index);
+ DCHECK(!isolate->heap()->InNewSpace(object));
+ UnalignedCopy(current++, &object);
+ break;
+ }
+
+ STATIC_ASSERT(kNumberOfHotObjects == 8);
+ FOUR_CASES(kHotObjectWithSkip)
+ FOUR_CASES(kHotObjectWithSkip + 4) {
+ int skip = source_.GetInt();
+ current = reinterpret_cast<Object**>(
+ reinterpret_cast<Address>(current) + skip);
+ // Fall through.
+ }
+
+ FOUR_CASES(kHotObject)
+ FOUR_CASES(kHotObject + 4) {
+ int index = data & kHotObjectMask;
+ Object* hot_object = hot_objects_.Get(index);
+ UnalignedCopy(current, &hot_object);
+ if (write_barrier_needed) {
+ Address current_address = reinterpret_cast<Address>(current);
+ SLOW_DCHECK(isolate->heap()->ContainsSlow(current_object_address));
+ isolate->heap()->RecordWrite(
+ HeapObject::FromAddress(current_object_address),
+ static_cast<int>(current_address - current_object_address),
+ hot_object);
+ }
+ current++;
+ break;
+ }
+
+ // Deserialize raw data of fixed length from 1 to 32 words.
+ STATIC_ASSERT(kNumberOfFixedRawData == 32);
+ SIXTEEN_CASES(kFixedRawData)
+ SIXTEEN_CASES(kFixedRawData + 16) {
+ byte* raw_data_out = reinterpret_cast<byte*>(current);
+ int size_in_bytes = (data - kFixedRawDataStart) << kPointerSizeLog2;
+ source_.CopyRaw(raw_data_out, size_in_bytes);
+ current = reinterpret_cast<Object**>(raw_data_out + size_in_bytes);
+ break;
+ }
+
+ STATIC_ASSERT(kNumberOfFixedRepeat == 16);
+ SIXTEEN_CASES(kFixedRepeat) {
+ int repeats = data - kFixedRepeatStart;
+ Object* object;
+ UnalignedCopy(&object, current - 1);
+ DCHECK(!isolate->heap()->InNewSpace(object));
+ for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
+ break;
+ }
+
+#undef SIXTEEN_CASES
+#undef FOUR_CASES
+#undef SINGLE_CASE
+
+ default:
+ CHECK(false);
+ }
+ }
+ CHECK_EQ(limit, current);
+ return true;
+}
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/snapshot/deserializer.h b/deps/v8/src/snapshot/deserializer.h
new file mode 100644
index 0000000000..58c481cc79
--- /dev/null
+++ b/deps/v8/src/snapshot/deserializer.h
@@ -0,0 +1,150 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_DESERIALIZER_H_
+#define V8_SNAPSHOT_DESERIALIZER_H_
+
+#include "src/heap/heap.h"
+#include "src/objects.h"
+#include "src/snapshot/serializer-common.h"
+#include "src/snapshot/snapshot-source-sink.h"
+
+namespace v8 {
+namespace internal {
+
+// Used for platforms with embedded constant pools to trigger deserialization
+// of objects found in code.
+#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
+ defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_S390) || \
+ V8_EMBEDDED_CONSTANT_POOL
+#define V8_CODE_EMBEDS_OBJECT_POINTER 1
+#else
+#define V8_CODE_EMBEDS_OBJECT_POINTER 0
+#endif
+
+class Heap;
+
+// A Deserializer reads a snapshot and reconstructs the Object graph it defines.
+class Deserializer : public SerializerDeserializer {
+ public:
+ // Create a deserializer from a snapshot byte source.
+ template <class Data>
+ explicit Deserializer(Data* data)
+ : isolate_(NULL),
+ source_(data->Payload()),
+ magic_number_(data->GetMagicNumber()),
+ external_reference_table_(NULL),
+ deserialized_large_objects_(0),
+ deserializing_user_code_(false),
+ next_alignment_(kWordAligned) {
+ DecodeReservation(data->Reservations());
+ }
+
+ ~Deserializer() override;
+
+ // Deserialize the snapshot into an empty heap.
+ void Deserialize(Isolate* isolate);
+
+ // Deserialize a single object and the objects reachable from it.
+ MaybeHandle<Object> DeserializePartial(Isolate* isolate,
+ Handle<JSGlobalProxy> global_proxy);
+
+ // Deserialize a shared function info. Fail gracefully.
+ MaybeHandle<SharedFunctionInfo> DeserializeCode(Isolate* isolate);
+
+ // Pass a vector of externally-provided objects referenced by the snapshot.
+ // The ownership to its backing store is handed over as well.
+ void SetAttachedObjects(Vector<Handle<Object> > attached_objects) {
+ attached_objects_ = attached_objects;
+ }
+
+ private:
+ void VisitPointers(Object** start, Object** end) override;
+
+ void Synchronize(VisitorSynchronization::SyncTag tag) override;
+
+ void VisitRuntimeEntry(RelocInfo* rinfo) override { UNREACHABLE(); }
+
+ void Initialize(Isolate* isolate);
+
+ bool deserializing_user_code() { return deserializing_user_code_; }
+
+ void DecodeReservation(Vector<const SerializedData::Reservation> res);
+
+ bool ReserveSpace();
+
+ void UnalignedCopy(Object** dest, Object** src) {
+ memcpy(dest, src, sizeof(*src));
+ }
+
+ void SetAlignment(byte data) {
+ DCHECK_EQ(kWordAligned, next_alignment_);
+ int alignment = data - (kAlignmentPrefix - 1);
+ DCHECK_LE(kWordAligned, alignment);
+ DCHECK_LE(alignment, kSimd128Unaligned);
+ next_alignment_ = static_cast<AllocationAlignment>(alignment);
+ }
+
+ void DeserializeDeferredObjects();
+
+ void FlushICacheForNewIsolate();
+ void FlushICacheForNewCodeObjects();
+
+ void CommitPostProcessedObjects(Isolate* isolate);
+
+ // Fills in some heap data in an area from start to end (non-inclusive). The
+ // space id is used for the write barrier. The object_address is the address
+ // of the object we are writing into, or NULL if we are not writing into an
+ // object, i.e. if we are writing a series of tagged values that are not on
+ // the heap. Return false if the object content has been deferred.
+ bool ReadData(Object** start, Object** end, int space,
+ Address object_address);
+ void ReadObject(int space_number, Object** write_back);
+ Address Allocate(int space_index, int size);
+
+ // Special handling for serialized code like hooking up internalized strings.
+ HeapObject* PostProcessNewObject(HeapObject* obj, int space);
+
+ // This returns the address of an object that has been described in the
+ // snapshot by chunk index and offset.
+ HeapObject* GetBackReferencedObject(int space);
+
+ Object** CopyInNativesSource(Vector<const char> source_vector,
+ Object** current);
+
+ // Cached current isolate.
+ Isolate* isolate_;
+
+ // Objects from the attached object descriptions in the serialized user code.
+ Vector<Handle<Object> > attached_objects_;
+
+ SnapshotByteSource source_;
+ uint32_t magic_number_;
+
+ // The address of the next object that will be allocated in each space.
+ // Each space has a number of chunks reserved by the GC, with each chunk
+ // fitting into a page. Deserialized objects are allocated into the
+ // current chunk of the target space by bumping up high water mark.
+ Heap::Reservation reservations_[kNumberOfSpaces];
+ uint32_t current_chunk_[kNumberOfPreallocatedSpaces];
+ Address high_water_[kNumberOfPreallocatedSpaces];
+
+ ExternalReferenceTable* external_reference_table_;
+
+ List<HeapObject*> deserialized_large_objects_;
+ List<Code*> new_code_objects_;
+ List<Handle<String> > new_internalized_strings_;
+ List<Handle<Script> > new_scripts_;
+
+ bool deserializing_user_code_;
+
+ AllocationAlignment next_alignment_;
+
+ DISALLOW_COPY_AND_ASSIGN(Deserializer);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_DESERIALIZER_H_
diff --git a/deps/v8/src/snapshot/mksnapshot.cc b/deps/v8/src/snapshot/mksnapshot.cc
index c69025adca..c38f92f5b1 100644
--- a/deps/v8/src/snapshot/mksnapshot.cc
+++ b/deps/v8/src/snapshot/mksnapshot.cc
@@ -12,8 +12,8 @@
#include "src/flags.h"
#include "src/list.h"
#include "src/snapshot/natives.h"
-#include "src/snapshot/serialize.h"
-
+#include "src/snapshot/partial-serializer.h"
+#include "src/snapshot/startup-serializer.h"
using namespace v8;
@@ -109,10 +109,9 @@ class SnapshotWriter {
FILE* startup_blob_file_;
};
-
-char* GetExtraCode(char* filename) {
+char* GetExtraCode(char* filename, const char* description) {
if (filename == NULL || strlen(filename) == 0) return NULL;
- ::printf("Embedding extra script: %s\n", filename);
+ ::printf("Loading script for %s: %s\n", description, filename);
FILE* file = base::OS::FOpen(filename, "rb");
if (file == NULL) {
fprintf(stderr, "Failed to open '%s': errno %d\n", filename, errno);
@@ -137,14 +136,13 @@ char* GetExtraCode(char* filename) {
int main(int argc, char** argv) {
- // By default, log code create information in the snapshot.
- i::FLAG_log_code = true;
- i::FLAG_logfile_per_isolate = false;
+ // Make mksnapshot runs predictable to create reproducible snapshots.
+ i::FLAG_predictable = true;
// Print the usage if an error occurs when parsing the command line
// flags or if the help flag is set.
int result = i::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
- if (result > 0 || (argc != 1 && argc != 2) || i::FLAG_help) {
+ if (result > 0 || (argc > 3) || i::FLAG_help) {
::printf("Usage: %s --startup_src=... --startup_blob=... [extras]\n",
argv[0]);
i::FlagList::PrintHelp();
@@ -161,11 +159,21 @@ int main(int argc, char** argv) {
SnapshotWriter writer;
if (i::FLAG_startup_src) writer.SetSnapshotFile(i::FLAG_startup_src);
if (i::FLAG_startup_blob) writer.SetStartupBlobFile(i::FLAG_startup_blob);
- char* extra_code = GetExtraCode(argc == 2 ? argv[1] : NULL);
- StartupData blob = v8::V8::CreateSnapshotDataBlob(extra_code);
+
+ char* embed_script = GetExtraCode(argc >= 2 ? argv[1] : NULL, "embedding");
+ StartupData blob = v8::V8::CreateSnapshotDataBlob(embed_script);
+ delete[] embed_script;
+
+ char* warmup_script = GetExtraCode(argc >= 3 ? argv[2] : NULL, "warm up");
+ if (warmup_script) {
+ StartupData cold = blob;
+ blob = v8::V8::WarmUpSnapshotDataBlob(cold, warmup_script);
+ delete[] cold.data;
+ delete[] warmup_script;
+ }
+
CHECK(blob.data);
writer.WriteSnapshot(blob);
- delete[] extra_code;
delete[] blob.data;
}
diff --git a/deps/v8/src/snapshot/partial-serializer.cc b/deps/v8/src/snapshot/partial-serializer.cc
new file mode 100644
index 0000000000..0f1f133edc
--- /dev/null
+++ b/deps/v8/src/snapshot/partial-serializer.cc
@@ -0,0 +1,123 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/partial-serializer.h"
+
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+PartialSerializer::PartialSerializer(Isolate* isolate,
+ Serializer* startup_snapshot_serializer,
+ SnapshotByteSink* sink)
+ : Serializer(isolate, sink),
+ startup_serializer_(startup_snapshot_serializer),
+ global_object_(NULL),
+ next_partial_cache_index_(0) {
+ InitializeCodeAddressMap();
+}
+
+PartialSerializer::~PartialSerializer() {
+ OutputStatistics("PartialSerializer");
+}
+
+void PartialSerializer::Serialize(Object** o) {
+ if ((*o)->IsContext()) {
+ Context* context = Context::cast(*o);
+ global_object_ = context->global_object();
+ back_reference_map()->AddGlobalProxy(context->global_proxy());
+ // The bootstrap snapshot has a code-stub context. When serializing the
+ // partial snapshot, it is chained into the weak context list on the isolate
+ // and it's next context pointer may point to the code-stub context. Clear
+ // it before serializing, it will get re-added to the context list
+ // explicitly when it's loaded.
+ if (context->IsNativeContext()) {
+ context->set(Context::NEXT_CONTEXT_LINK,
+ isolate_->heap()->undefined_value());
+ DCHECK(!context->global_object()->IsUndefined());
+ }
+ }
+ VisitPointer(o);
+ SerializeDeferredObjects();
+ Pad();
+}
+
+void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip) {
+ if (obj->IsMap()) {
+ // The code-caches link to context-specific code objects, which
+ // the startup and context serializes cannot currently handle.
+ DCHECK(Map::cast(obj)->code_cache() == obj->GetHeap()->empty_fixed_array());
+ }
+
+ // Replace typed arrays by undefined.
+ if (obj->IsJSTypedArray()) obj = isolate_->heap()->undefined_value();
+
+ int root_index = root_index_map_.Lookup(obj);
+ if (root_index != RootIndexMap::kInvalidRootIndex) {
+ PutRoot(root_index, obj, how_to_code, where_to_point, skip);
+ return;
+ }
+
+ if (ShouldBeInThePartialSnapshotCache(obj)) {
+ FlushSkip(skip);
+
+ int cache_index = PartialSnapshotCacheIndex(obj);
+ sink_->Put(kPartialSnapshotCache + how_to_code + where_to_point,
+ "PartialSnapshotCache");
+ sink_->PutInt(cache_index, "partial_snapshot_cache_index");
+ return;
+ }
+
+ // Pointers from the partial snapshot to the objects in the startup snapshot
+ // should go through the root array or through the partial snapshot cache.
+ // If this is not the case you may have to add something to the root array.
+ DCHECK(!startup_serializer_->back_reference_map()->Lookup(obj).is_valid());
+ // All the internalized strings that the partial snapshot needs should be
+ // either in the root table or in the partial snapshot cache.
+ DCHECK(!obj->IsInternalizedString());
+
+ if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
+
+ FlushSkip(skip);
+
+ // Clear literal boilerplates.
+ if (obj->IsJSFunction()) {
+ FixedArray* literals = JSFunction::cast(obj)->literals();
+ for (int i = 0; i < literals->length(); i++) literals->set_undefined(i);
+ }
+
+ // Object has not yet been serialized. Serialize it here.
+ ObjectSerializer serializer(this, obj, sink_, how_to_code, where_to_point);
+ serializer.Serialize();
+}
+
+int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
+ int index = partial_cache_index_map_.LookupOrInsert(
+ heap_object, next_partial_cache_index_);
+ if (index == PartialCacheIndexMap::kInvalidIndex) {
+ // This object is not part of the partial snapshot cache yet. Add it to the
+ // startup snapshot so we can refer to it via partial snapshot index from
+ // the partial snapshot.
+ startup_serializer_->VisitPointer(reinterpret_cast<Object**>(&heap_object));
+ return next_partial_cache_index_++;
+ }
+ return index;
+}
+
+bool PartialSerializer::ShouldBeInThePartialSnapshotCache(HeapObject* o) {
+ // Scripts should be referred only through shared function infos. We can't
+ // allow them to be part of the partial snapshot because they contain a
+ // unique ID, and deserializing several partial snapshots containing script
+ // would cause dupes.
+ DCHECK(!o->IsScript());
+ return o->IsName() || o->IsSharedFunctionInfo() || o->IsHeapNumber() ||
+ o->IsCode() || o->IsScopeInfo() || o->IsAccessorInfo() ||
+ o->map() ==
+ startup_serializer_->isolate()->heap()->fixed_cow_array_map();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/snapshot/partial-serializer.h b/deps/v8/src/snapshot/partial-serializer.h
new file mode 100644
index 0000000000..0bf61dd055
--- /dev/null
+++ b/deps/v8/src/snapshot/partial-serializer.h
@@ -0,0 +1,62 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_PARTIAL_SERIALIZER_H_
+#define V8_SNAPSHOT_PARTIAL_SERIALIZER_H_
+
+#include "src/address-map.h"
+#include "src/snapshot/serializer.h"
+
+namespace v8 {
+namespace internal {
+
+class PartialSerializer : public Serializer {
+ public:
+ PartialSerializer(Isolate* isolate, Serializer* startup_snapshot_serializer,
+ SnapshotByteSink* sink);
+
+ ~PartialSerializer() override;
+
+ // Serialize the objects reachable from a single object pointer.
+ void Serialize(Object** o);
+
+ private:
+ class PartialCacheIndexMap : public AddressMapBase {
+ public:
+ PartialCacheIndexMap() : map_(HashMap::PointersMatch) {}
+
+ static const int kInvalidIndex = -1;
+
+ // Lookup object in the map. Return its index if found, or create
+ // a new entry with new_index as value, and return kInvalidIndex.
+ int LookupOrInsert(HeapObject* obj, int new_index) {
+ HashMap::Entry* entry = LookupEntry(&map_, obj, false);
+ if (entry != NULL) return GetValue(entry);
+ SetValue(LookupEntry(&map_, obj, true), static_cast<uint32_t>(new_index));
+ return kInvalidIndex;
+ }
+
+ private:
+ HashMap map_;
+
+ DISALLOW_COPY_AND_ASSIGN(PartialCacheIndexMap);
+ };
+
+ void SerializeObject(HeapObject* o, HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip) override;
+
+ int PartialSnapshotCacheIndex(HeapObject* o);
+ bool ShouldBeInThePartialSnapshotCache(HeapObject* o);
+
+ Serializer* startup_serializer_;
+ Object* global_object_;
+ PartialCacheIndexMap partial_cache_index_map_;
+ int next_partial_cache_index_;
+ DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_PARTIAL_SERIALIZER_H_
diff --git a/deps/v8/src/snapshot/serialize.cc b/deps/v8/src/snapshot/serialize.cc
deleted file mode 100644
index 4868abd520..0000000000
--- a/deps/v8/src/snapshot/serialize.cc
+++ /dev/null
@@ -1,2877 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/snapshot/serialize.h"
-
-#include "src/accessors.h"
-#include "src/api.h"
-#include "src/base/platform/platform.h"
-#include "src/bootstrapper.h"
-#include "src/code-stubs.h"
-#include "src/deoptimizer.h"
-#include "src/execution.h"
-#include "src/global-handles.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/objects.h"
-#include "src/parsing/parser.h"
-#include "src/profiler/cpu-profiler.h"
-#include "src/runtime/runtime.h"
-#include "src/snapshot/natives.h"
-#include "src/snapshot/snapshot.h"
-#include "src/snapshot/snapshot-source-sink.h"
-#include "src/v8.h"
-#include "src/v8threads.h"
-#include "src/version.h"
-
-namespace v8 {
-namespace internal {
-
-
-// -----------------------------------------------------------------------------
-// Coding of external references.
-
-
-ExternalReferenceTable* ExternalReferenceTable::instance(Isolate* isolate) {
- ExternalReferenceTable* external_reference_table =
- isolate->external_reference_table();
- if (external_reference_table == NULL) {
- external_reference_table = new ExternalReferenceTable(isolate);
- isolate->set_external_reference_table(external_reference_table);
- }
- return external_reference_table;
-}
-
-
-ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
- // Miscellaneous
- Add(ExternalReference::roots_array_start(isolate).address(),
- "Heap::roots_array_start()");
- Add(ExternalReference::address_of_stack_limit(isolate).address(),
- "StackGuard::address_of_jslimit()");
- Add(ExternalReference::address_of_real_stack_limit(isolate).address(),
- "StackGuard::address_of_real_jslimit()");
- Add(ExternalReference::new_space_start(isolate).address(),
- "Heap::NewSpaceStart()");
- Add(ExternalReference::new_space_allocation_limit_address(isolate).address(),
- "Heap::NewSpaceAllocationLimitAddress()");
- Add(ExternalReference::new_space_allocation_top_address(isolate).address(),
- "Heap::NewSpaceAllocationTopAddress()");
- Add(ExternalReference::mod_two_doubles_operation(isolate).address(),
- "mod_two_doubles");
- // Keyed lookup cache.
- Add(ExternalReference::keyed_lookup_cache_keys(isolate).address(),
- "KeyedLookupCache::keys()");
- Add(ExternalReference::keyed_lookup_cache_field_offsets(isolate).address(),
- "KeyedLookupCache::field_offsets()");
- Add(ExternalReference::handle_scope_next_address(isolate).address(),
- "HandleScope::next");
- Add(ExternalReference::handle_scope_limit_address(isolate).address(),
- "HandleScope::limit");
- Add(ExternalReference::handle_scope_level_address(isolate).address(),
- "HandleScope::level");
- Add(ExternalReference::new_deoptimizer_function(isolate).address(),
- "Deoptimizer::New()");
- Add(ExternalReference::compute_output_frames_function(isolate).address(),
- "Deoptimizer::ComputeOutputFrames()");
- Add(ExternalReference::address_of_min_int().address(),
- "LDoubleConstant::min_int");
- Add(ExternalReference::address_of_one_half().address(),
- "LDoubleConstant::one_half");
- Add(ExternalReference::isolate_address(isolate).address(), "isolate");
- Add(ExternalReference::interpreter_dispatch_table_address(isolate).address(),
- "Interpreter::dispatch_table_address");
- Add(ExternalReference::address_of_negative_infinity().address(),
- "LDoubleConstant::negative_infinity");
- Add(ExternalReference::power_double_double_function(isolate).address(),
- "power_double_double_function");
- Add(ExternalReference::power_double_int_function(isolate).address(),
- "power_double_int_function");
- Add(ExternalReference::math_log_double_function(isolate).address(),
- "std::log");
- Add(ExternalReference::store_buffer_top(isolate).address(),
- "store_buffer_top");
- Add(ExternalReference::address_of_the_hole_nan().address(), "the_hole_nan");
- Add(ExternalReference::get_date_field_function(isolate).address(),
- "JSDate::GetField");
- Add(ExternalReference::date_cache_stamp(isolate).address(),
- "date_cache_stamp");
- Add(ExternalReference::address_of_pending_message_obj(isolate).address(),
- "address_of_pending_message_obj");
- Add(ExternalReference::get_make_code_young_function(isolate).address(),
- "Code::MakeCodeYoung");
- Add(ExternalReference::cpu_features().address(), "cpu_features");
- Add(ExternalReference::old_space_allocation_top_address(isolate).address(),
- "Heap::OldSpaceAllocationTopAddress");
- Add(ExternalReference::old_space_allocation_limit_address(isolate).address(),
- "Heap::OldSpaceAllocationLimitAddress");
- Add(ExternalReference::allocation_sites_list_address(isolate).address(),
- "Heap::allocation_sites_list_address()");
- Add(ExternalReference::address_of_uint32_bias().address(), "uint32_bias");
- Add(ExternalReference::get_mark_code_as_executed_function(isolate).address(),
- "Code::MarkCodeAsExecuted");
- Add(ExternalReference::is_profiling_address(isolate).address(),
- "CpuProfiler::is_profiling");
- Add(ExternalReference::scheduled_exception_address(isolate).address(),
- "Isolate::scheduled_exception");
- Add(ExternalReference::invoke_function_callback(isolate).address(),
- "InvokeFunctionCallback");
- Add(ExternalReference::invoke_accessor_getter_callback(isolate).address(),
- "InvokeAccessorGetterCallback");
- Add(ExternalReference::f32_trunc_wrapper_function(isolate).address(),
- "f32_trunc_wrapper");
- Add(ExternalReference::f32_floor_wrapper_function(isolate).address(),
- "f32_floor_wrapper");
- Add(ExternalReference::f32_ceil_wrapper_function(isolate).address(),
- "f32_ceil_wrapper");
- Add(ExternalReference::f32_nearest_int_wrapper_function(isolate).address(),
- "f32_nearest_int_wrapper");
- Add(ExternalReference::f64_trunc_wrapper_function(isolate).address(),
- "f64_trunc_wrapper");
- Add(ExternalReference::f64_floor_wrapper_function(isolate).address(),
- "f64_floor_wrapper");
- Add(ExternalReference::f64_ceil_wrapper_function(isolate).address(),
- "f64_ceil_wrapper");
- Add(ExternalReference::f64_nearest_int_wrapper_function(isolate).address(),
- "f64_nearest_int_wrapper");
- Add(ExternalReference::log_enter_external_function(isolate).address(),
- "Logger::EnterExternal");
- Add(ExternalReference::log_leave_external_function(isolate).address(),
- "Logger::LeaveExternal");
- Add(ExternalReference::address_of_minus_one_half().address(),
- "double_constants.minus_one_half");
- Add(ExternalReference::stress_deopt_count(isolate).address(),
- "Isolate::stress_deopt_count_address()");
- Add(ExternalReference::virtual_handler_register(isolate).address(),
- "Isolate::virtual_handler_register()");
- Add(ExternalReference::virtual_slot_register(isolate).address(),
- "Isolate::virtual_slot_register()");
- Add(ExternalReference::runtime_function_table_address(isolate).address(),
- "Runtime::runtime_function_table_address()");
-
- // Debug addresses
- Add(ExternalReference::debug_after_break_target_address(isolate).address(),
- "Debug::after_break_target_address()");
- Add(ExternalReference::debug_is_active_address(isolate).address(),
- "Debug::is_active_address()");
- Add(ExternalReference::debug_step_in_enabled_address(isolate).address(),
- "Debug::step_in_enabled_address()");
-
-#ifndef V8_INTERPRETED_REGEXP
- Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(),
- "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
- Add(ExternalReference::re_check_stack_guard_state(isolate).address(),
- "RegExpMacroAssembler*::CheckStackGuardState()");
- Add(ExternalReference::re_grow_stack(isolate).address(),
- "NativeRegExpMacroAssembler::GrowStack()");
- Add(ExternalReference::re_word_character_map().address(),
- "NativeRegExpMacroAssembler::word_character_map");
- Add(ExternalReference::address_of_regexp_stack_limit(isolate).address(),
- "RegExpStack::limit_address()");
- Add(ExternalReference::address_of_regexp_stack_memory_address(isolate)
- .address(),
- "RegExpStack::memory_address()");
- Add(ExternalReference::address_of_regexp_stack_memory_size(isolate).address(),
- "RegExpStack::memory_size()");
- Add(ExternalReference::address_of_static_offsets_vector(isolate).address(),
- "OffsetsVector::static_offsets_vector");
-#endif // V8_INTERPRETED_REGEXP
-
- // The following populates all of the different type of external references
- // into the ExternalReferenceTable.
- //
- // NOTE: This function was originally 100k of code. It has since been
- // rewritten to be mostly table driven, as the callback macro style tends to
- // very easily cause code bloat. Please be careful in the future when adding
- // new references.
-
- struct RefTableEntry {
- uint16_t id;
- const char* name;
- };
-
- static const RefTableEntry c_builtins[] = {
-#define DEF_ENTRY_C(name, ignored) \
- { Builtins::c_##name, "Builtins::" #name } \
- ,
- BUILTIN_LIST_C(DEF_ENTRY_C)
-#undef DEF_ENTRY_C
- };
-
- for (unsigned i = 0; i < arraysize(c_builtins); ++i) {
- ExternalReference ref(static_cast<Builtins::CFunctionId>(c_builtins[i].id),
- isolate);
- Add(ref.address(), c_builtins[i].name);
- }
-
- static const RefTableEntry builtins[] = {
-#define DEF_ENTRY_C(name, ignored) \
- { Builtins::k##name, "Builtins::" #name } \
- ,
-#define DEF_ENTRY_A(name, i1, i2, i3) \
- { Builtins::k##name, "Builtins::" #name } \
- ,
- BUILTIN_LIST_C(DEF_ENTRY_C) BUILTIN_LIST_A(DEF_ENTRY_A)
- BUILTIN_LIST_DEBUG_A(DEF_ENTRY_A)
-#undef DEF_ENTRY_C
-#undef DEF_ENTRY_A
- };
-
- for (unsigned i = 0; i < arraysize(builtins); ++i) {
- ExternalReference ref(static_cast<Builtins::Name>(builtins[i].id), isolate);
- Add(ref.address(), builtins[i].name);
- }
-
- static const RefTableEntry runtime_functions[] = {
-#define RUNTIME_ENTRY(name, i1, i2) \
- { Runtime::k##name, "Runtime::" #name } \
- ,
- FOR_EACH_INTRINSIC(RUNTIME_ENTRY)
-#undef RUNTIME_ENTRY
- };
-
- for (unsigned i = 0; i < arraysize(runtime_functions); ++i) {
- ExternalReference ref(
- static_cast<Runtime::FunctionId>(runtime_functions[i].id), isolate);
- Add(ref.address(), runtime_functions[i].name);
- }
-
- // Stat counters
- struct StatsRefTableEntry {
- StatsCounter* (Counters::*counter)();
- const char* name;
- };
-
- static const StatsRefTableEntry stats_ref_table[] = {
-#define COUNTER_ENTRY(name, caption) \
- { &Counters::name, "Counters::" #name } \
- ,
- STATS_COUNTER_LIST_1(COUNTER_ENTRY) STATS_COUNTER_LIST_2(COUNTER_ENTRY)
-#undef COUNTER_ENTRY
- };
-
- Counters* counters = isolate->counters();
- for (unsigned i = 0; i < arraysize(stats_ref_table); ++i) {
- // To make sure the indices are not dependent on whether counters are
- // enabled, use a dummy address as filler.
- Address address = NotAvailable();
- StatsCounter* counter = (counters->*(stats_ref_table[i].counter))();
- if (counter->Enabled()) {
- address = reinterpret_cast<Address>(counter->GetInternalPointer());
- }
- Add(address, stats_ref_table[i].name);
- }
-
- // Top addresses
- static const char* address_names[] = {
-#define BUILD_NAME_LITERAL(Name, name) "Isolate::" #name "_address",
- FOR_EACH_ISOLATE_ADDRESS_NAME(BUILD_NAME_LITERAL) NULL
-#undef BUILD_NAME_LITERAL
- };
-
- for (int i = 0; i < Isolate::kIsolateAddressCount; ++i) {
- Add(isolate->get_address_from_id(static_cast<Isolate::AddressId>(i)),
- address_names[i]);
- }
-
- // Accessors
- struct AccessorRefTable {
- Address address;
- const char* name;
- };
-
- static const AccessorRefTable accessors[] = {
-#define ACCESSOR_INFO_DECLARATION(name) \
- { FUNCTION_ADDR(&Accessors::name##Getter), "Accessors::" #name "Getter" } \
- ,
- ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
-#undef ACCESSOR_INFO_DECLARATION
-#define ACCESSOR_SETTER_DECLARATION(name) \
- { FUNCTION_ADDR(&Accessors::name), "Accessors::" #name } \
- ,
- ACCESSOR_SETTER_LIST(ACCESSOR_SETTER_DECLARATION)
-#undef ACCESSOR_INFO_DECLARATION
- };
-
- for (unsigned i = 0; i < arraysize(accessors); ++i) {
- Add(accessors[i].address, accessors[i].name);
- }
-
- StubCache* stub_cache = isolate->stub_cache();
-
- // Stub cache tables
- Add(stub_cache->key_reference(StubCache::kPrimary).address(),
- "StubCache::primary_->key");
- Add(stub_cache->value_reference(StubCache::kPrimary).address(),
- "StubCache::primary_->value");
- Add(stub_cache->map_reference(StubCache::kPrimary).address(),
- "StubCache::primary_->map");
- Add(stub_cache->key_reference(StubCache::kSecondary).address(),
- "StubCache::secondary_->key");
- Add(stub_cache->value_reference(StubCache::kSecondary).address(),
- "StubCache::secondary_->value");
- Add(stub_cache->map_reference(StubCache::kSecondary).address(),
- "StubCache::secondary_->map");
-
- // Runtime entries
- Add(ExternalReference::delete_handle_scope_extensions(isolate).address(),
- "HandleScope::DeleteExtensions");
- Add(ExternalReference::incremental_marking_record_write_function(isolate)
- .address(),
- "IncrementalMarking::RecordWrite");
- Add(ExternalReference::incremental_marking_record_write_code_entry_function(
- isolate)
- .address(),
- "IncrementalMarking::RecordWriteOfCodeEntryFromCode");
- Add(ExternalReference::store_buffer_overflow_function(isolate).address(),
- "StoreBuffer::StoreBufferOverflow");
-
- // Add a small set of deopt entry addresses to encoder without generating the
- // deopt table code, which isn't possible at deserialization time.
- HandleScope scope(isolate);
- for (int entry = 0; entry < kDeoptTableSerializeEntryCount; ++entry) {
- Address address = Deoptimizer::GetDeoptimizationEntry(
- isolate,
- entry,
- Deoptimizer::LAZY,
- Deoptimizer::CALCULATE_ENTRY_ADDRESS);
- Add(address, "lazy_deopt");
- }
-}
-
-
-ExternalReferenceEncoder::ExternalReferenceEncoder(Isolate* isolate) {
- map_ = isolate->external_reference_map();
- if (map_ != NULL) return;
- map_ = new HashMap(HashMap::PointersMatch);
- ExternalReferenceTable* table = ExternalReferenceTable::instance(isolate);
- for (int i = 0; i < table->size(); ++i) {
- Address addr = table->address(i);
- if (addr == ExternalReferenceTable::NotAvailable()) continue;
- // We expect no duplicate external references entries in the table.
- DCHECK_NULL(map_->Lookup(addr, Hash(addr)));
- map_->LookupOrInsert(addr, Hash(addr))->value = reinterpret_cast<void*>(i);
- }
- isolate->set_external_reference_map(map_);
-}
-
-
-uint32_t ExternalReferenceEncoder::Encode(Address address) const {
- DCHECK_NOT_NULL(address);
- HashMap::Entry* entry =
- const_cast<HashMap*>(map_)->Lookup(address, Hash(address));
- DCHECK_NOT_NULL(entry);
- return static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
-}
-
-
-const char* ExternalReferenceEncoder::NameOfAddress(Isolate* isolate,
- Address address) const {
- HashMap::Entry* entry =
- const_cast<HashMap*>(map_)->Lookup(address, Hash(address));
- if (entry == NULL) return "<unknown>";
- uint32_t i = static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
- return ExternalReferenceTable::instance(isolate)->name(i);
-}
-
-
-class CodeAddressMap: public CodeEventLogger {
- public:
- explicit CodeAddressMap(Isolate* isolate)
- : isolate_(isolate) {
- isolate->logger()->addCodeEventListener(this);
- }
-
- ~CodeAddressMap() override {
- isolate_->logger()->removeCodeEventListener(this);
- }
-
- void CodeMoveEvent(Address from, Address to) override {
- address_to_name_map_.Move(from, to);
- }
-
- void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) override {}
-
- void CodeDeleteEvent(Address from) override {
- address_to_name_map_.Remove(from);
- }
-
- const char* Lookup(Address address) {
- return address_to_name_map_.Lookup(address);
- }
-
- private:
- class NameMap {
- public:
- NameMap() : impl_(HashMap::PointersMatch) {}
-
- ~NameMap() {
- for (HashMap::Entry* p = impl_.Start(); p != NULL; p = impl_.Next(p)) {
- DeleteArray(static_cast<const char*>(p->value));
- }
- }
-
- void Insert(Address code_address, const char* name, int name_size) {
- HashMap::Entry* entry = FindOrCreateEntry(code_address);
- if (entry->value == NULL) {
- entry->value = CopyName(name, name_size);
- }
- }
-
- const char* Lookup(Address code_address) {
- HashMap::Entry* entry = FindEntry(code_address);
- return (entry != NULL) ? static_cast<const char*>(entry->value) : NULL;
- }
-
- void Remove(Address code_address) {
- HashMap::Entry* entry = FindEntry(code_address);
- if (entry != NULL) {
- DeleteArray(static_cast<char*>(entry->value));
- RemoveEntry(entry);
- }
- }
-
- void Move(Address from, Address to) {
- if (from == to) return;
- HashMap::Entry* from_entry = FindEntry(from);
- DCHECK(from_entry != NULL);
- void* value = from_entry->value;
- RemoveEntry(from_entry);
- HashMap::Entry* to_entry = FindOrCreateEntry(to);
- DCHECK(to_entry->value == NULL);
- to_entry->value = value;
- }
-
- private:
- static char* CopyName(const char* name, int name_size) {
- char* result = NewArray<char>(name_size + 1);
- for (int i = 0; i < name_size; ++i) {
- char c = name[i];
- if (c == '\0') c = ' ';
- result[i] = c;
- }
- result[name_size] = '\0';
- return result;
- }
-
- HashMap::Entry* FindOrCreateEntry(Address code_address) {
- return impl_.LookupOrInsert(code_address,
- ComputePointerHash(code_address));
- }
-
- HashMap::Entry* FindEntry(Address code_address) {
- return impl_.Lookup(code_address, ComputePointerHash(code_address));
- }
-
- void RemoveEntry(HashMap::Entry* entry) {
- impl_.Remove(entry->key, entry->hash);
- }
-
- HashMap impl_;
-
- DISALLOW_COPY_AND_ASSIGN(NameMap);
- };
-
- void LogRecordedBuffer(Code* code, SharedFunctionInfo*, const char* name,
- int length) override {
- address_to_name_map_.Insert(code->address(), name, length);
- }
-
- NameMap address_to_name_map_;
- Isolate* isolate_;
-};
-
-
-void Deserializer::DecodeReservation(
- Vector<const SerializedData::Reservation> res) {
- DCHECK_EQ(0, reservations_[NEW_SPACE].length());
- STATIC_ASSERT(NEW_SPACE == 0);
- int current_space = NEW_SPACE;
- for (auto& r : res) {
- reservations_[current_space].Add({r.chunk_size(), NULL, NULL});
- if (r.is_last()) current_space++;
- }
- DCHECK_EQ(kNumberOfSpaces, current_space);
- for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) current_chunk_[i] = 0;
-}
-
-
-void Deserializer::FlushICacheForNewIsolate() {
- DCHECK(!deserializing_user_code_);
- // The entire isolate is newly deserialized. Simply flush all code pages.
- PageIterator it(isolate_->heap()->code_space());
- while (it.has_next()) {
- Page* p = it.next();
- Assembler::FlushICache(isolate_, p->area_start(),
- p->area_end() - p->area_start());
- }
-}
-
-
-void Deserializer::FlushICacheForNewCodeObjects() {
- DCHECK(deserializing_user_code_);
- for (Code* code : new_code_objects_) {
- Assembler::FlushICache(isolate_, code->instruction_start(),
- code->instruction_size());
- }
-}
-
-
-bool Deserializer::ReserveSpace() {
-#ifdef DEBUG
- for (int i = NEW_SPACE; i < kNumberOfSpaces; ++i) {
- CHECK(reservations_[i].length() > 0);
- }
-#endif // DEBUG
- if (!isolate_->heap()->ReserveSpace(reservations_)) return false;
- for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
- high_water_[i] = reservations_[i][0].start;
- }
- return true;
-}
-
-
-void Deserializer::Initialize(Isolate* isolate) {
- DCHECK_NULL(isolate_);
- DCHECK_NOT_NULL(isolate);
- isolate_ = isolate;
- DCHECK_NULL(external_reference_table_);
- external_reference_table_ = ExternalReferenceTable::instance(isolate);
- CHECK_EQ(magic_number_,
- SerializedData::ComputeMagicNumber(external_reference_table_));
-}
-
-
-void Deserializer::Deserialize(Isolate* isolate) {
- Initialize(isolate);
- if (!ReserveSpace()) V8::FatalProcessOutOfMemory("deserializing context");
- // No active threads.
- DCHECK_NULL(isolate_->thread_manager()->FirstThreadStateInUse());
- // No active handles.
- DCHECK(isolate_->handle_scope_implementer()->blocks()->is_empty());
-
- {
- DisallowHeapAllocation no_gc;
- isolate_->heap()->IterateSmiRoots(this);
- isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
- isolate_->heap()->RepairFreeListsAfterDeserialization();
- isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
- DeserializeDeferredObjects();
- FlushICacheForNewIsolate();
- }
-
- isolate_->heap()->set_native_contexts_list(
- isolate_->heap()->undefined_value());
- // The allocation site list is build during root iteration, but if no sites
- // were encountered then it needs to be initialized to undefined.
- if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
- isolate_->heap()->set_allocation_sites_list(
- isolate_->heap()->undefined_value());
- }
-
- // Update data pointers to the external strings containing natives sources.
- Natives::UpdateSourceCache(isolate_->heap());
- ExtraNatives::UpdateSourceCache(isolate_->heap());
-
- // Issue code events for newly deserialized code objects.
- LOG_CODE_EVENT(isolate_, LogCodeObjects());
- LOG_CODE_EVENT(isolate_, LogCompiledFunctions());
-}
-
-
-MaybeHandle<Object> Deserializer::DeserializePartial(
- Isolate* isolate, Handle<JSGlobalProxy> global_proxy) {
- Initialize(isolate);
- if (!ReserveSpace()) {
- V8::FatalProcessOutOfMemory("deserialize context");
- return MaybeHandle<Object>();
- }
-
- Vector<Handle<Object> > attached_objects = Vector<Handle<Object> >::New(1);
- attached_objects[kGlobalProxyReference] = global_proxy;
- SetAttachedObjects(attached_objects);
-
- DisallowHeapAllocation no_gc;
- // Keep track of the code space start and end pointers in case new
- // code objects were unserialized
- OldSpace* code_space = isolate_->heap()->code_space();
- Address start_address = code_space->top();
- Object* root;
- VisitPointer(&root);
- DeserializeDeferredObjects();
-
- // There's no code deserialized here. If this assert fires then that's
- // changed and logging should be added to notify the profiler et al of the
- // new code, which also has to be flushed from instruction cache.
- CHECK_EQ(start_address, code_space->top());
- return Handle<Object>(root, isolate);
-}
-
-
-MaybeHandle<SharedFunctionInfo> Deserializer::DeserializeCode(
- Isolate* isolate) {
- Initialize(isolate);
- if (!ReserveSpace()) {
- return Handle<SharedFunctionInfo>();
- } else {
- deserializing_user_code_ = true;
- HandleScope scope(isolate);
- Handle<SharedFunctionInfo> result;
- {
- DisallowHeapAllocation no_gc;
- Object* root;
- VisitPointer(&root);
- DeserializeDeferredObjects();
- FlushICacheForNewCodeObjects();
- result = Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(root));
- }
- CommitPostProcessedObjects(isolate);
- return scope.CloseAndEscape(result);
- }
-}
-
-
-Deserializer::~Deserializer() {
- // TODO(svenpanne) Re-enable this assertion when v8 initialization is fixed.
- // DCHECK(source_.AtEOF());
- attached_objects_.Dispose();
-}
-
-
-// This is called on the roots. It is the driver of the deserialization
-// process. It is also called on the body of each function.
-void Deserializer::VisitPointers(Object** start, Object** end) {
- // The space must be new space. Any other space would cause ReadChunk to try
- // to update the remembered using NULL as the address.
- ReadData(start, end, NEW_SPACE, NULL);
-}
-
-void Deserializer::Synchronize(VisitorSynchronization::SyncTag tag) {
- static const byte expected = kSynchronize;
- CHECK_EQ(expected, source_.Get());
-}
-
-void Deserializer::DeserializeDeferredObjects() {
- for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
- switch (code) {
- case kAlignmentPrefix:
- case kAlignmentPrefix + 1:
- case kAlignmentPrefix + 2:
- SetAlignment(code);
- break;
- default: {
- int space = code & kSpaceMask;
- DCHECK(space <= kNumberOfSpaces);
- DCHECK(code - space == kNewObject);
- HeapObject* object = GetBackReferencedObject(space);
- int size = source_.GetInt() << kPointerSizeLog2;
- Address obj_address = object->address();
- Object** start = reinterpret_cast<Object**>(obj_address + kPointerSize);
- Object** end = reinterpret_cast<Object**>(obj_address + size);
- bool filled = ReadData(start, end, space, obj_address);
- CHECK(filled);
- DCHECK(CanBeDeferred(object));
- PostProcessNewObject(object, space);
- }
- }
- }
-}
-
-
-// Used to insert a deserialized internalized string into the string table.
-class StringTableInsertionKey : public HashTableKey {
- public:
- explicit StringTableInsertionKey(String* string)
- : string_(string), hash_(HashForObject(string)) {
- DCHECK(string->IsInternalizedString());
- }
-
- bool IsMatch(Object* string) override {
- // We know that all entries in a hash table had their hash keys created.
- // Use that knowledge to have fast failure.
- if (hash_ != HashForObject(string)) return false;
- // We want to compare the content of two internalized strings here.
- return string_->SlowEquals(String::cast(string));
- }
-
- uint32_t Hash() override { return hash_; }
-
- uint32_t HashForObject(Object* key) override {
- return String::cast(key)->Hash();
- }
-
- MUST_USE_RESULT Handle<Object> AsHandle(Isolate* isolate) override {
- return handle(string_, isolate);
- }
-
- private:
- String* string_;
- uint32_t hash_;
- DisallowHeapAllocation no_gc;
-};
-
-
-HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
- if (deserializing_user_code()) {
- if (obj->IsString()) {
- String* string = String::cast(obj);
- // Uninitialize hash field as the hash seed may have changed.
- string->set_hash_field(String::kEmptyHashField);
- if (string->IsInternalizedString()) {
- // Canonicalize the internalized string. If it already exists in the
- // string table, set it to forward to the existing one.
- StringTableInsertionKey key(string);
- String* canonical = StringTable::LookupKeyIfExists(isolate_, &key);
- if (canonical == NULL) {
- new_internalized_strings_.Add(handle(string));
- return string;
- } else {
- string->SetForwardedInternalizedString(canonical);
- return canonical;
- }
- }
- } else if (obj->IsScript()) {
- new_scripts_.Add(handle(Script::cast(obj)));
- } else {
- DCHECK(CanBeDeferred(obj));
- }
- }
- if (obj->IsAllocationSite()) {
- DCHECK(obj->IsAllocationSite());
- // Allocation sites are present in the snapshot, and must be linked into
- // a list at deserialization time.
- AllocationSite* site = AllocationSite::cast(obj);
- // TODO(mvstanton): consider treating the heap()->allocation_sites_list()
- // as a (weak) root. If this root is relocated correctly, this becomes
- // unnecessary.
- if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
- site->set_weak_next(isolate_->heap()->undefined_value());
- } else {
- site->set_weak_next(isolate_->heap()->allocation_sites_list());
- }
- isolate_->heap()->set_allocation_sites_list(site);
- } else if (obj->IsCode()) {
- // We flush all code pages after deserializing the startup snapshot. In that
- // case, we only need to remember code objects in the large object space.
- // When deserializing user code, remember each individual code object.
- if (deserializing_user_code() || space == LO_SPACE) {
- new_code_objects_.Add(Code::cast(obj));
- }
- }
- // Check alignment.
- DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(), obj->RequiredAlignment()));
- return obj;
-}
-
-
-void Deserializer::CommitPostProcessedObjects(Isolate* isolate) {
- StringTable::EnsureCapacityForDeserialization(
- isolate, new_internalized_strings_.length());
- for (Handle<String> string : new_internalized_strings_) {
- StringTableInsertionKey key(*string);
- DCHECK_NULL(StringTable::LookupKeyIfExists(isolate, &key));
- StringTable::LookupKey(isolate, &key);
- }
-
- Heap* heap = isolate->heap();
- Factory* factory = isolate->factory();
- for (Handle<Script> script : new_scripts_) {
- // Assign a new script id to avoid collision.
- script->set_id(isolate_->heap()->NextScriptId());
- // Add script to list.
- Handle<Object> list = WeakFixedArray::Add(factory->script_list(), script);
- heap->SetRootScriptList(*list);
- }
-}
-
-
-HeapObject* Deserializer::GetBackReferencedObject(int space) {
- HeapObject* obj;
- BackReference back_reference(source_.GetInt());
- if (space == LO_SPACE) {
- CHECK(back_reference.chunk_index() == 0);
- uint32_t index = back_reference.large_object_index();
- obj = deserialized_large_objects_[index];
- } else {
- DCHECK(space < kNumberOfPreallocatedSpaces);
- uint32_t chunk_index = back_reference.chunk_index();
- DCHECK_LE(chunk_index, current_chunk_[space]);
- uint32_t chunk_offset = back_reference.chunk_offset();
- Address address = reservations_[space][chunk_index].start + chunk_offset;
- if (next_alignment_ != kWordAligned) {
- int padding = Heap::GetFillToAlign(address, next_alignment_);
- next_alignment_ = kWordAligned;
- DCHECK(padding == 0 || HeapObject::FromAddress(address)->IsFiller());
- address += padding;
- }
- obj = HeapObject::FromAddress(address);
- }
- if (deserializing_user_code() && obj->IsInternalizedString()) {
- obj = String::cast(obj)->GetForwardedInternalizedString();
- }
- hot_objects_.Add(obj);
- return obj;
-}
-
-
-// This routine writes the new object into the pointer provided and then
-// returns true if the new object was in young space and false otherwise.
-// The reason for this strange interface is that otherwise the object is
-// written very late, which means the FreeSpace map is not set up by the
-// time we need to use it to mark the space at the end of a page free.
-void Deserializer::ReadObject(int space_number, Object** write_back) {
- Address address;
- HeapObject* obj;
- int size = source_.GetInt() << kObjectAlignmentBits;
-
- if (next_alignment_ != kWordAligned) {
- int reserved = size + Heap::GetMaximumFillToAlign(next_alignment_);
- address = Allocate(space_number, reserved);
- obj = HeapObject::FromAddress(address);
- // If one of the following assertions fails, then we are deserializing an
- // aligned object when the filler maps have not been deserialized yet.
- // We require filler maps as padding to align the object.
- Heap* heap = isolate_->heap();
- DCHECK(heap->free_space_map()->IsMap());
- DCHECK(heap->one_pointer_filler_map()->IsMap());
- DCHECK(heap->two_pointer_filler_map()->IsMap());
- obj = heap->AlignWithFiller(obj, size, reserved, next_alignment_);
- address = obj->address();
- next_alignment_ = kWordAligned;
- } else {
- address = Allocate(space_number, size);
- obj = HeapObject::FromAddress(address);
- }
-
- isolate_->heap()->OnAllocationEvent(obj, size);
- Object** current = reinterpret_cast<Object**>(address);
- Object** limit = current + (size >> kPointerSizeLog2);
- if (FLAG_log_snapshot_positions) {
- LOG(isolate_, SnapshotPositionEvent(address, source_.position()));
- }
-
- if (ReadData(current, limit, space_number, address)) {
- // Only post process if object content has not been deferred.
- obj = PostProcessNewObject(obj, space_number);
- }
-
- Object* write_back_obj = obj;
- UnalignedCopy(write_back, &write_back_obj);
-#ifdef DEBUG
- if (obj->IsCode()) {
- DCHECK(space_number == CODE_SPACE || space_number == LO_SPACE);
- } else {
- DCHECK(space_number != CODE_SPACE);
- }
-#endif // DEBUG
-}
-
-
-// We know the space requirements before deserialization and can
-// pre-allocate that reserved space. During deserialization, all we need
-// to do is to bump up the pointer for each space in the reserved
-// space. This is also used for fixing back references.
-// We may have to split up the pre-allocation into several chunks
-// because it would not fit onto a single page. We do not have to keep
-// track of when to move to the next chunk. An opcode will signal this.
-// Since multiple large objects cannot be folded into one large object
-// space allocation, we have to do an actual allocation when deserializing
-// each large object. Instead of tracking offset for back references, we
-// reference large objects by index.
-Address Deserializer::Allocate(int space_index, int size) {
- if (space_index == LO_SPACE) {
- AlwaysAllocateScope scope(isolate_);
- LargeObjectSpace* lo_space = isolate_->heap()->lo_space();
- Executability exec = static_cast<Executability>(source_.Get());
- AllocationResult result = lo_space->AllocateRaw(size, exec);
- HeapObject* obj = HeapObject::cast(result.ToObjectChecked());
- deserialized_large_objects_.Add(obj);
- return obj->address();
- } else {
- DCHECK(space_index < kNumberOfPreallocatedSpaces);
- Address address = high_water_[space_index];
- DCHECK_NOT_NULL(address);
- high_water_[space_index] += size;
-#ifdef DEBUG
- // Assert that the current reserved chunk is still big enough.
- const Heap::Reservation& reservation = reservations_[space_index];
- int chunk_index = current_chunk_[space_index];
- CHECK_LE(high_water_[space_index], reservation[chunk_index].end);
-#endif
- return address;
- }
-}
-
-
-Object** Deserializer::CopyInNativesSource(Vector<const char> source_vector,
- Object** current) {
- DCHECK(!isolate_->heap()->deserialization_complete());
- NativesExternalStringResource* resource = new NativesExternalStringResource(
- source_vector.start(), source_vector.length());
- Object* resource_obj = reinterpret_cast<Object*>(resource);
- UnalignedCopy(current++, &resource_obj);
- return current;
-}
-
-
-bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
- Address current_object_address) {
- Isolate* const isolate = isolate_;
- // Write barrier support costs around 1% in startup time. In fact there
- // are no new space objects in current boot snapshots, so it's not needed,
- // but that may change.
- bool write_barrier_needed =
- (current_object_address != NULL && source_space != NEW_SPACE &&
- source_space != CODE_SPACE);
- while (current < limit) {
- byte data = source_.Get();
- switch (data) {
-#define CASE_STATEMENT(where, how, within, space_number) \
- case where + how + within + space_number: \
- STATIC_ASSERT((where & ~kWhereMask) == 0); \
- STATIC_ASSERT((how & ~kHowToCodeMask) == 0); \
- STATIC_ASSERT((within & ~kWhereToPointMask) == 0); \
- STATIC_ASSERT((space_number & ~kSpaceMask) == 0);
-
-#define CASE_BODY(where, how, within, space_number_if_any) \
- { \
- bool emit_write_barrier = false; \
- bool current_was_incremented = false; \
- int space_number = space_number_if_any == kAnyOldSpace \
- ? (data & kSpaceMask) \
- : space_number_if_any; \
- if (where == kNewObject && how == kPlain && within == kStartOfObject) { \
- ReadObject(space_number, current); \
- emit_write_barrier = (space_number == NEW_SPACE); \
- } else { \
- Object* new_object = NULL; /* May not be a real Object pointer. */ \
- if (where == kNewObject) { \
- ReadObject(space_number, &new_object); \
- } else if (where == kBackref) { \
- emit_write_barrier = (space_number == NEW_SPACE); \
- new_object = GetBackReferencedObject(data & kSpaceMask); \
- } else if (where == kBackrefWithSkip) { \
- int skip = source_.GetInt(); \
- current = reinterpret_cast<Object**>( \
- reinterpret_cast<Address>(current) + skip); \
- emit_write_barrier = (space_number == NEW_SPACE); \
- new_object = GetBackReferencedObject(data & kSpaceMask); \
- } else if (where == kRootArray) { \
- int id = source_.GetInt(); \
- Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(id); \
- new_object = isolate->heap()->root(root_index); \
- emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
- } else if (where == kPartialSnapshotCache) { \
- int cache_index = source_.GetInt(); \
- new_object = isolate->partial_snapshot_cache()->at(cache_index); \
- emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
- } else if (where == kExternalReference) { \
- int skip = source_.GetInt(); \
- current = reinterpret_cast<Object**>( \
- reinterpret_cast<Address>(current) + skip); \
- int reference_id = source_.GetInt(); \
- Address address = external_reference_table_->address(reference_id); \
- new_object = reinterpret_cast<Object*>(address); \
- } else if (where == kAttachedReference) { \
- int index = source_.GetInt(); \
- DCHECK(deserializing_user_code() || index == kGlobalProxyReference); \
- new_object = *attached_objects_[index]; \
- emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
- } else { \
- DCHECK(where == kBuiltin); \
- DCHECK(deserializing_user_code()); \
- int builtin_id = source_.GetInt(); \
- DCHECK_LE(0, builtin_id); \
- DCHECK_LT(builtin_id, Builtins::builtin_count); \
- Builtins::Name name = static_cast<Builtins::Name>(builtin_id); \
- new_object = isolate->builtins()->builtin(name); \
- emit_write_barrier = false; \
- } \
- if (within == kInnerPointer) { \
- if (space_number != CODE_SPACE || new_object->IsCode()) { \
- Code* new_code_object = reinterpret_cast<Code*>(new_object); \
- new_object = \
- reinterpret_cast<Object*>(new_code_object->instruction_start()); \
- } else { \
- DCHECK(space_number == CODE_SPACE); \
- Cell* cell = Cell::cast(new_object); \
- new_object = reinterpret_cast<Object*>(cell->ValueAddress()); \
- } \
- } \
- if (how == kFromCode) { \
- Address location_of_branch_data = reinterpret_cast<Address>(current); \
- Assembler::deserialization_set_special_target_at( \
- isolate, location_of_branch_data, \
- Code::cast(HeapObject::FromAddress(current_object_address)), \
- reinterpret_cast<Address>(new_object)); \
- location_of_branch_data += Assembler::kSpecialTargetSize; \
- current = reinterpret_cast<Object**>(location_of_branch_data); \
- current_was_incremented = true; \
- } else { \
- UnalignedCopy(current, &new_object); \
- } \
- } \
- if (emit_write_barrier && write_barrier_needed) { \
- Address current_address = reinterpret_cast<Address>(current); \
- SLOW_DCHECK(isolate->heap()->ContainsSlow(current_object_address)); \
- isolate->heap()->RecordWrite( \
- HeapObject::FromAddress(current_object_address), \
- static_cast<int>(current_address - current_object_address), \
- *reinterpret_cast<Object**>(current_address)); \
- } \
- if (!current_was_incremented) { \
- current++; \
- } \
- break; \
- }
-
-// This generates a case and a body for the new space (which has to do extra
-// write barrier handling) and handles the other spaces with fall-through cases
-// and one body.
-#define ALL_SPACES(where, how, within) \
- CASE_STATEMENT(where, how, within, NEW_SPACE) \
- CASE_BODY(where, how, within, NEW_SPACE) \
- CASE_STATEMENT(where, how, within, OLD_SPACE) \
- CASE_STATEMENT(where, how, within, CODE_SPACE) \
- CASE_STATEMENT(where, how, within, MAP_SPACE) \
- CASE_STATEMENT(where, how, within, LO_SPACE) \
- CASE_BODY(where, how, within, kAnyOldSpace)
-
-#define FOUR_CASES(byte_code) \
- case byte_code: \
- case byte_code + 1: \
- case byte_code + 2: \
- case byte_code + 3:
-
-#define SIXTEEN_CASES(byte_code) \
- FOUR_CASES(byte_code) \
- FOUR_CASES(byte_code + 4) \
- FOUR_CASES(byte_code + 8) \
- FOUR_CASES(byte_code + 12)
-
-#define SINGLE_CASE(where, how, within, space) \
- CASE_STATEMENT(where, how, within, space) \
- CASE_BODY(where, how, within, space)
-
- // Deserialize a new object and write a pointer to it to the current
- // object.
- ALL_SPACES(kNewObject, kPlain, kStartOfObject)
- // Support for direct instruction pointers in functions. It's an inner
- // pointer because it points at the entry point, not at the start of the
- // code object.
- SINGLE_CASE(kNewObject, kPlain, kInnerPointer, CODE_SPACE)
- // Deserialize a new code object and write a pointer to its first
- // instruction to the current code object.
- ALL_SPACES(kNewObject, kFromCode, kInnerPointer)
- // Find a recently deserialized object using its offset from the current
- // allocation point and write a pointer to it to the current object.
- ALL_SPACES(kBackref, kPlain, kStartOfObject)
- ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject)
-#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
- defined(V8_TARGET_ARCH_PPC) || V8_EMBEDDED_CONSTANT_POOL
- // Deserialize a new object from pointer found in code and write
- // a pointer to it to the current object. Required only for MIPS, PPC or
- // ARM with embedded constant pool, and omitted on the other architectures
- // because it is fully unrolled and would cause bloat.
- ALL_SPACES(kNewObject, kFromCode, kStartOfObject)
- // Find a recently deserialized code object using its offset from the
- // current allocation point and write a pointer to it to the current
- // object. Required only for MIPS, PPC or ARM with embedded constant pool.
- ALL_SPACES(kBackref, kFromCode, kStartOfObject)
- ALL_SPACES(kBackrefWithSkip, kFromCode, kStartOfObject)
-#endif
- // Find a recently deserialized code object using its offset from the
- // current allocation point and write a pointer to its first instruction
- // to the current code object or the instruction pointer in a function
- // object.
- ALL_SPACES(kBackref, kFromCode, kInnerPointer)
- ALL_SPACES(kBackrefWithSkip, kFromCode, kInnerPointer)
- ALL_SPACES(kBackref, kPlain, kInnerPointer)
- ALL_SPACES(kBackrefWithSkip, kPlain, kInnerPointer)
- // Find an object in the roots array and write a pointer to it to the
- // current object.
- SINGLE_CASE(kRootArray, kPlain, kStartOfObject, 0)
-#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
- defined(V8_TARGET_ARCH_PPC) || V8_EMBEDDED_CONSTANT_POOL
- // Find an object in the roots array and write a pointer to it to in code.
- SINGLE_CASE(kRootArray, kFromCode, kStartOfObject, 0)
-#endif
- // Find an object in the partial snapshots cache and write a pointer to it
- // to the current object.
- SINGLE_CASE(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
- // Find an code entry in the partial snapshots cache and
- // write a pointer to it to the current object.
- SINGLE_CASE(kPartialSnapshotCache, kPlain, kInnerPointer, 0)
- // Find an external reference and write a pointer to it to the current
- // object.
- SINGLE_CASE(kExternalReference, kPlain, kStartOfObject, 0)
- // Find an external reference and write a pointer to it in the current
- // code object.
- SINGLE_CASE(kExternalReference, kFromCode, kStartOfObject, 0)
- // Find an object in the attached references and write a pointer to it to
- // the current object.
- SINGLE_CASE(kAttachedReference, kPlain, kStartOfObject, 0)
- SINGLE_CASE(kAttachedReference, kPlain, kInnerPointer, 0)
- SINGLE_CASE(kAttachedReference, kFromCode, kInnerPointer, 0)
- // Find a builtin and write a pointer to it to the current object.
- SINGLE_CASE(kBuiltin, kPlain, kStartOfObject, 0)
- SINGLE_CASE(kBuiltin, kPlain, kInnerPointer, 0)
- SINGLE_CASE(kBuiltin, kFromCode, kInnerPointer, 0)
-
-#undef CASE_STATEMENT
-#undef CASE_BODY
-#undef ALL_SPACES
-
- case kSkip: {
- int size = source_.GetInt();
- current = reinterpret_cast<Object**>(
- reinterpret_cast<intptr_t>(current) + size);
- break;
- }
-
- case kInternalReferenceEncoded:
- case kInternalReference: {
- // Internal reference address is not encoded via skip, but by offset
- // from code entry.
- int pc_offset = source_.GetInt();
- int target_offset = source_.GetInt();
- Code* code =
- Code::cast(HeapObject::FromAddress(current_object_address));
- DCHECK(0 <= pc_offset && pc_offset <= code->instruction_size());
- DCHECK(0 <= target_offset && target_offset <= code->instruction_size());
- Address pc = code->entry() + pc_offset;
- Address target = code->entry() + target_offset;
- Assembler::deserialization_set_target_internal_reference_at(
- isolate, pc, target, data == kInternalReference
- ? RelocInfo::INTERNAL_REFERENCE
- : RelocInfo::INTERNAL_REFERENCE_ENCODED);
- break;
- }
-
- case kNop:
- break;
-
- case kNextChunk: {
- int space = source_.Get();
- DCHECK(space < kNumberOfPreallocatedSpaces);
- int chunk_index = current_chunk_[space];
- const Heap::Reservation& reservation = reservations_[space];
- // Make sure the current chunk is indeed exhausted.
- CHECK_EQ(reservation[chunk_index].end, high_water_[space]);
- // Move to next reserved chunk.
- chunk_index = ++current_chunk_[space];
- CHECK_LT(chunk_index, reservation.length());
- high_water_[space] = reservation[chunk_index].start;
- break;
- }
-
- case kDeferred: {
- // Deferred can only occur right after the heap object header.
- DCHECK(current == reinterpret_cast<Object**>(current_object_address +
- kPointerSize));
- HeapObject* obj = HeapObject::FromAddress(current_object_address);
- // If the deferred object is a map, its instance type may be used
- // during deserialization. Initialize it with a temporary value.
- if (obj->IsMap()) Map::cast(obj)->set_instance_type(FILLER_TYPE);
- current = limit;
- return false;
- }
-
- case kSynchronize:
- // If we get here then that indicates that you have a mismatch between
- // the number of GC roots when serializing and deserializing.
- CHECK(false);
- break;
-
- case kNativesStringResource:
- current = CopyInNativesSource(Natives::GetScriptSource(source_.Get()),
- current);
- break;
-
- case kExtraNativesStringResource:
- current = CopyInNativesSource(
- ExtraNatives::GetScriptSource(source_.Get()), current);
- break;
-
- // Deserialize raw data of variable length.
- case kVariableRawData: {
- int size_in_bytes = source_.GetInt();
- byte* raw_data_out = reinterpret_cast<byte*>(current);
- source_.CopyRaw(raw_data_out, size_in_bytes);
- break;
- }
-
- case kVariableRepeat: {
- int repeats = source_.GetInt();
- Object* object = current[-1];
- DCHECK(!isolate->heap()->InNewSpace(object));
- for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
- break;
- }
-
- case kAlignmentPrefix:
- case kAlignmentPrefix + 1:
- case kAlignmentPrefix + 2:
- SetAlignment(data);
- break;
-
- STATIC_ASSERT(kNumberOfRootArrayConstants == Heap::kOldSpaceRoots);
- STATIC_ASSERT(kNumberOfRootArrayConstants == 32);
- SIXTEEN_CASES(kRootArrayConstantsWithSkip)
- SIXTEEN_CASES(kRootArrayConstantsWithSkip + 16) {
- int skip = source_.GetInt();
- current = reinterpret_cast<Object**>(
- reinterpret_cast<intptr_t>(current) + skip);
- // Fall through.
- }
-
- SIXTEEN_CASES(kRootArrayConstants)
- SIXTEEN_CASES(kRootArrayConstants + 16) {
- int id = data & kRootArrayConstantsMask;
- Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(id);
- Object* object = isolate->heap()->root(root_index);
- DCHECK(!isolate->heap()->InNewSpace(object));
- UnalignedCopy(current++, &object);
- break;
- }
-
- STATIC_ASSERT(kNumberOfHotObjects == 8);
- FOUR_CASES(kHotObjectWithSkip)
- FOUR_CASES(kHotObjectWithSkip + 4) {
- int skip = source_.GetInt();
- current = reinterpret_cast<Object**>(
- reinterpret_cast<Address>(current) + skip);
- // Fall through.
- }
-
- FOUR_CASES(kHotObject)
- FOUR_CASES(kHotObject + 4) {
- int index = data & kHotObjectMask;
- Object* hot_object = hot_objects_.Get(index);
- UnalignedCopy(current, &hot_object);
- if (write_barrier_needed) {
- Address current_address = reinterpret_cast<Address>(current);
- SLOW_DCHECK(isolate->heap()->ContainsSlow(current_object_address));
- isolate->heap()->RecordWrite(
- HeapObject::FromAddress(current_object_address),
- static_cast<int>(current_address - current_object_address),
- hot_object);
- }
- current++;
- break;
- }
-
- // Deserialize raw data of fixed length from 1 to 32 words.
- STATIC_ASSERT(kNumberOfFixedRawData == 32);
- SIXTEEN_CASES(kFixedRawData)
- SIXTEEN_CASES(kFixedRawData + 16) {
- byte* raw_data_out = reinterpret_cast<byte*>(current);
- int size_in_bytes = (data - kFixedRawDataStart) << kPointerSizeLog2;
- source_.CopyRaw(raw_data_out, size_in_bytes);
- current = reinterpret_cast<Object**>(raw_data_out + size_in_bytes);
- break;
- }
-
- STATIC_ASSERT(kNumberOfFixedRepeat == 16);
- SIXTEEN_CASES(kFixedRepeat) {
- int repeats = data - kFixedRepeatStart;
- Object* object;
- UnalignedCopy(&object, current - 1);
- DCHECK(!isolate->heap()->InNewSpace(object));
- for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
- break;
- }
-
-#undef SIXTEEN_CASES
-#undef FOUR_CASES
-#undef SINGLE_CASE
-
- default:
- CHECK(false);
- }
- }
- CHECK_EQ(limit, current);
- return true;
-}
-
-
-Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
- : isolate_(isolate),
- sink_(sink),
- external_reference_encoder_(isolate),
- root_index_map_(isolate),
- recursion_depth_(0),
- code_address_map_(NULL),
- large_objects_total_size_(0),
- seen_large_objects_index_(0) {
- // The serializer is meant to be used only to generate initial heap images
- // from a context in which there is only one isolate.
- for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
- pending_chunk_[i] = 0;
- max_chunk_size_[i] = static_cast<uint32_t>(
- MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(i)));
- }
-
-#ifdef OBJECT_PRINT
- if (FLAG_serialization_statistics) {
- instance_type_count_ = NewArray<int>(kInstanceTypes);
- instance_type_size_ = NewArray<size_t>(kInstanceTypes);
- for (int i = 0; i < kInstanceTypes; i++) {
- instance_type_count_[i] = 0;
- instance_type_size_[i] = 0;
- }
- } else {
- instance_type_count_ = NULL;
- instance_type_size_ = NULL;
- }
-#endif // OBJECT_PRINT
-}
-
-
-Serializer::~Serializer() {
- if (code_address_map_ != NULL) delete code_address_map_;
-#ifdef OBJECT_PRINT
- if (instance_type_count_ != NULL) {
- DeleteArray(instance_type_count_);
- DeleteArray(instance_type_size_);
- }
-#endif // OBJECT_PRINT
-}
-
-
-#ifdef OBJECT_PRINT
-void Serializer::CountInstanceType(Map* map, int size) {
- int instance_type = map->instance_type();
- instance_type_count_[instance_type]++;
- instance_type_size_[instance_type] += size;
-}
-#endif // OBJECT_PRINT
-
-
-void Serializer::OutputStatistics(const char* name) {
- if (!FLAG_serialization_statistics) return;
- PrintF("%s:\n", name);
- PrintF(" Spaces (bytes):\n");
- for (int space = 0; space < kNumberOfSpaces; space++) {
- PrintF("%16s", AllocationSpaceName(static_cast<AllocationSpace>(space)));
- }
- PrintF("\n");
- for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
- size_t s = pending_chunk_[space];
- for (uint32_t chunk_size : completed_chunks_[space]) s += chunk_size;
- PrintF("%16" V8_PTR_PREFIX "d", s);
- }
- PrintF("%16d\n", large_objects_total_size_);
-#ifdef OBJECT_PRINT
- PrintF(" Instance types (count and bytes):\n");
-#define PRINT_INSTANCE_TYPE(Name) \
- if (instance_type_count_[Name]) { \
- PrintF("%10d %10" V8_PTR_PREFIX "d %s\n", instance_type_count_[Name], \
- instance_type_size_[Name], #Name); \
- }
- INSTANCE_TYPE_LIST(PRINT_INSTANCE_TYPE)
-#undef PRINT_INSTANCE_TYPE
- PrintF("\n");
-#endif // OBJECT_PRINT
-}
-
-
-class Serializer::ObjectSerializer : public ObjectVisitor {
- public:
- ObjectSerializer(Serializer* serializer, Object* o, SnapshotByteSink* sink,
- HowToCode how_to_code, WhereToPoint where_to_point)
- : serializer_(serializer),
- object_(HeapObject::cast(o)),
- sink_(sink),
- reference_representation_(how_to_code + where_to_point),
- bytes_processed_so_far_(0),
- is_code_object_(o->IsCode()),
- code_has_been_output_(false) {}
- void Serialize();
- void SerializeDeferred();
- void VisitPointers(Object** start, Object** end) override;
- void VisitEmbeddedPointer(RelocInfo* target) override;
- void VisitExternalReference(Address* p) override;
- void VisitExternalReference(RelocInfo* rinfo) override;
- void VisitInternalReference(RelocInfo* rinfo) override;
- void VisitCodeTarget(RelocInfo* target) override;
- void VisitCodeEntry(Address entry_address) override;
- void VisitCell(RelocInfo* rinfo) override;
- void VisitRuntimeEntry(RelocInfo* reloc) override;
- // Used for seralizing the external strings that hold the natives source.
- void VisitExternalOneByteString(
- v8::String::ExternalOneByteStringResource** resource) override;
- // We can't serialize a heap with external two byte strings.
- void VisitExternalTwoByteString(
- v8::String::ExternalStringResource** resource) override {
- UNREACHABLE();
- }
-
- private:
- void SerializePrologue(AllocationSpace space, int size, Map* map);
-
- bool SerializeExternalNativeSourceString(
- int builtin_count,
- v8::String::ExternalOneByteStringResource** resource_pointer,
- FixedArray* source_cache, int resource_index);
-
- enum ReturnSkip { kCanReturnSkipInsteadOfSkipping, kIgnoringReturn };
- // This function outputs or skips the raw data between the last pointer and
- // up to the current position. It optionally can just return the number of
- // bytes to skip instead of performing a skip instruction, in case the skip
- // can be merged into the next instruction.
- int OutputRawData(Address up_to, ReturnSkip return_skip = kIgnoringReturn);
- // External strings are serialized in a way to resemble sequential strings.
- void SerializeExternalString();
-
- Address PrepareCode();
-
- Serializer* serializer_;
- HeapObject* object_;
- SnapshotByteSink* sink_;
- int reference_representation_;
- int bytes_processed_so_far_;
- bool is_code_object_;
- bool code_has_been_output_;
-};
-
-
-void Serializer::SerializeDeferredObjects() {
- while (deferred_objects_.length() > 0) {
- HeapObject* obj = deferred_objects_.RemoveLast();
- ObjectSerializer obj_serializer(this, obj, sink_, kPlain, kStartOfObject);
- obj_serializer.SerializeDeferred();
- }
- sink_->Put(kSynchronize, "Finished with deferred objects");
-}
-
-
-void StartupSerializer::SerializeStrongReferences() {
- Isolate* isolate = this->isolate();
- // No active threads.
- CHECK_NULL(isolate->thread_manager()->FirstThreadStateInUse());
- // No active or weak handles.
- CHECK(isolate->handle_scope_implementer()->blocks()->is_empty());
- CHECK_EQ(0, isolate->global_handles()->NumberOfWeakHandles());
- CHECK_EQ(0, isolate->eternal_handles()->NumberOfHandles());
- // We don't support serializing installed extensions.
- CHECK(!isolate->has_installed_extensions());
- isolate->heap()->IterateSmiRoots(this);
- isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
-}
-
-
-void StartupSerializer::VisitPointers(Object** start, Object** end) {
- for (Object** current = start; current < end; current++) {
- if (start == isolate()->heap()->roots_array_start()) {
- root_index_wave_front_ =
- Max(root_index_wave_front_, static_cast<intptr_t>(current - start));
- }
- if (ShouldBeSkipped(current)) {
- sink_->Put(kSkip, "Skip");
- sink_->PutInt(kPointerSize, "SkipOneWord");
- } else if ((*current)->IsSmi()) {
- sink_->Put(kOnePointerRawData, "Smi");
- for (int i = 0; i < kPointerSize; i++) {
- sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte");
- }
- } else {
- SerializeObject(HeapObject::cast(*current), kPlain, kStartOfObject, 0);
- }
- }
-}
-
-
-void PartialSerializer::Serialize(Object** o) {
- if ((*o)->IsContext()) {
- Context* context = Context::cast(*o);
- global_object_ = context->global_object();
- back_reference_map()->AddGlobalProxy(context->global_proxy());
- // The bootstrap snapshot has a code-stub context. When serializing the
- // partial snapshot, it is chained into the weak context list on the isolate
- // and it's next context pointer may point to the code-stub context. Clear
- // it before serializing, it will get re-added to the context list
- // explicitly when it's loaded.
- if (context->IsNativeContext()) {
- context->set(Context::NEXT_CONTEXT_LINK,
- isolate_->heap()->undefined_value());
- DCHECK(!context->global_object()->IsUndefined());
- }
- }
- VisitPointer(o);
- SerializeDeferredObjects();
- Pad();
-}
-
-
-bool Serializer::ShouldBeSkipped(Object** current) {
- Object** roots = isolate()->heap()->roots_array_start();
- return current == &roots[Heap::kStoreBufferTopRootIndex]
- || current == &roots[Heap::kStackLimitRootIndex]
- || current == &roots[Heap::kRealStackLimitRootIndex];
-}
-
-
-void Serializer::VisitPointers(Object** start, Object** end) {
- for (Object** current = start; current < end; current++) {
- if ((*current)->IsSmi()) {
- sink_->Put(kOnePointerRawData, "Smi");
- for (int i = 0; i < kPointerSize; i++) {
- sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte");
- }
- } else {
- SerializeObject(HeapObject::cast(*current), kPlain, kStartOfObject, 0);
- }
- }
-}
-
-
-void Serializer::EncodeReservations(
- List<SerializedData::Reservation>* out) const {
- for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
- for (int j = 0; j < completed_chunks_[i].length(); j++) {
- out->Add(SerializedData::Reservation(completed_chunks_[i][j]));
- }
-
- if (pending_chunk_[i] > 0 || completed_chunks_[i].length() == 0) {
- out->Add(SerializedData::Reservation(pending_chunk_[i]));
- }
- out->last().mark_as_last();
- }
-
- out->Add(SerializedData::Reservation(large_objects_total_size_));
- out->last().mark_as_last();
-}
-
-
-// This ensures that the partial snapshot cache keeps things alive during GC and
-// tracks their movement. When it is called during serialization of the startup
-// snapshot nothing happens. When the partial (context) snapshot is created,
-// this array is populated with the pointers that the partial snapshot will
-// need. As that happens we emit serialized objects to the startup snapshot
-// that correspond to the elements of this cache array. On deserialization we
-// therefore need to visit the cache array. This fills it up with pointers to
-// deserialized objects.
-void SerializerDeserializer::Iterate(Isolate* isolate,
- ObjectVisitor* visitor) {
- if (isolate->serializer_enabled()) return;
- List<Object*>* cache = isolate->partial_snapshot_cache();
- for (int i = 0;; ++i) {
- // Extend the array ready to get a value when deserializing.
- if (cache->length() <= i) cache->Add(Smi::FromInt(0));
- visitor->VisitPointer(&cache->at(i));
- // Sentinel is the undefined object, which is a root so it will not normally
- // be found in the cache.
- if (cache->at(i)->IsUndefined()) break;
- }
-}
-
-
-bool SerializerDeserializer::CanBeDeferred(HeapObject* o) {
- return !o->IsString() && !o->IsScript();
-}
-
-
-int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
- Isolate* isolate = this->isolate();
- List<Object*>* cache = isolate->partial_snapshot_cache();
- int new_index = cache->length();
-
- int index = partial_cache_index_map_.LookupOrInsert(heap_object, new_index);
- if (index == PartialCacheIndexMap::kInvalidIndex) {
- // We didn't find the object in the cache. So we add it to the cache and
- // then visit the pointer so that it becomes part of the startup snapshot
- // and we can refer to it from the partial snapshot.
- cache->Add(heap_object);
- startup_serializer_->VisitPointer(reinterpret_cast<Object**>(&heap_object));
- // We don't recurse from the startup snapshot generator into the partial
- // snapshot generator.
- return new_index;
- }
- return index;
-}
-
-
-bool PartialSerializer::ShouldBeInThePartialSnapshotCache(HeapObject* o) {
- // Scripts should be referred only through shared function infos. We can't
- // allow them to be part of the partial snapshot because they contain a
- // unique ID, and deserializing several partial snapshots containing script
- // would cause dupes.
- DCHECK(!o->IsScript());
- return o->IsName() || o->IsSharedFunctionInfo() || o->IsHeapNumber() ||
- o->IsCode() || o->IsScopeInfo() || o->IsAccessorInfo() ||
- o->map() ==
- startup_serializer_->isolate()->heap()->fixed_cow_array_map();
-}
-
-
-#ifdef DEBUG
-bool Serializer::BackReferenceIsAlreadyAllocated(BackReference reference) {
- DCHECK(reference.is_valid());
- DCHECK(!reference.is_source());
- DCHECK(!reference.is_global_proxy());
- AllocationSpace space = reference.space();
- int chunk_index = reference.chunk_index();
- if (space == LO_SPACE) {
- return chunk_index == 0 &&
- reference.large_object_index() < seen_large_objects_index_;
- } else if (chunk_index == completed_chunks_[space].length()) {
- return reference.chunk_offset() < pending_chunk_[space];
- } else {
- return chunk_index < completed_chunks_[space].length() &&
- reference.chunk_offset() < completed_chunks_[space][chunk_index];
- }
-}
-#endif // DEBUG
-
-
-bool Serializer::SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) {
- if (how_to_code == kPlain && where_to_point == kStartOfObject) {
- // Encode a reference to a hot object by its index in the working set.
- int index = hot_objects_.Find(obj);
- if (index != HotObjectsList::kNotFound) {
- DCHECK(index >= 0 && index < kNumberOfHotObjects);
- if (FLAG_trace_serializer) {
- PrintF(" Encoding hot object %d:", index);
- obj->ShortPrint();
- PrintF("\n");
- }
- if (skip != 0) {
- sink_->Put(kHotObjectWithSkip + index, "HotObjectWithSkip");
- sink_->PutInt(skip, "HotObjectSkipDistance");
- } else {
- sink_->Put(kHotObject + index, "HotObject");
- }
- return true;
- }
- }
- BackReference back_reference = back_reference_map_.Lookup(obj);
- if (back_reference.is_valid()) {
- // Encode the location of an already deserialized object in order to write
- // its location into a later object. We can encode the location as an
- // offset fromthe start of the deserialized objects or as an offset
- // backwards from thecurrent allocation pointer.
- if (back_reference.is_source()) {
- FlushSkip(skip);
- if (FLAG_trace_serializer) PrintF(" Encoding source object\n");
- DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject);
- sink_->Put(kAttachedReference + kPlain + kStartOfObject, "Source");
- sink_->PutInt(kSourceObjectReference, "kSourceObjectReference");
- } else if (back_reference.is_global_proxy()) {
- FlushSkip(skip);
- if (FLAG_trace_serializer) PrintF(" Encoding global proxy\n");
- DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject);
- sink_->Put(kAttachedReference + kPlain + kStartOfObject, "Global Proxy");
- sink_->PutInt(kGlobalProxyReference, "kGlobalProxyReference");
- } else {
- if (FLAG_trace_serializer) {
- PrintF(" Encoding back reference to: ");
- obj->ShortPrint();
- PrintF("\n");
- }
-
- PutAlignmentPrefix(obj);
- AllocationSpace space = back_reference.space();
- if (skip == 0) {
- sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRef");
- } else {
- sink_->Put(kBackrefWithSkip + how_to_code + where_to_point + space,
- "BackRefWithSkip");
- sink_->PutInt(skip, "BackRefSkipDistance");
- }
- PutBackReference(obj, back_reference);
- }
- return true;
- }
- return false;
-}
-
-StartupSerializer::StartupSerializer(Isolate* isolate, SnapshotByteSink* sink)
- : Serializer(isolate, sink),
- root_index_wave_front_(0),
- serializing_builtins_(false) {
- // Clear the cache of objects used by the partial snapshot. After the
- // strong roots have been serialized we can create a partial snapshot
- // which will repopulate the cache with objects needed by that partial
- // snapshot.
- isolate->partial_snapshot_cache()->Clear();
- InitializeCodeAddressMap();
-}
-
-
-void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) {
- DCHECK(!obj->IsJSFunction());
-
- if (obj->IsCode()) {
- Code* code = Code::cast(obj);
- // If the function code is compiled (either as native code or bytecode),
- // replace it with lazy-compile builtin. Only exception is when we are
- // serializing the canonical interpreter-entry-trampoline builtin.
- if (code->kind() == Code::FUNCTION ||
- (!serializing_builtins_ && code->is_interpreter_entry_trampoline())) {
- obj = isolate()->builtins()->builtin(Builtins::kCompileLazy);
- }
- } else if (obj->IsBytecodeArray()) {
- obj = isolate()->heap()->undefined_value();
- }
-
- int root_index = root_index_map_.Lookup(obj);
- bool is_immortal_immovable_root = false;
- // We can only encode roots as such if it has already been serialized.
- // That applies to root indices below the wave front.
- if (root_index != RootIndexMap::kInvalidRootIndex) {
- if (root_index < root_index_wave_front_) {
- PutRoot(root_index, obj, how_to_code, where_to_point, skip);
- return;
- } else {
- is_immortal_immovable_root = Heap::RootIsImmortalImmovable(root_index);
- }
- }
-
- if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
-
- FlushSkip(skip);
-
- // Object has not yet been serialized. Serialize it here.
- ObjectSerializer object_serializer(this, obj, sink_, how_to_code,
- where_to_point);
- object_serializer.Serialize();
-
- if (is_immortal_immovable_root) {
- // Make sure that the immortal immovable root has been included in the first
- // chunk of its reserved space , so that it is deserialized onto the first
- // page of its space and stays immortal immovable.
- BackReference ref = back_reference_map_.Lookup(obj);
- CHECK(ref.is_valid() && ref.chunk_index() == 0);
- }
-}
-
-
-void StartupSerializer::SerializeWeakReferencesAndDeferred() {
- // This phase comes right after the serialization (of the snapshot).
- // After we have done the partial serialization the partial snapshot cache
- // will contain some references needed to decode the partial snapshot. We
- // add one entry with 'undefined' which is the sentinel that the deserializer
- // uses to know it is done deserializing the array.
- Object* undefined = isolate()->heap()->undefined_value();
- VisitPointer(&undefined);
- isolate()->heap()->IterateWeakRoots(this, VISIT_ALL);
- SerializeDeferredObjects();
- Pad();
-}
-
-void StartupSerializer::Synchronize(VisitorSynchronization::SyncTag tag) {
- // We expect the builtins tag after builtins have been serialized.
- DCHECK(!serializing_builtins_ || tag == VisitorSynchronization::kBuiltins);
- serializing_builtins_ = (tag == VisitorSynchronization::kHandleScope);
- sink_->Put(kSynchronize, "Synchronize");
-}
-
-void Serializer::PutRoot(int root_index,
- HeapObject* object,
- SerializerDeserializer::HowToCode how_to_code,
- SerializerDeserializer::WhereToPoint where_to_point,
- int skip) {
- if (FLAG_trace_serializer) {
- PrintF(" Encoding root %d:", root_index);
- object->ShortPrint();
- PrintF("\n");
- }
-
- if (how_to_code == kPlain && where_to_point == kStartOfObject &&
- root_index < kNumberOfRootArrayConstants &&
- !isolate()->heap()->InNewSpace(object)) {
- if (skip == 0) {
- sink_->Put(kRootArrayConstants + root_index, "RootConstant");
- } else {
- sink_->Put(kRootArrayConstantsWithSkip + root_index, "RootConstant");
- sink_->PutInt(skip, "SkipInPutRoot");
- }
- } else {
- FlushSkip(skip);
- sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
- sink_->PutInt(root_index, "root_index");
- }
-}
-
-
-void Serializer::PutBackReference(HeapObject* object, BackReference reference) {
- DCHECK(BackReferenceIsAlreadyAllocated(reference));
- sink_->PutInt(reference.reference(), "BackRefValue");
- hot_objects_.Add(object);
-}
-
-
-int Serializer::PutAlignmentPrefix(HeapObject* object) {
- AllocationAlignment alignment = object->RequiredAlignment();
- if (alignment != kWordAligned) {
- DCHECK(1 <= alignment && alignment <= 3);
- byte prefix = (kAlignmentPrefix - 1) + alignment;
- sink_->Put(prefix, "Alignment");
- return Heap::GetMaximumFillToAlign(alignment);
- }
- return 0;
-}
-
-
-void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) {
- if (obj->IsMap()) {
- // The code-caches link to context-specific code objects, which
- // the startup and context serializes cannot currently handle.
- DCHECK(Map::cast(obj)->code_cache() == obj->GetHeap()->empty_fixed_array());
- }
-
- // Replace typed arrays by undefined.
- if (obj->IsJSTypedArray()) obj = isolate_->heap()->undefined_value();
-
- int root_index = root_index_map_.Lookup(obj);
- if (root_index != RootIndexMap::kInvalidRootIndex) {
- PutRoot(root_index, obj, how_to_code, where_to_point, skip);
- return;
- }
-
- if (ShouldBeInThePartialSnapshotCache(obj)) {
- FlushSkip(skip);
-
- int cache_index = PartialSnapshotCacheIndex(obj);
- sink_->Put(kPartialSnapshotCache + how_to_code + where_to_point,
- "PartialSnapshotCache");
- sink_->PutInt(cache_index, "partial_snapshot_cache_index");
- return;
- }
-
- // Pointers from the partial snapshot to the objects in the startup snapshot
- // should go through the root array or through the partial snapshot cache.
- // If this is not the case you may have to add something to the root array.
- DCHECK(!startup_serializer_->back_reference_map()->Lookup(obj).is_valid());
- // All the internalized strings that the partial snapshot needs should be
- // either in the root table or in the partial snapshot cache.
- DCHECK(!obj->IsInternalizedString());
-
- if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
-
- FlushSkip(skip);
-
- // Clear literal boilerplates.
- if (obj->IsJSFunction()) {
- FixedArray* literals = JSFunction::cast(obj)->literals();
- for (int i = 0; i < literals->length(); i++) literals->set_undefined(i);
- }
-
- // Object has not yet been serialized. Serialize it here.
- ObjectSerializer serializer(this, obj, sink_, how_to_code, where_to_point);
- serializer.Serialize();
-}
-
-
-void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
- int size, Map* map) {
- if (serializer_->code_address_map_) {
- const char* code_name =
- serializer_->code_address_map_->Lookup(object_->address());
- LOG(serializer_->isolate_,
- CodeNameEvent(object_->address(), sink_->Position(), code_name));
- LOG(serializer_->isolate_,
- SnapshotPositionEvent(object_->address(), sink_->Position()));
- }
-
- BackReference back_reference;
- if (space == LO_SPACE) {
- sink_->Put(kNewObject + reference_representation_ + space,
- "NewLargeObject");
- sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
- if (object_->IsCode()) {
- sink_->Put(EXECUTABLE, "executable large object");
- } else {
- sink_->Put(NOT_EXECUTABLE, "not executable large object");
- }
- back_reference = serializer_->AllocateLargeObject(size);
- } else {
- int fill = serializer_->PutAlignmentPrefix(object_);
- back_reference = serializer_->Allocate(space, size + fill);
- sink_->Put(kNewObject + reference_representation_ + space, "NewObject");
- sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
- }
-
-#ifdef OBJECT_PRINT
- if (FLAG_serialization_statistics) {
- serializer_->CountInstanceType(map, size);
- }
-#endif // OBJECT_PRINT
-
- // Mark this object as already serialized.
- serializer_->back_reference_map()->Add(object_, back_reference);
-
- // Serialize the map (first word of the object).
- serializer_->SerializeObject(map, kPlain, kStartOfObject, 0);
-}
-
-
-void Serializer::ObjectSerializer::SerializeExternalString() {
- // Instead of serializing this as an external string, we serialize
- // an imaginary sequential string with the same content.
- Isolate* isolate = serializer_->isolate();
- DCHECK(object_->IsExternalString());
- DCHECK(object_->map() != isolate->heap()->native_source_string_map());
- ExternalString* string = ExternalString::cast(object_);
- int length = string->length();
- Map* map;
- int content_size;
- int allocation_size;
- const byte* resource;
- // Find the map and size for the imaginary sequential string.
- bool internalized = object_->IsInternalizedString();
- if (object_->IsExternalOneByteString()) {
- map = internalized ? isolate->heap()->one_byte_internalized_string_map()
- : isolate->heap()->one_byte_string_map();
- allocation_size = SeqOneByteString::SizeFor(length);
- content_size = length * kCharSize;
- resource = reinterpret_cast<const byte*>(
- ExternalOneByteString::cast(string)->resource()->data());
- } else {
- map = internalized ? isolate->heap()->internalized_string_map()
- : isolate->heap()->string_map();
- allocation_size = SeqTwoByteString::SizeFor(length);
- content_size = length * kShortSize;
- resource = reinterpret_cast<const byte*>(
- ExternalTwoByteString::cast(string)->resource()->data());
- }
-
- AllocationSpace space = (allocation_size > Page::kMaxRegularHeapObjectSize)
- ? LO_SPACE
- : OLD_SPACE;
- SerializePrologue(space, allocation_size, map);
-
- // Output the rest of the imaginary string.
- int bytes_to_output = allocation_size - HeapObject::kHeaderSize;
-
- // Output raw data header. Do not bother with common raw length cases here.
- sink_->Put(kVariableRawData, "RawDataForString");
- sink_->PutInt(bytes_to_output, "length");
-
- // Serialize string header (except for map).
- Address string_start = string->address();
- for (int i = HeapObject::kHeaderSize; i < SeqString::kHeaderSize; i++) {
- sink_->PutSection(string_start[i], "StringHeader");
- }
-
- // Serialize string content.
- sink_->PutRaw(resource, content_size, "StringContent");
-
- // Since the allocation size is rounded up to object alignment, there
- // maybe left-over bytes that need to be padded.
- int padding_size = allocation_size - SeqString::kHeaderSize - content_size;
- DCHECK(0 <= padding_size && padding_size < kObjectAlignment);
- for (int i = 0; i < padding_size; i++) sink_->PutSection(0, "StringPadding");
-
- sink_->Put(kSkip, "SkipAfterString");
- sink_->PutInt(bytes_to_output, "SkipDistance");
-}
-
-// Clear and later restore the next link in the weak cell or allocation site.
-// TODO(all): replace this with proper iteration of weak slots in serializer.
-class UnlinkWeakNextScope {
- public:
- explicit UnlinkWeakNextScope(HeapObject* object) : object_(nullptr) {
- if (object->IsWeakCell()) {
- object_ = object;
- next_ = WeakCell::cast(object)->next();
- WeakCell::cast(object)->clear_next(object->GetHeap()->the_hole_value());
- } else if (object->IsAllocationSite()) {
- object_ = object;
- next_ = AllocationSite::cast(object)->weak_next();
- AllocationSite::cast(object)
- ->set_weak_next(object->GetHeap()->undefined_value());
- }
- }
-
- ~UnlinkWeakNextScope() {
- if (object_ != nullptr) {
- if (object_->IsWeakCell()) {
- WeakCell::cast(object_)->set_next(next_, UPDATE_WEAK_WRITE_BARRIER);
- } else {
- AllocationSite::cast(object_)
- ->set_weak_next(next_, UPDATE_WEAK_WRITE_BARRIER);
- }
- }
- }
-
- private:
- HeapObject* object_;
- Object* next_;
- DisallowHeapAllocation no_gc_;
-};
-
-
-void Serializer::ObjectSerializer::Serialize() {
- if (FLAG_trace_serializer) {
- PrintF(" Encoding heap object: ");
- object_->ShortPrint();
- PrintF("\n");
- }
-
- // We cannot serialize typed array objects correctly.
- DCHECK(!object_->IsJSTypedArray());
-
- // We don't expect fillers.
- DCHECK(!object_->IsFiller());
-
- if (object_->IsScript()) {
- // Clear cached line ends.
- Object* undefined = serializer_->isolate()->heap()->undefined_value();
- Script::cast(object_)->set_line_ends(undefined);
- }
-
- if (object_->IsExternalString()) {
- Heap* heap = serializer_->isolate()->heap();
- if (object_->map() != heap->native_source_string_map()) {
- // Usually we cannot recreate resources for external strings. To work
- // around this, external strings are serialized to look like ordinary
- // sequential strings.
- // The exception are native source code strings, since we can recreate
- // their resources. In that case we fall through and leave it to
- // VisitExternalOneByteString further down.
- SerializeExternalString();
- return;
- }
- }
-
- int size = object_->Size();
- Map* map = object_->map();
- AllocationSpace space =
- MemoryChunk::FromAddress(object_->address())->owner()->identity();
- SerializePrologue(space, size, map);
-
- // Serialize the rest of the object.
- CHECK_EQ(0, bytes_processed_so_far_);
- bytes_processed_so_far_ = kPointerSize;
-
- RecursionScope recursion(serializer_);
- // Objects that are immediately post processed during deserialization
- // cannot be deferred, since post processing requires the object content.
- if (recursion.ExceedsMaximum() && CanBeDeferred(object_)) {
- serializer_->QueueDeferredObject(object_);
- sink_->Put(kDeferred, "Deferring object content");
- return;
- }
-
- UnlinkWeakNextScope unlink_weak_next(object_);
-
- object_->IterateBody(map->instance_type(), size, this);
- OutputRawData(object_->address() + size);
-}
-
-
-void Serializer::ObjectSerializer::SerializeDeferred() {
- if (FLAG_trace_serializer) {
- PrintF(" Encoding deferred heap object: ");
- object_->ShortPrint();
- PrintF("\n");
- }
-
- int size = object_->Size();
- Map* map = object_->map();
- BackReference reference = serializer_->back_reference_map()->Lookup(object_);
-
- // Serialize the rest of the object.
- CHECK_EQ(0, bytes_processed_so_far_);
- bytes_processed_so_far_ = kPointerSize;
-
- serializer_->PutAlignmentPrefix(object_);
- sink_->Put(kNewObject + reference.space(), "deferred object");
- serializer_->PutBackReference(object_, reference);
- sink_->PutInt(size >> kPointerSizeLog2, "deferred object size");
-
- UnlinkWeakNextScope unlink_weak_next(object_);
-
- object_->IterateBody(map->instance_type(), size, this);
- OutputRawData(object_->address() + size);
-}
-
-
-void Serializer::ObjectSerializer::VisitPointers(Object** start,
- Object** end) {
- Object** current = start;
- while (current < end) {
- while (current < end && (*current)->IsSmi()) current++;
- if (current < end) OutputRawData(reinterpret_cast<Address>(current));
-
- while (current < end && !(*current)->IsSmi()) {
- HeapObject* current_contents = HeapObject::cast(*current);
- int root_index = serializer_->root_index_map()->Lookup(current_contents);
- // Repeats are not subject to the write barrier so we can only use
- // immortal immovable root members. They are never in new space.
- if (current != start && root_index != RootIndexMap::kInvalidRootIndex &&
- Heap::RootIsImmortalImmovable(root_index) &&
- current_contents == current[-1]) {
- DCHECK(!serializer_->isolate()->heap()->InNewSpace(current_contents));
- int repeat_count = 1;
- while (&current[repeat_count] < end - 1 &&
- current[repeat_count] == current_contents) {
- repeat_count++;
- }
- current += repeat_count;
- bytes_processed_so_far_ += repeat_count * kPointerSize;
- if (repeat_count > kNumberOfFixedRepeat) {
- sink_->Put(kVariableRepeat, "VariableRepeat");
- sink_->PutInt(repeat_count, "repeat count");
- } else {
- sink_->Put(kFixedRepeatStart + repeat_count, "FixedRepeat");
- }
- } else {
- serializer_->SerializeObject(
- current_contents, kPlain, kStartOfObject, 0);
- bytes_processed_so_far_ += kPointerSize;
- current++;
- }
- }
- }
-}
-
-
-void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
- int skip = OutputRawData(rinfo->target_address_address(),
- kCanReturnSkipInsteadOfSkipping);
- HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
- Object* object = rinfo->target_object();
- serializer_->SerializeObject(HeapObject::cast(object), how_to_code,
- kStartOfObject, skip);
- bytes_processed_so_far_ += rinfo->target_address_size();
-}
-
-
-void Serializer::ObjectSerializer::VisitExternalReference(Address* p) {
- int skip = OutputRawData(reinterpret_cast<Address>(p),
- kCanReturnSkipInsteadOfSkipping);
- sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
- sink_->PutInt(skip, "SkipB4ExternalRef");
- Address target = *p;
- sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
- bytes_processed_so_far_ += kPointerSize;
-}
-
-
-void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) {
- int skip = OutputRawData(rinfo->target_address_address(),
- kCanReturnSkipInsteadOfSkipping);
- HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
- sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
- sink_->PutInt(skip, "SkipB4ExternalRef");
- Address target = rinfo->target_external_reference();
- sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
- bytes_processed_so_far_ += rinfo->target_address_size();
-}
-
-
-void Serializer::ObjectSerializer::VisitInternalReference(RelocInfo* rinfo) {
- // We can only reference to internal references of code that has been output.
- DCHECK(is_code_object_ && code_has_been_output_);
- // We do not use skip from last patched pc to find the pc to patch, since
- // target_address_address may not return addresses in ascending order when
- // used for internal references. External references may be stored at the
- // end of the code in the constant pool, whereas internal references are
- // inline. That would cause the skip to be negative. Instead, we store the
- // offset from code entry.
- Address entry = Code::cast(object_)->entry();
- intptr_t pc_offset = rinfo->target_internal_reference_address() - entry;
- intptr_t target_offset = rinfo->target_internal_reference() - entry;
- DCHECK(0 <= pc_offset &&
- pc_offset <= Code::cast(object_)->instruction_size());
- DCHECK(0 <= target_offset &&
- target_offset <= Code::cast(object_)->instruction_size());
- sink_->Put(rinfo->rmode() == RelocInfo::INTERNAL_REFERENCE
- ? kInternalReference
- : kInternalReferenceEncoded,
- "InternalRef");
- sink_->PutInt(static_cast<uintptr_t>(pc_offset), "internal ref address");
- sink_->PutInt(static_cast<uintptr_t>(target_offset), "internal ref value");
-}
-
-
-void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
- int skip = OutputRawData(rinfo->target_address_address(),
- kCanReturnSkipInsteadOfSkipping);
- HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
- sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
- sink_->PutInt(skip, "SkipB4ExternalRef");
- Address target = rinfo->target_address();
- sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
- bytes_processed_so_far_ += rinfo->target_address_size();
-}
-
-
-void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
- int skip = OutputRawData(rinfo->target_address_address(),
- kCanReturnSkipInsteadOfSkipping);
- Code* object = Code::GetCodeFromTargetAddress(rinfo->target_address());
- serializer_->SerializeObject(object, kFromCode, kInnerPointer, skip);
- bytes_processed_so_far_ += rinfo->target_address_size();
-}
-
-
-void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
- int skip = OutputRawData(entry_address, kCanReturnSkipInsteadOfSkipping);
- Code* object = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
- serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
- bytes_processed_so_far_ += kPointerSize;
-}
-
-
-void Serializer::ObjectSerializer::VisitCell(RelocInfo* rinfo) {
- int skip = OutputRawData(rinfo->pc(), kCanReturnSkipInsteadOfSkipping);
- Cell* object = Cell::cast(rinfo->target_cell());
- serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
- bytes_processed_so_far_ += kPointerSize;
-}
-
-
-bool Serializer::ObjectSerializer::SerializeExternalNativeSourceString(
- int builtin_count,
- v8::String::ExternalOneByteStringResource** resource_pointer,
- FixedArray* source_cache, int resource_index) {
- for (int i = 0; i < builtin_count; i++) {
- Object* source = source_cache->get(i);
- if (!source->IsUndefined()) {
- ExternalOneByteString* string = ExternalOneByteString::cast(source);
- typedef v8::String::ExternalOneByteStringResource Resource;
- const Resource* resource = string->resource();
- if (resource == *resource_pointer) {
- sink_->Put(resource_index, "NativesStringResource");
- sink_->PutSection(i, "NativesStringResourceEnd");
- bytes_processed_so_far_ += sizeof(resource);
- return true;
- }
- }
- }
- return false;
-}
-
-
-void Serializer::ObjectSerializer::VisitExternalOneByteString(
- v8::String::ExternalOneByteStringResource** resource_pointer) {
- Address references_start = reinterpret_cast<Address>(resource_pointer);
- OutputRawData(references_start);
- if (SerializeExternalNativeSourceString(
- Natives::GetBuiltinsCount(), resource_pointer,
- Natives::GetSourceCache(serializer_->isolate()->heap()),
- kNativesStringResource)) {
- return;
- }
- if (SerializeExternalNativeSourceString(
- ExtraNatives::GetBuiltinsCount(), resource_pointer,
- ExtraNatives::GetSourceCache(serializer_->isolate()->heap()),
- kExtraNativesStringResource)) {
- return;
- }
- // One of the strings in the natives cache should match the resource. We
- // don't expect any other kinds of external strings here.
- UNREACHABLE();
-}
-
-
-Address Serializer::ObjectSerializer::PrepareCode() {
- // To make snapshots reproducible, we make a copy of the code object
- // and wipe all pointers in the copy, which we then serialize.
- Code* original = Code::cast(object_);
- Code* code = serializer_->CopyCode(original);
- // Code age headers are not serializable.
- code->MakeYoung(serializer_->isolate());
- int mode_mask = RelocInfo::kCodeTargetMask |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
- RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
- for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
- RelocInfo* rinfo = it.rinfo();
- rinfo->WipeOut();
- }
- // We need to wipe out the header fields *after* wiping out the
- // relocations, because some of these fields are needed for the latter.
- code->WipeOutHeader();
- return code->address();
-}
-
-
-int Serializer::ObjectSerializer::OutputRawData(
- Address up_to, Serializer::ObjectSerializer::ReturnSkip return_skip) {
- Address object_start = object_->address();
- int base = bytes_processed_so_far_;
- int up_to_offset = static_cast<int>(up_to - object_start);
- int to_skip = up_to_offset - bytes_processed_so_far_;
- int bytes_to_output = to_skip;
- bytes_processed_so_far_ += to_skip;
- // This assert will fail if the reloc info gives us the target_address_address
- // locations in a non-ascending order. Luckily that doesn't happen.
- DCHECK(to_skip >= 0);
- bool outputting_code = false;
- if (to_skip != 0 && is_code_object_ && !code_has_been_output_) {
- // Output the code all at once and fix later.
- bytes_to_output = object_->Size() + to_skip - bytes_processed_so_far_;
- outputting_code = true;
- code_has_been_output_ = true;
- }
- if (bytes_to_output != 0 && (!is_code_object_ || outputting_code)) {
- if (!outputting_code && bytes_to_output == to_skip &&
- IsAligned(bytes_to_output, kPointerAlignment) &&
- bytes_to_output <= kNumberOfFixedRawData * kPointerSize) {
- int size_in_words = bytes_to_output >> kPointerSizeLog2;
- sink_->PutSection(kFixedRawDataStart + size_in_words, "FixedRawData");
- to_skip = 0; // This instruction includes skip.
- } else {
- // We always end up here if we are outputting the code of a code object.
- sink_->Put(kVariableRawData, "VariableRawData");
- sink_->PutInt(bytes_to_output, "length");
- }
-
- if (is_code_object_) object_start = PrepareCode();
-
- const char* description = is_code_object_ ? "Code" : "Byte";
- sink_->PutRaw(object_start + base, bytes_to_output, description);
- }
- if (to_skip != 0 && return_skip == kIgnoringReturn) {
- sink_->Put(kSkip, "Skip");
- sink_->PutInt(to_skip, "SkipDistance");
- to_skip = 0;
- }
- return to_skip;
-}
-
-
-BackReference Serializer::AllocateLargeObject(int size) {
- // Large objects are allocated one-by-one when deserializing. We do not
- // have to keep track of multiple chunks.
- large_objects_total_size_ += size;
- return BackReference::LargeObjectReference(seen_large_objects_index_++);
-}
-
-
-BackReference Serializer::Allocate(AllocationSpace space, int size) {
- DCHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
- DCHECK(size > 0 && size <= static_cast<int>(max_chunk_size(space)));
- uint32_t new_chunk_size = pending_chunk_[space] + size;
- if (new_chunk_size > max_chunk_size(space)) {
- // The new chunk size would not fit onto a single page. Complete the
- // current chunk and start a new one.
- sink_->Put(kNextChunk, "NextChunk");
- sink_->Put(space, "NextChunkSpace");
- completed_chunks_[space].Add(pending_chunk_[space]);
- DCHECK_LE(completed_chunks_[space].length(), BackReference::kMaxChunkIndex);
- pending_chunk_[space] = 0;
- new_chunk_size = size;
- }
- uint32_t offset = pending_chunk_[space];
- pending_chunk_[space] = new_chunk_size;
- return BackReference::Reference(space, completed_chunks_[space].length(),
- offset);
-}
-
-
-void Serializer::Pad() {
- // The non-branching GetInt will read up to 3 bytes too far, so we need
- // to pad the snapshot to make sure we don't read over the end.
- for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) {
- sink_->Put(kNop, "Padding");
- }
- // Pad up to pointer size for checksum.
- while (!IsAligned(sink_->Position(), kPointerAlignment)) {
- sink_->Put(kNop, "Padding");
- }
-}
-
-
-void Serializer::InitializeCodeAddressMap() {
- isolate_->InitializeLoggingAndCounters();
- code_address_map_ = new CodeAddressMap(isolate_);
-}
-
-
-Code* Serializer::CopyCode(Code* code) {
- code_buffer_.Rewind(0); // Clear buffer without deleting backing store.
- int size = code->CodeSize();
- code_buffer_.AddAll(Vector<byte>(code->address(), size));
- return Code::cast(HeapObject::FromAddress(&code_buffer_.first()));
-}
-
-
-ScriptData* CodeSerializer::Serialize(Isolate* isolate,
- Handle<SharedFunctionInfo> info,
- Handle<String> source) {
- base::ElapsedTimer timer;
- if (FLAG_profile_deserialization) timer.Start();
- if (FLAG_trace_serializer) {
- PrintF("[Serializing from");
- Object* script = info->script();
- if (script->IsScript()) Script::cast(script)->name()->ShortPrint();
- PrintF("]\n");
- }
-
- // Serialize code object.
- SnapshotByteSink sink(info->code()->CodeSize() * 2);
- CodeSerializer cs(isolate, &sink, *source);
- DisallowHeapAllocation no_gc;
- Object** location = Handle<Object>::cast(info).location();
- cs.VisitPointer(location);
- cs.SerializeDeferredObjects();
- cs.Pad();
-
- SerializedCodeData data(sink.data(), cs);
- ScriptData* script_data = data.GetScriptData();
-
- if (FLAG_profile_deserialization) {
- double ms = timer.Elapsed().InMillisecondsF();
- int length = script_data->length();
- PrintF("[Serializing to %d bytes took %0.3f ms]\n", length, ms);
- }
-
- return script_data;
-}
-
-
-void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) {
- int root_index = root_index_map_.Lookup(obj);
- if (root_index != RootIndexMap::kInvalidRootIndex) {
- PutRoot(root_index, obj, how_to_code, where_to_point, skip);
- return;
- }
-
- if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
-
- FlushSkip(skip);
-
- if (obj->IsCode()) {
- Code* code_object = Code::cast(obj);
- switch (code_object->kind()) {
- case Code::OPTIMIZED_FUNCTION: // No optimized code compiled yet.
- case Code::HANDLER: // No handlers patched in yet.
- case Code::REGEXP: // No regexp literals initialized yet.
- case Code::NUMBER_OF_KINDS: // Pseudo enum value.
- CHECK(false);
- case Code::BUILTIN:
- SerializeBuiltin(code_object->builtin_index(), how_to_code,
- where_to_point);
- return;
- case Code::STUB:
- SerializeCodeStub(code_object->stub_key(), how_to_code, where_to_point);
- return;
-#define IC_KIND_CASE(KIND) case Code::KIND:
- IC_KIND_LIST(IC_KIND_CASE)
-#undef IC_KIND_CASE
- SerializeIC(code_object, how_to_code, where_to_point);
- return;
- case Code::FUNCTION:
- DCHECK(code_object->has_reloc_info_for_serialization());
- SerializeGeneric(code_object, how_to_code, where_to_point);
- return;
- case Code::WASM_FUNCTION:
- UNREACHABLE();
- }
- UNREACHABLE();
- }
-
- // Past this point we should not see any (context-specific) maps anymore.
- CHECK(!obj->IsMap());
- // There should be no references to the global object embedded.
- CHECK(!obj->IsJSGlobalProxy() && !obj->IsJSGlobalObject());
- // There should be no hash table embedded. They would require rehashing.
- CHECK(!obj->IsHashTable());
- // We expect no instantiated function objects or contexts.
- CHECK(!obj->IsJSFunction() && !obj->IsContext());
-
- SerializeGeneric(obj, how_to_code, where_to_point);
-}
-
-
-void CodeSerializer::SerializeGeneric(HeapObject* heap_object,
- HowToCode how_to_code,
- WhereToPoint where_to_point) {
- // Object has not yet been serialized. Serialize it here.
- ObjectSerializer serializer(this, heap_object, sink_, how_to_code,
- where_to_point);
- serializer.Serialize();
-}
-
-
-void CodeSerializer::SerializeBuiltin(int builtin_index, HowToCode how_to_code,
- WhereToPoint where_to_point) {
- DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
- (how_to_code == kPlain && where_to_point == kInnerPointer) ||
- (how_to_code == kFromCode && where_to_point == kInnerPointer));
- DCHECK_LT(builtin_index, Builtins::builtin_count);
- DCHECK_LE(0, builtin_index);
-
- if (FLAG_trace_serializer) {
- PrintF(" Encoding builtin: %s\n",
- isolate()->builtins()->name(builtin_index));
- }
-
- sink_->Put(kBuiltin + how_to_code + where_to_point, "Builtin");
- sink_->PutInt(builtin_index, "builtin_index");
-}
-
-
-void CodeSerializer::SerializeCodeStub(uint32_t stub_key, HowToCode how_to_code,
- WhereToPoint where_to_point) {
- DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
- (how_to_code == kPlain && where_to_point == kInnerPointer) ||
- (how_to_code == kFromCode && where_to_point == kInnerPointer));
- DCHECK(CodeStub::MajorKeyFromKey(stub_key) != CodeStub::NoCache);
- DCHECK(!CodeStub::GetCode(isolate(), stub_key).is_null());
-
- int index = AddCodeStubKey(stub_key) + kCodeStubsBaseIndex;
-
- if (FLAG_trace_serializer) {
- PrintF(" Encoding code stub %s as %d\n",
- CodeStub::MajorName(CodeStub::MajorKeyFromKey(stub_key)), index);
- }
-
- sink_->Put(kAttachedReference + how_to_code + where_to_point, "CodeStub");
- sink_->PutInt(index, "CodeStub key");
-}
-
-
-void CodeSerializer::SerializeIC(Code* ic, HowToCode how_to_code,
- WhereToPoint where_to_point) {
- // The IC may be implemented as a stub.
- uint32_t stub_key = ic->stub_key();
- if (stub_key != CodeStub::NoCacheKey()) {
- if (FLAG_trace_serializer) {
- PrintF(" %s is a code stub\n", Code::Kind2String(ic->kind()));
- }
- SerializeCodeStub(stub_key, how_to_code, where_to_point);
- return;
- }
- // The IC may be implemented as builtin. Only real builtins have an
- // actual builtin_index value attached (otherwise it's just garbage).
- // Compare to make sure we are really dealing with a builtin.
- int builtin_index = ic->builtin_index();
- if (builtin_index < Builtins::builtin_count) {
- Builtins::Name name = static_cast<Builtins::Name>(builtin_index);
- Code* builtin = isolate()->builtins()->builtin(name);
- if (builtin == ic) {
- if (FLAG_trace_serializer) {
- PrintF(" %s is a builtin\n", Code::Kind2String(ic->kind()));
- }
- DCHECK(ic->kind() == Code::KEYED_LOAD_IC ||
- ic->kind() == Code::KEYED_STORE_IC);
- SerializeBuiltin(builtin_index, how_to_code, where_to_point);
- return;
- }
- }
- // The IC may also just be a piece of code kept in the non_monomorphic_cache.
- // In that case, just serialize as a normal code object.
- if (FLAG_trace_serializer) {
- PrintF(" %s has no special handling\n", Code::Kind2String(ic->kind()));
- }
- DCHECK(ic->kind() == Code::LOAD_IC || ic->kind() == Code::STORE_IC);
- SerializeGeneric(ic, how_to_code, where_to_point);
-}
-
-
-int CodeSerializer::AddCodeStubKey(uint32_t stub_key) {
- // TODO(yangguo) Maybe we need a hash table for a faster lookup than O(n^2).
- int index = 0;
- while (index < stub_keys_.length()) {
- if (stub_keys_[index] == stub_key) return index;
- index++;
- }
- stub_keys_.Add(stub_key);
- return index;
-}
-
-
-MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
- Isolate* isolate, ScriptData* cached_data, Handle<String> source) {
- base::ElapsedTimer timer;
- if (FLAG_profile_deserialization) timer.Start();
-
- HandleScope scope(isolate);
-
- base::SmartPointer<SerializedCodeData> scd(
- SerializedCodeData::FromCachedData(isolate, cached_data, *source));
- if (scd.is_empty()) {
- if (FLAG_profile_deserialization) PrintF("[Cached code failed check]\n");
- DCHECK(cached_data->rejected());
- return MaybeHandle<SharedFunctionInfo>();
- }
-
- // Prepare and register list of attached objects.
- Vector<const uint32_t> code_stub_keys = scd->CodeStubKeys();
- Vector<Handle<Object> > attached_objects = Vector<Handle<Object> >::New(
- code_stub_keys.length() + kCodeStubsBaseIndex);
- attached_objects[kSourceObjectIndex] = source;
- for (int i = 0; i < code_stub_keys.length(); i++) {
- attached_objects[i + kCodeStubsBaseIndex] =
- CodeStub::GetCode(isolate, code_stub_keys[i]).ToHandleChecked();
- }
-
- Deserializer deserializer(scd.get());
- deserializer.SetAttachedObjects(attached_objects);
-
- // Deserialize.
- Handle<SharedFunctionInfo> result;
- if (!deserializer.DeserializeCode(isolate).ToHandle(&result)) {
- // Deserializing may fail if the reservations cannot be fulfilled.
- if (FLAG_profile_deserialization) PrintF("[Deserializing failed]\n");
- return MaybeHandle<SharedFunctionInfo>();
- }
-
- if (FLAG_profile_deserialization) {
- double ms = timer.Elapsed().InMillisecondsF();
- int length = cached_data->length();
- PrintF("[Deserializing from %d bytes took %0.3f ms]\n", length, ms);
- }
- result->set_deserialized(true);
-
- if (isolate->logger()->is_logging_code_events() ||
- isolate->cpu_profiler()->is_profiling()) {
- String* name = isolate->heap()->empty_string();
- if (result->script()->IsScript()) {
- Script* script = Script::cast(result->script());
- if (script->name()->IsString()) name = String::cast(script->name());
- }
- isolate->logger()->CodeCreateEvent(Logger::SCRIPT_TAG, result->code(),
- *result, NULL, name);
- }
- return scope.CloseAndEscape(result);
-}
-
-
-void SerializedData::AllocateData(int size) {
- DCHECK(!owns_data_);
- data_ = NewArray<byte>(size);
- size_ = size;
- owns_data_ = true;
- DCHECK(IsAligned(reinterpret_cast<intptr_t>(data_), kPointerAlignment));
-}
-
-
-SnapshotData::SnapshotData(const Serializer& ser) {
- DisallowHeapAllocation no_gc;
- List<Reservation> reservations;
- ser.EncodeReservations(&reservations);
- const List<byte>& payload = ser.sink()->data();
-
- // Calculate sizes.
- int reservation_size = reservations.length() * kInt32Size;
- int size = kHeaderSize + reservation_size + payload.length();
-
- // Allocate backing store and create result data.
- AllocateData(size);
-
- // Set header values.
- SetMagicNumber(ser.isolate());
- SetHeaderValue(kCheckSumOffset, Version::Hash());
- SetHeaderValue(kNumReservationsOffset, reservations.length());
- SetHeaderValue(kPayloadLengthOffset, payload.length());
-
- // Copy reservation chunk sizes.
- CopyBytes(data_ + kHeaderSize, reinterpret_cast<byte*>(reservations.begin()),
- reservation_size);
-
- // Copy serialized data.
- CopyBytes(data_ + kHeaderSize + reservation_size, payload.begin(),
- static_cast<size_t>(payload.length()));
-}
-
-
-bool SnapshotData::IsSane() {
- return GetHeaderValue(kCheckSumOffset) == Version::Hash();
-}
-
-
-Vector<const SerializedData::Reservation> SnapshotData::Reservations() const {
- return Vector<const Reservation>(
- reinterpret_cast<const Reservation*>(data_ + kHeaderSize),
- GetHeaderValue(kNumReservationsOffset));
-}
-
-
-Vector<const byte> SnapshotData::Payload() const {
- int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size;
- const byte* payload = data_ + kHeaderSize + reservations_size;
- int length = GetHeaderValue(kPayloadLengthOffset);
- DCHECK_EQ(data_ + size_, payload + length);
- return Vector<const byte>(payload, length);
-}
-
-
-class Checksum {
- public:
- explicit Checksum(Vector<const byte> payload) {
-#ifdef MEMORY_SANITIZER
- // Computing the checksum includes padding bytes for objects like strings.
- // Mark every object as initialized in the code serializer.
- MSAN_MEMORY_IS_INITIALIZED(payload.start(), payload.length());
-#endif // MEMORY_SANITIZER
- // Fletcher's checksum. Modified to reduce 64-bit sums to 32-bit.
- uintptr_t a = 1;
- uintptr_t b = 0;
- const uintptr_t* cur = reinterpret_cast<const uintptr_t*>(payload.start());
- DCHECK(IsAligned(payload.length(), kIntptrSize));
- const uintptr_t* end = cur + payload.length() / kIntptrSize;
- while (cur < end) {
- // Unsigned overflow expected and intended.
- a += *cur++;
- b += a;
- }
-#if V8_HOST_ARCH_64_BIT
- a ^= a >> 32;
- b ^= b >> 32;
-#endif // V8_HOST_ARCH_64_BIT
- a_ = static_cast<uint32_t>(a);
- b_ = static_cast<uint32_t>(b);
- }
-
- bool Check(uint32_t a, uint32_t b) const { return a == a_ && b == b_; }
-
- uint32_t a() const { return a_; }
- uint32_t b() const { return b_; }
-
- private:
- uint32_t a_;
- uint32_t b_;
-
- DISALLOW_COPY_AND_ASSIGN(Checksum);
-};
-
-
-SerializedCodeData::SerializedCodeData(const List<byte>& payload,
- const CodeSerializer& cs) {
- DisallowHeapAllocation no_gc;
- const List<uint32_t>* stub_keys = cs.stub_keys();
-
- List<Reservation> reservations;
- cs.EncodeReservations(&reservations);
-
- // Calculate sizes.
- int reservation_size = reservations.length() * kInt32Size;
- int num_stub_keys = stub_keys->length();
- int stub_keys_size = stub_keys->length() * kInt32Size;
- int payload_offset = kHeaderSize + reservation_size + stub_keys_size;
- int padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
- int size = padded_payload_offset + payload.length();
-
- // Allocate backing store and create result data.
- AllocateData(size);
-
- // Set header values.
- SetMagicNumber(cs.isolate());
- SetHeaderValue(kVersionHashOffset, Version::Hash());
- SetHeaderValue(kSourceHashOffset, SourceHash(cs.source()));
- SetHeaderValue(kCpuFeaturesOffset,
- static_cast<uint32_t>(CpuFeatures::SupportedFeatures()));
- SetHeaderValue(kFlagHashOffset, FlagList::Hash());
- SetHeaderValue(kNumReservationsOffset, reservations.length());
- SetHeaderValue(kNumCodeStubKeysOffset, num_stub_keys);
- SetHeaderValue(kPayloadLengthOffset, payload.length());
-
- Checksum checksum(payload.ToConstVector());
- SetHeaderValue(kChecksum1Offset, checksum.a());
- SetHeaderValue(kChecksum2Offset, checksum.b());
-
- // Copy reservation chunk sizes.
- CopyBytes(data_ + kHeaderSize, reinterpret_cast<byte*>(reservations.begin()),
- reservation_size);
-
- // Copy code stub keys.
- CopyBytes(data_ + kHeaderSize + reservation_size,
- reinterpret_cast<byte*>(stub_keys->begin()), stub_keys_size);
-
- memset(data_ + payload_offset, 0, padded_payload_offset - payload_offset);
-
- // Copy serialized data.
- CopyBytes(data_ + padded_payload_offset, payload.begin(),
- static_cast<size_t>(payload.length()));
-}
-
-
-SerializedCodeData::SanityCheckResult SerializedCodeData::SanityCheck(
- Isolate* isolate, String* source) const {
- uint32_t magic_number = GetMagicNumber();
- if (magic_number != ComputeMagicNumber(isolate)) return MAGIC_NUMBER_MISMATCH;
- uint32_t version_hash = GetHeaderValue(kVersionHashOffset);
- uint32_t source_hash = GetHeaderValue(kSourceHashOffset);
- uint32_t cpu_features = GetHeaderValue(kCpuFeaturesOffset);
- uint32_t flags_hash = GetHeaderValue(kFlagHashOffset);
- uint32_t c1 = GetHeaderValue(kChecksum1Offset);
- uint32_t c2 = GetHeaderValue(kChecksum2Offset);
- if (version_hash != Version::Hash()) return VERSION_MISMATCH;
- if (source_hash != SourceHash(source)) return SOURCE_MISMATCH;
- if (cpu_features != static_cast<uint32_t>(CpuFeatures::SupportedFeatures())) {
- return CPU_FEATURES_MISMATCH;
- }
- if (flags_hash != FlagList::Hash()) return FLAGS_MISMATCH;
- if (!Checksum(Payload()).Check(c1, c2)) return CHECKSUM_MISMATCH;
- return CHECK_SUCCESS;
-}
-
-
-uint32_t SerializedCodeData::SourceHash(String* source) const {
- return source->length();
-}
-
-
-// Return ScriptData object and relinquish ownership over it to the caller.
-ScriptData* SerializedCodeData::GetScriptData() {
- DCHECK(owns_data_);
- ScriptData* result = new ScriptData(data_, size_);
- result->AcquireDataOwnership();
- owns_data_ = false;
- data_ = NULL;
- return result;
-}
-
-
-Vector<const SerializedData::Reservation> SerializedCodeData::Reservations()
- const {
- return Vector<const Reservation>(
- reinterpret_cast<const Reservation*>(data_ + kHeaderSize),
- GetHeaderValue(kNumReservationsOffset));
-}
-
-
-Vector<const byte> SerializedCodeData::Payload() const {
- int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size;
- int code_stubs_size = GetHeaderValue(kNumCodeStubKeysOffset) * kInt32Size;
- int payload_offset = kHeaderSize + reservations_size + code_stubs_size;
- int padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
- const byte* payload = data_ + padded_payload_offset;
- DCHECK(IsAligned(reinterpret_cast<intptr_t>(payload), kPointerAlignment));
- int length = GetHeaderValue(kPayloadLengthOffset);
- DCHECK_EQ(data_ + size_, payload + length);
- return Vector<const byte>(payload, length);
-}
-
-
-Vector<const uint32_t> SerializedCodeData::CodeStubKeys() const {
- int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size;
- const byte* start = data_ + kHeaderSize + reservations_size;
- return Vector<const uint32_t>(reinterpret_cast<const uint32_t*>(start),
- GetHeaderValue(kNumCodeStubKeysOffset));
-}
-
-
-SerializedCodeData::SerializedCodeData(ScriptData* data)
- : SerializedData(const_cast<byte*>(data->data()), data->length()) {}
-
-
-SerializedCodeData* SerializedCodeData::FromCachedData(Isolate* isolate,
- ScriptData* cached_data,
- String* source) {
- DisallowHeapAllocation no_gc;
- SerializedCodeData* scd = new SerializedCodeData(cached_data);
- SanityCheckResult r = scd->SanityCheck(isolate, source);
- if (r == CHECK_SUCCESS) return scd;
- cached_data->Reject();
- source->GetIsolate()->counters()->code_cache_reject_reason()->AddSample(r);
- delete scd;
- return NULL;
-}
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/snapshot/serialize.h b/deps/v8/src/snapshot/serialize.h
deleted file mode 100644
index f7420efea9..0000000000
--- a/deps/v8/src/snapshot/serialize.h
+++ /dev/null
@@ -1,816 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_SNAPSHOT_SERIALIZE_H_
-#define V8_SNAPSHOT_SERIALIZE_H_
-
-#include "src/address-map.h"
-#include "src/heap/heap.h"
-#include "src/objects.h"
-#include "src/snapshot/snapshot-source-sink.h"
-
-namespace v8 {
-namespace internal {
-
-class Isolate;
-class ScriptData;
-
-static const int kDeoptTableSerializeEntryCount = 64;
-
-// ExternalReferenceTable is a helper class that defines the relationship
-// between external references and their encodings. It is used to build
-// hashmaps in ExternalReferenceEncoder and ExternalReferenceDecoder.
-class ExternalReferenceTable {
- public:
- static ExternalReferenceTable* instance(Isolate* isolate);
-
- int size() const { return refs_.length(); }
- Address address(int i) { return refs_[i].address; }
- const char* name(int i) { return refs_[i].name; }
-
- inline static Address NotAvailable() { return NULL; }
-
- private:
- struct ExternalReferenceEntry {
- Address address;
- const char* name;
- };
-
- explicit ExternalReferenceTable(Isolate* isolate);
-
- void Add(Address address, const char* name) {
- ExternalReferenceEntry entry = {address, name};
- refs_.Add(entry);
- }
-
- List<ExternalReferenceEntry> refs_;
-
- DISALLOW_COPY_AND_ASSIGN(ExternalReferenceTable);
-};
-
-
-class ExternalReferenceEncoder {
- public:
- explicit ExternalReferenceEncoder(Isolate* isolate);
-
- uint32_t Encode(Address key) const;
-
- const char* NameOfAddress(Isolate* isolate, Address address) const;
-
- private:
- static uint32_t Hash(Address key) {
- return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key) >>
- kPointerSizeLog2);
- }
-
- HashMap* map_;
-
- DISALLOW_COPY_AND_ASSIGN(ExternalReferenceEncoder);
-};
-
-
-class PartialCacheIndexMap : public AddressMapBase {
- public:
- PartialCacheIndexMap() : map_(HashMap::PointersMatch) {}
-
- static const int kInvalidIndex = -1;
-
- // Lookup object in the map. Return its index if found, or create
- // a new entry with new_index as value, and return kInvalidIndex.
- int LookupOrInsert(HeapObject* obj, int new_index) {
- HashMap::Entry* entry = LookupEntry(&map_, obj, false);
- if (entry != NULL) return GetValue(entry);
- SetValue(LookupEntry(&map_, obj, true), static_cast<uint32_t>(new_index));
- return kInvalidIndex;
- }
-
- private:
- HashMap map_;
-
- DISALLOW_COPY_AND_ASSIGN(PartialCacheIndexMap);
-};
-
-
-class HotObjectsList {
- public:
- HotObjectsList() : index_(0) {
- for (int i = 0; i < kSize; i++) circular_queue_[i] = NULL;
- }
-
- void Add(HeapObject* object) {
- circular_queue_[index_] = object;
- index_ = (index_ + 1) & kSizeMask;
- }
-
- HeapObject* Get(int index) {
- DCHECK_NOT_NULL(circular_queue_[index]);
- return circular_queue_[index];
- }
-
- static const int kNotFound = -1;
-
- int Find(HeapObject* object) {
- for (int i = 0; i < kSize; i++) {
- if (circular_queue_[i] == object) return i;
- }
- return kNotFound;
- }
-
- static const int kSize = 8;
-
- private:
- STATIC_ASSERT(IS_POWER_OF_TWO(kSize));
- static const int kSizeMask = kSize - 1;
- HeapObject* circular_queue_[kSize];
- int index_;
-
- DISALLOW_COPY_AND_ASSIGN(HotObjectsList);
-};
-
-
-// The Serializer/Deserializer class is a common superclass for Serializer and
-// Deserializer which is used to store common constants and methods used by
-// both.
-class SerializerDeserializer: public ObjectVisitor {
- public:
- static void Iterate(Isolate* isolate, ObjectVisitor* visitor);
-
- // No reservation for large object space necessary.
- static const int kNumberOfPreallocatedSpaces = LAST_PAGED_SPACE + 1;
- static const int kNumberOfSpaces = LAST_SPACE + 1;
-
- protected:
- static bool CanBeDeferred(HeapObject* o);
-
- // ---------- byte code range 0x00..0x7f ----------
- // Byte codes in this range represent Where, HowToCode and WhereToPoint.
- // Where the pointed-to object can be found:
- // The static assert below will trigger when the number of preallocated spaces
- // changed. If that happens, update the bytecode ranges in the comments below.
- STATIC_ASSERT(5 == kNumberOfSpaces);
- enum Where {
- // 0x00..0x04 Allocate new object, in specified space.
- kNewObject = 0,
- // 0x05 Unused (including 0x25, 0x45, 0x65).
- // 0x06 Unused (including 0x26, 0x46, 0x66).
- // 0x07 Unused (including 0x27, 0x47, 0x67).
- // 0x08..0x0c Reference to previous object from space.
- kBackref = 0x08,
- // 0x0d Unused (including 0x2d, 0x4d, 0x6d).
- // 0x0e Unused (including 0x2e, 0x4e, 0x6e).
- // 0x0f Unused (including 0x2f, 0x4f, 0x6f).
- // 0x10..0x14 Reference to previous object from space after skip.
- kBackrefWithSkip = 0x10,
- // 0x15 Unused (including 0x35, 0x55, 0x75).
- // 0x16 Unused (including 0x36, 0x56, 0x76).
- // 0x17 Misc (including 0x37, 0x57, 0x77).
- // 0x18 Root array item.
- kRootArray = 0x18,
- // 0x19 Object in the partial snapshot cache.
- kPartialSnapshotCache = 0x19,
- // 0x1a External reference referenced by id.
- kExternalReference = 0x1a,
- // 0x1b Object provided in the attached list.
- kAttachedReference = 0x1b,
- // 0x1c Builtin code referenced by index.
- kBuiltin = 0x1c
- // 0x1d..0x1f Misc (including 0x3d..0x3f, 0x5d..0x5f, 0x7d..0x7f)
- };
-
- static const int kWhereMask = 0x1f;
- static const int kSpaceMask = 7;
- STATIC_ASSERT(kNumberOfSpaces <= kSpaceMask + 1);
-
- // How to code the pointer to the object.
- enum HowToCode {
- // Straight pointer.
- kPlain = 0,
- // A pointer inlined in code. What this means depends on the architecture.
- kFromCode = 0x20
- };
-
- static const int kHowToCodeMask = 0x20;
-
- // Where to point within the object.
- enum WhereToPoint {
- // Points to start of object
- kStartOfObject = 0,
- // Points to instruction in code object or payload of cell.
- kInnerPointer = 0x40
- };
-
- static const int kWhereToPointMask = 0x40;
-
- // ---------- Misc ----------
- // Skip.
- static const int kSkip = 0x1d;
- // Internal reference encoded as offsets of pc and target from code entry.
- static const int kInternalReference = 0x1e;
- static const int kInternalReferenceEncoded = 0x1f;
- // Do nothing, used for padding.
- static const int kNop = 0x3d;
- // Move to next reserved chunk.
- static const int kNextChunk = 0x3e;
- // Deferring object content.
- static const int kDeferred = 0x3f;
- // Used for the source code of the natives, which is in the executable, but
- // is referred to from external strings in the snapshot.
- static const int kNativesStringResource = 0x5d;
- // Used for the source code for compiled stubs, which is in the executable,
- // but is referred to from external strings in the snapshot.
- static const int kExtraNativesStringResource = 0x5e;
- // A tag emitted at strategic points in the snapshot to delineate sections.
- // If the deserializer does not find these at the expected moments then it
- // is an indication that the snapshot and the VM do not fit together.
- // Examine the build process for architecture, version or configuration
- // mismatches.
- static const int kSynchronize = 0x17;
- // Repeats of variable length.
- static const int kVariableRepeat = 0x37;
- // Raw data of variable length.
- static const int kVariableRawData = 0x57;
- // Alignment prefixes 0x7d..0x7f
- static const int kAlignmentPrefix = 0x7d;
-
- // 0x77 unused
-
- // ---------- byte code range 0x80..0xff ----------
- // First 32 root array items.
- static const int kNumberOfRootArrayConstants = 0x20;
- // 0x80..0x9f
- static const int kRootArrayConstants = 0x80;
- // 0xa0..0xbf
- static const int kRootArrayConstantsWithSkip = 0xa0;
- static const int kRootArrayConstantsMask = 0x1f;
-
- // 8 hot (recently seen or back-referenced) objects with optional skip.
- static const int kNumberOfHotObjects = 0x08;
- // 0xc0..0xc7
- static const int kHotObject = 0xc0;
- // 0xc8..0xcf
- static const int kHotObjectWithSkip = 0xc8;
- static const int kHotObjectMask = 0x07;
-
- // 32 common raw data lengths.
- static const int kNumberOfFixedRawData = 0x20;
- // 0xd0..0xef
- static const int kFixedRawData = 0xd0;
- static const int kOnePointerRawData = kFixedRawData;
- static const int kFixedRawDataStart = kFixedRawData - 1;
-
- // 16 repeats lengths.
- static const int kNumberOfFixedRepeat = 0x10;
- // 0xf0..0xff
- static const int kFixedRepeat = 0xf0;
- static const int kFixedRepeatStart = kFixedRepeat - 1;
-
- // ---------- special values ----------
- static const int kAnyOldSpace = -1;
-
- // Sentinel after a new object to indicate that double alignment is needed.
- static const int kDoubleAlignmentSentinel = 0;
-
- // Used as index for the attached reference representing the source object.
- static const int kSourceObjectReference = 0;
-
- // Used as index for the attached reference representing the global proxy.
- static const int kGlobalProxyReference = 0;
-
- // ---------- member variable ----------
- HotObjectsList hot_objects_;
-};
-
-
-class SerializedData {
- public:
- class Reservation {
- public:
- explicit Reservation(uint32_t size)
- : reservation_(ChunkSizeBits::encode(size)) {}
-
- uint32_t chunk_size() const { return ChunkSizeBits::decode(reservation_); }
- bool is_last() const { return IsLastChunkBits::decode(reservation_); }
-
- void mark_as_last() { reservation_ |= IsLastChunkBits::encode(true); }
-
- private:
- uint32_t reservation_;
- };
-
- SerializedData(byte* data, int size)
- : data_(data), size_(size), owns_data_(false) {}
- SerializedData() : data_(NULL), size_(0), owns_data_(false) {}
-
- ~SerializedData() {
- if (owns_data_) DeleteArray<byte>(data_);
- }
-
- uint32_t GetMagicNumber() const { return GetHeaderValue(kMagicNumberOffset); }
-
- class ChunkSizeBits : public BitField<uint32_t, 0, 31> {};
- class IsLastChunkBits : public BitField<bool, 31, 1> {};
-
- static uint32_t ComputeMagicNumber(ExternalReferenceTable* table) {
- uint32_t external_refs = table->size();
- return 0xC0DE0000 ^ external_refs;
- }
-
- protected:
- void SetHeaderValue(int offset, uint32_t value) {
- uint32_t* address = reinterpret_cast<uint32_t*>(data_ + offset);
- memcpy(reinterpret_cast<uint32_t*>(address), &value, sizeof(value));
- }
-
- uint32_t GetHeaderValue(int offset) const {
- uint32_t value;
- memcpy(&value, reinterpret_cast<int*>(data_ + offset), sizeof(value));
- return value;
- }
-
- void AllocateData(int size);
-
- static uint32_t ComputeMagicNumber(Isolate* isolate) {
- return ComputeMagicNumber(ExternalReferenceTable::instance(isolate));
- }
-
- void SetMagicNumber(Isolate* isolate) {
- SetHeaderValue(kMagicNumberOffset, ComputeMagicNumber(isolate));
- }
-
- static const int kMagicNumberOffset = 0;
-
- byte* data_;
- int size_;
- bool owns_data_;
-};
-
-
-// A Deserializer reads a snapshot and reconstructs the Object graph it defines.
-class Deserializer: public SerializerDeserializer {
- public:
- // Create a deserializer from a snapshot byte source.
- template <class Data>
- explicit Deserializer(Data* data)
- : isolate_(NULL),
- source_(data->Payload()),
- magic_number_(data->GetMagicNumber()),
- external_reference_table_(NULL),
- deserialized_large_objects_(0),
- deserializing_user_code_(false),
- next_alignment_(kWordAligned) {
- DecodeReservation(data->Reservations());
- }
-
- ~Deserializer() override;
-
- // Deserialize the snapshot into an empty heap.
- void Deserialize(Isolate* isolate);
-
- // Deserialize a single object and the objects reachable from it.
- MaybeHandle<Object> DeserializePartial(Isolate* isolate,
- Handle<JSGlobalProxy> global_proxy);
-
- // Deserialize a shared function info. Fail gracefully.
- MaybeHandle<SharedFunctionInfo> DeserializeCode(Isolate* isolate);
-
- // Pass a vector of externally-provided objects referenced by the snapshot.
- // The ownership to its backing store is handed over as well.
- void SetAttachedObjects(Vector<Handle<Object> > attached_objects) {
- attached_objects_ = attached_objects;
- }
-
- private:
- void VisitPointers(Object** start, Object** end) override;
-
- void Synchronize(VisitorSynchronization::SyncTag tag) override;
-
- void VisitRuntimeEntry(RelocInfo* rinfo) override { UNREACHABLE(); }
-
- void Initialize(Isolate* isolate);
-
- bool deserializing_user_code() { return deserializing_user_code_; }
-
- void DecodeReservation(Vector<const SerializedData::Reservation> res);
-
- bool ReserveSpace();
-
- void UnalignedCopy(Object** dest, Object** src) {
- memcpy(dest, src, sizeof(*src));
- }
-
- void SetAlignment(byte data) {
- DCHECK_EQ(kWordAligned, next_alignment_);
- int alignment = data - (kAlignmentPrefix - 1);
- DCHECK_LE(kWordAligned, alignment);
- DCHECK_LE(alignment, kSimd128Unaligned);
- next_alignment_ = static_cast<AllocationAlignment>(alignment);
- }
-
- void DeserializeDeferredObjects();
-
- void FlushICacheForNewIsolate();
- void FlushICacheForNewCodeObjects();
-
- void CommitPostProcessedObjects(Isolate* isolate);
-
- // Fills in some heap data in an area from start to end (non-inclusive). The
- // space id is used for the write barrier. The object_address is the address
- // of the object we are writing into, or NULL if we are not writing into an
- // object, i.e. if we are writing a series of tagged values that are not on
- // the heap. Return false if the object content has been deferred.
- bool ReadData(Object** start, Object** end, int space,
- Address object_address);
- void ReadObject(int space_number, Object** write_back);
- Address Allocate(int space_index, int size);
-
- // Special handling for serialized code like hooking up internalized strings.
- HeapObject* PostProcessNewObject(HeapObject* obj, int space);
-
- // This returns the address of an object that has been described in the
- // snapshot by chunk index and offset.
- HeapObject* GetBackReferencedObject(int space);
-
- Object** CopyInNativesSource(Vector<const char> source_vector,
- Object** current);
-
- // Cached current isolate.
- Isolate* isolate_;
-
- // Objects from the attached object descriptions in the serialized user code.
- Vector<Handle<Object> > attached_objects_;
-
- SnapshotByteSource source_;
- uint32_t magic_number_;
-
- // The address of the next object that will be allocated in each space.
- // Each space has a number of chunks reserved by the GC, with each chunk
- // fitting into a page. Deserialized objects are allocated into the
- // current chunk of the target space by bumping up high water mark.
- Heap::Reservation reservations_[kNumberOfSpaces];
- uint32_t current_chunk_[kNumberOfPreallocatedSpaces];
- Address high_water_[kNumberOfPreallocatedSpaces];
-
- ExternalReferenceTable* external_reference_table_;
-
- List<HeapObject*> deserialized_large_objects_;
- List<Code*> new_code_objects_;
- List<Handle<String> > new_internalized_strings_;
- List<Handle<Script> > new_scripts_;
-
- bool deserializing_user_code_;
-
- AllocationAlignment next_alignment_;
-
- DISALLOW_COPY_AND_ASSIGN(Deserializer);
-};
-
-
-class CodeAddressMap;
-
-// There can be only one serializer per V8 process.
-class Serializer : public SerializerDeserializer {
- public:
- Serializer(Isolate* isolate, SnapshotByteSink* sink);
- ~Serializer() override;
-
- void EncodeReservations(List<SerializedData::Reservation>* out) const;
-
- void SerializeDeferredObjects();
-
- Isolate* isolate() const { return isolate_; }
-
- BackReferenceMap* back_reference_map() { return &back_reference_map_; }
- RootIndexMap* root_index_map() { return &root_index_map_; }
-
-#ifdef OBJECT_PRINT
- void CountInstanceType(Map* map, int size);
-#endif // OBJECT_PRINT
-
- protected:
- class ObjectSerializer;
- class RecursionScope {
- public:
- explicit RecursionScope(Serializer* serializer) : serializer_(serializer) {
- serializer_->recursion_depth_++;
- }
- ~RecursionScope() { serializer_->recursion_depth_--; }
- bool ExceedsMaximum() {
- return serializer_->recursion_depth_ >= kMaxRecursionDepth;
- }
-
- private:
- static const int kMaxRecursionDepth = 32;
- Serializer* serializer_;
- };
-
- virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) = 0;
-
- void PutRoot(int index, HeapObject* object, HowToCode how, WhereToPoint where,
- int skip);
-
- void PutBackReference(HeapObject* object, BackReference reference);
-
- // Emit alignment prefix if necessary, return required padding space in bytes.
- int PutAlignmentPrefix(HeapObject* object);
-
- // Returns true if the object was successfully serialized.
- bool SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip);
-
- inline void FlushSkip(int skip) {
- if (skip != 0) {
- sink_->Put(kSkip, "SkipFromSerializeObject");
- sink_->PutInt(skip, "SkipDistanceFromSerializeObject");
- }
- }
-
- bool BackReferenceIsAlreadyAllocated(BackReference back_reference);
-
- // This will return the space for an object.
- BackReference AllocateLargeObject(int size);
- BackReference Allocate(AllocationSpace space, int size);
- int EncodeExternalReference(Address addr) {
- return external_reference_encoder_.Encode(addr);
- }
-
- // GetInt reads 4 bytes at once, requiring padding at the end.
- void Pad();
-
- // Some roots should not be serialized, because their actual value depends on
- // absolute addresses and they are reset after deserialization, anyway.
- bool ShouldBeSkipped(Object** current);
-
- // We may not need the code address map for logging for every instance
- // of the serializer. Initialize it on demand.
- void InitializeCodeAddressMap();
-
- Code* CopyCode(Code* code);
-
- inline uint32_t max_chunk_size(int space) const {
- DCHECK_LE(0, space);
- DCHECK_LT(space, kNumberOfSpaces);
- return max_chunk_size_[space];
- }
-
- SnapshotByteSink* sink() const { return sink_; }
-
- void QueueDeferredObject(HeapObject* obj) {
- DCHECK(back_reference_map_.Lookup(obj).is_valid());
- deferred_objects_.Add(obj);
- }
-
- void OutputStatistics(const char* name);
-
- Isolate* isolate_;
-
- SnapshotByteSink* sink_;
- ExternalReferenceEncoder external_reference_encoder_;
-
- BackReferenceMap back_reference_map_;
- RootIndexMap root_index_map_;
-
- int recursion_depth_;
-
- friend class Deserializer;
- friend class ObjectSerializer;
- friend class RecursionScope;
- friend class SnapshotData;
-
- private:
- void VisitPointers(Object** start, Object** end) override;
-
- CodeAddressMap* code_address_map_;
- // Objects from the same space are put into chunks for bulk-allocation
- // when deserializing. We have to make sure that each chunk fits into a
- // page. So we track the chunk size in pending_chunk_ of a space, but
- // when it exceeds a page, we complete the current chunk and start a new one.
- uint32_t pending_chunk_[kNumberOfPreallocatedSpaces];
- List<uint32_t> completed_chunks_[kNumberOfPreallocatedSpaces];
- uint32_t max_chunk_size_[kNumberOfPreallocatedSpaces];
-
- // We map serialized large objects to indexes for back-referencing.
- uint32_t large_objects_total_size_;
- uint32_t seen_large_objects_index_;
-
- List<byte> code_buffer_;
-
- // To handle stack overflow.
- List<HeapObject*> deferred_objects_;
-
-#ifdef OBJECT_PRINT
- static const int kInstanceTypes = 256;
- int* instance_type_count_;
- size_t* instance_type_size_;
-#endif // OBJECT_PRINT
-
- DISALLOW_COPY_AND_ASSIGN(Serializer);
-};
-
-
-class PartialSerializer : public Serializer {
- public:
- PartialSerializer(Isolate* isolate, Serializer* startup_snapshot_serializer,
- SnapshotByteSink* sink)
- : Serializer(isolate, sink),
- startup_serializer_(startup_snapshot_serializer),
- global_object_(NULL) {
- InitializeCodeAddressMap();
- }
-
- ~PartialSerializer() override { OutputStatistics("PartialSerializer"); }
-
- // Serialize the objects reachable from a single object pointer.
- void Serialize(Object** o);
-
- private:
- void SerializeObject(HeapObject* o, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) override;
-
- int PartialSnapshotCacheIndex(HeapObject* o);
- bool ShouldBeInThePartialSnapshotCache(HeapObject* o);
-
- Serializer* startup_serializer_;
- Object* global_object_;
- PartialCacheIndexMap partial_cache_index_map_;
- DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
-};
-
-
-class StartupSerializer : public Serializer {
- public:
- StartupSerializer(Isolate* isolate, SnapshotByteSink* sink);
- ~StartupSerializer() override { OutputStatistics("StartupSerializer"); }
-
- // Serialize the current state of the heap. The order is:
- // 1) Strong references.
- // 2) Partial snapshot cache.
- // 3) Weak references (e.g. the string table).
- void SerializeStrongReferences();
- void SerializeWeakReferencesAndDeferred();
-
- private:
- // The StartupSerializer has to serialize the root array, which is slightly
- // different.
- void VisitPointers(Object** start, Object** end) override;
- void SerializeObject(HeapObject* o, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) override;
- void Synchronize(VisitorSynchronization::SyncTag tag) override;
-
- intptr_t root_index_wave_front_;
- bool serializing_builtins_;
- DISALLOW_COPY_AND_ASSIGN(StartupSerializer);
-};
-
-
-class CodeSerializer : public Serializer {
- public:
- static ScriptData* Serialize(Isolate* isolate,
- Handle<SharedFunctionInfo> info,
- Handle<String> source);
-
- MUST_USE_RESULT static MaybeHandle<SharedFunctionInfo> Deserialize(
- Isolate* isolate, ScriptData* cached_data, Handle<String> source);
-
- static const int kSourceObjectIndex = 0;
- STATIC_ASSERT(kSourceObjectReference == kSourceObjectIndex);
-
- static const int kCodeStubsBaseIndex = 1;
-
- String* source() const {
- DCHECK(!AllowHeapAllocation::IsAllowed());
- return source_;
- }
-
- const List<uint32_t>* stub_keys() const { return &stub_keys_; }
-
- private:
- CodeSerializer(Isolate* isolate, SnapshotByteSink* sink, String* source)
- : Serializer(isolate, sink), source_(source) {
- back_reference_map_.AddSourceString(source);
- }
-
- ~CodeSerializer() override { OutputStatistics("CodeSerializer"); }
-
- void SerializeObject(HeapObject* o, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) override;
-
- void SerializeBuiltin(int builtin_index, HowToCode how_to_code,
- WhereToPoint where_to_point);
- void SerializeIC(Code* ic, HowToCode how_to_code,
- WhereToPoint where_to_point);
- void SerializeCodeStub(uint32_t stub_key, HowToCode how_to_code,
- WhereToPoint where_to_point);
- void SerializeGeneric(HeapObject* heap_object, HowToCode how_to_code,
- WhereToPoint where_to_point);
- int AddCodeStubKey(uint32_t stub_key);
-
- DisallowHeapAllocation no_gc_;
- String* source_;
- List<uint32_t> stub_keys_;
- DISALLOW_COPY_AND_ASSIGN(CodeSerializer);
-};
-
-
-// Wrapper around reservation sizes and the serialization payload.
-class SnapshotData : public SerializedData {
- public:
- // Used when producing.
- explicit SnapshotData(const Serializer& ser);
-
- // Used when consuming.
- explicit SnapshotData(const Vector<const byte> snapshot)
- : SerializedData(const_cast<byte*>(snapshot.begin()), snapshot.length()) {
- CHECK(IsSane());
- }
-
- Vector<const Reservation> Reservations() const;
- Vector<const byte> Payload() const;
-
- Vector<const byte> RawData() const {
- return Vector<const byte>(data_, size_);
- }
-
- private:
- bool IsSane();
-
- // The data header consists of uint32_t-sized entries:
- // [0] magic number and external reference count
- // [1] version hash
- // [2] number of reservation size entries
- // [3] payload length
- // ... reservations
- // ... serialized payload
- static const int kCheckSumOffset = kMagicNumberOffset + kInt32Size;
- static const int kNumReservationsOffset = kCheckSumOffset + kInt32Size;
- static const int kPayloadLengthOffset = kNumReservationsOffset + kInt32Size;
- static const int kHeaderSize = kPayloadLengthOffset + kInt32Size;
-};
-
-
-// Wrapper around ScriptData to provide code-serializer-specific functionality.
-class SerializedCodeData : public SerializedData {
- public:
- // Used when consuming.
- static SerializedCodeData* FromCachedData(Isolate* isolate,
- ScriptData* cached_data,
- String* source);
-
- // Used when producing.
- SerializedCodeData(const List<byte>& payload, const CodeSerializer& cs);
-
- // Return ScriptData object and relinquish ownership over it to the caller.
- ScriptData* GetScriptData();
-
- Vector<const Reservation> Reservations() const;
- Vector<const byte> Payload() const;
-
- Vector<const uint32_t> CodeStubKeys() const;
-
- private:
- explicit SerializedCodeData(ScriptData* data);
-
- enum SanityCheckResult {
- CHECK_SUCCESS = 0,
- MAGIC_NUMBER_MISMATCH = 1,
- VERSION_MISMATCH = 2,
- SOURCE_MISMATCH = 3,
- CPU_FEATURES_MISMATCH = 4,
- FLAGS_MISMATCH = 5,
- CHECKSUM_MISMATCH = 6
- };
-
- SanityCheckResult SanityCheck(Isolate* isolate, String* source) const;
-
- uint32_t SourceHash(String* source) const;
-
- // The data header consists of uint32_t-sized entries:
- // [0] magic number and external reference count
- // [1] version hash
- // [2] source hash
- // [3] cpu features
- // [4] flag hash
- // [5] number of code stub keys
- // [6] number of reservation size entries
- // [7] payload length
- // [8] payload checksum part 1
- // [9] payload checksum part 2
- // ... reservations
- // ... code stub keys
- // ... serialized payload
- static const int kVersionHashOffset = kMagicNumberOffset + kInt32Size;
- static const int kSourceHashOffset = kVersionHashOffset + kInt32Size;
- static const int kCpuFeaturesOffset = kSourceHashOffset + kInt32Size;
- static const int kFlagHashOffset = kCpuFeaturesOffset + kInt32Size;
- static const int kNumReservationsOffset = kFlagHashOffset + kInt32Size;
- static const int kNumCodeStubKeysOffset = kNumReservationsOffset + kInt32Size;
- static const int kPayloadLengthOffset = kNumCodeStubKeysOffset + kInt32Size;
- static const int kChecksum1Offset = kPayloadLengthOffset + kInt32Size;
- static const int kChecksum2Offset = kChecksum1Offset + kInt32Size;
- static const int kHeaderSize = kChecksum2Offset + kInt32Size;
-};
-} // namespace internal
-} // namespace v8
-
-#endif // V8_SNAPSHOT_SERIALIZE_H_
diff --git a/deps/v8/src/snapshot/serializer-common.cc b/deps/v8/src/snapshot/serializer-common.cc
new file mode 100644
index 0000000000..eeb7eb73fa
--- /dev/null
+++ b/deps/v8/src/snapshot/serializer-common.cc
@@ -0,0 +1,76 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/serializer-common.h"
+
+#include "src/external-reference-table.h"
+#include "src/ic/stub-cache.h"
+#include "src/list-inl.h"
+
+namespace v8 {
+namespace internal {
+
+ExternalReferenceEncoder::ExternalReferenceEncoder(Isolate* isolate) {
+ map_ = isolate->external_reference_map();
+ if (map_ != NULL) return;
+ map_ = new HashMap(HashMap::PointersMatch);
+ ExternalReferenceTable* table = ExternalReferenceTable::instance(isolate);
+ for (int i = 0; i < table->size(); ++i) {
+ Address addr = table->address(i);
+ if (addr == ExternalReferenceTable::NotAvailable()) continue;
+ // We expect no duplicate external references entries in the table.
+ DCHECK_NULL(map_->Lookup(addr, Hash(addr)));
+ map_->LookupOrInsert(addr, Hash(addr))->value = reinterpret_cast<void*>(i);
+ }
+ isolate->set_external_reference_map(map_);
+}
+
+uint32_t ExternalReferenceEncoder::Encode(Address address) const {
+ DCHECK_NOT_NULL(address);
+ HashMap::Entry* entry =
+ const_cast<HashMap*>(map_)->Lookup(address, Hash(address));
+ DCHECK_NOT_NULL(entry);
+ return static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
+}
+
+const char* ExternalReferenceEncoder::NameOfAddress(Isolate* isolate,
+ Address address) const {
+ HashMap::Entry* entry =
+ const_cast<HashMap*>(map_)->Lookup(address, Hash(address));
+ if (entry == NULL) return "<unknown>";
+ uint32_t i = static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
+ return ExternalReferenceTable::instance(isolate)->name(i);
+}
+
+void SerializedData::AllocateData(int size) {
+ DCHECK(!owns_data_);
+ data_ = NewArray<byte>(size);
+ size_ = size;
+ owns_data_ = true;
+ DCHECK(IsAligned(reinterpret_cast<intptr_t>(data_), kPointerAlignment));
+}
+
+// The partial snapshot cache is terminated by undefined. We visit the
+// partial snapshot...
+// - during deserialization to populate it.
+// - during normal GC to keep its content alive.
+// - not during serialization. The partial serializer adds to it explicitly.
+void SerializerDeserializer::Iterate(Isolate* isolate, ObjectVisitor* visitor) {
+ List<Object*>* cache = isolate->partial_snapshot_cache();
+ for (int i = 0;; ++i) {
+ // Extend the array ready to get a value when deserializing.
+ if (cache->length() <= i) cache->Add(Smi::FromInt(0));
+ // During deserialization, the visitor populates the partial snapshot cache
+ // and eventually terminates the cache with undefined.
+ visitor->VisitPointer(&cache->at(i));
+ if (cache->at(i)->IsUndefined()) break;
+ }
+}
+
+bool SerializerDeserializer::CanBeDeferred(HeapObject* o) {
+ return !o->IsString() && !o->IsScript();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/snapshot/serializer-common.h b/deps/v8/src/snapshot/serializer-common.h
new file mode 100644
index 0000000000..645a9af3bf
--- /dev/null
+++ b/deps/v8/src/snapshot/serializer-common.h
@@ -0,0 +1,290 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_SERIALIZER_COMMON_H_
+#define V8_SNAPSHOT_SERIALIZER_COMMON_H_
+
+#include "src/address-map.h"
+#include "src/external-reference-table.h"
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+
+class ExternalReferenceEncoder {
+ public:
+ explicit ExternalReferenceEncoder(Isolate* isolate);
+
+ uint32_t Encode(Address key) const;
+
+ const char* NameOfAddress(Isolate* isolate, Address address) const;
+
+ private:
+ static uint32_t Hash(Address key) {
+ return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key) >>
+ kPointerSizeLog2);
+ }
+
+ HashMap* map_;
+
+ DISALLOW_COPY_AND_ASSIGN(ExternalReferenceEncoder);
+};
+
+class HotObjectsList {
+ public:
+ HotObjectsList() : index_(0) {
+ for (int i = 0; i < kSize; i++) circular_queue_[i] = NULL;
+ }
+
+ void Add(HeapObject* object) {
+ circular_queue_[index_] = object;
+ index_ = (index_ + 1) & kSizeMask;
+ }
+
+ HeapObject* Get(int index) {
+ DCHECK_NOT_NULL(circular_queue_[index]);
+ return circular_queue_[index];
+ }
+
+ static const int kNotFound = -1;
+
+ int Find(HeapObject* object) {
+ for (int i = 0; i < kSize; i++) {
+ if (circular_queue_[i] == object) return i;
+ }
+ return kNotFound;
+ }
+
+ static const int kSize = 8;
+
+ private:
+ STATIC_ASSERT(IS_POWER_OF_TWO(kSize));
+ static const int kSizeMask = kSize - 1;
+ HeapObject* circular_queue_[kSize];
+ int index_;
+
+ DISALLOW_COPY_AND_ASSIGN(HotObjectsList);
+};
+
+// The Serializer/Deserializer class is a common superclass for Serializer and
+// Deserializer which is used to store common constants and methods used by
+// both.
+class SerializerDeserializer : public ObjectVisitor {
+ public:
+ static void Iterate(Isolate* isolate, ObjectVisitor* visitor);
+
+ // No reservation for large object space necessary.
+ static const int kNumberOfPreallocatedSpaces = LAST_PAGED_SPACE + 1;
+ static const int kNumberOfSpaces = LAST_SPACE + 1;
+
+ protected:
+ static bool CanBeDeferred(HeapObject* o);
+
+ // ---------- byte code range 0x00..0x7f ----------
+ // Byte codes in this range represent Where, HowToCode and WhereToPoint.
+ // Where the pointed-to object can be found:
+ // The static assert below will trigger when the number of preallocated spaces
+ // changed. If that happens, update the bytecode ranges in the comments below.
+ STATIC_ASSERT(5 == kNumberOfSpaces);
+ enum Where {
+ // 0x00..0x04 Allocate new object, in specified space.
+ kNewObject = 0,
+ // 0x05 Unused (including 0x25, 0x45, 0x65).
+ // 0x06 Unused (including 0x26, 0x46, 0x66).
+ // 0x07 Unused (including 0x27, 0x47, 0x67).
+ // 0x08..0x0c Reference to previous object from space.
+ kBackref = 0x08,
+ // 0x0d Unused (including 0x2d, 0x4d, 0x6d).
+ // 0x0e Unused (including 0x2e, 0x4e, 0x6e).
+ // 0x0f Unused (including 0x2f, 0x4f, 0x6f).
+ // 0x10..0x14 Reference to previous object from space after skip.
+ kBackrefWithSkip = 0x10,
+ // 0x15 Unused (including 0x35, 0x55, 0x75).
+ // 0x16 Unused (including 0x36, 0x56, 0x76).
+ // 0x17 Misc (including 0x37, 0x57, 0x77).
+ // 0x18 Root array item.
+ kRootArray = 0x18,
+ // 0x19 Object in the partial snapshot cache.
+ kPartialSnapshotCache = 0x19,
+ // 0x1a External reference referenced by id.
+ kExternalReference = 0x1a,
+ // 0x1b Object provided in the attached list.
+ kAttachedReference = 0x1b,
+ // 0x1c Builtin code referenced by index.
+ kBuiltin = 0x1c
+ // 0x1d..0x1f Misc (including 0x3d..0x3f, 0x5d..0x5f, 0x7d..0x7f)
+ };
+
+ static const int kWhereMask = 0x1f;
+ static const int kSpaceMask = 7;
+ STATIC_ASSERT(kNumberOfSpaces <= kSpaceMask + 1);
+
+ // How to code the pointer to the object.
+ enum HowToCode {
+ // Straight pointer.
+ kPlain = 0,
+ // A pointer inlined in code. What this means depends on the architecture.
+ kFromCode = 0x20
+ };
+
+ static const int kHowToCodeMask = 0x20;
+
+ // Where to point within the object.
+ enum WhereToPoint {
+ // Points to start of object
+ kStartOfObject = 0,
+ // Points to instruction in code object or payload of cell.
+ kInnerPointer = 0x40
+ };
+
+ static const int kWhereToPointMask = 0x40;
+
+ // ---------- Misc ----------
+ // Skip.
+ static const int kSkip = 0x1d;
+ // Internal reference encoded as offsets of pc and target from code entry.
+ static const int kInternalReference = 0x1e;
+ static const int kInternalReferenceEncoded = 0x1f;
+ // Do nothing, used for padding.
+ static const int kNop = 0x3d;
+ // Move to next reserved chunk.
+ static const int kNextChunk = 0x3e;
+ // Deferring object content.
+ static const int kDeferred = 0x3f;
+ // Used for the source code of the natives, which is in the executable, but
+ // is referred to from external strings in the snapshot.
+ static const int kNativesStringResource = 0x5d;
+ // Used for the source code for compiled stubs, which is in the executable,
+ // but is referred to from external strings in the snapshot.
+ static const int kExtraNativesStringResource = 0x5e;
+ // A tag emitted at strategic points in the snapshot to delineate sections.
+ // If the deserializer does not find these at the expected moments then it
+ // is an indication that the snapshot and the VM do not fit together.
+ // Examine the build process for architecture, version or configuration
+ // mismatches.
+ static const int kSynchronize = 0x17;
+ // Repeats of variable length.
+ static const int kVariableRepeat = 0x37;
+ // Raw data of variable length.
+ static const int kVariableRawData = 0x57;
+ // Alignment prefixes 0x7d..0x7f
+ static const int kAlignmentPrefix = 0x7d;
+
+ // 0x77 unused
+
+ // ---------- byte code range 0x80..0xff ----------
+ // First 32 root array items.
+ static const int kNumberOfRootArrayConstants = 0x20;
+ // 0x80..0x9f
+ static const int kRootArrayConstants = 0x80;
+ // 0xa0..0xbf
+ static const int kRootArrayConstantsWithSkip = 0xa0;
+ static const int kRootArrayConstantsMask = 0x1f;
+
+ // 8 hot (recently seen or back-referenced) objects with optional skip.
+ static const int kNumberOfHotObjects = 0x08;
+ // 0xc0..0xc7
+ static const int kHotObject = 0xc0;
+ // 0xc8..0xcf
+ static const int kHotObjectWithSkip = 0xc8;
+ static const int kHotObjectMask = 0x07;
+
+ // 32 common raw data lengths.
+ static const int kNumberOfFixedRawData = 0x20;
+ // 0xd0..0xef
+ static const int kFixedRawData = 0xd0;
+ static const int kOnePointerRawData = kFixedRawData;
+ static const int kFixedRawDataStart = kFixedRawData - 1;
+
+ // 16 repeats lengths.
+ static const int kNumberOfFixedRepeat = 0x10;
+ // 0xf0..0xff
+ static const int kFixedRepeat = 0xf0;
+ static const int kFixedRepeatStart = kFixedRepeat - 1;
+
+ // ---------- special values ----------
+ static const int kAnyOldSpace = -1;
+
+ // Sentinel after a new object to indicate that double alignment is needed.
+ static const int kDoubleAlignmentSentinel = 0;
+
+ // Used as index for the attached reference representing the source object.
+ static const int kSourceObjectReference = 0;
+
+ // Used as index for the attached reference representing the global proxy.
+ static const int kGlobalProxyReference = 0;
+
+ // ---------- member variable ----------
+ HotObjectsList hot_objects_;
+};
+
+class SerializedData {
+ public:
+ class Reservation {
+ public:
+ explicit Reservation(uint32_t size)
+ : reservation_(ChunkSizeBits::encode(size)) {}
+
+ uint32_t chunk_size() const { return ChunkSizeBits::decode(reservation_); }
+ bool is_last() const { return IsLastChunkBits::decode(reservation_); }
+
+ void mark_as_last() { reservation_ |= IsLastChunkBits::encode(true); }
+
+ private:
+ uint32_t reservation_;
+ };
+
+ SerializedData(byte* data, int size)
+ : data_(data), size_(size), owns_data_(false) {}
+ SerializedData() : data_(NULL), size_(0), owns_data_(false) {}
+
+ ~SerializedData() {
+ if (owns_data_) DeleteArray<byte>(data_);
+ }
+
+ uint32_t GetMagicNumber() const { return GetHeaderValue(kMagicNumberOffset); }
+
+ class ChunkSizeBits : public BitField<uint32_t, 0, 31> {};
+ class IsLastChunkBits : public BitField<bool, 31, 1> {};
+
+ static uint32_t ComputeMagicNumber(ExternalReferenceTable* table) {
+ uint32_t external_refs = table->size();
+ return 0xC0DE0000 ^ external_refs;
+ }
+
+ protected:
+ void SetHeaderValue(int offset, uint32_t value) {
+ uint32_t* address = reinterpret_cast<uint32_t*>(data_ + offset);
+ memcpy(reinterpret_cast<uint32_t*>(address), &value, sizeof(value));
+ }
+
+ uint32_t GetHeaderValue(int offset) const {
+ uint32_t value;
+ memcpy(&value, reinterpret_cast<int*>(data_ + offset), sizeof(value));
+ return value;
+ }
+
+ void AllocateData(int size);
+
+ static uint32_t ComputeMagicNumber(Isolate* isolate) {
+ return ComputeMagicNumber(ExternalReferenceTable::instance(isolate));
+ }
+
+ void SetMagicNumber(Isolate* isolate) {
+ SetHeaderValue(kMagicNumberOffset, ComputeMagicNumber(isolate));
+ }
+
+ static const int kMagicNumberOffset = 0;
+
+ byte* data_;
+ int size_;
+ bool owns_data_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_SERIALIZER_COMMON_H_
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
new file mode 100644
index 0000000000..41693384f3
--- /dev/null
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -0,0 +1,770 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/serializer.h"
+
+#include "src/macro-assembler.h"
+#include "src/snapshot/natives.h"
+
+namespace v8 {
+namespace internal {
+
+Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
+ : isolate_(isolate),
+ sink_(sink),
+ external_reference_encoder_(isolate),
+ root_index_map_(isolate),
+ recursion_depth_(0),
+ code_address_map_(NULL),
+ large_objects_total_size_(0),
+ seen_large_objects_index_(0) {
+ // The serializer is meant to be used only to generate initial heap images
+ // from a context in which there is only one isolate.
+ for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
+ pending_chunk_[i] = 0;
+ max_chunk_size_[i] = static_cast<uint32_t>(
+ MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(i)));
+ }
+
+#ifdef OBJECT_PRINT
+ if (FLAG_serialization_statistics) {
+ instance_type_count_ = NewArray<int>(kInstanceTypes);
+ instance_type_size_ = NewArray<size_t>(kInstanceTypes);
+ for (int i = 0; i < kInstanceTypes; i++) {
+ instance_type_count_[i] = 0;
+ instance_type_size_[i] = 0;
+ }
+ } else {
+ instance_type_count_ = NULL;
+ instance_type_size_ = NULL;
+ }
+#endif // OBJECT_PRINT
+}
+
+Serializer::~Serializer() {
+ if (code_address_map_ != NULL) delete code_address_map_;
+#ifdef OBJECT_PRINT
+ if (instance_type_count_ != NULL) {
+ DeleteArray(instance_type_count_);
+ DeleteArray(instance_type_size_);
+ }
+#endif // OBJECT_PRINT
+}
+
+#ifdef OBJECT_PRINT
+void Serializer::CountInstanceType(Map* map, int size) {
+ int instance_type = map->instance_type();
+ instance_type_count_[instance_type]++;
+ instance_type_size_[instance_type] += size;
+}
+#endif // OBJECT_PRINT
+
+void Serializer::OutputStatistics(const char* name) {
+ if (!FLAG_serialization_statistics) return;
+ PrintF("%s:\n", name);
+ PrintF(" Spaces (bytes):\n");
+ for (int space = 0; space < kNumberOfSpaces; space++) {
+ PrintF("%16s", AllocationSpaceName(static_cast<AllocationSpace>(space)));
+ }
+ PrintF("\n");
+ for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
+ size_t s = pending_chunk_[space];
+ for (uint32_t chunk_size : completed_chunks_[space]) s += chunk_size;
+ PrintF("%16" V8_SIZET_PREFIX V8_PTR_PREFIX "d", s);
+ }
+ PrintF("%16d\n", large_objects_total_size_);
+#ifdef OBJECT_PRINT
+ PrintF(" Instance types (count and bytes):\n");
+#define PRINT_INSTANCE_TYPE(Name) \
+ if (instance_type_count_[Name]) { \
+ PrintF("%10d %10" V8_SIZET_PREFIX V8_PTR_PREFIX "d %s\n", \
+ instance_type_count_[Name], instance_type_size_[Name], #Name); \
+ }
+ INSTANCE_TYPE_LIST(PRINT_INSTANCE_TYPE)
+#undef PRINT_INSTANCE_TYPE
+ PrintF("\n");
+#endif // OBJECT_PRINT
+}
+
+void Serializer::SerializeDeferredObjects() {
+ while (deferred_objects_.length() > 0) {
+ HeapObject* obj = deferred_objects_.RemoveLast();
+ ObjectSerializer obj_serializer(this, obj, sink_, kPlain, kStartOfObject);
+ obj_serializer.SerializeDeferred();
+ }
+ sink_->Put(kSynchronize, "Finished with deferred objects");
+}
+
+void Serializer::VisitPointers(Object** start, Object** end) {
+ for (Object** current = start; current < end; current++) {
+ if ((*current)->IsSmi()) {
+ PutSmi(Smi::cast(*current));
+ } else {
+ SerializeObject(HeapObject::cast(*current), kPlain, kStartOfObject, 0);
+ }
+ }
+}
+
+void Serializer::EncodeReservations(
+ List<SerializedData::Reservation>* out) const {
+ for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
+ for (int j = 0; j < completed_chunks_[i].length(); j++) {
+ out->Add(SerializedData::Reservation(completed_chunks_[i][j]));
+ }
+
+ if (pending_chunk_[i] > 0 || completed_chunks_[i].length() == 0) {
+ out->Add(SerializedData::Reservation(pending_chunk_[i]));
+ }
+ out->last().mark_as_last();
+ }
+
+ out->Add(SerializedData::Reservation(large_objects_total_size_));
+ out->last().mark_as_last();
+}
+
+#ifdef DEBUG
+bool Serializer::BackReferenceIsAlreadyAllocated(BackReference reference) {
+ DCHECK(reference.is_valid());
+ DCHECK(!reference.is_source());
+ DCHECK(!reference.is_global_proxy());
+ AllocationSpace space = reference.space();
+ int chunk_index = reference.chunk_index();
+ if (space == LO_SPACE) {
+ return chunk_index == 0 &&
+ reference.large_object_index() < seen_large_objects_index_;
+ } else if (chunk_index == completed_chunks_[space].length()) {
+ return reference.chunk_offset() < pending_chunk_[space];
+ } else {
+ return chunk_index < completed_chunks_[space].length() &&
+ reference.chunk_offset() < completed_chunks_[space][chunk_index];
+ }
+}
+#endif // DEBUG
+
+bool Serializer::SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip) {
+ if (how_to_code == kPlain && where_to_point == kStartOfObject) {
+ // Encode a reference to a hot object by its index in the working set.
+ int index = hot_objects_.Find(obj);
+ if (index != HotObjectsList::kNotFound) {
+ DCHECK(index >= 0 && index < kNumberOfHotObjects);
+ if (FLAG_trace_serializer) {
+ PrintF(" Encoding hot object %d:", index);
+ obj->ShortPrint();
+ PrintF("\n");
+ }
+ if (skip != 0) {
+ sink_->Put(kHotObjectWithSkip + index, "HotObjectWithSkip");
+ sink_->PutInt(skip, "HotObjectSkipDistance");
+ } else {
+ sink_->Put(kHotObject + index, "HotObject");
+ }
+ return true;
+ }
+ }
+ BackReference back_reference = back_reference_map_.Lookup(obj);
+ if (back_reference.is_valid()) {
+ // Encode the location of an already deserialized object in order to write
+ // its location into a later object. We can encode the location as an
+ // offset fromthe start of the deserialized objects or as an offset
+ // backwards from thecurrent allocation pointer.
+ if (back_reference.is_source()) {
+ FlushSkip(skip);
+ if (FLAG_trace_serializer) PrintF(" Encoding source object\n");
+ DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject);
+ sink_->Put(kAttachedReference + kPlain + kStartOfObject, "Source");
+ sink_->PutInt(kSourceObjectReference, "kSourceObjectReference");
+ } else if (back_reference.is_global_proxy()) {
+ FlushSkip(skip);
+ if (FLAG_trace_serializer) PrintF(" Encoding global proxy\n");
+ DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject);
+ sink_->Put(kAttachedReference + kPlain + kStartOfObject, "Global Proxy");
+ sink_->PutInt(kGlobalProxyReference, "kGlobalProxyReference");
+ } else {
+ if (FLAG_trace_serializer) {
+ PrintF(" Encoding back reference to: ");
+ obj->ShortPrint();
+ PrintF("\n");
+ }
+
+ PutAlignmentPrefix(obj);
+ AllocationSpace space = back_reference.space();
+ if (skip == 0) {
+ sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRef");
+ } else {
+ sink_->Put(kBackrefWithSkip + how_to_code + where_to_point + space,
+ "BackRefWithSkip");
+ sink_->PutInt(skip, "BackRefSkipDistance");
+ }
+ PutBackReference(obj, back_reference);
+ }
+ return true;
+ }
+ return false;
+}
+
+void Serializer::PutRoot(int root_index, HeapObject* object,
+ SerializerDeserializer::HowToCode how_to_code,
+ SerializerDeserializer::WhereToPoint where_to_point,
+ int skip) {
+ if (FLAG_trace_serializer) {
+ PrintF(" Encoding root %d:", root_index);
+ object->ShortPrint();
+ PrintF("\n");
+ }
+
+ if (how_to_code == kPlain && where_to_point == kStartOfObject &&
+ root_index < kNumberOfRootArrayConstants &&
+ !isolate()->heap()->InNewSpace(object)) {
+ if (skip == 0) {
+ sink_->Put(kRootArrayConstants + root_index, "RootConstant");
+ } else {
+ sink_->Put(kRootArrayConstantsWithSkip + root_index, "RootConstant");
+ sink_->PutInt(skip, "SkipInPutRoot");
+ }
+ } else {
+ FlushSkip(skip);
+ sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
+ sink_->PutInt(root_index, "root_index");
+ }
+}
+
+void Serializer::PutSmi(Smi* smi) {
+ sink_->Put(kOnePointerRawData, "Smi");
+ byte* bytes = reinterpret_cast<byte*>(&smi);
+ for (int i = 0; i < kPointerSize; i++) sink_->Put(bytes[i], "Byte");
+}
+
+void Serializer::PutBackReference(HeapObject* object, BackReference reference) {
+ DCHECK(BackReferenceIsAlreadyAllocated(reference));
+ sink_->PutInt(reference.reference(), "BackRefValue");
+ hot_objects_.Add(object);
+}
+
+int Serializer::PutAlignmentPrefix(HeapObject* object) {
+ AllocationAlignment alignment = object->RequiredAlignment();
+ if (alignment != kWordAligned) {
+ DCHECK(1 <= alignment && alignment <= 3);
+ byte prefix = (kAlignmentPrefix - 1) + alignment;
+ sink_->Put(prefix, "Alignment");
+ return Heap::GetMaximumFillToAlign(alignment);
+ }
+ return 0;
+}
+
+BackReference Serializer::AllocateLargeObject(int size) {
+ // Large objects are allocated one-by-one when deserializing. We do not
+ // have to keep track of multiple chunks.
+ large_objects_total_size_ += size;
+ return BackReference::LargeObjectReference(seen_large_objects_index_++);
+}
+
+BackReference Serializer::Allocate(AllocationSpace space, int size) {
+ DCHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
+ DCHECK(size > 0 && size <= static_cast<int>(max_chunk_size(space)));
+ uint32_t new_chunk_size = pending_chunk_[space] + size;
+ if (new_chunk_size > max_chunk_size(space)) {
+ // The new chunk size would not fit onto a single page. Complete the
+ // current chunk and start a new one.
+ sink_->Put(kNextChunk, "NextChunk");
+ sink_->Put(space, "NextChunkSpace");
+ completed_chunks_[space].Add(pending_chunk_[space]);
+ DCHECK_LE(completed_chunks_[space].length(), BackReference::kMaxChunkIndex);
+ pending_chunk_[space] = 0;
+ new_chunk_size = size;
+ }
+ uint32_t offset = pending_chunk_[space];
+ pending_chunk_[space] = new_chunk_size;
+ return BackReference::Reference(space, completed_chunks_[space].length(),
+ offset);
+}
+
+void Serializer::Pad() {
+ // The non-branching GetInt will read up to 3 bytes too far, so we need
+ // to pad the snapshot to make sure we don't read over the end.
+ for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) {
+ sink_->Put(kNop, "Padding");
+ }
+ // Pad up to pointer size for checksum.
+ while (!IsAligned(sink_->Position(), kPointerAlignment)) {
+ sink_->Put(kNop, "Padding");
+ }
+}
+
+void Serializer::InitializeCodeAddressMap() {
+ isolate_->InitializeLoggingAndCounters();
+ code_address_map_ = new CodeAddressMap(isolate_);
+}
+
+Code* Serializer::CopyCode(Code* code) {
+ code_buffer_.Rewind(0); // Clear buffer without deleting backing store.
+ int size = code->CodeSize();
+ code_buffer_.AddAll(Vector<byte>(code->address(), size));
+ return Code::cast(HeapObject::FromAddress(&code_buffer_.first()));
+}
+
+bool Serializer::HasNotExceededFirstPageOfEachSpace() {
+ for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
+ if (!completed_chunks_[i].is_empty()) return false;
+ }
+ return true;
+}
+
+void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
+ int size, Map* map) {
+ if (serializer_->code_address_map_) {
+ const char* code_name =
+ serializer_->code_address_map_->Lookup(object_->address());
+ LOG(serializer_->isolate_,
+ CodeNameEvent(object_->address(), sink_->Position(), code_name));
+ }
+
+ BackReference back_reference;
+ if (space == LO_SPACE) {
+ sink_->Put(kNewObject + reference_representation_ + space,
+ "NewLargeObject");
+ sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
+ if (object_->IsCode()) {
+ sink_->Put(EXECUTABLE, "executable large object");
+ } else {
+ sink_->Put(NOT_EXECUTABLE, "not executable large object");
+ }
+ back_reference = serializer_->AllocateLargeObject(size);
+ } else {
+ int fill = serializer_->PutAlignmentPrefix(object_);
+ back_reference = serializer_->Allocate(space, size + fill);
+ sink_->Put(kNewObject + reference_representation_ + space, "NewObject");
+ sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
+ }
+
+#ifdef OBJECT_PRINT
+ if (FLAG_serialization_statistics) {
+ serializer_->CountInstanceType(map, size);
+ }
+#endif // OBJECT_PRINT
+
+ // Mark this object as already serialized.
+ serializer_->back_reference_map()->Add(object_, back_reference);
+
+ // Serialize the map (first word of the object).
+ serializer_->SerializeObject(map, kPlain, kStartOfObject, 0);
+}
+
+void Serializer::ObjectSerializer::SerializeExternalString() {
+ // Instead of serializing this as an external string, we serialize
+ // an imaginary sequential string with the same content.
+ Isolate* isolate = serializer_->isolate();
+ DCHECK(object_->IsExternalString());
+ DCHECK(object_->map() != isolate->heap()->native_source_string_map());
+ ExternalString* string = ExternalString::cast(object_);
+ int length = string->length();
+ Map* map;
+ int content_size;
+ int allocation_size;
+ const byte* resource;
+ // Find the map and size for the imaginary sequential string.
+ bool internalized = object_->IsInternalizedString();
+ if (object_->IsExternalOneByteString()) {
+ map = internalized ? isolate->heap()->one_byte_internalized_string_map()
+ : isolate->heap()->one_byte_string_map();
+ allocation_size = SeqOneByteString::SizeFor(length);
+ content_size = length * kCharSize;
+ resource = reinterpret_cast<const byte*>(
+ ExternalOneByteString::cast(string)->resource()->data());
+ } else {
+ map = internalized ? isolate->heap()->internalized_string_map()
+ : isolate->heap()->string_map();
+ allocation_size = SeqTwoByteString::SizeFor(length);
+ content_size = length * kShortSize;
+ resource = reinterpret_cast<const byte*>(
+ ExternalTwoByteString::cast(string)->resource()->data());
+ }
+
+ AllocationSpace space = (allocation_size > Page::kMaxRegularHeapObjectSize)
+ ? LO_SPACE
+ : OLD_SPACE;
+ SerializePrologue(space, allocation_size, map);
+
+ // Output the rest of the imaginary string.
+ int bytes_to_output = allocation_size - HeapObject::kHeaderSize;
+
+ // Output raw data header. Do not bother with common raw length cases here.
+ sink_->Put(kVariableRawData, "RawDataForString");
+ sink_->PutInt(bytes_to_output, "length");
+
+ // Serialize string header (except for map).
+ Address string_start = string->address();
+ for (int i = HeapObject::kHeaderSize; i < SeqString::kHeaderSize; i++) {
+ sink_->PutSection(string_start[i], "StringHeader");
+ }
+
+ // Serialize string content.
+ sink_->PutRaw(resource, content_size, "StringContent");
+
+ // Since the allocation size is rounded up to object alignment, there
+ // maybe left-over bytes that need to be padded.
+ int padding_size = allocation_size - SeqString::kHeaderSize - content_size;
+ DCHECK(0 <= padding_size && padding_size < kObjectAlignment);
+ for (int i = 0; i < padding_size; i++) sink_->PutSection(0, "StringPadding");
+
+ sink_->Put(kSkip, "SkipAfterString");
+ sink_->PutInt(bytes_to_output, "SkipDistance");
+}
+
+// Clear and later restore the next link in the weak cell or allocation site.
+// TODO(all): replace this with proper iteration of weak slots in serializer.
+class UnlinkWeakNextScope {
+ public:
+ explicit UnlinkWeakNextScope(HeapObject* object) : object_(nullptr) {
+ if (object->IsWeakCell()) {
+ object_ = object;
+ next_ = WeakCell::cast(object)->next();
+ WeakCell::cast(object)->clear_next(object->GetHeap()->the_hole_value());
+ } else if (object->IsAllocationSite()) {
+ object_ = object;
+ next_ = AllocationSite::cast(object)->weak_next();
+ AllocationSite::cast(object)->set_weak_next(
+ object->GetHeap()->undefined_value());
+ }
+ }
+
+ ~UnlinkWeakNextScope() {
+ if (object_ != nullptr) {
+ if (object_->IsWeakCell()) {
+ WeakCell::cast(object_)->set_next(next_, UPDATE_WEAK_WRITE_BARRIER);
+ } else {
+ AllocationSite::cast(object_)->set_weak_next(next_,
+ UPDATE_WEAK_WRITE_BARRIER);
+ }
+ }
+ }
+
+ private:
+ HeapObject* object_;
+ Object* next_;
+ DisallowHeapAllocation no_gc_;
+};
+
+void Serializer::ObjectSerializer::Serialize() {
+ if (FLAG_trace_serializer) {
+ PrintF(" Encoding heap object: ");
+ object_->ShortPrint();
+ PrintF("\n");
+ }
+
+ // We cannot serialize typed array objects correctly.
+ DCHECK(!object_->IsJSTypedArray());
+
+ // We don't expect fillers.
+ DCHECK(!object_->IsFiller());
+
+ if (object_->IsScript()) {
+ // Clear cached line ends.
+ Object* undefined = serializer_->isolate()->heap()->undefined_value();
+ Script::cast(object_)->set_line_ends(undefined);
+ }
+
+ if (object_->IsExternalString()) {
+ Heap* heap = serializer_->isolate()->heap();
+ if (object_->map() != heap->native_source_string_map()) {
+ // Usually we cannot recreate resources for external strings. To work
+ // around this, external strings are serialized to look like ordinary
+ // sequential strings.
+ // The exception are native source code strings, since we can recreate
+ // their resources. In that case we fall through and leave it to
+ // VisitExternalOneByteString further down.
+ SerializeExternalString();
+ return;
+ }
+ }
+
+ int size = object_->Size();
+ Map* map = object_->map();
+ AllocationSpace space =
+ MemoryChunk::FromAddress(object_->address())->owner()->identity();
+ SerializePrologue(space, size, map);
+
+ // Serialize the rest of the object.
+ CHECK_EQ(0, bytes_processed_so_far_);
+ bytes_processed_so_far_ = kPointerSize;
+
+ RecursionScope recursion(serializer_);
+ // Objects that are immediately post processed during deserialization
+ // cannot be deferred, since post processing requires the object content.
+ if (recursion.ExceedsMaximum() && CanBeDeferred(object_)) {
+ serializer_->QueueDeferredObject(object_);
+ sink_->Put(kDeferred, "Deferring object content");
+ return;
+ }
+
+ UnlinkWeakNextScope unlink_weak_next(object_);
+
+ object_->IterateBody(map->instance_type(), size, this);
+ OutputRawData(object_->address() + size);
+}
+
+void Serializer::ObjectSerializer::SerializeDeferred() {
+ if (FLAG_trace_serializer) {
+ PrintF(" Encoding deferred heap object: ");
+ object_->ShortPrint();
+ PrintF("\n");
+ }
+
+ int size = object_->Size();
+ Map* map = object_->map();
+ BackReference reference = serializer_->back_reference_map()->Lookup(object_);
+
+ // Serialize the rest of the object.
+ CHECK_EQ(0, bytes_processed_so_far_);
+ bytes_processed_so_far_ = kPointerSize;
+
+ serializer_->PutAlignmentPrefix(object_);
+ sink_->Put(kNewObject + reference.space(), "deferred object");
+ serializer_->PutBackReference(object_, reference);
+ sink_->PutInt(size >> kPointerSizeLog2, "deferred object size");
+
+ UnlinkWeakNextScope unlink_weak_next(object_);
+
+ object_->IterateBody(map->instance_type(), size, this);
+ OutputRawData(object_->address() + size);
+}
+
+void Serializer::ObjectSerializer::VisitPointers(Object** start, Object** end) {
+ Object** current = start;
+ while (current < end) {
+ while (current < end && (*current)->IsSmi()) current++;
+ if (current < end) OutputRawData(reinterpret_cast<Address>(current));
+
+ while (current < end && !(*current)->IsSmi()) {
+ HeapObject* current_contents = HeapObject::cast(*current);
+ int root_index = serializer_->root_index_map()->Lookup(current_contents);
+ // Repeats are not subject to the write barrier so we can only use
+ // immortal immovable root members. They are never in new space.
+ if (current != start && root_index != RootIndexMap::kInvalidRootIndex &&
+ Heap::RootIsImmortalImmovable(root_index) &&
+ current_contents == current[-1]) {
+ DCHECK(!serializer_->isolate()->heap()->InNewSpace(current_contents));
+ int repeat_count = 1;
+ while (&current[repeat_count] < end - 1 &&
+ current[repeat_count] == current_contents) {
+ repeat_count++;
+ }
+ current += repeat_count;
+ bytes_processed_so_far_ += repeat_count * kPointerSize;
+ if (repeat_count > kNumberOfFixedRepeat) {
+ sink_->Put(kVariableRepeat, "VariableRepeat");
+ sink_->PutInt(repeat_count, "repeat count");
+ } else {
+ sink_->Put(kFixedRepeatStart + repeat_count, "FixedRepeat");
+ }
+ } else {
+ serializer_->SerializeObject(current_contents, kPlain, kStartOfObject,
+ 0);
+ bytes_processed_so_far_ += kPointerSize;
+ current++;
+ }
+ }
+ }
+}
+
+void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
+ int skip = OutputRawData(rinfo->target_address_address(),
+ kCanReturnSkipInsteadOfSkipping);
+ HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
+ Object* object = rinfo->target_object();
+ serializer_->SerializeObject(HeapObject::cast(object), how_to_code,
+ kStartOfObject, skip);
+ bytes_processed_so_far_ += rinfo->target_address_size();
+}
+
+void Serializer::ObjectSerializer::VisitExternalReference(Address* p) {
+ int skip = OutputRawData(reinterpret_cast<Address>(p),
+ kCanReturnSkipInsteadOfSkipping);
+ sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
+ sink_->PutInt(skip, "SkipB4ExternalRef");
+ Address target = *p;
+ sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
+ bytes_processed_so_far_ += kPointerSize;
+}
+
+void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) {
+ int skip = OutputRawData(rinfo->target_address_address(),
+ kCanReturnSkipInsteadOfSkipping);
+ HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
+ sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
+ sink_->PutInt(skip, "SkipB4ExternalRef");
+ Address target = rinfo->target_external_reference();
+ sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
+ bytes_processed_so_far_ += rinfo->target_address_size();
+}
+
+void Serializer::ObjectSerializer::VisitInternalReference(RelocInfo* rinfo) {
+ // We can only reference to internal references of code that has been output.
+ DCHECK(object_->IsCode() && code_has_been_output_);
+ // We do not use skip from last patched pc to find the pc to patch, since
+ // target_address_address may not return addresses in ascending order when
+ // used for internal references. External references may be stored at the
+ // end of the code in the constant pool, whereas internal references are
+ // inline. That would cause the skip to be negative. Instead, we store the
+ // offset from code entry.
+ Address entry = Code::cast(object_)->entry();
+ intptr_t pc_offset = rinfo->target_internal_reference_address() - entry;
+ intptr_t target_offset = rinfo->target_internal_reference() - entry;
+ DCHECK(0 <= pc_offset &&
+ pc_offset <= Code::cast(object_)->instruction_size());
+ DCHECK(0 <= target_offset &&
+ target_offset <= Code::cast(object_)->instruction_size());
+ sink_->Put(rinfo->rmode() == RelocInfo::INTERNAL_REFERENCE
+ ? kInternalReference
+ : kInternalReferenceEncoded,
+ "InternalRef");
+ sink_->PutInt(static_cast<uintptr_t>(pc_offset), "internal ref address");
+ sink_->PutInt(static_cast<uintptr_t>(target_offset), "internal ref value");
+}
+
+void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
+ int skip = OutputRawData(rinfo->target_address_address(),
+ kCanReturnSkipInsteadOfSkipping);
+ HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
+ sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
+ sink_->PutInt(skip, "SkipB4ExternalRef");
+ Address target = rinfo->target_address();
+ sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
+ bytes_processed_so_far_ += rinfo->target_address_size();
+}
+
+void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
+ int skip = OutputRawData(rinfo->target_address_address(),
+ kCanReturnSkipInsteadOfSkipping);
+ Code* object = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ serializer_->SerializeObject(object, kFromCode, kInnerPointer, skip);
+ bytes_processed_so_far_ += rinfo->target_address_size();
+}
+
+void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
+ int skip = OutputRawData(entry_address, kCanReturnSkipInsteadOfSkipping);
+ Code* object = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
+ serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
+ bytes_processed_so_far_ += kPointerSize;
+}
+
+void Serializer::ObjectSerializer::VisitCell(RelocInfo* rinfo) {
+ int skip = OutputRawData(rinfo->pc(), kCanReturnSkipInsteadOfSkipping);
+ Cell* object = Cell::cast(rinfo->target_cell());
+ serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
+ bytes_processed_so_far_ += kPointerSize;
+}
+
+bool Serializer::ObjectSerializer::SerializeExternalNativeSourceString(
+ int builtin_count,
+ v8::String::ExternalOneByteStringResource** resource_pointer,
+ FixedArray* source_cache, int resource_index) {
+ for (int i = 0; i < builtin_count; i++) {
+ Object* source = source_cache->get(i);
+ if (!source->IsUndefined()) {
+ ExternalOneByteString* string = ExternalOneByteString::cast(source);
+ typedef v8::String::ExternalOneByteStringResource Resource;
+ const Resource* resource = string->resource();
+ if (resource == *resource_pointer) {
+ sink_->Put(resource_index, "NativesStringResource");
+ sink_->PutSection(i, "NativesStringResourceEnd");
+ bytes_processed_so_far_ += sizeof(resource);
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+void Serializer::ObjectSerializer::VisitExternalOneByteString(
+ v8::String::ExternalOneByteStringResource** resource_pointer) {
+ Address references_start = reinterpret_cast<Address>(resource_pointer);
+ OutputRawData(references_start);
+ if (SerializeExternalNativeSourceString(
+ Natives::GetBuiltinsCount(), resource_pointer,
+ Natives::GetSourceCache(serializer_->isolate()->heap()),
+ kNativesStringResource)) {
+ return;
+ }
+ if (SerializeExternalNativeSourceString(
+ ExtraNatives::GetBuiltinsCount(), resource_pointer,
+ ExtraNatives::GetSourceCache(serializer_->isolate()->heap()),
+ kExtraNativesStringResource)) {
+ return;
+ }
+ // One of the strings in the natives cache should match the resource. We
+ // don't expect any other kinds of external strings here.
+ UNREACHABLE();
+}
+
+Address Serializer::ObjectSerializer::PrepareCode() {
+ // To make snapshots reproducible, we make a copy of the code object
+ // and wipe all pointers in the copy, which we then serialize.
+ Code* original = Code::cast(object_);
+ Code* code = serializer_->CopyCode(original);
+ // Code age headers are not serializable.
+ code->MakeYoung(serializer_->isolate());
+ int mode_mask = RelocInfo::kCodeTargetMask |
+ RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
+ RelocInfo* rinfo = it.rinfo();
+ rinfo->WipeOut();
+ }
+ // We need to wipe out the header fields *after* wiping out the
+ // relocations, because some of these fields are needed for the latter.
+ code->WipeOutHeader();
+ return code->address();
+}
+
+int Serializer::ObjectSerializer::OutputRawData(
+ Address up_to, Serializer::ObjectSerializer::ReturnSkip return_skip) {
+ Address object_start = object_->address();
+ int base = bytes_processed_so_far_;
+ int up_to_offset = static_cast<int>(up_to - object_start);
+ int to_skip = up_to_offset - bytes_processed_so_far_;
+ int bytes_to_output = to_skip;
+ bytes_processed_so_far_ += to_skip;
+ // This assert will fail if the reloc info gives us the target_address_address
+ // locations in a non-ascending order. Luckily that doesn't happen.
+ DCHECK(to_skip >= 0);
+ bool outputting_code = false;
+ bool is_code_object = object_->IsCode();
+ if (to_skip != 0 && is_code_object && !code_has_been_output_) {
+ // Output the code all at once and fix later.
+ bytes_to_output = object_->Size() + to_skip - bytes_processed_so_far_;
+ outputting_code = true;
+ code_has_been_output_ = true;
+ }
+ if (bytes_to_output != 0 && (!is_code_object || outputting_code)) {
+ if (!outputting_code && bytes_to_output == to_skip &&
+ IsAligned(bytes_to_output, kPointerAlignment) &&
+ bytes_to_output <= kNumberOfFixedRawData * kPointerSize) {
+ int size_in_words = bytes_to_output >> kPointerSizeLog2;
+ sink_->PutSection(kFixedRawDataStart + size_in_words, "FixedRawData");
+ to_skip = 0; // This instruction includes skip.
+ } else {
+ // We always end up here if we are outputting the code of a code object.
+ sink_->Put(kVariableRawData, "VariableRawData");
+ sink_->PutInt(bytes_to_output, "length");
+ }
+
+ if (is_code_object) object_start = PrepareCode();
+
+ const char* description = is_code_object ? "Code" : "Byte";
+ sink_->PutRaw(object_start + base, bytes_to_output, description);
+ }
+ if (to_skip != 0 && return_skip == kIgnoringReturn) {
+ sink_->Put(kSkip, "Skip");
+ sink_->PutInt(to_skip, "SkipDistance");
+ to_skip = 0;
+ }
+ return to_skip;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/snapshot/serializer.h b/deps/v8/src/snapshot/serializer.h
new file mode 100644
index 0000000000..eccbaabf5b
--- /dev/null
+++ b/deps/v8/src/snapshot/serializer.h
@@ -0,0 +1,321 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_SERIALIZER_H_
+#define V8_SNAPSHOT_SERIALIZER_H_
+
+#include "src/isolate.h"
+#include "src/log.h"
+#include "src/objects.h"
+#include "src/snapshot/serializer-common.h"
+#include "src/snapshot/snapshot-source-sink.h"
+
+namespace v8 {
+namespace internal {
+
+class CodeAddressMap : public CodeEventLogger {
+ public:
+ explicit CodeAddressMap(Isolate* isolate) : isolate_(isolate) {
+ isolate->logger()->addCodeEventListener(this);
+ }
+
+ ~CodeAddressMap() override {
+ isolate_->logger()->removeCodeEventListener(this);
+ }
+
+ void CodeMoveEvent(AbstractCode* from, Address to) override {
+ address_to_name_map_.Move(from->address(), to);
+ }
+
+ void CodeDisableOptEvent(AbstractCode* code,
+ SharedFunctionInfo* shared) override {}
+
+ const char* Lookup(Address address) {
+ return address_to_name_map_.Lookup(address);
+ }
+
+ private:
+ class NameMap {
+ public:
+ NameMap() : impl_(HashMap::PointersMatch) {}
+
+ ~NameMap() {
+ for (HashMap::Entry* p = impl_.Start(); p != NULL; p = impl_.Next(p)) {
+ DeleteArray(static_cast<const char*>(p->value));
+ }
+ }
+
+ void Insert(Address code_address, const char* name, int name_size) {
+ HashMap::Entry* entry = FindOrCreateEntry(code_address);
+ if (entry->value == NULL) {
+ entry->value = CopyName(name, name_size);
+ }
+ }
+
+ const char* Lookup(Address code_address) {
+ HashMap::Entry* entry = FindEntry(code_address);
+ return (entry != NULL) ? static_cast<const char*>(entry->value) : NULL;
+ }
+
+ void Remove(Address code_address) {
+ HashMap::Entry* entry = FindEntry(code_address);
+ if (entry != NULL) {
+ DeleteArray(static_cast<char*>(entry->value));
+ RemoveEntry(entry);
+ }
+ }
+
+ void Move(Address from, Address to) {
+ if (from == to) return;
+ HashMap::Entry* from_entry = FindEntry(from);
+ DCHECK(from_entry != NULL);
+ void* value = from_entry->value;
+ RemoveEntry(from_entry);
+ HashMap::Entry* to_entry = FindOrCreateEntry(to);
+ DCHECK(to_entry->value == NULL);
+ to_entry->value = value;
+ }
+
+ private:
+ static char* CopyName(const char* name, int name_size) {
+ char* result = NewArray<char>(name_size + 1);
+ for (int i = 0; i < name_size; ++i) {
+ char c = name[i];
+ if (c == '\0') c = ' ';
+ result[i] = c;
+ }
+ result[name_size] = '\0';
+ return result;
+ }
+
+ HashMap::Entry* FindOrCreateEntry(Address code_address) {
+ return impl_.LookupOrInsert(code_address,
+ ComputePointerHash(code_address));
+ }
+
+ HashMap::Entry* FindEntry(Address code_address) {
+ return impl_.Lookup(code_address, ComputePointerHash(code_address));
+ }
+
+ void RemoveEntry(HashMap::Entry* entry) {
+ impl_.Remove(entry->key, entry->hash);
+ }
+
+ HashMap impl_;
+
+ DISALLOW_COPY_AND_ASSIGN(NameMap);
+ };
+
+ void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo*,
+ const char* name, int length) override {
+ address_to_name_map_.Insert(code->address(), name, length);
+ }
+
+ NameMap address_to_name_map_;
+ Isolate* isolate_;
+};
+
+// There can be only one serializer per V8 process.
+class Serializer : public SerializerDeserializer {
+ public:
+ Serializer(Isolate* isolate, SnapshotByteSink* sink);
+ ~Serializer() override;
+
+ void EncodeReservations(List<SerializedData::Reservation>* out) const;
+
+ void SerializeDeferredObjects();
+
+ Isolate* isolate() const { return isolate_; }
+
+ BackReferenceMap* back_reference_map() { return &back_reference_map_; }
+ RootIndexMap* root_index_map() { return &root_index_map_; }
+
+#ifdef OBJECT_PRINT
+ void CountInstanceType(Map* map, int size);
+#endif // OBJECT_PRINT
+
+ protected:
+ class ObjectSerializer;
+ class RecursionScope {
+ public:
+ explicit RecursionScope(Serializer* serializer) : serializer_(serializer) {
+ serializer_->recursion_depth_++;
+ }
+ ~RecursionScope() { serializer_->recursion_depth_--; }
+ bool ExceedsMaximum() {
+ return serializer_->recursion_depth_ >= kMaxRecursionDepth;
+ }
+
+ private:
+ static const int kMaxRecursionDepth = 32;
+ Serializer* serializer_;
+ };
+
+ virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip) = 0;
+
+ void VisitPointers(Object** start, Object** end) override;
+
+ void PutRoot(int index, HeapObject* object, HowToCode how, WhereToPoint where,
+ int skip);
+
+ void PutSmi(Smi* smi);
+
+ void PutBackReference(HeapObject* object, BackReference reference);
+
+ // Emit alignment prefix if necessary, return required padding space in bytes.
+ int PutAlignmentPrefix(HeapObject* object);
+
+ // Returns true if the object was successfully serialized.
+ bool SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip);
+
+ inline void FlushSkip(int skip) {
+ if (skip != 0) {
+ sink_->Put(kSkip, "SkipFromSerializeObject");
+ sink_->PutInt(skip, "SkipDistanceFromSerializeObject");
+ }
+ }
+
+ bool BackReferenceIsAlreadyAllocated(BackReference back_reference);
+
+ // This will return the space for an object.
+ BackReference AllocateLargeObject(int size);
+ BackReference Allocate(AllocationSpace space, int size);
+ int EncodeExternalReference(Address addr) {
+ return external_reference_encoder_.Encode(addr);
+ }
+
+ bool HasNotExceededFirstPageOfEachSpace();
+
+ // GetInt reads 4 bytes at once, requiring padding at the end.
+ void Pad();
+
+ // We may not need the code address map for logging for every instance
+ // of the serializer. Initialize it on demand.
+ void InitializeCodeAddressMap();
+
+ Code* CopyCode(Code* code);
+
+ inline uint32_t max_chunk_size(int space) const {
+ DCHECK_LE(0, space);
+ DCHECK_LT(space, kNumberOfSpaces);
+ return max_chunk_size_[space];
+ }
+
+ SnapshotByteSink* sink() const { return sink_; }
+
+ void QueueDeferredObject(HeapObject* obj) {
+ DCHECK(back_reference_map_.Lookup(obj).is_valid());
+ deferred_objects_.Add(obj);
+ }
+
+ void OutputStatistics(const char* name);
+
+ Isolate* isolate_;
+
+ SnapshotByteSink* sink_;
+ ExternalReferenceEncoder external_reference_encoder_;
+
+ BackReferenceMap back_reference_map_;
+ RootIndexMap root_index_map_;
+
+ int recursion_depth_;
+
+ friend class Deserializer;
+ friend class ObjectSerializer;
+ friend class RecursionScope;
+ friend class SnapshotData;
+
+ private:
+ CodeAddressMap* code_address_map_;
+ // Objects from the same space are put into chunks for bulk-allocation
+ // when deserializing. We have to make sure that each chunk fits into a
+ // page. So we track the chunk size in pending_chunk_ of a space, but
+ // when it exceeds a page, we complete the current chunk and start a new one.
+ uint32_t pending_chunk_[kNumberOfPreallocatedSpaces];
+ List<uint32_t> completed_chunks_[kNumberOfPreallocatedSpaces];
+ uint32_t max_chunk_size_[kNumberOfPreallocatedSpaces];
+
+ // We map serialized large objects to indexes for back-referencing.
+ uint32_t large_objects_total_size_;
+ uint32_t seen_large_objects_index_;
+
+ List<byte> code_buffer_;
+
+ // To handle stack overflow.
+ List<HeapObject*> deferred_objects_;
+
+#ifdef OBJECT_PRINT
+ static const int kInstanceTypes = 256;
+ int* instance_type_count_;
+ size_t* instance_type_size_;
+#endif // OBJECT_PRINT
+
+ DISALLOW_COPY_AND_ASSIGN(Serializer);
+};
+
+class Serializer::ObjectSerializer : public ObjectVisitor {
+ public:
+ ObjectSerializer(Serializer* serializer, HeapObject* obj,
+ SnapshotByteSink* sink, HowToCode how_to_code,
+ WhereToPoint where_to_point)
+ : serializer_(serializer),
+ object_(obj),
+ sink_(sink),
+ reference_representation_(how_to_code + where_to_point),
+ bytes_processed_so_far_(0),
+ code_has_been_output_(false) {}
+ ~ObjectSerializer() override {}
+ void Serialize();
+ void SerializeDeferred();
+ void VisitPointers(Object** start, Object** end) override;
+ void VisitEmbeddedPointer(RelocInfo* target) override;
+ void VisitExternalReference(Address* p) override;
+ void VisitExternalReference(RelocInfo* rinfo) override;
+ void VisitInternalReference(RelocInfo* rinfo) override;
+ void VisitCodeTarget(RelocInfo* target) override;
+ void VisitCodeEntry(Address entry_address) override;
+ void VisitCell(RelocInfo* rinfo) override;
+ void VisitRuntimeEntry(RelocInfo* reloc) override;
+ // Used for seralizing the external strings that hold the natives source.
+ void VisitExternalOneByteString(
+ v8::String::ExternalOneByteStringResource** resource) override;
+ // We can't serialize a heap with external two byte strings.
+ void VisitExternalTwoByteString(
+ v8::String::ExternalStringResource** resource) override {
+ UNREACHABLE();
+ }
+
+ private:
+ void SerializePrologue(AllocationSpace space, int size, Map* map);
+
+ bool SerializeExternalNativeSourceString(
+ int builtin_count,
+ v8::String::ExternalOneByteStringResource** resource_pointer,
+ FixedArray* source_cache, int resource_index);
+
+ enum ReturnSkip { kCanReturnSkipInsteadOfSkipping, kIgnoringReturn };
+ // This function outputs or skips the raw data between the last pointer and
+ // up to the current position. It optionally can just return the number of
+ // bytes to skip instead of performing a skip instruction, in case the skip
+ // can be merged into the next instruction.
+ int OutputRawData(Address up_to, ReturnSkip return_skip = kIgnoringReturn);
+ // External strings are serialized in a way to resemble sequential strings.
+ void SerializeExternalString();
+
+ Address PrepareCode();
+
+ Serializer* serializer_;
+ HeapObject* object_;
+ SnapshotByteSink* sink_;
+ int reference_representation_;
+ int bytes_processed_so_far_;
+ bool code_has_been_output_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_SERIALIZER_H_
diff --git a/deps/v8/src/snapshot/snapshot-common.cc b/deps/v8/src/snapshot/snapshot-common.cc
index 97e7c6b506..eb3bdb5604 100644
--- a/deps/v8/src/snapshot/snapshot-common.cc
+++ b/deps/v8/src/snapshot/snapshot-common.cc
@@ -9,6 +9,9 @@
#include "src/api.h"
#include "src/base/platform/platform.h"
#include "src/full-codegen/full-codegen.h"
+#include "src/snapshot/deserializer.h"
+#include "src/snapshot/snapshot-source-sink.h"
+#include "src/version.h"
namespace v8 {
namespace internal {
@@ -228,5 +231,52 @@ Vector<const byte> Snapshot::ExtractContextData(const v8::StartupData* data) {
int context_length = data->raw_size - context_offset;
return Vector<const byte>(context_data, context_length);
}
+
+SnapshotData::SnapshotData(const Serializer& ser) {
+ DisallowHeapAllocation no_gc;
+ List<Reservation> reservations;
+ ser.EncodeReservations(&reservations);
+ const List<byte>& payload = ser.sink()->data();
+
+ // Calculate sizes.
+ int reservation_size = reservations.length() * kInt32Size;
+ int size = kHeaderSize + reservation_size + payload.length();
+
+ // Allocate backing store and create result data.
+ AllocateData(size);
+
+ // Set header values.
+ SetMagicNumber(ser.isolate());
+ SetHeaderValue(kCheckSumOffset, Version::Hash());
+ SetHeaderValue(kNumReservationsOffset, reservations.length());
+ SetHeaderValue(kPayloadLengthOffset, payload.length());
+
+ // Copy reservation chunk sizes.
+ CopyBytes(data_ + kHeaderSize, reinterpret_cast<byte*>(reservations.begin()),
+ reservation_size);
+
+ // Copy serialized data.
+ CopyBytes(data_ + kHeaderSize + reservation_size, payload.begin(),
+ static_cast<size_t>(payload.length()));
+}
+
+bool SnapshotData::IsSane() {
+ return GetHeaderValue(kCheckSumOffset) == Version::Hash();
+}
+
+Vector<const SerializedData::Reservation> SnapshotData::Reservations() const {
+ return Vector<const Reservation>(
+ reinterpret_cast<const Reservation*>(data_ + kHeaderSize),
+ GetHeaderValue(kNumReservationsOffset));
+}
+
+Vector<const byte> SnapshotData::Payload() const {
+ int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size;
+ const byte* payload = data_ + kHeaderSize + reservations_size;
+ int length = GetHeaderValue(kPayloadLengthOffset);
+ DCHECK_EQ(data_ + size_, payload + length);
+ return Vector<const byte>(payload, length);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/snapshot-external.cc b/deps/v8/src/snapshot/snapshot-external.cc
index 1d5476cb5e..67dcb60f0b 100644
--- a/deps/v8/src/snapshot/snapshot-external.cc
+++ b/deps/v8/src/snapshot/snapshot-external.cc
@@ -7,7 +7,6 @@
#include "src/snapshot/snapshot.h"
#include "src/base/platform/mutex.h"
-#include "src/snapshot/serialize.h"
#include "src/snapshot/snapshot-source-sink.h"
#include "src/v8.h" // for V8::Initialize
diff --git a/deps/v8/src/snapshot/snapshot-source-sink.cc b/deps/v8/src/snapshot/snapshot-source-sink.cc
index 812de5e2a8..cee5875310 100644
--- a/deps/v8/src/snapshot/snapshot-source-sink.cc
+++ b/deps/v8/src/snapshot/snapshot-source-sink.cc
@@ -7,7 +7,6 @@
#include "src/base/logging.h"
#include "src/handles-inl.h"
-#include "src/snapshot/serialize.h" // for SerializerDeserializer::nop()
namespace v8 {
diff --git a/deps/v8/src/snapshot/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index d99f118bff..c648d7595e 100644
--- a/deps/v8/src/snapshot/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -5,7 +5,8 @@
#ifndef V8_SNAPSHOT_SNAPSHOT_H_
#define V8_SNAPSHOT_SNAPSHOT_H_
-#include "src/snapshot/serialize.h"
+#include "src/snapshot/partial-serializer.h"
+#include "src/snapshot/startup-serializer.h"
namespace v8 {
namespace internal {
@@ -88,6 +89,41 @@ class Snapshot : public AllStatic {
void SetSnapshotFromFile(StartupData* snapshot_blob);
#endif
+// Wrapper around reservation sizes and the serialization payload.
+class SnapshotData : public SerializedData {
+ public:
+ // Used when producing.
+ explicit SnapshotData(const Serializer& ser);
+
+ // Used when consuming.
+ explicit SnapshotData(const Vector<const byte> snapshot)
+ : SerializedData(const_cast<byte*>(snapshot.begin()), snapshot.length()) {
+ CHECK(IsSane());
+ }
+
+ Vector<const Reservation> Reservations() const;
+ Vector<const byte> Payload() const;
+
+ Vector<const byte> RawData() const {
+ return Vector<const byte>(data_, size_);
+ }
+
+ private:
+ bool IsSane();
+
+ // The data header consists of uint32_t-sized entries:
+ // [0] magic number and external reference count
+ // [1] version hash
+ // [2] number of reservation size entries
+ // [3] payload length
+ // ... reservations
+ // ... serialized payload
+ static const int kCheckSumOffset = kMagicNumberOffset + kInt32Size;
+ static const int kNumReservationsOffset = kCheckSumOffset + kInt32Size;
+ static const int kPayloadLengthOffset = kNumReservationsOffset + kInt32Size;
+ static const int kHeaderSize = kPayloadLengthOffset + kInt32Size;
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/startup-serializer.cc b/deps/v8/src/snapshot/startup-serializer.cc
new file mode 100644
index 0000000000..fab01f51f8
--- /dev/null
+++ b/deps/v8/src/snapshot/startup-serializer.cc
@@ -0,0 +1,167 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/startup-serializer.h"
+
+#include "src/objects-inl.h"
+#include "src/v8threads.h"
+
+namespace v8 {
+namespace internal {
+
+StartupSerializer::StartupSerializer(
+ Isolate* isolate, SnapshotByteSink* sink,
+ FunctionCodeHandling function_code_handling)
+ : Serializer(isolate, sink),
+ function_code_handling_(function_code_handling),
+ serializing_builtins_(false) {
+ InitializeCodeAddressMap();
+}
+
+StartupSerializer::~StartupSerializer() {
+ OutputStatistics("StartupSerializer");
+}
+
+void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip) {
+ DCHECK(!obj->IsJSFunction());
+
+ if (function_code_handling_ == CLEAR_FUNCTION_CODE) {
+ if (obj->IsCode()) {
+ Code* code = Code::cast(obj);
+ // If the function code is compiled (either as native code or bytecode),
+ // replace it with lazy-compile builtin. Only exception is when we are
+ // serializing the canonical interpreter-entry-trampoline builtin.
+ if (code->kind() == Code::FUNCTION ||
+ (!serializing_builtins_ && code->is_interpreter_entry_trampoline())) {
+ obj = isolate()->builtins()->builtin(Builtins::kCompileLazy);
+ }
+ } else if (obj->IsBytecodeArray()) {
+ obj = isolate()->heap()->undefined_value();
+ }
+ } else if (obj->IsCode()) {
+ DCHECK_EQ(KEEP_FUNCTION_CODE, function_code_handling_);
+ Code* code = Code::cast(obj);
+ if (code->kind() == Code::FUNCTION) {
+ code->ClearInlineCaches();
+ code->set_profiler_ticks(0);
+ }
+ }
+
+ int root_index = root_index_map_.Lookup(obj);
+ // We can only encode roots as such if it has already been serialized.
+ // That applies to root indices below the wave front.
+ if (root_index != RootIndexMap::kInvalidRootIndex) {
+ if (root_has_been_serialized_.test(root_index)) {
+ PutRoot(root_index, obj, how_to_code, where_to_point, skip);
+ return;
+ }
+ }
+
+ if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
+
+ FlushSkip(skip);
+
+ // Object has not yet been serialized. Serialize it here.
+ ObjectSerializer object_serializer(this, obj, sink_, how_to_code,
+ where_to_point);
+ object_serializer.Serialize();
+
+ if (serializing_immortal_immovables_roots_ &&
+ root_index != RootIndexMap::kInvalidRootIndex) {
+ // Make sure that the immortal immovable root has been included in the first
+ // chunk of its reserved space , so that it is deserialized onto the first
+ // page of its space and stays immortal immovable.
+ BackReference ref = back_reference_map_.Lookup(obj);
+ CHECK(ref.is_valid() && ref.chunk_index() == 0);
+ }
+}
+
+void StartupSerializer::SerializeWeakReferencesAndDeferred() {
+ // This comes right after serialization of the partial snapshot, where we
+ // add entries to the partial snapshot cache of the startup snapshot. Add
+ // one entry with 'undefined' to terminate the partial snapshot cache.
+ Object* undefined = isolate()->heap()->undefined_value();
+ VisitPointer(&undefined);
+ isolate()->heap()->IterateWeakRoots(this, VISIT_ALL);
+ SerializeDeferredObjects();
+ Pad();
+}
+
+void StartupSerializer::Synchronize(VisitorSynchronization::SyncTag tag) {
+ // We expect the builtins tag after builtins have been serialized.
+ DCHECK(!serializing_builtins_ || tag == VisitorSynchronization::kBuiltins);
+ serializing_builtins_ = (tag == VisitorSynchronization::kHandleScope);
+ sink_->Put(kSynchronize, "Synchronize");
+}
+
+void StartupSerializer::SerializeStrongReferences() {
+ Isolate* isolate = this->isolate();
+ // No active threads.
+ CHECK_NULL(isolate->thread_manager()->FirstThreadStateInUse());
+ // No active or weak handles.
+ CHECK(isolate->handle_scope_implementer()->blocks()->is_empty());
+ CHECK_EQ(0, isolate->global_handles()->NumberOfWeakHandles());
+ CHECK_EQ(0, isolate->eternal_handles()->NumberOfHandles());
+ // We don't support serializing installed extensions.
+ CHECK(!isolate->has_installed_extensions());
+ // First visit immortal immovables to make sure they end up in the first page.
+ serializing_immortal_immovables_roots_ = true;
+ isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG_ROOT_LIST);
+ // Check that immortal immovable roots are allocated on the first page.
+ CHECK(HasNotExceededFirstPageOfEachSpace());
+ serializing_immortal_immovables_roots_ = false;
+ // Visit the rest of the strong roots.
+ // Clear the stack limits to make the snapshot reproducible.
+ // Reset it again afterwards.
+ isolate->heap()->ClearStackLimits();
+ isolate->heap()->IterateSmiRoots(this);
+ isolate->heap()->SetStackLimits();
+
+ isolate->heap()->IterateStrongRoots(this,
+ VISIT_ONLY_STRONG_FOR_SERIALIZATION);
+}
+
+void StartupSerializer::VisitPointers(Object** start, Object** end) {
+ if (start == isolate()->heap()->roots_array_start()) {
+ // Serializing the root list needs special handling:
+ // - The first pass over the root list only serializes immortal immovables.
+ // - The second pass over the root list serializes the rest.
+ // - Only root list elements that have been fully serialized can be
+ // referenced via as root by using kRootArray bytecodes.
+ int skip = 0;
+ for (Object** current = start; current < end; current++) {
+ int root_index = static_cast<int>(current - start);
+ if (RootShouldBeSkipped(root_index)) {
+ skip += kPointerSize;
+ continue;
+ } else {
+ if ((*current)->IsSmi()) {
+ FlushSkip(skip);
+ PutSmi(Smi::cast(*current));
+ } else {
+ SerializeObject(HeapObject::cast(*current), kPlain, kStartOfObject,
+ skip);
+ }
+ root_has_been_serialized_.set(root_index);
+ skip = 0;
+ }
+ }
+ FlushSkip(skip);
+ } else {
+ Serializer::VisitPointers(start, end);
+ }
+}
+
+bool StartupSerializer::RootShouldBeSkipped(int root_index) {
+ if (root_index == Heap::kStackLimitRootIndex ||
+ root_index == Heap::kRealStackLimitRootIndex) {
+ return true;
+ }
+ return Heap::RootIsImmortalImmovable(root_index) !=
+ serializing_immortal_immovables_roots_;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/snapshot/startup-serializer.h b/deps/v8/src/snapshot/startup-serializer.h
new file mode 100644
index 0000000000..71b8475469
--- /dev/null
+++ b/deps/v8/src/snapshot/startup-serializer.h
@@ -0,0 +1,55 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_STARTUP_SERIALIZER_H_
+#define V8_SNAPSHOT_STARTUP_SERIALIZER_H_
+
+#include <bitset>
+#include "src/snapshot/serializer.h"
+
+namespace v8 {
+namespace internal {
+
+class StartupSerializer : public Serializer {
+ public:
+ enum FunctionCodeHandling { CLEAR_FUNCTION_CODE, KEEP_FUNCTION_CODE };
+
+ StartupSerializer(
+ Isolate* isolate, SnapshotByteSink* sink,
+ FunctionCodeHandling function_code_handling = CLEAR_FUNCTION_CODE);
+ ~StartupSerializer() override;
+
+ // Serialize the current state of the heap. The order is:
+ // 1) Immortal immovable roots
+ // 2) Remaining strong references.
+ // 3) Partial snapshot cache.
+ // 4) Weak references (e.g. the string table).
+ void SerializeStrongReferences();
+ void SerializeWeakReferencesAndDeferred();
+
+ private:
+ // The StartupSerializer has to serialize the root array, which is slightly
+ // different.
+ void VisitPointers(Object** start, Object** end) override;
+ void SerializeObject(HeapObject* o, HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip) override;
+ void Synchronize(VisitorSynchronization::SyncTag tag) override;
+
+ // Some roots should not be serialized, because their actual value depends on
+ // absolute addresses and they are reset after deserialization, anyway.
+ // In the first pass over the root list, we only serialize immortal immovable
+ // roots. In the second pass, we serialize the rest.
+ bool RootShouldBeSkipped(int root_index);
+
+ FunctionCodeHandling function_code_handling_;
+ bool serializing_builtins_;
+ bool serializing_immortal_immovables_roots_;
+ std::bitset<Heap::kStrongRootListLength> root_has_been_serialized_;
+ DISALLOW_COPY_AND_ASSIGN(StartupSerializer);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_STARTUP_SERIALIZER_H_
diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc
index 43be8f1f18..02f6f1c2bb 100644
--- a/deps/v8/src/string-stream.cc
+++ b/deps/v8/src/string-stream.cc
@@ -537,6 +537,7 @@ void StringStream::PrintPrototype(JSFunction* fun, Object* receiver) {
for (PrototypeIterator iter(isolate, JSObject::cast(receiver),
PrototypeIterator::START_AT_RECEIVER);
!iter.IsAtEnd(); iter.Advance()) {
+ if (iter.GetCurrent()->IsJSProxy()) break;
Object* key = iter.GetCurrent<JSObject>()->SlowReverseLookup(fun);
if (!key->IsUndefined()) {
if (!name->IsString() ||
diff --git a/deps/v8/src/tracing/trace-event.h b/deps/v8/src/tracing/trace-event.h
index d17f78589f..2b3bf9bf92 100644
--- a/deps/v8/src/tracing/trace-event.h
+++ b/deps/v8/src/tracing/trace-event.h
@@ -10,6 +10,7 @@
#include "base/trace_event/common/trace_event_common.h"
#include "include/v8-platform.h"
#include "src/base/atomicops.h"
+#include "src/base/macros.h"
// This header file defines implementation details of how the trace macros in
// trace_event_common.h collect and store trace events. Anything not
@@ -42,6 +43,11 @@ enum CategoryGroupEnabledFlags {
// macros. Use this macro to prevent Process ID mangling.
#define TRACE_ID_DONT_MANGLE(id) v8::internal::tracing::TraceID::DontMangle(id)
+// By default, trace IDs are eventually converted to a single 64-bit number. Use
+// this macro to add a scope string.
+#define TRACE_ID_WITH_SCOPE(scope, id) \
+ trace_event_internal::TraceID::WithScope(scope, id)
+
// Sets the current sample state to the given category and name (both must be
// constant strings). These states are intended for a sampling profiler.
// Implementation note: we store category and name together because we don't
@@ -106,6 +112,7 @@ enum CategoryGroupEnabledFlags {
// char phase,
// const uint8_t* category_group_enabled,
// const char* name,
+// const char* scope,
// uint64_t id,
// uint64_t bind_id,
// int num_args,
@@ -181,26 +188,26 @@ extern TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
v8::internal::tracing::AddTraceEvent( \
phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
- v8::internal::tracing::kNoId, v8::internal::tracing::kNoId, flags, \
- ##__VA_ARGS__); \
+ v8::internal::tracing::kGlobalScope, v8::internal::tracing::kNoId, \
+ v8::internal::tracing::kNoId, flags, ##__VA_ARGS__); \
} \
} while (0)
// Implementation detail: internal macro to create static category and add begin
// event if the category is enabled. Also adds the end event when the scope
// ends.
-#define INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, ...) \
- INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
- v8::internal::tracing::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
- if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
- uint64_t h = v8::internal::tracing::AddTraceEvent( \
- TRACE_EVENT_PHASE_COMPLETE, \
- INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
- v8::internal::tracing::kNoId, v8::internal::tracing::kNoId, \
- TRACE_EVENT_FLAG_NONE, ##__VA_ARGS__); \
- INTERNAL_TRACE_EVENT_UID(tracer) \
- .Initialize(INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
- h); \
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, ...) \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ v8::internal::tracing::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ uint64_t h = v8::internal::tracing::AddTraceEvent( \
+ TRACE_EVENT_PHASE_COMPLETE, \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ v8::internal::tracing::kGlobalScope, v8::internal::tracing::kNoId, \
+ v8::internal::tracing::kNoId, TRACE_EVENT_FLAG_NONE, ##__VA_ARGS__); \
+ INTERNAL_TRACE_EVENT_UID(tracer) \
+ .Initialize(INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ h); \
}
#define INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, \
@@ -214,8 +221,8 @@ extern TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
uint64_t h = v8::internal::tracing::AddTraceEvent( \
TRACE_EVENT_PHASE_COMPLETE, \
INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
- v8::internal::tracing::kNoId, trace_event_bind_id.data(), \
- trace_event_flags, ##__VA_ARGS__); \
+ v8::internal::tracing::kGlobalScope, v8::internal::tracing::kNoId, \
+ trace_event_bind_id.raw_id(), trace_event_flags, ##__VA_ARGS__); \
INTERNAL_TRACE_EVENT_UID(tracer) \
.Initialize(INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
h); \
@@ -233,8 +240,8 @@ extern TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
&trace_event_flags); \
v8::internal::tracing::AddTraceEvent( \
phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
- trace_event_trace_id.data(), v8::internal::tracing::kNoId, \
- trace_event_flags, ##__VA_ARGS__); \
+ trace_event_trace_id.scope(), trace_event_trace_id.raw_id(), \
+ v8::internal::tracing::kNoId, trace_event_flags, ##__VA_ARGS__); \
} \
} while (0)
@@ -254,6 +261,27 @@ extern TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
phase, category_group, name, id, thread_id, timestamp, flags, ...) \
UNIMPLEMENTED()
+// Enter and leave a context based on the current scope.
+#define INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context) \
+ struct INTERNAL_TRACE_EVENT_UID(ScopedContext) { \
+ public: \
+ INTERNAL_TRACE_EVENT_UID(ScopedContext)(uint64_t cid) : cid_(cid) { \
+ TRACE_EVENT_ENTER_CONTEXT(category_group, name, cid_); \
+ } \
+ ~INTERNAL_TRACE_EVENT_UID(ScopedContext)() { \
+ TRACE_EVENT_LEAVE_CONTEXT(category_group, name, cid_); \
+ } \
+ \
+ private: \
+ /* Local class friendly DISALLOW_COPY_AND_ASSIGN */ \
+ INTERNAL_TRACE_EVENT_UID(ScopedContext) \
+ (const INTERNAL_TRACE_EVENT_UID(ScopedContext)&) {} \
+ void operator=(const INTERNAL_TRACE_EVENT_UID(ScopedContext)&) {} \
+ uint64_t cid_; \
+ }; \
+ INTERNAL_TRACE_EVENT_UID(ScopedContext) \
+ INTERNAL_TRACE_EVENT_UID(scoped_context)(context.raw_id());
+
namespace v8 {
namespace internal {
namespace tracing {
@@ -261,6 +289,7 @@ namespace tracing {
// Specify these values when the corresponding argument of AddTraceEvent is not
// used.
const int kZeroNumArgs = 0;
+const decltype(nullptr) kGlobalScope = nullptr;
const uint64_t kNoId = 0;
class TraceEventHelper {
@@ -273,70 +302,108 @@ class TraceEventHelper {
// collide when the same pointer is used on different processes.
class TraceID {
public:
+ class WithScope {
+ public:
+ WithScope(const char* scope, uint64_t raw_id)
+ : scope_(scope), raw_id_(raw_id) {}
+ uint64_t raw_id() const { return raw_id_; }
+ const char* scope() const { return scope_; }
+
+ private:
+ const char* scope_ = nullptr;
+ uint64_t raw_id_;
+ };
+
class DontMangle {
public:
- explicit DontMangle(const void* id)
- : data_(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(id))) {}
- explicit DontMangle(uint64_t id) : data_(id) {}
- explicit DontMangle(unsigned int id) : data_(id) {}
- explicit DontMangle(uint16_t id) : data_(id) {}
- explicit DontMangle(unsigned char id) : data_(id) {}
- explicit DontMangle(int64_t id) : data_(static_cast<uint64_t>(id)) {}
- explicit DontMangle(int id) : data_(static_cast<uint64_t>(id)) {}
- explicit DontMangle(int16_t id) : data_(static_cast<uint64_t>(id)) {}
- explicit DontMangle(signed char id) : data_(static_cast<uint64_t>(id)) {}
- uint64_t data() const { return data_; }
+ explicit DontMangle(const void* raw_id)
+ : raw_id_(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(raw_id))) {}
+ explicit DontMangle(uint64_t raw_id) : raw_id_(raw_id) {}
+ explicit DontMangle(unsigned int raw_id) : raw_id_(raw_id) {}
+ explicit DontMangle(uint16_t raw_id) : raw_id_(raw_id) {}
+ explicit DontMangle(unsigned char raw_id) : raw_id_(raw_id) {}
+ explicit DontMangle(int64_t raw_id)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {}
+ explicit DontMangle(int raw_id) : raw_id_(static_cast<uint64_t>(raw_id)) {}
+ explicit DontMangle(int16_t raw_id)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {}
+ explicit DontMangle(signed char raw_id)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {}
+ explicit DontMangle(WithScope scoped_id)
+ : scope_(scoped_id.scope()), raw_id_(scoped_id.raw_id()) {}
+ const char* scope() const { return scope_; }
+ uint64_t raw_id() const { return raw_id_; }
private:
- uint64_t data_;
+ const char* scope_ = nullptr;
+ uint64_t raw_id_;
};
class ForceMangle {
public:
- explicit ForceMangle(uint64_t id) : data_(id) {}
- explicit ForceMangle(unsigned int id) : data_(id) {}
- explicit ForceMangle(uint16_t id) : data_(id) {}
- explicit ForceMangle(unsigned char id) : data_(id) {}
- explicit ForceMangle(int64_t id) : data_(static_cast<uint64_t>(id)) {}
- explicit ForceMangle(int id) : data_(static_cast<uint64_t>(id)) {}
- explicit ForceMangle(int16_t id) : data_(static_cast<uint64_t>(id)) {}
- explicit ForceMangle(signed char id) : data_(static_cast<uint64_t>(id)) {}
- uint64_t data() const { return data_; }
+ explicit ForceMangle(uint64_t raw_id) : raw_id_(raw_id) {}
+ explicit ForceMangle(unsigned int raw_id) : raw_id_(raw_id) {}
+ explicit ForceMangle(uint16_t raw_id) : raw_id_(raw_id) {}
+ explicit ForceMangle(unsigned char raw_id) : raw_id_(raw_id) {}
+ explicit ForceMangle(int64_t raw_id)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {}
+ explicit ForceMangle(int raw_id) : raw_id_(static_cast<uint64_t>(raw_id)) {}
+ explicit ForceMangle(int16_t raw_id)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {}
+ explicit ForceMangle(signed char raw_id)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {}
+ uint64_t raw_id() const { return raw_id_; }
private:
- uint64_t data_;
+ uint64_t raw_id_;
};
- TraceID(const void* id, unsigned int* flags)
- : data_(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(id))) {
+ TraceID(const void* raw_id, unsigned int* flags)
+ : raw_id_(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(raw_id))) {
*flags |= TRACE_EVENT_FLAG_MANGLE_ID;
}
- TraceID(ForceMangle id, unsigned int* flags) : data_(id.data()) {
+ TraceID(ForceMangle raw_id, unsigned int* flags) : raw_id_(raw_id.raw_id()) {
*flags |= TRACE_EVENT_FLAG_MANGLE_ID;
}
- TraceID(DontMangle id, unsigned int* flags) : data_(id.data()) {}
- TraceID(uint64_t id, unsigned int* flags) : data_(id) { (void)flags; }
- TraceID(unsigned int id, unsigned int* flags) : data_(id) { (void)flags; }
- TraceID(uint16_t id, unsigned int* flags) : data_(id) { (void)flags; }
- TraceID(unsigned char id, unsigned int* flags) : data_(id) { (void)flags; }
- TraceID(int64_t id, unsigned int* flags) : data_(static_cast<uint64_t>(id)) {
+ TraceID(DontMangle maybe_scoped_id, unsigned int* flags)
+ : scope_(maybe_scoped_id.scope()), raw_id_(maybe_scoped_id.raw_id()) {}
+ TraceID(uint64_t raw_id, unsigned int* flags) : raw_id_(raw_id) {
+ (void)flags;
+ }
+ TraceID(unsigned int raw_id, unsigned int* flags) : raw_id_(raw_id) {
+ (void)flags;
+ }
+ TraceID(uint16_t raw_id, unsigned int* flags) : raw_id_(raw_id) {
+ (void)flags;
+ }
+ TraceID(unsigned char raw_id, unsigned int* flags) : raw_id_(raw_id) {
(void)flags;
}
- TraceID(int id, unsigned int* flags) : data_(static_cast<uint64_t>(id)) {
+ TraceID(int64_t raw_id, unsigned int* flags)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {
(void)flags;
}
- TraceID(int16_t id, unsigned int* flags) : data_(static_cast<uint64_t>(id)) {
+ TraceID(int raw_id, unsigned int* flags)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {
(void)flags;
}
- TraceID(signed char id, unsigned int* flags)
- : data_(static_cast<uint64_t>(id)) {
+ TraceID(int16_t raw_id, unsigned int* flags)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {
(void)flags;
}
+ TraceID(signed char raw_id, unsigned int* flags)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {
+ (void)flags;
+ }
+ TraceID(WithScope scoped_id, unsigned int* flags)
+ : scope_(scoped_id.scope()), raw_id_(scoped_id.raw_id()) {}
- uint64_t data() const { return data_; }
+ uint64_t raw_id() const { return raw_id_; }
+ const char* scope() const { return scope_; }
private:
- uint64_t data_;
+ const char* scope_ = nullptr;
+ uint64_t raw_id_;
};
// Simple union to store various types as uint64_t.
@@ -407,34 +474,33 @@ INTERNAL_DECLARE_SET_TRACE_VALUE(const TraceStringWithCopy&, as_string,
static V8_INLINE uint64_t AddTraceEvent(char phase,
const uint8_t* category_group_enabled,
- const char* name, uint64_t id,
- uint64_t bind_id, unsigned int flags) {
+ const char* name, const char* scope,
+ uint64_t id, uint64_t bind_id,
+ unsigned int flags) {
return TRACE_EVENT_API_ADD_TRACE_EVENT(phase, category_group_enabled, name,
- id, bind_id, kZeroNumArgs, NULL, NULL,
- NULL, flags);
+ scope, id, bind_id, kZeroNumArgs, NULL,
+ NULL, NULL, flags);
}
template <class ARG1_TYPE>
-static V8_INLINE uint64_t AddTraceEvent(char phase,
- const uint8_t* category_group_enabled,
- const char* name, uint64_t id,
- uint64_t bind_id, unsigned int flags,
- const char* arg1_name,
- const ARG1_TYPE& arg1_val) {
+static V8_INLINE uint64_t AddTraceEvent(
+ char phase, const uint8_t* category_group_enabled, const char* name,
+ const char* scope, uint64_t id, uint64_t bind_id, unsigned int flags,
+ const char* arg1_name, const ARG1_TYPE& arg1_val) {
const int num_args = 1;
uint8_t arg_types[1];
uint64_t arg_values[1];
SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
- return TRACE_EVENT_API_ADD_TRACE_EVENT(phase, category_group_enabled, name,
- id, bind_id, num_args, &arg1_name,
- arg_types, arg_values, flags);
+ return TRACE_EVENT_API_ADD_TRACE_EVENT(
+ phase, category_group_enabled, name, scope, id, bind_id, num_args,
+ &arg1_name, arg_types, arg_values, flags);
}
template <class ARG1_TYPE, class ARG2_TYPE>
static V8_INLINE uint64_t AddTraceEvent(
char phase, const uint8_t* category_group_enabled, const char* name,
- uint64_t id, uint64_t bind_id, unsigned int flags, const char* arg1_name,
- const ARG1_TYPE& arg1_val, const char* arg2_name,
+ const char* scope, uint64_t id, uint64_t bind_id, unsigned int flags,
+ const char* arg1_name, const ARG1_TYPE& arg1_val, const char* arg2_name,
const ARG2_TYPE& arg2_val) {
const int num_args = 2;
const char* arg_names[2] = {arg1_name, arg2_name};
@@ -442,9 +508,9 @@ static V8_INLINE uint64_t AddTraceEvent(
uint64_t arg_values[2];
SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
SetTraceValue(arg2_val, &arg_types[1], &arg_values[1]);
- return TRACE_EVENT_API_ADD_TRACE_EVENT(phase, category_group_enabled, name,
- id, bind_id, num_args, arg_names,
- arg_types, arg_values, flags);
+ return TRACE_EVENT_API_ADD_TRACE_EVENT(
+ phase, category_group_enabled, name, scope, id, bind_id, num_args,
+ arg_names, arg_types, arg_values, flags);
}
// Used by TRACE_EVENTx macros. Do not use directly.
diff --git a/deps/v8/src/transitions-inl.h b/deps/v8/src/transitions-inl.h
index 9424497e19..ea02d61031 100644
--- a/deps/v8/src/transitions-inl.h
+++ b/deps/v8/src/transitions-inl.h
@@ -114,7 +114,6 @@ bool TransitionArray::IsSpecialTransition(Name* name) {
name == heap->sealed_symbol() || name == heap->frozen_symbol() ||
name == heap->elements_transition_symbol() ||
name == heap->strict_function_transition_symbol() ||
- name == heap->strong_function_transition_symbol() ||
name == heap->observed_symbol();
}
#endif
diff --git a/deps/v8/src/transitions.cc b/deps/v8/src/transitions.cc
index e63769e4af..082ebc16b0 100644
--- a/deps/v8/src/transitions.cc
+++ b/deps/v8/src/transitions.cc
@@ -395,7 +395,6 @@ Handle<TransitionArray> TransitionArray::Allocate(Isolate* isolate,
int slack) {
Handle<FixedArray> array = isolate->factory()->NewTransitionArray(
LengthFor(number_of_transitions + slack));
- array->set(kNextLinkIndex, isolate->heap()->undefined_value());
array->set(kPrototypeTransitionsIndex, Smi::FromInt(0));
array->set(kTransitionLengthIndex, Smi::FromInt(number_of_transitions));
return Handle<TransitionArray>::cast(array);
diff --git a/deps/v8/src/type-cache.h b/deps/v8/src/type-cache.h
index 8bd35c0161..2a95df9f8c 100644
--- a/deps/v8/src/type-cache.h
+++ b/deps/v8/src/type-cache.h
@@ -13,12 +13,13 @@ namespace internal {
class TypeCache final {
private:
// This has to be first for the initialization magic to work.
+ base::AccountingAllocator allocator;
Zone zone_;
public:
static TypeCache const& Get();
- TypeCache() = default;
+ TypeCache() : zone_(&allocator) {}
Type* const kInt8 =
CreateNative(CreateRange<int8_t>(), Type::UntaggedIntegral8());
diff --git a/deps/v8/src/type-feedback-vector-inl.h b/deps/v8/src/type-feedback-vector-inl.h
index 97df1b9ae9..015104e96a 100644
--- a/deps/v8/src/type-feedback-vector-inl.h
+++ b/deps/v8/src/type-feedback-vector-inl.h
@@ -133,23 +133,19 @@ void TypeFeedbackVector::ComputeCounts(int* with_type_info, int* generic) {
*generic = gen;
}
-
-Handle<Object> TypeFeedbackVector::UninitializedSentinel(Isolate* isolate) {
+Handle<Symbol> TypeFeedbackVector::UninitializedSentinel(Isolate* isolate) {
return isolate->factory()->uninitialized_symbol();
}
-
-Handle<Object> TypeFeedbackVector::MegamorphicSentinel(Isolate* isolate) {
+Handle<Symbol> TypeFeedbackVector::MegamorphicSentinel(Isolate* isolate) {
return isolate->factory()->megamorphic_symbol();
}
-
-Handle<Object> TypeFeedbackVector::PremonomorphicSentinel(Isolate* isolate) {
+Handle<Symbol> TypeFeedbackVector::PremonomorphicSentinel(Isolate* isolate) {
return isolate->factory()->premonomorphic_symbol();
}
-
-Object* TypeFeedbackVector::RawUninitializedSentinel(Isolate* isolate) {
+Symbol* TypeFeedbackVector::RawUninitializedSentinel(Isolate* isolate) {
return isolate->heap()->uninitialized_symbol();
}
diff --git a/deps/v8/src/type-feedback-vector.h b/deps/v8/src/type-feedback-vector.h
index effbfe7cc6..770b5e5ded 100644
--- a/deps/v8/src/type-feedback-vector.h
+++ b/deps/v8/src/type-feedback-vector.h
@@ -231,17 +231,17 @@ class TypeFeedbackVector : public FixedArray {
void ClearKeyedStoreICs(SharedFunctionInfo* shared);
// The object that indicates an uninitialized cache.
- static inline Handle<Object> UninitializedSentinel(Isolate* isolate);
+ static inline Handle<Symbol> UninitializedSentinel(Isolate* isolate);
// The object that indicates a megamorphic state.
- static inline Handle<Object> MegamorphicSentinel(Isolate* isolate);
+ static inline Handle<Symbol> MegamorphicSentinel(Isolate* isolate);
// The object that indicates a premonomorphic state.
- static inline Handle<Object> PremonomorphicSentinel(Isolate* isolate);
+ static inline Handle<Symbol> PremonomorphicSentinel(Isolate* isolate);
// A raw version of the uninitialized sentinel that's safe to read during
// garbage collection (e.g., for patching the cache).
- static inline Object* RawUninitializedSentinel(Isolate* isolate);
+ static inline Symbol* RawUninitializedSentinel(Isolate* isolate);
static const int kDummyLoadICSlot = 0;
static const int kDummyKeyedLoadICSlot = 2;
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index ad253420bc..9087576f01 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -214,10 +214,6 @@ void TypeFeedbackOracle::CompareType(TypeFeedbackId id,
*left_type = CompareICState::StateToType(zone(), stub.left());
*right_type = CompareICState::StateToType(zone(), stub.right());
*combined_type = CompareICState::StateToType(zone(), stub.state(), map);
- } else if (code->is_compare_nil_ic_stub()) {
- CompareNilICStub stub(isolate(), code->extra_ic_state());
- *combined_type = stub.GetType(zone(), map);
- *left_type = *right_type = stub.GetInputType(zone(), map);
}
}
@@ -457,7 +453,6 @@ void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) {
case Code::BINARY_OP_IC:
case Code::COMPARE_IC:
case Code::TO_BOOLEAN_IC:
- case Code::COMPARE_NIL_IC:
SetInfo(ast_id, target);
break;
diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h
index 13a7f88b66..c4b0928fc4 100644
--- a/deps/v8/src/type-info.h
+++ b/deps/v8/src/type-info.h
@@ -71,7 +71,7 @@ class TypeFeedbackOracle: public ZoneObject {
Handle<JSFunction> GetCallNewTarget(FeedbackVectorSlot slot);
Handle<AllocationSite> GetCallNewAllocationSite(FeedbackVectorSlot slot);
- // TODO(1571) We can't use ToBooleanStub::Types as the return value because
+ // TODO(1571) We can't use ToBooleanICStub::Types as the return value because
// of various cycles in our headers. Death to tons of implementations in
// headers!! :-P
uint16_t ToBooleanTypes(TypeFeedbackId id);
diff --git a/deps/v8/src/types.cc b/deps/v8/src/types.cc
index d54826e34e..49c941816e 100644
--- a/deps/v8/src/types.cc
+++ b/deps/v8/src/types.cc
@@ -191,24 +191,29 @@ Type::bitset BitsetType::Lub(i::Map* map) {
map == heap->uninitialized_map() ||
map == heap->no_interceptor_result_sentinel_map() ||
map == heap->termination_exception_map() ||
- map == heap->arguments_marker_map());
+ map == heap->arguments_marker_map() ||
+ map == heap->optimized_out_map());
return kInternal & kTaggedPointer;
}
case HEAP_NUMBER_TYPE:
return kNumber & kTaggedPointer;
case SIMD128_VALUE_TYPE:
return kSimd;
+ case JS_OBJECT_TYPE:
+ case JS_GLOBAL_OBJECT_TYPE:
+ case JS_GLOBAL_PROXY_TYPE:
+ case JS_SPECIAL_API_OBJECT_TYPE:
+ if (map->is_undetectable()) return kOtherUndetectable;
+ return kOtherObject;
case JS_VALUE_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
case JS_DATE_TYPE:
- case JS_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
case JS_MODULE_TYPE:
- case JS_GLOBAL_OBJECT_TYPE:
- case JS_GLOBAL_PROXY_TYPE:
case JS_ARRAY_BUFFER_TYPE:
case JS_ARRAY_TYPE:
+ case JS_REGEXP_TYPE: // TODO(rossberg): there should be a RegExp type.
case JS_TYPED_ARRAY_TYPE:
case JS_DATA_VIEW_TYPE:
case JS_SET_TYPE:
@@ -219,26 +224,15 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case JS_WEAK_SET_TYPE:
case JS_PROMISE_TYPE:
case JS_BOUND_FUNCTION_TYPE:
- if (map->is_undetectable()) return kUndetectable;
+ DCHECK(!map->is_undetectable());
return kOtherObject;
case JS_FUNCTION_TYPE:
- if (map->is_undetectable()) return kUndetectable;
+ DCHECK(!map->is_undetectable());
return kFunction;
- case JS_REGEXP_TYPE:
- return kOtherObject; // TODO(rossberg): there should be a RegExp type.
case JS_PROXY_TYPE:
+ DCHECK(!map->is_undetectable());
return kProxy;
case MAP_TYPE:
- // When compiling stub templates, the meta map is used as a place holder
- // for the actual map with which the template is later instantiated.
- // We treat it as a kind of type variable whose upper bound is Any.
- // TODO(rossberg): for caching of CompareNilIC stubs to work correctly,
- // we must exclude Undetectable here. This makes no sense, really,
- // because it means that the template isn't actually parametric.
- // Also, it doesn't apply elsewhere. 8-(
- // We ought to find a cleaner solution for compiling stubs parameterised
- // over type or class variables, esp ones with bounds...
- return kDetectable & kTaggedPointer;
case ALLOCATION_SITE_TYPE:
case ACCESSOR_INFO_TYPE:
case SHARED_FUNCTION_INFO_TYPE:
diff --git a/deps/v8/src/types.h b/deps/v8/src/types.h
index 9984ad8378..8061410429 100644
--- a/deps/v8/src/types.h
+++ b/deps/v8/src/types.h
@@ -42,8 +42,8 @@ namespace internal {
// Array < Object
// Function < Object
// RegExp < Object
-// Undetectable < Object
-// Detectable = Receiver \/ Number \/ Name - Undetectable
+// OtherUndetectable < Object
+// DetectableReceiver = Receiver - OtherUndetectable
//
// Class(map) < T iff instance_type(map) < T
// Constant(x) < T iff instance_type(map(x)) < T
@@ -195,8 +195,8 @@ namespace internal {
V(InternalizedString, 1u << 13 | REPRESENTATION(kTaggedPointer)) \
V(OtherString, 1u << 14 | REPRESENTATION(kTaggedPointer)) \
V(Simd, 1u << 15 | REPRESENTATION(kTaggedPointer)) \
- V(Undetectable, 1u << 16 | REPRESENTATION(kTaggedPointer)) \
V(OtherObject, 1u << 17 | REPRESENTATION(kTaggedPointer)) \
+ V(OtherUndetectable, 1u << 16 | REPRESENTATION(kTaggedPointer)) \
V(Proxy, 1u << 18 | REPRESENTATION(kTaggedPointer)) \
V(Function, 1u << 19 | REPRESENTATION(kTaggedPointer)) \
V(Internal, 1u << 20 | REPRESENTATION(kTagged | kUntagged)) \
@@ -218,13 +218,13 @@ namespace internal {
V(BooleanOrNumber, kBoolean | kNumber) \
V(BooleanOrNullOrUndefined, kBoolean | kNull | kUndefined) \
V(NullOrUndefined, kNull | kUndefined) \
+ V(Undetectable, kNullOrUndefined | kOtherUndetectable) \
V(NumberOrString, kNumber | kString) \
V(NumberOrUndefined, kNumber | kUndefined) \
V(PlainPrimitive, kNumberOrString | kBoolean | kNullOrUndefined) \
V(Primitive, kSymbol | kSimd | kPlainPrimitive) \
V(DetectableReceiver, kFunction | kOtherObject | kProxy) \
- V(Detectable, kDetectableReceiver | kNumber | kName) \
- V(Object, kFunction | kOtherObject | kUndetectable) \
+ V(Object, kFunction | kOtherObject | kOtherUndetectable) \
V(Receiver, kObject | kProxy) \
V(StringOrReceiver, kString | kReceiver) \
V(Unique, kBoolean | kUniqueName | kNull | kUndefined | \
diff --git a/deps/v8/src/typing-asm.cc b/deps/v8/src/typing-asm.cc
index ddb608fc2c..7482c4f651 100644
--- a/deps/v8/src/typing-asm.cc
+++ b/deps/v8/src/typing-asm.cc
@@ -690,7 +690,7 @@ void AsmTyper::VisitAssignment(Assignment* expr) {
expected_type_ = target_type;
VisitVariableProxy(expr->target()->AsVariableProxy(), true);
} else if (expr->target()->IsProperty()) {
- int value_intish = intish_;
+ int32_t value_intish = intish_;
Property* property = expr->target()->AsProperty();
RECURSE(VisitWithExpectation(property->obj(), Type::Any(),
"bad propety object"));
@@ -781,7 +781,7 @@ void AsmTyper::VisitHeapAccess(Property* expr, bool assigning,
"array index expected to be integer"));
Literal* right = bin->right()->AsLiteral();
if (right == NULL || right->raw_value()->ContainsDot()) {
- FAIL(right, "heap access shift must be integer");
+ FAIL(bin->right(), "heap access shift must be integer");
}
RECURSE(VisitWithExpectation(bin->right(), cache_.kAsmSigned,
"array shift expected to be integer"));
@@ -934,6 +934,54 @@ void AsmTyper::VisitProperty(Property* expr) {
FAIL(expr, "invalid property access");
}
+void AsmTyper::CheckPolymorphicStdlibArguments(
+ enum StandardMember standard_member, ZoneList<Expression*>* args) {
+ if (args->length() == 0) {
+ return;
+ }
+ // Handle polymorphic stdlib functions specially.
+ Expression* arg0 = args->at(0);
+ Type* arg0_type = arg0->bounds().upper;
+ switch (standard_member) {
+ case kMathFround: {
+ if (!arg0_type->Is(cache_.kAsmFloat) &&
+ !arg0_type->Is(cache_.kAsmDouble) &&
+ !arg0_type->Is(cache_.kAsmSigned) &&
+ !arg0_type->Is(cache_.kAsmUnsigned)) {
+ FAIL(arg0, "illegal function argument type");
+ }
+ break;
+ }
+ case kMathCeil:
+ case kMathFloor:
+ case kMathSqrt: {
+ if (!arg0_type->Is(cache_.kAsmFloat) &&
+ !arg0_type->Is(cache_.kAsmDouble)) {
+ FAIL(arg0, "illegal function argument type");
+ }
+ break;
+ }
+ case kMathAbs:
+ case kMathMin:
+ case kMathMax: {
+ if (!arg0_type->Is(cache_.kAsmFloat) &&
+ !arg0_type->Is(cache_.kAsmDouble) &&
+ !arg0_type->Is(cache_.kAsmSigned)) {
+ FAIL(arg0, "illegal function argument type");
+ }
+ if (args->length() > 1) {
+ Type* other = Type::Intersect(args->at(0)->bounds().upper,
+ args->at(1)->bounds().upper, zone());
+ if (!other->Is(cache_.kAsmFloat) && !other->Is(cache_.kAsmDouble) &&
+ !other->Is(cache_.kAsmSigned)) {
+ FAIL(arg0, "function arguments types don't match");
+ }
+ }
+ break;
+ }
+ default: { break; }
+ }
+}
void AsmTyper::VisitCall(Call* expr) {
Type* expected_type = expected_type_;
@@ -956,7 +1004,6 @@ void AsmTyper::VisitCall(Call* expr) {
ZoneList<Expression*>* args = expr->arguments();
if (Type::Any()->Is(result_type)) {
// For foreign calls.
- ZoneList<Expression*>* args = expr->arguments();
for (int i = 0; i < args->length(); ++i) {
Expression* arg = args->at(i);
RECURSE(VisitWithExpectation(
@@ -988,29 +1035,7 @@ void AsmTyper::VisitCall(Call* expr) {
result_type = computed_type_;
}
}
- // Handle polymorphic stdlib functions specially.
- if (standard_member == kMathCeil || standard_member == kMathFloor ||
- standard_member == kMathSqrt) {
- if (!args->at(0)->bounds().upper->Is(cache_.kAsmFloat) &&
- !args->at(0)->bounds().upper->Is(cache_.kAsmDouble)) {
- FAIL(expr, "illegal function argument type");
- }
- } else if (standard_member == kMathAbs || standard_member == kMathMin ||
- standard_member == kMathMax) {
- if (!args->at(0)->bounds().upper->Is(cache_.kAsmFloat) &&
- !args->at(0)->bounds().upper->Is(cache_.kAsmDouble) &&
- !args->at(0)->bounds().upper->Is(cache_.kAsmSigned)) {
- FAIL(expr, "illegal function argument type");
- }
- if (args->length() > 1) {
- Type* other = Type::Intersect(args->at(0)->bounds().upper,
- args->at(1)->bounds().upper, zone());
- if (!other->Is(cache_.kAsmFloat) && !other->Is(cache_.kAsmDouble) &&
- !other->Is(cache_.kAsmSigned)) {
- FAIL(expr, "function arguments types don't match");
- }
- }
- }
+ RECURSE(CheckPolymorphicStdlibArguments(standard_member, args));
intish_ = 0;
IntersectResult(expr, result_type);
}
@@ -1083,7 +1108,7 @@ void AsmTyper::VisitIntegerBitwiseOperator(BinaryOperation* expr,
Type* result_type, bool conversion) {
RECURSE(VisitWithExpectation(expr->left(), Type::Number(),
"left bitwise operand expected to be a number"));
- int left_intish = intish_;
+ int32_t left_intish = intish_;
Type* left_type = computed_type_;
if (!left_type->Is(left_expected)) {
FAIL(expr->left(), "left bitwise operand expected to be an integer");
@@ -1095,7 +1120,7 @@ void AsmTyper::VisitIntegerBitwiseOperator(BinaryOperation* expr,
RECURSE(
VisitWithExpectation(expr->right(), Type::Number(),
"right bitwise operand expected to be a number"));
- int right_intish = intish_;
+ int32_t right_intish = intish_;
Type* right_type = computed_type_;
if (!right_type->Is(right_expected)) {
FAIL(expr->right(), "right bitwise operand expected to be an integer");
@@ -1113,7 +1138,7 @@ void AsmTyper::VisitIntegerBitwiseOperator(BinaryOperation* expr,
right_type = left_type;
}
if (!conversion) {
- if (!left_type->Is(right_type) || !right_type->Is(left_type)) {
+ if (!left_type->Is(cache_.kAsmIntQ) || !right_type->Is(cache_.kAsmIntQ)) {
FAIL(expr, "ill-typed bitwise operation");
}
}
@@ -1157,11 +1182,16 @@ void AsmTyper::VisitBinaryOperation(BinaryOperation* expr) {
FAIL(expr, "illegal logical operator");
case Token::BIT_OR: {
// BIT_OR allows Any since it is used as a type coercion.
- VisitIntegerBitwiseOperator(expr, Type::Any(), cache_.kAsmInt,
- cache_.kAsmSigned, true);
- if (expr->left()->IsCall() && expr->op() == Token::BIT_OR) {
+ RECURSE(VisitIntegerBitwiseOperator(expr, Type::Any(), cache_.kAsmIntQ,
+ cache_.kAsmSigned, true));
+ if (expr->left()->IsCall() && expr->op() == Token::BIT_OR &&
+ Type::Number()->Is(expr->left()->bounds().upper)) {
+ // Force the return types of foreign functions.
expr->left()->set_bounds(Bounds(cache_.kAsmSigned));
}
+ if (in_function_ && !expr->left()->bounds().upper->Is(cache_.kAsmIntQ)) {
+ FAIL(expr->left(), "intish required");
+ }
return;
}
case Token::BIT_XOR: {
@@ -1170,7 +1200,7 @@ void AsmTyper::VisitBinaryOperation(BinaryOperation* expr) {
if (left && left->value()->IsBoolean()) {
if (left->ToBooleanIsTrue()) {
left->set_bounds(Bounds(cache_.kSingletonOne));
- RECURSE(VisitWithExpectation(expr->right(), cache_.kAsmInt,
+ RECURSE(VisitWithExpectation(expr->right(), cache_.kAsmIntQ,
"not operator expects an integer"));
IntersectResult(expr, cache_.kAsmSigned);
return;
@@ -1178,21 +1208,21 @@ void AsmTyper::VisitBinaryOperation(BinaryOperation* expr) {
FAIL(left, "unexpected false");
}
}
- // BIT_XOR allows Number since it is used as a type coercion (via ~~).
- VisitIntegerBitwiseOperator(expr, Type::Number(), cache_.kAsmInt,
- cache_.kAsmSigned, true);
+ // BIT_XOR allows Any since it is used as a type coercion (via ~~).
+ RECURSE(VisitIntegerBitwiseOperator(expr, Type::Any(), cache_.kAsmIntQ,
+ cache_.kAsmSigned, true));
return;
}
case Token::SHR: {
- VisitIntegerBitwiseOperator(expr, cache_.kAsmInt, cache_.kAsmInt,
- cache_.kAsmUnsigned, false);
+ RECURSE(VisitIntegerBitwiseOperator(
+ expr, cache_.kAsmIntQ, cache_.kAsmIntQ, cache_.kAsmUnsigned, false));
return;
}
case Token::SHL:
case Token::SAR:
case Token::BIT_AND: {
- VisitIntegerBitwiseOperator(expr, cache_.kAsmInt, cache_.kAsmInt,
- cache_.kAsmSigned, false);
+ RECURSE(VisitIntegerBitwiseOperator(
+ expr, cache_.kAsmIntQ, cache_.kAsmIntQ, cache_.kAsmSigned, false));
return;
}
case Token::ADD:
@@ -1204,28 +1234,33 @@ void AsmTyper::VisitBinaryOperation(BinaryOperation* expr) {
expr->left(), Type::Number(),
"left arithmetic operand expected to be number"));
Type* left_type = computed_type_;
- int left_intish = intish_;
+ int32_t left_intish = intish_;
RECURSE(VisitWithExpectation(
expr->right(), Type::Number(),
"right arithmetic operand expected to be number"));
Type* right_type = computed_type_;
- int right_intish = intish_;
+ int32_t right_intish = intish_;
Type* type = Type::Union(left_type, right_type, zone());
if (type->Is(cache_.kAsmInt)) {
if (expr->op() == Token::MUL) {
- Literal* right = expr->right()->AsLiteral();
- if (!right) {
- FAIL(expr, "direct integer multiply forbidden");
- }
- if (!right->value()->IsNumber()) {
- FAIL(expr, "multiply must be by an integer");
- }
int32_t i;
- if (!right->value()->ToInt32(&i)) {
- FAIL(expr, "multiply must be a signed integer");
+ Literal* left = expr->left()->AsLiteral();
+ Literal* right = expr->right()->AsLiteral();
+ if (left != nullptr && left->value()->IsNumber() &&
+ left->value()->ToInt32(&i)) {
+ if (right_intish != 0) {
+ FAIL(expr, "intish not allowed in multiply");
+ }
+ } else if (right != nullptr && right->value()->IsNumber() &&
+ right->value()->ToInt32(&i)) {
+ if (left_intish != 0) {
+ FAIL(expr, "intish not allowed in multiply");
+ }
+ } else {
+ FAIL(expr, "multiply must be by an integer literal");
}
i = abs(i);
- if (i >= 1 << 20) {
+ if (i >= (1 << 20)) {
FAIL(expr, "multiply must be by value in -2^20 < n < 2^20");
}
intish_ = i;
@@ -1246,13 +1281,38 @@ void AsmTyper::VisitBinaryOperation(BinaryOperation* expr) {
return;
}
} else if (expr->op() == Token::MUL && expr->right()->IsLiteral() &&
- right_type->Is(cache_.kAsmDouble)) {
+ right_type->Is(cache_.kAsmDouble) &&
+ expr->right()->AsLiteral()->raw_value()->ContainsDot() &&
+ expr->right()->AsLiteral()->raw_value()->AsNumber() == 1.0) {
// For unary +, expressed as x * 1.0
- if (expr->left()->IsCall() && expr->op() == Token::MUL) {
+ if (expr->left()->IsCall() &&
+ Type::Number()->Is(expr->left()->bounds().upper)) {
+ // Force the return types of foreign functions.
expr->left()->set_bounds(Bounds(cache_.kAsmDouble));
+ left_type = expr->left()->bounds().upper;
+ }
+ if (!(expr->left()->IsProperty() &&
+ Type::Number()->Is(expr->left()->bounds().upper))) {
+ if (!left_type->Is(cache_.kAsmSigned) &&
+ !left_type->Is(cache_.kAsmUnsigned) &&
+ !left_type->Is(cache_.kAsmFixnum) &&
+ !left_type->Is(cache_.kAsmFloatQ) &&
+ !left_type->Is(cache_.kAsmDoubleQ)) {
+ FAIL(
+ expr->left(),
+ "unary + only allowed on signed, unsigned, float?, or double?");
+ }
}
IntersectResult(expr, cache_.kAsmDouble);
return;
+ } else if (expr->op() == Token::MUL && left_type->Is(cache_.kAsmDouble) &&
+ expr->right()->IsLiteral() &&
+ !expr->right()->AsLiteral()->raw_value()->ContainsDot() &&
+ expr->right()->AsLiteral()->raw_value()->AsNumber() == -1.0) {
+ // For unary -, expressed as x * -1
+ expr->right()->set_bounds(Bounds(cache_.kAsmDouble));
+ IntersectResult(expr, cache_.kAsmDouble);
+ return;
} else if (type->Is(cache_.kAsmFloat) && expr->op() != Token::MOD) {
if (left_intish != 0 || right_intish != 0) {
FAIL(expr, "float operation before required fround");
@@ -1493,8 +1553,6 @@ AsmTyper::VariableInfo* AsmTyper::GetVariableInfo(Variable* variable,
if (!entry && in_function_) {
entry =
global_variable_type_.Lookup(variable, ComputePointerHash(variable));
- if (entry && entry->value) {
- }
}
}
if (!entry) return NULL;
diff --git a/deps/v8/src/typing-asm.h b/deps/v8/src/typing-asm.h
index 54796ed4dd..c7984b2965 100644
--- a/deps/v8/src/typing-asm.h
+++ b/deps/v8/src/typing-asm.h
@@ -92,7 +92,7 @@ class AsmTyper : public AstVisitor {
Type* expected_type_;
Type* computed_type_;
VariableInfo* property_info_;
- int intish_; // How many ops we've gone without a x|0.
+ int32_t intish_; // How many ops we've gone without a x|0.
Type* return_type_; // Return type of last function.
size_t array_size_; // Array size of last ArrayLiteral.
@@ -135,6 +135,9 @@ class AsmTyper : public AstVisitor {
void VisitHeapAccess(Property* expr, bool assigning, Type* assignment_type);
+ void CheckPolymorphicStdlibArguments(enum StandardMember standard_member,
+ ZoneList<Expression*>* args);
+
Expression* GetReceiverOfPropertyAccess(Expression* expr, const char* name);
bool IsMathObject(Expression* expr);
bool IsSIMDObject(Expression* expr);
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index d779979a61..44865edede 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -210,6 +210,30 @@ inline double Floor(double x) {
return std::floor(x);
}
+inline double Pow(double x, double y) {
+#if (defined(__MINGW64_VERSION_MAJOR) && \
+ (!defined(__MINGW64_VERSION_RC) || __MINGW64_VERSION_RC < 1)) || \
+ defined(V8_OS_AIX)
+ // MinGW64 and AIX have a custom implementation for pow. This handles certain
+ // special cases that are different.
+ if ((x == 0.0 || std::isinf(x)) && y != 0.0 && std::isfinite(y)) {
+ double f;
+ double result = ((x == 0.0) ^ (y > 0)) ? V8_INFINITY : 0;
+ /* retain sign if odd integer exponent */
+ return ((std::modf(y, &f) == 0.0) && (static_cast<int64_t>(y) & 1))
+ ? copysign(result, x)
+ : result;
+ }
+
+ if (x == 2.0) {
+ int y_int = static_cast<int>(y);
+ if (y == y_int) {
+ return std::ldexp(1.0, y_int);
+ }
+ }
+#endif
+ return std::pow(x, y);
+}
// TODO(svenpanne) Clean up the whole power-of-2 mess.
inline int32_t WhichPowerOf2Abs(int32_t x) {
@@ -548,240 +572,6 @@ class EmbeddedVector : public Vector<T> {
T buffer_[kSize];
};
-
-/*
- * A class that collects values into a backing store.
- * Specialized versions of the class can allow access to the backing store
- * in different ways.
- * There is no guarantee that the backing store is contiguous (and, as a
- * consequence, no guarantees that consecutively added elements are adjacent
- * in memory). The collector may move elements unless it has guaranteed not
- * to.
- */
-template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
-class Collector {
- public:
- explicit Collector(int initial_capacity = kMinCapacity)
- : index_(0), size_(0) {
- current_chunk_ = Vector<T>::New(initial_capacity);
- }
-
- virtual ~Collector() {
- // Free backing store (in reverse allocation order).
- current_chunk_.Dispose();
- for (int i = chunks_.length() - 1; i >= 0; i--) {
- chunks_.at(i).Dispose();
- }
- }
-
- // Add a single element.
- inline void Add(T value) {
- if (index_ >= current_chunk_.length()) {
- Grow(1);
- }
- current_chunk_[index_] = value;
- index_++;
- size_++;
- }
-
- // Add a block of contiguous elements and return a Vector backed by the
- // memory area.
- // A basic Collector will keep this vector valid as long as the Collector
- // is alive.
- inline Vector<T> AddBlock(int size, T initial_value) {
- DCHECK(size > 0);
- if (size > current_chunk_.length() - index_) {
- Grow(size);
- }
- T* position = current_chunk_.start() + index_;
- index_ += size;
- size_ += size;
- for (int i = 0; i < size; i++) {
- position[i] = initial_value;
- }
- return Vector<T>(position, size);
- }
-
-
- // Add a contiguous block of elements and return a vector backed
- // by the added block.
- // A basic Collector will keep this vector valid as long as the Collector
- // is alive.
- inline Vector<T> AddBlock(Vector<const T> source) {
- if (source.length() > current_chunk_.length() - index_) {
- Grow(source.length());
- }
- T* position = current_chunk_.start() + index_;
- index_ += source.length();
- size_ += source.length();
- for (int i = 0; i < source.length(); i++) {
- position[i] = source[i];
- }
- return Vector<T>(position, source.length());
- }
-
-
- // Write the contents of the collector into the provided vector.
- void WriteTo(Vector<T> destination) {
- DCHECK(size_ <= destination.length());
- int position = 0;
- for (int i = 0; i < chunks_.length(); i++) {
- Vector<T> chunk = chunks_.at(i);
- for (int j = 0; j < chunk.length(); j++) {
- destination[position] = chunk[j];
- position++;
- }
- }
- for (int i = 0; i < index_; i++) {
- destination[position] = current_chunk_[i];
- position++;
- }
- }
-
- // Allocate a single contiguous vector, copy all the collected
- // elements to the vector, and return it.
- // The caller is responsible for freeing the memory of the returned
- // vector (e.g., using Vector::Dispose).
- Vector<T> ToVector() {
- Vector<T> new_store = Vector<T>::New(size_);
- WriteTo(new_store);
- return new_store;
- }
-
- // Resets the collector to be empty.
- virtual void Reset() {
- for (int i = chunks_.length() - 1; i >= 0; i--) {
- chunks_.at(i).Dispose();
- }
- chunks_.Rewind(0);
- index_ = 0;
- size_ = 0;
- }
-
- // Total number of elements added to collector so far.
- inline int size() { return size_; }
-
- protected:
- static const int kMinCapacity = 16;
- List<Vector<T> > chunks_;
- Vector<T> current_chunk_; // Block of memory currently being written into.
- int index_; // Current index in current chunk.
- int size_; // Total number of elements in collector.
-
- // Creates a new current chunk, and stores the old chunk in the chunks_ list.
- void Grow(int min_capacity) {
- DCHECK(growth_factor > 1);
- int new_capacity;
- int current_length = current_chunk_.length();
- if (current_length < kMinCapacity) {
- // The collector started out as empty.
- new_capacity = min_capacity * growth_factor;
- if (new_capacity < kMinCapacity) new_capacity = kMinCapacity;
- } else {
- int growth = current_length * (growth_factor - 1);
- if (growth > max_growth) {
- growth = max_growth;
- }
- new_capacity = current_length + growth;
- if (new_capacity < min_capacity) {
- new_capacity = min_capacity + growth;
- }
- }
- NewChunk(new_capacity);
- DCHECK(index_ + min_capacity <= current_chunk_.length());
- }
-
- // Before replacing the current chunk, give a subclass the option to move
- // some of the current data into the new chunk. The function may update
- // the current index_ value to represent data no longer in the current chunk.
- // Returns the initial index of the new chunk (after copied data).
- virtual void NewChunk(int new_capacity) {
- Vector<T> new_chunk = Vector<T>::New(new_capacity);
- if (index_ > 0) {
- chunks_.Add(current_chunk_.SubVector(0, index_));
- } else {
- current_chunk_.Dispose();
- }
- current_chunk_ = new_chunk;
- index_ = 0;
- }
-};
-
-
-/*
- * A collector that allows sequences of values to be guaranteed to
- * stay consecutive.
- * If the backing store grows while a sequence is active, the current
- * sequence might be moved, but after the sequence is ended, it will
- * not move again.
- * NOTICE: Blocks allocated using Collector::AddBlock(int) can move
- * as well, if inside an active sequence where another element is added.
- */
-template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
-class SequenceCollector : public Collector<T, growth_factor, max_growth> {
- public:
- explicit SequenceCollector(int initial_capacity)
- : Collector<T, growth_factor, max_growth>(initial_capacity),
- sequence_start_(kNoSequence) { }
-
- virtual ~SequenceCollector() {}
-
- void StartSequence() {
- DCHECK(sequence_start_ == kNoSequence);
- sequence_start_ = this->index_;
- }
-
- Vector<T> EndSequence() {
- DCHECK(sequence_start_ != kNoSequence);
- int sequence_start = sequence_start_;
- sequence_start_ = kNoSequence;
- if (sequence_start == this->index_) return Vector<T>();
- return this->current_chunk_.SubVector(sequence_start, this->index_);
- }
-
- // Drops the currently added sequence, and all collected elements in it.
- void DropSequence() {
- DCHECK(sequence_start_ != kNoSequence);
- int sequence_length = this->index_ - sequence_start_;
- this->index_ = sequence_start_;
- this->size_ -= sequence_length;
- sequence_start_ = kNoSequence;
- }
-
- virtual void Reset() {
- sequence_start_ = kNoSequence;
- this->Collector<T, growth_factor, max_growth>::Reset();
- }
-
- private:
- static const int kNoSequence = -1;
- int sequence_start_;
-
- // Move the currently active sequence to the new chunk.
- virtual void NewChunk(int new_capacity) {
- if (sequence_start_ == kNoSequence) {
- // Fall back on default behavior if no sequence has been started.
- this->Collector<T, growth_factor, max_growth>::NewChunk(new_capacity);
- return;
- }
- int sequence_length = this->index_ - sequence_start_;
- Vector<T> new_chunk = Vector<T>::New(sequence_length + new_capacity);
- DCHECK(sequence_length < new_chunk.length());
- for (int i = 0; i < sequence_length; i++) {
- new_chunk[i] = this->current_chunk_[sequence_start_ + i];
- }
- if (sequence_start_ > 0) {
- this->chunks_.Add(this->current_chunk_.SubVector(0, sequence_start_));
- } else {
- this->current_chunk_.Dispose();
- }
- this->current_chunk_ = new_chunk;
- this->index_ = sequence_length;
- sequence_start_ = 0;
- }
-};
-
-
// Compare 8bit/16bit chars to 8bit/16bit chars.
template <typename lchar, typename rchar>
inline int CompareCharsUnsigned(const lchar* lhs, const rchar* rhs,
@@ -1378,7 +1168,7 @@ INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src,
INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars));
INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src,
size_t chars));
-#elif defined(V8_HOST_ARCH_PPC)
+#elif defined(V8_HOST_ARCH_PPC) || defined(V8_HOST_ARCH_S390)
INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars));
INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src,
size_t chars));
@@ -1541,7 +1331,7 @@ void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, size_t chars) {
MemCopy(dest, src, chars * sizeof(*dest));
}
}
-#elif defined(V8_HOST_ARCH_PPC)
+#elif defined(V8_HOST_ARCH_PPC) || defined(V8_HOST_ARCH_S390)
#define CASE(n) \
case n: \
memcpy(dest, src, n); \
@@ -1752,21 +1542,22 @@ static inline double ReadDoubleValue(const void* p) {
return ReadUnalignedValue<double>(p);
}
-
static inline void WriteDoubleValue(void* p, double value) {
WriteUnalignedValue(p, value);
}
-
static inline uint16_t ReadUnalignedUInt16(const void* p) {
return ReadUnalignedValue<uint16_t>(p);
}
-
static inline void WriteUnalignedUInt16(void* p, uint16_t value) {
WriteUnalignedValue(p, value);
}
+static inline uint32_t ReadUnalignedUInt32(const void* p) {
+ return ReadUnalignedValue<uint32_t>(p);
+}
+
static inline void WriteUnalignedUInt32(void* p, uint32_t value) {
WriteUnalignedValue(p, value);
}
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index 31b48780e4..154cf6201d 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -19,7 +19,6 @@
#include "src/profiler/sampler.h"
#include "src/runtime-profiler.h"
#include "src/snapshot/natives.h"
-#include "src/snapshot/serialize.h"
#include "src/snapshot/snapshot.h"
diff --git a/deps/v8/src/v8.h b/deps/v8/src/v8.h
index a1b18b20d6..6016ef1419 100644
--- a/deps/v8/src/v8.h
+++ b/deps/v8/src/v8.h
@@ -21,7 +21,7 @@ class V8 : public AllStatic {
// Report process out of memory. Implementation found in api.cc.
// This function will not return, but will terminate the execution.
static void FatalProcessOutOfMemory(const char* location,
- bool is_heap_oom = false);
+ bool take_snapshot = false);
static void InitializePlatform(v8::Platform* platform);
static void ShutdownPlatform();
diff --git a/deps/v8/src/vm-state-inl.h b/deps/v8/src/vm-state-inl.h
index 6533aa1817..c8bd4e8082 100644
--- a/deps/v8/src/vm-state-inl.h
+++ b/deps/v8/src/vm-state-inl.h
@@ -40,11 +40,8 @@ inline const char* StateToString(StateTag state) {
template <StateTag Tag>
VMState<Tag>::VMState(Isolate* isolate)
: isolate_(isolate), previous_tag_(isolate->current_vm_state()) {
- if (previous_tag_ != EXTERNAL && Tag == EXTERNAL) {
- if (FLAG_log_timer_events) {
- LOG(isolate_, TimerEvent(Logger::START, TimerEventExternal::name()));
- }
- TRACE_EVENT_BEGIN0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.External");
+ if (FLAG_log_timer_events && previous_tag_ != EXTERNAL && Tag == EXTERNAL) {
+ LOG(isolate_, TimerEvent(Logger::START, TimerEventExternal::name()));
}
isolate_->set_current_vm_state(Tag);
}
@@ -52,11 +49,8 @@ VMState<Tag>::VMState(Isolate* isolate)
template <StateTag Tag>
VMState<Tag>::~VMState() {
- if (previous_tag_ != EXTERNAL && Tag == EXTERNAL) {
- if (FLAG_log_timer_events) {
- LOG(isolate_, TimerEvent(Logger::END, TimerEventExternal::name()));
- }
- TRACE_EVENT_END0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.External");
+ if (FLAG_log_timer_events && previous_tag_ != EXTERNAL && Tag == EXTERNAL) {
+ LOG(isolate_, TimerEvent(Logger::END, TimerEventExternal::name()));
}
isolate_->set_current_vm_state(previous_tag_);
}
@@ -64,16 +58,18 @@ VMState<Tag>::~VMState() {
ExternalCallbackScope::ExternalCallbackScope(Isolate* isolate, Address callback)
: isolate_(isolate),
callback_(callback),
- previous_scope_(isolate->external_callback_scope()),
- timer_(&isolate->counters()->runtime_call_stats()->ExternalCallback,
- isolate->counters()->runtime_call_stats()->current_timer()) {
+ previous_scope_(isolate->external_callback_scope()) {
#ifdef USE_SIMULATOR
scope_address_ = Simulator::current(isolate)->get_sp();
#endif
isolate_->set_external_callback_scope(this);
if (FLAG_runtime_call_stats) {
- isolate_->counters()->runtime_call_stats()->Enter(&timer_);
+ RuntimeCallStats* stats = isolate->counters()->runtime_call_stats();
+ timer_.Initialize(&stats->ExternalCallback, stats->current_timer());
+ stats->Enter(&timer_);
}
+ TRACE_EVENT_BEGIN0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"),
+ "V8.ExternalCallback");
}
ExternalCallbackScope::~ExternalCallbackScope() {
@@ -81,6 +77,8 @@ ExternalCallbackScope::~ExternalCallbackScope() {
isolate_->counters()->runtime_call_stats()->Leave(&timer_);
}
isolate_->set_external_callback_scope(previous_scope_);
+ TRACE_EVENT_END0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"),
+ "V8.ExternalCallback");
}
Address ExternalCallbackScope::scope_address() {
diff --git a/deps/v8/src/wasm/asm-wasm-builder.cc b/deps/v8/src/wasm/asm-wasm-builder.cc
index ee5427b174..d16d3a8bdd 100644
--- a/deps/v8/src/wasm/asm-wasm-builder.cc
+++ b/deps/v8/src/wasm/asm-wasm-builder.cc
@@ -4,6 +4,12 @@
#include "src/v8.h"
+// Required to get M_E etc. in MSVC.
+#if defined(_WIN32)
+#define _USE_MATH_DEFINES
+#endif
+#include <math.h>
+
#include "src/wasm/asm-wasm-builder.h"
#include "src/wasm/wasm-macro-gen.h"
#include "src/wasm/wasm-opcodes.h"
@@ -28,7 +34,7 @@ namespace wasm {
class AsmWasmBuilderImpl : public AstVisitor {
public:
AsmWasmBuilderImpl(Isolate* isolate, Zone* zone, FunctionLiteral* literal,
- Handle<Object> foreign)
+ Handle<Object> foreign, AsmTyper* typer)
: local_variables_(HashMap::PointersMatch,
ZoneHashMap::kDefaultHashMapCapacity,
ZoneAllocationPolicy(zone)),
@@ -46,6 +52,7 @@ class AsmWasmBuilderImpl : public AstVisitor {
isolate_(isolate),
zone_(zone),
foreign_(foreign),
+ typer_(typer),
cache_(TypeCache::Get()),
breakable_blocks_(zone),
block_size_(0),
@@ -59,12 +66,10 @@ class AsmWasmBuilderImpl : public AstVisitor {
}
void InitializeInitFunction() {
- unsigned char init[] = "__init__";
init_function_index_ = builder_->AddFunction();
current_function_builder_ = builder_->FunctionAt(init_function_index_);
- current_function_builder_->SetName(init, 8);
current_function_builder_->ReturnType(kAstStmt);
- current_function_builder_->Exported(1);
+ builder_->MarkStartFunction(init_function_index_);
current_function_builder_ = nullptr;
}
@@ -133,13 +138,14 @@ class AsmWasmBuilderImpl : public AstVisitor {
: builder_(builder) {
builder_->breakable_blocks_.push_back(std::make_pair(stmt, is_loop));
builder_->current_function_builder_->Emit(opcode);
- index_ = builder_->current_function_builder_->EmitEditableImmediate(0);
+ index_ =
+ builder_->current_function_builder_->EmitEditableVarIntImmediate();
prev_block_size_ = builder_->block_size_;
builder_->block_size_ = initial_block_size;
}
~BlockVisitor() {
- builder_->current_function_builder_->EditImmediate(index_,
- builder_->block_size_);
+ builder_->current_function_builder_->EditVarIntImmediate(
+ index_, builder_->block_size_);
builder_->block_size_ = prev_block_size_;
builder_->breakable_blocks_.pop_back();
}
@@ -188,7 +194,7 @@ class AsmWasmBuilderImpl : public AstVisitor {
}
}
DCHECK(i >= 0);
- current_function_builder_->EmitWithU8(kExprBr, block_distance);
+ current_function_builder_->EmitWithVarInt(kExprBr, block_distance);
current_function_builder_->Emit(kExprNop);
}
@@ -211,7 +217,7 @@ class AsmWasmBuilderImpl : public AstVisitor {
}
}
DCHECK(i >= 0);
- current_function_builder_->EmitWithU8(kExprBr, block_distance);
+ current_function_builder_->EmitWithVarInt(kExprBr, block_distance);
current_function_builder_->Emit(kExprNop);
}
@@ -232,7 +238,8 @@ class AsmWasmBuilderImpl : public AstVisitor {
void SetLocalTo(uint16_t index, int value) {
current_function_builder_->Emit(kExprSetLocal);
AddLeb128(index, true);
- byte code[] = {WASM_I32(value)};
+ // TODO(bradnelson): variable size
+ byte code[] = {WASM_I32V(value)};
current_function_builder_->EmitCode(code, sizeof(code));
block_size_++;
}
@@ -286,7 +293,7 @@ class AsmWasmBuilderImpl : public AstVisitor {
RECURSE(Visit(stmt->body()));
current_function_builder_->Emit(kExprIf);
RECURSE(Visit(stmt->cond()));
- current_function_builder_->EmitWithU8(kExprBr, 0);
+ current_function_builder_->EmitWithVarInt(kExprBr, 0);
current_function_builder_->Emit(kExprNop);
}
@@ -296,7 +303,7 @@ class AsmWasmBuilderImpl : public AstVisitor {
1);
current_function_builder_->Emit(kExprIf);
RECURSE(Visit(stmt->cond()));
- current_function_builder_->EmitWithU8(kExprBr, 0);
+ current_function_builder_->EmitWithVarInt(kExprBr, 0);
RECURSE(Visit(stmt->body()));
}
@@ -311,9 +318,9 @@ class AsmWasmBuilderImpl : public AstVisitor {
if (stmt->cond() != nullptr) {
block_size_++;
current_function_builder_->Emit(kExprIf);
- current_function_builder_->Emit(kExprBoolNot);
+ current_function_builder_->Emit(kExprI32Eqz);
RECURSE(Visit(stmt->cond()));
- current_function_builder_->EmitWithU8(kExprBr, 1);
+ current_function_builder_->EmitWithVarInt(kExprBr, 1);
current_function_builder_->Emit(kExprNop);
}
if (stmt->body() != nullptr) {
@@ -325,7 +332,7 @@ class AsmWasmBuilderImpl : public AstVisitor {
RECURSE(Visit(stmt->next()));
}
block_size_++;
- current_function_builder_->EmitWithU8(kExprBr, 0);
+ current_function_builder_->EmitWithVarInt(kExprBr, 0);
current_function_builder_->Emit(kExprNop);
}
@@ -371,6 +378,58 @@ class AsmWasmBuilderImpl : public AstVisitor {
RECURSE(Visit(expr->else_expression()));
}
+ bool VisitStdlibConstant(Variable* var) {
+ AsmTyper::StandardMember standard_object =
+ typer_->VariableAsStandardMember(var);
+ double value;
+ switch (standard_object) {
+ case AsmTyper::kInfinity: {
+ value = std::numeric_limits<double>::infinity();
+ break;
+ }
+ case AsmTyper::kNaN: {
+ value = std::numeric_limits<double>::quiet_NaN();
+ break;
+ }
+ case AsmTyper::kMathE: {
+ value = M_E;
+ break;
+ }
+ case AsmTyper::kMathLN10: {
+ value = M_LN10;
+ break;
+ }
+ case AsmTyper::kMathLN2: {
+ value = M_LN2;
+ break;
+ }
+ case AsmTyper::kMathLOG10E: {
+ value = M_LOG10E;
+ break;
+ }
+ case AsmTyper::kMathLOG2E: {
+ value = M_LOG2E;
+ break;
+ }
+ case AsmTyper::kMathPI: {
+ value = M_PI;
+ break;
+ }
+ case AsmTyper::kMathSQRT1_2: {
+ value = M_SQRT1_2;
+ break;
+ }
+ case AsmTyper::kMathSQRT2: {
+ value = M_SQRT2;
+ break;
+ }
+ default: { return false; }
+ }
+ byte code[] = {WASM_F64(value)};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ return true;
+ }
+
void VisitVariableProxy(VariableProxy* expr) {
if (in_function_) {
Variable* var = expr->var();
@@ -382,6 +441,9 @@ class AsmWasmBuilderImpl : public AstVisitor {
}
is_set_op_ = false;
} else {
+ if (VisitStdlibConstant(var)) {
+ return;
+ }
if (var->IsContextSlot()) {
current_function_builder_->Emit(kExprLoadGlobal);
} else {
@@ -399,32 +461,32 @@ class AsmWasmBuilderImpl : public AstVisitor {
}
void VisitLiteral(Literal* expr) {
- if (in_function_) {
- if (expr->raw_value()->IsNumber()) {
- LocalType type = TypeOf(expr);
- switch (type) {
- case kAstI32: {
- int val = static_cast<int>(expr->raw_value()->AsNumber());
- byte code[] = {WASM_I32(val)};
- current_function_builder_->EmitCode(code, sizeof(code));
- break;
- }
- case kAstF32: {
- float val = static_cast<float>(expr->raw_value()->AsNumber());
- byte code[] = {WASM_F32(val)};
- current_function_builder_->EmitCode(code, sizeof(code));
- break;
- }
- case kAstF64: {
- double val = static_cast<double>(expr->raw_value()->AsNumber());
- byte code[] = {WASM_F64(val)};
- current_function_builder_->EmitCode(code, sizeof(code));
- break;
- }
- default:
- UNREACHABLE();
- }
+ Handle<Object> value = expr->value();
+ if (!in_function_ || !value->IsNumber()) {
+ return;
+ }
+ Type* type = expr->bounds().upper;
+ if (type->Is(cache_.kAsmSigned)) {
+ int32_t i = 0;
+ if (!value->ToInt32(&i)) {
+ UNREACHABLE();
}
+ byte code[] = {WASM_I32V(i)};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ } else if (type->Is(cache_.kAsmUnsigned) || type->Is(cache_.kAsmFixnum)) {
+ uint32_t u = 0;
+ if (!value->ToUint32(&u)) {
+ UNREACHABLE();
+ }
+ int32_t i = static_cast<int32_t>(u);
+ byte code[] = {WASM_I32V(i)};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ } else if (type->Is(cache_.kAsmDouble)) {
+ double val = expr->raw_value()->AsNumber();
+ byte code[] = {WASM_F64(val)};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ } else {
+ UNREACHABLE();
}
}
@@ -589,29 +651,33 @@ class AsmWasmBuilderImpl : public AstVisitor {
UnLoadInitFunction();
return;
}
- // TODO(bradnelson): Get rid of this.
- if (TypeOf(expr->value()) == kAstStmt) {
- Property* prop = expr->value()->AsProperty();
- if (prop != nullptr) {
- VariableProxy* vp = prop->obj()->AsVariableProxy();
- if (vp != nullptr && vp->var()->IsParameter() &&
- vp->var()->index() == 1) {
- VariableProxy* target = expr->target()->AsVariableProxy();
- if (target->bounds().lower->Is(Type::Function())) {
- const AstRawString* name =
- prop->key()->AsLiteral()->AsRawPropertyName();
- imported_function_table_.AddImport(
- target->var(), name->raw_data(), name->length());
- }
- }
- }
- ArrayLiteral* funcs = expr->value()->AsArrayLiteral();
- if (funcs != nullptr &&
- funcs->bounds().lower->AsArray()->Element()->IsFunction()) {
+ Property* prop = expr->value()->AsProperty();
+ if (prop != nullptr) {
+ VariableProxy* vp = prop->obj()->AsVariableProxy();
+ if (vp != nullptr && vp->var()->IsParameter() &&
+ vp->var()->index() == 1) {
VariableProxy* target = expr->target()->AsVariableProxy();
- DCHECK_NOT_NULL(target);
- AddFunctionTable(target, funcs);
+ if (target->bounds().lower->Is(Type::Function())) {
+ const AstRawString* name =
+ prop->key()->AsLiteral()->AsRawPropertyName();
+ imported_function_table_.AddImport(target->var(), name->raw_data(),
+ name->length());
+ }
}
+ // Property values in module scope don't emit code, so return.
+ return;
+ }
+ ArrayLiteral* funcs = expr->value()->AsArrayLiteral();
+ if (funcs != nullptr &&
+ funcs->bounds().lower->AsArray()->Element()->IsFunction()) {
+ VariableProxy* target = expr->target()->AsVariableProxy();
+ DCHECK_NOT_NULL(target);
+ AddFunctionTable(target, funcs);
+ // Only add to the function table. No init needed.
+ return;
+ }
+ if (expr->value()->IsCallNew()) {
+ // No init code to emit for CallNew nodes.
return;
}
in_init = true;
@@ -630,6 +696,12 @@ class AsmWasmBuilderImpl : public AstVisitor {
is_set_op_ = true;
RECURSE(Visit(expr->target()));
DCHECK(!is_set_op_);
+ // Assignment to heapf32 from float64 converts.
+ if (TypeOf(expr->value()) == kAstF64 && expr->target()->IsProperty() &&
+ expr->target()->AsProperty()->obj()->bounds().lower->Is(
+ cache_.kFloat32Array)) {
+ current_function_builder_->Emit(kExprF32ConvertF64);
+ }
RECURSE(Visit(expr->value()));
if (in_init) {
UnLoadInitFunction();
@@ -672,7 +744,8 @@ class AsmWasmBuilderImpl : public AstVisitor {
Handle<Object> nvalue = maybe_nvalue.ToHandleChecked();
if (nvalue->IsNumber()) {
int32_t val = static_cast<int32_t>(nvalue->Number());
- byte code[] = {WASM_I32(val)};
+ // TODO(bradnelson): variable size
+ byte code[] = {WASM_I32V(val)};
current_function_builder_->EmitCode(code, sizeof(code));
return;
}
@@ -684,7 +757,7 @@ class AsmWasmBuilderImpl : public AstVisitor {
byte code[] = {WASM_F64(std::numeric_limits<double>::quiet_NaN())};
current_function_builder_->EmitCode(code, sizeof(code));
} else {
- byte code[] = {WASM_I32(0)};
+ byte code[] = {WASM_I32V_1(0)};
current_function_builder_->EmitCode(code, sizeof(code));
}
}
@@ -725,9 +798,9 @@ class AsmWasmBuilderImpl : public AstVisitor {
} else {
UNREACHABLE();
}
- current_function_builder_->EmitWithU8(
- WasmOpcodes::LoadStoreOpcodeOf(mtype, is_set_op_),
- WasmOpcodes::LoadStoreAccessOf(false));
+ // TODO(titzer): use special asm-compatibility opcodes?
+ current_function_builder_->EmitWithU8U8(
+ WasmOpcodes::LoadStoreOpcodeOf(mtype, is_set_op_), 0, 0);
is_set_op_ = false;
if (size == 1) {
// Allow more general expression in byte arrays than the spec
@@ -742,7 +815,8 @@ class AsmWasmBuilderImpl : public AstVisitor {
DCHECK(value->raw_value()->IsNumber());
DCHECK_EQ(kAstI32, TypeOf(value));
int val = static_cast<int>(value->raw_value()->AsNumber());
- byte code[] = {WASM_I32(val * size)};
+ // TODO(bradnelson): variable size
+ byte code[] = {WASM_I32V(val * size)};
current_function_builder_->EmitCode(code, sizeof(code));
return;
}
@@ -765,11 +839,209 @@ class AsmWasmBuilderImpl : public AstVisitor {
UNREACHABLE();
}
+ bool VisitStdlibFunction(Call* call, VariableProxy* expr) {
+ Variable* var = expr->var();
+ AsmTyper::StandardMember standard_object =
+ typer_->VariableAsStandardMember(var);
+ ZoneList<Expression*>* args = call->arguments();
+ LocalType call_type = TypeOf(call);
+ switch (standard_object) {
+ case AsmTyper::kNone: {
+ return false;
+ }
+ case AsmTyper::kMathAcos: {
+ DCHECK_EQ(kAstF64, call_type);
+ current_function_builder_->Emit(kExprF64Acos);
+ break;
+ }
+ case AsmTyper::kMathAsin: {
+ DCHECK_EQ(kAstF64, call_type);
+ current_function_builder_->Emit(kExprF64Asin);
+ break;
+ }
+ case AsmTyper::kMathAtan: {
+ DCHECK_EQ(kAstF64, call_type);
+ current_function_builder_->Emit(kExprF64Atan);
+ break;
+ }
+ case AsmTyper::kMathCos: {
+ DCHECK_EQ(kAstF64, call_type);
+ current_function_builder_->Emit(kExprF64Cos);
+ break;
+ }
+ case AsmTyper::kMathSin: {
+ DCHECK_EQ(kAstF64, call_type);
+ current_function_builder_->Emit(kExprF64Sin);
+ break;
+ }
+ case AsmTyper::kMathTan: {
+ DCHECK_EQ(kAstF64, call_type);
+ current_function_builder_->Emit(kExprF64Tan);
+ break;
+ }
+ case AsmTyper::kMathExp: {
+ DCHECK_EQ(kAstF64, call_type);
+ current_function_builder_->Emit(kExprF64Exp);
+ break;
+ }
+ case AsmTyper::kMathLog: {
+ DCHECK_EQ(kAstF64, call_type);
+ current_function_builder_->Emit(kExprF64Log);
+ break;
+ }
+ case AsmTyper::kMathCeil: {
+ if (call_type == kAstF32) {
+ current_function_builder_->Emit(kExprF32Ceil);
+ } else if (call_type == kAstF64) {
+ current_function_builder_->Emit(kExprF64Ceil);
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+ case AsmTyper::kMathFloor: {
+ if (call_type == kAstF32) {
+ current_function_builder_->Emit(kExprF32Floor);
+ } else if (call_type == kAstF64) {
+ current_function_builder_->Emit(kExprF64Floor);
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+ case AsmTyper::kMathSqrt: {
+ if (call_type == kAstF32) {
+ current_function_builder_->Emit(kExprF32Sqrt);
+ } else if (call_type == kAstF64) {
+ current_function_builder_->Emit(kExprF64Sqrt);
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+ case AsmTyper::kMathAbs: {
+ // TODO(bradnelson): Should this be cast to float?
+ if (call_type == kAstI32) {
+ current_function_builder_->Emit(kExprIfElse);
+ current_function_builder_->Emit(kExprI32LtS);
+ Visit(args->at(0));
+ byte code[] = {WASM_I8(0)};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ current_function_builder_->Emit(kExprI32Sub);
+ current_function_builder_->EmitCode(code, sizeof(code));
+ Visit(args->at(0));
+ } else if (call_type == kAstF32) {
+ current_function_builder_->Emit(kExprF32Abs);
+ } else if (call_type == kAstF64) {
+ current_function_builder_->Emit(kExprF64Abs);
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+ case AsmTyper::kMathMin: {
+ // TODO(bradnelson): Change wasm to match Math.min in asm.js mode.
+ if (call_type == kAstI32) {
+ current_function_builder_->Emit(kExprIfElse);
+ current_function_builder_->Emit(kExprI32LeS);
+ Visit(args->at(0));
+ Visit(args->at(1));
+ } else if (call_type == kAstF32) {
+ current_function_builder_->Emit(kExprF32Min);
+ } else if (call_type == kAstF64) {
+ current_function_builder_->Emit(kExprF64Min);
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+ case AsmTyper::kMathMax: {
+ // TODO(bradnelson): Change wasm to match Math.max in asm.js mode.
+ if (call_type == kAstI32) {
+ current_function_builder_->Emit(kExprIfElse);
+ current_function_builder_->Emit(kExprI32GtS);
+ Visit(args->at(0));
+ Visit(args->at(1));
+ } else if (call_type == kAstF32) {
+ current_function_builder_->Emit(kExprF32Max);
+ } else if (call_type == kAstF64) {
+ current_function_builder_->Emit(kExprF64Max);
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+ case AsmTyper::kMathAtan2: {
+ DCHECK_EQ(kAstF64, call_type);
+ current_function_builder_->Emit(kExprF64Atan2);
+ break;
+ }
+ case AsmTyper::kMathPow: {
+ DCHECK_EQ(kAstF64, call_type);
+ current_function_builder_->Emit(kExprF64Pow);
+ break;
+ }
+ case AsmTyper::kMathImul: {
+ current_function_builder_->Emit(kExprI32Mul);
+ break;
+ }
+ case AsmTyper::kMathFround: {
+ DCHECK(args->length() == 1);
+ Literal* literal = args->at(0)->AsLiteral();
+ if (literal != nullptr) {
+ if (literal->raw_value()->IsNumber()) {
+ float val = static_cast<float>(literal->raw_value()->AsNumber());
+ byte code[] = {WASM_F32(val)};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ return true;
+ }
+ }
+ switch (TypeIndexOf(args->at(0))) {
+ case kInt32:
+ case kFixnum:
+ current_function_builder_->Emit(kExprF32SConvertI32);
+ break;
+ case kUint32:
+ current_function_builder_->Emit(kExprF32UConvertI32);
+ break;
+ case kFloat32:
+ break;
+ case kFloat64:
+ current_function_builder_->Emit(kExprF32ConvertF64);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
+ default: {
+ UNREACHABLE();
+ break;
+ }
+ }
+ VisitCallArgs(call);
+ return true;
+ }
+
+ void VisitCallArgs(Call* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ for (int i = 0; i < args->length(); ++i) {
+ Expression* arg = args->at(i);
+ RECURSE(Visit(arg));
+ }
+ }
+
void VisitCall(Call* expr) {
Call::CallType call_type = expr->GetCallType(isolate_);
switch (call_type) {
case Call::OTHER_CALL: {
DCHECK(in_function_);
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ if (proxy != nullptr) {
+ if (VisitStdlibFunction(expr, proxy)) {
+ return;
+ }
+ }
uint16_t index;
VariableProxy* vp = expr->expression()->AsVariableProxy();
if (vp != nullptr &&
@@ -802,10 +1074,11 @@ class AsmWasmBuilderImpl : public AstVisitor {
VariableProxy* var = p->obj()->AsVariableProxy();
DCHECK_NOT_NULL(var);
FunctionTableIndices* indices = LookupFunctionTable(var->var());
- current_function_builder_->EmitWithU8(kExprCallIndirect,
- indices->signature_index);
+ current_function_builder_->EmitWithVarInt(kExprCallIndirect,
+ indices->signature_index);
current_function_builder_->Emit(kExprI32Add);
- byte code[] = {WASM_I32(indices->start_index)};
+ // TODO(bradnelson): variable size
+ byte code[] = {WASM_I32V(indices->start_index)};
current_function_builder_->EmitCode(code, sizeof(code));
RECURSE(Visit(p->key()));
break;
@@ -813,11 +1086,7 @@ class AsmWasmBuilderImpl : public AstVisitor {
default:
UNREACHABLE();
}
- ZoneList<Expression*>* args = expr->arguments();
- for (int i = 0; i < args->length(); ++i) {
- Expression* arg = args->at(i);
- RECURSE(Visit(arg));
- }
+ VisitCallArgs(expr);
}
void VisitCallNew(CallNew* expr) { UNREACHABLE(); }
@@ -828,7 +1097,7 @@ class AsmWasmBuilderImpl : public AstVisitor {
switch (expr->op()) {
case Token::NOT: {
DCHECK_EQ(kAstI32, TypeOf(expr->expression()));
- current_function_builder_->Emit(kExprBoolNot);
+ current_function_builder_->Emit(kExprI32Eqz);
break;
}
default:
@@ -1022,7 +1291,7 @@ class AsmWasmBuilderImpl : public AstVisitor {
} else if (type == kUint32) {
current_function_builder_->Emit(kExprI32RemU);
} else if (type == kFloat64) {
- ModF64(expr);
+ current_function_builder_->Emit(kExprF64Mod);
return;
} else {
UNREACHABLE();
@@ -1030,7 +1299,7 @@ class AsmWasmBuilderImpl : public AstVisitor {
break;
}
case Token::COMMA: {
- current_function_builder_->EmitWithU8(kExprBlock, 2);
+ current_function_builder_->EmitWithVarInt(kExprBlock, 2);
break;
}
default:
@@ -1041,32 +1310,6 @@ class AsmWasmBuilderImpl : public AstVisitor {
}
}
- void ModF64(BinaryOperation* expr) {
- current_function_builder_->EmitWithU8(kExprBlock, 3);
- uint16_t index_0 = current_function_builder_->AddLocal(kAstF64);
- uint16_t index_1 = current_function_builder_->AddLocal(kAstF64);
- current_function_builder_->Emit(kExprSetLocal);
- AddLeb128(index_0, true);
- RECURSE(Visit(expr->left()));
- current_function_builder_->Emit(kExprSetLocal);
- AddLeb128(index_1, true);
- RECURSE(Visit(expr->right()));
- current_function_builder_->Emit(kExprF64Sub);
- current_function_builder_->Emit(kExprGetLocal);
- AddLeb128(index_0, true);
- current_function_builder_->Emit(kExprF64Mul);
- current_function_builder_->Emit(kExprGetLocal);
- AddLeb128(index_1, true);
- // Use trunc instead of two casts
- current_function_builder_->Emit(kExprF64SConvertI32);
- current_function_builder_->Emit(kExprI32SConvertF64);
- current_function_builder_->Emit(kExprF64Div);
- current_function_builder_->Emit(kExprGetLocal);
- AddLeb128(index_0, true);
- current_function_builder_->Emit(kExprGetLocal);
- AddLeb128(index_1, true);
- }
-
void AddLeb128(uint32_t index, bool is_local) {
std::vector<uint8_t> index_vec = UnsignedLEB128From(index);
if (is_local) {
@@ -1262,6 +1505,7 @@ class AsmWasmBuilderImpl : public AstVisitor {
Isolate* isolate_;
Zone* zone_;
Handle<Object> foreign_;
+ AsmTyper* typer_;
TypeCache const& cache_;
ZoneVector<std::pair<BreakableStatement*, bool>> breakable_blocks_;
int block_size_;
@@ -1277,13 +1521,18 @@ class AsmWasmBuilderImpl : public AstVisitor {
};
AsmWasmBuilder::AsmWasmBuilder(Isolate* isolate, Zone* zone,
- FunctionLiteral* literal, Handle<Object> foreign)
- : isolate_(isolate), zone_(zone), literal_(literal), foreign_(foreign) {}
+ FunctionLiteral* literal, Handle<Object> foreign,
+ AsmTyper* typer)
+ : isolate_(isolate),
+ zone_(zone),
+ literal_(literal),
+ foreign_(foreign),
+ typer_(typer) {}
// TODO(aseemgarg): probably should take zone (to write wasm to) as input so
// that zone in constructor may be thrown away once wasm module is written.
WasmModuleIndex* AsmWasmBuilder::Run() {
- AsmWasmBuilderImpl impl(isolate_, zone_, literal_, foreign_);
+ AsmWasmBuilderImpl impl(isolate_, zone_, literal_, foreign_, typer_);
impl.Compile();
WasmModuleWriter* writer = impl.builder_->Build(zone_);
return writer->WriteTo(zone_);
diff --git a/deps/v8/src/wasm/asm-wasm-builder.h b/deps/v8/src/wasm/asm-wasm-builder.h
index 9b761f9040..09645ee3c4 100644
--- a/deps/v8/src/wasm/asm-wasm-builder.h
+++ b/deps/v8/src/wasm/asm-wasm-builder.h
@@ -7,6 +7,7 @@
#include "src/allocation.h"
#include "src/objects.h"
+#include "src/typing-asm.h"
#include "src/wasm/encoder.h"
#include "src/zone.h"
@@ -20,7 +21,7 @@ namespace wasm {
class AsmWasmBuilder {
public:
explicit AsmWasmBuilder(Isolate* isolate, Zone* zone, FunctionLiteral* root,
- Handle<Object> foreign);
+ Handle<Object> foreign, AsmTyper* typer);
WasmModuleIndex* Run();
private:
@@ -28,6 +29,7 @@ class AsmWasmBuilder {
Zone* zone_;
FunctionLiteral* literal_;
Handle<Object> foreign_;
+ AsmTyper* typer_;
};
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/ast-decoder.cc b/deps/v8/src/wasm/ast-decoder.cc
index c97c781c12..e2f6a046b3 100644
--- a/deps/v8/src/wasm/ast-decoder.cc
+++ b/deps/v8/src/wasm/ast-decoder.cc
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/base/platform/elapsed-timer.h"
#include "src/signature.h"
#include "src/bit-vector.h"
@@ -15,6 +14,8 @@
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
+#include "src/ostreams.h"
+
#include "src/compiler/wasm-compiler.h"
namespace v8 {
@@ -52,7 +53,6 @@ struct Production {
Tree* last() const { return index > 0 ? tree->children[index - 1] : nullptr; }
};
-
// An SsaEnv environment carries the current local variable renaming
// as well as the current effect and control dependency in the TF graph.
// It maintains a control state that tracks whether the environment
@@ -74,14 +74,12 @@ struct SsaEnv {
}
};
-
// An entry in the stack of blocks during decoding.
struct Block {
SsaEnv* ssa_env; // SSA renaming environment.
int stack_depth; // production stack depth.
};
-
// An entry in the stack of ifs during decoding.
struct IfEnv {
SsaEnv* false_env;
@@ -89,27 +87,27 @@ struct IfEnv {
SsaEnv** case_envs;
};
-
// Macros that build nodes only if there is a graph and the current SSA
// environment is reachable from start. This avoids problems with malformed
// TF graphs when decoding inputs that have unreachable code.
#define BUILD(func, ...) (build() ? builder_->func(__VA_ARGS__) : nullptr)
#define BUILD0(func) (build() ? builder_->func() : nullptr)
-
// Generic Wasm bytecode decoder with utilities for decoding operands,
// lengths, etc.
class WasmDecoder : public Decoder {
public:
- WasmDecoder() : Decoder(nullptr, nullptr), function_env_(nullptr) {}
- WasmDecoder(FunctionEnv* env, const byte* start, const byte* end)
- : Decoder(start, end), function_env_(env) {}
- FunctionEnv* function_env_;
-
- void Reset(FunctionEnv* function_env, const byte* start, const byte* end) {
- Decoder::Reset(start, end);
- function_env_ = function_env;
- }
+ WasmDecoder(ModuleEnv* module, FunctionSig* sig, const byte* start,
+ const byte* end)
+ : Decoder(start, end),
+ module_(module),
+ sig_(sig),
+ total_locals_(0),
+ local_types_(nullptr) {}
+ ModuleEnv* module_;
+ FunctionSig* sig_;
+ size_t total_locals_;
+ ZoneVector<LocalType>* local_types_;
byte ByteOperand(const byte* pc, const char* msg = "missing 1-byte operand") {
if ((pc + sizeof(byte)) >= limit_) {
@@ -136,8 +134,12 @@ class WasmDecoder : public Decoder {
}
inline bool Validate(const byte* pc, LocalIndexOperand& operand) {
- if (operand.index < function_env_->total_locals) {
- operand.type = function_env_->GetLocalType(operand.index);
+ if (operand.index < total_locals_) {
+ if (local_types_) {
+ operand.type = local_types_->at(operand.index);
+ } else {
+ operand.type = kAstStmt;
+ }
return true;
}
error(pc, pc + 1, "invalid local index");
@@ -145,9 +147,9 @@ class WasmDecoder : public Decoder {
}
inline bool Validate(const byte* pc, GlobalIndexOperand& operand) {
- ModuleEnv* m = function_env_->module;
- if (m && m->module && operand.index < m->module->globals->size()) {
- operand.machine_type = m->module->globals->at(operand.index).type;
+ ModuleEnv* m = module_;
+ if (m && m->module && operand.index < m->module->globals.size()) {
+ operand.machine_type = m->module->globals[operand.index].type;
operand.type = WasmOpcodes::LocalTypeFor(operand.machine_type);
return true;
}
@@ -156,9 +158,9 @@ class WasmDecoder : public Decoder {
}
inline bool Validate(const byte* pc, FunctionIndexOperand& operand) {
- ModuleEnv* m = function_env_->module;
- if (m && m->module && operand.index < m->module->functions->size()) {
- operand.sig = m->module->functions->at(operand.index).sig;
+ ModuleEnv* m = module_;
+ if (m && m->module && operand.index < m->module->functions.size()) {
+ operand.sig = m->module->functions[operand.index].sig;
return true;
}
error(pc, pc + 1, "invalid function index");
@@ -166,9 +168,9 @@ class WasmDecoder : public Decoder {
}
inline bool Validate(const byte* pc, SignatureIndexOperand& operand) {
- ModuleEnv* m = function_env_->module;
- if (m && m->module && operand.index < m->module->signatures->size()) {
- operand.sig = m->module->signatures->at(operand.index);
+ ModuleEnv* m = module_;
+ if (m && m->module && operand.index < m->module->signatures.size()) {
+ operand.sig = m->module->signatures[operand.index];
return true;
}
error(pc, pc + 1, "invalid signature index");
@@ -176,9 +178,9 @@ class WasmDecoder : public Decoder {
}
inline bool Validate(const byte* pc, ImportIndexOperand& operand) {
- ModuleEnv* m = function_env_->module;
- if (m && m->module && operand.index < m->module->import_table->size()) {
- operand.sig = m->module->import_table->at(operand.index).sig;
+ ModuleEnv* m = module_;
+ if (m && m->module && operand.index < m->module->import_table.size()) {
+ operand.sig = m->module->import_table[operand.index].sig;
return true;
}
error(pc, pc + 1, "invalid signature index");
@@ -195,26 +197,14 @@ class WasmDecoder : public Decoder {
return false;
}
- bool Validate(const byte* pc, TableSwitchOperand& operand,
+ bool Validate(const byte* pc, BranchTableOperand& operand,
size_t block_depth) {
- if (operand.table_count == 0) {
- error(pc, "tableswitch with 0 entries");
- return false;
- }
// Verify table.
- for (uint32_t i = 0; i < operand.table_count; i++) {
- uint16_t target = operand.read_entry(this, i);
- if (target >= 0x8000) {
- size_t depth = target - 0x8000;
- if (depth > block_depth) {
- error(operand.table + i * 2, "improper branch in tableswitch");
- return false;
- }
- } else {
- if (target >= operand.case_count) {
- error(operand.table + i * 2, "invalid case target in tableswitch");
- return false;
- }
+ for (uint32_t i = 0; i < operand.table_count + 1; i++) {
+ uint32_t target = operand.read_entry(this, i);
+ if (target >= block_depth) {
+ error(operand.table + i * 2, "improper branch in br_table");
+ return false;
}
}
return true;
@@ -262,27 +252,23 @@ class WasmDecoder : public Decoder {
case kExprCallFunction: {
FunctionIndexOperand operand(this, pc);
return static_cast<int>(
- function_env_->module->GetFunctionSignature(operand.index)
- ->parameter_count());
+ module_->GetFunctionSignature(operand.index)->parameter_count());
}
case kExprCallIndirect: {
SignatureIndexOperand operand(this, pc);
return 1 + static_cast<int>(
- function_env_->module->GetSignature(operand.index)
- ->parameter_count());
+ module_->GetSignature(operand.index)->parameter_count());
}
case kExprCallImport: {
ImportIndexOperand operand(this, pc);
return static_cast<int>(
- function_env_->module->GetImportSignature(operand.index)
- ->parameter_count());
+ module_->GetImportSignature(operand.index)->parameter_count());
}
case kExprReturn: {
- return static_cast<int>(function_env_->sig->return_count());
+ return static_cast<int>(sig_->return_count());
}
- case kExprTableSwitch: {
- TableSwitchOperand operand(this, pc);
- return 1 + operand.case_count;
+ case kExprBrTable: {
+ return 1;
}
#define DECLARE_OPCODE_CASE(name, opcode, sig) \
@@ -293,10 +279,13 @@ class WasmDecoder : public Decoder {
FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
FOREACH_MISC_MEM_OPCODE(DECLARE_OPCODE_CASE)
FOREACH_SIMPLE_OPCODE(DECLARE_OPCODE_CASE)
+ FOREACH_ASMJS_COMPAT_OPCODE(DECLARE_OPCODE_CASE)
#undef DECLARE_OPCODE_CASE
+ case kExprDeclLocals:
+ default:
+ UNREACHABLE();
+ return 0;
}
- UNREACHABLE();
- return 0;
}
int OpcodeLength(const byte* pc) {
@@ -343,16 +332,22 @@ class WasmDecoder : public Decoder {
LocalIndexOperand operand(this, pc);
return 1 + operand.length;
}
- case kExprTableSwitch: {
- TableSwitchOperand operand(this, pc);
+ case kExprBrTable: {
+ BranchTableOperand operand(this, pc);
+ return 1 + operand.length;
+ }
+ case kExprI32Const: {
+ ImmI32Operand operand(this, pc);
+ return 1 + operand.length;
+ }
+ case kExprI64Const: {
+ ImmI64Operand operand(this, pc);
return 1 + operand.length;
}
case kExprI8Const:
return 2;
- case kExprI32Const:
case kExprF32Const:
return 5;
- case kExprI64Const:
case kExprF64Const:
return 9;
@@ -365,35 +360,28 @@ class WasmDecoder : public Decoder {
// A shift-reduce-parser strategy for decoding Wasm code that uses an explicit
// shift-reduce strategy with multiple internal stacks.
-class LR_WasmDecoder : public WasmDecoder {
+class SR_WasmDecoder : public WasmDecoder {
public:
- LR_WasmDecoder(Zone* zone, TFBuilder* builder)
- : zone_(zone),
+ SR_WasmDecoder(Zone* zone, TFBuilder* builder, FunctionBody& body)
+ : WasmDecoder(body.module, body.sig, body.start, body.end),
+ zone_(zone),
builder_(builder),
+ base_(body.base),
+ local_type_vec_(zone),
trees_(zone),
stack_(zone),
blocks_(zone),
- ifs_(zone) {}
-
- TreeResult Decode(FunctionEnv* function_env, const byte* base, const byte* pc,
- const byte* end) {
- base::ElapsedTimer decode_timer;
- if (FLAG_trace_wasm_decode_time) {
- decode_timer.Start();
- }
- trees_.clear();
- stack_.clear();
- blocks_.clear();
- ifs_.clear();
+ ifs_(zone) {
+ local_types_ = &local_type_vec_;
+ }
- if (end < pc) {
- error(pc, "function body end < start");
+ TreeResult Decode() {
+ if (end_ < pc_) {
+ error(pc_, "function body end < start");
return result_;
}
- base_ = base;
- Reset(function_env, pc, end);
-
+ DecodeLocalDecls();
InitSsaEnv();
DecodeFunctionBody();
@@ -401,12 +389,12 @@ class LR_WasmDecoder : public WasmDecoder {
if (ok()) {
if (ssa_env_->go()) {
if (stack_.size() > 0) {
- error(stack_.back().pc(), end, "fell off end of code");
+ error(stack_.back().pc(), end_, "fell off end of code");
}
AddImplicitReturnAtEnd();
}
if (trees_.size() == 0) {
- if (function_env_->sig->return_count() > 0) {
+ if (sig_->return_count() > 0) {
error(start_, "no trees created");
}
} else {
@@ -415,15 +403,7 @@ class LR_WasmDecoder : public WasmDecoder {
}
if (ok()) {
- if (FLAG_trace_wasm_ast) {
- PrintAst(function_env, pc, end);
- }
- if (FLAG_trace_wasm_decode_time) {
- double ms = decode_timer.Elapsed().InMillisecondsF();
- PrintF("wasm-decode ok (%0.3f ms)\n\n", ms);
- } else {
- TRACE("wasm-decode ok\n\n");
- }
+ TRACE("wasm-decode ok\n");
} else {
TRACE("wasm-error module+%-6d func+%d: %s\n\n", baserel(error_pc_),
startrel(error_pc_), error_msg_.get());
@@ -432,6 +412,36 @@ class LR_WasmDecoder : public WasmDecoder {
return toResult(tree);
}
+ bool DecodeLocalDecls(AstLocalDecls& decls) {
+ DecodeLocalDecls();
+ if (failed()) return false;
+ decls.decls_encoded_size = pc_offset();
+ decls.total_local_count = 0;
+ decls.local_types.reserve(local_type_vec_.size());
+ for (size_t pos = 0; pos < local_type_vec_.size();) {
+ uint32_t count = 0;
+ LocalType type = local_type_vec_[pos];
+ while (pos < local_type_vec_.size() && local_type_vec_[pos] == type) {
+ pos++;
+ count++;
+ }
+ decls.total_local_count += count;
+ decls.local_types.push_back(std::pair<LocalType, uint32_t>(type, count));
+ }
+ return true;
+ }
+
+ BitVector* AnalyzeLoopAssignmentForTesting(const byte* pc,
+ size_t num_locals) {
+ total_locals_ = num_locals;
+ local_type_vec_.reserve(num_locals);
+ if (num_locals > local_type_vec_.size()) {
+ local_type_vec_.insert(local_type_vec_.end(),
+ num_locals - local_type_vec_.size(), kAstI32);
+ }
+ return AnalyzeLoopAssignment(pc);
+ }
+
private:
static const size_t kErrorMsgSize = 128;
@@ -442,6 +452,7 @@ class LR_WasmDecoder : public WasmDecoder {
SsaEnv* ssa_env_;
+ ZoneVector<LocalType> local_type_vec_;
ZoneVector<Tree*> trees_;
ZoneVector<Production> stack_;
ZoneVector<Block> blocks_;
@@ -450,8 +461,6 @@ class LR_WasmDecoder : public WasmDecoder {
inline bool build() { return builder_ && ssa_env_->go(); }
void InitSsaEnv() {
- FunctionSig* sig = function_env_->sig;
- int param_count = static_cast<int>(sig->parameter_count());
TFNode* start = nullptr;
SsaEnv* ssa_env = reinterpret_cast<SsaEnv*>(zone_->New(sizeof(SsaEnv)));
size_t size = sizeof(TFNode*) * EnvironmentCount();
@@ -459,50 +468,46 @@ class LR_WasmDecoder : public WasmDecoder {
ssa_env->locals =
size > 0 ? reinterpret_cast<TFNode**>(zone_->New(size)) : nullptr;
- int pos = 0;
if (builder_) {
- start = builder_->Start(param_count + 1);
- // Initialize parameters.
- for (int i = 0; i < param_count; i++) {
- ssa_env->locals[pos++] = builder_->Param(i, sig->GetParam(i));
- }
- // Initialize int32 locals.
- if (function_env_->local_i32_count > 0) {
- TFNode* zero = builder_->Int32Constant(0);
- for (uint32_t i = 0; i < function_env_->local_i32_count; i++) {
- ssa_env->locals[pos++] = zero;
- }
- }
- // Initialize int64 locals.
- if (function_env_->local_i64_count > 0) {
- TFNode* zero = builder_->Int64Constant(0);
- for (uint32_t i = 0; i < function_env_->local_i64_count; i++) {
- ssa_env->locals[pos++] = zero;
- }
+ start = builder_->Start(static_cast<int>(sig_->parameter_count() + 1));
+ // Initialize local variables.
+ uint32_t index = 0;
+ while (index < sig_->parameter_count()) {
+ ssa_env->locals[index] = builder_->Param(index, local_type_vec_[index]);
+ index++;
}
- // Initialize float32 locals.
- if (function_env_->local_f32_count > 0) {
- TFNode* zero = builder_->Float32Constant(0);
- for (uint32_t i = 0; i < function_env_->local_f32_count; i++) {
- ssa_env->locals[pos++] = zero;
+ while (index < local_type_vec_.size()) {
+ LocalType type = local_type_vec_[index];
+ TFNode* node = DefaultValue(type);
+ while (index < local_type_vec_.size() &&
+ local_type_vec_[index] == type) {
+ // Do a whole run of like-typed locals at a time.
+ ssa_env->locals[index++] = node;
}
}
- // Initialize float64 locals.
- if (function_env_->local_f64_count > 0) {
- TFNode* zero = builder_->Float64Constant(0);
- for (uint32_t i = 0; i < function_env_->local_f64_count; i++) {
- ssa_env->locals[pos++] = zero;
- }
- }
- DCHECK_EQ(function_env_->total_locals, pos);
- DCHECK_EQ(EnvironmentCount(), pos);
- builder_->set_module(function_env_->module);
+ builder_->set_module(module_);
}
ssa_env->control = start;
ssa_env->effect = start;
SetEnv("initial", ssa_env);
}
+ TFNode* DefaultValue(LocalType type) {
+ switch (type) {
+ case kAstI32:
+ return builder_->Int32Constant(0);
+ case kAstI64:
+ return builder_->Int64Constant(0);
+ case kAstF32:
+ return builder_->Float32Constant(0);
+ case kAstF64:
+ return builder_->Float64Constant(0);
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
+ }
+
void Leaf(LocalType type, TFNode* node = nullptr) {
size_t size = sizeof(Tree);
Tree* tree = reinterpret_cast<Tree*>(zone_->New(size));
@@ -561,6 +566,45 @@ class LR_WasmDecoder : public WasmDecoder {
return bytes;
}
+ // Decodes the locals declarations, if any, populating {local_type_vec_}.
+ void DecodeLocalDecls() {
+ DCHECK_EQ(0, local_type_vec_.size());
+ // Initialize {local_type_vec} from signature.
+ if (sig_) {
+ local_type_vec_.reserve(sig_->parameter_count());
+ for (size_t i = 0; i < sig_->parameter_count(); i++) {
+ local_type_vec_.push_back(sig_->GetParam(i));
+ }
+ }
+ // Decode local declarations, if any.
+ int length;
+ uint32_t entries = consume_u32v(&length, "local decls count");
+ while (entries-- > 0 && pc_ < limit_) {
+ uint32_t count = consume_u32v(&length, "local count");
+ byte code = consume_u8("local type");
+ LocalType type;
+ switch (code) {
+ case kLocalI32:
+ type = kAstI32;
+ break;
+ case kLocalI64:
+ type = kAstI64;
+ break;
+ case kLocalF32:
+ type = kAstF32;
+ break;
+ case kLocalF64:
+ type = kAstF64;
+ break;
+ default:
+ error(pc_ - 1, "invalid local type");
+ return;
+ }
+ local_type_vec_.insert(local_type_vec_.end(), count, type);
+ }
+ total_locals_ = local_type_vec_.size();
+ }
+
// Decodes the body of a function, producing reduced trees into {result}.
void DecodeFunctionBody() {
TRACE("wasm-decode %p...%p (%d bytes) %s\n",
@@ -621,7 +665,7 @@ class LR_WasmDecoder : public WasmDecoder {
PushBlock(break_env);
SsaEnv* cont_env = Steal(break_env);
// The continue environment is the inner environment.
- PrepareForLoop(cont_env);
+ PrepareForLoop(pc_, cont_env);
SetEnv("loop:start", Split(cont_env));
if (ssa_env_->go()) ssa_env_->state = SsaEnv::kReached;
PushBlock(cont_env);
@@ -655,16 +699,16 @@ class LR_WasmDecoder : public WasmDecoder {
len = 1 + operand.length;
break;
}
- case kExprTableSwitch: {
- TableSwitchOperand operand(this, pc_);
+ case kExprBrTable: {
+ BranchTableOperand operand(this, pc_);
if (Validate(pc_, operand, blocks_.size())) {
- Shift(kAstEnd, 1 + operand.case_count);
+ Shift(kAstEnd, 1);
}
len = 1 + operand.length;
break;
}
case kExprReturn: {
- int count = static_cast<int>(function_env_->sig->return_count());
+ int count = static_cast<int>(sig_->return_count());
if (count == 0) {
BUILD(Return, 0, builder_->Buffer(0));
ssa_env_->Kill();
@@ -821,6 +865,7 @@ class LR_WasmDecoder : public WasmDecoder {
len = 1 + operand.length;
break;
}
+ case kExprDeclLocals:
default:
error("Invalid opcode");
return;
@@ -853,7 +898,7 @@ class LR_WasmDecoder : public WasmDecoder {
}
void AddImplicitReturnAtEnd() {
- int retcount = static_cast<int>(function_env_->sig->return_count());
+ int retcount = static_cast<int>(sig_->return_count());
if (retcount == 0) {
BUILD0(ReturnVoid);
return;
@@ -872,7 +917,7 @@ class LR_WasmDecoder : public WasmDecoder {
for (int index = 0; index < retcount; index++) {
Tree* tree = trees_[trees_.size() - 1 - index];
if (buffer) buffer[index] = tree->node;
- LocalType expected = function_env_->sig->GetReturn(index);
+ LocalType expected = sig_->GetReturn(index);
if (tree->type != expected) {
error(limit_, tree->pc,
"ImplicitReturn[%d] expected type %s, found %s of type %s", index,
@@ -1043,73 +1088,42 @@ class LR_WasmDecoder : public WasmDecoder {
}
break;
}
- case kExprTableSwitch: {
+ case kExprBrTable: {
if (p->index == 1) {
// Switch key finished.
TypeCheckLast(p, kAstI32);
if (failed()) break;
- TableSwitchOperand operand(this, p->pc());
+ BranchTableOperand operand(this, p->pc());
DCHECK(Validate(p->pc(), operand, blocks_.size()));
- // Build the switch only if it has more than just a default target.
- bool build_switch = operand.table_count > 1;
+ // Build a switch only if it has more than just a default target.
+ bool build_switch = operand.table_count > 0;
TFNode* sw = nullptr;
- if (build_switch)
- sw = BUILD(Switch, operand.table_count, p->last()->node);
-
- // Allocate environments for each case.
- SsaEnv** case_envs = zone_->NewArray<SsaEnv*>(operand.case_count);
- for (uint32_t i = 0; i < operand.case_count; i++) {
- case_envs[i] = UnreachableEnv();
+ if (build_switch) {
+ sw = BUILD(Switch, operand.table_count + 1, p->last()->node);
}
- ifs_.push_back({nullptr, nullptr, case_envs});
- SsaEnv* break_env = ssa_env_;
- PushBlock(break_env);
- SsaEnv* copy = Steal(break_env);
- ssa_env_ = copy;
-
- // Build the environments for each case based on the table.
- for (uint32_t i = 0; i < operand.table_count; i++) {
- uint16_t target = operand.read_entry(this, i);
+ // Process the targets of the break table.
+ SsaEnv* prev = ssa_env_;
+ SsaEnv* copy = Steal(prev);
+ for (uint32_t i = 0; i < operand.table_count + 1; i++) {
+ uint32_t target = operand.read_entry(this, i);
SsaEnv* env = copy;
if (build_switch) {
- env = Split(env);
- env->control = (i == operand.table_count - 1)
- ? BUILD(IfDefault, sw)
- : BUILD(IfValue, i, sw);
- }
- if (target >= 0x8000) {
- // Targets an outer block.
- int depth = target - 0x8000;
- SsaEnv* tenv = blocks_[blocks_.size() - depth - 1].ssa_env;
- Goto(env, tenv);
- } else {
- // Targets a case.
- Goto(env, case_envs[target]);
+ ssa_env_ = env = Split(env);
+ env->control = i == operand.table_count ? BUILD(IfDefault, sw)
+ : BUILD(IfValue, i, sw);
}
+ SsaEnv* tenv = blocks_[blocks_.size() - target - 1].ssa_env;
+ Goto(env, tenv);
}
- }
-
- if (p->done()) {
- // Last case. Fall through to the end.
- Block* block = &blocks_.back();
- if (p->index > 1) ReduceBreakToExprBlock(p, block);
- SsaEnv* next = block->ssa_env;
- blocks_.pop_back();
- ifs_.pop_back();
- SetEnv("switch:end", next);
- } else {
- // Interior case. Maybe fall through to the next case.
- SsaEnv* next = ifs_.back().case_envs[p->index - 1];
- if (p->index > 1 && ssa_env_->go()) Goto(ssa_env_, next);
- SetEnv("switch:case", next);
+ ssa_env_ = prev;
}
break;
}
case kExprReturn: {
- TypeCheckLast(p, function_env_->sig->GetReturn(p->index - 1));
+ TypeCheckLast(p, sig_->GetReturn(p->index - 1));
if (p->done()) {
if (build()) {
int count = p->tree->count;
@@ -1346,6 +1360,7 @@ class LR_WasmDecoder : public WasmDecoder {
}
void SetEnv(const char* reason, SsaEnv* env) {
+#if DEBUG
TRACE(" env = %p, block depth = %d, reason = %s", static_cast<void*>(env),
static_cast<int>(blocks_.size()), reason);
if (FLAG_trace_wasm_decoder && env && env->control) {
@@ -1353,6 +1368,7 @@ class LR_WasmDecoder : public WasmDecoder {
compiler::WasmGraphBuilder::PrintDebugName(env->control);
}
TRACE("\n");
+#endif
ssa_env_ = env;
if (builder_) {
builder_->set_control_ptr(&env->control);
@@ -1389,8 +1405,7 @@ class LR_WasmDecoder : public WasmDecoder {
TFNode* b = from->locals[i];
if (a != b) {
TFNode* vals[] = {a, b};
- to->locals[i] =
- builder_->Phi(function_env_->GetLocalType(i), 2, vals, merge);
+ to->locals[i] = builder_->Phi(local_type_vec_[i], 2, vals, merge);
}
}
break;
@@ -1425,8 +1440,8 @@ class LR_WasmDecoder : public WasmDecoder {
vals[j] = tnode;
}
vals[count - 1] = fnode;
- to->locals[i] = builder_->Phi(function_env_->GetLocalType(i), count,
- vals, merge);
+ to->locals[i] =
+ builder_->Phi(local_type_vec_[i], count, vals, merge);
}
}
break;
@@ -1451,29 +1466,32 @@ class LR_WasmDecoder : public WasmDecoder {
return tnode;
}
- void BuildInfiniteLoop() {
- if (ssa_env_->go()) {
- PrepareForLoop(ssa_env_);
- SsaEnv* cont_env = ssa_env_;
- ssa_env_ = Split(ssa_env_);
- ssa_env_->state = SsaEnv::kReached;
- Goto(ssa_env_, cont_env);
- }
- }
-
- void PrepareForLoop(SsaEnv* env) {
- if (env->go()) {
- env->state = SsaEnv::kMerged;
- if (builder_) {
- env->control = builder_->Loop(env->control);
- env->effect = builder_->EffectPhi(1, &env->effect, env->control);
- builder_->Terminate(env->effect, env->control);
+ void PrepareForLoop(const byte* pc, SsaEnv* env) {
+ if (!env->go()) return;
+ env->state = SsaEnv::kMerged;
+ if (!builder_) return;
+
+ env->control = builder_->Loop(env->control);
+ env->effect = builder_->EffectPhi(1, &env->effect, env->control);
+ builder_->Terminate(env->effect, env->control);
+ if (FLAG_wasm_loop_assignment_analysis) {
+ BitVector* assigned = AnalyzeLoopAssignment(pc);
+ if (assigned != nullptr) {
+ // Only introduce phis for variables assigned in this loop.
for (int i = EnvironmentCount() - 1; i >= 0; i--) {
- env->locals[i] = builder_->Phi(function_env_->GetLocalType(i), 1,
- &env->locals[i], env->control);
+ if (!assigned->Contains(i)) continue;
+ env->locals[i] = builder_->Phi(local_type_vec_[i], 1, &env->locals[i],
+ env->control);
}
+ return;
}
}
+
+ // Conservatively introduce phis for all local variables.
+ for (int i = EnvironmentCount() - 1; i >= 0; i--) {
+ env->locals[i] =
+ builder_->Phi(local_type_vec_[i], 1, &env->locals[i], env->control);
+ }
}
// Create a complete copy of the {from}.
@@ -1524,7 +1542,7 @@ class LR_WasmDecoder : public WasmDecoder {
}
int EnvironmentCount() {
- if (builder_) return static_cast<int>(function_env_->GetLocalCount());
+ if (builder_) return static_cast<int>(local_type_vec_.size());
return 0; // if we aren't building a graph, don't bother with SSA renaming.
}
@@ -1560,23 +1578,84 @@ class LR_WasmDecoder : public WasmDecoder {
PrintProduction(depth + 1);
}
#endif
+
+ BitVector* AnalyzeLoopAssignment(const byte* pc) {
+ if (pc >= limit_) return nullptr;
+ if (*pc != kExprLoop) return nullptr;
+
+ BitVector* assigned =
+ new (zone_) BitVector(static_cast<int>(total_locals_), zone_);
+ // Keep a stack to model the nesting of expressions.
+ std::vector<int> arity_stack;
+ arity_stack.push_back(OpcodeArity(pc));
+ pc += OpcodeLength(pc);
+
+ // Iteratively process all AST nodes nested inside the loop.
+ while (pc < limit_) {
+ WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
+ int arity = 0;
+ int length = 1;
+ int assigned_index = -1;
+ if (opcode == kExprSetLocal) {
+ LocalIndexOperand operand(this, pc);
+ if (assigned->length() > 0 &&
+ static_cast<int>(operand.index) < assigned->length()) {
+ // Unverified code might have an out-of-bounds index.
+ // Ignore out-of-bounds indices, as the main verification will fail.
+ assigned->Add(operand.index);
+ assigned_index = operand.index;
+ }
+ arity = 1;
+ length = 1 + operand.length;
+ } else {
+ arity = OpcodeArity(pc);
+ length = OpcodeLength(pc);
+ }
+
+ TRACE("loop-assign module+%-6d %s func+%d: 0x%02x %s", baserel(pc),
+ indentation(), startrel(pc), opcode,
+ WasmOpcodes::OpcodeName(opcode));
+
+ if (assigned_index >= 0) {
+ TRACE(" (assigned local #%d)\n", assigned_index);
+ } else {
+ TRACE("\n");
+ }
+
+ pc += length;
+ arity_stack.push_back(arity);
+ while (arity_stack.back() == 0) {
+ arity_stack.pop_back();
+ if (arity_stack.empty()) return assigned; // reached end of loop
+ arity_stack.back()--;
+ }
+ }
+ return assigned;
+ }
};
+bool DecodeLocalDecls(AstLocalDecls& decls, const byte* start,
+ const byte* end) {
+ base::AccountingAllocator allocator;
+ Zone tmp(&allocator);
+ FunctionBody body = {nullptr, nullptr, nullptr, start, end};
+ SR_WasmDecoder decoder(&tmp, nullptr, body);
+ return decoder.DecodeLocalDecls(decls);
+}
-TreeResult VerifyWasmCode(FunctionEnv* env, const byte* base, const byte* start,
- const byte* end) {
- Zone zone;
- LR_WasmDecoder decoder(&zone, nullptr);
- TreeResult result = decoder.Decode(env, base, start, end);
+TreeResult VerifyWasmCode(base::AccountingAllocator* allocator,
+ FunctionBody& body) {
+ Zone zone(allocator);
+ SR_WasmDecoder decoder(&zone, nullptr, body);
+ TreeResult result = decoder.Decode();
return result;
}
-
-TreeResult BuildTFGraph(TFBuilder* builder, FunctionEnv* env, const byte* base,
- const byte* start, const byte* end) {
- Zone zone;
- LR_WasmDecoder decoder(&zone, builder);
- TreeResult result = decoder.Decode(env, base, start, end);
+TreeResult BuildTFGraph(base::AccountingAllocator* allocator,
+ TFBuilder* builder, FunctionBody& body) {
+ Zone zone(allocator);
+ SR_WasmDecoder decoder(&zone, builder, body);
+ TreeResult result = decoder.Decode();
return result;
}
@@ -1608,20 +1687,49 @@ ReadUnsignedLEB128ErrorCode ReadUnsignedLEB128Operand(const byte* pc,
}
int OpcodeLength(const byte* pc, const byte* end) {
- WasmDecoder decoder(nullptr, pc, end);
+ WasmDecoder decoder(nullptr, nullptr, pc, end);
return decoder.OpcodeLength(pc);
}
-int OpcodeArity(FunctionEnv* env, const byte* pc, const byte* end) {
- WasmDecoder decoder(env, pc, end);
+int OpcodeArity(ModuleEnv* module, FunctionSig* sig, const byte* pc,
+ const byte* end) {
+ WasmDecoder decoder(module, sig, pc, end);
return decoder.OpcodeArity(pc);
}
-void PrintAst(FunctionEnv* env, const byte* start, const byte* end) {
- WasmDecoder decoder(env, start, end);
- const byte* pc = start;
+void PrintAst(base::AccountingAllocator* allocator, FunctionBody& body) {
+ Zone zone(allocator);
+ SR_WasmDecoder decoder(&zone, nullptr, body);
+
+ OFStream os(stdout);
+
+ // Print the function signature.
+ if (body.sig) {
+ os << "// signature: " << *body.sig << std::endl;
+ }
+
+ // Print the local declarations.
+ AstLocalDecls decls(&zone);
+ decoder.DecodeLocalDecls(decls);
+ const byte* pc = decoder.pc();
+ if (body.start != decoder.pc()) {
+ printf("// locals:");
+ for (auto p : decls.local_types) {
+ LocalType type = p.first;
+ uint32_t count = p.second;
+ os << " " << count << " " << WasmOpcodes::TypeName(type);
+ }
+ os << std::endl;
+
+ for (const byte* locals = body.start; locals < pc; locals++) {
+ printf(" 0x%02x,", *locals);
+ }
+ printf("\n");
+ }
+
+ printf("// body: \n");
std::vector<int> arity_stack;
- while (pc < end) {
+ while (pc < body.end) {
int arity = decoder.OpcodeArity(pc);
size_t length = decoder.OpcodeLength(pc);
@@ -1636,6 +1744,35 @@ void PrintAst(FunctionEnv* env, const byte* start, const byte* end) {
for (size_t i = 1; i < length; i++) {
printf(" 0x%02x,", pc[i]);
}
+
+ if (body.module) {
+ switch (opcode) {
+ case kExprCallIndirect: {
+ SignatureIndexOperand operand(&decoder, pc);
+ if (decoder.Validate(pc, operand)) {
+ os << " // sig #" << operand.index << ": " << *operand.sig;
+ }
+ break;
+ }
+ case kExprCallImport: {
+ ImportIndexOperand operand(&decoder, pc);
+ if (decoder.Validate(pc, operand)) {
+ os << " // import #" << operand.index << ": " << *operand.sig;
+ }
+ break;
+ }
+ case kExprCallFunction: {
+ FunctionIndexOperand operand(&decoder, pc);
+ if (decoder.Validate(pc, operand)) {
+ os << " // function #" << operand.index << ": " << *operand.sig;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
pc += length;
printf("\n");
@@ -1648,65 +1785,11 @@ void PrintAst(FunctionEnv* env, const byte* start, const byte* end) {
}
}
-// Analyzes loop bodies for static assignments to locals, which helps in
-// reducing the number of phis introduced at loop headers.
-class LoopAssignmentAnalyzer : public WasmDecoder {
- public:
- LoopAssignmentAnalyzer(Zone* zone, FunctionEnv* function_env) : zone_(zone) {
- function_env_ = function_env;
- }
-
- BitVector* Analyze(const byte* pc, const byte* limit) {
- Decoder::Reset(pc, limit);
- if (pc_ >= limit_) return nullptr;
- if (*pc_ != kExprLoop) return nullptr;
-
- BitVector* assigned =
- new (zone_) BitVector(function_env_->total_locals, zone_);
- // Keep a stack to model the nesting of expressions.
- std::vector<int> arity_stack;
- arity_stack.push_back(OpcodeArity(pc_));
- pc_ += OpcodeLength(pc_);
-
- // Iteratively process all AST nodes nested inside the loop.
- while (pc_ < limit_) {
- WasmOpcode opcode = static_cast<WasmOpcode>(*pc_);
- int arity = 0;
- int length = 1;
- if (opcode == kExprSetLocal) {
- LocalIndexOperand operand(this, pc_);
- if (assigned->length() > 0 &&
- static_cast<int>(operand.index) < assigned->length()) {
- // Unverified code might have an out-of-bounds index.
- assigned->Add(operand.index);
- }
- arity = 1;
- length = 1 + operand.length;
- } else {
- arity = OpcodeArity(pc_);
- length = OpcodeLength(pc_);
- }
-
- pc_ += length;
- arity_stack.push_back(arity);
- while (arity_stack.back() == 0) {
- arity_stack.pop_back();
- if (arity_stack.empty()) return assigned; // reached end of loop
- arity_stack.back()--;
- }
- }
- return assigned;
- }
-
- private:
- Zone* zone_;
-};
-
-
-BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone, FunctionEnv* env,
+BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone, size_t num_locals,
const byte* start, const byte* end) {
- LoopAssignmentAnalyzer analyzer(zone, env);
- return analyzer.Analyze(start, end);
+ FunctionBody body = {nullptr, nullptr, nullptr, start, end};
+ SR_WasmDecoder decoder(zone, nullptr, body);
+ return decoder.AnalyzeLoopAssignmentForTesting(start, num_locals);
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/ast-decoder.h b/deps/v8/src/wasm/ast-decoder.h
index 465bacaab8..5376e7bfdd 100644
--- a/deps/v8/src/wasm/ast-decoder.h
+++ b/deps/v8/src/wasm/ast-decoder.h
@@ -46,8 +46,7 @@ struct ImmI32Operand {
int32_t value;
int length;
inline ImmI32Operand(Decoder* decoder, const byte* pc) {
- value = bit_cast<int32_t>(decoder->checked_read_u32(pc, 1, "immi32"));
- length = 4;
+ value = decoder->checked_read_i32v(pc, 1, &length, "immi32");
}
};
@@ -55,8 +54,7 @@ struct ImmI64Operand {
int64_t value;
int length;
inline ImmI64Operand(Decoder* decoder, const byte* pc) {
- value = bit_cast<int64_t>(decoder->checked_read_u64(pc, 1, "immi64"));
- length = 8;
+ value = decoder->checked_read_i64v(pc, 1, &length, "immi64");
}
};
@@ -97,8 +95,7 @@ struct BreakDepthOperand {
Block* target;
int length;
inline BreakDepthOperand(Decoder* decoder, const byte* pc) {
- depth = decoder->checked_read_u8(pc, 1, "break depth");
- length = 1;
+ depth = decoder->checked_read_u32v(pc, 1, &length, "break depth");
target = nullptr;
}
};
@@ -107,8 +104,7 @@ struct BlockCountOperand {
uint32_t count;
int length;
inline BlockCountOperand(Decoder* decoder, const byte* pc) {
- count = decoder->checked_read_u8(pc, 1, "block count");
- length = 1;
+ count = decoder->checked_read_u32v(pc, 1, &length, "block count");
}
};
@@ -142,103 +138,55 @@ struct ImportIndexOperand {
}
};
-struct TableSwitchOperand {
- uint32_t case_count;
+struct BranchTableOperand {
uint32_t table_count;
const byte* table;
int length;
- inline TableSwitchOperand(Decoder* decoder, const byte* pc) {
- case_count = decoder->checked_read_u16(pc, 1, "expected #cases");
- table_count = decoder->checked_read_u16(pc, 3, "expected #entries");
- length = 4 + table_count * 2;
-
- if (decoder->check(pc, 5, table_count * 2, "expected <table entries>")) {
- table = pc + 5;
+ inline BranchTableOperand(Decoder* decoder, const byte* pc) {
+ int varint_length;
+ table_count =
+ decoder->checked_read_u32v(pc, 1, &varint_length, "expected #entries");
+ length = varint_length + (table_count + 1) * sizeof(uint32_t);
+
+ uint32_t table_start = 1 + varint_length;
+ if (decoder->check(pc, table_start, (table_count + 1) * sizeof(uint32_t),
+ "expected <table entries>")) {
+ table = pc + table_start;
} else {
table = nullptr;
}
}
- inline uint16_t read_entry(Decoder* decoder, int i) {
- DCHECK(i >= 0 && static_cast<uint32_t>(i) < table_count);
- return table ? decoder->read_u16(table + i * sizeof(uint16_t)) : 0;
+ inline uint32_t read_entry(Decoder* decoder, int i) {
+ DCHECK(i >= 0 && static_cast<uint32_t>(i) <= table_count);
+ return table ? decoder->read_u32(table + i * sizeof(uint32_t)) : 0;
}
};
struct MemoryAccessOperand {
- bool aligned;
+ uint32_t alignment;
uint32_t offset;
int length;
inline MemoryAccessOperand(Decoder* decoder, const byte* pc) {
- byte bitfield = decoder->checked_read_u8(pc, 1, "memory access byte");
- aligned = MemoryAccess::AlignmentField::decode(bitfield);
- if (MemoryAccess::OffsetField::decode(bitfield)) {
- offset = decoder->checked_read_u32v(pc, 2, &length, "memory offset");
- length++;
- } else {
- offset = 0;
- length = 1;
- }
+ int alignment_length;
+ alignment =
+ decoder->checked_read_u32v(pc, 1, &alignment_length, "alignment");
+ int offset_length;
+ offset = decoder->checked_read_u32v(pc, 1 + alignment_length,
+ &offset_length, "offset");
+ length = alignment_length + offset_length;
}
};
typedef compiler::WasmGraphBuilder TFBuilder;
struct ModuleEnv; // forward declaration of module interface.
-// Interface the function environment during decoding, include the signature
-// and number of locals.
-struct FunctionEnv {
- ModuleEnv* module; // module environment
- FunctionSig* sig; // signature of this function
- uint32_t local_i32_count; // number of int32 locals
- uint32_t local_i64_count; // number of int64 locals
- uint32_t local_f32_count; // number of float32 locals
- uint32_t local_f64_count; // number of float64 locals
- uint32_t total_locals; // sum of parameters and all locals
-
- uint32_t GetLocalCount() { return total_locals; }
- LocalType GetLocalType(uint32_t index) {
- if (index < static_cast<uint32_t>(sig->parameter_count())) {
- return sig->GetParam(index);
- }
- index -= static_cast<uint32_t>(sig->parameter_count());
- if (index < local_i32_count) return kAstI32;
- index -= local_i32_count;
- if (index < local_i64_count) return kAstI64;
- index -= local_i64_count;
- if (index < local_f32_count) return kAstF32;
- index -= local_f32_count;
- if (index < local_f64_count) return kAstF64;
- return kAstStmt;
- }
-
- void AddLocals(LocalType type, uint32_t count) {
- switch (type) {
- case kAstI32:
- local_i32_count += count;
- break;
- case kAstI64:
- local_i64_count += count;
- break;
- case kAstF32:
- local_f32_count += count;
- break;
- case kAstF64:
- local_f64_count += count;
- break;
- default:
- UNREACHABLE();
- }
- total_locals += count;
- DCHECK_EQ(total_locals,
- (sig->parameter_count() + local_i32_count + local_i64_count +
- local_f32_count + local_f64_count));
- }
-
- void SumLocals() {
- total_locals = static_cast<uint32_t>(sig->parameter_count()) +
- local_i32_count + local_i64_count + local_f32_count +
- local_f64_count;
- }
+// All of the various data structures necessary to decode a function body.
+struct FunctionBody {
+ ModuleEnv* module; // module environment
+ FunctionSig* sig; // function signature
+ const byte* base; // base of the module bytes, for error reporting
+ const byte* start; // start of the function body
+ const byte* end; // end of the function body
};
struct Tree;
@@ -246,21 +194,25 @@ typedef Result<Tree*> TreeResult;
std::ostream& operator<<(std::ostream& os, const Tree& tree);
-TreeResult VerifyWasmCode(FunctionEnv* env, const byte* base, const byte* start,
- const byte* end);
-TreeResult BuildTFGraph(TFBuilder* builder, FunctionEnv* env, const byte* base,
- const byte* start, const byte* end);
-
-void PrintAst(FunctionEnv* env, const byte* start, const byte* end);
-
-inline TreeResult VerifyWasmCode(FunctionEnv* env, const byte* start,
- const byte* end) {
- return VerifyWasmCode(env, nullptr, start, end);
+TreeResult VerifyWasmCode(base::AccountingAllocator* allocator,
+ FunctionBody& body);
+TreeResult BuildTFGraph(base::AccountingAllocator* allocator,
+ TFBuilder* builder, FunctionBody& body);
+void PrintAst(base::AccountingAllocator* allocator, FunctionBody& body);
+
+inline TreeResult VerifyWasmCode(base::AccountingAllocator* allocator,
+ ModuleEnv* module, FunctionSig* sig,
+ const byte* start, const byte* end) {
+ FunctionBody body = {module, sig, nullptr, start, end};
+ return VerifyWasmCode(allocator, body);
}
-inline TreeResult BuildTFGraph(TFBuilder* builder, FunctionEnv* env,
- const byte* start, const byte* end) {
- return BuildTFGraph(builder, env, nullptr, start, end);
+inline TreeResult BuildTFGraph(base::AccountingAllocator* allocator,
+ TFBuilder* builder, ModuleEnv* module,
+ FunctionSig* sig, const byte* start,
+ const byte* end) {
+ FunctionBody body = {module, sig, nullptr, start, end};
+ return BuildTFGraph(allocator, builder, body);
}
enum ReadUnsignedLEB128ErrorCode { kNoError, kInvalidLEB128, kMissingLEB128 };
@@ -268,14 +220,31 @@ enum ReadUnsignedLEB128ErrorCode { kNoError, kInvalidLEB128, kMissingLEB128 };
ReadUnsignedLEB128ErrorCode ReadUnsignedLEB128Operand(const byte*, const byte*,
int*, uint32_t*);
-BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone, FunctionEnv* env,
+struct AstLocalDecls {
+ // The size of the encoded declarations.
+ uint32_t decls_encoded_size; // size of encoded declarations
+
+ // Total number of locals.
+ uint32_t total_local_count;
+
+ // List of {local type, count} pairs.
+ ZoneVector<std::pair<LocalType, uint32_t>> local_types;
+
+ // Constructor initializes the vector.
+ explicit AstLocalDecls(Zone* zone)
+ : decls_encoded_size(0), total_local_count(0), local_types(zone) {}
+};
+
+bool DecodeLocalDecls(AstLocalDecls& decls, const byte* start, const byte* end);
+BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone, size_t num_locals,
const byte* start, const byte* end);
// Computes the length of the opcode at the given address.
int OpcodeLength(const byte* pc, const byte* end);
// Computes the arity (number of sub-nodes) of the opcode at the given address.
-int OpcodeArity(FunctionEnv* env, const byte* pc, const byte* end);
+int OpcodeArity(ModuleEnv* module, FunctionSig* sig, const byte* pc,
+ const byte* end);
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index 0e88eda022..f9de2e1143 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -77,33 +77,44 @@ class Decoder {
return check(base, offset, 8, msg) ? read_u64(base + offset) : 0;
}
+ // Reads a variable-length unsigned integer (little endian).
uint32_t checked_read_u32v(const byte* base, int offset, int* length,
- const char* msg = "expected LEB128") {
- if (!check(base, offset, 1, msg)) {
- *length = 0;
- return 0;
- }
+ const char* msg = "expected LEB32") {
+ return checked_read_leb<uint32_t, false>(base, offset, length, msg);
+ }
- const ptrdiff_t kMaxDiff = 5; // maximum 5 bytes.
- const byte* ptr = base + offset;
- const byte* end = ptr + kMaxDiff;
- if (end > limit_) end = limit_;
- int shift = 0;
- byte b = 0;
- uint32_t result = 0;
- while (ptr < end) {
- b = *ptr++;
- result = result | ((b & 0x7F) << shift);
- if ((b & 0x80) == 0) break;
- shift += 7;
+ // Reads a variable-length signed integer (little endian).
+ int32_t checked_read_i32v(const byte* base, int offset, int* length,
+ const char* msg = "expected SLEB32") {
+ uint32_t result =
+ checked_read_leb<uint32_t, true>(base, offset, length, msg);
+ if (*length == 5) return bit_cast<int32_t>(result);
+ if (*length > 0) {
+ int shift = 32 - 7 * *length;
+ // Perform sign extension.
+ return bit_cast<int32_t>(result << shift) >> shift;
}
- DCHECK_LE(ptr - (base + offset), kMaxDiff);
- *length = static_cast<int>(ptr - (base + offset));
- if (ptr == end && (b & 0x80)) {
- error(base, ptr, msg);
- return 0;
+ return 0;
+ }
+
+ // Reads a variable-length unsigned integer (little endian).
+ uint64_t checked_read_u64v(const byte* base, int offset, int* length,
+ const char* msg = "expected LEB64") {
+ return checked_read_leb<uint64_t, false>(base, offset, length, msg);
+ }
+
+ // Reads a variable-length signed integer (little endian).
+ int64_t checked_read_i64v(const byte* base, int offset, int* length,
+ const char* msg = "expected SLEB64") {
+ uint64_t result =
+ checked_read_leb<uint64_t, true>(base, offset, length, msg);
+ if (*length == 10) return bit_cast<int64_t>(result);
+ if (*length > 0) {
+ int shift = 64 - 7 * *length;
+ // Perform sign extension.
+ return bit_cast<int64_t>(result << shift) >> shift;
}
- return result;
+ return 0;
}
// Reads a single 16-bit unsigned integer (little endian).
@@ -214,6 +225,8 @@ class Decoder {
*length = static_cast<int>(pc_ - pos);
if (pc_ == end && (b & 0x80)) {
error(pc_ - 1, "varint too large");
+ } else if (*length == 0) {
+ error(pc_, "varint of length 0");
} else {
TRACE("= %u\n", result);
}
@@ -222,9 +235,22 @@ class Decoder {
return traceOffEnd<uint32_t>();
}
+ // Consume {size} bytes and send them to the bit bucket, advancing {pc_}.
+ void consume_bytes(int size) {
+ if (checkAvailable(size)) {
+ pc_ += size;
+ } else {
+ pc_ = limit_;
+ }
+ }
+
// Check that at least {size} bytes exist between {pc_} and {limit_}.
bool checkAvailable(int size) {
- if (pc_ < start_ || (pc_ + size) > limit_) {
+ intptr_t pc_overflow_value = std::numeric_limits<intptr_t>::max() - size;
+ if (size < 0 || (intptr_t)pc_ > pc_overflow_value) {
+ error(pc_, nullptr, "reading %d bytes would underflow/overflow", size);
+ return false;
+ } else if (pc_ < start_ || limit_ < (pc_ + size)) {
error(pc_, nullptr, "expected %d bytes, fell off end", size);
return false;
} else {
@@ -232,12 +258,6 @@ class Decoder {
}
}
- bool RangeOk(const byte* pc, int length) {
- if (pc < start_ || pc_ >= limit_) return false;
- if ((pc + length) >= limit_) return false;
- return true;
- }
-
void error(const char* msg) { error(pc_, nullptr, msg); }
void error(const byte* pc, const char* msg) { error(pc, nullptr, msg); }
@@ -283,12 +303,13 @@ class Decoder {
Result<T> toResult(T val) {
Result<T> result;
if (error_pc_) {
+ TRACE("Result error: %s\n", error_msg_.get());
result.error_code = kError;
result.start = start_;
result.error_pc = error_pc_;
result.error_pt = error_pt_;
- result.error_msg = error_msg_;
- error_msg_.Reset(nullptr);
+ // transfer ownership of the error to the result.
+ result.error_msg.Reset(error_msg_.Detach());
} else {
result.error_code = kSuccess;
}
@@ -308,7 +329,12 @@ class Decoder {
}
bool ok() const { return error_pc_ == nullptr; }
- bool failed() const { return error_pc_ != nullptr; }
+ bool failed() const { return !error_msg_.is_empty(); }
+ bool more() const { return pc_ < limit_; }
+
+ const byte* start() { return start_; }
+ const byte* pc() { return pc_; }
+ uint32_t pc_offset() { return static_cast<uint32_t>(pc_ - start_); }
protected:
const byte* start_;
@@ -318,6 +344,60 @@ class Decoder {
const byte* error_pc_;
const byte* error_pt_;
base::SmartArrayPointer<char> error_msg_;
+
+ private:
+ template <typename IntType, bool is_signed>
+ IntType checked_read_leb(const byte* base, int offset, int* length,
+ const char* msg) {
+ if (!check(base, offset, 1, msg)) {
+ *length = 0;
+ return 0;
+ }
+
+ const int kMaxLength = (sizeof(IntType) * 8 + 6) / 7;
+ const byte* ptr = base + offset;
+ const byte* end = ptr + kMaxLength;
+ if (end > limit_) end = limit_;
+ int shift = 0;
+ byte b = 0;
+ IntType result = 0;
+ while (ptr < end) {
+ b = *ptr++;
+ result = result | (static_cast<IntType>(b & 0x7F) << shift);
+ if ((b & 0x80) == 0) break;
+ shift += 7;
+ }
+ DCHECK_LE(ptr - (base + offset), kMaxLength);
+ *length = static_cast<int>(ptr - (base + offset));
+ if (ptr == end) {
+ // Check there are no bits set beyond the bitwidth of {IntType}.
+ const int kExtraBits = (1 + kMaxLength * 7) - (sizeof(IntType) * 8);
+ const byte kExtraBitsMask =
+ static_cast<byte>((0xFF << (8 - kExtraBits)) & 0xFF);
+ int extra_bits_value;
+ if (is_signed) {
+ // A signed-LEB128 must sign-extend the final byte, excluding its
+ // most-signifcant bit. e.g. for a 32-bit LEB128:
+ // kExtraBits = 4
+ // kExtraBitsMask = 0xf0
+ // If b is 0x0f, the value is negative, so extra_bits_value is 0x70.
+ // If b is 0x03, the value is positive, so extra_bits_value is 0x00.
+ extra_bits_value = (static_cast<int8_t>(b << kExtraBits) >> 8) &
+ kExtraBitsMask & ~0x80;
+ } else {
+ extra_bits_value = 0;
+ }
+ if (*length == kMaxLength && (b & kExtraBitsMask) != extra_bits_value) {
+ error(base, ptr, "extra bits in varint");
+ return 0;
+ }
+ if ((b & 0x80) != 0) {
+ error(base, ptr, msg);
+ return 0;
+ }
+ }
+ return result;
+ }
};
#undef TRACE
diff --git a/deps/v8/src/wasm/encoder.cc b/deps/v8/src/wasm/encoder.cc
index d80a275338..92e6b1145c 100644
--- a/deps/v8/src/wasm/encoder.cc
+++ b/deps/v8/src/wasm/encoder.cc
@@ -10,11 +10,21 @@
#include "src/wasm/ast-decoder.h"
#include "src/wasm/encoder.h"
+#include "src/wasm/wasm-macro-gen.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/v8memory.h"
+#if DEBUG
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_wasm_encoder) PrintF(__VA_ARGS__); \
+ } while (false)
+#else
+#define TRACE(...)
+#endif
+
namespace v8 {
namespace internal {
namespace wasm {
@@ -40,6 +50,11 @@ void EmitUint32(byte** b, uint32_t x) {
*b += 4;
}
+// Sections all start with a size, but it's unknown at the start.
+// We generate a large varint which we then fixup later when the size is known.
+//
+// TODO(jfb) Not strictly necessary since sizes are calculated ahead of time.
+const size_t padded_varint = 5;
void EmitVarInt(byte** b, size_t val) {
while (true) {
@@ -54,8 +69,47 @@ void EmitVarInt(byte** b, size_t val) {
}
}
}
-} // namespace
+size_t SizeOfVarInt(size_t value) {
+ size_t size = 0;
+ do {
+ size++;
+ value = value >> 7;
+ } while (value > 0);
+ return size;
+}
+
+void FixupSection(byte* start, byte* end) {
+ // Same as EmitVarInt, but fixed-width with zeroes in the MSBs.
+ size_t val = end - start - padded_varint;
+ TRACE(" fixup %u\n", (unsigned)val);
+ for (size_t pos = 0; pos != padded_varint; ++pos) {
+ size_t next = val >> 7;
+ byte out = static_cast<byte>(val & 0x7f);
+ if (pos != padded_varint - 1) {
+ *(start++) = 0x80 | out;
+ val = next;
+ } else {
+ *(start++) = out;
+ // TODO(jfb) check that the pre-allocated fixup size isn't overflowed.
+ }
+ }
+}
+
+// Returns the start of the section, where the section VarInt size is.
+byte* EmitSection(WasmSection::Code code, byte** b) {
+ byte* start = *b;
+ const char* name = WasmSection::getName(code);
+ size_t length = WasmSection::getNameLength(code);
+ TRACE("emit section: %s\n", name);
+ for (size_t padding = 0; padding != padded_varint; ++padding) {
+ EmitUint8(b, 0xff); // Will get fixed up later.
+ }
+ EmitVarInt(b, length); // Section name string size.
+ for (size_t i = 0; i != length; ++i) EmitUint8(b, name[i]);
+ return start;
+}
+} // namespace
struct WasmFunctionBuilder::Type {
bool param_;
@@ -120,16 +174,48 @@ void WasmFunctionBuilder::EmitWithU8(WasmOpcode opcode, const byte immediate) {
body_.push_back(immediate);
}
+void WasmFunctionBuilder::EmitWithU8U8(WasmOpcode opcode, const byte imm1,
+ const byte imm2) {
+ body_.push_back(static_cast<byte>(opcode));
+ body_.push_back(imm1);
+ body_.push_back(imm2);
+}
-uint32_t WasmFunctionBuilder::EmitEditableImmediate(const byte immediate) {
- body_.push_back(immediate);
+void WasmFunctionBuilder::EmitWithVarInt(WasmOpcode opcode,
+ uint32_t immediate) {
+ body_.push_back(static_cast<byte>(opcode));
+ size_t immediate_size = SizeOfVarInt(immediate);
+ body_.insert(body_.end(), immediate_size, 0);
+ byte* p = &body_[body_.size() - immediate_size];
+ EmitVarInt(&p, immediate);
+}
+
+uint32_t WasmFunctionBuilder::EmitEditableVarIntImmediate() {
+ // Guess that the immediate will be 1 byte. If it is more, we'll have to
+ // shift everything down.
+ body_.push_back(0);
return static_cast<uint32_t>(body_.size()) - 1;
}
+void WasmFunctionBuilder::EditVarIntImmediate(uint32_t offset,
+ const uint32_t immediate) {
+ uint32_t immediate_size = static_cast<uint32_t>(SizeOfVarInt(immediate));
+ // In EmitEditableVarIntImmediate, we guessed that we'd only need one byte.
+ // If we need more, shift everything down to make room for the larger
+ // immediate.
+ if (immediate_size > 1) {
+ uint32_t diff = immediate_size - 1;
+ body_.insert(body_.begin() + offset, diff, 0);
-void WasmFunctionBuilder::EditImmediate(uint32_t offset, const byte immediate) {
- DCHECK(offset < body_.size());
- body_[offset] = immediate;
+ for (size_t i = 0; i < local_indices_.size(); ++i) {
+ if (local_indices_[i] >= offset) {
+ local_indices_[i] += diff;
+ }
+ }
+ }
+ DCHECK(offset + immediate_size <= body_.size());
+ byte* p = &body_[offset];
+ EmitVarInt(&p, immediate);
}
@@ -144,7 +230,6 @@ void WasmFunctionBuilder::SetName(const unsigned char* name, int name_length) {
for (int i = 0; i < name_length; i++) {
name_.push_back(*(name + i));
}
- name_.push_back('\0');
}
}
@@ -250,15 +335,25 @@ WasmFunctionEncoder::WasmFunctionEncoder(Zone* zone, LocalType return_type,
uint32_t WasmFunctionEncoder::HeaderSize() const {
uint32_t size = 3;
- if (HasLocals()) size += 8;
if (!external_) size += 2;
- if (HasName()) size += 4;
+ if (HasName()) {
+ uint32_t name_size = NameSize();
+ size += static_cast<uint32_t>(SizeOfVarInt(name_size)) + name_size;
+ }
return size;
}
uint32_t WasmFunctionEncoder::BodySize(void) const {
- return external_ ? 0 : static_cast<uint32_t>(body_.size());
+ // TODO(titzer): embed a LocalDeclEncoder in the WasmFunctionEncoder
+ LocalDeclEncoder local_decl;
+ local_decl.AddLocals(local_i32_count_, kAstI32);
+ local_decl.AddLocals(local_i64_count_, kAstI64);
+ local_decl.AddLocals(local_f32_count_, kAstF32);
+ local_decl.AddLocals(local_f64_count_, kAstF64);
+
+ return external_ ? 0
+ : static_cast<uint32_t>(body_.size() + local_decl.Size());
}
@@ -271,28 +366,29 @@ void WasmFunctionEncoder::Serialize(byte* buffer, byte** header,
byte** body) const {
uint8_t decl_bits = (exported_ ? kDeclFunctionExport : 0) |
(external_ ? kDeclFunctionImport : 0) |
- (HasLocals() ? kDeclFunctionLocals : 0) |
(HasName() ? kDeclFunctionName : 0);
EmitUint8(header, decl_bits);
EmitUint16(header, signature_index_);
if (HasName()) {
- uint32_t name_offset = static_cast<uint32_t>(*body - buffer);
- EmitUint32(header, name_offset);
- std::memcpy(*body, &name_[0], name_.size());
- (*body) += name_.size();
+ EmitVarInt(header, NameSize());
+ for (size_t i = 0; i < name_.size(); ++i) {
+ EmitUint8(header, name_[i]);
+ }
}
- if (HasLocals()) {
- EmitUint16(header, local_i32_count_);
- EmitUint16(header, local_i64_count_);
- EmitUint16(header, local_f32_count_);
- EmitUint16(header, local_f64_count_);
- }
if (!external_) {
- EmitUint16(header, static_cast<uint16_t>(body_.size()));
+ // TODO(titzer): embed a LocalDeclEncoder in the WasmFunctionEncoder
+ LocalDeclEncoder local_decl;
+ local_decl.AddLocals(local_i32_count_, kAstI32);
+ local_decl.AddLocals(local_i64_count_, kAstI64);
+ local_decl.AddLocals(local_f32_count_, kAstF32);
+ local_decl.AddLocals(local_f64_count_, kAstF64);
+
+ EmitUint16(header, static_cast<uint16_t>(body_.size() + local_decl.Size()));
+ (*header) += local_decl.Emit(*header);
if (body_.size() > 0) {
std::memcpy(*header, &body_[0], body_.size());
(*header) += body_.size();
@@ -323,17 +419,13 @@ uint32_t WasmDataSegmentEncoder::BodySize() const {
void WasmDataSegmentEncoder::Serialize(byte* buffer, byte** header,
byte** body) const {
- uint32_t body_offset = static_cast<uint32_t>(*body - buffer);
- EmitUint32(header, dest_);
- EmitUint32(header, body_offset);
- EmitUint32(header, static_cast<uint32_t>(data_.size()));
- EmitUint8(header, 1); // init
+ EmitVarInt(header, dest_);
+ EmitVarInt(header, static_cast<uint32_t>(data_.size()));
- std::memcpy(*body, &data_[0], data_.size());
- (*body) += data_.size();
+ std::memcpy(*header, &data_[0], data_.size());
+ (*header) += data_.size();
}
-
WasmModuleBuilder::WasmModuleBuilder(Zone* zone)
: zone_(zone),
signatures_(zone),
@@ -341,8 +433,8 @@ WasmModuleBuilder::WasmModuleBuilder(Zone* zone)
data_segments_(zone),
indirect_functions_(zone),
globals_(zone),
- signature_map_(zone) {}
-
+ signature_map_(zone),
+ start_function_index_(-1) {}
uint16_t WasmModuleBuilder::AddFunction() {
functions_.push_back(new (zone_) WasmFunctionBuilder(zone_));
@@ -399,6 +491,9 @@ void WasmModuleBuilder::AddIndirectFunction(uint16_t index) {
indirect_functions_.push_back(index);
}
+void WasmModuleBuilder::MarkStartFunction(uint16_t index) {
+ start_function_index_ = index;
+}
WasmModuleWriter* WasmModuleBuilder::Build(Zone* zone) {
WasmModuleWriter* writer = new (zone) WasmModuleWriter(zone);
@@ -417,6 +512,7 @@ WasmModuleWriter* WasmModuleBuilder::Build(Zone* zone) {
for (auto global : globals_) {
writer->globals_.push_back(global);
}
+ writer->start_function_index_ = start_function_index_;
return writer;
}
@@ -434,7 +530,6 @@ WasmModuleWriter::WasmModuleWriter(Zone* zone)
indirect_functions_(zone),
globals_(zone) {}
-
struct Sizes {
size_t header_size;
size_t body_size;
@@ -446,80 +541,124 @@ struct Sizes {
body_size += body;
}
- void AddSection(size_t size) {
- if (size > 0) {
- Add(1, 0);
- while (size > 0) {
- Add(1, 0);
- size = size >> 7;
- }
- }
+ void AddSection(WasmSection::Code code, size_t other_size) {
+ Add(padded_varint + SizeOfVarInt(WasmSection::getNameLength(code)) +
+ WasmSection::getNameLength(code),
+ 0);
+ if (other_size) Add(SizeOfVarInt(other_size), 0);
}
};
-
WasmModuleIndex* WasmModuleWriter::WriteTo(Zone* zone) const {
Sizes sizes = {0, 0};
- sizes.Add(1, 0);
+ sizes.Add(2 * sizeof(uint32_t), 0); // header
+
+ sizes.AddSection(WasmSection::Code::Memory, 0);
sizes.Add(kDeclMemorySize, 0);
+ TRACE("Size after memory: %u, %u\n", (unsigned)sizes.header_size,
+ (unsigned)sizes.body_size);
- sizes.AddSection(signatures_.size());
- for (auto sig : signatures_) {
- sizes.Add(2 + sig->parameter_count(), 0);
+ if (globals_.size() > 0) {
+ sizes.AddSection(WasmSection::Code::Globals, globals_.size());
+ /* These globals never have names, so are always 3 bytes. */
+ sizes.Add(3 * globals_.size(), 0);
+ TRACE("Size after globals: %u, %u\n", (unsigned)sizes.header_size,
+ (unsigned)sizes.body_size);
}
- sizes.AddSection(globals_.size());
- if (globals_.size() > 0) {
- sizes.Add(kDeclGlobalSize * globals_.size(), 0);
+ if (signatures_.size() > 0) {
+ sizes.AddSection(WasmSection::Code::Signatures, signatures_.size());
+ for (auto sig : signatures_) {
+ sizes.Add(
+ 1 + SizeOfVarInt(sig->parameter_count()) + sig->parameter_count(), 0);
+ }
+ TRACE("Size after signatures: %u, %u\n", (unsigned)sizes.header_size,
+ (unsigned)sizes.body_size);
}
- sizes.AddSection(functions_.size());
- for (auto function : functions_) {
- sizes.Add(function->HeaderSize() + function->BodySize(),
- function->NameSize());
+ if (functions_.size() > 0) {
+ sizes.AddSection(WasmSection::Code::Functions, functions_.size());
+ for (auto function : functions_) {
+ sizes.Add(function->HeaderSize() + function->BodySize(),
+ function->NameSize());
+ }
+ TRACE("Size after functions: %u, %u\n", (unsigned)sizes.header_size,
+ (unsigned)sizes.body_size);
}
- sizes.AddSection(data_segments_.size());
- for (auto segment : data_segments_) {
- sizes.Add(segment->HeaderSize(), segment->BodySize());
+ if (start_function_index_ >= 0) {
+ sizes.AddSection(WasmSection::Code::StartFunction, 0);
+ sizes.Add(SizeOfVarInt(start_function_index_), 0);
+ TRACE("Size after start: %u, %u\n", (unsigned)sizes.header_size,
+ (unsigned)sizes.body_size);
}
- sizes.AddSection(indirect_functions_.size());
- sizes.Add(2 * static_cast<uint32_t>(indirect_functions_.size()), 0);
+ if (data_segments_.size() > 0) {
+ sizes.AddSection(WasmSection::Code::DataSegments, data_segments_.size());
+ for (auto segment : data_segments_) {
+ sizes.Add(segment->HeaderSize(), segment->BodySize());
+ }
+ TRACE("Size after data segments: %u, %u\n", (unsigned)sizes.header_size,
+ (unsigned)sizes.body_size);
+ }
- if (sizes.body_size > 0) sizes.Add(1, 0);
+ if (indirect_functions_.size() > 0) {
+ sizes.AddSection(WasmSection::Code::FunctionTable,
+ indirect_functions_.size());
+ for (auto function_index : indirect_functions_) {
+ sizes.Add(SizeOfVarInt(function_index), 0);
+ }
+ TRACE("Size after indirect functions: %u, %u\n",
+ (unsigned)sizes.header_size, (unsigned)sizes.body_size);
+ }
+
+ if (sizes.body_size > 0) {
+ sizes.AddSection(WasmSection::Code::End, 0);
+ TRACE("Size after end: %u, %u\n", (unsigned)sizes.header_size,
+ (unsigned)sizes.body_size);
+ }
ZoneVector<uint8_t> buffer_vector(sizes.total(), zone);
byte* buffer = &buffer_vector[0];
byte* header = buffer;
byte* body = buffer + sizes.header_size;
+ // -- emit magic -------------------------------------------------------------
+ TRACE("emit magic\n");
+ EmitUint32(&header, kWasmMagic);
+ EmitUint32(&header, kWasmVersion);
+
// -- emit memory declaration ------------------------------------------------
- EmitUint8(&header, kDeclMemory);
- EmitUint8(&header, 16); // min memory size
- EmitUint8(&header, 16); // max memory size
- EmitUint8(&header, 0); // memory export
+ {
+ byte* section = EmitSection(WasmSection::Code::Memory, &header);
+ EmitVarInt(&header, 16); // min memory size
+ EmitVarInt(&header, 16); // max memory size
+ EmitUint8(&header, 0); // memory export
+ static_assert(kDeclMemorySize == 3, "memory size must match emit above");
+ FixupSection(section, header);
+ }
// -- emit globals -----------------------------------------------------------
if (globals_.size() > 0) {
- EmitUint8(&header, kDeclGlobals);
+ byte* section = EmitSection(WasmSection::Code::Globals, &header);
EmitVarInt(&header, globals_.size());
for (auto global : globals_) {
- EmitUint32(&header, 0);
+ EmitVarInt(&header, 0); // Length of the global name.
EmitUint8(&header, WasmOpcodes::MemTypeCodeFor(global.first));
EmitUint8(&header, global.second);
}
+ FixupSection(section, header);
}
// -- emit signatures --------------------------------------------------------
if (signatures_.size() > 0) {
- EmitUint8(&header, kDeclSignatures);
+ byte* section = EmitSection(WasmSection::Code::Signatures, &header);
EmitVarInt(&header, signatures_.size());
for (FunctionSig* sig : signatures_) {
- EmitUint8(&header, static_cast<byte>(sig->parameter_count()));
+ EmitVarInt(&header, sig->parameter_count());
if (sig->return_count() > 0) {
EmitUint8(&header, WasmOpcodes::LocalTypeCodeFor(sig->GetReturn()));
} else {
@@ -529,39 +668,53 @@ WasmModuleIndex* WasmModuleWriter::WriteTo(Zone* zone) const {
EmitUint8(&header, WasmOpcodes::LocalTypeCodeFor(sig->GetParam(j)));
}
}
+ FixupSection(section, header);
}
// -- emit functions ---------------------------------------------------------
if (functions_.size() > 0) {
- EmitUint8(&header, kDeclFunctions);
+ byte* section = EmitSection(WasmSection::Code::Functions, &header);
EmitVarInt(&header, functions_.size());
for (auto func : functions_) {
func->Serialize(buffer, &header, &body);
}
+ FixupSection(section, header);
+ }
+
+ // -- emit start function index ----------------------------------------------
+ if (start_function_index_ >= 0) {
+ byte* section = EmitSection(WasmSection::Code::StartFunction, &header);
+ EmitVarInt(&header, start_function_index_);
+ FixupSection(section, header);
}
// -- emit data segments -----------------------------------------------------
if (data_segments_.size() > 0) {
- EmitUint8(&header, kDeclDataSegments);
+ byte* section = EmitSection(WasmSection::Code::DataSegments, &header);
EmitVarInt(&header, data_segments_.size());
for (auto segment : data_segments_) {
segment->Serialize(buffer, &header, &body);
}
+ FixupSection(section, header);
}
// -- emit function table ----------------------------------------------------
if (indirect_functions_.size() > 0) {
- EmitUint8(&header, kDeclFunctionTable);
+ byte* section = EmitSection(WasmSection::Code::FunctionTable, &header);
EmitVarInt(&header, indirect_functions_.size());
for (auto index : indirect_functions_) {
- EmitUint16(&header, index);
+ EmitVarInt(&header, index);
}
+ FixupSection(section, header);
}
- if (sizes.body_size > 0) EmitUint8(&header, kDeclEnd);
+ if (sizes.body_size > 0) {
+ byte* section = EmitSection(WasmSection::Code::End, &header);
+ FixupSection(section, header);
+ }
return new (zone) WasmModuleIndex(buffer, buffer + sizes.total());
}
diff --git a/deps/v8/src/wasm/encoder.h b/deps/v8/src/wasm/encoder.h
index 7b651bf95e..49a7bf7d05 100644
--- a/deps/v8/src/wasm/encoder.h
+++ b/deps/v8/src/wasm/encoder.h
@@ -42,11 +42,6 @@ class WasmFunctionEncoder : public ZoneObject {
ZoneVector<uint8_t> body_;
ZoneVector<char> name_;
- bool HasLocals() const {
- return (local_i32_count_ + local_i64_count_ + local_f32_count_ +
- local_f64_count_) > 0;
- }
-
bool HasName() const { return (exported_ || external_) && name_.size() > 0; }
};
@@ -60,8 +55,10 @@ class WasmFunctionBuilder : public ZoneObject {
const uint32_t* local_indices, uint32_t indices_size);
void Emit(WasmOpcode opcode);
void EmitWithU8(WasmOpcode opcode, const byte immediate);
- uint32_t EmitEditableImmediate(const byte immediate);
- void EditImmediate(uint32_t offset, const byte immediate);
+ void EmitWithU8U8(WasmOpcode opcode, const byte imm1, const byte imm2);
+ void EmitWithVarInt(WasmOpcode opcode, uint32_t immediate);
+ uint32_t EmitEditableVarIntImmediate();
+ void EditVarIntImmediate(uint32_t offset, const uint32_t immediate);
void Exported(uint8_t flag);
void External(uint8_t flag);
void SetName(const unsigned char* name, int name_length);
@@ -120,6 +117,7 @@ class WasmModuleWriter : public ZoneObject {
ZoneVector<FunctionSig*> signatures_;
ZoneVector<uint16_t> indirect_functions_;
ZoneVector<std::pair<MachineType, bool>> globals_;
+ int start_function_index_;
};
class WasmModuleBuilder : public ZoneObject {
@@ -131,6 +129,7 @@ class WasmModuleBuilder : public ZoneObject {
void AddDataSegment(WasmDataSegmentEncoder* data);
uint16_t AddSignature(FunctionSig* sig);
void AddIndirectFunction(uint16_t index);
+ void MarkStartFunction(uint16_t index);
WasmModuleWriter* Build(Zone* zone);
struct CompareFunctionSigs {
@@ -146,6 +145,7 @@ class WasmModuleBuilder : public ZoneObject {
ZoneVector<uint16_t> indirect_functions_;
ZoneVector<std::pair<MachineType, bool>> globals_;
SignatureMap signature_map_;
+ int start_function_index_;
};
std::vector<uint8_t> UnsignedLEB128From(uint32_t result);
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index 62b000da2b..3e85a1b53c 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -2,12 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/wasm/module-decoder.h"
+
+#include "src/base/functional.h"
+#include "src/base/platform/platform.h"
#include "src/macro-assembler.h"
#include "src/objects.h"
#include "src/v8.h"
#include "src/wasm/decoder.h"
-#include "src/wasm/module-decoder.h"
namespace v8 {
namespace internal {
@@ -27,8 +30,8 @@ namespace wasm {
class ModuleDecoder : public Decoder {
public:
ModuleDecoder(Zone* zone, const byte* module_start, const byte* module_end,
- bool asm_js)
- : Decoder(module_start, module_end), module_zone(zone), asm_js_(asm_js) {
+ ModuleOrigin origin)
+ : Decoder(module_start, module_end), module_zone(zone), origin_(origin) {
result_.start = start_;
if (limit_ < start_) {
error(start_, "end is less than start");
@@ -40,86 +43,196 @@ class ModuleDecoder : public Decoder {
pc_ = limit_; // On error, terminate section decoding loop.
}
+ static void DumpModule(WasmModule* module, ModuleResult result) {
+ std::string path;
+ if (FLAG_dump_wasm_module_path) {
+ path = FLAG_dump_wasm_module_path;
+ if (path.size() &&
+ !base::OS::isDirectorySeparator(path[path.size() - 1])) {
+ path += base::OS::DirectorySeparator();
+ }
+ }
+ // File are named `HASH.{ok,failed}.wasm`.
+ size_t hash = base::hash_range(module->module_start, module->module_end);
+ char buf[32] = {'\0'};
+#if V8_OS_WIN && _MSC_VER < 1900
+#define snprintf sprintf_s
+#endif
+ snprintf(buf, sizeof(buf) - 1, "%016zx.%s.wasm", hash,
+ result.ok() ? "ok" : "failed");
+ std::string name(buf);
+ if (FILE* wasm_file = base::OS::FOpen((path + name).c_str(), "wb")) {
+ fwrite(module->module_start, module->module_end - module->module_start, 1,
+ wasm_file);
+ fclose(wasm_file);
+ }
+ }
+
// Decodes an entire module.
ModuleResult DecodeModule(WasmModule* module, bool verify_functions = true) {
pc_ = start_;
module->module_start = start_;
module->module_end = limit_;
- module->min_mem_size_log2 = 0;
- module->max_mem_size_log2 = 0;
+ module->min_mem_pages = 0;
+ module->max_mem_pages = 0;
module->mem_export = false;
module->mem_external = false;
- module->globals = new std::vector<WasmGlobal>();
- module->signatures = new std::vector<FunctionSig*>();
- module->functions = new std::vector<WasmFunction>();
- module->data_segments = new std::vector<WasmDataSegment>();
- module->function_table = new std::vector<uint16_t>();
- module->import_table = new std::vector<WasmImport>();
+ module->origin = origin_;
+
+ bool sections[(size_t)WasmSection::Code::Max] = {false};
+
+ const byte* pos = pc_;
+ uint32_t magic_word = consume_u32("wasm magic");
+#define BYTES(x) (x & 0xff), (x >> 8) & 0xff, (x >> 16) & 0xff, (x >> 24) & 0xff
+ if (magic_word != kWasmMagic) {
+ error(pos, pos,
+ "expected magic word %02x %02x %02x %02x, "
+ "found %02x %02x %02x %02x",
+ BYTES(kWasmMagic), BYTES(magic_word));
+ goto done;
+ }
- bool sections[kMaxModuleSectionCode];
- memset(sections, 0, sizeof(sections));
+ pos = pc_;
+ {
+ uint32_t magic_version = consume_u32("wasm version");
+ if (magic_version != kWasmVersion) {
+ error(pos, pos,
+ "expected version %02x %02x %02x %02x, "
+ "found %02x %02x %02x %02x",
+ BYTES(kWasmVersion), BYTES(magic_version));
+ goto done;
+ }
+ }
// Decode the module sections.
while (pc_ < limit_) {
TRACE("DecodeSection\n");
- WasmSectionDeclCode section =
- static_cast<WasmSectionDeclCode>(consume_u8("section"));
- // Each section should appear at most once.
- if (section < kMaxModuleSectionCode) {
- CheckForPreviousSection(sections, section, false);
- sections[section] = true;
+ pos = pc_;
+
+ int length;
+ uint32_t section_length = consume_u32v(&length, "section size");
+
+ int section_string_leb_length = 0;
+ uint32_t section_string_length = 0;
+ WasmSection::Code section = consume_section_name(
+ &section_string_leb_length, &section_string_length);
+ uint32_t string_and_leb_length =
+ section_string_leb_length + section_string_length;
+ if (string_and_leb_length > section_length) {
+ error(pos, pos,
+ "section string of size %u longer than total section bytes %u",
+ string_and_leb_length, section_length);
+ break;
}
+ if (section == WasmSection::Code::Max) {
+ // Skip unknown section.
+ uint32_t skip = section_length - string_and_leb_length;
+ TRACE("skipping %u bytes from unknown section\n", skip);
+ consume_bytes(skip);
+ continue;
+ }
+
+ // Each section should appear at most once.
+ CheckForPreviousSection(sections, section, false);
+ sections[(size_t)section] = true;
+
switch (section) {
- case kDeclEnd:
+ case WasmSection::Code::End:
// Terminate section decoding.
limit_ = pc_;
break;
- case kDeclMemory:
- module->min_mem_size_log2 = consume_u8("min memory");
- module->max_mem_size_log2 = consume_u8("max memory");
+ case WasmSection::Code::Memory:
+ int length;
+ module->min_mem_pages = consume_u32v(&length, "min memory");
+ module->max_mem_pages = consume_u32v(&length, "max memory");
module->mem_export = consume_u8("export memory") != 0;
break;
- case kDeclSignatures: {
+ case WasmSection::Code::Signatures: {
int length;
uint32_t signatures_count = consume_u32v(&length, "signatures count");
- module->signatures->reserve(SafeReserve(signatures_count));
+ module->signatures.reserve(SafeReserve(signatures_count));
// Decode signatures.
for (uint32_t i = 0; i < signatures_count; i++) {
if (failed()) break;
TRACE("DecodeSignature[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
FunctionSig* s = consume_sig(); // read function sig.
- module->signatures->push_back(s);
+ module->signatures.push_back(s);
}
break;
}
- case kDeclFunctions: {
+ case WasmSection::Code::FunctionSignatures: {
// Functions require a signature table first.
- CheckForPreviousSection(sections, kDeclSignatures, true);
+ CheckForPreviousSection(sections, WasmSection::Code::Signatures,
+ true);
int length;
uint32_t functions_count = consume_u32v(&length, "functions count");
- module->functions->reserve(SafeReserve(functions_count));
+ module->functions.reserve(SafeReserve(functions_count));
+ for (uint32_t i = 0; i < functions_count; i++) {
+ module->functions.push_back(
+ {nullptr, i, 0, 0, 0, 0, 0, 0, false, false});
+ WasmFunction* function = &module->functions.back();
+ function->sig_index = consume_sig_index(module, &function->sig);
+ }
+ break;
+ }
+ case WasmSection::Code::FunctionBodies: {
+ // Function bodies should follow signatures.
+ CheckForPreviousSection(sections,
+ WasmSection::Code::FunctionSignatures, true);
+ int length;
+ const byte* pos = pc_;
+ uint32_t functions_count = consume_u32v(&length, "functions count");
+ if (functions_count != module->functions.size()) {
+ error(pos, pos, "function body count %u mismatch (%u expected)",
+ functions_count,
+ static_cast<uint32_t>(module->functions.size()));
+ break;
+ }
+ for (uint32_t i = 0; i < functions_count; i++) {
+ WasmFunction* function = &module->functions[i];
+ int length;
+ uint32_t size = consume_u32v(&length, "body size");
+ function->code_start_offset = pc_offset();
+ function->code_end_offset = pc_offset() + size;
+
+ TRACE(" +%d %-20s: (%d bytes)\n", pc_offset(), "function body",
+ size);
+ pc_ += size;
+ if (pc_ > limit_) {
+ error(pc_, "function body extends beyond end of file");
+ }
+ }
+ break;
+ }
+ case WasmSection::Code::Functions: {
+ // Functions require a signature table first.
+ CheckForPreviousSection(sections, WasmSection::Code::Signatures,
+ true);
+ int length;
+ uint32_t functions_count = consume_u32v(&length, "functions count");
+ module->functions.reserve(SafeReserve(functions_count));
// Set up module environment for verification.
ModuleEnv menv;
menv.module = module;
menv.instance = nullptr;
- menv.asm_js = asm_js_;
+ menv.origin = origin_;
// Decode functions.
for (uint32_t i = 0; i < functions_count; i++) {
if (failed()) break;
TRACE("DecodeFunction[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
- module->functions->push_back(
+ module->functions.push_back(
{nullptr, i, 0, 0, 0, 0, 0, 0, false, false});
- WasmFunction* function = &module->functions->back();
+ WasmFunction* function = &module->functions.back();
DecodeFunctionInModule(module, function, false);
}
if (ok() && verify_functions) {
for (uint32_t i = 0; i < functions_count; i++) {
if (failed()) break;
- WasmFunction* function = &module->functions->at(i);
+ WasmFunction* function = &module->functions[i];
if (!function->external) {
VerifyFunctionBody(i, &menv, function);
if (result_.failed())
@@ -129,132 +242,166 @@ class ModuleDecoder : public Decoder {
}
break;
}
- case kDeclGlobals: {
+ case WasmSection::Code::Names: {
+ // Names correspond to functions.
+ CheckForPreviousSection(sections,
+ WasmSection::Code::FunctionSignatures, true);
+ int length;
+ const byte* pos = pc_;
+ uint32_t functions_count = consume_u32v(&length, "functions count");
+ if (functions_count != module->functions.size()) {
+ error(pos, pos, "function name count %u mismatch (%u expected)",
+ functions_count,
+ static_cast<uint32_t>(module->functions.size()));
+ break;
+ }
+
+ for (uint32_t i = 0; i < functions_count; i++) {
+ WasmFunction* function = &module->functions[i];
+ function->name_offset =
+ consume_string(&function->name_length, "function name");
+
+ uint32_t local_names_count =
+ consume_u32v(&length, "local names count");
+ for (uint32_t j = 0; j < local_names_count; j++) {
+ uint32_t unused = 0;
+ uint32_t offset = consume_string(&unused, "local name");
+ USE(unused);
+ USE(offset);
+ }
+ }
+ break;
+ }
+ case WasmSection::Code::Globals: {
int length;
uint32_t globals_count = consume_u32v(&length, "globals count");
- module->globals->reserve(SafeReserve(globals_count));
+ module->globals.reserve(SafeReserve(globals_count));
// Decode globals.
for (uint32_t i = 0; i < globals_count; i++) {
if (failed()) break;
TRACE("DecodeGlobal[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
- module->globals->push_back({0, MachineType::Int32(), 0, false});
- WasmGlobal* global = &module->globals->back();
+ module->globals.push_back({0, 0, MachineType::Int32(), 0, false});
+ WasmGlobal* global = &module->globals.back();
DecodeGlobalInModule(global);
}
break;
}
- case kDeclDataSegments: {
+ case WasmSection::Code::DataSegments: {
int length;
uint32_t data_segments_count =
consume_u32v(&length, "data segments count");
- module->data_segments->reserve(SafeReserve(data_segments_count));
+ module->data_segments.reserve(SafeReserve(data_segments_count));
// Decode data segments.
for (uint32_t i = 0; i < data_segments_count; i++) {
if (failed()) break;
TRACE("DecodeDataSegment[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
- module->data_segments->push_back({0, 0, 0});
- WasmDataSegment* segment = &module->data_segments->back();
+ module->data_segments.push_back({0, 0, 0});
+ WasmDataSegment* segment = &module->data_segments.back();
DecodeDataSegmentInModule(module, segment);
}
break;
}
- case kDeclFunctionTable: {
+ case WasmSection::Code::FunctionTable: {
// An indirect function table requires functions first.
- CheckForPreviousSection(sections, kDeclFunctions, true);
+ CheckForFunctions(module, section);
int length;
uint32_t function_table_count =
consume_u32v(&length, "function table count");
- module->function_table->reserve(SafeReserve(function_table_count));
+ module->function_table.reserve(SafeReserve(function_table_count));
// Decode function table.
for (uint32_t i = 0; i < function_table_count; i++) {
if (failed()) break;
TRACE("DecodeFunctionTable[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
- uint16_t index = consume_u16();
- if (index >= module->functions->size()) {
+ uint16_t index = consume_u32v(&length);
+ if (index >= module->functions.size()) {
error(pc_ - 2, "invalid function index");
break;
}
- module->function_table->push_back(index);
+ module->function_table.push_back(index);
}
break;
}
- case kDeclStartFunction: {
+ case WasmSection::Code::StartFunction: {
// Declares a start function for a module.
- CheckForPreviousSection(sections, kDeclFunctions, true);
+ CheckForFunctions(module, section);
if (module->start_function_index >= 0) {
error("start function already declared");
break;
}
- int length;
- const byte* before = pc_;
- uint32_t index = consume_u32v(&length, "start function index");
- if (index >= module->functions->size()) {
- error(before, "invalid start function index");
- break;
- }
- module->start_function_index = static_cast<int>(index);
- FunctionSig* sig =
- module->signatures->at(module->functions->at(index).sig_index);
- if (sig->parameter_count() > 0) {
- error(before, "invalid start function: non-zero parameter count");
+ WasmFunction* func;
+ const byte* pos = pc_;
+ module->start_function_index = consume_func_index(module, &func);
+ if (func && func->sig->parameter_count() > 0) {
+ error(pos, "invalid start function: non-zero parameter count");
break;
}
break;
}
- case kDeclImportTable: {
+ case WasmSection::Code::ImportTable: {
// Declares an import table.
- CheckForPreviousSection(sections, kDeclSignatures, true);
+ CheckForPreviousSection(sections, WasmSection::Code::Signatures,
+ true);
int length;
uint32_t import_table_count =
consume_u32v(&length, "import table count");
- module->import_table->reserve(SafeReserve(import_table_count));
+ module->import_table.reserve(SafeReserve(import_table_count));
// Decode import table.
for (uint32_t i = 0; i < import_table_count; i++) {
if (failed()) break;
TRACE("DecodeImportTable[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
- module->import_table->push_back({nullptr, 0, 0});
- WasmImport* import = &module->import_table->back();
+ module->import_table.push_back({nullptr, 0, 0});
+ WasmImport* import = &module->import_table.back();
- const byte* sigpos = pc_;
- import->sig_index = consume_u16("signature index");
-
- if (import->sig_index >= module->signatures->size()) {
- error(sigpos, "invalid signature index");
- } else {
- import->sig = module->signatures->at(import->sig_index);
+ import->sig_index = consume_sig_index(module, &import->sig);
+ const byte* pos = pc_;
+ import->module_name_offset = consume_string(
+ &import->module_name_length, "import module name");
+ if (import->module_name_length == 0) {
+ error(pos, "import module name cannot be NULL");
}
- import->module_name_offset = consume_string("import module name");
- import->function_name_offset =
- consume_string("import function name");
+ import->function_name_offset = consume_string(
+ &import->function_name_length, "import function name");
}
break;
}
- case kDeclWLL: {
- // Reserved for experimentation by the Web Low-level Language project
- // which is augmenting the binary encoding with source code meta
- // information. This section does not affect the semantics of the code
- // and can be ignored by the runtime. https://github.com/JSStats/wll
- int length = 0;
- uint32_t section_size = consume_u32v(&length, "section size");
- if (pc_ + section_size > limit_ || pc_ + section_size < pc_) {
- error(pc_ - length, "invalid section size");
- break;
+ case WasmSection::Code::ExportTable: {
+ // Declares an export table.
+ CheckForFunctions(module, section);
+ int length;
+ uint32_t export_table_count =
+ consume_u32v(&length, "export table count");
+ module->export_table.reserve(SafeReserve(export_table_count));
+ // Decode export table.
+ for (uint32_t i = 0; i < export_table_count; i++) {
+ if (failed()) break;
+ TRACE("DecodeExportTable[%d] module+%d\n", i,
+ static_cast<int>(pc_ - start_));
+
+ module->export_table.push_back({0, 0});
+ WasmExport* exp = &module->export_table.back();
+
+ WasmFunction* func;
+ exp->func_index = consume_func_index(module, &func);
+ exp->name_offset = consume_string(&exp->name_length, "export name");
}
- pc_ += section_size;
break;
}
- default:
- error(pc_ - 1, nullptr, "unrecognized section 0x%02x", section);
- break;
+ case WasmSection::Code::Max:
+ UNREACHABLE(); // Already skipped unknown sections.
}
}
- return toResult(module);
+ done:
+ ModuleResult result = toResult(module);
+ if (FLAG_dump_wasm_module) {
+ DumpModule(module, result);
+ }
+ return result;
}
uint32_t SafeReserve(uint32_t count) {
@@ -263,38 +410,23 @@ class ModuleDecoder : public Decoder {
return count < kMaxReserve ? count : kMaxReserve;
}
- void CheckForPreviousSection(bool* sections, WasmSectionDeclCode section,
- bool present) {
- if (section >= kMaxModuleSectionCode) return;
- if (sections[section] == present) return;
- const char* name = "";
- switch (section) {
- case kDeclMemory:
- name = "memory";
- break;
- case kDeclSignatures:
- name = "signatures";
- break;
- case kDeclFunctions:
- name = "function declaration";
- break;
- case kDeclGlobals:
- name = "global variable";
- break;
- case kDeclDataSegments:
- name = "data segment";
- break;
- case kDeclFunctionTable:
- name = "function table";
- break;
- default:
- name = "";
- break;
+ void CheckForFunctions(WasmModule* module, WasmSection::Code section) {
+ if (module->functions.size() == 0) {
+ error(pc_ - 1, nullptr, "functions must appear before section %s",
+ WasmSection::getName(section));
}
+ }
+
+ void CheckForPreviousSection(bool* sections, WasmSection::Code section,
+ bool present) {
+ if (section >= WasmSection::Code::Max) return;
+ if (sections[(size_t)section] == present) return;
if (present) {
- error(pc_ - 1, nullptr, "required %s section missing", name);
+ error(pc_ - 1, nullptr, "required %s section missing",
+ WasmSection::getName(section));
} else {
- error(pc_ - 1, nullptr, "%s section already present", name);
+ error(pc_ - 1, nullptr, "%s section already present",
+ WasmSection::getName(section));
}
}
@@ -302,16 +434,13 @@ class ModuleDecoder : public Decoder {
FunctionResult DecodeSingleFunction(ModuleEnv* module_env,
WasmFunction* function) {
pc_ = start_;
- function->sig = consume_sig(); // read signature
- function->name_offset = 0; // ---- name
- function->code_start_offset = off(pc_ + 8); // ---- code start
- function->code_end_offset = off(limit_); // ---- code end
- function->local_i32_count = consume_u16(); // read u16
- function->local_i64_count = consume_u16(); // read u16
- function->local_f32_count = consume_u16(); // read u16
- function->local_f64_count = consume_u16(); // read u16
- function->exported = false; // ---- exported
- function->external = false; // ---- external
+ function->sig = consume_sig(); // read signature
+ function->name_offset = 0; // ---- name
+ function->name_length = 0; // ---- name length
+ function->code_start_offset = off(pc_); // ---- code start
+ function->code_end_offset = off(limit_); // ---- code end
+ function->exported = false; // ---- exported
+ function->external = false; // ---- external
if (ok()) VerifyFunctionBody(0, module_env, function);
@@ -331,19 +460,20 @@ class ModuleDecoder : public Decoder {
private:
Zone* module_zone;
ModuleResult result_;
- bool asm_js_;
+ ModuleOrigin origin_;
uint32_t off(const byte* ptr) { return static_cast<uint32_t>(ptr - start_); }
// Decodes a single global entry inside a module starting at {pc_}.
void DecodeGlobalInModule(WasmGlobal* global) {
- global->name_offset = consume_string("global name");
+ global->name_offset = consume_string(&global->name_length, "global name");
global->type = mem_type();
global->offset = 0;
global->exported = consume_u8("exported") != 0;
}
// Decodes a single function entry inside a module starting at {pc_}.
+ // TODO(titzer): legacy function body; remove
void DecodeFunctionInModule(WasmModule* module, WasmFunction* function,
bool verify_body = true) {
byte decl_bits = consume_u8("function decl");
@@ -351,10 +481,10 @@ class ModuleDecoder : public Decoder {
const byte* sigpos = pc_;
function->sig_index = consume_u16("signature index");
- if (function->sig_index >= module->signatures->size()) {
+ if (function->sig_index >= module->signatures.size()) {
return error(sigpos, "invalid signature index");
} else {
- function->sig = module->signatures->at(function->sig_index);
+ function->sig = module->signatures[function->sig_index];
}
TRACE(" +%d <function attributes:%s%s%s%s%s>\n",
@@ -366,7 +496,8 @@ class ModuleDecoder : public Decoder {
(decl_bits & kDeclFunctionImport) == 0 ? " body" : "");
if (decl_bits & kDeclFunctionName) {
- function->name_offset = consume_string("function name");
+ function->name_offset =
+ consume_string(&function->name_length, "function name");
}
function->exported = decl_bits & kDeclFunctionExport;
@@ -406,25 +537,30 @@ class ModuleDecoder : public Decoder {
// Decodes a single data segment entry inside a module starting at {pc_}.
void DecodeDataSegmentInModule(WasmModule* module, WasmDataSegment* segment) {
- segment->dest_addr = consume_u32("destination");
- segment->source_offset = consume_offset("source offset");
- segment->source_size = consume_u32("source size");
- segment->init = consume_u8("init");
+ const byte* start = pc_;
+ int length;
+ segment->dest_addr = consume_u32v(&length, "destination");
+ segment->source_size = consume_u32v(&length, "source size");
+ segment->source_offset = static_cast<uint32_t>(pc_ - start_);
+ segment->init = true;
// Validate the data is in the module.
uint32_t module_limit = static_cast<uint32_t>(limit_ - start_);
if (!IsWithinLimit(module_limit, segment->source_offset,
segment->source_size)) {
- error(pc_ - sizeof(uint32_t), "segment out of bounds of module");
+ error(start, "segment out of bounds of module");
}
// Validate that the segment will fit into the (minimum) memory.
uint32_t memory_limit =
- 1 << (module ? module->min_mem_size_log2 : WasmModule::kMaxMemSize);
+ WasmModule::kPageSize * (module ? module->min_mem_pages
+ : WasmModule::kMaxMemPages);
if (!IsWithinLimit(memory_limit, segment->dest_addr,
segment->source_size)) {
- error(pc_ - sizeof(uint32_t), "segment out of bounds of memory");
+ error(start, "segment out of bounds of memory");
}
+
+ consume_bytes(segment->source_size);
}
// Verifies the body (code) of a given function.
@@ -436,18 +572,10 @@ class ModuleDecoder : public Decoder {
<< std::endl;
os << std::endl;
}
- FunctionEnv fenv;
- fenv.module = menv;
- fenv.sig = function->sig;
- fenv.local_i32_count = function->local_i32_count;
- fenv.local_i64_count = function->local_i64_count;
- fenv.local_f32_count = function->local_f32_count;
- fenv.local_f64_count = function->local_f64_count;
- fenv.SumLocals();
-
- TreeResult result =
- VerifyWasmCode(&fenv, start_, start_ + function->code_start_offset,
- start_ + function->code_end_offset);
+ FunctionBody body = {menv, function->sig, start_,
+ start_ + function->code_start_offset,
+ start_ + function->code_end_offset};
+ TreeResult result = VerifyWasmCode(module_zone->allocator(), body);
if (result.failed()) {
// Wrap the error message from the function decoder.
std::ostringstream str;
@@ -476,11 +604,67 @@ class ModuleDecoder : public Decoder {
return offset;
}
- // Reads a single 32-bit unsigned integer interpreted as an offset into the
- // data and validating the string there and advances.
- uint32_t consume_string(const char* name = nullptr) {
- // TODO(titzer): validate string
- return consume_offset(name ? name : "string");
+ // Reads a length-prefixed string, checking that it is within bounds. Returns
+ // the offset of the string, and the length as an out parameter.
+ uint32_t consume_string(uint32_t* length, const char* name = nullptr) {
+ int varint_length;
+ *length = consume_u32v(&varint_length, "string length");
+ uint32_t offset = pc_offset();
+ TRACE(" +%u %-20s: (%u bytes)\n", offset, "string", *length);
+ consume_bytes(*length);
+ return offset;
+ }
+
+ uint32_t consume_sig_index(WasmModule* module, FunctionSig** sig) {
+ const byte* pos = pc_;
+ int length;
+ uint32_t sig_index = consume_u32v(&length, "signature index");
+ if (sig_index >= module->signatures.size()) {
+ error(pos, pos, "signature index %u out of bounds (%d signatures)",
+ sig_index, static_cast<int>(module->signatures.size()));
+ *sig = nullptr;
+ return 0;
+ }
+ *sig = module->signatures[sig_index];
+ return sig_index;
+ }
+
+ uint32_t consume_func_index(WasmModule* module, WasmFunction** func) {
+ const byte* pos = pc_;
+ int length;
+ uint32_t func_index = consume_u32v(&length, "function index");
+ if (func_index >= module->functions.size()) {
+ error(pos, pos, "function index %u out of bounds (%d functions)",
+ func_index, static_cast<int>(module->functions.size()));
+ *func = nullptr;
+ return 0;
+ }
+ *func = &module->functions[func_index];
+ return func_index;
+ }
+
+ // Reads a section name.
+ WasmSection::Code consume_section_name(int* string_leb_length,
+ uint32_t* string_length) {
+ *string_length = consume_u32v(string_leb_length, "name length");
+ const byte* start = pc_;
+ consume_bytes(*string_length);
+ if (failed()) {
+ TRACE("Section name of length %u couldn't be read\n", *string_length);
+ return WasmSection::Code::Max;
+ }
+ // TODO(jfb) Linear search, it may be better to do a common-prefix search.
+ for (WasmSection::Code i = WasmSection::begin(); i != WasmSection::end();
+ i = WasmSection::next(i)) {
+ if (WasmSection::getNameLength(i) == *string_length &&
+ 0 == memcmp(WasmSection::getName(i), start, *string_length)) {
+ return i;
+ }
+ }
+ TRACE("Unknown section: '");
+ for (uint32_t i = 0; i != *string_length; ++i) TRACE("%c", *(start + i));
+ TRACE("'\n");
+ return WasmSection::Code::Max;
}
// Reads a single 8-bit integer, interpreting it as a local type.
@@ -537,7 +721,8 @@ class ModuleDecoder : public Decoder {
// Parses an inline function signature.
FunctionSig* consume_sig() {
- byte count = consume_u8("param count");
+ int length;
+ byte count = consume_u32v(&length, "param count");
LocalType ret = consume_local_type();
FunctionSig::Builder builder(module_zone, ret == kAstStmt ? 0 : 1, count);
if (ret != kAstStmt) builder.AddReturn(ret);
@@ -579,22 +764,21 @@ class FunctionError : public FunctionResult {
}
};
-
ModuleResult DecodeWasmModule(Isolate* isolate, Zone* zone,
const byte* module_start, const byte* module_end,
- bool verify_functions, bool asm_js) {
+ bool verify_functions, ModuleOrigin origin) {
size_t size = module_end - module_start;
if (module_start > module_end) return ModuleError("start > end");
if (size >= kMaxModuleSize) return ModuleError("size > maximum module size");
WasmModule* module = new WasmModule();
- ModuleDecoder decoder(zone, module_start, module_end, asm_js);
+ ModuleDecoder decoder(zone, module_start, module_end, origin);
return decoder.DecodeModule(module, verify_functions);
}
FunctionSig* DecodeWasmSignatureForTesting(Zone* zone, const byte* start,
const byte* end) {
- ModuleDecoder decoder(zone, start, end, false);
+ ModuleDecoder decoder(zone, start, end, kWasmOrigin);
return decoder.DecodeFunctionSignature(start);
}
@@ -608,7 +792,7 @@ FunctionResult DecodeWasmFunction(Isolate* isolate, Zone* zone,
if (size > kMaxFunctionSize)
return FunctionError("size > maximum function size");
WasmFunction* function = new WasmFunction();
- ModuleDecoder decoder(zone, function_start, function_end, false);
+ ModuleDecoder decoder(zone, function_start, function_end, kWasmOrigin);
return decoder.DecodeSingleFunction(module_env, function);
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index 3f469a500e..00a9b878c6 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -14,7 +14,7 @@ namespace wasm {
// Decodes the bytes of a WASM module between {module_start} and {module_end}.
ModuleResult DecodeWasmModule(Isolate* isolate, Zone* zone,
const byte* module_start, const byte* module_end,
- bool verify_functions, bool asm_js);
+ bool verify_functions, ModuleOrigin origin);
// Exposed for testing. Decodes a single function signature, allocating it
// in the given zone. Returns {nullptr} upon failure.
diff --git a/deps/v8/src/wasm/wasm-external-refs.h b/deps/v8/src/wasm/wasm-external-refs.h
new file mode 100644
index 0000000000..4aa452bbf5
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-external-refs.h
@@ -0,0 +1,181 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef WASM_EXTERNAL_REFS_H
+#define WASM_EXTERNAL_REFS_H
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+static void f32_trunc_wrapper(float* param) { *param = truncf(*param); }
+
+static void f32_floor_wrapper(float* param) { *param = floorf(*param); }
+
+static void f32_ceil_wrapper(float* param) { *param = ceilf(*param); }
+
+static void f32_nearest_int_wrapper(float* param) {
+ *param = nearbyintf(*param);
+}
+
+static void f64_trunc_wrapper(double* param) { *param = trunc(*param); }
+
+static void f64_floor_wrapper(double* param) { *param = floor(*param); }
+
+static void f64_ceil_wrapper(double* param) { *param = ceil(*param); }
+
+static void f64_nearest_int_wrapper(double* param) {
+ *param = nearbyint(*param);
+}
+
+static void int64_to_float32_wrapper(int64_t* input, float* output) {
+ *output = static_cast<float>(*input);
+}
+
+static void uint64_to_float32_wrapper(uint64_t* input, float* output) {
+#if V8_CC_MSVC
+ // With MSVC we use static_cast<float>(uint32_t) instead of
+ // static_cast<float>(uint64_t) to achieve round-to-nearest-ties-even
+ // semantics. The idea is to calculate
+ // static_cast<float>(high_word) * 2^32 + static_cast<float>(low_word). To
+ // achieve proper rounding in all cases we have to adjust the high_word
+ // with a "rounding bit" sometimes. The rounding bit is stored in the LSB of
+ // the high_word if the low_word may affect the rounding of the high_word.
+ uint32_t low_word = static_cast<uint32_t>(*input & 0xffffffff);
+ uint32_t high_word = static_cast<uint32_t>(*input >> 32);
+
+ float shift = static_cast<float>(1ull << 32);
+ // If the MSB of the high_word is set, then we make space for a rounding bit.
+ if (high_word < 0x80000000) {
+ high_word <<= 1;
+ shift = static_cast<float>(1ull << 31);
+ }
+
+ if ((high_word & 0xfe000000) && low_word) {
+ // Set the rounding bit.
+ high_word |= 1;
+ }
+
+ float result = static_cast<float>(high_word);
+ result *= shift;
+ result += static_cast<float>(low_word);
+ *output = result;
+
+#else
+ *output = static_cast<float>(*input);
+#endif
+}
+
+static void int64_to_float64_wrapper(int64_t* input, double* output) {
+ *output = static_cast<double>(*input);
+}
+
+static void uint64_to_float64_wrapper(uint64_t* input, double* output) {
+#if V8_CC_MSVC
+ // With MSVC we use static_cast<double>(uint32_t) instead of
+ // static_cast<double>(uint64_t) to achieve round-to-nearest-ties-even
+ // semantics. The idea is to calculate
+ // static_cast<double>(high_word) * 2^32 + static_cast<double>(low_word).
+ uint32_t low_word = static_cast<uint32_t>(*input & 0xffffffff);
+ uint32_t high_word = static_cast<uint32_t>(*input >> 32);
+
+ double shift = static_cast<double>(1ull << 32);
+
+ double result = static_cast<double>(high_word);
+ result *= shift;
+ result += static_cast<double>(low_word);
+ *output = result;
+
+#else
+ *output = static_cast<double>(*input);
+#endif
+}
+
+static int32_t float32_to_int64_wrapper(float* input, int64_t* output) {
+ // We use "<" here to check the upper bound because of rounding problems: With
+ // "<=" some inputs would be considered within int64 range which are actually
+ // not within int64 range.
+ if (*input >= static_cast<float>(std::numeric_limits<int64_t>::min()) &&
+ *input < static_cast<float>(std::numeric_limits<int64_t>::max())) {
+ *output = static_cast<int64_t>(*input);
+ return 1;
+ }
+ return 0;
+}
+
+static int32_t float32_to_uint64_wrapper(float* input, uint64_t* output) {
+ // We use "<" here to check the upper bound because of rounding problems: With
+ // "<=" some inputs would be considered within uint64 range which are actually
+ // not within uint64 range.
+ if (*input > -1.0 &&
+ *input < static_cast<float>(std::numeric_limits<uint64_t>::max())) {
+ *output = static_cast<uint64_t>(*input);
+ return 1;
+ }
+ return 0;
+}
+
+static int32_t float64_to_int64_wrapper(double* input, int64_t* output) {
+ // We use "<" here to check the upper bound because of rounding problems: With
+ // "<=" some inputs would be considered within int64 range which are actually
+ // not within int64 range.
+ if (*input >= static_cast<double>(std::numeric_limits<int64_t>::min()) &&
+ *input < static_cast<double>(std::numeric_limits<int64_t>::max())) {
+ *output = static_cast<int64_t>(*input);
+ return 1;
+ }
+ return 0;
+}
+
+static int32_t float64_to_uint64_wrapper(double* input, uint64_t* output) {
+ // We use "<" here to check the upper bound because of rounding problems: With
+ // "<=" some inputs would be considered within uint64 range which are actually
+ // not within uint64 range.
+ if (*input > -1.0 &&
+ *input < static_cast<double>(std::numeric_limits<uint64_t>::max())) {
+ *output = static_cast<uint64_t>(*input);
+ return 1;
+ }
+ return 0;
+}
+
+static int32_t int64_div_wrapper(int64_t* dst, int64_t* src) {
+ if (*src == 0) {
+ return 0;
+ }
+ if (*src == -1 && *dst == std::numeric_limits<int64_t>::min()) {
+ return -1;
+ }
+ *dst /= *src;
+ return 1;
+}
+
+static int32_t int64_mod_wrapper(int64_t* dst, int64_t* src) {
+ if (*src == 0) {
+ return 0;
+ }
+ *dst %= *src;
+ return 1;
+}
+
+static int32_t uint64_div_wrapper(uint64_t* dst, uint64_t* src) {
+ if (*src == 0) {
+ return 0;
+ }
+ *dst /= *src;
+ return 1;
+}
+
+static int32_t uint64_mod_wrapper(uint64_t* dst, uint64_t* src) {
+ if (*src == 0) {
+ return 0;
+ }
+ *dst %= *src;
+ return 1;
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index 62a2676032..83009d7c81 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -37,20 +37,43 @@ struct RawBuffer {
RawBuffer GetRawBufferArgument(
ErrorThrower& thrower, const v8::FunctionCallbackInfo<v8::Value>& args) {
- // TODO(titzer): allow typed array views.
- if (args.Length() < 1 || !args[0]->IsArrayBuffer()) {
+ if (args.Length() < 1) {
thrower.Error("Argument 0 must be an array buffer");
return {nullptr, nullptr};
}
- Local<ArrayBuffer> buffer = Local<ArrayBuffer>::Cast(args[0]);
- ArrayBuffer::Contents contents = buffer->GetContents();
- const byte* start = reinterpret_cast<const byte*>(contents.Data());
- const byte* end = start + contents.ByteLength();
+ const byte* start = nullptr;
+ const byte* end = nullptr;
- if (start == nullptr) {
- thrower.Error("ArrayBuffer argument is empty");
+ if (args[0]->IsArrayBuffer()) {
+ // A raw array buffer was passed.
+ Local<ArrayBuffer> buffer = Local<ArrayBuffer>::Cast(args[0]);
+ ArrayBuffer::Contents contents = buffer->GetContents();
+
+ start = reinterpret_cast<const byte*>(contents.Data());
+ end = start + contents.ByteLength();
+
+ if (start == nullptr || end == start) {
+ thrower.Error("ArrayBuffer argument is empty");
+ }
+ } else if (args[0]->IsTypedArray()) {
+ // A TypedArray was passed.
+ Local<TypedArray> array = Local<TypedArray>::Cast(args[0]);
+ Local<ArrayBuffer> buffer = array->Buffer();
+
+ ArrayBuffer::Contents contents = buffer->GetContents();
+
+ start =
+ reinterpret_cast<const byte*>(contents.Data()) + array->ByteOffset();
+ end = start + array->ByteLength();
+
+ if (start == nullptr || end == start) {
+ thrower.Error("ArrayBuffer argument is empty");
+ }
+ } else {
+ thrower.Error("Argument 0 must be an ArrayBuffer or Uint8Array");
}
+
return {start, end};
}
@@ -63,9 +86,10 @@ void VerifyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
RawBuffer buffer = GetRawBufferArgument(thrower, args);
if (thrower.error()) return;
- i::Zone zone;
- internal::wasm::ModuleResult result = internal::wasm::DecodeWasmModule(
- isolate, &zone, buffer.start, buffer.end, true, false);
+ i::Zone zone(isolate->allocator());
+ internal::wasm::ModuleResult result =
+ internal::wasm::DecodeWasmModule(isolate, &zone, buffer.start, buffer.end,
+ true, internal::wasm::kWasmOrigin);
if (result.failed()) {
thrower.Failed("", result);
@@ -87,7 +111,7 @@ void VerifyFunction(const v8::FunctionCallbackInfo<v8::Value>& args) {
{
// Verification of a single function shouldn't allocate.
i::DisallowHeapAllocation no_allocation;
- i::Zone zone;
+ i::Zone zone(isolate->allocator());
result = internal::wasm::DecodeWasmFunction(isolate, &zone, nullptr,
buffer.start, buffer.end);
}
@@ -123,25 +147,18 @@ v8::internal::wasm::WasmModuleIndex* TranslateAsmModule(
return nullptr;
}
- auto module = v8::internal::wasm::AsmWasmBuilder(
- info->isolate(), info->zone(), info->literal(), foreign)
- .Run();
-
- if (i::FLAG_dump_asmjs_wasm) {
- FILE* wasm_file = fopen(i::FLAG_asmjs_wasm_dumpfile, "wb");
- if (wasm_file) {
- fwrite(module->Begin(), module->End() - module->Begin(), 1, wasm_file);
- fclose(wasm_file);
- }
- }
+ auto module =
+ v8::internal::wasm::AsmWasmBuilder(info->isolate(), info->zone(),
+ info->literal(), foreign, &typer)
+ .Run();
return module;
}
-
void InstantiateModuleCommon(const v8::FunctionCallbackInfo<v8::Value>& args,
const byte* start, const byte* end,
- ErrorThrower* thrower, bool must_decode) {
+ ErrorThrower* thrower,
+ internal::wasm::ModuleOrigin origin) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
i::Handle<i::JSArrayBuffer> memory = i::Handle<i::JSArrayBuffer>::null();
@@ -153,11 +170,11 @@ void InstantiateModuleCommon(const v8::FunctionCallbackInfo<v8::Value>& args,
// Decode but avoid a redundant pass over function bodies for verification.
// Verification will happen during compilation.
- i::Zone zone;
+ i::Zone zone(isolate->allocator());
internal::wasm::ModuleResult result = internal::wasm::DecodeWasmModule(
- isolate, &zone, start, end, false, false);
+ isolate, &zone, start, end, false, origin);
- if (result.failed() && must_decode) {
+ if (result.failed() && origin == internal::wasm::kAsmJsOrigin) {
thrower->Error("Asm.js converted module failed to decode");
} else if (result.failed()) {
thrower->Failed("", result);
@@ -192,7 +209,7 @@ void InstantiateModuleFromAsm(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
i::Factory* factory = isolate->factory();
- i::Zone zone;
+ i::Zone zone(isolate->allocator());
Local<String> source = Local<String>::Cast(args[0]);
i::Handle<i::Script> script = factory->NewScript(Utils::OpenHandle(*source));
i::ParseInfo info(&zone, script);
@@ -208,7 +225,8 @@ void InstantiateModuleFromAsm(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- InstantiateModuleCommon(args, module->Begin(), module->End(), &thrower, true);
+ InstantiateModuleCommon(args, module->Begin(), module->End(), &thrower,
+ internal::wasm::kAsmJsOrigin);
}
@@ -220,7 +238,8 @@ void InstantiateModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
RawBuffer buffer = GetRawBufferArgument(thrower, args);
if (buffer.start == nullptr) return;
- InstantiateModuleCommon(args, buffer.start, buffer.end, &thrower, false);
+ InstantiateModuleCommon(args, buffer.start, buffer.end, &thrower,
+ internal::wasm::kWasmOrigin);
}
} // namespace
@@ -260,7 +279,7 @@ void WasmJs::Install(Isolate* isolate, Handle<JSGlobalObject> global) {
// Bind the WASM object.
Factory* factory = isolate->factory();
- Handle<String> name = v8_str(isolate, "_WASMEXP_");
+ Handle<String> name = v8_str(isolate, "Wasm");
Handle<JSFunction> cons = factory->NewFunction(name);
JSFunction::SetInstancePrototype(
cons, Handle<Object>(context->initial_object_prototype(), isolate));
@@ -280,10 +299,26 @@ void WasmJs::Install(Isolate* isolate, Handle<JSGlobalObject> global) {
void WasmJs::InstallWasmFunctionMap(Isolate* isolate, Handle<Context> context) {
if (!context->get(Context::WASM_FUNCTION_MAP_INDEX)->IsMap()) {
- Handle<Map> wasm_function_map = isolate->factory()->NewMap(
- JS_FUNCTION_TYPE, JSFunction::kSize + kPointerSize);
- wasm_function_map->set_is_callable();
- context->set_wasm_function_map(*wasm_function_map);
+ // TODO(titzer): Move this to bootstrapper.cc??
+ // TODO(titzer): Also make one for strict mode functions?
+ Handle<Map> prev_map = Handle<Map>(context->sloppy_function_map(), isolate);
+
+ InstanceType instance_type = prev_map->instance_type();
+ int internal_fields = JSObject::GetInternalFieldCount(*prev_map);
+ CHECK_EQ(0, internal_fields);
+ int pre_allocated =
+ prev_map->GetInObjectProperties() - prev_map->unused_property_fields();
+ int instance_size;
+ int in_object_properties;
+ JSFunction::CalculateInstanceSizeHelper(instance_type, internal_fields + 1,
+ 0, &instance_size,
+ &in_object_properties);
+
+ int unused_property_fields = in_object_properties - pre_allocated;
+ Handle<Map> map = Map::CopyInitialMap(
+ prev_map, instance_size, in_object_properties, unused_property_fields);
+
+ context->set_wasm_function_map(*map);
}
}
diff --git a/deps/v8/src/wasm/wasm-macro-gen.h b/deps/v8/src/wasm/wasm-macro-gen.h
index dd653c1740..d9199e82fb 100644
--- a/deps/v8/src/wasm/wasm-macro-gen.h
+++ b/deps/v8/src/wasm/wasm-macro-gen.h
@@ -7,6 +7,50 @@
#include "src/wasm/wasm-opcodes.h"
+#define U32_LE(v) \
+ static_cast<byte>(v), static_cast<byte>((v) >> 8), \
+ static_cast<byte>((v) >> 16), static_cast<byte>((v) >> 24)
+
+#define U16_LE(v) static_cast<byte>(v), static_cast<byte>((v) >> 8)
+
+#define WASM_MODULE_HEADER U32_LE(kWasmMagic), U32_LE(kWasmVersion)
+
+#define SIG_INDEX(v) U16_LE(v)
+// TODO(binji): make SIG_INDEX match this.
+#define IMPORT_SIG_INDEX(v) U32V_1(v)
+#define FUNC_INDEX(v) U32V_1(v)
+#define NO_NAME U32V_1(0)
+#define NAME_LENGTH(v) U32V_1(v)
+
+#define ZERO_ALIGNMENT 0
+#define ZERO_OFFSET 0
+
+#define BR_TARGET(v) U32_LE(v)
+
+#define MASK_7 ((1 << 7) - 1)
+#define MASK_14 ((1 << 14) - 1)
+#define MASK_21 ((1 << 21) - 1)
+#define MASK_28 ((1 << 28) - 1)
+
+#define U32V_1(x) static_cast<byte>((x)&MASK_7)
+#define U32V_2(x) \
+ static_cast<byte>(((x)&MASK_7) | 0x80), static_cast<byte>(((x) >> 7) & MASK_7)
+#define U32V_3(x) \
+ static_cast<byte>((((x)) & MASK_7) | 0x80), \
+ static_cast<byte>((((x) >> 7) & MASK_7) | 0x80), \
+ static_cast<byte>(((x) >> 14) & MASK_7)
+#define U32V_4(x) \
+ static_cast<byte>(((x)&MASK_7) | 0x80), \
+ static_cast<byte>((((x) >> 7) & MASK_7) | 0x80), \
+ static_cast<byte>((((x) >> 14) & MASK_7) | 0x80), \
+ static_cast<byte>(((x) >> 21) & MASK_7)
+#define U32V_5(x) \
+ static_cast<byte>(((x)&MASK_7) | 0x80), \
+ static_cast<byte>((((x) >> 7) & MASK_7) | 0x80), \
+ static_cast<byte>((((x) >> 14) & MASK_7) | 0x80), \
+ static_cast<byte>((((x) >> 21) & MASK_7) | 0x80), \
+ static_cast<byte>((((x) >> 28) & MASK_7))
+
// Convenience macros for building Wasm bytecode directly into a byte array.
//------------------------------------------------------------------------------
@@ -33,14 +77,8 @@
#define WASM_RETURN(...) kExprReturn, __VA_ARGS__
#define WASM_UNREACHABLE kExprUnreachable
-#define WASM_TABLESWITCH_OP(case_count, table_count, ...) \
- kExprTableSwitch, static_cast<byte>(case_count), \
- static_cast<byte>(case_count >> 8), static_cast<byte>(table_count), \
- static_cast<byte>(table_count >> 8), __VA_ARGS__
-
-#define WASM_TABLESWITCH_BODY0(key) key
-
-#define WASM_TABLESWITCH_BODY(key, ...) key, __VA_ARGS__
+#define WASM_BR_TABLE(key, count, ...) \
+ kExprBrTable, U32V_1(count), __VA_ARGS__, key
#define WASM_CASE(x) static_cast<byte>(x), static_cast<byte>(x >> 8)
#define WASM_CASE_BR(x) static_cast<byte>(x), static_cast<byte>(0x80 | (x) >> 8)
@@ -52,18 +90,222 @@
#define WASM_ZERO kExprI8Const, 0
#define WASM_ONE kExprI8Const, 1
#define WASM_I8(val) kExprI8Const, static_cast<byte>(val)
-#define WASM_I32(val) \
- kExprI32Const, static_cast<byte>(val), static_cast<byte>(val >> 8), \
- static_cast<byte>(val >> 16), static_cast<byte>(val >> 24)
-#define WASM_I64(val) \
- kExprI64Const, static_cast<byte>(static_cast<uint64_t>(val)), \
- static_cast<byte>(static_cast<uint64_t>(val) >> 8), \
- static_cast<byte>(static_cast<uint64_t>(val) >> 16), \
- static_cast<byte>(static_cast<uint64_t>(val) >> 24), \
- static_cast<byte>(static_cast<uint64_t>(val) >> 32), \
- static_cast<byte>(static_cast<uint64_t>(val) >> 40), \
- static_cast<byte>(static_cast<uint64_t>(val) >> 48), \
- static_cast<byte>(static_cast<uint64_t>(val) >> 56)
+
+#define I32V_MIN(length) -(1 << (6 + (7 * ((length) - 1))))
+#define I32V_MAX(length) ((1 << (6 + (7 * ((length) - 1)))) - 1)
+#define I64V_MIN(length) -(1LL << (6 + (7 * ((length) - 1))))
+#define I64V_MAX(length) ((1LL << (6 + 7 * ((length) - 1))) - 1)
+
+#define I32V_IN_RANGE(value, length) \
+ ((value) >= I32V_MIN(length) && (value) <= I32V_MAX(length))
+#define I64V_IN_RANGE(value, length) \
+ ((value) >= I64V_MIN(length) && (value) <= I64V_MAX(length))
+
+#define WASM_NO_LOCALS 0
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+inline void CheckI32v(int32_t value, int length) {
+ DCHECK(length >= 1 && length <= 5);
+ DCHECK(length == 5 || I32V_IN_RANGE(value, length));
+}
+
+inline void CheckI64v(int64_t value, int length) {
+ DCHECK(length >= 1 && length <= 10);
+ DCHECK(length == 10 || I64V_IN_RANGE(value, length));
+}
+
+// A helper for encoding local declarations prepended to the body of a
+// function.
+class LocalDeclEncoder {
+ public:
+ // Prepend local declarations by creating a new buffer and copying data
+ // over. The new buffer must be delete[]'d by the caller.
+ void Prepend(const byte** start, const byte** end) const {
+ size_t size = (*end - *start);
+ byte* buffer = new byte[Size() + size];
+ size_t pos = Emit(buffer);
+ memcpy(buffer + pos, *start, size);
+ pos += size;
+ *start = buffer;
+ *end = buffer + pos;
+ }
+
+ size_t Emit(byte* buffer) const {
+ size_t pos = 0;
+ pos = WriteUint32v(buffer, pos, static_cast<uint32_t>(local_decls.size()));
+ for (size_t i = 0; i < local_decls.size(); i++) {
+ pos = WriteUint32v(buffer, pos, local_decls[i].first);
+ buffer[pos++] = WasmOpcodes::LocalTypeCodeFor(local_decls[i].second);
+ }
+ DCHECK_EQ(Size(), pos);
+ return pos;
+ }
+
+ // Add locals declarations to this helper. Return the index of the newly added
+ // local(s), with an optional adjustment for the parameters.
+ uint32_t AddLocals(uint32_t count, LocalType type,
+ FunctionSig* sig = nullptr) {
+ if (count == 0) {
+ return static_cast<uint32_t>((sig ? sig->parameter_count() : 0) +
+ local_decls.size());
+ }
+ size_t pos = local_decls.size();
+ if (local_decls.size() > 0 && local_decls.back().second == type) {
+ count += local_decls.back().first;
+ local_decls.pop_back();
+ }
+ local_decls.push_back(std::pair<uint32_t, LocalType>(count, type));
+ return static_cast<uint32_t>(pos + (sig ? sig->parameter_count() : 0));
+ }
+
+ size_t Size() const {
+ size_t size = SizeofUint32v(static_cast<uint32_t>(local_decls.size()));
+ for (auto p : local_decls) size += 1 + SizeofUint32v(p.first);
+ return size;
+ }
+
+ private:
+ std::vector<std::pair<uint32_t, LocalType>> local_decls;
+
+ size_t SizeofUint32v(uint32_t val) const {
+ size_t size = 1;
+ while (true) {
+ byte b = val & MASK_7;
+ if (b == val) return size;
+ size++;
+ val = val >> 7;
+ }
+ }
+
+ // TODO(titzer): lift encoding of u32v to a common place.
+ size_t WriteUint32v(byte* buffer, size_t pos, uint32_t val) const {
+ while (true) {
+ byte b = val & MASK_7;
+ if (b == val) {
+ buffer[pos++] = b;
+ break;
+ }
+ buffer[pos++] = 0x80 | b;
+ val = val >> 7;
+ }
+ return pos;
+ }
+};
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+//------------------------------------------------------------------------------
+// Int32 Const operations
+//------------------------------------------------------------------------------
+#define WASM_I32V(val) kExprI32Const, U32V_5(val)
+
+#define WASM_I32V_1(val) \
+ static_cast<byte>(CheckI32v((val), 1), kExprI32Const), U32V_1(val)
+#define WASM_I32V_2(val) \
+ static_cast<byte>(CheckI32v((val), 2), kExprI32Const), U32V_2(val)
+#define WASM_I32V_3(val) \
+ static_cast<byte>(CheckI32v((val), 3), kExprI32Const), U32V_3(val)
+#define WASM_I32V_4(val) \
+ static_cast<byte>(CheckI32v((val), 4), kExprI32Const), U32V_4(val)
+#define WASM_I32V_5(val) \
+ static_cast<byte>(CheckI32v((val), 5), kExprI32Const), U32V_5(val)
+
+//------------------------------------------------------------------------------
+// Int64 Const operations
+//------------------------------------------------------------------------------
+#define WASM_I64V(val) \
+ kExprI64Const, \
+ static_cast<byte>((static_cast<int64_t>(val) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 7) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 14) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 21) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 28) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 35) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 42) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 49) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 56) & MASK_7) | 0x80), \
+ static_cast<byte>((static_cast<int64_t>(val) >> 63) & MASK_7)
+
+#define WASM_I64V_1(val) \
+ static_cast<byte>(CheckI64v(static_cast<int64_t>(val), 1), kExprI64Const), \
+ static_cast<byte>(static_cast<int64_t>(val) & MASK_7)
+#define WASM_I64V_2(val) \
+ static_cast<byte>(CheckI64v(static_cast<int64_t>(val), 2), kExprI64Const), \
+ static_cast<byte>((static_cast<int64_t>(val) & MASK_7) | 0x80), \
+ static_cast<byte>((static_cast<int64_t>(val) >> 7) & MASK_7)
+#define WASM_I64V_3(val) \
+ static_cast<byte>(CheckI64v(static_cast<int64_t>(val), 3), kExprI64Const), \
+ static_cast<byte>((static_cast<int64_t>(val) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 7) & MASK_7) | 0x80), \
+ static_cast<byte>((static_cast<int64_t>(val) >> 14) & MASK_7)
+#define WASM_I64V_4(val) \
+ static_cast<byte>(CheckI64v(static_cast<int64_t>(val), 4), kExprI64Const), \
+ static_cast<byte>((static_cast<int64_t>(val) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 7) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 14) & MASK_7) | 0x80), \
+ static_cast<byte>((static_cast<int64_t>(val) >> 21) & MASK_7)
+#define WASM_I64V_5(val) \
+ static_cast<byte>(CheckI64v(static_cast<int64_t>(val), 5), kExprI64Const), \
+ static_cast<byte>((static_cast<int64_t>(val) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 7) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 14) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 21) & MASK_7) | 0x80), \
+ static_cast<byte>((static_cast<int64_t>(val) >> 28) & MASK_7)
+#define WASM_I64V_6(val) \
+ static_cast<byte>(CheckI64v(static_cast<int64_t>(val), 6), kExprI64Const), \
+ static_cast<byte>((static_cast<int64_t>(val) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 7) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 14) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 21) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 28) & MASK_7) | 0x80), \
+ static_cast<byte>((static_cast<int64_t>(val) >> 35) & MASK_7)
+#define WASM_I64V_7(val) \
+ static_cast<byte>(CheckI64v(static_cast<int64_t>(val), 7), kExprI64Const), \
+ static_cast<byte>((static_cast<int64_t>(val) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 7) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 14) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 21) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 28) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 35) & MASK_7) | 0x80), \
+ static_cast<byte>((static_cast<int64_t>(val) >> 42) & MASK_7)
+#define WASM_I64V_8(val) \
+ static_cast<byte>(CheckI64v(static_cast<int64_t>(val), 8), kExprI64Const), \
+ static_cast<byte>((static_cast<int64_t>(val) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 7) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 14) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 21) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 28) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 35) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 42) & MASK_7) | 0x80), \
+ static_cast<byte>((static_cast<int64_t>(val) >> 49) & MASK_7)
+#define WASM_I64V_9(val) \
+ static_cast<byte>(CheckI64v(static_cast<int64_t>(val), 9), kExprI64Const), \
+ static_cast<byte>((static_cast<int64_t>(val) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 7) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 14) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 21) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 28) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 35) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 42) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 49) & MASK_7) | 0x80), \
+ static_cast<byte>((static_cast<int64_t>(val) >> 56) & MASK_7)
+#define WASM_I64V_10(val) \
+ static_cast<byte>(CheckI64v(static_cast<int64_t>(val), 10), kExprI64Const), \
+ static_cast<byte>((static_cast<int64_t>(val) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 7) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 14) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 21) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 28) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 35) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 42) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 49) & MASK_7) | 0x80), \
+ static_cast<byte>(((static_cast<int64_t>(val) >> 56) & MASK_7) | 0x80), \
+ static_cast<byte>((static_cast<int64_t>(val) >> 63) & MASK_7)
+
#define WASM_F32(val) \
kExprF32Const, \
static_cast<byte>(bit_cast<int32_t>(static_cast<float>(val))), \
@@ -87,21 +329,19 @@
#define WASM_LOAD_MEM(type, index) \
static_cast<byte>( \
v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, false)), \
- v8::internal::wasm::WasmOpcodes::LoadStoreAccessOf(false), index
+ ZERO_ALIGNMENT, ZERO_OFFSET, index
#define WASM_STORE_MEM(type, index, val) \
static_cast<byte>( \
v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, true)), \
- v8::internal::wasm::WasmOpcodes::LoadStoreAccessOf(false), index, val
+ ZERO_ALIGNMENT, ZERO_OFFSET, index, val
#define WASM_LOAD_MEM_OFFSET(type, offset, index) \
static_cast<byte>( \
v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, false)), \
- v8::internal::wasm::WasmOpcodes::LoadStoreAccessOf(true), \
- static_cast<byte>(offset), index
+ ZERO_ALIGNMENT, U32V_1(offset), index
#define WASM_STORE_MEM_OFFSET(type, offset, index, val) \
static_cast<byte>( \
v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, true)), \
- v8::internal::wasm::WasmOpcodes::LoadStoreAccessOf(true), \
- static_cast<byte>(offset), index, val
+ ZERO_ALIGNMENT, U32V_1(offset), index, val
#define WASM_CALL_FUNCTION(index, ...) \
kExprCallFunction, static_cast<byte>(index), __VA_ARGS__
#define WASM_CALL_IMPORT(index, ...) \
@@ -112,7 +352,7 @@
#define WASM_CALL_IMPORT0(index) kExprCallImport, static_cast<byte>(index)
#define WASM_CALL_INDIRECT0(index, func) \
kExprCallIndirect, static_cast<byte>(index), func
-#define WASM_NOT(x) kExprBoolNot, x
+#define WASM_NOT(x) kExprI32Eqz, x
//------------------------------------------------------------------------------
// Constructs that are composed of multiple bytecodes.
@@ -144,6 +384,8 @@
#define WASM_I32_SHL(x, y) kExprI32Shl, x, y
#define WASM_I32_SHR(x, y) kExprI32ShrU, x, y
#define WASM_I32_SAR(x, y) kExprI32ShrS, x, y
+#define WASM_I32_ROR(x, y) kExprI32Ror, x, y
+#define WASM_I32_ROL(x, y) kExprI32Rol, x, y
#define WASM_I32_EQ(x, y) kExprI32Eq, x, y
#define WASM_I32_NE(x, y) kExprI32Ne, x, y
#define WASM_I32_LTS(x, y) kExprI32LtS, x, y
@@ -157,6 +399,7 @@
#define WASM_I32_CLZ(x) kExprI32Clz, x
#define WASM_I32_CTZ(x) kExprI32Ctz, x
#define WASM_I32_POPCNT(x) kExprI32Popcnt, x
+#define WASM_I32_EQZ(x) kExprI32Eqz, x
//------------------------------------------------------------------------------
// Int64 operations
@@ -174,6 +417,8 @@
#define WASM_I64_SHL(x, y) kExprI64Shl, x, y
#define WASM_I64_SHR(x, y) kExprI64ShrU, x, y
#define WASM_I64_SAR(x, y) kExprI64ShrS, x, y
+#define WASM_I64_ROR(x, y) kExprI64Ror, x, y
+#define WASM_I64_ROL(x, y) kExprI64Rol, x, y
#define WASM_I64_EQ(x, y) kExprI64Eq, x, y
#define WASM_I64_NE(x, y) kExprI64Ne, x, y
#define WASM_I64_LTS(x, y) kExprI64LtS, x, y
@@ -187,6 +432,7 @@
#define WASM_I64_CLZ(x) kExprI64Clz, x
#define WASM_I64_CTZ(x) kExprI64Ctz, x
#define WASM_I64_POPCNT(x) kExprI64Popcnt, x
+#define WASM_I64_EQZ(x) kExprI64Eqz, x
//------------------------------------------------------------------------------
// Float32 operations
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index 02d197c547..a1c2a7a3e1 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -19,13 +19,43 @@ namespace v8 {
namespace internal {
namespace wasm {
+static const char* wasmSections[] = {
+#define F(enumerator, string) string,
+ FOR_EACH_WASM_SECTION_TYPE(F)
+#undef F
+};
+
+static uint8_t wasmSectionsLengths[]{
+#define F(enumerator, string) sizeof(string) - 1,
+ FOR_EACH_WASM_SECTION_TYPE(F)
+#undef F
+};
+
+static_assert(sizeof(wasmSections) / sizeof(wasmSections[0]) ==
+ (size_t)WasmSection::Code::Max,
+ "expected enum WasmSection::Code to be monotonic from 0");
+
+WasmSection::Code WasmSection::begin() { return (WasmSection::Code)0; }
+WasmSection::Code WasmSection::end() { return WasmSection::Code::Max; }
+WasmSection::Code WasmSection::next(WasmSection::Code code) {
+ return (WasmSection::Code)(1 + (uint32_t)code);
+}
+
+const char* WasmSection::getName(WasmSection::Code code) {
+ return wasmSections[(size_t)code];
+}
+
+size_t WasmSection::getNameLength(WasmSection::Code code) {
+ return wasmSectionsLengths[(size_t)code];
+}
+
std::ostream& operator<<(std::ostream& os, const WasmModule& module) {
os << "WASM module with ";
- os << (1 << module.min_mem_size_log2) << " min mem";
- os << (1 << module.max_mem_size_log2) << " max mem";
- if (module.functions) os << module.functions->size() << " functions";
- if (module.globals) os << module.functions->size() << " globals";
- if (module.data_segments) os << module.functions->size() << " data segments";
+ os << (module.min_mem_pages * module.kPageSize) << " min mem";
+ os << (module.max_mem_pages * module.kPageSize) << " max mem";
+ os << module.functions.size() << " functions";
+ os << module.functions.size() << " globals";
+ os << module.functions.size() << " data segments";
return os;
}
@@ -48,7 +78,9 @@ std::ostream& operator<<(std::ostream& os, const WasmFunctionName& pair) {
os << "#" << pair.function_->func_index << ":";
if (pair.function_->name_offset > 0) {
if (pair.module_) {
- os << pair.module_->GetName(pair.function_->name_offset);
+ WasmName name = pair.module_->GetName(pair.function_->name_offset,
+ pair.function_->name_length);
+ os.write(name.name, name.length);
} else {
os << "+" << pair.function_->func_index;
}
@@ -91,15 +123,15 @@ class WasmLinker {
}
void Link(Handle<FixedArray> function_table,
- std::vector<uint16_t>* functions) {
+ std::vector<uint16_t>& functions) {
for (size_t i = 0; i < function_code_.size(); i++) {
LinkFunction(function_code_[i]);
}
- if (functions && !function_table.is_null()) {
- int table_size = static_cast<int>(functions->size());
+ if (!function_table.is_null()) {
+ int table_size = static_cast<int>(functions.size());
DCHECK_EQ(function_table->length(), table_size * 2);
for (int i = 0; i < table_size; i++) {
- function_table->set(i + table_size, *function_code_[functions->at(i)]);
+ function_table->set(i + table_size, *function_code_[functions[i]]);
}
}
}
@@ -151,11 +183,10 @@ const int kWasmModuleCodeTable = 1;
const int kWasmMemArrayBuffer = 2;
const int kWasmGlobalsArrayBuffer = 3;
-
-size_t AllocateGlobalsOffsets(std::vector<WasmGlobal>* globals) {
+size_t AllocateGlobalsOffsets(std::vector<WasmGlobal>& globals) {
uint32_t offset = 0;
- if (!globals) return 0;
- for (WasmGlobal& global : *globals) {
+ if (globals.size() == 0) return 0;
+ for (WasmGlobal& global : globals) {
byte size = WasmOpcodes::MemSize(global.type);
offset = (offset + size - 1) & ~(size - 1); // align
global.offset = offset;
@@ -166,8 +197,9 @@ size_t AllocateGlobalsOffsets(std::vector<WasmGlobal>* globals) {
void LoadDataSegments(WasmModule* module, byte* mem_addr, size_t mem_size) {
- for (const WasmDataSegment& segment : *module->data_segments) {
+ for (const WasmDataSegment& segment : module->data_segments) {
if (!segment.init) continue;
+ if (!segment.source_size) continue;
CHECK_LT(segment.dest_addr, mem_size);
CHECK_LE(segment.source_size, mem_size);
CHECK_LE(segment.dest_addr + segment.source_size, mem_size);
@@ -179,14 +211,13 @@ void LoadDataSegments(WasmModule* module, byte* mem_addr, size_t mem_size) {
Handle<FixedArray> BuildFunctionTable(Isolate* isolate, WasmModule* module) {
- if (!module->function_table || module->function_table->size() == 0) {
+ if (module->function_table.size() == 0) {
return Handle<FixedArray>::null();
}
- int table_size = static_cast<int>(module->function_table->size());
+ int table_size = static_cast<int>(module->function_table.size());
Handle<FixedArray> fixed = isolate->factory()->NewFixedArray(2 * table_size);
for (int i = 0; i < table_size; i++) {
- WasmFunction* function =
- &module->functions->at(module->function_table->at(i));
+ WasmFunction* function = &module->functions[module->function_table[i]];
fixed->set(i, Smi::FromInt(function->sig_index));
}
return fixed;
@@ -194,7 +225,7 @@ Handle<FixedArray> BuildFunctionTable(Isolate* isolate, WasmModule* module) {
Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
byte** backing_store) {
- if (size > (1 << WasmModule::kMaxMemSize)) {
+ if (size > (WasmModule::kMaxMemPages * WasmModule::kPageSize)) {
// TODO(titzer): lift restriction on maximum memory allocated here.
*backing_store = nullptr;
return Handle<JSArrayBuffer>::null();
@@ -236,12 +267,11 @@ bool AllocateMemory(ErrorThrower* thrower, Isolate* isolate,
DCHECK(instance->module);
DCHECK(instance->mem_buffer.is_null());
- if (instance->module->min_mem_size_log2 > WasmModule::kMaxMemSize) {
+ if (instance->module->min_mem_pages > WasmModule::kMaxMemPages) {
thrower->Error("Out of memory: wasm memory too large");
return false;
}
- instance->mem_size = static_cast<size_t>(1)
- << instance->module->min_mem_size_log2;
+ instance->mem_size = WasmModule::kPageSize * instance->module->min_mem_pages;
instance->mem_buffer =
NewArrayBuffer(isolate, instance->mem_size, &instance->mem_start);
if (!instance->mem_start) {
@@ -273,50 +303,75 @@ WasmModule::WasmModule()
: shared_isolate(nullptr),
module_start(nullptr),
module_end(nullptr),
- min_mem_size_log2(0),
- max_mem_size_log2(0),
+ min_mem_pages(0),
+ max_mem_pages(0),
mem_export(false),
mem_external(false),
start_function_index(-1),
- globals(nullptr),
- signatures(nullptr),
- functions(nullptr),
- data_segments(nullptr),
- function_table(nullptr),
- import_table(nullptr) {}
-
-WasmModule::~WasmModule() {
- if (globals) delete globals;
- if (signatures) delete signatures;
- if (functions) delete functions;
- if (data_segments) delete data_segments;
- if (function_table) delete function_table;
- if (import_table) delete import_table;
+ origin(kWasmOrigin) {}
+
+static MaybeHandle<JSFunction> ReportFFIError(ErrorThrower& thrower,
+ const char* error, uint32_t index,
+ wasm::WasmName module_name,
+ wasm::WasmName function_name) {
+ if (function_name.name) {
+ thrower.Error("Import #%d module=\"%.*s\" function=\"%.*s\" error: %s",
+ index, module_name.length, module_name.name,
+ function_name.length, function_name.name, error);
+ } else {
+ thrower.Error("Import #%d module=\"%.*s\" error: %s", index,
+ module_name.length, module_name.name, error);
+ }
+ thrower.Error("Import ");
+ return MaybeHandle<JSFunction>();
}
-static MaybeHandle<JSFunction> LookupFunction(ErrorThrower& thrower,
- Handle<JSObject> ffi,
- uint32_t index,
- Handle<String> name,
- const char* cstr) {
- if (!ffi.is_null()) {
- MaybeHandle<Object> result = Object::GetProperty(ffi, name);
- if (!result.is_null()) {
- Handle<Object> obj = result.ToHandleChecked();
- if (obj->IsJSFunction()) {
- return Handle<JSFunction>::cast(obj);
- } else {
- thrower.Error("FFI function #%d:%s is not a JSFunction.", index, cstr);
- return MaybeHandle<JSFunction>();
- }
- } else {
- thrower.Error("FFI function #%d:%s not found.", index, cstr);
- return MaybeHandle<JSFunction>();
+static MaybeHandle<JSFunction> LookupFunction(
+ ErrorThrower& thrower, Factory* factory, Handle<JSObject> ffi,
+ uint32_t index, wasm::WasmName module_name, wasm::WasmName function_name) {
+ if (ffi.is_null()) {
+ return ReportFFIError(thrower, "FFI is not an object", index, module_name,
+ function_name);
+ }
+
+ // Look up the module first.
+ Handle<String> name = factory->InternalizeUtf8String(
+ Vector<const char>(module_name.name, module_name.length));
+ MaybeHandle<Object> result = Object::GetProperty(ffi, name);
+ if (result.is_null()) {
+ return ReportFFIError(thrower, "module not found", index, module_name,
+ function_name);
+ }
+
+ Handle<Object> module = result.ToHandleChecked();
+
+ if (!module->IsJSReceiver()) {
+ return ReportFFIError(thrower, "module is not an object or function", index,
+ module_name, function_name);
+ }
+
+ Handle<Object> function;
+ if (function_name.name) {
+ // Look up the function in the module.
+ Handle<String> name = factory->InternalizeUtf8String(
+ Vector<const char>(function_name.name, function_name.length));
+ MaybeHandle<Object> result = Object::GetProperty(module, name);
+ if (result.is_null()) {
+ return ReportFFIError(thrower, "function not found", index, module_name,
+ function_name);
}
+ function = result.ToHandleChecked();
} else {
- thrower.Error("FFI table is not an object.");
- return MaybeHandle<JSFunction>();
+ // No function specified. Use the "default export".
+ function = module;
+ }
+
+ if (!function->IsJSFunction()) {
+ return ReportFFIError(thrower, "not a function", index, module_name,
+ function_name);
}
+
+ return Handle<JSFunction>::cast(function);
}
// Instantiates a wasm module as a JSObject.
@@ -338,11 +393,10 @@ MaybeHandle<JSObject> WasmModule::Instantiate(Isolate* isolate,
JS_OBJECT_TYPE,
JSObject::kHeaderSize + kWasmModuleInternalFieldCount * kPointerSize);
WasmModuleInstance instance(this);
- std::vector<Handle<Code>> import_code;
instance.context = isolate->native_context();
instance.js_object = factory->NewJSObjectFromMap(map, TENURED);
Handle<FixedArray> code_table =
- factory->NewFixedArray(static_cast<int>(functions->size()), TENURED);
+ factory->NewFixedArray(static_cast<int>(functions.size()), TENURED);
instance.js_object->SetInternalField(kWasmModuleCodeTable, *code_table);
//-------------------------------------------------------------------------
@@ -359,13 +413,6 @@ MaybeHandle<JSObject> WasmModule::Instantiate(Isolate* isolate,
*instance.mem_buffer);
LoadDataSegments(this, instance.mem_start, instance.mem_size);
- if (mem_export) {
- // Export the memory as a named property.
- Handle<String> name = factory->InternalizeUtf8String("memory");
- JSObject::AddProperty(instance.js_object, name, instance.mem_buffer,
- READ_ONLY);
- }
-
//-------------------------------------------------------------------------
// Allocate the globals area if necessary.
//-------------------------------------------------------------------------
@@ -382,25 +429,27 @@ MaybeHandle<JSObject> WasmModule::Instantiate(Isolate* isolate,
//-------------------------------------------------------------------------
uint32_t index = 0;
instance.function_table = BuildFunctionTable(isolate, this);
- WasmLinker linker(isolate, functions->size());
+ WasmLinker linker(isolate, functions.size());
ModuleEnv module_env;
module_env.module = this;
module_env.instance = &instance;
module_env.linker = &linker;
- module_env.asm_js = false;
-
- if (import_table->size() > 0) {
- instance.import_code = &import_code;
- instance.import_code->reserve(import_table->size());
- for (const WasmImport& import : *import_table) {
- const char* cstr = GetName(import.function_name_offset);
- Handle<String> name = factory->InternalizeUtf8String(cstr);
- MaybeHandle<JSFunction> function =
- LookupFunction(thrower, ffi, index, name, cstr);
+ module_env.origin = origin;
+
+ if (import_table.size() > 0) {
+ instance.import_code.reserve(import_table.size());
+ for (const WasmImport& import : import_table) {
+ WasmName module_name =
+ GetNameOrNull(import.module_name_offset, import.module_name_length);
+ WasmName function_name = GetNameOrNull(import.function_name_offset,
+ import.function_name_length);
+ MaybeHandle<JSFunction> function = LookupFunction(
+ thrower, factory, ffi, index, module_name, function_name);
if (function.is_null()) return MaybeHandle<JSObject>();
Handle<Code> code = compiler::CompileWasmToJSWrapper(
- isolate, &module_env, function.ToHandleChecked(), import.sig, cstr);
- instance.import_code->push_back(code);
+ isolate, &module_env, function.ToHandleChecked(), import.sig,
+ module_name, function_name);
+ instance.import_code.push_back(code);
index++;
}
}
@@ -410,27 +459,32 @@ MaybeHandle<JSObject> WasmModule::Instantiate(Isolate* isolate,
//-------------------------------------------------------------------------
// First pass: compile each function and initialize the code table.
- index = 0;
- for (const WasmFunction& func : *functions) {
+ index = FLAG_skip_compiling_wasm_funcs;
+ while (index < functions.size()) {
+ const WasmFunction& func = functions[index];
if (thrower.error()) break;
DCHECK_EQ(index, func.func_index);
- const char* cstr = GetName(func.name_offset);
- Handle<String> name = factory->InternalizeUtf8String(cstr);
+ WasmName str = GetName(func.name_offset, func.name_length);
+ WasmName str_null = {nullptr, 0};
+ Handle<String> name = factory->InternalizeUtf8String(
+ Vector<const char>(str.name, str.length));
Handle<Code> code = Handle<Code>::null();
Handle<JSFunction> function = Handle<JSFunction>::null();
if (func.external) {
// Lookup external function in FFI object.
MaybeHandle<JSFunction> function =
- LookupFunction(thrower, ffi, index, name, cstr);
+ LookupFunction(thrower, factory, ffi, index, str, str_null);
if (function.is_null()) return MaybeHandle<JSObject>();
- code = compiler::CompileWasmToJSWrapper(
- isolate, &module_env, function.ToHandleChecked(), func.sig, cstr);
+ code = compiler::CompileWasmToJSWrapper(isolate, &module_env,
+ function.ToHandleChecked(),
+ func.sig, str, str_null);
} else {
// Compile the function.
code = compiler::CompileWasmFunction(thrower, isolate, &module_env, func);
if (code.is_null()) {
- thrower.Error("Compilation of #%d:%s failed.", index, cstr);
+ thrower.Error("Compilation of #%d:%.*s failed.", index, str.length,
+ str.name);
return MaybeHandle<JSObject>();
}
if (func.exported) {
@@ -455,6 +509,40 @@ MaybeHandle<JSObject> WasmModule::Instantiate(Isolate* isolate,
instance.js_object->SetInternalField(kWasmModuleFunctionTable,
Smi::FromInt(0));
+ //-------------------------------------------------------------------------
+ // Create and populate the exports object.
+ //-------------------------------------------------------------------------
+ if (export_table.size() > 0 || mem_export) {
+ index = 0;
+ // Create the "exports" object.
+ Handle<JSFunction> object_function = Handle<JSFunction>(
+ isolate->native_context()->object_function(), isolate);
+ Handle<JSObject> exports_object =
+ factory->NewJSObject(object_function, TENURED);
+ Handle<String> exports_name = factory->InternalizeUtf8String("exports");
+ JSObject::AddProperty(instance.js_object, exports_name, exports_object,
+ READ_ONLY);
+
+ // Compile wrappers and add them to the exports object.
+ for (const WasmExport& exp : export_table) {
+ if (thrower.error()) break;
+ WasmName str = GetName(exp.name_offset, exp.name_length);
+ Handle<String> name = factory->InternalizeUtf8String(
+ Vector<const char>(str.name, str.length));
+ Handle<Code> code = linker.GetFunctionCode(exp.func_index);
+ Handle<JSFunction> function = compiler::CompileJSToWasmWrapper(
+ isolate, &module_env, name, code, instance.js_object, exp.func_index);
+ JSObject::AddProperty(exports_object, name, function, READ_ONLY);
+ }
+
+ if (mem_export) {
+ // Export the memory as a named property.
+ Handle<String> name = factory->InternalizeUtf8String("memory");
+ JSObject::AddProperty(exports_object, name, instance.mem_buffer,
+ READ_ONLY);
+ }
+ }
+
// Run the start function if one was specified.
if (this->start_function_index >= 0) {
HandleScope scope(isolate);
@@ -480,18 +568,12 @@ MaybeHandle<JSObject> WasmModule::Instantiate(Isolate* isolate,
Handle<Code> ModuleEnv::GetFunctionCode(uint32_t index) {
DCHECK(IsValidFunction(index));
if (linker) return linker->GetFunctionCode(index);
- if (instance && instance->function_code) {
- return instance->function_code->at(index);
- }
- return Handle<Code>::null();
+ return instance ? instance->function_code[index] : Handle<Code>::null();
}
Handle<Code> ModuleEnv::GetImportCode(uint32_t index) {
DCHECK(IsValidImport(index));
- if (instance && instance->import_code) {
- return instance->import_code->at(index);
- }
- return Handle<Code>::null();
+ return instance ? instance->import_code[index] : Handle<Code>::null();
}
compiler::CallDescriptor* ModuleEnv::GetCallDescriptor(Zone* zone,
@@ -499,7 +581,7 @@ compiler::CallDescriptor* ModuleEnv::GetCallDescriptor(Zone* zone,
DCHECK(IsValidFunction(index));
// Always make a direct call to whatever is in the table at that location.
// A wrapper will be generated for FFI calls.
- WasmFunction* function = &module->functions->at(index);
+ WasmFunction* function = &module->functions[index];
return GetWasmCallDescriptor(zone, function->sig);
}
@@ -507,12 +589,15 @@ compiler::CallDescriptor* ModuleEnv::GetCallDescriptor(Zone* zone,
int32_t CompileAndRunWasmModule(Isolate* isolate, const byte* module_start,
const byte* module_end, bool asm_js) {
HandleScope scope(isolate);
- Zone zone;
+ Zone zone(isolate->allocator());
// Decode the module, but don't verify function bodies, since we'll
// be compiling them anyway.
- ModuleResult result =
- DecodeWasmModule(isolate, &zone, module_start, module_end, false, false);
+ ModuleResult result = DecodeWasmModule(isolate, &zone, module_start,
+ module_end, false, kWasmOrigin);
if (result.failed()) {
+ if (result.val) {
+ delete result.val;
+ }
// Module verification failed. throw.
std::ostringstream str;
str << "WASM.compileRun() failed: " << result;
@@ -546,18 +631,18 @@ int32_t CompileAndRunWasmModule(Isolate* isolate, WasmModule* module) {
instance.function_table = BuildFunctionTable(isolate, module);
// Create module environment.
- WasmLinker linker(isolate, module->functions->size());
+ WasmLinker linker(isolate, module->functions.size());
ModuleEnv module_env;
module_env.module = module;
module_env.instance = &instance;
module_env.linker = &linker;
- module_env.asm_js = false;
+ module_env.origin = module->origin;
// Compile all functions.
Handle<Code> main_code = Handle<Code>::null(); // record last code.
uint32_t index = 0;
int main_index = 0;
- for (const WasmFunction& func : *module->functions) {
+ for (const WasmFunction& func : module->functions) {
DCHECK_EQ(index, func.func_index);
if (!func.external) {
// Compile the function and install it in the code table.
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index 5f5777cebe..4e5aa78486 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -22,22 +22,81 @@ namespace wasm {
const size_t kMaxModuleSize = 1024 * 1024 * 1024;
const size_t kMaxFunctionSize = 128 * 1024;
const size_t kMaxStringSize = 256;
-
-enum WasmSectionDeclCode {
- kDeclMemory = 0x00,
- kDeclSignatures = 0x01,
- kDeclFunctions = 0x02,
- kDeclGlobals = 0x03,
- kDeclDataSegments = 0x04,
- kDeclFunctionTable = 0x05,
- kDeclEnd = 0x06,
- kDeclStartFunction = 0x07,
- kDeclImportTable = 0x08,
- kDeclWLL = 0x11,
+const uint32_t kWasmMagic = 0x6d736100;
+const uint32_t kWasmVersion = 0x0a;
+
+// WebAssembly sections are named as strings in the binary format, but
+// internally V8 uses an enum to handle them.
+//
+// Entries have the form F(enumerator, string).
+#define FOR_EACH_WASM_SECTION_TYPE(F) \
+ F(Memory, "memory") \
+ F(Signatures, "signatures") \
+ F(Functions, "functions") \
+ F(Globals, "globals") \
+ F(DataSegments, "data_segments") \
+ F(FunctionTable, "function_table") \
+ F(End, "end") \
+ F(StartFunction, "start_function") \
+ F(ImportTable, "import_table") \
+ F(ExportTable, "export_table") \
+ F(FunctionSignatures, "function_signatures") \
+ F(FunctionBodies, "function_bodies") \
+ F(Names, "names")
+
+// Contants for the above section types: {LEB128 length, characters...}.
+#define WASM_SECTION_MEMORY 6, 'm', 'e', 'm', 'o', 'r', 'y'
+#define WASM_SECTION_SIGNATURES \
+ 10, 's', 'i', 'g', 'n', 'a', 't', 'u', 'r', 'e', 's'
+#define WASM_SECTION_FUNCTIONS 9, 'f', 'u', 'n', 'c', 't', 'i', 'o', 'n', 's'
+#define WASM_SECTION_GLOBALS 7, 'g', 'l', 'o', 'b', 'a', 'l', 's'
+#define WASM_SECTION_DATA_SEGMENTS \
+ 13, 'd', 'a', 't', 'a', '_', 's', 'e', 'g', 'm', 'e', 'n', 't', 's'
+#define WASM_SECTION_FUNCTION_TABLE \
+ 14, 'f', 'u', 'n', 'c', 't', 'i', 'o', 'n', '_', 't', 'a', 'b', 'l', 'e'
+#define WASM_SECTION_END 3, 'e', 'n', 'd'
+#define WASM_SECTION_START_FUNCTION \
+ 14, 's', 't', 'a', 'r', 't', '_', 'f', 'u', 'n', 'c', 't', 'i', 'o', 'n'
+#define WASM_SECTION_IMPORT_TABLE \
+ 12, 'i', 'm', 'p', 'o', 'r', 't', '_', 't', 'a', 'b', 'l', 'e'
+#define WASM_SECTION_EXPORT_TABLE \
+ 12, 'e', 'x', 'p', 'o', 'r', 't', '_', 't', 'a', 'b', 'l', 'e'
+#define WASM_SECTION_FUNCTION_SIGNATURES \
+ 19, 'f', 'u', 'n', 'c', 't', 'i', 'o', 'n', '_', 's', 'i', 'g', 'n', 'a', \
+ 't', 'u', 'r', 'e', 's'
+#define WASM_SECTION_FUNCTION_BODIES \
+ 15, 'f', 'u', 'n', 'c', 't', 'i', 'o', 'n', '_', 'b', 'o', 'd', 'i', 'e', 's'
+#define WASM_SECTION_NAMES 5, 'n', 'a', 'm', 'e', 's'
+
+// Constants for the above section headers' size (LEB128 + characters).
+#define WASM_SECTION_MEMORY_SIZE ((size_t)7)
+#define WASM_SECTION_SIGNATURES_SIZE ((size_t)11)
+#define WASM_SECTION_FUNCTIONS_SIZE ((size_t)10)
+#define WASM_SECTION_GLOBALS_SIZE ((size_t)8)
+#define WASM_SECTION_DATA_SEGMENTS_SIZE ((size_t)14)
+#define WASM_SECTION_FUNCTION_TABLE_SIZE ((size_t)15)
+#define WASM_SECTION_END_SIZE ((size_t)4)
+#define WASM_SECTION_START_FUNCTION_SIZE ((size_t)15)
+#define WASM_SECTION_IMPORT_TABLE_SIZE ((size_t)13)
+#define WASM_SECTION_EXPORT_TABLE_SIZE ((size_t)13)
+#define WASM_SECTION_FUNCTION_SIGNATURES_SIZE ((size_t)20)
+#define WASM_SECTION_FUNCTION_BODIES_SIZE ((size_t)16)
+#define WASM_SECTION_NAMES_SIZE ((size_t)6)
+
+struct WasmSection {
+ enum class Code : uint32_t {
+#define F(enumerator, string) enumerator,
+ FOR_EACH_WASM_SECTION_TYPE(F)
+#undef F
+ Max
+ };
+ static WasmSection::Code begin();
+ static WasmSection::Code end();
+ static WasmSection::Code next(WasmSection::Code code);
+ static const char* getName(Code code);
+ static size_t getNameLength(Code code);
};
-static const int kMaxModuleSectionCode = 0x11;
-
enum WasmFunctionDeclBit {
kDeclFunctionName = 0x01,
kDeclFunctionImport = 0x02,
@@ -47,15 +106,15 @@ enum WasmFunctionDeclBit {
// Constants for fixed-size elements within a module.
static const size_t kDeclMemorySize = 3;
-static const size_t kDeclGlobalSize = 6;
static const size_t kDeclDataSegmentSize = 13;
// Static representation of a WASM function.
struct WasmFunction {
FunctionSig* sig; // signature of the function.
uint32_t func_index; // index into the function table.
- uint16_t sig_index; // index into the signature table.
+ uint32_t sig_index; // index into the signature table.
uint32_t name_offset; // offset in the module bytes of the name, if any.
+ uint32_t name_length; // length in bytes of the name.
uint32_t code_start_offset; // offset in the module bytes of code start.
uint32_t code_end_offset; // offset in the module bytes of code end.
uint16_t local_i32_count; // number of i32 local variables.
@@ -69,14 +128,24 @@ struct WasmFunction {
// Static representation of an imported WASM function.
struct WasmImport {
FunctionSig* sig; // signature of the function.
- uint16_t sig_index; // index into the signature table.
+ uint32_t sig_index; // index into the signature table.
uint32_t module_name_offset; // offset in module bytes of the module name.
+ uint32_t module_name_length; // length in bytes of the module name.
uint32_t function_name_offset; // offset in module bytes of the import name.
+ uint32_t function_name_length; // length in bytes of the import name.
+};
+
+// Static representation of an exported WASM function.
+struct WasmExport {
+ uint32_t func_index; // index into the function table.
+ uint32_t name_offset; // offset in module bytes of the name to export.
+ uint32_t name_length; // length in bytes of the exported name.
};
// Static representation of a wasm global variable.
struct WasmGlobal {
uint32_t name_offset; // offset in the module bytes of the name, if any.
+ uint32_t name_length; // length in bytes of the global name.
MachineType type; // type of the global.
uint32_t offset; // offset from beginning of globals area.
bool exported; // true if this global is exported.
@@ -90,35 +159,46 @@ struct WasmDataSegment {
bool init; // true if loaded upon instantiation.
};
+enum ModuleOrigin { kWasmOrigin, kAsmJsOrigin };
+
// Static representation of a module.
struct WasmModule {
- static const uint8_t kMinMemSize = 12; // Minimum memory size = 4kb
- static const uint8_t kMaxMemSize = 30; // Maximum memory size = 1gb
+ static const uint32_t kPageSize = 0x10000; // Page size, 64kb.
+ static const uint32_t kMinMemPages = 1; // Minimum memory size = 64kb
+ static const uint32_t kMaxMemPages = 16384; // Maximum memory size = 1gb
Isolate* shared_isolate; // isolate for storing shared code.
const byte* module_start; // starting address for the module bytes.
const byte* module_end; // end address for the module bytes.
- uint8_t min_mem_size_log2; // minimum size of the memory (log base 2).
- uint8_t max_mem_size_log2; // maximum size of the memory (log base 2).
+ uint32_t min_mem_pages; // minimum size of the memory in 64k pages.
+ uint32_t max_mem_pages; // maximum size of the memory in 64k pages.
bool mem_export; // true if the memory is exported.
bool mem_external; // true if the memory is external.
int start_function_index; // start function, if any.
+ ModuleOrigin origin; // origin of the module
- std::vector<WasmGlobal>* globals; // globals in this module.
- std::vector<FunctionSig*>* signatures; // signatures in this module.
- std::vector<WasmFunction>* functions; // functions in this module.
- std::vector<WasmDataSegment>* data_segments; // data segments in this module.
- std::vector<uint16_t>* function_table; // function table.
- std::vector<WasmImport>* import_table; // import table.
+ std::vector<WasmGlobal> globals; // globals in this module.
+ std::vector<FunctionSig*> signatures; // signatures in this module.
+ std::vector<WasmFunction> functions; // functions in this module.
+ std::vector<WasmDataSegment> data_segments; // data segments in this module.
+ std::vector<uint16_t> function_table; // function table.
+ std::vector<WasmImport> import_table; // import table.
+ std::vector<WasmExport> export_table; // export table.
WasmModule();
- ~WasmModule();
- // Get a pointer to a string stored in the module bytes representing a name.
- const char* GetName(uint32_t offset) const {
- if (offset == 0) return "<?>"; // no name.
- CHECK(BoundsCheck(offset, offset + 1));
- return reinterpret_cast<const char*>(module_start + offset);
+ // Get a string stored in the module bytes representing a name.
+ WasmName GetName(uint32_t offset, uint32_t length) const {
+ if (length == 0) return {"<?>", 3}; // no name.
+ CHECK(BoundsCheck(offset, offset + length));
+ return {reinterpret_cast<const char*>(module_start + offset), length};
+ }
+
+ // Get a string stored in the module bytes representing a name.
+ WasmName GetNameOrNull(uint32_t offset, uint32_t length) const {
+ if (length == 0) return {NULL, 0}; // no name.
+ CHECK(BoundsCheck(offset, offset + length));
+ return {reinterpret_cast<const char*>(module_start + offset), length};
}
// Checks the given offset range is contained within the module bytes.
@@ -141,8 +221,8 @@ struct WasmModuleInstance {
Handle<JSArrayBuffer> mem_buffer; // Handle to array buffer of memory.
Handle<JSArrayBuffer> globals_buffer; // Handle to array buffer of globals.
Handle<FixedArray> function_table; // indirect function table.
- std::vector<Handle<Code>>* function_code; // code objects for each function.
- std::vector<Handle<Code>>* import_code; // code objects for each import.
+ std::vector<Handle<Code>> function_code; // code objects for each function.
+ std::vector<Handle<Code>> import_code; // code objects for each import.
// -- raw memory ------------------------------------------------------------
byte* mem_start; // start of linear memory.
size_t mem_size; // size of the linear memory.
@@ -152,7 +232,6 @@ struct WasmModuleInstance {
explicit WasmModuleInstance(WasmModule* m)
: module(m),
- function_code(nullptr),
mem_start(nullptr),
mem_size(0),
globals_start(nullptr),
@@ -168,41 +247,42 @@ struct ModuleEnv {
WasmModule* module;
WasmModuleInstance* instance;
WasmLinker* linker;
- bool asm_js; // true if the module originated from asm.js.
+ ModuleOrigin origin;
bool IsValidGlobal(uint32_t index) {
- return module && index < module->globals->size();
+ return module && index < module->globals.size();
}
bool IsValidFunction(uint32_t index) {
- return module && index < module->functions->size();
+ return module && index < module->functions.size();
}
bool IsValidSignature(uint32_t index) {
- return module && index < module->signatures->size();
+ return module && index < module->signatures.size();
}
bool IsValidImport(uint32_t index) {
- return module && index < module->import_table->size();
+ return module && index < module->import_table.size();
}
MachineType GetGlobalType(uint32_t index) {
DCHECK(IsValidGlobal(index));
- return module->globals->at(index).type;
+ return module->globals[index].type;
}
FunctionSig* GetFunctionSignature(uint32_t index) {
DCHECK(IsValidFunction(index));
- return module->functions->at(index).sig;
+ return module->functions[index].sig;
}
FunctionSig* GetImportSignature(uint32_t index) {
DCHECK(IsValidImport(index));
- return module->import_table->at(index).sig;
+ return module->import_table[index].sig;
}
FunctionSig* GetSignature(uint32_t index) {
DCHECK(IsValidSignature(index));
- return module->signatures->at(index);
+ return module->signatures[index];
}
size_t FunctionTableSize() {
- return module && module->function_table ? module->function_table->size()
- : 0;
+ return module ? module->function_table.size() : 0;
}
+ bool asm_js() { return origin == kAsmJsOrigin; }
+
Handle<Code> GetFunctionCode(uint32_t index);
Handle<Code> GetImportCode(uint32_t index);
Handle<FixedArray> GetFunctionTable();
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index a609e03261..736c4d9609 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -66,6 +66,7 @@ static void InitSigTable() {
#define SET_SIG_TABLE(name, opcode, sig) \
kSimpleExprSigTable[opcode] = static_cast<int>(kSigEnum_##sig) + 1;
FOREACH_SIMPLE_OPCODE(SET_SIG_TABLE);
+ FOREACH_ASMJS_COMPAT_OPCODE(SET_SIG_TABLE);
#undef SET_SIG_TABLE
}
@@ -103,6 +104,8 @@ bool WasmOpcodes::IsSupported(WasmOpcode opcode) {
case kExprI64Shl:
case kExprI64ShrU:
case kExprI64ShrS:
+ case kExprI64Ror:
+ case kExprI64Rol:
case kExprI64Eq:
case kExprI64Ne:
case kExprI64LtS:
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index 7cb9c00449..52f85aab0a 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -46,28 +46,14 @@ const LocalType kAstF64 = MachineRepresentation::kFloat64;
// We use kTagged here because kNone is already used by kAstStmt.
const LocalType kAstEnd = MachineRepresentation::kTagged;
-// Functionality related to encoding memory accesses.
-struct MemoryAccess {
- // Atomicity annotations for access to the memory and globals.
- enum Atomicity {
- kNone = 0, // non-atomic
- kSequential = 1, // sequential consistency
- kAcquire = 2, // acquire semantics
- kRelease = 3 // release semantics
- };
-
- // Alignment annotations for memory accesses.
- enum Alignment { kAligned = 0, kUnaligned = 1 };
-
- // Bitfields for the various annotations for memory accesses.
- typedef BitField<Alignment, 7, 1> AlignmentField;
- typedef BitField<Atomicity, 5, 2> AtomicityField;
- typedef BitField<bool, 4, 1> OffsetField;
-};
-
typedef Signature<LocalType> FunctionSig;
std::ostream& operator<<(std::ostream& os, const FunctionSig& function);
+struct WasmName {
+ const char* name;
+ uint32_t length;
+};
+
// TODO(titzer): Renumber all the opcodes to fill in holes.
// Control expressions and blocks.
@@ -80,7 +66,7 @@ std::ostream& operator<<(std::ostream& os, const FunctionSig& function);
V(Select, 0x05, _) \
V(Br, 0x06, _) \
V(BrIf, 0x07, _) \
- V(TableSwitch, 0x08, _) \
+ V(BrTable, 0x08, _) \
V(Return, 0x14, _) \
V(Unreachable, 0x15, _)
@@ -97,7 +83,8 @@ std::ostream& operator<<(std::ostream& os, const FunctionSig& function);
V(StoreGlobal, 0x11, _) \
V(CallFunction, 0x12, _) \
V(CallIndirect, 0x13, _) \
- V(CallImport, 0x1F, _)
+ V(CallImport, 0x1F, _) \
+ V(DeclLocals, 0x1E, _)
// Load memory expressions.
#define FOREACH_LOAD_MEM_OPCODE(V) \
@@ -161,7 +148,7 @@ std::ostream& operator<<(std::ostream& os, const FunctionSig& function);
V(I32Clz, 0x57, i_i) \
V(I32Ctz, 0x58, i_i) \
V(I32Popcnt, 0x59, i_i) \
- V(BoolNot, 0x5a, i_i) \
+ V(I32Eqz, 0x5a, i_i) \
V(I64Add, 0x5b, l_ll) \
V(I64Sub, 0x5c, l_ll) \
V(I64Mul, 0x5d, l_ll) \
@@ -188,6 +175,7 @@ std::ostream& operator<<(std::ostream& os, const FunctionSig& function);
V(I64Clz, 0x72, l_l) \
V(I64Ctz, 0x73, l_l) \
V(I64Popcnt, 0x74, l_l) \
+ V(I64Eqz, 0xba, i_l) \
V(F32Add, 0x75, f_ff) \
V(F32Sub, 0x76, f_ff) \
V(F32Mul, 0x77, f_ff) \
@@ -252,7 +240,47 @@ std::ostream& operator<<(std::ostream& os, const FunctionSig& function);
V(F64ConvertF32, 0xb2, d_f) \
V(F64ReinterpretI64, 0xb3, d_l) \
V(I32ReinterpretF32, 0xb4, i_f) \
- V(I64ReinterpretF64, 0xb5, l_d)
+ V(I64ReinterpretF64, 0xb5, l_d) \
+ V(I32Ror, 0xb6, i_ii) \
+ V(I32Rol, 0xb7, i_ii) \
+ V(I64Ror, 0xb8, l_ll) \
+ V(I64Rol, 0xb9, l_ll)
+
+// For compatibility with Asm.js.
+#define FOREACH_ASMJS_COMPAT_OPCODE(V) \
+ V(F64Acos, 0xc0, d_d) \
+ V(F64Asin, 0xc1, d_d) \
+ V(F64Atan, 0xc2, d_d) \
+ V(F64Cos, 0xc3, d_d) \
+ V(F64Sin, 0xc4, d_d) \
+ V(F64Tan, 0xc5, d_d) \
+ V(F64Exp, 0xc6, d_d) \
+ V(F64Log, 0xc7, d_d) \
+ V(F64Atan2, 0xc8, d_dd) \
+ V(F64Pow, 0xc9, d_dd) \
+ V(F64Mod, 0xca, d_dd)
+
+// TODO(titzer): sketch of asm-js compatibility bytecodes
+/* V(I32AsmjsDivS, 0xd0, i_ii) \ */
+/* V(I32AsmjsDivU, 0xd1, i_ii) \ */
+/* V(I32AsmjsRemS, 0xd2, i_ii) \ */
+/* V(I32AsmjsRemU, 0xd3, i_ii) \ */
+/* V(I32AsmjsLoad8S, 0xd4, i_i) \ */
+/* V(I32AsmjsLoad8U, 0xd5, i_i) \ */
+/* V(I32AsmjsLoad16S, 0xd6, i_i) \ */
+/* V(I32AsmjsLoad16U, 0xd7, i_i) \ */
+/* V(I32AsmjsLoad, 0xd8, i_i) \ */
+/* V(F32AsmjsLoad, 0xd9, f_i) \ */
+/* V(F64AsmjsLoad, 0xda, d_i) \ */
+/* V(I32AsmjsStore8, 0xdb, i_i) \ */
+/* V(I32AsmjsStore16, 0xdc, i_i) \ */
+/* V(I32AsmjsStore, 0xdd, i_ii) \ */
+/* V(F32AsmjsStore, 0xde, i_if) \ */
+/* V(F64AsmjsStore, 0xdf, i_id) \ */
+/* V(I32SAsmjsConvertF32, 0xe0, i_f) \ */
+/* V(I32UAsmjsConvertF32, 0xe1, i_f) \ */
+/* V(I32SAsmjsConvertF64, 0xe2, i_d) \ */
+/* V(I32SAsmjsConvertF64, 0xe3, i_d) */
// All opcodes.
#define FOREACH_OPCODE(V) \
@@ -261,7 +289,8 @@ std::ostream& operator<<(std::ostream& os, const FunctionSig& function);
FOREACH_SIMPLE_OPCODE(V) \
FOREACH_STORE_MEM_OPCODE(V) \
FOREACH_LOAD_MEM_OPCODE(V) \
- FOREACH_MISC_MEM_OPCODE(V)
+ FOREACH_MISC_MEM_OPCODE(V) \
+ FOREACH_ASMJS_COMPAT_OPCODE(V)
// All signatures.
#define FOREACH_SIGNATURE(V) \
@@ -300,6 +329,19 @@ enum WasmOpcode {
#undef DECLARE_NAMED_ENUM
};
+// The reason for a trap.
+enum TrapReason {
+ kTrapUnreachable,
+ kTrapMemOutOfBounds,
+ kTrapDivByZero,
+ kTrapDivUnrepresentable,
+ kTrapRemByZero,
+ kTrapFloatUnrepresentable,
+ kTrapFuncInvalid,
+ kTrapFuncSigMismatch,
+ kTrapCount
+};
+
// A collection of opcode-related static methods.
class WasmOpcodes {
public:
@@ -428,10 +470,6 @@ class WasmOpcodes {
}
}
- static byte LoadStoreAccessOf(bool with_offset) {
- return MemoryAccess::OffsetField::encode(with_offset);
- }
-
static char ShortNameOf(LocalType type) {
switch (type) {
case kAstI32:
@@ -470,6 +508,29 @@ class WasmOpcodes {
return "<unknown>";
}
}
+
+ static const char* TrapReasonName(TrapReason reason) {
+ switch (reason) {
+ case kTrapUnreachable:
+ return "unreachable";
+ case kTrapMemOutOfBounds:
+ return "memory access out of bounds";
+ case kTrapDivByZero:
+ return "divide by zero";
+ case kTrapDivUnrepresentable:
+ return "divide result unrepresentable";
+ case kTrapRemByZero:
+ return "remainder by zero";
+ case kTrapFloatUnrepresentable:
+ return "integer result unrepresentable";
+ case kTrapFuncInvalid:
+ return "invalid function";
+ case kTrapFuncSigMismatch:
+ return "function signature mismatch";
+ default:
+ return "<?>";
+ }
+ }
};
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 9a0fc7c8f4..f32f407a8d 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -326,6 +326,10 @@ Address RelocInfo::target_address() {
return Assembler::target_address_at(pc_, host_);
}
+Address RelocInfo::wasm_memory_reference() {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ return Memory::Address_at(pc_);
+}
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
@@ -364,6 +368,20 @@ void RelocInfo::set_target_address(Address target,
}
}
+void RelocInfo::update_wasm_memory_reference(
+ Address old_base, Address new_base, size_t old_size, size_t new_size,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ DCHECK(old_base <= wasm_memory_reference() &&
+ wasm_memory_reference() < old_base + old_size);
+ Address updated_reference = new_base + (wasm_memory_reference() - old_base);
+ DCHECK(new_base <= updated_reference &&
+ updated_reference < new_base + new_size);
+ Memory::Address_at(pc_) = updated_reference;
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ Assembler::FlushICache(isolate_, pc_, sizeof(int64_t));
+ }
+}
Object* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
@@ -410,8 +428,8 @@ void RelocInfo::set_target_object(Object* target,
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target));
}
}
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 3cf3398e87..214b786fed 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -523,8 +523,9 @@ void Assembler::arithmetic_op_16(byte opcode,
void Assembler::arithmetic_op_8(byte opcode, Register reg, const Operand& op) {
EnsureSpace ensure_space(this);
if (!reg.is_byte_register()) {
- // Register is not one of al, bl, cl, dl. Its encoding needs REX.
- emit_rex_32(reg);
+ emit_rex_32(reg, op);
+ } else {
+ emit_optional_rex_32(reg, op);
}
emit(opcode);
emit_operand(reg, op);
@@ -1469,17 +1470,18 @@ void Assembler::movp(Register dst, void* value, RelocInfo::Mode rmode) {
emitp(value, rmode);
}
-
-void Assembler::movq(Register dst, int64_t value) {
+void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
emit_rex_64(dst);
emit(0xB8 | dst.low_bits());
+ if (!RelocInfo::IsNone(rmode)) {
+ RecordRelocInfo(rmode, value);
+ }
emitq(value);
}
-
-void Assembler::movq(Register dst, uint64_t value) {
- movq(dst, static_cast<int64_t>(value));
+void Assembler::movq(Register dst, uint64_t value, RelocInfo::Mode rmode) {
+ movq(dst, static_cast<int64_t>(value), rmode);
}
@@ -2014,6 +2016,50 @@ void Assembler::testb(const Operand& op, Register reg) {
emit_operand(reg, op);
}
+void Assembler::testw(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ if (src.low_bits() == 4) {
+ emit_rex_32(src, dst);
+ }
+ emit(0x85);
+ emit_modrm(src, dst);
+}
+
+void Assembler::testw(Register reg, Immediate mask) {
+ DCHECK(is_int16(mask.value_) || is_uint16(mask.value_));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ if (reg.is(rax)) {
+ emit(0xA9);
+ emit(mask.value_);
+ } else {
+ if (reg.low_bits() == 4) {
+ emit_rex_32(reg);
+ }
+ emit(0xF7);
+ emit_modrm(0x0, reg);
+ emit(mask.value_);
+ }
+}
+
+void Assembler::testw(const Operand& op, Immediate mask) {
+ DCHECK(is_int16(mask.value_) || is_uint16(mask.value_));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(rax, op);
+ emit(0xF7);
+ emit_operand(rax, op);
+ emit(mask.value_);
+}
+
+void Assembler::testw(const Operand& op, Register reg) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(reg, op);
+ emit(0x85);
+ emit_operand(rax, op);
+}
void Assembler::emit_test(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 2847ff2569..e48f3586d3 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -699,8 +699,10 @@ class Assembler : public AssemblerBase {
void movp(Register dst, void* ptr, RelocInfo::Mode rmode);
// Loads a 64-bit immediate into a register.
- void movq(Register dst, int64_t value);
- void movq(Register dst, uint64_t value);
+ void movq(Register dst, int64_t value,
+ RelocInfo::Mode rmode = RelocInfo::NONE64);
+ void movq(Register dst, uint64_t value,
+ RelocInfo::Mode rmode = RelocInfo::NONE64);
void movsxbl(Register dst, Register src);
void movsxbl(Register dst, const Operand& src);
@@ -771,6 +773,10 @@ class Assembler : public AssemblerBase {
arithmetic_op_16(0x39, src, dst);
}
+ void testb(Register reg, const Operand& op) { testb(op, reg); }
+
+ void testw(Register reg, const Operand& op) { testw(op, reg); }
+
void andb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x4, dst, src);
}
@@ -846,6 +852,11 @@ class Assembler : public AssemblerBase {
void testb(const Operand& op, Immediate mask);
void testb(const Operand& op, Register reg);
+ void testw(Register dst, Register src);
+ void testw(Register reg, Immediate mask);
+ void testw(const Operand& op, Immediate mask);
+ void testw(const Operand& op, Register reg);
+
// Bit operations.
void bt(const Operand& dst, Register src);
void bts(const Operand& dst, Register src);
@@ -1695,7 +1706,9 @@ class Assembler : public AssemblerBase {
void dp(uintptr_t data) { dq(data); }
void dq(Label* label);
- PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+ AssemblerPositionsRecorder* positions_recorder() {
+ return &positions_recorder_;
+ }
// Check if there is less than kGap bytes available in the buffer.
// If this is the case, we need to grow the buffer before emitting
@@ -2178,8 +2191,8 @@ class Assembler : public AssemblerBase {
List< Handle<Code> > code_targets_;
- PositionsRecorder positions_recorder_;
- friend class PositionsRecorder;
+ AssemblerPositionsRecorder positions_recorder_;
+ friend class AssemblerPositionsRecorder;
};
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index 6c4419e084..316378348c 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -124,6 +124,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool check_derived_construct) {
// ----------- S t a t e -------------
// -- rax: number of arguments
+ // -- rsi: context
// -- rdi: constructor function
// -- rbx: allocation site or undefined
// -- rdx: new target
@@ -135,6 +136,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Preserve the incoming parameters on the stack.
__ AssertUndefinedOrAllocationSite(rbx);
+ __ Push(rsi);
__ Push(rbx);
__ Integer32ToSmi(rcx, rax);
__ Push(rcx);
@@ -200,7 +202,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Restore context from the frame.
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, ConstructFrameConstants::kContextOffset));
if (create_implicit_receiver) {
// If the result is an object (in the ECMA sense), we should get rid
@@ -351,9 +353,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r9 : argc
// [rsp+0x20] : argv
- // Clear the context before we push it when entering the internal frame.
- __ Set(rsi, 0);
-
// Enter an internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -1010,6 +1009,28 @@ void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
}
}
+// static
+void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : argc
+ // -- rsp[0] : return address
+ // -- rsp[8] : first argument (left-hand side)
+ // -- rsp[16] : receiver (right-hand side)
+ // -----------------------------------
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ movp(InstanceOfDescriptor::LeftRegister(),
+ Operand(rbp, 2 * kPointerSize)); // Load left-hand side.
+ __ movp(InstanceOfDescriptor::RightRegister(),
+ Operand(rbp, 3 * kPointerSize)); // Load right-hand side.
+ InstanceOfStub stub(masm->isolate(), true);
+ __ CallStub(&stub);
+ }
+
+ // Pop the argument and the receiver.
+ __ ret(2 * kPointerSize);
+}
// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
@@ -1810,40 +1831,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected.
__ bind(&too_few);
- // If the function is strong we need to throw an error.
- Label no_strong_error;
- __ movp(kScratchRegister,
- FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ testb(FieldOperand(kScratchRegister,
- SharedFunctionInfo::kStrongModeByteOffset),
- Immediate(1 << SharedFunctionInfo::kStrongModeBitWithinByte));
- __ j(equal, &no_strong_error, Label::kNear);
-
- // What we really care about is the required number of arguments.
-
- if (kPointerSize == kInt32Size) {
- __ movp(
- kScratchRegister,
- FieldOperand(kScratchRegister, SharedFunctionInfo::kLengthOffset));
- __ SmiToInteger32(kScratchRegister, kScratchRegister);
- } else {
- // See comment near kLengthOffset in src/objects.h
- __ movsxlq(
- kScratchRegister,
- FieldOperand(kScratchRegister, SharedFunctionInfo::kLengthOffset));
- __ shrq(kScratchRegister, Immediate(1));
- }
-
- __ cmpp(rax, kScratchRegister);
- __ j(greater_equal, &no_strong_error, Label::kNear);
-
- {
- FrameScope frame(masm, StackFrame::MANUAL);
- EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
- }
-
- __ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
ArgumentsAdaptorStackCheck(masm, &stack_overflow);
@@ -1870,7 +1857,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ j(less, &fill);
// Restore function pointer.
- __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rdi, Operand(rbp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
}
// Call the entry point.
@@ -2065,18 +2052,19 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
Comment cmnt(masm, "[ PrepareForTailCall");
- // Prepare for tail call only if the debugger is not active.
+ // Prepare for tail call only if ES2015 tail call elimination is active.
Label done;
- ExternalReference debug_is_active =
- ExternalReference::debug_is_active_address(masm->isolate());
- __ Move(kScratchRegister, debug_is_active);
+ ExternalReference is_tail_call_elimination_enabled =
+ ExternalReference::is_tail_call_elimination_enabled_address(
+ masm->isolate());
+ __ Move(kScratchRegister, is_tail_call_elimination_enabled);
__ cmpb(Operand(kScratchRegister, 0), Immediate(0));
- __ j(not_equal, &done);
+ __ j(equal, &done);
// Drop possible interpreter handler/stub frame.
{
Label no_interpreter_frame;
- __ Cmp(Operand(rbp, StandardFrameConstants::kMarkerOffset),
+ __ Cmp(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
Smi::FromInt(StackFrame::STUB));
__ j(not_equal, &no_interpreter_frame, Label::kNear);
__ movp(rbp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
@@ -2084,16 +2072,18 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
}
// Check if next frame is an arguments adaptor frame.
+ Register caller_args_count_reg = scratch1;
Label no_arguments_adaptor, formal_parameter_count_loaded;
__ movp(scratch2, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ Cmp(Operand(scratch2, StandardFrameConstants::kContextOffset),
+ __ Cmp(Operand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(not_equal, &no_arguments_adaptor, Label::kNear);
- // Drop arguments adaptor frame and load arguments count.
+ // Drop current frame and load arguments count from arguments adaptor frame.
__ movp(rbp, scratch2);
__ SmiToInteger32(
- scratch1, Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ caller_args_count_reg,
+ Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ jmp(&formal_parameter_count_loaded, Label::kNear);
__ bind(&no_arguments_adaptor);
@@ -2102,55 +2092,14 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
__ movp(scratch1,
FieldOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
__ LoadSharedFunctionInfoSpecialField(
- scratch1, scratch1, SharedFunctionInfo::kFormalParameterCountOffset);
+ caller_args_count_reg, scratch1,
+ SharedFunctionInfo::kFormalParameterCountOffset);
__ bind(&formal_parameter_count_loaded);
- // Calculate the destination address where we will put the return address
- // after we drop current frame.
- Register new_sp_reg = scratch2;
- __ subp(scratch1, args_reg);
- __ leap(new_sp_reg, Operand(rbp, scratch1, times_pointer_size,
- StandardFrameConstants::kCallerPCOffset));
-
- if (FLAG_debug_code) {
- __ cmpp(rsp, new_sp_reg);
- __ Check(below, kStackAccessBelowStackPointer);
- }
-
- // Copy receiver and return address as well.
- Register count_reg = scratch1;
- __ leap(count_reg, Operand(args_reg, 2));
-
- // Copy return address from caller's frame to current frame's return address
- // to avoid its trashing and let the following loop copy it to the right
- // place.
- Register tmp_reg = scratch3;
- __ movp(tmp_reg, Operand(rbp, StandardFrameConstants::kCallerPCOffset));
- __ movp(Operand(rsp, 0), tmp_reg);
-
- // Restore caller's frame pointer now as it could be overwritten by
- // the copying loop.
- __ movp(rbp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-
- Operand src(rsp, count_reg, times_pointer_size, 0);
- Operand dst(new_sp_reg, count_reg, times_pointer_size, 0);
-
- // Now copy callee arguments to the caller frame going backwards to avoid
- // callee arguments corruption (source and destination areas could overlap).
- Label loop, entry;
- __ jmp(&entry, Label::kNear);
- __ bind(&loop);
- __ decp(count_reg);
- __ movp(tmp_reg, src);
- __ movp(dst, tmp_reg);
- __ bind(&entry);
- __ cmpp(count_reg, Immediate(0));
- __ j(not_equal, &loop, Label::kNear);
-
- // Leave current frame.
- __ movp(rsp, new_sp_reg);
-
+ ParameterCount callee_args_count(args_reg);
+ __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+ scratch3, ReturnAddressState::kOnStack);
__ bind(&done);
}
} // namespace
@@ -2727,23 +2676,6 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
}
-void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
- // We check the stack limit as indicator that recompilation might be done.
- Label ok;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &ok);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard);
- }
- __ jmp(masm->isolate()->builtins()->OnStackReplacement(),
- RelocInfo::CODE_TARGET);
-
- __ bind(&ok);
- __ ret(0);
-}
-
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index f314b9cfcb..e737801f58 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -4,8 +4,9 @@
#if V8_TARGET_ARCH_X64
-#include "src/bootstrapper.h"
#include "src/code-stubs.h"
+#include "src/api-arguments.h"
+#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
@@ -75,6 +76,10 @@ void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
}
+void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+ Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
+ descriptor->Initialize(rax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
+}
void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
@@ -534,34 +539,6 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
}
-void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
- // Return address is on the stack.
- Label slow;
-
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
- Register scratch = rax;
- DCHECK(!scratch.is(receiver) && !scratch.is(key));
-
- // Check that the key is an array index, that is Uint32.
- STATIC_ASSERT(kSmiValueSize <= 32);
- __ JumpUnlessNonNegativeSmi(key, &slow);
-
- // Everything is fine, call runtime.
- __ PopReturnAddressTo(scratch);
- __ Push(receiver); // receiver
- __ Push(key); // key
- __ PushReturnAddressFrom(scratch);
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
-
- __ bind(&slow);
- PropertyAccessCompiler::TailCallBuiltin(
- masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
-
void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
// Return address is on the stack.
Label miss;
@@ -1269,7 +1246,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Not strict equality. Objects are unequal if
// they are both JSObjects and not undetectable,
// and their pointers are different.
- Label return_unequal, undetectable;
+ Label return_equal, return_unequal, undetectable;
// At most one is a smi, so we can test for smi by adding the two.
// A smi plus a heap object has the low bit set, a heap object plus
// a heap object has the low bit clear.
@@ -1283,10 +1260,10 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ movp(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, &undetectable);
+ __ j(not_zero, &undetectable, Label::kNear);
__ testb(FieldOperand(rcx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, &return_unequal);
+ __ j(not_zero, &return_unequal, Label::kNear);
__ CmpInstanceType(rbx, FIRST_JS_RECEIVER_TYPE);
__ j(below, &runtime_call, Label::kNear);
@@ -1300,7 +1277,17 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ bind(&undetectable);
__ testb(FieldOperand(rcx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
- __ j(zero, &return_unequal);
+ __ j(zero, &return_unequal, Label::kNear);
+
+ // If both sides are JSReceivers, then the result is false according to
+ // the HTML specification, which says that only comparisons with null or
+ // undefined are affected by special casing for document.all.
+ __ CmpInstanceType(rbx, ODDBALL_TYPE);
+ __ j(zero, &return_equal, Label::kNear);
+ __ CmpInstanceType(rcx, ODDBALL_TYPE);
+ __ j(not_zero, &return_unequal, Label::kNear);
+
+ __ bind(&return_equal);
__ Set(rax, EQUAL);
__ ret(0);
}
@@ -1877,14 +1864,12 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ pushq(rbp);
__ movp(rbp, rsp);
- // Push the stack frame type marker twice.
+ // Push the stack frame type.
int marker = type();
- // Scratch register is neither callee-save, nor an argument register on any
- // platform. It's free to use at this point.
- // Cannot use smi-register for loading yet.
- __ Move(kScratchRegister, Smi::FromInt(marker), Assembler::RelocInfoNone());
- __ Push(kScratchRegister); // context slot
- __ Push(kScratchRegister); // function slot
+ __ Push(Smi::FromInt(marker)); // context slot
+ ExternalReference context_address(Isolate::kContextAddress, isolate());
+ __ Load(kScratchRegister, context_address);
+ __ Push(kScratchRegister); // context
// Save callee-saved registers (X64/X32/Win64 calling conventions).
__ pushq(r12);
__ pushq(r13);
@@ -2069,6 +2054,11 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
__ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
__ j(not_equal, &slow_case);
+ // Go to the runtime if the function is not a constructor.
+ __ testb(FieldOperand(function_map, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsConstructor));
+ __ j(zero, &slow_case);
+
// Ensure that {function} has an instance prototype.
__ testb(FieldOperand(function_map, Map::kBitFieldOffset),
Immediate(1 << Map::kHasNonInstancePrototype));
@@ -2137,7 +2127,8 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
__ Push(object);
__ Push(function);
__ PushReturnAddressFrom(kScratchRegister);
- __ TailCallRuntime(Runtime::kInstanceOf);
+ __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
+ : Runtime::kInstanceOf);
}
@@ -2544,23 +2535,21 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ Ret();
__ bind(&not_heap_number);
- Label not_string, slow_string;
+ NonNumberToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+}
+
+void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
+ // The NonNumberToNumber stub takes one argument in rax.
+ __ AssertNotNumber(rax);
+
+ Label not_string;
__ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdi);
// rax: object
// rdi: object map
__ j(above_equal, &not_string, Label::kNear);
- // Check if string has a cached array index.
- __ testl(FieldOperand(rax, String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- __ j(not_zero, &slow_string, Label::kNear);
- __ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
- __ IndexFromHash(rax, rax);
- __ Ret();
- __ bind(&slow_string);
- __ PopReturnAddressTo(rcx); // Pop return address.
- __ Push(rax); // Push argument.
- __ PushReturnAddressFrom(rcx); // Push return address.
- __ TailCallRuntime(Runtime::kStringToNumber);
+ StringToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
__ bind(&not_string);
Label not_oddball;
@@ -2576,26 +2565,26 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kToNumber);
}
+void StringToNumberStub::Generate(MacroAssembler* masm) {
+ // The StringToNumber stub takes one argument in rax.
+ __ AssertString(rax);
-void ToLengthStub::Generate(MacroAssembler* masm) {
- // The ToLength stub takes on argument in rax.
- Label not_smi, positive_smi;
- __ JumpIfNotSmi(rax, &not_smi, Label::kNear);
- STATIC_ASSERT(kSmiTag == 0);
- __ testp(rax, rax);
- __ j(greater_equal, &positive_smi, Label::kNear);
- __ xorl(rax, rax);
- __ bind(&positive_smi);
+ // Check if string has a cached array index.
+ Label runtime;
+ __ testl(FieldOperand(rax, String::kHashFieldOffset),
+ Immediate(String::kContainsCachedArrayIndexMask));
+ __ j(not_zero, &runtime, Label::kNear);
+ __ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
+ __ IndexFromHash(rax, rax);
__ Ret();
- __ bind(&not_smi);
+ __ bind(&runtime);
__ PopReturnAddressTo(rcx); // Pop return address.
__ Push(rax); // Push argument.
__ PushReturnAddressFrom(rcx); // Push return address.
- __ TailCallRuntime(Runtime::kToLength);
+ __ TailCallRuntime(Runtime::kStringToNumber);
}
-
void ToStringStub::Generate(MacroAssembler* masm) {
// The ToString stub takes one argument in rax.
Label is_number;
@@ -2803,45 +2792,6 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
}
-void StringCompareStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rdx : left string
- // -- rax : right string
- // -- rsp[0] : return address
- // -----------------------------------
- __ AssertString(rdx);
- __ AssertString(rax);
-
- // Check for identity.
- Label not_same;
- __ cmpp(rdx, rax);
- __ j(not_equal, &not_same, Label::kNear);
- __ Move(rax, Smi::FromInt(EQUAL));
- __ IncrementCounter(isolate()->counters()->string_compare_native(), 1);
- __ Ret();
-
- __ bind(&not_same);
-
- // Check that both are sequential one-byte strings.
- Label runtime;
- __ JumpIfNotBothSequentialOneByteStrings(rdx, rax, rcx, rbx, &runtime);
-
- // Inline comparison of one-byte strings.
- __ IncrementCounter(isolate()->counters()->string_compare_native(), 1);
- StringHelper::GenerateCompareFlatOneByteStrings(masm, rdx, rax, rcx, rbx, rdi,
- r8);
-
- // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ bind(&runtime);
- __ PopReturnAddressTo(rcx);
- __ Push(rdx);
- __ Push(rax);
- __ PushReturnAddressFrom(rcx);
- __ TailCallRuntime(Runtime::kStringCompare);
-}
-
-
void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rdx : left
@@ -3158,13 +3108,21 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
// Handle more complex cases in runtime.
__ bind(&runtime);
- __ PopReturnAddressTo(tmp1);
- __ Push(left);
- __ Push(right);
- __ PushReturnAddressFrom(tmp1);
if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(left);
+ __ Push(right);
+ __ CallRuntime(Runtime::kStringEqual);
+ }
+ __ LoadRoot(rdx, Heap::kTrueValueRootIndex);
+ __ subp(rax, rdx);
+ __ Ret();
} else {
+ __ PopReturnAddressTo(tmp1);
+ __ Push(left);
+ __ Push(right);
+ __ PushReturnAddressFrom(tmp1);
__ TailCallRuntime(Runtime::kStringCompare);
}
@@ -3641,7 +3599,7 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
int parameter_count_offset =
- StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
__ movp(rbx, MemOperand(rbp, parameter_count_offset));
masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
__ PopReturnAddressTo(rcx);
@@ -4610,7 +4568,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
__ bind(&loop);
__ movp(rdx, Operand(rdx, StandardFrameConstants::kCallerFPOffset));
__ bind(&loop_entry);
- __ cmpp(rdi, Operand(rdx, StandardFrameConstants::kMarkerOffset));
+ __ cmpp(rdi, Operand(rdx, StandardFrameConstants::kFunctionOffset));
__ j(not_equal, &loop);
}
@@ -4618,7 +4576,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// arguments adaptor frame below the function frame).
Label no_rest_parameters;
__ movp(rbx, Operand(rdx, StandardFrameConstants::kCallerFPOffset));
- __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
+ __ Cmp(Operand(rbx, CommonFrameConstants::kContextOrFrameTypeOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(not_equal, &no_rest_parameters, Label::kNear);
@@ -4774,7 +4732,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
__ movp(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movp(r8, Operand(rax, StandardFrameConstants::kContextOffset));
+ __ movp(r8, Operand(rax, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Cmp(r8, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adaptor_frame);
@@ -4980,14 +4938,14 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ bind(&loop);
__ movp(rdx, Operand(rdx, StandardFrameConstants::kCallerFPOffset));
__ bind(&loop_entry);
- __ cmpp(rdi, Operand(rdx, StandardFrameConstants::kMarkerOffset));
+ __ cmpp(rdi, Operand(rdx, StandardFrameConstants::kFunctionOffset));
__ j(not_equal, &loop);
}
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
__ movp(rbx, Operand(rdx, StandardFrameConstants::kCallerFPOffset));
- __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
+ __ Cmp(Operand(rbx, CommonFrameConstants::kContextOrFrameTypeOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &arguments_adaptor, Label::kNear);
{
@@ -5415,10 +5373,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ jmp(&leave_exit_frame);
}
-static void CallApiFunctionStubHelper(MacroAssembler* masm,
- const ParameterCount& argc,
- bool return_first_arg,
- bool call_data_undefined, bool is_lazy) {
+void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rdi : callee
// -- rbx : call_data
@@ -5451,8 +5406,6 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
STATIC_ASSERT(FCA::kHolderIndex == 0);
STATIC_ASSERT(FCA::kArgsLength == 7);
- DCHECK(argc.is_immediate() || rax.is(argc.reg()));
-
__ PopReturnAddressTo(return_address);
// context save
@@ -5464,7 +5417,7 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
// call data
__ Push(call_data);
Register scratch = call_data;
- if (!call_data_undefined) {
+ if (!this->call_data_undefined()) {
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
}
// return value
@@ -5481,7 +5434,7 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
// Push return address back on stack.
__ PushReturnAddressFrom(return_address);
- if (!is_lazy) {
+ if (!this->is_lazy()) {
// load context from callee
__ movp(context, FieldOperand(callee, JSFunction::kContextOffset));
}
@@ -5493,28 +5446,15 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
PrepareCallApiFunction(masm, kApiStackSpace);
// FunctionCallbackInfo::implicit_args_.
+ int argc = this->argc();
__ movp(StackSpaceOperand(0), scratch);
- if (argc.is_immediate()) {
- __ addp(scratch, Immediate((argc.immediate() + FCA::kArgsLength - 1) *
- kPointerSize));
- // FunctionCallbackInfo::values_.
- __ movp(StackSpaceOperand(1), scratch);
- // FunctionCallbackInfo::length_.
- __ Set(StackSpaceOperand(2), argc.immediate());
- // FunctionCallbackInfo::is_construct_call_.
- __ Set(StackSpaceOperand(3), 0);
- } else {
- __ leap(scratch, Operand(scratch, argc.reg(), times_pointer_size,
- (FCA::kArgsLength - 1) * kPointerSize));
- // FunctionCallbackInfo::values_.
- __ movp(StackSpaceOperand(1), scratch);
- // FunctionCallbackInfo::length_.
- __ movp(StackSpaceOperand(2), argc.reg());
- // FunctionCallbackInfo::is_construct_call_.
- __ leap(argc.reg(), Operand(argc.reg(), times_pointer_size,
- (FCA::kArgsLength + 1) * kPointerSize));
- __ movp(StackSpaceOperand(3), argc.reg());
- }
+ __ addp(scratch, Immediate((argc + FCA::kArgsLength - 1) * kPointerSize));
+ // FunctionCallbackInfo::values_.
+ __ movp(StackSpaceOperand(1), scratch);
+ // FunctionCallbackInfo::length_.
+ __ Set(StackSpaceOperand(2), argc);
+ // FunctionCallbackInfo::is_construct_call_.
+ __ Set(StackSpaceOperand(3), 0);
#if defined(__MINGW64__) || defined(_WIN64)
Register arguments_arg = rcx;
@@ -5541,36 +5481,17 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
FCA::kArgsLength - FCA::kContextSaveIndex);
Operand is_construct_call_operand = StackSpaceOperand(3);
Operand return_value_operand = args_from_rbp.GetArgumentOperand(
- return_first_arg ? 0 : FCA::kArgsLength - FCA::kReturnValueOffset);
+ this->is_store() ? 0 : FCA::kArgsLength - FCA::kReturnValueOffset);
int stack_space = 0;
Operand* stack_space_operand = &is_construct_call_operand;
- if (argc.is_immediate()) {
- stack_space = argc.immediate() + FCA::kArgsLength + 1;
- stack_space_operand = nullptr;
- }
+ stack_space = argc + FCA::kArgsLength + 1;
+ stack_space_operand = nullptr;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, callback_arg,
stack_space, stack_space_operand,
return_value_operand, &context_restore_operand);
}
-void CallApiFunctionStub::Generate(MacroAssembler* masm) {
- bool call_data_undefined = this->call_data_undefined();
- CallApiFunctionStubHelper(masm, ParameterCount(rax), false,
- call_data_undefined, false);
-}
-
-
-void CallApiAccessorStub::Generate(MacroAssembler* masm) {
- bool is_store = this->is_store();
- int argc = this->argc();
- bool call_data_undefined = this->call_data_undefined();
- bool is_lazy = this->is_lazy();
- CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
- call_data_undefined, is_lazy);
-}
-
-
void CallApiGetterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rsp[0] : return address
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 81c1a69aa8..33e987e248 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -288,6 +288,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
__ j(equal, &only_change_map);
+ __ Push(rsi);
__ Push(rax);
__ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
@@ -326,7 +327,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Call into runtime if GC is required.
__ bind(&gc_required);
__ Pop(rax);
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ Pop(rsi);
__ jmp(fail);
// Box doubles into heap numbers.
@@ -380,7 +381,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ Pop(rax);
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ Pop(rsi);
__ bind(&only_change_map);
// Set transitioned map.
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index ddf59eb470..9d70c3236b 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -105,12 +105,6 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
}
}
-bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
- // There is no dynamic alignment padding on x64 in the input frame.
- return false;
-}
-
-
#define __ masm()->
void Deoptimizer::TableEntryGenerator::Generate() {
@@ -162,7 +156,12 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Allocate a new deoptimizer object.
__ PrepareCallCFunction(6);
+ __ movp(rax, Immediate(0));
+ Label context_check;
+ __ movp(rdi, Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ JumpIfSmi(rdi, &context_check);
__ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ bind(&context_check);
__ movp(arg_reg_1, rax);
__ Set(arg_reg_2, type());
// Args 3 and 4 are already in the right registers.
@@ -232,7 +231,9 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
__ popq(rax);
- // Replace the current frame with the output frames.
+ __ movp(rsp, Operand(rax, Deoptimizer::caller_frame_top_offset()));
+
+ // Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop,
outer_loop_header, inner_loop_header;
// Outer loop state: rax = current FrameDescription**, rdx = one past the
diff --git a/deps/v8/src/x64/frames-x64.h b/deps/v8/src/x64/frames-x64.h
index d213ecb7dc..5aad54a696 100644
--- a/deps/v8/src/x64/frames-x64.h
+++ b/deps/v8/src/x64/frames-x64.h
@@ -39,13 +39,11 @@ class EntryFrameConstants : public AllStatic {
static const int kArgvOffset = 6 * kPointerSize;
};
-
-class ExitFrameConstants : public AllStatic {
+class ExitFrameConstants : public TypedFrameConstants {
public:
- static const int kFrameSize = 2 * kPointerSize;
-
- static const int kCodeOffset = -2 * kPointerSize;
- static const int kSPOffset = -1 * kPointerSize;
+ static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ DEFINE_TYPED_FRAME_SIZES(2);
static const int kCallerFPOffset = +0 * kPointerSize;
static const int kCallerPCOffset = kFPOnStackSize;
@@ -63,7 +61,7 @@ class JavaScriptFrameConstants : public AllStatic {
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
static const int kLastParameterOffset = kFPOnStackSize + kPCOnStackSize;
- static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+ static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
// Caller SP-relative.
static const int kParam0Offset = -2 * kPointerSize;
diff --git a/deps/v8/src/x64/interface-descriptors-x64.cc b/deps/v8/src/x64/interface-descriptors-x64.cc
index 0913d1c1d9..b10b52298b 100644
--- a/deps/v8/src/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/x64/interface-descriptors-x64.cc
@@ -114,36 +114,8 @@ void TypeofDescriptor::InitializePlatformSpecific(
}
-void ToNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // ToNumberStub invokes a function, and therefore needs a context.
- Register registers[] = {rax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-// static
-const Register ToLengthDescriptor::ReceiverRegister() { return rax; }
-
-
-// static
-const Register ToStringDescriptor::ReceiverRegister() { return rax; }
-
-
// static
-const Register ToNameDescriptor::ReceiverRegister() { return rax; }
-
-
-// static
-const Register ToObjectDescriptor::ReceiverRegister() { return rax; }
-
-
-void NumberToStringDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
+const Register TypeConversionDescriptor::ArgumentRegister() { return rax; }
void FastCloneRegExpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -263,6 +235,13 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(0, nullptr, nullptr);
}
+#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
+ void Allocate##Type##Descriptor::InitializePlatformSpecific( \
+ CallInterfaceDescriptorData* data) { \
+ data->InitializePlatformSpecific(0, nullptr, nullptr); \
+ }
+SIMD128_TYPES(SIMD128_ALLOC_DESC)
+#undef SIMD128_ALLOC_DESC
void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -307,24 +286,16 @@ void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void CompareDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rdx, rax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void CompareNilDescriptor::InitializePlatformSpecific(
+void FastArrayPushDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
+ // stack param count needs (arg count)
Register registers[] = {rax};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void ToBooleanDescriptor::InitializePlatformSpecific(
+void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {rax};
+ Register registers[] = {rdx, rax};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -388,21 +359,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void ApiFunctionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- rdi, // callee
- rbx, // call_data
- rcx, // holder
- rdx, // api_function_address
- rax, // actual number of arguments
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ApiAccessorDescriptor::InitializePlatformSpecific(
+void ApiCallbackDescriptorBase::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
rdi, // callee
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index e72d40b4ae..566091df4e 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -215,25 +215,27 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
bind(&ok);
}
// Load store buffer top.
- LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
+ ExternalReference store_buffer =
+ ExternalReference::store_buffer_top(isolate());
+ movp(scratch, ExternalOperand(store_buffer));
// Store pointer to buffer.
movp(Operand(scratch, 0), addr);
// Increment buffer top.
addp(scratch, Immediate(kPointerSize));
// Write back new top of buffer.
- StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
+ movp(ExternalOperand(store_buffer), scratch);
// Call stub on end of buffer.
Label done;
// Check for end of buffer.
- testp(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
+ testp(scratch, Immediate(StoreBuffer::kStoreBufferMask));
if (and_then == kReturnAtEnd) {
Label buffer_overflowed;
- j(not_equal, &buffer_overflowed, Label::kNear);
+ j(equal, &buffer_overflowed, Label::kNear);
ret(0);
bind(&buffer_overflowed);
} else {
DCHECK(and_then == kFallThroughAtEnd);
- j(equal, &done, Label::kNear);
+ j(not_equal, &done, Label::kNear);
}
StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
CallStub(&store_buffer_overflow);
@@ -1114,6 +1116,14 @@ void MacroAssembler::Set(Register dst, int64_t x) {
}
}
+void MacroAssembler::Set(Register dst, int64_t x, RelocInfo::Mode rmode) {
+ if (rmode == RelocInfo::WASM_MEMORY_REFERENCE) {
+ DCHECK(x != 0);
+ movq(dst, x, rmode);
+ } else {
+ DCHECK(RelocInfo::IsNone(rmode));
+ }
+}
void MacroAssembler::Set(const Operand& dst, intptr_t x) {
if (kPointerSize == kInt64Size) {
@@ -3867,6 +3877,15 @@ void MacroAssembler::AssertNumber(Register object) {
}
}
+void MacroAssembler::AssertNotNumber(Register object) {
+ if (emit_debug_code()) {
+ Condition is_smi = CheckSmi(object);
+ Check(NegateCondition(is_smi), kOperandIsANumber);
+ Cmp(FieldOperand(object, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ Check(not_equal, kOperandIsANumber);
+ }
+}
void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
@@ -4094,6 +4113,77 @@ void MacroAssembler::DebugBreak() {
Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
}
+void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
+ Register caller_args_count_reg,
+ Register scratch0, Register scratch1,
+ ReturnAddressState ra_state) {
+#if DEBUG
+ if (callee_args_count.is_reg()) {
+ DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
+ scratch1));
+ } else {
+ DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
+ }
+#endif
+
+ // Calculate the destination address where we will put the return address
+ // after we drop current frame.
+ Register new_sp_reg = scratch0;
+ if (callee_args_count.is_reg()) {
+ subp(caller_args_count_reg, callee_args_count.reg());
+ leap(new_sp_reg, Operand(rbp, caller_args_count_reg, times_pointer_size,
+ StandardFrameConstants::kCallerPCOffset));
+ } else {
+ leap(new_sp_reg, Operand(rbp, caller_args_count_reg, times_pointer_size,
+ StandardFrameConstants::kCallerPCOffset -
+ callee_args_count.immediate() * kPointerSize));
+ }
+
+ if (FLAG_debug_code) {
+ cmpp(rsp, new_sp_reg);
+ Check(below, kStackAccessBelowStackPointer);
+ }
+
+ // Copy return address from caller's frame to current frame's return address
+ // to avoid its trashing and let the following loop copy it to the right
+ // place.
+ Register tmp_reg = scratch1;
+ if (ra_state == ReturnAddressState::kOnStack) {
+ movp(tmp_reg, Operand(rbp, StandardFrameConstants::kCallerPCOffset));
+ movp(Operand(rsp, 0), tmp_reg);
+ } else {
+ DCHECK(ReturnAddressState::kNotOnStack == ra_state);
+ Push(Operand(rbp, StandardFrameConstants::kCallerPCOffset));
+ }
+
+ // Restore caller's frame pointer now as it could be overwritten by
+ // the copying loop.
+ movp(rbp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+
+ // +2 here is to copy both receiver and return address.
+ Register count_reg = caller_args_count_reg;
+ if (callee_args_count.is_reg()) {
+ leap(count_reg, Operand(callee_args_count.reg(), 2));
+ } else {
+ movp(count_reg, Immediate(callee_args_count.immediate() + 2));
+ // TODO(ishell): Unroll copying loop for small immediate values.
+ }
+
+ // Now copy callee arguments to the caller frame going backwards to avoid
+ // callee arguments corruption (source and destination areas could overlap).
+ Label loop, entry;
+ jmp(&entry, Label::kNear);
+ bind(&loop);
+ decp(count_reg);
+ movp(tmp_reg, Operand(rsp, count_reg, times_pointer_size, 0));
+ movp(Operand(new_sp_reg, count_reg, times_pointer_size, 0), tmp_reg);
+ bind(&entry);
+ cmpp(count_reg, Immediate(0));
+ j(not_equal, &loop, Label::kNear);
+
+ // Leave current frame.
+ movp(rsp, new_sp_reg);
+}
void MacroAssembler::InvokeFunction(Register function,
Register new_target,
@@ -4285,15 +4375,12 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
bind(&skip_flooding);
}
-
-void MacroAssembler::StubPrologue() {
- pushq(rbp); // Caller's frame pointer.
- movp(rbp, rsp);
- Push(rsi); // Callee's context.
- Push(Smi::FromInt(StackFrame::STUB));
+void MacroAssembler::StubPrologue(StackFrame::Type type) {
+ pushq(rbp); // Caller's frame pointer.
+ movp(rbp, rsp);
+ Push(Smi::FromInt(type));
}
-
void MacroAssembler::Prologue(bool code_pre_aging) {
PredictableCodeSizeScope predictible_code_size_scope(this,
kNoCodeAgeSequenceLength);
@@ -4328,10 +4415,11 @@ void MacroAssembler::EnterFrame(StackFrame::Type type,
void MacroAssembler::EnterFrame(StackFrame::Type type) {
pushq(rbp);
movp(rbp, rsp);
- Push(rsi); // Context.
Push(Smi::FromInt(type));
- Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
- Push(kScratchRegister);
+ if (type == StackFrame::INTERNAL) {
+ Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
+ Push(kScratchRegister);
+ }
if (emit_debug_code()) {
Move(kScratchRegister,
isolate()->factory()->undefined_value(),
@@ -4345,7 +4433,8 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
if (emit_debug_code()) {
Move(kScratchRegister, Smi::FromInt(type));
- cmpp(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
+ cmpp(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
+ kScratchRegister);
Check(equal, kStackFrameTypesMustMatch);
}
movp(rsp, rbp);
@@ -4356,15 +4445,16 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
// Set up the frame structure on the stack.
// All constants are relative to the frame pointer of the exit frame.
- DCHECK(ExitFrameConstants::kCallerSPDisplacement ==
- kFPOnStackSize + kPCOnStackSize);
- DCHECK(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
- DCHECK(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
+ DCHECK_EQ(kFPOnStackSize + kPCOnStackSize,
+ ExitFrameConstants::kCallerSPDisplacement);
+ DCHECK_EQ(kFPOnStackSize, ExitFrameConstants::kCallerPCOffset);
+ DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
pushq(rbp);
movp(rbp, rsp);
// Reserve room for entry stack pointer and push the code object.
- DCHECK(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
+ Push(Smi::FromInt(StackFrame::EXIT));
+ DCHECK_EQ(-2 * kPointerSize, ExitFrameConstants::kSPOffset);
Push(Immediate(0)); // Saved entry sp, patched before call.
Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
Push(kScratchRegister); // Accessed from EditFrame::code_slot.
@@ -4391,7 +4481,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
int space = XMMRegister::kMaxNumRegisters * kDoubleSize +
arg_stack_space * kRegisterSize;
subp(rsp, Immediate(space));
- int offset = -2 * kPointerSize;
+ int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
const RegisterConfiguration* config =
RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
@@ -4438,7 +4528,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
// Registers:
// r15 : argv
if (save_doubles) {
- int offset = -2 * kPointerSize;
+ int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
const RegisterConfiguration* config =
RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
@@ -4501,8 +4591,22 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
DCHECK(!holder_reg.is(scratch));
DCHECK(!scratch.is(kScratchRegister));
- // Load current lexical context from the stack frame.
- movp(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
+ // Load current lexical context from the active StandardFrame, which
+ // may require crawling past STUB frames.
+ Label load_context;
+ Label has_context;
+ movp(scratch, rbp);
+ bind(&load_context);
+ DCHECK(SmiValuesAre32Bits());
+ // This is "JumpIfNotSmi" but without loading the value into a register.
+ cmpl(MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset),
+ Immediate(0));
+ j(not_equal, &has_context);
+ movp(scratch, MemOperand(scratch, CommonFrameConstants::kCallerFPOffset));
+ jmp(&load_context);
+ bind(&has_context);
+ movp(scratch,
+ MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
// When generating debug code, make sure the lexical context is set.
if (emit_debug_code()) {
@@ -5513,19 +5617,39 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
Register receiver_reg,
Register scratch_reg,
Label* no_memento_found) {
- ExternalReference new_space_start =
- ExternalReference::new_space_start(isolate());
+ Label map_check;
+ Label top_check;
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
-
- leap(scratch_reg, Operand(receiver_reg,
- JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
- Move(kScratchRegister, new_space_start);
- cmpp(scratch_reg, kScratchRegister);
- j(less, no_memento_found);
+ const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
+ const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+
+ // Bail out if the object is not in new space.
+ JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
+ // If the object is in new space, we need to check whether it is on the same
+ // page as the current top.
+ leap(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
+ xorp(scratch_reg, ExternalOperand(new_space_allocation_top));
+ testp(scratch_reg, Immediate(~Page::kPageAlignmentMask));
+ j(zero, &top_check);
+ // The object is on a different page than allocation top. Bail out if the
+ // object sits on the page boundary as no memento can follow and we cannot
+ // touch the memory following it.
+ leap(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
+ xorp(scratch_reg, receiver_reg);
+ testp(scratch_reg, Immediate(~Page::kPageAlignmentMask));
+ j(not_zero, no_memento_found);
+ // Continue with the actual map check.
+ jmp(&map_check);
+ // If top is on the same page as the current object, we need to check whether
+ // we are below top.
+ bind(&top_check);
+ leap(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
cmpp(scratch_reg, ExternalOperand(new_space_allocation_top));
j(greater, no_memento_found);
- CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
+ // Memento map check.
+ bind(&map_check);
+ CompareRoot(MemOperand(receiver_reg, kMementoMapOffset),
Heap::kAllocationMementoMapRootIndex);
}
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 9c0b7964b3..af3dd031ca 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -56,6 +56,8 @@ enum class SmiOperationConstraint {
kBailoutOnOverflow = 1 << 2
};
+enum class ReturnAddressState { kOnStack, kNotOnStack };
+
typedef base::Flags<SmiOperationConstraint> SmiOperationConstraints;
DEFINE_OPERATORS_FOR_FLAGS(SmiOperationConstraints)
@@ -326,7 +328,7 @@ class MacroAssembler: public Assembler {
void DebugBreak();
// Generates function and stub prologue code.
- void StubPrologue();
+ void StubPrologue(StackFrame::Type type);
void Prologue(bool code_pre_aging);
// Enter specific kind of exit frame; either in normal or
@@ -370,6 +372,16 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// JavaScript invokes
+ // Removes current frame and its arguments from the stack preserving
+ // the arguments and a return address pushed to the stack for the next call.
+ // |ra_state| defines whether return address is already pushed to stack or
+ // not. Both |callee_args_count| and |caller_args_count_reg| do not include
+ // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
+ // is trashed.
+ void PrepareForTailCall(const ParameterCount& callee_args_count,
+ Register caller_args_count_reg, Register scratch0,
+ Register scratch1, ReturnAddressState ra_state);
+
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
@@ -806,6 +818,7 @@ class MacroAssembler: public Assembler {
// Load a register with a long value as efficiently as possible.
void Set(Register dst, int64_t x);
+ void Set(Register dst, int64_t x, RelocInfo::Mode rmode);
void Set(const Operand& dst, intptr_t x);
void Cvtss2sd(XMMRegister dst, XMMRegister src);
@@ -1014,12 +1027,6 @@ class MacroAssembler: public Assembler {
return (target.requires_rex() ? 2 : 1) + target.operand_size();
}
- // Emit call to the code we are currently generating.
- void CallSelf() {
- Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
- Call(self, RelocInfo::CODE_TARGET);
- }
-
// Non-SSE2 instructions.
void Pextrd(Register dst, XMMRegister src, int8_t imm8);
void Pinsrd(XMMRegister dst, Register src, int8_t imm8);
@@ -1191,6 +1198,7 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a number, enabled via --debug-code.
void AssertNumber(Register object);
+ void AssertNotNumber(Register object);
// Abort execution if argument is a smi, enabled via --debug-code.
void AssertNotSmi(Register object);
diff --git a/deps/v8/src/x87/assembler-x87-inl.h b/deps/v8/src/x87/assembler-x87-inl.h
index 7af1d02f32..802c80fa71 100644
--- a/deps/v8/src/x87/assembler-x87-inl.h
+++ b/deps/v8/src/x87/assembler-x87-inl.h
@@ -81,6 +81,10 @@ Address RelocInfo::target_address() {
return Assembler::target_address_at(pc_, host_);
}
+Address RelocInfo::wasm_memory_reference() {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ return Memory::Address_at(pc_);
+}
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
@@ -116,6 +120,20 @@ void RelocInfo::set_target_address(Address target,
}
}
+void RelocInfo::update_wasm_memory_reference(
+ Address old_base, Address new_base, size_t old_size, size_t new_size,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ DCHECK(old_base <= wasm_memory_reference() &&
+ wasm_memory_reference() < old_base + old_size);
+ Address updated_reference = new_base + (wasm_memory_reference() - old_base);
+ DCHECK(new_base <= updated_reference &&
+ updated_reference < new_base + new_size);
+ Memory::Address_at(pc_) = updated_reference;
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ Assembler::FlushICache(isolate_, pc_, sizeof(int32_t));
+ }
+}
Object* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
@@ -140,8 +158,8 @@ void RelocInfo::set_target_object(Object* target,
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target));
}
}
@@ -322,6 +340,10 @@ Immediate::Immediate(int x) {
rmode_ = RelocInfo::NONE32;
}
+Immediate::Immediate(Address x, RelocInfo::Mode rmode) {
+ x_ = reinterpret_cast<int32_t>(x);
+ rmode_ = rmode;
+}
Immediate::Immediate(const ExternalReference& ext) {
x_ = reinterpret_cast<int32_t>(ext.address());
@@ -430,6 +452,11 @@ void Assembler::emit_code_relative_offset(Label* label) {
}
}
+void Assembler::emit_b(Immediate x) {
+ DCHECK(x.is_int8() || x.is_uint8());
+ uint8_t value = static_cast<uint8_t>(x.x_);
+ *pc_++ = value;
+}
void Assembler::emit_w(const Immediate& x) {
DCHECK(RelocInfo::IsNone(x.rmode_));
diff --git a/deps/v8/src/x87/assembler-x87.cc b/deps/v8/src/x87/assembler-x87.cc
index 66fda5787f..e74d77030a 100644
--- a/deps/v8/src/x87/assembler-x87.cc
+++ b/deps/v8/src/x87/assembler-x87.cc
@@ -617,8 +617,8 @@ void Assembler::and_(const Operand& dst, Register src) {
emit_operand(src, dst);
}
-
-void Assembler::cmpb(const Operand& op, int8_t imm8) {
+void Assembler::cmpb(const Operand& op, Immediate imm8) {
+ DCHECK(imm8.is_int8() || imm8.is_uint8());
EnsureSpace ensure_space(this);
if (op.is_reg(eax)) {
EMIT(0x3C);
@@ -626,7 +626,7 @@ void Assembler::cmpb(const Operand& op, int8_t imm8) {
EMIT(0x80);
emit_operand(edi, op); // edi == 7
}
- EMIT(imm8);
+ emit_b(imm8);
}
@@ -655,6 +655,19 @@ void Assembler::cmpw(const Operand& op, Immediate imm16) {
emit_w(imm16);
}
+void Assembler::cmpw(Register reg, const Operand& op) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x39);
+ emit_operand(reg, op);
+}
+
+void Assembler::cmpw(const Operand& op, Register reg) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x3B);
+ emit_operand(reg, op);
+}
void Assembler::cmp(Register reg, int32_t imm32) {
EnsureSpace ensure_space(this);
@@ -939,19 +952,26 @@ void Assembler::sar_cl(const Operand& dst) {
emit_operand(edi, dst);
}
-
void Assembler::sbb(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
EMIT(0x1B);
emit_operand(dst, src);
}
+void Assembler::shld(Register dst, Register src, uint8_t shift) {
+ DCHECK(is_uint5(shift));
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xA4);
+ emit_operand(src, Operand(dst));
+ EMIT(shift);
+}
-void Assembler::shld(Register dst, const Operand& src) {
+void Assembler::shld_cl(Register dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xA5);
- emit_operand(dst, src);
+ emit_operand(src, Operand(dst));
}
@@ -975,15 +995,6 @@ void Assembler::shl_cl(const Operand& dst) {
emit_operand(esp, dst);
}
-
-void Assembler::shrd(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xAD);
- emit_operand(dst, src);
-}
-
-
void Assembler::shr(const Operand& dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
DCHECK(is_uint5(imm8)); // illegal shift count
@@ -1004,6 +1015,21 @@ void Assembler::shr_cl(const Operand& dst) {
emit_operand(ebp, dst);
}
+void Assembler::shrd(Register dst, Register src, uint8_t shift) {
+ DCHECK(is_uint5(shift));
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xAC);
+ emit_operand(dst, Operand(src));
+ EMIT(shift);
+}
+
+void Assembler::shrd_cl(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xAD);
+ emit_operand(src, dst);
+}
void Assembler::sub(const Operand& dst, const Immediate& x) {
EnsureSpace ensure_space(this);
@@ -1026,8 +1052,8 @@ void Assembler::sub(const Operand& dst, Register src) {
void Assembler::test(Register reg, const Immediate& imm) {
- if (RelocInfo::IsNone(imm.rmode_) && is_uint8(imm.x_)) {
- test_b(reg, imm.x_);
+ if (imm.is_uint8()) {
+ test_b(reg, imm);
return;
}
@@ -1064,8 +1090,8 @@ void Assembler::test(const Operand& op, const Immediate& imm) {
test(op.reg(), imm);
return;
}
- if (RelocInfo::IsNone(imm.rmode_) && is_uint8(imm.x_)) {
- return test_b(op, imm.x_);
+ if (imm.is_uint8()) {
+ return test_b(op, imm);
}
EnsureSpace ensure_space(this);
EMIT(0xF7);
@@ -1073,25 +1099,25 @@ void Assembler::test(const Operand& op, const Immediate& imm) {
emit(imm);
}
-
-void Assembler::test_b(Register reg, uint8_t imm8) {
+void Assembler::test_b(Register reg, Immediate imm8) {
+ DCHECK(imm8.is_uint8());
EnsureSpace ensure_space(this);
// Only use test against byte for registers that have a byte
// variant: eax, ebx, ecx, and edx.
if (reg.is(eax)) {
EMIT(0xA8);
- EMIT(imm8);
+ emit_b(imm8);
} else if (reg.is_byte_register()) {
- emit_arith_b(0xF6, 0xC0, reg, imm8);
+ emit_arith_b(0xF6, 0xC0, reg, static_cast<uint8_t>(imm8.x_));
} else {
+ EMIT(0x66);
EMIT(0xF7);
EMIT(0xC0 | reg.code());
- emit(imm8);
+ emit_w(imm8);
}
}
-
-void Assembler::test_b(const Operand& op, uint8_t imm8) {
+void Assembler::test_b(const Operand& op, Immediate imm8) {
if (op.is_reg_only()) {
test_b(op.reg(), imm8);
return;
@@ -1099,9 +1125,42 @@ void Assembler::test_b(const Operand& op, uint8_t imm8) {
EnsureSpace ensure_space(this);
EMIT(0xF6);
emit_operand(eax, op);
- EMIT(imm8);
+ emit_b(imm8);
+}
+
+void Assembler::test_w(Register reg, Immediate imm16) {
+ DCHECK(imm16.is_int16() || imm16.is_uint16());
+ EnsureSpace ensure_space(this);
+ if (reg.is(eax)) {
+ EMIT(0xA9);
+ emit_w(imm16);
+ } else {
+ EMIT(0x66);
+ EMIT(0xF7);
+ EMIT(0xc0 | reg.code());
+ emit_w(imm16);
+ }
}
+void Assembler::test_w(Register reg, const Operand& op) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x85);
+ emit_operand(reg, op);
+}
+
+void Assembler::test_w(const Operand& op, Immediate imm16) {
+ DCHECK(imm16.is_int16() || imm16.is_uint16());
+ if (op.is_reg_only()) {
+ test_w(op.reg(), imm16);
+ return;
+ }
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0xF7);
+ emit_operand(eax, op);
+ emit_w(imm16);
+}
void Assembler::xor_(Register dst, int32_t imm32) {
EnsureSpace ensure_space(this);
diff --git a/deps/v8/src/x87/assembler-x87.h b/deps/v8/src/x87/assembler-x87.h
index 15fc29c29f..96eced9624 100644
--- a/deps/v8/src/x87/assembler-x87.h
+++ b/deps/v8/src/x87/assembler-x87.h
@@ -272,6 +272,7 @@ class Immediate BASE_EMBEDDED {
inline explicit Immediate(Handle<Object> handle);
inline explicit Immediate(Smi* value);
inline explicit Immediate(Address addr);
+ inline explicit Immediate(Address x, RelocInfo::Mode rmode);
static Immediate CodeRelativeOffset(Label* label) {
return Immediate(label);
@@ -281,9 +282,15 @@ class Immediate BASE_EMBEDDED {
bool is_int8() const {
return -128 <= x_ && x_ < 128 && RelocInfo::IsNone(rmode_);
}
+ bool is_uint8() const {
+ return v8::internal::is_uint8(x_) && RelocInfo::IsNone(rmode_);
+ }
bool is_int16() const {
return -32768 <= x_ && x_ < 32768 && RelocInfo::IsNone(rmode_);
}
+ bool is_uint16() const {
+ return v8::internal::is_uint16(x_) && RelocInfo::IsNone(rmode_);
+ }
private:
inline explicit Immediate(Label* value);
@@ -659,13 +666,18 @@ class Assembler : public AssemblerBase {
void and_(const Operand& dst, Register src);
void and_(const Operand& dst, const Immediate& x);
- void cmpb(Register reg, int8_t imm8) { cmpb(Operand(reg), imm8); }
- void cmpb(const Operand& op, int8_t imm8);
+ void cmpb(Register reg, Immediate imm8) { cmpb(Operand(reg), imm8); }
+ void cmpb(const Operand& op, Immediate imm8);
void cmpb(Register reg, const Operand& op);
void cmpb(const Operand& op, Register reg);
+ void cmpb(Register dst, Register src) { cmpb(Operand(dst), src); }
void cmpb_al(const Operand& op);
void cmpw_ax(const Operand& op);
- void cmpw(const Operand& op, Immediate imm16);
+ void cmpw(const Operand& dst, Immediate src);
+ void cmpw(Register dst, Immediate src) { cmpw(Operand(dst), src); }
+ void cmpw(Register dst, const Operand& src);
+ void cmpw(Register dst, Register src) { cmpw(Operand(dst), src); }
+ void cmpw(const Operand& dst, Register src);
void cmp(Register reg, int32_t imm32);
void cmp(Register reg, Handle<Object> handle);
void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); }
@@ -731,21 +743,20 @@ class Assembler : public AssemblerBase {
void sbb(Register dst, const Operand& src);
- void shld(Register dst, Register src) { shld(dst, Operand(src)); }
- void shld(Register dst, const Operand& src);
-
void shl(Register dst, uint8_t imm8) { shl(Operand(dst), imm8); }
void shl(const Operand& dst, uint8_t imm8);
void shl_cl(Register dst) { shl_cl(Operand(dst)); }
void shl_cl(const Operand& dst);
-
- void shrd(Register dst, Register src) { shrd(dst, Operand(src)); }
- void shrd(Register dst, const Operand& src);
+ void shld(Register dst, Register src, uint8_t shift);
+ void shld_cl(Register dst, Register src);
void shr(Register dst, uint8_t imm8) { shr(Operand(dst), imm8); }
void shr(const Operand& dst, uint8_t imm8);
void shr_cl(Register dst) { shr_cl(Operand(dst)); }
void shr_cl(const Operand& dst);
+ void shrd(Register dst, Register src, uint8_t shift);
+ void shrd_cl(Register dst, Register src) { shrd_cl(Operand(dst), src); }
+ void shrd_cl(const Operand& dst, Register src);
void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); }
void sub(const Operand& dst, const Immediate& x);
@@ -756,10 +767,18 @@ class Assembler : public AssemblerBase {
void test(Register reg, const Immediate& imm);
void test(Register reg0, Register reg1) { test(reg0, Operand(reg1)); }
void test(Register reg, const Operand& op);
- void test_b(Register reg, const Operand& op);
void test(const Operand& op, const Immediate& imm);
- void test_b(Register reg, uint8_t imm8);
- void test_b(const Operand& op, uint8_t imm8);
+ void test(const Operand& op, Register reg) { test(reg, op); }
+ void test_b(Register reg, const Operand& op);
+ void test_b(Register reg, Immediate imm8);
+ void test_b(const Operand& op, Immediate imm8);
+ void test_b(const Operand& op, Register reg) { test_b(reg, op); }
+ void test_b(Register dst, Register src) { test_b(dst, Operand(src)); }
+ void test_w(Register reg, const Operand& op);
+ void test_w(Register reg, Immediate imm16);
+ void test_w(const Operand& op, Immediate imm16);
+ void test_w(const Operand& op, Register reg) { test_w(reg, op); }
+ void test_w(Register dst, Register src) { test_w(dst, Operand(src)); }
void xor_(Register dst, int32_t imm32);
void xor_(Register dst, Register src) { xor_(dst, Operand(src)); }
@@ -961,7 +980,9 @@ class Assembler : public AssemblerBase {
static bool IsNop(Address addr);
- PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+ AssemblerPositionsRecorder* positions_recorder() {
+ return &positions_recorder_;
+ }
int relocation_writer_size() {
return (buffer_ + buffer_size_) - reloc_info_writer.pos();
@@ -1003,6 +1024,7 @@ class Assembler : public AssemblerBase {
RelocInfo::Mode rmode,
TypeFeedbackId id = TypeFeedbackId::None());
inline void emit(const Immediate& x);
+ inline void emit_b(Immediate x);
inline void emit_w(const Immediate& x);
inline void emit_q(uint64_t x);
@@ -1048,8 +1070,8 @@ class Assembler : public AssemblerBase {
// code generation
RelocInfoWriter reloc_info_writer;
- PositionsRecorder positions_recorder_;
- friend class PositionsRecorder;
+ AssemblerPositionsRecorder positions_recorder_;
+ friend class AssemblerPositionsRecorder;
};
diff --git a/deps/v8/src/x87/builtins-x87.cc b/deps/v8/src/x87/builtins-x87.cc
index ce07908d93..9e13172c85 100644
--- a/deps/v8/src/x87/builtins-x87.cc
+++ b/deps/v8/src/x87/builtins-x87.cc
@@ -123,6 +123,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool check_derived_construct) {
// ----------- S t a t e -------------
// -- eax: number of arguments
+ // -- esi: context
// -- edi: constructor function
// -- ebx: allocation site or undefined
// -- edx: new target
@@ -134,6 +135,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Preserve the incoming parameters on the stack.
__ AssertUndefinedOrAllocationSite(ebx);
+ __ push(esi);
__ push(ebx);
__ SmiTag(eax);
__ push(eax);
@@ -201,7 +203,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Restore context from the frame.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ mov(esi, Operand(ebp, ConstructFrameConstants::kContextOffset));
if (create_implicit_receiver) {
// If the result is an object (in the ECMA sense), we should get rid
@@ -325,9 +327,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
- // Clear the context before we push it when entering the internal frame.
- __ Move(esi, Immediate(0));
-
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -603,27 +602,24 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
// they are to be pushed onto the stack.
// -----------------------------------
- // Save number of arguments on the stack below where arguments are going
- // to be pushed.
- __ mov(ecx, eax);
- __ neg(ecx);
- __ mov(Operand(esp, ecx, times_pointer_size, -kPointerSize), eax);
- __ mov(eax, ecx);
-
// Pop return address to allow tail-call after pushing arguments.
__ Pop(ecx);
- // Find the address of the last argument.
- __ shl(eax, kPointerSizeLog2);
- __ add(eax, ebx);
+ // Push edi in the slot meant for receiver. We need an extra register
+ // so store edi temporarily on stack.
+ __ Push(edi);
- // Push padding for receiver.
- __ Push(Immediate(0));
+ // Find the address of the last argument.
+ __ mov(edi, eax);
+ __ neg(edi);
+ __ shl(edi, kPointerSizeLog2);
+ __ add(edi, ebx);
- Generate_InterpreterPushArgs(masm, eax);
+ Generate_InterpreterPushArgs(masm, edi);
- // Restore number of arguments from slot on stack.
- __ mov(eax, Operand(esp, -kPointerSize));
+ // Restore the constructor from slot on stack. It was pushed at the slot
+ // meant for receiver.
+ __ mov(edi, Operand(esp, eax, times_pointer_size, 0));
// Re-push return address.
__ Push(ecx);
@@ -961,6 +957,28 @@ void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
}
}
+// static
+void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argc
+ // -- esp[0] : return address
+ // -- esp[4] : first argument (left-hand side)
+ // -- esp[8] : receiver (right-hand side)
+ // -----------------------------------
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ mov(InstanceOfDescriptor::LeftRegister(),
+ Operand(ebp, 2 * kPointerSize)); // Load left-hand side.
+ __ mov(InstanceOfDescriptor::RightRegister(),
+ Operand(ebp, 3 * kPointerSize)); // Load right-hand side.
+ InstanceOfStub stub(masm->isolate(), true);
+ __ CallStub(&stub);
+ }
+
+ // Pop the argument and the receiver.
+ __ ret(2 * kPointerSize);
+}
// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
@@ -1008,7 +1026,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
Label receiver_not_callable;
__ JumpIfSmi(edi, &receiver_not_callable, Label::kNear);
__ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsCallable);
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsCallable));
__ j(zero, &receiver_not_callable, Label::kNear);
// 3. Tail call with no arguments if argArray is null or undefined.
@@ -1131,7 +1150,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
Label target_not_callable;
__ JumpIfSmi(edi, &target_not_callable, Label::kNear);
__ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsCallable);
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsCallable));
__ j(zero, &target_not_callable, Label::kNear);
// 3a. Apply the target to the given argumentsList (passing undefined for
@@ -1147,7 +1167,6 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
}
}
-
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
@@ -1196,14 +1215,16 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
Label target_not_constructor;
__ JumpIfSmi(edi, &target_not_constructor, Label::kNear);
__ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsConstructor);
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsConstructor));
__ j(zero, &target_not_constructor, Label::kNear);
// 3. Make sure the target is actually a constructor.
Label new_target_not_constructor;
__ JumpIfSmi(edx, &new_target_not_constructor, Label::kNear);
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsConstructor);
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsConstructor));
__ j(zero, &new_target_not_constructor, Label::kNear);
// 4a. Construct the target with the given new.target and argumentsList.
@@ -1891,18 +1912,20 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
Comment cmnt(masm, "[ PrepareForTailCall");
- // Prepare for tail call only if the debugger is not active.
+ // Prepare for tail call only if ES2015 tail call elimination is enabled.
Label done;
- ExternalReference debug_is_active =
- ExternalReference::debug_is_active_address(masm->isolate());
- __ movzx_b(scratch1, Operand::StaticVariable(debug_is_active));
+ ExternalReference is_tail_call_elimination_enabled =
+ ExternalReference::is_tail_call_elimination_enabled_address(
+ masm->isolate());
+ __ movzx_b(scratch1,
+ Operand::StaticVariable(is_tail_call_elimination_enabled));
__ cmp(scratch1, Immediate(0));
- __ j(not_equal, &done, Label::kNear);
+ __ j(equal, &done, Label::kNear);
// Drop possible interpreter handler/stub frame.
{
Label no_interpreter_frame;
- __ cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
+ __ cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
Immediate(Smi::FromInt(StackFrame::STUB)));
__ j(not_equal, &no_interpreter_frame, Label::kNear);
__ mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
@@ -1910,16 +1933,18 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
}
// Check if next frame is an arguments adaptor frame.
+ Register caller_args_count_reg = scratch1;
Label no_arguments_adaptor, formal_parameter_count_loaded;
__ mov(scratch2, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ cmp(Operand(scratch2, StandardFrameConstants::kContextOffset),
+ __ cmp(Operand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset),
Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(not_equal, &no_arguments_adaptor, Label::kNear);
- // Drop arguments adaptor frame and load arguments count.
+ // Drop current frame and load arguments count from arguments adaptor frame.
__ mov(ebp, scratch2);
- __ mov(scratch1, Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(scratch1);
+ __ mov(caller_args_count_reg,
+ Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(caller_args_count_reg);
__ jmp(&formal_parameter_count_loaded, Label::kNear);
__ bind(&no_arguments_adaptor);
@@ -1928,57 +1953,15 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
__ mov(scratch1,
FieldOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
__ mov(
- scratch1,
+ caller_args_count_reg,
FieldOperand(scratch1, SharedFunctionInfo::kFormalParameterCountOffset));
- __ SmiUntag(scratch1);
+ __ SmiUntag(caller_args_count_reg);
__ bind(&formal_parameter_count_loaded);
- // Calculate the destination address where we will put the return address
- // after we drop current frame.
- Register new_sp_reg = scratch2;
- __ sub(scratch1, args_reg);
- __ lea(new_sp_reg, Operand(ebp, scratch1, times_pointer_size,
- StandardFrameConstants::kCallerPCOffset));
-
- if (FLAG_debug_code) {
- __ cmp(esp, new_sp_reg);
- __ Check(below, kStackAccessBelowStackPointer);
- }
-
- // Copy receiver and return address as well.
- Register count_reg = scratch1;
- __ lea(count_reg, Operand(args_reg, 2));
-
- // Copy return address from caller's frame to current frame's return address
- // to avoid its trashing and let the following loop copy it to the right
- // place.
- Register tmp_reg = scratch3;
- __ mov(tmp_reg, Operand(ebp, StandardFrameConstants::kCallerPCOffset));
- __ mov(Operand(esp, 0), tmp_reg);
-
- // Restore caller's frame pointer now as it could be overwritten by
- // the copying loop.
- __ mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-
- Operand src(esp, count_reg, times_pointer_size, 0);
- Operand dst(new_sp_reg, count_reg, times_pointer_size, 0);
-
- // Now copy callee arguments to the caller frame going backwards to avoid
- // callee arguments corruption (source and destination areas could overlap).
- Label loop, entry;
- __ jmp(&entry, Label::kNear);
- __ bind(&loop);
- __ dec(count_reg);
- __ mov(tmp_reg, src);
- __ mov(dst, tmp_reg);
- __ bind(&entry);
- __ cmp(count_reg, Immediate(0));
- __ j(not_equal, &loop, Label::kNear);
-
- // Leave current frame.
- __ mov(esp, new_sp_reg);
-
+ ParameterCount callee_args_count(args_reg);
+ __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+ scratch3, ReturnAddressState::kOnStack, 0);
__ bind(&done);
}
} // namespace
@@ -1998,7 +1981,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ test_b(FieldOperand(edx, SharedFunctionInfo::kFunctionKindByteOffset),
- SharedFunctionInfo::kClassConstructorBitsWithinByte);
+ Immediate(SharedFunctionInfo::kClassConstructorBitsWithinByte));
__ j(not_zero, &class_constructor);
// Enter the context of the function; ToObject has to run in the function
@@ -2010,8 +1993,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
__ test_b(FieldOperand(edx, SharedFunctionInfo::kNativeByteOffset),
- (1 << SharedFunctionInfo::kNativeBitWithinByte) |
- (1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+ Immediate((1 << SharedFunctionInfo::kNativeBitWithinByte) |
+ (1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
__ j(not_zero, &done_convert);
{
// ----------- S t a t e -------------
@@ -2233,7 +2216,8 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
RelocInfo::CODE_TARGET);
// Check if target has a [[Call]] internal method.
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsCallable);
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsCallable));
__ j(zero, &non_callable);
__ CmpInstanceType(ecx, JS_PROXY_TYPE);
@@ -2369,7 +2353,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
// Check if target has a [[Construct]] internal method.
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsConstructor);
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsConstructor));
__ j(zero, &non_constructor, Label::kNear);
// Only dispatch to bound functions after checking whether they are
@@ -2442,26 +2427,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected.
__ bind(&too_few);
- // If the function is strong we need to throw an error.
- Label no_strong_error;
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrongModeByteOffset),
- 1 << SharedFunctionInfo::kStrongModeBitWithinByte);
- __ j(equal, &no_strong_error, Label::kNear);
-
- // What we really care about is the required number of arguments.
- __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kLengthOffset));
- __ SmiUntag(ecx);
- __ cmp(eax, ecx);
- __ j(greater_equal, &no_strong_error, Label::kNear);
-
- {
- FrameScope frame(masm, StackFrame::MANUAL);
- EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
- }
-
- __ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
ArgumentsAdaptorStackCheck(masm, &stack_overflow);
@@ -2500,7 +2465,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Call the entry point.
__ bind(&invoke);
// Restore function pointer.
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ mov(edi, Operand(ebp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
// eax : expected number of arguments
// edx : new target (passed through to callee)
// edi : function (passed through to callee)
@@ -2675,24 +2640,6 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
}
-void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
- // We check the stack limit as indicator that recompilation might be done.
- Label ok;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm->isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, Label::kNear);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard);
- }
- __ jmp(masm->isolate()->builtins()->OnStackReplacement(),
- RelocInfo::CODE_TARGET);
-
- __ bind(&ok);
- __ ret(0);
-}
-
#undef __
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/x87/code-stubs-x87.cc b/deps/v8/src/x87/code-stubs-x87.cc
index ff6c8d29e5..71adfd3531 100644
--- a/deps/v8/src/x87/code-stubs-x87.cc
+++ b/deps/v8/src/x87/code-stubs-x87.cc
@@ -4,9 +4,10 @@
#if V8_TARGET_ARCH_X87
+#include "src/code-stubs.h"
+#include "src/api-arguments.h"
#include "src/base/bits.h"
#include "src/bootstrapper.h"
-#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
@@ -84,6 +85,10 @@ void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
}
+void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+ Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
+ descriptor->Initialize(eax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
+}
void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
@@ -241,7 +246,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
Immediate(static_cast<uint32_t>(Double::kSignificandMask >> 32)));
__ add(result_reg,
Immediate(static_cast<uint32_t>(Double::kHiddenBit >> 32)));
- __ shrd(result_reg, scratch1);
+ __ shrd_cl(scratch1, result_reg);
__ shr_cl(result_reg);
__ test(ecx, Immediate(32));
{
@@ -366,34 +371,6 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
}
-void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
- // Return address is on the stack.
- Label slow;
-
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
- Register scratch = eax;
- DCHECK(!scratch.is(receiver) && !scratch.is(key));
-
- // Check that the key is an array index, that is Uint32.
- __ test(key, Immediate(kSmiTagMask | kSmiSignMask));
- __ j(not_zero, &slow);
-
- // Everything is fine, call runtime.
- __ pop(scratch);
- __ push(receiver); // receiver
- __ push(key); // key
- __ push(scratch); // return address
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
-
- __ bind(&slow);
- PropertyAccessCompiler::TailCallBuiltin(
- masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
-
void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
// Return address is on the stack.
Label miss;
@@ -799,7 +776,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
- __ test_b(ebx, kIsIndirectStringMask);
+ __ test_b(ebx, Immediate(kIsIndirectStringMask));
__ Assert(zero, kExternalStringExpectedButNotFound);
}
__ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
@@ -808,7 +785,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ sub(eax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
STATIC_ASSERT(kTwoByteStringTag == 0);
// (8) Is the external string one byte? If yes, go to (5).
- __ test_b(ebx, kStringEncodingMask);
+ __ test_b(ebx, Immediate(kStringEncodingMask));
__ j(not_zero, &seq_one_byte_string); // Go to (5).
// eax: sequential subject string (or look-alike, external string)
@@ -933,13 +910,13 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
// Call runtime on identical JSObjects. Otherwise return equal.
- __ cmpb(ecx, static_cast<uint8_t>(FIRST_JS_RECEIVER_TYPE));
+ __ cmpb(ecx, Immediate(FIRST_JS_RECEIVER_TYPE));
__ j(above_equal, &runtime_call, Label::kFar);
// Call runtime on identical symbols since we need to throw a TypeError.
- __ cmpb(ecx, static_cast<uint8_t>(SYMBOL_TYPE));
+ __ cmpb(ecx, Immediate(SYMBOL_TYPE));
__ j(equal, &runtime_call, Label::kFar);
// Call runtime on identical SIMD values since we must throw a TypeError.
- __ cmpb(ecx, static_cast<uint8_t>(SIMD128_VALUE_TYPE));
+ __ cmpb(ecx, Immediate(SIMD128_VALUE_TYPE));
__ j(equal, &runtime_call, Label::kFar);
}
__ Move(eax, Immediate(Smi::FromInt(EQUAL)));
@@ -1097,7 +1074,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Non-strict equality. Objects are unequal if
// they are both JSObjects and not undetectable,
// and their pointers are different.
- Label return_unequal, undetectable;
+ Label return_equal, return_unequal, undetectable;
// At most one is a smi, so we can test for smi by adding the two.
// A smi plus a heap object has the low bit set, a heap object plus
// a heap object has the low bit clear.
@@ -1105,16 +1082,16 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTagMask == 1);
__ lea(ecx, Operand(eax, edx, times_1, 0));
__ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &runtime_call, Label::kNear);
+ __ j(not_zero, &runtime_call);
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
__ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
+ Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, &undetectable, Label::kNear);
__ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
+ Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, &return_unequal, Label::kNear);
__ CmpInstanceType(ebx, FIRST_JS_RECEIVER_TYPE);
@@ -1128,8 +1105,18 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ bind(&undetectable);
__ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
+ Immediate(1 << Map::kIsUndetectable));
__ j(zero, &return_unequal, Label::kNear);
+
+ // If both sides are JSReceivers, then the result is false according to
+ // the HTML specification, which says that only comparisons with null or
+ // undefined are affected by special casing for document.all.
+ __ CmpInstanceType(ebx, ODDBALL_TYPE);
+ __ j(zero, &return_equal, Label::kNear);
+ __ CmpInstanceType(ecx, ODDBALL_TYPE);
+ __ j(not_zero, &return_unequal, Label::kNear);
+
+ __ bind(&return_equal);
__ Move(eax, Immediate(EQUAL));
__ ret(0); // eax, edx were pushed
}
@@ -1705,8 +1692,9 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Push marker in two places.
int marker = type();
- __ push(Immediate(Smi::FromInt(marker))); // context slot
- __ push(Immediate(Smi::FromInt(marker))); // function slot
+ __ push(Immediate(Smi::FromInt(marker))); // marker
+ ExternalReference context_address(Isolate::kContextAddress, isolate());
+ __ push(Operand::StaticVariable(context_address)); // context
// Save callee-saved registers (C calling conventions).
__ push(edi);
__ push(esi);
@@ -1835,9 +1823,14 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
__ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
__ j(not_equal, &slow_case);
+ // Go to the runtime if the function is not a constructor.
+ __ test_b(FieldOperand(function_map, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsConstructor));
+ __ j(zero, &slow_case);
+
// Ensure that {function} has an instance prototype.
__ test_b(FieldOperand(function_map, Map::kBitFieldOffset),
- static_cast<uint8_t>(1 << Map::kHasNonInstancePrototype));
+ Immediate(1 << Map::kHasNonInstancePrototype));
__ j(not_zero, &slow_case);
// Get the "prototype" (or initial map) of the {function}.
@@ -1871,7 +1864,7 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
// Check if the object needs to be access checked.
__ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
- 1 << Map::kIsAccessCheckNeeded);
+ Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &fast_runtime_fallback, Label::kNear);
// Check if the current object is a Proxy.
__ CmpInstanceType(object_map, JS_PROXY_TYPE);
@@ -1906,7 +1899,8 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
__ Push(object);
__ Push(function);
__ PushReturnAddressFrom(scratch);
- __ TailCallRuntime(Runtime::kInstanceOf);
+ __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
+ : Runtime::kInstanceOf);
}
@@ -2216,13 +2210,13 @@ void SubStringStub::Generate(MacroAssembler* masm) {
Label two_byte_sequential, runtime_drop_two, sequential_string;
STATIC_ASSERT(kExternalStringTag != 0);
STATIC_ASSERT(kSeqStringTag == 0);
- __ test_b(ebx, kExternalStringTag);
+ __ test_b(ebx, Immediate(kExternalStringTag));
__ j(zero, &sequential_string);
// Handle external string.
// Rule out short external strings.
STATIC_ASSERT(kShortExternalStringTag != 0);
- __ test_b(ebx, kShortExternalStringMask);
+ __ test_b(ebx, Immediate(kShortExternalStringMask));
__ j(not_zero, &runtime);
__ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
@@ -2235,7 +2229,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ push(edi);
__ SmiUntag(ecx);
STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
- __ test_b(ebx, kStringEncodingMask);
+ __ test_b(ebx, Immediate(kStringEncodingMask));
__ j(zero, &two_byte_sequential);
// Sequential one byte string. Allocate the result.
@@ -2324,23 +2318,21 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ Ret();
__ bind(&not_heap_number);
- Label not_string, slow_string;
+ NonNumberToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+}
+
+void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
+ // The NonNumberToNumber stub takes one argument in eax.
+ __ AssertNotNumber(eax);
+
+ Label not_string;
__ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edi);
// eax: object
// edi: object map
__ j(above_equal, &not_string, Label::kNear);
- // Check if string has a cached array index.
- __ test(FieldOperand(eax, String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- __ j(not_zero, &slow_string, Label::kNear);
- __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
- __ IndexFromHash(eax, eax);
- __ Ret();
- __ bind(&slow_string);
- __ pop(ecx); // Pop return address.
- __ push(eax); // Push argument.
- __ push(ecx); // Push return address.
- __ TailCallRuntime(Runtime::kStringToNumber);
+ StringToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
__ bind(&not_string);
Label not_oddball;
@@ -2356,26 +2348,26 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kToNumber);
}
+void StringToNumberStub::Generate(MacroAssembler* masm) {
+ // The StringToNumber stub takes one argument in eax.
+ __ AssertString(eax);
-void ToLengthStub::Generate(MacroAssembler* masm) {
- // The ToLength stub takes on argument in eax.
- Label not_smi, positive_smi;
- __ JumpIfNotSmi(eax, &not_smi, Label::kNear);
- STATIC_ASSERT(kSmiTag == 0);
- __ test(eax, eax);
- __ j(greater_equal, &positive_smi, Label::kNear);
- __ xor_(eax, eax);
- __ bind(&positive_smi);
+ // Check if string has a cached array index.
+ Label runtime;
+ __ test(FieldOperand(eax, String::kHashFieldOffset),
+ Immediate(String::kContainsCachedArrayIndexMask));
+ __ j(not_zero, &runtime, Label::kNear);
+ __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
+ __ IndexFromHash(eax, eax);
__ Ret();
- __ bind(&not_smi);
- __ pop(ecx); // Pop return address.
- __ push(eax); // Push argument.
- __ push(ecx); // Push return address.
- __ TailCallRuntime(Runtime::kToLength);
+ __ bind(&runtime);
+ __ PopReturnAddressTo(ecx); // Pop return address.
+ __ Push(eax); // Push argument.
+ __ PushReturnAddressFrom(ecx); // Push return address.
+ __ TailCallRuntime(Runtime::kStringToNumber);
}
-
void ToStringStub::Generate(MacroAssembler* masm) {
// The ToString stub takes one argument in eax.
Label is_number;
@@ -2572,44 +2564,6 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
}
-void StringCompareStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- edx : left string
- // -- eax : right string
- // -- esp[0] : return address
- // -----------------------------------
- __ AssertString(edx);
- __ AssertString(eax);
-
- Label not_same;
- __ cmp(edx, eax);
- __ j(not_equal, &not_same, Label::kNear);
- __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
- __ IncrementCounter(isolate()->counters()->string_compare_native(), 1);
- __ Ret();
-
- __ bind(&not_same);
-
- // Check that both objects are sequential one-byte strings.
- Label runtime;
- __ JumpIfNotBothSequentialOneByteStrings(edx, eax, ecx, ebx, &runtime);
-
- // Compare flat one-byte strings.
- __ IncrementCounter(isolate()->counters()->string_compare_native(), 1);
- StringHelper::GenerateCompareFlatOneByteStrings(masm, edx, eax, ecx, ebx,
- edi);
-
- // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ bind(&runtime);
- __ PopReturnAddressTo(ecx);
- __ Push(edx);
- __ Push(eax);
- __ PushReturnAddressFrom(ecx);
- __ TailCallRuntime(Runtime::kStringCompare);
-}
-
-
void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- edx : left
@@ -2910,13 +2864,20 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
// Handle more complex cases in runtime.
__ bind(&runtime);
- __ pop(tmp1); // Return address.
- __ push(left);
- __ push(right);
- __ push(tmp1);
if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(left);
+ __ Push(right);
+ __ CallRuntime(Runtime::kStringEqual);
+ }
+ __ sub(eax, Immediate(masm->isolate()->factory()->true_value()));
+ __ Ret();
} else {
+ __ pop(tmp1); // Return address.
+ __ push(left);
+ __ push(right);
+ __ push(tmp1);
__ TailCallRuntime(Runtime::kStringCompare);
}
@@ -3401,7 +3362,7 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ call(ces.GetCode(), RelocInfo::CODE_TARGET);
int parameter_count_offset =
- StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
__ mov(ebx, MemOperand(ebp, parameter_count_offset));
masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
__ pop(ecx);
@@ -4111,7 +4072,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
// is the low bit set? If so, we are holey and that is good.
- __ test_b(edx, 1);
+ __ test_b(edx, Immediate(1));
__ j(not_zero, &normal_sequence);
}
@@ -4547,7 +4508,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
__ bind(&loop);
__ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
__ bind(&loop_entry);
- __ cmp(edi, Operand(edx, StandardFrameConstants::kMarkerOffset));
+ __ cmp(edi, Operand(edx, StandardFrameConstants::kFunctionOffset));
__ j(not_equal, &loop);
}
@@ -4555,7 +4516,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// arguments adaptor frame below the function frame).
Label no_rest_parameters;
__ mov(ebx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
- __ cmp(Operand(ebx, StandardFrameConstants::kContextOffset),
+ __ cmp(Operand(ebx, CommonFrameConstants::kContextOrFrameTypeOffset),
Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(not_equal, &no_rest_parameters, Label::kNear);
@@ -4697,7 +4658,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
__ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(eax, Operand(ebx, StandardFrameConstants::kContextOffset));
+ __ mov(eax, Operand(ebx, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adaptor_frame, Label::kNear);
@@ -4933,14 +4894,14 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ bind(&loop);
__ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
__ bind(&loop_entry);
- __ cmp(edi, Operand(edx, StandardFrameConstants::kMarkerOffset));
+ __ cmp(edi, Operand(edx, StandardFrameConstants::kFunctionOffset));
__ j(not_equal, &loop);
}
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
__ mov(ebx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
- __ cmp(Operand(ebx, StandardFrameConstants::kContextOffset),
+ __ cmp(Operand(ebx, CommonFrameConstants::kContextOrFrameTypeOffset),
Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &arguments_adaptor, Label::kNear);
{
@@ -5239,7 +5200,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
Label profiler_disabled;
Label end_profiler_check;
__ mov(eax, Immediate(ExternalReference::is_profiling_address(isolate)));
- __ cmpb(Operand(eax, 0), 0);
+ __ cmpb(Operand(eax, 0), Immediate(0));
__ j(zero, &profiler_disabled);
// Additional parameter is the address of the actual getter function.
@@ -5362,17 +5323,13 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ jmp(&leave_exit_frame);
}
-static void CallApiFunctionStubHelper(MacroAssembler* masm,
- const ParameterCount& argc,
- bool return_first_arg,
- bool call_data_undefined, bool is_lazy) {
+void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- edi : callee
// -- ebx : call_data
// -- ecx : holder
// -- edx : api_function_address
// -- esi : context
- // -- eax : number of arguments if argc is a register
// --
// -- esp[0] : return address
// -- esp[4] : last argument
@@ -5399,17 +5356,9 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
STATIC_ASSERT(FCA::kHolderIndex == 0);
STATIC_ASSERT(FCA::kArgsLength == 7);
- DCHECK(argc.is_immediate() || eax.is(argc.reg()));
-
- if (argc.is_immediate()) {
- __ pop(return_address);
- // context save.
- __ push(context);
- } else {
- // pop return address and save context
- __ xchg(context, Operand(esp, 0));
- return_address = context;
- }
+ __ pop(return_address);
+ // context save.
+ __ push(context);
// callee
__ push(callee);
@@ -5418,7 +5367,7 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
__ push(call_data);
Register scratch = call_data;
- if (!call_data_undefined) {
+ if (!call_data_undefined()) {
// return value
__ push(Immediate(masm->isolate()->factory()->undefined_value()));
// return value default
@@ -5439,7 +5388,7 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
// push return address
__ push(return_address);
- if (!is_lazy) {
+ if (!is_lazy()) {
// load context from callee
__ mov(context, FieldOperand(callee, JSFunction::kContextOffset));
}
@@ -5458,27 +5407,13 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
// FunctionCallbackInfo::implicit_args_.
__ mov(ApiParameterOperand(2), scratch);
- if (argc.is_immediate()) {
- __ add(scratch,
- Immediate((argc.immediate() + FCA::kArgsLength - 1) * kPointerSize));
- // FunctionCallbackInfo::values_.
- __ mov(ApiParameterOperand(3), scratch);
- // FunctionCallbackInfo::length_.
- __ Move(ApiParameterOperand(4), Immediate(argc.immediate()));
- // FunctionCallbackInfo::is_construct_call_.
- __ Move(ApiParameterOperand(5), Immediate(0));
- } else {
- __ lea(scratch, Operand(scratch, argc.reg(), times_pointer_size,
- (FCA::kArgsLength - 1) * kPointerSize));
- // FunctionCallbackInfo::values_.
- __ mov(ApiParameterOperand(3), scratch);
- // FunctionCallbackInfo::length_.
- __ mov(ApiParameterOperand(4), argc.reg());
- // FunctionCallbackInfo::is_construct_call_.
- __ lea(argc.reg(), Operand(argc.reg(), times_pointer_size,
- (FCA::kArgsLength + 1) * kPointerSize));
- __ mov(ApiParameterOperand(5), argc.reg());
- }
+ __ add(scratch, Immediate((argc() + FCA::kArgsLength - 1) * kPointerSize));
+ // FunctionCallbackInfo::values_.
+ __ mov(ApiParameterOperand(3), scratch);
+ // FunctionCallbackInfo::length_.
+ __ Move(ApiParameterOperand(4), Immediate(argc()));
+ // FunctionCallbackInfo::is_construct_call_.
+ __ Move(ApiParameterOperand(5), Immediate(0));
// v8::InvocationCallback's argument.
__ lea(scratch, ApiParameterOperand(2));
@@ -5491,7 +5426,7 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
(2 + FCA::kContextSaveIndex) * kPointerSize);
// Stores return the first js argument
int return_value_offset = 0;
- if (return_first_arg) {
+ if (is_store()) {
return_value_offset = 2 + FCA::kArgsLength;
} else {
return_value_offset = 2 + FCA::kReturnValueOffset;
@@ -5500,10 +5435,8 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
int stack_space = 0;
Operand is_construct_call_operand = ApiParameterOperand(5);
Operand* stack_space_operand = &is_construct_call_operand;
- if (argc.is_immediate()) {
- stack_space = argc.immediate() + FCA::kArgsLength + 1;
- stack_space_operand = nullptr;
- }
+ stack_space = argc() + FCA::kArgsLength + 1;
+ stack_space_operand = nullptr;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
ApiParameterOperand(1), stack_space,
stack_space_operand, return_value_operand,
@@ -5511,23 +5444,6 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
}
-void CallApiFunctionStub::Generate(MacroAssembler* masm) {
- bool call_data_undefined = this->call_data_undefined();
- CallApiFunctionStubHelper(masm, ParameterCount(eax), false,
- call_data_undefined, false);
-}
-
-
-void CallApiAccessorStub::Generate(MacroAssembler* masm) {
- bool is_store = this->is_store();
- int argc = this->argc();
- bool call_data_undefined = this->call_data_undefined();
- bool is_lazy = this->is_lazy();
- CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
- call_data_undefined, is_lazy);
-}
-
-
void CallApiGetterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- esp[0] : return address
diff --git a/deps/v8/src/x87/codegen-x87.cc b/deps/v8/src/x87/codegen-x87.cc
index c66166f7f0..776edeb646 100644
--- a/deps/v8/src/x87/codegen-x87.cc
+++ b/deps/v8/src/x87/codegen-x87.cc
@@ -381,6 +381,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
__ j(equal, &only_change_map);
+ __ push(esi);
__ push(eax);
__ push(edx);
__ push(ebx);
@@ -425,10 +426,10 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Call into runtime if GC is required.
__ bind(&gc_required);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ pop(ebx);
__ pop(edx);
__ pop(eax);
+ __ pop(esi);
__ jmp(fail);
// Box doubles into heap numbers.
@@ -477,7 +478,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Restore registers.
__ pop(eax);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ pop(esi);
__ bind(&success);
}
@@ -545,11 +546,11 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
}
// Rule out short external strings.
STATIC_ASSERT(kShortExternalStringTag != 0);
- __ test_b(result, kShortExternalStringMask);
+ __ test_b(result, Immediate(kShortExternalStringMask));
__ j(not_zero, call_runtime);
// Check encoding.
STATIC_ASSERT(kTwoByteStringTag == 0);
- __ test_b(result, kStringEncodingMask);
+ __ test_b(result, Immediate(kStringEncodingMask));
__ mov(result, FieldOperand(string, ExternalString::kResourceDataOffset));
__ j(not_equal, &one_byte_external, Label::kNear);
// Two-byte string.
diff --git a/deps/v8/src/x87/deoptimizer-x87.cc b/deps/v8/src/x87/deoptimizer-x87.cc
index 3b90276a93..9d4645e782 100644
--- a/deps/v8/src/x87/deoptimizer-x87.cc
+++ b/deps/v8/src/x87/deoptimizer-x87.cc
@@ -186,20 +186,6 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
}
}
-bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
- int parameter_count = shared->internal_formal_parameter_count() + 1;
- unsigned input_frame_size = input_->GetFrameSize();
- unsigned alignment_state_offset =
- input_frame_size - parameter_count * kPointerSize -
- StandardFrameConstants::kFixedFrameSize -
- kPointerSize;
- DCHECK(JavaScriptFrameConstants::kDynamicAlignmentStateOffset ==
- JavaScriptFrameConstants::kLocal0Offset);
- int32_t alignment_state = input_->GetFrameSlot(alignment_state_offset);
- return (alignment_state == kAlignmentPaddingPushed);
-}
-
-
#define __ masm()->
void Deoptimizer::TableEntryGenerator::Generate() {
@@ -260,7 +246,12 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ push(edi);
// Allocate a new deoptimizer object.
__ PrepareCallCFunction(6, eax);
+ __ mov(eax, Immediate(0));
+ Label context_check;
+ __ mov(edi, Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ JumpIfSmi(edi, &context_check);
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ bind(&context_check);
__ mov(Operand(esp, 0 * kPointerSize), eax); // Function.
__ mov(Operand(esp, 1 * kPointerSize), Immediate(type())); // Bailout type.
__ mov(Operand(esp, 2 * kPointerSize), ebx); // Bailout id.
@@ -336,20 +327,9 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
__ pop(eax);
__ pop(edi);
+ __ mov(esp, Operand(eax, Deoptimizer::caller_frame_top_offset()));
- // If frame was dynamically aligned, pop padding.
- Label no_padding;
- __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
- Immediate(0));
- __ j(equal, &no_padding);
- __ pop(ecx);
- if (FLAG_debug_code) {
- __ cmp(ecx, Immediate(kAlignmentZapValue));
- __ Assert(equal, kAlignmentMarkerExpected);
- }
- __ bind(&no_padding);
-
- // Replace the current frame with the output frames.
+ // Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop,
outer_loop_header, inner_loop_header;
// Outer loop state: eax = current FrameDescription**, edx = one past the
diff --git a/deps/v8/src/x87/disasm-x87.cc b/deps/v8/src/x87/disasm-x87.cc
index a3f1939b73..91ce2272e9 100644
--- a/deps/v8/src/x87/disasm-x87.cc
+++ b/deps/v8/src/x87/disasm-x87.cc
@@ -29,29 +29,18 @@ struct ByteMnemonic {
};
static const ByteMnemonic two_operands_instr[] = {
- {0x01, "add", OPER_REG_OP_ORDER},
- {0x03, "add", REG_OPER_OP_ORDER},
- {0x09, "or", OPER_REG_OP_ORDER},
- {0x0B, "or", REG_OPER_OP_ORDER},
- {0x1B, "sbb", REG_OPER_OP_ORDER},
- {0x21, "and", OPER_REG_OP_ORDER},
- {0x23, "and", REG_OPER_OP_ORDER},
- {0x29, "sub", OPER_REG_OP_ORDER},
- {0x2A, "subb", REG_OPER_OP_ORDER},
- {0x2B, "sub", REG_OPER_OP_ORDER},
- {0x31, "xor", OPER_REG_OP_ORDER},
- {0x33, "xor", REG_OPER_OP_ORDER},
- {0x38, "cmpb", OPER_REG_OP_ORDER},
- {0x39, "cmp", OPER_REG_OP_ORDER},
- {0x3A, "cmpb", REG_OPER_OP_ORDER},
- {0x3B, "cmp", REG_OPER_OP_ORDER},
- {0x84, "test_b", REG_OPER_OP_ORDER},
- {0x85, "test", REG_OPER_OP_ORDER},
- {0x87, "xchg", REG_OPER_OP_ORDER},
- {0x8A, "mov_b", REG_OPER_OP_ORDER},
- {0x8B, "mov", REG_OPER_OP_ORDER},
- {0x8D, "lea", REG_OPER_OP_ORDER},
- {-1, "", UNSET_OP_ORDER}};
+ {0x01, "add", OPER_REG_OP_ORDER}, {0x03, "add", REG_OPER_OP_ORDER},
+ {0x09, "or", OPER_REG_OP_ORDER}, {0x0B, "or", REG_OPER_OP_ORDER},
+ {0x13, "adc", REG_OPER_OP_ORDER}, {0x1B, "sbb", REG_OPER_OP_ORDER},
+ {0x21, "and", OPER_REG_OP_ORDER}, {0x23, "and", REG_OPER_OP_ORDER},
+ {0x29, "sub", OPER_REG_OP_ORDER}, {0x2A, "subb", REG_OPER_OP_ORDER},
+ {0x2B, "sub", REG_OPER_OP_ORDER}, {0x31, "xor", OPER_REG_OP_ORDER},
+ {0x33, "xor", REG_OPER_OP_ORDER}, {0x38, "cmpb", OPER_REG_OP_ORDER},
+ {0x39, "cmp", OPER_REG_OP_ORDER}, {0x3A, "cmpb", REG_OPER_OP_ORDER},
+ {0x3B, "cmp", REG_OPER_OP_ORDER}, {0x84, "test_b", REG_OPER_OP_ORDER},
+ {0x85, "test", REG_OPER_OP_ORDER}, {0x87, "xchg", REG_OPER_OP_ORDER},
+ {0x8A, "mov_b", REG_OPER_OP_ORDER}, {0x8B, "mov", REG_OPER_OP_ORDER},
+ {0x8D, "lea", REG_OPER_OP_ORDER}, {-1, "", UNSET_OP_ORDER}};
static const ByteMnemonic zero_operands_instr[] = {
{0xC3, "ret", UNSET_OP_ORDER},
@@ -906,18 +895,34 @@ static const char* F0Mnem(byte f0byte) {
switch (f0byte) {
case 0x0B:
return "ud2";
- case 0x18: return "prefetch";
- case 0xA2: return "cpuid";
- case 0xBE: return "movsx_b";
- case 0xBF: return "movsx_w";
- case 0xB6: return "movzx_b";
- case 0xB7: return "movzx_w";
- case 0xAF: return "imul";
- case 0xA5: return "shld";
- case 0xAD: return "shrd";
- case 0xAC: return "shrd"; // 3-operand version.
- case 0xAB: return "bts";
- case 0xBD: return "bsr";
+ case 0x18:
+ return "prefetch";
+ case 0xA2:
+ return "cpuid";
+ case 0xBE:
+ return "movsx_b";
+ case 0xBF:
+ return "movsx_w";
+ case 0xB6:
+ return "movzx_b";
+ case 0xB7:
+ return "movzx_w";
+ case 0xAF:
+ return "imul";
+ case 0xA4:
+ return "shld";
+ case 0xA5:
+ return "shld";
+ case 0xAD:
+ return "shrd";
+ case 0xAC:
+ return "shrd"; // 3-operand version.
+ case 0xAB:
+ return "bts";
+ case 0xBC:
+ return "bsf";
+ case 0xBD:
+ return "bsr";
default: return NULL;
}
}
@@ -1134,8 +1139,18 @@ int DisassemblerX87::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += SetCC(data);
} else if ((f0byte & 0xF0) == 0x40) {
data += CMov(data);
+ } else if (f0byte == 0xA4 || f0byte == 0xAC) {
+ // shld, shrd
+ data += 2;
+ AppendToBuffer("%s ", f0mnem);
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ data += 2;
+ AppendToBuffer("%s,%s,%d", NameOfCPURegister(rm),
+ NameOfCPURegister(regop), static_cast<int>(imm8));
} else if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) {
- // shrd, shld, bts
+ // shrd_cl, shld_cl, bts
data += 2;
AppendToBuffer("%s ", f0mnem);
int mod, regop, rm;
@@ -1266,6 +1281,13 @@ int DisassemblerX87::InstructionDecode(v8::internal::Vector<char> out_buffer,
int imm = *reinterpret_cast<int16_t*>(data);
AppendToBuffer(",0x%x", imm);
data += 2;
+ } else if (*data == 0xF7) {
+ data++;
+ AppendToBuffer("%s ", "test_w");
+ data += PrintRightOperand(data);
+ int imm = *reinterpret_cast<int16_t*>(data);
+ AppendToBuffer(",0x%x", imm);
+ data += 2;
} else if (*data == 0x0F) {
data++;
if (*data == 0x38) {
diff --git a/deps/v8/src/x87/frames-x87.h b/deps/v8/src/x87/frames-x87.h
index 1b900784cc..1a378ed3ec 100644
--- a/deps/v8/src/x87/frames-x87.h
+++ b/deps/v8/src/x87/frames-x87.h
@@ -28,10 +28,6 @@ const int kNumJSCallerSaved = 5;
// Number of registers for which space is reserved in safepoints.
const int kNumSafepointRegisters = 8;
-const int kNoAlignmentPadding = 0;
-const int kAlignmentPaddingPushed = 2;
-const int kAlignmentZapValue = 0x12345678; // Not heap object tagged.
-
// ----------------------------------------------------
@@ -46,13 +42,11 @@ class EntryFrameConstants : public AllStatic {
static const int kArgvOffset = +6 * kPointerSize;
};
-
-class ExitFrameConstants : public AllStatic {
+class ExitFrameConstants : public TypedFrameConstants {
public:
- static const int kFrameSize = 2 * kPointerSize;
-
- static const int kCodeOffset = -2 * kPointerSize;
- static const int kSPOffset = -1 * kPointerSize;
+ static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ DEFINE_TYPED_FRAME_SIZES(2);
static const int kCallerFPOffset = 0 * kPointerSize;
static const int kCallerPCOffset = +1 * kPointerSize;
@@ -70,13 +64,11 @@ class JavaScriptFrameConstants : public AllStatic {
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
static const int kLastParameterOffset = +2 * kPointerSize;
- static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+ static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
// Caller SP-relative.
static const int kParam0Offset = -2 * kPointerSize;
static const int kReceiverOffset = -1 * kPointerSize;
-
- static const int kDynamicAlignmentStateOffset = kLocal0Offset;
};
diff --git a/deps/v8/src/x87/interface-descriptors-x87.cc b/deps/v8/src/x87/interface-descriptors-x87.cc
index bfed342eb8..e41d42cdf5 100644
--- a/deps/v8/src/x87/interface-descriptors-x87.cc
+++ b/deps/v8/src/x87/interface-descriptors-x87.cc
@@ -111,36 +111,9 @@ void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void ToNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // ToNumberStub invokes a function, and therefore needs a context.
- Register registers[] = {eax};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
-// static
-const Register ToLengthDescriptor::ReceiverRegister() { return eax; }
-
-
-// static
-const Register ToStringDescriptor::ReceiverRegister() { return eax; }
-
-
-// static
-const Register ToNameDescriptor::ReceiverRegister() { return eax; }
-
// static
-const Register ToObjectDescriptor::ReceiverRegister() { return eax; }
-
-
-void NumberToStringDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {eax};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
+const Register TypeConversionDescriptor::ArgumentRegister() { return eax; }
void TypeofDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -269,6 +242,13 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(0, nullptr, nullptr);
}
+#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
+ void Allocate##Type##Descriptor::InitializePlatformSpecific( \
+ CallInterfaceDescriptorData* data) { \
+ data->InitializePlatformSpecific(0, nullptr, nullptr); \
+ }
+SIMD128_TYPES(SIMD128_ALLOC_DESC)
+#undef SIMD128_ALLOC_DESC
void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -313,24 +293,16 @@ void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void CompareDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {edx, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
-void CompareNilDescriptor::InitializePlatformSpecific(
+void FastArrayPushDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
+ // stack param count needs (arg count)
Register registers[] = {eax};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void ToBooleanDescriptor::InitializePlatformSpecific(
+void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {eax};
+ Register registers[] = {edx, eax};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
@@ -394,21 +366,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void ApiFunctionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- edi, // callee
- ebx, // call_data
- ecx, // holder
- edx, // api_function_address
- eax, // actual number of arguments
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ApiAccessorDescriptor::InitializePlatformSpecific(
+void ApiCallbackDescriptorBase::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
edi, // callee
diff --git a/deps/v8/src/x87/macro-assembler-x87.cc b/deps/v8/src/x87/macro-assembler-x87.cc
index 0c459ebfd4..b46167d1f9 100644
--- a/deps/v8/src/x87/macro-assembler-x87.cc
+++ b/deps/v8/src/x87/macro-assembler-x87.cc
@@ -196,15 +196,15 @@ void MacroAssembler::RememberedSetHelper(
mov(Operand::StaticVariable(store_buffer), scratch);
// Call stub on end of buffer.
// Check for end of buffer.
- test(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
+ test(scratch, Immediate(StoreBuffer::kStoreBufferMask));
if (and_then == kReturnAtEnd) {
Label buffer_overflowed;
- j(not_equal, &buffer_overflowed, Label::kNear);
+ j(equal, &buffer_overflowed, Label::kNear);
ret(0);
bind(&buffer_overflowed);
} else {
DCHECK(and_then == kFallThroughAtEnd);
- j(equal, &done, Label::kNear);
+ j(not_equal, &done, Label::kNear);
}
StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
CallStub(&store_buffer_overflow);
@@ -376,7 +376,7 @@ void MacroAssembler::RecordWriteField(
lea(dst, FieldOperand(object, offset));
if (emit_debug_code()) {
Label ok;
- test_b(dst, (1 << kPointerSizeLog2) - 1);
+ test_b(dst, Immediate((1 << kPointerSizeLog2) - 1));
j(zero, &ok, Label::kNear);
int3();
bind(&ok);
@@ -406,7 +406,7 @@ void MacroAssembler::RecordWriteForMap(Register object, Handle<Map> map,
if (emit_debug_code()) {
Label ok;
lea(address, FieldOperand(object, HeapObject::kMapOffset));
- test_b(address, (1 << kPointerSizeLog2) - 1);
+ test_b(address, Immediate((1 << kPointerSizeLog2) - 1));
j(zero, &ok, Label::kNear);
int3();
bind(&ok);
@@ -597,6 +597,71 @@ void MacroAssembler::DebugBreak() {
call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
}
+void MacroAssembler::ShlPair(Register high, Register low, uint8_t shift) {
+ if (shift >= 32) {
+ mov(high, low);
+ shl(high, shift - 32);
+ xor_(low, low);
+ } else {
+ shld(high, low, shift);
+ shl(low, shift);
+ }
+}
+
+void MacroAssembler::ShlPair_cl(Register high, Register low) {
+ shld_cl(high, low);
+ shl_cl(low);
+ Label done;
+ test(ecx, Immediate(0x20));
+ j(equal, &done, Label::kNear);
+ mov(high, low);
+ xor_(low, low);
+ bind(&done);
+}
+
+void MacroAssembler::ShrPair(Register high, Register low, uint8_t shift) {
+ if (shift >= 32) {
+ mov(low, high);
+ shr(low, shift - 32);
+ xor_(high, high);
+ } else {
+ shrd(high, low, shift);
+ shr(high, shift);
+ }
+}
+
+void MacroAssembler::ShrPair_cl(Register high, Register low) {
+ shrd_cl(low, high);
+ shr_cl(high);
+ Label done;
+ test(ecx, Immediate(0x20));
+ j(equal, &done, Label::kNear);
+ mov(low, high);
+ xor_(high, high);
+ bind(&done);
+}
+
+void MacroAssembler::SarPair(Register high, Register low, uint8_t shift) {
+ if (shift >= 32) {
+ mov(low, high);
+ sar(low, shift - 32);
+ sar(high, 31);
+ } else {
+ shrd(high, low, shift);
+ sar(high, shift);
+ }
+}
+
+void MacroAssembler::SarPair_cl(Register high, Register low) {
+ shrd_cl(low, high);
+ sar_cl(high);
+ Label done;
+ test(ecx, Immediate(0x20));
+ j(equal, &done, Label::kNear);
+ mov(low, high);
+ sar(high, 31);
+ bind(&done);
+}
bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
static const int kMaxImmediateBits = 17;
@@ -634,8 +699,7 @@ void MacroAssembler::CmpObjectType(Register heap_object,
void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
- cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
- static_cast<int8_t>(type));
+ cmpb(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
}
@@ -647,7 +711,7 @@ void MacroAssembler::CheckFastElements(Register map,
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
cmpb(FieldOperand(map, Map::kBitField2Offset),
- Map::kMaximumBitField2FastHoleyElementValue);
+ Immediate(Map::kMaximumBitField2FastHoleyElementValue));
j(above, fail, distance);
}
@@ -660,10 +724,10 @@ void MacroAssembler::CheckFastObjectElements(Register map,
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
cmpb(FieldOperand(map, Map::kBitField2Offset),
- Map::kMaximumBitField2FastHoleySmiElementValue);
+ Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
j(below_equal, fail, distance);
cmpb(FieldOperand(map, Map::kBitField2Offset),
- Map::kMaximumBitField2FastHoleyElementValue);
+ Immediate(Map::kMaximumBitField2FastHoleyElementValue));
j(above, fail, distance);
}
@@ -674,7 +738,7 @@ void MacroAssembler::CheckFastSmiElements(Register map,
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
cmpb(FieldOperand(map, Map::kBitField2Offset),
- Map::kMaximumBitField2FastHoleySmiElementValue);
+ Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
j(above, fail, distance);
}
@@ -761,7 +825,7 @@ Condition MacroAssembler::IsObjectNameType(Register heap_object,
Register instance_type) {
mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
- cmpb(instance_type, static_cast<uint8_t>(LAST_NAME_TYPE));
+ cmpb(instance_type, Immediate(LAST_NAME_TYPE));
return below_equal;
}
@@ -842,6 +906,15 @@ void MacroAssembler::AssertNumber(Register object) {
}
}
+void MacroAssembler::AssertNotNumber(Register object) {
+ if (emit_debug_code()) {
+ test(object, Immediate(kSmiTagMask));
+ Check(not_equal, kOperandIsANumber);
+ cmp(FieldOperand(object, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ Check(not_equal, kOperandIsANumber);
+ }
+}
void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
@@ -933,12 +1006,10 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
}
-
-void MacroAssembler::StubPrologue() {
+void MacroAssembler::StubPrologue(StackFrame::Type type) {
push(ebp); // Caller's frame pointer.
mov(ebp, esp);
- push(esi); // Callee's context.
- push(Immediate(Smi::FromInt(StackFrame::STUB)));
+ push(Immediate(Smi::FromInt(type)));
}
@@ -976,9 +1047,10 @@ void MacroAssembler::EnterFrame(StackFrame::Type type,
void MacroAssembler::EnterFrame(StackFrame::Type type) {
push(ebp);
mov(ebp, esp);
- push(esi);
push(Immediate(Smi::FromInt(type)));
- push(Immediate(CodeObject()));
+ if (type == StackFrame::INTERNAL) {
+ push(Immediate(CodeObject()));
+ }
if (emit_debug_code()) {
cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value()));
Check(not_equal, kCodeObjectNotProperlyPatched);
@@ -988,7 +1060,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
if (emit_debug_code()) {
- cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
+ cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
Immediate(Smi::FromInt(type)));
Check(equal, kStackFrameTypesMustMatch);
}
@@ -998,15 +1070,17 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
void MacroAssembler::EnterExitFramePrologue() {
// Set up the frame structure on the stack.
- DCHECK(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
- DCHECK(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
- DCHECK(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
+ DCHECK_EQ(+2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
+ DCHECK_EQ(+1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
+ DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
push(ebp);
mov(ebp, esp);
// Reserve room for entry stack pointer and push the code object.
- DCHECK(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
+ push(Immediate(Smi::FromInt(StackFrame::EXIT)));
+ DCHECK_EQ(-2 * kPointerSize, ExitFrameConstants::kSPOffset);
push(Immediate(0)); // Saved entry sp, patched before call.
+ DCHECK_EQ(-3 * kPointerSize, ExitFrameConstants::kCodeOffset);
push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot.
// Save the frame pointer and the context in top.
@@ -1025,7 +1099,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
// Store FPU state to m108byte.
int space = 108 + argc * kPointerSize;
sub(esp, Immediate(space));
- const int offset = -2 * kPointerSize; // entry fp + code object.
+ const int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
fnsave(MemOperand(ebp, offset - 108));
} else {
sub(esp, Immediate(argc * kPointerSize));
@@ -1065,7 +1139,7 @@ void MacroAssembler::EnterApiExitFrame(int argc) {
void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
// Optionally restore FPU state.
if (save_doubles) {
- const int offset = -2 * kPointerSize;
+ const int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
frstor(MemOperand(ebp, offset - 108));
}
@@ -1145,8 +1219,18 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
DCHECK(!holder_reg.is(scratch2));
DCHECK(!scratch1.is(scratch2));
- // Load current lexical context from the stack frame.
- mov(scratch1, Operand(ebp, StandardFrameConstants::kContextOffset));
+ // Load current lexical context from the active StandardFrame, which
+ // may require crawling past STUB frames.
+ Label load_context;
+ Label has_context;
+ mov(scratch2, ebp);
+ bind(&load_context);
+ mov(scratch1,
+ MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
+ JumpIfNotSmi(scratch1, &has_context);
+ mov(scratch2, MemOperand(scratch2, CommonFrameConstants::kCallerFPOffset));
+ jmp(&load_context);
+ bind(&has_context);
// When generating debug code, make sure the lexical context is set.
if (emit_debug_code()) {
@@ -1859,7 +1943,7 @@ void MacroAssembler::BooleanBitTest(Register object,
int byte_index = bit_index / kBitsPerByte;
int byte_bit_index = bit_index & (kBitsPerByte - 1);
test_b(FieldOperand(object, field_offset + byte_index),
- static_cast<byte>(1 << byte_bit_index));
+ Immediate(1 << byte_bit_index));
}
@@ -2024,6 +2108,87 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}
+void MacroAssembler::PrepareForTailCall(
+ const ParameterCount& callee_args_count, Register caller_args_count_reg,
+ Register scratch0, Register scratch1, ReturnAddressState ra_state,
+ int number_of_temp_values_after_return_address) {
+#if DEBUG
+ if (callee_args_count.is_reg()) {
+ DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
+ scratch1));
+ } else {
+ DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
+ }
+ DCHECK(ra_state != ReturnAddressState::kNotOnStack ||
+ number_of_temp_values_after_return_address == 0);
+#endif
+
+ // Calculate the destination address where we will put the return address
+ // after we drop current frame.
+ Register new_sp_reg = scratch0;
+ if (callee_args_count.is_reg()) {
+ sub(caller_args_count_reg, callee_args_count.reg());
+ lea(new_sp_reg,
+ Operand(ebp, caller_args_count_reg, times_pointer_size,
+ StandardFrameConstants::kCallerPCOffset -
+ number_of_temp_values_after_return_address * kPointerSize));
+ } else {
+ lea(new_sp_reg, Operand(ebp, caller_args_count_reg, times_pointer_size,
+ StandardFrameConstants::kCallerPCOffset -
+ (callee_args_count.immediate() +
+ number_of_temp_values_after_return_address) *
+ kPointerSize));
+ }
+
+ if (FLAG_debug_code) {
+ cmp(esp, new_sp_reg);
+ Check(below, kStackAccessBelowStackPointer);
+ }
+
+ // Copy return address from caller's frame to current frame's return address
+ // to avoid its trashing and let the following loop copy it to the right
+ // place.
+ Register tmp_reg = scratch1;
+ if (ra_state == ReturnAddressState::kOnStack) {
+ mov(tmp_reg, Operand(ebp, StandardFrameConstants::kCallerPCOffset));
+ mov(Operand(esp, number_of_temp_values_after_return_address * kPointerSize),
+ tmp_reg);
+ } else {
+ DCHECK(ReturnAddressState::kNotOnStack == ra_state);
+ DCHECK_EQ(0, number_of_temp_values_after_return_address);
+ Push(Operand(ebp, StandardFrameConstants::kCallerPCOffset));
+ }
+
+ // Restore caller's frame pointer now as it could be overwritten by
+ // the copying loop.
+ mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+
+ // +2 here is to copy both receiver and return address.
+ Register count_reg = caller_args_count_reg;
+ if (callee_args_count.is_reg()) {
+ lea(count_reg, Operand(callee_args_count.reg(),
+ 2 + number_of_temp_values_after_return_address));
+ } else {
+ mov(count_reg, Immediate(callee_args_count.immediate() + 2 +
+ number_of_temp_values_after_return_address));
+ // TODO(ishell): Unroll copying loop for small immediate values.
+ }
+
+ // Now copy callee arguments to the caller frame going backwards to avoid
+ // callee arguments corruption (source and destination areas could overlap).
+ Label loop, entry;
+ jmp(&entry, Label::kNear);
+ bind(&loop);
+ dec(count_reg);
+ mov(tmp_reg, Operand(esp, count_reg, times_pointer_size, 0));
+ mov(Operand(new_sp_reg, count_reg, times_pointer_size, 0), tmp_reg);
+ bind(&entry);
+ cmp(count_reg, Immediate(0));
+ j(not_equal, &loop, Label::kNear);
+
+ // Leave current frame.
+ mov(esp, new_sp_reg);
+}
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
@@ -2098,7 +2263,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
Label skip_flooding;
ExternalReference step_in_enabled =
ExternalReference::debug_step_in_enabled_address(isolate());
- cmpb(Operand::StaticVariable(step_in_enabled), 0);
+ cmpb(Operand::StaticVariable(step_in_enabled), Immediate(0));
j(equal, &skip_flooding);
{
FrameScope frame(this,
@@ -2695,7 +2860,7 @@ void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
Label succeed;
test(operand, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
j(zero, &succeed);
- cmpb(operand, static_cast<uint8_t>(SYMBOL_TYPE));
+ cmpb(operand, Immediate(SYMBOL_TYPE));
j(not_equal, not_unique_name, distance);
bind(&succeed);
@@ -2843,8 +3008,7 @@ void MacroAssembler::CheckPageFlag(
and_(scratch, object);
}
if (mask < (1 << kBitsPerByte)) {
- test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
- static_cast<uint8_t>(mask));
+ test_b(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
} else {
test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
}
@@ -2867,7 +3031,7 @@ void MacroAssembler::CheckPageFlagForMap(
DCHECK(!isolate()->heap()->mark_compact_collector()->
IsOnEvacuationCandidate(*map));
if (mask < (1 << kBitsPerByte)) {
- test_b(Operand::StaticVariable(reference), static_cast<uint8_t>(mask));
+ test_b(Operand::StaticVariable(reference), Immediate(mask));
} else {
test(Operand::StaticVariable(reference), Immediate(mask));
}
@@ -2907,7 +3071,8 @@ void MacroAssembler::HasColor(Register object,
jmp(&other_color, Label::kNear);
bind(&word_boundary);
- test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize), 1);
+ test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize),
+ Immediate(1));
j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
bind(&other_color);
@@ -3009,19 +3174,40 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
Register receiver_reg,
Register scratch_reg,
Label* no_memento_found) {
- ExternalReference new_space_start =
- ExternalReference::new_space_start(isolate());
+ Label map_check;
+ Label top_check;
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
-
- lea(scratch_reg, Operand(receiver_reg,
- JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
- cmp(scratch_reg, Immediate(new_space_start));
- j(less, no_memento_found);
+ const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
+ const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+
+ // Bail out if the object is not in new space.
+ JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
+ // If the object is in new space, we need to check whether it is on the same
+ // page as the current top.
+ lea(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
+ xor_(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
+ test(scratch_reg, Immediate(~Page::kPageAlignmentMask));
+ j(zero, &top_check);
+ // The object is on a different page than allocation top. Bail out if the
+ // object sits on the page boundary as no memento can follow and we cannot
+ // touch the memory following it.
+ lea(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
+ xor_(scratch_reg, receiver_reg);
+ test(scratch_reg, Immediate(~Page::kPageAlignmentMask));
+ j(not_zero, no_memento_found);
+ // Continue with the actual map check.
+ jmp(&map_check);
+ // If top is on the same page as the current object, we need to check whether
+ // we are below top.
+ bind(&top_check);
+ lea(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
j(greater, no_memento_found);
- cmp(MemOperand(scratch_reg, -AllocationMemento::kSize),
- Immediate(isolate()->factory()->allocation_memento_map()));
+ // Memento map check.
+ bind(&map_check);
+ mov(scratch_reg, Operand(receiver_reg, kMementoMapOffset));
+ cmp(scratch_reg, Immediate(isolate()->factory()->allocation_memento_map()));
}
diff --git a/deps/v8/src/x87/macro-assembler-x87.h b/deps/v8/src/x87/macro-assembler-x87.h
index fc493610c4..55714132f7 100644
--- a/deps/v8/src/x87/macro-assembler-x87.h
+++ b/deps/v8/src/x87/macro-assembler-x87.h
@@ -44,6 +44,8 @@ enum PointersToHereCheck {
enum RegisterValueType { REGISTER_VALUE_IS_SMI, REGISTER_VALUE_IS_INT32 };
+enum class ReturnAddressState { kOnStack, kNotOnStack };
+
#ifdef DEBUG
bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
Register reg4 = no_reg, Register reg5 = no_reg,
@@ -234,7 +236,7 @@ class MacroAssembler: public Assembler {
void DebugBreak();
// Generates function and stub prologue code.
- void StubPrologue();
+ void StubPrologue(StackFrame::Type type);
void Prologue(bool code_pre_aging);
// Enter specific kind of exit frame. Expects the number of
@@ -318,6 +320,20 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// JavaScript invokes
+ // Removes current frame and its arguments from the stack preserving
+ // the arguments and a return address pushed to the stack for the next call.
+ // |ra_state| defines whether return address is already pushed to stack or
+ // not. Both |callee_args_count| and |caller_args_count_reg| do not include
+ // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
+ // is trashed. |number_of_temp_values_after_return_address| specifies
+ // the number of words pushed to the stack after the return address. This is
+ // to allow "allocation" of scratch registers that this function requires
+ // by saving their values on the stack.
+ void PrepareForTailCall(const ParameterCount& callee_args_count,
+ Register caller_args_count_reg, Register scratch0,
+ Register scratch1, ReturnAddressState ra_state,
+ int number_of_temp_values_after_return_address);
+
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
@@ -344,6 +360,12 @@ class MacroAssembler: public Assembler {
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
+ void ShlPair(Register high, Register low, uint8_t imm8);
+ void ShlPair_cl(Register high, Register low);
+ void ShrPair(Register high, Register low, uint8_t imm8);
+ void ShrPair_cl(Register high, Register src);
+ void SarPair(Register high, Register low, uint8_t imm8);
+ void SarPair_cl(Register high, Register low);
// Expression support
// Support for constant splitting.
@@ -509,6 +531,7 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a number, enabled via --debug-code.
void AssertNumber(Register object);
+ void AssertNotNumber(Register object);
// Abort execution if argument is not a smi, enabled via --debug-code.
void AssertSmi(Register object);
@@ -757,12 +780,6 @@ class MacroAssembler: public Assembler {
void Popcnt(Register dst, Register src) { Popcnt(dst, Operand(src)); }
void Popcnt(Register dst, const Operand& src);
- // Emit call to the code we are currently generating.
- void CallSelf() {
- Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
- call(self, RelocInfo::CODE_TARGET);
- }
-
// Move if the registers are not identical.
void Move(Register target, Register source);
@@ -770,6 +787,7 @@ class MacroAssembler: public Assembler {
void Move(Register dst, const Immediate& x);
void Move(const Operand& dst, const Immediate& x);
+ void Move(Register dst, Handle<Object> handle) { LoadObject(dst, handle); }
void Move(Register dst, Smi* source) { Move(dst, Immediate(source)); }
// Push a handle value.
diff --git a/deps/v8/src/zone.cc b/deps/v8/src/zone.cc
index 1f722f2f60..a10b63612e 100644
--- a/deps/v8/src/zone.cc
+++ b/deps/v8/src/zone.cc
@@ -72,15 +72,14 @@ class Segment {
size_t size_;
};
-
-Zone::Zone()
+Zone::Zone(base::AccountingAllocator* allocator)
: allocation_size_(0),
segment_bytes_allocated_(0),
position_(0),
limit_(0),
+ allocator_(allocator),
segment_head_(nullptr) {}
-
Zone::~Zone() {
DeleteAll();
DeleteKeptSegment();
@@ -204,7 +203,7 @@ void Zone::DeleteKeptSegment() {
// Creates a new segment, sets it size, and pushes it to the front
// of the segment chain. Returns the new segment.
Segment* Zone::NewSegment(size_t size) {
- Segment* result = reinterpret_cast<Segment*>(Malloced::New(size));
+ Segment* result = reinterpret_cast<Segment*>(allocator_->Allocate(size));
segment_bytes_allocated_ += size;
if (result != nullptr) {
result->Initialize(segment_head_, size);
@@ -217,7 +216,7 @@ Segment* Zone::NewSegment(size_t size) {
// Deletes the given segment. Does not touch the segment chain.
void Zone::DeleteSegment(Segment* segment, size_t size) {
segment_bytes_allocated_ -= size;
- Malloced::Delete(segment);
+ allocator_->Free(segment, size);
}
diff --git a/deps/v8/src/zone.h b/deps/v8/src/zone.h
index 753e2035d3..fa21155fe1 100644
--- a/deps/v8/src/zone.h
+++ b/deps/v8/src/zone.h
@@ -7,7 +7,7 @@
#include <limits>
-#include "src/allocation.h"
+#include "src/base/accounting-allocator.h"
#include "src/base/logging.h"
#include "src/globals.h"
#include "src/hashmap.h"
@@ -35,7 +35,7 @@ class Segment;
// from multi-threaded code.
class Zone final {
public:
- Zone();
+ explicit Zone(base::AccountingAllocator* allocator);
~Zone();
// Allocate 'size' bytes of memory in the Zone; expands the Zone by
@@ -64,6 +64,8 @@ class Zone final {
size_t allocation_size() const { return allocation_size_; }
+ base::AccountingAllocator* allocator() const { return allocator_; }
+
private:
// All pointers returned from New() have this alignment. In addition, if the
// object being allocated has a size that is divisible by 8 then its alignment
@@ -114,6 +116,8 @@ class Zone final {
Address position_;
Address limit_;
+ base::AccountingAllocator* allocator_;
+
Segment* segment_head_;
};
diff --git a/deps/v8/test/cctest/OWNERS b/deps/v8/test/cctest/OWNERS
index 7fae8f355a..4b2b7c51e5 100644
--- a/deps/v8/test/cctest/OWNERS
+++ b/deps/v8/test/cctest/OWNERS
@@ -9,6 +9,11 @@ per-file *-ppc*=joransiu@ca.ibm.com
per-file *-ppc*=jyan@ca.ibm.com
per-file *-ppc*=mbrandy@us.ibm.com
per-file *-ppc*=michael_dawson@ca.ibm.com
+per-file *-s390*=dstence@us.ibm.com
+per-file *-s390*=joransiu@ca.ibm.com
+per-file *-s390*=jyan@ca.ibm.com
+per-file *-s390*=mbrandy@us.ibm.com
+per-file *-s390*=michael_dawson@ca.ibm.com
per-file *-x87*=chunyang.dai@intel.com
per-file *-x87*=weiliang.lin@intel.com
per-file expression-type-collector*=aseemgarg@chromium.org
diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc
index 05f276d3f4..5681f704e1 100644
--- a/deps/v8/test/cctest/cctest.cc
+++ b/deps/v8/test/cctest/cctest.cc
@@ -51,11 +51,13 @@ v8::base::Atomic32 CcTest::isolate_used_ = 0;
v8::ArrayBuffer::Allocator* CcTest::allocator_ = NULL;
v8::Isolate* CcTest::isolate_ = NULL;
-
CcTest::CcTest(TestFunction* callback, const char* file, const char* name,
- const char* dependency, bool enabled, bool initialize)
- : callback_(callback), name_(name), dependency_(dependency),
- enabled_(enabled), initialize_(initialize), prev_(last_) {
+ bool enabled, bool initialize)
+ : callback_(callback),
+ name_(name),
+ enabled_(enabled),
+ initialize_(initialize),
+ prev_(last_) {
// Find the base name of this test (const_cast required on Windows).
char *basename = strrchr(const_cast<char *>(file), '/');
if (!basename) {
@@ -128,21 +130,18 @@ void CcTest::DisableAutomaticDispose() {
static void PrintTestList(CcTest* current) {
if (current == NULL) return;
PrintTestList(current->prev());
- if (current->dependency() != NULL) {
- printf("%s/%s<%s\n",
- current->file(), current->name(), current->dependency());
- } else {
- printf("%s/%s<\n", current->file(), current->name());
- }
+ printf("%s/%s\n", current->file(), current->name());
}
class CcTestArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
virtual void* Allocate(size_t length) {
- void* data = AllocateUninitialized(length);
+ void* data = AllocateUninitialized(length == 0 ? 1 : length);
return data == NULL ? data : memset(data, 0, length);
}
- virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
+ virtual void* AllocateUninitialized(size_t length) {
+ return malloc(length == 0 ? 1 : length);
+ }
virtual void Free(void* data, size_t length) { free(data); }
// TODO(dslomov): Remove when v8:2823 is fixed.
virtual void Free(void* data) { UNREACHABLE(); }
diff --git a/deps/v8/test/cctest/cctest.gyp b/deps/v8/test/cctest/cctest.gyp
index 3b76ce1778..2e00e88b5b 100644
--- a/deps/v8/test/cctest/cctest.gyp
+++ b/deps/v8/test/cctest/cctest.gyp
@@ -71,6 +71,7 @@
'compiler/test-pipeline.cc',
'compiler/test-representation-change.cc',
'compiler/test-run-bytecode-graph-builder.cc',
+ 'compiler/test-run-calls-to-external-references.cc',
'compiler/test-run-deopt.cc',
'compiler/test-run-inlining.cc',
'compiler/test-run-intrinsics.cc',
@@ -88,8 +89,10 @@
'cctest.cc',
'expression-type-collector.cc',
'expression-type-collector.h',
+ 'interpreter/interpreter-tester.cc',
'interpreter/test-bytecode-generator.cc',
'interpreter/test-interpreter.cc',
+ 'interpreter/test-interpreter-intrinsics.cc',
'interpreter/bytecode-expectations-printer.cc',
'interpreter/bytecode-expectations-printer.h',
'gay-fixed.cc',
@@ -140,7 +143,6 @@
'test-fixed-dtoa.cc',
'test-flags.cc',
'test-func-name-inference.cc',
- 'test-gc-tracer.cc',
'test-global-handles.cc',
'test-global-object.cc',
'test-hashing.cc',
@@ -167,7 +169,6 @@
'test-sampler-api.cc',
'test-serialize.cc',
'test-simd.cc',
- 'test-slots-buffer.cc',
'test-strings.cc',
'test-symbols.cc',
'test-strtod.cc',
@@ -188,6 +189,7 @@
'test-weaksets.cc',
'trace-extension.cc',
'wasm/test-run-wasm.cc',
+ 'wasm/test-run-wasm-64.cc',
'wasm/test-run-wasm-js.cc',
'wasm/test-run-wasm-module.cc',
'wasm/test-signatures.h',
@@ -201,7 +203,8 @@
'test-code-stubs-ia32.cc',
'test-disasm-ia32.cc',
'test-macro-assembler-ia32.cc',
- 'test-log-stack-tracer.cc'
+ 'test-log-stack-tracer.cc',
+ 'test-run-wasm-relocation-ia32.cc'
],
}],
['v8_target_arch=="x64"', {
@@ -211,7 +214,8 @@
'test-code-stubs-x64.cc',
'test-disasm-x64.cc',
'test-macro-assembler-x64.cc',
- 'test-log-stack-tracer.cc'
+ 'test-log-stack-tracer.cc',
+ 'test-run-wasm-relocation-x64.cc'
],
}],
['v8_target_arch=="arm"', {
@@ -220,7 +224,8 @@
'test-code-stubs.cc',
'test-code-stubs-arm.cc',
'test-disasm-arm.cc',
- 'test-macro-assembler-arm.cc'
+ 'test-macro-assembler-arm.cc',
+ 'test-run-wasm-relocation-arm.cc'
],
}],
['v8_target_arch=="arm64"', {
@@ -232,7 +237,22 @@
'test-disasm-arm64.cc',
'test-fuzz-arm64.cc',
'test-javascript-arm64.cc',
- 'test-js-arm64-variables.cc'
+ 'test-js-arm64-variables.cc',
+ 'test-run-wasm-relocation-arm64.cc'
+ ],
+ }],
+ ['v8_target_arch=="s390"', {
+ 'sources': [ ### gcmole(arch:s390) ###
+ 'test-assembler-s390.cc',
+ 'test-code-stubs.cc',
+ 'test-disasm-s390.cc'
+ ],
+ }],
+ ['v8_target_arch=="s390x"', {
+ 'sources': [ ### gcmole(arch:s390x) ###
+ 'test-assembler-s390.cc',
+ 'test-code-stubs.cc',
+ 'test-disasm-s390.cc'
],
}],
['v8_target_arch=="ppc"', {
@@ -274,7 +294,8 @@
'test-code-stubs-x87.cc',
'test-disasm-x87.cc',
'test-macro-assembler-x87.cc',
- 'test-log-stack-tracer.cc'
+ 'test-log-stack-tracer.cc',
+ 'test-run-wasm-relocation-x87.cc'
],
}],
[ 'OS=="linux" or OS=="qnx"', {
@@ -293,7 +314,9 @@
},
},
}],
- ['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
+ ['v8_target_arch=="ppc" or v8_target_arch=="ppc64" \
+ or v8_target_arch=="arm" or v8_target_arch=="arm64" \
+ or v8_target_arch=="s390" or v8_target_arch=="s390x"', {
# disable fmadd/fmsub so that expected results match generated code in
# RunFloat64MulAndFloat64Add1 and friends.
'cflags': ['-ffp-contract=off'],
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index fe9ae6e38d..dac3a5b94f 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -34,37 +34,23 @@
#include "src/v8.h"
#ifndef TEST
-#define TEST(Name) \
- static void Test##Name(); \
- CcTest register_test_##Name(Test##Name, __FILE__, #Name, NULL, true, true); \
+#define TEST(Name) \
+ static void Test##Name(); \
+ CcTest register_test_##Name(Test##Name, __FILE__, #Name, true, true); \
static void Test##Name()
#endif
#ifndef UNINITIALIZED_TEST
-#define UNINITIALIZED_TEST(Name) \
- static void Test##Name(); \
- CcTest register_test_##Name(Test##Name, __FILE__, #Name, NULL, true, false); \
- static void Test##Name()
-#endif
-
-#ifndef DEPENDENT_TEST
-#define DEPENDENT_TEST(Name, Dep) \
- static void Test##Name(); \
- CcTest register_test_##Name(Test##Name, __FILE__, #Name, #Dep, true, true); \
- static void Test##Name()
-#endif
-
-#ifndef UNINITIALIZED_DEPENDENT_TEST
-#define UNINITIALIZED_DEPENDENT_TEST(Name, Dep) \
- static void Test##Name(); \
- CcTest register_test_##Name(Test##Name, __FILE__, #Name, #Dep, true, false); \
+#define UNINITIALIZED_TEST(Name) \
+ static void Test##Name(); \
+ CcTest register_test_##Name(Test##Name, __FILE__, #Name, true, false); \
static void Test##Name()
#endif
#ifndef DISABLED_TEST
-#define DISABLED_TEST(Name) \
- static void Test##Name(); \
- CcTest register_test_##Name(Test##Name, __FILE__, #Name, NULL, false, true); \
+#define DISABLED_TEST(Name) \
+ static void Test##Name(); \
+ CcTest register_test_##Name(Test##Name, __FILE__, #Name, false, true); \
static void Test##Name()
#endif
@@ -94,14 +80,13 @@ class CcTest {
public:
typedef void (TestFunction)();
CcTest(TestFunction* callback, const char* file, const char* name,
- const char* dependency, bool enabled, bool initialize);
+ bool enabled, bool initialize);
~CcTest() { i::DeleteArray(file_); }
void Run();
static CcTest* last() { return last_; }
CcTest* prev() { return prev_; }
const char* file() { return file_; }
const char* name() { return name_; }
- const char* dependency() { return dependency_; }
bool enabled() { return enabled_; }
static v8::Isolate* isolate() {
@@ -168,7 +153,6 @@ class CcTest {
TestFunction* callback_;
const char* file_;
const char* name_;
- const char* dependency_;
bool enabled_;
bool initialize_;
CcTest* prev_;
@@ -601,12 +585,13 @@ class InitializedHandleScope {
class HandleAndZoneScope : public InitializedHandleScope {
public:
- HandleAndZoneScope() {}
+ HandleAndZoneScope() : main_zone_(&allocator_) {}
// Prefixing the below with main_ reduces a lot of naming clashes.
i::Zone* main_zone() { return &main_zone_; }
private:
+ v8::base::AccountingAllocator allocator_;
i::Zone main_zone_;
};
diff --git a/deps/v8/test/cctest/cctest.isolate b/deps/v8/test/cctest/cctest.isolate
index ab55466214..dd03ab8384 100644
--- a/deps/v8/test/cctest/cctest.isolate
+++ b/deps/v8/test/cctest/cctest.isolate
@@ -6,6 +6,7 @@
'files': [
'./cctest.status',
'./testcfg.py',
+ './interpreter/bytecode_expectations/',
],
},
'includes': [
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index 88b2750a54..c992ab6160 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -39,7 +39,6 @@
# These tests always fail. They are here to test test.py. If
# they don't fail then test.py has failed.
'test-serialize/TestThatAlwaysFails': [FAIL],
- 'test-serialize/DependentTestThatAlwaysFails': [FAIL],
'test-api/SealHandleScope': [FAIL],
# This test always fails. It tests that LiveEdit causes abort when turned off.
@@ -73,11 +72,9 @@
# BUG(2999). The cpu profiler tests are notoriously flaky.
'test-cpu-profiler/CpuProfileDeepStack': [SKIP],
+ 'test-cpu-profiler/CollectCpuProfile': [SKIP],
'test-cpu-profiler/HotDeoptNoFrameEntry': [SKIP],
-
- # BUG(3525). Test crashes flakily.
- 'test-debug/RecursiveBreakpoints': [PASS, FLAKY],
- 'test-debug/RecursiveBreakpointsGlobal': [PASS, FLAKY],
+ 'test-cpu-profiler/SampleWhenFrameIsNotSetup': [SKIP],
# BUG(v8:4358). Hangs flakily.
'test-debug/ProcessDebugMessagesThreaded': [SKIP],
@@ -89,17 +86,13 @@
'test-func-name-inference/UpperCaseClass': [FAIL],
'test-func-name-inference/LowerCaseClass': [FAIL],
- # BUG(3956). Strong mode is being deprecated. Decide about these tests.
- 'test-api/StrongModeAccessCheckAllowed': [FAIL],
- 'test-api/StrongModeAccessCheckBlocked': [FAIL],
-
##############################################################################
# TurboFan compiler failures.
# Some tests are just too slow to run for now.
'test-heap/IncrementalMarkingStepMakesBigProgressWithLargeObjects': [PASS, NO_VARIANTS],
'test-heap-profiler/ManyLocalsInSharedContext': [PASS, NO_VARIANTS],
- 'test-serialize/SerializeToplevelLargeCodeObject': [PASS, NO_VARIANTS],
+ 'test-serialize/CodeSerializerLargeCodeObject': [PASS, NO_VARIANTS],
'test-debug/ThreadedDebugging': [PASS, NO_VARIANTS],
# BUG(3742).
'test-mark-compact/MarkCompactCollector': [PASS, ['arch==arm', NO_VARIANTS]],
@@ -129,7 +122,6 @@
'test-cpu-profiler/DeoptAtFirstLevelInlinedSource': [PASS, NO_VARIANTS],
'test-cpu-profiler/DeoptAtSecondLevelInlinedSource': [PASS, NO_VARIANTS],
'test-cpu-profiler/DeoptUntrackedFunction': [PASS, NO_VARIANTS],
- 'test-cpu-profiler/TickLines': [PASS, NO_VARIANTS],
############################################################################
# Slow tests.
@@ -139,9 +131,7 @@
'test-api/Threading4': [PASS, ['mode == debug', SLOW]],
'test-debug/CallFunctionInDebugger': [PASS, ['mode == debug', SLOW]],
'test-strings/StringOOM*': [PASS, ['mode == debug', SKIP]],
-
- # TODO(machenbach, mvstanton): Flaky in debug on all platforms.
- 'test-lockers/LockerUnlocker': [PASS, ['mode == debug', FLAKY]],
+ 'test-serialize/CustomSnapshotDataBlobImmortalImmovableRoots': [PASS, ['mode == debug', SKIP]],
}], # ALWAYS
##############################################################################
@@ -152,8 +142,8 @@
'test-api/Bug618': [PASS],
# BUG(v8:3385).
- 'test-serialize/DeserializeFromSecondSerialization': [PASS, FAIL],
- 'test-serialize/DeserializeFromSecondSerializationAndRunScript2': [PASS, FAIL],
+ 'test-serialize/StartupSerializerOnceRunScript': [PASS, FAIL],
+ 'test-serialize/StartupSerializerTwiceRunScript': [PASS, FAIL],
# BUG(v8:3154).
'test-heap/ReleaseOverReservedPages': [PASS, FAIL],
@@ -169,6 +159,18 @@
# BUG(v8:3434).
' test-api/LoadICFastApi_DirectCall_GCMoveStubWithProfiler': [SKIP],
+
+ # BUG(v8:4795).
+ 'test-run-wasm-js/Run_JSSelectAlign_0': [SKIP],
+ 'test-run-wasm-js/Run_JSSelectAlign_2': [SKIP],
+ 'test-run-wasm-js/Run_JSSelectAlign_4': [SKIP],
+ 'test-run-wasm-js/Run_JSSelect_0': [SKIP],
+ 'test-run-wasm-js/Run_JSSelect_1': [SKIP],
+ 'test-run-wasm-js/Run_JSSelect_2': [SKIP],
+ 'test-run-wasm-js/Run_JSSelect_3': [SKIP],
+ 'test-run-wasm-js/Run_JSSelect_4': [SKIP],
+ 'test-run-wasm-js/Run_JSSelect_5': [SKIP],
+ 'test-run-wasm-js/Run_JSSelect_6': [SKIP],
}], # 'arch == arm64'
['arch == arm64 and simulator_run == True', {
@@ -235,44 +237,34 @@
# BUG(v8:4642).
'test-lockers/LockAndUnlockDifferentIsolates': [PASS, NO_VARIANTS],
-}], # 'system == windows'
-
-##############################################################################
-['system == macos', {
- # BUG(3125).
- 'test-debug/DebugGetLoadedScripts': [PASS, FLAKY],
- 'test-debug/DebugStepLinear': [PASS, FLAKY],
- 'test-debug/DebuggerClearMessageHandler': [PASS, FLAKY],
-}], # 'system == macos'
+ # BUG(v8:2999,v8:4751).
+ 'test-cpu-profiler/CollectDeoptEvents': [SKIP],
+ 'test-cpu-profiler/FunctionCallSample': [SKIP],
+ 'test-cpu-profiler/JsNativeJsRuntimeJsSample': [SKIP],
+ 'test-cpu-profiler/JsNativeJsRuntimeJsSampleMultiple': [SKIP],
+ 'test-cpu-profiler/JsNativeJsSample': [SKIP],
+ 'test-cpu-profiler/JsNative1JsNative2JsSample': [SKIP],
+}], # 'system == windows'
##############################################################################
['byteorder == big', {
# TODO(mips-team): Fix Wasm for big-endian.
'test-run-wasm-module/Run_WasmModule_CallAdd': [SKIP],
- 'test-run-wasm-module/Run_WasmModule_CallAdd_rev': [SKIP],
'test-run-wasm-module/Run_WasmModule_CallMain_recursive': [SKIP],
'test-run-wasm-module/Run_WasmModule_ReadLoadedDataSegment': [SKIP],
'test-run-wasm-module/Run_WasmModule_Return114': [SKIP],
'test-run-wasm-module/Run_WasmModule_CheckMemoryIsZero': [SKIP],
'test-run-wasm-module/Run_WasmModule_Global': [SKIP],
- 'test-run-wasm/Run_WasmInt32*': [SKIP],
- 'test-run-wasm/Run_Wasm_TableSwitch*': [SKIP],
- 'test-run-wasm/Run_Wasm_StoreMemI32_offset': [SKIP],
- 'test-run-wasm/Run_Wasm_Int32LoadInt16_*': [SKIP],
+ 'test-run-wasm/Run_Wasm_Int32LoadInt16_signext': [SKIP],
+ 'test-run-wasm/Run_Wasm_Int32LoadInt16_zeroext': [SKIP],
'test-run-wasm/Run_WasmMixedGlobals': [SKIP],
- 'test-run-wasm/Run_WasmCall*': [SKIP],
- 'test-run-wasm/Run_WasmMixedCall_*': [SKIP],
- 'test-run-wasm/Run_WasmInt64*': [SKIP],
- 'test-run-wasm/Run_Wasm_LoadStoreI64_sx': [SKIP],
- 'test-run-wasm/Run_WASM_Int64DivS_byzero_const': [SKIP],
- 'test-run-wasm/Run_TestI64WasmRunner': [SKIP],
- 'test-run-wasm-js/Run_JSSelect_1': [SKIP],
- 'test-run-wasm-js/Run_JSSelect_2': [SKIP],
- 'test-run-wasm-js/Run_JSSelect_3': [SKIP],
- 'test-run-wasm-js/Run_JSSelect_4': [SKIP],
- 'test-run-wasm-js/Run_JSSelect_5': [SKIP],
- 'test-run-wasm-js/Run_JSSelect_6': [SKIP],
+ 'test-run-wasm-64/Run_WasmI64*': [SKIP],
+ 'test-run-wasm-64/Run_Wasm_I64*': [SKIP],
+ 'test-run-wasm-64/Run_Wasm_LoadStoreI64_sx': [SKIP],
+ 'test-run-wasm-64/Run_TestI64WasmRunner': [SKIP],
+ 'test-run-wasm-64/Run_WasmCall_Int64Sub': [SKIP],
+ 'test-run-wasm-64/Run_Wasm_MemI64_Sum': [SKIP],
}], # 'byteorder == big'
##############################################################################
@@ -284,10 +276,10 @@
'test-log/ProfLazyMode': [SKIP],
# BUG(1075): Unresolved crashes.
- 'test-serialize/Deserialize': [SKIP],
- 'test-serialize/DeserializeFromSecondSerializationAndRunScript2': [SKIP],
- 'test-serialize/DeserializeAndRunScript2': [SKIP],
- 'test-serialize/DeserializeFromSecondSerialization': [SKIP],
+ 'test-serialize/StartupSerializerOnce': [SKIP],
+ 'test-serialize/StartupSerializerTwice': [SKIP],
+ 'test-serialize/StartupSerializerOnceRunScript': [SKIP],
+ 'test-serialize/StartupSerializerTwiceRunScript': [SKIP],
############################################################################
# Slow tests.
@@ -295,10 +287,6 @@
'test-api/Threading2': [PASS, SLOW],
'test-api/Threading3': [PASS, SLOW],
'test-api/Threading4': [PASS, SLOW],
-
- # Crashes due to OOM in simulator.
- 'test-types/Distributivity1': [PASS, FLAKY],
- 'test-types/Distributivity2': [PASS, FLAKY],
}], # 'arch == arm'
##############################################################################
@@ -309,10 +297,10 @@
'test-heap/TestSizeOfRegExpCode': [SKIP],
# BUG(1075): Unresolved crashes on MIPS also.
- 'test-serialize/Deserialize': [SKIP],
- 'test-serialize/DeserializeFromSecondSerializationAndRunScript2': [SKIP],
- 'test-serialize/DeserializeAndRunScript2': [SKIP],
- 'test-serialize/DeserializeFromSecondSerialization': [SKIP],
+ 'test-serialize/StartupSerializerOnce': [SKIP],
+ 'test-serialize/StartupSerializerTwice': [SKIP],
+ 'test-serialize/StartupSerializerOnceRunScript': [SKIP],
+ 'test-serialize/StartupSerializerTwiceRunScript': [SKIP],
}], # 'arch == mipsel or arch == mips'
##############################################################################
@@ -336,10 +324,10 @@
'test-heap/TestSizeOfRegExpCode': [SKIP],
# BUG(1075): Unresolved crashes on MIPS also.
- 'test-serialize/Deserialize': [SKIP],
- 'test-serialize/DeserializeFromSecondSerializationAndRunScript2': [SKIP],
- 'test-serialize/DeserializeAndRunScript2': [SKIP],
- 'test-serialize/DeserializeFromSecondSerialization': [SKIP],
+ 'test-serialize/StartupSerializerOnce': [SKIP],
+ 'test-serialize/StartupSerializerTwice': [SKIP],
+ 'test-serialize/StartupSerializerOnceRunScript': [SKIP],
+ 'test-serialize/StartupSerializerTwiceRunScript': [SKIP],
}], # 'arch == mips64el or arch == mips64'
##############################################################################
@@ -353,6 +341,7 @@
'test-run-machops/RunFloat64MulAndFloat64Add2': [SKIP],
'test-run-machops/RunFloat64MulAndFloat64Sub1': [SKIP],
'test-run-machops/RunFloat64MulAndFloat64Sub2': [SKIP],
+ 'test-cpu-profiler/Inlining': [SKIP],
}], # 'arch == x87'
##############################################################################
@@ -498,56 +487,12 @@
}], # 'arch == ppc64 and simulator_run == True'
+##############################################################################
['ignition == True', {
- # TODO(yangguo,4690): Test failures in debugger tests.
- 'test-debug/DebugStepLocals': [FAIL],
- 'test-debug/DebugStepKeyedLoadLoop': [FAIL],
- 'test-debug/DebugStepKeyedStoreLoop': [FAIL],
- 'test-debug/DebugStepIf': [FAIL],
- 'test-debug/DebugStepNamedLoadLoop': [FAIL],
- 'test-debug/DebugStepDeclarations': [FAIL],
- 'test-debug/BreakPointConstructCallWithGC': [PASS, FAIL],
- 'test-debug/DebugStepNamedStoreLoop': [FAIL],
- 'test-debug/DebugStepLinearMixedICs': [FAIL],
- 'test-debug/DebugStepSwitch': [FAIL],
- 'test-debug/DebugStepWhile': [FAIL],
- 'test-debug/DebugStepFor': [FAIL],
- 'test-debug/DebugStepForContinue': [FAIL],
- 'test-debug/DebugStepForIn': [FAIL],
- 'test-debug/DebugStepDoWhile': [FAIL],
- 'test-debug/DebugConditional': [FAIL],
- 'test-debug/DebugStepForBreak': [FAIL],
- 'test-debug/DebugStepWith': [FAIL],
- 'test-debug/DebugStepFunctionApply': [FAIL],
- 'test-debug/StepInOutBranch': [FAIL],
- 'test-debug/DebugStepFunctionCall': [FAIL],
-
- # TODO(yangguo,4690): Required DebuggerStatement support.
- 'test-profile-generator/BailoutReason': [FAIL],
-
- # TODO(rmcilroy,4680): Check failed: toplevel_test_code_event_found.
- 'test-serialize/SerializeToplevelIsolates': [FAIL],
-
- # BUG(4333). Function name inferrer does not work for ES6 clases.
- 'test-func-name-inference/UpperCaseClass': [TIMEOUT],
- 'test-func-name-inference/LowerCaseClass': [TIMEOUT],
-
- # TODO(rmcilroy,4681): Requires support for generators.
- 'test-inobject-slack-tracking/JSGeneratorObjectBasic': [FAIL],
- 'test-inobject-slack-tracking/JSGeneratorObjectBasicNoInlineNew': [FAIL],
- 'test-api/IsGeneratorFunctionOrObject': [FAIL],
-
- # TODO(rmcilroy,4680): Strong mode failures.
- 'test-api/AccessorShouldThrowOnError': [FAIL],
- 'test-api/InterceptorShouldThrowOnError': [FAIL],
# TODO(rmcilroy,4680): The function_data field should be a BytecodeArray on interpreter entry
'test-api/SetFunctionEntryHook': [FAIL],
- # TODO(rmcilroy,4680): Fail on shared_function_data()->IsUndefined in
- #compiler.cc
- 'test-heap/CanonicalSharedFunctionInfo': [PASS, ['mode == debug or dcheck_always_on == True', FAIL]],
-
# TODO(rmcilroy,4680): Check failed: !function->shared()->is_compiled() || function->IsOptimized().
'test-heap/TestCodeFlushingPreAged': [FAIL],
'test-heap/TestCodeFlushingIncrementalScavenge': [FAIL],
@@ -555,71 +500,61 @@
'test-heap/TestCodeFlushingIncremental': [FAIL],
'test-heap/TestCodeFlushingIncrementalAbort': [PASS, ['mode == debug or dcheck_always_on == True', FAIL]],
- # TODO(rmcilroy,4680): Check failed: fun1->IsOptimized() || !CcTest::i_isolate()->use_crankshaft().
- 'test-compiler/OptimizedCodeSharing2': [FAIL],
- 'test-compiler/OptimizedCodeSharing3': [FAIL],
-
- # TODO(rmcilroy,4689): Stack trace line number failures.
- 'test-run-jsexceptions/ThrowMessagePosition': [FAIL],
- 'test-api/TryCatchMixedNesting': [FAIL],
-
- # TODO(rmcilroy,4680): Test assert errors.
- 'test-cpu-profiler/CodeEvents': [FAIL],
- 'test-cpu-profiler/TickEvents': [FAIL],
- 'test-cpu-profiler/BoundFunctionCall': [FAIL],
- 'test-cpu-profiler/CollectCpuProfile': [FAIL],
- 'test-cpu-profiler/CollectSampleAPI': [FAIL],
- 'test-cpu-profiler/CpuProfileDeepStack': [FAIL],
- 'test-cpu-profiler/FunctionApplySample': [FAIL],
- 'test-cpu-profiler/FunctionCallSample': [FAIL],
- 'test-cpu-profiler/FunctionDetails': [FAIL],
- 'test-cpu-profiler/HotDeoptNoFrameEntry': [FAIL],
- 'test-cpu-profiler/JsNative1JsNative2JsSample': [FAIL],
- 'test-cpu-profiler/JsNativeJsRuntimeJsSample': [FAIL],
- 'test-cpu-profiler/JsNativeJsRuntimeJsSampleMultiple': [FAIL],
- 'test-cpu-profiler/JsNativeJsSample': [FAIL],
- 'test-cpu-profiler/NativeMethodUninitializedIC': [FAIL],
- 'test-cpu-profiler/NativeMethodMonomorphicIC': [FAIL],
- 'test-cpu-profiler/NativeAccessorUninitializedIC': [FAIL],
- 'test-cpu-profiler/NativeAccessorMonomorphicIC': [FAIL],
- 'test-cpu-profiler/SampleWhenFrameIsNotSetup': [FAIL],
- 'test-sampler-api/StackFramesConsistent': [FAIL],
- 'test-profile-generator/LineNumber': [FAIL],
- 'test-profile-generator/ProfileNodeScriptId': [FAIL],
- 'test-profile-generator/RecordStackTraceAtStartProfiling': [FAIL],
+ # TODO(rmcilroy,4766): Requires BytecodeGraphBuilder to track source position
+ # on nodes (behind --turbo_source_positions flag).
+ 'test-cpu-profiler/TickLinesOptimized': [FAIL],
+
+ # TODO(rmcilroy,4680): Fails to find the correct function name for the
+ # anonymous function. Fails without ignition but with --no-lazy also, so seems
+ # to be an issue when eagerly parsing.
+ 'test-func-name-inference/ReturnAnonymousFunction': [FAIL],
+
+ # TODO(mythria,4780): Related to type feedback support for calls.
'test-feedback-vector/VectorCallICStates': [FAIL],
'test-compiler/FeedbackVectorPreservedAcrossRecompiles': [FAIL],
- 'test-api/PromiseRejectCallback': [FAIL],
- 'test-api/SetJitCodeEventHandler': [FAIL],
'test-heap/WeakFunctionInConstructor': [FAIL],
- 'test-heap/Regress169209': [FAIL],
'test-heap/IncrementalMarkingClearsMonomorphicConstructor': [FAIL],
'test-heap/IncrementalMarkingPreservesMonomorphicConstructor': [FAIL],
'test-heap/IncrementalMarkingPreservesMonomorphicCallIC': [FAIL],
- 'test-heap/CompilationCacheCachingBehavior': [FAIL],
'test-heap/CellsInOptimizedCodeAreWeak': [FAIL],
- 'test-run-inlining/InlineTwice': [FAIL],
- 'test-serialize/SerializeInternalReference': [FAIL, ['arch == arm or arch == arm64', PASS]],
-}], # ignition == True
-['ignition == True and arch == x64', {
- # TODO(rmcilroy,4680): The function_data field should be a BytecodeArray on interpreter entry
- 'test-serialize/PerIsolateSnapshotBlobsOutdatedContextWithOverflow': [PASS, ['mode == debug', FAIL]],
- 'test-serialize/PerIsolateSnapshotBlobsWithLocker': [PASS, ['mode == debug', FAIL]],
- 'test-serialize/SnapshotBlobsStackOverflow': [PASS, ['mode == debug', FAIL]],
- 'test-serialize/PerIsolateSnapshotBlobs': [PASS, ['mode == debug', FAIL]],
- 'test-serialize/SerializationMemoryStats': [PASS, ['mode == debug', FAIL]],
+ # TODO(mythria,4680): Lack of code-ageing in interpreter.
+ 'test-heap/Regress169209': [FAIL],
- # TODO(rmcilroy,4680): Test assert errors.
- 'test-heap-profiler/HeapSnapshotSimd': [PASS, ['mode == debug', FAIL]],
- 'test-api/InitializeDefaultIsolateOnSecondaryThread1': [PASS, ['mode == debug', FAIL]],
-}],
+ # TODO(mythria,4680): Lack of code-ageing and/or lack of compilation cache
+ # in interpreter.
+ 'test-heap/CompilationCacheCachingBehavior': [FAIL],
-##############################################################################
-# exclude test issues for which fixes for PPC did not make it into 5.0
-# These should be removed when we upgrade Node.js to use v8 5.1
-['arch == ppc64', {
- 'test-heap/ReleaseOverReservedPages' : [SKIP],
-}], # 'arch == ppc64''
+ # TODO(mstarzinger,4680): Fails due to the turbo-asm pipeline only being taken
+ # in compiler.cc GetLazyCode for uncompiled code, and no similar path for eager
+ # code.
+ 'test-api/TurboAsmDisablesNeuter': [FAIL],
+
+ # TODO(rmcilroy,4837): We don't set a LoadContextSlot for a function as
+ # immutable in the BytecodeGraphBuilder, therefore no inlining happens.
+ 'test-run-inlining/InlineLoopGuardedTwice': [FAIL],
+ 'test-run-inlining/InlineSurplusArgumentsDeopt': [FAIL],
+ 'test-run-inlining/InlineTwice': [FAIL],
+ 'test-run-inlining/InlineSurplusArgumentsObject': [FAIL],
+ 'test-run-inlining/InlineTwiceDependentDiamond': [FAIL],
+ 'test-run-inlining/InlineWithArguments': [FAIL],
+ 'test-run-inlining/InlineLoopUnguardedTwice': [FAIL],
+ 'test-run-inlining/InlineOmitArgumentsObject': [FAIL],
+ 'test-run-inlining/InlineLoopUnguardedOnce': [FAIL],
+ 'test-run-inlining/InlineOmitArgumentsDeopt': [FAIL],
+ 'test-run-inlining/InlineTwiceDependentDiamondDifferent': [FAIL],
+ 'test-run-inlining/SimpleInliningContext': [FAIL],
+ 'test-run-inlining/InlineMutuallyRecursive': [FAIL],
+ 'test-run-inlining/InlineLoopGuardedEmpty': [FAIL],
+ 'test-run-inlining/InlineLoopGuardedOnce': [FAIL],
+ 'test-run-inlining/InlineOmitArguments': [FAIL],
+ 'test-run-inlining/SimpleInlining': [FAIL],
+ 'test-run-inlining/InlineLoopUnguardedEmpty': [FAIL],
+ 'test-run-inlining/InlineNestedBuiltin': [FAIL],
+ 'test-run-inlining/InlineSurplusArguments': [FAIL],
+ 'test-run-inlining/InlineBuiltin': [FAIL],
+ 'test-run-inlining/InlineTwiceDependent': [FAIL],
+ 'test-run-inlining/SimpleInliningContextDeopt': [FAIL],
+}], # ignition == True
]
diff --git a/deps/v8/test/cctest/compiler/call-tester.h b/deps/v8/test/cctest/compiler/call-tester.h
index 8ee6b99f3b..c75bde1e91 100644
--- a/deps/v8/test/cctest/compiler/call-tester.h
+++ b/deps/v8/test/cctest/compiler/call-tester.h
@@ -220,7 +220,8 @@ class CallHelper {
Simulator::CallArgument(p5), Simulator::CallArgument::End()};
return CastReturnValue<R>(CallSimulator(FUNCTION_ADDR(f), args));
}
-#elif USE_SIMULATOR && (V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64)
+#elif USE_SIMULATOR && \
+ (V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390X)
uintptr_t CallSimulator(byte* f, int64_t p1 = 0, int64_t p2 = 0,
int64_t p3 = 0, int64_t p4 = 0, int64_t p5 = 0) {
Simulator* simulator = Simulator::current(isolate_);
@@ -264,8 +265,8 @@ class CallHelper {
ParameterTraits<P2>::Cast(p2), ParameterTraits<P3>::Cast(p3),
ParameterTraits<P4>::Cast(p4), ParameterTraits<P5>::Cast(p5)));
}
-#elif USE_SIMULATOR && \
- (V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_PPC)
+#elif USE_SIMULATOR && (V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS || \
+ V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390)
uintptr_t CallSimulator(byte* f, int32_t p1 = 0, int32_t p2 = 0,
int32_t p3 = 0, int32_t p4 = 0, int32_t p5 = 0) {
Simulator* simulator = Simulator::current(isolate_);
diff --git a/deps/v8/test/cctest/compiler/codegen-tester.cc b/deps/v8/test/cctest/compiler/codegen-tester.cc
index fc0956fb50..9b5a9d90a2 100644
--- a/deps/v8/test/cctest/compiler/codegen-tester.cc
+++ b/deps/v8/test/cctest/compiler/codegen-tester.cc
@@ -563,7 +563,7 @@ TEST(RunBinopTester) {
Float64BinopTester bt(&m);
bt.AddReturn(bt.param0);
- FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(*i, bt.call(*i, 9.0)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(*i, bt.call(*i, 9.0)); }
}
{
@@ -571,7 +571,7 @@ TEST(RunBinopTester) {
Float64BinopTester bt(&m);
bt.AddReturn(bt.param1);
- FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(*i, bt.call(-11.25, *i)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(*i, bt.call(-11.25, *i)); }
}
}
@@ -587,7 +587,7 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) {
{
BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
m.Return(m.Parameter(0));
- FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(*i, m.Call(*i)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(*i, m.Call(*i)); }
}
{
BufferedRawMachineAssemblerTester<int64_t> m(MachineType::Int64(),
@@ -647,7 +647,7 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) {
m.Return(m.Int32Constant(0));
FOR_FLOAT64_INPUTS(i) {
m.Call(*i);
- CheckDoubleEq(*i, result);
+ CHECK_DOUBLE_EQ(*i, result);
}
}
{
diff --git a/deps/v8/test/cctest/compiler/function-tester.h b/deps/v8/test/cctest/compiler/function-tester.h
index c6093ce6e4..631bdde5ce 100644
--- a/deps/v8/test/cctest/compiler/function-tester.h
+++ b/deps/v8/test/cctest/compiler/function-tester.h
@@ -175,7 +175,7 @@ class FunctionTester : public InitializedHandleScope {
uint32_t flags_;
Handle<JSFunction> Compile(Handle<JSFunction> function) {
- Zone zone;
+ Zone zone(function->GetIsolate()->allocator());
ParseInfo parse_info(&zone, function);
CompilationInfo info(&parse_info);
info.MarkAsDeoptimizationEnabled();
@@ -224,7 +224,7 @@ class FunctionTester : public InitializedHandleScope {
// Compile the given machine graph instead of the source of the function
// and replace the JSFunction's code with the result.
Handle<JSFunction> CompileGraph(Graph* graph) {
- Zone zone;
+ Zone zone(function->GetIsolate()->allocator());
ParseInfo parse_info(&zone, function);
CompilationInfo info(&parse_info);
diff --git a/deps/v8/test/cctest/compiler/test-branch-combine.cc b/deps/v8/test/cctest/compiler/test-branch-combine.cc
index c3b4308a93..c5c41667a0 100644
--- a/deps/v8/test/cctest/compiler/test-branch-combine.cc
+++ b/deps/v8/test/cctest/compiler/test-branch-combine.cc
@@ -457,6 +457,27 @@ TEST(BranchCombineFloat64Compares) {
}
}
+TEST(BranchCombineEffectLevel) {
+ // Test that the load doesn't get folded into the branch, as there's a store
+ // between them. See http://crbug.com/611976.
+ int32_t input = 0;
+
+ RawMachineAssemblerTester<int32_t> m;
+ Node* a = m.LoadFromPointer(&input, MachineType::Int32());
+ Node* compare = m.Word32And(a, m.Int32Constant(1));
+ Node* equal = m.Word32Equal(compare, m.Int32Constant(0));
+ m.StoreToPointer(&input, MachineRepresentation::kWord32, m.Int32Constant(1));
+
+ RawMachineLabel blocka, blockb;
+ m.Branch(equal, &blocka, &blockb);
+ m.Bind(&blocka);
+ m.Return(m.Int32Constant(42));
+ m.Bind(&blockb);
+ m.Return(m.Int32Constant(0));
+
+ CHECK_EQ(42, m.Call());
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/compiler/test-changes-lowering.cc b/deps/v8/test/cctest/compiler/test-changes-lowering.cc
index e850da7735..ddeabe479b 100644
--- a/deps/v8/test/cctest/compiler/test-changes-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-changes-lowering.cc
@@ -237,13 +237,13 @@ TEST(RunChangeTaggedToFloat64) {
{
Handle<Object> number = t.factory()->NewNumber(input);
t.Call(*number);
- CheckDoubleEq(input, result);
+ CHECK_DOUBLE_EQ(input, result);
}
{
Handle<HeapNumber> number = t.factory()->NewHeapNumber(input);
t.Call(*number);
- CheckDoubleEq(input, result);
+ CHECK_DOUBLE_EQ(input, result);
}
}
}
diff --git a/deps/v8/test/cctest/compiler/test-code-stub-assembler.cc b/deps/v8/test/cctest/compiler/test-code-stub-assembler.cc
index 0306561020..ff02cc9b44 100644
--- a/deps/v8/test/cctest/compiler/test-code-stub-assembler.cc
+++ b/deps/v8/test/cctest/compiler/test-code-stub-assembler.cc
@@ -13,12 +13,19 @@ namespace compiler {
class CodeStubAssemblerTester : public CodeStubAssembler {
public:
+ // Test generating code for a stub.
CodeStubAssemblerTester(Isolate* isolate,
const CallInterfaceDescriptor& descriptor)
: CodeStubAssembler(isolate, isolate->runtime_zone(), descriptor,
Code::ComputeFlags(Code::STUB), "test"),
scope_(isolate) {}
+ // Test generating code for a JS function (e.g. builtins).
+ CodeStubAssemblerTester(Isolate* isolate, int parameter_count)
+ : CodeStubAssembler(isolate, isolate->runtime_zone(), parameter_count,
+ Code::ComputeFlags(Code::FUNCTION), "test"),
+ scope_(isolate) {}
+
private:
HandleScope scope_;
LocalContext context_;
@@ -68,12 +75,12 @@ TEST(SimpleCallRuntime1Arg) {
VoidDescriptor descriptor(isolate);
CodeStubAssemblerTester m(isolate, descriptor);
Node* context = m.HeapConstant(Handle<Context>(isolate->native_context()));
- Node* b = m.SmiTag(m.Int32Constant(256));
- m.Return(m.CallRuntime(Runtime::kMathSqrt, context, b));
+ Node* b = m.SmiTag(m.Int32Constant(0));
+ m.Return(m.CallRuntime(Runtime::kNumberToSmi, context, b));
Handle<Code> code = m.GenerateCode();
FunctionTester ft(descriptor, code);
MaybeHandle<Object> result = ft.Call();
- CHECK_EQ(16, Handle<Smi>::cast(result.ToHandleChecked())->value());
+ CHECK_EQ(0, Handle<Smi>::cast(result.ToHandleChecked())->value());
}
@@ -82,12 +89,12 @@ TEST(SimpleTailCallRuntime1Arg) {
VoidDescriptor descriptor(isolate);
CodeStubAssemblerTester m(isolate, descriptor);
Node* context = m.HeapConstant(Handle<Context>(isolate->native_context()));
- Node* b = m.SmiTag(m.Int32Constant(256));
- m.TailCallRuntime(Runtime::kMathSqrt, context, b);
+ Node* b = m.SmiTag(m.Int32Constant(0));
+ m.TailCallRuntime(Runtime::kNumberToSmi, context, b);
Handle<Code> code = m.GenerateCode();
FunctionTester ft(descriptor, code);
MaybeHandle<Object> result = ft.Call();
- CHECK_EQ(16, Handle<Smi>::cast(result.ToHandleChecked())->value());
+ CHECK_EQ(0, Handle<Smi>::cast(result.ToHandleChecked())->value());
}
@@ -247,6 +254,113 @@ TEST(FixedArrayAccessSmiIndex) {
CHECK_EQ(733, Handle<Smi>::cast(result.ToHandleChecked())->value());
}
+TEST(LoadHeapNumberValue) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ VoidDescriptor descriptor(isolate);
+ CodeStubAssemblerTester m(isolate, descriptor);
+ Handle<HeapNumber> number = isolate->factory()->NewHeapNumber(1234);
+ m.Return(m.SmiTag(
+ m.ChangeFloat64ToUint32(m.LoadHeapNumberValue(m.HeapConstant(number)))));
+ Handle<Code> code = m.GenerateCode();
+ FunctionTester ft(descriptor, code);
+ MaybeHandle<Object> result = ft.Call();
+ CHECK_EQ(1234, Handle<Smi>::cast(result.ToHandleChecked())->value());
+}
+
+TEST(LoadInstanceType) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ VoidDescriptor descriptor(isolate);
+ CodeStubAssemblerTester m(isolate, descriptor);
+ Handle<HeapObject> undefined = isolate->factory()->undefined_value();
+ m.Return(m.SmiTag(m.LoadInstanceType(m.HeapConstant(undefined))));
+ Handle<Code> code = m.GenerateCode();
+ FunctionTester ft(descriptor, code);
+ MaybeHandle<Object> result = ft.Call();
+ CHECK_EQ(InstanceType::ODDBALL_TYPE,
+ Handle<Smi>::cast(result.ToHandleChecked())->value());
+}
+
+namespace {
+
+class TestBitField : public BitField<unsigned, 3, 3> {};
+
+} // namespace
+
+TEST(BitFieldDecode) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ VoidDescriptor descriptor(isolate);
+ CodeStubAssemblerTester m(isolate, descriptor);
+ m.Return(m.SmiTag(m.BitFieldDecode<TestBitField>(m.Int32Constant(0x2f))));
+ Handle<Code> code = m.GenerateCode();
+ FunctionTester ft(descriptor, code);
+ MaybeHandle<Object> result = ft.Call();
+ // value = 00101111
+ // mask = 00111000
+ // result = 101
+ CHECK_EQ(5, Handle<Smi>::cast(result.ToHandleChecked())->value());
+}
+
+namespace {
+
+Handle<JSFunction> CreateFunctionFromCode(int parameter_count_with_receiver,
+ Handle<Code> code) {
+ Isolate* isolate = code->GetIsolate();
+ Handle<String> name = isolate->factory()->InternalizeUtf8String("test");
+ Handle<JSFunction> function =
+ isolate->factory()->NewFunctionWithoutPrototype(name, code);
+ function->shared()->set_internal_formal_parameter_count(
+ parameter_count_with_receiver - 1); // Implicit undefined receiver.
+ return function;
+}
+
+} // namespace
+
+TEST(JSFunction) {
+ const int kNumParams = 3; // Receiver, left, right.
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ CodeStubAssemblerTester m(isolate, kNumParams);
+ m.Return(m.SmiTag(m.Int32Add(m.SmiToWord32(m.Parameter(1)),
+ m.SmiToWord32(m.Parameter(2)))));
+ Handle<Code> code = m.GenerateCode();
+ Handle<JSFunction> function = CreateFunctionFromCode(kNumParams, code);
+ Handle<Object> args[] = {Handle<Smi>(Smi::FromInt(23), isolate),
+ Handle<Smi>(Smi::FromInt(34), isolate)};
+ MaybeHandle<Object> result =
+ Execution::Call(isolate, function, isolate->factory()->undefined_value(),
+ arraysize(args), args);
+ CHECK_EQ(57, Handle<Smi>::cast(result.ToHandleChecked())->value());
+}
+
+TEST(SplitEdgeBranchMerge) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ VoidDescriptor descriptor(isolate);
+ CodeStubAssemblerTester m(isolate, descriptor);
+ CodeStubAssembler::Label l1(&m), merge(&m);
+ m.Branch(m.Int32Constant(1), &l1, &merge);
+ m.Bind(&l1);
+ m.Goto(&merge);
+ m.Bind(&merge);
+ USE(m.GenerateCode());
+}
+
+TEST(SplitEdgeSwitchMerge) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ VoidDescriptor descriptor(isolate);
+ CodeStubAssemblerTester m(isolate, descriptor);
+ CodeStubAssembler::Label l1(&m), l2(&m), l3(&m), default_label(&m);
+ CodeStubAssembler::Label* labels[] = {&l1, &l2};
+ int32_t values[] = {1, 2};
+ m.Branch(m.Int32Constant(1), &l3, &l1);
+ m.Bind(&l3);
+ m.Switch(m.Int32Constant(2), &default_label, values, labels, 2);
+ m.Bind(&l1);
+ m.Goto(&l2);
+ m.Bind(&l2);
+ m.Goto(&default_label);
+ m.Bind(&default_label);
+ USE(m.GenerateCode());
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/compiler/test-instruction.cc b/deps/v8/test/cctest/compiler/test-instruction.cc
index 4de3373dad..4cf72a55ce 100644
--- a/deps/v8/test/cctest/compiler/test-instruction.cc
+++ b/deps/v8/test/cctest/compiler/test-instruction.cc
@@ -268,7 +268,8 @@ TEST(InstructionAddGapMove) {
TEST(InstructionOperands) {
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
{
TestInstr* i = TestInstr::New(&zone, 101);
diff --git a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
index 24db6a532e..0075de5329 100644
--- a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
@@ -975,7 +975,7 @@ TEST(OrderCompareEffects) {
BinopEffectsTester B(ops[j], Type::Symbol(), Type::String());
CHECK_EQ(ops[j + 1]->opcode(), B.result->op()->opcode());
- Node* i0 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 0, true);
+ Node* i0 = B.CheckConvertedInput(IrOpcode::kStringToNumber, 0, false);
Node* i1 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 1, true);
// Inputs should be commuted.
diff --git a/deps/v8/test/cctest/compiler/test-jump-threading.cc b/deps/v8/test/cctest/compiler/test-jump-threading.cc
index 71f774f562..ed3d79e4ba 100644
--- a/deps/v8/test/cctest/compiler/test-jump-threading.cc
+++ b/deps/v8/test/cctest/compiler/test-jump-threading.cc
@@ -106,7 +106,8 @@ class TestCode : public HandleAndZoneScope {
void VerifyForwarding(TestCode& code, int count, int* expected) {
- Zone local_zone;
+ base::AccountingAllocator allocator;
+ Zone local_zone(&allocator);
ZoneVector<RpoNumber> result(&local_zone);
JumpThreading::ComputeForwarding(&local_zone, result, &code.sequence_, true);
diff --git a/deps/v8/test/cctest/compiler/test-linkage.cc b/deps/v8/test/cctest/compiler/test-linkage.cc
index 6722f59d60..0cbdb4c6b2 100644
--- a/deps/v8/test/cctest/compiler/test-linkage.cc
+++ b/deps/v8/test/cctest/compiler/test-linkage.cc
@@ -30,12 +30,12 @@ static Handle<JSFunction> Compile(const char* source) {
Handle<String> source_code = isolate->factory()
->NewStringFromUtf8(CStrVector(source))
.ToHandleChecked();
- Handle<SharedFunctionInfo> shared_function = Compiler::CompileScript(
+ Handle<SharedFunctionInfo> shared = Compiler::GetSharedFunctionInfoForScript(
source_code, Handle<String>(), 0, 0, v8::ScriptOriginOptions(),
Handle<Object>(), Handle<Context>(isolate->native_context()), NULL, NULL,
v8::ScriptCompiler::kNoCompileOptions, NOT_NATIVES_CODE, false);
return isolate->factory()->NewFunctionFromSharedFunctionInfo(
- shared_function, isolate->native_context());
+ shared, isolate->native_context());
}
@@ -96,7 +96,7 @@ TEST(TestLinkageRuntimeCall) {
TEST(TestLinkageStubCall) {
Isolate* isolate = CcTest::InitIsolateOnce();
- Zone zone;
+ Zone zone(isolate->allocator());
ToNumberStub stub(isolate);
CompilationInfo info("test", isolate, &zone, Code::ComputeFlags(Code::STUB));
CallInterfaceDescriptor interface_descriptor =
diff --git a/deps/v8/test/cctest/compiler/test-loop-analysis.cc b/deps/v8/test/cctest/compiler/test-loop-analysis.cc
index 68bfc2858f..fb61e20197 100644
--- a/deps/v8/test/cctest/compiler/test-loop-analysis.cc
+++ b/deps/v8/test/cctest/compiler/test-loop-analysis.cc
@@ -127,7 +127,7 @@ class LoopFinderTester : HandleAndZoneScope {
OFStream os(stdout);
os << AsRPO(graph);
}
- Zone zone;
+ Zone zone(main_isolate()->allocator());
loop_tree = LoopFinder::BuildLoopTree(&graph, &zone);
}
return loop_tree;
diff --git a/deps/v8/test/cctest/compiler/test-multiple-return.cc b/deps/v8/test/cctest/compiler/test-multiple-return.cc
index 7c08238411..2108ab1302 100644
--- a/deps/v8/test/cctest/compiler/test-multiple-return.cc
+++ b/deps/v8/test/cctest/compiler/test-multiple-return.cc
@@ -69,7 +69,8 @@ CallDescriptor* GetCallDescriptor(Zone* zone, int return_count,
TEST(ReturnThreeValues) {
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
CallDescriptor* desc = GetCallDescriptor(&zone, 3, 2);
HandleAndZoneScope handles;
RawMachineAssembler m(handles.main_isolate(),
diff --git a/deps/v8/test/cctest/compiler/test-node.cc b/deps/v8/test/cctest/compiler/test-node.cc
index de1c2c02a2..d317c3877c 100644
--- a/deps/v8/test/cctest/compiler/test-node.cc
+++ b/deps/v8/test/cctest/compiler/test-node.cc
@@ -141,7 +141,8 @@ void CheckInputs(Node* node, Node** inputs, int input_count) {
TEST(NodeUseIteratorReplaceUses) {
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
Graph graph(&zone);
Node* n0 = graph.NewNode(&dummy_operator0);
Node* n1 = graph.NewNode(&dummy_operator1, n0);
@@ -166,7 +167,8 @@ TEST(NodeUseIteratorReplaceUses) {
TEST(NodeUseIteratorReplaceUsesSelf) {
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
Graph graph(&zone);
Node* n0 = graph.NewNode(&dummy_operator0);
Node* n1 = graph.NewNode(&dummy_operator1, n0);
@@ -190,7 +192,8 @@ TEST(NodeUseIteratorReplaceUsesSelf) {
TEST(ReplaceInput) {
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
Graph graph(&zone);
Node* n0 = graph.NewNode(&dummy_operator0);
Node* n1 = graph.NewNode(&dummy_operator0);
@@ -216,7 +219,8 @@ TEST(ReplaceInput) {
TEST(OwnedBy) {
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
Graph graph(&zone);
{
@@ -266,7 +270,8 @@ TEST(OwnedBy) {
TEST(Uses) {
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
Graph graph(&zone);
Node* n0 = graph.NewNode(&dummy_operator0);
@@ -288,7 +293,8 @@ TEST(Uses) {
TEST(Inputs) {
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
Graph graph(&zone);
Node* n0 = graph.NewNode(&dummy_operator0);
@@ -316,7 +322,8 @@ TEST(Inputs) {
TEST(RemoveInput) {
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
Graph graph(&zone);
Node* n0 = graph.NewNode(&dummy_operator0);
@@ -346,7 +353,8 @@ TEST(RemoveInput) {
TEST(AppendInputsAndIterator) {
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
Graph graph(&zone);
Node* n0 = graph.NewNode(&dummy_operator0);
@@ -368,7 +376,8 @@ TEST(AppendInputsAndIterator) {
TEST(NullInputsSimple) {
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
Graph graph(&zone);
Node* n0 = graph.NewNode(&dummy_operator0);
@@ -395,7 +404,8 @@ TEST(NullInputsSimple) {
TEST(NullInputsAppended) {
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
Graph graph(&zone);
Node* n0 = graph.NewNode(&dummy_operator0);
@@ -418,7 +428,8 @@ TEST(NullInputsAppended) {
TEST(ReplaceUsesFromAppendedInputs) {
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
Graph graph(&zone);
Node* n0 = graph.NewNode(&dummy_operator0);
@@ -446,7 +457,8 @@ TEST(ReplaceUsesFromAppendedInputs) {
TEST(ReplaceInputMultipleUses) {
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
Graph graph(&zone);
Node* n0 = graph.NewNode(&dummy_operator0);
@@ -464,7 +476,8 @@ TEST(ReplaceInputMultipleUses) {
TEST(TrimInputCountInline) {
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
Graph graph(&zone);
{
@@ -532,7 +545,8 @@ TEST(TrimInputCountInline) {
TEST(TrimInputCountOutOfLine1) {
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
Graph graph(&zone);
{
@@ -626,7 +640,8 @@ TEST(TrimInputCountOutOfLine1) {
TEST(TrimInputCountOutOfLine2) {
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
Graph graph(&zone);
{
@@ -695,7 +710,8 @@ TEST(TrimInputCountOutOfLine2) {
TEST(NullAllInputs) {
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
Graph graph(&zone);
for (int i = 0; i < 2; i++) {
@@ -747,7 +763,8 @@ TEST(NullAllInputs) {
TEST(AppendAndTrim) {
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
Graph graph(&zone);
Node* nodes[] = {
diff --git a/deps/v8/test/cctest/compiler/test-representation-change.cc b/deps/v8/test/cctest/compiler/test-representation-change.cc
index 7353e167d9..7e75bf8eb0 100644
--- a/deps/v8/test/cctest/compiler/test-representation-change.cc
+++ b/deps/v8/test/cctest/compiler/test-representation-change.cc
@@ -55,13 +55,13 @@ class RepresentationChangerTester : public HandleAndZoneScope,
void CheckFloat64Constant(Node* n, double expected) {
Float64Matcher m(n);
CHECK(m.HasValue());
- CheckDoubleEq(expected, m.Value());
+ CHECK_DOUBLE_EQ(expected, m.Value());
}
void CheckFloat32Constant(Node* n, float expected) {
CHECK_EQ(IrOpcode::kFloat32Constant, n->opcode());
float fval = OpParameter<float>(n->op());
- CheckDoubleEq(expected, fval);
+ CHECK_FLOAT_EQ(expected, fval);
}
void CheckHeapConstant(Node* n, HeapObject* expected) {
@@ -74,7 +74,7 @@ class RepresentationChangerTester : public HandleAndZoneScope,
NumberMatcher m(n);
CHECK_EQ(IrOpcode::kNumberConstant, n->opcode());
CHECK(m.HasValue());
- CheckDoubleEq(expected, m.Value());
+ CHECK_DOUBLE_EQ(expected, m.Value());
}
Node* Parameter(int index = 0) {
diff --git a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
index 9a038221a1..c32f92387e 100644
--- a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
+++ b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
@@ -79,12 +79,6 @@ class BytecodeGraphTester {
i::FLAG_always_opt = false;
i::FLAG_allow_natives_syntax = true;
i::FLAG_loop_assignment_analysis = false;
- // Set ignition filter flag via SetFlagsFromString to avoid double-free
- // (or potential leak with StrDup() based on ownership confusion).
- ScopedVector<char> ignition_filter(64);
- SNPrintF(ignition_filter, "--ignition-filter=%s", filter);
- FlagList::SetFlagsFromString(ignition_filter.start(),
- ignition_filter.length());
// Ensure handler table is generated.
isolate->interpreter()->Initialize();
}
@@ -659,6 +653,28 @@ TEST(BytecodeGraphBuilderCallRuntime) {
}
}
+TEST(BytecodeGraphBuilderInvokeIntrinsic) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ ExpectedSnippet<1> snippets[] = {
+ {"function f(arg0) { return %_IsJSReceiver(arg0); }\nf()",
+ {factory->false_value(), factory->NewNumberFromInt(1)}},
+ {"function f(arg0) { return %_IsArray(arg0) }\nf(undefined)",
+ {factory->true_value(), BytecodeGraphTester::NewObject("[1, 2, 3]")}},
+ };
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ BytecodeGraphTester tester(isolate, zone, snippets[i].code_snippet);
+ auto callable = tester.GetCallable<Handle<Object>>();
+ Handle<Object> return_value =
+ callable(snippets[i].parameter(0)).ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
void TestBytecodeGraphBuilderGlobals(size_t shard) {
HandleAndZoneScope scope;
Isolate* isolate = scope.main_isolate();
@@ -2835,29 +2851,6 @@ TEST(BytecodeGraphBuilderConstInLookupContextChain) {
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*const_decl[i].return_value()));
}
-
- // Tests for Legacy constant.
- bool old_flag_legacy_const = FLAG_legacy_const;
- FLAG_legacy_const = true;
-
- ExpectedSnippet<0> legacy_const_decl[] = {
- {"return outerConst = 23;", {handle(Smi::FromInt(23), isolate)}},
- {"outerConst = 30; return outerConst;",
- {handle(Smi::FromInt(10), isolate)}},
- };
-
- for (size_t i = 0; i < arraysize(legacy_const_decl); i++) {
- ScopedVector<char> script(1024);
- SNPrintF(script, "%s %s %s", prologue, legacy_const_decl[i].code_snippet,
- epilogue);
-
- BytecodeGraphTester tester(isolate, zone, script.start(), "*");
- auto callable = tester.GetCallable<>();
- Handle<Object> return_value = callable().ToHandleChecked();
- CHECK(return_value->SameValue(*legacy_const_decl[i].return_value()));
- }
-
- FLAG_legacy_const = old_flag_legacy_const;
}
TEST(BytecodeGraphBuilderIllegalConstDeclaration) {
@@ -2911,43 +2904,6 @@ TEST(BytecodeGraphBuilderIllegalConstDeclaration) {
}
}
-TEST(BytecodeGraphBuilderLegacyConstDeclaration) {
- bool old_flag_legacy_const = FLAG_legacy_const;
- FLAG_legacy_const = true;
-
- HandleAndZoneScope scope;
- Isolate* isolate = scope.main_isolate();
- Zone* zone = scope.main_zone();
-
- ExpectedSnippet<0> snippets[] = {
- {"const x = (x = 10) + 3; return x;",
- {handle(Smi::FromInt(13), isolate)}},
- {"const x = 10; x = 20; return x;", {handle(Smi::FromInt(10), isolate)}},
- {"var a = 10;\n"
- "for (var i = 0; i < 10; ++i) {\n"
- " const x = i;\n" // Legacy constants are not block scoped.
- " a = a + x;\n"
- "}\n"
- "return a;\n",
- {handle(Smi::FromInt(10), isolate)}},
- {"const x = 20; eval('x = 10;'); return x;",
- {handle(Smi::FromInt(20), isolate)}},
- };
-
- for (size_t i = 0; i < arraysize(snippets); i++) {
- ScopedVector<char> script(1024);
- SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
- snippets[i].code_snippet, kFunctionName);
-
- BytecodeGraphTester tester(isolate, zone, script.start());
- auto callable = tester.GetCallable<>();
- Handle<Object> return_value = callable().ToHandleChecked();
- CHECK(return_value->SameValue(*snippets[i].return_value()));
- }
-
- FLAG_legacy_const = old_flag_legacy_const;
-}
-
TEST(BytecodeGraphBuilderDebuggerStatement) {
FLAG_expose_debug_as = "debug";
HandleAndZoneScope scope;
diff --git a/deps/v8/test/cctest/compiler/test-run-calls-to-external-references.cc b/deps/v8/test/cctest/compiler/test-run-calls-to-external-references.cc
new file mode 100644
index 0000000000..3b79cd8a44
--- /dev/null
+++ b/deps/v8/test/cctest/compiler/test-run-calls-to-external-references.cc
@@ -0,0 +1,531 @@
+// Copyright 2014 the V8 project authors. All rights reserved. Use of this
+// source code is governed by a BSD-style license that can be found in the
+// LICENSE file.
+
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/codegen-tester.h"
+#include "test/cctest/compiler/value-helper.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+template <typename T>
+void TestExternalReferenceRoundingFunction(
+ BufferedRawMachineAssemblerTester<int32_t>* m, ExternalReference ref,
+ T (*comparison)(T)) {
+ T parameter;
+
+ Node* function = m->ExternalConstant(ref);
+ m->CallCFunction1(MachineType::Pointer(), MachineType::Pointer(), function,
+ m->PointerConstant(&parameter));
+ m->Return(m->Int32Constant(4356));
+ FOR_FLOAT64_INPUTS(i) {
+ parameter = *i;
+ m->Call();
+ CHECK_DOUBLE_EQ(comparison(*i), parameter);
+ }
+}
+
+TEST(RunCallF32Trunc) {
+ BufferedRawMachineAssemblerTester<int32_t> m;
+ ExternalReference ref = ExternalReference::wasm_f32_trunc(m.isolate());
+ TestExternalReferenceRoundingFunction<float>(&m, ref, truncf);
+}
+
+TEST(RunCallF32Floor) {
+ BufferedRawMachineAssemblerTester<int32_t> m;
+ ExternalReference ref = ExternalReference::wasm_f32_floor(m.isolate());
+ TestExternalReferenceRoundingFunction<float>(&m, ref, floorf);
+}
+
+TEST(RunCallF32Ceil) {
+ BufferedRawMachineAssemblerTester<int32_t> m;
+ ExternalReference ref = ExternalReference::wasm_f32_ceil(m.isolate());
+ TestExternalReferenceRoundingFunction<float>(&m, ref, ceilf);
+}
+
+TEST(RunCallF32RoundTiesEven) {
+ BufferedRawMachineAssemblerTester<int32_t> m;
+ ExternalReference ref = ExternalReference::wasm_f32_nearest_int(m.isolate());
+ TestExternalReferenceRoundingFunction<float>(&m, ref, nearbyintf);
+}
+
+TEST(RunCallF64Trunc) {
+ BufferedRawMachineAssemblerTester<int32_t> m;
+ ExternalReference ref = ExternalReference::wasm_f64_trunc(m.isolate());
+ TestExternalReferenceRoundingFunction<double>(&m, ref, trunc);
+}
+
+TEST(RunCallF64Floor) {
+ BufferedRawMachineAssemblerTester<int32_t> m;
+ ExternalReference ref = ExternalReference::wasm_f64_floor(m.isolate());
+ TestExternalReferenceRoundingFunction<double>(&m, ref, floor);
+}
+
+TEST(RunCallF64Ceil) {
+ BufferedRawMachineAssemblerTester<int32_t> m;
+ ExternalReference ref = ExternalReference::wasm_f64_ceil(m.isolate());
+ TestExternalReferenceRoundingFunction<double>(&m, ref, ceil);
+}
+
+TEST(RunCallF64RoundTiesEven) {
+ BufferedRawMachineAssemblerTester<int32_t> m;
+ ExternalReference ref = ExternalReference::wasm_f64_nearest_int(m.isolate());
+ TestExternalReferenceRoundingFunction<double>(&m, ref, nearbyint);
+}
+
+TEST(RunCallInt64ToFloat32) {
+ BufferedRawMachineAssemblerTester<int32_t> m;
+ ExternalReference ref = ExternalReference::wasm_int64_to_float32(m.isolate());
+
+ int64_t input;
+ float output;
+
+ Node* function = m.ExternalConstant(ref);
+ m.CallCFunction2(MachineType::Pointer(), MachineType::Pointer(),
+ MachineType::Pointer(), function, m.PointerConstant(&input),
+ m.PointerConstant(&output));
+ m.Return(m.Int32Constant(4356));
+ FOR_INT64_INPUTS(i) {
+ input = *i;
+ m.Call();
+ CHECK_FLOAT_EQ(static_cast<float>(*i), output);
+ }
+}
+
+TEST(RunCallUint64ToFloat32) {
+ struct {
+ uint64_t input;
+ uint32_t expected;
+ } values[] = {{0x0, 0x0},
+ {0x1, 0x3f800000},
+ {0xffffffff, 0x4f800000},
+ {0x1b09788b, 0x4dd84bc4},
+ {0x4c5fce8, 0x4c98bf9d},
+ {0xcc0de5bf, 0x4f4c0de6},
+ {0x2, 0x40000000},
+ {0x3, 0x40400000},
+ {0x4, 0x40800000},
+ {0x5, 0x40a00000},
+ {0x8, 0x41000000},
+ {0x9, 0x41100000},
+ {0xffffffffffffffff, 0x5f800000},
+ {0xfffffffffffffffe, 0x5f800000},
+ {0xfffffffffffffffd, 0x5f800000},
+ {0x0, 0x0},
+ {0x100000000, 0x4f800000},
+ {0xffffffff00000000, 0x5f800000},
+ {0x1b09788b00000000, 0x5dd84bc4},
+ {0x4c5fce800000000, 0x5c98bf9d},
+ {0xcc0de5bf00000000, 0x5f4c0de6},
+ {0x200000000, 0x50000000},
+ {0x300000000, 0x50400000},
+ {0x400000000, 0x50800000},
+ {0x500000000, 0x50a00000},
+ {0x800000000, 0x51000000},
+ {0x900000000, 0x51100000},
+ {0x273a798e187937a3, 0x5e1ce9e6},
+ {0xece3af835495a16b, 0x5f6ce3b0},
+ {0xb668ecc11223344, 0x5d3668ed},
+ {0x9e, 0x431e0000},
+ {0x43, 0x42860000},
+ {0xaf73, 0x472f7300},
+ {0x116b, 0x458b5800},
+ {0x658ecc, 0x4acb1d98},
+ {0x2b3b4c, 0x4a2ced30},
+ {0x88776655, 0x4f087766},
+ {0x70000000, 0x4ee00000},
+ {0x7200000, 0x4ce40000},
+ {0x7fffffff, 0x4f000000},
+ {0x56123761, 0x4eac246f},
+ {0x7fffff00, 0x4efffffe},
+ {0x761c4761eeeeeeee, 0x5eec388f},
+ {0x80000000eeeeeeee, 0x5f000000},
+ {0x88888888dddddddd, 0x5f088889},
+ {0xa0000000dddddddd, 0x5f200000},
+ {0xddddddddaaaaaaaa, 0x5f5dddde},
+ {0xe0000000aaaaaaaa, 0x5f600000},
+ {0xeeeeeeeeeeeeeeee, 0x5f6eeeef},
+ {0xfffffffdeeeeeeee, 0x5f800000},
+ {0xf0000000dddddddd, 0x5f700000},
+ {0x7fffffdddddddd, 0x5b000000},
+ {0x3fffffaaaaaaaa, 0x5a7fffff},
+ {0x1fffffaaaaaaaa, 0x59fffffd},
+ {0xfffff, 0x497ffff0},
+ {0x7ffff, 0x48ffffe0},
+ {0x3ffff, 0x487fffc0},
+ {0x1ffff, 0x47ffff80},
+ {0xffff, 0x477fff00},
+ {0x7fff, 0x46fffe00},
+ {0x3fff, 0x467ffc00},
+ {0x1fff, 0x45fff800},
+ {0xfff, 0x457ff000},
+ {0x7ff, 0x44ffe000},
+ {0x3ff, 0x447fc000},
+ {0x1ff, 0x43ff8000},
+ {0x3fffffffffff, 0x56800000},
+ {0x1fffffffffff, 0x56000000},
+ {0xfffffffffff, 0x55800000},
+ {0x7ffffffffff, 0x55000000},
+ {0x3ffffffffff, 0x54800000},
+ {0x1ffffffffff, 0x54000000},
+ {0x8000008000000000, 0x5f000000},
+ {0x8000008000000001, 0x5f000001},
+ {0x8000008000000002, 0x5f000001},
+ {0x8000008000000004, 0x5f000001},
+ {0x8000008000000008, 0x5f000001},
+ {0x8000008000000010, 0x5f000001},
+ {0x8000008000000020, 0x5f000001},
+ {0x8000009000000000, 0x5f000001},
+ {0x800000a000000000, 0x5f000001},
+ {0x8000008000100000, 0x5f000001},
+ {0x8000000000000400, 0x5f000000},
+ {0x8000000000000401, 0x5f000000}};
+
+ BufferedRawMachineAssemblerTester<int32_t> m;
+ ExternalReference ref =
+ ExternalReference::wasm_uint64_to_float32(m.isolate());
+
+ uint64_t input;
+ float output;
+
+ Node* function = m.ExternalConstant(ref);
+ m.CallCFunction2(MachineType::Pointer(), MachineType::Pointer(),
+ MachineType::Pointer(), function, m.PointerConstant(&input),
+ m.PointerConstant(&output));
+ m.Return(m.Int32Constant(4356));
+
+ for (size_t i = 0; i < arraysize(values); i++) {
+ input = values[i].input;
+ m.Call();
+ CHECK_EQ(values[i].expected, bit_cast<uint32_t>(output));
+ }
+}
+
+TEST(RunCallInt64ToFloat64) {
+ BufferedRawMachineAssemblerTester<int32_t> m;
+ ExternalReference ref = ExternalReference::wasm_int64_to_float64(m.isolate());
+
+ int64_t input;
+ double output;
+
+ Node* function = m.ExternalConstant(ref);
+ m.CallCFunction2(MachineType::Pointer(), MachineType::Pointer(),
+ MachineType::Pointer(), function, m.PointerConstant(&input),
+ m.PointerConstant(&output));
+ m.Return(m.Int32Constant(4356));
+ FOR_INT64_INPUTS(i) {
+ input = *i;
+ m.Call();
+ CHECK_DOUBLE_EQ(static_cast<double>(*i), output);
+ }
+}
+
+TEST(RunCallUint64ToFloat64) {
+ struct {
+ uint64_t input;
+ uint64_t expected;
+ } values[] = {{0x0, 0x0},
+ {0x1, 0x3ff0000000000000},
+ {0xffffffff, 0x41efffffffe00000},
+ {0x1b09788b, 0x41bb09788b000000},
+ {0x4c5fce8, 0x419317f3a0000000},
+ {0xcc0de5bf, 0x41e981bcb7e00000},
+ {0x2, 0x4000000000000000},
+ {0x3, 0x4008000000000000},
+ {0x4, 0x4010000000000000},
+ {0x5, 0x4014000000000000},
+ {0x8, 0x4020000000000000},
+ {0x9, 0x4022000000000000},
+ {0xffffffffffffffff, 0x43f0000000000000},
+ {0xfffffffffffffffe, 0x43f0000000000000},
+ {0xfffffffffffffffd, 0x43f0000000000000},
+ {0x100000000, 0x41f0000000000000},
+ {0xffffffff00000000, 0x43efffffffe00000},
+ {0x1b09788b00000000, 0x43bb09788b000000},
+ {0x4c5fce800000000, 0x439317f3a0000000},
+ {0xcc0de5bf00000000, 0x43e981bcb7e00000},
+ {0x200000000, 0x4200000000000000},
+ {0x300000000, 0x4208000000000000},
+ {0x400000000, 0x4210000000000000},
+ {0x500000000, 0x4214000000000000},
+ {0x800000000, 0x4220000000000000},
+ {0x900000000, 0x4222000000000000},
+ {0x273a798e187937a3, 0x43c39d3cc70c3c9c},
+ {0xece3af835495a16b, 0x43ed9c75f06a92b4},
+ {0xb668ecc11223344, 0x43a6cd1d98224467},
+ {0x9e, 0x4063c00000000000},
+ {0x43, 0x4050c00000000000},
+ {0xaf73, 0x40e5ee6000000000},
+ {0x116b, 0x40b16b0000000000},
+ {0x658ecc, 0x415963b300000000},
+ {0x2b3b4c, 0x41459da600000000},
+ {0x88776655, 0x41e10eeccaa00000},
+ {0x70000000, 0x41dc000000000000},
+ {0x7200000, 0x419c800000000000},
+ {0x7fffffff, 0x41dfffffffc00000},
+ {0x56123761, 0x41d5848dd8400000},
+ {0x7fffff00, 0x41dfffffc0000000},
+ {0x761c4761eeeeeeee, 0x43dd8711d87bbbbc},
+ {0x80000000eeeeeeee, 0x43e00000001dddde},
+ {0x88888888dddddddd, 0x43e11111111bbbbc},
+ {0xa0000000dddddddd, 0x43e40000001bbbbc},
+ {0xddddddddaaaaaaaa, 0x43ebbbbbbbb55555},
+ {0xe0000000aaaaaaaa, 0x43ec000000155555},
+ {0xeeeeeeeeeeeeeeee, 0x43edddddddddddde},
+ {0xfffffffdeeeeeeee, 0x43efffffffbdddde},
+ {0xf0000000dddddddd, 0x43ee0000001bbbbc},
+ {0x7fffffdddddddd, 0x435ffffff7777777},
+ {0x3fffffaaaaaaaa, 0x434fffffd5555555},
+ {0x1fffffaaaaaaaa, 0x433fffffaaaaaaaa},
+ {0xfffff, 0x412ffffe00000000},
+ {0x7ffff, 0x411ffffc00000000},
+ {0x3ffff, 0x410ffff800000000},
+ {0x1ffff, 0x40fffff000000000},
+ {0xffff, 0x40efffe000000000},
+ {0x7fff, 0x40dfffc000000000},
+ {0x3fff, 0x40cfff8000000000},
+ {0x1fff, 0x40bfff0000000000},
+ {0xfff, 0x40affe0000000000},
+ {0x7ff, 0x409ffc0000000000},
+ {0x3ff, 0x408ff80000000000},
+ {0x1ff, 0x407ff00000000000},
+ {0x3fffffffffff, 0x42cfffffffffff80},
+ {0x1fffffffffff, 0x42bfffffffffff00},
+ {0xfffffffffff, 0x42affffffffffe00},
+ {0x7ffffffffff, 0x429ffffffffffc00},
+ {0x3ffffffffff, 0x428ffffffffff800},
+ {0x1ffffffffff, 0x427ffffffffff000},
+ {0x8000008000000000, 0x43e0000010000000},
+ {0x8000008000000001, 0x43e0000010000000},
+ {0x8000000000000400, 0x43e0000000000000},
+ {0x8000000000000401, 0x43e0000000000001},
+ {0x8000000000000402, 0x43e0000000000001},
+ {0x8000000000000404, 0x43e0000000000001},
+ {0x8000000000000408, 0x43e0000000000001},
+ {0x8000000000000410, 0x43e0000000000001},
+ {0x8000000000000420, 0x43e0000000000001},
+ {0x8000000000000440, 0x43e0000000000001},
+ {0x8000000000000480, 0x43e0000000000001},
+ {0x8000000000000500, 0x43e0000000000001},
+ {0x8000000000000600, 0x43e0000000000001}};
+
+ BufferedRawMachineAssemblerTester<int32_t> m;
+ ExternalReference ref =
+ ExternalReference::wasm_uint64_to_float64(m.isolate());
+
+ uint64_t input;
+ double output;
+
+ Node* function = m.ExternalConstant(ref);
+ m.CallCFunction2(MachineType::Pointer(), MachineType::Pointer(),
+ MachineType::Pointer(), function, m.PointerConstant(&input),
+ m.PointerConstant(&output));
+ m.Return(m.Int32Constant(4356));
+
+ for (size_t i = 0; i < arraysize(values); i++) {
+ input = values[i].input;
+ m.Call();
+ CHECK_EQ(values[i].expected, bit_cast<uint64_t>(output));
+ }
+}
+
+TEST(RunCallFloat32ToInt64) {
+ BufferedRawMachineAssemblerTester<int32_t> m;
+ ExternalReference ref = ExternalReference::wasm_float32_to_int64(m.isolate());
+
+ float input;
+ int64_t output;
+
+ Node* function = m.ExternalConstant(ref);
+ m.Return(m.CallCFunction2(
+ MachineType::Int32(), MachineType::Pointer(), MachineType::Pointer(),
+ function, m.PointerConstant(&input), m.PointerConstant(&output)));
+ FOR_FLOAT32_INPUTS(i) {
+ input = *i;
+ if (*i >= static_cast<float>(std::numeric_limits<int64_t>::min()) &&
+ *i < static_cast<float>(std::numeric_limits<int64_t>::max())) {
+ CHECK_EQ(1, m.Call());
+ CHECK_EQ(static_cast<int64_t>(*i), output);
+ } else {
+ CHECK_EQ(0, m.Call());
+ }
+ }
+}
+
+TEST(RunCallFloat32ToUint64) {
+ BufferedRawMachineAssemblerTester<int32_t> m;
+ ExternalReference ref =
+ ExternalReference::wasm_float32_to_uint64(m.isolate());
+
+ float input;
+ uint64_t output;
+
+ Node* function = m.ExternalConstant(ref);
+ m.Return(m.CallCFunction2(
+ MachineType::Int32(), MachineType::Pointer(), MachineType::Pointer(),
+ function, m.PointerConstant(&input), m.PointerConstant(&output)));
+ FOR_FLOAT32_INPUTS(i) {
+ input = *i;
+ if (*i > -1.0 &&
+ *i < static_cast<float>(std::numeric_limits<uint64_t>::max())) {
+ CHECK_EQ(1, m.Call());
+ CHECK_EQ(static_cast<uint64_t>(*i), output);
+ } else {
+ CHECK_EQ(0, m.Call());
+ }
+ }
+}
+
+TEST(RunCallFloat64ToInt64) {
+ BufferedRawMachineAssemblerTester<int32_t> m;
+ ExternalReference ref = ExternalReference::wasm_float64_to_int64(m.isolate());
+
+ double input;
+ int64_t output;
+
+ Node* function = m.ExternalConstant(ref);
+ m.Return(m.CallCFunction2(
+ MachineType::Int32(), MachineType::Pointer(), MachineType::Pointer(),
+ function, m.PointerConstant(&input), m.PointerConstant(&output)));
+ FOR_FLOAT64_INPUTS(i) {
+ input = *i;
+ if (*i >= static_cast<double>(std::numeric_limits<int64_t>::min()) &&
+ *i < static_cast<double>(std::numeric_limits<int64_t>::max())) {
+ CHECK_EQ(1, m.Call());
+ CHECK_EQ(static_cast<int64_t>(*i), output);
+ } else {
+ CHECK_EQ(0, m.Call());
+ }
+ }
+}
+
+TEST(RunCallFloat64ToUint64) {
+ BufferedRawMachineAssemblerTester<int32_t> m;
+ ExternalReference ref =
+ ExternalReference::wasm_float64_to_uint64(m.isolate());
+
+ double input;
+ uint64_t output;
+
+ Node* function = m.ExternalConstant(ref);
+ m.Return(m.CallCFunction2(
+ MachineType::Int32(), MachineType::Pointer(), MachineType::Pointer(),
+ function, m.PointerConstant(&input), m.PointerConstant(&output)));
+ FOR_FLOAT64_INPUTS(i) {
+ input = *i;
+ if (*i > -1.0 &&
+ *i < static_cast<double>(std::numeric_limits<uint64_t>::max())) {
+ CHECK_EQ(1, m.Call());
+ CHECK_EQ(static_cast<uint64_t>(*i), output);
+ } else {
+ CHECK_EQ(0, m.Call());
+ }
+ }
+}
+
+TEST(RunCallInt64Div) {
+ BufferedRawMachineAssemblerTester<int32_t> m;
+ ExternalReference ref = ExternalReference::wasm_int64_div(m.isolate());
+
+ int64_t dst;
+ int64_t src;
+
+ Node* function = m.ExternalConstant(ref);
+ m.Return(m.CallCFunction2(MachineType::Int32(), MachineType::Pointer(),
+ MachineType::Pointer(), function,
+ m.PointerConstant(&dst), m.PointerConstant(&src)));
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) {
+ dst = *i;
+ src = *j;
+ if (src == 0) {
+ CHECK_EQ(0, m.Call());
+ } else if (src == -1 && dst == std::numeric_limits<int64_t>::min()) {
+ CHECK_EQ(-1, m.Call());
+ } else {
+ CHECK_EQ(1, m.Call());
+ CHECK_EQ(*i / *j, dst);
+ }
+ }
+ }
+}
+
+TEST(RunCallInt64Mod) {
+ BufferedRawMachineAssemblerTester<int32_t> m;
+ ExternalReference ref = ExternalReference::wasm_int64_mod(m.isolate());
+
+ int64_t dst;
+ int64_t src;
+
+ Node* function = m.ExternalConstant(ref);
+ m.Return(m.CallCFunction2(MachineType::Int32(), MachineType::Pointer(),
+ MachineType::Pointer(), function,
+ m.PointerConstant(&dst), m.PointerConstant(&src)));
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) {
+ dst = *i;
+ src = *j;
+ if (src == 0) {
+ CHECK_EQ(0, m.Call());
+ } else {
+ CHECK_EQ(1, m.Call());
+ CHECK_EQ(*i % *j, dst);
+ }
+ }
+ }
+}
+
+TEST(RunCallUint64Div) {
+ BufferedRawMachineAssemblerTester<int32_t> m;
+ ExternalReference ref = ExternalReference::wasm_uint64_div(m.isolate());
+
+ uint64_t dst;
+ uint64_t src;
+
+ Node* function = m.ExternalConstant(ref);
+ m.Return(m.CallCFunction2(MachineType::Int32(), MachineType::Pointer(),
+ MachineType::Pointer(), function,
+ m.PointerConstant(&dst), m.PointerConstant(&src)));
+ FOR_UINT64_INPUTS(i) {
+ FOR_UINT64_INPUTS(j) {
+ dst = *i;
+ src = *j;
+ if (src == 0) {
+ CHECK_EQ(0, m.Call());
+ } else {
+ CHECK_EQ(1, m.Call());
+ CHECK_EQ(*i / *j, dst);
+ }
+ }
+ }
+}
+
+TEST(RunCallUint64Mod) {
+ BufferedRawMachineAssemblerTester<int32_t> m;
+ ExternalReference ref = ExternalReference::wasm_uint64_mod(m.isolate());
+
+ uint64_t dst;
+ uint64_t src;
+
+ Node* function = m.ExternalConstant(ref);
+ m.Return(m.CallCFunction2(MachineType::Int32(), MachineType::Pointer(),
+ MachineType::Pointer(), function,
+ m.PointerConstant(&dst), m.PointerConstant(&src)));
+ FOR_UINT64_INPUTS(i) {
+ FOR_UINT64_INPUTS(j) {
+ dst = *i;
+ src = *j;
+ if (src == 0) {
+ CHECK_EQ(0, m.Call());
+ } else {
+ CHECK_EQ(1, m.Call());
+ CHECK_EQ(*i % *j, dst);
+ }
+ }
+ }
+}
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/compiler/test-run-inlining.cc b/deps/v8/test/cctest/compiler/test-run-inlining.cc
index f332d7499b..234060c7f7 100644
--- a/deps/v8/test/cctest/compiler/test-run-inlining.cc
+++ b/deps/v8/test/cctest/compiler/test-run-inlining.cc
@@ -268,7 +268,8 @@ TEST(InlineTwice) {
"(function () {"
" var x = 42;"
" function bar(s) { AssertInlineCount(2); return x + s; };"
- " return (function (s,t) { return bar(s) + bar(t); });"
+ " function foo(s, t) { return bar(s) + bar(t); };"
+ " return foo;"
"})();",
kInlineFlags);
@@ -542,33 +543,6 @@ TEST(InlineNestedBuiltin) {
}
-TEST(StrongModeArity) {
- FLAG_strong_mode = true;
- FunctionTester T(
- "(function () {"
- " function foo(x, y) { 'use strong'; return x; }"
- " function bar(x, y) { return foo(x); }"
- " return bar;"
- "})();",
- kInlineFlags);
- T.CheckThrows(T.undefined(), T.undefined());
-}
-
-
-TEST(StrongModeArityOuter) {
- FLAG_strong_mode = true;
- FunctionTester T(
- "(function () {"
- " 'use strong';"
- " function foo(x, y) { return x; }"
- " function bar(x, y) { return foo(x); }"
- " return bar;"
- "})();",
- kInlineFlags);
- T.CheckThrows(T.undefined(), T.undefined());
-}
-
-
TEST(InlineSelfRecursive) {
FunctionTester T(
"(function () {"
diff --git a/deps/v8/test/cctest/compiler/test-run-intrinsics.cc b/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
index 6e9ebf2282..6aa5f391ad 100644
--- a/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
+++ b/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
@@ -34,31 +34,6 @@ TEST(ClassOf) {
}
-#define COUNTER_NAME "hurz"
-
-static int* LookupCounter(const char* name) {
- static int counter = 1234;
- return strcmp(name, COUNTER_NAME) == 0 ? &counter : nullptr;
-}
-
-
-TEST(IncrementStatsCounter) {
- FLAG_native_code_counters = true;
- reinterpret_cast<v8::Isolate*>(CcTest::InitIsolateOnce())
- ->SetCounterFunction(LookupCounter);
- FunctionTester T(
- "(function() { %_IncrementStatsCounter('" COUNTER_NAME "'); })", flags);
- StatsCounter counter(T.main_isolate(), COUNTER_NAME);
- if (!counter.Enabled()) return;
-
- int old_value = *counter.GetInternalPointer();
- T.CheckCall(T.undefined());
- CHECK_EQ(old_value + 1, *counter.GetInternalPointer());
-}
-
-#undef COUNTER_NAME
-
-
TEST(IsArray) {
FunctionTester T("(function(a) { return %_IsArray(a); })", flags);
diff --git a/deps/v8/test/cctest/compiler/test-run-jscalls.cc b/deps/v8/test/cctest/compiler/test-run-jscalls.cc
index c28295857e..12566c242a 100644
--- a/deps/v8/test/cctest/compiler/test-run-jscalls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jscalls.cc
@@ -129,12 +129,18 @@ TEST(ConstructorCall) {
}
-TEST(RuntimeCallCPP2) {
+TEST(RuntimeCall) {
FLAG_allow_natives_syntax = true;
- FunctionTester T("(function(a,b) { return %NumberImul(a, b); })");
+ FunctionTester T("(function(a) { return %IsJSReceiver(a); })");
- T.CheckCall(T.Val(2730), T.Val(42), T.Val(65));
- T.CheckCall(T.Val(798), T.Val(42), T.Val(19));
+ T.CheckCall(T.false_value(), T.Val(23), T.undefined());
+ T.CheckCall(T.false_value(), T.Val(4.2), T.undefined());
+ T.CheckCall(T.false_value(), T.Val("str"), T.undefined());
+ T.CheckCall(T.false_value(), T.true_value(), T.undefined());
+ T.CheckCall(T.false_value(), T.false_value(), T.undefined());
+ T.CheckCall(T.false_value(), T.undefined(), T.undefined());
+ T.CheckCall(T.true_value(), T.NewObject("({})"), T.undefined());
+ T.CheckCall(T.true_value(), T.NewObject("([])"), T.undefined());
}
diff --git a/deps/v8/test/cctest/compiler/test-run-jsobjects.cc b/deps/v8/test/cctest/compiler/test-run-jsobjects.cc
index 4bf10ca8fe..80a918134f 100644
--- a/deps/v8/test/cctest/compiler/test-run-jsobjects.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jsobjects.cc
@@ -16,7 +16,7 @@ TEST(ArgumentsMapped) {
CHECK(arguments->IsJSObject() && !arguments->IsJSArray());
CHECK(JSObject::cast(*arguments)->HasSloppyArgumentsElements());
Handle<String> l = T.isolate->factory()->length_string();
- Handle<Object> length = JSObject::GetProperty(arguments, l).ToHandleChecked();
+ Handle<Object> length = Object::GetProperty(arguments, l).ToHandleChecked();
CHECK_EQ(4, length->Number());
}
@@ -29,7 +29,7 @@ TEST(ArgumentsUnmapped) {
CHECK(arguments->IsJSObject() && !arguments->IsJSArray());
CHECK(!JSObject::cast(*arguments)->HasSloppyArgumentsElements());
Handle<String> l = T.isolate->factory()->length_string();
- Handle<Object> length = JSObject::GetProperty(arguments, l).ToHandleChecked();
+ Handle<Object> length = Object::GetProperty(arguments, l).ToHandleChecked();
CHECK_EQ(4, length->Number());
}
@@ -42,7 +42,7 @@ TEST(ArgumentsRest) {
CHECK(arguments->IsJSObject() && arguments->IsJSArray());
CHECK(!JSObject::cast(*arguments)->HasSloppyArgumentsElements());
Handle<String> l = T.isolate->factory()->length_string();
- Handle<Object> length = JSObject::GetProperty(arguments, l).ToHandleChecked();
+ Handle<Object> length = Object::GetProperty(arguments, l).ToHandleChecked();
CHECK_EQ(3, length->Number());
}
diff --git a/deps/v8/test/cctest/compiler/test-run-jsops.cc b/deps/v8/test/cctest/compiler/test-run-jsops.cc
index 9a2c4679a0..b68fc1cdde 100644
--- a/deps/v8/test/cctest/compiler/test-run-jsops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jsops.cc
@@ -394,17 +394,6 @@ TEST(GlobalLoad) {
}
-TEST(GlobalStoreSloppy) {
- FLAG_legacy_const = true;
- FunctionTester T("(function(a,b) { g = a + b; return g; })");
-
- T.CheckCall(T.Val(33), T.Val(22), T.Val(11));
- CompileRun("delete g");
- CompileRun("const g = 23");
- T.CheckCall(T.Val(23), T.Val(55), T.Val(44));
-}
-
-
TEST(GlobalStoreStrict) {
FunctionTester T("(function(a,b) { 'use strict'; g = a + b; return g; })");
diff --git a/deps/v8/test/cctest/compiler/test-run-machops.cc b/deps/v8/test/cctest/compiler/test-run-machops.cc
index fba9e0e1a5..2bfe1244be 100644
--- a/deps/v8/test/cctest/compiler/test-run-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-machops.cc
@@ -1197,7 +1197,7 @@ TEST(RunLoadStoreFloat32Offset) {
p1 = *j;
p2 = *j - 5;
CHECK_EQ(magic, m.Call());
- CheckDoubleEq(p1, p2);
+ CHECK_DOUBLE_EQ(p1, p2);
}
}
}
@@ -1224,7 +1224,7 @@ TEST(RunLoadStoreFloat64Offset) {
p1 = *j;
p2 = *j - 5;
CHECK_EQ(magic, m.Call());
- CheckDoubleEq(p1, p2);
+ CHECK_DOUBLE_EQ(p1, p2);
}
}
}
@@ -3658,10 +3658,7 @@ TEST(RunFloat32Add) {
m.Return(m.Float32Add(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT32_INPUTS(i) {
- FOR_FLOAT32_INPUTS(j) {
- volatile float expected = *i + *j;
- CheckFloatEq(expected, m.Call(*i, *j));
- }
+ FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(*i + *j, m.Call(*i, *j)); }
}
}
@@ -3672,10 +3669,7 @@ TEST(RunFloat32Sub) {
m.Return(m.Float32Sub(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT32_INPUTS(i) {
- FOR_FLOAT32_INPUTS(j) {
- volatile float expected = *i - *j;
- CheckFloatEq(expected, m.Call(*i, *j));
- }
+ FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(*i - *j, m.Call(*i, *j)); }
}
}
@@ -3686,10 +3680,7 @@ TEST(RunFloat32Mul) {
m.Return(m.Float32Mul(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT32_INPUTS(i) {
- FOR_FLOAT32_INPUTS(j) {
- volatile float expected = *i * *j;
- CheckFloatEq(expected, m.Call(*i, *j));
- }
+ FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(*i * *j, m.Call(*i, *j)); }
}
}
@@ -3700,10 +3691,7 @@ TEST(RunFloat32Div) {
m.Return(m.Float32Div(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT32_INPUTS(i) {
- FOR_FLOAT32_INPUTS(j) {
- volatile float expected = *i / *j;
- CheckFloatEq(expected, m.Call(*i, *j));
- }
+ FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(*i / *j, m.Call(*i, *j)); }
}
}
@@ -3714,10 +3702,7 @@ TEST(RunFloat64Add) {
m.Return(m.Float64Add(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT64_INPUTS(i) {
- FOR_FLOAT64_INPUTS(j) {
- volatile double expected = *i + *j;
- CheckDoubleEq(expected, m.Call(*i, *j));
- }
+ FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(*i + *j, m.Call(*i, *j)); }
}
}
@@ -3728,10 +3713,7 @@ TEST(RunFloat64Sub) {
m.Return(m.Float64Sub(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT64_INPUTS(i) {
- FOR_FLOAT64_INPUTS(j) {
- volatile double expected = *i - *j;
- CheckDoubleEq(expected, m.Call(*i, *j));
- }
+ FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(*i - *j, m.Call(*i, *j)); }
}
}
@@ -3742,10 +3724,7 @@ TEST(RunFloat64Mul) {
m.Return(m.Float64Mul(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT64_INPUTS(i) {
- FOR_FLOAT64_INPUTS(j) {
- volatile double expected = *i * *j;
- CheckDoubleEq(expected, m.Call(*i, *j));
- }
+ FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(*i * *j, m.Call(*i, *j)); }
}
}
@@ -3756,10 +3735,7 @@ TEST(RunFloat64Div) {
m.Return(m.Float64Div(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT64_INPUTS(i) {
- FOR_FLOAT64_INPUTS(j) {
- volatile double expected = *i / *j;
- CheckDoubleEq(expected, m.Call(*i, *j));
- }
+ FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(*i / *j, m.Call(*i, *j)); }
}
}
@@ -3770,7 +3746,7 @@ TEST(RunFloat64Mod) {
m.Return(m.Float64Mod(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT64_INPUTS(i) {
- FOR_FLOAT64_INPUTS(j) { CheckDoubleEq(modulo(*i, *j), m.Call(*i, *j)); }
+ FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(modulo(*i, *j), m.Call(*i, *j)); }
}
}
@@ -3816,10 +3792,7 @@ TEST(RunFloat32AddP) {
bt.AddReturn(m.Float32Add(bt.param0, bt.param1));
FOR_FLOAT32_INPUTS(pl) {
- FOR_FLOAT32_INPUTS(pr) {
- float expected = *pl + *pr;
- CheckFloatEq(expected, bt.call(*pl, *pr));
- }
+ FOR_FLOAT32_INPUTS(pr) { CHECK_FLOAT_EQ(*pl + *pr, bt.call(*pl, *pr)); }
}
}
@@ -3831,10 +3804,7 @@ TEST(RunFloat64AddP) {
bt.AddReturn(m.Float64Add(bt.param0, bt.param1));
FOR_FLOAT64_INPUTS(pl) {
- FOR_FLOAT64_INPUTS(pr) {
- double expected = *pl + *pr;
- CheckDoubleEq(expected, bt.call(*pl, *pr));
- }
+ FOR_FLOAT64_INPUTS(pr) { CHECK_DOUBLE_EQ(*pl + *pr, bt.call(*pl, *pr)); }
}
}
@@ -3848,8 +3818,7 @@ TEST(RunFloa32MaxP) {
FOR_FLOAT32_INPUTS(pl) {
FOR_FLOAT32_INPUTS(pr) {
- double expected = *pl > *pr ? *pl : *pr;
- CheckDoubleEq(expected, bt.call(*pl, *pr));
+ CHECK_DOUBLE_EQ(*pl > *pr ? *pl : *pr, bt.call(*pl, *pr));
}
}
}
@@ -3864,8 +3833,7 @@ TEST(RunFloat64MaxP) {
FOR_FLOAT64_INPUTS(pl) {
FOR_FLOAT64_INPUTS(pr) {
- double expected = *pl > *pr ? *pl : *pr;
- CheckDoubleEq(expected, bt.call(*pl, *pr));
+ CHECK_DOUBLE_EQ(*pl > *pr ? *pl : *pr, bt.call(*pl, *pr));
}
}
}
@@ -3880,8 +3848,7 @@ TEST(RunFloat32MinP) {
FOR_FLOAT32_INPUTS(pl) {
FOR_FLOAT32_INPUTS(pr) {
- double expected = *pl < *pr ? *pl : *pr;
- CheckDoubleEq(expected, bt.call(*pl, *pr));
+ CHECK_DOUBLE_EQ(*pl < *pr ? *pl : *pr, bt.call(*pl, *pr));
}
}
}
@@ -3896,8 +3863,7 @@ TEST(RunFloat64MinP) {
FOR_FLOAT64_INPUTS(pl) {
FOR_FLOAT64_INPUTS(pr) {
- double expected = *pl < *pr ? *pl : *pr;
- CheckDoubleEq(expected, bt.call(*pl, *pr));
+ CHECK_DOUBLE_EQ(*pl < *pr ? *pl : *pr, bt.call(*pl, *pr));
}
}
}
@@ -3910,10 +3876,7 @@ TEST(RunFloat32SubP) {
bt.AddReturn(m.Float32Sub(bt.param0, bt.param1));
FOR_FLOAT32_INPUTS(pl) {
- FOR_FLOAT32_INPUTS(pr) {
- float expected = *pl - *pr;
- CheckFloatEq(expected, bt.call(*pl, *pr));
- }
+ FOR_FLOAT32_INPUTS(pr) { CHECK_FLOAT_EQ(*pl - *pr, bt.call(*pl, *pr)); }
}
}
@@ -3923,10 +3886,7 @@ TEST(RunFloat32SubImm1) {
BufferedRawMachineAssemblerTester<float> m(MachineType::Float32());
m.Return(m.Float32Sub(m.Float32Constant(*i), m.Parameter(0)));
- FOR_FLOAT32_INPUTS(j) {
- volatile float expected = *i - *j;
- CheckFloatEq(expected, m.Call(*j));
- }
+ FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(*i - *j, m.Call(*j)); }
}
}
@@ -3936,10 +3896,7 @@ TEST(RunFloat32SubImm2) {
BufferedRawMachineAssemblerTester<float> m(MachineType::Float32());
m.Return(m.Float32Sub(m.Parameter(0), m.Float32Constant(*i)));
- FOR_FLOAT32_INPUTS(j) {
- volatile float expected = *j - *i;
- CheckFloatEq(expected, m.Call(*j));
- }
+ FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(*j - *i, m.Call(*j)); }
}
}
@@ -3949,7 +3906,7 @@ TEST(RunFloat64SubImm1) {
BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
m.Return(m.Float64Sub(m.Float64Constant(*i), m.Parameter(0)));
- FOR_FLOAT64_INPUTS(j) { CheckFloatEq(*i - *j, m.Call(*j)); }
+ FOR_FLOAT64_INPUTS(j) { CHECK_FLOAT_EQ(*i - *j, m.Call(*j)); }
}
}
@@ -3959,7 +3916,7 @@ TEST(RunFloat64SubImm2) {
BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
m.Return(m.Float64Sub(m.Parameter(0), m.Float64Constant(*i)));
- FOR_FLOAT64_INPUTS(j) { CheckFloatEq(*j - *i, m.Call(*j)); }
+ FOR_FLOAT64_INPUTS(j) { CHECK_FLOAT_EQ(*j - *i, m.Call(*j)); }
}
}
@@ -3973,7 +3930,7 @@ TEST(RunFloat64SubP) {
FOR_FLOAT64_INPUTS(pl) {
FOR_FLOAT64_INPUTS(pr) {
double expected = *pl - *pr;
- CheckDoubleEq(expected, bt.call(*pl, *pr));
+ CHECK_DOUBLE_EQ(expected, bt.call(*pl, *pr));
}
}
}
@@ -3986,10 +3943,7 @@ TEST(RunFloat32MulP) {
bt.AddReturn(m.Float32Mul(bt.param0, bt.param1));
FOR_FLOAT32_INPUTS(pl) {
- FOR_FLOAT32_INPUTS(pr) {
- float expected = *pl * *pr;
- CheckFloatEq(expected, bt.call(*pl, *pr));
- }
+ FOR_FLOAT32_INPUTS(pr) { CHECK_FLOAT_EQ(*pl * *pr, bt.call(*pl, *pr)); }
}
}
@@ -4003,7 +3957,7 @@ TEST(RunFloat64MulP) {
FOR_FLOAT64_INPUTS(pl) {
FOR_FLOAT64_INPUTS(pr) {
double expected = *pl * *pr;
- CheckDoubleEq(expected, bt.call(*pl, *pr));
+ CHECK_DOUBLE_EQ(expected, bt.call(*pl, *pr));
}
}
}
@@ -4018,7 +3972,7 @@ TEST(RunFloat64MulAndFloat64Add1) {
FOR_FLOAT64_INPUTS(i) {
FOR_FLOAT64_INPUTS(j) {
FOR_FLOAT64_INPUTS(k) {
- CheckDoubleEq((*i * *j) + *k, m.Call(*i, *j, *k));
+ CHECK_DOUBLE_EQ((*i * *j) + *k, m.Call(*i, *j, *k));
}
}
}
@@ -4034,7 +3988,7 @@ TEST(RunFloat64MulAndFloat64Add2) {
FOR_FLOAT64_INPUTS(i) {
FOR_FLOAT64_INPUTS(j) {
FOR_FLOAT64_INPUTS(k) {
- CheckDoubleEq(*i + (*j * *k), m.Call(*i, *j, *k));
+ CHECK_DOUBLE_EQ(*i + (*j * *k), m.Call(*i, *j, *k));
}
}
}
@@ -4050,7 +4004,7 @@ TEST(RunFloat64MulAndFloat64Sub1) {
FOR_FLOAT64_INPUTS(i) {
FOR_FLOAT64_INPUTS(j) {
FOR_FLOAT64_INPUTS(k) {
- CheckDoubleEq((*i * *j) - *k, m.Call(*i, *j, *k));
+ CHECK_DOUBLE_EQ((*i * *j) - *k, m.Call(*i, *j, *k));
}
}
}
@@ -4066,7 +4020,7 @@ TEST(RunFloat64MulAndFloat64Sub2) {
FOR_FLOAT64_INPUTS(i) {
FOR_FLOAT64_INPUTS(j) {
FOR_FLOAT64_INPUTS(k) {
- CheckDoubleEq(*i - (*j * *k), m.Call(*i, *j, *k));
+ CHECK_DOUBLE_EQ(*i - (*j * *k), m.Call(*i, *j, *k));
}
}
}
@@ -4078,7 +4032,7 @@ TEST(RunFloat64MulImm1) {
BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
m.Return(m.Float64Mul(m.Float64Constant(*i), m.Parameter(0)));
- FOR_FLOAT64_INPUTS(j) { CheckFloatEq(*i * *j, m.Call(*j)); }
+ FOR_FLOAT64_INPUTS(j) { CHECK_FLOAT_EQ(*i * *j, m.Call(*j)); }
}
}
@@ -4088,7 +4042,7 @@ TEST(RunFloat64MulImm2) {
BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
m.Return(m.Float64Mul(m.Parameter(0), m.Float64Constant(*i)));
- FOR_FLOAT64_INPUTS(j) { CheckFloatEq(*j * *i, m.Call(*j)); }
+ FOR_FLOAT64_INPUTS(j) { CHECK_FLOAT_EQ(*j * *i, m.Call(*j)); }
}
}
@@ -4100,10 +4054,7 @@ TEST(RunFloat32DivP) {
bt.AddReturn(m.Float32Div(bt.param0, bt.param1));
FOR_FLOAT32_INPUTS(pl) {
- FOR_FLOAT32_INPUTS(pr) {
- float expected = *pl / *pr;
- CheckFloatEq(expected, bt.call(*pl, *pr));
- }
+ FOR_FLOAT32_INPUTS(pr) { CHECK_FLOAT_EQ(*pl / *pr, bt.call(*pl, *pr)); }
}
}
@@ -4115,10 +4066,7 @@ TEST(RunFloat64DivP) {
bt.AddReturn(m.Float64Div(bt.param0, bt.param1));
FOR_FLOAT64_INPUTS(pl) {
- FOR_FLOAT64_INPUTS(pr) {
- double expected = *pl / *pr;
- CheckDoubleEq(expected, bt.call(*pl, *pr));
- }
+ FOR_FLOAT64_INPUTS(pr) { CHECK_DOUBLE_EQ(*pl / *pr, bt.call(*pl, *pr)); }
}
}
@@ -4130,11 +4078,7 @@ TEST(RunFloat64ModP) {
bt.AddReturn(m.Float64Mod(bt.param0, bt.param1));
FOR_FLOAT64_INPUTS(i) {
- FOR_FLOAT64_INPUTS(j) {
- double expected = modulo(*i, *j);
- double found = bt.call(*i, *j);
- CheckDoubleEq(expected, found);
- }
+ FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(modulo(*i, *j), bt.call(*i, *j)); }
}
}
@@ -4143,7 +4087,7 @@ TEST(RunChangeInt32ToFloat64_A) {
int32_t magic = 0x986234;
BufferedRawMachineAssemblerTester<double> m;
m.Return(m.ChangeInt32ToFloat64(m.Int32Constant(magic)));
- CheckDoubleEq(static_cast<double>(magic), m.Call());
+ CHECK_DOUBLE_EQ(static_cast<double>(magic), m.Call());
}
@@ -4151,7 +4095,7 @@ TEST(RunChangeInt32ToFloat64_B) {
BufferedRawMachineAssemblerTester<double> m(MachineType::Int32());
m.Return(m.ChangeInt32ToFloat64(m.Parameter(0)));
- FOR_INT32_INPUTS(i) { CheckDoubleEq(static_cast<double>(*i), m.Call(*i)); }
+ FOR_INT32_INPUTS(i) { CHECK_DOUBLE_EQ(static_cast<double>(*i), m.Call(*i)); }
}
@@ -4159,7 +4103,7 @@ TEST(RunChangeUint32ToFloat64) {
BufferedRawMachineAssemblerTester<double> m(MachineType::Uint32());
m.Return(m.ChangeUint32ToFloat64(m.Parameter(0)));
- FOR_UINT32_INPUTS(i) { CheckDoubleEq(static_cast<double>(*i), m.Call(*i)); }
+ FOR_UINT32_INPUTS(i) { CHECK_DOUBLE_EQ(static_cast<double>(*i), m.Call(*i)); }
}
@@ -4169,7 +4113,7 @@ TEST(RunTruncateFloat32ToInt32) {
FOR_FLOAT32_INPUTS(i) {
if (*i <= static_cast<float>(std::numeric_limits<int32_t>::max()) &&
*i >= static_cast<float>(std::numeric_limits<int32_t>::min())) {
- CheckFloatEq(static_cast<int32_t>(*i), m.Call(*i));
+ CHECK_FLOAT_EQ(static_cast<int32_t>(*i), m.Call(*i));
}
}
}
@@ -4193,7 +4137,7 @@ TEST(RunTruncateFloat32ToUint32) {
FOR_FLOAT32_INPUTS(i) {
if (*i <= static_cast<float>(std::numeric_limits<uint32_t>::max()) &&
*i >= static_cast<float>(std::numeric_limits<uint32_t>::min())) {
- CheckFloatEq(static_cast<uint32_t>(*i), m.Call(*i));
+ CHECK_FLOAT_EQ(static_cast<uint32_t>(*i), m.Call(*i));
}
}
}
@@ -4250,9 +4194,262 @@ TEST(RunTruncateFloat64ToFloat32) {
m.Return(m.TruncateFloat64ToFloat32(m.Parameter(0)));
- FOR_FLOAT64_INPUTS(i) { CheckFloatEq(DoubleToFloat32(*i), m.Call(*i)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_FLOAT_EQ(DoubleToFloat32(*i), m.Call(*i)); }
+}
+
+uint64_t ToInt64(uint32_t low, uint32_t high) {
+ return (static_cast<uint64_t>(high) << 32) | static_cast<uint64_t>(low);
+}
+
+#if V8_TARGET_ARCH_32_BIT && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_X87
+TEST(RunInt32PairAdd) {
+ BufferedRawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32(),
+ MachineType::Uint32());
+
+ uint32_t high;
+ uint32_t low;
+
+ Node* PairAdd = m.Int32PairAdd(m.Parameter(0), m.Parameter(1), m.Parameter(2),
+ m.Parameter(3));
+
+ m.StoreToPointer(&low, MachineRepresentation::kWord32,
+ m.Projection(0, PairAdd));
+ m.StoreToPointer(&high, MachineRepresentation::kWord32,
+ m.Projection(1, PairAdd));
+ m.Return(m.Int32Constant(74));
+
+ FOR_UINT64_INPUTS(i) {
+ FOR_UINT64_INPUTS(j) {
+ m.Call(static_cast<uint32_t>(*i & 0xffffffff),
+ static_cast<uint32_t>(*i >> 32),
+ static_cast<uint32_t>(*j & 0xffffffff),
+ static_cast<uint32_t>(*j >> 32));
+ CHECK_EQ(*i + *j, ToInt64(low, high));
+ }
+ }
+}
+
+void TestInt32PairAddWithSharedInput(int a, int b, int c, int d) {
+ BufferedRawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
+ MachineType::Uint32());
+
+ uint32_t high;
+ uint32_t low;
+
+ Node* PairAdd = m.Int32PairAdd(m.Parameter(a), m.Parameter(b), m.Parameter(c),
+ m.Parameter(d));
+
+ m.StoreToPointer(&low, MachineRepresentation::kWord32,
+ m.Projection(0, PairAdd));
+ m.StoreToPointer(&high, MachineRepresentation::kWord32,
+ m.Projection(1, PairAdd));
+ m.Return(m.Int32Constant(74));
+
+ FOR_UINT32_INPUTS(i) {
+ FOR_UINT32_INPUTS(j) {
+ m.Call(*i, *j);
+ uint32_t inputs[] = {*i, *j};
+ CHECK_EQ(ToInt64(inputs[a], inputs[b]) + ToInt64(inputs[c], inputs[d]),
+ ToInt64(low, high));
+ }
+ }
+}
+
+TEST(RunInt32PairAddWithSharedInput) {
+ TestInt32PairAddWithSharedInput(0, 0, 0, 0);
+ TestInt32PairAddWithSharedInput(1, 0, 0, 0);
+ TestInt32PairAddWithSharedInput(0, 1, 0, 0);
+ TestInt32PairAddWithSharedInput(0, 0, 1, 0);
+ TestInt32PairAddWithSharedInput(0, 0, 0, 1);
+ TestInt32PairAddWithSharedInput(1, 1, 0, 0);
}
+TEST(RunInt32PairSub) {
+ BufferedRawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32(),
+ MachineType::Uint32());
+
+ uint32_t high;
+ uint32_t low;
+
+ Node* PairSub = m.Int32PairSub(m.Parameter(0), m.Parameter(1), m.Parameter(2),
+ m.Parameter(3));
+
+ m.StoreToPointer(&low, MachineRepresentation::kWord32,
+ m.Projection(0, PairSub));
+ m.StoreToPointer(&high, MachineRepresentation::kWord32,
+ m.Projection(1, PairSub));
+ m.Return(m.Int32Constant(74));
+
+ FOR_UINT64_INPUTS(i) {
+ FOR_UINT64_INPUTS(j) {
+ m.Call(static_cast<uint32_t>(*i & 0xffffffff),
+ static_cast<uint32_t>(*i >> 32),
+ static_cast<uint32_t>(*j & 0xffffffff),
+ static_cast<uint32_t>(*j >> 32));
+ CHECK_EQ(*i - *j, ToInt64(low, high));
+ }
+ }
+}
+
+void TestInt32PairSubWithSharedInput(int a, int b, int c, int d) {
+ BufferedRawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
+ MachineType::Uint32());
+
+ uint32_t high;
+ uint32_t low;
+
+ Node* PairSub = m.Int32PairSub(m.Parameter(a), m.Parameter(b), m.Parameter(c),
+ m.Parameter(d));
+
+ m.StoreToPointer(&low, MachineRepresentation::kWord32,
+ m.Projection(0, PairSub));
+ m.StoreToPointer(&high, MachineRepresentation::kWord32,
+ m.Projection(1, PairSub));
+ m.Return(m.Int32Constant(74));
+
+ FOR_UINT32_INPUTS(i) {
+ FOR_UINT32_INPUTS(j) {
+ m.Call(*i, *j);
+ uint32_t inputs[] = {*i, *j};
+ CHECK_EQ(ToInt64(inputs[a], inputs[b]) - ToInt64(inputs[c], inputs[d]),
+ ToInt64(low, high));
+ }
+ }
+}
+
+TEST(RunInt32PairSubWithSharedInput) {
+ TestInt32PairSubWithSharedInput(0, 0, 0, 0);
+ TestInt32PairSubWithSharedInput(1, 0, 0, 0);
+ TestInt32PairSubWithSharedInput(0, 1, 0, 0);
+ TestInt32PairSubWithSharedInput(0, 0, 1, 0);
+ TestInt32PairSubWithSharedInput(0, 0, 0, 1);
+ TestInt32PairSubWithSharedInput(1, 1, 0, 0);
+}
+
+TEST(RunInt32PairMul) {
+ BufferedRawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32(),
+ MachineType::Uint32());
+
+ uint32_t high;
+ uint32_t low;
+
+ Node* PairMul = m.Int32PairMul(m.Parameter(0), m.Parameter(1), m.Parameter(2),
+ m.Parameter(3));
+
+ m.StoreToPointer(&low, MachineRepresentation::kWord32,
+ m.Projection(0, PairMul));
+ m.StoreToPointer(&high, MachineRepresentation::kWord32,
+ m.Projection(1, PairMul));
+ m.Return(m.Int32Constant(74));
+
+ FOR_UINT64_INPUTS(i) {
+ FOR_UINT64_INPUTS(j) {
+ m.Call(static_cast<uint32_t>(*i & 0xffffffff),
+ static_cast<uint32_t>(*i >> 32),
+ static_cast<uint32_t>(*j & 0xffffffff),
+ static_cast<uint32_t>(*j >> 32));
+ CHECK_EQ(*i * *j, ToInt64(low, high));
+ }
+ }
+}
+
+void TestInt32PairMulWithSharedInput(int a, int b, int c, int d) {
+ BufferedRawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
+ MachineType::Uint32());
+
+ uint32_t high;
+ uint32_t low;
+
+ Node* PairMul = m.Int32PairMul(m.Parameter(a), m.Parameter(b), m.Parameter(c),
+ m.Parameter(d));
+
+ m.StoreToPointer(&low, MachineRepresentation::kWord32,
+ m.Projection(0, PairMul));
+ m.StoreToPointer(&high, MachineRepresentation::kWord32,
+ m.Projection(1, PairMul));
+ m.Return(m.Int32Constant(74));
+
+ FOR_UINT32_INPUTS(i) {
+ FOR_UINT32_INPUTS(j) {
+ m.Call(*i, *j);
+ uint32_t inputs[] = {*i, *j};
+ CHECK_EQ(ToInt64(inputs[a], inputs[b]) * ToInt64(inputs[c], inputs[d]),
+ ToInt64(low, high));
+ }
+ }
+}
+
+TEST(RunInt32PairMulWithSharedInput) {
+ TestInt32PairMulWithSharedInput(0, 0, 0, 0);
+ TestInt32PairMulWithSharedInput(1, 0, 0, 0);
+ TestInt32PairMulWithSharedInput(0, 1, 0, 0);
+ TestInt32PairMulWithSharedInput(0, 0, 1, 0);
+ TestInt32PairMulWithSharedInput(0, 0, 0, 1);
+ TestInt32PairMulWithSharedInput(1, 1, 0, 0);
+ TestInt32PairMulWithSharedInput(0, 1, 1, 0);
+}
+
+TEST(RunWord32PairShl) {
+ BufferedRawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32());
+
+ uint32_t high;
+ uint32_t low;
+
+ Node* PairAdd =
+ m.Word32PairShl(m.Parameter(0), m.Parameter(1), m.Parameter(2));
+
+ m.StoreToPointer(&low, MachineRepresentation::kWord32,
+ m.Projection(0, PairAdd));
+ m.StoreToPointer(&high, MachineRepresentation::kWord32,
+ m.Projection(1, PairAdd));
+ m.Return(m.Int32Constant(74));
+
+ FOR_UINT64_INPUTS(i) {
+ for (uint32_t j = 0; j < 64; j++) {
+ m.Call(static_cast<uint32_t>(*i & 0xffffffff),
+ static_cast<uint32_t>(*i >> 32), j);
+ CHECK_EQ(*i << j, ToInt64(low, high));
+ }
+ }
+}
+
+void TestWord32PairShlWithSharedInput(int a, int b) {
+ BufferedRawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
+ MachineType::Uint32());
+
+ uint32_t high;
+ uint32_t low;
+
+ Node* PairAdd =
+ m.Word32PairShl(m.Parameter(a), m.Parameter(b), m.Parameter(1));
+
+ m.StoreToPointer(&low, MachineRepresentation::kWord32,
+ m.Projection(0, PairAdd));
+ m.StoreToPointer(&high, MachineRepresentation::kWord32,
+ m.Projection(1, PairAdd));
+ m.Return(m.Int32Constant(74));
+
+ FOR_UINT32_INPUTS(i) {
+ for (uint32_t j = 0; j < 64; j++) {
+ m.Call(*i, j);
+ uint32_t inputs[] = {*i, j};
+ CHECK_EQ(ToInt64(inputs[a], inputs[b]) << j, ToInt64(low, high));
+ }
+ }
+}
+
+TEST(RunWord32PairShlWithSharedInput) {
+ TestWord32PairShlWithSharedInput(0, 0);
+ TestWord32PairShlWithSharedInput(0, 1);
+ TestWord32PairShlWithSharedInput(1, 0);
+ TestWord32PairShlWithSharedInput(1, 1);
+}
+
+#endif
TEST(RunDeadChangeFloat64ToInt32) {
RawMachineAssemblerTester<int32_t> m;
@@ -5298,7 +5495,9 @@ TEST(RunChangeFloat32ToFloat64) {
m.Return(m.ChangeFloat32ToFloat64(m.Parameter(0)));
- FOR_FLOAT32_INPUTS(i) { CheckDoubleEq(static_cast<double>(*i), m.Call(*i)); }
+ FOR_FLOAT32_INPUTS(i) {
+ CHECK_DOUBLE_EQ(static_cast<double>(*i), m.Call(*i));
+ }
}
@@ -5306,7 +5505,7 @@ TEST(RunFloat32Constant) {
FOR_FLOAT32_INPUTS(i) {
BufferedRawMachineAssemblerTester<float> m;
m.Return(m.Float32Constant(*i));
- CheckFloatEq(*i, m.Call());
+ CHECK_FLOAT_EQ(*i, m.Call());
}
}
@@ -5340,7 +5539,7 @@ TEST(RunFloat64InsertLowWord32) {
double expected = bit_cast<double>(
(bit_cast<uint64_t>(*i) & ~(V8_UINT64_C(0xFFFFFFFF))) |
(static_cast<uint64_t>(bit_cast<uint32_t>(*j))));
- CheckDoubleEq(expected, m.Call(*i, *j));
+ CHECK_DOUBLE_EQ(expected, m.Call(*i, *j));
}
}
}
@@ -5355,7 +5554,7 @@ TEST(RunFloat64InsertHighWord32) {
uint64_t expected = (bit_cast<uint64_t>(*i) & 0xFFFFFFFF) |
(static_cast<uint64_t>(*j) << 32);
- CheckDoubleEq(bit_cast<double>(expected), m.Call(*i, *j));
+ CHECK_DOUBLE_EQ(bit_cast<double>(expected), m.Call(*i, *j));
}
}
}
@@ -5364,14 +5563,14 @@ TEST(RunFloat64InsertHighWord32) {
TEST(RunFloat32Abs) {
BufferedRawMachineAssemblerTester<float> m(MachineType::Float32());
m.Return(m.Float32Abs(m.Parameter(0)));
- FOR_FLOAT32_INPUTS(i) { CheckFloatEq(std::abs(*i), m.Call(*i)); }
+ FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(std::abs(*i), m.Call(*i)); }
}
TEST(RunFloat64Abs) {
BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
m.Return(m.Float64Abs(m.Parameter(0)));
- FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(std::abs(*i), m.Call(*i)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(std::abs(*i), m.Call(*i)); }
}
@@ -5479,7 +5678,7 @@ TEST(RunFloat32RoundDown) {
m.Return(m.Float32RoundDown(m.Parameter(0)));
- FOR_FLOAT32_INPUTS(i) { CheckFloatEq(floorf(*i), m.Call(*i)); }
+ FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(floorf(*i), m.Call(*i)); }
}
@@ -5489,7 +5688,7 @@ TEST(RunFloat64RoundDown1) {
m.Return(m.Float64RoundDown(m.Parameter(0)));
- FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(floor(*i), m.Call(*i)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(floor(*i), m.Call(*i)); }
}
@@ -5511,7 +5710,7 @@ TEST(RunFloat32RoundUp) {
if (!m.machine()->Float32RoundUp().IsSupported()) return;
m.Return(m.Float32RoundUp(m.Parameter(0)));
- FOR_FLOAT32_INPUTS(i) { CheckFloatEq(ceilf(*i), m.Call(*i)); }
+ FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(ceilf(*i), m.Call(*i)); }
}
@@ -5520,7 +5719,7 @@ TEST(RunFloat64RoundUp) {
if (!m.machine()->Float64RoundUp().IsSupported()) return;
m.Return(m.Float64RoundUp(m.Parameter(0)));
- FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(ceil(*i), m.Call(*i)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ceil(*i), m.Call(*i)); }
}
@@ -5529,7 +5728,7 @@ TEST(RunFloat32RoundTiesEven) {
if (!m.machine()->Float32RoundTiesEven().IsSupported()) return;
m.Return(m.Float32RoundTiesEven(m.Parameter(0)));
- FOR_FLOAT32_INPUTS(i) { CheckFloatEq(nearbyint(*i), m.Call(*i)); }
+ FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(nearbyint(*i), m.Call(*i)); }
}
@@ -5538,7 +5737,7 @@ TEST(RunFloat64RoundTiesEven) {
if (!m.machine()->Float64RoundTiesEven().IsSupported()) return;
m.Return(m.Float64RoundTiesEven(m.Parameter(0)));
- FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(nearbyint(*i), m.Call(*i)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(nearbyint(*i), m.Call(*i)); }
}
@@ -5548,7 +5747,7 @@ TEST(RunFloat32RoundTruncate) {
m.Return(m.Float32RoundTruncate(m.Parameter(0)));
- FOR_FLOAT32_INPUTS(i) { CheckFloatEq(truncf(*i), m.Call(*i)); }
+ FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(truncf(*i), m.Call(*i)); }
}
@@ -5653,79 +5852,6 @@ TEST(RunCallCFunction8) {
}
#endif // USE_SIMULATOR
-template <typename T>
-void TestExternalReferenceFunction(
- BufferedRawMachineAssemblerTester<int32_t>* m, ExternalReference ref,
- T (*comparison)(T)) {
- T parameter;
-
- Node* function = m->ExternalConstant(ref);
- m->CallCFunction1(MachineType::Pointer(), MachineType::Pointer(), function,
- m->PointerConstant(&parameter));
- m->Return(m->Int32Constant(4356));
- FOR_FLOAT64_INPUTS(i) {
- parameter = *i;
- m->Call();
- CheckDoubleEq(comparison(*i), parameter);
- }
-}
-
-TEST(RunCallExternalReferenceF32Trunc) {
- BufferedRawMachineAssemblerTester<int32_t> m;
- ExternalReference ref =
- ExternalReference::f32_trunc_wrapper_function(m.isolate());
- TestExternalReferenceFunction<float>(&m, ref, truncf);
-}
-
-TEST(RunCallExternalReferenceF32Floor) {
- BufferedRawMachineAssemblerTester<int32_t> m;
- ExternalReference ref =
- ExternalReference::f32_floor_wrapper_function(m.isolate());
- TestExternalReferenceFunction<float>(&m, ref, floorf);
-}
-
-TEST(RunCallExternalReferenceF32Ceil) {
- BufferedRawMachineAssemblerTester<int32_t> m;
- ExternalReference ref =
- ExternalReference::f32_ceil_wrapper_function(m.isolate());
- TestExternalReferenceFunction<float>(&m, ref, ceilf);
-}
-
-TEST(RunCallExternalReferenceF32RoundTiesEven) {
- BufferedRawMachineAssemblerTester<int32_t> m;
- ExternalReference ref =
- ExternalReference::f32_nearest_int_wrapper_function(m.isolate());
- TestExternalReferenceFunction<float>(&m, ref, nearbyintf);
-}
-
-TEST(RunCallExternalReferenceF64Trunc) {
- BufferedRawMachineAssemblerTester<int32_t> m;
- ExternalReference ref =
- ExternalReference::f64_trunc_wrapper_function(m.isolate());
- TestExternalReferenceFunction<double>(&m, ref, trunc);
-}
-
-TEST(RunCallExternalReferenceF64Floor) {
- BufferedRawMachineAssemblerTester<int32_t> m;
- ExternalReference ref =
- ExternalReference::f64_floor_wrapper_function(m.isolate());
- TestExternalReferenceFunction<double>(&m, ref, floor);
-}
-
-TEST(RunCallExternalReferenceF64Ceil) {
- BufferedRawMachineAssemblerTester<int32_t> m;
- ExternalReference ref =
- ExternalReference::f64_ceil_wrapper_function(m.isolate());
- TestExternalReferenceFunction<double>(&m, ref, ceil);
-}
-
-TEST(RunCallExternalReferenceF64RoundTiesEven) {
- BufferedRawMachineAssemblerTester<int32_t> m;
- ExternalReference ref =
- ExternalReference::f64_nearest_int_wrapper_function(m.isolate());
- TestExternalReferenceFunction<double>(&m, ref, nearbyint);
-}
-
#if V8_TARGET_ARCH_64_BIT
// TODO(titzer): run int64 tests on all platforms when supported.
TEST(RunCheckedLoadInt64) {
@@ -5907,7 +6033,7 @@ TEST(RunTryTruncateFloat32ToUint64WithCheck) {
TEST(RunTryTruncateFloat64ToUint64WithoutCheck) {
BufferedRawMachineAssemblerTester<uint64_t> m(MachineType::Float64());
- m.Return(m.TruncateFloat64ToUint64(m.Parameter(0)));
+ m.Return(m.TryTruncateFloat64ToUint64(m.Parameter(0)));
FOR_UINT64_INPUTS(j) {
double input = static_cast<double>(*j);
diff --git a/deps/v8/test/cctest/compiler/test-run-native-calls.cc b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
index 89114097d8..bfdcc0e8ca 100644
--- a/deps/v8/test/cctest/compiler/test-run-native-calls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
@@ -270,7 +270,7 @@ Handle<Code> CompileGraph(const char* name, CallDescriptor* desc, Graph* graph,
Handle<Code> WrapWithCFunction(Handle<Code> inner, CallDescriptor* desc) {
- Zone zone;
+ Zone zone(inner->GetIsolate()->allocator());
MachineSignature* msig =
const_cast<MachineSignature*>(desc->GetMachineSignature());
int param_count = static_cast<int>(msig->parameter_count());
@@ -437,7 +437,7 @@ class Computer {
Handle<Code> inner = Handle<Code>::null();
{
// Build the graph for the computation.
- Zone zone;
+ Zone zone(isolate->allocator());
Graph graph(&zone);
RawMachineAssembler raw(isolate, &graph, desc);
build(desc, raw);
@@ -452,7 +452,7 @@ class Computer {
Handle<Code> wrapper = Handle<Code>::null();
{
// Wrap the above code with a callable function that passes constants.
- Zone zone;
+ Zone zone(isolate->allocator());
Graph graph(&zone);
CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, &csig);
RawMachineAssembler raw(isolate, &graph, cdesc);
@@ -484,7 +484,7 @@ class Computer {
Handle<Code> wrapper = Handle<Code>::null();
{
// Wrap the above code with a callable function that loads from {input}.
- Zone zone;
+ Zone zone(isolate->allocator());
Graph graph(&zone);
CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, &csig);
RawMachineAssembler raw(isolate, &graph, cdesc);
@@ -522,7 +522,7 @@ class Computer {
static void TestInt32Sub(CallDescriptor* desc) {
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
- Zone zone;
+ Zone zone(isolate->allocator());
GraphAndBuilders inner(&zone);
{
// Build the add function.
@@ -563,7 +563,7 @@ static void CopyTwentyInt32(CallDescriptor* desc) {
Handle<Code> inner = Handle<Code>::null();
{
// Writes all parameters into the output buffer.
- Zone zone;
+ Zone zone(isolate->allocator());
Graph graph(&zone);
RawMachineAssembler raw(isolate, &graph, desc);
Node* base = raw.PointerConstant(output);
@@ -580,7 +580,7 @@ static void CopyTwentyInt32(CallDescriptor* desc) {
Handle<Code> wrapper = Handle<Code>::null();
{
// Loads parameters from the input buffer and calls the above code.
- Zone zone;
+ Zone zone(isolate->allocator());
Graph graph(&zone);
CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, &csig);
RawMachineAssembler raw(isolate, &graph, cdesc);
@@ -619,7 +619,8 @@ static void CopyTwentyInt32(CallDescriptor* desc) {
static void Test_RunInt32SubWithRet(int retreg) {
Int32Signature sig(2);
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
RegisterPairs pairs;
while (pairs.More()) {
int parray[2];
@@ -670,7 +671,8 @@ TEST(Run_Int32Sub_all_allocatable_single) {
Int32Signature sig(2);
RegisterPairs pairs;
while (pairs.More()) {
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
int parray[1];
int rarray[1];
pairs.Next(&rarray[0], &parray[0], true);
@@ -687,7 +689,8 @@ TEST(Run_CopyTwentyInt32_all_allocatable_pairs) {
Int32Signature sig(20);
RegisterPairs pairs;
while (pairs.More()) {
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
int parray[2];
int rarray[] = {
RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
@@ -739,7 +742,8 @@ static void Test_Int32_WeightedSum_of_size(int count) {
Int32Signature sig(count);
for (int p0 = 0; p0 < Register::kNumRegisters; p0++) {
if (Register::from_code(p0).IsAllocatable()) {
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
int parray[] = {p0};
int rarray[] = {
@@ -807,7 +811,8 @@ void Test_Int32_Select() {
Allocator rets(rarray, 1, nullptr, 0);
RegisterConfig config(params, rets);
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
for (int i = which + 1; i <= 64; i++) {
Int32Signature sig(i);
@@ -849,7 +854,8 @@ TEST(Int64Select_registers) {
ArgsBuffer<int64_t>::Sig sig(2);
RegisterPairs pairs;
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
while (pairs.More()) {
int parray[2];
pairs.Next(&parray[0], &parray[1], false);
@@ -876,7 +882,8 @@ TEST(Float32Select_registers) {
ArgsBuffer<float32>::Sig sig(2);
Float32RegisterPairs pairs;
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
while (pairs.More()) {
int parray[2];
pairs.Next(&parray[0], &parray[1], false);
@@ -904,7 +911,8 @@ TEST(Float64Select_registers) {
ArgsBuffer<float64>::Sig sig(2);
Float64RegisterPairs pairs;
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
while (pairs.More()) {
int parray[2];
pairs.Next(&parray[0], &parray[1], false);
@@ -927,7 +935,8 @@ TEST(Float32Select_stack_params_return_reg) {
Allocator rets(nullptr, 0, rarray, 1);
RegisterConfig config(params, rets);
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
for (int count = 1; count < 6; count++) {
ArgsBuffer<float32>::Sig sig(count);
CallDescriptor* desc = config.Create(&zone, &sig);
@@ -949,7 +958,8 @@ TEST(Float64Select_stack_params_return_reg) {
Allocator rets(nullptr, 0, rarray, 1);
RegisterConfig config(params, rets);
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
for (int count = 1; count < 6; count++) {
ArgsBuffer<float64>::Sig sig(count);
CallDescriptor* desc = config.Create(&zone, &sig);
@@ -972,7 +982,7 @@ static void Build_Select_With_Call(CallDescriptor* desc,
{
Isolate* isolate = CcTest::InitIsolateOnce();
// Build the actual select.
- Zone zone;
+ Zone zone(isolate->allocator());
Graph graph(&zone);
RawMachineAssembler raw(isolate, &graph, desc);
raw.Return(raw.Parameter(which));
@@ -1002,7 +1012,8 @@ TEST(Float64StackParamsToStackParams) {
Allocator params(nullptr, 0, nullptr, 0);
Allocator rets(nullptr, 0, rarray, 1);
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
ArgsBuffer<float64>::Sig sig(2);
RegisterConfig config(params, rets);
CallDescriptor* desc = config.Create(&zone, &sig);
@@ -1068,7 +1079,8 @@ void MixedParamTest(int start) {
RegisterConfig config(palloc, ralloc);
for (int which = 0; which < num_params; which++) {
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
HandleScope scope(isolate);
MachineSignature::Builder builder(&zone, 1, num_params);
builder.AddReturn(params[which]);
@@ -1079,7 +1091,7 @@ void MixedParamTest(int start) {
Handle<Code> select;
{
// build the select.
- Zone zone;
+ Zone zone(&allocator);
Graph graph(&zone);
RawMachineAssembler raw(isolate, &graph, desc);
raw.Return(raw.Parameter(which));
@@ -1096,7 +1108,7 @@ void MixedParamTest(int start) {
CSignature0<int32_t> csig;
{
// Wrap the select code with a callable function that passes constants.
- Zone zone;
+ Zone zone(&allocator);
Graph graph(&zone);
CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, &csig);
RawMachineAssembler raw(isolate, &graph, cdesc);
@@ -1189,7 +1201,7 @@ void TestStackSlot(MachineType slot_type, T expected) {
Allocator ralloc(rarray_gp, 1, rarray_fp, 1);
RegisterConfig config(palloc, ralloc);
- Zone zone;
+ Zone zone(isolate->allocator());
HandleScope scope(isolate);
MachineSignature::Builder builder(&zone, 1, 12);
builder.AddReturn(MachineType::Int32());
diff --git a/deps/v8/test/cctest/compiler/test-run-variables.cc b/deps/v8/test/cctest/compiler/test-run-variables.cc
index f856368509..6997967b42 100644
--- a/deps/v8/test/cctest/compiler/test-run-variables.cc
+++ b/deps/v8/test/cctest/compiler/test-run-variables.cc
@@ -15,7 +15,6 @@ static const char* load_tests[] = {
"var x = (r = x)", "undefined", "undefined",
"var x = (a?1:2); r = x", "1", "2",
"const x = a; r = x", "123", "0",
- "const x = (r = x)", "undefined", "undefined",
"const x = (a?3:4); r = x", "3", "4",
"'use strict'; const x = a; r = x", "123", "0",
"'use strict'; const x = (r = x)", throws, throws,
@@ -29,9 +28,6 @@ static const char* store_tests[] = {
"var x = 1; x = a; r = x", "123", "0",
"var x = (a?(x=4,2):3); r = x", "2", "3",
"var x = (a?4:5); x = a; r = x", "123", "0",
- "const x = 1; x = a; r = x", "1", "1",
- "const x = (a?(x=4,2):3); r = x", "2", "3",
- "const x = (a?4:5); x = a; r = x", "4", "5",
// Assignments to 'const' are SyntaxErrors, handled by the parser,
// hence we cannot test them here because they are early errors.
"'use strict'; let x = 1; x = a; r = x", "123", "0",
@@ -39,16 +35,8 @@ static const char* store_tests[] = {
"'use strict'; let x = (a?4:5); x = a; r = x", "123", "0",
NULL};
-static const char* bind_tests[] = {
- "if (a) { const x = a }; r = x;", "123", "undefined",
- "for (; a > 0; a--) { const x = a }; r = x", "123", "undefined",
- // Re-initialization of variables other than legacy 'const' is not
- // possible due to sane variable scoping, hence no tests here.
- NULL};
-
static void RunVariableTests(const char* source, const char* tests[]) {
- i::FLAG_legacy_const = true;
EmbeddedVector<char, 512> buffer;
for (int i = 0; tests[i] != NULL; i += 3) {
@@ -99,18 +87,6 @@ TEST(ContextStoreVariables) {
}
-TEST(StackInitializeVariables) {
- const char* source = "(function(a,r) { %s; return r; })";
- RunVariableTests(source, bind_tests);
-}
-
-
-TEST(ContextInitializeVariables) {
- const char* source = "(function(a,r) { %s; function f() {x} return r; })";
- RunVariableTests(source, bind_tests);
-}
-
-
TEST(SelfReferenceVariable) {
FunctionTester T("(function self() { return self; })");
diff --git a/deps/v8/test/cctest/compiler/test-simplified-lowering.cc b/deps/v8/test/cctest/compiler/test-simplified-lowering.cc
index 1b752edd3c..b5e992915f 100644
--- a/deps/v8/test/cctest/compiler/test-simplified-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-simplified-lowering.cc
@@ -928,10 +928,8 @@ TEST(LowerBooleanToNumber_tagged_tagged) {
CHECK(c == cnv->InputAt(0) || c == cnv->InputAt(1));
}
-
static Type* test_types[] = {Type::Signed32(), Type::Unsigned32(),
- Type::Number(), Type::Any()};
-
+ Type::Number()};
TEST(LowerNumberCmp_to_int32) {
TestingGraph t(Type::Signed32(), Type::Signed32());
@@ -956,18 +954,13 @@ TEST(LowerNumberCmp_to_uint32) {
TEST(LowerNumberCmp_to_float64) {
- static Type* types[] = {Type::Number(), Type::Any()};
-
- for (size_t i = 0; i < arraysize(types); i++) {
- TestingGraph t(types[i], types[i]);
+ TestingGraph t(Type::Number(), Type::Number());
- t.CheckLoweringBinop(IrOpcode::kFloat64Equal,
- t.simplified()->NumberEqual());
- t.CheckLoweringBinop(IrOpcode::kFloat64LessThan,
- t.simplified()->NumberLessThan());
- t.CheckLoweringBinop(IrOpcode::kFloat64LessThanOrEqual,
- t.simplified()->NumberLessThanOrEqual());
- }
+ t.CheckLoweringBinop(IrOpcode::kFloat64Equal, t.simplified()->NumberEqual());
+ t.CheckLoweringBinop(IrOpcode::kFloat64LessThan,
+ t.simplified()->NumberLessThan());
+ t.CheckLoweringBinop(IrOpcode::kFloat64LessThanOrEqual,
+ t.simplified()->NumberLessThanOrEqual());
}
@@ -1140,25 +1133,8 @@ TEST(LowerReferenceEqual_to_wordeq) {
t.CheckLoweringBinop(opcode, t.simplified()->ReferenceEqual(Type::Any()));
}
-
-TEST(LowerStringOps_to_call_and_compare) {
- // These tests need linkage for the calls.
- TestingGraph t(Type::String(), Type::String());
- IrOpcode::Value compare_eq =
- static_cast<IrOpcode::Value>(t.machine()->WordEqual()->opcode());
- IrOpcode::Value compare_lt =
- static_cast<IrOpcode::Value>(t.machine()->IntLessThan()->opcode());
- IrOpcode::Value compare_le = static_cast<IrOpcode::Value>(
- t.machine()->IntLessThanOrEqual()->opcode());
- t.CheckLoweringStringBinop(compare_eq, t.simplified()->StringEqual());
- t.CheckLoweringStringBinop(compare_lt, t.simplified()->StringLessThan());
- t.CheckLoweringStringBinop(compare_le,
- t.simplified()->StringLessThanOrEqual());
- }
-
-
- void CheckChangeInsertion(IrOpcode::Value expected, MachineType from,
- MachineType to, Type* type = Type::Any()) {
+void CheckChangeInsertion(IrOpcode::Value expected, MachineType from,
+ MachineType to, Type* type = Type::Any()) {
TestingGraph t(Type::Any());
Node* in = t.ExampleWithOutput(from);
NodeProperties::SetType(in, type);
@@ -1169,7 +1145,6 @@ TEST(LowerStringOps_to_call_and_compare) {
CHECK_EQ(in, use->InputAt(0)->InputAt(0));
}
-
TEST(InsertBasicChanges) {
CheckChangeInsertion(IrOpcode::kChangeFloat64ToInt32, MachineType::Float64(),
MachineType::Int32(), Type::Signed32());
@@ -1187,27 +1162,29 @@ TEST(InsertBasicChanges) {
CheckChangeInsertion(IrOpcode::kChangeFloat64ToTagged, MachineType::Float64(),
MachineType::AnyTagged());
CheckChangeInsertion(IrOpcode::kChangeTaggedToFloat64,
- MachineType::AnyTagged(), MachineType::Float64());
+ MachineType::AnyTagged(), MachineType::Float64(),
+ Type::Number());
CheckChangeInsertion(IrOpcode::kChangeInt32ToFloat64, MachineType::Int32(),
- MachineType::Float64());
+ MachineType::Float64(), Type::Signed32());
CheckChangeInsertion(IrOpcode::kChangeInt32ToTagged, MachineType::Int32(),
- MachineType::AnyTagged());
+ MachineType::AnyTagged(), Type::Signed32());
CheckChangeInsertion(IrOpcode::kChangeUint32ToFloat64, MachineType::Uint32(),
- MachineType::Float64());
+ MachineType::Float64(), Type::Unsigned32());
CheckChangeInsertion(IrOpcode::kChangeUint32ToTagged, MachineType::Uint32(),
- MachineType::AnyTagged());
+ MachineType::AnyTagged(), Type::Unsigned32());
}
-
static void CheckChangesAroundBinop(TestingGraph* t, const Operator* op,
IrOpcode::Value input_change,
- IrOpcode::Value output_change) {
+ IrOpcode::Value output_change,
+ Type* type = Type::Any()) {
Node* binop =
op->ControlInputCount() == 0
? t->graph()->NewNode(op, t->p0, t->p1)
: t->graph()->NewNode(op, t->p0, t->p1, t->graph()->start());
+ NodeProperties::SetType(binop, type);
t->Return(binop);
t->Lower();
CHECK_EQ(input_change, binop->InputAt(0)->opcode());
@@ -1230,7 +1207,9 @@ TEST(InsertChangesAroundInt32Binops) {
for (size_t i = 0; i < arraysize(ops); i++) {
CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToInt32,
- IrOpcode::kChangeInt32ToTagged);
+ IrOpcode::kChangeInt32ToTagged, Type::Signed32());
+ CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToInt32,
+ IrOpcode::kChangeInt32ToTagged, Type::Signed32());
}
}
diff --git a/deps/v8/test/cctest/compiler/value-helper.h b/deps/v8/test/cctest/compiler/value-helper.h
index 83cd33c5b0..7d26dbaf0c 100644
--- a/deps/v8/test/cctest/compiler/value-helper.h
+++ b/deps/v8/test/cctest/compiler/value-helper.h
@@ -274,7 +274,7 @@ class ValueHelper {
0x00003fffffffffff, 0x00001fffffffffff, 0x00000fffffffffff,
0x000007ffffffffff, 0x000003ffffffffff, 0x000001ffffffffff,
0x8000008000000000, 0x8000008000000001, 0x8000000000000400,
- 0x8000000000000401};
+ 0x8000000000000401, 0x0000000000000020};
return std::vector<uint64_t>(&kValues[0], &kValues[arraysize(kValues)]);
}
@@ -321,6 +321,12 @@ static inline void CheckFloatEq(volatile float x, volatile float y) {
}
}
+#define CHECK_FLOAT_EQ(lhs, rhs) \
+ do { \
+ volatile float tmp = lhs; \
+ CheckFloatEq(tmp, rhs); \
+ } while (0)
+
static inline void CheckDoubleEq(volatile double x, volatile double y) {
if (std::isnan(x)) {
CHECK(std::isnan(y));
@@ -329,6 +335,12 @@ static inline void CheckDoubleEq(volatile double x, volatile double y) {
}
}
+#define CHECK_DOUBLE_EQ(lhs, rhs) \
+ do { \
+ volatile double tmp = lhs; \
+ CheckDoubleEq(tmp, rhs); \
+ } while (0)
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/heap/heap-tester.h b/deps/v8/test/cctest/heap/heap-tester.h
index 5d098f57ab..599c5d8be2 100644
--- a/deps/v8/test/cctest/heap/heap-tester.h
+++ b/deps/v8/test/cctest/heap/heap-tester.h
@@ -29,14 +29,14 @@
V(TestMemoryReducerSampleJsCalls) \
V(TestSizeOfObjects) \
V(Regress587004) \
+ V(Regress589413) \
V(WriteBarriersInCopyJSObject)
#define HEAP_TEST(Name) \
CcTest register_test_##Name(v8::internal::HeapTester::Test##Name, __FILE__, \
- #Name, NULL, true, true); \
+ #Name, true, true); \
void v8::internal::HeapTester::Test##Name()
-
#define THREADED_HEAP_TEST(Name) \
RegisterThreadedTest register_##Name(v8::internal::HeapTester::Test##Name, \
#Name); \
diff --git a/deps/v8/test/cctest/heap/test-compaction.cc b/deps/v8/test/cctest/heap/test-compaction.cc
index 064e5a82c0..0feee5fc46 100644
--- a/deps/v8/test/cctest/heap/test-compaction.cc
+++ b/deps/v8/test/cctest/heap/test-compaction.cc
@@ -35,7 +35,7 @@ HEAP_TEST(CompactionFullAbortedPage) {
HandleScope scope1(isolate);
PageIterator it(heap->old_space());
while (it.has_next()) {
- it.next()->SetFlag(Page::NEVER_ALLOCATE_ON_PAGE);
+ it.next()->MarkNeverAllocateForTesting();
}
{
@@ -80,7 +80,7 @@ HEAP_TEST(CompactionPartiallyAbortedPage) {
HandleScope scope1(isolate);
PageIterator it(heap->old_space());
while (it.has_next()) {
- it.next()->SetFlag(Page::NEVER_ALLOCATE_ON_PAGE);
+ it.next()->MarkNeverAllocateForTesting();
}
{
@@ -155,7 +155,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
PageIterator it(heap->old_space());
while (it.has_next()) {
- it.next()->SetFlag(Page::NEVER_ALLOCATE_ON_PAGE);
+ it.next()->MarkNeverAllocateForTesting();
}
Page* to_be_aborted_page = nullptr;
@@ -241,7 +241,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
isolate->factory()->NewFixedArray(10, TENURED);
PageIterator it(heap->old_space());
while (it.has_next()) {
- it.next()->SetFlag(Page::NEVER_ALLOCATE_ON_PAGE);
+ it.next()->MarkNeverAllocateForTesting();
}
Page* to_be_aborted_page = nullptr;
diff --git a/deps/v8/test/cctest/heap/test-heap.cc b/deps/v8/test/cctest/heap/test-heap.cc
index 88aee8adf8..424e9870d8 100644
--- a/deps/v8/test/cctest/heap/test-heap.cc
+++ b/deps/v8/test/cctest/heap/test-heap.cc
@@ -1164,7 +1164,7 @@ TEST(Iteration) {
// Allocate a JS array to OLD_SPACE and NEW_SPACE
objs[next_objs_index++] = factory->NewJSArray(10);
objs[next_objs_index++] =
- factory->NewJSArray(10, FAST_HOLEY_ELEMENTS, Strength::WEAK, TENURED);
+ factory->NewJSArray(10, FAST_HOLEY_ELEMENTS, TENURED);
// Allocate a small string to OLD_DATA_SPACE and NEW_SPACE
objs[next_objs_index++] = factory->NewStringFromStaticChars("abcdefghij");
@@ -1572,8 +1572,7 @@ TEST(CompilationCacheCachingBehavior) {
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
CompilationCache* compilation_cache = isolate->compilation_cache();
- LanguageMode language_mode =
- construct_language_mode(FLAG_use_strict, FLAG_use_strong);
+ LanguageMode language_mode = construct_language_mode(FLAG_use_strict);
v8::HandleScope scope(CcTest::isolate());
const char* raw_source =
@@ -2068,7 +2067,7 @@ static HeapObject* NewSpaceAllocateAligned(int size,
heap->new_space()->AllocateRawAligned(size, alignment);
HeapObject* obj = NULL;
allocation.To(&obj);
- heap->CreateFillerObjectAt(obj->address(), size);
+ heap->CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
return obj;
}
@@ -2171,7 +2170,7 @@ static HeapObject* OldSpaceAllocateAligned(int size,
heap->old_space()->AllocateRawAligned(size, alignment);
HeapObject* obj = NULL;
allocation.To(&obj);
- heap->CreateFillerObjectAt(obj->address(), size);
+ heap->CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
return obj;
}
@@ -2334,11 +2333,7 @@ TEST(GrowAndShrinkNewSpace) {
Heap* heap = CcTest::heap();
NewSpace* new_space = heap->new_space();
- if (heap->ReservedSemiSpaceSize() == heap->InitialSemiSpaceSize() ||
- heap->MaxSemiSpaceSize() == heap->InitialSemiSpaceSize()) {
- // The max size cannot exceed the reserved size, since semispaces must be
- // always within the reserved space. We can't test new space growing and
- // shrinking if the reserved size is the same as the minimum (initial) size.
+ if (heap->MaxSemiSpaceSize() == heap->InitialSemiSpaceSize()) {
return;
}
@@ -2383,11 +2378,7 @@ TEST(GrowAndShrinkNewSpace) {
TEST(CollectingAllAvailableGarbageShrinksNewSpace) {
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
- if (heap->ReservedSemiSpaceSize() == heap->InitialSemiSpaceSize() ||
- heap->MaxSemiSpaceSize() == heap->InitialSemiSpaceSize()) {
- // The max size cannot exceed the reserved size, since semispaces must be
- // always within the reserved space. We can't test new space growing and
- // shrinking if the reserved size is the same as the minimum (initial) size.
+ if (heap->MaxSemiSpaceSize() == heap->InitialSemiSpaceSize()) {
return;
}
@@ -4285,8 +4276,9 @@ TEST(Regress169928) {
AllocationMemento::kSize + kPointerSize);
CHECK(allocation.To(&obj));
Address addr_obj = obj->address();
- CcTest::heap()->CreateFillerObjectAt(
- addr_obj, AllocationMemento::kSize + kPointerSize);
+ CcTest::heap()->CreateFillerObjectAt(addr_obj,
+ AllocationMemento::kSize + kPointerSize,
+ ClearRecordedSlots::kNo);
// Give the array a name, making sure not to allocate strings.
v8::Local<v8::Object> array_obj = v8::Utils::ToLocal(array);
@@ -4644,7 +4636,7 @@ TEST(LargeObjectSlotRecording) {
// Start incremental marking to active write barrier.
SimulateIncrementalMarking(heap, false);
heap->incremental_marking()->AdvanceIncrementalMarking(
- 10000000, 10000000, IncrementalMarking::IdleStepActions());
+ 10000000, IncrementalMarking::IdleStepActions());
// Create references from the large object to the object on the evacuation
// candidate.
@@ -4937,7 +4929,8 @@ TEST(NoWeakHashTableLeakWithIncrementalMarking) {
i, i, i, i, i, i, i, i);
CompileRun(source.start());
}
- heap->CollectAllGarbage();
+ // We have to abort incremental marking here to abandon black pages.
+ heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
}
int elements = 0;
if (heap->weak_object_to_code_table()->IsHashTable()) {
@@ -5457,6 +5450,9 @@ TEST(WeakCellsWithIncrementalMarking) {
CHECK(weak_cell->value()->IsFixedArray());
weak_cells[i] = inner_scope.CloseAndEscape(weak_cell);
}
+ // Call collect all twice to make sure that we also cleared
+ // weak cells that were allocated on black pages.
+ heap->CollectAllGarbage();
heap->CollectAllGarbage();
CHECK_EQ(*survivor, weak_cells[0]->value());
for (int i = 1; i < N; i++) {
@@ -6003,7 +5999,7 @@ TEST(PreprocessStackTrace) {
Handle<Object> exception = v8::Utils::OpenHandle(*try_catch.Exception());
Handle<Name> key = isolate->factory()->stack_trace_symbol();
Handle<Object> stack_trace =
- JSObject::GetProperty(exception, key).ToHandleChecked();
+ Object::GetProperty(exception, key).ToHandleChecked();
Handle<Object> code =
Object::GetElement(isolate, stack_trace, 3).ToHandleChecked();
CHECK(code->IsCode());
@@ -6277,6 +6273,7 @@ static void RemoveCodeAndGC(const v8::FunctionCallbackInfo<v8::Value>& args) {
Handle<JSFunction> fun = Handle<JSFunction>::cast(obj);
fun->ReplaceCode(*isolate->builtins()->CompileLazy());
fun->shared()->ReplaceCode(*isolate->builtins()->CompileLazy());
+ fun->shared()->ClearBytecodeArray(); // Bytecode is code too.
isolate->heap()->CollectAllAvailableGarbage("remove code and gc");
}
@@ -6320,14 +6317,14 @@ TEST(OldGenerationAllocationThroughput) {
int time2 = 200;
size_t counter2 = 2000;
tracer->SampleAllocation(time2, 0, counter2);
- size_t throughput =
- tracer->OldGenerationAllocationThroughputInBytesPerMillisecond(100);
+ size_t throughput = static_cast<size_t>(
+ tracer->OldGenerationAllocationThroughputInBytesPerMillisecond(100));
CHECK_EQ((counter2 - counter1) / (time2 - time1), throughput);
int time3 = 1000;
size_t counter3 = 30000;
tracer->SampleAllocation(time3, 0, counter3);
- throughput =
- tracer->OldGenerationAllocationThroughputInBytesPerMillisecond(100);
+ throughput = static_cast<size_t>(
+ tracer->OldGenerationAllocationThroughputInBytesPerMillisecond(100));
CHECK_EQ((counter3 - counter1) / (time3 - time1), throughput);
}
@@ -6344,7 +6341,8 @@ TEST(AllocationThroughput) {
int time2 = 200;
size_t counter2 = 2000;
tracer->SampleAllocation(time2, counter2, counter2);
- size_t throughput = tracer->AllocationThroughputInBytesPerMillisecond(100);
+ size_t throughput = static_cast<size_t>(
+ tracer->AllocationThroughputInBytesPerMillisecond(100));
CHECK_EQ(2 * (counter2 - counter1) / (time2 - time1), throughput);
int time3 = 1000;
size_t counter3 = 30000;
@@ -6531,5 +6529,93 @@ HEAP_TEST(Regress587004) {
heap->CollectGarbage(NEW_SPACE);
}
+HEAP_TEST(Regress589413) {
+ FLAG_stress_compaction = true;
+ FLAG_manual_evacuation_candidates_selection = true;
+ FLAG_parallel_compaction = false;
+ FLAG_concurrent_sweeping = false;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Heap* heap = CcTest::heap();
+ // Get the heap in clean state.
+ heap->CollectGarbage(OLD_SPACE);
+ heap->CollectGarbage(OLD_SPACE);
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ // Fill the new space with byte arrays with elements looking like pointers.
+ const int M = 256;
+ ByteArray* byte_array;
+ while (heap->AllocateByteArray(M).To(&byte_array)) {
+ for (int j = 0; j < M; j++) {
+ byte_array->set(j, 0x31);
+ }
+ // Add the array in root set.
+ handle(byte_array);
+ }
+ // Make sure the byte arrays will be promoted on the next GC.
+ heap->CollectGarbage(NEW_SPACE);
+ // This number is close to large free list category threshold.
+ const int N = 0x3eee;
+ {
+ std::vector<FixedArray*> arrays;
+ std::set<Page*> pages;
+ FixedArray* array;
+ // Fill all pages with fixed arrays.
+ heap->set_force_oom(true);
+ while (heap->AllocateFixedArray(N, TENURED).To(&array)) {
+ arrays.push_back(array);
+ pages.insert(Page::FromAddress(array->address()));
+ // Add the array in root set.
+ handle(array);
+ }
+ // Expand and full one complete page with fixed arrays.
+ heap->set_force_oom(false);
+ while (heap->AllocateFixedArray(N, TENURED).To(&array)) {
+ arrays.push_back(array);
+ pages.insert(Page::FromAddress(array->address()));
+ // Add the array in root set.
+ handle(array);
+ // Do not expand anymore.
+ heap->set_force_oom(true);
+ }
+ // Expand and mark the new page as evacuation candidate.
+ heap->set_force_oom(false);
+ {
+ AlwaysAllocateScope always_allocate(isolate);
+ Handle<HeapObject> ec_obj = factory->NewFixedArray(5000, TENURED);
+ Page* ec_page = Page::FromAddress(ec_obj->address());
+ ec_page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
+ // Make all arrays point to evacuation candidate so that
+ // slots are recorded for them.
+ for (size_t j = 0; j < arrays.size(); j++) {
+ array = arrays[j];
+ for (int i = 0; i < N; i++) {
+ array->set(i, *ec_obj);
+ }
+ }
+ }
+ SimulateIncrementalMarking(heap);
+ for (size_t j = 0; j < arrays.size(); j++) {
+ heap->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(arrays[j], N - 1);
+ }
+ }
+ // Force allocation from the free list.
+ heap->set_force_oom(true);
+ heap->CollectGarbage(OLD_SPACE);
+}
+
+TEST(Regress609761) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Heap* heap = CcTest::heap();
+ Isolate* isolate = heap->isolate();
+
+ intptr_t size_before = heap->SizeOfObjects();
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(200000);
+ array->Shrink(1);
+ intptr_t size_after = heap->SizeOfObjects();
+ CHECK_EQ(size_after, size_before + array->Size());
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/heap/test-incremental-marking.cc b/deps/v8/test/cctest/heap/test-incremental-marking.cc
index 5822d4a979..74cbf2cb34 100644
--- a/deps/v8/test/cctest/heap/test-incremental-marking.cc
+++ b/deps/v8/test/cctest/heap/test-incremental-marking.cc
@@ -86,8 +86,8 @@ class MockPlatform : public v8::Platform {
}
uint64_t AddTraceEvent(char phase, const uint8_t* categoryEnabledFlag,
- const char* name, uint64_t id, uint64_t bind_id,
- int numArgs, const char** argNames,
+ const char* name, const char* scope, uint64_t id,
+ uint64_t bind_id, int numArgs, const char** argNames,
const uint8_t* argTypes, const uint64_t* argValues,
unsigned int flags) override {
return 0;
diff --git a/deps/v8/test/cctest/heap/test-lab.cc b/deps/v8/test/cctest/heap/test-lab.cc
index 770804f162..bf4d3cc999 100644
--- a/deps/v8/test/cctest/heap/test-lab.cc
+++ b/deps/v8/test/cctest/heap/test-lab.cc
@@ -46,7 +46,8 @@ static bool AllocateFromLab(Heap* heap, LocalAllocationBuffer* lab,
AllocationResult result =
lab->AllocateRawAligned(static_cast<int>(size_in_bytes), alignment);
if (result.To(&obj)) {
- heap->CreateFillerObjectAt(obj->address(), static_cast<int>(size_in_bytes));
+ heap->CreateFillerObjectAt(obj->address(), static_cast<int>(size_in_bytes),
+ ClearRecordedSlots::kNo);
return true;
}
return false;
diff --git a/deps/v8/test/cctest/heap/test-spaces.cc b/deps/v8/test/cctest/heap/test-spaces.cc
index 12d5ce21ea..a7cf161ca6 100644
--- a/deps/v8/test/cctest/heap/test-spaces.cc
+++ b/deps/v8/test/cctest/heap/test-spaces.cc
@@ -317,11 +317,12 @@ TEST(MemoryAllocator) {
{
int total_pages = 0;
OldSpace faked_space(heap, OLD_SPACE, NOT_EXECUTABLE);
- Page* first_page = memory_allocator->AllocatePage(
- faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);
+ Page* first_page = memory_allocator->AllocatePage<Page>(
+ faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
+ NOT_EXECUTABLE);
first_page->InsertAfter(faked_space.anchor()->prev_page());
- CHECK(Page::IsValid(first_page));
+ CHECK(first_page->is_valid());
CHECK(first_page->next_page() == faked_space.anchor());
total_pages++;
@@ -330,9 +331,10 @@ TEST(MemoryAllocator) {
}
// Again, we should get n or n - 1 pages.
- Page* other = memory_allocator->AllocatePage(faked_space.AreaSize(),
- &faked_space, NOT_EXECUTABLE);
- CHECK(Page::IsValid(other));
+ Page* other = memory_allocator->AllocatePage<Page>(
+ faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
+ NOT_EXECUTABLE);
+ CHECK(other->is_valid());
total_pages++;
other->InsertAfter(first_page);
int page_count = 0;
@@ -343,7 +345,7 @@ TEST(MemoryAllocator) {
CHECK(total_pages == page_count);
Page* second_page = first_page->next_page();
- CHECK(Page::IsValid(second_page));
+ CHECK(second_page->is_valid());
// OldSpace's destructor will tear down the space and free up all pages.
}
@@ -362,8 +364,8 @@ TEST(NewSpace) {
NewSpace new_space(heap);
- CHECK(new_space.SetUp(CcTest::heap()->ReservedSemiSpaceSize(),
- CcTest::heap()->ReservedSemiSpaceSize()));
+ CHECK(new_space.SetUp(CcTest::heap()->InitialSemiSpaceSize(),
+ CcTest::heap()->InitialSemiSpaceSize()));
CHECK(new_space.HasBeenSetUp());
while (new_space.Available() >= Page::kMaxRegularHeapObjectSize) {
@@ -519,7 +521,8 @@ static HeapObject* AllocateUnaligned(NewSpace* space, int size) {
CHECK(!allocation.IsRetry());
HeapObject* filler = NULL;
CHECK(allocation.To(&filler));
- space->heap()->CreateFillerObjectAt(filler->address(), size);
+ space->heap()->CreateFillerObjectAt(filler->address(), size,
+ ClearRecordedSlots::kNo);
return filler;
}
@@ -528,7 +531,8 @@ static HeapObject* AllocateUnaligned(PagedSpace* space, int size) {
CHECK(!allocation.IsRetry());
HeapObject* filler = NULL;
CHECK(allocation.To(&filler));
- space->heap()->CreateFillerObjectAt(filler->address(), size);
+ space->heap()->CreateFillerObjectAt(filler->address(), size,
+ ClearRecordedSlots::kNo);
return filler;
}
diff --git a/deps/v8/test/cctest/heap/utils-inl.h b/deps/v8/test/cctest/heap/utils-inl.h
index f255bb6c03..56033c151e 100644
--- a/deps/v8/test/cctest/heap/utils-inl.h
+++ b/deps/v8/test/cctest/heap/utils-inl.h
@@ -49,7 +49,7 @@ static inline std::vector<Handle<FixedArray>> CreatePadding(
if (length <= 0) {
// Not enough room to create another fixed array. Let's create a filler.
heap->CreateFillerObjectAt(*heap->old_space()->allocation_top_address(),
- free_memory);
+ free_memory, ClearRecordedSlots::kNo);
break;
}
}
diff --git a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
index d5e0456511..bf43b95402 100644
--- a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
+++ b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
@@ -13,6 +13,7 @@
#include "src/base/logging.h"
#include "src/base/smart-pointers.h"
#include "src/compiler.h"
+#include "src/runtime/runtime.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-generator.h"
@@ -94,11 +95,18 @@ void BytecodeExpectationsPrinter::PrintEscapedString(
}
}
+namespace {
+i::Runtime::FunctionId IndexToFunctionId(uint32_t index) {
+ return static_cast<i::Runtime::FunctionId>(index);
+}
+} // namespace
+
void BytecodeExpectationsPrinter::PrintBytecodeOperand(
std::ostream& stream, const BytecodeArrayIterator& bytecode_iter,
const Bytecode& bytecode, int op_index, int parameter_count) const {
OperandType op_type = Bytecodes::GetOperandType(bytecode, op_index);
- OperandSize op_size = Bytecodes::GetOperandSize(bytecode, op_index);
+ OperandSize op_size = Bytecodes::GetOperandSize(
+ bytecode, op_index, bytecode_iter.current_operand_scale());
const char* size_tag;
switch (op_size) {
@@ -108,6 +116,9 @@ void BytecodeExpectationsPrinter::PrintBytecodeOperand(
case OperandSize::kShort:
size_tag = "16";
break;
+ case OperandSize::kQuad:
+ size_tag = "32";
+ break;
default:
UNREACHABLE();
return;
@@ -136,15 +147,27 @@ void BytecodeExpectationsPrinter::PrintBytecodeOperand(
} else {
stream << 'U' << size_tag << '(';
- if (Bytecodes::IsImmediateOperandType(op_type)) {
- // We need a cast, otherwise the result is printed as char.
- stream << static_cast<int>(bytecode_iter.GetImmediateOperand(op_index));
- } else if (Bytecodes::IsRegisterCountOperandType(op_type)) {
- stream << bytecode_iter.GetRegisterCountOperand(op_index);
- } else if (Bytecodes::IsIndexOperandType(op_type)) {
- stream << bytecode_iter.GetIndexOperand(op_index);
- } else {
- UNREACHABLE();
+ switch (op_type) {
+ case OperandType::kFlag8:
+ stream << bytecode_iter.GetFlagOperand(op_index);
+ break;
+ case OperandType::kIdx:
+ stream << bytecode_iter.GetIndexOperand(op_index);
+ break;
+ case OperandType::kImm:
+ stream << bytecode_iter.GetImmediateOperand(op_index);
+ break;
+ case OperandType::kRegCount:
+ stream << bytecode_iter.GetRegisterCountOperand(op_index);
+ break;
+ case OperandType::kRuntimeId: {
+ uint32_t operand = bytecode_iter.GetRuntimeIdOperand(op_index);
+ stream << "Runtime::k"
+ << i::Runtime::FunctionForId(IndexToFunctionId(operand))->name;
+ break;
+ }
+ default:
+ UNREACHABLE();
}
stream << ')';
@@ -155,9 +178,12 @@ void BytecodeExpectationsPrinter::PrintBytecode(
std::ostream& stream, const BytecodeArrayIterator& bytecode_iter,
int parameter_count) const {
Bytecode bytecode = bytecode_iter.current_bytecode();
-
+ OperandScale operand_scale = bytecode_iter.current_operand_scale();
+ if (Bytecodes::OperandScaleRequiresPrefixBytecode(operand_scale)) {
+ Bytecode prefix = Bytecodes::OperandScaleToPrefixBytecode(operand_scale);
+ stream << "B(" << Bytecodes::ToString(prefix) << "), ";
+ }
stream << "B(" << Bytecodes::ToString(bytecode) << ')';
-
int operands_count = Bytecodes::NumberOfOperands(bytecode);
for (int op_index = 0; op_index < operands_count; ++op_index) {
stream << ", ";
@@ -212,14 +238,14 @@ void BytecodeExpectationsPrinter::PrintFrameSize(
int frame_size = bytecode_array->frame_size();
DCHECK_EQ(frame_size % kPointerSize, 0);
- stream << "frame size: " << frame_size / kPointerSize;
- if (frame_size > 0) stream << " # in multiples of sizeof(void*)";
- stream << "\nparameter count: " << bytecode_array->parameter_count() << '\n';
+ stream << "frame size: " << frame_size / kPointerSize
+ << "\nparameter count: " << bytecode_array->parameter_count() << '\n';
}
void BytecodeExpectationsPrinter::PrintBytecodeSequence(
std::ostream& stream, i::Handle<i::BytecodeArray> bytecode_array) const {
- stream << "bytecodes: [\n";
+ stream << "bytecode array length: " << bytecode_array->length()
+ << "\nbytecodes: [\n";
BytecodeArrayIterator bytecode_iter(bytecode_array);
for (; !bytecode_iter.done(); bytecode_iter.Advance()) {
stream << " ";
diff --git a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h
index 236a7d4190..0fcead5716 100644
--- a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h
+++ b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h
@@ -36,6 +36,7 @@ class BytecodeExpectationsPrinter final {
const_pool_type_(t),
execute_(true),
wrap_(true),
+ top_level_(false),
test_function_name_(kDefaultTopFunctionName) {}
void PrintExpectation(std::ostream& stream, // NOLINT
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
new file mode 100644
index 0000000000..5ced3eab63
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
@@ -0,0 +1,124 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: mixed
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ return [ 1, 2 ];
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 6
+bytecodes: [
+ B(StackCheck),
+ B(CreateArrayLiteral), U8(0), U8(0), U8(3),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 1; return [ a, a + 1 ];
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 39
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(CreateArrayLiteral), U8(0), U8(0), U8(3),
+ B(Star), R(2),
+ B(LdaZero),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(KeyedStoreICSloppy), R(2), R(1), U8(1),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(Star), R(3),
+ B(LdaSmi), U8(1),
+ B(Add), R(3),
+ B(KeyedStoreICSloppy), R(2), R(1), U8(1),
+ B(Ldar), R(2),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ return [ [ 1, 2 ], [ 3 ] ];
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 6
+bytecodes: [
+ B(StackCheck),
+ B(CreateArrayLiteral), U8(0), U8(2), U8(2),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 1; return [ [ a, 2 ], [ a + 2 ] ];
+"
+frame size: 6
+parameter count: 1
+bytecode array length: 69
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(CreateArrayLiteral), U8(0), U8(2), U8(2),
+ B(Star), R(2),
+ B(LdaZero),
+ B(Star), R(1),
+ B(CreateArrayLiteral), U8(1), U8(0), U8(3),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(3),
+ B(Ldar), R(0),
+ B(KeyedStoreICSloppy), R(4), R(3), U8(1),
+ B(Ldar), R(4),
+ B(KeyedStoreICSloppy), R(2), R(1), U8(5),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(CreateArrayLiteral), U8(2), U8(1), U8(3),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(3),
+ B(Ldar), R(0),
+ B(Star), R(5),
+ B(LdaSmi), U8(2),
+ B(Add), R(5),
+ B(KeyedStoreICSloppy), R(4), R(3), U8(3),
+ B(Ldar), R(4),
+ B(KeyedStoreICSloppy), R(2), R(1), U8(5),
+ B(Ldar), R(2),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::FIXED_ARRAY_TYPE,
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiteralsWide.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiteralsWide.golden
new file mode 100644
index 0000000000..d9fd2dac37
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiteralsWide.golden
@@ -0,0 +1,1051 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: mixed
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ var a;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ return [ 1 , 2 ];
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 1033
+bytecodes: [
+ B(StackCheck),
+ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ B(LdaConstant), U8(1),
+ B(Star), R(0),
+ B(LdaConstant), U8(2),
+ B(Star), R(0),
+ B(LdaConstant), U8(3),
+ B(Star), R(0),
+ B(LdaConstant), U8(4),
+ B(Star), R(0),
+ B(LdaConstant), U8(5),
+ B(Star), R(0),
+ B(LdaConstant), U8(6),
+ B(Star), R(0),
+ B(LdaConstant), U8(7),
+ B(Star), R(0),
+ B(LdaConstant), U8(8),
+ B(Star), R(0),
+ B(LdaConstant), U8(9),
+ B(Star), R(0),
+ B(LdaConstant), U8(10),
+ B(Star), R(0),
+ B(LdaConstant), U8(11),
+ B(Star), R(0),
+ B(LdaConstant), U8(12),
+ B(Star), R(0),
+ B(LdaConstant), U8(13),
+ B(Star), R(0),
+ B(LdaConstant), U8(14),
+ B(Star), R(0),
+ B(LdaConstant), U8(15),
+ B(Star), R(0),
+ B(LdaConstant), U8(16),
+ B(Star), R(0),
+ B(LdaConstant), U8(17),
+ B(Star), R(0),
+ B(LdaConstant), U8(18),
+ B(Star), R(0),
+ B(LdaConstant), U8(19),
+ B(Star), R(0),
+ B(LdaConstant), U8(20),
+ B(Star), R(0),
+ B(LdaConstant), U8(21),
+ B(Star), R(0),
+ B(LdaConstant), U8(22),
+ B(Star), R(0),
+ B(LdaConstant), U8(23),
+ B(Star), R(0),
+ B(LdaConstant), U8(24),
+ B(Star), R(0),
+ B(LdaConstant), U8(25),
+ B(Star), R(0),
+ B(LdaConstant), U8(26),
+ B(Star), R(0),
+ B(LdaConstant), U8(27),
+ B(Star), R(0),
+ B(LdaConstant), U8(28),
+ B(Star), R(0),
+ B(LdaConstant), U8(29),
+ B(Star), R(0),
+ B(LdaConstant), U8(30),
+ B(Star), R(0),
+ B(LdaConstant), U8(31),
+ B(Star), R(0),
+ B(LdaConstant), U8(32),
+ B(Star), R(0),
+ B(LdaConstant), U8(33),
+ B(Star), R(0),
+ B(LdaConstant), U8(34),
+ B(Star), R(0),
+ B(LdaConstant), U8(35),
+ B(Star), R(0),
+ B(LdaConstant), U8(36),
+ B(Star), R(0),
+ B(LdaConstant), U8(37),
+ B(Star), R(0),
+ B(LdaConstant), U8(38),
+ B(Star), R(0),
+ B(LdaConstant), U8(39),
+ B(Star), R(0),
+ B(LdaConstant), U8(40),
+ B(Star), R(0),
+ B(LdaConstant), U8(41),
+ B(Star), R(0),
+ B(LdaConstant), U8(42),
+ B(Star), R(0),
+ B(LdaConstant), U8(43),
+ B(Star), R(0),
+ B(LdaConstant), U8(44),
+ B(Star), R(0),
+ B(LdaConstant), U8(45),
+ B(Star), R(0),
+ B(LdaConstant), U8(46),
+ B(Star), R(0),
+ B(LdaConstant), U8(47),
+ B(Star), R(0),
+ B(LdaConstant), U8(48),
+ B(Star), R(0),
+ B(LdaConstant), U8(49),
+ B(Star), R(0),
+ B(LdaConstant), U8(50),
+ B(Star), R(0),
+ B(LdaConstant), U8(51),
+ B(Star), R(0),
+ B(LdaConstant), U8(52),
+ B(Star), R(0),
+ B(LdaConstant), U8(53),
+ B(Star), R(0),
+ B(LdaConstant), U8(54),
+ B(Star), R(0),
+ B(LdaConstant), U8(55),
+ B(Star), R(0),
+ B(LdaConstant), U8(56),
+ B(Star), R(0),
+ B(LdaConstant), U8(57),
+ B(Star), R(0),
+ B(LdaConstant), U8(58),
+ B(Star), R(0),
+ B(LdaConstant), U8(59),
+ B(Star), R(0),
+ B(LdaConstant), U8(60),
+ B(Star), R(0),
+ B(LdaConstant), U8(61),
+ B(Star), R(0),
+ B(LdaConstant), U8(62),
+ B(Star), R(0),
+ B(LdaConstant), U8(63),
+ B(Star), R(0),
+ B(LdaConstant), U8(64),
+ B(Star), R(0),
+ B(LdaConstant), U8(65),
+ B(Star), R(0),
+ B(LdaConstant), U8(66),
+ B(Star), R(0),
+ B(LdaConstant), U8(67),
+ B(Star), R(0),
+ B(LdaConstant), U8(68),
+ B(Star), R(0),
+ B(LdaConstant), U8(69),
+ B(Star), R(0),
+ B(LdaConstant), U8(70),
+ B(Star), R(0),
+ B(LdaConstant), U8(71),
+ B(Star), R(0),
+ B(LdaConstant), U8(72),
+ B(Star), R(0),
+ B(LdaConstant), U8(73),
+ B(Star), R(0),
+ B(LdaConstant), U8(74),
+ B(Star), R(0),
+ B(LdaConstant), U8(75),
+ B(Star), R(0),
+ B(LdaConstant), U8(76),
+ B(Star), R(0),
+ B(LdaConstant), U8(77),
+ B(Star), R(0),
+ B(LdaConstant), U8(78),
+ B(Star), R(0),
+ B(LdaConstant), U8(79),
+ B(Star), R(0),
+ B(LdaConstant), U8(80),
+ B(Star), R(0),
+ B(LdaConstant), U8(81),
+ B(Star), R(0),
+ B(LdaConstant), U8(82),
+ B(Star), R(0),
+ B(LdaConstant), U8(83),
+ B(Star), R(0),
+ B(LdaConstant), U8(84),
+ B(Star), R(0),
+ B(LdaConstant), U8(85),
+ B(Star), R(0),
+ B(LdaConstant), U8(86),
+ B(Star), R(0),
+ B(LdaConstant), U8(87),
+ B(Star), R(0),
+ B(LdaConstant), U8(88),
+ B(Star), R(0),
+ B(LdaConstant), U8(89),
+ B(Star), R(0),
+ B(LdaConstant), U8(90),
+ B(Star), R(0),
+ B(LdaConstant), U8(91),
+ B(Star), R(0),
+ B(LdaConstant), U8(92),
+ B(Star), R(0),
+ B(LdaConstant), U8(93),
+ B(Star), R(0),
+ B(LdaConstant), U8(94),
+ B(Star), R(0),
+ B(LdaConstant), U8(95),
+ B(Star), R(0),
+ B(LdaConstant), U8(96),
+ B(Star), R(0),
+ B(LdaConstant), U8(97),
+ B(Star), R(0),
+ B(LdaConstant), U8(98),
+ B(Star), R(0),
+ B(LdaConstant), U8(99),
+ B(Star), R(0),
+ B(LdaConstant), U8(100),
+ B(Star), R(0),
+ B(LdaConstant), U8(101),
+ B(Star), R(0),
+ B(LdaConstant), U8(102),
+ B(Star), R(0),
+ B(LdaConstant), U8(103),
+ B(Star), R(0),
+ B(LdaConstant), U8(104),
+ B(Star), R(0),
+ B(LdaConstant), U8(105),
+ B(Star), R(0),
+ B(LdaConstant), U8(106),
+ B(Star), R(0),
+ B(LdaConstant), U8(107),
+ B(Star), R(0),
+ B(LdaConstant), U8(108),
+ B(Star), R(0),
+ B(LdaConstant), U8(109),
+ B(Star), R(0),
+ B(LdaConstant), U8(110),
+ B(Star), R(0),
+ B(LdaConstant), U8(111),
+ B(Star), R(0),
+ B(LdaConstant), U8(112),
+ B(Star), R(0),
+ B(LdaConstant), U8(113),
+ B(Star), R(0),
+ B(LdaConstant), U8(114),
+ B(Star), R(0),
+ B(LdaConstant), U8(115),
+ B(Star), R(0),
+ B(LdaConstant), U8(116),
+ B(Star), R(0),
+ B(LdaConstant), U8(117),
+ B(Star), R(0),
+ B(LdaConstant), U8(118),
+ B(Star), R(0),
+ B(LdaConstant), U8(119),
+ B(Star), R(0),
+ B(LdaConstant), U8(120),
+ B(Star), R(0),
+ B(LdaConstant), U8(121),
+ B(Star), R(0),
+ B(LdaConstant), U8(122),
+ B(Star), R(0),
+ B(LdaConstant), U8(123),
+ B(Star), R(0),
+ B(LdaConstant), U8(124),
+ B(Star), R(0),
+ B(LdaConstant), U8(125),
+ B(Star), R(0),
+ B(LdaConstant), U8(126),
+ B(Star), R(0),
+ B(LdaConstant), U8(127),
+ B(Star), R(0),
+ B(LdaConstant), U8(128),
+ B(Star), R(0),
+ B(LdaConstant), U8(129),
+ B(Star), R(0),
+ B(LdaConstant), U8(130),
+ B(Star), R(0),
+ B(LdaConstant), U8(131),
+ B(Star), R(0),
+ B(LdaConstant), U8(132),
+ B(Star), R(0),
+ B(LdaConstant), U8(133),
+ B(Star), R(0),
+ B(LdaConstant), U8(134),
+ B(Star), R(0),
+ B(LdaConstant), U8(135),
+ B(Star), R(0),
+ B(LdaConstant), U8(136),
+ B(Star), R(0),
+ B(LdaConstant), U8(137),
+ B(Star), R(0),
+ B(LdaConstant), U8(138),
+ B(Star), R(0),
+ B(LdaConstant), U8(139),
+ B(Star), R(0),
+ B(LdaConstant), U8(140),
+ B(Star), R(0),
+ B(LdaConstant), U8(141),
+ B(Star), R(0),
+ B(LdaConstant), U8(142),
+ B(Star), R(0),
+ B(LdaConstant), U8(143),
+ B(Star), R(0),
+ B(LdaConstant), U8(144),
+ B(Star), R(0),
+ B(LdaConstant), U8(145),
+ B(Star), R(0),
+ B(LdaConstant), U8(146),
+ B(Star), R(0),
+ B(LdaConstant), U8(147),
+ B(Star), R(0),
+ B(LdaConstant), U8(148),
+ B(Star), R(0),
+ B(LdaConstant), U8(149),
+ B(Star), R(0),
+ B(LdaConstant), U8(150),
+ B(Star), R(0),
+ B(LdaConstant), U8(151),
+ B(Star), R(0),
+ B(LdaConstant), U8(152),
+ B(Star), R(0),
+ B(LdaConstant), U8(153),
+ B(Star), R(0),
+ B(LdaConstant), U8(154),
+ B(Star), R(0),
+ B(LdaConstant), U8(155),
+ B(Star), R(0),
+ B(LdaConstant), U8(156),
+ B(Star), R(0),
+ B(LdaConstant), U8(157),
+ B(Star), R(0),
+ B(LdaConstant), U8(158),
+ B(Star), R(0),
+ B(LdaConstant), U8(159),
+ B(Star), R(0),
+ B(LdaConstant), U8(160),
+ B(Star), R(0),
+ B(LdaConstant), U8(161),
+ B(Star), R(0),
+ B(LdaConstant), U8(162),
+ B(Star), R(0),
+ B(LdaConstant), U8(163),
+ B(Star), R(0),
+ B(LdaConstant), U8(164),
+ B(Star), R(0),
+ B(LdaConstant), U8(165),
+ B(Star), R(0),
+ B(LdaConstant), U8(166),
+ B(Star), R(0),
+ B(LdaConstant), U8(167),
+ B(Star), R(0),
+ B(LdaConstant), U8(168),
+ B(Star), R(0),
+ B(LdaConstant), U8(169),
+ B(Star), R(0),
+ B(LdaConstant), U8(170),
+ B(Star), R(0),
+ B(LdaConstant), U8(171),
+ B(Star), R(0),
+ B(LdaConstant), U8(172),
+ B(Star), R(0),
+ B(LdaConstant), U8(173),
+ B(Star), R(0),
+ B(LdaConstant), U8(174),
+ B(Star), R(0),
+ B(LdaConstant), U8(175),
+ B(Star), R(0),
+ B(LdaConstant), U8(176),
+ B(Star), R(0),
+ B(LdaConstant), U8(177),
+ B(Star), R(0),
+ B(LdaConstant), U8(178),
+ B(Star), R(0),
+ B(LdaConstant), U8(179),
+ B(Star), R(0),
+ B(LdaConstant), U8(180),
+ B(Star), R(0),
+ B(LdaConstant), U8(181),
+ B(Star), R(0),
+ B(LdaConstant), U8(182),
+ B(Star), R(0),
+ B(LdaConstant), U8(183),
+ B(Star), R(0),
+ B(LdaConstant), U8(184),
+ B(Star), R(0),
+ B(LdaConstant), U8(185),
+ B(Star), R(0),
+ B(LdaConstant), U8(186),
+ B(Star), R(0),
+ B(LdaConstant), U8(187),
+ B(Star), R(0),
+ B(LdaConstant), U8(188),
+ B(Star), R(0),
+ B(LdaConstant), U8(189),
+ B(Star), R(0),
+ B(LdaConstant), U8(190),
+ B(Star), R(0),
+ B(LdaConstant), U8(191),
+ B(Star), R(0),
+ B(LdaConstant), U8(192),
+ B(Star), R(0),
+ B(LdaConstant), U8(193),
+ B(Star), R(0),
+ B(LdaConstant), U8(194),
+ B(Star), R(0),
+ B(LdaConstant), U8(195),
+ B(Star), R(0),
+ B(LdaConstant), U8(196),
+ B(Star), R(0),
+ B(LdaConstant), U8(197),
+ B(Star), R(0),
+ B(LdaConstant), U8(198),
+ B(Star), R(0),
+ B(LdaConstant), U8(199),
+ B(Star), R(0),
+ B(LdaConstant), U8(200),
+ B(Star), R(0),
+ B(LdaConstant), U8(201),
+ B(Star), R(0),
+ B(LdaConstant), U8(202),
+ B(Star), R(0),
+ B(LdaConstant), U8(203),
+ B(Star), R(0),
+ B(LdaConstant), U8(204),
+ B(Star), R(0),
+ B(LdaConstant), U8(205),
+ B(Star), R(0),
+ B(LdaConstant), U8(206),
+ B(Star), R(0),
+ B(LdaConstant), U8(207),
+ B(Star), R(0),
+ B(LdaConstant), U8(208),
+ B(Star), R(0),
+ B(LdaConstant), U8(209),
+ B(Star), R(0),
+ B(LdaConstant), U8(210),
+ B(Star), R(0),
+ B(LdaConstant), U8(211),
+ B(Star), R(0),
+ B(LdaConstant), U8(212),
+ B(Star), R(0),
+ B(LdaConstant), U8(213),
+ B(Star), R(0),
+ B(LdaConstant), U8(214),
+ B(Star), R(0),
+ B(LdaConstant), U8(215),
+ B(Star), R(0),
+ B(LdaConstant), U8(216),
+ B(Star), R(0),
+ B(LdaConstant), U8(217),
+ B(Star), R(0),
+ B(LdaConstant), U8(218),
+ B(Star), R(0),
+ B(LdaConstant), U8(219),
+ B(Star), R(0),
+ B(LdaConstant), U8(220),
+ B(Star), R(0),
+ B(LdaConstant), U8(221),
+ B(Star), R(0),
+ B(LdaConstant), U8(222),
+ B(Star), R(0),
+ B(LdaConstant), U8(223),
+ B(Star), R(0),
+ B(LdaConstant), U8(224),
+ B(Star), R(0),
+ B(LdaConstant), U8(225),
+ B(Star), R(0),
+ B(LdaConstant), U8(226),
+ B(Star), R(0),
+ B(LdaConstant), U8(227),
+ B(Star), R(0),
+ B(LdaConstant), U8(228),
+ B(Star), R(0),
+ B(LdaConstant), U8(229),
+ B(Star), R(0),
+ B(LdaConstant), U8(230),
+ B(Star), R(0),
+ B(LdaConstant), U8(231),
+ B(Star), R(0),
+ B(LdaConstant), U8(232),
+ B(Star), R(0),
+ B(LdaConstant), U8(233),
+ B(Star), R(0),
+ B(LdaConstant), U8(234),
+ B(Star), R(0),
+ B(LdaConstant), U8(235),
+ B(Star), R(0),
+ B(LdaConstant), U8(236),
+ B(Star), R(0),
+ B(LdaConstant), U8(237),
+ B(Star), R(0),
+ B(LdaConstant), U8(238),
+ B(Star), R(0),
+ B(LdaConstant), U8(239),
+ B(Star), R(0),
+ B(LdaConstant), U8(240),
+ B(Star), R(0),
+ B(LdaConstant), U8(241),
+ B(Star), R(0),
+ B(LdaConstant), U8(242),
+ B(Star), R(0),
+ B(LdaConstant), U8(243),
+ B(Star), R(0),
+ B(LdaConstant), U8(244),
+ B(Star), R(0),
+ B(LdaConstant), U8(245),
+ B(Star), R(0),
+ B(LdaConstant), U8(246),
+ B(Star), R(0),
+ B(LdaConstant), U8(247),
+ B(Star), R(0),
+ B(LdaConstant), U8(248),
+ B(Star), R(0),
+ B(LdaConstant), U8(249),
+ B(Star), R(0),
+ B(LdaConstant), U8(250),
+ B(Star), R(0),
+ B(LdaConstant), U8(251),
+ B(Star), R(0),
+ B(LdaConstant), U8(252),
+ B(Star), R(0),
+ B(LdaConstant), U8(253),
+ B(Star), R(0),
+ B(LdaConstant), U8(254),
+ B(Star), R(0),
+ B(LdaConstant), U8(255),
+ B(Star), R(0),
+ B(Wide), B(CreateArrayLiteral), U16(256), U16(0), U8(3),
+ B(Return),
+]
+constant pool: [
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::FIXED_ARRAY_TYPE,
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden
new file mode 100644
index 0000000000..a87a0856a6
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden
@@ -0,0 +1,278 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: string
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ var x = 0, y = 1;
+ return (x = 2, y = 3, x = 4, y = 5);
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 25
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(0),
+ B(LdaSmi), U8(3),
+ B(Star), R(1),
+ B(LdaSmi), U8(4),
+ B(Star), R(0),
+ B(LdaSmi), U8(5),
+ B(Star), R(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 55;
+ var y = (x = 100);
+ return y;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 12
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(55),
+ B(Star), R(0),
+ B(LdaSmi), U8(100),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 55;
+ x = x + (x = 100) + (x = 101);
+ return x;
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 24
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(55),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(100),
+ B(Star), R(0),
+ B(Add), R(1),
+ B(Star), R(2),
+ B(LdaSmi), U8(101),
+ B(Star), R(0),
+ B(Add), R(2),
+ B(Star), R(0),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 55;
+ x = (x = 56) - x + (x = 57);
+ x++;
+ return x;
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 32
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(55),
+ B(Star), R(0),
+ B(LdaSmi), U8(56),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(Sub), R(1),
+ B(Star), R(2),
+ B(LdaSmi), U8(57),
+ B(Star), R(0),
+ B(Add), R(2),
+ B(Star), R(0),
+ B(ToNumber),
+ B(Star), R(1),
+ B(Inc),
+ B(Star), R(0),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 55;
+ var y = x + (x = 1) + (x = 2) + (x = 3);
+ return y;
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 32
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(55),
+ B(Star), R(0),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(Add), R(2),
+ B(Star), R(3),
+ B(LdaSmi), U8(2),
+ B(Star), R(0),
+ B(Add), R(3),
+ B(Star), R(2),
+ B(LdaSmi), U8(3),
+ B(Star), R(0),
+ B(Add), R(2),
+ B(Star), R(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 55;
+ var x = x + (x = 1) + (x = 2) + (x = 3);
+ return x;
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 32
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(55),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(Add), R(1),
+ B(Star), R(2),
+ B(LdaSmi), U8(2),
+ B(Star), R(0),
+ B(Add), R(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(3),
+ B(Star), R(0),
+ B(Add), R(1),
+ B(Star), R(0),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 10, y = 20;
+ return x + (x = 1) + (x + 1) * (y = 2) + (y = 3) + (x = 4) + (y = 5) + y;
+"
+frame size: 5
+parameter count: 1
+bytecode array length: 70
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(10),
+ B(Star), R(0),
+ B(LdaSmi), U8(20),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(Add), R(2),
+ B(Star), R(3),
+ B(Ldar), R(0),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Add), R(2),
+ B(Star), R(4),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(Mul), R(4),
+ B(Add), R(3),
+ B(Star), R(2),
+ B(LdaSmi), U8(3),
+ B(Star), R(1),
+ B(Add), R(2),
+ B(Star), R(3),
+ B(LdaSmi), U8(4),
+ B(Star), R(0),
+ B(Add), R(3),
+ B(Star), R(2),
+ B(LdaSmi), U8(5),
+ B(Star), R(1),
+ B(Add), R(2),
+ B(Star), R(3),
+ B(Ldar), R(1),
+ B(Add), R(3),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 17;
+ return 1 + x + (x++) + (++x);
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 38
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(17),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(Add), R(1),
+ B(Star), R(2),
+ B(Ldar), R(0),
+ B(ToNumber),
+ B(Star), R(1),
+ B(Inc),
+ B(Star), R(0),
+ B(Ldar), R(1),
+ B(Add), R(2),
+ B(Star), R(3),
+ B(Ldar), R(0),
+ B(ToNumber),
+ B(Inc),
+ B(Star), R(0),
+ B(Add), R(3),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicBlockToBoolean.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicBlockToBoolean.golden
new file mode 100644
index 0000000000..43adcd2e80
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicBlockToBoolean.golden
@@ -0,0 +1,91 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: number
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ var a = 1; if (a || a < 0) { return 1; }
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 21
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(JumpIfToBooleanTrue), U8(9),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(LdaZero),
+ B(TestLessThan), R(1),
+ B(JumpIfToBooleanFalse), U8(5),
+ B(LdaSmi), U8(1),
+ B(Return),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 1; if (a && a < 0) { return 1; }
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 21
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(JumpIfToBooleanFalse), U8(9),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(LdaZero),
+ B(TestLessThan), R(1),
+ B(JumpIfToBooleanFalse), U8(5),
+ B(LdaSmi), U8(1),
+ B(Return),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 1; a = (a || a < 0) ? 2 : 3;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 26
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(JumpIfToBooleanTrue), U8(9),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(LdaZero),
+ B(TestLessThan), R(1),
+ B(JumpIfToBooleanFalse), U8(6),
+ B(LdaSmi), U8(2),
+ B(Jump), U8(4),
+ B(LdaSmi), U8(3),
+ B(Star), R(0),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden
new file mode 100644
index 0000000000..62cce08ee7
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden
@@ -0,0 +1,859 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: mixed
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ var x = 0;
+ while (false) { x = 99; break; continue; }
+ return x;
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 5
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 0;
+ while (false) {
+ x = x + 1;
+ };
+ return x;
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 5
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 0;
+ var y = 1;
+ while (x < 10) {
+ y = y * 12;
+ x = x + 1;
+ if (x == 3) continue;
+ if (x == 4) break;
+ }
+ return y;
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 66
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(Star), R(2),
+ B(LdaSmi), U8(10),
+ B(TestLessThan), R(2),
+ B(JumpIfFalse), U8(47),
+ B(StackCheck),
+ B(Ldar), R(1),
+ B(Star), R(2),
+ B(LdaSmi), U8(12),
+ B(Mul), R(2),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Add), R(2),
+ B(Star), R(0),
+ B(Star), R(2),
+ B(LdaSmi), U8(3),
+ B(TestEqual), R(2),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(-39),
+ B(Ldar), R(0),
+ B(Star), R(2),
+ B(LdaSmi), U8(4),
+ B(TestEqual), R(2),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(4),
+ B(Jump), U8(-53),
+ B(Ldar), R(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var i = 0;
+ while (true) {
+ if (i < 0) continue;
+ if (i == 3) break;
+ if (i == 4) break;
+ if (i == 10) continue;
+ if (i == 5) break;
+ i = i + 1;
+ }
+ return i;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 79
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(StackCheck),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(LdaZero),
+ B(TestLessThan), R(1),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(-10),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(3),
+ B(TestEqual), R(1),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(50),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(4),
+ B(TestEqual), R(1),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(38),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(10),
+ B(TestEqual), R(1),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(-46),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(5),
+ B(TestEqual), R(1),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(14),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(Add), R(1),
+ B(Star), R(0),
+ B(Jump), U8(-70),
+ B(Ldar), R(0),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var i = 0;
+ while (true) {
+ while (i < 3) {
+ if (i == 2) break;
+ i = i + 1;
+ }
+ i = i + 1;
+ break;
+ }
+ return i;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 57
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(StackCheck),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(3),
+ B(TestLessThan), R(1),
+ B(JumpIfFalse), U8(27),
+ B(StackCheck),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(TestEqual), R(1),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(14),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(Add), R(1),
+ B(Star), R(0),
+ B(Jump), U8(-33),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(Add), R(1),
+ B(Star), R(0),
+ B(Jump), U8(4),
+ B(Jump), U8(-48),
+ B(Ldar), R(0),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 10;
+ var y = 1;
+ while (x) {
+ y = y * 12;
+ x = x - 1;
+ }
+ return y;
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 39
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(10),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(JumpIfToBooleanFalse), U8(25),
+ B(StackCheck),
+ B(Ldar), R(1),
+ B(Star), R(2),
+ B(LdaSmi), U8(12),
+ B(Mul), R(2),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Sub), R(2),
+ B(Star), R(0),
+ B(Jump), U8(-25),
+ B(Ldar), R(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 0; var y = 1;
+ do {
+ y = y * 10;
+ if (x == 5) break;
+ if (x == 6) continue;
+ x = x + 1;
+ } while (x < 10);
+ return y;
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 66
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(StackCheck),
+ B(Ldar), R(1),
+ B(Star), R(2),
+ B(LdaSmi), U8(10),
+ B(Mul), R(2),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(Star), R(2),
+ B(LdaSmi), U8(5),
+ B(TestEqual), R(2),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(34),
+ B(Ldar), R(0),
+ B(Star), R(2),
+ B(LdaSmi), U8(6),
+ B(TestEqual), R(2),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(12),
+ B(Ldar), R(0),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Add), R(2),
+ B(Star), R(0),
+ B(Ldar), R(0),
+ B(Star), R(2),
+ B(LdaSmi), U8(10),
+ B(TestLessThan), R(2),
+ B(JumpIfTrue), U8(-53),
+ B(Ldar), R(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 10;
+ var y = 1;
+ do {
+ y = y * 12;
+ x = x - 1;
+ } while (x);
+ return y;
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 37
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(10),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(StackCheck),
+ B(Ldar), R(1),
+ B(Star), R(2),
+ B(LdaSmi), U8(12),
+ B(Mul), R(2),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Sub), R(2),
+ B(Star), R(0),
+ B(Ldar), R(0),
+ B(JumpIfToBooleanTrue), U8(-23),
+ B(Ldar), R(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 0; var y = 1;
+ do {
+ y = y * 10;
+ if (x == 5) break;
+ x = x + 1;
+ if (x == 6) continue;
+ } while (false);
+ return y;
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 54
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(StackCheck),
+ B(Ldar), R(1),
+ B(Star), R(2),
+ B(LdaSmi), U8(10),
+ B(Mul), R(2),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(Star), R(2),
+ B(LdaSmi), U8(5),
+ B(TestEqual), R(2),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(22),
+ B(Ldar), R(0),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Add), R(2),
+ B(Star), R(0),
+ B(Star), R(2),
+ B(LdaSmi), U8(6),
+ B(TestEqual), R(2),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(2),
+ B(Ldar), R(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 0; var y = 1;
+ do {
+ y = y * 10;
+ if (x == 5) break;
+ x = x + 1;
+ if (x == 6) continue;
+ } while (true);
+ return y;
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 56
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(StackCheck),
+ B(Ldar), R(1),
+ B(Star), R(2),
+ B(LdaSmi), U8(10),
+ B(Mul), R(2),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(Star), R(2),
+ B(LdaSmi), U8(5),
+ B(TestEqual), R(2),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(24),
+ B(Ldar), R(0),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Add), R(2),
+ B(Star), R(0),
+ B(Star), R(2),
+ B(LdaSmi), U8(6),
+ B(TestEqual), R(2),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(-41),
+ B(Jump), U8(-43),
+ B(Ldar), R(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 0;
+ for (;;) {
+ if (x == 1) break;
+ if (x == 2) continue;
+ x = x + 1;
+ }
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 43
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(StackCheck),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(TestEqual), R(1),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(26),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(TestEqual), R(1),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(-23),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(Add), R(1),
+ B(Star), R(0),
+ B(Jump), U8(-35),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ for (var x = 0;;) {
+ if (x == 1) break;
+ if (x == 2) continue;
+ x = x + 1;
+ }
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 43
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(StackCheck),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(TestEqual), R(1),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(26),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(TestEqual), R(1),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(-23),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(Add), R(1),
+ B(Star), R(0),
+ B(Jump), U8(-35),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 0;
+ for (;; x = x + 1) {
+ if (x == 1) break;
+ if (x == 2) continue;
+ }
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 43
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(StackCheck),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(TestEqual), R(1),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(26),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(TestEqual), R(1),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(2),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(Add), R(1),
+ B(Star), R(0),
+ B(Jump), U8(-35),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ for (var x = 0;; x = x + 1) {
+ if (x == 1) break;
+ if (x == 2) continue;
+ }
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 43
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(StackCheck),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(TestEqual), R(1),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(26),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(TestEqual), R(1),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(2),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(Add), R(1),
+ B(Star), R(0),
+ B(Jump), U8(-35),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var u = 0;
+ for (var i = 0; i < 100; i = i + 1) {
+ u = u + 1;
+ continue;
+ }
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 44
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(LdaZero),
+ B(Star), R(1),
+ B(Ldar), R(1),
+ B(Star), R(2),
+ B(LdaSmi), U8(100),
+ B(TestLessThan), R(2),
+ B(JumpIfFalse), U8(27),
+ B(StackCheck),
+ B(Ldar), R(0),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Add), R(2),
+ B(Star), R(0),
+ B(Jump), U8(2),
+ B(Ldar), R(1),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Add), R(2),
+ B(Star), R(1),
+ B(Jump), U8(-33),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var y = 1;
+ for (var x = 10; x; --x) {
+ y = y * 12;
+ }
+ return y;
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 35
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(LdaSmi), U8(10),
+ B(Star), R(1),
+ B(Ldar), R(1),
+ B(JumpIfToBooleanFalse), U8(21),
+ B(StackCheck),
+ B(Ldar), R(0),
+ B(Star), R(2),
+ B(LdaSmi), U8(12),
+ B(Mul), R(2),
+ B(Star), R(0),
+ B(Ldar), R(1),
+ B(ToNumber),
+ B(Dec),
+ B(Star), R(1),
+ B(Jump), U8(-21),
+ B(Ldar), R(0),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 0;
+ for (var i = 0; false; i++) {
+ x = x + 1;
+ };
+ return x;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 10
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(LdaZero),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 0;
+ for (var i = 0; true; ++i) {
+ x = x + 1;
+ if (x == 20) break;
+ };
+ return x;
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 39
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(LdaZero),
+ B(Star), R(1),
+ B(StackCheck),
+ B(Ldar), R(0),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Add), R(2),
+ B(Star), R(0),
+ B(Star), R(2),
+ B(LdaSmi), U8(20),
+ B(TestEqual), R(2),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(10),
+ B(Ldar), R(1),
+ B(ToNumber),
+ B(Inc),
+ B(Star), R(1),
+ B(Jump), U8(-27),
+ B(Ldar), R(0),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 0;
+ while (a) {
+ {
+ let z = 1;
+ function f() { z = 2; }
+ if (z) continue;
+ z++;
+ }
+ }
+"
+frame size: 7
+parameter count: 1
+bytecode array length: 120
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(1),
+ B(Ldar), R(1),
+ B(JumpIfToBooleanFalse), U8(112),
+ B(StackCheck),
+ B(LdaConstant), U8(0),
+ B(Star), R(4),
+ B(Ldar), R(closure),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kPushBlockContext), R(4), U8(2),
+ B(PushContext), R(3),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateClosure), U8(1), U8(0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StaContextSlot), R(context), U8(4),
+ B(Ldar), R(0),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(2),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
+ B(Star), R(2),
+ B(LdaContextSlot), R(context), U8(4),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(3),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
+ B(JumpIfToBooleanFalse), U8(8),
+ B(PopContext), R(3),
+ B(PopContext), R(3),
+ B(Jump), U8(-69),
+ B(LdaContextSlot), R(context), U8(4),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(3),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
+ B(ToNumber),
+ B(Star), R(4),
+ B(Inc),
+ B(Star), R(5),
+ B(LdaContextSlot), R(context), U8(4),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(3),
+ B(Star), R(6),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(6), U8(1),
+ B(Ldar), R(5),
+ B(StaContextSlot), R(context), U8(4),
+ B(PopContext), R(3),
+ B(Jump), U8(-112),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden
new file mode 100644
index 0000000000..86fcea7cbf
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden
@@ -0,0 +1,231 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: mixed
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ var x = 0;
+ label: {
+ x = x + 1;
+ break label;
+ x = x + 1;
+ }
+ return x;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 17
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(Add), R(1),
+ B(Star), R(0),
+ B(Jump), U8(2),
+ B(Ldar), R(0),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var sum = 0;
+ outer: {
+ for (var x = 0; x < 10; ++x) {
+ for (var y = 0; y < 3; ++y) {
+ ++sum;
+ if (x + y == 12) { break outer; }
+ }
+ }
+ }
+ return sum;
+"
+frame size: 5
+parameter count: 1
+bytecode array length: 75
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(LdaZero),
+ B(Star), R(1),
+ B(Ldar), R(1),
+ B(Star), R(3),
+ B(LdaSmi), U8(10),
+ B(TestLessThan), R(3),
+ B(JumpIfFalse), U8(57),
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(2),
+ B(Ldar), R(2),
+ B(Star), R(3),
+ B(LdaSmi), U8(3),
+ B(TestLessThan), R(3),
+ B(JumpIfFalse), U8(35),
+ B(StackCheck),
+ B(Ldar), R(0),
+ B(ToNumber),
+ B(Inc),
+ B(Star), R(0),
+ B(Ldar), R(1),
+ B(Star), R(3),
+ B(Ldar), R(2),
+ B(Add), R(3),
+ B(Star), R(4),
+ B(LdaSmi), U8(12),
+ B(TestEqual), R(4),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(18),
+ B(Ldar), R(2),
+ B(ToNumber),
+ B(Inc),
+ B(Star), R(2),
+ B(Jump), U8(-41),
+ B(Ldar), R(1),
+ B(ToNumber),
+ B(Inc),
+ B(Star), R(1),
+ B(Jump), U8(-63),
+ B(Ldar), R(0),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ outer: {
+ let y = 10;
+ function f() { return y; }
+ break outer;
+ }
+"
+frame size: 5
+parameter count: 1
+bytecode array length: 51
+bytecodes: [
+ B(StackCheck),
+ B(LdaConstant), U8(0),
+ B(Star), R(3),
+ B(Ldar), R(closure),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kPushBlockContext), R(3), U8(2),
+ B(PushContext), R(2),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateClosure), U8(1), U8(0),
+ B(Star), R(0),
+ B(LdaSmi), U8(10),
+ B(StaContextSlot), R(context), U8(4),
+ B(Ldar), R(0),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(2),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
+ B(Star), R(1),
+ B(Jump), U8(2),
+ B(PopContext), R(2),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ let x = 1;
+ outer: {
+ inner: {
+ let y = 2;
+ function f() { return x + y; }
+ if (y) break outer;
+ y = 3;
+ }
+ }
+ x = 4;
+"
+frame size: 6
+parameter count: 1
+bytecode array length: 131
+bytecodes: [
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
+ B(PushContext), R(2),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4),
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(StaContextSlot), R(context), U8(4),
+ B(LdaConstant), U8(0),
+ B(Star), R(4),
+ B(Ldar), R(closure),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kPushBlockContext), R(4), U8(2),
+ B(PushContext), R(3),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateClosure), U8(1), U8(0),
+ B(Star), R(0),
+ B(LdaSmi), U8(2),
+ B(StaContextSlot), R(context), U8(4),
+ B(Ldar), R(0),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(2),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
+ B(Star), R(1),
+ B(LdaContextSlot), R(context), U8(4),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(3),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
+ B(JumpIfToBooleanFalse), U8(6),
+ B(PopContext), R(3),
+ B(Jump), U8(27),
+ B(LdaSmi), U8(3),
+ B(Star), R(4),
+ B(LdaContextSlot), R(context), U8(4),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(3),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(5), U8(1),
+ B(Ldar), R(4),
+ B(StaContextSlot), R(context), U8(4),
+ B(PopContext), R(3),
+ B(LdaSmi), U8(4),
+ B(Star), R(4),
+ B(LdaContextSlot), R(context), U8(4),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(4),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(5), U8(1),
+ B(Ldar), R(4),
+ B(StaContextSlot), R(context), U8(4),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden
new file mode 100644
index 0000000000..11c9e5b0a8
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden
@@ -0,0 +1,63 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: string
+execute: yes
+wrap: no
+test function name: f
+
+---
+snippet: "
+ function t() { }
+ function f() { return t(); }
+ f();
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 15
+bytecodes: [
+ B(StackCheck),
+ B(LdaUndefined),
+ B(Star), R(1),
+ B(LdaGlobal), U8(0), U8(3),
+ B(Star), R(0),
+ B(Call), R(0), R(1), U8(1), U8(1),
+ B(Return),
+]
+constant pool: [
+ "t",
+]
+handlers: [
+]
+
+---
+snippet: "
+ function t(a, b, c) { }
+ function f() { return t(1, 2, 3); }
+ f();
+"
+frame size: 5
+parameter count: 1
+bytecode array length: 27
+bytecodes: [
+ B(StackCheck),
+ B(LdaUndefined),
+ B(Star), R(1),
+ B(LdaGlobal), U8(0), U8(3),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(Star), R(2),
+ B(LdaSmi), U8(2),
+ B(Star), R(3),
+ B(LdaSmi), U8(3),
+ B(Star), R(4),
+ B(Call), R(0), R(1), U8(4), U8(1),
+ B(Return),
+]
+constant pool: [
+ "t",
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden
new file mode 100644
index 0000000000..7983532579
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden
@@ -0,0 +1,57 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: mixed
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ g = function(){}; eval(''); return g();
+"
+frame size: 9
+parameter count: 1
+bytecode array length: 85
+bytecodes: [
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
+ B(PushContext), R(0),
+ B(Ldar), R(this),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateMappedArguments),
+ B(StaContextSlot), R(context), U8(5),
+ B(Ldar), R(new_target),
+ B(StaContextSlot), R(context), U8(6),
+ B(StackCheck),
+ B(CreateClosure), U8(0), U8(0),
+ B(StaLookupSlotSloppy), U8(1),
+ B(LdaConstant), U8(2),
+ B(Star), R(3),
+ B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(3), U8(1), R(1),
+ B(LdaConstant), U8(3),
+ B(Star), R(3),
+ B(Mov), R(1), R(4),
+ B(Mov), R(3), R(5),
+ B(Mov), R(closure), R(6),
+ B(LdaZero),
+ B(Star), R(7),
+ B(LdaSmi), U8(30),
+ B(Star), R(8),
+ B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(5),
+ B(Star), R(1),
+ B(Call), R(1), R(2), U8(2), U8(0),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(3), U8(1), R(1),
+ B(Call), R(1), R(2), U8(1), U8(3),
+ B(Return),
+]
+constant pool: [
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden
new file mode 100644
index 0000000000..0ba64b7d6c
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden
@@ -0,0 +1,90 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: mixed
+execute: yes
+wrap: no
+test function name: f
+
+---
+snippet: "
+ function bar() { this.value = 0; }
+ function f() { return new bar(); }
+ f();
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 11
+bytecodes: [
+ B(StackCheck),
+ B(LdaGlobal), U8(0), U8(2),
+ B(Star), R(0),
+ B(New), R(0), R(0), U8(0),
+ B(Return),
+]
+constant pool: [
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ function bar(x) { this.value = 18; this.x = x;}
+ function f() { return new bar(3); }
+ f();
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 17
+bytecodes: [
+ B(StackCheck),
+ B(LdaGlobal), U8(0), U8(2),
+ B(Star), R(0),
+ B(LdaSmi), U8(3),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(New), R(0), R(1), U8(1),
+ B(Return),
+]
+constant pool: [
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ function bar(w, x, y, z) {
+ this.value = 18;
+ this.x = x;
+ this.y = y;
+ this.z = z;
+ }
+ function f() { return new bar(3, 4, 5); }
+ f();
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 25
+bytecodes: [
+ B(StackCheck),
+ B(LdaGlobal), U8(0), U8(2),
+ B(Star), R(0),
+ B(LdaSmi), U8(3),
+ B(Star), R(1),
+ B(LdaSmi), U8(4),
+ B(Star), R(2),
+ B(LdaSmi), U8(5),
+ B(Star), R(3),
+ B(Ldar), R(0),
+ B(New), R(0), R(1), U8(3),
+ B(Return),
+]
+constant pool: [
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden
new file mode 100644
index 0000000000..87d5e4eea5
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden
@@ -0,0 +1,93 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: mixed
+execute: yes
+wrap: no
+test function name: f
+
+---
+snippet: "
+ function f() { %TheHole() }
+ f();
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 8
+bytecodes: [
+ B(StackCheck),
+ B(CallRuntime), U16(Runtime::kTheHole), R(0), U8(0),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(a) { return %IsArray(a) }
+ f(undefined);
+"
+frame size: 1
+parameter count: 2
+bytecode array length: 11
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(CallRuntime), U16(Runtime::kIsArray), R(0), U8(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f() { return %Add(1, 2) }
+ f();
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 15
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(CallRuntime), U16(Runtime::kAdd), R(0), U8(2),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f() { return %spread_iterable([1]) }
+ f();
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 15
+bytecodes: [
+ B(StackCheck),
+ B(LdaUndefined),
+ B(Star), R(0),
+ B(CreateArrayLiteral), U8(0), U8(0), U8(3),
+ B(Star), R(1),
+ B(CallJSRuntime), U8(115), R(0), U8(2),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
new file mode 100644
index 0000000000..9dc97ca59d
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
@@ -0,0 +1,260 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: mixed
+execute: yes
+wrap: no
+test function name: test
+
+---
+snippet: "
+ var test;
+ (function() {
+ class A {
+ method() { return 2; }
+ }
+ class B extends A {
+ method() { return super.method() + 1; }
+ }
+ test = new B().method;
+ test();
+ })();
+"
+frame size: 7
+parameter count: 1
+bytecode array length: 57
+bytecodes: [
+ B(Ldar), R(closure),
+ B(Star), R(0),
+ B(StackCheck),
+ B(Ldar), R(this),
+ B(Star), R(3),
+ B(Ldar), R(0),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(0),
+ B(Star), R(6),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(6), U8(1),
+ B(Star), R(6),
+ B(LdaConstant), U8(1),
+ B(KeyedLoadIC), R(6), U8(3),
+ B(Star), R(4),
+ B(LdaConstant), U8(2),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kLoadFromSuper), R(3), U8(3),
+ B(Mov), R(3), R(2),
+ B(Star), R(1),
+ B(Call), R(1), R(2), U8(1), U8(1),
+ B(Star), R(3),
+ B(LdaSmi), U8(1),
+ B(Add), R(3),
+ B(Return),
+]
+constant pool: [
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::SYMBOL_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var test;
+ (function() {
+ class A {
+ get x() { return 1; }
+ set x(val) { return; }
+ }
+ class B extends A {
+ method() { super.x = 2; return super.x; }
+ }
+ test = new B().method;
+ test();
+ })();
+"
+frame size: 6
+parameter count: 1
+bytecode array length: 80
+bytecodes: [
+ B(Ldar), R(closure),
+ B(Star), R(0),
+ B(StackCheck),
+ B(Ldar), R(this),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(0),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(5), U8(1),
+ B(Star), R(5),
+ B(LdaConstant), U8(1),
+ B(KeyedLoadIC), R(5), U8(1),
+ B(Star), R(2),
+ B(LdaConstant), U8(2),
+ B(Star), R(3),
+ B(LdaSmi), U8(2),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kStoreToSuper_Strict), R(1), U8(4),
+ B(Ldar), R(this),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(0),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
+ B(Star), R(4),
+ B(LdaConstant), U8(1),
+ B(KeyedLoadIC), R(4), U8(3),
+ B(Star), R(2),
+ B(LdaConstant), U8(2),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kLoadFromSuper), R(1), U8(3),
+ B(Return),
+]
+constant pool: [
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::SYMBOL_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var test;
+ (function() {
+ class A {
+ constructor(x) { this.x_ = x; }
+ }
+ class B extends A {
+ constructor() { super(1); this.y_ = 2; }
+ }
+ test = new B().constructor;
+ })();
+"
+frame size: 5
+parameter count: 1
+bytecode array length: 106
+bytecodes: [
+ B(Ldar), R(closure),
+ B(Star), R(1),
+ B(Ldar), R(new_target),
+ B(Star), R(0),
+ B(StackCheck),
+ B(Ldar), R(1),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(0),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::k_GetSuperConstructor), R(2), U8(1),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(3),
+ B(Ldar), R(0),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(1),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
+ B(New), R(2), R(3), U8(1),
+ B(Star), R(2),
+ B(Ldar), R(this),
+ B(JumpIfNotHole), U8(4),
+ B(Jump), U8(11),
+ B(LdaConstant), U8(2),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
+ B(Ldar), R(2),
+ B(Star), R(this),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(2),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
+ B(Star), R(2),
+ B(LdaSmi), U8(2),
+ B(StoreICStrict), R(2), U8(3), U8(4),
+ B(Ldar), R(this),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(2),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
+ B(Return),
+]
+constant pool: [
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var test;
+ (function() {
+ class A {
+ constructor() { this.x_ = 1; }
+ }
+ class B extends A {
+ constructor() { super(); this.y_ = 2; }
+ }
+ test = new B().constructor;
+ })();
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 102
+bytecodes: [
+ B(Ldar), R(closure),
+ B(Star), R(1),
+ B(Ldar), R(new_target),
+ B(Star), R(0),
+ B(StackCheck),
+ B(Ldar), R(1),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(0),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::k_GetSuperConstructor), R(2), U8(1),
+ B(Star), R(2),
+ B(Ldar), R(0),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
+ B(New), R(2), R(0), U8(0),
+ B(Star), R(2),
+ B(Ldar), R(this),
+ B(JumpIfNotHole), U8(4),
+ B(Jump), U8(11),
+ B(LdaConstant), U8(2),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
+ B(Ldar), R(2),
+ B(Star), R(this),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(2),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
+ B(Star), R(2),
+ B(LdaSmi), U8(2),
+ B(StoreICStrict), R(2), U8(3), U8(4),
+ B(Ldar), R(this),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(2),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
+ B(Return),
+]
+constant pool: [
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
new file mode 100644
index 0000000000..7cb6ab3f49
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
@@ -0,0 +1,241 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: mixed
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ class Person {
+ constructor(name) { this.name = name; }
+ speak() { console.log(this.name + ' is speaking.'); }
+ }
+"
+frame size: 9
+parameter count: 1
+bytecode array length: 73
+bytecodes: [
+ B(LdaTheHole),
+ B(Star), R(1),
+ B(StackCheck),
+ B(LdaTheHole),
+ B(Star), R(0),
+ B(LdaTheHole),
+ B(Star), R(2),
+ B(CreateClosure), U8(0), U8(0),
+ B(Star), R(3),
+ B(LdaSmi), U8(34),
+ B(Star), R(4),
+ B(Wide), B(LdaSmi), U16(148),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(2), U8(4),
+ B(Star), R(2),
+ B(LoadIC), R(2), U8(1), U8(1),
+ B(Star), R(3),
+ B(Mov), R(3), R(4),
+ B(LdaConstant), U8(2),
+ B(Star), R(5),
+ B(CreateClosure), U8(3), U8(0),
+ B(Star), R(6),
+ B(LdaSmi), U8(2),
+ B(Star), R(7),
+ B(LdaZero),
+ B(Star), R(8),
+ B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(4), U8(5),
+ B(CallRuntime), U16(Runtime::kFinalizeClassDefinition), R(2), U8(2),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ class person {
+ constructor(name) { this.name = name; }
+ speak() { console.log(this.name + ' is speaking.'); }
+ }
+"
+frame size: 9
+parameter count: 1
+bytecode array length: 73
+bytecodes: [
+ B(LdaTheHole),
+ B(Star), R(1),
+ B(StackCheck),
+ B(LdaTheHole),
+ B(Star), R(0),
+ B(LdaTheHole),
+ B(Star), R(2),
+ B(CreateClosure), U8(0), U8(0),
+ B(Star), R(3),
+ B(LdaSmi), U8(34),
+ B(Star), R(4),
+ B(Wide), B(LdaSmi), U16(148),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(2), U8(4),
+ B(Star), R(2),
+ B(LoadIC), R(2), U8(1), U8(1),
+ B(Star), R(3),
+ B(Mov), R(3), R(4),
+ B(LdaConstant), U8(2),
+ B(Star), R(5),
+ B(CreateClosure), U8(3), U8(0),
+ B(Star), R(6),
+ B(LdaSmi), U8(2),
+ B(Star), R(7),
+ B(LdaZero),
+ B(Star), R(8),
+ B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(4), U8(5),
+ B(CallRuntime), U16(Runtime::kFinalizeClassDefinition), R(2), U8(2),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var n0 = 'a';
+ var n1 = 'b';
+ class N {
+ [n0]() { return n0; }
+ static [n1]() { return n1; }
+ }
+"
+frame size: 10
+parameter count: 1
+bytecode array length: 127
+bytecodes: [
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
+ B(PushContext), R(2),
+ B(LdaTheHole),
+ B(Star), R(1),
+ B(StackCheck),
+ B(LdaConstant), U8(0),
+ B(StaContextSlot), R(context), U8(4),
+ B(LdaConstant), U8(1),
+ B(StaContextSlot), R(context), U8(5),
+ B(LdaTheHole),
+ B(Star), R(0),
+ B(LdaTheHole),
+ B(Star), R(3),
+ B(CreateClosure), U8(2), U8(0),
+ B(Star), R(4),
+ B(LdaSmi), U8(62),
+ B(Star), R(5),
+ B(Wide), B(LdaSmi), U16(128),
+ B(Star), R(6),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4),
+ B(Star), R(3),
+ B(LoadIC), R(3), U8(3), U8(1),
+ B(Star), R(4),
+ B(Mov), R(4), R(5),
+ B(LdaContextSlot), R(context), U8(4),
+ B(ToName),
+ B(Star), R(6),
+ B(CreateClosure), U8(4), U8(0),
+ B(Star), R(7),
+ B(LdaSmi), U8(2),
+ B(Star), R(8),
+ B(LdaSmi), U8(1),
+ B(Star), R(9),
+ B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(5), U8(5),
+ B(Mov), R(3), R(5),
+ B(LdaContextSlot), R(context), U8(5),
+ B(ToName),
+ B(Star), R(6),
+ B(LdaConstant), U8(3),
+ B(TestEqualStrict), R(6),
+ B(JumpIfFalse), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
+ B(CreateClosure), U8(5), U8(0),
+ B(Star), R(7),
+ B(LdaSmi), U8(1),
+ B(Star), R(9),
+ B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(5), U8(5),
+ B(CallRuntime), U16(Runtime::kFinalizeClassDefinition), R(3), U8(2),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var count = 0;
+ class C { constructor() { count++; }}
+ return new C();
+"
+frame size: 10
+parameter count: 1
+bytecode array length: 74
+bytecodes: [
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
+ B(PushContext), R(2),
+ B(LdaTheHole),
+ B(Star), R(1),
+ B(StackCheck),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(4),
+ B(LdaTheHole),
+ B(Star), R(0),
+ B(LdaTheHole),
+ B(Star), R(3),
+ B(CreateClosure), U8(0), U8(0),
+ B(Star), R(4),
+ B(LdaSmi), U8(49),
+ B(Star), R(5),
+ B(LdaSmi), U8(86),
+ B(Star), R(6),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4),
+ B(Star), R(3),
+ B(LoadIC), R(3), U8(1), U8(1),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kFinalizeClassDefinition), R(3), U8(2),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(2),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
+ B(Star), R(3),
+ B(New), R(3), R(0), U8(0),
+ B(Return),
+]
+constant pool: [
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden
new file mode 100644
index 0000000000..10931e9567
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden
@@ -0,0 +1,139 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: mixed
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ var a = 1; a += 2;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 15
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Add), R(1),
+ B(Star), R(0),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 1; a /= 2;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 15
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Div), R(1),
+ B(Star), R(0),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = { val: 2 }; a.name *= 2;
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 27
+bytecodes: [
+ B(StackCheck),
+ B(CreateObjectLiteral), U8(0), U8(0), U8(5),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(1), U8(1),
+ B(Star), R(2),
+ B(LdaSmi), U8(2),
+ B(Mul), R(2),
+ B(StoreICSloppy), R(1), U8(1), U8(3),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = { 1: 2 }; a[1] ^= 2;
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 30
+bytecodes: [
+ B(StackCheck),
+ B(CreateObjectLiteral), U8(0), U8(0), U8(5),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(Star), R(2),
+ B(KeyedLoadIC), R(1), U8(1),
+ B(Star), R(3),
+ B(LdaSmi), U8(2),
+ B(BitwiseXor), R(3),
+ B(KeyedStoreICSloppy), R(1), R(2), U8(3),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 1; (function f() { return a; }); a |= 24;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 30
+bytecodes: [
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
+ B(PushContext), R(0),
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateClosure), U8(0), U8(0),
+ B(LdaContextSlot), R(context), U8(4),
+ B(Star), R(1),
+ B(LdaSmi), U8(24),
+ B(BitwiseOr), R(1),
+ B(StaContextSlot), R(context), U8(4),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Conditional.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Conditional.golden
new file mode 100644
index 0000000000..6a0703a7d8
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Conditional.golden
@@ -0,0 +1,54 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: number
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ return 1 ? 2 : 3;
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 12
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(JumpIfToBooleanFalse), U8(6),
+ B(LdaSmi), U8(2),
+ B(Jump), U8(4),
+ B(LdaSmi), U8(3),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ return 1 ? 2 ? 3 : 4 : 5;
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 20
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(JumpIfToBooleanFalse), U8(14),
+ B(LdaSmi), U8(2),
+ B(JumpIfToBooleanFalse), U8(6),
+ B(LdaSmi), U8(3),
+ B(Jump), U8(4),
+ B(LdaSmi), U8(4),
+ B(Jump), U8(4),
+ B(LdaSmi), U8(5),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden
new file mode 100644
index 0000000000..bc6a587946
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden
@@ -0,0 +1,116 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: string
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ const x = 10;
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 10
+bytecodes: [
+ B(LdaTheHole),
+ B(Star), R(0),
+ B(StackCheck),
+ B(LdaSmi), U8(10),
+ B(Star), R(0),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ const x = 10; return x;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 20
+bytecodes: [
+ B(LdaTheHole),
+ B(Star), R(0),
+ B(StackCheck),
+ B(LdaSmi), U8(10),
+ B(Star), R(0),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(0),
+ B(Star), R(1),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(1), U8(1),
+ B(Return),
+]
+constant pool: [
+ "x",
+]
+handlers: [
+]
+
+---
+snippet: "
+ const x = ( x = 20);
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 32
+bytecodes: [
+ B(LdaTheHole),
+ B(Star), R(0),
+ B(StackCheck),
+ B(LdaSmi), U8(20),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(0),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
+ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
+ B(Ldar), R(1),
+ B(Star), R(0),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ "x",
+]
+handlers: [
+]
+
+---
+snippet: "
+ const x = 10; x = 20;
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 36
+bytecodes: [
+ B(LdaTheHole),
+ B(Star), R(0),
+ B(StackCheck),
+ B(LdaSmi), U8(10),
+ B(Star), R(0),
+ B(LdaSmi), U8(20),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(0),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
+ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
+ B(Ldar), R(1),
+ B(Star), R(0),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ "x",
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden
new file mode 100644
index 0000000000..9e26a1e75f
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden
@@ -0,0 +1,138 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: mixed
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ const x = 10; function f1() {return x;}
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 24
+bytecodes: [
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
+ B(PushContext), R(1),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateClosure), U8(0), U8(0),
+ B(Star), R(0),
+ B(StackCheck),
+ B(LdaSmi), U8(10),
+ B(StaContextSlot), R(context), U8(4),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ const x = 10; function f1() {return x;} return x;
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 37
+bytecodes: [
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
+ B(PushContext), R(1),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateClosure), U8(0), U8(0),
+ B(Star), R(0),
+ B(StackCheck),
+ B(LdaSmi), U8(10),
+ B(StaContextSlot), R(context), U8(4),
+ B(LdaContextSlot), R(context), U8(4),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(1),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
+ B(Return),
+]
+constant pool: [
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ const x = (x = 20); function f1() {return x;}
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 50
+bytecodes: [
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
+ B(PushContext), R(1),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateClosure), U8(0), U8(0),
+ B(Star), R(0),
+ B(StackCheck),
+ B(LdaSmi), U8(20),
+ B(Star), R(2),
+ B(LdaContextSlot), R(context), U8(4),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
+ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
+ B(Ldar), R(2),
+ B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ const x = 10; x = 20; function f1() {return x;}
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 52
+bytecodes: [
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
+ B(PushContext), R(1),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateClosure), U8(0), U8(0),
+ B(Star), R(0),
+ B(StackCheck),
+ B(LdaSmi), U8(10),
+ B(StaContextSlot), R(context), U8(4),
+ B(LdaSmi), U8(20),
+ B(Star), R(2),
+ B(LdaContextSlot), R(context), U8(4),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
+ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
+ B(Ldar), R(2),
+ B(StaContextSlot), R(context), U8(4),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden
new file mode 100644
index 0000000000..8e48b14ece
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden
@@ -0,0 +1,105 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: mixed
+execute: yes
+wrap: no
+test function name: f
+
+---
+snippet: "
+ function f(arg1) { return function() { arg1 = 2; }; }
+ f();
+"
+frame size: 1
+parameter count: 2
+bytecode array length: 17
+bytecodes: [
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
+ B(PushContext), R(0),
+ B(Ldar), R(arg0),
+ B(StaContextSlot), R(context), U8(4),
+ B(StackCheck),
+ B(CreateClosure), U8(0), U8(0),
+ B(Return),
+]
+constant pool: [
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(arg1) { var a = function() { arg1 = 2; }; return arg1; }
+ f();
+"
+frame size: 2
+parameter count: 2
+bytecode array length: 22
+bytecodes: [
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
+ B(PushContext), R(1),
+ B(Ldar), R(arg0),
+ B(StaContextSlot), R(context), U8(4),
+ B(StackCheck),
+ B(CreateClosure), U8(0), U8(0),
+ B(Star), R(0),
+ B(LdaContextSlot), R(context), U8(4),
+ B(Return),
+]
+constant pool: [
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(a1, a2, a3, a4) { return function() { a1 = a3; }; }
+ f();
+"
+frame size: 1
+parameter count: 5
+bytecode array length: 22
+bytecodes: [
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
+ B(PushContext), R(0),
+ B(Ldar), R(arg0),
+ B(StaContextSlot), R(context), U8(5),
+ B(Ldar), R(arg2),
+ B(StaContextSlot), R(context), U8(4),
+ B(StackCheck),
+ B(CreateClosure), U8(0), U8(0),
+ B(Return),
+]
+constant pool: [
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f() { var self = this; return function() { self = 2; }; }
+ f();
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 17
+bytecodes: [
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
+ B(PushContext), R(0),
+ B(StackCheck),
+ B(Ldar), R(this),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateClosure), U8(0), U8(0),
+ B(Return),
+]
+constant pool: [
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden
new file mode 100644
index 0000000000..e4af981e39
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden
@@ -0,0 +1,920 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: mixed
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ var a; return function() { a = 1; };
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 12
+bytecodes: [
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
+ B(PushContext), R(0),
+ B(StackCheck),
+ B(CreateClosure), U8(0), U8(0),
+ B(Return),
+]
+constant pool: [
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 1; return function() { a = 2; };
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 17
+bytecodes: [
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
+ B(PushContext), R(0),
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateClosure), U8(0), U8(0),
+ B(Return),
+]
+constant pool: [
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 1; var b = 2; return function() { a = 2; b = 3 };
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 22
+bytecodes: [
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
+ B(PushContext), R(0),
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(StaContextSlot), R(context), U8(4),
+ B(LdaSmi), U8(2),
+ B(StaContextSlot), R(context), U8(5),
+ B(CreateClosure), U8(0), U8(0),
+ B(Return),
+]
+constant pool: [
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a; (function() { a = 2; })(); return a;
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 25
+bytecodes: [
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
+ B(PushContext), R(0),
+ B(StackCheck),
+ B(LdaUndefined),
+ B(Star), R(2),
+ B(CreateClosure), U8(0), U8(0),
+ B(Star), R(1),
+ B(Call), R(1), R(2), U8(1), U8(1),
+ B(LdaContextSlot), R(context), U8(4),
+ B(Return),
+]
+constant pool: [
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ 'use strict';
+ let a = 1;
+ { let b = 2; return function() { a + b; }; }
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 47
+bytecodes: [
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
+ B(PushContext), R(0),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4),
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(StaContextSlot), R(context), U8(4),
+ B(LdaConstant), U8(0),
+ B(Star), R(2),
+ B(Ldar), R(closure),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kPushBlockContext), R(2), U8(2),
+ B(PushContext), R(1),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4),
+ B(LdaSmi), U8(2),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateClosure), U8(1), U8(0),
+ B(PopContext), R(0),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ 'use strict';
+ var a0 = 0;
+ var a1 = 0;
+ var a2 = 0;
+ var a3 = 0;
+ var a4 = 0;
+ var a5 = 0;
+ var a6 = 0;
+ var a7 = 0;
+ var a8 = 0;
+ var a9 = 0;
+ var a10 = 0;
+ var a11 = 0;
+ var a12 = 0;
+ var a13 = 0;
+ var a14 = 0;
+ var a15 = 0;
+ var a16 = 0;
+ var a17 = 0;
+ var a18 = 0;
+ var a19 = 0;
+ var a20 = 0;
+ var a21 = 0;
+ var a22 = 0;
+ var a23 = 0;
+ var a24 = 0;
+ var a25 = 0;
+ var a26 = 0;
+ var a27 = 0;
+ var a28 = 0;
+ var a29 = 0;
+ var a30 = 0;
+ var a31 = 0;
+ var a32 = 0;
+ var a33 = 0;
+ var a34 = 0;
+ var a35 = 0;
+ var a36 = 0;
+ var a37 = 0;
+ var a38 = 0;
+ var a39 = 0;
+ var a40 = 0;
+ var a41 = 0;
+ var a42 = 0;
+ var a43 = 0;
+ var a44 = 0;
+ var a45 = 0;
+ var a46 = 0;
+ var a47 = 0;
+ var a48 = 0;
+ var a49 = 0;
+ var a50 = 0;
+ var a51 = 0;
+ var a52 = 0;
+ var a53 = 0;
+ var a54 = 0;
+ var a55 = 0;
+ var a56 = 0;
+ var a57 = 0;
+ var a58 = 0;
+ var a59 = 0;
+ var a60 = 0;
+ var a61 = 0;
+ var a62 = 0;
+ var a63 = 0;
+ var a64 = 0;
+ var a65 = 0;
+ var a66 = 0;
+ var a67 = 0;
+ var a68 = 0;
+ var a69 = 0;
+ var a70 = 0;
+ var a71 = 0;
+ var a72 = 0;
+ var a73 = 0;
+ var a74 = 0;
+ var a75 = 0;
+ var a76 = 0;
+ var a77 = 0;
+ var a78 = 0;
+ var a79 = 0;
+ var a80 = 0;
+ var a81 = 0;
+ var a82 = 0;
+ var a83 = 0;
+ var a84 = 0;
+ var a85 = 0;
+ var a86 = 0;
+ var a87 = 0;
+ var a88 = 0;
+ var a89 = 0;
+ var a90 = 0;
+ var a91 = 0;
+ var a92 = 0;
+ var a93 = 0;
+ var a94 = 0;
+ var a95 = 0;
+ var a96 = 0;
+ var a97 = 0;
+ var a98 = 0;
+ var a99 = 0;
+ var a100 = 0;
+ var a101 = 0;
+ var a102 = 0;
+ var a103 = 0;
+ var a104 = 0;
+ var a105 = 0;
+ var a106 = 0;
+ var a107 = 0;
+ var a108 = 0;
+ var a109 = 0;
+ var a110 = 0;
+ var a111 = 0;
+ var a112 = 0;
+ var a113 = 0;
+ var a114 = 0;
+ var a115 = 0;
+ var a116 = 0;
+ var a117 = 0;
+ var a118 = 0;
+ var a119 = 0;
+ var a120 = 0;
+ var a121 = 0;
+ var a122 = 0;
+ var a123 = 0;
+ var a124 = 0;
+ var a125 = 0;
+ var a126 = 0;
+ var a127 = 0;
+ var a128 = 0;
+ var a129 = 0;
+ var a130 = 0;
+ var a131 = 0;
+ var a132 = 0;
+ var a133 = 0;
+ var a134 = 0;
+ var a135 = 0;
+ var a136 = 0;
+ var a137 = 0;
+ var a138 = 0;
+ var a139 = 0;
+ var a140 = 0;
+ var a141 = 0;
+ var a142 = 0;
+ var a143 = 0;
+ var a144 = 0;
+ var a145 = 0;
+ var a146 = 0;
+ var a147 = 0;
+ var a148 = 0;
+ var a149 = 0;
+ var a150 = 0;
+ var a151 = 0;
+ var a152 = 0;
+ var a153 = 0;
+ var a154 = 0;
+ var a155 = 0;
+ var a156 = 0;
+ var a157 = 0;
+ var a158 = 0;
+ var a159 = 0;
+ var a160 = 0;
+ var a161 = 0;
+ var a162 = 0;
+ var a163 = 0;
+ var a164 = 0;
+ var a165 = 0;
+ var a166 = 0;
+ var a167 = 0;
+ var a168 = 0;
+ var a169 = 0;
+ var a170 = 0;
+ var a171 = 0;
+ var a172 = 0;
+ var a173 = 0;
+ var a174 = 0;
+ var a175 = 0;
+ var a176 = 0;
+ var a177 = 0;
+ var a178 = 0;
+ var a179 = 0;
+ var a180 = 0;
+ var a181 = 0;
+ var a182 = 0;
+ var a183 = 0;
+ var a184 = 0;
+ var a185 = 0;
+ var a186 = 0;
+ var a187 = 0;
+ var a188 = 0;
+ var a189 = 0;
+ var a190 = 0;
+ var a191 = 0;
+ var a192 = 0;
+ var a193 = 0;
+ var a194 = 0;
+ var a195 = 0;
+ var a196 = 0;
+ var a197 = 0;
+ var a198 = 0;
+ var a199 = 0;
+ var a200 = 0;
+ var a201 = 0;
+ var a202 = 0;
+ var a203 = 0;
+ var a204 = 0;
+ var a205 = 0;
+ var a206 = 0;
+ var a207 = 0;
+ var a208 = 0;
+ var a209 = 0;
+ var a210 = 0;
+ var a211 = 0;
+ var a212 = 0;
+ var a213 = 0;
+ var a214 = 0;
+ var a215 = 0;
+ var a216 = 0;
+ var a217 = 0;
+ var a218 = 0;
+ var a219 = 0;
+ var a220 = 0;
+ var a221 = 0;
+ var a222 = 0;
+ var a223 = 0;
+ var a224 = 0;
+ var a225 = 0;
+ var a226 = 0;
+ var a227 = 0;
+ var a228 = 0;
+ var a229 = 0;
+ var a230 = 0;
+ var a231 = 0;
+ var a232 = 0;
+ var a233 = 0;
+ var a234 = 0;
+ var a235 = 0;
+ var a236 = 0;
+ var a237 = 0;
+ var a238 = 0;
+ var a239 = 0;
+ var a240 = 0;
+ var a241 = 0;
+ var a242 = 0;
+ var a243 = 0;
+ var a244 = 0;
+ var a245 = 0;
+ var a246 = 0;
+ var a247 = 0;
+ var a248 = 0;
+ eval();
+ var b = 100;
+ return b
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 1046
+bytecodes: [
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
+ B(PushContext), R(0),
+ B(Ldar), R(this),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateUnmappedArguments),
+ B(StaContextSlot), R(context), U8(5),
+ B(Ldar), R(new_target),
+ B(StaContextSlot), R(context), U8(6),
+ B(StackCheck),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(7),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(8),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(9),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(10),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(11),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(12),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(13),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(14),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(15),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(16),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(17),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(18),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(19),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(20),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(21),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(22),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(23),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(24),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(25),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(26),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(27),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(28),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(29),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(30),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(31),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(32),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(33),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(34),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(35),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(36),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(37),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(38),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(39),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(40),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(41),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(42),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(43),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(44),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(45),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(46),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(47),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(48),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(49),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(50),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(51),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(52),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(53),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(54),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(55),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(56),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(57),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(58),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(59),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(60),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(61),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(62),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(63),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(64),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(65),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(66),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(67),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(68),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(69),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(70),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(71),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(72),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(73),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(74),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(75),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(76),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(77),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(78),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(79),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(80),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(81),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(82),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(83),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(84),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(85),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(86),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(87),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(88),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(89),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(90),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(91),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(92),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(93),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(94),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(95),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(96),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(97),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(98),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(99),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(100),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(101),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(102),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(103),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(104),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(105),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(106),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(107),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(108),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(109),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(110),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(111),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(112),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(113),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(114),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(115),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(116),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(117),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(118),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(119),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(120),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(121),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(122),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(123),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(124),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(125),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(126),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(127),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(128),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(129),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(130),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(131),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(132),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(133),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(134),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(135),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(136),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(137),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(138),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(139),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(140),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(141),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(142),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(143),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(144),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(145),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(146),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(147),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(148),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(149),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(150),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(151),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(152),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(153),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(154),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(155),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(156),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(157),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(158),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(159),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(160),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(161),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(162),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(163),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(164),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(165),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(166),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(167),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(168),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(169),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(170),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(171),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(172),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(173),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(174),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(175),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(176),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(177),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(178),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(179),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(180),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(181),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(182),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(183),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(184),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(185),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(186),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(187),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(188),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(189),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(190),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(191),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(192),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(193),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(194),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(195),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(196),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(197),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(198),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(199),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(200),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(201),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(202),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(203),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(204),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(205),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(206),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(207),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(208),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(209),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(210),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(211),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(212),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(213),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(214),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(215),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(216),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(217),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(218),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(219),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(220),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(221),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(222),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(223),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(224),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(225),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(226),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(227),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(228),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(229),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(230),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(231),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(232),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(233),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(234),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(235),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(236),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(237),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(238),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(239),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(240),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(241),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(242),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(243),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(244),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(245),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(246),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(247),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(248),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(249),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(250),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(251),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(252),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(253),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(254),
+ B(LdaZero),
+ B(StaContextSlot), R(context), U8(255),
+ B(LdaUndefined),
+ B(Star), R(2),
+ B(LdaGlobal), U8(0), U8(1),
+ B(Star), R(1),
+ B(Call), R(1), R(2), U8(1), U8(0),
+ B(LdaSmi), U8(100),
+ B(Wide), B(StaContextSlot), R16(context), U16(256),
+ B(Wide), B(LdaContextSlot), R16(context), U16(256),
+ B(Return),
+]
+constant pool: [
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden
new file mode 100644
index 0000000000..af82777aa6
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden
@@ -0,0 +1,297 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: mixed
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ var a = 1; return ++a;
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 10
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(ToNumber),
+ B(Inc),
+ B(Star), R(0),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 1; return a++;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 14
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(ToNumber),
+ B(Star), R(1),
+ B(Inc),
+ B(Star), R(0),
+ B(Ldar), R(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 1; return --a;
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 10
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(ToNumber),
+ B(Dec),
+ B(Star), R(0),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 1; return a--;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 14
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(ToNumber),
+ B(Star), R(1),
+ B(Dec),
+ B(Star), R(0),
+ B(Ldar), R(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = { val: 1 }; return a.val++;
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 26
+bytecodes: [
+ B(StackCheck),
+ B(CreateObjectLiteral), U8(0), U8(0), U8(5),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(1), U8(1),
+ B(ToNumber),
+ B(Star), R(2),
+ B(Inc),
+ B(StoreICSloppy), R(1), U8(1), U8(3),
+ B(Ldar), R(2),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = { val: 1 }; return --a.val;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 22
+bytecodes: [
+ B(StackCheck),
+ B(CreateObjectLiteral), U8(0), U8(0), U8(5),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(1), U8(1),
+ B(ToNumber),
+ B(Dec),
+ B(StoreICSloppy), R(1), U8(1), U8(3),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var name = 'var'; var a = { val: 1 }; return a[name]--;
+"
+frame size: 5
+parameter count: 1
+bytecode array length: 33
+bytecodes: [
+ B(StackCheck),
+ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ B(CreateObjectLiteral), U8(1), U8(0), U8(5),
+ B(Star), R(2),
+ B(Star), R(1),
+ B(Star), R(2),
+ B(Ldar), R(0),
+ B(Star), R(3),
+ B(KeyedLoadIC), R(2), U8(1),
+ B(ToNumber),
+ B(Star), R(4),
+ B(Dec),
+ B(KeyedStoreICSloppy), R(2), R(3), U8(3),
+ B(Ldar), R(4),
+ B(Return),
+]
+constant pool: [
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::FIXED_ARRAY_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var name = 'var'; var a = { val: 1 }; return ++a[name];
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 29
+bytecodes: [
+ B(StackCheck),
+ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ B(CreateObjectLiteral), U8(1), U8(0), U8(5),
+ B(Star), R(2),
+ B(Star), R(1),
+ B(Star), R(2),
+ B(Ldar), R(0),
+ B(Star), R(3),
+ B(KeyedLoadIC), R(2), U8(1),
+ B(ToNumber),
+ B(Inc),
+ B(KeyedStoreICSloppy), R(2), R(3), U8(3),
+ B(Return),
+]
+constant pool: [
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::FIXED_ARRAY_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 1; var b = function() { return a }; return ++a;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 27
+bytecodes: [
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
+ B(PushContext), R(1),
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateClosure), U8(0), U8(0),
+ B(Star), R(0),
+ B(LdaContextSlot), R(context), U8(4),
+ B(ToNumber),
+ B(Inc),
+ B(StaContextSlot), R(context), U8(4),
+ B(Return),
+]
+constant pool: [
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 1; var b = function() { return a }; return a--;
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 31
+bytecodes: [
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
+ B(PushContext), R(1),
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateClosure), U8(0), U8(0),
+ B(Star), R(0),
+ B(LdaContextSlot), R(context), U8(4),
+ B(ToNumber),
+ B(Star), R(2),
+ B(Dec),
+ B(StaContextSlot), R(context), U8(4),
+ B(Ldar), R(2),
+ B(Return),
+]
+constant pool: [
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var idx = 1; var a = [1, 2]; return a[idx++] = 2;
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 28
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(CreateArrayLiteral), U8(0), U8(0), U8(3),
+ B(Star), R(1),
+ B(Star), R(2),
+ B(Ldar), R(0),
+ B(ToNumber),
+ B(Star), R(3),
+ B(Inc),
+ B(Star), R(0),
+ B(LdaSmi), U8(2),
+ B(KeyedStoreICSloppy), R(2), R(3), U8(1),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden
new file mode 100644
index 0000000000..f088ce16d4
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden
@@ -0,0 +1,147 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: string
+execute: yes
+wrap: no
+test function name: f
+
+---
+snippet: "
+ function f() { return arguments; }
+ f();
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 7
+bytecodes: [
+ B(CreateMappedArguments),
+ B(Star), R(0),
+ B(StackCheck),
+ B(Ldar), R(0),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f() { return arguments[0]; }
+ f();
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 13
+bytecodes: [
+ B(CreateMappedArguments),
+ B(Star), R(0),
+ B(StackCheck),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(LdaZero),
+ B(KeyedLoadIC), R(1), U8(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f() { 'use strict'; return arguments; }
+ f();
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 7
+bytecodes: [
+ B(CreateUnmappedArguments),
+ B(Star), R(0),
+ B(StackCheck),
+ B(Ldar), R(0),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(a) { return arguments[0]; }
+ f();
+"
+frame size: 3
+parameter count: 2
+bytecode array length: 25
+bytecodes: [
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
+ B(PushContext), R(1),
+ B(Ldar), R(arg0),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateMappedArguments),
+ B(Star), R(0),
+ B(StackCheck),
+ B(Ldar), R(0),
+ B(Star), R(2),
+ B(LdaZero),
+ B(KeyedLoadIC), R(2), U8(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(a, b, c) { return arguments; }
+ f();
+"
+frame size: 2
+parameter count: 4
+bytecode array length: 29
+bytecodes: [
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
+ B(PushContext), R(1),
+ B(Ldar), R(arg0),
+ B(StaContextSlot), R(context), U8(6),
+ B(Ldar), R(arg1),
+ B(StaContextSlot), R(context), U8(5),
+ B(Ldar), R(arg2),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateMappedArguments),
+ B(Star), R(0),
+ B(StackCheck),
+ B(Ldar), R(0),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(a, b, c) { 'use strict'; return arguments; }
+ f();
+"
+frame size: 1
+parameter count: 4
+bytecode array length: 7
+bytecodes: [
+ B(CreateUnmappedArguments),
+ B(Star), R(0),
+ B(StackCheck),
+ B(Ldar), R(0),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden
new file mode 100644
index 0000000000..47df336b82
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden
@@ -0,0 +1,115 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: number
+execute: yes
+wrap: no
+test function name: f
+
+---
+snippet: "
+ function f(...restArgs) { return restArgs; }
+ f();
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 7
+bytecodes: [
+ B(CreateRestParameter),
+ B(Star), R(0),
+ B(StackCheck),
+ B(Ldar), R(0),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(a, ...restArgs) { return restArgs; }
+ f();
+"
+frame size: 2
+parameter count: 2
+bytecode array length: 14
+bytecodes: [
+ B(CreateRestParameter),
+ B(Star), R(0),
+ B(LdaTheHole),
+ B(Star), R(1),
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(a, ...restArgs) { return restArgs[0]; }
+ f();
+"
+frame size: 3
+parameter count: 2
+bytecode array length: 20
+bytecodes: [
+ B(CreateRestParameter),
+ B(Star), R(0),
+ B(LdaTheHole),
+ B(Star), R(1),
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(Star), R(2),
+ B(LdaZero),
+ B(KeyedLoadIC), R(2), U8(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(a, ...restArgs) { return restArgs[0] + arguments[0]; }
+ f();
+"
+frame size: 5
+parameter count: 2
+bytecode array length: 35
+bytecodes: [
+ B(CreateUnmappedArguments),
+ B(Star), R(0),
+ B(CreateRestParameter),
+ B(Star), R(1),
+ B(LdaTheHole),
+ B(Star), R(2),
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(Star), R(2),
+ B(Ldar), R(1),
+ B(Star), R(3),
+ B(LdaZero),
+ B(KeyedLoadIC), R(3), U8(1),
+ B(Star), R(4),
+ B(Ldar), R(0),
+ B(Star), R(3),
+ B(LdaZero),
+ B(KeyedLoadIC), R(3), U8(3),
+ B(Add), R(4),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeadCodeRemoval.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeadCodeRemoval.golden
new file mode 100644
index 0000000000..464e0af69e
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeadCodeRemoval.golden
@@ -0,0 +1,83 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: number
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ return; var a = 1; a();
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 3
+bytecodes: [
+ B(StackCheck),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ if (false) { return; }; var a = 1;
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 7
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ if (true) { return 1; } else { return 2; };
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 4
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 1; if (a) { return 1; }; return 2;
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 13
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(JumpIfToBooleanFalse), U8(5),
+ B(LdaSmi), U8(1),
+ B(Return),
+ B(LdaSmi), U8(2),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden
new file mode 100644
index 0000000000..cd106bb52d
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden
@@ -0,0 +1,127 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: mixed
+execute: no
+wrap: no
+top level: yes
+
+---
+snippet: "
+ var a = 1;
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 31
+bytecodes: [
+ B(LdaConstant), U8(0),
+ B(Star), R(1),
+ B(LdaZero),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kDeclareGlobals), R(1), U8(2),
+ B(StackCheck),
+ B(LdaConstant), U8(1),
+ B(Star), R(1),
+ B(LdaZero),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kInitializeVarGlobal), R(1), U8(3),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f() {}
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 15
+bytecodes: [
+ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ B(LdaZero),
+ B(Star), R(1),
+ B(CallRuntime), U16(Runtime::kDeclareGlobals), R(0), U8(2),
+ B(StackCheck),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 1;
+ a=2;
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 37
+bytecodes: [
+ B(LdaConstant), U8(0),
+ B(Star), R(1),
+ B(LdaZero),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kDeclareGlobals), R(1), U8(2),
+ B(StackCheck),
+ B(LdaConstant), U8(1),
+ B(Star), R(1),
+ B(LdaZero),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kInitializeVarGlobal), R(1), U8(3),
+ B(LdaSmi), U8(2),
+ B(StaGlobalSloppy), U8(1), U8(3),
+ B(Star), R(0),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f() {}
+ f();
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 29
+bytecodes: [
+ B(LdaConstant), U8(0),
+ B(Star), R(1),
+ B(LdaZero),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kDeclareGlobals), R(1), U8(2),
+ B(StackCheck),
+ B(LdaUndefined),
+ B(Star), R(2),
+ B(LdaGlobal), U8(1), U8(1),
+ B(Star), R(1),
+ B(Call), R(1), R(2), U8(1), U8(3),
+ B(Star), R(0),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden
new file mode 100644
index 0000000000..29fee7627f
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden
@@ -0,0 +1,146 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: mixed
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ var a = {x:13, y:14}; return delete a.x;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 16
+bytecodes: [
+ B(StackCheck),
+ B(CreateObjectLiteral), U8(0), U8(0), U8(5),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LdaConstant), U8(1),
+ B(DeletePropertySloppy), R(1),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ 'use strict'; var a = {x:13, y:14}; return delete a.x;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 16
+bytecodes: [
+ B(StackCheck),
+ B(CreateObjectLiteral), U8(0), U8(0), U8(5),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LdaConstant), U8(1),
+ B(DeletePropertyStrict), R(1),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = {1:13, 2:14}; return delete a[2];
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 16
+bytecodes: [
+ B(StackCheck),
+ B(CreateObjectLiteral), U8(0), U8(0), U8(5),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(DeletePropertySloppy), R(1),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 10; return delete a;
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 7
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(10),
+ B(Star), R(0),
+ B(LdaFalse),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ 'use strict';
+ var a = {1:10};
+ (function f1() {return a;});
+ return delete a[1];
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 30
+bytecodes: [
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
+ B(PushContext), R(0),
+ B(StackCheck),
+ B(CreateObjectLiteral), U8(0), U8(0), U8(5),
+ B(Star), R(1),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateClosure), U8(1), U8(0),
+ B(LdaContextSlot), R(context), U8(4),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(DeletePropertyStrict), R(1),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ return delete 'test';
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 3
+bytecodes: [
+ B(StackCheck),
+ B(LdaTrue),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeleteLookupSlotInEval.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeleteLookupSlotInEval.golden
new file mode 100644
index 0000000000..839371d31e
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeleteLookupSlotInEval.golden
@@ -0,0 +1,88 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: string
+execute: yes
+wrap: no
+test function name: f
+
+---
+snippet: "
+ var f;
+ var x = 1;
+ z = 10;
+ function f1() {
+ var y;
+ eval(\"function t() { delete x; }; f = t; f();\");
+ }
+ f1();
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 12
+bytecodes: [
+ B(StackCheck),
+ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ B(CallRuntime), U16(Runtime::kDeleteLookupSlot), R(0), U8(1),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ "x",
+]
+handlers: [
+]
+
+---
+snippet: "
+ var f;
+ var x = 1;
+ z = 10;
+ function f1() {
+ var y;
+ eval(\"function t() { return delete y; }; f = t; f();\");
+ }
+ f1();
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 3
+bytecodes: [
+ B(StackCheck),
+ B(LdaFalse),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var f;
+ var x = 1;
+ z = 10;
+ function f1() {
+ var y;
+ eval(\"function t() { return delete z; }; f = t; f();\");
+ }
+ f1();
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 11
+bytecodes: [
+ B(StackCheck),
+ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ B(CallRuntime), U16(Runtime::kDeleteLookupSlot), R(0), U8(1),
+ B(Return),
+]
+constant pool: [
+ "z",
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DoDebugger.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DoDebugger.golden
new file mode 100644
index 0000000000..68bac32119
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DoDebugger.golden
@@ -0,0 +1,26 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: string
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ debugger;
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 4
+bytecodes: [
+ B(StackCheck),
+ B(Debugger),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DoExpression.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DoExpression.golden
new file mode 100644
index 0000000000..9630fae2bb
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DoExpression.golden
@@ -0,0 +1,78 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: string
+execute: yes
+wrap: yes
+do expressions: yes
+
+---
+snippet: "
+ var a = do { }; return a;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 6
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = do { var x = 100; }; return a;
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 11
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(100),
+ B(Star), R(1),
+ B(LdaUndefined),
+ B(Star), R(0),
+ B(Star), R(2),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ while(true) { var a = 10; a = do { ++a; break; }; a = 20; }
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 26
+bytecodes: [
+ B(StackCheck),
+ B(StackCheck),
+ B(LdaSmi), U8(10),
+ B(Star), R(1),
+ B(ToNumber),
+ B(Inc),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Jump), U8(12),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(20),
+ B(Star), R(1),
+ B(Jump), U8(-21),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden
new file mode 100644
index 0000000000..274b762e22
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden
@@ -0,0 +1,49 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: string
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ return eval('1;');
+"
+frame size: 9
+parameter count: 1
+bytecode array length: 65
+bytecodes: [
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
+ B(PushContext), R(0),
+ B(Ldar), R(this),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateMappedArguments),
+ B(StaContextSlot), R(context), U8(5),
+ B(Ldar), R(new_target),
+ B(StaContextSlot), R(context), U8(6),
+ B(StackCheck),
+ B(LdaConstant), U8(0),
+ B(Star), R(3),
+ B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(3), U8(1), R(1),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(Mov), R(1), R(4),
+ B(Mov), R(3), R(5),
+ B(Mov), R(closure), R(6),
+ B(LdaZero),
+ B(Star), R(7),
+ B(LdaSmi), U8(30),
+ B(Star), R(8),
+ B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(5),
+ B(Star), R(1),
+ B(Call), R(1), R(2), U8(2), U8(0),
+ B(Return),
+]
+constant pool: [
+ "eval",
+ "1;",
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
new file mode 100644
index 0000000000..20f68e72b8
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
@@ -0,0 +1,258 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: mixed
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ for (var p in null) {}
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 3
+bytecodes: [
+ B(StackCheck),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ for (var p in undefined) {}
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 3
+bytecodes: [
+ B(StackCheck),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ for (var p in undefined) {}
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 3
+bytecodes: [
+ B(StackCheck),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 'potatoes';
+ for (var p in x) { return p; }
+"
+frame size: 8
+parameter count: 1
+bytecode array length: 45
+bytecodes: [
+ B(StackCheck),
+ B(LdaConstant), U8(0),
+ B(Star), R(1),
+ B(JumpIfUndefined), U8(38),
+ B(JumpIfNull), U8(36),
+ B(ToObject),
+ B(Star), R(3),
+ B(ForInPrepare), R(4),
+ B(LdaZero),
+ B(Star), R(7),
+ B(ForInDone), R(7), R(6),
+ B(JumpIfTrue), U8(23),
+ B(ForInNext), R(3), R(7), R(4), U8(1),
+ B(JumpIfUndefined), U8(10),
+ B(Star), R(0),
+ B(StackCheck),
+ B(Ldar), R(0),
+ B(Star), R(2),
+ B(Return),
+ B(ForInStep), R(7),
+ B(Star), R(7),
+ B(Jump), U8(-24),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 0;
+ for (var p in [1,2,3]) { x += p; }
+"
+frame size: 9
+parameter count: 1
+bytecode array length: 57
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(1),
+ B(CreateArrayLiteral), U8(0), U8(0), U8(3),
+ B(JumpIfUndefined), U8(47),
+ B(JumpIfNull), U8(45),
+ B(ToObject),
+ B(Star), R(3),
+ B(ForInPrepare), R(4),
+ B(LdaZero),
+ B(Star), R(7),
+ B(ForInDone), R(7), R(6),
+ B(JumpIfTrue), U8(32),
+ B(ForInNext), R(3), R(7), R(4), U8(1),
+ B(JumpIfUndefined), U8(19),
+ B(Star), R(0),
+ B(StackCheck),
+ B(Ldar), R(0),
+ B(Star), R(2),
+ B(Ldar), R(1),
+ B(Star), R(8),
+ B(Ldar), R(2),
+ B(Add), R(8),
+ B(Star), R(1),
+ B(ForInStep), R(7),
+ B(Star), R(7),
+ B(Jump), U8(-33),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = { 'a': 1, 'b': 2 };
+ for (x['a'] in [10, 20, 30]) {
+ if (x['a'] == 10) continue;
+ if (x['a'] == 20) break;
+ }
+"
+frame size: 8
+parameter count: 1
+bytecode array length: 94
+bytecodes: [
+ B(StackCheck),
+ B(CreateObjectLiteral), U8(0), U8(0), U8(5),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(CreateArrayLiteral), U8(1), U8(1), U8(3),
+ B(JumpIfUndefined), U8(79),
+ B(JumpIfNull), U8(77),
+ B(ToObject),
+ B(Star), R(1),
+ B(ForInPrepare), R(2),
+ B(LdaZero),
+ B(Star), R(5),
+ B(ForInDone), R(5), R(4),
+ B(JumpIfTrue), U8(64),
+ B(ForInNext), R(1), R(5), R(2), U8(9),
+ B(JumpIfUndefined), U8(51),
+ B(Star), R(6),
+ B(Ldar), R(0),
+ B(Star), R(7),
+ B(Ldar), R(6),
+ B(StoreICSloppy), R(7), U8(2), U8(7),
+ B(StackCheck),
+ B(Ldar), R(0),
+ B(Star), R(6),
+ B(LoadIC), R(6), U8(2), U8(3),
+ B(Star), R(7),
+ B(LdaSmi), U8(10),
+ B(TestEqual), R(7),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(20),
+ B(Ldar), R(0),
+ B(Star), R(6),
+ B(LoadIC), R(6), U8(2), U8(5),
+ B(Star), R(7),
+ B(LdaSmi), U8(20),
+ B(TestEqual), R(7),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(8),
+ B(ForInStep), R(5),
+ B(Star), R(5),
+ B(Jump), U8(-65),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = [ 10, 11, 12 ] ;
+ for (x[0] in [1,2,3]) { return x[3]; }
+"
+frame size: 9
+parameter count: 1
+bytecode array length: 69
+bytecodes: [
+ B(StackCheck),
+ B(CreateArrayLiteral), U8(0), U8(0), U8(3),
+ B(Star), R(0),
+ B(CreateArrayLiteral), U8(1), U8(1), U8(3),
+ B(JumpIfUndefined), U8(56),
+ B(JumpIfNull), U8(54),
+ B(ToObject),
+ B(Star), R(1),
+ B(ForInPrepare), R(2),
+ B(LdaZero),
+ B(Star), R(5),
+ B(ForInDone), R(5), R(4),
+ B(JumpIfTrue), U8(41),
+ B(ForInNext), R(1), R(5), R(2), U8(7),
+ B(JumpIfUndefined), U8(28),
+ B(Star), R(6),
+ B(Ldar), R(0),
+ B(Star), R(7),
+ B(LdaZero),
+ B(Star), R(8),
+ B(Ldar), R(6),
+ B(KeyedStoreICSloppy), R(7), R(8), U8(5),
+ B(StackCheck),
+ B(Ldar), R(0),
+ B(Star), R(6),
+ B(LdaSmi), U8(3),
+ B(KeyedLoadIC), R(6), U8(3),
+ B(Return),
+ B(ForInStep), R(5),
+ B(Star), R(5),
+ B(Jump), U8(-42),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::FIXED_ARRAY_TYPE,
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
new file mode 100644
index 0000000000..14bc1db60f
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
@@ -0,0 +1,760 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: mixed
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ for (var p of [0, 1, 2]) {}
+"
+frame size: 16
+parameter count: 1
+bytecode array length: 347
+bytecodes: [
+ B(StackCheck),
+ B(LdaUndefined),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(3),
+ B(Mov), R(context), R(11),
+ B(Mov), R(context), R(12),
+ B(CreateArrayLiteral), U8(0), U8(0), U8(3),
+ B(Star), R(14),
+ B(LdaConstant), U8(1),
+ B(KeyedLoadIC), R(14), U8(3),
+ B(Star), R(13),
+ B(Call), R(13), R(14), U8(1), U8(1),
+ B(Star), R(1),
+ B(Ldar), R(1),
+ B(Star), R(15),
+ B(LoadIC), R(15), U8(2), U8(7),
+ B(Star), R(14),
+ B(Call), R(14), R(15), U8(1), U8(5),
+ B(Star), R(2),
+ B(Star), R(13),
+ B(InvokeIntrinsic), U16(Runtime::k_IsJSReceiver), R(13), U8(1),
+ B(LogicalNot),
+ B(JumpIfFalse), U8(11),
+ B(Ldar), R(2),
+ B(Star), R(13),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
+ B(Ldar), R(2),
+ B(Star), R(13),
+ B(LoadIC), R(13), U8(3), U8(9),
+ B(JumpIfToBooleanTrue), U8(28),
+ B(LdaSmi), U8(2),
+ B(Star), R(3),
+ B(Ldar), R(2),
+ B(Star), R(13),
+ B(LoadIC), R(13), U8(4), U8(11),
+ B(Star), R(0),
+ B(Ldar), R(4),
+ B(StackCheck),
+ B(Ldar), R(0),
+ B(Star), R(7),
+ B(LdaZero),
+ B(Star), R(3),
+ B(Jump), U8(-70),
+ B(Jump), U8(47),
+ B(Star), R(14),
+ B(LdaConstant), U8(5),
+ B(Star), R(13),
+ B(Ldar), R(closure),
+ B(Star), R(15),
+ B(CallRuntime), U16(Runtime::kPushCatchContext), R(13), U8(3),
+ B(Star), R(12),
+ B(PushContext), R(8),
+ B(Ldar), R(3),
+ B(Star), R(13),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(13),
+ B(JumpIfFalse), U8(6),
+ B(LdaSmi), U8(1),
+ B(Star), R(3),
+ B(LdaContextSlot), R(context), U8(4),
+ B(Star), R(13),
+ B(CallRuntime), U16(Runtime::kReThrow), R(13), U8(1),
+ B(PopContext), R(8),
+ B(LdaSmi), U8(-1),
+ B(Star), R(9),
+ B(Jump), U8(7),
+ B(Star), R(10),
+ B(LdaZero),
+ B(Star), R(9),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Star), R(11),
+ B(Ldar), R(3),
+ B(Star), R(12),
+ B(LdaZero),
+ B(TestEqualStrict), R(12),
+ B(JumpIfTrue), U8(9),
+ B(Ldar), R(1),
+ B(Star), R(12),
+ B(LdaUndefined),
+ B(TestEqualStrict), R(12),
+ B(LogicalNot),
+ B(JumpIfFalseConstant), U8(9),
+ B(Ldar), R(1),
+ B(Star), R(12),
+ B(LoadIC), R(12), U8(6), U8(13),
+ B(Star), R(5),
+ B(Star), R(12),
+ B(LdaNull),
+ B(TestEqual), R(12),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(124),
+ B(Ldar), R(3),
+ B(Star), R(12),
+ B(LdaSmi), U8(1),
+ B(TestEqualStrict), R(12),
+ B(JumpIfFalse), U8(79),
+ B(Ldar), R(5),
+ B(TypeOf),
+ B(Star), R(12),
+ B(LdaConstant), U8(7),
+ B(TestEqualStrict), R(12),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(18),
+ B(Wide), B(LdaSmi), U16(139),
+ B(Star), R(12),
+ B(LdaConstant), U8(8),
+ B(Star), R(13),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(12), U8(2),
+ B(Throw),
+ B(Mov), R(context), R(12),
+ B(Ldar), R(5),
+ B(Star), R(13),
+ B(Ldar), R(1),
+ B(Star), R(14),
+ B(CallRuntime), U16(Runtime::k_Call), R(13), U8(2),
+ B(Jump), U8(30),
+ B(Star), R(14),
+ B(LdaConstant), U8(5),
+ B(Star), R(13),
+ B(Ldar), R(closure),
+ B(Star), R(15),
+ B(CallRuntime), U16(Runtime::kPushCatchContext), R(13), U8(3),
+ B(Star), R(12),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Ldar), R(12),
+ B(PushContext), R(8),
+ B(PopContext), R(8),
+ B(Jump), U8(37),
+ B(Ldar), R(5),
+ B(Star), R(12),
+ B(Ldar), R(1),
+ B(Star), R(13),
+ B(CallRuntime), U16(Runtime::k_Call), R(12), U8(2),
+ B(Star), R(6),
+ B(Star), R(12),
+ B(InvokeIntrinsic), U16(Runtime::k_IsJSReceiver), R(12), U8(1),
+ B(JumpIfToBooleanFalse), U8(4),
+ B(Jump), U8(11),
+ B(Ldar), R(6),
+ B(Star), R(12),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
+ B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(11), U8(1),
+ B(LdaZero),
+ B(TestEqualStrict), R(9),
+ B(JumpIfTrue), U8(4),
+ B(Jump), U8(5),
+ B(Ldar), R(10),
+ B(ReThrow),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::SYMBOL_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ kInstanceTypeDontCare,
+]
+handlers: [
+ [10, 152, 158],
+ [13, 105, 107],
+ [250, 263, 265],
+]
+
+---
+snippet: "
+ var x = 'potatoes';
+ for (var p of x) { return p; }
+"
+frame size: 17
+parameter count: 1
+bytecode array length: 363
+bytecodes: [
+ B(StackCheck),
+ B(LdaConstant), U8(0),
+ B(Star), R(7),
+ B(LdaUndefined),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(3),
+ B(Mov), R(context), R(12),
+ B(Mov), R(context), R(13),
+ B(Ldar), R(7),
+ B(Star), R(15),
+ B(LdaConstant), U8(1),
+ B(KeyedLoadIC), R(15), U8(3),
+ B(Star), R(14),
+ B(Call), R(14), R(15), U8(1), U8(1),
+ B(Star), R(1),
+ B(Ldar), R(1),
+ B(Star), R(16),
+ B(LoadIC), R(16), U8(2), U8(7),
+ B(Star), R(15),
+ B(Call), R(15), R(16), U8(1), U8(5),
+ B(Star), R(2),
+ B(Star), R(14),
+ B(InvokeIntrinsic), U16(Runtime::k_IsJSReceiver), R(14), U8(1),
+ B(LogicalNot),
+ B(JumpIfFalse), U8(11),
+ B(Ldar), R(2),
+ B(Star), R(14),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(14), U8(1),
+ B(Ldar), R(2),
+ B(Star), R(14),
+ B(LoadIC), R(14), U8(3), U8(9),
+ B(JumpIfToBooleanTrue), U8(32),
+ B(LdaSmi), U8(2),
+ B(Star), R(3),
+ B(Ldar), R(2),
+ B(Star), R(14),
+ B(LoadIC), R(14), U8(4), U8(11),
+ B(Star), R(0),
+ B(Ldar), R(4),
+ B(StackCheck),
+ B(Ldar), R(0),
+ B(Star), R(8),
+ B(Star), R(11),
+ B(LdaZero),
+ B(Star), R(10),
+ B(Jump), U8(63),
+ B(Jump), U8(-74),
+ B(Jump), U8(47),
+ B(Star), R(15),
+ B(LdaConstant), U8(5),
+ B(Star), R(14),
+ B(Ldar), R(closure),
+ B(Star), R(16),
+ B(CallRuntime), U16(Runtime::kPushCatchContext), R(14), U8(3),
+ B(Star), R(13),
+ B(PushContext), R(9),
+ B(Ldar), R(3),
+ B(Star), R(14),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(14),
+ B(JumpIfFalse), U8(6),
+ B(LdaSmi), U8(1),
+ B(Star), R(3),
+ B(LdaContextSlot), R(context), U8(4),
+ B(Star), R(14),
+ B(CallRuntime), U16(Runtime::kReThrow), R(14), U8(1),
+ B(PopContext), R(9),
+ B(LdaSmi), U8(-1),
+ B(Star), R(10),
+ B(Jump), U8(8),
+ B(Star), R(11),
+ B(LdaSmi), U8(1),
+ B(Star), R(10),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Star), R(12),
+ B(Ldar), R(3),
+ B(Star), R(13),
+ B(LdaZero),
+ B(TestEqualStrict), R(13),
+ B(JumpIfTrue), U8(9),
+ B(Ldar), R(1),
+ B(Star), R(13),
+ B(LdaUndefined),
+ B(TestEqualStrict), R(13),
+ B(LogicalNot),
+ B(JumpIfFalseConstant), U8(9),
+ B(Ldar), R(1),
+ B(Star), R(13),
+ B(LoadIC), R(13), U8(6), U8(13),
+ B(Star), R(5),
+ B(Star), R(13),
+ B(LdaNull),
+ B(TestEqual), R(13),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(124),
+ B(Ldar), R(3),
+ B(Star), R(13),
+ B(LdaSmi), U8(1),
+ B(TestEqualStrict), R(13),
+ B(JumpIfFalse), U8(79),
+ B(Ldar), R(5),
+ B(TypeOf),
+ B(Star), R(13),
+ B(LdaConstant), U8(7),
+ B(TestEqualStrict), R(13),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(18),
+ B(Wide), B(LdaSmi), U16(139),
+ B(Star), R(13),
+ B(LdaConstant), U8(8),
+ B(Star), R(14),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(13), U8(2),
+ B(Throw),
+ B(Mov), R(context), R(13),
+ B(Ldar), R(5),
+ B(Star), R(14),
+ B(Ldar), R(1),
+ B(Star), R(15),
+ B(CallRuntime), U16(Runtime::k_Call), R(14), U8(2),
+ B(Jump), U8(30),
+ B(Star), R(15),
+ B(LdaConstant), U8(5),
+ B(Star), R(14),
+ B(Ldar), R(closure),
+ B(Star), R(16),
+ B(CallRuntime), U16(Runtime::kPushCatchContext), R(14), U8(3),
+ B(Star), R(13),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Ldar), R(13),
+ B(PushContext), R(9),
+ B(PopContext), R(9),
+ B(Jump), U8(37),
+ B(Ldar), R(5),
+ B(Star), R(13),
+ B(Ldar), R(1),
+ B(Star), R(14),
+ B(CallRuntime), U16(Runtime::k_Call), R(13), U8(2),
+ B(Star), R(6),
+ B(Star), R(13),
+ B(InvokeIntrinsic), U16(Runtime::k_IsJSReceiver), R(13), U8(1),
+ B(JumpIfToBooleanFalse), U8(4),
+ B(Jump), U8(11),
+ B(Ldar), R(6),
+ B(Star), R(13),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
+ B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(12), U8(1),
+ B(LdaZero),
+ B(TestEqualStrict), R(10),
+ B(JumpIfTrue), U8(10),
+ B(LdaSmi), U8(1),
+ B(TestEqualStrict), R(10),
+ B(JumpIfTrue), U8(7),
+ B(Jump), U8(8),
+ B(Ldar), R(11),
+ B(Return),
+ B(Ldar), R(11),
+ B(ReThrow),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::SYMBOL_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ kInstanceTypeDontCare,
+]
+handlers: [
+ [14, 158, 164],
+ [17, 111, 113],
+ [257, 270, 272],
+]
+
+---
+snippet: "
+ for (var x of [10, 20, 30]) {
+ if (x == 10) continue;
+ if (x == 20) break;
+ }
+"
+frame size: 16
+parameter count: 1
+bytecode array length: 369
+bytecodes: [
+ B(StackCheck),
+ B(LdaUndefined),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(3),
+ B(Mov), R(context), R(11),
+ B(Mov), R(context), R(12),
+ B(CreateArrayLiteral), U8(0), U8(0), U8(3),
+ B(Star), R(14),
+ B(LdaConstant), U8(1),
+ B(KeyedLoadIC), R(14), U8(3),
+ B(Star), R(13),
+ B(Call), R(13), R(14), U8(1), U8(1),
+ B(Star), R(1),
+ B(Ldar), R(1),
+ B(Star), R(15),
+ B(LoadIC), R(15), U8(2), U8(7),
+ B(Star), R(14),
+ B(Call), R(14), R(15), U8(1), U8(5),
+ B(Star), R(2),
+ B(Star), R(13),
+ B(InvokeIntrinsic), U16(Runtime::k_IsJSReceiver), R(13), U8(1),
+ B(LogicalNot),
+ B(JumpIfFalse), U8(11),
+ B(Ldar), R(2),
+ B(Star), R(13),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
+ B(Ldar), R(2),
+ B(Star), R(13),
+ B(LoadIC), R(13), U8(3), U8(9),
+ B(JumpIfToBooleanTrue), U8(50),
+ B(LdaSmi), U8(2),
+ B(Star), R(3),
+ B(Ldar), R(2),
+ B(Star), R(13),
+ B(LoadIC), R(13), U8(4), U8(11),
+ B(Star), R(0),
+ B(Ldar), R(4),
+ B(StackCheck),
+ B(Ldar), R(0),
+ B(Star), R(7),
+ B(Star), R(13),
+ B(LdaSmi), U8(10),
+ B(TestEqual), R(13),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(-75),
+ B(Ldar), R(7),
+ B(Star), R(13),
+ B(LdaSmi), U8(20),
+ B(TestEqual), R(13),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(7),
+ B(LdaZero),
+ B(Star), R(3),
+ B(Jump), U8(-92),
+ B(Jump), U8(47),
+ B(Star), R(14),
+ B(LdaConstant), U8(5),
+ B(Star), R(13),
+ B(Ldar), R(closure),
+ B(Star), R(15),
+ B(CallRuntime), U16(Runtime::kPushCatchContext), R(13), U8(3),
+ B(Star), R(12),
+ B(PushContext), R(8),
+ B(Ldar), R(3),
+ B(Star), R(13),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(13),
+ B(JumpIfFalse), U8(6),
+ B(LdaSmi), U8(1),
+ B(Star), R(3),
+ B(LdaContextSlot), R(context), U8(4),
+ B(Star), R(13),
+ B(CallRuntime), U16(Runtime::kReThrow), R(13), U8(1),
+ B(PopContext), R(8),
+ B(LdaSmi), U8(-1),
+ B(Star), R(9),
+ B(Jump), U8(7),
+ B(Star), R(10),
+ B(LdaZero),
+ B(Star), R(9),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Star), R(11),
+ B(Ldar), R(3),
+ B(Star), R(12),
+ B(LdaZero),
+ B(TestEqualStrict), R(12),
+ B(JumpIfTrue), U8(9),
+ B(Ldar), R(1),
+ B(Star), R(12),
+ B(LdaUndefined),
+ B(TestEqualStrict), R(12),
+ B(LogicalNot),
+ B(JumpIfFalseConstant), U8(9),
+ B(Ldar), R(1),
+ B(Star), R(12),
+ B(LoadIC), R(12), U8(6), U8(13),
+ B(Star), R(5),
+ B(Star), R(12),
+ B(LdaNull),
+ B(TestEqual), R(12),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(124),
+ B(Ldar), R(3),
+ B(Star), R(12),
+ B(LdaSmi), U8(1),
+ B(TestEqualStrict), R(12),
+ B(JumpIfFalse), U8(79),
+ B(Ldar), R(5),
+ B(TypeOf),
+ B(Star), R(12),
+ B(LdaConstant), U8(7),
+ B(TestEqualStrict), R(12),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(18),
+ B(Wide), B(LdaSmi), U16(139),
+ B(Star), R(12),
+ B(LdaConstant), U8(8),
+ B(Star), R(13),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(12), U8(2),
+ B(Throw),
+ B(Mov), R(context), R(12),
+ B(Ldar), R(5),
+ B(Star), R(13),
+ B(Ldar), R(1),
+ B(Star), R(14),
+ B(CallRuntime), U16(Runtime::k_Call), R(13), U8(2),
+ B(Jump), U8(30),
+ B(Star), R(14),
+ B(LdaConstant), U8(5),
+ B(Star), R(13),
+ B(Ldar), R(closure),
+ B(Star), R(15),
+ B(CallRuntime), U16(Runtime::kPushCatchContext), R(13), U8(3),
+ B(Star), R(12),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Ldar), R(12),
+ B(PushContext), R(8),
+ B(PopContext), R(8),
+ B(Jump), U8(37),
+ B(Ldar), R(5),
+ B(Star), R(12),
+ B(Ldar), R(1),
+ B(Star), R(13),
+ B(CallRuntime), U16(Runtime::k_Call), R(12), U8(2),
+ B(Star), R(6),
+ B(Star), R(12),
+ B(InvokeIntrinsic), U16(Runtime::k_IsJSReceiver), R(12), U8(1),
+ B(JumpIfToBooleanFalse), U8(4),
+ B(Jump), U8(11),
+ B(Ldar), R(6),
+ B(Star), R(12),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
+ B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(11), U8(1),
+ B(LdaZero),
+ B(TestEqualStrict), R(9),
+ B(JumpIfTrue), U8(4),
+ B(Jump), U8(5),
+ B(Ldar), R(10),
+ B(ReThrow),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::SYMBOL_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ kInstanceTypeDontCare,
+]
+handlers: [
+ [10, 174, 180],
+ [13, 127, 129],
+ [272, 285, 287],
+]
+
+---
+snippet: "
+ var x = { 'a': 1, 'b': 2 };
+ for (x['a'] of [1,2,3]) { return x['a']; }
+"
+frame size: 15
+parameter count: 1
+bytecode array length: 379
+bytecodes: [
+ B(StackCheck),
+ B(CreateObjectLiteral), U8(0), U8(0), U8(5),
+ B(Star), R(8),
+ B(Star), R(6),
+ B(LdaUndefined),
+ B(Star), R(3),
+ B(LdaZero),
+ B(Star), R(2),
+ B(Mov), R(context), R(10),
+ B(Mov), R(context), R(11),
+ B(CreateArrayLiteral), U8(1), U8(1), U8(3),
+ B(Star), R(13),
+ B(LdaConstant), U8(2),
+ B(KeyedLoadIC), R(13), U8(3),
+ B(Star), R(12),
+ B(Call), R(12), R(13), U8(1), U8(1),
+ B(Star), R(0),
+ B(Ldar), R(0),
+ B(Star), R(14),
+ B(LoadIC), R(14), U8(3), U8(7),
+ B(Star), R(13),
+ B(Call), R(13), R(14), U8(1), U8(5),
+ B(Star), R(1),
+ B(Star), R(12),
+ B(InvokeIntrinsic), U16(Runtime::k_IsJSReceiver), R(12), U8(1),
+ B(LogicalNot),
+ B(JumpIfFalse), U8(11),
+ B(Ldar), R(1),
+ B(Star), R(12),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
+ B(Ldar), R(1),
+ B(Star), R(12),
+ B(LoadIC), R(12), U8(4), U8(9),
+ B(JumpIfToBooleanTrue), U8(42),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(Ldar), R(6),
+ B(Star), R(12),
+ B(Ldar), R(1),
+ B(Star), R(13),
+ B(LoadIC), R(13), U8(5), U8(11),
+ B(StoreICSloppy), R(12), U8(6), U8(13),
+ B(Ldar), R(3),
+ B(StackCheck),
+ B(Ldar), R(6),
+ B(Star), R(12),
+ B(LoadIC), R(12), U8(6), U8(15),
+ B(Star), R(9),
+ B(LdaZero),
+ B(Star), R(8),
+ B(Jump), U8(63),
+ B(Jump), U8(-84),
+ B(Jump), U8(47),
+ B(Star), R(13),
+ B(LdaConstant), U8(7),
+ B(Star), R(12),
+ B(Ldar), R(closure),
+ B(Star), R(14),
+ B(CallRuntime), U16(Runtime::kPushCatchContext), R(12), U8(3),
+ B(Star), R(11),
+ B(PushContext), R(7),
+ B(Ldar), R(2),
+ B(Star), R(12),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(12),
+ B(JumpIfFalse), U8(6),
+ B(LdaSmi), U8(1),
+ B(Star), R(2),
+ B(LdaContextSlot), R(context), U8(4),
+ B(Star), R(12),
+ B(CallRuntime), U16(Runtime::kReThrow), R(12), U8(1),
+ B(PopContext), R(7),
+ B(LdaSmi), U8(-1),
+ B(Star), R(8),
+ B(Jump), U8(8),
+ B(Star), R(9),
+ B(LdaSmi), U8(1),
+ B(Star), R(8),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Star), R(10),
+ B(Ldar), R(2),
+ B(Star), R(11),
+ B(LdaZero),
+ B(TestEqualStrict), R(11),
+ B(JumpIfTrue), U8(9),
+ B(Ldar), R(0),
+ B(Star), R(11),
+ B(LdaUndefined),
+ B(TestEqualStrict), R(11),
+ B(LogicalNot),
+ B(JumpIfFalseConstant), U8(11),
+ B(Ldar), R(0),
+ B(Star), R(11),
+ B(LoadIC), R(11), U8(8), U8(17),
+ B(Star), R(4),
+ B(Star), R(11),
+ B(LdaNull),
+ B(TestEqual), R(11),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(124),
+ B(Ldar), R(2),
+ B(Star), R(11),
+ B(LdaSmi), U8(1),
+ B(TestEqualStrict), R(11),
+ B(JumpIfFalse), U8(79),
+ B(Ldar), R(4),
+ B(TypeOf),
+ B(Star), R(11),
+ B(LdaConstant), U8(9),
+ B(TestEqualStrict), R(11),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(18),
+ B(Wide), B(LdaSmi), U16(139),
+ B(Star), R(11),
+ B(LdaConstant), U8(10),
+ B(Star), R(12),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(11), U8(2),
+ B(Throw),
+ B(Mov), R(context), R(11),
+ B(Ldar), R(4),
+ B(Star), R(12),
+ B(Ldar), R(0),
+ B(Star), R(13),
+ B(CallRuntime), U16(Runtime::k_Call), R(12), U8(2),
+ B(Jump), U8(30),
+ B(Star), R(13),
+ B(LdaConstant), U8(7),
+ B(Star), R(12),
+ B(Ldar), R(closure),
+ B(Star), R(14),
+ B(CallRuntime), U16(Runtime::kPushCatchContext), R(12), U8(3),
+ B(Star), R(11),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Ldar), R(11),
+ B(PushContext), R(7),
+ B(PopContext), R(7),
+ B(Jump), U8(37),
+ B(Ldar), R(4),
+ B(Star), R(11),
+ B(Ldar), R(0),
+ B(Star), R(12),
+ B(CallRuntime), U16(Runtime::k_Call), R(11), U8(2),
+ B(Star), R(5),
+ B(Star), R(11),
+ B(InvokeIntrinsic), U16(Runtime::k_IsJSReceiver), R(11), U8(1),
+ B(JumpIfToBooleanFalse), U8(4),
+ B(Jump), U8(11),
+ B(Ldar), R(5),
+ B(Star), R(11),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
+ B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(10), U8(1),
+ B(LdaZero),
+ B(TestEqualStrict), R(8),
+ B(JumpIfTrue), U8(10),
+ B(LdaSmi), U8(1),
+ B(TestEqualStrict), R(8),
+ B(JumpIfTrue), U8(7),
+ B(Jump), U8(8),
+ B(Ldar), R(9),
+ B(Return),
+ B(Ldar), R(9),
+ B(ReThrow),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::SYMBOL_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ kInstanceTypeDontCare,
+]
+handlers: [
+ [18, 174, 180],
+ [21, 127, 129],
+ [273, 286, 288],
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden
new file mode 100644
index 0000000000..06e78ab3e0
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden
@@ -0,0 +1,72 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: mixed
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ return function(){ }
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 5
+bytecodes: [
+ B(StackCheck),
+ B(CreateClosure), U8(0), U8(0),
+ B(Return),
+]
+constant pool: [
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ return (function(){ })()
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 15
+bytecodes: [
+ B(StackCheck),
+ B(LdaUndefined),
+ B(Star), R(1),
+ B(CreateClosure), U8(0), U8(0),
+ B(Star), R(0),
+ B(Call), R(0), R(1), U8(1), U8(1),
+ B(Return),
+]
+constant pool: [
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ return (function(x){ return x; })(1)
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 19
+bytecodes: [
+ B(StackCheck),
+ B(LdaUndefined),
+ B(Star), R(1),
+ B(CreateClosure), U8(0), U8(0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(Star), R(2),
+ B(Call), R(0), R(1), U8(2), U8(1),
+ B(Return),
+]
+constant pool: [
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden
new file mode 100644
index 0000000000..65add5e3c3
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden
@@ -0,0 +1,57 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: string
+execute: yes
+wrap: no
+test function name: f
+
+---
+snippet: "
+ var global = 1;
+ function f() { return global &= 1; }
+ f();
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 14
+bytecodes: [
+ B(StackCheck),
+ B(LdaGlobal), U8(0), U8(1),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(BitwiseAnd), R(0),
+ B(StaGlobalSloppy), U8(0), U8(3),
+ B(Return),
+]
+constant pool: [
+ "global",
+]
+handlers: [
+]
+
+---
+snippet: "
+ unallocated = 1;
+ function f() { return unallocated += 1; }
+ f();
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 14
+bytecodes: [
+ B(StackCheck),
+ B(LdaGlobal), U8(0), U8(1),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(Add), R(0),
+ B(StaGlobalSloppy), U8(0), U8(3),
+ B(Return),
+]
+constant pool: [
+ "unallocated",
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden
new file mode 100644
index 0000000000..e572cf4858
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden
@@ -0,0 +1,105 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: string
+execute: yes
+wrap: no
+test function name: f
+
+---
+snippet: "
+ var global = 1;
+ function f() { return ++global; }
+ f();
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 10
+bytecodes: [
+ B(StackCheck),
+ B(LdaGlobal), U8(0), U8(1),
+ B(ToNumber),
+ B(Inc),
+ B(StaGlobalSloppy), U8(0), U8(3),
+ B(Return),
+]
+constant pool: [
+ "global",
+]
+handlers: [
+]
+
+---
+snippet: "
+ var global = 1;
+ function f() { return global--; }
+ f();
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 14
+bytecodes: [
+ B(StackCheck),
+ B(LdaGlobal), U8(0), U8(1),
+ B(ToNumber),
+ B(Star), R(0),
+ B(Dec),
+ B(StaGlobalSloppy), U8(0), U8(3),
+ B(Ldar), R(0),
+ B(Return),
+]
+constant pool: [
+ "global",
+]
+handlers: [
+]
+
+---
+snippet: "
+ unallocated = 1;
+ function f() { 'use strict'; return --unallocated; }
+ f();
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 10
+bytecodes: [
+ B(StackCheck),
+ B(LdaGlobal), U8(0), U8(1),
+ B(ToNumber),
+ B(Dec),
+ B(StaGlobalStrict), U8(0), U8(3),
+ B(Return),
+]
+constant pool: [
+ "unallocated",
+]
+handlers: [
+]
+
+---
+snippet: "
+ unallocated = 1;
+ function f() { return unallocated++; }
+ f();
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 14
+bytecodes: [
+ B(StackCheck),
+ B(LdaGlobal), U8(0), U8(1),
+ B(ToNumber),
+ B(Star), R(0),
+ B(Inc),
+ B(StaGlobalSloppy), U8(0), U8(3),
+ B(Ldar), R(0),
+ B(Return),
+]
+constant pool: [
+ "unallocated",
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden
new file mode 100644
index 0000000000..c52673a312
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden
@@ -0,0 +1,115 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: mixed
+execute: yes
+wrap: no
+test function name: f
+
+---
+snippet: "
+ var a = {x:13, y:14};
+ function f() {
+ return delete a.x;
+ };
+ f();
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 11
+bytecodes: [
+ B(StackCheck),
+ B(LdaGlobal), U8(0), U8(1),
+ B(Star), R(0),
+ B(LdaConstant), U8(1),
+ B(DeletePropertySloppy), R(0),
+ B(Return),
+]
+constant pool: [
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ a = {1:13, 2:14};
+ function f() {
+ 'use strict';
+ return delete a[1];
+ };
+ f();
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 11
+bytecodes: [
+ B(StackCheck),
+ B(LdaGlobal), U8(0), U8(1),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(DeletePropertyStrict), R(0),
+ B(Return),
+]
+constant pool: [
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = {x:13, y:14};
+ function f() {
+ return delete a;
+ };
+ f();
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 16
+bytecodes: [
+ B(StackCheck),
+ B(LdaContextSlot), R(context), U8(3),
+ B(Star), R(0),
+ B(LdaContextSlot), R(0), U8(2),
+ B(Star), R(1),
+ B(LdaConstant), U8(0),
+ B(DeletePropertySloppy), R(1),
+ B(Return),
+]
+constant pool: [
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ b = 30;
+ function f() {
+ return delete b;
+ };
+ f();
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 16
+bytecodes: [
+ B(StackCheck),
+ B(LdaContextSlot), R(context), U8(3),
+ B(Star), R(0),
+ B(LdaContextSlot), R(0), U8(2),
+ B(Star), R(1),
+ B(LdaConstant), U8(0),
+ B(DeletePropertySloppy), R(1),
+ B(Return),
+]
+constant pool: [
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/HeapNumberConstants.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/HeapNumberConstants.golden
new file mode 100644
index 0000000000..3970995ecd
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/HeapNumberConstants.golden
@@ -0,0 +1,1112 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: number
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ return 1.2;
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 4
+bytecodes: [
+ B(StackCheck),
+ B(LdaConstant), U8(0),
+ B(Return),
+]
+constant pool: [
+ 1.2,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 1.2; return 2.6;
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 8
+bytecodes: [
+ B(StackCheck),
+ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ B(LdaConstant), U8(1),
+ B(Return),
+]
+constant pool: [
+ 1.2,
+ 2.6,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 3.14; return 3.14;
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 8
+bytecodes: [
+ B(StackCheck),
+ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ B(LdaConstant), U8(1),
+ B(Return),
+]
+constant pool: [
+ 3.14,
+ 3.14,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414;
+ a = 1.414; a = 3.14;
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 1033
+bytecodes: [
+ B(StackCheck),
+ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ B(LdaConstant), U8(1),
+ B(Star), R(0),
+ B(LdaConstant), U8(2),
+ B(Star), R(0),
+ B(LdaConstant), U8(3),
+ B(Star), R(0),
+ B(LdaConstant), U8(4),
+ B(Star), R(0),
+ B(LdaConstant), U8(5),
+ B(Star), R(0),
+ B(LdaConstant), U8(6),
+ B(Star), R(0),
+ B(LdaConstant), U8(7),
+ B(Star), R(0),
+ B(LdaConstant), U8(8),
+ B(Star), R(0),
+ B(LdaConstant), U8(9),
+ B(Star), R(0),
+ B(LdaConstant), U8(10),
+ B(Star), R(0),
+ B(LdaConstant), U8(11),
+ B(Star), R(0),
+ B(LdaConstant), U8(12),
+ B(Star), R(0),
+ B(LdaConstant), U8(13),
+ B(Star), R(0),
+ B(LdaConstant), U8(14),
+ B(Star), R(0),
+ B(LdaConstant), U8(15),
+ B(Star), R(0),
+ B(LdaConstant), U8(16),
+ B(Star), R(0),
+ B(LdaConstant), U8(17),
+ B(Star), R(0),
+ B(LdaConstant), U8(18),
+ B(Star), R(0),
+ B(LdaConstant), U8(19),
+ B(Star), R(0),
+ B(LdaConstant), U8(20),
+ B(Star), R(0),
+ B(LdaConstant), U8(21),
+ B(Star), R(0),
+ B(LdaConstant), U8(22),
+ B(Star), R(0),
+ B(LdaConstant), U8(23),
+ B(Star), R(0),
+ B(LdaConstant), U8(24),
+ B(Star), R(0),
+ B(LdaConstant), U8(25),
+ B(Star), R(0),
+ B(LdaConstant), U8(26),
+ B(Star), R(0),
+ B(LdaConstant), U8(27),
+ B(Star), R(0),
+ B(LdaConstant), U8(28),
+ B(Star), R(0),
+ B(LdaConstant), U8(29),
+ B(Star), R(0),
+ B(LdaConstant), U8(30),
+ B(Star), R(0),
+ B(LdaConstant), U8(31),
+ B(Star), R(0),
+ B(LdaConstant), U8(32),
+ B(Star), R(0),
+ B(LdaConstant), U8(33),
+ B(Star), R(0),
+ B(LdaConstant), U8(34),
+ B(Star), R(0),
+ B(LdaConstant), U8(35),
+ B(Star), R(0),
+ B(LdaConstant), U8(36),
+ B(Star), R(0),
+ B(LdaConstant), U8(37),
+ B(Star), R(0),
+ B(LdaConstant), U8(38),
+ B(Star), R(0),
+ B(LdaConstant), U8(39),
+ B(Star), R(0),
+ B(LdaConstant), U8(40),
+ B(Star), R(0),
+ B(LdaConstant), U8(41),
+ B(Star), R(0),
+ B(LdaConstant), U8(42),
+ B(Star), R(0),
+ B(LdaConstant), U8(43),
+ B(Star), R(0),
+ B(LdaConstant), U8(44),
+ B(Star), R(0),
+ B(LdaConstant), U8(45),
+ B(Star), R(0),
+ B(LdaConstant), U8(46),
+ B(Star), R(0),
+ B(LdaConstant), U8(47),
+ B(Star), R(0),
+ B(LdaConstant), U8(48),
+ B(Star), R(0),
+ B(LdaConstant), U8(49),
+ B(Star), R(0),
+ B(LdaConstant), U8(50),
+ B(Star), R(0),
+ B(LdaConstant), U8(51),
+ B(Star), R(0),
+ B(LdaConstant), U8(52),
+ B(Star), R(0),
+ B(LdaConstant), U8(53),
+ B(Star), R(0),
+ B(LdaConstant), U8(54),
+ B(Star), R(0),
+ B(LdaConstant), U8(55),
+ B(Star), R(0),
+ B(LdaConstant), U8(56),
+ B(Star), R(0),
+ B(LdaConstant), U8(57),
+ B(Star), R(0),
+ B(LdaConstant), U8(58),
+ B(Star), R(0),
+ B(LdaConstant), U8(59),
+ B(Star), R(0),
+ B(LdaConstant), U8(60),
+ B(Star), R(0),
+ B(LdaConstant), U8(61),
+ B(Star), R(0),
+ B(LdaConstant), U8(62),
+ B(Star), R(0),
+ B(LdaConstant), U8(63),
+ B(Star), R(0),
+ B(LdaConstant), U8(64),
+ B(Star), R(0),
+ B(LdaConstant), U8(65),
+ B(Star), R(0),
+ B(LdaConstant), U8(66),
+ B(Star), R(0),
+ B(LdaConstant), U8(67),
+ B(Star), R(0),
+ B(LdaConstant), U8(68),
+ B(Star), R(0),
+ B(LdaConstant), U8(69),
+ B(Star), R(0),
+ B(LdaConstant), U8(70),
+ B(Star), R(0),
+ B(LdaConstant), U8(71),
+ B(Star), R(0),
+ B(LdaConstant), U8(72),
+ B(Star), R(0),
+ B(LdaConstant), U8(73),
+ B(Star), R(0),
+ B(LdaConstant), U8(74),
+ B(Star), R(0),
+ B(LdaConstant), U8(75),
+ B(Star), R(0),
+ B(LdaConstant), U8(76),
+ B(Star), R(0),
+ B(LdaConstant), U8(77),
+ B(Star), R(0),
+ B(LdaConstant), U8(78),
+ B(Star), R(0),
+ B(LdaConstant), U8(79),
+ B(Star), R(0),
+ B(LdaConstant), U8(80),
+ B(Star), R(0),
+ B(LdaConstant), U8(81),
+ B(Star), R(0),
+ B(LdaConstant), U8(82),
+ B(Star), R(0),
+ B(LdaConstant), U8(83),
+ B(Star), R(0),
+ B(LdaConstant), U8(84),
+ B(Star), R(0),
+ B(LdaConstant), U8(85),
+ B(Star), R(0),
+ B(LdaConstant), U8(86),
+ B(Star), R(0),
+ B(LdaConstant), U8(87),
+ B(Star), R(0),
+ B(LdaConstant), U8(88),
+ B(Star), R(0),
+ B(LdaConstant), U8(89),
+ B(Star), R(0),
+ B(LdaConstant), U8(90),
+ B(Star), R(0),
+ B(LdaConstant), U8(91),
+ B(Star), R(0),
+ B(LdaConstant), U8(92),
+ B(Star), R(0),
+ B(LdaConstant), U8(93),
+ B(Star), R(0),
+ B(LdaConstant), U8(94),
+ B(Star), R(0),
+ B(LdaConstant), U8(95),
+ B(Star), R(0),
+ B(LdaConstant), U8(96),
+ B(Star), R(0),
+ B(LdaConstant), U8(97),
+ B(Star), R(0),
+ B(LdaConstant), U8(98),
+ B(Star), R(0),
+ B(LdaConstant), U8(99),
+ B(Star), R(0),
+ B(LdaConstant), U8(100),
+ B(Star), R(0),
+ B(LdaConstant), U8(101),
+ B(Star), R(0),
+ B(LdaConstant), U8(102),
+ B(Star), R(0),
+ B(LdaConstant), U8(103),
+ B(Star), R(0),
+ B(LdaConstant), U8(104),
+ B(Star), R(0),
+ B(LdaConstant), U8(105),
+ B(Star), R(0),
+ B(LdaConstant), U8(106),
+ B(Star), R(0),
+ B(LdaConstant), U8(107),
+ B(Star), R(0),
+ B(LdaConstant), U8(108),
+ B(Star), R(0),
+ B(LdaConstant), U8(109),
+ B(Star), R(0),
+ B(LdaConstant), U8(110),
+ B(Star), R(0),
+ B(LdaConstant), U8(111),
+ B(Star), R(0),
+ B(LdaConstant), U8(112),
+ B(Star), R(0),
+ B(LdaConstant), U8(113),
+ B(Star), R(0),
+ B(LdaConstant), U8(114),
+ B(Star), R(0),
+ B(LdaConstant), U8(115),
+ B(Star), R(0),
+ B(LdaConstant), U8(116),
+ B(Star), R(0),
+ B(LdaConstant), U8(117),
+ B(Star), R(0),
+ B(LdaConstant), U8(118),
+ B(Star), R(0),
+ B(LdaConstant), U8(119),
+ B(Star), R(0),
+ B(LdaConstant), U8(120),
+ B(Star), R(0),
+ B(LdaConstant), U8(121),
+ B(Star), R(0),
+ B(LdaConstant), U8(122),
+ B(Star), R(0),
+ B(LdaConstant), U8(123),
+ B(Star), R(0),
+ B(LdaConstant), U8(124),
+ B(Star), R(0),
+ B(LdaConstant), U8(125),
+ B(Star), R(0),
+ B(LdaConstant), U8(126),
+ B(Star), R(0),
+ B(LdaConstant), U8(127),
+ B(Star), R(0),
+ B(LdaConstant), U8(128),
+ B(Star), R(0),
+ B(LdaConstant), U8(129),
+ B(Star), R(0),
+ B(LdaConstant), U8(130),
+ B(Star), R(0),
+ B(LdaConstant), U8(131),
+ B(Star), R(0),
+ B(LdaConstant), U8(132),
+ B(Star), R(0),
+ B(LdaConstant), U8(133),
+ B(Star), R(0),
+ B(LdaConstant), U8(134),
+ B(Star), R(0),
+ B(LdaConstant), U8(135),
+ B(Star), R(0),
+ B(LdaConstant), U8(136),
+ B(Star), R(0),
+ B(LdaConstant), U8(137),
+ B(Star), R(0),
+ B(LdaConstant), U8(138),
+ B(Star), R(0),
+ B(LdaConstant), U8(139),
+ B(Star), R(0),
+ B(LdaConstant), U8(140),
+ B(Star), R(0),
+ B(LdaConstant), U8(141),
+ B(Star), R(0),
+ B(LdaConstant), U8(142),
+ B(Star), R(0),
+ B(LdaConstant), U8(143),
+ B(Star), R(0),
+ B(LdaConstant), U8(144),
+ B(Star), R(0),
+ B(LdaConstant), U8(145),
+ B(Star), R(0),
+ B(LdaConstant), U8(146),
+ B(Star), R(0),
+ B(LdaConstant), U8(147),
+ B(Star), R(0),
+ B(LdaConstant), U8(148),
+ B(Star), R(0),
+ B(LdaConstant), U8(149),
+ B(Star), R(0),
+ B(LdaConstant), U8(150),
+ B(Star), R(0),
+ B(LdaConstant), U8(151),
+ B(Star), R(0),
+ B(LdaConstant), U8(152),
+ B(Star), R(0),
+ B(LdaConstant), U8(153),
+ B(Star), R(0),
+ B(LdaConstant), U8(154),
+ B(Star), R(0),
+ B(LdaConstant), U8(155),
+ B(Star), R(0),
+ B(LdaConstant), U8(156),
+ B(Star), R(0),
+ B(LdaConstant), U8(157),
+ B(Star), R(0),
+ B(LdaConstant), U8(158),
+ B(Star), R(0),
+ B(LdaConstant), U8(159),
+ B(Star), R(0),
+ B(LdaConstant), U8(160),
+ B(Star), R(0),
+ B(LdaConstant), U8(161),
+ B(Star), R(0),
+ B(LdaConstant), U8(162),
+ B(Star), R(0),
+ B(LdaConstant), U8(163),
+ B(Star), R(0),
+ B(LdaConstant), U8(164),
+ B(Star), R(0),
+ B(LdaConstant), U8(165),
+ B(Star), R(0),
+ B(LdaConstant), U8(166),
+ B(Star), R(0),
+ B(LdaConstant), U8(167),
+ B(Star), R(0),
+ B(LdaConstant), U8(168),
+ B(Star), R(0),
+ B(LdaConstant), U8(169),
+ B(Star), R(0),
+ B(LdaConstant), U8(170),
+ B(Star), R(0),
+ B(LdaConstant), U8(171),
+ B(Star), R(0),
+ B(LdaConstant), U8(172),
+ B(Star), R(0),
+ B(LdaConstant), U8(173),
+ B(Star), R(0),
+ B(LdaConstant), U8(174),
+ B(Star), R(0),
+ B(LdaConstant), U8(175),
+ B(Star), R(0),
+ B(LdaConstant), U8(176),
+ B(Star), R(0),
+ B(LdaConstant), U8(177),
+ B(Star), R(0),
+ B(LdaConstant), U8(178),
+ B(Star), R(0),
+ B(LdaConstant), U8(179),
+ B(Star), R(0),
+ B(LdaConstant), U8(180),
+ B(Star), R(0),
+ B(LdaConstant), U8(181),
+ B(Star), R(0),
+ B(LdaConstant), U8(182),
+ B(Star), R(0),
+ B(LdaConstant), U8(183),
+ B(Star), R(0),
+ B(LdaConstant), U8(184),
+ B(Star), R(0),
+ B(LdaConstant), U8(185),
+ B(Star), R(0),
+ B(LdaConstant), U8(186),
+ B(Star), R(0),
+ B(LdaConstant), U8(187),
+ B(Star), R(0),
+ B(LdaConstant), U8(188),
+ B(Star), R(0),
+ B(LdaConstant), U8(189),
+ B(Star), R(0),
+ B(LdaConstant), U8(190),
+ B(Star), R(0),
+ B(LdaConstant), U8(191),
+ B(Star), R(0),
+ B(LdaConstant), U8(192),
+ B(Star), R(0),
+ B(LdaConstant), U8(193),
+ B(Star), R(0),
+ B(LdaConstant), U8(194),
+ B(Star), R(0),
+ B(LdaConstant), U8(195),
+ B(Star), R(0),
+ B(LdaConstant), U8(196),
+ B(Star), R(0),
+ B(LdaConstant), U8(197),
+ B(Star), R(0),
+ B(LdaConstant), U8(198),
+ B(Star), R(0),
+ B(LdaConstant), U8(199),
+ B(Star), R(0),
+ B(LdaConstant), U8(200),
+ B(Star), R(0),
+ B(LdaConstant), U8(201),
+ B(Star), R(0),
+ B(LdaConstant), U8(202),
+ B(Star), R(0),
+ B(LdaConstant), U8(203),
+ B(Star), R(0),
+ B(LdaConstant), U8(204),
+ B(Star), R(0),
+ B(LdaConstant), U8(205),
+ B(Star), R(0),
+ B(LdaConstant), U8(206),
+ B(Star), R(0),
+ B(LdaConstant), U8(207),
+ B(Star), R(0),
+ B(LdaConstant), U8(208),
+ B(Star), R(0),
+ B(LdaConstant), U8(209),
+ B(Star), R(0),
+ B(LdaConstant), U8(210),
+ B(Star), R(0),
+ B(LdaConstant), U8(211),
+ B(Star), R(0),
+ B(LdaConstant), U8(212),
+ B(Star), R(0),
+ B(LdaConstant), U8(213),
+ B(Star), R(0),
+ B(LdaConstant), U8(214),
+ B(Star), R(0),
+ B(LdaConstant), U8(215),
+ B(Star), R(0),
+ B(LdaConstant), U8(216),
+ B(Star), R(0),
+ B(LdaConstant), U8(217),
+ B(Star), R(0),
+ B(LdaConstant), U8(218),
+ B(Star), R(0),
+ B(LdaConstant), U8(219),
+ B(Star), R(0),
+ B(LdaConstant), U8(220),
+ B(Star), R(0),
+ B(LdaConstant), U8(221),
+ B(Star), R(0),
+ B(LdaConstant), U8(222),
+ B(Star), R(0),
+ B(LdaConstant), U8(223),
+ B(Star), R(0),
+ B(LdaConstant), U8(224),
+ B(Star), R(0),
+ B(LdaConstant), U8(225),
+ B(Star), R(0),
+ B(LdaConstant), U8(226),
+ B(Star), R(0),
+ B(LdaConstant), U8(227),
+ B(Star), R(0),
+ B(LdaConstant), U8(228),
+ B(Star), R(0),
+ B(LdaConstant), U8(229),
+ B(Star), R(0),
+ B(LdaConstant), U8(230),
+ B(Star), R(0),
+ B(LdaConstant), U8(231),
+ B(Star), R(0),
+ B(LdaConstant), U8(232),
+ B(Star), R(0),
+ B(LdaConstant), U8(233),
+ B(Star), R(0),
+ B(LdaConstant), U8(234),
+ B(Star), R(0),
+ B(LdaConstant), U8(235),
+ B(Star), R(0),
+ B(LdaConstant), U8(236),
+ B(Star), R(0),
+ B(LdaConstant), U8(237),
+ B(Star), R(0),
+ B(LdaConstant), U8(238),
+ B(Star), R(0),
+ B(LdaConstant), U8(239),
+ B(Star), R(0),
+ B(LdaConstant), U8(240),
+ B(Star), R(0),
+ B(LdaConstant), U8(241),
+ B(Star), R(0),
+ B(LdaConstant), U8(242),
+ B(Star), R(0),
+ B(LdaConstant), U8(243),
+ B(Star), R(0),
+ B(LdaConstant), U8(244),
+ B(Star), R(0),
+ B(LdaConstant), U8(245),
+ B(Star), R(0),
+ B(LdaConstant), U8(246),
+ B(Star), R(0),
+ B(LdaConstant), U8(247),
+ B(Star), R(0),
+ B(LdaConstant), U8(248),
+ B(Star), R(0),
+ B(LdaConstant), U8(249),
+ B(Star), R(0),
+ B(LdaConstant), U8(250),
+ B(Star), R(0),
+ B(LdaConstant), U8(251),
+ B(Star), R(0),
+ B(LdaConstant), U8(252),
+ B(Star), R(0),
+ B(LdaConstant), U8(253),
+ B(Star), R(0),
+ B(LdaConstant), U8(254),
+ B(Star), R(0),
+ B(LdaConstant), U8(255),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(256),
+ B(Star), R(0),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 1.414,
+ 3.14,
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden
new file mode 100644
index 0000000000..6101e0186d
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden
@@ -0,0 +1,764 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: number
+execute: yes
+wrap: no
+test function name: f
+
+---
+snippet: "
+ function f() {
+ if (0) {
+ return 1;
+ } else {
+ return -1;
+ }
+ };
+ f();
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 4
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(-1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f() {
+ if ('lucky') {
+ return 1;
+ } else {
+ return -1;
+ }
+ };
+ f();
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 4
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f() {
+ if (false) {
+ return 1;
+ } else {
+ return -1;
+ }
+ };
+ f();
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 4
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(-1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f() {
+ if (false) {
+ return 1;
+ }
+ };
+ f();
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 3
+bytecodes: [
+ B(StackCheck),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f() {
+ var a = 1;
+ if (a) {
+ a += 1;
+ } else {
+ return 2;
+ }
+ };
+ f();
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 24
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(JumpIfToBooleanFalse), U8(14),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(Add), R(1),
+ B(Star), R(0),
+ B(Jump), U8(5),
+ B(LdaSmi), U8(2),
+ B(Return),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(a) {
+ if (a <= 0) {
+ return 200;
+ } else {
+ return -200;
+ }
+ };
+ f(99);
+"
+frame size: 1
+parameter count: 2
+bytecode array length: 22
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaZero),
+ B(TestLessThanOrEqual), R(0),
+ B(JumpIfFalse), U8(7),
+ B(Wide), B(LdaSmi), U16(200),
+ B(Return),
+ B(Wide), B(LdaSmi), U16(-200),
+ B(Return),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(a, b) { if (a in b) { return 200; } }f('prop', { prop: 'yes'});
+"
+frame size: 1
+parameter count: 3
+bytecode array length: 18
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(TestIn), R(0),
+ B(JumpIfFalse), U8(7),
+ B(Wide), B(LdaSmi), U16(200),
+ B(Return),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(z) { var a = 0; var b = 0; if (a === 0.01) {
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ return 200; } else { return -200; } } f(0.001);
+"
+frame size: 3
+parameter count: 2
+bytecode array length: 287
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(LdaZero),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(Star), R(2),
+ B(LdaConstant), U8(0),
+ B(TestEqualStrict), R(2),
+ B(JumpIfFalseConstant), U8(1),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Wide), B(LdaSmi), U16(200),
+ B(Return),
+ B(Wide), B(LdaSmi), U16(-200),
+ B(Return),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ 0.01,
+ 265,
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f() {
+ var a = 0; var b = 0;
+ if (a) {
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ b = a; a = b;
+ return 200; } else { return -200; }
+ };
+ f();
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 281
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(LdaZero),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(JumpIfToBooleanFalseConstant), U8(0),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Wide), B(LdaSmi), U16(200),
+ B(Return),
+ B(Wide), B(LdaSmi), U16(-200),
+ B(Return),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ 265,
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(a, b) {
+ if (a == b) { return 1; }
+ if (a === b) { return 1; }
+ if (a < b) { return 1; }
+ if (a > b) { return 1; }
+ if (a <= b) { return 1; }
+ if (a >= b) { return 1; }
+ if (a in b) { return 1; }
+ if (a instanceof b) { return 1; }
+ return 0;
+ }
+ f(1, 1);
+"
+frame size: 1
+parameter count: 3
+bytecode array length: 107
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(TestEqual), R(0),
+ B(JumpIfFalse), U8(5),
+ B(LdaSmi), U8(1),
+ B(Return),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(TestEqualStrict), R(0),
+ B(JumpIfFalse), U8(5),
+ B(LdaSmi), U8(1),
+ B(Return),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(TestLessThan), R(0),
+ B(JumpIfFalse), U8(5),
+ B(LdaSmi), U8(1),
+ B(Return),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(TestGreaterThan), R(0),
+ B(JumpIfFalse), U8(5),
+ B(LdaSmi), U8(1),
+ B(Return),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(TestLessThanOrEqual), R(0),
+ B(JumpIfFalse), U8(5),
+ B(LdaSmi), U8(1),
+ B(Return),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(TestGreaterThanOrEqual), R(0),
+ B(JumpIfFalse), U8(5),
+ B(LdaSmi), U8(1),
+ B(Return),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(TestIn), R(0),
+ B(JumpIfFalse), U8(5),
+ B(LdaSmi), U8(1),
+ B(Return),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(TestInstanceOf), R(0),
+ B(JumpIfFalse), U8(5),
+ B(LdaSmi), U8(1),
+ B(Return),
+ B(LdaZero),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f() {
+ var a = 0;
+ if (a) {
+ return 20;
+ } else {
+ return -20;
+ }
+ };
+ f();
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 14
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(JumpIfToBooleanFalse), U8(5),
+ B(LdaSmi), U8(20),
+ B(Return),
+ B(LdaSmi), U8(-20),
+ B(Return),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/IntegerConstants.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/IntegerConstants.golden
new file mode 100644
index 0000000000..c89599015b
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/IntegerConstants.golden
@@ -0,0 +1,63 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: number
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ return 12345678;
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 8
+bytecodes: [
+ B(StackCheck),
+ B(ExtraWide), B(LdaSmi), U32(12345678),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 1234; return 5678;
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 12
+bytecodes: [
+ B(StackCheck),
+ B(Wide), B(LdaSmi), U16(1234),
+ B(Star), R(0),
+ B(Wide), B(LdaSmi), U16(5678),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 1234; return 1234;
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 12
+bytecodes: [
+ B(StackCheck),
+ B(Wide), B(LdaSmi), U16(1234),
+ B(Star), R(0),
+ B(Wide), B(LdaSmi), U16(1234),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/JumpsRequiringConstantWideOperands.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/JumpsRequiringConstantWideOperands.golden
new file mode 100644
index 0000000000..0d0f426a0a
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/JumpsRequiringConstantWideOperands.golden
@@ -0,0 +1,1303 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: number
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.1;
+ var x = 0.2;
+ var x = 0.2;
+ var x = 0.2;
+ var x = 0.2;
+ var x = 0.2;
+ var x = 0.2;
+ var x = 0.2;
+ var x = 0.2;
+ var x = 0.2;
+ var x = 0.2;
+ var x = 0.2;
+ var x = 0.2;
+ var x = 0.2;
+ var x = 0.2;
+ var x = 0.2;
+ var x = 0.2;
+ var x = 0.2;
+ var x = 0.2;
+ var x = 0.2;
+ var x = 0.2;
+ var x = 0.2;
+ var x = 0.2;
+ var x = 0.2;
+ var x = 0.2;
+ var x = 0.2;
+ var x = 0.2;
+ var x = 0.2;
+ var x = 0.2;
+ var x = 0.2;
+ var x = 0.2;
+ var x = 0.2;
+ var x = 0.2;
+ var x = 0.3;
+ var x = 0.3;
+ var x = 0.3;
+ var x = 0.3;
+ var x = 0.3;
+ var x = 0.3;
+ var x = 0.3;
+ var x = 0.3;
+ var x = 0.3;
+ var x = 0.3;
+ var x = 0.3;
+ var x = 0.3;
+ var x = 0.3;
+ var x = 0.3;
+ var x = 0.3;
+ var x = 0.3;
+ var x = 0.4;
+ var x = 0.4;
+ var x = 0.4;
+ var x = 0.4;
+ var x = 0.4;
+ var x = 0.4;
+ var x = 0.4;
+ var x = 0.4;
+ for (var i = 0; i < 3; i++) {
+ if (i == 1) continue;
+ if (i == 2) break;
+ }
+ return 3;
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 1422
+bytecodes: [
+ B(StackCheck),
+ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ B(LdaConstant), U8(1),
+ B(Star), R(0),
+ B(LdaConstant), U8(2),
+ B(Star), R(0),
+ B(LdaConstant), U8(3),
+ B(Star), R(0),
+ B(LdaConstant), U8(4),
+ B(Star), R(0),
+ B(LdaConstant), U8(5),
+ B(Star), R(0),
+ B(LdaConstant), U8(6),
+ B(Star), R(0),
+ B(LdaConstant), U8(7),
+ B(Star), R(0),
+ B(LdaConstant), U8(8),
+ B(Star), R(0),
+ B(LdaConstant), U8(9),
+ B(Star), R(0),
+ B(LdaConstant), U8(10),
+ B(Star), R(0),
+ B(LdaConstant), U8(11),
+ B(Star), R(0),
+ B(LdaConstant), U8(12),
+ B(Star), R(0),
+ B(LdaConstant), U8(13),
+ B(Star), R(0),
+ B(LdaConstant), U8(14),
+ B(Star), R(0),
+ B(LdaConstant), U8(15),
+ B(Star), R(0),
+ B(LdaConstant), U8(16),
+ B(Star), R(0),
+ B(LdaConstant), U8(17),
+ B(Star), R(0),
+ B(LdaConstant), U8(18),
+ B(Star), R(0),
+ B(LdaConstant), U8(19),
+ B(Star), R(0),
+ B(LdaConstant), U8(20),
+ B(Star), R(0),
+ B(LdaConstant), U8(21),
+ B(Star), R(0),
+ B(LdaConstant), U8(22),
+ B(Star), R(0),
+ B(LdaConstant), U8(23),
+ B(Star), R(0),
+ B(LdaConstant), U8(24),
+ B(Star), R(0),
+ B(LdaConstant), U8(25),
+ B(Star), R(0),
+ B(LdaConstant), U8(26),
+ B(Star), R(0),
+ B(LdaConstant), U8(27),
+ B(Star), R(0),
+ B(LdaConstant), U8(28),
+ B(Star), R(0),
+ B(LdaConstant), U8(29),
+ B(Star), R(0),
+ B(LdaConstant), U8(30),
+ B(Star), R(0),
+ B(LdaConstant), U8(31),
+ B(Star), R(0),
+ B(LdaConstant), U8(32),
+ B(Star), R(0),
+ B(LdaConstant), U8(33),
+ B(Star), R(0),
+ B(LdaConstant), U8(34),
+ B(Star), R(0),
+ B(LdaConstant), U8(35),
+ B(Star), R(0),
+ B(LdaConstant), U8(36),
+ B(Star), R(0),
+ B(LdaConstant), U8(37),
+ B(Star), R(0),
+ B(LdaConstant), U8(38),
+ B(Star), R(0),
+ B(LdaConstant), U8(39),
+ B(Star), R(0),
+ B(LdaConstant), U8(40),
+ B(Star), R(0),
+ B(LdaConstant), U8(41),
+ B(Star), R(0),
+ B(LdaConstant), U8(42),
+ B(Star), R(0),
+ B(LdaConstant), U8(43),
+ B(Star), R(0),
+ B(LdaConstant), U8(44),
+ B(Star), R(0),
+ B(LdaConstant), U8(45),
+ B(Star), R(0),
+ B(LdaConstant), U8(46),
+ B(Star), R(0),
+ B(LdaConstant), U8(47),
+ B(Star), R(0),
+ B(LdaConstant), U8(48),
+ B(Star), R(0),
+ B(LdaConstant), U8(49),
+ B(Star), R(0),
+ B(LdaConstant), U8(50),
+ B(Star), R(0),
+ B(LdaConstant), U8(51),
+ B(Star), R(0),
+ B(LdaConstant), U8(52),
+ B(Star), R(0),
+ B(LdaConstant), U8(53),
+ B(Star), R(0),
+ B(LdaConstant), U8(54),
+ B(Star), R(0),
+ B(LdaConstant), U8(55),
+ B(Star), R(0),
+ B(LdaConstant), U8(56),
+ B(Star), R(0),
+ B(LdaConstant), U8(57),
+ B(Star), R(0),
+ B(LdaConstant), U8(58),
+ B(Star), R(0),
+ B(LdaConstant), U8(59),
+ B(Star), R(0),
+ B(LdaConstant), U8(60),
+ B(Star), R(0),
+ B(LdaConstant), U8(61),
+ B(Star), R(0),
+ B(LdaConstant), U8(62),
+ B(Star), R(0),
+ B(LdaConstant), U8(63),
+ B(Star), R(0),
+ B(LdaConstant), U8(64),
+ B(Star), R(0),
+ B(LdaConstant), U8(65),
+ B(Star), R(0),
+ B(LdaConstant), U8(66),
+ B(Star), R(0),
+ B(LdaConstant), U8(67),
+ B(Star), R(0),
+ B(LdaConstant), U8(68),
+ B(Star), R(0),
+ B(LdaConstant), U8(69),
+ B(Star), R(0),
+ B(LdaConstant), U8(70),
+ B(Star), R(0),
+ B(LdaConstant), U8(71),
+ B(Star), R(0),
+ B(LdaConstant), U8(72),
+ B(Star), R(0),
+ B(LdaConstant), U8(73),
+ B(Star), R(0),
+ B(LdaConstant), U8(74),
+ B(Star), R(0),
+ B(LdaConstant), U8(75),
+ B(Star), R(0),
+ B(LdaConstant), U8(76),
+ B(Star), R(0),
+ B(LdaConstant), U8(77),
+ B(Star), R(0),
+ B(LdaConstant), U8(78),
+ B(Star), R(0),
+ B(LdaConstant), U8(79),
+ B(Star), R(0),
+ B(LdaConstant), U8(80),
+ B(Star), R(0),
+ B(LdaConstant), U8(81),
+ B(Star), R(0),
+ B(LdaConstant), U8(82),
+ B(Star), R(0),
+ B(LdaConstant), U8(83),
+ B(Star), R(0),
+ B(LdaConstant), U8(84),
+ B(Star), R(0),
+ B(LdaConstant), U8(85),
+ B(Star), R(0),
+ B(LdaConstant), U8(86),
+ B(Star), R(0),
+ B(LdaConstant), U8(87),
+ B(Star), R(0),
+ B(LdaConstant), U8(88),
+ B(Star), R(0),
+ B(LdaConstant), U8(89),
+ B(Star), R(0),
+ B(LdaConstant), U8(90),
+ B(Star), R(0),
+ B(LdaConstant), U8(91),
+ B(Star), R(0),
+ B(LdaConstant), U8(92),
+ B(Star), R(0),
+ B(LdaConstant), U8(93),
+ B(Star), R(0),
+ B(LdaConstant), U8(94),
+ B(Star), R(0),
+ B(LdaConstant), U8(95),
+ B(Star), R(0),
+ B(LdaConstant), U8(96),
+ B(Star), R(0),
+ B(LdaConstant), U8(97),
+ B(Star), R(0),
+ B(LdaConstant), U8(98),
+ B(Star), R(0),
+ B(LdaConstant), U8(99),
+ B(Star), R(0),
+ B(LdaConstant), U8(100),
+ B(Star), R(0),
+ B(LdaConstant), U8(101),
+ B(Star), R(0),
+ B(LdaConstant), U8(102),
+ B(Star), R(0),
+ B(LdaConstant), U8(103),
+ B(Star), R(0),
+ B(LdaConstant), U8(104),
+ B(Star), R(0),
+ B(LdaConstant), U8(105),
+ B(Star), R(0),
+ B(LdaConstant), U8(106),
+ B(Star), R(0),
+ B(LdaConstant), U8(107),
+ B(Star), R(0),
+ B(LdaConstant), U8(108),
+ B(Star), R(0),
+ B(LdaConstant), U8(109),
+ B(Star), R(0),
+ B(LdaConstant), U8(110),
+ B(Star), R(0),
+ B(LdaConstant), U8(111),
+ B(Star), R(0),
+ B(LdaConstant), U8(112),
+ B(Star), R(0),
+ B(LdaConstant), U8(113),
+ B(Star), R(0),
+ B(LdaConstant), U8(114),
+ B(Star), R(0),
+ B(LdaConstant), U8(115),
+ B(Star), R(0),
+ B(LdaConstant), U8(116),
+ B(Star), R(0),
+ B(LdaConstant), U8(117),
+ B(Star), R(0),
+ B(LdaConstant), U8(118),
+ B(Star), R(0),
+ B(LdaConstant), U8(119),
+ B(Star), R(0),
+ B(LdaConstant), U8(120),
+ B(Star), R(0),
+ B(LdaConstant), U8(121),
+ B(Star), R(0),
+ B(LdaConstant), U8(122),
+ B(Star), R(0),
+ B(LdaConstant), U8(123),
+ B(Star), R(0),
+ B(LdaConstant), U8(124),
+ B(Star), R(0),
+ B(LdaConstant), U8(125),
+ B(Star), R(0),
+ B(LdaConstant), U8(126),
+ B(Star), R(0),
+ B(LdaConstant), U8(127),
+ B(Star), R(0),
+ B(LdaConstant), U8(128),
+ B(Star), R(0),
+ B(LdaConstant), U8(129),
+ B(Star), R(0),
+ B(LdaConstant), U8(130),
+ B(Star), R(0),
+ B(LdaConstant), U8(131),
+ B(Star), R(0),
+ B(LdaConstant), U8(132),
+ B(Star), R(0),
+ B(LdaConstant), U8(133),
+ B(Star), R(0),
+ B(LdaConstant), U8(134),
+ B(Star), R(0),
+ B(LdaConstant), U8(135),
+ B(Star), R(0),
+ B(LdaConstant), U8(136),
+ B(Star), R(0),
+ B(LdaConstant), U8(137),
+ B(Star), R(0),
+ B(LdaConstant), U8(138),
+ B(Star), R(0),
+ B(LdaConstant), U8(139),
+ B(Star), R(0),
+ B(LdaConstant), U8(140),
+ B(Star), R(0),
+ B(LdaConstant), U8(141),
+ B(Star), R(0),
+ B(LdaConstant), U8(142),
+ B(Star), R(0),
+ B(LdaConstant), U8(143),
+ B(Star), R(0),
+ B(LdaConstant), U8(144),
+ B(Star), R(0),
+ B(LdaConstant), U8(145),
+ B(Star), R(0),
+ B(LdaConstant), U8(146),
+ B(Star), R(0),
+ B(LdaConstant), U8(147),
+ B(Star), R(0),
+ B(LdaConstant), U8(148),
+ B(Star), R(0),
+ B(LdaConstant), U8(149),
+ B(Star), R(0),
+ B(LdaConstant), U8(150),
+ B(Star), R(0),
+ B(LdaConstant), U8(151),
+ B(Star), R(0),
+ B(LdaConstant), U8(152),
+ B(Star), R(0),
+ B(LdaConstant), U8(153),
+ B(Star), R(0),
+ B(LdaConstant), U8(154),
+ B(Star), R(0),
+ B(LdaConstant), U8(155),
+ B(Star), R(0),
+ B(LdaConstant), U8(156),
+ B(Star), R(0),
+ B(LdaConstant), U8(157),
+ B(Star), R(0),
+ B(LdaConstant), U8(158),
+ B(Star), R(0),
+ B(LdaConstant), U8(159),
+ B(Star), R(0),
+ B(LdaConstant), U8(160),
+ B(Star), R(0),
+ B(LdaConstant), U8(161),
+ B(Star), R(0),
+ B(LdaConstant), U8(162),
+ B(Star), R(0),
+ B(LdaConstant), U8(163),
+ B(Star), R(0),
+ B(LdaConstant), U8(164),
+ B(Star), R(0),
+ B(LdaConstant), U8(165),
+ B(Star), R(0),
+ B(LdaConstant), U8(166),
+ B(Star), R(0),
+ B(LdaConstant), U8(167),
+ B(Star), R(0),
+ B(LdaConstant), U8(168),
+ B(Star), R(0),
+ B(LdaConstant), U8(169),
+ B(Star), R(0),
+ B(LdaConstant), U8(170),
+ B(Star), R(0),
+ B(LdaConstant), U8(171),
+ B(Star), R(0),
+ B(LdaConstant), U8(172),
+ B(Star), R(0),
+ B(LdaConstant), U8(173),
+ B(Star), R(0),
+ B(LdaConstant), U8(174),
+ B(Star), R(0),
+ B(LdaConstant), U8(175),
+ B(Star), R(0),
+ B(LdaConstant), U8(176),
+ B(Star), R(0),
+ B(LdaConstant), U8(177),
+ B(Star), R(0),
+ B(LdaConstant), U8(178),
+ B(Star), R(0),
+ B(LdaConstant), U8(179),
+ B(Star), R(0),
+ B(LdaConstant), U8(180),
+ B(Star), R(0),
+ B(LdaConstant), U8(181),
+ B(Star), R(0),
+ B(LdaConstant), U8(182),
+ B(Star), R(0),
+ B(LdaConstant), U8(183),
+ B(Star), R(0),
+ B(LdaConstant), U8(184),
+ B(Star), R(0),
+ B(LdaConstant), U8(185),
+ B(Star), R(0),
+ B(LdaConstant), U8(186),
+ B(Star), R(0),
+ B(LdaConstant), U8(187),
+ B(Star), R(0),
+ B(LdaConstant), U8(188),
+ B(Star), R(0),
+ B(LdaConstant), U8(189),
+ B(Star), R(0),
+ B(LdaConstant), U8(190),
+ B(Star), R(0),
+ B(LdaConstant), U8(191),
+ B(Star), R(0),
+ B(LdaConstant), U8(192),
+ B(Star), R(0),
+ B(LdaConstant), U8(193),
+ B(Star), R(0),
+ B(LdaConstant), U8(194),
+ B(Star), R(0),
+ B(LdaConstant), U8(195),
+ B(Star), R(0),
+ B(LdaConstant), U8(196),
+ B(Star), R(0),
+ B(LdaConstant), U8(197),
+ B(Star), R(0),
+ B(LdaConstant), U8(198),
+ B(Star), R(0),
+ B(LdaConstant), U8(199),
+ B(Star), R(0),
+ B(LdaConstant), U8(200),
+ B(Star), R(0),
+ B(LdaConstant), U8(201),
+ B(Star), R(0),
+ B(LdaConstant), U8(202),
+ B(Star), R(0),
+ B(LdaConstant), U8(203),
+ B(Star), R(0),
+ B(LdaConstant), U8(204),
+ B(Star), R(0),
+ B(LdaConstant), U8(205),
+ B(Star), R(0),
+ B(LdaConstant), U8(206),
+ B(Star), R(0),
+ B(LdaConstant), U8(207),
+ B(Star), R(0),
+ B(LdaConstant), U8(208),
+ B(Star), R(0),
+ B(LdaConstant), U8(209),
+ B(Star), R(0),
+ B(LdaConstant), U8(210),
+ B(Star), R(0),
+ B(LdaConstant), U8(211),
+ B(Star), R(0),
+ B(LdaConstant), U8(212),
+ B(Star), R(0),
+ B(LdaConstant), U8(213),
+ B(Star), R(0),
+ B(LdaConstant), U8(214),
+ B(Star), R(0),
+ B(LdaConstant), U8(215),
+ B(Star), R(0),
+ B(LdaConstant), U8(216),
+ B(Star), R(0),
+ B(LdaConstant), U8(217),
+ B(Star), R(0),
+ B(LdaConstant), U8(218),
+ B(Star), R(0),
+ B(LdaConstant), U8(219),
+ B(Star), R(0),
+ B(LdaConstant), U8(220),
+ B(Star), R(0),
+ B(LdaConstant), U8(221),
+ B(Star), R(0),
+ B(LdaConstant), U8(222),
+ B(Star), R(0),
+ B(LdaConstant), U8(223),
+ B(Star), R(0),
+ B(LdaConstant), U8(224),
+ B(Star), R(0),
+ B(LdaConstant), U8(225),
+ B(Star), R(0),
+ B(LdaConstant), U8(226),
+ B(Star), R(0),
+ B(LdaConstant), U8(227),
+ B(Star), R(0),
+ B(LdaConstant), U8(228),
+ B(Star), R(0),
+ B(LdaConstant), U8(229),
+ B(Star), R(0),
+ B(LdaConstant), U8(230),
+ B(Star), R(0),
+ B(LdaConstant), U8(231),
+ B(Star), R(0),
+ B(LdaConstant), U8(232),
+ B(Star), R(0),
+ B(LdaConstant), U8(233),
+ B(Star), R(0),
+ B(LdaConstant), U8(234),
+ B(Star), R(0),
+ B(LdaConstant), U8(235),
+ B(Star), R(0),
+ B(LdaConstant), U8(236),
+ B(Star), R(0),
+ B(LdaConstant), U8(237),
+ B(Star), R(0),
+ B(LdaConstant), U8(238),
+ B(Star), R(0),
+ B(LdaConstant), U8(239),
+ B(Star), R(0),
+ B(LdaConstant), U8(240),
+ B(Star), R(0),
+ B(LdaConstant), U8(241),
+ B(Star), R(0),
+ B(LdaConstant), U8(242),
+ B(Star), R(0),
+ B(LdaConstant), U8(243),
+ B(Star), R(0),
+ B(LdaConstant), U8(244),
+ B(Star), R(0),
+ B(LdaConstant), U8(245),
+ B(Star), R(0),
+ B(LdaConstant), U8(246),
+ B(Star), R(0),
+ B(LdaConstant), U8(247),
+ B(Star), R(0),
+ B(LdaConstant), U8(248),
+ B(Star), R(0),
+ B(LdaConstant), U8(249),
+ B(Star), R(0),
+ B(LdaConstant), U8(250),
+ B(Star), R(0),
+ B(LdaConstant), U8(251),
+ B(Star), R(0),
+ B(LdaConstant), U8(252),
+ B(Star), R(0),
+ B(LdaConstant), U8(253),
+ B(Star), R(0),
+ B(LdaConstant), U8(254),
+ B(Star), R(0),
+ B(LdaConstant), U8(255),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(256),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(257),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(258),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(259),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(260),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(261),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(262),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(263),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(264),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(265),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(266),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(267),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(268),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(269),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(270),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(271),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(272),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(273),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(274),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(275),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(276),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(277),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(278),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(279),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(280),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(281),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(282),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(283),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(284),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(285),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(286),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(287),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(288),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(289),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(290),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(291),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(292),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(293),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(294),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(295),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(296),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(297),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(298),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(299),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(300),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(301),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(302),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(303),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(304),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(305),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(306),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(307),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(308),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(309),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(310),
+ B(Star), R(0),
+ B(Wide), B(LdaConstant), U16(311),
+ B(Star), R(0),
+ B(LdaZero),
+ B(Star), R(1),
+ B(Ldar), R(1),
+ B(Star), R(2),
+ B(LdaSmi), U8(3),
+ B(TestLessThan), R(2),
+ B(Wide), B(JumpIfFalse), U16(46),
+ B(StackCheck),
+ B(Ldar), R(1),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(TestEqual), R(2),
+ B(Wide), B(JumpIfFalse), U16(7),
+ B(Wide), B(Jump), U16(19),
+ B(Ldar), R(1),
+ B(Star), R(2),
+ B(LdaSmi), U8(2),
+ B(TestEqual), R(2),
+ B(Wide), B(JumpIfFalse), U16(7),
+ B(Wide), B(Jump), U16(13),
+ B(Ldar), R(1),
+ B(ToNumber),
+ B(Star), R(2),
+ B(Inc),
+ B(Star), R(1),
+ B(Jump), U8(-53),
+ B(LdaSmi), U8(3),
+ B(Return),
+]
+constant pool: [
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.1,
+ 0.2,
+ 0.2,
+ 0.2,
+ 0.2,
+ 0.2,
+ 0.2,
+ 0.2,
+ 0.2,
+ 0.2,
+ 0.2,
+ 0.2,
+ 0.2,
+ 0.2,
+ 0.2,
+ 0.2,
+ 0.2,
+ 0.2,
+ 0.2,
+ 0.2,
+ 0.2,
+ 0.2,
+ 0.2,
+ 0.2,
+ 0.2,
+ 0.2,
+ 0.2,
+ 0.2,
+ 0.2,
+ 0.2,
+ 0.2,
+ 0.2,
+ 0.2,
+ 0.3,
+ 0.3,
+ 0.3,
+ 0.3,
+ 0.3,
+ 0.3,
+ 0.3,
+ 0.3,
+ 0.3,
+ 0.3,
+ 0.3,
+ 0.3,
+ 0.3,
+ 0.3,
+ 0.3,
+ 0.3,
+ 0.4,
+ 0.4,
+ 0.4,
+ 0.4,
+ 0.4,
+ 0.4,
+ 0.4,
+ 0.4,
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden
new file mode 100644
index 0000000000..1b775dd3e5
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden
@@ -0,0 +1,114 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: string
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ let x = 10;
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 10
+bytecodes: [
+ B(LdaTheHole),
+ B(Star), R(0),
+ B(StackCheck),
+ B(LdaSmi), U8(10),
+ B(Star), R(0),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ let x = 10; return x;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 20
+bytecodes: [
+ B(LdaTheHole),
+ B(Star), R(0),
+ B(StackCheck),
+ B(LdaSmi), U8(10),
+ B(Star), R(0),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(0),
+ B(Star), R(1),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(1), U8(1),
+ B(Return),
+]
+constant pool: [
+ "x",
+]
+handlers: [
+]
+
+---
+snippet: "
+ let x = (x = 20);
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 27
+bytecodes: [
+ B(LdaTheHole),
+ B(Star), R(0),
+ B(StackCheck),
+ B(LdaSmi), U8(20),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(0),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
+ B(Ldar), R(1),
+ B(Star), R(0),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ "x",
+]
+handlers: [
+]
+
+---
+snippet: "
+ let x = 10; x = 20;
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 31
+bytecodes: [
+ B(LdaTheHole),
+ B(Star), R(0),
+ B(StackCheck),
+ B(LdaSmi), U8(10),
+ B(Star), R(0),
+ B(LdaSmi), U8(20),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(0),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
+ B(Ldar), R(1),
+ B(Star), R(0),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ "x",
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden
new file mode 100644
index 0000000000..f2487874e4
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden
@@ -0,0 +1,136 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: mixed
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ let x = 10; function f1() {return x;}
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 24
+bytecodes: [
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
+ B(PushContext), R(1),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateClosure), U8(0), U8(0),
+ B(Star), R(0),
+ B(StackCheck),
+ B(LdaSmi), U8(10),
+ B(StaContextSlot), R(context), U8(4),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ let x = 10; function f1() {return x;} return x;
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 37
+bytecodes: [
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
+ B(PushContext), R(1),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateClosure), U8(0), U8(0),
+ B(Star), R(0),
+ B(StackCheck),
+ B(LdaSmi), U8(10),
+ B(StaContextSlot), R(context), U8(4),
+ B(LdaContextSlot), R(context), U8(4),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(1),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
+ B(Return),
+]
+constant pool: [
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ let x = (x = 20); function f1() {return x;}
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 45
+bytecodes: [
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
+ B(PushContext), R(1),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateClosure), U8(0), U8(0),
+ B(Star), R(0),
+ B(StackCheck),
+ B(LdaSmi), U8(20),
+ B(Star), R(2),
+ B(LdaContextSlot), R(context), U8(4),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
+ B(Ldar), R(2),
+ B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ let x = 10; x = 20; function f1() {return x;}
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 47
+bytecodes: [
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
+ B(PushContext), R(1),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateClosure), U8(0), U8(0),
+ B(Star), R(0),
+ B(StackCheck),
+ B(LdaSmi), U8(10),
+ B(StaContextSlot), R(context), U8(4),
+ B(LdaSmi), U8(20),
+ B(Star), R(2),
+ B(LdaContextSlot), R(context), U8(4),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
+ B(Ldar), R(2),
+ B(StaContextSlot), R(context), U8(4),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden
new file mode 100644
index 0000000000..82a31dd3b5
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden
@@ -0,0 +1,604 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: string
+execute: yes
+wrap: no
+test function name: f
+
+---
+snippet: "
+ var a = 1;
+ function f() { return a; }
+ f()
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 5
+bytecodes: [
+ B(StackCheck),
+ B(LdaGlobal), U8(0), U8(1),
+ B(Return),
+]
+constant pool: [
+ "a",
+]
+handlers: [
+]
+
+---
+snippet: "
+ function t() { }
+ function f() { return t; }
+ f()
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 5
+bytecodes: [
+ B(StackCheck),
+ B(LdaGlobal), U8(0), U8(1),
+ B(Return),
+]
+constant pool: [
+ "t",
+]
+handlers: [
+]
+
+---
+snippet: "
+ a = 1;
+ function f() { return a; }
+ f()
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 5
+bytecodes: [
+ B(StackCheck),
+ B(LdaGlobal), U8(0), U8(1),
+ B(Return),
+]
+constant pool: [
+ "a",
+]
+handlers: [
+]
+
+---
+snippet: "
+ a = 1;
+ function f(b) {
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ return a;
+ }
+ f({name: 1});
+"
+frame size: 1
+parameter count: 2
+bytecode array length: 1032
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(1),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(3),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(5),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(7),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(9),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(11),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(13),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(15),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(17),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(19),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(21),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(23),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(25),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(27),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(29),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(31),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(33),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(35),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(37),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(39),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(41),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(43),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(45),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(47),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(49),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(51),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(53),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(55),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(57),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(59),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(61),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(63),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(65),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(67),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(69),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(71),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(73),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(75),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(77),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(79),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(81),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(83),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(85),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(87),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(89),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(91),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(93),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(95),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(97),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(99),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(101),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(103),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(105),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(107),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(109),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(111),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(113),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(115),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(117),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(119),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(121),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(123),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(125),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(127),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(129),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(131),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(133),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(135),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(137),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(139),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(141),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(143),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(145),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(147),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(149),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(151),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(153),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(155),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(157),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(159),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(161),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(163),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(165),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(167),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(169),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(171),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(173),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(175),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(177),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(179),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(181),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(183),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(185),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(187),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(189),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(191),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(193),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(195),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(197),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(199),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(201),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(203),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(205),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(207),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(209),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(211),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(213),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(215),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(217),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(219),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(221),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(223),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(225),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(227),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(229),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(231),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(233),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(235),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(237),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(239),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(241),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(243),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(245),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(247),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(249),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(251),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(253),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(255),
+ B(Wide), B(LdaGlobal), U16(1), U16(257),
+ B(Return),
+]
+constant pool: [
+ "name",
+ "a",
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LogicalExpressions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LogicalExpressions.golden
new file mode 100644
index 0000000000..7fa24bf4ed
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LogicalExpressions.golden
@@ -0,0 +1,952 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: number
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ var x = 0; return x || 3;
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 9
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(JumpIfToBooleanTrue), U8(4),
+ B(LdaSmi), U8(3),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 0; return (x == 1) || 3;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 15
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(TestEqual), R(1),
+ B(JumpIfTrue), U8(4),
+ B(LdaSmi), U8(3),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 0; return x && 3;
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 9
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(JumpIfToBooleanFalse), U8(4),
+ B(LdaSmi), U8(3),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 0; return (x == 0) && 3;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 14
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LdaZero),
+ B(TestEqual), R(1),
+ B(JumpIfFalse), U8(4),
+ B(LdaSmi), U8(3),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 0; return x || (1, 2, 3);
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 9
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(JumpIfToBooleanTrue), U8(4),
+ B(LdaSmi), U8(3),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 2, b = 3, c = 4; return a || (a, b, a, b, c = 5, 3);
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 32
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(2),
+ B(Star), R(0),
+ B(LdaSmi), U8(3),
+ B(Star), R(1),
+ B(LdaSmi), U8(4),
+ B(Star), R(2),
+ B(Ldar), R(0),
+ B(JumpIfToBooleanTrue), U8(16),
+ B(Ldar), R(0),
+ B(Ldar), R(1),
+ B(Ldar), R(0),
+ B(Ldar), R(1),
+ B(LdaSmi), U8(5),
+ B(Star), R(2),
+ B(LdaSmi), U8(3),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 1; var a = 2, b = 3; return x || (
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2, 3);
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 276
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(3),
+ B(Star), R(2),
+ B(Ldar), R(0),
+ B(JumpIfToBooleanTrueConstant), U8(0),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(3),
+ B(Return),
+]
+constant pool: [
+ 260,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 0; var a = 2, b = 3; return x && (
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2, 3);
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 275
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(3),
+ B(Star), R(2),
+ B(Ldar), R(0),
+ B(JumpIfToBooleanFalseConstant), U8(0),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(3),
+ B(Return),
+]
+constant pool: [
+ 260,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 1; var a = 2, b = 3; return (x > 3) || (
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2, 3);
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 282
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(3),
+ B(Star), R(2),
+ B(Ldar), R(0),
+ B(Star), R(3),
+ B(LdaSmi), U8(3),
+ B(TestGreaterThan), R(3),
+ B(JumpIfTrueConstant), U8(0),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(3),
+ B(Return),
+]
+constant pool: [
+ 260,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 0; var a = 2, b = 3; return (x < 5) && (
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2,
+ a = 1, b = 2, 3);
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 281
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(3),
+ B(Star), R(2),
+ B(Ldar), R(0),
+ B(Star), R(3),
+ B(LdaSmi), U8(5),
+ B(TestLessThan), R(3),
+ B(JumpIfFalseConstant), U8(0),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(3),
+ B(Return),
+]
+constant pool: [
+ 260,
+]
+handlers: [
+]
+
+---
+snippet: "
+ return 0 && 3;
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 3
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ return 1 || 3;
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 4
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 1; return x && 3 || 0, 1;
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 15
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(JumpIfToBooleanFalse), U8(4),
+ B(LdaSmi), U8(3),
+ B(JumpIfToBooleanTrue), U8(3),
+ B(LdaZero),
+ B(LdaSmi), U8(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden
new file mode 100644
index 0000000000..1e96047b92
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden
@@ -0,0 +1,139 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: string
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ eval('var x = 10;'); return x;
+"
+frame size: 9
+parameter count: 1
+bytecode array length: 67
+bytecodes: [
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
+ B(PushContext), R(0),
+ B(Ldar), R(this),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateMappedArguments),
+ B(StaContextSlot), R(context), U8(5),
+ B(Ldar), R(new_target),
+ B(StaContextSlot), R(context), U8(6),
+ B(StackCheck),
+ B(LdaConstant), U8(0),
+ B(Star), R(3),
+ B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(3), U8(1), R(1),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(Mov), R(1), R(4),
+ B(Mov), R(3), R(5),
+ B(Mov), R(closure), R(6),
+ B(LdaZero),
+ B(Star), R(7),
+ B(LdaSmi), U8(30),
+ B(Star), R(8),
+ B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(5),
+ B(Star), R(1),
+ B(Call), R(1), R(2), U8(2), U8(0),
+ B(LdaLookupSlot), U8(2),
+ B(Return),
+]
+constant pool: [
+ "eval",
+ "var x = 10;",
+ "x",
+]
+handlers: [
+]
+
+---
+snippet: "
+ eval('var x = 10;'); return typeof x;
+"
+frame size: 9
+parameter count: 1
+bytecode array length: 68
+bytecodes: [
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
+ B(PushContext), R(0),
+ B(Ldar), R(this),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateMappedArguments),
+ B(StaContextSlot), R(context), U8(5),
+ B(Ldar), R(new_target),
+ B(StaContextSlot), R(context), U8(6),
+ B(StackCheck),
+ B(LdaConstant), U8(0),
+ B(Star), R(3),
+ B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(3), U8(1), R(1),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(Mov), R(1), R(4),
+ B(Mov), R(3), R(5),
+ B(Mov), R(closure), R(6),
+ B(LdaZero),
+ B(Star), R(7),
+ B(LdaSmi), U8(30),
+ B(Star), R(8),
+ B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(5),
+ B(Star), R(1),
+ B(Call), R(1), R(2), U8(2), U8(0),
+ B(LdaLookupSlotInsideTypeof), U8(2),
+ B(TypeOf),
+ B(Return),
+]
+constant pool: [
+ "eval",
+ "var x = 10;",
+ "x",
+]
+handlers: [
+]
+
+---
+snippet: "
+ x = 20; return eval('');
+"
+frame size: 9
+parameter count: 1
+bytecode array length: 69
+bytecodes: [
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
+ B(PushContext), R(0),
+ B(Ldar), R(this),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateMappedArguments),
+ B(StaContextSlot), R(context), U8(5),
+ B(Ldar), R(new_target),
+ B(StaContextSlot), R(context), U8(6),
+ B(StackCheck),
+ B(LdaSmi), U8(20),
+ B(StaLookupSlotSloppy), U8(0),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(3), U8(1), R(1),
+ B(LdaConstant), U8(2),
+ B(Star), R(3),
+ B(Mov), R(1), R(4),
+ B(Mov), R(3), R(5),
+ B(Mov), R(closure), R(6),
+ B(LdaZero),
+ B(Star), R(7),
+ B(LdaSmi), U8(30),
+ B(Star), R(8),
+ B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(5),
+ B(Star), R(1),
+ B(Call), R(1), R(2), U8(2), U8(0),
+ B(Return),
+]
+constant pool: [
+ "x",
+ "eval",
+ "",
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotInEval.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotInEval.golden
new file mode 100644
index 0000000000..feb5e6c9ca
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotInEval.golden
@@ -0,0 +1,106 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: string
+execute: yes
+wrap: no
+test function name: f
+
+---
+snippet: "
+ var f;
+ var x = 1;
+ function f1() {
+ eval(\"function t() { return x; }; f = t; f();\");
+ }
+ f1();
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 4
+bytecodes: [
+ B(StackCheck),
+ B(LdaLookupSlot), U8(0),
+ B(Return),
+]
+constant pool: [
+ "x",
+]
+handlers: [
+]
+
+---
+snippet: "
+ var f;
+ var x = 1;
+ function f1() {
+ eval(\"function t() { x = 10; }; f = t; f();\");
+ }
+ f1();
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 7
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(10),
+ B(StaLookupSlotSloppy), U8(0),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ "x",
+]
+handlers: [
+]
+
+---
+snippet: "
+ var f;
+ var x = 1;
+ function f1() {
+ eval(\"function t() { 'use strict'; x = 10; }; f = t; f();\");
+ }
+ f1();
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 7
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(10),
+ B(StaLookupSlotStrict), U8(0),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ "x",
+]
+handlers: [
+]
+
+---
+snippet: "
+ var f;
+ var x = 1;
+ function f1() {
+ eval(\"function t() { return typeof x; }; f = t; f();\");
+ }
+ f1();
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 5
+bytecodes: [
+ B(StackCheck),
+ B(LdaLookupSlotInsideTypeof), U8(0),
+ B(TypeOf),
+ B(Return),
+]
+constant pool: [
+ "x",
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotWideInEval.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotWideInEval.golden
new file mode 100644
index 0000000000..0e261bbaa3
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotWideInEval.golden
@@ -0,0 +1,4219 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: mixed
+execute: yes
+wrap: no
+test function name: f
+
+---
+snippet: "
+ var f;
+ var x = 1;
+ function f1() {
+ eval(\"function t() {\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"return x;\" +
+ \"};\" +
+ \"f = t; f();\"
+ );
+ }
+ f1();
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 1030
+bytecodes: [
+ B(StackCheck),
+ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ B(LdaConstant), U8(1),
+ B(Star), R(0),
+ B(LdaConstant), U8(2),
+ B(Star), R(0),
+ B(LdaConstant), U8(3),
+ B(Star), R(0),
+ B(LdaConstant), U8(4),
+ B(Star), R(0),
+ B(LdaConstant), U8(5),
+ B(Star), R(0),
+ B(LdaConstant), U8(6),
+ B(Star), R(0),
+ B(LdaConstant), U8(7),
+ B(Star), R(0),
+ B(LdaConstant), U8(8),
+ B(Star), R(0),
+ B(LdaConstant), U8(9),
+ B(Star), R(0),
+ B(LdaConstant), U8(10),
+ B(Star), R(0),
+ B(LdaConstant), U8(11),
+ B(Star), R(0),
+ B(LdaConstant), U8(12),
+ B(Star), R(0),
+ B(LdaConstant), U8(13),
+ B(Star), R(0),
+ B(LdaConstant), U8(14),
+ B(Star), R(0),
+ B(LdaConstant), U8(15),
+ B(Star), R(0),
+ B(LdaConstant), U8(16),
+ B(Star), R(0),
+ B(LdaConstant), U8(17),
+ B(Star), R(0),
+ B(LdaConstant), U8(18),
+ B(Star), R(0),
+ B(LdaConstant), U8(19),
+ B(Star), R(0),
+ B(LdaConstant), U8(20),
+ B(Star), R(0),
+ B(LdaConstant), U8(21),
+ B(Star), R(0),
+ B(LdaConstant), U8(22),
+ B(Star), R(0),
+ B(LdaConstant), U8(23),
+ B(Star), R(0),
+ B(LdaConstant), U8(24),
+ B(Star), R(0),
+ B(LdaConstant), U8(25),
+ B(Star), R(0),
+ B(LdaConstant), U8(26),
+ B(Star), R(0),
+ B(LdaConstant), U8(27),
+ B(Star), R(0),
+ B(LdaConstant), U8(28),
+ B(Star), R(0),
+ B(LdaConstant), U8(29),
+ B(Star), R(0),
+ B(LdaConstant), U8(30),
+ B(Star), R(0),
+ B(LdaConstant), U8(31),
+ B(Star), R(0),
+ B(LdaConstant), U8(32),
+ B(Star), R(0),
+ B(LdaConstant), U8(33),
+ B(Star), R(0),
+ B(LdaConstant), U8(34),
+ B(Star), R(0),
+ B(LdaConstant), U8(35),
+ B(Star), R(0),
+ B(LdaConstant), U8(36),
+ B(Star), R(0),
+ B(LdaConstant), U8(37),
+ B(Star), R(0),
+ B(LdaConstant), U8(38),
+ B(Star), R(0),
+ B(LdaConstant), U8(39),
+ B(Star), R(0),
+ B(LdaConstant), U8(40),
+ B(Star), R(0),
+ B(LdaConstant), U8(41),
+ B(Star), R(0),
+ B(LdaConstant), U8(42),
+ B(Star), R(0),
+ B(LdaConstant), U8(43),
+ B(Star), R(0),
+ B(LdaConstant), U8(44),
+ B(Star), R(0),
+ B(LdaConstant), U8(45),
+ B(Star), R(0),
+ B(LdaConstant), U8(46),
+ B(Star), R(0),
+ B(LdaConstant), U8(47),
+ B(Star), R(0),
+ B(LdaConstant), U8(48),
+ B(Star), R(0),
+ B(LdaConstant), U8(49),
+ B(Star), R(0),
+ B(LdaConstant), U8(50),
+ B(Star), R(0),
+ B(LdaConstant), U8(51),
+ B(Star), R(0),
+ B(LdaConstant), U8(52),
+ B(Star), R(0),
+ B(LdaConstant), U8(53),
+ B(Star), R(0),
+ B(LdaConstant), U8(54),
+ B(Star), R(0),
+ B(LdaConstant), U8(55),
+ B(Star), R(0),
+ B(LdaConstant), U8(56),
+ B(Star), R(0),
+ B(LdaConstant), U8(57),
+ B(Star), R(0),
+ B(LdaConstant), U8(58),
+ B(Star), R(0),
+ B(LdaConstant), U8(59),
+ B(Star), R(0),
+ B(LdaConstant), U8(60),
+ B(Star), R(0),
+ B(LdaConstant), U8(61),
+ B(Star), R(0),
+ B(LdaConstant), U8(62),
+ B(Star), R(0),
+ B(LdaConstant), U8(63),
+ B(Star), R(0),
+ B(LdaConstant), U8(64),
+ B(Star), R(0),
+ B(LdaConstant), U8(65),
+ B(Star), R(0),
+ B(LdaConstant), U8(66),
+ B(Star), R(0),
+ B(LdaConstant), U8(67),
+ B(Star), R(0),
+ B(LdaConstant), U8(68),
+ B(Star), R(0),
+ B(LdaConstant), U8(69),
+ B(Star), R(0),
+ B(LdaConstant), U8(70),
+ B(Star), R(0),
+ B(LdaConstant), U8(71),
+ B(Star), R(0),
+ B(LdaConstant), U8(72),
+ B(Star), R(0),
+ B(LdaConstant), U8(73),
+ B(Star), R(0),
+ B(LdaConstant), U8(74),
+ B(Star), R(0),
+ B(LdaConstant), U8(75),
+ B(Star), R(0),
+ B(LdaConstant), U8(76),
+ B(Star), R(0),
+ B(LdaConstant), U8(77),
+ B(Star), R(0),
+ B(LdaConstant), U8(78),
+ B(Star), R(0),
+ B(LdaConstant), U8(79),
+ B(Star), R(0),
+ B(LdaConstant), U8(80),
+ B(Star), R(0),
+ B(LdaConstant), U8(81),
+ B(Star), R(0),
+ B(LdaConstant), U8(82),
+ B(Star), R(0),
+ B(LdaConstant), U8(83),
+ B(Star), R(0),
+ B(LdaConstant), U8(84),
+ B(Star), R(0),
+ B(LdaConstant), U8(85),
+ B(Star), R(0),
+ B(LdaConstant), U8(86),
+ B(Star), R(0),
+ B(LdaConstant), U8(87),
+ B(Star), R(0),
+ B(LdaConstant), U8(88),
+ B(Star), R(0),
+ B(LdaConstant), U8(89),
+ B(Star), R(0),
+ B(LdaConstant), U8(90),
+ B(Star), R(0),
+ B(LdaConstant), U8(91),
+ B(Star), R(0),
+ B(LdaConstant), U8(92),
+ B(Star), R(0),
+ B(LdaConstant), U8(93),
+ B(Star), R(0),
+ B(LdaConstant), U8(94),
+ B(Star), R(0),
+ B(LdaConstant), U8(95),
+ B(Star), R(0),
+ B(LdaConstant), U8(96),
+ B(Star), R(0),
+ B(LdaConstant), U8(97),
+ B(Star), R(0),
+ B(LdaConstant), U8(98),
+ B(Star), R(0),
+ B(LdaConstant), U8(99),
+ B(Star), R(0),
+ B(LdaConstant), U8(100),
+ B(Star), R(0),
+ B(LdaConstant), U8(101),
+ B(Star), R(0),
+ B(LdaConstant), U8(102),
+ B(Star), R(0),
+ B(LdaConstant), U8(103),
+ B(Star), R(0),
+ B(LdaConstant), U8(104),
+ B(Star), R(0),
+ B(LdaConstant), U8(105),
+ B(Star), R(0),
+ B(LdaConstant), U8(106),
+ B(Star), R(0),
+ B(LdaConstant), U8(107),
+ B(Star), R(0),
+ B(LdaConstant), U8(108),
+ B(Star), R(0),
+ B(LdaConstant), U8(109),
+ B(Star), R(0),
+ B(LdaConstant), U8(110),
+ B(Star), R(0),
+ B(LdaConstant), U8(111),
+ B(Star), R(0),
+ B(LdaConstant), U8(112),
+ B(Star), R(0),
+ B(LdaConstant), U8(113),
+ B(Star), R(0),
+ B(LdaConstant), U8(114),
+ B(Star), R(0),
+ B(LdaConstant), U8(115),
+ B(Star), R(0),
+ B(LdaConstant), U8(116),
+ B(Star), R(0),
+ B(LdaConstant), U8(117),
+ B(Star), R(0),
+ B(LdaConstant), U8(118),
+ B(Star), R(0),
+ B(LdaConstant), U8(119),
+ B(Star), R(0),
+ B(LdaConstant), U8(120),
+ B(Star), R(0),
+ B(LdaConstant), U8(121),
+ B(Star), R(0),
+ B(LdaConstant), U8(122),
+ B(Star), R(0),
+ B(LdaConstant), U8(123),
+ B(Star), R(0),
+ B(LdaConstant), U8(124),
+ B(Star), R(0),
+ B(LdaConstant), U8(125),
+ B(Star), R(0),
+ B(LdaConstant), U8(126),
+ B(Star), R(0),
+ B(LdaConstant), U8(127),
+ B(Star), R(0),
+ B(LdaConstant), U8(128),
+ B(Star), R(0),
+ B(LdaConstant), U8(129),
+ B(Star), R(0),
+ B(LdaConstant), U8(130),
+ B(Star), R(0),
+ B(LdaConstant), U8(131),
+ B(Star), R(0),
+ B(LdaConstant), U8(132),
+ B(Star), R(0),
+ B(LdaConstant), U8(133),
+ B(Star), R(0),
+ B(LdaConstant), U8(134),
+ B(Star), R(0),
+ B(LdaConstant), U8(135),
+ B(Star), R(0),
+ B(LdaConstant), U8(136),
+ B(Star), R(0),
+ B(LdaConstant), U8(137),
+ B(Star), R(0),
+ B(LdaConstant), U8(138),
+ B(Star), R(0),
+ B(LdaConstant), U8(139),
+ B(Star), R(0),
+ B(LdaConstant), U8(140),
+ B(Star), R(0),
+ B(LdaConstant), U8(141),
+ B(Star), R(0),
+ B(LdaConstant), U8(142),
+ B(Star), R(0),
+ B(LdaConstant), U8(143),
+ B(Star), R(0),
+ B(LdaConstant), U8(144),
+ B(Star), R(0),
+ B(LdaConstant), U8(145),
+ B(Star), R(0),
+ B(LdaConstant), U8(146),
+ B(Star), R(0),
+ B(LdaConstant), U8(147),
+ B(Star), R(0),
+ B(LdaConstant), U8(148),
+ B(Star), R(0),
+ B(LdaConstant), U8(149),
+ B(Star), R(0),
+ B(LdaConstant), U8(150),
+ B(Star), R(0),
+ B(LdaConstant), U8(151),
+ B(Star), R(0),
+ B(LdaConstant), U8(152),
+ B(Star), R(0),
+ B(LdaConstant), U8(153),
+ B(Star), R(0),
+ B(LdaConstant), U8(154),
+ B(Star), R(0),
+ B(LdaConstant), U8(155),
+ B(Star), R(0),
+ B(LdaConstant), U8(156),
+ B(Star), R(0),
+ B(LdaConstant), U8(157),
+ B(Star), R(0),
+ B(LdaConstant), U8(158),
+ B(Star), R(0),
+ B(LdaConstant), U8(159),
+ B(Star), R(0),
+ B(LdaConstant), U8(160),
+ B(Star), R(0),
+ B(LdaConstant), U8(161),
+ B(Star), R(0),
+ B(LdaConstant), U8(162),
+ B(Star), R(0),
+ B(LdaConstant), U8(163),
+ B(Star), R(0),
+ B(LdaConstant), U8(164),
+ B(Star), R(0),
+ B(LdaConstant), U8(165),
+ B(Star), R(0),
+ B(LdaConstant), U8(166),
+ B(Star), R(0),
+ B(LdaConstant), U8(167),
+ B(Star), R(0),
+ B(LdaConstant), U8(168),
+ B(Star), R(0),
+ B(LdaConstant), U8(169),
+ B(Star), R(0),
+ B(LdaConstant), U8(170),
+ B(Star), R(0),
+ B(LdaConstant), U8(171),
+ B(Star), R(0),
+ B(LdaConstant), U8(172),
+ B(Star), R(0),
+ B(LdaConstant), U8(173),
+ B(Star), R(0),
+ B(LdaConstant), U8(174),
+ B(Star), R(0),
+ B(LdaConstant), U8(175),
+ B(Star), R(0),
+ B(LdaConstant), U8(176),
+ B(Star), R(0),
+ B(LdaConstant), U8(177),
+ B(Star), R(0),
+ B(LdaConstant), U8(178),
+ B(Star), R(0),
+ B(LdaConstant), U8(179),
+ B(Star), R(0),
+ B(LdaConstant), U8(180),
+ B(Star), R(0),
+ B(LdaConstant), U8(181),
+ B(Star), R(0),
+ B(LdaConstant), U8(182),
+ B(Star), R(0),
+ B(LdaConstant), U8(183),
+ B(Star), R(0),
+ B(LdaConstant), U8(184),
+ B(Star), R(0),
+ B(LdaConstant), U8(185),
+ B(Star), R(0),
+ B(LdaConstant), U8(186),
+ B(Star), R(0),
+ B(LdaConstant), U8(187),
+ B(Star), R(0),
+ B(LdaConstant), U8(188),
+ B(Star), R(0),
+ B(LdaConstant), U8(189),
+ B(Star), R(0),
+ B(LdaConstant), U8(190),
+ B(Star), R(0),
+ B(LdaConstant), U8(191),
+ B(Star), R(0),
+ B(LdaConstant), U8(192),
+ B(Star), R(0),
+ B(LdaConstant), U8(193),
+ B(Star), R(0),
+ B(LdaConstant), U8(194),
+ B(Star), R(0),
+ B(LdaConstant), U8(195),
+ B(Star), R(0),
+ B(LdaConstant), U8(196),
+ B(Star), R(0),
+ B(LdaConstant), U8(197),
+ B(Star), R(0),
+ B(LdaConstant), U8(198),
+ B(Star), R(0),
+ B(LdaConstant), U8(199),
+ B(Star), R(0),
+ B(LdaConstant), U8(200),
+ B(Star), R(0),
+ B(LdaConstant), U8(201),
+ B(Star), R(0),
+ B(LdaConstant), U8(202),
+ B(Star), R(0),
+ B(LdaConstant), U8(203),
+ B(Star), R(0),
+ B(LdaConstant), U8(204),
+ B(Star), R(0),
+ B(LdaConstant), U8(205),
+ B(Star), R(0),
+ B(LdaConstant), U8(206),
+ B(Star), R(0),
+ B(LdaConstant), U8(207),
+ B(Star), R(0),
+ B(LdaConstant), U8(208),
+ B(Star), R(0),
+ B(LdaConstant), U8(209),
+ B(Star), R(0),
+ B(LdaConstant), U8(210),
+ B(Star), R(0),
+ B(LdaConstant), U8(211),
+ B(Star), R(0),
+ B(LdaConstant), U8(212),
+ B(Star), R(0),
+ B(LdaConstant), U8(213),
+ B(Star), R(0),
+ B(LdaConstant), U8(214),
+ B(Star), R(0),
+ B(LdaConstant), U8(215),
+ B(Star), R(0),
+ B(LdaConstant), U8(216),
+ B(Star), R(0),
+ B(LdaConstant), U8(217),
+ B(Star), R(0),
+ B(LdaConstant), U8(218),
+ B(Star), R(0),
+ B(LdaConstant), U8(219),
+ B(Star), R(0),
+ B(LdaConstant), U8(220),
+ B(Star), R(0),
+ B(LdaConstant), U8(221),
+ B(Star), R(0),
+ B(LdaConstant), U8(222),
+ B(Star), R(0),
+ B(LdaConstant), U8(223),
+ B(Star), R(0),
+ B(LdaConstant), U8(224),
+ B(Star), R(0),
+ B(LdaConstant), U8(225),
+ B(Star), R(0),
+ B(LdaConstant), U8(226),
+ B(Star), R(0),
+ B(LdaConstant), U8(227),
+ B(Star), R(0),
+ B(LdaConstant), U8(228),
+ B(Star), R(0),
+ B(LdaConstant), U8(229),
+ B(Star), R(0),
+ B(LdaConstant), U8(230),
+ B(Star), R(0),
+ B(LdaConstant), U8(231),
+ B(Star), R(0),
+ B(LdaConstant), U8(232),
+ B(Star), R(0),
+ B(LdaConstant), U8(233),
+ B(Star), R(0),
+ B(LdaConstant), U8(234),
+ B(Star), R(0),
+ B(LdaConstant), U8(235),
+ B(Star), R(0),
+ B(LdaConstant), U8(236),
+ B(Star), R(0),
+ B(LdaConstant), U8(237),
+ B(Star), R(0),
+ B(LdaConstant), U8(238),
+ B(Star), R(0),
+ B(LdaConstant), U8(239),
+ B(Star), R(0),
+ B(LdaConstant), U8(240),
+ B(Star), R(0),
+ B(LdaConstant), U8(241),
+ B(Star), R(0),
+ B(LdaConstant), U8(242),
+ B(Star), R(0),
+ B(LdaConstant), U8(243),
+ B(Star), R(0),
+ B(LdaConstant), U8(244),
+ B(Star), R(0),
+ B(LdaConstant), U8(245),
+ B(Star), R(0),
+ B(LdaConstant), U8(246),
+ B(Star), R(0),
+ B(LdaConstant), U8(247),
+ B(Star), R(0),
+ B(LdaConstant), U8(248),
+ B(Star), R(0),
+ B(LdaConstant), U8(249),
+ B(Star), R(0),
+ B(LdaConstant), U8(250),
+ B(Star), R(0),
+ B(LdaConstant), U8(251),
+ B(Star), R(0),
+ B(LdaConstant), U8(252),
+ B(Star), R(0),
+ B(LdaConstant), U8(253),
+ B(Star), R(0),
+ B(LdaConstant), U8(254),
+ B(Star), R(0),
+ B(LdaConstant), U8(255),
+ B(Star), R(0),
+ B(Wide), B(LdaLookupSlot), U16(256),
+ B(Return),
+]
+constant pool: [
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var f;
+ var x = 1;
+ function f1() {
+ eval(\"function t() {\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"return typeof x;\" +
+ \"};\" +
+ \"f = t; f();\"
+ );
+ }
+ f1();
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 1031
+bytecodes: [
+ B(StackCheck),
+ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ B(LdaConstant), U8(1),
+ B(Star), R(0),
+ B(LdaConstant), U8(2),
+ B(Star), R(0),
+ B(LdaConstant), U8(3),
+ B(Star), R(0),
+ B(LdaConstant), U8(4),
+ B(Star), R(0),
+ B(LdaConstant), U8(5),
+ B(Star), R(0),
+ B(LdaConstant), U8(6),
+ B(Star), R(0),
+ B(LdaConstant), U8(7),
+ B(Star), R(0),
+ B(LdaConstant), U8(8),
+ B(Star), R(0),
+ B(LdaConstant), U8(9),
+ B(Star), R(0),
+ B(LdaConstant), U8(10),
+ B(Star), R(0),
+ B(LdaConstant), U8(11),
+ B(Star), R(0),
+ B(LdaConstant), U8(12),
+ B(Star), R(0),
+ B(LdaConstant), U8(13),
+ B(Star), R(0),
+ B(LdaConstant), U8(14),
+ B(Star), R(0),
+ B(LdaConstant), U8(15),
+ B(Star), R(0),
+ B(LdaConstant), U8(16),
+ B(Star), R(0),
+ B(LdaConstant), U8(17),
+ B(Star), R(0),
+ B(LdaConstant), U8(18),
+ B(Star), R(0),
+ B(LdaConstant), U8(19),
+ B(Star), R(0),
+ B(LdaConstant), U8(20),
+ B(Star), R(0),
+ B(LdaConstant), U8(21),
+ B(Star), R(0),
+ B(LdaConstant), U8(22),
+ B(Star), R(0),
+ B(LdaConstant), U8(23),
+ B(Star), R(0),
+ B(LdaConstant), U8(24),
+ B(Star), R(0),
+ B(LdaConstant), U8(25),
+ B(Star), R(0),
+ B(LdaConstant), U8(26),
+ B(Star), R(0),
+ B(LdaConstant), U8(27),
+ B(Star), R(0),
+ B(LdaConstant), U8(28),
+ B(Star), R(0),
+ B(LdaConstant), U8(29),
+ B(Star), R(0),
+ B(LdaConstant), U8(30),
+ B(Star), R(0),
+ B(LdaConstant), U8(31),
+ B(Star), R(0),
+ B(LdaConstant), U8(32),
+ B(Star), R(0),
+ B(LdaConstant), U8(33),
+ B(Star), R(0),
+ B(LdaConstant), U8(34),
+ B(Star), R(0),
+ B(LdaConstant), U8(35),
+ B(Star), R(0),
+ B(LdaConstant), U8(36),
+ B(Star), R(0),
+ B(LdaConstant), U8(37),
+ B(Star), R(0),
+ B(LdaConstant), U8(38),
+ B(Star), R(0),
+ B(LdaConstant), U8(39),
+ B(Star), R(0),
+ B(LdaConstant), U8(40),
+ B(Star), R(0),
+ B(LdaConstant), U8(41),
+ B(Star), R(0),
+ B(LdaConstant), U8(42),
+ B(Star), R(0),
+ B(LdaConstant), U8(43),
+ B(Star), R(0),
+ B(LdaConstant), U8(44),
+ B(Star), R(0),
+ B(LdaConstant), U8(45),
+ B(Star), R(0),
+ B(LdaConstant), U8(46),
+ B(Star), R(0),
+ B(LdaConstant), U8(47),
+ B(Star), R(0),
+ B(LdaConstant), U8(48),
+ B(Star), R(0),
+ B(LdaConstant), U8(49),
+ B(Star), R(0),
+ B(LdaConstant), U8(50),
+ B(Star), R(0),
+ B(LdaConstant), U8(51),
+ B(Star), R(0),
+ B(LdaConstant), U8(52),
+ B(Star), R(0),
+ B(LdaConstant), U8(53),
+ B(Star), R(0),
+ B(LdaConstant), U8(54),
+ B(Star), R(0),
+ B(LdaConstant), U8(55),
+ B(Star), R(0),
+ B(LdaConstant), U8(56),
+ B(Star), R(0),
+ B(LdaConstant), U8(57),
+ B(Star), R(0),
+ B(LdaConstant), U8(58),
+ B(Star), R(0),
+ B(LdaConstant), U8(59),
+ B(Star), R(0),
+ B(LdaConstant), U8(60),
+ B(Star), R(0),
+ B(LdaConstant), U8(61),
+ B(Star), R(0),
+ B(LdaConstant), U8(62),
+ B(Star), R(0),
+ B(LdaConstant), U8(63),
+ B(Star), R(0),
+ B(LdaConstant), U8(64),
+ B(Star), R(0),
+ B(LdaConstant), U8(65),
+ B(Star), R(0),
+ B(LdaConstant), U8(66),
+ B(Star), R(0),
+ B(LdaConstant), U8(67),
+ B(Star), R(0),
+ B(LdaConstant), U8(68),
+ B(Star), R(0),
+ B(LdaConstant), U8(69),
+ B(Star), R(0),
+ B(LdaConstant), U8(70),
+ B(Star), R(0),
+ B(LdaConstant), U8(71),
+ B(Star), R(0),
+ B(LdaConstant), U8(72),
+ B(Star), R(0),
+ B(LdaConstant), U8(73),
+ B(Star), R(0),
+ B(LdaConstant), U8(74),
+ B(Star), R(0),
+ B(LdaConstant), U8(75),
+ B(Star), R(0),
+ B(LdaConstant), U8(76),
+ B(Star), R(0),
+ B(LdaConstant), U8(77),
+ B(Star), R(0),
+ B(LdaConstant), U8(78),
+ B(Star), R(0),
+ B(LdaConstant), U8(79),
+ B(Star), R(0),
+ B(LdaConstant), U8(80),
+ B(Star), R(0),
+ B(LdaConstant), U8(81),
+ B(Star), R(0),
+ B(LdaConstant), U8(82),
+ B(Star), R(0),
+ B(LdaConstant), U8(83),
+ B(Star), R(0),
+ B(LdaConstant), U8(84),
+ B(Star), R(0),
+ B(LdaConstant), U8(85),
+ B(Star), R(0),
+ B(LdaConstant), U8(86),
+ B(Star), R(0),
+ B(LdaConstant), U8(87),
+ B(Star), R(0),
+ B(LdaConstant), U8(88),
+ B(Star), R(0),
+ B(LdaConstant), U8(89),
+ B(Star), R(0),
+ B(LdaConstant), U8(90),
+ B(Star), R(0),
+ B(LdaConstant), U8(91),
+ B(Star), R(0),
+ B(LdaConstant), U8(92),
+ B(Star), R(0),
+ B(LdaConstant), U8(93),
+ B(Star), R(0),
+ B(LdaConstant), U8(94),
+ B(Star), R(0),
+ B(LdaConstant), U8(95),
+ B(Star), R(0),
+ B(LdaConstant), U8(96),
+ B(Star), R(0),
+ B(LdaConstant), U8(97),
+ B(Star), R(0),
+ B(LdaConstant), U8(98),
+ B(Star), R(0),
+ B(LdaConstant), U8(99),
+ B(Star), R(0),
+ B(LdaConstant), U8(100),
+ B(Star), R(0),
+ B(LdaConstant), U8(101),
+ B(Star), R(0),
+ B(LdaConstant), U8(102),
+ B(Star), R(0),
+ B(LdaConstant), U8(103),
+ B(Star), R(0),
+ B(LdaConstant), U8(104),
+ B(Star), R(0),
+ B(LdaConstant), U8(105),
+ B(Star), R(0),
+ B(LdaConstant), U8(106),
+ B(Star), R(0),
+ B(LdaConstant), U8(107),
+ B(Star), R(0),
+ B(LdaConstant), U8(108),
+ B(Star), R(0),
+ B(LdaConstant), U8(109),
+ B(Star), R(0),
+ B(LdaConstant), U8(110),
+ B(Star), R(0),
+ B(LdaConstant), U8(111),
+ B(Star), R(0),
+ B(LdaConstant), U8(112),
+ B(Star), R(0),
+ B(LdaConstant), U8(113),
+ B(Star), R(0),
+ B(LdaConstant), U8(114),
+ B(Star), R(0),
+ B(LdaConstant), U8(115),
+ B(Star), R(0),
+ B(LdaConstant), U8(116),
+ B(Star), R(0),
+ B(LdaConstant), U8(117),
+ B(Star), R(0),
+ B(LdaConstant), U8(118),
+ B(Star), R(0),
+ B(LdaConstant), U8(119),
+ B(Star), R(0),
+ B(LdaConstant), U8(120),
+ B(Star), R(0),
+ B(LdaConstant), U8(121),
+ B(Star), R(0),
+ B(LdaConstant), U8(122),
+ B(Star), R(0),
+ B(LdaConstant), U8(123),
+ B(Star), R(0),
+ B(LdaConstant), U8(124),
+ B(Star), R(0),
+ B(LdaConstant), U8(125),
+ B(Star), R(0),
+ B(LdaConstant), U8(126),
+ B(Star), R(0),
+ B(LdaConstant), U8(127),
+ B(Star), R(0),
+ B(LdaConstant), U8(128),
+ B(Star), R(0),
+ B(LdaConstant), U8(129),
+ B(Star), R(0),
+ B(LdaConstant), U8(130),
+ B(Star), R(0),
+ B(LdaConstant), U8(131),
+ B(Star), R(0),
+ B(LdaConstant), U8(132),
+ B(Star), R(0),
+ B(LdaConstant), U8(133),
+ B(Star), R(0),
+ B(LdaConstant), U8(134),
+ B(Star), R(0),
+ B(LdaConstant), U8(135),
+ B(Star), R(0),
+ B(LdaConstant), U8(136),
+ B(Star), R(0),
+ B(LdaConstant), U8(137),
+ B(Star), R(0),
+ B(LdaConstant), U8(138),
+ B(Star), R(0),
+ B(LdaConstant), U8(139),
+ B(Star), R(0),
+ B(LdaConstant), U8(140),
+ B(Star), R(0),
+ B(LdaConstant), U8(141),
+ B(Star), R(0),
+ B(LdaConstant), U8(142),
+ B(Star), R(0),
+ B(LdaConstant), U8(143),
+ B(Star), R(0),
+ B(LdaConstant), U8(144),
+ B(Star), R(0),
+ B(LdaConstant), U8(145),
+ B(Star), R(0),
+ B(LdaConstant), U8(146),
+ B(Star), R(0),
+ B(LdaConstant), U8(147),
+ B(Star), R(0),
+ B(LdaConstant), U8(148),
+ B(Star), R(0),
+ B(LdaConstant), U8(149),
+ B(Star), R(0),
+ B(LdaConstant), U8(150),
+ B(Star), R(0),
+ B(LdaConstant), U8(151),
+ B(Star), R(0),
+ B(LdaConstant), U8(152),
+ B(Star), R(0),
+ B(LdaConstant), U8(153),
+ B(Star), R(0),
+ B(LdaConstant), U8(154),
+ B(Star), R(0),
+ B(LdaConstant), U8(155),
+ B(Star), R(0),
+ B(LdaConstant), U8(156),
+ B(Star), R(0),
+ B(LdaConstant), U8(157),
+ B(Star), R(0),
+ B(LdaConstant), U8(158),
+ B(Star), R(0),
+ B(LdaConstant), U8(159),
+ B(Star), R(0),
+ B(LdaConstant), U8(160),
+ B(Star), R(0),
+ B(LdaConstant), U8(161),
+ B(Star), R(0),
+ B(LdaConstant), U8(162),
+ B(Star), R(0),
+ B(LdaConstant), U8(163),
+ B(Star), R(0),
+ B(LdaConstant), U8(164),
+ B(Star), R(0),
+ B(LdaConstant), U8(165),
+ B(Star), R(0),
+ B(LdaConstant), U8(166),
+ B(Star), R(0),
+ B(LdaConstant), U8(167),
+ B(Star), R(0),
+ B(LdaConstant), U8(168),
+ B(Star), R(0),
+ B(LdaConstant), U8(169),
+ B(Star), R(0),
+ B(LdaConstant), U8(170),
+ B(Star), R(0),
+ B(LdaConstant), U8(171),
+ B(Star), R(0),
+ B(LdaConstant), U8(172),
+ B(Star), R(0),
+ B(LdaConstant), U8(173),
+ B(Star), R(0),
+ B(LdaConstant), U8(174),
+ B(Star), R(0),
+ B(LdaConstant), U8(175),
+ B(Star), R(0),
+ B(LdaConstant), U8(176),
+ B(Star), R(0),
+ B(LdaConstant), U8(177),
+ B(Star), R(0),
+ B(LdaConstant), U8(178),
+ B(Star), R(0),
+ B(LdaConstant), U8(179),
+ B(Star), R(0),
+ B(LdaConstant), U8(180),
+ B(Star), R(0),
+ B(LdaConstant), U8(181),
+ B(Star), R(0),
+ B(LdaConstant), U8(182),
+ B(Star), R(0),
+ B(LdaConstant), U8(183),
+ B(Star), R(0),
+ B(LdaConstant), U8(184),
+ B(Star), R(0),
+ B(LdaConstant), U8(185),
+ B(Star), R(0),
+ B(LdaConstant), U8(186),
+ B(Star), R(0),
+ B(LdaConstant), U8(187),
+ B(Star), R(0),
+ B(LdaConstant), U8(188),
+ B(Star), R(0),
+ B(LdaConstant), U8(189),
+ B(Star), R(0),
+ B(LdaConstant), U8(190),
+ B(Star), R(0),
+ B(LdaConstant), U8(191),
+ B(Star), R(0),
+ B(LdaConstant), U8(192),
+ B(Star), R(0),
+ B(LdaConstant), U8(193),
+ B(Star), R(0),
+ B(LdaConstant), U8(194),
+ B(Star), R(0),
+ B(LdaConstant), U8(195),
+ B(Star), R(0),
+ B(LdaConstant), U8(196),
+ B(Star), R(0),
+ B(LdaConstant), U8(197),
+ B(Star), R(0),
+ B(LdaConstant), U8(198),
+ B(Star), R(0),
+ B(LdaConstant), U8(199),
+ B(Star), R(0),
+ B(LdaConstant), U8(200),
+ B(Star), R(0),
+ B(LdaConstant), U8(201),
+ B(Star), R(0),
+ B(LdaConstant), U8(202),
+ B(Star), R(0),
+ B(LdaConstant), U8(203),
+ B(Star), R(0),
+ B(LdaConstant), U8(204),
+ B(Star), R(0),
+ B(LdaConstant), U8(205),
+ B(Star), R(0),
+ B(LdaConstant), U8(206),
+ B(Star), R(0),
+ B(LdaConstant), U8(207),
+ B(Star), R(0),
+ B(LdaConstant), U8(208),
+ B(Star), R(0),
+ B(LdaConstant), U8(209),
+ B(Star), R(0),
+ B(LdaConstant), U8(210),
+ B(Star), R(0),
+ B(LdaConstant), U8(211),
+ B(Star), R(0),
+ B(LdaConstant), U8(212),
+ B(Star), R(0),
+ B(LdaConstant), U8(213),
+ B(Star), R(0),
+ B(LdaConstant), U8(214),
+ B(Star), R(0),
+ B(LdaConstant), U8(215),
+ B(Star), R(0),
+ B(LdaConstant), U8(216),
+ B(Star), R(0),
+ B(LdaConstant), U8(217),
+ B(Star), R(0),
+ B(LdaConstant), U8(218),
+ B(Star), R(0),
+ B(LdaConstant), U8(219),
+ B(Star), R(0),
+ B(LdaConstant), U8(220),
+ B(Star), R(0),
+ B(LdaConstant), U8(221),
+ B(Star), R(0),
+ B(LdaConstant), U8(222),
+ B(Star), R(0),
+ B(LdaConstant), U8(223),
+ B(Star), R(0),
+ B(LdaConstant), U8(224),
+ B(Star), R(0),
+ B(LdaConstant), U8(225),
+ B(Star), R(0),
+ B(LdaConstant), U8(226),
+ B(Star), R(0),
+ B(LdaConstant), U8(227),
+ B(Star), R(0),
+ B(LdaConstant), U8(228),
+ B(Star), R(0),
+ B(LdaConstant), U8(229),
+ B(Star), R(0),
+ B(LdaConstant), U8(230),
+ B(Star), R(0),
+ B(LdaConstant), U8(231),
+ B(Star), R(0),
+ B(LdaConstant), U8(232),
+ B(Star), R(0),
+ B(LdaConstant), U8(233),
+ B(Star), R(0),
+ B(LdaConstant), U8(234),
+ B(Star), R(0),
+ B(LdaConstant), U8(235),
+ B(Star), R(0),
+ B(LdaConstant), U8(236),
+ B(Star), R(0),
+ B(LdaConstant), U8(237),
+ B(Star), R(0),
+ B(LdaConstant), U8(238),
+ B(Star), R(0),
+ B(LdaConstant), U8(239),
+ B(Star), R(0),
+ B(LdaConstant), U8(240),
+ B(Star), R(0),
+ B(LdaConstant), U8(241),
+ B(Star), R(0),
+ B(LdaConstant), U8(242),
+ B(Star), R(0),
+ B(LdaConstant), U8(243),
+ B(Star), R(0),
+ B(LdaConstant), U8(244),
+ B(Star), R(0),
+ B(LdaConstant), U8(245),
+ B(Star), R(0),
+ B(LdaConstant), U8(246),
+ B(Star), R(0),
+ B(LdaConstant), U8(247),
+ B(Star), R(0),
+ B(LdaConstant), U8(248),
+ B(Star), R(0),
+ B(LdaConstant), U8(249),
+ B(Star), R(0),
+ B(LdaConstant), U8(250),
+ B(Star), R(0),
+ B(LdaConstant), U8(251),
+ B(Star), R(0),
+ B(LdaConstant), U8(252),
+ B(Star), R(0),
+ B(LdaConstant), U8(253),
+ B(Star), R(0),
+ B(LdaConstant), U8(254),
+ B(Star), R(0),
+ B(LdaConstant), U8(255),
+ B(Star), R(0),
+ B(Wide), B(LdaLookupSlotInsideTypeof), U16(256),
+ B(TypeOf),
+ B(Return),
+]
+constant pool: [
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var f;
+ var x = 1;
+ function f1() {
+ eval(\"function t() {\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"x = 10;\" +
+ \"};\" +
+ \"f = t; f();\"
+ );
+ }
+ f1();
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 1033
+bytecodes: [
+ B(StackCheck),
+ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ B(LdaConstant), U8(1),
+ B(Star), R(0),
+ B(LdaConstant), U8(2),
+ B(Star), R(0),
+ B(LdaConstant), U8(3),
+ B(Star), R(0),
+ B(LdaConstant), U8(4),
+ B(Star), R(0),
+ B(LdaConstant), U8(5),
+ B(Star), R(0),
+ B(LdaConstant), U8(6),
+ B(Star), R(0),
+ B(LdaConstant), U8(7),
+ B(Star), R(0),
+ B(LdaConstant), U8(8),
+ B(Star), R(0),
+ B(LdaConstant), U8(9),
+ B(Star), R(0),
+ B(LdaConstant), U8(10),
+ B(Star), R(0),
+ B(LdaConstant), U8(11),
+ B(Star), R(0),
+ B(LdaConstant), U8(12),
+ B(Star), R(0),
+ B(LdaConstant), U8(13),
+ B(Star), R(0),
+ B(LdaConstant), U8(14),
+ B(Star), R(0),
+ B(LdaConstant), U8(15),
+ B(Star), R(0),
+ B(LdaConstant), U8(16),
+ B(Star), R(0),
+ B(LdaConstant), U8(17),
+ B(Star), R(0),
+ B(LdaConstant), U8(18),
+ B(Star), R(0),
+ B(LdaConstant), U8(19),
+ B(Star), R(0),
+ B(LdaConstant), U8(20),
+ B(Star), R(0),
+ B(LdaConstant), U8(21),
+ B(Star), R(0),
+ B(LdaConstant), U8(22),
+ B(Star), R(0),
+ B(LdaConstant), U8(23),
+ B(Star), R(0),
+ B(LdaConstant), U8(24),
+ B(Star), R(0),
+ B(LdaConstant), U8(25),
+ B(Star), R(0),
+ B(LdaConstant), U8(26),
+ B(Star), R(0),
+ B(LdaConstant), U8(27),
+ B(Star), R(0),
+ B(LdaConstant), U8(28),
+ B(Star), R(0),
+ B(LdaConstant), U8(29),
+ B(Star), R(0),
+ B(LdaConstant), U8(30),
+ B(Star), R(0),
+ B(LdaConstant), U8(31),
+ B(Star), R(0),
+ B(LdaConstant), U8(32),
+ B(Star), R(0),
+ B(LdaConstant), U8(33),
+ B(Star), R(0),
+ B(LdaConstant), U8(34),
+ B(Star), R(0),
+ B(LdaConstant), U8(35),
+ B(Star), R(0),
+ B(LdaConstant), U8(36),
+ B(Star), R(0),
+ B(LdaConstant), U8(37),
+ B(Star), R(0),
+ B(LdaConstant), U8(38),
+ B(Star), R(0),
+ B(LdaConstant), U8(39),
+ B(Star), R(0),
+ B(LdaConstant), U8(40),
+ B(Star), R(0),
+ B(LdaConstant), U8(41),
+ B(Star), R(0),
+ B(LdaConstant), U8(42),
+ B(Star), R(0),
+ B(LdaConstant), U8(43),
+ B(Star), R(0),
+ B(LdaConstant), U8(44),
+ B(Star), R(0),
+ B(LdaConstant), U8(45),
+ B(Star), R(0),
+ B(LdaConstant), U8(46),
+ B(Star), R(0),
+ B(LdaConstant), U8(47),
+ B(Star), R(0),
+ B(LdaConstant), U8(48),
+ B(Star), R(0),
+ B(LdaConstant), U8(49),
+ B(Star), R(0),
+ B(LdaConstant), U8(50),
+ B(Star), R(0),
+ B(LdaConstant), U8(51),
+ B(Star), R(0),
+ B(LdaConstant), U8(52),
+ B(Star), R(0),
+ B(LdaConstant), U8(53),
+ B(Star), R(0),
+ B(LdaConstant), U8(54),
+ B(Star), R(0),
+ B(LdaConstant), U8(55),
+ B(Star), R(0),
+ B(LdaConstant), U8(56),
+ B(Star), R(0),
+ B(LdaConstant), U8(57),
+ B(Star), R(0),
+ B(LdaConstant), U8(58),
+ B(Star), R(0),
+ B(LdaConstant), U8(59),
+ B(Star), R(0),
+ B(LdaConstant), U8(60),
+ B(Star), R(0),
+ B(LdaConstant), U8(61),
+ B(Star), R(0),
+ B(LdaConstant), U8(62),
+ B(Star), R(0),
+ B(LdaConstant), U8(63),
+ B(Star), R(0),
+ B(LdaConstant), U8(64),
+ B(Star), R(0),
+ B(LdaConstant), U8(65),
+ B(Star), R(0),
+ B(LdaConstant), U8(66),
+ B(Star), R(0),
+ B(LdaConstant), U8(67),
+ B(Star), R(0),
+ B(LdaConstant), U8(68),
+ B(Star), R(0),
+ B(LdaConstant), U8(69),
+ B(Star), R(0),
+ B(LdaConstant), U8(70),
+ B(Star), R(0),
+ B(LdaConstant), U8(71),
+ B(Star), R(0),
+ B(LdaConstant), U8(72),
+ B(Star), R(0),
+ B(LdaConstant), U8(73),
+ B(Star), R(0),
+ B(LdaConstant), U8(74),
+ B(Star), R(0),
+ B(LdaConstant), U8(75),
+ B(Star), R(0),
+ B(LdaConstant), U8(76),
+ B(Star), R(0),
+ B(LdaConstant), U8(77),
+ B(Star), R(0),
+ B(LdaConstant), U8(78),
+ B(Star), R(0),
+ B(LdaConstant), U8(79),
+ B(Star), R(0),
+ B(LdaConstant), U8(80),
+ B(Star), R(0),
+ B(LdaConstant), U8(81),
+ B(Star), R(0),
+ B(LdaConstant), U8(82),
+ B(Star), R(0),
+ B(LdaConstant), U8(83),
+ B(Star), R(0),
+ B(LdaConstant), U8(84),
+ B(Star), R(0),
+ B(LdaConstant), U8(85),
+ B(Star), R(0),
+ B(LdaConstant), U8(86),
+ B(Star), R(0),
+ B(LdaConstant), U8(87),
+ B(Star), R(0),
+ B(LdaConstant), U8(88),
+ B(Star), R(0),
+ B(LdaConstant), U8(89),
+ B(Star), R(0),
+ B(LdaConstant), U8(90),
+ B(Star), R(0),
+ B(LdaConstant), U8(91),
+ B(Star), R(0),
+ B(LdaConstant), U8(92),
+ B(Star), R(0),
+ B(LdaConstant), U8(93),
+ B(Star), R(0),
+ B(LdaConstant), U8(94),
+ B(Star), R(0),
+ B(LdaConstant), U8(95),
+ B(Star), R(0),
+ B(LdaConstant), U8(96),
+ B(Star), R(0),
+ B(LdaConstant), U8(97),
+ B(Star), R(0),
+ B(LdaConstant), U8(98),
+ B(Star), R(0),
+ B(LdaConstant), U8(99),
+ B(Star), R(0),
+ B(LdaConstant), U8(100),
+ B(Star), R(0),
+ B(LdaConstant), U8(101),
+ B(Star), R(0),
+ B(LdaConstant), U8(102),
+ B(Star), R(0),
+ B(LdaConstant), U8(103),
+ B(Star), R(0),
+ B(LdaConstant), U8(104),
+ B(Star), R(0),
+ B(LdaConstant), U8(105),
+ B(Star), R(0),
+ B(LdaConstant), U8(106),
+ B(Star), R(0),
+ B(LdaConstant), U8(107),
+ B(Star), R(0),
+ B(LdaConstant), U8(108),
+ B(Star), R(0),
+ B(LdaConstant), U8(109),
+ B(Star), R(0),
+ B(LdaConstant), U8(110),
+ B(Star), R(0),
+ B(LdaConstant), U8(111),
+ B(Star), R(0),
+ B(LdaConstant), U8(112),
+ B(Star), R(0),
+ B(LdaConstant), U8(113),
+ B(Star), R(0),
+ B(LdaConstant), U8(114),
+ B(Star), R(0),
+ B(LdaConstant), U8(115),
+ B(Star), R(0),
+ B(LdaConstant), U8(116),
+ B(Star), R(0),
+ B(LdaConstant), U8(117),
+ B(Star), R(0),
+ B(LdaConstant), U8(118),
+ B(Star), R(0),
+ B(LdaConstant), U8(119),
+ B(Star), R(0),
+ B(LdaConstant), U8(120),
+ B(Star), R(0),
+ B(LdaConstant), U8(121),
+ B(Star), R(0),
+ B(LdaConstant), U8(122),
+ B(Star), R(0),
+ B(LdaConstant), U8(123),
+ B(Star), R(0),
+ B(LdaConstant), U8(124),
+ B(Star), R(0),
+ B(LdaConstant), U8(125),
+ B(Star), R(0),
+ B(LdaConstant), U8(126),
+ B(Star), R(0),
+ B(LdaConstant), U8(127),
+ B(Star), R(0),
+ B(LdaConstant), U8(128),
+ B(Star), R(0),
+ B(LdaConstant), U8(129),
+ B(Star), R(0),
+ B(LdaConstant), U8(130),
+ B(Star), R(0),
+ B(LdaConstant), U8(131),
+ B(Star), R(0),
+ B(LdaConstant), U8(132),
+ B(Star), R(0),
+ B(LdaConstant), U8(133),
+ B(Star), R(0),
+ B(LdaConstant), U8(134),
+ B(Star), R(0),
+ B(LdaConstant), U8(135),
+ B(Star), R(0),
+ B(LdaConstant), U8(136),
+ B(Star), R(0),
+ B(LdaConstant), U8(137),
+ B(Star), R(0),
+ B(LdaConstant), U8(138),
+ B(Star), R(0),
+ B(LdaConstant), U8(139),
+ B(Star), R(0),
+ B(LdaConstant), U8(140),
+ B(Star), R(0),
+ B(LdaConstant), U8(141),
+ B(Star), R(0),
+ B(LdaConstant), U8(142),
+ B(Star), R(0),
+ B(LdaConstant), U8(143),
+ B(Star), R(0),
+ B(LdaConstant), U8(144),
+ B(Star), R(0),
+ B(LdaConstant), U8(145),
+ B(Star), R(0),
+ B(LdaConstant), U8(146),
+ B(Star), R(0),
+ B(LdaConstant), U8(147),
+ B(Star), R(0),
+ B(LdaConstant), U8(148),
+ B(Star), R(0),
+ B(LdaConstant), U8(149),
+ B(Star), R(0),
+ B(LdaConstant), U8(150),
+ B(Star), R(0),
+ B(LdaConstant), U8(151),
+ B(Star), R(0),
+ B(LdaConstant), U8(152),
+ B(Star), R(0),
+ B(LdaConstant), U8(153),
+ B(Star), R(0),
+ B(LdaConstant), U8(154),
+ B(Star), R(0),
+ B(LdaConstant), U8(155),
+ B(Star), R(0),
+ B(LdaConstant), U8(156),
+ B(Star), R(0),
+ B(LdaConstant), U8(157),
+ B(Star), R(0),
+ B(LdaConstant), U8(158),
+ B(Star), R(0),
+ B(LdaConstant), U8(159),
+ B(Star), R(0),
+ B(LdaConstant), U8(160),
+ B(Star), R(0),
+ B(LdaConstant), U8(161),
+ B(Star), R(0),
+ B(LdaConstant), U8(162),
+ B(Star), R(0),
+ B(LdaConstant), U8(163),
+ B(Star), R(0),
+ B(LdaConstant), U8(164),
+ B(Star), R(0),
+ B(LdaConstant), U8(165),
+ B(Star), R(0),
+ B(LdaConstant), U8(166),
+ B(Star), R(0),
+ B(LdaConstant), U8(167),
+ B(Star), R(0),
+ B(LdaConstant), U8(168),
+ B(Star), R(0),
+ B(LdaConstant), U8(169),
+ B(Star), R(0),
+ B(LdaConstant), U8(170),
+ B(Star), R(0),
+ B(LdaConstant), U8(171),
+ B(Star), R(0),
+ B(LdaConstant), U8(172),
+ B(Star), R(0),
+ B(LdaConstant), U8(173),
+ B(Star), R(0),
+ B(LdaConstant), U8(174),
+ B(Star), R(0),
+ B(LdaConstant), U8(175),
+ B(Star), R(0),
+ B(LdaConstant), U8(176),
+ B(Star), R(0),
+ B(LdaConstant), U8(177),
+ B(Star), R(0),
+ B(LdaConstant), U8(178),
+ B(Star), R(0),
+ B(LdaConstant), U8(179),
+ B(Star), R(0),
+ B(LdaConstant), U8(180),
+ B(Star), R(0),
+ B(LdaConstant), U8(181),
+ B(Star), R(0),
+ B(LdaConstant), U8(182),
+ B(Star), R(0),
+ B(LdaConstant), U8(183),
+ B(Star), R(0),
+ B(LdaConstant), U8(184),
+ B(Star), R(0),
+ B(LdaConstant), U8(185),
+ B(Star), R(0),
+ B(LdaConstant), U8(186),
+ B(Star), R(0),
+ B(LdaConstant), U8(187),
+ B(Star), R(0),
+ B(LdaConstant), U8(188),
+ B(Star), R(0),
+ B(LdaConstant), U8(189),
+ B(Star), R(0),
+ B(LdaConstant), U8(190),
+ B(Star), R(0),
+ B(LdaConstant), U8(191),
+ B(Star), R(0),
+ B(LdaConstant), U8(192),
+ B(Star), R(0),
+ B(LdaConstant), U8(193),
+ B(Star), R(0),
+ B(LdaConstant), U8(194),
+ B(Star), R(0),
+ B(LdaConstant), U8(195),
+ B(Star), R(0),
+ B(LdaConstant), U8(196),
+ B(Star), R(0),
+ B(LdaConstant), U8(197),
+ B(Star), R(0),
+ B(LdaConstant), U8(198),
+ B(Star), R(0),
+ B(LdaConstant), U8(199),
+ B(Star), R(0),
+ B(LdaConstant), U8(200),
+ B(Star), R(0),
+ B(LdaConstant), U8(201),
+ B(Star), R(0),
+ B(LdaConstant), U8(202),
+ B(Star), R(0),
+ B(LdaConstant), U8(203),
+ B(Star), R(0),
+ B(LdaConstant), U8(204),
+ B(Star), R(0),
+ B(LdaConstant), U8(205),
+ B(Star), R(0),
+ B(LdaConstant), U8(206),
+ B(Star), R(0),
+ B(LdaConstant), U8(207),
+ B(Star), R(0),
+ B(LdaConstant), U8(208),
+ B(Star), R(0),
+ B(LdaConstant), U8(209),
+ B(Star), R(0),
+ B(LdaConstant), U8(210),
+ B(Star), R(0),
+ B(LdaConstant), U8(211),
+ B(Star), R(0),
+ B(LdaConstant), U8(212),
+ B(Star), R(0),
+ B(LdaConstant), U8(213),
+ B(Star), R(0),
+ B(LdaConstant), U8(214),
+ B(Star), R(0),
+ B(LdaConstant), U8(215),
+ B(Star), R(0),
+ B(LdaConstant), U8(216),
+ B(Star), R(0),
+ B(LdaConstant), U8(217),
+ B(Star), R(0),
+ B(LdaConstant), U8(218),
+ B(Star), R(0),
+ B(LdaConstant), U8(219),
+ B(Star), R(0),
+ B(LdaConstant), U8(220),
+ B(Star), R(0),
+ B(LdaConstant), U8(221),
+ B(Star), R(0),
+ B(LdaConstant), U8(222),
+ B(Star), R(0),
+ B(LdaConstant), U8(223),
+ B(Star), R(0),
+ B(LdaConstant), U8(224),
+ B(Star), R(0),
+ B(LdaConstant), U8(225),
+ B(Star), R(0),
+ B(LdaConstant), U8(226),
+ B(Star), R(0),
+ B(LdaConstant), U8(227),
+ B(Star), R(0),
+ B(LdaConstant), U8(228),
+ B(Star), R(0),
+ B(LdaConstant), U8(229),
+ B(Star), R(0),
+ B(LdaConstant), U8(230),
+ B(Star), R(0),
+ B(LdaConstant), U8(231),
+ B(Star), R(0),
+ B(LdaConstant), U8(232),
+ B(Star), R(0),
+ B(LdaConstant), U8(233),
+ B(Star), R(0),
+ B(LdaConstant), U8(234),
+ B(Star), R(0),
+ B(LdaConstant), U8(235),
+ B(Star), R(0),
+ B(LdaConstant), U8(236),
+ B(Star), R(0),
+ B(LdaConstant), U8(237),
+ B(Star), R(0),
+ B(LdaConstant), U8(238),
+ B(Star), R(0),
+ B(LdaConstant), U8(239),
+ B(Star), R(0),
+ B(LdaConstant), U8(240),
+ B(Star), R(0),
+ B(LdaConstant), U8(241),
+ B(Star), R(0),
+ B(LdaConstant), U8(242),
+ B(Star), R(0),
+ B(LdaConstant), U8(243),
+ B(Star), R(0),
+ B(LdaConstant), U8(244),
+ B(Star), R(0),
+ B(LdaConstant), U8(245),
+ B(Star), R(0),
+ B(LdaConstant), U8(246),
+ B(Star), R(0),
+ B(LdaConstant), U8(247),
+ B(Star), R(0),
+ B(LdaConstant), U8(248),
+ B(Star), R(0),
+ B(LdaConstant), U8(249),
+ B(Star), R(0),
+ B(LdaConstant), U8(250),
+ B(Star), R(0),
+ B(LdaConstant), U8(251),
+ B(Star), R(0),
+ B(LdaConstant), U8(252),
+ B(Star), R(0),
+ B(LdaConstant), U8(253),
+ B(Star), R(0),
+ B(LdaConstant), U8(254),
+ B(Star), R(0),
+ B(LdaConstant), U8(255),
+ B(Star), R(0),
+ B(LdaSmi), U8(10),
+ B(Wide), B(StaLookupSlotSloppy), U16(256),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var f;
+ var x = 1;
+ function f1() {
+ eval(\"function t() {\" +
+ \"'use strict';\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"var y = 2.3;\" +
+ \"x = 10;\" +
+ \"};\" +
+ \"f = t; f();\"
+ );
+ }
+ f1();
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 1033
+bytecodes: [
+ B(StackCheck),
+ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ B(LdaConstant), U8(1),
+ B(Star), R(0),
+ B(LdaConstant), U8(2),
+ B(Star), R(0),
+ B(LdaConstant), U8(3),
+ B(Star), R(0),
+ B(LdaConstant), U8(4),
+ B(Star), R(0),
+ B(LdaConstant), U8(5),
+ B(Star), R(0),
+ B(LdaConstant), U8(6),
+ B(Star), R(0),
+ B(LdaConstant), U8(7),
+ B(Star), R(0),
+ B(LdaConstant), U8(8),
+ B(Star), R(0),
+ B(LdaConstant), U8(9),
+ B(Star), R(0),
+ B(LdaConstant), U8(10),
+ B(Star), R(0),
+ B(LdaConstant), U8(11),
+ B(Star), R(0),
+ B(LdaConstant), U8(12),
+ B(Star), R(0),
+ B(LdaConstant), U8(13),
+ B(Star), R(0),
+ B(LdaConstant), U8(14),
+ B(Star), R(0),
+ B(LdaConstant), U8(15),
+ B(Star), R(0),
+ B(LdaConstant), U8(16),
+ B(Star), R(0),
+ B(LdaConstant), U8(17),
+ B(Star), R(0),
+ B(LdaConstant), U8(18),
+ B(Star), R(0),
+ B(LdaConstant), U8(19),
+ B(Star), R(0),
+ B(LdaConstant), U8(20),
+ B(Star), R(0),
+ B(LdaConstant), U8(21),
+ B(Star), R(0),
+ B(LdaConstant), U8(22),
+ B(Star), R(0),
+ B(LdaConstant), U8(23),
+ B(Star), R(0),
+ B(LdaConstant), U8(24),
+ B(Star), R(0),
+ B(LdaConstant), U8(25),
+ B(Star), R(0),
+ B(LdaConstant), U8(26),
+ B(Star), R(0),
+ B(LdaConstant), U8(27),
+ B(Star), R(0),
+ B(LdaConstant), U8(28),
+ B(Star), R(0),
+ B(LdaConstant), U8(29),
+ B(Star), R(0),
+ B(LdaConstant), U8(30),
+ B(Star), R(0),
+ B(LdaConstant), U8(31),
+ B(Star), R(0),
+ B(LdaConstant), U8(32),
+ B(Star), R(0),
+ B(LdaConstant), U8(33),
+ B(Star), R(0),
+ B(LdaConstant), U8(34),
+ B(Star), R(0),
+ B(LdaConstant), U8(35),
+ B(Star), R(0),
+ B(LdaConstant), U8(36),
+ B(Star), R(0),
+ B(LdaConstant), U8(37),
+ B(Star), R(0),
+ B(LdaConstant), U8(38),
+ B(Star), R(0),
+ B(LdaConstant), U8(39),
+ B(Star), R(0),
+ B(LdaConstant), U8(40),
+ B(Star), R(0),
+ B(LdaConstant), U8(41),
+ B(Star), R(0),
+ B(LdaConstant), U8(42),
+ B(Star), R(0),
+ B(LdaConstant), U8(43),
+ B(Star), R(0),
+ B(LdaConstant), U8(44),
+ B(Star), R(0),
+ B(LdaConstant), U8(45),
+ B(Star), R(0),
+ B(LdaConstant), U8(46),
+ B(Star), R(0),
+ B(LdaConstant), U8(47),
+ B(Star), R(0),
+ B(LdaConstant), U8(48),
+ B(Star), R(0),
+ B(LdaConstant), U8(49),
+ B(Star), R(0),
+ B(LdaConstant), U8(50),
+ B(Star), R(0),
+ B(LdaConstant), U8(51),
+ B(Star), R(0),
+ B(LdaConstant), U8(52),
+ B(Star), R(0),
+ B(LdaConstant), U8(53),
+ B(Star), R(0),
+ B(LdaConstant), U8(54),
+ B(Star), R(0),
+ B(LdaConstant), U8(55),
+ B(Star), R(0),
+ B(LdaConstant), U8(56),
+ B(Star), R(0),
+ B(LdaConstant), U8(57),
+ B(Star), R(0),
+ B(LdaConstant), U8(58),
+ B(Star), R(0),
+ B(LdaConstant), U8(59),
+ B(Star), R(0),
+ B(LdaConstant), U8(60),
+ B(Star), R(0),
+ B(LdaConstant), U8(61),
+ B(Star), R(0),
+ B(LdaConstant), U8(62),
+ B(Star), R(0),
+ B(LdaConstant), U8(63),
+ B(Star), R(0),
+ B(LdaConstant), U8(64),
+ B(Star), R(0),
+ B(LdaConstant), U8(65),
+ B(Star), R(0),
+ B(LdaConstant), U8(66),
+ B(Star), R(0),
+ B(LdaConstant), U8(67),
+ B(Star), R(0),
+ B(LdaConstant), U8(68),
+ B(Star), R(0),
+ B(LdaConstant), U8(69),
+ B(Star), R(0),
+ B(LdaConstant), U8(70),
+ B(Star), R(0),
+ B(LdaConstant), U8(71),
+ B(Star), R(0),
+ B(LdaConstant), U8(72),
+ B(Star), R(0),
+ B(LdaConstant), U8(73),
+ B(Star), R(0),
+ B(LdaConstant), U8(74),
+ B(Star), R(0),
+ B(LdaConstant), U8(75),
+ B(Star), R(0),
+ B(LdaConstant), U8(76),
+ B(Star), R(0),
+ B(LdaConstant), U8(77),
+ B(Star), R(0),
+ B(LdaConstant), U8(78),
+ B(Star), R(0),
+ B(LdaConstant), U8(79),
+ B(Star), R(0),
+ B(LdaConstant), U8(80),
+ B(Star), R(0),
+ B(LdaConstant), U8(81),
+ B(Star), R(0),
+ B(LdaConstant), U8(82),
+ B(Star), R(0),
+ B(LdaConstant), U8(83),
+ B(Star), R(0),
+ B(LdaConstant), U8(84),
+ B(Star), R(0),
+ B(LdaConstant), U8(85),
+ B(Star), R(0),
+ B(LdaConstant), U8(86),
+ B(Star), R(0),
+ B(LdaConstant), U8(87),
+ B(Star), R(0),
+ B(LdaConstant), U8(88),
+ B(Star), R(0),
+ B(LdaConstant), U8(89),
+ B(Star), R(0),
+ B(LdaConstant), U8(90),
+ B(Star), R(0),
+ B(LdaConstant), U8(91),
+ B(Star), R(0),
+ B(LdaConstant), U8(92),
+ B(Star), R(0),
+ B(LdaConstant), U8(93),
+ B(Star), R(0),
+ B(LdaConstant), U8(94),
+ B(Star), R(0),
+ B(LdaConstant), U8(95),
+ B(Star), R(0),
+ B(LdaConstant), U8(96),
+ B(Star), R(0),
+ B(LdaConstant), U8(97),
+ B(Star), R(0),
+ B(LdaConstant), U8(98),
+ B(Star), R(0),
+ B(LdaConstant), U8(99),
+ B(Star), R(0),
+ B(LdaConstant), U8(100),
+ B(Star), R(0),
+ B(LdaConstant), U8(101),
+ B(Star), R(0),
+ B(LdaConstant), U8(102),
+ B(Star), R(0),
+ B(LdaConstant), U8(103),
+ B(Star), R(0),
+ B(LdaConstant), U8(104),
+ B(Star), R(0),
+ B(LdaConstant), U8(105),
+ B(Star), R(0),
+ B(LdaConstant), U8(106),
+ B(Star), R(0),
+ B(LdaConstant), U8(107),
+ B(Star), R(0),
+ B(LdaConstant), U8(108),
+ B(Star), R(0),
+ B(LdaConstant), U8(109),
+ B(Star), R(0),
+ B(LdaConstant), U8(110),
+ B(Star), R(0),
+ B(LdaConstant), U8(111),
+ B(Star), R(0),
+ B(LdaConstant), U8(112),
+ B(Star), R(0),
+ B(LdaConstant), U8(113),
+ B(Star), R(0),
+ B(LdaConstant), U8(114),
+ B(Star), R(0),
+ B(LdaConstant), U8(115),
+ B(Star), R(0),
+ B(LdaConstant), U8(116),
+ B(Star), R(0),
+ B(LdaConstant), U8(117),
+ B(Star), R(0),
+ B(LdaConstant), U8(118),
+ B(Star), R(0),
+ B(LdaConstant), U8(119),
+ B(Star), R(0),
+ B(LdaConstant), U8(120),
+ B(Star), R(0),
+ B(LdaConstant), U8(121),
+ B(Star), R(0),
+ B(LdaConstant), U8(122),
+ B(Star), R(0),
+ B(LdaConstant), U8(123),
+ B(Star), R(0),
+ B(LdaConstant), U8(124),
+ B(Star), R(0),
+ B(LdaConstant), U8(125),
+ B(Star), R(0),
+ B(LdaConstant), U8(126),
+ B(Star), R(0),
+ B(LdaConstant), U8(127),
+ B(Star), R(0),
+ B(LdaConstant), U8(128),
+ B(Star), R(0),
+ B(LdaConstant), U8(129),
+ B(Star), R(0),
+ B(LdaConstant), U8(130),
+ B(Star), R(0),
+ B(LdaConstant), U8(131),
+ B(Star), R(0),
+ B(LdaConstant), U8(132),
+ B(Star), R(0),
+ B(LdaConstant), U8(133),
+ B(Star), R(0),
+ B(LdaConstant), U8(134),
+ B(Star), R(0),
+ B(LdaConstant), U8(135),
+ B(Star), R(0),
+ B(LdaConstant), U8(136),
+ B(Star), R(0),
+ B(LdaConstant), U8(137),
+ B(Star), R(0),
+ B(LdaConstant), U8(138),
+ B(Star), R(0),
+ B(LdaConstant), U8(139),
+ B(Star), R(0),
+ B(LdaConstant), U8(140),
+ B(Star), R(0),
+ B(LdaConstant), U8(141),
+ B(Star), R(0),
+ B(LdaConstant), U8(142),
+ B(Star), R(0),
+ B(LdaConstant), U8(143),
+ B(Star), R(0),
+ B(LdaConstant), U8(144),
+ B(Star), R(0),
+ B(LdaConstant), U8(145),
+ B(Star), R(0),
+ B(LdaConstant), U8(146),
+ B(Star), R(0),
+ B(LdaConstant), U8(147),
+ B(Star), R(0),
+ B(LdaConstant), U8(148),
+ B(Star), R(0),
+ B(LdaConstant), U8(149),
+ B(Star), R(0),
+ B(LdaConstant), U8(150),
+ B(Star), R(0),
+ B(LdaConstant), U8(151),
+ B(Star), R(0),
+ B(LdaConstant), U8(152),
+ B(Star), R(0),
+ B(LdaConstant), U8(153),
+ B(Star), R(0),
+ B(LdaConstant), U8(154),
+ B(Star), R(0),
+ B(LdaConstant), U8(155),
+ B(Star), R(0),
+ B(LdaConstant), U8(156),
+ B(Star), R(0),
+ B(LdaConstant), U8(157),
+ B(Star), R(0),
+ B(LdaConstant), U8(158),
+ B(Star), R(0),
+ B(LdaConstant), U8(159),
+ B(Star), R(0),
+ B(LdaConstant), U8(160),
+ B(Star), R(0),
+ B(LdaConstant), U8(161),
+ B(Star), R(0),
+ B(LdaConstant), U8(162),
+ B(Star), R(0),
+ B(LdaConstant), U8(163),
+ B(Star), R(0),
+ B(LdaConstant), U8(164),
+ B(Star), R(0),
+ B(LdaConstant), U8(165),
+ B(Star), R(0),
+ B(LdaConstant), U8(166),
+ B(Star), R(0),
+ B(LdaConstant), U8(167),
+ B(Star), R(0),
+ B(LdaConstant), U8(168),
+ B(Star), R(0),
+ B(LdaConstant), U8(169),
+ B(Star), R(0),
+ B(LdaConstant), U8(170),
+ B(Star), R(0),
+ B(LdaConstant), U8(171),
+ B(Star), R(0),
+ B(LdaConstant), U8(172),
+ B(Star), R(0),
+ B(LdaConstant), U8(173),
+ B(Star), R(0),
+ B(LdaConstant), U8(174),
+ B(Star), R(0),
+ B(LdaConstant), U8(175),
+ B(Star), R(0),
+ B(LdaConstant), U8(176),
+ B(Star), R(0),
+ B(LdaConstant), U8(177),
+ B(Star), R(0),
+ B(LdaConstant), U8(178),
+ B(Star), R(0),
+ B(LdaConstant), U8(179),
+ B(Star), R(0),
+ B(LdaConstant), U8(180),
+ B(Star), R(0),
+ B(LdaConstant), U8(181),
+ B(Star), R(0),
+ B(LdaConstant), U8(182),
+ B(Star), R(0),
+ B(LdaConstant), U8(183),
+ B(Star), R(0),
+ B(LdaConstant), U8(184),
+ B(Star), R(0),
+ B(LdaConstant), U8(185),
+ B(Star), R(0),
+ B(LdaConstant), U8(186),
+ B(Star), R(0),
+ B(LdaConstant), U8(187),
+ B(Star), R(0),
+ B(LdaConstant), U8(188),
+ B(Star), R(0),
+ B(LdaConstant), U8(189),
+ B(Star), R(0),
+ B(LdaConstant), U8(190),
+ B(Star), R(0),
+ B(LdaConstant), U8(191),
+ B(Star), R(0),
+ B(LdaConstant), U8(192),
+ B(Star), R(0),
+ B(LdaConstant), U8(193),
+ B(Star), R(0),
+ B(LdaConstant), U8(194),
+ B(Star), R(0),
+ B(LdaConstant), U8(195),
+ B(Star), R(0),
+ B(LdaConstant), U8(196),
+ B(Star), R(0),
+ B(LdaConstant), U8(197),
+ B(Star), R(0),
+ B(LdaConstant), U8(198),
+ B(Star), R(0),
+ B(LdaConstant), U8(199),
+ B(Star), R(0),
+ B(LdaConstant), U8(200),
+ B(Star), R(0),
+ B(LdaConstant), U8(201),
+ B(Star), R(0),
+ B(LdaConstant), U8(202),
+ B(Star), R(0),
+ B(LdaConstant), U8(203),
+ B(Star), R(0),
+ B(LdaConstant), U8(204),
+ B(Star), R(0),
+ B(LdaConstant), U8(205),
+ B(Star), R(0),
+ B(LdaConstant), U8(206),
+ B(Star), R(0),
+ B(LdaConstant), U8(207),
+ B(Star), R(0),
+ B(LdaConstant), U8(208),
+ B(Star), R(0),
+ B(LdaConstant), U8(209),
+ B(Star), R(0),
+ B(LdaConstant), U8(210),
+ B(Star), R(0),
+ B(LdaConstant), U8(211),
+ B(Star), R(0),
+ B(LdaConstant), U8(212),
+ B(Star), R(0),
+ B(LdaConstant), U8(213),
+ B(Star), R(0),
+ B(LdaConstant), U8(214),
+ B(Star), R(0),
+ B(LdaConstant), U8(215),
+ B(Star), R(0),
+ B(LdaConstant), U8(216),
+ B(Star), R(0),
+ B(LdaConstant), U8(217),
+ B(Star), R(0),
+ B(LdaConstant), U8(218),
+ B(Star), R(0),
+ B(LdaConstant), U8(219),
+ B(Star), R(0),
+ B(LdaConstant), U8(220),
+ B(Star), R(0),
+ B(LdaConstant), U8(221),
+ B(Star), R(0),
+ B(LdaConstant), U8(222),
+ B(Star), R(0),
+ B(LdaConstant), U8(223),
+ B(Star), R(0),
+ B(LdaConstant), U8(224),
+ B(Star), R(0),
+ B(LdaConstant), U8(225),
+ B(Star), R(0),
+ B(LdaConstant), U8(226),
+ B(Star), R(0),
+ B(LdaConstant), U8(227),
+ B(Star), R(0),
+ B(LdaConstant), U8(228),
+ B(Star), R(0),
+ B(LdaConstant), U8(229),
+ B(Star), R(0),
+ B(LdaConstant), U8(230),
+ B(Star), R(0),
+ B(LdaConstant), U8(231),
+ B(Star), R(0),
+ B(LdaConstant), U8(232),
+ B(Star), R(0),
+ B(LdaConstant), U8(233),
+ B(Star), R(0),
+ B(LdaConstant), U8(234),
+ B(Star), R(0),
+ B(LdaConstant), U8(235),
+ B(Star), R(0),
+ B(LdaConstant), U8(236),
+ B(Star), R(0),
+ B(LdaConstant), U8(237),
+ B(Star), R(0),
+ B(LdaConstant), U8(238),
+ B(Star), R(0),
+ B(LdaConstant), U8(239),
+ B(Star), R(0),
+ B(LdaConstant), U8(240),
+ B(Star), R(0),
+ B(LdaConstant), U8(241),
+ B(Star), R(0),
+ B(LdaConstant), U8(242),
+ B(Star), R(0),
+ B(LdaConstant), U8(243),
+ B(Star), R(0),
+ B(LdaConstant), U8(244),
+ B(Star), R(0),
+ B(LdaConstant), U8(245),
+ B(Star), R(0),
+ B(LdaConstant), U8(246),
+ B(Star), R(0),
+ B(LdaConstant), U8(247),
+ B(Star), R(0),
+ B(LdaConstant), U8(248),
+ B(Star), R(0),
+ B(LdaConstant), U8(249),
+ B(Star), R(0),
+ B(LdaConstant), U8(250),
+ B(Star), R(0),
+ B(LdaConstant), U8(251),
+ B(Star), R(0),
+ B(LdaConstant), U8(252),
+ B(Star), R(0),
+ B(LdaConstant), U8(253),
+ B(Star), R(0),
+ B(LdaConstant), U8(254),
+ B(Star), R(0),
+ B(LdaConstant), U8(255),
+ B(Star), R(0),
+ B(LdaSmi), U8(10),
+ B(Wide), B(StaLookupSlotStrict), U16(256),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/NewTarget.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewTarget.golden
new file mode 100644
index 0000000000..cd3332a3ee
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewTarget.golden
@@ -0,0 +1,57 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: mixed
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ return new.target;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 19
+bytecodes: [
+ B(Ldar), R(new_target),
+ B(Star), R(0),
+ B(StackCheck),
+ B(Ldar), R(0),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(0),
+ B(Star), R(1),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(1), U8(1),
+ B(Return),
+]
+constant pool: [
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ new.target;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 20
+bytecodes: [
+ B(Ldar), R(new_target),
+ B(Star), R(0),
+ B(StackCheck),
+ B(Ldar), R(0),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(0),
+ B(Star), R(1),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(1), U8(1),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
new file mode 100644
index 0000000000..620f6c7bcc
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
@@ -0,0 +1,462 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: mixed
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ return { };
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 8
+bytecodes: [
+ B(StackCheck),
+ B(CreateObjectLiteral), U8(0), U8(0), U8(7),
+ B(Star), R(0),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ return { name: 'string', val: 9.2 };
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 8
+bytecodes: [
+ B(StackCheck),
+ B(CreateObjectLiteral), U8(0), U8(0), U8(5),
+ B(Star), R(0),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 1; return { name: 'string', val: a };
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 20
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(CreateObjectLiteral), U8(0), U8(0), U8(5),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(StoreICSloppy), R(1), U8(1), U8(1),
+ B(Ldar), R(1),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 1; return { val: a, val: a + 1 };
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 26
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(CreateObjectLiteral), U8(0), U8(0), U8(5),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Add), R(2),
+ B(StoreICSloppy), R(1), U8(1), U8(1),
+ B(Ldar), R(1),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ return { func: function() { } };
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 17
+bytecodes: [
+ B(StackCheck),
+ B(CreateObjectLiteral), U8(0), U8(0), U8(5),
+ B(Star), R(0),
+ B(CreateClosure), U8(1), U8(0),
+ B(StoreICSloppy), R(0), U8(2), U8(1),
+ B(Ldar), R(0),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ return { func(a) { return a; } };
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 17
+bytecodes: [
+ B(StackCheck),
+ B(CreateObjectLiteral), U8(0), U8(0), U8(5),
+ B(Star), R(0),
+ B(CreateClosure), U8(1), U8(0),
+ B(StoreICSloppy), R(0), U8(2), U8(1),
+ B(Ldar), R(0),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ return { get a() { return 2; } };
+"
+frame size: 6
+parameter count: 1
+bytecode array length: 33
+bytecodes: [
+ B(StackCheck),
+ B(CreateObjectLiteral), U8(0), U8(0), U8(5),
+ B(Star), R(0),
+ B(Mov), R(0), R(1),
+ B(LdaConstant), U8(1),
+ B(Star), R(2),
+ B(CreateClosure), U8(2), U8(0),
+ B(Star), R(3),
+ B(LdaNull),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kDefineAccessorPropertyUnchecked), R(1), U8(5),
+ B(Ldar), R(0),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ return { get a() { return this.x; }, set a(val) { this.x = val } };
+"
+frame size: 6
+parameter count: 1
+bytecode array length: 35
+bytecodes: [
+ B(StackCheck),
+ B(CreateObjectLiteral), U8(0), U8(0), U8(5),
+ B(Star), R(0),
+ B(Mov), R(0), R(1),
+ B(LdaConstant), U8(1),
+ B(Star), R(2),
+ B(CreateClosure), U8(2), U8(0),
+ B(Star), R(3),
+ B(CreateClosure), U8(3), U8(0),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kDefineAccessorPropertyUnchecked), R(1), U8(5),
+ B(Ldar), R(0),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ return { set b(val) { this.y = val } };
+"
+frame size: 6
+parameter count: 1
+bytecode array length: 33
+bytecodes: [
+ B(StackCheck),
+ B(CreateObjectLiteral), U8(0), U8(0), U8(5),
+ B(Star), R(0),
+ B(Mov), R(0), R(1),
+ B(LdaConstant), U8(1),
+ B(Star), R(2),
+ B(LdaNull),
+ B(Star), R(3),
+ B(CreateClosure), U8(2), U8(0),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kDefineAccessorPropertyUnchecked), R(1), U8(5),
+ B(Ldar), R(0),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 1; return { 1: a };
+"
+frame size: 6
+parameter count: 1
+bytecode array length: 33
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(CreateObjectLiteral), U8(0), U8(0), U8(5),
+ B(Star), R(1),
+ B(Mov), R(1), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(3),
+ B(Ldar), R(0),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kSetProperty), R(2), U8(4),
+ B(Ldar), R(1),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ return { __proto__: null };
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 21
+bytecodes: [
+ B(StackCheck),
+ B(CreateObjectLiteral), U8(0), U8(0), U8(7),
+ B(Star), R(0),
+ B(Mov), R(0), R(1),
+ B(LdaNull),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kInternalSetPrototype), R(1), U8(2),
+ B(Ldar), R(0),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 'test'; return { [a]: 1 };
+"
+frame size: 7
+parameter count: 1
+bytecode array length: 37
+bytecodes: [
+ B(StackCheck),
+ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ B(CreateObjectLiteral), U8(1), U8(0), U8(7),
+ B(Star), R(1),
+ B(Mov), R(1), R(2),
+ B(Ldar), R(0),
+ B(ToName),
+ B(Star), R(3),
+ B(LdaSmi), U8(1),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(5),
+ B(LdaZero),
+ B(Star), R(6),
+ B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(2), U8(5),
+ B(Ldar), R(1),
+ B(Return),
+]
+constant pool: [
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::FIXED_ARRAY_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 'test'; return { val: a, [a]: 1 };
+"
+frame size: 7
+parameter count: 1
+bytecode array length: 43
+bytecodes: [
+ B(StackCheck),
+ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ B(CreateObjectLiteral), U8(1), U8(0), U8(5),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(StoreICSloppy), R(1), U8(2), U8(1),
+ B(Mov), R(1), R(2),
+ B(Ldar), R(0),
+ B(ToName),
+ B(Star), R(3),
+ B(LdaSmi), U8(1),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(5),
+ B(LdaZero),
+ B(Star), R(6),
+ B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(2), U8(5),
+ B(Ldar), R(1),
+ B(Return),
+]
+constant pool: [
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 'test'; return { [a]: 1, __proto__: {} };
+"
+frame size: 7
+parameter count: 1
+bytecode array length: 53
+bytecodes: [
+ B(StackCheck),
+ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ B(CreateObjectLiteral), U8(1), U8(1), U8(7),
+ B(Star), R(1),
+ B(Mov), R(1), R(2),
+ B(Ldar), R(0),
+ B(ToName),
+ B(Star), R(3),
+ B(LdaSmi), U8(1),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(5),
+ B(LdaZero),
+ B(Star), R(6),
+ B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(2), U8(5),
+ B(Mov), R(1), R(2),
+ B(CreateObjectLiteral), U8(1), U8(0), U8(7),
+ B(Star), R(4),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kInternalSetPrototype), R(2), U8(2),
+ B(Ldar), R(1),
+ B(Return),
+]
+constant pool: [
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::FIXED_ARRAY_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var n = 'name'; return { [n]: 'val', get a() { }, set a(b) {} };
+"
+frame size: 7
+parameter count: 1
+bytecode array length: 77
+bytecodes: [
+ B(StackCheck),
+ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ B(CreateObjectLiteral), U8(1), U8(0), U8(7),
+ B(Star), R(1),
+ B(Mov), R(1), R(2),
+ B(Ldar), R(0),
+ B(ToName),
+ B(Star), R(3),
+ B(LdaConstant), U8(2),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(5),
+ B(LdaZero),
+ B(Star), R(6),
+ B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(2), U8(5),
+ B(Mov), R(1), R(2),
+ B(LdaConstant), U8(3),
+ B(Star), R(3),
+ B(CreateClosure), U8(4), U8(0),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kDefineGetterPropertyUnchecked), R(2), U8(4),
+ B(Mov), R(1), R(2),
+ B(LdaConstant), U8(3),
+ B(Star), R(3),
+ B(CreateClosure), U8(5), U8(0),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kDefineSetterPropertyUnchecked), R(2), U8(4),
+ B(Ldar), R(1),
+ B(Return),
+]
+constant pool: [
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiteralsWide.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiteralsWide.golden
new file mode 100644
index 0000000000..12295bc29e
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiteralsWide.golden
@@ -0,0 +1,1052 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: mixed
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ var a;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ return { name: 'string', val: 9.2 };
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 1035
+bytecodes: [
+ B(StackCheck),
+ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ B(LdaConstant), U8(1),
+ B(Star), R(0),
+ B(LdaConstant), U8(2),
+ B(Star), R(0),
+ B(LdaConstant), U8(3),
+ B(Star), R(0),
+ B(LdaConstant), U8(4),
+ B(Star), R(0),
+ B(LdaConstant), U8(5),
+ B(Star), R(0),
+ B(LdaConstant), U8(6),
+ B(Star), R(0),
+ B(LdaConstant), U8(7),
+ B(Star), R(0),
+ B(LdaConstant), U8(8),
+ B(Star), R(0),
+ B(LdaConstant), U8(9),
+ B(Star), R(0),
+ B(LdaConstant), U8(10),
+ B(Star), R(0),
+ B(LdaConstant), U8(11),
+ B(Star), R(0),
+ B(LdaConstant), U8(12),
+ B(Star), R(0),
+ B(LdaConstant), U8(13),
+ B(Star), R(0),
+ B(LdaConstant), U8(14),
+ B(Star), R(0),
+ B(LdaConstant), U8(15),
+ B(Star), R(0),
+ B(LdaConstant), U8(16),
+ B(Star), R(0),
+ B(LdaConstant), U8(17),
+ B(Star), R(0),
+ B(LdaConstant), U8(18),
+ B(Star), R(0),
+ B(LdaConstant), U8(19),
+ B(Star), R(0),
+ B(LdaConstant), U8(20),
+ B(Star), R(0),
+ B(LdaConstant), U8(21),
+ B(Star), R(0),
+ B(LdaConstant), U8(22),
+ B(Star), R(0),
+ B(LdaConstant), U8(23),
+ B(Star), R(0),
+ B(LdaConstant), U8(24),
+ B(Star), R(0),
+ B(LdaConstant), U8(25),
+ B(Star), R(0),
+ B(LdaConstant), U8(26),
+ B(Star), R(0),
+ B(LdaConstant), U8(27),
+ B(Star), R(0),
+ B(LdaConstant), U8(28),
+ B(Star), R(0),
+ B(LdaConstant), U8(29),
+ B(Star), R(0),
+ B(LdaConstant), U8(30),
+ B(Star), R(0),
+ B(LdaConstant), U8(31),
+ B(Star), R(0),
+ B(LdaConstant), U8(32),
+ B(Star), R(0),
+ B(LdaConstant), U8(33),
+ B(Star), R(0),
+ B(LdaConstant), U8(34),
+ B(Star), R(0),
+ B(LdaConstant), U8(35),
+ B(Star), R(0),
+ B(LdaConstant), U8(36),
+ B(Star), R(0),
+ B(LdaConstant), U8(37),
+ B(Star), R(0),
+ B(LdaConstant), U8(38),
+ B(Star), R(0),
+ B(LdaConstant), U8(39),
+ B(Star), R(0),
+ B(LdaConstant), U8(40),
+ B(Star), R(0),
+ B(LdaConstant), U8(41),
+ B(Star), R(0),
+ B(LdaConstant), U8(42),
+ B(Star), R(0),
+ B(LdaConstant), U8(43),
+ B(Star), R(0),
+ B(LdaConstant), U8(44),
+ B(Star), R(0),
+ B(LdaConstant), U8(45),
+ B(Star), R(0),
+ B(LdaConstant), U8(46),
+ B(Star), R(0),
+ B(LdaConstant), U8(47),
+ B(Star), R(0),
+ B(LdaConstant), U8(48),
+ B(Star), R(0),
+ B(LdaConstant), U8(49),
+ B(Star), R(0),
+ B(LdaConstant), U8(50),
+ B(Star), R(0),
+ B(LdaConstant), U8(51),
+ B(Star), R(0),
+ B(LdaConstant), U8(52),
+ B(Star), R(0),
+ B(LdaConstant), U8(53),
+ B(Star), R(0),
+ B(LdaConstant), U8(54),
+ B(Star), R(0),
+ B(LdaConstant), U8(55),
+ B(Star), R(0),
+ B(LdaConstant), U8(56),
+ B(Star), R(0),
+ B(LdaConstant), U8(57),
+ B(Star), R(0),
+ B(LdaConstant), U8(58),
+ B(Star), R(0),
+ B(LdaConstant), U8(59),
+ B(Star), R(0),
+ B(LdaConstant), U8(60),
+ B(Star), R(0),
+ B(LdaConstant), U8(61),
+ B(Star), R(0),
+ B(LdaConstant), U8(62),
+ B(Star), R(0),
+ B(LdaConstant), U8(63),
+ B(Star), R(0),
+ B(LdaConstant), U8(64),
+ B(Star), R(0),
+ B(LdaConstant), U8(65),
+ B(Star), R(0),
+ B(LdaConstant), U8(66),
+ B(Star), R(0),
+ B(LdaConstant), U8(67),
+ B(Star), R(0),
+ B(LdaConstant), U8(68),
+ B(Star), R(0),
+ B(LdaConstant), U8(69),
+ B(Star), R(0),
+ B(LdaConstant), U8(70),
+ B(Star), R(0),
+ B(LdaConstant), U8(71),
+ B(Star), R(0),
+ B(LdaConstant), U8(72),
+ B(Star), R(0),
+ B(LdaConstant), U8(73),
+ B(Star), R(0),
+ B(LdaConstant), U8(74),
+ B(Star), R(0),
+ B(LdaConstant), U8(75),
+ B(Star), R(0),
+ B(LdaConstant), U8(76),
+ B(Star), R(0),
+ B(LdaConstant), U8(77),
+ B(Star), R(0),
+ B(LdaConstant), U8(78),
+ B(Star), R(0),
+ B(LdaConstant), U8(79),
+ B(Star), R(0),
+ B(LdaConstant), U8(80),
+ B(Star), R(0),
+ B(LdaConstant), U8(81),
+ B(Star), R(0),
+ B(LdaConstant), U8(82),
+ B(Star), R(0),
+ B(LdaConstant), U8(83),
+ B(Star), R(0),
+ B(LdaConstant), U8(84),
+ B(Star), R(0),
+ B(LdaConstant), U8(85),
+ B(Star), R(0),
+ B(LdaConstant), U8(86),
+ B(Star), R(0),
+ B(LdaConstant), U8(87),
+ B(Star), R(0),
+ B(LdaConstant), U8(88),
+ B(Star), R(0),
+ B(LdaConstant), U8(89),
+ B(Star), R(0),
+ B(LdaConstant), U8(90),
+ B(Star), R(0),
+ B(LdaConstant), U8(91),
+ B(Star), R(0),
+ B(LdaConstant), U8(92),
+ B(Star), R(0),
+ B(LdaConstant), U8(93),
+ B(Star), R(0),
+ B(LdaConstant), U8(94),
+ B(Star), R(0),
+ B(LdaConstant), U8(95),
+ B(Star), R(0),
+ B(LdaConstant), U8(96),
+ B(Star), R(0),
+ B(LdaConstant), U8(97),
+ B(Star), R(0),
+ B(LdaConstant), U8(98),
+ B(Star), R(0),
+ B(LdaConstant), U8(99),
+ B(Star), R(0),
+ B(LdaConstant), U8(100),
+ B(Star), R(0),
+ B(LdaConstant), U8(101),
+ B(Star), R(0),
+ B(LdaConstant), U8(102),
+ B(Star), R(0),
+ B(LdaConstant), U8(103),
+ B(Star), R(0),
+ B(LdaConstant), U8(104),
+ B(Star), R(0),
+ B(LdaConstant), U8(105),
+ B(Star), R(0),
+ B(LdaConstant), U8(106),
+ B(Star), R(0),
+ B(LdaConstant), U8(107),
+ B(Star), R(0),
+ B(LdaConstant), U8(108),
+ B(Star), R(0),
+ B(LdaConstant), U8(109),
+ B(Star), R(0),
+ B(LdaConstant), U8(110),
+ B(Star), R(0),
+ B(LdaConstant), U8(111),
+ B(Star), R(0),
+ B(LdaConstant), U8(112),
+ B(Star), R(0),
+ B(LdaConstant), U8(113),
+ B(Star), R(0),
+ B(LdaConstant), U8(114),
+ B(Star), R(0),
+ B(LdaConstant), U8(115),
+ B(Star), R(0),
+ B(LdaConstant), U8(116),
+ B(Star), R(0),
+ B(LdaConstant), U8(117),
+ B(Star), R(0),
+ B(LdaConstant), U8(118),
+ B(Star), R(0),
+ B(LdaConstant), U8(119),
+ B(Star), R(0),
+ B(LdaConstant), U8(120),
+ B(Star), R(0),
+ B(LdaConstant), U8(121),
+ B(Star), R(0),
+ B(LdaConstant), U8(122),
+ B(Star), R(0),
+ B(LdaConstant), U8(123),
+ B(Star), R(0),
+ B(LdaConstant), U8(124),
+ B(Star), R(0),
+ B(LdaConstant), U8(125),
+ B(Star), R(0),
+ B(LdaConstant), U8(126),
+ B(Star), R(0),
+ B(LdaConstant), U8(127),
+ B(Star), R(0),
+ B(LdaConstant), U8(128),
+ B(Star), R(0),
+ B(LdaConstant), U8(129),
+ B(Star), R(0),
+ B(LdaConstant), U8(130),
+ B(Star), R(0),
+ B(LdaConstant), U8(131),
+ B(Star), R(0),
+ B(LdaConstant), U8(132),
+ B(Star), R(0),
+ B(LdaConstant), U8(133),
+ B(Star), R(0),
+ B(LdaConstant), U8(134),
+ B(Star), R(0),
+ B(LdaConstant), U8(135),
+ B(Star), R(0),
+ B(LdaConstant), U8(136),
+ B(Star), R(0),
+ B(LdaConstant), U8(137),
+ B(Star), R(0),
+ B(LdaConstant), U8(138),
+ B(Star), R(0),
+ B(LdaConstant), U8(139),
+ B(Star), R(0),
+ B(LdaConstant), U8(140),
+ B(Star), R(0),
+ B(LdaConstant), U8(141),
+ B(Star), R(0),
+ B(LdaConstant), U8(142),
+ B(Star), R(0),
+ B(LdaConstant), U8(143),
+ B(Star), R(0),
+ B(LdaConstant), U8(144),
+ B(Star), R(0),
+ B(LdaConstant), U8(145),
+ B(Star), R(0),
+ B(LdaConstant), U8(146),
+ B(Star), R(0),
+ B(LdaConstant), U8(147),
+ B(Star), R(0),
+ B(LdaConstant), U8(148),
+ B(Star), R(0),
+ B(LdaConstant), U8(149),
+ B(Star), R(0),
+ B(LdaConstant), U8(150),
+ B(Star), R(0),
+ B(LdaConstant), U8(151),
+ B(Star), R(0),
+ B(LdaConstant), U8(152),
+ B(Star), R(0),
+ B(LdaConstant), U8(153),
+ B(Star), R(0),
+ B(LdaConstant), U8(154),
+ B(Star), R(0),
+ B(LdaConstant), U8(155),
+ B(Star), R(0),
+ B(LdaConstant), U8(156),
+ B(Star), R(0),
+ B(LdaConstant), U8(157),
+ B(Star), R(0),
+ B(LdaConstant), U8(158),
+ B(Star), R(0),
+ B(LdaConstant), U8(159),
+ B(Star), R(0),
+ B(LdaConstant), U8(160),
+ B(Star), R(0),
+ B(LdaConstant), U8(161),
+ B(Star), R(0),
+ B(LdaConstant), U8(162),
+ B(Star), R(0),
+ B(LdaConstant), U8(163),
+ B(Star), R(0),
+ B(LdaConstant), U8(164),
+ B(Star), R(0),
+ B(LdaConstant), U8(165),
+ B(Star), R(0),
+ B(LdaConstant), U8(166),
+ B(Star), R(0),
+ B(LdaConstant), U8(167),
+ B(Star), R(0),
+ B(LdaConstant), U8(168),
+ B(Star), R(0),
+ B(LdaConstant), U8(169),
+ B(Star), R(0),
+ B(LdaConstant), U8(170),
+ B(Star), R(0),
+ B(LdaConstant), U8(171),
+ B(Star), R(0),
+ B(LdaConstant), U8(172),
+ B(Star), R(0),
+ B(LdaConstant), U8(173),
+ B(Star), R(0),
+ B(LdaConstant), U8(174),
+ B(Star), R(0),
+ B(LdaConstant), U8(175),
+ B(Star), R(0),
+ B(LdaConstant), U8(176),
+ B(Star), R(0),
+ B(LdaConstant), U8(177),
+ B(Star), R(0),
+ B(LdaConstant), U8(178),
+ B(Star), R(0),
+ B(LdaConstant), U8(179),
+ B(Star), R(0),
+ B(LdaConstant), U8(180),
+ B(Star), R(0),
+ B(LdaConstant), U8(181),
+ B(Star), R(0),
+ B(LdaConstant), U8(182),
+ B(Star), R(0),
+ B(LdaConstant), U8(183),
+ B(Star), R(0),
+ B(LdaConstant), U8(184),
+ B(Star), R(0),
+ B(LdaConstant), U8(185),
+ B(Star), R(0),
+ B(LdaConstant), U8(186),
+ B(Star), R(0),
+ B(LdaConstant), U8(187),
+ B(Star), R(0),
+ B(LdaConstant), U8(188),
+ B(Star), R(0),
+ B(LdaConstant), U8(189),
+ B(Star), R(0),
+ B(LdaConstant), U8(190),
+ B(Star), R(0),
+ B(LdaConstant), U8(191),
+ B(Star), R(0),
+ B(LdaConstant), U8(192),
+ B(Star), R(0),
+ B(LdaConstant), U8(193),
+ B(Star), R(0),
+ B(LdaConstant), U8(194),
+ B(Star), R(0),
+ B(LdaConstant), U8(195),
+ B(Star), R(0),
+ B(LdaConstant), U8(196),
+ B(Star), R(0),
+ B(LdaConstant), U8(197),
+ B(Star), R(0),
+ B(LdaConstant), U8(198),
+ B(Star), R(0),
+ B(LdaConstant), U8(199),
+ B(Star), R(0),
+ B(LdaConstant), U8(200),
+ B(Star), R(0),
+ B(LdaConstant), U8(201),
+ B(Star), R(0),
+ B(LdaConstant), U8(202),
+ B(Star), R(0),
+ B(LdaConstant), U8(203),
+ B(Star), R(0),
+ B(LdaConstant), U8(204),
+ B(Star), R(0),
+ B(LdaConstant), U8(205),
+ B(Star), R(0),
+ B(LdaConstant), U8(206),
+ B(Star), R(0),
+ B(LdaConstant), U8(207),
+ B(Star), R(0),
+ B(LdaConstant), U8(208),
+ B(Star), R(0),
+ B(LdaConstant), U8(209),
+ B(Star), R(0),
+ B(LdaConstant), U8(210),
+ B(Star), R(0),
+ B(LdaConstant), U8(211),
+ B(Star), R(0),
+ B(LdaConstant), U8(212),
+ B(Star), R(0),
+ B(LdaConstant), U8(213),
+ B(Star), R(0),
+ B(LdaConstant), U8(214),
+ B(Star), R(0),
+ B(LdaConstant), U8(215),
+ B(Star), R(0),
+ B(LdaConstant), U8(216),
+ B(Star), R(0),
+ B(LdaConstant), U8(217),
+ B(Star), R(0),
+ B(LdaConstant), U8(218),
+ B(Star), R(0),
+ B(LdaConstant), U8(219),
+ B(Star), R(0),
+ B(LdaConstant), U8(220),
+ B(Star), R(0),
+ B(LdaConstant), U8(221),
+ B(Star), R(0),
+ B(LdaConstant), U8(222),
+ B(Star), R(0),
+ B(LdaConstant), U8(223),
+ B(Star), R(0),
+ B(LdaConstant), U8(224),
+ B(Star), R(0),
+ B(LdaConstant), U8(225),
+ B(Star), R(0),
+ B(LdaConstant), U8(226),
+ B(Star), R(0),
+ B(LdaConstant), U8(227),
+ B(Star), R(0),
+ B(LdaConstant), U8(228),
+ B(Star), R(0),
+ B(LdaConstant), U8(229),
+ B(Star), R(0),
+ B(LdaConstant), U8(230),
+ B(Star), R(0),
+ B(LdaConstant), U8(231),
+ B(Star), R(0),
+ B(LdaConstant), U8(232),
+ B(Star), R(0),
+ B(LdaConstant), U8(233),
+ B(Star), R(0),
+ B(LdaConstant), U8(234),
+ B(Star), R(0),
+ B(LdaConstant), U8(235),
+ B(Star), R(0),
+ B(LdaConstant), U8(236),
+ B(Star), R(0),
+ B(LdaConstant), U8(237),
+ B(Star), R(0),
+ B(LdaConstant), U8(238),
+ B(Star), R(0),
+ B(LdaConstant), U8(239),
+ B(Star), R(0),
+ B(LdaConstant), U8(240),
+ B(Star), R(0),
+ B(LdaConstant), U8(241),
+ B(Star), R(0),
+ B(LdaConstant), U8(242),
+ B(Star), R(0),
+ B(LdaConstant), U8(243),
+ B(Star), R(0),
+ B(LdaConstant), U8(244),
+ B(Star), R(0),
+ B(LdaConstant), U8(245),
+ B(Star), R(0),
+ B(LdaConstant), U8(246),
+ B(Star), R(0),
+ B(LdaConstant), U8(247),
+ B(Star), R(0),
+ B(LdaConstant), U8(248),
+ B(Star), R(0),
+ B(LdaConstant), U8(249),
+ B(Star), R(0),
+ B(LdaConstant), U8(250),
+ B(Star), R(0),
+ B(LdaConstant), U8(251),
+ B(Star), R(0),
+ B(LdaConstant), U8(252),
+ B(Star), R(0),
+ B(LdaConstant), U8(253),
+ B(Star), R(0),
+ B(LdaConstant), U8(254),
+ B(Star), R(0),
+ B(LdaConstant), U8(255),
+ B(Star), R(0),
+ B(Wide), B(CreateObjectLiteral), U16(256), U16(0), U8(5),
+ B(Star), R(1),
+ B(Return),
+]
+constant pool: [
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::FIXED_ARRAY_TYPE,
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden
new file mode 100644
index 0000000000..7ae2d12190
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden
@@ -0,0 +1,74 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: mixed
+execute: yes
+wrap: no
+test function name: f
+
+---
+snippet: "
+ function Outer() {
+ var outerVar = 1;
+ function Inner(innerArg) {
+ this.innerFunc = function() { return outerVar * innerArg; }
+ }
+ this.getInnerFunc = function() { return new Inner(1).innerFunc; }
+ }
+ var f = new Outer().getInnerFunc();
+ f();
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 21
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(context),
+ B(Star), R(0),
+ B(LdaContextSlot), R(0), U8(1),
+ B(Star), R(0),
+ B(LdaContextSlot), R(0), U8(4),
+ B(Star), R(1),
+ B(LdaContextSlot), R(context), U8(4),
+ B(Mul), R(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ function Outer() {
+ var outerVar = 1;
+ function Inner(innerArg) {
+ this.innerFunc = function() { outerVar = innerArg; }
+ }
+ this.getInnerFunc = function() { return new Inner(1).innerFunc; }
+ }
+ var f = new Outer().getInnerFunc();
+ f();
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 22
+bytecodes: [
+ B(StackCheck),
+ B(LdaContextSlot), R(context), U8(4),
+ B(Star), R(0),
+ B(Ldar), R(context),
+ B(Star), R(1),
+ B(LdaContextSlot), R(1), U8(1),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(StaContextSlot), R(1), U8(4),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Parameters.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Parameters.golden
new file mode 100644
index 0000000000..9c65a5cc6e
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Parameters.golden
@@ -0,0 +1,139 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: mixed
+execute: yes
+wrap: no
+test function name: f
+
+---
+snippet: "
+ function f() { return this; }
+ f();
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 4
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(this),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(arg1) { return arg1; }
+ f();
+"
+frame size: 0
+parameter count: 2
+bytecode array length: 4
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(arg1) { return this; }
+ f();
+"
+frame size: 0
+parameter count: 2
+bytecode array length: 4
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(this),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(arg1, arg2, arg3, arg4, arg5, arg6, arg7) { return arg4; }
+ f();
+"
+frame size: 0
+parameter count: 8
+bytecode array length: 4
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(arg3),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(arg1, arg2, arg3, arg4, arg5, arg6, arg7) { return this; }
+ f();
+"
+frame size: 0
+parameter count: 8
+bytecode array length: 4
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(this),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(arg1) { arg1 = 1; }
+ f();
+"
+frame size: 0
+parameter count: 2
+bytecode array length: 7
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(arg0),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(arg1, arg2, arg3, arg4) { arg2 = 1; }
+ f();
+"
+frame size: 0
+parameter count: 5
+bytecode array length: 7
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(arg1),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveExpressions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveExpressions.golden
new file mode 100644
index 0000000000..28dcf65b7c
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveExpressions.golden
@@ -0,0 +1,276 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: number
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ var x = 0; return x;
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 5
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 0; return x + 3;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 11
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(3),
+ B(Add), R(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 0; return x - 3;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 11
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(3),
+ B(Sub), R(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 4; return x * 3;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 12
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(4),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(3),
+ B(Mul), R(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 4; return x / 3;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 12
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(4),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(3),
+ B(Div), R(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 4; return x % 3;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 12
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(4),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(3),
+ B(Mod), R(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 1; return x | 2;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 12
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(BitwiseOr), R(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 1; return x ^ 2;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 12
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(BitwiseXor), R(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 1; return x & 2;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 12
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(BitwiseAnd), R(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 10; return x << 3;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 12
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(10),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(3),
+ B(ShiftLeft), R(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 10; return x >> 3;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 12
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(10),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(3),
+ B(ShiftRight), R(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 10; return x >>> 3;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 12
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(10),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(3),
+ B(ShiftRightLogical), R(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 0; return (x, 3);
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 7
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(LdaSmi), U8(3),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveReturnStatements.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveReturnStatements.golden
new file mode 100644
index 0000000000..4b6f21686a
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveReturnStatements.golden
@@ -0,0 +1,177 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: number
+execute: yes
+wrap: yes
+
+---
+snippet: "
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 3
+bytecodes: [
+ B(StackCheck),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ return;
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 3
+bytecodes: [
+ B(StackCheck),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ return null;
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 3
+bytecodes: [
+ B(StackCheck),
+ B(LdaNull),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ return true;
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 3
+bytecodes: [
+ B(StackCheck),
+ B(LdaTrue),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ return false;
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 3
+bytecodes: [
+ B(StackCheck),
+ B(LdaFalse),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ return 0;
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 3
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ return +1;
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 4
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ return -1;
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 4
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(-1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ return +127;
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 4
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(127),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ return -128;
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 4
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(-128),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden
new file mode 100644
index 0000000000..f9f4b1df78
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden
@@ -0,0 +1,625 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: string
+execute: yes
+wrap: no
+test function name: f
+
+---
+snippet: "
+ function f(a) { return a.func(); }
+ f(new (function Obj() { this.func = function() { return; }})())
+"
+frame size: 2
+parameter count: 2
+bytecode array length: 17
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(3),
+ B(Star), R(0),
+ B(Call), R(0), R(1), U8(1), U8(1),
+ B(Return),
+]
+constant pool: [
+ "func",
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(a, b, c) { return a.func(b, c); }
+ f(new (function Obj() { this.func = function() { return; }})(), 1, 2)
+"
+frame size: 4
+parameter count: 4
+bytecode array length: 25
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(3),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(2),
+ B(Ldar), R(arg2),
+ B(Star), R(3),
+ B(Call), R(0), R(1), U8(3), U8(1),
+ B(Return),
+]
+constant pool: [
+ "func",
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(a, b) { return a.func(b + b, b); }
+ f(new (function Obj() { this.func = function() { return; }})(), 1)
+"
+frame size: 4
+parameter count: 3
+bytecode array length: 31
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(3),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(3),
+ B(Ldar), R(arg1),
+ B(Add), R(3),
+ B(Star), R(2),
+ B(Ldar), R(arg1),
+ B(Star), R(3),
+ B(Call), R(0), R(1), U8(3), U8(1),
+ B(Return),
+]
+constant pool: [
+ "func",
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(a) {
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ a.func;
+ return a.func(); }
+ f(new (function Obj() { this.func = function() { return; }})())
+"
+frame size: 2
+parameter count: 2
+bytecode array length: 1050
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(1),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(3),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(5),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(7),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(9),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(11),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(13),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(15),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(17),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(19),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(21),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(23),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(25),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(27),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(29),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(31),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(33),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(35),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(37),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(39),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(41),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(43),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(45),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(47),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(49),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(51),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(53),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(55),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(57),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(59),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(61),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(63),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(65),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(67),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(69),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(71),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(73),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(75),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(77),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(79),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(81),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(83),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(85),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(87),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(89),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(91),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(93),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(95),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(97),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(99),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(101),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(103),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(105),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(107),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(109),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(111),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(113),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(115),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(117),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(119),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(121),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(123),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(125),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(127),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(129),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(131),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(133),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(135),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(137),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(139),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(141),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(143),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(145),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(147),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(149),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(151),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(153),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(155),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(157),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(159),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(161),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(163),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(165),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(167),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(169),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(171),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(173),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(175),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(177),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(179),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(181),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(183),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(185),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(187),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(189),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(191),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(193),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(195),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(197),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(199),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(201),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(203),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(205),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(207),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(209),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(211),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(213),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(215),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(217),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(219),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(221),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(223),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(225),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(227),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(229),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(231),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(233),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(235),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(237),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(239),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(241),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(243),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(245),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(247),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(249),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(251),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(253),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(255),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Wide), B(LoadIC), R16(1), U16(0), U16(259),
+ B(Star), R(0),
+ B(Wide), B(Call), R16(0), R16(1), U16(1), U16(257),
+ B(Return),
+]
+constant pool: [
+ "func",
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden
new file mode 100644
index 0000000000..0033e00d53
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden
@@ -0,0 +1,1575 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: string
+execute: yes
+wrap: no
+test function name: f
+
+---
+snippet: "
+ function f(a) { return a.name; }
+ f({name : \"test\"});
+"
+frame size: 1
+parameter count: 2
+bytecode array length: 10
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(1),
+ B(Return),
+]
+constant pool: [
+ "name",
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(a) { return a[\"key\"]; }
+ f({key : \"test\"});
+"
+frame size: 1
+parameter count: 2
+bytecode array length: 10
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(1),
+ B(Return),
+]
+constant pool: [
+ "key",
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(a) { return a[100]; }
+ f({100 : \"test\"});
+"
+frame size: 1
+parameter count: 2
+bytecode array length: 11
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(100),
+ B(KeyedLoadIC), R(0), U8(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(a, b) { return a[b]; }
+ f({arg : \"test\"}, \"arg\");
+"
+frame size: 1
+parameter count: 3
+bytecode array length: 11
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(0), U8(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(a) { var b = a.name; return a[-124]; }
+ f({\"-124\" : \"test\", name : 123 })
+"
+frame size: 2
+parameter count: 2
+bytecode array length: 21
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(1),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LdaSmi), U8(-124),
+ B(KeyedLoadIC), R(1), U8(3),
+ B(Return),
+]
+constant pool: [
+ "name",
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(a) {
+ var b;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ b = a.name;
+ return a.name;
+ }
+ f({name : \"test\"})
+"
+frame size: 2
+parameter count: 2
+bytecode array length: 1294
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(1),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(3),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(5),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(7),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(9),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(11),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(13),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(15),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(17),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(19),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(21),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(23),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(25),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(27),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(29),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(31),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(33),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(35),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(37),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(39),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(41),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(43),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(45),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(47),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(49),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(51),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(53),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(55),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(57),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(59),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(61),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(63),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(65),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(67),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(69),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(71),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(73),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(75),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(77),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(79),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(81),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(83),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(85),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(87),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(89),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(91),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(93),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(95),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(97),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(99),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(101),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(103),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(105),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(107),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(109),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(111),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(113),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(115),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(117),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(119),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(121),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(123),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(125),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(127),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(129),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(131),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(133),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(135),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(137),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(139),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(141),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(143),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(145),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(147),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(149),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(151),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(153),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(155),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(157),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(159),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(161),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(163),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(165),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(167),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(169),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(171),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(173),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(175),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(177),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(179),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(181),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(183),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(185),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(187),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(189),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(191),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(193),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(195),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(197),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(199),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(201),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(203),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(205),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(207),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(209),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(211),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(213),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(215),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(217),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(219),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(221),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(223),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(225),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(227),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(229),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(231),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(233),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(235),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(237),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(239),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(241),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(243),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(245),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(247),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(249),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(251),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(253),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(0), U8(255),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Wide), B(LoadIC), R16(1), U16(0), U16(257),
+ B(Return),
+]
+constant pool: [
+ "name",
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(a, b) {
+ var c;
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ c = a[b];
+ return a[b];
+ }
+ f({name : \"test\"}, \"name\")
+"
+frame size: 2
+parameter count: 3
+bytecode array length: 1422
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(1),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(3),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(5),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(7),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(9),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(11),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(13),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(15),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(17),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(19),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(21),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(23),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(25),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(27),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(29),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(31),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(33),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(35),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(37),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(39),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(41),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(43),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(45),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(47),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(49),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(51),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(53),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(55),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(57),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(59),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(61),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(63),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(65),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(67),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(69),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(71),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(73),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(75),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(77),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(79),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(81),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(83),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(85),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(87),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(89),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(91),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(93),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(95),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(97),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(99),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(101),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(103),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(105),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(107),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(109),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(111),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(113),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(115),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(117),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(119),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(121),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(123),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(125),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(127),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(129),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(131),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(133),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(135),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(137),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(139),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(141),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(143),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(145),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(147),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(149),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(151),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(153),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(155),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(157),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(159),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(161),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(163),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(165),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(167),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(169),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(171),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(173),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(175),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(177),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(179),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(181),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(183),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(185),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(187),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(189),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(191),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(193),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(195),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(197),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(199),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(201),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(203),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(205),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(207),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(209),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(211),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(213),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(215),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(217),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(219),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(221),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(223),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(225),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(227),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(229),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(231),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(233),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(235),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(237),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(239),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(241),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(243),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(245),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(247),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(249),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(251),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(253),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(KeyedLoadIC), R(1), U8(255),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(Ldar), R(arg1),
+ B(Wide), B(KeyedLoadIC), R16(1), U16(257),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden
new file mode 100644
index 0000000000..cfec770752
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden
@@ -0,0 +1,3358 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: string
+execute: yes
+wrap: no
+test function name: f
+
+---
+snippet: "
+ function f(a) { a.name = \"val\"; }
+ f({name : \"test\"})
+"
+frame size: 1
+parameter count: 2
+bytecode array length: 13
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaConstant), U8(0),
+ B(StoreICSloppy), R(0), U8(1), U8(1),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ "val",
+ "name",
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(a) { a[\"key\"] = \"val\"; }
+ f({key : \"test\"})
+"
+frame size: 1
+parameter count: 2
+bytecode array length: 13
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaConstant), U8(0),
+ B(StoreICSloppy), R(0), U8(1), U8(1),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ "val",
+ "key",
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(a) { a[100] = \"val\"; }
+ f({100 : \"test\"})
+"
+frame size: 2
+parameter count: 2
+bytecode array length: 17
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(100),
+ B(Star), R(1),
+ B(LdaConstant), U8(0),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(1),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ "val",
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(a, b) { a[b] = \"val\"; }
+ f({arg : \"test\"}, \"arg\")
+"
+frame size: 2
+parameter count: 3
+bytecode array length: 17
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaConstant), U8(0),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(1),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ "val",
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(a) { a.name = a[-124]; }
+ f({\"-124\" : \"test\", name : 123 })
+"
+frame size: 2
+parameter count: 2
+bytecode array length: 20
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg0),
+ B(Star), R(1),
+ B(LdaSmi), U8(-124),
+ B(KeyedLoadIC), R(1), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(3),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ "name",
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(a) { \"use strict\"; a.name = \"val\"; }
+ f({name : \"test\"})
+"
+frame size: 1
+parameter count: 2
+bytecode array length: 13
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaConstant), U8(0),
+ B(StoreICStrict), R(0), U8(1), U8(1),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ "val",
+ "name",
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(a, b) { \"use strict\"; a[b] = \"val\"; }
+ f({arg : \"test\"}, \"arg\")
+"
+frame size: 2
+parameter count: 3
+bytecode array length: 17
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaConstant), U8(0),
+ B(KeyedStoreICStrict), R(0), R(1), U8(1),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ "val",
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(a) {
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 2;
+ }
+ f({name : \"test\"})
+"
+frame size: 1
+parameter count: 2
+bytecode array length: 1297
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(1),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(3),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(5),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(7),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(9),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(11),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(13),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(15),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(17),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(19),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(21),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(23),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(25),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(27),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(29),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(31),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(33),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(35),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(37),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(39),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(41),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(43),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(45),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(47),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(49),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(51),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(53),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(55),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(57),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(59),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(61),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(63),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(65),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(67),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(69),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(71),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(73),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(75),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(77),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(79),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(81),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(83),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(85),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(87),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(89),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(91),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(93),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(95),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(97),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(99),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(101),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(103),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(105),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(107),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(109),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(111),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(113),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(115),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(117),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(119),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(121),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(123),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(125),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(127),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(129),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(131),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(133),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(135),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(137),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(139),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(141),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(143),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(145),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(147),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(149),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(151),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(153),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(155),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(157),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(159),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(161),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(163),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(165),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(167),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(169),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(171),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(173),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(175),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(177),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(179),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(181),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(183),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(185),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(187),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(189),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(191),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(193),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(195),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(197),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(199),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(201),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(203),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(205),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(207),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(209),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(211),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(213),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(215),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(217),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(219),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(221),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(223),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(225),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(227),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(229),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(231),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(233),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(235),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(237),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(239),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(241),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(243),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(245),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(247),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(249),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(251),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(253),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICSloppy), R(0), U8(0), U8(255),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(2),
+ B(Wide), B(StoreICSloppy), R16(0), U16(0), U16(257),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ "name",
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(a) {
+ 'use strict';
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 1;
+ a.name = 2;
+ }
+ f({name : \"test\"})
+"
+frame size: 1
+parameter count: 2
+bytecode array length: 1297
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(1),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(3),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(5),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(7),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(9),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(11),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(13),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(15),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(17),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(19),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(21),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(23),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(25),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(27),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(29),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(31),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(33),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(35),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(37),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(39),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(41),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(43),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(45),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(47),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(49),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(51),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(53),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(55),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(57),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(59),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(61),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(63),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(65),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(67),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(69),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(71),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(73),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(75),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(77),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(79),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(81),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(83),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(85),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(87),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(89),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(91),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(93),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(95),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(97),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(99),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(101),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(103),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(105),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(107),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(109),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(111),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(113),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(115),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(117),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(119),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(121),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(123),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(125),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(127),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(129),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(131),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(133),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(135),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(137),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(139),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(141),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(143),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(145),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(147),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(149),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(151),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(153),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(155),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(157),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(159),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(161),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(163),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(165),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(167),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(169),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(171),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(173),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(175),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(177),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(179),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(181),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(183),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(185),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(187),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(189),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(191),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(193),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(195),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(197),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(199),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(201),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(203),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(205),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(207),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(209),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(211),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(213),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(215),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(217),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(219),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(221),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(223),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(225),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(227),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(229),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(231),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(233),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(235),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(237),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(239),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(241),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(243),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(245),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(247),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(249),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(251),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(253),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(StoreICStrict), R(0), U8(0), U8(255),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LdaSmi), U8(2),
+ B(Wide), B(StoreICStrict), R16(0), U16(0), U16(257),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ "name",
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(a, b) {
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 2;
+ }
+ f({name : \"test\"})
+"
+frame size: 2
+parameter count: 3
+bytecode array length: 1813
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(1),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(3),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(5),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(7),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(9),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(11),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(13),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(15),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(17),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(19),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(21),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(23),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(25),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(27),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(29),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(31),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(33),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(35),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(37),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(39),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(41),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(43),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(45),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(47),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(49),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(51),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(53),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(55),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(57),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(59),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(61),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(63),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(65),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(67),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(69),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(71),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(73),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(75),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(77),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(79),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(81),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(83),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(85),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(87),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(89),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(91),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(93),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(95),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(97),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(99),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(101),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(103),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(105),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(107),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(109),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(111),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(113),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(115),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(117),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(119),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(121),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(123),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(125),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(127),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(129),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(131),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(133),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(135),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(137),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(139),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(141),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(143),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(145),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(147),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(149),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(151),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(153),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(155),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(157),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(159),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(161),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(163),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(165),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(167),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(169),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(171),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(173),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(175),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(177),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(179),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(181),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(183),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(185),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(187),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(189),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(191),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(193),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(195),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(197),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(199),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(201),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(203),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(205),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(207),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(209),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(211),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(213),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(215),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(217),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(219),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(221),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(223),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(225),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(227),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(229),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(231),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(233),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(235),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(237),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(239),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(241),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(243),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(245),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(247),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(249),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(251),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(253),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICSloppy), R(0), R(1), U8(255),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Wide), B(KeyedStoreICSloppy), R16(0), R16(1), U16(257),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(a, b) {
+ 'use strict';
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 1;
+ a[b] = 2;
+ }
+ f({name : \"test\"})
+"
+frame size: 2
+parameter count: 3
+bytecode array length: 1813
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(1),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(3),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(5),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(7),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(9),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(11),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(13),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(15),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(17),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(19),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(21),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(23),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(25),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(27),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(29),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(31),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(33),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(35),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(37),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(39),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(41),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(43),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(45),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(47),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(49),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(51),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(53),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(55),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(57),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(59),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(61),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(63),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(65),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(67),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(69),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(71),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(73),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(75),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(77),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(79),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(81),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(83),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(85),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(87),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(89),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(91),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(93),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(95),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(97),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(99),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(101),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(103),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(105),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(107),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(109),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(111),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(113),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(115),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(117),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(119),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(121),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(123),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(125),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(127),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(129),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(131),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(133),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(135),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(137),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(139),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(141),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(143),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(145),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(147),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(149),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(151),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(153),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(155),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(157),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(159),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(161),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(163),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(165),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(167),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(169),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(171),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(173),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(175),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(177),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(179),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(181),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(183),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(185),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(187),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(189),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(191),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(193),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(195),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(197),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(199),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(201),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(203),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(205),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(207),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(209),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(211),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(213),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(215),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(217),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(219),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(221),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(223),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(225),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(227),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(229),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(231),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(233),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(235),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(237),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(239),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(241),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(243),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(245),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(247),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(249),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(251),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(253),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(KeyedStoreICStrict), R(0), R(1), U8(255),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(Ldar), R(arg1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Wide), B(KeyedStoreICStrict), R16(0), R16(1), U16(257),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden
new file mode 100644
index 0000000000..26b4992a64
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden
@@ -0,0 +1,70 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: string
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ return /ab+d/;
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 6
+bytecodes: [
+ B(StackCheck),
+ B(CreateRegExpLiteral), U8(0), U8(0), U8(0),
+ B(Return),
+]
+constant pool: [
+ "ab+d",
+]
+handlers: [
+]
+
+---
+snippet: "
+ return /(\\w+)\\s(\\w+)/i;
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 6
+bytecodes: [
+ B(StackCheck),
+ B(CreateRegExpLiteral), U8(0), U8(0), U8(2),
+ B(Return),
+]
+constant pool: [
+ "(\x5cw+)\x5cs(\x5cw+)",
+]
+handlers: [
+]
+
+---
+snippet: "
+ return /ab+d/.exec('abdd');
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 23
+bytecodes: [
+ B(StackCheck),
+ B(CreateRegExpLiteral), U8(0), U8(0), U8(0),
+ B(Star), R(1),
+ B(LoadIC), R(1), U8(1), U8(3),
+ B(Star), R(0),
+ B(LdaConstant), U8(2),
+ B(Star), R(2),
+ B(Call), R(0), R(1), U8(2), U8(1),
+ B(Return),
+]
+constant pool: [
+ "ab+d",
+ "exec",
+ "abdd",
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiteralsWide.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiteralsWide.golden
new file mode 100644
index 0000000000..4f0248864e
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiteralsWide.golden
@@ -0,0 +1,1051 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: mixed
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ var a;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ a = 1.23;
+ return /ab+d/;
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 1033
+bytecodes: [
+ B(StackCheck),
+ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ B(LdaConstant), U8(1),
+ B(Star), R(0),
+ B(LdaConstant), U8(2),
+ B(Star), R(0),
+ B(LdaConstant), U8(3),
+ B(Star), R(0),
+ B(LdaConstant), U8(4),
+ B(Star), R(0),
+ B(LdaConstant), U8(5),
+ B(Star), R(0),
+ B(LdaConstant), U8(6),
+ B(Star), R(0),
+ B(LdaConstant), U8(7),
+ B(Star), R(0),
+ B(LdaConstant), U8(8),
+ B(Star), R(0),
+ B(LdaConstant), U8(9),
+ B(Star), R(0),
+ B(LdaConstant), U8(10),
+ B(Star), R(0),
+ B(LdaConstant), U8(11),
+ B(Star), R(0),
+ B(LdaConstant), U8(12),
+ B(Star), R(0),
+ B(LdaConstant), U8(13),
+ B(Star), R(0),
+ B(LdaConstant), U8(14),
+ B(Star), R(0),
+ B(LdaConstant), U8(15),
+ B(Star), R(0),
+ B(LdaConstant), U8(16),
+ B(Star), R(0),
+ B(LdaConstant), U8(17),
+ B(Star), R(0),
+ B(LdaConstant), U8(18),
+ B(Star), R(0),
+ B(LdaConstant), U8(19),
+ B(Star), R(0),
+ B(LdaConstant), U8(20),
+ B(Star), R(0),
+ B(LdaConstant), U8(21),
+ B(Star), R(0),
+ B(LdaConstant), U8(22),
+ B(Star), R(0),
+ B(LdaConstant), U8(23),
+ B(Star), R(0),
+ B(LdaConstant), U8(24),
+ B(Star), R(0),
+ B(LdaConstant), U8(25),
+ B(Star), R(0),
+ B(LdaConstant), U8(26),
+ B(Star), R(0),
+ B(LdaConstant), U8(27),
+ B(Star), R(0),
+ B(LdaConstant), U8(28),
+ B(Star), R(0),
+ B(LdaConstant), U8(29),
+ B(Star), R(0),
+ B(LdaConstant), U8(30),
+ B(Star), R(0),
+ B(LdaConstant), U8(31),
+ B(Star), R(0),
+ B(LdaConstant), U8(32),
+ B(Star), R(0),
+ B(LdaConstant), U8(33),
+ B(Star), R(0),
+ B(LdaConstant), U8(34),
+ B(Star), R(0),
+ B(LdaConstant), U8(35),
+ B(Star), R(0),
+ B(LdaConstant), U8(36),
+ B(Star), R(0),
+ B(LdaConstant), U8(37),
+ B(Star), R(0),
+ B(LdaConstant), U8(38),
+ B(Star), R(0),
+ B(LdaConstant), U8(39),
+ B(Star), R(0),
+ B(LdaConstant), U8(40),
+ B(Star), R(0),
+ B(LdaConstant), U8(41),
+ B(Star), R(0),
+ B(LdaConstant), U8(42),
+ B(Star), R(0),
+ B(LdaConstant), U8(43),
+ B(Star), R(0),
+ B(LdaConstant), U8(44),
+ B(Star), R(0),
+ B(LdaConstant), U8(45),
+ B(Star), R(0),
+ B(LdaConstant), U8(46),
+ B(Star), R(0),
+ B(LdaConstant), U8(47),
+ B(Star), R(0),
+ B(LdaConstant), U8(48),
+ B(Star), R(0),
+ B(LdaConstant), U8(49),
+ B(Star), R(0),
+ B(LdaConstant), U8(50),
+ B(Star), R(0),
+ B(LdaConstant), U8(51),
+ B(Star), R(0),
+ B(LdaConstant), U8(52),
+ B(Star), R(0),
+ B(LdaConstant), U8(53),
+ B(Star), R(0),
+ B(LdaConstant), U8(54),
+ B(Star), R(0),
+ B(LdaConstant), U8(55),
+ B(Star), R(0),
+ B(LdaConstant), U8(56),
+ B(Star), R(0),
+ B(LdaConstant), U8(57),
+ B(Star), R(0),
+ B(LdaConstant), U8(58),
+ B(Star), R(0),
+ B(LdaConstant), U8(59),
+ B(Star), R(0),
+ B(LdaConstant), U8(60),
+ B(Star), R(0),
+ B(LdaConstant), U8(61),
+ B(Star), R(0),
+ B(LdaConstant), U8(62),
+ B(Star), R(0),
+ B(LdaConstant), U8(63),
+ B(Star), R(0),
+ B(LdaConstant), U8(64),
+ B(Star), R(0),
+ B(LdaConstant), U8(65),
+ B(Star), R(0),
+ B(LdaConstant), U8(66),
+ B(Star), R(0),
+ B(LdaConstant), U8(67),
+ B(Star), R(0),
+ B(LdaConstant), U8(68),
+ B(Star), R(0),
+ B(LdaConstant), U8(69),
+ B(Star), R(0),
+ B(LdaConstant), U8(70),
+ B(Star), R(0),
+ B(LdaConstant), U8(71),
+ B(Star), R(0),
+ B(LdaConstant), U8(72),
+ B(Star), R(0),
+ B(LdaConstant), U8(73),
+ B(Star), R(0),
+ B(LdaConstant), U8(74),
+ B(Star), R(0),
+ B(LdaConstant), U8(75),
+ B(Star), R(0),
+ B(LdaConstant), U8(76),
+ B(Star), R(0),
+ B(LdaConstant), U8(77),
+ B(Star), R(0),
+ B(LdaConstant), U8(78),
+ B(Star), R(0),
+ B(LdaConstant), U8(79),
+ B(Star), R(0),
+ B(LdaConstant), U8(80),
+ B(Star), R(0),
+ B(LdaConstant), U8(81),
+ B(Star), R(0),
+ B(LdaConstant), U8(82),
+ B(Star), R(0),
+ B(LdaConstant), U8(83),
+ B(Star), R(0),
+ B(LdaConstant), U8(84),
+ B(Star), R(0),
+ B(LdaConstant), U8(85),
+ B(Star), R(0),
+ B(LdaConstant), U8(86),
+ B(Star), R(0),
+ B(LdaConstant), U8(87),
+ B(Star), R(0),
+ B(LdaConstant), U8(88),
+ B(Star), R(0),
+ B(LdaConstant), U8(89),
+ B(Star), R(0),
+ B(LdaConstant), U8(90),
+ B(Star), R(0),
+ B(LdaConstant), U8(91),
+ B(Star), R(0),
+ B(LdaConstant), U8(92),
+ B(Star), R(0),
+ B(LdaConstant), U8(93),
+ B(Star), R(0),
+ B(LdaConstant), U8(94),
+ B(Star), R(0),
+ B(LdaConstant), U8(95),
+ B(Star), R(0),
+ B(LdaConstant), U8(96),
+ B(Star), R(0),
+ B(LdaConstant), U8(97),
+ B(Star), R(0),
+ B(LdaConstant), U8(98),
+ B(Star), R(0),
+ B(LdaConstant), U8(99),
+ B(Star), R(0),
+ B(LdaConstant), U8(100),
+ B(Star), R(0),
+ B(LdaConstant), U8(101),
+ B(Star), R(0),
+ B(LdaConstant), U8(102),
+ B(Star), R(0),
+ B(LdaConstant), U8(103),
+ B(Star), R(0),
+ B(LdaConstant), U8(104),
+ B(Star), R(0),
+ B(LdaConstant), U8(105),
+ B(Star), R(0),
+ B(LdaConstant), U8(106),
+ B(Star), R(0),
+ B(LdaConstant), U8(107),
+ B(Star), R(0),
+ B(LdaConstant), U8(108),
+ B(Star), R(0),
+ B(LdaConstant), U8(109),
+ B(Star), R(0),
+ B(LdaConstant), U8(110),
+ B(Star), R(0),
+ B(LdaConstant), U8(111),
+ B(Star), R(0),
+ B(LdaConstant), U8(112),
+ B(Star), R(0),
+ B(LdaConstant), U8(113),
+ B(Star), R(0),
+ B(LdaConstant), U8(114),
+ B(Star), R(0),
+ B(LdaConstant), U8(115),
+ B(Star), R(0),
+ B(LdaConstant), U8(116),
+ B(Star), R(0),
+ B(LdaConstant), U8(117),
+ B(Star), R(0),
+ B(LdaConstant), U8(118),
+ B(Star), R(0),
+ B(LdaConstant), U8(119),
+ B(Star), R(0),
+ B(LdaConstant), U8(120),
+ B(Star), R(0),
+ B(LdaConstant), U8(121),
+ B(Star), R(0),
+ B(LdaConstant), U8(122),
+ B(Star), R(0),
+ B(LdaConstant), U8(123),
+ B(Star), R(0),
+ B(LdaConstant), U8(124),
+ B(Star), R(0),
+ B(LdaConstant), U8(125),
+ B(Star), R(0),
+ B(LdaConstant), U8(126),
+ B(Star), R(0),
+ B(LdaConstant), U8(127),
+ B(Star), R(0),
+ B(LdaConstant), U8(128),
+ B(Star), R(0),
+ B(LdaConstant), U8(129),
+ B(Star), R(0),
+ B(LdaConstant), U8(130),
+ B(Star), R(0),
+ B(LdaConstant), U8(131),
+ B(Star), R(0),
+ B(LdaConstant), U8(132),
+ B(Star), R(0),
+ B(LdaConstant), U8(133),
+ B(Star), R(0),
+ B(LdaConstant), U8(134),
+ B(Star), R(0),
+ B(LdaConstant), U8(135),
+ B(Star), R(0),
+ B(LdaConstant), U8(136),
+ B(Star), R(0),
+ B(LdaConstant), U8(137),
+ B(Star), R(0),
+ B(LdaConstant), U8(138),
+ B(Star), R(0),
+ B(LdaConstant), U8(139),
+ B(Star), R(0),
+ B(LdaConstant), U8(140),
+ B(Star), R(0),
+ B(LdaConstant), U8(141),
+ B(Star), R(0),
+ B(LdaConstant), U8(142),
+ B(Star), R(0),
+ B(LdaConstant), U8(143),
+ B(Star), R(0),
+ B(LdaConstant), U8(144),
+ B(Star), R(0),
+ B(LdaConstant), U8(145),
+ B(Star), R(0),
+ B(LdaConstant), U8(146),
+ B(Star), R(0),
+ B(LdaConstant), U8(147),
+ B(Star), R(0),
+ B(LdaConstant), U8(148),
+ B(Star), R(0),
+ B(LdaConstant), U8(149),
+ B(Star), R(0),
+ B(LdaConstant), U8(150),
+ B(Star), R(0),
+ B(LdaConstant), U8(151),
+ B(Star), R(0),
+ B(LdaConstant), U8(152),
+ B(Star), R(0),
+ B(LdaConstant), U8(153),
+ B(Star), R(0),
+ B(LdaConstant), U8(154),
+ B(Star), R(0),
+ B(LdaConstant), U8(155),
+ B(Star), R(0),
+ B(LdaConstant), U8(156),
+ B(Star), R(0),
+ B(LdaConstant), U8(157),
+ B(Star), R(0),
+ B(LdaConstant), U8(158),
+ B(Star), R(0),
+ B(LdaConstant), U8(159),
+ B(Star), R(0),
+ B(LdaConstant), U8(160),
+ B(Star), R(0),
+ B(LdaConstant), U8(161),
+ B(Star), R(0),
+ B(LdaConstant), U8(162),
+ B(Star), R(0),
+ B(LdaConstant), U8(163),
+ B(Star), R(0),
+ B(LdaConstant), U8(164),
+ B(Star), R(0),
+ B(LdaConstant), U8(165),
+ B(Star), R(0),
+ B(LdaConstant), U8(166),
+ B(Star), R(0),
+ B(LdaConstant), U8(167),
+ B(Star), R(0),
+ B(LdaConstant), U8(168),
+ B(Star), R(0),
+ B(LdaConstant), U8(169),
+ B(Star), R(0),
+ B(LdaConstant), U8(170),
+ B(Star), R(0),
+ B(LdaConstant), U8(171),
+ B(Star), R(0),
+ B(LdaConstant), U8(172),
+ B(Star), R(0),
+ B(LdaConstant), U8(173),
+ B(Star), R(0),
+ B(LdaConstant), U8(174),
+ B(Star), R(0),
+ B(LdaConstant), U8(175),
+ B(Star), R(0),
+ B(LdaConstant), U8(176),
+ B(Star), R(0),
+ B(LdaConstant), U8(177),
+ B(Star), R(0),
+ B(LdaConstant), U8(178),
+ B(Star), R(0),
+ B(LdaConstant), U8(179),
+ B(Star), R(0),
+ B(LdaConstant), U8(180),
+ B(Star), R(0),
+ B(LdaConstant), U8(181),
+ B(Star), R(0),
+ B(LdaConstant), U8(182),
+ B(Star), R(0),
+ B(LdaConstant), U8(183),
+ B(Star), R(0),
+ B(LdaConstant), U8(184),
+ B(Star), R(0),
+ B(LdaConstant), U8(185),
+ B(Star), R(0),
+ B(LdaConstant), U8(186),
+ B(Star), R(0),
+ B(LdaConstant), U8(187),
+ B(Star), R(0),
+ B(LdaConstant), U8(188),
+ B(Star), R(0),
+ B(LdaConstant), U8(189),
+ B(Star), R(0),
+ B(LdaConstant), U8(190),
+ B(Star), R(0),
+ B(LdaConstant), U8(191),
+ B(Star), R(0),
+ B(LdaConstant), U8(192),
+ B(Star), R(0),
+ B(LdaConstant), U8(193),
+ B(Star), R(0),
+ B(LdaConstant), U8(194),
+ B(Star), R(0),
+ B(LdaConstant), U8(195),
+ B(Star), R(0),
+ B(LdaConstant), U8(196),
+ B(Star), R(0),
+ B(LdaConstant), U8(197),
+ B(Star), R(0),
+ B(LdaConstant), U8(198),
+ B(Star), R(0),
+ B(LdaConstant), U8(199),
+ B(Star), R(0),
+ B(LdaConstant), U8(200),
+ B(Star), R(0),
+ B(LdaConstant), U8(201),
+ B(Star), R(0),
+ B(LdaConstant), U8(202),
+ B(Star), R(0),
+ B(LdaConstant), U8(203),
+ B(Star), R(0),
+ B(LdaConstant), U8(204),
+ B(Star), R(0),
+ B(LdaConstant), U8(205),
+ B(Star), R(0),
+ B(LdaConstant), U8(206),
+ B(Star), R(0),
+ B(LdaConstant), U8(207),
+ B(Star), R(0),
+ B(LdaConstant), U8(208),
+ B(Star), R(0),
+ B(LdaConstant), U8(209),
+ B(Star), R(0),
+ B(LdaConstant), U8(210),
+ B(Star), R(0),
+ B(LdaConstant), U8(211),
+ B(Star), R(0),
+ B(LdaConstant), U8(212),
+ B(Star), R(0),
+ B(LdaConstant), U8(213),
+ B(Star), R(0),
+ B(LdaConstant), U8(214),
+ B(Star), R(0),
+ B(LdaConstant), U8(215),
+ B(Star), R(0),
+ B(LdaConstant), U8(216),
+ B(Star), R(0),
+ B(LdaConstant), U8(217),
+ B(Star), R(0),
+ B(LdaConstant), U8(218),
+ B(Star), R(0),
+ B(LdaConstant), U8(219),
+ B(Star), R(0),
+ B(LdaConstant), U8(220),
+ B(Star), R(0),
+ B(LdaConstant), U8(221),
+ B(Star), R(0),
+ B(LdaConstant), U8(222),
+ B(Star), R(0),
+ B(LdaConstant), U8(223),
+ B(Star), R(0),
+ B(LdaConstant), U8(224),
+ B(Star), R(0),
+ B(LdaConstant), U8(225),
+ B(Star), R(0),
+ B(LdaConstant), U8(226),
+ B(Star), R(0),
+ B(LdaConstant), U8(227),
+ B(Star), R(0),
+ B(LdaConstant), U8(228),
+ B(Star), R(0),
+ B(LdaConstant), U8(229),
+ B(Star), R(0),
+ B(LdaConstant), U8(230),
+ B(Star), R(0),
+ B(LdaConstant), U8(231),
+ B(Star), R(0),
+ B(LdaConstant), U8(232),
+ B(Star), R(0),
+ B(LdaConstant), U8(233),
+ B(Star), R(0),
+ B(LdaConstant), U8(234),
+ B(Star), R(0),
+ B(LdaConstant), U8(235),
+ B(Star), R(0),
+ B(LdaConstant), U8(236),
+ B(Star), R(0),
+ B(LdaConstant), U8(237),
+ B(Star), R(0),
+ B(LdaConstant), U8(238),
+ B(Star), R(0),
+ B(LdaConstant), U8(239),
+ B(Star), R(0),
+ B(LdaConstant), U8(240),
+ B(Star), R(0),
+ B(LdaConstant), U8(241),
+ B(Star), R(0),
+ B(LdaConstant), U8(242),
+ B(Star), R(0),
+ B(LdaConstant), U8(243),
+ B(Star), R(0),
+ B(LdaConstant), U8(244),
+ B(Star), R(0),
+ B(LdaConstant), U8(245),
+ B(Star), R(0),
+ B(LdaConstant), U8(246),
+ B(Star), R(0),
+ B(LdaConstant), U8(247),
+ B(Star), R(0),
+ B(LdaConstant), U8(248),
+ B(Star), R(0),
+ B(LdaConstant), U8(249),
+ B(Star), R(0),
+ B(LdaConstant), U8(250),
+ B(Star), R(0),
+ B(LdaConstant), U8(251),
+ B(Star), R(0),
+ B(LdaConstant), U8(252),
+ B(Star), R(0),
+ B(LdaConstant), U8(253),
+ B(Star), R(0),
+ B(LdaConstant), U8(254),
+ B(Star), R(0),
+ B(LdaConstant), U8(255),
+ B(Star), R(0),
+ B(Wide), B(CreateRegExpLiteral), U16(256), U16(0), U8(0),
+ B(Return),
+]
+constant pool: [
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::HEAP_NUMBER_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/RemoveRedundantLdar.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/RemoveRedundantLdar.golden
new file mode 100644
index 0000000000..596a571dd0
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/RemoveRedundantLdar.golden
@@ -0,0 +1,103 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: number
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ var ld_a = 1;
+ while(true) {
+ ld_a = ld_a + ld_a;
+ if (ld_a > 10) break;
+ }
+ return ld_a;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 31
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(StackCheck),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(Add), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(10),
+ B(TestGreaterThan), R(1),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(4),
+ B(Jump), U8(-21),
+ B(Ldar), R(0),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var ld_a = 1;
+ do {
+ ld_a = ld_a + ld_a;
+ if (ld_a > 10) continue;
+ } while(false);
+ return ld_a;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 29
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(StackCheck),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(Add), R(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(10),
+ B(TestGreaterThan), R(1),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(2),
+ B(Ldar), R(0),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var ld_a = 1;
+ ld_a = ld_a + ld_a;
+ return ld_a;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 14
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(Add), R(1),
+ B(Star), R(0),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden
new file mode 100644
index 0000000000..f31bb2e7af
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden
@@ -0,0 +1,1171 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: string
+execute: yes
+wrap: no
+test function name: f
+
+---
+snippet: "
+ var a = 1;
+ function f() { a = 2; }
+ f();
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 8
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(2),
+ B(StaGlobalSloppy), U8(0), U8(1),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ "a",
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = \"test\"; function f(b) { a = b; }
+ f(\"global\");
+"
+frame size: 0
+parameter count: 2
+bytecode array length: 8
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(StaGlobalSloppy), U8(0), U8(1),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ "a",
+]
+handlers: [
+]
+
+---
+snippet: "
+ 'use strict'; var a = 1;
+ function f() { a = 2; }
+ f();
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 8
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(2),
+ B(StaGlobalStrict), U8(0), U8(1),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ "a",
+]
+handlers: [
+]
+
+---
+snippet: "
+ a = 1;
+ function f() { a = 2; }
+ f();
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 8
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(2),
+ B(StaGlobalSloppy), U8(0), U8(1),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ "a",
+]
+handlers: [
+]
+
+---
+snippet: "
+ a = 1;
+ function f(b) {
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ a = 2;
+ }
+ f({name: 1});
+"
+frame size: 1
+parameter count: 2
+bytecode array length: 1035
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(1),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(3),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(5),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(7),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(9),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(11),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(13),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(15),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(17),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(19),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(21),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(23),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(25),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(27),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(29),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(31),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(33),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(35),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(37),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(39),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(41),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(43),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(45),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(47),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(49),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(51),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(53),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(55),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(57),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(59),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(61),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(63),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(65),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(67),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(69),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(71),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(73),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(75),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(77),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(79),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(81),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(83),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(85),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(87),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(89),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(91),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(93),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(95),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(97),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(99),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(101),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(103),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(105),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(107),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(109),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(111),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(113),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(115),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(117),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(119),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(121),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(123),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(125),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(127),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(129),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(131),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(133),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(135),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(137),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(139),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(141),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(143),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(145),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(147),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(149),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(151),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(153),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(155),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(157),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(159),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(161),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(163),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(165),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(167),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(169),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(171),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(173),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(175),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(177),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(179),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(181),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(183),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(185),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(187),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(189),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(191),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(193),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(195),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(197),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(199),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(201),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(203),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(205),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(207),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(209),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(211),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(213),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(215),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(217),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(219),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(221),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(223),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(225),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(227),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(229),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(231),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(233),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(235),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(237),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(239),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(241),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(243),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(245),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(247),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(249),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(251),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(253),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(255),
+ B(LdaSmi), U8(2),
+ B(Wide), B(StaGlobalSloppy), U16(1), U16(257),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ "name",
+ "a",
+]
+handlers: [
+]
+
+---
+snippet: "
+ a = 1;
+ function f(b) {
+ 'use strict';
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ b.name;
+ a = 2;
+ }
+ f({name: 1});
+"
+frame size: 1
+parameter count: 2
+bytecode array length: 1035
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(1),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(3),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(5),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(7),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(9),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(11),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(13),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(15),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(17),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(19),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(21),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(23),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(25),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(27),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(29),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(31),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(33),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(35),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(37),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(39),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(41),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(43),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(45),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(47),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(49),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(51),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(53),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(55),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(57),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(59),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(61),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(63),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(65),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(67),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(69),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(71),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(73),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(75),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(77),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(79),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(81),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(83),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(85),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(87),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(89),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(91),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(93),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(95),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(97),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(99),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(101),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(103),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(105),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(107),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(109),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(111),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(113),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(115),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(117),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(119),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(121),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(123),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(125),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(127),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(129),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(131),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(133),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(135),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(137),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(139),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(141),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(143),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(145),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(147),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(149),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(151),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(153),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(155),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(157),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(159),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(161),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(163),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(165),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(167),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(169),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(171),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(173),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(175),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(177),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(179),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(181),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(183),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(185),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(187),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(189),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(191),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(193),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(195),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(197),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(199),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(201),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(203),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(205),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(207),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(209),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(211),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(213),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(215),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(217),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(219),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(221),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(223),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(225),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(227),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(229),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(231),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(233),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(235),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(237),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(239),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(241),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(243),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(245),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(247),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(249),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(251),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(253),
+ B(Ldar), R(arg0),
+ B(Star), R(0),
+ B(LoadIC), R(0), U8(0), U8(255),
+ B(LdaSmi), U8(2),
+ B(Wide), B(StaGlobalStrict), U16(1), U16(257),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ "name",
+ "a",
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StringConstants.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StringConstants.golden
new file mode 100644
index 0000000000..6ff6b5d17e
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StringConstants.golden
@@ -0,0 +1,67 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: string
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ return \"This is a string\";
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 4
+bytecodes: [
+ B(StackCheck),
+ B(LdaConstant), U8(0),
+ B(Return),
+]
+constant pool: [
+ "This is a string",
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = \"First string\"; return \"Second string\";
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 8
+bytecodes: [
+ B(StackCheck),
+ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ B(LdaConstant), U8(1),
+ B(Return),
+]
+constant pool: [
+ "First string",
+ "Second string",
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = \"Same string\"; return \"Same string\";
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 8
+bytecodes: [
+ B(StackCheck),
+ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ B(LdaConstant), U8(0),
+ B(Return),
+]
+constant pool: [
+ "Same string",
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden
new file mode 100644
index 0000000000..ec879086a3
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden
@@ -0,0 +1,523 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: number
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ var a = 1;
+ switch(a) {
+ case 1: return 2;
+ case 2: return 3;
+ }
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 31
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(TestEqualStrict), R(2),
+ B(JumpIfTrue), U8(10),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(2),
+ B(JumpIfTrue), U8(7),
+ B(Jump), U8(8),
+ B(LdaSmi), U8(2),
+ B(Return),
+ B(LdaSmi), U8(3),
+ B(Return),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 1;
+ switch(a) {
+ case 1: a = 2; break;
+ case 2: a = 3; break;
+ }
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 37
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(TestEqualStrict), R(2),
+ B(JumpIfTrue), U8(10),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(2),
+ B(JumpIfTrue), U8(10),
+ B(Jump), U8(14),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(Jump), U8(8),
+ B(LdaSmi), U8(3),
+ B(Star), R(1),
+ B(Jump), U8(2),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 1;
+ switch(a) {
+ case 1: a = 2; // fall-through
+ case 2: a = 3; break;
+ }
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 35
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(TestEqualStrict), R(2),
+ B(JumpIfTrue), U8(10),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(2),
+ B(JumpIfTrue), U8(8),
+ B(Jump), U8(12),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(3),
+ B(Star), R(1),
+ B(Jump), U8(2),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 1;
+ switch(a) {
+ case 2: break;
+ case 3: break;
+ default: a = 1; break;
+ }
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 35
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(2),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(2),
+ B(JumpIfTrue), U8(10),
+ B(LdaSmi), U8(3),
+ B(TestEqualStrict), R(2),
+ B(JumpIfTrue), U8(6),
+ B(Jump), U8(6),
+ B(Jump), U8(10),
+ B(Jump), U8(8),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(Jump), U8(2),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 1;
+ switch(typeof(a)) {
+ case 2: a = 1; break;
+ case 3: a = 2; break;
+ default: a = 3; break;
+ }
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 44
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(TypeOf),
+ B(Star), R(0),
+ B(Star), R(2),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(2),
+ B(JumpIfTrue), U8(10),
+ B(LdaSmi), U8(3),
+ B(TestEqualStrict), R(2),
+ B(JumpIfTrue), U8(10),
+ B(Jump), U8(14),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(Jump), U8(14),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(Jump), U8(8),
+ B(LdaSmi), U8(3),
+ B(Star), R(1),
+ B(Jump), U8(2),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 1;
+ switch(a) {
+ case typeof(a): a = 1; break;
+ default: a = 2; break;
+ }
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 32
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(2),
+ B(Ldar), R(1),
+ B(TypeOf),
+ B(TestEqualStrict), R(2),
+ B(JumpIfTrue), U8(4),
+ B(Jump), U8(8),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(Jump), U8(8),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(Jump), U8(2),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 1;
+ switch(a) {
+ case 1:
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ a = 2;
+ break;
+ case 2:
+ a = 3;
+ break;
+ }
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 289
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(Star), R(0),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(TestEqualStrict), R(2),
+ B(JumpIfTrue), U8(10),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(2),
+ B(JumpIfTrueConstant), U8(0),
+ B(JumpConstant), U8(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(Jump), U8(8),
+ B(LdaSmi), U8(3),
+ B(Star), R(1),
+ B(Jump), U8(2),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ 262,
+ 266,
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 1;
+ switch(a) {
+ case 1:
+ switch(a + 1) {
+ case 2 : a = 1; break;
+ default : a = 2; break;
+ } // fall-through
+ case 2: a = 3;
+ }
+"
+frame size: 5
+parameter count: 1
+bytecode array length: 61
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(2),
+ B(Star), R(0),
+ B(Star), R(3),
+ B(LdaSmi), U8(1),
+ B(TestEqualStrict), R(3),
+ B(JumpIfTrue), U8(10),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(3),
+ B(JumpIfTrue), U8(36),
+ B(Jump), U8(38),
+ B(Ldar), R(2),
+ B(Star), R(4),
+ B(LdaSmi), U8(1),
+ B(Add), R(4),
+ B(Star), R(1),
+ B(Star), R(4),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(4),
+ B(JumpIfTrue), U8(4),
+ B(Jump), U8(8),
+ B(LdaSmi), U8(1),
+ B(Star), R(2),
+ B(Jump), U8(8),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(Jump), U8(2),
+ B(LdaSmi), U8(3),
+ B(Star), R(2),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ThisFunction.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ThisFunction.golden
new file mode 100644
index 0000000000..6a018eaa04
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ThisFunction.golden
@@ -0,0 +1,65 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: number
+execute: yes
+wrap: no
+test function name: f
+
+---
+snippet: "
+ var f;
+ f = function f() {};
+ f();
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 19
+bytecodes: [
+ B(LdaTheHole),
+ B(Star), R(0),
+ B(StackCheck),
+ B(Ldar), R(closure),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(JumpIfNotHole), U8(5),
+ B(Mov), R(1), R(0),
+ B(Ldar), R(1),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var f;
+ f = function f() { return f; };
+ f();
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 23
+bytecodes: [
+ B(LdaTheHole),
+ B(Star), R(0),
+ B(StackCheck),
+ B(Ldar), R(closure),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(JumpIfNotHole), U8(5),
+ B(Mov), R(1), R(0),
+ B(Ldar), R(1),
+ B(Ldar), R(0),
+ B(JumpIfNotHole), U8(3),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Throw.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Throw.golden
new file mode 100644
index 0000000000..79ccb2d1e5
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Throw.golden
@@ -0,0 +1,66 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: string
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ throw 1;
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 4
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Throw),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ throw 'Error';
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 4
+bytecodes: [
+ B(StackCheck),
+ B(LdaConstant), U8(0),
+ B(Throw),
+]
+constant pool: [
+ "Error",
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = 1; if (a) { throw 'Error'; };
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 12
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(JumpIfToBooleanFalse), U8(5),
+ B(LdaConstant), U8(0),
+ B(Throw),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ "Error",
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden
new file mode 100644
index 0000000000..fefa9a511e
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden
@@ -0,0 +1,47 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: mixed
+execute: no
+wrap: no
+top level: yes
+
+---
+snippet: "
+ var a = { func: function() { } };
+"
+frame size: 5
+parameter count: 1
+bytecode array length: 44
+bytecodes: [
+ B(LdaConstant), U8(0),
+ B(Star), R(1),
+ B(LdaZero),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kDeclareGlobals), R(1), U8(2),
+ B(StackCheck),
+ B(LdaConstant), U8(1),
+ B(Star), R(1),
+ B(LdaZero),
+ B(Star), R(2),
+ B(CreateObjectLiteral), U8(2), U8(0), U8(5),
+ B(Star), R(4),
+ B(CreateClosure), U8(3), U8(0),
+ B(StoreICSloppy), R(4), U8(4), U8(3),
+ B(Ldar), R(4),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kInitializeVarGlobal), R(1), U8(3),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden
new file mode 100644
index 0000000000..3b954817e4
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden
@@ -0,0 +1,98 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: string
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ try { return 1; } catch(e) { return 2; }
+"
+frame size: 5
+parameter count: 1
+bytecode array length: 40
+bytecodes: [
+ B(StackCheck),
+ B(Mov), R(context), R(1),
+ B(LdaSmi), U8(1),
+ B(Return),
+ B(Star), R(3),
+ B(LdaConstant), U8(0),
+ B(Star), R(2),
+ B(Ldar), R(closure),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kPushCatchContext), R(2), U8(3),
+ B(Star), R(1),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Ldar), R(1),
+ B(PushContext), R(0),
+ B(LdaSmi), U8(2),
+ B(PopContext), R(0),
+ B(Return),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ "e",
+]
+handlers: [
+ [4, 7, 7],
+]
+
+---
+snippet: "
+ var a;
+ try { a = 1 } catch(e1) {};
+ try { a = 2 } catch(e2) { a = 3 }
+"
+frame size: 6
+parameter count: 1
+bytecode array length: 81
+bytecodes: [
+ B(StackCheck),
+ B(Mov), R(context), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(Jump), U8(30),
+ B(Star), R(4),
+ B(LdaConstant), U8(0),
+ B(Star), R(3),
+ B(Ldar), R(closure),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kPushCatchContext), R(3), U8(3),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Ldar), R(2),
+ B(PushContext), R(1),
+ B(PopContext), R(1),
+ B(Mov), R(context), R(2),
+ B(LdaSmi), U8(2),
+ B(Star), R(0),
+ B(Jump), U8(34),
+ B(Star), R(4),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(Ldar), R(closure),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kPushCatchContext), R(3), U8(3),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Ldar), R(2),
+ B(PushContext), R(1),
+ B(LdaSmi), U8(3),
+ B(Star), R(0),
+ B(PopContext), R(1),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ "e1",
+ "e2",
+]
+handlers: [
+ [4, 8, 10],
+ [41, 45, 47],
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden
new file mode 100644
index 0000000000..22287f6c5b
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden
@@ -0,0 +1,180 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: string
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ var a = 1;
+ try { a = 2; } finally { a = 3; }
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 51
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(Mov), R(context), R(3),
+ B(LdaSmi), U8(2),
+ B(Star), R(0),
+ B(LdaSmi), U8(-1),
+ B(Star), R(1),
+ B(Jump), U8(7),
+ B(Star), R(2),
+ B(LdaZero),
+ B(Star), R(1),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Star), R(3),
+ B(LdaSmi), U8(3),
+ B(Star), R(0),
+ B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(3), U8(1),
+ B(LdaZero),
+ B(TestEqualStrict), R(1),
+ B(JumpIfTrue), U8(4),
+ B(Jump), U8(5),
+ B(Ldar), R(2),
+ B(ReThrow),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+ [8, 12, 18],
+]
+
+---
+snippet: "
+ var a = 1;
+ try { a = 2; } catch(e) { a = 20 } finally { a = 3; }
+"
+frame size: 9
+parameter count: 1
+bytecode array length: 88
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(Mov), R(context), R(4),
+ B(Mov), R(context), R(5),
+ B(LdaSmi), U8(2),
+ B(Star), R(0),
+ B(Jump), U8(34),
+ B(Star), R(7),
+ B(LdaConstant), U8(0),
+ B(Star), R(6),
+ B(Ldar), R(closure),
+ B(Star), R(8),
+ B(CallRuntime), U16(Runtime::kPushCatchContext), R(6), U8(3),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Ldar), R(5),
+ B(PushContext), R(1),
+ B(LdaSmi), U8(20),
+ B(Star), R(0),
+ B(PopContext), R(1),
+ B(LdaSmi), U8(-1),
+ B(Star), R(2),
+ B(Jump), U8(7),
+ B(Star), R(3),
+ B(LdaZero),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Star), R(4),
+ B(LdaSmi), U8(3),
+ B(Star), R(0),
+ B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(4), U8(1),
+ B(LdaZero),
+ B(TestEqualStrict), R(2),
+ B(JumpIfTrue), U8(4),
+ B(Jump), U8(5),
+ B(Ldar), R(3),
+ B(ReThrow),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ "e",
+]
+handlers: [
+ [8, 49, 55],
+ [11, 15, 17],
+]
+
+---
+snippet: "
+ var a; try {
+ try { a = 1 } catch(e) { a = 2 }
+ } catch(e) { a = 20 } finally { a = 3; }
+"
+frame size: 10
+parameter count: 1
+bytecode array length: 121
+bytecodes: [
+ B(StackCheck),
+ B(Mov), R(context), R(4),
+ B(Mov), R(context), R(5),
+ B(Mov), R(context), R(6),
+ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(Jump), U8(34),
+ B(Star), R(8),
+ B(LdaConstant), U8(0),
+ B(Star), R(7),
+ B(Ldar), R(closure),
+ B(Star), R(9),
+ B(CallRuntime), U16(Runtime::kPushCatchContext), R(7), U8(3),
+ B(Star), R(6),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Ldar), R(6),
+ B(PushContext), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(0),
+ B(PopContext), R(1),
+ B(Jump), U8(34),
+ B(Star), R(7),
+ B(LdaConstant), U8(0),
+ B(Star), R(6),
+ B(Ldar), R(closure),
+ B(Star), R(8),
+ B(CallRuntime), U16(Runtime::kPushCatchContext), R(6), U8(3),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Ldar), R(5),
+ B(PushContext), R(1),
+ B(LdaSmi), U8(20),
+ B(Star), R(0),
+ B(PopContext), R(1),
+ B(LdaSmi), U8(-1),
+ B(Star), R(2),
+ B(Jump), U8(7),
+ B(Star), R(3),
+ B(LdaZero),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Star), R(4),
+ B(LdaSmi), U8(3),
+ B(Star), R(0),
+ B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(4), U8(1),
+ B(LdaZero),
+ B(TestEqualStrict), R(2),
+ B(JumpIfTrue), U8(4),
+ B(Jump), U8(5),
+ B(Ldar), R(3),
+ B(ReThrow),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+ "e",
+]
+handlers: [
+ [4, 82, 88],
+ [7, 48, 50],
+ [10, 14, 16],
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Typeof.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Typeof.golden
new file mode 100644
index 0000000000..869144778f
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Typeof.golden
@@ -0,0 +1,55 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: string
+execute: yes
+wrap: no
+test function name: f
+
+---
+snippet: "
+ function f() {
+ var x = 13;
+ return typeof(x);
+ };
+ f();
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 7
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(13),
+ B(Star), R(0),
+ B(TypeOf),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 13;
+ function f() {
+ return typeof(x);
+ };
+ f();
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 6
+bytecodes: [
+ B(StackCheck),
+ B(LdaGlobalInsideTypeof), U8(0), U8(1),
+ B(TypeOf),
+ B(Return),
+]
+constant pool: [
+ "x",
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden
new file mode 100644
index 0000000000..0946948c29
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden
@@ -0,0 +1,193 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: number
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ var x = 0;
+ while (x != 10) {
+ x = x + 10;
+ }
+ return x;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 31
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(10),
+ B(TestEqual), R(1),
+ B(LogicalNot),
+ B(JumpIfFalse), U8(15),
+ B(StackCheck),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(10),
+ B(Add), R(1),
+ B(Star), R(0),
+ B(Jump), U8(-22),
+ B(Ldar), R(0),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = false;
+ do {
+ x = !x;
+ } while(x == false);
+ return x;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 22
+bytecodes: [
+ B(StackCheck),
+ B(LdaFalse),
+ B(Star), R(0),
+ B(StackCheck),
+ B(Ldar), R(0),
+ B(LogicalNot),
+ B(Star), R(0),
+ B(Ldar), R(0),
+ B(Star), R(1),
+ B(LdaFalse),
+ B(TestEqual), R(1),
+ B(JumpIfTrue), U8(-13),
+ B(Ldar), R(0),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 101;
+ return void(x * 3);
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 13
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(101),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(3),
+ B(Mul), R(1),
+ B(LdaUndefined),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 1234;
+ var y = void (x * x - 1);
+ return y;
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 23
+bytecodes: [
+ B(StackCheck),
+ B(Wide), B(LdaSmi), U16(1234),
+ B(Star), R(0),
+ B(Star), R(2),
+ B(Ldar), R(0),
+ B(Mul), R(2),
+ B(Star), R(3),
+ B(LdaSmi), U8(1),
+ B(Sub), R(3),
+ B(LdaUndefined),
+ B(Star), R(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 13;
+ return ~x;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 12
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(13),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(-1),
+ B(BitwiseXor), R(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 13;
+ return +x;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 12
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(13),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(1),
+ B(Mul), R(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 13;
+ return -x;
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 12
+bytecodes: [
+ B(StackCheck),
+ B(LdaSmi), U8(13),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LdaSmi), U8(-1),
+ B(Mul), R(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden
new file mode 100644
index 0000000000..4a65a7254e
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden
@@ -0,0 +1,1326 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: number
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ var x0;
+ var x1;
+ var x2;
+ var x3;
+ var x4;
+ var x5;
+ var x6;
+ var x7;
+ var x8;
+ var x9;
+ var x10;
+ var x11;
+ var x12;
+ var x13;
+ var x14;
+ var x15;
+ var x16;
+ var x17;
+ var x18;
+ var x19;
+ var x20;
+ var x21;
+ var x22;
+ var x23;
+ var x24;
+ var x25;
+ var x26;
+ var x27;
+ var x28;
+ var x29;
+ var x30;
+ var x31;
+ var x32;
+ var x33;
+ var x34;
+ var x35;
+ var x36;
+ var x37;
+ var x38;
+ var x39;
+ var x40;
+ var x41;
+ var x42;
+ var x43;
+ var x44;
+ var x45;
+ var x46;
+ var x47;
+ var x48;
+ var x49;
+ var x50;
+ var x51;
+ var x52;
+ var x53;
+ var x54;
+ var x55;
+ var x56;
+ var x57;
+ var x58;
+ var x59;
+ var x60;
+ var x61;
+ var x62;
+ var x63;
+ var x64;
+ var x65;
+ var x66;
+ var x67;
+ var x68;
+ var x69;
+ var x70;
+ var x71;
+ var x72;
+ var x73;
+ var x74;
+ var x75;
+ var x76;
+ var x77;
+ var x78;
+ var x79;
+ var x80;
+ var x81;
+ var x82;
+ var x83;
+ var x84;
+ var x85;
+ var x86;
+ var x87;
+ var x88;
+ var x89;
+ var x90;
+ var x91;
+ var x92;
+ var x93;
+ var x94;
+ var x95;
+ var x96;
+ var x97;
+ var x98;
+ var x99;
+ var x100;
+ var x101;
+ var x102;
+ var x103;
+ var x104;
+ var x105;
+ var x106;
+ var x107;
+ var x108;
+ var x109;
+ var x110;
+ var x111;
+ var x112;
+ var x113;
+ var x114;
+ var x115;
+ var x116;
+ var x117;
+ var x118;
+ var x119;
+ var x120;
+ var x121;
+ var x122;
+ var x123;
+ var x124;
+ var x125;
+ var x126;
+ var x127;
+ var x128;
+ var x129;
+ var x130;
+ var x131;
+ var x132;
+ var x133;
+ var x134;
+ var x135;
+ var x136;
+ var x137;
+ var x138;
+ var x139;
+ var x140;
+ var x141;
+ var x142;
+ var x143;
+ var x144;
+ var x145;
+ var x146;
+ var x147;
+ var x148;
+ var x149;
+ var x150;
+ var x151;
+ var x152;
+ var x153;
+ var x154;
+ var x155;
+ var x156;
+ x0 = x127;
+ return x0;
+"
+frame size: 157
+parameter count: 1
+bytecode array length: 6
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(127),
+ B(Star), R(0),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x0;
+ var x1;
+ var x2;
+ var x3;
+ var x4;
+ var x5;
+ var x6;
+ var x7;
+ var x8;
+ var x9;
+ var x10;
+ var x11;
+ var x12;
+ var x13;
+ var x14;
+ var x15;
+ var x16;
+ var x17;
+ var x18;
+ var x19;
+ var x20;
+ var x21;
+ var x22;
+ var x23;
+ var x24;
+ var x25;
+ var x26;
+ var x27;
+ var x28;
+ var x29;
+ var x30;
+ var x31;
+ var x32;
+ var x33;
+ var x34;
+ var x35;
+ var x36;
+ var x37;
+ var x38;
+ var x39;
+ var x40;
+ var x41;
+ var x42;
+ var x43;
+ var x44;
+ var x45;
+ var x46;
+ var x47;
+ var x48;
+ var x49;
+ var x50;
+ var x51;
+ var x52;
+ var x53;
+ var x54;
+ var x55;
+ var x56;
+ var x57;
+ var x58;
+ var x59;
+ var x60;
+ var x61;
+ var x62;
+ var x63;
+ var x64;
+ var x65;
+ var x66;
+ var x67;
+ var x68;
+ var x69;
+ var x70;
+ var x71;
+ var x72;
+ var x73;
+ var x74;
+ var x75;
+ var x76;
+ var x77;
+ var x78;
+ var x79;
+ var x80;
+ var x81;
+ var x82;
+ var x83;
+ var x84;
+ var x85;
+ var x86;
+ var x87;
+ var x88;
+ var x89;
+ var x90;
+ var x91;
+ var x92;
+ var x93;
+ var x94;
+ var x95;
+ var x96;
+ var x97;
+ var x98;
+ var x99;
+ var x100;
+ var x101;
+ var x102;
+ var x103;
+ var x104;
+ var x105;
+ var x106;
+ var x107;
+ var x108;
+ var x109;
+ var x110;
+ var x111;
+ var x112;
+ var x113;
+ var x114;
+ var x115;
+ var x116;
+ var x117;
+ var x118;
+ var x119;
+ var x120;
+ var x121;
+ var x122;
+ var x123;
+ var x124;
+ var x125;
+ var x126;
+ var x127;
+ var x128;
+ var x129;
+ var x130;
+ var x131;
+ var x132;
+ var x133;
+ var x134;
+ var x135;
+ var x136;
+ var x137;
+ var x138;
+ var x139;
+ var x140;
+ var x141;
+ var x142;
+ var x143;
+ var x144;
+ var x145;
+ var x146;
+ var x147;
+ var x148;
+ var x149;
+ var x150;
+ var x151;
+ var x152;
+ var x153;
+ var x154;
+ var x155;
+ var x156;
+ x127 = x126;
+ return x127;
+"
+frame size: 157
+parameter count: 1
+bytecode array length: 6
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(126),
+ B(Star), R(127),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x0;
+ var x1;
+ var x2;
+ var x3;
+ var x4;
+ var x5;
+ var x6;
+ var x7;
+ var x8;
+ var x9;
+ var x10;
+ var x11;
+ var x12;
+ var x13;
+ var x14;
+ var x15;
+ var x16;
+ var x17;
+ var x18;
+ var x19;
+ var x20;
+ var x21;
+ var x22;
+ var x23;
+ var x24;
+ var x25;
+ var x26;
+ var x27;
+ var x28;
+ var x29;
+ var x30;
+ var x31;
+ var x32;
+ var x33;
+ var x34;
+ var x35;
+ var x36;
+ var x37;
+ var x38;
+ var x39;
+ var x40;
+ var x41;
+ var x42;
+ var x43;
+ var x44;
+ var x45;
+ var x46;
+ var x47;
+ var x48;
+ var x49;
+ var x50;
+ var x51;
+ var x52;
+ var x53;
+ var x54;
+ var x55;
+ var x56;
+ var x57;
+ var x58;
+ var x59;
+ var x60;
+ var x61;
+ var x62;
+ var x63;
+ var x64;
+ var x65;
+ var x66;
+ var x67;
+ var x68;
+ var x69;
+ var x70;
+ var x71;
+ var x72;
+ var x73;
+ var x74;
+ var x75;
+ var x76;
+ var x77;
+ var x78;
+ var x79;
+ var x80;
+ var x81;
+ var x82;
+ var x83;
+ var x84;
+ var x85;
+ var x86;
+ var x87;
+ var x88;
+ var x89;
+ var x90;
+ var x91;
+ var x92;
+ var x93;
+ var x94;
+ var x95;
+ var x96;
+ var x97;
+ var x98;
+ var x99;
+ var x100;
+ var x101;
+ var x102;
+ var x103;
+ var x104;
+ var x105;
+ var x106;
+ var x107;
+ var x108;
+ var x109;
+ var x110;
+ var x111;
+ var x112;
+ var x113;
+ var x114;
+ var x115;
+ var x116;
+ var x117;
+ var x118;
+ var x119;
+ var x120;
+ var x121;
+ var x122;
+ var x123;
+ var x124;
+ var x125;
+ var x126;
+ var x127;
+ var x128;
+ var x129;
+ var x130;
+ var x131;
+ var x132;
+ var x133;
+ var x134;
+ var x135;
+ var x136;
+ var x137;
+ var x138;
+ var x139;
+ var x140;
+ var x141;
+ var x142;
+ var x143;
+ var x144;
+ var x145;
+ var x146;
+ var x147;
+ var x148;
+ var x149;
+ var x150;
+ var x151;
+ var x152;
+ var x153;
+ var x154;
+ var x155;
+ var x156;
+ if (x2 > 3) { return x129; }
+ return x128;
+"
+frame size: 158
+parameter count: 1
+bytecode array length: 23
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(2),
+ B(Wide), B(Star), R16(157),
+ B(LdaSmi), U8(3),
+ B(Wide), B(TestGreaterThan), R16(157),
+ B(JumpIfFalse), U8(7),
+ B(Wide), B(Ldar), R16(129),
+ B(Return),
+ B(Ldar), R(128),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x0;
+ var x1;
+ var x2;
+ var x3;
+ var x4;
+ var x5;
+ var x6;
+ var x7;
+ var x8;
+ var x9;
+ var x10;
+ var x11;
+ var x12;
+ var x13;
+ var x14;
+ var x15;
+ var x16;
+ var x17;
+ var x18;
+ var x19;
+ var x20;
+ var x21;
+ var x22;
+ var x23;
+ var x24;
+ var x25;
+ var x26;
+ var x27;
+ var x28;
+ var x29;
+ var x30;
+ var x31;
+ var x32;
+ var x33;
+ var x34;
+ var x35;
+ var x36;
+ var x37;
+ var x38;
+ var x39;
+ var x40;
+ var x41;
+ var x42;
+ var x43;
+ var x44;
+ var x45;
+ var x46;
+ var x47;
+ var x48;
+ var x49;
+ var x50;
+ var x51;
+ var x52;
+ var x53;
+ var x54;
+ var x55;
+ var x56;
+ var x57;
+ var x58;
+ var x59;
+ var x60;
+ var x61;
+ var x62;
+ var x63;
+ var x64;
+ var x65;
+ var x66;
+ var x67;
+ var x68;
+ var x69;
+ var x70;
+ var x71;
+ var x72;
+ var x73;
+ var x74;
+ var x75;
+ var x76;
+ var x77;
+ var x78;
+ var x79;
+ var x80;
+ var x81;
+ var x82;
+ var x83;
+ var x84;
+ var x85;
+ var x86;
+ var x87;
+ var x88;
+ var x89;
+ var x90;
+ var x91;
+ var x92;
+ var x93;
+ var x94;
+ var x95;
+ var x96;
+ var x97;
+ var x98;
+ var x99;
+ var x100;
+ var x101;
+ var x102;
+ var x103;
+ var x104;
+ var x105;
+ var x106;
+ var x107;
+ var x108;
+ var x109;
+ var x110;
+ var x111;
+ var x112;
+ var x113;
+ var x114;
+ var x115;
+ var x116;
+ var x117;
+ var x118;
+ var x119;
+ var x120;
+ var x121;
+ var x122;
+ var x123;
+ var x124;
+ var x125;
+ var x126;
+ var x127;
+ var x128;
+ var x129;
+ var x130;
+ var x131;
+ var x132;
+ var x133;
+ var x134;
+ var x135;
+ var x136;
+ var x137;
+ var x138;
+ var x139;
+ var x140;
+ var x141;
+ var x142;
+ var x143;
+ var x144;
+ var x145;
+ var x146;
+ var x147;
+ var x148;
+ var x149;
+ var x150;
+ var x151;
+ var x152;
+ var x153;
+ var x154;
+ var x155;
+ var x156;
+ var x0 = 0;
+ if (x129 == 3) { var x129 = x0; }
+ if (x2 > 3) { return x0; }
+ return x129;
+"
+frame size: 158
+parameter count: 1
+bytecode array length: 48
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(Wide), B(Ldar), R16(129),
+ B(Wide), B(Star), R16(157),
+ B(LdaSmi), U8(3),
+ B(Wide), B(TestEqual), R16(157),
+ B(JumpIfFalse), U8(8),
+ B(Ldar), R(0),
+ B(Wide), B(Star), R16(129),
+ B(Ldar), R(2),
+ B(Wide), B(Star), R16(157),
+ B(LdaSmi), U8(3),
+ B(Wide), B(TestGreaterThan), R16(157),
+ B(JumpIfFalse), U8(5),
+ B(Ldar), R(0),
+ B(Return),
+ B(Wide), B(Ldar), R16(129),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x0;
+ var x1;
+ var x2;
+ var x3;
+ var x4;
+ var x5;
+ var x6;
+ var x7;
+ var x8;
+ var x9;
+ var x10;
+ var x11;
+ var x12;
+ var x13;
+ var x14;
+ var x15;
+ var x16;
+ var x17;
+ var x18;
+ var x19;
+ var x20;
+ var x21;
+ var x22;
+ var x23;
+ var x24;
+ var x25;
+ var x26;
+ var x27;
+ var x28;
+ var x29;
+ var x30;
+ var x31;
+ var x32;
+ var x33;
+ var x34;
+ var x35;
+ var x36;
+ var x37;
+ var x38;
+ var x39;
+ var x40;
+ var x41;
+ var x42;
+ var x43;
+ var x44;
+ var x45;
+ var x46;
+ var x47;
+ var x48;
+ var x49;
+ var x50;
+ var x51;
+ var x52;
+ var x53;
+ var x54;
+ var x55;
+ var x56;
+ var x57;
+ var x58;
+ var x59;
+ var x60;
+ var x61;
+ var x62;
+ var x63;
+ var x64;
+ var x65;
+ var x66;
+ var x67;
+ var x68;
+ var x69;
+ var x70;
+ var x71;
+ var x72;
+ var x73;
+ var x74;
+ var x75;
+ var x76;
+ var x77;
+ var x78;
+ var x79;
+ var x80;
+ var x81;
+ var x82;
+ var x83;
+ var x84;
+ var x85;
+ var x86;
+ var x87;
+ var x88;
+ var x89;
+ var x90;
+ var x91;
+ var x92;
+ var x93;
+ var x94;
+ var x95;
+ var x96;
+ var x97;
+ var x98;
+ var x99;
+ var x100;
+ var x101;
+ var x102;
+ var x103;
+ var x104;
+ var x105;
+ var x106;
+ var x107;
+ var x108;
+ var x109;
+ var x110;
+ var x111;
+ var x112;
+ var x113;
+ var x114;
+ var x115;
+ var x116;
+ var x117;
+ var x118;
+ var x119;
+ var x120;
+ var x121;
+ var x122;
+ var x123;
+ var x124;
+ var x125;
+ var x126;
+ var x127;
+ var x128;
+ var x129;
+ var x130;
+ var x131;
+ var x132;
+ var x133;
+ var x134;
+ var x135;
+ var x136;
+ var x137;
+ var x138;
+ var x139;
+ var x140;
+ var x141;
+ var x142;
+ var x143;
+ var x144;
+ var x145;
+ var x146;
+ var x147;
+ var x148;
+ var x149;
+ var x150;
+ var x151;
+ var x152;
+ var x153;
+ var x154;
+ var x155;
+ var x156;
+ var x0 = 0;
+ var x1 = 0;
+ for (x128 = 0; x128 < 64; x128++) { x1 += x128;}return x128;
+"
+frame size: 158
+parameter count: 1
+bytecode array length: 54
+bytecodes: [
+ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(0),
+ B(LdaZero),
+ B(Star), R(1),
+ B(LdaZero),
+ B(Star), R(128),
+ B(Ldar), R(128),
+ B(Wide), B(Star), R16(157),
+ B(LdaSmi), U8(64),
+ B(Wide), B(TestLessThan), R16(157),
+ B(JumpIfFalse), U8(29),
+ B(StackCheck),
+ B(Ldar), R(1),
+ B(Wide), B(Star), R16(157),
+ B(Ldar), R(128),
+ B(Wide), B(Add), R16(157),
+ B(Star), R(1),
+ B(Ldar), R(128),
+ B(ToNumber),
+ B(Wide), B(Star), R16(157),
+ B(Inc),
+ B(Star), R(128),
+ B(Jump), U8(-39),
+ B(Ldar), R(128),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x0;
+ var x1;
+ var x2;
+ var x3;
+ var x4;
+ var x5;
+ var x6;
+ var x7;
+ var x8;
+ var x9;
+ var x10;
+ var x11;
+ var x12;
+ var x13;
+ var x14;
+ var x15;
+ var x16;
+ var x17;
+ var x18;
+ var x19;
+ var x20;
+ var x21;
+ var x22;
+ var x23;
+ var x24;
+ var x25;
+ var x26;
+ var x27;
+ var x28;
+ var x29;
+ var x30;
+ var x31;
+ var x32;
+ var x33;
+ var x34;
+ var x35;
+ var x36;
+ var x37;
+ var x38;
+ var x39;
+ var x40;
+ var x41;
+ var x42;
+ var x43;
+ var x44;
+ var x45;
+ var x46;
+ var x47;
+ var x48;
+ var x49;
+ var x50;
+ var x51;
+ var x52;
+ var x53;
+ var x54;
+ var x55;
+ var x56;
+ var x57;
+ var x58;
+ var x59;
+ var x60;
+ var x61;
+ var x62;
+ var x63;
+ var x64;
+ var x65;
+ var x66;
+ var x67;
+ var x68;
+ var x69;
+ var x70;
+ var x71;
+ var x72;
+ var x73;
+ var x74;
+ var x75;
+ var x76;
+ var x77;
+ var x78;
+ var x79;
+ var x80;
+ var x81;
+ var x82;
+ var x83;
+ var x84;
+ var x85;
+ var x86;
+ var x87;
+ var x88;
+ var x89;
+ var x90;
+ var x91;
+ var x92;
+ var x93;
+ var x94;
+ var x95;
+ var x96;
+ var x97;
+ var x98;
+ var x99;
+ var x100;
+ var x101;
+ var x102;
+ var x103;
+ var x104;
+ var x105;
+ var x106;
+ var x107;
+ var x108;
+ var x109;
+ var x110;
+ var x111;
+ var x112;
+ var x113;
+ var x114;
+ var x115;
+ var x116;
+ var x117;
+ var x118;
+ var x119;
+ var x120;
+ var x121;
+ var x122;
+ var x123;
+ var x124;
+ var x125;
+ var x126;
+ var x127;
+ var x128;
+ var x129;
+ var x130;
+ var x131;
+ var x132;
+ var x133;
+ var x134;
+ var x135;
+ var x136;
+ var x137;
+ var x138;
+ var x139;
+ var x140;
+ var x141;
+ var x142;
+ var x143;
+ var x144;
+ var x145;
+ var x146;
+ var x147;
+ var x148;
+ var x149;
+ var x150;
+ var x151;
+ var x152;
+ var x153;
+ var x154;
+ var x155;
+ var x156;
+ var x0 = 1234;
+ var x1 = 0;
+ for (x128 in x0) { x1 += x128;}return x1;
+"
+frame size: 163
+parameter count: 1
+bytecode array length: 80
+bytecodes: [
+ B(StackCheck),
+ B(Wide), B(LdaSmi), U16(1234),
+ B(Star), R(0),
+ B(LdaZero),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(JumpIfUndefined), U8(65),
+ B(JumpIfNull), U8(63),
+ B(ToObject),
+ B(Wide), B(Star), R16(157),
+ B(Wide), B(ForInPrepare), R16(158),
+ B(LdaZero),
+ B(Wide), B(Star), R16(161),
+ B(Wide), B(ForInDone), R16(161), R16(160),
+ B(JumpIfTrue), U8(41),
+ B(Wide), B(ForInNext), R16(157), R16(161), R16(158), U16(1),
+ B(JumpIfUndefined), U8(19),
+ B(Star), R(128),
+ B(StackCheck),
+ B(Ldar), R(1),
+ B(Wide), B(Star), R16(162),
+ B(Ldar), R(128),
+ B(Wide), B(Add), R16(162),
+ B(Star), R(1),
+ B(Wide), B(ForInStep), R16(161),
+ B(Wide), B(Star), R16(161),
+ B(Jump), U8(-45),
+ B(Ldar), R(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x0;
+ var x1;
+ var x2;
+ var x3;
+ var x4;
+ var x5;
+ var x6;
+ var x7;
+ var x8;
+ var x9;
+ var x10;
+ var x11;
+ var x12;
+ var x13;
+ var x14;
+ var x15;
+ var x16;
+ var x17;
+ var x18;
+ var x19;
+ var x20;
+ var x21;
+ var x22;
+ var x23;
+ var x24;
+ var x25;
+ var x26;
+ var x27;
+ var x28;
+ var x29;
+ var x30;
+ var x31;
+ var x32;
+ var x33;
+ var x34;
+ var x35;
+ var x36;
+ var x37;
+ var x38;
+ var x39;
+ var x40;
+ var x41;
+ var x42;
+ var x43;
+ var x44;
+ var x45;
+ var x46;
+ var x47;
+ var x48;
+ var x49;
+ var x50;
+ var x51;
+ var x52;
+ var x53;
+ var x54;
+ var x55;
+ var x56;
+ var x57;
+ var x58;
+ var x59;
+ var x60;
+ var x61;
+ var x62;
+ var x63;
+ var x64;
+ var x65;
+ var x66;
+ var x67;
+ var x68;
+ var x69;
+ var x70;
+ var x71;
+ var x72;
+ var x73;
+ var x74;
+ var x75;
+ var x76;
+ var x77;
+ var x78;
+ var x79;
+ var x80;
+ var x81;
+ var x82;
+ var x83;
+ var x84;
+ var x85;
+ var x86;
+ var x87;
+ var x88;
+ var x89;
+ var x90;
+ var x91;
+ var x92;
+ var x93;
+ var x94;
+ var x95;
+ var x96;
+ var x97;
+ var x98;
+ var x99;
+ var x100;
+ var x101;
+ var x102;
+ var x103;
+ var x104;
+ var x105;
+ var x106;
+ var x107;
+ var x108;
+ var x109;
+ var x110;
+ var x111;
+ var x112;
+ var x113;
+ var x114;
+ var x115;
+ var x116;
+ var x117;
+ var x118;
+ var x119;
+ var x120;
+ var x121;
+ var x122;
+ var x123;
+ var x124;
+ var x125;
+ var x126;
+ var x127;
+ var x128;
+ var x129;
+ var x130;
+ var x131;
+ var x132;
+ var x133;
+ var x134;
+ var x135;
+ var x136;
+ var x137;
+ var x138;
+ var x139;
+ var x140;
+ var x141;
+ var x142;
+ var x143;
+ var x144;
+ var x145;
+ var x146;
+ var x147;
+ var x148;
+ var x149;
+ var x150;
+ var x151;
+ var x152;
+ var x153;
+ var x154;
+ var x155;
+ var x156;
+ x0 = %Add(x64, x63);
+ x1 = %Add(x27, x143);
+ %TheHole();
+ return x1;
+"
+frame size: 159
+parameter count: 1
+bytecode array length: 55
+bytecodes: [
+ B(StackCheck),
+ B(Ldar), R(64),
+ B(Wide), B(Star), R16(157),
+ B(Ldar), R(63),
+ B(Wide), B(Star), R16(158),
+ B(Wide), B(CallRuntime), U16(Runtime::kAdd), R16(157), U16(2),
+ B(Star), R(0),
+ B(Ldar), R(27),
+ B(Wide), B(Star), R16(157),
+ B(Wide), B(Ldar), R16(143),
+ B(Wide), B(Star), R16(158),
+ B(Wide), B(CallRuntime), U16(Runtime::kAdd), R16(157), U16(2),
+ B(Star), R(1),
+ B(CallRuntime), U16(Runtime::kTheHole), R(0), U8(0),
+ B(Ldar), R(1),
+ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden
new file mode 100644
index 0000000000..c5af81f15f
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden
@@ -0,0 +1,36 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: mixed
+execute: yes
+wrap: yes
+
+---
+snippet: "
+ with ({x:42}) { return x; }
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 26
+bytecodes: [
+ B(StackCheck),
+ B(CreateObjectLiteral), U8(0), U8(0), U8(5),
+ B(Star), R(1),
+ B(ToObject),
+ B(Star), R(2),
+ B(Ldar), R(closure),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kPushWithContext), R(2), U8(2),
+ B(PushContext), R(0),
+ B(LdaLookupSlot), U8(1),
+ B(PopContext), R(0),
+ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+]
+handlers: [
+]
diff --git a/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
index 567aa41a8e..92ba9ba0bd 100644
--- a/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
+++ b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
@@ -4,6 +4,7 @@
#include <cstring>
#include <fstream>
+#include <vector>
#include "test/cctest/interpreter/bytecode-expectations-printer.h"
@@ -15,10 +16,20 @@
#include "src/compiler.h"
#include "src/interpreter/interpreter.h"
+#ifdef V8_OS_POSIX
+#include <dirent.h>
+#endif
+
using v8::internal::interpreter::BytecodeExpectationsPrinter;
+#define REPORT_ERROR(MESSAGE) (((std::cerr << "ERROR: ") << MESSAGE) << '\n')
+
namespace {
+#ifdef V8_OS_POSIX
+const char* kGoldenFilesPath = "test/cctest/interpreter/bytecode_expectations/";
+#endif
+
class ProgramOptions final {
public:
static ProgramOptions FromCommandLine(int argc, char** argv);
@@ -32,8 +43,8 @@ class ProgramOptions final {
wrap_(true),
execute_(true),
top_level_(false),
- legacy_const_(false),
do_expressions_(false),
+ verbose_(false),
const_pool_type_(
BytecodeExpectationsPrinter::ConstantPoolType::kMixed) {}
@@ -52,12 +63,13 @@ class ProgramOptions final {
bool wrap() const { return wrap_; }
bool execute() const { return execute_; }
bool top_level() const { return top_level_; }
- bool legacy_const() const { return legacy_const_; }
bool do_expressions() const { return do_expressions_; }
+ bool verbose() const { return verbose_; }
+ bool suppress_runtime_errors() const { return rebaseline_ && !verbose_; }
BytecodeExpectationsPrinter::ConstantPoolType const_pool_type() const {
return const_pool_type_;
}
- std::string input_filename() const { return input_filename_; }
+ std::vector<std::string> input_filenames() const { return input_filenames_; }
std::string output_filename() const { return output_filename_; }
std::string test_function_name() const { return test_function_name_; }
@@ -70,10 +82,10 @@ class ProgramOptions final {
bool wrap_;
bool execute_;
bool top_level_;
- bool legacy_const_;
bool do_expressions_;
+ bool verbose_;
BytecodeExpectationsPrinter::ConstantPoolType const_pool_type_;
- std::string input_filename_;
+ std::vector<std::string> input_filenames_;
std::string output_filename_;
std::string test_function_name_;
};
@@ -144,6 +156,39 @@ bool ParseBoolean(const char* string) {
const char* BooleanToString(bool value) { return value ? "yes" : "no"; }
+#ifdef V8_OS_POSIX
+
+bool StrEndsWith(const char* string, const char* suffix) {
+ int string_size = i::StrLength(string);
+ int suffix_size = i::StrLength(suffix);
+ if (string_size < suffix_size) return false;
+
+ return strcmp(string + (string_size - suffix_size), suffix) == 0;
+}
+
+bool CollectGoldenFiles(std::vector<std::string>* golden_file_list,
+ const char* directory_path) {
+ DIR* directory = opendir(directory_path);
+ if (!directory) return false;
+
+ dirent entry_buffer;
+ dirent* entry;
+
+ while (readdir_r(directory, &entry_buffer, &entry) == 0 && entry) {
+ if (StrEndsWith(entry->d_name, ".golden")) {
+ std::string golden_filename(kGoldenFilesPath);
+ golden_filename += entry->d_name;
+ golden_file_list->push_back(golden_filename);
+ }
+ }
+
+ closedir(directory);
+
+ return true;
+}
+
+#endif // V8_OS_POSIX
+
// static
ProgramOptions ProgramOptions::FromCommandLine(int argc, char** argv) {
ProgramOptions options;
@@ -165,28 +210,38 @@ ProgramOptions ProgramOptions::FromCommandLine(int argc, char** argv) {
options.execute_ = false;
} else if (strcmp(argv[i], "--top-level") == 0) {
options.top_level_ = true;
- } else if (strcmp(argv[i], "--legacy-const") == 0) {
- options.legacy_const_ = true;
} else if (strcmp(argv[i], "--do-expressions") == 0) {
options.do_expressions_ = true;
+ } else if (strcmp(argv[i], "--verbose") == 0) {
+ options.verbose_ = true;
} else if (strncmp(argv[i], "--output=", 9) == 0) {
options.output_filename_ = argv[i] + 9;
} else if (strncmp(argv[i], "--test-function-name=", 21) == 0) {
options.test_function_name_ = argv[i] + 21;
} else if (strncmp(argv[i], "--", 2) != 0) { // It doesn't start with --
- if (!options.input_filename_.empty()) {
- std::cerr << "ERROR: More than one input file specified\n";
- options.parsing_failed_ = true;
- break;
- }
- options.input_filename_ = argv[i];
+ options.input_filenames_.push_back(argv[i]);
} else {
- std::cerr << "ERROR: Unknonwn option " << argv[i] << "\n";
+ REPORT_ERROR("Unknown option " << argv[i]);
options.parsing_failed_ = true;
break;
}
}
+ if (options.rebaseline_ && options.input_filenames_.empty()) {
+#ifdef V8_OS_POSIX
+ if (options.verbose_) {
+ std::cout << "Looking for golden files in " << kGoldenFilesPath << '\n';
+ }
+ if (!CollectGoldenFiles(&options.input_filenames_, kGoldenFilesPath)) {
+ REPORT_ERROR("Golden files autodiscovery failed.");
+ options.parsing_failed_ = true;
+ }
+#else
+ REPORT_ERROR("Golden files autodiscovery requires a POSIX OS, sorry.");
+ options.parsing_failed_ = true;
+#endif
+ }
+
return options;
}
@@ -196,28 +251,44 @@ bool ProgramOptions::Validate() const {
if (const_pool_type_ ==
BytecodeExpectationsPrinter::ConstantPoolType::kUnknown) {
- std::cerr << "ERROR: Unknown constant pool type.\n";
+ REPORT_ERROR("Unknown constant pool type.");
return false;
}
- if (!read_from_stdin_ && input_filename_.empty()) {
- std::cerr << "ERROR: No input file specified.\n";
+ if (!read_from_stdin_ && input_filenames_.empty()) {
+ REPORT_ERROR("No input file specified.");
return false;
}
- if (read_from_stdin_ && !input_filename_.empty()) {
- std::cerr << "ERROR: Reading from stdin, but input files supplied.\n";
+ if (read_from_stdin_ && !input_filenames_.empty()) {
+ REPORT_ERROR("Reading from stdin, but input files supplied.");
return false;
}
if (rebaseline_ && read_raw_js_snippet_) {
- std::cerr << "ERROR: Cannot use --rebaseline on a raw JS snippet.\n";
+ REPORT_ERROR("Cannot use --rebaseline on a raw JS snippet.");
+ return false;
+ }
+
+ if (rebaseline_ && !output_filename_.empty()) {
+ REPORT_ERROR("Output file cannot be specified together with --rebaseline.");
+ return false;
+ }
+
+ if (rebaseline_ && read_from_stdin_) {
+ REPORT_ERROR("Cannot --rebaseline when input is --stdin.");
+ return false;
+ }
+
+ if (input_filenames_.size() > 1 && !rebaseline_ && !read_raw_js_snippet()) {
+ REPORT_ERROR(
+ "Multiple input files, but no --rebaseline or --raw-js specified.");
return false;
}
if (top_level_ && !test_function_name_.empty()) {
- std::cerr << "ERROR: test function name specified while processing "
- "top level code.\n";
+ REPORT_ERROR(
+ "Test function name specified while processing top level code.");
return false;
}
@@ -243,8 +314,6 @@ void ProgramOptions::UpdateFromHeader(std::istream& stream) {
test_function_name_ = line.c_str() + 20;
} else if (line.compare(0, 11, "top level: ") == 0) {
top_level_ = ParseBoolean(line.c_str() + 11);
- } else if (line.compare(0, 14, "legacy const: ") == 0) {
- legacy_const_ = ParseBoolean(line.c_str() + 14);
} else if (line.compare(0, 16, "do expressions: ") == 0) {
do_expressions_ = ParseBoolean(line.c_str() + 16);
} else if (line == "---") {
@@ -270,7 +339,6 @@ void ProgramOptions::PrintHeader(std::ostream& stream) const { // NOLINT
}
if (top_level_) stream << "\ntop level: yes";
- if (legacy_const_) stream << "\nlegacy const: yes";
if (do_expressions_) stream << "\ndo expressions: yes";
stream << "\n\n";
@@ -360,41 +428,66 @@ void ExtractSnippets(std::vector<std::string>* snippet_list,
void GenerateExpectationsFile(std::ostream& stream, // NOLINT
const std::vector<std::string>& snippet_list,
- const ProgramOptions& options,
- const char* exec_path) {
- V8InitializationScope platform(exec_path);
- {
- v8::Isolate::Scope isolate_scope(platform.isolate());
- v8::HandleScope handle_scope(platform.isolate());
- v8::Local<v8::Context> context = v8::Context::New(platform.isolate());
- v8::Context::Scope context_scope(context);
-
- BytecodeExpectationsPrinter printer(platform.isolate(),
- options.const_pool_type());
- printer.set_wrap(options.wrap());
- printer.set_execute(options.execute());
- printer.set_top_level(options.top_level());
- if (!options.test_function_name().empty()) {
- printer.set_test_function_name(options.test_function_name());
- }
+ const V8InitializationScope& platform,
+ const ProgramOptions& options) {
+ v8::Isolate::Scope isolate_scope(platform.isolate());
+ v8::HandleScope handle_scope(platform.isolate());
+ v8::Local<v8::Context> context = v8::Context::New(platform.isolate());
+ v8::Context::Scope context_scope(context);
+
+ BytecodeExpectationsPrinter printer(platform.isolate(),
+ options.const_pool_type());
+ printer.set_wrap(options.wrap());
+ printer.set_execute(options.execute());
+ printer.set_top_level(options.top_level());
+ if (!options.test_function_name().empty()) {
+ printer.set_test_function_name(options.test_function_name());
+ }
- if (options.legacy_const()) i::FLAG_legacy_const = true;
- if (options.do_expressions()) i::FLAG_harmony_do_expressions = true;
+ if (options.do_expressions()) i::FLAG_harmony_do_expressions = true;
+
+ stream << "#\n# Autogenerated by generate-bytecode-expectations.\n#\n\n";
+ options.PrintHeader(stream);
+ for (const std::string& snippet : snippet_list) {
+ printer.PrintExpectation(stream, snippet);
+ }
- stream << "#\n# Autogenerated by generate-bytecode-expectations\n#\n\n";
- options.PrintHeader(stream);
- for (const std::string& snippet : snippet_list) {
- printer.PrintExpectation(stream, snippet);
+ i::FLAG_harmony_do_expressions = false;
+}
+
+bool WriteExpectationsFile(const std::vector<std::string>& snippet_list,
+ const V8InitializationScope& platform,
+ const ProgramOptions& options,
+ const std::string& output_filename) {
+ std::ofstream output_file_handle;
+ if (!options.write_to_stdout()) {
+ output_file_handle.open(output_filename.c_str());
+ if (!output_file_handle.is_open()) {
+ REPORT_ERROR("Could not open " << output_filename << " for writing.");
+ return false;
}
}
+ std::ostream& output_stream =
+ options.write_to_stdout() ? std::cout : output_file_handle;
+
+ GenerateExpectationsFile(output_stream, snippet_list, platform, options);
+
+ return true;
+}
+
+void PrintMessage(v8::Local<v8::Message> message, v8::Local<v8::Value>) {
+ std::cerr << "INFO: " << *v8::String::Utf8Value(message->Get()) << '\n';
}
+void DiscardMessage(v8::Local<v8::Message>, v8::Local<v8::Value>) {}
+
void PrintUsage(const char* exec_path) {
std::cerr
<< "\nUsage: " << exec_path
- << " [OPTIONS]... [INPUT FILE]\n\n"
+ << " [OPTIONS]... [INPUT FILES]...\n\n"
"Options:\n"
" --help Print this help message.\n"
+ " --verbose Emit messages about the progress of the tool.\n"
" --raw-js Read raw JavaScript, instead of the output format.\n"
" --stdin Read from standard input instead of file.\n"
" --rebaseline Rebaseline input snippet file.\n"
@@ -402,8 +495,7 @@ void PrintUsage(const char* exec_path) {
" --no-execute Do not execute after compilation.\n"
" --test-function-name=foo "
"Specify the name of the test function.\n"
- " --top-level Process top level code, not the top-level function."
- " --legacy-const Enable legacy_const flag.\n"
+ " --top-level Process top level code, not the top-level function.\n"
" --do-expressions Enable harmony_do_expressions flag.\n"
" --output=file.name\n"
" Specify the output file. If not specified, output goes to "
@@ -431,39 +523,52 @@ int main(int argc, char** argv) {
return options.print_help() ? 0 : 1;
}
- std::ifstream input_file_handle;
- if (!options.read_from_stdin()) {
- input_file_handle.open(options.input_filename().c_str());
- if (!input_file_handle.is_open()) {
- std::cerr << "ERROR: Could not open '" << options.input_filename()
- << "' for reading.\n";
- return 2;
- }
- }
- std::istream& input_stream =
- options.read_from_stdin() ? std::cin : input_file_handle;
-
- if (options.rebaseline()) {
- options.UpdateFromHeader(input_stream);
- CHECK(options.Validate());
- }
+ V8InitializationScope platform(argv[0]);
+ platform.isolate()->AddMessageListener(
+ options.suppress_runtime_errors() ? DiscardMessage : PrintMessage);
std::vector<std::string> snippet_list;
- ExtractSnippets(&snippet_list, input_stream, options.read_raw_js_snippet());
- std::ofstream output_file_handle;
- if (!options.write_to_stdout()) {
- output_file_handle.open(options.rebaseline()
- ? options.input_filename().c_str()
- : options.output_filename().c_str());
- if (!output_file_handle.is_open()) {
- std::cerr << "ERROR: Could not open '" << options.output_filename()
- << "' for writing.\n";
- return 3;
+ if (options.read_from_stdin()) {
+ // Rebaseline will never get here, so we will always take the
+ // GenerateExpectationsFile at the end of this function.
+ DCHECK(!options.rebaseline());
+ ExtractSnippets(&snippet_list, std::cin, options.read_raw_js_snippet());
+ } else {
+ for (const std::string& input_filename : options.input_filenames()) {
+ if (options.verbose()) {
+ std::cerr << "Processing " << input_filename << '\n';
+ }
+
+ std::ifstream input_stream(input_filename.c_str());
+ if (!input_stream.is_open()) {
+ REPORT_ERROR("Could not open " << input_filename << " for reading.");
+ return 2;
+ }
+
+ ProgramOptions updated_options = options;
+ if (options.rebaseline()) {
+ updated_options.UpdateFromHeader(input_stream);
+ CHECK(updated_options.Validate());
+ }
+
+ ExtractSnippets(&snippet_list, input_stream,
+ options.read_raw_js_snippet());
+
+ if (options.rebaseline()) {
+ if (!WriteExpectationsFile(snippet_list, platform, updated_options,
+ input_filename)) {
+ return 3;
+ }
+ snippet_list.clear();
+ }
}
}
- std::ostream& output_stream =
- options.write_to_stdout() ? std::cout : output_file_handle;
- GenerateExpectationsFile(output_stream, snippet_list, options, argv[0]);
+ if (!options.rebaseline()) {
+ if (!WriteExpectationsFile(snippet_list, platform, options,
+ options.output_filename())) {
+ return 3;
+ }
+ }
}
diff --git a/deps/v8/test/cctest/interpreter/interpreter-tester.cc b/deps/v8/test/cctest/interpreter/interpreter-tester.cc
new file mode 100644
index 0000000000..df60c452cd
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/interpreter-tester.cc
@@ -0,0 +1,73 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/interpreter/interpreter-tester.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+MaybeHandle<Object> CallInterpreter(Isolate* isolate,
+ Handle<JSFunction> function) {
+ return Execution::Call(isolate, function,
+ isolate->factory()->undefined_value(), 0, nullptr);
+}
+
+InterpreterTester::InterpreterTester(
+ Isolate* isolate, const char* source, MaybeHandle<BytecodeArray> bytecode,
+ MaybeHandle<TypeFeedbackVector> feedback_vector, const char* filter)
+ : isolate_(isolate),
+ source_(source),
+ bytecode_(bytecode),
+ feedback_vector_(feedback_vector) {
+ i::FLAG_ignition = true;
+ i::FLAG_always_opt = false;
+ // Ensure handler table is generated.
+ isolate->interpreter()->Initialize();
+}
+
+InterpreterTester::InterpreterTester(
+ Isolate* isolate, Handle<BytecodeArray> bytecode,
+ MaybeHandle<TypeFeedbackVector> feedback_vector, const char* filter)
+ : InterpreterTester(isolate, nullptr, bytecode, feedback_vector, filter) {}
+
+InterpreterTester::InterpreterTester(Isolate* isolate, const char* source,
+ const char* filter)
+ : InterpreterTester(isolate, source, MaybeHandle<BytecodeArray>(),
+ MaybeHandle<TypeFeedbackVector>(), filter) {}
+
+InterpreterTester::~InterpreterTester() {}
+
+Local<Message> InterpreterTester::CheckThrowsReturnMessage() {
+ TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate_));
+ auto callable = GetCallable<>();
+ MaybeHandle<Object> no_result = callable();
+ CHECK(isolate_->has_pending_exception());
+ CHECK(try_catch.HasCaught());
+ CHECK(no_result.is_null());
+ isolate_->OptionalRescheduleException(true);
+ CHECK(!try_catch.Message().IsEmpty());
+ return try_catch.Message();
+}
+
+Handle<Object> InterpreterTester::NewObject(const char* script) {
+ return v8::Utils::OpenHandle(*CompileRun(script));
+}
+
+Handle<String> InterpreterTester::GetName(Isolate* isolate, const char* name) {
+ Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(name);
+ return isolate->factory()->string_table()->LookupString(isolate, result);
+}
+
+std::string InterpreterTester::SourceForBody(const char* body) {
+ return "function " + function_name() + "() {\n" + std::string(body) + "\n}";
+}
+
+std::string InterpreterTester::function_name() {
+ return std::string(kFunctionName);
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/interpreter/interpreter-tester.h b/deps/v8/test/cctest/interpreter/interpreter-tester.h
new file mode 100644
index 0000000000..f8a0a8a67a
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/interpreter-tester.h
@@ -0,0 +1,128 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/execution.h"
+#include "src/handles.h"
+#include "src/interpreter/bytecode-array-builder.h"
+#include "src/interpreter/interpreter.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/test-feedback-vector.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+MaybeHandle<Object> CallInterpreter(Isolate* isolate,
+ Handle<JSFunction> function);
+template <class... A>
+static MaybeHandle<Object> CallInterpreter(Isolate* isolate,
+ Handle<JSFunction> function,
+ A... args) {
+ Handle<Object> argv[] = {args...};
+ return Execution::Call(isolate, function,
+ isolate->factory()->undefined_value(), sizeof...(args),
+ argv);
+}
+
+template <class... A>
+class InterpreterCallable {
+ public:
+ InterpreterCallable(Isolate* isolate, Handle<JSFunction> function)
+ : isolate_(isolate), function_(function) {}
+ virtual ~InterpreterCallable() {}
+
+ MaybeHandle<Object> operator()(A... args) {
+ return CallInterpreter(isolate_, function_, args...);
+ }
+
+ private:
+ Isolate* isolate_;
+ Handle<JSFunction> function_;
+};
+
+namespace {
+const char kFunctionName[] = "f";
+} // namespace
+
+class InterpreterTester {
+ public:
+ InterpreterTester(Isolate* isolate, const char* source,
+ MaybeHandle<BytecodeArray> bytecode,
+ MaybeHandle<TypeFeedbackVector> feedback_vector,
+ const char* filter);
+
+ InterpreterTester(Isolate* isolate, Handle<BytecodeArray> bytecode,
+ MaybeHandle<TypeFeedbackVector> feedback_vector =
+ MaybeHandle<TypeFeedbackVector>(),
+ const char* filter = kFunctionName);
+
+ InterpreterTester(Isolate* isolate, const char* source,
+ const char* filter = kFunctionName);
+
+ virtual ~InterpreterTester();
+
+ template <class... A>
+ InterpreterCallable<A...> GetCallable() {
+ return InterpreterCallable<A...>(isolate_, GetBytecodeFunction<A...>());
+ }
+
+ Local<Message> CheckThrowsReturnMessage();
+
+ static Handle<Object> NewObject(const char* script);
+
+ static Handle<String> GetName(Isolate* isolate, const char* name);
+
+ static std::string SourceForBody(const char* body);
+
+ static std::string function_name();
+
+ private:
+ Isolate* isolate_;
+ const char* source_;
+ MaybeHandle<BytecodeArray> bytecode_;
+ MaybeHandle<TypeFeedbackVector> feedback_vector_;
+
+ template <class... A>
+ Handle<JSFunction> GetBytecodeFunction() {
+ Handle<JSFunction> function;
+ if (source_) {
+ CompileRun(source_);
+ v8::Local<v8::Context> context =
+ v8::Isolate::GetCurrent()->GetCurrentContext();
+ Local<Function> api_function =
+ Local<Function>::Cast(CcTest::global()
+ ->Get(context, v8_str(kFunctionName))
+ .ToLocalChecked());
+ function = Handle<JSFunction>::cast(v8::Utils::OpenHandle(*api_function));
+ } else {
+ int arg_count = sizeof...(A);
+ std::string source("(function " + function_name() + "(");
+ for (int i = 0; i < arg_count; i++) {
+ source += i == 0 ? "a" : ", a";
+ }
+ source += "){})";
+ function = Handle<JSFunction>::cast(v8::Utils::OpenHandle(
+ *v8::Local<v8::Function>::Cast(CompileRun(source.c_str()))));
+ function->ReplaceCode(
+ *isolate_->builtins()->InterpreterEntryTrampoline());
+ }
+
+ if (!bytecode_.is_null()) {
+ function->shared()->set_function_data(*bytecode_.ToHandleChecked());
+ }
+ if (!feedback_vector_.is_null()) {
+ function->shared()->set_feedback_vector(
+ *feedback_vector_.ToHandleChecked());
+ }
+ return function;
+ }
+
+ DISALLOW_COPY_AND_ASSIGN(InterpreterTester);
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
index 73767eb3c6..2519f25142 100644
--- a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
+++ b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <fstream>
+
#include "src/v8.h"
#include "src/compiler.h"
@@ -9,7936 +11,1893 @@
#include "src/interpreter/bytecode-generator.h"
#include "src/interpreter/interpreter.h"
#include "test/cctest/cctest.h"
+#include "test/cctest/interpreter/bytecode-expectations-printer.h"
#include "test/cctest/test-feedback-vector.h"
namespace v8 {
namespace internal {
namespace interpreter {
-static const InstanceType kInstanceTypeDontCare = static_cast<InstanceType>(-1);
+#define XSTR(A) #A
+#define STR(A) XSTR(A)
-class BytecodeGeneratorHelper {
- public:
- const char* kFunctionName = "f";
+#define UNIQUE_VAR() "var a" STR(__COUNTER__) " = 0;\n"
- static const int kLastParamIndex =
- -InterpreterFrameConstants::kLastParamFromRegisterPointer / kPointerSize;
+#define REPEAT_2(...) __VA_ARGS__ __VA_ARGS__
+#define REPEAT_4(...) REPEAT_2(__VA_ARGS__) REPEAT_2(__VA_ARGS__)
+#define REPEAT_8(...) REPEAT_4(__VA_ARGS__) REPEAT_4(__VA_ARGS__)
+#define REPEAT_16(...) REPEAT_8(__VA_ARGS__) REPEAT_8(__VA_ARGS__)
+#define REPEAT_32(...) REPEAT_16(__VA_ARGS__) REPEAT_16(__VA_ARGS__)
+#define REPEAT_64(...) REPEAT_32(__VA_ARGS__) REPEAT_32(__VA_ARGS__)
+#define REPEAT_128(...) REPEAT_64(__VA_ARGS__) REPEAT_64(__VA_ARGS__)
+#define REPEAT_256(...) REPEAT_128(__VA_ARGS__) REPEAT_128(__VA_ARGS__)
+
+#define REPEAT_127(...) \
+ REPEAT_64(__VA_ARGS__) \
+ REPEAT_32(__VA_ARGS__) \
+ REPEAT_16(__VA_ARGS__) \
+ REPEAT_8(__VA_ARGS__) \
+ REPEAT_4(__VA_ARGS__) \
+ REPEAT_2(__VA_ARGS__) \
+ __VA_ARGS__
- BytecodeGeneratorHelper() {
+#define REPEAT_249(...) \
+ REPEAT_127(__VA_ARGS__) \
+ REPEAT_64(__VA_ARGS__) \
+ REPEAT_32(__VA_ARGS__) \
+ REPEAT_16(__VA_ARGS__) \
+ REPEAT_8(__VA_ARGS__) \
+ REPEAT_2(__VA_ARGS__)
+
+#define REPEAT_2_UNIQUE_VARS() UNIQUE_VAR() UNIQUE_VAR()
+#define REPEAT_4_UNIQUE_VARS() REPEAT_2_UNIQUE_VARS() REPEAT_2_UNIQUE_VARS()
+#define REPEAT_8_UNIQUE_VARS() REPEAT_4_UNIQUE_VARS() REPEAT_4_UNIQUE_VARS()
+#define REPEAT_16_UNIQUE_VARS() REPEAT_8_UNIQUE_VARS() REPEAT_8_UNIQUE_VARS()
+#define REPEAT_32_UNIQUE_VARS() REPEAT_16_UNIQUE_VARS() REPEAT_16_UNIQUE_VARS()
+#define REPEAT_64_UNIQUE_VARS() REPEAT_32_UNIQUE_VARS() REPEAT_32_UNIQUE_VARS()
+#define REPEAT_128_UNIQUE_VARS() REPEAT_64_UNIQUE_VARS() REPEAT_64_UNIQUE_VARS()
+
+#define REPEAT_249_UNIQUE_VARS() \
+ REPEAT_128_UNIQUE_VARS() \
+ REPEAT_64_UNIQUE_VARS() \
+ REPEAT_32_UNIQUE_VARS() \
+ REPEAT_16_UNIQUE_VARS() \
+ REPEAT_8_UNIQUE_VARS() \
+ UNIQUE_VAR()
+
+static const char* kGoldenFileDirectory =
+ "test/cctest/interpreter/bytecode_expectations/";
+
+class InitializedIgnitionHandleScope : public InitializedHandleScope {
+ public:
+ InitializedIgnitionHandleScope() {
i::FLAG_ignition = true;
- i::FLAG_ignition_filter = StrDup(kFunctionName);
i::FLAG_always_opt = false;
i::FLAG_allow_natives_syntax = true;
CcTest::i_isolate()->interpreter()->Initialize();
}
+};
- Isolate* isolate() { return CcTest::i_isolate(); }
- Factory* factory() { return CcTest::i_isolate()->factory(); }
-
- Handle<BytecodeArray> MakeTopLevelBytecode(const char* source) {
- const char* old_ignition_filter = i::FLAG_ignition_filter;
- i::FLAG_ignition_filter = "*";
- Local<v8::Script> script = v8_compile(source);
- i::FLAG_ignition_filter = old_ignition_filter;
- i::Handle<i::JSFunction> js_function = v8::Utils::OpenHandle(*script);
- return handle(js_function->shared()->bytecode_array(), CcTest::i_isolate());
+void SkipGoldenFileHeader(std::istream& stream) { // NOLINT
+ std::string line;
+ int separators_seen = 0;
+ while (std::getline(stream, line)) {
+ if (line == "---") separators_seen += 1;
+ if (separators_seen == 2) return;
}
+}
- Handle<BytecodeArray> MakeBytecode(const char* script,
- const char* function_name) {
- CompileRun(script);
- v8::Local<v8::Context> context =
- v8::Isolate::GetCurrent()->GetCurrentContext();
- Local<Function> function = Local<Function>::Cast(
- CcTest::global()->Get(context, v8_str(function_name)).ToLocalChecked());
- i::Handle<i::JSFunction> js_function =
- i::Handle<i::JSFunction>::cast(v8::Utils::OpenHandle(*function));
- return handle(js_function->shared()->bytecode_array(), CcTest::i_isolate());
- }
+std::string LoadGolden(const std::string& golden_filename) {
+ std::ifstream expected_file((kGoldenFileDirectory + golden_filename).c_str());
+ CHECK(expected_file.is_open());
+ SkipGoldenFileHeader(expected_file);
+ std::ostringstream expected_stream;
+ // Restore the first separator, which was consumed by SkipGoldenFileHeader
+ expected_stream << "---\n" << expected_file.rdbuf();
+ return expected_stream.str();
+}
- Handle<BytecodeArray> MakeBytecode(const char* script, const char* filter,
- const char* function_name) {
- const char* old_ignition_filter = i::FLAG_ignition_filter;
- i::FLAG_ignition_filter = filter;
- Handle<BytecodeArray> return_val = MakeBytecode(script, function_name);
- i::FLAG_ignition_filter = old_ignition_filter;
- return return_val;
+template <size_t N>
+std::string BuildActual(const BytecodeExpectationsPrinter& printer,
+ const char* (&snippet_list)[N],
+ const char* prologue = nullptr,
+ const char* epilogue = nullptr) {
+ std::ostringstream actual_stream;
+ for (const char* snippet : snippet_list) {
+ std::string source_code;
+ if (prologue) source_code += prologue;
+ source_code += snippet;
+ if (epilogue) source_code += epilogue;
+ printer.PrintExpectation(actual_stream, source_code);
}
+ return actual_stream.str();
+}
- Handle<BytecodeArray> MakeBytecodeForFunctionBody(const char* body) {
- static const char kFormat[] = "function %s() { %s }\n%s();";
- static const int kFormatLength = arraysize(kFormat);
- int length = kFormatLength + 2 * StrLength(kFunctionName) + StrLength(body);
- ScopedVector<char> program(length);
- length = SNPrintF(program, kFormat, kFunctionName, body, kFunctionName);
- CHECK_GT(length, 0);
- return MakeBytecode(program.start(), kFunctionName);
- }
+using ConstantPoolType = BytecodeExpectationsPrinter::ConstantPoolType;
- Handle<BytecodeArray> MakeBytecodeForFunction(const char* function) {
- ScopedVector<char> program(3072);
- SNPrintF(program, "%s\n%s();", function, kFunctionName);
- return MakeBytecode(program.start(), kFunctionName);
- }
+TEST(PrimitiveReturnStatements) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kNumber);
+ const char* snippets[] = {
+ "",
- Handle<BytecodeArray> MakeBytecodeForFunctionNoFilter(const char* function) {
- ScopedVector<char> program(3072);
- SNPrintF(program, "%s\n%s();", function, kFunctionName);
- return MakeBytecode(program.start(), "*", kFunctionName);
- }
-};
+ "return;",
+ "return null;",
-// Helper macros for handcrafting bytecode sequences.
-#define B(x) static_cast<uint8_t>(Bytecode::k##x)
-#define U8(x) static_cast<uint8_t>((x) & 0xff)
-#define R(x) static_cast<uint8_t>(-(x) & 0xff)
-#define R16(x) U16(-(x))
-#define A(x, n) R(helper.kLastParamIndex - (n) + 1 + (x))
-#define THIS(n) A(0, n)
-#if defined(V8_TARGET_LITTLE_ENDIAN)
-#define U16(x) static_cast<uint8_t>((x) & 0xff), \
- static_cast<uint8_t>(((x) >> kBitsPerByte) & 0xff)
-#define U16I(x) static_cast<uint8_t>((x) & 0xff), \
- static_cast<uint8_t>(((x++) >> kBitsPerByte) & 0xff)
-#elif defined(V8_TARGET_BIG_ENDIAN)
-#define U16(x) static_cast<uint8_t>(((x) >> kBitsPerByte) & 0xff), \
- static_cast<uint8_t>((x) & 0xff)
-#define U16I(x) static_cast<uint8_t>(((x) >> kBitsPerByte) & 0xff), \
- static_cast<uint8_t>((x++) & 0xff)
-#else
-#error Unknown byte ordering
-#endif
+ "return true;",
-#define XSTR(A) #A
-#define STR(A) XSTR(A)
+ "return false;",
-#define COMMA() ,
-#define SPACE()
-#define UNIQUE_VAR() "var a" STR(__COUNTER__) " = 0;\n"
+ "return 0;",
-#define REPEAT_2(SEP, ...) \
- __VA_ARGS__ SEP() __VA_ARGS__
-#define REPEAT_4(SEP, ...) \
- REPEAT_2(SEP, __VA_ARGS__) SEP() REPEAT_2(SEP, __VA_ARGS__)
-#define REPEAT_8(SEP, ...) \
- REPEAT_4(SEP, __VA_ARGS__) SEP() REPEAT_4(SEP, __VA_ARGS__)
-#define REPEAT_16(SEP, ...) \
- REPEAT_8(SEP, __VA_ARGS__) SEP() REPEAT_8(SEP, __VA_ARGS__)
-#define REPEAT_32(SEP, ...) \
- REPEAT_16(SEP, __VA_ARGS__) SEP() REPEAT_16(SEP, __VA_ARGS__)
-#define REPEAT_64(SEP, ...) \
- REPEAT_32(SEP, __VA_ARGS__) SEP() REPEAT_32(SEP, __VA_ARGS__)
-#define REPEAT_128(SEP, ...) \
- REPEAT_64(SEP, __VA_ARGS__) SEP() REPEAT_64(SEP, __VA_ARGS__)
-#define REPEAT_256(SEP, ...) \
- REPEAT_128(SEP, __VA_ARGS__) SEP() REPEAT_128(SEP, __VA_ARGS__)
-
-#define REPEAT_127(SEP, ...) \
- REPEAT_64(SEP, __VA_ARGS__) SEP() REPEAT_32(SEP, __VA_ARGS__) SEP() \
- REPEAT_16(SEP, __VA_ARGS__) SEP() REPEAT_8(SEP, __VA_ARGS__) SEP() \
- REPEAT_4(SEP, __VA_ARGS__) SEP() REPEAT_2(SEP, __VA_ARGS__) SEP() \
- __VA_ARGS__
+ "return +1;",
-#define REPEAT_249(SEP, ...) \
- REPEAT_127(SEP, __VA_ARGS__) SEP() REPEAT_64(SEP, __VA_ARGS__) SEP() \
- REPEAT_32(SEP, __VA_ARGS__) SEP() REPEAT_16(SEP, __VA_ARGS__) SEP() \
- REPEAT_8(SEP, __VA_ARGS__) SEP() REPEAT_2(SEP, __VA_ARGS__)
-
-#define REPEAT_249_UNIQUE_VARS() \
-UNIQUE_VAR() REPEAT_127(UNIQUE_VAR) UNIQUE_VAR() REPEAT_64(UNIQUE_VAR) \
-UNIQUE_VAR() REPEAT_32(UNIQUE_VAR) UNIQUE_VAR() REPEAT_16(UNIQUE_VAR) \
-UNIQUE_VAR() REPEAT_8(UNIQUE_VAR) UNIQUE_VAR() REPEAT_2(UNIQUE_VAR)
-
-// Structure for containing expected bytecode snippets.
-template<typename T, int C = 6>
-struct ExpectedSnippet {
- const char* code_snippet;
- int frame_size;
- int parameter_count;
- int bytecode_length;
- const uint8_t bytecode[2048];
- int constant_count;
- T constants[C];
- int handler_count;
- struct {
- int start;
- int end;
- int handler;
- } handlers[C];
-};
+ "return -1;",
+ "return +127;",
-static void CheckConstant(int expected, Object* actual) {
- CHECK_EQ(expected, Smi::cast(actual)->value());
+ "return -128;",
+ };
+
+ CHECK_EQ(BuildActual(printer, snippets),
+ LoadGolden("PrimitiveReturnStatements.golden"));
}
+TEST(PrimitiveExpressions) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kNumber);
+ const char* snippets[] = {
+ "var x = 0; return x;",
-static void CheckConstant(double expected, Object* actual) {
- CHECK_EQ(expected, HeapNumber::cast(actual)->value());
-}
+ "var x = 0; return x + 3;",
+ "var x = 0; return x - 3;",
-static void CheckConstant(const char* expected, Object* actual) {
- Handle<String> expected_string =
- CcTest::i_isolate()->factory()->NewStringFromAsciiChecked(expected);
- CHECK(String::cast(actual)->Equals(*expected_string));
-}
+ "var x = 4; return x * 3;",
+ "var x = 4; return x / 3;",
-static void CheckConstant(Handle<Object> expected, Object* actual) {
- CHECK(actual == *expected || expected->StrictEquals(actual));
-}
+ "var x = 4; return x % 3;",
+ "var x = 1; return x | 2;",
-static void CheckConstant(InstanceType expected, Object* actual) {
- if (expected != kInstanceTypeDontCare) {
- CHECK_EQ(expected, HeapObject::cast(actual)->map()->instance_type());
- }
-}
+ "var x = 1; return x ^ 2;",
+ "var x = 1; return x & 2;",
-template <typename T, int C>
-static void CheckBytecodeArrayEqual(const ExpectedSnippet<T, C>& expected,
- Handle<BytecodeArray> actual) {
- CHECK_EQ(expected.frame_size, actual->frame_size());
- CHECK_EQ(expected.parameter_count, actual->parameter_count());
- CHECK_EQ(expected.bytecode_length, actual->length());
- if (expected.constant_count == 0) {
- CHECK_EQ(CcTest::heap()->empty_fixed_array(), actual->constant_pool());
- } else {
- CHECK_EQ(expected.constant_count, actual->constant_pool()->length());
- for (int i = 0; i < expected.constant_count; i++) {
- CheckConstant(expected.constants[i], actual->constant_pool()->get(i));
- }
- }
- if (expected.handler_count == 0) {
- CHECK_EQ(CcTest::heap()->empty_fixed_array(), actual->handler_table());
- } else {
- HandlerTable* table = HandlerTable::cast(actual->handler_table());
- CHECK_EQ(expected.handler_count, table->NumberOfRangeEntries());
- for (int i = 0; i < expected.handler_count; i++) {
- CHECK_EQ(expected.handlers[i].start, table->GetRangeStart(i));
- CHECK_EQ(expected.handlers[i].end, table->GetRangeEnd(i));
- CHECK_EQ(expected.handlers[i].handler, table->GetRangeHandler(i));
- }
- }
+ "var x = 10; return x << 3;",
- BytecodeArrayIterator iterator(actual);
- int i = 0;
- while (!iterator.done()) {
- int bytecode_index = i++;
- Bytecode bytecode = iterator.current_bytecode();
- if (Bytecodes::ToByte(bytecode) != expected.bytecode[bytecode_index]) {
- std::ostringstream stream;
- stream << "Check failed: expected bytecode [" << bytecode_index
- << "] to be " << Bytecodes::ToString(static_cast<Bytecode>(
- expected.bytecode[bytecode_index]))
- << " but got " << Bytecodes::ToString(bytecode);
- FATAL(stream.str().c_str());
- }
- for (int j = 0; j < Bytecodes::NumberOfOperands(bytecode); ++j) {
- OperandType operand_type = Bytecodes::GetOperandType(bytecode, j);
- int operand_index = i;
- i += static_cast<int>(Bytecodes::SizeOfOperand(operand_type));
- uint32_t raw_operand = iterator.GetRawOperand(j, operand_type);
- uint32_t expected_operand;
- switch (Bytecodes::SizeOfOperand(operand_type)) {
- case OperandSize::kNone:
- UNREACHABLE();
- return;
- case OperandSize::kByte:
- expected_operand =
- static_cast<uint32_t>(expected.bytecode[operand_index]);
- break;
- case OperandSize::kShort:
- expected_operand =
- ReadUnalignedUInt16(&expected.bytecode[operand_index]);
- break;
- default:
- UNREACHABLE();
- return;
- }
- if (raw_operand != expected_operand) {
- std::ostringstream stream;
- stream << "Check failed: expected operand [" << j << "] for bytecode ["
- << bytecode_index << "] to be "
- << static_cast<unsigned int>(expected_operand) << " but got "
- << static_cast<unsigned int>(raw_operand);
- FATAL(stream.str().c_str());
- }
- }
- iterator.Advance();
- }
-}
+ "var x = 10; return x >> 3;",
+ "var x = 10; return x >>> 3;",
-TEST(PrimitiveReturnStatements) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- // clang-format off
- ExpectedSnippet<int> snippets[] = {
- {"",
- 0,
- 1,
- 3,
- {
- B(StackCheck), //
- B(LdaUndefined), //
- B(Return) //
- },
- 0},
- {"return;",
- 0,
- 1,
- 3,
- {
- B(StackCheck), //
- B(LdaUndefined), //
- B(Return) //
- },
- 0},
- {"return null;",
- 0,
- 1,
- 3,
- {
- B(StackCheck), //
- B(LdaNull), //
- B(Return) //
- },
- 0},
- {"return true;",
- 0,
- 1,
- 3,
- {
- B(StackCheck), //
- B(LdaTrue), //
- B(Return) //
- },
- 0},
- {"return false;",
- 0,
- 1,
- 3,
- {
- B(StackCheck), //
- B(LdaFalse), //
- B(Return) //
- },
- 0},
- {"return 0;",
- 0,
- 1,
- 3,
- {
- B(StackCheck), //
- B(LdaZero), //
- B(Return) //
- },
- 0},
- {"return +1;",
- 0,
- 1,
- 4,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Return) //
- },
- 0},
- {"return -1;",
- 0,
- 1,
- 4,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(-1), //
- B(Return) //
- },
- 0},
- {"return +127;",
- 0,
- 1,
- 4,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(127), //
- B(Return) //
- },
- 0},
- {"return -128;",
- 0,
- 1,
- 4,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(-128), //
- B(Return) //
- },
- 0},
+ "var x = 0; return (x, 3);",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets),
+ LoadGolden("PrimitiveExpressions.golden"));
}
+TEST(LogicalExpressions) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kNumber);
+ const char* snippets[] = {
+ "var x = 0; return x || 3;",
-TEST(PrimitiveExpressions) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- // clang-format off
- ExpectedSnippet<int> snippets[] = {
- {"var x = 0; return x;",
- kPointerSize,
- 1,
- 5,
- {B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(Return)},
- 0},
- {"var x = 0; return x + 3;",
- 2 * kPointerSize,
- 1,
- 11,
- {B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(3), //
- B(Add), R(1), //
- B(Return)},
- 0},
- {"var x = 0; return x - 3;",
- 2 * kPointerSize,
- 1,
- 11,
- {B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(3), //
- B(Sub), R(1), //
- B(Return)},
- 0},
- {"var x = 4; return x * 3;",
- 2 * kPointerSize,
- 1,
- 12,
- {B(StackCheck), //
- B(LdaSmi8), U8(4), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(3), //
- B(Mul), R(1), //
- B(Return)},
- 0},
- {"var x = 4; return x / 3;",
- 2 * kPointerSize,
- 1,
- 12,
- {B(StackCheck), //
- B(LdaSmi8), U8(4), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(3), //
- B(Div), R(1), //
- B(Return)},
- 0},
- {"var x = 4; return x % 3;",
- 2 * kPointerSize,
- 1,
- 12,
- {B(StackCheck), //
- B(LdaSmi8), U8(4), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(3), //
- B(Mod), R(1), //
- B(Return)},
- 0},
- {"var x = 1; return x | 2;",
- 2 * kPointerSize,
- 1,
- 12,
- {B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(2), //
- B(BitwiseOr), R(1), //
- B(Return)},
- 0},
- {"var x = 1; return x ^ 2;",
- 2 * kPointerSize,
- 1,
- 12,
- {B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(2), //
- B(BitwiseXor), R(1), //
- B(Return)},
- 0},
- {"var x = 1; return x & 2;",
- 2 * kPointerSize,
- 1,
- 12,
- {B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(2), //
- B(BitwiseAnd), R(1), //
- B(Return)},
- 0},
- {"var x = 10; return x << 3;",
- 2 * kPointerSize,
- 1,
- 12,
- {B(StackCheck), //
- B(LdaSmi8), U8(10), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(3), //
- B(ShiftLeft), R(1), //
- B(Return)},
- 0},
- {"var x = 10; return x >> 3;",
- 2 * kPointerSize,
- 1,
- 12,
- {B(StackCheck), //
- B(LdaSmi8), U8(10), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(3), //
- B(ShiftRight), R(1), //
- B(Return)},
- 0},
- {"var x = 10; return x >>> 3;",
- 2 * kPointerSize,
- 1,
- 12,
- {B(StackCheck), //
- B(LdaSmi8), U8(10), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(3), //
- B(ShiftRightLogical), R(1), //
- B(Return)},
- 0},
- {"var x = 0; return (x, 3);",
- 1 * kPointerSize,
- 1,
- 7,
- {B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(LdaSmi8), U8(3), //
- B(Return)},
- 0},
- };
- // clang-format on
+ "var x = 0; return (x == 1) || 3;",
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
-}
+ "var x = 0; return x && 3;",
+ "var x = 0; return (x == 0) && 3;",
-TEST(LogicalExpressions) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- // clang-format off
- ExpectedSnippet<int> snippets[] = {
- {"var x = 0; return x || 3;",
- 1 * kPointerSize,
- 1,
- 9,
- {B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(JumpIfToBooleanTrue), U8(4), //
- B(LdaSmi8), U8(3), //
- B(Return)},
- 0},
- {"var x = 0; return (x == 1) || 3;",
- 2 * kPointerSize,
- 1,
- 15,
- {B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(1), //
- B(TestEqual), R(1), //
- B(JumpIfTrue), U8(4), //
- B(LdaSmi8), U8(3), //
- B(Return)},
- 0},
- {"var x = 0; return x && 3;",
- 1 * kPointerSize,
- 1,
- 9,
- {B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(JumpIfToBooleanFalse), U8(4), //
- B(LdaSmi8), U8(3), //
- B(Return)},
- 0},
- {"var x = 0; return (x == 0) && 3;",
- 2 * kPointerSize,
- 1,
- 14,
- {B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaZero), //
- B(TestEqual), R(1), //
- B(JumpIfFalse), U8(4), //
- B(LdaSmi8), U8(3), //
- B(Return)},
- 0},
- {"var x = 0; return x || (1, 2, 3);",
- 1 * kPointerSize,
- 1,
- 9,
- {B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(JumpIfToBooleanTrue), U8(4), //
- B(LdaSmi8), U8(3), //
- B(Return)},
- 0},
- {"var a = 2, b = 3, c = 4; return a || (a, b, a, b, c = 5, 3);",
- 3 * kPointerSize,
- 1,
- 32,
- {B(StackCheck), //
- B(LdaSmi8), U8(2), //
- B(Star), R(0), //
- B(LdaSmi8), U8(3), //
- B(Star), R(1), //
- B(LdaSmi8), U8(4), //
- B(Star), R(2), //
- B(Ldar), R(0), //
- B(JumpIfToBooleanTrue), U8(16), //
- B(Ldar), R(0), //
- B(Ldar), R(1), //
- B(Ldar), R(0), //
- B(Ldar), R(1), //
- B(LdaSmi8), U8(5), //
- B(Star), R(2), //
- B(LdaSmi8), U8(3), //
- B(Return)},
- 0},
- {"var x = 1; var a = 2, b = 3; return x || ("
- REPEAT_32(SPACE, "a = 1, b = 2, ")
- "3);",
- 3 * kPointerSize,
- 1,
- 276,
- {B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(LdaSmi8), U8(2), //
- B(Star), R(1), //
- B(LdaSmi8), U8(3), //
- B(Star), R(2), //
- B(Ldar), R(0), //
- B(JumpIfToBooleanTrueConstant), U8(0), //
- REPEAT_32(COMMA, //
- B(LdaSmi8), U8(1), //
- B(Star), R(1), //
- B(LdaSmi8), U8(2), //
- B(Star), R(2)), //
- B(LdaSmi8), U8(3), //
- B(Return)},
- 1,
- {260, 0, 0, 0}},
- {"var x = 0; var a = 2, b = 3; return x && ("
- REPEAT_32(SPACE, "a = 1, b = 2, ")
- "3);",
- 3 * kPointerSize,
- 1,
- 275,
- {B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(LdaSmi8), U8(2), //
- B(Star), R(1), //
- B(LdaSmi8), U8(3), //
- B(Star), R(2), //
- B(Ldar), R(0), //
- B(JumpIfToBooleanFalseConstant), U8(0), //
- REPEAT_32(COMMA, //
- B(LdaSmi8), U8(1), //
- B(Star), R(1), //
- B(LdaSmi8), U8(2), //
- B(Star), R(2)), //
- B(LdaSmi8), U8(3), //
- B(Return)}, //
- 1,
- {260, 0, 0, 0}},
- {"var x = 1; var a = 2, b = 3; return (x > 3) || ("
- REPEAT_32(SPACE, "a = 1, b = 2, ")
- "3);",
- 4 * kPointerSize,
- 1,
- 282,
- {B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(LdaSmi8), U8(2), //
- B(Star), R(1), //
- B(LdaSmi8), U8(3), //
- B(Star), R(2), //
- B(Ldar), R(0), //
- B(Star), R(3), //
- B(LdaSmi8), U8(3), //
- B(TestGreaterThan), R(3), //
- B(JumpIfTrueConstant), U8(0), //
- REPEAT_32(COMMA, //
- B(LdaSmi8), U8(1), //
- B(Star), R(1), //
- B(LdaSmi8), U8(2), //
- B(Star), R(2)), //
- B(LdaSmi8), U8(3), //
- B(Return)},
- 1,
- {260, 0, 0, 0}},
- {"var x = 0; var a = 2, b = 3; return (x < 5) && ("
- REPEAT_32(SPACE, "a = 1, b = 2, ")
- "3);",
- 4 * kPointerSize,
- 1,
- 281,
- {B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(LdaSmi8), U8(2), //
- B(Star), R(1), //
- B(LdaSmi8), U8(3), //
- B(Star), R(2), //
- B(Ldar), R(0), //
- B(Star), R(3), //
- B(LdaSmi8), U8(5), //
- B(TestLessThan), R(3), //
- B(JumpIfFalseConstant), U8(0), //
- REPEAT_32(COMMA, //
- B(LdaSmi8), U8(1), //
- B(Star), R(1), //
- B(LdaSmi8), U8(2), //
- B(Star), R(2)), //
- B(LdaSmi8), U8(3), //
- B(Return)},
- 1,
- {260, 0, 0, 0}},
- {"return 0 && 3;",
- 0 * kPointerSize,
- 1,
- 3,
- {B(StackCheck), //
- B(LdaZero), //
- B(Return)},
- 0},
- {"return 1 || 3;",
- 0 * kPointerSize,
- 1,
- 4,
- {B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Return)},
- 0},
- {"var x = 1; return x && 3 || 0, 1;",
- 1 * kPointerSize,
- 1,
- 15,
- {B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(JumpIfToBooleanFalse), U8(4), //
- B(LdaSmi8), U8(3), //
- B(JumpIfToBooleanTrue), U8(3), //
- B(LdaZero), //
- B(LdaSmi8), U8(1), //
- B(Return)},
- 0}
- };
- // clang-format on
+ "var x = 0; return x || (1, 2, 3);",
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
-}
+ "var a = 2, b = 3, c = 4; return a || (a, b, a, b, c = 5, 3);",
+ "var x = 1; var a = 2, b = 3; return x || (" //
+ REPEAT_32("\n a = 1, b = 2, ") //
+ "3);",
-TEST(Parameters) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- // clang-format off
- ExpectedSnippet<int> snippets[] = {
- {"function f() { return this; }",
- 0,
- 1,
- 4,
- {B(StackCheck), //
- B(Ldar), THIS(1), //
- B(Return)},
- 0},
- {"function f(arg1) { return arg1; }",
- 0,
- 2,
- 4,
- {B(StackCheck), //
- B(Ldar), A(1, 2), //
- B(Return)},
- 0},
- {"function f(arg1) { return this; }",
- 0,
- 2,
- 4,
- {B(StackCheck), //
- B(Ldar), THIS(2), //
- B(Return)},
- 0},
- {"function f(arg1, arg2, arg3, arg4, arg5, arg6, arg7) { return arg4; }",
- 0,
- 8,
- 4,
- {B(StackCheck), //
- B(Ldar), A(4, 8), //
- B(Return)},
- 0},
- {"function f(arg1, arg2, arg3, arg4, arg5, arg6, arg7) { return this; }",
- 0,
- 8,
- 4,
- {B(StackCheck), //
- B(Ldar), THIS(8), //
- B(Return)},
- 0},
- {"function f(arg1) { arg1 = 1; }",
- 0,
- 2,
- 7,
- {B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), A(1, 2), //
- B(LdaUndefined), //
- B(Return)},
- 0},
- {"function f(arg1, arg2, arg3, arg4) { arg2 = 1; }",
- 0,
- 5,
- 7,
- {B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), A(2, 5), //
- B(LdaUndefined), //
- B(Return)},
- 0},
+ "var x = 0; var a = 2, b = 3; return x && (" //
+ REPEAT_32("\n a = 1, b = 2, ") //
+ "3);",
+
+ "var x = 1; var a = 2, b = 3; return (x > 3) || (" //
+ REPEAT_32("\n a = 1, b = 2, ") //
+ "3);",
+
+ "var x = 0; var a = 2, b = 3; return (x < 5) && (" //
+ REPEAT_32("\n a = 1, b = 2, ") //
+ "3);",
+
+ "return 0 && 3;",
+
+ "return 1 || 3;",
+
+ "var x = 1; return x && 3 || 0, 1;",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunction(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets),
+ LoadGolden("LogicalExpressions.golden"));
}
+TEST(Parameters) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kNumber);
+ printer.set_wrap(false);
+ printer.set_test_function_name("f");
+
+ const char* snippets[] = {
+ "function f() { return this; }",
-TEST(IntegerConstants) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- // clang-format off
- ExpectedSnippet<int> snippets[] = {
- {"return 12345678;",
- 0,
- 1,
- 4,
- {
- B(StackCheck), //
- B(LdaConstant), U8(0), //
- B(Return) //
- },
- 1,
- {12345678}},
- {"var a = 1234; return 5678;",
- 1 * kPointerSize,
- 1,
- 8,
- {
- B(StackCheck), //
- B(LdaConstant), U8(0), //
- B(Star), R(0), //
- B(LdaConstant), U8(1), //
- B(Return) //
- },
- 2,
- {1234, 5678}},
- {"var a = 1234; return 1234;",
- 1 * kPointerSize,
- 1,
- 8,
- {
- B(StackCheck), //
- B(LdaConstant), U8(0), //
- B(Star), R(0), //
- B(LdaConstant), U8(0), //
- B(Return) //
- },
- 1,
- {1234}}
+ "function f(arg1) { return arg1; }",
+
+ "function f(arg1) { return this; }",
+
+ "function f(arg1, arg2, arg3, arg4, arg5, arg6, arg7) { return arg4; }",
+
+ "function f(arg1, arg2, arg3, arg4, arg5, arg6, arg7) { return this; }",
+
+ "function f(arg1) { arg1 = 1; }",
+
+ "function f(arg1, arg2, arg3, arg4) { arg2 = 1; }",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets, "", "\nf();"),
+ LoadGolden("Parameters.golden"));
}
+TEST(IntegerConstants) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kNumber);
+ const char* snippets[] = {
+ "return 12345678;",
-TEST(HeapNumberConstants) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- int wide_idx = 0;
-
- // clang-format off
- ExpectedSnippet<double, 257> snippets[] = {
- {"return 1.2;",
- 0,
- 1,
- 4,
- {
- B(StackCheck), //
- B(LdaConstant), U8(0), //
- B(Return) //
- },
- 1,
- {1.2}},
- {"var a = 1.2; return 2.6;",
- 1 * kPointerSize,
- 1,
- 8,
- {
- B(StackCheck), //
- B(LdaConstant), U8(0), //
- B(Star), R(0), //
- B(LdaConstant), U8(1), //
- B(Return) //
- },
- 2,
- {1.2, 2.6}},
- {"var a = 3.14; return 3.14;",
- 1 * kPointerSize,
- 1,
- 8,
- {
- B(StackCheck), //
- B(LdaConstant), U8(0), //
- B(Star), R(0), //
- B(LdaConstant), U8(1), //
- B(Return) //
- },
- 2,
- {3.14, 3.14}},
- {"var a;"
- REPEAT_256(SPACE, " a = 1.414;")
- " a = 3.14;",
- 1 * kPointerSize,
- 1,
- 1032,
- {
- B(StackCheck), //
- REPEAT_256(COMMA, //
- B(LdaConstant), U8(wide_idx++), //
- B(Star), R(0)), //
- B(LdaConstantWide), U16(wide_idx), //
- B(Star), R(0), //
- B(LdaUndefined), //
- B(Return), //
- },
- 257,
- {REPEAT_256(COMMA, 1.414),
- 3.14}}
+ "var a = 1234; return 5678;",
+
+ "var a = 1234; return 1234;",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets),
+ LoadGolden("IntegerConstants.golden"));
}
+TEST(HeapNumberConstants) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kNumber);
+ const char* snippets[] = {
+ "return 1.2;",
+
+ "var a = 1.2; return 2.6;",
-TEST(StringConstants) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- // clang-format off
- ExpectedSnippet<const char*> snippets[] = {
- {"return \"This is a string\";",
- 0,
- 1,
- 4,
- {
- B(StackCheck), //
- B(LdaConstant), U8(0), //
- B(Return) //
- },
- 1,
- {"This is a string"}},
- {"var a = \"First string\"; return \"Second string\";",
- 1 * kPointerSize,
- 1,
- 8,
- {
- B(StackCheck), //
- B(LdaConstant), U8(0), //
- B(Star), R(0), //
- B(LdaConstant), U8(1), //
- B(Return) //
- },
- 2,
- {"First string", "Second string"}},
- {"var a = \"Same string\"; return \"Same string\";",
- 1 * kPointerSize,
- 1,
- 8,
- {
- B(StackCheck), //
- B(LdaConstant), U8(0), //
- B(Star), R(0), //
- B(LdaConstant), U8(0), //
- B(Return) //
- },
- 1,
- {"Same string"}}
+ "var a = 3.14; return 3.14;",
+
+ "var a;" //
+ REPEAT_256("\na = 1.414;") //
+ " a = 3.14;",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets),
+ LoadGolden("HeapNumberConstants.golden"));
}
+TEST(StringConstants) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kString);
+ const char* snippets[] = {
+ "return \"This is a string\";",
-TEST(PropertyLoads) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
- Zone zone;
-
- FeedbackVectorSpec feedback_spec(&zone);
- FeedbackVectorSlot slot1 = feedback_spec.AddLoadICSlot();
- FeedbackVectorSlot slot2 = feedback_spec.AddLoadICSlot();
-
- Handle<i::TypeFeedbackVector> vector =
- i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
-
- // These are a hack used by the LoadICXXXWide tests below.
- int wide_idx_1 = vector->GetIndex(slot1) - 2;
- int wide_idx_2 = vector->GetIndex(slot1) - 2;
-
- // clang-format off
- ExpectedSnippet<const char*> snippets[] = {
- {"function f(a) { return a.name; }\nf({name : \"test\"})",
- 1 * kPointerSize,
- 2,
- 10,
- {
- B(StackCheck), //
- B(Ldar), A(1, 2), //
- B(Star), R(0), //
- B(LoadIC), R(0), U8(0), U8(vector->GetIndex(slot1)), //
- B(Return), //
- },
- 1,
- {"name"}},
- {"function f(a) { return a[\"key\"]; }\nf({key : \"test\"})",
- 1 * kPointerSize,
- 2,
- 10,
- {
- B(StackCheck), //
- B(Ldar), A(1, 2), //
- B(Star), R(0), //
- B(LoadIC), R(0), U8(0), U8(vector->GetIndex(slot1)), //
- B(Return) //
- },
- 1,
- {"key"}},
- {"function f(a) { return a[100]; }\nf({100 : \"test\"})",
- 1 * kPointerSize,
- 2,
- 11,
- {
- B(StackCheck), //
- B(Ldar), A(1, 2), //
- B(Star), R(0), //
- B(LdaSmi8), U8(100), //
- B(KeyedLoadIC), R(0), U8(vector->GetIndex(slot1)), //
- B(Return) //
- },
- 0},
- {"function f(a, b) { return a[b]; }\nf({arg : \"test\"}, \"arg\")",
- 1 * kPointerSize,
- 3,
- 11,
- {
- B(StackCheck), //
- B(Ldar), A(1, 3), //
- B(Star), R(0), //
- B(Ldar), A(1, 2), //
- B(KeyedLoadIC), R(0), U8(vector->GetIndex(slot1)), //
- B(Return) //
- },
- 0},
- {"function f(a) { var b = a.name; return a[-124]; }\n"
- "f({\"-124\" : \"test\", name : 123 })",
- 2 * kPointerSize,
- 2,
- 21,
- {
- B(StackCheck), //
- B(Ldar), A(1, 2), //
- B(Star), R(1), //
- B(LoadIC), R(1), U8(0), U8(vector->GetIndex(slot1)), //
- B(Star), R(0), //
- B(Ldar), A(1, 2), //
- B(Star), R(1), //
- B(LdaSmi8), U8(-124), //
- B(KeyedLoadIC), R(1), U8(vector->GetIndex(slot2)), //
- B(Return), //
- },
- 1,
- {"name"}},
- {"function f(a) {\n"
- " var b;\n"
- "b = a.name;"
- REPEAT_127(SPACE, " b = a.name; ")
- " return a.name; }\n"
- "f({name : \"test\"})\n",
- 2 * kPointerSize,
- 2,
- 1292,
- {
- B(StackCheck), //
- B(Ldar), A(1, 2), //
- B(Star), R(1), //
- B(LoadIC), R(1), U8(0), U8(wide_idx_1 += 2), //
- B(Star), R(0), //
- REPEAT_127(COMMA, //
- B(Ldar), A(1, 2), //
- B(Star), R(1), //
- B(LoadIC), R(1), U8(0), //
- U8((wide_idx_1 += 2)), //
- B(Star), R(0)), //
- B(Ldar), A(1, 2), //
- B(Star), R(1), //
- B(LoadICWide), R(1), U16(0), U16(wide_idx_1 + 2), //
- B(Return), //
- },
- 1,
- {"name"}},
- {"function f(a, b) {\n"
- " var c;\n"
- " c = a[b];"
- REPEAT_127(SPACE, " c = a[b]; ")
- " return a[b]; }\n"
- "f({name : \"test\"}, \"name\")\n",
- 2 * kPointerSize,
- 3,
- 1420,
- {
- B(StackCheck), //
- B(Ldar), A(1, 3), //
- B(Star), R(1), //
- B(Ldar), A(2, 3), //
- B(KeyedLoadIC), R(1), U8((wide_idx_2 += 2)), //
- B(Star), R(0), //
- REPEAT_127(COMMA, //
- B(Ldar), A(1, 3), //
- B(Star), R(1), //
- B(Ldar), A(2, 3), //
- B(KeyedLoadIC), R(1), U8((wide_idx_2 += 2)), //
- B(Star), R(0)), //
- B(Ldar), A(1, 3), //
- B(Star), R(1), //
- B(Ldar), A(2, 3), //
- B(KeyedLoadICWide), R(1), U16(wide_idx_2 + 2), //
- B(Return), //
- }},
+ "var a = \"First string\"; return \"Second string\";",
+
+ "var a = \"Same string\"; return \"Same string\";",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecode(snippets[i].code_snippet, helper.kFunctionName);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets),
+ LoadGolden("StringConstants.golden"));
}
+TEST(PropertyLoads) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kString);
+ printer.set_wrap(false);
+ printer.set_test_function_name("f");
-TEST(PropertyStores) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
- Zone zone;
-
- FeedbackVectorSpec feedback_spec(&zone);
- FeedbackVectorSlot slot1 = feedback_spec.AddStoreICSlot();
- FeedbackVectorSlot slot2 = feedback_spec.AddStoreICSlot();
-
- Handle<i::TypeFeedbackVector> vector =
- i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
-
- // These are a hack used by the StoreICXXXWide tests below.
- int wide_idx_1 = vector->GetIndex(slot1) - 2;
- int wide_idx_2 = vector->GetIndex(slot1) - 2;
- int wide_idx_3 = vector->GetIndex(slot1) - 2;
- int wide_idx_4 = vector->GetIndex(slot1) - 2;
-
- // clang-format off
- ExpectedSnippet<const char*> snippets[] = {
- {"function f(a) { a.name = \"val\"; }\nf({name : \"test\"})",
- kPointerSize,
- 2,
- 13,
- {
- B(StackCheck), //
- B(Ldar), A(1, 2), //
- B(Star), R(0), //
- B(LdaConstant), U8(0), //
- B(StoreICSloppy), R(0), U8(1), U8(vector->GetIndex(slot1)), //
- B(LdaUndefined), //
- B(Return), //
- },
- 2,
- {"val", "name"}},
- {"function f(a) { a[\"key\"] = \"val\"; }\nf({key : \"test\"})",
- kPointerSize,
- 2,
- 13,
- {
- B(StackCheck), //
- B(Ldar), A(1, 2), //
- B(Star), R(0), //
- B(LdaConstant), U8(0), //
- B(StoreICSloppy), R(0), U8(1), U8(vector->GetIndex(slot1)), //
- B(LdaUndefined), //
- B(Return), //
- },
- 2,
- {"val", "key"}},
- {"function f(a) { a[100] = \"val\"; }\nf({100 : \"test\"})",
- 2 * kPointerSize,
- 2,
- 17,
- {
- B(StackCheck), //
- B(Ldar), A(1, 2), //
- B(Star), R(0), //
- B(LdaSmi8), U8(100), //
- B(Star), R(1), //
- B(LdaConstant), U8(0), //
- B(KeyedStoreICSloppy), R(0), R(1), //
- U8(vector->GetIndex(slot1)), //
- B(LdaUndefined), //
- B(Return), //
- },
- 1,
- {"val"}},
- {"function f(a, b) { a[b] = \"val\"; }\nf({arg : \"test\"}, \"arg\")",
- 2 * kPointerSize,
- 3,
- 17,
- {
- B(StackCheck), //
- B(Ldar), A(1, 3), //
- B(Star), R(0), //
- B(Ldar), A(2, 3), //
- B(Star), R(1), //
- B(LdaConstant), U8(0), //
- B(KeyedStoreICSloppy), R(0), R(1), //
- U8(vector->GetIndex(slot1)), //
- B(LdaUndefined), //
- B(Return), //
- },
- 1,
- {"val"}},
- {"function f(a) { a.name = a[-124]; }\n"
- "f({\"-124\" : \"test\", name : 123 })",
- 2 * kPointerSize,
- 2,
- 20,
- {
- B(StackCheck), //
- B(Ldar), A(1, 2), //
- B(Star), R(0), //
- B(Ldar), A(1, 2), //
- B(Star), R(1), //
- B(LdaSmi8), U8(-124), //
- B(KeyedLoadIC), R(1), U8(vector->GetIndex(slot1)), //
- B(StoreICSloppy), R(0), U8(0), U8(vector->GetIndex(slot2)), //
- B(LdaUndefined), //
- B(Return), //
- },
- 1,
- {"name"}},
- {"function f(a) { \"use strict\"; a.name = \"val\"; }\n"
- "f({name : \"test\"})",
- kPointerSize,
- 2,
- 13,
- {
- B(StackCheck), //
- B(Ldar), A(1, 2), //
- B(Star), R(0), //
- B(LdaConstant), U8(0), //
- B(StoreICStrict), R(0), U8(1), U8(vector->GetIndex(slot1)), //
- B(LdaUndefined), //
- B(Return), //
- },
- 2,
- {"val", "name"}},
- {"function f(a, b) { \"use strict\"; a[b] = \"val\"; }\n"
- "f({arg : \"test\"}, \"arg\")",
- 2 * kPointerSize,
- 3,
- 17,
- {
- B(StackCheck), //
- B(Ldar), A(1, 3), //
- B(Star), R(0), //
- B(Ldar), A(2, 3), //
- B(Star), R(1), //
- B(LdaConstant), U8(0), //
- B(KeyedStoreICStrict), R(0), R(1), U8(vector->GetIndex(slot1)), //
- B(LdaUndefined), //
- B(Return), //
- },
- 1,
- {"val"}},
- {"function f(a) {\n"
- "a.name = 1;"
- REPEAT_127(SPACE, " a.name = 1; ")
- " a.name = 2; }\n"
- "f({name : \"test\"})\n",
- kPointerSize,
- 2,
- 1295,
- {
- B(StackCheck), //
- B(Ldar), A(1, 2), //
- B(Star), R(0), //
- B(LdaSmi8), U8(1), //
- B(StoreICSloppy), R(0), U8(0), U8((wide_idx_1 += 2)), //
- REPEAT_127(COMMA, //
- B(Ldar), A(1, 2), //
- B(Star), R(0), //
- B(LdaSmi8), U8(1), //
- B(StoreICSloppy), R(0), U8(0), //
- U8((wide_idx_1 += 2))), //
- B(Ldar), A(1, 2), //
- B(Star), R(0), //
- B(LdaSmi8), U8(2), //
- B(StoreICSloppyWide), R(0), U16(0), U16(wide_idx_1 + 2), //
- B(LdaUndefined), //
- B(Return), //
- },
- 1,
- {"name"}},
- {"function f(a) {\n"
- " 'use strict';\n"
- " a.name = 1;"
- REPEAT_127(SPACE, " a.name = 1; ")
- " a.name = 2; }\n"
- "f({name : \"test\"})\n",
- kPointerSize,
- 2,
- 1295,
- {
- B(StackCheck), //
- B(Ldar), A(1, 2), //
- B(Star), R(0), //
- B(LdaSmi8), U8(1), //
- B(StoreICStrict), R(0), U8(0), U8(wide_idx_2 += 2), //
- REPEAT_127(COMMA, //
- B(Ldar), A(1, 2), //
- B(Star), R(0), //
- B(LdaSmi8), U8(1), //
- B(StoreICStrict), R(0), U8(0), //
- U8((wide_idx_2 += 2))), //
- B(Ldar), A(1, 2), //
- B(Star), R(0), //
- B(LdaSmi8), U8(2), //
- B(StoreICStrictWide), R(0), U16(0), U16(wide_idx_2 + 2), //
- B(LdaUndefined), //
- B(Return), //
- },
- 1,
- {"name"}},
- {"function f(a, b) {\n"
- " a[b] = 1;"
- REPEAT_127(SPACE, " a[b] = 1; ")
- " a[b] = 2; }\n"
- "f({name : \"test\"})\n",
- 2 * kPointerSize,
- 3,
- 1810,
- {
- B(StackCheck), //
- B(Ldar), A(1, 3), //
- B(Star), R(0), //
- B(Ldar), A(2, 3), //
- B(Star), R(1), //
- B(LdaSmi8), U8(1), //
- B(KeyedStoreICSloppy), R(0), R(1), U8(wide_idx_3 += 2), //
- REPEAT_127(COMMA, //
- B(Ldar), A(1, 3), //
- B(Star), R(0), //
- B(Ldar), A(2, 3), //
- B(Star), R(1), //
- B(LdaSmi8), U8(1), //
- B(KeyedStoreICSloppy), R(0), R(1), //
- U8((wide_idx_3 += 2))), //
- B(Ldar), A(1, 3), //
- B(Star), R(0), //
- B(Ldar), A(2, 3), //
- B(Star), R(1), //
- B(LdaSmi8), U8(2), //
- B(KeyedStoreICSloppyWide), R(0), R(1), U16(wide_idx_3 + 2), //
- B(LdaUndefined), //
- B(Return), //
- }},
- {"function f(a, b) {\n"
- " 'use strict';\n"
- " a[b] = 1;"
- REPEAT_127(SPACE, " a[b] = 1; ")
- " a[b] = 2; }\n"
- "f({name : \"test\"})\n",
- 2 * kPointerSize,
- 3,
- 1810,
- {
- B(StackCheck), //
- B(Ldar), A(1, 3), //
- B(Star), R(0), //
- B(Ldar), A(2, 3), //
- B(Star), R(1), //
- B(LdaSmi8), U8(1), //
- B(KeyedStoreICStrict), R(0), R(1), U8(wide_idx_4 += 2), //
- REPEAT_127(COMMA, //
- B(Ldar), A(1, 3), //
- B(Star), R(0), //
- B(Ldar), A(2, 3), //
- B(Star), R(1), //
- B(LdaSmi8), U8(1), //
- B(KeyedStoreICStrict), R(0), R(1), //
- U8((wide_idx_4 += 2))), //
- B(Ldar), A(1, 3), //
- B(Star), R(0), //
- B(Ldar), A(2, 3), //
- B(Star), R(1), //
- B(LdaSmi8), U8(2), //
- B(KeyedStoreICStrictWide), R(0), R(1), U16(wide_idx_4 + 2), //
- B(LdaUndefined), //
- B(Return), //
- }}
+ const char* snippets[] = {
+ "function f(a) { return a.name; }\n"
+ "f({name : \"test\"});",
+
+ "function f(a) { return a[\"key\"]; }\n"
+ "f({key : \"test\"});",
+
+ "function f(a) { return a[100]; }\n"
+ "f({100 : \"test\"});",
+
+ "function f(a, b) { return a[b]; }\n"
+ "f({arg : \"test\"}, \"arg\");",
+
+ "function f(a) { var b = a.name; return a[-124]; }\n"
+ "f({\"-124\" : \"test\", name : 123 })",
+
+ "function f(a) {\n"
+ " var b;\n"
+ " b = a.name;\n"
+ REPEAT_127(" b = a.name;\n")
+ " return a.name;\n"
+ "}\n"
+ "f({name : \"test\"})\n",
+
+ "function f(a, b) {\n"
+ " var c;\n"
+ " c = a[b];\n"
+ REPEAT_127(" c = a[b];\n")
+ " return a[b];\n"
+ "}\n"
+ "f({name : \"test\"}, \"name\")\n",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecode(snippets[i].code_snippet, helper.kFunctionName);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("PropertyLoads.golden"));
}
+TEST(PropertyStores) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kString);
+ printer.set_wrap(false);
+ printer.set_test_function_name("f");
-#define FUNC_ARG "new (function Obj() { this.func = function() { return; }})()"
+ const char* snippets[] = {
+ "function f(a) { a.name = \"val\"; }\n"
+ "f({name : \"test\"})",
+ "function f(a) { a[\"key\"] = \"val\"; }\n"
+ "f({key : \"test\"})",
-TEST(PropertyCall) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
- Zone zone;
-
- FeedbackVectorSpec feedback_spec(&zone);
- FeedbackVectorSlot slot1 = feedback_spec.AddCallICSlot();
- FeedbackVectorSlot slot2 = feedback_spec.AddLoadICSlot();
-
- Handle<i::TypeFeedbackVector> vector =
- i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
-
- // These are a hack used by the CallWide test below.
- int wide_idx = vector->GetIndex(slot1) - 2;
-
- // clang-format off
- ExpectedSnippet<const char*> snippets[] = {
- {"function f(a) { return a.func(); }\nf(" FUNC_ARG ")",
- 2 * kPointerSize,
- 2,
- 17,
- {
- B(StackCheck), //
- B(Ldar), A(1, 2), //
- B(Star), R(1), //
- B(LoadIC), R(1), U8(0), U8(vector->GetIndex(slot2)), //
- B(Star), R(0), //
- B(Call), R(0), R(1), U8(1), U8(vector->GetIndex(slot1)), //
- B(Return), //
- },
- 1,
- {"func"}},
- {"function f(a, b, c) { return a.func(b, c); }\nf(" FUNC_ARG ", 1, 2)",
- 4 * kPointerSize,
- 4,
- 25,
- {
- B(StackCheck), //
- B(Ldar), A(1, 4), //
- B(Star), R(1), //
- B(LoadIC), R(1), U8(0), U8(vector->GetIndex(slot2)), //
- B(Star), R(0), //
- B(Ldar), A(2, 4), //
- B(Star), R(2), //
- B(Ldar), A(3, 4), //
- B(Star), R(3), //
- B(Call), R(0), R(1), U8(3), U8(vector->GetIndex(slot1)), //
- B(Return) //
- },
- 1,
- {"func"}},
- {"function f(a, b) { return a.func(b + b, b); }\nf(" FUNC_ARG ", 1)",
- 4 * kPointerSize,
- 3,
- 31,
- {
- B(StackCheck), //
- B(Ldar), A(1, 3), //
- B(Star), R(1), //
- B(LoadIC), R(1), U8(0), U8(vector->GetIndex(slot2)), //
- B(Star), R(0), //
- B(Ldar), A(2, 3), //
- B(Star), R(3), //
- B(Ldar), A(2, 3), //
- B(Add), R(3), //
- B(Star), R(2), //
- B(Ldar), A(2, 3), //
- B(Star), R(3), //
- B(Call), R(0), R(1), U8(3), U8(vector->GetIndex(slot1)), //
- B(Return), //
- },
- 1,
- {"func"}},
- {"function f(a) {\n"
- " a.func;\n" REPEAT_127(
- SPACE, " a.func;\n") " return a.func(); }\nf(" FUNC_ARG ")",
- 2 * kPointerSize,
- 2,
- 1047,
- {
- B(StackCheck), //
- B(Ldar), A(1, 2), //
- B(Star), R(0), //
- B(LoadIC), R(0), U8(0), U8(wide_idx += 2), //
- REPEAT_127(COMMA, //
- B(Ldar), A(1, 2), //
- B(Star), R(0), //
- B(LoadIC), R(0), U8(0), U8((wide_idx += 2))), //
- B(Ldar), A(1, 2), //
- B(Star), R(1), //
- B(LoadICWide), R(1), U16(0), U16(wide_idx + 4), //
- B(Star), R(0), //
- B(CallWide), R16(0), R16(1), U16(1), U16(wide_idx + 2), //
- B(Return), //
- },
- 1,
- {"func"}},
+ "function f(a) { a[100] = \"val\"; }\n"
+ "f({100 : \"test\"})",
+
+ "function f(a, b) { a[b] = \"val\"; }\n"
+ "f({arg : \"test\"}, \"arg\")",
+
+ "function f(a) { a.name = a[-124]; }\n"
+ "f({\"-124\" : \"test\", name : 123 })",
+
+ "function f(a) { \"use strict\"; a.name = \"val\"; }\n"
+ "f({name : \"test\"})",
+
+ "function f(a, b) { \"use strict\"; a[b] = \"val\"; }\n"
+ "f({arg : \"test\"}, \"arg\")",
+
+ "function f(a) {\n"
+ " a.name = 1;\n"
+ REPEAT_127(" a.name = 1;\n")
+ " a.name = 2;\n"
+ "}\n"
+ "f({name : \"test\"})\n",
+
+ "function f(a) {\n"
+ " 'use strict';\n"
+ " a.name = 1;\n"
+ REPEAT_127(" a.name = 1;\n")
+ " a.name = 2;\n"
+ "}\n"
+ "f({name : \"test\"})\n",
+
+ "function f(a, b) {\n"
+ " a[b] = 1;\n"
+ REPEAT_127(" a[b] = 1;\n")
+ " a[b] = 2;\n"
+ "}\n"
+ "f({name : \"test\"})\n",
+
+ "function f(a, b) {\n"
+ " 'use strict';\n"
+ " a[b] = 1;\n"
+ REPEAT_127(" a[b] = 1;\n")
+ " a[b] = 2;\n"
+ "}\n"
+ "f({name : \"test\"})\n",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecode(snippets[i].code_snippet, helper.kFunctionName);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("PropertyStores.golden"));
}
+#define FUNC_ARG "new (function Obj() { this.func = function() { return; }})()"
+
+TEST(PropertyCall) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kString);
+ printer.set_wrap(false);
+ printer.set_test_function_name("f");
-TEST(LoadGlobal) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
- Zone zone;
-
- FeedbackVectorSpec feedback_spec(&zone);
- FeedbackVectorSlot slot = feedback_spec.AddLoadICSlot();
-
- Handle<i::TypeFeedbackVector> vector =
- i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
-
- // These are a hack used by the LdaGlobalXXXWide tests below.
- int wide_idx_1 = vector->GetIndex(slot) - 2;
-
- // clang-format off
- ExpectedSnippet<const char*> snippets[] = {
- {"var a = 1;\nfunction f() { return a; }\nf()",
- 0,
- 1,
- 5,
- {
- B(StackCheck), //
- B(LdaGlobal), U8(0), U8(vector->GetIndex(slot)), //
- B(Return) //
- },
- 1,
- {"a"}},
- {"function t() { }\nfunction f() { return t; }\nf()",
- 0,
- 1,
- 5,
- {
- B(StackCheck), //
- B(LdaGlobal), U8(0), U8(vector->GetIndex(slot)), //
- B(Return) //
- },
- 1,
- {"t"}},
- {"a = 1;\nfunction f() { return a; }\nf()",
- 0,
- 1,
- 5,
- {
- B(StackCheck), //
- B(LdaGlobal), U8(0), U8(vector->GetIndex(slot)), //
- B(Return) //
- },
- 1,
- {"a"}},
- {"a = 1;"
- "function f(b) {\n"
- " b.name;\n"
- REPEAT_127(SPACE, "b.name; ")
- " return a;"
- "}\nf({name: 1});",
- kPointerSize,
- 2,
- 1031,
- {
- B(StackCheck), //
- B(Ldar), A(1, 2), //
- B(Star), R(0), //
- B(LoadIC), R(0), U8(0), U8(wide_idx_1 += 2), //
- REPEAT_127(COMMA, //
- B(Ldar), A(1, 2), //
- B(Star), R(0), //
- B(LoadIC), R(0), U8(0), U8(wide_idx_1 += 2)), //
- B(LdaGlobalWide), U16(1), U16(wide_idx_1 + 2), //
- B(Return), //
- },
- 2,
- {"name", "a"}},
+ const char* snippets[] = {
+ "function f(a) { return a.func(); }\n"
+ "f(" FUNC_ARG ")",
+
+ "function f(a, b, c) { return a.func(b, c); }\n"
+ "f(" FUNC_ARG ", 1, 2)",
+
+ "function f(a, b) { return a.func(b + b, b); }\n"
+ "f(" FUNC_ARG ", 1)",
+
+ "function f(a) {\n"
+ " a.func;\n" //
+ REPEAT_127(" a.func;\n") //
+ " return a.func(); }\n"
+ "f(" FUNC_ARG ")",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecode(snippets[i].code_snippet, "f");
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("PropertyCall.golden"));
}
+TEST(LoadGlobal) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kString);
+ printer.set_wrap(false);
+ printer.set_test_function_name("f");
+
+ const char* snippets[] = {
+ "var a = 1;\n"
+ "function f() { return a; }\n"
+ "f()",
-TEST(StoreGlobal) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
- Zone zone;
-
- FeedbackVectorSpec feedback_spec(&zone);
- FeedbackVectorSlot slot = feedback_spec.AddStoreICSlot();
-
- Handle<i::TypeFeedbackVector> vector =
- i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
-
- // These are a hack used by the StaGlobalXXXWide tests below.
- int wide_idx_1 = vector->GetIndex(slot) - 2;
- int wide_idx_2 = vector->GetIndex(slot) - 2;
-
- // clang-format off
- ExpectedSnippet<const char*> snippets[] = {
- {"var a = 1;\nfunction f() { a = 2; }\nf()",
- 0,
- 1,
- 8,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(2), //
- B(StaGlobalSloppy), U8(0), U8(vector->GetIndex(slot)), //
- B(LdaUndefined), //
- B(Return) //
- },
- 1,
- {"a"}},
- {"var a = \"test\"; function f(b) { a = b; }\nf(\"global\")",
- 0,
- 2,
- 8,
- {
- B(StackCheck), //
- B(Ldar), R(helper.kLastParamIndex), //
- B(StaGlobalSloppy), U8(0), U8(vector->GetIndex(slot)), //
- B(LdaUndefined), //
- B(Return) //
- },
- 1,
- {"a"}},
- {"'use strict'; var a = 1;\nfunction f() { a = 2; }\nf()",
- 0,
- 1,
- 8,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(2), //
- B(StaGlobalStrict), U8(0), U8(vector->GetIndex(slot)), //
- B(LdaUndefined), //
- B(Return) //
- },
- 1,
- {"a"}},
- {"a = 1;\nfunction f() { a = 2; }\nf()",
- 0,
- 1,
- 8,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(2), //
- B(StaGlobalSloppy), U8(0), U8(vector->GetIndex(slot)), //
- B(LdaUndefined), //
- B(Return) //
- },
- 1,
- {"a"}},
- {"a = 1;"
- "function f(b) {"
- " b.name;\n"
- REPEAT_127(SPACE, "b.name; ")
- " a = 2; }\n"
- "f({name: 1});",
- kPointerSize,
- 2,
- 1034,
- {
- B(StackCheck), //
- B(Ldar), A(1, 2), //
- B(Star), R(0), //
- B(LoadIC), R(0), U8(0), U8(wide_idx_1 += 2), //
- REPEAT_127(COMMA, //
- B(Ldar), A(1, 2), //
- B(Star), R(0), //
- B(LoadIC), R(0), U8(0), U8(wide_idx_1 += 2)), //
- B(LdaSmi8), U8(2), //
- B(StaGlobalSloppyWide), U16(1), U16(wide_idx_1 + 2), //
- B(LdaUndefined), //
- B(Return), //
- },
- 2,
- {"name", "a"}},
- {"a = 1;"
- "function f(b) {\n"
- " 'use strict';\n"
- " b.name;\n"
- REPEAT_127(SPACE, "b.name; ")
- " a = 2; }\n"
- "f({name: 1});",
- kPointerSize,
- 2,
- 1034,
- {
- B(StackCheck), //
- B(Ldar), A(1, 2), //
- B(Star), R(0), //
- B(LoadIC), R(0), U8(0), U8(wide_idx_2 += 2), //
- REPEAT_127(COMMA, //
- B(Ldar), A(1, 2), //
- B(Star), R(0), //
- B(LoadIC), R(0), U8(0), U8(wide_idx_2 += 2)), //
- B(LdaSmi8), U8(2), //
- B(StaGlobalStrictWide), U16(1), U16(wide_idx_2 + 2), //
- B(LdaUndefined), //
- B(Return), //
- },
- 2,
- {"name", "a"}},
+ "function t() { }\n"
+ "function f() { return t; }\n"
+ "f()",
+
+ "a = 1;\n"
+ "function f() { return a; }\n"
+ "f()",
+
+ "a = 1;\n"
+ "function f(b) {\n"
+ " b.name;\n"
+ REPEAT_127(" b.name;\n")
+ " return a;\n"
+ "}\n"
+ "f({name: 1});",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecode(snippets[i].code_snippet, "f");
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("LoadGlobal.golden"));
}
+TEST(StoreGlobal) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kString);
+ printer.set_wrap(false);
+ printer.set_test_function_name("f");
-TEST(CallGlobal) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
- Zone zone;
-
- FeedbackVectorSpec feedback_spec(&zone);
- FeedbackVectorSlot slot1 = feedback_spec.AddCallICSlot();
- FeedbackVectorSlot slot2 = feedback_spec.AddLoadICSlot();
-
- Handle<i::TypeFeedbackVector> vector =
- i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
-
- // clang-format off
- ExpectedSnippet<const char*> snippets[] = {
- {"function t() { }\nfunction f() { return t(); }\nf()",
- 2 * kPointerSize,
- 1,
- 15,
- {
- B(StackCheck), //
- B(LdaUndefined), //
- B(Star), R(1), //
- B(LdaGlobal), U8(0), U8(vector->GetIndex(slot2)), //
- B(Star), R(0), //
- B(Call), R(0), R(1), U8(1), U8(vector->GetIndex(slot1)), //
- B(Return) //
- },
- 1,
- {"t"}},
- {"function t(a, b, c) { }\nfunction f() { return t(1, 2, 3); }\nf()",
- 5 * kPointerSize,
- 1,
- 27,
- {
- B(StackCheck), //
- B(LdaUndefined), //
- B(Star), R(1), //
- B(LdaGlobal), U8(0), U8(vector->GetIndex(slot2)), //
- B(Star), R(0), //
- B(LdaSmi8), U8(1), //
- B(Star), R(2), //
- B(LdaSmi8), U8(2), //
- B(Star), R(3), //
- B(LdaSmi8), U8(3), //
- B(Star), R(4), //
- B(Call), R(0), R(1), U8(4), U8(vector->GetIndex(slot1)), //
- B(Return) //
- },
- 1,
- {"t"}},
+ const char* snippets[] = {
+ "var a = 1;\n"
+ "function f() { a = 2; }\n"
+ "f();",
+
+ "var a = \"test\"; function f(b) { a = b; }\n"
+ "f(\"global\");",
+
+ "'use strict'; var a = 1;\n"
+ "function f() { a = 2; }\n"
+ "f();",
+
+ "a = 1;\n"
+ "function f() { a = 2; }\n"
+ "f();",
+
+ "a = 1;\n"
+ "function f(b) {\n"
+ " b.name;\n"
+ REPEAT_127(" b.name;\n")
+ " a = 2;\n"
+ "}\n"
+ "f({name: 1});",
+
+ "a = 1;\n"
+ "function f(b) {\n"
+ " 'use strict';\n"
+ " b.name;\n"
+ REPEAT_127(" b.name;\n")
+ " a = 2;\n"
+ "}\n"
+ "f({name: 1});",
};
- // clang-format on
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecode(snippets[i].code_snippet, "f");
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("StoreGlobal.golden"));
}
+TEST(CallGlobal) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kString);
+ printer.set_wrap(false);
+ printer.set_test_function_name("f");
-TEST(CallRuntime) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- // clang-format off
- ExpectedSnippet<InstanceType> snippets[] = {
- {
- "function f() { %TheHole() }\nf()",
- 0,
- 1,
- 8,
- {
- B(StackCheck), //
- B(CallRuntime), U16(Runtime::kTheHole), R(0), U8(0), //
- B(LdaUndefined), //
- B(Return) //
- },
- },
- {
- "function f(a) { return %IsArray(a) }\nf(undefined)",
- 1 * kPointerSize,
- 2,
- 11,
- {
- B(StackCheck), //
- B(Ldar), A(1, 2), //
- B(Star), R(0), //
- B(CallRuntime), U16(Runtime::kIsArray), R(0), U8(1), //
- B(Return) //
- },
- },
- {
- "function f() { return %Add(1, 2) }\nf()",
- 2 * kPointerSize,
- 1,
- 15,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(LdaSmi8), U8(2), //
- B(Star), R(1), //
- B(CallRuntime), U16(Runtime::kAdd), R(0), U8(2), //
- B(Return) //
- },
- },
- {
- "function f() { return %spread_iterable([1]) }\nf()",
- 2 * kPointerSize,
- 1,
- 16,
- {
- B(StackCheck), //
- B(LdaUndefined), //
- B(Star), R(0), //
- B(CreateArrayLiteral), U8(0), U8(0), U8(3), //
- B(Star), R(1), //
- B(CallJSRuntime), U16(Context::SPREAD_ITERABLE_INDEX), R(0), //
- /* */ U8(2), //
- B(Return), //
- },
- 1,
- {InstanceType::FIXED_ARRAY_TYPE},
- },
+ const char* snippets[] = {
+ "function t() { }\n"
+ "function f() { return t(); }\n"
+ "f();",
+
+ "function t(a, b, c) { }\n"
+ "function f() { return t(1, 2, 3); }\n"
+ "f();",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecode(snippets[i].code_snippet, "f");
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("CallGlobal.golden"));
}
+TEST(CallRuntime) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kMixed);
+ printer.set_wrap(false);
+ printer.set_test_function_name("f");
-TEST(IfConditions) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- Handle<Object> unused = helper.factory()->undefined_value();
-
- // clang-format off
- ExpectedSnippet<Handle<Object>> snippets[] = {
- {"function f() { if (0) { return 1; } else { return -1; } } f()",
- 0,
- 1,
- 4,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(-1), //
- B(Return), //
- },
- 0,
- {unused, unused, unused, unused, unused, unused}},
- {"function f() { if ('lucky') { return 1; } else { return -1; } } f();",
- 0,
- 1,
- 4,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Return), //
- },
- 0,
- {unused, unused, unused, unused, unused, unused}},
- {"function f() { if (false) { return 1; } else { return -1; } } f();",
- 0,
- 1,
- 4,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(-1), //
- B(Return), //
- },
- 0,
- {unused, unused, unused, unused, unused, unused}},
- {"function f() { if (false) { return 1; } } f();",
- 0,
- 1,
- 3,
- {
- B(StackCheck), //
- B(LdaUndefined), //
- B(Return), //
- },
- 0,
- {unused, unused, unused, unused, unused, unused}},
- {"function f() { var a = 1; if (a) { a += 1; } else { return 2; } } f();",
- 2 * kPointerSize,
- 1,
- 24,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(JumpIfToBooleanFalse), U8(14), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(1), //
- B(Add), R(1), //
- B(Star), R(0), //
- B(Jump), U8(5), //
- B(LdaSmi8), U8(2), //
- B(Return), //
- B(LdaUndefined), //
- B(Return), //
- },
- 0,
- {unused, unused, unused, unused, unused, unused}},
- {"function f(a) { if (a <= 0) { return 200; } else { return -200; } }"
- "f(99);",
- kPointerSize,
- 2,
- 18,
- {
- B(StackCheck), //
- B(Ldar), A(1, 2), //
- B(Star), R(0), //
- B(LdaZero), //
- B(TestLessThanOrEqual), R(0), //
- B(JumpIfFalse), U8(5), //
- B(LdaConstant), U8(0), //
- B(Return), //
- B(LdaConstant), U8(1), //
- B(Return), //
- B(LdaUndefined), //
- B(Return), //
- },
- 2,
- {helper.factory()->NewNumberFromInt(200),
- helper.factory()->NewNumberFromInt(-200), unused, unused, unused,
- unused}},
- {"function f(a, b) { if (a in b) { return 200; } }"
- "f('prop', { prop: 'yes'});",
- kPointerSize,
- 3,
- 16,
- {
- B(StackCheck), //
- B(Ldar), A(1, 3), //
- B(Star), R(0), //
- B(Ldar), A(2, 3), //
- B(TestIn), R(0), //
- B(JumpIfFalse), U8(5), //
- B(LdaConstant), U8(0), //
- B(Return), //
- B(LdaUndefined), //
- B(Return), //
- },
- 1,
- {helper.factory()->NewNumberFromInt(200), unused, unused, unused, unused,
- unused}},
- {"function f(z) { var a = 0; var b = 0; if (a === 0.01) { "
- REPEAT_64(SPACE, "b = a; a = b; ")
- " return 200; } else { return -200; } } f(0.001)",
- 3 * kPointerSize,
- 2,
- 283,
- {
- B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(LdaZero), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(Star), R(2), //
- B(LdaConstant), U8(0), //
- B(TestEqualStrict), R(2), //
- B(JumpIfFalseConstant), U8(2), //
- B(Ldar), R(0), //
- REPEAT_64(COMMA, //
- B(Star), R(1), //
- B(Star), R(0)), //
- B(LdaConstant), U8(1), //
- B(Return), //
- B(LdaConstant), U8(3), //
- B(Return), //
- B(LdaUndefined), //
- B(Return)}, //
- 4,
- {helper.factory()->NewHeapNumber(0.01),
- helper.factory()->NewNumberFromInt(200),
- helper.factory()->NewNumberFromInt(263),
- helper.factory()->NewNumberFromInt(-200), unused, unused}},
- {"function f() { var a = 0; var b = 0; if (a) { "
- REPEAT_64(SPACE, "b = a; a = b; ")
- " return 200; } else { return -200; } } f()",
- 2 * kPointerSize,
- 1,
- 277,
- {
- B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(LdaZero), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(JumpIfToBooleanFalseConstant), U8(1), //
- B(Ldar), R(0), //
- REPEAT_64(COMMA, //
- B(Star), R(1), //
- B(Star), R(0)), //
- B(LdaConstant), U8(0), //
- B(Return), //
- B(LdaConstant), U8(2), //
- B(Return), //
- B(LdaUndefined), //
- B(Return)}, //
- 3,
- {helper.factory()->NewNumberFromInt(200),
- helper.factory()->NewNumberFromInt(263),
- helper.factory()->NewNumberFromInt(-200), unused, unused, unused}},
-
- {"function f(a, b) {\n"
- " if (a == b) { return 1; }\n"
- " if (a === b) { return 1; }\n"
- " if (a < b) { return 1; }\n"
- " if (a > b) { return 1; }\n"
- " if (a <= b) { return 1; }\n"
- " if (a >= b) { return 1; }\n"
- " if (a in b) { return 1; }\n"
- " if (a instanceof b) { return 1; }\n"
- " return 0;\n"
- "} f(1, 1);",
- kPointerSize,
- 3,
- 107,
- {
-#define IF_CONDITION_RETURN(condition) \
- B(Ldar), A(1, 3), \
- B(Star), R(0), \
- B(Ldar), A(2, 3), \
- B(condition), R(0), \
- B(JumpIfFalse), U8(5), \
- B(LdaSmi8), U8(1), \
- B(Return),
- B(StackCheck), //
- IF_CONDITION_RETURN(TestEqual) //
- IF_CONDITION_RETURN(TestEqualStrict) //
- IF_CONDITION_RETURN(TestLessThan) //
- IF_CONDITION_RETURN(TestGreaterThan) //
- IF_CONDITION_RETURN(TestLessThanOrEqual) //
- IF_CONDITION_RETURN(TestGreaterThanOrEqual) //
- IF_CONDITION_RETURN(TestIn) //
- IF_CONDITION_RETURN(TestInstanceOf) //
- B(LdaZero), //
- B(Return)}, //
-#undef IF_CONDITION_RETURN
- 0,
- {unused, unused, unused, unused, unused, unused}},
- {"function f() {"
- " var a = 0;"
- " if (a) {"
- " return 20;"
- "} else {"
- " return -20;}"
- "};"
- "f();",
- 1 * kPointerSize,
- 1,
- 14,
- {
- B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(JumpIfToBooleanFalse), U8(5), //
- B(LdaSmi8), U8(20), //
- B(Return), //
- B(LdaSmi8), U8(-20), //
- B(Return), //
- B(LdaUndefined), //
- B(Return)
- },
- 0,
- {unused, unused, unused, unused, unused, unused}}
+ const char* snippets[] = {
+ "function f() { %TheHole() }\n"
+ "f();",
+
+ "function f(a) { return %IsArray(a) }\n"
+ "f(undefined);",
+
+ "function f() { return %Add(1, 2) }\n"
+ "f();",
+
+ "function f() { return %spread_iterable([1]) }\n"
+ "f();",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecode(snippets[i].code_snippet, helper.kFunctionName);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("CallRuntime.golden"));
}
+TEST(IfConditions) {
+ if (FLAG_harmony_instanceof) {
+ // TODO(mvstanton): when ES6 instanceof ships, regenerate the bytecode
+ // expectations and remove this flag check.
+ return;
+ }
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kNumber);
+ printer.set_wrap(false);
+ printer.set_test_function_name("f");
+
+ const char* snippets[] = {
+ "function f() {\n"
+ " if (0) {\n"
+ " return 1;\n"
+ " } else {\n"
+ " return -1;\n"
+ " }\n"
+ "};\n"
+ "f();",
+
+ "function f() {\n"
+ " if ('lucky') {\n"
+ " return 1;\n"
+ " } else {\n"
+ " return -1;\n"
+ " }\n"
+ "};\n"
+ "f();",
+
+ "function f() {\n"
+ " if (false) {\n"
+ " return 1;\n"
+ " } else {\n"
+ " return -1;\n"
+ " }\n"
+ "};\n"
+ "f();",
+
+ "function f() {\n"
+ " if (false) {\n"
+ " return 1;\n"
+ " }\n"
+ "};\n"
+ "f();",
+
+ "function f() {\n"
+ " var a = 1;\n"
+ " if (a) {\n"
+ " a += 1;\n"
+ " } else {\n"
+ " return 2;\n"
+ " }\n"
+ "};\n"
+ "f();",
+
+ "function f(a) {\n"
+ " if (a <= 0) {\n"
+ " return 200;\n"
+ " } else {\n"
+ " return -200;\n"
+ " }\n"
+ "};\n"
+ "f(99);",
+
+ "function f(a, b) { if (a in b) { return 200; } }"
+ "f('prop', { prop: 'yes'});",
+
+ "function f(z) { var a = 0; var b = 0; if (a === 0.01) {\n"
+ REPEAT_64(" b = a; a = b;\n")
+ " return 200; } else { return -200; } } f(0.001);",
+
+ "function f() {\n"
+ " var a = 0; var b = 0;\n"
+ " if (a) {\n"
+ REPEAT_64(" b = a; a = b;\n")
+ " return 200; } else { return -200; }\n"
+ "};\n"
+ "f();",
+
+ "function f(a, b) {\n"
+ " if (a == b) { return 1; }\n"
+ " if (a === b) { return 1; }\n"
+ " if (a < b) { return 1; }\n"
+ " if (a > b) { return 1; }\n"
+ " if (a <= b) { return 1; }\n"
+ " if (a >= b) { return 1; }\n"
+ " if (a in b) { return 1; }\n"
+ " if (a instanceof b) { return 1; }\n"
+ " return 0;\n"
+ "}\n"
+ "f(1, 1);",
+
+ "function f() {\n"
+ " var a = 0;\n"
+ " if (a) {\n"
+ " return 20;\n"
+ " } else {\n"
+ " return -20;\n"
+ " }\n"
+ "};\n"
+ "f();",
+ };
+
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("IfConditions.golden"));
+}
TEST(DeclareGlobals) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
- Zone zone;
-
- // Create different feedback vector specs to be precise on slot numbering.
- FeedbackVectorSpec feedback_spec_stores(&zone);
- FeedbackVectorSlot store_slot_1 = feedback_spec_stores.AddStoreICSlot();
- FeedbackVectorSlot store_slot_2 = feedback_spec_stores.AddStoreICSlot();
- USE(store_slot_1);
-
- Handle<i::TypeFeedbackVector> store_vector =
- i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec_stores);
-
- FeedbackVectorSpec feedback_spec_loads(&zone);
- FeedbackVectorSlot load_slot_1 = feedback_spec_loads.AddLoadICSlot();
- FeedbackVectorSlot call_slot_1 = feedback_spec_loads.AddCallICSlot();
-
- Handle<i::TypeFeedbackVector> load_vector =
- i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec_loads);
-
- // clang-format off
- ExpectedSnippet<InstanceType> snippets[] = {
- {"var a = 1;",
- 4 * kPointerSize,
- 1,
- 31,
- {
- B(LdaConstant), U8(0), //
- B(Star), R(1), //
- B(LdaZero), //
- B(Star), R(2), //
- B(CallRuntime), U16(Runtime::kDeclareGlobals), R(1), U8(2), //
- B(StackCheck), //
- B(LdaConstant), U8(1), //
- B(Star), R(1), //
- B(LdaZero), //
- B(Star), R(2), //
- B(LdaSmi8), U8(1), //
- B(Star), R(3), //
- B(CallRuntime), U16(Runtime::kInitializeVarGlobal), R(1), U8(3), //
- B(LdaUndefined), //
- B(Return) //
- },
- 2,
- {InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- {"function f() {}",
- 2 * kPointerSize,
- 1,
- 15,
- {
- B(LdaConstant), U8(0), //
- B(Star), R(0), //
- B(LdaZero), //
- B(Star), R(1), //
- B(CallRuntime), U16(Runtime::kDeclareGlobals), R(0), U8(2), //
- B(StackCheck), //
- B(LdaUndefined), //
- B(Return) //
- },
- 1,
- {InstanceType::FIXED_ARRAY_TYPE}},
- {"var a = 1;\na=2;",
- 4 * kPointerSize,
- 1,
- 37,
- {
- B(LdaConstant), U8(0), //
- B(Star), R(1), //
- B(LdaZero), //
- B(Star), R(2), //
- B(CallRuntime), U16(Runtime::kDeclareGlobals), R(1), U8(2), //
- B(StackCheck), //
- B(LdaConstant), U8(1), //
- B(Star), R(1), //
- B(LdaZero), //
- B(Star), R(2), //
- B(LdaSmi8), U8(1), //
- B(Star), R(3), //
- B(CallRuntime), U16(Runtime::kInitializeVarGlobal), R(1), U8(3), //
- B(LdaSmi8), U8(2), //
- B(StaGlobalSloppy), U8(1), //
- /* */ U8(store_vector->GetIndex(store_slot_2)), //
- B(Star), R(0), //
- B(Return) //
- },
- 2,
- {InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- {"function f() {}\nf();",
- 3 * kPointerSize,
- 1,
- 29,
- {
- B(LdaConstant), U8(0), //
- B(Star), R(1), //
- B(LdaZero), //
- B(Star), R(2), //
- B(CallRuntime), U16(Runtime::kDeclareGlobals), R(1), U8(2), //
- B(StackCheck), //
- B(LdaUndefined), //
- B(Star), R(2), //
- B(LdaGlobal), U8(1), U8(load_vector->GetIndex(load_slot_1)), //
- B(Star), R(1), //
- B(Call), R(1), R(2), U8(1), //
- /* */ U8(load_vector->GetIndex(call_slot_1)), //
- B(Star), R(0), //
- B(Return) //
- },
- 2,
- {InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- };
- // clang-format on
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kMixed);
+ printer.set_wrap(false);
+ printer.set_test_function_name("f");
+ printer.set_execute(false);
+ printer.set_top_level(true);
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeTopLevelBytecode(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
-}
+ const char* snippets[] = {
+ "var a = 1;",
+ "function f() {}",
-TEST(BreakableBlocks) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- int closure = Register::function_closure().index();
- int context = Register::current_context().index();
-
- // clang-format off
- ExpectedSnippet<InstanceType> snippets[] = {
- {"var x = 0;\n"
- "label: {\n"
- " x = x + 1;\n"
- " break label;\n"
- " x = x + 1;\n"
- "}\n"
- "return x;",
- 2 * kPointerSize,
- 1,
- 17,
- {
- B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(1), //
- B(Add), R(1), //
- B(Star), R(0), //
- B(Jump), U8(2), //
- B(Ldar), R(0), //
- B(Return) //
- }},
- {"var sum = 0;\n"
- "outer: {\n"
- " for (var x = 0; x < 10; ++x) {\n"
- " for (var y = 0; y < 3; ++y) {\n"
- " ++sum;\n"
- " if (x + y == 12) { break outer; }\n"
- " }\n"
- " }\n"
- "}\n"
- "return sum;",
- 5 * kPointerSize,
- 1,
- 75,
- {
- B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(LdaZero), //
- B(Star), R(1), //
- B(Ldar), R(1), //
- B(Star), R(3), //
- B(LdaSmi8), U8(10), //
- B(TestLessThan), R(3), //
- B(JumpIfFalse), U8(57), //
- B(StackCheck), //
- B(LdaZero), //
- B(Star), R(2), //
- B(Ldar), R(2), //
- B(Star), R(3), //
- B(LdaSmi8), U8(3), //
- B(TestLessThan), R(3), //
- B(JumpIfFalse), U8(35), //
- B(StackCheck), //
- B(Ldar), R(0), //
- B(ToNumber), //
- B(Inc), //
- B(Star), R(0), //
- B(Ldar), R(1), //
- B(Star), R(3), //
- B(Ldar), R(2), //
- B(Add), R(3), //
- B(Star), R(4), //
- B(LdaSmi8), U8(12), //
- B(TestEqual), R(4), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(18), //
- B(Ldar), R(2), //
- B(ToNumber), //
- B(Inc), //
- B(Star), R(2), //
- B(Jump), U8(-41), //
- B(Ldar), R(1), //
- B(ToNumber), //
- B(Inc), //
- B(Star), R(1), //
- B(Jump), U8(-63), //
- B(Ldar), R(0), //
- B(Return), //
- }},
- {"outer: {\n"
- " let y = 10;"
- " function f() { return y; }\n"
- " break outer;\n"
- "}\n",
- 5 * kPointerSize,
- 1,
- 51,
- {
- B(StackCheck), //
- B(LdaConstant), U8(0), //
- B(Star), R(3), //
- B(Ldar), R(closure), //
- B(Star), R(4), //
- B(CallRuntime), U16(Runtime::kPushBlockContext), R(3), U8(2), //
- B(PushContext), R(2), //
- B(LdaTheHole), //
- B(StaContextSlot), R(context), U8(4), //
- B(CreateClosure), U8(1), U8(0), //
- B(Star), R(0), //
- B(LdaSmi8), U8(10), //
- B(StaContextSlot), R(context), U8(4), //
- B(Ldar), R(0), //
- B(JumpIfNotHole), U8(11), //
- B(LdaConstant), U8(2), //
- B(Star), R(3), //
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1), //
- B(Star), R(1), //
- B(Jump), U8(2), //
- B(PopContext), R(2), //
- B(LdaUndefined), //
- B(Return), //
- },
- 3,
- {InstanceType::FIXED_ARRAY_TYPE, InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- {"let x = 1;\n"
- "outer: {\n"
- " inner: {\n"
- " let y = 2;\n"
- " function f() { return x + y; }\n"
- " if (y) break outer;\n"
- " y = 3;\n"
- " }\n"
- "}\n"
- "x = 4;",
- 6 * kPointerSize,
- 1,
- 131,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
- U8(1), //
- B(PushContext), R(2), //
- B(LdaTheHole), //
- B(StaContextSlot), R(context), U8(4), //
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(StaContextSlot), R(context), U8(4), //
- B(LdaConstant), U8(0), //
- B(Star), R(4), //
- B(Ldar), R(closure), //
- B(Star), R(5), //
- B(CallRuntime), U16(Runtime::kPushBlockContext), R(4), U8(2), //
- B(PushContext), R(3), //
- B(LdaTheHole), //
- B(StaContextSlot), R(context), U8(4), //
- B(CreateClosure), U8(1), U8(0), //
- B(Star), R(0), //
- B(LdaSmi8), U8(2), //
- B(StaContextSlot), R(context), U8(4), //
- B(Ldar), R(0), //
- B(JumpIfNotHole), U8(11), //
- B(LdaConstant), U8(2), //
- B(Star), R(4), //
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1), //
- B(Star), R(1), //
- B(LdaContextSlot), R(context), U8(4), //
- B(JumpIfNotHole), U8(11), //
- B(LdaConstant), U8(3), //
- B(Star), R(4), //
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1), //
- B(JumpIfToBooleanFalse), U8(6), //
- B(PopContext), R(3), //
- B(Jump), U8(27), //
- B(LdaSmi8), U8(3), //
- B(Star), R(4), //
- B(LdaContextSlot), R(context), U8(4), //
- B(JumpIfNotHole), U8(11), //
- B(LdaConstant), U8(3), //
- B(Star), R(5), //
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(5), U8(1), //
- B(Ldar), R(4), //
- B(StaContextSlot), R(context), U8(4), //
- B(PopContext), R(3), //
- B(LdaSmi8), U8(4), //
- B(Star), R(4), //
- B(LdaContextSlot), R(context), U8(4), //
- B(JumpIfNotHole), U8(11), //
- B(LdaConstant), U8(4), //
- B(Star), R(5), //
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(5), U8(1), //
- B(Ldar), R(4), //
- B(StaContextSlot), R(context), U8(4), //
- B(LdaUndefined), //
- B(Return), //
- },
- 5,
- {InstanceType::FIXED_ARRAY_TYPE, InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
+ "var a = 1;\n"
+ "a=2;",
+
+ "function f() {}\n"
+ "f();",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("DeclareGlobals.golden"));
}
+TEST(BreakableBlocks) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kMixed);
+
+ const char* snippets[] = {
+ "var x = 0;\n"
+ "label: {\n"
+ " x = x + 1;\n"
+ " break label;\n"
+ " x = x + 1;\n"
+ "}\n"
+ "return x;",
+
+ "var sum = 0;\n"
+ "outer: {\n"
+ " for (var x = 0; x < 10; ++x) {\n"
+ " for (var y = 0; y < 3; ++y) {\n"
+ " ++sum;\n"
+ " if (x + y == 12) { break outer; }\n"
+ " }\n"
+ " }\n"
+ "}\n"
+ "return sum;",
+
+ "outer: {\n"
+ " let y = 10;\n"
+ " function f() { return y; }\n"
+ " break outer;\n"
+ "}\n",
+
+ "let x = 1;\n"
+ "outer: {\n"
+ " inner: {\n"
+ " let y = 2;\n"
+ " function f() { return x + y; }\n"
+ " if (y) break outer;\n"
+ " y = 3;\n"
+ " }\n"
+ "}\n"
+ "x = 4;",
+ };
+
+ CHECK_EQ(BuildActual(printer, snippets),
+ LoadGolden("BreakableBlocks.golden"));
+}
TEST(BasicLoops) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- int closure = Register::function_closure().index();
- int context = Register::current_context().index();
-
- // clang-format off
- ExpectedSnippet<InstanceType> snippets[] = {
- {"var x = 0;\n"
- "while (false) { x = 99; break; continue; }\n"
- "return x;",
- 1 * kPointerSize,
- 1,
- 5,
- {
- B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(Return) //
- }},
- {"var x = 0;"
- "while (false) {"
- " x = x + 1;"
- "};"
- "return x;",
- 1 * kPointerSize,
- 1,
- 5,
- {
- B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(Return), //
- },
- 0},
- {"var x = 0;"
- "var y = 1;"
- "while (x < 10) {"
- " y = y * 12;"
- " x = x + 1;"
- " if (x == 3) continue;"
- " if (x == 4) break;"
- "}"
- "return y;",
- 3 * kPointerSize,
- 1,
- 66,
- {
- B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(LdaSmi8), U8(1), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(Star), R(2), //
- B(LdaSmi8), U8(10), //
- B(TestLessThan), R(2), //
- B(JumpIfFalse), U8(47), //
- B(StackCheck), //
- B(Ldar), R(1), //
- B(Star), R(2), //
- B(LdaSmi8), U8(12), //
- B(Mul), R(2), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(Star), R(2), //
- B(LdaSmi8), U8(1), //
- B(Add), R(2), //
- B(Star), R(0), //
- B(Star), R(2), //
- B(LdaSmi8), U8(3), //
- B(TestEqual), R(2), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(-39), //
- B(Ldar), R(0), //
- B(Star), R(2), //
- B(LdaSmi8), U8(4), //
- B(TestEqual), R(2), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(4), //
- B(Jump), U8(-53), //
- B(Ldar), R(1), //
- B(Return), //
- },
- 0},
- {"var i = 0;"
- "while (true) {"
- " if (i < 0) continue;"
- " if (i == 3) break;"
- " if (i == 4) break;"
- " if (i == 10) continue;"
- " if (i == 5) break;"
- " i = i + 1;"
- "}"
- "return i;",
- 2 * kPointerSize,
- 1,
- 79,
- {
- B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(StackCheck), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(LdaZero), //
- B(TestLessThan), R(1), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(-10), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(3), //
- B(TestEqual), R(1), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(50), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(4), //
- B(TestEqual), R(1), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(38), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(10), //
- B(TestEqual), R(1), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(-46), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(5), //
- B(TestEqual), R(1), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(14), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(1), //
- B(Add), R(1), //
- B(Star), R(0), //
- B(Jump), U8(-70), //
- B(Ldar), R(0), //
- B(Return), //
- },
- 0},
- {"var i = 0;"
- "while (true) {"
- " while (i < 3) {"
- " if (i == 2) break;"
- " i = i + 1;"
- " }"
- " i = i + 1;"
- " break;"
- "}"
- "return i;",
- 2 * kPointerSize,
- 1,
- 57,
- {
- B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(StackCheck), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(3), //
- B(TestLessThan), R(1), //
- B(JumpIfFalse), U8(27), //
- B(StackCheck), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(2), //
- B(TestEqual), R(1), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(14), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(1), //
- B(Add), R(1), //
- B(Star), R(0), //
- B(Jump), U8(-33), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(1), //
- B(Add), R(1), //
- B(Star), R(0), //
- B(Jump), U8(4), //
- B(Jump), U8(-48), //
- B(Ldar), R(0), //
- B(Return), //
- },
- 0},
- {"var x = 10;"
- "var y = 1;"
- "while (x) {"
- " y = y * 12;"
- " x = x - 1;"
- "}"
- "return y;",
- 3 * kPointerSize,
- 1,
- 39,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(10), //
- B(Star), R(0), //
- B(LdaSmi8), U8(1), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(JumpIfToBooleanFalse), U8(25), //
- B(StackCheck), //
- B(Ldar), R(1), //
- B(Star), R(2), //
- B(LdaSmi8), U8(12), //
- B(Mul), R(2), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(Star), R(2), //
- B(LdaSmi8), U8(1), //
- B(Sub), R(2), //
- B(Star), R(0), //
- B(Jump), U8(-25), //
- B(Ldar), R(1), //
- B(Return), //
- },
- 0},
- {"var x = 0; var y = 1;"
- "do {"
- " y = y * 10;"
- " if (x == 5) break;"
- " if (x == 6) continue;"
- " x = x + 1;"
- "} while (x < 10);"
- "return y;",
- 3 * kPointerSize,
- 1,
- 66,
- {
- B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(LdaSmi8), U8(1), //
- B(Star), R(1), //
- B(StackCheck), //
- B(Ldar), R(1), //
- B(Star), R(2), //
- B(LdaSmi8), U8(10), //
- B(Mul), R(2), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(Star), R(2), //
- B(LdaSmi8), U8(5), //
- B(TestEqual), R(2), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(34), //
- B(Ldar), R(0), //
- B(Star), R(2), //
- B(LdaSmi8), U8(6), //
- B(TestEqual), R(2), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(12), //
- B(Ldar), R(0), //
- B(Star), R(2), //
- B(LdaSmi8), U8(1), //
- B(Add), R(2), //
- B(Star), R(0), //
- B(Ldar), R(0), //
- B(Star), R(2), //
- B(LdaSmi8), U8(10), //
- B(TestLessThan), R(2), //
- B(JumpIfTrue), U8(-53), //
- B(Ldar), R(1), //
- B(Return), //
- },
- 0},
- {"var x = 10;"
- "var y = 1;"
- "do {"
- " y = y * 12;"
- " x = x - 1;"
- "} while (x);"
- "return y;",
- 3 * kPointerSize,
- 1,
- 37,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(10), //
- B(Star), R(0), //
- B(LdaSmi8), U8(1), //
- B(Star), R(1), //
- B(StackCheck), //
- B(Ldar), R(1), //
- B(Star), R(2), //
- B(LdaSmi8), U8(12), //
- B(Mul), R(2), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(Star), R(2), //
- B(LdaSmi8), U8(1), //
- B(Sub), R(2), //
- B(Star), R(0), //
- B(Ldar), R(0), //
- B(JumpIfToBooleanTrue), U8(-23), //
- B(Ldar), R(1), //
- B(Return), //
- },
- 0},
- {"var x = 0; var y = 1;"
- "do {"
- " y = y * 10;"
- " if (x == 5) break;"
- " x = x + 1;"
- " if (x == 6) continue;"
- "} while (false);"
- "return y;",
- 3 * kPointerSize,
- 1,
- 54,
- {
- B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(LdaSmi8), U8(1), //
- B(Star), R(1), //
- B(StackCheck), //
- B(Ldar), R(1), //
- B(Star), R(2), //
- B(LdaSmi8), U8(10), //
- B(Mul), R(2), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(Star), R(2), //
- B(LdaSmi8), U8(5), //
- B(TestEqual), R(2), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(22), //
- B(Ldar), R(0), //
- B(Star), R(2), //
- B(LdaSmi8), U8(1), //
- B(Add), R(2), //
- B(Star), R(0), //
- B(Star), R(2), //
- B(LdaSmi8), U8(6), //
- B(TestEqual), R(2), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(2), //
- B(Ldar), R(1), //
- B(Return), //
- },
- 0},
- {"var x = 0; var y = 1;"
- "do {"
- " y = y * 10;"
- " if (x == 5) break;"
- " x = x + 1;"
- " if (x == 6) continue;"
- "} while (true);"
- "return y;",
- 3 * kPointerSize,
- 1,
- 56,
- {
- B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(LdaSmi8), U8(1), //
- B(Star), R(1), //
- B(StackCheck), //
- B(Ldar), R(1), //
- B(Star), R(2), //
- B(LdaSmi8), U8(10), //
- B(Mul), R(2), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(Star), R(2), //
- B(LdaSmi8), U8(5), //
- B(TestEqual), R(2), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(24), //
- B(Ldar), R(0), //
- B(Star), R(2), //
- B(LdaSmi8), U8(1), //
- B(Add), R(2), //
- B(Star), R(0), //
- B(Star), R(2), //
- B(LdaSmi8), U8(6), //
- B(TestEqual), R(2), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(-41), //
- B(Jump), U8(-43), //
- B(Ldar), R(1), //
- B(Return), //
- },
- 0},
- {"var x = 0; "
- "for (;;) {"
- " if (x == 1) break;"
- " if (x == 2) continue;"
- " x = x + 1;"
- "}",
- 2 * kPointerSize,
- 1,
- 43,
- {
- B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(StackCheck), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(1), //
- B(TestEqual), R(1), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(26), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(2), //
- B(TestEqual), R(1), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(-23), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(1), //
- B(Add), R(1), //
- B(Star), R(0), //
- B(Jump), U8(-35), //
- B(LdaUndefined), //
- B(Return), //
- },
- 0},
- {"for (var x = 0;;) {"
- " if (x == 1) break;"
- " if (x == 2) continue;"
- " x = x + 1;"
- "}",
- 2 * kPointerSize,
- 1,
- 43,
- {
- B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(StackCheck), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(1), //
- B(TestEqual), R(1), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(26), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(2), //
- B(TestEqual), R(1), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(-23), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(1), //
- B(Add), R(1), //
- B(Star), R(0), //
- B(Jump), U8(-35), //
- B(LdaUndefined), //
- B(Return), //
- },
- 0},
- {"var x = 0; "
- "for (;; x = x + 1) {"
- " if (x == 1) break;"
- " if (x == 2) continue;"
- "}",
- 2 * kPointerSize,
- 1,
- 43,
- {
- B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(StackCheck), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(1), //
- B(TestEqual), R(1), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(26), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(2), //
- B(TestEqual), R(1), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(2), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(1), //
- B(Add), R(1), //
- B(Star), R(0), //
- B(Jump), U8(-35), //
- B(LdaUndefined), //
- B(Return), //
- },
- 0},
- {"for (var x = 0;; x = x + 1) {"
- " if (x == 1) break;"
- " if (x == 2) continue;"
- "}",
- 2 * kPointerSize,
- 1,
- 43,
- {
- B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(StackCheck), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(1), //
- B(TestEqual), R(1), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(26), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(2), //
- B(TestEqual), R(1), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(2), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(1), //
- B(Add), R(1), //
- B(Star), R(0), //
- B(Jump), U8(-35), //
- B(LdaUndefined), //
- B(Return), //
- },
- 0},
- {"var u = 0;"
- "for (var i = 0; i < 100; i = i + 1) {"
- " u = u + 1;"
- " continue;"
- "}",
- 3 * kPointerSize,
- 1,
- 44,
- {
- B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(LdaZero), //
- B(Star), R(1), //
- B(Ldar), R(1), //
- B(Star), R(2), //
- B(LdaSmi8), U8(100), //
- B(TestLessThan), R(2), //
- B(JumpIfFalse), U8(27), //
- B(StackCheck), //
- B(Ldar), R(0), //
- B(Star), R(2), //
- B(LdaSmi8), U8(1), //
- B(Add), R(2), //
- B(Star), R(0), //
- B(Jump), U8(2), //
- B(Ldar), R(1), //
- B(Star), R(2), //
- B(LdaSmi8), U8(1), //
- B(Add), R(2), //
- B(Star), R(1), //
- B(Jump), U8(-33), //
- B(LdaUndefined), //
- B(Return), //
- },
- 0},
- {"var y = 1;"
- "for (var x = 10; x; --x) {"
- " y = y * 12;"
- "}"
- "return y;",
- 3 * kPointerSize,
- 1,
- 35,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(LdaSmi8), U8(10), //
- B(Star), R(1), //
- B(Ldar), R(1), //
- B(JumpIfToBooleanFalse), U8(21), //
- B(StackCheck), //
- B(Ldar), R(0), //
- B(Star), R(2), //
- B(LdaSmi8), U8(12), //
- B(Mul), R(2), //
- B(Star), R(0), //
- B(Ldar), R(1), //
- B(ToNumber), //
- B(Dec), //
- B(Star), R(1), //
- B(Jump), U8(-21), //
- B(Ldar), R(0), //
- B(Return), //
- },
- 0},
- {"var x = 0;"
- "for (var i = 0; false; i++) {"
- " x = x + 1;"
- "};"
- "return x;",
- 2 * kPointerSize,
- 1,
- 10,
- {
- B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(LdaZero), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(Return), //
- },
- 0},
- {"var x = 0;"
- "for (var i = 0; true; ++i) {"
- " x = x + 1;"
- " if (x == 20) break;"
- "};"
- "return x;",
- 3 * kPointerSize,
- 1,
- 39,
- {
- B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(LdaZero), //
- B(Star), R(1), //
- B(StackCheck), //
- B(Ldar), R(0), //
- B(Star), R(2), //
- B(LdaSmi8), U8(1), //
- B(Add), R(2), //
- B(Star), R(0), //
- B(Star), R(2), //
- B(LdaSmi8), U8(20), //
- B(TestEqual), R(2), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(10), //
- B(Ldar), R(1), //
- B(ToNumber), //
- B(Inc), //
- B(Star), R(1), //
- B(Jump), U8(-27), //
- B(Ldar), R(0), //
- B(Return), //
- },
- 0},
- {"var a = 0;\n"
- "while (a) {\n"
- " { \n"
- " let z = 1;\n"
- " function f() { z = 2; }\n"
- " if (z) continue;\n"
- " z++;\n"
- " }\n"
- "}\n",
- 7 * kPointerSize,
- 1,
- 118,
- {
- B(StackCheck), //
- B(LdaZero), //
- B(Star), R(1), //
- B(Ldar), R(1), //
- B(JumpIfToBooleanFalse), U8(110), //
- B(StackCheck), //
- B(LdaConstant), U8(0), //
- B(Star), R(4), //
- B(Ldar), R(closure), //
- B(Star), R(5), //
- B(CallRuntime), U16(Runtime::kPushBlockContext), R(4), U8(2), //
- B(PushContext), R(3), //
- B(LdaTheHole), //
- B(StaContextSlot), R(context), U8(4), //
- B(CreateClosure), U8(1), U8(0), //
- B(Star), R(0), //
- B(LdaSmi8), U8(1), //
- B(StaContextSlot), R(context), U8(4), //
- B(Ldar), R(0), //
- B(JumpIfNotHole), U8(11), //
- B(LdaConstant), U8(2), //
- B(Star), R(4), //
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1), //
- B(Star), R(2), //
- B(LdaContextSlot), R(context), U8(4), //
- B(JumpIfNotHole), U8(11), //
- B(LdaConstant), U8(3), //
- B(Star), R(4), //
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1), //
- B(JumpIfToBooleanFalse), U8(6), //
- B(PopContext), R(3), //
- B(Jump), U8(-67), //
- B(LdaContextSlot), R(context), U8(4), //
- B(JumpIfNotHole), U8(11), //
- B(LdaConstant), U8(3), //
- B(Star), R(4), //
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1), //
- B(ToNumber), //
- B(Star), R(4), //
- B(Inc), //
- B(Star), R(5), //
- B(LdaContextSlot), R(context), U8(4), //
- B(JumpIfNotHole), U8(11), //
- B(LdaConstant), U8(3), //
- B(Star), R(6), //
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(6), U8(1), //
- B(Ldar), R(5), //
- B(StaContextSlot), R(context), U8(4), //
- B(PopContext), R(3), //
- B(Jump), U8(-110), //
- B(LdaUndefined), //
- B(Return), //
- },
- 4,
- {InstanceType::FIXED_ARRAY_TYPE, InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- };
- // clang-format on
-
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kMixed);
+ const char* snippets[] = {
+ "var x = 0;\n"
+ "while (false) { x = 99; break; continue; }\n"
+ "return x;",
+
+ "var x = 0;\n"
+ "while (false) {\n"
+ " x = x + 1;\n"
+ "};\n"
+ "return x;",
+
+ "var x = 0;\n"
+ "var y = 1;\n"
+ "while (x < 10) {\n"
+ " y = y * 12;\n"
+ " x = x + 1;\n"
+ " if (x == 3) continue;\n"
+ " if (x == 4) break;\n"
+ "}\n"
+ "return y;",
+
+ "var i = 0;\n"
+ "while (true) {\n"
+ " if (i < 0) continue;\n"
+ " if (i == 3) break;\n"
+ " if (i == 4) break;\n"
+ " if (i == 10) continue;\n"
+ " if (i == 5) break;\n"
+ " i = i + 1;\n"
+ "}\n"
+ "return i;",
+
+ "var i = 0;\n"
+ "while (true) {\n"
+ " while (i < 3) {\n"
+ " if (i == 2) break;\n"
+ " i = i + 1;\n"
+ " }\n"
+ " i = i + 1;\n"
+ " break;\n"
+ "}\n"
+ "return i;",
+
+ "var x = 10;\n"
+ "var y = 1;\n"
+ "while (x) {\n"
+ " y = y * 12;\n"
+ " x = x - 1;\n"
+ "}\n"
+ "return y;",
+
+ "var x = 0; var y = 1;\n"
+ "do {\n"
+ " y = y * 10;\n"
+ " if (x == 5) break;\n"
+ " if (x == 6) continue;\n"
+ " x = x + 1;\n"
+ "} while (x < 10);\n"
+ "return y;",
+
+ "var x = 10;\n"
+ "var y = 1;\n"
+ "do {\n"
+ " y = y * 12;\n"
+ " x = x - 1;\n"
+ "} while (x);\n"
+ "return y;",
+
+ "var x = 0; var y = 1;\n"
+ "do {\n"
+ " y = y * 10;\n"
+ " if (x == 5) break;\n"
+ " x = x + 1;\n"
+ " if (x == 6) continue;\n"
+ "} while (false);\n"
+ "return y;",
+
+ "var x = 0; var y = 1;\n"
+ "do {\n"
+ " y = y * 10;\n"
+ " if (x == 5) break;\n"
+ " x = x + 1;\n"
+ " if (x == 6) continue;\n"
+ "} while (true);\n"
+ "return y;",
+
+ "var x = 0;\n"
+ "for (;;) {\n"
+ " if (x == 1) break;\n"
+ " if (x == 2) continue;\n"
+ " x = x + 1;\n"
+ "}",
+
+ "for (var x = 0;;) {\n"
+ " if (x == 1) break;\n"
+ " if (x == 2) continue;\n"
+ " x = x + 1;\n"
+ "}",
+
+ "var x = 0;\n"
+ "for (;; x = x + 1) {\n"
+ " if (x == 1) break;\n"
+ " if (x == 2) continue;\n"
+ "}",
+
+ "for (var x = 0;; x = x + 1) {\n"
+ " if (x == 1) break;\n"
+ " if (x == 2) continue;\n"
+ "}",
+
+ "var u = 0;\n"
+ "for (var i = 0; i < 100; i = i + 1) {\n"
+ " u = u + 1;\n"
+ " continue;\n"
+ "}",
+
+ "var y = 1;\n"
+ "for (var x = 10; x; --x) {\n"
+ " y = y * 12;\n"
+ "}\n"
+ "return y;",
+
+ "var x = 0;\n"
+ "for (var i = 0; false; i++) {\n"
+ " x = x + 1;\n"
+ "};\n"
+ "return x;",
+
+ "var x = 0;\n"
+ "for (var i = 0; true; ++i) {\n"
+ " x = x + 1;\n"
+ " if (x == 20) break;\n"
+ "};\n"
+ "return x;",
+
+ "var a = 0;\n"
+ "while (a) {\n"
+ " { \n"
+ " let z = 1;\n"
+ " function f() { z = 2; }\n"
+ " if (z) continue;\n"
+ " z++;\n"
+ " }\n"
+ "}",
+ };
+
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("BasicLoops.golden"));
}
-
TEST(JumpsRequiringConstantWideOperands) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- int constant_count = 0;
- // clang-format off
- ExpectedSnippet<Handle<Object>, 316> snippets[] = {
- {
- REPEAT_256(SPACE, "var x = 0.1;")
- REPEAT_32(SPACE, "var x = 0.2;")
- REPEAT_16(SPACE, "var x = 0.3;")
- REPEAT_8(SPACE, "var x = 0.4;")
- "for (var i = 0; i < 3; i++) {\n"
- " if (i == 1) continue;\n"
- " if (i == 2) break;\n"
- "}\n"
- "return 3;",
- kPointerSize * 3,
- 1,
- 1361,
- {
- B(StackCheck), //
-#define L(c) B(LdaConstant), U8(c), B(Star), R(0)
- REPEAT_256(COMMA, L(constant_count++)),
-#undef L
-#define LW(c) B(LdaConstantWide), U16I(c), B(Star), R(0)
- REPEAT_32(COMMA, LW(constant_count)),
- REPEAT_16(COMMA, LW(constant_count)),
- REPEAT_8(COMMA, LW(constant_count)),
-#undef LW
- B(LdaZero), //
- B(Star), R(1), //
- B(Ldar), R(1), //
- B(Star), R(2), //
- B(LdaSmi8), U8(3), //
- B(TestLessThan), R(2), //
- B(JumpIfFalseConstantWide), U16(313), //
- B(StackCheck), //
- B(Ldar), R(1), //
- B(Star), R(2), //
- B(LdaSmi8), U8(1), //
- B(TestEqual), R(2), //
- B(JumpIfFalseConstantWide), U16(312), //
- B(JumpConstantWide), U16(315), //
- B(Ldar), R(1), //
- B(Star), R(2), //
- B(LdaSmi8), U8(2), //
- B(TestEqual), R(2), //
- B(JumpIfFalseConstantWide), U16(312), //
- B(JumpConstantWide), U16(314), //
- B(Ldar), R(1), //
- B(ToNumber), //
- B(Star), R(2), //
- B(Inc), //
- B(Star), R(1), //
- B(Jump), U8(-48), //
- B(LdaSmi8), U8(3), //
- B(Return) //
- },
- 316,
- {
-#define S(x) CcTest::i_isolate()->factory()->NewNumber(x)
- REPEAT_256(COMMA, S(0.1)),
- REPEAT_32(COMMA, S(0.2)),
- REPEAT_16(COMMA, S(0.3)),
- REPEAT_8(COMMA, S(0.4)),
-#undef S
-#define N(x) CcTest::i_isolate()->factory()->NewNumberFromInt(x)
- N(6), N(42), N(13), N(17)
-#undef N
- }}
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kNumber);
+ const char* snippets[] = {
+ REPEAT_256("var x = 0.1;\n")
+ REPEAT_32("var x = 0.2;\n")
+ REPEAT_16("var x = 0.3;\n")
+ REPEAT_8("var x = 0.4;\n")
+ "for (var i = 0; i < 3; i++) {\n"
+ " if (i == 1) continue;\n"
+ " if (i == 2) break;\n"
+ "}\n"
+ "return 3;",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets),
+ LoadGolden("JumpsRequiringConstantWideOperands.golden"));
}
-
TEST(UnaryOperators) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- // clang-format off
- ExpectedSnippet<int> snippets[] = {
- {"var x = 0;"
- "while (x != 10) {"
- " x = x + 10;"
- "}"
- "return x;",
- 2 * kPointerSize,
- 1,
- 31,
- {
- B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(10), //
- B(TestEqual), R(1), //
- B(LogicalNot), //
- B(JumpIfFalse), U8(15), //
- B(StackCheck), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(10), //
- B(Add), R(1), //
- B(Star), R(0), //
- B(Jump), U8(-22), //
- B(Ldar), R(0), //
- B(Return), //
- },
- 0},
- {"var x = false;"
- "do {"
- " x = !x;"
- "} while(x == false);"
- "return x;",
- 2 * kPointerSize,
- 1,
- 22,
- {
- B(StackCheck), //
- B(LdaFalse), //
- B(Star), R(0), //
- B(StackCheck), //
- B(Ldar), R(0), //
- B(LogicalNot), //
- B(Star), R(0), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(LdaFalse), //
- B(TestEqual), R(1), //
- B(JumpIfTrue), U8(-13), //
- B(Ldar), R(0), //
- B(Return), //
- },
- 0},
- {"var x = 101;"
- "return void(x * 3);",
- 2 * kPointerSize,
- 1,
- 13,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(101), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(3), //
- B(Mul), R(1), //
- B(LdaUndefined), //
- B(Return), //
- },
- 0},
- {"var x = 1234;"
- "var y = void (x * x - 1);"
- "return y;",
- 4 * kPointerSize,
- 1,
- 21,
- {
- B(StackCheck), //
- B(LdaConstant), U8(0), //
- B(Star), R(0), //
- B(Star), R(2), //
- B(Ldar), R(0), //
- B(Mul), R(2), //
- B(Star), R(3), //
- B(LdaSmi8), U8(1), //
- B(Sub), R(3), //
- B(LdaUndefined), //
- B(Star), R(1), //
- B(Return), //
- },
- 1,
- {1234}},
- {"var x = 13;"
- "return ~x;",
- 2 * kPointerSize,
- 1,
- 12,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(13), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(-1), //
- B(BitwiseXor), R(1), //
- B(Return), //
- },
- 0},
- {"var x = 13;"
- "return +x;",
- 2 * kPointerSize,
- 1,
- 12,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(13), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(1), //
- B(Mul), R(1), //
- B(Return), //
- },
- 0},
- {"var x = 13;"
- "return -x;",
- 2 * kPointerSize,
- 1,
- 12,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(13), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(-1), //
- B(Mul), R(1), //
- B(Return), //
- },
- 0}
- };
- // clang-format on
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kNumber);
+ const char* snippets[] = {
+ "var x = 0;\n"
+ "while (x != 10) {\n"
+ " x = x + 10;\n"
+ "}\n"
+ "return x;",
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
-}
+ "var x = false;\n"
+ "do {\n"
+ " x = !x;\n"
+ "} while(x == false);\n"
+ "return x;",
+ "var x = 101;\n"
+ "return void(x * 3);",
-TEST(Typeof) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
- Zone zone;
-
- FeedbackVectorSpec feedback_spec(&zone);
- FeedbackVectorSlot slot = feedback_spec.AddLoadICSlot();
-
- Handle<i::TypeFeedbackVector> vector =
- i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
-
- // clang-format off
- ExpectedSnippet<const char*> snippets[] = {
- {"function f() {\n"
- " var x = 13;\n"
- " return typeof(x);\n"
- "}; f();",
- kPointerSize,
- 1,
- 7,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(13), //
- B(Star), R(0), //
- B(TypeOf), //
- B(Return), //
- }},
- {"var x = 13;\n"
- "function f() {\n"
- " return typeof(x);\n"
- "}; f();",
- 0,
- 1,
- 6,
- {
- B(StackCheck), //
- B(LdaGlobalInsideTypeof), U8(0), U8(vector->GetIndex(slot)), //
- B(TypeOf), //
- B(Return), //
- },
- 1,
- {"x"}},
+ "var x = 1234;\n"
+ "var y = void (x * x - 1);\n"
+ "return y;",
+
+ "var x = 13;\n"
+ "return ~x;",
+
+ "var x = 13;\n"
+ "return +x;",
+
+ "var x = 13;\n"
+ "return -x;",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunction(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("UnaryOperators.golden"));
}
+TEST(Typeof) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kString);
+ printer.set_wrap(false);
+ printer.set_test_function_name("f");
-TEST(Delete) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- int deep_elements_flags =
- ObjectLiteral::kFastElements | ObjectLiteral::kDisableMementos;
- int closure = Register::function_closure().index();
- int context = Register::current_context().index();
- int first_context_slot = Context::MIN_CONTEXT_SLOTS;
-
- // clang-format off
- ExpectedSnippet<InstanceType> snippets[] = {
- {"var a = {x:13, y:14}; return delete a.x;",
- 2 * kPointerSize,
- 1,
- 16,
- {
- B(StackCheck), //
- B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
- B(Star), R(1), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaConstant), U8(1), //
- B(DeletePropertySloppy), R(1), //
- B(Return)},
- 2,
- {InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- {"'use strict'; var a = {x:13, y:14}; return delete a.x;",
- 2 * kPointerSize,
- 1,
- 16,
- {B(StackCheck), //
- B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
- B(Star), R(1), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaConstant), U8(1), //
- B(DeletePropertyStrict), R(1), //
- B(Return)},
- 2,
- {InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- {"var a = {1:13, 2:14}; return delete a[2];",
- 2 * kPointerSize,
- 1,
- 16,
- {B(StackCheck), //
- B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
- B(Star), R(1), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(2), //
- B(DeletePropertySloppy), R(1), //
- B(Return)},
- 1,
- {InstanceType::FIXED_ARRAY_TYPE}},
- {"var a = 10; return delete a;",
- 1 * kPointerSize,
- 1,
- 7,
- {B(StackCheck), //
- B(LdaSmi8), U8(10), //
- B(Star), R(0), //
- B(LdaFalse), //
- B(Return)},
- 0},
- {"'use strict';"
- "var a = {1:10};"
- "(function f1() {return a;});"
- "return delete a[1];",
- 2 * kPointerSize,
- 1,
- 30,
- {B(CallRuntime), U16(Runtime::kNewFunctionContext), //
- /* */ R(closure), U8(1), //
- B(PushContext), R(0), //
- B(StackCheck), //
- B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
- B(Star), R(1), //
- B(StaContextSlot), R(context), U8(first_context_slot), //
- B(CreateClosure), U8(1), U8(0), //
- B(LdaContextSlot), R(context), U8(first_context_slot), //
- B(Star), R(1), //
- B(LdaSmi8), U8(1), //
- B(DeletePropertyStrict), R(1), //
- B(Return)},
- 2,
- {InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE}},
- {"return delete 'test';",
- 0 * kPointerSize,
- 1,
- 3,
- {B(StackCheck), //
- B(LdaTrue), //
- B(Return)},
- 0},
+ const char* snippets[] = {
+ "function f() {\n"
+ " var x = 13;\n"
+ " return typeof(x);\n"
+ "};",
+
+ "var x = 13;\n"
+ "function f() {\n"
+ " return typeof(x);\n"
+ "};",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets, "", "\nf();"),
+ LoadGolden("Typeof.golden"));
}
+TEST(Delete) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kMixed);
+
+ const char* snippets[] = {
+ "var a = {x:13, y:14}; return delete a.x;",
-TEST(GlobalDelete) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
- Zone zone;
-
- int context = Register::current_context().index();
- int native_context_index = Context::NATIVE_CONTEXT_INDEX;
- int global_context_index = Context::EXTENSION_INDEX;
- FeedbackVectorSpec feedback_spec(&zone);
- FeedbackVectorSlot slot = feedback_spec.AddLoadICSlot();
-
- Handle<i::TypeFeedbackVector> vector =
- i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
-
- // clang-format off
- ExpectedSnippet<InstanceType> snippets[] = {
- {"var a = {x:13, y:14};\n function f() { return delete a.x; };\n f();",
- 1 * kPointerSize,
- 1,
- 11,
- {B(StackCheck), //
- B(LdaGlobal), U8(0), U8(vector->GetIndex(slot)), //
- B(Star), R(0), //
- B(LdaConstant), U8(1), //
- B(DeletePropertySloppy), R(0), //
- B(Return)},
- 2,
- {InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- {"a = {1:13, 2:14};\n"
- "function f() {'use strict'; return delete a[1];};\n f();",
- 1 * kPointerSize,
- 1,
- 11,
- {B(StackCheck), //
- B(LdaGlobal), U8(0), U8(vector->GetIndex(slot)), //
- B(Star), R(0), //
- B(LdaSmi8), U8(1), //
- B(DeletePropertyStrict), R(0), //
- B(Return)},
- 1,
- {InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- {"var a = {x:13, y:14};\n function f() { return delete a; };\n f();",
- 2 * kPointerSize,
- 1,
- 16,
- {B(StackCheck), //
- B(LdaContextSlot), R(context), U8(native_context_index), //
- B(Star), R(0), //
- B(LdaContextSlot), R(0), U8(global_context_index), //
- B(Star), R(1), //
- B(LdaConstant), U8(0), //
- B(DeletePropertySloppy), R(1), //
- B(Return)},
- 1,
- {InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- {"b = 30;\n function f() { return delete b; };\n f();",
- 2 * kPointerSize,
- 1,
- 16,
- {B(StackCheck), //
- B(LdaContextSlot), R(context), U8(native_context_index), //
- B(Star), R(0), //
- B(LdaContextSlot), R(0), U8(global_context_index), //
- B(Star), R(1), //
- B(LdaConstant), U8(0), //
- B(DeletePropertySloppy), R(1), //
- B(Return)},
- 1,
- {InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}}
+ "'use strict'; var a = {x:13, y:14}; return delete a.x;",
+
+ "var a = {1:13, 2:14}; return delete a[2];",
+
+ "var a = 10; return delete a;",
+
+ "'use strict';\n"
+ "var a = {1:10};\n"
+ "(function f1() {return a;});\n"
+ "return delete a[1];",
+
+ "return delete 'test';",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecode(snippets[i].code_snippet, "f");
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("Delete.golden"));
}
+TEST(GlobalDelete) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kMixed);
+ printer.set_wrap(false);
+ printer.set_test_function_name("f");
+
+ const char* snippets[] = {
+ "var a = {x:13, y:14};\n"
+ "function f() {\n"
+ " return delete a.x;\n"
+ "};\n"
+ "f();",
+
+ "a = {1:13, 2:14};\n"
+ "function f() {\n"
+ " 'use strict';\n"
+ " return delete a[1];\n"
+ "};\n"
+ "f();",
+
+ "var a = {x:13, y:14};\n"
+ "function f() {\n"
+ " return delete a;\n"
+ "};\n"
+ "f();",
+
+ "b = 30;\n"
+ "function f() {\n"
+ " return delete b;\n"
+ "};\n"
+ "f();",
+ };
+
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("GlobalDelete.golden"));
+}
TEST(FunctionLiterals) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
- Zone zone;
-
- FeedbackVectorSpec feedback_spec(&zone);
- FeedbackVectorSlot slot = feedback_spec.AddCallICSlot();
-
- Handle<i::TypeFeedbackVector> vector =
- i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
-
- // clang-format off
- ExpectedSnippet<InstanceType> snippets[] = {
- {"return function(){ }",
- 0,
- 1,
- 5,
- {
- B(StackCheck), //
- B(CreateClosure), U8(0), U8(0), //
- B(Return) //
- },
- 1,
- {InstanceType::SHARED_FUNCTION_INFO_TYPE}},
- {"return (function(){ })()",
- 2 * kPointerSize,
- 1,
- 15,
- {
- B(StackCheck), //
- B(LdaUndefined), //
- B(Star), R(1), //
- B(CreateClosure), U8(0), U8(0), //
- B(Star), R(0), //
- B(Call), R(0), R(1), U8(1), U8(vector->GetIndex(slot)), //
- B(Return) //
- },
- 1,
- {InstanceType::SHARED_FUNCTION_INFO_TYPE}},
- {"return (function(x){ return x; })(1)",
- 3 * kPointerSize,
- 1,
- 19,
- {
- B(StackCheck), //
- B(LdaUndefined), //
- B(Star), R(1), //
- B(CreateClosure), U8(0), U8(0), //
- B(Star), R(0), //
- B(LdaSmi8), U8(1), //
- B(Star), R(2), //
- B(Call), R(0), R(1), U8(2), U8(vector->GetIndex(slot)), //
- B(Return) //
- },
- 1,
- {InstanceType::SHARED_FUNCTION_INFO_TYPE}},
- };
- // clang-format on
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kMixed);
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
-}
+ const char* snippets[] = {
+ "return function(){ }",
+ "return (function(){ })()",
-TEST(RegExpLiterals) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
- Zone zone;
-
- FeedbackVectorSpec feedback_spec(&zone);
- FeedbackVectorSlot slot1 = feedback_spec.AddCallICSlot();
- FeedbackVectorSlot slot2 = feedback_spec.AddLoadICSlot();
- uint8_t i_flags = JSRegExp::kIgnoreCase;
-
- Handle<i::TypeFeedbackVector> vector =
- i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
-
- // clang-format off
- ExpectedSnippet<const char*> snippets[] = {
- {"return /ab+d/;",
- 0 * kPointerSize,
- 1,
- 6,
- {
- B(StackCheck), //
- B(CreateRegExpLiteral), U8(0), U8(0), U8(0), //
- B(Return), //
- },
- 1,
- {"ab+d"}},
- {"return /(\\w+)\\s(\\w+)/i;",
- 0 * kPointerSize,
- 1,
- 6,
- {
- B(StackCheck), //
- B(CreateRegExpLiteral), U8(0), U8(0), U8(i_flags), //
- B(Return), //
- },
- 1,
- {"(\\w+)\\s(\\w+)"}},
- {"return /ab+d/.exec('abdd');",
- 3 * kPointerSize,
- 1,
- 23,
- {
- B(StackCheck), //
- B(CreateRegExpLiteral), U8(0), U8(0), U8(0), //
- B(Star), R(1), //
- B(LoadIC), R(1), U8(1), U8(vector->GetIndex(slot2)), //
- B(Star), R(0), //
- B(LdaConstant), U8(2), //
- B(Star), R(2), //
- B(Call), R(0), R(1), U8(2), U8(vector->GetIndex(slot1)), //
- B(Return), //
- },
- 3,
- {"ab+d", "exec", "abdd"}},
+ "return (function(x){ return x; })(1)",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets),
+ LoadGolden("FunctionLiterals.golden"));
}
+TEST(RegExpLiterals) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kString);
-TEST(RegExpLiteralsWide) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
- Zone zone;
-
- int wide_idx = 0;
-
- // clang-format off
- ExpectedSnippet<InstanceType, 257> snippets[] = {
- {"var a;" REPEAT_256(SPACE, "a = 1.23;") "return /ab+d/;",
- 1 * kPointerSize,
- 1,
- 1032,
- {
- B(StackCheck), //
- REPEAT_256(COMMA, //
- B(LdaConstant), U8(wide_idx++), //
- B(Star), R(0)), //
- B(CreateRegExpLiteralWide), U16(256), U16(0), U8(0), //
- B(Return) //
- },
- 257,
- {REPEAT_256(COMMA, InstanceType::HEAP_NUMBER_TYPE),
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
+ const char* snippets[] = {
+ "return /ab+d/;",
+
+ "return /(\\w+)\\s(\\w+)/i;",
+
+ "return /ab+d/.exec('abdd');",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("RegExpLiterals.golden"));
}
+TEST(RegExpLiteralsWide) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kMixed);
-TEST(ArrayLiterals) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
- Zone zone;
-
- FeedbackVectorSpec feedback_spec(&zone);
- FeedbackVectorSlot slot1 = feedback_spec.AddKeyedStoreICSlot();
- FeedbackVectorSlot slot2 = feedback_spec.AddKeyedStoreICSlot();
- FeedbackVectorSlot slot3 = feedback_spec.AddKeyedStoreICSlot();
-
- Handle<i::TypeFeedbackVector> vector =
- i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
-
- int simple_flags =
- ArrayLiteral::kDisableMementos | ArrayLiteral::kShallowElements;
- int deep_elements_flags = ArrayLiteral::kDisableMementos;
- // clang-format off
- ExpectedSnippet<InstanceType> snippets[] = {
- {"return [ 1, 2 ];",
- 0,
- 1,
- 6,
- {
- B(StackCheck), //
- B(CreateArrayLiteral), U8(0), U8(0), U8(simple_flags), //
- B(Return) //
- },
- 1,
- {InstanceType::FIXED_ARRAY_TYPE}},
- {"var a = 1; return [ a, a + 1 ];",
- 4 * kPointerSize,
- 1,
- 39,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(CreateArrayLiteral), U8(0), U8(0), U8(3), //
- B(Star), R(2), //
- B(LdaZero), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(KeyedStoreICSloppy), R(2), R(1), U8(vector->GetIndex(slot1)), //
- B(LdaSmi8), U8(1), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(Star), R(3), //
- B(LdaSmi8), U8(1), //
- B(Add), R(3), //
- B(KeyedStoreICSloppy), R(2), R(1), U8(vector->GetIndex(slot1)), //
- B(Ldar), R(2), //
- B(Return), //
- },
- 1,
- {InstanceType::FIXED_ARRAY_TYPE}},
- {"return [ [ 1, 2 ], [ 3 ] ];",
- 0,
- 1,
- 6,
- {
- B(StackCheck), //
- B(CreateArrayLiteral), U8(0), U8(2), U8(deep_elements_flags), //
- B(Return) //
- },
- 1,
- {InstanceType::FIXED_ARRAY_TYPE}},
- {"var a = 1; return [ [ a, 2 ], [ a + 2 ] ];",
- 6 * kPointerSize,
- 1,
- 69,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(CreateArrayLiteral), U8(0), U8(2), U8(deep_elements_flags), //
- B(Star), R(2), //
- B(LdaZero), //
- B(Star), R(1), //
- B(CreateArrayLiteral), U8(1), U8(0), U8(simple_flags), //
- B(Star), R(4), //
- B(LdaZero), //
- B(Star), R(3), //
- B(Ldar), R(0), //
- B(KeyedStoreICSloppy), R(4), R(3), U8(vector->GetIndex(slot1)), //
- B(Ldar), R(4), //
- B(KeyedStoreICSloppy), R(2), R(1), U8(vector->GetIndex(slot3)), //
- B(LdaSmi8), U8(1), //
- B(Star), R(1), //
- B(CreateArrayLiteral), U8(2), U8(1), U8(simple_flags), //
- B(Star), R(4), //
- B(LdaZero), //
- B(Star), R(3), //
- B(Ldar), R(0), //
- B(Star), R(5), //
- B(LdaSmi8), U8(2), //
- B(Add), R(5), //
- B(KeyedStoreICSloppy), R(4), R(3), U8(vector->GetIndex(slot2)), //
- B(Ldar), R(4), //
- B(KeyedStoreICSloppy), R(2), R(1), U8(vector->GetIndex(slot3)), //
- B(Ldar), R(2), //
- B(Return), //
- },
- 3,
- {InstanceType::FIXED_ARRAY_TYPE, InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::FIXED_ARRAY_TYPE}},
+ const char* snippets[] = {
+ "var a;" //
+ REPEAT_256("\na = 1.23;") //
+ "\nreturn /ab+d/;",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets),
+ LoadGolden("RegExpLiteralsWide.golden"));
}
+TEST(ArrayLiterals) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kMixed);
-TEST(ArrayLiteralsWide) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
- Zone zone;
-
- int wide_idx = 0;
- int simple_flags =
- ArrayLiteral::kDisableMementos | ArrayLiteral::kShallowElements;
-
- // clang-format off
- ExpectedSnippet<InstanceType, 257> snippets[] = {
- {"var a;" REPEAT_256(SPACE, "a = 1.23;") "return [ 1 , 2 ];",
- 1 * kPointerSize,
- 1,
- 1032,
- {
- B(StackCheck), //
- REPEAT_256(COMMA, //
- B(LdaConstant), U8(wide_idx++), //
- B(Star), R(0)), //
- B(CreateArrayLiteralWide), U16(256), U16(0), U8(simple_flags), //
- B(Return) //
- },
- 257,
- {REPEAT_256(COMMA, InstanceType::HEAP_NUMBER_TYPE),
- InstanceType::FIXED_ARRAY_TYPE}},
+ const char* snippets[] = {
+ "return [ 1, 2 ];",
+
+ "var a = 1; return [ a, a + 1 ];",
+
+ "return [ [ 1, 2 ], [ 3 ] ];",
+
+ "var a = 1; return [ [ a, 2 ], [ a + 2 ] ];",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("ArrayLiterals.golden"));
}
+TEST(ArrayLiteralsWide) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kMixed);
-TEST(ObjectLiterals) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
- Zone zone;
-
- FeedbackVectorSpec feedback_spec(&zone);
- FeedbackVectorSlot slot1 = feedback_spec.AddStoreICSlot();
-
- Handle<i::TypeFeedbackVector> vector =
- i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
-
- int simple_flags = ObjectLiteral::kFastElements |
- ObjectLiteral::kShallowProperties |
- ObjectLiteral::kDisableMementos;
- int deep_elements_flags =
- ObjectLiteral::kFastElements | ObjectLiteral::kDisableMementos;
-
- // clang-format off
- ExpectedSnippet<InstanceType> snippets[] = {
- {"return { };",
- kPointerSize,
- 1,
- 8,
- {
- B(StackCheck), //
- B(CreateObjectLiteral), U8(0), U8(0), U8(simple_flags), //
- B(Star), R(0), //
- B(Return) //
- },
- 1,
- {InstanceType::FIXED_ARRAY_TYPE}},
- {"return { name: 'string', val: 9.2 };",
- kPointerSize,
- 1,
- 8,
- {
- B(StackCheck), //
- B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
- B(Star), R(0), //
- B(Return) //
- },
- 1,
- {InstanceType::FIXED_ARRAY_TYPE}},
- {"var a = 1; return { name: 'string', val: a };",
- 2 * kPointerSize,
- 1,
- 20,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(StoreICSloppy), R(1), U8(1), U8(vector->GetIndex(slot1)), //
- B(Ldar), R(1), //
- B(Return), //
- },
- 2,
- {InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- {"var a = 1; return { val: a, val: a + 1 };",
- 3 * kPointerSize,
- 1,
- 26,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(Star), R(2), //
- B(LdaSmi8), U8(1), //
- B(Add), R(2), //
- B(StoreICSloppy), R(1), U8(1), U8(vector->GetIndex(slot1)), //
- B(Ldar), R(1), //
- B(Return), //
- },
- 2,
- {InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- {"return { func: function() { } };",
- 1 * kPointerSize,
- 1,
- 17,
- {
- B(StackCheck), //
- B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
- B(Star), R(0), //
- B(CreateClosure), U8(1), U8(0), //
- B(StoreICSloppy), R(0), U8(2), U8(vector->GetIndex(slot1)), //
- B(Ldar), R(0), //
- B(Return), //
- },
- 3,
- {InstanceType::FIXED_ARRAY_TYPE, InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- {"return { func(a) { return a; } };",
- 1 * kPointerSize,
- 1,
- 17,
- {
- B(StackCheck), //
- B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
- B(Star), R(0), //
- B(CreateClosure), U8(1), U8(0), //
- B(StoreICSloppy), R(0), U8(2), U8(vector->GetIndex(slot1)), //
- B(Ldar), R(0), //
- B(Return), //
- },
- 3,
- {InstanceType::FIXED_ARRAY_TYPE, InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- {"return { get a() { return 2; } };",
- 6 * kPointerSize,
- 1,
- 33,
- {
- B(StackCheck), //
- B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
- B(Star), R(0), //
- B(Mov), R(0), R(1), //
- B(LdaConstant), U8(1), //
- B(Star), R(2), //
- B(CreateClosure), U8(2), U8(0), //
- B(Star), R(3), //
- B(LdaNull), //
- B(Star), R(4), //
- B(LdaZero), //
- B(Star), R(5), //
- B(CallRuntime), U16(Runtime::kDefineAccessorPropertyUnchecked), //
- /* */ R(1), U8(5), //
- B(Ldar), R(0), //
- B(Return), //
- },
- 3,
- {InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE}},
- {"return { get a() { return this.x; }, set a(val) { this.x = val } };",
- 6 * kPointerSize,
- 1,
- 35,
- {
- B(StackCheck), //
- B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
- B(Star), R(0), //
- B(Mov), R(0), R(1), //
- B(LdaConstant), U8(1), //
- B(Star), R(2), //
- B(CreateClosure), U8(2), U8(0), //
- B(Star), R(3), //
- B(CreateClosure), U8(3), U8(0), //
- B(Star), R(4), //
- B(LdaZero), //
- B(Star), R(5), //
- B(CallRuntime), U16(Runtime::kDefineAccessorPropertyUnchecked), //
- /* */ R(1), U8(5), //
- B(Ldar), R(0), //
- B(Return), //
- },
- 4,
- {InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE}},
- {"return { set b(val) { this.y = val } };",
- 6 * kPointerSize,
- 1,
- 33,
- {
- B(StackCheck), //
- B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
- B(Star), R(0), //
- B(Mov), R(0), R(1), //
- B(LdaConstant), U8(1), //
- B(Star), R(2), //
- B(LdaNull), //
- B(Star), R(3), //
- B(CreateClosure), U8(2), U8(0), //
- B(Star), R(4), //
- B(LdaZero), //
- B(Star), R(5), //
- B(CallRuntime), U16(Runtime::kDefineAccessorPropertyUnchecked), //
- /* */ R(1), U8(5), //
- B(Ldar), R(0), //
- B(Return), //
- },
- 3,
- {InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE}},
- {"var a = 1; return { 1: a };",
- 6 * kPointerSize,
- 1,
- 33,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
- B(Star), R(1), //
- B(Mov), R(1), R(2), //
- B(LdaSmi8), U8(1), //
- B(Star), R(3), //
- B(Ldar), R(0), //
- B(Star), R(4), //
- B(LdaZero), //
- B(Star), R(5), //
- B(CallRuntime), U16(Runtime::kSetProperty), R(2), U8(4), //
- B(Ldar), R(1), //
- B(Return), //
- },
- 1,
- {InstanceType::FIXED_ARRAY_TYPE}},
- {"return { __proto__: null }",
- 3 * kPointerSize,
- 1,
- 21,
- {
- B(StackCheck), //
- B(CreateObjectLiteral), U8(0), U8(0), U8(simple_flags), //
- B(Star), R(0), //
- B(Mov), R(0), R(1), //
- B(LdaNull), B(Star), R(2), //
- B(CallRuntime), U16(Runtime::kInternalSetPrototype), R(1), U8(2), //
- B(Ldar), R(0), //
- B(Return), //
- },
- 1,
- {InstanceType::FIXED_ARRAY_TYPE}},
- {"var a = 'test'; return { [a]: 1 }",
- 7 * kPointerSize,
- 1,
- 37,
- {
- B(StackCheck), //
- B(LdaConstant), U8(0), //
- B(Star), R(0), //
- B(CreateObjectLiteral), U8(1), U8(0), U8(simple_flags), //
- B(Star), R(1), //
- B(Mov), R(1), R(2), //
- B(Ldar), R(0), //
- B(ToName), //
- B(Star), R(3), //
- B(LdaSmi8), U8(1), //
- B(Star), R(4), //
- B(LdaZero), //
- B(Star), R(5), //
- B(LdaZero), //
- B(Star), R(6), //
- B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(2), //
- /* */ U8(5), //
- B(Ldar), R(1), //
- B(Return), //
- },
- 2,
- {InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::FIXED_ARRAY_TYPE}},
- {"var a = 'test'; return { val: a, [a]: 1 }",
- 7 * kPointerSize,
- 1,
- 43,
- {
- B(StackCheck), //
- B(LdaConstant), U8(0), //
- B(Star), R(0), //
- B(CreateObjectLiteral), U8(1), U8(0), U8(deep_elements_flags), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(StoreICSloppy), R(1), U8(2), U8(vector->GetIndex(slot1)), //
- B(Mov), R(1), R(2), //
- B(Ldar), R(0), //
- B(ToName), //
- B(Star), R(3), //
- B(LdaSmi8), U8(1), //
- B(Star), R(4), //
- B(LdaZero), //
- B(Star), R(5), //
- B(LdaZero), //
- B(Star), R(6), //
- B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(2), //
- /* */ U8(5), //
- B(Ldar), R(1), //
- B(Return), //
- },
- 3,
- {InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- {"var a = 'test'; return { [a]: 1, __proto__: {} }",
- 7 * kPointerSize,
- 1,
- 53,
- {
- B(StackCheck), //
- B(LdaConstant), U8(0), //
- B(Star), R(0), //
- B(CreateObjectLiteral), U8(1), U8(1), U8(simple_flags), //
- B(Star), R(1), //
- B(Mov), R(1), R(2), //
- B(Ldar), R(0), //
- B(ToName), //
- B(Star), R(3), //
- B(LdaSmi8), U8(1), //
- B(Star), R(4), //
- B(LdaZero), //
- B(Star), R(5), //
- B(LdaZero), //
- B(Star), R(6), //
- B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(2), //
- /* */ U8(5), //
- B(Mov), R(1), R(2), //
- B(CreateObjectLiteral), U8(1), U8(0), U8(13), //
- B(Star), R(4), //
- B(Star), R(3), //
- B(CallRuntime), U16(Runtime::kInternalSetPrototype), R(2), U8(2), //
- B(Ldar), R(1), //
- B(Return), //
- },
- 2,
- {InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::FIXED_ARRAY_TYPE}},
- {"var n = 'name'; return { [n]: 'val', get a() { }, set a(b) {} };",
- 7 * kPointerSize,
- 1,
- 77,
- {
- B(StackCheck), //
- B(LdaConstant), U8(0), //
- B(Star), R(0), //
- B(CreateObjectLiteral), U8(1), U8(0), U8(simple_flags), //
- B(Star), R(1), //
- B(Mov), R(1), R(2), //
- B(Ldar), R(0), //
- B(ToName), //
- B(Star), R(3), //
- B(LdaConstant), U8(2), //
- B(Star), R(4), //
- B(LdaZero), //
- B(Star), R(5), //
- B(LdaZero), //
- B(Star), R(6), //
- B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(2), //
- /* */ U8(5), //
- B(Mov), R(1), R(2), //
- B(LdaConstant), U8(3), //
- B(Star), R(3), //
- B(CreateClosure), U8(4), U8(0), //
- B(Star), R(4), //
- B(LdaZero), //
- B(Star), R(5), //
- B(CallRuntime), U16(Runtime::kDefineGetterPropertyUnchecked), //
- /* */ R(2), U8(4), //
- B(Mov), R(1), R(2), //
- B(LdaConstant), U8(3), //
- B(Star), R(3), //
- B(CreateClosure), U8(5), U8(0), //
- B(Star), R(4), //
- B(LdaZero), //
- B(Star), R(5), //
- B(CallRuntime), U16(Runtime::kDefineSetterPropertyUnchecked), //
- /* */ R(2), U8(4), //
- B(Ldar), R(1), //
- B(Return), //
- },
- 6,
- {InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE}},
+ const char* snippets[] = {
+ "var a;" //
+ REPEAT_256("\na = 1.23;") //
+ "\nreturn [ 1 , 2 ];",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets),
+ LoadGolden("ArrayLiteralsWide.golden"));
}
+TEST(ObjectLiterals) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kMixed);
-TEST(ObjectLiteralsWide) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
- Zone zone;
-
- int deep_elements_flags =
- ObjectLiteral::kFastElements | ObjectLiteral::kDisableMementos;
- int wide_idx = 0;
-
- // clang-format off
- ExpectedSnippet<InstanceType, 257> snippets[] = {
- {"var a;" REPEAT_256(SPACE,
- "a = 1.23;") "return { name: 'string', val: 9.2 };",
- 2 * kPointerSize,
- 1,
- 1034,
- {
- B(StackCheck), //
- REPEAT_256(COMMA, //
- B(LdaConstant), U8(wide_idx++), //
- B(Star), R(0)), //
- B(CreateObjectLiteralWide), U16(256), U16(0), //
- /* */ U8(deep_elements_flags), //
- B(Star), R(1), //
- B(Return) //
- },
- 257,
- {REPEAT_256(COMMA, InstanceType::HEAP_NUMBER_TYPE),
- InstanceType::FIXED_ARRAY_TYPE}},
+ const char* snippets[] = {
+ "return { };",
+
+ "return { name: 'string', val: 9.2 };",
+
+ "var a = 1; return { name: 'string', val: a };",
+
+ "var a = 1; return { val: a, val: a + 1 };",
+
+ "return { func: function() { } };",
+
+ "return { func(a) { return a; } };",
+
+ "return { get a() { return 2; } };",
+
+ "return { get a() { return this.x; }, set a(val) { this.x = val } };",
+
+ "return { set b(val) { this.y = val } };",
+
+ "var a = 1; return { 1: a };",
+
+ "return { __proto__: null };",
+
+ "var a = 'test'; return { [a]: 1 };",
+
+ "var a = 'test'; return { val: a, [a]: 1 };",
+
+ "var a = 'test'; return { [a]: 1, __proto__: {} };",
+
+ "var n = 'name'; return { [n]: 'val', get a() { }, set a(b) {} };",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("ObjectLiterals.golden"));
}
-
-TEST(TopLevelObjectLiterals) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- int has_function_flags = ObjectLiteral::kFastElements |
- ObjectLiteral::kHasFunction |
- ObjectLiteral::kDisableMementos;
- // clang-format off
- ExpectedSnippet<InstanceType> snippets[] = {
- {"var a = { func: function() { } };",
- 5 * kPointerSize,
- 1,
- 49,
- {
- B(LdaConstant), U8(0), //
- B(Star), R(1), //
- B(LdaZero), //
- B(Star), R(2), //
- B(CallRuntime), U16(Runtime::kDeclareGlobals), R(1), U8(2), //
- B(StackCheck), //
- B(LdaConstant), U8(1), //
- B(Star), R(1), //
- B(LdaZero), //
- B(Star), R(2), //
- B(CreateObjectLiteral), U8(2), U8(0), U8(has_function_flags), //
- B(Star), R(4), //
- B(CreateClosure), U8(3), U8(1), //
- B(StoreICSloppy), R(4), U8(4), U8(3), //
- B(CallRuntime), U16(Runtime::kToFastProperties), R(4), U8(1), //
- B(Ldar), R(4), //
- B(Star), R(3), //
- B(CallRuntime), U16(Runtime::kInitializeVarGlobal), R(1), U8(3), //
- B(LdaUndefined), //
- B(Return), //
- },
- 5,
- {InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
+TEST(ObjectLiteralsWide) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kMixed);
+ const char* snippets[] = {
+ "var a;" //
+ REPEAT_256("\na = 1.23;") //
+ "\nreturn { name: 'string', val: 9.2 };",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeTopLevelBytecode(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets),
+ LoadGolden("ObjectLiteralsWide.golden"));
}
+TEST(TopLevelObjectLiterals) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kMixed);
+ printer.set_wrap(false);
+ printer.set_test_function_name("f");
+ printer.set_execute(false);
+ printer.set_top_level(true);
-TEST(TryCatch) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- int closure = Register::function_closure().index();
- int context = Register::current_context().index();
-
- // clang-format off
- ExpectedSnippet<const char*> snippets[] = {
- {"try { return 1; } catch(e) { return 2; }",
- 5 * kPointerSize,
- 1,
- 40,
- {
- B(StackCheck), //
- B(Mov), R(context), R(1), //
- B(LdaSmi8), U8(1), //
- B(Return), //
- B(Star), R(3), //
- B(LdaConstant), U8(0), //
- B(Star), R(2), //
- B(Ldar), R(closure), //
- B(Star), R(4), //
- B(CallRuntime), U16(Runtime::kPushCatchContext), R(2), U8(3), //
- B(Star), R(1), //
- B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), //
- /* */ R(0), U8(0), //
- B(Ldar), R(1), //
- B(PushContext), R(0), //
- B(LdaSmi8), U8(2), //
- B(PopContext), R(0), //
- B(Return), //
- // TODO(mstarzinger): Potential optimization, elide next bytes.
- B(LdaUndefined), //
- B(Return), //
- },
- 1,
- {"e"},
- 1,
- {{4, 7, 7}}},
- {"var a; try { a = 1 } catch(e1) {}; try { a = 2 } catch(e2) { a = 3 }",
- 6 * kPointerSize,
- 1,
- 81,
- {
- B(StackCheck), //
- B(Mov), R(context), R(2), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(Jump), U8(30), //
- B(Star), R(4), //
- B(LdaConstant), U8(0), //
- B(Star), R(3), //
- B(Ldar), R(closure), //
- B(Star), R(5), //
- B(CallRuntime), U16(Runtime::kPushCatchContext), R(3), U8(3), //
- B(Star), R(2), //
- B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), //
- /* */ R(0), U8(0), //
- B(Ldar), R(2), //
- B(PushContext), R(1), //
- B(PopContext), R(1), //
- B(Mov), R(context), R(2), //
- B(LdaSmi8), U8(2), //
- B(Star), R(0), //
- B(Jump), U8(34), //
- B(Star), R(4), //
- B(LdaConstant), U8(1), //
- B(Star), R(3), //
- B(Ldar), R(closure), //
- B(Star), R(5), //
- B(CallRuntime), U16(Runtime::kPushCatchContext), R(3), U8(3), //
- B(Star), R(2), //
- B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), //
- /* */ R(0), U8(0), //
- B(Ldar), R(2), //
- B(PushContext), R(1), //
- B(LdaSmi8), U8(3), //
- B(Star), R(0), //
- B(PopContext), R(1), //
- B(LdaUndefined), //
- B(Return), //
- },
- 2,
- {"e1", "e2"},
- 2,
- {{4, 8, 10}, {41, 45, 47}}},
+ const char* snippets[] = {
+ "var a = { func: function() { } };",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets),
+ LoadGolden("TopLevelObjectLiterals.golden"));
}
+TEST(TryCatch) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kString);
-TEST(TryFinally) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- int closure = Register::function_closure().index();
- int context = Register::current_context().index();
-
- // clang-format off
- ExpectedSnippet<const char*> snippets[] = {
- {"var a = 1; try { a = 2; } finally { a = 3; }",
- 4 * kPointerSize,
- 1,
- 51,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(Mov), R(context), R(3), //
- B(LdaSmi8), U8(2), //
- B(Star), R(0), //
- B(LdaSmi8), U8(-1), //
- B(Star), R(1), //
- B(Jump), U8(7), //
- B(Star), R(2), //
- B(LdaZero), //
- B(Star), R(1), //
- B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), //
- /* */ R(0), U8(0), //
- B(Star), R(3), //
- B(LdaSmi8), U8(3), //
- B(Star), R(0), //
- B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), //
- /* */ R(3), U8(1), //
- B(LdaZero), //
- B(TestEqualStrict), R(1), //
- B(JumpIfTrue), U8(4), //
- B(Jump), U8(5), //
- B(Ldar), R(2), //
- B(ReThrow), //
- B(LdaUndefined), //
- B(Return), //
- },
- 0,
- {},
- 1,
- {{8, 12, 18}}},
- {"var a = 1; try { a = 2; } catch(e) { a = 20 } finally { a = 3; }",
- 9 * kPointerSize,
- 1,
- 88,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(Mov), R(context), R(4), //
- B(Mov), R(context), R(5), //
- B(LdaSmi8), U8(2), //
- B(Star), R(0), //
- B(Jump), U8(34), //
- B(Star), R(7), //
- B(LdaConstant), U8(0), //
- B(Star), R(6), //
- B(Ldar), R(closure), //
- B(Star), R(8), //
- B(CallRuntime), U16(Runtime::kPushCatchContext), R(6), U8(3), //
- B(Star), R(5), //
- B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), //
- /* */ R(0), U8(0), //
- B(Ldar), R(5), //
- B(PushContext), R(1), //
- B(LdaSmi8), U8(20), //
- B(Star), R(0), //
- B(PopContext), R(1), //
- B(LdaSmi8), U8(-1), //
- B(Star), R(2), //
- B(Jump), U8(7), //
- B(Star), R(3), //
- B(LdaZero), //
- B(Star), R(2), //
- B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), //
- /* */ R(0), U8(0), //
- B(Star), R(4), //
- B(LdaSmi8), U8(3), //
- B(Star), R(0), //
- B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), //
- /* */ R(4), U8(1), //
- B(LdaZero), //
- B(TestEqualStrict), R(2), //
- B(JumpIfTrue), U8(4), //
- B(Jump), U8(5), //
- B(Ldar), R(3), //
- B(ReThrow), //
- B(LdaUndefined), //
- B(Return), //
- },
- 1,
- {"e"},
- 2,
- {{8, 49, 55}, {11, 15, 17}}},
- {"var a; try {"
- " try { a = 1 } catch(e) { a = 2 }"
- "} catch(e) { a = 20 } finally { a = 3; }",
- 10 * kPointerSize,
- 1,
- 121,
- {
- B(StackCheck), //
- B(Mov), R(context), R(4), //
- B(Mov), R(context), R(5), //
- B(Mov), R(context), R(6), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(Jump), U8(34), //
- B(Star), R(8), //
- B(LdaConstant), U8(0), //
- B(Star), R(7), //
- B(Ldar), R(closure), //
- B(Star), R(9), //
- B(CallRuntime), U16(Runtime::kPushCatchContext), R(7), U8(3), //
- B(Star), R(6), //
- B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), //
- /* */ R(0), U8(0), //
- B(Ldar), R(6), //
- B(PushContext), R(1), //
- B(LdaSmi8), U8(2), //
- B(Star), R(0), //
- B(PopContext), R(1), //
- B(Jump), U8(34), //
- B(Star), R(7), //
- B(LdaConstant), U8(0), //
- B(Star), R(6), //
- B(Ldar), R(closure), //
- B(Star), R(8), //
- B(CallRuntime), U16(Runtime::kPushCatchContext), R(6), U8(3), //
- B(Star), R(5), //
- B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), //
- /* */ R(0), U8(0), //
- B(Ldar), R(5), //
- B(PushContext), R(1), //
- B(LdaSmi8), U8(20), //
- B(Star), R(0), //
- B(PopContext), R(1), //
- B(LdaSmi8), U8(-1), //
- B(Star), R(2), //
- B(Jump), U8(7), //
- B(Star), R(3), //
- B(LdaZero), //
- B(Star), R(2), //
- B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), //
- /* */ R(0), U8(0), //
- B(Star), R(4), //
- B(LdaSmi8), U8(3), //
- B(Star), R(0), //
- B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), //
- /* */ R(4), U8(1), //
- B(LdaZero), //
- B(TestEqualStrict), R(2), //
- B(JumpIfTrue), U8(4), //
- B(Jump), U8(5), //
- B(Ldar), R(3), //
- B(ReThrow), //
- B(LdaUndefined), //
- B(Return), //
- },
- 1,
- {"e"},
- 3,
- {{4, 82, 88}, {7, 48, 50}, {10, 14, 16}}},
+ const char* snippets[] = {
+ "try { return 1; } catch(e) { return 2; }",
+
+ "var a;\n"
+ "try { a = 1 } catch(e1) {};\n"
+ "try { a = 2 } catch(e2) { a = 3 }",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("TryCatch.golden"));
}
+TEST(TryFinally) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kString);
+ const char* snippets[] = {
+ "var a = 1;\n"
+ "try { a = 2; } finally { a = 3; }",
-TEST(Throw) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- // clang-format off
- ExpectedSnippet<const char*> snippets[] = {
- {"throw 1;",
- 0,
- 1,
- 4,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Throw), //
- },
- 0},
- {"throw 'Error';",
- 0,
- 1,
- 4,
- {
- B(StackCheck), //
- B(LdaConstant), U8(0), //
- B(Throw), //
- },
- 1,
- {"Error"}},
- {"var a = 1; if (a) { throw 'Error'; };",
- 1 * kPointerSize,
- 1,
- 12,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(JumpIfToBooleanFalse), U8(5), //
- B(LdaConstant), U8(0), //
- B(Throw), //
- B(LdaUndefined), //
- B(Return), //
- },
- 1,
- {"Error"}},
+ "var a = 1;\n"
+ "try { a = 2; } catch(e) { a = 20 } finally { a = 3; }",
+
+ "var a; try {\n"
+ " try { a = 1 } catch(e) { a = 2 }\n"
+ "} catch(e) { a = 20 } finally { a = 3; }",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("TryFinally.golden"));
}
+TEST(Throw) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kString);
+ const char* snippets[] = {
+ "throw 1;",
+
+ "throw 'Error';",
-TEST(CallNew) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
- Zone zone;
-
- FeedbackVectorSpec feedback_spec(&zone);
- FeedbackVectorSlot slot1 = feedback_spec.AddGeneralSlot();
- FeedbackVectorSlot slot2 = feedback_spec.AddLoadICSlot();
- USE(slot1);
-
- Handle<i::TypeFeedbackVector> vector =
- i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
-
- // clang-format off
- ExpectedSnippet<InstanceType> snippets[] = {
- {"function bar() { this.value = 0; }\n"
- "function f() { return new bar(); }\n"
- "f()",
- 1 * kPointerSize,
- 1,
- 11,
- {
- B(StackCheck), //
- B(LdaGlobal), U8(0), U8(vector->GetIndex(slot2)), //
- B(Star), R(0), //
- B(New), R(0), R(0), U8(0), //
- B(Return), //
- },
- 1,
- {InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- {"function bar(x) { this.value = 18; this.x = x;}\n"
- "function f() { return new bar(3); }\n"
- "f()",
- 2 * kPointerSize,
- 1,
- 17,
- {
- B(StackCheck), //
- B(LdaGlobal), U8(0), U8(vector->GetIndex(slot2)), //
- B(Star), R(0), //
- B(LdaSmi8), U8(3), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(New), R(0), R(1), U8(1), //
- B(Return), //
- },
- 1,
- {InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- {"function bar(w, x, y, z) {\n"
- " this.value = 18;\n"
- " this.x = x;\n"
- " this.y = y;\n"
- " this.z = z;\n"
- "}\n"
- "function f() { return new bar(3, 4, 5); }\n"
- "f()",
- 4 * kPointerSize,
- 1,
- 25,
- {
- B(StackCheck), //
- B(LdaGlobal), U8(0), U8(vector->GetIndex(slot2)), //
- B(Star), R(0), //
- B(LdaSmi8), U8(3), //
- B(Star), R(1), //
- B(LdaSmi8), U8(4), //
- B(Star), R(2), //
- B(LdaSmi8), U8(5), //
- B(Star), R(3), //
- B(Ldar), R(0), //
- B(New), R(0), R(1), U8(3), //
- B(Return), //
- },
- 1,
- {InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
+ "var a = 1; if (a) { throw 'Error'; };",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecode(snippets[i].code_snippet, "f");
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("Throw.golden"));
}
+TEST(CallNew) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kMixed);
+ printer.set_wrap(false);
+ printer.set_test_function_name("f");
-TEST(ContextVariables) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
- Zone zone;
+ const char* snippets[] = {
+ "function bar() { this.value = 0; }\n"
+ "function f() { return new bar(); }\n"
+ "f();",
- FeedbackVectorSpec feedback_spec(&zone);
- FeedbackVectorSlot slot = feedback_spec.AddCallICSlot();
+ "function bar(x) { this.value = 18; this.x = x;}\n"
+ "function f() { return new bar(3); }\n"
+ "f();",
- Handle<i::TypeFeedbackVector> vector =
- i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
+ "function bar(w, x, y, z) {\n"
+ " this.value = 18;\n"
+ " this.x = x;\n"
+ " this.y = y;\n"
+ " this.z = z;\n"
+ "}\n"
+ "function f() { return new bar(3, 4, 5); }\n"
+ "f();",
+ };
- int closure = Register::function_closure().index();
- int context = Register::current_context().index();
- int new_target = Register::new_target().index();
- int first_context_slot = Context::MIN_CONTEXT_SLOTS;
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("CallNew.golden"));
+}
+TEST(ContextVariables) {
// The wide check below relies on MIN_CONTEXT_SLOTS + 3 + 249 == 256, if this
// ever changes, the REPEAT_XXX should be changed to output the correct number
// of unique variables to trigger the wide slot load / store.
STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS + 3 + 249 == 256);
- int wide_slot = first_context_slot + 3;
-
- // clang-format off
- ExpectedSnippet<InstanceType> snippets[] = {
- {"var a; return function() { a = 1; };",
- 1 * kPointerSize,
- 1,
- 12,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), //
- /* */ R(closure), U8(1), //
- B(PushContext), R(0), //
- B(StackCheck), //
- B(CreateClosure), U8(0), U8(0), //
- B(Return), //
- },
- 1,
- {InstanceType::SHARED_FUNCTION_INFO_TYPE}},
- {"var a = 1; return function() { a = 2; };",
- 1 * kPointerSize,
- 1,
- 17,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), //
- /* */ R(closure), U8(1), //
- B(PushContext), R(0), //
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(StaContextSlot), R(context), U8(first_context_slot), //
- B(CreateClosure), U8(0), U8(0), //
- B(Return), //
- },
- 1,
- {InstanceType::SHARED_FUNCTION_INFO_TYPE}},
- {"var a = 1; var b = 2; return function() { a = 2; b = 3 };",
- 1 * kPointerSize,
- 1,
- 22,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), //
- /* */ R(closure), U8(1), //
- B(PushContext), R(0), //
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(StaContextSlot), R(context), U8(first_context_slot), //
- B(LdaSmi8), U8(2), //
- B(StaContextSlot), R(context), U8(first_context_slot + 1), //
- B(CreateClosure), U8(0), U8(0), //
- B(Return), //
- },
- 1,
- {InstanceType::SHARED_FUNCTION_INFO_TYPE}},
- {"var a; (function() { a = 2; })(); return a;",
- 3 * kPointerSize,
- 1,
- 25,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), //
- /* */ R(closure), U8(1), //
- B(PushContext), R(0), //
- B(StackCheck), //
- B(LdaUndefined), //
- B(Star), R(2), //
- B(CreateClosure), U8(0), U8(0), //
- B(Star), R(1), //
- B(Call), R(1), R(2), U8(1), U8(vector->GetIndex(slot)), //
- B(LdaContextSlot), R(context), U8(first_context_slot), //
- B(Return), //
- },
- 1,
- {InstanceType::SHARED_FUNCTION_INFO_TYPE}},
- {"'use strict'; let a = 1; { let b = 2; return function() { a + b; }; }",
- 4 * kPointerSize,
- 1,
- 47,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), //
- /* */ R(closure), U8(1), //
- B(PushContext), R(0), //
- B(LdaTheHole), //
- B(StaContextSlot), R(context), U8(first_context_slot), //
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(StaContextSlot), R(context), U8(first_context_slot), //
- B(LdaConstant), U8(0), //
- B(Star), R(2), //
- B(Ldar), R(closure), //
- B(Star), R(3), //
- B(CallRuntime), U16(Runtime::kPushBlockContext), R(2), U8(2), //
- B(PushContext), R(1), //
- B(LdaTheHole), //
- B(StaContextSlot), R(context), U8(first_context_slot), //
- B(LdaSmi8), U8(2), //
- B(StaContextSlot), R(context), U8(first_context_slot), //
- B(CreateClosure), U8(1), U8(0), //
- B(PopContext), R(0), //
- B(Return), //
- },
- 2,
- {InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE}},
- {"'use strict';\n"
- REPEAT_249_UNIQUE_VARS()
- "eval();"
- "var b = 100;"
- "return b",
- 3 * kPointerSize,
- 1,
- 1042,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
- /* */ U8(1), //
- B(PushContext), R(0), //
- B(Ldar), THIS(1), //
- B(StaContextSlot), R(context), U8(first_context_slot), //
- B(CreateUnmappedArguments), //
- B(StaContextSlot), R(context), U8(first_context_slot + 1), //
- B(Ldar), R(new_target), //
- B(StaContextSlot), R(context), U8(first_context_slot + 2), //
- B(StackCheck), //
- REPEAT_249(COMMA, //
- B(LdaZero), //
- B(StaContextSlot), R(context), U8(wide_slot++)), //
- B(LdaUndefined), //
- B(Star), R(2), //
- B(LdaGlobal), U8(0), U8(1), //
- B(Star), R(1), //
- B(Call), R(1), R(2), U8(1), U8(0), //
- B(LdaSmi8), U8(100), //
- B(StaContextSlotWide), R(context), U16(256), //
- B(LdaContextSlotWide), R(context), U16(256), //
- B(Return), //
- },
- 1,
- {InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- };
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
-}
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kMixed);
+ const char* snippets[] = {
+ "var a; return function() { a = 1; };",
+ "var a = 1; return function() { a = 2; };",
-TEST(ContextParameters) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- int closure = Register::function_closure().index();
- int context = Register::current_context().index();
- int first_context_slot = Context::MIN_CONTEXT_SLOTS;
-
- // clang-format off
- ExpectedSnippet<InstanceType> snippets[] = {
- {"function f(arg1) { return function() { arg1 = 2; }; }",
- 1 * kPointerSize,
- 2,
- 17,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), //
- /* */ R(closure), U8(1), //
- B(PushContext), R(0), //
- B(Ldar), R(helper.kLastParamIndex), //
- B(StaContextSlot), R(context), U8(first_context_slot), //
- B(StackCheck), //
- B(CreateClosure), U8(0), U8(0), //
- B(Return), //
- },
- 1,
- {InstanceType::SHARED_FUNCTION_INFO_TYPE}},
- {"function f(arg1) { var a = function() { arg1 = 2; }; return arg1; }",
- 2 * kPointerSize,
- 2,
- 22,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), //
- /* */ R(closure), U8(1), //
- B(PushContext), R(1), //
- B(Ldar), R(helper.kLastParamIndex), //
- B(StaContextSlot), R(context), U8(first_context_slot), //
- B(StackCheck), //
- B(CreateClosure), U8(0), U8(0), //
- B(Star), R(0), //
- B(LdaContextSlot), R(context), U8(first_context_slot), //
- B(Return), //
- },
- 1,
- {InstanceType::SHARED_FUNCTION_INFO_TYPE}},
- {"function f(a1, a2, a3, a4) { return function() { a1 = a3; }; }",
- 1 * kPointerSize,
- 5,
- 22,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), //
- /* */ R(closure), U8(1), //
- B(PushContext), R(0), //
- B(Ldar), R(helper.kLastParamIndex - 3), //
- B(StaContextSlot), R(context), U8(first_context_slot + 1), //
- B(Ldar), R(helper.kLastParamIndex -1), //
- B(StaContextSlot), R(context), U8(first_context_slot), //
- B(StackCheck), //
- B(CreateClosure), U8(0), U8(0), //
- B(Return), //
- },
- 1,
- {InstanceType::SHARED_FUNCTION_INFO_TYPE}},
- {"function f() { var self = this; return function() { self = 2; }; }",
- 1 * kPointerSize,
- 1,
- 17,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), //
- /* */ R(closure), U8(1), //
- B(PushContext), R(0), //
- B(StackCheck), //
- B(Ldar), R(helper.kLastParamIndex), //
- B(StaContextSlot), R(context), U8(first_context_slot), //
- B(CreateClosure), U8(0), U8(0), //
- B(Return), //
- },
- 1,
- {InstanceType::SHARED_FUNCTION_INFO_TYPE}},
+ "var a = 1; var b = 2; return function() { a = 2; b = 3 };",
+
+ "var a; (function() { a = 2; })(); return a;",
+
+ "'use strict';\n"
+ "let a = 1;\n"
+ "{ let b = 2; return function() { a + b; }; }",
+
+ "'use strict';\n"
+ REPEAT_249_UNIQUE_VARS()
+ "eval();\n"
+ "var b = 100;\n"
+ "return b",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunction(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets),
+ LoadGolden("ContextVariables.golden"));
}
+TEST(ContextParameters) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kMixed);
+ printer.set_wrap(false);
+ printer.set_test_function_name("f");
-TEST(OuterContextVariables) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- int context = Register::current_context().index();
- int first_context_slot = Context::MIN_CONTEXT_SLOTS;
-
- // clang-format off
- ExpectedSnippet<InstanceType> snippets[] = {
- {"function Outer() {"
- " var outerVar = 1;"
- " function Inner(innerArg) {"
- " this.innerFunc = function() { return outerVar * innerArg; }"
- " }"
- " this.getInnerFunc = function() { return new Inner(1).innerFunc; }"
- "}"
- "var f = new Outer().getInnerFunc();",
- 2 * kPointerSize,
- 1,
- 21,
- {
- B(StackCheck), //
- B(Ldar), R(context), //
- B(Star), R(0), //
- B(LdaContextSlot), R(0), U8(Context::PREVIOUS_INDEX), //
- B(Star), R(0), //
- B(LdaContextSlot), R(0), U8(first_context_slot), //
- B(Star), R(1), //
- B(LdaContextSlot), R(context), U8(first_context_slot), //
- B(Mul), R(1), //
- B(Return), //
- }},
- {"function Outer() {"
- " var outerVar = 1;"
- " function Inner(innerArg) {"
- " this.innerFunc = function() { outerVar = innerArg; }"
- " }"
- " this.getInnerFunc = function() { return new Inner(1).innerFunc; }"
- "}"
- "var f = new Outer().getInnerFunc();",
- 2 * kPointerSize,
- 1,
- 22,
- {
- B(StackCheck), //
- B(LdaContextSlot), R(context), U8(first_context_slot), //
- B(Star), R(0), //
- B(Ldar), R(context), //
- B(Star), R(1), //
- B(LdaContextSlot), R(1), U8(Context::PREVIOUS_INDEX), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(StaContextSlot), R(1), U8(first_context_slot), //
- B(LdaUndefined), //
- B(Return), //
- }},
+ const char* snippets[] = {
+ "function f(arg1) { return function() { arg1 = 2; }; }",
+
+ "function f(arg1) { var a = function() { arg1 = 2; }; return arg1; }",
+
+ "function f(a1, a2, a3, a4) { return function() { a1 = a3; }; }",
+
+ "function f() { var self = this; return function() { self = 2; }; }",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionNoFilter(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets, "", "\nf();"),
+ LoadGolden("ContextParameters.golden"));
}
+TEST(OuterContextVariables) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kMixed);
+ printer.set_wrap(false);
+ printer.set_test_function_name("f");
+
+ const char* snippets[] = {
+ "function Outer() {\n"
+ " var outerVar = 1;\n"
+ " function Inner(innerArg) {\n"
+ " this.innerFunc = function() { return outerVar * innerArg; }\n"
+ " }\n"
+ " this.getInnerFunc = function() { return new Inner(1).innerFunc; }\n"
+ "}\n"
+ "var f = new Outer().getInnerFunc();",
+
+ "function Outer() {\n"
+ " var outerVar = 1;\n"
+ " function Inner(innerArg) {\n"
+ " this.innerFunc = function() { outerVar = innerArg; }\n"
+ " }\n"
+ " this.getInnerFunc = function() { return new Inner(1).innerFunc; }\n"
+ "}\n"
+ "var f = new Outer().getInnerFunc();",
+ };
+
+ CHECK_EQ(BuildActual(printer, snippets, "", "\nf();"),
+ LoadGolden("OuterContextVariables.golden"));
+}
TEST(CountOperators) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
- Zone zone;
-
- FeedbackVectorSpec feedback_spec(&zone);
- FeedbackVectorSlot slot1 = feedback_spec.AddLoadICSlot();
- FeedbackVectorSlot slot2 = feedback_spec.AddStoreICSlot();
- Handle<i::TypeFeedbackVector> vector =
- i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
-
- FeedbackVectorSpec store_feedback_spec(&zone);
- FeedbackVectorSlot store_slot = store_feedback_spec.AddStoreICSlot();
- Handle<i::TypeFeedbackVector> store_vector =
- i::NewTypeFeedbackVector(helper.isolate(), &store_feedback_spec);
-
- int closure = Register::function_closure().index();
- int context = Register::current_context().index();
- int first_context_slot = Context::MIN_CONTEXT_SLOTS;
-
- int object_literal_flags =
- ObjectLiteral::kFastElements | ObjectLiteral::kDisableMementos;
- int array_literal_flags =
- ArrayLiteral::kDisableMementos | ArrayLiteral::kShallowElements;
-
- // clang-format off
- ExpectedSnippet<InstanceType> snippets[] = {
- {"var a = 1; return ++a;",
- 1 * kPointerSize,
- 1,
- 10,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(ToNumber), //
- B(Inc), //
- B(Star), R(0), //
- B(Return), //
- }},
- {"var a = 1; return a++;",
- 2 * kPointerSize,
- 1,
- 14,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(ToNumber), //
- B(Star), R(1), //
- B(Inc), //
- B(Star), R(0), //
- B(Ldar), R(1), //
- B(Return), //
- }},
- {"var a = 1; return --a;",
- 1 * kPointerSize,
- 1,
- 10,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(ToNumber), //
- B(Dec), //
- B(Star), R(0), //
- B(Return), //
- }},
- {"var a = 1; return a--;",
- 2 * kPointerSize,
- 1,
- 14,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(ToNumber), //
- B(Star), R(1), //
- B(Dec), //
- B(Star), R(0), //
- B(Ldar), R(1), //
- B(Return), //
- }},
- {"var a = { val: 1 }; return a.val++;",
- 3 * kPointerSize,
- 1,
- 26,
- {
- B(StackCheck), //
- B(CreateObjectLiteral), U8(0), U8(0), U8(object_literal_flags), //
- B(Star), R(1), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LoadIC), R(1), U8(1), U8(vector->GetIndex(slot1)), //
- B(ToNumber), //
- B(Star), R(2), //
- B(Inc), //
- B(StoreICSloppy), R(1), U8(1), U8(vector->GetIndex(slot2)), //
- B(Ldar), R(2), //
- B(Return), //
- },
- 2,
- {InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- {"var a = { val: 1 }; return --a.val;",
- 2 * kPointerSize,
- 1,
- 22,
- {
- B(StackCheck), //
- B(CreateObjectLiteral), U8(0), U8(0), U8(object_literal_flags), //
- B(Star), R(1), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LoadIC), R(1), U8(1), U8(vector->GetIndex(slot1)), //
- B(ToNumber), //
- B(Dec), //
- B(StoreICSloppy), R(1), U8(1), U8(vector->GetIndex(slot2)), //
- B(Return), //
- },
- 2,
- {InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- {"var name = 'var'; var a = { val: 1 }; return a[name]--;",
- 5 * kPointerSize,
- 1,
- 33,
- {
- B(StackCheck), //
- B(LdaConstant), U8(0), //
- B(Star), R(0), //
- B(CreateObjectLiteral), U8(1), U8(0), U8(object_literal_flags), //
- B(Star), R(2), //
- B(Star), R(1), //
- B(Star), R(2), //
- B(Ldar), R(0), //
- B(Star), R(3), //
- B(KeyedLoadIC), R(2), U8(vector->GetIndex(slot1)), //
- B(ToNumber), //
- B(Star), R(4), //
- B(Dec), //
- B(KeyedStoreICSloppy), R(2), R(3), U8(vector->GetIndex(slot2)), //
- B(Ldar), R(4), //
- B(Return), //
- },
- 2,
- {InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::FIXED_ARRAY_TYPE}},
- {"var name = 'var'; var a = { val: 1 }; return ++a[name];",
- 4 * kPointerSize,
- 1,
- 29,
- {
- B(StackCheck), //
- B(LdaConstant), U8(0), //
- B(Star), R(0), //
- B(CreateObjectLiteral), U8(1), U8(0), U8(object_literal_flags), //
- B(Star), R(2), //
- B(Star), R(1), //
- B(Star), R(2), //
- B(Ldar), R(0), //
- B(Star), R(3), //
- B(KeyedLoadIC), R(2), U8(vector->GetIndex(slot1)), //
- B(ToNumber), //
- B(Inc), //
- B(KeyedStoreICSloppy), R(2), R(3), U8(vector->GetIndex(slot2)), //
- B(Return), //
- },
- 2,
- {InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::FIXED_ARRAY_TYPE}},
- {"var a = 1; var b = function() { return a }; return ++a;",
- 2 * kPointerSize,
- 1,
- 27,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
- /* */ U8(1), //
- B(PushContext), R(1), //
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(StaContextSlot), R(context), U8(first_context_slot), //
- B(CreateClosure), U8(0), U8(0), //
- B(Star), R(0), //
- B(LdaContextSlot), R(context), U8(first_context_slot), //
- B(ToNumber), //
- B(Inc), //
- B(StaContextSlot), R(context), U8(first_context_slot), //
- B(Return), //
- },
- 1,
- {InstanceType::SHARED_FUNCTION_INFO_TYPE}},
- {"var a = 1; var b = function() { return a }; return a--;",
- 3 * kPointerSize,
- 1,
- 31,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
- /* */ U8(1), //
- B(PushContext), R(1), //
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(StaContextSlot), R(context), U8(first_context_slot), //
- B(CreateClosure), U8(0), U8(0), //
- B(Star), R(0), //
- B(LdaContextSlot), R(context), U8(first_context_slot), //
- B(ToNumber), //
- B(Star), R(2), //
- B(Dec), //
- B(StaContextSlot), R(context), U8(first_context_slot), //
- B(Ldar), R(2), //
- B(Return), //
- },
- 1,
- {InstanceType::SHARED_FUNCTION_INFO_TYPE}},
- {"var idx = 1; var a = [1, 2]; return a[idx++] = 2;",
- 4 * kPointerSize,
- 1,
- 28,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(CreateArrayLiteral), U8(0), U8(0), U8(array_literal_flags), //
- B(Star), R(1), //
- B(Star), R(2), //
- B(Ldar), R(0), //
- B(ToNumber), //
- B(Star), R(3), //
- B(Inc), //
- B(Star), R(0), //
- B(LdaSmi8), U8(2), //
- B(KeyedStoreICSloppy), R(2), R(3), //
- /* */ U8(store_vector->GetIndex(store_slot)), //
- B(Return), //
- },
- 1,
- {InstanceType::FIXED_ARRAY_TYPE}},
- };
- // clang-format on
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kMixed);
+ const char* snippets[] = {
+ "var a = 1; return ++a;",
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
-}
+ "var a = 1; return a++;",
+ "var a = 1; return --a;",
-TEST(GlobalCountOperators) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
- Zone zone;
-
- FeedbackVectorSpec feedback_spec(&zone);
- FeedbackVectorSlot slot1 = feedback_spec.AddLoadICSlot();
- FeedbackVectorSlot slot2 = feedback_spec.AddStoreICSlot();
-
- Handle<i::TypeFeedbackVector> vector =
- i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
-
- // clang-format off
- ExpectedSnippet<const char*> snippets[] = {
- {"var global = 1;\nfunction f() { return ++global; }\nf()",
- 0,
- 1,
- 10,
- {
- B(StackCheck), //
- B(LdaGlobal), U8(0), U8(vector->GetIndex(slot1)), //
- B(ToNumber), //
- B(Inc), //
- B(StaGlobalSloppy), U8(0), U8(vector->GetIndex(slot2)), //
- B(Return), //
- },
- 1,
- {"global"}},
- {"var global = 1;\nfunction f() { return global--; }\nf()",
- 1 * kPointerSize,
- 1,
- 14,
- {
- B(StackCheck), //
- B(LdaGlobal), U8(0), U8(vector->GetIndex(slot1)), //
- B(ToNumber), //
- B(Star), R(0), //
- B(Dec), //
- B(StaGlobalSloppy), U8(0), U8(vector->GetIndex(slot2)), //
- B(Ldar), R(0), //
- B(Return),
- },
- 1,
- {"global"}},
- {"unallocated = 1;\nfunction f() { 'use strict'; return --unallocated; }"
- "f()",
- 0,
- 1,
- 10,
- {
- B(StackCheck), //
- B(LdaGlobal), U8(0), U8(vector->GetIndex(slot1)), //
- B(ToNumber), //
- B(Dec), //
- B(StaGlobalStrict), U8(0), U8(vector->GetIndex(slot2)), //
- B(Return), //
- },
- 1,
- {"unallocated"}},
- {"unallocated = 1;\nfunction f() { return unallocated++; }\nf()",
- 1 * kPointerSize,
- 1,
- 14,
- {
- B(StackCheck), //
- B(LdaGlobal), U8(0), U8(vector->GetIndex(slot1)), //
- B(ToNumber), //
- B(Star), R(0), //
- B(Inc), //
- B(StaGlobalSloppy), U8(0), U8(vector->GetIndex(slot2)), //
- B(Ldar), R(0), //
- B(Return),
- },
- 1,
- {"unallocated"}},
+ "var a = 1; return a--;",
+
+ "var a = { val: 1 }; return a.val++;",
+
+ "var a = { val: 1 }; return --a.val;",
+
+ "var name = 'var'; var a = { val: 1 }; return a[name]--;",
+
+ "var name = 'var'; var a = { val: 1 }; return ++a[name];",
+
+ "var a = 1; var b = function() { return a }; return ++a;",
+
+ "var a = 1; var b = function() { return a }; return a--;",
+
+ "var idx = 1; var a = [1, 2]; return a[idx++] = 2;",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecode(snippets[i].code_snippet, "f");
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("CountOperators.golden"));
}
+TEST(GlobalCountOperators) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kString);
+ printer.set_wrap(false);
+ printer.set_test_function_name("f");
-TEST(CompoundExpressions) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
- Zone zone;
-
- int closure = Register::function_closure().index();
- int context = Register::current_context().index();
- int first_context_slot = Context::MIN_CONTEXT_SLOTS;
-
- FeedbackVectorSpec feedback_spec(&zone);
- FeedbackVectorSlot slot1 = feedback_spec.AddLoadICSlot();
- FeedbackVectorSlot slot2 = feedback_spec.AddStoreICSlot();
-
- Handle<i::TypeFeedbackVector> vector =
- i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
-
- int object_literal_flags =
- ObjectLiteral::kFastElements | ObjectLiteral::kDisableMementos;
-
- // clang-format off
- ExpectedSnippet<InstanceType> snippets[] = {
- {"var a = 1; a += 2;",
- 2 * kPointerSize,
- 1,
- 15,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(2), //
- B(Add), R(1), //
- B(Star), R(0), //
- B(LdaUndefined), //
- B(Return), //
- }},
- {"var a = 1; a /= 2;",
- 2 * kPointerSize,
- 1,
- 15,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(2), //
- B(Div), R(1), //
- B(Star), R(0), //
- B(LdaUndefined), //
- B(Return), //
- }},
- {"var a = { val: 2 }; a.name *= 2;",
- 3 * kPointerSize,
- 1,
- 27,
- {
- B(StackCheck), //
- B(CreateObjectLiteral), U8(0), U8(0), U8(object_literal_flags), //
- B(Star), R(1), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LoadIC), R(1), U8(1), U8(vector->GetIndex(slot1)), //
- B(Star), R(2), //
- B(LdaSmi8), U8(2), //
- B(Mul), R(2), //
- B(StoreICSloppy), R(1), U8(1), U8(vector->GetIndex(slot2)), //
- B(LdaUndefined), //
- B(Return), //
- },
- 2,
- {InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- {"var a = { 1: 2 }; a[1] ^= 2;",
- 4 * kPointerSize,
- 1,
- 30,
- {
- B(StackCheck), //
- B(CreateObjectLiteral), U8(0), U8(0), U8(object_literal_flags), //
- B(Star), R(1), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(1), //
- B(Star), R(2), //
- B(KeyedLoadIC), R(1), U8(vector->GetIndex(slot1)), //
- B(Star), R(3), //
- B(LdaSmi8), U8(2), //
- B(BitwiseXor), R(3), //
- B(KeyedStoreICSloppy), R(1), R(2), U8(vector->GetIndex(slot2)), //
- B(LdaUndefined), //
- B(Return), //
- },
- 1,
- {InstanceType::FIXED_ARRAY_TYPE}},
- {"var a = 1; (function f() { return a; }); a |= 24;",
- 2 * kPointerSize,
- 1,
- 30,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
- /* */ U8(1), //
- B(PushContext), R(0), //
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(StaContextSlot), R(context), U8(first_context_slot), //
- B(CreateClosure), U8(0), U8(0), //
- B(LdaContextSlot), R(context), U8(first_context_slot), //
- B(Star), R(1), //
- B(LdaSmi8), U8(24), //
- B(BitwiseOr), R(1), //
- B(StaContextSlot), R(context), U8(first_context_slot), //
- B(LdaUndefined), //
- B(Return), //
- },
- 1,
- {InstanceType::SHARED_FUNCTION_INFO_TYPE}},
+ const char* snippets[] = {
+ "var global = 1;\n"
+ "function f() { return ++global; }\n"
+ "f();",
+
+ "var global = 1;\n"
+ "function f() { return global--; }\n"
+ "f();",
+
+ "unallocated = 1;\n"
+ "function f() { 'use strict'; return --unallocated; }\n"
+ "f();",
+
+ "unallocated = 1;\n"
+ "function f() { return unallocated++; }\n"
+ "f();",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets),
+ LoadGolden("GlobalCountOperators.golden"));
}
+TEST(CompoundExpressions) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kMixed);
+ const char* snippets[] = {
+ "var a = 1; a += 2;",
-TEST(GlobalCompoundExpressions) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
- Zone zone;
-
- FeedbackVectorSpec feedback_spec(&zone);
- FeedbackVectorSlot slot1 = feedback_spec.AddLoadICSlot();
- FeedbackVectorSlot slot2 = feedback_spec.AddStoreICSlot();
-
- Handle<i::TypeFeedbackVector> vector =
- i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
-
- // clang-format off
- ExpectedSnippet<const char*> snippets[] = {
- {"var global = 1;\nfunction f() { return global &= 1; }\nf()",
- 1 * kPointerSize,
- 1,
- 14,
- {
- B(StackCheck), //
- B(LdaGlobal), U8(0), U8(vector->GetIndex(slot1)), //
- B(Star), R(0), //
- B(LdaSmi8), U8(1), //
- B(BitwiseAnd), R(0), //
- B(StaGlobalSloppy), U8(0), U8(vector->GetIndex(slot2)), //
- B(Return), //
- },
- 1,
- {"global"}},
- {"unallocated = 1;\nfunction f() { return unallocated += 1; }\nf()",
- 1 * kPointerSize,
- 1,
- 14,
- {
- B(StackCheck), //
- B(LdaGlobal), U8(0), U8(vector->GetIndex(slot1)), //
- B(Star), R(0), //
- B(LdaSmi8), U8(1), //
- B(Add), R(0), //
- B(StaGlobalSloppy), U8(0), U8(vector->GetIndex(slot2)), //
- B(Return), //
- },
- 1,
- {"unallocated"}},
+ "var a = 1; a /= 2;",
+
+ "var a = { val: 2 }; a.name *= 2;",
+
+ "var a = { 1: 2 }; a[1] ^= 2;",
+
+ "var a = 1; (function f() { return a; }); a |= 24;",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecode(snippets[i].code_snippet, "f");
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets),
+ LoadGolden("CompoundExpressions.golden"));
}
+TEST(GlobalCompoundExpressions) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kString);
+ printer.set_wrap(false);
+ printer.set_test_function_name("f");
-TEST(CreateArguments) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
- Zone zone;
-
- int closure = Register::function_closure().index();
- int context = Register::current_context().index();
- int first_context_slot = Context::MIN_CONTEXT_SLOTS;
-
- FeedbackVectorSpec feedback_spec(&zone);
- FeedbackVectorSlot slot = feedback_spec.AddKeyedLoadICSlot();
-
- Handle<i::TypeFeedbackVector> vector =
- i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
-
- // clang-format off
- ExpectedSnippet<const char*> snippets[] = {
- {"function f() { return arguments; }",
- 1 * kPointerSize,
- 1,
- 7,
- {
- B(CreateMappedArguments), //
- B(Star), R(0), //
- B(StackCheck), //
- B(Ldar), R(0), //
- B(Return), //
- }},
- {"function f() { return arguments[0]; }",
- 2 * kPointerSize,
- 1,
- 13,
- {
- B(CreateMappedArguments), //
- B(Star), R(0), //
- B(StackCheck), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(LdaZero), //
- B(KeyedLoadIC), R(1), U8(vector->GetIndex(slot)), //
- B(Return), //
- }},
- {"function f() { 'use strict'; return arguments; }",
- 1 * kPointerSize,
- 1,
- 7,
- {
- B(CreateUnmappedArguments), //
- B(Star), R(0), //
- B(StackCheck), //
- B(Ldar), R(0), //
- B(Return), //
- }},
- {"function f(a) { return arguments[0]; }",
- 3 * kPointerSize,
- 2,
- 25,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
- /* */ U8(1), //
- B(PushContext), R(1), //
- B(Ldar), R(BytecodeGeneratorHelper::kLastParamIndex), //
- B(StaContextSlot), R(context), U8(first_context_slot), //
- B(CreateMappedArguments), //
- B(Star), R(0), //
- B(StackCheck), //
- B(Ldar), R(0), //
- B(Star), R(2), //
- B(LdaZero), //
- B(KeyedLoadIC), R(2), U8(vector->GetIndex(slot)), //
- B(Return), //
- }},
- {"function f(a, b, c) { return arguments; }",
- 2 * kPointerSize,
- 4,
- 29,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
- /* */ U8(1), //
- B(PushContext), R(1), //
- B(Ldar), R(BytecodeGeneratorHelper::kLastParamIndex - 2), //
- B(StaContextSlot), R(context), U8(first_context_slot + 2), //
- B(Ldar), R(BytecodeGeneratorHelper::kLastParamIndex - 1), //
- B(StaContextSlot), R(context), U8(first_context_slot + 1), //
- B(Ldar), R(BytecodeGeneratorHelper::kLastParamIndex), //
- B(StaContextSlot), R(context), U8(first_context_slot), //
- B(CreateMappedArguments), //
- B(Star), R(0), //
- B(StackCheck), //
- B(Ldar), R(0), //
- B(Return), //
- }},
- {"function f(a, b, c) { 'use strict'; return arguments; }",
- 1 * kPointerSize,
- 4,
- 7,
- {
- B(CreateUnmappedArguments), //
- B(Star), R(0), //
- B(StackCheck), //
- B(Ldar), R(0), //
- B(Return), //
- }},
+ const char* snippets[] = {
+ "var global = 1;\n"
+ "function f() { return global &= 1; }\n"
+ "f();",
+
+ "unallocated = 1;\n"
+ "function f() { return unallocated += 1; }\n"
+ "f();",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunction(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets),
+ LoadGolden("GlobalCompoundExpressions.golden"));
}
-TEST(CreateRestParameter) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
- Zone zone;
-
- FeedbackVectorSpec feedback_spec(&zone);
- FeedbackVectorSlot slot = feedback_spec.AddKeyedLoadICSlot();
- FeedbackVectorSlot slot1 = feedback_spec.AddKeyedLoadICSlot();
-
- Handle<i::TypeFeedbackVector> vector =
- i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
-
- // clang-format off
- ExpectedSnippet<int> snippets[] = {
- {"function f(...restArgs) { return restArgs; }",
- 1 * kPointerSize,
- 1,
- 7,
- {
- B(CreateRestParameter), //
- B(Star), R(0), //
- B(StackCheck), //
- B(Ldar), R(0), //
- B(Return), //
- },
- 0,
- {}},
- {"function f(a, ...restArgs) { return restArgs; }",
- 2 * kPointerSize,
- 2,
- 14,
- {
- B(CreateRestParameter), //
- B(Star), R(0), //
- B(LdaTheHole), //
- B(Star), R(1), //
- B(StackCheck), //
- B(Ldar), A(1, 2), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(Return), //
- },
- 0,
- {}},
- {"function f(a, ...restArgs) { return restArgs[0]; }",
- 3 * kPointerSize,
- 2,
- 20,
- {
- B(CreateRestParameter), //
- B(Star), R(0), //
- B(LdaTheHole), //
- B(Star), R(1), //
- B(StackCheck), //
- B(Ldar), A(1, 2), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(Star), R(2), //
- B(LdaZero), //
- B(KeyedLoadIC), R(2), U8(vector->GetIndex(slot)), //
- B(Return), //
- },
- 0,
- {}},
- {"function f(a, ...restArgs) { return restArgs[0] + arguments[0]; }",
- 5 * kPointerSize,
- 2,
- 35,
- {
- B(CreateUnmappedArguments), //
- B(Star), R(0), //
- B(CreateRestParameter), //
- B(Star), R(1), //
- B(LdaTheHole), //
- B(Star), R(2), //
- B(StackCheck), //
- B(Ldar), A(1, 2), //
- B(Star), R(2), //
- B(Ldar), R(1), //
- B(Star), R(3), //
- B(LdaZero), //
- B(KeyedLoadIC), R(3), U8(vector->GetIndex(slot)), //
- B(Star), R(4), //
- B(Ldar), R(0), //
- B(Star), R(3), //
- B(LdaZero), //
- B(KeyedLoadIC), R(3), U8(vector->GetIndex(slot1)), //
- B(Add), R(4), //
- B(Return), //
- },
- 0,
- {}},
+TEST(CreateArguments) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kString);
+ printer.set_wrap(false);
+ printer.set_test_function_name("f");
+
+ const char* snippets[] = {
+ "function f() { return arguments; }",
+
+ "function f() { return arguments[0]; }",
+
+ "function f() { 'use strict'; return arguments; }",
+
+ "function f(a) { return arguments[0]; }",
+
+ "function f(a, b, c) { return arguments; }",
+
+ "function f(a, b, c) { 'use strict'; return arguments; }",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunction(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets, "", "\nf();"),
+ LoadGolden("CreateArguments.golden"));
}
-TEST(IllegalRedeclaration) {
- bool old_legacy_const_flag = FLAG_legacy_const;
- FLAG_legacy_const = true;
-
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- CHECK_GE(MessageTemplate::kVarRedeclaration, 128);
- // Must adapt bytecode if this changes.
-
- // clang-format off
- ExpectedSnippet<Handle<Object>, 2> snippets[] = {
- {"const a = 1; { var a = 2; }",
- 3 * kPointerSize,
- 1,
- 14,
- {
- B(LdaConstant), U8(0), //
- B(Star), R(1), //
- B(LdaConstant), U8(1), //
- B(Star), R(2), //
- B(CallRuntime), U16(Runtime::kNewSyntaxError), R(1), U8(2), //
- B(Throw), //
- },
- 2,
- {helper.factory()->NewNumberFromInt(MessageTemplate::kVarRedeclaration),
- helper.factory()->NewStringFromAsciiChecked("a")}},
- };
- // clang-format on
+TEST(CreateRestParameter) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kNumber);
+ printer.set_wrap(false);
+ printer.set_test_function_name("f");
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ const char* snippets[] = {
+ "function f(...restArgs) { return restArgs; }",
- FLAG_legacy_const = old_legacy_const_flag;
-}
+ "function f(a, ...restArgs) { return restArgs; }",
+ "function f(a, ...restArgs) { return restArgs[0]; }",
-TEST(ForIn) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
- Zone zone;
-
- int simple_flags =
- ArrayLiteral::kDisableMementos | ArrayLiteral::kShallowElements;
- int deep_elements_flags =
- ObjectLiteral::kFastElements | ObjectLiteral::kDisableMementos;
-
- FeedbackVectorSpec feedback_spec(&zone);
- feedback_spec.AddStoreICSlot();
- FeedbackVectorSlot slot2 = feedback_spec.AddStoreICSlot();
- FeedbackVectorSlot slot3 = feedback_spec.AddStoreICSlot();
- FeedbackVectorSlot slot4 = feedback_spec.AddStoreICSlot();
- Handle<i::TypeFeedbackVector> vector =
- i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
-
- // clang-format off
- ExpectedSnippet<InstanceType> snippets[] = {
- {"for (var p in null) {}",
- 2 * kPointerSize,
- 1,
- 3,
- {
- B(StackCheck), //
- B(LdaUndefined), //
- B(Return) //
- },
- 0},
- {"for (var p in undefined) {}",
- 2 * kPointerSize,
- 1,
- 3,
- {
- B(StackCheck), //
- B(LdaUndefined), //
- B(Return) //
- },
- 0},
- {"for (var p in undefined) {}",
- 2 * kPointerSize,
- 1,
- 3,
- {
- B(StackCheck), //
- B(LdaUndefined), //
- B(Return) //
- },
- 0},
- {"var x = 'potatoes';\n"
- "for (var p in x) { return p; }",
- 8 * kPointerSize,
- 1,
- 46,
- {
- B(StackCheck), //
- B(LdaConstant), U8(0), //
- B(Star), R(1), //
- B(JumpIfUndefined), U8(39), //
- B(JumpIfNull), U8(37), //
- B(ToObject), //
- B(JumpIfNull), U8(34), //
- B(Star), R(3), //
- B(ForInPrepare), R(4), //
- B(LdaZero), //
- B(Star), R(7), //
- B(ForInDone), R(7), R(6), //
- B(JumpIfTrue), U8(22), //
- B(ForInNext), R(3), R(7), R(4), //
- B(JumpIfUndefined), U8(10), //
- B(Star), R(0), //
- B(StackCheck), //
- B(Ldar), R(0), //
- B(Star), R(2), //
- B(Return), //
- B(ForInStep), R(7), //
- B(Star), R(7), //
- B(Jump), U8(-23), //
- B(LdaUndefined), //
- B(Return), //
- },
- 1,
- {InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- {"var x = 0;\n"
- "for (var p in [1,2,3]) { x += p; }",
- 9 * kPointerSize,
- 1,
- 58,
- {
- B(StackCheck), //
- B(LdaZero), //
- B(Star), R(1), //
- B(CreateArrayLiteral), U8(0), U8(0), U8(3), //
- B(JumpIfUndefined), U8(48), //
- B(JumpIfNull), U8(46), //
- B(ToObject), //
- B(JumpIfNull), U8(43), //
- B(Star), R(3), //
- B(ForInPrepare), R(4), //
- B(LdaZero), //
- B(Star), R(7), //
- B(ForInDone), R(7), R(6), //
- B(JumpIfTrue), U8(31), //
- B(ForInNext), R(3), R(7), R(4), //
- B(JumpIfUndefined), U8(19), //
- B(Star), R(0), //
- B(StackCheck), //
- B(Ldar), R(0), //
- B(Star), R(2), //
- B(Ldar), R(1), //
- B(Star), R(8), //
- B(Ldar), R(2), //
- B(Add), R(8), //
- B(Star), R(1), //
- B(ForInStep), R(7), //
- B(Star), R(7), //
- B(Jump), U8(-32), //
- B(LdaUndefined), //
- B(Return), //
- },
- 1,
- {InstanceType::FIXED_ARRAY_TYPE}},
- {"var x = { 'a': 1, 'b': 2 };\n"
- "for (x['a'] in [10, 20, 30]) {\n"
- " if (x['a'] == 10) continue;\n"
- " if (x['a'] == 20) break;\n"
- "}",
- 8 * kPointerSize,
- 1,
- 95,
- {
- B(StackCheck), //
- B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
- B(Star), R(1), //
- B(Star), R(0), //
- B(CreateArrayLiteral), U8(1), U8(1), U8(simple_flags), //
- B(JumpIfUndefined), U8(80), //
- B(JumpIfNull), U8(78), //
- B(ToObject), //
- B(JumpIfNull), U8(75), //
- B(Star), R(1), //
- B(ForInPrepare), R(2), //
- B(LdaZero), //
- B(Star), R(5), //
- B(ForInDone), R(5), R(4), //
- B(JumpIfTrue), U8(63), //
- B(ForInNext), R(1), R(5), R(2), //
- B(JumpIfUndefined), U8(51), //
- B(Star), R(6), //
- B(Ldar), R(0), //
- B(Star), R(7), //
- B(Ldar), R(6), //
- B(StoreICSloppy), R(7), U8(2), U8(vector->GetIndex(slot4)), //
- B(StackCheck), //
- B(Ldar), R(0), //
- B(Star), R(6), //
- B(LoadIC), R(6), U8(2), U8(vector->GetIndex(slot2)), //
- B(Star), R(7), //
- B(LdaSmi8), U8(10), //
- B(TestEqual), R(7), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(20), //
- B(Ldar), R(0), //
- B(Star), R(6), //
- B(LoadIC), R(6), U8(2), U8(vector->GetIndex(slot3)), //
- B(Star), R(7), //
- B(LdaSmi8), U8(20), //
- B(TestEqual), R(7), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(8), //
- B(ForInStep), R(5), //
- B(Star), R(5), //
- B(Jump), U8(-64), //
- B(LdaUndefined), //
- B(Return), //
- },
- 3,
- {InstanceType::FIXED_ARRAY_TYPE, InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- {"var x = [ 10, 11, 12 ] ;\n"
- "for (x[0] in [1,2,3]) { return x[3]; }",
- 9 * kPointerSize,
- 1,
- 70,
- {
- B(StackCheck), //
- B(CreateArrayLiteral), U8(0), U8(0), U8(simple_flags), //
- B(Star), R(0), //
- B(CreateArrayLiteral), U8(1), U8(1), U8(simple_flags), //
- B(JumpIfUndefined), U8(57), //
- B(JumpIfNull), U8(55), //
- B(ToObject), //
- B(JumpIfNull), U8(52), //
- B(Star), R(1), //
- B(ForInPrepare), R(2), //
- B(LdaZero), //
- B(Star), R(5), //
- B(ForInDone), R(5), R(4), //
- B(JumpIfTrue), U8(40), //
- B(ForInNext), R(1), R(5), R(2), //
- B(JumpIfUndefined), U8(28), //
- B(Star), R(6), //
- B(Ldar), R(0), //
- B(Star), R(7), //
- B(LdaZero), //
- B(Star), R(8), //
- B(Ldar), R(6), //
- B(KeyedStoreICSloppy), R(7), R(8), U8(vector->GetIndex(slot3)), //
- B(StackCheck), //
- B(Ldar), R(0), //
- B(Star), R(6), //
- B(LdaSmi8), U8(3), //
- B(KeyedLoadIC), R(6), U8(vector->GetIndex(slot2)), //
- B(Return), //
- B(ForInStep), R(5), //
- B(Star), R(5), //
- B(Jump), U8(-41), //
- B(LdaUndefined), //
- B(Return), //
- },
- 2,
- {InstanceType::FIXED_ARRAY_TYPE, InstanceType::FIXED_ARRAY_TYPE}},
+ "function f(a, ...restArgs) { return restArgs[0] + arguments[0]; }",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets, "", "\nf();"),
+ LoadGolden("CreateRestParameter.golden"));
}
+TEST(ForIn) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kMixed);
+ const char* snippets[] = {
+ "for (var p in null) {}",
+
+ "for (var p in undefined) {}",
+
+ "for (var p in undefined) {}",
-// TODO(rmcilroy): Do something about this; new bytecode is too large
-// (150+ instructions) to adapt manually.
-DISABLED_TEST(ForOf) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
- Zone zone;
-
- int array_literal_flags =
- ArrayLiteral::kDisableMementos | ArrayLiteral::kShallowElements;
- int object_literal_flags =
- ObjectLiteral::kFastElements | ObjectLiteral::kDisableMementos;
-
- FeedbackVectorSpec feedback_spec(&zone);
- FeedbackVectorSlot slot1 = feedback_spec.AddCallICSlot();
- FeedbackVectorSlot slot2 = feedback_spec.AddKeyedLoadICSlot();
- FeedbackVectorSlot slot3 = feedback_spec.AddCallICSlot();
- FeedbackVectorSlot slot4 = feedback_spec.AddLoadICSlot();
- FeedbackVectorSlot slot5 = feedback_spec.AddLoadICSlot();
- FeedbackVectorSlot slot6 = feedback_spec.AddLoadICSlot();
- FeedbackVectorSlot slot7 = feedback_spec.AddStoreICSlot();
- FeedbackVectorSlot slot8 = feedback_spec.AddLoadICSlot();
- Handle<i::TypeFeedbackVector> vector =
- i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
-
- // clang-format off
- ExpectedSnippet<InstanceType, 8> snippets[] = {
- {"for (var p of [0, 1, 2]) {}",
- 7 * kPointerSize,
- 1,
- 86,
- {
- B(StackCheck), //
- B(CreateArrayLiteral), U8(0), U8(0), U8(array_literal_flags), //
- B(Star), R(5), //
- B(LdaConstant), U8(1), //
- B(KeyedLoadIC), R(5), U8(vector->GetIndex(slot2)), //
- B(Star), R(4), //
- B(Call), R(4), R(5), U8(1), U8(vector->GetIndex(slot1)), //
- B(Star), R(1), //
- B(Ldar), R(1), //
- B(Star), R(6), //
- B(LoadIC), R(6), U8(2), U8(vector->GetIndex(slot4)), //
- B(Star), R(5), //
- B(Call), R(5), R(6), U8(1), U8(vector->GetIndex(slot3)), //
- B(Star), R(2), //
- B(Star), R(4), //
- B(CallRuntime), U16(Runtime::kInlineIsJSReceiver), R(4), U8(1), //
- B(LogicalNot), //
- B(JumpIfFalse), U8(11), //
- B(Ldar), R(2), //
- B(Star), R(4), //
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), //
- /* */ R(4), U8(1), //
- B(Ldar), R(2), //
- B(Star), R(4), //
- B(LoadIC), R(4), U8(3), U8(vector->GetIndex(slot5)), //
- B(JumpIfToBooleanTrue), U8(19), //
- B(Ldar), R(2), //
- B(Star), R(4), //
- B(LoadIC), R(4), U8(4), U8(vector->GetIndex(slot6)), //
- B(Star), R(0), //
- B(StackCheck), //
- B(Ldar), R(0), //
- B(Star), R(3), //
- B(Jump), U8(-61), //
- B(LdaUndefined), //
- B(Return), //
- },
- 5,
- {InstanceType::FIXED_ARRAY_TYPE, InstanceType::SYMBOL_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- {"var x = 'potatoes';\n"
- "for (var p of x) { return p; }",
- 8 * kPointerSize,
- 1,
- 85,
- {
- B(StackCheck), //
- B(LdaConstant), U8(0), //
- B(Star), R(3), //
- B(Star), R(6), //
- B(LdaConstant), U8(1), //
- B(KeyedLoadIC), R(6), U8(vector->GetIndex(slot2)), //
- B(Star), R(5), //
- B(Call), R(5), R(6), U8(1), U8(vector->GetIndex(slot1)), //
- B(Star), R(1), //
- B(Ldar), R(1), //
- B(Star), R(7), //
- B(LoadIC), R(7), U8(2), U8(vector->GetIndex(slot4)), //
- B(Star), R(6), //
- B(Call), R(6), R(7), U8(1), U8(vector->GetIndex(slot3)), //
- B(Star), R(2), //
- B(Star), R(5), //
- B(CallRuntime), U16(Runtime::kInlineIsJSReceiver), R(5), U8(1), //
- B(LogicalNot), //
- B(JumpIfFalse), U8(11), //
- B(Ldar), R(2), //
- B(Star), R(5), //
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), //
- /* */ R(5), U8(1), //
- B(Ldar), R(2), //
- B(Star), R(5), //
- B(LoadIC), R(5), U8(3), U8(vector->GetIndex(slot5)), //
- B(JumpIfToBooleanTrue), U8(18), //
- B(Ldar), R(2), //
- B(Star), R(5), //
- B(LoadIC), R(5), U8(4), U8(vector->GetIndex(slot6)), //
- B(Star), R(0), //
- B(StackCheck), //
- B(Ldar), R(0), //
- B(Star), R(4), //
- B(Return), //
- B(LdaUndefined), //
- B(Return), //
- },
- 5,
- {InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::SYMBOL_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- {"for (var x of [10, 20, 30]) {\n"
- " if (x == 10) continue;\n"
- " if (x == 20) break;\n"
- "}",
- 7 * kPointerSize,
- 1,
- 108,
- {
- B(StackCheck), //
- B(CreateArrayLiteral), U8(0), U8(0), U8(array_literal_flags), //
- B(Star), R(5), //
- B(LdaConstant), U8(1), //
- B(KeyedLoadIC), R(5), U8(vector->GetIndex(slot2)), //
- B(Star), R(4), //
- B(Call), R(4), R(5), U8(1), U8(vector->GetIndex(slot1)), //
- B(Star), R(1), //
- B(Ldar), R(1), //
- B(Star), R(6), //
- B(LoadIC), R(6), U8(2), U8(vector->GetIndex(slot4)), //
- B(Star), R(5), //
- B(Call), R(5), R(6), U8(1), U8(vector->GetIndex(slot3)), //
- B(Star), R(2), //
- B(Star), R(4), //
- B(CallRuntime), U16(Runtime::kInlineIsJSReceiver), R(4), U8(1), //
- B(LogicalNot), //
- B(JumpIfFalse), U8(11), //
- B(Ldar), R(2), //
- B(Star), R(4), //
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), //
- /* */ R(4), U8(1), //
- B(Ldar), R(2), //
- B(Star), R(4), //
- B(LoadIC), R(4), U8(3), U8(vector->GetIndex(slot5)), //
- B(JumpIfToBooleanTrue), U8(41), //
- B(Ldar), R(2), //
- B(Star), R(4), //
- B(LoadIC), R(4), U8(4), U8(vector->GetIndex(slot6)), //
- B(Star), R(0), //
- B(StackCheck), //
- B(Ldar), R(0), //
- B(Star), R(3), //
- B(Star), R(4), //
- B(LdaSmi8), U8(10), //
- B(TestEqual), R(4), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(-69), //
- B(Ldar), R(3), //
- B(Star), R(4), //
- B(LdaSmi8), U8(20), //
- B(TestEqual), R(4), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(4), //
- B(Jump), U8(-83), //
- B(LdaUndefined), //
- B(Return), //
- },
- 5,
- {InstanceType::FIXED_ARRAY_TYPE, InstanceType::SYMBOL_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- {"var x = { 'a': 1, 'b': 2 };\n"
- "for (x['a'] of [1,2,3]) { return x['a']; }",
- 6 * kPointerSize,
- 1,
- 103,
- {
- B(StackCheck), //
- B(CreateObjectLiteral), U8(0), U8(0), U8(object_literal_flags), //
- B(Star), R(3), //
- B(Star), R(2), //
- B(CreateArrayLiteral), U8(1), U8(1), U8(array_literal_flags), //
- B(Star), R(4), //
- B(LdaConstant), U8(2), //
- B(KeyedLoadIC), R(4), U8(vector->GetIndex(slot2)), //
- B(Star), R(3), //
- B(Call), R(3), R(4), U8(1), U8(vector->GetIndex(slot1)), //
- B(Star), R(0), //
- B(Ldar), R(0), //
- B(Star), R(5), //
- B(LoadIC), R(5), U8(3), U8(vector->GetIndex(slot4)), //
- B(Star), R(4), //
- B(Call), R(4), R(5), U8(1), U8(vector->GetIndex(slot3)), //
- B(Star), R(1), //
- B(Star), R(3), //
- B(CallRuntime), U16(Runtime::kInlineIsJSReceiver), R(3), U8(1), //
- B(LogicalNot), //
- B(JumpIfFalse), U8(11), //
- B(Ldar), R(1), //
- B(Star), R(3), //
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), //
- /* */ R(3), U8(1), //
- B(Ldar), R(1), //
- B(Star), R(3), //
- B(LoadIC), R(3), U8(4), U8(vector->GetIndex(slot5)), //
- B(JumpIfToBooleanTrue), U8(28), //
- B(Ldar), R(2), //
- B(Star), R(3), //
- B(Ldar), R(1), //
- B(Star), R(4), //
- B(LoadIC), R(4), U8(5), U8(vector->GetIndex(slot6)), //
- B(StoreICSloppy), R(3), U8(6), U8(vector->GetIndex(slot7)), //
- B(StackCheck), //
- B(Ldar), R(2), //
- B(Star), R(3), //
- B(LoadIC), R(3), U8(6), U8(vector->GetIndex(slot8)), //
- B(Return), //
- B(LdaUndefined), //
- B(Return), //
- },
- 7,
- {InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::SYMBOL_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
+ "var x = 'potatoes';\n"
+ "for (var p in x) { return p; }",
+
+ "var x = 0;\n"
+ "for (var p in [1,2,3]) { x += p; }",
+
+ "var x = { 'a': 1, 'b': 2 };\n"
+ "for (x['a'] in [10, 20, 30]) {\n"
+ " if (x['a'] == 10) continue;\n"
+ " if (x['a'] == 20) break;\n"
+ "}",
+
+ "var x = [ 10, 11, 12 ] ;\n"
+ "for (x[0] in [1,2,3]) { return x[3]; }",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("ForIn.golden"));
}
+TEST(ForOf) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kMixed);
+ const char* snippets[] = {
+ "for (var p of [0, 1, 2]) {}",
-TEST(Conditional) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- // clang-format off
- ExpectedSnippet<int> snippets[] = {
- {"return 1 ? 2 : 3;",
- 0,
- 1,
- 12,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(JumpIfToBooleanFalse), U8(6), //
- B(LdaSmi8), U8(2), //
- B(Jump), U8(4), //
- B(LdaSmi8), U8(3), //
- B(Return), //
- }},
- {"return 1 ? 2 ? 3 : 4 : 5;",
- 0,
- 1,
- 20,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(JumpIfToBooleanFalse), U8(14), //
- B(LdaSmi8), U8(2), //
- B(JumpIfToBooleanFalse), U8(6), //
- B(LdaSmi8), U8(3), //
- B(Jump), U8(4), //
- B(LdaSmi8), U8(4), //
- B(Jump), U8(4), //
- B(LdaSmi8), U8(5), //
- B(Return), //
- }},
+ "var x = 'potatoes';\n"
+ "for (var p of x) { return p; }",
+
+ "for (var x of [10, 20, 30]) {\n"
+ " if (x == 10) continue;\n"
+ " if (x == 20) break;\n"
+ "}",
+
+ "var x = { 'a': 1, 'b': 2 };\n"
+ "for (x['a'] of [1,2,3]) { return x['a']; }",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("ForOf.golden"));
}
+TEST(Conditional) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kNumber);
+ const char* snippets[] = {
+ "return 1 ? 2 : 3;",
-TEST(Switch) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- // clang-format off
- ExpectedSnippet<int> snippets[] = {
- {"var a = 1;\n"
- "switch(a) {\n"
- " case 1: return 2;\n"
- " case 2: return 3;\n"
- "}\n",
- 3 * kPointerSize,
- 1,
- 31,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(1), // The tag variable is allocated as a
- B(Star), R(0), // local by the parser, hence the store
- B(Star), R(2), // to another local register.
- B(LdaSmi8), U8(1), //
- B(TestEqualStrict), R(2), //
- B(JumpIfTrue), U8(10), //
- B(LdaSmi8), U8(2), //
- B(TestEqualStrict), R(2), //
- B(JumpIfTrue), U8(7), //
- B(Jump), U8(8), //
- B(LdaSmi8), U8(2), //
- B(Return), //
- B(LdaSmi8), U8(3), //
- B(Return), //
- B(LdaUndefined), //
- B(Return), //
- }},
- {"var a = 1;\n"
- "switch(a) {\n"
- " case 1: a = 2; break;\n"
- " case 2: a = 3; break;\n"
- "}\n",
- 3 * kPointerSize,
- 1,
- 37,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(1), //
- B(Star), R(0), //
- B(Star), R(2), //
- B(LdaSmi8), U8(1), //
- B(TestEqualStrict), R(2), //
- B(JumpIfTrue), U8(10), //
- B(LdaSmi8), U8(2), //
- B(TestEqualStrict), R(2), //
- B(JumpIfTrue), U8(10), //
- B(Jump), U8(14), //
- B(LdaSmi8), U8(2), //
- B(Star), R(1), //
- B(Jump), U8(8), //
- B(LdaSmi8), U8(3), //
- B(Star), R(1), //
- B(Jump), U8(2), //
- B(LdaUndefined), //
- B(Return), //
- }},
- {"var a = 1;\n"
- "switch(a) {\n"
- " case 1: a = 2; // fall-through\n"
- " case 2: a = 3; break;\n"
- "}\n",
- 3 * kPointerSize,
- 1,
- 35,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(1), //
- B(Star), R(0), //
- B(Star), R(2), //
- B(LdaSmi8), U8(1), //
- B(TestEqualStrict), R(2), //
- B(JumpIfTrue), U8(10), //
- B(LdaSmi8), U8(2), //
- B(TestEqualStrict), R(2), //
- B(JumpIfTrue), U8(8), //
- B(Jump), U8(12), //
- B(LdaSmi8), U8(2), //
- B(Star), R(1), //
- B(LdaSmi8), U8(3), //
- B(Star), R(1), //
- B(Jump), U8(2), //
- B(LdaUndefined), //
- B(Return), //
- }},
- {"var a = 1;\n"
- "switch(a) {\n"
- " case 2: break;\n"
- " case 3: break;\n"
- " default: a = 1; break;\n"
- "}\n",
- 3 * kPointerSize,
- 1,
- 35,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(1), //
- B(Star), R(0), //
- B(Star), R(2), //
- B(LdaSmi8), U8(2), //
- B(TestEqualStrict), R(2), //
- B(JumpIfTrue), U8(10), //
- B(LdaSmi8), U8(3), //
- B(TestEqualStrict), R(2), //
- B(JumpIfTrue), U8(6), //
- B(Jump), U8(6), //
- B(Jump), U8(10), //
- B(Jump), U8(8), //
- B(LdaSmi8), U8(1), //
- B(Star), R(1), //
- B(Jump), U8(2), //
- B(LdaUndefined), //
- B(Return), //
- }},
- {"var a = 1;\n"
- "switch(typeof(a)) {\n"
- " case 2: a = 1; break;\n"
- " case 3: a = 2; break;\n"
- " default: a = 3; break;\n"
- "}\n",
- 3 * kPointerSize,
- 1,
- 44,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(1), //
- B(TypeOf), //
- B(Star), R(0), //
- B(Star), R(2), //
- B(LdaSmi8), U8(2), //
- B(TestEqualStrict), R(2), //
- B(JumpIfTrue), U8(10), //
- B(LdaSmi8), U8(3), //
- B(TestEqualStrict), R(2), //
- B(JumpIfTrue), U8(10), //
- B(Jump), U8(14), //
- B(LdaSmi8), U8(1), //
- B(Star), R(1), //
- B(Jump), U8(14), //
- B(LdaSmi8), U8(2), //
- B(Star), R(1), //
- B(Jump), U8(8), //
- B(LdaSmi8), U8(3), //
- B(Star), R(1), //
- B(Jump), U8(2), //
- B(LdaUndefined), //
- B(Return), //
- }},
- {"var a = 1;\n"
- "switch(a) {\n"
- " case typeof(a): a = 1; break;\n"
- " default: a = 2; break;\n"
- "}\n",
- 3 * kPointerSize,
- 1,
- 32,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(1), //
- B(Star), R(0), //
- B(Star), R(2), //
- B(Ldar), R(1), //
- B(TypeOf), //
- B(TestEqualStrict), R(2), //
- B(JumpIfTrue), U8(4), //
- B(Jump), U8(8), //
- B(LdaSmi8), U8(1), //
- B(Star), R(1), //
- B(Jump), U8(8), //
- B(LdaSmi8), U8(2), //
- B(Star), R(1), //
- B(Jump), U8(2), //
- B(LdaUndefined), //
- B(Return), //
- }},
- {"var a = 1;\n"
- "switch(a) {\n"
- " case 1:\n" REPEAT_64(SPACE, " a = 2;")
- "break;\n"
- " case 2: a = 3; break;"
- "}\n",
- 3 * kPointerSize,
- 1,
- 289,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(1), //
- B(Star), R(0), //
- B(Star), R(2), //
- B(LdaSmi8), U8(1), //
- B(TestEqualStrict), R(2), //
- B(JumpIfTrue), U8(10), //
- B(LdaSmi8), U8(2), //
- B(TestEqualStrict), R(2), //
- B(JumpIfTrueConstant), U8(0), //
- B(JumpConstant), U8(1), //
- REPEAT_64(COMMA, //
- B(LdaSmi8), U8(2), //
- B(Star), R(1)), //
- B(Jump), U8(8), //
- B(LdaSmi8), U8(3), //
- B(Star), R(1), //
- B(Jump), U8(2), //
- B(LdaUndefined), //
- B(Return), //
- },
- 2,
- {262, 266}},
- {"var a = 1;\n"
- "switch(a) {\n"
- " case 1: \n"
- " switch(a + 1) {\n"
- " case 2 : a = 1; break;\n"
- " default : a = 2; break;\n"
- " } // fall-through\n"
- " case 2: a = 3;\n"
- "}\n",
- 5 * kPointerSize,
- 1,
- 61,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(2), //
- B(Star), R(0), //
- B(Star), R(3), //
- B(LdaSmi8), U8(1), //
- B(TestEqualStrict), R(3), //
- B(JumpIfTrue), U8(10), //
- B(LdaSmi8), U8(2), //
- B(TestEqualStrict), R(3), //
- B(JumpIfTrue), U8(36), //
- B(Jump), U8(38), //
- B(Ldar), R(2), //
- B(Star), R(4), //
- B(LdaSmi8), U8(1), //
- B(Add), R(4), //
- B(Star), R(1), //
- B(Star), R(4), //
- B(LdaSmi8), U8(2), //
- B(TestEqualStrict), R(4), //
- B(JumpIfTrue), U8(4), //
- B(Jump), U8(8), //
- B(LdaSmi8), U8(1), //
- B(Star), R(2), //
- B(Jump), U8(8), //
- B(LdaSmi8), U8(2), //
- B(Star), R(2), //
- B(Jump), U8(2), //
- B(LdaSmi8), U8(3), //
- B(Star), R(2), //
- B(LdaUndefined), //
- B(Return), //
- }},
+ "return 1 ? 2 ? 3 : 4 : 5;",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("Conditional.golden"));
}
+TEST(Switch) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kNumber);
+ const char* snippets[] = {
+ "var a = 1;\n"
+ "switch(a) {\n"
+ " case 1: return 2;\n"
+ " case 2: return 3;\n"
+ "}",
+
+ "var a = 1;\n"
+ "switch(a) {\n"
+ " case 1: a = 2; break;\n"
+ " case 2: a = 3; break;\n"
+ "}",
+
+ "var a = 1;\n"
+ "switch(a) {\n"
+ " case 1: a = 2; // fall-through\n"
+ " case 2: a = 3; break;\n"
+ "}",
+
+ "var a = 1;\n"
+ "switch(a) {\n"
+ " case 2: break;\n"
+ " case 3: break;\n"
+ " default: a = 1; break;\n"
+ "}",
+
+ "var a = 1;\n"
+ "switch(typeof(a)) {\n"
+ " case 2: a = 1; break;\n"
+ " case 3: a = 2; break;\n"
+ " default: a = 3; break;\n"
+ "}",
+
+ "var a = 1;\n"
+ "switch(a) {\n"
+ " case typeof(a): a = 1; break;\n"
+ " default: a = 2; break;\n"
+ "}",
+
+ "var a = 1;\n"
+ "switch(a) {\n"
+ " case 1:\n"
+ REPEAT_64(" a = 2;\n")
+ " break;\n"
+ " case 2:\n"
+ " a = 3;\n"
+ " break;\n"
+ "}",
+
+ "var a = 1;\n"
+ "switch(a) {\n"
+ " case 1: \n"
+ " switch(a + 1) {\n"
+ " case 2 : a = 1; break;\n"
+ " default : a = 2; break;\n"
+ " } // fall-through\n"
+ " case 2: a = 3;\n"
+ "}",
+ };
+
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("Switch.golden"));
+}
TEST(BasicBlockToBoolean) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- // Check that we generate JumpIfToBoolean if they are at the start of basic
- // blocks.
- // clang-format off
- ExpectedSnippet<int> snippets[] = {
- {"var a = 1; if (a || a < 0) { return 1; }",
- 2 * kPointerSize,
- 1,
- 21,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(JumpIfToBooleanTrue), U8(9), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(LdaZero), //
- B(TestLessThan), R(1), //
- B(JumpIfToBooleanFalse), U8(5), //
- B(LdaSmi8), U8(1), //
- B(Return), //
- B(LdaUndefined), //
- B(Return), //
- }},
- {"var a = 1; if (a && a < 0) { return 1; }",
- 2 * kPointerSize,
- 1,
- 21,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(JumpIfToBooleanFalse), U8(9), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(LdaZero), //
- B(TestLessThan), R(1), //
- B(JumpIfToBooleanFalse), U8(5), //
- B(LdaSmi8), U8(1), //
- B(Return), //
- B(LdaUndefined), //
- B(Return), //
- }},
- {"var a = 1; a = (a || a < 0) ? 2 : 3;",
- 2 * kPointerSize,
- 1,
- 26,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(JumpIfToBooleanTrue), U8(9), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(LdaZero), //
- B(TestLessThan), R(1), //
- B(JumpIfToBooleanFalse), U8(6), //
- B(LdaSmi8), U8(2), //
- B(Jump), U8(4), //
- B(LdaSmi8), U8(3), //
- B(Star), R(0), //
- B(LdaUndefined), //
- B(Return), //
- }},
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kNumber);
+ const char* snippets[] = {
+ "var a = 1; if (a || a < 0) { return 1; }",
+
+ "var a = 1; if (a && a < 0) { return 1; }",
+
+ "var a = 1; a = (a || a < 0) ? 2 : 3;",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets),
+ LoadGolden("BasicBlockToBoolean.golden"));
}
-
TEST(DeadCodeRemoval) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- // clang-format off
- ExpectedSnippet<int> snippets[] = {
- {"return; var a = 1; a();",
- 1 * kPointerSize,
- 1,
- 3,
- {
- B(StackCheck), //
- B(LdaUndefined), //
- B(Return), //
- }},
- {"if (false) { return; }; var a = 1;",
- 1 * kPointerSize,
- 1,
- 7,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(LdaUndefined), //
- B(Return), //
- }},
- {"if (true) { return 1; } else { return 2; };",
- 0,
- 1,
- 4,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Return), //
- }},
- {"var a = 1; if (a) { return 1; }; return 2;",
- 1 * kPointerSize,
- 1,
- 13,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(JumpIfToBooleanFalse), U8(5), //
- B(LdaSmi8), U8(1), //
- B(Return), //
- B(LdaSmi8), U8(2), //
- B(Return), //
- }},
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kNumber);
+ const char* snippets[] = {
+ "return; var a = 1; a();",
+
+ "if (false) { return; }; var a = 1;",
+
+ "if (true) { return 1; } else { return 2; };",
+
+ "var a = 1; if (a) { return 1; }; return 2;",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets),
+ LoadGolden("DeadCodeRemoval.golden"));
}
-
TEST(ThisFunction) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- int closure = Register::function_closure().index();
-
- // clang-format off
- ExpectedSnippet<int> snippets[] = {
- {"var f;\n f = function f() { }",
- 2 * kPointerSize,
- 1,
- 19,
- {
- B(LdaTheHole), //
- B(Star), R(0), //
- B(StackCheck), //
- B(Ldar), R(closure), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(JumpIfNotHole), U8(5), //
- B(Mov), R(1), R(0), //
- B(Ldar), R(1), //
- B(LdaUndefined), //
- B(Return), //
- }},
- {"var f;\n f = function f() { return f; }",
- 2 * kPointerSize,
- 1,
- 23,
- {
- B(LdaTheHole), //
- B(Star), R(0), //
- B(StackCheck), //
- B(Ldar), R(closure), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(JumpIfNotHole), U8(5), //
- B(Mov), R(1), R(0), //
- B(Ldar), R(1), //
- B(Ldar), R(0), //
- B(JumpIfNotHole), U8(3), //
- B(LdaUndefined), //
- B(Return), //
- }},
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kNumber);
+ printer.set_wrap(false);
+ printer.set_test_function_name("f");
+
+ const char* snippets[] = {
+ "var f;\n"
+ "f = function f() {};",
+
+ "var f;\n"
+ "f = function f() { return f; };",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunction(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets, "", "\nf();"),
+ LoadGolden("ThisFunction.golden"));
}
-
TEST(NewTarget) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- int new_target = Register::new_target().index();
-
- // clang-format off
- ExpectedSnippet<InstanceType> snippets[] = {
- {"return new.target;",
- 2 * kPointerSize,
- 1,
- 19,
- {
- B(Ldar), R(new_target), //
- B(Star), R(0), //
- B(StackCheck), //
- B(Ldar), R(0), //
- B(JumpIfNotHole), U8(11), //
- B(LdaConstant), U8(0), //
- B(Star), R(1), //
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(1), U8(1), //
- B(Return), //
- },
- 1,
- {InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- {"new.target;",
- 2 * kPointerSize,
- 1,
- 20,
- {
- B(Ldar), R(new_target), //
- B(Star), R(0), //
- B(StackCheck), //
- B(Ldar), R(0), //
- B(JumpIfNotHole), U8(11), //
- B(LdaConstant), U8(0), //
- B(Star), R(1), //
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(1), U8(1), //
- B(LdaUndefined), //
- B(Return), //
- },
- 1,
- {InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}}};
- // clang-format on
-
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
-}
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kMixed);
+ const char* snippets[] = {
+ "return new.target;",
-TEST(RemoveRedundantLdar) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- // clang-format off
- ExpectedSnippet<int> snippets[] = {
- {"var ld_a = 1;\n" // This test is to check Ldar does not
- "while(true) {\n" // get removed if the preceding Star is
- " ld_a = ld_a + ld_a;\n" // in a different basicblock.
- " if (ld_a > 10) break;\n"
- "}\n"
- "return ld_a;",
- 2 * kPointerSize,
- 1,
- 31,
- {B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(StackCheck), //
- B(Ldar), R(0), // This load should not be removed as it
- B(Star), R(1), // is the target of the branch.
- B(Ldar), R(0), //
- B(Add), R(1), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(10), //
- B(TestGreaterThan), R(1), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(4), //
- B(Jump), U8(-21), //
- B(Ldar), R(0), //
- B(Return)}},
- {"var ld_a = 1;\n"
- "do {\n"
- " ld_a = ld_a + ld_a;\n"
- " if (ld_a > 10) continue;\n"
- "} while(false);\n"
- "return ld_a;",
- 2 * kPointerSize,
- 1,
- 29,
- {B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(StackCheck), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(Add), R(1), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(10), //
- B(TestGreaterThan), R(1), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(2), //
- B(Ldar), R(0), //
- B(Return)}},
- {"var ld_a = 1;\n"
- " ld_a = ld_a + ld_a;\n"
- " return ld_a;",
- 2 * kPointerSize,
- 1,
- 14,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(Add), R(1), //
- B(Star), R(0), //
- B(Return) //
- }},
+ "new.target;",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("NewTarget.golden"));
}
+TEST(RemoveRedundantLdar) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kNumber);
+ const char* snippets[] = {
+ "var ld_a = 1;\n" // This test is to check Ldar does not
+ "while(true) {\n" // get removed if the preceding Star is
+ " ld_a = ld_a + ld_a;\n" // in a different basicblock.
+ " if (ld_a > 10) break;\n"
+ "}\n"
+ "return ld_a;",
+
+ "var ld_a = 1;\n"
+ "do {\n"
+ " ld_a = ld_a + ld_a;\n"
+ " if (ld_a > 10) continue;\n"
+ "} while(false);\n"
+ "return ld_a;",
-TEST(AssignmentsInBinaryExpression) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- // clang-format off
- ExpectedSnippet<const char*> snippets[] = {
- {"var x = 0, y = 1;\n"
- "return (x = 2, y = 3, x = 4, y = 5)",
- 2 * kPointerSize,
- 1,
- 25,
- {
- B(StackCheck), //
- B(LdaZero), B(Star), R(0), //
- B(LdaSmi8), U8(1), //
- B(Star), R(1), //
- B(LdaSmi8), U8(2), //
- B(Star), R(0), //
- B(LdaSmi8), U8(3), //
- B(Star), R(1), //
- B(LdaSmi8), U8(4), //
- B(Star), R(0), //
- B(LdaSmi8), U8(5), //
- B(Star), R(1), //
- B(Return), //
- },
- 0},
- {"var x = 55;\n"
- "var y = (x = 100);\n"
- "return y",
- 2 * kPointerSize,
- 1,
- 12,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(55), //
- B(Star), R(0), //
- B(LdaSmi8), U8(100), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(Return), //
- },
- 0},
- {"var x = 55;\n"
- "x = x + (x = 100) + (x = 101);\n"
- "return x;",
- 3 * kPointerSize,
- 1,
- 24,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(55), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(100), //
- B(Star), R(0), //
- B(Add), R(1), //
- B(Star), R(2), //
- B(LdaSmi8), U8(101), //
- B(Star), R(0), //
- B(Add), R(2), //
- B(Star), R(0), //
- B(Return), //
- },
- 0},
- {"var x = 55;\n"
- "x = (x = 56) - x + (x = 57);\n"
- "x++;\n"
- "return x;",
- 3 * kPointerSize,
- 1,
- 32,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(55), //
- B(Star), R(0), //
- B(LdaSmi8), U8(56), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(Sub), R(1), //
- B(Star), R(2), //
- B(LdaSmi8), U8(57), //
- B(Star), R(0), //
- B(Add), R(2), //
- B(Star), R(0), //
- B(ToNumber), //
- B(Star), R(1), //
- B(Inc), //
- B(Star), R(0), //
- B(Return), //
- },
- 0},
- {"var x = 55;\n"
- "var y = x + (x = 1) + (x = 2) + (x = 3);\n"
- "return y;",
- 4 * kPointerSize,
- 1,
- 32,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(55), //
- B(Star), R(0), //
- B(Star), R(2), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(Add), R(2), //
- B(Star), R(3), //
- B(LdaSmi8), U8(2), //
- B(Star), R(0), //
- B(Add), R(3), //
- B(Star), R(2), //
- B(LdaSmi8), U8(3), //
- B(Star), R(0), //
- B(Add), R(2), //
- B(Star), R(1), //
- B(Return), //
- },
- 0},
- {"var x = 55;\n"
- "var x = x + (x = 1) + (x = 2) + (x = 3);\n"
- "return x;",
- 3 * kPointerSize,
- 1,
- 32,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(55), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(Add), R(1), //
- B(Star), R(2), //
- B(LdaSmi8), U8(2), //
- B(Star), R(0), //
- B(Add), R(2), //
- B(Star), R(1), //
- B(LdaSmi8), U8(3), //
- B(Star), R(0), //
- B(Add), R(1), //
- B(Star), R(0), //
- B(Return), //
- },
- 0},
- {"var x = 10, y = 20;\n"
- "return x + (x = 1) + (x + 1) * (y = 2) + (y = 3) + (x = 4) + (y = 5) + "
- "y;\n",
- 5 * kPointerSize,
- 1,
- 70,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(10), //
- B(Star), R(0), //
- B(LdaSmi8), U8(20), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(Star), R(2), //
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(Add), R(2), //
- B(Star), R(3), //
- B(Ldar), R(0), //
- B(Star), R(2), //
- B(LdaSmi8), U8(1), //
- B(Add), R(2), //
- B(Star), R(4), //
- B(LdaSmi8), U8(2), //
- B(Star), R(1), //
- B(Mul), R(4), //
- B(Add), R(3), //
- B(Star), R(2), //
- B(LdaSmi8), U8(3), //
- B(Star), R(1), //
- B(Add), R(2), //
- B(Star), R(3), //
- B(LdaSmi8), U8(4), //
- B(Star), R(0), //
- B(Add), R(3), //
- B(Star), R(2), //
- B(LdaSmi8), U8(5), //
- B(Star), R(1), //
- B(Add), R(2), //
- B(Star), R(3), //
- B(Ldar), R(1), //
- B(Add), R(3), //
- B(Return), //
- },
- 0},
- {"var x = 17;\n"
- "return 1 + x + (x++) + (++x);\n",
- 4 * kPointerSize,
- 1,
- 38,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(17), //
- B(Star), R(0), //
- B(LdaSmi8), U8(1), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(Add), R(1), //
- B(Star), R(2), //
- B(Ldar), R(0), //
- B(ToNumber), //
- B(Star), R(1), //
- B(Inc), //
- B(Star), R(0), //
- B(Ldar), R(1), //
- B(Add), R(2), //
- B(Star), R(3), //
- B(Ldar), R(0), //
- B(ToNumber), //
- B(Inc), //
- B(Star), R(0), //
- B(Add), R(3), //
- B(Return), //
- },
- 0}
+ "var ld_a = 1;\n"
+ " ld_a = ld_a + ld_a;\n"
+ " return ld_a;",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets),
+ LoadGolden("RemoveRedundantLdar.golden"));
}
+TEST(AssignmentsInBinaryExpression) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kString);
+ const char* snippets[] = {
+ "var x = 0, y = 1;\n"
+ "return (x = 2, y = 3, x = 4, y = 5);",
-TEST(Eval) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
- Zone zone;
-
- int closure = Register::function_closure().index();
- int context = Register::current_context().index();
- int new_target = Register::new_target().index();
-
- int first_context_slot = Context::MIN_CONTEXT_SLOTS;
-
- // clang-format off
- ExpectedSnippet<const char*> snippets[] = {
- {"return eval('1;');",
- 9 * kPointerSize,
- 1,
- 65,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
- /* */ U8(1), //
- B(PushContext), R(0), //
- B(Ldar), THIS(1), //
- B(StaContextSlot), R(context), U8(first_context_slot), //
- B(CreateMappedArguments), //
- B(StaContextSlot), R(context), U8(first_context_slot + 1), //
- B(Ldar), R(new_target), //
- B(StaContextSlot), R(context), U8(first_context_slot + 2), //
- B(StackCheck), //
- B(LdaConstant), U8(0), //
- B(Star), R(3), //
- B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), //
- /* */ R(3), U8(1), R(1), //
- B(LdaConstant), U8(1), //
- B(Star), R(3), //
- B(Mov), R(1), R(4), //
- B(Mov), R(3), R(5), //
- B(Mov), R(closure), R(6), //
- B(LdaZero), //
- B(Star), R(7), //
- B(LdaSmi8), U8(10), //
- B(Star), R(8), //
- B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), //
- /* */ U8(5), //
- B(Star), R(1), //
- B(Call), R(1), R(2), U8(2), U8(0), //
- B(Return), //
- },
- 2,
- {"eval", "1;"}},
+ "var x = 55;\n"
+ "var y = (x = 100);\n"
+ "return y;",
+
+ "var x = 55;\n"
+ "x = x + (x = 100) + (x = 101);\n"
+ "return x;",
+
+ "var x = 55;\n"
+ "x = (x = 56) - x + (x = 57);\n"
+ "x++;\n"
+ "return x;",
+
+ "var x = 55;\n"
+ "var y = x + (x = 1) + (x = 2) + (x = 3);\n"
+ "return y;",
+
+ "var x = 55;\n"
+ "var x = x + (x = 1) + (x = 2) + (x = 3);\n"
+ "return x;",
+
+ "var x = 10, y = 20;\n"
+ "return x + (x = 1) + (x + 1) * (y = 2) + (y = 3) + (x = 4) + (y = 5) + "
+ "y;\n",
+
+ "var x = 17;\n"
+ "return 1 + x + (x++) + (++x);\n",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets),
+ LoadGolden("AssignmentsInBinaryExpression.golden"));
}
+TEST(Eval) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kString);
+ const char* snippets[] = {
+ "return eval('1;');",
+ };
+
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("Eval.golden"));
+}
TEST(LookupSlot) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- int closure = Register::function_closure().index();
- int context = Register::current_context().index();
- int first_context_slot = Context::MIN_CONTEXT_SLOTS;
- int new_target = Register::new_target().index();
-
- // clang-format off
- ExpectedSnippet<const char*> snippets[] = {
- {"eval('var x = 10;'); return x;",
- 9 * kPointerSize,
- 1,
- 67,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
- /* */ U8(1), //
- B(PushContext), R(0), //
- B(Ldar), THIS(1), //
- B(StaContextSlot), R(context), U8(first_context_slot), //
- B(CreateMappedArguments), //
- B(StaContextSlot), R(context), U8(first_context_slot + 1), //
- B(Ldar), R(new_target), //
- B(StaContextSlot), R(context), U8(first_context_slot + 2), //
- B(StackCheck), //
- B(LdaConstant), U8(0), //
- B(Star), R(3), //
- B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), //
- R(3), U8(1), R(1), //
- B(LdaConstant), U8(1), //
- B(Star), R(3), //
- B(Mov), R(1), R(4), //
- B(Mov), R(3), R(5), //
- B(Mov), R(closure), R(6), //
- B(LdaZero), //
- B(Star), R(7), //
- B(LdaSmi8), U8(10), //
- B(Star), R(8), //
- B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), //
- U8(5), //
- B(Star), R(1), //
- B(Call), R(1), R(2), U8(2), U8(0), //
- B(LdaLookupSlot), U8(2), //
- B(Return), //
- },
- 3,
- {"eval", "var x = 10;", "x"}},
- {"eval('var x = 10;'); return typeof x;",
- 9 * kPointerSize,
- 1,
- 68,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
- /* */ U8(1), //
- B(PushContext), R(0), //
- B(Ldar), THIS(1), //
- B(StaContextSlot), R(context), U8(first_context_slot), //
- B(CreateMappedArguments), //
- B(StaContextSlot), R(context), U8(first_context_slot + 1), //
- B(Ldar), R(new_target), //
- B(StaContextSlot), R(context), U8(first_context_slot + 2), //
- B(StackCheck), //
- B(LdaConstant), U8(0), //
- B(Star), R(3), //
- B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), //
- /* */ R(3), U8(1), R(1), //
- B(LdaConstant), U8(1), //
- B(Star), R(3), //
- B(Mov), R(1), R(4), //
- B(Mov), R(3), R(5), //
- B(Mov), R(closure), R(6), //
- B(LdaZero), //
- B(Star), R(7), //
- B(LdaSmi8), U8(10), //
- B(Star), R(8), //
- B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), //
- /* */ U8(5), //
- B(Star), R(1), //
- B(Call), R(1), R(2), U8(2), U8(0), //
- B(LdaLookupSlotInsideTypeof), U8(2), //
- B(TypeOf), //
- B(Return), //
- },
- 3,
- {"eval", "var x = 10;", "x"}},
- {"x = 20; return eval('');",
- 9 * kPointerSize,
- 1,
- 69,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
- U8(1), //
- B(PushContext), R(0), //
- B(Ldar), THIS(1), //
- B(StaContextSlot), R(context), U8(first_context_slot), //
- B(CreateMappedArguments), //
- B(StaContextSlot), R(context), U8(first_context_slot + 1), //
- B(Ldar), R(new_target), //
- B(StaContextSlot), R(context), U8(first_context_slot + 2), //
- B(StackCheck), //
- B(LdaSmi8), U8(20), //
- B(StaLookupSlotSloppy), U8(0), //
- B(LdaConstant), U8(1), //
- B(Star), R(3), //
- B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), //
- /* */ R(3), U8(1), R(1), //
- B(LdaConstant), U8(2), //
- B(Star), R(3), //
- B(Mov), R(1), R(4), //
- B(Mov), R(3), R(5), //
- B(Mov), R(closure), R(6), //
- B(LdaZero), //
- B(Star), R(7), //
- B(LdaSmi8), U8(10), //
- B(Star), R(8), //
- B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), //
- /* */ U8(5), //
- B(Star), R(1), //
- B(Call), R(1), R(2), U8(2), U8(0), //
- B(Return), //
- },
- 3,
- {"x", "eval", ""}},
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kString);
+
+ const char* snippets[] = {
+ "eval('var x = 10;'); return x;",
+
+ "eval('var x = 10;'); return typeof x;",
+
+ "x = 20; return eval('');",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("LookupSlot.golden"));
}
-
TEST(CallLookupSlot) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
- Zone zone;
-
- FeedbackVectorSpec feedback_spec(&zone);
- FeedbackVectorSlot slot1 = feedback_spec.AddLoadICSlot();
- FeedbackVectorSlot slot2 = feedback_spec.AddCallICSlot();
- USE(slot1);
-
- Handle<i::TypeFeedbackVector> vector =
- i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
-
- int closure = Register::function_closure().index();
- int context = Register::current_context().index();
- int new_target = Register::new_target().index();
-
- // clang-format off
- ExpectedSnippet<InstanceType> snippets[] = {
- {"g = function(){}; eval(''); return g();",
- 9 * kPointerSize,
- 1,
- 85,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
- /* */ U8(1), //
- B(PushContext), R(0), //
- B(Ldar), THIS(1), //
- B(StaContextSlot), R(context), U8(4), //
- B(CreateMappedArguments), //
- B(StaContextSlot), R(context), U8(5), //
- B(Ldar), R(new_target), //
- B(StaContextSlot), R(context), U8(6), //
- B(StackCheck), //
- B(CreateClosure), U8(0), U8(0), //
- B(StaLookupSlotSloppy), U8(1), //
- B(LdaConstant), U8(2), //
- B(Star), R(3), //
- B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), //
- R(3), U8(1), R(1), //
- B(LdaConstant), U8(3), //
- B(Star), R(3), //
- B(Mov), R(1), R(4), //
- B(Mov), R(3), R(5), //
- B(Mov), R(closure), R(6), //
- B(LdaZero), //
- B(Star), R(7), //
- B(LdaSmi8), U8(10), //
- B(Star), R(8), //
- B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), //
- U8(5), //
- B(Star), R(1), //
- B(Call), R(1), R(2), U8(2), U8(0), //
- B(LdaConstant), U8(1), //
- B(Star), R(3), //
- B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), //
- R(3), U8(1), R(1), //
- B(Call), R(1), R(2), U8(1), U8(vector->GetIndex(slot2)), //
- B(Return), //
- },
- 4,
- {InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kMixed);
+ const char* snippets[] = {
+ "g = function(){}; eval(''); return g();",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("CallLookupSlot.golden"));
}
-
// TODO(mythria): tests for variable/function declaration in lookup slots.
TEST(LookupSlotInEval) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- const char* function_prologue = "var f;"
- "var x = 1;"
- "function f1() {"
- " eval(\"function t() {";
- const char* function_epilogue = " }; f = t; f();\");"
- "}"
- "f1();";
-
- // clang-format off
- ExpectedSnippet<const char*> snippets[] = {
- {"return x;",
- 0 * kPointerSize,
- 1,
- 4,
- {
- B(StackCheck), //
- B(LdaLookupSlot), U8(0), //
- B(Return) //
- },
- 1,
- {"x"}},
- {"x = 10;",
- 0 * kPointerSize,
- 1,
- 7,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(10), //
- B(StaLookupSlotSloppy), U8(0), //
- B(LdaUndefined), //
- B(Return), //
- },
- 1,
- {"x"}},
- {"'use strict'; x = 10;",
- 0 * kPointerSize,
- 1,
- 7,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(10), //
- B(StaLookupSlotStrict), U8(0), //
- B(LdaUndefined), //
- B(Return), //
- },
- 1,
- {"x"}},
- {"return typeof x;",
- 0 * kPointerSize,
- 1,
- 5,
- {
- B(StackCheck), //
- B(LdaLookupSlotInsideTypeof), U8(0), //
- B(TypeOf), //
- B(Return), //
- },
- 1,
- {"x"}},
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kString);
+ printer.set_wrap(false);
+ printer.set_test_function_name("f");
+
+ const char* snippets[] = {
+ "return x;",
+
+ "x = 10;",
+
+ "'use strict'; x = 10;",
+
+ "return typeof x;",
};
- // clang-format on
-
- for (size_t i = 0; i < arraysize(snippets); i++) {
- std::string script = std::string(function_prologue) +
- std::string(snippets[i].code_snippet) +
- std::string(function_epilogue);
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecode(script.c_str(), "*", "f");
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
-}
+ std::string actual = BuildActual(printer, snippets,
+ "var f;\n"
+ "var x = 1;\n"
+ "function f1() {\n"
+ " eval(\"function t() { ",
+
+ " }; f = t; f();\");\n"
+ "}\n"
+ "f1();");
+
+ CHECK_EQ(actual, LoadGolden("LookupSlotInEval.golden"));
+}
TEST(LookupSlotWideInEval) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- const char* function_prologue =
- "var f;"
- "var x = 1;"
- "function f1() {"
- " eval(\"function t() {";
- const char* function_epilogue =
- " }; f = t; f();\");"
- "}"
- "f1();";
-
- int const_count[] = {0, 0, 0, 0};
- // clang-format off
- ExpectedSnippet<InstanceType, 257> snippets[] = {
- {REPEAT_256(SPACE, "var y = 2.3;")
- "return x;",
- 1 * kPointerSize,
- 1,
- 1029,
- {
- B(StackCheck), //
- REPEAT_256(SPACE, //
- B(LdaConstant), U8(const_count[0]++), //
- B(Star), R(0), ) //
- B(LdaLookupSlotWide), U16(256), //
- B(Return) //
- },
- 257,
- {REPEAT_256(COMMA, InstanceType::HEAP_NUMBER_TYPE),
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- {REPEAT_256(SPACE, "var y = 2.3;")
- "return typeof x;",
- 1 * kPointerSize,
- 1,
- 1030,
- {
- B(StackCheck), //
- REPEAT_256(SPACE, //
- B(LdaConstant), U8(const_count[1]++), //
- B(Star), R(0), ) //
- B(LdaLookupSlotInsideTypeofWide), U16(256), //
- B(TypeOf), //
- B(Return) //
- },
- 257,
- {REPEAT_256(COMMA, InstanceType::HEAP_NUMBER_TYPE),
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- {REPEAT_256(SPACE, "var y = 2.3;")
- "x = 10;",
- 1 * kPointerSize,
- 1,
- 1032,
- {
- B(StackCheck), //
- REPEAT_256(SPACE, //
- B(LdaConstant), U8(const_count[2]++), //
- B(Star), R(0), ) //
- B(LdaSmi8), U8(10), //
- B(StaLookupSlotSloppyWide), U16(256), //
- B(LdaUndefined), //
- B(Return) //
- },
- 257,
- {REPEAT_256(COMMA, InstanceType::HEAP_NUMBER_TYPE),
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- {"'use strict';"
- REPEAT_256(SPACE, "var y = 2.3;")
- "x = 10;",
- 1 * kPointerSize,
- 1,
- 1032,
- {
- B(StackCheck), //
- REPEAT_256(SPACE, //
- B(LdaConstant), U8(const_count[3]++), //
- B(Star), R(0), ) //
- B(LdaSmi8), U8(10), //
- B(StaLookupSlotStrictWide), U16(256), //
- B(LdaUndefined), //
- B(Return) //
- },
- 257,
- {REPEAT_256(COMMA, InstanceType::HEAP_NUMBER_TYPE),
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kMixed);
+ printer.set_wrap(false);
+ printer.set_test_function_name("f");
+
+ const char* snippets[] = {
+ REPEAT_256(" \"var y = 2.3;\" +\n") //
+ " \"return x;\" +\n",
+
+ REPEAT_256(" \"var y = 2.3;\" +\n") //
+ " \"return typeof x;\" +\n",
+
+ REPEAT_256(" \"var y = 2.3;\" +\n") //
+ " \"x = 10;\" +\n",
+
+ " \"'use strict';\" +\n" //
+ REPEAT_256(" \"var y = 2.3;\" +\n") //
+ " \"x = 10;\" +\n",
};
- // clang-format on
-
- for (size_t i = 0; i < arraysize(snippets); i++) {
- std::string script = std::string(function_prologue) +
- std::string(snippets[i].code_snippet) +
- std::string(function_epilogue);
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecode(script.c_str(), "*", "f");
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
-}
+ std::string actual = BuildActual(printer, snippets,
+ "var f;\n"
+ "var x = 1;\n"
+ "function f1() {\n"
+ " eval(\"function t() {\" +\n",
+
+ " \"};\" +\n"
+ " \"f = t; f();\"\n);\n"
+ "}\n"
+ "f1();");
+
+ CHECK_EQ(actual, LoadGolden("LookupSlotWideInEval.golden"));
+}
TEST(DeleteLookupSlotInEval) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- const char* function_prologue = "var f;"
- "var x = 1;"
- "z = 10;"
- "function f1() {"
- " var y;"
- " eval(\"function t() {";
- const char* function_epilogue = " }; f = t; f();\");"
- "}"
- "f1();";
-
- // clang-format off
- ExpectedSnippet<const char*> snippets[] = {
- {"delete x;",
- 1 * kPointerSize,
- 1,
- 12,
- {
- B(StackCheck), //
- B(LdaConstant), U8(0), //
- B(Star), R(0), //
- B(CallRuntime), U16(Runtime::kDeleteLookupSlot), R(0), U8(1), //
- B(LdaUndefined), //
- B(Return) //
- },
- 1,
- {"x"}},
- {"return delete y;",
- 0 * kPointerSize,
- 1,
- 3,
- {
- B(StackCheck), //
- B(LdaFalse), //
- B(Return) //
- },
- 0},
- {"return delete z;",
- 1 * kPointerSize,
- 1,
- 11,
- {
- B(StackCheck), //
- B(LdaConstant), U8(0), //
- B(Star), R(0), //
- B(CallRuntime), U16(Runtime::kDeleteLookupSlot), R(0), U8(1), //
- B(Return) //
- },
- 1,
- {"z"}},
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kString);
+ printer.set_wrap(false);
+ printer.set_test_function_name("f");
+
+ const char* snippets[] = {
+ "delete x;",
+
+ "return delete y;",
+
+ "return delete z;",
};
- // clang-format on
-
- for (size_t i = 0; i < arraysize(snippets); i++) {
- std::string script = std::string(function_prologue) +
- std::string(snippets[i].code_snippet) +
- std::string(function_epilogue);
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecode(script.c_str(), "*", "f");
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+
+ std::string actual = BuildActual(printer, snippets,
+ "var f;\n"
+ "var x = 1;\n"
+ "z = 10;\n"
+ "function f1() {\n"
+ " var y;\n"
+ " eval(\"function t() { ",
+
+ " }; f = t; f();\");\n"
+ "}\n"
+ "f1();");
+
+ CHECK_EQ(actual, LoadGolden("DeleteLookupSlotInEval.golden"));
}
TEST(WideRegisters) {
@@ -7949,1155 +1908,252 @@ TEST(WideRegisters) {
}
std::string prologue(os.str());
- // clang-format off
- ExpectedSnippet<int> snippets[] = {
- {"x0 = x127;\n"
- "return x0;\n",
- 161 * kPointerSize,
- 1,
- 11,
- {
- B(StackCheck), //
- B(MovWide), R16(131), R16(125), //
- B(Ldar), R(125), //
- B(Star), R(0), //
- B(Return), //
- }},
- {"x127 = x126;\n"
- "return x127;\n",
- 161 * kPointerSize,
- 1,
- 23,
- {
- B(StackCheck), //
- B(MovWide), R16(130), R16(125), //
- B(Ldar), R(125), //
- B(Star), R(125), //
- B(MovWide), R16(125), R16(131), //
- B(MovWide), R16(131), R16(125), //
- B(Ldar), R(125), //
- B(Return), //
- }},
- {"if (x2 > 3) { return x129; }\n"
- "return x128;\n",
- 162 * kPointerSize,
- 1,
- 37,
- {
- B(StackCheck), //
- B(Ldar), R(2), //
- B(Star), R(125), //
- B(MovWide), R16(125), R16(161), //
- B(LdaSmi8), U8(3), //
- B(MovWide), R16(161), R16(125), //
- B(TestGreaterThan), R(125), //
- B(JumpIfFalse), U8(10), //
- B(MovWide), R16(133), R16(125), //
- B(Ldar), R(125), //
- B(Return), //
- B(MovWide), R16(132), R16(125), //
- B(Ldar), R(125), //
- B(Return), //
- }},
- {"var x0 = 0;\n"
- "if (x129 == 3) { var x129 = x0; }\n"
- "if (x2 > 3) { return x0; }\n"
- "return x129;\n",
- 162 * kPointerSize,
- 1,
- 69,
- {
- B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(MovWide), R16(133), R16(125), //
- B(Ldar), R(125), //
- B(Star), R(125), //
- B(MovWide), R16(125), R16(161), //
- B(LdaSmi8), U8(3), //
- B(MovWide), R16(161), R16(125), //
- B(TestEqual), R(125), //
- B(JumpIfFalse), U8(11), //
- B(Ldar), R(0), //
- B(Star), R(125), //
- B(MovWide), R16(125), R16(133), //
- B(Ldar), R(2), //
- B(Star), R(125), //
- B(MovWide), R16(125), R16(161), //
- B(LdaSmi8), U8(3), //
- B(MovWide), R16(161), R16(125), //
- B(TestGreaterThan), R(125), //
- B(JumpIfFalse), U8(5), //
- B(Ldar), R(0), //
- B(Return), //
- B(MovWide), R16(133), R16(125), //
- B(Ldar), R(125), //
- B(Return), //
- }},
- {"var x0 = 0;\n"
- "var x1 = 0;\n"
- "for (x128 = 0; x128 < 64; x128++) {"
- " x1 += x128;"
- "}"
- "return x128;\n",
- 162 * kPointerSize,
- 1,
- 99,
- {
- B(StackCheck), //
- B(LdaZero), //
- B(Star), R(0), //
- B(LdaZero), //
- B(Star), R(1), //
- B(LdaZero), //
- B(Star), R(125), //
- B(MovWide), R16(125), R16(132), //
- B(MovWide), R16(132), R16(125), //
- B(Ldar), R(125), //
- B(Star), R(125), //
- B(MovWide), R16(125), R16(161), //
- B(LdaSmi8), U8(64), //
- B(MovWide), R16(161), R16(125), //
- B(TestLessThan), R(125), //
- B(JumpIfFalse), U8(53), //
- B(StackCheck), //
- B(Ldar), R(1), //
- B(Star), R(125), //
- B(MovWide), R16(125), R16(161), //
- B(MovWide), R16(132), R16(125), //
- B(Ldar), R(125), //
- B(MovWide), R16(161), R16(125), //
- B(Add), R(125), //
- B(Star), R(1), //
- B(MovWide), R16(132), R16(125), //
- B(Ldar), R(125), //
- B(ToNumber), //
- B(Star), R(125), //
- B(MovWide), R16(125), R16(161), //
- B(Inc), //
- B(Star), R(125), //
- B(MovWide), R16(125), R16(132), //
- B(Jump), U8(-74), //
- B(MovWide), R16(132), R16(125), //
- B(Ldar), R(125), //
- B(Return), //
- }},
- {"var x0 = 1234;\n"
- "var x1 = 0;\n"
- "for (x128 in x0) {"
- " x1 += x128;"
- "}"
- "return x1;\n",
- 167 * kPointerSize,
- 1,
- 111,
- {
- B(StackCheck), //
- B(LdaConstant), U8(0), //
- B(Star), R(0), //
- B(LdaZero), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(JumpIfUndefined), U8(98), //
- B(JumpIfNull), U8(96), //
- B(ToObject), //
- B(JumpIfNull), U8(93), //
- B(Star), R(125), //
- B(MovWide), R16(125), R16(161), //
- B(ForInPrepareWide), R16(162), //
- B(LdaZero), //
- B(Star), R(125), //
- B(MovWide), R16(125), R16(165), //
- B(MovWide), R16(165), R16(125), //
- B(MovWide), R16(164), R16(126), //
- B(ForInDone), R(125), R(126), //
- B(JumpIfTrue), U8(60), //
- B(ForInNextWide), R16(161), R16(165), R16(162), //
- B(JumpIfUndefined), U8(35), //
- B(Star), R(125), //
- B(MovWide), R16(125), R16(132), //
- B(StackCheck), //
- B(Ldar), R(1), //
- B(Star), R(125), //
- B(MovWide), R16(125), R16(166), //
- B(MovWide), R16(132), R16(125), //
- B(Ldar), R(125), //
- B(MovWide), R16(166), R16(125), //
- B(Add), R(125), //
- B(Star), R(1), //
- B(MovWide), R16(165), R16(125), //
- B(ForInStep), R(125), //
- B(Star), R(125), //
- B(MovWide), R16(125), R16(165), //
- B(Jump), U8(-71), //
- B(Ldar), R(1), //
- B(Return), //
- },
- 1,
- {1234}},
- {"x0 = %Add(x64, x63);\n"
- "x1 = %Add(x27, x143);\n"
- "%TheHole();\n"
- "return x1;\n",
- 163 * kPointerSize,
- 1,
- 66,
- {
- B(StackCheck), //
- B(Ldar), R(64), //
- B(Star), R(125), //
- B(MovWide), R16(125), R16(161), //
- B(Ldar), R(63), //
- B(Star), R(125), //
- B(MovWide), R16(125), R16(162), //
- B(CallRuntimeWide), U16(Runtime::kAdd), R16(161), U8(2), //
- B(Star), R(0), //
- B(Ldar), R(27), //
- B(Star), R(125), //
- B(MovWide), R16(125), R16(161), //
- B(MovWide), R16(147), R16(125), //
- B(Ldar), R(125), //
- B(Star), R(125), //
- B(MovWide), R16(125), R16(162), //
- B(CallRuntimeWide), U16(Runtime::kAdd), R16(161), U8(2), //
- B(Star), R(1), //
- B(CallRuntime), U16(Runtime::kTheHole), R(0), U8(0), //
- B(Ldar), R(1), //
- B(Return), //
- }}
- };
- // clang-format on
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kNumber);
+ const char* snippets[] = {
+ "x0 = x127;\n"
+ "return x0;",
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
+ "x127 = x126;\n"
+ "return x127;",
- for (size_t i = 0; i < arraysize(snippets); ++i) {
- std::string body = prologue + snippets[i].code_snippet;
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(body.c_str());
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ "if (x2 > 3) { return x129; }\n"
+ "return x128;",
+
+ "var x0 = 0;\n"
+ "if (x129 == 3) { var x129 = x0; }\n"
+ "if (x2 > 3) { return x0; }\n"
+ "return x129;",
+
+ "var x0 = 0;\n"
+ "var x1 = 0;\n"
+ "for (x128 = 0; x128 < 64; x128++) {"
+ " x1 += x128;"
+ "}"
+ "return x128;",
+
+ "var x0 = 1234;\n"
+ "var x1 = 0;\n"
+ "for (x128 in x0) {"
+ " x1 += x128;"
+ "}"
+ "return x1;",
+
+ "x0 = %Add(x64, x63);\n"
+ "x1 = %Add(x27, x143);\n"
+ "%TheHole();\n"
+ "return x1;",
+ };
+
+ CHECK_EQ(BuildActual(printer, snippets, prologue.c_str()),
+ LoadGolden("WideRegisters.golden"));
}
TEST(ConstVariable) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
- // clang-format off
- ExpectedSnippet<const char*> snippets[] = {
- {"const x = 10;",
- 1 * kPointerSize,
- 1,
- 10,
- {
- B(LdaTheHole), //
- B(Star), R(0), //
- B(StackCheck), //
- B(LdaSmi8), U8(10), //
- B(Star), R(0), //
- B(LdaUndefined), //
- B(Return) //
- },
- 0},
- {"const x = 10; return x;",
- 2 * kPointerSize,
- 1,
- 20,
- {
- B(LdaTheHole), //
- B(Star), R(0), //
- B(StackCheck), //
- B(LdaSmi8), U8(10), //
- B(Star), R(0), //
- B(JumpIfNotHole), U8(11), //
- B(LdaConstant), U8(0), //
- B(Star), R(1), //
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(1), U8(1), //
- B(Return) //
- },
- 1,
- {"x"}},
- {"const x = ( x = 20);",
- 3 * kPointerSize,
- 1,
- 32,
- {
- B(LdaTheHole), //
- B(Star), R(0), //
- B(StackCheck), //
- B(LdaSmi8), U8(20), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(JumpIfNotHole), U8(11), //
- B(LdaConstant), U8(0), //
- B(Star), R(2), //
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1), //
- B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), //
- /* */ U8(0), //
- B(Ldar), R(1), //
- B(Star), R(0), //
- B(LdaUndefined), //
- B(Return) //
- },
- 1,
- {"x"}},
- {"const x = 10; x = 20;",
- 3 * kPointerSize,
- 1,
- 36,
- {
- B(LdaTheHole), //
- B(Star), R(0), //
- B(StackCheck), //
- B(LdaSmi8), U8(10), //
- B(Star), R(0), //
- B(LdaSmi8), U8(20), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(JumpIfNotHole), U8(11), //
- B(LdaConstant), U8(0), //
- B(Star), R(2), //
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1), //
- B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), //
- /* */ U8(0), //
- B(Ldar), R(1), //
- B(Star), R(0), //
- B(LdaUndefined), //
- B(Return) //
- },
- 1,
- {"x"}},
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kString);
+ const char* snippets[] = {
+ "const x = 10;",
+
+ "const x = 10; return x;",
+
+ "const x = ( x = 20);",
+
+ "const x = 10; x = 20;",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("ConstVariable.golden"));
}
TEST(LetVariable) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- // clang-format off
- ExpectedSnippet<const char*> snippets[] = {
- {"let x = 10;",
- 1 * kPointerSize,
- 1,
- 10,
- {
- B(LdaTheHole), //
- B(Star), R(0), //
- B(StackCheck), //
- B(LdaSmi8), U8(10), //
- B(Star), R(0), //
- B(LdaUndefined), //
- B(Return) //
- },
- 0},
- {"let x = 10; return x;",
- 2 * kPointerSize,
- 1,
- 20,
- {
- B(LdaTheHole), //
- B(Star), R(0), //
- B(StackCheck), //
- B(LdaSmi8), U8(10), //
- B(Star), R(0), //
- B(JumpIfNotHole), U8(11), //
- B(LdaConstant), U8(0), //
- B(Star), R(1), //
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(1), U8(1), //
- B(Return) //
- },
- 1,
- {"x"}},
- {"let x = (x = 20);",
- 3 * kPointerSize,
- 1,
- 27,
- {
- B(LdaTheHole), //
- B(Star), R(0), //
- B(StackCheck), //
- B(LdaSmi8), U8(20), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(JumpIfNotHole), U8(11), //
- B(LdaConstant), U8(0), //
- B(Star), R(2), //
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1), //
- B(Ldar), R(1), //
- B(Star), R(0), //
- B(LdaUndefined), //
- B(Return) //
- },
- 1,
- {"x"}},
- {"let x = 10; x = 20;",
- 3 * kPointerSize,
- 1,
- 31,
- {
- B(LdaTheHole), //
- B(Star), R(0), //
- B(StackCheck), //
- B(LdaSmi8), U8(10), //
- B(Star), R(0), //
- B(LdaSmi8), U8(20), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(JumpIfNotHole), U8(11), //
- B(LdaConstant), U8(0), //
- B(Star), R(2), //
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1), //
- B(Ldar), R(1), //
- B(Star), R(0), //
- B(LdaUndefined), //
- B(Return) //
- },
- 1,
- {"x"}},
- };
- // clang-format on
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kString);
+ const char* snippets[] = {
+ "let x = 10;",
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
-}
+ "let x = 10; return x;",
-TEST(LegacyConstVariable) {
- bool old_legacy_const_flag = FLAG_legacy_const;
- FLAG_legacy_const = true;
-
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- // clang-format off
- ExpectedSnippet<const char*> snippets[] = {
- {"const x = 10;",
- 2 * kPointerSize,
- 1,
- 19,
- {
- B(LdaTheHole), //
- B(Star), R(0), //
- B(StackCheck), //
- B(LdaSmi8), U8(10), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(JumpIfNotHole), U8(5), //
- B(Mov), R(1), R(0), //
- B(Ldar), R(1), //
- B(LdaUndefined), //
- B(Return) //
- },
- 0},
- {"const x = 10; return x;",
- 2 * kPointerSize,
- 1,
- 23,
- {
- B(LdaTheHole), //
- B(Star), R(0), //
- B(StackCheck), //
- B(LdaSmi8), U8(10), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(JumpIfNotHole), U8(5), //
- B(Mov), R(1), R(0), //
- B(Ldar), R(1), //
- B(Ldar), R(0), //
- B(JumpIfNotHole), U8(3), //
- B(LdaUndefined), //
- B(Return) //
- },
- 0},
- {"const x = ( x = 20);",
- 2 * kPointerSize,
- 1,
- 23,
- {
- B(LdaTheHole), //
- B(Star), R(0), //
- B(StackCheck), //
- B(LdaSmi8), U8(20), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(Ldar), R(1), //
- B(Ldar), R(0), //
- B(JumpIfNotHole), U8(5), //
- B(Mov), R(1), R(0), //
- B(Ldar), R(1), //
- B(LdaUndefined), //
- B(Return) //
- },
- 0},
- {"const x = 10; x = 20;",
- 2 * kPointerSize,
- 1,
- 27,
- {
- B(LdaTheHole), //
- B(Star), R(0), //
- B(StackCheck), //
- B(LdaSmi8), U8(10), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(JumpIfNotHole), U8(5), //
- B(Mov), R(1), R(0), //
- B(Ldar), R(1), //
- B(LdaSmi8), U8(20), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(Ldar), R(1), //
- B(LdaUndefined), //
- B(Return) //
- },
- 0},
- };
- // clang-format on
+ "let x = (x = 20);",
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ "let x = 10; x = 20;",
+ };
- FLAG_legacy_const = old_legacy_const_flag;
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("LetVariable.golden"));
}
TEST(ConstVariableContextSlot) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- int closure = Register::function_closure().index();
- int context = Register::current_context().index();
-
// TODO(mythria): Add tests for initialization of this via super calls.
// TODO(mythria): Add tests that walk the context chain.
- // clang-format off
- ExpectedSnippet<InstanceType> snippets[] = {
- {"const x = 10; function f1() {return x;}",
- 2 * kPointerSize,
- 1,
- 24,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
- U8(1), //
- B(PushContext), R(1), //
- B(LdaTheHole), //
- B(StaContextSlot), R(context), U8(4), //
- B(CreateClosure), U8(0), U8(0), //
- B(Star), R(0), //
- B(StackCheck), //
- B(LdaSmi8), U8(10), //
- B(StaContextSlot), R(context), U8(4), //
- B(LdaUndefined), //
- B(Return) //
- },
- 1,
- {InstanceType::SHARED_FUNCTION_INFO_TYPE}},
- {"const x = 10; function f1() {return x;} return x;",
- 3 * kPointerSize,
- 1,
- 37,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
- U8(1), //
- B(PushContext), R(1), //
- B(LdaTheHole), //
- B(StaContextSlot), R(context), U8(4), //
- B(CreateClosure), U8(0), U8(0), //
- B(Star), R(0), //
- B(StackCheck), //
- B(LdaSmi8), U8(10), //
- B(StaContextSlot), R(context), U8(4), //
- B(LdaContextSlot), R(context), U8(4), //
- B(JumpIfNotHole), U8(11), //
- B(LdaConstant), U8(1), //
- B(Star), R(2), //
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1), //
- B(Return) //
- },
- 2,
- {InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- {"const x = (x = 20); function f1() {return x;}",
- 4 * kPointerSize,
- 1,
- 50,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
- /* */ U8(1), //
- B(PushContext), R(1), //
- B(LdaTheHole), //
- B(StaContextSlot), R(context), U8(4), //
- B(CreateClosure), U8(0), U8(0), //
- B(Star), R(0), //
- B(StackCheck), //
- B(LdaSmi8), U8(20), //
- B(Star), R(2), //
- B(LdaContextSlot), R(context), U8(4), //
- B(JumpIfNotHole), U8(11), //
- B(LdaConstant), U8(1), //
- B(Star), R(3), //
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1), //
- B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), //
- U8(0), //
- B(Ldar), R(2), //
- B(StaContextSlot), R(context), U8(4), //
- B(StaContextSlot), R(context), U8(4), //
- B(LdaUndefined), //
- B(Return) //
- },
- 2,
- {InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- {"const x = 10; x = 20; function f1() {return x;}",
- 4 * kPointerSize,
- 1,
- 52,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
- /* */ U8(1), //
- B(PushContext), R(1), //
- B(LdaTheHole), //
- B(StaContextSlot), R(context), U8(4), //
- B(CreateClosure), U8(0), U8(0), //
- B(Star), R(0), //
- B(StackCheck), //
- B(LdaSmi8), U8(10), //
- B(StaContextSlot), R(context), U8(4), //
- B(LdaSmi8), U8(20), //
- B(Star), R(2), //
- B(LdaContextSlot), R(context), U8(4), //
- B(JumpIfNotHole), U8(11), //
- B(LdaConstant), U8(1), //
- B(Star), R(3), //
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1), //
- B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), //
- U8(0), //
- B(Ldar), R(2), //
- B(StaContextSlot), R(context), U8(4), //
- B(LdaUndefined), //
- B(Return) //
- },
- 2,
- {InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kMixed);
+ const char* snippets[] = {
+ "const x = 10; function f1() {return x;}",
+
+ "const x = 10; function f1() {return x;} return x;",
+
+ "const x = (x = 20); function f1() {return x;}",
+
+ "const x = 10; x = 20; function f1() {return x;}",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets),
+ LoadGolden("ConstVariableContextSlot.golden"));
}
TEST(LetVariableContextSlot) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- int closure = Register::function_closure().index();
- int context = Register::current_context().index();
-
- // clang-format off
- ExpectedSnippet<InstanceType> snippets[] = {
- {"let x = 10; function f1() {return x;}",
- 2 * kPointerSize,
- 1,
- 24,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
- /* */ U8(1), //
- B(PushContext), R(1), //
- B(LdaTheHole), //
- B(StaContextSlot), R(context), U8(4), //
- B(CreateClosure), U8(0), U8(0), //
- B(Star), R(0), //
- B(StackCheck), //
- B(LdaSmi8), U8(10), //
- B(StaContextSlot), R(context), U8(4), //
- B(LdaUndefined), //
- B(Return) //
- },
- 1,
- {InstanceType::SHARED_FUNCTION_INFO_TYPE}},
- {"let x = 10; function f1() {return x;} return x;",
- 3 * kPointerSize,
- 1,
- 37,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
- /* */ U8(1), //
- B(PushContext), R(1), //
- B(LdaTheHole), //
- B(StaContextSlot), R(context), U8(4), //
- B(CreateClosure), U8(0), U8(0), //
- B(Star), R(0), //
- B(StackCheck), //
- B(LdaSmi8), U8(10), //
- B(StaContextSlot), R(context), U8(4), //
- B(LdaContextSlot), R(context), U8(4), //
- B(JumpIfNotHole), U8(11), //
- B(LdaConstant), U8(1), //
- B(Star), R(2), //
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1), //
- B(Return) //
- },
- 2,
- {InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- {"let x = (x = 20); function f1() {return x;}",
- 4 * kPointerSize,
- 1,
- 45,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
- /* */ U8(1), //
- B(PushContext), R(1), //
- B(LdaTheHole), //
- B(StaContextSlot), R(context), U8(4), //
- B(CreateClosure), U8(0), U8(0), //
- B(Star), R(0), //
- B(StackCheck), //
- B(LdaSmi8), U8(20), //
- B(Star), R(2), //
- B(LdaContextSlot), R(context), U8(4), //
- B(JumpIfNotHole), U8(11), //
- B(LdaConstant), U8(1), //
- B(Star), R(3), //
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1), //
- B(Ldar), R(2), //
- B(StaContextSlot), R(context), U8(4), //
- B(StaContextSlot), R(context), U8(4), //
- B(LdaUndefined), //
- B(Return) //
- },
- 2,
- {InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- {"let x = 10; x = 20; function f1() {return x;}",
- 4 * kPointerSize,
- 1,
- 47,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
- /* */ U8(1), //
- B(PushContext), R(1), //
- B(LdaTheHole), //
- B(StaContextSlot), R(context), U8(4), //
- B(CreateClosure), U8(0), U8(0), //
- B(Star), R(0), //
- B(StackCheck), //
- B(LdaSmi8), U8(10), //
- B(StaContextSlot), R(context), U8(4), //
- B(LdaSmi8), U8(20), //
- B(Star), R(2), //
- B(LdaContextSlot), R(context), U8(4), //
- B(JumpIfNotHole), U8(11), //
- B(LdaConstant), U8(1), //
- B(Star), R(3), //
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1), //
- B(Ldar), R(2), //
- B(StaContextSlot), R(context), U8(4), //
- B(LdaUndefined), //
- B(Return) //
- },
- 2,
- {InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kMixed);
+ const char* snippets[] = {
+ "let x = 10; function f1() {return x;}",
+
+ "let x = 10; function f1() {return x;} return x;",
+
+ "let x = (x = 20); function f1() {return x;}",
+
+ "let x = 10; x = 20; function f1() {return x;}",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets),
+ LoadGolden("LetVariableContextSlot.golden"));
}
TEST(DoExpression) {
bool old_flag = FLAG_harmony_do_expressions;
FLAG_harmony_do_expressions = true;
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- // clang-format off
- ExpectedSnippet<const char*> snippets[] = {
- {"var a = do { }; return a;",
- 2 * kPointerSize,
- 1,
- 6,
- {
- B(StackCheck), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(Return) //
- },
- 0},
- {"var a = do { var x = 100; }; return a;",
- 3 * kPointerSize,
- 1,
- 11,
- {
- B(StackCheck), //
- B(LdaSmi8), U8(100), //
- B(Star), R(1), //
- B(LdaUndefined), //
- B(Star), R(0), //
- B(Star), R(2), //
- B(Return) //
- },
- 0},
- {"while(true) { var a = 10; a = do { ++a; break; }; a = 20; }",
- 2 * kPointerSize,
- 1,
- 26,
- {
- B(StackCheck), //
- B(StackCheck), //
- B(LdaSmi8), U8(10), //
- B(Star), R(1), //
- B(ToNumber), //
- B(Inc), //
- B(Star), R(1), //
- B(Star), R(0), //
- B(Jump), U8(12), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(20), //
- B(Star), R(1), //
- B(Jump), U8(-21), //
- B(LdaUndefined), //
- B(Return), //
- },
- 0},
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kString);
+ const char* snippets[] = {
+ "var a = do { }; return a;",
+
+ "var a = do { var x = 100; }; return a;",
+
+ "while(true) { var a = 10; a = do { ++a; break; }; a = 20; }",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("DoExpression.golden"));
+
FLAG_harmony_do_expressions = old_flag;
}
TEST(WithStatement) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- int deep_elements_flags =
- ObjectLiteral::kFastElements | ObjectLiteral::kDisableMementos;
- int context = Register::current_context().index();
- int closure = Register::function_closure().index();
- int new_target = Register::new_target().index();
-
- // clang-format off
- ExpectedSnippet<InstanceType> snippets[] = {
- {"with ({x:42}) { return x; }",
- 5 * kPointerSize,
- 1,
- 47,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
- /* */ U8(1), //
- B(PushContext), R(0), //
- B(Ldar), THIS(1), //
- B(StaContextSlot), R(context), U8(4), //
- B(CreateMappedArguments), //
- B(StaContextSlot), R(context), U8(5), //
- B(Ldar), R(new_target), //
- B(StaContextSlot), R(context), U8(6), //
- B(StackCheck), //
- B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
- B(Star), R(2), //
- B(ToObject), //
- B(Star), R(3), //
- B(Ldar), R(closure), //
- B(Star), R(4), //
- B(CallRuntime), U16(Runtime::kPushWithContext), R(3), U8(2), //
- B(PushContext), R(1), //
- B(LdaLookupSlot), U8(1), //
- B(PopContext), R(0), //
- B(Return), //
- },
- 2,
- {InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kMixed);
+ const char* snippets[] = {
+ "with ({x:42}) { return x; }",
};
- // clang-format on
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("WithStatement.golden"));
}
TEST(DoDebugger) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- // clang-format off
- ExpectedSnippet<const char*> snippet = {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kString);
+ const char* snippets[] = {
"debugger;",
- 0,
- 1,
- 4,
- {
- B(StackCheck), //
- B(Debugger), //
- B(LdaUndefined), //
- B(Return) //
- },
- 0
};
- // clang-format on
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippet.code_snippet);
- CheckBytecodeArrayEqual(snippet, bytecode_array);
+ CHECK_EQ(BuildActual(printer, snippets), LoadGolden("DoDebugger.golden"));
}
-// TODO(rmcilroy): Update expectations after switch to
-// Runtime::kDefineDataPropertyInLiteral.
TEST(ClassDeclarations) {
- InitializedHandleScope handle_scope;
- BytecodeGeneratorHelper helper;
-
- int closure = Register::function_closure().index();
- int context = Register::current_context().index();
-
- // clang-format off
- ExpectedSnippet<InstanceType, 12> snippets[] = {
- {"class Person {\n"
- " constructor(name) { this.name = name; }\n"
- " speak() { console.log(this.name + ' is speaking.'); }\n"
- "}\n",
- 9 * kPointerSize,
- 1,
- 71,
- {
- B(LdaTheHole), //
- B(Star), R(1), //
- B(StackCheck), //
- B(LdaTheHole), //
- B(Star), R(0), //
- B(LdaTheHole), //
- B(Star), R(2), //
- B(CreateClosure), U8(0), U8(0), //
- B(Star), R(3), //
- B(LdaSmi8), U8(15), //
- B(Star), R(4), //
- B(LdaConstant), U8(1), //
- B(Star), R(5), //
- B(CallRuntime), U16(Runtime::kDefineClass), R(2), U8(4), //
- B(Star), R(2), //
- B(LoadIC), R(2), U8(2), U8(1), //
- B(Star), R(3), //
- B(Mov), R(3), R(4), //
- B(LdaConstant), U8(3), //
- B(Star), R(5), //
- B(CreateClosure), U8(4), U8(0), //
- B(Star), R(6), //
- B(LdaSmi8), U8(2), //
- B(Star), R(7), //
- B(LdaZero), //
- B(Star), R(8), //
- B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(4), U8(5),
- B(CallRuntime), U16(Runtime::kFinalizeClassDefinition), R(2), U8(2), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaUndefined), //
- B(Return) //
- },
- 5,
- { InstanceType::SHARED_FUNCTION_INFO_TYPE, kInstanceTypeDontCare,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE}},
- {"class person {\n"
- " constructor(name) { this.name = name; }\n"
- " speak() { console.log(this.name + ' is speaking.'); }\n"
- "}\n",
- 9 * kPointerSize,
- 1,
- 71,
- {
- B(LdaTheHole), //
- B(Star), R(1), //
- B(StackCheck), //
- B(LdaTheHole), //
- B(Star), R(0), //
- B(LdaTheHole), //
- B(Star), R(2), //
- B(CreateClosure), U8(0), U8(0), //
- B(Star), R(3), //
- B(LdaSmi8), U8(15), //
- B(Star), R(4), //
- B(LdaConstant), U8(1), //
- B(Star), R(5), //
- B(CallRuntime), U16(Runtime::kDefineClass), R(2), U8(4), //
- B(Star), R(2), //
- B(LoadIC), R(2), U8(2), U8(1), //
- B(Star), R(3), //
- B(Mov), R(3), R(4), //
- B(LdaConstant), U8(3), //
- B(Star), R(5), //
- B(CreateClosure), U8(4), U8(0), //
- B(Star), R(6), //
- B(LdaSmi8), U8(2), //
- B(Star), R(7), //
- B(LdaZero), //
- B(Star), R(8), //
- B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(4), U8(5),
- B(CallRuntime), U16(Runtime::kFinalizeClassDefinition), R(2), U8(2), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaUndefined), //
- B(Return) //
- },
- 5,
- { InstanceType::SHARED_FUNCTION_INFO_TYPE, kInstanceTypeDontCare,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE}},
- {"var n0 = 'a';"
- "var n1 = 'b';"
- "class N {\n"
- " [n0]() { return n0; }\n"
- " static [n1]() { return n1; }\n"
- "}\n",
- 10 * kPointerSize,
- 1,
- 125,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
- /* */ U8(1), //
- B(PushContext), R(2), //
- B(LdaTheHole), //
- B(Star), R(1), //
- B(StackCheck), //
- B(LdaConstant), U8(0), //
- B(StaContextSlot), R(context), U8(4), //
- B(LdaConstant), U8(1), //
- B(StaContextSlot), R(context), U8(5), //
- B(LdaTheHole), //
- B(Star), R(0), //
- B(LdaTheHole), //
- B(Star), R(3), //
- B(CreateClosure), U8(2), U8(0), //
- B(Star), R(4), //
- B(LdaSmi8), U8(41), //
- B(Star), R(5), //
- B(LdaSmi8), U8(107), //
- B(Star), R(6), //
- B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4), //
- B(Star), R(3), //
- B(LoadIC), R(3), U8(3), U8(1), //
- B(Star), R(4), //
- B(Mov), R(4), R(5), //
- B(LdaContextSlot), R(context), U8(4), //
- B(ToName), //
- B(Star), R(6), //
- B(CreateClosure), U8(4), U8(0), //
- B(Star), R(7), //
- B(LdaSmi8), U8(2), //
- B(Star), R(8), //
- B(LdaSmi8), U8(1), //
- B(Star), R(9), //
- B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(5), U8(5),
- B(Mov), R(3), R(5), //
- B(LdaContextSlot), R(context), U8(5), //
- B(ToName), //
- B(Star), R(6), //
- B(LdaConstant), U8(3), //
- B(TestEqualStrict), R(6), //
- B(JumpIfFalse), U8(7), //
- B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), //
- /* */ R(0), U8(0), //
- B(CreateClosure), U8(5), U8(0), //
- B(Star), R(7), //
- B(LdaSmi8), U8(1), //
- B(Star), R(9), //
- B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(5), U8(5),
- B(CallRuntime), U16(Runtime::kFinalizeClassDefinition), R(3), U8(2), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaUndefined), //
- B(Return), //
- },
- 6,
- { InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE}},
- {"var count = 0;\n"
- "class C { constructor() { count++; }}\n"
- "return new C();\n",
- 10 * kPointerSize,
- 1,
- 74,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1), //
- B(PushContext), R(2), //
- B(LdaTheHole), //
- B(Star), R(1), //
- B(StackCheck), //
- B(LdaZero), //
- B(StaContextSlot), R(context), U8(4), //
- B(LdaTheHole), //
- B(Star), R(0), //
- B(LdaTheHole), //
- B(Star), R(3), //
- B(CreateClosure), U8(0), U8(0), //
- B(Star), R(4), //
- B(LdaSmi8), U8(30), //
- B(Star), R(5), //
- B(LdaSmi8), U8(67), //
- B(Star), R(6), //
- B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4), //
- B(Star), R(3), //
- B(LoadIC), R(3), U8(1), U8(1), //
- B(Star), R(4), //
- B(CallRuntime), U16(Runtime::kFinalizeClassDefinition), R(3), U8(2), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(JumpIfNotHole), U8(11), //
- B(LdaConstant), U8(2), //
- B(Star), R(4), //
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1), //
- B(Star), R(3), //
- B(New), R(3), R(0), U8(0), //
- B(Return), //
- },
- 3,
- { InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
- };
- // clang-format on
-
- for (size_t i = 0; i < arraysize(snippets); i++) {
- Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
- CheckBytecodeArrayEqual(snippets[i], bytecode_array);
- }
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kMixed);
+ const char* snippets[] = {
+ "class Person {\n"
+ " constructor(name) { this.name = name; }\n"
+ " speak() { console.log(this.name + ' is speaking.'); }\n"
+ "}",
+
+ "class person {\n"
+ " constructor(name) { this.name = name; }\n"
+ " speak() { console.log(this.name + ' is speaking.'); }\n"
+ "}",
+
+ "var n0 = 'a';\n"
+ "var n1 = 'b';\n"
+ "class N {\n"
+ " [n0]() { return n0; }\n"
+ " static [n1]() { return n1; }\n"
+ "}",
+
+ "var count = 0;\n"
+ "class C { constructor() { count++; }}\n"
+ "return new C();\n",
+ };
+
+ CHECK_EQ(BuildActual(printer, snippets),
+ LoadGolden("ClassDeclarations.golden"));
+}
+
+TEST(ClassAndSuperClass) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kMixed);
+ printer.set_wrap(false);
+ printer.set_test_function_name("test");
+ const char* snippets[] = {
+ "var test;\n"
+ "(function() {\n"
+ " class A {\n"
+ " method() { return 2; }\n"
+ " }\n"
+ " class B extends A {\n"
+ " method() { return super.method() + 1; }\n"
+ " }\n"
+ " test = new B().method;\n"
+ " test();\n"
+ "})();\n",
+
+ "var test;\n"
+ "(function() {\n"
+ " class A {\n"
+ " get x() { return 1; }\n"
+ " set x(val) { return; }\n"
+ " }\n"
+ " class B extends A {\n"
+ " method() { super.x = 2; return super.x; }\n"
+ " }\n"
+ " test = new B().method;\n"
+ " test();\n"
+ "})();\n",
+
+ "var test;\n"
+ "(function() {\n"
+ " class A {\n"
+ " constructor(x) { this.x_ = x; }\n"
+ " }\n"
+ " class B extends A {\n"
+ " constructor() { super(1); this.y_ = 2; }\n"
+ " }\n"
+ " test = new B().constructor;\n"
+ "})();\n",
+
+ "var test;\n"
+ "(function() {\n"
+ " class A {\n"
+ " constructor() { this.x_ = 1; }\n"
+ " }\n"
+ " class B extends A {\n"
+ " constructor() { super(); this.y_ = 2; }\n"
+ " }\n"
+ " test = new B().constructor;\n"
+ "})();\n",
+ };
+
+ CHECK_EQ(BuildActual(printer, snippets),
+ LoadGolden("ClassAndSuperClass.golden"));
}
-// TODO(oth): Add tests for super keyword.
-
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc b/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
new file mode 100644
index 0000000000..e4cf809ad7
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
@@ -0,0 +1,96 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/interpreter/interpreter-intrinsics.h"
+#include "test/cctest/interpreter/interpreter-tester.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+namespace {
+
+class InvokeIntrinsicHelper {
+ public:
+ InvokeIntrinsicHelper(Isolate* isolate, Zone* zone,
+ Runtime::FunctionId function_id)
+ : isolate_(isolate),
+ zone_(zone),
+ factory_(isolate->factory()),
+ function_id_(function_id) {}
+
+ template <class... A>
+ Handle<Object> Invoke(A... args) {
+ CHECK(IntrinsicsHelper::IsSupported(function_id_));
+ BytecodeArrayBuilder builder(isolate_, zone_, sizeof...(args), 0, 0);
+ builder.CallRuntime(function_id_, builder.Parameter(0), sizeof...(args))
+ .Return();
+ InterpreterTester tester(isolate_, builder.ToBytecodeArray());
+ auto callable = tester.GetCallable<Handle<Object>>();
+ return callable(args...).ToHandleChecked();
+ }
+
+ Handle<Object> NewObject(const char* script) {
+ return v8::Utils::OpenHandle(*CompileRun(script));
+ }
+
+ Handle<Object> Undefined() { return factory_->undefined_value(); }
+ Handle<Object> Null() { return factory_->null_value(); }
+
+ private:
+ Isolate* isolate_;
+ Zone* zone_;
+ Factory* factory_;
+ Runtime::FunctionId function_id_;
+};
+
+} // namespace
+
+TEST(IsJSReceiver) {
+ HandleAndZoneScope handles;
+
+ InvokeIntrinsicHelper helper(handles.main_isolate(), handles.main_zone(),
+ Runtime::kInlineIsJSReceiver);
+ Factory* factory = handles.main_isolate()->factory();
+
+ CHECK_EQ(*factory->true_value(),
+ *helper.Invoke(helper.NewObject("new Date()")));
+ CHECK_EQ(*factory->true_value(),
+ *helper.Invoke(helper.NewObject("(function() {})")));
+ CHECK_EQ(*factory->true_value(), *helper.Invoke(helper.NewObject("([1])")));
+ CHECK_EQ(*factory->true_value(), *helper.Invoke(helper.NewObject("({})")));
+ CHECK_EQ(*factory->true_value(), *helper.Invoke(helper.NewObject("(/x/)")));
+ CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.Undefined()));
+ CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.Null()));
+ CHECK_EQ(*factory->false_value(),
+ *helper.Invoke(helper.NewObject("'string'")));
+ CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.NewObject("42")));
+}
+
+TEST(IsArray) {
+ HandleAndZoneScope handles;
+
+ InvokeIntrinsicHelper helper(handles.main_isolate(), handles.main_zone(),
+ Runtime::kInlineIsArray);
+ Factory* factory = handles.main_isolate()->factory();
+
+ CHECK_EQ(*factory->false_value(),
+ *helper.Invoke(helper.NewObject("new Date()")));
+ CHECK_EQ(*factory->false_value(),
+ *helper.Invoke(helper.NewObject("(function() {})")));
+ CHECK_EQ(*factory->true_value(), *helper.Invoke(helper.NewObject("([1])")));
+ CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.NewObject("({})")));
+ CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.NewObject("(/x/)")));
+ CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.Undefined()));
+ CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.Null()));
+ CHECK_EQ(*factory->false_value(),
+ *helper.Invoke(helper.NewObject("'string'")));
+ CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.NewObject("42")));
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter.cc b/deps/v8/test/cctest/interpreter/test-interpreter.cc
index 69cf0e18bd..c8dc776010 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter.cc
@@ -7,8 +7,10 @@
#include "src/execution.h"
#include "src/handles.h"
#include "src/interpreter/bytecode-array-builder.h"
+#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/interpreter.h"
#include "test/cctest/cctest.h"
+#include "test/cctest/interpreter/interpreter-tester.h"
#include "test/cctest/test-feedback-vector.h"
namespace v8 {
@@ -16,160 +18,6 @@ namespace internal {
namespace interpreter {
-static MaybeHandle<Object> CallInterpreter(Isolate* isolate,
- Handle<JSFunction> function) {
- return Execution::Call(isolate, function,
- isolate->factory()->undefined_value(), 0, nullptr);
-}
-
-
-template <class... A>
-static MaybeHandle<Object> CallInterpreter(Isolate* isolate,
- Handle<JSFunction> function,
- A... args) {
- Handle<Object> argv[] = { args... };
- return Execution::Call(isolate, function,
- isolate->factory()->undefined_value(), sizeof...(args),
- argv);
-}
-
-
-template <class... A>
-class InterpreterCallable {
- public:
- InterpreterCallable(Isolate* isolate, Handle<JSFunction> function)
- : isolate_(isolate), function_(function) {}
- virtual ~InterpreterCallable() {}
-
- MaybeHandle<Object> operator()(A... args) {
- return CallInterpreter(isolate_, function_, args...);
- }
-
- private:
- Isolate* isolate_;
- Handle<JSFunction> function_;
-};
-
-
-static const char* kFunctionName = "f";
-
-
-class InterpreterTester {
- public:
- InterpreterTester(Isolate* isolate, const char* source,
- MaybeHandle<BytecodeArray> bytecode,
- MaybeHandle<TypeFeedbackVector> feedback_vector,
- const char* filter)
- : isolate_(isolate),
- source_(source),
- bytecode_(bytecode),
- feedback_vector_(feedback_vector) {
- i::FLAG_ignition = true;
- i::FLAG_always_opt = false;
- // Set ignition filter flag via SetFlagsFromString to avoid double-free
- // (or potential leak with StrDup() based on ownership confusion).
- ScopedVector<char> ignition_filter(64);
- SNPrintF(ignition_filter, "--ignition-filter=%s", filter);
- FlagList::SetFlagsFromString(ignition_filter.start(),
- ignition_filter.length());
- // Ensure handler table is generated.
- isolate->interpreter()->Initialize();
- }
-
- InterpreterTester(Isolate* isolate, Handle<BytecodeArray> bytecode,
- MaybeHandle<TypeFeedbackVector> feedback_vector =
- MaybeHandle<TypeFeedbackVector>(),
- const char* filter = kFunctionName)
- : InterpreterTester(isolate, nullptr, bytecode, feedback_vector, filter) {
- }
-
-
- InterpreterTester(Isolate* isolate, const char* source,
- const char* filter = kFunctionName)
- : InterpreterTester(isolate, source, MaybeHandle<BytecodeArray>(),
- MaybeHandle<TypeFeedbackVector>(), filter) {}
-
- virtual ~InterpreterTester() {}
-
- template <class... A>
- InterpreterCallable<A...> GetCallable() {
- return InterpreterCallable<A...>(isolate_, GetBytecodeFunction<A...>());
- }
-
- Local<Message> CheckThrowsReturnMessage() {
- TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate_));
- auto callable = GetCallable<>();
- MaybeHandle<Object> no_result = callable();
- CHECK(isolate_->has_pending_exception());
- CHECK(try_catch.HasCaught());
- CHECK(no_result.is_null());
- isolate_->OptionalRescheduleException(true);
- CHECK(!try_catch.Message().IsEmpty());
- return try_catch.Message();
- }
-
- static Handle<Object> NewObject(const char* script) {
- return v8::Utils::OpenHandle(*CompileRun(script));
- }
-
- static Handle<String> GetName(Isolate* isolate, const char* name) {
- Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(name);
- return isolate->factory()->string_table()->LookupString(isolate, result);
- }
-
- static std::string SourceForBody(const char* body) {
- return "function " + function_name() + "() {\n" + std::string(body) + "\n}";
- }
-
- static std::string function_name() {
- return std::string(kFunctionName);
- }
-
- private:
- Isolate* isolate_;
- const char* source_;
- MaybeHandle<BytecodeArray> bytecode_;
- MaybeHandle<TypeFeedbackVector> feedback_vector_;
-
- template <class... A>
- Handle<JSFunction> GetBytecodeFunction() {
- Handle<JSFunction> function;
- if (source_) {
- CompileRun(source_);
- v8::Local<v8::Context> context =
- v8::Isolate::GetCurrent()->GetCurrentContext();
- Local<Function> api_function =
- Local<Function>::Cast(CcTest::global()
- ->Get(context, v8_str(kFunctionName))
- .ToLocalChecked());
- function = Handle<JSFunction>::cast(v8::Utils::OpenHandle(*api_function));
- } else {
- int arg_count = sizeof...(A);
- std::string source("(function " + function_name() + "(");
- for (int i = 0; i < arg_count; i++) {
- source += i == 0 ? "a" : ", a";
- }
- source += "){})";
- function = Handle<JSFunction>::cast(v8::Utils::OpenHandle(
- *v8::Local<v8::Function>::Cast(CompileRun(source.c_str()))));
- function->ReplaceCode(
- *isolate_->builtins()->InterpreterEntryTrampoline());
- }
-
- if (!bytecode_.is_null()) {
- function->shared()->set_function_data(*bytecode_.ToHandleChecked());
- }
- if (!feedback_vector_.is_null()) {
- function->shared()->set_feedback_vector(
- *feedback_vector_.ToHandleChecked());
- }
- return function;
- }
-
- DISALLOW_COPY_AND_ASSIGN(InterpreterTester);
-};
-
-
TEST(InterpreterReturn) {
HandleAndZoneScope handles;
Handle<Object> undefined_value =
@@ -731,7 +579,7 @@ TEST(InterpreterLoadNamedProperty) {
HandleAndZoneScope handles;
i::Isolate* isolate = handles.main_isolate();
i::Factory* factory = isolate->factory();
- i::Zone zone;
+ i::Zone zone(isolate->allocator());
i::FeedbackVectorSpec feedback_spec(&zone);
i::FeedbackVectorSlot slot = feedback_spec.AddLoadICSlot();
@@ -784,7 +632,7 @@ TEST(InterpreterLoadKeyedProperty) {
HandleAndZoneScope handles;
i::Isolate* isolate = handles.main_isolate();
i::Factory* factory = isolate->factory();
- i::Zone zone;
+ i::Zone zone(isolate->allocator());
i::FeedbackVectorSpec feedback_spec(&zone);
i::FeedbackVectorSlot slot = feedback_spec.AddKeyedLoadICSlot();
@@ -826,7 +674,7 @@ TEST(InterpreterStoreNamedProperty) {
HandleAndZoneScope handles;
i::Isolate* isolate = handles.main_isolate();
i::Factory* factory = isolate->factory();
- i::Zone zone;
+ i::Zone zone(isolate->allocator());
i::FeedbackVectorSpec feedback_spec(&zone);
i::FeedbackVectorSlot slot = feedback_spec.AddStoreICSlot();
@@ -885,7 +733,7 @@ TEST(InterpreterStoreKeyedProperty) {
HandleAndZoneScope handles;
i::Isolate* isolate = handles.main_isolate();
i::Factory* factory = isolate->factory();
- i::Zone zone;
+ i::Zone zone(isolate->allocator());
i::FeedbackVectorSpec feedback_spec(&zone);
i::FeedbackVectorSlot slot = feedback_spec.AddKeyedStoreICSlot();
@@ -932,7 +780,7 @@ static void TestInterpreterCall(TailCallMode tail_call_mode) {
HandleAndZoneScope handles;
i::Isolate* isolate = handles.main_isolate();
i::Factory* factory = isolate->factory();
- i::Zone zone;
+ i::Zone zone(isolate->allocator());
i::FeedbackVectorSpec feedback_spec(&zone);
i::FeedbackVectorSlot slot = feedback_spec.AddLoadICSlot();
@@ -1145,7 +993,6 @@ TEST(InterpreterConditionalJumps) {
CHECK_EQ(Smi::cast(*return_value)->value(), 7);
}
-
TEST(InterpreterConditionalJumps2) {
// TODO(oth): Add tests for all conditional jumps near and far.
HandleAndZoneScope handles;
@@ -1179,12 +1026,96 @@ TEST(InterpreterConditionalJumps2) {
CHECK_EQ(Smi::cast(*return_value)->value(), 7);
}
+TEST(InterpreterJumpConstantWith16BitOperand) {
+ HandleAndZoneScope handles;
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
+ 0, 257);
+ Register reg(0), scratch(256);
+ BytecodeLabel done;
+
+ builder.LoadLiteral(Smi::FromInt(0));
+ builder.StoreAccumulatorInRegister(reg);
+ // Consume all 8-bit operands
+ for (int i = 1; i <= 256; i++) {
+ builder.LoadLiteral(handles.main_isolate()->factory()->NewNumber(i));
+ builder.BinaryOperation(Token::Value::ADD, reg);
+ builder.StoreAccumulatorInRegister(reg);
+ }
+ builder.Jump(&done);
+
+ // Emit more than 16-bit immediate operands worth of code to jump over.
+ for (int i = 0; i < 6600; i++) {
+ builder.LoadLiteral(Smi::FromInt(0)); // 1-byte
+ builder.BinaryOperation(Token::Value::ADD, scratch); // 4-bytes
+ builder.StoreAccumulatorInRegister(scratch); // 4-bytes
+ builder.MoveRegister(scratch, reg); // 6-bytes
+ }
+ builder.Bind(&done);
+ builder.LoadAccumulatorWithRegister(reg);
+ builder.Return();
-static const Token::Value kComparisonTypes[] = {
- Token::Value::EQ, Token::Value::NE, Token::Value::EQ_STRICT,
- Token::Value::NE_STRICT, Token::Value::LT, Token::Value::LTE,
- Token::Value::GT, Token::Value::GTE};
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ BytecodeArrayIterator iterator(bytecode_array);
+
+ bool found_16bit_constant_jump = false;
+ while (!iterator.done()) {
+ if (iterator.current_bytecode() == Bytecode::kJumpConstant &&
+ iterator.current_operand_scale() == OperandScale::kDouble) {
+ found_16bit_constant_jump = true;
+ break;
+ }
+ iterator.Advance();
+ }
+ CHECK(found_16bit_constant_jump);
+ InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK_EQ(Smi::cast(*return_value)->value(), 256.0 / 2 * (1 + 256));
+}
+
+TEST(InterpreterJumpWith32BitOperand) {
+ HandleAndZoneScope handles;
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
+ 0, 1);
+ Register reg(0);
+ BytecodeLabel done;
+
+ builder.LoadLiteral(Smi::FromInt(0));
+ builder.StoreAccumulatorInRegister(reg);
+ // Consume all 16-bit constant pool entries
+ for (int i = 1; i <= 65536; i++) {
+ builder.LoadLiteral(handles.main_isolate()->factory()->NewNumber(i));
+ }
+ builder.Jump(&done);
+ builder.LoadLiteral(Smi::FromInt(0));
+ builder.Bind(&done);
+ builder.Return();
+
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ BytecodeArrayIterator iterator(bytecode_array);
+
+ bool found_32bit_jump = false;
+ while (!iterator.done()) {
+ if (iterator.current_bytecode() == Bytecode::kJump &&
+ iterator.current_operand_scale() == OperandScale::kQuadruple) {
+ found_32bit_jump = true;
+ break;
+ }
+ iterator.Advance();
+ }
+ CHECK(found_32bit_jump);
+
+ InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK_EQ(Smi::cast(*return_value)->value(), 65536.0);
+}
+
+static const Token::Value kComparisonTypes[] = {
+ Token::Value::EQ, Token::Value::NE, Token::Value::EQ_STRICT,
+ Token::Value::LT, Token::Value::LTE, Token::Value::GT,
+ Token::Value::GTE};
template <typename T>
bool CompareC(Token::Value op, T lhs, T rhs, bool types_differed = false) {
@@ -1379,9 +1310,84 @@ TEST(InterpreterMixedComparisons) {
}
}
+TEST(InterpreterStrictNotEqual) {
+ HandleAndZoneScope handles;
+ i::Factory* factory = handles.main_isolate()->factory();
+ const char* code_snippet =
+ "function f(lhs, rhs) {\n"
+ " return lhs !== rhs;\n"
+ "}\n"
+ "f(0, 0);\n";
+ InterpreterTester tester(handles.main_isolate(), code_snippet);
+ auto callable = tester.GetCallable<Handle<Object>, Handle<Object>>();
+
+ // Test passing different types.
+ const char* inputs[] = {"-1.77", "-40.333", "0.01", "55.77e5", "2.01"};
+ i::UnicodeCache unicode_cache;
+ for (size_t i = 0; i < arraysize(inputs); i++) {
+ for (size_t j = 0; j < arraysize(inputs); j++) {
+ double lhs = StringToDouble(&unicode_cache, inputs[i],
+ i::ConversionFlags::NO_FLAGS);
+ double rhs = StringToDouble(&unicode_cache, inputs[j],
+ i::ConversionFlags::NO_FLAGS);
+ Handle<Object> lhs_obj = factory->NewNumber(lhs);
+ Handle<Object> rhs_obj = factory->NewStringFromAsciiChecked(inputs[j]);
+
+ Handle<Object> return_value =
+ callable(lhs_obj, rhs_obj).ToHandleChecked();
+ CHECK(return_value->IsBoolean());
+ CHECK_EQ(return_value->BooleanValue(),
+ CompareC(Token::Value::NE_STRICT, lhs, rhs, true));
+ }
+ }
+
+ // Test passing string types.
+ const char* inputs_str[] = {"A", "abc", "z", "", "Foo!", "Foo"};
+ for (size_t i = 0; i < arraysize(inputs_str); i++) {
+ for (size_t j = 0; j < arraysize(inputs_str); j++) {
+ Handle<Object> lhs_obj =
+ factory->NewStringFromAsciiChecked(inputs_str[i]);
+ Handle<Object> rhs_obj =
+ factory->NewStringFromAsciiChecked(inputs_str[j]);
+
+ Handle<Object> return_value =
+ callable(lhs_obj, rhs_obj).ToHandleChecked();
+ CHECK(return_value->IsBoolean());
+ CHECK_EQ(return_value->BooleanValue(),
+ CompareC(Token::Value::NE_STRICT, inputs_str[i], inputs_str[j]));
+ }
+ }
+
+ // Test passing doubles.
+ double inputs_number[] = {std::numeric_limits<double>::min(),
+ std::numeric_limits<double>::max(),
+ -0.001,
+ 0.01,
+ 0.1000001,
+ 1e99,
+ -1e-99};
+ for (size_t i = 0; i < arraysize(inputs_number); i++) {
+ for (size_t j = 0; j < arraysize(inputs_number); j++) {
+ Handle<Object> lhs_obj = factory->NewNumber(inputs_number[i]);
+ Handle<Object> rhs_obj = factory->NewNumber(inputs_number[j]);
+
+ Handle<Object> return_value =
+ callable(lhs_obj, rhs_obj).ToHandleChecked();
+ CHECK(return_value->IsBoolean());
+ CHECK_EQ(return_value->BooleanValue(),
+ CompareC(Token::Value::NE_STRICT, inputs_number[i],
+ inputs_number[j]));
+ }
+ }
+}
TEST(InterpreterInstanceOf) {
HandleAndZoneScope handles;
+ // TODO(4447): The new ES6 'instanceof' operator is fully desugared in the
+ // parser and the Token::INSTANCEOF is not needed anymore. This test only
+ // makes sense with --no-harmony-instanceof and can be removed once we
+ // deprecate the ability to switch to old skool ES5 'instanceof' for good.
+ FLAG_harmony_instanceof = false;
i::Factory* factory = handles.main_isolate()->factory();
Handle<i::String> name = factory->NewStringFromAsciiChecked("cons");
Handle<i::JSFunction> func = factory->NewFunction(name);
@@ -1566,6 +1572,24 @@ TEST(InterpreterCallRuntime) {
CHECK_EQ(Smi::cast(*return_val), Smi::FromInt(55));
}
+TEST(InterpreterInvokeIntrinsic) {
+ HandleAndZoneScope handles;
+
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
+ 0, 2);
+ builder.LoadLiteral(Smi::FromInt(15))
+ .StoreAccumulatorInRegister(Register(0))
+ .CallRuntime(Runtime::kInlineIsArray, Register(0), 1)
+ .Return();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+
+ InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ auto callable = tester.GetCallable<>();
+
+ Handle<Object> return_val = callable().ToHandleChecked();
+ CHECK(return_val->IsBoolean());
+ CHECK_EQ(return_val->BooleanValue(), false);
+}
TEST(InterpreterFunctionLiteral) {
HandleAndZoneScope handles;
@@ -2007,6 +2031,15 @@ TEST(InterpreterTryFinally) {
" try { a = 3; throw 23; } finally { a = 4; }"
"} catch(e) { a = a + e; } return a;",
factory->NewStringFromStaticChars("R27")),
+ std::make_pair("var func_name;"
+ "function tcf2(a) {"
+ " try { throw new Error('boom');} "
+ " catch(e) {return 153; } "
+ " finally {func_name = tcf2.name;}"
+ "}"
+ "tcf2();"
+ "return func_name;",
+ factory->NewStringFromStaticChars("Rtcf2")),
};
const char* try_wrapper =
@@ -3857,7 +3890,7 @@ TEST(InterpreterClassLiterals) {
for (size_t i = 0; i < arraysize(examples); ++i) {
std::string source(InterpreterTester::SourceForBody(examples[i].first));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(handles.main_isolate(), source.c_str(), "*");
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -3917,7 +3950,7 @@ TEST(InterpreterClassAndSuperClass) {
for (size_t i = 0; i < arraysize(examples); ++i) {
std::string source(InterpreterTester::SourceForBody(examples[i].first));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(handles.main_isolate(), source.c_str(), "*");
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*examples[i].second));
@@ -4061,29 +4094,6 @@ TEST(InterpreterConstInLookupContextChain) {
Handle<i::Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*const_decl[i].second));
}
-
- // Tests for Legacy constant.
- bool old_flag_legacy_const = FLAG_legacy_const;
- FLAG_legacy_const = true;
-
- std::pair<const char*, Handle<Object>> legacy_const_decl[] = {
- {"return outerConst = 23;", handle(Smi::FromInt(23), isolate)},
- {"outerConst = 30; return outerConst;",
- handle(Smi::FromInt(10), isolate)},
- };
-
- for (size_t i = 0; i < arraysize(legacy_const_decl); i++) {
- std::string script = std::string(prologue) +
- std::string(legacy_const_decl[i].first) +
- std::string(epilogue);
- InterpreterTester tester(handles.main_isolate(), script.c_str(), "*");
- auto callable = tester.GetCallable<>();
-
- Handle<i::Object> return_value = callable().ToHandleChecked();
- CHECK(return_value->SameValue(*legacy_const_decl[i].second));
- }
-
- FLAG_legacy_const = old_flag_legacy_const;
}
TEST(InterpreterIllegalConstDeclaration) {
@@ -4129,39 +4139,6 @@ TEST(InterpreterIllegalConstDeclaration) {
}
}
-TEST(InterpreterLegacyConstDeclaration) {
- bool old_flag_legacy_const = FLAG_legacy_const;
- FLAG_legacy_const = true;
-
- HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
-
- std::pair<const char*, Handle<Object>> const_decl[] = {
- {"const x = (x = 10) + 3; return x;", handle(Smi::FromInt(13), isolate)},
- {"const x = 10; x = 20; return x;", handle(Smi::FromInt(10), isolate)},
- {"var a = 10;\n"
- "for (var i = 0; i < 10; ++i) {\n"
- " const x = i;\n" // Legacy constants are not block scoped.
- " a = a + x;\n"
- "}\n"
- "return a;\n",
- handle(Smi::FromInt(10), isolate)},
- {"const x = 20; eval('x = 10;'); return x;",
- handle(Smi::FromInt(20), isolate)},
- };
-
- for (size_t i = 0; i < arraysize(const_decl); i++) {
- std::string source(InterpreterTester::SourceForBody(const_decl[i].first));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
- auto callable = tester.GetCallable<>();
-
- Handle<i::Object> return_value = callable().ToHandleChecked();
- CHECK(return_value->SameValue(*const_decl[i].second));
- }
-
- FLAG_legacy_const = old_flag_legacy_const;
-}
-
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-accessors.cc b/deps/v8/test/cctest/test-accessors.cc
index 90ea08144d..67803eeed6 100644
--- a/deps/v8/test/cctest/test-accessors.cc
+++ b/deps/v8/test/cctest/test-accessors.cc
@@ -774,3 +774,28 @@ TEST(PrototypeGetterAccessCheck) {
CHECK(try_catch.HasCaught());
}
}
+
+static void check_receiver(Local<String> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CHECK(info.This()->IsObject());
+}
+
+TEST(Regress609134) {
+ v8::internal::FLAG_allow_natives_syntax = true;
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ auto fun_templ = v8::FunctionTemplate::New(isolate);
+ fun_templ->InstanceTemplate()->SetNativeDataProperty(v8_str("foo"),
+ check_receiver);
+
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("Fun"),
+ fun_templ->GetFunction(env.local()).ToLocalChecked())
+ .FromJust());
+
+ CompileRun(
+ "var f = new Fun();"
+ "Number.prototype.__proto__ = f;"
+ "[42][0].foo");
+}
diff --git a/deps/v8/test/cctest/test-api-interceptors.cc b/deps/v8/test/cctest/test-api-interceptors.cc
index ac54ededca..a1894fad1a 100644
--- a/deps/v8/test/cctest/test-api-interceptors.cc
+++ b/deps/v8/test/cctest/test-api-interceptors.cc
@@ -3888,3 +3888,28 @@ THREADED_TEST(NonMaskingInterceptorGlobalEvalRegression) {
"eval('obj.x');",
9);
}
+
+static void CheckReceiver(Local<Name> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CHECK(info.This()->IsObject());
+}
+
+TEST(Regress609134Interceptor) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ auto fun_templ = v8::FunctionTemplate::New(isolate);
+ fun_templ->InstanceTemplate()->SetHandler(
+ v8::NamedPropertyHandlerConfiguration(CheckReceiver));
+
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("Fun"),
+ fun_templ->GetFunction(env.local()).ToLocalChecked())
+ .FromJust());
+
+ CompileRun(
+ "var f = new Fun();"
+ "Number.prototype.__proto__ = f;"
+ "var a = 42;"
+ "for (var i = 0; i<3; i++) { a.foo; }");
+}
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 449d8dd66d..220b0cd077 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -2989,7 +2989,6 @@ THREADED_TEST(SymbolTemplateProperties) {
THREADED_TEST(PrivatePropertiesOnProxies) {
- i::FLAG_harmony_proxies = true;
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
@@ -13089,7 +13088,6 @@ static void ShouldThrowOnErrorSetter(Local<Name> name, Local<v8::Value> value,
THREADED_TEST(AccessorShouldThrowOnError) {
- i::FLAG_strong_mode = true;
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
@@ -13122,14 +13120,6 @@ THREADED_TEST(AccessorShouldThrowOnError) {
value = global->Get(context.local(), v8_str("should_throw_setter"))
.ToLocalChecked();
CHECK(value->IsTrue());
-
- // STRONG mode
- value = v8_compile("'use strong';o.f")->Run(context.local()).ToLocalChecked();
- CHECK(value->IsFalse());
- v8_compile("'use strong'; o.f = 153")->Run(context.local()).ToLocalChecked();
- value = global->Get(context.local(), v8_str("should_throw_setter"))
- .ToLocalChecked();
- CHECK(value->IsTrue());
}
@@ -13185,7 +13175,6 @@ static void ShouldThrowOnErrorPropertyEnumerator(
THREADED_TEST(InterceptorShouldThrowOnError) {
- i::FLAG_strong_mode = true;
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
@@ -13242,21 +13231,6 @@ THREADED_TEST(InterceptorShouldThrowOnError) {
value = global->Get(context.local(), v8_str("should_throw_enumerator"))
.ToLocalChecked();
CHECK(value->IsFalse());
-
- // STRONG mode
- value = v8_compile("'use strong';o.f")->Run(context.local()).ToLocalChecked();
- CHECK(value->IsFalse());
- v8_compile("'use strong'; o.f = 153")->Run(context.local()).ToLocalChecked();
- value = global->Get(context.local(), v8_str("should_throw_setter"))
- .ToLocalChecked();
- CHECK(value->IsTrue());
-
- v8_compile("'use strong'; Object.getOwnPropertyNames(o)")
- ->Run(context.local())
- .ToLocalChecked();
- value = global->Get(context.local(), v8_str("should_throw_enumerator"))
- .ToLocalChecked();
- CHECK(value->IsFalse());
}
@@ -13338,8 +13312,6 @@ THREADED_TEST(ObjectProtoToString) {
TEST(ObjectProtoToStringES6) {
- // TODO(dslomov, caitp): merge into ObjectProtoToString test once shipped.
- i::FLAG_harmony_tostring = true;
LocalContext context;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
@@ -13534,7 +13506,7 @@ THREADED_TEST(ObjectGetConstructorName) {
"function Child() {};"
"Child.prototype = new Parent();"
"Child.prototype.constructor = Child;"
- "var outer = { inner: function() { } };"
+ "var outer = { inner: (0, function() { }) };"
"var p = new Parent();"
"var c = new Child();"
"var x = new outer.inner();"
@@ -14557,12 +14529,17 @@ static int move_events = 0;
static bool FunctionNameIs(const char* expected,
const v8::JitCodeEvent* event) {
// Log lines for functions are of the general form:
- // "LazyCompile:<type><function_name>", where the type is one of
- // "*", "~" or "".
- static const char kPreamble[] = "LazyCompile:";
- static size_t kPreambleLen = sizeof(kPreamble) - 1;
+ // "LazyCompile:<type><function_name>" or Function:<type><function_name>,
+ // where the type is one of "*", "~" or "".
+ static const char* kPreamble;
+ if (!i::FLAG_lazy || (i::FLAG_ignition && i::FLAG_ignition_eager)) {
+ kPreamble = "Function:";
+ } else {
+ kPreamble = "LazyCompile:";
+ }
+ static size_t kPreambleLen = strlen(kPreamble);
- if (event->name.len < sizeof(kPreamble) - 1 ||
+ if (event->name.len < kPreambleLen ||
strncmp(kPreamble, event->name.str, kPreambleLen) != 0) {
return false;
}
@@ -14730,7 +14707,8 @@ UNINITIALIZED_TEST(SetJitCodeEventHandler) {
for (int i = 0; i < kIterations; ++i) {
LocalContext env(isolate);
i::AlwaysAllocateScope always_allocate(i_isolate);
- SimulateFullSpace(heap->code_space());
+ SimulateFullSpace(i::FLAG_ignition ? heap->old_space()
+ : heap->code_space());
CompileRun(script);
// Keep a strong reference to the code object in the handle scope.
@@ -15171,6 +15149,9 @@ THREADED_TEST(AccessChecksReenabledCorrectly) {
// Tests that ScriptData can be serialized and deserialized.
TEST(PreCompileSerialization) {
+ // Producing cached parser data while parsing eagerly is not supported.
+ if (!i::FLAG_lazy || (i::FLAG_ignition && i::FLAG_ignition_eager)) return;
+
v8::V8::Initialize();
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -18390,6 +18371,7 @@ THREADED_TEST(FunctionGetInferredName) {
THREADED_TEST(FunctionGetDebugName) {
+ i::FLAG_harmony_function_name = true;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
const char* code =
@@ -18433,7 +18415,8 @@ THREADED_TEST(FunctionGetDebugName) {
"Object.defineProperty(i, 'name', { value: 'function.name' });"
"var j = function() {};"
"Object.defineProperty(j, 'name', { value: 'function.name' });"
- "var foo = { bar : { baz : function() {}}}; var k = foo.bar.baz;";
+ "var foo = { bar : { baz : (0, function() {})}}; var k = foo.bar.baz;"
+ "var foo = { bar : { baz : function() {} }}; var l = foo.bar.baz;";
v8::ScriptOrigin origin = v8::ScriptOrigin(v8_str("test"));
v8::Script::Compile(env.local(), v8_str(code), &origin)
.ToLocalChecked()
@@ -18452,7 +18435,8 @@ THREADED_TEST(FunctionGetDebugName) {
"h", "displayName",
"i", "function.name",
"j", "function.name",
- "k", "foo.bar.baz"};
+ "k", "foo.bar.baz",
+ "l", "baz"};
for (size_t i = 0; i < sizeof(functions) / sizeof(functions[0]) / 2; ++i) {
v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
env->Global()
@@ -19928,7 +19912,6 @@ TEST(PersistentHandleInNewSpaceVisitor) {
TEST(RegExp) {
- i::FLAG_harmony_regexps = true;
i::FLAG_harmony_unicode_regexps = true;
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
@@ -20584,7 +20567,8 @@ THREADED_TEST(Regress1516) {
int elements = CountLiveMapsInMapCache(CcTest::i_isolate()->context());
CHECK_LE(1, elements);
- CcTest::heap()->CollectAllGarbage();
+ // We have to abort incremental marking here to abandon black pages.
+ CcTest::heap()->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
CHECK_GT(elements, CountLiveMapsInMapCache(CcTest::i_isolate()->context()));
}
@@ -20934,12 +20918,16 @@ TEST(CallCompletedCallbackTwoExceptions) {
static void MicrotaskOne(const v8::FunctionCallbackInfo<Value>& info) {
v8::HandleScope scope(info.GetIsolate());
+ v8::MicrotasksScope microtasks(info.GetIsolate(),
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
CompileRun("ext1Calls++;");
}
static void MicrotaskTwo(const v8::FunctionCallbackInfo<Value>& info) {
v8::HandleScope scope(info.GetIsolate());
+ v8::MicrotasksScope microtasks(info.GetIsolate(),
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
CompileRun("ext2Calls++;");
}
@@ -21046,23 +21034,35 @@ TEST(RunMicrotasksIgnoresThrownExceptions) {
}
+uint8_t microtasks_completed_callback_count = 0;
+
+
+static void MicrotasksCompletedCallback(v8::Isolate* isolate) {
+ ++microtasks_completed_callback_count;
+}
+
+
TEST(SetAutorunMicrotasks) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
+ env->GetIsolate()->AddMicrotasksCompletedCallback(
+ &MicrotasksCompletedCallback);
CompileRun(
"var ext1Calls = 0;"
"var ext2Calls = 0;");
CompileRun("1+1;");
CHECK_EQ(0, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(0u, microtasks_completed_callback_count);
env->GetIsolate()->EnqueueMicrotask(
Function::New(env.local(), MicrotaskOne).ToLocalChecked());
CompileRun("1+1;");
CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(1u, microtasks_completed_callback_count);
- env->GetIsolate()->SetAutorunMicrotasks(false);
+ env->GetIsolate()->SetMicrotasksPolicy(v8::MicrotasksPolicy::kExplicit);
env->GetIsolate()->EnqueueMicrotask(
Function::New(env.local(), MicrotaskOne).ToLocalChecked());
env->GetIsolate()->EnqueueMicrotask(
@@ -21070,27 +21070,32 @@ TEST(SetAutorunMicrotasks) {
CompileRun("1+1;");
CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(1u, microtasks_completed_callback_count);
env->GetIsolate()->RunMicrotasks();
CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
CHECK_EQ(1, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(2u, microtasks_completed_callback_count);
env->GetIsolate()->EnqueueMicrotask(
Function::New(env.local(), MicrotaskTwo).ToLocalChecked());
CompileRun("1+1;");
CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
CHECK_EQ(1, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(2u, microtasks_completed_callback_count);
env->GetIsolate()->RunMicrotasks();
CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
CHECK_EQ(2, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(3u, microtasks_completed_callback_count);
- env->GetIsolate()->SetAutorunMicrotasks(true);
+ env->GetIsolate()->SetMicrotasksPolicy(v8::MicrotasksPolicy::kAuto);
env->GetIsolate()->EnqueueMicrotask(
Function::New(env.local(), MicrotaskTwo).ToLocalChecked());
CompileRun("1+1;");
CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
CHECK_EQ(3, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(4u, microtasks_completed_callback_count);
env->GetIsolate()->EnqueueMicrotask(
Function::New(env.local(), MicrotaskTwo).ToLocalChecked());
@@ -21099,18 +21104,29 @@ TEST(SetAutorunMicrotasks) {
CompileRun("1+1;");
CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
CHECK_EQ(3, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(4u, microtasks_completed_callback_count);
}
CompileRun("1+1;");
CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
CHECK_EQ(4, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(5u, microtasks_completed_callback_count);
+
+ env->GetIsolate()->RemoveMicrotasksCompletedCallback(
+ &MicrotasksCompletedCallback);
+ env->GetIsolate()->EnqueueMicrotask(
+ Function::New(env.local(), MicrotaskOne).ToLocalChecked());
+ CompileRun("1+1;");
+ CHECK_EQ(3, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(4, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(5u, microtasks_completed_callback_count);
}
TEST(RunMicrotasksWithoutEnteringContext) {
v8::Isolate* isolate = CcTest::isolate();
HandleScope handle_scope(isolate);
- isolate->SetAutorunMicrotasks(false);
+ isolate->SetMicrotasksPolicy(v8::MicrotasksPolicy::kExplicit);
Local<Context> context = Context::New(isolate);
{
Context::Scope context_scope(context);
@@ -21123,7 +21139,147 @@ TEST(RunMicrotasksWithoutEnteringContext) {
Context::Scope context_scope(context);
CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value(context).FromJust());
}
- isolate->SetAutorunMicrotasks(true);
+ isolate->SetMicrotasksPolicy(v8::MicrotasksPolicy::kAuto);
+}
+
+
+TEST(ScopedMicrotasks) {
+ LocalContext env;
+ v8::HandleScope handles(env->GetIsolate());
+ env->GetIsolate()->SetMicrotasksPolicy(v8::MicrotasksPolicy::kScoped);
+ {
+ v8::MicrotasksScope scope1(env->GetIsolate(),
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
+ env->GetIsolate()->EnqueueMicrotask(
+ Function::New(env.local(), MicrotaskOne).ToLocalChecked());
+ CompileRun(
+ "var ext1Calls = 0;"
+ "var ext2Calls = 0;");
+ CompileRun("1+1;");
+ CHECK_EQ(0, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
+ {
+ v8::MicrotasksScope scope2(env->GetIsolate(),
+ v8::MicrotasksScope::kRunMicrotasks);
+ CompileRun("1+1;");
+ CHECK_EQ(0, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
+ {
+ v8::MicrotasksScope scope3(env->GetIsolate(),
+ v8::MicrotasksScope::kRunMicrotasks);
+ CompileRun("1+1;");
+ CHECK_EQ(0,
+ CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(0,
+ CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
+ }
+ CHECK_EQ(0, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
+ }
+ CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
+ env->GetIsolate()->EnqueueMicrotask(
+ Function::New(env.local(), MicrotaskTwo).ToLocalChecked());
+ }
+
+ {
+ v8::MicrotasksScope scope(env->GetIsolate(),
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
+ CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
+ }
+
+ {
+ v8::MicrotasksScope scope1(env->GetIsolate(),
+ v8::MicrotasksScope::kRunMicrotasks);
+ CompileRun("1+1;");
+ CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
+ {
+ v8::MicrotasksScope scope2(env->GetIsolate(),
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
+ }
+ CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
+ }
+
+ {
+ v8::MicrotasksScope scope(env->GetIsolate(),
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
+ CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(1, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
+ env->GetIsolate()->EnqueueMicrotask(
+ Function::New(env.local(), MicrotaskTwo).ToLocalChecked());
+ }
+
+ {
+ v8::Isolate::SuppressMicrotaskExecutionScope scope1(env->GetIsolate());
+ {
+ v8::MicrotasksScope scope2(env->GetIsolate(),
+ v8::MicrotasksScope::kRunMicrotasks);
+ }
+ v8::MicrotasksScope scope3(env->GetIsolate(),
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
+ CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(1, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
+ }
+
+ {
+ v8::MicrotasksScope scope1(env->GetIsolate(),
+ v8::MicrotasksScope::kRunMicrotasks);
+ v8::MicrotasksScope::PerformCheckpoint(env->GetIsolate());
+ CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(1, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
+ }
+
+ {
+ v8::MicrotasksScope scope(env->GetIsolate(),
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
+ CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(2, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
+ }
+
+ v8::MicrotasksScope::PerformCheckpoint(env->GetIsolate());
+
+ {
+ v8::MicrotasksScope scope(env->GetIsolate(),
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
+ CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(2, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
+ env->GetIsolate()->EnqueueMicrotask(
+ Function::New(env.local(), MicrotaskTwo).ToLocalChecked());
+ }
+
+ v8::MicrotasksScope::PerformCheckpoint(env->GetIsolate());
+
+ {
+ v8::MicrotasksScope scope(env->GetIsolate(),
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
+ CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(3, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
+ }
+
+ env->GetIsolate()->EnqueueMicrotask(
+ Function::New(env.local(), MicrotaskOne).ToLocalChecked());
+ {
+ v8::Isolate::SuppressMicrotaskExecutionScope scope1(env->GetIsolate());
+ v8::MicrotasksScope::PerformCheckpoint(env->GetIsolate());
+ v8::MicrotasksScope scope2(env->GetIsolate(),
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
+ CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(3, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
+ }
+
+ v8::MicrotasksScope::PerformCheckpoint(env->GetIsolate());
+
+ {
+ v8::MicrotasksScope scope(env->GetIsolate(),
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
+ CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(3, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
+ }
+
+ env->GetIsolate()->SetMicrotasksPolicy(v8::MicrotasksPolicy::kAuto);
}
@@ -21146,7 +21302,7 @@ TEST(Regress385349) {
i::FLAG_allow_natives_syntax = true;
v8::Isolate* isolate = CcTest::isolate();
HandleScope handle_scope(isolate);
- isolate->SetAutorunMicrotasks(false);
+ isolate->SetMicrotasksPolicy(v8::MicrotasksPolicy::kExplicit);
Local<Context> context = Context::New(isolate);
v8::Debug::SetDebugEventListener(isolate, DebugEventInObserver);
{
@@ -21156,7 +21312,7 @@ TEST(Regress385349) {
"obj.a = 0;");
}
isolate->RunMicrotasks();
- isolate->SetAutorunMicrotasks(true);
+ isolate->SetMicrotasksPolicy(v8::MicrotasksPolicy::kAuto);
v8::Debug::SetDebugEventListener(isolate, nullptr);
}
@@ -22050,7 +22206,7 @@ TEST(AccessCheckThrows) {
CheckCorrectThrow("%DeleteProperty_Strict(other, 'x')");
CheckCorrectThrow("%DeleteProperty_Sloppy(other, '1')");
CheckCorrectThrow("%DeleteProperty_Strict(other, '1')");
- CheckCorrectThrow("%HasOwnProperty(other, 'x')");
+ CheckCorrectThrow("Object.prototype.hasOwnProperty.call(other, 'x')");
CheckCorrectThrow("%HasProperty('x', other)");
CheckCorrectThrow("%PropertyIsEnumerable(other, 'x')");
// PROPERTY_ATTRIBUTES_NONE = 0
@@ -22460,7 +22616,8 @@ THREADED_TEST(FunctionNew) {
->serial_number()),
i_isolate);
auto cache = i_isolate->template_instantiations_cache();
- CHECK(cache->Lookup(serial_number)->IsTheHole());
+ CHECK(cache->FindEntry(static_cast<uint32_t>(serial_number->value())) ==
+ i::UnseededNumberDictionary::kNotFound);
// Verify that each Function::New creates a new function instance
Local<Object> data2 = v8::Object::New(isolate);
function_new_expected_env = data2;
@@ -23925,12 +24082,18 @@ TEST(InvalidCacheData) {
v8::V8::Initialize();
v8::HandleScope scope(CcTest::isolate());
LocalContext context;
- TestInvalidCacheData(v8::ScriptCompiler::kConsumeParserCache);
+ if (i::FLAG_lazy && !(i::FLAG_ignition && i::FLAG_ignition_eager)) {
+ // Cached parser data is not consumed while parsing eagerly.
+ TestInvalidCacheData(v8::ScriptCompiler::kConsumeParserCache);
+ }
TestInvalidCacheData(v8::ScriptCompiler::kConsumeCodeCache);
}
TEST(ParserCacheRejectedGracefully) {
+ // Producing cached parser data while parsing eagerly is not supported.
+ if (!i::FLAG_lazy || (i::FLAG_ignition && i::FLAG_ignition_eager)) return;
+
i::FLAG_min_preparse_length = 0;
v8::V8::Initialize();
v8::HandleScope scope(CcTest::isolate());
@@ -24197,271 +24360,6 @@ TEST(SealHandleScopeNested) {
}
-static bool access_was_called = false;
-
-static bool AccessAlwaysAllowedWithFlag(Local<v8::Context> accessing_context,
- Local<v8::Object> accessed_object,
- Local<v8::Value> data) {
- access_was_called = true;
- return true;
-}
-
-static bool AccessAlwaysBlockedWithFlag(Local<v8::Context> accessing_context,
- Local<v8::Object> accessed_object,
- Local<v8::Value> data) {
- access_was_called = true;
- return false;
-}
-
-
-TEST(StrongModeAccessCheckAllowed) {
- i::FLAG_strong_mode = true;
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope handle_scope(isolate);
- v8::Local<Value> value;
- access_was_called = false;
-
- v8::Local<v8::ObjectTemplate> obj_template = v8::ObjectTemplate::New(isolate);
-
- obj_template->Set(v8_str("x"), v8::Integer::New(isolate, 42));
- obj_template->SetAccessCheckCallback(AccessAlwaysAllowedWithFlag);
-
- // Create an environment
- v8::Local<Context> context0 = Context::New(isolate, NULL, obj_template);
- context0->Enter();
- v8::Local<v8::Object> global0 = context0->Global();
- global0->Set(context0, v8_str("object"),
- obj_template->NewInstance(context0).ToLocalChecked())
- .FromJust();
- {
- v8::TryCatch try_catch(isolate);
- value = CompileRun("'use strong'; object.x");
- CHECK(!try_catch.HasCaught());
- CHECK(!access_was_called);
- CHECK_EQ(42, value->Int32Value(context0).FromJust());
- }
- {
- v8::TryCatch try_catch(isolate);
- value = CompileRun("'use strong'; object.foo");
- CHECK(try_catch.HasCaught());
- CHECK(!access_was_called);
- }
- {
- v8::TryCatch try_catch(isolate);
- value = CompileRun("'use strong'; object[10]");
- CHECK(try_catch.HasCaught());
- CHECK(!access_was_called);
- }
-
- // Create an environment
- v8::Local<Context> context1 = Context::New(isolate);
- context1->Enter();
- v8::Local<v8::Object> global1 = context1->Global();
- global1->Set(context1, v8_str("object"),
- obj_template->NewInstance(context1).ToLocalChecked())
- .FromJust();
- {
- v8::TryCatch try_catch(isolate);
- value = CompileRun("'use strong'; object.x");
- CHECK(!try_catch.HasCaught());
- CHECK(access_was_called);
- CHECK_EQ(42, value->Int32Value(context1).FromJust());
- }
- access_was_called = false;
- {
- v8::TryCatch try_catch(isolate);
- value = CompileRun("'use strong'; object.foo");
- CHECK(try_catch.HasCaught());
- CHECK(access_was_called);
- }
- access_was_called = false;
- {
- v8::TryCatch try_catch(isolate);
- value = CompileRun("'use strong'; object[10]");
- CHECK(try_catch.HasCaught());
- CHECK(access_was_called);
- }
-
- context1->Exit();
- context0->Exit();
-}
-
-
-TEST(StrongModeAccessCheckBlocked) {
- i::FLAG_strong_mode = true;
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope handle_scope(isolate);
- v8::Local<Value> value;
- access_was_called = false;
-
- v8::Local<v8::ObjectTemplate> obj_template = v8::ObjectTemplate::New(isolate);
-
- obj_template->Set(v8_str("x"), v8::Integer::New(isolate, 42));
- obj_template->SetAccessCheckCallback(AccessAlwaysBlockedWithFlag);
-
- // Create an environment
- v8::Local<Context> context0 = Context::New(isolate, NULL, obj_template);
- context0->Enter();
- v8::Local<v8::Object> global0 = context0->Global();
- global0->Set(context0, v8_str("object"),
- obj_template->NewInstance(context0).ToLocalChecked())
- .FromJust();
- {
- v8::TryCatch try_catch(isolate);
- value = CompileRun("'use strong'; object.x");
- CHECK(!try_catch.HasCaught());
- CHECK(!access_was_called);
- CHECK_EQ(42, value->Int32Value(context0).FromJust());
- }
- {
- v8::TryCatch try_catch(isolate);
- value = CompileRun("'use strong'; object.foo");
- CHECK(try_catch.HasCaught());
- CHECK(!access_was_called);
- }
- {
- v8::TryCatch try_catch(isolate);
- value = CompileRun("'use strong'; object[10]");
- CHECK(try_catch.HasCaught());
- CHECK(!access_was_called);
- }
-
- // Create an environment
- v8::Local<Context> context1 = Context::New(isolate);
- context1->Enter();
- v8::Local<v8::Object> global1 = context1->Global();
- global1->Set(context1, v8_str("object"),
- obj_template->NewInstance(context1).ToLocalChecked())
- .FromJust();
- {
- v8::TryCatch try_catch(isolate);
- value = CompileRun("'use strong'; object.x");
- CHECK(try_catch.HasCaught());
- CHECK(access_was_called);
- }
- access_was_called = false;
- {
- v8::TryCatch try_catch(isolate);
- value = CompileRun("'use strong'; object.foo");
- CHECK(try_catch.HasCaught());
- CHECK(access_was_called);
- }
- access_was_called = false;
- {
- v8::TryCatch try_catch(isolate);
- value = CompileRun("'use strong'; object[10]");
- CHECK(try_catch.HasCaught());
- CHECK(access_was_called);
- }
-
- context1->Exit();
- context0->Exit();
-}
-
-
-TEST(StrongModeArityCallFromApi) {
- i::FLAG_strong_mode = true;
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope scope(isolate);
- Local<Function> fun;
- {
- v8::TryCatch try_catch(isolate);
- fun = Local<Function>::Cast(CompileRun(
- "function f(x) { 'use strong'; }"
- "f"));
-
- CHECK(!try_catch.HasCaught());
- }
-
- {
- v8::TryCatch try_catch(isolate);
- CHECK(fun->Call(env.local(), v8::Undefined(isolate), 0, nullptr).IsEmpty());
- CHECK(try_catch.HasCaught());
- }
-
- {
- v8::TryCatch try_catch(isolate);
- v8::Local<Value> args[] = {v8_num(42)};
- fun->Call(env.local(), v8::Undefined(isolate), arraysize(args), args)
- .ToLocalChecked();
- CHECK(!try_catch.HasCaught());
- }
-
- {
- v8::TryCatch try_catch(isolate);
- v8::Local<Value> args[] = {v8_num(42), v8_num(555)};
- fun->Call(env.local(), v8::Undefined(isolate), arraysize(args), args)
- .ToLocalChecked();
- CHECK(!try_catch.HasCaught());
- }
-}
-
-
-TEST(StrongModeArityCallFromApi2) {
- i::FLAG_strong_mode = true;
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope scope(isolate);
- Local<Function> fun;
- {
- v8::TryCatch try_catch(isolate);
- fun = Local<Function>::Cast(CompileRun(
- "'use strong';"
- "function f(x) {}"
- "f"));
-
- CHECK(!try_catch.HasCaught());
- }
-
- {
- v8::TryCatch try_catch(isolate);
- CHECK(fun->Call(env.local(), v8::Undefined(isolate), 0, nullptr).IsEmpty());
- CHECK(try_catch.HasCaught());
- }
-
- {
- v8::TryCatch try_catch(isolate);
- v8::Local<Value> args[] = {v8_num(42)};
- fun->Call(env.local(), v8::Undefined(isolate), arraysize(args), args)
- .ToLocalChecked();
- CHECK(!try_catch.HasCaught());
- }
-
- {
- v8::TryCatch try_catch(isolate);
- v8::Local<Value> args[] = {v8_num(42), v8_num(555)};
- fun->Call(env.local(), v8::Undefined(isolate), arraysize(args), args)
- .ToLocalChecked();
- CHECK(!try_catch.HasCaught());
- }
-}
-
-
-TEST(StrongObjectDelete) {
- i::FLAG_strong_mode = true;
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope scope(isolate);
- Local<Object> obj;
- {
- v8::TryCatch try_catch(isolate);
- obj = Local<Object>::Cast(CompileRun(
- "'use strong';"
- "({});"));
- CHECK(!try_catch.HasCaught());
- }
- obj->DefineOwnProperty(env.local(), v8_str("foo"), v8_num(1), v8::None)
- .FromJust();
- obj->DefineOwnProperty(env.local(), v8_str("2"), v8_num(1), v8::None)
- .FromJust();
- CHECK(obj->HasOwnProperty(env.local(), v8_str("foo")).FromJust());
- CHECK(obj->HasOwnProperty(env.local(), v8_str("2")).FromJust());
- CHECK(!obj->Delete(env.local(), v8_str("foo")).FromJust());
- CHECK(!obj->Delete(env.local(), 2).FromJust());
-}
-
-
static void ExtrasBindingTestRuntimeFunction(
const v8::FunctionCallbackInfo<v8::Value>& args) {
CHECK_EQ(
@@ -24773,6 +24671,53 @@ TEST(CompatibleReceiverCheckOnCachedICHandler) {
0);
}
+THREADED_TEST(ReceiverConversionForAccessors) {
+ LocalContext env;
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<v8::FunctionTemplate> acc =
+ v8::FunctionTemplate::New(isolate, Returns42);
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("acc"),
+ acc->GetFunction(env.local()).ToLocalChecked())
+ .FromJust());
+
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetAccessorProperty(v8_str("acc"), acc, acc);
+ Local<v8::Object> instance = templ->NewInstance(env.local()).ToLocalChecked();
+
+ CHECK(env->Global()->Set(env.local(), v8_str("p"), instance).FromJust());
+ CHECK(CompileRun("(p.acc == 42)")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("(p.acc = 7) == 7")->BooleanValue(env.local()).FromJust());
+
+ CHECK(!CompileRun("Number.prototype.__proto__ = p;"
+ "var a = 1;")
+ .IsEmpty());
+ CHECK(CompileRun("(a.acc == 42)")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("(a.acc = 7) == 7")->BooleanValue(env.local()).FromJust());
+
+ CHECK(!CompileRun("Boolean.prototype.__proto__ = p;"
+ "var a = true;")
+ .IsEmpty());
+ CHECK(CompileRun("(a.acc == 42)")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("(a.acc = 7) == 7")->BooleanValue(env.local()).FromJust());
+
+ CHECK(!CompileRun("String.prototype.__proto__ = p;"
+ "var a = 'foo';")
+ .IsEmpty());
+ CHECK(CompileRun("(a.acc == 42)")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("(a.acc = 7) == 7")->BooleanValue(env.local()).FromJust());
+
+ CHECK(CompileRun("acc.call(1) == 42")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("acc.call(true)==42")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("acc.call('aa')==42")->BooleanValue(env.local()).FromJust());
+ CHECK(
+ CompileRun("acc.call(null) == 42")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("acc.call(undefined) == 42")
+ ->BooleanValue(env.local())
+ .FromJust());
+}
+
class FutexInterruptionThread : public v8::base::Thread {
public:
explicit FutexInterruptionThread(v8::Isolate* isolate)
@@ -24886,7 +24831,6 @@ TEST(AccessCheckedIsConcatSpreadable) {
TEST(AccessCheckedToStringTag) {
- i::FLAG_harmony_tostring = true;
v8::Isolate* isolate = CcTest::isolate();
HandleScope scope(isolate);
LocalContext env;
@@ -24965,7 +24909,6 @@ TEST(ObjectTemplateIntrinsics) {
TEST(Proxy) {
- i::FLAG_harmony_proxies = true;
LocalContext context;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
@@ -24987,3 +24930,98 @@ TEST(Proxy) {
CHECK(proxy->GetTarget()->SameValue(target));
CHECK(proxy->GetHandler()->IsNull());
}
+
+WeakCallCounterAndPersistent<Value>* CreateGarbageWithWeakCallCounter(
+ v8::Isolate* isolate, WeakCallCounter* counter) {
+ v8::Locker locker(isolate);
+ LocalContext env;
+ HandleScope scope(isolate);
+ WeakCallCounterAndPersistent<Value>* val =
+ new WeakCallCounterAndPersistent<Value>(counter);
+ val->handle.Reset(isolate, Object::New(isolate));
+ val->handle.SetWeak(val, &WeakPointerCallback,
+ v8::WeakCallbackType::kParameter);
+ return val;
+}
+
+class MemoryPressureThread : public v8::base::Thread {
+ public:
+ explicit MemoryPressureThread(v8::Isolate* isolate,
+ v8::MemoryPressureLevel level)
+ : Thread(Options("MemoryPressureThread")),
+ isolate_(isolate),
+ level_(level) {}
+
+ virtual void Run() { isolate_->MemoryPressureNotification(level_); }
+
+ private:
+ v8::Isolate* isolate_;
+ v8::MemoryPressureLevel level_;
+};
+
+TEST(MemoryPressure) {
+ v8::Isolate* isolate = CcTest::isolate();
+ WeakCallCounter counter(1234);
+
+ // Check that critical memory pressure notification sets GC interrupt.
+ auto garbage = CreateGarbageWithWeakCallCounter(isolate, &counter);
+ CHECK(!v8::Locker::IsLocked(isolate));
+ {
+ v8::Locker locker(isolate);
+ v8::HandleScope scope(isolate);
+ LocalContext env;
+ MemoryPressureThread memory_pressure_thread(
+ isolate, v8::MemoryPressureLevel::kCritical);
+ memory_pressure_thread.Start();
+ memory_pressure_thread.Join();
+ // This should trigger GC.
+ CHECK_EQ(0, counter.NumberOfWeakCalls());
+ CompileRun("(function noop() { return 0; })()");
+ CHECK_EQ(1, counter.NumberOfWeakCalls());
+ }
+ delete garbage;
+ // Check that critical memory pressure notification triggers GC.
+ garbage = CreateGarbageWithWeakCallCounter(isolate, &counter);
+ {
+ v8::Locker locker(isolate);
+ // If isolate is locked, memory pressure notification should trigger GC.
+ CHECK_EQ(1, counter.NumberOfWeakCalls());
+ isolate->MemoryPressureNotification(v8::MemoryPressureLevel::kCritical);
+ CHECK_EQ(2, counter.NumberOfWeakCalls());
+ }
+ delete garbage;
+ // Check that moderate memory pressure notification sets GC into memory
+ // optimizing mode.
+ isolate->MemoryPressureNotification(v8::MemoryPressureLevel::kModerate);
+ CHECK(CcTest::i_isolate()->heap()->ShouldOptimizeForMemoryUsage());
+ // Check that disabling memory pressure returns GC into normal mode.
+ isolate->MemoryPressureNotification(v8::MemoryPressureLevel::kNone);
+ CHECK(!CcTest::i_isolate()->heap()->ShouldOptimizeForMemoryUsage());
+}
+
+TEST(SetIntegrityLevel) {
+ LocalContext context;
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+
+ v8::Local<v8::Object> obj = v8::Object::New(isolate);
+ CHECK(context->Global()->Set(context.local(), v8_str("o"), obj).FromJust());
+
+ v8::Local<v8::Value> is_frozen = CompileRun("Object.isFrozen(o)");
+ CHECK(!is_frozen->BooleanValue(context.local()).FromJust());
+
+ CHECK(obj->SetIntegrityLevel(context.local(), v8::IntegrityLevel::kFrozen)
+ .FromJust());
+
+ is_frozen = CompileRun("Object.isFrozen(o)");
+ CHECK(is_frozen->BooleanValue(context.local()).FromJust());
+}
+
+TEST(PrivateForApiIsNumber) {
+ LocalContext context;
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+
+ // Shouldn't crash.
+ v8::Private::ForApi(isolate, v8_str("42"));
+}
diff --git a/deps/v8/test/cctest/test-api.h b/deps/v8/test/cctest/test-api.h
index 379fe9c9c2..8887a8a976 100644
--- a/deps/v8/test/cctest/test-api.h
+++ b/deps/v8/test/cctest/test-api.h
@@ -18,6 +18,12 @@ static void CheckReturnValue(const T& t, i::Address callback) {
CHECK((*o)->IsTheHole() || (*o)->IsUndefined());
// Verify reset
bool is_runtime = (*o)->IsTheHole();
+ if (is_runtime) {
+ CHECK(rv.Get()->IsUndefined());
+ } else {
+ i::Handle<i::Object> v = v8::Utils::OpenHandle(*rv.Get());
+ CHECK_EQ(*v, *o);
+ }
rv.Set(true);
CHECK(!(*o)->IsTheHole() && !(*o)->IsUndefined());
rv.Set(v8::Local<v8::Object>());
diff --git a/deps/v8/test/cctest/test-asm-validator.cc b/deps/v8/test/cctest/test-asm-validator.cc
index 207b915378..d5b51797d6 100644
--- a/deps/v8/test/cctest/test-asm-validator.cc
+++ b/deps/v8/test/cctest/test-asm-validator.cc
@@ -183,7 +183,7 @@ TEST(ValidateMinimum) {
CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
}
- // p = (p + 8)|0) {\n"
+ // p = (p + 8)|0) {
CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
CHECK_VAR(p, Bounds(cache.kAsmInt));
CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
@@ -354,59 +354,54 @@ TEST(MissingReturnExports) {
Validate(zone, test_function, &types));
}
-
-#define HARNESS_STDLIB() \
- "var Infinity = stdlib.Infinity;\n" \
- "var NaN = stdlib.NaN;\n" \
- "var acos = stdlib.Math.acos;\n" \
- "var asin = stdlib.Math.asin;\n" \
- "var atan = stdlib.Math.atan;\n" \
- "var cos = stdlib.Math.cos;\n" \
- "var sin = stdlib.Math.sin;\n" \
- "var tan = stdlib.Math.tan;\n" \
- "var exp = stdlib.Math.exp;\n" \
- "var log = stdlib.Math.log;\n" \
- "var ceil = stdlib.Math.ceil;\n" \
- "var floor = stdlib.Math.floor;\n" \
- "var sqrt = stdlib.Math.sqrt;\n" \
- "var min = stdlib.Math.min;\n" \
- "var max = stdlib.Math.max;\n" \
- "var atan2 = stdlib.Math.atan2;\n" \
- "var pow = stdlib.Math.pow;\n" \
- "var abs = stdlib.Math.abs;\n" \
- "var imul = stdlib.Math.imul;\n" \
- "var fround = stdlib.Math.fround;\n" \
- "var E = stdlib.Math.E;\n" \
- "var LN10 = stdlib.Math.LN10;\n" \
- "var LN2 = stdlib.Math.LN2;\n" \
- "var LOG2E = stdlib.Math.LOG2E;\n" \
- "var LOG10E = stdlib.Math.LOG10E;\n" \
- "var PI = stdlib.Math.PI;\n" \
- "var SQRT1_2 = stdlib.Math.SQRT1_2;\n" \
- "var SQRT2 = stdlib.Math.SQRT2;\n"
-
-
-#define HARNESS_HEAP() \
- "var u8 = new stdlib.Uint8Array(buffer);\n" \
- "var i8 = new stdlib.Int8Array(buffer);\n" \
- "var u16 = new stdlib.Uint16Array(buffer);\n" \
- "var i16 = new stdlib.Int16Array(buffer);\n" \
- "var u32 = new stdlib.Uint32Array(buffer);\n" \
- "var i32 = new stdlib.Int32Array(buffer);\n" \
- "var f32 = new stdlib.Float32Array(buffer);\n" \
- "var f64 = new stdlib.Float64Array(buffer);\n"
-
-
-#define HARNESS_PREAMBLE() \
- const char test_function[] = \
- "function Module(stdlib, foreign, buffer) {\n" \
- "\"use asm\";\n" HARNESS_STDLIB() HARNESS_HEAP()
-
+#define HARNESS_STDLIB() \
+ "var Infinity = stdlib.Infinity; " \
+ "var NaN = stdlib.NaN; " \
+ "var acos = stdlib.Math.acos; " \
+ "var asin = stdlib.Math.asin; " \
+ "var atan = stdlib.Math.atan; " \
+ "var cos = stdlib.Math.cos; " \
+ "var sin = stdlib.Math.sin; " \
+ "var tan = stdlib.Math.tan; " \
+ "var exp = stdlib.Math.exp; " \
+ "var log = stdlib.Math.log; " \
+ "var ceil = stdlib.Math.ceil; " \
+ "var floor = stdlib.Math.floor; " \
+ "var sqrt = stdlib.Math.sqrt; " \
+ "var min = stdlib.Math.min; " \
+ "var max = stdlib.Math.max; " \
+ "var atan2 = stdlib.Math.atan2; " \
+ "var pow = stdlib.Math.pow; " \
+ "var abs = stdlib.Math.abs; " \
+ "var imul = stdlib.Math.imul; " \
+ "var fround = stdlib.Math.fround; " \
+ "var E = stdlib.Math.E; " \
+ "var LN10 = stdlib.Math.LN10; " \
+ "var LN2 = stdlib.Math.LN2; " \
+ "var LOG2E = stdlib.Math.LOG2E; " \
+ "var LOG10E = stdlib.Math.LOG10E; " \
+ "var PI = stdlib.Math.PI; " \
+ "var SQRT1_2 = stdlib.Math.SQRT1_2; " \
+ "var SQRT2 = stdlib.Math.SQRT2; "
+
+#define HARNESS_HEAP() \
+ "var u8 = new stdlib.Uint8Array(buffer); " \
+ "var i8 = new stdlib.Int8Array(buffer); " \
+ "var u16 = new stdlib.Uint16Array(buffer); " \
+ "var i16 = new stdlib.Int16Array(buffer); " \
+ "var u32 = new stdlib.Uint32Array(buffer); " \
+ "var i32 = new stdlib.Int32Array(buffer); " \
+ "var f32 = new stdlib.Float32Array(buffer); " \
+ "var f64 = new stdlib.Float64Array(buffer); "
+
+#define HARNESS_PREAMBLE() \
+ const char test_function[] = \
+ "function Module(stdlib, foreign, buffer) { " \
+ "\"use asm\"; " HARNESS_STDLIB() HARNESS_HEAP()
#define HARNESS_POSTAMBLE() \
- "return { foo: foo };\n" \
- "}\n";
-
+ "return { foo: foo }; " \
+ "} ";
#define CHECK_VAR_MATH_SHORTCUT(name, type) \
CHECK_EXPR(Assignment, type) { \
@@ -1088,7 +1083,7 @@ TEST(UnsignedFromFloat64) {
CHECK_FUNC_ERROR(
"function bar() { var x = 1.0; return (x>>>0)|0; }\n"
"function foo() { bar(); }",
- "asm: line 39: left bitwise operand expected to be an integer\n");
+ "asm: line 1: left bitwise operand expected to be an integer\n");
}
@@ -1096,7 +1091,7 @@ TEST(AndFloat64) {
CHECK_FUNC_ERROR(
"function bar() { var x = 1.0; return (x&0)|0; }\n"
"function foo() { bar(); }",
- "asm: line 39: left bitwise operand expected to be an integer\n");
+ "asm: line 1: left bitwise operand expected to be an integer\n");
}
@@ -1104,7 +1099,7 @@ TEST(TypeMismatchAddInt32Float64) {
CHECK_FUNC_ERROR(
"function bar() { var x = 1.0; var y = 0; return (x + y)|0; }\n"
"function foo() { bar(); }",
- "asm: line 39: ill-typed arithmetic operation\n");
+ "asm: line 1: ill-typed arithmetic operation\n");
}
@@ -1112,7 +1107,7 @@ TEST(TypeMismatchSubInt32Float64) {
CHECK_FUNC_ERROR(
"function bar() { var x = 1.0; var y = 0; return (x - y)|0; }\n"
"function foo() { bar(); }",
- "asm: line 39: ill-typed arithmetic operation\n");
+ "asm: line 1: ill-typed arithmetic operation\n");
}
@@ -1120,7 +1115,7 @@ TEST(TypeMismatchDivInt32Float64) {
CHECK_FUNC_ERROR(
"function bar() { var x = 1.0; var y = 0; return (x / y)|0; }\n"
"function foo() { bar(); }",
- "asm: line 39: ill-typed arithmetic operation\n");
+ "asm: line 1: ill-typed arithmetic operation\n");
}
@@ -1128,7 +1123,7 @@ TEST(TypeMismatchModInt32Float64) {
CHECK_FUNC_ERROR(
"function bar() { var x = 1.0; var y = 0; return (x % y)|0; }\n"
"function foo() { bar(); }",
- "asm: line 39: ill-typed arithmetic operation\n");
+ "asm: line 1: ill-typed arithmetic operation\n");
}
@@ -1136,7 +1131,7 @@ TEST(ModFloat32) {
CHECK_FUNC_ERROR(
"function bar() { var x = fround(1.0); return (x % x)|0; }\n"
"function foo() { bar(); }",
- "asm: line 39: ill-typed arithmetic operation\n");
+ "asm: line 1: ill-typed arithmetic operation\n");
}
@@ -1144,7 +1139,7 @@ TEST(TernaryMismatchInt32Float64) {
CHECK_FUNC_ERROR(
"function bar() { var x = 1; var y = 0.0; return (1 ? x : y)|0; }\n"
"function foo() { bar(); }",
- "asm: line 39: then and else expressions in ? must have the same type\n");
+ "asm: line 1: then and else expressions in ? must have the same type\n");
}
@@ -1152,15 +1147,15 @@ TEST(TernaryMismatchIntish) {
CHECK_FUNC_ERROR(
"function bar() { var x = 1; var y = 0; return (1 ? x + x : y)|0; }\n"
"function foo() { bar(); }",
- "asm: line 39: invalid type in ? then expression\n");
+ "asm: line 1: invalid type in ? then expression\n");
}
TEST(TernaryMismatchInt32Float32) {
CHECK_FUNC_ERROR(
- "function bar() { var x = 1; var y = 2; return (x?fround(y):x)|0; }\n"
+ "function bar() { var x = 1; var y = 2.0; return (x?fround(y):x)|0; }\n"
"function foo() { bar(); }",
- "asm: line 39: then and else expressions in ? must have the same type\n");
+ "asm: line 1: then and else expressions in ? must have the same type\n");
}
@@ -1168,13 +1163,26 @@ TEST(TernaryBadCondition) {
CHECK_FUNC_ERROR(
"function bar() { var x = 1; var y = 2.0; return (y?x:1)|0; }\n"
"function foo() { bar(); }",
- "asm: line 39: condition must be of type int\n");
+ "asm: line 1: condition must be of type int\n");
}
+TEST(BadIntishMultiply) {
+ CHECK_FUNC_ERROR(
+ "function bar() { var x = 1; return ((x + x) * 4) | 0; }\n"
+ "function foo() { bar(); }",
+ "asm: line 1: intish not allowed in multiply\n");
+}
-TEST(FroundFloat32) {
- CHECK_FUNC_TYPES_BEGIN(
+TEST(IntToFloat32) {
+ CHECK_FUNC_ERROR(
"function bar() { var x = 1; return fround(x); }\n"
+ "function foo() { bar(); }",
+ "asm: line 1: illegal function argument type\n");
+}
+
+TEST(Int32ToFloat32) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { var x = 1; return fround(x|0); }\n"
"function foo() { bar(); }") {
CHECK_EXPR(FunctionLiteral, FUNC_F_TYPE) {
CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
@@ -1183,7 +1191,32 @@ TEST(FroundFloat32) {
}
CHECK_EXPR(Call, Bounds(cache.kAsmFloat)) {
CHECK_VAR(fround, FUNC_N2F_TYPE);
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ }
+ }
+ CHECK_SKIP();
+ }
+ CHECK_FUNC_TYPES_END
+}
+
+TEST(Uint32ToFloat32) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { var x = 1; return fround(x>>>0); }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_F_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(Call, Bounds(cache.kAsmFloat)) {
+ CHECK_VAR(fround, FUNC_N2F_TYPE);
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmUnsigned)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
}
}
CHECK_SKIP();
@@ -1191,6 +1224,55 @@ TEST(FroundFloat32) {
CHECK_FUNC_TYPES_END
}
+TEST(Float64ToFloat32) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { var x = 1.0; return fround(x); }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_F_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmDouble)) {
+ CHECK_VAR(x, Bounds(cache.kAsmDouble));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
+ }
+ CHECK_EXPR(Call, Bounds(cache.kAsmFloat)) {
+ CHECK_VAR(fround, FUNC_N2F_TYPE);
+ CHECK_VAR(x, Bounds(cache.kAsmDouble));
+ }
+ }
+ CHECK_SKIP();
+ }
+ CHECK_FUNC_TYPES_END
+}
+
+TEST(Int32ToFloat32ToInt32) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { var x = 1; return ~~fround(x|0) | 0; }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(Call, Bounds(cache.kAsmFloat)) {
+ CHECK_VAR(fround, FUNC_N2F_TYPE);
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kAsmSigned));
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kAsmSigned));
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ }
+ CHECK_SKIP();
+ }
+ CHECK_FUNC_TYPES_END
+}
TEST(Addition4) {
CHECK_FUNC_TYPES_BEGIN(
@@ -1229,7 +1311,7 @@ TEST(Multiplication2) {
CHECK_FUNC_ERROR(
"function bar() { var x = 1; var y = 2; return (x*y)|0; }\n"
"function foo() { bar(); }",
- "asm: line 39: direct integer multiply forbidden\n");
+ "asm: line 1: multiply must be by an integer literal\n");
}
@@ -1237,7 +1319,7 @@ TEST(Division4) {
CHECK_FUNC_ERROR(
"function bar() { var x = 1; var y = 2; return (x/y/x/y)|0; }\n"
"function foo() { bar(); }",
- "asm: line 39: too many consecutive multiplicative ops\n");
+ "asm: line 1: too many consecutive multiplicative ops\n");
}
@@ -1245,7 +1327,7 @@ TEST(CompareToStringLeft) {
CHECK_FUNC_ERROR(
"function bar() { var x = 1; return ('hi' > x)|0; }\n"
"function foo() { bar(); }",
- "asm: line 39: bad type on left side of comparison\n");
+ "asm: line 1: bad type on left side of comparison\n");
}
@@ -1253,7 +1335,7 @@ TEST(CompareToStringRight) {
CHECK_FUNC_ERROR(
"function bar() { var x = 1; return (x < 'hi')|0; }\n"
"function foo() { bar(); }",
- "asm: line 39: bad type on right side of comparison\n");
+ "asm: line 1: bad type on right side of comparison\n");
}
@@ -1261,7 +1343,7 @@ TEST(CompareMismatchInt32Float64) {
CHECK_FUNC_ERROR(
"function bar() { var x = 1; var y = 2.0; return (x < y)|0; }\n"
"function foo() { bar(); }",
- "asm: line 39: left and right side of comparison must match\n");
+ "asm: line 1: left and right side of comparison must match\n");
}
@@ -1269,15 +1351,15 @@ TEST(CompareMismatchInt32Uint32) {
CHECK_FUNC_ERROR(
"function bar() { var x = 1; var y = 2; return ((x|0) < (y>>>0))|0; }\n"
"function foo() { bar(); }",
- "asm: line 39: left and right side of comparison must match\n");
+ "asm: line 1: left and right side of comparison must match\n");
}
TEST(CompareMismatchInt32Float32) {
CHECK_FUNC_ERROR(
- "function bar() { var x = 1; var y = 2; return (x < fround(y))|0; }\n"
+ "function bar() { var x = 1; var y = 2.0; return (x < fround(y))|0; }\n"
"function foo() { bar(); }",
- "asm: line 39: left and right side of comparison must match\n");
+ "asm: line 1: left and right side of comparison must match\n");
}
@@ -1618,7 +1700,7 @@ TEST(BadFunctionTable) {
"function bar(x, y) { x = x | 0; y = y | 0;\n"
" return table1[x & 1](y)|0; }\n"
"function foo() { bar(1, 2); }",
- "asm: line 40: array component expected to be a function\n");
+ "asm: line 2: array component expected to be a function\n");
}
@@ -1626,7 +1708,7 @@ TEST(MissingParameterTypes) {
CHECK_FUNC_ERROR(
"function bar(x) { var y = 1; }\n"
"function foo() { bar(2); }",
- "asm: line 39: missing parameter type annotations\n");
+ "asm: line 1: missing parameter type annotations\n");
}
@@ -1634,7 +1716,7 @@ TEST(InvalidTypeAnnotationBinaryOpDiv) {
CHECK_FUNC_ERROR(
"function bar(x) { x = x / 4; }\n"
"function foo() { bar(2); }",
- "asm: line 39: invalid type annotation on binary op\n");
+ "asm: line 1: invalid type annotation on binary op\n");
}
@@ -1642,7 +1724,7 @@ TEST(InvalidTypeAnnotationBinaryOpMul) {
CHECK_FUNC_ERROR(
"function bar(x) { x = x * 4.0; }\n"
"function foo() { bar(2); }",
- "asm: line 39: invalid type annotation on binary op\n");
+ "asm: line 1: invalid type annotation on binary op\n");
}
@@ -1650,7 +1732,7 @@ TEST(InvalidArgumentCount) {
CHECK_FUNC_ERROR(
"function bar(x) { return fround(4, 5); }\n"
"function foo() { bar(); }",
- "asm: line 39: invalid argument count calling function\n");
+ "asm: line 1: invalid argument count calling function\n");
}
@@ -1658,7 +1740,7 @@ TEST(InvalidTypeAnnotationArity) {
CHECK_FUNC_ERROR(
"function bar(x) { x = max(x); }\n"
"function foo() { bar(3); }",
- "asm: line 39: only fround allowed on expression annotations\n");
+ "asm: line 1: only fround allowed on expression annotations\n");
}
@@ -1666,7 +1748,7 @@ TEST(InvalidTypeAnnotationOnlyFround) {
CHECK_FUNC_ERROR(
"function bar(x) { x = sin(x); }\n"
"function foo() { bar(3); }",
- "asm: line 39: only fround allowed on expression annotations\n");
+ "asm: line 1: only fround allowed on expression annotations\n");
}
@@ -1674,7 +1756,7 @@ TEST(InvalidTypeAnnotation) {
CHECK_FUNC_ERROR(
"function bar(x) { x = (x+x)(x); }\n"
"function foo() { bar(3); }",
- "asm: line 39: invalid type annotation\n");
+ "asm: line 1: invalid type annotation\n");
}
@@ -1682,7 +1764,7 @@ TEST(WithStatement) {
CHECK_FUNC_ERROR(
"function bar() { var x = 0; with (x) { x = x + 1; } }\n"
"function foo() { bar(); }",
- "asm: line 39: bad with statement\n");
+ "asm: line 1: bad with statement\n");
}
@@ -1690,7 +1772,7 @@ TEST(NestedFunction) {
CHECK_FUNC_ERROR(
"function bar() { function x() { return 1; } }\n"
"function foo() { bar(); }",
- "asm: line 39: function declared inside another\n");
+ "asm: line 1: function declared inside another\n");
}
@@ -1698,7 +1780,7 @@ TEST(UnboundVariable) {
CHECK_FUNC_ERROR(
"function bar() { var x = y; }\n"
"function foo() { bar(); }",
- "asm: line 39: unbound variable\n");
+ "asm: line 1: unbound variable\n");
}
@@ -1706,7 +1788,7 @@ TEST(EqStrict) {
CHECK_FUNC_ERROR(
"function bar() { return (0 === 0)|0; }\n"
"function foo() { bar(); }",
- "asm: line 39: illegal comparison operator\n");
+ "asm: line 1: illegal comparison operator\n");
}
@@ -1714,15 +1796,19 @@ TEST(NeStrict) {
CHECK_FUNC_ERROR(
"function bar() { return (0 !== 0)|0; }\n"
"function foo() { bar(); }",
- "asm: line 39: illegal comparison operator\n");
+ "asm: line 1: illegal comparison operator\n");
}
TEST(InstanceOf) {
+ const char* errorMsg = FLAG_harmony_instanceof
+ ? "asm: line 0: do-expression encountered\n"
+ : "asm: line 1: illegal comparison operator\n";
+
CHECK_FUNC_ERROR(
"function bar() { return (0 instanceof 0)|0; }\n"
"function foo() { bar(); }",
- "asm: line 39: illegal comparison operator\n");
+ errorMsg);
}
@@ -1730,7 +1816,7 @@ TEST(InOperator) {
CHECK_FUNC_ERROR(
"function bar() { return (0 in 0)|0; }\n"
"function foo() { bar(); }",
- "asm: line 39: illegal comparison operator\n");
+ "asm: line 1: illegal comparison operator\n");
}
@@ -1738,7 +1824,7 @@ TEST(LogicalAndOperator) {
CHECK_FUNC_ERROR(
"function bar() { return (0 && 0)|0; }\n"
"function foo() { bar(); }",
- "asm: line 39: illegal logical operator\n");
+ "asm: line 1: illegal logical operator\n");
}
@@ -1746,15 +1832,21 @@ TEST(LogicalOrOperator) {
CHECK_FUNC_ERROR(
"function bar() { return (0 || 0)|0; }\n"
"function foo() { bar(); }",
- "asm: line 39: illegal logical operator\n");
+ "asm: line 1: illegal logical operator\n");
}
+TEST(BitOrDouble) {
+ CHECK_FUNC_ERROR(
+ "function bar() { var x = 1.0; return x | 0; }\n"
+ "function foo() { bar(); }",
+ "asm: line 1: intish required\n");
+}
TEST(BadLiteral) {
CHECK_FUNC_ERROR(
"function bar() { return true | 0; }\n"
"function foo() { bar(); }",
- "asm: line 39: illegal literal\n");
+ "asm: line 1: illegal literal\n");
}
@@ -1762,7 +1854,7 @@ TEST(MismatchedReturnTypeLiteral) {
CHECK_FUNC_ERROR(
"function bar() { if(1) { return 1; } return 1.0; }\n"
"function foo() { bar(); }",
- "asm: line 39: return type does not match function signature\n");
+ "asm: line 1: return type does not match function signature\n");
}
@@ -1771,7 +1863,7 @@ TEST(MismatchedReturnTypeExpression) {
"function bar() {\n"
" var x = 1; var y = 1.0; if(1) { return x; } return +y; }\n"
"function foo() { bar(); }",
- "asm: line 40: return type does not match function signature\n");
+ "asm: line 2: return type does not match function signature\n");
}
@@ -1779,7 +1871,7 @@ TEST(AssignToFloatishToF64) {
CHECK_FUNC_ERROR(
"function bar() { var v = fround(1.0); f64[0] = v + fround(1.0); }\n"
"function foo() { bar(); }",
- "asm: line 39: floatish assignment to double array\n");
+ "asm: line 1: floatish assignment to double array\n");
}
@@ -1848,7 +1940,7 @@ TEST(BadExports) {
HandleAndZoneScope handles;
Zone* zone = handles.main_zone();
ZoneVector<ExpressionTypeEntry> types(zone);
- CHECK_EQ("asm: line 40: non-function in function table\n",
+ CHECK_EQ("asm: line 2: non-function in function table\n",
Validate(zone, test_function, &types));
}
@@ -1857,14 +1949,14 @@ TEST(NestedHeapAssignment) {
CHECK_FUNC_ERROR(
"function bar() { var x = 0; i16[x = 1] = 2; }\n"
"function foo() { bar(); }",
- "asm: line 39: expected >> in heap access\n");
+ "asm: line 1: expected >> in heap access\n");
}
TEST(BadOperatorHeapAssignment) {
CHECK_FUNC_ERROR(
"function bar() { var x = 0; i16[x & 1] = 2; }\n"
"function foo() { bar(); }",
- "asm: line 39: expected >> in heap access\n");
+ "asm: line 1: expected >> in heap access\n");
}
@@ -1872,7 +1964,7 @@ TEST(BadArrayAssignment) {
CHECK_FUNC_ERROR(
"function bar() { i8[0] = 0.0; }\n"
"function foo() { bar(); }",
- "asm: line 39: illegal type in assignment\n");
+ "asm: line 1: illegal type in assignment\n");
}
@@ -1881,7 +1973,7 @@ TEST(BadStandardFunctionCallOutside) {
"var s0 = sin(0);\n"
"function bar() { }\n"
"function foo() { bar(); }",
- "asm: line 39: illegal variable reference in module body\n");
+ "asm: line 1: illegal variable reference in module body\n");
}
@@ -1890,9 +1982,23 @@ TEST(BadFunctionCallOutside) {
"function bar() { return 0.0; }\n"
"var s0 = bar(0);\n"
"function foo() { bar(); }",
- "asm: line 40: illegal variable reference in module body\n");
+ "asm: line 2: illegal variable reference in module body\n");
}
+TEST(UnaryPlusOnIntForbidden) {
+ CHECK_FUNC_ERROR(
+ "function bar() { var x = 1; return +x; }\n"
+ "function foo() { bar(); }",
+ "asm: line 1: "
+ "unary + only allowed on signed, unsigned, float?, or double?\n");
+}
+
+TEST(MultiplyNon1ConvertForbidden) {
+ CHECK_FUNC_ERROR(
+ "function bar() { var x = 0.0; return x * 2.0; }\n"
+ "function foo() { bar(); }",
+ "asm: line 1: invalid type annotation on binary op\n");
+}
TEST(NestedVariableAssignment) {
CHECK_FUNC_TYPES_BEGIN(
@@ -2026,6 +2132,30 @@ TEST(CeilFloat) {
CHECK_FUNC_TYPES_END
}
+TEST(FloatReturnAsDouble) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { var x = fround(3.1); return +fround(x); }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_D_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmFloat)) {
+ CHECK_VAR(x, Bounds(cache.kAsmFloat));
+ CHECK_EXPR(Call, Bounds(cache.kAsmFloat)) {
+ CHECK_VAR(fround, FUNC_N2F_TYPE);
+ CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
+ }
+ }
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmDouble)) {
+ CHECK_EXPR(Call, Bounds(cache.kAsmFloat)) {
+ CHECK_VAR(fround, FUNC_N2F_TYPE);
+ CHECK_VAR(x, Bounds(cache.kAsmFloat));
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
+ }
+ }
+ CHECK_SKIP();
+ }
+ CHECK_FUNC_TYPES_END
+}
TEST(TypeConsistency) {
v8::V8::Initialize();
@@ -2125,7 +2255,7 @@ TEST(BadSwitchRange) {
CHECK_FUNC_ERROR(
"function bar() { switch (1) { case -1: case 0x7fffffff: } }\n"
"function foo() { bar(); }",
- "asm: line 39: case range too large\n");
+ "asm: line 1: case range too large\n");
}
@@ -2133,7 +2263,7 @@ TEST(DuplicateSwitchCase) {
CHECK_FUNC_ERROR(
"function bar() { switch (1) { case 0: case 0: } }\n"
"function foo() { bar(); }",
- "asm: line 39: duplicate case value\n");
+ "asm: line 1: duplicate case value\n");
}
@@ -2141,7 +2271,7 @@ TEST(BadSwitchOrder) {
CHECK_FUNC_ERROR(
"function bar() { switch (1) { default: case 0: } }\n"
"function foo() { bar(); }",
- "asm: line 39: default case out of order\n");
+ "asm: line 1: default case out of order\n");
}
TEST(BadForeignCall) {
@@ -2342,3 +2472,43 @@ TEST(Imports) {
}
CHECK_TYPES_END
}
+
+TEST(StoreFloatFromDouble) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { f32[0] = 0.0; }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmDouble)) {
+ CHECK_EXPR(Property, Bounds::Unbounded()) {
+ CHECK_VAR(f32, Bounds(cache.kFloat32Array));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
+ }
+ }
+ CHECK_SKIP();
+ }
+ CHECK_FUNC_TYPES_END
+}
+
+TEST(NegateDouble) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { var x = 0.0; x = -x; }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmDouble)) {
+ CHECK_VAR(x, Bounds(cache.kAsmDouble));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmDouble)) {
+ CHECK_VAR(x, Bounds(cache.kAsmDouble));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmDouble)) {
+ CHECK_VAR(x, Bounds(cache.kAsmDouble));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
+ }
+ }
+ }
+ CHECK_SKIP();
+ }
+ CHECK_FUNC_TYPES_END
+}
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index 68eaab16f6..3763f06493 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -46,7 +46,7 @@ typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
typedef Object* (*F2)(int x, int y, int p2, int p3, int p4);
typedef Object* (*F3)(void* p0, int p1, int p2, int p3, int p4);
typedef Object* (*F4)(void* p0, void* p1, int p2, int p3, int p4);
-
+typedef Object* (*F5)(uint32_t p0, void* p1, void* p2, int p3, int p4);
#define __ assm.
@@ -232,6 +232,8 @@ TEST(4) {
double j;
double m;
double n;
+ float o;
+ float p;
float x;
float y;
} T;
@@ -314,6 +316,12 @@ TEST(4) {
__ vneg(d0, d1);
__ vstr(d0, r4, offsetof(T, n));
+ // Test vmov for single-precision immediates.
+ __ vmov(s0, 0.25f);
+ __ vstr(s0, r4, offsetof(T, o));
+ __ vmov(s0, -16.0f);
+ __ vstr(s0, r4, offsetof(T, p));
+
__ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
CodeDesc desc;
@@ -341,6 +349,8 @@ TEST(4) {
t.y = 9.0;
Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
+ CHECK_EQ(-16.0f, t.p);
+ CHECK_EQ(0.25f, t.o);
CHECK_EQ(-123.456, t.n);
CHECK_EQ(2718.2818, t.m);
CHECK_EQ(2, t.i);
@@ -1939,6 +1949,78 @@ TEST(code_relative_offset) {
CHECK_EQ(42, res);
}
+TEST(msr_mrs) {
+ // Test msr and mrs.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ Assembler assm(isolate, NULL, 0);
+
+ // Create a helper function:
+ // void TestMsrMrs(uint32_t nzcv,
+ // uint32_t * result_conditionals,
+ // uint32_t * result_mrs);
+ __ msr(CPSR_f, Operand(r0));
+
+ // Test that the condition flags have taken effect.
+ __ mov(r3, Operand(0));
+ __ orr(r3, r3, Operand(1 << 31), LeaveCC, mi); // N
+ __ orr(r3, r3, Operand(1 << 30), LeaveCC, eq); // Z
+ __ orr(r3, r3, Operand(1 << 29), LeaveCC, cs); // C
+ __ orr(r3, r3, Operand(1 << 28), LeaveCC, vs); // V
+ __ str(r3, MemOperand(r1));
+
+ // Also check mrs, ignoring everything other than the flags.
+ __ mrs(r3, CPSR);
+ __ and_(r3, r3, Operand(kSpecialCondition));
+ __ str(r3, MemOperand(r2));
+
+ __ bx(lr);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef DEBUG
+ OFStream os(stdout);
+ code->Print(os);
+#endif
+ F5 f = FUNCTION_CAST<F5>(code->entry());
+ Object* dummy = nullptr;
+ USE(dummy);
+
+#define CHECK_MSR_MRS(n, z, c, v) \
+ do { \
+ uint32_t nzcv = (n << 31) | (z << 30) | (c << 29) | (v << 28); \
+ uint32_t result_conditionals = -1; \
+ uint32_t result_mrs = -1; \
+ dummy = CALL_GENERATED_CODE(isolate, f, nzcv, &result_conditionals, \
+ &result_mrs, 0, 0); \
+ CHECK_EQ(nzcv, result_conditionals); \
+ CHECK_EQ(nzcv, result_mrs); \
+ } while (0);
+
+ // N Z C V
+ CHECK_MSR_MRS(0, 0, 0, 0);
+ CHECK_MSR_MRS(0, 0, 0, 1);
+ CHECK_MSR_MRS(0, 0, 1, 0);
+ CHECK_MSR_MRS(0, 0, 1, 1);
+ CHECK_MSR_MRS(0, 1, 0, 0);
+ CHECK_MSR_MRS(0, 1, 0, 1);
+ CHECK_MSR_MRS(0, 1, 1, 0);
+ CHECK_MSR_MRS(0, 1, 1, 1);
+ CHECK_MSR_MRS(1, 0, 0, 0);
+ CHECK_MSR_MRS(1, 0, 0, 1);
+ CHECK_MSR_MRS(1, 0, 1, 0);
+ CHECK_MSR_MRS(1, 0, 1, 1);
+ CHECK_MSR_MRS(1, 1, 0, 0);
+ CHECK_MSR_MRS(1, 1, 0, 1);
+ CHECK_MSR_MRS(1, 1, 1, 0);
+ CHECK_MSR_MRS(1, 1, 1, 1);
+
+#undef CHECK_MSR_MRS
+}
TEST(ARMv8_float32_vrintX) {
// Test the vrintX floating point instructions.
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
index d930173937..c2c5b782dc 100644
--- a/deps/v8/test/cctest/test-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -37,6 +37,7 @@
#include "src/arm64/disasm-arm64.h"
#include "src/arm64/simulator-arm64.h"
#include "src/arm64/utils-arm64.h"
+#include "src/base/platform/platform.h"
#include "src/base/utils/random-number-generator.h"
#include "src/macro-assembler.h"
#include "test/cctest/cctest.h"
@@ -173,8 +174,10 @@ static void InitializeVM() {
Isolate* isolate = CcTest::i_isolate(); \
HandleScope scope(isolate); \
CHECK(isolate != NULL); \
- byte* buf = new byte[buf_size]; \
- MacroAssembler masm(isolate, buf, buf_size, \
+ size_t actual_size; \
+ byte* buf = static_cast<byte*>( \
+ v8::base::OS::Allocate(buf_size, &actual_size, true)); \
+ MacroAssembler masm(isolate, buf, actual_size, \
v8::internal::CodeObjectRequired::kYes); \
RegisterDump core;
@@ -208,7 +211,7 @@ static void InitializeVM() {
__ GetCode(NULL);
#define TEARDOWN() \
- delete[] buf;
+ v8::base::OS::Free(buf, actual_size);
#endif // ifdef USE_SIMULATOR.
diff --git a/deps/v8/test/cctest/test-assembler-mips.cc b/deps/v8/test/cctest/test-assembler-mips.cc
index 1928a753fd..b8a04267e7 100644
--- a/deps/v8/test/cctest/test-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-assembler-mips.cc
@@ -1490,7 +1490,7 @@ TEST(min_max) {
float inputse[kTableLength] = {2.0, 3.0, fnan, 3.0, -0.0, 0.0, finf,
fnan, 42.0, finf, fminf, finf, fnan};
- float inputsf[kTableLength] = {3.0, 2.0, 3.0, fnan, -0.0, 0.0, fnan,
+ float inputsf[kTableLength] = {3.0, 2.0, 3.0, fnan, 0.0, -0.0, fnan,
finf, finf, 42.0, finf, fminf, fnan};
float outputsfmin[kTableLength] = {2.0, 2.0, 3.0, 3.0, -0.0,
-0.0, finf, finf, 42.0, 42.0,
@@ -1524,19 +1524,12 @@ TEST(min_max) {
test.e = inputse[i];
test.f = inputsf[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0);
- if (i < kTableLength - 1) {
- CHECK_EQ(test.c, outputsdmin[i]);
- CHECK_EQ(test.d, outputsdmax[i]);
- CHECK_EQ(test.g, outputsfmin[i]);
- CHECK_EQ(test.h, outputsfmax[i]);
- } else {
- CHECK(std::isnan(test.c));
- CHECK(std::isnan(test.d));
- CHECK(std::isnan(test.g));
- CHECK(std::isnan(test.h));
- }
+ CHECK_EQ(0, memcmp(&test.c, &outputsdmin[i], sizeof(test.c)));
+ CHECK_EQ(0, memcmp(&test.d, &outputsdmax[i], sizeof(test.d)));
+ CHECK_EQ(0, memcmp(&test.g, &outputsfmin[i], sizeof(test.g)));
+ CHECK_EQ(0, memcmp(&test.h, &outputsfmax[i], sizeof(test.h)));
}
}
}
@@ -5360,78 +5353,6 @@ TEST(bal) {
}
-static uint32_t run_lsa(uint32_t rt, uint32_t rs, int8_t sa) {
- Isolate* isolate = CcTest::i_isolate();
- HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0,
- v8::internal::CodeObjectRequired::kYes);
-
- __ lsa(v0, a0, a1, sa);
- __ jr(ra);
- __ nop();
-
- CodeDesc desc;
- assm.GetCode(&desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
-
- F1 f = FUNCTION_CAST<F1>(code->entry());
-
- uint32_t res = reinterpret_cast<uint32_t>(
- CALL_GENERATED_CODE(isolate, f, rt, rs, 0, 0, 0));
-
- return res;
-}
-
-
-TEST(lsa) {
- if (!IsMipsArchVariant(kMips32r6)) return;
-
- CcTest::InitializeVM();
- struct TestCaseLsa {
- int32_t rt;
- int32_t rs;
- uint8_t sa;
- uint32_t expected_res;
- };
-
- struct TestCaseLsa tc[] = {
- // rt, rs, sa, expected_res
- {0x4, 0x1, 1, 0x6},
- {0x4, 0x1, 2, 0x8},
- {0x4, 0x1, 3, 0xc},
- {0x4, 0x1, 4, 0x14},
- {0x0, 0x1, 1, 0x2},
- {0x0, 0x1, 2, 0x4},
- {0x0, 0x1, 3, 0x8},
- {0x0, 0x1, 4, 0x10},
- {0x4, 0x0, 1, 0x4},
- {0x4, 0x0, 2, 0x4},
- {0x4, 0x0, 3, 0x4},
- {0x4, 0x0, 4, 0x4},
- {0x4, INT32_MAX, 1, 0x2}, // Shift overflow.
- {0x4, INT32_MAX >> 1, 2, 0x0}, // Shift overflow.
- {0x4, INT32_MAX >> 2, 3, 0xfffffffc}, // Shift overflow.
- {0x4, INT32_MAX >> 3, 4, 0xfffffff4}, // Shift overflow.
- {INT32_MAX - 1, 0x1, 1, 0x80000000}, // Signed adition overflow.
- {INT32_MAX - 3, 0x1, 2, 0x80000000}, // Signed addition overflow.
- {INT32_MAX - 7, 0x1, 3, 0x80000000}, // Signed addition overflow.
- {INT32_MAX - 15, 0x1, 4, 0x80000000}, // Signed addition overflow.
- {-2, 0x1, 1, 0x0}, // Addition overflow.
- {-4, 0x1, 2, 0x0}, // Addition overflow.
- {-8, 0x1, 3, 0x0}, // Addition overflow.
- {-16, 0x1, 4, 0x0}}; // Addition overflow.
-
- size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseLsa);
- for (size_t i = 0; i < nr_test_cases; ++i) {
- uint32_t res = run_lsa(tc[i].rt, tc[i].rs, tc[i].sa);
- PrintF("0x%x =? 0x%x == lsa(v0, %x, %x, %hhu)\n", tc[i].expected_res, res,
- tc[i].rt, tc[i].rs, tc[i].sa);
- CHECK_EQ(tc[i].expected_res, res);
- }
-}
-
-
TEST(Trampoline) {
// Private member of Assembler class.
static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
diff --git a/deps/v8/test/cctest/test-assembler-mips64.cc b/deps/v8/test/cctest/test-assembler-mips64.cc
index b979db29bb..dd6ed6b68c 100644
--- a/deps/v8/test/cctest/test-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-assembler-mips64.cc
@@ -1620,7 +1620,7 @@ TEST(min_max) {
float inputse[kTableLength] = {2.0, 3.0, fnan, 3.0, -0.0, 0.0, finf,
fnan, 42.0, finf, fminf, finf, fnan};
- float inputsf[kTableLength] = {3.0, 2.0, 3.0, fnan, -0.0, 0.0, fnan,
+ float inputsf[kTableLength] = {3.0, 2.0, 3.0, fnan, 0.0, -0.0, fnan,
finf, finf, 42.0, finf, fminf, fnan};
float outputsfmin[kTableLength] = {2.0, 2.0, 3.0, 3.0, -0.0,
-0.0, finf, finf, 42.0, 42.0,
@@ -1648,25 +1648,18 @@ TEST(min_max) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int i = 0; i < kTableLength; i++) {
+ for (int i = 4; i < kTableLength; i++) {
test.a = inputsa[i];
test.b = inputsb[i];
test.e = inputse[i];
test.f = inputsf[i];
- (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0);
- if (i < kTableLength - 1) {
- CHECK_EQ(test.c, outputsdmin[i]);
- CHECK_EQ(test.d, outputsdmax[i]);
- CHECK_EQ(test.g, outputsfmin[i]);
- CHECK_EQ(test.h, outputsfmax[i]);
- } else {
- CHECK(std::isnan(test.c));
- CHECK(std::isnan(test.d));
- CHECK(std::isnan(test.g));
- CHECK(std::isnan(test.h));
- }
+ CHECK_EQ(0, memcmp(&test.c, &outputsdmin[i], sizeof(test.c)));
+ CHECK_EQ(0, memcmp(&test.d, &outputsdmax[i], sizeof(test.d)));
+ CHECK_EQ(0, memcmp(&test.g, &outputsfmin[i], sizeof(test.g)));
+ CHECK_EQ(0, memcmp(&test.h, &outputsfmax[i], sizeof(test.h)));
}
}
}
diff --git a/deps/v8/test/cctest/test-assembler-s390.cc b/deps/v8/test/cctest/test-assembler-s390.cc
new file mode 100644
index 0000000000..dee8e07935
--- /dev/null
+++ b/deps/v8/test/cctest/test-assembler-s390.cc
@@ -0,0 +1,416 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "src/v8.h"
+
+#include "src/disassembler.h"
+#include "src/factory.h"
+#include "src/macro-assembler.h"
+#include "src/s390/assembler-s390-inl.h"
+#include "src/s390/simulator-s390.h"
+#include "test/cctest/cctest.h"
+
+using namespace v8::internal;
+
+// Define these function prototypes to match JSEntryFunction in execution.cc.
+typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
+typedef Object* (*F2)(int x, int y, int p2, int p3, int p4);
+typedef Object* (*F3)(void* p0, int p1, int p2, int p3, int p4);
+typedef Object* (*F4)(void* p0, void* p1, int p2, int p3, int p4);
+
+#define __ assm.
+
+// Simple add parameter 1 to parameter 2 and return
+TEST(0) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ Assembler assm(isolate, NULL, 0);
+
+ __ lhi(r1, Operand(3)); // test 4-byte instr
+ __ llilf(r2, Operand(4)); // test 6-byte instr
+ __ lgr(r2, r2); // test 2-byte opcode
+ __ ar(r2, r1); // test 2-byte instr
+ __ b(r14);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef DEBUG
+ code->Print();
+#endif
+ F2 f = FUNCTION_CAST<F2>(code->entry());
+ intptr_t res = reinterpret_cast<intptr_t>(
+ CALL_GENERATED_CODE(isolate, f, 3, 4, 0, 0, 0));
+ ::printf("f() = %" V8PRIxPTR "\n", res);
+ CHECK_EQ(7, static_cast<int>(res));
+}
+
+// Loop 100 times, adding loop counter to result
+TEST(1) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ Assembler assm(isolate, NULL, 0);
+ Label L, C;
+
+#if defined(_AIX)
+ __ function_descriptor();
+#endif
+
+ __ lr(r3, r2);
+ __ lhi(r2, Operand(0, kRelocInfo_NONEPTR));
+ __ b(&C);
+
+ __ bind(&L);
+ __ ar(r2, r3);
+ __ ahi(r3, Operand(-1 & 0xFFFF));
+
+ __ bind(&C);
+ __ cfi(r3, Operand(0, kRelocInfo_NONEPTR));
+ __ bne(&L);
+ __ b(r14);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef DEBUG
+ code->Print();
+#endif
+ F1 f = FUNCTION_CAST<F1>(code->entry());
+ intptr_t res = reinterpret_cast<intptr_t>(
+ CALL_GENERATED_CODE(isolate, f, 100, 0, 0, 0, 0));
+ ::printf("f() = %" V8PRIxPTR "\n", res);
+ CHECK_EQ(5050, static_cast<int>(res));
+}
+
+TEST(2) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ // Create a function that accepts &t, and loads, manipulates, and stores
+ // the doubles and floats.
+ Assembler assm(CcTest::i_isolate(), NULL, 0);
+ Label L, C;
+
+#if defined(_AIX)
+ __ function_descriptor();
+#endif
+
+ __ lgr(r3, r2);
+ __ lhi(r2, Operand(1));
+ __ b(&C);
+
+ __ bind(&L);
+ __ lr(r5, r2); // Set up muliplicant in R4:R5
+ __ mr_z(r4, r3); // this is actually R4:R5 = R5 * R2
+ __ lr(r2, r5);
+ __ ahi(r3, Operand(-1 & 0xFFFF));
+
+ __ bind(&C);
+ __ cfi(r3, Operand(0, kRelocInfo_NONEPTR));
+ __ bne(&L);
+ __ b(r14);
+
+ // some relocated stuff here, not executed
+ __ RecordComment("dead code, just testing relocations");
+ __ iilf(r0, Operand(isolate->factory()->true_value()));
+ __ RecordComment("dead code, just testing immediate operands");
+ __ iilf(r0, Operand(-1));
+ __ iilf(r0, Operand(0xFF000000));
+ __ iilf(r0, Operand(0xF0F0F0F0));
+ __ iilf(r0, Operand(0xFFF0FFFF));
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef DEBUG
+ code->Print();
+#endif
+ F1 f = FUNCTION_CAST<F1>(code->entry());
+ intptr_t res = reinterpret_cast<intptr_t>(
+ CALL_GENERATED_CODE(isolate, f, 10, 0, 0, 0, 0));
+ ::printf("f() = %" V8PRIxPTR "\n", res);
+ CHECK_EQ(3628800, static_cast<int>(res));
+}
+
+TEST(3) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ Assembler assm(isolate, NULL, 0);
+
+ __ ar(r14, r13);
+ __ sr(r14, r13);
+ __ mr_z(r14, r13);
+ __ dr(r14, r13);
+ __ or_z(r14, r13);
+ __ nr(r14, r13);
+ __ xr(r14, r13);
+
+ __ agr(r14, r13);
+ __ sgr(r14, r13);
+ __ ogr(r14, r13);
+ __ ngr(r14, r13);
+ __ xgr(r14, r13);
+
+ __ ahi(r13, Operand(123));
+ __ aghi(r13, Operand(123));
+ __ stm(r1, r2, MemOperand(r3, r0, 123));
+ __ slag(r1, r2, Operand(123));
+ __ lay(r1, MemOperand(r2, r3, -123));
+ __ a(r13, MemOperand(r1, r2, 123));
+ __ ay(r13, MemOperand(r1, r2, 123));
+ __ brc(Condition(14), Operand(123));
+ __ brc(Condition(14), Operand(-123));
+ __ brcl(Condition(14), Operand(123), false);
+ __ brcl(Condition(14), Operand(-123), false);
+ __ iilf(r13, Operand(123456789));
+ __ iihf(r13, Operand(-123456789));
+ __ mvc(MemOperand(r0, 123), MemOperand(r4, 567), 89);
+ __ sll(r13, Operand(10));
+
+ v8::internal::byte* bufPos = assm.buffer_pos();
+ ::printf("buffer position = %p", bufPos);
+ ::fflush(stdout);
+ // OS::DebugBreak();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef DEBUG
+ code->Print();
+#endif
+ USE(code);
+ ::exit(0);
+}
+
+#if 0
+TEST(4) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ Assembler assm(isolate, NULL, 0);
+ Label L2, L3, L4;
+
+ __ chi(r2, Operand(10));
+ __ ble(&L2);
+ __ lr(r2, r4);
+ __ ar(r2, r3);
+ __ b(&L3);
+
+ __ bind(&L2);
+ __ chi(r2, Operand(5));
+ __ bgt(&L4);
+
+ __ lhi(r2, Operand::Zero());
+ __ b(&L3);
+
+ __ bind(&L4);
+ __ lr(r2, r3);
+ __ sr(r2, r4);
+
+ __ bind(&L3);
+ __ lgfr(r2, r3);
+ __ b(r14);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef DEBUG
+ code->Print();
+#endif
+ F2 f = FUNCTION_CAST<F2>(code->entry());
+ intptr_t res = reinterpret_cast<intptr_t>(
+ CALL_GENERATED_CODE(isolate, f, 3, 4, 3, 0, 0));
+ ::printf("f() = %" V8PRIdPTR "\n", res);
+ CHECK_EQ(4, static_cast<int>(res));
+}
+
+
+// Test ExtractBitRange
+TEST(5) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, NULL, 0);
+
+ __ mov(r2, Operand(0x12345678));
+ __ ExtractBitRange(r3, r2, 3, 2);
+ __ lgfr(r2, r3);
+ __ b(r14);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef DEBUG
+ code->Print();
+#endif
+ F2 f = FUNCTION_CAST<F2>(code->entry());
+ intptr_t res =
+ reinterpret_cast<intptr_t>(CALL_GENERATED_CODE(isolate, f, 3, 4, 3, 0, 0));
+ ::printf("f() = %" V8PRIdPTR "\n", res);
+ CHECK_EQ(2, static_cast<int>(res));
+}
+
+
+// Test JumpIfSmi
+TEST(6) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, NULL, 0);
+
+ Label yes;
+
+ __ mov(r2, Operand(0x12345678));
+ __ JumpIfSmi(r2, &yes);
+ __ beq(&yes);
+ __ Load(r2, Operand::Zero());
+ __ b(r14);
+ __ bind(&yes);
+ __ Load(r2, Operand(1));
+ __ b(r14);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef DEBUG
+ code->Print();
+#endif
+ F2 f = FUNCTION_CAST<F2>(code->entry());
+ intptr_t res =
+ reinterpret_cast<intptr_t>(CALL_GENERATED_CODE(isolate, f, 3, 4, 3, 0, 0));
+ ::printf("f() = %" V8PRIdPTR "\n", res);
+ CHECK_EQ(1, static_cast<int>(res));
+}
+
+
+// Test fix<->floating point conversion.
+TEST(7) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, NULL, 0);
+
+ Label yes;
+
+ __ mov(r3, Operand(0x1234));
+ __ cdfbr(d1, r3);
+ __ ldr(d2, d1);
+ __ adbr(d1, d2);
+ __ cfdbr(Condition(0), r2, d1);
+ __ b(r14);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef DEBUG
+ code->Print();
+#endif
+ F2 f = FUNCTION_CAST<F2>(code->entry());
+ intptr_t res =
+ reinterpret_cast<intptr_t>(CALL_GENERATED_CODE(isolate, f, 3, 4, 3, 0, 0));
+ ::printf("f() = %" V8PRIdPTR "\n", res);
+ CHECK_EQ(0x2468, static_cast<int>(res));
+}
+
+
+// Test DSGR
+TEST(8) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, NULL, 0);
+
+ // Zero upper bits of r3/r4
+ __ llihf(r3, Operand::Zero());
+ __ llihf(r4, Operand::Zero());
+ __ mov(r3, Operand(0x0002));
+ __ mov(r4, Operand(0x0002));
+ __ dsgr(r2, r4);
+ __ b(r14);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef DEBUG
+ code->Print();
+#endif
+ F1 f = FUNCTION_CAST<F1>(code->entry());
+ intptr_t res =
+ reinterpret_cast<intptr_t>(CALL_GENERATED_CODE(isolate, f, 100, 0,
+ 0, 0, 0));
+ ::printf("f() = %" V8PRIdPTR "\n", res);
+ CHECK_EQ(0, static_cast<int>(res));
+}
+
+
+// Test LZDR
+TEST(9) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, NULL, 0);
+
+ __ lzdr(d4);
+ __ b(r14);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef DEBUG
+ code->Print();
+#endif
+ F1 f = FUNCTION_CAST<F1>(code->entry());
+ intptr_t res =
+ reinterpret_cast<intptr_t>(CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ ::printf("f() = %" V8PRIdPTR "\n", res);
+}
+#endif
+
+#undef __
diff --git a/deps/v8/test/cctest/test-ast-expression-visitor.cc b/deps/v8/test/cctest/test-ast-expression-visitor.cc
index a40f87ccfe..bda1fba3b4 100644
--- a/deps/v8/test/cctest/test-ast-expression-visitor.cc
+++ b/deps/v8/test/cctest/test-ast-expression-visitor.cc
@@ -378,14 +378,17 @@ TEST(VisitYield) {
CHECK_EXPR(CallRuntime, Bounds::Unbounded());
}
}
- // Explicit yield
+ // Explicit yield (argument wrapped with CreateIterResultObject)
CHECK_EXPR(Yield, Bounds::Unbounded()) {
CHECK_VAR(.generator_object, Bounds::Unbounded());
- CHECK_EXPR(Literal, Bounds::Unbounded());
+ CHECK_EXPR(CallRuntime, Bounds::Unbounded()) {
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
}
- // Implicit final yield
- CHECK_EXPR(Yield, Bounds::Unbounded()) {
- CHECK_VAR(.generator_object, Bounds::Unbounded());
+ // Argument to implicit final return
+ CHECK_EXPR(CallRuntime, Bounds::Unbounded()) { // CreateIterResultObject
+ CHECK_EXPR(Literal, Bounds::Unbounded());
CHECK_EXPR(Literal, Bounds::Unbounded());
}
// Implicit finally clause
diff --git a/deps/v8/test/cctest/test-ast.cc b/deps/v8/test/cctest/test-ast.cc
index 49e44eb7b9..365652ef1d 100644
--- a/deps/v8/test/cctest/test-ast.cc
+++ b/deps/v8/test/cctest/test-ast.cc
@@ -38,7 +38,8 @@ TEST(List) {
List<AstNode*>* list = new List<AstNode*>(0);
CHECK_EQ(0, list->length());
- Zone zone;
+ v8::base::AccountingAllocator allocator;
+ Zone zone(&allocator);
AstValueFactory value_factory(&zone, 0);
AstNodeFactory factory(&value_factory);
AstNode* node = factory.NewEmptyStatement(RelocInfo::kNoPosition);
diff --git a/deps/v8/test/cctest/test-bit-vector.cc b/deps/v8/test/cctest/test-bit-vector.cc
index e8571d965e..6b9fbc7ee2 100644
--- a/deps/v8/test/cctest/test-bit-vector.cc
+++ b/deps/v8/test/cctest/test-bit-vector.cc
@@ -35,7 +35,8 @@
using namespace v8::internal;
TEST(BitVector) {
- Zone zone;
+ v8::base::AccountingAllocator allocator;
+ Zone zone(&allocator);
{
BitVector v(15, &zone);
v.Add(1);
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index db2ccb29af..32d720e24e 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -39,8 +39,8 @@ using namespace v8::internal;
static Handle<Object> GetGlobalProperty(const char* name) {
Isolate* isolate = CcTest::i_isolate();
- return Object::GetProperty(
- isolate, isolate->global_object(), name).ToHandleChecked();
+ return JSReceiver::GetProperty(isolate, isolate->global_object(), name)
+ .ToHandleChecked();
}
@@ -59,12 +59,12 @@ static Handle<JSFunction> Compile(const char* source) {
Isolate* isolate = CcTest::i_isolate();
Handle<String> source_code = isolate->factory()->NewStringFromUtf8(
CStrVector(source)).ToHandleChecked();
- Handle<SharedFunctionInfo> shared_function = Compiler::CompileScript(
+ Handle<SharedFunctionInfo> shared = Compiler::GetSharedFunctionInfoForScript(
source_code, Handle<String>(), 0, 0, v8::ScriptOriginOptions(),
Handle<Object>(), Handle<Context>(isolate->native_context()), NULL, NULL,
v8::ScriptCompiler::kNoCompileOptions, NOT_NATIVES_CODE, false);
return isolate->factory()->NewFunctionFromSharedFunctionInfo(
- shared_function, isolate->native_context());
+ shared, isolate->native_context());
}
@@ -227,10 +227,9 @@ TEST(C2JSFrames) {
Handle<JSObject> global(isolate->context()->global_object());
Execution::Call(isolate, fun0, global, 0, NULL).Check();
- Handle<String> foo_string =
- isolate->factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("foo"));
- Handle<Object> fun1 = Object::GetProperty(
- isolate->global_object(), foo_string).ToHandleChecked();
+ Handle<Object> fun1 =
+ JSReceiver::GetProperty(isolate, isolate->global_object(), "foo")
+ .ToHandleChecked();
CHECK(fun1->IsJSFunction());
Handle<Object> argv[] = {isolate->factory()->InternalizeOneByteString(
@@ -328,7 +327,10 @@ TEST(FeedbackVectorPreservedAcrossRecompiles) {
TEST(FeedbackVectorUnaffectedByScopeChanges) {
- if (i::FLAG_always_opt || !i::FLAG_lazy) return;
+ if (i::FLAG_always_opt || !i::FLAG_lazy ||
+ (FLAG_ignition && FLAG_ignition_eager)) {
+ return;
+ }
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
@@ -350,7 +352,8 @@ TEST(FeedbackVectorUnaffectedByScopeChanges) {
->Get(context, v8_str("morphing_call"))
.ToLocalChecked())));
- // Not compiled, and so no feedback vector allocated yet.
+ // If we are compiling lazily then it should not be compiled, and so no
+ // feedback vector allocated yet.
CHECK(!f->shared()->is_compiled());
CHECK(f->shared()->feedback_vector()->is_empty());
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index 361c879af3..24c84c3df8 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -119,8 +119,7 @@ class TestSetup {
} // namespace
-
-i::Code* CreateCode(LocalContext* env) {
+i::AbstractCode* CreateCode(LocalContext* env) {
static int counter = 0;
i::EmbeddedVector<char, 256> script;
i::EmbeddedVector<char, 32> name;
@@ -138,10 +137,9 @@ i::Code* CreateCode(LocalContext* env) {
i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(
v8::Utils::OpenHandle(*GetFunction(env->local(), name_start)));
- return fun->code();
+ return fun->abstract_code();
}
-
TEST(CodeEvents) {
CcTest::InitializeVM();
LocalContext env;
@@ -151,13 +149,13 @@ TEST(CodeEvents) {
i::HandleScope scope(isolate);
- i::Code* aaa_code = CreateCode(&env);
- i::Code* comment_code = CreateCode(&env);
- i::Code* args5_code = CreateCode(&env);
- i::Code* comment2_code = CreateCode(&env);
- i::Code* moved_code = CreateCode(&env);
- i::Code* args3_code = CreateCode(&env);
- i::Code* args4_code = CreateCode(&env);
+ i::AbstractCode* aaa_code = CreateCode(&env);
+ i::AbstractCode* comment_code = CreateCode(&env);
+ i::AbstractCode* args5_code = CreateCode(&env);
+ i::AbstractCode* comment2_code = CreateCode(&env);
+ i::AbstractCode* moved_code = CreateCode(&env);
+ i::AbstractCode* args3_code = CreateCode(&env);
+ i::AbstractCode* args4_code = CreateCode(&env);
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate->heap());
profiles->StartProfiling("", false);
@@ -174,7 +172,7 @@ TEST(CodeEvents) {
profiler.CodeCreateEvent(i::Logger::BUILTIN_TAG, comment_code, "comment");
profiler.CodeCreateEvent(i::Logger::STUB_TAG, args5_code, 5);
profiler.CodeCreateEvent(i::Logger::BUILTIN_TAG, comment2_code, "comment2");
- profiler.CodeMoveEvent(comment2_code->address(), moved_code->address());
+ profiler.CodeMoveEvent(comment2_code, moved_code->address());
profiler.CodeCreateEvent(i::Logger::STUB_TAG, args3_code, 3);
profiler.CodeCreateEvent(i::Logger::STUB_TAG, args4_code, 4);
@@ -203,22 +201,20 @@ TEST(CodeEvents) {
CHECK_EQ(0, strcmp("comment2", comment2->name()));
}
-
template<typename T>
static int CompareProfileNodes(const T* p1, const T* p2) {
return strcmp((*p1)->entry()->name(), (*p2)->entry()->name());
}
-
TEST(TickEvents) {
TestSetup test_setup;
LocalContext env;
i::Isolate* isolate = CcTest::i_isolate();
i::HandleScope scope(isolate);
- i::Code* frame1_code = CreateCode(&env);
- i::Code* frame2_code = CreateCode(&env);
- i::Code* frame3_code = CreateCode(&env);
+ i::AbstractCode* frame1_code = CreateCode(&env);
+ i::AbstractCode* frame2_code = CreateCode(&env);
+ i::AbstractCode* frame3_code = CreateCode(&env);
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate->heap());
profiles->StartProfiling("", false);
@@ -265,7 +261,6 @@ TEST(TickEvents) {
CHECK_EQ(0, top_down_ddd_children->length());
}
-
// http://crbug/51594
// This test must not crash.
TEST(CrashIfStoppingLastNonExistentProfile) {
@@ -278,7 +273,6 @@ TEST(CrashIfStoppingLastNonExistentProfile) {
profiler->StopProfiling("");
}
-
// http://code.google.com/p/v8/issues/detail?id=1398
// Long stacks (exceeding max frames limit) must not be erased.
TEST(Issue1398) {
@@ -287,7 +281,7 @@ TEST(Issue1398) {
i::Isolate* isolate = CcTest::i_isolate();
i::HandleScope scope(isolate);
- i::Code* code = CreateCode(&env);
+ i::AbstractCode* code = CreateCode(&env);
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate->heap());
profiles->StartProfiling("", false);
@@ -322,7 +316,6 @@ TEST(Issue1398) {
CHECK_EQ(1 + i::TickSample::kMaxFramesCount, actual_depth); // +1 for PC.
}
-
TEST(DeleteAllCpuProfiles) {
CcTest::InitializeVM();
TestSetup test_setup;
@@ -990,11 +983,11 @@ TEST(BoundFunctionCall) {
profile->Delete();
}
-
// This tests checks distribution of the samples through the source lines.
-TEST(TickLines) {
+static void TickLines(bool optimize) {
CcTest::InitializeVM();
LocalContext env;
+ i::FLAG_allow_natives_syntax = true;
i::FLAG_turbo_source_positions = true;
i::Isolate* isolate = CcTest::i_isolate();
i::Factory* factory = isolate->factory();
@@ -1003,6 +996,8 @@ TEST(TickLines) {
i::EmbeddedVector<char, 512> script;
const char* func_name = "func";
+ const char* opt_func =
+ optimize ? "%OptimizeFunctionOnNextCall" : "%NeverOptimizeFunction";
i::SNPrintF(script,
"function %s() {\n"
" var n = 0;\n"
@@ -1012,22 +1007,17 @@ TEST(TickLines) {
" n += m * m * m;\n"
" }\n"
"}\n"
+ "%s(%s);\n"
"%s();\n",
- func_name, func_name);
+ func_name, opt_func, func_name, func_name);
CompileRun(script.start());
i::Handle<i::JSFunction> func = i::Handle<i::JSFunction>::cast(
v8::Utils::OpenHandle(*GetFunction(env.local(), func_name)));
CHECK(func->shared());
- CHECK(func->shared()->code());
- i::Code* code = NULL;
- if (func->code()->is_optimized_code()) {
- code = func->code();
- } else {
- CHECK(func->shared()->code() == func->code() || !i::FLAG_crankshaft);
- code = func->shared()->code();
- }
+ CHECK(func->shared()->abstract_code());
+ i::AbstractCode* code = func->abstract_code();
CHECK(code);
i::Address code_address = code->instruction_start();
CHECK(code_address);
@@ -1088,6 +1078,10 @@ TEST(TickLines) {
CHECK_EQ(hit_count, value);
}
+TEST(TickLinesBaseline) { TickLines(false); }
+
+TEST(TickLinesOptimized) { TickLines(true); }
+
static const char* call_function_test_source =
"%NeverOptimizeFunction(bar);\n"
"%NeverOptimizeFunction(start);\n"
@@ -1510,6 +1504,68 @@ TEST(JsNativeJsRuntimeJsSampleMultiple) {
profile->Delete();
}
+static const char* inlining_test_source =
+ "%NeverOptimizeFunction(action);\n"
+ "%NeverOptimizeFunction(start);\n"
+ "%OptimizeFunctionOnNextCall(level1);\n"
+ "%OptimizeFunctionOnNextCall(level2);\n"
+ "%OptimizeFunctionOnNextCall(level3);\n"
+ "var finish = false;\n"
+ "function action(n) {\n"
+ " var s = 0;\n"
+ " for (var i = 0; i < n; ++i) s += i*i*i;\n"
+ " if (finish)\n"
+ " startProfiling('my_profile');\n"
+ " return s;\n"
+ "}\n"
+ "function level3() { return action(100); }\n"
+ "function level2() { return level3() * 2; }\n"
+ "function level1() { return level2(); }\n"
+ "function start() {\n"
+ " var n = 100;\n"
+ " while (--n)\n"
+ " level1();\n"
+ " finish = true;\n"
+ " level1();\n"
+ "}";
+
+// The test check multiple entrances/exits between JS and native code.
+//
+// [Top down]:
+// (root) #0 1
+// start #16 3
+// level1 #0 4
+// level2 #16 5
+// level3 #16 6
+// action #16 7
+// (program) #0 2
+TEST(Inlining) {
+ i::FLAG_allow_natives_syntax = true;
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Context::Scope context_scope(env);
+
+ CompileRun(inlining_test_source);
+ v8::Local<v8::Function> function = GetFunction(env, "start");
+
+ v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
+ v8::Local<v8::String> profile_name = v8_str("my_profile");
+ function->Call(env, env->Global(), 0, NULL).ToLocalChecked();
+ v8::CpuProfile* profile = cpu_profiler->StopProfiling(profile_name);
+ CHECK(profile);
+ // Dump collected profile to have a better diagnostic in case of failure.
+ reinterpret_cast<i::CpuProfile*>(profile)->Print();
+
+ const v8::CpuProfileNode* root = profile->GetTopDownRoot();
+ const v8::CpuProfileNode* start_node = GetChild(env, root, "start");
+ const v8::CpuProfileNode* level1_node = GetChild(env, start_node, "level1");
+ const v8::CpuProfileNode* level2_node = GetChild(env, level1_node, "level2");
+ const v8::CpuProfileNode* level3_node = GetChild(env, level2_node, "level3");
+ GetChild(env, level3_node, "action");
+
+ profile->Delete();
+}
+
// [Top down]:
// 0 (root) #0 1
// 2 (program) #0 2
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index dd483c06d5..ab27f394e9 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -2747,7 +2747,7 @@ TEST(DebugStepKeyedLoadLoop) {
foo->Call(context, env->Global(), kArgc, args).ToLocalChecked();
// With stepping all break locations are hit.
- CHECK_EQ(45, break_point_hit_count);
+ CHECK_EQ(44, break_point_hit_count);
v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
CheckDebuggerUnloaded(env->GetIsolate());
@@ -2888,7 +2888,6 @@ static void DoDebugStepNamedStoreLoop(int expected) {
// Test of the stepping mechanism for named load in a loop.
TEST(DebugStepNamedStoreLoop) { DoDebugStepNamedStoreLoop(34); }
-
// Test the stepping mechanism with different ICs.
TEST(DebugStepLinearMixedICs) {
DebugLocalContext env;
@@ -2917,7 +2916,7 @@ TEST(DebugStepLinearMixedICs) {
foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
// With stepping all break locations are hit.
- CHECK_EQ(11, break_point_hit_count);
+ CHECK_EQ(10, break_point_hit_count);
v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
CheckDebuggerUnloaded(env->GetIsolate());
@@ -2964,7 +2963,7 @@ TEST(DebugStepDeclarations) {
step_action = StepIn;
break_point_hit_count = 0;
foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
- CHECK_EQ(6, break_point_hit_count);
+ CHECK_EQ(5, break_point_hit_count);
// Get rid of the debug event listener.
v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
@@ -2998,7 +2997,7 @@ TEST(DebugStepLocals) {
step_action = StepIn;
break_point_hit_count = 0;
foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
- CHECK_EQ(6, break_point_hit_count);
+ CHECK_EQ(5, break_point_hit_count);
// Get rid of the debug event listener.
v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
@@ -3459,26 +3458,25 @@ TEST(DebugConditional) {
v8::Local<v8::Context> context = env.context();
// Create a function for testing stepping. Run it to allow it to get
// optimized.
- const char* src = "function foo(x) { "
- " var a;"
- " a = x ? 1 : 2;"
- " return a;"
- "}"
- "foo()";
+ const char* src =
+ "function foo(x) { "
+ " return x ? 1 : 2;"
+ "}"
+ "foo()";
v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
SetBreakPoint(foo, 0); // "var a;"
step_action = StepIn;
break_point_hit_count = 0;
foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
- CHECK_EQ(4, break_point_hit_count);
+ CHECK_EQ(2, break_point_hit_count);
step_action = StepIn;
break_point_hit_count = 0;
const int argc = 1;
v8::Local<v8::Value> argv_true[argc] = {v8::True(isolate)};
foo->Call(context, env->Global(), argc, argv_true).ToLocalChecked();
- CHECK_EQ(4, break_point_hit_count);
+ CHECK_EQ(2, break_point_hit_count);
// Get rid of the debug event listener.
v8::Debug::SetDebugEventListener(isolate, nullptr);
@@ -8072,3 +8070,118 @@ TEST(BreakLocationIterator) {
DisableDebugger(isolate);
}
+
+TEST(DisableTailCallElimination) {
+ i::FLAG_allow_natives_syntax = true;
+ i::FLAG_harmony_tailcalls = true;
+ // TODO(ishell, 4698): Investigate why TurboFan in --always-opt mode makes
+ // stack[2].getFunctionName() return null.
+ i::FLAG_turbo_inlining = false;
+
+ DebugLocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ CHECK(v8::Debug::IsTailCallEliminationEnabled(isolate));
+
+ CompileRun(
+ "'use strict'; \n"
+ "Error.prepareStackTrace = (error,stack) => { \n"
+ " error.strace = stack; \n"
+ " return error.message + \"\\n at \" + stack.join(\"\\n at \"); \n"
+ "} \n"
+ " \n"
+ "function getCaller() { \n"
+ " var e = new Error(); \n"
+ " e.stack; // prepare stack trace \n"
+ " var stack = e.strace; \n"
+ " %GlobalPrint('caller: '); \n"
+ " %GlobalPrint(stack[2].getFunctionName()); \n"
+ " %GlobalPrint('\\n'); \n"
+ " return stack[2].getFunctionName(); \n"
+ "} \n"
+ "function f() { \n"
+ " var caller = getCaller(); \n"
+ " if (caller === 'g') return 1; \n"
+ " if (caller === 'h') return 2; \n"
+ " return 0; \n"
+ "} \n"
+ "function g() { \n"
+ " return f(); \n"
+ "} \n"
+ "function h() { \n"
+ " var result = g(); \n"
+ " return result; \n"
+ "} \n"
+ "%NeverOptimizeFunction(getCaller); \n"
+ "%NeverOptimizeFunction(f); \n"
+ "%NeverOptimizeFunction(h); \n"
+ "");
+ ExpectInt32("h();", 2);
+ ExpectInt32("h(); %OptimizeFunctionOnNextCall(g); h();", 2);
+ v8::Debug::SetTailCallEliminationEnabled(isolate, false);
+ CHECK(!v8::Debug::IsTailCallEliminationEnabled(isolate));
+ ExpectInt32("h();", 1);
+ ExpectInt32("h(); %OptimizeFunctionOnNextCall(g); h();", 1);
+ v8::Debug::SetTailCallEliminationEnabled(isolate, true);
+ CHECK(v8::Debug::IsTailCallEliminationEnabled(isolate));
+ ExpectInt32("h();", 2);
+ ExpectInt32("h(); %OptimizeFunctionOnNextCall(g); h();", 2);
+}
+
+TEST(DebugStepNextTailCallEliminiation) {
+ i::FLAG_allow_natives_syntax = true;
+ i::FLAG_harmony_tailcalls = true;
+ // TODO(ishell, 4698): Investigate why TurboFan in --always-opt mode makes
+ // stack[2].getFunctionName() return null.
+ i::FLAG_turbo_inlining = false;
+
+ DebugLocalContext env;
+ env.ExposeDebug();
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ CHECK(v8::Debug::IsTailCallEliminationEnabled(isolate));
+
+ const char* source =
+ "'use strict'; \n"
+ "var Debug = debug.Debug; \n"
+ "var exception = null; \n"
+ "var breaks = 0; \n"
+ "var log = []; \n"
+ "function f(x) { \n"
+ " if (x == 2) { \n"
+ " debugger; // Break a \n"
+ " } \n"
+ " if (x-- > 0) { // Break b \n"
+ " return f(x); // Break c \n"
+ " } \n"
+ "} // Break e \n"
+ "function listener(event, exec_state, event_data, data) {\n"
+ " if (event != Debug.DebugEvent.Break) return; \n"
+ " try { \n"
+ " var line = exec_state.frame(0).sourceLineText(); \n"
+ " var col = exec_state.frame(0).sourceColumn(); \n"
+ " var match = line.match(/\\/\\/ Break (\\w)/); \n"
+ " log.push(match[1] + col); \n"
+ " exec_state.prepareStep(Debug.StepAction.StepNext); \n"
+ " } catch (e) { \n"
+ " exception = e; \n"
+ " }; \n"
+ "}; \n"
+ "Debug.setListener(listener); \n"
+ "f(4); \n"
+ "Debug.setListener(null); // Break d \n";
+
+ CompileRun(source);
+ ExpectNull("exception");
+ ExpectString("JSON.stringify(log)", "[\"a4\",\"b2\",\"c4\",\"c11\",\"d0\"]");
+
+ v8::Debug::SetTailCallEliminationEnabled(isolate, false);
+ CompileRun(
+ "log = []; \n"
+ "Debug.setListener(listener); \n"
+ "f(5); \n"
+ "Debug.setListener(null); // Break f \n");
+ ExpectNull("exception");
+ ExpectString("JSON.stringify(log)",
+ "[\"a4\",\"b2\",\"c4\",\"e0\",\"e0\",\"e0\",\"e0\",\"f0\"]");
+}
diff --git a/deps/v8/test/cctest/test-decls.cc b/deps/v8/test/cctest/test-decls.cc
index 4e9595258a..35cb515dc1 100644
--- a/deps/v8/test/cctest/test-decls.cc
+++ b/deps/v8/test/cctest/test-decls.cc
@@ -227,7 +227,6 @@ v8::Local<Integer> DeclarationContext::Query(Local<Name> key) {
// Test global declaration of a property the interceptor doesn't know
// about and doesn't handle.
TEST(Unknown) {
- i::FLAG_legacy_const = true;
HandleScope scope(CcTest::isolate());
v8::V8::Initialize();
@@ -251,20 +250,6 @@ TEST(Unknown) {
0,
EXPECT_RESULT);
}
-
- { DeclarationContext context;
- context.Check("const x; x",
- 1, // access
- 0, 0, EXPECT_RESULT, Undefined(CcTest::isolate()));
- }
-
- { DeclarationContext context;
- context.Check("const x = 0; x",
- 1, // access
- 0,
- 0,
- EXPECT_RESULT, Number::New(CcTest::isolate(), 0));
- }
}
@@ -277,7 +262,6 @@ class AbsentPropertyContext: public DeclarationContext {
TEST(Absent) {
- i::FLAG_legacy_const = true;
v8::Isolate* isolate = CcTest::isolate();
v8::V8::Initialize();
HandleScope scope(isolate);
@@ -304,18 +288,6 @@ TEST(Absent) {
}
{ AbsentPropertyContext context;
- context.Check("const x; x",
- 1, // access
- 0, 0, EXPECT_RESULT, Undefined(isolate));
- }
-
- { AbsentPropertyContext context;
- context.Check("const x = 0; x",
- 1, // access
- 0, 0, EXPECT_RESULT, Number::New(isolate, 0));
- }
-
- { AbsentPropertyContext context;
context.Check("if (false) { var x = 0 }; x",
1, // access
0, 0, EXPECT_RESULT, Undefined(isolate));
@@ -361,7 +333,6 @@ class AppearingPropertyContext: public DeclarationContext {
TEST(Appearing) {
- i::FLAG_legacy_const = true;
v8::V8::Initialize();
HandleScope scope(CcTest::isolate());
@@ -385,18 +356,6 @@ TEST(Appearing) {
0,
EXPECT_RESULT);
}
-
- { AppearingPropertyContext context;
- context.Check("const x; x",
- 1, // access
- 0, 0, EXPECT_RESULT, Undefined(CcTest::isolate()));
- }
-
- { AppearingPropertyContext context;
- context.Check("const x = 0; x",
- 1, // access
- 0, 0, EXPECT_RESULT, Number::New(CcTest::isolate(), 0));
- }
}
@@ -418,7 +377,6 @@ class ExistsInPrototypeContext: public DeclarationContext {
TEST(ExistsInPrototype) {
- i::FLAG_legacy_const = true;
HandleScope scope(CcTest::isolate());
// Sanity check to make sure that the holder of the interceptor
@@ -443,22 +401,6 @@ TEST(ExistsInPrototype) {
0,
EXPECT_RESULT, Number::New(CcTest::isolate(), 0));
}
-
- { ExistsInPrototypeContext context;
- context.Check("const x; x",
- 0,
- 0,
- 0,
- EXPECT_RESULT, Undefined(CcTest::isolate()));
- }
-
- { ExistsInPrototypeContext context;
- context.Check("const x = 0; x",
- 0,
- 0,
- 0,
- EXPECT_RESULT, Number::New(CcTest::isolate(), 0));
- }
}
@@ -528,7 +470,6 @@ class ExistsInHiddenPrototypeContext: public DeclarationContext {
TEST(ExistsInHiddenPrototype) {
- i::FLAG_legacy_const = true;
HandleScope scope(CcTest::isolate());
{ ExistsInHiddenPrototypeContext context;
@@ -548,18 +489,6 @@ TEST(ExistsInHiddenPrototype) {
0,
EXPECT_RESULT);
}
-
- // TODO(mstarzinger): The semantics of global const is vague.
- { ExistsInHiddenPrototypeContext context;
- context.Check("const x; x", 0, 0, 0, EXPECT_RESULT,
- Undefined(CcTest::isolate()));
- }
-
- // TODO(mstarzinger): The semantics of global const is vague.
- { ExistsInHiddenPrototypeContext context;
- context.Check("const x = 0; x", 0, 0, 0, EXPECT_RESULT,
- Number::New(CcTest::isolate(), 0));
- }
}
@@ -612,7 +541,6 @@ class SimpleContext {
TEST(CrossScriptReferences) {
- i::FLAG_legacy_const = true;
v8::Isolate* isolate = CcTest::isolate();
HandleScope scope(isolate);
@@ -621,8 +549,6 @@ TEST(CrossScriptReferences) {
EXPECT_RESULT, Number::New(isolate, 1));
context.Check("var x = 2; x",
EXPECT_RESULT, Number::New(isolate, 2));
- context.Check("const x = 3; x", EXPECT_EXCEPTION);
- context.Check("const x = 4; x", EXPECT_EXCEPTION);
context.Check("x = 5; x",
EXPECT_RESULT, Number::New(isolate, 5));
context.Check("var x = 6; x",
@@ -632,22 +558,6 @@ TEST(CrossScriptReferences) {
context.Check("function x() { return 7 }; x()",
EXPECT_RESULT, Number::New(isolate, 7));
}
-
- { SimpleContext context;
- context.Check("const x = 1; x",
- EXPECT_RESULT, Number::New(isolate, 1));
- context.Check("var x = 2; x", // assignment ignored
- EXPECT_RESULT, Number::New(isolate, 1));
- context.Check("const x = 3; x", EXPECT_EXCEPTION);
- context.Check("x = 4; x", // assignment ignored
- EXPECT_RESULT, Number::New(isolate, 1));
- context.Check("var x = 5; x", // assignment ignored
- EXPECT_RESULT, Number::New(isolate, 1));
- context.Check("this.x",
- EXPECT_RESULT, Number::New(isolate, 1));
- context.Check("function x() { return 7 }; x",
- EXPECT_EXCEPTION);
- }
}
diff --git a/deps/v8/test/cctest/test-dictionary.cc b/deps/v8/test/cctest/test-dictionary.cc
index 0d4edf5dd4..cc4a7533d8 100644
--- a/deps/v8/test/cctest/test-dictionary.cc
+++ b/deps/v8/test/cctest/test-dictionary.cc
@@ -82,7 +82,7 @@ static void TestHashMap(Handle<HashMap> table) {
CHECK_EQ(table->NumberOfElements(), i + 1);
CHECK_NE(table->FindEntry(key), HashMap::kNotFound);
CHECK_EQ(table->Lookup(key), *value);
- CHECK(key->GetIdentityHash()->IsSmi());
+ CHECK(JSReceiver::GetIdentityHash(isolate, key)->IsSmi());
}
// Keys never added to the map which already have an identity hash
@@ -92,7 +92,7 @@ static void TestHashMap(Handle<HashMap> table) {
CHECK(JSReceiver::GetOrCreateIdentityHash(key)->IsSmi());
CHECK_EQ(table->FindEntry(key), HashMap::kNotFound);
CHECK_EQ(table->Lookup(key), CcTest::heap()->the_hole_value());
- CHECK(key->GetIdentityHash()->IsSmi());
+ CHECK(JSReceiver::GetIdentityHash(isolate, key)->IsSmi());
}
// Keys that don't have an identity hash should not be found and also
@@ -100,8 +100,8 @@ static void TestHashMap(Handle<HashMap> table) {
for (int i = 0; i < 100; i++) {
Handle<JSReceiver> key = factory->NewJSArray(7);
CHECK_EQ(table->Lookup(key), CcTest::heap()->the_hole_value());
- Object* identity_hash = key->GetIdentityHash();
- CHECK_EQ(identity_hash, CcTest::heap()->undefined_value());
+ Handle<Object> identity_hash = JSReceiver::GetIdentityHash(isolate, key);
+ CHECK_EQ(CcTest::heap()->undefined_value(), *identity_hash);
}
}
diff --git a/deps/v8/test/cctest/test-disasm-arm.cc b/deps/v8/test/cctest/test-disasm-arm.cc
index 74144f25c6..7eea8a171c 100644
--- a/deps/v8/test/cctest/test-disasm-arm.cc
+++ b/deps/v8/test/cctest/test-disasm-arm.cc
@@ -453,6 +453,54 @@ TEST(Type3) {
}
+TEST(msr_mrs_disasm) {
+ SET_UP();
+
+ SRegisterFieldMask CPSR_all = CPSR_f | CPSR_s | CPSR_x | CPSR_c;
+ SRegisterFieldMask SPSR_all = SPSR_f | SPSR_s | SPSR_x | SPSR_c;
+
+ COMPARE(msr(CPSR_f, Operand(r0)), "e128f000 msr CPSR_f, r0");
+ COMPARE(msr(CPSR_s, Operand(r1)), "e124f001 msr CPSR_s, r1");
+ COMPARE(msr(CPSR_x, Operand(r2)), "e122f002 msr CPSR_x, r2");
+ COMPARE(msr(CPSR_c, Operand(r3)), "e121f003 msr CPSR_c, r3");
+ COMPARE(msr(CPSR_all, Operand(ip)), "e12ff00c msr CPSR_fsxc, ip");
+ COMPARE(msr(SPSR_f, Operand(r0)), "e168f000 msr SPSR_f, r0");
+ COMPARE(msr(SPSR_s, Operand(r1)), "e164f001 msr SPSR_s, r1");
+ COMPARE(msr(SPSR_x, Operand(r2)), "e162f002 msr SPSR_x, r2");
+ COMPARE(msr(SPSR_c, Operand(r3)), "e161f003 msr SPSR_c, r3");
+ COMPARE(msr(SPSR_all, Operand(ip)), "e16ff00c msr SPSR_fsxc, ip");
+ COMPARE(msr(CPSR_f, Operand(r0), eq), "0128f000 msreq CPSR_f, r0");
+ COMPARE(msr(CPSR_s, Operand(r1), ne), "1124f001 msrne CPSR_s, r1");
+ COMPARE(msr(CPSR_x, Operand(r2), cs), "2122f002 msrcs CPSR_x, r2");
+ COMPARE(msr(CPSR_c, Operand(r3), cc), "3121f003 msrcc CPSR_c, r3");
+ COMPARE(msr(CPSR_all, Operand(ip), mi), "412ff00c msrmi CPSR_fsxc, ip");
+ COMPARE(msr(SPSR_f, Operand(r0), pl), "5168f000 msrpl SPSR_f, r0");
+ COMPARE(msr(SPSR_s, Operand(r1), vs), "6164f001 msrvs SPSR_s, r1");
+ COMPARE(msr(SPSR_x, Operand(r2), vc), "7162f002 msrvc SPSR_x, r2");
+ COMPARE(msr(SPSR_c, Operand(r3), hi), "8161f003 msrhi SPSR_c, r3");
+ COMPARE(msr(SPSR_all, Operand(ip), ls), "916ff00c msrls SPSR_fsxc, ip");
+
+ // Other combinations of mask bits.
+ COMPARE(msr(CPSR_s | CPSR_x, Operand(r4)),
+ "e126f004 msr CPSR_sx, r4");
+ COMPARE(msr(SPSR_s | SPSR_x | SPSR_c, Operand(r5)),
+ "e167f005 msr SPSR_sxc, r5");
+ COMPARE(msr(SPSR_s | SPSR_c, Operand(r6)),
+ "e165f006 msr SPSR_sc, r6");
+ COMPARE(msr(SPSR_f | SPSR_c, Operand(r7)),
+ "e169f007 msr SPSR_fc, r7");
+ // MSR with no mask is UNPREDICTABLE, and checked by the assembler, but check
+ // that the disassembler does something sensible.
+ COMPARE(dd(0xe120f008), "e120f008 msr CPSR_(none), r8");
+
+ COMPARE(mrs(r0, CPSR), "e10f0000 mrs r0, CPSR");
+ COMPARE(mrs(r1, SPSR), "e14f1000 mrs r1, SPSR");
+ COMPARE(mrs(r2, CPSR, ge), "a10f2000 mrsge r2, CPSR");
+ COMPARE(mrs(r3, SPSR, lt), "b14f3000 mrslt r3, SPSR");
+
+ VERIFY_RUN();
+}
+
TEST(Vfp) {
SET_UP();
@@ -566,6 +614,11 @@ TEST(Vfp) {
COMPARE(vmov(d2, -13.0),
"eeba2b0a vmov.f64 d2, #-13");
+ COMPARE(vmov(s1, -1.0),
+ "eeff0a00 vmov.f32 s1, #-1");
+ COMPARE(vmov(s3, 13.0),
+ "eef21a0a vmov.f32 s3, #13");
+
COMPARE(vmov(d0, VmovIndexLo, r0),
"ee000b10 vmov.32 d0[0], r0");
COMPARE(vmov(d0, VmovIndexHi, r0),
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index 669e37ac69..f96eb00fd1 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -96,6 +96,7 @@ TEST(DisasmIa320) {
__ nop();
__ add(ebx, Immediate(12));
__ nop();
+ __ adc(edx, Operand(ebx));
__ adc(ecx, 12);
__ adc(ecx, 1000);
__ nop();
@@ -119,8 +120,10 @@ TEST(DisasmIa320) {
__ nop();
__ imul(edx, ecx);
- __ shld(edx, ecx);
- __ shrd(edx, ecx);
+ __ shld(edx, ecx, 10);
+ __ shld_cl(edx, ecx);
+ __ shrd(edx, ecx, 10);
+ __ shrd_cl(edx, ecx);
__ bts(edx, ecx);
__ bts(Operand(ebx, ecx, times_4, 0), ecx);
__ nop();
@@ -214,14 +217,13 @@ TEST(DisasmIa320) {
__ sar(Operand(ebx, ecx, times_4, 10000), 6);
__ sar_cl(Operand(ebx, ecx, times_4, 10000));
__ sbb(edx, Operand(ebx, ecx, times_4, 10000));
- __ shld(edx, Operand(ebx, ecx, times_4, 10000));
__ shl(edx, 1);
__ shl(edx, 6);
__ shl_cl(edx);
__ shl(Operand(ebx, ecx, times_4, 10000), 1);
__ shl(Operand(ebx, ecx, times_4, 10000), 6);
__ shl_cl(Operand(ebx, ecx, times_4, 10000));
- __ shrd(edx, Operand(ebx, ecx, times_4, 10000));
+ __ shrd_cl(Operand(ebx, ecx, times_4, 10000), edx);
__ shr(edx, 1);
__ shr(edx, 7);
__ shr_cl(edx);
@@ -242,7 +244,7 @@ TEST(DisasmIa320) {
__ cmp(ebx, 12345);
__ cmp(ebx, Immediate(12));
__ cmp(Operand(edx, ecx, times_4, 10000), Immediate(12));
- __ cmpb(eax, 100);
+ __ cmpb(eax, Immediate(100));
__ or_(ebx, 12345);
@@ -266,7 +268,7 @@ TEST(DisasmIa320) {
__ test(edx, Operand(ebx, ecx, times_8, 10000));
__ test(Operand(esi, edi, times_1, -20000000), Immediate(300000000));
__ test_b(edx, Operand(ecx, ebx, times_2, 1000));
- __ test_b(Operand(eax, -20), 0x9A);
+ __ test_b(Operand(eax, -20), Immediate(0x9A));
__ nop();
__ xor_(edx, 12345);
diff --git a/deps/v8/test/cctest/test-disasm-s390.cc b/deps/v8/test/cctest/test-disasm-s390.cc
new file mode 100644
index 0000000000..ba9e5e079d
--- /dev/null
+++ b/deps/v8/test/cctest/test-disasm-s390.cc
@@ -0,0 +1,297 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include <stdlib.h>
+
+#include "src/v8.h"
+
+#include "src/debug/debug.h"
+#include "src/disasm.h"
+#include "src/disassembler.h"
+#include "src/macro-assembler.h"
+#include "test/cctest/cctest.h"
+
+using namespace v8::internal;
+
+bool DisassembleAndCompare(byte* pc, const char* compare_string) {
+ disasm::NameConverter converter;
+ disasm::Disassembler disasm(converter);
+ EmbeddedVector<char, 128> disasm_buffer;
+
+ disasm.InstructionDecode(disasm_buffer, pc);
+
+ if (strcmp(compare_string, disasm_buffer.start()) != 0) {
+ fprintf(stderr,
+ "expected: \n"
+ "%s\n"
+ "disassembled: \n"
+ "%s\n\n",
+ compare_string, disasm_buffer.start());
+ return false;
+ }
+ return true;
+}
+
+// Set up V8 to a state where we can at least run the assembler and
+// disassembler. Declare the variables and allocate the data structures used
+// in the rest of the macros.
+#define SET_UP() \
+ CcTest::InitializeVM(); \
+ Isolate* isolate = CcTest::i_isolate(); \
+ HandleScope scope(isolate); \
+ byte* buffer = reinterpret_cast<byte*>(malloc(4 * 1024)); \
+ Assembler assm(isolate, buffer, 4 * 1024); \
+ bool failure = false;
+
+// This macro assembles one instruction using the preallocated assembler and
+// disassembles the generated instruction, comparing the output to the expected
+// value. If the comparison fails an error message is printed, but the test
+// continues to run until the end.
+#define COMPARE(asm_, compare_string) \
+ { \
+ int pc_offset = assm.pc_offset(); \
+ byte* progcounter = &buffer[pc_offset]; \
+ assm.asm_; \
+ if (!DisassembleAndCompare(progcounter, compare_string)) failure = true; \
+ }
+
+// Force emission of any pending literals into a pool.
+#define EMIT_PENDING_LITERALS() assm.CheckConstPool(true, false)
+
+// Verify that all invocations of the COMPARE macro passed successfully.
+// Exit with a failure if at least one of the tests failed.
+#define VERIFY_RUN() \
+ if (failure) { \
+ V8_Fatal(__FILE__, __LINE__, "S390 Disassembler tests failed.\n"); \
+ }
+
+TEST(TwoBytes) {
+ SET_UP();
+
+ COMPARE(ar(r3, r10), "1a3a ar\tr3,r10");
+ COMPARE(sr(r8, ip), "1b8c sr\tr8,ip");
+ COMPARE(mr_z(r0, r6), "1c06 mr\tr0,r6");
+ COMPARE(dr(r0, r5), "1d05 dr\tr0,r5");
+ COMPARE(or_z(r4, r2), "1642 or\tr4,r2");
+ COMPARE(nr(fp, r9), "14b9 nr\tfp,r9");
+ COMPARE(xr(r10, ip), "17ac xr\tr10,ip");
+ COMPARE(lr(r2, r13), "182d lr\tr2,r13");
+ COMPARE(cr_z(r9, r3), "1993 cr\tr9,r3");
+ COMPARE(clr(sp, r4), "15f4 clr\tsp,r4");
+ COMPARE(bcr(eq, r8), "0788 bcr\t0x8,r8");
+ COMPARE(ltr(r10, r1), "12a1 ltr\tr10,r1");
+ COMPARE(alr(r6, r8), "1e68 alr\tr6,r8");
+ COMPARE(slr(r3, ip), "1f3c slr\tr3,ip");
+ COMPARE(lnr(r4, r1), "1141 lnr\tr4,r1");
+ COMPARE(lcr(r0, r3), "1303 lcr\tr0,r3");
+ COMPARE(basr(r14, r7), "0de7 basr\tr14,r7");
+ COMPARE(ldr(d4, d6), "2846 ldr\td4,d6");
+
+ VERIFY_RUN();
+}
+
+TEST(FourBytes) {
+ SET_UP();
+
+#if V8_TARGET_ARCH_S390X
+ COMPARE(aghi(r5, Operand(1)), "a75b0001 aghi\tr5,1");
+ COMPARE(lghi(r6, Operand(8)), "a7690008 lghi\tr6,8");
+ COMPARE(mghi(r1, Operand(2)), "a71d0002 mghi\tr1,2");
+ COMPARE(cghi(r3, Operand(7)), "a73f0007 cghi\tr3,7");
+#else
+ COMPARE(ahi(r3, Operand(9)), "a73a0009 ahi\tr3,9");
+ COMPARE(lhi(r7, Operand::Zero()), "a7780000 lhi\tr7,0");
+ COMPARE(mhi(r8, Operand(3)), "a78c0003 mhi\tr8,3");
+ COMPARE(chi(r4, Operand(5)), "a74e0005 chi\tr4,5");
+ COMPARE(a(r13, MemOperand(r8, 70)), "5ad08046 a\tr13,70(r8)");
+ COMPARE(s(r9, MemOperand(sp, 9)), "5b90f009 s\tr9,9(sp)");
+ COMPARE(m(r6, MemOperand(r7, ip, 20)), "5c67c014 m\tr6,20(r7,ip)");
+ COMPARE(d(r14, MemOperand(r7, 15)), "5de0700f d\tr14,15(r7)");
+ COMPARE(o(r7, MemOperand(r3, r2, 10)), "5673200a o\tr7,10(r3,r2)");
+ COMPARE(n(r9, MemOperand(r5, sp, 9)), "5495f009 n\tr9,9(r5,sp)");
+ COMPARE(l(r0, MemOperand(r4, fp, 1)), "5804b001 l\tr0,1(r4,fp)");
+ COMPARE(c(r8, MemOperand(r5, r7, 18)), "59857012 c\tr8,18(r5,r7)");
+ COMPARE(al_z(r6, MemOperand(r9, sp, 2000)),
+ "5e69f7d0 al\tr6,2000(r9,sp)");
+ COMPARE(sl(r8, MemOperand(r1, 100)), "5f801064 sl\tr8,100(r1)");
+ COMPARE(la(r5, MemOperand(r9, 9)), "41509009 la\tr5,9(r9)");
+ COMPARE(ch(r0, MemOperand(r3, 0)), "49003000 ch\tr0,0(r3)");
+ COMPARE(cl(r1, MemOperand(r8, 5)), "55108005 cl\tr1,5(r8)");
+ COMPARE(cli(MemOperand(r9, 64), Operand(5)), "95059040 cli\t64(r9),5");
+ COMPARE(tm(MemOperand(r0, 8), Operand(7)), "91070008 tm\t8(r0),7");
+ COMPARE(bct(r1, MemOperand(r9, 15)), "4610900f bct\tr1,15(r9)");
+ COMPARE(st(r5, MemOperand(r9, r8, 7)), "50598007 st\tr5,7(r9,r8)");
+ COMPARE(stc(r13, MemOperand(r5, r1, 8)), "42d51008 stc\tr13,8(r5,r1)");
+ COMPARE(ic_z(r9, MemOperand(r1, 90)), "4390105a ic\tr9,90(r1)");
+ COMPARE(sth(r5, MemOperand(r9, r0, 87)), "40590057 sth\tr5,87(r9,r0)");
+#endif
+ COMPARE(iihh(r10, Operand(8)), "a5a00008 iihh\tr10,8");
+ COMPARE(iihl(r9, Operand(10)), "a591000a iihl\tr9,10");
+ COMPARE(iilh(r0, Operand(40)), "a5020028 iilh\tr0,40");
+ COMPARE(iill(r6, Operand(19)), "a5630013 iill\tr6,19");
+ COMPARE(oill(r9, Operand(9)), "a59b0009 oill\tr9,9");
+ COMPARE(tmll(r4, Operand(7)), "a7410007 tmll\tr4,7");
+ COMPARE(stm(r2, r5, MemOperand(r9, 44)), "9025902c stm\tr2,r5,44(r9)");
+ COMPARE(lm(r8, r0, MemOperand(sp, 88)), "9880f058 lm\tr8,r0,88(sp)");
+ COMPARE(nill(r7, Operand(30)), "a577001e nill\tr7,30");
+ COMPARE(nilh(r8, Operand(4)), "a5860004 nilh\tr8,4");
+ COMPARE(ah(r9, MemOperand(r5, r4, 4)), "4a954004 ah\tr9,4(r5,r4)");
+ COMPARE(sh(r8, MemOperand(r1, r2, 6)), "4b812006 sh\tr8,6(r1,r2)");
+ COMPARE(mh(r5, MemOperand(r9, r8, 7)), "4c598007 mh\tr5,7(r9,r8)");
+
+ VERIFY_RUN();
+}
+
+TEST(SixBytes) {
+ SET_UP();
+
+#if V8_TARGET_ARCH_S390X
+ COMPARE(llihf(ip, Operand(90000)), "c0ce00015f90 llihf\tip,90000");
+ COMPARE(agsi(MemOperand(r9, 1000), Operand(70)),
+ "eb4693e8007a agsi\t1000(r9),70");
+ COMPARE(clgfi(r7, Operand(80)), "c27e00000050 clgfi\tr7,80");
+ COMPARE(cgfi(r8, Operand(10)), "c28c0000000a cgfi\tr8,10");
+ COMPARE(xihf(fp, Operand(8)), "c0b600000008 xihf\tfp,8");
+ COMPARE(sllg(r0, r1, r2), "eb012000000d sllg\tr0,r1,0(r2)");
+ COMPARE(sllg(r0, r1, Operand(10)), "eb01000a000d sllg\tr0,r1,10(r0)");
+ COMPARE(srlg(r1, r3, Operand(10)), "eb13000a000c srlg\tr1,r3,10(r0)");
+ COMPARE(srlg(r1, r3, r10), "eb13a000000c srlg\tr1,r3,0(r10)");
+ COMPARE(slag(r1, r3, Operand(2)), "eb130002000b slag\tr1,r3,2(r0)");
+ COMPARE(slag(r1, r3, r2), "eb132000000b slag\tr1,r3,0(r2)");
+ COMPARE(srag(r1, r3, r2), "eb132000000a srag\tr1,r3,0(r2)");
+ COMPARE(srag(r1, r3, Operand(2)), "eb130002000a srag\tr1,r3,2(r0)");
+ COMPARE(risbg(r1, r2, Operand(3), Operand(5), Operand(2), false),
+ "ec1203050255 risbg\tr1,r2,3,5,2");
+ COMPARE(risbgn(r1, r2, Operand(3), Operand(5), Operand(2), false),
+ "ec1203050259 risbgn\tr1,r2,3,5,2");
+ COMPARE(stmg(r3, r4, MemOperand(sp, 10)),
+ "eb34f00a0024 stmg\tr3,r4,10(sp)");
+ COMPARE(ltg(r1, MemOperand(r4, sp, 10)), "e314f00a0002 ltg\tr1,10(r4,sp)");
+ COMPARE(lgh(r8, MemOperand(r1, 8888)), "e38012b80215 lgh\tr8,8888(r1)");
+ COMPARE(ag(r4, MemOperand(r9, r4, 2046)),
+ "e34947fe0008 ag\tr4,2046(r9,r4)");
+ COMPARE(agf(r1, MemOperand(r3, sp, 9)), "e313f0090018 agf\tr1,9(r3,sp)");
+ COMPARE(sg(r9, MemOperand(r5, 15)), "e390500f0009 sg\tr9,15(r5)");
+ COMPARE(ng(r7, MemOperand(r5, r6, 1000)),
+ "e37563e80080 ng\tr7,1000(r5,r6)");
+ COMPARE(og(r2, MemOperand(r8, r0, 1000)),
+ "e32803e80081 og\tr2,1000(r8,r0)");
+ COMPARE(xg(r9, MemOperand(r3, 8888)), "e39032b80282 xg\tr9,8888(r3)");
+ COMPARE(ng(r0, MemOperand(r9, r3, 900)), "e30933840080 ng\tr0,900(r9,r3)");
+ COMPARE(og(r3, MemOperand(r8, r2, 8888)),
+ "e33822b80281 og\tr3,8888(r8,r2)");
+ COMPARE(xg(r9, MemOperand(r3, 15)), "e390300f0082 xg\tr9,15(r3)");
+ COMPARE(cg(r0, MemOperand(r5, r4, 4)), "e30540040020 cg\tr0,4(r5,r4)");
+ COMPARE(lg(r1, MemOperand(r7, r8, 90)), "e317805a0004 lg\tr1,90(r7,r8)");
+ COMPARE(lgf(r1, MemOperand(sp, 15)), "e310f00f0014 lgf\tr1,15(sp)");
+ COMPARE(llgf(r0, MemOperand(r3, r4, 8)), "e30340080016 llgf\tr0,8(r3,r4)");
+ COMPARE(alg(r8, MemOperand(r4, 11)), "e380400b000a alg\tr8,11(r4)");
+ COMPARE(slg(r1, MemOperand(r5, r6, 11)), "e315600b000b slg\tr1,11(r5,r6)");
+ COMPARE(sgf(r0, MemOperand(r4, r5, 8888)),
+ "e30452b80219 sgf\tr0,8888(r4,r5)");
+ COMPARE(llgh(r4, MemOperand(r1, 8000)), "e3401f400191 llgh\tr4,8000(r1)");
+ COMPARE(llgc(r0, MemOperand(r4, r5, 30)),
+ "e304501e0090 llgc\tr0,30(r4,r5)");
+ COMPARE(lgb(r9, MemOperand(r8, r7, 10)), "e398700a0077 lgb\tr9,10(r8,r7)");
+ COMPARE(stg(r0, MemOperand(r9, 10)), "e300900a0024 stg\tr0,10(r9)");
+ COMPARE(mvghi(MemOperand(r7, 25), Operand(100)),
+ "e54870190064 mvghi\t25(r7),100");
+ COMPARE(algfi(r1, Operand(34250)), "c21a000085ca algfi\tr1,34250");
+ COMPARE(slgfi(r1, Operand(87654321)), "c21405397fb1 slgfi\tr1,87654321");
+ COMPARE(nihf(r2, Operand(8888)), "c02a000022b8 nihf\tr2,8888");
+ COMPARE(oihf(r6, Operand(9000)), "c06c00002328 oihf\tr6,9000");
+ COMPARE(msgfi(r6, Operand(90000)), "c26000015f90 msgfi\tr6,90000");
+#else
+ COMPARE(llilf(r10, Operand(72354)), "c0af00011aa2 llilf\tr10,72354");
+ COMPARE(iilf(r4, Operand(11)), "c0490000000b iilf\tr4,11");
+ COMPARE(afi(r2, Operand(8000)), "c22900001f40 afi\tr2,8000");
+ COMPARE(asi(MemOperand(r9, 1000), Operand(70)),
+ "eb4693e8006a asi\t1000(r9),70");
+ COMPARE(alfi(r1, Operand(90)), "c21b0000005a alfi\tr1,90");
+ COMPARE(clfi(r9, Operand(60)), "c29f0000003c clfi\tr9,60");
+ COMPARE(cfi(r8, Operand(10)), "c28d0000000a cfi\tr8,10");
+ COMPARE(xilf(r3, Operand(15)), "c0370000000f xilf\tr3,15");
+ COMPARE(sllk(r6, r7, Operand(10)), "eb67000a00df sllk\tr6,r7,10(r0)");
+ COMPARE(sllk(r6, r7, r8), "eb67800000df sllk\tr6,r7,0(r8)");
+ COMPARE(slak(r1, r3, r2), "eb13200000dd slak\tr1,r3,0(r2)");
+ COMPARE(slak(r1, r3, Operand(2)), "eb13000200dd slak\tr1,r3,2(r0)");
+ COMPARE(srak(r1, r3, Operand(2)), "eb13000200dc srak\tr1,r3,2(r0)");
+ COMPARE(srak(r1, r3, r2), "eb13200000dc srak\tr1,r3,0(r2)");
+ COMPARE(stmy(r3, r4, MemOperand(sp, 10)),
+ "eb34f00a0090 stmy\tr3,r4,10(sp)");
+ COMPARE(lt_z(r1, MemOperand(r4, sp, 10)), "e314f00a0012 lt\tr1,10(r4,sp)");
+ COMPARE(ml(r0, MemOperand(r3, r9, 2046)),
+ "e30397fe0096 ml\tr0,2046(r3,r9)");
+ COMPARE(ay(r5, MemOperand(r7, 8888)), "e35072b8025a ay\tr5,8888(r7)");
+ COMPARE(sy(r8, MemOperand(r6, r7, 2046)),
+ "e38677fe005b sy\tr8,2046(r6,r7)");
+ COMPARE(ny(r2, MemOperand(r9, r0, 8888)),
+ "e32902b80254 ny\tr2,8888(r9,r0)");
+ COMPARE(oy(r8, MemOperand(r4, 321)), "e38041410056 oy\tr8,321(r4)");
+ COMPARE(xy(r5, MemOperand(r3, r2, 0)), "e35320000057 xy\tr5,0(r3,r2)");
+ COMPARE(cy(r9, MemOperand(r4, 321)), "e39041410059 cy\tr9,321(r4)");
+ COMPARE(ahy(r1, MemOperand(r5, r6, 8888)),
+ "e31562b8027a ahy\tr1,8888(r5,r6)");
+ COMPARE(shy(r1, MemOperand(r5, r6, 8888)),
+ "e31562b8027b shy\tr1,8888(r5,r6)");
+ COMPARE(lb(r7, MemOperand(sp, 15)), "e370f00f0076 lb\tr7,15(sp)");
+ COMPARE(ly(r0, MemOperand(r1, r2, 321)), "e30121410058 ly\tr0,321(r1,r2)");
+ COMPARE(aly(r9, MemOperand(r2, r3, 10)), "e392300a005e aly\tr9,10(r2,r3)");
+ COMPARE(sly(r2, MemOperand(r9, r1, 15)), "e329100f005f sly\tr2,15(r9,r1)");
+ COMPARE(llh(r4, MemOperand(r1, 10)), "e340100a0095 llh\tr4,10(r1)");
+ COMPARE(llc(r0, MemOperand(r4, r5, 30)), "e304501e0094 llc\tr0,30(r4,r5)");
+ COMPARE(chy(r9, MemOperand(r8, r7, 30)), "e398701e0079 chy\tr9,30(r8,r7)");
+ COMPARE(cly(r8, MemOperand(r5, r4, 14)), "e385400e0055 cly\tr8,14(r5,r4)");
+ COMPARE(sty(r0, MemOperand(r0, 15)), "e300000f0050 sty\tr0,15(r0)");
+ COMPARE(mvhi(MemOperand(r7, 25), Operand(100)),
+ "e54c70190064 mvhi\t25(r7),100");
+ COMPARE(slfi(r4, Operand(100)), "c24500000064 slfi\tr4,100");
+ COMPARE(msfi(r8, Operand(1000)), "c281000003e8 msfi\tr8,1000");
+#endif
+ COMPARE(iihf(r6, Operand(9)), "c06800000009 iihf\tr6,9");
+ COMPARE(srlk(r1, r3, r2), "eb13200000de srlk\tr1,r3,0(r2)");
+ COMPARE(srlk(r1, r3, Operand(2)), "eb13000200de srlk\tr1,r3,2(r0)");
+ COMPARE(lmy(r9, r10, MemOperand(r8, 100)),
+ "eb9a80640098 lmy\tr9,r10,100(r8)");
+ COMPARE(lmg(r7, r8, MemOperand(r9, 100)),
+ "eb7890640004 lmg\tr7,r8,100(r9)");
+ COMPARE(lay(fp, MemOperand(sp, 8000)), "e3b0ff400171 lay\tfp,8000(sp)");
+ COMPARE(cliy(MemOperand(sp, 80), Operand(80)),
+ "eb50f0500055 cliy\t80(sp),80");
+ COMPARE(tmy(MemOperand(r0, 20), Operand(10)),
+ "eb0a00140051 tmy\t20(r0),10");
+ COMPARE(clg(r9, MemOperand(r6, r7, 19)), "e39670130021 clg\tr9,19(r6,r7)");
+ COMPARE(bctg(r8, MemOperand(sp, 10)), "e380f00a0046 bctg\tr8,10(sp)");
+ COMPARE(icy(r2, MemOperand(r3, 2)), "e32030020073 icy\tr2,2(r3)");
+ COMPARE(mvc(MemOperand(r9, 9), MemOperand(r3, 15), 10),
+ "d2099009300f mvc\t9(9,r9),15(r3)");
+ COMPARE(nilf(r0, Operand(8000)), "c00b00001f40 nilf\tr0,8000");
+ COMPARE(oilf(r9, Operand(1000)), "c09d000003e8 oilf\tr9,1000");
+
+ VERIFY_RUN();
+}
diff --git a/deps/v8/test/cctest/test-disasm-x87.cc b/deps/v8/test/cctest/test-disasm-x87.cc
index 17609cfc3c..697a9d3b49 100644
--- a/deps/v8/test/cctest/test-disasm-x87.cc
+++ b/deps/v8/test/cctest/test-disasm-x87.cc
@@ -96,6 +96,7 @@ TEST(DisasmIa320) {
__ nop();
__ add(ebx, Immediate(12));
__ nop();
+ __ adc(edx, Operand(ebx));
__ adc(ecx, 12);
__ adc(ecx, 1000);
__ nop();
@@ -119,8 +120,10 @@ TEST(DisasmIa320) {
__ nop();
__ imul(edx, ecx);
- __ shld(edx, ecx);
- __ shrd(edx, ecx);
+ __ shld(edx, ecx, 10);
+ __ shld_cl(edx, ecx);
+ __ shrd(edx, ecx, 10);
+ __ shrd_cl(edx, ecx);
__ bts(edx, ecx);
__ bts(Operand(ebx, ecx, times_4, 0), ecx);
__ nop();
@@ -214,14 +217,13 @@ TEST(DisasmIa320) {
__ sar(Operand(ebx, ecx, times_4, 10000), 6);
__ sar_cl(Operand(ebx, ecx, times_4, 10000));
__ sbb(edx, Operand(ebx, ecx, times_4, 10000));
- __ shld(edx, Operand(ebx, ecx, times_4, 10000));
__ shl(edx, 1);
__ shl(edx, 6);
__ shl_cl(edx);
__ shl(Operand(ebx, ecx, times_4, 10000), 1);
__ shl(Operand(ebx, ecx, times_4, 10000), 6);
__ shl_cl(Operand(ebx, ecx, times_4, 10000));
- __ shrd(edx, Operand(ebx, ecx, times_4, 10000));
+ __ shrd_cl(Operand(ebx, ecx, times_4, 10000), edx);
__ shr(edx, 1);
__ shr(edx, 7);
__ shr_cl(edx);
@@ -242,7 +244,7 @@ TEST(DisasmIa320) {
__ cmp(ebx, 12345);
__ cmp(ebx, Immediate(12));
__ cmp(Operand(edx, ecx, times_4, 10000), Immediate(12));
- __ cmpb(eax, 100);
+ __ cmpb(eax, Immediate(100));
__ or_(ebx, 12345);
@@ -266,7 +268,7 @@ TEST(DisasmIa320) {
__ test(edx, Operand(ebx, ecx, times_8, 10000));
__ test(Operand(esi, edi, times_1, -20000000), Immediate(300000000));
__ test_b(edx, Operand(ecx, ebx, times_2, 1000));
- __ test_b(Operand(eax, -20), 0x9A);
+ __ test_b(Operand(eax, -20), Immediate(0x9A));
__ nop();
__ xor_(edx, 12345);
diff --git a/deps/v8/test/cctest/test-field-type-tracking.cc b/deps/v8/test/cctest/test-field-type-tracking.cc
index cee3600314..c7c6f84423 100644
--- a/deps/v8/test/cctest/test-field-type-tracking.cc
+++ b/deps/v8/test/cctest/test-field-type-tracking.cc
@@ -86,6 +86,7 @@ static bool EqualDetails(DescriptorArray* descriptors, int descriptor,
class Expectations {
static const int MAX_PROPERTIES = 10;
Isolate* isolate_;
+ ElementsKind elements_kind_;
PropertyType types_[MAX_PROPERTIES];
PropertyAttributes attributes_[MAX_PROPERTIES];
Representation representations_[MAX_PROPERTIES];
@@ -97,8 +98,15 @@ class Expectations {
int number_of_properties_;
public:
+ explicit Expectations(Isolate* isolate, ElementsKind elements_kind)
+ : isolate_(isolate),
+ elements_kind_(elements_kind),
+ number_of_properties_(0) {}
+
explicit Expectations(Isolate* isolate)
- : isolate_(isolate), number_of_properties_(0) {}
+ : Expectations(
+ isolate,
+ isolate->object_function()->initial_map()->elements_kind()) {}
void Init(int index, PropertyType type, PropertyAttributes attributes,
Representation representation, Handle<Object> value) {
@@ -143,6 +151,10 @@ class Expectations {
os << "\n";
}
+ void SetElementsKind(ElementsKind elements_kind) {
+ elements_kind_ = elements_kind;
+ }
+
Handle<FieldType> GetFieldType(int index) {
CHECK(index < MAX_PROPERTIES);
CHECK(types_[index] == DATA || types_[index] == ACCESSOR);
@@ -252,6 +264,7 @@ class Expectations {
}
bool Check(Map* map, int expected_nof) const {
+ CHECK_EQ(elements_kind_, map->elements_kind());
CHECK(number_of_properties_ <= MAX_PROPERTIES);
CHECK_EQ(expected_nof, map->NumberOfOwnDescriptors());
CHECK(!map->is_dictionary_map());
@@ -279,6 +292,13 @@ class Expectations {
// given |map|.
//
+ Handle<Map> AsElementsKind(Handle<Map> map, ElementsKind elements_kind) {
+ elements_kind_ = elements_kind;
+ map = Map::AsElementsKind(map, elements_kind);
+ CHECK_EQ(elements_kind_, map->elements_kind());
+ return map;
+ }
+
Handle<Map> AddDataField(Handle<Map> map, PropertyAttributes attributes,
Representation representation,
Handle<FieldType> heap_type) {
@@ -399,13 +419,17 @@ class Expectations {
Handle<Object> getter(pair->getter(), isolate);
Handle<Object> setter(pair->setter(), isolate);
- map = Map::TransitionToAccessorProperty(map, name, ACCESSOR_GETTER, getter,
- attributes);
+ int descriptor =
+ map->instance_descriptors()->SearchWithCache(isolate, *name, *map);
+ map = Map::TransitionToAccessorProperty(
+ map, name, descriptor, ACCESSOR_GETTER, getter, attributes);
CHECK(!map->is_deprecated());
CHECK(!map->is_dictionary_map());
- map = Map::TransitionToAccessorProperty(map, name, ACCESSOR_SETTER, setter,
- attributes);
+ descriptor =
+ map->instance_descriptors()->SearchWithCache(isolate, *name, *map);
+ map = Map::TransitionToAccessorProperty(
+ map, name, descriptor, ACCESSOR_SETTER, setter, attributes);
CHECK(!map->is_deprecated());
CHECK(!map->is_dictionary_map());
return map;
@@ -570,7 +594,7 @@ static void TestGeneralizeRepresentation(
CHECK(map->is_stable());
CHECK(expectations.Check(*map));
- Zone zone;
+ Zone zone(isolate->allocator());
if (is_detached_map) {
detach_point_map = Map::ReconfigureProperty(
@@ -962,7 +986,7 @@ static void TestReconfigureDataFieldAttribute_GeneralizeRepresentation(
CHECK(map2->is_stable());
CHECK(expectations2.Check(*map2));
- Zone zone;
+ Zone zone(isolate->allocator());
Handle<Map> field_owner(map->FindFieldOwner(kSplitProp), isolate);
CompilationInfo info("testing", isolate, &zone);
CHECK(!info.dependencies()->HasAborted());
@@ -1047,7 +1071,7 @@ static void TestReconfigureDataFieldAttribute_GeneralizeRepresentationTrivial(
CHECK(map2->is_stable());
CHECK(expectations2.Check(*map2));
- Zone zone;
+ Zone zone(isolate->allocator());
Handle<Map> field_owner(map->FindFieldOwner(kSplitProp), isolate);
CompilationInfo info("testing", isolate, &zone);
CHECK(!info.dependencies()->HasAborted());
@@ -1525,6 +1549,271 @@ TEST(ReconfigureDataFieldAttribute_AccConstantToDataFieldAfterTargetMap) {
////////////////////////////////////////////////////////////////////////////////
+// A set of tests for elements kind reconfiguration case.
+//
+
+// This test ensures that representation/field type generalization is correctly
+// propagated from one branch of transition tree (|map2) to another (|map|).
+//
+// + - p0 - p1 - p2A - p3 - p4: |map|
+// |
+// ek
+// |
+// {} - p0 - p1 - p2B - p3 - p4: |map2|
+//
+// where "p2A" and "p2B" differ only in the representation/field type.
+//
+static void TestReconfigureElementsKind_GeneralizeRepresentation(
+ Representation from_representation, Handle<FieldType> from_type,
+ Representation to_representation, Handle<FieldType> to_type,
+ Representation expected_representation, Handle<FieldType> expected_type) {
+ Isolate* isolate = CcTest::i_isolate();
+
+ Expectations expectations(isolate, FAST_SMI_ELEMENTS);
+
+ // Create a map, add required properties to it and initialize expectations.
+ Handle<Map> initial_map = Map::Create(isolate, 0);
+ initial_map->set_elements_kind(FAST_SMI_ELEMENTS);
+
+ Handle<Map> map = initial_map;
+ map = expectations.AsElementsKind(map, FAST_ELEMENTS);
+ for (int i = 0; i < kPropCount; i++) {
+ map = expectations.AddDataField(map, NONE, from_representation, from_type);
+ }
+ CHECK(!map->is_deprecated());
+ CHECK(map->is_stable());
+ CHECK(expectations.Check(*map));
+
+ // Create another branch in transition tree (property at index |kDiffProp|
+ // has different representatio/field type), initialize expectations.
+ const int kDiffProp = kPropCount / 2;
+ Expectations expectations2(isolate, FAST_SMI_ELEMENTS);
+
+ Handle<Map> map2 = initial_map;
+ for (int i = 0; i < kPropCount; i++) {
+ if (i == kDiffProp) {
+ map2 = expectations2.AddDataField(map2, NONE, to_representation, to_type);
+ } else {
+ map2 = expectations2.AddDataField(map2, NONE, from_representation,
+ from_type);
+ }
+ }
+ CHECK(!map2->is_deprecated());
+ CHECK(map2->is_stable());
+ CHECK(expectations2.Check(*map2));
+
+ Zone zone(isolate->allocator());
+ Handle<Map> field_owner(map->FindFieldOwner(kDiffProp), isolate);
+ CompilationInfo info("testing", isolate, &zone);
+ CHECK(!info.dependencies()->HasAborted());
+ info.dependencies()->AssumeFieldType(field_owner);
+
+ // Reconfigure elements kinds of |map2|, which should generalize
+ // representations in |map|.
+ Handle<Map> new_map = Map::ReconfigureElementsKind(map2, FAST_ELEMENTS);
+
+ // |map2| should be left unchanged but marked unstable.
+ CHECK(!map2->is_stable());
+ CHECK(!map2->is_deprecated());
+ CHECK_NE(*map2, *new_map);
+ CHECK(expectations2.Check(*map2));
+
+ // |map| should be deprecated and |new_map| should match new expectations.
+ expectations.SetDataField(kDiffProp, expected_representation, expected_type);
+
+ CHECK(map->is_deprecated());
+ CHECK(!info.dependencies()->HasAborted());
+ info.dependencies()->Rollback(); // Properly cleanup compilation info.
+ CHECK_NE(*map, *new_map);
+
+ CHECK(!new_map->is_deprecated());
+ CHECK(expectations.Check(*new_map));
+
+ // Update deprecated |map|, it should become |new_map|.
+ Handle<Map> updated_map = Map::Update(map);
+ CHECK_EQ(*new_map, *updated_map);
+
+ // Ensure Map::FindElementsKindTransitionedMap() is able to find the
+ // transitioned map.
+ {
+ MapHandleList map_list;
+ map_list.Add(updated_map);
+ Map* transitioned_map = map2->FindElementsKindTransitionedMap(&map_list);
+ CHECK_EQ(*updated_map, transitioned_map);
+ }
+}
+
+// This test ensures that trivial representation/field type generalization
+// (from HeapObject to HeapObject) is correctly propagated from one branch of
+// transition tree (|map2|) to another (|map|).
+//
+// + - p0 - p1 - p2A - p3 - p4: |map|
+// |
+// ek
+// |
+// {} - p0 - p1 - p2B - p3 - p4: |map2|
+//
+// where "p2A" and "p2B" differ only in the representation/field type.
+//
+static void TestReconfigureElementsKind_GeneralizeRepresentationTrivial(
+ Representation from_representation, Handle<FieldType> from_type,
+ Representation to_representation, Handle<FieldType> to_type,
+ Representation expected_representation, Handle<FieldType> expected_type,
+ bool expected_field_type_dependency = true) {
+ Isolate* isolate = CcTest::i_isolate();
+
+ Expectations expectations(isolate, FAST_SMI_ELEMENTS);
+
+ // Create a map, add required properties to it and initialize expectations.
+ Handle<Map> initial_map = Map::Create(isolate, 0);
+ initial_map->set_elements_kind(FAST_SMI_ELEMENTS);
+
+ Handle<Map> map = initial_map;
+ map = expectations.AsElementsKind(map, FAST_ELEMENTS);
+ for (int i = 0; i < kPropCount; i++) {
+ map = expectations.AddDataField(map, NONE, from_representation, from_type);
+ }
+ CHECK(!map->is_deprecated());
+ CHECK(map->is_stable());
+ CHECK(expectations.Check(*map));
+
+ // Create another branch in transition tree (property at index |kDiffProp|
+ // has different attributes), initialize expectations.
+ const int kDiffProp = kPropCount / 2;
+ Expectations expectations2(isolate, FAST_SMI_ELEMENTS);
+
+ Handle<Map> map2 = initial_map;
+ for (int i = 0; i < kPropCount; i++) {
+ if (i == kDiffProp) {
+ map2 = expectations2.AddDataField(map2, NONE, to_representation, to_type);
+ } else {
+ map2 = expectations2.AddDataField(map2, NONE, from_representation,
+ from_type);
+ }
+ }
+ CHECK(!map2->is_deprecated());
+ CHECK(map2->is_stable());
+ CHECK(expectations2.Check(*map2));
+
+ Zone zone(isolate->allocator());
+ Handle<Map> field_owner(map->FindFieldOwner(kDiffProp), isolate);
+ CompilationInfo info("testing", isolate, &zone);
+ CHECK(!info.dependencies()->HasAborted());
+ info.dependencies()->AssumeFieldType(field_owner);
+
+ // Reconfigure elements kinds of |map2|, which should generalize
+ // representations in |map|.
+ Handle<Map> new_map = Map::ReconfigureElementsKind(map2, FAST_ELEMENTS);
+
+ // |map2| should be left unchanged but marked unstable.
+ CHECK(!map2->is_stable());
+ CHECK(!map2->is_deprecated());
+ CHECK_NE(*map2, *new_map);
+ CHECK(expectations2.Check(*map2));
+
+ // In trivial case |map| should be returned as a result of the elements
+ // kind reconfiguration, respective field types should be generalized and
+ // respective code dependencies should be invalidated. |map| should be NOT
+ // deprecated and it should match new expectations.
+ expectations.SetDataField(kDiffProp, expected_representation, expected_type);
+ CHECK(!map->is_deprecated());
+ CHECK_EQ(*map, *new_map);
+ CHECK_EQ(expected_field_type_dependency, info.dependencies()->HasAborted());
+ info.dependencies()->Rollback(); // Properly cleanup compilation info.
+
+ CHECK(!new_map->is_deprecated());
+ CHECK(expectations.Check(*new_map));
+
+ Handle<Map> updated_map = Map::Update(map);
+ CHECK_EQ(*new_map, *updated_map);
+
+ // Ensure Map::FindElementsKindTransitionedMap() is able to find the
+ // transitioned map.
+ {
+ MapHandleList map_list;
+ map_list.Add(updated_map);
+ Map* transitioned_map = map2->FindElementsKindTransitionedMap(&map_list);
+ CHECK_EQ(*updated_map, transitioned_map);
+ }
+}
+
+TEST(ReconfigureElementsKind_GeneralizeRepresentationSmiToDouble) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Handle<FieldType> any_type = FieldType::Any(isolate);
+
+ TestReconfigureElementsKind_GeneralizeRepresentation(
+ Representation::Smi(), any_type, Representation::Double(), any_type,
+ Representation::Double(), any_type);
+}
+
+TEST(ReconfigureElementsKind_GeneralizeRepresentationSmiToTagged) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Handle<FieldType> any_type = FieldType::Any(isolate);
+ Handle<FieldType> value_type =
+ FieldType::Class(Map::Create(isolate, 0), isolate);
+
+ TestReconfigureElementsKind_GeneralizeRepresentation(
+ Representation::Smi(), any_type, Representation::HeapObject(), value_type,
+ Representation::Tagged(), any_type);
+}
+
+TEST(ReconfigureElementsKind_GeneralizeRepresentationDoubleToTagged) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Handle<FieldType> any_type = FieldType::Any(isolate);
+ Handle<FieldType> value_type =
+ FieldType::Class(Map::Create(isolate, 0), isolate);
+
+ TestReconfigureElementsKind_GeneralizeRepresentation(
+ Representation::Double(), any_type, Representation::HeapObject(),
+ value_type, Representation::Tagged(), any_type);
+}
+
+TEST(ReconfigureElementsKind_GeneralizeRepresentationHeapObjToHeapObj) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Handle<FieldType> any_type = FieldType::Any(isolate);
+
+ Handle<FieldType> current_type =
+ FieldType::Class(Map::Create(isolate, 0), isolate);
+
+ Handle<FieldType> new_type =
+ FieldType::Class(Map::Create(isolate, 0), isolate);
+
+ Handle<FieldType> expected_type = any_type;
+
+ TestReconfigureElementsKind_GeneralizeRepresentationTrivial(
+ Representation::HeapObject(), current_type, Representation::HeapObject(),
+ new_type, Representation::HeapObject(), expected_type);
+ current_type = expected_type;
+
+ new_type = FieldType::Class(Map::Create(isolate, 0), isolate);
+
+ TestReconfigureElementsKind_GeneralizeRepresentationTrivial(
+ Representation::HeapObject(), any_type, Representation::HeapObject(),
+ new_type, Representation::HeapObject(), any_type, false);
+}
+
+TEST(ReconfigureElementsKind_GeneralizeRepresentationHeapObjectToTagged) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Handle<FieldType> any_type = FieldType::Any(isolate);
+ Handle<FieldType> value_type =
+ FieldType::Class(Map::Create(isolate, 0), isolate);
+
+ TestReconfigureElementsKind_GeneralizeRepresentation(
+ Representation::HeapObject(), value_type, Representation::Smi(), any_type,
+ Representation::Tagged(), any_type);
+}
+
+////////////////////////////////////////////////////////////////////////////////
// A set of tests checking split map deprecation.
//
@@ -1640,15 +1929,16 @@ static void TestGeneralizeRepresentationWithSpecialTransition(
CHECK(map->is_stable());
CHECK(expectations.Check(*map));
+ Expectations expectations2 = expectations;
+
// Apply some special transition to |map|.
CHECK(map->owns_descriptors());
- Handle<Map> map2 = config.Transition(map);
+ Handle<Map> map2 = config.Transition(map, expectations2);
// |map| should still match expectations.
CHECK(!map->is_deprecated());
CHECK(expectations.Check(*map));
- Expectations expectations2 = expectations;
if (config.generalizes_representations()) {
for (int i = 0; i < kPropCount; i++) {
expectations2.GeneralizeRepresentation(i);
@@ -1720,13 +2010,15 @@ TEST(ElementsKindTransitionFromMapOwningDescriptor) {
FieldType::Class(Map::Create(isolate, 0), isolate);
struct TestConfig {
- Handle<Map> Transition(Handle<Map> map) {
- return Map::CopyAsElementsKind(map, DICTIONARY_ELEMENTS,
- INSERT_TRANSITION);
+ Handle<Map> Transition(Handle<Map> map, Expectations& expectations) {
+ Handle<Symbol> frozen_symbol(map->GetHeap()->frozen_symbol());
+ expectations.SetElementsKind(DICTIONARY_ELEMENTS);
+ return Map::CopyForPreventExtensions(map, NONE, frozen_symbol,
+ "CopyForPreventExtensions");
}
// TODO(ishell): remove once IS_PROTO_TRANS_ISSUE_FIXED is removed.
bool generalizes_representations() const { return false; }
- bool is_non_equevalent_transition() const { return false; }
+ bool is_non_equevalent_transition() const { return true; }
};
TestConfig config;
TestGeneralizeRepresentationWithSpecialTransition(
@@ -1744,7 +2036,7 @@ TEST(ElementsKindTransitionFromMapNotOwningDescriptor) {
FieldType::Class(Map::Create(isolate, 0), isolate);
struct TestConfig {
- Handle<Map> Transition(Handle<Map> map) {
+ Handle<Map> Transition(Handle<Map> map, Expectations& expectations) {
Isolate* isolate = CcTest::i_isolate();
Handle<FieldType> any_type = FieldType::Any(isolate);
@@ -1756,12 +2048,14 @@ TEST(ElementsKindTransitionFromMapNotOwningDescriptor) {
.ToHandleChecked();
CHECK(!map->owns_descriptors());
- return Map::CopyAsElementsKind(map, DICTIONARY_ELEMENTS,
- INSERT_TRANSITION);
+ Handle<Symbol> frozen_symbol(map->GetHeap()->frozen_symbol());
+ expectations.SetElementsKind(DICTIONARY_ELEMENTS);
+ return Map::CopyForPreventExtensions(map, NONE, frozen_symbol,
+ "CopyForPreventExtensions");
}
// TODO(ishell): remove once IS_PROTO_TRANS_ISSUE_FIXED is removed.
bool generalizes_representations() const { return false; }
- bool is_non_equevalent_transition() const { return false; }
+ bool is_non_equevalent_transition() const { return true; }
};
TestConfig config;
TestGeneralizeRepresentationWithSpecialTransition(
@@ -1779,7 +2073,7 @@ TEST(ForObservedTransitionFromMapOwningDescriptor) {
FieldType::Class(Map::Create(isolate, 0), isolate);
struct TestConfig {
- Handle<Map> Transition(Handle<Map> map) {
+ Handle<Map> Transition(Handle<Map> map, Expectations& expectations) {
return Map::CopyForObserved(map);
}
// TODO(ishell): remove once IS_PROTO_TRANS_ISSUE_FIXED is removed.
@@ -1802,7 +2096,7 @@ TEST(ForObservedTransitionFromMapNotOwningDescriptor) {
FieldType::Class(Map::Create(isolate, 0), isolate);
struct TestConfig {
- Handle<Map> Transition(Handle<Map> map) {
+ Handle<Map> Transition(Handle<Map> map, Expectations& expectations) {
Isolate* isolate = CcTest::i_isolate();
Handle<FieldType> any_type = FieldType::Any(isolate);
@@ -1845,7 +2139,7 @@ TEST(PrototypeTransitionFromMapOwningDescriptor) {
prototype_ = factory->NewJSObjectFromMap(Map::Create(isolate, 0));
}
- Handle<Map> Transition(Handle<Map> map) {
+ Handle<Map> Transition(Handle<Map> map, Expectations& expectations) {
return Map::TransitionToPrototype(map, prototype_, REGULAR_PROTOTYPE);
}
// TODO(ishell): remove once IS_PROTO_TRANS_ISSUE_FIXED is removed.
@@ -1879,7 +2173,7 @@ TEST(PrototypeTransitionFromMapNotOwningDescriptor) {
prototype_ = factory->NewJSObjectFromMap(Map::Create(isolate, 0));
}
- Handle<Map> Transition(Handle<Map> map) {
+ Handle<Map> Transition(Handle<Map> map, Expectations& expectations) {
Isolate* isolate = CcTest::i_isolate();
Handle<FieldType> any_type = FieldType::Any(isolate);
diff --git a/deps/v8/test/cctest/test-gc-tracer.cc b/deps/v8/test/cctest/test-gc-tracer.cc
deleted file mode 100644
index 1289ec5cea..0000000000
--- a/deps/v8/test/cctest/test-gc-tracer.cc
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-#include <utility>
-
-#include "src/heap/gc-tracer.h"
-#include "test/cctest/cctest.h"
-
-using namespace v8::internal;
-
-TEST(RingBufferPartialFill) {
- const int max_size = 6;
- typedef RingBuffer<int, max_size>::const_iterator Iter;
- RingBuffer<int, max_size> ring_buffer;
- CHECK(ring_buffer.empty());
- CHECK_EQ(static_cast<int>(ring_buffer.size()), 0);
- CHECK(ring_buffer.begin() == ring_buffer.end());
-
- // Fill ring_buffer partially: [0, 1, 2]
- for (int i = 0; i < max_size / 2; i++) ring_buffer.push_back(i);
-
- CHECK(!ring_buffer.empty());
- CHECK(static_cast<int>(ring_buffer.size()) == max_size / 2);
- CHECK(ring_buffer.begin() != ring_buffer.end());
-
- // Test forward itartion
- int i = 0;
- for (Iter iter = ring_buffer.begin(); iter != ring_buffer.end(); ++iter) {
- CHECK(*iter == i);
- ++i;
- }
- CHECK_EQ(i, 3); // one past last element.
-
- // Test backward iteration
- i = 2;
- Iter iter = ring_buffer.back();
- while (true) {
- CHECK(*iter == i);
- if (iter == ring_buffer.begin()) break;
- --iter;
- --i;
- }
- CHECK_EQ(i, 0);
-}
-
-
-TEST(RingBufferWrapAround) {
- const int max_size = 6;
- typedef RingBuffer<int, max_size>::const_iterator Iter;
- RingBuffer<int, max_size> ring_buffer;
-
- // Fill ring_buffer (wrap around): [9, 10, 11, 12, 13, 14]
- for (int i = 0; i < 2 * max_size + 3; i++) ring_buffer.push_back(i);
-
- CHECK(!ring_buffer.empty());
- CHECK(static_cast<int>(ring_buffer.size()) == max_size);
- CHECK(ring_buffer.begin() != ring_buffer.end());
-
- // Test forward iteration
- int i = 9;
- for (Iter iter = ring_buffer.begin(); iter != ring_buffer.end(); ++iter) {
- CHECK(*iter == i);
- ++i;
- }
- CHECK_EQ(i, 15); // one past last element.
-
- // Test backward iteration
- i = 14;
- Iter iter = ring_buffer.back();
- while (true) {
- CHECK(*iter == i);
- if (iter == ring_buffer.begin()) break;
- --iter;
- --i;
- }
- CHECK_EQ(i, 9);
-}
-
-
-TEST(RingBufferPushFront) {
- const int max_size = 6;
- typedef RingBuffer<int, max_size>::const_iterator Iter;
- RingBuffer<int, max_size> ring_buffer;
-
- // Fill ring_buffer (wrap around): [14, 13, 12, 11, 10, 9]
- for (int i = 0; i < 2 * max_size + 3; i++) ring_buffer.push_front(i);
-
- CHECK(!ring_buffer.empty());
- CHECK(static_cast<int>(ring_buffer.size()) == max_size);
- CHECK(ring_buffer.begin() != ring_buffer.end());
-
- // Test forward iteration
- int i = 14;
- for (Iter iter = ring_buffer.begin(); iter != ring_buffer.end(); ++iter) {
- CHECK(*iter == i);
- --i;
- }
- CHECK_EQ(i, 8); // one past last element.
-}
diff --git a/deps/v8/test/cctest/test-hashing.cc b/deps/v8/test/cctest/test-hashing.cc
index 25a8e5c527..d6f055e321 100644
--- a/deps/v8/test/cctest/test-hashing.cc
+++ b/deps/v8/test/cctest/test-hashing.cc
@@ -90,6 +90,15 @@ void generate(MacroAssembler* masm, uint32_t key) {
__ pop(kRootRegister);
__ jr(ra);
__ nop();
+#elif V8_TARGET_ARCH_S390
+ __ push(kRootRegister);
+ __ push(ip);
+ __ InitializeRootRegister();
+ __ lhi(r2, Operand(key));
+ __ GetNumberHash(r2, ip);
+ __ pop(ip);
+ __ pop(kRootRegister);
+ __ Ret();
#elif V8_TARGET_ARCH_PPC
__ function_descriptor();
__ push(kRootRegister);
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index 2632593ec6..dfe591148a 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -32,6 +32,7 @@
#include "src/v8.h"
#include "include/v8-profiler.h"
+#include "src/collector.h"
#include "src/debug/debug.h"
#include "src/hashmap.h"
#include "src/profiler/allocation-tracker.h"
@@ -366,7 +367,9 @@ TEST(HeapSnapshotCodeObjects) {
}
}
CHECK(compiled_references_x);
- CHECK(!lazy_references_x);
+ if (i::FLAG_lazy && !(i::FLAG_ignition && i::FLAG_ignition_eager)) {
+ CHECK(!lazy_references_x);
+ }
}
@@ -2872,7 +2875,6 @@ static const v8::AllocationProfile::Node* FindAllocationProfileNode(
return node;
}
-
TEST(SamplingHeapProfiler) {
v8::HandleScope scope(v8::Isolate::GetCurrent());
LocalContext env;
@@ -2985,6 +2987,23 @@ TEST(SamplingHeapProfiler) {
heap_profiler->StopSamplingHeapProfiler();
}
+
+ // A test case with scripts unloaded before profile gathered
+ {
+ heap_profiler->StartSamplingHeapProfiler(64);
+ CompileRun(
+ "for (var i = 0; i < 1024; i++) {\n"
+ " eval(\"new Array(100)\");\n"
+ "}\n");
+
+ CcTest::heap()->CollectAllGarbage();
+
+ v8::base::SmartPointer<v8::AllocationProfile> profile(
+ heap_profiler->GetAllocationProfile());
+ CHECK(!profile.is_empty());
+
+ heap_profiler->StopSamplingHeapProfiler();
+ }
}
diff --git a/deps/v8/test/cctest/test-inobject-slack-tracking.cc b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
index 5cc4e94a02..004781ab41 100644
--- a/deps/v8/test/cctest/test-inobject-slack-tracking.cc
+++ b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
@@ -612,6 +612,8 @@ static void TestClassHierarchy(const std::vector<int>& hierarchy_desc, int n) {
Handle<JSObject> tmp = Run<JSObject>(new_script);
CHECK_EQ(initial_map->IsInobjectSlackTrackingInProgress(),
IsObjectShrinkable(*tmp));
+ CHECK_EQ(Map::kSlackTrackingCounterStart - i - 1,
+ initial_map->construction_counter());
}
CHECK(!initial_map->IsInobjectSlackTrackingInProgress());
CHECK(!IsObjectShrinkable(*obj));
diff --git a/deps/v8/test/cctest/test-liveedit.cc b/deps/v8/test/cctest/test-liveedit.cc
index b8cf406130..bae3ed5ac4 100644
--- a/deps/v8/test/cctest/test-liveedit.cc
+++ b/deps/v8/test/cctest/test-liveedit.cc
@@ -95,7 +95,8 @@ void CompareStringsOneWay(const char* s1, const char* s2,
int expected_diff_parameter = -1) {
StringCompareInput input(s1, s2);
- Zone zone;
+ v8::base::AccountingAllocator allocator;
+ Zone zone(&allocator);
DiffChunkStruct* first_chunk;
ListDiffOutputWriter writer(&first_chunk, &zone);
diff --git a/deps/v8/test/cctest/test-lockers.cc b/deps/v8/test/cctest/test-lockers.cc
index 385366aa24..f12ca87fa6 100644
--- a/deps/v8/test/cctest/test-lockers.cc
+++ b/deps/v8/test/cctest/test-lockers.cc
@@ -298,7 +298,7 @@ class SeparateIsolatesLocksNonexclusiveThread : public JoinableThread {
// Run parallel threads that lock and access different isolates in parallel
TEST(SeparateIsolatesLocksNonexclusive) {
i::FLAG_always_opt = false;
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_S390
const int kNThreads = 50;
#else
const int kNThreads = 100;
@@ -382,7 +382,7 @@ class LockerUnlockerThread : public JoinableThread {
// Use unlocker inside of a Locker, multiple threads.
TEST(LockerUnlocker) {
i::FLAG_always_opt = false;
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_S390
const int kNThreads = 50;
#else
const int kNThreads = 100;
@@ -439,7 +439,7 @@ class LockTwiceAndUnlockThread : public JoinableThread {
// Use Unlocker inside two Lockers.
TEST(LockTwiceAndUnlock) {
i::FLAG_always_opt = false;
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_S390
const int kNThreads = 50;
#else
const int kNThreads = 100;
@@ -710,6 +710,8 @@ TEST(ExtensionsRegistration) {
const int kNThreads = 10;
#elif V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
const int kNThreads = 4;
+#elif V8_TARGET_ARCH_S390 && V8_TARGET_ARCH_32_BIT
+ const int kNThreads = 10;
#else
const int kNThreads = 40;
#endif
diff --git a/deps/v8/test/cctest/test-log-stack-tracer.cc b/deps/v8/test/cctest/test-log-stack-tracer.cc
index 21dafda4a9..05d7103685 100644
--- a/deps/v8/test/cctest/test-log-stack-tracer.cc
+++ b/deps/v8/test/cctest/test-log-stack-tracer.cc
@@ -57,7 +57,7 @@ using v8::internal::TickSample;
static bool IsAddressWithinFuncCode(JSFunction* function, Address addr) {
- i::Code* code = function->code();
+ i::AbstractCode* code = function->abstract_code();
return code->contains(addr);
}
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index 8077f65ae3..9c7e63bfed 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -569,14 +569,13 @@ TEST(LogVersion) {
TEST(Issue539892) {
class : public i::CodeEventLogger {
public:
- virtual void CodeMoveEvent(Address from, Address to) {}
- virtual void CodeDeleteEvent(Address from) {}
- virtual void CodeDisableOptEvent(i::Code* code,
- i::SharedFunctionInfo* shared) {}
+ void CodeMoveEvent(i::AbstractCode* from, Address to) override {}
+ void CodeDisableOptEvent(i::AbstractCode* code,
+ i::SharedFunctionInfo* shared) override {}
private:
- virtual void LogRecordedBuffer(i::Code* code, i::SharedFunctionInfo* shared,
- const char* name, int length) {}
+ void LogRecordedBuffer(i::AbstractCode* code, i::SharedFunctionInfo* shared,
+ const char* name, int length) override {}
} code_event_logger;
SETUP_FLAGS();
v8::Isolate::CreateParams create_params;
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips.cc b/deps/v8/test/cctest/test-macro-assembler-mips.cc
index 77dc859022..91ee215315 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips.cc
@@ -40,6 +40,7 @@ using namespace v8::internal;
typedef void* (*F)(int x, int y, int p2, int p3, int p4);
typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
+typedef Object* (*F3)(void* p, int p1, int p2, int p3, int p4);
#define __ masm->
@@ -267,7 +268,7 @@ TEST(jump_tables5) {
masm, kNumCases * kPointerSize + ((6 + 1) * Assembler::kInstrSize));
__ addiupc(at, 6 + 1);
- __ lsa(at, at, a0, 2);
+ __ Lsa(at, at, a0, 2);
__ lw(at, MemOperand(at));
__ jalr(at);
__ nop(); // Branch delay slot nop.
@@ -389,4 +390,191 @@ TEST(Lsa) {
}
}
+static const std::vector<uint32_t> uint32_test_values() {
+ static const uint32_t kValues[] = {0x00000000, 0x00000001, 0x00ffff00,
+ 0x7fffffff, 0x80000000, 0x80000001,
+ 0x80ffff00, 0x8fffffff, 0xffffffff};
+ return std::vector<uint32_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+static const std::vector<int32_t> int32_test_values() {
+ static const int32_t kValues[] = {
+ static_cast<int32_t>(0x00000000), static_cast<int32_t>(0x00000001),
+ static_cast<int32_t>(0x00ffff00), static_cast<int32_t>(0x7fffffff),
+ static_cast<int32_t>(0x80000000), static_cast<int32_t>(0x80000001),
+ static_cast<int32_t>(0x80ffff00), static_cast<int32_t>(0x8fffffff),
+ static_cast<int32_t>(0xffffffff)};
+ return std::vector<int32_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+// Helper macros that can be used in FOR_INT32_INPUTS(i) { ... *i ... }
+#define FOR_INPUTS(ctype, itype, var) \
+ std::vector<ctype> var##_vec = itype##_test_values(); \
+ for (std::vector<ctype>::iterator var = var##_vec.begin(); \
+ var != var##_vec.end(); ++var)
+
+#define FOR_UINT32_INPUTS(var) FOR_INPUTS(uint32_t, uint32, var)
+#define FOR_INT32_INPUTS(var) FOR_INPUTS(int32_t, int32, var)
+
+template <typename RET_TYPE, typename IN_TYPE, typename Func>
+RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) {
+ typedef RET_TYPE (*F_CVT)(IN_TYPE x0, int x1, int x2, int x3, int x4);
+
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assm;
+
+ __ mtc1(a0, f4);
+ GenerateConvertInstructionFunc(masm);
+ __ mfc1(v0, f2);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+ F_CVT f = FUNCTION_CAST<F_CVT>(code->entry());
+
+ return reinterpret_cast<RET_TYPE>(
+ CALL_GENERATED_CODE(isolate, f, x, 0, 0, 0, 0));
+}
+
+TEST(cvt_s_w_Trunc_uw_s) {
+ CcTest::InitializeVM();
+ FOR_UINT32_INPUTS(i) {
+ uint32_t input = *i;
+ CHECK_EQ(static_cast<float>(input),
+ run_Cvt<uint32_t>(input, [](MacroAssembler* masm) {
+ __ cvt_s_w(f0, f4);
+ __ Trunc_uw_s(f2, f0, f1);
+ }));
+ }
+}
+
+TEST(cvt_d_w_Trunc_w_d) {
+ CcTest::InitializeVM();
+ FOR_INT32_INPUTS(i) {
+ int32_t input = *i;
+ CHECK_EQ(static_cast<double>(input),
+ run_Cvt<int32_t>(input, [](MacroAssembler* masm) {
+ __ cvt_d_w(f0, f4);
+ __ Trunc_w_d(f2, f0);
+ }));
+ }
+}
+
+TEST(min_max_nan) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assembler(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assembler;
+
+ struct TestFloat {
+ double a;
+ double b;
+ double c;
+ double d;
+ float e;
+ float f;
+ float g;
+ float h;
+ };
+
+ TestFloat test;
+ const double dnan = std::numeric_limits<double>::quiet_NaN();
+ const double dinf = std::numeric_limits<double>::infinity();
+ const double dminf = -std::numeric_limits<double>::infinity();
+ const float fnan = std::numeric_limits<float>::quiet_NaN();
+ const float finf = std::numeric_limits<float>::infinity();
+ const float fminf = std::numeric_limits<float>::infinity();
+ const int kTableLength = 13;
+
+ double inputsa[kTableLength] = {2.0, 3.0, -0.0, 0.0, 42.0, dinf, dminf,
+ dinf, dnan, 3.0, dinf, dnan, dnan};
+ double inputsb[kTableLength] = {3.0, 2.0, 0.0, -0.0, dinf, 42.0, dinf,
+ dminf, 3.0, dnan, dnan, dinf, dnan};
+ double outputsdmin[kTableLength] = {2.0, 2.0, -0.0, -0.0, 42.0,
+ 42.0, dminf, dminf, dnan, dnan,
+ dnan, dnan, dnan};
+ double outputsdmax[kTableLength] = {3.0, 3.0, 0.0, 0.0, dinf, dinf, dinf,
+ dinf, dnan, dnan, dnan, dnan, dnan};
+
+ float inputse[kTableLength] = {2.0, 3.0, -0.0, 0.0, 42.0, finf, fminf,
+ finf, fnan, 3.0, finf, fnan, fnan};
+ float inputsf[kTableLength] = {3.0, 2.0, 0.0, -0.0, finf, 42.0, finf,
+ fminf, 3.0, fnan, fnan, finf, fnan};
+ float outputsfmin[kTableLength] = {2.0, 2.0, -0.0, -0.0, 42.0, 42.0, fminf,
+ fminf, fnan, fnan, fnan, fnan, fnan};
+ float outputsfmax[kTableLength] = {3.0, 3.0, 0.0, 0.0, finf, finf, finf,
+ finf, fnan, fnan, fnan, fnan, fnan};
+
+ auto handle_dnan = [masm](FPURegister dst, Label* nan, Label* back) {
+ __ bind(nan);
+ __ LoadRoot(at, Heap::kNanValueRootIndex);
+ __ ldc1(dst, FieldMemOperand(at, HeapNumber::kValueOffset));
+ __ Branch(back);
+ };
+
+ auto handle_snan = [masm, fnan](FPURegister dst, Label* nan, Label* back) {
+ __ bind(nan);
+ __ Move(dst, fnan);
+ __ Branch(back);
+ };
+
+ Label handle_mind_nan, handle_maxd_nan, handle_mins_nan, handle_maxs_nan;
+ Label back_mind_nan, back_maxd_nan, back_mins_nan, back_maxs_nan;
+
+ __ push(s6);
+ __ InitializeRootRegister();
+ __ ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
+ __ ldc1(f8, MemOperand(a0, offsetof(TestFloat, b)));
+ __ lwc1(f2, MemOperand(a0, offsetof(TestFloat, e)));
+ __ lwc1(f6, MemOperand(a0, offsetof(TestFloat, f)));
+ __ MinNaNCheck_d(f10, f4, f8, &handle_mind_nan);
+ __ bind(&back_mind_nan);
+ __ MaxNaNCheck_d(f12, f4, f8, &handle_maxd_nan);
+ __ bind(&back_maxd_nan);
+ __ MinNaNCheck_s(f14, f2, f6, &handle_mins_nan);
+ __ bind(&back_mins_nan);
+ __ MaxNaNCheck_s(f16, f2, f6, &handle_maxs_nan);
+ __ bind(&back_maxs_nan);
+ __ sdc1(f10, MemOperand(a0, offsetof(TestFloat, c)));
+ __ sdc1(f12, MemOperand(a0, offsetof(TestFloat, d)));
+ __ swc1(f14, MemOperand(a0, offsetof(TestFloat, g)));
+ __ swc1(f16, MemOperand(a0, offsetof(TestFloat, h)));
+ __ pop(s6);
+ __ jr(ra);
+ __ nop();
+
+ handle_dnan(f10, &handle_mind_nan, &back_mind_nan);
+ handle_dnan(f12, &handle_maxd_nan, &back_maxd_nan);
+ handle_snan(f14, &handle_mins_nan, &back_mins_nan);
+ handle_snan(f16, &handle_maxs_nan, &back_maxs_nan);
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ ::F3 f = FUNCTION_CAST<::F3>(code->entry());
+ for (int i = 0; i < kTableLength; i++) {
+ test.a = inputsa[i];
+ test.b = inputsb[i];
+ test.e = inputse[i];
+ test.f = inputsf[i];
+
+ CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0);
+
+ CHECK_EQ(0, memcmp(&test.c, &outputsdmin[i], sizeof(test.c)));
+ CHECK_EQ(0, memcmp(&test.d, &outputsdmax[i], sizeof(test.d)));
+ CHECK_EQ(0, memcmp(&test.g, &outputsfmin[i], sizeof(test.g)));
+ CHECK_EQ(0, memcmp(&test.h, &outputsfmax[i], sizeof(test.h)));
+ }
+}
+
#undef __
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips64.cc b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
index e74703b8f8..e251242dee 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
@@ -41,6 +41,7 @@ using namespace v8::internal;
typedef void* (*F)(int64_t x, int64_t y, int p2, int p3, int p4);
typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
+typedef Object* (*F3)(void* p, int p1, int p2, int p3, int p4);
#define __ masm->
@@ -316,7 +317,7 @@ TEST(jump_tables5) {
masm, kNumCases * kPointerSize + ((6 + 1) * Assembler::kInstrSize));
__ addiupc(at, 6 + 1);
- __ dlsa(at, at, a0, 3);
+ __ Dlsa(at, at, a0, 3);
__ ld(at, MemOperand(at));
__ jalr(at);
__ nop(); // Branch delay slot nop.
@@ -522,4 +523,268 @@ TEST(Dlsa) {
}
}
+static const std::vector<uint32_t> uint32_test_values() {
+ static const uint32_t kValues[] = {0x00000000, 0x00000001, 0x00ffff00,
+ 0x7fffffff, 0x80000000, 0x80000001,
+ 0x80ffff00, 0x8fffffff, 0xffffffff};
+ return std::vector<uint32_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+static const std::vector<int32_t> int32_test_values() {
+ static const int32_t kValues[] = {
+ static_cast<int32_t>(0x00000000), static_cast<int32_t>(0x00000001),
+ static_cast<int32_t>(0x00ffff00), static_cast<int32_t>(0x7fffffff),
+ static_cast<int32_t>(0x80000000), static_cast<int32_t>(0x80000001),
+ static_cast<int32_t>(0x80ffff00), static_cast<int32_t>(0x8fffffff),
+ static_cast<int32_t>(0xffffffff)};
+ return std::vector<int32_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+static const std::vector<uint64_t> uint64_test_values() {
+ static const uint64_t kValues[] = {
+ 0x0000000000000000, 0x0000000000000001, 0x0000ffffffff0000,
+ 0x7fffffffffffffff, 0x8000000000000000, 0x8000000000000001,
+ 0x8000ffffffff0000, 0x8fffffffffffffff, 0xffffffffffffffff};
+ return std::vector<uint64_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+static const std::vector<int64_t> int64_test_values() {
+ static const int64_t kValues[] = {static_cast<int64_t>(0x0000000000000000),
+ static_cast<int64_t>(0x0000000000000001),
+ static_cast<int64_t>(0x0000ffffffff0000),
+ static_cast<int64_t>(0x7fffffffffffffff),
+ static_cast<int64_t>(0x8000000000000000),
+ static_cast<int64_t>(0x8000000000000001),
+ static_cast<int64_t>(0x8000ffffffff0000),
+ static_cast<int64_t>(0x8fffffffffffffff),
+ static_cast<int64_t>(0xffffffffffffffff)};
+ return std::vector<int64_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+// Helper macros that can be used in FOR_INT32_INPUTS(i) { ... *i ... }
+#define FOR_INPUTS(ctype, itype, var) \
+ std::vector<ctype> var##_vec = itype##_test_values(); \
+ for (std::vector<ctype>::iterator var = var##_vec.begin(); \
+ var != var##_vec.end(); ++var)
+
+#define FOR_INT32_INPUTS(var) FOR_INPUTS(int32_t, int32, var)
+#define FOR_INT64_INPUTS(var) FOR_INPUTS(int64_t, int64, var)
+#define FOR_UINT32_INPUTS(var) FOR_INPUTS(uint32_t, uint32, var)
+#define FOR_UINT64_INPUTS(var) FOR_INPUTS(uint64_t, uint64, var)
+
+template <typename RET_TYPE, typename IN_TYPE, typename Func>
+RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) {
+ typedef RET_TYPE (*F_CVT)(IN_TYPE x0, int x1, int x2, int x3, int x4);
+
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assm;
+
+ GenerateConvertInstructionFunc(masm);
+ __ dmfc1(v0, f2);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+ F_CVT f = FUNCTION_CAST<F_CVT>(code->entry());
+
+ return reinterpret_cast<RET_TYPE>(
+ CALL_GENERATED_CODE(isolate, f, x, 0, 0, 0, 0));
+}
+
+TEST(Cvt_s_uw_Trunc_uw_s) {
+ CcTest::InitializeVM();
+ FOR_UINT32_INPUTS(i) {
+ uint32_t input = *i;
+ CHECK_EQ(static_cast<float>(input),
+ run_Cvt<uint64_t>(input, [](MacroAssembler* masm) {
+ __ Cvt_s_uw(f0, a0);
+ __ mthc1(zero_reg, f2);
+ __ Trunc_uw_s(f2, f0, f1);
+ }));
+ }
+}
+
+TEST(Cvt_s_ul_Trunc_ul_s) {
+ CcTest::InitializeVM();
+ FOR_UINT64_INPUTS(i) {
+ uint64_t input = *i;
+ CHECK_EQ(static_cast<float>(input),
+ run_Cvt<uint64_t>(input, [](MacroAssembler* masm) {
+ __ Cvt_s_ul(f0, a0);
+ __ Trunc_ul_s(f2, f0, f1, v0);
+ }));
+ }
+}
+
+TEST(Cvt_d_ul_Trunc_ul_d) {
+ CcTest::InitializeVM();
+ FOR_UINT64_INPUTS(i) {
+ uint64_t input = *i;
+ CHECK_EQ(static_cast<double>(input),
+ run_Cvt<uint64_t>(input, [](MacroAssembler* masm) {
+ __ Cvt_d_ul(f0, a0);
+ __ Trunc_ul_d(f2, f0, f1, v0);
+ }));
+ }
+}
+
+TEST(cvt_d_l_Trunc_l_d) {
+ CcTest::InitializeVM();
+ FOR_INT64_INPUTS(i) {
+ int64_t input = *i;
+ CHECK_EQ(static_cast<double>(input),
+ run_Cvt<int64_t>(input, [](MacroAssembler* masm) {
+ __ dmtc1(a0, f4);
+ __ cvt_d_l(f0, f4);
+ __ Trunc_l_d(f2, f0);
+ }));
+ }
+}
+
+TEST(cvt_d_l_Trunc_l_ud) {
+ CcTest::InitializeVM();
+ FOR_INT64_INPUTS(i) {
+ int64_t input = *i;
+ uint64_t abs_input = (input < 0) ? -input : input;
+ CHECK_EQ(static_cast<double>(abs_input),
+ run_Cvt<uint64_t>(input, [](MacroAssembler* masm) {
+ __ dmtc1(a0, f4);
+ __ cvt_d_l(f0, f4);
+ __ Trunc_l_ud(f2, f0, f6);
+ }));
+ }
+}
+
+TEST(cvt_d_w_Trunc_w_d) {
+ CcTest::InitializeVM();
+ FOR_INT32_INPUTS(i) {
+ int32_t input = *i;
+ CHECK_EQ(static_cast<double>(input),
+ run_Cvt<int64_t>(input, [](MacroAssembler* masm) {
+ __ mtc1(a0, f4);
+ __ cvt_d_w(f0, f4);
+ __ Trunc_w_d(f2, f0);
+ __ mfc1(v1, f2);
+ __ dmtc1(v1, f2);
+ }));
+ }
+}
+
+TEST(min_max_nan) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assembler(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assembler;
+
+ struct TestFloat {
+ double a;
+ double b;
+ double c;
+ double d;
+ float e;
+ float f;
+ float g;
+ float h;
+ };
+
+ TestFloat test;
+ const double dnan = std::numeric_limits<double>::quiet_NaN();
+ const double dinf = std::numeric_limits<double>::infinity();
+ const double dminf = -std::numeric_limits<double>::infinity();
+ const float fnan = std::numeric_limits<float>::quiet_NaN();
+ const float finf = std::numeric_limits<float>::infinity();
+ const float fminf = std::numeric_limits<float>::infinity();
+ const int kTableLength = 13;
+
+ double inputsa[kTableLength] = {2.0, 3.0, -0.0, 0.0, 42.0, dinf, dminf,
+ dinf, dnan, 3.0, dinf, dnan, dnan};
+ double inputsb[kTableLength] = {3.0, 2.0, 0.0, -0.0, dinf, 42.0, dinf,
+ dminf, 3.0, dnan, dnan, dinf, dnan};
+ double outputsdmin[kTableLength] = {2.0, 2.0, -0.0, -0.0, 42.0,
+ 42.0, dminf, dminf, dnan, dnan,
+ dnan, dnan, dnan};
+ double outputsdmax[kTableLength] = {3.0, 3.0, 0.0, 0.0, dinf, dinf, dinf,
+ dinf, dnan, dnan, dnan, dnan, dnan};
+
+ float inputse[kTableLength] = {2.0, 3.0, -0.0, 0.0, 42.0, finf, fminf,
+ finf, fnan, 3.0, finf, fnan, fnan};
+ float inputsf[kTableLength] = {3.0, 2.0, 0.0, -0.0, finf, 42.0, finf,
+ fminf, 3.0, fnan, fnan, finf, fnan};
+ float outputsfmin[kTableLength] = {2.0, 2.0, -0.0, -0.0, 42.0, 42.0, fminf,
+ fminf, fnan, fnan, fnan, fnan, fnan};
+ float outputsfmax[kTableLength] = {3.0, 3.0, 0.0, 0.0, finf, finf, finf,
+ finf, fnan, fnan, fnan, fnan, fnan};
+
+ auto handle_dnan = [masm](FPURegister dst, Label* nan, Label* back) {
+ __ bind(nan);
+ __ LoadRoot(at, Heap::kNanValueRootIndex);
+ __ ldc1(dst, FieldMemOperand(at, HeapNumber::kValueOffset));
+ __ Branch(back);
+ };
+
+ auto handle_snan = [masm, fnan](FPURegister dst, Label* nan, Label* back) {
+ __ bind(nan);
+ __ Move(dst, fnan);
+ __ Branch(back);
+ };
+
+ Label handle_mind_nan, handle_maxd_nan, handle_mins_nan, handle_maxs_nan;
+ Label back_mind_nan, back_maxd_nan, back_mins_nan, back_maxs_nan;
+
+ __ push(s6);
+ __ InitializeRootRegister();
+ __ ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
+ __ ldc1(f8, MemOperand(a0, offsetof(TestFloat, b)));
+ __ lwc1(f2, MemOperand(a0, offsetof(TestFloat, e)));
+ __ lwc1(f6, MemOperand(a0, offsetof(TestFloat, f)));
+ __ MinNaNCheck_d(f10, f4, f8, &handle_mind_nan);
+ __ bind(&back_mind_nan);
+ __ MaxNaNCheck_d(f12, f4, f8, &handle_maxd_nan);
+ __ bind(&back_maxd_nan);
+ __ MinNaNCheck_s(f14, f2, f6, &handle_mins_nan);
+ __ bind(&back_mins_nan);
+ __ MaxNaNCheck_s(f16, f2, f6, &handle_maxs_nan);
+ __ bind(&back_maxs_nan);
+ __ sdc1(f10, MemOperand(a0, offsetof(TestFloat, c)));
+ __ sdc1(f12, MemOperand(a0, offsetof(TestFloat, d)));
+ __ swc1(f14, MemOperand(a0, offsetof(TestFloat, g)));
+ __ swc1(f16, MemOperand(a0, offsetof(TestFloat, h)));
+ __ pop(s6);
+ __ jr(ra);
+ __ nop();
+
+ handle_dnan(f10, &handle_mind_nan, &back_mind_nan);
+ handle_dnan(f12, &handle_maxd_nan, &back_maxd_nan);
+ handle_snan(f14, &handle_mins_nan, &back_mins_nan);
+ handle_snan(f16, &handle_maxs_nan, &back_maxs_nan);
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ ::F3 f = FUNCTION_CAST<::F3>(code->entry());
+ for (int i = 0; i < kTableLength; i++) {
+ test.a = inputsa[i];
+ test.b = inputsb[i];
+ test.e = inputse[i];
+ test.f = inputsf[i];
+
+ CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0);
+
+ CHECK_EQ(0, memcmp(&test.c, &outputsdmin[i], sizeof(test.c)));
+ CHECK_EQ(0, memcmp(&test.d, &outputsdmax[i], sizeof(test.d)));
+ CHECK_EQ(0, memcmp(&test.g, &outputsfmin[i], sizeof(test.g)));
+ CHECK_EQ(0, memcmp(&test.h, &outputsfmax[i], sizeof(test.h)));
+ }
+}
+
#undef __
diff --git a/deps/v8/test/cctest/test-microtask-delivery.cc b/deps/v8/test/cctest/test-microtask-delivery.cc
index ecec77fbfd..3150ab8872 100644
--- a/deps/v8/test/cctest/test-microtask-delivery.cc
+++ b/deps/v8/test/cctest/test-microtask-delivery.cc
@@ -105,7 +105,7 @@ TEST(MicrotaskPerIsolateState) {
HarmonyIsolate isolate;
v8::HandleScope scope(isolate.GetIsolate());
LocalContext context1(isolate.GetIsolate());
- isolate.GetIsolate()->SetAutorunMicrotasks(false);
+ isolate.GetIsolate()->SetMicrotasksPolicy(v8::MicrotasksPolicy::kExplicit);
CompileRun(
"var obj = { calls: 0 };");
v8::Local<v8::Value> obj = CompileRun("obj");
diff --git a/deps/v8/test/cctest/test-object-observe.cc b/deps/v8/test/cctest/test-object-observe.cc
index 5164b87df7..f17b8c081e 100644
--- a/deps/v8/test/cctest/test-object-observe.cc
+++ b/deps/v8/test/cctest/test-object-observe.cc
@@ -491,16 +491,17 @@ TEST(ObservationWeakMap) {
i::Isolate* i_isolate = CcTest::i_isolate();
i::Handle<i::JSObject> observation_state =
i_isolate->factory()->observation_state();
- i::Handle<i::JSWeakMap> callbackInfoMap =
- i::Handle<i::JSWeakMap>::cast(i::Object::GetProperty(
- i_isolate, observation_state, "callbackInfoMap").ToHandleChecked());
- i::Handle<i::JSWeakMap> objectInfoMap =
- i::Handle<i::JSWeakMap>::cast(i::Object::GetProperty(
- i_isolate, observation_state, "objectInfoMap").ToHandleChecked());
- i::Handle<i::JSWeakMap> notifierObjectInfoMap =
- i::Handle<i::JSWeakMap>::cast(i::Object::GetProperty(
- i_isolate, observation_state, "notifierObjectInfoMap")
- .ToHandleChecked());
+ i::Handle<i::JSWeakMap> callbackInfoMap = i::Handle<i::JSWeakMap>::cast(
+ i::JSReceiver::GetProperty(i_isolate, observation_state,
+ "callbackInfoMap")
+ .ToHandleChecked());
+ i::Handle<i::JSWeakMap> objectInfoMap = i::Handle<i::JSWeakMap>::cast(
+ i::JSReceiver::GetProperty(i_isolate, observation_state, "objectInfoMap")
+ .ToHandleChecked());
+ i::Handle<i::JSWeakMap> notifierObjectInfoMap = i::Handle<i::JSWeakMap>::cast(
+ i::JSReceiver::GetProperty(i_isolate, observation_state,
+ "notifierObjectInfoMap")
+ .ToHandleChecked());
CHECK_EQ(1, NumberOfElements(callbackInfoMap));
CHECK_EQ(1, NumberOfElements(objectInfoMap));
CHECK_EQ(1, NumberOfElements(notifierObjectInfoMap));
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index b04fb94d3a..ae278d8338 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -153,7 +153,7 @@ TEST(ScanHTMLEndComments) {
i::CompleteParserRecorder log;
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
scanner.Initialize(&stream);
- i::Zone zone;
+ i::Zone zone(CcTest::i_isolate()->allocator());
i::AstValueFactory ast_value_factory(
&zone, CcTest::i_isolate()->heap()->HashSeed());
i::PreParser preparser(&zone, &scanner, &ast_value_factory, &log,
@@ -171,7 +171,7 @@ TEST(ScanHTMLEndComments) {
i::CompleteParserRecorder log;
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
scanner.Initialize(&stream);
- i::Zone zone;
+ i::Zone zone(CcTest::i_isolate()->allocator());
i::AstValueFactory ast_value_factory(
&zone, CcTest::i_isolate()->heap()->HashSeed());
i::PreParser preparser(&zone, &scanner, &ast_value_factory, &log,
@@ -200,6 +200,9 @@ class ScriptResource : public v8::String::ExternalOneByteStringResource {
TEST(UsingCachedData) {
+ // Producing cached parser data while parsing eagerly is not supported.
+ if (!i::FLAG_lazy || (i::FLAG_ignition && i::FLAG_ignition_eager)) return;
+
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handles(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
@@ -250,6 +253,9 @@ TEST(UsingCachedData) {
TEST(PreparseFunctionDataIsUsed) {
+ // Producing cached parser data while parsing eagerly is not supported.
+ if (!i::FLAG_lazy || (i::FLAG_ignition && i::FLAG_ignition_eager)) return;
+
// This tests that we actually do use the function data generated by the
// preparser.
@@ -326,7 +332,7 @@ TEST(StandAlonePreParser) {
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
scanner.Initialize(&stream);
- i::Zone zone;
+ i::Zone zone(CcTest::i_isolate()->allocator());
i::AstValueFactory ast_value_factory(
&zone, CcTest::i_isolate()->heap()->HashSeed());
i::PreParser preparser(&zone, &scanner, &ast_value_factory, &log,
@@ -363,7 +369,7 @@ TEST(StandAlonePreParserNoNatives) {
scanner.Initialize(&stream);
// Preparser defaults to disallowing natives syntax.
- i::Zone zone;
+ i::Zone zone(CcTest::i_isolate()->allocator());
i::AstValueFactory ast_value_factory(
&zone, CcTest::i_isolate()->heap()->HashSeed());
i::PreParser preparser(&zone, &scanner, &ast_value_factory, &log,
@@ -432,7 +438,7 @@ TEST(RegressChromium62639) {
i::CompleteParserRecorder log;
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
scanner.Initialize(&stream);
- i::Zone zone;
+ i::Zone zone(CcTest::i_isolate()->allocator());
i::AstValueFactory ast_value_factory(&zone,
CcTest::i_isolate()->heap()->HashSeed());
i::PreParser preparser(&zone, &scanner, &ast_value_factory, &log,
@@ -467,7 +473,7 @@ TEST(Regress928) {
i::CompleteParserRecorder log;
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
scanner.Initialize(&stream);
- i::Zone zone;
+ i::Zone zone(CcTest::i_isolate()->allocator());
i::AstValueFactory ast_value_factory(&zone,
CcTest::i_isolate()->heap()->HashSeed());
i::PreParser preparser(&zone, &scanner, &ast_value_factory, &log,
@@ -520,7 +526,7 @@ TEST(PreParseOverflow) {
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
scanner.Initialize(&stream);
- i::Zone zone;
+ i::Zone zone(CcTest::i_isolate()->allocator());
i::AstValueFactory ast_value_factory(&zone,
CcTest::i_isolate()->heap()->HashSeed());
i::PreParser preparser(&zone, &scanner, &ast_value_factory, &log,
@@ -837,7 +843,7 @@ void TestScanRegExp(const char* re_source, const char* expected) {
CHECK(start == i::Token::DIV || start == i::Token::ASSIGN_DIV);
CHECK(scanner.ScanRegExpPattern(start == i::Token::ASSIGN_DIV));
scanner.Next(); // Current token is now the regexp literal.
- i::Zone zone;
+ i::Zone zone(CcTest::i_isolate()->allocator());
i::AstValueFactory ast_value_factory(&zone,
CcTest::i_isolate()->heap()->HashSeed());
ast_value_factory.Internalize(CcTest::i_isolate());
@@ -1061,7 +1067,7 @@ TEST(ScopeUsesArgumentsSuperThis) {
factory->NewStringFromUtf8(i::CStrVector(program.start()))
.ToHandleChecked();
i::Handle<i::Script> script = factory->NewScript(source);
- i::Zone zone;
+ i::Zone zone(CcTest::i_isolate()->allocator());
i::ParseInfo info(&zone, script);
i::Parser parser(&info);
parser.set_allow_harmony_sloppy(true);
@@ -1379,7 +1385,7 @@ TEST(ScopePositions) {
i::CStrVector(program.start())).ToHandleChecked();
CHECK_EQ(source->length(), kProgramSize);
i::Handle<i::Script> script = factory->NewScript(source);
- i::Zone zone;
+ i::Zone zone(CcTest::i_isolate()->allocator());
i::ParseInfo info(&zone, script);
i::Parser parser(&info);
parser.set_allow_lazy(true);
@@ -1428,7 +1434,7 @@ TEST(DiscardFunctionBody) {
i::Handle<i::String> source_code =
factory->NewStringFromUtf8(i::CStrVector(source)).ToHandleChecked();
i::Handle<i::Script> script = factory->NewScript(source_code);
- i::Zone zone;
+ i::Zone zone(CcTest::i_isolate()->allocator());
i::ParseInfo info(&zone, script);
info.set_allow_lazy_parsing();
i::Parser parser(&info);
@@ -1503,15 +1509,12 @@ i::Handle<i::String> FormatMessage(i::Vector<unsigned> data) {
enum ParserFlag {
kAllowLazy,
kAllowNatives,
- kAllowHarmonyDefaultParameters,
kAllowHarmonySloppy,
kAllowHarmonySloppyLet,
- kAllowHarmonyDestructuring,
- kAllowHarmonyDestructuringAssignment,
kAllowHarmonyNewTarget,
- kAllowStrongMode,
- kNoLegacyConst,
- kAllowHarmonyFunctionSent
+ kAllowHarmonyFunctionSent,
+ kAllowHarmonyRestrictiveDeclarations,
+ kAllowHarmonyExponentiationOperator
};
enum ParserSyncTestResult {
@@ -1525,18 +1528,14 @@ void SetParserFlags(i::ParserBase<Traits>* parser,
i::EnumSet<ParserFlag> flags) {
parser->set_allow_lazy(flags.Contains(kAllowLazy));
parser->set_allow_natives(flags.Contains(kAllowNatives));
- parser->set_allow_harmony_default_parameters(
- flags.Contains(kAllowHarmonyDefaultParameters));
parser->set_allow_harmony_sloppy(flags.Contains(kAllowHarmonySloppy));
parser->set_allow_harmony_sloppy_let(flags.Contains(kAllowHarmonySloppyLet));
- parser->set_allow_harmony_destructuring_bind(
- flags.Contains(kAllowHarmonyDestructuring));
- parser->set_allow_harmony_destructuring_assignment(
- flags.Contains(kAllowHarmonyDestructuringAssignment));
- parser->set_allow_strong_mode(flags.Contains(kAllowStrongMode));
- parser->set_allow_legacy_const(!flags.Contains(kNoLegacyConst));
parser->set_allow_harmony_function_sent(
flags.Contains(kAllowHarmonyFunctionSent));
+ parser->set_allow_harmony_restrictive_declarations(
+ flags.Contains(kAllowHarmonyRestrictiveDeclarations));
+ parser->set_allow_harmony_exponentiation_operator(
+ flags.Contains(kAllowHarmonyExponentiationOperator));
}
@@ -1557,7 +1556,7 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
if (test_preparser) {
i::Scanner scanner(isolate->unicode_cache());
i::GenericStringUtf16CharacterStream stream(source, 0, source->length());
- i::Zone zone;
+ i::Zone zone(CcTest::i_isolate()->allocator());
i::AstValueFactory ast_value_factory(
&zone, CcTest::i_isolate()->heap()->HashSeed());
i::PreParser preparser(&zone, &scanner, &ast_value_factory, &log,
@@ -1574,7 +1573,7 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
i::FunctionLiteral* function;
{
i::Handle<i::Script> script = factory->NewScript(source);
- i::Zone zone;
+ i::Zone zone(CcTest::i_isolate()->allocator());
i::ParseInfo info(&zone, script);
i::Parser parser(&info);
SetParserFlags(&parser, flags);
@@ -1596,9 +1595,9 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
CHECK(isolate->has_pending_exception());
i::Handle<i::JSObject> exception_handle(
i::JSObject::cast(isolate->pending_exception()));
- i::Handle<i::String> message_string =
- i::Handle<i::String>::cast(i::Object::GetProperty(
- isolate, exception_handle, "message").ToHandleChecked());
+ i::Handle<i::String> message_string = i::Handle<i::String>::cast(
+ i::JSReceiver::GetProperty(isolate, exception_handle, "message")
+ .ToHandleChecked());
if (result == kSuccess) {
v8::base::OS::Print(
@@ -1910,12 +1909,9 @@ void RunModuleParserSyncTest(const char* context_data[][2],
int always_true_len = 0,
const ParserFlag* always_false_flags = NULL,
int always_false_len = 0) {
- bool flag = i::FLAG_harmony_modules;
- i::FLAG_harmony_modules = true;
RunParserSyncTest(context_data, statement_data, result, flags, flags_len,
always_true_flags, always_true_len, always_false_flags,
always_false_len, true);
- i::FLAG_harmony_modules = flag;
}
@@ -1926,9 +1922,7 @@ TEST(ErrorsEvalAndArguments) {
// isn't.
const char* context_data[][2] = {
{"\"use strict\";", ""},
- {"\"use strong\";", ""},
{"var eval; function test_func() {\"use strict\"; ", "}"},
- {"var eval; function test_func() {\"use strong\"; ", "}"},
{NULL, NULL}};
const char* statement_data[] = {
@@ -1959,9 +1953,7 @@ TEST(ErrorsEvalAndArguments) {
NULL
};
- static const ParserFlag always_flags[] = {kAllowStrongMode};
- RunParserSyncTest(context_data, statement_data, kError, NULL, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, statement_data, kError);
}
@@ -2068,8 +2060,6 @@ TEST(ErrorsFutureStrictReservedWords) {
const char* context_data[][2] = {
{"function test_func() {\"use strict\"; ", "}"},
{"() => { \"use strict\"; ", "}"},
- {"function test_func() {\"use strong\"; ", "}"},
- {"() => { \"use strong\"; ", "}"},
{NULL, NULL}};
const char* statement_data[] {
@@ -2077,11 +2067,7 @@ TEST(ErrorsFutureStrictReservedWords) {
NULL
};
- static const ParserFlag always_flags[] = {kAllowStrongMode};
- RunParserSyncTest(context_data, statement_data, kError, NULL, 0, always_flags,
- arraysize(always_flags));
- RunParserSyncTest(context_data, statement_data, kError, NULL, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, statement_data, kError);
}
@@ -2258,13 +2244,6 @@ TEST(ErrorsYieldStrict) {
{"\"use strict\"; (function not_gen() {", "})"},
{"\"use strict\"; (function * gen() { (function not_gen() {", "}) })"},
{"() => {\"use strict\"; ", "}"},
- {"\"use strong\";", ""},
- {"\"use strong\"; function not_gen() {", "}"},
- {"function test_func() {\"use strong\"; ", "}"},
- {"\"use strong\"; function * gen() { function not_gen() {", "} }"},
- {"\"use strong\"; (function not_gen() {", "})"},
- {"\"use strong\"; (function * gen() { (function not_gen() {", "}) })"},
- {"() => {\"use strong\"; ", "}"},
{NULL, NULL}};
const char* statement_data[] = {
@@ -2285,9 +2264,7 @@ TEST(ErrorsYieldStrict) {
NULL
};
- static const ParserFlag always_flags[] = {kAllowStrongMode};
- RunParserSyncTest(context_data, statement_data, kError, NULL, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, statement_data, kError);
}
@@ -2364,13 +2341,16 @@ TEST(NoErrorsGenerator) {
"(yield) \n ? yield : yield",
// If there is a newline before the next token, we don't look for RHS.
"yield\nfor (;;) {}",
+ "x = class extends (yield) {}",
+ "x = class extends f(yield) {}",
+ "x = class extends (null, yield) { }",
+ "x = class extends (a ? null : yield) { }",
NULL
};
// clang-format on
- static const ParserFlag always_flags[] = {
- kAllowHarmonyDestructuringAssignment};
- RunParserSyncTest(context_data, statement_data, kSuccess, nullptr, 0,
+ static const ParserFlag always_flags[] = {kAllowHarmonySloppy};
+ RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
always_flags, arraysize(always_flags));
}
@@ -2427,6 +2407,7 @@ TEST(ErrorsYieldGenerator) {
"var {foo: yield 24} = {a: 42};",
"[yield 24] = [42];",
"({a: yield 24} = {a: 42});",
+ "class C extends yield { }",
NULL
};
// clang-format on
@@ -2441,10 +2422,8 @@ TEST(ErrorsNameOfStrictFunction) {
const char* context_data[][2] = {
{ "function ", ""},
{ "\"use strict\"; function", ""},
- { "\"use strong\"; function", ""},
{ "function * ", ""},
{ "\"use strict\"; function * ", ""},
- { "\"use strong\"; function * ", ""},
{ NULL, NULL }
};
@@ -2459,9 +2438,7 @@ TEST(ErrorsNameOfStrictFunction) {
NULL
};
- static const ParserFlag always_flags[] = {kAllowStrongMode};
- RunParserSyncTest(context_data, statement_data, kError, NULL, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, statement_data, kError);
}
@@ -2525,9 +2502,6 @@ TEST(ErrorsIllegalWordsAsLabelsStrict) {
{"\"use strict\";", ""},
{"function test_func() {\"use strict\"; ", "}"},
{"() => {\"use strict\"; ", "}"},
- {"\"use strong\";", ""},
- {"function test_func() {\"use strong\"; ", "}"},
- {"() => {\"use strong\"; ", "}"},
{NULL, NULL}};
#define LABELLED_WHILE(NAME) #NAME ": while (true) { break " #NAME "; }",
@@ -2538,9 +2512,7 @@ TEST(ErrorsIllegalWordsAsLabelsStrict) {
};
#undef LABELLED_WHILE
- static const ParserFlag always_flags[] = {kAllowStrongMode};
- RunParserSyncTest(context_data, statement_data, kError, NULL, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, statement_data, kError);
}
@@ -2613,13 +2585,10 @@ TEST(NoErrorsParenthesizedDirectivePrologue) {
const char* statement_data[] = {
"(\"use strict\"); var eval;",
- "(\"use strong\"); var eval;",
NULL
};
- static const ParserFlag always_flags[] = {kAllowStrongMode};
- RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, statement_data, kSuccess);
}
@@ -2700,7 +2669,7 @@ TEST(DontRegressPreParserDataSizes) {
i::Handle<i::String> source =
factory->NewStringFromUtf8(i::CStrVector(program)).ToHandleChecked();
i::Handle<i::Script> script = factory->NewScript(source);
- i::Zone zone;
+ i::Zone zone(CcTest::i_isolate()->allocator());
i::ParseInfo info(&zone, script);
i::ScriptData* sd = NULL;
info.set_cached_data(&sd);
@@ -2747,7 +2716,6 @@ TEST(FunctionDeclaresItselfStrict) {
const char* strict_statement_data[] = {
"\"use strict\";",
- "\"use strong\";",
NULL
};
@@ -2756,11 +2724,8 @@ TEST(FunctionDeclaresItselfStrict) {
NULL
};
- static const ParserFlag always_flags[] = {kAllowStrongMode};
- RunParserSyncTest(context_data, strict_statement_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(context_data, non_strict_statement_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, strict_statement_data, kError);
+ RunParserSyncTest(context_data, non_strict_statement_data, kSuccess);
}
@@ -3028,11 +2993,6 @@ TEST(TooManyArguments) {
TEST(StrictDelete) {
// "delete <Identifier>" is not allowed in strict mode.
- const char* strong_context_data[][2] = {
- {"\"use strong\"; ", ""},
- { NULL, NULL }
- };
-
const char* strict_context_data[][2] = {
{"\"use strict\"; ", ""},
{ NULL, NULL }
@@ -3074,27 +3034,14 @@ TEST(StrictDelete) {
NULL
};
- static const ParserFlag always_flags[] = {kAllowStrongMode};
- RunParserSyncTest(strong_context_data, sloppy_statement_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(strict_context_data, sloppy_statement_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(sloppy_context_data, sloppy_statement_data, kSuccess, NULL,
- 0, always_flags, arraysize(always_flags));
+ RunParserSyncTest(strict_context_data, sloppy_statement_data, kError);
+ RunParserSyncTest(sloppy_context_data, sloppy_statement_data, kSuccess);
- RunParserSyncTest(strong_context_data, good_statement_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(strict_context_data, good_statement_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(sloppy_context_data, good_statement_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(strict_context_data, good_statement_data, kSuccess);
+ RunParserSyncTest(sloppy_context_data, good_statement_data, kSuccess);
- RunParserSyncTest(strong_context_data, bad_statement_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(strict_context_data, bad_statement_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(sloppy_context_data, bad_statement_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(strict_context_data, bad_statement_data, kError);
+ RunParserSyncTest(sloppy_context_data, bad_statement_data, kError);
}
@@ -3302,6 +3249,9 @@ TEST(FuncNameInferrerEscaped) {
TEST(RegressionLazyFunctionWithErrorWithArg) {
+ // Test only applies when lazy parsing.
+ if (!i::FLAG_lazy || (i::FLAG_ignition && i::FLAG_ignition_eager)) return;
+
// The bug occurred when a lazy function had an error which requires a
// parameter (such as "unknown label" here). The error message was processed
// before the AstValueFactory containing the error message string was
@@ -3345,7 +3295,7 @@ TEST(SerializationOfMaybeAssignmentFlag) {
i::Handle<i::String> source = factory->InternalizeUtf8String(program.start());
source->PrintOn(stdout);
printf("\n");
- i::Zone zone;
+ i::Zone zone(CcTest::i_isolate()->allocator());
v8::Local<v8::Value> v = CompileRun(src);
i::Handle<i::Object> o = v8::Utils::OpenHandle(*v);
i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(o);
@@ -3395,7 +3345,7 @@ TEST(IfArgumentsArrayAccessedThenParametersMaybeAssigned) {
i::Handle<i::String> source = factory->InternalizeUtf8String(program.start());
source->PrintOn(stdout);
printf("\n");
- i::Zone zone;
+ i::Zone zone(CcTest::i_isolate()->allocator());
v8::Local<v8::Value> v = CompileRun(src);
i::Handle<i::Object> o = v8::Utils::OpenHandle(*v);
i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(o);
@@ -3539,7 +3489,7 @@ TEST(InnerAssignment) {
printf("\n");
i::Handle<i::Script> script = factory->NewScript(source);
- i::Zone zone;
+ i::Zone zone(CcTest::i_isolate()->allocator());
i::ParseInfo info(&zone, script);
i::Parser parser(&info);
CHECK(parser.Parse(&info));
@@ -3590,27 +3540,6 @@ TEST(UseAsmUseCount) {
}
-TEST(UseConstLegacyCount) {
- i::FLAG_legacy_const = true;
- i::Isolate* isolate = CcTest::i_isolate();
- i::HandleScope scope(isolate);
- LocalContext env;
- int use_counts[v8::Isolate::kUseCounterFeatureCount] = {};
- global_use_counts = use_counts;
- CcTest::isolate()->SetUseCounterCallback(MockUseCounterCallback);
- CompileRun(
- "const x = 1;\n"
- "var foo = 1;\n"
- "const y = 1;\n"
- "function bar() {\n"
- " const z = 1; var baz = 1;\n"
- " function q() { const k = 42; }\n"
- "}");
- // Optimizing will double-count because the source is parsed twice.
- CHECK_EQ(i::FLAG_always_opt ? 8 : 4, use_counts[v8::Isolate::kLegacyConst]);
-}
-
-
TEST(StrictModeUseCount) {
i::Isolate* isolate = CcTest::i_isolate();
i::HandleScope scope(isolate);
@@ -3794,20 +3723,11 @@ TEST(ErrorsArrowFunctions) {
"(-a, b) => {}",
"(a, -b) => {}",
"{} => {}",
- "({}) => {}",
- "(a, {}) => {}",
- "({}, a) => {}",
"a++ => {}",
"(a++) => {}",
"(a++, b) => {}",
"(a, b++) => {}",
"[] => {}",
- "([]) => {}",
- "(a, []) => {}",
- "([], a) => {}",
- "(a = b) => {}",
- "(a = b, c) => {}",
- "(a, b = c) => {}",
"(foo ? bar : baz) => {}",
"(a, foo ? bar : baz) => {}",
"(foo ? bar : baz, a) => {}",
@@ -3841,6 +3761,7 @@ TEST(ErrorsArrowFunctions) {
TEST(NoErrorsArrowFunctions) {
// Tests that parser and preparser accept valid arrow functions syntax.
+ // clang-format off
const char* context_data[][2] = {
{"", ";"},
{"bar ? (", ") : baz;"},
@@ -3886,6 +3807,15 @@ TEST(NoErrorsArrowFunctions) {
"foo ? bar : baz => {}",
// Arrows with non-simple parameters.
+ "({}) => {}",
+ "(a, {}) => {}",
+ "({}, a) => {}",
+ "([]) => {}",
+ "(a, []) => {}",
+ "([], a) => {}",
+ "(a = b) => {}",
+ "(a = b, c) => {}",
+ "(a, b = c) => {}",
"({a}) => {}",
"(x = 9) => {}",
"(x, y = 9) => {}",
@@ -3897,18 +3827,15 @@ TEST(NoErrorsArrowFunctions) {
"(x = 9, ...a) => {}",
"(x, y = 9, ...a) => {}",
"(x, y = 9, {b}, z = 8, ...a) => {}",
- // TODO(wingo, rossberg): This is not accepted right now.
- // "({a} = {}) => {}",
- // "([x] = []) => {}",
+ "({a} = {}) => {}",
+ "([x] = []) => {}",
"({a = 42}) => {}",
"([x = 0]) => {}",
NULL
};
+ // clang-format on
- static const ParserFlag always_flags[] = {kAllowHarmonyDefaultParameters,
- kAllowHarmonyDestructuring};
- RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, statement_data, kSuccess);
static const ParserFlag flags[] = {kAllowLazy};
// In a context where a concise arrow body is parsed with [~In] variant,
@@ -3922,15 +3849,6 @@ TEST(NoErrorsArrowFunctions) {
TEST(ArrowFunctionsSloppyParameterNames) {
- const char* strong_context_data[][2] = {
- {"'use strong'; ", ";"},
- {"'use strong'; bar ? (", ") : baz;"},
- {"'use strong'; bar ? baz : (", ");"},
- {"'use strong'; bar, ", ";"},
- {"'use strong'; ", ", bar;"},
- {NULL, NULL}
- };
-
const char* strict_context_data[][2] = {
{"'use strict'; ", ";"},
{"'use strict'; bar ? (", ") : baz;"},
@@ -3970,13 +3888,8 @@ TEST(ArrowFunctionsSloppyParameterNames) {
NULL
};
- static const ParserFlag always_flags[] = {kAllowStrongMode};
- RunParserSyncTest(strong_context_data, statement_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(strict_context_data, statement_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(sloppy_context_data, statement_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(strict_context_data, statement_data, kError);
+ RunParserSyncTest(sloppy_context_data, statement_data, kSuccess);
}
@@ -3987,13 +3900,11 @@ TEST(ArrowFunctionsYieldParameterNameInGenerator) {
};
const char* strict_function_context_data[][2] = {
- {"(function f() {'use strong'; (", "); });"},
{"(function f() {'use strict'; (", "); });"},
{NULL, NULL}
};
const char* generator_context_data[][2] = {
- {"(function *g() {'use strong'; (", "); });"},
{"(function *g() {'use strict'; (", "); });"},
{"(function *g() { (", "); });"},
{NULL, NULL}
@@ -4011,14 +3922,9 @@ TEST(ArrowFunctionsYieldParameterNameInGenerator) {
NULL
};
- static const ParserFlag always_flags[] = { kAllowHarmonyDestructuring,
- kAllowStrongMode};
- RunParserSyncTest(sloppy_function_context_data, arrow_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(strict_function_context_data, arrow_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(generator_context_data, arrow_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(sloppy_function_context_data, arrow_data, kSuccess);
+ RunParserSyncTest(strict_function_context_data, arrow_data, kError);
+ RunParserSyncTest(generator_context_data, arrow_data, kError);
}
@@ -4963,8 +4869,8 @@ TEST(StatementParsingInForIn) {
"for(let x in {}, {}) {}", "for(const x in {}, {}) {}",
NULL};
- static const ParserFlag always_flags[] = {
- kAllowHarmonySloppy, kAllowHarmonySloppyLet, kNoLegacyConst};
+ static const ParserFlag always_flags[] = {kAllowHarmonySloppy,
+ kAllowHarmonySloppyLet};
RunParserSyncTest(context_data, data, kSuccess, nullptr, 0, always_flags,
arraysize(always_flags));
}
@@ -5153,8 +5059,8 @@ TEST(ForOfInOperator) {
"for(x of 'foo' in {}) {}", "for(var x of 'foo' in {}) {}",
"for(let x of 'foo' in {}) {}", "for(const x of 'foo' in {}) {}", NULL};
- static const ParserFlag always_flags[] = {
- kAllowHarmonySloppy, kAllowHarmonySloppyLet, kNoLegacyConst};
+ static const ParserFlag always_flags[] = {kAllowHarmonySloppy,
+ kAllowHarmonySloppyLet};
RunParserSyncTest(context_data, data, kSuccess, nullptr, 0, always_flags,
arraysize(always_flags));
}
@@ -5167,8 +5073,8 @@ TEST(ForOfYieldIdentifier) {
"for(let x of yield) {}", "for(const x of yield) {}",
NULL};
- static const ParserFlag always_flags[] = {
- kAllowHarmonySloppy, kAllowHarmonySloppyLet, kNoLegacyConst};
+ static const ParserFlag always_flags[] = {kAllowHarmonySloppy,
+ kAllowHarmonySloppyLet};
RunParserSyncTest(context_data, data, kSuccess, nullptr, 0, always_flags,
arraysize(always_flags));
}
@@ -5185,8 +5091,8 @@ TEST(ForOfYieldExpression) {
"function* g() { for(let x of yield) {} }",
"function* g() { for(const x of yield) {} }", NULL};
- static const ParserFlag always_flags[] = {
- kAllowHarmonySloppy, kAllowHarmonySloppyLet, kNoLegacyConst};
+ static const ParserFlag always_flags[] = {kAllowHarmonySloppy,
+ kAllowHarmonySloppyLet};
RunParserSyncTest(context_data, data, kSuccess, nullptr, 0, always_flags,
arraysize(always_flags));
}
@@ -5206,8 +5112,8 @@ TEST(ForOfExpressionError) {
"for(x of { y = 23 }) {}", "for(var x of { y = 23 }) {}",
"for(let x of { y = 23 }) {}", "for(const x of { y = 23 }) {}", NULL};
- static const ParserFlag always_flags[] = {
- kAllowHarmonySloppy, kAllowHarmonySloppyLet, kNoLegacyConst};
+ static const ParserFlag always_flags[] = {kAllowHarmonySloppy,
+ kAllowHarmonySloppyLet};
RunParserSyncTest(context_data, data, kError, nullptr, 0, always_flags,
arraysize(always_flags));
}
@@ -5445,9 +5351,7 @@ TEST(ParseRestParameters) {
"...[]",
"...[...[a, b, ...c]]",
NULL};
- static const ParserFlag always_flags[] = {kAllowHarmonyDestructuring};
- RunParserSyncTest(context_data, data, kSuccess, nullptr, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kSuccess);
}
@@ -5668,8 +5572,6 @@ TEST(ComputedPropertyNameShorthandError) {
TEST(BasicImportExportParsing) {
- i::FLAG_harmony_modules = true;
-
// clang-format off
const char* kSources[] = {
"export let x = 0;",
@@ -5732,16 +5634,16 @@ TEST(BasicImportExportParsing) {
// Show that parsing as a module works
{
i::Handle<i::Script> script = factory->NewScript(source);
- i::Zone zone;
+ i::Zone zone(CcTest::i_isolate()->allocator());
i::ParseInfo info(&zone, script);
i::Parser parser(&info);
info.set_module();
if (!parser.Parse(&info)) {
i::Handle<i::JSObject> exception_handle(
i::JSObject::cast(isolate->pending_exception()));
- i::Handle<i::String> message_string =
- i::Handle<i::String>::cast(i::Object::GetProperty(
- isolate, exception_handle, "message").ToHandleChecked());
+ i::Handle<i::String> message_string = i::Handle<i::String>::cast(
+ i::JSReceiver::GetProperty(isolate, exception_handle, "message")
+ .ToHandleChecked());
v8::base::OS::Print(
"Parser failed on:\n"
@@ -5757,7 +5659,7 @@ TEST(BasicImportExportParsing) {
// And that parsing a script does not.
{
i::Handle<i::Script> script = factory->NewScript(source);
- i::Zone zone;
+ i::Zone zone(CcTest::i_isolate()->allocator());
i::ParseInfo info(&zone, script);
i::Parser parser(&info);
info.set_global();
@@ -5768,8 +5670,6 @@ TEST(BasicImportExportParsing) {
TEST(ImportExportParsingErrors) {
- i::FLAG_harmony_modules = true;
-
// clang-format off
const char* kErrorSources[] = {
"export {",
@@ -5849,7 +5749,7 @@ TEST(ImportExportParsingErrors) {
factory->NewStringFromAsciiChecked(kErrorSources[i]);
i::Handle<i::Script> script = factory->NewScript(source);
- i::Zone zone;
+ i::Zone zone(CcTest::i_isolate()->allocator());
i::ParseInfo info(&zone, script);
i::Parser parser(&info);
info.set_module();
@@ -5859,8 +5759,6 @@ TEST(ImportExportParsingErrors) {
TEST(ModuleParsingInternals) {
- i::FLAG_harmony_modules = true;
-
i::Isolate* isolate = CcTest::i_isolate();
i::Factory* factory = isolate->factory();
v8::HandleScope handles(CcTest::isolate());
@@ -5879,7 +5777,7 @@ TEST(ModuleParsingInternals) {
"import 'q.js'";
i::Handle<i::String> source = factory->NewStringFromAsciiChecked(kSource);
i::Handle<i::Script> script = factory->NewScript(source);
- i::Zone zone;
+ i::Zone zone(CcTest::i_isolate()->allocator());
i::ParseInfo info(&zone, script);
i::Parser parser(&info);
info.set_module();
@@ -5968,12 +5866,6 @@ TEST(DeclarationsError) {
{"'use strict'; for (;;)", ""},
{"'use strict'; for (x in y)", ""},
{"'use strict'; do ", " while (false)"},
- {"'use strong'; if (true)", ""},
- {"'use strong'; if (false) {} else", ""},
- {"'use strong'; while (false)", ""},
- {"'use strong'; for (;;)", ""},
- {"'use strong'; for (x in y)", ""},
- {"'use strong'; do ", " while (false)"},
{NULL, NULL}};
const char* statement_data[] = {
@@ -5982,9 +5874,7 @@ TEST(DeclarationsError) {
"class C {}",
NULL};
- static const ParserFlag always_flags[] = {kAllowStrongMode};
- RunParserSyncTest(context_data, statement_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, statement_data, kError);
}
@@ -6000,10 +5890,9 @@ void TestLanguageMode(const char* source,
i::Handle<i::Script> script =
factory->NewScript(factory->NewStringFromAsciiChecked(source));
- i::Zone zone;
+ i::Zone zone(CcTest::i_isolate()->allocator());
i::ParseInfo info(&zone, script);
i::Parser parser(&info);
- parser.set_allow_strong_mode(true);
info.set_global();
parser.Parse(&info);
CHECK(info.literal() != NULL);
@@ -6014,24 +5903,15 @@ void TestLanguageMode(const char* source,
TEST(LanguageModeDirectives) {
TestLanguageMode("\"use nothing\"", i::SLOPPY);
TestLanguageMode("\"use strict\"", i::STRICT);
- TestLanguageMode("\"use strong\"", i::STRONG);
TestLanguageMode("var x = 1; \"use strict\"", i::SLOPPY);
- TestLanguageMode("var x = 1; \"use strong\"", i::SLOPPY);
-
- // Test that multiple directives ("use strict" / "use strong") put the parser
- // into the correct mode.
- TestLanguageMode("\"use strict\"; \"use strong\";", i::STRONG);
- TestLanguageMode("\"use strong\"; \"use strict\";", i::STRONG);
TestLanguageMode("\"use some future directive\"; \"use strict\";", i::STRICT);
- TestLanguageMode("\"use some future directive\"; \"use strong\";", i::STRONG);
}
TEST(PropertyNameEvalArguments) {
const char* context_data[][2] = {{"'use strict';", ""},
- {"'use strong';", ""},
{NULL, NULL}};
const char* statement_data[] = {
@@ -6066,9 +5946,7 @@ TEST(PropertyNameEvalArguments) {
NULL};
- static const ParserFlag always_flags[] = {kAllowStrongMode};
- RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, statement_data, kSuccess);
}
@@ -6078,10 +5956,6 @@ TEST(FunctionLiteralDuplicateParameters) {
{"(function(", ") { 'use strict'; })();"},
{"'use strict'; function fn(", ") {}; fn();"},
{"function fn(", ") { 'use strict'; }; fn();"},
- {"'use strong';(function(", "){})();"},
- {"(function(", ") { 'use strong'; })();"},
- {"'use strong'; function fn(", ") {}; fn();"},
- {"function fn(", ") { 'use strong'; }; fn();"},
{NULL, NULL}};
const char* sloppy_context_data[][2] =
@@ -6099,451 +5973,8 @@ TEST(FunctionLiteralDuplicateParameters) {
"a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, w",
NULL};
- static const ParserFlag always_flags[] = { kAllowStrongMode };
- RunParserSyncTest(strict_context_data, data, kError, NULL, 0, always_flags,
- arraysize(always_flags));
- RunParserSyncTest(sloppy_context_data, data, kSuccess, NULL, 0, NULL, 0);
-}
-
-
-TEST(VarForbiddenInStrongMode) {
- const char* strong_context_data[][2] =
- {{"'use strong'; ", ""},
- {"function f() {'use strong'; ", "}"},
- {"function f() {'use strong'; while (true) { ", "} }"},
- {NULL, NULL}};
-
- const char* strict_context_data[][2] =
- {{"'use strict'; ", ""},
- {"function f() {'use strict'; ", "}"},
- {"function f() {'use strict'; while (true) { ", "} }"},
- {NULL, NULL}};
-
- const char* sloppy_context_data[][2] =
- {{"", ""},
- {"function f() { ", "}"},
- {NULL, NULL}};
-
- const char* var_declarations[] = {
- "var x = 0;",
- "for (var i = 0; i < 10; i++) { }",
- NULL};
-
- const char* let_declarations[] = {
- "let x = 0;",
- "for (let i = 0; i < 10; i++) { }",
- NULL};
-
- const char* const_declarations[] = {
- "const x = 0;",
- NULL};
-
- static const ParserFlag always_flags[] = {kAllowStrongMode};
- RunParserSyncTest(strong_context_data, var_declarations, kError, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(strong_context_data, let_declarations, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(strong_context_data, const_declarations, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
-
- RunParserSyncTest(strict_context_data, var_declarations, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(strict_context_data, let_declarations, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
-
- RunParserSyncTest(sloppy_context_data, var_declarations, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
- // At the moment, let declarations are only available in strict mode.
- RunParserSyncTest(sloppy_context_data, let_declarations, kError, NULL, 0,
- always_flags, arraysize(always_flags));
-}
-
-
-TEST(StrongEmptySubStatements) {
- const char* sloppy_context_data[][2] = {{"", ""}, {NULL}};
- const char* strict_context_data[][2] = {{"'use strict';", ""}, {NULL}};
- const char* strong_context_data[][2] = {{"'use strong';", ""}, {NULL}};
-
- const char* data_error[] = {
- "if (1);",
- "if (1) {} else;",
- "while (1);",
- "do; while (1);",
- "for (;;);",
- "for (x in []);",
- "for (x of []);",
- "for (const x = 0;;);",
- "for (const x in []);",
- "for (const x of []);",
- NULL};
-
- const char* data_success[] = {
- "if (1) {} else {}",
- "switch(1) {}",
- "1+1;;",
- "1+1; ;",
- NULL};
-
- static const ParserFlag always_flags[] = {
- kAllowStrongMode,
- };
- RunParserSyncTest(sloppy_context_data, data_error, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(strict_context_data, data_error, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(strong_context_data, data_error, kError, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(strong_context_data, data_success, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
-}
-
-
-TEST(StrongForIn) {
- const char* sloppy_context_data[][2] = {{"", ""}, {NULL}};
- const char* strict_context_data[][2] = {{"'use strict';", ""}, {NULL}};
- const char* strong_context_data[][2] = {{"'use strong';", ""}, {NULL}};
-
- const char* data[] = {
- "for (x in []) {}",
- "for (const x in []) {}",
- NULL};
-
- static const ParserFlag always_flags[] = {
- kAllowStrongMode,
- };
- RunParserSyncTest(sloppy_context_data, data, kSuccess, NULL, 0, always_flags,
- arraysize(always_flags));
- RunParserSyncTest(strict_context_data, data, kSuccess, NULL, 0, always_flags,
- arraysize(always_flags));
- RunParserSyncTest(strong_context_data, data, kError, NULL, 0, always_flags,
- arraysize(always_flags));
-}
-
-
-TEST(StrongConstructorThis) {
- const char* sloppy_context_data[][2] = {{"", ""}, {NULL}};
- const char* strict_context_data[][2] = {{"'use strict';", ""}, {NULL}};
- const char* strong_context_data[][2] = {{"'use strong';", ""}, {NULL}};
-
- const char* error_data[] = {
- "class C { constructor() { this; } }",
- "class C { constructor() { this.a; } }",
- "class C { constructor() { this['a']; } }",
- "class C { constructor() { (this); } }",
- "class C { constructor() { this(); } }",
- // TODO(rossberg): arrow functions not handled yet.
- // "class C { constructor() { () => this; } }",
- "class C { constructor() { this.a = 0, 0; } }",
- "class C { constructor() { (this.a = 0); } }",
- // "class C { constructor() { (() => this.a = 0)(); } }",
- "class C { constructor() { { this.a = 0; } } }",
- "class C { constructor() { if (1) this.a = 0; } }",
- "class C { constructor() { label: this.a = 0; } }",
- "class C { constructor() { this.a = this.b; } }",
- "class C { constructor() { this.a = {b: 1}; this.a.b } }",
- "class C { constructor() { this.a = {b: 1}; this.a.b = 0 } }",
- "class C { constructor() { this.a = function(){}; this.a() } }",
- NULL};
-
- const char* success_data[] = {
- "class C { constructor() { this.a = 0; } }",
- "class C { constructor() { label: 0; this.a = 0; this.b = 6; } }",
- NULL};
-
- static const ParserFlag always_flags[] = {kAllowStrongMode};
- RunParserSyncTest(sloppy_context_data, error_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(strict_context_data, error_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(strong_context_data, error_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
-
- RunParserSyncTest(sloppy_context_data, success_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(strict_context_data, success_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(strong_context_data, success_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
-}
-
-
-TEST(StrongConstructorSuper) {
- const char* sloppy_context_data[][2] = {{"", ""}, {NULL}};
- const char* strict_context_data[][2] = {{"'use strict';", ""}, {NULL}};
- const char* strong_context_data[][2] = {{"'use strong';", ""}, {NULL}};
-
- const char* error_data[] = {
- "class C extends Object { constructor() {} }",
- "class C extends Object { constructor() { super.a; } }",
- "class C extends Object { constructor() { super['a']; } }",
- "class C extends Object { constructor() { super.a = 0; } }",
- "class C extends Object { constructor() { (super.a); } }",
- // TODO(rossberg): arrow functions do not handle super yet.
- // "class C extends Object { constructor() { () => super.a; } }",
- "class C extends Object { constructor() { super(), 0; } }",
- "class C extends Object { constructor() { (super()); } }",
- // "class C extends Object { constructor() { (() => super())(); } }",
- "class C extends Object { constructor() { { super(); } } }",
- "class C extends Object { constructor() { if (1) super(); } }",
- "class C extends Object { constructor() { label: super(); } }",
- "class C extends Object { constructor() { super(), super(); } }",
- "class C extends Object { constructor() { super(); super(); } }",
- "class C extends Object { constructor() { super(); (super()); } }",
- "class C extends Object { constructor() { super(); { super() } } }",
- "class C extends Object { constructor() { this.a = 0, super(); } }",
- "class C extends Object { constructor() { this.a = 0; super(); } }",
- "class C extends Object { constructor() { super(this.a = 0); } }",
- "class C extends Object { constructor() { super().a; } }",
- NULL};
-
- const char* success_data[] = {
- "class C extends Object { constructor() { super(); } }",
- "class C extends Object { constructor() { label: 66; super(); } }",
- "class C extends Object { constructor() { super(3); this.x = 0; } }",
- "class C extends Object { constructor() { 3; super(3); this.x = 0; } }",
- NULL};
-
- static const ParserFlag always_flags[] = {kAllowStrongMode};
- RunParserSyncTest(sloppy_context_data, error_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(strict_context_data, error_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(strong_context_data, error_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
-
- RunParserSyncTest(sloppy_context_data, success_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(strict_context_data, success_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(strong_context_data, success_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
-}
-
-
-TEST(StrongConstructorReturns) {
- const char* sloppy_context_data[][2] = {{"", ""}, {NULL}};
- const char* strict_context_data[][2] = {{"'use strict';", ""}, {NULL}};
- const char* strong_context_data[][2] = {{"'use strong';", ""}, {NULL}};
-
- const char* error_data[] = {
- "class C extends Object { constructor() { super(); return {}; } }",
- "class C extends Object { constructor() { super(); { return {}; } } }",
- "class C extends Object { constructor() { super(); if (1) return {}; } }",
- "class C extends Object { constructor() { return; super(); } }",
- "class C extends Object { constructor() { { return; } super(); } }",
- "class C extends Object { constructor() { if (0) return; super(); } }",
- "class C { constructor() { return; this.a = 0; } }",
- "class C { constructor() { { return; } this.a = 0; } }",
- "class C { constructor() { if (0) return; this.a = 0; } }",
- "class C { constructor() { this.a = 0; if (0) return; this.b = 0; } }",
- NULL};
-
- const char* success_data[] = {
- "class C extends Object { constructor() { super(); return; } }",
- "class C extends Object { constructor() { super(); { return } } }",
- "class C extends Object { constructor() { super(); if (1) return; } }",
- "class C { constructor() { this.a = 0; return; } }",
- "class C { constructor() { this.a = 0; { return; } } }",
- "class C { constructor() { this.a = 0; if (0) return; 65; } }",
- "class C extends Array { constructor() { super(); this.a = 9; return } }",
- NULL};
-
- static const ParserFlag always_flags[] = {kAllowStrongMode};
- RunParserSyncTest(sloppy_context_data, error_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(strict_context_data, error_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(strong_context_data, error_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
-
- RunParserSyncTest(sloppy_context_data, success_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(strict_context_data, success_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(strong_context_data, success_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
-}
-
-
-TEST(StrongConstructorDirective) {
- const char* context_data[][2] = {{"class c { ", " }"},
- {"(class c { ", " });"},
- {"let a = (class c { ", " });"},
- {NULL}};
-
- const char* error_data[] = {
- "constructor() { \"use strong\" }",
- "constructor(...rest) { \"use strong\" }",
- "foo() {} constructor() { \"use strong\" }",
- "foo(...rest) { \"use strict\" } constructor() { \"use strong\" }", NULL};
-
- const char* success_data[] = {
- "constructor() { \"use strict\" }", "foo() { \"use strong\" }",
- "foo() { \"use strong\" } constructor() {}", NULL};
-
- static const ParserFlag always_flags[] = {
- kAllowHarmonySloppy, kAllowHarmonySloppyLet, kAllowStrongMode};
-
- RunParserSyncTest(context_data, error_data, kError, NULL, 0, always_flags,
- arraysize(always_flags));
- RunParserSyncTest(context_data, success_data, kSuccess, NULL, 0, always_flags,
- arraysize(always_flags));
-}
-
-
-TEST(StrongUndefinedLocal) {
- const char* context_data[][2] = {{"", ""}, {NULL}};
-
- const char* data[] = {
- "function undefined() {'use strong';}",
- "function* undefined() {'use strong';}",
- "(function undefined() {'use strong';});",
- "{foo: (function undefined(){'use strong';})};",
- "(function* undefined() {'use strong';})",
- "{foo: (function* undefined(){'use strong';})};",
- "function foo(a, b, undefined, c, d) {'use strong';}",
- "function* foo(a, b, undefined, c, d) {'use strong';}",
- "(function foo(a, b, undefined, c, d) {'use strong';})",
- "{foo: (function foo(a, b, undefined, c, d) {'use strong';})};",
- "(function* foo(a, b, undefined, c, d) {'use strong';})",
- "{foo: (function* foo(a, b, undefined, c, d) {'use strong';})};",
- "class C { foo(a, b, undefined, c, d) {'use strong';} }",
- "class C { *foo(a, b, undefined, c, d) {'use strong';} }",
- "({ foo(a, b, undefined, c, d) {'use strong';} });",
- "{ *foo(a, b, undefined, c, d) {'use strong';} });",
- "class undefined {'use strong'}",
- "(class undefined {'use strong'});",
- NULL};
-
- static const ParserFlag always_flags[] = {
- kAllowStrongMode, kAllowHarmonySloppy
- };
-
- RunParserSyncTest(context_data, data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
-}
-
-
-TEST(StrongUndefinedArrow) {
- const char* sloppy_context_data[][2] = {{"", ""}, {NULL}};
- const char* strict_context_data[][2] = {{"'use strict';", ""}, {NULL}};
- const char* strong_context_data[][2] = {{"'use strong';", ""}, {NULL}};
-
- const char* data[] = {
- "(undefined => {return});",
- "((undefined, b, c) => {return});",
- "((a, undefined, c) => {return});",
- "((a, b, undefined) => {return});",
- NULL};
-
- const char* local_strong[] = {
- "(undefined => {'use strong';});",
- "((undefined, b, c) => {'use strong';});",
- "((a, undefined, c) => {'use strong';});",
- "((a, b, undefined) => {'use strong';});",
- NULL};
-
- static const ParserFlag always_flags[] = {kAllowStrongMode};
- RunParserSyncTest(sloppy_context_data, data, kSuccess, NULL, 0, always_flags,
- arraysize(always_flags));
- RunParserSyncTest(strict_context_data, data, kSuccess, NULL, 0, always_flags,
- arraysize(always_flags));
- RunParserSyncTest(strong_context_data, data, kError, NULL, 0, always_flags,
- arraysize(always_flags));
- RunParserSyncTest(sloppy_context_data, local_strong, kError, NULL, 0,
- always_flags, arraysize(always_flags));
-}
-
-
-TEST(StrongDirectEval) {
- const char* sloppy_context_data[][2] = {{"", ""}, {NULL}};
- const char* strong_context_data[][2] = {{"'use strong';", ""}, {NULL}};
-
- const char* error_data[] = {
- "eval();",
- "eval([]);",
- "(eval)();",
- "(((eval)))();",
- "eval('function f() {}');",
- "function f() {eval()}",
- NULL};
-
- const char* success_data[] = {
- "eval;",
- "eval`foo`;",
- "let foo = eval; foo();",
- "(1, eval)();",
- NULL};
-
- static const ParserFlag always_flags[] = {
- kAllowStrongMode
- };
-
- RunParserSyncTest(sloppy_context_data, error_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(strong_context_data, error_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(strong_context_data, success_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
-}
-
-
-TEST(StrongSwitchFallthrough) {
- const char* sloppy_context_data[][2] = {
- {"function f() { foo:for(;;) { switch(1) {", "};}}"},
- {NULL, NULL}
- };
- const char* strong_context_data[][2] = {
- {"function f() { 'use strong'; foo:for(;;) { switch(1) {", "};}}"},
- {NULL, NULL}
- };
-
- const char* data_success[] = {
- "",
- "case 1:",
- "case 1: case 2:",
- "case 1: break;",
- "default: throw new TypeError();",
- "case 1: case 2: null",
- "case 1: case 2: default: 1+1",
- "case 1: break; case 2: return; default:",
- "case 1: break foo; case 2: return; default:",
- "case 1: case 2: break; case 3: continue; case 4: default:",
- "case 1: case 2: break; case 3: continue foo; case 4: default:",
- "case 1: case 2: {{return;}} case 3: default:",
- "case 1: case 2: case 3: default: {1+1;{continue;}}",
- "case 1: case 2: {1+1;{1+1;{continue;}}} case 3: default:",
- "case 1: if (1) break; else continue; case 2: case 3: default:",
- "case 1: case 2: if (1) {{break;}} else break; case 3: default:",
- "case 1: if (1) break; else {if (1) break; else break;} case 2: default:",
- "case 1: if (1) {if (1) break; else break;} else break; case 2: default:",
- NULL};
-
- const char* data_error[] = {
- "case 1: case 2: (function(){return}); default:",
- "case 1: 1+1; case 2:",
- "case 1: bar: break bar; case 2: break;",
- "case 1: bar:return; case 2:",
- "case 1: bar:{ continue;} case 2:",
- "case 1: break; case 2: bar:{ throw new TypeError() } default:",
- "case 1: case 2: { bar:{ { break;} } } default: break;",
- "case 1: if (1) break; else {}; case 2: default:",
- "case 1: case 2: if (1) break; default:",
- "case 1: case 2: if (1) break; else 0; default:",
- "case 1: case 2: if (1) 0; else break; default:",
- "case 1: case 2: case 3: if (1) {} default:",
- "case 1: bar:if (1) break; else continue; case 2: case 3: default:",
- NULL};
-
- static const ParserFlag always_flags[] = {
- kAllowStrongMode
- };
- RunParserSyncTest(strong_context_data, data_success, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(sloppy_context_data, data_error, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(strong_context_data, data_error, kError, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(strict_context_data, data, kError);
+ RunParserSyncTest(sloppy_context_data, data, kSuccess);
}
@@ -6563,108 +5994,7 @@ TEST(ArrowFunctionASIErrors) {
}
-TEST(StrongModeFreeVariablesDeclaredByPreviousScript) {
- i::FLAG_strong_mode = true;
- i::FLAG_legacy_const = true;
- v8::V8::Initialize();
- v8::HandleScope scope(CcTest::isolate());
- v8::Context::Scope context_scope(v8::Context::New(CcTest::isolate()));
- v8::TryCatch try_catch(CcTest::isolate());
-
- // Introduce a bunch of variables, in all language modes.
- const char* script1 =
- "var my_var1 = 0; \n"
- "function my_func1() { } \n"
- "const my_const1 = 0; \n";
- CompileRun(v8_str(script1));
- CHECK(!try_catch.HasCaught());
-
- const char* script2 =
- "\"use strict\"; \n"
- "let my_var2 = 0; \n"
- "function my_func2() { } \n"
- "const my_const2 = 0 \n";
- CompileRun(v8_str(script2));
- CHECK(!try_catch.HasCaught());
-
- const char* script3 =
- "\"use strong\"; \n"
- "let my_var3 = 0; \n"
- "function my_func3() { } \n"
- "const my_const3 = 0; \n";
- CompileRun(v8_str(script3));
- CHECK(!try_catch.HasCaught());
-
- // Sloppy eval introduces variables in the surrounding scope.
- const char* script4 =
- "eval('var my_var4 = 0;') \n"
- "eval('function my_func4() { }') \n"
- "eval('const my_const4 = 0;') \n";
- CompileRun(v8_str(script4));
- CHECK(!try_catch.HasCaught());
-
- // Test that referencing these variables work.
- const char* script5 =
- "\"use strong\"; \n"
- "my_var1; \n"
- "my_func1; \n"
- "my_const1; \n"
- "my_var2; \n"
- "my_func2; \n"
- "my_const2; \n"
- "my_var3; \n"
- "my_func3; \n"
- "my_const3; \n"
- "my_var4; \n"
- "my_func4; \n"
- "my_const4; \n";
- CompileRun(v8_str(script5));
- CHECK(!try_catch.HasCaught());
-}
-
-
-TEST(StrongModeFreeVariablesDeclaredByLanguage) {
- i::FLAG_strong_mode = true;
- v8::V8::Initialize();
- v8::HandleScope scope(CcTest::isolate());
- v8::Context::Scope context_scope(v8::Context::New(CcTest::isolate()));
- v8::TryCatch try_catch(CcTest::isolate());
-
- const char* script1 =
- "\"use strong\"; \n"
- "Math; \n"
- "RegExp; \n";
- CompileRun(v8_str(script1));
- CHECK(!try_catch.HasCaught());
-}
-
-
-TEST(StrongModeFreeVariablesDeclaredInGlobalPrototype) {
- i::FLAG_strong_mode = true;
- v8::V8::Initialize();
- v8::HandleScope scope(CcTest::isolate());
- v8::Context::Scope context_scope(v8::Context::New(CcTest::isolate()));
- v8::TryCatch try_catch(CcTest::isolate());
-
- const char* script1 = "this.__proto__.my_var = 0;\n";
- CompileRun(v8_str(script1));
- CHECK(!try_catch.HasCaught());
-
- const char* script2 =
- "\"use strong\"; \n"
- "my_var; \n";
- CompileRun(v8_str(script2));
- CHECK(!try_catch.HasCaught());
-}
-
-
-static const ParserFlag kAllDestructuringFlags[] = {
- kAllowHarmonyDestructuring, kAllowHarmonyDestructuringAssignment,
- kAllowHarmonyDefaultParameters};
-
TEST(DestructuringPositiveTests) {
- i::FLAG_harmony_destructuring_bind = true;
-
const char* context_data[][2] = {{"'use strict'; let ", " = {};"},
{"var ", " = {};"},
{"'use strict'; const ", " = {};"},
@@ -6715,18 +6045,11 @@ TEST(DestructuringPositiveTests) {
"[a,,...rest]",
NULL};
// clang-format on
- static const ParserFlag always_flags[] = {kAllowHarmonyDestructuring};
- RunParserSyncTest(context_data, data, kSuccess, NULL, 0, always_flags,
- arraysize(always_flags));
- RunParserSyncTest(context_data, data, kSuccess, NULL, 0,
- kAllDestructuringFlags, arraysize(kAllDestructuringFlags));
+ RunParserSyncTest(context_data, data, kSuccess);
}
TEST(DestructuringNegativeTests) {
- i::FLAG_harmony_destructuring_bind = true;
- static const ParserFlag always_flags[] = {kAllowHarmonyDestructuring};
-
{ // All modes.
const char* context_data[][2] = {{"'use strict'; let ", " = {};"},
{"var ", " = {};"},
@@ -6811,11 +6134,7 @@ TEST(DestructuringNegativeTests) {
"{ *method() {} }",
NULL};
// clang-format on
- RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
- arraysize(always_flags));
- RunParserSyncTest(context_data, data, kError, NULL, 0,
- kAllDestructuringFlags,
- arraysize(kAllDestructuringFlags));
+ RunParserSyncTest(context_data, data, kError);
}
{ // All modes.
@@ -6834,11 +6153,7 @@ TEST(DestructuringNegativeTests) {
"() => x",
NULL};
// clang-format on
- RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
- arraysize(always_flags));
- RunParserSyncTest(context_data, data, kError, NULL, 0,
- kAllDestructuringFlags,
- arraysize(kAllDestructuringFlags));
+ RunParserSyncTest(context_data, data, kError);
}
{ // Strict mode.
@@ -6857,11 +6172,7 @@ TEST(DestructuringNegativeTests) {
"{ x : private }",
NULL};
// clang-format on
- RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
- arraysize(always_flags));
- RunParserSyncTest(context_data, data, kError, NULL, 0,
- kAllDestructuringFlags,
- arraysize(kAllDestructuringFlags));
+ RunParserSyncTest(context_data, data, kError);
}
{ // 'yield' in generators.
@@ -6878,11 +6189,7 @@ TEST(DestructuringNegativeTests) {
"{ x : yield }",
NULL};
// clang-format on
- RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
- arraysize(always_flags));
- RunParserSyncTest(context_data, data, kError, NULL, 0,
- kAllDestructuringFlags,
- arraysize(kAllDestructuringFlags));
+ RunParserSyncTest(context_data, data, kError);
}
{ // Declaration-specific errors
@@ -6906,8 +6213,7 @@ TEST(DestructuringNegativeTests) {
"[ a ]",
NULL};
// clang-format on
- static const ParserFlag always_flags[] = {kAllowHarmonyDestructuring,
- kAllowHarmonySloppyLet};
+ static const ParserFlag always_flags[] = {kAllowHarmonySloppyLet};
RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
arraysize(always_flags));
}
@@ -7080,14 +6386,9 @@ TEST(DestructuringAssignmentPositiveTests) {
NULL};
// clang-format on
- static const ParserFlag always_flags[] = {
- kAllowHarmonyDestructuringAssignment, kAllowHarmonyDestructuring,
- kAllowHarmonyDefaultParameters};
- RunParserSyncTest(context_data, data, kSuccess, NULL, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kSuccess);
- RunParserSyncTest(mixed_assignments_context_data, data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(mixed_assignments_context_data, data, kSuccess);
const char* empty_context_data[][2] = {
{"'use strict';", ""}, {"", ""}, {NULL, NULL}};
@@ -7108,8 +6409,7 @@ TEST(DestructuringAssignmentPositiveTests) {
"var x; (({ x = 10 } = { x = 20 } = {}) => x)({})",
NULL,
};
- RunParserSyncTest(empty_context_data, ambiguity_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(empty_context_data, ambiguity_data, kSuccess);
}
@@ -7208,11 +6508,7 @@ TEST(DestructuringAssignmentNegativeTests) {
NULL};
// clang-format on
- static const ParserFlag always_flags[] = {
- kAllowHarmonyDestructuringAssignment, kAllowHarmonyDestructuring,
- kAllowHarmonyDefaultParameters};
- RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kError);
const char* empty_context_data[][2] = {
{"'use strict';", ""}, {"", ""}, {NULL, NULL}};
@@ -7237,8 +6533,7 @@ TEST(DestructuringAssignmentNegativeTests) {
NULL,
};
- RunParserSyncTest(empty_context_data, ambiguity_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(empty_context_data, ambiguity_data, kError);
// Strict mode errors
const char* strict_context_data[][2] = {{"'use strict'; (", " = {})"},
@@ -7279,14 +6574,11 @@ TEST(DestructuringAssignmentNegativeTests) {
"[ ...(arguments) = 0 ]",
NULL};
- RunParserSyncTest(strict_context_data, strict_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(strict_context_data, strict_data, kError);
}
TEST(DestructuringDisallowPatternsInForVarIn) {
- i::FLAG_harmony_destructuring_bind = true;
- static const ParserFlag always_flags[] = {kAllowHarmonyDestructuring};
const char* context_data[][2] = {
{"", ""}, {"function f() {", "}"}, {NULL, NULL}};
// clang-format off
@@ -7295,22 +6587,18 @@ TEST(DestructuringDisallowPatternsInForVarIn) {
"for (let x = {} of null);",
NULL};
// clang-format on
- RunParserSyncTest(context_data, error_data, kError, NULL, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, error_data, kError);
// clang-format off
const char* success_data[] = {
"for (var x = {} in null);",
NULL};
// clang-format on
- RunParserSyncTest(context_data, success_data, kSuccess, NULL, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, success_data, kSuccess);
}
TEST(DestructuringDuplicateParams) {
- i::FLAG_harmony_destructuring_bind = true;
- static const ParserFlag always_flags[] = {kAllowHarmonyDestructuring};
const char* context_data[][2] = {{"'use strict';", ""},
{"function outer() { 'use strict';", "}"},
{nullptr, nullptr}};
@@ -7329,14 +6617,11 @@ TEST(DestructuringDuplicateParams) {
"function f(x, x, {a}) {}",
nullptr};
// clang-format on
- RunParserSyncTest(context_data, error_data, kError, NULL, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, error_data, kError);
}
TEST(DestructuringDuplicateParamsSloppy) {
- i::FLAG_harmony_destructuring_bind = true;
- static const ParserFlag always_flags[] = {kAllowHarmonyDestructuring};
const char* context_data[][2] = {
{"", ""}, {"function outer() {", "}"}, {nullptr, nullptr}};
@@ -7350,14 +6635,11 @@ TEST(DestructuringDuplicateParamsSloppy) {
"function f(x, x, {a}) {}",
nullptr};
// clang-format on
- RunParserSyncTest(context_data, error_data, kError, NULL, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, error_data, kError);
}
TEST(DestructuringDisallowPatternsInSingleParamArrows) {
- i::FLAG_harmony_destructuring_bind = true;
- static const ParserFlag always_flags[] = {kAllowHarmonyDestructuring};
const char* context_data[][2] = {{"'use strict';", ""},
{"function outer() { 'use strict';", "}"},
{"", ""},
@@ -7370,8 +6652,7 @@ TEST(DestructuringDisallowPatternsInSingleParamArrows) {
"var f = {x,y} => {};",
nullptr};
// clang-format on
- RunParserSyncTest(context_data, error_data, kError, NULL, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, error_data, kError);
}
@@ -7383,7 +6664,6 @@ TEST(DefaultParametersYieldInInitializers) {
};
const char* strict_function_context_data[][2] = {
- {"'use strong'; (function f(", ") { });"},
{"'use strict'; (function f(", ") { });"},
{NULL, NULL}
};
@@ -7394,15 +6674,16 @@ TEST(DefaultParametersYieldInInitializers) {
};
const char* strict_arrow_context_data[][2] = {
- {"'use strong'; ((", ")=>{});"},
{"'use strict'; ((", ")=>{});"},
{NULL, NULL}
};
const char* generator_context_data[][2] = {
- {"'use strong'; (function *g(", ") { });"},
{"'use strict'; (function *g(", ") { });"},
{"(function *g(", ") { });"},
+ // Arrow function within generator has the same rules.
+ {"'use strict'; (function *g() { (", ") => {} });"},
+ {"(function *g() { (", ") => {} });"},
{NULL, NULL}
};
@@ -7433,25 +6714,32 @@ TEST(DefaultParametersYieldInInitializers) {
NULL
};
+ // Because classes are always in strict mode, these are always errors.
+ const char* always_error_param_data[] = {
+ "x = class extends (yield) { }",
+ "x = class extends f(yield) { }",
+ "x = class extends (null, yield) { }",
+ "x = class extends (a ? null : yield) { }",
+ "[x] = [class extends (a ? null : yield) { }]",
+ "[x = class extends (a ? null : yield) { }]",
+ "[x = class extends (a ? null : yield) { }] = [null]",
+ "x = class { [yield]() { } }",
+ "x = class { static [yield]() { } }",
+ "x = class { [(yield, 1)]() { } }",
+ "x = class { [y = (yield, 1)]() { } }",
+ NULL
+ };
// clang-format on
- static const ParserFlag always_flags[] = {kAllowHarmonyDestructuring,
- kAllowHarmonyDefaultParameters,
- kAllowStrongMode};
-
- RunParserSyncTest(sloppy_function_context_data, parameter_data, kSuccess,
- NULL, 0, always_flags, arraysize(always_flags));
- RunParserSyncTest(sloppy_arrow_context_data, parameter_data, kSuccess, NULL,
- 0, always_flags, arraysize(always_flags));
-
- RunParserSyncTest(strict_function_context_data, parameter_data, kError, NULL,
- 0, always_flags, arraysize(always_flags));
- RunParserSyncTest(strict_arrow_context_data, parameter_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(generator_context_data, parameter_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
-}
+ RunParserSyncTest(sloppy_function_context_data, parameter_data, kSuccess);
+ RunParserSyncTest(sloppy_arrow_context_data, parameter_data, kSuccess);
+ RunParserSyncTest(strict_function_context_data, parameter_data, kError);
+ RunParserSyncTest(strict_arrow_context_data, parameter_data, kError);
+
+ RunParserSyncTest(generator_context_data, parameter_data, kError);
+ RunParserSyncTest(generator_context_data, always_error_param_data, kError);
+}
TEST(SpreadArray) {
const char* context_data[][2] = {
@@ -7544,32 +6832,6 @@ TEST(NewTarget) {
}
-TEST(ConstLegacy) {
- // clang-format off
- const char* context_data[][2] = {
- {"", ""},
- {"{", "}"},
- {NULL, NULL}
- };
-
- const char* data[] = {
- "const x",
- "const x = 1",
- "for (const x = 1; x < 1; x++) {}",
- "for (const x in {}) {}",
- "for (const x of []) {}",
- NULL
- };
- // clang-format on
-
-
- static const ParserFlag always_flags[] = {kNoLegacyConst};
- RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
- arraysize(always_flags));
- RunParserSyncTest(context_data, data, kSuccess);
-}
-
-
TEST(ConstSloppy) {
// clang-format off
const char* context_data[][2] = {
@@ -7586,8 +6848,7 @@ TEST(ConstSloppy) {
NULL
};
// clang-format on
- static const ParserFlag always_flags[] = {kAllowHarmonySloppy,
- kNoLegacyConst};
+ static const ParserFlag always_flags[] = {kAllowHarmonySloppy};
RunParserSyncTest(context_data, data, kSuccess, NULL, 0, always_flags,
arraysize(always_flags));
}
@@ -7623,59 +6884,24 @@ TEST(LanguageModeDirectivesNonSimpleParameterListErrors) {
// TC39 deemed "use strict" directives to be an error when occurring in the
// body of a function with non-simple parameter list, on 29/7/2015.
// https://goo.gl/ueA7Ln
- //
- // In V8, this also applies to "use strong " directives.
const char* context_data[][2] = {
{"function f(", ") { 'use strict'; }"},
- {"function f(", ") { 'use strong'; }"},
{"function* g(", ") { 'use strict'; }"},
- {"function* g(", ") { 'use strong'; }"},
{"class c { foo(", ") { 'use strict' }"},
- {"class c { foo(", ") { 'use strong' }"},
{"var a = (", ") => { 'use strict'; }"},
- {"var a = (", ") => { 'use strong'; }"},
{"var o = { m(", ") { 'use strict'; }"},
- {"var o = { m(", ") { 'use strong'; }"},
{"var o = { *gm(", ") { 'use strict'; }"},
- {"var o = { *gm(", ") { 'use strong'; }"},
{"var c = { m(", ") { 'use strict'; }"},
- {"var c = { m(", ") { 'use strong'; }"},
{"var c = { *gm(", ") { 'use strict'; }"},
- {"var c = { *gm(", ") { 'use strong'; }"},
{"'use strict'; function f(", ") { 'use strict'; }"},
- {"'use strict'; function f(", ") { 'use strong'; }"},
{"'use strict'; function* g(", ") { 'use strict'; }"},
- {"'use strict'; function* g(", ") { 'use strong'; }"},
{"'use strict'; class c { foo(", ") { 'use strict' }"},
- {"'use strict'; class c { foo(", ") { 'use strong' }"},
{"'use strict'; var a = (", ") => { 'use strict'; }"},
- {"'use strict'; var a = (", ") => { 'use strong'; }"},
{"'use strict'; var o = { m(", ") { 'use strict'; }"},
- {"'use strict'; var o = { m(", ") { 'use strong'; }"},
{"'use strict'; var o = { *gm(", ") { 'use strict'; }"},
- {"'use strict'; var o = { *gm(", ") { 'use strong'; }"},
{"'use strict'; var c = { m(", ") { 'use strict'; }"},
- {"'use strict'; var c = { m(", ") { 'use strong'; }"},
{"'use strict'; var c = { *gm(", ") { 'use strict'; }"},
- {"'use strict'; var c = { *gm(", ") { 'use strong'; }"},
-
- {"'use strong'; function f(", ") { 'use strict'; }"},
- {"'use strong'; function f(", ") { 'use strong'; }"},
- {"'use strong'; function* g(", ") { 'use strict'; }"},
- {"'use strong'; function* g(", ") { 'use strong'; }"},
- {"'use strong'; class c { foo(", ") { 'use strict' }"},
- {"'use strong'; class c { foo(", ") { 'use strong' }"},
- {"'use strong'; var a = (", ") => { 'use strict'; }"},
- {"'use strong'; var a = (", ") => { 'use strong'; }"},
- {"'use strong'; var o = { m(", ") { 'use strict'; }"},
- {"'use strong'; var o = { m(", ") { 'use strong'; }"},
- {"'use strong'; var o = { *gm(", ") { 'use strict'; }"},
- {"'use strong'; var o = { *gm(", ") { 'use strong'; }"},
- {"'use strong'; var c = { m(", ") { 'use strict'; }"},
- {"'use strong'; var c = { m(", ") { 'use strong'; }"},
- {"'use strong'; var c = { *gm(", ") { 'use strict'; }"},
- {"'use strong'; var c = { *gm(", ") { 'use strong'; }"},
{NULL, NULL}};
@@ -7696,9 +6922,7 @@ TEST(LanguageModeDirectivesNonSimpleParameterListErrors) {
"{ initializedBindingPattern } = { initializedBindingPattern: true }",
NULL};
- static const ParserFlag always_flags[] = {
- kAllowHarmonyDefaultParameters, kAllowHarmonyDestructuring,
- kAllowHarmonySloppy, kAllowStrongMode};
+ static const ParserFlag always_flags[] = {kAllowHarmonySloppy};
RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
arraysize(always_flags));
}
@@ -7724,18 +6948,12 @@ TEST(LetSloppyOnly) {
"for (var [let] in {}) {}",
"var let",
"var [let] = []",
- "for (const let = 1; let < 1; let++) {}",
- "for (const let in {}) {}",
- "for (const [let] = 1; let < 1; let++) {}",
- "for (const [let] in {}) {}",
- "const let",
- "const [let] = []",
NULL
};
// clang-format on
- static const ParserFlag always_flags[] = {
- kAllowHarmonySloppy, kAllowHarmonySloppyLet, kAllowHarmonyDestructuring};
+ static const ParserFlag always_flags[] = {kAllowHarmonySloppy,
+ kAllowHarmonySloppyLet};
RunParserSyncTest(context_data, data, kSuccess, NULL, 0, always_flags,
arraysize(always_flags));
@@ -7771,9 +6989,8 @@ TEST(LetSloppyOnly) {
};
// clang-format on
- static const ParserFlag fail_flags[] = {
- kAllowHarmonySloppy, kAllowHarmonySloppyLet, kNoLegacyConst,
- kAllowHarmonyDestructuring};
+ static const ParserFlag fail_flags[] = {kAllowHarmonySloppy,
+ kAllowHarmonySloppyLet};
RunParserSyncTest(context_data, fail_data, kError, NULL, 0, fail_flags,
arraysize(fail_flags));
}
@@ -7875,8 +7092,7 @@ TEST(EscapedKeywords) {
};
// clang-format on
- static const ParserFlag always_flags[] = {kAllowHarmonySloppy,
- kAllowHarmonyDestructuring};
+ static const ParserFlag always_flags[] = {kAllowHarmonySloppy};
RunParserSyncTest(sloppy_context_data, fail_data, kError, NULL, 0,
always_flags, arraysize(always_flags));
RunParserSyncTest(strict_context_data, fail_data, kError, NULL, 0,
@@ -7898,8 +7114,8 @@ TEST(EscapedKeywords) {
RunParserSyncTest(strict_context_data, let_data, kError, NULL, 0,
always_flags, arraysize(always_flags));
- static const ParserFlag sloppy_let_flags[] = {
- kAllowHarmonySloppy, kAllowHarmonySloppyLet, kAllowHarmonyDestructuring};
+ static const ParserFlag sloppy_let_flags[] = {kAllowHarmonySloppy,
+ kAllowHarmonySloppyLet};
RunParserSyncTest(sloppy_context_data, let_data, kSuccess, NULL, 0,
sloppy_let_flags, arraysize(sloppy_let_flags));
@@ -7954,6 +7170,36 @@ TEST(MiscSyntaxErrors) {
RunParserSyncTest(context_data, error_data, kError, NULL, 0, NULL, 0);
}
+
+TEST(EscapeSequenceErrors) {
+ // clang-format off
+ const char* context_data[][2] = {
+ { "'", "'" },
+ { "\"", "\"" },
+ { "`", "`" },
+ { "`${'", "'}`" },
+ { "`${\"", "\"}`" },
+ { "`${`", "`}`" },
+ { "f(tag`", "`);" },
+ { NULL, NULL }
+ };
+ const char* error_data[] = {
+ "\\uABCG",
+ "\\u{ZZ}",
+ "\\u{FFZ}",
+ "\\u{FFFFFFFFFF }",
+ "\\u{110000}",
+ "\\u{110000",
+ "\\u{FFFD }",
+ "\\xZF",
+ NULL
+ };
+ // clang-format on
+
+ RunParserSyncTest(context_data, error_data, kError, NULL, 0, NULL, 0);
+}
+
+
TEST(FunctionSentErrors) {
// clang-format off
const char* context_data[][2] = {
@@ -7988,3 +7234,181 @@ TEST(NewTargetErrors) {
// clang-format on
RunParserSyncTest(context_data, error_data, kError);
}
+
+TEST(FunctionDeclarationError) {
+ // clang-format off
+ const char* strict_context[][2] = {
+ { "'use strict';", "" },
+ { "'use strict'; { ", "}" },
+ {"(function() { 'use strict';", "})()"},
+ {"(function() { 'use strict'; {", "} })()"},
+ { NULL, NULL }
+ };
+ const char* sloppy_context[][2] = {
+ { "", "" },
+ { "{", "}" },
+ {"(function() {", "})()"},
+ {"(function() { {", "} })()"},
+ { NULL, NULL }
+ };
+ // Invalid in all contexts
+ const char* error_data[] = {
+ "try function foo() {} catch (e) {}",
+ NULL
+ };
+ // Valid in sloppy mode only, and only when the
+ // --harmony-restrictive-declarations flag is off
+ const char* unrestricted_data[] = {
+ "do function foo() {} while (0);",
+ "for (;false;) function foo() {}",
+ "for (var i = 0; i < 1; i++) function f() { };",
+ "for (var x in {a: 1}) function f() { };",
+ "for (var x in {}) function f() { };",
+ "for (var x in {}) function foo() {}",
+ "for (x in {a: 1}) function f() { };",
+ "for (x in {}) function f() { };",
+ "var x; for (x in {}) function foo() {}",
+ "with ({}) function f() { };",
+ "do label: function foo() {} while (0);",
+ "for (;false;) label: function foo() {}",
+ "for (var i = 0; i < 1; i++) label: function f() { };",
+ "for (var x in {a: 1}) label: function f() { };",
+ "for (var x in {}) label: function f() { };",
+ "for (var x in {}) label: function foo() {}",
+ "for (x in {a: 1}) label: function f() { };",
+ "for (x in {}) label: function f() { };",
+ "var x; for (x in {}) label: function foo() {}",
+ "with ({}) label: function f() { };",
+ "if (true) label: function f() {}",
+ "if (true) {} else label: function f() {}",
+ NULL
+ };
+ // Valid only in sloppy mode, with or without
+ // --harmony-restrictive-declarations
+ const char* sloppy_data[] = {
+ "if (true) function foo() {}",
+ "if (false) {} else function f() { };",
+ "label: function f() { }",
+ "label: if (true) function f() { }",
+ "label: if (true) {} else function f() { }",
+ NULL
+ };
+ // clang-format on
+
+ static const ParserFlag restrictive_flags[] = {
+ kAllowHarmonyRestrictiveDeclarations};
+
+ // Nothing parses in strict mode without a SyntaxError
+ RunParserSyncTest(strict_context, error_data, kError);
+ RunParserSyncTest(strict_context, error_data, kError, NULL, 0,
+ restrictive_flags, arraysize(restrictive_flags));
+ RunParserSyncTest(strict_context, unrestricted_data, kError);
+ RunParserSyncTest(strict_context, unrestricted_data, kError, NULL, 0,
+ restrictive_flags, arraysize(restrictive_flags));
+ RunParserSyncTest(strict_context, sloppy_data, kError);
+ RunParserSyncTest(strict_context, sloppy_data, kError, NULL, 0,
+ restrictive_flags, arraysize(restrictive_flags));
+
+ // In sloppy mode, some things are successful, depending on the flag
+ RunParserSyncTest(sloppy_context, error_data, kError);
+ RunParserSyncTest(sloppy_context, error_data, kError, NULL, 0,
+ restrictive_flags, arraysize(restrictive_flags));
+ RunParserSyncTest(sloppy_context, unrestricted_data, kSuccess);
+ RunParserSyncTest(sloppy_context, unrestricted_data, kError, NULL, 0,
+ restrictive_flags, arraysize(restrictive_flags));
+ RunParserSyncTest(sloppy_context, sloppy_data, kSuccess);
+ RunParserSyncTest(sloppy_context, sloppy_data, kSuccess, restrictive_flags,
+ arraysize(restrictive_flags));
+}
+
+TEST(ExponentiationOperator) {
+ // clang-format off
+ const char* context_data[][2] = {
+ { "var O = { p: 1 }, x = 10; ; if (", ") { foo(); }" },
+ { "var O = { p: 1 }, x = 10; ; (", ")" },
+ { "var O = { p: 1 }, x = 10; foo(", ")" },
+ { NULL, NULL }
+ };
+ const char* data[] = {
+ "(delete O.p) ** 10",
+ "(delete x) ** 10",
+ "(~O.p) ** 10",
+ "(~x) ** 10",
+ "(!O.p) ** 10",
+ "(!x) ** 10",
+ "(+O.p) ** 10",
+ "(+x) ** 10",
+ "(-O.p) ** 10",
+ "(-x) ** 10",
+ "(typeof O.p) ** 10",
+ "(typeof x) ** 10",
+ "(void 0) ** 10",
+ "(void O.p) ** 10",
+ "(void x) ** 10",
+ "++O.p ** 10",
+ "++x ** 10",
+ "--O.p ** 10",
+ "--x ** 10",
+ "O.p++ ** 10",
+ "x++ ** 10",
+ "O.p-- ** 10",
+ "x-- ** 10",
+ NULL
+ };
+ // clang-format on
+
+ static const ParserFlag always_flags[] = {
+ kAllowHarmonyExponentiationOperator};
+ RunParserSyncTest(context_data, data, kSuccess, NULL, 0, always_flags,
+ arraysize(always_flags));
+}
+
+TEST(ExponentiationOperatorErrors) {
+ // clang-format off
+ const char* context_data[][2] = {
+ { "var O = { p: 1 }, x = 10; ; if (", ") { foo(); }" },
+ { "var O = { p: 1 }, x = 10; ; (", ")" },
+ { "var O = { p: 1 }, x = 10; foo(", ")" },
+ { NULL, NULL }
+ };
+ const char* error_data[] = {
+ "delete O.p ** 10",
+ "delete x ** 10",
+ "~O.p ** 10",
+ "~x ** 10",
+ "!O.p ** 10",
+ "!x ** 10",
+ "+O.p ** 10",
+ "+x ** 10",
+ "-O.p ** 10",
+ "-x ** 10",
+ "typeof O.p ** 10",
+ "typeof x ** 10",
+ "void ** 10",
+ "void O.p ** 10",
+ "void x ** 10",
+ "++delete O.p ** 10",
+ "--delete O.p ** 10",
+ "++~O.p ** 10",
+ "++~x ** 10",
+ "--!O.p ** 10",
+ "--!x ** 10",
+ "++-O.p ** 10",
+ "++-x ** 10",
+ "--+O.p ** 10",
+ "--+x ** 10",
+ "[ x ] **= [ 2 ]",
+ "[ x **= 2 ] = [ 2 ]",
+ "{ x } **= { x: 2 }",
+ "{ x: x **= 2 ] = { x: 2 }",
+ // TODO(caitp): a Call expression as LHS should be an early ReferenceError!
+ // "Array() **= 10",
+ NULL
+ };
+ // clang-format on
+
+ static const ParserFlag always_flags[] = {
+ kAllowHarmonyExponentiationOperator};
+ RunParserSyncTest(context_data, error_data, kError, NULL, 0, always_flags,
+ arraysize(always_flags));
+}
diff --git a/deps/v8/test/cctest/test-platform.cc b/deps/v8/test/cctest/test-platform.cc
index 2645a3dc18..6012fd4ae1 100644
--- a/deps/v8/test/cctest/test-platform.cc
+++ b/deps/v8/test/cctest/test-platform.cc
@@ -24,6 +24,10 @@ void GetStackPointer(const v8::FunctionCallbackInfo<v8::Value>& args) {
__asm__ __volatile__("sw $sp, %0" : "=g"(sp_addr));
#elif V8_HOST_ARCH_MIPS64
__asm__ __volatile__("sd $sp, %0" : "=g"(sp_addr));
+#elif defined(__s390x__) || defined(_ARCH_S390X)
+ __asm__ __volatile__("stg 15, %0" : "=g"(sp_addr));
+#elif defined(__s390__) || defined(_ARCH_S390)
+ __asm__ __volatile__("st 15, %0" : "=g"(sp_addr));
#elif defined(__PPC64__) || defined(_ARCH_PPC64)
__asm__ __volatile__("std 1, %0" : "=g"(sp_addr));
#elif defined(__PPC__) || defined(_ARCH_PPC)
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index fa7dc155ee..48633f5da1 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -132,7 +132,7 @@ TEST(ProfileTreeAddPathFromEnd) {
CHECK(!helper.Walk(&entry3));
CodeEntry* path[] = {NULL, &entry3, NULL, &entry2, NULL, NULL, &entry1, NULL};
- Vector<CodeEntry*> path_vec(path, sizeof(path) / sizeof(path[0]));
+ std::vector<CodeEntry*> path_vec(path, path + arraysize(path));
tree.AddPathFromEnd(path_vec);
CHECK(!helper.Walk(&entry2));
CHECK(!helper.Walk(&entry3));
@@ -162,7 +162,7 @@ TEST(ProfileTreeAddPathFromEnd) {
CHECK_EQ(2u, node3->self_ticks());
CodeEntry* path2[] = {&entry2, &entry2, &entry1};
- Vector<CodeEntry*> path2_vec(path2, sizeof(path2) / sizeof(path2[0]));
+ std::vector<CodeEntry*> path2_vec(path2, path2 + arraysize(path2));
tree.AddPathFromEnd(path2_vec);
CHECK(!helper.Walk(&entry2));
CHECK(!helper.Walk(&entry3));
@@ -189,8 +189,7 @@ TEST(ProfileTreeCalculateTotalTicks) {
CodeEntry entry1(i::Logger::FUNCTION_TAG, "aaa");
CodeEntry* e1_path[] = {&entry1};
- Vector<CodeEntry*> e1_path_vec(
- e1_path, sizeof(e1_path) / sizeof(e1_path[0]));
+ std::vector<CodeEntry*> e1_path_vec(e1_path, e1_path + arraysize(e1_path));
ProfileTree single_child_tree(CcTest::i_isolate());
single_child_tree.AddPathFromEnd(e1_path_vec);
@@ -204,8 +203,8 @@ TEST(ProfileTreeCalculateTotalTicks) {
CodeEntry entry2(i::Logger::FUNCTION_TAG, "bbb");
CodeEntry* e2_e1_path[] = {&entry2, &entry1};
- Vector<CodeEntry*> e2_e1_path_vec(e2_e1_path,
- sizeof(e2_e1_path) / sizeof(e2_e1_path[0]));
+ std::vector<CodeEntry*> e2_e1_path_vec(e2_e1_path,
+ e2_e1_path + arraysize(e2_e1_path));
ProfileTree flat_tree(CcTest::i_isolate());
ProfileTreeTestHelper flat_helper(&flat_tree);
@@ -227,12 +226,10 @@ TEST(ProfileTreeCalculateTotalTicks) {
CHECK_EQ(2u, node1->self_ticks());
CodeEntry* e2_path[] = {&entry2};
- Vector<CodeEntry*> e2_path_vec(
- e2_path, sizeof(e2_path) / sizeof(e2_path[0]));
+ std::vector<CodeEntry*> e2_path_vec(e2_path, e2_path + arraysize(e2_path));
CodeEntry entry3(i::Logger::FUNCTION_TAG, "ccc");
CodeEntry* e3_path[] = {&entry3};
- Vector<CodeEntry*> e3_path_vec(
- e3_path, sizeof(e3_path) / sizeof(e3_path[0]));
+ std::vector<CodeEntry*> e3_path_vec(e3_path, e3_path + arraysize(e3_path));
ProfileTree wide_tree(CcTest::i_isolate());
ProfileTreeTestHelper wide_helper(&wide_tree);
@@ -649,7 +646,7 @@ int GetFunctionLineNumber(LocalContext* env, const char* name) {
->Get(v8::Isolate::GetCurrent()->GetCurrentContext(),
v8_str(name))
.ToLocalChecked())));
- CodeEntry* func_entry = code_map->FindEntry(func->code()->address());
+ CodeEntry* func_entry = code_map->FindEntry(func->abstract_code()->address());
if (!func_entry)
FATAL(name);
return func_entry->line_number();
@@ -675,10 +672,13 @@ TEST(LineNumber) {
profiler->processor()->StopSynchronously();
+ bool is_lazy = i::FLAG_lazy && !(i::FLAG_ignition && i::FLAG_ignition_eager);
CHECK_EQ(1, GetFunctionLineNumber(&env, "foo_at_the_first_line"));
- CHECK_EQ(0, GetFunctionLineNumber(&env, "lazy_func_at_forth_line"));
+ CHECK_EQ(is_lazy ? 0 : 4,
+ GetFunctionLineNumber(&env, "lazy_func_at_forth_line"));
CHECK_EQ(2, GetFunctionLineNumber(&env, "bar_at_the_second_line"));
- CHECK_EQ(0, GetFunctionLineNumber(&env, "lazy_func_at_6th_line"));
+ CHECK_EQ(is_lazy ? 0 : 6,
+ GetFunctionLineNumber(&env, "lazy_func_at_6th_line"));
profiler->StopProfiling("LineNumber");
}
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index 22321c3664..0a153b78e0 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -54,6 +54,11 @@
#include "src/arm64/macro-assembler-arm64.h"
#include "src/regexp/arm64/regexp-macro-assembler-arm64.h"
#endif
+#if V8_TARGET_ARCH_S390
+#include "src/regexp/s390/regexp-macro-assembler-s390.h"
+#include "src/s390/assembler-s390.h"
+#include "src/s390/macro-assembler-s390.h"
+#endif
#if V8_TARGET_ARCH_PPC
#include "src/ppc/assembler-ppc.h"
#include "src/ppc/macro-assembler-ppc.h"
@@ -92,7 +97,7 @@ using namespace v8::internal;
static bool CheckParse(const char* input) {
v8::HandleScope scope(CcTest::isolate());
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
RegExpCompileData result;
return v8::internal::RegExpParser::ParseRegExp(
@@ -103,7 +108,7 @@ static bool CheckParse(const char* input) {
static void CheckParseEq(const char* input, const char* expected,
bool unicode = false) {
v8::HandleScope scope(CcTest::isolate());
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
RegExpCompileData result;
JSRegExp::Flags flags = JSRegExp::kNone;
@@ -123,7 +128,7 @@ static void CheckParseEq(const char* input, const char* expected,
static bool CheckSimple(const char* input) {
v8::HandleScope scope(CcTest::isolate());
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
RegExpCompileData result;
CHECK(v8::internal::RegExpParser::ParseRegExp(
@@ -141,7 +146,7 @@ struct MinMaxPair {
static MinMaxPair CheckMinMaxMatch(const char* input) {
v8::HandleScope scope(CcTest::isolate());
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
RegExpCompileData result;
CHECK(v8::internal::RegExpParser::ParseRegExp(
@@ -456,7 +461,7 @@ TEST(ParserRegression) {
static void ExpectError(const char* input,
const char* expected) {
v8::HandleScope scope(CcTest::isolate());
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
RegExpCompileData result;
CHECK(!v8::internal::RegExpParser::ParseRegExp(
@@ -525,7 +530,7 @@ static bool NotWord(uc16 c) {
static void TestCharacterClassEscapes(uc16 c, bool (pred)(uc16 c)) {
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
ZoneList<CharacterRange>* ranges =
new(&zone) ZoneList<CharacterRange>(2, &zone);
CharacterRange::AddClassEscape(c, ranges, &zone);
@@ -576,7 +581,7 @@ static RegExpNode* Compile(const char* input, bool multiline, bool unicode,
static void Execute(const char* input, bool multiline, bool unicode,
bool is_one_byte, bool dot_output = false) {
v8::HandleScope scope(CcTest::isolate());
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
RegExpNode* node = Compile(input, multiline, unicode, is_one_byte, &zone);
USE(node);
#ifdef DEBUG
@@ -614,7 +619,7 @@ static unsigned PseudoRandom(int i, int j) {
TEST(SplayTreeSimple) {
static const unsigned kLimit = 1000;
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
ZoneSplayTree<TestConfig> tree(&zone);
bool seen[kLimit];
for (unsigned i = 0; i < kLimit; i++) seen[i] = false;
@@ -681,7 +686,7 @@ TEST(DispatchTableConstruction) {
}
}
// Enter test data into dispatch table.
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
DispatchTable table(&zone);
for (int i = 0; i < kRangeCount; i++) {
uc16* range = ranges[i];
@@ -744,6 +749,8 @@ typedef RegExpMacroAssemblerX64 ArchRegExpMacroAssembler;
typedef RegExpMacroAssemblerARM ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_ARM64
typedef RegExpMacroAssemblerARM64 ArchRegExpMacroAssembler;
+#elif V8_TARGET_ARCH_S390
+typedef RegExpMacroAssemblerS390 ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_PPC
typedef RegExpMacroAssemblerPPC ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_MIPS
@@ -793,7 +800,7 @@ TEST(MacroAssemblerNativeSuccess) {
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
ArchRegExpMacroAssembler m(isolate, &zone, NativeRegExpMacroAssembler::LATIN1,
4);
@@ -831,7 +838,7 @@ TEST(MacroAssemblerNativeSimple) {
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
ArchRegExpMacroAssembler m(isolate, &zone, NativeRegExpMacroAssembler::LATIN1,
4);
@@ -898,7 +905,7 @@ TEST(MacroAssemblerNativeSimpleUC16) {
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
ArchRegExpMacroAssembler m(isolate, &zone, NativeRegExpMacroAssembler::UC16,
4);
@@ -971,7 +978,7 @@ TEST(MacroAssemblerNativeBacktrack) {
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
ArchRegExpMacroAssembler m(isolate, &zone, NativeRegExpMacroAssembler::LATIN1,
0);
@@ -1012,7 +1019,7 @@ TEST(MacroAssemblerNativeBackReferenceLATIN1) {
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
ArchRegExpMacroAssembler m(isolate, &zone, NativeRegExpMacroAssembler::LATIN1,
4);
@@ -1062,7 +1069,7 @@ TEST(MacroAssemblerNativeBackReferenceUC16) {
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
ArchRegExpMacroAssembler m(isolate, &zone, NativeRegExpMacroAssembler::UC16,
4);
@@ -1115,7 +1122,7 @@ TEST(MacroAssemblernativeAtStart) {
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
ArchRegExpMacroAssembler m(isolate, &zone, NativeRegExpMacroAssembler::LATIN1,
0);
@@ -1175,7 +1182,7 @@ TEST(MacroAssemblerNativeBackRefNoCase) {
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
ArchRegExpMacroAssembler m(isolate, &zone, NativeRegExpMacroAssembler::LATIN1,
4);
@@ -1234,7 +1241,7 @@ TEST(MacroAssemblerNativeRegisters) {
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
ArchRegExpMacroAssembler m(isolate, &zone, NativeRegExpMacroAssembler::LATIN1,
6);
@@ -1336,7 +1343,7 @@ TEST(MacroAssemblerStackOverflow) {
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
ArchRegExpMacroAssembler m(isolate, &zone, NativeRegExpMacroAssembler::LATIN1,
0);
@@ -1375,7 +1382,7 @@ TEST(MacroAssemblerNativeLotsOfRegisters) {
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
ArchRegExpMacroAssembler m(isolate, &zone, NativeRegExpMacroAssembler::LATIN1,
2);
@@ -1423,7 +1430,7 @@ TEST(MacroAssemblerNativeLotsOfRegisters) {
TEST(MacroAssembler) {
byte codes[1024];
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
RegExpMacroAssemblerIrregexp m(CcTest::i_isolate(), Vector<byte>(codes, 1024),
&zone);
// ^f(o)o.
@@ -1491,7 +1498,7 @@ TEST(AddInverseToTable) {
static const int kLimit = 1000;
static const int kRangeCount = 16;
for (int t = 0; t < 10; t++) {
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
ZoneList<CharacterRange>* ranges =
new(&zone) ZoneList<CharacterRange>(kRangeCount, &zone);
for (int i = 0; i < kRangeCount; i++) {
@@ -1512,7 +1519,7 @@ TEST(AddInverseToTable) {
CHECK_EQ(is_on, set->Get(0) == false);
}
}
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
ZoneList<CharacterRange>* ranges =
new(&zone) ZoneList<CharacterRange>(1, &zone);
ranges->Add(CharacterRange::Range(0xFFF0, 0xFFFE), &zone);
@@ -1625,7 +1632,7 @@ TEST(UncanonicalizeEquivalence) {
static void TestRangeCaseIndependence(Isolate* isolate, CharacterRange input,
Vector<CharacterRange> expected) {
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
int count = expected.length();
ZoneList<CharacterRange>* list =
new(&zone) ZoneList<CharacterRange>(count, &zone);
@@ -1694,7 +1701,7 @@ static bool InClass(uc32 c, ZoneList<CharacterRange>* ranges) {
TEST(UnicodeRangeSplitter) {
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
ZoneList<CharacterRange>* base =
new(&zone) ZoneList<CharacterRange>(1, &zone);
base->Add(CharacterRange::Everything(), &zone);
@@ -1738,7 +1745,7 @@ TEST(UnicodeRangeSplitter) {
TEST(CanonicalizeCharacterSets) {
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
ZoneList<CharacterRange>* list =
new(&zone) ZoneList<CharacterRange>(4, &zone);
CharacterSet set(list);
@@ -1799,7 +1806,7 @@ TEST(CanonicalizeCharacterSets) {
TEST(CharacterRangeMerge) {
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
ZoneList<CharacterRange> l1(4, &zone);
ZoneList<CharacterRange> l2(4, &zone);
// Create all combinations of intersections of ranges, both singletons and
@@ -1904,7 +1911,6 @@ void MockUseCounterCallback(v8::Isolate* isolate,
// Test that ES2015 RegExp compatibility fixes are in place, that they
// are not overly broad, and the appropriate UseCounters are incremented
TEST(UseCountRegExp) {
- i::FLAG_harmony_regexps = true;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
LocalContext env;
@@ -1943,7 +1949,7 @@ TEST(UseCountRegExp) {
// a UseCounter is incremented to track it.
v8::Local<v8::Value> resultToString =
CompileRun("RegExp.prototype.toString().length");
- CHECK_EQ(1, use_counts[v8::Isolate::kRegExpPrototypeStickyGetter]);
+ CHECK_EQ(2, use_counts[v8::Isolate::kRegExpPrototypeStickyGetter]);
CHECK_EQ(1, use_counts[v8::Isolate::kRegExpPrototypeToString]);
CHECK(resultToString->IsInt32());
CHECK_EQ(6,
@@ -1951,7 +1957,7 @@ TEST(UseCountRegExp) {
// .toString() works on normal RegExps
v8::Local<v8::Value> resultReToString = CompileRun("/a/.toString().length");
- CHECK_EQ(1, use_counts[v8::Isolate::kRegExpPrototypeStickyGetter]);
+ CHECK_EQ(2, use_counts[v8::Isolate::kRegExpPrototypeStickyGetter]);
CHECK_EQ(1, use_counts[v8::Isolate::kRegExpPrototypeToString]);
CHECK(resultReToString->IsInt32());
CHECK_EQ(
@@ -1963,7 +1969,7 @@ TEST(UseCountRegExp) {
"try { RegExp.prototype.toString.call(null) }"
"catch (e) { exception = e; }"
"exception");
- CHECK_EQ(1, use_counts[v8::Isolate::kRegExpPrototypeStickyGetter]);
+ CHECK_EQ(2, use_counts[v8::Isolate::kRegExpPrototypeStickyGetter]);
CHECK_EQ(1, use_counts[v8::Isolate::kRegExpPrototypeToString]);
CHECK(resultToStringError->IsObject());
}
diff --git a/deps/v8/test/cctest/test-run-wasm-relocation-arm.cc b/deps/v8/test/cctest/test-run-wasm-relocation-arm.cc
new file mode 100644
index 0000000000..adfeb28b2f
--- /dev/null
+++ b/deps/v8/test/cctest/test-run-wasm-relocation-arm.cc
@@ -0,0 +1,80 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <iostream> // NOLINT(readability/streams)
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/arm/assembler-arm-inl.h"
+#include "src/arm/simulator-arm.h"
+#include "src/disassembler.h"
+#include "src/factory.h"
+#include "src/ostreams.h"
+#include "test/cctest/compiler/c-signature.h"
+#include "test/cctest/compiler/call-tester.h"
+
+using namespace v8::base;
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+#define __ assm.
+
+static int32_t DummyStaticFunction(Object* result) { return 1; }
+
+TEST(WasmRelocationArm) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ v8::internal::byte buffer[4096];
+ DummyStaticFunction(NULL);
+ int32_t imm = 1234567;
+
+ Assembler assm(isolate, buffer, sizeof buffer);
+
+ __ mov(r0, Operand(imm, RelocInfo::WASM_MEMORY_REFERENCE));
+ __ mov(pc, Operand(lr));
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+ CSignature0<int32_t> csig;
+ CodeRunner<int32_t> runnable(isolate, code, &csig);
+ int32_t ret_value = runnable.Call();
+ CHECK_EQ(ret_value, imm);
+
+#ifdef DEBUG
+ OFStream os(stdout);
+ code->Print(os);
+ ::printf("f() = %d\n\n", ret_value);
+#endif
+ size_t offset = 1234;
+
+ // Relocating references by offset
+ int mode_mask = (1 << RelocInfo::WASM_MEMORY_REFERENCE);
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (RelocInfo::IsWasmMemoryReference(mode)) {
+ // Dummy values of size used here as the objective of the test is to
+ // verify that the immediate is patched correctly
+ it.rinfo()->update_wasm_memory_reference(
+ it.rinfo()->wasm_memory_reference(),
+ it.rinfo()->wasm_memory_reference() + offset, 1, 2,
+ SKIP_ICACHE_FLUSH);
+ }
+ }
+
+ // Call into relocated code object
+ ret_value = runnable.Call();
+ CHECK_EQ((imm + offset), ret_value);
+
+#ifdef DEBUG
+ code->Print(os);
+ ::printf("f() = %d\n\n", ret_value);
+#endif
+}
+
+#undef __
diff --git a/deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc b/deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc
new file mode 100644
index 0000000000..48f9e85ba1
--- /dev/null
+++ b/deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc
@@ -0,0 +1,82 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <iostream> // NOLINT(readability/streams)
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/arm64/simulator-arm64.h"
+#include "src/arm64/utils-arm64.h"
+#include "src/disassembler.h"
+#include "src/factory.h"
+#include "src/macro-assembler.h"
+#include "src/ostreams.h"
+#include "test/cctest/compiler/c-signature.h"
+#include "test/cctest/compiler/call-tester.h"
+
+using namespace v8::base;
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+#define __ masm.
+
+static int64_t DummyStaticFunction(Object* result) { return 1; }
+
+TEST(WasmRelocationArm64) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ v8::internal::byte buffer[4096];
+ DummyStaticFunction(NULL);
+ int64_t imm = 1234567;
+
+ MacroAssembler masm(isolate, buffer, sizeof buffer,
+ v8::internal::CodeObjectRequired::kYes);
+
+ __ Mov(x0, Immediate(imm, RelocInfo::WASM_MEMORY_REFERENCE));
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+ CSignature0<int64_t> csig;
+ CodeRunner<int64_t> runnable(isolate, code, &csig);
+ int64_t ret_value = runnable.Call();
+ CHECK_EQ(ret_value, imm);
+
+#ifdef DEBUG
+ OFStream os(stdout);
+ code->Print(os);
+ ::printf("f() = %ld\n\n", ret_value);
+#endif
+ size_t offset = 1234;
+
+ // Relocating reference by offset
+ int mode_mask = (1 << RelocInfo::WASM_MEMORY_REFERENCE);
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (RelocInfo::IsWasmMemoryReference(mode)) {
+ // Dummy values of size used here as the objective of the test is to
+ // verify that the immediate is patched correctly
+ it.rinfo()->update_wasm_memory_reference(
+ it.rinfo()->wasm_memory_reference(),
+ it.rinfo()->wasm_memory_reference() + offset, 1, 2,
+ SKIP_ICACHE_FLUSH);
+ }
+ }
+
+ // Call into relocated code object
+ ret_value = runnable.Call();
+ CHECK_EQ((imm + offset), ret_value);
+
+#ifdef DEBUG
+ code->Print(os);
+ ::printf("f() = %ld\n\n", ret_value);
+#endif
+}
+
+#undef __
diff --git a/deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc b/deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc
new file mode 100644
index 0000000000..135b522df5
--- /dev/null
+++ b/deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc
@@ -0,0 +1,89 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdlib.h>
+
+#include "src/v8.h"
+
+#include "src/debug/debug.h"
+#include "src/disasm.h"
+#include "src/disassembler.h"
+#include "src/ia32/frames-ia32.h"
+#include "src/ic/ic.h"
+#include "src/macro-assembler.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/c-signature.h"
+#include "test/cctest/compiler/call-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+#define __ assm.
+
+static int32_t DummyStaticFunction(Object* result) { return 1; }
+
+TEST(WasmRelocationIa32) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Zone zone(isolate->allocator());
+ HandleScope scope(isolate);
+ v8::internal::byte buffer[4096];
+ Assembler assm(isolate, buffer, sizeof buffer);
+ DummyStaticFunction(NULL);
+ int32_t imm = 1234567;
+
+ __ mov(eax, Immediate(reinterpret_cast<Address>(imm),
+ RelocInfo::WASM_MEMORY_REFERENCE));
+ __ nop();
+ __ ret(0);
+
+ CSignature0<int32_t> csig;
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ USE(code);
+
+ CodeRunner<int32_t> runnable(isolate, code, &csig);
+ int32_t ret_value = runnable.Call();
+ CHECK_EQ(ret_value, imm);
+
+#ifdef OBJECT_PRINT
+ OFStream os(stdout);
+ code->Print(os);
+ byte* begin = code->instruction_start();
+ byte* end = begin + code->instruction_size();
+ disasm::Disassembler::Disassemble(stdout, begin, end);
+#endif
+
+ size_t offset = 1234;
+
+ // Relocating references by offset
+ int mode_mask = (1 << RelocInfo::WASM_MEMORY_REFERENCE);
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (RelocInfo::IsWasmMemoryReference(mode)) {
+ // Dummy values of size used here as the objective of the test is to
+ // verify that the immediate is patched correctly
+ it.rinfo()->update_wasm_memory_reference(
+ it.rinfo()->wasm_memory_reference(),
+ it.rinfo()->wasm_memory_reference() + offset, 1, 2,
+ SKIP_ICACHE_FLUSH);
+ }
+ }
+
+ // Check if immediate is updated correctly
+ ret_value = runnable.Call();
+ CHECK_EQ(ret_value, imm + offset);
+
+#ifdef OBJECT_PRINT
+ // OFStream os(stdout);
+ code->Print(os);
+ begin = code->instruction_start();
+ end = begin + code->instruction_size();
+ disasm::Disassembler::Disassemble(stdout, begin, end);
+#endif
+}
+
+#undef __
diff --git a/deps/v8/test/cctest/test-run-wasm-relocation-x64.cc b/deps/v8/test/cctest/test-run-wasm-relocation-x64.cc
new file mode 100644
index 0000000000..f1b6d96b8c
--- /dev/null
+++ b/deps/v8/test/cctest/test-run-wasm-relocation-x64.cc
@@ -0,0 +1,83 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdlib.h>
+
+#include "src/v8.h"
+
+#include "src/debug/debug.h"
+#include "src/disasm.h"
+#include "src/disassembler.h"
+#include "src/ic/ic.h"
+#include "src/macro-assembler.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/c-signature.h"
+#include "test/cctest/compiler/call-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+#define __ assm.
+
+static int32_t DummyStaticFunction(Object* result) { return 1; }
+TEST(WasmRelocationX64movq64) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ v8::internal::byte buffer[4096];
+ Assembler assm(isolate, buffer, sizeof buffer);
+ DummyStaticFunction(NULL);
+ int64_t imm = 1234567;
+
+ __ movq(rax, imm, RelocInfo::WASM_MEMORY_REFERENCE);
+ __ nop();
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ USE(code);
+
+ CSignature0<int64_t> csig;
+ CodeRunner<int64_t> runnable(isolate, code, &csig);
+ int64_t ret_value = runnable.Call();
+ CHECK_EQ(ret_value, imm);
+
+#ifdef OBJECT_PRINT
+ OFStream os(stdout);
+ code->Print(os);
+ byte* begin = code->instruction_start();
+ byte* end = begin + code->instruction_size();
+ disasm::Disassembler::Disassemble(stdout, begin, end);
+#endif
+ size_t offset = 1234;
+
+ // Relocating references by offset
+ int mode_mask = (1 << RelocInfo::WASM_MEMORY_REFERENCE);
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (RelocInfo::IsWasmMemoryReference(mode)) {
+ // Dummy values of size used here as the objective of the test is to
+ // verify that the immediate is patched correctly
+ it.rinfo()->update_wasm_memory_reference(
+ it.rinfo()->wasm_memory_reference(),
+ it.rinfo()->wasm_memory_reference() + offset, 1, 2,
+ SKIP_ICACHE_FLUSH);
+ }
+ }
+
+ // Check if immediate is updated correctly
+ ret_value = runnable.Call();
+ CHECK_EQ(ret_value, imm + offset);
+
+#ifdef OBJECT_PRINT
+ code->Print(os);
+ begin = code->instruction_start();
+ end = begin + code->instruction_size();
+ disasm::Disassembler::Disassemble(stdout, begin, end);
+#endif
+}
+
+#undef __
diff --git a/deps/v8/test/cctest/test-run-wasm-relocation-x87.cc b/deps/v8/test/cctest/test-run-wasm-relocation-x87.cc
new file mode 100644
index 0000000000..6cbd065c1b
--- /dev/null
+++ b/deps/v8/test/cctest/test-run-wasm-relocation-x87.cc
@@ -0,0 +1,89 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdlib.h>
+
+#include "src/v8.h"
+
+#include "src/debug/debug.h"
+#include "src/disasm.h"
+#include "src/disassembler.h"
+#include "src/ic/ic.h"
+#include "src/macro-assembler.h"
+#include "src/x87/frames-x87.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/c-signature.h"
+#include "test/cctest/compiler/call-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+#define __ assm.
+
+static int32_t DummyStaticFunction(Object* result) { return 1; }
+
+TEST(WasmRelocationIa32) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Zone zone(isolate->allocator());
+ HandleScope scope(isolate);
+ v8::internal::byte buffer[4096];
+ Assembler assm(isolate, buffer, sizeof buffer);
+ DummyStaticFunction(NULL);
+ int32_t imm = 1234567;
+
+ __ mov(eax, Immediate(reinterpret_cast<Address>(imm),
+ RelocInfo::WASM_MEMORY_REFERENCE));
+ __ nop();
+ __ ret(0);
+
+ CSignature0<int32_t> csig;
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ USE(code);
+
+ CodeRunner<int32_t> runnable(isolate, code, &csig);
+ int32_t ret_value = runnable.Call();
+ CHECK_EQ(ret_value, imm);
+
+#ifdef OBJECT_PRINT
+ OFStream os(stdout);
+ code->Print(os);
+ byte* begin = code->instruction_start();
+ byte* end = begin + code->instruction_size();
+ disasm::Disassembler::Disassemble(stdout, begin, end);
+#endif
+
+ size_t offset = 1234;
+
+ // Relocating references by offset
+ int mode_mask = (1 << RelocInfo::WASM_MEMORY_REFERENCE);
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (RelocInfo::IsWasmMemoryReference(mode)) {
+ // Dummy values of size used here as the objective of the test is to
+ // verify that the immediate is patched correctly
+ it.rinfo()->update_wasm_memory_reference(
+ it.rinfo()->wasm_memory_reference(),
+ it.rinfo()->wasm_memory_reference() + offset, 1, 2,
+ SKIP_ICACHE_FLUSH);
+ }
+ }
+
+ // Check if immediate is updated correctly
+ ret_value = runnable.Call();
+ CHECK_EQ(ret_value, imm + offset);
+
+#ifdef OBJECT_PRINT
+ // OFStream os(stdout);
+ code->Print(os);
+ begin = code->instruction_start();
+ end = begin + code->instruction_size();
+ disasm::Disassembler::Disassemble(stdout, begin, end);
+#endif
+}
+
+#undef __
diff --git a/deps/v8/test/cctest/test-sampler-api.cc b/deps/v8/test/cctest/test-sampler-api.cc
index 2cc15f816a..e2c1c25638 100644
--- a/deps/v8/test/cctest/test-sampler-api.cc
+++ b/deps/v8/test/cctest/test-sampler-api.cc
@@ -71,6 +71,12 @@ class SimulatorHelper {
simulator_->get_register(v8::internal::Simulator::sp));
state->fp = reinterpret_cast<void*>(
simulator_->get_register(v8::internal::Simulator::fp));
+#elif V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
+ state->pc = reinterpret_cast<void*>(simulator_->get_pc());
+ state->sp = reinterpret_cast<void*>(
+ simulator_->get_register(v8::internal::Simulator::sp));
+ state->fp = reinterpret_cast<void*>(
+ simulator_->get_register(v8::internal::Simulator::fp));
#endif
}
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index 2f29b25fab..cd349f9d73 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -39,20 +39,17 @@
#include "src/objects.h"
#include "src/parsing/parser.h"
#include "src/runtime/runtime.h"
+#include "src/snapshot/code-serializer.h"
+#include "src/snapshot/deserializer.h"
#include "src/snapshot/natives.h"
-#include "src/snapshot/serialize.h"
+#include "src/snapshot/partial-serializer.h"
#include "src/snapshot/snapshot.h"
+#include "src/snapshot/startup-serializer.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/utils-inl.h"
using namespace v8::internal;
-
-bool DefaultSnapshotAvailable() {
- return i::Snapshot::DefaultSnapshotBlob() != NULL;
-}
-
-
void DisableTurbofan() {
const char* flag = "--turbo-filter=\"\"";
FlagList::SetFlagsFromString(flag, StrLength(flag));
@@ -74,34 +71,14 @@ class TestIsolate : public Isolate {
}
};
-
-void WritePayload(const Vector<const byte>& payload, const char* file_name) {
- FILE* file = v8::base::OS::FOpen(file_name, "wb");
- if (file == NULL) {
- PrintF("Unable to write to snapshot file \"%s\"\n", file_name);
- exit(1);
- }
- size_t written = fwrite(payload.begin(), 1, payload.length(), file);
- if (written != static_cast<size_t>(payload.length())) {
- i::PrintF("Writing snapshot file failed.. Aborting.\n");
- exit(1);
- }
- fclose(file);
-}
-
-
-static bool WriteToFile(Isolate* isolate, const char* snapshot_file) {
- SnapshotByteSink sink;
- StartupSerializer ser(isolate, &sink);
- ser.SerializeStrongReferences();
- ser.SerializeWeakReferencesAndDeferred();
- SnapshotData snapshot_data(ser);
- WritePayload(snapshot_data.RawData(), snapshot_file);
- return true;
+static Vector<const byte> WritePayload(const Vector<const byte>& payload) {
+ int length = payload.length();
+ byte* blob = NewArray<byte>(length);
+ memcpy(blob, payload.begin(), length);
+ return Vector<const byte>(const_cast<const byte*>(blob), length);
}
-
-static void Serialize(v8::Isolate* isolate) {
+static Vector<const byte> Serialize(v8::Isolate* isolate) {
// We have to create one context. One reason for this is so that the builtins
// can be loaded from v8natives.js and their addresses can be processed. This
// will clear the pending fixups array, which would otherwise contain GC roots
@@ -114,7 +91,12 @@ static void Serialize(v8::Isolate* isolate) {
Isolate* internal_isolate = reinterpret_cast<Isolate*>(isolate);
internal_isolate->heap()->CollectAllAvailableGarbage("serialize");
- WriteToFile(internal_isolate, FLAG_testing_serialization_file);
+ SnapshotByteSink sink;
+ StartupSerializer ser(internal_isolate, &sink);
+ ser.SerializeStrongReferences();
+ ser.SerializeWeakReferencesAndDeferred();
+ SnapshotData snapshot_data(ser);
+ return WritePayload(snapshot_data.RawData());
}
@@ -134,49 +116,21 @@ Vector<const uint8_t> ConstructSource(Vector<const uint8_t> head,
source_length);
}
-
-// Test that the whole heap can be serialized.
-UNINITIALIZED_TEST(Serialize) {
- DisableTurbofan();
- if (DefaultSnapshotAvailable()) return;
- v8::Isolate* isolate = TestIsolate::NewInitialized(true);
- Serialize(isolate);
-}
-
-
-// Test that heap serialization is non-destructive.
-UNINITIALIZED_TEST(SerializeTwice) {
- DisableTurbofan();
- if (DefaultSnapshotAvailable()) return;
- v8::Isolate* isolate = TestIsolate::NewInitialized(true);
- Serialize(isolate);
- Serialize(isolate);
-}
-
-
-//----------------------------------------------------------------------------
-// Tests that the heap can be deserialized.
-
-v8::Isolate* InitializeFromFile(const char* snapshot_file) {
- int len;
- byte* str = ReadBytes(snapshot_file, &len);
- if (!str) return NULL;
+v8::Isolate* InitializeFromBlob(Vector<const byte> blob) {
v8::Isolate* v8_isolate = NULL;
{
- SnapshotData snapshot_data(Vector<const byte>(str, len));
+ SnapshotData snapshot_data(blob);
Deserializer deserializer(&snapshot_data);
Isolate* isolate = new TestIsolate(false);
v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
v8::Isolate::Scope isolate_scope(v8_isolate);
isolate->Init(&deserializer);
}
- DeleteArray(str);
return v8_isolate;
}
-
-static v8::Isolate* Deserialize() {
- v8::Isolate* isolate = InitializeFromFile(FLAG_testing_serialization_file);
+static v8::Isolate* Deserialize(Vector<const byte> blob) {
+ v8::Isolate* isolate = InitializeFromBlob(blob);
CHECK(isolate);
return isolate;
}
@@ -194,14 +148,15 @@ static void SanityCheck(v8::Isolate* v8_isolate) {
isolate->factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("Empty"));
}
-
-UNINITIALIZED_DEPENDENT_TEST(Deserialize, Serialize) {
+UNINITIALIZED_TEST(StartupSerializerOnce) {
// The serialize-deserialize tests only work if the VM is built without
// serialization. That doesn't matter. We don't need to be able to
// serialize a snapshot in a VM that is booted from a snapshot.
DisableTurbofan();
- if (DefaultSnapshotAvailable()) return;
- v8::Isolate* isolate = Deserialize();
+ v8::Isolate* isolate = TestIsolate::NewInitialized(true);
+ Vector<const byte> blob = Serialize(isolate);
+ isolate = Deserialize(blob);
+ blob.Dispose();
{
v8::HandleScope handle_scope(isolate);
v8::Isolate::Scope isolate_scope(isolate);
@@ -214,12 +169,14 @@ UNINITIALIZED_DEPENDENT_TEST(Deserialize, Serialize) {
isolate->Dispose();
}
-
-UNINITIALIZED_DEPENDENT_TEST(DeserializeFromSecondSerialization,
- SerializeTwice) {
+UNINITIALIZED_TEST(StartupSerializerTwice) {
DisableTurbofan();
- if (DefaultSnapshotAvailable()) return;
- v8::Isolate* isolate = Deserialize();
+ v8::Isolate* isolate = TestIsolate::NewInitialized(true);
+ Vector<const byte> blob1 = Serialize(isolate);
+ Vector<const byte> blob2 = Serialize(isolate);
+ blob1.Dispose();
+ isolate = Deserialize(blob2);
+ blob2.Dispose();
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
@@ -232,11 +189,12 @@ UNINITIALIZED_DEPENDENT_TEST(DeserializeFromSecondSerialization,
isolate->Dispose();
}
-
-UNINITIALIZED_DEPENDENT_TEST(DeserializeAndRunScript2, Serialize) {
+UNINITIALIZED_TEST(StartupSerializerOnceRunScript) {
DisableTurbofan();
- if (DefaultSnapshotAvailable()) return;
- v8::Isolate* isolate = Deserialize();
+ v8::Isolate* isolate = TestIsolate::NewInitialized(true);
+ Vector<const byte> blob = Serialize(isolate);
+ isolate = Deserialize(blob);
+ blob.Dispose();
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
@@ -255,12 +213,14 @@ UNINITIALIZED_DEPENDENT_TEST(DeserializeAndRunScript2, Serialize) {
isolate->Dispose();
}
-
-UNINITIALIZED_DEPENDENT_TEST(DeserializeFromSecondSerializationAndRunScript2,
- SerializeTwice) {
+UNINITIALIZED_TEST(StartupSerializerTwiceRunScript) {
DisableTurbofan();
- if (DefaultSnapshotAvailable()) return;
- v8::Isolate* isolate = Deserialize();
+ v8::Isolate* isolate = TestIsolate::NewInitialized(true);
+ Vector<const byte> blob1 = Serialize(isolate);
+ Vector<const byte> blob2 = Serialize(isolate);
+ blob1.Dispose();
+ isolate = Deserialize(blob2);
+ blob2.Dispose();
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
@@ -278,10 +238,8 @@ UNINITIALIZED_DEPENDENT_TEST(DeserializeFromSecondSerializationAndRunScript2,
isolate->Dispose();
}
-
-UNINITIALIZED_TEST(PartialSerialization) {
- DisableTurbofan();
- if (DefaultSnapshotAvailable()) return;
+static void PartiallySerializeObject(Vector<const byte>* startup_blob_out,
+ Vector<const byte>* partial_blob_out) {
v8::Isolate* v8_isolate = TestIsolate::NewInitialized(true);
Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
v8_isolate->Enter();
@@ -316,10 +274,6 @@ UNINITIALIZED_TEST(PartialSerialization) {
raw_foo = *(v8::Utils::OpenHandle(*foo));
}
- int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
- Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
- SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
-
{
v8::HandleScope handle_scope(v8_isolate);
v8::Local<v8::Context>::New(v8_isolate, env)->Exit();
@@ -340,34 +294,25 @@ UNINITIALIZED_TEST(PartialSerialization) {
SnapshotData startup_snapshot(startup_serializer);
SnapshotData partial_snapshot(partial_serializer);
- WritePayload(partial_snapshot.RawData(), FLAG_testing_serialization_file);
- WritePayload(startup_snapshot.RawData(), startup_name.start());
-
- startup_name.Dispose();
+ *partial_blob_out = WritePayload(partial_snapshot.RawData());
+ *startup_blob_out = WritePayload(startup_snapshot.RawData());
}
v8_isolate->Exit();
v8_isolate->Dispose();
}
-
-UNINITIALIZED_DEPENDENT_TEST(PartialDeserialization, PartialSerialization) {
+UNINITIALIZED_TEST(PartialSerializerObject) {
DisableTurbofan();
- if (DefaultSnapshotAvailable()) return;
- int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
- Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
- SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
+ Vector<const byte> startup_blob;
+ Vector<const byte> partial_blob;
+ PartiallySerializeObject(&startup_blob, &partial_blob);
- v8::Isolate* v8_isolate = InitializeFromFile(startup_name.start());
+ v8::Isolate* v8_isolate = InitializeFromBlob(startup_blob);
+ startup_blob.Dispose();
CHECK(v8_isolate);
- startup_name.Dispose();
{
v8::Isolate::Scope isolate_scope(v8_isolate);
- const char* file_name = FLAG_testing_serialization_file;
-
- int snapshot_size = 0;
- byte* snapshot = ReadBytes(file_name, &snapshot_size);
-
Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
HandleScope handle_scope(isolate);
Handle<Object> root;
@@ -375,7 +320,7 @@ UNINITIALIZED_DEPENDENT_TEST(PartialDeserialization, PartialSerialization) {
// any references to the global proxy in this test.
Handle<JSGlobalProxy> global_proxy = Handle<JSGlobalProxy>::null();
{
- SnapshotData snapshot_data(Vector<const byte>(snapshot, snapshot_size));
+ SnapshotData snapshot_data(partial_blob);
Deserializer deserializer(&snapshot_data);
root = deserializer.DeserializePartial(isolate, global_proxy)
.ToHandleChecked();
@@ -384,23 +329,20 @@ UNINITIALIZED_DEPENDENT_TEST(PartialDeserialization, PartialSerialization) {
Handle<Object> root2;
{
- SnapshotData snapshot_data(Vector<const byte>(snapshot, snapshot_size));
+ SnapshotData snapshot_data(partial_blob);
Deserializer deserializer(&snapshot_data);
root2 = deserializer.DeserializePartial(isolate, global_proxy)
.ToHandleChecked();
CHECK(root2->IsString());
CHECK(root.is_identical_to(root2));
}
-
- DeleteArray(snapshot);
+ partial_blob.Dispose();
}
v8_isolate->Dispose();
}
-
-UNINITIALIZED_TEST(ContextSerialization) {
- DisableTurbofan();
- if (DefaultSnapshotAvailable()) return;
+static void PartiallySerializeContext(Vector<const byte>* startup_blob_out,
+ Vector<const byte>* partial_blob_out) {
v8::Isolate* v8_isolate = TestIsolate::NewInitialized(true);
Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
Heap* heap = isolate->heap();
@@ -428,10 +370,6 @@ UNINITIALIZED_TEST(ContextSerialization) {
// context even after we have disposed of env.
heap->CollectAllGarbage();
- int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
- Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
- SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
-
{
v8::HandleScope handle_scope(v8_isolate);
v8::Local<v8::Context>::New(v8_isolate, env)->Exit();
@@ -454,40 +392,31 @@ UNINITIALIZED_TEST(ContextSerialization) {
SnapshotData startup_snapshot(startup_serializer);
SnapshotData partial_snapshot(partial_serializer);
- WritePayload(partial_snapshot.RawData(), FLAG_testing_serialization_file);
- WritePayload(startup_snapshot.RawData(), startup_name.start());
-
- startup_name.Dispose();
+ *partial_blob_out = WritePayload(partial_snapshot.RawData());
+ *startup_blob_out = WritePayload(startup_snapshot.RawData());
}
v8_isolate->Dispose();
}
-
-UNINITIALIZED_DEPENDENT_TEST(ContextDeserialization, ContextSerialization) {
+UNINITIALIZED_TEST(PartialSerializerContext) {
DisableTurbofan();
- if (DefaultSnapshotAvailable()) return;
- int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
- Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
- SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
+ Vector<const byte> startup_blob;
+ Vector<const byte> partial_blob;
+ PartiallySerializeContext(&startup_blob, &partial_blob);
- v8::Isolate* v8_isolate = InitializeFromFile(startup_name.start());
+ v8::Isolate* v8_isolate = InitializeFromBlob(startup_blob);
CHECK(v8_isolate);
- startup_name.Dispose();
+ startup_blob.Dispose();
{
v8::Isolate::Scope isolate_scope(v8_isolate);
- const char* file_name = FLAG_testing_serialization_file;
-
- int snapshot_size = 0;
- byte* snapshot = ReadBytes(file_name, &snapshot_size);
-
Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
HandleScope handle_scope(isolate);
Handle<Object> root;
Handle<JSGlobalProxy> global_proxy =
isolate->factory()->NewUninitializedJSGlobalProxy();
{
- SnapshotData snapshot_data(Vector<const byte>(snapshot, snapshot_size));
+ SnapshotData snapshot_data(partial_blob);
Deserializer deserializer(&snapshot_data);
root = deserializer.DeserializePartial(isolate, global_proxy)
.ToHandleChecked();
@@ -497,22 +426,21 @@ UNINITIALIZED_DEPENDENT_TEST(ContextDeserialization, ContextSerialization) {
Handle<Object> root2;
{
- SnapshotData snapshot_data(Vector<const byte>(snapshot, snapshot_size));
+ SnapshotData snapshot_data(partial_blob);
Deserializer deserializer(&snapshot_data);
root2 = deserializer.DeserializePartial(isolate, global_proxy)
.ToHandleChecked();
CHECK(root2->IsContext());
CHECK(!root.is_identical_to(root2));
}
- DeleteArray(snapshot);
+ partial_blob.Dispose();
}
v8_isolate->Dispose();
}
-
-UNINITIALIZED_TEST(CustomContextSerialization) {
- DisableTurbofan();
- if (DefaultSnapshotAvailable()) return;
+static void PartiallySerializeCustomContext(
+ Vector<const byte>* startup_blob_out,
+ Vector<const byte>* partial_blob_out) {
v8::Isolate* v8_isolate = TestIsolate::NewInitialized(true);
Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
{
@@ -534,7 +462,8 @@ UNINITIALIZED_TEST(CustomContextSerialization) {
" e = function(s) { return eval (s); }"
"})();"
"var o = this;"
- "var r = Math.sin(0) + Math.cos(0);"
+ "var r = Math.random();"
+ "var c = Math.sin(0) + Math.cos(0);"
"var f = (function(a, b) { return a + b; }).bind(1, 2, 3);"
"var s = parseInt('12345');");
@@ -559,10 +488,6 @@ UNINITIALIZED_TEST(CustomContextSerialization) {
// context even after we have disposed of env.
isolate->heap()->CollectAllAvailableGarbage("snapshotting");
- int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
- Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
- SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
-
{
v8::HandleScope handle_scope(v8_isolate);
v8::Local<v8::Context>::New(v8_isolate, env)->Exit();
@@ -585,47 +510,43 @@ UNINITIALIZED_TEST(CustomContextSerialization) {
SnapshotData startup_snapshot(startup_serializer);
SnapshotData partial_snapshot(partial_serializer);
- WritePayload(partial_snapshot.RawData(), FLAG_testing_serialization_file);
- WritePayload(startup_snapshot.RawData(), startup_name.start());
-
- startup_name.Dispose();
+ *partial_blob_out = WritePayload(partial_snapshot.RawData());
+ *startup_blob_out = WritePayload(startup_snapshot.RawData());
}
v8_isolate->Dispose();
}
-
-UNINITIALIZED_DEPENDENT_TEST(CustomContextDeserialization,
- CustomContextSerialization) {
+UNINITIALIZED_TEST(PartialSerializerCustomContext) {
DisableTurbofan();
- FLAG_crankshaft = false;
- if (DefaultSnapshotAvailable()) return;
- int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
- Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
- SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
+ Vector<const byte> startup_blob;
+ Vector<const byte> partial_blob;
+ PartiallySerializeCustomContext(&startup_blob, &partial_blob);
- v8::Isolate* v8_isolate = InitializeFromFile(startup_name.start());
+ v8::Isolate* v8_isolate = InitializeFromBlob(startup_blob);
CHECK(v8_isolate);
- startup_name.Dispose();
+ startup_blob.Dispose();
{
v8::Isolate::Scope isolate_scope(v8_isolate);
- const char* file_name = FLAG_testing_serialization_file;
-
- int snapshot_size = 0;
- byte* snapshot = ReadBytes(file_name, &snapshot_size);
-
Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
HandleScope handle_scope(isolate);
Handle<Object> root;
Handle<JSGlobalProxy> global_proxy =
isolate->factory()->NewUninitializedJSGlobalProxy();
{
- SnapshotData snapshot_data(Vector<const byte>(snapshot, snapshot_size));
+ SnapshotData snapshot_data(partial_blob);
Deserializer deserializer(&snapshot_data);
root = deserializer.DeserializePartial(isolate, global_proxy)
.ToHandleChecked();
CHECK(root->IsContext());
Handle<Context> context = Handle<Context>::cast(root);
+
+ // Add context to the weak native context list
+ context->set(Context::NEXT_CONTEXT_LINK,
+ isolate->heap()->native_contexts_list(),
+ UPDATE_WEAK_WRITE_BARRIER);
+ isolate->heap()->set_native_contexts_list(*context);
+
CHECK(context->global_proxy() == *global_proxy);
Handle<String> o = isolate->factory()->NewStringFromAsciiChecked("o");
Handle<JSObject> global_object(context->global_object(), isolate);
@@ -638,7 +559,18 @@ UNINITIALIZED_DEPENDENT_TEST(CustomContextDeserialization,
->ToNumber(v8_isolate->GetCurrentContext())
.ToLocalChecked()
->Value();
- CHECK_EQ(1, r);
+ CHECK(0.0 <= r && r < 1.0);
+ // Math.random still works.
+ double random = CompileRun("Math.random()")
+ ->ToNumber(v8_isolate->GetCurrentContext())
+ .ToLocalChecked()
+ ->Value();
+ CHECK(0.0 <= random && random < 1.0);
+ double c = CompileRun("c")
+ ->ToNumber(v8_isolate->GetCurrentContext())
+ .ToLocalChecked()
+ ->Value();
+ CHECK_EQ(1, c);
int f = CompileRun("f()")
->ToNumber(v8_isolate->GetCurrentContext())
.ToLocalChecked()
@@ -669,13 +601,12 @@ UNINITIALIZED_DEPENDENT_TEST(CustomContextDeserialization,
.FromJust();
CHECK_EQ(100002, b);
}
- DeleteArray(snapshot);
+ partial_blob.Dispose();
}
v8_isolate->Dispose();
}
-
-TEST(PerIsolateSnapshotBlobs) {
+TEST(CustomSnapshotDataBlob) {
DisableTurbofan();
const char* source1 = "function f() { return 42; }";
const char* source2 =
@@ -729,8 +660,7 @@ static void SerializationFunctionTemplate(
args.GetReturnValue().Set(args[0]);
}
-
-TEST(PerIsolateSnapshotBlobsOutdatedContextWithOverflow) {
+TEST(CustomSnapshotDataBlobOutdatedContextWithOverflow) {
DisableTurbofan();
const char* source1 =
@@ -776,8 +706,7 @@ TEST(PerIsolateSnapshotBlobsOutdatedContextWithOverflow) {
isolate->Dispose();
}
-
-TEST(PerIsolateSnapshotBlobsWithLocker) {
+TEST(CustomSnapshotDataBlobWithLocker) {
DisableTurbofan();
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -815,8 +744,7 @@ TEST(PerIsolateSnapshotBlobsWithLocker) {
isolate1->Dispose();
}
-
-TEST(SnapshotBlobsStackOverflow) {
+TEST(CustomSnapshotDataBlobStackOverflow) {
DisableTurbofan();
const char* source =
"var a = [0];"
@@ -855,6 +783,106 @@ TEST(SnapshotBlobsStackOverflow) {
isolate->Dispose();
}
+bool IsCompiled(const char* name) {
+ return i::Handle<i::JSFunction>::cast(
+ v8::Utils::OpenHandle(*CompileRun(name)))
+ ->shared()
+ ->is_compiled();
+}
+
+TEST(SnapshotDataBlobWithWarmup) {
+ DisableTurbofan();
+ const char* warmup = "Math.tan(1); Math.sin = 1;";
+
+ v8::StartupData cold = v8::V8::CreateSnapshotDataBlob();
+ v8::StartupData warm = v8::V8::WarmUpSnapshotDataBlob(cold, warmup);
+ delete[] cold.data;
+
+ v8::Isolate::CreateParams params;
+ params.snapshot_blob = &warm;
+ params.array_buffer_allocator = CcTest::array_buffer_allocator();
+
+ v8::Isolate* isolate = v8::Isolate::New(params);
+ {
+ v8::Isolate::Scope i_scope(isolate);
+ v8::HandleScope h_scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ delete[] warm.data;
+ v8::Context::Scope c_scope(context);
+ // Running the warmup script has effect on whether functions are
+ // pre-compiled, but does not pollute the context.
+ CHECK(IsCompiled("Math.tan"));
+ CHECK(!IsCompiled("Math.cos"));
+ CHECK(CompileRun("Math.sin")->IsFunction());
+ }
+ isolate->Dispose();
+}
+
+TEST(CustomSnapshotDataBlobWithWarmup) {
+ DisableTurbofan();
+ const char* source =
+ "function f() { return Math.sin(1); }\n"
+ "function g() { return Math.cos(1); }\n"
+ "Math.tan(1);"
+ "var a = 5";
+ const char* warmup = "a = f()";
+
+ v8::StartupData cold = v8::V8::CreateSnapshotDataBlob(source);
+ v8::StartupData warm = v8::V8::WarmUpSnapshotDataBlob(cold, warmup);
+ delete[] cold.data;
+
+ v8::Isolate::CreateParams params;
+ params.snapshot_blob = &warm;
+ params.array_buffer_allocator = CcTest::array_buffer_allocator();
+
+ v8::Isolate* isolate = v8::Isolate::New(params);
+ {
+ v8::Isolate::Scope i_scope(isolate);
+ v8::HandleScope h_scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ delete[] warm.data;
+ v8::Context::Scope c_scope(context);
+ // Running the warmup script has effect on whether functions are
+ // pre-compiled, but does not pollute the context.
+ CHECK(IsCompiled("f"));
+ CHECK(IsCompiled("Math.sin"));
+ CHECK(!IsCompiled("g"));
+ CHECK(!IsCompiled("Math.cos"));
+ CHECK(!IsCompiled("Math.tan"));
+ CHECK_EQ(5, CompileRun("a")->Int32Value(context).FromJust());
+ }
+ isolate->Dispose();
+}
+
+TEST(CustomSnapshotDataBlobImmortalImmovableRoots) {
+ DisableTurbofan();
+ // Flood the startup snapshot with shared function infos. If they are
+ // serialized before the immortal immovable root, the root will no longer end
+ // up on the first page.
+ Vector<const uint8_t> source =
+ ConstructSource(STATIC_CHAR_VECTOR("var a = [];"),
+ STATIC_CHAR_VECTOR("a.push(function() {return 7});"),
+ STATIC_CHAR_VECTOR("\0"), 10000);
+
+ v8::StartupData data = v8::V8::CreateSnapshotDataBlob(
+ reinterpret_cast<const char*>(source.start()));
+
+ v8::Isolate::CreateParams params;
+ params.snapshot_blob = &data;
+ params.array_buffer_allocator = CcTest::array_buffer_allocator();
+
+ v8::Isolate* isolate = v8::Isolate::New(params);
+ {
+ v8::Isolate::Scope i_scope(isolate);
+ v8::HandleScope h_scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ delete[] data.data; // We can dispose of the snapshot blob now.
+ v8::Context::Scope c_scope(context);
+ CHECK_EQ(7, CompileRun("a[0]()")->Int32Value(context).FromJust());
+ }
+ isolate->Dispose();
+ source.Dispose();
+}
TEST(TestThatAlwaysSucceeds) {
}
@@ -866,12 +894,6 @@ TEST(TestThatAlwaysFails) {
}
-DEPENDENT_TEST(DependentTestThatAlwaysFails, TestThatAlwaysSucceeds) {
- bool ArtificialFailure2 = false;
- CHECK(ArtificialFailure2);
-}
-
-
int CountBuiltins() {
// Check that we have not deserialized any additional builtin.
HeapIterator iterator(CcTest::heap());
@@ -887,14 +909,13 @@ int CountBuiltins() {
static Handle<SharedFunctionInfo> CompileScript(
Isolate* isolate, Handle<String> source, Handle<String> name,
ScriptData** cached_data, v8::ScriptCompiler::CompileOptions options) {
- return Compiler::CompileScript(
+ return Compiler::GetSharedFunctionInfoForScript(
source, name, 0, 0, v8::ScriptOriginOptions(), Handle<Object>(),
Handle<Context>(isolate->native_context()), NULL, cached_data, options,
NOT_NATIVES_CODE, false);
}
-
-TEST(SerializeToplevelOnePlusOne) {
+TEST(CodeSerializerOnePlusOne) {
FLAG_serialize_toplevel = true;
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -944,8 +965,7 @@ TEST(SerializeToplevelOnePlusOne) {
delete cache;
}
-
-TEST(CodeCachePromotedToCompilationCache) {
+TEST(CodeSerializerPromotedToCompilationCache) {
FLAG_serialize_toplevel = true;
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -975,8 +995,7 @@ TEST(CodeCachePromotedToCompilationCache) {
delete cache;
}
-
-TEST(SerializeToplevelInternalizedString) {
+TEST(CodeSerializerInternalizedString) {
FLAG_serialize_toplevel = true;
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -1035,8 +1054,7 @@ TEST(SerializeToplevelInternalizedString) {
delete cache;
}
-
-TEST(SerializeToplevelLargeCodeObject) {
+TEST(CodeSerializerLargeCodeObject) {
FLAG_serialize_toplevel = true;
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -1083,8 +1101,7 @@ TEST(SerializeToplevelLargeCodeObject) {
source.Dispose();
}
-
-TEST(SerializeToplevelLargeStrings) {
+TEST(CodeSerializerLargeStrings) {
FLAG_serialize_toplevel = true;
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -1141,8 +1158,7 @@ TEST(SerializeToplevelLargeStrings) {
source_t.Dispose();
}
-
-TEST(SerializeToplevelThreeBigStrings) {
+TEST(CodeSerializerThreeBigStrings) {
FLAG_serialize_toplevel = true;
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -1252,8 +1268,7 @@ class SerializerTwoByteResource : public v8::String::ExternalStringResource {
size_t length_;
};
-
-TEST(SerializeToplevelExternalString) {
+TEST(CodeSerializerExternalString) {
FLAG_serialize_toplevel = true;
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -1315,8 +1330,7 @@ TEST(SerializeToplevelExternalString) {
delete cache;
}
-
-TEST(SerializeToplevelLargeExternalString) {
+TEST(CodeSerializerLargeExternalString) {
FLAG_serialize_toplevel = true;
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -1374,8 +1388,7 @@ TEST(SerializeToplevelLargeExternalString) {
string.Dispose();
}
-
-TEST(SerializeToplevelExternalScriptName) {
+TEST(CodeSerializerExternalScriptName) {
FLAG_serialize_toplevel = true;
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -1474,8 +1487,7 @@ v8::ScriptCompiler::CachedData* ProduceCache(const char* source) {
return cache;
}
-
-TEST(SerializeToplevelIsolates) {
+TEST(CodeSerializerIsolates) {
FLAG_serialize_toplevel = true;
const char* source = "function f() { return 'abc'; }; f() + 'def'";
@@ -1516,8 +1528,7 @@ TEST(SerializeToplevelIsolates) {
isolate2->Dispose();
}
-
-TEST(SerializeToplevelFlagChange) {
+TEST(CodeSerializerFlagChange) {
FLAG_serialize_toplevel = true;
const char* source = "function f() { return 'abc'; }; f() + 'def'";
@@ -1546,8 +1557,7 @@ TEST(SerializeToplevelFlagChange) {
isolate2->Dispose();
}
-
-TEST(SerializeToplevelBitFlip) {
+TEST(CodeSerializerBitFlip) {
FLAG_serialize_toplevel = true;
const char* source = "function f() { return 'abc'; }; f() + 'def'";
@@ -1576,8 +1586,7 @@ TEST(SerializeToplevelBitFlip) {
isolate2->Dispose();
}
-
-TEST(SerializeWithHarmonyScoping) {
+TEST(CodeSerializerWithHarmonyScoping) {
FLAG_serialize_toplevel = true;
const char* source1 = "'use strict'; let x = 'X'";
@@ -1655,11 +1664,16 @@ TEST(SerializeWithHarmonyScoping) {
isolate2->Dispose();
}
-
-TEST(SerializeInternalReference) {
+TEST(CodeSerializerInternalReference) {
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64
return;
#endif
+ // In ignition there are only relative jumps, so the following code
+ // would not have any internal references. This test is not relevant
+ // for ignition.
+ if (FLAG_ignition) {
+ return;
+ }
// Disable experimental natives that are loaded after deserialization.
FLAG_function_context_specialization = false;
FLAG_always_opt = true;
@@ -1740,11 +1754,65 @@ TEST(SerializeInternalReference) {
isolate->Dispose();
}
+TEST(CodeSerializerEagerCompilationAndPreAge) {
+ if (FLAG_ignition) return;
+
+ FLAG_lazy = true;
+ FLAG_serialize_toplevel = true;
+ FLAG_serialize_age_code = true;
+ FLAG_serialize_eager = true;
+ FLAG_min_preparse_length = 1;
+
+ static const char* source =
+ "function f() {"
+ " function g() {"
+ " return 1;"
+ " }"
+ " return g();"
+ "}"
+ "'abcdef';";
+
+ v8::ScriptCompiler::CachedData* cache = ProduceCache(source);
+
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate2 = v8::Isolate::New(create_params);
+ {
+ v8::Isolate::Scope iscope(isolate2);
+ v8::HandleScope scope(isolate2);
+ v8::Local<v8::Context> context = v8::Context::New(isolate2);
+ v8::Context::Scope context_scope(context);
+
+ v8::Local<v8::String> source_str = v8_str(source);
+ v8::ScriptOrigin origin(v8_str("test"));
+ v8::ScriptCompiler::Source source(source_str, origin, cache);
+ v8::Local<v8::UnboundScript> unbound =
+ v8::ScriptCompiler::CompileUnboundScript(
+ isolate2, &source, v8::ScriptCompiler::kConsumeCodeCache)
+ .ToLocalChecked();
+
+ CHECK(!cache->rejected);
+
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate2);
+ HandleScope i_scope(i_isolate);
+ Handle<SharedFunctionInfo> toplevel = v8::Utils::OpenHandle(*unbound);
+ Handle<Script> script(Script::cast(toplevel->script()));
+ WeakFixedArray::Iterator iterator(script->shared_function_infos());
+ // Every function has been pre-compiled from the code cache.
+ int count = 0;
+ while (SharedFunctionInfo* shared = iterator.Next<SharedFunctionInfo>()) {
+ CHECK(shared->is_compiled());
+ CHECK_EQ(Code::kPreAgedCodeAge, shared->code()->GetAge());
+ count++;
+ }
+ CHECK_EQ(3, count);
+ }
+ isolate2->Dispose();
+}
TEST(Regress503552) {
// Test that the code serializer can deal with weak cells that form a linked
// list during incremental marking.
-
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -1752,7 +1820,7 @@ TEST(Regress503552) {
Handle<String> source = isolate->factory()->NewStringFromAsciiChecked(
"function f() {} function g() {}");
ScriptData* script_data = NULL;
- Handle<SharedFunctionInfo> shared = Compiler::CompileScript(
+ Handle<SharedFunctionInfo> shared = Compiler::GetSharedFunctionInfoForScript(
source, Handle<String>(), 0, 0, v8::ScriptOriginOptions(),
Handle<Object>(), Handle<Context>(isolate->native_context()), NULL,
&script_data, v8::ScriptCompiler::kProduceCodeCache, NOT_NATIVES_CODE,
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index e992f33c8b..770042d814 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -1508,3 +1508,25 @@ TEST(FormatMessage) {
"'arg0' returned for property 'arg1' of object 'arg2' is not a function");
CHECK(String::Equals(result, expected));
}
+
+TEST(Regress609831) {
+ CcTest::InitializeVM();
+ LocalContext context;
+ Isolate* isolate = CcTest::i_isolate();
+ {
+ HandleScope scope(isolate);
+ v8::Local<v8::Value> result = CompileRun(
+ "String.fromCharCode(32, 32, 32, 32, 32, "
+ "32, 32, 32, 32, 32, 32, 32, 32, 32, 32, "
+ "32, 32, 32, 32, 32, 32, 32, 32, 32, 32)");
+ CHECK(v8::Utils::OpenHandle(*result)->IsSeqOneByteString());
+ }
+ {
+ HandleScope scope(isolate);
+ v8::Local<v8::Value> result = CompileRun(
+ "String.fromCharCode(432, 432, 432, 432, 432, "
+ "432, 432, 432, 432, 432, 432, 432, 432, 432, "
+ "432, 432, 432, 432, 432, 432, 432, 432, 432)");
+ CHECK(v8::Utils::OpenHandle(*result)->IsSeqTwoByteString());
+ }
+}
diff --git a/deps/v8/test/cctest/test-thread-termination.cc b/deps/v8/test/cctest/test-thread-termination.cc
index 05a3c3339e..85dfd13b60 100644
--- a/deps/v8/test/cctest/test-thread-termination.cc
+++ b/deps/v8/test/cctest/test-thread-termination.cc
@@ -403,7 +403,7 @@ TEST(TerminateFromOtherThreadWhileMicrotaskRunning) {
thread.Start();
v8::Isolate* isolate = CcTest::isolate();
- isolate->SetAutorunMicrotasks(false);
+ isolate->SetMicrotasksPolicy(v8::MicrotasksPolicy::kExplicit);
v8::HandleScope scope(isolate);
v8::Local<v8::ObjectTemplate> global =
CreateGlobalTemplate(CcTest::isolate(), Signal, DoLoop);
diff --git a/deps/v8/test/cctest/test-trace-event.cc b/deps/v8/test/cctest/test-trace-event.cc
index a889e088f6..190cb40782 100644
--- a/deps/v8/test/cctest/test-trace-event.cc
+++ b/deps/v8/test/cctest/test-trace-event.cc
@@ -72,8 +72,8 @@ class MockTracingPlatform : public v8::Platform {
void PerformDelayedTask() {}
uint64_t AddTraceEvent(char phase, const uint8_t* category_enabled_flag,
- const char* name, uint64_t id, uint64_t bind_id,
- int num_args, const char** arg_names,
+ const char* name, const char* scope, uint64_t id,
+ uint64_t bind_id, int num_args, const char** arg_names,
const uint8_t* arg_types, const uint64_t* arg_values,
unsigned int flags) override {
MockTraceObject* to = new MockTraceObject(phase, std::string(name), id,
@@ -256,3 +256,27 @@ TEST(TestEventWithId) {
i::V8::SetPlatformForTesting(old_platform);
}
+
+TEST(TestEventInContext) {
+ v8::Platform* old_platform = i::V8::GetCurrentPlatform();
+ MockTracingPlatform platform(old_platform);
+ i::V8::SetPlatformForTesting(&platform);
+
+ static uint64_t isolate_id = 0x20151021;
+ {
+ TRACE_EVENT_SCOPED_CONTEXT("v8-cat", "Isolate", isolate_id);
+ TRACE_EVENT0("v8-cat", "e");
+ }
+
+ CHECK_EQ(3, GET_TRACE_OBJECTS_LIST->length());
+ CHECK_EQ(TRACE_EVENT_PHASE_ENTER_CONTEXT, GET_TRACE_OBJECT(0)->phase);
+ CHECK_EQ("Isolate", GET_TRACE_OBJECT(0)->name);
+ CHECK_EQ(isolate_id, GET_TRACE_OBJECT(0)->id);
+ CHECK_EQ(TRACE_EVENT_PHASE_COMPLETE, GET_TRACE_OBJECT(1)->phase);
+ CHECK_EQ("e", GET_TRACE_OBJECT(1)->name);
+ CHECK_EQ(TRACE_EVENT_PHASE_LEAVE_CONTEXT, GET_TRACE_OBJECT(2)->phase);
+ CHECK_EQ("Isolate", GET_TRACE_OBJECT(2)->name);
+ CHECK_EQ(isolate_id, GET_TRACE_OBJECT(2)->id);
+
+ i::V8::SetPlatformForTesting(old_platform);
+}
diff --git a/deps/v8/test/cctest/test-types.cc b/deps/v8/test/cctest/test-types.cc
index 2e658b0255..7b7706febf 100644
--- a/deps/v8/test/cctest/test-types.cc
+++ b/deps/v8/test/cctest/test-types.cc
@@ -40,7 +40,7 @@ struct Tests {
Tests()
: isolate(CcTest::InitIsolateOnce()),
scope(isolate),
- zone(),
+ zone(isolate->allocator()),
T(&zone, isolate, isolate->random_number_generator()) {}
bool IsBitset(Type* type) { return type->IsBitsetForTesting(); }
@@ -941,7 +941,7 @@ struct Tests {
CheckSub(T.Object, T.Receiver);
CheckSub(T.Proxy, T.Receiver);
CheckSub(T.OtherObject, T.Object);
- CheckSub(T.Undetectable, T.Object);
+ CheckSub(T.OtherUndetectable, T.Object);
CheckSub(T.OtherObject, T.Object);
CheckUnordered(T.Object, T.Proxy);
diff --git a/deps/v8/test/cctest/test-unboxed-doubles.cc b/deps/v8/test/cctest/test-unboxed-doubles.cc
index f195a31c79..7fc9b5beec 100644
--- a/deps/v8/test/cctest/test-unboxed-doubles.cc
+++ b/deps/v8/test/cctest/test-unboxed-doubles.cc
@@ -12,7 +12,6 @@
#include "src/factory.h"
#include "src/field-type.h"
#include "src/global-handles.h"
-#include "src/heap/slots-buffer.h"
#include "src/ic/ic.h"
#include "src/macro-assembler.h"
#include "test/cctest/cctest.h"
@@ -1114,8 +1113,7 @@ TEST(DoScavengeWithIncrementalWriteBarrier) {
AlwaysAllocateScope always_allocate(isolate);
// Make sure |obj_value| is placed on an old-space evacuation candidate.
SimulateFullSpace(old_space);
- obj_value = factory->NewJSArray(32 * KB, FAST_HOLEY_ELEMENTS,
- Strength::WEAK, TENURED);
+ obj_value = factory->NewJSArray(32 * KB, FAST_HOLEY_ELEMENTS, TENURED);
ec_page = Page::FromAddress(obj_value->address());
}
@@ -1454,8 +1452,7 @@ static void TestIncrementalWriteBarrier(Handle<Map> map, Handle<Map> new_map,
// Make sure |obj_value| is placed on an old-space evacuation candidate.
SimulateFullSpace(old_space);
- obj_value = factory->NewJSArray(32 * KB, FAST_HOLEY_ELEMENTS,
- Strength::WEAK, TENURED);
+ obj_value = factory->NewJSArray(32 * KB, FAST_HOLEY_ELEMENTS, TENURED);
ec_page = Page::FromAddress(obj_value->address());
CHECK_NE(ec_page, Page::FromAddress(obj->address()));
}
@@ -1474,18 +1471,11 @@ static void TestIncrementalWriteBarrier(Handle<Map> map, Handle<Map> new_map,
CHECK(Marking::IsBlack(Marking::MarkBitFrom(*obj_value)));
CHECK(MarkCompactCollector::IsOnEvacuationCandidate(*obj_value));
- // Trigger incremental write barrier, which should add a slot to |ec_page|'s
- // slots buffer.
+ // Trigger incremental write barrier, which should add a slot to remembered
+ // set.
{
- int slots_buffer_len = SlotsBuffer::SizeOfChain(ec_page->slots_buffer());
FieldIndex index = FieldIndex::ForDescriptor(*map, tagged_descriptor);
- const int n = SlotsBuffer::kNumberOfElements + 10;
- for (int i = 0; i < n; i++) {
- obj->FastPropertyAtPut(index, *obj_value);
- }
- // Ensure that the slot was actually added to the |ec_page|'s slots buffer.
- CHECK_EQ(slots_buffer_len + n,
- SlotsBuffer::SizeOfChain(ec_page->slots_buffer()));
+ obj->FastPropertyAtPut(index, *obj_value);
}
// Migrate |obj| to |new_map| which should shift fields and put the
diff --git a/deps/v8/test/cctest/test-unique.cc b/deps/v8/test/cctest/test-unique.cc
index 207c2450a0..d84279475d 100644
--- a/deps/v8/test/cctest/test-unique.cc
+++ b/deps/v8/test/cctest/test-unique.cc
@@ -143,7 +143,7 @@ TEST(UniqueSet_Add) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C;
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
UniqueSet<String>* set = new(&zone) UniqueSet<String>();
@@ -170,7 +170,7 @@ TEST(UniqueSet_Remove) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C;
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
UniqueSet<String>* set = new(&zone) UniqueSet<String>();
@@ -210,7 +210,7 @@ TEST(UniqueSet_Contains) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C;
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
UniqueSet<String>* set = new(&zone) UniqueSet<String>();
@@ -241,7 +241,7 @@ TEST(UniqueSet_At) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C;
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
UniqueSet<String>* set = new(&zone) UniqueSet<String>();
@@ -278,7 +278,7 @@ TEST(UniqueSet_Equals) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C;
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
UniqueSet<String>* set1 = new(&zone) UniqueSet<String>();
UniqueSet<String>* set2 = new(&zone) UniqueSet<String>();
@@ -316,7 +316,7 @@ TEST(UniqueSet_IsSubset1) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C;
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
UniqueSet<String>* set1 = new(&zone) UniqueSet<String>();
UniqueSet<String>* set2 = new(&zone) UniqueSet<String>();
@@ -351,7 +351,7 @@ TEST(UniqueSet_IsSubset2) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C_D_E_F_G;
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
UniqueSet<String>* set1 = new(&zone) UniqueSet<String>();
UniqueSet<String>* set2 = new(&zone) UniqueSet<String>();
@@ -394,7 +394,7 @@ TEST(UniqueSet_IsSubsetExhaustive) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C_D_E_F_G;
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
Unique<String> elements[] = {
A, B, C, D, E, F, G
@@ -417,7 +417,7 @@ TEST(UniqueSet_Intersect1) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C;
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
UniqueSet<String>* set1 = new(&zone) UniqueSet<String>();
UniqueSet<String>* set2 = new(&zone) UniqueSet<String>();
@@ -458,7 +458,7 @@ TEST(UniqueSet_IntersectExhaustive) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C_D_E_F_G;
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
Unique<String> elements[] = {
A, B, C, D, E, F, G
@@ -485,7 +485,7 @@ TEST(UniqueSet_Union1) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C;
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
UniqueSet<String>* set1 = new(&zone) UniqueSet<String>();
UniqueSet<String>* set2 = new(&zone) UniqueSet<String>();
@@ -526,7 +526,7 @@ TEST(UniqueSet_UnionExhaustive) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C_D_E_F_G;
- Zone zone;
+ Zone zone(CcTest::i_isolate()->allocator());
Unique<String> elements[] = {
A, B, C, D, E, F, G
diff --git a/deps/v8/test/cctest/test-utils.cc b/deps/v8/test/cctest/test-utils.cc
index 5045b7e591..00702a5e19 100644
--- a/deps/v8/test/cctest/test-utils.cc
+++ b/deps/v8/test/cctest/test-utils.cc
@@ -32,6 +32,7 @@
#include "src/v8.h"
#include "src/base/platform/platform.h"
+#include "src/collector.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
diff --git a/deps/v8/test/cctest/testcfg.py b/deps/v8/test/cctest/testcfg.py
index d28ef7d004..36db837c6c 100644
--- a/deps/v8/test/cctest/testcfg.py
+++ b/deps/v8/test/cctest/testcfg.py
@@ -42,15 +42,6 @@ class CcTestSuite(testsuite.TestSuite):
build_dir = "build"
else:
build_dir = "out"
- self.serdes_dir = os.path.normpath(
- os.path.join(root, "..", "..", build_dir, ".serdes"))
-
- def SetupWorkingDirectory(self):
- # This is only called once per machine, while init above is called once per
- # process.
- if os.path.exists(self.serdes_dir):
- shutil.rmtree(self.serdes_dir, True)
- os.makedirs(self.serdes_dir)
def ListTests(self, context):
shell = os.path.abspath(os.path.join(context.shell_dir, self.shell()))
@@ -65,26 +56,14 @@ class CcTestSuite(testsuite.TestSuite):
return []
tests = []
for test_desc in output.stdout.strip().split():
- if test_desc.find('<') < 0:
- # Native Client output can contain a few non-test arguments
- # before the tests. Skip these.
- continue
- raw_test, dependency = test_desc.split('<')
- if dependency != '':
- dependency = raw_test.split('/')[0] + '/' + dependency
- else:
- dependency = None
- test = testcase.TestCase(self, raw_test, dependency=dependency)
+ test = testcase.TestCase(self, test_desc)
tests.append(test)
- tests.sort()
+ tests.sort(key=lambda t: t.path)
return tests
def GetFlagsForTestCase(self, testcase, context):
testname = testcase.path.split(os.path.sep)[-1]
- serialization_file = os.path.join(self.serdes_dir, "serdes_" + testname)
- serialization_file += ''.join(testcase.flags).replace('-', '_')
- return (testcase.flags + [testcase.path] + context.mode_flags +
- ["--testing_serialization_file=" + serialization_file])
+ return (testcase.flags + [testcase.path] + context.mode_flags)
def shell(self):
return "cctest"
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
new file mode 100644
index 0000000000..784f21a050
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
@@ -0,0 +1,1360 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "src/base/bits.h"
+#include "src/wasm/wasm-macro-gen.h"
+
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/value-helper.h"
+#include "test/cctest/wasm/test-signatures.h"
+#include "test/cctest/wasm/wasm-run-utils.h"
+
+#define CHECK_TRAP32(x) \
+ CHECK_EQ(0xdeadbeef, (bit_cast<uint32_t>(x)) & 0xFFFFFFFF)
+#define CHECK_TRAP64(x) \
+ CHECK_EQ(0xdeadbeefdeadbeef, (bit_cast<uint64_t>(x)) & 0xFFFFFFFFFFFFFFFF)
+#define CHECK_TRAP(x) CHECK_TRAP32(x)
+
+#define asi64(x) static_cast<int64_t>(x)
+
+#define asu64(x) static_cast<uint64_t>(x)
+
+#define B2(a, b) kExprBlock, 2, a, b
+#define B1(a) kExprBlock, 1, a
+
+// Can't bridge macro land with nested macros.
+#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_X87
+#define MIPS_OR_X87 true
+#else
+#define MIPS_OR_X87 false
+#endif
+
+#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_X87 || V8_TARGET_ARCH_ARM
+#define MIPS_OR_ARM_OR_X87 true
+#else
+#define MIPS_OR_ARM_OR_X87 false
+#endif
+
+#define FOREACH_I64_OPERATOR(V) \
+ V(DepthFirst, true) \
+ V(I64Phi, true) \
+ V(I64Const, true) \
+ V(I64Return, true) \
+ V(I64Param, true) \
+ V(I64LoadStore, true) \
+ V(I64Add, !MIPS_OR_X87) \
+ V(I64Sub, !MIPS_OR_X87) \
+ V(I64Mul, !MIPS_OR_X87) \
+ V(I64DivS, true) \
+ V(I64DivU, true) \
+ V(I64RemS, true) \
+ V(I64RemU, true) \
+ V(I64And, true) \
+ V(I64Ior, true) \
+ V(I64Xor, true) \
+ V(I64Shl, !MIPS_OR_X87) \
+ V(I64ShrU, !MIPS_OR_X87) \
+ V(I64ShrS, !MIPS_OR_X87) \
+ V(I64Eq, true) \
+ V(I64Ne, true) \
+ V(I64LtS, true) \
+ V(I64LeS, true) \
+ V(I64LtU, true) \
+ V(I64LeU, true) \
+ V(I64GtS, true) \
+ V(I64GeS, true) \
+ V(I64GtU, true) \
+ V(I64GeU, true) \
+ V(I64Ctz, true) \
+ V(I64Clz, true) \
+ V(I64Popcnt, !MIPS_OR_X87) \
+ V(I32ConvertI64, true) \
+ V(I64SConvertF32, true) \
+ V(I64SConvertF64, true) \
+ V(I64UConvertF32, true) \
+ V(I64UConvertF64, true) \
+ V(I64SConvertI32, true) \
+ V(I64UConvertI32, true) \
+ V(F32SConvertI64, true) \
+ V(F32UConvertI64, true) \
+ V(F64SConvertI64, true) \
+ V(F64UConvertI64, true) \
+ V(F64ReinterpretI64, true) \
+ V(I64ReinterpretF64, true) \
+ V(I64Ror, true) \
+ V(I64Rol, true)
+
+#define DECLARE_CONST(name, cond) static const bool kSupported_##name = cond;
+FOREACH_I64_OPERATOR(DECLARE_CONST)
+#undef DECLARE_CONST
+
+#define REQUIRE(name) \
+ if (!WASM_64 && !kSupported_##name) return
+
+TEST(Run_Wasm_I64Const) {
+ REQUIRE(I64Const);
+ WasmRunner<int64_t> r;
+ const int64_t kExpectedValue = 0x1122334455667788LL;
+ // return(kExpectedValue)
+ BUILD(r, WASM_I64V_9(kExpectedValue));
+ CHECK_EQ(kExpectedValue, r.Call());
+}
+
+TEST(Run_Wasm_I64Const_many) {
+ REQUIRE(I64Const);
+ int cntr = 0;
+ FOR_INT32_INPUTS(i) {
+ WasmRunner<int64_t> r;
+ const int64_t kExpectedValue = (static_cast<int64_t>(*i) << 32) | cntr;
+ // return(kExpectedValue)
+ BUILD(r, WASM_I64V(kExpectedValue));
+ CHECK_EQ(kExpectedValue, r.Call());
+ cntr++;
+ }
+}
+
+TEST(Run_Wasm_Return_I64) {
+ REQUIRE(I64Return);
+ WasmRunner<int64_t> r(MachineType::Int64());
+
+ BUILD(r, WASM_RETURN(WASM_GET_LOCAL(0)));
+
+ FOR_INT64_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+}
+
+// todo(ahaas): I added a list of missing instructions here to make merging
+// easier when I do them one by one.
+// kExprI64Add:
+TEST(Run_WasmI64Add) {
+ REQUIRE(I64Add);
+ WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ BUILD(r, WASM_I64_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) { CHECK_EQ(*i + *j, r.Call(*i, *j)); }
+ }
+}
+// kExprI64Sub:
+TEST(Run_Wasm_I64Sub) {
+ REQUIRE(I64Sub);
+ WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ BUILD(r, WASM_I64_SUB(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) { CHECK_EQ(*i - *j, r.Call(*i, *j)); }
+ }
+}
+// kExprI64Mul:
+// kExprI64DivS:
+
+TEST(Run_WasmI64DivS) {
+ REQUIRE(I64DivS);
+ WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ BUILD(r, WASM_I64_DIVS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) {
+ if (*j == 0) {
+ CHECK_TRAP64(r.Call(*i, *j));
+ } else if (*j == -1 && *i == std::numeric_limits<int64_t>::min()) {
+ CHECK_TRAP64(r.Call(*i, *j));
+ } else {
+ CHECK_EQ(*i / *j, r.Call(*i, *j));
+ }
+ }
+ }
+}
+
+TEST(Run_WasmI64DivS_Trap) {
+ REQUIRE(I64DivS);
+ WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ BUILD(r, WASM_I64_DIVS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ CHECK_EQ(0, r.Call(asi64(0), asi64(100)));
+ CHECK_TRAP64(r.Call(asi64(100), asi64(0)));
+ CHECK_TRAP64(r.Call(asi64(-1001), asi64(0)));
+ CHECK_TRAP64(r.Call(std::numeric_limits<int64_t>::min(), asi64(-1)));
+ CHECK_TRAP64(r.Call(std::numeric_limits<int64_t>::min(), asi64(0)));
+}
+
+TEST(Run_WasmI64DivS_Byzero_Const) {
+ REQUIRE(I64DivS);
+ for (int8_t denom = -2; denom < 8; denom++) {
+ WasmRunner<int64_t> r(MachineType::Int64());
+ BUILD(r, WASM_I64_DIVS(WASM_GET_LOCAL(0), WASM_I64V_1(denom)));
+ for (int64_t val = -7; val < 8; val++) {
+ if (denom == 0) {
+ CHECK_TRAP64(r.Call(val));
+ } else {
+ CHECK_EQ(val / denom, r.Call(val));
+ }
+ }
+ }
+}
+// kExprI64DivU:
+
+TEST(Run_WasmI64DivU) {
+ REQUIRE(I64DivU);
+ WasmRunner<uint64_t> r(MachineType::Uint64(), MachineType::Uint64());
+ BUILD(r, WASM_I64_DIVU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ FOR_UINT64_INPUTS(i) {
+ FOR_UINT64_INPUTS(j) {
+ if (*j == 0) {
+ CHECK_TRAP64(r.Call(*i, *j));
+ } else {
+ CHECK_EQ(*i / *j, r.Call(*i, *j));
+ }
+ }
+ }
+}
+
+TEST(Run_WasmI64DivU_Trap) {
+ REQUIRE(I64DivU);
+ WasmRunner<uint64_t> r(MachineType::Uint64(), MachineType::Uint64());
+ BUILD(r, WASM_I64_DIVU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ CHECK_EQ(0, r.Call(asu64(0), asu64(100)));
+ CHECK_TRAP64(r.Call(asu64(100), asu64(0)));
+ CHECK_TRAP64(r.Call(asu64(1001), asu64(0)));
+ CHECK_TRAP64(r.Call(std::numeric_limits<uint64_t>::max(), asu64(0)));
+}
+
+TEST(Run_WasmI64DivU_Byzero_Const) {
+ REQUIRE(I64DivU);
+ for (uint64_t denom = 0xfffffffffffffffe; denom < 8; denom++) {
+ WasmRunner<uint64_t> r(MachineType::Uint64());
+ BUILD(r, WASM_I64_DIVU(WASM_GET_LOCAL(0), WASM_I64V_1(denom)));
+
+ for (uint64_t val = 0xfffffffffffffff0; val < 8; val++) {
+ if (denom == 0) {
+ CHECK_TRAP64(r.Call(val));
+ } else {
+ CHECK_EQ(val / denom, r.Call(val));
+ }
+ }
+ }
+}
+// kExprI64RemS:
+TEST(Run_WasmI64RemS) {
+ REQUIRE(I64RemS);
+ WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ BUILD(r, WASM_I64_REMS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) {
+ if (*j == 0) {
+ CHECK_TRAP64(r.Call(*i, *j));
+ } else {
+ CHECK_EQ(*i % *j, r.Call(*i, *j));
+ }
+ }
+ }
+}
+
+TEST(Run_WasmI64RemS_Trap) {
+ REQUIRE(I64RemS);
+ WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ BUILD(r, WASM_I64_REMS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ CHECK_EQ(33, r.Call(asi64(133), asi64(100)));
+ CHECK_EQ(0, r.Call(std::numeric_limits<int64_t>::min(), asi64(-1)));
+ CHECK_TRAP64(r.Call(asi64(100), asi64(0)));
+ CHECK_TRAP64(r.Call(asi64(-1001), asi64(0)));
+ CHECK_TRAP64(r.Call(std::numeric_limits<int64_t>::min(), asi64(0)));
+}
+
+// kExprI64RemU:
+TEST(Run_WasmI64RemU) {
+ REQUIRE(I64RemU);
+ WasmRunner<uint64_t> r(MachineType::Uint64(), MachineType::Uint64());
+ BUILD(r, WASM_I64_REMU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ FOR_UINT64_INPUTS(i) {
+ FOR_UINT64_INPUTS(j) {
+ if (*j == 0) {
+ CHECK_TRAP64(r.Call(*i, *j));
+ } else {
+ CHECK_EQ(*i % *j, r.Call(*i, *j));
+ }
+ }
+ }
+}
+
+TEST(Run_Wasm_I64RemU_Trap) {
+ REQUIRE(I64RemU);
+ WasmRunner<uint64_t> r(MachineType::Uint64(), MachineType::Uint64());
+ BUILD(r, WASM_I64_REMU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ CHECK_EQ(17, r.Call(asu64(217), asu64(100)));
+ CHECK_TRAP64(r.Call(asu64(100), asu64(0)));
+ CHECK_TRAP64(r.Call(asu64(1001), asu64(0)));
+ CHECK_TRAP64(r.Call(std::numeric_limits<uint64_t>::max(), asu64(0)));
+}
+
+// kExprI64And:
+TEST(Run_Wasm_I64And) {
+ REQUIRE(I64And);
+ WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ BUILD(r, WASM_I64_AND(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) { CHECK_EQ((*i) & (*j), r.Call(*i, *j)); }
+ }
+}
+// kExprI64Ior:
+TEST(Run_Wasm_I64Ior) {
+ REQUIRE(I64Ior);
+ WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ BUILD(r, WASM_I64_IOR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) { CHECK_EQ((*i) | (*j), r.Call(*i, *j)); }
+ }
+}
+// kExprI64Xor:
+TEST(Run_Wasm_I64Xor) {
+ REQUIRE(I64Xor);
+ WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ BUILD(r, WASM_I64_XOR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) { CHECK_EQ((*i) ^ (*j), r.Call(*i, *j)); }
+ }
+}
+// kExprI64Shl:
+TEST(Run_Wasm_I64Shl) {
+ REQUIRE(I64Shl);
+ {
+ WasmRunner<uint64_t> r(MachineType::Uint64(), MachineType::Uint64());
+ BUILD(r, WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+
+ FOR_UINT64_INPUTS(i) {
+ FOR_UINT64_INPUTS(j) {
+ uint64_t expected = (*i) << (*j & 0x3f);
+ CHECK_EQ(expected, r.Call(*i, *j));
+ }
+ }
+ }
+ {
+ WasmRunner<int64_t> r(MachineType::Int64());
+ BUILD(r, WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_I64V_1(0)));
+ FOR_UINT64_INPUTS(i) { CHECK_EQ(*i << 0, r.Call(*i)); }
+ }
+ {
+ WasmRunner<int64_t> r(MachineType::Int64());
+ BUILD(r, WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_I64V_1(32)));
+ FOR_UINT64_INPUTS(i) { CHECK_EQ(*i << 32, r.Call(*i)); }
+ }
+ {
+ WasmRunner<int64_t> r(MachineType::Int64());
+ BUILD(r, WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_I64V_1(20)));
+ FOR_UINT64_INPUTS(i) { CHECK_EQ(*i << 20, r.Call(*i)); }
+ }
+ {
+ WasmRunner<int64_t> r(MachineType::Int64());
+ BUILD(r, WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_I64V_1(40)));
+ FOR_UINT64_INPUTS(i) { CHECK_EQ(*i << 40, r.Call(*i)); }
+ }
+}
+// kExprI64ShrU:
+TEST(Run_Wasm_I64ShrU) {
+ REQUIRE(I64ShrU);
+ {
+ WasmRunner<uint64_t> r(MachineType::Uint64(), MachineType::Uint64());
+ BUILD(r, WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+
+ FOR_UINT64_INPUTS(i) {
+ FOR_UINT64_INPUTS(j) {
+ uint64_t expected = (*i) >> (*j & 0x3f);
+ CHECK_EQ(expected, r.Call(*i, *j));
+ }
+ }
+ }
+ {
+ WasmRunner<int64_t> r(MachineType::Int64());
+ BUILD(r, WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_I64V_1(0)));
+ FOR_UINT64_INPUTS(i) { CHECK_EQ(*i >> 0, r.Call(*i)); }
+ }
+ {
+ WasmRunner<int64_t> r(MachineType::Int64());
+ BUILD(r, WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_I64V_1(32)));
+ FOR_UINT64_INPUTS(i) { CHECK_EQ(*i >> 32, r.Call(*i)); }
+ }
+ {
+ WasmRunner<int64_t> r(MachineType::Int64());
+ BUILD(r, WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_I64V_1(20)));
+ FOR_UINT64_INPUTS(i) { CHECK_EQ(*i >> 20, r.Call(*i)); }
+ }
+ {
+ WasmRunner<int64_t> r(MachineType::Int64());
+ BUILD(r, WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_I64V_1(40)));
+ FOR_UINT64_INPUTS(i) { CHECK_EQ(*i >> 40, r.Call(*i)); }
+ }
+}
+// kExprI64ShrS:
+TEST(Run_Wasm_I64ShrS) {
+ REQUIRE(I64ShrS);
+ {
+ WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ BUILD(r, WASM_I64_SAR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) {
+ int64_t expected = (*i) >> (*j & 0x3f);
+ CHECK_EQ(expected, r.Call(*i, *j));
+ }
+ }
+ }
+ {
+ WasmRunner<int64_t> r(MachineType::Int64());
+ BUILD(r, WASM_I64_SAR(WASM_GET_LOCAL(0), WASM_I64V_1(0)));
+ FOR_INT64_INPUTS(i) { CHECK_EQ(*i >> 0, r.Call(*i)); }
+ }
+ {
+ WasmRunner<int64_t> r(MachineType::Int64());
+ BUILD(r, WASM_I64_SAR(WASM_GET_LOCAL(0), WASM_I64V_1(32)));
+ FOR_INT64_INPUTS(i) { CHECK_EQ(*i >> 32, r.Call(*i)); }
+ }
+ {
+ WasmRunner<int64_t> r(MachineType::Int64());
+ BUILD(r, WASM_I64_SAR(WASM_GET_LOCAL(0), WASM_I64V_1(20)));
+ FOR_INT64_INPUTS(i) { CHECK_EQ(*i >> 20, r.Call(*i)); }
+ }
+ {
+ WasmRunner<int64_t> r(MachineType::Int64());
+ BUILD(r, WASM_I64_SAR(WASM_GET_LOCAL(0), WASM_I64V_1(40)));
+ FOR_INT64_INPUTS(i) { CHECK_EQ(*i >> 40, r.Call(*i)); }
+ }
+}
+// kExprI64Eq:
+TEST(Run_Wasm_I64Eq) {
+ REQUIRE(I64Eq);
+ WasmRunner<int32_t> r(MachineType::Int64(), MachineType::Int64());
+ BUILD(r, WASM_I64_EQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) { CHECK_EQ(*i == *j ? 1 : 0, r.Call(*i, *j)); }
+ }
+}
+// kExprI64Ne:
+TEST(Run_Wasm_I64Ne) {
+ REQUIRE(I64Ne);
+ WasmRunner<int32_t> r(MachineType::Int64(), MachineType::Int64());
+ BUILD(r, WASM_I64_NE(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) { CHECK_EQ(*i != *j ? 1 : 0, r.Call(*i, *j)); }
+ }
+}
+// kExprI64LtS:
+TEST(Run_Wasm_I64LtS) {
+ REQUIRE(I64LtS);
+ WasmRunner<int32_t> r(MachineType::Int64(), MachineType::Int64());
+ BUILD(r, WASM_I64_LTS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) { CHECK_EQ(*i < *j ? 1 : 0, r.Call(*i, *j)); }
+ }
+}
+TEST(Run_Wasm_I64LeS) {
+ REQUIRE(I64LeS);
+ WasmRunner<int32_t> r(MachineType::Int64(), MachineType::Int64());
+ BUILD(r, WASM_I64_LES(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) { CHECK_EQ(*i <= *j ? 1 : 0, r.Call(*i, *j)); }
+ }
+}
+TEST(Run_Wasm_I64LtU) {
+ REQUIRE(I64LtU);
+ WasmRunner<int32_t> r(MachineType::Int64(), MachineType::Int64());
+ BUILD(r, WASM_I64_LTU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ FOR_UINT64_INPUTS(i) {
+ FOR_UINT64_INPUTS(j) { CHECK_EQ(*i < *j ? 1 : 0, r.Call(*i, *j)); }
+ }
+}
+TEST(Run_Wasm_I64LeU) {
+ REQUIRE(I64LeU);
+ WasmRunner<int32_t> r(MachineType::Int64(), MachineType::Int64());
+ BUILD(r, WASM_I64_LEU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ FOR_UINT64_INPUTS(i) {
+ FOR_UINT64_INPUTS(j) { CHECK_EQ(*i <= *j ? 1 : 0, r.Call(*i, *j)); }
+ }
+}
+TEST(Run_Wasm_I64GtS) {
+ REQUIRE(I64GtS);
+ WasmRunner<int32_t> r(MachineType::Int64(), MachineType::Int64());
+ BUILD(r, WASM_I64_GTS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) { CHECK_EQ(*i > *j ? 1 : 0, r.Call(*i, *j)); }
+ }
+}
+TEST(Run_Wasm_I64GeS) {
+ REQUIRE(I64GeS);
+ WasmRunner<int32_t> r(MachineType::Int64(), MachineType::Int64());
+ BUILD(r, WASM_I64_GES(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) { CHECK_EQ(*i >= *j ? 1 : 0, r.Call(*i, *j)); }
+ }
+}
+
+TEST(Run_Wasm_I64GtU) {
+ REQUIRE(I64GtU);
+ WasmRunner<int32_t> r(MachineType::Int64(), MachineType::Int64());
+ BUILD(r, WASM_I64_GTU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ FOR_UINT64_INPUTS(i) {
+ FOR_UINT64_INPUTS(j) { CHECK_EQ(*i > *j ? 1 : 0, r.Call(*i, *j)); }
+ }
+}
+
+TEST(Run_Wasm_I64GeU) {
+ REQUIRE(I64GeU);
+ WasmRunner<int32_t> r(MachineType::Int64(), MachineType::Int64());
+ BUILD(r, WASM_I64_GEU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ FOR_UINT64_INPUTS(i) {
+ FOR_UINT64_INPUTS(j) { CHECK_EQ(*i >= *j ? 1 : 0, r.Call(*i, *j)); }
+ }
+}
+// kExprI32ConvertI64:
+TEST(Run_Wasm_I32ConvertI64) {
+ REQUIRE(I32ConvertI64);
+ FOR_INT64_INPUTS(i) {
+ WasmRunner<int32_t> r;
+ BUILD(r, WASM_I32_CONVERT_I64(WASM_I64V(*i)));
+ CHECK_EQ(static_cast<int32_t>(*i), r.Call());
+ }
+}
+// kExprI64SConvertI32:
+TEST(Run_Wasm_I64SConvertI32) {
+ REQUIRE(I64SConvertI32);
+ WasmRunner<int64_t> r(MachineType::Int32());
+ BUILD(r, WASM_I64_SCONVERT_I32(WASM_GET_LOCAL(0)));
+ FOR_INT32_INPUTS(i) { CHECK_EQ(static_cast<int64_t>(*i), r.Call(*i)); }
+}
+
+// kExprI64UConvertI32:
+TEST(Run_Wasm_I64UConvertI32) {
+ REQUIRE(I64UConvertI32);
+ WasmRunner<int64_t> r(MachineType::Uint32());
+ BUILD(r, WASM_I64_UCONVERT_I32(WASM_GET_LOCAL(0)));
+ FOR_UINT32_INPUTS(i) { CHECK_EQ(static_cast<uint64_t>(*i), r.Call(*i)); }
+}
+
+// kExprF64ReinterpretI64:
+// kExprI64ReinterpretF64:
+
+// kExprI64Clz:
+// kExprI64Ctz:
+// kExprI64Popcnt:
+TEST(Run_WasmI64Popcnt) {
+ struct {
+ int64_t expected;
+ uint64_t input;
+ } values[] = {{64, 0xffffffffffffffff},
+ {0, 0x0000000000000000},
+ {2, 0x0000080000008000},
+ {26, 0x1123456782345678},
+ {38, 0xffedcba09edcba09}};
+
+ WasmRunner<int64_t> r(MachineType::Uint64());
+ BUILD(r, WASM_I64_POPCNT(WASM_GET_LOCAL(0)));
+ for (size_t i = 0; i < arraysize(values); i++) {
+ CHECK_EQ(values[i].expected, r.Call(values[i].input));
+ }
+}
+
+// kExprF32SConvertI64:
+TEST(Run_WasmF32SConvertI64) {
+ REQUIRE(F32SConvertI64);
+ WasmRunner<float> r(MachineType::Int64());
+ BUILD(r, WASM_F32_SCONVERT_I64(WASM_GET_LOCAL(0)));
+ FOR_INT64_INPUTS(i) { CHECK_FLOAT_EQ(static_cast<float>(*i), r.Call(*i)); }
+}
+// kExprF32UConvertI64:
+TEST(Run_WasmF32UConvertI64) {
+ REQUIRE(F32UConvertI64);
+ struct {
+ uint64_t input;
+ uint32_t expected;
+ } values[] = {{0x0, 0x0},
+ {0x1, 0x3f800000},
+ {0xffffffff, 0x4f800000},
+ {0x1b09788b, 0x4dd84bc4},
+ {0x4c5fce8, 0x4c98bf9d},
+ {0xcc0de5bf, 0x4f4c0de6},
+ {0x2, 0x40000000},
+ {0x3, 0x40400000},
+ {0x4, 0x40800000},
+ {0x5, 0x40a00000},
+ {0x8, 0x41000000},
+ {0x9, 0x41100000},
+ {0xffffffffffffffff, 0x5f800000},
+ {0xfffffffffffffffe, 0x5f800000},
+ {0xfffffffffffffffd, 0x5f800000},
+ {0x0, 0x0},
+ {0x100000000, 0x4f800000},
+ {0xffffffff00000000, 0x5f800000},
+ {0x1b09788b00000000, 0x5dd84bc4},
+ {0x4c5fce800000000, 0x5c98bf9d},
+ {0xcc0de5bf00000000, 0x5f4c0de6},
+ {0x200000000, 0x50000000},
+ {0x300000000, 0x50400000},
+ {0x400000000, 0x50800000},
+ {0x500000000, 0x50a00000},
+ {0x800000000, 0x51000000},
+ {0x900000000, 0x51100000},
+ {0x273a798e187937a3, 0x5e1ce9e6},
+ {0xece3af835495a16b, 0x5f6ce3b0},
+ {0xb668ecc11223344, 0x5d3668ed},
+ {0x9e, 0x431e0000},
+ {0x43, 0x42860000},
+ {0xaf73, 0x472f7300},
+ {0x116b, 0x458b5800},
+ {0x658ecc, 0x4acb1d98},
+ {0x2b3b4c, 0x4a2ced30},
+ {0x88776655, 0x4f087766},
+ {0x70000000, 0x4ee00000},
+ {0x7200000, 0x4ce40000},
+ {0x7fffffff, 0x4f000000},
+ {0x56123761, 0x4eac246f},
+ {0x7fffff00, 0x4efffffe},
+ {0x761c4761eeeeeeee, 0x5eec388f},
+ {0x80000000eeeeeeee, 0x5f000000},
+ {0x88888888dddddddd, 0x5f088889},
+ {0xa0000000dddddddd, 0x5f200000},
+ {0xddddddddaaaaaaaa, 0x5f5dddde},
+ {0xe0000000aaaaaaaa, 0x5f600000},
+ {0xeeeeeeeeeeeeeeee, 0x5f6eeeef},
+ {0xfffffffdeeeeeeee, 0x5f800000},
+ {0xf0000000dddddddd, 0x5f700000},
+ {0x7fffffdddddddd, 0x5b000000},
+ {0x3fffffaaaaaaaa, 0x5a7fffff},
+ {0x1fffffaaaaaaaa, 0x59fffffd},
+ {0xfffff, 0x497ffff0},
+ {0x7ffff, 0x48ffffe0},
+ {0x3ffff, 0x487fffc0},
+ {0x1ffff, 0x47ffff80},
+ {0xffff, 0x477fff00},
+ {0x7fff, 0x46fffe00},
+ {0x3fff, 0x467ffc00},
+ {0x1fff, 0x45fff800},
+ {0xfff, 0x457ff000},
+ {0x7ff, 0x44ffe000},
+ {0x3ff, 0x447fc000},
+ {0x1ff, 0x43ff8000},
+ {0x3fffffffffff, 0x56800000},
+ {0x1fffffffffff, 0x56000000},
+ {0xfffffffffff, 0x55800000},
+ {0x7ffffffffff, 0x55000000},
+ {0x3ffffffffff, 0x54800000},
+ {0x1ffffffffff, 0x54000000},
+ {0x8000008000000000, 0x5f000000},
+ {0x8000008000000001, 0x5f000001},
+ {0x8000000000000400, 0x5f000000},
+ {0x8000000000000401, 0x5f000000}};
+ WasmRunner<float> r(MachineType::Uint64());
+ BUILD(r, WASM_F32_UCONVERT_I64(WASM_GET_LOCAL(0)));
+ for (size_t i = 0; i < arraysize(values); i++) {
+ CHECK_EQ(bit_cast<float>(values[i].expected), r.Call(values[i].input));
+ }
+}
+// kExprF64SConvertI64:
+TEST(Run_WasmF64SConvertI64) {
+ REQUIRE(F64SConvertI64);
+ WasmRunner<double> r(MachineType::Int64());
+ BUILD(r, WASM_F64_SCONVERT_I64(WASM_GET_LOCAL(0)));
+ FOR_INT64_INPUTS(i) { CHECK_DOUBLE_EQ(static_cast<double>(*i), r.Call(*i)); }
+}
+// kExprF64UConvertI64:
+TEST(Run_Wasm_F64UConvertI64) {
+ REQUIRE(F64UConvertI64);
+ struct {
+ uint64_t input;
+ uint64_t expected;
+ } values[] = {{0x0, 0x0},
+ {0x1, 0x3ff0000000000000},
+ {0xffffffff, 0x41efffffffe00000},
+ {0x1b09788b, 0x41bb09788b000000},
+ {0x4c5fce8, 0x419317f3a0000000},
+ {0xcc0de5bf, 0x41e981bcb7e00000},
+ {0x2, 0x4000000000000000},
+ {0x3, 0x4008000000000000},
+ {0x4, 0x4010000000000000},
+ {0x5, 0x4014000000000000},
+ {0x8, 0x4020000000000000},
+ {0x9, 0x4022000000000000},
+ {0xffffffffffffffff, 0x43f0000000000000},
+ {0xfffffffffffffffe, 0x43f0000000000000},
+ {0xfffffffffffffffd, 0x43f0000000000000},
+ {0x100000000, 0x41f0000000000000},
+ {0xffffffff00000000, 0x43efffffffe00000},
+ {0x1b09788b00000000, 0x43bb09788b000000},
+ {0x4c5fce800000000, 0x439317f3a0000000},
+ {0xcc0de5bf00000000, 0x43e981bcb7e00000},
+ {0x200000000, 0x4200000000000000},
+ {0x300000000, 0x4208000000000000},
+ {0x400000000, 0x4210000000000000},
+ {0x500000000, 0x4214000000000000},
+ {0x800000000, 0x4220000000000000},
+ {0x900000000, 0x4222000000000000},
+ {0x273a798e187937a3, 0x43c39d3cc70c3c9c},
+ {0xece3af835495a16b, 0x43ed9c75f06a92b4},
+ {0xb668ecc11223344, 0x43a6cd1d98224467},
+ {0x9e, 0x4063c00000000000},
+ {0x43, 0x4050c00000000000},
+ {0xaf73, 0x40e5ee6000000000},
+ {0x116b, 0x40b16b0000000000},
+ {0x658ecc, 0x415963b300000000},
+ {0x2b3b4c, 0x41459da600000000},
+ {0x88776655, 0x41e10eeccaa00000},
+ {0x70000000, 0x41dc000000000000},
+ {0x7200000, 0x419c800000000000},
+ {0x7fffffff, 0x41dfffffffc00000},
+ {0x56123761, 0x41d5848dd8400000},
+ {0x7fffff00, 0x41dfffffc0000000},
+ {0x761c4761eeeeeeee, 0x43dd8711d87bbbbc},
+ {0x80000000eeeeeeee, 0x43e00000001dddde},
+ {0x88888888dddddddd, 0x43e11111111bbbbc},
+ {0xa0000000dddddddd, 0x43e40000001bbbbc},
+ {0xddddddddaaaaaaaa, 0x43ebbbbbbbb55555},
+ {0xe0000000aaaaaaaa, 0x43ec000000155555},
+ {0xeeeeeeeeeeeeeeee, 0x43edddddddddddde},
+ {0xfffffffdeeeeeeee, 0x43efffffffbdddde},
+ {0xf0000000dddddddd, 0x43ee0000001bbbbc},
+ {0x7fffffdddddddd, 0x435ffffff7777777},
+ {0x3fffffaaaaaaaa, 0x434fffffd5555555},
+ {0x1fffffaaaaaaaa, 0x433fffffaaaaaaaa},
+ {0xfffff, 0x412ffffe00000000},
+ {0x7ffff, 0x411ffffc00000000},
+ {0x3ffff, 0x410ffff800000000},
+ {0x1ffff, 0x40fffff000000000},
+ {0xffff, 0x40efffe000000000},
+ {0x7fff, 0x40dfffc000000000},
+ {0x3fff, 0x40cfff8000000000},
+ {0x1fff, 0x40bfff0000000000},
+ {0xfff, 0x40affe0000000000},
+ {0x7ff, 0x409ffc0000000000},
+ {0x3ff, 0x408ff80000000000},
+ {0x1ff, 0x407ff00000000000},
+ {0x3fffffffffff, 0x42cfffffffffff80},
+ {0x1fffffffffff, 0x42bfffffffffff00},
+ {0xfffffffffff, 0x42affffffffffe00},
+ {0x7ffffffffff, 0x429ffffffffffc00},
+ {0x3ffffffffff, 0x428ffffffffff800},
+ {0x1ffffffffff, 0x427ffffffffff000},
+ {0x8000008000000000, 0x43e0000010000000},
+ {0x8000008000000001, 0x43e0000010000000},
+ {0x8000000000000400, 0x43e0000000000000},
+ {0x8000000000000401, 0x43e0000000000001}};
+ WasmRunner<double> r(MachineType::Uint64());
+ BUILD(r, WASM_F64_UCONVERT_I64(WASM_GET_LOCAL(0)));
+ for (size_t i = 0; i < arraysize(values); i++) {
+ CHECK_EQ(bit_cast<double>(values[i].expected), r.Call(values[i].input));
+ }
+}
+// kExprI64SConvertF32:
+
+TEST(Run_Wasm_I64SConvertF32a) {
+ WasmRunner<int64_t> r(MachineType::Float32());
+ BUILD(r, WASM_I64_SCONVERT_F32(WASM_GET_LOCAL(0)));
+
+ FOR_FLOAT32_INPUTS(i) {
+ if (*i < static_cast<float>(std::numeric_limits<int64_t>::max()) &&
+ *i >= static_cast<float>(std::numeric_limits<int64_t>::min())) {
+ CHECK_EQ(static_cast<int64_t>(*i), r.Call(*i));
+ } else {
+ CHECK_TRAP64(r.Call(*i));
+ }
+ }
+}
+// kExprI64SConvertF64:
+TEST(Run_Wasm_I64SConvertF64a) {
+ WasmRunner<int64_t> r(MachineType::Float64());
+ BUILD(r, WASM_I64_SCONVERT_F64(WASM_GET_LOCAL(0)));
+
+ FOR_FLOAT64_INPUTS(i) {
+ if (*i < static_cast<double>(std::numeric_limits<int64_t>::max()) &&
+ *i >= static_cast<double>(std::numeric_limits<int64_t>::min())) {
+ CHECK_EQ(static_cast<int64_t>(*i), r.Call(*i));
+ } else {
+ CHECK_TRAP64(r.Call(*i));
+ }
+ }
+}
+
+// kExprI64UConvertF32:
+TEST(Run_Wasm_I64UConvertF32a) {
+ WasmRunner<uint64_t> r(MachineType::Float32());
+ BUILD(r, WASM_I64_UCONVERT_F32(WASM_GET_LOCAL(0)));
+
+ FOR_FLOAT32_INPUTS(i) {
+ if (*i < static_cast<float>(std::numeric_limits<uint64_t>::max()) &&
+ *i > -1) {
+ CHECK_EQ(static_cast<uint64_t>(*i), r.Call(*i));
+ } else {
+ CHECK_TRAP64(r.Call(*i));
+ }
+ }
+}
+
+// kExprI64UConvertF64:
+TEST(Run_Wasm_I64UConvertF64a) {
+ WasmRunner<uint64_t> r(MachineType::Float64());
+ BUILD(r, WASM_I64_UCONVERT_F64(WASM_GET_LOCAL(0)));
+
+ FOR_FLOAT64_INPUTS(i) {
+ if (*i < static_cast<float>(std::numeric_limits<uint64_t>::max()) &&
+ *i > -1) {
+ CHECK_EQ(static_cast<uint64_t>(*i), r.Call(*i));
+ } else {
+ CHECK_TRAP64(r.Call(*i));
+ }
+ }
+}
+
+TEST(Run_WasmCallI64Parameter) {
+ // Build the target function.
+ LocalType param_types[20];
+ for (int i = 0; i < 20; i++) param_types[i] = kAstI64;
+ param_types[3] = kAstI32;
+ param_types[4] = kAstI32;
+ FunctionSig sig(1, 19, param_types);
+ for (int i = 0; i < 19; i++) {
+ TestingModule module;
+ WasmFunctionCompiler t(&sig, &module);
+ if (i == 2 || i == 3) {
+ continue;
+ } else {
+ BUILD(t, WASM_GET_LOCAL(i));
+ }
+ uint32_t index = t.CompileAndAdd();
+
+ // Build the calling function.
+ WasmRunner<int32_t> r(&module);
+ BUILD(
+ r,
+ WASM_I32_CONVERT_I64(WASM_CALL_FUNCTION(
+ index, WASM_I64V_9(0xbcd12340000000b),
+ WASM_I64V_9(0xbcd12340000000c), WASM_I32V_1(0xd),
+ WASM_I32_CONVERT_I64(WASM_I64V_9(0xbcd12340000000e)),
+ WASM_I64V_9(0xbcd12340000000f), WASM_I64V_10(0xbcd1234000000010),
+ WASM_I64V_10(0xbcd1234000000011), WASM_I64V_10(0xbcd1234000000012),
+ WASM_I64V_10(0xbcd1234000000013), WASM_I64V_10(0xbcd1234000000014),
+ WASM_I64V_10(0xbcd1234000000015), WASM_I64V_10(0xbcd1234000000016),
+ WASM_I64V_10(0xbcd1234000000017), WASM_I64V_10(0xbcd1234000000018),
+ WASM_I64V_10(0xbcd1234000000019), WASM_I64V_10(0xbcd123400000001a),
+ WASM_I64V_10(0xbcd123400000001b), WASM_I64V_10(0xbcd123400000001c),
+ WASM_I64V_10(0xbcd123400000001d))));
+
+ CHECK_EQ(i + 0xb, r.Call());
+ }
+}
+
+void TestI64Binop(WasmOpcode opcode, int64_t expected, int64_t a, int64_t b) {
+ {
+ WasmRunner<int64_t> r;
+ // return K op K
+ BUILD(r, WASM_BINOP(opcode, WASM_I64V(a), WASM_I64V(b)));
+ CHECK_EQ(expected, r.Call());
+ }
+ {
+ WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ // return a op b
+ BUILD(r, WASM_BINOP(opcode, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ CHECK_EQ(expected, r.Call(a, b));
+ }
+}
+
+void TestI64Cmp(WasmOpcode opcode, int64_t expected, int64_t a, int64_t b) {
+ {
+ WasmRunner<int32_t> r;
+ // return K op K
+ BUILD(r, WASM_BINOP(opcode, WASM_I64V(a), WASM_I64V(b)));
+ CHECK_EQ(expected, r.Call());
+ }
+ {
+ WasmRunner<int32_t> r(MachineType::Int64(), MachineType::Int64());
+ // return a op b
+ BUILD(r, WASM_BINOP(opcode, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ CHECK_EQ(expected, r.Call(a, b));
+ }
+}
+
+#define TEST_I64_BINOP(name, expected, a, b) \
+ do { \
+ if (WASM_64 || kSupported_##name) \
+ TestI64Binop(kExpr##name, expected, a, b); \
+ } while (false)
+
+TEST(Run_Wasm_I64Binops) {
+ TEST_I64_BINOP(I64Add, -5586332274295447011, 0x501b72ebabc26847,
+ 0x625de9793d8f79d6);
+ TEST_I64_BINOP(I64Sub, 9001903251710731490, 0xf24fe6474640002e,
+ 0x7562b6f711991b4c);
+ TEST_I64_BINOP(I64Mul, -4569547818546064176, 0x231a263c2cbc6451,
+ 0xead44de6bd3e23d0);
+ TEST_I64_BINOP(I64Mul, -25963122347507043, 0x4da1fa47c9352b73,
+ 0x91fe82317aa035af);
+ TEST_I64_BINOP(I64Mul, 7640290486138131960, 0x185731abe8eea47c,
+ 0x714ec59f1380d4c2);
+ TEST_I64_BINOP(I64DivS, -91517, 0x93b1190a34de56a0, 0x00004d8f68863948);
+ TEST_I64_BINOP(I64DivU, 149016, 0xe15b3727e8a2080a, 0x0000631bfa72db8b);
+ TEST_I64_BINOP(I64RemS, -664128064149968, 0x9a78b4e4fe708692,
+ 0x0003e0b6b3be7609);
+ TEST_I64_BINOP(I64RemU, 1742040017332765, 0x0ce84708c6258c81,
+ 0x000a6fde82016697);
+ TEST_I64_BINOP(I64And, 2531040582801836054, 0xaf257d1602644a16,
+ 0x33b290a91a10d997);
+ TEST_I64_BINOP(I64Ior, 8556201506536114940, 0x169d9be7bd3f0a5c,
+ 0x66bca28d77af40e8);
+ TEST_I64_BINOP(I64Xor, -4605655183785456377, 0xb6ea20a5d48e85b8,
+ 0x76ff4da6c80688bf);
+ TEST_I64_BINOP(I64Shl, -7240704056088331264, 0xef4dc1ed030e8ffe, 9);
+ TEST_I64_BINOP(I64ShrU, 12500673744059159, 0xb1a52fa7deec5d14, 10);
+ TEST_I64_BINOP(I64ShrS, 1725103446999874, 0x3107c791461a112b, 11);
+ TEST_I64_BINOP(I64Ror, -8960135652432576946, 0x73418d1717e4e83a, 12);
+ TEST_I64_BINOP(I64Ror, 7617662827409989779, 0xebff67cf0c126d36, 13);
+ TEST_I64_BINOP(I64Rol, -2097714064174346012, 0x43938b8db0b0f230, 14);
+ TEST_I64_BINOP(I64Rol, 8728493013947314237, 0xe07af243ac4d219d, 15);
+}
+
+#define TEST_I64_CMP(name, expected, a, b) \
+ do { \
+ if (WASM_64 || kSupported_##name) TestI64Cmp(kExpr##name, expected, a, b); \
+ } while (false)
+
+TEST(Run_Wasm_I64Compare) {
+ TEST_I64_CMP(I64Eq, 0, 0xB915D8FA494064F0, 0x04D700B2536019A3);
+ TEST_I64_CMP(I64Ne, 1, 0xC2FAFAAAB0446CDC, 0x52A3328F780C97A3);
+ TEST_I64_CMP(I64LtS, 0, 0x673636E6306B0578, 0x028EC9ECA78F7227);
+ TEST_I64_CMP(I64LeS, 1, 0xAE5214114B86A0FA, 0x7C1D21DA3DFD0CCF);
+ TEST_I64_CMP(I64LtU, 0, 0x7D52166381EC1CE0, 0x59F4A6A9E78CD3D8);
+ TEST_I64_CMP(I64LeU, 1, 0xE4169A385C7EA0E0, 0xFBDBED2C8781E5BC);
+ TEST_I64_CMP(I64GtS, 0, 0x9D08FF8FB5F42E81, 0xD4E5C9D7FE09F621);
+ TEST_I64_CMP(I64GeS, 1, 0x78DA3B2F73264E0F, 0x6FE5E2A67C501CBE);
+ TEST_I64_CMP(I64GtU, 0, 0x8F691284E44F7DA9, 0xD5EA9BC1EE149192);
+ TEST_I64_CMP(I64GeU, 0, 0x0886A0C58C7AA224, 0x5DDBE5A81FD7EE47);
+}
+
+TEST(Run_Wasm_I64Clz) {
+ REQUIRE(I64Clz);
+ struct {
+ int64_t expected;
+ uint64_t input;
+ } values[] = {{0, 0x8000100000000000}, {1, 0x4000050000000000},
+ {2, 0x2000030000000000}, {3, 0x1000000300000000},
+ {4, 0x0805000000000000}, {5, 0x0400600000000000},
+ {6, 0x0200000000000000}, {7, 0x010000a000000000},
+ {8, 0x00800c0000000000}, {9, 0x0040000000000000},
+ {10, 0x0020000d00000000}, {11, 0x00100f0000000000},
+ {12, 0x0008000000000000}, {13, 0x0004100000000000},
+ {14, 0x0002002000000000}, {15, 0x0001030000000000},
+ {16, 0x0000804000000000}, {17, 0x0000400500000000},
+ {18, 0x0000205000000000}, {19, 0x0000170000000000},
+ {20, 0x0000087000000000}, {21, 0x0000040500000000},
+ {22, 0x0000020300000000}, {23, 0x0000010100000000},
+ {24, 0x0000008900000000}, {25, 0x0000004100000000},
+ {26, 0x0000002200000000}, {27, 0x0000001300000000},
+ {28, 0x0000000800000000}, {29, 0x0000000400000000},
+ {30, 0x0000000200000000}, {31, 0x0000000100000000},
+ {32, 0x0000000080001000}, {33, 0x0000000040000500},
+ {34, 0x0000000020000300}, {35, 0x0000000010000003},
+ {36, 0x0000000008050000}, {37, 0x0000000004006000},
+ {38, 0x0000000002000000}, {39, 0x00000000010000a0},
+ {40, 0x0000000000800c00}, {41, 0x0000000000400000},
+ {42, 0x000000000020000d}, {43, 0x0000000000100f00},
+ {44, 0x0000000000080000}, {45, 0x0000000000041000},
+ {46, 0x0000000000020020}, {47, 0x0000000000010300},
+ {48, 0x0000000000008040}, {49, 0x0000000000004005},
+ {50, 0x0000000000002050}, {51, 0x0000000000001700},
+ {52, 0x0000000000000870}, {53, 0x0000000000000405},
+ {54, 0x0000000000000203}, {55, 0x0000000000000101},
+ {56, 0x0000000000000089}, {57, 0x0000000000000041},
+ {58, 0x0000000000000022}, {59, 0x0000000000000013},
+ {60, 0x0000000000000008}, {61, 0x0000000000000004},
+ {62, 0x0000000000000002}, {63, 0x0000000000000001},
+ {64, 0x0000000000000000}};
+
+ WasmRunner<int64_t> r(MachineType::Uint64());
+ BUILD(r, WASM_I64_CLZ(WASM_GET_LOCAL(0)));
+ for (size_t i = 0; i < arraysize(values); i++) {
+ CHECK_EQ(values[i].expected, r.Call(values[i].input));
+ }
+}
+
+TEST(Run_Wasm_I64Ctz) {
+ REQUIRE(I64Ctz);
+ struct {
+ int64_t expected;
+ uint64_t input;
+ } values[] = {{64, 0x0000000000000000}, {63, 0x8000000000000000},
+ {62, 0x4000000000000000}, {61, 0x2000000000000000},
+ {60, 0x1000000000000000}, {59, 0xa800000000000000},
+ {58, 0xf400000000000000}, {57, 0x6200000000000000},
+ {56, 0x9100000000000000}, {55, 0xcd80000000000000},
+ {54, 0x0940000000000000}, {53, 0xaf20000000000000},
+ {52, 0xac10000000000000}, {51, 0xe0b8000000000000},
+ {50, 0x9ce4000000000000}, {49, 0xc792000000000000},
+ {48, 0xb8f1000000000000}, {47, 0x3b9f800000000000},
+ {46, 0xdb4c400000000000}, {45, 0xe9a3200000000000},
+ {44, 0xfca6100000000000}, {43, 0x6c8a780000000000},
+ {42, 0x8ce5a40000000000}, {41, 0xcb7d020000000000},
+ {40, 0xcb4dc10000000000}, {39, 0xdfbec58000000000},
+ {38, 0x27a9db4000000000}, {37, 0xde3bcb2000000000},
+ {36, 0xd7e8a61000000000}, {35, 0x9afdbc8800000000},
+ {34, 0x9afdbc8400000000}, {33, 0x9afdbc8200000000},
+ {32, 0x9afdbc8100000000}, {31, 0x0000000080000000},
+ {30, 0x0000000040000000}, {29, 0x0000000020000000},
+ {28, 0x0000000010000000}, {27, 0x00000000a8000000},
+ {26, 0x00000000f4000000}, {25, 0x0000000062000000},
+ {24, 0x0000000091000000}, {23, 0x00000000cd800000},
+ {22, 0x0000000009400000}, {21, 0x00000000af200000},
+ {20, 0x00000000ac100000}, {19, 0x00000000e0b80000},
+ {18, 0x000000009ce40000}, {17, 0x00000000c7920000},
+ {16, 0x00000000b8f10000}, {15, 0x000000003b9f8000},
+ {14, 0x00000000db4c4000}, {13, 0x00000000e9a32000},
+ {12, 0x00000000fca61000}, {11, 0x000000006c8a7800},
+ {10, 0x000000008ce5a400}, {9, 0x00000000cb7d0200},
+ {8, 0x00000000cb4dc100}, {7, 0x00000000dfbec580},
+ {6, 0x0000000027a9db40}, {5, 0x00000000de3bcb20},
+ {4, 0x00000000d7e8a610}, {3, 0x000000009afdbc88},
+ {2, 0x000000009afdbc84}, {1, 0x000000009afdbc82},
+ {0, 0x000000009afdbc81}};
+
+ WasmRunner<int64_t> r(MachineType::Uint64());
+ BUILD(r, WASM_I64_CTZ(WASM_GET_LOCAL(0)));
+ for (size_t i = 0; i < arraysize(values); i++) {
+ CHECK_EQ(values[i].expected, r.Call(values[i].input));
+ }
+}
+
+TEST(Run_Wasm_I64Popcnt) {
+ REQUIRE(I64Popcnt);
+ struct {
+ int64_t expected;
+ uint64_t input;
+ } values[] = {{64, 0xffffffffffffffff},
+ {0, 0x0000000000000000},
+ {2, 0x0000080000008000},
+ {26, 0x1123456782345678},
+ {38, 0xffedcba09edcba09}};
+
+ WasmRunner<int64_t> r(MachineType::Uint64());
+ BUILD(r, WASM_I64_POPCNT(WASM_GET_LOCAL(0)));
+ for (size_t i = 0; i < arraysize(values); i++) {
+ CHECK_EQ(values[i].expected, r.Call(values[i].input));
+ }
+}
+
+// Test the WasmRunner with an Int64 return value and different numbers of
+// Int64 parameters.
+TEST(Run_TestI64WasmRunner) {
+ REQUIRE(I64Param);
+ REQUIRE(I64Xor);
+ {FOR_INT64_INPUTS(i){WasmRunner<int64_t> r;
+ BUILD(r, WASM_I64V(*i));
+ CHECK_EQ(*i, r.Call());
+}
+}
+{
+ WasmRunner<int64_t> r(MachineType::Int64());
+ BUILD(r, WASM_GET_LOCAL(0));
+ FOR_INT64_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+}
+{
+ WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ BUILD(r, WASM_I64_XOR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) { CHECK_EQ(*i ^ *j, r.Call(*i, *j)); }
+ }
+}
+{
+ WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64(),
+ MachineType::Int64());
+ BUILD(r, WASM_I64_XOR(WASM_GET_LOCAL(0),
+ WASM_I64_XOR(WASM_GET_LOCAL(1), WASM_GET_LOCAL(2))));
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) {
+ CHECK_EQ(*i ^ *j ^ *j, r.Call(*i, *j, *j));
+ CHECK_EQ(*j ^ *i ^ *j, r.Call(*j, *i, *j));
+ CHECK_EQ(*j ^ *j ^ *i, r.Call(*j, *j, *i));
+ }
+ }
+}
+{
+ WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64(),
+ MachineType::Int64(), MachineType::Int64());
+ BUILD(r, WASM_I64_XOR(WASM_GET_LOCAL(0),
+ WASM_I64_XOR(WASM_GET_LOCAL(1),
+ WASM_I64_XOR(WASM_GET_LOCAL(2),
+ WASM_GET_LOCAL(3)))));
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) {
+ CHECK_EQ(*i ^ *j ^ *j ^ *j, r.Call(*i, *j, *j, *j));
+ CHECK_EQ(*j ^ *i ^ *j ^ *j, r.Call(*j, *i, *j, *j));
+ CHECK_EQ(*j ^ *j ^ *i ^ *j, r.Call(*j, *j, *i, *j));
+ CHECK_EQ(*j ^ *j ^ *j ^ *i, r.Call(*j, *j, *j, *i));
+ }
+ }
+}
+}
+
+TEST(Run_WasmCall_Int64Sub) {
+ REQUIRE(I64Sub);
+ // Build the target function.
+ TestSignatures sigs;
+ TestingModule module;
+ WasmFunctionCompiler t(sigs.l_ll(), &module);
+ BUILD(t, WASM_I64_SUB(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ uint32_t index = t.CompileAndAdd();
+
+ // Build the caller function.
+ WasmRunner<int64_t> r(&module, MachineType::Int64(), MachineType::Int64());
+ BUILD(r, WASM_CALL_FUNCTION(index, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ int64_t a = static_cast<int64_t>(*i) << 32 |
+ (static_cast<int64_t>(*j) | 0xFFFFFFFF);
+ int64_t b = static_cast<int64_t>(*j) << 32 |
+ (static_cast<int64_t>(*i) | 0xFFFFFFFF);
+
+ int64_t expected = static_cast<int64_t>(static_cast<uint64_t>(a) -
+ static_cast<uint64_t>(b));
+ CHECK_EQ(expected, r.Call(a, b));
+ }
+ }
+}
+
+TEST(Run_Wasm_LoadStoreI64_sx) {
+ REQUIRE(I64LoadStore);
+ REQUIRE(DepthFirst);
+ byte loads[] = {kExprI64LoadMem8S, kExprI64LoadMem16S, kExprI64LoadMem32S,
+ kExprI64LoadMem};
+
+ for (size_t m = 0; m < arraysize(loads); m++) {
+ TestingModule module;
+ byte* memory = module.AddMemoryElems<byte>(16);
+ WasmRunner<int64_t> r(&module);
+
+ byte code[] = {kExprI64StoreMem, ZERO_ALIGNMENT,
+ ZERO_OFFSET, // --
+ kExprI8Const, 8, // --
+ loads[m], ZERO_ALIGNMENT,
+ ZERO_OFFSET, // --
+ kExprI8Const, 0}; // --
+
+ r.Build(code, code + arraysize(code));
+
+ // Try a bunch of different negative values.
+ for (int i = -1; i >= -128; i -= 11) {
+ int size = 1 << m;
+ module.BlankMemory();
+ memory[size - 1] = static_cast<byte>(i); // set the high order byte.
+
+ int64_t expected = static_cast<int64_t>(i) << ((size - 1) * 8);
+
+ CHECK_EQ(expected, r.Call());
+ CHECK_EQ(static_cast<byte>(i), memory[8 + size - 1]);
+ for (int j = size; j < 8; j++) {
+ CHECK_EQ(255, memory[8 + j]);
+ }
+ }
+ }
+}
+
+TEST(Run_Wasm_I64SConvertF32b) {
+ REQUIRE(I64SConvertF32);
+ WasmRunner<int64_t> r(MachineType::Float32());
+ BUILD(r, WASM_I64_SCONVERT_F32(WASM_GET_LOCAL(0)));
+
+ FOR_FLOAT32_INPUTS(i) {
+ if (*i < static_cast<float>(INT64_MAX) &&
+ *i >= static_cast<float>(INT64_MIN)) {
+ CHECK_EQ(static_cast<int64_t>(*i), r.Call(*i));
+ } else {
+ CHECK_TRAP64(r.Call(*i));
+ }
+ }
+}
+
+TEST(Run_Wasm_I64SConvertF64b) {
+ REQUIRE(I64SConvertF64);
+ WasmRunner<int64_t> r(MachineType::Float64());
+ BUILD(r, WASM_I64_SCONVERT_F64(WASM_GET_LOCAL(0)));
+
+ FOR_FLOAT64_INPUTS(i) {
+ if (*i < static_cast<double>(INT64_MAX) &&
+ *i >= static_cast<double>(INT64_MIN)) {
+ CHECK_EQ(static_cast<int64_t>(*i), r.Call(*i));
+ } else {
+ CHECK_TRAP64(r.Call(*i));
+ }
+ }
+}
+
+TEST(Run_Wasm_I64UConvertF32b) {
+ REQUIRE(I64UConvertF32);
+ WasmRunner<uint64_t> r(MachineType::Float32());
+ BUILD(r, WASM_I64_UCONVERT_F32(WASM_GET_LOCAL(0)));
+
+ FOR_FLOAT32_INPUTS(i) {
+ if (*i < static_cast<float>(UINT64_MAX) && *i > -1) {
+ CHECK_EQ(static_cast<uint64_t>(*i), r.Call(*i));
+ } else {
+ CHECK_TRAP64(r.Call(*i));
+ }
+ }
+}
+
+TEST(Run_Wasm_I64UConvertF64b) {
+ REQUIRE(I64UConvertF64);
+ WasmRunner<uint64_t> r(MachineType::Float64());
+ BUILD(r, WASM_I64_UCONVERT_F64(WASM_GET_LOCAL(0)));
+
+ FOR_FLOAT64_INPUTS(i) {
+ if (*i < static_cast<float>(UINT64_MAX) && *i > -1) {
+ CHECK_EQ(static_cast<uint64_t>(*i), r.Call(*i));
+ } else {
+ CHECK_TRAP64(r.Call(*i));
+ }
+ }
+}
+
+TEST(Run_Wasm_I64ReinterpretF64) {
+ REQUIRE(I64ReinterpretF64);
+ TestingModule module;
+ int64_t* memory = module.AddMemoryElems<int64_t>(8);
+ WasmRunner<int64_t> r(&module);
+
+ BUILD(r, WASM_I64_REINTERPRET_F64(
+ WASM_LOAD_MEM(MachineType::Float64(), WASM_ZERO)));
+
+ FOR_INT32_INPUTS(i) {
+ int64_t expected = static_cast<int64_t>(*i) * 0x300010001;
+ memory[0] = expected;
+ CHECK_EQ(expected, r.Call());
+ }
+}
+
+TEST(Run_Wasm_F64ReinterpretI64) {
+ REQUIRE(F64ReinterpretI64);
+ TestingModule module;
+ int64_t* memory = module.AddMemoryElems<int64_t>(8);
+ WasmRunner<int64_t> r(&module, MachineType::Int64());
+
+ BUILD(r, WASM_BLOCK(
+ 2, WASM_STORE_MEM(MachineType::Float64(), WASM_ZERO,
+ WASM_F64_REINTERPRET_I64(WASM_GET_LOCAL(0))),
+ WASM_GET_LOCAL(0)));
+
+ FOR_INT32_INPUTS(i) {
+ int64_t expected = static_cast<int64_t>(*i) * 0x300010001;
+ CHECK_EQ(expected, r.Call(expected));
+ CHECK_EQ(expected, memory[0]);
+ }
+}
+
+TEST(Run_Wasm_LoadMemI64) {
+ REQUIRE(I64LoadStore);
+ TestingModule module;
+ int64_t* memory = module.AddMemoryElems<int64_t>(8);
+ module.RandomizeMemory(1111);
+ WasmRunner<int64_t> r(&module);
+
+ BUILD(r, WASM_LOAD_MEM(MachineType::Int64(), WASM_I8(0)));
+
+ memory[0] = 0xaabbccdd00112233LL;
+ CHECK_EQ(0xaabbccdd00112233LL, r.Call());
+
+ memory[0] = 0x33aabbccdd001122LL;
+ CHECK_EQ(0x33aabbccdd001122LL, r.Call());
+
+ memory[0] = 77777777;
+ CHECK_EQ(77777777, r.Call());
+}
+
+TEST(Run_Wasm_MemI64_Sum) {
+ REQUIRE(I64LoadStore);
+ REQUIRE(I64Add);
+ REQUIRE(I64Sub);
+ REQUIRE(I64Phi);
+ const int kNumElems = 20;
+ TestingModule module;
+ uint64_t* memory = module.AddMemoryElems<uint64_t>(kNumElems);
+ WasmRunner<uint64_t> r(&module, MachineType::Int32());
+ const byte kSum = r.AllocateLocal(kAstI64);
+
+ BUILD(r, WASM_BLOCK(
+ 2, WASM_WHILE(
+ WASM_GET_LOCAL(0),
+ WASM_BLOCK(
+ 2, WASM_SET_LOCAL(
+ kSum, WASM_I64_ADD(
+ WASM_GET_LOCAL(kSum),
+ WASM_LOAD_MEM(MachineType::Int64(),
+ WASM_GET_LOCAL(0)))),
+ WASM_SET_LOCAL(
+ 0, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(8))))),
+ WASM_GET_LOCAL(1)));
+
+ // Run 4 trials.
+ for (int i = 0; i < 3; i++) {
+ module.RandomizeMemory(i * 33);
+ uint64_t expected = 0;
+ for (size_t j = kNumElems - 1; j > 0; j--) {
+ expected += memory[j];
+ }
+ uint64_t result = r.Call(8 * (kNumElems - 1));
+ CHECK_EQ(expected, result);
+ }
+}
+
+TEST(Run_Wasm_I64Global) {
+ REQUIRE(I64LoadStore);
+ REQUIRE(I64SConvertI32);
+ REQUIRE(I64And);
+ REQUIRE(DepthFirst);
+ TestingModule module;
+ int64_t* global = module.AddGlobal<int64_t>(MachineType::Int64());
+ WasmRunner<int32_t> r(&module, MachineType::Int32());
+ // global = global + p0
+ BUILD(r, B2(WASM_STORE_GLOBAL(
+ 0, WASM_I64_AND(WASM_LOAD_GLOBAL(0),
+ WASM_I64_SCONVERT_I32(WASM_GET_LOCAL(0)))),
+ WASM_ZERO));
+
+ *global = 0xFFFFFFFFFFFFFFFFLL;
+ for (int i = 9; i < 444444; i += 111111) {
+ int64_t expected = *global & i;
+ r.Call(i);
+ CHECK_EQ(expected, *global);
+ }
+}
+
+TEST(Run_Wasm_I64Eqz) {
+ REQUIRE(I64Eq);
+
+ WasmRunner<int32_t> r(MachineType::Int64());
+ BUILD(r, WASM_I64_EQZ(WASM_GET_LOCAL(0)));
+
+ FOR_INT64_INPUTS(i) {
+ int32_t result = *i == 0 ? 1 : 0;
+ CHECK_EQ(result, r.Call(*i));
+ }
+}
+
+TEST(Run_Wasm_I64Ror) {
+ REQUIRE(I64Ror);
+ WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ BUILD(r, WASM_I64_ROR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+
+ FOR_UINT64_INPUTS(i) {
+ FOR_UINT64_INPUTS(j) {
+ int64_t expected = bits::RotateRight64(*i, *j & 0x3f);
+ CHECK_EQ(expected, r.Call(*i, *j));
+ }
+ }
+}
+
+TEST(Run_Wasm_I64Rol) {
+ REQUIRE(I64Rol);
+ WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ BUILD(r, WASM_I64_ROL(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+
+ FOR_UINT64_INPUTS(i) {
+ FOR_UINT64_INPUTS(j) {
+ int64_t expected = bits::RotateLeft64(*i, *j & 0x3f);
+ CHECK_EQ(expected, r.Call(*i, *j));
+ }
+ }
+}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-js.cc b/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
index 0b33808781..6d95d6e0fc 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
@@ -54,21 +54,30 @@ uint32_t AddJsFunction(TestingModule* module, FunctionSig* sig,
Handle<JSFunction> jsfunc = Handle<JSFunction>::cast(v8::Utils::OpenHandle(
*v8::Local<v8::Function>::Cast(CompileRun(source))));
module->AddFunction(sig, Handle<Code>::null());
- uint32_t index = static_cast<uint32_t>(module->module->functions->size() - 1);
+ uint32_t index = static_cast<uint32_t>(module->module->functions.size() - 1);
Isolate* isolate = CcTest::InitIsolateOnce();
- Handle<Code> code =
- CompileWasmToJSWrapper(isolate, module, jsfunc, sig, "test");
- module->instance->function_code->at(index) = code;
+ WasmName module_name = {"test", 4};
+ WasmName function_name = {nullptr, 0};
+ Handle<Code> code = CompileWasmToJSWrapper(isolate, module, jsfunc, sig,
+ module_name, function_name);
+ module->instance->function_code[index] = code;
return index;
}
uint32_t AddJSSelector(TestingModule* module, FunctionSig* sig, int which) {
- const int kMaxParams = 8;
- static const char* formals[kMaxParams] = {
- "", "a", "a,b", "a,b,c",
- "a,b,c,d", "a,b,c,d,e", "a,b,c,d,e,f", "a,b,c,d,e,f,g",
- };
+ const int kMaxParams = 11;
+ static const char* formals[kMaxParams] = {"",
+ "a",
+ "a,b",
+ "a,b,c",
+ "a,b,c,d",
+ "a,b,c,d,e",
+ "a,b,c,d,e,f",
+ "a,b,c,d,e,f,g",
+ "a,b,c,d,e,f,g,h",
+ "a,b,c,d,e,f,g,h,i",
+ "a,b,c,d,e,f,g,h,i,j"};
CHECK_LT(which, static_cast<int>(sig->parameter_count()));
CHECK_LT(static_cast<int>(sig->parameter_count()), kMaxParams);
@@ -86,7 +95,7 @@ Handle<JSFunction> WrapCode(ModuleEnv* module, uint32_t index) {
// Wrap the code so it can be called as a JS function.
Handle<String> name = isolate->factory()->NewStringFromStaticChars("main");
Handle<JSObject> module_object = Handle<JSObject>(0, isolate);
- Handle<Code> code = module->instance->function_code->at(index);
+ Handle<Code> code = module->instance->function_code[index];
WasmJs::InstallWasmFunctionMap(isolate, isolate->native_context());
return compiler::CompileJSToWasmWrapper(isolate, module, name, code,
module_object, index);
@@ -171,8 +180,6 @@ TEST(Run_I32Popcount_jswrapped) {
}
-#if !V8_TARGET_ARCH_ARM64
-// TODO(titzer): dynamic frame alignment on arm64
TEST(Run_CallJS_Add_jswrapped) {
TestSignatures sigs;
TestingModule module;
@@ -187,12 +194,9 @@ TEST(Run_CallJS_Add_jswrapped) {
EXPECT_CALL(199, jsfunc, 100, -1);
EXPECT_CALL(-666666801, jsfunc, -666666900, -1);
}
-#endif
void RunJSSelectTest(int which) {
-#if !V8_TARGET_ARCH_ARM
- // TODO(titzer): fix tests on arm and reenable
const int kMaxParams = 8;
PredictableInputValues inputs(0x100);
LocalType type = kAstF64;
@@ -223,7 +227,6 @@ void RunJSSelectTest(int which) {
double expected = inputs.arg_d(which);
EXPECT_CALL(expected, jsfunc, 0.0, 0.0);
}
-#endif
}
@@ -296,10 +299,11 @@ TEST(Run_WASMSelect_7) { RunWASMSelectTest(7); }
void RunWASMSelectAlignTest(int num_args, int num_params) {
PredictableInputValues inputs(0x300);
Isolate* isolate = CcTest::InitIsolateOnce();
- const int kMaxParams = 4;
+ const int kMaxParams = 10;
DCHECK_LE(num_args, kMaxParams);
LocalType type = kAstF64;
- LocalType types[kMaxParams + 1] = {type, type, type, type, type};
+ LocalType types[kMaxParams + 1] = {type, type, type, type, type, type,
+ type, type, type, type, type};
FunctionSig sig(1, num_params, types);
for (int which = 0; which < num_params; which++) {
@@ -308,12 +312,16 @@ void RunWASMSelectAlignTest(int num_args, int num_params) {
BUILD(t, WASM_GET_LOCAL(which));
Handle<JSFunction> jsfunc = WrapCode(&module, t.CompileAndAdd());
- Handle<Object> args[] = {
- isolate->factory()->NewNumber(inputs.arg_d(0)),
- isolate->factory()->NewNumber(inputs.arg_d(1)),
- isolate->factory()->NewNumber(inputs.arg_d(2)),
- isolate->factory()->NewNumber(inputs.arg_d(3)),
- };
+ Handle<Object> args[] = {isolate->factory()->NewNumber(inputs.arg_d(0)),
+ isolate->factory()->NewNumber(inputs.arg_d(1)),
+ isolate->factory()->NewNumber(inputs.arg_d(2)),
+ isolate->factory()->NewNumber(inputs.arg_d(3)),
+ isolate->factory()->NewNumber(inputs.arg_d(4)),
+ isolate->factory()->NewNumber(inputs.arg_d(5)),
+ isolate->factory()->NewNumber(inputs.arg_d(6)),
+ isolate->factory()->NewNumber(inputs.arg_d(7)),
+ isolate->factory()->NewNumber(inputs.arg_d(8)),
+ isolate->factory()->NewNumber(inputs.arg_d(9))};
double nan = std::numeric_limits<double>::quiet_NaN();
double expected = which < num_args ? inputs.arg_d(which) : nan;
@@ -351,16 +359,43 @@ TEST(Run_WASMSelectAlign_4) {
RunWASMSelectAlignTest(4, 4);
}
+TEST(Run_WASMSelectAlign_7) {
+ RunWASMSelectAlignTest(7, 5);
+ RunWASMSelectAlignTest(7, 6);
+ RunWASMSelectAlignTest(7, 7);
+}
+
+TEST(Run_WASMSelectAlign_8) {
+ RunWASMSelectAlignTest(8, 5);
+ RunWASMSelectAlignTest(8, 6);
+ RunWASMSelectAlignTest(8, 7);
+ RunWASMSelectAlignTest(8, 8);
+}
+
+TEST(Run_WASMSelectAlign_9) {
+ RunWASMSelectAlignTest(9, 6);
+ RunWASMSelectAlignTest(9, 7);
+ RunWASMSelectAlignTest(9, 8);
+ RunWASMSelectAlignTest(9, 9);
+}
+
+TEST(Run_WASMSelectAlign_10) {
+ RunWASMSelectAlignTest(10, 7);
+ RunWASMSelectAlignTest(10, 8);
+ RunWASMSelectAlignTest(10, 9);
+ RunWASMSelectAlignTest(10, 10);
+}
void RunJSSelectAlignTest(int num_args, int num_params) {
PredictableInputValues inputs(0x400);
Isolate* isolate = CcTest::InitIsolateOnce();
Factory* factory = isolate->factory();
- const int kMaxParams = 4;
+ const int kMaxParams = 10;
CHECK_LE(num_args, kMaxParams);
CHECK_LE(num_params, kMaxParams);
LocalType type = kAstF64;
- LocalType types[kMaxParams + 1] = {type, type, type, type, type};
+ LocalType types[kMaxParams + 1] = {type, type, type, type, type, type,
+ type, type, type, type, type};
FunctionSig sig(1, num_params, types);
// Build the calling code.
@@ -390,6 +425,12 @@ void RunJSSelectAlignTest(int num_args, int num_params) {
factory->NewNumber(inputs.arg_d(1)),
factory->NewNumber(inputs.arg_d(2)),
factory->NewNumber(inputs.arg_d(3)),
+ factory->NewNumber(inputs.arg_d(4)),
+ factory->NewNumber(inputs.arg_d(5)),
+ factory->NewNumber(inputs.arg_d(6)),
+ factory->NewNumber(inputs.arg_d(7)),
+ factory->NewNumber(inputs.arg_d(8)),
+ factory->NewNumber(inputs.arg_d(9)),
};
double nan = std::numeric_limits<double>::quiet_NaN();
@@ -404,29 +445,50 @@ TEST(Run_JSSelectAlign_0) {
RunJSSelectAlignTest(0, 2);
}
+TEST(Run_JSSelectAlign_1) {
+ RunJSSelectAlignTest(1, 2);
+ RunJSSelectAlignTest(1, 3);
+}
TEST(Run_JSSelectAlign_2) {
RunJSSelectAlignTest(2, 3);
RunJSSelectAlignTest(2, 4);
}
+TEST(Run_JSSelectAlign_3) {
+ RunJSSelectAlignTest(3, 3);
+ RunJSSelectAlignTest(3, 4);
+}
TEST(Run_JSSelectAlign_4) {
RunJSSelectAlignTest(4, 3);
RunJSSelectAlignTest(4, 4);
}
+TEST(Run_JSSelectAlign_7) {
+ RunJSSelectAlignTest(7, 3);
+ RunJSSelectAlignTest(7, 4);
+ RunJSSelectAlignTest(7, 4);
+ RunJSSelectAlignTest(7, 4);
+}
-#if !V8_TARGET_ARCH_ARM64
-// TODO(titzer): dynamic frame alignment on arm64
-TEST(Run_JSSelectAlign_1) {
- RunJSSelectAlignTest(1, 2);
- RunJSSelectAlignTest(1, 3);
+TEST(Run_JSSelectAlign_8) {
+ RunJSSelectAlignTest(8, 5);
+ RunJSSelectAlignTest(8, 6);
+ RunJSSelectAlignTest(8, 7);
+ RunJSSelectAlignTest(8, 8);
}
+TEST(Run_JSSelectAlign_9) {
+ RunJSSelectAlignTest(9, 6);
+ RunJSSelectAlignTest(9, 7);
+ RunJSSelectAlignTest(9, 8);
+ RunJSSelectAlignTest(9, 9);
+}
-TEST(Run_JSSelectAlign_3) {
- RunJSSelectAlignTest(3, 3);
- RunJSSelectAlignTest(3, 4);
+TEST(Run_JSSelectAlign_10) {
+ RunJSSelectAlignTest(10, 7);
+ RunJSSelectAlignTest(10, 8);
+ RunJSSelectAlignTest(10, 9);
+ RunJSSelectAlignTest(10, 10);
}
-#endif
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
index 905e8e4932..118a91f3e9 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
@@ -19,7 +19,6 @@ using namespace v8::internal::compiler;
using namespace v8::internal::wasm;
-#if !V8_TARGET_ARCH_ARM64
// TODO(titzer): fix arm64 frame alignment.
namespace {
void TestModule(WasmModuleIndex* module, int32_t expected_result) {
@@ -36,19 +35,24 @@ void TestModule(WasmModuleIndex* module, int32_t expected_result) {
// A raw test that skips the WasmModuleBuilder.
TEST(Run_WasmModule_CallAdd_rev) {
static const byte data[] = {
+ WASM_MODULE_HEADER,
// sig#0 ------------------------------------------
- kDeclSignatures, 2, 0, kLocalI32, // void -> int
- 2, kLocalI32, kLocalI32, kLocalI32, // int,int -> int
+ WASM_SECTION_SIGNATURES_SIZE + 7, // Section size.
+ WASM_SECTION_SIGNATURES, 2, 0, kLocalI32, // void -> int
+ 2, kLocalI32, kLocalI32, kLocalI32, // int,int -> int
// func#0 (main) ----------------------------------
- kDeclFunctions, 2, kDeclFunctionExport, 0, 0, // sig index
- 6, 0, // body size
- kExprCallFunction, 1, // --
- kExprI8Const, 77, // --
- kExprI8Const, 22, // --
+ WASM_SECTION_FUNCTIONS_SIZE + 24, WASM_SECTION_FUNCTIONS, 2,
+ kDeclFunctionExport, 0, 0, // sig index
+ 7, 0, // body size
+ 0, // locals
+ kExprCallFunction, 1, // --
+ kExprI8Const, 77, // --
+ kExprI8Const, 22, // --
// func#1 -----------------------------------------
0, // no name, not exported
1, 0, // sig index
- 5, 0, // body size
+ 6, 0, // body size
+ 0, // locals
kExprI32Add, // --
kExprGetLocal, 0, // --
kExprGetLocal, 1, // --
@@ -65,7 +69,8 @@ TEST(Run_WasmModule_CallAdd_rev) {
TEST(Run_WasmModule_Return114) {
static const int32_t kReturnValue = 114;
- Zone zone;
+ v8::base::AccountingAllocator allocator;
+ Zone zone(&allocator);
WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
uint16_t f_index = builder->AddFunction();
WasmFunctionBuilder* f = builder->FunctionAt(f_index);
@@ -79,7 +84,8 @@ TEST(Run_WasmModule_Return114) {
TEST(Run_WasmModule_CallAdd) {
- Zone zone;
+ v8::base::AccountingAllocator allocator;
+ Zone zone(&allocator);
WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
uint16_t f1_index = builder->AddFunction();
WasmFunctionBuilder* f = builder->FunctionAt(f1_index);
@@ -102,7 +108,8 @@ TEST(Run_WasmModule_CallAdd) {
TEST(Run_WasmModule_ReadLoadedDataSegment) {
static const byte kDataSegmentDest0 = 12;
- Zone zone;
+ v8::base::AccountingAllocator allocator;
+ Zone zone(&allocator);
WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
uint16_t f_index = builder->AddFunction();
WasmFunctionBuilder* f = builder->FunctionAt(f_index);
@@ -118,19 +125,10 @@ TEST(Run_WasmModule_ReadLoadedDataSegment) {
TestModule(writer->WriteTo(&zone), 0xddccbbaa);
}
-
-#if defined(__has_feature)
-#if __has_feature(address_sanitizer)
-#define V8_WITH_ASAN 1
-#endif
-#endif
-
-
-#if !defined(V8_WITH_ASAN)
-// TODO(bradnelson): Figure out why this crashes under asan.
TEST(Run_WasmModule_CheckMemoryIsZero) {
static const int kCheckSize = 16 * 1024;
- Zone zone;
+ v8::base::AccountingAllocator allocator;
+ Zone zone(&allocator);
WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
uint16_t f_index = builder->AddFunction();
WasmFunctionBuilder* f = builder->FunctionAt(f_index);
@@ -140,23 +138,19 @@ TEST(Run_WasmModule_CheckMemoryIsZero) {
byte code[] = {WASM_BLOCK(
2,
WASM_WHILE(
- WASM_I32_LTS(WASM_GET_LOCAL(localIndex), WASM_I32(kCheckSize)),
+ WASM_I32_LTS(WASM_GET_LOCAL(localIndex), WASM_I32V_3(kCheckSize)),
WASM_IF_ELSE(
WASM_LOAD_MEM(MachineType::Int32(), WASM_GET_LOCAL(localIndex)),
WASM_BRV(2, WASM_I8(-1)), WASM_INC_LOCAL_BY(localIndex, 4))),
WASM_I8(11))};
- uint32_t local_indices[] = {7, 19, 25, 28};
- f->EmitCode(code, sizeof(code), local_indices, sizeof(local_indices) / 4);
+ f->EmitCode(code, sizeof(code), nullptr, 0);
WasmModuleWriter* writer = builder->Build(&zone);
TestModule(writer->WriteTo(&zone), 11);
}
-#endif
-
-#if !defined(V8_WITH_ASAN)
-// TODO(bradnelson): Figure out why this crashes under asan.
TEST(Run_WasmModule_CallMain_recursive) {
- Zone zone;
+ v8::base::AccountingAllocator allocator;
+ Zone zone(&allocator);
WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
uint16_t f_index = builder->AddFunction();
WasmFunctionBuilder* f = builder->FunctionAt(f_index);
@@ -171,18 +165,14 @@ TEST(Run_WasmModule_CallMain_recursive) {
WASM_INC_LOCAL(localIndex)),
WASM_BRV(1, WASM_CALL_FUNCTION0(0))),
WASM_BRV(0, WASM_I8(55))))};
- uint32_t local_indices[] = {3, 11, 21, 24};
- f->EmitCode(code, sizeof(code), local_indices, sizeof(local_indices) / 4);
+ f->EmitCode(code, sizeof(code), nullptr, 0);
WasmModuleWriter* writer = builder->Build(&zone);
TestModule(writer->WriteTo(&zone), 55);
}
-#endif
-
-#if !defined(V8_WITH_ASAN)
-// TODO(bradnelson): Figure out why this crashes under asan.
TEST(Run_WasmModule_Global) {
- Zone zone;
+ v8::base::AccountingAllocator allocator;
+ Zone zone(&allocator);
WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
uint32_t global1 = builder->AddGlobal(MachineType::Int32(), 0);
uint32_t global2 = builder->AddGlobal(MachineType::Int32(), 0);
@@ -196,13 +186,10 @@ TEST(Run_WasmModule_Global) {
f = builder->FunctionAt(f2_index);
f->ReturnType(kAstI32);
f->Exported(1);
- byte code2[] = {WASM_STORE_GLOBAL(global1, WASM_I32(56)),
- WASM_STORE_GLOBAL(global2, WASM_I32(41)),
+ byte code2[] = {WASM_STORE_GLOBAL(global1, WASM_I32V_1(56)),
+ WASM_STORE_GLOBAL(global2, WASM_I32V_1(41)),
WASM_RETURN(WASM_CALL_FUNCTION0(f1_index))};
f->EmitCode(code2, sizeof(code2));
WasmModuleWriter* writer = builder->Build(&zone);
TestModule(writer->WriteTo(&zone), 97);
}
-#endif
-
-#endif // !V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm.cc b/deps/v8/test/cctest/wasm/test-run-wasm.cc
index a6f07f7af0..70d461b627 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm.cc
@@ -18,12 +18,11 @@ using namespace v8::internal;
using namespace v8::internal::compiler;
using namespace v8::internal::wasm;
-#define BUILD(r, ...) \
- do { \
- byte code[] = {__VA_ARGS__}; \
- r.Build(code, code + arraysize(code)); \
- } while (false)
-
+// for even shorter tests.
+#define B2(a, b) kExprBlock, 2, a, b
+#define B1(a) kExprBlock, 1, a
+#define RET(x) kExprReturn, x
+#define RET_I8(x) kExprReturn, kExprI8Const, x
TEST(Run_WasmInt8Const) {
WasmRunner<int32_t> r;
@@ -67,7 +66,7 @@ TEST(Run_WasmInt32Const) {
WasmRunner<int32_t> r;
const int32_t kExpectedValue = 0x11223344;
// return(kExpectedValue)
- BUILD(r, WASM_I32(kExpectedValue));
+ BUILD(r, WASM_I32V_5(kExpectedValue));
CHECK_EQ(kExpectedValue, r.Call());
}
@@ -77,7 +76,7 @@ TEST(Run_WasmInt32Const_many) {
WasmRunner<int32_t> r;
const int32_t kExpectedValue = *i;
// return(kExpectedValue)
- BUILD(r, WASM_I32(kExpectedValue));
+ BUILD(r, WASM_I32V(kExpectedValue));
CHECK_EQ(kExpectedValue, r.Call());
}
}
@@ -92,47 +91,6 @@ TEST(Run_WasmMemorySize) {
}
-#if WASM_64
-TEST(Run_WasmInt64Const) {
- WasmRunner<int64_t> r;
- const int64_t kExpectedValue = 0x1122334455667788LL;
- // return(kExpectedValue)
- BUILD(r, WASM_I64(kExpectedValue));
- CHECK_EQ(kExpectedValue, r.Call());
-}
-
-
-TEST(Run_WasmInt64Const_many) {
- int cntr = 0;
- FOR_INT32_INPUTS(i) {
- WasmRunner<int64_t> r;
- const int64_t kExpectedValue = (static_cast<int64_t>(*i) << 32) | cntr;
- // return(kExpectedValue)
- BUILD(r, WASM_I64(kExpectedValue));
- CHECK_EQ(kExpectedValue, r.Call());
- cntr++;
- }
-}
-#endif
-
-TEST(Run_WasmI32ConvertI64) {
- FOR_INT64_INPUTS(i) {
- WasmRunner<int32_t> r;
- BUILD(r, WASM_I32_CONVERT_I64(WASM_I64(*i)));
- CHECK_EQ(static_cast<int32_t>(*i), r.Call());
- }
-}
-
-TEST(Run_WasmI64AndConstants) {
- FOR_INT64_INPUTS(i) {
- FOR_INT64_INPUTS(j) {
- WasmRunner<int32_t> r;
- BUILD(r, WASM_I32_CONVERT_I64(WASM_I64_AND(WASM_I64(*i), WASM_I64(*j))));
- CHECK_EQ(static_cast<int32_t>(*i & *j), r.Call());
- }
- }
-}
-
TEST(Run_WasmInt32Param0) {
WasmRunner<int32_t> r(MachineType::Int32());
// return(local[0])
@@ -216,7 +174,7 @@ void TestInt32Binop(WasmOpcode opcode, int32_t expected, int32_t a, int32_t b) {
{
WasmRunner<int32_t> r;
// K op K
- BUILD(r, WASM_BINOP(opcode, WASM_I32(a), WASM_I32(b)));
+ BUILD(r, WASM_BINOP(opcode, WASM_I32V(a), WASM_I32V(b)));
CHECK_EQ(expected, r.Call());
}
{
@@ -241,6 +199,10 @@ TEST(Run_WasmInt32Binops) {
TestInt32Binop(kExprI32Shl, 0xA0000000, 0xA, 28);
TestInt32Binop(kExprI32ShrU, 0x07000010, 0x70000100, 4);
TestInt32Binop(kExprI32ShrS, 0xFF000000, 0x80000000, 7);
+ TestInt32Binop(kExprI32Ror, 0x01000000, 0x80000000, 7);
+ TestInt32Binop(kExprI32Ror, 0x01000000, 0x80000000, 39);
+ TestInt32Binop(kExprI32Rol, 0x00000040, 0x80000000, 7);
+ TestInt32Binop(kExprI32Rol, 0x00000040, 0x80000000, 39);
TestInt32Binop(kExprI32Eq, 1, -99, -99);
TestInt32Binop(kExprI32Ne, 0, -97, -97);
@@ -260,7 +222,7 @@ void TestInt32Unop(WasmOpcode opcode, int32_t expected, int32_t a) {
{
WasmRunner<int32_t> r;
// return op K
- BUILD(r, WASM_UNOP(opcode, WASM_I32(a)));
+ BUILD(r, WASM_UNOP(opcode, WASM_I32V(a)));
CHECK_EQ(expected, r.Call());
}
{
@@ -354,209 +316,83 @@ TEST(Run_WasmInt32Popcnt) {
TestInt32Unop(kExprI32Popcnt, 19, 0xfedcba09);
}
-
-#if WASM_64
-void TestInt64Binop(WasmOpcode opcode, int64_t expected, int64_t a, int64_t b) {
- if (!WasmOpcodes::IsSupported(opcode)) return;
- {
- WasmRunner<int64_t> r;
- // return K op K
- BUILD(r, WASM_BINOP(opcode, WASM_I64(a), WASM_I64(b)));
- CHECK_EQ(expected, r.Call());
- }
- {
- WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
- // return a op b
- BUILD(r, WASM_BINOP(opcode, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- CHECK_EQ(expected, r.Call(a, b));
- }
+TEST(Run_WasmI32Eqz) {
+ TestInt32Unop(kExprI32Eqz, 0, 1);
+ TestInt32Unop(kExprI32Eqz, 0, -1);
+ TestInt32Unop(kExprI32Eqz, 0, -827343);
+ TestInt32Unop(kExprI32Eqz, 0, 8888888);
+ TestInt32Unop(kExprI32Eqz, 1, 0);
}
+TEST(Run_WasmI32Shl) {
+ WasmRunner<uint32_t> r(MachineType::Uint32(), MachineType::Uint32());
+ BUILD(r, WASM_I32_SHL(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
-void TestInt64Cmp(WasmOpcode opcode, int64_t expected, int64_t a, int64_t b) {
- if (!WasmOpcodes::IsSupported(opcode)) return;
- {
- WasmRunner<int32_t> r;
- // return K op K
- BUILD(r, WASM_BINOP(opcode, WASM_I64(a), WASM_I64(b)));
- CHECK_EQ(expected, r.Call());
- }
- {
- WasmRunner<int32_t> r(MachineType::Int64(), MachineType::Int64());
- // return a op b
- BUILD(r, WASM_BINOP(opcode, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- CHECK_EQ(expected, r.Call(a, b));
+ FOR_UINT32_INPUTS(i) {
+ FOR_UINT32_INPUTS(j) {
+ uint32_t expected = (*i) << (*j & 0x1f);
+ CHECK_EQ(expected, r.Call(*i, *j));
+ }
}
}
+TEST(Run_WasmI32Shr) {
+ WasmRunner<uint32_t> r(MachineType::Uint32(), MachineType::Uint32());
+ BUILD(r, WASM_I32_SHR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
-TEST(Run_WasmInt64Binops) {
- // TODO(titzer): real 64-bit numbers
- TestInt64Binop(kExprI64Add, 8888888888888LL, 3333333333333LL,
- 5555555555555LL);
- TestInt64Binop(kExprI64Sub, -111111111111LL, 777777777777LL, 888888888888LL);
- TestInt64Binop(kExprI64Mul, 65130756, 88734, 734);
- TestInt64Binop(kExprI64DivS, -66, -4777344, 72384);
- TestInt64Binop(kExprI64DivU, 805306368, 0xF0000000, 5);
- TestInt64Binop(kExprI64RemS, -3, -3003, 1000);
- TestInt64Binop(kExprI64RemU, 4, 4004, 1000);
- TestInt64Binop(kExprI64And, 0xEE, 0xFFEE, 0xFF0000FF);
- TestInt64Binop(kExprI64Ior, 0xF0FF00FF, 0xF0F000EE, 0x000F0011);
- TestInt64Binop(kExprI64Xor, 0xABCDEF01, 0xABCDEFFF, 0xFE);
- TestInt64Binop(kExprI64Shl, 0xA0000000, 0xA, 28);
- TestInt64Binop(kExprI64ShrU, 0x0700001000123456LL, 0x7000010001234567LL, 4);
- TestInt64Binop(kExprI64ShrS, 0xFF00000000000000LL, 0x8000000000000000LL, 7);
- TestInt64Cmp(kExprI64Eq, 1, -9999, -9999);
- TestInt64Cmp(kExprI64Ne, 1, -9199, -9999);
- TestInt64Cmp(kExprI64LtS, 1, -4, 4);
- TestInt64Cmp(kExprI64LeS, 0, -2, -3);
- TestInt64Cmp(kExprI64LtU, 1, 0, -6);
- TestInt64Cmp(kExprI64LeU, 1, 98978, 0xF0000000);
-}
-
-
-TEST(Run_WasmInt64Clz) {
- struct {
- int64_t expected;
- uint64_t input;
- } values[] = {{0, 0x8000100000000000}, {1, 0x4000050000000000},
- {2, 0x2000030000000000}, {3, 0x1000000300000000},
- {4, 0x0805000000000000}, {5, 0x0400600000000000},
- {6, 0x0200000000000000}, {7, 0x010000a000000000},
- {8, 0x00800c0000000000}, {9, 0x0040000000000000},
- {10, 0x0020000d00000000}, {11, 0x00100f0000000000},
- {12, 0x0008000000000000}, {13, 0x0004100000000000},
- {14, 0x0002002000000000}, {15, 0x0001030000000000},
- {16, 0x0000804000000000}, {17, 0x0000400500000000},
- {18, 0x0000205000000000}, {19, 0x0000170000000000},
- {20, 0x0000087000000000}, {21, 0x0000040500000000},
- {22, 0x0000020300000000}, {23, 0x0000010100000000},
- {24, 0x0000008900000000}, {25, 0x0000004100000000},
- {26, 0x0000002200000000}, {27, 0x0000001300000000},
- {28, 0x0000000800000000}, {29, 0x0000000400000000},
- {30, 0x0000000200000000}, {31, 0x0000000100000000},
- {32, 0x0000000080001000}, {33, 0x0000000040000500},
- {34, 0x0000000020000300}, {35, 0x0000000010000003},
- {36, 0x0000000008050000}, {37, 0x0000000004006000},
- {38, 0x0000000002000000}, {39, 0x00000000010000a0},
- {40, 0x0000000000800c00}, {41, 0x0000000000400000},
- {42, 0x000000000020000d}, {43, 0x0000000000100f00},
- {44, 0x0000000000080000}, {45, 0x0000000000041000},
- {46, 0x0000000000020020}, {47, 0x0000000000010300},
- {48, 0x0000000000008040}, {49, 0x0000000000004005},
- {50, 0x0000000000002050}, {51, 0x0000000000001700},
- {52, 0x0000000000000870}, {53, 0x0000000000000405},
- {54, 0x0000000000000203}, {55, 0x0000000000000101},
- {56, 0x0000000000000089}, {57, 0x0000000000000041},
- {58, 0x0000000000000022}, {59, 0x0000000000000013},
- {60, 0x0000000000000008}, {61, 0x0000000000000004},
- {62, 0x0000000000000002}, {63, 0x0000000000000001},
- {64, 0x0000000000000000}};
-
- WasmRunner<int64_t> r(MachineType::Uint64());
- BUILD(r, WASM_I64_CLZ(WASM_GET_LOCAL(0)));
- for (size_t i = 0; i < arraysize(values); i++) {
- CHECK_EQ(values[i].expected, r.Call(values[i].input));
- }
-}
-
-
-TEST(Run_WasmInt64Ctz) {
- struct {
- int64_t expected;
- uint64_t input;
- } values[] = {{64, 0x0000000000000000}, {63, 0x8000000000000000},
- {62, 0x4000000000000000}, {61, 0x2000000000000000},
- {60, 0x1000000000000000}, {59, 0xa800000000000000},
- {58, 0xf400000000000000}, {57, 0x6200000000000000},
- {56, 0x9100000000000000}, {55, 0xcd80000000000000},
- {54, 0x0940000000000000}, {53, 0xaf20000000000000},
- {52, 0xac10000000000000}, {51, 0xe0b8000000000000},
- {50, 0x9ce4000000000000}, {49, 0xc792000000000000},
- {48, 0xb8f1000000000000}, {47, 0x3b9f800000000000},
- {46, 0xdb4c400000000000}, {45, 0xe9a3200000000000},
- {44, 0xfca6100000000000}, {43, 0x6c8a780000000000},
- {42, 0x8ce5a40000000000}, {41, 0xcb7d020000000000},
- {40, 0xcb4dc10000000000}, {39, 0xdfbec58000000000},
- {38, 0x27a9db4000000000}, {37, 0xde3bcb2000000000},
- {36, 0xd7e8a61000000000}, {35, 0x9afdbc8800000000},
- {34, 0x9afdbc8400000000}, {33, 0x9afdbc8200000000},
- {32, 0x9afdbc8100000000}, {31, 0x0000000080000000},
- {30, 0x0000000040000000}, {29, 0x0000000020000000},
- {28, 0x0000000010000000}, {27, 0x00000000a8000000},
- {26, 0x00000000f4000000}, {25, 0x0000000062000000},
- {24, 0x0000000091000000}, {23, 0x00000000cd800000},
- {22, 0x0000000009400000}, {21, 0x00000000af200000},
- {20, 0x00000000ac100000}, {19, 0x00000000e0b80000},
- {18, 0x000000009ce40000}, {17, 0x00000000c7920000},
- {16, 0x00000000b8f10000}, {15, 0x000000003b9f8000},
- {14, 0x00000000db4c4000}, {13, 0x00000000e9a32000},
- {12, 0x00000000fca61000}, {11, 0x000000006c8a7800},
- {10, 0x000000008ce5a400}, {9, 0x00000000cb7d0200},
- {8, 0x00000000cb4dc100}, {7, 0x00000000dfbec580},
- {6, 0x0000000027a9db40}, {5, 0x00000000de3bcb20},
- {4, 0x00000000d7e8a610}, {3, 0x000000009afdbc88},
- {2, 0x000000009afdbc84}, {1, 0x000000009afdbc82},
- {0, 0x000000009afdbc81}};
-
- WasmRunner<int64_t> r(MachineType::Uint64());
- BUILD(r, WASM_I64_CTZ(WASM_GET_LOCAL(0)));
- for (size_t i = 0; i < arraysize(values); i++) {
- CHECK_EQ(values[i].expected, r.Call(values[i].input));
- }
-}
-
-
-TEST(Run_WasmInt64Popcnt) {
- struct {
- int64_t expected;
- uint64_t input;
- } values[] = {{64, 0xffffffffffffffff},
- {0, 0x0000000000000000},
- {2, 0x0000080000008000},
- {26, 0x1123456782345678},
- {38, 0xffedcba09edcba09}};
-
- WasmRunner<int64_t> r(MachineType::Uint64());
- BUILD(r, WASM_I64_POPCNT(WASM_GET_LOCAL(0)));
- for (size_t i = 0; i < arraysize(values); i++) {
- CHECK_EQ(values[i].expected, r.Call(values[i].input));
+ FOR_UINT32_INPUTS(i) {
+ FOR_UINT32_INPUTS(j) {
+ uint32_t expected = (*i) >> (*j & 0x1f);
+ CHECK_EQ(expected, r.Call(*i, *j));
+ }
}
}
+TEST(Run_WasmI32Sar) {
+ WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
+ BUILD(r, WASM_I32_SAR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
-#endif
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ int32_t expected = (*i) >> (*j & 0x1f);
+ CHECK_EQ(expected, r.Call(*i, *j));
+ }
+ }
+}
TEST(Run_WASM_Int32DivS_trap) {
WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
BUILD(r, WASM_I32_DIVS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ const int32_t kMin = std::numeric_limits<int32_t>::min();
CHECK_EQ(0, r.Call(0, 100));
CHECK_TRAP(r.Call(100, 0));
CHECK_TRAP(r.Call(-1001, 0));
- CHECK_TRAP(r.Call(std::numeric_limits<int32_t>::min(), -1));
- CHECK_TRAP(r.Call(std::numeric_limits<int32_t>::min(), 0));
+ CHECK_TRAP(r.Call(kMin, -1));
+ CHECK_TRAP(r.Call(kMin, 0));
}
TEST(Run_WASM_Int32RemS_trap) {
WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
BUILD(r, WASM_I32_REMS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ const int32_t kMin = std::numeric_limits<int32_t>::min();
CHECK_EQ(33, r.Call(133, 100));
- CHECK_EQ(0, r.Call(std::numeric_limits<int32_t>::min(), -1));
+ CHECK_EQ(0, r.Call(kMin, -1));
CHECK_TRAP(r.Call(100, 0));
CHECK_TRAP(r.Call(-1001, 0));
- CHECK_TRAP(r.Call(std::numeric_limits<int32_t>::min(), 0));
+ CHECK_TRAP(r.Call(kMin, 0));
}
TEST(Run_WASM_Int32DivU_trap) {
WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
BUILD(r, WASM_I32_DIVU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ const int32_t kMin = std::numeric_limits<int32_t>::min();
CHECK_EQ(0, r.Call(0, 100));
- CHECK_EQ(0, r.Call(std::numeric_limits<int32_t>::min(), -1));
+ CHECK_EQ(0, r.Call(kMin, -1));
CHECK_TRAP(r.Call(100, 0));
CHECK_TRAP(r.Call(-1001, 0));
- CHECK_TRAP(r.Call(std::numeric_limits<int32_t>::min(), 0));
+ CHECK_TRAP(r.Call(kMin, 0));
}
@@ -564,11 +400,63 @@ TEST(Run_WASM_Int32RemU_trap) {
WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
BUILD(r, WASM_I32_REMU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
CHECK_EQ(17, r.Call(217, 100));
+ const int32_t kMin = std::numeric_limits<int32_t>::min();
CHECK_TRAP(r.Call(100, 0));
CHECK_TRAP(r.Call(-1001, 0));
- CHECK_TRAP(r.Call(std::numeric_limits<int32_t>::min(), 0));
- CHECK_EQ(std::numeric_limits<int32_t>::min(),
- r.Call(std::numeric_limits<int32_t>::min(), -1));
+ CHECK_TRAP(r.Call(kMin, 0));
+ CHECK_EQ(kMin, r.Call(kMin, -1));
+}
+
+TEST(Run_WASM_Int32DivS_asmjs) {
+ TestingModule module;
+ module.origin = kAsmJsOrigin;
+ WasmRunner<int32_t> r(&module, MachineType::Int32(), MachineType::Int32());
+ BUILD(r, WASM_I32_DIVS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ const int32_t kMin = std::numeric_limits<int32_t>::min();
+ CHECK_EQ(0, r.Call(0, 100));
+ CHECK_EQ(0, r.Call(100, 0));
+ CHECK_EQ(0, r.Call(-1001, 0));
+ CHECK_EQ(kMin, r.Call(kMin, -1));
+ CHECK_EQ(0, r.Call(kMin, 0));
+}
+
+TEST(Run_WASM_Int32RemS_asmjs) {
+ TestingModule module;
+ module.origin = kAsmJsOrigin;
+ WasmRunner<int32_t> r(&module, MachineType::Int32(), MachineType::Int32());
+ BUILD(r, WASM_I32_REMS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ const int32_t kMin = std::numeric_limits<int32_t>::min();
+ CHECK_EQ(33, r.Call(133, 100));
+ CHECK_EQ(0, r.Call(kMin, -1));
+ CHECK_EQ(0, r.Call(100, 0));
+ CHECK_EQ(0, r.Call(-1001, 0));
+ CHECK_EQ(0, r.Call(kMin, 0));
+}
+
+TEST(Run_WASM_Int32DivU_asmjs) {
+ TestingModule module;
+ module.origin = kAsmJsOrigin;
+ WasmRunner<int32_t> r(&module, MachineType::Int32(), MachineType::Int32());
+ BUILD(r, WASM_I32_DIVU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ const int32_t kMin = std::numeric_limits<int32_t>::min();
+ CHECK_EQ(0, r.Call(0, 100));
+ CHECK_EQ(0, r.Call(kMin, -1));
+ CHECK_EQ(0, r.Call(100, 0));
+ CHECK_EQ(0, r.Call(-1001, 0));
+ CHECK_EQ(0, r.Call(kMin, 0));
+}
+
+TEST(Run_WASM_Int32RemU_asmjs) {
+ TestingModule module;
+ module.origin = kAsmJsOrigin;
+ WasmRunner<int32_t> r(&module, MachineType::Int32(), MachineType::Int32());
+ BUILD(r, WASM_I32_REMU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ const int32_t kMin = std::numeric_limits<int32_t>::min();
+ CHECK_EQ(17, r.Call(217, 100));
+ CHECK_EQ(0, r.Call(100, 0));
+ CHECK_EQ(0, r.Call(-1001, 0));
+ CHECK_EQ(0, r.Call(kMin, 0));
+ CHECK_EQ(kMin, r.Call(kMin, -1));
}
@@ -590,7 +478,7 @@ TEST(Run_WASM_Int32DivS_byzero_const) {
TEST(Run_WASM_Int32DivU_byzero_const) {
for (uint32_t denom = 0xfffffffe; denom < 8; denom++) {
WasmRunner<uint32_t> r(MachineType::Uint32());
- BUILD(r, WASM_I32_DIVU(WASM_GET_LOCAL(0), WASM_I32(denom)));
+ BUILD(r, WASM_I32_DIVU(WASM_GET_LOCAL(0), WASM_I32V_1(denom)));
for (uint32_t val = 0xfffffff0; val < 8; val++) {
if (denom == 0) {
@@ -622,86 +510,6 @@ TEST(Run_WASM_Int32DivS_trap_effect) {
CHECK_TRAP(r.Call(0, 0));
}
-
-#if WASM_64
-#define as64(x) static_cast<int64_t>(x)
-TEST(Run_WASM_Int64DivS_trap) {
- WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
- BUILD(r, WASM_I64_DIVS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- CHECK_EQ(0, r.Call(as64(0), as64(100)));
- CHECK_TRAP64(r.Call(as64(100), as64(0)));
- CHECK_TRAP64(r.Call(as64(-1001), as64(0)));
- CHECK_TRAP64(r.Call(std::numeric_limits<int64_t>::min(), as64(-1)));
- CHECK_TRAP64(r.Call(std::numeric_limits<int64_t>::min(), as64(0)));
-}
-
-
-TEST(Run_WASM_Int64RemS_trap) {
- WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
- BUILD(r, WASM_I64_REMS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- CHECK_EQ(33, r.Call(as64(133), as64(100)));
- CHECK_EQ(0, r.Call(std::numeric_limits<int64_t>::min(), as64(-1)));
- CHECK_TRAP64(r.Call(as64(100), as64(0)));
- CHECK_TRAP64(r.Call(as64(-1001), as64(0)));
- CHECK_TRAP64(r.Call(std::numeric_limits<int64_t>::min(), as64(0)));
-}
-
-
-TEST(Run_WASM_Int64DivU_trap) {
- WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
- BUILD(r, WASM_I64_DIVU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- CHECK_EQ(0, r.Call(as64(0), as64(100)));
- CHECK_EQ(0, r.Call(std::numeric_limits<int64_t>::min(), as64(-1)));
- CHECK_TRAP64(r.Call(as64(100), as64(0)));
- CHECK_TRAP64(r.Call(as64(-1001), as64(0)));
- CHECK_TRAP64(r.Call(std::numeric_limits<int64_t>::min(), as64(0)));
-}
-
-
-TEST(Run_WASM_Int64RemU_trap) {
- WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
- BUILD(r, WASM_I64_REMU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- CHECK_EQ(17, r.Call(as64(217), as64(100)));
- CHECK_TRAP64(r.Call(as64(100), as64(0)));
- CHECK_TRAP64(r.Call(as64(-1001), as64(0)));
- CHECK_TRAP64(r.Call(std::numeric_limits<int64_t>::min(), as64(0)));
- CHECK_EQ(std::numeric_limits<int64_t>::min(),
- r.Call(std::numeric_limits<int64_t>::min(), as64(-1)));
-}
-
-
-TEST(Run_WASM_Int64DivS_byzero_const) {
- for (int8_t denom = -2; denom < 8; denom++) {
- WasmRunner<int64_t> r(MachineType::Int64());
- BUILD(r, WASM_I64_DIVS(WASM_GET_LOCAL(0), WASM_I64(denom)));
- for (int64_t val = -7; val < 8; val++) {
- if (denom == 0) {
- CHECK_TRAP64(r.Call(val));
- } else {
- CHECK_EQ(val / denom, r.Call(val));
- }
- }
- }
-}
-
-
-TEST(Run_WASM_Int64DivU_byzero_const) {
- for (uint64_t denom = 0xfffffffffffffffe; denom < 8; denom++) {
- WasmRunner<uint64_t> r(MachineType::Uint64());
- BUILD(r, WASM_I64_DIVU(WASM_GET_LOCAL(0), WASM_I64(denom)));
-
- for (uint64_t val = 0xfffffffffffffff0; val < 8; val++) {
- if (denom == 0) {
- CHECK_TRAP64(r.Call(val));
- } else {
- CHECK_EQ(val / denom, r.Call(val));
- }
- }
- }
-}
-#endif
-
-
void TestFloat32Binop(WasmOpcode opcode, int32_t expected, float a, float b) {
{
WasmRunner<int32_t> r;
@@ -892,7 +700,7 @@ TEST(Run_Wasm_IfElse_Unreachable1) {
TEST(Run_Wasm_Return12) {
WasmRunner<int32_t> r;
- BUILD(r, WASM_RETURN(WASM_I8(12)));
+ BUILD(r, RET_I8(12));
CHECK_EQ(12, r.Call());
}
@@ -900,7 +708,7 @@ TEST(Run_Wasm_Return12) {
TEST(Run_Wasm_Return17) {
WasmRunner<int32_t> r;
- BUILD(r, WASM_BLOCK(1, WASM_RETURN(WASM_I8(17))));
+ BUILD(r, B1(RET_I8(17)));
CHECK_EQ(17, r.Call());
}
@@ -908,27 +716,16 @@ TEST(Run_Wasm_Return17) {
TEST(Run_Wasm_Return_I32) {
WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r, WASM_RETURN(WASM_GET_LOCAL(0)));
+ BUILD(r, RET(WASM_GET_LOCAL(0)));
FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
}
-#if WASM_64
-TEST(Run_Wasm_Return_I64) {
- WasmRunner<int64_t> r(MachineType::Int64());
-
- BUILD(r, WASM_RETURN(WASM_GET_LOCAL(0)));
-
- FOR_INT64_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
-}
-#endif
-
-
TEST(Run_Wasm_Return_F32) {
WasmRunner<float> r(MachineType::Float32());
- BUILD(r, WASM_RETURN(WASM_GET_LOCAL(0)));
+ BUILD(r, RET(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) {
float expect = *i;
@@ -945,7 +742,7 @@ TEST(Run_Wasm_Return_F32) {
TEST(Run_Wasm_Return_F64) {
WasmRunner<double> r(MachineType::Float64());
- BUILD(r, WASM_RETURN(WASM_GET_LOCAL(0)));
+ BUILD(r, RET(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) {
double expect = *i;
@@ -973,17 +770,18 @@ TEST(Run_Wasm_Select) {
TEST(Run_Wasm_Select_strict1) {
WasmRunner<int32_t> r(MachineType::Int32());
// select(a=0, a=1, a=2); return a
- BUILD(r, WASM_BLOCK(2, WASM_SELECT(WASM_SET_LOCAL(0, WASM_I8(0)),
- WASM_SET_LOCAL(0, WASM_I8(1)),
- WASM_SET_LOCAL(0, WASM_I8(2))),
- WASM_GET_LOCAL(0)));
+ BUILD(r, B2(WASM_SELECT(WASM_SET_LOCAL(0, WASM_I8(0)),
+ WASM_SET_LOCAL(0, WASM_I8(1)),
+ WASM_SET_LOCAL(0, WASM_I8(2))),
+ WASM_GET_LOCAL(0)));
FOR_INT32_INPUTS(i) { CHECK_EQ(2, r.Call(*i)); }
}
TEST(Run_Wasm_Select_strict2) {
WasmRunner<int32_t> r(MachineType::Int32());
- r.env()->AddLocals(kAstI32, 2);
+ r.AllocateLocal(kAstI32);
+ r.AllocateLocal(kAstI32);
// select(b=5, c=6, a)
BUILD(r, WASM_SELECT(WASM_SET_LOCAL(1, WASM_I8(5)),
WASM_SET_LOCAL(2, WASM_I8(6)), WASM_GET_LOCAL(0)));
@@ -995,7 +793,8 @@ TEST(Run_Wasm_Select_strict2) {
TEST(Run_Wasm_Select_strict3) {
WasmRunner<int32_t> r(MachineType::Int32());
- r.env()->AddLocals(kAstI32, 2);
+ r.AllocateLocal(kAstI32);
+ r.AllocateLocal(kAstI32);
// select(b=5, c=6, a=b)
BUILD(r, WASM_SELECT(WASM_SET_LOCAL(1, WASM_I8(5)),
WASM_SET_LOCAL(2, WASM_I8(6)),
@@ -1009,55 +808,67 @@ TEST(Run_Wasm_Select_strict3) {
TEST(Run_Wasm_BrIf_strict) {
WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r, WASM_BLOCK(
- 2, WASM_BLOCK(1, WASM_BRV_IF(0, WASM_GET_LOCAL(0),
- WASM_SET_LOCAL(0, WASM_I8(99)))),
- WASM_GET_LOCAL(0)));
+ BUILD(
+ r,
+ B2(B1(WASM_BRV_IF(0, WASM_GET_LOCAL(0), WASM_SET_LOCAL(0, WASM_I8(99)))),
+ WASM_GET_LOCAL(0)));
FOR_INT32_INPUTS(i) { CHECK_EQ(99, r.Call(*i)); }
}
-TEST(Run_Wasm_TableSwitch0a) {
+TEST(Run_Wasm_BrTable0a) {
WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r, WASM_BLOCK(2, WASM_TABLESWITCH_OP(0, 1, WASM_CASE_BR(0)),
- WASM_TABLESWITCH_BODY0(WASM_GET_LOCAL(0)), WASM_I8(91)));
+ BUILD(r,
+ B2(B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 0, BR_TARGET(0))), WASM_I8(91)));
FOR_INT32_INPUTS(i) { CHECK_EQ(91, r.Call(*i)); }
}
-TEST(Run_Wasm_TableSwitch0b) {
+TEST(Run_Wasm_BrTable0b) {
WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r, WASM_BLOCK(
- 2, WASM_TABLESWITCH_OP(0, 2, WASM_CASE_BR(0), WASM_CASE_BR(0)),
- WASM_TABLESWITCH_BODY0(WASM_GET_LOCAL(0)), WASM_I8(92)));
+ BUILD(r,
+ B2(B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 1, BR_TARGET(0), BR_TARGET(0))),
+ WASM_I8(92)));
FOR_INT32_INPUTS(i) { CHECK_EQ(92, r.Call(*i)); }
}
-TEST(Run_Wasm_TableSwitch0c) {
+TEST(Run_Wasm_BrTable0c) {
WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r,
- WASM_BLOCK(2, WASM_BLOCK(2, WASM_TABLESWITCH_OP(0, 2, WASM_CASE_BR(0),
- WASM_CASE_BR(1)),
- WASM_TABLESWITCH_BODY0(WASM_GET_LOCAL(0)),
- WASM_RETURN(WASM_I8(76))),
- WASM_I8(77)));
+ BUILD(
+ r,
+ B2(B2(B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 1, BR_TARGET(0), BR_TARGET(1))),
+ RET_I8(76)),
+ WASM_I8(77)));
FOR_INT32_INPUTS(i) {
int32_t expected = *i == 0 ? 76 : 77;
CHECK_EQ(expected, r.Call(*i));
}
}
-TEST(Run_Wasm_TableSwitch1) {
+TEST(Run_Wasm_BrTable1) {
WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r, WASM_TABLESWITCH_OP(1, 1, WASM_CASE(0)),
- WASM_TABLESWITCH_BODY(WASM_GET_LOCAL(0), WASM_RETURN(WASM_I8(93))));
+ BUILD(r, B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 0, BR_TARGET(0))), RET_I8(93));
FOR_INT32_INPUTS(i) { CHECK_EQ(93, r.Call(*i)); }
}
+TEST(Run_Wasm_BrTable_loop) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r,
+ B2(WASM_LOOP(1, WASM_BR_TABLE(WASM_INC_LOCAL_BY(0, 1), 2, BR_TARGET(2),
+ BR_TARGET(1), BR_TARGET(0))),
+ RET_I8(99)),
+ WASM_I8(98));
+ CHECK_EQ(99, r.Call(0));
+ CHECK_EQ(98, r.Call(-1));
+ CHECK_EQ(98, r.Call(-2));
+ CHECK_EQ(98, r.Call(-3));
+ CHECK_EQ(98, r.Call(-100));
+}
-TEST(Run_Wasm_TableSwitch_br) {
+TEST(Run_Wasm_BrTable_br) {
WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r, WASM_TABLESWITCH_OP(1, 2, WASM_CASE_BR(0), WASM_CASE(0)),
- WASM_TABLESWITCH_BODY(WASM_GET_LOCAL(0), WASM_RETURN(WASM_I8(91))),
+ BUILD(r,
+ B2(B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 1, BR_TARGET(1), BR_TARGET(0))),
+ RET_I8(91)),
WASM_I8(99));
CHECK_EQ(99, r.Call(0));
CHECK_EQ(91, r.Call(1));
@@ -1065,17 +876,14 @@ TEST(Run_Wasm_TableSwitch_br) {
CHECK_EQ(91, r.Call(3));
}
-
-TEST(Run_Wasm_TableSwitch_br2) {
+TEST(Run_Wasm_BrTable_br2) {
WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r, WASM_BLOCK(
- 2, WASM_BLOCK(2, WASM_TABLESWITCH_OP(
- 1, 4, WASM_CASE_BR(0), WASM_CASE_BR(1),
- WASM_CASE_BR(2), WASM_CASE(0)),
- WASM_TABLESWITCH_BODY(WASM_GET_LOCAL(0),
- WASM_RETURN(WASM_I8(85))),
- WASM_RETURN(WASM_I8(86))),
- WASM_RETURN(WASM_I8(87))),
+
+ BUILD(r, B2(B2(B2(B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 3, BR_TARGET(1),
+ BR_TARGET(2), BR_TARGET(3), BR_TARGET(0))),
+ RET_I8(85)),
+ RET_I8(86)),
+ RET_I8(87)),
WASM_I8(88));
CHECK_EQ(86, r.Call(0));
CHECK_EQ(87, r.Call(1));
@@ -1085,95 +893,74 @@ TEST(Run_Wasm_TableSwitch_br2) {
CHECK_EQ(85, r.Call(5));
}
-
-TEST(Run_Wasm_TableSwitch2) {
- WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r, WASM_TABLESWITCH_OP(2, 2, WASM_CASE(0), WASM_CASE(1)),
- WASM_TABLESWITCH_BODY(WASM_GET_LOCAL(0), WASM_RETURN(WASM_I8(91)),
- WASM_RETURN(WASM_I8(92))));
- FOR_INT32_INPUTS(i) {
- int32_t expected = *i == 0 ? 91 : 92;
- CHECK_EQ(expected, r.Call(*i));
- }
-}
-
-
-TEST(Run_Wasm_TableSwitch2b) {
- WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r, WASM_TABLESWITCH_OP(2, 2, WASM_CASE(1), WASM_CASE(0)),
- WASM_TABLESWITCH_BODY(WASM_GET_LOCAL(0), WASM_RETURN(WASM_I8(81)),
- WASM_RETURN(WASM_I8(82))));
- FOR_INT32_INPUTS(i) {
- int32_t expected = *i == 0 ? 82 : 81;
- CHECK_EQ(expected, r.Call(*i));
- }
-}
-
-
-TEST(Run_Wasm_TableSwitch4) {
+TEST(Run_Wasm_BrTable4) {
for (int i = 0; i < 4; i++) {
- const uint16_t br = 0x8000u;
- uint16_t c = 0;
- uint16_t cases[] = {i == 0 ? br : c++, i == 1 ? br : c++, i == 2 ? br : c++,
- i == 3 ? br : c++};
- byte code[] = {
- WASM_BLOCK(1, WASM_TABLESWITCH_OP(
- 3, 4, WASM_CASE(cases[0]), WASM_CASE(cases[1]),
- WASM_CASE(cases[2]), WASM_CASE(cases[3])),
- WASM_TABLESWITCH_BODY(
- WASM_GET_LOCAL(0), WASM_RETURN(WASM_I8(71)),
- WASM_RETURN(WASM_I8(72)), WASM_RETURN(WASM_I8(73)))),
- WASM_RETURN(WASM_I8(74))};
-
- WasmRunner<int32_t> r(MachineType::Int32());
- r.Build(code, code + arraysize(code));
-
- FOR_INT32_INPUTS(i) {
- int index = (*i < 0 || *i > 3) ? 3 : *i;
- int32_t expected = 71 + cases[index];
- if (expected >= 0x8000) expected = 74;
- CHECK_EQ(expected, r.Call(*i));
+ for (int t = 0; t < 4; t++) {
+ uint32_t cases[] = {0, 1, 2, 3};
+ cases[i] = t;
+ byte code[] = {B2(B2(B2(B2(B1(WASM_BR_TABLE(
+ WASM_GET_LOCAL(0), 3, BR_TARGET(cases[0]),
+ BR_TARGET(cases[1]), BR_TARGET(cases[2]),
+ BR_TARGET(cases[3]))),
+ RET_I8(70)),
+ RET_I8(71)),
+ RET_I8(72)),
+ RET_I8(73)),
+ WASM_I8(75)};
+
+ WasmRunner<int32_t> r(MachineType::Int32());
+ r.Build(code, code + arraysize(code));
+
+ for (int x = -3; x < 50; x++) {
+ int index = (x > 3 || x < 0) ? 3 : x;
+ int32_t expected = 70 + cases[index];
+ CHECK_EQ(expected, r.Call(x));
+ }
}
}
}
-
-TEST(Run_Wasm_TableSwitch4b) {
- for (int a = 0; a < 2; a++) {
- for (int b = 0; b < 2; b++) {
- for (int c = 0; c < 2; c++) {
- for (int d = 0; d < 2; d++) {
- if (a + b + c + d == 0) continue;
- if (a + b + c + d == 4) continue;
-
- byte code[] = {
- WASM_TABLESWITCH_OP(2, 4, WASM_CASE(a), WASM_CASE(b),
- WASM_CASE(c), WASM_CASE(d)),
- WASM_TABLESWITCH_BODY(WASM_GET_LOCAL(0), WASM_RETURN(WASM_I8(61)),
- WASM_RETURN(WASM_I8(62)))};
-
- WasmRunner<int32_t> r(MachineType::Int32());
- r.Build(code, code + arraysize(code));
-
- CHECK_EQ(61 + a, r.Call(0));
- CHECK_EQ(61 + b, r.Call(1));
- CHECK_EQ(61 + c, r.Call(2));
- CHECK_EQ(61 + d, r.Call(3));
- CHECK_EQ(61 + d, r.Call(4));
+TEST(Run_Wasm_BrTable4x4) {
+ for (byte a = 0; a < 4; a++) {
+ for (byte b = 0; b < 4; b++) {
+ for (byte c = 0; c < 4; c++) {
+ for (byte d = 0; d < 4; d++) {
+ for (int i = 0; i < 4; i++) {
+ uint32_t cases[] = {a, b, c, d};
+ byte code[] = {
+ B2(B2(B2(B2(B1(WASM_BR_TABLE(
+ WASM_GET_LOCAL(0), 3, BR_TARGET(cases[0]),
+ BR_TARGET(cases[1]), BR_TARGET(cases[2]),
+ BR_TARGET(cases[3]))),
+ RET_I8(50)),
+ RET_I8(51)),
+ RET_I8(52)),
+ RET_I8(53)),
+ WASM_I8(55)};
+
+ WasmRunner<int32_t> r(MachineType::Int32());
+ r.Build(code, code + arraysize(code));
+
+ for (int x = -6; x < 47; x++) {
+ int index = (x > 3 || x < 0) ? 3 : x;
+ int32_t expected = 50 + cases[index];
+ CHECK_EQ(expected, r.Call(x));
+ }
+ }
}
}
}
}
}
-
-TEST(Run_Wasm_TableSwitch4_fallthru) {
+TEST(Run_Wasm_BrTable4_fallthru) {
byte code[] = {
- WASM_TABLESWITCH_OP(4, 4, WASM_CASE(0), WASM_CASE(1), WASM_CASE(2),
- WASM_CASE(3)),
- WASM_TABLESWITCH_BODY(WASM_GET_LOCAL(0), WASM_INC_LOCAL_BY(1, 1),
- WASM_INC_LOCAL_BY(1, 2), WASM_INC_LOCAL_BY(1, 4),
- WASM_INC_LOCAL_BY(1, 8)),
+ B2(B2(B2(B2(B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 3, BR_TARGET(0),
+ BR_TARGET(1), BR_TARGET(2), BR_TARGET(3))),
+ WASM_INC_LOCAL_BY(1, 1)),
+ WASM_INC_LOCAL_BY(1, 2)),
+ WASM_INC_LOCAL_BY(1, 4)),
+ WASM_INC_LOCAL_BY(1, 8)),
WASM_GET_LOCAL(1)};
WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
@@ -1192,34 +979,6 @@ TEST(Run_Wasm_TableSwitch4_fallthru) {
CHECK_EQ(108, r.Call(4, 100));
}
-
-TEST(Run_Wasm_TableSwitch4_fallthru_br) {
- byte code[] = {
- WASM_TABLESWITCH_OP(4, 4, WASM_CASE(0), WASM_CASE(1), WASM_CASE(2),
- WASM_CASE(3)),
- WASM_TABLESWITCH_BODY(WASM_GET_LOCAL(0), WASM_INC_LOCAL_BY(1, 1),
- WASM_BRV(0, WASM_INC_LOCAL_BY(1, 2)),
- WASM_INC_LOCAL_BY(1, 4),
- WASM_BRV(0, WASM_INC_LOCAL_BY(1, 8))),
- WASM_GET_LOCAL(1)};
-
- WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
- r.Build(code, code + arraysize(code));
-
- CHECK_EQ(3, r.Call(0, 0));
- CHECK_EQ(2, r.Call(1, 0));
- CHECK_EQ(12, r.Call(2, 0));
- CHECK_EQ(8, r.Call(3, 0));
- CHECK_EQ(8, r.Call(4, 0));
-
- CHECK_EQ(203, r.Call(0, 200));
- CHECK_EQ(202, r.Call(1, 200));
- CHECK_EQ(212, r.Call(2, 200));
- CHECK_EQ(208, r.Call(3, 200));
- CHECK_EQ(208, r.Call(4, 200));
-}
-
-
TEST(Run_Wasm_F32ReinterpretI32) {
TestingModule module;
int32_t* memory = module.AddMemoryElems<int32_t>(8);
@@ -1282,9 +1041,8 @@ TEST(Run_Wasm_VoidReturn1) {
const int32_t kExpected = -414444;
// Build the calling function.
- WasmRunner<int32_t> r;
- r.env()->module = &module;
- BUILD(r, WASM_BLOCK(2, WASM_CALL_FUNCTION0(index), WASM_I32(kExpected)));
+ WasmRunner<int32_t> r(&module);
+ BUILD(r, B2(WASM_CALL_FUNCTION0(index), WASM_I32V_3(kExpected)));
int32_t result = r.Call();
CHECK_EQ(kExpected, result);
@@ -1302,9 +1060,8 @@ TEST(Run_Wasm_VoidReturn2) {
const int32_t kExpected = -414444;
// Build the calling function.
- WasmRunner<int32_t> r;
- r.env()->module = &module;
- BUILD(r, WASM_BLOCK(2, WASM_CALL_FUNCTION0(index), WASM_I32(kExpected)));
+ WasmRunner<int32_t> r(&module);
+ BUILD(r, B2(WASM_CALL_FUNCTION0(index), WASM_I32V_3(kExpected)));
int32_t result = r.Call();
CHECK_EQ(kExpected, result);
@@ -1314,10 +1071,10 @@ TEST(Run_Wasm_VoidReturn2) {
TEST(Run_Wasm_Block_If_P) {
WasmRunner<int32_t> r(MachineType::Int32());
// { if (p0) return 51; return 52; }
- BUILD(r, WASM_BLOCK(2, // --
- WASM_IF(WASM_GET_LOCAL(0), // --
- WASM_BRV(0, WASM_I8(51))), // --
- WASM_I8(52))); // --
+ BUILD(r, B2( // --
+ WASM_IF(WASM_GET_LOCAL(0), // --
+ WASM_BRV(0, WASM_I8(51))), // --
+ WASM_I8(52))); // --
FOR_INT32_INPUTS(i) {
int32_t expected = *i ? 51 : 52;
CHECK_EQ(expected, r.Call(*i));
@@ -1327,8 +1084,7 @@ TEST(Run_Wasm_Block_If_P) {
TEST(Run_Wasm_Block_BrIf_P) {
WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r, WASM_BLOCK(2, WASM_BRV_IF(0, WASM_I8(51), WASM_GET_LOCAL(0)),
- WASM_I8(52)));
+ BUILD(r, B2(WASM_BRV_IF(0, WASM_I8(51), WASM_GET_LOCAL(0)), WASM_I8(52)));
FOR_INT32_INPUTS(i) {
int32_t expected = *i ? 51 : 52;
CHECK_EQ(expected, r.Call(*i));
@@ -1339,11 +1095,11 @@ TEST(Run_Wasm_Block_BrIf_P) {
TEST(Run_Wasm_Block_IfElse_P_assign) {
WasmRunner<int32_t> r(MachineType::Int32());
// { if (p0) p0 = 71; else p0 = 72; return p0; }
- BUILD(r, WASM_BLOCK(2, // --
- WASM_IF_ELSE(WASM_GET_LOCAL(0), // --
- WASM_SET_LOCAL(0, WASM_I8(71)), // --
- WASM_SET_LOCAL(0, WASM_I8(72))), // --
- WASM_GET_LOCAL(0)));
+ BUILD(r, B2( // --
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), // --
+ WASM_SET_LOCAL(0, WASM_I8(71)), // --
+ WASM_SET_LOCAL(0, WASM_I8(72))), // --
+ WASM_GET_LOCAL(0)));
FOR_INT32_INPUTS(i) {
int32_t expected = *i ? 71 : 72;
CHECK_EQ(expected, r.Call(*i));
@@ -1354,10 +1110,10 @@ TEST(Run_Wasm_Block_IfElse_P_assign) {
TEST(Run_Wasm_Block_IfElse_P_return) {
WasmRunner<int32_t> r(MachineType::Int32());
// if (p0) return 81; else return 82;
- BUILD(r, // --
- WASM_IF_ELSE(WASM_GET_LOCAL(0), // --
- WASM_RETURN(WASM_I8(81)), // --
- WASM_RETURN(WASM_I8(82)))); // --
+ BUILD(r, // --
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), // --
+ RET_I8(81), // --
+ RET_I8(82))); // --
FOR_INT32_INPUTS(i) {
int32_t expected = *i ? 81 : 82;
CHECK_EQ(expected, r.Call(*i));
@@ -1381,8 +1137,7 @@ TEST(Run_Wasm_Block_If_P_assign) {
TEST(Run_Wasm_DanglingAssign) {
WasmRunner<int32_t> r(MachineType::Int32());
// { return 0; p0 = 0; }
- BUILD(r,
- WASM_BLOCK(2, WASM_RETURN(WASM_I8(99)), WASM_SET_LOCAL(0, WASM_ZERO)));
+ BUILD(r, B2(RET_I8(99), WASM_SET_LOCAL(0, WASM_ZERO)));
CHECK_EQ(99, r.Call(1));
}
@@ -1459,9 +1214,9 @@ TEST(Run_Wasm_WhileCountDown) {
TEST(Run_Wasm_Loop_if_break1) {
WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r, WASM_BLOCK(2, WASM_LOOP(2, WASM_IF(WASM_GET_LOCAL(0), WASM_BREAK(0)),
- WASM_SET_LOCAL(0, WASM_I8(99))),
- WASM_GET_LOCAL(0)));
+ BUILD(r, B2(WASM_LOOP(2, WASM_IF(WASM_GET_LOCAL(0), WASM_BREAK(0)),
+ WASM_SET_LOCAL(0, WASM_I8(99))),
+ WASM_GET_LOCAL(0)));
CHECK_EQ(99, r.Call(0));
CHECK_EQ(3, r.Call(3));
CHECK_EQ(10000, r.Call(10000));
@@ -1471,9 +1226,9 @@ TEST(Run_Wasm_Loop_if_break1) {
TEST(Run_Wasm_Loop_if_break2) {
WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r, WASM_BLOCK(2, WASM_LOOP(2, WASM_BR_IF(1, WASM_GET_LOCAL(0)),
- WASM_SET_LOCAL(0, WASM_I8(99))),
- WASM_GET_LOCAL(0)));
+ BUILD(r, B2(WASM_LOOP(2, WASM_BR_IF(1, WASM_GET_LOCAL(0)),
+ WASM_SET_LOCAL(0, WASM_I8(99))),
+ WASM_GET_LOCAL(0)));
CHECK_EQ(99, r.Call(0));
CHECK_EQ(3, r.Call(3));
CHECK_EQ(10000, r.Call(10000));
@@ -1483,8 +1238,8 @@ TEST(Run_Wasm_Loop_if_break2) {
TEST(Run_Wasm_Loop_if_break_fallthru) {
WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r, WASM_BLOCK(1, WASM_LOOP(2, WASM_IF(WASM_GET_LOCAL(0), WASM_BREAK(1)),
- WASM_SET_LOCAL(0, WASM_I8(93)))),
+ BUILD(r, B1(WASM_LOOP(2, WASM_IF(WASM_GET_LOCAL(0), WASM_BREAK(1)),
+ WASM_SET_LOCAL(0, WASM_I8(93)))),
WASM_GET_LOCAL(0));
CHECK_EQ(93, r.Call(0));
CHECK_EQ(3, r.Call(3));
@@ -1534,7 +1289,7 @@ TEST(Run_Wasm_LoadMemI32_oob) {
TEST(Run_Wasm_LoadMemI32_oob_asm) {
TestingModule module;
- module.asm_js = true;
+ module.origin = kAsmJsOrigin;
int32_t* memory = module.AddMemoryElems<int32_t>(8);
WasmRunner<int32_t> r(&module, MachineType::Uint32());
module.RandomizeMemory(1112);
@@ -1665,7 +1420,7 @@ TEST(Run_Wasm_StoreMemI32_offset) {
const int32_t kWritten = 0xaabbccdd;
BUILD(r, WASM_STORE_MEM_OFFSET(MachineType::Int32(), 4, WASM_GET_LOCAL(0),
- WASM_I32(kWritten)));
+ WASM_I32V_5(kWritten)));
for (int i = 0; i < 2; i++) {
module.RandomizeMemory(1111);
@@ -1719,61 +1474,6 @@ TEST(Run_Wasm_StoreMem_offset_oob) {
}
-#if WASM_64
-TEST(Run_Wasm_F64ReinterpretI64) {
- TestingModule module;
- int64_t* memory = module.AddMemoryElems<int64_t>(8);
- WasmRunner<int64_t> r(&module);
-
- BUILD(r, WASM_I64_REINTERPRET_F64(
- WASM_LOAD_MEM(MachineType::Float64(), WASM_ZERO)));
-
- FOR_INT32_INPUTS(i) {
- int64_t expected = static_cast<int64_t>(*i) * 0x300010001;
- memory[0] = expected;
- CHECK_EQ(expected, r.Call());
- }
-}
-
-
-TEST(Run_Wasm_I64ReinterpretF64) {
- TestingModule module;
- int64_t* memory = module.AddMemoryElems<int64_t>(8);
- WasmRunner<int64_t> r(&module, MachineType::Int64());
-
- BUILD(r, WASM_BLOCK(
- 2, WASM_STORE_MEM(MachineType::Float64(), WASM_ZERO,
- WASM_F64_REINTERPRET_I64(WASM_GET_LOCAL(0))),
- WASM_GET_LOCAL(0)));
-
- FOR_INT32_INPUTS(i) {
- int64_t expected = static_cast<int64_t>(*i) * 0x300010001;
- CHECK_EQ(expected, r.Call(expected));
- CHECK_EQ(expected, memory[0]);
- }
-}
-
-
-TEST(Run_Wasm_LoadMemI64) {
- TestingModule module;
- int64_t* memory = module.AddMemoryElems<int64_t>(8);
- module.RandomizeMemory(1111);
- WasmRunner<int64_t> r(&module);
-
- BUILD(r, WASM_LOAD_MEM(MachineType::Int64(), WASM_I8(0)));
-
- memory[0] = 0xaabbccdd00112233LL;
- CHECK_EQ(0xaabbccdd00112233LL, r.Call());
-
- memory[0] = 0x33aabbccdd001122LL;
- CHECK_EQ(0x33aabbccdd001122LL, r.Call());
-
- memory[0] = 77777777;
- CHECK_EQ(77777777, r.Call());
-}
-#endif
-
-
TEST(Run_Wasm_LoadMemI32_P) {
const int kNumElems = 8;
TestingModule module;
@@ -1829,9 +1529,9 @@ TEST(Run_Wasm_CheckMachIntsZero) {
WasmRunner<uint32_t> r(&module, MachineType::Int32());
BUILD(r, kExprBlock, 2, kExprLoop, 1, kExprIf, kExprGetLocal, 0, kExprBr, 0,
- kExprIfElse, kExprI32LoadMem, 0, kExprGetLocal, 0, kExprBr, 2,
- kExprI8Const, 255, kExprSetLocal, 0, kExprI32Sub, kExprGetLocal, 0,
- kExprI8Const, 4, kExprI8Const, 0);
+ kExprIfElse, kExprI32LoadMem, ZERO_ALIGNMENT, ZERO_OFFSET,
+ kExprGetLocal, 0, kExprBr, 2, kExprI8Const, 255, kExprSetLocal, 0,
+ kExprI32Sub, kExprGetLocal, 0, kExprI8Const, 4, kExprI8Const, 0);
module.BlankMemory();
CHECK_EQ(0, r.Call((kNumElems - 1) * 4));
@@ -1872,41 +1572,6 @@ TEST(Run_Wasm_MemF32_Sum) {
}
-#if WASM_64
-TEST(Run_Wasm_MemI64_Sum) {
- const int kNumElems = 20;
- TestingModule module;
- uint64_t* memory = module.AddMemoryElems<uint64_t>(kNumElems);
- WasmRunner<uint64_t> r(&module, MachineType::Int32());
- const byte kSum = r.AllocateLocal(kAstI64);
-
- BUILD(r, WASM_BLOCK(
- 2, WASM_WHILE(
- WASM_GET_LOCAL(0),
- WASM_BLOCK(
- 2, WASM_SET_LOCAL(
- kSum, WASM_I64_ADD(
- WASM_GET_LOCAL(kSum),
- WASM_LOAD_MEM(MachineType::Int64(),
- WASM_GET_LOCAL(0)))),
- WASM_SET_LOCAL(
- 0, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(8))))),
- WASM_GET_LOCAL(1)));
-
- // Run 4 trials.
- for (int i = 0; i < 3; i++) {
- module.RandomizeMemory(i * 33);
- uint64_t expected = 0;
- for (size_t j = kNumElems - 1; j > 0; j--) {
- expected += memory[j];
- }
- uint64_t result = r.Call(8 * (kNumElems - 1));
- CHECK_EQ(expected, result);
- }
-}
-#endif
-
-
template <typename T>
T GenerateAndRunFold(WasmOpcode binop, T* buffer, size_t size,
LocalType astType, MachineType memType) {
@@ -1966,8 +1631,7 @@ TEST(Build_Wasm_Infinite_Loop_effect) {
TEST(Run_Wasm_Unreachable0a) {
WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r,
- WASM_BLOCK(2, WASM_BRV(0, WASM_I8(9)), WASM_RETURN(WASM_GET_LOCAL(0))));
+ BUILD(r, B2(WASM_BRV(0, WASM_I8(9)), RET(WASM_GET_LOCAL(0))));
CHECK_EQ(9, r.Call(0));
CHECK_EQ(9, r.Call(1));
}
@@ -1975,7 +1639,7 @@ TEST(Run_Wasm_Unreachable0a) {
TEST(Run_Wasm_Unreachable0b) {
WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r, WASM_BLOCK(2, WASM_BRV(0, WASM_I8(7)), WASM_UNREACHABLE));
+ BUILD(r, B2(WASM_BRV(0, WASM_I8(7)), WASM_UNREACHABLE));
CHECK_EQ(7, r.Call(0));
CHECK_EQ(7, r.Call(1));
}
@@ -2014,8 +1678,8 @@ TEST(Build_Wasm_UnreachableIf2) {
TEST(Run_Wasm_Unreachable_Load) {
WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r, WASM_BLOCK(2, WASM_BRV(0, WASM_GET_LOCAL(0)),
- WASM_LOAD_MEM(MachineType::Int8(), WASM_GET_LOCAL(0))));
+ BUILD(r, B2(WASM_BRV(0, WASM_GET_LOCAL(0)),
+ WASM_LOAD_MEM(MachineType::Int8(), WASM_GET_LOCAL(0))));
CHECK_EQ(11, r.Call(11));
CHECK_EQ(21, r.Call(21));
}
@@ -2023,8 +1687,7 @@ TEST(Run_Wasm_Unreachable_Load) {
TEST(Run_Wasm_Infinite_Loop_not_taken1) {
WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r, WASM_BLOCK(2, WASM_IF(WASM_GET_LOCAL(0), WASM_INFINITE_LOOP),
- WASM_I8(45)));
+ BUILD(r, B2(WASM_IF(WASM_GET_LOCAL(0), WASM_INFINITE_LOOP), WASM_I8(45)));
// Run the code, but don't go into the infinite loop.
CHECK_EQ(45, r.Call(0));
}
@@ -2032,9 +1695,8 @@ TEST(Run_Wasm_Infinite_Loop_not_taken1) {
TEST(Run_Wasm_Infinite_Loop_not_taken2) {
WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r,
- WASM_BLOCK(1, WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_BRV(0, WASM_I8(45)),
- WASM_INFINITE_LOOP)));
+ BUILD(r, B1(WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_BRV(0, WASM_I8(45)),
+ WASM_INFINITE_LOOP)));
// Run the code, but don't go into the infinite loop.
CHECK_EQ(45, r.Call(1));
}
@@ -2042,8 +1704,8 @@ TEST(Run_Wasm_Infinite_Loop_not_taken2) {
TEST(Run_Wasm_Infinite_Loop_not_taken2_brif) {
WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r, WASM_BLOCK(2, WASM_BRV_IF(0, WASM_I8(45), WASM_GET_LOCAL(0)),
- WASM_INFINITE_LOOP));
+ BUILD(r,
+ B2(WASM_BRV_IF(0, WASM_I8(45), WASM_GET_LOCAL(0)), WASM_INFINITE_LOOP));
// Run the code, but don't go into the infinite loop.
CHECK_EQ(45, r.Call(1));
}
@@ -2052,8 +1714,8 @@ TEST(Run_Wasm_Infinite_Loop_not_taken2_brif) {
static void TestBuildGraphForSimpleExpression(WasmOpcode opcode) {
if (!WasmOpcodes::IsSupported(opcode)) return;
- Zone zone;
Isolate* isolate = CcTest::InitIsolateOnce();
+ Zone zone(isolate->allocator());
HandleScope scope(isolate);
// Enable all optional operators.
CommonOperatorBuilder common(&zone);
@@ -2061,18 +1723,19 @@ static void TestBuildGraphForSimpleExpression(WasmOpcode opcode) {
MachineOperatorBuilder::kAllOptionalOps);
Graph graph(&zone);
JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
- FunctionEnv env;
FunctionSig* sig = WasmOpcodes::Signature(opcode);
- init_env(&env, sig);
if (sig->parameter_count() == 1) {
- byte code[] = {static_cast<byte>(opcode), kExprGetLocal, 0};
- TestBuildingGraph(&zone, &jsgraph, &env, code, code + arraysize(code));
+ byte code[] = {WASM_NO_LOCALS, static_cast<byte>(opcode), kExprGetLocal, 0};
+ TestBuildingGraph(&zone, &jsgraph, nullptr, sig, code,
+ code + arraysize(code));
} else {
CHECK_EQ(2, sig->parameter_count());
- byte code[] = {static_cast<byte>(opcode), kExprGetLocal, 0, kExprGetLocal,
- 1};
- TestBuildingGraph(&zone, &jsgraph, &env, code, code + arraysize(code));
+ byte code[] = {WASM_NO_LOCALS, static_cast<byte>(opcode),
+ kExprGetLocal, 0,
+ kExprGetLocal, 1};
+ TestBuildingGraph(&zone, &jsgraph, nullptr, sig, code,
+ code + arraysize(code));
}
}
@@ -2196,38 +1859,15 @@ TEST(Run_WasmInt32Globals_DontAlias) {
}
-#if WASM_64
-TEST(Run_WasmInt64Global) {
- TestingModule module;
- int64_t* global = module.AddGlobal<int64_t>(MachineType::Int64());
- WasmRunner<int32_t> r(&module, MachineType::Int32());
- // global = global + p0
- BUILD(r, WASM_BLOCK(2, WASM_STORE_GLOBAL(
- 0, WASM_I64_ADD(
- WASM_LOAD_GLOBAL(0),
- WASM_I64_SCONVERT_I32(WASM_GET_LOCAL(0)))),
- WASM_ZERO));
-
- *global = 0xFFFFFFFFFFFFFFFFLL;
- for (int i = 9; i < 444444; i += 111111) {
- int64_t expected = *global + i;
- r.Call(i);
- CHECK_EQ(expected, *global);
- }
-}
-#endif
-
-
TEST(Run_WasmFloat32Global) {
TestingModule module;
float* global = module.AddGlobal<float>(MachineType::Float32());
WasmRunner<int32_t> r(&module, MachineType::Int32());
// global = global + p0
- BUILD(r, WASM_BLOCK(2, WASM_STORE_GLOBAL(
- 0, WASM_F32_ADD(
- WASM_LOAD_GLOBAL(0),
- WASM_F32_SCONVERT_I32(WASM_GET_LOCAL(0)))),
- WASM_ZERO));
+ BUILD(r, B2(WASM_STORE_GLOBAL(
+ 0, WASM_F32_ADD(WASM_LOAD_GLOBAL(0),
+ WASM_F32_SCONVERT_I32(WASM_GET_LOCAL(0)))),
+ WASM_ZERO));
*global = 1.25;
for (int i = 9; i < 4444; i += 1111) {
@@ -2243,11 +1883,10 @@ TEST(Run_WasmFloat64Global) {
double* global = module.AddGlobal<double>(MachineType::Float64());
WasmRunner<int32_t> r(&module, MachineType::Int32());
// global = global + p0
- BUILD(r, WASM_BLOCK(2, WASM_STORE_GLOBAL(
- 0, WASM_F64_ADD(
- WASM_LOAD_GLOBAL(0),
- WASM_F64_SCONVERT_I32(WASM_GET_LOCAL(0)))),
- WASM_ZERO));
+ BUILD(r, B2(WASM_STORE_GLOBAL(
+ 0, WASM_F64_ADD(WASM_LOAD_GLOBAL(0),
+ WASM_F64_SCONVERT_I32(WASM_GET_LOCAL(0)))),
+ WASM_ZERO));
*global = 1.25;
for (int i = 9; i < 4444; i += 1111) {
@@ -2313,69 +1952,13 @@ TEST(Run_WasmMixedGlobals) {
}
-#if WASM_64
-// Test the WasmRunner with an Int64 return value and different numbers of
-// Int64 parameters.
-TEST(Run_TestI64WasmRunner) {
- {
- FOR_INT64_INPUTS(i) {
- WasmRunner<int64_t> r;
- BUILD(r, WASM_I64(*i));
- CHECK_EQ(*i, r.Call());
- }
- }
- {
- WasmRunner<int64_t> r(MachineType::Int64());
- BUILD(r, WASM_GET_LOCAL(0));
- FOR_INT64_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
- }
- {
- WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
- BUILD(r, WASM_I64_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- FOR_INT64_INPUTS(i) {
- FOR_INT64_INPUTS(j) { CHECK_EQ(*i + *j, r.Call(*i, *j)); }
- }
- }
- {
- WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64(),
- MachineType::Int64());
- BUILD(r, WASM_I64_ADD(WASM_GET_LOCAL(0),
- WASM_I64_ADD(WASM_GET_LOCAL(1), WASM_GET_LOCAL(2))));
- FOR_INT64_INPUTS(i) {
- FOR_INT64_INPUTS(j) {
- CHECK_EQ(*i + *j + *j, r.Call(*i, *j, *j));
- CHECK_EQ(*j + *i + *j, r.Call(*j, *i, *j));
- CHECK_EQ(*j + *j + *i, r.Call(*j, *j, *i));
- }
- }
- }
- {
- WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64(),
- MachineType::Int64(), MachineType::Int64());
- BUILD(r, WASM_I64_ADD(WASM_GET_LOCAL(0),
- WASM_I64_ADD(WASM_GET_LOCAL(1),
- WASM_I64_ADD(WASM_GET_LOCAL(2),
- WASM_GET_LOCAL(3)))));
- FOR_INT64_INPUTS(i) {
- FOR_INT64_INPUTS(j) {
- CHECK_EQ(*i + *j + *j + *j, r.Call(*i, *j, *j, *j));
- CHECK_EQ(*j + *i + *j + *j, r.Call(*j, *i, *j, *j));
- CHECK_EQ(*j + *j + *i + *j, r.Call(*j, *j, *i, *j));
- CHECK_EQ(*j + *j + *j + *i, r.Call(*j, *j, *j, *i));
- }
- }
- }
-}
-#endif
-
-
TEST(Run_WasmCallEmpty) {
const int32_t kExpected = -414444;
// Build the target function.
TestSignatures sigs;
TestingModule module;
WasmFunctionCompiler t(sigs.i_v(), &module);
- BUILD(t, WASM_I32(kExpected));
+ BUILD(t, WASM_I32V_3(kExpected));
uint32_t index = t.CompileAndAdd();
// Build the calling function.
@@ -2436,51 +2019,6 @@ TEST(Run_WasmCallF64StackParameter) {
CHECK_EQ(256.5, result);
}
-TEST(Run_WasmCallI64Parameter) {
- // Build the target function.
- LocalType param_types[20];
- for (int i = 0; i < 20; i++) param_types[i] = kAstI64;
- param_types[3] = kAstI32;
- param_types[4] = kAstI32;
- FunctionSig sig(1, 19, param_types);
- for (int i = 0; i < 19; i++) {
- TestingModule module;
- WasmFunctionCompiler t(&sig, &module);
- if (i == 2 || i == 3) {
- continue;
- } else {
- BUILD(t, WASM_GET_LOCAL(i));
- }
- uint32_t index = t.CompileAndAdd();
-
- // Build the calling function.
- WasmRunner<int32_t> r;
- r.env()->module = &module;
- BUILD(r,
- WASM_I32_CONVERT_I64(WASM_CALL_FUNCTION(
- index, WASM_I64(0xbcd12340000000b), WASM_I64(0xbcd12340000000c),
- WASM_I32(0xd), WASM_I32_CONVERT_I64(WASM_I64(0xbcd12340000000e)),
- WASM_I64(0xbcd12340000000f), WASM_I64(0xbcd1234000000010),
- WASM_I64(0xbcd1234000000011), WASM_I64(0xbcd1234000000012),
- WASM_I64(0xbcd1234000000013), WASM_I64(0xbcd1234000000014),
- WASM_I64(0xbcd1234000000015), WASM_I64(0xbcd1234000000016),
- WASM_I64(0xbcd1234000000017), WASM_I64(0xbcd1234000000018),
- WASM_I64(0xbcd1234000000019), WASM_I64(0xbcd123400000001a),
- WASM_I64(0xbcd123400000001b), WASM_I64(0xbcd123400000001c),
- WASM_I64(0xbcd123400000001d))));
-
- CHECK_EQ(i + 0xb, r.Call());
- }
-}
-
-TEST(Run_WasmI64And) {
- WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
- BUILD(r, WASM_I64_AND(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- FOR_INT64_INPUTS(i) {
- FOR_INT64_INPUTS(j) { CHECK_EQ((*i) & (*j), r.Call(*i, *j)); }
- }
-}
-
TEST(Run_WasmCallVoid) {
const byte kMemOffset = 8;
const int32_t kElemNum = kMemOffset / sizeof(int32_t);
@@ -2492,7 +2030,7 @@ TEST(Run_WasmCallVoid) {
module.RandomizeMemory();
WasmFunctionCompiler t(sigs.v_v(), &module);
BUILD(t, WASM_STORE_MEM(MachineType::Int32(), WASM_I8(kMemOffset),
- WASM_I32(kExpected)));
+ WASM_I32V_3(kExpected)));
uint32_t index = t.CompileAndAdd();
// Build the calling function.
@@ -2527,36 +2065,6 @@ TEST(Run_WasmCall_Int32Add) {
}
}
-
-#if WASM_64
-TEST(Run_WasmCall_Int64Sub) {
- // Build the target function.
- TestSignatures sigs;
- TestingModule module;
- WasmFunctionCompiler t(sigs.l_ll(), &module);
- BUILD(t, WASM_I64_SUB(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- uint32_t index = t.CompileAndAdd();
-
- // Build the caller function.
- WasmRunner<int64_t> r(&module, MachineType::Int64(), MachineType::Int64());
- BUILD(r, WASM_CALL_FUNCTION(index, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
-
- FOR_INT32_INPUTS(i) {
- FOR_INT32_INPUTS(j) {
- int64_t a = static_cast<int64_t>(*i) << 32 |
- (static_cast<int64_t>(*j) | 0xFFFFFFFF);
- int64_t b = static_cast<int64_t>(*j) << 32 |
- (static_cast<int64_t>(*i) | 0xFFFFFFFF);
-
- int64_t expected = static_cast<int64_t>(static_cast<uint64_t>(a) -
- static_cast<uint64_t>(b));
- CHECK_EQ(expected, r.Call(a, b));
- }
- }
-}
-#endif
-
-
TEST(Run_WasmCall_Float32Sub) {
TestSignatures sigs;
TestingModule module;
@@ -2571,10 +2079,7 @@ TEST(Run_WasmCall_Float32Sub) {
BUILD(r, WASM_CALL_FUNCTION(index, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_FLOAT32_INPUTS(i) {
- FOR_FLOAT32_INPUTS(j) {
- volatile float expected = *i - *j;
- CheckFloatEq(expected, r.Call(*i, *j));
- }
+ FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(*i - *j, r.Call(*i, *j)); }
}
}
@@ -2584,7 +2089,6 @@ TEST(Run_WasmCall_Float64Sub) {
double* memory = module.AddMemoryElems<double>(16);
WasmRunner<int32_t> r(&module);
- // TODO(titzer): convert to a binop test.
BUILD(r, WASM_BLOCK(
2, WASM_STORE_MEM(
MachineType::Float64(), WASM_ZERO,
@@ -2637,7 +2141,8 @@ static void Run_WasmMixedCall_N(int start) {
int num_params = static_cast<int>(arraysize(mixed)) - start;
for (int which = 0; which < num_params; which++) {
- Zone zone;
+ v8::base::AccountingAllocator allocator;
+ Zone zone(&allocator);
TestingModule module;
module.AddMemory(1024);
MachineType* memtypes = &mixed[start];
@@ -2664,7 +2169,7 @@ static void Run_WasmMixedCall_N(int start) {
std::vector<byte> code;
ADD_CODE(code,
static_cast<byte>(WasmOpcodes::LoadStoreOpcodeOf(result, true)),
- WasmOpcodes::LoadStoreAccessOf(false));
+ ZERO_ALIGNMENT, ZERO_OFFSET);
ADD_CODE(code, WASM_ZERO);
ADD_CODE(code, kExprCallFunction, static_cast<byte>(index));
@@ -2673,7 +2178,7 @@ static void Run_WasmMixedCall_N(int start) {
ADD_CODE(code, WASM_LOAD_MEM(memtypes[i], WASM_I8(offset)));
}
- ADD_CODE(code, WASM_I32(kExpected));
+ ADD_CODE(code, WASM_I32V_2(kExpected));
size_t end = code.size();
code.push_back(0);
r.Build(&code[0], &code[end]);
@@ -2709,13 +2214,12 @@ TEST(Run_Wasm_AddCall) {
WasmRunner<int32_t> r(&module, MachineType::Int32());
byte local = r.AllocateLocal(kAstI32);
- BUILD(r,
- WASM_BLOCK(2, WASM_SET_LOCAL(local, WASM_I8(99)),
- WASM_I32_ADD(
- WASM_CALL_FUNCTION(t1.function_index_, WASM_GET_LOCAL(0),
- WASM_GET_LOCAL(0)),
- WASM_CALL_FUNCTION(t1.function_index_, WASM_GET_LOCAL(1),
- WASM_GET_LOCAL(local)))));
+ BUILD(r, B2(WASM_SET_LOCAL(local, WASM_I8(99)),
+ WASM_I32_ADD(
+ WASM_CALL_FUNCTION(t1.function_index_, WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(0)),
+ WASM_CALL_FUNCTION(t1.function_index_, WASM_GET_LOCAL(1),
+ WASM_GET_LOCAL(local)))));
CHECK_EQ(198, r.Call(0));
CHECK_EQ(200, r.Call(1));
@@ -2737,8 +2241,7 @@ TEST(Run_Wasm_CountDown_expr) {
TEST(Run_Wasm_ExprBlock2a) {
WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r, WASM_BLOCK(2, WASM_IF(WASM_GET_LOCAL(0), WASM_BRV(0, WASM_I8(1))),
- WASM_I8(1)));
+ BUILD(r, B2(WASM_IF(WASM_GET_LOCAL(0), WASM_BRV(0, WASM_I8(1))), WASM_I8(1)));
CHECK_EQ(1, r.Call(0));
CHECK_EQ(1, r.Call(1));
}
@@ -2746,8 +2249,7 @@ TEST(Run_Wasm_ExprBlock2a) {
TEST(Run_Wasm_ExprBlock2b) {
WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r, WASM_BLOCK(2, WASM_IF(WASM_GET_LOCAL(0), WASM_BRV(0, WASM_I8(1))),
- WASM_I8(2)));
+ BUILD(r, B2(WASM_IF(WASM_GET_LOCAL(0), WASM_BRV(0, WASM_I8(1))), WASM_I8(2)));
CHECK_EQ(2, r.Call(0));
CHECK_EQ(1, r.Call(1));
}
@@ -2755,8 +2257,7 @@ TEST(Run_Wasm_ExprBlock2b) {
TEST(Run_Wasm_ExprBlock2c) {
WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r, WASM_BLOCK(2, WASM_BRV_IF(0, WASM_I8(1), WASM_GET_LOCAL(0)),
- WASM_I8(1)));
+ BUILD(r, B2(WASM_BRV_IF(0, WASM_I8(1), WASM_GET_LOCAL(0)), WASM_I8(1)));
CHECK_EQ(1, r.Call(0));
CHECK_EQ(1, r.Call(1));
}
@@ -2764,8 +2265,7 @@ TEST(Run_Wasm_ExprBlock2c) {
TEST(Run_Wasm_ExprBlock2d) {
WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r, WASM_BLOCK(2, WASM_BRV_IF(0, WASM_I8(1), WASM_GET_LOCAL(0)),
- WASM_I8(2)));
+ BUILD(r, B2(WASM_BRV_IF(0, WASM_I8(1), WASM_GET_LOCAL(0)), WASM_I8(2)));
CHECK_EQ(2, r.Call(0));
CHECK_EQ(1, r.Call(1));
}
@@ -2837,9 +2337,8 @@ TEST(Run_Wasm_nested_ifs) {
TEST(Run_Wasm_ExprBlock_if) {
WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r,
- WASM_BLOCK(1, WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_BRV(0, WASM_I8(11)),
- WASM_BRV(0, WASM_I8(14)))));
+ BUILD(r, B1(WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_BRV(0, WASM_I8(11)),
+ WASM_BRV(0, WASM_I8(14)))));
CHECK_EQ(11, r.Call(1));
CHECK_EQ(14, r.Call(0));
@@ -2884,42 +2383,6 @@ TEST(Run_Wasm_ExprLoop_nested_ifs) {
}
-#if WASM_64
-TEST(Run_Wasm_LoadStoreI64_sx) {
- byte loads[] = {kExprI64LoadMem8S, kExprI64LoadMem16S, kExprI64LoadMem32S,
- kExprI64LoadMem};
-
- for (size_t m = 0; m < arraysize(loads); m++) {
- TestingModule module;
- byte* memory = module.AddMemoryElems<byte>(16);
- WasmRunner<int64_t> r(&module);
-
- byte code[] = {kExprI64StoreMem, 0, kExprI8Const, 8,
- loads[m], 0, kExprI8Const, 0};
-
- r.Build(code, code + arraysize(code));
-
- // Try a bunch of different negative values.
- for (int i = -1; i >= -128; i -= 11) {
- int size = 1 << m;
- module.BlankMemory();
- memory[size - 1] = static_cast<byte>(i); // set the high order byte.
-
- int64_t expected = static_cast<int64_t>(i) << ((size - 1) * 8);
-
- CHECK_EQ(expected, r.Call());
- CHECK_EQ(static_cast<byte>(i), memory[8 + size - 1]);
- for (int j = size; j < 8; j++) {
- CHECK_EQ(255, memory[8 + j]);
- }
- }
- }
-}
-
-
-#endif
-
-
TEST(Run_Wasm_SimpleCallIndirect) {
TestSignatures sigs;
TestingModule module;
@@ -3020,56 +2483,56 @@ TEST(Run_Wasm_F32Floor) {
WasmRunner<float> r(MachineType::Float32());
BUILD(r, WASM_F32_FLOOR(WASM_GET_LOCAL(0)));
- FOR_FLOAT32_INPUTS(i) { CheckFloatEq(floorf(*i), r.Call(*i)); }
+ FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(floorf(*i), r.Call(*i)); }
}
TEST(Run_Wasm_F32Ceil) {
WasmRunner<float> r(MachineType::Float32());
BUILD(r, WASM_F32_CEIL(WASM_GET_LOCAL(0)));
- FOR_FLOAT32_INPUTS(i) { CheckFloatEq(ceilf(*i), r.Call(*i)); }
+ FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(ceilf(*i), r.Call(*i)); }
}
TEST(Run_Wasm_F32Trunc) {
WasmRunner<float> r(MachineType::Float32());
BUILD(r, WASM_F32_TRUNC(WASM_GET_LOCAL(0)));
- FOR_FLOAT32_INPUTS(i) { CheckFloatEq(truncf(*i), r.Call(*i)); }
+ FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(truncf(*i), r.Call(*i)); }
}
TEST(Run_Wasm_F32NearestInt) {
WasmRunner<float> r(MachineType::Float32());
BUILD(r, WASM_F32_NEARESTINT(WASM_GET_LOCAL(0)));
- FOR_FLOAT32_INPUTS(i) { CheckFloatEq(nearbyintf(*i), r.Call(*i)); }
+ FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(nearbyintf(*i), r.Call(*i)); }
}
TEST(Run_Wasm_F64Floor) {
WasmRunner<double> r(MachineType::Float64());
BUILD(r, WASM_F64_FLOOR(WASM_GET_LOCAL(0)));
- FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(floor(*i), r.Call(*i)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(floor(*i), r.Call(*i)); }
}
TEST(Run_Wasm_F64Ceil) {
WasmRunner<double> r(MachineType::Float64());
BUILD(r, WASM_F64_CEIL(WASM_GET_LOCAL(0)));
- FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(ceil(*i), r.Call(*i)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ceil(*i), r.Call(*i)); }
}
TEST(Run_Wasm_F64Trunc) {
WasmRunner<double> r(MachineType::Float64());
BUILD(r, WASM_F64_TRUNC(WASM_GET_LOCAL(0)));
- FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(trunc(*i), r.Call(*i)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(trunc(*i), r.Call(*i)); }
}
TEST(Run_Wasm_F64NearestInt) {
WasmRunner<double> r(MachineType::Float64());
BUILD(r, WASM_F64_NEARESTINT(WASM_GET_LOCAL(0)));
- FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(nearbyint(*i), r.Call(*i)); }
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(nearbyint(*i), r.Call(*i)); }
}
TEST(Run_Wasm_F32Min) {
@@ -3090,7 +2553,7 @@ TEST(Run_Wasm_F32Min) {
expected = *j;
}
- CheckFloatEq(expected, r.Call(*i, *j));
+ CHECK_FLOAT_EQ(expected, r.Call(*i, *j));
}
}
}
@@ -3114,7 +2577,7 @@ TEST(Run_Wasm_F64Min) {
expected = *j;
}
- CheckDoubleEq(expected, r.Call(*i, *j));
+ CHECK_DOUBLE_EQ(expected, r.Call(*i, *j));
}
}
}
@@ -3138,7 +2601,7 @@ TEST(Run_Wasm_F32Max) {
expected = *j;
}
- CheckFloatEq(expected, r.Call(*i, *j));
+ CHECK_FLOAT_EQ(expected, r.Call(*i, *j));
}
}
}
@@ -3162,13 +2625,14 @@ TEST(Run_Wasm_F64Max) {
expected = *j;
}
- CheckDoubleEq(expected, r.Call(*i, *j));
+ CHECK_DOUBLE_EQ(expected, r.Call(*i, *j));
}
}
}
-// TODO(ahaas): Fix on arm and reenable.
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
+// TODO(ahaas): Fix on arm and mips and reenable.
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
TEST(Run_Wasm_F32Min_Snan) {
// Test that the instruction does not return a signalling NaN.
@@ -3236,102 +2700,6 @@ TEST(Run_Wasm_F64Max_Snan) {
#endif
-#if WASM_64
-TEST(Run_Wasm_F32SConvertI64) {
- WasmRunner<float> r(MachineType::Int64());
- BUILD(r, WASM_F32_SCONVERT_I64(WASM_GET_LOCAL(0)));
- FOR_INT64_INPUTS(i) { CHECK_EQ(static_cast<float>(*i), r.Call(*i)); }
-}
-
-
-#if !defined(_WIN64)
-// TODO(ahaas): Fix this failure.
-TEST(Run_Wasm_F32UConvertI64) {
- WasmRunner<float> r(MachineType::Uint64());
- BUILD(r, WASM_F32_UCONVERT_I64(WASM_GET_LOCAL(0)));
- FOR_UINT64_INPUTS(i) { CHECK_EQ(static_cast<float>(*i), r.Call(*i)); }
-}
-#endif
-
-
-TEST(Run_Wasm_F64SConvertI64) {
- WasmRunner<double> r(MachineType::Int64());
- BUILD(r, WASM_F64_SCONVERT_I64(WASM_GET_LOCAL(0)));
- FOR_INT64_INPUTS(i) { CHECK_EQ(static_cast<double>(*i), r.Call(*i)); }
-}
-
-
-#if !defined(_WIN64)
-// TODO(ahaas): Fix this failure.
-TEST(Run_Wasm_F64UConvertI64) {
- WasmRunner<double> r(MachineType::Uint64());
- BUILD(r, WASM_F64_UCONVERT_I64(WASM_GET_LOCAL(0)));
- FOR_UINT64_INPUTS(i) { CHECK_EQ(static_cast<double>(*i), r.Call(*i)); }
-}
-#endif
-
-
-TEST(Run_Wasm_I64SConvertF32) {
- WasmRunner<int64_t> r(MachineType::Float32());
- BUILD(r, WASM_I64_SCONVERT_F32(WASM_GET_LOCAL(0)));
-
- FOR_FLOAT32_INPUTS(i) {
- if (*i < static_cast<float>(INT64_MAX) &&
- *i >= static_cast<float>(INT64_MIN)) {
- CHECK_EQ(static_cast<int64_t>(*i), r.Call(*i));
- } else {
- CHECK_TRAP64(r.Call(*i));
- }
- }
-}
-
-
-TEST(Run_Wasm_I64SConvertF64) {
- WasmRunner<int64_t> r(MachineType::Float64());
- BUILD(r, WASM_I64_SCONVERT_F64(WASM_GET_LOCAL(0)));
-
- FOR_FLOAT64_INPUTS(i) {
- if (*i < static_cast<double>(INT64_MAX) &&
- *i >= static_cast<double>(INT64_MIN)) {
- CHECK_EQ(static_cast<int64_t>(*i), r.Call(*i));
- } else {
- CHECK_TRAP64(r.Call(*i));
- }
- }
-}
-
-
-TEST(Run_Wasm_I64UConvertF32) {
- WasmRunner<uint64_t> r(MachineType::Float32());
- BUILD(r, WASM_I64_UCONVERT_F32(WASM_GET_LOCAL(0)));
-
- FOR_FLOAT32_INPUTS(i) {
- if (*i < static_cast<float>(UINT64_MAX) && *i > -1) {
- CHECK_EQ(static_cast<uint64_t>(*i), r.Call(*i));
- } else {
- CHECK_TRAP64(r.Call(*i));
- }
- }
-}
-
-
-TEST(Run_Wasm_I64UConvertF64) {
- WasmRunner<uint64_t> r(MachineType::Float64());
- BUILD(r, WASM_I64_UCONVERT_F64(WASM_GET_LOCAL(0)));
-
- FOR_FLOAT64_INPUTS(i) {
- if (*i < static_cast<float>(UINT64_MAX) && *i > -1) {
- CHECK_EQ(static_cast<uint64_t>(*i), r.Call(*i));
- } else {
- CHECK_TRAP64(r.Call(*i));
- }
- }
-}
-#endif
-
-
-// TODO(titzer): Fix and re-enable.
-#if 0
TEST(Run_Wasm_I32SConvertF32) {
WasmRunner<int32_t> r(MachineType::Float32());
BUILD(r, WASM_I32_SCONVERT_F32(WASM_GET_LOCAL(0)));
@@ -3352,8 +2720,8 @@ TEST(Run_Wasm_I32SConvertF64) {
BUILD(r, WASM_I32_SCONVERT_F64(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) {
- if (*i < static_cast<double>(INT32_MAX) &&
- *i >= static_cast<double>(INT32_MIN)) {
+ if (*i < (static_cast<double>(INT32_MAX) + 1.0) &&
+ *i > (static_cast<double>(INT32_MIN) - 1.0)) {
CHECK_EQ(static_cast<int64_t>(*i), r.Call(*i));
} else {
CHECK_TRAP32(r.Call(*i));
@@ -3367,7 +2735,7 @@ TEST(Run_Wasm_I32UConvertF32) {
BUILD(r, WASM_I32_UCONVERT_F32(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) {
- if (*i < static_cast<float>(UINT32_MAX) && *i > -1) {
+ if (*i < (static_cast<float>(UINT32_MAX) + 1.0) && *i > -1) {
CHECK_EQ(static_cast<uint32_t>(*i), r.Call(*i));
} else {
CHECK_TRAP32(r.Call(*i));
@@ -3381,48 +2749,40 @@ TEST(Run_Wasm_I32UConvertF64) {
BUILD(r, WASM_I32_UCONVERT_F64(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) {
- if (*i < static_cast<float>(UINT32_MAX) && *i > -1) {
+ if (*i < (static_cast<float>(UINT32_MAX) + 1.0) && *i > -1) {
CHECK_EQ(static_cast<uint32_t>(*i), r.Call(*i));
} else {
CHECK_TRAP32(r.Call(*i));
}
}
}
-#endif
-
TEST(Run_Wasm_F64CopySign) {
WasmRunner<double> r(MachineType::Float64(), MachineType::Float64());
BUILD(r, WASM_F64_COPYSIGN(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_FLOAT64_INPUTS(i) {
- FOR_FLOAT64_INPUTS(j) { CheckDoubleEq(copysign(*i, *j), r.Call(*i, *j)); }
+ FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(copysign(*i, *j), r.Call(*i, *j)); }
}
}
-// TODO(tizer): Fix on arm and reenable.
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
-
TEST(Run_Wasm_F32CopySign) {
WasmRunner<float> r(MachineType::Float32(), MachineType::Float32());
BUILD(r, WASM_F32_COPYSIGN(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_FLOAT32_INPUTS(i) {
- FOR_FLOAT32_INPUTS(j) { CheckFloatEq(copysign(*i, *j), r.Call(*i, *j)); }
+ FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(copysignf(*i, *j), r.Call(*i, *j)); }
}
}
-
-#endif
-
-
void CompileCallIndirectMany(LocalType param) {
// Make sure we don't run out of registers when compiling indirect calls
// with many many parameters.
TestSignatures sigs;
for (byte num_params = 0; num_params < 40; num_params++) {
- Zone zone;
+ v8::base::AccountingAllocator allocator;
+ Zone zone(&allocator);
HandleScope scope(CcTest::InitIsolateOnce());
TestingModule module;
FunctionSig* sig = sigs.many(&zone, kAstStmt, param, num_params);
@@ -3458,3 +2818,15 @@ TEST(Compile_Wasm_CallIndirect_Many_f32) { CompileCallIndirectMany(kAstF32); }
TEST(Compile_Wasm_CallIndirect_Many_f64) { CompileCallIndirectMany(kAstF64); }
+
+TEST(Run_WASM_Int32RemS_dead) {
+ WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
+ BUILD(r, WASM_I32_REMS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)), WASM_ZERO);
+ const int32_t kMin = std::numeric_limits<int32_t>::min();
+ CHECK_EQ(0, r.Call(133, 100));
+ CHECK_EQ(0, r.Call(kMin, -1));
+ CHECK_EQ(0, r.Call(0, 1));
+ CHECK_TRAP(r.Call(100, 0));
+ CHECK_TRAP(r.Call(-1001, 0));
+ CHECK_TRAP(r.Call(kMin, 0));
+}
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.h b/deps/v8/test/cctest/wasm/wasm-run-utils.h
index 7ee3981885..1e85f46887 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.h
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.h
@@ -51,22 +51,18 @@ static const uint32_t kMaxFunctions = 10;
#define WASM_RUNNER_MAX_NUM_PARAMETERS 4
#define WASM_WRAPPER_RETURN_VALUE 8754
+#define BUILD(r, ...) \
+ do { \
+ byte code[] = {__VA_ARGS__}; \
+ r.Build(code, code + arraysize(code)); \
+ } while (false)
+
namespace {
using namespace v8::base;
using namespace v8::internal;
using namespace v8::internal::compiler;
using namespace v8::internal::wasm;
-inline void init_env(FunctionEnv* env, FunctionSig* sig) {
- env->module = nullptr;
- env->sig = sig;
- env->local_i32_count = 0;
- env->local_i64_count = 0;
- env->local_f32_count = 0;
- env->local_f64_count = 0;
- env->SumLocals();
-}
-
const uint32_t kMaxGlobalsSize = 128;
// A helper for module environments that adds the ability to allocate memory
@@ -83,9 +79,8 @@ class TestingModule : public ModuleEnv {
instance->globals_size = kMaxGlobalsSize;
instance->mem_start = nullptr;
instance->mem_size = 0;
- instance->function_code = nullptr;
linker = nullptr;
- asm_js = false;
+ origin = kWasmOrigin;
memset(global_data, 0, sizeof(global_data));
}
@@ -93,9 +88,6 @@ class TestingModule : public ModuleEnv {
if (instance->mem_start) {
free(instance->mem_start);
}
- if (instance->function_code) {
- delete instance->function_code;
- }
}
byte* AddMemory(size_t size) {
@@ -121,11 +113,8 @@ class TestingModule : public ModuleEnv {
}
byte AddSignature(FunctionSig* sig) {
- if (!module->signatures) {
- module->signatures = new std::vector<FunctionSig*>();
- }
- module->signatures->push_back(sig);
- size_t size = module->signatures->size();
+ module->signatures.push_back(sig);
+ size_t size = module->signatures.size();
CHECK(size < 127);
return static_cast<byte>(size - 1);
}
@@ -171,23 +160,21 @@ class TestingModule : public ModuleEnv {
}
int AddFunction(FunctionSig* sig, Handle<Code> code) {
- if (module->functions == nullptr) {
- module->functions = new std::vector<WasmFunction>();
+ if (module->functions.size() == 0) {
// TODO(titzer): Reserving space here to avoid the underlying WasmFunction
// structs from moving.
- module->functions->reserve(kMaxFunctions);
- instance->function_code = new std::vector<Handle<Code>>();
+ module->functions.reserve(kMaxFunctions);
}
- uint32_t index = static_cast<uint32_t>(module->functions->size());
- module->functions->push_back(
- {sig, index, 0, 0, 0, 0, 0, 0, 0, false, false});
- instance->function_code->push_back(code);
+ uint32_t index = static_cast<uint32_t>(module->functions.size());
+ module->functions.push_back(
+ {sig, index, 0, 0, 0, 0, 0, 0, 0, 0, 0, false, false});
+ instance->function_code.push_back(code);
DCHECK_LT(index, kMaxFunctions); // limited for testing.
return index;
}
void SetFunctionCode(uint32_t index, Handle<Code> code) {
- instance->function_code->at(index) = code;
+ instance->function_code[index] = code;
}
void AddIndirectFunctionTable(int* functions, int table_size) {
@@ -195,21 +182,21 @@ class TestingModule : public ModuleEnv {
Handle<FixedArray> fixed =
isolate->factory()->NewFixedArray(2 * table_size);
instance->function_table = fixed;
- module->function_table = new std::vector<uint16_t>();
+ DCHECK_EQ(0u, module->function_table.size());
for (int i = 0; i < table_size; i++) {
- module->function_table->push_back(functions[i]);
+ module->function_table.push_back(functions[i]);
}
}
void PopulateIndirectFunctionTable() {
if (instance->function_table.is_null()) return;
- int table_size = static_cast<int>(module->function_table->size());
+ int table_size = static_cast<int>(module->function_table.size());
for (int i = 0; i < table_size; i++) {
- int function_index = module->function_table->at(i);
- WasmFunction* function = &module->functions->at(function_index);
+ int function_index = module->function_table[i];
+ WasmFunction* function = &module->functions[function_index];
instance->function_table->set(i, Smi::FromInt(function->sig_index));
- instance->function_table->set(
- i + table_size, *instance->function_code->at(function_index));
+ instance->function_table->set(i + table_size,
+ *instance->function_code[function_index]);
}
}
@@ -220,24 +207,22 @@ class TestingModule : public ModuleEnv {
V8_ALIGNED(8) byte global_data[kMaxGlobalsSize]; // preallocated global data.
WasmGlobal* AddGlobal(MachineType mem_type) {
- if (!module->globals) {
- module->globals = new std::vector<WasmGlobal>();
- }
byte size = WasmOpcodes::MemSize(mem_type);
global_offset = (global_offset + size - 1) & ~(size - 1); // align
- module->globals->push_back({0, mem_type, global_offset, false});
+ module->globals.push_back({0, 0, mem_type, global_offset, false});
global_offset += size;
// limit number of globals.
CHECK_LT(global_offset, kMaxGlobalsSize);
- return &module->globals->back();
+ return &module->globals.back();
}
};
-
-inline void TestBuildingGraph(Zone* zone, JSGraph* jsgraph, FunctionEnv* env,
- const byte* start, const byte* end) {
- compiler::WasmGraphBuilder builder(zone, jsgraph, env->sig);
- TreeResult result = BuildTFGraph(&builder, env, start, end);
+inline void TestBuildingGraph(Zone* zone, JSGraph* jsgraph, ModuleEnv* module,
+ FunctionSig* sig, const byte* start,
+ const byte* end) {
+ compiler::WasmGraphBuilder builder(zone, jsgraph, sig);
+ TreeResult result =
+ BuildTFGraph(zone->allocator(), &builder, module, sig, start, end);
if (result.failed()) {
ptrdiff_t pc = result.error_pc - result.start;
ptrdiff_t pt = result.error_pt - result.start;
@@ -405,10 +390,9 @@ class WasmFunctionCompiler : public HandleAndZoneScope,
: GraphAndBuilders(main_zone()),
jsgraph(this->isolate(), this->graph(), this->common(), nullptr,
nullptr, this->machine()),
+ sig(sig),
descriptor_(nullptr),
testing_module_(module) {
- init_env(&env, sig);
- env.module = module;
if (module) {
// Get a new function from the testing module.
function_ = nullptr;
@@ -426,12 +410,13 @@ class WasmFunctionCompiler : public HandleAndZoneScope,
}
JSGraph jsgraph;
- FunctionEnv env;
+ FunctionSig* sig;
// The call descriptor is initialized when the function is compiled.
CallDescriptor* descriptor_;
TestingModule* testing_module_;
WasmFunction* function_;
int function_index_;
+ LocalDeclEncoder local_decls;
Isolate* isolate() { return main_isolate(); }
Graph* graph() const { return main_graph_; }
@@ -440,31 +425,25 @@ class WasmFunctionCompiler : public HandleAndZoneScope,
MachineOperatorBuilder* machine() { return &main_machine_; }
void InitializeDescriptor() {
if (descriptor_ == nullptr) {
- descriptor_ = env.module->GetWasmCallDescriptor(main_zone(), env.sig);
+ descriptor_ = testing_module_->GetWasmCallDescriptor(main_zone(), sig);
}
}
CallDescriptor* descriptor() { return descriptor_; }
void Build(const byte* start, const byte* end) {
- // Transfer local counts before compiling.
- function()->local_i32_count = env.local_i32_count;
- function()->local_i64_count = env.local_i64_count;
- function()->local_f32_count = env.local_f32_count;
- function()->local_f64_count = env.local_f64_count;
-
// Build the TurboFan graph.
- TestBuildingGraph(main_zone(), &jsgraph, &env, start, end);
+ local_decls.Prepend(&start, &end);
+ TestBuildingGraph(main_zone(), &jsgraph, testing_module_, sig, start, end);
+ delete[] start;
}
byte AllocateLocal(LocalType type) {
- int result = static_cast<int>(env.total_locals);
- env.AddLocals(type, 1);
- byte b = static_cast<byte>(result);
- CHECK_EQ(result, b);
- return b;
+ uint32_t index = local_decls.AddLocals(1, type, sig);
+ byte result = static_cast<byte>(index);
+ DCHECK_EQ(index, result);
+ return result;
}
- // TODO(titzer): remove me.
Handle<Code> Compile() {
InitializeDescriptor();
CallDescriptor* desc = descriptor_;
@@ -484,7 +463,6 @@ class WasmFunctionCompiler : public HandleAndZoneScope,
return result;
}
- // TODO(titzer): remove me.
uint32_t CompileAndAdd(uint16_t sig_index = 0) {
CHECK(testing_module_);
function()->sig_index = sig_index;
@@ -495,7 +473,7 @@ class WasmFunctionCompiler : public HandleAndZoneScope,
WasmFunction* function() {
if (function_) return function_;
- return &testing_module_->module->functions->at(function_index_);
+ return &testing_module_->module->functions[function_index_];
}
};
@@ -508,8 +486,8 @@ class WasmRunner {
MachineType p1 = MachineType::None(),
MachineType p2 = MachineType::None(),
MachineType p3 = MachineType::None())
- : compiled_(false),
-
+ : zone(&allocator_),
+ compiled_(false),
signature_(MachineTypeForC<ReturnType>() == MachineType::None() ? 0 : 1,
GetParameterCount(p0, p1, p2, p3), storage_),
compiler_(&signature_, nullptr) {
@@ -520,7 +498,8 @@ class WasmRunner {
MachineType p1 = MachineType::None(),
MachineType p2 = MachineType::None(),
MachineType p3 = MachineType::None())
- : compiled_(false),
+ : zone(&allocator_),
+ compiled_(false),
signature_(MachineTypeForC<ReturnType>() == MachineType::None() ? 0 : 1,
GetParameterCount(p0, p1, p2, p3), storage_),
compiler_(&signature_, module) {
@@ -548,8 +527,6 @@ class WasmRunner {
wrapper_.Init(compiler_.descriptor(), p0, p1, p2, p3);
}
- FunctionEnv* env() { return &compiler_.env; }
-
// Builds a graph from the given Wasm code and generates the machine
// code and call wrapper for that graph. This method must not be called
// more than once.
@@ -599,15 +576,10 @@ class WasmRunner {
return return_value;
}
- byte AllocateLocal(LocalType type) {
- int result = static_cast<int>(env()->total_locals);
- env()->AddLocals(type, 1);
- byte b = static_cast<byte>(result);
- CHECK_EQ(result, b);
- return b;
- }
+ byte AllocateLocal(LocalType type) { return compiler_.AllocateLocal(type); }
protected:
+ v8::base::AccountingAllocator allocator_;
Zone zone;
bool compiled_;
LocalType storage_[WASM_RUNNER_MAX_NUM_PARAMETERS];
diff --git a/deps/v8/test/fuzzer/fuzzer.gyp b/deps/v8/test/fuzzer/fuzzer.gyp
index 5fc338cb58..6e15a906bb 100644
--- a/deps/v8/test/fuzzer/fuzzer.gyp
+++ b/deps/v8/test/fuzzer/fuzzer.gyp
@@ -87,6 +87,58 @@
],
},
{
+ 'target_name': 'wasm_fuzzer',
+ 'type': 'executable',
+ 'dependencies': [
+ 'wasm_fuzzer_lib',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [
+ 'fuzzer.cc',
+ ],
+ },
+ {
+ 'target_name': 'wasm_fuzzer_lib',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'fuzzer_support',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [ ### gcmole(all) ###
+ 'wasm.cc',
+ ],
+ },
+ {
+ 'target_name': 'wasm_asmjs_fuzzer',
+ 'type': 'executable',
+ 'dependencies': [
+ 'wasm_asmjs_fuzzer_lib',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [
+ 'fuzzer.cc',
+ ],
+ },
+ {
+ 'target_name': 'wasm_asmjs_fuzzer_lib',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'fuzzer_support',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [ ### gcmole(all) ###
+ 'wasm-asmjs.cc',
+ ],
+ },
+ {
'target_name': 'fuzzer_support',
'type': 'static_library',
'dependencies': [
diff --git a/deps/v8/test/fuzzer/fuzzer.isolate b/deps/v8/test/fuzzer/fuzzer.isolate
index 286be2f24a..4e98edd75a 100644
--- a/deps/v8/test/fuzzer/fuzzer.isolate
+++ b/deps/v8/test/fuzzer/fuzzer.isolate
@@ -8,11 +8,15 @@
'<(PRODUCT_DIR)/json_fuzzer<(EXECUTABLE_SUFFIX)',
'<(PRODUCT_DIR)/parser_fuzzer<(EXECUTABLE_SUFFIX)',
'<(PRODUCT_DIR)/regexp_fuzzer<(EXECUTABLE_SUFFIX)',
+ '<(PRODUCT_DIR)/wasm_fuzzer<(EXECUTABLE_SUFFIX)',
+ '<(PRODUCT_DIR)/wasm_asmjs_fuzzer<(EXECUTABLE_SUFFIX)',
'./fuzzer.status',
'./testcfg.py',
'./json/',
'./parser/',
'./regexp/',
+ './wasm/',
+ './wasm_asmjs/',
],
},
'includes': [
diff --git a/deps/v8/test/fuzzer/parser.cc b/deps/v8/test/fuzzer/parser.cc
index aee4c0dab7..be70b439ef 100644
--- a/deps/v8/test/fuzzer/parser.cc
+++ b/deps/v8/test/fuzzer/parser.cc
@@ -33,7 +33,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
v8::internal::Handle<v8::internal::Script> script =
factory->NewScript(source.ToHandleChecked());
- v8::internal::Zone zone;
+ v8::internal::Zone zone(i_isolate->allocator());
v8::internal::ParseInfo info(&zone, script);
info.set_global();
v8::internal::Parser parser(&info);
diff --git a/deps/v8/test/fuzzer/testcfg.py b/deps/v8/test/fuzzer/testcfg.py
index 976325a70a..35a5abb465 100644
--- a/deps/v8/test/fuzzer/testcfg.py
+++ b/deps/v8/test/fuzzer/testcfg.py
@@ -18,7 +18,7 @@ class FuzzerVariantGenerator(testsuite.VariantGenerator):
class FuzzerTestSuite(testsuite.TestSuite):
- SUB_TESTS = ( 'json', 'parser', 'regexp', )
+ SUB_TESTS = ( 'json', 'parser', 'regexp', 'wasm', 'wasm_asmjs', )
def __init__(self, name, root):
super(FuzzerTestSuite, self).__init__(name, root)
diff --git a/deps/v8/test/fuzzer/wasm-asmjs.cc b/deps/v8/test/fuzzer/wasm-asmjs.cc
new file mode 100644
index 0000000000..3f7477bf14
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm-asmjs.cc
@@ -0,0 +1,39 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "include/v8.h"
+#include "src/factory.h"
+#include "src/isolate-inl.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/objects.h"
+#include "src/wasm/wasm-js.h"
+#include "src/wasm/wasm-module.h"
+#include "test/fuzzer/fuzzer-support.h"
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
+ v8::Isolate* isolate = support->GetIsolate();
+ v8::internal::Isolate* i_isolate =
+ reinterpret_cast<v8::internal::Isolate*>(isolate);
+
+ // Clear any pending exceptions from a prior run.
+ if (i_isolate->has_pending_exception()) {
+ i_isolate->clear_pending_exception();
+ }
+
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+ v8::Context::Scope context_scope(support->GetContext());
+ v8::TryCatch try_catch(isolate);
+ v8::internal::WasmJs::InstallWasmFunctionMap(i_isolate,
+ i_isolate->native_context());
+ v8::internal::wasm::CompileAndRunWasmModule(i_isolate, data, data + size,
+ true);
+ return 0;
+}
diff --git a/deps/v8/test/fuzzer/wasm.cc b/deps/v8/test/fuzzer/wasm.cc
new file mode 100644
index 0000000000..8750cbf786
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm.cc
@@ -0,0 +1,39 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "include/v8.h"
+#include "src/factory.h"
+#include "src/isolate-inl.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/objects.h"
+#include "src/wasm/wasm-js.h"
+#include "src/wasm/wasm-module.h"
+#include "test/fuzzer/fuzzer-support.h"
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
+ v8::Isolate* isolate = support->GetIsolate();
+ v8::internal::Isolate* i_isolate =
+ reinterpret_cast<v8::internal::Isolate*>(isolate);
+
+ // Clear any pending exceptions from a prior run.
+ if (i_isolate->has_pending_exception()) {
+ i_isolate->clear_pending_exception();
+ }
+
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+ v8::Context::Scope context_scope(support->GetContext());
+ v8::TryCatch try_catch(isolate);
+ v8::internal::WasmJs::InstallWasmFunctionMap(i_isolate,
+ i_isolate->native_context());
+ v8::internal::wasm::CompileAndRunWasmModule(i_isolate, data, data + size,
+ false);
+ return 0;
+}
diff --git a/deps/v8/test/fuzzer/wasm/foo.wasm b/deps/v8/test/fuzzer/wasm/foo.wasm
new file mode 100644
index 0000000000..79cd64b50c
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm/foo.wasm
Binary files differ
diff --git a/deps/v8/test/fuzzer/wasm_asmjs/foo.wasm b/deps/v8/test/fuzzer/wasm_asmjs/foo.wasm
new file mode 100644
index 0000000000..79cd64b50c
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm_asmjs/foo.wasm
Binary files differ
diff --git a/deps/v8/test/ignition.isolate b/deps/v8/test/ignition.isolate
index 7e4e581a6b..a8e4d5aa02 100644
--- a/deps/v8/test/ignition.isolate
+++ b/deps/v8/test/ignition.isolate
@@ -10,5 +10,7 @@
'includes': [
'cctest/cctest.isolate',
'mjsunit/mjsunit.isolate',
+ 'message/message.isolate',
+ 'webkit/webkit.isolate',
],
}
diff --git a/deps/v8/test/intl/number-format/format-is-bound.js b/deps/v8/test/intl/number-format/format-is-bound.js
index b24c2ed0ca..d93ee0067f 100644
--- a/deps/v8/test/intl/number-format/format-is-bound.js
+++ b/deps/v8/test/intl/number-format/format-is-bound.js
@@ -37,3 +37,6 @@ numberArray.forEach(nf.format);
// Formatting a number should work in a direct call.
nf.format(12345);
+
+// Reading the format doesn't add any additional property keys
+assertEquals(1, Object.getOwnPropertyNames(nf).length);
diff --git a/deps/v8/test/intl/regexp-assert.js b/deps/v8/test/intl/regexp-assert.js
new file mode 100644
index 0000000000..28fafd0125
--- /dev/null
+++ b/deps/v8/test/intl/regexp-assert.js
@@ -0,0 +1,19 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertEquals("a", RegExp.$1);
+assertEquals("b", RegExp.$2);
+assertEquals("c", RegExp.$3);
+assertEquals("d", RegExp.$4);
+assertEquals("e", RegExp.$5);
+assertEquals("f", RegExp.$6);
+assertEquals("g", RegExp.$7);
+assertEquals("h", RegExp.$8);
+assertEquals("i", RegExp.$9);
+
+assertEquals("abcdefghij", RegExp.lastMatch);
+assertEquals("j", RegExp.lastParen);
+assertEquals(">>>", RegExp.leftContext);
+assertEquals("<<<", RegExp.rightContext);
+assertEquals(">>>abcdefghij<<<", RegExp.input);
diff --git a/deps/v8/test/intl/regexp-prepare.js b/deps/v8/test/intl/regexp-prepare.js
new file mode 100644
index 0000000000..dec84110ed
--- /dev/null
+++ b/deps/v8/test/intl/regexp-prepare.js
@@ -0,0 +1,5 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/(\w)(\w)(\w)(\w)(\w)(\w)(\w)(\w)(\w)(\w)/.exec(">>>abcdefghij<<<");
diff --git a/deps/v8/test/intl/testcfg.py b/deps/v8/test/intl/testcfg.py
index 51fa1e1a88..6e4b4f15df 100644
--- a/deps/v8/test/intl/testcfg.py
+++ b/deps/v8/test/intl/testcfg.py
@@ -45,7 +45,8 @@ class IntlTestSuite(testsuite.TestSuite):
files.sort()
for filename in files:
if (filename.endswith(".js") and filename != "assert.js" and
- filename != "utils.js"):
+ filename != "utils.js" and filename != "regexp-assert.js" and
+ filename != "regexp-prepare.js"):
fullpath = os.path.join(dirname, filename)
relpath = fullpath[len(self.root) + 1 : -3]
testname = relpath.replace(os.path.sep, "/")
@@ -59,7 +60,9 @@ class IntlTestSuite(testsuite.TestSuite):
files = []
files.append(os.path.join(self.root, "assert.js"))
files.append(os.path.join(self.root, "utils.js"))
+ files.append(os.path.join(self.root, "regexp-prepare.js"))
files.append(os.path.join(self.root, testcase.path + self.suffix()))
+ files.append(os.path.join(self.root, "regexp-assert.js"))
flags += files
if context.isolates:
diff --git a/deps/v8/test/js-perf-test/JSTests.json b/deps/v8/test/js-perf-test/JSTests.json
index 31049d143f..dc3cb757b9 100644
--- a/deps/v8/test/js-perf-test/JSTests.json
+++ b/deps/v8/test/js-perf-test/JSTests.json
@@ -110,10 +110,19 @@
"name": "Object",
"path": ["Object"],
"main": "run.js",
- "resources": ["assign.js"],
+ "flags": ["--harmony"],
+ "resources": [
+ "assign.js",
+ "values.js",
+ "entries.js"
+ ],
"results_regexp": "^%s\\-Object\\(Score\\): (.+)$",
"tests": [
- {"name": "Assign"}
+ {"name": "Assign"},
+ {"name": "Entries"},
+ {"name": "EntriesMegamorphic"},
+ {"name": "Values"},
+ {"name": "ValuesMegamorphic"}
]
},
{
@@ -135,6 +144,25 @@
"tests": [
{"name": "Try-Catch"}
]
+ },
+ {
+ "name": "Keys",
+ "path": ["Keys"],
+ "main": "run.js",
+ "resources": ["keys.js"],
+ "results_regexp": "^%s\\-Keys\\(Score\\): (.+)$",
+ "run_count": 3,
+ "run_count_android_arm": 2,
+ "run_count_android_arm64": 2,
+ "tests": [
+ {"name": "Object.keys()"},
+ {"name": "for-in"},
+ {"name": "for-in hasOwnProperty()"},
+ {"name": "for (i < Object.keys().length)"},
+ {"name": "Object.keys().forEach()"},
+ {"name": "for (i < array.length)"},
+ {"name": "for (i < length)"}
+ ]
}
]
}
diff --git a/deps/v8/test/js-perf-test/Keys/keys.js b/deps/v8/test/js-perf-test/Keys/keys.js
new file mode 100644
index 0000000000..63ed0ebd7e
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Keys/keys.js
@@ -0,0 +1,209 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function ObjectWithKeys(count, keyOffset, keyGen) {
+ if (keyOffset === undefined) keyOffset = 0;
+ if (keyGen === undefined) keyGen = (i) => { return "key" + i };
+ var o = {};
+ for (var i = 0; i < count; i++) {
+ var key = keyGen(i + keyOffset);
+ o[key] = "value";
+ }
+ return o
+}
+
+function ObjectWithMixedKeys(count, keyOffset) {
+ return ObjectWithKeys(count, keyOffset, (key) => {
+ if (key % 2 == 0) return key;
+ return "key" + key;
+ });
+}
+
+// Create an object with #depth prototypes each having #keys properties.
+function ObjectWithProtoKeys(depth, keys, cacheable) {
+ var o = ObjectWithKeys(keys);
+ var current = o;
+ var keyOffset = 0;
+ for (var i = 0; i < depth; i++) {
+ keyOffset += keys;
+ current.__proto__ = ObjectWithKeys(keys, keyOffset);
+ current = current.__proto__;
+ }
+ if (cacheable === false) {
+ // Add an empty proxy at the prototype chain to make caching properties
+ // impossible.
+ current.__proto__ = new Proxy({}, {});
+ }
+ return o;
+}
+
+function HoleyIntArray(size) {
+ var array = new Array(size);
+ for (var i = 0; i < size; i += 3) {
+ array[i] = i;
+ }
+ return array
+}
+
+function IntArray(size) {
+ var array = new Array(size);
+ for (var i = 0; i < size; i++) {
+ array[i] = i;
+ }
+ return array;
+}
+
+// ============================================================================
+var object_empty = {};
+var array_empty = [];
+
+var array_int_50 = IntArray(50);
+var array_int_50_proto_elements = IntArray(50);
+array_int_50_proto_elements.__proto__ = [51, 52, 53, 54];
+var array_int_holey_50 = HoleyIntArray(50);
+
+var empty_proto_5_10 = ObjectWithKeys(5);
+empty_proto_5_10.__proto__ = ObjectWithProtoKeys(10, 0);
+
+var empty_proto_5_5_slow = ObjectWithKeys(5);
+empty_proto_5_5_slow.__proto__ = ObjectWithProtoKeys(5, 0, false);
+
+var object_elements_proto_5_10 = ObjectWithKeys(5);
+object_elements_proto_5_10.__proto__ = ObjectWithProtoKeys(10, 0);
+// Add some properties further up the prototype chain, the rest stays
+// empty.
+for (var i = 0; i < 5; i++) {
+ object_elements_proto_5_10.__proto__.__proto__.__proto__["proto" + i] = true;
+}
+
+var TestObjects = {
+ object_empty: object_empty,
+ array_empty: array_empty,
+ array_int_50: array_int_50,
+ array_int_holey_50: array_int_holey_50,
+ array_int_50_proto_elements: array_int_50_proto_elements,
+ empty_proto_5_10: empty_proto_5_10,
+ empty_proto_5_5_slow: empty_proto_5_5_slow,
+ object_elements_proto_5_10: object_elements_proto_5_10
+}
+
+var TestArrays = {
+ array_empty: array_empty,
+ array_int_50: array_int_50,
+ array_int_holey_50: array_int_holey_50,
+ array_int_50_proto_elements: array_int_50_proto_elements,
+}
+
+// ============================================================================
+
+function CreateTestFunctionGen(fn) {
+ // Force a new function for each test-object to avoid side-effects due to ICs.
+ return (object) => {
+ var random_comment = "\n// random comment" + Math.random() + "\n";
+ return eval(random_comment + fn.toString());
+ }
+}
+
+var TestFunctions = {
+ "Object.keys()": CreateTestFunctionGen(() => {return Object.keys(object)}),
+ "for-in": CreateTestFunctionGen(() => {
+ var count = 0;
+ var result;
+ for (var key in object) {
+ count++;
+ result = object[key];
+ };
+ return [result, count];
+ }),
+ "for-in hasOwnProperty()": CreateTestFunctionGen(() => {
+ var count = 0;
+ var result;
+ for (var key in object) {
+ if (!object.hasOwnProperty(key)) continue;
+ count++;
+ result = object[key];
+ };
+ return [result, count];
+ }),
+ "for (i < Object.keys().length)": CreateTestFunctionGen(() => {
+ var count = 0;
+ var result;
+ var keys = Object.keys(object);
+ for (var i = 0; i < keys.length; i++) {
+ count++;
+ result = object[keys[i]];
+ };
+ return [result, count];
+ }),
+ "Object.keys().forEach()": CreateTestFunctionGen(() => {
+ var count = 0;
+ var result;
+ Object.keys(object).forEach((value, index, obj) => {
+ count++;
+ result = value;
+ });
+ return [result, count];
+ }),
+}
+
+var TestFunctionsArrays = {
+ "for (i < array.length)": CreateTestFunctionGen(() => {
+ var count = 0;
+ var result;
+ for (var i = 0; i < object.length; i++) {
+ count++;
+ result = object[i];
+ };
+ return [result, count];
+ }),
+ "for (i < length)": CreateTestFunctionGen(() => {
+ var count = 0;
+ var result;
+ var length = object.length;
+ for (var i = 0; i < length; i++) {
+ count++;
+ result = object[i];
+ };
+ return [result, count];
+ })
+}
+
+// ============================================================================
+// Create the benchmark suites. We create a suite for each of the test
+// functions above and each suite contains benchmarks for each object type.
+var Benchmarks = [];
+
+function NewBenchmark(
+ test_function_gen, test_function_name, test_object, test_object_name) {
+ var object = test_object;
+ var name = test_function_name + " " + test_object_name;
+ var test_function = test_function_gen(object);
+ return new Benchmark(name, false, false, 0, test_function)
+}
+
+for (var test_function_name in TestFunctions) {
+ var test_function_gen = TestFunctions[test_function_name];
+ var benchmarks = [];
+ for (var test_object_name in TestObjects) {
+ var test_object = TestObjects[test_object_name];
+ var benchmark = NewBenchmark(
+ test_function_gen, test_function_name, test_object, test_object_name);
+ benchmarks.push(benchmark);
+ }
+ Benchmarks.push(new BenchmarkSuite(test_function_name, [100], benchmarks));
+}
+
+for (var test_function_name in TestFunctionsArrays) {
+ var test_function_gen = TestFunctionsArrays[test_function_name];
+ var benchmarks = [];
+ for (var test_array_name in TestArrays) {
+ var test_array = TestArrays[test_array_name];
+ var benchmark = NewBenchmark(
+ test_function_gen, test_function_name, test_array, test_array_name);
+ benchmarks.push(benchmark);
+ }
+ Benchmarks.push(new BenchmarkSuite(test_function_name, [100], benchmarks));
+}
+
+// ============================================================================
diff --git a/deps/v8/test/js-perf-test/Keys/run.js b/deps/v8/test/js-perf-test/Keys/run.js
new file mode 100644
index 0000000000..d4acbf3ba2
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Keys/run.js
@@ -0,0 +1,22 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('../base.js');
+load('keys.js');
+
+var success = true;
+
+function PrintResult(name, result) {
+ print(name + '-Keys(Score): ' + result);
+}
+
+function PrintError(name, error) {
+ PrintResult(name, error);
+ success = false;
+}
+
+BenchmarkSuite.config.doWarmup = undefined;
+BenchmarkSuite.config.doDeterministic = undefined;
+
+BenchmarkSuite.RunSuites({NotifyResult: PrintResult, NotifyError: PrintError});
diff --git a/deps/v8/test/js-perf-test/Object/ObjectTests.json b/deps/v8/test/js-perf-test/Object/ObjectTests.json
new file mode 100644
index 0000000000..1c0e5ed2ee
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Object/ObjectTests.json
@@ -0,0 +1,31 @@
+{
+ "name": "ObjectTests",
+ "run_count": 5,
+ "run_count_android_arm": 3,
+ "run_count_android_arm64": 3,
+ "timeout": 120,
+ "units": "score",
+ "total": true,
+ "resources": ["base.js"],
+ "tests": [
+ {
+ "name": "Object",
+ "path": ["."],
+ "main": "run.js",
+ "flags": ["--harmony"],
+ "resources": [
+ "assign.js",
+ "values.js",
+ "entries.js"
+ ],
+ "results_regexp": "^%s\\-Object\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "Assign"},
+ {"name": "Entries"},
+ {"name": "EntriesMegamorphic"},
+ {"name": "Values"},
+ {"name": "ValuesMegamorphic"}
+ ]
+ }
+ ]
+}
diff --git a/deps/v8/test/js-perf-test/Object/entries.js b/deps/v8/test/js-perf-test/Object/entries.js
new file mode 100644
index 0000000000..0658b7983d
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Object/entries.js
@@ -0,0 +1,75 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+new BenchmarkSuite('Entries', [1000], [
+ new Benchmark('Basic', false, false, 0, Basic, BasicSetup, BasicTearDown)
+]);
+
+var object;
+var expected;
+var result;
+var symbol1;
+
+function Basic() {
+ result = Object.entries(object);
+}
+
+
+function BasicSetup() {
+ result = undefined;
+ symbol1 = Symbol('test');
+ object = { a: 10 };
+ object[26.0] = 'third';
+ object.b = 72;
+ object[symbol1] = 'TEST';
+ Object.defineProperty(object, 'not-enumerable', {
+ enumerable: false, value: 'nope', writable: true, configurable: true });
+}
+
+
+function BasicTearDown() {
+ result = result.map(entry => `[${[String(entry[0]), String(entry[1])]}]`);
+ return result.length === 3 &&
+ result.join(', ') === '[a, 10], [26.0, third], [b, 72]';
+}
+
+// ----------------------------------------------------------------------------
+
+new BenchmarkSuite('EntriesMegamorphic', [1000], [
+ new Benchmark('BasicMegamorphic', false, false, 0, BasicMegamorphic,
+ BasicMegamorphicSetup, BasicMegamorphicTearDown)
+]);
+
+function BasicMegamorphic() {
+ for (var i = 0; i < object.length; ++i) {
+ result[i] = Object.entries(object[i]);
+ }
+}
+
+
+function BasicMegamorphicSetup() {
+ // Create 1k objects with different maps.
+ object = [];
+ expected = [];
+ result = [];
+ for (var i=0; i<1000; i++) {
+ var obj = {};
+ var exp = [];
+ for (var j=0; j<10; j++) {
+ obj['key-'+i+'-'+j] = 'property-'+i+'-'+j;
+ exp[j] = ['key-'+i+'-'+j, 'property-'+i+'-'+j];
+ }
+ object[i] = obj;
+ expected[i] = exp;
+ }
+}
+
+
+function BasicMegamorphicTearDown() {
+ if (JSON.stringify(expected) !== JSON.stringify(result)) {
+ throw new Error("FAILURE");
+ }
+ object = result = expected = undefined;
+ return true;
+}
diff --git a/deps/v8/test/js-perf-test/Object/run.js b/deps/v8/test/js-perf-test/Object/run.js
index 15c31ba08d..f25bee4572 100644
--- a/deps/v8/test/js-perf-test/Object/run.js
+++ b/deps/v8/test/js-perf-test/Object/run.js
@@ -4,7 +4,10 @@
load('../base.js');
+
load('assign.js');
+load('values.js');
+load('entries.js');
var success = true;
diff --git a/deps/v8/test/js-perf-test/Object/values.js b/deps/v8/test/js-perf-test/Object/values.js
new file mode 100644
index 0000000000..acdec49bbd
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Object/values.js
@@ -0,0 +1,75 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+new BenchmarkSuite('Values', [1000], [
+ new Benchmark('Basic', false, false, 0, Basic, BasicSetup, BasicTearDown),
+]);
+
+var object;
+var expected;
+var result;
+var symbol1;
+
+function Basic() {
+ result = Object.values(object);
+}
+
+
+function BasicSetup() {
+ result = undefined;
+ symbol1 = Symbol('test');
+ object = { a: 10 };
+ object[26.0] = 'third';
+ object.b = 72;
+ object[symbol1] = 'TEST';
+ Object.defineProperty(object, 'not-enumerable', {
+ enumerable: false, value: 'nope', writable: true, configurable: true });
+}
+
+
+function BasicTearDown() {
+ return result.length === 3 && result[0] === 10 && result[1] === 'third' &&
+ result[2] === 72;
+}
+
+// ----------------------------------------------------------------------------
+
+new BenchmarkSuite('ValuesMegamorphic', [1000], [
+ new Benchmark('BasicMegamorphic', false, false, 0, BasicMegamorphic,
+ BasicMegamorphicSetup, BasicMegamorphicTearDown)
+]);
+
+
+function BasicMegamorphic() {
+ for (var i = 0; i < object.length; ++i) {
+ result[i] = Object.values(object[i]);
+ }
+}
+
+
+function BasicMegamorphicSetup() {
+ // Create 1k objects with different maps.
+ object = [];
+ expected = [];
+ result = [];
+ for (var i=0; i<1000; i++) {
+ var obj = {};
+ var exp = [];
+ for (var j=0; j<10; j++) {
+ obj['key-'+i+'-'+j] = 'property-'+i+'-'+j;
+ exp[j] = 'property-'+i+'-'+j;
+ }
+ object[i] = obj;
+ expected[i] = exp;
+ }
+}
+
+
+function BasicMegamorphicTearDown() {
+ if (JSON.stringify(expected) !== JSON.stringify(result)) {
+ throw new Error("FAILURE");
+ }
+ object = result = expected = undefined;
+ return true;
+}
diff --git a/deps/v8/test/message/strong-object-set-proto.js b/deps/v8/test/message/const-decl-no-init-sloppy.js
index 890dd84d72..a122eae182 100644
--- a/deps/v8/test/message/strong-object-set-proto.js
+++ b/deps/v8/test/message/const-decl-no-init-sloppy.js
@@ -1,9 +1,9 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+//
+// Flags: --harmony-sloppy
-// Flags: --strong-mode
-
-"use strong";
-
-({}).__proto__ = {};
+function f() {
+ const a;
+}
diff --git a/deps/v8/test/message/const-decl-no-init-sloppy.out b/deps/v8/test/message/const-decl-no-init-sloppy.out
new file mode 100644
index 0000000000..302497771a
--- /dev/null
+++ b/deps/v8/test/message/const-decl-no-init-sloppy.out
@@ -0,0 +1,7 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+*%(basename)s:8: SyntaxError: Missing initializer in const declaration
+ const a;
+ ^
+SyntaxError: Missing initializer in const declaration
diff --git a/deps/v8/test/message/default-parameter-tdz-arrow.js b/deps/v8/test/message/default-parameter-tdz-arrow.js
index cad091f8ac..d68ceb2908 100644
--- a/deps/v8/test/message/default-parameter-tdz-arrow.js
+++ b/deps/v8/test/message/default-parameter-tdz-arrow.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-default-parameters
+//
((a=-a) => { })();
diff --git a/deps/v8/test/message/default-parameter-tdz.js b/deps/v8/test/message/default-parameter-tdz.js
index ff2a400e09..a109196218 100644
--- a/deps/v8/test/message/default-parameter-tdz.js
+++ b/deps/v8/test/message/default-parameter-tdz.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-default-parameters
+//
(function(a=+a) { })();
diff --git a/deps/v8/test/message/destructuring-decl-no-init-array.js b/deps/v8/test/message/destructuring-decl-no-init-array.js
index 7c73d3b670..ab976b197e 100644
--- a/deps/v8/test/message/destructuring-decl-no-init-array.js
+++ b/deps/v8/test/message/destructuring-decl-no-init-array.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-destructuring-bind
+//
var [ a, b, c ];
diff --git a/deps/v8/test/message/destructuring-decl-no-init-array2.js b/deps/v8/test/message/destructuring-decl-no-init-array2.js
index a82afa46b0..9ffa58b1df 100644
--- a/deps/v8/test/message/destructuring-decl-no-init-array2.js
+++ b/deps/v8/test/message/destructuring-decl-no-init-array2.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-destructuring-bind
+//
for (var [ a ]; a; ) {}
diff --git a/deps/v8/test/message/destructuring-decl-no-init-obj.js b/deps/v8/test/message/destructuring-decl-no-init-obj.js
index 23424aa8bb..398b4fc5e7 100644
--- a/deps/v8/test/message/destructuring-decl-no-init-obj.js
+++ b/deps/v8/test/message/destructuring-decl-no-init-obj.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-destructuring-bind
+//
var { a, b, c };
diff --git a/deps/v8/test/message/destructuring-decl-no-init-obj2.js b/deps/v8/test/message/destructuring-decl-no-init-obj2.js
index 6c76137b75..652409bfab 100644
--- a/deps/v8/test/message/destructuring-decl-no-init-obj2.js
+++ b/deps/v8/test/message/destructuring-decl-no-init-obj2.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-destructuring-bind
+//
for (var { a, b, c }; a && b && c; ) {}
diff --git a/deps/v8/test/message/destructuring-modify-const.js b/deps/v8/test/message/destructuring-modify-const.js
index 88bda351d8..5575ae9267 100644
--- a/deps/v8/test/message/destructuring-modify-const.js
+++ b/deps/v8/test/message/destructuring-modify-const.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-destructuring-bind
+//
'use strict';
const { x : x, y : y } = { x : 1, y : 2 };
diff --git a/deps/v8/test/message/for-in-loop-initializers-destructuring.js b/deps/v8/test/message/for-in-loop-initializers-destructuring.js
index eab8b81cf3..9bbfd8d71f 100644
--- a/deps/v8/test/message/for-in-loop-initializers-destructuring.js
+++ b/deps/v8/test/message/for-in-loop-initializers-destructuring.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-destructuring-bind
+//
function f() {
for (var [x, y] = {} in {});
diff --git a/deps/v8/test/message/for-of-throw-in-body.js b/deps/v8/test/message/for-of-throw-in-body.js
new file mode 100644
index 0000000000..38b27f3863
--- /dev/null
+++ b/deps/v8/test/message/for-of-throw-in-body.js
@@ -0,0 +1,5 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+for (var x of [1, 2, 3]) { throw 42 }
diff --git a/deps/v8/test/message/for-of-throw-in-body.out b/deps/v8/test/message/for-of-throw-in-body.out
new file mode 100644
index 0000000000..4bc48ebdad
--- /dev/null
+++ b/deps/v8/test/message/for-of-throw-in-body.out
@@ -0,0 +1,6 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+*%(basename)s:5: 42
+for (var x of [1, 2, 3]) { throw 42 }
+ ^
diff --git a/deps/v8/test/preparser/empty.js b/deps/v8/test/message/instanceof-noncallable.js
index 70b88e2a43..571a2b0c76 100644
--- a/deps/v8/test/preparser/empty.js
+++ b/deps/v8/test/message/instanceof-noncallable.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,4 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// This file contains no JavaScript code.
+// Flags: --harmony-instanceof
+
+1 instanceof {};
diff --git a/deps/v8/test/message/instanceof-noncallable.out b/deps/v8/test/message/instanceof-noncallable.out
new file mode 100644
index 0000000000..73e2ae61b5
--- /dev/null
+++ b/deps/v8/test/message/instanceof-noncallable.out
@@ -0,0 +1,5 @@
+*%(basename)s:30: TypeError: Right-hand side of 'instanceof' is not callable
+1 instanceof {};
+ ^
+TypeError: Right-hand side of 'instanceof' is not callable
+ at *%(basename)s:30:3
diff --git a/deps/v8/test/message/instanceof.js b/deps/v8/test/message/instanceof-nonobject.js
index 1d55e0f94e..47152571a1 100644
--- a/deps/v8/test/message/instanceof.js
+++ b/deps/v8/test/message/instanceof-nonobject.js
@@ -25,4 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --harmony-instanceof
+
1 instanceof 2;
diff --git a/deps/v8/test/message/instanceof-nonobject.out b/deps/v8/test/message/instanceof-nonobject.out
new file mode 100644
index 0000000000..ecf47af9d1
--- /dev/null
+++ b/deps/v8/test/message/instanceof-nonobject.out
@@ -0,0 +1,5 @@
+*%(basename)s:30: TypeError: Right-hand side of 'instanceof' is not an object
+1 instanceof 2;
+ ^
+TypeError: Right-hand side of 'instanceof' is not an object
+ at *%(basename)s:30:3
diff --git a/deps/v8/test/message/instanceof.out b/deps/v8/test/message/instanceof.out
deleted file mode 100644
index d279bc44e7..0000000000
--- a/deps/v8/test/message/instanceof.out
+++ /dev/null
@@ -1,5 +0,0 @@
-*%(basename)s:28: TypeError: Expecting a function in instanceof check, but got 2
-1 instanceof 2;
- ^
-TypeError: Expecting a function in instanceof check, but got 2
- at *%(basename)s:28:3
diff --git a/deps/v8/test/message/let-lexical-name-in-array-prohibited.js b/deps/v8/test/message/let-lexical-name-in-array-prohibited.js
index c7a35cd4a6..a6cba6fc30 100644
--- a/deps/v8/test/message/let-lexical-name-in-array-prohibited.js
+++ b/deps/v8/test/message/let-lexical-name-in-array-prohibited.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-sloppy --harmony-sloppy-let --harmony-destructuring-bind
+// Flags: --harmony-sloppy --harmony-sloppy-let
let [let];
diff --git a/deps/v8/test/message/let-lexical-name-in-object-prohibited.js b/deps/v8/test/message/let-lexical-name-in-object-prohibited.js
index d2b7c905d0..0a12762ec3 100644
--- a/deps/v8/test/message/let-lexical-name-in-object-prohibited.js
+++ b/deps/v8/test/message/let-lexical-name-in-object-prohibited.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-sloppy --harmony-sloppy-let --harmony-destructuring-bind
+// Flags: --harmony-sloppy --harmony-sloppy-let
let {let};
diff --git a/deps/v8/test/message/message.status b/deps/v8/test/message/message.status
index 234bf0f35c..051911c6a9 100644
--- a/deps/v8/test/message/message.status
+++ b/deps/v8/test/message/message.status
@@ -29,5 +29,9 @@
[ALWAYS, {
# All tests in the bug directory are expected to fail.
'bugs/*': [FAIL],
+ # We don't parse RegExps at scanning time, so we can't fail on octal
+ # escapes (we need to parse to distinguish octal escapes from valid
+ # back-references).
+ 'strict-octal-regexp': [SKIP],
}], # ALWAYS
]
diff --git a/deps/v8/test/message/no-legacy-const-2.js b/deps/v8/test/message/no-legacy-const-2.js
index 24e3f85639..5dc63b3cb5 100644
--- a/deps/v8/test/message/no-legacy-const-2.js
+++ b/deps/v8/test/message/no-legacy-const-2.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --no-legacy-const --no-harmony-sloppy --no-harmony-sloppy-let
+// Flags: --no-harmony-sloppy --no-harmony-sloppy-let
// Flags: --no-harmony-sloppy-function
const = 42;
diff --git a/deps/v8/test/message/no-legacy-const-3.js b/deps/v8/test/message/no-legacy-const-3.js
index 4f6e9a4bbb..43dd9c9d71 100644
--- a/deps/v8/test/message/no-legacy-const-3.js
+++ b/deps/v8/test/message/no-legacy-const-3.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --no-legacy-const --no-harmony-sloppy --no-harmony-sloppy-let
+// Flags: --no-harmony-sloppy --no-harmony-sloppy-let
// Flags: --no-harmony-sloppy-function
const
diff --git a/deps/v8/test/message/no-legacy-const.js b/deps/v8/test/message/no-legacy-const.js
index d9a716b3a8..9eebee59bf 100644
--- a/deps/v8/test/message/no-legacy-const.js
+++ b/deps/v8/test/message/no-legacy-const.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --no-legacy-const --no-harmony-sloppy --no-harmony-sloppy-let
+// Flags: --no-harmony-sloppy --no-harmony-sloppy-let
// Flags: --no-harmony-sloppy-function
const x = 42;
diff --git a/deps/v8/test/preparser/non-alphanum.js b/deps/v8/test/message/non-alphanum.js
index 83bd1f8b17..357ebfa1e5 100644
--- a/deps/v8/test/preparser/non-alphanum.js
+++ b/deps/v8/test/message/non-alphanum.js
@@ -29,6 +29,6 @@
// non-alphanumeric characters, but does contain valid code.
// Created using http://discogscounter.getfreehosting.co.uk/js-noalnum_com.php
-// Probably only works in Firefox, but should parse fine.
+// Will throw a TypeError, but should parse fine and not throw a SyntaxError.
([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()[(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]])([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()[([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(![]+[])[+!+[]]]((![]+[])[+!+[]])[+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+([][[]]+[])[+[]]+(+[![]]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+!+[]]]+[][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()[(![]+[])[+!+[]]+(!![]+[])[+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()+[])[!+[]+!+[]]]((![]+[])[+!+[]]+(+[![]]+[])[+[]])[+[]]+(![]+[])[+!+[]]+(+[]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[!+[]+!+[]+!+[]+[+[]]]+(!![]+[])[!+[]+!+[]+!+[]]+(+[![]]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+!+[]]]+([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()+[])[!+[]+!+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[!+[]+!+[]+!+[]]+([][[]]+[])[+!+[]]+(+[![]]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+!+[]]]+([]+([]+[])[([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+([][[]]+[])[+!+[]]+(![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[+!+[]]+([][[]]+[])[+[]]+([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]])[+!+[]+[!+[]+!+[]+!+[]+!+[]]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[!+[]+!+[]+!+[]]+[][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()[(![]+[])[+!+[]]+(!![]+[])[+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()+[])[!+[]+!+[]]]((![]+[])[+!+[]]+[+[]])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+([][[]]+[])[!+[]+!+[]]+[][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()[(![]+[])[+!+[]]+(!![]+[])[+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()+[])[!+[]+!+[]]]((+(+!+[]+(!+[]+[])[!+[]+!+[]+!+[]]+[+!+[]]+[+[]]+[+[]]+[+[]])+[])[+[]]+(![]+[])[+[]])[+[]])
diff --git a/deps/v8/test/message/non-alphanum.out b/deps/v8/test/message/non-alphanum.out
new file mode 100644
index 0000000000..dc15a614eb
--- /dev/null
+++ b/deps/v8/test/message/non-alphanum.out
@@ -0,0 +1,6 @@
+*%(basename)s:34: TypeError: Array.prototype.sort called on null or undefined
+([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()[(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]])([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()[([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(![]+[])[+!+[]]]((![]+[])[+!+[]])[+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+([][[]]+[])[+[]]+(+[![]]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+!+[]]]+[][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()[(![]+[])[+!+[]]+(!![]+[])[+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()+[])[!+[]+!+[]]]((![]+[])[+!+[]]+(+[![]]+[])[+[]])[+[]]+(![]+[])[+!+[]]+(+[]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[!+[]+!+[]+!+[]+[+[]]]+(!![]+[])[!+[]+!+[]+!+[]]+(+[![]]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+!+[]]]+([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()+[])[!+[]+!+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[!+[]+!+[]+!+[]]+([][[]]+[])[+!+[]]+(+[![]]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+!+[]]]+([]+([]+[])[([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+([][[]]+[])[+!+[]]+(![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[+!+[]]+([][[]]+[])[+[]]+([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]])[+!+[]+[!+[]+!+[]+!+[]+!+[]]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[!+[]+!+[]+!+[]]+[][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()[(![]+[])[+!+[]]+(!![]+[])[+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()+[])[!+[]+!+[]]]((![]+[])[+!+[]]+[+[]])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+([][[]]+[])[!+[]+!+[]]+[][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()[(![]+[])[+!+[]]+(!![]+[])[+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()+[])[!+[]+!+[]]]((+(+!+[]+(!+[]+[])[!+[]+!+[]+!+[]]+[+!+[]]+[+[]]+[+[]]+[+[]])+[])[+[]]+(![]+[])[+[]])[+[]])
+ ^
+TypeError: Array.prototype.sort called on null or undefined
+ at sort (native)
+ at *%(basename)s:34:410
diff --git a/deps/v8/test/preparser/non-use-strict-hex-escape.js b/deps/v8/test/message/non-use-strict-hex-escape.js
index 44db66e42f..44db66e42f 100644
--- a/deps/v8/test/preparser/non-use-strict-hex-escape.js
+++ b/deps/v8/test/message/non-use-strict-hex-escape.js
diff --git a/deps/v8/test/message/non-use-strict-hex-escape.out b/deps/v8/test/message/non-use-strict-hex-escape.out
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/v8/test/message/non-use-strict-hex-escape.out
diff --git a/deps/v8/test/preparser/non-use-strict-octal-escape.js b/deps/v8/test/message/non-use-strict-octal-escape.js
index 55f035a7e0..55f035a7e0 100644
--- a/deps/v8/test/preparser/non-use-strict-octal-escape.js
+++ b/deps/v8/test/message/non-use-strict-octal-escape.js
diff --git a/deps/v8/test/message/non-use-strict-octal-escape.out b/deps/v8/test/message/non-use-strict-octal-escape.out
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/v8/test/message/non-use-strict-octal-escape.out
diff --git a/deps/v8/test/preparser/non-use-strict-uhex-escape.js b/deps/v8/test/message/non-use-strict-uhex-escape.js
index c7df2cb97e..c7df2cb97e 100644
--- a/deps/v8/test/preparser/non-use-strict-uhex-escape.js
+++ b/deps/v8/test/message/non-use-strict-uhex-escape.js
diff --git a/deps/v8/test/message/non-use-strict-uhex-escape.out b/deps/v8/test/message/non-use-strict-uhex-escape.out
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/v8/test/message/non-use-strict-uhex-escape.out
diff --git a/deps/v8/test/preparser/nonstrict-arguments.js b/deps/v8/test/message/nonstrict-arguments.js
index 43c7e2e940..e5ce0f9315 100644
--- a/deps/v8/test/preparser/nonstrict-arguments.js
+++ b/deps/v8/test/message/nonstrict-arguments.js
@@ -27,24 +27,26 @@
// Eval restrictions should not trigger outside of strict-mode code.
-var arguments = 42;
-arguments = arguments++;
-arguments += --arguments;
-arguments -= ++arguments;
-arguments *= arguments--;
-function arguments(arguments) {};
-try {} catch (arguments) {}
+function foo() {
+ var arguments = 42;
+ arguments = arguments++;
+ arguments += --arguments;
+ arguments -= ++arguments;
+ arguments *= arguments--;
+ function arguments(arguments) {};
+ try {} catch (arguments) {}
-function strict() {
- "use strict";
- // Reading eval and arguments is allowed.
- eval(arguments);
-}
+ function strict() {
+ "use strict";
+ // Reading eval and arguments is allowed.
+ eval(arguments);
+ }
-var arguments = 42;
-arguments = arguments++;
-arguments += --arguments;
-arguments -= ++arguments;
-arguments *= arguments--;
-function arguments(arguments) {};
-try {} catch (arguments) {}
+ var arguments = 42;
+ arguments = arguments++;
+ arguments += --arguments;
+ arguments -= ++arguments;
+ arguments *= arguments--;
+ function arguments(arguments) {};
+ try {} catch (arguments) {}
+}
diff --git a/deps/v8/test/message/nonstrict-arguments.out b/deps/v8/test/message/nonstrict-arguments.out
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/v8/test/message/nonstrict-arguments.out
diff --git a/deps/v8/test/preparser/nonstrict-eval.js b/deps/v8/test/message/nonstrict-eval.js
index db6feda3e3..13d7903105 100644
--- a/deps/v8/test/preparser/nonstrict-eval.js
+++ b/deps/v8/test/message/nonstrict-eval.js
@@ -27,24 +27,26 @@
// Eval restrictions should not trigger outside of strict-mode code.
-var eval = 42;
-eval = eval++;
-eval += --eval;
-eval -= ++eval;
-eval *= eval--;
-function eval(eval) {};
-try {} catch (eval) {}
+function foo() {
+ var eval = 42;
+ eval = eval++;
+ eval += --eval;
+ eval -= ++eval;
+ eval *= eval--;
+ function eval(eval) {};
+ try {} catch (eval) {}
-function strict() {
- "use strict";
- // Reading eval and arguments is allowed.
- eval(arguments);
-}
+ function strict() {
+ "use strict";
+ // Reading eval and arguments is allowed.
+ eval(arguments);
+ }
-var eval = 42;
-eval = eval++;
-eval += --eval;
-eval -= ++eval;
-eval *= eval--;
-function eval(eval) {};
-try {} catch (eval) {}
+ var eval = 42;
+ eval = eval++;
+ eval += --eval;
+ eval -= ++eval;
+ eval *= eval--;
+ function eval(eval) {};
+ try {} catch (eval) {}
+}
diff --git a/deps/v8/test/message/nonstrict-eval.out b/deps/v8/test/message/nonstrict-eval.out
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/v8/test/message/nonstrict-eval.out
diff --git a/deps/v8/test/preparser/nonstrict-with.js b/deps/v8/test/message/nonstrict-with.js
index 17f0c930a6..f6e39c9c0e 100644
--- a/deps/v8/test/preparser/nonstrict-with.js
+++ b/deps/v8/test/message/nonstrict-with.js
@@ -28,13 +28,15 @@
// The with statement is allowed in non-strict code, and even around
// strict code.
-with ({}) {}
+function foo() {
+ with ({}) {}
-with ({x : 42}) {
- var foo = function () {
- "use strict";
- return x;
- };
-}
+ with ({x : 42}) {
+ var foo = function () {
+ "use strict";
+ return x;
+ };
+ }
-with ({}) {}
+ with ({}) {}
+}
diff --git a/deps/v8/test/message/nonstrict-with.out b/deps/v8/test/message/nonstrict-with.out
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/v8/test/message/nonstrict-with.out
diff --git a/deps/v8/test/message/regress/regress-4829-1.js b/deps/v8/test/message/regress/regress-4829-1.js
new file mode 100644
index 0000000000..1ad5feda8c
--- /dev/null
+++ b/deps/v8/test/message/regress/regress-4829-1.js
@@ -0,0 +1,7 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function tag() {}
+
+tag(tag`\xyy`);
diff --git a/deps/v8/test/message/regress/regress-4829-1.out b/deps/v8/test/message/regress/regress-4829-1.out
new file mode 100644
index 0000000000..6b51a8abdf
--- /dev/null
+++ b/deps/v8/test/message/regress/regress-4829-1.out
@@ -0,0 +1,8 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+*%(basename)s:7: SyntaxError: Invalid hexadecimal escape sequence
+tag(tag`\xyy`);
+ ^^^^
+SyntaxError: Invalid hexadecimal escape sequence
diff --git a/deps/v8/test/message/regress/regress-4829-2.js b/deps/v8/test/message/regress/regress-4829-2.js
new file mode 100644
index 0000000000..eadb653a4b
--- /dev/null
+++ b/deps/v8/test/message/regress/regress-4829-2.js
@@ -0,0 +1,7 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function tag() {}
+
+`${tag`\xyy`}`;
diff --git a/deps/v8/test/message/regress/regress-4829-2.out b/deps/v8/test/message/regress/regress-4829-2.out
new file mode 100644
index 0000000000..c8272b4db0
--- /dev/null
+++ b/deps/v8/test/message/regress/regress-4829-2.out
@@ -0,0 +1,8 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+*%(basename)s:7: SyntaxError: Invalid hexadecimal escape sequence
+`${tag`\xyy`}`;
+ ^^^^
+SyntaxError: Invalid hexadecimal escape sequence
diff --git a/deps/v8/test/preparser/strict-octal-indirect-regexp.js b/deps/v8/test/message/strict-octal-indirect-regexp.js
index ea5c5e3348..ea5c5e3348 100644
--- a/deps/v8/test/preparser/strict-octal-indirect-regexp.js
+++ b/deps/v8/test/message/strict-octal-indirect-regexp.js
diff --git a/deps/v8/test/message/strict-octal-indirect-regexp.out b/deps/v8/test/message/strict-octal-indirect-regexp.out
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/v8/test/message/strict-octal-indirect-regexp.out
diff --git a/deps/v8/test/preparser/strict-octal-number.js b/deps/v8/test/message/strict-octal-number.js
index 3e991279fa..3e991279fa 100644
--- a/deps/v8/test/preparser/strict-octal-number.js
+++ b/deps/v8/test/message/strict-octal-number.js
diff --git a/deps/v8/test/message/strict-octal-number.out b/deps/v8/test/message/strict-octal-number.out
new file mode 100644
index 0000000000..687321877a
--- /dev/null
+++ b/deps/v8/test/message/strict-octal-number.out
@@ -0,0 +1,4 @@
+*%(basename)s:32: SyntaxError: Octal literals are not allowed in strict mode.
+ var x = 012;
+ ^^^
+SyntaxError: Octal literals are not allowed in strict mode.
diff --git a/deps/v8/test/preparser/strict-octal-regexp.js b/deps/v8/test/message/strict-octal-regexp.js
index b39d0b27c7..b39d0b27c7 100644
--- a/deps/v8/test/preparser/strict-octal-regexp.js
+++ b/deps/v8/test/message/strict-octal-regexp.js
diff --git a/deps/v8/test/message/strict-octal-regexp.out b/deps/v8/test/message/strict-octal-regexp.out
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/v8/test/message/strict-octal-regexp.out
diff --git a/deps/v8/test/preparser/strict-octal-string.js b/deps/v8/test/message/strict-octal-string.js
index 87c0e99fb1..87c0e99fb1 100644
--- a/deps/v8/test/preparser/strict-octal-string.js
+++ b/deps/v8/test/message/strict-octal-string.js
diff --git a/deps/v8/test/message/strict-octal-string.out b/deps/v8/test/message/strict-octal-string.out
new file mode 100644
index 0000000000..c46df6bbff
--- /dev/null
+++ b/deps/v8/test/message/strict-octal-string.out
@@ -0,0 +1,4 @@
+*%(basename)s:32: SyntaxError: Octal literals are not allowed in strict mode.
+ var x = "hello\040world";
+ ^^
+SyntaxError: Octal literals are not allowed in strict mode.
diff --git a/deps/v8/test/preparser/strict-octal-use-strict-after.js b/deps/v8/test/message/strict-octal-use-strict-after.js
index 57d0f20151..57d0f20151 100644
--- a/deps/v8/test/preparser/strict-octal-use-strict-after.js
+++ b/deps/v8/test/message/strict-octal-use-strict-after.js
diff --git a/deps/v8/test/message/strict-octal-use-strict-after.out b/deps/v8/test/message/strict-octal-use-strict-after.out
new file mode 100644
index 0000000000..2a425000ab
--- /dev/null
+++ b/deps/v8/test/message/strict-octal-use-strict-after.out
@@ -0,0 +1,4 @@
+*%(basename)s:33: SyntaxError: Octal literals are not allowed in strict mode.
+ "use\040strict";
+ ^^
+SyntaxError: Octal literals are not allowed in strict mode.
diff --git a/deps/v8/test/preparser/strict-octal-use-strict-before.js b/deps/v8/test/message/strict-octal-use-strict-before.js
index bfc380f950..bfc380f950 100644
--- a/deps/v8/test/preparser/strict-octal-use-strict-before.js
+++ b/deps/v8/test/message/strict-octal-use-strict-before.js
diff --git a/deps/v8/test/message/strict-octal-use-strict-before.out b/deps/v8/test/message/strict-octal-use-strict-before.out
new file mode 100644
index 0000000000..cd93e50df9
--- /dev/null
+++ b/deps/v8/test/message/strict-octal-use-strict-before.out
@@ -0,0 +1,4 @@
+*%(basename)s:32: SyntaxError: Octal literals are not allowed in strict mode.
+ "use\040strict";
+ ^^
+SyntaxError: Octal literals are not allowed in strict mode.
diff --git a/deps/v8/test/preparser/strict-with.js b/deps/v8/test/message/strict-with.js
index 411fc2926c..411fc2926c 100644
--- a/deps/v8/test/preparser/strict-with.js
+++ b/deps/v8/test/message/strict-with.js
diff --git a/deps/v8/test/message/strict-with.out b/deps/v8/test/message/strict-with.out
new file mode 100644
index 0000000000..06e7ed852d
--- /dev/null
+++ b/deps/v8/test/message/strict-with.out
@@ -0,0 +1,4 @@
+*%(basename)s:32: SyntaxError: Strict mode code may not include a with statement
+ with ({}) {}
+ ^^^^
+SyntaxError: Strict mode code may not include a with statement
diff --git a/deps/v8/test/message/strong-object-freeze-prop.out b/deps/v8/test/message/strong-object-freeze-prop.out
deleted file mode 100644
index 0c611c5928..0000000000
--- a/deps/v8/test/message/strong-object-freeze-prop.out
+++ /dev/null
@@ -1,9 +0,0 @@
-# Copyright 2015 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-*%(basename)s:11: TypeError: On strong object #<Object>, redefining writable, non-configurable property 'foo' to be non-writable is deprecated
-Object.defineProperty(o, "foo", { writable: false });
- ^
-TypeError: On strong object #<Object>, redefining writable, non-configurable property 'foo' to be non-writable is deprecated
- at Function.defineProperty (native)
- at *%(basename)s:11:8
diff --git a/deps/v8/test/message/strong-object-set-proto.out b/deps/v8/test/message/strong-object-set-proto.out
deleted file mode 100644
index bf2c9334f7..0000000000
--- a/deps/v8/test/message/strong-object-set-proto.out
+++ /dev/null
@@ -1,9 +0,0 @@
-# Copyright 2015 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-*%(basename)s:9: TypeError: On strong object #<Object>, redefining the internal prototype is deprecated
-({}).__proto__ = {};
- ^
-TypeError: On strong object #<Object>, redefining the internal prototype is deprecated
- at Object.set __proto__ (native)
- at *%(basename)s:9:16
diff --git a/deps/v8/test/message/testcfg.py b/deps/v8/test/message/testcfg.py
index 7c53041016..577b476637 100644
--- a/deps/v8/test/message/testcfg.py
+++ b/deps/v8/test/message/testcfg.py
@@ -94,7 +94,9 @@ class MessageTestSuite(testsuite.TestSuite):
string.find("Native Client module will be loaded") > 0 or
string.find("NaClHostDescOpen:") > 0)
- def IsFailureOutput(self, output, testpath):
+ def IsFailureOutput(self, testcase):
+ output = testcase.output
+ testpath = testcase.path
expected_path = os.path.join(self.root, testpath + ".out")
expected_lines = []
# Can't use utils.ReadLinesFrom() here because it strips whitespace.
diff --git a/deps/v8/test/message/try-catch-lexical-conflict.js b/deps/v8/test/message/try-catch-lexical-conflict.js
index a5db29898a..48b1a162b1 100644
--- a/deps/v8/test/message/try-catch-lexical-conflict.js
+++ b/deps/v8/test/message/try-catch-lexical-conflict.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-destructuring-bind
+//
"use strict";
try {
diff --git a/deps/v8/test/message/try-catch-variable-conflict.js b/deps/v8/test/message/try-catch-variable-conflict.js
index 6cf04fa207..49e120bf61 100644
--- a/deps/v8/test/message/try-catch-variable-conflict.js
+++ b/deps/v8/test/message/try-catch-variable-conflict.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-destructuring-bind
+//
try {
} catch ({x}) {
diff --git a/deps/v8/test/message/unicode-escape-invalid-2.js b/deps/v8/test/message/unicode-escape-invalid-2.js
new file mode 100644
index 0000000000..b83665b197
--- /dev/null
+++ b/deps/v8/test/message/unicode-escape-invalid-2.js
@@ -0,0 +1,5 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("\u162P");
diff --git a/deps/v8/test/message/unicode-escape-invalid-2.out b/deps/v8/test/message/unicode-escape-invalid-2.out
new file mode 100644
index 0000000000..423e79d60e
--- /dev/null
+++ b/deps/v8/test/message/unicode-escape-invalid-2.out
@@ -0,0 +1,8 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+*%(basename)s:5: SyntaxError: Invalid Unicode escape sequence
+print("\u162P");
+ ^^^^^^
+SyntaxError: Invalid Unicode escape sequence
diff --git a/deps/v8/test/message/unicode-escape-invalid.js b/deps/v8/test/message/unicode-escape-invalid.js
new file mode 100644
index 0000000000..5378acf816
--- /dev/null
+++ b/deps/v8/test/message/unicode-escape-invalid.js
@@ -0,0 +1,5 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("\u{FFYZ}");
diff --git a/deps/v8/test/message/unicode-escape-invalid.out b/deps/v8/test/message/unicode-escape-invalid.out
new file mode 100644
index 0000000000..2bdd53881e
--- /dev/null
+++ b/deps/v8/test/message/unicode-escape-invalid.out
@@ -0,0 +1,8 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+*%(basename)s:5: SyntaxError: Invalid Unicode escape sequence
+print("\u{FFYZ}");
+ ^
+SyntaxError: Invalid Unicode escape sequence
diff --git a/deps/v8/test/message/unicode-escape-undefined.js b/deps/v8/test/message/unicode-escape-undefined.js
new file mode 100644
index 0000000000..49de2fb2c8
--- /dev/null
+++ b/deps/v8/test/message/unicode-escape-undefined.js
@@ -0,0 +1,5 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("\u{110000}");
diff --git a/deps/v8/test/message/unicode-escape-undefined.out b/deps/v8/test/message/unicode-escape-undefined.out
new file mode 100644
index 0000000000..9b0483cdcc
--- /dev/null
+++ b/deps/v8/test/message/unicode-escape-undefined.out
@@ -0,0 +1,8 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+*%(basename)s:5: SyntaxError: Undefined Unicode code-point
+print("\u{110000}");
+ ^^^^^^^^^
+SyntaxError: Undefined Unicode code-point \ No newline at end of file
diff --git a/deps/v8/test/message/yield-in-arrow-param.js b/deps/v8/test/message/yield-in-arrow-param.js
new file mode 100644
index 0000000000..c815fe7603
--- /dev/null
+++ b/deps/v8/test/message/yield-in-arrow-param.js
@@ -0,0 +1,5 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function* g() { (a = yield) => {} }
diff --git a/deps/v8/test/message/yield-in-arrow-param.out b/deps/v8/test/message/yield-in-arrow-param.out
new file mode 100644
index 0000000000..8eeb0df5f6
--- /dev/null
+++ b/deps/v8/test/message/yield-in-arrow-param.out
@@ -0,0 +1,4 @@
+*%(basename)s:5: SyntaxError: Yield expression not allowed in formal parameter
+function* g() { (a = yield) => {} }
+ ^^^^^
+SyntaxError: Yield expression not allowed in formal parameter
diff --git a/deps/v8/test/message/strong-object-freeze-prop.js b/deps/v8/test/message/yield-in-generator-param.js
index 17250158d5..1a8f8420f6 100644
--- a/deps/v8/test/message/strong-object-freeze-prop.js
+++ b/deps/v8/test/message/yield-in-generator-param.js
@@ -2,10 +2,4 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --strong-mode
-
-"use strong";
-
-let o = {};
-Object.defineProperty(o, "foo", { writable: true });
-Object.defineProperty(o, "foo", { writable: false });
+function* g(a = yield) {}
diff --git a/deps/v8/test/message/yield-in-generator-param.out b/deps/v8/test/message/yield-in-generator-param.out
new file mode 100644
index 0000000000..ec46f478c9
--- /dev/null
+++ b/deps/v8/test/message/yield-in-generator-param.out
@@ -0,0 +1,4 @@
+*%(basename)s:5: SyntaxError: Yield expression not allowed in formal parameter
+function* g(a = yield) {}
+ ^^^^^
+SyntaxError: Yield expression not allowed in formal parameter
diff --git a/deps/v8/test/mjsunit/allocation-site-info.js b/deps/v8/test/mjsunit/allocation-site-info.js
index 9984f5bd2c..8325ab791c 100644
--- a/deps/v8/test/mjsunit/allocation-site-info.js
+++ b/deps/v8/test/mjsunit/allocation-site-info.js
@@ -317,6 +317,9 @@ function instanceof_check2(type) {
}
var realmBArray = Realm.eval(realmB, "Array");
+// Two calls with Array because ES6 instanceof desugars into a load of Array,
+// and load has a premonomorphic state.
+instanceof_check(Array);
instanceof_check(Array);
instanceof_check(realmBArray);
@@ -327,6 +330,10 @@ instanceof_check(realmBArray);
// It'll go (uninit->realmBArray) then (realmBArray->megamorphic). Recognize
// that state "Array" implies an AllocationSite is present, and code is
// configured to use it.
+
+// Two calls with realmBArray because ES6 instanceof desugars into a load of
+// realmBArray, and load has a premonomorphic state.
+instanceof_check2(realmBArray);
instanceof_check2(realmBArray);
instanceof_check2(Array);
@@ -467,3 +474,19 @@ gc();
assertKind(elements_kind.fast_double, obj[0]);
assertKind(elements_kind.fast, obj[1][0]);
})();
+
+// Test gathering allocation site feedback for generic ics.
+(function() {
+ function make() { return new Array(); }
+ function foo(a, i) { a[0] = i; }
+
+ var a = make();
+ assertKind(elements_kind.fast_smi_only, a);
+
+ // Make the keyed store ic go generic.
+ foo("howdy", 1);
+ foo(a, 3.5);
+
+ var b = make();
+ assertKind(elements_kind.fast_double, b);
+})();
diff --git a/deps/v8/test/mjsunit/array-foreach.js b/deps/v8/test/mjsunit/array-foreach.js
new file mode 100644
index 0000000000..0e34c03422
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-foreach.js
@@ -0,0 +1,17 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var a = [20, 21, 22, 23];
+a.__proto__ = [10, 11, 12, 13];
+
+var values = [];
+var indices = [];
+function callback(value, index, object) {
+ object.length = 2;
+ values.push(value);
+ indices.push(index);
+}
+a.forEach(callback);
+assertEquals([20, 21, 12, 13], values);
+assertEquals([0, 1, 2, 3], indices);
diff --git a/deps/v8/test/mjsunit/array-isarray.js b/deps/v8/test/mjsunit/array-isarray.js
index a21b1e1e7d..6b0d88543e 100644
--- a/deps/v8/test/mjsunit/array-isarray.js
+++ b/deps/v8/test/mjsunit/array-isarray.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies
-
assertTrue(Array.isArray([]));
assertFalse(Array.isArray({}));
diff --git a/deps/v8/test/mjsunit/array-join.js b/deps/v8/test/mjsunit/array-join.js
index c08c182fee..0c949e769a 100644
--- a/deps/v8/test/mjsunit/array-join.js
+++ b/deps/v8/test/mjsunit/array-join.js
@@ -91,3 +91,16 @@ for (var i = 0; i < a.length; i++) a[i] = undefined;
a[5] = "ab";
a[90000] = "cd";
assertEquals("abcd", a.join("")); // Must not throw.
+
+
+// Make sure that each element is accessed exactly once, and in the correct
+// order.
+{
+ var log = [];
+ var p = new Proxy({length: 3, 0: 'a', 1: 'b'}, {
+ get: function(t, k, r) { log.push(k); return Reflect.get(t, k, r); }
+ });
+
+ assertEquals("a,b,", Array.prototype.join.call(p));
+ assertEquals(["length", "0", "1", "2"], log);
+}
diff --git a/deps/v8/test/mjsunit/array-natives-elements.js b/deps/v8/test/mjsunit/array-natives-elements.js
index bf884fca47..9898faac23 100644
--- a/deps/v8/test/mjsunit/array-natives-elements.js
+++ b/deps/v8/test/mjsunit/array-natives-elements.js
@@ -30,6 +30,16 @@
// IC and Crankshaft support for smi-only elements in dynamic array literals.
function get(foo) { return foo; } // Used to generate dynamic values.
+// This code exists to eliminate the learning influence of AllocationSites
+// on the following tests.
+function make_array_string(literal) {
+ this.__sequence = this.__sequence + 1;
+ return "/* " + this.__sequence + " */ " + literal;
+}
+function make_array(literal) {
+ return eval(make_array_string(literal));
+}
+
var __sequence = 0;
function array_natives_test() {
@@ -40,16 +50,6 @@ function array_natives_test() {
assertTrue(%HasFastDoubleElements([1.1]));
assertTrue(%HasFastDoubleElements([1.1,2]));
- // This code exists to eliminate the learning influence of AllocationSites
- // on the following tests.
- function make_array_string(literal) {
- this.__sequence = this.__sequence + 1;
- return "/* " + this.__sequence + " */ " + literal;
- }
- function make_array(literal) {
- return eval(make_array_string(literal));
- }
-
// Push
var a0 = make_array("[1, 2, 3]");
assertTrue(%HasFastSmiElements(a0));
diff --git a/deps/v8/test/mjsunit/asm/atomics-add.js b/deps/v8/test/mjsunit/asm/atomics-add.js
index 9a07ecf2b1..abb7c98030 100644
--- a/deps/v8/test/mjsunit/asm/atomics-add.js
+++ b/deps/v8/test/mjsunit/asm/atomics-add.js
@@ -78,8 +78,8 @@ function testElementType(taConstr, f, offset) {
assertEquals(10, f(0, 10), name);
assertEquals(20, ta[0]);
// out of bounds
- assertEquals(0, f(-1, 0), name);
- assertEquals(0, f(ta.length, 0), name);
+ assertThrows(function() { f(-1, 0); });
+ assertThrows(function() { f(ta.length, 0); });
}
function testElement(m, offset) {
diff --git a/deps/v8/test/mjsunit/asm/atomics-and.js b/deps/v8/test/mjsunit/asm/atomics-and.js
index 2e7de75c2e..da770bfa88 100644
--- a/deps/v8/test/mjsunit/asm/atomics-and.js
+++ b/deps/v8/test/mjsunit/asm/atomics-and.js
@@ -79,8 +79,8 @@ function testElementType(taConstr, f, offset) {
assertEquals(0xf, f(0, 0x19), name);
assertEquals(0x9, ta[0]);
// out of bounds
- assertEquals(0, f(-1, 0), name);
- assertEquals(0, f(ta.length, 0), name);
+ assertThrows(function() { f(-1, 0); });
+ assertThrows(function() { f(ta.length, 0); });
}
function testElement(m, offset) {
diff --git a/deps/v8/test/mjsunit/asm/atomics-compareexchange.js b/deps/v8/test/mjsunit/asm/atomics-compareexchange.js
index 84d38504b3..dd93395569 100644
--- a/deps/v8/test/mjsunit/asm/atomics-compareexchange.js
+++ b/deps/v8/test/mjsunit/asm/atomics-compareexchange.js
@@ -86,8 +86,8 @@ function testElementType(taConstr, f, oobValue, offset) {
assertEquals(50, f(0, 0, 100), name);
assertEquals(50, ta[0]);
// out of bounds
- assertEquals(oobValue, f(-1, 0, 0), name);
- assertEquals(oobValue, f(ta.length, 0, 0), name);
+ assertThrows(function() { f(-1, 0, 0); });
+ assertThrows(function() { f(ta.length, 0, 0); });
}
function testElement(m, offset) {
diff --git a/deps/v8/test/mjsunit/asm/atomics-exchange.js b/deps/v8/test/mjsunit/asm/atomics-exchange.js
index 6de7b4b954..ea9c24693c 100644
--- a/deps/v8/test/mjsunit/asm/atomics-exchange.js
+++ b/deps/v8/test/mjsunit/asm/atomics-exchange.js
@@ -80,8 +80,8 @@ function testElementType(taConstr, f, offset) {
assertEquals(0x7f, f(0, 0xf), name);
assertEquals(0xf, ta[0]);
// out of bounds
- assertEquals(0, f(-1, 0), name);
- assertEquals(0, f(ta.length, 0), name);
+ assertThrows(function() { f(-1, 0); });
+ assertThrows(function() { f(ta.length, 0); });
}
function testElement(m, offset) {
diff --git a/deps/v8/test/mjsunit/asm/atomics-load.js b/deps/v8/test/mjsunit/asm/atomics-load.js
index 3e1d19f3a6..9a61a237be 100644
--- a/deps/v8/test/mjsunit/asm/atomics-load.js
+++ b/deps/v8/test/mjsunit/asm/atomics-load.js
@@ -62,7 +62,7 @@ function clearArray() {
}
}
-function testElementType(taConstr, f, oobValue, offset) {
+function testElementType(taConstr, f, offset) {
clearArray();
var ta = new taConstr(sab, offset);
@@ -71,17 +71,17 @@ function testElementType(taConstr, f, oobValue, offset) {
assertEquals(10, f(0), name);
assertEquals(0, f(1), name);
// out of bounds
- assertEquals(oobValue, f(-1), name);
- assertEquals(oobValue, f(ta.length), name);
+ assertThrows(function() { f(-1); });
+ assertThrows(function() { f(ta.length); });
}
function testElement(m, offset) {
- testElementType(Int8Array, m.loadi8, 0, offset);
- testElementType(Int16Array, m.loadi16, 0, offset);
- testElementType(Int32Array, m.loadi32, 0, offset);
- testElementType(Uint8Array, m.loadu8, 0, offset);
- testElementType(Uint16Array, m.loadu16, 0, offset);
- testElementType(Uint32Array, m.loadu32, 0, offset);
+ testElementType(Int8Array, m.loadi8, offset);
+ testElementType(Int16Array, m.loadi16, offset);
+ testElementType(Int32Array, m.loadi32, offset);
+ testElementType(Uint8Array, m.loadu8, offset);
+ testElementType(Uint16Array, m.loadu16, offset);
+ testElementType(Uint32Array, m.loadu32, offset);
}
var offset = 0;
diff --git a/deps/v8/test/mjsunit/asm/atomics-or.js b/deps/v8/test/mjsunit/asm/atomics-or.js
index 7431e35cf3..351ce9d112 100644
--- a/deps/v8/test/mjsunit/asm/atomics-or.js
+++ b/deps/v8/test/mjsunit/asm/atomics-or.js
@@ -81,8 +81,8 @@ function testElementType(taConstr, f, offset) {
assertEquals(0xf, f(0, 0x11), name);
assertEquals(0x1f, ta[0]);
// out of bounds
- assertEquals(0, f(-1, 0), name);
- assertEquals(0, f(ta.length, 0), name);
+ assertThrows(function() { f(-1, 0); });
+ assertThrows(function() { f(ta.length, 0); });
}
function testElement(m, offset) {
diff --git a/deps/v8/test/mjsunit/asm/atomics-store.js b/deps/v8/test/mjsunit/asm/atomics-store.js
index dab83af8a6..8b9e4c637f 100644
--- a/deps/v8/test/mjsunit/asm/atomics-store.js
+++ b/deps/v8/test/mjsunit/asm/atomics-store.js
@@ -68,7 +68,7 @@ function clearArray() {
}
}
-function testElementType(taConstr, f, oobValue, offset) {
+function testElementType(taConstr, f, offset) {
clearArray();
var ta = new taConstr(sab, offset);
@@ -76,17 +76,17 @@ function testElementType(taConstr, f, oobValue, offset) {
assertEquals(10, f(0, 10), name);
assertEquals(10, ta[0]);
// out of bounds
- assertEquals(oobValue, f(-1, 0), name);
- assertEquals(oobValue, f(ta.length, 0), name);
+ assertThrows(function() { f(-1, 0); });
+ assertThrows(function() { f(ta.length, 0); });
}
function testElement(m, offset) {
- testElementType(Int8Array, m.storei8, 0, offset);
- testElementType(Int16Array, m.storei16, 0, offset);
- testElementType(Int32Array, m.storei32, 0, offset);
- testElementType(Uint8Array, m.storeu8, 0, offset);
- testElementType(Uint16Array, m.storeu16, 0, offset);
- testElementType(Uint32Array, m.storeu32, 0, offset);
+ testElementType(Int8Array, m.storei8, offset);
+ testElementType(Int16Array, m.storei16, offset);
+ testElementType(Int32Array, m.storei32, offset);
+ testElementType(Uint8Array, m.storeu8, offset);
+ testElementType(Uint16Array, m.storeu16, offset);
+ testElementType(Uint32Array, m.storeu32, offset);
}
var offset = 0;
diff --git a/deps/v8/test/mjsunit/asm/atomics-sub.js b/deps/v8/test/mjsunit/asm/atomics-sub.js
index 2ad97e479b..6789aeda60 100644
--- a/deps/v8/test/mjsunit/asm/atomics-sub.js
+++ b/deps/v8/test/mjsunit/asm/atomics-sub.js
@@ -79,8 +79,8 @@ function testElementType(taConstr, f, offset) {
assertEquals(20, f(0, 10), name);
assertEquals(10, ta[0]);
// out of bounds
- assertEquals(0, f(-1, 0), name);
- assertEquals(0, f(ta.length, 0), name);
+ assertThrows(function() { f(-1, 0); });
+ assertThrows(function() { f(ta.length, 0); });
}
function testElement(m, offset) {
diff --git a/deps/v8/test/mjsunit/asm/atomics-xor.js b/deps/v8/test/mjsunit/asm/atomics-xor.js
index 990f8427a0..13c6440729 100644
--- a/deps/v8/test/mjsunit/asm/atomics-xor.js
+++ b/deps/v8/test/mjsunit/asm/atomics-xor.js
@@ -78,8 +78,8 @@ function testElementType(taConstr, f, offset) {
assertEquals(0xf, f(0, 0x11), name);
assertEquals(0x1e, ta[0]);
// out of bounds
- assertEquals(0, f(-1, 0), name);
- assertEquals(0, f(ta.length, 0), name);
+ assertThrows(function() { f(-1, 0); });
+ assertThrows(function() { f(ta.length, 0); });
}
function testElement(m, offset) {
diff --git a/deps/v8/test/mjsunit/asm/math-clz32.js b/deps/v8/test/mjsunit/asm/math-clz32.js
index 5a3aa202db..42dcf46575 100644
--- a/deps/v8/test/mjsunit/asm/math-clz32.js
+++ b/deps/v8/test/mjsunit/asm/math-clz32.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax
-
var stdlib = { Math: Math };
var f = (function Module(stdlib) {
@@ -26,6 +24,5 @@ for (var i = 0; i < 32; ++i) {
assertEquals(i, f((-1) >>> i));
}
for (var i = -2147483648; i < 2147483648; i += 3999773) {
- assertEquals(%MathClz32(i), f(i));
- assertEquals(%MathClz32(i), %_MathClz32(i >>> 0));
+ assertEquals(Math.clz32(i), f(i));
}
diff --git a/deps/v8/test/mjsunit/assert-opt-and-deopt.js b/deps/v8/test/mjsunit/assert-opt-and-deopt.js
index e9aba1d3c9..19502f3354 100644
--- a/deps/v8/test/mjsunit/assert-opt-and-deopt.js
+++ b/deps/v8/test/mjsunit/assert-opt-and-deopt.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax
-// Flags: --noconcurrent-recompilation --noconcurrent-osr
+// Flags: --noconcurrent-recompilation
if (%IsConcurrentRecompilationSupported()) {
print("Concurrent recompilation is turned on after all. Skipping this test.");
diff --git a/deps/v8/test/mjsunit/call-intrinsic-type-error.js b/deps/v8/test/mjsunit/call-intrinsic-type-error.js
new file mode 100644
index 0000000000..9d586977d9
--- /dev/null
+++ b/deps/v8/test/mjsunit/call-intrinsic-type-error.js
@@ -0,0 +1,11 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+try {
+ %Call(1, 0);
+} catch (e) {
+ assertTrue(e instanceof TypeError);
+}
diff --git a/deps/v8/test/mjsunit/compare-known-objects-tostringtag.js b/deps/v8/test/mjsunit/compare-known-objects-tostringtag.js
index 81544ca69f..4eacc8a8be 100644
--- a/deps/v8/test/mjsunit/compare-known-objects-tostringtag.js
+++ b/deps/v8/test/mjsunit/compare-known-objects-tostringtag.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-tostring
+// Flags: --allow-natives-syntax
function le(a, b) {
return a <= b;
diff --git a/deps/v8/test/mjsunit/compiler/deopt-materialize-accumulator.js b/deps/v8/test/mjsunit/compiler/deopt-materialize-accumulator.js
index c80e329150..217de769d3 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-materialize-accumulator.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-materialize-accumulator.js
@@ -34,7 +34,7 @@ var global = 3;
function f(a) {
// This will trigger a deopt since global was previously a SMI, with the
// accumulator holding an unboxed double which needs materialized.
- global = %_MathSqrt(a);
+ global = %math_sqrt(a);
}
%OptimizeFunctionOnNextCall(f);
f(0.25);
diff --git a/deps/v8/test/mjsunit/compiler/optimized-for-in.js b/deps/v8/test/mjsunit/compiler/optimized-for-in.js
index d93344ea57..9f5e4e7f83 100644
--- a/deps/v8/test/mjsunit/compiler/optimized-for-in.js
+++ b/deps/v8/test/mjsunit/compiler/optimized-for-in.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --no-concurrent-osr
+// Flags: --allow-natives-syntax
// Test for-in support in Crankshaft. For simplicity this tests assumes certain
// fixed iteration order for properties and will have to be adjusted if V8
diff --git a/deps/v8/test/mjsunit/compiler/regress-600593.js b/deps/v8/test/mjsunit/compiler/regress-600593.js
new file mode 100644
index 0000000000..c93f2ab800
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-600593.js
@@ -0,0 +1,21 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+"use strict"
+
+function f(c) {
+ if (c) { throw new Error(); }
+ throw new Error();
+};
+
+function Error() {
+ return arguments.length;
+}
+
+assertThrows(function() { f(true); });
+assertThrows(function() { f(false); });
+%OptimizeFunctionOnNextCall(f);
+assertThrows(function() { f(true); });
diff --git a/deps/v8/test/mjsunit/compiler/regress-96989.js b/deps/v8/test/mjsunit/compiler/regress-96989.js
deleted file mode 100644
index 85beaed595..0000000000
--- a/deps/v8/test/mjsunit/compiler/regress-96989.js
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-// Flags: --allow-natives-syntax --legacy-const
-
-// Test correct handling of uninitialized const.
-
-function test() {
- for (var i = 41; i < 42; i++) {
- var c = t ^ i;
- }
- const t;
- return c;
-}
-
-for (var i=0; i<10; i++) test();
-%OptimizeFunctionOnNextCall(test);
-assertEquals(41, test());
diff --git a/deps/v8/test/mjsunit/compiler/regress-const.js b/deps/v8/test/mjsunit/compiler/regress-const.js
index 89b559c3e0..5099c2f629 100644
--- a/deps/v8/test/mjsunit/compiler/regress-const.js
+++ b/deps/v8/test/mjsunit/compiler/regress-const.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --legacy-const
+// Flags: --allow-natives-syntax
// Test const initialization and assignments.
function f() {
@@ -38,7 +38,6 @@ function f() {
function g() {
const x = 42;
- x += 1;
return x;
}
@@ -50,7 +49,7 @@ for (var i = 0; i < 5; i++) {
%OptimizeFunctionOnNextCall(f);
%OptimizeFunctionOnNextCall(g);
-assertEquals(42, f());
+assertEquals(1, f());
assertEquals(42, g());
diff --git a/deps/v8/test/mjsunit/const-declaration.js b/deps/v8/test/mjsunit/const-declaration.js
deleted file mode 100644
index 42d03d547d..0000000000
--- a/deps/v8/test/mjsunit/const-declaration.js
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --legacy-const
-
-// Test handling of const variables in various settings.
-
-(function () {
- function f() {
- function g() {
- x = 42; // should be ignored
- return x; // force x into context
- }
- x = 43; // should be ignored
- assertEquals(undefined, g());
- x = 44; // should be ignored
- const x = 0;
- x = 45; // should be ignored
- assertEquals(0, g());
- }
- f();
-})();
-
-
-(function () {
- function f() {
- function g() {
- with ({foo: 0}) {
- x = 42; // should be ignored
- return x; // force x into context
- }
- }
- x = 43; // should be ignored
- assertEquals(undefined, g());
- x = 44; // should be ignored
- const x = 0;
- x = 45; // should be ignored
- assertEquals(0, g());
- }
- f();
-})();
-
-
-(function () {
- function f() {
- function g(s) {
- eval(s);
- return x; // force x into context
- }
- x = 43; // should be ignored
- assertEquals(undefined, g("x = 42;"));
- x = 44; // should be ignored
- const x = 0;
- x = 45; // should be ignored
- assertEquals(0, g("x = 46;"));
- }
- f();
-})();
-
-
-(function () {
- function f() {
- function g(s) {
- with ({foo: 0}) {
- eval(s);
- return x; // force x into context
- }
- }
- x = 43; // should be ignored
- assertEquals(undefined, g("x = 42;"));
- x = 44; // should be ignored
- const x = 0;
- x = 45; // should be ignored
- assertEquals(0, g("x = 46;"));
- }
- f();
-})();
-
-
-(function () {
- function f(s) {
- function g() {
- x = 42; // assign to global x, or to const x
- return x;
- }
- x = 43; // declare global x
- assertEquals(42, g());
- x = 44; // assign to global x
- eval(s);
- x = 45; // should be ignored (assign to const x)
- assertEquals(0, g());
- }
- f("const x = 0;");
-})();
-
-
-(function () {
- function f(s) {
- function g() {
- with ({foo: 0}) {
- x = 42; // assign to global x, or to const x
- return x;
- }
- }
- x = 43; // declare global x
- assertEquals(42, g());
- x = 44; // assign to global x
- eval(s);
- x = 45; // should be ignored (assign to const x)
- assertEquals(0, g());
- }
- f("const x = 0;");
-})();
-
-
-(function () {
- function f(s) {
- function g(s) {
- eval(s);
- return x;
- }
- x = 43; // declare global x
- assertEquals(42, g("x = 42;"));
- x = 44; // assign to global x
- eval(s);
- x = 45; // should be ignored (assign to const x)
- assertEquals(0, g("x = 46;"));
- }
- f("const x = 0;");
-})();
-
-
-(function () {
- function f(s) {
- function g(s) {
- with ({foo: 0}) {
- eval(s);
- return x;
- }
- }
- x = 43; // declare global x
- assertEquals(42, g("x = 42;"));
- x = 44; // assign to global x
- eval(s);
- x = 45; // should be ignored (assign to const x)
- assertEquals(0, g("x = 46;"));
- }
- f("const x = 0;");
-})();
diff --git a/deps/v8/test/mjsunit/const-eval-init.js b/deps/v8/test/mjsunit/const-eval-init.js
deleted file mode 100644
index a7e1fef6e7..0000000000
--- a/deps/v8/test/mjsunit/const-eval-init.js
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax --legacy-const
-
-// Test the handling of initialization of deleted const variables.
-// This only makes sense in local scopes since the declaration and
-// initialization of consts in the global scope happen at the same
-// time.
-
-function testIntroduceGlobal() {
- var source =
- // Deleting 'x' removes the local const property.
- "delete x;" +
- // Initialization redefines global 'x'.
- "const x = 3; assertEquals(3, x);" +
- // Test constness of the global 'x'.
- "x = 4; assertEquals(3, x);";
- eval(source);
-}
-
-testIntroduceGlobal();
-assertEquals("undefined", typeof x);
-
-function testAssignExistingGlobal() {
- var source =
- // Delete 'x' to remove the local const property.
- "delete x;" +
- // Initialization redefines global 'x'.
- "const x = 5; assertEquals(5, x);" +
- // Test constness of the global 'x'.
- "x = 6; assertEquals(5, x);";
- eval(source);
-}
-
-testAssignExistingGlobal();
-assertEquals("undefined", typeof x);
-
-function testAssignmentArgument(x) {
- function local() {
- var source = "delete x; const x = 7; assertEquals(7, x)";
- eval(source);
- }
- local();
- assertEquals("undefined", typeof x);
-}
-
-for (var i = 0; i < 5; i++) {
- testAssignmentArgument();
-}
-%OptimizeFunctionOnNextCall(testAssignmentArgument);
-testAssignmentArgument();
-assertEquals("undefined", typeof x);
-
-__defineSetter__('x', function() { throw 42; });
-var finished = false;
-function testRedefineGlobal() {
- // Initialization redefines global 'x'.
- var source = "delete x; const x = 8; finished = true;";
- eval(source);
-}
-
-testRedefineGlobal();
-assertTrue(finished);
-
-function testInitFastCaseExtension() {
- var source = "const x = 9; assertEquals(9, x); x = 10; assertEquals(9, x)";
- eval(source);
-}
-
-testInitFastCaseExtension();
-
-function testInitSlowCaseExtension() {
- var source = "";
- // Introduce 100 properties on the context extension object to force
- // it in slow case.
- for (var i = 0; i < 100; i++) source += ("var a" + i + " = " + i + ";");
- source += "const x = 10; assertEquals(10, x); x = 11; assertEquals(10, x)";
- eval(source);
-}
-
-testInitSlowCaseExtension();
-
-function testAssignSurroundingContextSlot() {
- var x = 12;
- function local() {
- var source = "delete x; const x = 13; assertEquals(13, x)";
- eval(source);
- }
- local();
- assertEquals(12, x);
-}
-
-testAssignSurroundingContextSlot();
diff --git a/deps/v8/test/mjsunit/const-redecl.js b/deps/v8/test/mjsunit/const-redecl.js
deleted file mode 100644
index ba7293026b..0000000000
--- a/deps/v8/test/mjsunit/const-redecl.js
+++ /dev/null
@@ -1,228 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --legacy-const
-
-// Test for const semantics.
-
-
-function CheckException(e) {
- var string = e.toString();
- var index = string.indexOf(':');
- assertTrue(index >= 0);
- var name = string.slice(0, index);
- assertTrue(string.indexOf("has already been declared") >= 0 ||
- string.indexOf("redeclaration") >= 0);
- if (name == 'SyntaxError') return 'TypeError';
- return name;
-}
-
-
-function TestLocal(s,e) {
- try {
- return eval("(function(){" + s + ";return " + e + "})")();
- } catch (x) {
- return CheckException(x);
- }
-}
-
-
-function TestContext(s,e) {
- try {
- // Use a with-statement to force the system to do dynamic
- // declarations of the introduced variables or constants.
- with ({}) {
- return eval(s + ";" + e);
- }
- } catch (x) {
- return CheckException(x);
- }
-}
-
-
-function TestAll(expected,s,opt_e) {
- var e = "";
- var msg = s;
- if (opt_e) { e = opt_e; msg += "; " + opt_e; }
- assertEquals(expected, TestLocal(s,e), "local:'" + msg + "'");
- assertEquals(expected, TestContext(s,e), "context:'" + msg + "'");
-}
-
-
-function TestConflict(def0, def1) {
- // No eval.
- TestAll("TypeError", def0 +'; ' + def1);
- // Eval everything.
- TestAll("TypeError", 'eval("' + def0 + '; ' + def1 + '")');
- // Eval first definition.
- TestAll("TypeError", 'eval("' + def0 +'"); ' + def1);
- // Eval second definition.
- TestAll("TypeError", def0 + '; eval("' + def1 +'")');
- // Eval both definitions separately.
- TestAll("TypeError", 'eval("' + def0 +'"); eval("' + def1 + '")');
-}
-
-
-// Test conflicting definitions.
-TestConflict("const x", "var x");
-TestConflict("const x = 0", "var x");
-TestConflict("const x", "var x = 0");
-TestConflict("const x = 0", "var x = 0");
-
-TestConflict("var x", "const x");
-TestConflict("var x = 0", "const x");
-TestConflict("var x", "const x = 0");
-TestConflict("var x = 0", "const x = 0");
-
-TestConflict("const x = undefined", "var x");
-TestConflict("const x", "var x = undefined");
-TestConflict("const x = undefined", "var x = undefined");
-
-TestConflict("var x = undefined", "const x");
-TestConflict("var x", "const x = undefined");
-TestConflict("var x = undefined", "const x = undefined");
-
-TestConflict("const x = undefined", "var x = 0");
-TestConflict("const x = 0", "var x = undefined");
-
-TestConflict("var x = undefined", "const x = 0");
-TestConflict("var x = 0", "const x = undefined");
-
-TestConflict("const x", "function x() { }");
-TestConflict("const x = 0", "function x() { }");
-TestConflict("const x = undefined", "function x() { }");
-
-TestConflict("function x() { }", "const x");
-TestConflict("function x() { }", "const x = 0");
-TestConflict("function x() { }", "const x = undefined");
-
-TestConflict("const x, y", "var x");
-TestConflict("const x, y", "var y");
-TestConflict("const x = 0, y", "var x");
-TestConflict("const x = 0, y", "var y");
-TestConflict("const x, y = 0", "var x");
-TestConflict("const x, y = 0", "var y");
-TestConflict("const x = 0, y = 0", "var x");
-TestConflict("const x = 0, y = 0", "var y");
-
-TestConflict("var x", "const x, y");
-TestConflict("var y", "const x, y");
-TestConflict("var x", "const x = 0, y");
-TestConflict("var y", "const x = 0, y");
-TestConflict("var x", "const x, y = 0");
-TestConflict("var y", "const x, y = 0");
-TestConflict("var x", "const x = 0, y = 0");
-TestConflict("var y", "const x = 0, y = 0");
-
-
-// Test that multiple conflicts do not cause issues.
-TestConflict("var x, y", "const x, y");
-
-
-// Test that repeated const declarations throw redeclaration errors.
-TestConflict("const x", "const x");
-TestConflict("const x = 0", "const x");
-TestConflict("const x", "const x = 0");
-TestConflict("const x = 0", "const x = 0");
-
-TestConflict("const x = undefined", "const x");
-TestConflict("const x", "const x = undefined");
-TestConflict("const x = undefined", "const x = undefined");
-
-TestConflict("const x = undefined", "const x = 0");
-TestConflict("const x = 0", "const x = undefined");
-
-TestConflict("const x, y", "const x");
-TestConflict("const x, y", "const y");
-TestConflict("const x = 0, y", "const x");
-TestConflict("const x = 0, y", "const y");
-TestConflict("const x, y = 0", "const x");
-TestConflict("const x, y = 0", "const y");
-TestConflict("const x = 0, y = 0", "const x");
-TestConflict("const x = 0, y = 0", "const y");
-
-TestConflict("const x", "const x, y");
-TestConflict("const y", "const x, y");
-TestConflict("const x", "const x = 0, y");
-TestConflict("const y", "const x = 0, y");
-TestConflict("const x", "const x, y = 0");
-TestConflict("const y", "const x, y = 0");
-TestConflict("const x", "const x = 0, y = 0");
-TestConflict("const y", "const x = 0, y = 0");
-
-
-// Test that multiple const conflicts do not cause issues.
-TestConflict("const x, y", "const x, y");
-
-
-// Test that const inside loop behaves correctly.
-var loop = "for (var i = 0; i < 3; i++) { const x = i; }";
-TestAll(0, loop, "x");
-TestAll(0, "var a,b,c,d,e,f,g,h; " + loop, "x");
-
-
-// Test that const inside with behaves correctly.
-TestAll(87, "with ({x:42}) { const x = 87; }", "x");
-TestAll(undefined, "with ({x:42}) { const x; }", "x");
-
-
-// Additional tests for how various combinations of re-declarations affect
-// the values of the var/const in question.
-try {
- eval("var undefined;");
-} catch (ex) {
- assertUnreachable("undefined (1) has thrown");
-}
-
-var original_undef = undefined;
-var undefined = 1; // Should be silently ignored.
-assertEquals(original_undef, undefined, "undefined got overwritten");
-undefined = original_undef;
-
-const e = 1; eval('var e = 2');
-assertEquals(1, e, "e has wrong value");
-
-const h; eval('var h = 1');
-assertEquals(undefined, h, "h has wrong value");
-
-eval("Object.defineProperty(this, 'i', { writable: true });"
- + "const i = 7;"
- + "assertEquals(7, i, \"i has wrong value\");");
-
-var global = this;
-Object.defineProperty(global, 'j', { value: 100, writable: true });
-assertEquals(100, j);
-// The const declaration stays configurable, so the declaration above goes
-// through even though the const declaration is hoisted above.
-const j = 2;
-assertEquals(2, j, "j has wrong value");
-
-var k = 1;
-try { eval('const k'); } catch(e) { }
-assertEquals(1, k, "k has wrong value");
-try { eval('const k = 10'); } catch(e) { }
-assertEquals(1, k, "k has wrong value");
diff --git a/deps/v8/test/mjsunit/const.js b/deps/v8/test/mjsunit/const.js
deleted file mode 100644
index f00932f7b2..0000000000
--- a/deps/v8/test/mjsunit/const.js
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --legacy-const
-
-// Test const properties and pre/postfix operation.
-function f() {
- const x = 1;
- x++;
- assertEquals(1, x);
- x--;
- assertEquals(1, x);
- ++x;
- assertEquals(1, x);
- --x;
- assertEquals(1, x);
- assertEquals(1, x++);
- assertEquals(1, x--);
- assertEquals(2, ++x);
- assertEquals(0, --x);
-}
-
-f();
-
-// Test that the value is read eventhough assignment is disallowed.
-// Spidermonkey does not do this, but it seems like the right thing to
-// do so that 'o++' is equivalent to 'o = o + 1'.
-var valueOfCount = 0;
-
-function g() {
- const o = { valueOf: function() { valueOfCount++; return 42; } };
- assertEquals(42, +o);
- assertEquals(1, valueOfCount);
- o++;
- assertEquals(42, +o);
- assertEquals(3, valueOfCount);
- ++o;
- assertEquals(42, +o);
- assertEquals(5, valueOfCount);
- o--;
- assertEquals(42, +o);
- assertEquals(7, valueOfCount);
- --o;
- assertEquals(42, +o);
- assertEquals(9, valueOfCount);
-}
-
-g();
diff --git a/deps/v8/test/mjsunit/constant-folding.js b/deps/v8/test/mjsunit/constant-folding.js
index 148928aaaf..d6ac9fe3d5 100644
--- a/deps/v8/test/mjsunit/constant-folding.js
+++ b/deps/v8/test/mjsunit/constant-folding.js
@@ -29,8 +29,6 @@
// The code generator now handles compile-time constants specially.
// Test the code generated when operands are known at compile time
-// Flags: --legacy-const
-
// Test count operations involving constants
function test_count() {
var x = "foo";
@@ -69,11 +67,6 @@ function test_count() {
z = y;
y++;
assertEquals(z, 20);
-
- const w = 30;
- assertEquals(w++, 30);
- assertEquals(++w, 31);
- assertEquals(++w, 31);
}
test_count();
diff --git a/deps/v8/test/mjsunit/debug-backtrace.js b/deps/v8/test/mjsunit/debug-backtrace.js
index 01775f560e..3a72384cce 100644
--- a/deps/v8/test/mjsunit/debug-backtrace.js
+++ b/deps/v8/test/mjsunit/debug-backtrace.js
@@ -32,9 +32,9 @@ function f(x, y) {
a=1;
};
-var m = function() {
+var m = (0, function() {
new f(1);
-};
+});
function g() {
m();
diff --git a/deps/v8/test/mjsunit/debug-evaluate-closure.js b/deps/v8/test/mjsunit/debug-evaluate-closure.js
index 541dec9d6d..ebd42f3ae9 100644
--- a/deps/v8/test/mjsunit/debug-evaluate-closure.js
+++ b/deps/v8/test/mjsunit/debug-evaluate-closure.js
@@ -26,7 +26,6 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --expose-debug-as debug --allow-natives-syntax
-// Flags: --debug-eval-readonly-locals
Debug = debug.Debug;
var listened = false;
@@ -35,7 +34,7 @@ function listener(event, exec_state, event_data, data) {
if (event != Debug.DebugEvent.Break) return;
try {
assertEquals("goo", exec_state.frame(0).evaluate("goo").value());
- exec_state.frame(0).evaluate("goo = 'goo foo'"); // no effect
+ exec_state.frame(0).evaluate("goo = 'goo foo'");
assertEquals("bar return", exec_state.frame(0).evaluate("bar()").value());
assertEquals("inner bar", exec_state.frame(0).evaluate("inner").value());
assertEquals("outer bar", exec_state.frame(0).evaluate("outer").value());
@@ -43,10 +42,10 @@ function listener(event, exec_state, event_data, data) {
assertEquals("baz inner", exec_state.frame(0).evaluate("baz").value());
assertEquals("baz outer", exec_state.frame(1).evaluate("baz").value());
exec_state.frame(0).evaluate("w = 'w foo'");
- exec_state.frame(0).evaluate("inner = 'inner foo'"); // no effect
- exec_state.frame(0).evaluate("outer = 'outer foo'"); // has effect
- exec_state.frame(0).evaluate("baz = 'baz inner foo'"); // no effect
- exec_state.frame(1).evaluate("baz = 'baz outer foo'"); // has effect
+ exec_state.frame(0).evaluate("inner = 'inner foo'");
+ exec_state.frame(0).evaluate("outer = 'outer foo'");
+ exec_state.frame(0).evaluate("baz = 'baz inner foo'");
+ exec_state.frame(1).evaluate("baz = 'baz outer foo'");
listened = true;
} catch (e) {
print(e);
@@ -68,7 +67,7 @@ function foo() {
with (withv) {
var bar = function bar() {
- assertEquals("goo", goo);
+ assertEquals("goo foo", goo);
inner = "inner bar";
outer = "outer bar";
v = "v bar";
@@ -80,8 +79,8 @@ function foo() {
debugger;
}
- assertEquals("inner bar", inner);
- assertEquals("baz inner", baz);
+ assertEquals("inner foo", inner);
+ assertEquals("baz inner foo", baz);
assertEquals("w foo", withw.w);
assertEquals("v bar", withv.v);
}
diff --git a/deps/v8/test/mjsunit/debug-evaluate-const.js b/deps/v8/test/mjsunit/debug-evaluate-const.js
deleted file mode 100644
index 6ffddbb59d..0000000000
--- a/deps/v8/test/mjsunit/debug-evaluate-const.js
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-debug-as debug --debug-eval-readonly-locals --legacy-const
-
-Debug = debug.Debug
-
-listenerComplete = false;
-exception = false;
-
-// var0: init after break point, changed by debug eval.
-// const0: init before break point, changed by debug eval.
-// const1: init after break point, materialized but untouched by debug eval.
-// const2: init after break point, materialized and changed by debug eval.
-// const3: context allocated const, init before break point, changed by eval.
-function f() {
- var var1 = 21;
- const const3 = 3;
-
- function g() {
- const const0 = 0;
- assertEquals(undefined, const1);
- assertEquals(undefined, const2);
- assertEquals(3, const3);
- assertEquals(21, var1);
-
- debugger; // Break point.
-
- assertEquals(undefined, var0);
- assertEquals(0, const0);
- assertEquals(undefined, const1);
- assertEquals(undefined, const2);
- var var0 = 20;
- const const1 = 1;
- const const2 = 2;
- assertEquals(20, var0);
- assertEquals(1, const1);
- assertEquals(2, const2);
- }
-
- g();
-
- assertEquals(21, var1);
- assertEquals(3, const3);
-}
-
-
-function listener(event, exec_state, event_data, data) {
- if (event != Debug.DebugEvent.Break) return;
- try {
- var frame = exec_state.frame(0);
- var evaluate = function(something) {
- return frame.evaluate(something).value()
- }
-
- var count = frame.localCount();
- assertEquals(4, count);
- var expectation = { "const0" : 0,
- "const1" : undefined,
- "const2" : undefined,
- "const3" : 3,
- "var0" : undefined,
- "var1" : 21 };
- for (var i = 0; i < frame.localCount(); ++i) {
- var name = frame.localName(i);
- var value = frame.localValue(i).value();
- assertEquals(expectation[name], value);
- }
-
- evaluate('const0 = 10');
- evaluate('const2 = 12');
- evaluate('const3 = 13');
- evaluate('var0 = 30');
- evaluate('var1 = 31');
-
- // Indicate that all was processed.
- listenerComplete = true;
- } catch (e) {
- exception = e;
- print("Caught something. " + e + " " + e.stack);
- };
-};
-
-// Run and compile before debugger is active.
-try { f(); } catch (e) { }
-
-Debug.setListener(listener);
-
-f();
-
-Debug.setListener(null);
-
-assertFalse(exception, "exception in listener")
-assertTrue(listenerComplete);
diff --git a/deps/v8/test/mjsunit/debug-evaluate-locals-capturing.js b/deps/v8/test/mjsunit/debug-evaluate-locals-capturing.js
index 6d65861fc7..5fdacba85c 100644
--- a/deps/v8/test/mjsunit/debug-evaluate-locals-capturing.js
+++ b/deps/v8/test/mjsunit/debug-evaluate-locals-capturing.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --debug-eval-readonly-locals
+// Flags: --expose-debug-as debug
Debug = debug.Debug
var exception = null;
@@ -35,7 +35,7 @@ Debug.setListener(listener);
} catch (e) {
assertEquals(0, i);
debugger;
- assertEquals(0, i);
+ assertEquals(5, i);
}
}());
diff --git a/deps/v8/test/mjsunit/debug-evaluate-locals.js b/deps/v8/test/mjsunit/debug-evaluate-locals.js
index 642e0c0682..1788bd8ce2 100644
--- a/deps/v8/test/mjsunit/debug-evaluate-locals.js
+++ b/deps/v8/test/mjsunit/debug-evaluate-locals.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug --debug-eval-readonly-locals
+// Flags: --expose-debug-as debug
// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug
@@ -89,18 +89,17 @@ function f() {
}
function checkFrame2(frame) {
- // Frame 2 (f) has normal variables a and b (and arguments).
+ // Frame 2 (f) has normal variables a and b.
var count = frame.localCount();
- assertEquals(3, count);
+ assertEquals(2, count);
for (var i = 0; i < count; ++i) {
var name = frame.localName(i);
var value = frame.localValue(i).value();
if (name == 'a') {
assertEquals(5, value);
- } else if (name == 'b') {
- assertEquals(0, value);
} else {
- assertEquals('arguments', name);
+ assertEquals('b', name);
+ assertEquals(0, value);
}
}
}
@@ -114,20 +113,17 @@ function listener(event, exec_state, event_data, data) {
checkFrame1(exec_state.frame(1));
checkFrame2(exec_state.frame(2));
- // Evaluating a and b on frames 0, 1 and 2 produces 1, 2, 3, 4, 5 and 6.
assertEquals(1, exec_state.frame(0).evaluate('a').value());
assertEquals(2, exec_state.frame(0).evaluate('b').value());
assertEquals(5, exec_state.frame(0).evaluate('eval').value());
assertEquals(3, exec_state.frame(1).evaluate('a').value());
- // Reference error because g does not reference b.
- assertThrows(() => exec_state.frame(1).evaluate('b'), ReferenceError);
+ assertEquals(4, exec_state.frame(1).evaluate('b').value());
assertEquals("function",
typeof exec_state.frame(1).evaluate('eval').value());
assertEquals(5, exec_state.frame(2).evaluate('a').value());
assertEquals(6, exec_state.frame(2).evaluate('b').value());
assertEquals("function",
typeof exec_state.frame(2).evaluate('eval').value());
- // Assignments to local variables only have temporary effect.
assertEquals("foo",
exec_state.frame(0).evaluate('a = "foo"').value());
assertEquals("bar",
@@ -146,7 +142,7 @@ Debug.setListener(listener);
var f_result = f();
-assertEquals(4, f_result);
+assertEquals("foobar", f_result);
// Make sure that the debug event listener was invoked.
assertFalse(exception, "exception in listener")
diff --git a/deps/v8/test/mjsunit/debug-evaluate-modify-catch-block-scope.js b/deps/v8/test/mjsunit/debug-evaluate-modify-catch-block-scope.js
index 07d6ccbe6f..676f78282d 100644
--- a/deps/v8/test/mjsunit/debug-evaluate-modify-catch-block-scope.js
+++ b/deps/v8/test/mjsunit/debug-evaluate-modify-catch-block-scope.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --debug-eval-readonly-locals
+// Flags: --expose-debug-as debug
Debug = debug.Debug
@@ -10,7 +10,11 @@ var exception = null;
function listener(event, exec_state, event_data, data) {
if (event != Debug.DebugEvent.Break) return;
try {
+ exec_state.frame(0).evaluate("a = 2");
+ exec_state.frame(0).evaluate("e = 3");
exec_state.frame(0).evaluate("bar()");
+ exec_state.frame(0).evaluate("a++");
+ exec_state.frame(0).evaluate("e++");
} catch (e) {
exception = e;
print(e + e.stack);
@@ -26,12 +30,12 @@ Debug.setListener(listener);
} catch (e) {
let a = 1;
function bar() {
- a = 2;
- e = 2;
+ a *= 2;
+ e *= 2;
}
debugger;
- assertEquals(2, a);
- assertEquals(2, e);
+ assertEquals(5, a);
+ assertEquals(7, e);
}
})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-605581.js b/deps/v8/test/mjsunit/debug-evaluate-modify-this.js
index 0f1daabead..930f6ed043 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-605581.js
+++ b/deps/v8/test/mjsunit/debug-evaluate-modify-this.js
@@ -4,25 +4,30 @@
// Flags: --expose-debug-as debug
-var Debug = debug.Debug;
+Debug = debug.Debug
+
var exception = null;
+var f = () => { debugger; }
+var g = function() { debugger; }
+var h = (function() { return () => { debugger; }; }).call({});
+
function listener(event, exec_state, event_data, data) {
if (event != Debug.DebugEvent.Break) return;
try {
- assertThrows(() => exec_state.frame(0).evaluate("bar.baz"), ReferenceError);
+ assertThrows(() => exec_state.frame(0).evaluate("this = 2"));
} catch (e) {
exception = e;
- }
-}
+ print("Caught something. " + e + " " + e.stack);
+ };
+};
Debug.setListener(listener);
-(function() {
- debugger; // bar is still in TDZ at this point.
- let bar = 1;
- (x => bar); // force bar to be context-allocated.
-})();
+f();
+g();
+g.call({});
+h();
Debug.setListener(null);
assertNull(exception);
diff --git a/deps/v8/test/mjsunit/debug-evaluate-nested-let.js b/deps/v8/test/mjsunit/debug-evaluate-nested-let.js
new file mode 100644
index 0000000000..8e9f8c157a
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-evaluate-nested-let.js
@@ -0,0 +1,52 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+Debug = debug.Debug
+
+var exception = null;
+
+function f() {
+ let a = 0;
+ function g() {
+ let a = 1;
+ {
+ let a = 2;
+ debugger; // Breakpoint.
+ if (a !== 3) {
+ // We cannot change stack locals in optimized frames.
+ assertEquals(2, a);
+ assertOptimized(g);
+ }
+ }
+ assertEquals(1, a);
+ }
+ g.call(1);
+ if (a !== 4) {
+ // We cannot change stack locals in optimized frames.
+ assertEquals(0, a);
+ assertOptimized(f);
+ }
+}
+
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ exec_state.frame(0).evaluate("a = 3");
+ exec_state.frame(1).evaluate("a = 4");
+ assertThrows(() => exec_state.frame(0).evaluate("this = 2"));
+ } catch (e) {
+ exception = e;
+ print("Caught something. " + e + " " + e.stack);
+ };
+};
+
+Debug.setListener(listener);
+
+f();
+
+Debug.setListener(null);
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/debug-evaluate-nested.js b/deps/v8/test/mjsunit/debug-evaluate-nested.js
new file mode 100644
index 0000000000..da11b9001c
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-evaluate-nested.js
@@ -0,0 +1,49 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+Debug = debug.Debug;
+ScopeType = debug.ScopeType;
+var exception = null;
+var nested = false;
+
+function bar() {
+ let a = 1;
+ (function foo() {
+ let b = a;
+ with (new Proxy({}, {})) {
+ debugger;
+ }
+ })();
+}
+
+function checkScopes(scopes, expectation) {
+ assertEquals(scopes.map(s => s.scopeType()), expectation);
+}
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ if (!nested) {
+ nested = true;
+ checkScopes(exec_state.frame(0).allScopes(),
+ [ ScopeType.With, ScopeType.Local, ScopeType.Closure,
+ ScopeType.Script, ScopeType.Global ]);
+ exec_state.frame(0).evaluate("debugger;");
+ } else {
+ checkScopes(exec_state.frame(0).allScopes(),
+ [ ScopeType.With, ScopeType.Closure,
+ ScopeType.Script, ScopeType.Global ]);
+ }
+ } catch (e) {
+ exception = e;
+ print(e + e.stack);
+ }
+}
+
+Debug.setListener(listener);
+bar();
+Debug.setListener(null);
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/debug-function-scopes.js b/deps/v8/test/mjsunit/debug-function-scopes.js
index fac3b16b8d..f63d7b26c8 100644
--- a/deps/v8/test/mjsunit/debug-function-scopes.js
+++ b/deps/v8/test/mjsunit/debug-function-scopes.js
@@ -73,7 +73,7 @@ assertEquals(6, mirror.scopeCount());
CheckScope(mirror.scope(0), { a: 4, b: 5 }, ScopeType.Closure);
CheckScope(mirror.scope(1), { w: 5, v: "Capybara" }, ScopeType.With);
-CheckScope(mirror.scope(2), { y: 17, z: 22 }, ScopeType.Closure);
+CheckScope(mirror.scope(2), { z: 22 }, ScopeType.Closure);
CheckScope(mirror.scope(3), { x: 5 }, ScopeType.Closure);
CheckScope(mirror.scope(4), {}, ScopeType.Script);
CheckScope(mirror.scope(5), {}, ScopeType.Global);
diff --git a/deps/v8/test/mjsunit/debug-multiple-var-decl.js b/deps/v8/test/mjsunit/debug-multiple-var-decl.js
new file mode 100644
index 0000000000..b27b03d28b
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-multiple-var-decl.js
@@ -0,0 +1,74 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+// Test we break at every assignment in a var-statement with multiple
+// variable declarations.
+
+var exception = null;
+var log = []
+
+function f() {
+ var l1 = 1, // l
+ l2, // m
+ l3 = 3; // n
+ let l4, // o
+ l5 = 5, // p
+ l6 = 6; // q
+ const l7 = 7, // r
+ l8 = 8, // s
+ l9 = 9; // t
+ return 0; // u
+} // v
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ var line = exec_state.frame(0).sourceLineText();
+ var col = exec_state.frame(0).sourceColumn();
+ print(line);
+ var match = line.match(/\/\/ (\w)$/);
+ assertEquals(2, match.length);
+ log.push(match[1] + col);
+ if (match[1] != "v") {
+ exec_state.prepareStep(Debug.StepAction.StepIn);
+ }
+ } catch (e) {
+ exception = e;
+ }
+}
+
+var Debug = debug.Debug;
+Debug.setListener(listener);
+
+debugger; // a
+var g1 = 1, // b
+ g2 = 2, // c
+ g3; // d
+let g4 = 4, // e
+ g5, // f
+ g6 = 6; // g
+const g7 = 7, // h
+ g8 = 8, // i
+ g9 = f(); // j
+
+Debug.setListener(null);
+
+assertNull(exception);
+
+// Note that let declarations, if not explicitly initialized, implicitly
+// initialize to undefined.
+
+var expected = [
+ "a0", // debugger statement
+ "b9","c9", // global var
+ "e9","f4","g9", // global let
+ "h11","i11","j11", // global const
+ "l11","n11", // local var
+ "o6","p11","q11", // local let
+ "r13","s13","t13", // local const
+ "u2","v0", // return
+];
+assertEquals(expected, log);
diff --git a/deps/v8/test/mjsunit/debug-scopes.js b/deps/v8/test/mjsunit/debug-scopes.js
index 8cde95194a..3659d4e129 100644
--- a/deps/v8/test/mjsunit/debug-scopes.js
+++ b/deps/v8/test/mjsunit/debug-scopes.js
@@ -152,7 +152,7 @@ function CheckScopeChainNames(names, exec_state) {
for (var i = 0; i < names.length; i++) {
var scope = exec_state.frame().scope(i);
assertTrue(scope.isScope());
- assertEquals(scope.details().name(), names[i])
+ assertEquals(names[i], scope.details().name())
}
}
@@ -223,6 +223,21 @@ function CheckScopeContent(content, number, exec_state) {
assertTrue(found, "Scope object " + response.body.object.ref + " not found");
}
+// Check that the scopes have positions as expected.
+function CheckScopeChainPositions(positions, exec_state) {
+ var all_scopes = exec_state.frame().allScopes();
+ assertEquals(positions.length, all_scopes.length, "FrameMirror.allScopes length");
+ for (var i = 0; i < positions.length; i++) {
+ var scope = exec_state.frame().scope(i);
+ assertTrue(scope.isScope());
+ var position = positions[i];
+ if (!position)
+ continue;
+
+ assertEquals(position.start, scope.details().startPosition())
+ assertEquals(position.end, scope.details().endPosition())
+ }
+}
// Simple empty local scope.
BeginTest("Local 1");
@@ -529,7 +544,7 @@ listener_delegate = function(exec_state) {
debug.ScopeType.Script,
debug.ScopeType.Global], exec_state);
CheckScopeContent({a:1}, 1, exec_state);
- CheckScopeChainNames([undefined, "closure_1", undefined, undefined], exec_state)
+ CheckScopeChainNames(["f", "closure_1", undefined, undefined], exec_state)
};
closure_1(1)();
EndTest();
@@ -556,7 +571,7 @@ listener_delegate = function(exec_state) {
debug.ScopeType.Script,
debug.ScopeType.Global], exec_state);
CheckScopeContent({a:1,x:3}, 1, exec_state);
- CheckScopeChainNames([undefined, "closure_2", undefined, undefined], exec_state)
+ CheckScopeChainNames(["f", "closure_2", undefined, undefined], exec_state)
};
closure_2(1, 2)();
EndTest();
@@ -584,7 +599,7 @@ listener_delegate = function(exec_state) {
debug.ScopeType.Script,
debug.ScopeType.Global], exec_state);
CheckScopeContent({a:1,b:2,x:3,y:4}, 1, exec_state);
- CheckScopeChainNames([undefined, "closure_3", undefined, undefined], exec_state)
+ CheckScopeChainNames(["f", "closure_3", undefined, undefined], exec_state)
};
closure_3(1, 2)();
EndTest();
@@ -615,7 +630,7 @@ listener_delegate = function(exec_state) {
debug.ScopeType.Script,
debug.ScopeType.Global], exec_state);
CheckScopeContent({a:1,b:2,x:3,y:4,f:function(){}}, 1, exec_state);
- CheckScopeChainNames([undefined, "closure_4", undefined, undefined], exec_state)
+ CheckScopeChainNames(["f", "closure_4", undefined, undefined], exec_state)
};
closure_4(1, 2)();
EndTest();
@@ -733,7 +748,7 @@ listener_delegate = function(exec_state) {
debug.ScopeType.Script,
debug.ScopeType.Global], exec_state);
CheckScopeContent({x: 2}, 0, exec_state);
- CheckScopeChainNames([undefined, undefined, undefined], exec_state)
+ CheckScopeChainNames(["inner", undefined, undefined], exec_state)
};
closure_8();
EndTest();
@@ -755,7 +770,7 @@ listener_delegate = function(exec_state) {
debug.ScopeType.Closure,
debug.ScopeType.Script,
debug.ScopeType.Global], exec_state);
- CheckScopeChainNames([undefined, "closure_9", undefined, undefined], exec_state)
+ CheckScopeChainNames(["inner", "closure_9", undefined, undefined], exec_state)
};
closure_9();
EndTest();
@@ -822,10 +837,10 @@ function closure_in_with_1() {
listener_delegate = function(exec_state) {
CheckScopeChain([debug.ScopeType.Local,
debug.ScopeType.With,
- debug.ScopeType.Closure,
debug.ScopeType.Script,
debug.ScopeType.Global], exec_state);
CheckScopeContent({x: 2}, 0, exec_state);
+ CheckScopeContent({x: 1}, 1, exec_state);
};
closure_in_with_1();
EndTest();
@@ -846,13 +861,12 @@ listener_delegate = function(exec_state) {
CheckScopeChain([debug.ScopeType.With,
debug.ScopeType.Local,
debug.ScopeType.With,
- debug.ScopeType.Closure,
debug.ScopeType.Script,
debug.ScopeType.Global], exec_state);
CheckScopeContent({x: 3}, 0, exec_state);
CheckScopeContent({x: 2}, 1, exec_state);
CheckScopeContent({x: 1}, 2, exec_state);
- CheckScopeChainNames(["inner", "inner", "closure_in_with_2", "closure_in_with_2", undefined, undefined], exec_state)
+ CheckScopeChainNames(["inner", "inner", "closure_in_with_2", undefined, undefined], exec_state)
};
closure_in_with_2();
EndTest();
@@ -934,7 +948,7 @@ listener_delegate = function(exec_state) {
debug.ScopeType.Script,
debug.ScopeType.Global], exec_state);
CheckScopeContent({e:'Exception'}, 0, exec_state);
- CheckScopeChainNames(["catch_block_1", undefined, undefined, undefined], exec_state)
+ CheckScopeChainNames(["catch_block_1", "catch_block_1", undefined, undefined], exec_state)
};
catch_block_1();
EndTest();
@@ -1080,7 +1094,7 @@ listener_delegate = function(exec_state) {
debug.ScopeType.Script,
debug.ScopeType.Global], exec_state);
CheckScopeContent({e:'Exception'}, 0, exec_state);
- CheckScopeChainNames(["catch_block_7", undefined, undefined, undefined], exec_state)
+ CheckScopeChainNames(["catch_block_7", "catch_block_7", undefined, undefined], exec_state)
};
catch_block_7();
EndTest();
@@ -1094,7 +1108,7 @@ listener_delegate = function(exec_state) {
debug.ScopeType.Script,
debug.ScopeType.Global], exec_state);
CheckScopeContent({}, 1, exec_state);
- CheckScopeChainNames([undefined, undefined, undefined], exec_state)
+ CheckScopeChainNames(["m", undefined, undefined], exec_state)
};
(function() {
@@ -1109,6 +1123,70 @@ listener_delegate = function(exec_state) {
EndTest();
+BeginTest("Scope positions");
+var code1 = "function f() { \n" +
+ " var a = 1; \n" +
+ " function b() { \n" +
+ " debugger; \n" +
+ " return a + 1; \n" +
+ " } \n" +
+ " b(); \n" +
+ "} \n" +
+ "f(); \n";
+
+listener_delegate = function(exec_state) {
+ CheckScopeChainPositions([{start: 58, end: 118}, {start: 10, end: 162}, {}, {}], exec_state);
+}
+eval(code1);
+EndTest();
+
+
+function catch_block_2() {
+ try {
+ throw 'Exception';
+ } catch (e) {
+ with({n:10}) {
+ debugger;
+ }
+ }
+};
+
+BeginTest("Scope positions in catch and 'with' statement");
+var code2 = "function catch_block() { \n" +
+ " try { \n" +
+ " throw 'Exception'; \n" +
+ " } catch (e) { \n" +
+ " with({n : 10}) { \n" +
+ " debugger; \n" +
+ " } \n" +
+ " } \n" +
+ "} \n" +
+ "catch_block(); \n";
+
+listener_delegate = function(exec_state) {
+ CheckScopeChainPositions([{start: 131, end: 173}, {start: 94, end: 199}, {start: 20, end: 225}, {}, {}], exec_state);
+}
+eval(code2);
+EndTest();
+
+BeginTest("Scope positions in for statement");
+var code3 = "function for_statement() { \n" +
+ " for (let i = 0; i < 1; i++) { \n" +
+ " debugger; \n" +
+ " } \n" +
+ "} \n" +
+ "for_statement(); \n";
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Block,
+ debug.ScopeType.Block,
+ debug.ScopeType.Local,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeChainPositions([{start: 52, end: 111}, {start: 42, end: 111}, {start: 22, end: 145}, {}, {}], exec_state);
+}
+eval(code3);
+EndTest();
assertEquals(begin_test_count, break_count,
'one or more tests did not enter the debugger');
diff --git a/deps/v8/test/mjsunit/debug-step-end-of-script.js b/deps/v8/test/mjsunit/debug-step-end-of-script.js
index ded58d1dad..e8ffcc8bc1 100644
--- a/deps/v8/test/mjsunit/debug-step-end-of-script.js
+++ b/deps/v8/test/mjsunit/debug-step-end-of-script.js
@@ -5,7 +5,7 @@
// Flags: --expose-debug-as debug --allow-natives-syntax
var Debug = debug.Debug;
-var expected = ["debugger;", "", "debugger;"];
+var expected = ["debugger;", "debugger;"];
function listener(event, exec_state, event_data, data) {
if (event != Debug.DebugEvent.Break) return;
diff --git a/deps/v8/test/mjsunit/debug-step.js b/deps/v8/test/mjsunit/debug-step.js
index bfbea16380..6f5c8f410c 100644
--- a/deps/v8/test/mjsunit/debug-step.js
+++ b/deps/v8/test/mjsunit/debug-step.js
@@ -64,9 +64,9 @@ function f() {
// Set a breakpoint on the for statement (line 1).
bp1 = Debug.setBreakPoint(f, 1);
-// Check that performing 1000 steps will make i 499.
+// Check that performing 1000 steps will make i 333.
var step_count = 1000;
result = -1;
f();
-assertEquals(332, result);
+assertEquals(333, result);
Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/debug-stepin-accessor.js b/deps/v8/test/mjsunit/debug-stepin-accessor.js
index daf86a3652..8513ca0eae 100644
--- a/deps/v8/test/mjsunit/debug-stepin-accessor.js
+++ b/deps/v8/test/mjsunit/debug-stepin-accessor.js
@@ -96,21 +96,21 @@ var d = {
};
function testGetter1_1() {
- expected_function_name = 'getter1';
+ expected_function_name = 'get getter1';
expected_source_line_text = ' return this.name; // getter 1';
debugger;
var x = c.getter1;
}
function testGetter1_2() {
- expected_function_name = 'getter1';
+ expected_function_name = 'get getter1';
expected_source_line_text = ' return this.name; // getter 1';
debugger;
var x = c['getter1'];
}
function testGetter1_3() {
- expected_function_name = 'getter1';
+ expected_function_name = 'get getter1';
expected_source_line_text = ' return this.name; // getter 1';
for (var i = 1; i < 2; i++) {
debugger;
@@ -119,14 +119,14 @@ function testGetter1_3() {
}
function testGetter1_4() {
- expected_function_name = 'getter1';
+ expected_function_name = 'get getter1';
expected_source_line_text = ' return this.name; // getter 1';
debugger;
var x = d.c.getter1;
}
function testGetter1_5() {
- expected_function_name = 'getter1';
+ expected_function_name = 'get getter1';
expected_source_line_text = ' return this.name; // getter 1';
for (var i = 2; i != 1; i--);
debugger;
@@ -134,7 +134,7 @@ function testGetter1_5() {
}
function testGetter2_1() {
- expected_function_name = 'getter2';
+ expected_function_name = 'get getter2';
expected_source_line_text = ' return { // getter 2';
for (var i = 2; i != 1; i--);
debugger;
@@ -172,21 +172,21 @@ function testIndexedSetter3_1() {
}
function testSetter1_1() {
- expected_function_name = 'setter1';
+ expected_function_name = 'set setter1';
expected_source_line_text = ' this.name = n; // setter 1';
debugger;
d.c.setter1 = 'aa';
}
function testSetter1_2() {
- expected_function_name = 'setter1';
+ expected_function_name = 'set setter1';
expected_source_line_text = ' this.name = n; // setter 1';
debugger;
d.c['setter1'] = 'bb';
}
function testSetter1_3() {
- expected_function_name = 'setter1';
+ expected_function_name = 'set setter1';
expected_source_line_text = ' this.name = n; // setter 1';
for (var i = 2; i != 1; i--);
debugger;
@@ -199,14 +199,14 @@ var e = {
e.__proto__ = c;
function testProtoGetter1_1() {
- expected_function_name = 'getter1';
+ expected_function_name = 'get getter1';
expected_source_line_text = ' return this.name; // getter 1';
debugger;
var x = e.getter1;
}
function testProtoSetter1_1() {
- expected_function_name = 'setter1';
+ expected_function_name = 'set setter1';
expected_source_line_text = ' this.name = n; // setter 1';
debugger;
e.setter1 = 'aa';
@@ -227,7 +227,7 @@ function testProtoIndexedSetter3_1() {
}
function testProtoSetter1_2() {
- expected_function_name = 'setter1';
+ expected_function_name = 'set setter1';
expected_source_line_text = ' this.name = n; // setter 1';
for (var i = 2; i != 1; i--);
debugger;
@@ -240,7 +240,7 @@ for (var n in this) {
}
state = 1;
this[n]();
- assertNull(exception);
+ if (exception) throw exception;
assertEquals(4, state);
}
diff --git a/deps/v8/test/mjsunit/debug-stepin-positions.js b/deps/v8/test/mjsunit/debug-stepin-positions.js
deleted file mode 100644
index ac010aac42..0000000000
--- a/deps/v8/test/mjsunit/debug-stepin-positions.js
+++ /dev/null
@@ -1,227 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-debug-as debug --nocrankshaft
-// Get the Debug object exposed from the debug context global object.
-Debug = debug.Debug
-
-function DebuggerStatement() {
- debugger; /*pause*/
-}
-
-function TestCase(fun, frame_number, line_number) {
- var exception = false;
- var codeSnippet = undefined;
- var resultPositions = undefined;
- var step = 0;
-
- function listener(event, exec_state, event_data, data) {
- try {
- if (event == Debug.DebugEvent.Break ||
- event == Debug.DebugEvent.Exception) {
- if (step++ > 0) return;
- assertHasLineMark(/pause/, exec_state.frame(0));
- assertHasLineMark(/positions/, exec_state.frame(frame_number));
- var frame = exec_state.frame(frame_number);
- codeSnippet = frame.sourceLineText();
- resultPositions = frame.stepInPositions();
- }
- } catch (e) {
- exception = e
- }
-
- function assertHasLineMark(mark, frame) {
- var line = frame.sourceLineText();
- if (!mark.exec(frame.sourceLineText())) {
- throw new Error("Line " + line + " should contain mark " + mark);
- }
- }
- }
-
- Debug.setListener(listener);
-
- var breakpointId;
- if (line_number) breakpointId = Debug.setBreakPoint(fun, line_number);
-
- fun();
-
- if (line_number) Debug.clearBreakPoint(breakpointId);
-
- Debug.setListener(null);
-
- assertTrue(!exception, exception);
-
- var expectedPositions = {};
- var markPattern = new RegExp("/\\*#\\*/", "g");
-
- var matchResult;
- while ( (matchResult = markPattern.exec(codeSnippet)) ) {
- expectedPositions[matchResult.index] = true;
- }
-
- print(codeSnippet);
-
- var decoratedResult = codeSnippet;
-
- function replaceStringRange(s, pos, substitute) {
- return s.substring(0, pos) + substitute +
- s.substring(pos + substitute.length);
- }
-
- var markLength = 5;
- var unexpectedPositionFound = false;
-
- for (var i = 0; i < resultPositions.length; i++) {
- var col = resultPositions[i].position.column - markLength;
- if (expectedPositions[col]) {
- delete expectedPositions[col];
- decoratedResult = replaceStringRange(decoratedResult, col, "*YES*");
- } else {
- decoratedResult = replaceStringRange(decoratedResult, col, "!BAD!");
- unexpectedPositionFound = true;
- }
- }
-
- print(decoratedResult);
-
- for (var n in expectedPositions) {
- assertTrue(false, "Some positions are not reported: " + decoratedResult);
- break;
- }
- assertFalse(unexpectedPositionFound, "Found unexpected position: " +
- decoratedResult);
-}
-
-function TestCaseWithDebugger(fun) {
- TestCase(fun, 1);
-}
-
-function TestCaseWithBreakpoint(fun, line_number, frame_number) {
- TestCase(fun, frame_number, line_number);
-}
-
-function TestCaseWithException(fun, frame_number) {
- Debug.setBreakOnException();
- TestCase(fun, frame_number);
- Debug.clearBreakOnException();
-}
-
-
-// Test cases.
-
-// Step in position, when the function call that we are standing at is already
-// being executed.
-var fun = function() {
- function g(p) {
- throw String(p); /*pause*/
- }
- try {
- var res = [ g(1), /*#*/g(2) ]; /*positions*/
- } catch (e) {
- }
-};
-TestCaseWithBreakpoint(fun, 2, 1);
-TestCaseWithException(fun, 1);
-
-
-// Step in position, when the function call that we are standing at is raising
-// an exception.
-var fun = function() {
- var o = {
- g: function(p) {
- throw p;
- }
- };
- try {
- var res = [ /*#*/f(1), /*#*/g(2) ]; /*pause, positions*/
- } catch (e) {
- }
-};
-TestCaseWithException(fun, 0);
-
-
-// Step-in position, when already paused almost on the first call site.
-var fun = function() {
- function g(p) {
- throw p;
- }
- try {
- var res = [ /*#*/g(Math.rand), /*#*/g(2) ]; /*pause, positions*/
- } catch (e) {
- }
-};
-TestCaseWithBreakpoint(fun, 5, 0);
-
-// Step-in position, when already paused on the first call site.
-var fun = function() {
- function g() {
- throw "Debug";
- }
- try {
- var res = [ /*#*/g(), /*#*/g() ]; /*pause, positions*/
- } catch (e) {
- }
-};
-TestCaseWithBreakpoint(fun, 5, 0);
-
-
-// Method calls.
-var fun = function() {
- var data = {
- a: function() {}
- };
- var res = [ DebuggerStatement(), data./*#*/a(), data[/*#*/String("a")]/*#*/(), data["a"]/*#*/(), data.a, data["a"] ]; /*positions*/
-};
-TestCaseWithDebugger(fun);
-
-// Function call on a value.
-var fun = function() {
- function g(p) {
- return g;
- }
- var res = [ DebuggerStatement(), /*#*/g(2), /*#*/g(2)/*#*/(3), /*#*/g(0)/*#*/(0)/*#*/(g) ]; /*positions*/
-};
-TestCaseWithDebugger(fun);
-
-// Local function call, closure function call,
-// local function construction call.
-var fun = (function(p) {
- return function() {
- function f(a, b) {
- }
- var res = /*#*/f(DebuggerStatement(), /*#*/p(/*#*/new f())); /*positions*/
- };
-})(Object);
-TestCaseWithDebugger(fun);
-
-// Global function, global object construction, calls before pause point.
-var fun = (function(p) {
- return function() {
- var res = [ Math.abs(new Object()), DebuggerStatement(), Math./*#*/abs(4), /*#*/new Object()./*#*/toString() ]; /*positions*/
- };
-})(Object);
-TestCaseWithDebugger(fun);
diff --git a/deps/v8/test/mjsunit/declare-locally.js b/deps/v8/test/mjsunit/declare-locally.js
index 45d30e0947..f03217e739 100644
--- a/deps/v8/test/mjsunit/declare-locally.js
+++ b/deps/v8/test/mjsunit/declare-locally.js
@@ -27,19 +27,13 @@
// Make sure that we're not overwriting global
// properties defined in the prototype chain too
-// early when shadowing them with var/const
+// early when shadowing them with var
// declarations.
// This exercises the code in runtime.cc in
// DeclareGlobal...Locally().
-// Flags: --legacy-const
-
this.__proto__.foo = 42;
-this.__proto__.bar = 87;
eval("assertEquals(undefined, foo); var foo = 87;");
assertEquals(87, foo);
-
-eval("assertEquals(undefined, bar); const bar = 42;");
-assertEquals(42, bar);
diff --git a/deps/v8/test/mjsunit/es6/array-concat.js b/deps/v8/test/mjsunit/es6/array-concat.js
index bc9e1a00cc..fe320d6858 100644
--- a/deps/v8/test/mjsunit/es6/array-concat.js
+++ b/deps/v8/test/mjsunit/es6/array-concat.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies --harmony-reflect
-
(function testArrayConcatArity() {
"use strict";
assertEquals(1, Array.prototype.concat.length);
@@ -803,9 +801,10 @@ logger.get = function(t, trap, r) {
log.length = 0;
assertEquals([obj], Array.prototype.concat.apply(obj));
- assertEquals(1, log.length);
+ assertEquals(2, log.length); // An extra read for the constructor
for (var i in log) assertSame(target, log[i][1]);
- assertEquals(["get", target, Symbol.isConcatSpreadable, obj], log[0]);
+ assertEquals(["get", target, "constructor", obj], log[0]);
+ assertEquals(["get", target, Symbol.isConcatSpreadable, obj], log[1]);
})();
@@ -827,14 +826,15 @@ logger.get = function(t, trap, r) {
log.length = 0;
assertEquals(["a", "b"], Array.prototype.concat.apply(obj));
- assertEquals(6, log.length);
+ assertEquals(7, log.length);
for (var i in log) assertSame(target, log[i][1]);
- assertEquals(["get", target, Symbol.isConcatSpreadable, obj], log[0]);
- assertEquals(["get", target, "length", obj], log[1]);
- assertEquals(["has", target, "0"], log[2]);
- assertEquals(["get", target, "0", obj], log[3]);
- assertEquals(["has", target, "1"], log[4]);
- assertEquals(["get", target, "1", obj], log[5]);
+ assertEquals(["get", target, "constructor", obj], log[0]);
+ assertEquals(["get", target, Symbol.isConcatSpreadable, obj], log[1]);
+ assertEquals(["get", target, "length", obj], log[2]);
+ assertEquals(["has", target, "0"], log[3]);
+ assertEquals(["get", target, "0", obj], log[4]);
+ assertEquals(["has", target, "1"], log[5]);
+ assertEquals(["get", target, "1", obj], log[6]);
})();
diff --git a/deps/v8/test/mjsunit/es6/array-iterator.js b/deps/v8/test/mjsunit/es6/array-iterator.js
index 5fab0fbf86..d2d19b059d 100644
--- a/deps/v8/test/mjsunit/es6/array-iterator.js
+++ b/deps/v8/test/mjsunit/es6/array-iterator.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --harmony-tostring
+// Flags: --allow-natives-syntax
var NONE = 0;
@@ -160,7 +160,7 @@ function TestArrayIteratorPrototype() {
assertArrayEquals(['next'],
Object.getOwnPropertyNames(ArrayIteratorPrototype));
assertHasOwnProperty(ArrayIteratorPrototype, 'next', DONT_ENUM);
- assertHasOwnProperty(ArrayIteratorPrototype, Symbol.iterator, DONT_ENUM);
+ assertFalse(ArrayIteratorPrototype.hasOwnProperty(Symbol.iterator));
assertEquals("[object Array Iterator]",
Object.prototype.toString.call(iterator));
diff --git a/deps/v8/test/mjsunit/es6/array-prototype-values.js b/deps/v8/test/mjsunit/es6/array-prototype-values.js
new file mode 100644
index 0000000000..64162c47c8
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/array-prototype-values.js
@@ -0,0 +1,15 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-array-prototype-values
+
+// Functionality of the values iterator is tested elsewhere; this test
+// merely verifies that the 'values' property is set up correctly.
+var valuesDesc = Object.getOwnPropertyDescriptor(Array.prototype, 'values');
+assertEquals('object', typeof valuesDesc);
+assertSame(Array.prototype[Symbol.iterator], valuesDesc.value);
+assertTrue(valuesDesc.configurable);
+assertTrue(valuesDesc.writable);
+assertFalse(valuesDesc.enumerable);
+assertTrue(Array.prototype[Symbol.unscopables].values);
diff --git a/deps/v8/test/mjsunit/es6/array-tostring.js b/deps/v8/test/mjsunit/es6/array-tostring.js
index 397fde4ab1..973b3c3628 100644
--- a/deps/v8/test/mjsunit/es6/array-tostring.js
+++ b/deps/v8/test/mjsunit/es6/array-tostring.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-tostring
-
var global = this;
var funs = {
diff --git a/deps/v8/test/mjsunit/es6/block-eval-var-over-legacy-const.js b/deps/v8/test/mjsunit/es6/block-eval-var-over-legacy-const.js
deleted file mode 100644
index be1687b5e9..0000000000
--- a/deps/v8/test/mjsunit/es6/block-eval-var-over-legacy-const.js
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-sloppy --harmony-sloppy-let --harmony-sloppy-function
-// Flags: --legacy-const
-
-// Legacy-const-let conflict in a function throws, even if the legacy const
-// is in an eval
-
-// Throws at the top level of a function
-assertThrows(function() {
- let x = 1;
- eval('const x = 2');
-}, TypeError);
-
-// If the eval is in its own block scope, throws
-assertThrows(function() {
- let y = 1;
- { eval('const y = 2'); }
-}, TypeError);
-
-// If the let is in its own block scope, with the eval, throws
-assertThrows(function() {
- {
- let x = 1;
- eval('const x = 2');
- }
-}, TypeError);
-
-// Legal if the let is no longer visible
-assertDoesNotThrow(function() {
- {
- let x = 1;
- }
- eval('const x = 2');
-});
-
-// In global scope
-let caught = false;
-try {
- let z = 1;
- eval('const z = 2');
-} catch (e) {
- caught = true;
-}
-assertTrue(caught);
-
-// Let declarations beyond a function boundary don't conflict
-caught = false;
-try {
- let a = 1;
- (function() {
- eval('const a');
- })();
-} catch (e) {
- caught = true;
-}
-assertFalse(caught);
-
-// legacy const across with doesn't conflict
-caught = false;
-try {
- (function() {
- with ({x: 1}) {
- eval("const x = 2;");
- }
- })();
-} catch (e) {
- caught = true;
-}
-assertFalse(caught);
-
-// legacy const can still conflict with let across a with
-caught = false;
-try {
- (function() {
- let x;
- with ({x: 1}) {
- eval("const x = 2;");
- }
- })();
-} catch (e) {
- caught = true;
-}
-assertTrue(caught);
diff --git a/deps/v8/test/mjsunit/es6/block-let-contextual-sloppy.js b/deps/v8/test/mjsunit/es6/block-let-contextual-sloppy.js
index 20ca10719b..ac7bca107e 100644
--- a/deps/v8/test/mjsunit/es6/block-let-contextual-sloppy.js
+++ b/deps/v8/test/mjsunit/es6/block-let-contextual-sloppy.js
@@ -2,10 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-sloppy --harmony-sloppy-let --harmony-destructuring-bind
-// Flags: --legacy-const
+// Flags: --harmony-sloppy --harmony-sloppy-let
-// let is usable as a variable with var or legacy const, not let or ES6 const
+// let is usable as a variable with var, but not let or ES6 const
(function (){
assertEquals(undefined, let);
@@ -50,13 +49,6 @@ assertThrows(function() { return let; }, ReferenceError);
assertEquals(1, obj.x);
})();
-(function () {
- let obj = {};
- const [let] = [function() { return obj; }];
- let().x = 1;
- assertEquals(1, obj.x);
-})();
-
(function() {
function let() {
return 1;
diff --git a/deps/v8/test/mjsunit/es6/built-in-accessor-names.js b/deps/v8/test/mjsunit/es6/built-in-accessor-names.js
index c5f8cec3c0..8c29d368b7 100644
--- a/deps/v8/test/mjsunit/es6/built-in-accessor-names.js
+++ b/deps/v8/test/mjsunit/es6/built-in-accessor-names.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-tostring
-
'use strict';
function assertGetterName(expected, object, name) {
diff --git a/deps/v8/test/mjsunit/es6/classes-proxy.js b/deps/v8/test/mjsunit/es6/classes-proxy.js
index 09d12c28dc..4642df8509 100644
--- a/deps/v8/test/mjsunit/es6/classes-proxy.js
+++ b/deps/v8/test/mjsunit/es6/classes-proxy.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-proxies --harmony-reflect
+// Flags: --allow-natives-syntax
function CreateConstructableProxy(handler) {
return new Proxy(function(){}, handler);
diff --git a/deps/v8/test/mjsunit/es6/classes-subclass-builtins.js b/deps/v8/test/mjsunit/es6/classes-subclass-builtins.js
index 313aad1d8f..7669ef3a8a 100644
--- a/deps/v8/test/mjsunit/es6/classes-subclass-builtins.js
+++ b/deps/v8/test/mjsunit/es6/classes-subclass-builtins.js
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-reflect --harmony-regexp-subclass
-// Flags: --expose-gc --strong-mode
+// Flags: --allow-natives-syntax --harmony-regexp-subclass
+// Flags: --expose-gc
"use strict";
@@ -78,11 +78,6 @@ function checkPrototypeChain(object, constructors) {
constructor(...args) {
assertFalse(new.target === undefined);
super(...args);
- // Strong functions are not extensible, so don't add fields.
- if (args[args.length - 1].indexOf("use strong") >= 0) {
- assertThrows(()=>{ this.a = 10; }, TypeError);
- return;
- }
this.a = 42;
this.d = 4.2;
this.o = {foo:153};
@@ -95,26 +90,24 @@ function checkPrototypeChain(object, constructors) {
assertNull(Object.getOwnPropertyDescriptor(sloppy_func, "caller").value);
assertEquals(undefined, Object.getOwnPropertyDescriptor(strict_func, "caller"));
- function CheckFunction(func, is_strong) {
+ function CheckFunction(func) {
assertEquals("function", typeof func);
assertTrue(func instanceof Object);
assertTrue(func instanceof Function);
assertTrue(func instanceof A);
checkPrototypeChain(func, [A, Function, Object]);
- if (!is_strong) {
- assertEquals(42, func.a);
- assertEquals(4.2, func.d);
- assertEquals(153, func.o.foo);
- assertTrue(undefined !== func.prototype);
- func.prototype.bar = "func.bar";
- var obj = new func();
- assertTrue(obj instanceof Object);
- assertTrue(obj instanceof func);
- assertEquals("object", typeof obj);
- assertEquals(113, obj.foo);
- assertEquals("func.bar", obj.bar);
- delete func.prototype.bar;
- }
+ assertEquals(42, func.a);
+ assertEquals(4.2, func.d);
+ assertEquals(153, func.o.foo);
+ assertTrue(undefined !== func.prototype);
+ func.prototype.bar = "func.bar";
+ var obj = new func();
+ assertTrue(obj instanceof Object);
+ assertTrue(obj instanceof func);
+ assertEquals("object", typeof obj);
+ assertEquals(113, obj.foo);
+ assertEquals("func.bar", obj.bar);
+ delete func.prototype.bar;
}
var source = "this.foo = 113;";
@@ -135,15 +128,6 @@ function checkPrototypeChain(object, constructors) {
var strict_func1 = new A("'use strict'; return 312;");
assertTrue(%HaveSameMap(strict_func, strict_func1));
- // Strong function
- var strong_func = new A("'use strong'; " + source);
- assertFalse(%HaveSameMap(strong_func, sloppy_func));
- assertFalse(%HaveSameMap(strong_func, strict_func));
- CheckFunction(strong_func, true);
-
- var strong_func1 = new A("'use strong'; return 312;");
- assertTrue(%HaveSameMap(strong_func, strong_func1));
-
gc();
})();
@@ -592,11 +576,6 @@ function TestMapSetSubclassing(container, is_map) {
constructor(...args) {
assertFalse(new.target === undefined);
super(...args);
- // Strong functions are not extensible, so don't add fields.
- if (args[args.length - 1].indexOf("use strong") >= 0) {
- assertThrows(()=>{ this.a = 10; }, TypeError);
- return;
- }
this.a = 42;
this.d = 4.2;
this.o = {foo:153};
@@ -610,35 +589,34 @@ function TestMapSetSubclassing(container, is_map) {
assertEquals(undefined, Object.getOwnPropertyDescriptor(sloppy_func, "caller"));
assertEquals(undefined, Object.getOwnPropertyDescriptor(strict_func, "caller"));
- function CheckFunction(func, is_strong) {
+ function CheckFunction(func) {
assertEquals("function", typeof func);
assertTrue(func instanceof Object);
assertTrue(func instanceof Function);
assertTrue(func instanceof GeneratorFunction);
assertTrue(func instanceof A);
checkPrototypeChain(func, [A, GeneratorFunction, Function, Object]);
- if (!is_strong) {
- assertEquals(42, func.a);
- assertEquals(4.2, func.d);
- assertEquals(153, func.o.foo);
-
- assertTrue(undefined !== func.prototype);
- func.prototype.bar = "func.bar";
- var obj = func(); // Generator object.
- assertTrue(obj instanceof Object);
- assertTrue(obj instanceof func);
- assertEquals("object", typeof obj);
- assertEquals("func.bar", obj.bar);
- delete func.prototype.bar;
-
- assertPropertiesEqual({done: false, value: 1}, obj.next());
- assertPropertiesEqual({done: false, value: 1}, obj.next());
- assertPropertiesEqual({done: false, value: 2}, obj.next());
- assertPropertiesEqual({done: false, value: 3}, obj.next());
- assertPropertiesEqual({done: false, value: 5}, obj.next());
- assertPropertiesEqual({done: false, value: 8}, obj.next());
- assertPropertiesEqual({done: true, value: undefined}, obj.next());
- }
+
+ assertEquals(42, func.a);
+ assertEquals(4.2, func.d);
+ assertEquals(153, func.o.foo);
+
+ assertTrue(undefined !== func.prototype);
+ func.prototype.bar = "func.bar";
+ var obj = func(); // Generator object.
+ assertTrue(obj instanceof Object);
+ assertTrue(obj instanceof func);
+ assertEquals("object", typeof obj);
+ assertEquals("func.bar", obj.bar);
+ delete func.prototype.bar;
+
+ assertPropertiesEqual({done: false, value: 1}, obj.next());
+ assertPropertiesEqual({done: false, value: 1}, obj.next());
+ assertPropertiesEqual({done: false, value: 2}, obj.next());
+ assertPropertiesEqual({done: false, value: 3}, obj.next());
+ assertPropertiesEqual({done: false, value: 5}, obj.next());
+ assertPropertiesEqual({done: false, value: 8}, obj.next());
+ assertPropertiesEqual({done: true, value: undefined}, obj.next());
}
var source = "yield 1; yield 1; yield 2; yield 3; yield 5; yield 8;";
@@ -659,15 +637,6 @@ function TestMapSetSubclassing(container, is_map) {
var strict_func1 = new A("'use strict'; yield 312;");
assertTrue(%HaveSameMap(strict_func, strict_func1));
- // Strong generator function
- var strong_func = new A("'use strong'; " + source);
- assertFalse(%HaveSameMap(strong_func, sloppy_func));
- assertFalse(%HaveSameMap(strong_func, strict_func));
- CheckFunction(strong_func, true);
-
- var strong_func1 = new A("'use strong'; yield 312;");
- assertTrue(%HaveSameMap(strong_func, strong_func1));
-
gc();
})();
@@ -950,7 +919,13 @@ function TestMapSetSubclassing(container, is_map) {
var o = Reflect.construct(RegExp, [pattern], f);
assertEquals(["match", "tostring"], log);
- assertEquals(/biep/, o);
+ // TODO(littledan): Is the RegExp constructor correct to create
+ // the internal slots and do these type checks this way?
+ assertEquals("biep", %_RegExpSource(o));
+ assertThrows(() => Object.getOwnPropertyDescriptor(RegExp.prototype,
+ 'source').get(o),
+ TypeError);
+ assertEquals("/undefined/undefined", RegExp.prototype.toString.call(o));
assertTrue(o.__proto__ === p2);
assertTrue(f.prototype === p3);
})();
diff --git a/deps/v8/test/mjsunit/es6/classes.js b/deps/v8/test/mjsunit/es6/classes.js
index ac10f0e033..4dabda8e44 100644
--- a/deps/v8/test/mjsunit/es6/classes.js
+++ b/deps/v8/test/mjsunit/es6/classes.js
@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-sloppy --allow-natives-syntax
+// Flags: --harmony-sloppy --harmony-function-name --allow-natives-syntax
+// Flags: --harmony-do-expressions
(function TestBasics() {
var C = class C {}
@@ -22,13 +23,11 @@
class D2 { constructor() {} }
assertEquals('D2', D2.name);
- // TODO(arv): The logic for the name of anonymous functions in ES6 requires
- // the below to be 'E';
var E = class {}
- assertEquals('', E.name); // Should be 'E'.
+ assertEquals('E', E.name); // Should be 'E'.
var F = class { constructor() {} };
- assertEquals('', F.name); // Should be 'F'.
+ assertEquals('F', F.name); // Should be 'F'.
})();
@@ -996,3 +995,55 @@ function testClassRestrictedProperties(C) {
testClassRestrictedProperties(
class extends Class { constructor() { super(); } });
})();
+
+
+(function testReturnFromClassLiteral() {
+
+ function usingDoExpressionInBody() {
+ let x = 42;
+ let dummy = function() {x};
+ try {
+ class C {
+ dummy() {C}
+ [do {return}]() {}
+ };
+ } finally {
+ return x;
+ }
+ }
+ assertEquals(42, usingDoExpressionInBody());
+
+ function usingDoExpressionInExtends() {
+ let x = 42;
+ let dummy = function() {x};
+ try {
+ class C extends (do {return}) { dummy() {C} };
+ } finally {
+ return x;
+ }
+ }
+ assertEquals(42, usingDoExpressionInExtends());
+
+ function usingYieldInBody() {
+ function* foo() {
+ class C {
+ [yield]() {}
+ }
+ }
+ var g = foo();
+ g.next();
+ return g.return(42).value;
+ }
+ assertEquals(42, usingYieldInBody());
+
+ function usingYieldInExtends() {
+ function* foo() {
+ class C extends (yield) {};
+ }
+ var g = foo();
+ g.next();
+ return g.return(42).value;
+ }
+ assertEquals(42, usingYieldInExtends());
+
+})();
diff --git a/deps/v8/test/mjsunit/es6/classof-proxy.js b/deps/v8/test/mjsunit/es6/classof-proxy.js
index c3bc985bb9..02043614ba 100644
--- a/deps/v8/test/mjsunit/es6/classof-proxy.js
+++ b/deps/v8/test/mjsunit/es6/classof-proxy.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-proxies
+// Flags: --allow-natives-syntax
function test_function(o) {
if (%_ClassOf(o) === "Function") {
diff --git a/deps/v8/test/mjsunit/es6/collection-iterator.js b/deps/v8/test/mjsunit/es6/collection-iterator.js
index 18b3f1a5e5..a92c9aeda0 100644
--- a/deps/v8/test/mjsunit/es6/collection-iterator.js
+++ b/deps/v8/test/mjsunit/es6/collection-iterator.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-tostring
+// Flags: --allow-natives-syntax
(function TestSetIterator() {
diff --git a/deps/v8/test/mjsunit/es6/collections.js b/deps/v8/test/mjsunit/es6/collections.js
index e410e2259c..1664a93bde 100644
--- a/deps/v8/test/mjsunit/es6/collections.js
+++ b/deps/v8/test/mjsunit/es6/collections.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-gc --allow-natives-syntax --harmony-tostring
+// Flags: --expose-gc --allow-natives-syntax
function assertSize(expected, collection) {
diff --git a/deps/v8/test/mjsunit/es6/completion.js b/deps/v8/test/mjsunit/es6/completion.js
index 05565bfb45..7559514421 100644
--- a/deps/v8/test/mjsunit/es6/completion.js
+++ b/deps/v8/test/mjsunit/es6/completion.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-sloppy-let --no-legacy-const
+// Flags: --harmony-sloppy-let
function assertUndef(x) {
diff --git a/deps/v8/test/mjsunit/es6/debug-blockscopes.js b/deps/v8/test/mjsunit/es6/debug-blockscopes.js
index d3c36207f1..193ad705cb 100644
--- a/deps/v8/test/mjsunit/es6/debug-blockscopes.js
+++ b/deps/v8/test/mjsunit/es6/debug-blockscopes.js
@@ -26,7 +26,6 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --expose-debug-as debug --allow-natives-syntax
-// Flags: --debug-eval-readonly-locals
// The functions used for testing backtraces. They are at the top to make the
// testing of source line/column easier.
diff --git a/deps/v8/test/mjsunit/es6/debug-evaluate-receiver-before-super.js b/deps/v8/test/mjsunit/es6/debug-evaluate-receiver-before-super.js
new file mode 100644
index 0000000000..dc8ce2cacd
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/debug-evaluate-receiver-before-super.js
@@ -0,0 +1,39 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+// Test that debug-evaluate doesn't crash when this is used before super() call
+// in constructor.
+
+Debug = debug.Debug
+
+var result;
+
+function listener(event, exec_state, event_data, data)
+{
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ result = exec_state.frame(0).evaluate("this.a").value();
+ }
+ } catch (e) {
+ result = e.message;
+ }
+}
+
+Debug.setListener(listener);
+
+class A { constructor () { this.a = 239; } }
+class B extends A {
+ constructor () {
+ debugger;
+ assertEquals("Cannot read property 'a' of undefined", result);
+ super();
+ debugger;
+ assertEquals(239, result);
+ }
+}
+new B();
+
+Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/es6/debug-liveedit-new-target-1.js b/deps/v8/test/mjsunit/es6/debug-liveedit-new-target-1.js
index 043c5f10f7..8855742abf 100644
--- a/deps/v8/test/mjsunit/es6/debug-liveedit-new-target-1.js
+++ b/deps/v8/test/mjsunit/es6/debug-liveedit-new-target-1.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-reflect --expose-debug-as debug --allow-natives-syntax
+// Flags: --expose-debug-as debug --allow-natives-syntax
// Test that live-editing a frame that uses new.target fails.
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/events.js b/deps/v8/test/mjsunit/es6/debug-promises/events.js
deleted file mode 100644
index 3fcb22ff27..0000000000
--- a/deps/v8/test/mjsunit/es6/debug-promises/events.js
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --expose-debug-as debug
-
-Debug = debug.Debug;
-
-var eventsExpected = 16;
-var exception = null;
-var result = [];
-
-function updatePromise(promise, parentPromise, status, value) {
- var i;
- for (i = 0; i < result.length; ++i) {
- if (result[i].promise === promise) {
- result[i].parentPromise = parentPromise || result[i].parentPromise;
- result[i].status = status || result[i].status;
- result[i].value = value || result[i].value;
- break;
- }
- }
- assertTrue(i < result.length);
-}
-
-function listener(event, exec_state, event_data, data) {
- if (event != Debug.DebugEvent.PromiseEvent) return;
- try {
- eventsExpected--;
- assertTrue(event_data.promise().isPromise());
- if (event_data.status() === 0) {
- // New promise.
- assertEquals("pending", event_data.promise().status());
- result.push({ promise: event_data.promise().value(), status: 0 });
- assertTrue(exec_state.frame(0).sourceLineText().indexOf("// event") > 0);
- } else if (event_data.status() !== undefined) {
- // Resolve/reject promise.
- updatePromise(event_data.promise().value(),
- undefined,
- event_data.status(),
- event_data.value().value());
- } else {
- // Chain promises.
- assertTrue(event_data.parentPromise().isPromise());
- updatePromise(event_data.promise().value(),
- event_data.parentPromise().value());
- assertTrue(exec_state.frame(0).sourceLineText().indexOf("// event") > 0);
- }
- } catch (e) {
- print(e + e.stack)
- exception = e;
- }
-}
-
-Debug.setListener(listener);
-
-function resolver(resolve, reject) { resolve(); }
-
-var p1 = new Promise(resolver); // event
-var p2 = p1.then().then(); // event
-var p3 = new Promise(function(resolve, reject) { // event
- reject("rejected");
-});
-var p4 = p3.then(); // event
-var p5 = p1.then(); // event
-
-function assertAsync(b, s) {
- if (b) {
- print(s, "succeeded");
- } else {
- %AbortJS(s + " FAILED!");
- }
-}
-
-function testDone(iteration) {
- function checkResult() {
- if (eventsExpected === 0) {
- assertAsync(result.length === 6, "result.length");
-
- assertAsync(result[0].promise === p1, "result[0].promise");
- assertAsync(result[0].parentPromise === undefined,
- "result[0].parentPromise");
- assertAsync(result[0].status === 1, "result[0].status");
- assertAsync(result[0].value === undefined, "result[0].value");
-
- assertAsync(result[1].parentPromise === p1,
- "result[1].parentPromise");
- assertAsync(result[1].status === 1, "result[1].status");
-
- assertAsync(result[2].promise === p2, "result[2].promise");
-
- assertAsync(result[3].promise === p3, "result[3].promise");
- assertAsync(result[3].parentPromise === undefined,
- "result[3].parentPromise");
- assertAsync(result[3].status === -1, "result[3].status");
- assertAsync(result[3].value === "rejected", "result[3].value");
-
- assertAsync(result[4].promise === p4, "result[4].promise");
- assertAsync(result[4].parentPromise === p3,
- "result[4].parentPromise");
- assertAsync(result[4].status === -1, "result[4].status");
- assertAsync(result[4].value === "rejected", "result[4].value");
-
- assertAsync(result[5].promise === p5, "result[5].promise");
- assertAsync(result[5].parentPromise === p1,
- "result[5].parentPromise");
- assertAsync(result[5].status === 1, "result[5].status");
-
- assertAsync(exception === null, "exception === null");
- Debug.setListener(null);
- } else if (iteration > 10) {
- %AbortJS("Not all events were received!");
- } else {
- testDone(iteration + 1);
- }
- }
-
- var iteration = iteration || 0;
- %EnqueueMicrotask(checkResult);
-}
-
-testDone();
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/resolve-after-aborted-try-finally.js b/deps/v8/test/mjsunit/es6/debug-promises/resolve-after-aborted-try-finally.js
deleted file mode 100644
index 918ae2a2e8..0000000000
--- a/deps/v8/test/mjsunit/es6/debug-promises/resolve-after-aborted-try-finally.js
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-debug-as debug --allow-natives-syntax
-
-// Test debug events when we listen to all exceptions and
-// there is a catch handler for the exception thrown in a Promise.
-// We expect a normal Exception debug event to be triggered.
-
-Debug = debug.Debug;
-
-var events = [];
-
-function listener(event, exec_state, event_data, data) {
- if (event == Debug.DebugEvent.PromiseEvent) events.push(event_data.status());
-}
-
-Debug.setListener(listener);
-
-var p = new Promise(function(resolve, reject) {
- do {
- try {
- throw new Error("reject");
- } finally {
- break; // No rethrow.
- }
- } while (false);
- resolve();
-});
-
-assertEquals([0 /* create */, 1 /* resolve */], events);
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/resolve-after-try-catch.js b/deps/v8/test/mjsunit/es6/debug-promises/resolve-after-try-catch.js
deleted file mode 100644
index 298201f103..0000000000
--- a/deps/v8/test/mjsunit/es6/debug-promises/resolve-after-try-catch.js
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-debug-as debug --allow-natives-syntax
-
-// Test debug events when we listen to all exceptions and
-// there is a catch handler for the exception thrown in a Promise.
-// We expect a normal Exception debug event to be triggered.
-
-Debug = debug.Debug;
-
-var events = [];
-
-function listener(event, exec_state, event_data, data) {
- if (event == Debug.DebugEvent.PromiseEvent) events.push(event_data.status());
-}
-
-Debug.setListener(listener);
-
-var p = new Promise(function (resolve, reject) {
- try {
- throw new Error("reject");
- } catch (e) {
- }
- resolve();
-});
-
-assertEquals([0 /* create */, 1 /* resolve */], events);
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/rethrow-in-try-finally.js b/deps/v8/test/mjsunit/es6/debug-promises/rethrow-in-try-finally.js
deleted file mode 100644
index b1e2ff98e1..0000000000
--- a/deps/v8/test/mjsunit/es6/debug-promises/rethrow-in-try-finally.js
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-debug-as debug --allow-natives-syntax
-
-// Test debug events when we listen to all exceptions and
-// there is a catch handler for the exception thrown in a Promise.
-// We expect a normal Exception debug event to be triggered.
-
-Debug = debug.Debug;
-
-var events = [];
-
-function listener(event, exec_state, event_data, data) {
- if (event == Debug.DebugEvent.PromiseEvent) events.push(event_data.status());
-}
-
-Debug.setListener(listener);
-
-var p = new Promise(function(resolve, reject) {
- try {
- throw new Error("reject");
- } finally {
- // Implicit rethrow.
- }
- resolve();
-});
-
-assertEquals([0 /* create */, -1 /* rethrown */], events);
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/throw-with-throw-in-reject.js b/deps/v8/test/mjsunit/es6/debug-promises/throw-with-throw-in-reject.js
index 349d014701..5cf49f2fae 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/throw-with-throw-in-reject.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/throw-with-throw-in-reject.js
@@ -31,6 +31,7 @@ function MyPromise(resolver) {
};
MyPromise.prototype = new Promise(function() {});
+MyPromise.__proto__ = Promise;
p.constructor = MyPromise;
var q = p.chain(
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/throw-with-undefined-reject.js b/deps/v8/test/mjsunit/es6/debug-promises/throw-with-undefined-reject.js
index 69ee01ee41..6fe3b172bf 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/throw-with-undefined-reject.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/throw-with-undefined-reject.js
@@ -2,18 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax --promise-extra
-
-// Test debug events when an exception is thrown inside a Promise, which is
-// caught by a custom promise, which has no reject handler.
-// We expect two Exception debug events:
-// 1) when the exception is thrown in the promise q.
-// 2) when calling the undefined custom reject closure in MyPromise throws.
-
-Debug = debug.Debug;
-
-var expected_events = 2;
-var log = [];
+// A non-callable reject function throws eagerly
var p = new Promise(function(resolve, reject) {
log.push("resolve");
@@ -23,63 +12,11 @@ var p = new Promise(function(resolve, reject) {
function MyPromise(resolver) {
var reject = undefined;
var resolve = function() { };
- log.push("construct");
resolver(resolve, reject);
};
MyPromise.prototype = new Promise(function() {});
+MyPromise.__proto__ = Promise;
p.constructor = MyPromise;
-var q = p.chain(
- function() {
- log.push("throw caught");
- throw new Error("caught"); // event
- });
-
-function listener(event, exec_state, event_data, data) {
- try {
- if (event == Debug.DebugEvent.Exception) {
- expected_events--;
- assertTrue(expected_events >= 0);
- if (expected_events == 1) {
- assertTrue(
- exec_state.frame(0).sourceLineText().indexOf('// event') > 0);
- assertEquals("caught", event_data.exception().message);
- } else if (expected_events == 0) {
- // All of the frames on the stack are from native Javascript.
- assertEquals(0, exec_state.frameCount());
- assertEquals("(var).reject is not a function",
- event_data.exception().message);
- } else {
- assertUnreachable();
- }
- assertSame(q, event_data.promise());
- }
- } catch (e) {
- %AbortJS(e + "\n" + e.stack);
- }
-}
-
-Debug.setBreakOnUncaughtException();
-Debug.setListener(listener);
-
-log.push("end main");
-
-function testDone(iteration) {
- function checkResult() {
- try {
- assertTrue(iteration < 10);
- if (expected_events === 0) {
- assertEquals(["resolve", "construct", "end main", "throw caught"], log);
- } else {
- testDone(iteration + 1);
- }
- } catch (e) {
- %AbortJS(e + "\n" + e.stack);
- }
- }
-
- %EnqueueMicrotask(checkResult);
-}
-
-testDone(0);
+assertThrows(()=> p.then(function() { }), TypeError);
diff --git a/deps/v8/test/mjsunit/es6/debug-step-destructuring-assignment.js b/deps/v8/test/mjsunit/es6/debug-step-destructuring-assignment.js
new file mode 100644
index 0000000000..4fde928b38
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/debug-step-destructuring-assignment.js
@@ -0,0 +1,85 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+var exception = null;
+var Debug = debug.Debug;
+var break_count = 0;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ var source = exec_state.frame(0).sourceLineText();
+ print(source);
+ assertTrue(source.indexOf(`// B${break_count++}`) > 0);
+ if (source.indexOf("assertEquals") > 0) {
+ exec_state.prepareStep(Debug.StepAction.StepNext);
+ } else {
+ exec_state.prepareStep(Debug.StepAction.StepIn);
+ }
+ } catch (e) {
+ exception = e;
+ print(e);
+ }
+};
+
+Debug.setListener(listener);
+
+function f() {
+ var a, b, c, d;
+ debugger; // B0
+ [ // B1
+ a, // B2
+ b, // B3
+ c = 3 // B4
+ ] = [1, 2];
+ assertEquals({a:1,b:2,c:3}, {a, b, c}); // B5
+
+ [ // B6
+ a, // B7
+ [
+ b, // B8
+ c // B9
+ ],
+ d // B10
+ ] = [5, [6, 7], 8];
+ assertEquals({a:5,b:6,c:7,d:8}, {a, b, c, d}); // B11
+
+ [ // B12
+ a, // B13
+ b, // B14
+ ...c // B15
+ ] = [1, 2, 3, 4];
+ assertEquals({a:1,b:2,c:[3,4]}, {a, b, c}); // B16
+
+ ({ // B17
+ a, // B18
+ b, // B19
+ c = 7 // B20
+ } = {a: 5, b: 6});
+ assertEquals({a:5,b:6,c:7}, {a, b, c}); // B21
+
+ ({ // B22
+ a, // B23
+ b = return1(), // B24
+ c = return1() // B25
+ } = {a: 5, b: 6});
+ assertEquals({a:5,b:6,c:1}, {a, b, c}); // B28
+
+ ({ // B29
+ x : a, // B30
+ y : b, // B31
+ z : c = 3 // B32
+ } = {x: 1, y: 2});
+ assertEquals({a:1,b:2,c:3}, {a, b, c}); // B33
+} // B34
+
+function return1() {
+ return 1; // B26
+} // B27
+
+f();
+Debug.setListener(null); // B35
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/harmony/debug-step-destructuring-bind.js b/deps/v8/test/mjsunit/es6/debug-step-destructuring-bind.js
index a78431bb02..f670f525af 100644
--- a/deps/v8/test/mjsunit/harmony/debug-step-destructuring-bind.js
+++ b/deps/v8/test/mjsunit/es6/debug-step-destructuring-bind.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --harmony-destructuring-bind
+// Flags: --expose-debug-as debug
var exception = null;
var Debug = debug.Debug;
@@ -98,13 +98,13 @@ function test() {
assertEquals([3, 4, 6], [a, b, c]); // B45
}
- var { // B46
- x: a, // B47
- y: b = 9 // B48
+ var {
+ x: a, // B46
+ y: b = 9 // B47
} = { x: 4 };
- assertEquals([4, 9], [a, b]); // B49
-} // B50
+ assertEquals([4, 9], [a, b]); // B48
+} // B49
test();
-Debug.setListener(null); // B51
+Debug.setListener(null); // B50
assertNull(exception);
diff --git a/deps/v8/test/mjsunit/harmony/debug-stepin-default-parameters.js b/deps/v8/test/mjsunit/es6/debug-stepin-default-parameters.js
index 6ebf7ba726..aaac9f0d4e 100644
--- a/deps/v8/test/mjsunit/harmony/debug-stepin-default-parameters.js
+++ b/deps/v8/test/mjsunit/es6/debug-stepin-default-parameters.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --harmony-default-parameters
+// Flags: --expose-debug-as debug
Debug = debug.Debug
@@ -42,5 +42,5 @@ Debug.setListener(null); // c
assertNull(exception);
assertEquals("default", result);
-assertEquals(["a0","b0","f18b13","d2f18b13","e0f18b13","g2b13","h0b13","c0"],
+assertEquals(["a0","b13","f18b13","d2f18b13","e0f18b13","g2b13","h0b13","c0"],
log);
diff --git a/deps/v8/test/mjsunit/harmony/debug-stepin-proxies.js b/deps/v8/test/mjsunit/es6/debug-stepin-proxies.js
index 8595f404f0..4e71c79198 100644
--- a/deps/v8/test/mjsunit/harmony/debug-stepin-proxies.js
+++ b/deps/v8/test/mjsunit/es6/debug-stepin-proxies.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --harmony-proxies
+// Flags: --expose-debug-as debug
Debug = debug.Debug
@@ -54,8 +54,8 @@ assertEquals(42, get);
assertEquals([
"a0",
- "b0", "h4b20", "i2b20", // [[Has]]
- "c0", "j4c15", "k2c15", // [[Get]]
- "d0", "l4d11", "m2d11", // [[Set]]
+ "b17", "h4b20", "i2b20", // [[Has]]
+ "c15", "j4c15", "k2c15", // [[Get]]
+ "d0", "l4d11", "m2d11", // [[Set]]
"g0"
], log);
diff --git a/deps/v8/test/mjsunit/es6/debug-stepin-string-template.js b/deps/v8/test/mjsunit/es6/debug-stepin-string-template.js
index f500faeee2..2d8c39497e 100644
--- a/deps/v8/test/mjsunit/es6/debug-stepin-string-template.js
+++ b/deps/v8/test/mjsunit/es6/debug-stepin-string-template.js
@@ -48,7 +48,8 @@ assertNull(exception);
assertEquals([
"a0",
- "b0",
+ "b44",
+ "b13",
"d2b13",
"e0b13",
"b25",
diff --git a/deps/v8/test/mjsunit/es6/debug-stepin-tailcalls.js b/deps/v8/test/mjsunit/es6/debug-stepin-tailcalls.js
new file mode 100644
index 0000000000..6020ba9d50
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/debug-stepin-tailcalls.js
@@ -0,0 +1,46 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --harmony-tailcalls
+
+"use strict";
+
+var Debug = debug.Debug
+var exception = null;
+var breaks = 0;
+
+function f(x) {
+ if (x > 0) { // B3 B5 B7 B9
+ return f(x - 1); // B4 B6 B8
+ }
+} // B10
+
+function g(x) {
+ return f(x); // B2
+}
+
+function h(x) {
+ debugger; // B0
+ g(x); // B1
+} // B11
+
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ print(event_data.sourceLineText());
+ assertTrue(event_data.sourceLineText().indexOf(`B${breaks++}`) > 0);
+ exec_state.prepareStep(Debug.StepAction.StepIn);
+ } catch (e) {
+ exception = e;
+ };
+};
+
+Debug.setListener(listener);
+
+h(3);
+
+Debug.setListener(null); // B12
+assertNull(exception);
+assertEquals(13, breaks);
diff --git a/deps/v8/test/mjsunit/es6/debug-stepnext-for.js b/deps/v8/test/mjsunit/es6/debug-stepnext-for.js
index 932840a6f9..9d5641a4a3 100644
--- a/deps/v8/test/mjsunit/es6/debug-stepnext-for.js
+++ b/deps/v8/test/mjsunit/es6/debug-stepnext-for.js
@@ -91,7 +91,7 @@ print("log:\n"+ JSON.stringify(log));
// based on other values.
var expected = [
// Entry
- "a2","b2",
+ "a2",
// Empty for-in-var: get enumerable
"c16",
// Empty for-in: get enumerable
@@ -108,12 +108,12 @@ var expected = [
"i12","i10","i11","I4","i11","I4","i11","I4","i11",
// For-of-let: [Symbol.iterator](), next(), body, next(), ...
"j16","j14","j15","J4","j15","J4","j15","J4","j15",
- // For-var: var decl, condition, body, next, condition, body, ...
- "k7","k20","K4","k26","k20","K4","k26","k20","K4","k26","k20",
+ // For-var: init, condition, body, next, condition, body, ...
+ "k15","k20","K4","k26","k20","K4","k26","k20","K4","k26","k20",
// For: init, condition, body, next, condition, body, ...
"l7","l16","L4","l22","l16","L4","l22","l16","L4","l22","l16",
// For-let: init, condition, body, next, condition, body, ...
- "m7","m20","M4","m26","m20","M4","m26","m20","M4","m26","m20",
+ "m15","m20","M4","m26","m20","M4","m26","m20","M4","m26","m20",
// Exit.
"y0","z0",
]
diff --git a/deps/v8/test/mjsunit/es6/debug-stepout-tailcalls.js b/deps/v8/test/mjsunit/es6/debug-stepout-tailcalls.js
new file mode 100644
index 0000000000..db0878d7a9
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/debug-stepout-tailcalls.js
@@ -0,0 +1,45 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --harmony-tailcalls
+
+"use strict";
+
+var Debug = debug.Debug
+var exception = null;
+var breaks = 0;
+
+function f(x) {
+ if (x > 0) {
+ return f(x - 1); // Tail call
+ }
+ debugger; // Break 0
+}
+
+function g(x) {
+ return f(x); // Tail call
+}
+
+function h(x) {
+ g(x); // Not tail call
+} // Break 1
+
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ assertTrue(event_data.sourceLineText().indexOf(`Break ${breaks++}`) > 0);
+ exec_state.prepareStep(Debug.StepAction.StepOut);
+ } catch (e) {
+ exception = e;
+ };
+};
+
+Debug.setListener(listener);
+
+h(3);
+
+Debug.setListener(null); // Break 2
+assertNull(exception);
+assertEquals(3, breaks);
diff --git a/deps/v8/test/mjsunit/harmony/default-parameters-debug.js b/deps/v8/test/mjsunit/es6/default-parameters-debug.js
index ce9e626621..30e19c42af 100644
--- a/deps/v8/test/mjsunit/harmony/default-parameters-debug.js
+++ b/deps/v8/test/mjsunit/es6/default-parameters-debug.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --harmony-default-parameters
+// Flags: --expose-debug-as debug
// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug
diff --git a/deps/v8/test/mjsunit/harmony/default-parameters-destructuring.js b/deps/v8/test/mjsunit/es6/default-parameters-destructuring.js
index 47cca5c95b..50071f0e34 100644
--- a/deps/v8/test/mjsunit/harmony/default-parameters-destructuring.js
+++ b/deps/v8/test/mjsunit/es6/default-parameters-destructuring.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-default-parameters --harmony-destructuring-bind
-
(function TestSloppyEvalScoping() {
var x = 1;
diff --git a/deps/v8/test/mjsunit/harmony/default-parameters.js b/deps/v8/test/mjsunit/es6/default-parameters.js
index 8d1eb8b096..4e0bf542ef 100644
--- a/deps/v8/test/mjsunit/harmony/default-parameters.js
+++ b/deps/v8/test/mjsunit/es6/default-parameters.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-default-parameters
-
(function TestDefaults() {
function f1(x = 1) { return x }
diff --git a/deps/v8/test/mjsunit/harmony/destructuring-assignment-lazy.js b/deps/v8/test/mjsunit/es6/destructuring-assignment-lazy.js
index 8915eb97a3..fdae30ec63 100644
--- a/deps/v8/test/mjsunit/harmony/destructuring-assignment-lazy.js
+++ b/deps/v8/test/mjsunit/es6/destructuring-assignment-lazy.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-destructuring-assignment --harmony-destructuring-bind
// Flags: --min-preparse-length=0
function f() {
diff --git a/deps/v8/test/mjsunit/harmony/destructuring-assignment.js b/deps/v8/test/mjsunit/es6/destructuring-assignment.js
index bc8c424d8b..df9bb0e8c6 100644
--- a/deps/v8/test/mjsunit/harmony/destructuring-assignment.js
+++ b/deps/v8/test/mjsunit/es6/destructuring-assignment.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-destructuring-assignment --harmony-destructuring-bind
-
// script-level tests
var ox, oy = {}, oz;
({
diff --git a/deps/v8/test/mjsunit/harmony/destructuring-parameters-literalcount-nolazy.js b/deps/v8/test/mjsunit/es6/destructuring-parameters-literalcount-nolazy.js
index 5b90fb17a9..0317509194 100644
--- a/deps/v8/test/mjsunit/harmony/destructuring-parameters-literalcount-nolazy.js
+++ b/deps/v8/test/mjsunit/es6/destructuring-parameters-literalcount-nolazy.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-destructuring-bind
// Flags: --no-lazy --allow-natives-syntax
diff --git a/deps/v8/test/mjsunit/harmony/destructuring-parameters-literalcount.js b/deps/v8/test/mjsunit/es6/destructuring-parameters-literalcount.js
index 140ed9da52..77a3226788 100644
--- a/deps/v8/test/mjsunit/harmony/destructuring-parameters-literalcount.js
+++ b/deps/v8/test/mjsunit/es6/destructuring-parameters-literalcount.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-destructuring-bind
// Flags: --allow-natives-syntax
diff --git a/deps/v8/test/mjsunit/harmony/destructuring.js b/deps/v8/test/mjsunit/es6/destructuring.js
index e84abd112f..1f16c45270 100644
--- a/deps/v8/test/mjsunit/harmony/destructuring.js
+++ b/deps/v8/test/mjsunit/es6/destructuring.js
@@ -1,9 +1,6 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// Flags: --harmony-destructuring-bind
-// Flags: --harmony-default-parameters
(function TestObjectLiteralPattern() {
var { x : x, y : y, get, set } = { x : 1, y : 2, get: 3, set: 4 };
@@ -1188,4 +1185,7 @@
assertEquals("hello", foo);
assertEquals("world", bar);
assertEquals(42, baz);
+
+ assertEquals(undefined, eval('try {throw {foo: 1, bar: 2}} catch({foo}) {}'));
+ assertEquals(undefined, eval('try {throw [1, 2, 3]} catch([x]) {}'));
})();
diff --git a/deps/v8/test/mjsunit/es6/generators-objects.js b/deps/v8/test/mjsunit/es6/generators-objects.js
index f304738841..a0c3b809be 100644
--- a/deps/v8/test/mjsunit/es6/generators-objects.js
+++ b/deps/v8/test/mjsunit/es6/generators-objects.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --harmony-tostring
+// Flags: --allow-natives-syntax
// Test instantations of generators.
diff --git a/deps/v8/test/mjsunit/es6/instanceof-proxies.js b/deps/v8/test/mjsunit/es6/instanceof-proxies.js
index cc720ad8fe..86b104ce70 100644
--- a/deps/v8/test/mjsunit/es6/instanceof-proxies.js
+++ b/deps/v8/test/mjsunit/es6/instanceof-proxies.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
-// Flags: --harmony-proxies --allow-natives-syntax
+// Flags: --allow-natives-syntax
// Test instanceof with proxies.
diff --git a/deps/v8/test/mjsunit/es6/iteration-semantics.js b/deps/v8/test/mjsunit/es6/iteration-semantics.js
index 6466ac5e26..558fb837e7 100644
--- a/deps/v8/test/mjsunit/es6/iteration-semantics.js
+++ b/deps/v8/test/mjsunit/es6/iteration-semantics.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-proxies
-
// Test for-of semantics.
"use strict";
diff --git a/deps/v8/test/mjsunit/es6/json.js b/deps/v8/test/mjsunit/es6/json.js
index 3fad08318f..4c1ada8a86 100644
--- a/deps/v8/test/mjsunit/es6/json.js
+++ b/deps/v8/test/mjsunit/es6/json.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-tostring
-
function testJSONToString() {
assertEquals('[object JSON]', "" + JSON);
assertEquals("JSON", JSON[Symbol.toStringTag]);
diff --git a/deps/v8/test/mjsunit/es6/math-trunc.js b/deps/v8/test/mjsunit/es6/math-trunc.js
index 9231576dda..c925b5b363 100644
--- a/deps/v8/test/mjsunit/es6/math-trunc.js
+++ b/deps/v8/test/mjsunit/es6/math-trunc.js
@@ -25,25 +25,78 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-assertEquals("Infinity", String(1/Math.trunc(0)));
-assertEquals("-Infinity", String(1/Math.trunc(-0)));
-assertEquals("Infinity", String(1/Math.trunc(Math.PI/4)));
-assertEquals("-Infinity", String(1/Math.trunc(-Math.sqrt(2)/2)));
-assertEquals(100, Math.trunc(100));
-assertEquals(-199, Math.trunc(-199));
-assertEquals(100, Math.trunc(100.1));
-assertTrue(isNaN(Math.trunc("abc")));
-assertTrue(isNaN(Math.trunc({})));
-assertEquals(0, Math.trunc([]));
-assertEquals(1, Math.trunc([1]));
-assertEquals(-100, Math.trunc([-100.1]));
-assertTrue(isNaN(Math.trunc([1, 1])));
-assertEquals(-100, Math.trunc({ toString: function() { return "-100.3"; } }));
-assertEquals(10, Math.trunc({ toString: function() { return 10.1; } }));
-assertEquals(-1, Math.trunc({ valueOf: function() { return -1.1; } }));
-assertEquals("-Infinity",
- String(1/Math.trunc({ valueOf: function() { return "-0.1"; } })));
-assertEquals("-Infinity", String(Math.trunc(-Infinity)));
-assertEquals("Infinity", String(Math.trunc(Infinity)));
-assertEquals("-Infinity", String(Math.trunc("-Infinity")));
-assertEquals("Infinity", String(Math.trunc("Infinity")));
+// Flags: --allow-natives-syntax
+
+var test_id = 0;
+
+function testTrunc(expected, input) {
+ var test = new Function('n',
+ '"' + (test_id++) + '";return Math.trunc(n)');
+ assertEquals(expected, test(input));
+ assertEquals(expected, test(input));
+ assertEquals(expected, test(input));
+ %OptimizeFunctionOnNextCall(test);
+ assertEquals(expected, test(input));
+
+ var test_double_input = new Function(
+ 'n',
+ '"' + (test_id++) + '";return Math.trunc(+n)');
+ assertEquals(expected, test_double_input(input));
+ assertEquals(expected, test_double_input(input));
+ assertEquals(expected, test_double_input(input));
+ %OptimizeFunctionOnNextCall(test_double_input);
+ assertEquals(expected, test_double_input(input));
+
+ var test_double_output = new Function(
+ 'n',
+ '"' + (test_id++) + '";return Math.trunc(n) + -0.0');
+ assertEquals(expected, test_double_output(input));
+ assertEquals(expected, test_double_output(input));
+ assertEquals(expected, test_double_output(input));
+ %OptimizeFunctionOnNextCall(test_double_output);
+ assertEquals(expected, test_double_output(input));
+}
+
+function test() {
+ // Ensure that a negative zero coming from Math.trunc is properly handled
+ // by other operations.
+ function itrunc(x) {
+ return 1 / Math.trunc(x);
+ }
+ assertEquals(Infinity, itrunc(0));
+ assertEquals(-Infinity, itrunc(-0));
+ assertEquals(Infinity, itrunc(Math.PI / 4));
+ assertEquals(-Infinity, itrunc(-Math.sqrt(2) / 2));
+ assertEquals(-Infinity, itrunc({valueOf: function() { return "-0.1"; }}));
+ %OptimizeFunctionOnNextCall(itrunc);
+
+ testTrunc(100, 100);
+ testTrunc(-199, -199);
+ testTrunc(100, 100.1);
+ testTrunc(4503599627370495.0, 4503599627370495.0);
+ testTrunc(4503599627370496.0, 4503599627370496.0);
+ testTrunc(-4503599627370495.0, -4503599627370495.0);
+ testTrunc(-4503599627370496.0, -4503599627370496.0);
+ testTrunc(9007199254740991.0, 9007199254740991.0);
+ testTrunc(-9007199254740991.0, -9007199254740991.0);
+ testTrunc(0, []);
+ testTrunc(1, [1]);
+ testTrunc(-100, [-100.1]);
+ testTrunc(-100, {toString: function() { return "-100.3"; }});
+ testTrunc(10, {toString: function() { return 10.1; }});
+ testTrunc(-1, {valueOf: function() { return -1.1; }});
+ testTrunc(-Infinity, -Infinity);
+ testTrunc(Infinity, Infinity);
+ testTrunc(-Infinity, "-Infinity");
+ testTrunc(Infinity, "Infinity");
+
+ assertTrue(isNaN(Math.trunc("abc")));
+ assertTrue(isNaN(Math.trunc({})));
+ assertTrue(isNaN(Math.trunc([1, 1])));
+}
+
+// Test in a loop to cover the custom IC and GC-related issues.
+for (var i = 0; i < 10; i++) {
+ test();
+ new Array(i * 10000);
+}
diff --git a/deps/v8/test/mjsunit/es6/math.js b/deps/v8/test/mjsunit/es6/math.js
index 3f76f1197a..cb43bd5bd1 100644
--- a/deps/v8/test/mjsunit/es6/math.js
+++ b/deps/v8/test/mjsunit/es6/math.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-tostring
-
function testMathToString() {
assertEquals('[object Math]', "" + Math);
assertEquals("Math", Math[Symbol.toStringTag]);
diff --git a/deps/v8/test/mjsunit/es6/new-target.js b/deps/v8/test/mjsunit/es6/new-target.js
index 8a06ff6c89..4be1254d67 100644
--- a/deps/v8/test/mjsunit/es6/new-target.js
+++ b/deps/v8/test/mjsunit/es6/new-target.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-reflect --harmony-destructuring-bind
-
(function TestClass() {
'use strict';
diff --git a/deps/v8/test/mjsunit/es6/no-unicode-regexp-flag.js b/deps/v8/test/mjsunit/es6/no-unicode-regexp-flag.js
index 035627c4d4..82d070e92d 100644
--- a/deps/v8/test/mjsunit/es6/no-unicode-regexp-flag.js
+++ b/deps/v8/test/mjsunit/es6/no-unicode-regexp-flag.js
@@ -7,7 +7,7 @@
// mjsunit/es6/regexp-flags tests that the property is there when the
// flag is on.
-// Flags: --harmony-regexps --no-harmony-unicode-regexps
+// Flags: --no-harmony-unicode-regexps
'use strict';
diff --git a/deps/v8/test/mjsunit/es6/object-tostring.js b/deps/v8/test/mjsunit/es6/object-tostring.js
index 4d6090faf1..29d07f263a 100644
--- a/deps/v8/test/mjsunit/es6/object-tostring.js
+++ b/deps/v8/test/mjsunit/es6/object-tostring.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-tostring --harmony-proxies
-
var global = this;
var funs = {
diff --git a/deps/v8/test/mjsunit/es6/pattern-brand-check.js b/deps/v8/test/mjsunit/es6/pattern-brand-check.js
new file mode 100644
index 0000000000..9b0c0111ef
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/pattern-brand-check.js
@@ -0,0 +1,54 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-subclass
+
+function createNonRegExp(calls) {
+ return {
+ get [Symbol.match]() {
+ calls.push("@@match");
+ return undefined;
+ },
+ get [Symbol.replace]() {
+ calls.push("@@replace");
+ return undefined;
+ },
+ get [Symbol.search]() {
+ calls.push("@@search");
+ return undefined;
+ },
+ get [Symbol.split]() {
+ calls.push("@@split");
+ return undefined;
+ },
+ [Symbol.toPrimitive]() {
+ calls.push("@@toPrimitive");
+ return "";
+ }
+ };
+}
+
+(function testStringMatchBrandCheck() {
+ var calls = [];
+ "".match(createNonRegExp(calls));
+ assertEquals(["@@match", "@@toPrimitive"], calls);
+})();
+
+(function testStringSearchBrandCheck() {
+ var calls = [];
+ "".search(createNonRegExp(calls));
+ assertEquals(["@@search", "@@toPrimitive"], calls);
+})();
+
+(function testStringSplitBrandCheck() {
+ var calls = [];
+ "".split(createNonRegExp(calls));
+ assertEquals(["@@split", "@@toPrimitive"], calls);
+})();
+
+(function testStringReplaceBrandCheck() {
+ var calls = [];
+ "".replace(createNonRegExp(calls), "");
+ assertEquals(["@@replace", "@@toPrimitive"], calls);
+})();
diff --git a/deps/v8/test/mjsunit/es6/promises.js b/deps/v8/test/mjsunit/es6/promises.js
index e4c8b389e8..4eb539cbd5 100644
--- a/deps/v8/test/mjsunit/es6/promises.js
+++ b/deps/v8/test/mjsunit/es6/promises.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --harmony-tostring --promise-extra
+// Flags: --allow-natives-syntax --promise-extra
// Make sure we don't rely on functions patchable by monkeys.
var call = Function.prototype.call.call.bind(Function.prototype.call)
diff --git a/deps/v8/test/mjsunit/harmony/proxies-accesschecks.js b/deps/v8/test/mjsunit/es6/proxies-accesschecks.js
index 209d4329f9..f5b90dcb08 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-accesschecks.js
+++ b/deps/v8/test/mjsunit/es6/proxies-accesschecks.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies
-
var realm = Realm.create();
this.__proto__ = new Proxy({}, {
diff --git a/deps/v8/test/mjsunit/harmony/proxies-apply.js b/deps/v8/test/mjsunit/es6/proxies-apply.js
index dae362ac61..a94541c01a 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-apply.js
+++ b/deps/v8/test/mjsunit/es6/proxies-apply.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies --harmony-reflect
-
(function testNonCallable() {
var proxy = new Proxy({},{});
assertThrows(function(){ proxy() }, TypeError);
diff --git a/deps/v8/test/mjsunit/harmony/proxies-bind.js b/deps/v8/test/mjsunit/es6/proxies-bind.js
index 9e4c5b79c4..83876a00c3 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-bind.js
+++ b/deps/v8/test/mjsunit/es6/proxies-bind.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies --harmony-reflect
-
// Tests the interaction of Function.prototype.bind with proxies.
diff --git a/deps/v8/test/mjsunit/harmony/proxies-construct.js b/deps/v8/test/mjsunit/es6/proxies-construct.js
index 6e02a47bd0..344c50a7e4 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-construct.js
+++ b/deps/v8/test/mjsunit/es6/proxies-construct.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies --harmony-reflect
-
(function testNonConstructable() {
var proxy = new Proxy({},{});
assertThrows(function(){ new proxy() }, TypeError);
diff --git a/deps/v8/test/mjsunit/harmony/proxies-cross-realm-exception.js b/deps/v8/test/mjsunit/es6/proxies-cross-realm-exception.js
index 5530a60fed..ffba5c2d81 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-cross-realm-exception.js
+++ b/deps/v8/test/mjsunit/es6/proxies-cross-realm-exception.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies --harmony-reflect --allow-natives-syntax
+// Flags: --allow-natives-syntax
// Do not read out the prototype from a cross-realm object.
var realm = Realm.create();
diff --git a/deps/v8/test/mjsunit/harmony/proxies-define-property.js b/deps/v8/test/mjsunit/es6/proxies-define-property.js
index 27f23be173..14c95bfe34 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-define-property.js
+++ b/deps/v8/test/mjsunit/es6/proxies-define-property.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies
-
// Check basic call to trap.
var g_target, g_name, g_desc;
diff --git a/deps/v8/test/mjsunit/harmony/proxies-delete-property.js b/deps/v8/test/mjsunit/es6/proxies-delete-property.js
index 27f9c059cc..7a46b9b3b2 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-delete-property.js
+++ b/deps/v8/test/mjsunit/es6/proxies-delete-property.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies --harmony-reflect
-
var properties =
["bla", "0", 1, Symbol(), {[Symbol.toPrimitive]() {return "a"}}];
diff --git a/deps/v8/test/mjsunit/es6/proxies-example-membrane.js b/deps/v8/test/mjsunit/es6/proxies-example-membrane.js
new file mode 100644
index 0000000000..dd373b7429
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/proxies-example-membrane.js
@@ -0,0 +1,308 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// A simple membrane. Adapted from:
+// http://wiki.ecmascript.org/doku.php?id=harmony:proxies#a_simple_membrane
+
+function createSimpleMembrane(target) {
+ let enabled = true;
+
+ function wrap(obj) {
+ if (obj !== Object(obj)) return obj;
+
+ let handler = new Proxy({}, {get: function(_, key) {
+ if (!enabled) throw new Error("disabled");
+ switch (key) {
+ case "apply":
+ return (_, that, args) => {
+ try {
+ return wrap(Reflect.apply(
+ obj, wrap(that), args.map((x) => wrap(x))));
+ } catch(e) {
+ throw wrap(e);
+ }
+ }
+ case "construct":
+ return (_, args, newt) => {
+ try {
+ return wrap(Reflect.construct(
+ obj, args.map((x) => wrap(x)), wrap(newt)));
+ } catch(e) {
+ throw wrap(e);
+ }
+ }
+ default:
+ return (_, ...args) => {
+ try {
+ return wrap(Reflect[key](obj, ...(args.map(wrap))));
+ } catch(e) {
+ throw wrap(e);
+ }
+ }
+ }
+ }});
+
+ return new Proxy(obj, handler);
+ }
+
+ const gate = Object.freeze({
+ enable: () => enabled = true,
+ disable: () => enabled = false
+ });
+
+ return Object.freeze({
+ wrapper: wrap(target),
+ gate: gate
+ });
+}
+
+
+// Test the simple membrane.
+{
+ var o = {
+ a: 6,
+ b: {bb: 8},
+ f: function(x) { return x },
+ g: function(x) { return x.a },
+ h: function(x) { this.q = x }
+ };
+ o[2] = {c: 7};
+ var m = createSimpleMembrane(o);
+ var w = m.wrapper;
+ var f = w.f;
+ var x = f(66);
+ var x = f({a: 1});
+ var x = w.f({a: 1});
+ var a = x.a;
+ assertEquals(6, w.a);
+ assertEquals(8, w.b.bb);
+ assertEquals(7, w[2]["c"]);
+ assertEquals(undefined, w.c);
+ assertEquals(1, w.f(1));
+ assertEquals(1, w.f({a: 1}).a);
+ assertEquals(2, w.g({a: 2}));
+ assertEquals(3, (w.r = {a: 3}).a);
+ assertEquals(3, w.r.a);
+ assertEquals(3, o.r.a);
+ w.h(3);
+ assertEquals(3, w.q);
+ assertEquals(3, o.q);
+ assertEquals(4, (new w.h(4)).q);
+
+ var wb = w.b;
+ var wr = w.r;
+ var wf = w.f;
+ var wf3 = w.f(3);
+ var wfx = w.f({a: 6});
+ var wgx = w.g({a: {aa: 7}});
+ var wh4 = new w.h(4);
+ m.gate.disable();
+ assertEquals(3, wf3);
+ assertThrows(function() { w.a }, Error);
+ assertThrows(function() { w.r }, Error);
+ assertThrows(function() { w.r = {a: 4} }, Error);
+ assertThrows(function() { o.r.a }, Error);
+ assertEquals("object", typeof o.r);
+ assertEquals(5, (o.r = {a: 5}).a);
+ assertEquals(5, o.r.a);
+ assertThrows(function() { w[1] }, Error);
+ assertThrows(function() { w.c }, Error);
+ assertThrows(function() { wb.bb }, Error);
+ assertThrows(function() { wr.a }, Error);
+ assertThrows(function() { wf(4) }, Error);
+ assertThrows(function() { wfx.a }, Error);
+ assertThrows(function() { wgx.aa }, Error);
+ assertThrows(function() { wh4.q }, Error);
+
+ m.gate.enable();
+ assertEquals(6, w.a);
+ assertEquals(5, w.r.a);
+ assertEquals(5, o.r.a);
+ assertEquals(7, w.r = 7);
+ assertEquals(7, w.r);
+ assertEquals(7, o.r);
+ assertEquals(8, w.b.bb);
+ assertEquals(7, w[2]["c"]);
+ assertEquals(undefined, w.c);
+ assertEquals(8, wb.bb);
+ assertEquals(3, wr.a);
+ assertEquals(4, wf(4));
+ assertEquals(3, wf3);
+ assertEquals(6, wfx.a);
+ assertEquals(7, wgx.aa);
+ assertEquals(4, wh4.q);
+}
+
+
+
+// An identity-preserving membrane. Adapted from:
+// http://wiki.ecmascript.org/doku.php?id=harmony:proxies#an_identity-preserving_membrane
+
+function createMembrane(target) {
+ const wet2dry = 0;
+ const dry2wet = 1;
+
+ function flip(dir) { return (dir + 1) % 2 }
+
+ let maps = [new WeakMap(), new WeakMap()];
+
+ let revoked = false;
+
+ function wrap(dir, obj) {
+ if (obj !== Object(obj)) return obj;
+
+ let wrapper = maps[dir].get(obj);
+ if (wrapper) return wrapper;
+
+ let handler = new Proxy({}, {get: function(_, key) {
+ if (revoked) throw new Error("revoked");
+ switch (key) {
+ case "apply":
+ return (_, that, args) => {
+ try {
+ return wrap(dir, Reflect.apply(
+ obj, wrap(flip(dir), that),
+ args.map((x) => wrap(flip(dir), x))));
+ } catch(e) {
+ throw wrap(dir, e);
+ }
+ }
+ case "construct":
+ return (_, args, newt) => {
+ try {
+ return wrap(dir, Reflect.construct(
+ obj, args.map((x) => wrap(flip(dir), x)),
+ wrap(flip(dir), newt)));
+ } catch(e) {
+ throw wrap(dir, e);
+ }
+ }
+ default:
+ return (_, ...args) => {
+ try {
+ return wrap(dir, Reflect[key](
+ obj, ...(args.map((x) => wrap(flip(dir), x)))))
+ } catch(e) {
+ throw wrap(dir, e);
+ }
+ }
+ }
+ }});
+
+ wrapper = new Proxy(obj, handler);
+ maps[dir].set(obj, wrapper);
+ maps[flip(dir)].set(wrapper, obj);
+ return wrapper;
+ }
+
+ const gate = Object.freeze({
+ revoke: () => revoked = true
+ });
+
+ return Object.freeze({
+ wrapper: wrap(wet2dry, target),
+ gate: gate
+ });
+}
+
+
+// Test the identity-preserving membrane.
+{
+ var receiver
+ var argument
+ var o = {
+ a: 6,
+ b: {bb: 8},
+ f: function(x) {receiver = this; argument = x; return x},
+ g: function(x) {receiver = this; argument = x; return x.a},
+ h: function(x) {receiver = this; argument = x; this.q = x},
+ s: function(x) {receiver = this; argument = x; this.x = {y: x}; return this}
+ }
+ o[2] = {c: 7}
+ var m = createMembrane(o)
+ var w = m.wrapper
+ var f = w.f
+ var x = f(66)
+ var x = f({a: 1})
+ var x = w.f({a: 1})
+ var a = x.a
+ assertEquals(6, w.a)
+ assertEquals(8, w.b.bb)
+ assertEquals(7, w[2]["c"])
+ assertEquals(undefined, w.c)
+ assertEquals(1, w.f(1))
+ assertSame(o, receiver)
+ assertEquals(1, w.f({a: 1}).a)
+ assertSame(o, receiver)
+ assertEquals(2, w.g({a: 2}))
+ assertSame(o, receiver)
+ assertSame(w, w.f(w))
+ assertSame(o, receiver)
+ assertSame(o, argument)
+ assertSame(o, w.f(o))
+ assertSame(o, receiver)
+ // Note that argument !== o, since o isn't dry, so gets wrapped wet again.
+ assertEquals(3, (w.r = {a: 3}).a)
+ assertEquals(3, w.r.a)
+ assertEquals(3, o.r.a)
+ w.h(3)
+ assertEquals(3, w.q)
+ assertEquals(3, o.q)
+ assertEquals(4, (new w.h(4)).q)
+ assertEquals(5, w.s(5).x.y)
+ assertSame(o, receiver)
+
+ var wb = w.b
+ var wr = w.r
+ var wf = w.f
+ var wf3 = w.f(3)
+ var wfx = w.f({a: 6})
+ var wgx = w.g({a: {aa: 7}})
+ var wh4 = new w.h(4)
+ var ws5 = w.s(5)
+ var ws5x = ws5.x
+ m.gate.revoke()
+ assertEquals(3, wf3)
+ assertThrows(function() { w.a }, Error)
+ assertThrows(function() { w.r }, Error)
+ assertThrows(function() { w.r = {a: 4} }, Error)
+ assertThrows(function() { o.r.a }, Error)
+ assertEquals("object", typeof o.r)
+ assertEquals(5, (o.r = {a: 5}).a)
+ assertEquals(5, o.r.a)
+ assertThrows(function() { w[1] }, Error)
+ assertThrows(function() { w.c }, Error)
+ assertThrows(function() { wb.bb }, Error)
+ assertEquals(3, wr.a)
+ assertThrows(function() { wf(4) }, Error)
+ assertEquals(6, wfx.a)
+ assertEquals(7, wgx.aa)
+ assertThrows(function() { wh4.q }, Error)
+ assertThrows(function() { ws5.x }, Error)
+ assertThrows(function() { ws5x.y }, Error)
+}
diff --git a/deps/v8/test/mjsunit/harmony/proxies-for.js b/deps/v8/test/mjsunit/es6/proxies-for.js
index e52ee43031..5b818453a9 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-for.js
+++ b/deps/v8/test/mjsunit/es6/proxies-for.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-proxies
-
// Helper.
function TestWithProxies(test, x, y, z) {
@@ -125,16 +123,96 @@ TestForInThrow(new Proxy({}, {
}
}));
-(function() {
- var p = new Proxy({}, {ownKeys:function() { return ["0"]; }});
+
+function keys(object) {
+ var keys = [];
+ for (var k in object) {
+ keys.push(k);
+ }
+ return keys;
+}
+
+(function testKeysProxyOnProtoEmpty() {
+ var p = new Proxy({}, {
+ ownKeys() { return []; },
+ });
var o = [0];
o.__proto__ = p;
- var keys = [];
- for (var k in o) { keys.push(k); };
- assertEquals(["0"], keys);
+ assertEquals(["0"], keys(o));
+
+ delete o[0];
+ assertEquals([], keys(o));
+})();
+
+(function testKeysProxyOnProto() {
+ var handler = {ownKeys() { return ["0"]; }};
+ var proxy = new Proxy({}, handler);
+ var object = [0];
+ object.__proto__ = proxy;
+ assertEquals(["0"], keys(object));
+
+ // The Proxy doesn't set his ownKeys enumerable.
+ delete object[0];
+ assertEquals([], keys(object));
+
+ // The [[Has]] trap has no influence on which are enumerable properties are
+ // shown in for-in.
+ handler.has = function() { return true };
+ assertEquals([], keys(object));
+
+ handler.getOwnPropertyDescriptor = function() {
+ return {enumerable: true, configurable: true}
+ }
+ assertEquals(["0"], keys(object));
})();
+(function testKeysProxyProto() {
+ var target = {t1:true, t2:true};
+ var handler = {};
+ var proxy = new Proxy(target, handler);
+
+ assertEquals(["t1", "t2"], keys(proxy));
+
+ target.__proto__ = {p1:true, p2:true};
+ assertEquals(["t1", "t2", "p1", "p2"], keys(proxy));
+
+ handler.getPrototypeOf = function(target) {
+ return {p3:true, p4:true};
+ };
+ // for-in walks the prototype chain for the [[Has]] / Enumerable check.
+ assertEquals(["t1", "t2", "p3", "p4"], keys(proxy));
+
+ // [[Has]] is not used in for-in.
+ handler.has = function() { return false };
+ assertEquals(["t1", "t2", "p3", "p4"], keys(proxy));
+
+ // Proxy intercepts enumerability check.
+ handler.getOwnPropertyDescriptor = function() {
+ return {enumerable: false, configurable: true}
+ }
+ assertEquals([], keys(proxy));
+
+ handler.getOwnPropertyDescriptor = function() {
+ return {enumerable: true, configurable: true}
+ }
+ assertEquals(["t1", "t2", "p3", "p4"], keys(proxy));
+
+ handler.getOwnPropertyDescriptor = function(target, key) {
+ return {
+ enumerable: key in target,
+ configurable: true
+ }
+ }
+ assertEquals(["t1", "t2"], keys(proxy));
+
+ handler.getPrototypeOf = function() { throw "error" };
+ assertThrowsEquals(() => {keys(proxy)}, "error");
+})();
+
+
(function () {
- var p = new Proxy({}, {ownKeys: function() { return ["1", Symbol(), "2"] }});
+ var symbol = Symbol();
+ var p = new Proxy({}, {ownKeys() { return ["1", symbol, "2"] }});
assertEquals(["1","2"], Object.getOwnPropertyNames(p));
+ assertEquals([symbol], Object.getOwnPropertySymbols(p));
})();
diff --git a/deps/v8/test/mjsunit/es6/proxies-function.js b/deps/v8/test/mjsunit/es6/proxies-function.js
new file mode 100644
index 0000000000..cb3a26c535
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/proxies-function.js
@@ -0,0 +1,630 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+
+var handler = {
+ get : function(r, n) { return n == "length" ? 2 : undefined }
+}
+
+
+// Calling (call, Function.prototype.call, Function.prototype.apply,
+// Function.prototype.bind).
+
+var global_object = this
+var receiver
+
+function TestCall(isStrict, callTrap) {
+ assertEquals(42, callTrap(undefined, undefined, [5, 37]))
+
+ var handler = {
+ get: function(r, k) {
+ return k == "length" ? 2 : Function.prototype[k]
+ },
+ apply: callTrap
+ }
+ var f = new Proxy(()=>{}, handler)
+ var o = {f: f}
+ global_object.f = f
+
+ receiver = 333
+ assertEquals(42, f(11, 31))
+ receiver = 333
+ assertEquals(42, o.f(10, 32))
+ assertSame(o, receiver)
+ receiver = 333
+ assertEquals(42, o["f"](9, 33))
+ assertSame(o, receiver)
+ receiver = 333
+ assertEquals(42, (1, o).f(8, 34))
+ assertSame(o, receiver)
+ receiver = 333
+ assertEquals(42, (1, o)["f"](7, 35))
+ assertSame(o, receiver)
+ receiver = 333
+ assertEquals(42, f.call(o, 32, 10))
+ assertSame(o, receiver)
+ receiver = 333
+ assertEquals(42, f.call(undefined, 33, 9))
+ receiver = 333
+ assertEquals(42, f.call(null, 33, 9))
+ receiver = 333
+ assertEquals(44, f.call(2, 21, 23))
+ assertSame(2, receiver.valueOf())
+ receiver = 333
+ assertEquals(42, Function.prototype.call.call(f, o, 20, 22))
+ assertSame(o, receiver)
+ receiver = 333
+ assertEquals(43, Function.prototype.call.call(f, null, 20, 23))
+ assertEquals(44, Function.prototype.call.call(f, 2, 21, 23))
+ assertEquals(2, receiver.valueOf())
+ receiver = 333
+ assertEquals(32, f.apply(o, [16, 16]))
+ assertSame(o, receiver)
+ receiver = 333
+ assertEquals(32, Function.prototype.apply.call(f, o, [17, 15]))
+ assertSame(o, receiver)
+ receiver = 333
+ assertEquals(42, %Call(f, o, 11, 31));
+ assertSame(o, receiver)
+ receiver = 333
+ assertEquals(42, %Call(f, null, 11, 31));
+ receiver = 333
+ assertEquals(42, %_Call(f, o, 11, 31))
+ assertSame(o, receiver)
+ receiver = 333
+ assertEquals(42, %_Call(f, null, 11, 31))
+
+ var ff = Function.prototype.bind.call(f, o, 12)
+ assertTrue(ff.length <= 1) // TODO(rossberg): Not spec'ed yet, be lax.
+ receiver = 333
+ assertEquals(42, ff(30))
+ assertSame(o, receiver)
+ receiver = 333
+ assertEquals(33, Function.prototype.call.call(ff, {}, 21))
+ assertSame(o, receiver)
+ receiver = 333
+ assertEquals(32, Function.prototype.apply.call(ff, {}, [20]))
+ assertSame(o, receiver)
+ receiver = 333
+ assertEquals(23, %Call(ff, {}, 11));
+ assertSame(o, receiver)
+ receiver = 333
+ assertEquals(23, %Call(ff, {}, 11, 3));
+ assertSame(o, receiver)
+ receiver = 333
+ assertEquals(34, %_Call(ff, {}, 22))
+ assertSame(o, receiver)
+ receiver = 333
+ assertEquals(34, %_Call(ff, {}, 22, 3))
+ assertSame(o, receiver)
+
+ var fff = Function.prototype.bind.call(ff, o, 30)
+ assertEquals(0, fff.length)
+ receiver = 333
+ assertEquals(42, fff())
+ assertSame(o, receiver)
+ receiver = 333
+ assertEquals(42, Function.prototype.call.call(fff, {}))
+ assertSame(o, receiver)
+ receiver = 333
+ assertEquals(42, Function.prototype.apply.call(fff, {}))
+ assertSame(o, receiver)
+ receiver = 333
+ assertEquals(42, %Call(fff, {}));
+ assertSame(o, receiver)
+ receiver = 333
+ assertEquals(42, %Call(fff, {}, 11, 3))
+ assertSame(o, receiver)
+ receiver = 333
+ assertEquals(42, %_Call(fff, {}))
+ assertSame(o, receiver)
+ receiver = 333
+ assertEquals(42, %_Call(fff, {}, 3, 4, 5))
+ assertSame(o, receiver)
+
+ var f = new Proxy(()=>{}, {apply: callTrap})
+ receiver = 333
+ assertEquals(42, f(11, 31))
+ var o = {f: f}
+ receiver = 333
+ assertEquals(42, o.f(10, 32))
+ assertSame(o, receiver)
+ receiver = 333
+ assertEquals(42, o["f"](9, 33))
+ assertSame(o, receiver)
+ receiver = 333
+ assertEquals(42, (1, o).f(8, 34))
+ assertSame(o, receiver)
+ receiver = 333
+ assertEquals(42, (1, o)["f"](7, 35))
+ assertSame(o, receiver)
+ receiver = 333
+ assertEquals(42, Function.prototype.call.call(f, o, 20, 22))
+ assertSame(o, receiver)
+ receiver = 333
+ assertEquals(32, Function.prototype.apply.call(f, o, [17, 15]))
+ assertSame(o, receiver)
+ receiver = 333
+ assertEquals(23, %Call(f, o, 11, 12))
+ assertSame(o, receiver)
+ receiver = 333
+ assertEquals(42, %_Call(f, o, 18, 24))
+ assertSame(o, receiver)
+}
+
+TestCall(false, function(_, that, [x, y]) {
+ receiver = that
+ return x + y
+})
+
+TestCall(true, function(_, that, args) {
+ "use strict"
+ receiver = that
+ return args[0] + args[1]
+})
+
+TestCall(false, function() {
+ receiver = arguments[1]
+ return arguments[2][0] + arguments[2][1]
+})
+
+TestCall(false, new Proxy(function(_, that, [x, y]) {
+ receiver = that
+ return x + y
+ }, handler))
+
+TestCall(true, new Proxy(function(_, that, args) {
+ "use strict"
+ receiver = that
+ return args[0] + args[1]
+ }, handler))
+
+TestCall(false, Object.freeze(new Proxy(function(_, that, [x, y]) {
+ receiver = that
+ return x + y
+ }, handler)))
+
+
+
+// Using intrinsics as call traps.
+
+function TestCallIntrinsic(type, callTrap) {
+ var f = new Proxy(()=>{}, {apply: (_, that, args) => callTrap(...args)})
+ var x = f()
+ assertTrue(typeof x == type)
+}
+
+TestCallIntrinsic("boolean", Boolean)
+TestCallIntrinsic("number", Number)
+TestCallIntrinsic("string", String)
+TestCallIntrinsic("object", Object)
+TestCallIntrinsic("function", Function)
+
+
+
+// Throwing from call trap.
+
+function TestCallThrow(callTrap) {
+ var f = new Proxy(()=>{}, {apply: callTrap})
+ assertThrowsEquals(() => f(11), "myexn")
+ assertThrowsEquals(() => ({x: f}).x(11), "myexn")
+ assertThrowsEquals(() => ({x: f})["x"](11), "myexn")
+ assertThrowsEquals(() => Function.prototype.call.call(f, {}, 2), "myexn")
+ assertThrowsEquals(() => Function.prototype.apply.call(f, {}, [1]), "myexn")
+ assertThrowsEquals(() => %Call(f, {}), "myexn")
+ assertThrowsEquals(() => %Call(f, {}, 1, 2), "myexn")
+ assertThrowsEquals(() => %_Call(f, {}), "myexn")
+ assertThrowsEquals(() => %_Call(f, {}, 1, 2), "myexn")
+
+ var f = Object.freeze(new Proxy(()=>{}, {apply: callTrap}))
+ assertThrowsEquals(() => f(11), "myexn")
+ assertThrowsEquals(() => ({x: f}).x(11), "myexn")
+ assertThrowsEquals(() => ({x: f})["x"](11), "myexn")
+ assertThrowsEquals(() => Function.prototype.call.call(f, {}, 2), "myexn")
+ assertThrowsEquals(() => Function.prototype.apply.call(f, {}, [1]), "myexn")
+ assertThrowsEquals(() => %Call(f, {}), "myexn")
+ assertThrowsEquals(() => %Call(f, {}, 1, 2), "myexn")
+ assertThrowsEquals(() => %_Call(f, {}), "myexn")
+ assertThrowsEquals(() => %_Call(f, {}, 1, 2), "myexn")
+}
+
+TestCallThrow(function() { throw "myexn" })
+TestCallThrow(new Proxy(() => {throw "myexn"}, {}))
+TestCallThrow(Object.freeze(new Proxy(() => {throw "myexn"}, {})))
+
+
+
+// Construction (new).
+
+var prototype = {myprop: 0}
+var receiver
+
+var handlerWithPrototype = {
+ get: function(r, n) {
+ if (n == "length") return 2;
+ assertEquals("prototype", n);
+ return prototype;
+ }
+}
+
+var handlerSansPrototype = {
+ get: function(r, n) {
+ if (n == "length") return 2;
+ assertEquals("prototype", n);
+ return undefined;
+ }
+}
+
+function ReturnUndef(_, args, newt) {
+ "use strict";
+ newt.sum = args[0] + args[1];
+}
+
+function ReturnThis(x, y) {
+ "use strict";
+ receiver = this;
+ this.sum = x + y;
+ return this;
+}
+
+function ReturnNew(_, args, newt) {
+ "use strict";
+ return {sum: args[0] + args[1]};
+}
+
+function ReturnNewWithProto(_, args, newt) {
+ "use strict";
+ var result = Object.create(prototype);
+ result.sum = args[0] + args[1];
+ return result;
+}
+
+function TestConstruct(proto, constructTrap) {
+ TestConstruct2(proto, constructTrap, handlerWithPrototype)
+ TestConstruct2(proto, constructTrap, handlerSansPrototype)
+}
+
+function TestConstruct2(proto, constructTrap, handler) {
+ var f = new Proxy(function(){}, {construct: constructTrap})
+ var o = new f(11, 31)
+ assertEquals(42, o.sum)
+ assertSame(proto, Object.getPrototypeOf(o))
+
+ var f = Object.freeze(new Proxy(function(){}, {construct: constructTrap}))
+ var o = new f(11, 32)
+ assertEquals(43, o.sum)
+ assertSame(proto, Object.getPrototypeOf(o))
+}
+
+TestConstruct(Object.prototype, ReturnNew)
+TestConstruct(prototype, ReturnNewWithProto)
+
+TestConstruct(Object.prototype, new Proxy(ReturnNew, {}))
+TestConstruct(prototype, new Proxy(ReturnNewWithProto, {}))
+
+TestConstruct(Object.prototype, Object.freeze(new Proxy(ReturnNew, {})))
+TestConstruct(prototype, Object.freeze(new Proxy(ReturnNewWithProto, {})))
+
+
+
+// Throwing from the construct trap.
+
+function TestConstructThrow(trap) {
+ var f = new Proxy(function(){}, {construct: trap});
+ assertThrowsEquals(() => new f(11), "myexn")
+ Object.freeze(f)
+ assertThrowsEquals(() => new f(11), "myexn")
+}
+
+TestConstructThrow(function() { throw "myexn" })
+TestConstructThrow(new Proxy(function() { throw "myexn" }, {}))
+TestConstructThrow(Object.freeze(new Proxy(function() { throw "myexn" }, {})))
+
+
+
+// Using function proxies as getters and setters.
+
+var value
+var receiver
+
+function TestAccessorCall(getterCallTrap, setterCallTrap) {
+ var pgetter = new Proxy(()=>{}, {apply: getterCallTrap})
+ var psetter = new Proxy(()=>{}, {apply: setterCallTrap})
+
+ var o = {}
+ var oo = Object.create(o)
+ Object.defineProperty(o, "a", {get: pgetter, set: psetter})
+ Object.defineProperty(o, "b", {get: pgetter})
+ Object.defineProperty(o, "c", {set: psetter})
+ Object.defineProperty(o, "3", {get: pgetter, set: psetter})
+ Object.defineProperty(oo, "a", {value: 43})
+
+ receiver = ""
+ assertEquals(42, o.a)
+ assertSame(o, receiver)
+ receiver = ""
+ assertEquals(42, o.b)
+ assertSame(o, receiver)
+ receiver = ""
+ assertEquals(undefined, o.c)
+ assertEquals("", receiver)
+ receiver = ""
+ assertEquals(42, o["a"])
+ assertSame(o, receiver)
+ receiver = ""
+ assertEquals(42, o[3])
+ assertSame(o, receiver)
+
+ receiver = ""
+ assertEquals(43, oo.a)
+ assertEquals("", receiver)
+ receiver = ""
+ assertEquals(42, oo.b)
+ assertSame(oo, receiver)
+ receiver = ""
+ assertEquals(undefined, oo.c)
+ assertEquals("", receiver)
+ receiver = ""
+ assertEquals(43, oo["a"])
+ assertEquals("", receiver)
+ receiver = ""
+ assertEquals(42, oo[3])
+ assertSame(oo, receiver)
+
+ receiver = ""
+ assertEquals(50, o.a = 50)
+ assertSame(o, receiver)
+ assertEquals(50, value)
+ receiver = ""
+ assertEquals(51, o.b = 51)
+ assertEquals("", receiver)
+ assertEquals(50, value) // no setter
+ assertThrows(function() { "use strict"; o.b = 51 }, TypeError)
+ receiver = ""
+ assertEquals(52, o.c = 52)
+ assertSame(o, receiver)
+ assertEquals(52, value)
+ receiver = ""
+ assertEquals(53, o["a"] = 53)
+ assertSame(o, receiver)
+ assertEquals(53, value)
+ receiver = ""
+ assertEquals(54, o[3] = 54)
+ assertSame(o, receiver)
+ assertEquals(54, value)
+
+ value = 0
+ receiver = ""
+ assertEquals(60, oo.a = 60)
+ assertEquals("", receiver)
+ assertEquals(0, value) // oo has own 'a'
+ assertEquals(61, oo.b = 61)
+ assertSame("", receiver)
+ assertEquals(0, value) // no setter
+ assertThrows(function() { "use strict"; oo.b = 61 }, TypeError)
+ receiver = ""
+ assertEquals(62, oo.c = 62)
+ assertSame(oo, receiver)
+ assertEquals(62, value)
+ receiver = ""
+ assertEquals(63, oo["c"] = 63)
+ assertSame(oo, receiver)
+ assertEquals(63, value)
+ receiver = ""
+ assertEquals(64, oo[3] = 64)
+ assertSame(oo, receiver)
+ assertEquals(64, value)
+}
+
+TestAccessorCall(
+ function(_, that) { receiver = that; return 42 },
+ function(_, that, [x]) { receiver = that; value = x }
+)
+
+TestAccessorCall(
+ function(_, that) { "use strict"; receiver = that; return 42 },
+ function(_, that, args) { "use strict"; receiver = that; value = args[0] }
+)
+
+TestAccessorCall(
+ new Proxy(function(_, that) { receiver = that; return 42 }, {}),
+ new Proxy(function(_, that, [x]) { receiver = that; value = x }, {})
+)
+
+TestAccessorCall(
+ Object.freeze(
+ new Proxy(function(_, that) { receiver = that; return 42 }, {})),
+ Object.freeze(
+ new Proxy(function(_, that, [x]) { receiver = that; value = x }, {}))
+)
+
+
+// Passing a proxy function to higher-order library functions.
+
+function TestHigherOrder(f) {
+ assertEquals(6, [6, 2].map(f)[0])
+ assertEquals(4, [5, 2].reduce(f, 4))
+ assertTrue([1, 2].some(f))
+ assertEquals("a.b.c", "a.b.c".replace(".", f))
+}
+
+TestHigherOrder(function(x) { return x })
+TestHigherOrder(function(x) { "use strict"; return x })
+TestHigherOrder(new Proxy(function(x) { return x }, {}))
+TestHigherOrder(Object.freeze(new Proxy(function(x) { return x }, {})))
+
+
+
+// TODO(rossberg): Ultimately, I want to have the following test function
+// run through, but it currently fails on so many cases (some not even
+// involving proxies), that I leave that for later...
+/*
+function TestCalls() {
+ var handler = {
+ get: function(r, k) {
+ return k == "length" ? 2 : Function.prototype[k]
+ }
+ }
+ var bind = Function.prototype.bind
+ var o = {}
+
+ var traps = [
+ function(x, y) {
+ return {receiver: this, result: x + y, strict: false}
+ },
+ function(x, y) { "use strict";
+ return {receiver: this, result: x + y, strict: true}
+ },
+ function() {
+ var x = arguments[0], y = arguments[1]
+ return {receiver: this, result: x + y, strict: false}
+ },
+ Proxy.createFunction(handler, function(x, y) {
+ return {receiver: this, result: x + y, strict: false}
+ }),
+ Proxy.createFunction(handler, function() {
+ var x = arguments[0], y = arguments[1]
+ return {receiver: this, result: x + y, strict: false}
+ }),
+ Proxy.createFunction(handler, function(x, y) { "use strict"
+ return {receiver: this, result: x + y, strict: true}
+ }),
+ CreateFrozen(handler, function(x, y) {
+ return {receiver: this, result: x + y, strict: false}
+ }),
+ CreateFrozen(handler, function(x, y) { "use strict"
+ return {receiver: this, result: x + y, strict: true}
+ }),
+ ]
+ var creates = [
+ function(trap) { return trap },
+ function(trap) { return CreateFrozen({}, callTrap) },
+ function(trap) { return Proxy.createFunction(handler, callTrap) },
+ function(trap) {
+ return Proxy.createFunction(handler, CreateFrozen({}, callTrap))
+ },
+ function(trap) {
+ return Proxy.createFunction(handler, Proxy.createFunction(handler, callTrap))
+ },
+ ]
+ var binds = [
+ function(f, o, x, y) { return f },
+ function(f, o, x, y) { return bind.call(f, o) },
+ function(f, o, x, y) { return bind.call(f, o, x) },
+ function(f, o, x, y) { return bind.call(f, o, x, y) },
+ function(f, o, x, y) { return bind.call(f, o, x, y, 5) },
+ function(f, o, x, y) { return bind.call(bind.call(f, o), {}, x, y) },
+ function(f, o, x, y) { return bind.call(bind.call(f, o, x), {}, y) },
+ function(f, o, x, y) { return bind.call(bind.call(f, o, x, y), {}, 5) },
+ ]
+ var calls = [
+ function(f, x, y) { return f(x, y) },
+ function(f, x, y) { var g = f; return g(x, y) },
+ function(f, x, y) { with ({}) return f(x, y) },
+ function(f, x, y) { var g = f; with ({}) return g(x, y) },
+ function(f, x, y, o) { with (o) return f(x, y) },
+ function(f, x, y, o) { return f.call(o, x, y) },
+ function(f, x, y, o) { return f.apply(o, [x, y]) },
+ function(f, x, y, o) { return Function.prototype.call.call(f, o, x, y) },
+ function(f, x, y, o) { return Function.prototype.apply.call(f, o, [x, y]) },
+ function(f, x, y, o) { return %_Call(f, o, x, y) },
+ function(f, x, y, o) { return %Call(f, o, x, y) },
+ function(f, x, y, o) { return %Apply(f, o, [null, x, y, null], 1, 2) },
+ function(f, x, y, o) { return %Apply(f, o, arguments, 2, 2) },
+ function(f, x, y, o) { if (typeof o == "object") return o.f(x, y) },
+ function(f, x, y, o) { if (typeof o == "object") return o["f"](x, y) },
+ function(f, x, y, o) { if (typeof o == "object") return (1, o).f(x, y) },
+ function(f, x, y, o) { if (typeof o == "object") return (1, o)["f"](x, y) },
+ ]
+ var receivers = [o, global_object, undefined, null, 2, "bla", true]
+ var expectedSloppies = [o, global_object, global_object, global_object]
+
+ for (var t = 0; t < traps.length; ++t) {
+ for (var i = 0; i < creates.length; ++i) {
+ for (var j = 0; j < binds.length; ++j) {
+ for (var k = 0; k < calls.length; ++k) {
+ for (var m = 0; m < receivers.length; ++m) {
+ for (var n = 0; n < receivers.length; ++n) {
+ var bound = receivers[m]
+ var receiver = receivers[n]
+ var func = binds[j](creates[i](traps[t]), bound, 31, 11)
+ var expected = j > 0 ? bound : receiver
+ var expectedSloppy = expectedSloppies[j > 0 ? m : n]
+ o.f = func
+ global_object.f = func
+ var x = calls[k](func, 11, 31, receiver)
+ if (x !== undefined) {
+ assertEquals(42, x.result)
+ if (calls[k].length < 4)
+ assertSame(x.strict ? undefined : global_object, x.receiver)
+ else if (x.strict)
+ assertSame(expected, x.receiver)
+ else if (expectedSloppy === undefined)
+ assertSame(expected, x.receiver.valueOf())
+ else
+ assertSame(expectedSloppy, x.receiver)
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+TestCalls()
+*/
+
+var realms = [Realm.create(), Realm.create()];
+Realm.shared = {};
+
+Realm.eval(realms[0], "function f(_, that) { return that; };");
+Realm.eval(realms[0], "Realm.shared.f = f;");
+Realm.eval(realms[0], "Realm.shared.fg = this;");
+Realm.eval(realms[1], "function g(_, that) { return that; };");
+Realm.eval(realms[1], "Realm.shared.g = g;");
+Realm.eval(realms[1], "Realm.shared.gg = this;");
+
+var fp = new Proxy(()=>{}, {apply: Realm.shared.f});
+var gp = new Proxy(()=>{}, {apply: Realm.shared.g});
+
+for (var i = 0; i < 10; i++) {
+ assertEquals(undefined, fp());
+ assertEquals(undefined, gp());
+
+ with (this) {
+ assertEquals(this, fp());
+ assertEquals(this, gp());
+ }
+
+ with ({}) {
+ assertEquals(undefined, fp());
+ assertEquals(undefined, gp());
+ }
+}
diff --git a/deps/v8/test/mjsunit/harmony/proxies-get-own-property-descriptor.js b/deps/v8/test/mjsunit/es6/proxies-get-own-property-descriptor.js
index f7dff61908..441ff16ad9 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-get-own-property-descriptor.js
+++ b/deps/v8/test/mjsunit/es6/proxies-get-own-property-descriptor.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies
-
var target = {};
var configurable_desc = {
value: 123,
diff --git a/deps/v8/test/mjsunit/harmony/proxies-get-prototype-of.js b/deps/v8/test/mjsunit/es6/proxies-get-prototype-of.js
index 36f67356d5..a628f3fa15 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-get-prototype-of.js
+++ b/deps/v8/test/mjsunit/es6/proxies-get-prototype-of.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies
-
var target = { target: 1 };
target.__proto__ = {};
var handler = { handler: 1 };
diff --git a/deps/v8/test/mjsunit/harmony/proxies-get.js b/deps/v8/test/mjsunit/es6/proxies-get.js
index 04ebd31257..b1b92dbb06 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-get.js
+++ b/deps/v8/test/mjsunit/es6/proxies-get.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies --harmony-reflect --allow-natives-syntax
+// Flags: --allow-natives-syntax
(function testBasicFunctionality() {
var target = {
diff --git a/deps/v8/test/mjsunit/harmony/proxies-global-reference.js b/deps/v8/test/mjsunit/es6/proxies-global-reference.js
index 1b77e66fdf..975d7f75fb 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-global-reference.js
+++ b/deps/v8/test/mjsunit/es6/proxies-global-reference.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies
-
var failing_proxy = new Proxy({}, new Proxy({}, {
get() { throw "No trap should fire" }}));
diff --git a/deps/v8/test/mjsunit/harmony/proxies-has-own-property.js b/deps/v8/test/mjsunit/es6/proxies-has-own-property.js
index 1455d2b273..5b9ddbce62 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-has-own-property.js
+++ b/deps/v8/test/mjsunit/es6/proxies-has-own-property.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies
-
var handler = {};
var target = {a:1};
var proxy = new Proxy(target, handler);
diff --git a/deps/v8/test/mjsunit/harmony/proxies-has.js b/deps/v8/test/mjsunit/es6/proxies-has.js
index b7848e8ae3..7294196a11 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-has.js
+++ b/deps/v8/test/mjsunit/es6/proxies-has.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies
-
var target = {
"target_one": 1
};
diff --git a/deps/v8/test/mjsunit/harmony/proxies-hash.js b/deps/v8/test/mjsunit/es6/proxies-hash.js
index 830facb28d..05433f0dce 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-hash.js
+++ b/deps/v8/test/mjsunit/es6/proxies-hash.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-proxies
-
// Helper.
diff --git a/deps/v8/test/mjsunit/harmony/proxies-integrity.js b/deps/v8/test/mjsunit/es6/proxies-integrity.js
index 9ed6005d6b..4cdf77009a 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-integrity.js
+++ b/deps/v8/test/mjsunit/es6/proxies-integrity.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies --harmony-reflect
-
function toKey(x) {
diff --git a/deps/v8/test/mjsunit/harmony/proxies-is-extensible.js b/deps/v8/test/mjsunit/es6/proxies-is-extensible.js
index f597d0d0a6..9ab2c05f96 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-is-extensible.js
+++ b/deps/v8/test/mjsunit/es6/proxies-is-extensible.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies --harmony-reflect
-
(function () {
// No trap.
diff --git a/deps/v8/test/mjsunit/harmony/proxies-json.js b/deps/v8/test/mjsunit/es6/proxies-json.js
index 19a13298df..d48d5390f6 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-json.js
+++ b/deps/v8/test/mjsunit/es6/proxies-json.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-proxies --harmony-reflect
-
///////////////////////////////////////////////////////////////////////////////
@@ -129,9 +127,6 @@ var handler4 = {
get: function(target, name) {
return 0;
},
- enumerate: function(target) {
- return [][Symbol.iterator]();
- },
has: function() {
return true;
},
@@ -152,8 +147,8 @@ var handler5 = {
if (name == 'z') return 97000;
return function(key) { return key.charCodeAt(0) + this.z; };
},
- enumerate: function(target) {
- return ['toJSON', 'z'][Symbol.iterator]();
+ ownKeys: function(target) {
+ return ['toJSON', 'z'];
},
has: function() {
return true;
@@ -173,8 +168,8 @@ var handler6 = {
get: function(target, name) {
return function(key) { return undefined; };
},
- enumerate: function(target) {
- return ['toJSON'][Symbol.iterator]();
+ ownKeys: function(target) {
+ return ['toJSON'];
},
has: function() {
return true;
diff --git a/deps/v8/test/mjsunit/harmony/proxies-keys.js b/deps/v8/test/mjsunit/es6/proxies-keys.js
index 61a39f4972..7344032aaf 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-keys.js
+++ b/deps/v8/test/mjsunit/es6/proxies-keys.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies
-
var target = {
target: 1
};
diff --git a/deps/v8/test/mjsunit/harmony/proxies-object-assign.js b/deps/v8/test/mjsunit/es6/proxies-object-assign.js
index 154f8c15f6..c350f4e879 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-object-assign.js
+++ b/deps/v8/test/mjsunit/es6/proxies-object-assign.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies
-
var handler = {
ownKeys: function(t) { return ["a", "b"]; },
getOwnPropertyDescriptor: function(t, p) {
diff --git a/deps/v8/test/mjsunit/harmony/proxies-ownkeys.js b/deps/v8/test/mjsunit/es6/proxies-ownkeys.js
index 88350cca02..7cc0a87b68 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-ownkeys.js
+++ b/deps/v8/test/mjsunit/es6/proxies-ownkeys.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies --harmony-reflect
-
var target = {
"target_one": 1
};
diff --git a/deps/v8/test/mjsunit/harmony/proxies-prevent-extensions.js b/deps/v8/test/mjsunit/es6/proxies-prevent-extensions.js
index 0d6ae4c101..dc3c42ed12 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-prevent-extensions.js
+++ b/deps/v8/test/mjsunit/es6/proxies-prevent-extensions.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies --harmony-reflect
-
(function () {
// No trap.
diff --git a/deps/v8/test/mjsunit/harmony/proxies-property-is-enumerable.js b/deps/v8/test/mjsunit/es6/proxies-property-is-enumerable.js
index b1742e20b8..0d4a92f1b6 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-property-is-enumerable.js
+++ b/deps/v8/test/mjsunit/es6/proxies-property-is-enumerable.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies
-
var handler = {};
var target = { a: 1 };
var proxy = new Proxy(target, handler);
diff --git a/deps/v8/test/mjsunit/harmony/proxies-prototype-handler-stackoverflow.js b/deps/v8/test/mjsunit/es6/proxies-prototype-handler-stackoverflow.js
index e88476dd50..3da36c451d 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-prototype-handler-stackoverflow.js
+++ b/deps/v8/test/mjsunit/es6/proxies-prototype-handler-stackoverflow.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies --harmony-reflect --stack-size=100
+// Flags: --stack-size=100
// Test that traps that involve walking the target object's prototype chain
// don't overflow the stack when the original proxy is on that chain.
diff --git a/deps/v8/test/mjsunit/harmony/proxies-prototype-target-stackoverflow.js b/deps/v8/test/mjsunit/es6/proxies-prototype-target-stackoverflow.js
index ba55f6aad9..741a8b0ed5 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-prototype-target-stackoverflow.js
+++ b/deps/v8/test/mjsunit/es6/proxies-prototype-target-stackoverflow.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies --harmony-reflect
-
// Test that traps that involve walking the target object's prototype chain
// don't overflow the stack when the original proxy is on that chain.
diff --git a/deps/v8/test/mjsunit/harmony/proxies-revocable.js b/deps/v8/test/mjsunit/es6/proxies-revocable.js
index d0d0f781d2..1f61174a35 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-revocable.js
+++ b/deps/v8/test/mjsunit/es6/proxies-revocable.js
@@ -2,15 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies --harmony-reflect
-
traps = [
"getPrototypeOf", "setPrototypeOf", "isExtensible", "preventExtensions",
"getOwnPropertyDescriptor", "has", "get", "set", "deleteProperty",
"defineProperty", "ownKeys", "apply", "construct"
];
-// TODO(neis): Fix enumerate.
var {proxy, revoke} = Proxy.revocable({}, {});
assertEquals(0, revoke.length);
diff --git a/deps/v8/test/mjsunit/harmony/proxies-set-prototype-of.js b/deps/v8/test/mjsunit/es6/proxies-set-prototype-of.js
index 810c219533..9d9e73f908 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-set-prototype-of.js
+++ b/deps/v8/test/mjsunit/es6/proxies-set-prototype-of.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies --harmony-reflect
-
var target = { target: 1 };
target.__proto__ = {};
var handler = { handler: 1 };
diff --git a/deps/v8/test/mjsunit/harmony/proxies-set.js b/deps/v8/test/mjsunit/es6/proxies-set.js
index 2fec115a10..19f39f9a65 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-set.js
+++ b/deps/v8/test/mjsunit/es6/proxies-set.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies --harmony-reflect
-
function sloppyDefaultSet(o, p, v) { return o[p] = v }
function sloppyReflectSet(o, p, v) { return Reflect.set(o, p, v) }
diff --git a/deps/v8/test/mjsunit/harmony/proxies-with-unscopables.js b/deps/v8/test/mjsunit/es6/proxies-with-unscopables.js
index c87492c61d..b9a7ad8012 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-with-unscopables.js
+++ b/deps/v8/test/mjsunit/es6/proxies-with-unscopables.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies
-
function TestBasics() {
var log = [];
diff --git a/deps/v8/test/mjsunit/harmony/proxies-with.js b/deps/v8/test/mjsunit/es6/proxies-with.js
index 1aa13adea6..710e8b5e2d 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-with.js
+++ b/deps/v8/test/mjsunit/es6/proxies-with.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-proxies
-
// Helper.
diff --git a/deps/v8/test/mjsunit/harmony/proxies.js b/deps/v8/test/mjsunit/es6/proxies.js
index 8f24d4d9ad..3b9a4c5119 100644
--- a/deps/v8/test/mjsunit/harmony/proxies.js
+++ b/deps/v8/test/mjsunit/es6/proxies.js
@@ -29,7 +29,7 @@
// test enters an infinite recursion which goes through the runtime and we
// overflow the system stack before the simulator stack.
-// Flags: --harmony-proxies --sim-stack-size=500 --allow-natives-syntax
+// Flags: --sim-stack-size=500 --allow-natives-syntax
// Helper.
@@ -1018,7 +1018,19 @@ TestHasOwnThrow({
var o4 = Object.create(o2)
var o5 = Object.create(o3)
- function handler(o) { return {get: function() { return o } } }
+ function handler(o) {
+ return {
+ get: function(r, p) {
+ // We want to test prototype lookup, so ensure the proxy
+ // offers OrdinaryHasInstance behavior.
+ if (p === Symbol.hasInstance) {
+ return undefined;
+ }
+ return o;
+ }
+ }
+ }
+
var f0 = new Proxy(function() {}, handler(o0))
var f1 = new Proxy(function() {}, handler(o1))
var f2 = new Proxy(function() {}, handler(o2))
diff --git a/deps/v8/test/mjsunit/harmony/reflect-apply.js b/deps/v8/test/mjsunit/es6/reflect-apply.js
index 2cfb98282b..fa3801378f 100644
--- a/deps/v8/test/mjsunit/harmony/reflect-apply.js
+++ b/deps/v8/test/mjsunit/es6/reflect-apply.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-reflect
-
(function testReflectApplyArity() {
assertEquals(3, Reflect.apply.length);
diff --git a/deps/v8/test/mjsunit/harmony/reflect-construct.js b/deps/v8/test/mjsunit/es6/reflect-construct.js
index c136957df0..b37f876e94 100644
--- a/deps/v8/test/mjsunit/harmony/reflect-construct.js
+++ b/deps/v8/test/mjsunit/es6/reflect-construct.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-reflect
-
(function testReflectConstructArity() {
assertEquals(2, Reflect.construct.length);
diff --git a/deps/v8/test/mjsunit/harmony/reflect-define-property.js b/deps/v8/test/mjsunit/es6/reflect-define-property.js
index afd3ff6595..b19c5aa6ff 100644
--- a/deps/v8/test/mjsunit/harmony/reflect-define-property.js
+++ b/deps/v8/test/mjsunit/es6/reflect-define-property.js
@@ -28,7 +28,7 @@
// Tests the Reflect.defineProperty method - ES6 26.1.3
// This is adapted from mjsunit/object-define-property.js.
-// Flags: --allow-natives-syntax --harmony-reflect
+// Flags: --allow-natives-syntax
// Check that an exception is thrown when null is passed as object.
diff --git a/deps/v8/test/mjsunit/harmony/reflect-get-own-property-descriptor.js b/deps/v8/test/mjsunit/es6/reflect-get-own-property-descriptor.js
index 3cbffea78b..5e968995a1 100644
--- a/deps/v8/test/mjsunit/harmony/reflect-get-own-property-descriptor.js
+++ b/deps/v8/test/mjsunit/es6/reflect-get-own-property-descriptor.js
@@ -30,8 +30,6 @@
// This is adapted from mjsunit/get-own-property-descriptor.js.
-// Flags: --harmony-reflect
-
function get() { return x; }
function set(x) { this.x = x; }
diff --git a/deps/v8/test/mjsunit/harmony/reflect-get-prototype-of.js b/deps/v8/test/mjsunit/es6/reflect-get-prototype-of.js
index 4dee91b61e..9fd155939a 100644
--- a/deps/v8/test/mjsunit/harmony/reflect-get-prototype-of.js
+++ b/deps/v8/test/mjsunit/es6/reflect-get-prototype-of.js
@@ -28,8 +28,6 @@
// Tests the Reflect.getPrototypeOf - ES6 26.1.8.
// This is adapted from mjsunit/get-prototype-of.js.
-// Flags: --harmony-reflect
-
function assertPrototypeOf(func, expected) {
diff --git a/deps/v8/test/mjsunit/harmony/reflect-own-keys.js b/deps/v8/test/mjsunit/es6/reflect-own-keys.js
index 6f5dacf1c8..5f51f4053f 100644
--- a/deps/v8/test/mjsunit/harmony/reflect-own-keys.js
+++ b/deps/v8/test/mjsunit/es6/reflect-own-keys.js
@@ -27,8 +27,6 @@
// This is adapted from mjsunit/object-get-own-property-names.js.
-// Flags: --harmony-reflect
-
// Check simple cases.
var obj = { a: 1, b: 2};
diff --git a/deps/v8/test/mjsunit/harmony/reflect-prevent-extensions.js b/deps/v8/test/mjsunit/es6/reflect-prevent-extensions.js
index a964ed7b2d..c6f37495d7 100644
--- a/deps/v8/test/mjsunit/harmony/reflect-prevent-extensions.js
+++ b/deps/v8/test/mjsunit/es6/reflect-prevent-extensions.js
@@ -28,7 +28,7 @@
// Tests the Reflect.preventExtensions method - ES6 26.1.12.
// This is adapted from object-prevent-extensions.js.
-// Flags: --allow-natives-syntax --harmony-reflect
+// Flags: --allow-natives-syntax
var obj1 = {};
diff --git a/deps/v8/test/mjsunit/harmony/reflect-set-prototype-of.js b/deps/v8/test/mjsunit/es6/reflect-set-prototype-of.js
index 0e5554e907..8f2a00a9c4 100644
--- a/deps/v8/test/mjsunit/harmony/reflect-set-prototype-of.js
+++ b/deps/v8/test/mjsunit/es6/reflect-set-prototype-of.js
@@ -27,8 +27,6 @@
// This is adapted from mjsunit/harmony/set-prototype-of.js.
-// Flags: --harmony-reflect
-
function getObjects() {
diff --git a/deps/v8/test/mjsunit/harmony/reflect.js b/deps/v8/test/mjsunit/es6/reflect.js
index 6449eb8259..ee272b0fc7 100644
--- a/deps/v8/test/mjsunit/harmony/reflect.js
+++ b/deps/v8/test/mjsunit/es6/reflect.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-reflect
-
// TODO(neis): Test with proxies.
diff --git a/deps/v8/test/mjsunit/es6/regexp-constructor.js b/deps/v8/test/mjsunit/es6/regexp-constructor.js
index e3b7efa0e7..559ac00cd0 100644
--- a/deps/v8/test/mjsunit/es6/regexp-constructor.js
+++ b/deps/v8/test/mjsunit/es6/regexp-constructor.js
@@ -21,32 +21,47 @@ function should_not_be_called() {
})();
(function() {
+ let allow = false;
class A extends RegExp {
- get source() { throw new Error("should not be called") }
- get flags() { throw new Error("should not be called") }
+ get source() {
+ if (!allow) throw new Error("should not be called");
+ return super.source;
+ }
+ get flags() {
+ if (!allow) throw new Error("should not be called");
+ return super.flags
+ }
}
var r = new A("biep");
var r2 = RegExp(r);
assertFalse(r === r2);
+ allow = true;
assertEquals(r, r2);
+ allow = false;
assertTrue(A.prototype === r.__proto__);
assertTrue(RegExp.prototype === r2.__proto__);
var r3 = RegExp(r);
assertFalse(r3 === r);
+ allow = true;
assertEquals(r3, r);
+ allow = false;
var r4 = new A(r2);
assertFalse(r4 === r2);
+ allow = true;
assertEquals(r4, r2);
+ allow = false;
assertTrue(A.prototype === r4.__proto__);
r[Symbol.match] = false;
var r5 = new A(r);
assertFalse(r5 === r);
+ allow = true;
assertEquals(r5, r);
+ allow = false;
assertTrue(A.prototype === r5.__proto__);
})();
diff --git a/deps/v8/test/mjsunit/es6/regexp-flags.js b/deps/v8/test/mjsunit/es6/regexp-flags.js
index 79b0197e91..480222d95a 100644
--- a/deps/v8/test/mjsunit/es6/regexp-flags.js
+++ b/deps/v8/test/mjsunit/es6/regexp-flags.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-regexps --harmony-unicode-regexps
+// Flags: --harmony-unicode-regexps
var r1 = /abc/gi;
assertEquals("abc", r1.source);
@@ -44,15 +44,18 @@ assertEquals(2, get_count);
// Overridden flag getters affects the flags getter.
assertEquals("gi", r3.flags);
assertEquals(4, get_count);
-// Overridden flag getters do not affect the internal flags.
+// Overridden flag getters affect string.replace
+// TODO(adamk): Add more tests here once we've switched
+// to use [[OriginalFlags]] in more cases.
assertEquals(expected, string.replace(r3, "X"));
-assertEquals(4, get_count);
+assertEquals(5, get_count);
function testName(name) {
- // TODO(littledan): For web compatibility, we don't throw an exception,
- // but ES2015 expects an exception to be thrown from this getter.
- assertEquals(undefined, RegExp.prototype[name]);
+ // Test for ES2017 RegExp web compatibility semantics
+ // https://github.com/tc39/ecma262/pull/511
+ assertEquals(name === "source" ? "(?:)" : undefined,
+ RegExp.prototype[name]);
assertEquals(
"get " + name,
Object.getOwnPropertyDescriptor(RegExp.prototype, name).get.name);
@@ -64,3 +67,55 @@ testName("multiline");
testName("source");
testName("sticky");
testName("unicode");
+
+
+RegExp.prototype.flags = 'setter should be undefined';
+
+assertEquals('', RegExp('').flags);
+assertEquals('', /./.flags);
+assertEquals('gimuy', RegExp('', 'yugmi').flags);
+assertEquals('gimuy', /foo/yumig.flags);
+
+var descriptor = Object.getOwnPropertyDescriptor(RegExp.prototype, 'flags');
+assertTrue(descriptor.configurable);
+assertFalse(descriptor.enumerable);
+assertInstanceof(descriptor.get, Function);
+assertEquals(undefined, descriptor.set);
+
+function testGenericFlags(object) {
+ return descriptor.get.call(object);
+}
+
+assertEquals('', testGenericFlags({}));
+assertEquals('i', testGenericFlags({ ignoreCase: true }));
+assertEquals('uy', testGenericFlags({ global: 0, sticky: 1, unicode: 1 }));
+assertEquals('m', testGenericFlags({ __proto__: { multiline: true } }));
+assertThrows(function() { testGenericFlags(); }, TypeError);
+assertThrows(function() { testGenericFlags(undefined); }, TypeError);
+assertThrows(function() { testGenericFlags(null); }, TypeError);
+assertThrows(function() { testGenericFlags(true); }, TypeError);
+assertThrows(function() { testGenericFlags(false); }, TypeError);
+assertThrows(function() { testGenericFlags(''); }, TypeError);
+assertThrows(function() { testGenericFlags(42); }, TypeError);
+
+var counter = 0;
+var map = {};
+var object = {
+ get global() {
+ map.g = counter++;
+ },
+ get ignoreCase() {
+ map.i = counter++;
+ },
+ get multiline() {
+ map.m = counter++;
+ },
+ get unicode() {
+ map.u = counter++;
+ },
+ get sticky() {
+ map.y = counter++;
+ }
+};
+testGenericFlags(object);
+assertEquals({ g: 0, i: 1, m: 2, u: 3, y: 4 }, map);
diff --git a/deps/v8/test/mjsunit/harmony/regexp-sticky.js b/deps/v8/test/mjsunit/es6/regexp-sticky.js
index 971adb7fed..c0633f9173 100644
--- a/deps/v8/test/mjsunit/harmony/regexp-sticky.js
+++ b/deps/v8/test/mjsunit/es6/regexp-sticky.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-regexps
-
var re = /foo.bar/;
assertTrue(!!"foo*bar".match(re));
diff --git a/deps/v8/test/mjsunit/es6/regexp-tostring.js b/deps/v8/test/mjsunit/es6/regexp-tostring.js
index 3deeeb7ed8..23e137c7aa 100644
--- a/deps/v8/test/mjsunit/es6/regexp-tostring.js
+++ b/deps/v8/test/mjsunit/es6/regexp-tostring.js
@@ -44,3 +44,14 @@ testThrows(1);
assertEquals("/pattern/flags", RegExp.prototype.toString.call(fake));
assertEquals(["p", "ps", "f", "fs"], log);
+
+// Monkey-patching is also possible on RegExp instances
+
+let weird = /foo/;
+Object.defineProperty(weird, 'flags', {value: 'bar'});
+Object.defineProperty(weird, 'source', {value: 'baz'});
+assertEquals('/baz/bar', weird.toString());
+
+assertEquals('/(?:)/', RegExp.prototype.toString());
+assertEquals('(?:)', RegExp.prototype.source);
+assertEquals('', RegExp.prototype.flags);
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-2219.js b/deps/v8/test/mjsunit/es6/regress/regress-2219.js
index 29e08603b5..79f5bfbccc 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-2219.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-2219.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-proxies --expose-gc
+// Flags: --expose-gc
var p = new Proxy({}, {getOwnPropertyDescriptor: function() { gc() }});
var o = Object.create(p);
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-2225.js b/deps/v8/test/mjsunit/es6/regress/regress-2225.js
index 75778753b2..cb5cd8c664 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-2225.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-2225.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-proxies
-
var proxy_has_x = false;
var proxy = new Proxy({}, {
get(t, key, receiver) {
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-4395-global-eval.js b/deps/v8/test/mjsunit/es6/regress/regress-4395-global-eval.js
index b4579b141f..72a0eced5f 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-4395-global-eval.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-4395-global-eval.js
@@ -1,8 +1,6 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// Flags: --harmony-default-parameters --harmony-destructuring-bind
((x, y = eval('x')) => assertEquals(42, y))(42);
((x, {y = eval('x')}) => assertEquals(42, y))(42, {});
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-4395.js b/deps/v8/test/mjsunit/es6/regress/regress-4395.js
index fcc6784428..bdf8443149 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-4395.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-4395.js
@@ -1,8 +1,6 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// Flags: --harmony-destructuring-bind --harmony-default-parameters
(function testExpressionTypes() {
"use strict";
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-4400.js b/deps/v8/test/mjsunit/es6/regress/regress-4400.js
index 7c42e4f557..98ad269bfb 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-4400.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-4400.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-default-parameters --min-preparse-length=0
+// Flags: --min-preparse-length=0
function borked(a = [], b = {}, c) {}
borked();
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-4585.js b/deps/v8/test/mjsunit/es6/regress/regress-4585.js
index ada91c67ca..8ded6464c9 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-4585.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-4585.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-destructuring-bind
-
assertThrows(`for(const { method() {} } = this) {}`, SyntaxError);
assertThrows(`var { method() {} } = this;`, SyntaxError);
assertThrows(`for(const { *method() {} } = this) {}`, SyntaxError);
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-4759.js b/deps/v8/test/mjsunit/es6/regress/regress-4759.js
new file mode 100644
index 0000000000..5f8ee6827e
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/regress/regress-4759.js
@@ -0,0 +1,23 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function iterable(done) {
+ return {
+ [Symbol.iterator]: function() {
+ return {
+ next: function() {
+ if (done) return { done: true };
+ done = true;
+ return { value: 42, done: false };
+ }
+ }
+ }
+ }
+}
+
+var [...result] = iterable(true);
+assertEquals([], result);
+
+var [...result] = iterable(false);
+assertEquals([42], result);
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-517455.js b/deps/v8/test/mjsunit/es6/regress/regress-517455.js
index f07e8fe63b..9c1dfd758a 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-517455.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-517455.js
@@ -1,8 +1,6 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// Flags: --harmony-destructuring-bind
function f({x = ""}) { eval(x) }
f({})
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-576662.js b/deps/v8/test/mjsunit/es6/regress/regress-576662.js
index 5541b79b5d..ad582d6633 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-576662.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-576662.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies
-
// https://code.google.com/p/chromium/issues/detail?id=576662 (simplified)
Realm.create();
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-cr493566.js b/deps/v8/test/mjsunit/es6/regress/regress-cr493566.js
index 2b0b7eace7..7fbbd7dc19 100644
--- a/deps/v8/test/mjsunit/es6/regress/regress-cr493566.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-cr493566.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies --harmony-reflect
-
"use strict";
var global = this;
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-cr512574.js b/deps/v8/test/mjsunit/es6/regress/regress-cr512574.js
index 8d843ee694..2bff76349c 100644
--- a/deps/v8/test/mjsunit/es6/regress/regress-cr512574.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-cr512574.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-destructuring-bind
-
function f({}) {
for (var v in []);
};
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-crbug-448730.js b/deps/v8/test/mjsunit/es6/regress/regress-crbug-448730.js
index cf26127643..a3c70acf6d 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-crbug-448730.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-crbug-448730.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-proxies
+// Flags: --allow-natives-syntax
function bar() {}
bar({ a: new Proxy({}, {}) });
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-crbug-461520.js b/deps/v8/test/mjsunit/es6/regress/regress-crbug-461520.js
index 7ef9e20520..d12ec53f9c 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-crbug-461520.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-crbug-461520.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies
-
var fuse = 1;
var handler = {
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-lookup-transition.js b/deps/v8/test/mjsunit/es6/regress/regress-lookup-transition.js
index 9a440b6ab3..c6da9bd746 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-lookup-transition.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-lookup-transition.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies --expose-gc
+// Flags: --expose-gc
var proxy = new Proxy({}, { getOwnPropertyDescriptor:function() {
gc();
diff --git a/deps/v8/test/mjsunit/es6/string-endswith.js b/deps/v8/test/mjsunit/es6/string-endswith.js
index cbf2ed8766..4246f166a4 100644
--- a/deps/v8/test/mjsunit/es6/string-endswith.js
+++ b/deps/v8/test/mjsunit/es6/string-endswith.js
@@ -408,3 +408,11 @@ assertThrows(function() {
"toString": function() { return "abc"; }
}, [/./]);
}, TypeError);
+
+// endsWith does its brand checks with Symbol.match
+var re = /./;
+assertThrows(function() {
+ "".startsWith(re);
+}, TypeError);
+re[Symbol.match] = false;
+assertEquals(false, "".startsWith(re));
diff --git a/deps/v8/test/mjsunit/es6/string-includes.js b/deps/v8/test/mjsunit/es6/string-includes.js
index 61bf779483..c825ffdc68 100644
--- a/deps/v8/test/mjsunit/es6/string-includes.js
+++ b/deps/v8/test/mjsunit/es6/string-includes.js
@@ -162,3 +162,11 @@ assertThrows("String.prototype.includes.apply({ 'toString': function() { " +
"throw RangeError(); } }, [/./])", RangeError);
assertThrows("String.prototype.includes.apply({ 'toString': function() { " +
"return 'abc'; } }, [/./])", TypeError);
+
+// includes does its brand checks with Symbol.match
+var re = /./;
+assertThrows(function() {
+ "".includes(re);
+}, TypeError);
+re[Symbol.match] = false;
+assertEquals(false, "".includes(re));
diff --git a/deps/v8/test/mjsunit/es6/string-iterator.js b/deps/v8/test/mjsunit/es6/string-iterator.js
index 769f549254..8eb27b199a 100644
--- a/deps/v8/test/mjsunit/es6/string-iterator.js
+++ b/deps/v8/test/mjsunit/es6/string-iterator.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-tostring
-
function TestStringPrototypeIterator() {
assertTrue(String.prototype.hasOwnProperty(Symbol.iterator));
assertFalse("".hasOwnProperty(Symbol.iterator));
diff --git a/deps/v8/test/mjsunit/es6/string-startswith.js b/deps/v8/test/mjsunit/es6/string-startswith.js
index 887db994a0..f38f7b9cb8 100644
--- a/deps/v8/test/mjsunit/es6/string-startswith.js
+++ b/deps/v8/test/mjsunit/es6/string-startswith.js
@@ -399,3 +399,11 @@ assertThrows(function() {
"toString": function() { return "abc"; }
}, [/./]);
}, TypeError);
+
+// startsWith does its brand checks with Symbol.match
+var re = /./;
+assertThrows(function() {
+ "".startsWith(re);
+}, TypeError);
+re[Symbol.match] = false;
+assertEquals(false, "".startsWith(re));
diff --git a/deps/v8/test/mjsunit/es6/super.js b/deps/v8/test/mjsunit/es6/super.js
index 67cb45f590..a2ba1e863b 100644
--- a/deps/v8/test/mjsunit/es6/super.js
+++ b/deps/v8/test/mjsunit/es6/super.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax
-// Flags: --harmony-destructuring-bind --harmony-sloppy
+// Flags: --harmony-sloppy
(function TestSuperNamedLoads() {
function Base() { }
diff --git a/deps/v8/test/mjsunit/es6/symbols.js b/deps/v8/test/mjsunit/es6/symbols.js
index 38338575a0..9bac41f863 100644
--- a/deps/v8/test/mjsunit/es6/symbols.js
+++ b/deps/v8/test/mjsunit/es6/symbols.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-gc --allow-natives-syntax --harmony-tostring
+// Flags: --expose-gc --allow-natives-syntax
var symbols = []
@@ -86,6 +86,7 @@ TestPrototype()
function TestConstructor() {
+ assertEquals(0, Symbol.length);
assertSame(Function.prototype, Symbol.__proto__)
assertFalse(Object === Symbol.prototype.constructor)
assertFalse(Symbol === Object.prototype.constructor)
diff --git a/deps/v8/test/mjsunit/es6/tail-call-megatest-shard0.js b/deps/v8/test/mjsunit/es6/tail-call-megatest-shard0.js
new file mode 100644
index 0000000000..87fe29e136
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/tail-call-megatest-shard0.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-tailcalls
+
+try {
+ load("mjsunit/es6/tail-call-megatest.js");
+} catch(e) {
+ load("test/mjsunit/es6/tail-call-megatest.js");
+}
+
+run_tests(0);
diff --git a/deps/v8/test/mjsunit/es6/tail-call-megatest-shard1.js b/deps/v8/test/mjsunit/es6/tail-call-megatest-shard1.js
new file mode 100644
index 0000000000..10deb28357
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/tail-call-megatest-shard1.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-tailcalls
+
+try {
+ load("mjsunit/es6/tail-call-megatest.js");
+} catch(e) {
+ load("test/mjsunit/es6/tail-call-megatest.js");
+}
+
+run_tests(1);
diff --git a/deps/v8/test/mjsunit/es6/tail-call-megatest-shard2.js b/deps/v8/test/mjsunit/es6/tail-call-megatest-shard2.js
new file mode 100644
index 0000000000..7d2bd97783
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/tail-call-megatest-shard2.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-tailcalls
+
+try {
+ load("mjsunit/es6/tail-call-megatest.js");
+} catch(e) {
+ load("test/mjsunit/es6/tail-call-megatest.js");
+}
+
+run_tests(2);
diff --git a/deps/v8/test/mjsunit/es6/tail-call-megatest-shard3.js b/deps/v8/test/mjsunit/es6/tail-call-megatest-shard3.js
new file mode 100644
index 0000000000..7bce6c4a0e
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/tail-call-megatest-shard3.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-tailcalls
+
+try {
+ load("mjsunit/es6/tail-call-megatest.js");
+} catch(e) {
+ load("test/mjsunit/es6/tail-call-megatest.js");
+}
+
+run_tests(3);
diff --git a/deps/v8/test/mjsunit/es6/tail-call-megatest-shard4.js b/deps/v8/test/mjsunit/es6/tail-call-megatest-shard4.js
new file mode 100644
index 0000000000..6c43d3ef6f
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/tail-call-megatest-shard4.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-tailcalls
+
+try {
+ load("mjsunit/es6/tail-call-megatest.js");
+} catch(e) {
+ load("test/mjsunit/es6/tail-call-megatest.js");
+}
+
+run_tests(4);
diff --git a/deps/v8/test/mjsunit/es6/tail-call-megatest-shard5.js b/deps/v8/test/mjsunit/es6/tail-call-megatest-shard5.js
new file mode 100644
index 0000000000..a91bd3f3c2
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/tail-call-megatest-shard5.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-tailcalls
+
+try {
+ load("mjsunit/es6/tail-call-megatest.js");
+} catch(e) {
+ load("test/mjsunit/es6/tail-call-megatest.js");
+}
+
+run_tests(5);
diff --git a/deps/v8/test/mjsunit/es6/tail-call-megatest-shard6.js b/deps/v8/test/mjsunit/es6/tail-call-megatest-shard6.js
new file mode 100644
index 0000000000..0d70a4203b
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/tail-call-megatest-shard6.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-tailcalls
+
+try {
+ load("mjsunit/es6/tail-call-megatest.js");
+} catch(e) {
+ load("test/mjsunit/es6/tail-call-megatest.js");
+}
+
+run_tests(6);
diff --git a/deps/v8/test/mjsunit/es6/tail-call-megatest-shard7.js b/deps/v8/test/mjsunit/es6/tail-call-megatest-shard7.js
new file mode 100644
index 0000000000..63477af08b
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/tail-call-megatest-shard7.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-tailcalls
+
+try {
+ load("mjsunit/es6/tail-call-megatest.js");
+} catch(e) {
+ load("test/mjsunit/es6/tail-call-megatest.js");
+}
+
+run_tests(7);
diff --git a/deps/v8/test/mjsunit/es6/tail-call-megatest-shard8.js b/deps/v8/test/mjsunit/es6/tail-call-megatest-shard8.js
new file mode 100644
index 0000000000..0c68827602
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/tail-call-megatest-shard8.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-tailcalls
+
+try {
+ load("mjsunit/es6/tail-call-megatest.js");
+} catch(e) {
+ load("test/mjsunit/es6/tail-call-megatest.js");
+}
+
+run_tests(8);
diff --git a/deps/v8/test/mjsunit/es6/tail-call-megatest-shard9.js b/deps/v8/test/mjsunit/es6/tail-call-megatest-shard9.js
new file mode 100644
index 0000000000..82f991a329
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/tail-call-megatest-shard9.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-tailcalls
+
+try {
+ load("mjsunit/es6/tail-call-megatest.js");
+} catch(e) {
+ load("test/mjsunit/es6/tail-call-megatest.js");
+}
+
+run_tests(9);
diff --git a/deps/v8/test/mjsunit/es6/tail-call-megatest.js b/deps/v8/test/mjsunit/es6/tail-call-megatest.js
index 005796195a..1de8ec6c8e 100644
--- a/deps/v8/test/mjsunit/es6/tail-call-megatest.js
+++ b/deps/v8/test/mjsunit/es6/tail-call-megatest.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-tailcalls --no-turbo-inlining
+// Flags: --allow-natives-syntax --harmony-tailcalls
Error.prepareStackTrace = (error,stack) => {
@@ -11,23 +11,15 @@ Error.prepareStackTrace = (error,stack) => {
}
-function CheckStackTrace(expected) {
+function checkStackTrace(expected) {
var e = new Error();
e.stack; // prepare stack trace
var stack = e.strace;
- assertEquals("CheckStackTrace", stack[0].getFunctionName());
+ assertEquals("checkStackTrace", stack[0].getFunctionName());
for (var i = 0; i < expected.length; i++) {
assertEquals(expected[i].name, stack[i + 1].getFunctionName());
}
}
-%NeverOptimizeFunction(CheckStackTrace);
-
-
-function CheckArguments(expected, args) {
- args = Array.prototype.slice.call(args);
- assertEquals(expected, args);
-}
-%NeverOptimizeFunction(CheckArguments);
var CAN_INLINE_COMMENT = "// Let it be inlined.";
@@ -45,28 +37,59 @@ function ident_source(source, ident) {
return ident + source.replace(/\n/gi, "\n" + ident);
}
-var global = Function('return this')();
-var the_receiver = {receiver: 1};
+var SHARDS_COUNT = 10;
-function run_tests() {
+function run_tests(shard) {
function inlinable_comment(inlinable) {
return inlinable ? CAN_INLINE_COMMENT : DONT_INLINE_COMMENT;
}
+ // Check arguments manually to avoid bailing out with reason "bad value
+ // context for arguments value".
+ function check_arguments_template(expected_name) {
+ var lines = [
+ ` assertEquals_(${expected_name}.length, arguments.length);`,
+ ` for (var i = 0; i < ${expected_name}.length; i++) {`,
+ ` assertEquals_(${expected_name}[i], arguments[i]);`,
+ ` }`,
+ ];
+ return lines.join("\n");
+ }
+ var check_arguments = check_arguments_template("expected_args");
+
+ function deopt_template(deopt_mode) {
+ switch(deopt_mode) {
+ case "none":
+ return " // Don't deoptimize";
+ case "f":
+ case "g":
+ case "test":
+ return ` %DeoptimizeFunction(${deopt_mode});`;
+ default:
+ assertUnreachable();
+ }
+ }
+
var f_cfg_sloppy = {
func_name: 'f',
source_template: function(cfg) {
var receiver = cfg.f_receiver != undefined ? cfg.f_receiver
: "global";
+ var do_checks = [
+ ` assertEquals_(${receiver}, this);`,
+ ` ${!cfg.check_new_target ? "// " : ""}assertEquals_(undefined, new.target);`,
+ check_arguments,
+ ` checkStackTrace_([f, test]);`,
+ ].join("\n");
+
var lines = [
`function f(a) {`,
` ${inlinable_comment(cfg.f_inlinable)}`,
- ` assertEquals(${receiver}, this);`,
- ` CheckArguments([${cfg.f_args}], arguments);`,
- ` CheckStackTrace([f, test]);`,
- ` %DeoptimizeNow();`,
- ` CheckArguments([${cfg.f_args}], arguments);`,
- ` CheckStackTrace([f, test]);`,
+ ` counter++;`,
+ ` var expected_args = [${cfg.f_args}];`,
+ do_checks,
+ deopt_template(cfg.deopt_mode),
+ do_checks,
` return 42;`,
`}`,
];
@@ -79,16 +102,22 @@ function run_tests() {
source_template: function(cfg) {
var receiver = cfg.f_receiver != undefined ? cfg.f_receiver
: "undefined";
+ var do_checks = [
+ ` assertEquals_(${receiver}, this);`,
+ ` ${!cfg.check_new_target ? "// " : ""}assertEquals_(undefined, new.target);`,
+ check_arguments,
+ ` checkStackTrace_([f, test]);`,
+ ].join("\n");
+
var lines = [
`function f(a) {`,
` "use strict";`,
` ${inlinable_comment(cfg.f_inlinable)}`,
- ` assertEquals(${receiver}, this);`,
- ` CheckArguments([${cfg.f_args}], arguments);`,
- ` CheckStackTrace([f, test]);`,
- ` %DeoptimizeNow();`,
- ` CheckArguments([${cfg.f_args}], arguments);`,
- ` CheckStackTrace([f, test]);`,
+ ` counter++;`,
+ ` var expected_args = [${cfg.f_args}];`,
+ do_checks,
+ deopt_template(cfg.deopt_mode),
+ do_checks,
` return 42;`,
`}`,
];
@@ -101,15 +130,21 @@ function run_tests() {
source_template: function(cfg) {
var receiver = cfg.f_receiver != undefined ? cfg.f_receiver
: "global";
+ var do_checks = [
+ ` assertEquals_(${receiver}, this);`,
+ ` ${!cfg.check_new_target ? "// " : ""}assertEquals_(undefined, new.target);`,
+ check_arguments,
+ ` checkStackTrace_([f, test]);`,
+ ].join("\n");
+
var lines = [
`function f(a) {`,
` ${inlinable_comment(cfg.f_inlinable)}`,
- ` assertEquals(${receiver}, this);`,
- ` CheckArguments([${cfg.f_args}], arguments);`,
- ` CheckStackTrace([f, test]);`,
- ` %DeoptimizeNow();`,
- ` CheckArguments([${cfg.f_args}], arguments);`,
- ` CheckStackTrace([f, test]);`,
+ ` counter++;`,
+ ` var expected_args = [${cfg.f_args}];`,
+ do_checks,
+ deopt_template(cfg.deopt_mode),
+ do_checks,
` return 42;`,
`}`,
`var eval = f;`,
@@ -121,16 +156,22 @@ function run_tests() {
var f_cfg_bound = {
func_name: 'bound',
source_template: function(cfg) {
+ var do_checks = [
+ ` assertEquals_(receiver, this);`,
+ ` ${!cfg.check_new_target ? "// " : ""}assertEquals_(undefined, new.target);`,
+ check_arguments,
+ ` checkStackTrace_([f, test]);`,
+ ].join("\n");
+
var lines = [
`function f(a) {`,
` "use strict";`,
` ${inlinable_comment(cfg.f_inlinable)}`,
- ` assertEquals(receiver, this);`,
- ` CheckArguments([${cfg.f_args}], arguments);`,
- ` CheckStackTrace([f, test]);`,
- ` %DeoptimizeNow();`,
- ` CheckArguments([${cfg.f_args}], arguments);`,
- ` CheckStackTrace([f, test]);`,
+ ` counter++;`,
+ ` var expected_args = [${cfg.f_args}];`,
+ do_checks,
+ deopt_template(cfg.deopt_mode),
+ do_checks,
` return 42;`,
`}`,
`var receiver = {a: 153};`,
@@ -145,15 +186,21 @@ function run_tests() {
source_template: function(cfg) {
var receiver = cfg.f_receiver != undefined ? cfg.f_receiver
: "global";
+ var do_checks = [
+ ` assertEquals_(${receiver}, this);`,
+ ` ${!cfg.check_new_target ? "// " : ""}assertEquals_(undefined, new.target);`,
+ check_arguments,
+ ` checkStackTrace_([f, test]);`,
+ ].join("\n");
+
var lines = [
`function f(a) {`,
` ${inlinable_comment(cfg.f_inlinable)}`,
- ` assertEquals(${receiver}, this);`,
- ` CheckArguments([${cfg.f_args}], arguments);`,
- ` CheckStackTrace([f, test]);`,
- ` %DeoptimizeNow();`,
- ` CheckArguments([${cfg.f_args}], arguments);`,
- ` CheckStackTrace([f, test]);`,
+ ` counter++;`,
+ ` var expected_args = [${cfg.f_args}];`,
+ do_checks,
+ deopt_template(cfg.deopt_mode),
+ do_checks,
` return 42;`,
`}`,
`var p = new Proxy(f, {});`,
@@ -169,7 +216,8 @@ function run_tests() {
`function g(a) {`,
` "use strict";`,
` ${inlinable_comment(cfg.g_inlinable)}`,
- ` CheckArguments([${cfg.g_args}], arguments);`,
+ ` var expected_args = [${cfg.g_args}];`,
+ check_arguments,
` return ${cfg.f_name}(${cfg.f_args});`,
`}`,
];
@@ -178,6 +226,23 @@ function run_tests() {
};
+ var g_cfg_reflect_apply = {
+ receiver: "the_receiver",
+ source_template: function(cfg) {
+ var lines = [
+ `function g(a) {`,
+ ` "use strict";`,
+ ` ${inlinable_comment(cfg.g_inlinable)}`,
+ ` var expected_args = [${cfg.g_args}];`,
+ check_arguments,
+ ` return Reflect.apply(${cfg.f_name}, the_receiver, [${cfg.f_args}]);`,
+ `}`,
+ ];
+ return lines.join("\n");
+ },
+ };
+
+
var g_cfg_function_apply = {
receiver: "the_receiver",
source_template: function(cfg) {
@@ -185,7 +250,8 @@ function run_tests() {
`function g(a) {`,
` "use strict";`,
` ${inlinable_comment(cfg.g_inlinable)}`,
- ` CheckArguments([${cfg.g_args}], arguments);`,
+ ` var expected_args = [${cfg.g_args}];`,
+ check_arguments,
` return ${cfg.f_name}.apply(the_receiver, [${cfg.f_args}]);`,
`}`,
];
@@ -194,6 +260,24 @@ function run_tests() {
};
+ var g_cfg_function_apply_arguments_object = {
+ receiver: "the_receiver",
+ source_template: function(cfg) {
+ cfg.f_args = cfg.g_args;
+ var lines = [
+ `function g(a) {`,
+ ` "use strict";`,
+ ` ${inlinable_comment(cfg.g_inlinable)}`,
+ ` var expected_args = [${cfg.g_args}];`,
+ check_arguments,
+ ` return ${cfg.f_name}.apply(the_receiver, arguments);`,
+ `}`,
+ ];
+ return lines.join("\n");
+ },
+ };
+
+
var g_cfg_function_call = {
receiver: "the_receiver",
source_template: function(cfg) {
@@ -205,7 +289,8 @@ function run_tests() {
`function g(a) {`,
` "use strict";`,
` ${inlinable_comment(cfg.g_inlinable)}`,
- ` CheckArguments([${cfg.g_args}], arguments);`,
+ ` var expected_args = [${cfg.g_args}];`,
+ check_arguments,
` return ${cfg.f_name}.call(${f_args});`,
`}`,
];
@@ -215,27 +300,39 @@ function run_tests() {
function test_template(cfg) {
- var f_source = cfg.f_source_template(cfg);
+ // Note: g_source_template modifies cfg.f_args in some cases.
var g_source = cfg.g_source_template(cfg);
- f_source = ident_source(f_source, 2);
g_source = ident_source(g_source, 2);
+ var f_source = cfg.f_source_template(cfg);
+ f_source = ident_source(f_source, 2);
+
var lines = [
`(function() {`,
+ ` // Avoid bailing out because of "Reference to a variable which requires dynamic lookup".`,
+ ` var assertEquals_ = assertEquals;`,
+ ` var checkStackTrace_ = checkStackTrace;`,
+ ` var undefined = void 0;`,
+ ` var global = Function('return this')();`,
+ ` var the_receiver = {receiver: 1};`,
+ ` var counter = 0;`,
+ ``,
+ ` // Don't inline helper functions`,
+ ` %NeverOptimizeFunction(assertEquals);`,
+ ` %NeverOptimizeFunction(checkStackTrace);`,
+ ``,
f_source,
g_source,
` function test() {`,
` "use strict";`,
- ` assertEquals(42, g(${cfg.g_args}));`,
+ ` assertEquals_(42, g(${cfg.g_args}));`,
` }`,
- ` ${cfg.f_inlinable ? "%SetForceInlineFlag(f)" : ""};`,
- ` ${cfg.g_inlinable ? "%SetForceInlineFlag(g)" : ""};`,
- ``,
- ` test();`,
+ ` ${"test();".repeat(cfg.test_warmup_count)}`,
+ ` ${cfg.f_inlinable ? "%SetForceInlineFlag(f)" : "%OptimizeFunctionOnNextCall(f)"};`,
+ ` ${cfg.g_inlinable ? "%SetForceInlineFlag(g)" : "%OptimizeFunctionOnNextCall(g)"};`,
` %OptimizeFunctionOnNextCall(test);`,
- ` %OptimizeFunctionOnNextCall(f);`,
- ` %OptimizeFunctionOnNextCall(g);`,
` test();`,
+ ` assertEquals(${1 + cfg.test_warmup_count}, counter);`,
`})();`,
``,
];
@@ -243,11 +340,13 @@ function run_tests() {
return source;
}
- // TODO(v8:4698), TODO(ishell): support all commented cases.
var f_args_variants = ["", "1", "1, 2"];
- var g_args_variants = [/*"",*/ "10", /*"10, 20"*/];
- var f_inlinable_variants = [/*true,*/ false];
+ var g_args_variants = ["", "10", "10, 20"];
+ var f_inlinable_variants = [true, false];
var g_inlinable_variants = [true, false];
+ // This is to avoid bailing out because of referencing new.target.
+ var check_new_target_variants = [true, false];
+ var deopt_mode_variants = ["none", "f", "g", "test"];
var f_variants = [
f_cfg_sloppy,
f_cfg_strict,
@@ -257,36 +356,60 @@ function run_tests() {
];
var g_variants = [
g_cfg_normal,
- g_cfg_function_call,
+ g_cfg_reflect_apply,
g_cfg_function_apply,
+ g_cfg_function_apply_arguments_object,
+ g_cfg_function_call,
];
+ var test_warmup_counts = [0, 1, 2];
+ var iter = 0;
+ var tests_executed = 0;
+ if (shard !== undefined) {
+ print("Running shard #" + shard);
+ }
f_variants.forEach((f_cfg) => {
- g_variants.forEach((g_cfg) => {
- f_args_variants.forEach((f_args) => {
- g_args_variants.forEach((g_args) => {
- f_inlinable_variants.forEach((f_inlinable) => {
- g_inlinable_variants.forEach((g_inlinable) => {
- var cfg = {
- f_source_template: f_cfg.source_template,
- f_inlinable,
- f_args,
- f_name: f_cfg.func_name,
- f_receiver: g_cfg.receiver,
- g_source_template: g_cfg.source_template,
- g_inlinable,
- g_args,
- };
- var source = test_template(cfg);
- print("====================");
- print(source);
- eval(source);
+ check_new_target_variants.forEach((check_new_target) => {
+ deopt_mode_variants.forEach((deopt_mode) => {
+ g_variants.forEach((g_cfg) => {
+ f_args_variants.forEach((f_args) => {
+ g_args_variants.forEach((g_args) => {
+ f_inlinable_variants.forEach((f_inlinable) => {
+ g_inlinable_variants.forEach((g_inlinable) => {
+ test_warmup_counts.forEach((test_warmup_count) => {
+ if (shard !== undefined && (iter++) % SHARDS_COUNT != shard) {
+ print("skipping...");
+ return;
+ }
+ tests_executed++;
+ var cfg = {
+ f_source_template: f_cfg.source_template,
+ f_inlinable,
+ f_args,
+ f_name: f_cfg.func_name,
+ f_receiver: g_cfg.receiver,
+ g_source_template: g_cfg.source_template,
+ g_inlinable,
+ g_args,
+ test_warmup_count,
+ check_new_target,
+ deopt_mode,
+ };
+ var source = test_template(cfg);
+ print("====================");
+ print(source);
+ eval(source);
+ });
+ });
+ });
});
});
});
});
});
});
+ print("Number of tests executed: " + tests_executed);
}
-run_tests();
+// Uncomment to run all the tests at once or use shard runners.
+//run_tests();
diff --git a/deps/v8/test/mjsunit/es6/tail-call-proxies.js b/deps/v8/test/mjsunit/es6/tail-call-proxies.js
index 25f9fcfbe7..251ac0c20f 100644
--- a/deps/v8/test/mjsunit/es6/tail-call-proxies.js
+++ b/deps/v8/test/mjsunit/es6/tail-call-proxies.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-tailcalls --harmony-proxies
+// Flags: --allow-natives-syntax --harmony-tailcalls
"use strict";
Error.prepareStackTrace = (e,s) => s;
diff --git a/deps/v8/test/mjsunit/es6/tail-call-simple.js b/deps/v8/test/mjsunit/es6/tail-call-simple.js
index d2890b0212..cc638082be 100644
--- a/deps/v8/test/mjsunit/es6/tail-call-simple.js
+++ b/deps/v8/test/mjsunit/es6/tail-call-simple.js
@@ -10,7 +10,7 @@
(function() {
function f(n) {
if (n <= 0) {
- return "foo";
+ return "foo";
}
return f(n - 1);
}
@@ -27,7 +27,7 @@
"use strict";
function f(n) {
if (n <= 0) {
- return "foo";
+ return "foo";
}
return f(n - 1);
}
@@ -39,6 +39,20 @@
(function() {
"use strict";
+ function f(n) {
+ if (n <= 0) {
+ return "foo";
+ }
+ return f(n - 1, 42); // Call with arguments adaptor.
+ }
+ assertEquals("foo", f(1e5));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals("foo", f(1e5));
+})();
+
+
+(function() {
+ "use strict";
function f(n){
if (n <= 0) {
return "foo";
@@ -59,6 +73,28 @@
})();
+(function() {
+ "use strict";
+ function f(n){
+ if (n <= 0) {
+ return "foo";
+ }
+ return g(n - 1, 42); // Call with arguments adaptor.
+ }
+ function g(n){
+ if (n <= 0) {
+ return "bar";
+ }
+ return f(n - 1, 42); // Call with arguments adaptor.
+ }
+ assertEquals("foo", f(1e5));
+ assertEquals("bar", f(1e5 + 1));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals("foo", f(1e5));
+ assertEquals("bar", f(1e5 + 1));
+})();
+
+
//
// Tail call bound functions.
//
diff --git a/deps/v8/test/mjsunit/es6/tail-call.js b/deps/v8/test/mjsunit/es6/tail-call.js
index e9539c37ba..d0d00f4b3e 100644
--- a/deps/v8/test/mjsunit/es6/tail-call.js
+++ b/deps/v8/test/mjsunit/es6/tail-call.js
@@ -20,6 +20,8 @@ function CheckStackTrace(expected) {
assertEquals(expected[i].name, stack[i + 1].getFunctionName());
}
}
+%NeverOptimizeFunction(CheckStackTrace);
+
function f(expected_call_stack, a, b) {
CheckStackTrace(expected_call_stack);
@@ -69,6 +71,7 @@ function f_153(expected_call_stack, a) {
assertEquals(12, g4(1));
}
test();
+ test();
%OptimizeFunctionOnNextCall(test);
test();
})();
@@ -111,6 +114,7 @@ function f_153(expected_call_stack, a) {
assertEquals(12, g4());
}
test();
+ test();
%OptimizeFunctionOnNextCall(test);
test();
})();
@@ -162,6 +166,7 @@ function f_153(expected_call_stack, a) {
assertEquals(12, g4(1));
}
test();
+ test();
%OptimizeFunctionOnNextCall(test);
test();
})();
@@ -212,6 +217,89 @@ function f_153(expected_call_stack, a) {
assertEquals(12, g4());
}
test();
+ test();
+ %OptimizeFunctionOnNextCall(test);
+ test();
+})();
+
+
+// Tail calling from getter.
+(function() {
+ function g(v) {
+ CheckStackTrace([g, test]);
+ %DeoptimizeFunction(test);
+ return 153;
+ }
+ %NeverOptimizeFunction(g);
+
+ function f(v) {
+ return g();
+ }
+ %SetForceInlineFlag(f);
+
+ function test() {
+ var o = {};
+ o.__defineGetter__('p', f);
+ assertEquals(153, o.p);
+ }
+
+ test();
+ test();
+ %OptimizeFunctionOnNextCall(test);
+ test();
+})();
+
+
+// Tail calling from setter.
+(function() {
+ function g() {
+ CheckStackTrace([g, test]);
+ %DeoptimizeFunction(test);
+ return 153;
+ }
+ %NeverOptimizeFunction(g);
+
+ var context = 10;
+ function f(v) {
+ return g(context);
+ }
+ %SetForceInlineFlag(f);
+
+ function test() {
+ var o = {};
+ o.__defineSetter__('q', f);
+ assertEquals(1, o.q = 1);
+ }
+
+ test();
+ test();
+ %OptimizeFunctionOnNextCall(test);
+ test();
+})();
+
+
+// Tail calling from constructor.
+(function() {
+ function g(context) {
+ CheckStackTrace([g, test]);
+ %DeoptimizeFunction(test);
+ return {x: 153};
+ }
+ %NeverOptimizeFunction(g);
+
+ function A() {
+ this.x = 42;
+ return g();
+ }
+
+ function test() {
+ var o = new A();
+ %DebugPrint(o);
+ assertEquals(153, o.x);
+ }
+
+ test();
+ test();
%OptimizeFunctionOnNextCall(test);
test();
})();
@@ -237,6 +325,53 @@ function f_153(expected_call_stack, a) {
assertEquals(153, g3());
}
test();
+ test();
+ %OptimizeFunctionOnNextCall(test);
+ test();
+})();
+
+
+// Tail calling from various statements.
+(function() {
+ function g1() {
+ for (var v in {a:0}) {
+ return f_153([f_153, g1, test]);
+ }
+ }
+
+ function g2() {
+ for (var v of [1, 2, 3]) {
+ return f_153([f_153, g2, test]);
+ }
+ }
+
+ function g3() {
+ for (var i = 0; i < 10; i++) {
+ return f_153([f_153, test]);
+ }
+ }
+
+ function g4() {
+ while (true) {
+ return f_153([f_153, test]);
+ }
+ }
+
+ function g5() {
+ do {
+ return f_153([f_153, test]);
+ } while (true);
+ }
+
+ function test() {
+ assertEquals(153, g1());
+ assertEquals(153, g2());
+ assertEquals(153, g3());
+ assertEquals(153, g4());
+ assertEquals(153, g5());
+ }
+ test();
+ test();
%OptimizeFunctionOnNextCall(test);
test();
})();
@@ -280,6 +415,7 @@ function f_153(expected_call_stack, a) {
assertEquals(153, tc3());
}
test();
+ test();
%OptimizeFunctionOnNextCall(test);
test();
})();
@@ -321,6 +457,7 @@ function f_153(expected_call_stack, a) {
assertEquals(153, tf3());
}
test();
+ test();
%OptimizeFunctionOnNextCall(test);
test();
})();
@@ -381,6 +518,28 @@ function f_153(expected_call_stack, a) {
assertEquals(153, tcf4());
}
test();
+ test();
+ %OptimizeFunctionOnNextCall(test);
+ test();
+})();
+
+
+// Test tail calls from arrow functions.
+(function () {
+ function g1(a) {
+ return (() => { return f_153([f_153, test]); })();
+ }
+
+ function g2(a) {
+ return (() => f_153([f_153, test]))();
+ }
+
+ function test() {
+ assertEquals(153, g1());
+ assertEquals(153, g2());
+ }
+ test();
+ test();
%OptimizeFunctionOnNextCall(test);
test();
})();
diff --git a/deps/v8/test/mjsunit/es6/typed-array-iterator.js b/deps/v8/test/mjsunit/es6/typed-array-iterator.js
index 0b27625c5c..7970bba928 100644
--- a/deps/v8/test/mjsunit/es6/typed-array-iterator.js
+++ b/deps/v8/test/mjsunit/es6/typed-array-iterator.js
@@ -21,10 +21,10 @@ assertFalse(TypedArrayPrototype.propertyIsEnumerable('values'));
assertFalse(TypedArrayPrototype.propertyIsEnumerable('keys'));
assertFalse(TypedArrayPrototype.propertyIsEnumerable(Symbol.iterator));
-assertEquals(Array.prototype.entries, TypedArrayPrototype.entries);
-assertEquals(Array.prototype[Symbol.iterator], TypedArrayPrototype.values);
-assertEquals(Array.prototype.keys, TypedArrayPrototype.keys);
-assertEquals(Array.prototype[Symbol.iterator], TypedArrayPrototype[Symbol.iterator]);
+assertFalse(Array.prototype.entries === TypedArrayPrototype.entries);
+assertFalse(Array.prototype[Symbol.iterator] === TypedArrayPrototype.values);
+assertFalse(Array.prototype.keys === TypedArrayPrototype.keys);
+assertFalse(Array.prototype[Symbol.iterator] === TypedArrayPrototype[Symbol.iterator]);
function TestTypedArrayValues(constructor) {
diff --git a/deps/v8/test/mjsunit/es6/typedarray-of.js b/deps/v8/test/mjsunit/es6/typedarray-of.js
index a6df29a0dd..eaa7bde11b 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-of.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-of.js
@@ -115,9 +115,9 @@ function TestTypedArrayOf(constructor) {
// Check superficial features of %TypedArray%.of.
var desc = Object.getOwnPropertyDescriptor(constructor.__proto__, "of");
- assertEquals(desc.configurable, false);
+ assertEquals(desc.configurable, true);
assertEquals(desc.enumerable, false);
- assertEquals(desc.writable, false);
+ assertEquals(desc.writable, true);
assertEquals(constructor.of.length, 0);
// %TypedArray%.of is not a constructor.
diff --git a/deps/v8/test/mjsunit/es6/typedarray-proto.js b/deps/v8/test/mjsunit/es6/typedarray-proto.js
index 346b2ea63d..0bd90d13b4 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-proto.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-proto.js
@@ -28,17 +28,13 @@ assertEquals(TypedArrayPrototype.__proto__, Object.prototype);
let classProperties = new Set([
"length", "name", "arguments", "caller", "prototype", "BYTES_PER_ELEMENT"
]);
-let instanceProperties = new Set([
- "BYTES_PER_ELEMENT", "constructor", "prototype",
- // length is also an instance property as a temporary workaround to
- // BUG(chromium:579905). TODO(littledan): remove the workaround
- "length"
-]);
+let instanceProperties = new Set(["BYTES_PER_ELEMENT", "constructor", "prototype"]);
function functionProperties(object) {
return Object.getOwnPropertyNames(object).filter(function(name) {
return typeof Object.getOwnPropertyDescriptor(object, name).value
- == "function" && name != 'constructor';
+ == "function"
+ && name != 'constructor' && name != 'subarray';
});
}
diff --git a/deps/v8/test/mjsunit/es6/typedarray.js b/deps/v8/test/mjsunit/es6/typedarray.js
index e6a949ca59..4bdf8226a8 100644
--- a/deps/v8/test/mjsunit/es6/typedarray.js
+++ b/deps/v8/test/mjsunit/es6/typedarray.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-tostring
-
// ArrayBuffer
function TestByteLength(param, expectedByteLength) {
diff --git a/deps/v8/test/mjsunit/es7/object-observe.js b/deps/v8/test/mjsunit/es7/object-observe.js
index 712f5a6415..a558c51421 100644
--- a/deps/v8/test/mjsunit/es7/object-observe.js
+++ b/deps/v8/test/mjsunit/es7/object-observe.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-proxies --harmony-object-observe
+// Flags: --harmony-object-observe
// Flags: --allow-natives-syntax
var allObservers = [];
diff --git a/deps/v8/test/mjsunit/for-in-opt.js b/deps/v8/test/mjsunit/for-in-opt.js
index 8f73539382..bc59a1b087 100644
--- a/deps/v8/test/mjsunit/for-in-opt.js
+++ b/deps/v8/test/mjsunit/for-in-opt.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies --allow-natives-syntax --expose-debug-as debug
+// Flags: --allow-natives-syntax --expose-debug-as debug
"use strict";
@@ -23,9 +23,9 @@ assertEquals(["0","1","2"], f("bla"));
// Test the lazy deopt points.
var keys = ["a", "b", "c", "d"];
-var has_keys = [];
-var deopt_has = false;
+var property_descriptor_keys = [];
var deopt_enum = false;
+var deopt_property_descriptor = false;
var handler = {
ownKeys() {
@@ -35,16 +35,14 @@ var handler = {
}
return keys;
},
- getOwnPropertyDescriptor() { return { enumerable: true, configurable: true }},
-
- has(target, k) {
- if (deopt_has) {
+ getOwnPropertyDescriptor(target, k) {
+ if (deopt_property_descriptor) {
%DeoptimizeFunction(f2);
- deopt_has = false;
+ deopt_property_descriptor = false;
}
- has_keys.push(k);
- return true;
- }
+ property_descriptor_keys.push(k);
+ return { enumerable: true, configurable: true }
+ },
};
@@ -61,8 +59,8 @@ function f2(o) {
function check_f2() {
assertEquals(keys, f2(o));
- assertEquals(keys, has_keys);
- has_keys.length = 0;
+ assertEquals(keys, property_descriptor_keys);
+ property_descriptor_keys.length = 0;
}
check_f2();
@@ -75,9 +73,10 @@ check_f2();
// Test lazy deopt after FILTER_KEY
%OptimizeFunctionOnNextCall(f2);
-deopt_has = true;
+deopt_property_descriptor = true;
check_f2();
+
function f3(o) {
for (var i in o) {
}
@@ -91,14 +90,6 @@ f3(undefined);
f3(null);
// Reliable repro for an issue previously flushed out by GC stress.
-var handler2 = {
- getPropertyDescriptor(target, k) {
- has_keys.push(k);
- return {value: 10, configurable: true, writable: false, enumerable: true};
- }
-}
-var proxy2 = new Proxy({}, handler2);
-var o2 = {__proto__: proxy2};
var p = {x: "x"}
function f4(o, p) {
@@ -112,8 +103,8 @@ function f4(o, p) {
function check_f4() {
assertEquals(keys, f4(o, p));
- assertEquals(keys, has_keys);
- has_keys.length = 0;
+ assertEquals(keys, property_descriptor_keys);
+ property_descriptor_keys.length = 0;
}
check_f4();
@@ -138,12 +129,10 @@ function listener(event, exec_state, event_data, data) {
var handler3 = {
ownKeys() { return ["a", "b"] },
- getOwnPropertyDescriptor() { return { enumerable: true, configurable: true }},
-
- has(target, k) {
+ getOwnPropertyDescriptor(target, k) {
if (k == "a") count++;
- if (x) %ScheduleBreak();
- return true;
+ if (x) %ScheduleBreak()
+ return { enumerable: true, configurable: true }
}
};
diff --git a/deps/v8/test/mjsunit/for-in.js b/deps/v8/test/mjsunit/for-in.js
index 644c27a632..bece37a3ee 100644
--- a/deps/v8/test/mjsunit/for-in.js
+++ b/deps/v8/test/mjsunit/for-in.js
@@ -120,7 +120,23 @@ for (i=0 ; i < 3; ++i) {
assertEquals("undefined", typeof y[0], "y[0]");
}
-(function() {
+(function testLargeElementKeys() {
+ // Key out of SMI range but well within safe double representaion.
+ var large_key = 2147483650;
+ var o = [];
+ // Trigger dictionary elements with HeapNumber keys.
+ o[large_key] = 0;
+ o[large_key+1] = 1;
+ o[large_key+2] = 2;
+ o[large_key+3] = 3;
+ var keys = [];
+ for (var k in o) {
+ keys.push(k);
+ }
+ assertEquals(["2147483650", "2147483651", "2147483652", "2147483653"], keys);
+})();
+
+(function testLargeElementKeysWithProto() {
var large_key = 2147483650;
var o = {__proto__: {}};
o[large_key] = 1;
@@ -131,3 +147,17 @@ for (i=0 ; i < 3; ++i) {
}
assertEquals(["2147483650"], keys);
})();
+
+(function testNonEnumerableArgumentsIndex() {
+ Object.defineProperty(arguments, 0, {enumerable:false});
+ for (var k in arguments) {
+ assertUnreachable();
+ }
+})();
+
+(function testNonEnumerableSloppyArgumentsIndex(a) {
+ Object.defineProperty(arguments, 0, {enumerable:false});
+ for (var k in arguments) {
+ assertUnreachable();
+ }
+})(true);
diff --git a/deps/v8/test/mjsunit/function-caller.js b/deps/v8/test/mjsunit/function-caller.js
index 84f3cbed2f..7443cf71a9 100644
--- a/deps/v8/test/mjsunit/function-caller.js
+++ b/deps/v8/test/mjsunit/function-caller.js
@@ -47,7 +47,7 @@ f(null);
eval('f(null)');
// Check called from strict builtin functions.
-// [null, null].sort(f); // Does not work because sort tail calls.
+[null, null].sort(f);
[null].forEach(f, null);
// Check called from sloppy builtin functions.
diff --git a/deps/v8/test/mjsunit/global-const-var-conflicts.js b/deps/v8/test/mjsunit/global-const-var-conflicts.js
deleted file mode 100644
index 960b3d3753..0000000000
--- a/deps/v8/test/mjsunit/global-const-var-conflicts.js
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Check that dynamically introducing conflicting consts/vars
-// is silently ignored (and does not lead to exceptions).
-
-// Flags: --legacy-const
-
-var caught = 0;
-
-eval("const a");
-try { eval("var a"); } catch (e) { caught++; assertTrue(e instanceof TypeError); }
-assertTrue(typeof a == 'undefined');
-try { eval("var a = 1"); } catch (e) { caught++; assertTrue(e instanceof TypeError); }
-assertTrue(typeof a == 'undefined');
-
-eval("const b = 0");
-try { eval("var b"); } catch (e) { caught++; assertTrue(e instanceof TypeError); }
-assertEquals(0, b);
-try { eval("var b = 1"); } catch (e) { caught++; assertTrue(e instanceof TypeError); }
-assertEquals(0, b);
-assertEquals(0, caught);
-
-eval("var c");
-try { eval("const c"); } catch (e) { caught++; assertTrue(e instanceof TypeError); }
-assertTrue(typeof c == 'undefined');
-assertEquals(1, caught);
-try { eval("const c = 1"); } catch (e) { caught++; assertTrue(e instanceof TypeError); }
-assertEquals(undefined, c);
-assertEquals(2, caught);
-
-eval("var d = 0");
-try { eval("const d"); } catch (e) { caught++; assertTrue(e instanceof TypeError); }
-assertEquals(0, d);
-assertEquals(3, caught);
-try { eval("const d = 1"); } catch (e) { caught++; assertTrue(e instanceof TypeError); }
-assertEquals(0, d);
-assertEquals(4, caught);
diff --git a/deps/v8/test/mjsunit/harmony/array-species-constructor-accessor.js b/deps/v8/test/mjsunit/harmony/array-species-constructor-accessor.js
new file mode 100644
index 0000000000..4c852f06f0
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/array-species-constructor-accessor.js
@@ -0,0 +1,28 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-species --allow-natives-syntax
+
+// Overwriting the constructor of an instance updates the protector
+
+let x = [];
+
+assertEquals(Array, x.map(()=>{}).constructor);
+assertEquals(Array, x.filter(()=>{}).constructor);
+assertEquals(Array, x.slice().constructor);
+assertEquals(Array, x.splice().constructor);
+assertEquals(Array, x.concat([1]).constructor);
+assertEquals(1, x.concat([1])[0]);
+
+class MyArray extends Array { }
+
+Object.defineProperty(x, 'constructor', {get() { return MyArray; }});
+assertFalse(%SpeciesProtector());
+
+assertEquals(MyArray, x.map(()=>{}).constructor);
+assertEquals(MyArray, x.filter(()=>{}).constructor);
+assertEquals(MyArray, x.slice().constructor);
+assertEquals(MyArray, x.splice().constructor);
+assertEquals(MyArray, x.concat([1]).constructor);
+assertEquals(1, x.concat([1])[0]);
diff --git a/deps/v8/test/mjsunit/harmony/array-species-constructor-delete.js b/deps/v8/test/mjsunit/harmony/array-species-constructor-delete.js
index e61d0ddebf..f341282dd9 100644
--- a/deps/v8/test/mjsunit/harmony/array-species-constructor-delete.js
+++ b/deps/v8/test/mjsunit/harmony/array-species-constructor-delete.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-species
+// Flags: --harmony-species --allow-natives-syntax
// Overwriting the constructor of an instance updates the protector
@@ -19,6 +19,7 @@ class MyArray extends Array { }
Object.prototype.constructor = MyArray;
delete Array.prototype.constructor;
+assertFalse(%SpeciesProtector());
assertEquals(MyArray, x.map(()=>{}).constructor);
assertEquals(MyArray, x.filter(()=>{}).constructor);
diff --git a/deps/v8/test/mjsunit/harmony/array-species-constructor.js b/deps/v8/test/mjsunit/harmony/array-species-constructor.js
index d4eeefa010..d766e09eee 100644
--- a/deps/v8/test/mjsunit/harmony/array-species-constructor.js
+++ b/deps/v8/test/mjsunit/harmony/array-species-constructor.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-species
+// Flags: --harmony-species --allow-natives-syntax
// Overwriting the constructor of an instance updates the protector
@@ -18,6 +18,7 @@ assertEquals(1, x.concat([1])[0]);
class MyArray extends Array { }
x.constructor = MyArray;
+assertFalse(%SpeciesProtector());
assertEquals(MyArray, x.map(()=>{}).constructor);
assertEquals(MyArray, x.filter(()=>{}).constructor);
diff --git a/deps/v8/test/mjsunit/harmony/array-species-delete.js b/deps/v8/test/mjsunit/harmony/array-species-delete.js
index bccf3a4df9..ba49414069 100644
--- a/deps/v8/test/mjsunit/harmony/array-species-delete.js
+++ b/deps/v8/test/mjsunit/harmony/array-species-delete.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-species
+// Flags: --harmony-species --allow-natives-syntax
// Overwriting the constructor of an instance updates the protector
@@ -19,6 +19,7 @@ class MyArray extends Array { }
Object.prototype[Symbol.species] = MyArray;
delete Array[Symbol.species];
+assertFalse(%SpeciesProtector());
assertEquals(MyArray, x.map(()=>{}).constructor);
assertEquals(MyArray, x.filter(()=>{}).constructor);
diff --git a/deps/v8/test/mjsunit/harmony/array-species-modified.js b/deps/v8/test/mjsunit/harmony/array-species-modified.js
index b5c5c16d7b..73c52b91a4 100644
--- a/deps/v8/test/mjsunit/harmony/array-species-modified.js
+++ b/deps/v8/test/mjsunit/harmony/array-species-modified.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-species
+// Flags: --harmony-species --allow-natives-syntax
// Overwriting Array[Symbol.species] updates the protector
@@ -18,6 +18,7 @@ assertEquals(1, x.concat([1])[0]);
class MyArray extends Array { }
Object.defineProperty(Array, Symbol.species, {value: MyArray});
+assertFalse(%SpeciesProtector());
assertEquals(MyArray, x.map(()=>{}).constructor);
assertEquals(MyArray, x.filter(()=>{}).constructor);
diff --git a/deps/v8/test/mjsunit/harmony/array-species-parent-constructor.js b/deps/v8/test/mjsunit/harmony/array-species-parent-constructor.js
index 8ea59bcfe4..347732e1de 100644
--- a/deps/v8/test/mjsunit/harmony/array-species-parent-constructor.js
+++ b/deps/v8/test/mjsunit/harmony/array-species-parent-constructor.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-species
+// Flags: --harmony-species --allow-natives-syntax
// Overwriting Array.prototype.constructor updates the protector
@@ -18,6 +18,7 @@ assertEquals(1, x.concat([1])[0]);
class MyArray extends Array { }
Array.prototype.constructor = MyArray;
+assertFalse(%SpeciesProtector());
assertEquals(MyArray, x.map(()=>{}).constructor);
assertEquals(MyArray, x.filter(()=>{}).constructor);
diff --git a/deps/v8/test/mjsunit/harmony/array-species-proto.js b/deps/v8/test/mjsunit/harmony/array-species-proto.js
index 077b3f5a17..70db751519 100644
--- a/deps/v8/test/mjsunit/harmony/array-species-proto.js
+++ b/deps/v8/test/mjsunit/harmony/array-species-proto.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-species
+// Flags: --harmony-species --allow-natives-syntax
// Overwriting an array instance's __proto__ updates the protector
@@ -18,6 +18,7 @@ assertEquals(1, x.concat([1])[0]);
class MyArray extends Array { }
x.__proto__ = MyArray.prototype;
+assertTrue(%SpeciesProtector());
assertEquals(MyArray, x.map(()=>{}).constructor);
assertEquals(MyArray, x.filter(()=>{}).constructor);
diff --git a/deps/v8/test/mjsunit/harmony/array-species.js b/deps/v8/test/mjsunit/harmony/array-species.js
index 3cef50cc4c..19ed1d8185 100644
--- a/deps/v8/test/mjsunit/harmony/array-species.js
+++ b/deps/v8/test/mjsunit/harmony/array-species.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-species --harmony-proxies
+// Flags: --harmony-species
// Test the ES2015 @@species feature
diff --git a/deps/v8/test/mjsunit/harmony/atomics.js b/deps/v8/test/mjsunit/harmony/atomics.js
index 4b9c9f6c66..bf27eb46d5 100644
--- a/deps/v8/test/mjsunit/harmony/atomics.js
+++ b/deps/v8/test/mjsunit/harmony/atomics.js
@@ -60,83 +60,65 @@ var IntegerTypedArrayConstructors = [
});
})();
-function testAtomicOp(op, ia, index, expectedIndex, name) {
- for (var i = 0; i < ia.length; ++i)
- ia[i] = 22;
-
- ia[expectedIndex] = 0;
- assertEquals(0, op(ia, index, 0, 0), name);
- assertEquals(0, ia[expectedIndex], name);
-
- for (var i = 0; i < ia.length; ++i) {
- if (i == expectedIndex) continue;
- assertEquals(22, ia[i], name);
- }
-}
-
(function TestBadIndex() {
var sab = new SharedArrayBuffer(8);
var si32a = new Int32Array(sab);
var si32a2 = new Int32Array(sab, 4);
- // Non-integer indexes are converted to an integer first, so they should all
- // operate on index 0.
- [undefined, null, false, 'hi', {}].forEach(function(i) {
-
- var name = String(i);
- testAtomicOp(Atomics.compareExchange, si32a, i, 0, name);
- testAtomicOp(Atomics.load, si32a, i, 0, name);
- testAtomicOp(Atomics.store, si32a, i, 0, name);
- testAtomicOp(Atomics.add, si32a, i, 0, name);
- testAtomicOp(Atomics.sub, si32a, i, 0, name);
- testAtomicOp(Atomics.and, si32a, i, 0, name);
- testAtomicOp(Atomics.or, si32a, i, 0, name);
- testAtomicOp(Atomics.xor, si32a, i, 0, name);
- testAtomicOp(Atomics.exchange, si32a, i, 0, name);
- });
-
- // Out-of-bounds indexes should return undefined.
- // TODO(binji): Should these throw RangeError instead?
+ // Non-integer indexes should throw RangeError.
+ var nonInteger = [1.4, '1.4', NaN, -Infinity, Infinity, undefined, 'hi', {}];
+ nonInteger.forEach(function(i) {
+ assertThrows(function() { Atomics.compareExchange(si32a, i, 0); },
+ RangeError);
+ assertThrows(function() { Atomics.load(si32a, i, 0); }, RangeError);
+ assertThrows(function() { Atomics.store(si32a, i, 0); }, RangeError);
+ assertThrows(function() { Atomics.add(si32a, i, 0); }, RangeError);
+ assertThrows(function() { Atomics.sub(si32a, i, 0); }, RangeError);
+ assertThrows(function() { Atomics.and(si32a, i, 0); }, RangeError);
+ assertThrows(function() { Atomics.or(si32a, i, 0); }, RangeError);
+ assertThrows(function() { Atomics.xor(si32a, i, 0); }, RangeError);
+ assertThrows(function() { Atomics.exchange(si32a, i, 0); }, RangeError);
+ }, RangeError);
+
+ // Out-of-bounds indexes should throw RangeError.
[-1, 2, 100].forEach(function(i) {
- var name = String(i);
- assertEquals(undefined, Atomics.compareExchange(si32a, i, 0, 0), name);
- assertEquals(undefined, Atomics.load(si32a, i), name);
- assertEquals(undefined, Atomics.store(si32a, i, 0), name);
- assertEquals(undefined, Atomics.add(si32a, i, 0), name);
- assertEquals(undefined, Atomics.sub(si32a, i, 0), name);
- assertEquals(undefined, Atomics.and(si32a, i, 0), name);
- assertEquals(undefined, Atomics.or(si32a, i, 0), name);
- assertEquals(undefined, Atomics.xor(si32a, i, 0), name);
- assertEquals(undefined, Atomics.exchange(si32a, i, 0), name);
- });
-
- // Out-of-bounds indexes for offset-array
+ assertThrows(function() { Atomics.compareExchange(si32a, i, 0, 0); },
+ RangeError);
+ assertThrows(function() { Atomics.load(si32a, i); }, RangeError);
+ assertThrows(function() { Atomics.store(si32a, i, 0); }, RangeError);
+ assertThrows(function() { Atomics.add(si32a, i, 0); }, RangeError);
+ assertThrows(function() { Atomics.sub(si32a, i, 0); }, RangeError);
+ assertThrows(function() { Atomics.and(si32a, i, 0); }, RangeError);
+ assertThrows(function() { Atomics.or(si32a, i, 0); }, RangeError);
+ assertThrows(function() { Atomics.xor(si32a, i, 0); }, RangeError);
+ assertThrows(function() { Atomics.exchange(si32a, i, 0); }, RangeError);
+ }, RangeError);
+
+ // Out-of-bounds indexes for array with offset should throw RangeError.
[-1, 1, 100].forEach(function(i) {
- var name = String(i);
- assertEquals(undefined, Atomics.compareExchange(si32a2, i, 0, 0), name);
- assertEquals(undefined, Atomics.load(si32a2, i), name);
- assertEquals(undefined, Atomics.store(si32a2, i, 0), name);
- assertEquals(undefined, Atomics.add(si32a2, i, 0), name);
- assertEquals(undefined, Atomics.sub(si32a2, i, 0), name);
- assertEquals(undefined, Atomics.and(si32a2, i, 0), name);
- assertEquals(undefined, Atomics.or(si32a2, i, 0), name);
- assertEquals(undefined, Atomics.xor(si32a2, i, 0), name);
- assertEquals(undefined, Atomics.exchange(si32a2, i, 0), name);
+ assertThrows(function() { Atomics.compareExchange(si32a2, i, 0, 0); });
+ assertThrows(function() { Atomics.load(si32a2, i); }, RangeError);
+ assertThrows(function() { Atomics.store(si32a2, i, 0); }, RangeError);
+ assertThrows(function() { Atomics.add(si32a2, i, 0); }, RangeError);
+ assertThrows(function() { Atomics.sub(si32a2, i, 0); }, RangeError);
+ assertThrows(function() { Atomics.and(si32a2, i, 0); }, RangeError);
+ assertThrows(function() { Atomics.or(si32a2, i, 0); }, RangeError);
+ assertThrows(function() { Atomics.xor(si32a2, i, 0); }, RangeError);
+ assertThrows(function() { Atomics.exchange(si32a2, i, 0); }, RangeError);
});
- // Monkey-patch length and make sure these functions still return undefined.
+ // Monkey-patch length and make sure these functions still throw.
Object.defineProperty(si32a, 'length', {get: function() { return 1000; }});
[2, 100].forEach(function(i) {
- var name = String(i);
- assertEquals(undefined, Atomics.compareExchange(si32a, i, 0, 0), name);
- assertEquals(undefined, Atomics.load(si32a, i), name);
- assertEquals(undefined, Atomics.store(si32a, i, 0), name);
- assertEquals(undefined, Atomics.add(si32a, i, 0), name);
- assertEquals(undefined, Atomics.sub(si32a, i, 0), name);
- assertEquals(undefined, Atomics.and(si32a, i, 0), name);
- assertEquals(undefined, Atomics.or(si32a, i, 0), name);
- assertEquals(undefined, Atomics.xor(si32a, i, 0), name);
- assertEquals(undefined, Atomics.exchange(si32a, i, 0), name);
+ assertThrows(function() { Atomics.compareExchange(si32a, i, 0, 0); });
+ assertThrows(function() { Atomics.load(si32a, i); });
+ assertThrows(function() { Atomics.store(si32a, i, 0); });
+ assertThrows(function() { Atomics.add(si32a, i, 0); });
+ assertThrows(function() { Atomics.sub(si32a, i, 0); });
+ assertThrows(function() { Atomics.and(si32a, i, 0); });
+ assertThrows(function() { Atomics.or(si32a, i, 0); });
+ assertThrows(function() { Atomics.xor(si32a, i, 0); });
+ assertThrows(function() { Atomics.exchange(si32a, i, 0); });
});
})();
@@ -145,22 +127,52 @@ function testAtomicOp(op, ia, index, expectedIndex, name) {
var si32a = new Int32Array(sab);
var si32a2 = new Int32Array(sab, 32);
+ var testOp = function(op, ia, index, expectedIndex, name) {
+ for (var i = 0; i < ia.length; ++i)
+ ia[i] = 22;
+
+ ia[expectedIndex] = 0;
+ assertEquals(0, op(ia, index, 0, 0), name);
+ assertEquals(0, ia[expectedIndex], name);
+
+ for (var i = 0; i < ia.length; ++i) {
+ if (i == expectedIndex) continue;
+ assertEquals(22, ia[i], name);
+ }
+ };
+
+ // These values all map to index 0
+ [-0, 0, 0.0, null, false].forEach(function(i) {
+ var name = String(i);
+ [si32a, si32a2].forEach(function(array) {
+ testOp(Atomics.compareExchange, array, i, 0, name);
+ testOp(Atomics.load, array, i, 0, name);
+ testOp(Atomics.store, array, i, 0, name);
+ testOp(Atomics.add, array, i, 0, name);
+ testOp(Atomics.sub, array, i, 0, name);
+ testOp(Atomics.and, array, i, 0, name);
+ testOp(Atomics.or, array, i, 0, name);
+ testOp(Atomics.xor, array, i, 0, name);
+ testOp(Atomics.exchange, array, i, 0, name);
+ });
+ });
+
+ // These values all map to index 3
var valueOf = {valueOf: function(){ return 3;}};
var toString = {toString: function(){ return '3';}};
-
- [3, 3.5, '3', '3.5', valueOf, toString].forEach(function(i) {
+ [3, 3.0, '3', '3.0', valueOf, toString].forEach(function(i) {
var name = String(i);
[si32a, si32a2].forEach(function(array) {
- testAtomicOp(Atomics.compareExchange, array, i, 3, name);
- testAtomicOp(Atomics.load, array, i, 3, name);
- testAtomicOp(Atomics.store, array, i, 3, name);
- testAtomicOp(Atomics.add, array, i, 3, name);
- testAtomicOp(Atomics.sub, array, i, 3, name);
- testAtomicOp(Atomics.and, array, i, 3, name);
- testAtomicOp(Atomics.or, array, i, 3, name);
- testAtomicOp(Atomics.xor, array, i, 3, name);
- testAtomicOp(Atomics.exchange, array, i, 3, name);
- })
+ testOp(Atomics.compareExchange, array, i, 3, name);
+ testOp(Atomics.load, array, i, 3, name);
+ testOp(Atomics.store, array, i, 3, name);
+ testOp(Atomics.add, array, i, 3, name);
+ testOp(Atomics.sub, array, i, 3, name);
+ testOp(Atomics.and, array, i, 3, name);
+ testOp(Atomics.or, array, i, 3, name);
+ testOp(Atomics.xor, array, i, 3, name);
+ testOp(Atomics.exchange, array, i, 3, name);
+ });
});
})();
diff --git a/deps/v8/test/mjsunit/harmony/block-conflicts-sloppy.js b/deps/v8/test/mjsunit/harmony/block-conflicts-sloppy.js
index ad947700ac..8908ce4e56 100644
--- a/deps/v8/test/mjsunit/harmony/block-conflicts-sloppy.js
+++ b/deps/v8/test/mjsunit/harmony/block-conflicts-sloppy.js
@@ -4,7 +4,7 @@
// Test for conflicting variable bindings.
-// Flags: --no-legacy-const --harmony-sloppy --harmony-sloppy-let --harmony-sloppy-function
+// Flags: --harmony-sloppy --harmony-sloppy-let --harmony-sloppy-function
function CheckException(e) {
var string = e.toString();
diff --git a/deps/v8/test/mjsunit/harmony/block-const-assign-sloppy.js b/deps/v8/test/mjsunit/harmony/block-const-assign-sloppy.js
index 506847c5b6..5dde82cbf2 100644
--- a/deps/v8/test/mjsunit/harmony/block-const-assign-sloppy.js
+++ b/deps/v8/test/mjsunit/harmony/block-const-assign-sloppy.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --no-legacy-const --harmony-sloppy --harmony-sloppy-let
+// Flags: --harmony-sloppy --harmony-sloppy-let
// Test that we throw early syntax errors in harmony mode
// when using an immutable binding in an assigment or with
diff --git a/deps/v8/test/mjsunit/harmony/block-eval-var-over-let.js b/deps/v8/test/mjsunit/harmony/block-eval-var-over-let.js
index c95123167c..98091b4218 100644
--- a/deps/v8/test/mjsunit/harmony/block-eval-var-over-let.js
+++ b/deps/v8/test/mjsunit/harmony/block-eval-var-over-let.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-sloppy --harmony-sloppy-let --harmony-sloppy-function --no-legacy-const
+// Flags: --harmony-sloppy --harmony-sloppy-let --harmony-sloppy-function
// Var-let conflict in a function throws, even if the var is in an eval
diff --git a/deps/v8/test/mjsunit/harmony/block-for-sloppy.js b/deps/v8/test/mjsunit/harmony/block-for-sloppy.js
index 051d2b16ea..261c46a166 100644
--- a/deps/v8/test/mjsunit/harmony/block-for-sloppy.js
+++ b/deps/v8/test/mjsunit/harmony/block-for-sloppy.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --no-legacy-const --harmony-sloppy --harmony-sloppy-let
+// Flags: --harmony-sloppy --harmony-sloppy-let
function props(x) {
var array = [];
diff --git a/deps/v8/test/mjsunit/harmony/block-leave-sloppy.js b/deps/v8/test/mjsunit/harmony/block-leave-sloppy.js
index fe21341c2e..0023fa08f3 100644
--- a/deps/v8/test/mjsunit/harmony/block-leave-sloppy.js
+++ b/deps/v8/test/mjsunit/harmony/block-leave-sloppy.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --no-legacy-const --harmony-sloppy --harmony-sloppy-let
+// Flags: --harmony-sloppy --harmony-sloppy-let
// We want to test the context chain shape. In each of the tests cases
// below, the outer with is to force a runtime lookup of the identifier 'x'
diff --git a/deps/v8/test/mjsunit/harmony/block-let-crankshaft-sloppy.js b/deps/v8/test/mjsunit/harmony/block-let-crankshaft-sloppy.js
index dc5cdfb5b7..4f29c05693 100644
--- a/deps/v8/test/mjsunit/harmony/block-let-crankshaft-sloppy.js
+++ b/deps/v8/test/mjsunit/harmony/block-let-crankshaft-sloppy.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax
-// Flags: --no-legacy-const --harmony-sloppy --harmony-sloppy-let
+// Flags: --harmony-sloppy --harmony-sloppy-let
// Check that the following functions are optimizable.
var functions = [ f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14,
diff --git a/deps/v8/test/mjsunit/harmony/block-let-declaration-sloppy.js b/deps/v8/test/mjsunit/harmony/block-let-declaration-sloppy.js
index b94576cabc..af95553bd0 100644
--- a/deps/v8/test/mjsunit/harmony/block-let-declaration-sloppy.js
+++ b/deps/v8/test/mjsunit/harmony/block-let-declaration-sloppy.js
@@ -27,7 +27,7 @@
// Test let declarations in various settings.
-// Flags: --no-legacy-const --harmony-sloppy --harmony-sloppy-let
+// Flags: --harmony-sloppy --harmony-sloppy-let
// Global
let x;
diff --git a/deps/v8/test/mjsunit/harmony/block-let-semantics-sloppy.js b/deps/v8/test/mjsunit/harmony/block-let-semantics-sloppy.js
index 8ec1eeacd0..a55ff8fe49 100644
--- a/deps/v8/test/mjsunit/harmony/block-let-semantics-sloppy.js
+++ b/deps/v8/test/mjsunit/harmony/block-let-semantics-sloppy.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-sloppy --no-legacy-const --harmony-sloppy-let --harmony-sloppy-function
+// Flags: --harmony-sloppy --harmony-sloppy-let --harmony-sloppy-function
// Test temporal dead zone semantics of let bound variables in
// function and block scopes.
diff --git a/deps/v8/test/mjsunit/harmony/block-scoping-sloppy.js b/deps/v8/test/mjsunit/harmony/block-scoping-sloppy.js
index 36a07f111e..1785901276 100644
--- a/deps/v8/test/mjsunit/harmony/block-scoping-sloppy.js
+++ b/deps/v8/test/mjsunit/harmony/block-scoping-sloppy.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --harmony-sloppy --no-legacy-const --harmony-sloppy-let --harmony-sloppy-function
+// Flags: --allow-natives-syntax --harmony-sloppy --harmony-sloppy-let --harmony-sloppy-function
// Test functionality of block scopes.
// Hoisting of var declarations.
diff --git a/deps/v8/test/mjsunit/harmony/block-scoping-top-level-sloppy.js b/deps/v8/test/mjsunit/harmony/block-scoping-top-level-sloppy.js
index 74492c4ca6..6f6a8fe06d 100644
--- a/deps/v8/test/mjsunit/harmony/block-scoping-top-level-sloppy.js
+++ b/deps/v8/test/mjsunit/harmony/block-scoping-top-level-sloppy.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --min-preparse-length=0
-// Flags: --no-legacy-const --harmony-sloppy --harmony-sloppy-let
+// Flags: --harmony-sloppy --harmony-sloppy-let
let xxx = 1;
let f = undefined;
diff --git a/deps/v8/test/mjsunit/harmony/block-sloppy-function.js b/deps/v8/test/mjsunit/harmony/block-sloppy-function.js
index 4fa79c2149..2bea1476ab 100644
--- a/deps/v8/test/mjsunit/harmony/block-sloppy-function.js
+++ b/deps/v8/test/mjsunit/harmony/block-sloppy-function.js
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --no-legacy-const --harmony-sloppy --harmony-sloppy-let
-// Flags: --harmony-sloppy-function --harmony-destructuring-bind
+// Flags: --harmony-sloppy --harmony-sloppy-let
+// Flags: --harmony-sloppy-function
// Test Annex B 3.3 semantics for functions declared in blocks in sloppy mode.
// http://www.ecma-international.org/ecma-262/6.0/#sec-block-level-function-declarations-web-legacy-compatibility-semantics
diff --git a/deps/v8/test/mjsunit/harmony/debug-step-destructuring-assignment.js b/deps/v8/test/mjsunit/harmony/debug-step-destructuring-assignment.js
deleted file mode 100644
index bfc75bd23f..0000000000
--- a/deps/v8/test/mjsunit/harmony/debug-step-destructuring-assignment.js
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-debug-as debug --harmony-destructuring-assignment
-// Flags: --harmony-destructuring-bind
-
-var exception = null;
-var Debug = debug.Debug;
-var break_count = 0;
-
-function listener(event, exec_state, event_data, data) {
- if (event != Debug.DebugEvent.Break) return;
- try {
- var source = exec_state.frame(0).sourceLineText();
- print(source);
- assertTrue(source.indexOf(`// B${break_count++}`) > 0);
- if (source.indexOf("assertEquals") > 0) {
- exec_state.prepareStep(Debug.StepAction.StepNext);
- } else {
- exec_state.prepareStep(Debug.StepAction.StepIn);
- }
- } catch (e) {
- exception = e;
- print(e);
- }
-};
-
-Debug.setListener(listener);
-
-function f() {
- var a, b, c, d;
- debugger; // B0
- [ // B1
- a, // B3
- b, // B4
- c = 3 // B5
- ] = [1, 2]; // B2
- assertEquals({a:1,b:2,c:3}, {a, b, c}); // B6
-
- [ // B7
- a, // B9
- [
- b, // B10
- c // B11
- ],
- d // B12
- ] = [5, [6, 7], 8]; // B8
- assertEquals({a:5,b:6,c:7,d:8}, {a, b, c, d}); // B13
-
- [ // B14
- a, // B16
- b, // B17
- ...c // B18
- ] = [1, 2, 3, 4]; // B15
- assertEquals({a:1,b:2,c:[3,4]}, {a, b, c}); // B19
-
- ({ // B20
- a, // B22
- b, // B23
- c = 7 // B24
- } = {a: 5, b: 6}); // B21
- assertEquals({a:5,b:6,c:7}, {a, b, c}); // B25
-
- ({ // B26
- a, // B28
- b = return1(), // B29
- c = return1() // B30
- } = {a: 5, b: 6}); // B27
- assertEquals({a:5,b:6,c:1}, {a, b, c}); // B33
-
- ({ // B34
- x : a, // B36
- y : b, // B37
- z : c = 3 // B38
- } = {x: 1, y: 2}); // B35
- assertEquals({a:1,b:2,c:3}, {a, b, c}); // B39
-} // B40
-
-function return1() {
- return 1; // B31
-} // B32
-
-f();
-Debug.setListener(null); // B41
-assertNull(exception);
diff --git a/deps/v8/test/mjsunit/harmony/do-expressions-control.js b/deps/v8/test/mjsunit/harmony/do-expressions-control.js
new file mode 100644
index 0000000000..12c54295cc
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/do-expressions-control.js
@@ -0,0 +1,109 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-do-expressions
+
+(function TestDoForInDoBreak() {
+ function f(o, i) {
+ var a = "result@" + do {
+ var r = "(";
+ for (var x in o) {
+ var b = "end@" + do {
+ if (x == i) { break } else { r += o[x]; x }
+ }
+ }
+ r + ")";
+ }
+ return a + "," + b;
+ }
+ assertEquals("result@(3),end@0", f([3], 2));
+ assertEquals("result@(35),end@1", f([3,5], 2));
+ assertEquals("result@(35),end@1", f([3,5,7], 2));
+ assertEquals("result@(35),end@1", f([3,5,7,9], 2));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals("result@(3),end@0", f([3], 2));
+ assertEquals("result@(35),end@1", f([3,5], 2));
+ assertEquals("result@(35),end@1", f([3,5,7], 2));
+ assertEquals("result@(35),end@1", f([3,5,7,9], 2));
+})();
+
+(function TestDoForInDoContinue() {
+ function f(o, i) {
+ var a = "result@" + do {
+ var r = "("
+ for (var x in o) {
+ var b = "end@" + do {
+ if (x == i) { continue } else { r += o[x]; x }
+ }
+ }
+ r + ")"
+ }
+ return a + "," + b
+ }
+ assertEquals("result@(3),end@0", f([3], 2));
+ assertEquals("result@(35),end@1", f([3,5], 2));
+ assertEquals("result@(35),end@1", f([3,5,7], 2));
+ assertEquals("result@(359),end@3", f([3,5,7,9], 2));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals("result@(3),end@0", f([3], 2));
+ assertEquals("result@(35),end@1", f([3,5], 2));
+ assertEquals("result@(35),end@1", f([3,5,7], 2));
+ assertEquals("result@(359),end@3", f([3,5,7,9], 2));
+})();
+
+(function TestDoForNestedWithTargetLabels() {
+ function f(mode) {
+ var loop = true;
+ var head = "<";
+ var tail = ">";
+ var middle =
+ "1" + do { loop1: for(; loop; head += "A") {
+ "2" + do { loop2: for(; loop; head += "B") {
+ "3" + do { loop3: for(; loop; head += "C") {
+ "4" + do { loop4: for(; loop; head += "D") {
+ "5" + do { loop5: for(; loop; head += "E") {
+ "6" + do { loop6: for(; loop; head += "F") {
+ loop = false;
+ switch (mode) {
+ case "b1": break loop1;
+ case "b2": break loop2;
+ case "b3": break loop3;
+ case "b4": break loop4;
+ case "b5": break loop5;
+ case "b6": break loop6;
+ case "c1": continue loop1;
+ case "c2": continue loop2;
+ case "c3": continue loop3;
+ case "c4": continue loop4;
+ case "c5": continue loop5;
+ case "c6": continue loop6;
+ default: "7";
+ }
+ }}
+ }}
+ }}
+ }}
+ }}
+ }}
+ return head + middle + tail;
+ }
+ function test() {
+ assertEquals( "<1undefined>", f("b1"));
+ assertEquals( "<A1undefined>", f("c1"));
+ assertEquals( "<A12undefined>", f("b2"));
+ assertEquals( "<BA12undefined>", f("c2"));
+ assertEquals( "<BA123undefined>", f("b3"));
+ assertEquals( "<CBA123undefined>", f("c3"));
+ assertEquals( "<CBA1234undefined>", f("b4"));
+ assertEquals( "<DCBA1234undefined>", f("c4"));
+ assertEquals( "<DCBA12345undefined>", f("b5"));
+ assertEquals( "<EDCBA12345undefined>", f("c5"));
+ assertEquals( "<EDCBA123456undefined>", f("b6"));
+ assertEquals("<FEDCBA123456undefined>", f("c6"));
+ assertEquals("<FEDCBA1234567>", f("xx"));
+ }
+ test();
+ %OptimizeFunctionOnNextCall(f);
+ test();
+})();
diff --git a/deps/v8/test/mjsunit/harmony/do-expressions.js b/deps/v8/test/mjsunit/harmony/do-expressions.js
index 3aace577d5..b3be4eca91 100644
--- a/deps/v8/test/mjsunit/harmony/do-expressions.js
+++ b/deps/v8/test/mjsunit/harmony/do-expressions.js
@@ -3,7 +3,6 @@
// found in the LICENSE file.
// Flags: --harmony-do-expressions --harmony-sloppy-let --allow-natives-syntax
-// Flags: --harmony-default-parameters --harmony-destructuring-bind
function returnValue(v) { return v; }
function MyError() {}
diff --git a/deps/v8/test/mjsunit/harmony/exponentiation-operator.js b/deps/v8/test/mjsunit/harmony/exponentiation-operator.js
new file mode 100644
index 0000000000..543e1046c9
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/exponentiation-operator.js
@@ -0,0 +1,278 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-exponentiation-operator
+
+function TestBasic() {
+ assertEquals(-(8 ** 2), -64);
+ assertEquals(+(8 ** 2), 64);
+ assertEquals(~(8 ** 2), -65);
+ assertEquals(!(8 ** 2), false);
+
+ assertEquals(2 ** -2, 0.25);
+
+ var o = { p: 1 };
+ assertEquals(2 ** delete o.p, 2);
+
+ assertEquals(2 ** void 1, NaN);
+
+ assertEquals(2 ** typeof 1, NaN);
+
+ var s = "2";
+ var n = 2;
+
+ assertEquals(2 ** "2", 4);
+ assertEquals(2 ** +"2", 4);
+ assertEquals(2 ** +s, 4);
+ assertEquals(2 ** s, 4);
+ assertEquals(2 ** 2, 4);
+ assertEquals(2 ** +2, 4);
+ assertEquals(2 ** +n, 4);
+ assertEquals(2 ** n, 4);
+
+ assertEquals(2 ** -"2", 0.25);
+ assertEquals(2 ** -s, 0.25);
+ assertEquals(2 ** -2, 0.25);
+ assertEquals(2 ** -n, 0.25);
+
+ assertEquals(2 ** ~"2", 0.125);
+ assertEquals(2 ** ~s, 0.125);
+ assertEquals(2 ** ~2, 0.125);
+ assertEquals(2 ** ~n, 0.125);
+
+ assertEquals(2 ** !"2", 1);
+ assertEquals(2 ** !s, 1);
+ assertEquals(2 ** !2, 1);
+ assertEquals(2 ** !n, 1);
+
+ var exponent = 2;
+ assertEquals(2 ** 3, 8);
+ assertEquals(3 * 2 ** 3, 24);
+ assertEquals(2 ** ++exponent, 8);
+ assertEquals(2 ** -1 * 2, 1);
+ assertEquals(2 ** 2 * 4, 16);
+ assertEquals(2 ** 2 / 2, 2);
+ assertEquals(2 ** (3 ** 2), 512);
+ assertEquals(2 ** 3 ** 2, 512);
+ assertEquals(2 * 3 ** 2, 18);
+ assertEquals(16 / 2 ** 2, 4);
+}
+TestBasic();
+
+
+function TestAssignment() {
+ var base = -5;
+ assertEquals(base **= 3, -125);
+ assertEquals(base, -125);
+}
+TestAssignment();
+
+
+function TestPrecedence() {
+ var base = 4;
+ assertEquals(--base ** 2, 9); // 3 ** 2
+ assertEquals(++base ** 2, 16); // 4 ** 2
+ assertEquals(base++ ** 2, 16); // 4 ** 2
+ assertEquals(base-- ** 2, 25); // 5 ** 2
+
+ assertEquals(4, base);
+ assertEquals(--base ** --base ** 2,
+ Math.pow(3, Math.pow(2, 2)));
+
+ assertEquals(2, base);
+ assertEquals(++base ** ++base ** 2,
+ Math.pow(3, Math.pow(4, 2)));
+
+ base = 4;
+ assertEquals(base-- ** base-- ** 2,
+ Math.pow(4, Math.pow(3, 2)));
+
+ assertEquals(2, base);
+ assertEquals(base++ ** base++ ** 2,
+ Math.pow(2, Math.pow(3, 2)));
+}
+TestPrecedence();
+
+
+function TestInvariants() {
+ assertEquals(NaN, 2 ** NaN);
+ assertEquals(NaN, (+0) ** NaN);
+ assertEquals(NaN, (-0) ** NaN);
+ assertEquals(NaN, Infinity ** NaN);
+ assertEquals(NaN, (-Infinity) ** NaN);
+
+ assertEquals(1, NaN ** +0);
+ assertEquals(1, NaN ** -0);
+
+ assertEquals(NaN, NaN ** NaN);
+ assertEquals(NaN, NaN ** 2.2);
+ assertEquals(NaN, NaN ** 1);
+ assertEquals(NaN, NaN ** -1);
+ assertEquals(NaN, NaN ** -2.2);
+ assertEquals(NaN, NaN ** Infinity);
+ assertEquals(NaN, NaN ** -Infinity);
+
+ assertEquals(Infinity, 1.1 ** Infinity);
+ assertEquals(Infinity, (-1.1) ** Infinity);
+ assertEquals(Infinity, 2 ** Infinity);
+ assertEquals(Infinity, (-2) ** Infinity);
+
+ // Because +0 == -0, we need to compare 1/{+,-}0 to {+,-}Infinity
+ assertEquals(+Infinity, 1/1.1 ** -Infinity);
+ assertEquals(+Infinity, 1/(-1.1) ** -Infinity);
+ assertEquals(+Infinity, 1/2 ** -Infinity);
+ assertEquals(+Infinity, 1/(-2) ** -Infinity);
+
+ assertEquals(NaN, 1 ** Infinity);
+ assertEquals(NaN, 1 ** -Infinity);
+ assertEquals(NaN, (-1) ** Infinity);
+ assertEquals(NaN, (-1) ** -Infinity);
+
+ assertEquals(+0, 0.1 ** Infinity);
+ assertEquals(+0, (-0.1) ** Infinity);
+ assertEquals(+0, 0.999 ** Infinity);
+ assertEquals(+0, (-0.999) ** Infinity);
+
+ assertEquals(Infinity, 0.1 ** -Infinity);
+ assertEquals(Infinity, (-0.1) ** -Infinity);
+ assertEquals(Infinity, 0.999 ** -Infinity);
+ assertEquals(Infinity, (-0.999) ** -Infinity);
+
+ assertEquals(Infinity, Infinity ** 0.1);
+ assertEquals(Infinity, Infinity ** 2);
+
+ assertEquals(+Infinity, 1/Infinity ** -0.1);
+ assertEquals(+Infinity, 1/Infinity ** -2);
+
+ assertEquals(-Infinity, (-Infinity) ** 3);
+ assertEquals(-Infinity, (-Infinity) ** 13);
+
+ assertEquals(Infinity, (-Infinity) ** 3.1);
+ assertEquals(Infinity, (-Infinity) ** 2);
+
+ assertEquals(-Infinity, 1/(-Infinity) ** -3);
+ assertEquals(-Infinity, 1/(-Infinity) ** -13);
+
+ assertEquals(+Infinity, 1/(-Infinity) ** -3.1);
+ assertEquals(+Infinity, 1/(-Infinity) ** -2);
+
+ assertEquals(+Infinity, 1/(+0) ** 1.1);
+ assertEquals(+Infinity, 1/(+0) ** 2);
+
+ assertEquals(Infinity, (+0) ** -1.1);
+ assertEquals(Infinity, (+0) ** -2);
+
+ assertEquals(-Infinity, 1/(-0) ** 3);
+ assertEquals(-Infinity, 1/(-0) ** 13);
+
+ assertEquals(+Infinity, 1/(-0) ** 3.1);
+ assertEquals(+Infinity, 1/(-0) ** 2);
+
+ assertEquals(-Infinity, (-0) ** -3);
+ assertEquals(-Infinity, (-0) ** -13);
+
+ assertEquals(Infinity, (-0) ** -3.1);
+ assertEquals(Infinity, (-0) ** -2);
+
+ assertEquals(NaN, (-0.00001) ** 1.1);
+ assertEquals(NaN, (-0.00001) ** -1.1);
+ assertEquals(NaN, (-1.1) ** 1.1);
+ assertEquals(NaN, (-1.1) ** -1.1);
+ assertEquals(NaN, (-2) ** 1.1);
+ assertEquals(NaN, (-2) ** -1.1);
+ assertEquals(NaN, (-1000) ** 1.1);
+ assertEquals(NaN, (-1000) ** -1.1);
+
+ assertEquals(+Infinity, 1/(-0) ** 0.5);
+ assertEquals(+Infinity, 1/(-0) ** 0.6);
+ assertEquals(-Infinity, 1/(-0) ** 1);
+ assertEquals(-Infinity, 1/(-0) ** 10000000001);
+
+ assertEquals(+Infinity, (-0) ** -0.5);
+ assertEquals(+Infinity, (-0) ** -0.6);
+ assertEquals(-Infinity, (-0) ** -1);
+ assertEquals(-Infinity, (-0) ** -10000000001);
+
+ assertEquals(4, 16 ** 0.5);
+ assertEquals(NaN, (-16) ** 0.5);
+ assertEquals(0.25, 16 ** -0.5);
+ assertEquals(NaN, (-16) ** -0.5);
+}
+TestInvariants();
+
+
+function TestOperationOrder() {
+ var log = [];
+ var handler = {
+ get(t, n) {
+ var result = Reflect.get(t, n);
+ var str = typeof result === "object" ? "[object Object]" : String(result);
+ log.push(`[[Get]](${String(n)}) -> ${str}`);
+ return result;
+ },
+ set(t, n, v) {
+ var result = Reflect.set(t, n, v);
+ log.push(`[[Set]](${String(n)}, ${String(v)}) -> ${String(result)}`);
+ return result;
+ },
+ has() { assertUnreachable("trap 'has' invoked"); },
+ deleteProperty() { assertUnreachable("trap 'deleteProperty' invoked"); },
+ ownKeys() { assertUnreachable("trap 'ownKeys' invoked"); },
+ apply() { assertUnreachable("trap 'apply' invoked"); },
+ construct() { assertUnreachable("trap 'construct' invoked"); },
+ getPrototypeOf() { assertUnreachable("trap 'getPrototypeOf' invoked"); },
+ setPrototypeOf() { assertUnreachable("trap 'setPrototypeOf' invoked"); },
+ isExtensible() { assertUnreachable("trap 'isExtensible' invoked"); },
+ preventExtensions() {
+ assertUnreachable("trap 'preventExtensions' invoked"); },
+ getOwnPropertyDescriptor() {
+ assertUnreachable("trap 'getOwnPropertyDescriptor' invoked"); },
+ defineProperty() { assertUnreachable("trap 'defineProperty' invoked"); },
+ };
+ var P = new Proxy({ x: 2 }, handler);
+
+ assertEquals(256, P.x **= "8");
+ assertEquals([
+ "[[Get]](x) -> 2",
+ "[[Set]](x, 256) -> true"
+ ], log);
+
+ log = [];
+ var O = new Proxy({ p: P }, handler);
+ assertEquals(65536, O.p.x **= 2 );
+ assertEquals([
+ "[[Get]](p) -> [object Object]",
+ "[[Get]](x) -> 256",
+ "[[Set]](x, 65536) -> true"
+ ], log);
+}
+TestOperationOrder();
+
+
+function TestOverrideMathPow() {
+ var MathPow = MathPow;
+ Math.pow = function(a, b) {
+ assertUnreachable(`Math.pow(${String(a)}, ${String(b)}) invoked`);
+ }
+
+ TestBasic();
+ TestAssignment();
+ TestInvariants();
+ TestOperationOrder();
+
+ Math.pow = MathPow;
+}
+TestOverrideMathPow();
+
+function TestBadAssignmentLHS() {
+ assertThrows("if (false) { 17 **= 10; }", ReferenceError);
+ assertThrows("if (false) { '17' **= 10; }", ReferenceError);
+ assertThrows("if (false) { /17/ **= 10; }", ReferenceError);
+ assertThrows("if (false) { ({ valueOf() { return 17; } } **= 10); }",
+ ReferenceError);
+ // TODO(caitp): a Call expression as LHS should be an early ReferenceError!
+ // assertThrows("if (false) { Array() **= 10; }", ReferenceError);
+ assertThrows(() => Array() **= 10, ReferenceError);
+}
+TestBadAssignmentLHS();
diff --git a/deps/v8/test/mjsunit/harmony/function-name.js b/deps/v8/test/mjsunit/harmony/function-name.js
index 7bb1f6ae01..66a69e0f16 100644
--- a/deps/v8/test/mjsunit/harmony/function-name.js
+++ b/deps/v8/test/mjsunit/harmony/function-name.js
@@ -3,7 +3,6 @@
// found in the LICENSE file.
//
// Flags: --harmony-function-name
-// Flags: --harmony-destructuring-bind --harmony-destructuring-assignment
(function testVariableDeclarationsFunction() {
'use strict';
diff --git a/deps/v8/test/mjsunit/harmony/futex.js b/deps/v8/test/mjsunit/harmony/futex.js
index 3a73e0a9b8..626cff5fdb 100644
--- a/deps/v8/test/mjsunit/harmony/futex.js
+++ b/deps/v8/test/mjsunit/harmony/futex.js
@@ -51,24 +51,36 @@
// Valid indexes are 0-3.
[-1, 4, 100].forEach(function(invalidIndex) {
- assertEquals(undefined, Atomics.futexWait(i32a, invalidIndex, 0));
- assertEquals(undefined, Atomics.futexWake(i32a, invalidIndex, 0));
+ assertThrows(function() {
+ Atomics.futexWait(i32a, invalidIndex, 0);
+ }, RangeError);
+ assertThrows(function() {
+ Atomics.futexWake(i32a, invalidIndex, 0);
+ }, RangeError);
var validIndex = 0;
- assertEquals(undefined, Atomics.futexWakeOrRequeue(i32a, invalidIndex, 0, 0,
- validIndex));
- assertEquals(undefined, Atomics.futexWakeOrRequeue(i32a, validIndex, 0, 0,
- invalidIndex));
+ assertThrows(function() {
+ Atomics.futexWakeOrRequeue(i32a, invalidIndex, 0, 0, validIndex);
+ }, RangeError);
+ assertThrows(function() {
+ Atomics.futexWakeOrRequeue(i32a, validIndex, 0, 0, invalidIndex);
+ }, RangeError);
});
i32a = new Int32Array(sab, 8);
[-1, 2, 100].forEach(function(invalidIndex) {
- assertEquals(undefined, Atomics.futexWait(i32a, invalidIndex, 0));
- assertEquals(undefined, Atomics.futexWake(i32a, invalidIndex, 0));
+ assertThrows(function() {
+ Atomics.futexWait(i32a, invalidIndex, 0);
+ }, RangeError);
+ assertThrows(function() {
+ Atomics.futexWake(i32a, invalidIndex, 0);
+ }, RangeError);
var validIndex = 0;
- assertEquals(undefined, Atomics.futexWakeOrRequeue(i32a, invalidIndex, 0, 0,
- validIndex));
- assertEquals(undefined, Atomics.futexWakeOrRequeue(i32a, validIndex, 0, 0,
- invalidIndex));
+ assertThrows(function() {
+ Atomics.futexWakeOrRequeue(i32a, invalidIndex, 0, 0, validIndex);
+ }, RangeError);
+ assertThrows(function() {
+ Atomics.futexWakeOrRequeue(i32a, validIndex, 0, 0, invalidIndex);
+ }, RangeError);
});
})();
diff --git a/deps/v8/test/mjsunit/harmony/generators.js b/deps/v8/test/mjsunit/harmony/generators.js
index 5b045049e9..df6cec8925 100644
--- a/deps/v8/test/mjsunit/harmony/generators.js
+++ b/deps/v8/test/mjsunit/harmony/generators.js
@@ -237,8 +237,8 @@
let x = g();
assertEquals({value: 1, done: false}, x.next());
assertEquals({value: 42, done: false}, x.next());
- assertEquals({value: 43, done: false}, x.return(666));
- assertEquals({value: undefined, done: false}, x.next());
+ assertEquals({value: 43, done: false}, x.return(44));
+ assertEquals({value: 44, done: false}, x.next());
assertEquals({value: undefined, done: true}, x.next());
}
@@ -250,3 +250,23 @@
assertThrowsEquals(() => x.next(), 666);
}
}
+
+
+{ // yield*, .return argument is final result
+
+ function* inner() {
+ yield 2;
+ }
+
+ function* g() {
+ yield 1;
+ return yield* inner();
+ }
+
+ {
+ let x = g();
+ assertEquals({value: 1, done: false}, x.next());
+ assertEquals({value: 2, done: false}, x.next());
+ assertEquals({value: 42, done: true}, x.return(42));
+ }
+}
diff --git a/deps/v8/test/mjsunit/harmony/harmony-string-pad-end.js b/deps/v8/test/mjsunit/harmony/harmony-string-pad-end.js
new file mode 100644
index 0000000000..3292e94eee
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/harmony-string-pad-end.js
@@ -0,0 +1,87 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-string-padding
+
+(function TestMeta() {
+ assertEquals(1, String.prototype.padEnd.length);
+ assertEquals("function", typeof String.prototype.padEnd);
+ assertEquals(Object.getPrototypeOf(Function),
+ Object.getPrototypeOf(String.prototype.padEnd));
+ assertEquals("padEnd", String.prototype.padEnd.name);
+
+ var desc = Object.getOwnPropertyDescriptor(String.prototype, "padEnd");
+ assertFalse(desc.enumerable);
+ assertTrue(desc.configurable);
+ assertTrue(desc.writable);
+ assertEquals(undefined, desc.get);
+ assertEquals(undefined, desc.set);
+
+ assertThrows(() => new Function(`${String.prototype.padEnd}`), SyntaxError);
+})();
+
+
+(function TestRequireObjectCoercible() {
+ var padEnd = String.prototype.padEnd;
+ assertThrows(() => padEnd.call(null, 4, "test"), TypeError);
+ assertThrows(() => padEnd.call(undefined, 4, "test"), TypeError);
+ assertEquals("123 ", padEnd.call({
+ __proto__: null,
+ valueOf() { return 123; }
+ }, 6, " "));
+
+ var proxy = new Proxy({}, {
+ get(t, name) {
+ if (name === Symbol.toPrimitive || name === "toString") return;
+ if (name === "valueOf") return () => 6.7;
+ assertUnreachable();
+ }
+ });
+ assertEquals("6.7 ", padEnd.call(proxy, 6, " "));
+
+ proxy = new Proxy({}, {
+ get(t, name) {
+ if (name === Symbol.toPrimitive || name === "valueOf") return;
+ if (name === "toString") return () => 6.7;
+ assertUnreachable();
+ }
+ });
+ assertEquals("6.7 ", padEnd.call(proxy, 6, " "));
+})();
+
+
+(function TestToLength() {
+ assertThrows(() => "123".padEnd(Symbol("16")), TypeError);
+ assertEquals("123", "123".padEnd(-1));
+ assertEquals("123", "123".padEnd({ toString() { return -1; } }));
+ assertEquals("123", "123".padEnd(-0));
+ assertEquals("123", "123".padEnd({ toString() { return -0; } }));
+ assertEquals("123", "123".padEnd(+0));
+ assertEquals("123", "123".padEnd({ toString() { return +0; } }));
+ assertEquals("123", "123".padEnd(NaN));
+ assertEquals("123", "123".padEnd({ toString() { return NaN; } }));
+})();
+
+
+(function TestFillerToString() {
+ assertEquals(". ", ".".padEnd(10));
+ assertEquals(". ", ".".padEnd(10, undefined));
+ assertEquals(". ", ".".padEnd(10, { toString() { return ""; } }));
+ assertEquals(".nullnulln", ".".padEnd(10, null));
+})();
+
+
+(function TestFillerRepetition() {
+ for (var i = 2000; i > 0; --i) {
+ var expected = "123" + "xoxo".repeat(i / 4).slice(0, i - 3);
+ var actual = "123".padEnd(i, "xoxo");
+ assertEquals(expected, actual);
+ assertEquals(i > "123".length ? i : 3, actual.length);
+ }
+})();
+
+
+(function TestTruncation() {
+ assertEquals("ab", "a".padEnd(2, "bc"));
+})();
diff --git a/deps/v8/test/mjsunit/harmony/harmony-string-pad-start.js b/deps/v8/test/mjsunit/harmony/harmony-string-pad-start.js
new file mode 100644
index 0000000000..2b2d004251
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/harmony-string-pad-start.js
@@ -0,0 +1,87 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-string-padding
+
+(function TestMeta() {
+ assertEquals(1, String.prototype.padStart.length);
+ assertEquals("function", typeof String.prototype.padStart);
+ assertEquals(Object.getPrototypeOf(Function),
+ Object.getPrototypeOf(String.prototype.padStart));
+ assertEquals("padStart", String.prototype.padStart.name);
+
+ var desc = Object.getOwnPropertyDescriptor(String.prototype, "padStart");
+ assertFalse(desc.enumerable);
+ assertTrue(desc.configurable);
+ assertTrue(desc.writable);
+ assertEquals(undefined, desc.get);
+ assertEquals(undefined, desc.set);
+
+ assertThrows(() => new Function(`${String.prototype.padStart}`), SyntaxError);
+})();
+
+
+(function TestRequireObjectCoercible() {
+ var padStart = String.prototype.padStart;
+ assertThrows(() => padStart.call(null, 4, "test"), TypeError);
+ assertThrows(() => padStart.call(undefined, 4, "test"), TypeError);
+ assertEquals(" 123", padStart.call({
+ __proto__: null,
+ valueOf() { return 123; }
+ }, 6, " "));
+
+ var proxy = new Proxy({}, {
+ get(t, name) {
+ if (name === Symbol.toPrimitive || name === "toString") return;
+ if (name === "valueOf") return () => 6.7;
+ assertUnreachable();
+ }
+ });
+ assertEquals(" 6.7", padStart.call(proxy, 6, " "));
+
+ proxy = new Proxy({}, {
+ get(t, name) {
+ if (name === Symbol.toPrimitive || name === "valueOf") return;
+ if (name === "toString") return () => 6.7;
+ assertUnreachable();
+ }
+ });
+ assertEquals(" 6.7", padStart.call(proxy, 6, " "));
+})();
+
+
+(function TestToLength() {
+ assertThrows(() => "123".padStart(Symbol("16")), TypeError);
+ assertEquals("123", "123".padStart(-1));
+ assertEquals("123", "123".padStart({ toString() { return -1; } }));
+ assertEquals("123", "123".padStart(-0));
+ assertEquals("123", "123".padStart({ toString() { return -0; } }));
+ assertEquals("123", "123".padStart(+0));
+ assertEquals("123", "123".padStart({ toString() { return +0; } }));
+ assertEquals("123", "123".padStart(NaN));
+ assertEquals("123", "123".padStart({ toString() { return NaN; } }));
+})();
+
+
+(function TestFillerToString() {
+ assertEquals(" .", ".".padStart(10));
+ assertEquals(" .", ".".padStart(10, undefined));
+ assertEquals(" .", ".".padStart(10, { toString() { return ""; } }));
+ assertEquals("nullnulln.", ".".padStart(10, null));
+})();
+
+
+(function TestFillerRepetition() {
+ for (var i = 2000; i > 0; --i) {
+ var expected = "xoxo".repeat(i / 4).slice(0, i - 3) + "123";
+ var actual = "123".padStart(i, "xoxo");
+ assertEquals(expected, actual);
+ assertEquals(i > "123".length ? i : 3, actual.length);
+ }
+})();
+
+
+(function TestTruncation() {
+ assertEquals("ba", "a".padStart(2, "bc"));
+})();
diff --git a/deps/v8/test/mjsunit/harmony/iterator-close.js b/deps/v8/test/mjsunit/harmony/iterator-close.js
index 94785de51f..b719c17c04 100644
--- a/deps/v8/test/mjsunit/harmony/iterator-close.js
+++ b/deps/v8/test/mjsunit/harmony/iterator-close.js
@@ -4,6 +4,7 @@
// Flags: --harmony-iterator-close
+
function* g() { yield 42; return 88 };
@@ -11,33 +12,86 @@ function* g() { yield 42; return 88 };
{
g.prototype.return = null;
+
+ assertEquals(undefined, (() => {
+ for (var x of g()) { break; }
+ })());
+
assertEquals(undefined, (() => {
for (let x of g()) { break; }
})());
assertEquals(undefined, (() => {
+ for (const x of g()) { break; }
+ })());
+
+ assertEquals(undefined, (() => {
for (x of g()) { break; }
})());
+
+ assertThrowsEquals(() => {
+ for (var x of g()) { throw 42; }
+ }, 42);
+
assertThrowsEquals(() => {
for (let x of g()) { throw 42; }
}, 42);
assertThrowsEquals(() => {
+ for (const x of g()) { throw 42; }
+ }, 42);
+
+ assertThrowsEquals(() => {
for (x of g()) { throw 42; }
}, 42);
+
+ assertEquals(42, (() => {
+ for (var x of g()) { return 42; }
+ })());
+
assertEquals(42, (() => {
for (let x of g()) { return 42; }
})());
assertEquals(42, (() => {
+ for (const x of g()) { return 42; }
+ })());
+
+ assertEquals(42, (() => {
for (x of g()) { return 42; }
})());
- assertEquals(42, eval('for (let x of g()) { x; }'));
+
+ assertEquals(42, eval('for (var x of g()) { x; }'));
assertEquals(42, eval('for (let x of g()) { x; }'));
+
+ assertEquals(42, eval('for (const x of g()) { x; }'));
+
+ assertEquals(42, eval('for (x of g()) { x; }'));
+
+
+ assertEquals(42, (() => {
+ var [x] = g(); return x;
+ })());
+
+ assertEquals(42, (() => {
+ let [x] = g(); return x;
+ })());
+
+ assertEquals(42, (() => {
+ const [x] = g(); return x;
+ })());
+
+ assertEquals(42, (() => {
+ [x] = g(); return x;
+ })());
+
+ assertEquals(42,
+ (([x]) => x)(g())
+ );
}
@@ -45,33 +99,86 @@ function* g() { yield 42; return 88 };
{
g.prototype.return = 666;
+
+ assertThrows(() => {
+ for (var x of g()) { break; }
+ }, TypeError);
+
assertThrows(() => {
for (let x of g()) { break; }
}, TypeError);
assertThrows(() => {
+ for (const x of g()) { break; }
+ }, TypeError);
+
+ assertThrows(() => {
for (x of g()) { break; }
}, TypeError);
+
+ assertThrows(() => {
+ for (var x of g()) { throw 666; }
+ }, TypeError);
+
assertThrows(() => {
for (let x of g()) { throw 666; }
}, TypeError);
assertThrows(() => {
+ for (const x of g()) { throw 666; }
+ }, TypeError);
+
+ assertThrows(() => {
for (x of g()) { throw 666; }
}, TypeError);
+
+ assertThrows(() => {
+ for (var x of g()) { return 666; }
+ }, TypeError);
+
assertThrows(() => {
for (let x of g()) { return 666; }
}, TypeError);
assertThrows(() => {
+ for (const x of g()) { return 666; }
+ }, TypeError);
+
+ assertThrows(() => {
for (x of g()) { return 666; }
}, TypeError);
- assertEquals(42, eval('for (let x of g()) { x; }'));
+
+ assertEquals(42, eval('for (var x of g()) { x; }'));
assertEquals(42, eval('for (let x of g()) { x; }'));
+
+ assertEquals(42, eval('for (const x of g()) { x; }'));
+
+ assertEquals(42, eval('for (x of g()) { x; }'));
+
+
+ assertThrows(() => {
+ var [x] = g(); return x;
+ }, TypeError);
+
+ assertThrows(() => {
+ let [x] = g(); return x;
+ }, TypeError);
+
+ assertThrows(() => {
+ const [x] = g(); return x;
+ }, TypeError);
+
+ assertThrows(() => {
+ [x] = g(); return x;
+ }, TypeError);
+
+ assertThrows(() => {
+ (([x]) => x)(g());
+ }, TypeError);
}
@@ -79,20 +186,46 @@ function* g() { yield 42; return 88 };
{
g.prototype.return = () => 666;
+
assertThrows(() => {
- for (let x of g()) { break; }
+ for (var x of g()) { break; }
}, TypeError);
assertThrows(() => {
- for (x of g()) { break; }
+ for (let x of g()) { break; }
}, TypeError);
assertThrows(() => {
- for (let x of g()) { throw 666; }
+ for (const x of g()) { break; }
}, TypeError);
assertThrows(() => {
+ for (x of g()) { break; }
+ }, TypeError);
+
+
+ // Throw from the body of a for loop 'wins' vs throw
+ // originating from a bad 'return' value.
+
+ assertThrowsEquals(() => {
+ for (var x of g()) { throw 666; }
+ }, 666);
+
+ assertThrowsEquals(() => {
+ for (let x of g()) { throw 666; }
+ }, 666);
+
+ assertThrowsEquals(() => {
+ for (const x of g()) { throw 666; }
+ }, 666);
+
+ assertThrowsEquals(() => {
for (x of g()) { throw 666; }
+ }, 666);
+
+
+ assertThrows(() => {
+ for (var x of g()) { return 666; }
}, TypeError);
assertThrows(() => {
@@ -100,12 +233,42 @@ function* g() { yield 42; return 88 };
}, TypeError);
assertThrows(() => {
+ for (const x of g()) { return 666; }
+ }, TypeError);
+
+ assertThrows(() => {
for (x of g()) { return 666; }
}, TypeError);
+
+ assertEquals(42, eval('for (var x of g()) { x; }'));
+
assertEquals(42, eval('for (let x of g()) { x; }'));
+ assertEquals(42, eval('for (const x of g()) { x; }'));
+
assertEquals(42, eval('for (x of g()) { x; }'));
+
+
+ assertThrows(() => {
+ var [x] = g(); return x;
+ }, TypeError);
+
+ assertThrows(() => {
+ let [x] = g(); return x;
+ }, TypeError);
+
+ assertThrows(() => {
+ const [x] = g(); return x;
+ }, TypeError);
+
+ assertThrows(() => {
+ [x] = g(); return x;
+ }, TypeError);
+
+ assertThrows(() => {
+ (([x]) => x)(g());
+ }, TypeError);
}
@@ -114,14 +277,30 @@ function* g() { yield 42; return 88 };
let log = [];
g.prototype.return = (...args) => { log.push(args); return {} };
+
+ log = [];
+ for (var x of g()) { break; }
+ assertEquals([[]], log);
+
log = [];
for (let x of g()) { break; }
assertEquals([[]], log);
log = [];
+ for (const x of g()) { break; }
+ assertEquals([[]], log);
+
+ log = [];
for (x of g()) { break; }
assertEquals([[]], log);
+
+ log = [];
+ assertThrowsEquals(() => {
+ for (var x of g()) { throw 42; }
+ }, 42);
+ assertEquals([[]], log);
+
log = [];
assertThrowsEquals(() => {
for (let x of g()) { throw 42; }
@@ -130,10 +309,23 @@ function* g() { yield 42; return 88 };
log = [];
assertThrowsEquals(() => {
+ for (const x of g()) { throw 42; }
+ }, 42);
+ assertEquals([[]], log);
+
+ log = [];
+ assertThrowsEquals(() => {
for (x of g()) { throw 42; }
}, 42);
assertEquals([[]], log);
+
+ log = [];
+ assertEquals(42, (() => {
+ for (var x of g()) { return 42; }
+ })());
+ assertEquals([[]], log);
+
log = [];
assertEquals(42, (() => {
for (let x of g()) { return 42; }
@@ -142,17 +334,505 @@ function* g() { yield 42; return 88 };
log = [];
assertEquals(42, (() => {
+ for (const x of g()) { return 42; }
+ })());
+ assertEquals([[]], log);
+
+ log = [];
+ assertEquals(42, (() => {
for (x of g()) { return 42; }
})());
assertEquals([[]], log);
+
+ log = [];
+ assertEquals(42, eval('for (var x of g()) { x; }'));
+ assertEquals([], log);
+
log = [];
assertEquals(42, eval('for (let x of g()) { x; }'));
assertEquals([], log);
log = [];
+ assertEquals(42, eval('for (const x of g()) { x; }'));
+ assertEquals([], log);
+
+ log = [];
assertEquals(42, eval('for (x of g()) { x; }'));
assertEquals([], log);
+
+
+ // Even if doing the assignment throws, still call return
+ log = [];
+ x = { set attr(_) { throw 1234; } };
+ assertThrowsEquals(() => {
+ for (x.attr of g()) { throw 456; }
+ }, 1234);
+ assertEquals([[]], log);
+
+
+ log = [];
+ assertEquals(42, (() => {
+ var [x] = g(); return x;
+ })());
+ assertEquals([[]], log);
+
+ log = [];
+ assertEquals(42, (() => {
+ let [x] = g(); return x;
+ })());
+ assertEquals([[]], log);
+
+ log = [];
+ assertEquals(42, (() => {
+ const [x] = g(); return x;
+ })());
+ assertEquals([[]], log);
+
+ log = [];
+ assertEquals(42, (() => {
+ [x] = g(); return x;
+ })());
+ assertEquals([[]], log);
+
+ log = []
+ assertEquals(42,
+ (([x]) => x)(g())
+ );
+ assertEquals([[]], log);
+
+
+ log = [];
+ assertEquals(42, (() => {
+ var [x,] = g(); return x;
+ })());
+ assertEquals([[]], log);
+
+ log = [];
+ assertEquals(42, (() => {
+ let [x,] = g(); return x;
+ })());
+ assertEquals([[]], log);
+
+ log = [];
+ assertEquals(42, (() => {
+ const [x,] = g(); return x;
+ })());
+ assertEquals([[]], log);
+
+ log = [];
+ assertEquals(42, (() => {
+ [x,] = g(); return x;
+ })());
+ assertEquals([[]], log);
+
+ log = []
+ assertEquals(42,
+ (([x,]) => x)(g())
+ );
+ assertEquals([[]], log);
+
+
+ log = [];
+ assertEquals(42, (() => {
+ var [x,,] = g(); return x;
+ })());
+ assertEquals([], log);
+
+ log = [];
+ assertEquals(42, (() => {
+ let [x,,] = g(); return x;
+ })());
+ assertEquals([], log);
+
+ log = [];
+ assertEquals(42, (() => {
+ const [x,,] = g(); return x;
+ })());
+ assertEquals([], log);
+
+ log = [];
+ assertEquals(42, (() => {
+ [x,,] = g(); return x;
+ })());
+ assertEquals([], log);
+
+ log = []
+ assertEquals(42,
+ (([x,,]) => x)(g())
+ );
+ assertEquals([], log);
+
+
+ log = [];
+ assertEquals([42, undefined], (() => {
+ var [x, y] = g(); return [x, y];
+ })());
+ assertEquals([], log);
+
+ log = [];
+ assertEquals([42, undefined], (() => {
+ let [x, y] = g(); return [x, y];
+ })());
+ assertEquals([], log);
+
+ log = [];
+ assertEquals([42, undefined], (() => {
+ const [x, y] = g(); return [x, y];
+ })());
+ assertEquals([], log);
+
+ log = [];
+ assertEquals([42, undefined], (() => {
+ [x, y] = g(); return [x, y];
+ })());
+ assertEquals([], log);
+
+ log = []
+ assertEquals([42, undefined],
+ (([x, y]) => [x, y])(g())
+ );
+ assertEquals([], log);
+
+
+ log = [];
+ assertEquals([42], (() => {
+ var [...x] = g(); return x;
+ })());
+ assertEquals([], log);
+
+ log = [];
+ assertEquals([42], (() => {
+ let [...x] = g(); return x;
+ })());
+ assertEquals([], log);
+
+ log = [];
+ assertEquals([42], (() => {
+ const [...x] = g(); return x;
+ })());
+ assertEquals([], log);
+
+ log = [];
+ assertEquals([42], (() => {
+ [...x] = g(); return x;
+ })());
+ assertEquals([], log);
+
+ log = []
+ assertEquals([42],
+ (([...x]) => x)(g())
+ );
+ assertEquals([], log);
+
+
+ log = [];
+ assertEquals([42, []], (() => {
+ var [x, ...y] = g(); return [x, y];
+ })());
+ assertEquals([], log);
+
+ log = [];
+ assertEquals([42, []], (() => {
+ let [x, ...y] = g(); return [x, y];
+ })());
+ assertEquals([], log);
+
+ log = [];
+ assertEquals([42, []], (() => {
+ const [x, ...y] = g(); return [x, y];
+ })());
+ assertEquals([], log);
+
+ log = [];
+ assertEquals([42, []], (() => {
+ [x, ...y] = g(); return [x, y];
+ })());
+ assertEquals([], log);
+
+ log = []
+ assertEquals([42, []],
+ (([x, ...y]) => [x, y])(g())
+ );
+ assertEquals([], log);
+
+
+ log = [];
+ assertEquals([], (() => {
+ var [] = g(); return [];
+ })());
+ assertEquals([[]], log);
+
+ log = [];
+ assertEquals([], (() => {
+ let [] = g(); return [];
+ })());
+ assertEquals([[]], log);
+
+ log = [];
+ assertEquals([], (() => {
+ const [] = g(); return [];
+ })());
+ assertEquals([[]], log);
+
+ log = [];
+ assertEquals([], (() => {
+ [] = g(); return [];
+ })());
+ assertEquals([[]], log);
+
+ log = []
+ assertEquals([],
+ (([]) => [])(g())
+ );
+ assertEquals([[]], log);
+
+
+ log = [];
+ assertEquals([], (() => {
+ var [...[]] = g(); return [];
+ })());
+ assertEquals([], log);
+
+ log = [];
+ assertEquals([], (() => {
+ let [...[]] = g(); return [];
+ })());
+ assertEquals([], log);
+
+ log = [];
+ assertEquals([], (() => {
+ const [...[]] = g(); return [];
+ })());
+ assertEquals([], log);
+
+ log = [];
+ assertEquals([], (() => {
+ [...[]] = g(); return [];
+ })());
+ assertEquals([], log);
+
+ log = []
+ assertEquals([],
+ (([...[]]) => [])(g())
+ );
+ assertEquals([], log);
+
+
+ log = [];
+ assertEquals([42], (() => {
+ var [...[x]] = g(); return [x];
+ })());
+ assertEquals([], log);
+
+ log = [];
+ assertEquals([42], (() => {
+ let [...[x]] = g(); return [x];
+ })());
+ assertEquals([], log);
+
+ log = [];
+ assertEquals([42], (() => {
+ const [...[x]] = g(); return [x];
+ })());
+ assertEquals([], log);
+
+ log = [];
+ assertEquals([42], (() => {
+ [...[x]] = g(); return [x];
+ })());
+ assertEquals([], log);
+
+ log = []
+ assertEquals([42],
+ (([...[x]]) => [x])(g())
+ );
+ assertEquals([], log);
+
+
+ log = [];
+ assertEquals([42, undefined], (() => {
+ var [...[x, y]] = g(); return [x, y];
+ })());
+ assertEquals([], log);
+
+ log = [];
+ assertEquals([42, undefined], (() => {
+ let [...[x, y]] = g(); return [x, y];
+ })());
+ assertEquals([], log);
+
+ log = [];
+ assertEquals([42, undefined], (() => {
+ const [...[x, y]] = g(); return [x, y];
+ })());
+ assertEquals([], log);
+
+ log = [];
+ assertEquals([42, undefined], (() => {
+ [...[x, y]] = g(); return [x, y];
+ })());
+ assertEquals([], log);
+
+ log = []
+ assertEquals([42, undefined],
+ (([...[x, y]]) => [x, y])(g())
+ );
+ assertEquals([], log);
+
+
+ log = []
+ assertThrowsEquals(() => {
+ let x = { set foo(_) { throw 666; } };
+ [x.foo] = g();
+ }, 666);
+ assertEquals([[]], log);
+
+
+ log = []
+ assertThrows(() => {
+ var [[]] = g();
+ }, TypeError);
+ assertEquals([[]], log);
+
+ log = []
+ assertThrows(() => {
+ let [[]] = g();
+ }, TypeError);
+ assertEquals([[]], log);
+
+ log = []
+ assertThrows(() => {
+ const [[]] = g();
+ }, TypeError);
+ assertEquals([[]], log);
+
+ log = []
+ assertThrows(() => {
+ [[]] = g();
+ }, TypeError);
+ assertEquals([[]], log);
+
+ log = []
+ assertThrows(() => {
+ (([[]]) => 0)(g());
+ }, TypeError);
+ assertEquals([[]], log);
+
+
+ log = []
+ assertThrows(() => {
+ var [...[[]]] = g();
+ }, TypeError);
+ assertEquals([], log);
+
+ log = []
+ assertThrows(() => {
+ let [...[[]]] = g();
+ }, TypeError);
+ assertEquals([], log);
+
+ log = []
+ assertThrows(() => {
+ const [...[[]]] = g();
+ }, TypeError);
+ assertEquals([], log);
+
+ log = []
+ assertThrows(() => {
+ [...[[]]] = g();
+ }, TypeError);
+ assertEquals([], log);
+
+ log = []
+ assertThrows(() => {
+ (([...[[]]]) => 0)(g());
+ }, TypeError);
+ assertEquals([], log);
+
+
+ {
+ let backup = Array.prototype[Symbol.iterator];
+ Array.prototype[Symbol.iterator] = () => g();
+
+
+ log = [];
+ assertDoesNotThrow(() => {
+ var [x, ...[y]] = [1, 2, 3]
+ });
+ assertEquals(log, [[]]);
+
+ log = [];
+ assertDoesNotThrow(() => {
+ let [x, ...[y]] = [1, 2, 3];
+ });
+ assertEquals(log, [[]]);
+
+ log = [];
+ assertDoesNotThrow(() => {
+ const [x, ...[y]] = [1, 2, 3];
+ });
+ assertEquals(log, [[]]);
+
+ log = [];
+ assertDoesNotThrow(() => {
+ (([x, ...[y]]) => {})([1, 2, 3]);
+ });
+ assertEquals(log, [[]]);
+
+
+ log = [];
+ assertThrows(() => {
+ var [x, ...[[]]] = [1, 2, 3];
+ }, TypeError);
+ assertEquals(log, [[]]);
+
+ log = [];
+ assertThrows(() => {
+ let [x, ...[[]]] = [1, 2, 3];
+ }, TypeError);
+ assertEquals(log, [[]]);
+
+ log = [];
+ assertThrows(() => {
+ const [x, ...[[]]] = [1, 2, 3];
+ }, TypeError);
+ assertEquals(log, [[]]);
+
+ log = [];
+ assertThrows(() => {
+ (([x, ...[[]]]) => {})([1, 2, 3]);
+ }, TypeError);
+ assertEquals(log, [[]]);
+
+
+ log = [];
+ assertDoesNotThrow(() => {
+ var [x, ...[...y]] = [1, 2, 3];
+ });
+ assertEquals(log, []);
+
+ log = [];
+ assertDoesNotThrow(() => {
+ let [x, ...[...y]] = [1, 2, 3];
+ });
+ assertEquals(log, []);
+
+ log = [];
+ assertDoesNotThrow(() => {
+ const [x, ...[...y]] = [1, 2, 3];
+ });
+ assertEquals(log, []);
+
+ log = [];
+ assertDoesNotThrow(() => {
+ (([x, ...[...y]]) => {})([1, 2, 3]);
+ });
+ assertEquals(log, []);
+
+
+ Array.prototype[Symbol.iterator] = backup;
+ }
}
@@ -161,6 +841,13 @@ function* g() { yield 42; return 88 };
let log = [];
g.prototype.return = (...args) => { log.push(args); throw 23 };
+
+ log = [];
+ assertThrowsEquals(() => {
+ for (var x of g()) { break; }
+ }, 23);
+ assertEquals([[]], log);
+
log = [];
assertThrowsEquals(() => {
for (let x of g()) { break; }
@@ -169,10 +856,23 @@ function* g() { yield 42; return 88 };
log = [];
assertThrowsEquals(() => {
+ for (const x of g()) { break; }
+ }, 23);
+ assertEquals([[]], log);
+
+ log = [];
+ assertThrowsEquals(() => {
for (x of g()) { break; }
}, 23);
assertEquals([[]], log);
+
+ log = [];
+ assertThrowsEquals(() => {
+ for (var x of g()) { throw 42; }
+ }, 42);
+ assertEquals([[]], log);
+
log = [];
assertThrowsEquals(() => {
for (let x of g()) { throw 42; }
@@ -181,10 +881,23 @@ function* g() { yield 42; return 88 };
log = [];
assertThrowsEquals(() => {
+ for (const x of g()) { throw 42; }
+ }, 42);
+ assertEquals([[]], log);
+
+ log = [];
+ assertThrowsEquals(() => {
for (x of g()) { throw 42; }
}, 42);
assertEquals([[]], log);
+
+ log = [];
+ assertThrowsEquals(() => {
+ for (var x of g()) { return 42; }
+ }, 23);
+ assertEquals([[]], log);
+
log = [];
assertThrowsEquals(() => {
for (let x of g()) { return 42; }
@@ -193,17 +906,63 @@ function* g() { yield 42; return 88 };
log = [];
assertThrowsEquals(() => {
+ for (const x of g()) { return 42; }
+ }, 23);
+ assertEquals([[]], log);
+
+ log = [];
+ assertThrowsEquals(() => {
for (x of g()) { return 42; }
}, 23);
assertEquals([[]], log);
+
+ log = [];
+ assertEquals(42, eval('for (var x of g()) { x; }'));
+ assertEquals([], log);
+
log = [];
assertEquals(42, eval('for (let x of g()) { x; }'));
assertEquals([], log);
log = [];
+ assertEquals(42, eval('for (const x of g()) { x; }'));
+ assertEquals([], log);
+
+ log = [];
assertEquals(42, eval('for (x of g()) { x; }'));
assertEquals([], log);
+
+
+ log = [];
+ assertThrowsEquals(() => {
+ var [x] = g(); return x;
+ }, 23);
+ assertEquals([[]], log);
+
+ log = [];
+ assertThrowsEquals(() => {
+ let [x] = g(); return x;
+ }, 23);
+ assertEquals([[]], log);
+
+ log = [];
+ assertThrowsEquals(() => {
+ const [x] = g(); return x;
+ }, 23);
+ assertEquals([[]], log);
+
+ log = [];
+ assertThrowsEquals(() => {
+ [x] = g(); return x;
+ }, 23);
+ assertEquals([[]], log);
+
+ log = [];
+ assertThrowsEquals(() => {
+ (([x]) => x)(g())
+ }, 23);
+ assertEquals([[]], log);
}
@@ -212,13 +971,130 @@ function* g() { yield 42; return 88 };
g.prototype.next = () => { throw 666; };
g.prototype.return = () => { assertUnreachable() };
+
+ assertThrowsEquals(() => {
+ for (var x of g()) {}
+ }, 666);
+
+ assertThrowsEquals(() => {
+ for (let x of g()) {}
+ }, 666);
+
+ assertThrowsEquals(() => {
+ for (const x of g()) {}
+ }, 666);
+
+ assertThrowsEquals(() => {
+ for (x of g()) {}
+ }, 666);
+
+ assertThrowsEquals(() => {
+ var [x] = g();
+ }, 666);
+
+ assertThrowsEquals(() => {
+ let [x] = g();
+ }, 666);
+
+ assertThrowsEquals(() => {
+ const [x] = g();
+ }, 666);
+
+ assertThrowsEquals(() => {
+ [x] = g();
+ }, 666);
+
+ assertThrowsEquals(() => {
+ (([x]) => x)(g());
+ }, 666);
+}
+
+
+// Value throws.
+{
+ g.prototype.next = () => ({get value() {throw 666}});
+ g.prototype.return = () => { assertUnreachable() };
+
+
+ assertThrowsEquals(() => {
+ for (var x of g()) {}
+ }, 666);
+
+ assertThrowsEquals(() => {
+ for (let x of g()) {}
+ }, 666);
+
+ assertThrowsEquals(() => {
+ for (const x of g()) {}
+ }, 666);
+
+ assertThrowsEquals(() => {
+ for (x of g()) {}
+ }, 666);
+
+ assertThrowsEquals(() => {
+ var [x] = g();
+ }, 666);
+
+ assertThrowsEquals(() => {
+ let [x] = g();
+ }, 666);
+
+ assertThrowsEquals(() => {
+ const [x] = g();
+ }, 666);
+
+ assertThrowsEquals(() => {
+ [x] = g();
+ }, 666);
+
+ assertThrowsEquals(() => {
+ (([x]) => x)(g());
+ }, 666);
+}
+
+
+// Done throws.
+{
+ g.prototype.next = () => ({get done() {throw 666}});
+ g.prototype.return = () => { assertUnreachable() };
+
+
+ assertThrowsEquals(() => {
+ for (var x of g()) {}
+ }, 666);
+
assertThrowsEquals(() => {
for (let x of g()) {}
}, 666);
assertThrowsEquals(() => {
+ for (const x of g()) {}
+ }, 666);
+
+ assertThrowsEquals(() => {
for (x of g()) {}
}, 666);
+
+ assertThrowsEquals(() => {
+ var [x] = g();
+ }, 666);
+
+ assertThrowsEquals(() => {
+ let [x] = g();
+ }, 666);
+
+ assertThrowsEquals(() => {
+ const [x] = g();
+ }, 666);
+
+ assertThrowsEquals(() => {
+ [x] = g();
+ }, 666);
+
+ assertThrowsEquals(() => {
+ (([x]) => x)(g());
+ }, 666);
}
@@ -362,3 +1238,75 @@ function* g() { yield 42; return 88 };
}, 5);
assertEquals([1], log);
}
+
+
+// yield*, argument's return method is "undefined".
+function TestYieldStarWithoutReturn(get_iterable) {
+ assertTrue(get_iterable().return == undefined);
+
+ function* g() { yield* get_iterable() }
+
+ {
+ let gen = g();
+ assertEquals({value: 1, done: false}, gen.next());
+ assertEquals({value: undefined, done: true}, gen.return());
+ }
+
+ assertEquals(42, (() => {
+ for (let x of g()) break;
+ return 42;
+ })());
+
+ assertEquals(42, (() => {
+ for (let x of g()) return 42;
+ })());
+
+ assertThrowsEquals(() => {
+ for (let x of g()) throw 42;
+ }, 42);
+}
+{
+ let get_iterable1 = () => [1, 2];
+ let get_iterable2 = function*() { yield 1; yield 2 };
+ get_iterable2.prototype.return = null;
+ TestYieldStarWithoutReturn(get_iterable1);
+ TestYieldStarWithoutReturn(get_iterable2);
+}
+
+
+// yield*, argument's return method is defined.
+{
+ let get_iterable = function*() { yield 1; yield 2 };
+ const obj = {};
+ get_iterable.prototype.return = (...args) => obj;
+
+ function* g() { yield* get_iterable() }
+
+ {
+ let gen = g();
+ assertEquals({value: 1, done: false}, gen.next());
+ assertSame(obj, gen.return());
+ assertSame(obj, gen.return());
+ assertSame(obj, gen.return());
+ assertEquals({value: 2, done: false}, gen.next());
+ assertSame(obj, gen.return());
+ assertSame(obj, gen.return());
+ assertSame(obj, gen.return());
+ assertEquals({value: undefined, done: true}, gen.next());
+ assertEquals({value: undefined, done: true}, gen.return());
+ assertEquals({value: undefined, done: true}, gen.return());
+ }
+
+ assertEquals(42, (() => {
+ for (let x of g()) break;
+ return 42;
+ })());
+
+ assertEquals(42, (() => {
+ for (let x of g()) return 42;
+ })());
+
+ assertThrowsEquals(() => {
+ for (let x of g()) throw 42;
+ }, 42);
+}
diff --git a/deps/v8/test/mjsunit/harmony/module-linking.js b/deps/v8/test/mjsunit/harmony/module-linking.js
deleted file mode 100644
index faaf7f2e49..0000000000
--- a/deps/v8/test/mjsunit/harmony/module-linking.js
+++ /dev/null
@@ -1,298 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --harmony-modules
-
-// Test basic module linking and initialization.
-
-"use strict";
-
-module R {
- // At this point, only functions and modules are initialized.
- assertEquals(undefined, v)
- assertEquals(undefined, vv)
- assertEquals(undefined, R.v)
- assertEquals(undefined, M.v)
- assertEquals(undefined, MM.v)
- assertEquals(undefined, F.v)
- assertEquals(undefined, G.v)
- assertThrows(function() { l }, ReferenceError)
- assertThrows(function() { ll }, ReferenceError)
- assertThrows(function() { R.l }, ReferenceError)
- assertThrows(function() { M.l }, ReferenceError)
- assertThrows(function() { MM.l }, ReferenceError)
- assertThrows(function() { F.l }, ReferenceError)
- assertThrows(function() { G.l }, ReferenceError)
- assertThrows(function() { c }, ReferenceError)
- assertThrows(function() { cc }, ReferenceError)
- assertThrows(function() { R.c }, ReferenceError)
- assertThrows(function() { M.c }, ReferenceError)
- assertThrows(function() { MM.c }, ReferenceError)
- assertThrows(function() { F.c }, ReferenceError)
- assertThrows(function() { G.c }, ReferenceError)
- assertEquals(4, f())
- assertEquals(24, ff())
- assertEquals(4, R.f())
- assertEquals(14, M.f())
- assertEquals(34, MM.f())
- assertEquals(44, F.f())
- assertEquals(14, G.f())
-
- // All properties should already exist on the instance objects, though.
- assertTrue("v" in R)
- assertTrue("v" in RR)
- assertTrue("v" in M)
- assertTrue("v" in MM)
- assertTrue("v" in F)
- assertTrue("v" in G)
- assertTrue("l" in R)
- assertTrue("l" in RR)
- assertTrue("l" in M)
- assertTrue("l" in MM)
- assertTrue("l" in F)
- assertTrue("l" in G)
- assertTrue("c" in R)
- assertTrue("c" in RR)
- assertTrue("c" in M)
- assertTrue("c" in MM)
- assertTrue("c" in F)
- assertTrue("c" in G)
- assertTrue("f" in R)
- assertTrue("f" in RR)
- assertTrue("f" in M)
- assertTrue("f" in MM)
- assertTrue("f" in F)
- assertTrue("f" in G)
- assertTrue("M" in R)
- assertTrue("M" in RR)
- assertTrue("RR" in R)
- assertTrue("RR" in RR)
-
- // And aliases should be identical.
- assertSame(R, RR)
- assertSame(R, R.RR)
- assertSame(M, R.M)
- assertSame(M, G)
-
- // We can only assign to var.
- assertEquals(-1, v = -1)
- assertEquals(-2, R.v = -2)
- assertEquals(-2, v)
- assertEquals(-2, R.v)
-
- assertThrows(function() { l = -1 }, ReferenceError)
- assertThrows(function() { R.l = -2 }, ReferenceError)
- assertThrows(function() { l }, ReferenceError)
- assertThrows(function() { R.l }, ReferenceError)
-
- assertThrows(function() { eval("c = -1") }, TypeError)
- assertThrows(function() { R.c = -2 }, TypeError)
-
- // Initialize first bunch of variables.
- export var v = 1
- export let l = 2
- export const c = 3
- export function f() { return 4 }
-
- assertEquals(1, v)
- assertEquals(1, R.v)
- assertEquals(2, l)
- assertEquals(2, R.l)
- assertEquals(3, c)
- assertEquals(3, R.c)
-
- assertEquals(-3, v = -3)
- assertEquals(-4, R.v = -4)
- assertEquals(-3, l = -3)
- assertEquals(-4, R.l = -4)
- assertThrows(function() { eval("c = -3") }, TypeError)
- assertThrows(function() { R.c = -4 }, TypeError)
-
- assertEquals(-4, v)
- assertEquals(-4, R.v)
- assertEquals(-4, l)
- assertEquals(-4, R.l)
- assertEquals(3, c)
- assertEquals(3, R.c)
-
- // Initialize nested module.
- export module M {
- export var v = 11
- export let l = 12
- export const c = 13
- export function f() { return 14 }
- }
-
- assertEquals(11, M.v)
- assertEquals(11, G.v)
- assertEquals(12, M.l)
- assertEquals(12, G.l)
- assertEquals(13, M.c)
- assertEquals(13, G.c)
-
- // Initialize non-exported variables.
- var vv = 21
- let ll = 22
- const cc = 23
- function ff() { return 24 }
-
- assertEquals(21, vv)
- assertEquals(22, ll)
- assertEquals(23, cc)
-
- // Initialize non-exported module.
- module MM {
- export var v = 31
- export let l = 32
- export const c = 33
- export function f() { return 34 }
- }
-
- assertEquals(31, MM.v)
- assertEquals(32, MM.l)
- assertEquals(33, MM.c)
-
- // Recursive self reference.
- export module RR = R
-}
-
-// Initialize sibling module that was forward-used.
-module F {
- assertEquals(undefined, v)
- assertEquals(undefined, F.v)
- assertThrows(function() { l }, ReferenceError)
- assertThrows(function() { F.l }, ReferenceError)
- assertThrows(function() { c }, ReferenceError)
- assertThrows(function() { F.c }, ReferenceError)
-
- export var v = 41
- export let l = 42
- export const c = 43
- export function f() { return 44 }
-
- assertEquals(41, v)
- assertEquals(41, F.v)
- assertEquals(42, l)
- assertEquals(42, F.l)
- assertEquals(43, c)
- assertEquals(43, F.c)
-}
-
-// Define recursive module alias.
-module G = R.M
-
-
-
-// Second test with side effects and more module nesting.
-
-let log = "";
-
-export let x = (log += "1");
-
-export module B = A.B
-
-export module A {
- export let x = (log += "2");
- let y = (log += "3");
- export function f() { log += "5" };
- export module B {
- module BB = B;
- export BB, x;
- let x = (log += "4");
- f();
- let y = (log += "6");
- }
- export let z = (log += "7");
- export module C {
- export let z = (log += "8");
- export module D = B
- export module C = A.C
- }
- module D {}
-}
-
-export module M1 {
- export module A2 = M2;
- export let x = (log += "9");
-}
-export module M2 {
- export module A1 = M1;
- export let x = (log += "0");
-}
-
-assertEquals("object", typeof A);
-assertTrue('x' in A);
-assertFalse('y' in A);
-assertTrue('f' in A);
-assertTrue('B' in A);
-assertTrue('z' in A);
-assertTrue('C' in A);
-assertFalse('D' in A);
-
-assertEquals("object", typeof B);
-assertTrue('BB' in B);
-assertTrue('x' in B);
-assertFalse('y' in B);
-
-assertEquals("object", typeof A.B);
-assertTrue('BB' in A.B);
-assertTrue('x' in A.B);
-assertFalse('y' in A.B);
-
-assertEquals("object", typeof A.B.BB);
-assertTrue('BB' in A.B.BB);
-assertTrue('x' in A.B.BB);
-assertFalse('y' in A.B.BB);
-
-assertEquals("object", typeof A.C);
-assertTrue('z' in A.C);
-assertTrue('D' in A.C);
-assertTrue('C' in A.C);
-
-assertEquals("object", typeof M1);
-assertEquals("object", typeof M2);
-assertTrue('A2' in M1);
-assertTrue('A1' in M2);
-assertEquals("object", typeof M1.A2);
-assertEquals("object", typeof M2.A1);
-assertTrue('A1' in M1.A2);
-assertTrue('A2' in M2.A1);
-assertEquals("object", typeof M1.A2.A1);
-assertEquals("object", typeof M2.A1.A2);
-
-assertSame(B, A.B);
-assertSame(B, B.BB);
-assertSame(B, A.C.D);
-assertSame(A.C, A.C.C);
-assertFalse(A.D === A.C.D);
-
-assertSame(M1, M2.A1);
-assertSame(M2, M1.A2);
-assertSame(M1, M1.A2.A1);
-assertSame(M2, M2.A1.A2);
-
-assertEquals("1234567890", log);
diff --git a/deps/v8/test/mjsunit/harmony/module-parsing-eval.js b/deps/v8/test/mjsunit/harmony/module-parsing-eval.js
index fa9e5ec35c..6c080dbf44 100644
--- a/deps/v8/test/mjsunit/harmony/module-parsing-eval.js
+++ b/deps/v8/test/mjsunit/harmony/module-parsing-eval.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-modules
-
// Check that import/export declarations are rejected in eval or local scope.
assertThrows("export let x;", SyntaxError);
assertThrows("import x from 'http://url';", SyntaxError);
diff --git a/deps/v8/test/mjsunit/harmony/module-recompile.js b/deps/v8/test/mjsunit/harmony/module-recompile.js
deleted file mode 100644
index 23f5bfc4d4..0000000000
--- a/deps/v8/test/mjsunit/harmony/module-recompile.js
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --harmony-modules
-
-// Test that potential recompilation of the global scope does not screw up.
-
-"use strict";
-
-var N = 1e5; // Number of loop iterations that trigger optimization.
-
-module A {
- export var x = 1
- export function f() { return x }
-}
-var f = A.f
-
-assertEquals(1, A.x)
-assertEquals(1, A.f())
-assertEquals(1, f())
-
-A.x = 2
-
-assertEquals(2, A.x)
-assertEquals(2, A.f())
-assertEquals(2, f())
-
-for (var i = 0; i < N; i++) {
- if (i > N) print("impossible");
-}
-
-assertEquals(2, A.x)
-assertEquals(2, A.f())
-assertEquals(2, f())
-
-
-// Same test with loop inside a module.
-
-module B {
- module A {
- export var x = 1
- export function f() { return x }
- }
- var f = A.f
-
- assertEquals(1, A.x)
- assertEquals(1, A.f())
- assertEquals(1, f())
-
- A.x = 2
-
- assertEquals(2, A.x)
- assertEquals(2, A.f())
- assertEquals(2, f())
-
- for (var i = 0; i < N; i++) {
- if (i > N) print("impossible");
- }
-
- assertEquals(2, A.x)
- assertEquals(2, A.f())
- assertEquals(2, f())
-}
diff --git a/deps/v8/test/mjsunit/harmony/module-resolution.js b/deps/v8/test/mjsunit/harmony/module-resolution.js
deleted file mode 100644
index 7f1e431313..0000000000
--- a/deps/v8/test/mjsunit/harmony/module-resolution.js
+++ /dev/null
@@ -1,152 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --harmony-modules
-
-// Test basic module interface inference.
-
-"use strict";
-
-print("begin.")
-
-
-export let x = print("0")
-
-export module B = A.B
-
-export module A {
- export let x = print("1")
- export let f = function() { return B.x }
- export module B {
- module BB = B
- export BB, x
- let x = print("2")
- var y = print("3")
- let Ax = A.x
- try { A.y } catch (e) {} // throws
- let Az = A.z // undefined
- let Az2 = z // undefined
- A.g() // hoisted
- g() // hoisted
- let ABx = A.B.x
- let ABy = A.B.y
- let Bx = B.x
- let By = B.y
- let BBx = BB.x
- let BBy = BB.y
- let Af = A.f
- function f(x,y) { return x }
- }
- export let y = print("4")
- export var z = print("4.1")
- export function g() {}
- let Ax = A.x
- let Bx = B.x
- let ABx = A.B.x
- module C {
- export let z = print("5")
- export module D = B
- // TODO(rossberg): turn these into proper negative test cases once we have
- // suitable error messages.
- // import C.z // multiple declarations
- import x from B
- }
- module D {
- // TODO(rossberg): Handle import *.
- // import A.* // invalid forward import
- }
- module M {}
- // TODO(rossberg): Handle import *.
- // import M.* // invalid forward import
- let Cz = C.z
- let CDx = C.D.x
-}
-
-export module Imports {
- module A1 {
- export module A2 {}
- }
- module B {
- // TODO(rossberg): Handle import *.
- // import A1.*
- // import A2.* // unbound variable A2
- }
-}
-
-export module E {
- export let xx = x
- export y, B
- let Bx = B.x
- // TODO(rossberg): Handle import *.
- // import A.*
- module B = A.B
- let y = A.y
-}
-
-export module M1 {
- export module A2 = M2
-}
-export module M2 {
- export module A1 = M1
-}
-
-// TODO(rossberg): turn these into proper negative test cases once we have
-// suitable error messages.
-// module W1 = W2.W
-// module W2 = { export module W = W3 }
-// module W3 = W1 // cyclic module definition
-
-// module W1 = W2.W3
-// module W2 = {
-// export module W3 = W4
-// export module W4 = W1
-// } // cyclic module definition
-
-// TODO(rossberg): Handle import *.
-//module M3B = M3.B
-//export module M3 {
-// export module B { export let x = "" }
-// module C1 = { import M3.* }
-// module C2 = { import M3.B.* }
-// module C3 = { import M3B.* }
-// module C4 = { export x import B.* }
-//// TODO(rossberg): turn these into proper negative test cases once we have
-//// suitable error messages.
-//// export module C5 = { import C5.* } // invalid forward import
-//// export module C6 = { import M3.C6.* } // invalid forward import
-//}
-
-export module External at "external.js"
-export module External1 = External
-//export module ExternalA = External.A
-export module InnerExternal {
- export module E at "external.js"
-}
-export module External2 = InnerExternal.E
-//export let xxx = InnerExternal.E.A.x
-
-print("end.")
diff --git a/deps/v8/test/mjsunit/harmony/object-entries.js b/deps/v8/test/mjsunit/harmony/object-entries.js
index 58af4d6f33..101988a052 100644
--- a/deps/v8/test/mjsunit/harmony/object-entries.js
+++ b/deps/v8/test/mjsunit/harmony/object-entries.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-object-values-entries --harmony-proxies --harmony-reflect
+// Flags: --harmony-object-values-entries
// Flags: --allow-natives-syntax
function TestMeta() {
@@ -247,3 +247,69 @@ function TestMutateDuringEnumeration() {
assertEquals([ [ "a", 1 ], [ "b", 2 ] ], Object.entries(aMakesBEnumerable));
}
TestMutateDuringEnumeration();
+
+
+(function TestElementKinds() {
+ var O1 = { name: "1" }, O2 = { name: "2" }, O3 = { name: "3" };
+ var PI = 3.141592653589793;
+ var E = 2.718281828459045;
+ function fastSloppyArguments(a, b, c) {
+ delete arguments[0];
+ arguments[0] = a;
+ return arguments;
+ }
+
+ function slowSloppyArguments(a, b, c) {
+ delete arguments[0];
+ arguments[0] = a;
+ Object.defineProperties(arguments, {
+ 0: {
+ enumerable: true,
+ value: a
+ },
+ 9999: {
+ enumerable: false,
+ value: "Y"
+ }
+ });
+ arguments[10000] = "X";
+ return arguments;
+ }
+
+ var element_kinds = {
+ FAST_SMI_ELEMENTS: [ [1, 2, 3], [ ["0", 1], ["1", 2], ["2", 3] ] ],
+ FAST_HOLEY_SMI_ELEMENTS: [ [, , 3], [ ["2", 3] ] ],
+ FAST_ELEMENTS: [ [O1, O2, O3], [ ["0", O1], ["1", O2], ["2", O3] ] ],
+ FAST_HOLEY_ELEMENTS: [ [, , O3], [ ["2", O3] ] ],
+ FAST_DOUBLE_ELEMENTS: [ [E, NaN, PI], [ ["0", E], ["1", NaN], ["2", PI] ] ],
+ FAST_HOLEY_DOUBLE_ELEMENTS: [ [, , NaN], [ ["2", NaN] ] ],
+
+ DICTIONARY_ELEMENTS: [ Object.defineProperties({ 10000: "world" }, {
+ 100: { enumerable: true, value: "hello" },
+ 99: { enumerable: false, value: "nope" }
+ }), [ ["100", "hello"], ["10000", "world" ] ] ],
+ FAST_SLOPPY_ARGUMENTS_ELEMENTS: [
+ fastSloppyArguments("a", "b", "c"),
+ [ ["0", "a"], ["1", "b"], ["2", "c"] ] ],
+ SLOW_SLOPPY_ARGUMENTS_ELEMENTS: [
+ slowSloppyArguments("a", "b", "c"),
+ [ ["0", "a"], ["1", "b"], ["2", "c"], ["10000", "X"] ] ],
+
+ FAST_STRING_WRAPPER_ELEMENTS: [ new String("str"),
+ [ ["0", "s"], ["1", "t"], ["2", "r"]] ],
+ SLOW_STRING_WRAPPER_ELEMENTS: [
+ Object.defineProperties(new String("str"), {
+ 10000: { enumerable: false, value: "X" },
+ 9999: { enumerable: true, value: "Y" }
+ }), [["0", "s"], ["1", "t"], ["2", "r"], ["9999", "Y"]] ],
+ };
+
+ for (let [kind, [object, expected]] of Object.entries(element_kinds)) {
+ let result1 = Object.entries(object);
+ assertEquals(expected, result1, `fast Object.entries() with ${kind}`);
+
+ let proxy = new Proxy(object, {});
+ let result2 = Object.entries(proxy);
+ assertEquals(result1, result2, `slow Object.entries() with ${kind}`);
+ }
+})();
diff --git a/deps/v8/test/mjsunit/harmony/object-get-own-property-descriptors.js b/deps/v8/test/mjsunit/harmony/object-get-own-property-descriptors.js
index b23e7d6e02..7f631d8e58 100644
--- a/deps/v8/test/mjsunit/harmony/object-get-own-property-descriptors.js
+++ b/deps/v8/test/mjsunit/harmony/object-get-own-property-descriptors.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --harmony-object-own-property-descriptors
-// Flags: --harmony-proxies --harmony-reflect --allow-natives-syntax
+// Flags: --allow-natives-syntax
function DataDescriptor(value) {
return { "enumerable": true, "configurable": true, "writable": true, value };
diff --git a/deps/v8/test/mjsunit/harmony/object-values.js b/deps/v8/test/mjsunit/harmony/object-values.js
index f56fe8a7b3..141070db9c 100644
--- a/deps/v8/test/mjsunit/harmony/object-values.js
+++ b/deps/v8/test/mjsunit/harmony/object-values.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-object-values-entries --harmony-proxies --harmony-reflect
+// Flags: --harmony-object-values-entries
// Flags: --allow-natives-syntax
function TestMeta() {
@@ -227,3 +227,66 @@ function TestMutateDuringEnumeration() {
assertEquals([1, 2], Object.values(aMakesBEnumerable));
}
TestMutateDuringEnumeration();
+
+
+(function TestElementKinds() {
+ var O1 = { name: "1" }, O2 = { name: "2" }, O3 = { name: "3" };
+ var PI = 3.141592653589793;
+ var E = 2.718281828459045;
+ function fastSloppyArguments(a, b, c) {
+ delete arguments[0];
+ arguments[0] = a;
+ return arguments;
+ }
+
+ function slowSloppyArguments(a, b, c) {
+ delete arguments[0];
+ arguments[0] = a;
+ Object.defineProperties(arguments, {
+ 0: {
+ enumerable: true,
+ value: a
+ },
+ 9999: {
+ enumerable: false,
+ value: "Y"
+ }
+ });
+ arguments[10000] = "X";
+ return arguments;
+ }
+
+ var element_kinds = {
+ FAST_SMI_ELEMENTS: [ [1, 2, 3], [1, 2, 3] ],
+ FAST_HOLEY_SMI_ELEMENTS: [ [, , 3], [ 3 ] ],
+ FAST_ELEMENTS: [ [O1, O2, O3], [O1, O2, O3] ],
+ FAST_HOLEY_ELEMENTS: [ [, , O3], [O3] ],
+ FAST_DOUBLE_ELEMENTS: [ [E, NaN, PI], [E, NaN, PI] ],
+ FAST_HOLEY_DOUBLE_ELEMENTS: [ [, , NaN], [NaN] ],
+
+ DICTIONARY_ELEMENTS: [ Object.defineProperties({ 10000: "world" }, {
+ 100: { enumerable: true, value: "hello" },
+ 99: { enumerable: false, value: "nope" }
+ }), [ "hello", "world" ] ],
+ FAST_SLOPPY_ARGUMENTS_ELEMENTS: [
+ fastSloppyArguments("a", "b", "c"), ["a", "b", "c"] ],
+ SLOW_SLOPPY_ARGUMENTS_ELEMENTS: [
+ slowSloppyArguments("a", "b", "c"), [ "a", "b", "c", "X"]],
+
+ FAST_STRING_WRAPPER_ELEMENTS: [ new String("str"), ["s", "t", "r"] ],
+ SLOW_STRING_WRAPPER_ELEMENTS: [
+ Object.defineProperties(new String("str"), {
+ 10000: { enumerable: false, value: "X" },
+ 9999: { enumerable: true, value: "Y" }
+ }), ["s", "t", "r", "Y"] ],
+ };
+
+ for (let [kind, [object, expected]] of Object.entries(element_kinds)) {
+ let result1 = Object.values(object);
+ assertEquals(expected, result1, `fast Object.values() with ${kind}`);
+
+ let proxy = new Proxy(object, {});
+ let result2 = Object.values(proxy);
+ assertEquals(result1, result2, `slow Object.values() with ${kind}`);
+ }
+})();
diff --git a/deps/v8/test/mjsunit/harmony/private-symbols.js b/deps/v8/test/mjsunit/harmony/private-symbols.js
index 18a2e4cf07..4006359746 100644
--- a/deps/v8/test/mjsunit/harmony/private-symbols.js
+++ b/deps/v8/test/mjsunit/harmony/private-symbols.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies --harmony-reflect --allow-natives-syntax
+// Flags: --allow-natives-syntax
var symbol = %CreatePrivateSymbol("private");
diff --git a/deps/v8/test/mjsunit/harmony/proxies-example-membrane.js b/deps/v8/test/mjsunit/harmony/proxies-example-membrane.js
deleted file mode 100644
index 72ab092a88..0000000000
--- a/deps/v8/test/mjsunit/harmony/proxies-example-membrane.js
+++ /dev/null
@@ -1,512 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --harmony --harmony-proxies
-
-
-// A simple no-op handler. Adapted from:
-// http://wiki.ecmascript.org/doku.php?id=harmony:proxies#examplea_no-op_forwarding_proxy
-
-function createHandler(obj) {
- return {
- getOwnPropertyDescriptor: function(name) {
- var desc = Object.getOwnPropertyDescriptor(obj, name);
- if (desc !== undefined) desc.configurable = true;
- return desc;
- },
- getPropertyDescriptor: function(name) {
- var desc = Object.getOwnPropertyDescriptor(obj, name);
- //var desc = Object.getPropertyDescriptor(obj, name); // not in ES5
- if (desc !== undefined) desc.configurable = true;
- return desc;
- },
- getOwnPropertyNames: function() {
- return Object.getOwnPropertyNames(obj);
- },
- getPropertyNames: function() {
- return Object.getOwnPropertyNames(obj);
- //return Object.getPropertyNames(obj); // not in ES5
- },
- defineProperty: function(name, desc) {
- Object.defineProperty(obj, name, desc);
- },
- delete: function(name) {
- return delete obj[name];
- },
- fix: function() {
- if (Object.isFrozen(obj)) {
- var result = {};
- Object.getOwnPropertyNames(obj).forEach(function(name) {
- result[name] = Object.getOwnPropertyDescriptor(obj, name);
- });
- return result;
- }
- // As long as obj is not frozen, the proxy won't allow itself to be fixed
- return undefined; // will cause a TypeError to be thrown
- },
- has: function(name) { return name in obj; },
- hasOwn: function(name) { return ({}).hasOwnProperty.call(obj, name); },
- get: function(receiver, name) { return obj[name]; },
- set: function(receiver, name, val) {
- obj[name] = val; // bad behavior when set fails in sloppy mode
- return true;
- },
- enumerate: function() {
- var result = [];
- for (var name in obj) { result.push(name); };
- return result;
- },
- keys: function() { return Object.keys(obj); }
- };
-}
-
-
-
-// Auxiliary definitions enabling tracking of object identity in output.
-
-var objectMap = new WeakMap;
-var objectCounter = 0;
-
-function registerObject(x, s) {
- if (x === Object(x) && !objectMap.has(x))
- objectMap.set(x, ++objectCounter + (s == undefined ? "" : ":" + s));
-}
-
-registerObject(this, "global");
-registerObject(Object.prototype, "Object.prototype");
-
-function str(x) {
- if (x === Object(x)) return "[" + typeof x + " " + objectMap.get(x) + "]";
- if (typeof x == "string") return "\"" + x + "\"";
- return "" + x;
-}
-
-
-
-// A simple membrane. Adapted from:
-// http://wiki.ecmascript.org/doku.php?id=harmony:proxies#a_simple_membrane
-
-function createSimpleMembrane(target) {
- var enabled = true;
-
- function wrap(obj) {
- registerObject(obj);
- print("wrap enter", str(obj));
- try {
- var x = wrap2(obj);
- registerObject(x, "wrapped");
- print("wrap exit", str(obj), "as", str(x));
- return x;
- } catch(e) {
- print("wrap exception", str(e));
- throw e;
- }
- }
-
- function wrap2(obj) {
- if (obj !== Object(obj)) {
- return obj;
- }
-
- function wrapCall(fun, that, args) {
- registerObject(that);
- print("wrapCall enter", fun, str(that));
- try {
- var x = wrapCall2(fun, that, args);
- print("wrapCall exit", fun, str(that), "returning", str(x));
- return x;
- } catch(e) {
- print("wrapCall exception", fun, str(that), str(e));
- throw e;
- }
- }
-
- function wrapCall2(fun, that, args) {
- if (!enabled) { throw new Error("disabled"); }
- try {
- return wrap(fun.apply(that, Array.prototype.map.call(args, wrap)));
- } catch (e) {
- throw wrap(e);
- }
- }
-
- var baseHandler = createHandler(obj);
- var handler = new Proxy({}, Object.freeze({
- get: function(receiver, name) {
- return function() {
- var arg = (name === "get" || name == "set") ? arguments[1] : "";
- print("handler enter", name, arg);
- var x = wrapCall(baseHandler[name], baseHandler, arguments);
- print("handler exit", name, arg, "returning", str(x));
- return x;
- }
- }
- }));
- registerObject(baseHandler, "basehandler");
- registerObject(handler, "handler");
-
- if (typeof obj === "function") {
- function callTrap() {
- print("call trap enter", str(obj), str(this));
- var x = wrapCall(obj, wrap(this), arguments);
- print("call trap exit", str(obj), str(this), "returning", str(x));
- return x;
- }
- function constructTrap() {
- if (!enabled) { throw new Error("disabled"); }
- try {
- function forward(args) { return obj.apply(this, args) }
- return wrap(new forward(Array.prototype.map.call(arguments, wrap)));
- } catch (e) {
- throw wrap(e);
- }
- }
- return Proxy.createFunction(handler, callTrap, constructTrap);
- } else {
- var prototype = wrap(Object.getPrototypeOf(obj));
- return new Proxy(prototype, handler);
- }
- }
-
- var gate = Object.freeze({
- enable: function() { enabled = true; },
- disable: function() { enabled = false; }
- });
-
- return Object.freeze({
- wrapper: wrap(target),
- gate: gate
- });
-}
-
-
-var o = {
- a: 6,
- b: {bb: 8},
- f: function(x) { return x },
- g: function(x) { return x.a },
- h: function(x) { this.q = x }
-};
-o[2] = {c: 7};
-var m = createSimpleMembrane(o);
-var w = m.wrapper;
-print("o =", str(o))
-print("w =", str(w));
-
-var f = w.f;
-var x = f(66);
-var x = f({a: 1});
-var x = w.f({a: 1});
-var a = x.a;
-assertEquals(6, w.a);
-assertEquals(8, w.b.bb);
-assertEquals(7, w[2]["c"]);
-assertEquals(undefined, w.c);
-assertEquals(1, w.f(1));
-assertEquals(1, w.f({a: 1}).a);
-assertEquals(2, w.g({a: 2}));
-assertEquals(3, (w.r = {a: 3}).a);
-assertEquals(3, w.r.a);
-assertEquals(3, o.r.a);
-w.h(3);
-assertEquals(3, w.q);
-assertEquals(3, o.q);
-assertEquals(4, (new w.h(4)).q);
-
-var wb = w.b;
-var wr = w.r;
-var wf = w.f;
-var wf3 = w.f(3);
-var wfx = w.f({a: 6});
-var wgx = w.g({a: {aa: 7}});
-var wh4 = new w.h(4);
-m.gate.disable();
-assertEquals(3, wf3);
-assertThrows(function() { w.a }, Error);
-assertThrows(function() { w.r }, Error);
-assertThrows(function() { w.r = {a: 4} }, Error);
-assertThrows(function() { o.r.a }, Error);
-assertEquals("object", typeof o.r);
-assertEquals(5, (o.r = {a: 5}).a);
-assertEquals(5, o.r.a);
-assertThrows(function() { w[1] }, Error);
-assertThrows(function() { w.c }, Error);
-assertThrows(function() { wb.bb }, Error);
-assertThrows(function() { wr.a }, Error);
-assertThrows(function() { wf(4) }, Error);
-assertThrows(function() { wfx.a }, Error);
-assertThrows(function() { wgx.aa }, Error);
-assertThrows(function() { wh4.q }, Error);
-
-m.gate.enable();
-assertEquals(6, w.a);
-assertEquals(5, w.r.a);
-assertEquals(5, o.r.a);
-assertEquals(7, w.r = 7);
-assertEquals(7, w.r);
-assertEquals(7, o.r);
-assertEquals(8, w.b.bb);
-assertEquals(7, w[2]["c"]);
-assertEquals(undefined, w.c);
-assertEquals(8, wb.bb);
-assertEquals(3, wr.a);
-assertEquals(4, wf(4));
-assertEquals(3, wf3);
-assertEquals(6, wfx.a);
-assertEquals(7, wgx.aa);
-assertEquals(4, wh4.q);
-
-
-// An identity-preserving membrane. Adapted from:
-// http://wiki.ecmascript.org/doku.php?id=harmony:proxies#an_identity-preserving_membrane
-
-function createMembrane(wetTarget) {
- var wet2dry = new WeakMap();
- var dry2wet = new WeakMap();
-
- function asDry(obj) {
- registerObject(obj)
- print("asDry enter", str(obj))
- try {
- var x = asDry2(obj);
- registerObject(x, "dry");
- print("asDry exit", str(obj), "as", str(x));
- return x;
- } catch(e) {
- print("asDry exception", str(e));
- throw e;
- }
- }
- function asDry2(wet) {
- if (wet !== Object(wet)) {
- // primitives provide only irrevocable knowledge, so don't
- // bother wrapping it.
- return wet;
- }
- var dryResult = wet2dry.get(wet);
- if (dryResult) { return dryResult; }
-
- var wetHandler = createHandler(wet);
- var dryRevokeHandler = new Proxy({}, Object.freeze({
- get: function(receiver, name) {
- return function() {
- var arg = (name === "get" || name == "set") ? arguments[1] : "";
- print("dry handler enter", name, arg);
- var optWetHandler = dry2wet.get(dryRevokeHandler);
- try {
- var x = asDry(optWetHandler[name].apply(
- optWetHandler, Array.prototype.map.call(arguments, asWet)));
- print("dry handler exit", name, arg, "returning", str(x));
- return x;
- } catch (eWet) {
- var x = asDry(eWet);
- print("dry handler exception", name, arg, "throwing", str(x));
- throw x;
- }
- };
- }
- }));
- dry2wet.set(dryRevokeHandler, wetHandler);
-
- if (typeof wet === "function") {
- function callTrap() {
- print("dry call trap enter", str(this));
- var x = asDry(wet.apply(
- asWet(this), Array.prototype.map.call(arguments, asWet)));
- print("dry call trap exit", str(this), "returning", str(x));
- return x;
- }
- function constructTrap() {
- function forward(args) { return wet.apply(this, args) }
- return asDry(new forward(Array.prototype.map.call(arguments, asWet)));
- }
- dryResult =
- Proxy.createFunction(dryRevokeHandler, callTrap, constructTrap);
- } else {
- dryResult =
- new Proxy(asDry(Object.getPrototypeOf(wet)), dryRevokeHandler);
- }
- wet2dry.set(wet, dryResult);
- dry2wet.set(dryResult, wet);
- return dryResult;
- }
-
- function asWet(obj) {
- registerObject(obj)
- print("asWet enter", str(obj))
- try {
- var x = asWet2(obj)
- registerObject(x, "wet")
- print("asWet exit", str(obj), "as", str(x))
- return x
- } catch(e) {
- print("asWet exception", str(e))
- throw e
- }
- }
- function asWet2(dry) {
- if (dry !== Object(dry)) {
- // primitives provide only irrevocable knowledge, so don't
- // bother wrapping it.
- return dry;
- }
- var wetResult = dry2wet.get(dry);
- if (wetResult) { return wetResult; }
-
- var dryHandler = createHandler(dry);
- var wetRevokeHandler = new Proxy({}, Object.freeze({
- get: function(receiver, name) {
- return function() {
- var arg = (name === "get" || name == "set") ? arguments[1] : "";
- print("wet handler enter", name, arg);
- var optDryHandler = wet2dry.get(wetRevokeHandler);
- try {
- var x = asWet(optDryHandler[name].apply(
- optDryHandler, Array.prototype.map.call(arguments, asDry)));
- print("wet handler exit", name, arg, "returning", str(x));
- return x;
- } catch (eDry) {
- var x = asWet(eDry);
- print("wet handler exception", name, arg, "throwing", str(x));
- throw x;
- }
- };
- }
- }));
- wet2dry.set(wetRevokeHandler, dryHandler);
-
- if (typeof dry === "function") {
- function callTrap() {
- print("wet call trap enter", str(this));
- var x = asWet(dry.apply(
- asDry(this), Array.prototype.map.call(arguments, asDry)));
- print("wet call trap exit", str(this), "returning", str(x));
- return x;
- }
- function constructTrap() {
- function forward(args) { return dry.apply(this, args) }
- return asWet(new forward(Array.prototype.map.call(arguments, asDry)));
- }
- wetResult =
- Proxy.createFunction(wetRevokeHandler, callTrap, constructTrap);
- } else {
- wetResult =
- new Proxy(asWet(Object.getPrototypeOf(dry)), wetRevokeHandler);
- }
- dry2wet.set(dry, wetResult);
- wet2dry.set(wetResult, dry);
- return wetResult;
- }
-
- var gate = Object.freeze({
- revoke: function() {
- dry2wet = wet2dry = Object.freeze({
- get: function(key) { throw new Error("revoked"); },
- set: function(key, val) { throw new Error("revoked"); }
- });
- }
- });
-
- return Object.freeze({ wrapper: asDry(wetTarget), gate: gate });
-}
-
-
-var receiver
-var argument
-var o = {
- a: 6,
- b: {bb: 8},
- f: function(x) { receiver = this; argument = x; return x },
- g: function(x) { receiver = this; argument = x; return x.a },
- h: function(x) { receiver = this; argument = x; this.q = x },
- s: function(x) { receiver = this; argument = x; this.x = {y: x}; return this }
-}
-o[2] = {c: 7}
-var m = createMembrane(o)
-var w = m.wrapper
-print("o =", str(o))
-print("w =", str(w))
-
-var f = w.f
-var x = f(66)
-var x = f({a: 1})
-var x = w.f({a: 1})
-var a = x.a
-assertEquals(6, w.a)
-assertEquals(8, w.b.bb)
-assertEquals(7, w[2]["c"])
-assertEquals(undefined, w.c)
-assertEquals(1, w.f(1))
-assertSame(o, receiver)
-assertEquals(1, w.f({a: 1}).a)
-assertSame(o, receiver)
-assertEquals(2, w.g({a: 2}))
-assertSame(o, receiver)
-assertSame(w, w.f(w))
-assertSame(o, receiver)
-assertSame(o, argument)
-assertSame(o, w.f(o))
-assertSame(o, receiver)
-// Note that argument !== o, since o isn't dry, so gets wrapped wet again.
-assertEquals(3, (w.r = {a: 3}).a)
-assertEquals(3, w.r.a)
-assertEquals(3, o.r.a)
-w.h(3)
-assertEquals(3, w.q)
-assertEquals(3, o.q)
-assertEquals(4, (new w.h(4)).q)
-assertEquals(5, w.s(5).x.y)
-assertSame(o, receiver)
-
-var wb = w.b
-var wr = w.r
-var wf = w.f
-var wf3 = w.f(3)
-var wfx = w.f({a: 6})
-var wgx = w.g({a: {aa: 7}})
-var wh4 = new w.h(4)
-var ws5 = w.s(5)
-var ws5x = ws5.x
-m.gate.revoke()
-assertEquals(3, wf3)
-assertThrows(function() { w.a }, Error)
-assertThrows(function() { w.r }, Error)
-assertThrows(function() { w.r = {a: 4} }, Error)
-assertThrows(function() { o.r.a }, Error)
-assertEquals("object", typeof o.r)
-assertEquals(5, (o.r = {a: 5}).a)
-assertEquals(5, o.r.a)
-assertThrows(function() { w[1] }, Error)
-assertThrows(function() { w.c }, Error)
-assertThrows(function() { wb.bb }, Error)
-assertEquals(3, wr.a)
-assertThrows(function() { wf(4) }, Error)
-assertEquals(6, wfx.a)
-assertEquals(7, wgx.aa)
-assertThrows(function() { wh4.q }, Error)
-assertThrows(function() { ws5.x }, Error)
-assertThrows(function() { ws5x.y }, Error)
diff --git a/deps/v8/test/mjsunit/harmony/proxies-function.js b/deps/v8/test/mjsunit/harmony/proxies-function.js
deleted file mode 100644
index b2498b8665..0000000000
--- a/deps/v8/test/mjsunit/harmony/proxies-function.js
+++ /dev/null
@@ -1,768 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --harmony-proxies --allow-natives-syntax
-
-
-// TODO(neis): These tests are temporarily commented out because of ongoing
-// changes to the implementation of proxies.
-
-
-//// Ensures that checking the "length" property of a function proxy doesn't
-//// crash due to lack of a [[Get]] method.
-//var handler = {
-// get : function(r, n) { return n == "length" ? 2 : undefined }
-//}
-//
-//
-//// Calling (call, Function.prototype.call, Function.prototype.apply,
-//// Function.prototype.bind).
-//
-//var global_object = this
-//var receiver
-//
-//function TestCall(isStrict, callTrap) {
-// assertEquals(42, callTrap(5, 37))
-// assertSame(isStrict ? undefined : global_object, receiver)
-//
-// var handler = {
-// get: function(r, k) {
-// return k == "length" ? 2 : Function.prototype[k]
-// }
-// }
-// var f = Proxy.createFunction(handler, callTrap)
-// var o = {f: f}
-// global_object.f = f
-//
-// receiver = 333
-// assertEquals(42, f(11, 31))
-// assertSame(isStrict ? undefined : global_object, receiver)
-// receiver = 333
-// assertEquals(42, o.f(10, 32))
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(42, o["f"](9, 33))
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(42, (1, o).f(8, 34))
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(42, (1, o)["f"](7, 35))
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(42, f.call(o, 32, 10))
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(42, f.call(undefined, 33, 9))
-// assertSame(isStrict ? undefined : global_object, receiver)
-// receiver = 333
-// assertEquals(42, f.call(null, 33, 9))
-// assertSame(isStrict ? null : global_object, receiver)
-// receiver = 333
-// assertEquals(44, f.call(2, 21, 23))
-// assertSame(2, receiver.valueOf())
-// receiver = 333
-// assertEquals(42, Function.prototype.call.call(f, o, 20, 22))
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(43, Function.prototype.call.call(f, null, 20, 23))
-// assertSame(isStrict ? null : global_object, receiver)
-// assertEquals(44, Function.prototype.call.call(f, 2, 21, 23))
-// assertEquals(2, receiver.valueOf())
-// receiver = 333
-// assertEquals(32, f.apply(o, [16, 16]))
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(32, Function.prototype.apply.call(f, o, [17, 15]))
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(42, %Call(f, o, 11, 31));
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(42, %Call(f, null, 11, 31));
-// assertSame(isStrict ? null : global_object, receiver)
-// receiver = 333
-// assertEquals(42, %Apply(f, o, [11, 31], 0, 2))
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(42, %Apply(f, null, [11, 31], 0, 2))
-// assertSame(isStrict ? null : global_object, receiver)
-// receiver = 333
-// assertEquals(42, %_Call(f, o, 11, 31))
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(42, %_Call(f, null, 11, 31))
-// assertSame(isStrict ? null : global_object, receiver)
-//
-// var ff = Function.prototype.bind.call(f, o, 12)
-// assertTrue(ff.length <= 1) // TODO(rossberg): Not spec'ed yet, be lax.
-// receiver = 333
-// assertEquals(42, ff(30))
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(33, Function.prototype.call.call(ff, {}, 21))
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(32, Function.prototype.apply.call(ff, {}, [20]))
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(23, %Call(ff, {}, 11));
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(23, %Call(ff, {}, 11, 3));
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(24, %Apply(ff, {}, [12, 13], 0, 1))
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(24, %Apply(ff, {}, [12, 13], 0, 2))
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(34, %_Call(ff, {}, 22))
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(34, %_Call(ff, {}, 22, 3))
-// assertSame(o, receiver)
-//
-// var fff = Function.prototype.bind.call(ff, o, 30)
-// assertEquals(0, fff.length)
-// receiver = 333
-// assertEquals(42, fff())
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(42, Function.prototype.call.call(fff, {}))
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(42, Function.prototype.apply.call(fff, {}))
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(42, %Call(fff, {}));
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(42, %Call(fff, {}, 11, 3))
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(42, %Apply(fff, {}, [], 0, 0))
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(42, %Apply(fff, {}, [12, 13], 0, 0))
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(42, %Apply(fff, {}, [12, 13], 0, 2))
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(42, %_Call(fff, {}))
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(42, %_Call(fff, {}, 3, 4, 5))
-// assertSame(o, receiver)
-//
-// var f = CreateFrozen({}, callTrap)
-// receiver = 333
-// assertEquals(42, f(11, 31))
-// assertSame(isStrict ? undefined : global_object, receiver)
-// var o = {f: f}
-// receiver = 333
-// assertEquals(42, o.f(10, 32))
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(42, o["f"](9, 33))
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(42, (1, o).f(8, 34))
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(42, (1, o)["f"](7, 35))
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(42, Function.prototype.call.call(f, o, 20, 22))
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(32, Function.prototype.apply.call(f, o, [17, 15]))
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(23, %Call(f, o, 11, 12))
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(27, %Apply(f, o, [12, 13, 14], 1, 2))
-// assertSame(o, receiver)
-// receiver = 333
-// assertEquals(42, %_Call(f, o, 18, 24))
-// assertSame(o, receiver)
-//}
-//
-//TestCall(false, function(x, y) {
-// receiver = this
-// return x + y
-//})
-//
-//TestCall(true, function(x, y) {
-// "use strict"
-// receiver = this
-// return x + y
-//})
-//
-//TestCall(false, function() {
-// receiver = this
-// return arguments[0] + arguments[1]
-//})
-//
-//TestCall(false, Proxy.createFunction(handler, function(x, y) {
-// receiver = this
-// return x + y
-//}))
-//
-//TestCall(true, Proxy.createFunction(handler, function(x, y) {
-// "use strict"
-// receiver = this
-// return x + y
-//}))
-//
-//TestCall(false, CreateFrozen(handler, function(x, y) {
-// receiver = this
-// return x + y
-//}))
-//
-//
-//
-//// Using intrinsics as call traps.
-//
-//function TestCallIntrinsic(type, callTrap) {
-// var f = Proxy.createFunction({}, callTrap)
-// var x = f()
-// assertTrue(typeof x == type)
-//}
-//
-//TestCallIntrinsic("boolean", Boolean)
-//TestCallIntrinsic("number", Number)
-//TestCallIntrinsic("string", String)
-//TestCallIntrinsic("object", Object)
-//TestCallIntrinsic("function", Function)
-//
-//
-//
-//// Throwing from call trap.
-//
-//function TestCallThrow(callTrap) {
-// var f = Proxy.createFunction({}, callTrap)
-// assertThrows(function(){ f(11) }, "myexn")
-// assertThrows(function(){ ({x: f}).x(11) }, "myexn")
-// assertThrows(function(){ ({x: f})["x"](11) }, "myexn")
-// assertThrows(function(){ Function.prototype.call.call(f, {}, 2) }, "myexn")
-// assertThrows(function(){ Function.prototype.apply.call(f, {}, [1]) }, "myexn")
-// assertThrows(function(){ %Call(f, {}) }, "myexn")
-// assertThrows(function(){ %Call(f, {}, 1, 2) }, "myexn")
-// assertThrows(function(){ %Apply({}, f, [], 3, 0) }, "myexn")
-// assertThrows(function(){ %Apply({}, f, [3, 4], 0, 1) }, "myexn")
-// assertThrows(function(){ %_Call(f, {}) }, "myexn")
-// assertThrows(function(){ %_Call(f, {}, 1, 2) }, "myexn")
-//
-// var f = CreateFrozen({}, callTrap)
-// assertThrows(function(){ f(11) }, "myexn")
-// assertThrows(function(){ ({x: f}).x(11) }, "myexn")
-// assertThrows(function(){ ({x: f})["x"](11) }, "myexn")
-// assertThrows(function(){ Function.prototype.call.call(f, {}, 2) }, "myexn")
-// assertThrows(function(){ Function.prototype.apply.call(f, {}, [1]) }, "myexn")
-// assertThrows(function(){ %Call(f, {}) }, "myexn")
-// assertThrows(function(){ %Call(f, {}, 1, 2) }, "myexn")
-// assertThrows(function(){ %Apply({}, f, [], 3, 0) }, "myexn")
-// assertThrows(function(){ %Apply({}, f, [3, 4], 0, 1) }, "myexn")
-// assertThrows(function(){ %_Call(f, {}) }, "myexn")
-// assertThrows(function(){ %_Call(f, {}, 1, 2) }, "myexn")
-//}
-//
-//TestCallThrow(function() { throw "myexn" })
-//TestCallThrow(Proxy.createFunction({}, function() { throw "myexn" }))
-//TestCallThrow(CreateFrozen({}, function() { throw "myexn" }))
-//
-//
-//
-//// Construction (new).
-//
-//var prototype = {myprop: 0}
-//var receiver
-//
-//var handlerWithPrototype = {
-// fix: function() { return { prototype: { value: prototype } }; },
-// get: function(r, n) {
-// if (n == "length") return 2;
-// assertEquals("prototype", n);
-// return prototype;
-// }
-//}
-//
-//var handlerSansPrototype = {
-// fix: function() { return { length: { value: 2 } } },
-// get: function(r, n) {
-// if (n == "length") return 2;
-// assertEquals("prototype", n);
-// return undefined;
-// }
-//}
-//
-//function ReturnUndef(x, y) {
-// "use strict";
-// receiver = this;
-// this.sum = x + y;
-//}
-//
-//function ReturnThis(x, y) {
-// "use strict";
-// receiver = this;
-// this.sum = x + y;
-// return this;
-//}
-//
-//function ReturnNew(x, y) {
-// "use strict";
-// receiver = this;
-// return {sum: x + y};
-//}
-//
-//function ReturnNewWithProto(x, y) {
-// "use strict";
-// receiver = this;
-// var result = Object.create(prototype);
-// result.sum = x + y;
-// return result;
-//}
-//
-//function TestConstruct(proto, constructTrap) {
-// TestConstruct2(proto, constructTrap, handlerWithPrototype)
-// TestConstruct2(proto, constructTrap, handlerSansPrototype)
-//}
-//
-//function TestConstruct2(proto, constructTrap, handler) {
-// var f = Proxy.createFunction(handler, function() {}, constructTrap)
-// var o = new f(11, 31)
-// assertEquals(undefined, receiver)
-// assertEquals(42, o.sum)
-// assertSame(proto, Object.getPrototypeOf(o))
-//
-// var f = CreateFrozen(handler, function() {}, constructTrap)
-// var o = new f(11, 32)
-// assertEquals(undefined, receiver)
-// assertEquals(43, o.sum)
-// assertSame(proto, Object.getPrototypeOf(o))
-//}
-//
-//TestConstruct(Object.prototype, ReturnNew)
-//TestConstruct(prototype, ReturnNewWithProto)
-//
-//TestConstruct(Object.prototype, Proxy.createFunction(handler, ReturnNew))
-//TestConstruct(prototype, Proxy.createFunction(handler, ReturnNewWithProto))
-//
-//TestConstruct(Object.prototype, CreateFrozen(handler, ReturnNew))
-//TestConstruct(prototype, CreateFrozen(handler, ReturnNewWithProto))
-//
-//
-//
-//// Construction with derived construct trap.
-//
-//function TestConstructFromCall(proto, returnsThis, callTrap) {
-// TestConstructFromCall2(prototype, returnsThis, callTrap, handlerWithPrototype)
-// TestConstructFromCall2(proto, returnsThis, callTrap, handlerSansPrototype)
-//}
-//
-//function TestConstructFromCall2(proto, returnsThis, callTrap, handler) {
-// // TODO(rossberg): handling of prototype for derived construct trap will be
-// // fixed in a separate change. Commenting out checks below for now.
-// var f = Proxy.createFunction(handler, callTrap)
-// var o = new f(11, 31)
-// if (returnsThis) assertEquals(o, receiver)
-// assertEquals(42, o.sum)
-// // assertSame(proto, Object.getPrototypeOf(o))
-//
-// var g = CreateFrozen(handler, callTrap)
-// // assertSame(f.prototype, g.prototype)
-// var o = new g(11, 32)
-// if (returnsThis) assertEquals(o, receiver)
-// assertEquals(43, o.sum)
-// // assertSame(proto, Object.getPrototypeOf(o))
-//}
-//
-//TestConstructFromCall(Object.prototype, true, ReturnUndef)
-//TestConstructFromCall(Object.prototype, true, ReturnThis)
-//TestConstructFromCall(Object.prototype, false, ReturnNew)
-//TestConstructFromCall(prototype, false, ReturnNewWithProto)
-//
-//TestConstructFromCall(Object.prototype, true,
-// Proxy.createFunction(handler, ReturnUndef))
-//TestConstructFromCall(Object.prototype, true,
-// Proxy.createFunction(handler, ReturnThis))
-//TestConstructFromCall(Object.prototype, false,
-// Proxy.createFunction(handler, ReturnNew))
-//TestConstructFromCall(prototype, false,
-// Proxy.createFunction(handler, ReturnNewWithProto))
-//
-//TestConstructFromCall(Object.prototype, true, CreateFrozen({}, ReturnUndef))
-//TestConstructFromCall(Object.prototype, true, CreateFrozen({}, ReturnThis))
-//TestConstructFromCall(Object.prototype, false, CreateFrozen({}, ReturnNew))
-//TestConstructFromCall(prototype, false, CreateFrozen({}, ReturnNewWithProto))
-//
-//ReturnUndef.prototype = prototype
-//ReturnThis.prototype = prototype
-//ReturnNew.prototype = prototype
-//ReturnNewWithProto.prototype = prototype
-//
-//TestConstructFromCall(prototype, true, ReturnUndef)
-//TestConstructFromCall(prototype, true, ReturnThis)
-//TestConstructFromCall(Object.prototype, false, ReturnNew)
-//TestConstructFromCall(prototype, false, ReturnNewWithProto)
-//
-//TestConstructFromCall(Object.prototype, true,
-// Proxy.createFunction(handler, ReturnUndef))
-//TestConstructFromCall(Object.prototype, true,
-// Proxy.createFunction(handler, ReturnThis))
-//TestConstructFromCall(Object.prototype, false,
-// Proxy.createFunction(handler, ReturnNew))
-//TestConstructFromCall(prototype, false,
-// Proxy.createFunction(handler, ReturnNewWithProto))
-//
-//TestConstructFromCall(prototype, true,
-// Proxy.createFunction(handlerWithPrototype, ReturnUndef))
-//TestConstructFromCall(prototype, true,
-// Proxy.createFunction(handlerWithPrototype, ReturnThis))
-//TestConstructFromCall(Object.prototype, false,
-// Proxy.createFunction(handlerWithPrototype, ReturnNew))
-//TestConstructFromCall(prototype, false,
-// Proxy.createFunction(handlerWithPrototype,
-// ReturnNewWithProto))
-//
-//TestConstructFromCall(prototype, true,
-// CreateFrozen(handlerWithPrototype, ReturnUndef))
-//TestConstructFromCall(prototype, true,
-// CreateFrozen(handlerWithPrototype, ReturnThis))
-//TestConstructFromCall(Object.prototype, false,
-// CreateFrozen(handlerWithPrototype, ReturnNew))
-//TestConstructFromCall(prototype, false,
-// CreateFrozen(handlerWithPrototype, ReturnNewWithProto))
-//
-//
-//
-//// Throwing from the construct trap.
-//
-//function TestConstructThrow(trap) {
-// TestConstructThrow2(Proxy.createFunction({ fix: function() {return {};} },
-// trap))
-// TestConstructThrow2(Proxy.createFunction({ fix: function() {return {};} },
-// function() {},
-// trap))
-//}
-//
-//function TestConstructThrow2(f) {
-// assertThrows(function(){ new f(11) }, "myexn")
-// Object.freeze(f)
-// assertThrows(function(){ new f(11) }, "myexn")
-//}
-//
-//TestConstructThrow(function() { throw "myexn" })
-//TestConstructThrow(Proxy.createFunction({}, function() { throw "myexn" }))
-//TestConstructThrow(CreateFrozen({}, function() { throw "myexn" }))
-//
-//
-//
-//// Using function proxies as getters and setters.
-//
-//var value
-//var receiver
-//
-//function TestAccessorCall(getterCallTrap, setterCallTrap) {
-// var handler = { fix: function() { return {} } }
-// var pgetter = Proxy.createFunction(handler, getterCallTrap)
-// var psetter = Proxy.createFunction(handler, setterCallTrap)
-//
-// var o = {}
-// var oo = Object.create(o)
-// Object.defineProperty(o, "a", {get: pgetter, set: psetter})
-// Object.defineProperty(o, "b", {get: pgetter})
-// Object.defineProperty(o, "c", {set: psetter})
-// Object.defineProperty(o, "3", {get: pgetter, set: psetter})
-// Object.defineProperty(oo, "a", {value: 43})
-//
-// receiver = ""
-// assertEquals(42, o.a)
-// assertSame(o, receiver)
-// receiver = ""
-// assertEquals(42, o.b)
-// assertSame(o, receiver)
-// receiver = ""
-// assertEquals(undefined, o.c)
-// assertEquals("", receiver)
-// receiver = ""
-// assertEquals(42, o["a"])
-// assertSame(o, receiver)
-// receiver = ""
-// assertEquals(42, o[3])
-// assertSame(o, receiver)
-//
-// receiver = ""
-// assertEquals(43, oo.a)
-// assertEquals("", receiver)
-// receiver = ""
-// assertEquals(42, oo.b)
-// assertSame(oo, receiver)
-// receiver = ""
-// assertEquals(undefined, oo.c)
-// assertEquals("", receiver)
-// receiver = ""
-// assertEquals(43, oo["a"])
-// assertEquals("", receiver)
-// receiver = ""
-// assertEquals(42, oo[3])
-// assertSame(oo, receiver)
-//
-// receiver = ""
-// assertEquals(50, o.a = 50)
-// assertSame(o, receiver)
-// assertEquals(50, value)
-// receiver = ""
-// assertEquals(51, o.b = 51)
-// assertEquals("", receiver)
-// assertEquals(50, value) // no setter
-// assertThrows(function() { "use strict"; o.b = 51 }, TypeError)
-// receiver = ""
-// assertEquals(52, o.c = 52)
-// assertSame(o, receiver)
-// assertEquals(52, value)
-// receiver = ""
-// assertEquals(53, o["a"] = 53)
-// assertSame(o, receiver)
-// assertEquals(53, value)
-// receiver = ""
-// assertEquals(54, o[3] = 54)
-// assertSame(o, receiver)
-// assertEquals(54, value)
-//
-// value = 0
-// receiver = ""
-// assertEquals(60, oo.a = 60)
-// assertEquals("", receiver)
-// assertEquals(0, value) // oo has own 'a'
-// assertEquals(61, oo.b = 61)
-// assertSame("", receiver)
-// assertEquals(0, value) // no setter
-// assertThrows(function() { "use strict"; oo.b = 61 }, TypeError)
-// receiver = ""
-// assertEquals(62, oo.c = 62)
-// assertSame(oo, receiver)
-// assertEquals(62, value)
-// receiver = ""
-// assertEquals(63, oo["c"] = 63)
-// assertSame(oo, receiver)
-// assertEquals(63, value)
-// receiver = ""
-// assertEquals(64, oo[3] = 64)
-// assertSame(oo, receiver)
-// assertEquals(64, value)
-//}
-//
-//TestAccessorCall(
-// function() { receiver = this; return 42 },
-// function(x) { receiver = this; value = x }
-//)
-//
-//TestAccessorCall(
-// function() { "use strict"; receiver = this; return 42 },
-// function(x) { "use strict"; receiver = this; value = x }
-//)
-//
-//TestAccessorCall(
-// Proxy.createFunction({}, function() { receiver = this; return 42 }),
-// Proxy.createFunction({}, function(x) { receiver = this; value = x })
-//)
-//
-//TestAccessorCall(
-// CreateFrozen({}, function() { receiver = this; return 42 }),
-// CreateFrozen({}, function(x) { receiver = this; value = x })
-//)
-//
-//
-//
-//// Passing a proxy function to higher-order library functions.
-//
-//function TestHigherOrder(f) {
-// assertEquals(6, [6, 2].map(f)[0])
-// assertEquals(4, [5, 2].reduce(f, 4))
-// assertTrue([1, 2].some(f))
-// assertEquals("a.b.c", "a.b.c".replace(".", f))
-//}
-//
-//TestHigherOrder(function(x) { return x })
-//TestHigherOrder(function(x) { "use strict"; return x })
-//TestHigherOrder(Proxy.createFunction({}, function(x) { return x }))
-//TestHigherOrder(CreateFrozen({}, function(x) { return x }))
-//
-//
-//
-//// TODO(rossberg): Ultimately, I want to have the following test function
-//// run through, but it currently fails on so many cases (some not even
-//// involving proxies), that I leave that for later...
-///*
-//function TestCalls() {
-// var handler = {
-// get: function(r, k) {
-// return k == "length" ? 2 : Function.prototype[k]
-// }
-// }
-// var bind = Function.prototype.bind
-// var o = {}
-//
-// var traps = [
-// function(x, y) {
-// return {receiver: this, result: x + y, strict: false}
-// },
-// function(x, y) { "use strict";
-// return {receiver: this, result: x + y, strict: true}
-// },
-// function() {
-// var x = arguments[0], y = arguments[1]
-// return {receiver: this, result: x + y, strict: false}
-// },
-// Proxy.createFunction(handler, function(x, y) {
-// return {receiver: this, result: x + y, strict: false}
-// }),
-// Proxy.createFunction(handler, function() {
-// var x = arguments[0], y = arguments[1]
-// return {receiver: this, result: x + y, strict: false}
-// }),
-// Proxy.createFunction(handler, function(x, y) { "use strict"
-// return {receiver: this, result: x + y, strict: true}
-// }),
-// CreateFrozen(handler, function(x, y) {
-// return {receiver: this, result: x + y, strict: false}
-// }),
-// CreateFrozen(handler, function(x, y) { "use strict"
-// return {receiver: this, result: x + y, strict: true}
-// }),
-// ]
-// var creates = [
-// function(trap) { return trap },
-// function(trap) { return CreateFrozen({}, callTrap) },
-// function(trap) { return Proxy.createFunction(handler, callTrap) },
-// function(trap) {
-// return Proxy.createFunction(handler, CreateFrozen({}, callTrap))
-// },
-// function(trap) {
-// return Proxy.createFunction(handler, Proxy.createFunction(handler, callTrap))
-// },
-// ]
-// var binds = [
-// function(f, o, x, y) { return f },
-// function(f, o, x, y) { return bind.call(f, o) },
-// function(f, o, x, y) { return bind.call(f, o, x) },
-// function(f, o, x, y) { return bind.call(f, o, x, y) },
-// function(f, o, x, y) { return bind.call(f, o, x, y, 5) },
-// function(f, o, x, y) { return bind.call(bind.call(f, o), {}, x, y) },
-// function(f, o, x, y) { return bind.call(bind.call(f, o, x), {}, y) },
-// function(f, o, x, y) { return bind.call(bind.call(f, o, x, y), {}, 5) },
-// ]
-// var calls = [
-// function(f, x, y) { return f(x, y) },
-// function(f, x, y) { var g = f; return g(x, y) },
-// function(f, x, y) { with ({}) return f(x, y) },
-// function(f, x, y) { var g = f; with ({}) return g(x, y) },
-// function(f, x, y, o) { with (o) return f(x, y) },
-// function(f, x, y, o) { return f.call(o, x, y) },
-// function(f, x, y, o) { return f.apply(o, [x, y]) },
-// function(f, x, y, o) { return Function.prototype.call.call(f, o, x, y) },
-// function(f, x, y, o) { return Function.prototype.apply.call(f, o, [x, y]) },
-// function(f, x, y, o) { return %_Call(f, o, x, y) },
-// function(f, x, y, o) { return %Call(f, o, x, y) },
-// function(f, x, y, o) { return %Apply(f, o, [null, x, y, null], 1, 2) },
-// function(f, x, y, o) { return %Apply(f, o, arguments, 2, 2) },
-// function(f, x, y, o) { if (typeof o == "object") return o.f(x, y) },
-// function(f, x, y, o) { if (typeof o == "object") return o["f"](x, y) },
-// function(f, x, y, o) { if (typeof o == "object") return (1, o).f(x, y) },
-// function(f, x, y, o) { if (typeof o == "object") return (1, o)["f"](x, y) },
-// ]
-// var receivers = [o, global_object, undefined, null, 2, "bla", true]
-// var expectedSloppies = [o, global_object, global_object, global_object]
-//
-// for (var t = 0; t < traps.length; ++t) {
-// for (var i = 0; i < creates.length; ++i) {
-// for (var j = 0; j < binds.length; ++j) {
-// for (var k = 0; k < calls.length; ++k) {
-// for (var m = 0; m < receivers.length; ++m) {
-// for (var n = 0; n < receivers.length; ++n) {
-// var bound = receivers[m]
-// var receiver = receivers[n]
-// var func = binds[j](creates[i](traps[t]), bound, 31, 11)
-// var expected = j > 0 ? bound : receiver
-// var expectedSloppy = expectedSloppies[j > 0 ? m : n]
-// o.f = func
-// global_object.f = func
-// var x = calls[k](func, 11, 31, receiver)
-// if (x !== undefined) {
-// assertEquals(42, x.result)
-// if (calls[k].length < 4)
-// assertSame(x.strict ? undefined : global_object, x.receiver)
-// else if (x.strict)
-// assertSame(expected, x.receiver)
-// else if (expectedSloppy === undefined)
-// assertSame(expected, x.receiver.valueOf())
-// else
-// assertSame(expectedSloppy, x.receiver)
-// }
-// }
-// }
-// }
-// }
-// }
-// }
-//}
-//
-//TestCalls()
-//*/
-//
-//var realms = [Realm.create(), Realm.create()];
-//Realm.shared = {};
-//
-//Realm.eval(realms[0], "function f() { return this; };");
-//Realm.eval(realms[0], "Realm.shared.f = f;");
-//Realm.eval(realms[0], "Realm.shared.fg = this;");
-//Realm.eval(realms[1], "function g() { return this; };");
-//Realm.eval(realms[1], "Realm.shared.g = g;");
-//Realm.eval(realms[1], "Realm.shared.gg = this;");
-//
-//var fp = Proxy.createFunction({}, Realm.shared.f);
-//var gp = Proxy.createFunction({}, Realm.shared.g);
-//
-//for (var i = 0; i < 10; i++) {
-// assertEquals(Realm.shared.fg, fp());
-// assertEquals(Realm.shared.gg, gp());
-//
-// with (this) {
-// assertEquals(this, fp());
-// assertEquals(this, gp());
-// }
-//
-// with ({}) {
-// assertEquals(Realm.shared.fg, fp());
-// assertEquals(Realm.shared.gg, gp());
-// }
-//}
diff --git a/deps/v8/test/mjsunit/harmony/regexp-change-exec.js b/deps/v8/test/mjsunit/harmony/regexp-change-exec.js
new file mode 100644
index 0000000000..4c9757e3d5
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regexp-change-exec.js
@@ -0,0 +1,9 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-exec
+
+class MyError extends Error { }
+RegExp.prototype.exec = () => { throw new MyError() };
+assertThrows(() => "foo".match(/bar/), MyError);
diff --git a/deps/v8/test/mjsunit/harmony/regexp-flags.js b/deps/v8/test/mjsunit/harmony/regexp-flags.js
deleted file mode 100644
index fae88610e4..0000000000
--- a/deps/v8/test/mjsunit/harmony/regexp-flags.js
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-regexps --harmony-unicode-regexps
-
-RegExp.prototype.flags = 'setter should be undefined';
-
-assertEquals('', RegExp('').flags);
-assertEquals('', /./.flags);
-assertEquals('gimuy', RegExp('', 'yugmi').flags);
-assertEquals('gimuy', /foo/yumig.flags);
-
-var descriptor = Object.getOwnPropertyDescriptor(RegExp.prototype, 'flags');
-assertTrue(descriptor.configurable);
-assertFalse(descriptor.enumerable);
-assertInstanceof(descriptor.get, Function);
-assertEquals(undefined, descriptor.set);
-
-function testGenericFlags(object) {
- return descriptor.get.call(object);
-}
-
-assertEquals('', testGenericFlags({}));
-assertEquals('i', testGenericFlags({ ignoreCase: true }));
-assertEquals('uy', testGenericFlags({ global: 0, sticky: 1, unicode: 1 }));
-assertEquals('m', testGenericFlags({ __proto__: { multiline: true } }));
-assertThrows(function() { testGenericFlags(); }, TypeError);
-assertThrows(function() { testGenericFlags(undefined); }, TypeError);
-assertThrows(function() { testGenericFlags(null); }, TypeError);
-assertThrows(function() { testGenericFlags(true); }, TypeError);
-assertThrows(function() { testGenericFlags(false); }, TypeError);
-assertThrows(function() { testGenericFlags(''); }, TypeError);
-assertThrows(function() { testGenericFlags(42); }, TypeError);
-
-var counter = 0;
-var map = {};
-var object = {
- get global() {
- map.g = counter++;
- },
- get ignoreCase() {
- map.i = counter++;
- },
- get multiline() {
- map.m = counter++;
- },
- get unicode() {
- map.u = counter++;
- },
- get sticky() {
- map.y = counter++;
- }
-};
-testGenericFlags(object);
-assertEquals({ g: 0, i: 1, m: 2, u: 3, y: 4 }, map);
diff --git a/deps/v8/test/mjsunit/harmony/regexp-no-change-exec.js b/deps/v8/test/mjsunit/harmony/regexp-no-change-exec.js
new file mode 100644
index 0000000000..30b5050945
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regexp-no-change-exec.js
@@ -0,0 +1,9 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-harmony-regexp-exec
+
+class MyError extends Error { }
+RegExp.prototype.exec = () => { throw new MyError() };
+assertEquals(null, "foo".match(/bar/));
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-blocks.js b/deps/v8/test/mjsunit/harmony/regexp-property-blocks.js
new file mode 100644
index 0000000000..d186e985f9
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-blocks.js
@@ -0,0 +1,36 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-property --harmony-unicode-regexps
+
+function t(re, s) { assertTrue(re.test(s)); }
+function f(re, s) { assertFalse(re.test(s)); }
+
+t(/\p{InASCII}+/u, ".");
+t(/\p{InASCII}+/u, "supercalifragilisticexpialidocious");
+t(/\p{InBasic_Latin}+/u, ".");
+t(/\p{InBasic_Latin}+/u, "supercalifragilisticexpialidocious");
+
+t(/\p{InCJK}+/u, "话说天下大势,分久必合,合久必分");
+t(/\p{InCJK_Unified_Ideographs}+/u, "吾庄后有一桃园,花开正盛");
+f(/\p{InCJK}+/u, "おはようございます");
+f(/\p{InCJK_Unified_Ideographs}+/u,
+ "Something is rotten in the state of Denmark");
+
+t(/\p{InLatin_1}+/u, "Wie froh bin ich, daß ich weg bin!");
+f(/\p{InLatin_1_Supplement}+/u, "奔腾千里荡尘埃,渡水登山紫雾开");
+f(/\p{InLatin_1_Sup}+/u, "いただきます");
+
+t(/\p{InHiragana}/u, "いただきます");
+t(/\p{Hiragana}/u, "\u{1b001}"); // This refers to the script "Hiragana".
+f(/\p{InHiragana}/u, "\u{1b001}"); // This refers to the block "Hiragana".
+
+t(/\p{InGreek_And_Coptic}/u,
+ "ἄνδρα μοι ἔννεπε, μοῦσα, πολύτροπον, ὃς μάλα πολλὰ");
+t(/\p{InGreek}/u, "μῆνιν ἄειδε θεὰ Πηληϊάδεω Ἀχιλῆος");
+
+assertThrows("/\\p{In}/u");
+assertThrows("/\\pI/u");
+assertThrows("/\\p{I}/u");
+assertThrows("/\\p{CJK}/u");
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-char-class.js b/deps/v8/test/mjsunit/harmony/regexp-property-char-class.js
new file mode 100644
index 0000000000..76774cb572
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-char-class.js
@@ -0,0 +1,27 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-unicode-regexps --harmony-regexp-property
+
+assertThrows("/[\\p]/u");
+assertThrows("/[\\p{garbage}]/u");
+assertThrows("/[\\p{}]/u");
+assertThrows("/[\\p{]/u");
+assertThrows("/[\\p}]/u");
+assertThrows("/[\\p{Math}]/u");
+assertThrows("/[\\p{Bidi_M}]/u");
+assertThrows("/[\\p{Hex}]/u");
+
+assertTrue(/^[\p{Lu}\p{Ll}]+$/u.test("ABCabc"));
+assertTrue(/^[\p{Lu}-\p{Ll}]+$/u.test("ABC-abc"));
+assertFalse(/^[\P{Lu}\p{Ll}]+$/u.test("ABCabc"));
+assertTrue(/^[\P{Lu}\p{Ll}]+$/u.test("abc"));
+assertTrue(/^[\P{Lu}]+$/u.test("abc123"));
+assertFalse(/^[\P{Lu}]+$/u.test("XYZ"));
+
+assertTrue(/^[^\P{Lu}]+$/u.test("XYZ"));
+assertFalse(/^[^\p{Lu}\p{Ll}]+$/u.test("abc"));
+assertFalse(/^[^\p{Lu}\p{Ll}]+$/u.test("ABC"));
+assertTrue(/^[^\p{Lu}\p{Ll}]+$/u.test("123"));
+assertTrue(/^[^\p{Lu}\P{Ll}]+$/u.test("abc"));
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-disabled.js b/deps/v8/test/mjsunit/harmony/regexp-property-disabled.js
new file mode 100644
index 0000000000..7a3158c68b
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-disabled.js
@@ -0,0 +1,18 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-unicode-regexps --no-harmony-regexp-property
+
+function test(source, message) {
+ try {
+ eval(source);
+ } catch (e) {
+ assertEquals(message, e.message);
+ return;
+ }
+ assertUnreachable();
+}
+
+test("/\\pL/u", "Invalid regular expression: /\\pL/: Invalid escape");
+test("/[\\p{L}]/u", "Invalid regular expression: /[\\p{L}]/: Invalid escape");
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-exact-match.js b/deps/v8/test/mjsunit/harmony/regexp-property-exact-match.js
new file mode 100644
index 0000000000..4dfcc5f96e
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-exact-match.js
@@ -0,0 +1,33 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-property --harmony-unicode-regexps
+
+assertThrows("/\\p{In CJK}/u");
+assertThrows("/\\p{InCJKUnifiedIdeographs}/u");
+assertDoesNotThrow("/\\p{InCJK}/u");
+assertDoesNotThrow("/\\p{InCJK_Unified_Ideographs}/u");
+
+assertDoesNotThrow("/\\p{InCyrillic_Sup}/u");
+assertDoesNotThrow("/\\p{InCyrillic_Supplement}/u");
+assertDoesNotThrow("/\\p{InCyrillic_Supplementary}/u");
+assertThrows("/\\p{InCyrillicSupplementary}/u");
+assertThrows("/\\p{InCyrillic_supplementary}/u");
+
+assertDoesNotThrow("/\\pC/u");
+assertDoesNotThrow("/\\p{Other}/u");
+assertDoesNotThrow("/\\p{Cc}/u");
+assertDoesNotThrow("/\\p{Control}/u");
+assertDoesNotThrow("/\\p{cntrl}/u");
+assertDoesNotThrow("/\\p{M}/u");
+assertDoesNotThrow("/\\p{Mark}/u");
+assertDoesNotThrow("/\\p{Combining_Mark}/u");
+assertThrows("/\\p{Combining Mark}/u");
+
+assertDoesNotThrow("/\\p{Copt}/u");
+assertDoesNotThrow("/\\p{Coptic}/u");
+assertDoesNotThrow("/\\p{Qaac}/u");
+assertDoesNotThrow("/\\p{Egyp}/u");
+assertDoesNotThrow("/\\p{Egyptian_Hieroglyphs}/u");
+assertThrows("/\\p{EgyptianHieroglyphs}/u");
diff --git a/deps/v8/test/mjsunit/harmony/unicode-regexp-property-class.js b/deps/v8/test/mjsunit/harmony/regexp-property-general-category.js
index 323873ab7f..e2015ad72d 100644
--- a/deps/v8/test/mjsunit/harmony/unicode-regexp-property-class.js
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-general-category.js
@@ -62,3 +62,6 @@ assertTrue(/\pL/u.test("\u1FAB"));
assertFalse(/\PL/u.test("\u1FAB"));
assertFalse(/\p{L}/u.test("\uA6EE"));
assertTrue(/\P{L}/u.test("\uA6EE"));
+
+assertTrue(/\p{Lowercase_Letter}/u.test("a"));
+assertTrue(/\p{Math_Symbol}/u.test("+"));
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-scripts.js b/deps/v8/test/mjsunit/harmony/regexp-property-scripts.js
new file mode 100644
index 0000000000..19b50ee7db
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-scripts.js
@@ -0,0 +1,39 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-property --harmony-unicode-regexps
+
+function t(re, s) { assertTrue(re.test(s)); }
+function f(re, s) { assertFalse(re.test(s)); }
+
+t(/\p{Common}+/u, ".");
+f(/\p{Common}+/u, "supercalifragilisticexpialidocious");
+
+t(/\p{Han}+/u, "话说天下大势,分久必合,合久必分");
+t(/\p{Hani}+/u, "吾庄后有一桃园,花开正盛");
+f(/\p{Han}+/u, "おはようございます");
+f(/\p{Hani}+/u, "Something is rotten in the state of Denmark");
+
+t(/\p{Latin}+/u, "Wie froh bin ich, daß ich weg bin!");
+t(/\p{Latn}+/u,
+ "It was a bright day in April, and the clocks were striking thirteen");
+f(/\p{Latin}+/u, "奔腾千里荡尘埃,渡水登山紫雾开");
+f(/\p{Latn}+/u, "いただきます");
+
+t(/\p{Hiragana}/u, "いただきます");
+t(/\p{Hira}/u, "ありがとうございました");
+f(/\p{Hiragana}/u,
+ "Als Gregor Samsa eines Morgens aus unruhigen Träumen erwachte");
+f(/\p{Hira}/u, "Call me Ishmael");
+
+t(/\p{Phoenician}/u, "\u{10900}\u{1091a}");
+t(/\p{Phnx}/u, "\u{1091f}\u{10916}");
+f(/\p{Phoenician}/u, "Arthur est un perroquet");
+f(/\p{Phnx}/u, "设心狠毒非良士,操卓原来一路人");
+
+t(/\p{Grek}/u, "ἄνδρα μοι ἔννεπε, μοῦσα, πολύτροπον, ὃς μάλα πολλὰ");
+t(/\p{Greek}/u, "μῆνιν ἄειδε θεὰ Πηληϊάδεω Ἀχιλῆος");
+f(/\p{Greek}/u, "高贤未服英雄志,屈节偏生杰士疑");
+f(/\p{Greek}/u,
+ "Mr. Jones, of the Manor Farm, had locked the hen-houses for the night");
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-343928.js b/deps/v8/test/mjsunit/harmony/regress/regress-343928.js
deleted file mode 100644
index f2ff3715a9..0000000000
--- a/deps/v8/test/mjsunit/harmony/regress/regress-343928.js
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-modules --expose-debug-as=debug
-
-(function () { // Scope for utility functions.
- escaping_function = function(object) {
- // Argument must not be null or undefined.
- var string = Object.prototype.toString.call(object);
- // String has format [object <ClassName>].
- return string.substring(8, string.length - 1);
- }
-})();
-
-module B {
- var stuff = 3
-}
-
-var __v_0 = {};
-var __v_4 = debug.MakeMirror(__v_0);
-print(__v_4.referencedBy().length); // core dump here if not fixed.
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-4482.js b/deps/v8/test/mjsunit/harmony/regress/regress-4482.js
index bffca42ecc..2472b466ab 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-4482.js
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-4482.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-sloppy --nolegacy-const
+// Flags: --harmony-sloppy
assertEquals("function", (function f() { f = 42; return typeof f })());
assertEquals("function",
diff --git a/deps/v8/test/mjsunit/harmony/sharedarraybuffer.js b/deps/v8/test/mjsunit/harmony/sharedarraybuffer.js
index 7f7f8fb2d5..7bd4e5b121 100644
--- a/deps/v8/test/mjsunit/harmony/sharedarraybuffer.js
+++ b/deps/v8/test/mjsunit/harmony/sharedarraybuffer.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-sharedarraybuffer --harmony-tostring
+// Flags: --harmony-sharedarraybuffer
// SharedArrayBuffer
diff --git a/deps/v8/test/mjsunit/harmony/simd.js b/deps/v8/test/mjsunit/harmony/simd.js
index ff81506afe..1868050e50 100644
--- a/deps/v8/test/mjsunit/harmony/simd.js
+++ b/deps/v8/test/mjsunit/harmony/simd.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-simd --harmony-tostring --harmony-reflect
+// Flags: --harmony-simd
// Flags: --allow-natives-syntax --expose-natives-as natives --noalways-opt
function lanesForType(typeName) {
diff --git a/deps/v8/test/mjsunit/harmony/sloppy-implicit-block-function.js b/deps/v8/test/mjsunit/harmony/sloppy-implicit-block-function.js
new file mode 100644
index 0000000000..0702320864
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/sloppy-implicit-block-function.js
@@ -0,0 +1,97 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-harmony-restrictive-declarations
+
+// ES#sec-functiondeclarations-in-ifstatement-statement-clauses
+// Annex B 3.4 FunctionDeclarations in IfStatement Statement Clauses
+// In sloppy mode, function declarations in if statements act like
+// they have a block around them. Prohibited in strict mode.
+(function() {
+ assertEquals(undefined, f);
+ if (false) function f() { };
+ assertEquals(undefined, f);
+})();
+
+(function() {
+ assertEquals(undefined, f);
+ if (true) function f() { };
+ assertEquals('function', typeof f);
+})();
+
+(function() {
+ assertEquals(undefined, f);
+ if (true) {} else function f() { };
+ assertEquals(undefined, f);
+})();
+
+(function() {
+ assertEquals(undefined, f);
+ if (false) {} else function f() { };
+ assertEquals('function', typeof f);
+})();
+
+// For legacy reasons, we also support these types of semantics as
+// the body of a for or with statement.
+(function() {
+ for (;false;) function f() { };
+ assertEquals(undefined, f);
+})();
+
+(function() {
+ for (var x in {}) function f() { };
+ assertEquals(undefined, f);
+})();
+
+(function() {
+ var x;
+ for (x in {}) function f() { };
+ assertEquals(undefined, f);
+})();
+
+(function() {
+ for (var i = 0; i < 1; i++) function f() { };
+ assertEquals('function', typeof f);
+})();
+
+(function() {
+ for (var x in {a: 1}) function f() { };
+ assertEquals('function', typeof f);
+})();
+
+(function() {
+ var x;
+ for (x in {a: 1}) function f() { };
+ assertEquals('function', typeof f);
+})();
+
+(function() {
+ with ({}) function f() { };
+ assertEquals('function', typeof f);
+})();
+
+(function() {
+ do function f() {} while (0);
+ assertEquals('function', typeof f);
+})();
+
+// Labeled function declarations undergo the same hoisting/FiB semantics as if
+// they were unalbeled.
+(function() {
+ function bar() {
+ return f;
+ x: function f() {}
+ }
+ assertEquals('function', typeof bar());
+})();
+
+(function() {
+ function bar() {
+ return f;
+ {
+ x: function f() {}
+ }
+ }
+ assertEquals(undefined, bar());
+})();
diff --git a/deps/v8/test/mjsunit/harmony/sloppy-restrictive-block-function.js b/deps/v8/test/mjsunit/harmony/sloppy-restrictive-block-function.js
new file mode 100644
index 0000000000..07ce64b9ac
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/sloppy-restrictive-block-function.js
@@ -0,0 +1,52 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-restrictive-declarations
+
+// ES#sec-functiondeclarations-in-ifstatement-statement-clauses
+// Annex B 3.4 FunctionDeclarations in IfStatement Statement Clauses
+// In sloppy mode, function declarations in if statements act like
+// they have a block around them. Prohibited in strict mode.
+(function() {
+ if (false) function f() { };
+ assertEquals(undefined, f);
+})();
+
+(function() {
+ assertEquals(undefined, f);
+ if (true) function f() { };
+ assertEquals('function', typeof f);
+})();
+
+(function() {
+ assertEquals(undefined, f);
+ if (true) {} else function f() { };
+ assertEquals(undefined, f);
+})();
+
+(function() {
+ assertEquals(undefined, f);
+ if (false) {} else function f() { };
+ assertEquals('function', typeof f);
+})();
+
+// Labeled function declarations undergo the same hoisting/FiB semantics as if
+// they were unalbeled.
+(function() {
+ function bar() {
+ return f;
+ x: function f() {}
+ }
+ assertEquals('function', typeof bar());
+})();
+
+(function() {
+ function bar() {
+ return f;
+ {
+ x: function f() {}
+ }
+ }
+ assertEquals(undefined, bar());
+})();
diff --git a/deps/v8/test/mjsunit/harmony/unicode-escapes-in-regexps.js b/deps/v8/test/mjsunit/harmony/unicode-escapes-in-regexps.js
index 895e0c6722..7ea6f62990 100644
--- a/deps/v8/test/mjsunit/harmony/unicode-escapes-in-regexps.js
+++ b/deps/v8/test/mjsunit/harmony/unicode-escapes-in-regexps.js
@@ -4,7 +4,7 @@
// ES6 extends the \uxxxx escape and also allows \u{xxxxx}.
-// Flags: --harmony-unicode-regexps --harmony-regexps
+// Flags: --harmony-unicode-regexps
function testRegexpHelper(r) {
assertTrue(r.test("foo"));
diff --git a/deps/v8/test/mjsunit/ignition/debug-scope-on-return.js b/deps/v8/test/mjsunit/ignition/debug-scope-on-return.js
new file mode 100644
index 0000000000..5be6de6e65
--- /dev/null
+++ b/deps/v8/test/mjsunit/ignition/debug-scope-on-return.js
@@ -0,0 +1,31 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+// Check that the we are still in function context when we break on return.
+
+var Debug = debug.Debug;
+
+function listener(event, exec_state, event_data, data) {
+ if (event == Debug.DebugEvent.Break) {
+ // Access scope details to check the context is correct.
+ var scope_count = exec_state.frame().scopeCount();
+ // Do steps until we reach the global scope again.
+ exec_state.prepareStep(Debug.StepAction.StepIn);
+ }
+}
+
+Debug.setListener(listener);
+
+function f() {
+ debugger;
+
+ L: with ({x:12}) {
+ break L;
+ }
+
+ return;
+}
+f();
diff --git a/deps/v8/test/mjsunit/ignition/debug-step-prefix-bytecodes.js b/deps/v8/test/mjsunit/ignition/debug-step-prefix-bytecodes.js
new file mode 100644
index 0000000000..819fdf8469
--- /dev/null
+++ b/deps/v8/test/mjsunit/ignition/debug-step-prefix-bytecodes.js
@@ -0,0 +1,375 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --ignition-filter=f --expose-debug-as debug
+
+// This test tests that full code compiled without debug break slots
+// is recompiled with debug break slots when debugging is started.
+
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+var done = false;
+var step_count = 0;
+var values = [];
+
+// Debug event listener which steps until the global variable done is true.
+function listener(event, exec_state, event_data, data) {
+ if (event == Debug.DebugEvent.Break) {
+ if (!done) exec_state.prepareStep(Debug.StepAction.StepNext);
+ step_count++;
+ }
+};
+
+// Set the global variables state to prpare the stepping test.
+function prepare_step_test() {
+ done = false;
+ step_count = 0;
+}
+
+// Test function to step through, uses widended bytecodes.
+function f() {
+var x = 10000000;
+var y = x + 1;
+var _aa = 0; values[_aa] = _aa;
+var _ab = 1; values[_ab] = _ab;
+var _ac = 2; values[_ac] = _ac;
+var _ad = 3; values[_ad] = _ad;
+var _ae = 4; values[_ae] = _ae;
+var _af = 5; values[_af] = _af;
+var _ag = 6; values[_ag] = _ag;
+var _ah = 7; values[_ah] = _ah;
+var _ai = 8; values[_ai] = _ai;
+var _aj = 9; values[_aj] = _aj;
+var _ak = 10; values[_ak] = _ak;
+var _al = 11; values[_al] = _al;
+var _am = 12; values[_am] = _am;
+var _an = 13; values[_an] = _an;
+var _ao = 14; values[_ao] = _ao;
+var _ap = 15; values[_ap] = _ap;
+var _ba = 16; values[_ba] = _ba;
+var _bb = 17; values[_bb] = _bb;
+var _bc = 18; values[_bc] = _bc;
+var _bd = 19; values[_bd] = _bd;
+var _be = 20; values[_be] = _be;
+var _bf = 21; values[_bf] = _bf;
+var _bg = 22; values[_bg] = _bg;
+var _bh = 23; values[_bh] = _bh;
+var _bi = 24; values[_bi] = _bi;
+var _bj = 25; values[_bj] = _bj;
+var _bk = 26; values[_bk] = _bk;
+var _bl = 27; values[_bl] = _bl;
+var _bm = 28; values[_bm] = _bm;
+var _bn = 29; values[_bn] = _bn;
+var _bo = 30; values[_bo] = _bo;
+var _bp = 31; values[_bp] = _bp;
+var _ca = 32; values[_ca] = _ca;
+var _cb = 33; values[_cb] = _cb;
+var _cc = 34; values[_cc] = _cc;
+var _cd = 35; values[_cd] = _cd;
+var _ce = 36; values[_ce] = _ce;
+var _cf = 37; values[_cf] = _cf;
+var _cg = 38; values[_cg] = _cg;
+var _ch = 39; values[_ch] = _ch;
+var _ci = 40; values[_ci] = _ci;
+var _cj = 41; values[_cj] = _cj;
+var _ck = 42; values[_ck] = _ck;
+var _cl = 43; values[_cl] = _cl;
+var _cm = 44; values[_cm] = _cm;
+var _cn = 45; values[_cn] = _cn;
+var _co = 46; values[_co] = _co;
+var _cp = 47; values[_cp] = _cp;
+var _da = 48; values[_da] = _da;
+var _db = 49; values[_db] = _db;
+var _dc = 50; values[_dc] = _dc;
+var _dd = 51; values[_dd] = _dd;
+var _de = 52; values[_de] = _de;
+var _df = 53; values[_df] = _df;
+var _dg = 54; values[_dg] = _dg;
+var _dh = 55; values[_dh] = _dh;
+var _di = 56; values[_di] = _di;
+var _dj = 57; values[_dj] = _dj;
+var _dk = 58; values[_dk] = _dk;
+var _dl = 59; values[_dl] = _dl;
+var _dm = 60; values[_dm] = _dm;
+var _dn = 61; values[_dn] = _dn;
+var _do = 62; values[_do] = _do;
+var _dp = 63; values[_dp] = _dp;
+var _ea = 64; values[_ea] = _ea;
+var _eb = 65; values[_eb] = _eb;
+var _ec = 66; values[_ec] = _ec;
+var _ed = 67; values[_ed] = _ed;
+var _ee = 68; values[_ee] = _ee;
+var _ef = 69; values[_ef] = _ef;
+var _eg = 70; values[_eg] = _eg;
+var _eh = 71; values[_eh] = _eh;
+var _ei = 72; values[_ei] = _ei;
+var _ej = 73; values[_ej] = _ej;
+var _ek = 74; values[_ek] = _ek;
+var _el = 75; values[_el] = _el;
+var _em = 76; values[_em] = _em;
+var _en = 77; values[_en] = _en;
+var _eo = 78; values[_eo] = _eo;
+var _ep = 79; values[_ep] = _ep;
+var _fa = 80; values[_fa] = _fa;
+var _fb = 81; values[_fb] = _fb;
+var _fc = 82; values[_fc] = _fc;
+var _fd = 83; values[_fd] = _fd;
+var _fe = 84; values[_fe] = _fe;
+var _ff = 85; values[_ff] = _ff;
+var _fg = 86; values[_fg] = _fg;
+var _fh = 87; values[_fh] = _fh;
+var _fi = 88; values[_fi] = _fi;
+var _fj = 89; values[_fj] = _fj;
+var _fk = 90; values[_fk] = _fk;
+var _fl = 91; values[_fl] = _fl;
+var _fm = 92; values[_fm] = _fm;
+var _fn = 93; values[_fn] = _fn;
+var _fo = 94; values[_fo] = _fo;
+var _fp = 95; values[_fp] = _fp;
+var _ga = 96; values[_ga] = _ga;
+var _gb = 97; values[_gb] = _gb;
+var _gc = 98; values[_gc] = _gc;
+var _gd = 99; values[_gd] = _gd;
+var _ge = 100; values[_ge] = _ge;
+var _gf = 101; values[_gf] = _gf;
+var _gg = 102; values[_gg] = _gg;
+var _gh = 103; values[_gh] = _gh;
+var _gi = 104; values[_gi] = _gi;
+var _gj = 105; values[_gj] = _gj;
+var _gk = 106; values[_gk] = _gk;
+var _gl = 107; values[_gl] = _gl;
+var _gm = 108; values[_gm] = _gm;
+var _gn = 109; values[_gn] = _gn;
+var _go = 110; values[_go] = _go;
+var _gp = 111; values[_gp] = _gp;
+var _ha = 112; values[_ha] = _ha;
+var _hb = 113; values[_hb] = _hb;
+var _hc = 114; values[_hc] = _hc;
+var _hd = 115; values[_hd] = _hd;
+var _he = 116; values[_he] = _he;
+var _hf = 117; values[_hf] = _hf;
+var _hg = 118; values[_hg] = _hg;
+var _hh = 119; values[_hh] = _hh;
+var _hi = 120; values[_hi] = _hi;
+var _hj = 121; values[_hj] = _hj;
+var _hk = 122; values[_hk] = _hk;
+var _hl = 123; values[_hl] = _hl;
+var _hm = 124; values[_hm] = _hm;
+var _hn = 125; values[_hn] = _hn;
+var _ho = 126; values[_ho] = _ho;
+var _hp = 127; values[_hp] = _hp;
+var _ia = 128; values[_ia] = _ia;
+var _ib = 129; values[_ib] = _ib;
+var _ic = 130; values[_ic] = _ic;
+var _id = 131; values[_id] = _id;
+var _ie = 132; values[_ie] = _ie;
+var _if = 133; values[_if] = _if;
+var _ig = 134; values[_ig] = _ig;
+var _ih = 135; values[_ih] = _ih;
+var _ii = 136; values[_ii] = _ii;
+var _ij = 137; values[_ij] = _ij;
+var _ik = 138; values[_ik] = _ik;
+var _il = 139; values[_il] = _il;
+var _im = 140; values[_im] = _im;
+var _in = 141; values[_in] = _in;
+var _io = 142; values[_io] = _io;
+var _ip = 143; values[_ip] = _ip;
+var _ja = 144; values[_ja] = _ja;
+var _jb = 145; values[_jb] = _jb;
+var _jc = 146; values[_jc] = _jc;
+var _jd = 147; values[_jd] = _jd;
+var _je = 148; values[_je] = _je;
+var _jf = 149; values[_jf] = _jf;
+var _jg = 150; values[_jg] = _jg;
+var _jh = 151; values[_jh] = _jh;
+var _ji = 152; values[_ji] = _ji;
+var _jj = 153; values[_jj] = _jj;
+var _jk = 154; values[_jk] = _jk;
+var _jl = 155; values[_jl] = _jl;
+var _jm = 156; values[_jm] = _jm;
+var _jn = 157; values[_jn] = _jn;
+var _jo = 158; values[_jo] = _jo;
+var _jp = 159; values[_jp] = _jp;
+var _ka = 160; values[_ka] = _ka;
+var _kb = 161; values[_kb] = _kb;
+var _kc = 162; values[_kc] = _kc;
+var _kd = 163; values[_kd] = _kd;
+var _ke = 164; values[_ke] = _ke;
+var _kf = 165; values[_kf] = _kf;
+var _kg = 166; values[_kg] = _kg;
+var _kh = 167; values[_kh] = _kh;
+var _ki = 168; values[_ki] = _ki;
+var _kj = 169; values[_kj] = _kj;
+var _kk = 170; values[_kk] = _kk;
+var _kl = 171; values[_kl] = _kl;
+var _km = 172; values[_km] = _km;
+var _kn = 173; values[_kn] = _kn;
+var _ko = 174; values[_ko] = _ko;
+var _kp = 175; values[_kp] = _kp;
+var _la = 176; values[_la] = _la;
+var _lb = 177; values[_lb] = _lb;
+var _lc = 178; values[_lc] = _lc;
+var _ld = 179; values[_ld] = _ld;
+var _le = 180; values[_le] = _le;
+var _lf = 181; values[_lf] = _lf;
+var _lg = 182; values[_lg] = _lg;
+var _lh = 183; values[_lh] = _lh;
+var _li = 184; values[_li] = _li;
+var _lj = 185; values[_lj] = _lj;
+var _lk = 186; values[_lk] = _lk;
+var _ll = 187; values[_ll] = _ll;
+var _lm = 188; values[_lm] = _lm;
+var _ln = 189; values[_ln] = _ln;
+var _lo = 190; values[_lo] = _lo;
+var _lp = 191; values[_lp] = _lp;
+var _ma = 192; values[_ma] = _ma;
+var _mb = 193; values[_mb] = _mb;
+var _mc = 194; values[_mc] = _mc;
+var _md = 195; values[_md] = _md;
+var _me = 196; values[_me] = _me;
+var _mf = 197; values[_mf] = _mf;
+var _mg = 198; values[_mg] = _mg;
+var _mh = 199; values[_mh] = _mh;
+var _mi = 200; values[_mi] = _mi;
+var _mj = 201; values[_mj] = _mj;
+var _mk = 202; values[_mk] = _mk;
+var _ml = 203; values[_ml] = _ml;
+var _mm = 204; values[_mm] = _mm;
+var _mn = 205; values[_mn] = _mn;
+var _mo = 206; values[_mo] = _mo;
+var _mp = 207; values[_mp] = _mp;
+var _na = 208; values[_na] = _na;
+var _nb = 209; values[_nb] = _nb;
+var _nc = 210; values[_nc] = _nc;
+var _nd = 211; values[_nd] = _nd;
+var _ne = 212; values[_ne] = _ne;
+var _nf = 213; values[_nf] = _nf;
+var _ng = 214; values[_ng] = _ng;
+var _nh = 215; values[_nh] = _nh;
+var _ni = 216; values[_ni] = _ni;
+var _nj = 217; values[_nj] = _nj;
+var _nk = 218; values[_nk] = _nk;
+var _nl = 219; values[_nl] = _nl;
+var _nm = 220; values[_nm] = _nm;
+var _nn = 221; values[_nn] = _nn;
+var _no = 222; values[_no] = _no;
+var _np = 223; values[_np] = _np;
+var _oa = 224; values[_oa] = _oa;
+var _ob = 225; values[_ob] = _ob;
+var _oc = 226; values[_oc] = _oc;
+var _od = 227; values[_od] = _od;
+var _oe = 228; values[_oe] = _oe;
+var _of = 229; values[_of] = _of;
+var _og = 230; values[_og] = _og;
+var _oh = 231; values[_oh] = _oh;
+var _oi = 232; values[_oi] = _oi;
+var _oj = 233; values[_oj] = _oj;
+var _ok = 234; values[_ok] = _ok;
+var _ol = 235; values[_ol] = _ol;
+var _om = 236; values[_om] = _om;
+var _on = 237; values[_on] = _on;
+var _oo = 238; values[_oo] = _oo;
+var _op = 239; values[_op] = _op;
+var _pa = 240; values[_pa] = _pa;
+var _pb = 241; values[_pb] = _pb;
+var _pc = 242; values[_pc] = _pc;
+var _pd = 243; values[_pd] = _pd;
+var _pe = 244; values[_pe] = _pe;
+var _pf = 245; values[_pf] = _pf;
+var _pg = 246; values[_pg] = _pg;
+var _ph = 247; values[_ph] = _ph;
+var _pi = 248; values[_pi] = _pi;
+var _pj = 249; values[_pj] = _pj;
+var _pk = 250; values[_pk] = _pk;
+var _pl = 251; values[_pl] = _pl;
+var _pm = 252; values[_pm] = _pm;
+var _pn = 253; values[_pn] = _pn;
+var _po = 254; values[_po] = _po;
+var _pp = 255; values[_pp] = _pp;
+var _qa = 256; values[_qa] = _qa;
+var _qb = 257; values[_qb] = _qb;
+var _qc = 258; values[_qc] = _qc;
+var _qd = 259; values[_qd] = _qd;
+var _qe = 260; values[_qe] = _qe;
+var _qf = 261; values[_qf] = _qf;
+var _qg = 262; values[_qg] = _qg;
+var _qh = 263; values[_qh] = _qh;
+var _qi = 264; values[_qi] = _qi;
+var _qj = 265; values[_qj] = _qj;
+var _qk = 266; values[_qk] = _qk;
+var _ql = 267; values[_ql] = _ql;
+var _qm = 268; values[_qm] = _qm;
+var _qn = 269; values[_qn] = _qn;
+var _qo = 270; values[_qo] = _qo;
+var _qp = 271; values[_qp] = _qp;
+var _ra = 272; values[_ra] = _ra;
+var _rb = 273; values[_rb] = _rb;
+var _rc = 274; values[_rc] = _rc;
+var _rd = 275; values[_rd] = _rd;
+var _re = 276; values[_re] = _re;
+var _rf = 277; values[_rf] = _rf;
+var _rg = 278; values[_rg] = _rg;
+var _rh = 279; values[_rh] = _rh;
+var _ri = 280; values[_ri] = _ri;
+var _rj = 281; values[_rj] = _rj;
+var _rk = 282; values[_rk] = _rk;
+var _rl = 283; values[_rl] = _rl;
+var _rm = 284; values[_rm] = _rm;
+var _rn = 285; values[_rn] = _rn;
+var _ro = 286; values[_ro] = _ro;
+var _rp = 287; values[_rp] = _rp;
+ done = true;
+};
+
+function check_values() {
+ for (var i = 0; i < values.length; i++) {
+ assertEquals(values[i], i);
+ values[i] = -i;
+ }
+}
+
+// Pass 1 - no debugger, no steps seen
+prepare_step_test();
+f();
+check_values();
+assertEquals(0, step_count);
+
+// Pass 2 - debugger attached and stepping from BP
+Debug.setListener(listener);
+var bp = Debug.setBreakPoint(f, 1);
+prepare_step_test();
+f();
+check_values();
+assertEquals(580, step_count);
+Debug.clearBreakPoint(bp);
+
+// Pass 3 - debugger attached and no BP
+prepare_step_test();
+f();
+check_values();
+assertEquals(0, step_count);
diff --git a/deps/v8/test/mjsunit/ignition/elided-instruction-no-ignition.js b/deps/v8/test/mjsunit/ignition/elided-instruction-no-ignition.js
new file mode 100644
index 0000000000..d31150b6d3
--- /dev/null
+++ b/deps/v8/test/mjsunit/ignition/elided-instruction-no-ignition.js
@@ -0,0 +1,37 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+Debug = debug.Debug
+
+var exception = null;
+var break_count = 0;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ print(event_data.sourceLineText());
+ var column = event_data.sourceColumn();
+ assertTrue(event_data.sourceLineText().indexOf(
+ `Break ${break_count++}. ${column}.`) > 0);
+ exec_state.prepareStep(Debug.StepAction.StepIn);
+ } catch (e) {
+ print(e + e.stack);
+ exception = e;
+ }
+};
+
+function f() {
+ var a = 1; // Break 2. 10.
+ return a; // Break 3. 2.
+} // Break 4. 0.
+
+Debug.setListener(listener);
+debugger; // Break 0. 0.
+f(); // Break 1. 0.
+Debug.setListener(null); // Break 5. 0.
+
+assertNull(exception);
+assertEquals(6, break_count);
diff --git a/deps/v8/test/mjsunit/ignition/elided-instruction.js b/deps/v8/test/mjsunit/ignition/elided-instruction.js
new file mode 100644
index 0000000000..807974bbc1
--- /dev/null
+++ b/deps/v8/test/mjsunit/ignition/elided-instruction.js
@@ -0,0 +1,41 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+Debug = debug.Debug
+
+var exception = null;
+var break_count = 0;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ print(event_data.sourceLineText());
+ var column = event_data.sourceColumn();
+ assertTrue(event_data.sourceLineText().indexOf(
+ `Break ${break_count++}. ${column}.`) > 0);
+ exec_state.prepareStep(Debug.StepAction.StepIn);
+ } catch (e) {
+ print(e + e.stack);
+ exception = e;
+ }
+};
+
+function f() {
+ var a = 1; // Break 2. 10.
+ // This return statement emits no bytecode instruction for the evaluation of
+ // the to-be-returned expression. Therefore we cannot set a break location
+ // before the statement and a second break location immediately before
+ // returning to the caller.
+ return a;
+} // Break 3. 0.
+
+Debug.setListener(listener);
+debugger; // Break 0. 0.
+f(); // Break 1. 0.
+Debug.setListener(null); // Break 4. 0.
+
+assertNull(exception);
+assertEquals(5, break_count);
diff --git a/deps/v8/test/mjsunit/ignition/optimized-debug-frame.js b/deps/v8/test/mjsunit/ignition/optimized-debug-frame.js
new file mode 100644
index 0000000000..be316ad6f4
--- /dev/null
+++ b/deps/v8/test/mjsunit/ignition/optimized-debug-frame.js
@@ -0,0 +1,38 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-debug-as debug
+
+// Test that PC in optimized frame would correctly translate into
+// unoptimized frame when retrieving frame information in the debugger.
+
+function f() {
+ debugger;
+}
+
+function g(x) {
+ return f();
+}
+
+var break_count = 0;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ assertEquals(14, exec_state.frame(1).sourceLine());
+ assertEquals(9, exec_state.frame(1).sourceColumn());
+ break_count++;
+}
+
+g();
+g();
+%OptimizeFunctionOnNextCall(g);
+
+var Debug = debug.Debug;
+Debug.setListener(listener);
+
+g();
+
+Debug.setListener(null);
+
+assertEquals(1, break_count);
diff --git a/deps/v8/test/mjsunit/ignition/optimized-stack-trace.js b/deps/v8/test/mjsunit/ignition/optimized-stack-trace.js
new file mode 100644
index 0000000000..77f9acf3c8
--- /dev/null
+++ b/deps/v8/test/mjsunit/ignition/optimized-stack-trace.js
@@ -0,0 +1,22 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Test that PC in optimized frame would correctly translate into
+// unoptimized frame when collecting stack trace.
+
+function f() {
+ return new Error().stack;
+}
+
+function g(x) {
+ return f();
+}
+
+g();
+g();
+%OptimizeFunctionOnNextCall(g);
+print(g());
+assertTrue(/g \(.*?\.js:15:10\)/.test(g()));
diff --git a/deps/v8/test/mjsunit/ignition/regress-597565-double-to-object-transition.js b/deps/v8/test/mjsunit/ignition/regress-597565-double-to-object-transition.js
new file mode 100644
index 0000000000..7bf8e83d1e
--- /dev/null
+++ b/deps/v8/test/mjsunit/ignition/regress-597565-double-to-object-transition.js
@@ -0,0 +1,18 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-inline-new
+
+function __f_2(b, value) {
+ b[1] = value;
+}
+function __f_9() {
+ var arr = [1.5, 0, 0];
+ // Call with a double, so the expected element type is double.
+ __f_2(1.5);
+ // Call with an object, which triggers transition from FAST_double
+ // to Object for the elements type.
+ __f_2(arr);
+}
+__f_9();
diff --git a/deps/v8/test/mjsunit/ignition/regress-599001-verifyheap.js b/deps/v8/test/mjsunit/ignition/regress-599001-verifyheap.js
new file mode 100644
index 0000000000..5aa2efdb36
--- /dev/null
+++ b/deps/v8/test/mjsunit/ignition/regress-599001-verifyheap.js
@@ -0,0 +1,17 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --verify-heap --expose-gc
+
+// Tests that verify heap works for BytecodeArrays in the large object space.
+
+// Creates a list of variable declarations and calls it through eval to
+// generate a large BytecodeArray.
+var s = "";
+for (var i = 0; i < 65535; i++) {
+ s += ("var a" + i + ";");
+}
+
+(function() { eval(s); })();
+gc();
diff --git a/deps/v8/test/mjsunit/ignition/tracing.js b/deps/v8/test/mjsunit/ignition/tracing.js
new file mode 100644
index 0000000000..70fb092898
--- /dev/null
+++ b/deps/v8/test/mjsunit/ignition/tracing.js
@@ -0,0 +1,325 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --trace-ignition
+
+// Test tracing doesn't crash or leak. Not explicitly pattern matching.
+function f() {
+ var values = [];
+ var x = 10000000;
+ var y = x + 1;
+ var _aa;
+ var _ab;
+ var _ac;
+ var _ad;
+ var _ae;
+ var _af;
+ var _ag;
+ var _ah;
+ var _ai;
+ var _aj;
+ var _ak;
+ var _al;
+ var _am;
+ var _an;
+ var _ao;
+ var _ap;
+ var _ba;
+ var _bb;
+ var _bc;
+ var _bd;
+ var _be;
+ var _bf;
+ var _bg;
+ var _bh;
+ var _bi;
+ var _bj;
+ var _bk;
+ var _bl;
+ var _bm;
+ var _bn;
+ var _bo;
+ var _bp;
+ var _ca;
+ var _cb;
+ var _cc;
+ var _cd;
+ var _ce;
+ var _cf;
+ var _cg;
+ var _ch;
+ var _ci;
+ var _cj;
+ var _ck;
+ var _cl;
+ var _cm;
+ var _cn;
+ var _co;
+ var _cp;
+ var _da;
+ var _db;
+ var _dc;
+ var _dd;
+ var _de;
+ var _df;
+ var _dg;
+ var _dh;
+ var _di;
+ var _dj;
+ var _dk;
+ var _dl;
+ var _dm;
+ var _dn;
+ var _do;
+ var _dp;
+ var _ea;
+ var _eb;
+ var _ec;
+ var _ed;
+ var _ee;
+ var _ef;
+ var _eg;
+ var _eh;
+ var _ei;
+ var _ej;
+ var _ek;
+ var _el;
+ var _em;
+ var _en;
+ var _eo;
+ var _ep;
+ var _fa;
+ var _fb;
+ var _fc;
+ var _fd;
+ var _fe;
+ var _ff;
+ var _fg;
+ var _fh;
+ var _fi;
+ var _fj;
+ var _fk;
+ var _fl;
+ var _fm;
+ var _fn;
+ var _fo;
+ var _fp;
+ var _ga;
+ var _gb;
+ var _gc;
+ var _gd;
+ var _ge;
+ var _gf;
+ var _gg;
+ var _gh;
+ var _gi;
+ var _gj;
+ var _gk;
+ var _gl;
+ var _gm;
+ var _gn;
+ var _go;
+ var _gp;
+ var _ha;
+ var _hb;
+ var _hc;
+ var _hd;
+ var _he;
+ var _hf;
+ var _hg;
+ var _hh;
+ var _hi;
+ var _hj;
+ var _hk;
+ var _hl;
+ var _hm;
+ var _hn;
+ var _ho;
+ var _hp;
+ var _ia;
+ var _ib;
+ var _ic;
+ var _id;
+ var _ie;
+ var _if;
+ var _ig;
+ var _ih;
+ var _ii;
+ var _ij;
+ var _ik;
+ var _il;
+ var _im;
+ var _in;
+ var _io;
+ var _ip;
+ var _ja;
+ var _jb;
+ var _jc;
+ var _jd;
+ var _je;
+ var _jf;
+ var _jg;
+ var _jh;
+ var _ji;
+ var _jj;
+ var _jk;
+ var _jl;
+ var _jm;
+ var _jn;
+ var _jo;
+ var _jp;
+ var _ka;
+ var _kb;
+ var _kc;
+ var _kd;
+ var _ke;
+ var _kf;
+ var _kg;
+ var _kh;
+ var _ki;
+ var _kj;
+ var _kk;
+ var _kl;
+ var _km;
+ var _kn;
+ var _ko;
+ var _kp;
+ var _la;
+ var _lb;
+ var _lc;
+ var _ld;
+ var _le;
+ var _lf;
+ var _lg;
+ var _lh;
+ var _li;
+ var _lj;
+ var _lk;
+ var _ll;
+ var _lm;
+ var _ln;
+ var _lo;
+ var _lp;
+ var _ma;
+ var _mb;
+ var _mc;
+ var _md;
+ var _me;
+ var _mf;
+ var _mg;
+ var _mh;
+ var _mi;
+ var _mj;
+ var _mk;
+ var _ml;
+ var _mm;
+ var _mn;
+ var _mo;
+ var _mp;
+ var _na;
+ var _nb;
+ var _nc;
+ var _nd;
+ var _ne;
+ var _nf;
+ var _ng;
+ var _nh;
+ var _ni;
+ var _nj;
+ var _nk;
+ var _nl;
+ var _nm;
+ var _nn;
+ var _no;
+ var _np;
+ var _oa;
+ var _ob;
+ var _oc;
+ var _od;
+ var _oe;
+ var _of;
+ var _og;
+ var _oh;
+ var _oi;
+ var _oj;
+ var _ok;
+ var _ol;
+ var _om;
+ var _on;
+ var _oo;
+ var _op;
+ var _pa;
+ var _pb;
+ var _pc;
+ var _pd;
+ var _pe;
+ var _pf;
+ var _pg;
+ var _ph;
+ var _pi;
+ var _pj;
+ var _pk;
+ var _pl;
+ var _pm;
+ var _pn;
+ var _po;
+ var _pp;
+ var _qa;
+ var _qb;
+ var _qc;
+ var _qd;
+ var _qe;
+ var _qf;
+ var _qg;
+ var _qh;
+ var _qi;
+ var _qj;
+ var _qk;
+ var _ql;
+ var _qm;
+ var _qn;
+ var _qo;
+ var _qp;
+ var _ra;
+ var _rb;
+ var _rc;
+ var _rd;
+ var _re;
+ var _rf;
+ var _rg;
+ var _rh;
+ var _ri;
+ var _rj;
+ var _rk;
+ var _rl;
+ var _rm;
+ var _rn;
+ var _ro;
+ var _rp = 287; values[_rp] = _rp;
+};
+
+f();
diff --git a/deps/v8/test/mjsunit/json-stringify-stack.js b/deps/v8/test/mjsunit/json-stringify-stack.js
new file mode 100644
index 0000000000..3048f8940b
--- /dev/null
+++ b/deps/v8/test/mjsunit/json-stringify-stack.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Object.defineProperty(Object.prototype, "length", {set() { throw "error" }});
+Object.defineProperty(Object.prototype, "values", {set() { throw "error" }});
+
+JSON.stringify({}, v=>v);
diff --git a/deps/v8/test/mjsunit/lookup-behind-property.js b/deps/v8/test/mjsunit/lookup-behind-property.js
new file mode 100644
index 0000000000..abf1a52fbc
--- /dev/null
+++ b/deps/v8/test/mjsunit/lookup-behind-property.js
@@ -0,0 +1,6 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var g = Realm.global(Realm.create());
+assertThrows(()=>g.toString());
diff --git a/deps/v8/test/mjsunit/math-ceil.js b/deps/v8/test/mjsunit/math-ceil.js
new file mode 100644
index 0000000000..05794f4bb2
--- /dev/null
+++ b/deps/v8/test/mjsunit/math-ceil.js
@@ -0,0 +1,78 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --max-semi-space-size=1 --allow-natives-syntax
+
+var test_id = 0;
+
+function testCeil(expect, input) {
+ var test = new Function('n',
+ '"' + (test_id++) + '";return Math.ceil(n)');
+ assertEquals(expect, test(input));
+ assertEquals(expect, test(input));
+ assertEquals(expect, test(input));
+ %OptimizeFunctionOnNextCall(test);
+ assertEquals(expect, test(input));
+
+ var test_double_input = new Function(
+ 'n',
+ '"' + (test_id++) + '";return Math.ceil(+n)');
+ assertEquals(expect, test_double_input(input));
+ assertEquals(expect, test_double_input(input));
+ assertEquals(expect, test_double_input(input));
+ %OptimizeFunctionOnNextCall(test_double_input);
+ assertEquals(expect, test_double_input(input));
+
+ var test_double_output = new Function(
+ 'n',
+ '"' + (test_id++) + '";return Math.ceil(n) + -0.0');
+ assertEquals(expect, test_double_output(input));
+ assertEquals(expect, test_double_output(input));
+ assertEquals(expect, test_double_output(input));
+ %OptimizeFunctionOnNextCall(test_double_output);
+ assertEquals(expect, test_double_output(input));
+
+ var test_via_floor = new Function(
+ 'n',
+ '"' + (test_id++) + '";return -Math.floor(-n)');
+ assertEquals(expect, test_via_floor(input));
+ assertEquals(expect, test_via_floor(input));
+ assertEquals(expect, test_via_floor(input));
+ %OptimizeFunctionOnNextCall(test_via_floor);
+ assertEquals(expect, test_via_floor(input));
+
+ if (input <= 0) {
+ var test_via_trunc = new Function(
+ 'n',
+ '"' + (test_id++) + '";return Math.trunc(n)');
+ assertEquals(expect, test_via_trunc(input));
+ assertEquals(expect, test_via_trunc(input));
+ assertEquals(expect, test_via_trunc(input));
+ %OptimizeFunctionOnNextCall(test_via_trunc);
+ assertEquals(expect, test_via_trunc(input));
+ }
+}
+
+function test() {
+ testCeil(0, 0);
+ testCeil(+0, +0);
+ testCeil(-0, -0);
+ testCeil(1, 0.49999);
+ testCeil(1, 0.6);
+ testCeil(1, 0.5);
+ testCeil(-0, -0.1);
+ testCeil(-0, -0.5);
+ testCeil(-0, -0.6);
+ testCeil(-1, -1.6);
+ testCeil(-0, -0.50001);
+ testCeil(Infinity, Infinity);
+ testCeil(-Infinity, -Infinity);
+}
+
+
+// Test in a loop to cover the custom IC and GC-related issues.
+for (var i = 0; i < 10; i++) {
+ test();
+ new Array(i * 10000);
+}
diff --git a/deps/v8/test/mjsunit/math-floor-part1.js b/deps/v8/test/mjsunit/math-floor-part1.js
index 65ae3c68e4..bad1edd081 100644
--- a/deps/v8/test/mjsunit/math-floor-part1.js
+++ b/deps/v8/test/mjsunit/math-floor-part1.js
@@ -38,6 +38,15 @@ function testFloor(expect, input) {
%OptimizeFunctionOnNextCall(test);
assertEquals(expect, test(input));
+ var test_double_input = new Function(
+ 'n',
+ '"' + (test_id++) + '";return Math.floor(+n)');
+ assertEquals(expect, test_double_input(input));
+ assertEquals(expect, test_double_input(input));
+ assertEquals(expect, test_double_input(input));
+ %OptimizeFunctionOnNextCall(test_double_input);
+ assertEquals(expect, test_double_input(input));
+
var test_double_output = new Function(
'n',
'"' + (test_id++) + '";return Math.floor(n) + -0.0');
@@ -46,6 +55,26 @@ function testFloor(expect, input) {
assertEquals(expect, test_double_output(input));
%OptimizeFunctionOnNextCall(test_double_output);
assertEquals(expect, test_double_output(input));
+
+ var test_via_ceil = new Function(
+ 'n',
+ '"' + (test_id++) + '";return -Math.ceil(-n)');
+ assertEquals(expect, test_via_ceil(input));
+ assertEquals(expect, test_via_ceil(input));
+ assertEquals(expect, test_via_ceil(input));
+ %OptimizeFunctionOnNextCall(test_via_ceil);
+ assertEquals(expect, test_via_ceil(input));
+
+ if (input >= 0) {
+ var test_via_trunc = new Function(
+ 'n',
+ '"' + (test_id++) + '";return Math.trunc(n)');
+ assertEquals(expect, test_via_trunc(input));
+ assertEquals(expect, test_via_trunc(input));
+ assertEquals(expect, test_via_trunc(input));
+ %OptimizeFunctionOnNextCall(test_via_trunc);
+ assertEquals(expect, test_via_trunc(input));
+ }
}
function zero() {
diff --git a/deps/v8/test/mjsunit/math-floor-part2.js b/deps/v8/test/mjsunit/math-floor-part2.js
index 60045705ce..eab3ab3a2b 100644
--- a/deps/v8/test/mjsunit/math-floor-part2.js
+++ b/deps/v8/test/mjsunit/math-floor-part2.js
@@ -38,6 +38,15 @@ function testFloor(expect, input) {
%OptimizeFunctionOnNextCall(test);
assertEquals(expect, test(input));
+ var test_double_input = new Function(
+ 'n',
+ '"' + (test_id++) + '";return Math.floor(+n)');
+ assertEquals(expect, test_double_input(input));
+ assertEquals(expect, test_double_input(input));
+ assertEquals(expect, test_double_input(input));
+ %OptimizeFunctionOnNextCall(test_double_input);
+ assertEquals(expect, test_double_input(input));
+
var test_double_output = new Function(
'n',
'"' + (test_id++) + '";return Math.floor(n) + -0.0');
@@ -46,6 +55,26 @@ function testFloor(expect, input) {
assertEquals(expect, test_double_output(input));
%OptimizeFunctionOnNextCall(test_double_output);
assertEquals(expect, test_double_output(input));
+
+ var test_via_ceil = new Function(
+ 'n',
+ '"' + (test_id++) + '";return -Math.ceil(-n)');
+ assertEquals(expect, test_via_ceil(input));
+ assertEquals(expect, test_via_ceil(input));
+ assertEquals(expect, test_via_ceil(input));
+ %OptimizeFunctionOnNextCall(test_via_ceil);
+ assertEquals(expect, test_via_ceil(input));
+
+ if (input >= 0) {
+ var test_via_trunc = new Function(
+ 'n',
+ '"' + (test_id++) + '";return Math.trunc(n)');
+ assertEquals(expect, test_via_trunc(input));
+ assertEquals(expect, test_via_trunc(input));
+ assertEquals(expect, test_via_trunc(input));
+ %OptimizeFunctionOnNextCall(test_via_trunc);
+ assertEquals(expect, test_via_trunc(input));
+ }
}
function zero() {
diff --git a/deps/v8/test/mjsunit/math-floor-part3.js b/deps/v8/test/mjsunit/math-floor-part3.js
index 9225c388ba..6a536657ac 100644
--- a/deps/v8/test/mjsunit/math-floor-part3.js
+++ b/deps/v8/test/mjsunit/math-floor-part3.js
@@ -38,6 +38,15 @@ function testFloor(expect, input) {
%OptimizeFunctionOnNextCall(test);
assertEquals(expect, test(input));
+ var test_double_input = new Function(
+ 'n',
+ '"' + (test_id++) + '";return Math.floor(+n)');
+ assertEquals(expect, test_double_input(input));
+ assertEquals(expect, test_double_input(input));
+ assertEquals(expect, test_double_input(input));
+ %OptimizeFunctionOnNextCall(test_double_input);
+ assertEquals(expect, test_double_input(input));
+
var test_double_output = new Function(
'n',
'"' + (test_id++) + '";return Math.floor(n) + -0.0');
@@ -46,6 +55,26 @@ function testFloor(expect, input) {
assertEquals(expect, test_double_output(input));
%OptimizeFunctionOnNextCall(test_double_output);
assertEquals(expect, test_double_output(input));
+
+ var test_via_ceil = new Function(
+ 'n',
+ '"' + (test_id++) + '";return -Math.ceil(-n)');
+ assertEquals(expect, test_via_ceil(input));
+ assertEquals(expect, test_via_ceil(input));
+ assertEquals(expect, test_via_ceil(input));
+ %OptimizeFunctionOnNextCall(test_via_ceil);
+ assertEquals(expect, test_via_ceil(input));
+
+ if (input >= 0) {
+ var test_via_trunc = new Function(
+ 'n',
+ '"' + (test_id++) + '";return Math.trunc(n)');
+ assertEquals(expect, test_via_trunc(input));
+ assertEquals(expect, test_via_trunc(input));
+ assertEquals(expect, test_via_trunc(input));
+ %OptimizeFunctionOnNextCall(test_via_trunc);
+ assertEquals(expect, test_via_trunc(input));
+ }
}
function zero() {
diff --git a/deps/v8/test/mjsunit/math-floor-part4.js b/deps/v8/test/mjsunit/math-floor-part4.js
index ade36a9c30..9ae83d87c6 100644
--- a/deps/v8/test/mjsunit/math-floor-part4.js
+++ b/deps/v8/test/mjsunit/math-floor-part4.js
@@ -38,6 +38,15 @@ function testFloor(expect, input) {
%OptimizeFunctionOnNextCall(test);
assertEquals(expect, test(input));
+ var test_double_input = new Function(
+ 'n',
+ '"' + (test_id++) + '";return Math.floor(+n)');
+ assertEquals(expect, test_double_input(input));
+ assertEquals(expect, test_double_input(input));
+ assertEquals(expect, test_double_input(input));
+ %OptimizeFunctionOnNextCall(test_double_input);
+ assertEquals(expect, test_double_input(input));
+
var test_double_output = new Function(
'n',
'"' + (test_id++) + '";return Math.floor(n) + -0.0');
@@ -46,6 +55,26 @@ function testFloor(expect, input) {
assertEquals(expect, test_double_output(input));
%OptimizeFunctionOnNextCall(test_double_output);
assertEquals(expect, test_double_output(input));
+
+ var test_via_ceil = new Function(
+ 'n',
+ '"' + (test_id++) + '";return -Math.ceil(-n)');
+ assertEquals(expect, test_via_ceil(input));
+ assertEquals(expect, test_via_ceil(input));
+ assertEquals(expect, test_via_ceil(input));
+ %OptimizeFunctionOnNextCall(test_via_ceil);
+ assertEquals(expect, test_via_ceil(input));
+
+ if (input >= 0) {
+ var test_via_trunc = new Function(
+ 'n',
+ '"' + (test_id++) + '";return Math.trunc(n)');
+ assertEquals(expect, test_via_trunc(input));
+ assertEquals(expect, test_via_trunc(input));
+ assertEquals(expect, test_via_trunc(input));
+ %OptimizeFunctionOnNextCall(test_via_trunc);
+ assertEquals(expect, test_via_trunc(input));
+ }
}
function zero() {
diff --git a/deps/v8/test/mjsunit/messages.js b/deps/v8/test/mjsunit/messages.js
index 7deef02615..8796d05f16 100644
--- a/deps/v8/test/mjsunit/messages.js
+++ b/deps/v8/test/mjsunit/messages.js
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --stack-size=100 --harmony --harmony-reflect --harmony-regexps
-// Flags: --harmony-simd
+// Flags: --stack-size=100 --harmony
+// Flags: --harmony-simd --harmony-instanceof
function test(f, expected, type) {
try {
@@ -150,7 +150,7 @@ TypeError);
// kInstanceofFunctionExpected
test(function() {
1 instanceof 1;
-}, "Expecting an object in instanceof check", TypeError);
+}, "Right-hand side of 'instanceof' is not an object", TypeError);
// kInstanceofNonobjectProto
test(function() {
@@ -339,6 +339,11 @@ test(function() {
eval("/a/x.test(\"a\");");
}, "Invalid regular expression flags", SyntaxError);
+// kInvalidOrUnexpectedToken
+test(function() {
+ eval("'\n'");
+}, "Invalid or unexpected token", SyntaxError);
+
//kJsonParseUnexpectedEOS
test(function() {
JSON.parse("{")
diff --git a/deps/v8/test/mjsunit/mirror-regexp.js b/deps/v8/test/mjsunit/mirror-regexp.js
index 882af8dd6e..7aae1c62ec 100644
--- a/deps/v8/test/mjsunit/mirror-regexp.js
+++ b/deps/v8/test/mjsunit/mirror-regexp.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug --harmony-regexps --harmony-unicode-regexps
+// Flags: --expose-debug-as debug --harmony-unicode-regexps
// Test the mirror object for regular expression values
var dont_enum = debug.PropertyAttribute.DontEnum;
diff --git a/deps/v8/test/mjsunit/mirror-script.js b/deps/v8/test/mjsunit/mirror-script.js
index 7642839d53..ed0dd12ace 100644
--- a/deps/v8/test/mjsunit/mirror-script.js
+++ b/deps/v8/test/mjsunit/mirror-script.js
@@ -84,7 +84,7 @@ function testScriptMirror(f, file_name, file_lines, type, compilation_type,
// Test the script mirror for different functions.
testScriptMirror(function(){}, 'mirror-script.js', 99, 2, 0);
-testScriptMirror(Math.round, 'native math.js', -1, 0, 0);
+testScriptMirror(Math.abs, 'native math.js', -1, 0, 0);
testScriptMirror(eval('(function(){})'), null, 1, 2, 1, '(function(){})', 87);
testScriptMirror(eval('(function(){\n })'), null, 2, 2, 1, '(function(){\n })', 88);
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index fc9c7174b1..6d786f97cf 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -43,9 +43,6 @@
# This test non-deterministically runs out of memory on Windows ia32.
'regress/regress-crbug-160010': [SKIP],
- # Issue 4698: not fully supported by Turbofan yet
- 'es6/tail-call': [PASS, NO_VARIANTS],
-
# Issue 3389: deopt_every_n_garbage_collections is unsafe
'regress/regress-2653': [SKIP],
@@ -83,17 +80,6 @@
'regress/regress-2185-2': [PASS, NO_VARIANTS],
'regress/regress-2612': [PASS, NO_VARIANTS],
- # Modules are busted
- 'harmony/module-linking': [SKIP],
- 'harmony/module-recompile': [SKIP],
- 'harmony/module-resolution': [SKIP],
- 'harmony/regress/regress-343928': [SKIP],
-
- # Proxy tests rely on non ES6 version of Proxies
- # TODO(neis,cbruni): figure out which Proxy tests can be reused
- 'harmony/proxies-example-membrane': [SKIP],
- 'strong/load-proxy': [SKIP],
-
# Issue 3660: Replacing activated TurboFan frames by unoptimized code does
# not work, but we expect it to not crash.
'debug-step-turbofan': [PASS, FAIL],
@@ -121,20 +107,6 @@
'debug-listbreakpoints': [PASS, NO_VARIANTS], # arm64 nosnap with turbofan
'debug-enable-disable-breakpoints': [PASS, NO_VARIANTS], #arm64 nosnap with turbofan.
- # Issue 3956: Strong mode is deprecating. The expectations inside the
- # following tests should be updated once deprecation is complete.
- 'strong/destructuring': [SKIP],
- 'strong/implicit-conversions': [SKIP],
- 'strong/implicit-conversions-count': [SKIP],
- 'strong/implicit-conversions-inlining': [SKIP],
- 'strong/load-builtins': [SKIP],
- 'strong/load-element': [SKIP],
- 'strong/load-element-mutate-backing-store': [SKIP],
- 'strong/load-property': [SKIP],
- 'strong/load-property-mutate-backing-store': [SKIP],
- 'strong/load-super': [SKIP],
- 'strong/literals': [SKIP], # Rest arguments do not respect strongness in Turbofan.
-
# Issue 4035: unexpected frame->context() in debugger
'regress/regress-crbug-107996': [PASS, NO_VARIANTS],
'regress/regress-crbug-171715': [PASS, NO_VARIANTS],
@@ -177,6 +149,10 @@
# issue 4078:
'allocation-site-info': [PASS, NO_VARIANTS],
+ # TODO(turbofan): The escape analysis needs some investigation.
+ 'compiler/escape-analysis-deopt-5': [PASS, NO_VARIANTS],
+ 'compiler/escape-analysis-9': [PASS, NO_VARIANTS],
+
##############################################################################
# Too slow in debug mode with --stress-opt mode.
'compiler/regress-stacktrace-methods': [PASS, ['mode == debug', SKIP]],
@@ -277,17 +253,10 @@
'asm/poppler/*': [PASS, SLOW, FAST_VARIANTS],
'asm/sqlite3/*': [PASS, SLOW, FAST_VARIANTS],
- # BUG(v8:3838).
- 'regress/regress-3116': [PASS, ['isolates', FLAKY]],
-
# BUG(v8:4458). TODO(mvstanton): reenable the test once --vector-stores is
# prermanently enabled.
'call-counts': [SKIP],
- # BUG(chromium:508074). Remove this once the issue is fixed.
- 'harmony/arrow-rest-params': [PASS, NO_VARIANTS],
- 'harmony/rest-params': [PASS, ['no_snap == True', NO_VARIANTS]],
-
# Slow tests.
'copy-on-write-assert': [PASS, SLOW],
'debug-scopes': [PASS, SLOW],
@@ -296,16 +265,30 @@
'readonly': [PASS, SLOW],
'regress/regress-1200351': [PASS, ['mode == debug', SLOW]],
'regress/regress-crbug-474297': [PASS, ['mode == debug', SLOW]],
+ 'es6/tail-call-megatest*': [PASS, FAST_VARIANTS],
- # TODO(titzer): correct WASM adapter frame alignment on arm64
- 'wasm/*': [PASS, ['arch == arm64', SKIP]],
- 'wasm/asm-wasm': [PASS, ['arch == arm or arch == arm64', SKIP]],
+ # TODO(titzer): ASM->WASM tests on these platforms
+ 'wasm/asm-wasm': [PASS, ['arch in [arm, arm64, mips, mipsel, mips64, mips64el]', SKIP]],
+ # TODO(branelson): Figure out why ignition + asm-wasm-stdlib fails.
+ 'wasm/asm-wasm-stdlib': [PASS, ['arch in [arm, arm64, mips, mipsel, mips64, mips64el] or ignition == True', SKIP]],
+ 'wasm/asm-wasm-literals': [PASS, ['arch in [arm, arm64, mips, mipsel, mips64, mips64el] or ignition == True', SKIP]],
+ 'wasm/asm-wasm-copy': [PASS, ['arch in [arm, arm64, mips, mipsel, mips64, mips64el]', SKIP]],
+ 'wasm/asm-wasm-deopt': [PASS, ['arch in [arm, arm64, mips, mipsel, mips64, mips64el]', SKIP]],
+
+ # TODO(branelson): Figure out why ignition + asm->wasm fails embenchen.
+ 'wasm/embenchen/*': [PASS, ['arch == arm64', SKIP], ['ignition == True', SKIP]],
+
+ # TODO(bradnelson) Fix and re-enable.
+ 'wasm/embenchen/box2d': [SKIP], # hang
+ 'wasm/embenchen/lua_binarytrees': [SKIP], # fails decode
+ #'wasm/embenchen/zlib': [SKIP], # fails gc-stress
# case-insensitive unicode regexp relies on case mapping provided by ICU.
'harmony/unicode-regexp-ignore-case': [PASS, ['no_i18n == True', FAIL]],
'harmony/unicode-regexp-ignore-case-noi18n': [FAIL, ['no_i18n == True', PASS]],
+ 'regress/regress-5036': [PASS, ['no_i18n == True', FAIL]],
# desugaring regexp property class relies on ICU.
- 'harmony/unicode-regexp-property-class': [PASS, ['no_i18n == True', FAIL]],
+ 'harmony/regexp-property-*': [PASS, ['no_i18n == True', FAIL]],
}], # ALWAYS
['novfp3 == True', {
@@ -371,9 +354,6 @@
# BUG(v8:4237)
'regress/regress-3976': [SKIP],
- # BUG(v8:4359)
- 'strong/load-proxy': [SKIP],
-
# Slow tests.
'array-constructor': [PASS, SLOW],
'json': [PASS, SLOW],
@@ -385,6 +365,9 @@
# BUG(v8:4754).
'debug-referenced-by': [PASS, NO_VARIANTS],
+
+ # BUG(v8:4779): Crashes flakily with stress mode on arm64.
+ 'array-splice': [PASS, SLOW, ['arch == arm64', FAST_VARIANTS]],
}], # 'gc_stress == True'
##############################################################################
@@ -401,6 +384,8 @@
'asm/sqlite3/*': [SKIP],
# TODO(mips-team): Fix Wasm for big-endian.
'wasm/*': [SKIP],
+ 'regress/regress-599717': [SKIP],
+ 'regress/regress-599719': [SKIP],
}], # 'byteorder == big'
##############################################################################
@@ -484,7 +469,6 @@
'packed-elements': [SKIP],
'regexp-global': [SKIP],
'compiler/alloc-numbers': [SKIP],
- 'harmony/symbols': [SKIP],
'math-floor-of-div': [PASS, TIMEOUT],
'math-floor-of-div-nosudiv': [PASS, TIMEOUT],
'unicodelctest': [PASS, TIMEOUT],
@@ -688,7 +672,6 @@
'debug-liveedit-stack-padding': [SKIP],
'debug-liveedit-restart-frame': [SKIP],
'debug-liveedit-double-call': [SKIP],
- 'harmony/generators-debug-liveedit': [SKIP],
# NaCl builds have problems with this test since Pepper_28.
# V8 Issue 2786
@@ -725,10 +708,19 @@
# Crashes.
'harmony/private': [SKIP],
- 'harmony/symbols': [SKIP],
}], # 'arch == nacl_ia32 or arch == nacl_x64'
##############################################################################
+['arch == s390 or arch == s390x', {
+
+ # Stack manipulations in LiveEdit is not implemented for this arch.
+ 'debug-liveedit-check-stack': [SKIP],
+ 'debug-liveedit-stack-padding': [SKIP],
+ 'debug-liveedit-restart-frame': [SKIP],
+ 'debug-liveedit-double-call': [SKIP],
+}], # 'arch == s390 or arch == s390x'
+
+##############################################################################
['deopt_fuzzer == True', {
# Skip tests that are not suitable for deoptimization fuzzing.
@@ -746,6 +738,9 @@
# Deopt every n garbage collections collides with deopt every n times.
'regress/regress-2653': [SKIP],
+
+ # Too slow.
+ 'es6/tail-call-megatest*': [SKIP],
}], # 'deopt_fuzzer == True'
##############################################################################
@@ -763,179 +758,163 @@
'regress/regress-1132': [SKIP],
}], # 'arch == ppc and simulator_run == True'
+##############################################################################
['ignition == True', {
- # Skip strong mode tests since strong mode is unsupported on ignition.
- 'strong/*': [SKIP],
-
- # TODO(yangguo,4690): Requires debugger support.
- 'es6/debug*': [SKIP],
- 'harmony/debug*': [SKIP],
- 'regress/debug*': [SKIP],
- 'regress/regress-debug*': [SKIP],
-
# TODO(yangguo,4690): assertion failures in debugger tests.
'debug-allscopes-on-debugger': [FAIL],
+ 'es6/debug-stepnext-for': [FAIL],
+ 'es6/debug-promises/stepin-constructor': [FAIL],
+ 'es6/debug-stepin-proxies': [FAIL],
+ 'regress/regress-crbug-119800': [FAIL],
+ 'regress/regress-opt-after-debug-deopt': [FAIL],
+
+ # TODO(yangguo,4690): flaky failures on the bots.
+ 'debug-stepin-builtin-callback-opt': [SKIP],
+
+ # TODO(rmcilroy,4765): assertion failures in LiveEdit tests.
'debug-liveedit-restart-frame': [FAIL],
- 'debug-return-value': [FAIL],
'debug-liveedit-literals': [FAIL],
'debug-liveedit-3': [FAIL],
'debug-liveedit-1': [FAIL],
- 'debug-step-into-json': [FAIL],
+ 'debug-liveedit-2': [FAIL],
'debug-liveedit-patch-positions-replace': [FAIL],
- 'debug-step-into-valueof': [FAIL],
'debug-liveedit-patch-positions': [FAIL],
'debug-liveedit-stepin': [FAIL],
- 'debug-step-4': [FAIL],
'debug-liveedit-newsource': [FAIL],
'debug-liveedit-stack-padding': [FAIL],
- 'debug-stepframe': [FAIL],
- 'debug-negative-break-points': [FAIL],
- 'debug-stepin-accessor': [FAIL],
- 'debug-step-stub-callfunction': [FAIL],
'debug-liveedit-breakpoints': [FAIL],
- 'debug-stepin-accessor-ic': [FAIL],
- 'debug-stepin-builtin': [FAIL],
- 'debug-stepin-foreach': [FAIL],
- 'debug-stepnext-do-while': [FAIL],
- 'debug-stepin-builtin-callback-opt': [FAIL],
- 'debug-stepin-function-call': [FAIL],
-
- # TODO(yangguo,4690): Check failure in debug.cc BreakLocation::SetBreakPoint
- # DCHECK(IsDebugBreak() || IsDebuggerStatement());
- 'regress/regress-1523': [FAIL],
- 'regress/regress-102153': [FAIL],
- 'regress/regress-2825': [FAIL],
- 'regress/regress-crbug-119800': [FAIL],
- 'regress/regress-crbug-467180': [FAIL],
- 'regress/regress-opt-after-debug-deopt': [FAIL],
+ 'es6/debug-liveedit-new-target-1': [FAIL],
+ 'es6/debug-liveedit-new-target-2': [FAIL],
+ 'es6/debug-liveedit-new-target-3': [FAIL],
+ 'es6/generators-debug-liveedit': [FAIL],
- # TODO(rmcilroy,4681): Requires support for generators.
- 'messages': [FAIL],
+ # TODO(mythria, 4780): Related to type feedback for calls in interpreter.
+ 'array-literal-feedback': [FAIL],
+ 'regress/regress-4121': [FAIL],
+
+ # TODO(mythria, 4764): lack of osr support.
+ 'regress/regress-2618': [FAIL],
+ # TODO(mythria, 4764): lack of osr support. The tests waits in a loop
+ # till it is optimized. So test timeouts.
+ 'array-literal-transitions': [SKIP],
+
+ # TODO(mythria, 4680): Relate to GC and ignition holding references to
+ # objects.
+ 'es6/mirror-collections': [FAIL],
+
+ # TODO(mythria, 4680): Fails with context_register_count_ > 0 (0 vs. 0) when
+ # trying to get a context register in BytecodeGenerator.
+ 'harmony/regress/regress-4658': [FAIL, ['mode == release and dcheck_always_on == False', PASS],],
+
+ # TODO(rmcilroy, 4680): Script throws RangeError as expected, but does so during
+ # eager compile of the whole script instead of during lazy compile of the function
+ # f(), so we can't catch the exception in the try/catch. Skip because on some
+ # platforms the stack limit is different and the exception doesn't fire.
+ 'regress/regress-crbug-589472': [SKIP],
+
+ # Debugger test cases that pass with ignition, but not full-codegen.
+ # These differences between full-codegen and ignition are deliberate.
+ 'ignition/elided-instruction-no-ignition': [FAIL],
+
+ 'wasm/asm-wasm-f32': [PASS, ['arch in [arm64]', SKIP]],
+ 'wasm/asm-wasm-f64': [PASS, ['arch in [arm64]', SKIP]],
+}], # ignition == True
+
+['ignition == True and system == windows', {
+ # TODO(rmcilroy,4680): Crash on windows nosnap shared.
+ 'regress/regress-crbug-352058': [PASS, ['no_snap == True', SKIP]],
+
+ # TODO(513471): Attempting to optimize generator hits unreachable path.
+ 'regress/regress-crbug-513471': [PASS, ['no_snap == True', SKIP]],
+
+ # TODO(rmcilroy,4680): Fails on win32 debug.
+ 'div-mod': [PASS, ['arch == ia32', SKIP]],
+}], # ignition == True and system == windows
+
+['ignition == True and arch == arm64', {
+ # TODO(rmcilroy,4680): Arm64 specific timeouts.
+ 'asm/construct-double': [SKIP],
+ 'compiler/osr-one': [SKIP],
+ 'compiler/osr-two': [SKIP],
+ 'wasm/asm-wasm-i32': [SKIP],
+ 'wasm/asm-wasm-u32': [SKIP],
+}], # ignition == True and arch == arm64
+
+['ignition == True and arch == arm', {
+ # TODO(rmcilroy,4680): Arm specific timeouts.
+ 'compiler/osr-one': [SKIP],
+ 'compiler/osr-two': [SKIP],
+ 'regress/regress-1257': [SKIP],
+}], # ignition == True and arch == arm
+
+['ignition == True and msan', {
+ # TODO(mythria,4680): All of these tests have large loops and hence slow
+ # and timeout.
+ 'compiler/osr-big': [SKIP],
+ 'compiler/osr-nested': [SKIP],
+ 'regress/regress-298269': [SKIP],
+ 'regress/regress-crbug-319860': [SKIP],
+ 'regress/regress-deep-proto': [SKIP],
+ 'try': [SKIP],
+ # Too slow for interpreter and msan.
+ 'es6/tail-call-megatest*': [SKIP],
+}], # ignition == True and msan
+
+['ignition == True and gc_stress == True', {
+ # TODO(oth,4680): failures from the bots.
+ 'es6/debug-step-destructuring-bind': [SKIP],
+ 'es6/debug-stepin-collections-foreach': [SKIP],
+ 'ignition/elided-instruction': [SKIP],
+ 'regress/regress-269': [SKIP],
+}], # ignition == True and gc_stress == True
+
+['ignition == False', {
+ # Debugger test cases that pass with full-codegen, but not ignition.
+ # These differences between full-codegen and ignition are deliberate.
+ 'ignition/elided-instruction': [FAIL],
+}], # ignition == False
+
+['ignition == True and system == windows and no_snap', {
+ # TODO(rmcilroy): Fail with nosnap and shared libraries.
'es6/array-from': [FAIL],
- 'regress-3225': [FAIL],
'es6/classes-subclass-builtins': [FAIL],
'es6/computed-property-names-classes': [FAIL],
'es6/computed-property-names-object-literals-methods': [FAIL],
- 'es6/function-length-configurable': [FAIL],
- 'es6/generators-poisoned-properties': [FAIL],
- 'es6/generators-runtime': [FAIL],
- 'es6/generators-objects': [FAIL],
- 'es6/generators-parsing': [FAIL],
+ 'es6/debug-stepin-generators': [FAIL],
+ 'es6/destructuring': [FAIL],
+ 'es6/destructuring-assignment': [FAIL],
'es6/generators-iteration': [FAIL],
+ 'es6/generators-mirror': [FAIL],
+ 'es6/generators-parsing': [FAIL],
+ 'es6/generators-poisoned-properties': [FAIL],
+ 'es6/generators-relocation': [FAIL],
'es6/generators-states': [FAIL],
'es6/iteration-semantics': [FAIL],
- 'es6/iterator-prototype': [FAIL],
- 'es6/generators-mirror': [FAIL],
'es6/object-literals-method': [FAIL],
'es6/object-literals-super': [FAIL],
- 'es6/generators-relocation': [FAIL],
- 'es6/spread-array': [FAIL],
- 'es6/generators-debug-liveedit': [FAIL],
- 'es6/spread-call': [FAIL],
- 'es6/typedarray-from': [FAIL],
- 'es6/typedarray': [FAIL],
+ 'es6/promises': [FAIL],
'es6/regress/regress-2681': [FAIL],
'es6/regress/regress-2691': [FAIL],
'es6/regress/regress-3280': [FAIL],
- 'harmony/destructuring-assignment': [FAIL],
+ 'es6/spread-array': [FAIL],
+ 'es6/spread-call': [FAIL],
+ 'es6/typedarray': [FAIL],
+ 'es6/typedarray-from': [FAIL],
'harmony/function-sent': [FAIL],
- 'harmony/reflect-enumerate-delete': [FAIL],
- 'harmony/reflect-enumerate-special-cases': [FAIL],
- 'harmony/proxies-enumerate': [FAIL],
- 'harmony/reflect-enumerate-opt': [FAIL],
- 'harmony/reflect-enumerate': [FAIL],
- 'harmony/destructuring': [FAIL],
- 'harmony/regress/regress-4482': [FAIL],
'harmony/generators': [FAIL],
'harmony/iterator-close': [FAIL],
- 'harmony/reflect-construct': [FAIL],
- 'es6/promises': [FAIL],
-
- # TODO(rmcilroy,4680): Check failed in
- # BytecodeGenerator::VisitFunctionLiteral - !shared_info.is_null().
- 'regress/regress-crbug-429159': [FAIL],
-
- # TODO(rmcilroy,4680): Pass on debug, fail on release.
- 'compiler/regress-stacktrace-methods': [PASS, ['mode == release', FAIL]],
-
- # TODO(rmcilroy,4680): Test assert failures.
- 'array-literal-feedback': [FAIL],
- 'undetectable-compare': [FAIL],
- 'debug-liveedit-2': [FAIL],
- 'es6/string-search': [FAIL],
- 'es6/mirror-collections': [FAIL],
- 'es6/regress/regress-468661': [FAIL],
- 'harmony/string-replace': [FAIL],
- 'harmony/string-match': [FAIL],
- 'harmony/string-split': [FAIL],
- 'regress/regress-2618': [FAIL],
- 'regress/regress-4121': [FAIL],
- 'regress/regress-4266': [FAIL],
- 'harmony/simd': [FAIL],
- 'regress/regress-crbug-109362': [FAIL],
- 'regress/regress-crbug-568477-2': [FAIL],
- 'regress/regress-crbug-568477-3': [FAIL],
- 'regress/regress-crbug-568477-1': [FAIL],
- 'regress/regress-2318': [FAIL],
-
- # TODO(rmcilroy, 4680): new ES6 instanceof support
- 'harmony/instanceof-es6': [SKIP],
-
- # TODO(rmcilroy,4680): Test timeouts.
- 'array-literal-transitions': [SKIP],
- 'regress/regress-crbug-517592': [SKIP],
- 'regress/regress-crbug-568477-4': [SKIP],
- 'regress/regress-crbug-409614': [SKIP],
- 'regress/regress-crbug-42414': [SKIP],
- 'regress/regress-1853': [SKIP],
- 'regress/regress-crbug-424142': [SKIP],
-}], # ignition == True
-
-['ignition == True and arch == arm64', {
- # TODO(rmcilroy,4680): Fails on Arm64 due to expecting to take less than 3
- # seconds.
- 'regress/regress-165637': [FAIL],
-}], # ignition == True and arch == arm64
-
-['ignition == True and (arch == arm or arch == arm64)', {
- # TODO(rmcilroy,4680): Arm / Arm64 specific timeouts.
- 'asm/construct-double': [SKIP],
- 'compiler/osr-nested': [SKIP],
- 'compiler/osr-one': [SKIP],
- 'compiler/osr-two': [SKIP],
- 'regress/regress-1257': [SKIP],
- 'regress/regress-165637': [SKIP],
- 'regress/regress-2185': [SKIP],
- 'regress/regress-91008': [SKIP],
- 'unicodelctest': [SKIP],
- 'unicodelctest-no-optimization': [SKIP],
-}], # ignition == True and (arch == arm or arch == arm64)
+ 'harmony/regress/regress-4482': [FAIL],
+ 'messages': [FAIL],
+ 'regress-3225': [FAIL],
+}], # ignition == True and system == windows and no_snap
##############################################################################
['gcov_coverage', {
# Tests taking too long.
'array-functions-prototype-misc': [SKIP],
- 'strong/implicit-conversions': [SKIP],
- 'strong/load-element-mutate-backing-store': [SKIP],
# Stack overflow.
'big-array-literal': [SKIP],
}], # 'gcov_coverage'
-##############################################################################
-# exclude test issues for which fixes for PPC did not make it into 5.0
-# These should be removed when we upgrade Node.js to use v8 5.1
-['arch == ppc64', {
- 'wasm/asm-wasm' : [SKIP],
-}], # 'arch == ppc64''
-
-##############################################################################
-# This test allocates a 2G block of memory and if there are multiple
-# varients this leads kills by the OOM killer, crashes or messages
-# indicating the OS cannot allocate memory, exclude for Node.js runs
-# re-evalute when we move up to v8 5.1
-[ALWAYS, {
-'regress/regress-crbug-514081': [PASS, NO_VARIANTS],
-}], # ALWAYS
-
]
diff --git a/deps/v8/test/mjsunit/object-literal.js b/deps/v8/test/mjsunit/object-literal.js
index 53188d15b8..19860ff389 100644
--- a/deps/v8/test/mjsunit/object-literal.js
+++ b/deps/v8/test/mjsunit/object-literal.js
@@ -24,6 +24,8 @@
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Flags: --harmony-function-name
var obj = {
a: 7,
@@ -218,7 +220,7 @@ function TestNumericNamesGetter(expectedKeys, object) {
assertEquals(expectedKeys, Object.keys(object));
expectedKeys.forEach(function(key) {
var descr = Object.getOwnPropertyDescriptor(object, key);
- assertEquals(key, descr.get.name);
+ assertEquals('get ' + key, descr.get.name);
});
}
TestNumericNamesGetter(['1', '2', '3', '4', '5', '6', '7', '8', '9'], {
@@ -242,7 +244,7 @@ function TestNumericNamesSetter(expectedKeys, object) {
assertEquals(expectedKeys, Object.keys(object));
expectedKeys.forEach(function(key) {
var descr = Object.getOwnPropertyDescriptor(object, key);
- assertEquals(key, descr.set.name);
+ assertEquals('set ' + key, descr.set.name);
});
}
TestNumericNamesSetter(['1', '2', '3', '4', '5', '6', '7', '8', '9'], {
diff --git a/deps/v8/test/mjsunit/parallel-optimize-disabled.js b/deps/v8/test/mjsunit/parallel-optimize-disabled.js
index 1dbce06fdf..f8d5848f0d 100644
--- a/deps/v8/test/mjsunit/parallel-optimize-disabled.js
+++ b/deps/v8/test/mjsunit/parallel-optimize-disabled.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --nodead-code-elimination --concurrent-recompilation
-// Flags: --allow-natives-syntax --legacy-const
+// Flags: --allow-natives-syntax
if (!%IsConcurrentRecompilationSupported()) {
print("Concurrent recompilation is disabled. Skipping this test.");
@@ -35,7 +35,8 @@ if (!%IsConcurrentRecompilationSupported()) {
function g() { // g() cannot be optimized.
const x = 1;
- x++;
+ // TODO(adamk): Is this test still testing anything?
+ // x++;
}
function f(x) {
diff --git a/deps/v8/test/mjsunit/property-load-across-eval.js b/deps/v8/test/mjsunit/property-load-across-eval.js
index 222c0e965e..808ac5829e 100644
--- a/deps/v8/test/mjsunit/property-load-across-eval.js
+++ b/deps/v8/test/mjsunit/property-load-across-eval.js
@@ -25,21 +25,15 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --legacy-const
-
// Tests loading of properties across eval calls.
var x = 1;
function global_function() { return 'global'; }
-const const_uninitialized;
-const const_initialized = function() { return "const_global"; }
// Test loading across an eval call that does not shadow variables.
function testNoShadowing() {
var y = 2;
function local_function() { return 'local'; }
- const local_const_uninitialized;
- const local_const_initialized = function() { return "const_local"; }
function f() {
eval('1');
assertEquals(1, x);
@@ -47,44 +41,12 @@ function testNoShadowing() {
assertEquals(2, y);
assertEquals('global', global_function());
assertEquals('local', local_function());
- var exception = false;
- try {
- const_uninitialized();
- } catch(e) {
- exception = true;
- }
- assertTrue(exception);
- assertEquals('const_global', const_initialized());
- exception = false;
- try {
- local_const_uninitialized();
- } catch(e) {
- exception = true;
- }
- assertTrue(exception);
- assertEquals('const_local', local_const_initialized());
function g() {
assertEquals(1, x);
try { typeof(asdf); } catch(e) { assertUnreachable(); }
assertEquals(2, y);
assertEquals('global', global_function());
assertEquals('local', local_function());
- var exception = false;
- try {
- const_uninitialized();
- } catch(e) {
- exception = true;
- }
- assertTrue(exception);
- assertEquals('const_global', const_initialized());
- exception = false;
- try {
- local_const_uninitialized();
- } catch(e) {
- exception = true;
- }
- assertTrue(exception);
- assertEquals('const_local', local_const_initialized());
}
g();
}
diff --git a/deps/v8/test/mjsunit/readonly.js b/deps/v8/test/mjsunit/readonly.js
index 3b090cebc1..3839731e02 100644
--- a/deps/v8/test/mjsunit/readonly.js
+++ b/deps/v8/test/mjsunit/readonly.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --harmony-proxies
+// Flags: --allow-natives-syntax
// Different ways to create an object.
diff --git a/deps/v8/test/mjsunit/regexp-lastIndex.js b/deps/v8/test/mjsunit/regexp-lastIndex.js
new file mode 100644
index 0000000000..1445b9b2ae
--- /dev/null
+++ b/deps/v8/test/mjsunit/regexp-lastIndex.js
@@ -0,0 +1,18 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// lastIndex is set according to funny rules. It is typically set only
+// for global or sticky RegExps, but on a failure to find a match, it is
+// set unconditionally. If a set fails, then it acts as if in strict mode
+// and throws.
+
+var re = /x/;
+Object.defineProperty(re, 'lastIndex', {writable: false});
+assertThrows(() => re.exec(""), TypeError);
+assertEquals(["x"], re.exec("x"));
+
+var re = /x/y;
+Object.defineProperty(re, 'lastIndex', {writable: false});
+assertThrows(() => re.exec(""), TypeError);
+assertThrows(() => re.exec("x"), TypeError);
diff --git a/deps/v8/test/mjsunit/regexp-not-sticky-yet.js b/deps/v8/test/mjsunit/regexp-not-sticky-yet.js
deleted file mode 100644
index 2002509d1d..0000000000
--- a/deps/v8/test/mjsunit/regexp-not-sticky-yet.js
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --no-harmony-regexps
-// Test that sticky regexp support is not affecting V8 when the
-// --harmony-regexps flag is not on.
-
-assertThrows(function() { eval("/foo.bar/y"); }, SyntaxError);
-assertThrows(function() { eval("/foobar/y"); }, SyntaxError);
-assertThrows(function() { eval("/foo.bar/gy"); }, SyntaxError);
-assertThrows(function() { eval("/foobar/gy"); }, SyntaxError);
-assertThrows(function() { new RegExp("foo.bar", "y"); }, SyntaxError);
-assertThrows(function() { new RegExp("foobar", "y"); }, SyntaxError);
-assertThrows(function() { new RegExp("foo.bar", "gy"); }, SyntaxError);
-assertThrows(function() { new RegExp("foobar", "gy"); }, SyntaxError);
-
-var re = /foo.bar/;
-assertEquals("/foo.bar/", "" + re);
-var plain = /foobar/;
-assertEquals("/foobar/", "" + plain);
-
-re.compile("foo.bar");
-assertEquals(void 0, re.sticky);
-
-var global = /foo.bar/g;
-assertEquals("/foo.bar/g", "" + global);
-var plainglobal = /foobar/g;
-assertEquals("/foobar/g", "" + plainglobal);
-
-assertEquals(void 0, re.sticky);
-re.sticky = true; // Has no effect on the regexp, just sets a property.
-assertTrue(re.sticky);
-
-assertTrue(re.test("..foo.bar"));
-
-re.lastIndex = -1; // Ignored for non-global, non-sticky.
-assertTrue(re.test("..foo.bar"));
-assertEquals(-1, re.lastIndex);
-
-re.lastIndex = -1; // Ignored for non-global, non-sticky.
-assertTrue(!!re.exec("..foo.bar"));
-assertEquals(-1, re.lastIndex);
diff --git a/deps/v8/test/mjsunit/regexp-string-methods.js b/deps/v8/test/mjsunit/regexp-string-methods.js
index 56604a6539..fa01a33ce4 100644
--- a/deps/v8/test/mjsunit/regexp-string-methods.js
+++ b/deps/v8/test/mjsunit/regexp-string-methods.js
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --no-harmony-regexp-exec
+
// Regexp shouldn't use String.prototype.slice()
var s = new String("foo");
assertEquals("f", s.slice(0,1));
diff --git a/deps/v8/test/mjsunit/regress-3225.js b/deps/v8/test/mjsunit/regress-3225.js
index 97165a80dd..51fac89bf6 100644
--- a/deps/v8/test/mjsunit/regress-3225.js
+++ b/deps/v8/test/mjsunit/regress-3225.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --debug-eval-readonly-locals
+// Flags: --expose-debug-as debug
Debug = debug.Debug
@@ -15,13 +15,13 @@ function listener(event, exec_state, event_data, data) {
if (debug_step == 0) {
assertEquals(1, exec_state.frame(0).evaluate('a').value());
assertEquals(3, exec_state.frame(0).evaluate('b').value());
- exec_state.frame(0).evaluate("a = 4").value(); // no effect.
+ exec_state.frame(0).evaluate("a = 4").value();
debug_step++;
} else {
- assertEquals(1, exec_state.frame(0).evaluate('a').value());
+ assertEquals(4, exec_state.frame(0).evaluate('a').value());
assertEquals(3, exec_state.frame(0).evaluate('b').value());
exec_state.frame(0).evaluate("set_a_to_5()");
- exec_state.frame(0).evaluate("b = 5").value(); // no effect.
+ exec_state.frame(0).evaluate("b = 5").value();
}
} catch (e) {
failure = e;
@@ -43,10 +43,10 @@ function* generator(a, b) {
var foo = generator(1, 2);
-assertEquals(1, foo.next().value);
+assertEquals(4, foo.next().value);
assertEquals(3, foo.next().value);
assertEquals(5, foo.next().value);
-assertEquals(3, foo.next().value);
+assertEquals(5, foo.next().value);
assertNull(failure);
Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/regress-sync-optimized-lists.js b/deps/v8/test/mjsunit/regress-sync-optimized-lists.js
index 2ce60aa836..0f7eeba0d3 100644
--- a/deps/v8/test/mjsunit/regress-sync-optimized-lists.js
+++ b/deps/v8/test/mjsunit/regress-sync-optimized-lists.js
@@ -3,7 +3,6 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --block-concurrent-recompilation
-// Flags: --no-concurrent-osr
function Ctor() {
this.a = 1;
diff --git a/deps/v8/test/mjsunit/regress/get-array-keys-oob.js b/deps/v8/test/mjsunit/regress/get-array-keys-oob.js
new file mode 100644
index 0000000000..22a59e8beb
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/get-array-keys-oob.js
@@ -0,0 +1,6 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Array.prototype[10000000] = 1;
+Array(1000).join();
diff --git a/deps/v8/test/mjsunit/regress/regress-1178598.js b/deps/v8/test/mjsunit/regress/regress-1178598.js
index 2056a9d8da..cca5016d6b 100644
--- a/deps/v8/test/mjsunit/regress/regress-1178598.js
+++ b/deps/v8/test/mjsunit/regress/regress-1178598.js
@@ -25,27 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --legacy-const
-
// Regression test cases for issue 1178598.
-// Make sure const-initialization doesn't conflict
-// with heap-allocated locals for catch variables.
-var value = (function(){
- try { } catch(e) {
- // Force the 'e' variable to be heap-allocated
- // by capturing it in a function closure.
- (function() { e; });
- }
- // Make sure the two definitions of 'e' do
- // not conflict in any way.
- eval("const e=1");
- return e;
-})();
-
-assertEquals(1, value);
-
-
// Make sure that catch variables can be accessed using eval.
var value = (function() {
diff --git a/deps/v8/test/mjsunit/regress/regress-1182832.js b/deps/v8/test/mjsunit/regress/regress-1182832.js
deleted file mode 100644
index 4d214695b5..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-1182832.js
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --legacy-const
-
-var caught = false;
-try {
- (function () {
- var e = 0;
- eval("const e = 1;");
- })();
-} catch (e) {
- caught = true;
- assertTrue(e instanceof TypeError);
-}
-assertTrue(caught);
diff --git a/deps/v8/test/mjsunit/regress/regress-1199637.js b/deps/v8/test/mjsunit/regress/regress-1199637.js
index 34ab5144a2..ae7c5e03f0 100644
--- a/deps/v8/test/mjsunit/regress/regress-1199637.js
+++ b/deps/v8/test/mjsunit/regress/regress-1199637.js
@@ -25,13 +25,12 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --legacy-const
+// Flags: --allow-natives-syntax
-// Make sure that we can introduce global variables (using
-// both var and const) that shadow even READ_ONLY variables
-// in the prototype chain.
-const NONE = 0;
-const READ_ONLY = 1;
+// Make sure that we can introduce global variables that shadow even
+// READ_ONLY variables in the prototype chain.
+var NONE = 0;
+var READ_ONLY = 1;
// Use DeclareGlobal...
%AddNamedProperty(this.__proto__, "a", 1234, NONE);
@@ -41,7 +40,7 @@ assertEquals(5678, a);
%AddNamedProperty(this.__proto__, "b", 1234, NONE);
assertEquals(1234, b);
-eval("const b = 5678;");
+eval("var b = 5678;");
assertEquals(5678, b);
%AddNamedProperty(this.__proto__, "c", 1234, READ_ONLY);
@@ -51,7 +50,7 @@ assertEquals(5678, c);
%AddNamedProperty(this.__proto__, "d", 1234, READ_ONLY);
assertEquals(1234, d);
-eval("const d = 5678;");
+eval("var d = 5678;");
assertEquals(5678, d);
// Use DeclareContextSlot...
@@ -62,7 +61,7 @@ assertEquals(5678, x);
%AddNamedProperty(this.__proto__, "y", 1234, NONE);
assertEquals(1234, y);
-eval("with({}) { const y = 5678; }");
+eval("with({}) { var y = 5678; }");
assertEquals(5678, y);
%AddNamedProperty(this.__proto__, "z", 1234, READ_ONLY);
@@ -72,5 +71,5 @@ assertEquals(5678, z);
%AddNamedProperty(this.__proto__, "w", 1234, READ_ONLY);
assertEquals(1234, w);
-eval("with({}) { const w = 5678; }");
+eval("with({}) { var w = 5678; }");
assertEquals(5678, w);
diff --git a/deps/v8/test/mjsunit/regress/regress-1201933.js b/deps/v8/test/mjsunit/regress/regress-1201933.js
deleted file mode 100644
index 4a7c65a5c1..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-1201933.js
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --legacy-const
-
-// Make sure this doesn't fail with an assertion
-// failure during lazy compilation.
-
-var caught = false;
-try {
- (function() {
- const a;
- var a;
- })();
-} catch (e) {
- caught = true;
-}
-assertTrue(caught);
diff --git a/deps/v8/test/mjsunit/regress/regress-1207276.js b/deps/v8/test/mjsunit/regress/regress-1207276.js
deleted file mode 100644
index b5d01815e2..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-1207276.js
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --legacy-const
-
-try {
- const x=n,Glo0al;
-} catch(e){}
-
-delete Date;
-function X(){String(Glo0al)}
-X();
-X();
-X();
diff --git a/deps/v8/test/mjsunit/regress/regress-1213575.js b/deps/v8/test/mjsunit/regress/regress-1213575.js
deleted file mode 100644
index fc35b88103..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-1213575.js
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Make sure that a const definition does not try
-// to pass 'the hole' to a defined setter.
-
-// Flags: --legacy-const
-
-this.__defineSetter__('x', function(value) { assertTrue(value === 1); });
-
-var caught = false;
-try {
- eval('const x = 1');
-} catch(e) {
- assertTrue(e instanceof TypeError);
- caught = true;
-}
-assertTrue(caught);
diff --git a/deps/v8/test/mjsunit/regress/regress-186.js b/deps/v8/test/mjsunit/regress/regress-186.js
index e10ed8f25d..0c2dbc696d 100644
--- a/deps/v8/test/mjsunit/regress/regress-186.js
+++ b/deps/v8/test/mjsunit/regress/regress-186.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --legacy-const
-
// Make sure that eval can introduce a local variable called __proto__.
// See http://code.google.com/p/v8/issues/detail?id=186
@@ -49,15 +47,6 @@ function testLocal() {
assertEquals(o, eval("__proto__"));
}
-function testConstLocal() {
- // Add const property called __proto__ to the extension object.
- eval("const __proto__ = o");
- // Check that the extension object's prototype did not change.
- eval("var x = 27");
- assertFalse(setterCalled, "prototype of extension object changed");
- assertEquals(o, eval("__proto__"));
-}
-
function testGlobal() {
// Assign to the global __proto__ property.
eval("__proto__ = o");
@@ -69,5 +58,4 @@ function testGlobal() {
}
runTest(testLocal);
-runTest(testConstLocal);
runTest(testGlobal);
diff --git a/deps/v8/test/mjsunit/regress/regress-220.js b/deps/v8/test/mjsunit/regress/regress-220.js
index 32c6471c48..cd38a478cc 100644
--- a/deps/v8/test/mjsunit/regress/regress-220.js
+++ b/deps/v8/test/mjsunit/regress/regress-220.js
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --no-harmony-restrictive-declarations
+
function foo(f) { eval(f); }
// Ensure that compiling a declaration of a function does not crash.
diff --git a/deps/v8/test/mjsunit/regress/regress-2618.js b/deps/v8/test/mjsunit/regress/regress-2618.js
index b3cfffd92c..6e52bcad78 100644
--- a/deps/v8/test/mjsunit/regress/regress-2618.js
+++ b/deps/v8/test/mjsunit/regress/regress-2618.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --use-osr --allow-natives-syntax --no-concurrent-osr
+// Flags: --use-osr --allow-natives-syntax
function f() {
do {
diff --git a/deps/v8/test/mjsunit/regress/regress-3138.js b/deps/v8/test/mjsunit/regress/regress-3138.js
index 6f0430c855..4f607ed6d2 100644
--- a/deps/v8/test/mjsunit/regress/regress-3138.js
+++ b/deps/v8/test/mjsunit/regress/regress-3138.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --legacy-const
-
(function f(){
assertEquals("function", typeof f);
})();
@@ -30,13 +28,3 @@ assertEquals("undefined", typeof f);
assertEquals("undefined", typeof a);
assertEquals(2, o.a);
})();
-
-// const initialization is not intercepted by with scope.
-(function() {
- var o = { a: 1 };
- with (o) {
- const a = 2;
- }
- assertEquals(2, a);
- assertEquals(1, o.a);
-})();
diff --git a/deps/v8/test/mjsunit/regress/regress-325676.js b/deps/v8/test/mjsunit/regress/regress-325676.js
index 7aae0cdaab..6c23d0a6bc 100644
--- a/deps/v8/test/mjsunit/regress/regress-325676.js
+++ b/deps/v8/test/mjsunit/regress/regress-325676.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug --debug-eval-readonly-locals
+// Flags: --expose-debug-as debug
// If a function parameter is forced to be context allocated,
// debug evaluate need to resolve it to a context slot instead of
@@ -51,12 +51,12 @@ Debug.setListener(listener);
function f(arg) {
expected = arg;
debugger;
- assertEquals(expected, arg);
+ assertEquals("evaluated", arg);
arg = "value";
expected = arg;
debugger;
- assertEquals(expected, arg);
+ assertEquals("evaluated", arg);
// Forces arg to be context allocated even though a parameter.
function g() { arg; }
diff --git a/deps/v8/test/mjsunit/regress/regress-343609.js b/deps/v8/test/mjsunit/regress/regress-343609.js
index 5205ca1330..520b54e52c 100644
--- a/deps/v8/test/mjsunit/regress/regress-343609.js
+++ b/deps/v8/test/mjsunit/regress/regress-343609.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --block-concurrent-recompilation
-// Flags: --no-concurrent-osr --expose-gc
+// Flags: --expose-gc
function Ctor() {
this.a = 1;
diff --git a/deps/v8/test/mjsunit/regress/regress-436896.js b/deps/v8/test/mjsunit/regress/regress-436896.js
deleted file mode 100644
index fee44dee8c..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-436896.js
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --legacy-const
-
-function f(x) {
- const x = 0;
- return x;
-}
-
-function g(x) {
- return f(x);
-}
-
-%OptimizeFunctionOnNextCall(g);
-assertThrows(function() { g(42); }, SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-4576.js b/deps/v8/test/mjsunit/regress/regress-4576.js
deleted file mode 100644
index c55c69580a..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-4576.js
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Flags: --harmony-sloppy --legacy-const
-
-// Should trigger a runtime error, not an early error.
-function f() {
- const x;
- var x;
-}
-assertThrows(f, SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-4693.js b/deps/v8/test/mjsunit/regress/regress-4693.js
index 6145964607..13b4e2b68e 100644
--- a/deps/v8/test/mjsunit/regress/regress-4693.js
+++ b/deps/v8/test/mjsunit/regress/regress-4693.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-sloppy-function --nolegacy-const
+// Flags: --harmony-sloppy-function
// In sloppy mode we allow function redeclarations within blocks for webcompat.
(function() {
diff --git a/deps/v8/test/mjsunit/regress/regress-4769.js b/deps/v8/test/mjsunit/regress/regress-4769.js
new file mode 100644
index 0000000000..6fbd01fe14
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4769.js
@@ -0,0 +1,12 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// https://bugs.chromium.org/p/v8/issues/detail?id=4769
+
+Object.getPrototypeOf([])[Symbol.iterator] = () => assertUnreachable();
+
+JSON.stringify({foo: [42]});
+JSON.stringify({foo: [42]}, []);
+JSON.stringify({foo: [42]}, undefined, ' ');
+JSON.stringify({foo: [42]}, [], ' ');
diff --git a/deps/v8/test/mjsunit/regress/regress-4825.js b/deps/v8/test/mjsunit/regress/regress-4825.js
new file mode 100644
index 0000000000..5ad096f3ed
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4825.js
@@ -0,0 +1,95 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function enumerate(o) {
+ var keys = [];
+ for (var key in o) keys.push(key);
+ return keys;
+}
+
+(function testSlowSloppyArgumentsElements() {
+ function slowSloppyArguments(a, b, c) {
+ arguments[10000] = "last";
+ arguments[4000] = "first";
+ arguments[6000] = "second";
+ arguments[5999] = "x";
+ arguments[3999] = "y";
+ return arguments;
+ }
+ assertEquals(["0", "1", "2", "3999", "4000", "5999", "6000", "10000"],
+ Object.keys(slowSloppyArguments(1, 2, 3)));
+
+ assertEquals(["0", "1", "2", "3999", "4000", "5999", "6000", "10000"],
+ enumerate(slowSloppyArguments(1,2,3)));
+})();
+
+(function testSlowSloppyArgumentsElementsNotEnumerable() {
+ function slowSloppyArguments(a, b, c) {
+ Object.defineProperty(arguments, 10000, {
+ enumerable: false, configurable: false, value: "NOPE"
+ });
+ arguments[4000] = "first";
+ arguments[6000] = "second";
+ arguments[5999] = "x";
+ arguments[3999] = "y";
+ return arguments;
+ }
+
+ assertEquals(["0", "1", "2", "3999", "4000", "5999", "6000"],
+ Object.keys(slowSloppyArguments(1, 2, 3)));
+
+ assertEquals(["0", "1", "2", "3999", "4000", "5999", "6000"],
+ enumerate(slowSloppyArguments(1,2,3)));
+})();
+
+(function testFastSloppyArgumentsElements() {
+ function fastSloppyArguments(a, b, c) {
+ arguments[5] = 1;
+ arguments[7] = 0;
+ arguments[3] = 2;
+ return arguments;
+ }
+ assertEquals(["0", "1", "2", "3", "5", "7"],
+ Object.keys(fastSloppyArguments(1, 2, 3)));
+
+ assertEquals(
+ ["0", "1", "2", "3", "5", "7"], enumerate(fastSloppyArguments(1, 2, 3)));
+
+ function fastSloppyArguments2(a, b, c) {
+ delete arguments[0];
+ arguments[0] = "test";
+ return arguments;
+ }
+
+ assertEquals(["0", "1", "2"], Object.keys(fastSloppyArguments2(1, 2, 3)));
+ assertEquals(["0", "1", "2"], enumerate(fastSloppyArguments2(1, 2, 3)));
+})();
+
+(function testFastSloppyArgumentsElementsNotEnumerable() {
+ function fastSloppyArguments(a, b, c) {
+ Object.defineProperty(arguments, 5, {
+ enumerable: false, configurable: false, value: "NOPE"
+ });
+ arguments[7] = 0;
+ arguments[3] = 2;
+ return arguments;
+ }
+ assertEquals(
+ ["0", "1", "2", "3", "7"], Object.keys(fastSloppyArguments(1, 2, 3)));
+
+ assertEquals(
+ ["0", "1", "2", "3", "7"], enumerate(fastSloppyArguments(1,2,3)));
+
+ function fastSloppyArguments2(a, b, c) {
+ delete arguments[0];
+ Object.defineProperty(arguments, 1, {
+ enumerable: false, configurable: false, value: "NOPE"
+ });
+ arguments[0] = "test";
+ return arguments;
+ }
+
+ assertEquals(["0", "2"], Object.keys(fastSloppyArguments2(1, 2, 3)));
+ assertEquals(["0", "2"], enumerate(fastSloppyArguments2(1, 2, 3)));
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-4970.js b/deps/v8/test/mjsunit/regress/regress-4970.js
new file mode 100644
index 0000000000..da0033b34f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4970.js
@@ -0,0 +1,15 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
+
+function g() {
+ var f;
+ class C extends eval("f = () => delete C; Array") {}
+ f();
+}
+
+assertThrows(g, SyntaxError);
+%OptimizeFunctionOnNextCall(g);
+assertThrows(g, SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-5006.js b/deps/v8/test/mjsunit/regress/regress-5006.js
new file mode 100644
index 0000000000..29f145de0a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5006.js
@@ -0,0 +1,11 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(x) { return Math.imul(x|0, 2); }
+print(foo(1));
+print(foo(1));
+%OptimizeFunctionOnNextCall(foo);
+print(foo(1));
diff --git a/deps/v8/test/mjsunit/regress/regress-5036.js b/deps/v8/test/mjsunit/regress/regress-5036.js
new file mode 100644
index 0000000000..036edd949f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5036.js
@@ -0,0 +1,7 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-unicode-regexps
+
+assertEquals(["1\u212a"], /\d\w/ui.exec("1\u212a"));
diff --git a/deps/v8/test/mjsunit/regress/regress-536751.js b/deps/v8/test/mjsunit/regress/regress-536751.js
index b2d19e422a..0707e008ea 100644
--- a/deps/v8/test/mjsunit/regress/regress-536751.js
+++ b/deps/v8/test/mjsunit/regress/regress-536751.js
@@ -3,6 +3,7 @@
// found in the LICENSE file.
// Flags: --harmony-sloppy --harmony-sloppy-function --harmony-sloppy-let
+// Flags: --no-harmony-restrictive-declarations
// At some point, this code led to DCHECK errors in debug mode
diff --git a/deps/v8/test/mjsunit/regress/regress-542099.js b/deps/v8/test/mjsunit/regress/regress-542099.js
index f3655da53c..eef49538cc 100644
--- a/deps/v8/test/mjsunit/regress/regress-542099.js
+++ b/deps/v8/test/mjsunit/regress/regress-542099.js
@@ -3,6 +3,7 @@
// found in the LICENSE file.
// Flags: --harmony-sloppy --harmony-sloppy-function
+// Flags: --no-harmony-restrictive-declarations
// Previously, this caused a CHECK fail in debug mode
// https://code.google.com/p/chromium/issues/detail?id=542099
diff --git a/deps/v8/test/mjsunit/regress/regress-542100.js b/deps/v8/test/mjsunit/regress/regress-542100.js
index bc03e6f9e2..70fb5dc147 100644
--- a/deps/v8/test/mjsunit/regress/regress-542100.js
+++ b/deps/v8/test/mjsunit/regress/regress-542100.js
@@ -3,6 +3,7 @@
// found in the LICENSE file.
// Flags: --harmony-sloppy --harmony-sloppy-function
+// Flags: --no-harmony-restrictive-declarations
(function() {
var x = {a: 1}
diff --git a/deps/v8/test/mjsunit/regress/regress-552302.js b/deps/v8/test/mjsunit/regress/regress-552302.js
index b9f712a619..87c4718335 100644
--- a/deps/v8/test/mjsunit/regress/regress-552302.js
+++ b/deps/v8/test/mjsunit/regress/regress-552302.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-destructuring-bind --allow-natives-syntax
+// Flags: --allow-natives-syntax
assertThrows('var %OptimizeFunctionOnNextCall()', SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-554865.js b/deps/v8/test/mjsunit/regress/regress-554865.js
index 9b66d79b35..d16ac918ae 100644
--- a/deps/v8/test/mjsunit/regress/regress-554865.js
+++ b/deps/v8/test/mjsunit/regress/regress-554865.js
@@ -1,8 +1,6 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// Flags: --harmony-default-parameters
(function() {
var x = {};
diff --git a/deps/v8/test/mjsunit/regress/regress-572589.js b/deps/v8/test/mjsunit/regress/regress-572589.js
index 36092a2bf4..1fd755ad1e 100644
--- a/deps/v8/test/mjsunit/regress/regress-572589.js
+++ b/deps/v8/test/mjsunit/regress/regress-572589.js
@@ -3,7 +3,6 @@
// found in the LICENSE file.
//
// Flags: --allow-natives-syntax --no-lazy
-// Flags: --harmony-destructuring-bind
"use strict";
eval();
diff --git a/deps/v8/test/mjsunit/regress/regress-575364.js b/deps/v8/test/mjsunit/regress/regress-575364.js
index 73136c5538..c0652058fa 100644
--- a/deps/v8/test/mjsunit/regress/regress-575364.js
+++ b/deps/v8/test/mjsunit/regress/regress-575364.js
@@ -8,5 +8,5 @@ function f() {
"use asm";
}
-assertFalse(_WASMEXP_ == undefined);
-assertThrows(function() { _WASMEXP_.instantiateModuleFromAsm(f.toString()); });
+assertFalse(Wasm == undefined);
+assertThrows(function() { Wasm.instantiateModuleFromAsm(f.toString()); });
diff --git a/deps/v8/test/mjsunit/regress/regress-590074.js b/deps/v8/test/mjsunit/regress/regress-590074.js
new file mode 100644
index 0000000000..80ae41b276
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-590074.js
@@ -0,0 +1,29 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-gc
+
+var __v_5 = {};
+
+function __f_10() {
+ var __v_2 = [0, 0, 0];
+ __v_2[0] = 0;
+ gc();
+ return __v_2;
+}
+
+function __f_2(array) {
+ array[1] = undefined;
+}
+
+function __f_9() {
+ var __v_4 = __f_10();
+ __f_2(__f_10());
+ __v_5 = __f_10();
+ __v_4 = __f_10();
+ __f_2(__v_5);
+}
+__f_9();
+%OptimizeFunctionOnNextCall(__f_9);
+__f_9();
diff --git a/deps/v8/test/mjsunit/regress/regress-592341.js b/deps/v8/test/mjsunit/regress/regress-592341.js
new file mode 100644
index 0000000000..2fa475294f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-592341.js
@@ -0,0 +1,36 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function id(a) {
+ return a;
+}
+
+(function LiteralCompareNullDeopt() {
+ function f() {
+ return id(null == %DeoptimizeNow());
+ }
+
+ %OptimizeFunctionOnNextCall(f);
+ assertTrue(f());
+})();
+
+(function LiteralCompareUndefinedDeopt() {
+ function f() {
+ return id(undefined == %DeoptimizeNow());
+ }
+
+ %OptimizeFunctionOnNextCall(f);
+ assertTrue(f());
+})();
+
+(function LiteralCompareTypeofDeopt() {
+ function f() {
+ return id("undefined" == typeof(%DeoptimizeNow()));
+ }
+
+ %OptimizeFunctionOnNextCall(f);
+ assertTrue(f());
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-592353.js b/deps/v8/test/mjsunit/regress/regress-592353.js
new file mode 100644
index 0000000000..f4e3b6859a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-592353.js
@@ -0,0 +1,10 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --no-lazy
+
+with ({}) {}
+f = ({x}) => { };
+%OptimizeFunctionOnNextCall(f);
+f({});
diff --git a/deps/v8/test/mjsunit/regress/regress-593299.js b/deps/v8/test/mjsunit/regress/regress-593299.js
new file mode 100644
index 0000000000..255a033f59
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-593299.js
@@ -0,0 +1,17 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-tailcalls
+
+"use strict";
+
+function h(global) { return global.boom(); }
+function g() { var r = h({}); return r; }
+function f() {
+ var o = {};
+ o.__defineGetter__('prop1', g);
+ o.prop1;
+}
+
+assertThrows(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-595319.js b/deps/v8/test/mjsunit/regress/regress-595319.js
new file mode 100644
index 0000000000..46ca27444f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-595319.js
@@ -0,0 +1,39 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// https://bugs.chromium.org/p/chromium/issues/detail?id=595319
+// Ensure exceptions are checked for by Array.prototype.concat from adding
+// an element, and that elements are added to array subclasses appropriately
+
+// If adding a property does throw, the exception is propagated
+class MyException extends Error { }
+class NoDefinePropertyArray extends Array {
+ constructor(...args) {
+ super(...args);
+ return new Proxy(this, {
+ defineProperty() { throw new MyException(); }
+ });
+ }
+}
+assertThrows(() => new NoDefinePropertyArray().concat([1]), MyException);
+
+// Ensure elements are added to the instance, rather than calling [[Set]].
+class ZeroGetterArray extends Array { get 0() {} };
+assertArrayEquals([1], new ZeroGetterArray().concat(1));
+
+// Frozen arrays lead to throwing
+
+class FrozenArray extends Array {
+ constructor(...args) { super(...args); Object.freeze(this); }
+}
+assertThrows(() => new FrozenArray().concat([1]), TypeError);
+
+// Non-configurable non-writable zero leads to throwing
+class ZeroFrozenArray extends Array {
+ constructor(...args) {
+ super(...args);
+ Object.defineProperty(this, 0, {value: 1});
+ }
+}
+assertThrows(() => new ZeroFrozenArray().concat([1]), TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-596718.js b/deps/v8/test/mjsunit/regress/regress-596718.js
new file mode 100644
index 0000000000..6116427e73
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-596718.js
@@ -0,0 +1,12 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Error.prepareStackTrace = function(e, frames) { return frames; }
+assertThrows(() => new Error().stack[0].getMethodName.call({}), TypeError);
+
+Error.prepareStackTrace = function(e, frames) { return frames.map(frame => new Proxy(frame, {})); }
+assertThrows(() => new Error().stack[0].getMethodName(), TypeError);
+
+Error.prepareStackTrace = function(e, frames) { return frames; }
+assertEquals(null, new Error().stack[0].getMethodName());
diff --git a/deps/v8/test/mjsunit/regress/regress-599089-array-push.js b/deps/v8/test/mjsunit/regress/regress-599089-array-push.js
new file mode 100644
index 0000000000..9049a4b8b6
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-599089-array-push.js
@@ -0,0 +1,10 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+var array = [1.2, 1.2];
+array.length = 0;
+array.push(undefined);
+assertEquals(1, array.length);
+assertEquals([undefined], array);
diff --git a/deps/v8/test/mjsunit/regress/regress-599412.js b/deps/v8/test/mjsunit/regress/regress-599412.js
new file mode 100644
index 0000000000..d5c411d0f1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-599412.js
@@ -0,0 +1,22 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function h(a) {
+ if (!a) return false;
+ print();
+}
+
+function g(a) { return a.length; }
+g('0');
+g('1');
+
+function f() {
+ h(g([]));
+}
+
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-599414-array-concat-fast-path.js b/deps/v8/test/mjsunit/regress/regress-599414-array-concat-fast-path.js
new file mode 100644
index 0000000000..3819233f99
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-599414-array-concat-fast-path.js
@@ -0,0 +1,11 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var largeArray = 'x'.repeat(999).split('');
+var a = largeArray;
+
+assertThrows(() => {
+ for (;;) {
+ a = a.concat(a, a, a, a, a, a);
+ }}, RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-599710.js b/deps/v8/test/mjsunit/regress/regress-599710.js
new file mode 100644
index 0000000000..dd1ba8d969
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-599710.js
@@ -0,0 +1,49 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var f1 = function() { while (1) { } }
+
+function g1() {
+ var s = "hey";
+ f1 = function() { return true; }
+ if (f1()) { return s; }
+}
+
+%OptimizeFunctionOnNextCall(g1);
+assertEquals("hey", g1());
+
+var f2 = function() { do { } while (1); }
+
+function g2() {
+ var s = "hey";
+ f2 = function() { return true; }
+ if (f2()) { return s; }
+}
+
+%OptimizeFunctionOnNextCall(g2);
+assertEquals("hey", g2());
+
+var f3 = function() { for (;;); }
+
+function g3() {
+ var s = "hey";
+ f3 = function() { return true; }
+ if (f3()) { return s; }
+}
+
+%OptimizeFunctionOnNextCall(g3);
+assertEquals("hey", g3());
+
+var f4 = function() { for (;;); }
+
+function g4() {
+ var s = "hey";
+ f4 = function() { return true; }
+ while (f4()) { return s; }
+}
+
+%OptimizeFunctionOnNextCall(g4);
+assertEquals("hey", g4());
diff --git a/deps/v8/test/mjsunit/regress/regress-599825.js b/deps/v8/test/mjsunit/regress/regress-599825.js
new file mode 100644
index 0000000000..83075ee9ab
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-599825.js
@@ -0,0 +1,17 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+function __f_97(stdlib, buffer) {
+ "use asm";
+ var __v_30 = new stdlib.Int32Array(buffer);
+ function __f_74() {
+ var __v_27 = 4;
+ __v_30[__v_27 >> __v_2] = ((__v_30[-1073741825]|-10) + 2) | 0;
+ }
+}
+assertThrows(function() {
+ var module = Wasm.instantiateModuleFromAsm( __f_97.toString());
+});
diff --git a/deps/v8/test/mjsunit/regress/regress-602970.js b/deps/v8/test/mjsunit/regress/regress-602970.js
new file mode 100644
index 0000000000..927d3bf5a8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-602970.js
@@ -0,0 +1,17 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --debug-code
+
+// flag --debug-code ensures that we'll abort with a failed smi check without
+// the fix.
+
+var num = new Number(10);
+Array.prototype.__defineGetter__(0,function(){
+ return num;
+})
+Array.prototype.__defineSetter__(0,function(value){
+})
+var str=decodeURI("%E7%9A%84");
+assertEquals(0x7684, str.charCodeAt(0));
diff --git a/deps/v8/test/mjsunit/regress/regress-641.js b/deps/v8/test/mjsunit/regress/regress-641.js
deleted file mode 100644
index c29b2af242..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-641.js
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Regression test for http://code.google.com/p/v8/issues/detail?id=641.
-
-// Flags: --legacy-const
-
- function f(){
- while (window + 1) {
- const window=[,];
- }
-}
-f()
diff --git a/deps/v8/test/mjsunit/regress/regress-799761.js b/deps/v8/test/mjsunit/regress/regress-799761.js
deleted file mode 100644
index 7d09da56fa..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-799761.js
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --legacy-const
-
-// const variables should be read-only
-const c = 42;
-c = 87;
-assertEquals(42, c);
-
-
-// const variables are not behaving like other JS variables when it comes
-// to scoping - in fact they behave more sanely. Inside a 'with' they do
-// not interfere with the 'with' scopes.
-
-(function () {
- with ({ x: 42 }) {
- const x = 7;
- }
- x = 5;
- assertEquals(7, x);
-})();
-
-
-// const variables may be declared but never initialized, in which case
-// their value is undefined.
-
-(function (sel) {
- if (sel == 0)
- with ({ x: 42 }) {
- const x;
- }
- else
- x = 3;
- x = 5;
- assertTrue(typeof x == 'undefined');
-})(1);
-
-
-// const variables may be initialized to undefined.
-(function () {
- with ({ x: 42 }) {
- const x = undefined;
- }
- x = 5;
- assertTrue(typeof x == 'undefined');
-})();
-
-
-// const variables may be accessed in inner scopes like any other variable.
-(function () {
- function bar() {
- assertEquals(7, x);
- }
- with ({ x: 42 }) {
- const x = 7;
- }
- x = 5
- bar();
-})();
-
-
-// const variables may be declared via 'eval'
-(function () {
- with ({ x: 42 }) {
- eval('const x = 7');
- }
- x = 5;
- assertEquals(7, x);
-})();
diff --git a/deps/v8/test/mjsunit/regress/regress-88591.js b/deps/v8/test/mjsunit/regress/regress-88591.js
index e7f410d7b0..878e7918b1 100644
--- a/deps/v8/test/mjsunit/regress/regress-88591.js
+++ b/deps/v8/test/mjsunit/regress/regress-88591.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --legacy-const
-
// Regression test for a crash. A data property in the global object's
// prototype shadowed by a setter in the global object's prototype's
// prototype would crash or assert when seen by Runtime_DeclareContextSlot.
@@ -36,9 +34,9 @@ Object.prototype.__defineGetter__('x', function () { return 0; });
this.__proto__ = { x: 1 };
-try { fail; } catch (e) { eval('const x = 2'); }
+try { fail; } catch (e) { eval('var x = 2'); }
var o = Object.getOwnPropertyDescriptor(this, 'x');
assertFalse(called);
assertEquals(2, o.value);
-assertEquals(false, o.writable);
+assertEquals(true, o.writable);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-323936.js b/deps/v8/test/mjsunit/regress/regress-crbug-323936.js
index 6e75729c18..ca543b068a 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-323936.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-323936.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --debug-eval-readonly-locals
+// Flags: --expose-debug-as debug
Debug = debug.Debug;
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-476477-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-476477-1.js
new file mode 100644
index 0000000000..881c602bd7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-476477-1.js
@@ -0,0 +1,21 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var obj = {
+ _leftTime: 12345678,
+ _divider: function() {
+ var s = Math.floor(this._leftTime / 3600);
+ var e = Math.floor(s / 24);
+ var i = s % 24;
+ return {
+ s: s,
+ e: e,
+ i: i,
+ }
+ }
+}
+
+for (var i = 0; i < 1000; i++) {
+ obj._divider();
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-476477-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-476477-2.js
new file mode 100644
index 0000000000..4dbb41b7d4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-476477-2.js
@@ -0,0 +1,16 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(x) {
+ var s = Math.floor(x / 3600);
+ Math.floor(s);
+ return s % 24;
+}
+
+foo(12345678);
+foo(12345678);
+%OptimizeFunctionOnNextCall(foo);
+foo(12345678);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-501809.js b/deps/v8/test/mjsunit/regress/regress-crbug-501809.js
index 855b36a054..aa235bc4ea 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-501809.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-501809.js
@@ -6,4 +6,6 @@
var sab = new SharedArrayBuffer(8);
var ta = new Int32Array(sab);
ta.__defineSetter__('length', function() {;});
-Atomics.compareExchange(ta, 4294967295, 0, 0);
+assertThrows(function() {
+ Atomics.compareExchange(ta, 4294967295, 0, 0);
+}, RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-505907.js b/deps/v8/test/mjsunit/regress/regress-crbug-505907.js
index c8d4bac9be..ec7b80baa8 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-505907.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-505907.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies
-
try {
var p = new Proxy({}, {
getPropertyDescriptor: function() { return [] }
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-506956.js b/deps/v8/test/mjsunit/regress/regress-crbug-506956.js
index 73eb2f2220..8bca70803f 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-506956.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-506956.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies
-
try {
var p = new Proxy({}, {
getPropertyDescriptor: function() { throw "boom"; }
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-513471.js b/deps/v8/test/mjsunit/regress/regress-crbug-513471.js
new file mode 100644
index 0000000000..48c793e512
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-513471.js
@@ -0,0 +1,10 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var g = (function*(){});
+var f = g();
+%OptimizeFunctionOnNextCall(g);
+f.next();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-537444.js b/deps/v8/test/mjsunit/regress/regress-crbug-537444.js
new file mode 100644
index 0000000000..1cb1f9ad92
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-537444.js
@@ -0,0 +1,28 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-tailcalls --allow-natives-syntax
+
+"use strict";
+
+function f(x) {
+ return x;
+}
+
+function g(x) {
+ return false ? 0 : f(x, 1);
+}
+
+function h(x) {
+ var z = g(x, 1);
+ return z + 1;
+}
+
+%SetForceInlineFlag(g);
+%SetForceInlineFlag(f);
+
+h(1);
+h(1);
+%OptimizeFunctionOnNextCall(h);
+h("a");
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-548580.js b/deps/v8/test/mjsunit/regress/regress-crbug-548580.js
index 4a2f5e152f..cf0afff1c9 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-548580.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-548580.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-regexps
-
function store(v) {
var re = /(?=[d#.])/;
re.a = v;
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-568477-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-568477-2.js
index 64dd6777c9..31f7e6e869 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-568477-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-568477-2.js
@@ -8,7 +8,7 @@ var Debug = debug.Debug;
var expected = ["debugger;",
"var x = y;",
"new Promise(f).catch(call_f_with_deeper_stack);",
- "var a = 1;", "", "var a = 1;",
+ "var a = 1;", "var a = 1;",
"debugger;",
"var x = y;"];
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-573858.js b/deps/v8/test/mjsunit/regress/regress-crbug-573858.js
index 37a9eb84e5..270df5a64a 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-573858.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-573858.js
@@ -9,7 +9,7 @@ var throw_type_error = Object.getOwnPropertyDescriptor(
function create_initial_map() { this instanceof throw_type_error }
%OptimizeFunctionOnNextCall(create_initial_map);
-create_initial_map();
+assertThrows(create_initial_map);
function test() { new throw_type_error }
%OptimizeFunctionOnNextCall(test);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-587068.js b/deps/v8/test/mjsunit/regress/regress-crbug-587068.js
new file mode 100644
index 0000000000..4cdd3ffbdb
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-587068.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// The Crankshaft fast case for String.fromCharCode used to unconditionally
+// deoptimize on non int32 indices.
+function foo(i) { return String.fromCharCode(i); }
+foo(33);
+foo(33);
+%OptimizeFunctionOnNextCall(foo);
+foo(33.3);
+assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-589472.js b/deps/v8/test/mjsunit/regress/regress-crbug-589472.js
new file mode 100644
index 0000000000..41a5bd6329
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-589472.js
@@ -0,0 +1,94 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --stack-size=100
+
+// Not guaranteed to throw because the stack limits are different on all
+// architectures, hence we use try-catch instead of assertThrows here.
+try { f() } catch(e) { assertInstanceof(e, RangeError) }
+
+function f() {
+ return Math.max(
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" +
+ "boom", 1, 2, 3, 4, 5, 6, 7, 8, 9);
+};
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-592340.js b/deps/v8/test/mjsunit/regress/regress-crbug-592340.js
new file mode 100644
index 0000000000..49794b815e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-592340.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+class MyArray extends Array { }
+Object.prototype[Symbol.species] = MyArray;
+delete Array[Symbol.species];
+__v_1 = Math.pow(2, 31);
+__v_2 = [];
+__v_2[__v_1] = 31;
+__v_4 = [];
+__v_4[__v_1 - 2] = 33;
+assertThrows(() => __v_2.concat(__v_4), RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-593697-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-593697-2.js
new file mode 100644
index 0000000000..1fcbaebd83
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-593697-2.js
@@ -0,0 +1,18 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-tailcalls
+
+"use strict";
+
+%SetForceInlineFlag(Math.cos);
+
+var f5 = (function f6(stdlib) {
+ "use asm";
+ var cos = stdlib.Math.cos;
+ function f5() {
+ return cos();
+ }
+ return { f5: f5 };
+})(this, {}).f5();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-594955.js b/deps/v8/test/mjsunit/regress/regress-crbug-594955.js
new file mode 100644
index 0000000000..69db053f4a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-594955.js
@@ -0,0 +1,11 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function g(s, key) { return s[key]; }
+
+assertEquals(g(new String("a"), "length"), 1);
+assertEquals(g(new String("a"), "length"), 1);
+assertEquals(g("a", 32), undefined);
+assertEquals(g("a", "length"), 1);
+assertEquals(g(new String("a"), "length"), 1);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-595615.js b/deps/v8/test/mjsunit/regress/regress-crbug-595615.js
new file mode 100644
index 0000000000..33095013fa
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-595615.js
@@ -0,0 +1,15 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-tailcalls
+
+"use strict";
+
+function f(o) {
+ return o.x();
+}
+try { f({ x: 1 }); } catch(e) {}
+try { f({ x: 1 }); } catch(e) {}
+%OptimizeFunctionOnNextCall(f);
+try { f({ x: 1 }); } catch(e) {}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-595738.js b/deps/v8/test/mjsunit/regress/regress-crbug-595738.js
new file mode 100644
index 0000000000..d2409ab4ae
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-595738.js
@@ -0,0 +1,7 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function foo() { return 1; }
+var x = {toJSON: foo.bind()};
+assertEquals("1", JSON.stringify(x));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-596394.js b/deps/v8/test/mjsunit/regress/regress-crbug-596394.js
new file mode 100644
index 0000000000..37c8d2777b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-596394.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// In ES#sec-array.prototype.concat
+// When concat makes a new integer-indexed exotic object, the resulting properties
+// are non-configurable and cannot have CreateDataPropertyOrThrow called on them,
+// so it throws a TypeError on failure to make a new property.
+
+__v_0 = new Uint8Array(100);
+array = new Array(10);
+array.__proto__ = __v_0;
+assertThrows(() => Array.prototype.concat.call(array), TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-598998.js b/deps/v8/test/mjsunit/regress/regress-crbug-598998.js
new file mode 100644
index 0000000000..4cfe1a13af
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-598998.js
@@ -0,0 +1,32 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-tailcalls --allow-natives-syntax
+
+"use strict";
+
+function deopt_function(func) {
+ %DeoptimizeFunction(func);
+}
+
+function f(x) {
+ return deopt_function(h);
+}
+
+function g(x) {
+ return f(x, 1);
+}
+
+function h(x) {
+ g(x, 1);
+}
+
+%SetForceInlineFlag(g);
+%SetForceInlineFlag(f);
+%NeverOptimizeFunction(deopt_function);
+
+h(1);
+h(1);
+%OptimizeFunctionOnNextCall(h);
+h(1);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-599067.js b/deps/v8/test/mjsunit/regress/regress-crbug-599067.js
new file mode 100644
index 0000000000..bc10aa44c2
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-599067.js
@@ -0,0 +1,11 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+try {
+ var o = {};
+ var p = new Proxy({}, o);
+ Error.captureStackTrace(p);
+} catch(e) {
+ assertEquals("Cannot pass private property name to proxy trap", e.message);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-599073-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-599073-1.js
new file mode 100644
index 0000000000..fc89fe4c66
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-599073-1.js
@@ -0,0 +1,11 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Object.defineProperty(Boolean.prototype, "v", {get:constructor});
+
+function foo(b) { return b.v; }
+
+foo(true);
+foo(true);
+foo(true);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-599073-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-599073-2.js
new file mode 100644
index 0000000000..947468a08b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-599073-2.js
@@ -0,0 +1,11 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Object.defineProperty(Boolean.prototype, "v", {set:constructor});
+
+function foo(b) { b.v = 1; }
+
+foo(true);
+foo(true);
+foo(true);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-599073-3.js b/deps/v8/test/mjsunit/regress/regress-crbug-599073-3.js
new file mode 100644
index 0000000000..2892f562aa
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-599073-3.js
@@ -0,0 +1,11 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Object.defineProperty(Number.prototype, "v", {get:constructor});
+
+function foo(b) { return b.v; }
+
+foo(2);
+foo(3);
+foo(4);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-599073-4.js b/deps/v8/test/mjsunit/regress/regress-crbug-599073-4.js
new file mode 100644
index 0000000000..0d6854cbb3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-599073-4.js
@@ -0,0 +1,11 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Object.defineProperty(Number.prototype, "v", {set:constructor});
+
+function foo(b) { b.v = 1; }
+
+foo(2);
+foo(3);
+foo(4);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-599714.js b/deps/v8/test/mjsunit/regress/regress-crbug-599714.js
new file mode 100644
index 0000000000..98de3406b7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-599714.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var custom_toString = function() {
+ var boom = custom_toString.caller;
+ return boom;
+}
+
+var object = {};
+object.toString = custom_toString;
+
+try { Object.hasOwnProperty(object); } catch (e) {}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-600995.js b/deps/v8/test/mjsunit/regress/regress-crbug-600995.js
new file mode 100644
index 0000000000..c532608799
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-600995.js
@@ -0,0 +1,10 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --noharmony-iterator-close
+
+// The {Set} function will produce a different type feedback vector layout
+// depending on whether Harmony iterator finalization is enabled or not.
+
+new Set();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-601617.js b/deps/v8/test/mjsunit/regress/regress-crbug-601617.js
new file mode 100644
index 0000000000..7d03fef838
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-601617.js
@@ -0,0 +1,32 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-tailcalls
+
+function h() {
+ var res = g.arguments[0].x;
+ return res;
+}
+
+function g(o) {
+ var res = h();
+ return res;
+}
+
+function f1() {
+ var o = { x : 1 };
+ var res = g(o);
+ return res;
+}
+
+function f0() {
+ "use strict";
+ return f1(5);
+}
+
+%NeverOptimizeFunction(h);
+f0();
+f0();
+%OptimizeFunctionOnNextCall(f0);
+assertEquals(1, f0());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-604680.js b/deps/v8/test/mjsunit/regress/regress-crbug-604680.js
new file mode 100644
index 0000000000..3df2f2047a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-604680.js
@@ -0,0 +1,45 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-tailcalls
+
+function h() {
+ var res = g.arguments;
+ return res;
+}
+
+function g(o) {
+ var res = h();
+ return res;
+}
+
+function f1() {
+ var o = { x : 42 };
+ var res = g(o);
+ return 1;
+}
+
+function f0(a, b) {
+ "use strict";
+ return f1(5);
+}
+
+function boom(b) {
+ if (b) throw new Error("boom!");
+}
+
+%NeverOptimizeFunction(h);
+f0();
+f0();
+%OptimizeFunctionOnNextCall(f0);
+
+boom(false);
+boom(false);
+%OptimizeFunctionOnNextCall(boom);
+
+try {
+ f0(1, 2, 3);
+ boom(true, 1, 2, 3);
+} catch (e) {
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-608278.js b/deps/v8/test/mjsunit/regress/regress-crbug-608278.js
new file mode 100644
index 0000000000..6840159f30
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-608278.js
@@ -0,0 +1,62 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-tailcalls --allow-natives-syntax
+
+"use strict";
+
+function h() {
+ var stack = (new Error("boom")).stack;
+ print(stack);
+ %DeoptimizeFunction(f1);
+ %DeoptimizeFunction(f2);
+ %DeoptimizeFunction(f3);
+ %DeoptimizeFunction(g);
+ %DeoptimizeFunction(h);
+ return 1;
+}
+%NeverOptimizeFunction(h);
+
+function g(v) {
+ return h();
+}
+%SetForceInlineFlag(g);
+
+
+function f1() {
+ var o = {};
+ o.__defineGetter__('p', g);
+ o.p;
+}
+
+f1();
+f1();
+%OptimizeFunctionOnNextCall(f1);
+f1();
+
+
+function f2() {
+ var o = {};
+ o.__defineSetter__('q', g);
+ o.q = 1;
+}
+
+f2();
+f2();
+%OptimizeFunctionOnNextCall(f2);
+f2();
+
+
+function A() {
+ return h();
+}
+
+function f3() {
+ new A();
+}
+
+f3();
+f3();
+%OptimizeFunctionOnNextCall(f3);
+f3();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-617524.js b/deps/v8/test/mjsunit/regress/regress-crbug-617524.js
new file mode 100644
index 0000000000..b32eeef5f4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-617524.js
@@ -0,0 +1,18 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc --always-opt
+
+function f(a,b,c) {
+ a.a = b;
+ a[1] = c;
+ return a;
+}
+
+f(new Array(5),.5,0);
+var o1 = f(new Array(5),0,.5);
+gc();
+var o2 = f(new Array(5),0,0);
+var o3 = f(new Array(5),0);
+assertEquals(0, o3.a);
diff --git a/deps/v8/test/mjsunit/regress/regress-debug-deopt-while-recompile.js b/deps/v8/test/mjsunit/regress/regress-debug-deopt-while-recompile.js
index 52c32e9cc3..e1d6ff2066 100644
--- a/deps/v8/test/mjsunit/regress/regress-debug-deopt-while-recompile.js
+++ b/deps/v8/test/mjsunit/regress/regress-debug-deopt-while-recompile.js
@@ -77,8 +77,8 @@ f();
g();
// Assert that break point is set at expected location.
-assertTrue(Debug.showBreakPoints(f).indexOf("[B0]var baz = bar;") > 0);
-assertTrue(Debug.showBreakPoints(g).indexOf("[B0]var baz = bar;") > 0);
+assertTrue(Debug.showBreakPoints(f).indexOf("var baz = [B0]bar;") > 0);
+assertTrue(Debug.showBreakPoints(g).indexOf("var baz = [B0]bar;") > 0);
assertEquals(2, listened);
assertNull(exception);
diff --git a/deps/v8/test/mjsunit/regress/regress-debugger-redirect.js b/deps/v8/test/mjsunit/regress/regress-debugger-redirect.js
deleted file mode 100644
index daa6fa7670..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-debugger-redirect.js
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-debug-as debug --legacy-const
-
-function f(x) {
- // This function compiles into code that only throws a redeclaration
- // error. It contains no stack check and has no function body.
- const x = 0;
- return x;
-}
-
-function g() {
- f(0);
-}
-
-var exception = null;
-var called = false;
-var Debug = debug.Debug;
-Debug.setBreakOnException();
-
-function listener(event, exec_state, event_data, data) {
- if (event != Debug.DebugEvent.Exception) return;
- try {
- called = true;
- Debug.setBreakPoint(f, 1);
- } catch (e) {
- exception = e;
- }
-}
-
-Debug.setListener(listener);
-
-assertThrows(g);
-assertNull(exception);
-assertTrue(called);
diff --git a/deps/v8/test/mjsunit/regress/regress-handle-illegal-redeclaration.js b/deps/v8/test/mjsunit/regress/regress-handle-illegal-redeclaration.js
deleted file mode 100644
index fc4ba900db..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-handle-illegal-redeclaration.js
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --always-opt --legacy-const
-
-var x = 0;
-
-function f() {
- const c;
- var c;
- return 0 + x;
-}
-
-assertThrows(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-inline-strong-as-construct.js b/deps/v8/test/mjsunit/regress/regress-inline-strong-as-construct.js
deleted file mode 100644
index 2fa5001f90..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-inline-strong-as-construct.js
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --strong-mode
-
-// This tests that inlining a constructor call to a function which cannot be
-// used as a constructor (e.g. strong mode function) still throws correctly.
-
-function g() {
- "use strong";
-}
-
-function f() {
- return new g();
-}
-
-assertThrows(f);
-assertThrows(f);
-%OptimizeFunctionOnNextCall(f);
-assertThrows(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex2.js b/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex2.js
deleted file mode 100644
index 6a5ba9195c..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex2.js
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax
-
-String.fromCharCode(0xFFF, 0xFFF);
-String.fromCharCode(0x7C, 0x7C);
-%OptimizeFunctionOnNextCall(String.fromCharCode);
-String.fromCharCode(0x7C, 0x7C);
-String.fromCharCode(0xFFF, 0xFFF);
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-5009.js b/deps/v8/test/mjsunit/regress/regress-v8-5009.js
new file mode 100644
index 0000000000..f4995488b3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-5009.js
@@ -0,0 +1,61 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function fn1() {
+}
+
+function fn2() {
+}
+
+function fn3() {
+}
+
+function create(id) {
+ // Just some `FunctionTemplate` to hang on
+ var o = new version();
+
+ o.id = id;
+ o[0] = null;
+
+ return o;
+}
+
+function setM1(o) {
+ o.m1 = fn1;
+}
+
+function setM2(o) {
+ o.m2 = fn2;
+}
+
+function setAltM2(o) {
+ // Failing StoreIC happens here
+ o.m2 = fn3;
+}
+
+function setAltM1(o) {
+ o.m1 = null;
+}
+
+function test(o) {
+ o.m2();
+ o.m1();
+}
+
+var p0 = create(0);
+var p1 = create(1);
+var p2 = create(2);
+
+setM1(p0);
+setM1(p1);
+setM1(p2);
+
+setM2(p0);
+setAltM2(p0);
+setAltM1(p0);
+
+setAltM2(p1);
+
+setAltM2(p2);
+test(p2);
diff --git a/deps/v8/test/mjsunit/stack-traces-custom.js b/deps/v8/test/mjsunit/stack-traces-custom.js
index fbf650ddbf..75fad636e0 100644
--- a/deps/v8/test/mjsunit/stack-traces-custom.js
+++ b/deps/v8/test/mjsunit/stack-traces-custom.js
@@ -2,19 +2,25 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var o = { f: function() { throw new Error(); } };
+var o = {
+ f: function() { throw new Error(); },
+ get j() { o.h(); },
+ set k(_) { o.j; },
+};
o.g1 = function() { o.f() }
o.g2 = o.g1;
o.h = function() { o.g1() }
-Error.prepareStackTrace = function(e, frames) { return frames; }
-
try {
- o.h();
+ o.k = 42;
} catch (e) {
+ Error.prepareStackTrace = function(e, frames) { return frames; };
var frames = e.stack;
+ Error.prepareStackTrace = undefined;
assertEquals("f", frames[0].getMethodName());
assertEquals(null, frames[1].getMethodName());
assertEquals("h", frames[2].getMethodName());
- assertEquals(null, frames[3].getMethodName());
+ assertEquals("j", frames[3].getMethodName());
+ assertEquals("k", frames[4].getMethodName());
+ assertEquals(null, frames[5].getMethodName());
}
diff --git a/deps/v8/test/mjsunit/strict-mode.js b/deps/v8/test/mjsunit/strict-mode.js
index 63dc9d0bda..f7f95da88c 100644
--- a/deps/v8/test/mjsunit/strict-mode.js
+++ b/deps/v8/test/mjsunit/strict-mode.js
@@ -1149,7 +1149,7 @@ function CheckArgumentsPillDescriptor(func, name) {
function strict() {
"use strict";
- // Returning result via local variable to avoid tail call optimization.
+ // Returning result via local variable to avoid tail call elimination.
var res = return_my_caller();
return res;
}
@@ -1165,7 +1165,7 @@ function CheckArgumentsPillDescriptor(func, name) {
(function TestNonStrictFunctionCallerPill() {
function strict(n) {
"use strict";
- // Returning result via local variable to avoid tail call optimization.
+ // Returning result via local variable to avoid tail call elimination.
var res = non_strict(n);
return res;
}
@@ -1195,7 +1195,7 @@ function CheckArgumentsPillDescriptor(func, name) {
(function TestNonStrictFunctionCallerDescriptorPill() {
function strict(n) {
"use strict";
- // Returning result via local variable to avoid tail call optimization.
+ // Returning result via local variable to avoid tail call elimination.
var res = non_strict(n);
return res;
}
diff --git a/deps/v8/test/mjsunit/string-fromcharcode.js b/deps/v8/test/mjsunit/string-fromcharcode.js
index ad3f7a96fb..ac51682b72 100644
--- a/deps/v8/test/mjsunit/string-fromcharcode.js
+++ b/deps/v8/test/mjsunit/string-fromcharcode.js
@@ -103,11 +103,6 @@ for (var i = 0; i < 10; i++) {
test(i);
}
-assertEquals("AAAA", String.fromCharCode(65, 65, 65, 65));
-assertEquals("AAAA", String.fromCharCode(65, 65, 65, 65));
-%OptimizeFunctionOnNextCall(String.fromCharCode);
-assertEquals("AAAA", String.fromCharCode(65, 65, 65, 65));
-
// Test the custom IC works correctly when the map changes.
for (var i = 0; i < 10; i++) {
var expected = (i < 5) ? " " : 42;
diff --git a/deps/v8/test/mjsunit/strong/arrays.js b/deps/v8/test/mjsunit/strong/arrays.js
deleted file mode 100644
index b9e4fad357..0000000000
--- a/deps/v8/test/mjsunit/strong/arrays.js
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode
-
-(function NoEllisions() {
- assertThrows("'use strong'; [,]", SyntaxError);
- assertThrows("'use strong'; [,3]", SyntaxError);
- assertThrows("'use strong'; [3,,4]", SyntaxError);
- assertTrue(eval("'use strong'; [3,] !== [3,4,]"));
-})();
diff --git a/deps/v8/test/mjsunit/strong/class-extend-null.js b/deps/v8/test/mjsunit/strong/class-extend-null.js
deleted file mode 100644
index 3ed7b36dbb..0000000000
--- a/deps/v8/test/mjsunit/strong/class-extend-null.js
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode --allow-natives-syntax
-
-(function() {
-"use strict";
-
-let foo = null;
-
-function nullLiteral() {
- class Class1 extends null {
- constructor() {
- super();
- }
- }
-}
-
-function nullVariable() {
- class Class2 extends foo {
- constructor() {
- super();
- }
- }
-}
-
-function nullLiteralClassExpr() {
- (class extends null {});
-}
-
-function nullVariableClassExpr() {
- (class extends foo {});
-}
-
-assertDoesNotThrow(nullLiteral);
-%OptimizeFunctionOnNextCall(nullLiteral);
-assertDoesNotThrow(nullLiteral);
-
-assertDoesNotThrow(nullVariable);
-%OptimizeFunctionOnNextCall(nullVariable);
-assertDoesNotThrow(nullVariable);
-
-assertDoesNotThrow(nullLiteralClassExpr);
-%OptimizeFunctionOnNextCall(nullLiteralClassExpr);
-assertDoesNotThrow(nullLiteralClassExpr);
-
-assertDoesNotThrow(nullVariableClassExpr);
-%OptimizeFunctionOnNextCall(nullVariableClassExpr);
-assertDoesNotThrow(nullVariableClassExpr);
-})();
-
-(function() {
-"use strong";
-
-let foo = null;
-
-function nullLiteral() {
- class Class1 extends null {
- constructor() {
- super();
- }
- }
-}
-
-function nullVariable() {
- class Class2 extends foo {
- constructor() {
- super();
- }
- }
-}
-
-function nullLiteralClassExpr() {
- (class extends null {});
-}
-
-function nullVariableClassExpr() {
- (class extends foo {});
-}
-
-assertThrows(nullLiteral, TypeError);
-%OptimizeFunctionOnNextCall(nullLiteral);
-assertThrows(nullLiteral, TypeError);
-
-assertThrows(nullVariable, TypeError);
-%OptimizeFunctionOnNextCall(nullVariable);
-assertThrows(nullVariable, TypeError);
-
-assertThrows(nullLiteralClassExpr, TypeError);
-%OptimizeFunctionOnNextCall(nullLiteralClassExpr);
-assertThrows(nullLiteralClassExpr, TypeError);
-
-assertThrows(nullVariableClassExpr, TypeError);
-%OptimizeFunctionOnNextCall(nullVariableClassExpr);
-assertThrows(nullVariableClassExpr, TypeError);
-})();
diff --git a/deps/v8/test/mjsunit/strong/class-literals.js b/deps/v8/test/mjsunit/strong/class-literals.js
deleted file mode 100644
index a0e7280abc..0000000000
--- a/deps/v8/test/mjsunit/strong/class-literals.js
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode --allow-natives-syntax
-
-'use strict';
-
-function assertWeakClassWeakInstances(x) {
- assertFalse(%IsStrong(x));
- assertFalse(%IsStrong(x.prototype));
- assertFalse(%IsStrong(new x));
-}
-
-function assertWeakClassStrongInstances(x) {
- assertFalse(%IsStrong(x));
- assertFalse(%IsStrong(x.prototype));
- assertTrue(%IsStrong(new x));
-}
-
-function assertStrongClassWeakInstances(x) {
- assertTrue(%IsStrong(x));
- assertTrue(%IsStrong(x.prototype));
- assertFalse(%IsStrong(new x));
-}
-
-function assertStrongClassStrongInstances(x) {
- assertTrue(%IsStrong(x));
- assertTrue(%IsStrong(x.prototype));
- assertTrue(%IsStrong(new x));
-}
-
-function getWeakClass() {
- return (class {});
-}
-
-function getWeakClassExtends(x) {
- return (class extends x {});
-}
-
-function getStrongClass() {
- "use strong";
- return (class {});
-}
-
-function getStrongClassExtends(x) {
- "use strong";
- return (class extends x {});
-}
-
-(function SimpleWeakClassLiterals() {
- class C {};
- class D extends C {};
- class E extends Object {};
-
- assertWeakClassWeakInstances(C);
- assertWeakClassWeakInstances(D);
- assertWeakClassWeakInstances(E);
-
- assertWeakClassWeakInstances(class {});
- assertWeakClassWeakInstances(class extends Object {});
- assertWeakClassWeakInstances(class extends C {});
- assertWeakClassWeakInstances(class extends class {} {});
-})();
-
-(function SimpleStrongClassLiterals() {
- 'use strong';
- class C {};
- class D extends C {};
-
- assertStrongClassStrongInstances(C);
- assertStrongClassStrongInstances(D);
-
- assertStrongClassStrongInstances(class {});
- assertStrongClassStrongInstances(class extends C {});
- assertStrongClassStrongInstances(class extends class {} {});
-})();
-
-(function MixedWeakClassLiterals() {
- class C extends getStrongClass() {};
- class D extends getStrongClassExtends((class {})) {};
- class E extends getStrongClassExtends(C) {};
-
- assertWeakClassStrongInstances(C);
- assertWeakClassStrongInstances(class extends getStrongClass() {});
-
- assertWeakClassWeakInstances(D);
- assertWeakClassWeakInstances(
- class extends getStrongClassExtends((class {})) {});
-
- assertWeakClassStrongInstances(E);
- assertWeakClassStrongInstances(
- class extends getStrongClassExtends(class extends getStrongClass() {}) {});
-})();
-
-(function MixedStrongClassLiterals() {
- 'use strong';
- class C extends getWeakClass() {};
- class D extends getWeakClassExtends((class {})) {};
- class E extends getWeakClassExtends(C) {};
- class F extends Object {};
-
- assertStrongClassWeakInstances(C);
- assertStrongClassWeakInstances(class extends getWeakClass() {});
-
- assertStrongClassStrongInstances(D);
- assertStrongClassStrongInstances(
- class extends getWeakClassExtends((class {})) {});
-
- assertStrongClassWeakInstances(E);
- assertStrongClassWeakInstances(
- class extends getWeakClassExtends(class extends getWeakClass() {}) {});
-
- assertStrongClassWeakInstances(F);
- assertStrongClassWeakInstances(class extends Object {});
-})();
-
-(function WeakMonkeyPatchedClassLiterals() {
- class C {};
- assertWeakClassWeakInstances(C);
- C.__proto__ = getStrongClass();
- // C's default constructor doesn't call super.
- assertWeakClassWeakInstances(C);
-
- class D extends Object {};
- assertWeakClassWeakInstances(D);
- D.__proto__ = getStrongClass();
- // D is a derived class, so its default constructor calls super.
- assertWeakClassStrongInstances(D);
-
- class E extends (class {}) {};
- E.__proto__ = C;
- assertWeakClassWeakInstances(E);
-
- class F extends (class {}) {};
- F.__proto__ = D;
- assertWeakClassStrongInstances(F);
-
- class G extends getStrongClass() {};
- G.__proto__ = getWeakClass();
- assertWeakClassWeakInstances(G);
-})();
-
-(function StrongMonkeyPatchedClassLiterals() {
- let C = getStrongClassExtends(getWeakClassExtends(getStrongClass()));
- let D = getStrongClassExtends(getWeakClassExtends(getWeakClass()));
-
- assertStrongClassStrongInstances(C);
- C.__proto__.__proto__ = getWeakClass();
- assertStrongClassWeakInstances(C);
- C.__proto__.__proto__ = getStrongClass();
- assertStrongClassStrongInstances(C);
-
- assertStrongClassWeakInstances(D);
- D.__proto__.__proto__ = getStrongClass();
- assertStrongClassStrongInstances(D);
- D.__proto__.__proto__ = getWeakClass();
- assertStrongClassWeakInstances(D);
-})();
diff --git a/deps/v8/test/mjsunit/strong/class-object-frozen.js b/deps/v8/test/mjsunit/strong/class-object-frozen.js
deleted file mode 100644
index 2c442c0d51..0000000000
--- a/deps/v8/test/mjsunit/strong/class-object-frozen.js
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode
-
-"use strict";
-
-function getClass() {
- class Foo {
- static get bar() { return 0 }
- get bar() { return 0 }
- }
- return Foo;
-}
-
-function getClassExpr() {
- return (class { static get bar() { return 0 } get bar() { return 0 } });
-}
-
-function getClassStrong() {
- "use strong";
- class Foo {
- static get bar() { return 0 }
- get bar() { return 0 }
- }
- return Foo;
-}
-
-function getClassExprStrong() {
- "use strong";
- return (class { static get bar() { return 0 } get bar() { return 0 } });
-}
-
-function addProperty(o) {
- o.baz = 1;
-}
-
-function convertPropertyToData(o) {
- assertTrue(o.hasOwnProperty("bar"));
- Object.defineProperty(o, "bar", { value: 1 });
-}
-
-function testWeakClass(classFunc) {
- assertDoesNotThrow(function(){addProperty(classFunc())});
- assertDoesNotThrow(function(){addProperty(classFunc().prototype)});
- assertDoesNotThrow(function(){convertPropertyToData(classFunc())});
- assertDoesNotThrow(function(){convertPropertyToData(classFunc().prototype)});
-}
-
-function testStrongClass(classFunc) {
- assertThrows(function(){addProperty(classFunc())}, TypeError);
- assertThrows(function(){addProperty(classFunc().prototype)}, TypeError);
- assertThrows(function(){convertPropertyToData(classFunc())}, TypeError);
- assertThrows(function(){convertPropertyToData(classFunc().prototype)},
- TypeError);
-}
-
-testWeakClass(getClass);
-testWeakClass(getClassExpr);
-
-testStrongClass(getClassStrong);
-testStrongClass(getClassExprStrong);
-
-// Check strong classes don't freeze their parents.
-(function() {
- let parent = getClass();
-
- let classFunc = function() {
- "use strong";
- class Foo extends parent {
- static get bar() { return 0 }
- get bar() { return 0 }
- }
- return Foo;
- }
-
- testStrongClass(classFunc);
- assertDoesNotThrow(function(){addProperty(parent)});
- assertDoesNotThrow(function(){convertPropertyToData(parent)});
-})();
-
-// Check strong classes don't freeze their children.
-(function() {
- let parent = getClassStrong();
-
- let classFunc = function() {
- class Foo extends parent {
- static get bar() { return 0 }
- get bar() { return 0 }
- }
- return Foo;
- }
-
- assertThrows(function(){addProperty(parent)}, TypeError);
- assertThrows(function(){convertPropertyToData(parent)}, TypeError);
- testWeakClass(classFunc);
-})();
diff --git a/deps/v8/test/mjsunit/strong/classes.js b/deps/v8/test/mjsunit/strong/classes.js
deleted file mode 100644
index 92dea0f2a9..0000000000
--- a/deps/v8/test/mjsunit/strong/classes.js
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode
-
-'use strong';
-
-class C {}
-
-let indirect_eval = eval;
-
-function assertTypeError(script) { assertThrows(script, TypeError) }
-function assertSyntaxError(script) { assertThrows(script, SyntaxError) }
-function assertReferenceError(script) { assertThrows(script, ReferenceError) }
-
-(function ImmutableClassBindings() {
- class D {}
- assertTypeError(function(){ indirect_eval("C = 0") });
- assertEquals('function', typeof C);
- assertEquals('function', typeof D);
- assertTypeError("'use strong'; (function f() {class E {}; E = 0})()");
-})();
-
-function constructor(body) {
- return "'use strong'; " +
- "(class extends Object { constructor() { " + body + " } })";
-}
-
-(function NoSuperExceptCall() {
- assertSyntaxError(constructor("super.a;"));
- assertSyntaxError(constructor("super['a'];"));
- assertSyntaxError(constructor("super.f();"));
- assertSyntaxError(constructor("super.a;"));
- assertSyntaxError(constructor("{ super.a }"));
- assertSyntaxError(constructor("if (0) super.a;"));
- // TODO(rossberg): arrow functions do not handle 'super' yet.
- // assertSyntaxError(constructor("() => super.a;"));
- // assertSyntaxError(constructor("() => () => super.a;"));
- // assertSyntaxError(constructor("() => { () => if (0) { super.a; } }"));
-})();
-
-(function NoMissingSuper() {
- assertReferenceError(constructor(""));
- assertReferenceError(constructor("1"));
-})();
-
-(function NoNestedSuper() {
- assertSyntaxError(constructor("super(), 0;"));
- assertSyntaxError(constructor("(super());"));
- assertSyntaxError(constructor("super().a;"));
- assertSyntaxError(constructor("(() => super())();"));
- assertSyntaxError(constructor("{ super(); }"));
- assertSyntaxError(constructor("if (1) super();"));
- assertSyntaxError(constructor("label: super();"));
-})();
-
-(function NoDuplicateSuper() {
- assertSyntaxError(constructor("super(), super();"));
- assertSyntaxError(constructor("super(); super();"));
- assertSyntaxError(constructor("super(); (super());"));
- assertSyntaxError(constructor("super(); { super() }"));
- assertSyntaxError(constructor("super(); (() => super())();"));
-})();
-
-(function NoSuperAfterThis() {
- assertSyntaxError(constructor("this.a = 0, super();"));
- assertSyntaxError(constructor("this.a = 0; super();"));
- assertSyntaxError(constructor("this.a = 0; super(); this.b = 0;"));
- assertSyntaxError(constructor("this.a = 0; (super());"));
- assertSyntaxError(constructor("super(this.a = 0);"));
-})();
-
-(function NoReturnValue() {
- assertSyntaxError(constructor("return {};"));
- assertSyntaxError(constructor("return undefined;"));
- assertSyntaxError(constructor("return this;"));
- assertSyntaxError(constructor("return this.a = 0;"));
- assertSyntaxError(constructor("{ return {}; }"));
- assertSyntaxError(constructor("if (1) return {};"));
-})();
-
-(function NoReturnBeforeSuper() {
- assertSyntaxError(constructor("return; super();"));
- assertSyntaxError(constructor("if (0) return; super();"));
- assertSyntaxError(constructor("{ return; } super();"));
-})();
-
-(function NoReturnBeforeThis() {
- assertSyntaxError(constructor("return; this.a = 0;"));
- assertSyntaxError(constructor("if (0) return; this.a = 0;"));
- assertSyntaxError(constructor("{ return; } this.a = 0;"));
-})();
-
-(function NoThisExceptInitialization() {
- assertSyntaxError(constructor("this;"));
- assertSyntaxError(constructor("this.a;"));
- assertSyntaxError(constructor("this['a'];"));
- assertSyntaxError(constructor("this();"));
- assertSyntaxError(constructor("this.a();"));
- assertSyntaxError(constructor("this.a.b = 0;"));
- assertSyntaxError(constructor("{ this }"));
- assertSyntaxError(constructor("if (0) this;"));
- // TODO(rossberg): this does not handle arrow functions yet.
- // assertSyntaxError(constructor("() => this;"));
- // assertSyntaxError(constructor("() => () => this;"));
- // assertSyntaxError(constructor("() => { () => if (0) { this; } }"));
-})();
-
-(function NoNestedThis() {
- assertSyntaxError(constructor("(this.a = 0);"));
- assertSyntaxError(constructor("{ this.a = 0; }"));
- assertSyntaxError(constructor("if (0) this.a = 0;"));
- // TODO(rossberg): this does not handle arrow functions yet.
- // assertSyntaxError(constructor("() => this.a = 0;"));
- // assertSyntaxError(constructor("() => { this.a = 0; }"));
- assertSyntaxError(constructor("label: this.a = 0;"));
-})();
diff --git a/deps/v8/test/mjsunit/strong/delete.js b/deps/v8/test/mjsunit/strong/delete.js
deleted file mode 100644
index 349af0bf39..0000000000
--- a/deps/v8/test/mjsunit/strong/delete.js
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode
-
-(function NoDelete() {
- const o = {a: 0};
- assertThrows("'use strong'; delete o.a", SyntaxError);
- assertThrows("'use strong'; delete o", SyntaxError);
-})();
diff --git a/deps/v8/test/mjsunit/strong/destructuring.js b/deps/v8/test/mjsunit/strong/destructuring.js
deleted file mode 100644
index 30f6183f62..0000000000
--- a/deps/v8/test/mjsunit/strong/destructuring.js
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Flags: --harmony-destructuring-bind
-// Flags: --strong-mode --allow-natives-syntax
-
-(function() {
- var f = (function() {
- "use strong";
- return function f({ x = function() { return []; } }) { return x(); };
- })();
- var a = f({ x: undefined });
- assertTrue(%IsStrong(a));
-
- // TODO(rossberg): Loading non-existent properties during destructuring should
- // not throw in strong mode.
- assertThrows(function() { f({}); }, TypeError);
-
- function weakf({ x = function() { return []; } }) { return x(); }
- a = weakf({});
- assertFalse(%IsStrong(a));
-
- function outerf() { return []; }
- var f2 = (function() {
- "use strong";
- return function f2({ x = outerf }) { return x(); };
- })();
- a = f2({ x: undefined });
- assertFalse(%IsStrong(a));
-})();
diff --git a/deps/v8/test/mjsunit/strong/empty-statement.js b/deps/v8/test/mjsunit/strong/empty-statement.js
deleted file mode 100644
index 65edf74733..0000000000
--- a/deps/v8/test/mjsunit/strong/empty-statement.js
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode
-
-(function NoEmptySubStatement() {
- assertThrows("'use strong'; if (1);", SyntaxError);
- assertThrows("'use strong'; if (1) {} else;", SyntaxError);
- assertThrows("'use strong'; while (1);", SyntaxError);
- assertThrows("'use strong'; do; while (1);", SyntaxError);
- assertThrows("'use strong'; for (;;);", SyntaxError);
- assertThrows("'use strong'; for (x in []);", SyntaxError);
- assertThrows("'use strong'; for (x of []);", SyntaxError);
- assertThrows("'use strong'; for (let x;;);", SyntaxError);
- assertThrows("'use strong'; for (let x in []);", SyntaxError);
- assertThrows("'use strong'; for (let x of []);", SyntaxError);
-})();
diff --git a/deps/v8/test/mjsunit/strong/equality.js b/deps/v8/test/mjsunit/strong/equality.js
deleted file mode 100644
index 5e2464c372..0000000000
--- a/deps/v8/test/mjsunit/strong/equality.js
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode
-
-(function NoSloppyEquality() {
- assertThrows("'use strong'; 0 == 0", SyntaxError);
- assertThrows("'use strong'; 0 != 0", SyntaxError);
-})();
diff --git a/deps/v8/test/mjsunit/strong/eval-direct.js b/deps/v8/test/mjsunit/strong/eval-direct.js
deleted file mode 100644
index bb5387e5b3..0000000000
--- a/deps/v8/test/mjsunit/strong/eval-direct.js
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode
-
-// In strong mode, direct calls to eval are forbidden
-
-assertThrows("'use strong'; eval();", SyntaxError);
-assertThrows("'use strong'; (eval)();", SyntaxError);
-assertThrows("'use strong'; (((eval)))();", SyntaxError);
-assertThrows("'use strong'; eval([]);", SyntaxError);
-assertThrows("'use strong'; eval('function f() {}');", SyntaxError);
-assertThrows("'use strong'; function f() {eval()}", SyntaxError);
-
-assertDoesNotThrow("'use strong'; eval;");
-assertDoesNotThrow("'use strong'; let foo = eval; foo();");
-assertDoesNotThrow("'use strong'; (1, eval)();");
-
-// TODO(neis): The tagged template triggers %ObjectFreeze on an array, which
-// throws when trying to redefine 'length'.
-// assertDoesNotThrow("'use strong'; eval`foo`;");
diff --git a/deps/v8/test/mjsunit/strong/for-in.js b/deps/v8/test/mjsunit/strong/for-in.js
deleted file mode 100644
index 641248c408..0000000000
--- a/deps/v8/test/mjsunit/strong/for-in.js
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode
-
-(function NoForInStatement() {
- assertThrows("'use strong'; for (x in []) {}", SyntaxError);
- assertThrows("'use strong'; for (let x in []) {}", SyntaxError);
- assertThrows("'use strong'; for (const x in []) {}", SyntaxError);
-})();
diff --git a/deps/v8/test/mjsunit/strong/function-arity.js b/deps/v8/test/mjsunit/strong/function-arity.js
deleted file mode 100644
index 11ee212a64..0000000000
--- a/deps/v8/test/mjsunit/strong/function-arity.js
+++ /dev/null
@@ -1,341 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode --harmony-reflect
-// Flags: --allow-natives-syntax
-
-'use strict';
-
-
-function generateArguments(n, prefix) {
- let a = [];
- if (prefix) {
- a.push(prefix);
- }
- for (let i = 0; i < n; i++) {
- a.push(String(i));
- }
-
- return a.join(', ');
-}
-
-
-function generateParams(n, directive_in_body) {
- let a = [];
- for (let i = 0; i < n; i++) {
- a[i] = `p${i}`;
- }
- return a.join(', ');
-}
-
-function generateParamsWithRest(n, directive_in_body) {
- let a = [];
- let i = 0;
- for (; i < n; i++) {
- a[i] = `p${i}`;
- }
- if (!directive_in_body) {
- // If language mode directive occurs in body, rest parameters will trigger
- // an early error regardless of language mode.
- a.push(`...p${i}`);
- }
- return a.join(', ');
-}
-
-
-function generateSpread(n) {
- return `...[${generateArguments(n)}]`;
-}
-
-
-(function FunctionCall() {
- for (let parameterCount = 0; parameterCount < 3; parameterCount++) {
- let defs = [
- `'use strong'; function f(${generateParams(parameterCount)}) {}`,
- `'use strong'; function f(${generateParamsWithRest(parameterCount)}) {}`,
- `'use strong'; function* f(${generateParams(parameterCount)}) {}`,
- `'use strong'; function* f(${generateParamsWithRest(parameterCount)}) {}`,
- `'use strong'; let f = (${generateParams(parameterCount)}) => {}`,
- `function f(${generateParams(parameterCount)}) { 'use strong'; }`,
- `function* f(${generateParams(parameterCount)}) { 'use strong'; }`,
- `let f = (${generateParams(parameterCount)}) => { 'use strong'; }`,
- ];
- for (let def of defs) {
- for (let argumentCount = 0; argumentCount < 3; argumentCount++) {
- let calls = [
- `f(${generateArguments(argumentCount)})`,
- `f(${generateSpread(argumentCount)})`,
- `f.call(${generateArguments(argumentCount, 'undefined')})`,
- `f.call(undefined, ${generateSpread(argumentCount)})`,
- `f.apply(undefined, [${generateArguments(argumentCount)}])`,
- `f.bind(undefined)(${generateArguments(argumentCount)})`,
- `%_Call(f, ${generateArguments(argumentCount, 'undefined')})`,
- `%Call(f, ${generateArguments(argumentCount, 'undefined')})`,
- `%Apply(f, undefined, [${generateArguments(argumentCount)}], 0,
- ${argumentCount})`,
- ];
-
- for (let call of calls) {
- let code = `'use strict'; ${def}; ${call};`;
- if (argumentCount < parameterCount) {
- print(code);
- assertThrows(code, TypeError);
- } else {
- assertDoesNotThrow(code);
- }
- }
- }
-
- let calls = [
- `f.call()`,
- `f.apply()`,
- `f.apply(undefined)`,
- ];
- for (let call of calls) {
- let code = `'use strict'; ${def}; ${call};`;
- if (parameterCount > 0) {
- assertThrows(code, TypeError);
- } else {
- assertDoesNotThrow(code);
- }
- }
- }
- }
-})();
-
-
-(function MethodCall() {
- for (let genParams of [generateParams, generateParamsWithRest]) {
- for (let parameterCount = 0; parameterCount < 3; parameterCount++) {
- let defs = [
- `let o = new class {
- m(${genParams(parameterCount, true)}) { 'use strong'; }
- }`,
- `let o = new class {
- *m(${genParams(parameterCount, true)}) { 'use strong'; }
- }`,
- `let o = { m(${genParams(parameterCount, true)}) { 'use strong'; } }`,
- `let o = { *m(${genParams(parameterCount, true)}) { 'use strong'; } }`,
- `'use strong';
- let o = new class { m(${genParams(parameterCount)}) {} }`,
- `'use strong';
- let o = new class { *m(${genParams(parameterCount)}) {} }`,
- `'use strong'; let o = { m(${genParams(parameterCount)}) {} }`,
- `'use strong'; let o = { *m(${genParams(parameterCount)}) {} }`,
- ];
- for (let def of defs) {
- for (let argumentCount = 0; argumentCount < 3; argumentCount++) {
- let calls = [
- `o.m(${generateArguments(argumentCount)})`,
- `o.m(${generateSpread(argumentCount)})`,
- `o.m.call(${generateArguments(argumentCount, 'o')})`,
- `o.m.call(o, ${generateSpread(argumentCount)})`,
- `o.m.apply(o, [${generateArguments(argumentCount)}])`,
- `o.m.bind(o)(${generateArguments(argumentCount)})`,
- `%_Call(o.m, ${generateArguments(argumentCount, 'o')})`,
- `%Call(o.m, ${generateArguments(argumentCount, 'o')})`,
- `%Apply(o.m, o, [${generateArguments(argumentCount)}], 0,
- ${argumentCount})`,
- ];
-
- for (let call of calls) {
- let code = `'use strict'; ${def}; ${call};`;
- if (argumentCount < parameterCount) {
- assertThrows(code, TypeError);
- } else {
- assertDoesNotThrow(code);
- }
- }
- }
-
- let calls = [
- `o.m.call()`,
- `o.m.apply()`,
- `o.m.apply(o)`,
- ];
- for (let call of calls) {
- let code = `'use strict'; ${def}; ${call};`;
- if (parameterCount > 0) {
- assertThrows(code, TypeError);
- } else {
- assertDoesNotThrow(code);
- }
- }
- }
- }
- }
-})();
-
-
-(function Constructor() {
- for (let genParams of [generateParams, generateParamsWithRest]) {
- for (let argumentCount = 0; argumentCount < 3; argumentCount++) {
- for (let parameterCount = 0; parameterCount < 3; parameterCount++) {
- let defs = [
- `'use strong';
- class C { constructor(${genParams(parameterCount)}) {} }`,
- ];
- for (let def of defs) {
- let calls = [
- `new C(${generateArguments(argumentCount)})`,
- `new C(${generateSpread(argumentCount)})`,
- `Reflect.construct(C, [${generateArguments(argumentCount)}])`,
- ];
- for (let call of calls) {
- let code = `${def}; ${call};`;
- if (argumentCount < parameterCount) {
- assertThrows(code, TypeError);
- } else {
- assertDoesNotThrow(code);
- }
- }
- }
- }
- }
- }
-})();
-
-
-(function DerivedConstructor() {
- for (let genParams of [generateParams, generateParamsWithRest]) {
- for (let genArgs of [generateArguments, generateSpread]) {
- for (let argumentCount = 0; argumentCount < 3; argumentCount++) {
- for (let parameterCount = 0; parameterCount < 3; parameterCount++) {
- let defs = [
- `'use strong';
- class B {
- constructor(${genParams(parameterCount)}) {}
- }
- class C extends B {
- constructor() {
- super(${genArgs(argumentCount)});
- }
- }`,
- ];
- for (let def of defs) {
- let code = `${def}; new C();`;
- if (argumentCount < parameterCount) {
- assertThrows(code, TypeError);
- } else {
- assertDoesNotThrow(code);
- }
- }
- }
- }
- }
- }
-})();
-
-
-(function DerivedConstructorDefaultConstructorInDerivedClass() {
- for (let genParams of [generateParams, generateParamsWithRest]) {
- for (let genArgs of [generateArguments, generateSpread]) {
- for (let argumentCount = 0; argumentCount < 3; argumentCount++) {
- for (let parameterCount = 0; parameterCount < 3; parameterCount++) {
- let defs = [
- `'use strong';
- class B {
- constructor(${genParams(parameterCount)}) {}
- }
- class C extends B {}`,
- ];
- for (let def of defs) {
- let code = `${def}; new C(${genArgs(argumentCount)})`;
- if (argumentCount < parameterCount) {
- assertThrows(code, TypeError);
- } else {
- assertDoesNotThrow(code);
- }
- }
- }
- }
- }
- }
-})();
-
-
-(function TestOptimized() {
- function f(x, y) { 'use strong'; }
-
- assertThrows(f, TypeError);
- %OptimizeFunctionOnNextCall(f);
- assertThrows(f, TypeError);
-
- function g() {
- f(1);
- }
- assertThrows(g, TypeError);
- %OptimizeFunctionOnNextCall(g);
- assertThrows(g, TypeError);
-
- f(1, 2);
- %OptimizeFunctionOnNextCall(f);
- f(1, 2);
-})();
-
-
-(function TestOptimized2() {
- 'use strong';
- function f(x, y) {}
-
- assertThrows(f, TypeError);
- %OptimizeFunctionOnNextCall(f);
- assertThrows(f, TypeError);
-
- function g() {
- f(1);
- }
- assertThrows(g, TypeError);
- %OptimizeFunctionOnNextCall(g);
- assertThrows(g, TypeError);
-
- f(1, 2);
- %OptimizeFunctionOnNextCall(f);
- f(1, 2);
-})();
-
-
-(function TestOptimized3() {
- function f(x, y) {}
- function g() {
- 'use strong';
- f(1);
- }
-
- g();
- %OptimizeFunctionOnNextCall(f);
- g();
-})();
-
-
-(function ParametersSuper() {
- for (let genArgs of [generateArguments, generateSpread]) {
- for (let argumentCount = 0; argumentCount < 3; argumentCount++) {
- for (let parameterCount = 0; parameterCount < 3; parameterCount++) {
- let defs = [
- `'use strict';
- class B {
- m(${generateParams(parameterCount)} ){ 'use strong' }
- }`,
- `'use strong'; class B { m(${generateParams(parameterCount)}) {} }`,
- ];
- for (let def of defs) {
- let code = `${def};
- class D extends B {
- m() {
- super.m(${genArgs(argumentCount)});
- }
- }
- new D().m()`;
- print('\n\n' + code);
- if (argumentCount < parameterCount) {
- assertThrows(code, TypeError);
- } else {
- assertDoesNotThrow(code);
- }
- }
- }
- }
- }
-})();
diff --git a/deps/v8/test/mjsunit/strong/functions.js b/deps/v8/test/mjsunit/strong/functions.js
deleted file mode 100644
index a237270977..0000000000
--- a/deps/v8/test/mjsunit/strong/functions.js
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode
-
-'use strong';
-
-function f() {}
-function* g() {}
-
-(function NoArguments() {
- assertThrows("'use strong'; arguments", SyntaxError);
- assertThrows("'use strong'; function f() { arguments }", SyntaxError);
- assertThrows("'use strong'; function* g() { arguments }", SyntaxError);
- assertThrows("'use strong'; let f = function() { arguments }", SyntaxError);
- assertThrows("'use strong'; let g = function*() { arguments }", SyntaxError);
- assertThrows("'use strong'; let f = () => arguments", SyntaxError);
- // The following are strict mode errors already.
- assertThrows("'use strong'; let arguments", SyntaxError);
- assertThrows("'use strong'; function f(arguments) {}", SyntaxError);
- assertThrows("'use strong'; function* g(arguments) {}", SyntaxError);
- assertThrows("'use strong'; let f = (arguments) => {}", SyntaxError);
-})();
-
-(function NoArgumentsProperty() {
- assertFalse(f.hasOwnProperty("arguments"));
- assertFalse(g.hasOwnProperty("arguments"));
- assertThrows(function(){ f.arguments = 0 }, TypeError);
- assertThrows(function(){ g.arguments = 0 }, TypeError);
-})();
-
-(function NoCaller() {
- assertFalse(f.hasOwnProperty("caller"));
- assertFalse(g.hasOwnProperty("caller"));
- assertThrows(function(){ f.caller = 0 }, TypeError);
- assertThrows(function(){ g.caller = 0 }, TypeError);
-})();
-
-(function NoCallee() {
- assertFalse("callee" in f);
- assertFalse("callee" in g);
- assertThrows(function(){ f.callee = 0 }, TypeError);
- assertThrows(function(){ g.callee = 0 }, TypeError);
-})();
-
-(function LexicalBindings(global) {
- assertEquals('function', typeof f);
- assertEquals('function', typeof g);
- assertFalse(global.hasOwnProperty("f"));
- assertFalse(global.hasOwnProperty("g"));
-})(this);
-
-(function ImmutableBindings() {
- function f2() {}
- function* g2() {}
- assertThrows(function(){ f = 0 }, TypeError);
- assertThrows(function(){ g = 0 }, TypeError);
- assertThrows(function(){ f2 = 0 }, TypeError);
- assertThrows(function(){ g2 = 0 }, TypeError);
- assertEquals('function', typeof f);
- assertEquals('function', typeof g);
- assertEquals('function', typeof f2);
- assertEquals('function', typeof g2);
-})();
-
-(function NonExtensible() {
- assertThrows(function(){ f.a = 0 }, TypeError);
- assertThrows(function(){ g.a = 0 }, TypeError);
- assertThrows(function(){ Object.defineProperty(f, "a", {value: 0}) }, TypeError);
- assertThrows(function(){ Object.defineProperty(g, "a", {value: 0}) }, TypeError);
- assertThrows(function(){ Object.setPrototypeOf(f, {}) }, TypeError);
- assertThrows(function(){ Object.setPrototypeOf(g, {}) }, TypeError);
-})();
-
-(function NoPrototype() {
- assertFalse("prototype" in f);
- assertFalse(g.hasOwnProperty("prototype"));
- assertThrows(function(){ f.prototype = 0 }, TypeError);
- assertThrows(function(){ g.prototype = 0 }, TypeError);
- assertThrows(function(){ f.prototype.a = 0 }, TypeError);
-})();
-
-(function NonConstructor() {
- assertThrows(function(){ new f }, TypeError);
- assertThrows(function(){ new g }, TypeError);
-})();
diff --git a/deps/v8/test/mjsunit/strong/implicit-conversions-constants.js b/deps/v8/test/mjsunit/strong/implicit-conversions-constants.js
deleted file mode 100644
index c24056299f..0000000000
--- a/deps/v8/test/mjsunit/strong/implicit-conversions-constants.js
+++ /dev/null
@@ -1,203 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode --allow-natives-syntax
-
-"use strict";
-
-function getTestFuncs() {
- "use strong";
- return [
- function(x){return 1 + true;},
- function(x){return 1 - true;},
- function(x){return 1 * true;},
- function(x){return 1 / true;},
- function(x){return 1 % true;},
- function(x){return 1 | true;},
- function(x){return 1 & true;},
- function(x){return 1 ^ true;},
- function(x){return 1 << true;},
- function(x){return 1 >> true;},
- function(x){return 1 >>> true;},
- function(x){return 1 < true;},
- function(x){return 1 > true;},
- function(x){return 1 <= true;},
- function(x){return 1 >= true;},
- function(x){return 1 + undefined;},
- function(x){return 1 - undefined;},
- function(x){return 1 * undefined;},
- function(x){return 1 / undefined;},
- function(x){return 1 % undefined;},
- function(x){return 1 | undefined;},
- function(x){return 1 & undefined;},
- function(x){return 1 ^ undefined;},
- function(x){return 1 << undefined;},
- function(x){return 1 >> undefined;},
- function(x){return 1 >>> undefined;},
- function(x){return 1 < undefined;},
- function(x){return 1 > undefined;},
- function(x){return 1 <= undefined;},
- function(x){return 1 >= undefined;},
- function(x){return 1 + null;},
- function(x){return 1 - null;},
- function(x){return 1 * null;},
- function(x){return 1 / null;},
- function(x){return 1 % null;},
- function(x){return 1 | null;},
- function(x){return 1 & null;},
- function(x){return 1 ^ null;},
- function(x){return 1 << null;},
- function(x){return 1 >> null;},
- function(x){return 1 >>> null;},
- function(x){return 1 < null;},
- function(x){return 1 > null;},
- function(x){return 1 <= null;},
- function(x){return 1 >= null;},
- function(x){return NaN + true;},
- function(x){return NaN - true;},
- function(x){return NaN * true;},
- function(x){return NaN / true;},
- function(x){return NaN % true;},
- function(x){return NaN | true;},
- function(x){return NaN & true;},
- function(x){return NaN ^ true;},
- function(x){return NaN << true;},
- function(x){return NaN >> true;},
- function(x){return NaN >>> true;},
- function(x){return NaN < true;},
- function(x){return NaN > true;},
- function(x){return NaN <= true;},
- function(x){return NaN >= true;},
- function(x){return NaN + undefined;},
- function(x){return NaN - undefined;},
- function(x){return NaN * undefined;},
- function(x){return NaN / undefined;},
- function(x){return NaN % undefined;},
- function(x){return NaN | undefined;},
- function(x){return NaN & undefined;},
- function(x){return NaN ^ undefined;},
- function(x){return NaN << undefined;},
- function(x){return NaN >> undefined;},
- function(x){return NaN >>> undefined;},
- function(x){return NaN < undefined;},
- function(x){return NaN > undefined;},
- function(x){return NaN <= undefined;},
- function(x){return NaN >= undefined;},
- function(x){return NaN + null;},
- function(x){return NaN - null;},
- function(x){return NaN * null;},
- function(x){return NaN / null;},
- function(x){return NaN % null;},
- function(x){return NaN | null;},
- function(x){return NaN & null;},
- function(x){return NaN ^ null;},
- function(x){return NaN << null;},
- function(x){return NaN >> null;},
- function(x){return NaN >>> null;},
- function(x){return NaN < null;},
- function(x){return NaN > null;},
- function(x){return NaN <= null;},
- function(x){return NaN >= null;},
- function(x){return true + 1;},
- function(x){return true - 1;},
- function(x){return true * 1;},
- function(x){return true / 1;},
- function(x){return true % 1;},
- function(x){return true | 1;},
- function(x){return true & 1;},
- function(x){return true ^ 1;},
- function(x){return true << 1;},
- function(x){return true >> 1;},
- function(x){return true >>> 1;},
- function(x){return true < 1;},
- function(x){return true > 1;},
- function(x){return true <= 1;},
- function(x){return true >= 1;},
- function(x){return undefined + 1;},
- function(x){return undefined - 1;},
- function(x){return undefined * 1;},
- function(x){return undefined / 1;},
- function(x){return undefined % 1;},
- function(x){return undefined | 1;},
- function(x){return undefined & 1;},
- function(x){return undefined ^ 1;},
- function(x){return undefined << 1;},
- function(x){return undefined >> 1;},
- function(x){return undefined >>> 1;},
- function(x){return undefined < 1;},
- function(x){return undefined > 1;},
- function(x){return undefined <= 1;},
- function(x){return undefined >= 1;},
- function(x){return null + 1;},
- function(x){return null - 1;},
- function(x){return null * 1;},
- function(x){return null / 1;},
- function(x){return null % 1;},
- function(x){return null | 1;},
- function(x){return null & 1;},
- function(x){return null ^ 1;},
- function(x){return null << 1;},
- function(x){return null >> 1;},
- function(x){return null >>> 1;},
- function(x){return null < 1;},
- function(x){return null > 1;},
- function(x){return null <= 1;},
- function(x){return null >= 1;},
- function(x){return true + NaN;},
- function(x){return true - NaN;},
- function(x){return true * NaN;},
- function(x){return true / NaN;},
- function(x){return true % NaN;},
- function(x){return true | NaN;},
- function(x){return true & NaN;},
- function(x){return true ^ NaN;},
- function(x){return true << NaN;},
- function(x){return true >> NaN;},
- function(x){return true >>> NaN;},
- function(x){return true < NaN;},
- function(x){return true > NaN;},
- function(x){return true <= NaN;},
- function(x){return true >= NaN;},
- function(x){return undefined + NaN;},
- function(x){return undefined - NaN;},
- function(x){return undefined * NaN;},
- function(x){return undefined / NaN;},
- function(x){return undefined % NaN;},
- function(x){return undefined | NaN;},
- function(x){return undefined & NaN;},
- function(x){return undefined ^ NaN;},
- function(x){return undefined << NaN;},
- function(x){return undefined >> NaN;},
- function(x){return undefined >>> NaN;},
- function(x){return undefined < NaN;},
- function(x){return undefined > NaN;},
- function(x){return undefined <= NaN;},
- function(x){return undefined >= NaN;},
- function(x){return null + NaN;},
- function(x){return null - NaN;},
- function(x){return null * NaN;},
- function(x){return null / NaN;},
- function(x){return null % NaN;},
- function(x){return null | NaN;},
- function(x){return null & NaN;},
- function(x){return null ^ NaN;},
- function(x){return null << NaN;},
- function(x){return null >> NaN;},
- function(x){return null >>> NaN;},
- function(x){return null < NaN;},
- function(x){return null > NaN;},
- function(x){return null <= NaN;},
- function(x){return null >= NaN;}
- ];
-}
-
-for (let func of getTestFuncs()) {
- assertThrows(func, TypeError);
- assertThrows(func, TypeError);
- assertThrows(func, TypeError);
- %OptimizeFunctionOnNextCall(func);
- assertThrows(func, TypeError);
- %DeoptimizeFunction(func);
- assertThrows(func, TypeError);
-}
diff --git a/deps/v8/test/mjsunit/strong/implicit-conversions-count.js b/deps/v8/test/mjsunit/strong/implicit-conversions-count.js
deleted file mode 100644
index 88ed3c2068..0000000000
--- a/deps/v8/test/mjsunit/strong/implicit-conversions-count.js
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode --allow-natives-syntax
-
-"use strict";
-
-function pre_inc(x) {
- return ++x;
-}
-
-function post_inc(x) {
- return x++;
-}
-
-function pre_dec(x) {
- return --x;
-}
-
-function post_dec(x) {
- return x--;
-}
-
-function getTestFuncs() {
- return [
- function(x){
- "use strong";
- let y = x;
- assertEquals(++y, pre_inc(x));
- try {
- assertEquals(x+1, y)
- } catch (e) {
- assertUnreachable();
- }
- },
- function(x){
- "use strong";
- let y = x;
- assertEquals(y++, post_inc(x));
- try {
- assertEquals(x+1, y)
- } catch (e) {
- assertUnreachable();
- }
- },
- function(x){
- "use strong";
- let y = x;
- assertEquals(--y, pre_dec(x));
- try {
- assertEquals(x-1, y)
- } catch (e) {
- assertUnreachable();
- }
- },
- function(x){
- "use strong";
- let y = x;
- assertEquals(y--, post_dec(x));
- try {
- assertEquals(x-1, y)
- } catch (e) {
- assertUnreachable();
- }
- },
- function(x){
- "use strong";
- let obj = { foo: x };
- let y = ++obj.foo;
- assertEquals(y, pre_inc(x));
- try {
- assertEquals(x+1, obj.foo)
- } catch (e) {
- assertUnreachable();
- }
- },
- function(x){
- "use strong";
- let obj = { foo: x };
- let y = obj.foo++;
- assertEquals(y, post_inc(x));
- try {
- assertEquals(x+1, obj.foo)
- } catch (e) {
- assertUnreachable();
- }
- },
- function(x){
- "use strong";
- let obj = { foo: x };
- let y = --obj.foo;
- assertEquals(y, pre_dec(x));
- try {
- assertEquals(x-1, obj.foo)
- } catch (e) {
- assertUnreachable();
- }
- },
- function(x){
- "use strong";
- let obj = { foo: x };
- let y = obj.foo--;
- assertEquals(y, post_dec(x));
- try {
- assertEquals(x-1, obj.foo)
- } catch (e) {
- assertUnreachable();
- }
- },
- ];
-}
-
-let nonNumberValues = [
- {},
- (function(){}),
- [],
- (class Foo {}),
- "",
- "foo",
- "NaN",
- Object(""),
- false,
- null,
- undefined
-];
-
-// Check prior input of None works
-for (let func of getTestFuncs()) {
- for (let value of nonNumberValues) {
- assertThrows(function(){func(value)}, TypeError);
- assertThrows(function(){func(value)}, TypeError);
- assertThrows(function(){func(value)}, TypeError);
- %OptimizeFunctionOnNextCall(func);
- assertThrows(function(){func(value)}, TypeError);
- %DeoptimizeFunction(func);
- }
-}
-
-// Check prior input of Smi works
-for (let func of getTestFuncs()) {
- func(1);
- func(1);
- func(1);
- for (let value of nonNumberValues) {
- assertThrows(function(){func(value)}, TypeError);
- assertThrows(function(){func(value)}, TypeError);
- assertThrows(function(){func(value)}, TypeError);
- %OptimizeFunctionOnNextCall(func);
- assertThrows(function(){func(value)}, TypeError);
- %DeoptimizeFunction(func);
- }
-}
-
-// Check prior input of Number works
-for (let func of getTestFuncs()) {
- func(9999999999999);
- func(9999999999999);
- func(9999999999999);
- for (let value of nonNumberValues) {
- assertThrows(function(){func(value)}, TypeError);
- assertThrows(function(){func(value)}, TypeError);
- assertThrows(function(){func(value)}, TypeError);
- %OptimizeFunctionOnNextCall(func);
- assertThrows(function(){func(value)}, TypeError);
- %DeoptimizeFunction(func);
- }
-}
diff --git a/deps/v8/test/mjsunit/strong/implicit-conversions-inlining.js b/deps/v8/test/mjsunit/strong/implicit-conversions-inlining.js
deleted file mode 100644
index 15997a37aa..0000000000
--- a/deps/v8/test/mjsunit/strong/implicit-conversions-inlining.js
+++ /dev/null
@@ -1,442 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode --allow-natives-syntax
-
-"use strict";
-
-//******************************************************************************
-// Number function declarations
-function inline_add_strong(x, y) {
- "use strong";
- return x + y;
-}
-
-function inline_add_strong_outer(x, y) {
- return inline_add_strong(x, y);
-}
-
-function inline_sub_strong(x, y) {
- "use strong";
- return x - y;
-}
-
-function inline_sub_strong_outer(x, y) {
- return inline_sub_strong(x, y);
-}
-
-function inline_mul_strong(x, y) {
- "use strong";
- return x * y;
-}
-
-function inline_mul_strong_outer(x, y) {
- return inline_mul_strong(x, y);
-}
-
-function inline_div_strong(x, y) {
- "use strong";
- return x / y;
-}
-
-function inline_div_strong_outer(x, y) {
- return inline_div_strong(x, y);
-}
-
-function inline_mod_strong(x, y) {
- "use strong";
- return x % y;
-}
-
-function inline_mod_strong_outer(x, y) {
- return inline_mod_strong(x, y);
-}
-
-function inline_or_strong(x, y) {
- "use strong";
- return x | y;
-}
-
-function inline_or_strong_outer(x, y) {
- return inline_or_strong(x, y);
-}
-
-function inline_and_strong(x, y) {
- "use strong";
- return x & y;
-}
-
-function inline_and_strong_outer(x, y) {
- return inline_and_strong(x, y);
-}
-
-function inline_xor_strong(x, y) {
- "use strong";
- return x ^ y;
-}
-
-function inline_xor_strong_outer(x, y) {
- return inline_xor_strong(x, y);
-}
-
-function inline_shl_strong(x, y) {
- "use strong";
- return x << y;
-}
-
-function inline_shl_strong_outer(x, y) {
- return inline_shl_strong(x, y);
-}
-
-function inline_shr_strong(x, y) {
- "use strong";
- return x >> y;
-}
-
-function inline_shr_strong_outer(x, y) {
- return inline_shr_strong(x, y);
-}
-
-function inline_sar_strong(x, y) {
- "use strong";
- return x >>> y;
-}
-
-function inline_sar_strong_outer(x, y) {
- return inline_sar_strong(x, y);
-}
-
-function inline_less_strong(x, y) {
- "use strong";
- return x < y;
-}
-
-function inline_less_strong_outer(x, y) {
- return inline_less_strong(x, y);
-}
-
-function inline_greater_strong(x, y) {
- "use strong";
- return x > y;
-}
-
-function inline_greater_strong_outer(x, y) {
- return inline_greater_strong(x, y);
-}
-
-function inline_less_equal_strong(x, y) {
- "use strong";
- return x <= y;
-}
-
-function inline_less_equal_strong_outer(x, y) {
- return inline_less_equal_strong(x, y);
-}
-
-function inline_greater_equal_strong(x, y) {
- "use strong";
- return x >= y;
-}
-
-function inline_greater_equal_strong_outer(x, y) {
- return inline_greater_equal_strong(x, y);
-}
-
-function inline_add(x, y) {
- return x + y;
-}
-
-function inline_add_outer_strong(x, y) {
- "use strong";
- return inline_add(x, y);
-}
-
-function inline_sub(x, y) {
- return x - y;
-}
-
-function inline_sub_outer_strong(x, y) {
- "use strong";
- return inline_sub(x, y);
-}
-
-function inline_mul(x, y) {
- return x * y;
-}
-
-function inline_mul_outer_strong(x, y) {
- "use strong";
- return inline_mul(x, y);
-}
-
-function inline_div(x, y) {
- return x / y;
-}
-
-function inline_div_outer_strong(x, y) {
- "use strong";
- return inline_div(x, y);
-}
-
-function inline_mod(x, y) {
- return x % y;
-}
-
-function inline_mod_outer_strong(x, y) {
- "use strong";
- return inline_mod(x, y);
-}
-
-function inline_or(x, y) {
- return x | y;
-}
-
-function inline_or_outer_strong(x, y) {
- "use strong";
- return inline_or(x, y);
-}
-
-function inline_and(x, y) {
- return x & y;
-}
-
-function inline_and_outer_strong(x, y) {
- "use strong";
- return inline_and(x, y);
-}
-
-function inline_xor(x, y) {
- return x ^ y;
-}
-
-function inline_xor_outer_strong(x, y) {
- "use strong";
- return inline_xor(x, y);
-}
-
-function inline_shl(x, y) {
- return x << y;
-}
-
-function inline_shl_outer_strong(x, y) {
- "use strong";
- return inline_shl(x, y);
-}
-
-function inline_shr(x, y) {
- return x >> y;
-}
-
-function inline_shr_outer_strong(x, y) {
- "use strong";
- return inline_shr(x, y);
-}
-
-function inline_sar(x, y) {
- return x >>> y;
-}
-
-function inline_sar_outer_strong(x, y) {
- "use strong";
- return inline_sar(x, y);
-}
-
-function inline_less(x, y) {
- return x < y;
-}
-
-function inline_less_outer_strong(x, y) {
- "use strong";
- return inline_less(x, y);
-}
-
-function inline_greater(x, y) {
- return x > y;
-}
-
-function inline_greater_outer_strong(x, y) {
- "use strong";
- return inline_greater(x, y);
-}
-
-function inline_less_equal(x, y) {
- return x <= y;
-}
-
-function inline_less_equal_outer_strong(x, y) {
- "use strong";
- return inline_less_equal(x, y);
-}
-
-function inline_greater_equal(x, y) {
- return x >>> y;
-}
-
-function inline_greater_equal_outer_strong(x, y) {
- "use strong";
- return inline_greater_equal(x, y);
-}
-
-//******************************************************************************
-// String function declarations
-function inline_add_string_strong(x, y) {
- "use strong";
- return x + y;
-}
-
-function inline_add_string_strong_outer(x, y) {
- return inline_add_string_strong(x, y);
-}
-
-function inline_less_string_strong(x, y) {
- "use strong";
- return x < y;
-}
-
-function inline_less_string_strong_outer(x, y) {
- return inline_less_string_strong(x, y);
-}
-
-function inline_greater_string_strong(x, y) {
- "use strong";
- return x > y;
-}
-
-function inline_greater_string_strong_outer(x, y) {
- return inline_greater_string_strong(x, y);
-}
-
-function inline_less_equal_string_strong(x, y) {
- "use strong";
- return x <= y;
-}
-
-function inline_less_equal_string_strong_outer(x, y) {
- return inline_less_equal_string_strong(x, y);
-}
-
-function inline_greater_equal_string_strong(x, y) {
- "use strong";
- return x >= y;
-}
-
-function inline_greater_equal_string_strong_outer(x, y) {
- return inline_greater_equal_string_strong(x, y);
-}
-
-function inline_add_string(x, y) {
- return x + y;
-}
-
-function inline_add_string_outer_strong(x, y) {
- "use strong";
- return inline_add_string(x, y);
-}
-
-function inline_less_string(x, y) {
- return x < y;
-}
-
-function inline_less_string_outer_strong(x, y) {
- "use strong";
- return inline_less_string(x, y);
-}
-
-function inline_greater_string(x, y) {
- return x > y;
-}
-
-function inline_greater_string_outer_strong(x, y) {
- "use strong";
- return inline_greater_string(x, y);
-}
-
-function inline_less_equal_string(x, y) {
- return x <= y;
-}
-
-function inline_less_equal_string_outer_strong(x, y) {
- "use strong";
- return inline_less_equal_string(x, y);
-}
-
-function inline_greater_equal_string(x, y) {
- return x >= y;
-}
-
-function inline_greater_equal_string_outer_strong(x, y) {
- "use strong";
- return inline_greater_equal_string(x, y);
-}
-
-
-//******************************************************************************
-// Testing
-let strong_inner_funcs_num = [inline_add_strong_outer, inline_sub_strong_outer,
- inline_mul_strong_outer, inline_div_strong_outer,
- inline_mod_strong_outer, inline_or_strong_outer,
- inline_and_strong_outer, inline_xor_strong_outer,
- inline_shl_strong_outer, inline_shr_strong_outer,
- inline_less_strong_outer,
- inline_greater_strong_outer,
- inline_less_equal_strong_outer,
- inline_greater_equal_strong_outer];
-
-let strong_outer_funcs_num = [inline_add_outer_strong, inline_sub_outer_strong,
- inline_mul_outer_strong, inline_div_outer_strong,
- inline_mod_outer_strong, inline_or_outer_strong,
- inline_and_outer_strong, inline_xor_outer_strong,
- inline_shl_outer_strong, inline_shr_outer_strong,
- inline_less_outer_strong,
- inline_greater_outer_strong,
- inline_less_equal_outer_strong,
- inline_greater_equal_outer_strong];
-
-let strong_inner_funcs_string = [inline_add_string_strong_outer,
- inline_less_string_strong_outer,
- inline_greater_string_strong_outer,
- inline_less_equal_string_strong_outer,
- inline_greater_equal_string_strong_outer];
-
-let strong_outer_funcs_string = [inline_add_string_outer_strong,
- inline_less_string_outer_strong,
- inline_greater_string_outer_strong,
- inline_less_equal_string_outer_strong,
- inline_greater_equal_string_outer_strong];
-
-for (let strong_inner_func of strong_inner_funcs_num) {
- assertThrows(function(){strong_inner_func(1, {})}, TypeError);
- for (var i = 0; i < 100; i++) {
- strong_inner_func(1, 2);
- }
- %OptimizeFunctionOnNextCall(strong_inner_func);
- assertThrows(function(){strong_inner_func(1, {})}, TypeError);
-}
-
-for (let strong_outer_func of strong_outer_funcs_num) {
- assertDoesNotThrow(function(){strong_outer_func(1, {})});
- for (var i = 0; i < 100; i++) {
- strong_outer_func(1, 2);
- }
- %OptimizeFunctionOnNextCall(strong_outer_func);
- assertDoesNotThrow(function(){strong_outer_func(1, {})});
-}
-
-for (let strong_inner_func of strong_inner_funcs_string) {
- assertThrows(function(){strong_inner_func("foo", {})}, TypeError);
- for (var i = 0; i < 100; i++) {
- strong_inner_func("foo", "bar");
- }
- %OptimizeFunctionOnNextCall(strong_inner_func);
- assertThrows(function(){strong_inner_func("foo", {})}, TypeError);
-}
-
-for (let strong_outer_func of strong_outer_funcs_string) {
- assertDoesNotThrow(function(){strong_outer_func("foo", {})});
- for (var i = 0; i < 100; i++) {
- strong_outer_func("foo", "bar");
- }
- %OptimizeFunctionOnNextCall(strong_outer_func);
- assertDoesNotThrow(function(){strong_outer_func("foo", {})});
-}
diff --git a/deps/v8/test/mjsunit/strong/implicit-conversions.js b/deps/v8/test/mjsunit/strong/implicit-conversions.js
deleted file mode 100644
index cd8acf7085..0000000000
--- a/deps/v8/test/mjsunit/strong/implicit-conversions.js
+++ /dev/null
@@ -1,412 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode --allow-natives-syntax
-
-"use strict";
-
-// Boolean indicates whether an operator can be part of a compound assignment.
-let strongNumberBinops = [
- ["-", true],
- ["*", true],
- ["/", true],
- ["%", true],
- ["|", true],
- ["&", true],
- ["^", true],
- ["<<", true],
- [">>", true],
- [">>>", true]
-];
-
-let strongStringOrNumberBinops = [
- ["+", true],
- ["<", false],
- [">", false],
- ["<=", false],
- [">=", false]
-];
-
-let strongBinops = strongNumberBinops.concat(strongStringOrNumberBinops);
-
-let strongUnops = [
- "~",
- "+",
- "-"
-];
-
-let nonStringOrNumberValues = [
- "null",
- "undefined",
- "{}",
- "false",
- "(function(){})",
- "[]",
- "(class Foo {})"
-];
-
-let stringValues = [
- "''",
- "' '",
- "'foo'",
- "'f\\u006F\\u006F'",
- "'0'",
- "'NaN'"
-];
-
-let nonNumberValues = nonStringOrNumberValues.concat(stringValues);
-
-let numberValues = [
- "0",
- "(-0)",
- "1",
- "(-4294967295)",
- "(-4294967296)",
- "9999999999999",
- "(-9999999999999)",
- "NaN",
- "Infinity",
- "(-Infinity)"
-];
-
-//******************************************************************************
-// Relational comparison function declarations
-function add_strong(x, y) {
- "use strong";
- return x + y;
-}
-
-function add_num_strong(x, y) {
- "use strong";
- return x + y;
-}
-
-function sub_strong(x, y) {
- "use strong";
- return x - y;
-}
-
-function mul_strong(x, y) {
- "use strong";
- return x * y;
-}
-
-function div_strong(x, y) {
- "use strong";
- return x / y;
-}
-
-function mod_strong(x, y) {
- "use strong";
- return x % y;
-}
-
-function or_strong(x, y) {
- "use strong";
- return x | y;
-}
-
-function and_strong(x, y) {
- "use strong";
- return x & y;
-}
-
-function xor_strong(x, y) {
- "use strong";
- return x ^ y;
-}
-
-function shl_strong(x, y) {
- "use strong";
- return x << y;
-}
-
-function shr_strong(x, y) {
- "use strong";
- return x >> y;
-}
-
-function sar_strong(x, y) {
- "use strong";
- return x >>> y;
-}
-
-function less_strong(x, y) {
- "use strong";
- return x < y;
-}
-
-function less_num_strong(x, y) {
- "use strong";
- return x < y;
-}
-
-function greater_strong(x, y) {
- "use strong";
- return x > y;
-}
-
-function greater_num_strong(x, y) {
- "use strong";
- return x > y;
-}
-
-function less_equal_strong(x, y) {
- "use strong";
- return x <= y;
-}
-
-function less_equal_num_strong(x, y) {
- "use strong";
- return x <= y;
-}
-
-function greater_equal_strong(x, y) {
- "use strong";
- return x >= y;
-}
-
-function greater_equal_num_strong(x, y) {
- "use strong";
- return x >= y;
-}
-
-function typed_add_strong(x, y) {
- "use strong";
- return (+x) + (+y);
-}
-
-function typed_sub_strong(x, y) {
- "use strong";
- return (+x) - (+y);
-}
-
-function typed_mul_strong(x, y) {
- "use strong";
- return (+x) * (+y);
-}
-
-function typed_div_strong(x, y) {
- "use strong";
- return (+x) / (+y);
-}
-
-function typed_mod_strong(x, y) {
- "use strong";
- return (+x) % (+y);
-}
-
-function typed_or_strong(x, y) {
- "use strong";
- return (+x) | (+y);
-}
-
-function typed_and_strong(x, y) {
- "use strong";
- return (+x) & (+y);
-}
-
-function typed_xor_strong(x, y) {
- "use strong";
- return (+x) ^ (+y);
-}
-
-function typed_shl_strong(x, y) {
- "use strong";
- return (+x) << (+y);
-}
-
-function typed_shr_strong(x, y) {
- "use strong";
- return (+x) >> (+y);
-}
-
-function typed_sar_strong(x, y) {
- "use strong";
- return (+x) >>> (+y);
-}
-
-function typed_less_strong(x, y) {
- "use strong";
- return (+x) < (+y);
-}
-
-function typed_greater_strong(x, y) {
- "use strong";
- return (+x) > (+y);
-}
-
-function typed_less_equal_strong(x, y) {
- "use strong";
- return (+x) <= (+y);
-}
-
-function typed_greater_equal_strong(x, y) {
- "use strong";
- return (+x) >= (+y);
-}
-
-//******************************************************************************
-// (in)equality function declarations
-function str_equal_strong(x, y) {
- "use strong";
- return x === y;
-}
-
-function str_ineq_strong(x, y) {
- "use strong";
- return x !== y;
-}
-
-let strongNumberFuncs = [add_num_strong, sub_strong, mul_strong, div_strong,
- mod_strong, or_strong, and_strong, xor_strong,
- shl_strong, shr_strong, sar_strong, less_num_strong,
- greater_num_strong, less_equal_num_strong,
- greater_equal_num_strong, typed_add_strong,
- typed_sub_strong, typed_mul_strong, typed_div_strong,
- typed_mod_strong, typed_or_strong, typed_and_strong,
- typed_xor_strong, typed_shl_strong, typed_shr_strong,
- typed_sar_strong, typed_less_strong,
- typed_greater_strong, typed_less_equal_strong,
- typed_greater_equal_strong];
-
-let strongStringOrNumberFuncs = [add_strong, less_strong, greater_strong,
- less_equal_strong, greater_equal_strong];
-
-let strongFuncs = strongNumberFuncs.concat(strongStringOrNumberFuncs);
-
-function assertStrongNonThrowBehaviour(expr) {
- assertEquals(eval(expr), eval("'use strong';" + expr));
- assertDoesNotThrow("'use strong'; " + expr + ";");
- assertDoesNotThrow("'use strong'; let v = " + expr + ";");
-}
-
-function assertStrongThrowBehaviour(expr) {
- assertDoesNotThrow("'use strict'; " + expr + ";");
- assertDoesNotThrow("'use strict'; let v = " + expr + ";");
- assertThrows("'use strong'; " + expr + ";", TypeError);
- assertThrows("'use strong'; let v = " + expr + ";", TypeError);
-}
-
-function checkArgumentCombinations(op, leftList, rightList, willThrow) {
- for (let v1 of leftList) {
- let assignExpr = "foo " + op[0] + "= " + v1 + ";";
- for (let v2 of rightList) {
- let compoundAssignment = "'use strong'; let foo = " + v2 + "; " +
- assignExpr;
- if (willThrow) {
- if (op[1]) {
- assertThrows(compoundAssignment, TypeError);
- }
- assertStrongThrowBehaviour("(" + v1 + op[0] + v2 + ")");
- } else {
- if (op[1]) {
- assertDoesNotThrow(compoundAssignment);
- }
- assertStrongNonThrowBehaviour("(" + v1 + op[0] + v2 + ")");
- }
- }
- }
-}
-
-for (let op of strongBinops) {
- checkArgumentCombinations(op, numberValues, numberValues, false);
- checkArgumentCombinations(op, numberValues, nonNumberValues, true);
-}
-
-for (let op of strongNumberBinops) {
- checkArgumentCombinations(op, nonNumberValues,
- numberValues.concat(nonNumberValues), true);
-}
-
-for (let op of strongStringOrNumberBinops) {
- checkArgumentCombinations(op, nonNumberValues,
- numberValues.concat(nonStringOrNumberValues), true);
- checkArgumentCombinations(op, nonStringOrNumberValues, stringValues, true);
- checkArgumentCombinations(op, stringValues, stringValues, false);
-}
-
-for (let op of strongUnops) {
- for (let value of numberValues) {
- assertStrongNonThrowBehaviour("(" + op + value + ")");
- }
- for (let value of nonNumberValues) {
- assertStrongThrowBehaviour("(" + op + value + ")");
- }
-}
-
-for (let func of strongNumberFuncs) {
- // Check IC None*None->None throws
- for (let v of nonNumberValues) {
- let value = eval(v);
- assertThrows(function(){func(2, value);}, TypeError);
- %OptimizeFunctionOnNextCall(func);
- assertThrows(function(){func(2, value);}, TypeError);
- %DeoptimizeFunction(func);
- }
- func(4, 5);
- func(4, 5);
- // Check IC Smi*Smi->Smi throws
- for (let v of nonNumberValues) {
- let value = eval(v);
- assertThrows(function(){func(2, value);}, TypeError);
- %OptimizeFunctionOnNextCall(func);
- assertThrows(function(){func(2, value);}, TypeError);
- %DeoptimizeFunction(func);
- }
- func(NaN, NaN);
- func(NaN, NaN);
- // Check IC Number*Number->Number throws
- for (let v of nonNumberValues) {
- let value = eval(v);
- assertThrows(function(){func(2, value);}, TypeError);
- %OptimizeFunctionOnNextCall(func);
- assertThrows(function(){func(2, value);}, TypeError);
- %DeoptimizeFunction(func);
- }
-}
-
-for (let func of strongStringOrNumberFuncs) {
- // Check IC None*None->None throws
- for (let v of nonNumberValues) {
- let value = eval(v);
- assertThrows(function(){func(2, value);}, TypeError);
- %OptimizeFunctionOnNextCall(func);
- assertThrows(function(){func(2, value);}, TypeError);
- %DeoptimizeFunction(func);
- }
- func("foo", "bar");
- func("foo", "bar");
- // Check IC String*String->String throws
- for (let v of nonNumberValues) {
- let value = eval(v);
- assertThrows(function(){func(2, value);}, TypeError);
- %OptimizeFunctionOnNextCall(func);
- assertThrows(function(){func(2, value);}, TypeError);
- %DeoptimizeFunction(func);
- }
- func(NaN, NaN);
- func(NaN, NaN);
- // Check IC Generic*Generic->Generic throws
- for (let v of nonNumberValues) {
- let value = eval(v);
- assertThrows(function(){func(2, value);}, TypeError);
- %OptimizeFunctionOnNextCall(func);
- assertThrows(function(){func(2, value);}, TypeError);
- %DeoptimizeFunction(func);
- }
-}
-
-for (let func of [str_equal_strong, str_ineq_strong]) {
- assertDoesNotThrow(function(){func(2, undefined)});
- assertDoesNotThrow(function(){func(2, undefined)});
- %OptimizeFunctionOnNextCall(func);
- assertDoesNotThrow(function(){func(2, undefined)});
- %DeoptimizeFunction(func);
- assertDoesNotThrow(function(){func(true, {})});
- assertDoesNotThrow(function(){func(true, {})});
- %OptimizeFunctionOnNextCall(func);
- assertDoesNotThrow(function(){func(true, {})});
- %DeoptimizeFunction(func);
-}
diff --git a/deps/v8/test/mjsunit/strong/literals.js b/deps/v8/test/mjsunit/strong/literals.js
deleted file mode 100644
index 6bdf0f0057..0000000000
--- a/deps/v8/test/mjsunit/strong/literals.js
+++ /dev/null
@@ -1,291 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode --allow-natives-syntax
-// Flags: --harmony-destructuring-bind
-
-'use strict';
-
-(function WeakObjectLiterals() {
- function assertWeakObject(x) {
- assertFalse(%IsStrong(x));
- assertSame(Object.prototype, Object.getPrototypeOf(x));
- }
- assertWeakObject({});
- assertWeakObject({a: 0, b: 0});
- assertWeakObject({a: [], b: {}});
- assertWeakObject({a: [], b: {}}.b);
- assertWeakObject({a: {b: {c: {}}}}.a);
- assertWeakObject({a: {b: {c: {}}}}.a.b);
- assertWeakObject({a: {b: {c: {}}}}.a.b.c);
- assertWeakObject([[1], {}, [[3]]][1]);
- assertWeakObject({f: function(){}});
- assertWeakObject(
- Realm.eval(Realm.current(), "({f: function(){}})"));
-})();
-
-(function StrongObjectLiterals() {
- 'use strong';
- function assertStrongObject(x) {
- assertTrue(%IsStrong(x));
- assertSame(Object.prototype, Object.getPrototypeOf(x));
- }
- assertStrongObject({});
- assertStrongObject({a: 0, b: 0});
- assertStrongObject({a: [], b: {}});
- assertStrongObject({a: [], b: {}}.b);
- assertStrongObject({a: {b: {c: {}}}}.a);
- assertStrongObject({a: {b: {c: {}}}}.a.b);
- assertStrongObject({a: {b: {c: {}}}}.a.b.c);
- // Maps for literals with too many properties are not cached.
- assertStrongObject({
- x001: 0, x002: 0, x003: 0, x004: 0, x005: 0,
- x006: 0, x007: 0, x008: 0, x009: 0, x010: 0,
- x011: 0, x012: 0, x013: 0, x014: 0, x015: 0,
- x016: 0, x017: 0, x018: 0, x019: 0, x020: 0,
- x021: 0, x022: 0, x023: 0, x024: 0, x025: 0,
- x026: 0, x027: 0, x028: 0, x029: 0, x030: 0,
- x031: 0, x032: 0, x033: 0, x034: 0, x035: 0,
- x036: 0, x037: 0, x038: 0, x039: 0, x040: 0,
- x041: 0, x042: 0, x043: 0, x044: 0, x045: 0,
- x046: 0, x047: 0, x048: 0, x049: 0, x050: 0,
- x051: 0, x052: 0, x053: 0, x054: 0, x055: 0,
- x056: 0, x057: 0, x058: 0, x059: 0, x060: 0,
- x061: 0, x062: 0, x063: 0, x064: 0, x065: 0,
- x066: 0, x067: 0, x068: 0, x069: 0, x070: 0,
- x071: 0, x072: 0, x073: 0, x074: 0, x075: 0,
- x076: 0, x077: 0, x078: 0, x079: 0, x080: 0,
- x081: 0, x082: 0, x083: 0, x084: 0, x085: 0,
- x086: 0, x087: 0, x088: 0, x089: 0, x090: 0,
- x091: 0, x092: 0, x093: 0, x094: 0, x095: 0,
- x096: 0, x097: 0, x098: 0, x099: 0, x100: 0,
- x101: 0, x102: 0, x103: 0, x104: 0, x105: 0,
- x106: 0, x107: 0, x108: 0, x109: 0, x110: 0,
- x111: 0, x112: 0, x113: 0, x114: 0, x115: 0,
- x116: 0, x117: 0, x118: 0, x119: 0, x120: 0,
- x121: 0, x122: 0, x123: 0, x124: 0, x125: 0,
- x126: 0, x127: 0, x128: 0, x129: 0, x130: 0,
- x131: 0, x132: 0, x133: 0, x134: 0, x135: 0,
- x136: 0, x137: 0, x138: 0, x139: 0, x140: 0,
- x141: 0, x142: 0, x143: 0, x144: 0, x145: 0,
- x146: 0, x147: 0, x148: 0, x149: 0, x150: 0,
- x151: 0, x152: 0, x153: 0, x154: 0, x155: 0,
- x156: 0, x157: 0, x158: 0, x159: 0, x160: 0,
- x161: 0, x162: 0, x163: 0, x164: 0, x165: 0,
- x166: 0, x167: 0, x168: 0, x169: 0, x170: 0,
- x171: 0, x172: 0, x173: 0, x174: 0, x175: 0,
- x176: 0, x177: 0, x178: 0, x179: 0, x180: 0,
- x181: 0, x182: 0, x183: 0, x184: 0, x185: 0,
- x186: 0, x187: 0, x188: 0, x189: 0, x190: 0,
- x191: 0, x192: 0, x193: 0, x194: 0, x195: 0,
- x196: 0, x197: 0, x198: 0, x199: 0, x200: 0,
- });
- assertStrongObject([[1], {}, [[3]]][1]);
- assertStrongObject({[Date() + ""]: 0, [Symbol()]: 0});
- assertStrongObject({m() { super.m() }});
- assertTrue(%IsStrong({__proto__: {}, get a() {}, set b(x) {}}));
- // Object literals with constant functions are treated specially,
- // but currently only on the toplevel (using Realm.eval to emulate that).
- assertStrongObject({f: function(){}});
- assertStrongObject(
- Realm.eval(Realm.current(), "'use strong'; ({f: function(){}})"));
-})();
-
-(function WeakArrayLiterals(...args) {
- function assertWeakArray(x) {
- assertFalse(%IsStrong(x));
- assertSame(Array.prototype, Object.getPrototypeOf(x));
- }
- let [...r] = [];
- assertWeakArray(args);
- assertWeakArray(r);
- assertWeakArray([]);
- assertWeakArray([1, 2, 3]);
- assertWeakArray([1, 2, ...[3, 4], 5]);
- assertWeakArray([[[]]]);
- assertWeakArray([[1], {}, [[3]]]);
- assertWeakArray([[1], {}, [[3]]][0]);
- assertWeakArray([[1], {}, [[3]]][2]);
- assertWeakArray([[1], {}, [[3]]][2][0]);
- assertWeakArray({a: [], b: {}}.a);
-})();
-
-(function StrongArrayLiterals() {
- 'use strong';
- function assertStrongArray(x) {
- assertTrue(%IsStrong(x));
- assertSame(Array.prototype, Object.getPrototypeOf(x));
- }
- let [...r] = [];
- assertStrongArray((function(...a) { return a; })());
- assertStrongArray(r);
- assertStrongArray([]);
- assertStrongArray([1, 2, 3]);
- assertStrongArray([1, 2, ...[3, 4], 5]);
- assertStrongArray([[[]]]);
- assertStrongArray([[1], {}, [[3]]]);
- assertStrongArray([[1], {}, [[3]]][0]);
- assertStrongArray([[1], {}, [[3]]][2]);
- assertStrongArray([[1], {}, [[3]]][2][0]);
- assertStrongArray({a: [], b: {}}.a);
-})();
-
-(function WeakFunctionLiterals() {
- function assertWeakFunction(x) {
- assertFalse(%IsStrong(x));
- assertFalse(%IsStrong(x.prototype));
- assertSame(Function.prototype, Object.getPrototypeOf(x));
- }
- function f() {}
- assertWeakFunction(f);
- assertWeakFunction(function(){});
- assertWeakFunction(function f(){});
- assertWeakFunction(() => {});
- assertWeakFunction(x => x);
- assertWeakFunction({m(){}}.m);
- assertWeakFunction(Object.getOwnPropertyDescriptor(
- {get a(){}}, 'a').get);
- assertWeakFunction(Object.getOwnPropertyDescriptor(
- {set a(x){}}, 'a').set);
- assertWeakFunction((class {static m(){}}).m);
- assertWeakFunction(Object.getOwnPropertyDescriptor(
- class {static get a(){}}, 'a').get);
- assertWeakFunction(Object.getOwnPropertyDescriptor(
- class {static set a(x){}}, 'a').set);
- assertWeakFunction((new class {m(){}}).m);
- assertWeakFunction(Object.getOwnPropertyDescriptor(
- (class {get a(){}}).prototype, 'a').get);
- assertWeakFunction(Object.getOwnPropertyDescriptor(
- (class {set a(x){}}).prototype, 'a').set);
-})();
-
-(function StrongFunctionLiterals() {
- 'use strong';
- function assertStrongFunction(x) {
- assertTrue(%IsStrong(x));
- assertFalse('prototype' in x);
- assertSame(Function.prototype, Object.getPrototypeOf(x));
- }
- function f() {}
- assertStrongFunction(f);
- assertStrongFunction(function(){});
- assertStrongFunction(function f(){});
- assertStrongFunction(() => {});
- assertStrongFunction(x => x);
- assertStrongFunction({m(){}}.m);
- assertStrongFunction(Object.getOwnPropertyDescriptor(
- {get a(){}}, 'a').get);
- assertStrongFunction(Object.getOwnPropertyDescriptor(
- {set a(x){}}, 'a').set);
- assertStrongFunction((class {static m(){}}).m);
- assertStrongFunction(Object.getOwnPropertyDescriptor(
- class {static get a(){}}, 'a').get);
- assertStrongFunction(Object.getOwnPropertyDescriptor(
- class {static set a(x){}}, 'a').set);
- assertStrongFunction((new class {m(){}}).m);
- assertStrongFunction(Object.getOwnPropertyDescriptor(
- (class {get a(){}}).prototype, 'a').get);
- assertStrongFunction(Object.getOwnPropertyDescriptor(
- (class {set a(x){}}).prototype, 'a').set);
-})();
-
-(function SelfStrongFunctionLiterals() {
- function assertStrongFunction(x) {
- assertTrue(%IsStrong(x));
- assertFalse('prototype' in x);
- assertSame(Function.prototype, Object.getPrototypeOf(x));
- }
- function f() {'use strong'}
- assertStrongFunction(f);
- assertStrongFunction(function(){'use strong'});
- assertStrongFunction(function f(){'use strong'});
- assertStrongFunction(() => {'use strong'});
- assertStrongFunction(x => {'use strong'});
- assertStrongFunction({m(){'use strong'}}.m);
- assertStrongFunction(Object.getOwnPropertyDescriptor(
- {get a(){'use strong'}}, 'a').get);
- assertStrongFunction(Object.getOwnPropertyDescriptor(
- {set a(x){'use strong'}}, 'a').set);
- assertStrongFunction((class {static m(){'use strong'}}).m);
- assertStrongFunction(Object.getOwnPropertyDescriptor(
- class {static get a(){'use strong'}}, 'a').get);
- assertStrongFunction(Object.getOwnPropertyDescriptor(
- class {static set a(x){'use strong'}}, 'a').set);
- assertStrongFunction((new class {m(){'use strong'}}).m);
- assertStrongFunction(Object.getOwnPropertyDescriptor(
- (class {get a(){'use strong'}}).prototype, 'a').get);
- assertStrongFunction(Object.getOwnPropertyDescriptor(
- (class {set a(x){'use strong'}}).prototype, 'a').set);
-})();
-
-let GeneratorPrototype = (function*(){}).__proto__;
-
-(function WeakGeneratorLiterals() {
- function assertWeakGenerator(x) {
- assertFalse(%IsStrong(x));
- assertFalse(%IsStrong(x.prototype));
- assertSame(GeneratorPrototype, Object.getPrototypeOf(x));
- assertFalse(%IsStrong(x()));
- }
- function* g() {}
- assertWeakGenerator(g);
- assertWeakGenerator(function*(){});
- assertWeakGenerator(function* g(){});
- assertWeakGenerator({*m(){}}.m);
- assertWeakGenerator((class {static *m(){}}).m);
- assertWeakGenerator((new class {*m(){}}).m);
-})();
-
-(function StrongGeneratorLiterals() {
- 'use strong';
- function assertStrongGenerator(x) {
- assertTrue(%IsStrong(x));
- // TODO(rossberg): strongify generator prototypes
- // assertTrue(%IsStrong(x.prototype));
- assertSame(GeneratorPrototype, Object.getPrototypeOf(x));
- // TODO(rossberg): strongify generator instances
- // assertTrue(%IsStrong(x()));
- }
- function* g() {}
- assertStrongGenerator(g);
- assertStrongGenerator(function*(){});
- assertStrongGenerator(function* g(){});
- assertStrongGenerator({*m(){}}.m);
- assertStrongGenerator((class {static *m(){}}).m);
- assertStrongGenerator((new class {*m(){}}).m);
-})();
-
-(function SelfStrongGeneratorLiterals() {
- function assertStrongGenerator(x) {
- assertTrue(%IsStrong(x));
- // TODO(rossberg): strongify generator prototypes
- // assertTrue(%IsStrong(x.prototype));
- assertSame(GeneratorPrototype, Object.getPrototypeOf(x));
- // TODO(rossberg): strongify generator instances
- // assertTrue(%IsStrong(x()));
- }
- function* g() {'use strong'}
- assertStrongGenerator(g);
- assertStrongGenerator(function*(){'use strong'});
- assertStrongGenerator(function* g(){'use strong'});
- assertStrongGenerator({*m(){'use strong'}}.m);
- assertStrongGenerator((class {static *m(){'use strong'}}).m);
- assertStrongGenerator((new class {*m(){'use strong'}}).m);
-})();
-
-(function WeakRegExpLiterals() {
- function assertWeakRegExp(x) {
- assertFalse(%IsStrong(x));
- }
- assertWeakRegExp(/abc/);
-})();
-
-(function StrongRegExpLiterals() {
- 'use strong';
- function assertStrongRegExp(x) {
- // TODO(rossberg): strongify regexps
- // assertTrue(%IsStrong(x));
- }
- assertStrongRegExp(/abc/);
-})();
diff --git a/deps/v8/test/mjsunit/strong/load-builtins.js b/deps/v8/test/mjsunit/strong/load-builtins.js
deleted file mode 100644
index 6638ff3332..0000000000
--- a/deps/v8/test/mjsunit/strong/load-builtins.js
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode
-
-function getGlobal() {
- return this;
-}
-
-function polluteGlobal() {
- bar = 0;
-}
-
-(function() {
- "use strict";
-
- let builtins = [
- Array,
- Object,
- Function,
- getGlobal()
- ];
-
- for (let builtin of builtins) {
- assertThrows(function(){"use strong"; builtin.foo}, TypeError);
- assertThrows(function(){"use strong"; builtin[0]}, TypeError);
- assertThrows(function(){"use strong"; builtin[10000]}, TypeError);
- builtin.foo = 1;
- assertDoesNotThrow(function(){"use strong"; builtin.foo});
- assertThrows(function(){"use strong"; builtin.bar});
- assertThrows(function(){"use strong"; builtin[0]}, TypeError);
- assertThrows(function(){"use strong"; builtin[10000]}, TypeError);
- builtin[0] = 1;
- assertDoesNotThrow(function(){"use strong"; builtin.foo});
- assertThrows(function(){"use strong"; builtin.bar});
- assertDoesNotThrow(function(){"use strong"; builtin[0]});
- assertThrows(function(){"use strong"; builtin[10000]}, TypeError);
- }
- polluteGlobal();
- assertDoesNotThrow(function(){"use strong"; getGlobal().bar});
-})();
diff --git a/deps/v8/test/mjsunit/strong/load-element-mutate-backing-store.js b/deps/v8/test/mjsunit/strong/load-element-mutate-backing-store.js
deleted file mode 100644
index f3465028b7..0000000000
--- a/deps/v8/test/mjsunit/strong/load-element-mutate-backing-store.js
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode --allow-natives-syntax
-
-function getSloppyArguments() {
- return arguments;
-}
-
-function getObjects() {
- "use strict";
- return [
- {},
- Object(""),
- [],
- (function(){}),
- (class Foo {}),
- getSloppyArguments(),
- arguments,
- new Date(),
- ];
-}
-
-// TODO(conradw): add tests for non-inheritance once semantics are implemented.
-function getNonInheritingObjects() {
- "use strong";
- return [
- Object(""),
- [],
- // TODO(conradw): uncomment and correct test once Object.defineProperty is
- // fixed.
- // new Uint32Array(0)
- ];
-}
-
-function readFromObjectElementSloppy(o) {
- return o[0];
-}
-
-function readFromObjectElementSparseSloppy(o) {
- return o[100000];
-}
-
-function readFromObjectElementNonSmiSloppy(o) {
- return o[3000000000];
-}
-
-function readFromObjectNonIndexSloppy(o) {
- return o[5000000000];
-}
-
-function readFromObjectElementVarSloppy(o) {
- var a = 0;
- return o[a];
-}
-
-function readFromObjectElementSparseVarSloppy(o) {
- var a = 100000;
- return o[a];
-}
-
-function readFromObjectElementNonSmiVarSloppy(o) {
- var a = 3000000000;
- return o[a];
-}
-
-function readFromObjectNonIndexVarSloppy(o) {
- var a = 5000000000;
- return o[a];
-}
-
-function readFromObjectElementStrong(o) {
- "use strong";
- return o[0];
-}
-
-function readFromObjectElementSparseStrong(o) {
- "use strong";
- return o[100000];
-}
-
-function readFromObjectElementNonSmiStrong(o) {
- "use strong";
- return o[3000000000];
-}
-
-function readFromObjectNonIndexStrong(o) {
- "use strong";
- return o[5000000000];
-}
-
-function readFromObjectElementLetStrong(o) {
- "use strong";
- let a = 0;
- return o[a];
-}
-
-function readFromObjectElementSparseLetStrong(o) {
- "use strong";
- let a = 100000;
- return o[a];
-}
-
-function readFromObjectElementNonSmiLetStrong(o) {
- "use strong";
- let a = 3000000000;
- return o[a];
-}
-
-function readFromObjectNonIndexLetStrong(o) {
- "use strong";
- let a = 5000000000;
- return o[a];
-}
-
-function getDescs(x) {
- return [
- {value: x},
- {configurable: true, enumerable: true, writable: true, value: x},
- {configurable: true, enumerable: true, get: (function() {return x}) },
- ];
-}
-
-function assertStrongSemantics(func, object) {
- %DeoptimizeFunction(func);
- %ClearFunctionTypeFeedback(func);
- assertThrows(function(){func(object)}, TypeError);
- assertThrows(function(){func(object)}, TypeError);
- assertThrows(function(){func(object)}, TypeError);
- %OptimizeFunctionOnNextCall(func);
- assertThrows(function(){func(object)}, TypeError);
- %DeoptimizeFunction(func);
- assertThrows(function(){func(object)}, TypeError);
-}
-
-function assertSloppySemantics(func, object) {
- %DeoptimizeFunction(func);
- %ClearFunctionTypeFeedback(func);
- assertDoesNotThrow(function(){func(object)});
- assertDoesNotThrow(function(){func(object)});
- assertDoesNotThrow(function(){func(object)});
- %OptimizeFunctionOnNextCall(func);
- assertDoesNotThrow(function(){func(object)});
- %DeoptimizeFunction(func);
- assertDoesNotThrow(function(){func(object)});
-}
-
-(function () {
- "use strict";
-
- let goodKeys = [
- "0",
- "100000",
- "3000000000",
- "5000000000"
- ]
-
- let badKeys = [
- "bar",
- "1",
- "100001",
- "3000000001",
- "5000000001"
- ];
-
- let values = [
- "string",
- 1,
- 100001,
- 30000000001,
- 50000000001,
- NaN,
- {},
- undefined
- ];
-
- let badAccessorDescs = [
- { set: (function(){}) },
- { configurable: true, enumerable: true, set: (function(){}) }
- ];
-
- let readSloppy = [
- readFromObjectElementSloppy,
- readFromObjectElementSparseSloppy,
- readFromObjectElementNonSmiSloppy,
- readFromObjectNonIndexSloppy,
- readFromObjectElementVarSloppy,
- readFromObjectElementSparseVarSloppy,
- readFromObjectElementNonSmiVarSloppy,
- readFromObjectNonIndexVarSloppy
- ];
-
- let readStrong = [
- readFromObjectElementStrong,
- readFromObjectElementSparseStrong,
- readFromObjectElementNonSmiStrong,
- readFromObjectNonIndexStrong,
- readFromObjectElementLetStrong,
- readFromObjectElementSparseLetStrong,
- readFromObjectElementNonSmiLetStrong,
- readFromObjectNonIndexLetStrong
- ];
-
- let dummyProto = {};
- for (let key of goodKeys) {
- Object.defineProperty(dummyProto, key, { value: undefined });
- }
-
- // After altering the backing store, accessing a missing property should still
- // throw.
- for (let key of badKeys) {
- for (let value of values) {
- for (let desc of getDescs(value)) {
- let objects = getObjects();
- let nonInheritingObjects = getNonInheritingObjects();
- for (let object of objects.concat(nonInheritingObjects)) {
- Object.defineProperty(object, key, desc);
- for (let func of readStrong) {
- assertStrongSemantics(func, object);
- }
- for (let func of readSloppy) {
- assertSloppySemantics(func, object);
- }
- }
- for (let object of objects) {
- // Accessing a property which is on the prototype chain of the object
- // should not throw.
- object.__proto__ = dummyProto;
- for (let key of goodKeys) {
- for (let func of readStrong.concat(readSloppy)) {
- assertSloppySemantics(func, object);
- }
- }
- }
- }
- }
- }
-})();
diff --git a/deps/v8/test/mjsunit/strong/load-element.js b/deps/v8/test/mjsunit/strong/load-element.js
deleted file mode 100644
index 4007b7db61..0000000000
--- a/deps/v8/test/mjsunit/strong/load-element.js
+++ /dev/null
@@ -1,267 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode --allow-natives-syntax
-
-function getSloppyArguments() {
- return arguments;
-}
-
-function getObjects() {
- "use strict";
- return [
- {},
- Object(""),
- [],
- (function(){}),
- (class Foo {}),
- getSloppyArguments(),
- arguments,
- new Date()
- ];
-}
-
-//TODO(conradw): add tests for non-inheritance once semantics are implemented.
-function getNonInheritingObjects() {
- "use strong";
- return [
- Object(""),
- [],
- new Uint32Array(0)
- ];
-}
-
-function readFromObjectElementSloppy(o) {
- return o[0];
-}
-
-function readFromObjectElementSparseSloppy(o) {
- return o[100000];
-}
-
-function readFromObjectElementNonSmiSloppy(o) {
- return o[3000000000];
-}
-
-function readFromObjectNonIndexSloppy(o) {
- return o[5000000000];
-}
-
-function readFromObjectElementVarSloppy(o) {
- var a = 0;
- return o[a];
-}
-
-function readFromObjectElementSparseVarSloppy(o) {
- var a = 100000;
- return o[a];
-}
-
-function readFromObjectElementNonSmiVarSloppy(o) {
- var a = 3000000000;
- return o[a];
-}
-
-function readFromObjectNonIndexVarSloppy(o) {
- var a = 5000000000;
- return o[a];
-}
-
-function readFromObjectElementStrong(o) {
- "use strong";
- return o[0];
-}
-
-function readFromObjectElementSparseStrong(o) {
- "use strong";
- return o[100000];
-}
-
-function readFromObjectElementNonSmiStrong(o) {
- "use strong";
- return o[3000000000];
-}
-
-function readFromObjectNonIndexStrong(o) {
- "use strong";
- return o[5000000000];
-}
-
-function readFromObjectElementLetStrong(o) {
- "use strong";
- let a = 0;
- return o[a];
-}
-
-function readFromObjectElementSparseLetStrong(o) {
- "use strong";
- let a = 100000;
- return o[a];
-}
-
-function readFromObjectElementNonSmiLetStrong(o) {
- "use strong";
- let a = 3000000000;
- return o[a];
-}
-
-function readFromObjectNonIndexLetStrong(o) {
- "use strong";
- let a = 5000000000;
- return o[a];
-}
-
-function getDescs(x) {
- return [
- {value: x},
- {configurable: true, enumerable: true, writable: true, value: x},
- {configurable: true, enumerable: true, get: (function() {return x}) },
- ];
-}
-
-function assertStrongSemantics(func, object) {
- %DeoptimizeFunction(func);
- %ClearFunctionTypeFeedback(func);
- assertThrows(function(){func(object)}, TypeError);
- assertThrows(function(){func(object)}, TypeError);
- assertThrows(function(){func(object)}, TypeError);
- %OptimizeFunctionOnNextCall(func);
- assertThrows(function(){func(object)}, TypeError);
- %DeoptimizeFunction(func);
- assertThrows(function(){func(object)}, TypeError);
-}
-
-function assertSloppySemantics(func, object) {
- %DeoptimizeFunction(func);
- %ClearFunctionTypeFeedback(func);
- assertDoesNotThrow(function(){func(object)});
- assertDoesNotThrow(function(){func(object)});
- assertDoesNotThrow(function(){func(object)});
- %OptimizeFunctionOnNextCall(func);
- assertDoesNotThrow(function(){func(object)});
- %DeoptimizeFunction(func);
- assertDoesNotThrow(function(){func(object)});
-}
-
-(function () {
- "use strict";
-
- let goodKeys = [
- "0",
- "100000",
- "3000000000",
- "5000000000"
- ]
-
- let badKeys = [
- "bar",
- "1",
- "100001",
- "3000000001",
- "5000000001"
- ];
-
- let values = [
- "string",
- 1,
- 100001,
- 30000000001,
- 50000000001,
- NaN,
- {},
- undefined
- ];
-
- let literals = [0, NaN, true, ""];
-
- let badAccessorDescs = [
- { set: (function(){}) },
- { configurable: true, enumerable: true, set: (function(){}) }
- ];
-
- let readSloppy = [
- readFromObjectElementSloppy,
- readFromObjectElementSparseSloppy,
- readFromObjectElementNonSmiSloppy,
- readFromObjectNonIndexSloppy,
- readFromObjectElementVarSloppy,
- readFromObjectElementSparseVarSloppy,
- readFromObjectElementNonSmiVarSloppy,
- readFromObjectNonIndexVarSloppy
- ];
-
- let readStrong = [
- readFromObjectElementStrong,
- readFromObjectElementSparseStrong,
- readFromObjectElementNonSmiStrong,
- readFromObjectNonIndexStrong,
- readFromObjectElementLetStrong,
- readFromObjectElementSparseLetStrong,
- readFromObjectElementNonSmiLetStrong,
- readFromObjectNonIndexLetStrong
- ];
-
- let dummyProto = {};
- for (let key of goodKeys) {
- Object.defineProperty(dummyProto, key, { value: undefined });
- }
-
- let dummyAccessorProto = {};
- for (let key of goodKeys) {
- Object.defineProperty(dummyAccessorProto, key, { set: (function(){}) })
- }
-
- // String literals/objects should not throw on character index access
- assertDoesNotThrow(function() {"use strong"; return "string"[0]; });
- assertDoesNotThrow(function() {"use strong"; return Object("string")[0]; });
-
- // Attempting to access a property on an object with no defined properties
- // should throw.
- for (let object of getObjects().concat(getNonInheritingObjects(), literals)) {
- for (let func of readStrong) {
- assertStrongSemantics(func, object);
- }
- for (let func of readSloppy) {
- assertSloppySemantics(func, object);
- }
- }
- for (let object of getObjects()) {
- // Accessing a property which is on the prototype chain of the object should
- // not throw.
- object.__proto__ = dummyProto;
- for (let key of goodKeys) {
- for (let func of readStrong.concat(readSloppy)) {
- assertSloppySemantics(func, object);
- }
- }
- }
- // Properties with accessor descriptors missing 'get' should throw on access.
- for (let desc of badAccessorDescs) {
- for (let key of goodKeys) {
- for (let object of getObjects()) {
- Object.defineProperty(object, key, desc);
- for (let func of readStrong) {
- assertStrongSemantics(func, object);
- }
- for (let func of readSloppy) {
- assertSloppySemantics(func, object);
- }
- }
- }
- }
- // The same behaviour should be expected for bad accessor properties on the
- // prototype chain.
- for (let object of getObjects()) {
- object.__proto__ = dummyAccessorProto;
- for (let func of readStrong) {
- assertStrongSemantics(func, object);
- }
- for (let func of readSloppy) {
- assertSloppySemantics(func, object);
- }
- }
- assertThrows(function(){"use strong"; typeof ({})[1];}, TypeError);
- assertThrows(
- function(){"use strong"; typeof ({})[1] === "undefined"}, TypeError);
-})();
diff --git a/deps/v8/test/mjsunit/strong/load-property-mutate-backing-store.js b/deps/v8/test/mjsunit/strong/load-property-mutate-backing-store.js
deleted file mode 100644
index 5ed45530c4..0000000000
--- a/deps/v8/test/mjsunit/strong/load-property-mutate-backing-store.js
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode --allow-natives-syntax
-
-function getSloppyArguments() {
- return arguments;
-}
-
-function getObjects() {
- "use strict";
- return [
- {},
- Object(""),
- [],
- (function(){}),
- (class Foo {}),
- getSloppyArguments(),
- arguments,
- new Date(),
- // TODO(conradw): uncomment once Object.defineProperty is fixed.
- // new Uint32Array(0)
- ];
-}
-
-function readFromObjectSloppy(o) {
- return o.foo;
-}
-
-function readFromObjectKeyedSloppy(o) {
- return o["foo"];
-}
-
-function readFromObjectKeyedVarSloppy(o) {
- var a = "foo";
- return o[a];
-}
-
-function readFromObjectKeyedComputedSloppy(o) {
- var a = "o";
- return o["fo" + a];
-}
-
-function readFromObjectStrong(o) {
- "use strong";
- return o.foo;
-}
-
-function readFromObjectKeyedStrong(o) {
- "use strong";
- return o["foo"];
-}
-
-function readFromObjectKeyedLetStrong(o) {
- "use strong";
- let a = "foo";
- return o[a];
-}
-
-function readFromObjectKeyedComputedStrong(o) {
- "use strong";
- let a = "o";
- return o["fo" + a];
-}
-
-function getDescs(x) {
- return [
- {value: x},
- {configurable: true, enumerable: true, writable: true, value: x},
- {configurable: true, enumerable: true, get: (function() {return x}) },
- ];
-}
-
-function assertStrongSemantics(func, object) {
- %DeoptimizeFunction(func);
- %ClearFunctionTypeFeedback(func);
- assertThrows(function(){func(object)}, TypeError);
- assertThrows(function(){func(object)}, TypeError);
- assertThrows(function(){func(object)}, TypeError);
- %OptimizeFunctionOnNextCall(func);
- assertThrows(function(){func(object)}, TypeError);
- %DeoptimizeFunction(func);
- assertThrows(function(){func(object)}, TypeError);
-}
-
-function assertSloppySemantics(func, object) {
- %DeoptimizeFunction(func);
- %ClearFunctionTypeFeedback(func);
- assertDoesNotThrow(function(){func(object)});
- assertDoesNotThrow(function(){func(object)});
- assertDoesNotThrow(function(){func(object)});
- %OptimizeFunctionOnNextCall(func);
- assertDoesNotThrow(function(){func(object)});
- %DeoptimizeFunction(func);
- assertDoesNotThrow(function(){func(object)});
-}
-
-(function () {
- "use strict";
-
- let goodKeys = [
- "foo"
- ]
-
- let badKeys = [
- "bar",
- "1",
- "100001",
- "3000000001",
- "5000000001"
- ];
-
- let values = [
- "string",
- 1,
- 100001,
- 30000000001,
- 50000000001,
- NaN,
- {},
- undefined
- ];
-
- let badAccessorDescs = [
- { set: (function(){}) },
- { configurable: true, enumerable: true, set: (function(){}) }
- ];
-
- let readSloppy = [
- readFromObjectSloppy,
- readFromObjectKeyedSloppy,
- readFromObjectKeyedVarSloppy,
- readFromObjectKeyedComputedSloppy
- ];
-
- let readStrong = [
- readFromObjectStrong,
- readFromObjectKeyedStrong,
- readFromObjectKeyedLetStrong,
- readFromObjectKeyedComputedStrong
- ];
-
- let dummyProto = {};
- for (let key of goodKeys) {
- Object.defineProperty(dummyProto, key, { value: undefined });
- }
-
- // After altering the backing store, accessing a missing property should still
- // throw.
- for (let key of badKeys) {
- for (let value of values) {
- for (let desc of getDescs(value)) {
- for (let object of getObjects()) {
- Object.defineProperty(object, key, desc);
- for (let func of readStrong) {
- assertStrongSemantics(func, object);
- }
- for (let func of readSloppy) {
- assertSloppySemantics(func, object);
- }
- // Accessing a property which is on the prototype chain of the object
- // should not throw.
- object.__proto__ = dummyProto;
- for (let key of goodKeys) {
- for (let func of readStrong.concat(readSloppy)) {
- assertSloppySemantics(func, object);
- }
- }
- }
- }
- }
- }
-})();
diff --git a/deps/v8/test/mjsunit/strong/load-property.js b/deps/v8/test/mjsunit/strong/load-property.js
deleted file mode 100644
index ddbcbb67fb..0000000000
--- a/deps/v8/test/mjsunit/strong/load-property.js
+++ /dev/null
@@ -1,203 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode --allow-natives-syntax
-
-function getSloppyArguments() {
- return arguments;
-}
-
-function getObjects() {
- "use strict";
- return [
- {},
- Object(""),
- [],
- (function(){}),
- (class Foo {}),
- getSloppyArguments(),
- arguments,
- new Date(),
- new Uint32Array(0)
- ];
-}
-
-function readFromObjectSloppy(o) {
- return o.foo;
-}
-
-function readFromObjectKeyedSloppy(o) {
- return o["foo"];
-}
-
-function readFromObjectKeyedVarSloppy(o) {
- var a = "foo";
- return o[a];
-}
-
-function readFromObjectKeyedComputedSloppy(o) {
- var a = "o";
- return o["fo" + a];
-}
-
-function readFromObjectStrong(o) {
- "use strong";
- return o.foo;
-}
-
-function readFromObjectKeyedStrong(o) {
- "use strong";
- return o["foo"];
-}
-
-function readFromObjectKeyedLetStrong(o) {
- "use strong";
- let a = "foo";
- return o[a];
-}
-
-function readFromObjectKeyedComputedStrong(o) {
- "use strong";
- let a = "o";
- return o["fo" + a];
-}
-
-function getDescs(x) {
- return [
- {value: x},
- {configurable: true, enumerable: true, writable: true, value: x},
- {configurable: true, enumerable: true, get: (function() {return x}) },
- ];
-}
-
-function assertStrongSemantics(func, object) {
- %DeoptimizeFunction(func);
- %ClearFunctionTypeFeedback(func);
- assertThrows(function(){func(object)}, TypeError);
- assertThrows(function(){func(object)}, TypeError);
- assertThrows(function(){func(object)}, TypeError);
- %OptimizeFunctionOnNextCall(func);
- assertThrows(function(){func(object)}, TypeError);
- %DeoptimizeFunction(func);
- assertThrows(function(){func(object)}, TypeError);
-}
-
-function assertSloppySemantics(func, object) {
- %DeoptimizeFunction(func);
- %ClearFunctionTypeFeedback(func);
- assertDoesNotThrow(function(){func(object)});
- assertDoesNotThrow(function(){func(object)});
- assertDoesNotThrow(function(){func(object)});
- %OptimizeFunctionOnNextCall(func);
- assertDoesNotThrow(function(){func(object)});
- %DeoptimizeFunction(func);
- assertDoesNotThrow(function(){func(object)});
-}
-
-(function () {
- "use strict";
-
- let goodKeys = [
- "foo"
- ]
-
- let badKeys = [
- "bar",
- "1",
- "100001",
- "3000000001",
- "5000000001"
- ];
-
- let values = [
- "string",
- 1,
- 100001,
- 30000000001,
- 50000000001,
- NaN,
- {},
- undefined
- ];
-
- let literals = [0, NaN, true, "string"];
-
- let badAccessorDescs = [
- { set: (function(){}) },
- { configurable: true, enumerable: true, set: (function(){}) }
- ];
-
- let readSloppy = [
- readFromObjectSloppy,
- readFromObjectKeyedSloppy,
- readFromObjectKeyedVarSloppy,
- readFromObjectKeyedComputedSloppy
- ];
-
- let readStrong = [
- readFromObjectStrong,
- readFromObjectKeyedStrong,
- readFromObjectKeyedLetStrong,
- readFromObjectKeyedComputedStrong
- ];
-
- let dummyProto = {};
- for (let key of goodKeys) {
- Object.defineProperty(dummyProto, key, { value: undefined });
- }
-
- let dummyAccessorProto = {};
- for (let key of goodKeys) {
- Object.defineProperty(dummyAccessorProto, key, { set: (function(){}) })
- }
-
- // Attempting to access a property on an object with no defined properties
- // should throw.
- for (let object of getObjects().concat(literals)) {
- for (let func of readStrong) {
- assertStrongSemantics(func, object);
- }
- for (let func of readSloppy) {
- assertSloppySemantics(func, object);
- }
- }
- for (let object of getObjects()) {
- // Accessing a property which is on the prototype chain of the object should
- // not throw.
- object.__proto__ = dummyProto;
- for (let key of goodKeys) {
- for (let func of readStrong.concat(readSloppy)) {
- assertSloppySemantics(func, object);
- }
- }
- }
- // Properties with accessor descriptors missing 'get' should throw on access.
- for (let desc of badAccessorDescs) {
- for (let key of goodKeys) {
- for (let object of getObjects()) {
- Object.defineProperty(object, key, desc);
- for (let func of readStrong) {
- assertStrongSemantics(func, object);
- }
- for (let func of readSloppy) {
- assertSloppySemantics(func, object);
- }
- }
- }
- }
- // The same behaviour should be expected for bad accessor properties on the
- // prototype chain.
- for (let object of getObjects()) {
- object.__proto__ = dummyAccessorProto;
- for (let func of readStrong) {
- assertStrongSemantics(func, object);
- }
- for (let func of readSloppy) {
- assertSloppySemantics(func, object);
- }
- }
- assertThrows(function(){"use strong"; typeof ({}).foo;}, TypeError);
- assertThrows(
- function(){"use strong"; typeof ({}).foo === "undefined"}, TypeError);
-})();
diff --git a/deps/v8/test/mjsunit/strong/load-proxy.js b/deps/v8/test/mjsunit/strong/load-proxy.js
deleted file mode 100644
index 417a41faf8..0000000000
--- a/deps/v8/test/mjsunit/strong/load-proxy.js
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-proxies --strong-mode
-
-// Forwarding proxies adapted from proposal definition
-function handlerMaker1(obj) {
- return {
- getPropertyDescriptor: function(name) {
- var desc;
- var searchObj = obj;
- while (desc === undefined && searchObj != null) {
- desc = Object.getOwnPropertyDescriptor(searchObj, name);
- searchObj = searchObj.__proto__;
- }
- // a trapping proxy's properties must always be configurable
- if (desc !== undefined) { desc.configurable = true; }
- return desc;
- },
- fix: function() {
- if (Object.isFrozen(obj)) {
- var result = {};
- Object.getOwnPropertyNames(obj).forEach(function(name) {
- result[name] = Object.getOwnPropertyDescriptor(obj, name);
- });
- return result;
- }
- // As long as obj is not frozen, the proxy won't allow itself to be fixed
- return undefined; // will cause a TypeError to be thrown
- }
- };
-}
-function handlerMaker2(obj) {
- return {
- get: function(receiver, name) {
- return obj[name];
- },
- fix: function() {
- if (Object.isFrozen(obj)) {
- var result = {};
- Object.getOwnPropertyNames(obj).forEach(function(name) {
- result[name] = Object.getOwnPropertyDescriptor(obj, name);
- });
- return result;
- }
- // As long as obj is not frozen, the proxy won't allow itself to be fixed
- return undefined; // will cause a TypeError to be thrown
- }
- };
-}
-var baseObj = {};
-var proxy1 = new Proxy({}, handlerMaker1(baseObj));
-var proxy2 = new Proxy({}, handlerMaker2(baseObj));
-var childObj1 = { __proto__: proxy1 };
-var childObj2 = { __proto__: proxy2 };
-var childObjAccessor1 = { set foo(_){}, set "1"(_){}, __proto__: proxy1 };
-var childObjAccessor2 = { set foo(_){}, set "1"(_){}, __proto__: proxy2 };
-
-(function() {
- "use strong";
- // TODO(conradw): These asserts are sanity checking V8's proxy implementation.
- // Strong mode semantics for ES6 proxies still need to be explored.
- assertDoesNotThrow(function(){proxy1.foo});
- assertDoesNotThrow(function(){proxy1[1]});
- assertDoesNotThrow(function(){proxy2.foo});
- assertDoesNotThrow(function(){proxy2[1]});
- assertDoesNotThrow(function(){childObj1.foo});
- assertDoesNotThrow(function(){childObj1[1]});
- assertDoesNotThrow(function(){childObj2.foo});
- assertDoesNotThrow(function(){childObj2[1]});
- assertThrows(function(){baseObj.foo}, TypeError);
- assertThrows(function(){baseObj[1]}, TypeError);
- assertThrows(function(){childObjAccessor1.foo}, TypeError);
- assertThrows(function(){childObjAccessor1[1]}, TypeError);
- assertThrows(function(){childObjAccessor2.foo}, TypeError);
- assertThrows(function(){childObjAccessor2[1]}, TypeError);
-
- // Once the proxy is no longer trapping, property access should have strong
- // semantics.
- Object.freeze(baseObj);
-
- // TODO(neis): Reenable once proxies properly support freeze.
- //
- // Object.freeze(proxy1);
- // assertThrows(function(){proxy1.foo}, TypeError);
- // assertThrows(function(){proxy1[1]}, TypeError);
- // assertThrows(function(){childObj1.foo}, TypeError);
- // assertThrows(function(){childObj1[1]}, TypeError);
- // assertThrows(function(){childObjAccessor1.foo}, TypeError);
- // assertThrows(function(){childObjAccessor1[1]}, TypeError);
- //
- // Object.freeze(proxy2);
- // assertThrows(function(){proxy2.foo}, TypeError);
- // assertThrows(function(){proxy2[1]}, TypeError);
- // assertThrows(function(){childObj2.foo}, TypeError);
- // assertThrows(function(){childObj2[1]}, TypeError);
- // assertThrows(function(){childObjAccessor2.foo}, TypeError);
- // assertThrows(function(){childObjAccessor2[1]}, TypeError);
-})();
diff --git a/deps/v8/test/mjsunit/strong/load-super.js b/deps/v8/test/mjsunit/strong/load-super.js
deleted file mode 100644
index 4aa91c222a..0000000000
--- a/deps/v8/test/mjsunit/strong/load-super.js
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode
-
-"use strong";
-
-function testSuper(object) {
- assertEquals(0, object.validLoad());
- assertThrows(function(){ return object.propertyLoad() }, TypeError);
- assertThrows(function(){ return object.elementLoad() }, TypeError);
- assertThrows(function(){ return object.accessorLoad() }, TypeError);
-}
-
-class A {
- constructor() {}
- foo() {
- return 0;
- }
- get bar() {
- return 0;
- }
- set baz(_) {
- return;
- }
-}
-
-class B extends A {
- constructor() {
- super();
- }
- validLoad() {
- return super.foo() + super.bar;
- }
- propertyLoad() {
- return super.x;
- }
- elementLoad() {
- return super[1];
- }
- accessorLoad() {
- return super.baz;
- }
-}
-
-class C extends A {
- constructor() {
- super();
- this[1] = 0;
- this.x = 0;
- }
- get baz() {
- return 0;
- }
- validLoad() {
- return super.foo() + super.bar;
- }
- propertyLoad() {
- return super.x;
- }
- elementLoad() {
- return super[1];
- }
- accessorLoad() {
- return super.baz;
- }
-}
-
-let b = new B();
-let c = new C();
-testSuper(b);
-testSuper(c);
-
-let d = {
- "0": 0,
- foo: 0,
- bar: (function(){return 0}),
- get baz(){return 0},
- set qux(_){return}
-}
-
-let e = {
- __proto__: d,
- "1": 0,
- x: 0,
- get baz(){return 0},
- validLoad() {
- return super[0] + super.foo + super.bar() + super.baz;
- },
- propertyLoad() {
- return super.x;
- },
- elementLoad() {
- return super[1];
- },
- accessorLoad() {
- return super.qux;
- }
-}
-
-testSuper(e);
diff --git a/deps/v8/test/mjsunit/strong/mutually-recursive-funcs.js b/deps/v8/test/mjsunit/strong/mutually-recursive-funcs.js
deleted file mode 100644
index 726eed839c..0000000000
--- a/deps/v8/test/mjsunit/strong/mutually-recursive-funcs.js
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode
-
-"use strong";
-
-function foo(param, fooCount, barCount) {
- if (param === 0)
- return {'foo': fooCount, 'bar': barCount};
- return bar(param - 1, fooCount + 1, barCount);
-}
-
-function bar(param, fooCount, barCount) {
- if (param === 0)
- return {'foo': fooCount, 'bar': barCount};
- return foo(param - 1, fooCount, barCount + 1);
-}
-
-(function TestMutuallyRecursiveFunctions() {
- let obj = foo(10, 0, 0);
- assertEquals(obj.foo, 5);
- assertEquals(obj.bar, 5);
-})();
diff --git a/deps/v8/test/mjsunit/strong/object-delete.js b/deps/v8/test/mjsunit/strong/object-delete.js
deleted file mode 100644
index a655b65c78..0000000000
--- a/deps/v8/test/mjsunit/strong/object-delete.js
+++ /dev/null
@@ -1,255 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode --allow-natives-syntax
-
-// TODO(conradw): Track implementation of strong bit for other objects, add
-// tests.
-
-function getSloppyObjects() {
- return [(function(){}), ({})];
-}
-
-function getStrictObjects() {
- "use strict";
- return [(function(){}), ({})];
-}
-
-function getWeakObjects() {
- return getSloppyObjects().concat(getStrictObjects());
-}
-
-function getStrongObjects() {
- "use strong";
-// Strong functions can't have properties added to them, and will be tested as a
-// special case.
- return [({})];
-}
-
-function strongFunction() {
- "use strong";
-}
-
-function deleteFromObjectSloppy(o) {
- return delete o.foo;
-}
-
-function deleteFromObjectKeyedSloppy(o) {
- return delete o["foo"];
-}
-
-function deleteFromObjectKeyedVarSloppy(o) {
- var a = "foo";
- return delete o[a];
-}
-
-function deleteFromObjectKeyedComputedSloppy(o) {
- var a = "o";
- return delete o["fo" + a];
-}
-
-function deleteFromObjectWith(o) {
- with (o) {
- return delete foo;
- }
-}
-
-function deleteFromObjectElementSloppy(o) {
- return delete o[0];
-}
-
-function deleteFromObjectElementVarSloppy(o) {
- var a = 0;
- return delete o[a];
-}
-
-function deleteFromObjectElementSparseSloppy(o) {
- return delete o[100000];
-}
-
-function deleteFromObjectElementVarSloppy(o) {
- var a = 0;
- return delete o[a];
-}
-
-function deleteFromObjectElementSparseVarSloppy(o) {
- var a = 100000;
- return delete o[a];
-}
-
-function deleteFromObjectStrict(o) {
- "use strict";
- return delete o.foo;
-}
-
-function deleteFromObjectKeyedStrict(o) {
- "use strict";
- return delete o["foo"];
-}
-
-function deleteFromObjectKeyedVarStrict(o) {
- "use strict";
- var a = "foo";
- return delete o[a];
-}
-
-function deleteFromObjectKeyedComputedStrict(o) {
- "use strict";
- var a = "o";
- return delete o["fo" + a];
-}
-
-function deleteFromObjectElementStrict(o) {
- "use strict";
- return delete o[0];
-}
-
-function deleteFromObjectElementSparseStrict(o) {
- "use strict";
- return delete o[100000];
-}
-
-function deleteFromObjectElementVarStrict(o) {
- "use strict";
- var a = 0;
- return delete o[a];
-}
-
-function deleteFromObjectElementSparseVarStrict(o) {
- "use strict";
- var a = 100000;
- return delete o[a];
-}
-
-function testStrongObjectDelete() {
- "use strict";
-
- let deletePropertyFuncsSloppy = [
- deleteFromObjectSloppy,
- deleteFromObjectKeyedSloppy,
- deleteFromObjectKeyedVarSloppy,
- deleteFromObjectKeyedComputedSloppy,
- deleteFromObjectWith
- ];
- let deletePropertyFuncsStrict = [
- deleteFromObjectStrict,
- deleteFromObjectKeyedStrict,
- deleteFromObjectKeyedVarStrict,
- deleteFromObjectKeyedComputedStrict
- ];
- let deleteElementFuncsSloppy = [
- deleteFromObjectElementSloppy,
- deleteFromObjectElementVarSloppy
- ];
- let deleteElementSparseFuncsSloppy = [
- deleteFromObjectElementSparseSloppy,
- deleteFromObjectElementSparseVarSloppy
- ];
- let deleteElementFuncsStrict = [
- deleteFromObjectElementStrict,
- deleteFromObjectElementVarStrict
- ];
- let deleteElementSparseFuncsStrict = [
- deleteFromObjectElementSparseStrict,
- deleteFromObjectElementSparseVarStrict
- ];
- let deleteFuncs = deletePropertyFuncsSloppy.concat(
- deletePropertyFuncsStrict, deleteElementFuncsSloppy,
- deleteElementSparseFuncsSloppy, deleteElementFuncsStrict,
- deleteElementSparseFuncsStrict);
-
- for (let deleteFunc of deleteFuncs) {
- assertTrue(deleteFunc(strongFunction));
- }
-
- let testCasesSloppy = [
- [deletePropertyFuncsSloppy, "foo"],
- [deleteElementFuncsSloppy, "0"],
- [deleteElementSparseFuncsSloppy, "100000"]
- ];
-
- let testCasesStrict = [
- [deletePropertyFuncsStrict, "foo"],
- [deleteElementFuncsStrict, "0"],
- [deleteElementSparseFuncsStrict, "100000"]
- ];
-
- let propDescs = [
- {configurable: true, value: "bar"},
- {configurable: true, value: 1},
- {configurable: true, enumerable: true, writable: true, value: "bar"},
- {configurable: true, enumerable: true, writable: true, value: 1},
- {configurable: true, get: (function(){return 0}), set: (function(x){})}
- ];
-
- for (let propDesc of propDescs) {
- for (let testCase of testCasesSloppy) {
- let name = testCase[1];
- for (let deleteFunc of testCase[0]) {
- for (let o of getWeakObjects()) {
- Object.defineProperty(o, name, propDesc);
- assertTrue(delete o["bar"]);
- assertTrue(delete o[5000]);
- assertTrue(deleteFunc(o));
- assertFalse(o.hasOwnProperty(name));
- %OptimizeFunctionOnNextCall(deleteFunc);
- Object.defineProperty(o, name, propDesc);
- assertTrue(deleteFunc(o));
- assertFalse(o.hasOwnProperty(name));
- %DeoptimizeFunction(deleteFunc);
- Object.defineProperty(o, name, propDesc);
- assertTrue(deleteFunc(o));
- assertFalse(o.hasOwnProperty(name));
- }
- for (let o of getStrongObjects()) {
- Object.defineProperty(o, name, propDesc);
- assertTrue(delete o["bar"]);
- assertTrue(delete o[5000]);
- assertFalse(deleteFunc(o));
- assertTrue(o.hasOwnProperty(name));
- %OptimizeFunctionOnNextCall(deleteFunc);
- assertFalse(deleteFunc(o));
- assertTrue(o.hasOwnProperty(name));
- %DeoptimizeFunction(deleteFunc);
- assertFalse(deleteFunc(o));
- assertTrue(o.hasOwnProperty(name));
- }
- }
- }
- for (let testCase of testCasesStrict) {
- let name = testCase[1];
- for (let deleteFunc of testCase[0]) {
- for (let o of getWeakObjects()) {
- Object.defineProperty(o, name, propDesc);
- assertTrue(delete o["bar"]);
- assertTrue(delete o[5000]);
- assertTrue(deleteFunc(o));
- assertFalse(o.hasOwnProperty(name));
- %OptimizeFunctionOnNextCall(deleteFunc);
- Object.defineProperty(o, name, propDesc);
- assertTrue(deleteFunc(o));
- assertFalse(o.hasOwnProperty(name));
- %DeoptimizeFunction(deleteFunc);
- Object.defineProperty(o, name, propDesc);
- assertTrue(deleteFunc(o));
- assertFalse(o.hasOwnProperty(name));
- }
- for (let o of getStrongObjects()) {
- Object.defineProperty(o, name, propDesc);
- assertTrue(delete o["bar"]);
- assertTrue(delete o[5000]);
- assertThrows(function(){deleteFunc(o)}, TypeError);
- assertTrue(o.hasOwnProperty(name));
- %OptimizeFunctionOnNextCall(deleteFunc);
- assertThrows(function(){deleteFunc(o)}, TypeError);
- assertTrue(o.hasOwnProperty(name));
- %DeoptimizeFunction(deleteFunc);
- assertThrows(function(){deleteFunc(o)}, TypeError);
- assertTrue(o.hasOwnProperty(name));
- }
- }
- }
- }
-}
-testStrongObjectDelete();
diff --git a/deps/v8/test/mjsunit/strong/object-freeze-property.js b/deps/v8/test/mjsunit/strong/object-freeze-property.js
deleted file mode 100644
index e76af1bfc6..0000000000
--- a/deps/v8/test/mjsunit/strong/object-freeze-property.js
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode --allow-natives-syntax
-
-// TODO(conradw): Track implementation of strong bit for other objects, add
-// tests.
-
-function getSloppyObjects() {
- return [(function(){}), ({})];
-}
-
-function getStrictObjects() {
- "use strict";
- return [(function(){}), ({})];
-}
-
-function getStrongObjects() {
- "use strong";
- // Strong functions can't have properties added to them.
- return [{}];
-}
-
-(function testStrongObjectFreezePropValid() {
- "use strict";
- let strongObjects = getStrongObjects();
-
- for (let o of strongObjects) {
- Object.defineProperty(o, "foo", { configurable: true, writable: true });
- assertDoesNotThrow(
- function() {
- "use strong";
- Object.defineProperty(o, "foo", {configurable: true, writable: false });
- });
- }
-})();
-
-(function testStrongObjectFreezePropInvalid() {
- "use strict";
- let sloppyObjects = getSloppyObjects();
- let strictObjects = getStrictObjects();
- let strongObjects = getStrongObjects();
- let weakObjects = sloppyObjects.concat(strictObjects);
-
- for (let o of weakObjects) {
- Object.defineProperty(o, "foo", { writable: true });
- assertDoesNotThrow(
- function() {
- "use strong";
- Object.defineProperty(o, "foo", { writable: false });
- });
- }
- for (let o of strongObjects) {
- function defProp(o) {
- Object.defineProperty(o, "foo", { writable: false });
- }
- function defProps(o) {
- Object.defineProperties(o, { "foo": { writable: false } });
- }
- function freezeProp(o) {
- Object.freeze(o);
- }
- Object.defineProperty(o, "foo", { writable: true });
- for (let func of [defProp, defProps, freezeProp]) {
- assertThrows(function(){func(o)}, TypeError);
- assertThrows(function(){func(o)}, TypeError);
- assertThrows(function(){func(o)}, TypeError);
- %OptimizeFunctionOnNextCall(func);
- assertThrows(function(){func(o)}, TypeError);
- %DeoptimizeFunction(func);
- assertThrows(function(){func(o)}, TypeError);
- }
- }
-})();
diff --git a/deps/v8/test/mjsunit/strong/object-set-prototype.js b/deps/v8/test/mjsunit/strong/object-set-prototype.js
deleted file mode 100644
index 53706df1ee..0000000000
--- a/deps/v8/test/mjsunit/strong/object-set-prototype.js
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode --allow-natives-syntax
-
-// TODO(conradw): Track implementation of strong bit for other objects, add
-// tests.
-
-function getSloppyObjects() {
- return [(function(){}), ({})];
-}
-
-function getStrictObjects() {
- "use strict";
- return [(function(){}), ({})];
-}
-
-function getStrongObjects() {
- "use strong";
- return [(function(){}), ({})];
-}
-
-function declareObjectLiteralWithProtoSloppy() {
- return {__proto__: {}};
-}
-
-function declareObjectLiteralWithProtoStrong() {
- "use strong";
- return {__proto__: {}};
-}
-
-function testStrongObjectSetProto() {
- "use strict";
- let sloppyObjects = getSloppyObjects();
- let strictObjects = getStrictObjects();
- let strongObjects = getStrongObjects();
- let weakObjects = sloppyObjects.concat(strictObjects);
-
- for (let o of weakObjects) {
- let setProtoBuiltin = function(o){Object.setPrototypeOf(o, {})};
- let setProtoProperty = function(o){o.__proto__ = {}};
- for (let setProtoFunc of [setProtoBuiltin, setProtoProperty]) {
- assertDoesNotThrow(function(){setProtoFunc(o)});
- assertDoesNotThrow(function(){setProtoFunc(o)});
- assertDoesNotThrow(function(){setProtoFunc(o)});
- %OptimizeFunctionOnNextCall(setProtoFunc);
- assertDoesNotThrow(function(){setProtoFunc(o)});
- %DeoptimizeFunction(setProtoFunc);
- assertDoesNotThrow(function(){setProtoFunc(o)});
- }
- }
- for (let o of strongObjects) {
- let setProtoBuiltin = function(o){Object.setPrototypeOf(o, {})};
- let setProtoProperty = function(o){o.__proto__ = {}};
- for (let setProtoFunc of [setProtoBuiltin, setProtoProperty]) {
- assertThrows(function(){setProtoFunc(o)}, TypeError);
- assertThrows(function(){setProtoFunc(o)}, TypeError);
- assertThrows(function(){setProtoFunc(o)}, TypeError);
- %OptimizeFunctionOnNextCall(setProtoFunc);
- assertThrows(function(){setProtoFunc(o)}, TypeError);
- %DeoptimizeFunction(setProtoFunc);
- assertThrows(function(){setProtoFunc(o)}, TypeError);
- }
- }
-
- assertDoesNotThrow(declareObjectLiteralWithProtoSloppy);
- assertDoesNotThrow(declareObjectLiteralWithProtoSloppy);
- assertDoesNotThrow(declareObjectLiteralWithProtoSloppy);
- %OptimizeFunctionOnNextCall(declareObjectLiteralWithProtoSloppy);
- assertDoesNotThrow(declareObjectLiteralWithProtoSloppy);
- %DeoptimizeFunction(declareObjectLiteralWithProtoSloppy);
- assertDoesNotThrow(declareObjectLiteralWithProtoSloppy);
-
- assertDoesNotThrow(declareObjectLiteralWithProtoStrong);
- assertDoesNotThrow(declareObjectLiteralWithProtoStrong);
- assertDoesNotThrow(declareObjectLiteralWithProtoStrong);
- %OptimizeFunctionOnNextCall(declareObjectLiteralWithProtoStrong);
- assertDoesNotThrow(declareObjectLiteralWithProtoStrong);
- %DeoptimizeFunction(declareObjectLiteralWithProtoStrong);
- assertDoesNotThrow(declareObjectLiteralWithProtoStrong);
-}
-testStrongObjectSetProto();
diff --git a/deps/v8/test/mjsunit/strong/super.js b/deps/v8/test/mjsunit/strong/super.js
deleted file mode 100644
index bd289f204f..0000000000
--- a/deps/v8/test/mjsunit/strong/super.js
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode --allow-natives-syntax
-
-'use strong';
-
-
-function desc(obj, n) {
- return Object.getOwnPropertyDescriptor(obj, n);
-}
-
-
-(function TestClass() {
- class C {
- m() {
- super.x;
- }
- get x() {
- super.x;
- }
- set y(_) {
- super.x;
- }
- static m() {
- super.x;
- }
- static get x() {
- super.x;
- }
- static set y(_) {
- super.x;
- }
- }
-
- assertEquals(C.prototype, C.prototype.m[%HomeObjectSymbol()]);
- assertEquals(C.prototype, desc(C.prototype, 'x').get[%HomeObjectSymbol()]);
- assertEquals(C.prototype, desc(C.prototype, 'y').set[%HomeObjectSymbol()]);
- assertEquals(C, C.m[%HomeObjectSymbol()]);
- assertEquals(C, desc(C, 'x').get[%HomeObjectSymbol()]);
- assertEquals(C, desc(C, 'y').set[%HomeObjectSymbol()]);
-})();
-
-
-(function TestObjectLiteral() {
- let o = {
- m() {
- super.x;
- },
- get x() {
- super.x;
- },
- set y(_) {
- super.x;
- }
- };
-
- assertEquals(o, o.m[%HomeObjectSymbol()]);
- assertEquals(o, desc(o, 'x').get[%HomeObjectSymbol()]);
- assertEquals(o, desc(o, 'y').set[%HomeObjectSymbol()]);
-})();
diff --git a/deps/v8/test/mjsunit/strong/switch.js b/deps/v8/test/mjsunit/strong/switch.js
deleted file mode 100644
index 96ee1eec79..0000000000
--- a/deps/v8/test/mjsunit/strong/switch.js
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode
-
-"use strict";
-
-function CheckSwitch() {
- let jumpStatements = [
- "break; ",
- "continue; ",
- "break foo; ",
- "continue foo; ",
- "return; ",
- "throw new TypeError(); ",
- "if(1) break; else continue; ",
- "if(1) {1+1; {break;}} else continue; "
- ]
-
- let otherStatements = [
- "null; ",
- "1+1; ",
- "{} ",
- "for(;false;) {break;} ",
- "for(;false;) {1+1; {throw new TypeError();}} ",
- "(function(){return});",
- "(function(){throw new TypeError();});",
- "{break; 1+1;} ",
- "if(1) break; ",
- "if(1) break; else 1+1; ",
- "if(1) 1+1; else break; ",
- ]
-
- let successContexts = [
- ["switch(1) {case 1: ", "case 2: }"],
- ["switch(1) {case 1: case 2: ", "default: }"],
- ["switch(1) {case 1: case 2: ", "default: {}}"],
- ["switch(1) {case 1: case 2: ", "default: 1+1}"],
- ["switch(1) {case 1: break; case 2: ", "default: }"],
- ["switch(1) {case 1: case 2: break; case 3: ", "case 4: default: }"],
- ["switch(1) {case 1: if(1) break; else {", "} default: break;}"]
- ]
-
- let strongThrowContexts = [
- ["switch(1) {case 1: 1+1; case 2: ", "}"],
- ["switch(1) {case 1: bar: break foo; case 2: ", "}"],
- ["switch(1) {case 1: bar:", " case 2: }"],
- ["switch(1) {case 1: bar:{ ", "} case 2: }"],
- ["switch(1) {case 1: bar:{ ", "} default: break;}"],
- ["switch(1) {case 1: { bar:{ { ", "} } } default: break;}"],
- ["switch(1) {case 1: { { { ", "} 1+1;} } default: break;}"],
- ["switch(1) {case 1: if(1) {", "} default: break;}"],
- ["switch(1) {case 1: bar:if(1) break; else {", "} default: break;}"]
- ]
-
- let sloppy_wrap = ["function f() { foo:for(;;) {", "}}"];
- let strong_wrap = ["function f() { 'use strong'; foo:for(;;) {", "}}"];
-
- for (let context of successContexts) {
- let sloppy_prefix = sloppy_wrap[0] + context[0];
- let sloppy_suffix = context[1] + sloppy_wrap[1];
- let strong_prefix = strong_wrap[0] + context[0];
- let strong_suffix = context[1] + strong_wrap[1];
-
- for (let code of jumpStatements) {
- assertDoesNotThrow(strong_wrap[0] + "switch(1) {case 1: " + code + "}}}");
- assertDoesNotThrow(strong_prefix + code + strong_suffix);
- assertDoesNotThrow(strong_prefix + "{ 1+1; " + code + "}" +
- strong_suffix);
- assertDoesNotThrow(strong_prefix + "{ 1+1; { 1+1; " + code + "}}" +
- strong_suffix);
- assertDoesNotThrow(strong_prefix + "if(1) " + code + "else break;" +
- strong_suffix);
- assertDoesNotThrow(strong_prefix + "if(1) " + code +
- "else if (1) break; else " + code + strong_suffix);
- }
- for (let code of otherStatements) {
- assertDoesNotThrow(sloppy_prefix + code + sloppy_suffix);
- assertThrows(strong_prefix + code + strong_suffix, SyntaxError);
- }
- }
-
- for (let context of strongThrowContexts) {
- let sloppy_prefix = sloppy_wrap[0] + context[0];
- let sloppy_suffix = context[1] + sloppy_wrap[1];
- let strong_prefix = strong_wrap[0] + context[0];
- let strong_suffix = context[1] + strong_wrap[1];
-
- for (let code of jumpStatements.concat(otherStatements)) {
- assertDoesNotThrow(sloppy_prefix + code + sloppy_suffix);
- assertThrows(strong_prefix + code + strong_suffix, SyntaxError);
- }
- }
-
- for (let code of otherStatements) {
- assertDoesNotThrow("switch(1) {default: " + code + "}");
- assertDoesNotThrow("switch(1) {case 1: " + code + "}");
- assertDoesNotThrow("switch(1) {case 1: default: " + code + "}");
- assertDoesNotThrow("switch(1) {case 1: break; default: " + code + "}");
- assertDoesNotThrow("switch(1) {case 1: " + code + "break; default: }");
- }
-}
-
-CheckSwitch();
-
-assertDoesNotThrow("'use strong'; switch(1) {}");
-assertDoesNotThrow("'use strong'; switch(1) {case 1:}");
-assertDoesNotThrow("'use strong'; switch(1) {default:}");
-assertDoesNotThrow("'use strong'; switch(1) {case 1: case 2: default:}");
diff --git a/deps/v8/test/mjsunit/strong/undefined.js b/deps/v8/test/mjsunit/strong/undefined.js
deleted file mode 100644
index 218547a192..0000000000
--- a/deps/v8/test/mjsunit/strong/undefined.js
+++ /dev/null
@@ -1,200 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode --harmony-sloppy
-
-// Repurposing the strict mode 'eval' and 'arguments' tests to test for correct
-// behaviour of 'undefined' as an identifier in strong mode.
-"use strict";
-
-function CheckStrongMode(code) {
- let strictContexts = [
- ["'use strict';", ""],
- ["function outer() { 'use strict';", "}"],
- ["function outer() { 'use strict'; function inner() {", "}}"],
- ["class C { m() {", "} }"]
- ]
- let strongContexts = [
- ["'use strong';", ""],
- ["function outer() { 'use strong';", "}"],
- ["function outer() { 'use strong'; function inner() {", "}}"],
- ["class C { m() { 'use strong';", "} }"]
- ]
-
- for (let context of strictContexts) {
- assertThrows(context[0] + code + context[1] + "; throw new TypeError();",
- TypeError);
- }
- for (let context of strongContexts) {
- assertThrows(context[0] + code + context[1], SyntaxError);
- }
-}
-
-// Binding 'undefined'
-CheckStrongMode("var undefined;");
-CheckStrongMode("let undefined;");
-CheckStrongMode("var undefined = 0;");
-CheckStrongMode("let undefined = 0;");
-CheckStrongMode("const undefined = 0;");
-CheckStrongMode("var x, y = 0, undefined;");
-CheckStrongMode("let x, y = 0, undefined;");
-
-// Function identifier is 'undefined'
-// Function declaration
-CheckStrongMode("function undefined() {}");
-assertThrows("function undefined() {'use strong';}", SyntaxError);
-
-// Generator function
-CheckStrongMode("function* undefined() {}");
-assertThrows("function* undefined() {'use strong';}", SyntaxError);
-
-// Function expression
-CheckStrongMode("(function undefined() {});");
-assertThrows("(function undefined() {'use strong';});", SyntaxError);
-CheckStrongMode("{foo: (function undefined(){})};");
-assertThrows("{foo: (function undefined(){'use strong';})};", SyntaxError);
-
-//Generator function expression
-CheckStrongMode("(function* undefined() {})");
-assertThrows("(function* undefined() {'use strong';})", SyntaxError);
-CheckStrongMode("{foo: (function* undefined(){})};");
-assertThrows("{foo: (function* undefined(){'use strong';})};", SyntaxError);
-
-// Function parameter named 'undefined'
-// Function declaration
-CheckStrongMode("function foo(a, b, undefined, c, d) {}");
-assertThrows("function foo(a, b, undefined, c, d) {'use strong';}",
- SyntaxError);
-
-// Generator function declaration
-CheckStrongMode("function* foo(a, b, undefined, c, d) {}");
-assertThrows("function* foo(a, b, undefined, c, d) {'use strong';}",
- SyntaxError);
-
-// Function expression
-CheckStrongMode("(function foo(a, b, undefined, c, d) {});");
-assertThrows("(function foo(a, b, undefined, c, d) {'use strong';})",
- SyntaxError);
-CheckStrongMode("{foo: (function foo(a, b, undefined, c, d) {})};");
-assertThrows("{foo: (function foo(a, b, undefined, c, d) {'use strong';})};",
- SyntaxError);
-
-// Generator function expression
-CheckStrongMode("(function* foo(a, b, undefined, c, d) {});");
-assertThrows("(function* foo(a, b, undefined, c, d) {'use strong';})",
- SyntaxError);
-CheckStrongMode("{foo: (function* foo(a, b, undefined, c, d) {})};");
-assertThrows("{foo: (function* foo(a, b, undefined, c, d) {'use strong';})};",
- SyntaxError);
-
-// Method parameter named 'undefined'
-// Class method
-CheckStrongMode("class C { foo(a, b, undefined, c, d) {} }");
-assertThrows("class C { foo(a, b, undefined, c, d) {'use strong';} }",
- SyntaxError);
-
-//Class generator method
-CheckStrongMode("class C { *foo(a, b, undefined, c, d) {} }");
-assertThrows("class C { *foo(a, b, undefined, c, d) {'use strong';} }",
- SyntaxError);
-
-//Object literal method
-CheckStrongMode("({ foo(a, b, undefined, c, d) {} });");
-assertThrows("({ foo(a, b, undefined, c, d) {'use strong';} });", SyntaxError);
-
-//Object literal generator method
-CheckStrongMode("({ *foo(a, b, undefined, c, d) {} });");
-assertThrows("({ *foo(a, b, undefined, c, d) {'use strong';} });", SyntaxError);
-
-// Class declaration named 'undefined'
-CheckStrongMode("class undefined {}");
-assertThrows("class undefined {'use strong'}", SyntaxError);
-
-// Class expression named 'undefined'
-CheckStrongMode("(class undefined {});");
-assertThrows("(class undefined {'use strong'});", SyntaxError);
-
-// Binding/assigning to 'undefined' in for
-CheckStrongMode("for(undefined = 0;false;);");
-CheckStrongMode("for(var undefined = 0;false;);");
-CheckStrongMode("for(let undefined = 0;false;);");
-CheckStrongMode("for(const undefined = 0;false;);");
-
-// Binding/assigning to 'undefined' in for-in
-CheckStrongMode("for(undefined in {});");
-CheckStrongMode("for(var undefined in {});");
-CheckStrongMode("for(let undefined in {});");
-CheckStrongMode("for(const undefined in {});");
-
-// Binding/assigning to 'undefined' in for-of
-CheckStrongMode("for(undefined of []);");
-CheckStrongMode("for(var undefined of []);");
-CheckStrongMode("for(let undefined of []);");
-CheckStrongMode("for(const undefined of []);");
-
-// Property accessor parameter named 'undefined'.
-CheckStrongMode("let o = { set foo(undefined) {} }");
-assertThrows("let o = { set foo(undefined) {'use strong';} }", SyntaxError);
-
-// catch(undefined)
-CheckStrongMode("try {} catch(undefined) {};");
-
-// Assignment to undefined
-CheckStrongMode("undefined = 0;");
-CheckStrongMode("print(undefined = 0);");
-CheckStrongMode("let x = undefined = 0;");
-
-// Compound assignment to undefined
-CheckStrongMode("undefined *= 0;");
-CheckStrongMode("undefined /= 0;");
-CheckStrongMode("print(undefined %= 0);");
-CheckStrongMode("let x = undefined += 0;");
-CheckStrongMode("let x = undefined -= 0;");
-CheckStrongMode("undefined <<= 0;");
-CheckStrongMode("undefined >>= 0;");
-CheckStrongMode("print(undefined >>>= 0);");
-CheckStrongMode("print(undefined &= 0);");
-CheckStrongMode("let x = undefined ^= 0;");
-CheckStrongMode("let x = undefined |= 0;");
-
-// Postfix increment with undefined
-CheckStrongMode("undefined++;");
-CheckStrongMode("print(undefined++);");
-CheckStrongMode("let x = undefined++;");
-
-// Postfix decrement with undefined
-CheckStrongMode("undefined--;");
-CheckStrongMode("print(undefined--);");
-CheckStrongMode("let x = undefined--;");
-
-// Prefix increment with undefined
-CheckStrongMode("++undefined;");
-CheckStrongMode("print(++undefined);");
-CheckStrongMode("let x = ++undefined;");
-
-// Prefix decrement with undefined
-CheckStrongMode("--undefined;");
-CheckStrongMode("print(--undefined);");
-CheckStrongMode("let x = --undefined;");
-
-// Function constructor: 'undefined' parameter name
-assertDoesNotThrow(function() {
- Function("undefined", "");
-});
-assertThrows(function() {
- Function("undefined", "'use strong';");
-}, SyntaxError);
-
-// Arrow functions with undefined parameters
-CheckStrongMode("(undefined => {return});");
-assertThrows("(undefined => {'use strong';});");
-
-CheckStrongMode("((undefined, b, c) => {return});");
-assertThrows("((undefined, b, c) => {'use strong';});");
-
-CheckStrongMode("((a, undefined, c) => {return});");
-assertThrows("((a, undefined, c) => {'use strong';});");
-
-CheckStrongMode("((a, b, undefined) => {return});");
-assertThrows("((a, b, undefined) => {'use strong';});");
diff --git a/deps/v8/test/mjsunit/strong/use-strong.js b/deps/v8/test/mjsunit/strong/use-strong.js
deleted file mode 100644
index 127dc35e2a..0000000000
--- a/deps/v8/test/mjsunit/strong/use-strong.js
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode
-
-(function UseStrongScoping() {
- assertThrows("'use strong'; 0 == 0", SyntaxError);
- assertThrows("'use strong'; try {} catch(e) { { 0 == 0 } }", SyntaxError);
- assertThrows("function f() { 'use strong'; 0 == 0 }", SyntaxError);
- assertThrows("'use strong'; function f() { 0 == 0 }", SyntaxError);
- assertThrows("'use strong'; function f() { function g() { 0 == 0 } }", SyntaxError);
- assertTrue(eval("function f() { 'use strong' } 0 == 0"));
- assertTrue(eval("eval('\\\'use strong\\\''); 0 == 0"));
-})();
-
-(function UseStrongMixed() {
- assertThrows("'use strict'; 'use strong'; 0 == 0", SyntaxError);
- assertThrows("'use strong'; 'use strict'; 0 == 0", SyntaxError);
- assertThrows("'use strong'; 'use strong'; 0 == 0", SyntaxError);
- assertThrows("'use strict'; function f() { 'use strong'; 0 == 0 }", SyntaxError);
- assertThrows("'use strong'; function f() { 'use strict'; 0 == 0 }", SyntaxError);
- assertTrue(eval("'use strict'; function f() { 'use strong' } 0 == 0"));
- assertTrue(eval("var x; function f() { 'use strong' } delete x"));
- assertThrows("'use strict'; var x; function f() { 'use strong' } delete x", SyntaxError);
-})();
diff --git a/deps/v8/test/mjsunit/strong/var-let-const.js b/deps/v8/test/mjsunit/strong/var-let-const.js
deleted file mode 100644
index 5545ccfa58..0000000000
--- a/deps/v8/test/mjsunit/strong/var-let-const.js
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode
-
-(function NoVar() {
- assertThrows("'use strong'; var x = 0;", SyntaxError);
- assertThrows("'use strong'; for(var i = 0; i < 10; ++i) { };", SyntaxError);
-})();
-
-
-(function LetIsOkay() {
- assertTrue(eval("'use strong'; let x = 0; x === 0;"));
- assertTrue(eval("'use strong'; for(let i = 0; i < 10; ++i) { } 0 === 0;"));
-})();
-
-
-(function ConstIsOkay() {
- assertTrue(eval("'use strong'; const x = 0; x === 0;"));
- assertTrue(eval("'use strong'; for(const i = 0; false;) { } 0 === 0;"));
-})();
diff --git a/deps/v8/test/mjsunit/tail-call-intrinsic.js b/deps/v8/test/mjsunit/tail-call-intrinsic.js
deleted file mode 100644
index a74f153732..0000000000
--- a/deps/v8/test/mjsunit/tail-call-intrinsic.js
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --nostress-opt --turbo
-// Flags: --nonative-context-specialization
-
-var p0 = new Object();
-var p1 = new Object();
-var p2 = new Object();
-
-// Ensure 1 parameter passed straight-through is handled correctly
-var count1 = 100000;
-tailee1 = function() {
- "use strict";
- if (count1-- == 0) {
- return this;
- }
- return %_TailCall(tailee1, this);
-};
-
-%OptimizeFunctionOnNextCall(tailee1);
-assertEquals(p0, tailee1.call(p0));
-
-// Ensure 2 parameters passed straight-through trigger a tail call are handled
-// correctly and don't cause a stack overflow.
-var count2 = 100000;
-tailee2 = function(px) {
- "use strict";
- assertEquals(p2, px);
- assertEquals(p1, this);
- count2 = ((count2 | 0) - 1) | 0;
- if ((count2 | 0) === 0) {
- return this;
- }
- return %_TailCall(tailee2, this, px);
-};
-
-%OptimizeFunctionOnNextCall(tailee2);
-assertEquals(p1, tailee2.call(p1, p2));
-
-// Ensure swapped 2 parameters trigger a tail call and do the appropriate
-// parameters swapping
-var count3 = 999999;
-tailee3 = function(px) {
- "use strict";
- if (count3-- == 0) {
- return this;
- }
- return %_TailCall(tailee3, px, this);
-};
-
-%OptimizeFunctionOnNextCall(tailee3);
-assertEquals(p2, tailee3.call(p1, p2));
-
-// Ensure too many parameters defeats the tail call optimization (currently
-// unsupported).
-var count4 = 1000000;
-tailee4 = function(px) {
- "use strict";
- if (count4-- == 0) {
- return this;
- }
- return %_TailCall(tailee4, this, px, undefined);
-};
-
-%OptimizeFunctionOnNextCall(tailee4);
-assertThrows(function() { tailee4.call(p1, p2); });
-
-// Ensure that calling the arguments adapter defeats the tail call optimization.
-var count5 = 1000000;
-tailee5 = function(px) {
- "use strict";
- if (count5-- == 0) {
- return this;
- }
- return %_TailCall(tailee5, this);
-};
-
-%OptimizeFunctionOnNextCall(tailee5);
-assertThrows(function() { tailee5.call(p1, p2); });
-
-// Ensure tail calls with fewer stack parameters properly re-arranges the stack.
-tailee6 = function(px) {
- return px;
-}
-
-tailee7 = function(px, py, pz, pa, pb, pc) {
- "use strict";
- return %_TailCall(tailee6, this, pc);
-};
-
-%OptimizeFunctionOnNextCall(tailee6);
-%OptimizeFunctionOnNextCall(tailee7);
-assertEquals(110, tailee7.call(null, 15, 16, 17, 18, 0, 110));
-
-tailee8 = function(px, py, pz, pa, pb) {
- return pb + pz + px;
-}
-
-tailee9 = function(px, py, pz, pa, pb, pc) {
- "use strict";
- return %_TailCall(tailee8, this, pb, py, px, pa, pz);
-};
-
-%OptimizeFunctionOnNextCall(tailee8);
-%OptimizeFunctionOnNextCall(tailee9);
-assertEquals(32, tailee9.call(null, 15, 16, 17, 18, 0, 110));
diff --git a/deps/v8/test/mjsunit/tools/profviz-test.log b/deps/v8/test/mjsunit/tools/profviz-test.log
index fe4b7ffcbc..f7cbe5b1b0 100644
--- a/deps/v8/test/mjsunit/tools/profviz-test.log
+++ b/deps/v8/test/mjsunit/tools/profviz-test.log
@@ -793,8 +793,8 @@ code-creation,Stub,2,0x2b81f2a0,197,"StoreBufferOverflowStub"
code-creation,Stub,2,0x2b81f380,79,"StubFailureTrampolineStub"
code-creation,Stub,2,0x2b81f3e0,80,"StubFailureTrampolineStub"
tick,0xf776d430,16272,0,0x0,3
-timer-event-start,"V8.ParseLazy",16854
-timer-event-end,"V8.ParseLazy",17081
+timer-event-start,"V8.ParseLazyMicroSeconds",16854
+timer-event-end,"V8.ParseLazyMicroSeconds",17081
timer-event-start,"V8.CompileLazy",17098
timer-event-start,"V8.CompileFullCode",17125
tick,0xf74c79de,17348,0,0xff820034,2
@@ -805,8 +805,8 @@ timer-event-end,"V8.CompileFullCode",17910
code-creation,LazyCompile,0,0x2b81b3c0,572,"Instantiate native apinatives.js:44",0x44223cdc,~
timer-event-end,"V8.CompileLazy",17948
code-creation,Stub,13,0x2b81b600,116,"CompareICStub"
-timer-event-start,"V8.ParseLazy",18020
-timer-event-end,"V8.ParseLazy",18170
+timer-event-start,"V8.ParseLazyMicroSeconds",18020
+timer-event-end,"V8.ParseLazyMicroSeconds",18170
timer-event-start,"V8.CompileLazy",18187
timer-event-start,"V8.CompileFullCode",18208
code-creation,Stub,12,0x2b81b680,88,"BinaryOpStub_BIT_AND_Alloc_Uninitialized+Uninitialized"
@@ -821,8 +821,8 @@ code-creation,CallIC,7,0x2b82baa0,129,"InstantiateFunction"
code-creation,LoadIC,5,0x2b82bb40,103,"kApiFunctionCache"
code-creation,Stub,12,0x2b82bbc0,146,"BinaryOpStub_BIT_AND_Alloc_Smi+Smi"
code-creation,Stub,15,0x2b82bc60,132,"ToBooleanStub(Smi)"
-timer-event-start,"V8.ParseLazy",19172
-timer-event-end,"V8.ParseLazy",19253
+timer-event-start,"V8.ParseLazyMicroSeconds",19172
+timer-event-end,"V8.ParseLazyMicroSeconds",19253
timer-event-start,"V8.CompileLazy",19268
timer-event-start,"V8.CompileFullCode",19285
timer-event-end,"V8.CompileFullCode",19350
@@ -844,45 +844,45 @@ code-creation,Stub,14,0x2b82c800,144,"CompareNilICStub(NullValue)(MonomorphicMap
tick,0x8132a60,20593,0,0x8141e5e,0,0x2b822c4e,0x2b82af24,0x2b81b4f0,0x2b82beff,0x2b81b59f,0x2b82beff,0x2b81b589,0x2b82b0b8,0x2b81b4f0
code-creation,Stub,14,0x2b82c8a0,124,"CompareNilICStub(NullValue)(Undefined,Null,Undetectable,Generic)"
code-creation,Stub,13,0x2b82c920,156,"CompareICStub"
-timer-event-start,"V8.ParseLazy",20736
-timer-event-end,"V8.ParseLazy",20818
+timer-event-start,"V8.ParseLazyMicroSeconds",20736
+timer-event-end,"V8.ParseLazyMicroSeconds",20818
timer-event-start,"V8.CompileLazy",20838
timer-event-start,"V8.CompileFullCode",20854
code-creation,Stub,2,0x2b82c9c0,587,"FastCloneShallowArrayStub"
timer-event-end,"V8.CompileFullCode",21298
code-creation,LazyCompile,0,0x2b82cc20,812,"DefaultNumber native runtime.js:645",0x44226390,~
timer-event-end,"V8.CompileLazy",21330
-timer-event-start,"V8.ParseLazy",21352
-timer-event-end,"V8.ParseLazy",21381
+timer-event-start,"V8.ParseLazyMicroSeconds",21352
+timer-event-end,"V8.ParseLazyMicroSeconds",21381
timer-event-start,"V8.CompileLazy",21393
timer-event-start,"V8.CompileFullCode",21405
timer-event-end,"V8.CompileFullCode",21436
code-creation,LazyCompile,0,0x2b82cf60,184,"valueOf native v8natives.js:245",0x4421ee84,~
timer-event-end,"V8.CompileLazy",21465
-timer-event-start,"V8.ParseLazy",21482
-timer-event-end,"V8.ParseLazy",21544
+timer-event-start,"V8.ParseLazyMicroSeconds",21482
+timer-event-end,"V8.ParseLazyMicroSeconds",21544
timer-event-start,"V8.CompileLazy",21557
timer-event-start,"V8.CompileFullCode",21571
timer-event-end,"V8.CompileFullCode",21651
code-creation,LazyCompile,0,0x2b82d020,652,"ToObject native runtime.js:584",0x44226150,~
timer-event-end,"V8.CompileLazy",21690
tick,0x80eabe2,21708,0,0xff81f7a8,2,0x2b82cfe4,0x2b82cd79,0x2b825c84,0x2b822ca7,0x2b82af24,0x2b81b4f0,0x2b82beff,0x2b81b59f,0x2b82beff,0x2b81b589,0x2b82b0b8,0x2b81b4f0
-timer-event-start,"V8.ParseLazy",21761
-timer-event-end,"V8.ParseLazy",21796
+timer-event-start,"V8.ParseLazyMicroSeconds",21761
+timer-event-end,"V8.ParseLazyMicroSeconds",21796
timer-event-start,"V8.CompileLazy",21808
timer-event-start,"V8.CompileFullCode",21820
timer-event-end,"V8.CompileFullCode",21845
code-creation,LazyCompile,0,0x2b82d2c0,220,"IsPrimitive native runtime.js:636",0x44226330,~
timer-event-end,"V8.CompileLazy",21873
-timer-event-start,"V8.ParseLazy",21895
-timer-event-end,"V8.ParseLazy",21921
+timer-event-start,"V8.ParseLazyMicroSeconds",21895
+timer-event-end,"V8.ParseLazyMicroSeconds",21921
timer-event-start,"V8.CompileLazy",21932
timer-event-start,"V8.CompileFullCode",21946
timer-event-end,"V8.CompileFullCode",21966
code-creation,LazyCompile,0,0x2b82d3a0,184,"toString native v8natives.js:1721",0x44220834,~
timer-event-end,"V8.CompileLazy",21994
-timer-event-start,"V8.ParseLazy",22009
-timer-event-end,"V8.ParseLazy",22087
+timer-event-start,"V8.ParseLazyMicroSeconds",22009
+timer-event-end,"V8.ParseLazyMicroSeconds",22087
timer-event-start,"V8.CompileLazy",22101
timer-event-start,"V8.CompileFullCode",22116
timer-event-end,"V8.CompileFullCode",22221
@@ -902,31 +902,31 @@ code-creation,Stub,2,0x2b82dc80,98,"toString"
code-creation,LoadIC,5,0x2b82dd00,93,"toString"
code-creation,CallIC,7,0x2b82dd60,129,"FunctionSourceString"
code-creation,CallIC,7,0x2b82de00,128,"ToNumber"
-timer-event-start,"V8.Parse",22650
+timer-event-start,"V8.ParseMicroSeconds",22650
tick,0xf776d430,22726,0,0x0,2
-timer-event-end,"V8.Parse",22773
+timer-event-end,"V8.ParseMicroSeconds",22773
timer-event-start,"V8.Compile",22785
timer-event-start,"V8.CompileFullCode",22801
timer-event-end,"V8.CompileFullCode",22822
code-creation,Script,0,0x2b82de80,264,"native arraybuffer.js",0x4423ab7c,~
timer-event-end,"V8.Compile",22836
-timer-event-start,"V8.ParseLazy",22859
-timer-event-end,"V8.ParseLazy",22881
+timer-event-start,"V8.ParseLazyMicroSeconds",22859
+timer-event-end,"V8.ParseLazyMicroSeconds",22881
timer-event-start,"V8.CompileLazy",22887
timer-event-start,"V8.CompileFullCode",22899
timer-event-end,"V8.CompileFullCode",22918
code-creation,LazyCompile,0,0x2b82dfa0,480,"SetUpArrayBuffer native arraybuffer.js:84",0x4423aac0,~
timer-event-end,"V8.CompileLazy",22934
-timer-event-start,"V8.ParseLazy",22943
-timer-event-end,"V8.ParseLazy",22962
+timer-event-start,"V8.ParseLazyMicroSeconds",22943
+timer-event-end,"V8.ParseLazyMicroSeconds",22962
timer-event-start,"V8.CompileLazy",22967
timer-event-start,"V8.CompileFullCode",22972
timer-event-end,"V8.CompileFullCode",22987
code-creation,LazyCompile,0,0x2b82e180,324,"ArrayBufferConstructor native arraybuffer.js:34",0x4423a9a0,~
timer-event-end,"V8.CompileLazy",23000
code-creation,LazyCompile,0,0x2b82e180,324,"ArrayBufferConstructor native arraybuffer.js:34",0x4423a9a0,
-timer-event-start,"V8.ParseLazy",23021
-timer-event-end,"V8.ParseLazy",23037
+timer-event-start,"V8.ParseLazyMicroSeconds",23021
+timer-event-end,"V8.ParseLazyMicroSeconds",23037
timer-event-start,"V8.CompileLazy",23042
timer-event-start,"V8.CompileFullCode",23047
timer-event-end,"V8.CompileFullCode",23057
@@ -934,23 +934,23 @@ code-creation,LazyCompile,0,0x2b82e2e0,252,"InstallGetter native v8natives.js:63
timer-event-end,"V8.CompileLazy",23069
code-creation,KeyedLoadIC,6,0x2b82e3e0,91,""
code-creation,LoadIC,5,0x2b82e440,93,"length"
-timer-event-start,"V8.Parse",23160
-timer-event-end,"V8.Parse",23613
+timer-event-start,"V8.ParseMicroSeconds",23160
+timer-event-end,"V8.ParseMicroSeconds",23613
timer-event-start,"V8.Compile",23621
timer-event-start,"V8.CompileFullCode",23666
timer-event-end,"V8.CompileFullCode",23702
code-creation,Script,0,0x2b82e4a0,720,"native typedarray.js",0x4423bc04,~
timer-event-end,"V8.Compile",23724
-timer-event-start,"V8.ParseLazy",23755
+timer-event-start,"V8.ParseLazyMicroSeconds",23755
tick,0xf776d430,23782,0,0x0,2
-timer-event-end,"V8.ParseLazy",23867
+timer-event-end,"V8.ParseLazyMicroSeconds",23867
timer-event-start,"V8.CompileLazy",23905
timer-event-start,"V8.CompileFullCode",23916
timer-event-end,"V8.CompileFullCode",23939
code-creation,LazyCompile,0,0x2b82e780,664,"SetupTypedArray native typedarray.js:170",0x4423b238,~
timer-event-end,"V8.CompileLazy",23971
-timer-event-start,"V8.ParseLazy",23979
-timer-event-end,"V8.ParseLazy",24064
+timer-event-start,"V8.ParseLazyMicroSeconds",23979
+timer-event-end,"V8.ParseLazyMicroSeconds",24064
timer-event-start,"V8.CompileLazy",24071
timer-event-start,"V8.CompileFullCode",24085
code-creation,Stub,2,0x2b82ea20,175,"FastNewContextStub"
@@ -959,8 +959,8 @@ code-creation,Stub,2,0x2b82ec20,1448,"RecordWriteStub"
timer-event-end,"V8.CompileFullCode",24149
code-creation,LazyCompile,0,0x2b82f1e0,460,"CreateTypedArrayConstructor native typedarray.js:38",0x4423af98,~
timer-event-end,"V8.CompileLazy",24163
-timer-event-start,"V8.ParseLazy",24170
-timer-event-end,"V8.ParseLazy",24198
+timer-event-start,"V8.ParseLazyMicroSeconds",24170
+timer-event-end,"V8.ParseLazyMicroSeconds",24198
timer-event-start,"V8.CompileLazy",24203
timer-event-start,"V8.CompileFullCode",24211
code-creation,Stub,2,0x2b82f3c0,331,"CallFunctionStub_Args4_Recording"
@@ -970,8 +970,8 @@ timer-event-end,"V8.CompileFullCode",24435
code-creation,LazyCompile,0,0x2b82f8a0,824," native typedarray.js:88",0x4423c580,~
timer-event-end,"V8.CompileLazy",24448
code-creation,LazyCompile,0,0x2b82f8a0,824," native typedarray.js:88",0x4423c580,
-timer-event-start,"V8.ParseLazy",24478
-timer-event-end,"V8.ParseLazy",24519
+timer-event-start,"V8.ParseLazyMicroSeconds",24478
+timer-event-end,"V8.ParseLazyMicroSeconds",24519
timer-event-start,"V8.CompileLazy",24525
timer-event-start,"V8.CompileFullCode",24533
timer-event-end,"V8.CompileFullCode",24546
@@ -999,15 +999,15 @@ tick,0xf7492ece,24846,0,0xff81ff10,0,0x2b82e839,0x2b82e5f9
code-creation,LazyCompile,0,0x2b82f8a0,824," native typedarray.js:88",0x4423c580,
code-creation,LazyCompile,0,0x2b82f8a0,824," native typedarray.js:88",0x4423c580,
code-creation,LazyCompile,0,0x2b82f8a0,824," native typedarray.js:88",0x4423c580,
-timer-event-start,"V8.ParseLazy",25032
-timer-event-end,"V8.ParseLazy",25074
+timer-event-start,"V8.ParseLazyMicroSeconds",25032
+timer-event-end,"V8.ParseLazyMicroSeconds",25074
timer-event-start,"V8.CompileLazy",25081
timer-event-start,"V8.CompileFullCode",25093
timer-event-end,"V8.CompileFullCode",25115
code-creation,LazyCompile,0,0x2b8304a0,888,"SetupDataView native typedarray.js:434",0x4423ba78,~
timer-event-end,"V8.CompileLazy",25128
-timer-event-start,"V8.ParseLazy",25136
-timer-event-end,"V8.ParseLazy",25175
+timer-event-start,"V8.ParseLazyMicroSeconds",25136
+timer-event-end,"V8.ParseLazyMicroSeconds",25175
timer-event-start,"V8.CompileLazy",25181
timer-event-start,"V8.CompileFullCode",25188
code-creation,Stub,12,0x2b830820,88,"BinaryOpStub_SUB_Alloc_Uninitialized+Uninitialized"
@@ -1034,24 +1034,24 @@ code-creation,CallIC,7,0x2b8313c0,128,"IsPrimitive"
code-creation,CallIC,7,0x2b831440,129,"FunctionSourceString"
code-creation,CallIC,7,0x2b8314e0,128,"ToNumber"
tick,0xf776d430,25914,0,0x90ec418,0,0x2b82cda7,0x2b825c84,0x2b822ca7,0x2b82af24,0x2b81b4f0,0x2b82beff,0x2b81b59f,0x2b82beff,0x2b81b589,0x2b82b0b8,0x2b81b4f0
-timer-event-start,"V8.ParseLazy",25965
-timer-event-end,"V8.ParseLazy",25985
+timer-event-start,"V8.ParseLazyMicroSeconds",25965
+timer-event-end,"V8.ParseLazyMicroSeconds",25985
timer-event-start,"V8.CompileLazy",25991
timer-event-start,"V8.RecompileSynchronous",25996
code-creation,LazyCompile,0,0x2b831560,184,"valueOf native v8natives.js:245",0x4421ee84,~
timer-event-end,"V8.RecompileSynchronous",26121
code-creation,LazyCompile,1,0x2b831620,180,"valueOf native v8natives.js:245",0x4421ee84,*
timer-event-end,"V8.CompileLazy",26138
-timer-event-start,"V8.ParseLazy",26144
-timer-event-end,"V8.ParseLazy",26156
+timer-event-start,"V8.ParseLazyMicroSeconds",26144
+timer-event-end,"V8.ParseLazyMicroSeconds",26156
timer-event-start,"V8.CompileLazy",26161
timer-event-start,"V8.RecompileSynchronous",26166
code-creation,LazyCompile,0,0x2b8316e0,220,"IsPrimitive native runtime.js:636",0x44226330,~
timer-event-end,"V8.RecompileSynchronous",26250
code-creation,LazyCompile,1,0x2b8317c0,170,"IsPrimitive native runtime.js:636",0x44226330,*
timer-event-end,"V8.CompileLazy",26266
-timer-event-start,"V8.ParseLazy",26271
-timer-event-end,"V8.ParseLazy",26282
+timer-event-start,"V8.ParseLazyMicroSeconds",26271
+timer-event-end,"V8.ParseLazyMicroSeconds",26282
timer-event-start,"V8.CompileLazy",26286
timer-event-start,"V8.RecompileSynchronous",26291
code-creation,LazyCompile,0,0x2b831880,184,"toString native v8natives.js:1721",0x44220834,~
@@ -1130,173 +1130,173 @@ code-creation,LoadIC,5,0x2b833760,103,"DataViewSetFloat64"
code-creation,CallMiss,7,0x2b8337e0,189,"args_count: 32"
code-creation,CallIC,7,0x2b8338a0,136,"$Array"
code-creation,LoadIC,5,0x2b833940,93,"length"
-timer-event-start,"V8.Parse",28734
-timer-event-start,"V8.PreParse",28760
-timer-event-end,"V8.PreParse",28785
-timer-event-start,"V8.PreParse",28796
-timer-event-end,"V8.PreParse",28803
-timer-event-start,"V8.PreParse",28810
-timer-event-end,"V8.PreParse",28817
-timer-event-start,"V8.PreParse",28828
-timer-event-end,"V8.PreParse",28862
-timer-event-start,"V8.PreParse",28872
-timer-event-end,"V8.PreParse",28878
-timer-event-start,"V8.PreParse",28884
-timer-event-end,"V8.PreParse",28890
-timer-event-start,"V8.PreParse",28905
-timer-event-end,"V8.PreParse",28931
-timer-event-start,"V8.PreParse",28938
-timer-event-end,"V8.PreParse",28970
-timer-event-start,"V8.PreParse",28980
-timer-event-end,"V8.PreParse",28989
-timer-event-start,"V8.PreParse",28995
-timer-event-end,"V8.PreParse",29005
-timer-event-start,"V8.PreParse",29012
-timer-event-end,"V8.PreParse",29019
-timer-event-start,"V8.PreParse",29026
-timer-event-end,"V8.PreParse",29045
-timer-event-start,"V8.PreParse",29052
-timer-event-end,"V8.PreParse",29059
-timer-event-start,"V8.PreParse",29066
-timer-event-end,"V8.PreParse",29072
-timer-event-start,"V8.PreParse",29078
-timer-event-end,"V8.PreParse",29087
+timer-event-start,"V8.ParseMicroSeconds",28734
+timer-event-start,"V8.PreParseMicroSeconds",28760
+timer-event-end,"V8.PreParseMicroSeconds",28785
+timer-event-start,"V8.PreParseMicroSeconds",28796
+timer-event-end,"V8.PreParseMicroSeconds",28803
+timer-event-start,"V8.PreParseMicroSeconds",28810
+timer-event-end,"V8.PreParseMicroSeconds",28817
+timer-event-start,"V8.PreParseMicroSeconds",28828
+timer-event-end,"V8.PreParseMicroSeconds",28862
+timer-event-start,"V8.PreParseMicroSeconds",28872
+timer-event-end,"V8.PreParseMicroSeconds",28878
+timer-event-start,"V8.PreParseMicroSeconds",28884
+timer-event-end,"V8.PreParseMicroSeconds",28890
+timer-event-start,"V8.PreParseMicroSeconds",28905
+timer-event-end,"V8.PreParseMicroSeconds",28931
+timer-event-start,"V8.PreParseMicroSeconds",28938
+timer-event-end,"V8.PreParseMicroSeconds",28970
+timer-event-start,"V8.PreParseMicroSeconds",28980
+timer-event-end,"V8.PreParseMicroSeconds",28989
+timer-event-start,"V8.PreParseMicroSeconds",28995
+timer-event-end,"V8.PreParseMicroSeconds",29005
+timer-event-start,"V8.PreParseMicroSeconds",29012
+timer-event-end,"V8.PreParseMicroSeconds",29019
+timer-event-start,"V8.PreParseMicroSeconds",29026
+timer-event-end,"V8.PreParseMicroSeconds",29045
+timer-event-start,"V8.PreParseMicroSeconds",29052
+timer-event-end,"V8.PreParseMicroSeconds",29059
+timer-event-start,"V8.PreParseMicroSeconds",29066
+timer-event-end,"V8.PreParseMicroSeconds",29072
+timer-event-start,"V8.PreParseMicroSeconds",29078
+timer-event-end,"V8.PreParseMicroSeconds",29087
tick,0xf776d430,29099,0,0x0,2
-timer-event-start,"V8.PreParse",29187
-timer-event-end,"V8.PreParse",29241
-timer-event-start,"V8.PreParse",29253
-timer-event-end,"V8.PreParse",29261
-timer-event-start,"V8.PreParse",29274
-timer-event-end,"V8.PreParse",29286
-timer-event-start,"V8.PreParse",29293
-timer-event-end,"V8.PreParse",29305
-timer-event-start,"V8.PreParse",29314
-timer-event-end,"V8.PreParse",29324
-timer-event-start,"V8.PreParse",29331
-timer-event-end,"V8.PreParse",29344
-timer-event-start,"V8.PreParse",29355
-timer-event-end,"V8.PreParse",29369
-timer-event-start,"V8.PreParse",29375
-timer-event-end,"V8.PreParse",29391
-timer-event-start,"V8.PreParse",29400
-timer-event-end,"V8.PreParse",29408
-timer-event-start,"V8.PreParse",29416
-timer-event-end,"V8.PreParse",29422
-timer-event-start,"V8.PreParse",29435
-timer-event-end,"V8.PreParse",29442
-timer-event-start,"V8.PreParse",29448
-timer-event-end,"V8.PreParse",29461
-timer-event-start,"V8.PreParse",29467
-timer-event-end,"V8.PreParse",29480
-timer-event-start,"V8.PreParse",29489
-timer-event-end,"V8.PreParse",29508
-timer-event-start,"V8.PreParse",29516
-timer-event-end,"V8.PreParse",29547
-timer-event-start,"V8.PreParse",29561
-timer-event-end,"V8.PreParse",29579
-timer-event-start,"V8.PreParse",29587
-timer-event-end,"V8.PreParse",29605
-timer-event-start,"V8.PreParse",29613
-timer-event-end,"V8.PreParse",29639
-timer-event-start,"V8.PreParse",29646
-timer-event-end,"V8.PreParse",29667
-timer-event-start,"V8.PreParse",29677
-timer-event-end,"V8.PreParse",29702
-timer-event-start,"V8.PreParse",29709
-timer-event-end,"V8.PreParse",29735
-timer-event-start,"V8.PreParse",29741
-timer-event-end,"V8.PreParse",29758
-timer-event-start,"V8.PreParse",29764
-timer-event-end,"V8.PreParse",29773
-timer-event-start,"V8.PreParse",29781
-timer-event-end,"V8.PreParse",29796
-timer-event-start,"V8.PreParse",29805
-timer-event-end,"V8.PreParse",29813
-timer-event-start,"V8.PreParse",29821
-timer-event-end,"V8.PreParse",29839
-timer-event-start,"V8.PreParse",29847
-timer-event-end,"V8.PreParse",29861
-timer-event-start,"V8.PreParse",29868
-timer-event-end,"V8.PreParse",29873
-timer-event-start,"V8.PreParse",29880
-timer-event-end,"V8.PreParse",29908
-timer-event-start,"V8.PreParse",29914
-timer-event-end,"V8.PreParse",29923
-timer-event-start,"V8.PreParse",29930
-timer-event-end,"V8.PreParse",29937
-timer-event-start,"V8.PreParse",29944
-timer-event-end,"V8.PreParse",29955
-timer-event-start,"V8.PreParse",29960
-timer-event-end,"V8.PreParse",29970
-timer-event-start,"V8.PreParse",29977
-timer-event-end,"V8.PreParse",29982
-timer-event-start,"V8.PreParse",29989
-timer-event-end,"V8.PreParse",29999
-timer-event-start,"V8.PreParse",30031
-timer-event-end,"V8.PreParse",30041
-timer-event-start,"V8.PreParse",30047
-timer-event-end,"V8.PreParse",30054
-timer-event-start,"V8.PreParse",30060
-timer-event-end,"V8.PreParse",30069
-timer-event-start,"V8.PreParse",30080
-timer-event-end,"V8.PreParse",30106
-timer-event-start,"V8.PreParse",30113
-timer-event-end,"V8.PreParse",30121
-timer-event-start,"V8.PreParse",30127
-timer-event-end,"V8.PreParse",30133
-timer-event-start,"V8.PreParse",30139
-timer-event-end,"V8.PreParse",30148
+timer-event-start,"V8.PreParseMicroSeconds",29187
+timer-event-end,"V8.PreParseMicroSeconds",29241
+timer-event-start,"V8.PreParseMicroSeconds",29253
+timer-event-end,"V8.PreParseMicroSeconds",29261
+timer-event-start,"V8.PreParseMicroSeconds",29274
+timer-event-end,"V8.PreParseMicroSeconds",29286
+timer-event-start,"V8.PreParseMicroSeconds",29293
+timer-event-end,"V8.PreParseMicroSeconds",29305
+timer-event-start,"V8.PreParseMicroSeconds",29314
+timer-event-end,"V8.PreParseMicroSeconds",29324
+timer-event-start,"V8.PreParseMicroSeconds",29331
+timer-event-end,"V8.PreParseMicroSeconds",29344
+timer-event-start,"V8.PreParseMicroSeconds",29355
+timer-event-end,"V8.PreParseMicroSeconds",29369
+timer-event-start,"V8.PreParseMicroSeconds",29375
+timer-event-end,"V8.PreParseMicroSeconds",29391
+timer-event-start,"V8.PreParseMicroSeconds",29400
+timer-event-end,"V8.PreParseMicroSeconds",29408
+timer-event-start,"V8.PreParseMicroSeconds",29416
+timer-event-end,"V8.PreParseMicroSeconds",29422
+timer-event-start,"V8.PreParseMicroSeconds",29435
+timer-event-end,"V8.PreParseMicroSeconds",29442
+timer-event-start,"V8.PreParseMicroSeconds",29448
+timer-event-end,"V8.PreParseMicroSeconds",29461
+timer-event-start,"V8.PreParseMicroSeconds",29467
+timer-event-end,"V8.PreParseMicroSeconds",29480
+timer-event-start,"V8.PreParseMicroSeconds",29489
+timer-event-end,"V8.PreParseMicroSeconds",29508
+timer-event-start,"V8.PreParseMicroSeconds",29516
+timer-event-end,"V8.PreParseMicroSeconds",29547
+timer-event-start,"V8.PreParseMicroSeconds",29561
+timer-event-end,"V8.PreParseMicroSeconds",29579
+timer-event-start,"V8.PreParseMicroSeconds",29587
+timer-event-end,"V8.PreParseMicroSeconds",29605
+timer-event-start,"V8.PreParseMicroSeconds",29613
+timer-event-end,"V8.PreParseMicroSeconds",29639
+timer-event-start,"V8.PreParseMicroSeconds",29646
+timer-event-end,"V8.PreParseMicroSeconds",29667
+timer-event-start,"V8.PreParseMicroSeconds",29677
+timer-event-end,"V8.PreParseMicroSeconds",29702
+timer-event-start,"V8.PreParseMicroSeconds",29709
+timer-event-end,"V8.PreParseMicroSeconds",29735
+timer-event-start,"V8.PreParseMicroSeconds",29741
+timer-event-end,"V8.PreParseMicroSeconds",29758
+timer-event-start,"V8.PreParseMicroSeconds",29764
+timer-event-end,"V8.PreParseMicroSeconds",29773
+timer-event-start,"V8.PreParseMicroSeconds",29781
+timer-event-end,"V8.PreParseMicroSeconds",29796
+timer-event-start,"V8.PreParseMicroSeconds",29805
+timer-event-end,"V8.PreParseMicroSeconds",29813
+timer-event-start,"V8.PreParseMicroSeconds",29821
+timer-event-end,"V8.PreParseMicroSeconds",29839
+timer-event-start,"V8.PreParseMicroSeconds",29847
+timer-event-end,"V8.PreParseMicroSeconds",29861
+timer-event-start,"V8.PreParseMicroSeconds",29868
+timer-event-end,"V8.PreParseMicroSeconds",29873
+timer-event-start,"V8.PreParseMicroSeconds",29880
+timer-event-end,"V8.PreParseMicroSeconds",29908
+timer-event-start,"V8.PreParseMicroSeconds",29914
+timer-event-end,"V8.PreParseMicroSeconds",29923
+timer-event-start,"V8.PreParseMicroSeconds",29930
+timer-event-end,"V8.PreParseMicroSeconds",29937
+timer-event-start,"V8.PreParseMicroSeconds",29944
+timer-event-end,"V8.PreParseMicroSeconds",29955
+timer-event-start,"V8.PreParseMicroSeconds",29960
+timer-event-end,"V8.PreParseMicroSeconds",29970
+timer-event-start,"V8.PreParseMicroSeconds",29977
+timer-event-end,"V8.PreParseMicroSeconds",29982
+timer-event-start,"V8.PreParseMicroSeconds",29989
+timer-event-end,"V8.PreParseMicroSeconds",29999
+timer-event-start,"V8.PreParseMicroSeconds",30031
+timer-event-end,"V8.PreParseMicroSeconds",30041
+timer-event-start,"V8.PreParseMicroSeconds",30047
+timer-event-end,"V8.PreParseMicroSeconds",30054
+timer-event-start,"V8.PreParseMicroSeconds",30060
+timer-event-end,"V8.PreParseMicroSeconds",30069
+timer-event-start,"V8.PreParseMicroSeconds",30080
+timer-event-end,"V8.PreParseMicroSeconds",30106
+timer-event-start,"V8.PreParseMicroSeconds",30113
+timer-event-end,"V8.PreParseMicroSeconds",30121
+timer-event-start,"V8.PreParseMicroSeconds",30127
+timer-event-end,"V8.PreParseMicroSeconds",30133
+timer-event-start,"V8.PreParseMicroSeconds",30139
+timer-event-end,"V8.PreParseMicroSeconds",30148
tick,0x825e06c,30162,0,0x0,2
-timer-event-start,"V8.PreParse",30217
-timer-event-end,"V8.PreParse",30285
-timer-event-start,"V8.PreParse",30293
-timer-event-end,"V8.PreParse",30319
-timer-event-start,"V8.PreParse",30326
-timer-event-end,"V8.PreParse",30344
-timer-event-start,"V8.PreParse",30350
-timer-event-end,"V8.PreParse",30367
-timer-event-start,"V8.PreParse",30374
-timer-event-end,"V8.PreParse",30385
-timer-event-start,"V8.PreParse",30392
-timer-event-end,"V8.PreParse",30400
-timer-event-start,"V8.PreParse",30407
-timer-event-end,"V8.PreParse",30415
-timer-event-start,"V8.PreParse",30429
-timer-event-end,"V8.PreParse",30446
-timer-event-start,"V8.PreParse",30456
-timer-event-end,"V8.PreParse",30461
-timer-event-start,"V8.PreParse",30469
-timer-event-end,"V8.PreParse",30480
-timer-event-start,"V8.PreParse",30488
-timer-event-end,"V8.PreParse",30497
-timer-event-start,"V8.PreParse",30503
-timer-event-end,"V8.PreParse",30511
-timer-event-start,"V8.PreParse",30517
-timer-event-end,"V8.PreParse",30528
-timer-event-start,"V8.PreParse",30535
-timer-event-end,"V8.PreParse",30539
-timer-event-start,"V8.PreParse",30546
-timer-event-end,"V8.PreParse",30550
-timer-event-start,"V8.PreParse",30568
-timer-event-end,"V8.PreParse",30577
-timer-event-start,"V8.PreParse",30586
-timer-event-end,"V8.PreParse",30591
-timer-event-start,"V8.PreParse",30600
-timer-event-end,"V8.PreParse",30610
-timer-event-start,"V8.PreParse",30616
-timer-event-end,"V8.PreParse",30621
-timer-event-start,"V8.PreParse",30630
-timer-event-end,"V8.PreParse",30638
-timer-event-start,"V8.PreParse",30649
-timer-event-end,"V8.PreParse",30665
-timer-event-start,"V8.PreParse",30672
-timer-event-end,"V8.PreParse",30682
-timer-event-start,"V8.PreParse",30692
-timer-event-end,"V8.PreParse",30706
-timer-event-start,"V8.PreParse",30719
-timer-event-end,"V8.PreParse",30730
-timer-event-start,"V8.PreParse",30737
-timer-event-end,"V8.PreParse",30749
+timer-event-start,"V8.PreParseMicroSeconds",30217
+timer-event-end,"V8.PreParseMicroSeconds",30285
+timer-event-start,"V8.PreParseMicroSeconds",30293
+timer-event-end,"V8.PreParseMicroSeconds",30319
+timer-event-start,"V8.PreParseMicroSeconds",30326
+timer-event-end,"V8.PreParseMicroSeconds",30344
+timer-event-start,"V8.PreParseMicroSeconds",30350
+timer-event-end,"V8.PreParseMicroSeconds",30367
+timer-event-start,"V8.PreParseMicroSeconds",30374
+timer-event-end,"V8.PreParseMicroSeconds",30385
+timer-event-start,"V8.PreParseMicroSeconds",30392
+timer-event-end,"V8.PreParseMicroSeconds",30400
+timer-event-start,"V8.PreParseMicroSeconds",30407
+timer-event-end,"V8.PreParseMicroSeconds",30415
+timer-event-start,"V8.PreParseMicroSeconds",30429
+timer-event-end,"V8.PreParseMicroSeconds",30446
+timer-event-start,"V8.PreParseMicroSeconds",30456
+timer-event-end,"V8.PreParseMicroSeconds",30461
+timer-event-start,"V8.PreParseMicroSeconds",30469
+timer-event-end,"V8.PreParseMicroSeconds",30480
+timer-event-start,"V8.PreParseMicroSeconds",30488
+timer-event-end,"V8.PreParseMicroSeconds",30497
+timer-event-start,"V8.PreParseMicroSeconds",30503
+timer-event-end,"V8.PreParseMicroSeconds",30511
+timer-event-start,"V8.PreParseMicroSeconds",30517
+timer-event-end,"V8.PreParseMicroSeconds",30528
+timer-event-start,"V8.PreParseMicroSeconds",30535
+timer-event-end,"V8.PreParseMicroSeconds",30539
+timer-event-start,"V8.PreParseMicroSeconds",30546
+timer-event-end,"V8.PreParseMicroSeconds",30550
+timer-event-start,"V8.PreParseMicroSeconds",30568
+timer-event-end,"V8.PreParseMicroSeconds",30577
+timer-event-start,"V8.PreParseMicroSeconds",30586
+timer-event-end,"V8.PreParseMicroSeconds",30591
+timer-event-start,"V8.PreParseMicroSeconds",30600
+timer-event-end,"V8.PreParseMicroSeconds",30610
+timer-event-start,"V8.PreParseMicroSeconds",30616
+timer-event-end,"V8.PreParseMicroSeconds",30621
+timer-event-start,"V8.PreParseMicroSeconds",30630
+timer-event-end,"V8.PreParseMicroSeconds",30638
+timer-event-start,"V8.PreParseMicroSeconds",30649
+timer-event-end,"V8.PreParseMicroSeconds",30665
+timer-event-start,"V8.PreParseMicroSeconds",30672
+timer-event-end,"V8.PreParseMicroSeconds",30682
+timer-event-start,"V8.PreParseMicroSeconds",30692
+timer-event-end,"V8.PreParseMicroSeconds",30706
+timer-event-start,"V8.PreParseMicroSeconds",30719
+timer-event-end,"V8.PreParseMicroSeconds",30730
+timer-event-start,"V8.PreParseMicroSeconds",30737
+timer-event-end,"V8.PreParseMicroSeconds",30749
tick,0x82b07f6,31208,0,0x0,2
tick,0x824d3ad,32274,0,0x0,2
tick,0x82b07c6,33327,0,0x0,2
@@ -1309,19 +1309,19 @@ tick,0x81fc625,39722,0,0x0,2
tick,0x81fc61e,40783,0,0x0,2
tick,0x821c1a1,41846,0,0x0,2
tick,0x81fc62c,42913,0,0x0,2
-timer-event-start,"V8.PreParse",43415
-timer-event-end,"V8.PreParse",43428
-timer-event-start,"V8.PreParse",43446
-timer-event-end,"V8.PreParse",43481
-timer-event-end,"V8.Parse",43493
+timer-event-start,"V8.PreParseMicroSeconds",43415
+timer-event-end,"V8.PreParseMicroSeconds",43428
+timer-event-start,"V8.PreParseMicroSeconds",43446
+timer-event-end,"V8.PreParseMicroSeconds",43481
+timer-event-end,"V8.ParseMicroSeconds",43493
timer-event-start,"V8.Compile",43498
timer-event-start,"V8.CompileFullCode",43528
timer-event-end,"V8.CompileFullCode",43671
code-creation,Script,0,0x2b8339a0,6060,"bsuite/kraken-once/stanford-crypto-ccm.js",0x2f33b684,~
timer-event-end,"V8.Compile",43688
timer-event-start,"V8.Execute",43739
-timer-event-start,"V8.ParseLazy",43914
-timer-event-end,"V8.ParseLazy",43936
+timer-event-start,"V8.ParseLazyMicroSeconds",43914
+timer-event-end,"V8.ParseLazyMicroSeconds",43936
timer-event-start,"V8.CompileLazy",43942
timer-event-start,"V8.CompileFullCode",43947
timer-event-end,"V8.CompileFullCode",43966
@@ -1331,8 +1331,8 @@ timer-event-end,"V8.CompileLazy",44194
code-creation,Stub,2,0x2b8352e0,188,"KeyedLoadElementStub"
code-creation,KeyedLoadIC,6,0x2b8353a0,91,""
code-creation,CallPreMonomorphic,7,0x2b835400,178,"args_count: 0"
-timer-event-start,"V8.ParseLazy",44292
-timer-event-end,"V8.ParseLazy",44326
+timer-event-start,"V8.ParseLazyMicroSeconds",44292
+timer-event-end,"V8.ParseLazyMicroSeconds",44326
timer-event-start,"V8.CompileLazy",44333
timer-event-start,"V8.CompileFullCode",44340
code-creation,Stub,12,0x2b8354c0,88,"BinaryOpStub_MOD_Alloc_Uninitialized+Uninitialized"
@@ -1342,16 +1342,16 @@ code-creation,LazyCompile,0,0x2b835580,906,"sjcl.hash.sha256.w bsuite/kraken-onc
timer-event-end,"V8.CompileLazy",44407
code-creation,Stub,12,0x2b835920,167,"BinaryOpStub_MUL_Alloc_Smi+Smi"
code-creation,Stub,13,0x2b8359e0,122,"CompareICStub"
-timer-event-start,"V8.ParseLazy",44439
-timer-event-end,"V8.ParseLazy",44460
+timer-event-start,"V8.ParseLazyMicroSeconds",44439
+timer-event-end,"V8.ParseLazyMicroSeconds",44460
timer-event-start,"V8.CompileLazy",44465
timer-event-start,"V8.CompileFullCode",44471
code-creation,Stub,2,0x2b835a60,501,"MathPowStub"
timer-event-end,"V8.CompileFullCode",44505
code-creation,LazyCompile,0,0x2b835c60,304,"pow native math.js:181",0x4422259c,~
timer-event-end,"V8.CompileLazy",44517
-timer-event-start,"V8.ParseLazy",44522
-timer-event-end,"V8.ParseLazy",44534
+timer-event-start,"V8.ParseLazyMicroSeconds",44522
+timer-event-end,"V8.ParseLazyMicroSeconds",44534
timer-event-start,"V8.CompileLazy",44539
timer-event-start,"V8.CompileFullCode",44545
code-creation,Stub,12,0x2b835da0,88,"BinaryOpStub_MUL_OverwriteLeft_Uninitialized+Uninitialized"
@@ -1359,8 +1359,8 @@ code-creation,Stub,12,0x2b835e00,88,"BinaryOpStub_BIT_OR_OverwriteLeft_Uninitial
timer-event-end,"V8.CompileFullCode",44570
code-creation,LazyCompile,0,0x2b835e60,228,"a bsuite/kraken-once/stanford-crypto-ccm.js:17",0x2f33d150,~
timer-event-end,"V8.CompileLazy",44582
-timer-event-start,"V8.ParseLazy",44590
-timer-event-end,"V8.ParseLazy",44609
+timer-event-start,"V8.ParseLazyMicroSeconds",44590
+timer-event-end,"V8.ParseLazyMicroSeconds",44609
timer-event-start,"V8.CompileLazy",44614
timer-event-start,"V8.CompileFullCode",44619
code-creation,Stub,12,0x2b835f60,88,"BinaryOpStub_SHR_Alloc_Uninitialized+Uninitialized"
@@ -1390,8 +1390,8 @@ code-creation,Stub,2,0x2b837900,95,"a"
code-creation,LoadIC,5,0x2b837960,93,"a"
code-creation,Stub,12,0x2b8379c0,190,"BinaryOpStub_MOD_Alloc_Smi+Smi"
code-creation,Stub,12,0x2b837a80,181,"BinaryOpStub_MOD_Alloc_Smi+Smi"
-timer-event-start,"V8.ParseLazy",45383
-timer-event-end,"V8.ParseLazy",45402
+timer-event-start,"V8.ParseLazyMicroSeconds",45383
+timer-event-end,"V8.ParseLazyMicroSeconds",45402
timer-event-start,"V8.CompileLazy",45408
timer-event-start,"V8.CompileFullCode",45413
timer-event-end,"V8.CompileFullCode",45428
@@ -1401,15 +1401,15 @@ code-creation,StoreIC,9,0x2b837c60,138,"codec"
code-creation,StoreIC,9,0x2b837d00,141,"hex"
tick,0x8294f6f,46096,0,0xff820124,0,0x2b834ff0
code-creation,StoreIC,9,0x2b837da0,171,"ccm"
-timer-event-start,"V8.ParseLazy",46605
-timer-event-end,"V8.ParseLazy",46625
+timer-event-start,"V8.ParseLazyMicroSeconds",46605
+timer-event-end,"V8.ParseLazyMicroSeconds",46625
timer-event-start,"V8.CompileLazy",46630
timer-event-start,"V8.CompileFullCode",46635
timer-event-end,"V8.CompileFullCode",46649
code-creation,LazyCompile,0,0x2b837e60,300,"sjcl.test.TestCase bsuite/kraken-once/stanford-crypto-ccm.js:99",0x2f33b210,~
timer-event-end,"V8.CompileLazy",46663
-timer-event-start,"V8.ParseLazy",46681
-timer-event-end,"V8.ParseLazy",46712
+timer-event-start,"V8.ParseLazyMicroSeconds",46681
+timer-event-end,"V8.ParseLazyMicroSeconds",46712
timer-event-start,"V8.CompileLazy",46718
timer-event-start,"V8.CompileFullCode",46725
code-creation,CallInitialize,7,0x2b837fa0,178,"args_count: 4"
@@ -1419,8 +1419,8 @@ timer-event-end,"V8.CompileLazy",46788
code-creation,Stub,13,0x2b838420,485,"CompareICStub"
code-creation,CallIC,7,0x2b838620,128,"ToString"
code-creation,CallPreMonomorphic,7,0x2b8386a0,178,"args_count: 4"
-timer-event-start,"V8.ParseLazy",46859
-timer-event-end,"V8.ParseLazy",46876
+timer-event-start,"V8.ParseLazyMicroSeconds",46859
+timer-event-end,"V8.ParseLazyMicroSeconds",46876
timer-event-start,"V8.CompileLazy",46881
timer-event-start,"V8.CompileFullCode",46888
code-creation,CallInitialize,7,0x2b838760,178,"args_count: 5"
@@ -1428,24 +1428,24 @@ timer-event-end,"V8.CompileFullCode",46910
code-creation,LazyCompile,0,0x2b838820,320,"browserUtil.cpsMap bsuite/kraken-once/stanford-crypto-ccm.js:63",0x2f33b030,~
timer-event-end,"V8.CompileLazy",46922
code-creation,CallPreMonomorphic,7,0x2b838960,178,"args_count: 5"
-timer-event-start,"V8.ParseLazy",46937
-timer-event-end,"V8.ParseLazy",46959
+timer-event-start,"V8.ParseLazyMicroSeconds",46937
+timer-event-end,"V8.ParseLazyMicroSeconds",46959
timer-event-start,"V8.CompileLazy",46965
timer-event-start,"V8.CompileFullCode",46972
code-creation,Stub,2,0x2b838a20,172,"FastNewContextStub"
timer-event-end,"V8.CompileFullCode",46995
code-creation,LazyCompile,0,0x2b838ae0,420,"browserUtil.cpsIterate bsuite/kraken-once/stanford-crypto-ccm.js:49",0x2f33afd0,~
timer-event-end,"V8.CompileLazy",47008
-timer-event-start,"V8.ParseLazy",47013
-timer-event-end,"V8.ParseLazy",47029
+timer-event-start,"V8.ParseLazyMicroSeconds",47013
+timer-event-end,"V8.ParseLazyMicroSeconds",47029
timer-event-start,"V8.CompileLazy",47034
timer-event-start,"V8.CompileFullCode",47041
code-creation,Stub,2,0x2b838ca0,328,"CallFunctionStub_Args0_Recording"
timer-event-end,"V8.CompileFullCode",47070
code-creation,LazyCompile,0,0x2b838e00,372,"go bsuite/kraken-once/stanford-crypto-ccm.js:50",0x2f33da7c,~
timer-event-end,"V8.CompileLazy",47082
-timer-event-start,"V8.ParseLazy",47088
-timer-event-end,"V8.ParseLazy",47110
+timer-event-start,"V8.ParseLazyMicroSeconds",47088
+timer-event-end,"V8.ParseLazyMicroSeconds",47110
timer-event-start,"V8.CompileLazy",47115
timer-event-start,"V8.CompileFullCode",47121
timer-event-end,"V8.CompileFullCode",47134
@@ -1453,8 +1453,8 @@ code-creation,LazyCompile,0,0x2b838f80,236," bsuite/kraken-once/stanford-crypto-
timer-event-end,"V8.CompileLazy",47146
tick,0xf776d430,47160,0,0x90ec418,2,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
code-creation,KeyedLoadIC,6,0x2b839080,91,""
-timer-event-start,"V8.ParseLazy",47296
-timer-event-end,"V8.ParseLazy",47317
+timer-event-start,"V8.ParseLazyMicroSeconds",47296
+timer-event-end,"V8.ParseLazyMicroSeconds",47317
timer-event-start,"V8.CompileLazy",47323
timer-event-start,"V8.CompileFullCode",47329
code-creation,CallInitialize,7,0x2b8390e0,178,"args_count: 3"
@@ -1462,36 +1462,36 @@ timer-event-end,"V8.CompileFullCode",47355
code-creation,LazyCompile,0,0x2b8391a0,260," bsuite/kraken-once/stanford-crypto-ccm.js:192",0x2f33d920,~
timer-event-end,"V8.CompileLazy",47368
code-creation,CallPreMonomorphic,7,0x2b8392c0,178,"args_count: 3"
-timer-event-start,"V8.ParseLazy",47390
-timer-event-end,"V8.ParseLazy",47409
+timer-event-start,"V8.ParseLazyMicroSeconds",47390
+timer-event-end,"V8.ParseLazyMicroSeconds",47409
timer-event-start,"V8.CompileLazy",47415
timer-event-start,"V8.CompileFullCode",47421
timer-event-end,"V8.CompileFullCode",47438
code-creation,LazyCompile,0,0x2b839380,344,"sjcl.test.TestCase.run bsuite/kraken-once/stanford-crypto-ccm.js:168",0x2f33b450,~
timer-event-end,"V8.CompileLazy",47452
-timer-event-start,"V8.ParseLazy",47462
-timer-event-end,"V8.ParseLazy",47476
+timer-event-start,"V8.ParseLazyMicroSeconds",47462
+timer-event-end,"V8.ParseLazyMicroSeconds",47476
timer-event-start,"V8.CompileLazy",47481
timer-event-start,"V8.CompileFullCode",47485
timer-event-end,"V8.CompileFullCode",47496
code-creation,LazyCompile,0,0x2b8394e0,208,"valueOf native date.js:361",0x44218984,~
timer-event-end,"V8.CompileLazy",47507
-timer-event-start,"V8.ParseLazy",47517
-timer-event-end,"V8.ParseLazy",47526
+timer-event-start,"V8.ParseLazyMicroSeconds",47517
+timer-event-end,"V8.ParseLazyMicroSeconds",47526
timer-event-start,"V8.CompileLazy",47531
timer-event-start,"V8.CompileFullCode",47536
timer-event-end,"V8.CompileFullCode",47545
code-creation,LazyCompile,0,0x2b8395c0,192,"browserUtil.pauseAndThen bsuite/kraken-once/stanford-crypto-ccm.js:47",0x2f33af70,~
timer-event-end,"V8.CompileLazy",47557
-timer-event-start,"V8.ParseLazy",47561
-timer-event-end,"V8.ParseLazy",47571
+timer-event-start,"V8.ParseLazyMicroSeconds",47561
+timer-event-end,"V8.ParseLazyMicroSeconds",47571
timer-event-start,"V8.CompileLazy",47576
timer-event-start,"V8.CompileFullCode",47581
timer-event-end,"V8.CompileFullCode",47591
code-creation,LazyCompile,0,0x2b839680,192," bsuite/kraken-once/stanford-crypto-ccm.js:171",0x2f33dc70,~
timer-event-end,"V8.CompileLazy",47602
-timer-event-start,"V8.ParseLazy",47608
-timer-event-end,"V8.ParseLazy",47674
+timer-event-start,"V8.ParseLazyMicroSeconds",47608
+timer-event-end,"V8.ParseLazyMicroSeconds",47674
timer-event-start,"V8.CompileLazy",47681
timer-event-start,"V8.CompileFullCode",47693
code-creation,Stub,2,0x2b839740,196,"FastNewContextStub"
@@ -1500,8 +1500,8 @@ timer-event-end,"V8.CompileFullCode",47755
code-creation,LazyCompile,0,0x2b839880,716," bsuite/kraken-once/stanford-crypto-ccm.js:7235",0x2f33b5d0,~
timer-event-end,"V8.CompileLazy",47768
code-creation,Stub,12,0x2b839b60,196,"BinaryOpStub_DIV_Alloc_Smi+Smi"
-timer-event-start,"V8.ParseLazy",47798
-timer-event-end,"V8.ParseLazy",47845
+timer-event-start,"V8.ParseLazyMicroSeconds",47798
+timer-event-end,"V8.ParseLazyMicroSeconds",47845
timer-event-start,"V8.CompileLazy",47851
timer-event-start,"V8.CompileFullCode",47863
code-creation,Stub,12,0x2b839c40,88,"BinaryOpStub_MUL_OverwriteRight_Uninitialized+Uninitialized"
@@ -1509,24 +1509,24 @@ timer-event-end,"V8.CompileFullCode",47917
code-creation,LazyCompile,0,0x2b839ca0,2065," bsuite/kraken-once/stanford-crypto-ccm.js:7243",0x2f33de10,
timer-event-end,"V8.CompileLazy",47930
code-creation,Stub,12,0x2b83a4c0,167,"BinaryOpStub_MUL_OverwriteRight_Smi+Smi"
-timer-event-start,"V8.ParseLazy",47958
-timer-event-end,"V8.ParseLazy",47986
+timer-event-start,"V8.ParseLazyMicroSeconds",47958
+timer-event-end,"V8.ParseLazyMicroSeconds",47986
timer-event-start,"V8.CompileLazy",47992
timer-event-start,"V8.CompileFullCode",47998
code-creation,Stub,12,0x2b83a580,88,"BinaryOpStub_BIT_XOR_Alloc_Uninitialized+Uninitialized"
timer-event-end,"V8.CompileFullCode",48031
code-creation,LazyCompile,0,0x2b83a5e0,717,"sjcl.codec.hex.toBits bsuite/kraken-once/stanford-crypto-ccm.js:13",0x2f339e90,~
timer-event-end,"V8.CompileLazy",48044
-timer-event-start,"V8.ParseLazy",48061
-timer-event-end,"V8.ParseLazy",48119
+timer-event-start,"V8.ParseLazyMicroSeconds",48061
+timer-event-end,"V8.ParseLazyMicroSeconds",48119
timer-event-start,"V8.CompileLazy",48126
timer-event-start,"V8.CompileFullCode",48135
timer-event-end,"V8.CompileFullCode",48188
code-creation,LazyCompile,0,0x2b83a8c0,1601,"DoConstructRegExp native regexp.js:39",0x44222a28,~
timer-event-end,"V8.CompileLazy",48203
-timer-event-start,"V8.ParseLazy",48213
+timer-event-start,"V8.ParseLazyMicroSeconds",48213
tick,0x80eabd3,48226,0,0xff81fb44,2,0x2b821ae3,0x2b83a6a4,0x2b839e4e,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-end,"V8.ParseLazy",48288
+timer-event-end,"V8.ParseLazyMicroSeconds",48288
timer-event-start,"V8.CompileLazy",48309
timer-event-start,"V8.CompileFullCode",48323
timer-event-end,"V8.CompileFullCode",48372
@@ -1534,8 +1534,8 @@ code-creation,LazyCompile,0,0x2b83af20,1284,"charAt native string.js:64",0x44215
timer-event-end,"V8.CompileLazy",48386
code-creation,Stub,14,0x2b83b440,144,"CompareNilICStub(NullValue)(MonomorphicMap)"
code-creation,Stub,5,0x2b83b4e0,97,"StringLengthStub"
-timer-event-start,"V8.ParseLazy",48435
-timer-event-end,"V8.ParseLazy",48536
+timer-event-start,"V8.ParseLazyMicroSeconds",48435
+timer-event-end,"V8.ParseLazyMicroSeconds",48536
timer-event-start,"V8.CompileLazy",48543
timer-event-start,"V8.CompileFullCode",48555
code-creation,Stub,2,0x2b83b560,828,"SubStringStub"
@@ -1545,8 +1545,8 @@ timer-event-end,"V8.CompileLazy",48654
code-creation,StoreIC,9,0x2b83c220,135,"lastIndex"
code-creation,Stub,14,0x2b83c2c0,124,"CompareNilICStub(NullValue)(Null)"
code-creation,RegExp,4,0x2b83c340,758,"\\s|0x"
-timer-event-start,"V8.ParseLazy",48827
-timer-event-end,"V8.ParseLazy",48873
+timer-event-start,"V8.ParseLazyMicroSeconds",48827
+timer-event-end,"V8.ParseLazyMicroSeconds",48873
timer-event-start,"V8.CompileLazy",48879
timer-event-start,"V8.CompileFullCode",48886
timer-event-end,"V8.CompileFullCode",48916
@@ -1554,8 +1554,8 @@ code-creation,LazyCompile,0,0x2b83c640,960,"substr native string.js:749",0x44216
timer-event-end,"V8.CompileLazy",48930
code-creation,Stub,14,0x2b83ca00,144,"CompareNilICStub(NullValue)(MonomorphicMap)"
code-creation,Stub,13,0x2b83caa0,122,"CompareICStub"
-timer-event-start,"V8.ParseLazy",48959
-timer-event-end,"V8.ParseLazy",49000
+timer-event-start,"V8.ParseLazyMicroSeconds",48959
+timer-event-end,"V8.ParseLazyMicroSeconds",49000
timer-event-start,"V8.CompileLazy",49006
timer-event-start,"V8.CompileFullCode",49012
code-creation,Stub,12,0x2b83cb20,88,"BinaryOpStub_BIT_OR_Alloc_Uninitialized+Uninitialized"
@@ -1569,8 +1569,8 @@ code-creation,CallIC,7,0x2b83d280,129,"parseInt"
code-creation,Stub,2,0x2b83d320,1433,"RecordWriteStub"
code-creation,Stub,2,0x2b83d8c0,611,"RecordWriteStub"
code-creation,CallIC,7,0x2b83db40,656,"push"
-timer-event-start,"V8.ParseLazy",49192
-timer-event-end,"V8.ParseLazy",49229
+timer-event-start,"V8.ParseLazyMicroSeconds",49192
+timer-event-end,"V8.ParseLazyMicroSeconds",49229
timer-event-start,"V8.CompileLazy",49235
timer-event-start,"V8.CompileFullCode",49242
code-creation,Stub,12,0x2b83dde0,88,"BinaryOpStub_SAR_OverwriteRight_Uninitialized+Uninitialized"
@@ -1579,15 +1579,15 @@ tick,0x8250358,49284,0,0xff81fe84,2,0x2b83a871,0x2b839e4e,0x2b838f3f,0x2b838c50,
timer-event-end,"V8.CompileFullCode",49346
code-creation,LazyCompile,0,0x2b83dea0,536,"sjcl.bitArray.clamp bsuite/kraken-once/stanford-crypto-ccm.js:9",0x2f339b30,~
timer-event-end,"V8.CompileLazy",49390
-timer-event-start,"V8.ParseLazy",49402
-timer-event-end,"V8.ParseLazy",49416
+timer-event-start,"V8.ParseLazyMicroSeconds",49402
+timer-event-end,"V8.ParseLazyMicroSeconds",49416
timer-event-start,"V8.CompileLazy",49421
timer-event-start,"V8.CompileFullCode",49426
timer-event-end,"V8.CompileFullCode",49438
code-creation,LazyCompile,0,0x2b83e0c0,248,"ceil native math.js:81",0x442222fc,~
timer-event-end,"V8.CompileLazy",49450
-timer-event-start,"V8.ParseLazy",49466
-timer-event-end,"V8.ParseLazy",49529
+timer-event-start,"V8.ParseLazyMicroSeconds",49466
+timer-event-end,"V8.ParseLazyMicroSeconds",49529
timer-event-start,"V8.CompileLazy",49535
timer-event-start,"V8.CompileFullCode",49544
code-creation,Stub,2,0x2b83e1c0,647,"FastCloneShallowArrayStub"
@@ -1597,8 +1597,8 @@ code-creation,Stub,12,0x2b83e520,88,"BinaryOpStub_BIT_XOR_OverwriteLeft_Uninitia
timer-event-end,"V8.CompileFullCode",49801
code-creation,LazyCompile,0,0x2b83e580,3002,"sjcl.cipher.aes bsuite/kraken-once/stanford-crypto-ccm.js:4",0x2f339830,~
timer-event-end,"V8.CompileLazy",49816
-timer-event-start,"V8.ParseLazy",49829
-timer-event-end,"V8.ParseLazy",49886
+timer-event-start,"V8.ParseLazyMicroSeconds",49829
+timer-event-end,"V8.ParseLazyMicroSeconds",49886
timer-event-start,"V8.CompileLazy",49893
timer-event-start,"V8.CompileFullCode",49902
code-creation,Stub,12,0x2b83f140,88,"BinaryOpStub_BIT_XOR_OverwriteRight_Uninitialized+Uninitialized"
@@ -1666,8 +1666,8 @@ code-creation,CallIC,7,0x2b8434a0,136,"slice"
code-creation,Stub,12,0x2b843540,264,"BinaryOpStub_DIV_Alloc_Smi+Smi"
code-creation,Stub,12,0x2b843660,407,"BinaryOpStub_SAR_OverwriteRight_Number+Smi"
code-creation,Stub,12,0x2b843800,383,"BinaryOpStub_BIT_AND_OverwriteRight_Int32+Smi"
-timer-event-start,"V8.ParseLazy",51907
-timer-event-end,"V8.ParseLazy",51924
+timer-event-start,"V8.ParseLazyMicroSeconds",51907
+timer-event-end,"V8.ParseLazyMicroSeconds",51924
timer-event-start,"V8.CompileLazy",51930
timer-event-start,"V8.CompileFullCode",51935
code-creation,Stub,12,0x2b843980,88,"BinaryOpStub_SHL_OverwriteRight_Uninitialized+Uninitialized"
@@ -1682,8 +1682,8 @@ code-creation,Stub,2,0x2b843f00,240,"KeyedStoreElementStub"
code-creation,KeyedStoreIC,10,0x2b844000,91,""
code-creation,CallMiss,7,0x2b844060,178,"args_count: 3"
code-creation,CallIC,7,0x2b844120,113,"partial"
-timer-event-start,"V8.ParseLazy",52165
-timer-event-end,"V8.ParseLazy",52210
+timer-event-start,"V8.ParseLazyMicroSeconds",52165
+timer-event-end,"V8.ParseLazyMicroSeconds",52210
timer-event-start,"V8.CompileLazy",52217
timer-event-start,"V8.CompileFullCode",52225
code-creation,Stub,12,0x2b8441a0,88,"BinaryOpStub_SHR_OverwriteRight_Uninitialized+Uninitialized"
@@ -1691,24 +1691,24 @@ code-creation,CallInitialize,7,0x2b844200,178,"args_count: 6"
timer-event-end,"V8.CompileFullCode",52278
code-creation,LazyCompile,0,0x2b8442c0,1057,"sjcl.mode.ccm.encrypt bsuite/kraken-once/stanford-crypto-ccm.js:19",0x2f33a250,~
timer-event-end,"V8.CompileLazy",52296
-timer-event-start,"V8.ParseLazy",52306
-timer-event-end,"V8.ParseLazy",52324
+timer-event-start,"V8.ParseLazyMicroSeconds",52306
+timer-event-end,"V8.ParseLazyMicroSeconds",52324
timer-event-start,"V8.CompileLazy",52329
timer-event-start,"V8.CompileFullCode",52334
timer-event-end,"V8.CompileFullCode",52349
code-creation,LazyCompile,0,0x2b844700,336,"sjcl.bitArray.bitLength bsuite/kraken-once/stanford-crypto-ccm.js:9",0x2f339ad0,~
timer-event-end,"V8.CompileLazy",52362
-timer-event-start,"V8.ParseLazy",52375
-timer-event-end,"V8.ParseLazy",52388
+timer-event-start,"V8.ParseLazyMicroSeconds",52375
+timer-event-end,"V8.ParseLazyMicroSeconds",52388
timer-event-start,"V8.CompileLazy",52392
timer-event-start,"V8.CompileFullCode",52398
timer-event-end,"V8.CompileFullCode",52420
code-creation,LazyCompile,0,0x2b844860,236,"sjcl.bitArray.getPartial bsuite/kraken-once/stanford-crypto-ccm.js:10",0x2f339bf0,~
timer-event-end,"V8.CompileLazy",52433
code-creation,Stub,12,0x2b844960,264,"BinaryOpStub_DIV_Alloc_Int32+Number"
-timer-event-start,"V8.ParseLazy",52455
+timer-event-start,"V8.ParseLazyMicroSeconds",52455
tick,0x8092495,52475,0,0xff81fcd0,2,0x2b844833,0x2b84437f,0x2b83a0cc,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-end,"V8.ParseLazy",52539
+timer-event-end,"V8.ParseLazyMicroSeconds",52539
timer-event-start,"V8.CompileLazy",52558
timer-event-start,"V8.CompileFullCode",52577
timer-event-end,"V8.CompileFullCode",52590
@@ -1719,8 +1719,8 @@ code-creation,Stub,12,0x2b844c00,238,"BinaryOpStub_DIV_Alloc_Number+Number"
code-creation,CallIC,7,0x2b844d00,113,"round"
code-creation,Stub,12,0x2b844d80,167,"BinaryOpStub_SHR_OverwriteRight_Smi+Smi"
code-creation,CallPreMonomorphic,7,0x2b844e40,178,"args_count: 6"
-timer-event-start,"V8.ParseLazy",52676
-timer-event-end,"V8.ParseLazy",52738
+timer-event-start,"V8.ParseLazyMicroSeconds",52676
+timer-event-end,"V8.ParseLazyMicroSeconds",52738
timer-event-start,"V8.CompileLazy",52745
timer-event-start,"V8.CompileFullCode",52753
code-creation,Stub,12,0x2b844f00,88,"BinaryOpStub_SHL_OverwriteLeft_Uninitialized+Uninitialized"
@@ -1733,16 +1733,16 @@ code-creation,Stub,12,0x2b845800,167,"BinaryOpStub_SHL_OverwriteLeft_Smi+Smi"
code-creation,Stub,12,0x2b8458c0,145,"BinaryOpStub_BIT_OR_OverwriteRight_Smi+Smi"
code-creation,Stub,12,0x2b845960,145,"BinaryOpStub_BIT_OR_OverwriteLeft_Smi+Smi"
code-creation,Stub,12,0x2b845a00,167,"BinaryOpStub_SHL_OverwriteRight_Smi+Smi"
-timer-event-start,"V8.ParseLazy",52912
-timer-event-end,"V8.ParseLazy",52936
+timer-event-start,"V8.ParseLazyMicroSeconds",52912
+timer-event-end,"V8.ParseLazyMicroSeconds",52936
timer-event-start,"V8.CompileLazy",52941
timer-event-start,"V8.CompileFullCode",52947
timer-event-end,"V8.CompileFullCode",52966
code-creation,LazyCompile,0,0x2b845ac0,560,"sjcl.bitArray.concat bsuite/kraken-once/stanford-crypto-ccm.js:9",0x2f339a70,~
timer-event-end,"V8.CompileLazy",52980
code-creation,Stub,12,0x2b845d00,399,"BinaryOpStub_BIT_OR_Alloc_Number+Smi"
-timer-event-start,"V8.ParseLazy",53013
-timer-event-end,"V8.ParseLazy",53049
+timer-event-start,"V8.ParseLazyMicroSeconds",53013
+timer-event-end,"V8.ParseLazyMicroSeconds",53049
timer-event-start,"V8.CompileLazy",53055
timer-event-start,"V8.CompileFullCode",53062
timer-event-end,"V8.CompileFullCode",53095
@@ -1752,15 +1752,15 @@ code-creation,Stub,13,0x2b846320,485,"CompareICStub"
code-creation,Stub,12,0x2b846520,383,"BinaryOpStub_BIT_OR_OverwriteRight_Int32+Smi"
code-creation,Stub,12,0x2b8466a0,383,"BinaryOpStub_SHL_OverwriteRight_Int32+Smi"
code-creation,Stub,12,0x2b846820,407,"BinaryOpStub_BIT_OR_OverwriteRight_Number+Smi"
-timer-event-start,"V8.ParseLazy",53194
-timer-event-end,"V8.ParseLazy",53206
+timer-event-start,"V8.ParseLazyMicroSeconds",53194
+timer-event-end,"V8.ParseLazyMicroSeconds",53206
timer-event-start,"V8.CompileLazy",53211
timer-event-start,"V8.CompileFullCode",53216
timer-event-end,"V8.CompileFullCode",53226
code-creation,LazyCompile,0,0x2b8469c0,184,"sjcl.cipher.aes.encrypt bsuite/kraken-once/stanford-crypto-ccm.js:6",0x2f339890,~
timer-event-end,"V8.CompileLazy",53243
-timer-event-start,"V8.ParseLazy",53249
-timer-event-end,"V8.ParseLazy",53325
+timer-event-start,"V8.ParseLazyMicroSeconds",53249
+timer-event-end,"V8.ParseLazyMicroSeconds",53325
timer-event-start,"V8.CompileLazy",53332
timer-event-start,"V8.CompileFullCode",53343
code-creation,Stub,12,0x2b846a80,88,"BinaryOpStub_SUB_OverwriteLeft_Uninitialized+Uninitialized"
@@ -1777,8 +1777,8 @@ code-creation,Stub,15,0x2b847f40,164,"ToBooleanStub(Smi,HeapNumber)"
code-creation,CallMiss,7,0x2b848000,178,"args_count: 4"
code-creation,CallIC,7,0x2b8480c0,113,"P"
code-creation,LoadIC,5,0x2b848140,103,"undefined"
-timer-event-start,"V8.ParseLazy",54007
-timer-event-end,"V8.ParseLazy",54027
+timer-event-start,"V8.ParseLazyMicroSeconds",54007
+timer-event-end,"V8.ParseLazyMicroSeconds",54027
timer-event-start,"V8.CompileLazy",54032
timer-event-start,"V8.CompileFullCode",54038
timer-event-end,"V8.CompileFullCode",54052
@@ -1790,8 +1790,8 @@ code-creation,Stub,2,0x2b848460,95,"h"
code-creation,LoadIC,5,0x2b8484c0,93,"h"
code-creation,CallIC,7,0x2b848520,132,"encrypt"
code-creation,Stub,12,0x2b8485c0,371,"BinaryOpStub_BIT_XOR_Alloc_Int32+Number"
-timer-event-start,"V8.ParseLazy",54281
-timer-event-end,"V8.ParseLazy",54332
+timer-event-start,"V8.ParseLazyMicroSeconds",54281
+timer-event-end,"V8.ParseLazyMicroSeconds",54332
timer-event-start,"V8.CompileLazy",54339
timer-event-start,"V8.CompileFullCode",54347
code-creation,Stub,2,0x2b848740,663,"FastCloneShallowArrayStub"
@@ -1800,8 +1800,8 @@ code-creation,LazyCompile,0,0x2b8489e0,1221,"sjcl.mode.ccm.I bsuite/kraken-once/
timer-event-end,"V8.CompileLazy",54577
tick,0x82f2dd2,54590,0,0xff81f67c,2,0x2b844670,0x2b83a0cc,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
code-creation,Stub,12,0x2b848ec0,371,"BinaryOpStub_BIT_XOR_Alloc_Number+Int32"
-timer-event-start,"V8.ParseLazy",54663
-timer-event-end,"V8.ParseLazy",54685
+timer-event-start,"V8.ParseLazyMicroSeconds",54663
+timer-event-end,"V8.ParseLazyMicroSeconds",54685
timer-event-start,"V8.CompileLazy",54691
timer-event-start,"V8.CompileFullCode",54697
code-creation,Stub,12,0x2b849040,88,"BinaryOpStub_SUB_OverwriteRight_Uninitialized+Uninitialized"
@@ -1814,8 +1814,8 @@ code-creation,CallMegamorphic,7,0x2b8494e0,685,"args_count: 2"
code-creation,Stub,12,0x2b8497a0,246,"BinaryOpStub_ADD_Alloc_Number+Smi"
code-creation,LoadPolymorphicIC,5,0x2b8498a0,105,"length"
code-creation,LoadPolymorphicIC,5,0x2b849920,105,"length"
-timer-event-start,"V8.ParseLazy",54933
-timer-event-end,"V8.ParseLazy",54956
+timer-event-start,"V8.ParseLazyMicroSeconds",54933
+timer-event-end,"V8.ParseLazyMicroSeconds",54956
timer-event-start,"V8.CompileLazy",54962
timer-event-start,"V8.CompileFullCode",54968
timer-event-end,"V8.CompileFullCode",54989
@@ -1824,23 +1824,23 @@ timer-event-end,"V8.CompileLazy",55003
code-creation,Stub,12,0x2b849c00,395,"BinaryOpStub_BIT_XOR_Alloc_Number+Number"
code-creation,Stub,12,0x2b849da0,133,"BinaryOpStub_ADD_Alloc_String+Smi"
code-creation,Stub,12,0x2b849e40,133,"BinaryOpStub_ADD_OverwriteLeft_String+Smi"
-timer-event-start,"V8.ParseLazy",55131
-timer-event-end,"V8.ParseLazy",55149
+timer-event-start,"V8.ParseLazyMicroSeconds",55131
+timer-event-end,"V8.ParseLazyMicroSeconds",55149
timer-event-start,"V8.CompileLazy",55155
timer-event-start,"V8.CompileFullCode",55160
timer-event-end,"V8.CompileFullCode",55177
code-creation,LazyCompile,0,0x2b849ee0,292,"sjcl.test.TestCase.require bsuite/kraken-once/stanford-crypto-ccm.js:131",0x2f33b390,~
timer-event-end,"V8.CompileLazy",55190
-timer-event-start,"V8.ParseLazy",55198
-timer-event-end,"V8.ParseLazy",55206
+timer-event-start,"V8.ParseLazyMicroSeconds",55198
+timer-event-end,"V8.ParseLazyMicroSeconds",55206
timer-event-start,"V8.CompileLazy",55211
timer-event-start,"V8.CompileFullCode",55216
timer-event-end,"V8.CompileFullCode",55228
code-creation,LazyCompile,0,0x2b84a020,208,"sjcl.test.TestCase.pass bsuite/kraken-once/stanford-crypto-ccm.js:110",0x2f33b270,~
timer-event-end,"V8.CompileLazy",55240
code-creation,StoreIC,9,0x2b84a100,103,"passes"
-timer-event-start,"V8.ParseLazy",55261
-timer-event-end,"V8.ParseLazy",55307
+timer-event-start,"V8.ParseLazyMicroSeconds",55261
+timer-event-end,"V8.ParseLazyMicroSeconds",55307
timer-event-start,"V8.CompileLazy",55313
timer-event-start,"V8.CompileFullCode",55321
code-creation,Stub,12,0x2b84a180,88,"BinaryOpStub_DIV_OverwriteLeft_Uninitialized+Uninitialized"
@@ -1916,8 +1916,8 @@ code-creation,Stub,2,0x2b84d640,578,"KeyedStoreElementStub"
code-creation,KeyedStorePolymorphicIC,10,0x2b84d8a0,107,""
code-creation,KeyedStorePolymorphicIC,10,0x2b84d8a0,107,"args_count: 0"
timer-event-start,"V8.RecompileSynchronous",57494
-timer-event-start,"V8.ParseLazy",57505
-timer-event-end,"V8.ParseLazy",57586
+timer-event-start,"V8.ParseLazyMicroSeconds",57505
+timer-event-end,"V8.ParseLazyMicroSeconds",57586
code-creation,LazyCompile,0,0x2b84d920,3418,"sjcl.cipher.aes.H bsuite/kraken-once/stanford-crypto-ccm.js:7",0x2f3399b0,~
tick,0x8092457,57778,0,0x19e,2,0x2b846a46,0x2b8455f6,0x2b84a5e0,0x2b83a281,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
timer-event-end,"V8.RecompileSynchronous",57904
@@ -1931,8 +1931,8 @@ code-creation,LoadPolymorphicIC,5,0x2b84ea40,105,"length"
code-creation,LoadPolymorphicIC,5,0x2b84eac0,105,"length"
code-creation,LoadPolymorphicIC,5,0x2b84eb40,105,"length"
timer-event-start,"V8.RecompileSynchronous",58447
-timer-event-start,"V8.ParseLazy",58457
-timer-event-end,"V8.ParseLazy",58501
+timer-event-start,"V8.ParseLazyMicroSeconds",58457
+timer-event-end,"V8.ParseLazyMicroSeconds",58501
code-creation,LazyCompile,0,0x2b84ebc0,1096,"parseInt native v8natives.js:130",0x4421ec1c,~
timer-event-end,"V8.RecompileSynchronous",58637
timer-event-start,"V8.GCScavenger",58779
@@ -1952,14 +1952,14 @@ timer-event-start,"V8.RecompileSynchronous",59590
code-creation,LazyCompile,1,0x2b850220,1662,"parseInt native v8natives.js:130",0x4421ec1c,*
timer-event-end,"V8.RecompileSynchronous",59672
timer-event-start,"V8.RecompileSynchronous",59682
-timer-event-start,"V8.ParseLazy",59687
-timer-event-end,"V8.ParseLazy",59701
+timer-event-start,"V8.ParseLazyMicroSeconds",59687
+timer-event-end,"V8.ParseLazyMicroSeconds",59701
code-creation,LazyCompile,0,0x2b8508a0,236,"sjcl.bitArray.getPartial bsuite/kraken-once/stanford-crypto-ccm.js:10",0x2f339bf0,~
timer-event-end,"V8.RecompileSynchronous",59750
timer-event-start,"V8.RecompileConcurrent",59776
timer-event-start,"V8.RecompileSynchronous",59811
-timer-event-start,"V8.ParseLazy",59820
-timer-event-end,"V8.ParseLazy",59838
+timer-event-start,"V8.ParseLazyMicroSeconds",59820
+timer-event-end,"V8.ParseLazyMicroSeconds",59838
code-creation,LazyCompile,0,0x2b8509a0,388,"sjcl.bitArray.k bsuite/kraken-once/stanford-crypto-ccm.js:11",0x2f339d10,~
timer-event-end,"V8.RecompileConcurrent",59909
timer-event-start,"V8.RecompileConcurrent",59926
@@ -1969,9 +1969,9 @@ tick,0xf776d430,59966,0,0x90ec418,2,0x2b8455e6,0x2b84a5e0,0x2b83a281,0x2b838f3f,
code-creation,LazyCompile,1,0x2b850b40,536,"sjcl.bitArray.getPartial bsuite/kraken-once/stanford-crypto-ccm.js:10",0x2f339bf0,*
timer-event-end,"V8.RecompileSynchronous",60077
timer-event-start,"V8.RecompileSynchronous",60141
-timer-event-start,"V8.ParseLazy",60149
+timer-event-start,"V8.ParseLazyMicroSeconds",60149
timer-event-end,"V8.RecompileConcurrent",60177
-timer-event-end,"V8.ParseLazy",60195
+timer-event-end,"V8.ParseLazyMicroSeconds",60195
code-creation,LazyCompile,0,0x2b850d60,960,"substr native string.js:749",0x44216608,~
timer-event-end,"V8.RecompileSynchronous",60329
timer-event-start,"V8.RecompileConcurrent",60356
@@ -1988,30 +1988,30 @@ code-creation,LazyCompile,1,0x2b8518a0,1792,"substr native string.js:749",0x4421
timer-event-end,"V8.RecompileSynchronous",60803
tick,0x2b849c4b,60997,0,0x2b849afa,0,0x2b83a0df,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
timer-event-start,"V8.RecompileSynchronous",61042
-timer-event-start,"V8.ParseLazy",61054
-timer-event-end,"V8.ParseLazy",61066
+timer-event-start,"V8.ParseLazyMicroSeconds",61054
+timer-event-end,"V8.ParseLazyMicroSeconds",61066
code-creation,LazyCompile,0,0x2b851fa0,184,"sjcl.cipher.aes.encrypt bsuite/kraken-once/stanford-crypto-ccm.js:6",0x2f339890,~
timer-event-end,"V8.RecompileSynchronous",61112
timer-event-start,"V8.RecompileConcurrent",61139
timer-event-start,"V8.RecompileSynchronous",61159
-timer-event-start,"V8.ParseLazy",61168
-timer-event-end,"V8.ParseLazy",61186
+timer-event-start,"V8.ParseLazyMicroSeconds",61168
+timer-event-end,"V8.ParseLazyMicroSeconds",61186
timer-event-end,"V8.RecompileConcurrent",61201
code-creation,LazyCompile,0,0x2b852060,336,"sjcl.bitArray.bitLength bsuite/kraken-once/stanford-crypto-ccm.js:9",0x2f339ad0,~
-timer-event-start,"V8.ParseLazy",61249
-timer-event-end,"V8.ParseLazy",61264
+timer-event-start,"V8.ParseLazyMicroSeconds",61249
+timer-event-end,"V8.ParseLazyMicroSeconds",61264
timer-event-end,"V8.RecompileSynchronous",61290
timer-event-start,"V8.RecompileSynchronous",61309
timer-event-start,"V8.RecompileConcurrent",61317
code-creation,LazyCompile,1,0x2b8521c0,196,"sjcl.cipher.aes.encrypt bsuite/kraken-once/stanford-crypto-ccm.js:6",0x2f339890,*
timer-event-end,"V8.RecompileSynchronous",61348
timer-event-start,"V8.RecompileSynchronous",61374
-timer-event-start,"V8.ParseLazy",61381
-timer-event-end,"V8.ParseLazy",61394
+timer-event-start,"V8.ParseLazyMicroSeconds",61381
+timer-event-end,"V8.ParseLazyMicroSeconds",61394
timer-event-end,"V8.RecompileSynchronous",61418
timer-event-start,"V8.RecompileSynchronous",61424
-timer-event-start,"V8.ParseLazy",61429
-timer-event-end,"V8.ParseLazy",61442
+timer-event-start,"V8.ParseLazyMicroSeconds",61429
+timer-event-end,"V8.ParseLazyMicroSeconds",61442
code-creation,LazyCompile,0,0x2b8522a0,248,"round native math.js:193",0x4422265c,~
timer-event-end,"V8.RecompileConcurrent",61471
timer-event-start,"V8.RecompileConcurrent",61480
@@ -2030,16 +2030,16 @@ code-creation,LazyCompile,1,0x2b852940,242,"round native math.js:193",0x4422265c
timer-event-end,"V8.RecompileSynchronous",61629
code-creation,LoadPolymorphicIC,5,0x2b852a40,105,"length"
timer-event-start,"V8.RecompileSynchronous",61726
-timer-event-start,"V8.ParseLazy",61731
-timer-event-end,"V8.ParseLazy",61757
+timer-event-start,"V8.ParseLazyMicroSeconds",61731
+timer-event-end,"V8.ParseLazyMicroSeconds",61757
code-creation,LazyCompile,0,0x2b852ac0,536,"sjcl.bitArray.clamp bsuite/kraken-once/stanford-crypto-ccm.js:9",0x2f339b30,~
-timer-event-start,"V8.ParseLazy",61847
-timer-event-end,"V8.ParseLazy",61865
+timer-event-start,"V8.ParseLazyMicroSeconds",61847
+timer-event-end,"V8.ParseLazyMicroSeconds",61865
code-creation,Function,0,0x2b852ce0,288,"sjcl.bitArray.partial bsuite/kraken-once/stanford-crypto-ccm.js:10",0x2f339b90,~
timer-event-end,"V8.RecompileSynchronous",61926
timer-event-start,"V8.RecompileSynchronous",61933
-timer-event-start,"V8.ParseLazy",61939
-timer-event-end,"V8.ParseLazy",61953
+timer-event-start,"V8.ParseLazyMicroSeconds",61939
+timer-event-end,"V8.ParseLazyMicroSeconds",61953
timer-event-start,"V8.RecompileConcurrent",61961
code-creation,LazyCompile,0,0x2b852e00,248,"ceil native math.js:81",0x442222fc,~
timer-event-end,"V8.RecompileSynchronous",62019
@@ -2058,8 +2058,8 @@ timer-event-end,"V8.RecompileSynchronous",62445
code-creation,LoadPolymorphicIC,5,0x2b853d00,105,"length"
code-creation,LoadPolymorphicIC,5,0x2b853d80,105,"length"
timer-event-start,"V8.RecompileSynchronous",63048
-timer-event-start,"V8.ParseLazy",63067
-timer-event-end,"V8.ParseLazy",63085
+timer-event-start,"V8.ParseLazyMicroSeconds",63067
+timer-event-end,"V8.ParseLazyMicroSeconds",63085
timer-event-end,"V8.RecompileSynchronous",63117
tick,0xf776d430,63132,0,0x90ec418,0,0x2b8462cc,0x2b845cd7,0x2b848b0a,0x2b84a58b,0x2b83a281,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
timer-event-start,"V8.RecompileConcurrent",63203
@@ -2068,24 +2068,24 @@ timer-event-start,"V8.RecompileSynchronous",63329
code-creation,LazyCompile,1,0x2b853e00,644,"sjcl.bitArray.partial bsuite/kraken-once/stanford-crypto-ccm.js:10",0x2f339b90,*
timer-event-end,"V8.RecompileSynchronous",63379
timer-event-start,"V8.RecompileSynchronous",63494
-timer-event-start,"V8.ParseLazy",63503
-timer-event-end,"V8.ParseLazy",63517
+timer-event-start,"V8.ParseLazyMicroSeconds",63503
+timer-event-end,"V8.ParseLazyMicroSeconds",63517
timer-event-end,"V8.RecompileSynchronous",63544
timer-event-start,"V8.RecompileConcurrent",63572
timer-event-start,"V8.RecompileSynchronous",63641
-timer-event-start,"V8.ParseLazy",63651
+timer-event-start,"V8.ParseLazyMicroSeconds",63651
timer-event-end,"V8.RecompileConcurrent",63664
-timer-event-end,"V8.ParseLazy",63678
+timer-event-end,"V8.ParseLazyMicroSeconds",63678
code-creation,LazyCompile,0,0x2b8540a0,560,"sjcl.bitArray.concat bsuite/kraken-once/stanford-crypto-ccm.js:9",0x2f339a70,~
-timer-event-start,"V8.ParseLazy",63757
-timer-event-end,"V8.ParseLazy",63772
-timer-event-start,"V8.ParseLazy",63808
-timer-event-end,"V8.ParseLazy",63848
+timer-event-start,"V8.ParseLazyMicroSeconds",63757
+timer-event-end,"V8.ParseLazyMicroSeconds",63772
+timer-event-start,"V8.ParseLazyMicroSeconds",63808
+timer-event-end,"V8.ParseLazyMicroSeconds",63848
code-creation,Function,0,0x2b8542e0,1126,"sjcl.bitArray.P bsuite/kraken-once/stanford-crypto-ccm.js:10",0x2f339cb0,~
-timer-event-start,"V8.ParseLazy",63977
-timer-event-end,"V8.ParseLazy",63994
-timer-event-start,"V8.ParseLazy",64023
-timer-event-end,"V8.ParseLazy",64039
+timer-event-start,"V8.ParseLazyMicroSeconds",63977
+timer-event-end,"V8.ParseLazyMicroSeconds",63994
+timer-event-start,"V8.ParseLazyMicroSeconds",64023
+timer-event-end,"V8.ParseLazyMicroSeconds",64039
timer-event-end,"V8.RecompileSynchronous",64072
timer-event-start,"V8.RecompileSynchronous",64079
timer-event-start,"V8.RecompileConcurrent",64099
@@ -2094,8 +2094,8 @@ timer-event-end,"V8.RecompileSynchronous",64194
tick,0xf776d430,64209,0,0x4059,2,0x2b845c29,0x2b848b0a,0x2b844670,0x2b83a0cc,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
code-deopt,64271,544
timer-event-start,"V8.RecompileSynchronous",64467
-timer-event-start,"V8.ParseLazy",64476
-timer-event-end,"V8.ParseLazy",64542
+timer-event-start,"V8.ParseLazyMicroSeconds",64476
+timer-event-end,"V8.ParseLazyMicroSeconds",64542
code-creation,LazyCompile,0,0x2b854980,3002,"sjcl.cipher.aes bsuite/kraken-once/stanford-crypto-ccm.js:4",0x2f339830,~
timer-event-end,"V8.RecompileSynchronous",64818
timer-event-end,"V8.RecompileConcurrent",64871
@@ -2125,8 +2125,8 @@ code-creation,Stub,2,0x2b857c20,1785,"RecordWriteStub"
code-creation,LazyCompile,1,0x2b858320,4397,"sjcl.cipher.aes bsuite/kraken-once/stanford-crypto-ccm.js:4",0x2f339830,*
timer-event-end,"V8.RecompileSynchronous",66661
timer-event-start,"V8.RecompileSynchronous",66788
-timer-event-start,"V8.ParseLazy",66797
-timer-event-end,"V8.ParseLazy",66878
+timer-event-start,"V8.ParseLazyMicroSeconds",66797
+timer-event-end,"V8.ParseLazyMicroSeconds",66878
timer-event-end,"V8.RecompileSynchronous",67067
timer-event-start,"V8.RecompileConcurrent",67094
tick,0x2b8473da,67403,0,0x2f392d35,0,0x2b852252,0x2b8455f6,0x2b84a5e0,0x2b83a281,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
@@ -2151,17 +2151,17 @@ code-creation,LoadPolymorphicIC,5,0x2b85a980,117,"length"
code-creation,LoadPolymorphicIC,5,0x2b85aa00,117,"length"
tick,0x81168ba,70588,0,0x90d5060,0,0x2b85056f,0x2b83a782,0x2b839f04,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
timer-event-start,"V8.RecompileSynchronous",71064
-timer-event-start,"V8.ParseLazy",71076
-timer-event-end,"V8.ParseLazy",71094
+timer-event-start,"V8.ParseLazyMicroSeconds",71076
+timer-event-end,"V8.ParseLazyMicroSeconds",71094
code-creation,LazyCompile,0,0x2b85aa80,292,"sjcl.test.TestCase.require bsuite/kraken-once/stanford-crypto-ccm.js:131",0x2f33b390,~
-timer-event-start,"V8.ParseLazy",71142
-timer-event-end,"V8.ParseLazy",71152
+timer-event-start,"V8.ParseLazyMicroSeconds",71142
+timer-event-end,"V8.ParseLazyMicroSeconds",71152
code-creation,Function,0,0x2b85abc0,208,"sjcl.test.TestCase.pass bsuite/kraken-once/stanford-crypto-ccm.js:110",0x2f33b270,~
timer-event-end,"V8.RecompileSynchronous",71195
timer-event-start,"V8.RecompileSynchronous",71204
-timer-event-start,"V8.ParseLazy",71210
+timer-event-start,"V8.ParseLazyMicroSeconds",71210
timer-event-start,"V8.RecompileConcurrent",71216
-timer-event-end,"V8.ParseLazy",71228
+timer-event-end,"V8.ParseLazyMicroSeconds",71228
timer-event-end,"V8.RecompileSynchronous",71254
timer-event-end,"V8.RecompileConcurrent",71304
timer-event-start,"V8.RecompileConcurrent",71312
@@ -2182,25 +2182,25 @@ timer-event-end,"V8.GCScavenger",72596
tick,0x8116878,72711,0,0x90d5060,0,0x2b85056f,0x2b83a782,0x2b839f04,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
code-creation,CallIC,7,0x2b85aee0,136,"concat"
timer-event-start,"V8.RecompileSynchronous",72947
-timer-event-start,"V8.ParseLazy",72956
-timer-event-end,"V8.ParseLazy",72977
+timer-event-start,"V8.ParseLazyMicroSeconds",72956
+timer-event-end,"V8.ParseLazyMicroSeconds",72977
code-creation,LazyCompile,0,0x2b85af80,392,"sjcl.bitArray.bitSlice bsuite/kraken-once/stanford-crypto-ccm.js:9",0x2f339a10,~
-timer-event-start,"V8.ParseLazy",73044
-timer-event-end,"V8.ParseLazy",73083
-timer-event-start,"V8.ParseLazy",73169
-timer-event-end,"V8.ParseLazy",73185
-timer-event-start,"V8.ParseLazy",73217
-timer-event-end,"V8.ParseLazy",73232
-timer-event-start,"V8.ParseLazy",73263
-timer-event-end,"V8.ParseLazy",73289
-timer-event-start,"V8.ParseLazy",73339
-timer-event-end,"V8.ParseLazy",73356
+timer-event-start,"V8.ParseLazyMicroSeconds",73044
+timer-event-end,"V8.ParseLazyMicroSeconds",73083
+timer-event-start,"V8.ParseLazyMicroSeconds",73169
+timer-event-end,"V8.ParseLazyMicroSeconds",73185
+timer-event-start,"V8.ParseLazyMicroSeconds",73217
+timer-event-end,"V8.ParseLazyMicroSeconds",73232
+timer-event-start,"V8.ParseLazyMicroSeconds",73263
+timer-event-end,"V8.ParseLazyMicroSeconds",73289
+timer-event-start,"V8.ParseLazyMicroSeconds",73339
+timer-event-end,"V8.ParseLazyMicroSeconds",73356
timer-event-end,"V8.RecompileSynchronous",73393
timer-event-start,"V8.RecompileConcurrent",73422
tick,0x82eea09,73786,0,0x90de9b0,0,0x2b85056f,0x2b83a782,0x2b839e4e,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.ParseLazy",74228
+timer-event-start,"V8.ParseLazyMicroSeconds",74228
timer-event-end,"V8.RecompileConcurrent",74243
-timer-event-end,"V8.ParseLazy",74259
+timer-event-end,"V8.ParseLazyMicroSeconds",74259
timer-event-start,"V8.CompileLazy",74267
timer-event-start,"V8.CompileFullCode",74273
timer-event-end,"V8.CompileFullCode",74291
@@ -2220,13 +2220,13 @@ timer-event-end,"V8.GCScavenger",76262
tick,0x81168ba,76974,0,0x90d5060,0,0x2b85056f,0x2b83a782,0x2b839fd2,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
tick,0x81168ba,78047,0,0x90d5060,0,0x2b85056f,0x2b83a782,0x2b839f55,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
timer-event-start,"V8.RecompileSynchronous",78403
-timer-event-start,"V8.ParseLazy",78415
-timer-event-end,"V8.ParseLazy",78444
+timer-event-start,"V8.ParseLazyMicroSeconds",78415
+timer-event-end,"V8.ParseLazyMicroSeconds",78444
code-creation,LazyCompile,0,0x2b85c2c0,717,"sjcl.codec.hex.toBits bsuite/kraken-once/stanford-crypto-ccm.js:13",0x2f339e90,~
-timer-event-start,"V8.ParseLazy",78530
-timer-event-end,"V8.ParseLazy",78559
-timer-event-start,"V8.ParseLazy",78614
-timer-event-end,"V8.ParseLazy",78632
+timer-event-start,"V8.ParseLazyMicroSeconds",78530
+timer-event-end,"V8.ParseLazyMicroSeconds",78559
+timer-event-start,"V8.ParseLazyMicroSeconds",78614
+timer-event-end,"V8.ParseLazyMicroSeconds",78632
timer-event-end,"V8.RecompileSynchronous",78666
timer-event-start,"V8.RecompileConcurrent",78695
timer-event-end,"V8.RecompileConcurrent",79073
@@ -2268,58 +2268,58 @@ timer-event-start,"V8.External",87822
timer-event-end,"V8.External",87829
timer-event-end,"V8.GCScavenger",87833
timer-event-start,"V8.RecompileSynchronous",88294
-timer-event-start,"V8.ParseLazy",88303
-timer-event-end,"V8.ParseLazy",88361
+timer-event-start,"V8.ParseLazyMicroSeconds",88303
+timer-event-end,"V8.ParseLazyMicroSeconds",88361
code-creation,LazyCompile,0,0x2b85d420,1221,"sjcl.mode.ccm.I bsuite/kraken-once/stanford-crypto-ccm.js:21",0x2f33a370,~
-timer-event-start,"V8.ParseLazy",88473
-timer-event-end,"V8.ParseLazy",88492
-timer-event-start,"V8.ParseLazy",88532
-timer-event-end,"V8.ParseLazy",88545
-timer-event-start,"V8.ParseLazy",88572
-timer-event-end,"V8.ParseLazy",88588
-timer-event-start,"V8.ParseLazy",88612
-timer-event-end,"V8.ParseLazy",88645
-timer-event-start,"V8.ParseLazy",88688
-timer-event-end,"V8.ParseLazy",88714
+timer-event-start,"V8.ParseLazyMicroSeconds",88473
+timer-event-end,"V8.ParseLazyMicroSeconds",88492
+timer-event-start,"V8.ParseLazyMicroSeconds",88532
+timer-event-end,"V8.ParseLazyMicroSeconds",88545
+timer-event-start,"V8.ParseLazyMicroSeconds",88572
+timer-event-end,"V8.ParseLazyMicroSeconds",88588
+timer-event-start,"V8.ParseLazyMicroSeconds",88612
+timer-event-end,"V8.ParseLazyMicroSeconds",88645
+timer-event-start,"V8.ParseLazyMicroSeconds",88688
+timer-event-end,"V8.ParseLazyMicroSeconds",88714
tick,0x81fc61b,88727,0,0xff81ebbc,2,0x2b84a58b,0x2b83a281,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.ParseLazy",88792
-timer-event-end,"V8.ParseLazy",88867
-timer-event-start,"V8.ParseLazy",88951
-timer-event-end,"V8.ParseLazy",88967
-timer-event-start,"V8.ParseLazy",88996
-timer-event-end,"V8.ParseLazy",89012
+timer-event-start,"V8.ParseLazyMicroSeconds",88792
+timer-event-end,"V8.ParseLazyMicroSeconds",88867
+timer-event-start,"V8.ParseLazyMicroSeconds",88951
+timer-event-end,"V8.ParseLazyMicroSeconds",88967
+timer-event-start,"V8.ParseLazyMicroSeconds",88996
+timer-event-end,"V8.ParseLazyMicroSeconds",89012
timer-event-end,"V8.RecompileSynchronous",89134
timer-event-start,"V8.RecompileConcurrent",89160
timer-event-start,"V8.RecompileSynchronous",89215
-timer-event-start,"V8.ParseLazy",89224
-timer-event-end,"V8.ParseLazy",89245
+timer-event-start,"V8.ParseLazyMicroSeconds",89224
+timer-event-end,"V8.ParseLazyMicroSeconds",89245
code-creation,LazyCompile,0,0x2b85d900,585,"sjcl.bitArray.equal bsuite/kraken-once/stanford-crypto-ccm.js:10",0x2f339c50,~
-timer-event-start,"V8.ParseLazy",89309
-timer-event-end,"V8.ParseLazy",89326
-timer-event-start,"V8.ParseLazy",89356
-timer-event-end,"V8.ParseLazy",89369
-timer-event-start,"V8.ParseLazy",89391
-timer-event-end,"V8.ParseLazy",89406
-timer-event-start,"V8.ParseLazy",89433
-timer-event-end,"V8.ParseLazy",89445
+timer-event-start,"V8.ParseLazyMicroSeconds",89309
+timer-event-end,"V8.ParseLazyMicroSeconds",89326
+timer-event-start,"V8.ParseLazyMicroSeconds",89356
+timer-event-end,"V8.ParseLazyMicroSeconds",89369
+timer-event-start,"V8.ParseLazyMicroSeconds",89391
+timer-event-end,"V8.ParseLazyMicroSeconds",89406
+timer-event-start,"V8.ParseLazyMicroSeconds",89433
+timer-event-end,"V8.ParseLazyMicroSeconds",89445
timer-event-end,"V8.RecompileSynchronous",89485
timer-event-start,"V8.RecompileSynchronous",89730
-timer-event-start,"V8.ParseLazy",89740
+timer-event-start,"V8.ParseLazyMicroSeconds",89740
tick,0x81168ba,89761,0,0x90d5060,0,0x2b85056f,0x2b85cd2d,0x2b839f04,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-end,"V8.ParseLazy",89805
+timer-event-end,"V8.ParseLazyMicroSeconds",89805
code-creation,LazyCompile,0,0x2b85db60,1838,"sjcl.mode.ccm.G bsuite/kraken-once/stanford-crypto-ccm.js:20",0x2f33a310,~
-timer-event-start,"V8.ParseLazy",89969
-timer-event-end,"V8.ParseLazy",89990
-timer-event-start,"V8.ParseLazy",90016
-timer-event-end,"V8.ParseLazy",90042
-timer-event-start,"V8.ParseLazy",90084
-timer-event-end,"V8.ParseLazy",90098
-timer-event-start,"V8.ParseLazy",90129
-timer-event-end,"V8.ParseLazy",90170
-timer-event-start,"V8.ParseLazy",90271
-timer-event-end,"V8.ParseLazy",90286
-timer-event-start,"V8.ParseLazy",90326
-timer-event-end,"V8.ParseLazy",90344
+timer-event-start,"V8.ParseLazyMicroSeconds",89969
+timer-event-end,"V8.ParseLazyMicroSeconds",89990
+timer-event-start,"V8.ParseLazyMicroSeconds",90016
+timer-event-end,"V8.ParseLazyMicroSeconds",90042
+timer-event-start,"V8.ParseLazyMicroSeconds",90084
+timer-event-end,"V8.ParseLazyMicroSeconds",90098
+timer-event-start,"V8.ParseLazyMicroSeconds",90129
+timer-event-end,"V8.ParseLazyMicroSeconds",90170
+timer-event-start,"V8.ParseLazyMicroSeconds",90271
+timer-event-end,"V8.ParseLazyMicroSeconds",90286
+timer-event-start,"V8.ParseLazyMicroSeconds",90326
+timer-event-end,"V8.ParseLazyMicroSeconds",90344
timer-event-end,"V8.RecompileSynchronous",90480
tick,0x2b8596f9,90829,0,0x8,0,0x2b852252,0x2b8454f6,0x2b844628,0x2b83a0cc,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
timer-event-end,"V8.RecompileConcurrent",91133
@@ -2404,8 +2404,8 @@ timer-event-start,"V8.External",107317
timer-event-end,"V8.External",107323
timer-event-end,"V8.GCScavenger",107327
timer-event-start,"V8.RecompileSynchronous",107462
-timer-event-start,"V8.ParseLazy",107471
-timer-event-end,"V8.ParseLazy",107537
+timer-event-start,"V8.ParseLazyMicroSeconds",107471
+timer-event-end,"V8.ParseLazyMicroSeconds",107537
timer-event-end,"V8.RecompileSynchronous",107729
timer-event-start,"V8.RecompileConcurrent",107764
tick,0x2b859da9,107874,0,0x2,0,0x2b852252,0x2b848b65,0x2b84a58b,0x2b83a281,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
@@ -2426,24 +2426,24 @@ timer-event-end,"V8.GCScavenger",110725
tick,0x2b85a1d8,111072,0,0x0,0,0x2b852252,0x2b8658f7,0x2b84a5e0,0x2b83a281,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
tick,0x2b848d38,112161,0,0x4c,0,0x2b844670,0x2b83a0cc,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
timer-event-start,"V8.RecompileSynchronous",112323
-timer-event-start,"V8.ParseLazy",112335
-timer-event-end,"V8.ParseLazy",112387
-timer-event-start,"V8.ParseLazy",112444
-timer-event-end,"V8.ParseLazy",112463
-timer-event-start,"V8.ParseLazy",112496
-timer-event-end,"V8.ParseLazy",112509
-timer-event-start,"V8.ParseLazy",112536
-timer-event-end,"V8.ParseLazy",112552
-timer-event-start,"V8.ParseLazy",112576
-timer-event-end,"V8.ParseLazy",112598
-timer-event-start,"V8.ParseLazy",112639
-timer-event-end,"V8.ParseLazy",112653
-timer-event-start,"V8.ParseLazy",112685
-timer-event-end,"V8.ParseLazy",112722
-timer-event-start,"V8.ParseLazy",112803
-timer-event-end,"V8.ParseLazy",112819
-timer-event-start,"V8.ParseLazy",112848
-timer-event-end,"V8.ParseLazy",112863
+timer-event-start,"V8.ParseLazyMicroSeconds",112335
+timer-event-end,"V8.ParseLazyMicroSeconds",112387
+timer-event-start,"V8.ParseLazyMicroSeconds",112444
+timer-event-end,"V8.ParseLazyMicroSeconds",112463
+timer-event-start,"V8.ParseLazyMicroSeconds",112496
+timer-event-end,"V8.ParseLazyMicroSeconds",112509
+timer-event-start,"V8.ParseLazyMicroSeconds",112536
+timer-event-end,"V8.ParseLazyMicroSeconds",112552
+timer-event-start,"V8.ParseLazyMicroSeconds",112576
+timer-event-end,"V8.ParseLazyMicroSeconds",112598
+timer-event-start,"V8.ParseLazyMicroSeconds",112639
+timer-event-end,"V8.ParseLazyMicroSeconds",112653
+timer-event-start,"V8.ParseLazyMicroSeconds",112685
+timer-event-end,"V8.ParseLazyMicroSeconds",112722
+timer-event-start,"V8.ParseLazyMicroSeconds",112803
+timer-event-end,"V8.ParseLazyMicroSeconds",112819
+timer-event-start,"V8.ParseLazyMicroSeconds",112848
+timer-event-end,"V8.ParseLazyMicroSeconds",112863
timer-event-end,"V8.RecompileSynchronous",112986
timer-event-start,"V8.RecompileConcurrent",113012
tick,0x2b867dc3,113148,0,0x100,0,0x2b839e65,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
@@ -2522,24 +2522,24 @@ timer-event-end,"V8.External",134708
timer-event-end,"V8.GCScavenger",134712
tick,0x2b8594d3,135617,0,0x80c5e06,0,0x2b852252,0x2b8654d2,0x2b844628,0x2b83a0cc,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
timer-event-start,"V8.RecompileSynchronous",135776
-timer-event-start,"V8.ParseLazy",135787
-timer-event-end,"V8.ParseLazy",135838
-timer-event-start,"V8.ParseLazy",135894
-timer-event-end,"V8.ParseLazy",135913
-timer-event-start,"V8.ParseLazy",135946
-timer-event-end,"V8.ParseLazy",135960
-timer-event-start,"V8.ParseLazy",135987
-timer-event-end,"V8.ParseLazy",136002
-timer-event-start,"V8.ParseLazy",136026
-timer-event-end,"V8.ParseLazy",136048
-timer-event-start,"V8.ParseLazy",136089
-timer-event-end,"V8.ParseLazy",136103
-timer-event-start,"V8.ParseLazy",136135
-timer-event-end,"V8.ParseLazy",136172
-timer-event-start,"V8.ParseLazy",136253
-timer-event-end,"V8.ParseLazy",136270
-timer-event-start,"V8.ParseLazy",136301
-timer-event-end,"V8.ParseLazy",136317
+timer-event-start,"V8.ParseLazyMicroSeconds",135787
+timer-event-end,"V8.ParseLazyMicroSeconds",135838
+timer-event-start,"V8.ParseLazyMicroSeconds",135894
+timer-event-end,"V8.ParseLazyMicroSeconds",135913
+timer-event-start,"V8.ParseLazyMicroSeconds",135946
+timer-event-end,"V8.ParseLazyMicroSeconds",135960
+timer-event-start,"V8.ParseLazyMicroSeconds",135987
+timer-event-end,"V8.ParseLazyMicroSeconds",136002
+timer-event-start,"V8.ParseLazyMicroSeconds",136026
+timer-event-end,"V8.ParseLazyMicroSeconds",136048
+timer-event-start,"V8.ParseLazyMicroSeconds",136089
+timer-event-end,"V8.ParseLazyMicroSeconds",136103
+timer-event-start,"V8.ParseLazyMicroSeconds",136135
+timer-event-end,"V8.ParseLazyMicroSeconds",136172
+timer-event-start,"V8.ParseLazyMicroSeconds",136253
+timer-event-end,"V8.ParseLazyMicroSeconds",136270
+timer-event-start,"V8.ParseLazyMicroSeconds",136301
+timer-event-end,"V8.ParseLazyMicroSeconds",136317
timer-event-end,"V8.RecompileSynchronous",136440
timer-event-start,"V8.RecompileConcurrent",136466
tick,0x2b859c6e,136680,0,0x0,0,0x2b852252,0x2b8658f7,0x2b84a5e0,0x2b83a281,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
@@ -2580,16 +2580,16 @@ timer-event-end,"V8.External",144434
timer-event-end,"V8.GCScavenger",144438
tick,0x81168ba,145212,0,0x90d5060,0,0x2b85056f,0x2b85cd2d,0x2b839f04,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
tick,0x2b851430,146268,0,0xff81fd00,0,0x2b8657f1,0x2b84a5e0,0x2b83a281,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b85b238,0x2b83a474,0x2b838f3f,0x2b838c50,0x2b839b39,0x2b83970e,0x2b83964b,0x2b8394a8,0x2b839273,0x2b839036,0x2b838f3f,0x2b838c50,0x2b83892e,0x2b8383df,0x2b8350ac
-timer-event-start,"V8.ParseLazy",146339
-timer-event-end,"V8.ParseLazy",146358
+timer-event-start,"V8.ParseLazyMicroSeconds",146339
+timer-event-end,"V8.ParseLazyMicroSeconds",146358
timer-event-start,"V8.CompileLazy",146364
timer-event-start,"V8.CompileFullCode",146369
timer-event-end,"V8.CompileFullCode",146386
code-creation,LazyCompile,0,0x5120eb40,212," bsuite/kraken-once/stanford-crypto-ccm.js:172",0x2f33dd88,~
timer-event-end,"V8.CompileLazy",146400
code-creation,Stub,12,0x5120ec20,311,"BinaryOpStub_SUB_Alloc_Generic+Generic"
-timer-event-start,"V8.ParseLazy",146431
-timer-event-end,"V8.ParseLazy",146461
+timer-event-start,"V8.ParseLazyMicroSeconds",146431
+timer-event-end,"V8.ParseLazyMicroSeconds",146461
timer-event-start,"V8.CompileLazy",146467
timer-event-start,"V8.CompileFullCode",146475
timer-event-end,"V8.CompileFullCode",146495
@@ -2598,8 +2598,8 @@ timer-event-end,"V8.CompileLazy",146508
code-creation,Stub,2,0x5120efc0,98,"valueOf"
code-creation,LoadPolymorphicIC,5,0x5120f040,117,"valueOf"
code-creation,CallIC,7,0x5120f0c0,129,"ToNumber"
-timer-event-start,"V8.ParseLazy",146556
-timer-event-end,"V8.ParseLazy",146569
+timer-event-start,"V8.ParseLazyMicroSeconds",146556
+timer-event-end,"V8.ParseLazyMicroSeconds",146569
timer-event-start,"V8.CompileLazy",146574
timer-event-start,"V8.CompileFullCode",146580
timer-event-end,"V8.CompileFullCode",146591
diff --git a/deps/v8/test/mjsunit/tools/tickprocessor.js b/deps/v8/test/mjsunit/tools/tickprocessor.js
index 4ea25f9445..73af098e7f 100644
--- a/deps/v8/test/mjsunit/tools/tickprocessor.js
+++ b/deps/v8/test/mjsunit/tools/tickprocessor.js
@@ -384,7 +384,6 @@ function driveTickProcessorTest(
TickProcessor.CALL_GRAPH_SIZE,
ignoreUnknown,
stateFilter,
- undefined,
"0",
"auto,auto",
false,
diff --git a/deps/v8/test/mjsunit/undetectable-compare.js b/deps/v8/test/mjsunit/undetectable-compare.js
index 3b97f5ee94..c78593439c 100644
--- a/deps/v8/test/mjsunit/undetectable-compare.js
+++ b/deps/v8/test/mjsunit/undetectable-compare.js
@@ -92,4 +92,16 @@ for (var i = 0; i < 5; i++) {
}
+assertFalse(undetectable == %GetUndetectable());
assertFalse(undetectable === %GetUndetectable());
+
+
+function test2(a, b) {
+ return a == b;
+}
+test2(0, 1);
+test2(undetectable, {});
+%OptimizeFunctionOnNextCall(test2);
+for (var i = 0; i < 5; ++i) {
+ assertFalse(test2(undetectable, %GetUndetectable()));
+}
diff --git a/deps/v8/test/mjsunit/wasm/adapter-frame.js b/deps/v8/test/mjsunit/wasm/adapter-frame.js
new file mode 100644
index 0000000000..0e5d4b8c74
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/adapter-frame.js
@@ -0,0 +1,321 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+const JS = false; // for testing the tests.
+const WRONG1 = 0x0DEDFACE;
+const WRONG2 = 0x0DEDBABE;
+const WRONG3 = 0x0DEDD011
+
+function makeSelect(type, args, which) {
+ if (JS) {
+ // For testing the tests.
+ return function() {
+ var val = +arguments[which];
+ print(" " + val);
+ if (type == kAstI32) return val | 0;
+ if (type == kAstF32) return Math.fround(val);
+ if (type == kAstF64) return val;
+ return undefined;
+ }
+ }
+
+ var builder = new WasmModuleBuilder();
+ var sig = new Array();
+ sig.push(type);
+ for (var i = 0; i < args; i++) sig.push(type);
+ builder.addFunction("select", sig)
+ .addBody([kExprGetLocal, which])
+ .exportFunc();
+
+ return builder.instantiate().exports.select;
+}
+
+const inputs = [
+ -1, 0, 2.2, 3.3, 3000.11, Infinity, -Infinity, NaN
+];
+
+(function TestInt1() {
+ print("i32 1(0)...");
+ var C = function(v) { return v | 0; }
+ var select1 = makeSelect(kAstI32, 1, 0);
+
+ for (val of inputs) {
+ assertEquals(C(val), select1(val));
+
+ // under args
+ assertEquals(C(undefined), select1());
+ // over args
+ assertEquals(C(val), select1(val, WRONG1));
+ assertEquals(C(val), select1(val, WRONG1, WRONG2));
+ }
+})();
+
+(function TestInt2() {
+ print("i32 2(0)...");
+ var C = function(v) { return v | 0; }
+ var select = makeSelect(kAstI32, 2, 0);
+
+ for (val of inputs) {
+ assertEquals(C(val), select(val, WRONG1));
+
+ // under args
+ assertEquals(C(undefined), select());
+ assertEquals(C(val), select(val));
+ // over args
+ assertEquals(C(val), select(val, WRONG1, WRONG2));
+ assertEquals(C(val), select(val, WRONG1, WRONG2, WRONG3));
+ }
+
+ print("i32 2(1)...");
+ var select = makeSelect(kAstI32, 2, 1);
+
+ for (val of inputs) {
+ assertEquals(C(val), select(WRONG1, val));
+
+ // under args
+ assertEquals(C(undefined), select());
+ assertEquals(C(undefined), select(val));
+ // over args
+ assertEquals(C(val), select(WRONG1, val));
+ assertEquals(C(val), select(WRONG1, val, WRONG2));
+ assertEquals(C(val), select(WRONG1, val, WRONG2, WRONG3));
+ }
+})();
+
+(function TestInt3() {
+ print("i32 3(0)...");
+ var C = function(v) { return v | 0; }
+ var select = makeSelect(kAstI32, 3, 0);
+
+ for (val of inputs) {
+ assertEquals(C(val), select(val, WRONG1, WRONG2));
+
+ // under args
+ assertEquals(C(undefined), select());
+ assertEquals(C(val), select(val));
+ assertEquals(C(val), select(val, WRONG1));
+ // over args
+ assertEquals(C(val), select(val, WRONG1, WRONG2, WRONG3));
+ }
+
+ print("i32 3(1)...");
+ var select = makeSelect(kAstI32, 3, 1);
+
+ for (val of inputs) {
+ assertEquals(val | 0, select(WRONG1, val, WRONG2));
+
+ // under args
+ assertEquals(C(undefined), select());
+ assertEquals(C(undefined), select(0xDEDFACE));
+ assertEquals(C(val), select(WRONG1, val));
+ // over args
+ assertEquals(C(val), select(WRONG1, val, WRONG2, WRONG3));
+ }
+
+ print("i32 3(2)...");
+ var select = makeSelect(kAstI32, 3, 2);
+
+ for (val of inputs) {
+ assertEquals(C(val), select(WRONG1, WRONG2, val));
+
+ // under args
+ assertEquals(C(undefined), select());
+ assertEquals(C(undefined), select(0xDEDFACE));
+ assertEquals(C(undefined), select(WRONG1, WRONG2));
+ // over args
+ assertEquals(C(val), select(WRONG1, WRONG2, val, WRONG3));
+ }
+})();
+
+(function TestFloat32_1() {
+ print("f32 1(0)...");
+ var C = function(v) { return Math.fround(v); }
+ var select1 = makeSelect(kAstF32, 1, 0);
+
+ for (val of inputs) {
+ assertEquals(C(val), select1(val));
+
+ // under args
+ assertEquals(C(undefined), select1());
+ // over args
+ assertEquals(C(val), select1(val, WRONG1));
+ assertEquals(C(val), select1(val, WRONG1, WRONG2));
+ }
+})();
+
+(function TestFloat32_2() {
+ print("f32 2(0)...");
+ var C = function(v) { return Math.fround(v); }
+ var select = makeSelect(kAstF32, 2, 0);
+
+ for (val of inputs) {
+ assertEquals(C(val), select(val, WRONG1));
+
+ // under args
+ assertEquals(C(undefined), select());
+ assertEquals(C(val), select(val));
+ // over args
+ assertEquals(C(val), select(val, WRONG1, WRONG2));
+ assertEquals(C(val), select(val, WRONG1, WRONG2, WRONG3));
+ }
+
+ print("f32 2(1)...");
+ var select = makeSelect(kAstF32, 2, 1);
+
+ for (val of inputs) {
+ assertEquals(C(val), select(WRONG1, val));
+
+ // under args
+ assertEquals(C(undefined), select());
+ assertEquals(C(undefined), select(val));
+ // over args
+ assertEquals(C(val), select(WRONG1, val));
+ assertEquals(C(val), select(WRONG1, val, WRONG2));
+ assertEquals(C(val), select(WRONG1, val, WRONG2, WRONG3));
+ }
+})();
+
+(function TestFloat32_2() {
+ print("f32 3(0)...");
+ var C = function(v) { return Math.fround(v); }
+ var select = makeSelect(kAstF32, 3, 0);
+
+ for (val of inputs) {
+ assertEquals(C(val), select(val, WRONG1, WRONG2));
+
+ // under args
+ assertEquals(C(undefined), select());
+ assertEquals(C(val), select(val));
+ assertEquals(C(val), select(val, WRONG1));
+ // over args
+ assertEquals(C(val), select(val, WRONG1, WRONG2, WRONG3));
+ }
+
+ print("f32 3(1)...");
+ var select = makeSelect(kAstF32, 3, 1);
+
+ for (val of inputs) {
+ assertEquals(C(val), select(WRONG1, val, WRONG2));
+
+ // under args
+ assertEquals(C(undefined), select());
+ assertEquals(C(undefined), select(0xDEDFACE));
+ assertEquals(C(val), select(WRONG1, val));
+ // over args
+ assertEquals(C(val), select(WRONG1, val, WRONG2, WRONG3));
+ }
+
+ print("f32 3(2)...");
+ var select = makeSelect(kAstF32, 3, 2);
+
+ for (val of inputs) {
+ assertEquals(C(val), select(WRONG1, WRONG2, val));
+
+ // under args
+ assertEquals(C(undefined), select());
+ assertEquals(C(undefined), select(0xDEDFACE));
+ assertEquals(C(undefined), select(WRONG1, WRONG2));
+ // over args
+ assertEquals(C(val), select(WRONG1, WRONG2, val, WRONG3));
+ }
+})();
+
+
+(function TestFloat64_1() {
+ print("f64 1(0)...");
+ var C = function(v) { return +v; }
+ var select1 = makeSelect(kAstF64, 1, 0);
+
+ for (val of inputs) {
+ assertEquals(C(val), select1(val));
+
+ // under args
+ assertEquals(C(undefined), select1());
+ // over args
+ assertEquals(C(val), select1(val, WRONG1));
+ assertEquals(C(val), select1(val, WRONG1, WRONG2));
+ }
+})();
+
+(function TestFloat64_2() {
+ print("f64 2(0)...");
+ var C = function(v) { return +v; }
+ var select = makeSelect(kAstF64, 2, 0);
+
+ for (val of inputs) {
+ assertEquals(C(val), select(val, WRONG1));
+
+ // under args
+ assertEquals(C(undefined), select());
+ assertEquals(C(val), select(val));
+ // over args
+ assertEquals(C(val), select(val, WRONG1, WRONG2));
+ assertEquals(C(val), select(val, WRONG1, WRONG2, WRONG3));
+ }
+
+ print("f64 2(1)...");
+ var select = makeSelect(kAstF64, 2, 1);
+
+ for (val of inputs) {
+ assertEquals(C(val), select(WRONG1, val));
+
+ // under args
+ assertEquals(C(undefined), select());
+ assertEquals(C(undefined), select(val));
+ // over args
+ assertEquals(C(val), select(WRONG1, val));
+ assertEquals(C(val), select(WRONG1, val, WRONG2));
+ assertEquals(C(val), select(WRONG1, val, WRONG2, WRONG3));
+ }
+})();
+
+(function TestFloat64_2() {
+ print("f64 3(0)...");
+ var C = function(v) { return +v; }
+ var select = makeSelect(kAstF64, 3, 0);
+
+ for (val of inputs) {
+ assertEquals(C(val), select(val, WRONG1, WRONG2));
+
+ // under args
+ assertEquals(C(undefined), select());
+ assertEquals(C(val), select(val));
+ assertEquals(C(val), select(val, WRONG1));
+ // over args
+ assertEquals(C(val), select(val, WRONG1, WRONG2, WRONG3));
+ }
+
+ print("f64 3(1)...");
+ var select = makeSelect(kAstF64, 3, 1);
+
+ for (val of inputs) {
+ assertEquals(C(val), select(WRONG1, val, WRONG2));
+
+ // under args
+ assertEquals(C(undefined), select());
+ assertEquals(C(undefined), select(0xDEDFACE));
+ assertEquals(C(val), select(WRONG1, val));
+ // over args
+ assertEquals(C(val), select(WRONG1, val, WRONG2, WRONG3));
+ }
+
+ print("f64 3(2)...");
+ var select = makeSelect(kAstF64, 3, 2);
+
+ for (val of inputs) {
+ assertEquals(C(val), select(WRONG1, WRONG2, val));
+
+ // under args
+ assertEquals(C(undefined), select());
+ assertEquals(C(undefined), select(0xDEDFACE));
+ assertEquals(C(undefined), select(WRONG1, WRONG2));
+ // over args
+ assertEquals(C(val), select(WRONG1, WRONG2, val, WRONG3));
+ }
+})();
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-copy.js b/deps/v8/test/mjsunit/wasm/asm-wasm-copy.js
new file mode 100644
index 0000000000..35c5f76ef1
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-copy.js
@@ -0,0 +1,27 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+(function TestCopyBug() {
+ // This was tickling a register allocation issue with
+ // idiv in embenchen/copy.
+ function asmModule(){
+ 'use asm';
+ function func() {
+ var ret = 0;
+ var x = 1, y = 0, z = 0;
+ var a = 0, b = 0, c = 0, d = 0, e = 0, f = 0, g = 0;
+ do {
+ y = (x + 0) | 0;
+ z = (y | 0) % 2 | 0;
+ ret = (y + z + a + b + c + d + e + f + g) | 0;
+ } while(0);
+ return ret | 0;
+ }
+ return { func: func };
+ }
+ var wasm = Wasm.instantiateModuleFromAsm(asmModule.toString());
+ assertEquals(asmModule().func(), wasm.func());
+})();
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-deopt.js b/deps/v8/test/mjsunit/wasm/asm-wasm-deopt.js
new file mode 100644
index 0000000000..4b16b71239
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-deopt.js
@@ -0,0 +1,29 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+// Flags: --allow-natives-syntax
+
+(function TestDeoptimizeArgMismatch() {
+ function deopt() {
+ %DeoptimizeFunction(test);
+ }
+ function Module(global, env, buffer) {
+ "use asm";
+ var deopt = env.deopt;
+ function _main(i4, i5) {
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ deopt();
+ return i5 | 0;
+ }
+ return {'_main': _main}
+ }
+ function test() {
+ var wasm = Wasm.instantiateModuleFromAsm(
+ Module.toString(), {'deopt': deopt});
+ wasm._main(0, 0, 0);
+ }
+ test();
+})();
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-f32.js b/deps/v8/test/mjsunit/wasm/asm-wasm-f32.js
new file mode 100644
index 0000000000..a94994d26f
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-f32.js
@@ -0,0 +1,242 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+function WrapInAsmModule(func) {
+ function MODULE_NAME(stdlib) {
+ "use asm";
+ var fround = stdlib.Math.fround;
+ var Math_ceil = stdlib.Math.ceil;
+ var Math_floor = stdlib.Math.floor;
+ var Math_sqrt = stdlib.Math.sqrt;
+ var Math_abs = stdlib.Math.abs;
+ var Math_min = stdlib.Math.min;
+ var Math_max = stdlib.Math.max;
+
+ FUNC_BODY
+ return {main: FUNC_NAME};
+ }
+
+ var source = MODULE_NAME.toString()
+ .replace(/MODULE_NAME/g, func.name + "_module")
+ .replace(/FUNC_BODY/g, func.toString())
+ .replace(/FUNC_NAME/g, func.name);
+ return eval("(" + source + ")");
+}
+
+function RunThreeWayTest(asmfunc, expect) {
+ var asm_source = asmfunc.toString();
+ var nonasm_source = asm_source.replace(new RegExp("use asm"), "");
+ var stdlib = {Math: Math};
+
+ var js_module = eval("(" + nonasm_source + ")")(stdlib);
+ print("Testing " + asmfunc.name + " (js)...");
+ expect(js_module);
+
+ print("Testing " + asmfunc.name + " (asm.js)...");
+ var asm_module = asmfunc(stdlib);
+ expect(asm_module);
+
+ print("Testing " + asmfunc.name + " (wasm)...");
+ var wasm_module = Wasm.instantiateModuleFromAsm(asm_source, stdlib);
+ expect(wasm_module);
+}
+
+const fround = Math.fround;
+const Math_ceil = Math.ceil;
+const Math_floor = Math.floor;
+const Math_sqrt = Math.sqrt;
+const Math_abs = Math.abs;
+const Math_min = Math.min;
+const Math_max = Math.max;
+
+function f32_add(a, b) {
+ a = fround(a);
+ b = fround(b);
+ return fround(fround(a) + fround(b));
+}
+
+function f32_sub(a, b) {
+ a = fround(a);
+ b = fround(b);
+ return fround(fround(a) - fround(b));
+}
+
+function f32_mul(a, b) {
+ a = fround(a);
+ b = fround(b);
+ return fround(fround(a) * fround(b));
+}
+
+function f32_div(a, b) {
+ a = fround(a);
+ b = fround(b);
+ return fround(fround(a) / fround(b));
+}
+
+function f32_ceil(a) {
+ a = fround(a);
+ return fround(Math_ceil(fround(a)));
+}
+
+function f32_floor(a) {
+ a = fround(a);
+ return fround(Math_floor(fround(a)));
+}
+
+function f32_sqrt(a) {
+ a = fround(a);
+ return fround(Math_sqrt(fround(a)));
+}
+
+function f32_abs(a) {
+ a = fround(a);
+ return fround(Math_abs(fround(a)));
+}
+
+function f32_min(a, b) {
+ a = fround(a);
+ b = fround(b);
+ return fround(Math_min(fround(a), fround(b)));
+}
+
+function f32_max(a, b) {
+ a = fround(a);
+ b = fround(b);
+ return fround(Math_max(fround(a), fround(b)));
+}
+
+function f32_eq(a, b) {
+ a = fround(a);
+ b = fround(b);
+ if (fround(a) == fround(b)) {
+ return 1;
+ }
+ return 0;
+}
+
+function f32_ne(a, b) {
+ a = fround(a);
+ b = fround(b);
+ if (fround(a) != fround(b)) {
+ return 1;
+ }
+ return 0;
+}
+
+function f32_lt(a, b) {
+ a = fround(a);
+ b = fround(b);
+ if (fround(a) < fround(b)) {
+ return 1;
+ }
+ return 0;
+}
+
+function f32_lteq(a, b) {
+ a = fround(a);
+ b = fround(b);
+ if (fround(a) <= fround(b)) {
+ return 1;
+ }
+ return 0;
+}
+
+function f32_gt(a, b) {
+ a = fround(a);
+ b = fround(b);
+ if (fround(a) > fround(b)) {
+ return 1;
+ }
+ return 0;
+}
+
+function f32_gteq(a, b) {
+ a = fround(a);
+ b = fround(b);
+ if (fround(a) >= fround(b)) {
+ return 1;
+ }
+ return 0;
+}
+
+
+var inputs = [
+ 0, 1, 2, 3, 4,
+ NaN,
+ Infinity,
+ -Infinity,
+ 10, 20, 30, 31, 32, 33, 100, 2000,
+ 30000, 400000, 5000000,
+ 100000000, 2000000000,
+ 2147483646,
+ 2147483647,
+ 2147483648,
+ 2147483649,
+ 0x273a798e, 0x187937a3, 0xece3af83, 0x5495a16b, 0x0b668ecc, 0x11223344,
+ 0x0000af73, 0x0000116b, 0x00658ecc, 0x002b3b4c,
+ 0x88776655, 0x70000000, 0x07200000, 0x7fffffff, 0x56123761, 0x7fffff00,
+ 0xeeeeeeee, 0xfffffffd, 0xf0000000, 0x007fffff, 0x003fffff, 0x001fffff,
+ -0,
+ -1, -2, -3, -4,
+ -10, -20, -30, -31, -32, -33, -100, -2000,
+ -30000, -400000, -5000000,
+ -100000000, -2000000000,
+ -2147483646,
+ -2147483647,
+ -2147483648,
+ -2147483649,
+ 0.1,
+ 1.1e-2,
+ 1.2e-4,
+ 1.3e-8,
+ 1.4e-11,
+ 1.5e-12,
+ 1.6e-13
+];
+
+var funcs = [
+ f32_add,
+ f32_sub,
+ f32_mul,
+ f32_div,
+ f32_ceil,
+ f32_floor,
+// TODO(bradnelson) f32_sqrt,
+// TODO(bradnelson) f32_abs,
+// TODO(bradnelson) f32_min is wrong for -0
+// TODO(bradnelson) f32_max is wrong for -0
+ f32_eq,
+ f32_ne,
+ f32_lt,
+ f32_lteq,
+ f32_gt,
+ f32_gteq,
+];
+
+(function () {
+ for (func of funcs) {
+ RunThreeWayTest(WrapInAsmModule(func), function (module) {
+ if (func.length == 1) {
+ for (a of inputs) {
+ assertEquals(func(a), module.main(a));
+ assertEquals(func(a / 11), module.main(a / 11));
+ assertEquals(func(a / 430.9), module.main(a / 430.9));
+ assertEquals(func(a / -31.1), module.main(a / -31.1));
+ }
+ } else {
+ for (a of inputs) {
+ for (b of inputs) {
+ assertEquals(func(a, b), module.main(a, b));
+ assertEquals(func(a / 11, b), module.main(a / 11, b));
+ assertEquals(func(a, b / 420.9), module.main(a, b / 420.9));
+ assertEquals(func(a / -31.1, b), module.main(a / -31.1, b));
+ }
+ }
+ }
+ });
+ }
+
+})();
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-f64.js b/deps/v8/test/mjsunit/wasm/asm-wasm-f64.js
new file mode 100644
index 0000000000..11f9da38f9
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-f64.js
@@ -0,0 +1,313 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+function WrapInAsmModule(func) {
+ function MODULE_NAME(stdlib) {
+ "use asm";
+ var Math_ceil = stdlib.Math.ceil;
+ var Math_floor = stdlib.Math.floor;
+ var Math_sqrt = stdlib.Math.sqrt;
+ var Math_abs = stdlib.Math.abs;
+ var Math_min = stdlib.Math.min;
+ var Math_max = stdlib.Math.max;
+ var Math_acos = stdlib.Math.acos;
+ var Math_asin = stdlib.Math.asin;
+ var Math_atan = stdlib.Math.atan;
+ var Math_cos = stdlib.Math.cos;
+ var Math_sin = stdlib.Math.sin;
+ var Math_tan = stdlib.Math.tan;
+ var Math_exp = stdlib.Math.exp;
+ var Math_log = stdlib.Math.log;
+ var Math_atan2 = stdlib.Math.atan2;
+
+ FUNC_BODY
+ return {main: FUNC_NAME};
+ }
+
+ var source = MODULE_NAME.toString()
+ .replace(/MODULE_NAME/g, func.name + "_module")
+ .replace(/FUNC_BODY/g, func.toString())
+ .replace(/FUNC_NAME/g, func.name);
+ return eval("(" + source + ")");
+}
+
+function RunThreeWayTest(asmfunc, expect) {
+ var asm_source = asmfunc.toString();
+ var nonasm_source = asm_source.replace(new RegExp("use asm"), "");
+ var stdlib = {Math: Math};
+
+ var js_module = eval("(" + nonasm_source + ")")(stdlib);
+ print("Testing " + asmfunc.name + " (js)...");
+ expect(js_module);
+
+ print("Testing " + asmfunc.name + " (asm.js)...");
+ var asm_module = asmfunc(stdlib);
+ expect(asm_module);
+
+ print("Testing " + asmfunc.name + " (wasm)...");
+ var wasm_module = Wasm.instantiateModuleFromAsm(asm_source, stdlib);
+ expect(wasm_module);
+}
+
+const Math_ceil = Math.ceil;
+const Math_floor = Math.floor;
+const Math_sqrt = Math.sqrt;
+const Math_abs = Math.abs;
+const Math_min = Math.min;
+const Math_max = Math.max;
+const Math_acos = Math.acos;
+const Math_asin = Math.asin;
+const Math_atan = Math.atan;
+const Math_cos = Math.cos;
+const Math_sin = Math.sin;
+const Math_tan = Math.tan;
+const Math_exp = Math.exp;
+const Math_log = Math.log;
+const Math_atan2 = Math.atan2;
+
+function f64_add(a, b) {
+ a = +a;
+ b = +b;
+ return +(+a + +b);
+}
+
+function f64_sub(a, b) {
+ a = +a;
+ b = +b;
+ return +(+a - +b);
+}
+
+function f64_mul(a, b) {
+ a = +a;
+ b = +b;
+ return +(+a * +b);
+}
+
+function f64_div(a, b) {
+ a = +a;
+ b = +b;
+ return +(+a / +b);
+}
+
+function f64_eq(a, b) {
+ a = +a;
+ b = +b;
+ if (+a == +b) {
+ return 1;
+ }
+ return 0;
+}
+
+function f64_ne(a, b) {
+ a = +a;
+ b = +b;
+ if (+a != +b) {
+ return 1;
+ }
+ return 0;
+}
+
+function f64_lt(a, b) {
+ a = +a;
+ b = +b;
+ if (+a < +b) {
+ return 1;
+ }
+ return 0;
+}
+
+function f64_lteq(a, b) {
+ a = +a;
+ b = +b;
+ if (+a <= +b) {
+ return 1;
+ }
+ return 0;
+}
+
+function f64_gt(a, b) {
+ a = +a;
+ b = +b;
+ if (+a > +b) {
+ return 1;
+ }
+ return 0;
+}
+
+function f64_gteq(a, b) {
+ a = +a;
+ b = +b;
+ if (+a >= +b) {
+ return 1;
+ }
+ return 0;
+}
+
+function f64_ceil(a) {
+ a = +a;
+ return +(Math_ceil(+a));
+}
+
+function f64_floor(a) {
+ a = +a;
+ return +(Math_floor(+a));
+}
+
+function f64_sqrt(a) {
+ a = +a;
+ return +(Math_sqrt(+a));
+}
+
+function f64_abs(a) {
+ a = +a;
+ return +(Math_abs(+a));
+}
+
+function f64_min(a, b) {
+ a = +a;
+ b = +b;
+ return +(Math_min(+a, +b));
+}
+
+function f64_max(a, b) {
+ a = +a;
+ b = +b;
+ return +(Math_max(+a, +b));
+}
+
+function f64_acos(a) {
+ a = +a;
+ return +Math_acos(+a);
+}
+
+function f64_asin(a) {
+ a = +a;
+ return +Math_asin(+a);
+}
+
+function f64_atan(a) {
+ a = +a;
+ return +Math_atan(+a);
+}
+
+function f64_cos(a) {
+ a = +a;
+ return +Math_cos(+a);
+}
+
+function f64_sin(a) {
+ a = +a;
+ return +Math_sin(+a);
+}
+
+function f64_tan(a) {
+ a = +a;
+ return +Math_tan(+a);
+}
+
+function f64_exp(a, b) {
+ a = +a;
+ b = +b;
+ return +Math_exp(+a, +b);
+}
+
+function f64_log(a, b) {
+ a = +a;
+ b = +b;
+ return +Math_log(+a, +b);
+}
+
+function f64_atan2(a) {
+ a = +a;
+ return +Math_atan2(+a);
+}
+
+
+var inputs = [
+ 0, 1, 2, 3, 4,
+ NaN,
+ Infinity,
+ -Infinity,
+ 10, 20, 30, 31, 32, 33, 100, 2000,
+ 30000, 400000, 5000000,
+ 100000000, 2000000000,
+ 2147483646,
+ 2147483647,
+ 2147483648,
+ 2147483649,
+ 0x273a798e, 0x187937a3, 0xece3af83, 0x5495a16b, 0x0b668ecc, 0x11223344,
+ 0x0000af73, 0x0000116b, 0x00658ecc, 0x002b3b4c,
+ 0x88776655, 0x70000000, 0x07200000, 0x7fffffff, 0x56123761, 0x7fffff00,
+ 0xeeeeeeee, 0xfffffffd, 0xf0000000, 0x007fffff, 0x003fffff, 0x001fffff,
+ -0,
+ -1, -2, -3, -4,
+ -10, -20, -30, -31, -32, -33, -100, -2000,
+ -30000, -400000, -5000000,
+ -100000000, -2000000000,
+ -2147483646,
+ -2147483647,
+ -2147483648,
+ -2147483649,
+ 0.1,
+ 1.1e-2,
+ 1.2e-4,
+ 1.3e-8,
+ 1.4e-11,
+ 1.5e-12,
+ 1.6e-13
+];
+
+var funcs = [
+ f64_add,
+ f64_sub,
+ f64_mul,
+ f64_div,
+ f64_eq,
+ f64_ne,
+ f64_lt,
+ f64_lteq,
+ f64_gt,
+ f64_gteq,
+ f64_ceil,
+ f64_floor,
+// TODO(bradnelson) f64_sqrt,
+ f64_abs,
+// TODO(bradnelson) f64_min is wrong for -0
+// TODO(bradnelson) f64_max is wrong for -0
+// TODO(bradnelson) f64_acos,
+// TODO(bradnelson) f64_asin,
+// TODO(bradnelson) f64_atan,
+// TODO(bradnelson) f64_cos,
+// TODO(bradnelson) f64_sin,
+// TODO(bradnelson) f64_tan,
+// TODO(bradnelson) f64_exp,
+// TODO(bradnelson) f64_log,
+// TODO(bradnelson) f64_atan2,
+];
+
+(function () {
+ for (func of funcs) {
+ RunThreeWayTest(WrapInAsmModule(func), function (module) {
+ if (func.length == 1) {
+ for (a of inputs) {
+ assertEquals(func(a), module.main(a));
+ assertEquals(func(a / 10), module.main(a / 10));
+ assertEquals(func(a / 440.9), module.main(a / 440.9));
+ assertEquals(func(a / -33.1), module.main(a / -33.1));
+ }
+ } else {
+ for (a of inputs) {
+ for (b of inputs) {
+ assertEquals(func(a, b), module.main(a, b));
+ assertEquals(func(a / 10, b), module.main(a / 10, b));
+ assertEquals(func(a, b / 440.9), module.main(a, b / 440.9));
+ assertEquals(func(a / -33.1, b), module.main(a / -33.1, b));
+ }
+ }
+ }
+ });
+ }
+})();
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-heap.js b/deps/v8/test/mjsunit/wasm/asm-wasm-heap.js
new file mode 100644
index 0000000000..055b1e94a5
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-heap.js
@@ -0,0 +1,239 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+const stdlib = {
+ Math: Math,
+ Int8Array: Int8Array,
+ Int16Array: Int16Array,
+ Int32Array: Int32Array,
+ Uint8Array: Uint8Array,
+ Uint16Array: Uint16Array,
+ Uint32Array: Uint32Array,
+ Float32Array: Float32Array,
+ Float64Array: Float64Array,
+};
+
+const buffer = new ArrayBuffer(65536);
+const BASE = 1000000000;
+
+const OOB_INDEXES = [
+ buffer.byteLength,
+ buffer.byteLength + 1,
+ buffer.byteLength + 2,
+ buffer.byteLength + 3,
+ buffer.byteLength + 4,
+ buffer.byteLength + 5,
+ buffer.byteLength + 6,
+ buffer.byteLength + 7,
+ buffer.byteLength + 8,
+ buffer.byteLength + 9,
+ buffer.byteLength + 10,
+ 0x80000000,
+ 0x80000004,
+ 0xF0000000,
+ 0xFFFFFFFF,
+ 0xFFFFFFFE,
+ -1, -2, -3, -4, -5, -6, -7, -8
+];
+
+function resetBuffer() {
+ var view = new Int32Array(buffer);
+ for (var i = 0; i < view.length; i++) {
+ view[i] = BASE | (i << 2);
+ }
+}
+resetBuffer();
+
+
+function checkView(view, load, shift) {
+ for (var i = 0; i < 300; i++) {
+ assertEquals(view[i >> shift], load(i));
+ }
+}
+
+function RunThreeWayTest(asmfunc, expect) {
+ var asm_source = asmfunc.toString();
+ var nonasm_source = asm_source.replace(new RegExp("use asm"), "");
+
+ var js_module = eval("(" + nonasm_source + ")")(stdlib, {}, buffer);
+ print("Testing " + asmfunc.name + " (js)...");
+ expect(js_module);
+
+ print("Testing " + asmfunc.name + " (asm.js)...");
+ var asm_module = asmfunc(stdlib, {}, buffer);
+ expect(asm_module);
+
+ print("Testing " + asmfunc.name + " (wasm)...");
+ var wasm_module = Wasm.instantiateModuleFromAsm(asm_source, null, buffer);
+ expect(wasm_module);
+}
+
+function LoadAt_i32(stdlib, foreign, buffer) {
+ "use asm";
+ var HEAP32 = new stdlib.Int32Array(buffer);
+ function load(a) {
+ a = a | 0;
+ return HEAP32[a >> 2] | 0;
+ }
+ return {load: load};
+}
+
+RunThreeWayTest(LoadAt_i32, function(module) {
+ var load = module.load;
+ assertEquals(BASE, load(0));
+ assertEquals(BASE | 0x30, load(0x30));
+ assertEquals(BASE | 0x704, load(0x704));
+ assertEquals(BASE | 0x704, load(0x705));
+ assertEquals(BASE | 0x704, load(0x706));
+ assertEquals(BASE | 0x704, load(0x707));
+
+ var length = buffer.byteLength;
+ assertEquals(BASE | (length - 4), load(length - 4));
+ assertEquals(BASE | (length - 4), load(length - 4 + 1));
+ assertEquals(BASE | (length - 4), load(length - 4 + 2));
+ assertEquals(BASE | (length - 4), load(length - 4 + 3));
+
+ for (index of OOB_INDEXES) assertEquals(0, load(index));
+ checkView(new Int32Array(buffer), load, 2);
+});
+
+function LoadAt_i16(stdlib, foreign, buffer) {
+ "use asm";
+ var HEAP16 = new stdlib.Int16Array(buffer);
+ function load(a) {
+ a = a | 0;
+ return HEAP16[a >> 1] | 0;
+ }
+ return {load: load};
+}
+
+RunThreeWayTest(LoadAt_i16, function(module) {
+ var load = module.load;
+ var LOWER = (BASE << 16) >> 16;
+ var UPPER = BASE >> 16;
+ assertEquals(LOWER, load(0));
+ assertEquals(UPPER, load(2));
+
+ assertEquals(LOWER | 0x30, load(0x30));
+ assertEquals(UPPER, load(0x32));
+
+ assertEquals(LOWER | 0x504, load(0x504));
+ assertEquals(LOWER | 0x504, load(0x505));
+
+ assertEquals(UPPER, load(0x706));
+ assertEquals(UPPER, load(0x707));
+
+ var length = buffer.byteLength;
+ assertEquals(LOWER | (length - 4), load(length - 4));
+ assertEquals(LOWER | (length - 4), load(length - 4 + 1));
+ assertEquals(UPPER, load(length - 4 + 2));
+ assertEquals(UPPER, load(length - 4 + 3));
+
+ for (index of OOB_INDEXES) assertEquals(0, load(index));
+ checkView(new Int16Array(buffer), load, 1);
+});
+
+function LoadAt_u16(stdlib, foreign, buffer) {
+ "use asm";
+ var HEAP16 = new stdlib.Uint16Array(buffer);
+ function load(a) {
+ a = a | 0;
+ return HEAP16[a >> 1] | 0;
+ }
+ return {load: load};
+}
+
+RunThreeWayTest(LoadAt_u16, function(module) {
+ var load = module.load;
+ for (index of OOB_INDEXES) assertEquals(0, load(index));
+ checkView(new Uint16Array(buffer), load, 1);
+});
+
+function LoadAt_i8(stdlib, foreign, buffer) {
+ "use asm";
+ var HEAP8 = new stdlib.Int8Array(buffer);
+ function load(a) {
+ a = a | 0;
+ return HEAP8[a >> 0] | 0;
+ }
+ return {load: load};
+}
+
+RunThreeWayTest(LoadAt_i8, function(module) {
+ var load = module.load;
+ for (index of OOB_INDEXES) assertEquals(0, load(index));
+ checkView(new Int8Array(buffer), load, 0);
+});
+
+function LoadAt_u8(stdlib, foreign, buffer) {
+ "use asm";
+ var HEAP8 = new stdlib.Uint8Array(buffer);
+ function load(a) {
+ a = a | 0;
+ return HEAP8[a >> 0] | 0;
+ }
+ return {load: load};
+}
+
+RunThreeWayTest(LoadAt_u8, function(module) {
+ var load = module.load;
+ for (index of OOB_INDEXES) assertEquals(0, load(index));
+ checkView(new Uint8Array(buffer), load, 0);
+});
+
+
+function LoadAt_u32(stdlib, foreign, buffer) {
+ "use asm";
+ var HEAP32 = new stdlib.Uint32Array(buffer);
+ function load(a) {
+ a = a | 0;
+ return +(HEAP32[a >> 2] >>> 0);
+ }
+ return {load: load};
+}
+
+RunThreeWayTest(LoadAt_u32, function(module) {
+ var load = module.load;
+ for (index of OOB_INDEXES) assertEquals(0, load(index));
+ checkView(new Uint32Array(buffer), load, 2);
+});
+
+function LoadAt_f32(stdlib, foreign, buffer) {
+ "use asm";
+ var HEAP32 = new stdlib.Float32Array(buffer);
+ var fround = stdlib.Math.fround;
+ function load(a) {
+ a = a | 0;
+ return fround(HEAP32[a >> 2]);
+ }
+ return {load: load};
+}
+
+RunThreeWayTest(LoadAt_f32, function(module) {
+ var load = module.load;
+ for (index of OOB_INDEXES) assertEquals(NaN, load(index));
+ checkView(new Float32Array(buffer), load, 2);
+});
+
+function LoadAt_f64(stdlib, foreign, buffer) {
+ "use asm";
+ var HEAP64 = new stdlib.Float64Array(buffer);
+ function load(a) {
+ a = a | 0;
+ return +HEAP64[a >> 3];
+ }
+ return {load: load};
+}
+
+RunThreeWayTest(LoadAt_f64, function(module) {
+ var load = module.load;
+ for (index of OOB_INDEXES) assertEquals(NaN, load(index));
+ checkView(new Float64Array(buffer), load, 3);
+});
+
+// TODO(titzer): constant heap indexes
+// TODO(titzer): heap accesses with offsets and arithmetic
+// TODO(titzer): [i >> K] where K is greater than log(size)
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-i32.js b/deps/v8/test/mjsunit/wasm/asm-wasm-i32.js
new file mode 100644
index 0000000000..6224e8fa1f
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-i32.js
@@ -0,0 +1,252 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+function WrapInAsmModule(func) {
+ function MODULE_NAME(stdlib) {
+ "use asm";
+ var imul = stdlib.Math.imul;
+ var Math_max = stdlib.Math.max;
+ var Math_min = stdlib.Math.min;
+ var Math_abs = stdlib.Math.abs;
+
+ FUNC_BODY
+ return {main: FUNC_NAME};
+ }
+
+ var source = MODULE_NAME.toString()
+ .replace(/MODULE_NAME/g, func.name + "_module")
+ .replace(/FUNC_BODY/g, func.toString())
+ .replace(/FUNC_NAME/g, func.name);
+ return eval("(" + source + ")");
+}
+
+function RunThreeWayTest(asmfunc, expect) {
+ var asm_source = asmfunc.toString();
+ var nonasm_source = asm_source.replace(new RegExp("use asm"), "");
+ var stdlib = {Math: Math};
+
+ var js_module = eval("(" + nonasm_source + ")")(stdlib);
+ print("Testing " + asmfunc.name + " (js)...");
+ expect(js_module);
+
+ print("Testing " + asmfunc.name + " (asm.js)...");
+ var asm_module = asmfunc(stdlib);
+ expect(asm_module);
+
+ print("Testing " + asmfunc.name + " (wasm)...");
+ var wasm_module = Wasm.instantiateModuleFromAsm(asm_source, stdlib);
+ expect(wasm_module);
+}
+
+const imul = Math.imul;
+const Math_max = Math.max;
+const Math_min = Math.min;
+const Math_abs = Math.abs;
+
+function i32_add(a, b) {
+ a = a | 0;
+ b = b | 0;
+ return (a + b) | 0;
+}
+
+function i32_sub(a, b) {
+ a = a | 0;
+ b = b | 0;
+ return (a - b) | 0;
+}
+
+function i32_mul(a, b) {
+ a = a | 0;
+ b = b | 0;
+ return imul(a, b) | 0;
+}
+
+function i32_div(a, b) {
+ a = a | 0;
+ b = b | 0;
+ return (a / b) | 0;
+}
+
+function i32_mod(a, b) {
+ a = a | 0;
+ b = b | 0;
+ return (a % b) | 0;
+}
+
+function i32_and(a, b) {
+ a = a | 0;
+ b = b | 0;
+ return (a & b) | 0;
+}
+
+function i32_or(a, b) {
+ a = a | 0;
+ b = b | 0;
+ return (a | b) | 0;
+}
+
+function i32_xor(a, b) {
+ a = a | 0;
+ b = b | 0;
+ return (a ^ b) | 0;
+}
+
+function i32_shl(a, b) {
+ a = a | 0;
+ b = b | 0;
+ return (a << b) | 0;
+}
+
+function i32_shr(a, b) {
+ a = a | 0;
+ b = b | 0;
+ return (a >> b) | 0;
+}
+
+function i32_sar(a, b) {
+ a = a | 0;
+ b = b | 0;
+ return (a >>> b) | 0;
+}
+
+function i32_eq(a, b) {
+ a = a | 0;
+ b = b | 0;
+ if ((a | 0) == (b | 0)) {
+ return 1;
+ }
+ return 0;
+}
+
+function i32_ne(a, b) {
+ a = a | 0;
+ b = b | 0;
+ if ((a | 0) < (b | 0)) {
+ return 1;
+ }
+ return 0;
+}
+
+function i32_lt(a, b) {
+ a = a | 0;
+ b = b | 0;
+ if ((a | 0) < (b | 0)) {
+ return 1;
+ }
+ return 0;
+}
+
+function i32_lteq(a, b) {
+ a = a | 0;
+ b = b | 0;
+ if ((a | 0) <= (b | 0)) {
+ return 1;
+ }
+ return 0;
+}
+
+function i32_gt(a, b) {
+ a = a | 0;
+ b = b | 0;
+ if ((a | 0) > (b | 0)) {
+ return 1;
+ }
+ return 0;
+}
+
+function i32_gteq(a, b) {
+ a = a | 0;
+ b = b | 0;
+ if ((a | 0) >= (b | 0)) {
+ return 1;
+ }
+ return 0;
+}
+
+function i32_min(a, b) {
+ a = a | 0;
+ b = b | 0;
+ return Math_min(a | 0, b | 0) | 0;
+}
+
+function i32_max(a, b) {
+ a = a | 0;
+ b = b | 0;
+ return Math_max(a | 0, b | 0) | 0;
+}
+
+function i32_abs(a) {
+ a = a | 0;
+ return Math_abs(a | 0) | 0;
+}
+
+var inputs = [
+ 0, 1, 2, 3, 4,
+ 10, 20, 30, 31, 32, 33, 100, 2000,
+ 30000, 400000, 5000000,
+ 100000000, 2000000000,
+ 2147483646,
+ 2147483647,
+ 2147483648,
+ 2147483649,
+ 0x273a798e, 0x187937a3, 0xece3af83, 0x5495a16b, 0x0b668ecc, 0x11223344,
+ 0x0000009e, 0x00000043, 0x0000af73, 0x0000116b, 0x00658ecc, 0x002b3b4c,
+ 0x88776655, 0x70000000, 0x07200000, 0x7fffffff, 0x56123761, 0x7fffff00,
+ 0x761c4761, 0x80000000, 0x88888888, 0xa0000000, 0xdddddddd, 0xe0000000,
+ 0xeeeeeeee, 0xfffffffd, 0xf0000000, 0x007fffff, 0x003fffff, 0x001fffff,
+ 0x000fffff, 0x0007ffff, 0x0003ffff, 0x0001ffff, 0x0000ffff, 0x00007fff,
+ 0x00003fff, 0x00001fff, 0x00000fff, 0x000007ff, 0x000003ff, 0x000001ff,
+ -1, -2, -3, -4,
+ -10, -20, -30, -31, -32, -33, -100, -2000,
+ -30000, -400000, -5000000,
+ -100000000, -2000000000,
+ -2147483646,
+ -2147483647,
+ -2147483648,
+ -2147483649,
+];
+
+var funcs = [
+ i32_add,
+ i32_sub,
+ i32_mul,
+ i32_div,
+ i32_mod,
+ i32_and,
+ i32_or,
+ i32_xor,
+ i32_shl,
+ i32_shr,
+ i32_sar,
+ i32_eq,
+ i32_ne,
+ i32_lt,
+ i32_lteq,
+ i32_gt,
+ i32_gteq,
+ i32_min,
+ i32_max,
+ i32_abs
+];
+
+(function () {
+ for (func of funcs) {
+ RunThreeWayTest(WrapInAsmModule(func), function (module) {
+ if (func.length == 1) {
+ for (a of inputs) {
+ assertEquals(func(a), module.main(a));
+ }
+ } else {
+ for (a of inputs) {
+ for (b of inputs) {
+ assertEquals(func(a, b), module.main(a, b));
+ }
+ }
+ }
+ });
+ }
+
+})();
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-literals.js b/deps/v8/test/mjsunit/wasm/asm-wasm-literals.js
new file mode 100644
index 0000000000..e4e312f1d4
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-literals.js
@@ -0,0 +1,261 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+function RunThreeWayTest(asmfunc, expect) {
+ var asm_source = asmfunc.toString();
+ var nonasm_source = asm_source.replace(new RegExp("use asm"), "");
+ var stdlib = {Math: Math};
+
+ var js_module = eval("(" + nonasm_source + ")")(stdlib);
+ print("Testing " + asmfunc.name + " (js)...");
+ expect(js_module);
+
+ print("Testing " + asmfunc.name + " (asm.js)...");
+ var asm_module = asmfunc(stdlib);
+ expect(asm_module);
+
+ print("Testing " + asmfunc.name + " (wasm)...");
+ var wasm_module = Wasm.instantiateModuleFromAsm(asm_source, stdlib);
+ expect(wasm_module);
+}
+
+function PositiveIntLiterals() {
+ "use asm";
+ function f0() { return 0; }
+ function f1() { return 1; }
+ function f4() { return 4; }
+ function f64() { return 64; }
+ function f127() { return 127; }
+ function f128() { return 128; }
+ function f256() { return 256; }
+ function f1000() { return 1000; }
+ function f2000000() { return 2000000; }
+ function fmax() { return 2147483647; }
+ return {f0: f0, f1: f1, f4: f4, f64: f64, f127: f127, f128: f128,
+ f256: f256, f1000: f1000, f2000000, fmax: fmax};
+}
+
+RunThreeWayTest(PositiveIntLiterals, function(module) {
+ assertEquals(0, module.f0());
+ assertEquals(1, module.f1());
+ assertEquals(4, module.f4());
+ assertEquals(64, module.f64());
+ assertEquals(128, module.f128());
+ assertEquals(256, module.f256());
+ assertEquals(1000, module.f1000());
+ assertEquals(2000000, module.f2000000());
+ assertEquals(2147483647, module.fmax());
+});
+
+function NegativeIntLiterals() {
+ "use asm";
+ function f1() { return -1; }
+ function f4() { return -4; }
+ function f64() { return -64; }
+ function f127() { return -127; }
+ function f128() { return -128; }
+ function f256() { return -256; }
+ function f1000() { return -1000; }
+ function f2000000() { return -2000000; }
+ function fmin() { return -2147483648; }
+ return {f1: f1, f4: f4, f64: f64, f127: f127, f128: f128,
+ f256: f256, f1000: f1000, f2000000, fmin: fmin};
+}
+
+RunThreeWayTest(NegativeIntLiterals, function (module) {
+ assertEquals(-1, module.f1());
+ assertEquals(-4, module.f4());
+ assertEquals(-64, module.f64());
+ assertEquals(-127, module.f127());
+ assertEquals(-128, module.f128());
+ assertEquals(-256, module.f256());
+ assertEquals(-1000, module.f1000());
+ assertEquals(-2000000, module.f2000000());
+ assertEquals(-2147483648, module.fmin());
+});
+
+function PositiveUnsignedLiterals() {
+ "use asm";
+ function f0() { return 0 >>> 0; }
+ function f1() { return 1 >>> 0; }
+ function f4() { return 4 >>> 0; }
+ function f64() { return 64 >>> 0; }
+ function f127() { return 127 >>> 0; }
+ function f128() { return 128 >>> 0; }
+ function f256() { return 256 >>> 0; }
+ function f1000() { return 1000 >>> 0; }
+ function f2000000() { return 2000000 >>> 0; }
+ function fmax() { return 2147483647 >>> 0; }
+ return {f0: f0, f1: f1, f4: f4, f64: f64, f127: f127, f128: f128,
+ f256: f256, f1000: f1000, f2000000, fmax: fmax};
+}
+
+RunThreeWayTest(PositiveUnsignedLiterals, function (module) {
+ assertEquals(0, module.f0());
+ assertEquals(1, module.f1());
+ assertEquals(4, module.f4());
+ assertEquals(64, module.f64());
+ assertEquals(128, module.f128());
+ assertEquals(256, module.f256());
+ assertEquals(1000, module.f1000());
+ assertEquals(2000000, module.f2000000());
+ assertEquals(2147483647, module.fmax());
+});
+
+function LargeUnsignedLiterals() {
+ "use asm";
+ function a() {
+ var x = 2147483648;
+ return +(x >>> 0);
+ }
+ function b() {
+ var x = 2147483649;
+ return +(x >>> 0);
+ }
+ function c() {
+ var x = 0x80000000;
+ return +(x >>> 0);
+ }
+ function d() {
+ var x = 0x80000001;
+ return +(x >>> 0);
+ }
+ function e() {
+ var x = 0xffffffff;
+ return +(x >>> 0);
+ }
+ return {a: a, b: b, c: c, d: d, e: e};
+}
+
+RunThreeWayTest(LargeUnsignedLiterals, function(module) {
+ assertEquals(2147483648, module.a());
+ assertEquals(2147483649, module.b());
+ assertEquals(0x80000000, module.c());
+ assertEquals(0x80000001, module.d());
+ assertEquals(0xffffffff, module.e());
+});
+
+function ManyI32() {
+ "use asm";
+ function main() {
+ var a = 1 + -2 + 3 + -4 | 0;
+ var b = 11 + -22 + 33 + -44 | 0;
+ var c = 111 + -222 + 333 + -444 | 0;
+ var d = 1111 + -2222 + 3333 + -4444 | 0;
+ var e = 11111 + -22222 + 33333 + -44444 | 0;
+ var f = 155555 + -266666 + 377777 + -488888 | 0;
+ var g = 1155555 + -2266666 + 3377777 + -4488888 | 0;
+ var h = 11155555 + -22266666 + 33377777 + -44488888 | 0;
+ var i = 111155555 + -222266666 + 333377777 + -444488888 | 0;
+ var j = (
+ 0x1 + 0x2 + 0x4 + 0x8 +
+ 0x10 + 0x20 + 0x40 + 0x80 +
+ 0x10F + 0x200 + 0x400 + 0x800 +
+ 0x10E0 + 0x20F0 + 0x4000 + 0x8000 +
+ 0x10D00 + 0x20E00 + 0x400F0 + 0x80002 +
+ 0x10C000 + 0x20D000 + 0x400E00 + 0x800030 +
+ 0x10B0000 + 0x20C0000 + 0x400D000 + 0x8000400 +
+ 0x10A00000 + 0x20B00000 + 0x400C0000 + 0x80005000
+ ) | 0;
+ return (a + b + c + d + e + f + g + h + i + j) | 0;
+ }
+ return {main: main};
+}
+
+RunThreeWayTest(ManyI32, function(module) {
+ assertEquals(-222411306, module.main());
+});
+
+
+function ManyF64a() {
+ "use asm";
+ function main() {
+ var a = +( 0.1 + -0.2 + 0.3 + -0.4);
+ var b = +( 1.1 + -2.2 + 0.33 + -4.4);
+ var c = +( 11.1 + -22.2 + 3.33 + -4.44);
+ var d = +( 111.1 + -222.2 + 33.33 + -4.444);
+ var e = +( 1111.1 + -2222.2 + 333.33 + -4.4444);
+ var f = +( 15555.5 + -26666.6 + 3777.77 + -4.88888);
+ var g = +( 115555.5 + -226666.6 + 33777.77 + -4.488888);
+ var h = +( 1115555.5 + -2226666.6 + 333777.77 + -4.4488888);
+ var i = +(11115555.5 + -22226666.6 + 3333777.77 + -4.44488888);
+ return +(a + b + c + d + e + f + g + h + i);
+ }
+ return {main: main};
+}
+
+RunThreeWayTest(ManyF64a, function(module) {
+ assertEquals(-8640233.599945681, module.main());
+});
+
+function ManyF64b() {
+ "use asm";
+ function k1() { return +(1.0e-25 + 3.0e-25 + 5.0e-25 + 6.0e-25 + 9.0e-25); }
+ function k2() { return +(1.0e-20 + 3.0e-20 + 5.0e-20 + 6.0e-20 + 9.0e-20); }
+ function k3() { return +(1.0e-15 + 3.0e-15 + 5.0e-15 + 6.0e-15 + 9.0e-15); }
+ function k4() { return +(1.0e-10 + 3.0e-10 + 5.0e-10 + 6.0e-10 + 9.0e-10); }
+ function k5() { return +(1.0e-5 + 3.0e-5 + 5.0e-5 + 6.0e-5 + 9.0e-5); }
+ function k6() { return +(1.1e+0 + 3.1e+0 + 5.1e+0 + 6.1e+0 + 9.1e+0); }
+
+ return {k1: k1, k2: k2, k3: k3, k4: k4, k5: k5, k6: k6};
+}
+
+RunThreeWayTest(ManyF64b, function(module) {
+ assertEquals(2.4e-24, module.k1());
+ assertEquals(2.4e-19, module.k2());
+ assertEquals(2.4e-14, module.k3());
+ assertEquals(2.4e-9, module.k4());
+ assertEquals(0.00024000000000000003, module.k5());
+ assertEquals(24.5, module.k6());
+});
+
+
+function ManyF64c() {
+ "use asm";
+ function k1() { return +(1.0e+25 + 3.0e+25 + 5.0e+25 + 6.0e+25 + 9.0e+25); }
+ function k2() { return +(1.0e+20 + 3.0e+20 + 5.0e+20 + 6.0e+20 + 9.0e+20); }
+ function k3() { return +(1.0e+15 + 3.0e+15 + 5.0e+15 + 6.0e+15 + 9.0e+15); }
+ function k4() { return +(1.0e+10 + 3.0e+10 + 5.0e+10 + 6.0e+10 + 9.0e+10); }
+ function k5() { return +(1.0e+5 + 3.0e+5 + 5.0e+5 + 6.0e+5 + 9.0e+5); }
+ function k6() { return +(1.4e+0 + 3.4e+0 + 5.4e+0 + 6.4e+0 + 9.4e+0); }
+
+ return {k1: k1, k2: k2, k3: k3, k4: k4, k5: k5, k6: k6};
+}
+
+RunThreeWayTest(ManyF64c, function(module) {
+ assertEquals(2.4000000000000004e+26, module.k1());
+ assertEquals(2.4e+21, module.k2());
+ assertEquals(2.4e+16, module.k3());
+ assertEquals(2.4e+11, module.k4());
+ assertEquals(2.4e+6, module.k5());
+ assertEquals(26, module.k6());
+});
+
+function ManyF32a(stdlib) {
+ "use asm";
+ var F = stdlib.Math.fround;
+
+ function k1() { return F(F(1.0e-25) + F(5.0e-25) + F(6.0e-25) + F(9.0e-25)); }
+ function k2() { return F(F(1.0e-20) + F(5.0e-20) + F(6.0e-20) + F(9.0e-20)); }
+ function k3() { return F(F(1.0e-15) + F(5.0e-15) + F(6.0e-15) + F(9.0e-15)); }
+ function k4() { return F(F(1.0e-10) + F(5.0e-10) + F(6.0e-10) + F(9.0e-10)); }
+ function k5() { return F(F(1.0e-5) + F(5.0e-5) + F(6.0e-5) + F(9.0e-5)); }
+ function k6() { return F(F(1.1e+0) + F(5.1e+0) + F(6.1e+0) + F(9.1e+0)); }
+
+ return {k1: k1, k2: k2, k3: k3, k4: k4, k5: k5, k6: k6};
+}
+
+if (false) {
+ // TODO(bradnelson): fails validation of F32 literals somehow.
+RunThreeWayTest(ManyF32a, function(module) {
+ assertEquals(2.0999999917333043e-24, module.k1());
+ assertEquals(2.099999868734112e-19, module.k2());
+ assertEquals(2.099999997029825e-14, module.k3());
+ assertEquals(2.099999951710174e-9, module.k4());
+ assertEquals(0.0002099999983329326, module.k5());
+ assertEquals(21.399999618530273, module.k6());
+});
+}
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-stdlib.js b/deps/v8/test/mjsunit/wasm/asm-wasm-stdlib.js
new file mode 100644
index 0000000000..fe39a30a88
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-stdlib.js
@@ -0,0 +1,358 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+(function TestStdlibConstants() {
+ function Module(stdlib) {
+ "use asm";
+
+ var StdlibInfinity = stdlib.Infinity;
+ var StdlibNaN = stdlib.NaN;
+ var StdlibMathE = stdlib.Math.E;
+ var StdlibMathLN10 = stdlib.Math.LN10;
+ var StdlibMathLN2 = stdlib.Math.LN2;
+ var StdlibMathLOG2E = stdlib.Math.LOG2E;
+ var StdlibMathLOG10E = stdlib.Math.LOG10E;
+ var StdlibMathPI = stdlib.Math.PI;
+ var StdlibMathSQRT1_2 = stdlib.Math.SQRT1_2;
+ var StdlibMathSQRT2 = stdlib.Math.SQRT2;
+
+ function caller() {
+ if (StdlibInfinity != 1.0 / 0.0) return 0;
+ if (StdlibMathE != 2.718281828459045) return 0;
+ if (StdlibMathLN10 != 2.302585092994046) return 0;
+ if (StdlibMathLN2 != 0.6931471805599453) return 0;
+ if (StdlibMathLOG2E != 1.4426950408889634) return 0;
+ if (StdlibMathLOG10E != 0.4342944819032518) return 0;
+ if (StdlibMathPI != 3.141592653589793) return 0;
+ if (StdlibMathSQRT1_2 != 0.7071067811865476) return 0;
+ if (StdlibMathSQRT2 != 1.4142135623730951) return 0;
+ return 1;
+ }
+
+ function nanCheck() {
+ return +StdlibNaN;
+ }
+
+ return {caller:caller, nanCheck:nanCheck};
+ }
+
+ var m =Wasm.instantiateModuleFromAsm(Module.toString());
+ assertEquals(1, m.caller());
+ assertTrue(isNaN(m.nanCheck()));
+})();
+
+
+(function TestStdlibFunctionsInside() {
+ function Module(stdlib) {
+ "use asm";
+
+ var StdlibMathCeil = stdlib.Math.ceil;
+ var StdlibMathFloor = stdlib.Math.floor;
+ var StdlibMathSqrt = stdlib.Math.sqrt;
+ var StdlibMathAbs = stdlib.Math.abs;
+ var StdlibMathMin = stdlib.Math.min;
+ var StdlibMathMax = stdlib.Math.max;
+
+ var StdlibMathAcos = stdlib.Math.acos;
+ var StdlibMathAsin = stdlib.Math.asin;
+ var StdlibMathAtan = stdlib.Math.atan;
+ var StdlibMathCos = stdlib.Math.cos;
+ var StdlibMathSin = stdlib.Math.sin;
+ var StdlibMathTan = stdlib.Math.tan;
+ var StdlibMathExp = stdlib.Math.exp;
+ var StdlibMathLog = stdlib.Math.log;
+
+ var StdlibMathAtan2 = stdlib.Math.atan2;
+ var StdlibMathPow = stdlib.Math.pow;
+ var StdlibMathImul = stdlib.Math.imul;
+
+ var fround = stdlib.Math.fround;
+
+ function deltaEqual(x, y) {
+ x = +x;
+ y = +y;
+ var t = 0.0;
+ t = x - y;
+ if (t < 0.0) {
+ t = t * -1.0;
+ }
+ return (t < 1.0e-13) | 0;
+ }
+
+ function caller() {
+ if (!deltaEqual(StdlibMathSqrt(123.0), 11.090536506409418)) return 0;
+ if (StdlibMathSqrt(fround(256.0)) != fround(16.0)) return 0;
+ if (StdlibMathCeil(123.7) != 124.0) return 0;
+ if (StdlibMathCeil(fround(123.7)) != fround(124.0)) return 0;
+ if (StdlibMathFloor(123.7) != 123.0) return 0;
+ if (StdlibMathFloor(fround(123.7)) != fround(123.0)) return 0;
+ if (StdlibMathAbs(-123.0) != 123.0) return 0;
+ if (StdlibMathAbs(fround(-123.0)) != fround(123.0)) return 0;
+ if (StdlibMathMin(123.4, 1236.4) != 123.4) return 0;
+ if (StdlibMathMin(fround(123.4),
+ fround(1236.4)) != fround(123.4)) return 0;
+ if (StdlibMathMax(123.4, 1236.4) != 1236.4) return 0;
+ if (StdlibMathMax(fround(123.4), fround(1236.4))
+ != fround(1236.4)) return 0;
+
+ if (!deltaEqual(StdlibMathAcos(0.1), 1.4706289056333368)) return 0;
+ if (!deltaEqual(StdlibMathAsin(0.2), 0.2013579207903308)) return 0;
+ if (!deltaEqual(StdlibMathAtan(0.2), 0.19739555984988078)) return 0;
+ if (!deltaEqual(StdlibMathCos(0.2), 0.9800665778412416)) return 0;
+ if (!deltaEqual(StdlibMathSin(0.2), 0.19866933079506122)) return 0;
+ if (!deltaEqual(StdlibMathTan(0.2), 0.20271003550867250)) return 0;
+ if (!deltaEqual(StdlibMathExp(0.2), 1.2214027581601699)) return 0;
+ if (!deltaEqual(StdlibMathLog(0.2), -1.6094379124341003)) return 0;
+
+ if (StdlibMathImul(6, 7) != 42) return 0;
+ if (!deltaEqual(StdlibMathAtan2(6.0, 7.0), 0.7086262721276703)) return 0;
+ if (StdlibMathPow(6.0, 7.0) != 279936.0) return 0;
+
+ return 1;
+ }
+
+ return {caller:caller};
+ }
+
+ var m = Wasm.instantiateModuleFromAsm(Module.toString());
+ assertEquals(1, m.caller());
+})();
+
+
+(function TestStdlibFunctionOutside() {
+ function looseEqual(x, y, delta) {
+ if (delta === undefined) {
+ delta = 1.0e-10;
+ }
+ if (isNaN(x) && isNaN(y)) {
+ return true;
+ }
+ if (!isFinite(x) && !isFinite(y)) {
+ return true;
+ }
+ x = +x;
+ y = +y;
+ var t = 0.0;
+ t = x - y;
+ if (t < 0.0) {
+ t = t * -1.0;
+ }
+ return (t < delta) | 0;
+ }
+
+ function plainEqual(x, y) {
+ if (isNaN(x) && isNaN(y)) {
+ return true;
+ }
+ return x === y;
+ }
+
+ function Module(stdlib) {
+ "use asm";
+ var ceil = stdlib.Math.ceil;
+ var floor = stdlib.Math.floor;
+ var sqrt = stdlib.Math.sqrt;
+ var abs = stdlib.Math.abs;
+ var fround = stdlib.Math.fround;
+ var fround2 = stdlib.Math.fround;
+
+ var acos = stdlib.Math.acos;
+ var asin = stdlib.Math.asin;
+ var atan = stdlib.Math.atan;
+ var cos = stdlib.Math.cos;
+ var sin = stdlib.Math.sin;
+ var tan = stdlib.Math.tan;
+ var exp = stdlib.Math.exp;
+ var log = stdlib.Math.log;
+
+ var atan2 = stdlib.Math.atan2;
+ var pow = stdlib.Math.pow;
+ var imul = stdlib.Math.imul;
+ var min = stdlib.Math.min;
+ var max = stdlib.Math.max;
+
+ function ceil_f64(x) { x = +x; return +ceil(x); }
+ function ceil_f32(x) { x = fround(x); return fround(ceil(x)); }
+
+ function floor_f64(x) { x = +x; return +floor(x); }
+ function floor_f32(x) { x = fround(x); return fround(floor(x)); }
+
+ function sqrt_f64(x) { x = +x; return +sqrt(x); }
+ function sqrt_f32(x) { x = fround(x); return fround(sqrt(x)); }
+
+ function abs_f64(x) { x = +x; return +abs(x); }
+ function abs_f32(x) { x = fround(x); return fround(abs(x)); }
+ function abs_i32(x) { x = x | 0; return abs(x|0) | 0; }
+
+ function acos_f64(x) { x = +x; return +acos(x); }
+ function asin_f64(x) { x = +x; return +asin(x); }
+ function atan_f64(x) { x = +x; return +atan(x); }
+ function cos_f64(x) { x = +x; return +cos(x); }
+ function sin_f64(x) { x = +x; return +sin(x); }
+ function tan_f64(x) { x = +x; return +tan(x); }
+ function exp_f64(x) { x = +x; return +exp(x); }
+ function log_f64(x) { x = +x; return +log(x); }
+
+ function atan2_f64(x, y) { x = +x; y = +y; return +atan2(x, y); }
+ function pow_f64(x, y) { x = +x; y = +y; return +atan2(x, y); }
+
+ function imul_i32(x, y) { x = x | 0; y = y | 0; return imul(x, y) | 0; }
+ function imul_u32(x, y) {
+ x = x | 0; y = y | 0; return imul(x>>>0, y>>>0) | 0; }
+
+ // type -> f32
+ function fround_i32(x) { x = x | 0; return fround(x|0); }
+ function fround_u32(x) { x = x | 0; return fround(x>>>0); }
+ function fround_f32(x) { x = fround(x); return fround(x); }
+ function fround_f64(x) { x = +x; return fround(x); }
+
+ // type -> f32 -> type
+ function fround2_i32(x) { x = x | 0; return ~~fround2(x|0) | 0; }
+ function fround2_u32(x) { x = x | 0; return ~~fround2(x>>>0) | 0; }
+ function fround2_f32(x) { x = fround2(x); return fround2(x); }
+ function fround2_f64(x) { x = +x; return +fround2(x); }
+
+ function min_i32(x, y) { x = x | 0; y = y | 0; return min(x|0, y|0) | 0; }
+ function min_f32(x, y) {
+ x = fround(x); y = fround(y); return fround(min(x, y)); }
+ function min_f64(x, y) { x = +x; y = +y; return +min(x, y); }
+
+ function max_i32(x, y) { x = x | 0; y = y | 0; return max(x|0, y|0) | 0; }
+ function max_f32(x, y) {
+ x = fround(x); y = fround(y); return fround(max(x, y)); }
+ function max_f64(x, y) { x = +x; y = +y; return +max(x, y); }
+
+ return {
+ ceil_f64: ceil_f64,
+ ceil_f32: ceil_f32,
+ floor_f64: floor_f64,
+ floor_f32: floor_f32,
+ sqrt_f64: sqrt_f64,
+ sqrt_f32: sqrt_f32,
+ abs_f64: abs_f64,
+ abs_f32: abs_f32,
+ abs_i32: abs_i32,
+ acos_f64: acos_f64,
+ asin_f64: asin_f64,
+ atan_f64: atan_f64,
+ cos_f64: cos_f64,
+ sin_f64: sin_f64,
+ tan_f64: tan_f64,
+ exp_f64: exp_f64,
+ log_f64: log_f64,
+ imul_i32: imul_i32,
+ imul_u32: imul_u32,
+ fround_i32: fround_i32,
+ fround_u32: fround_u32,
+ fround_f32: fround_f32,
+ fround_f64: fround_f64,
+ fround2_i32: fround2_i32,
+ fround2_u32: fround2_u32,
+ fround2_f32: fround2_f32,
+ fround2_f64: fround2_f64,
+ min_i32: min_i32,
+ min_f32: min_f32,
+ min_f64: min_f64,
+ max_i32: max_i32,
+ max_f32: max_f32,
+ max_f64: max_f64,
+ };
+ }
+ var m = Wasm.instantiateModuleFromAsm(Module.toString());
+ var values = {
+ i32: [
+ 0, 1, -1, 123, 456, -123, -456,
+ 0x40000000, 0x7FFFFFFF, -0x80000000,
+ ],
+ u32: [
+ 0, 1, 123, 456,
+ 0x40000000, 0x7FFFFFFF, 0xFFFFFFFF, 0x80000000,
+ ],
+ f32: [
+ 0, -0, 1, -1, 0.25, 0.125, 0.9, -0.9, 1.414,
+ 0x7F, -0x80, -0x8000, -0x80000000,
+ 0x7FFF, 0x7FFFFFFF, Infinity, -Infinity, NaN,
+ ],
+ f64: [
+ 0, -0, 1, -1, 0.25, 0.125, 0.9, -0.9, 1.414,
+ 0x7F, -0x80, -0x8000, -0x80000000,
+ 0x7FFF, 0x7FFFFFFF, Infinity, -Infinity, NaN,
+ ],
+ };
+ var converts = {
+ i32: function(x) { return x | 0; },
+ u32: function(x) { return x >>> 0; },
+ f32: function(x) { return Math.fround(x); },
+ f64: function(x) { return x; },
+ };
+ var two_args = {atan2: true, pow: true, imul: true,
+ min: true, max: true};
+ var funcs = {
+ ceil: ['f32', 'f64'],
+ floor: ['f32', 'f64'],
+ sqrt: ['f32', 'f64'],
+ abs: ['i32', 'f32', 'f64'],
+ acos: ['f64'],
+ asin: ['f64'],
+ atan: ['f64'],
+ cos: ['f64'],
+ sin: ['f64'],
+ tan: ['f64'],
+ exp: ['f64'],
+ log: ['f64'],
+ imul: ['i32', 'u32'],
+ fround: ['i32', 'u32', 'f32', 'f64'],
+ min: ['i32', 'f32', 'f64'],
+ max: ['i32', 'f32', 'f64'],
+ };
+ var per_func_equals = {
+ // JS uses fdlib for these, so they may not match.
+ // ECMAscript does not required them to have a particular precision.
+ exp_f64: function(x, y) { return looseEqual(x, y, 1e55); },
+ sqrt_f32: function(x, y) { return looseEqual(x, y, 1e-5); },
+ cos_f64: looseEqual,
+ sin_f64: looseEqual,
+ tan_f64: looseEqual,
+ // TODO(bradnelson):
+ // Figure out why some builds (avx2, rel_ng) return a uint.
+ imul_u32: function(x, y) { return (x | 0) === (y | 0); },
+ };
+ for (var func in funcs) {
+ var types = funcs[func];
+ for (var i = 0; i < types.length; i++) {
+ var type = types[i];
+ var interesting = values[type];
+ for (var j = 0; j < interesting.length; j++) {
+ for (var k = 0; k < interesting.length; k++) {
+ var val0 = interesting[j];
+ var val1 = interesting[k];
+ var name = func + '_' + type;
+ if (func === 'fround') {
+ // fround returns f32 regardless of input.
+ var expected = Math[func](val0);
+ var actual = m[name](val0);
+ } else if (two_args[func]) {
+ var expected = converts[type](Math[func](val0, val1));
+ var actual = m[name](val0, val1);
+ } else {
+ var expected = converts[type](Math[func](val0, val1));
+ var actual = m[name](val0, val1);
+ }
+ var compare = per_func_equals[name];
+ if (compare === undefined) {
+ compare = plainEqual;
+ }
+ assertTrue(typeof(compare) === 'function');
+ if (!compare(expected, actual)) {
+ print(expected + ' !== ' + actual + ' for ' + name +
+ ' with input ' + val0 + ' ' + val1);
+ assertTrue(false);
+ }
+ }
+ }
+ }
+ }
+})();
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-u32.js b/deps/v8/test/mjsunit/wasm/asm-wasm-u32.js
new file mode 100644
index 0000000000..514ddefb7e
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-u32.js
@@ -0,0 +1,225 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+function WrapInAsmModule(func) {
+ function MODULE_NAME(stdlib) {
+ "use asm";
+ var imul = stdlib.Math.imul;
+
+ FUNC_BODY
+ return {main: FUNC_NAME};
+ }
+
+ var source = MODULE_NAME.toString()
+ .replace(/MODULE_NAME/g, func.name + "_module")
+ .replace(/FUNC_BODY/g, func.toString())
+ .replace(/FUNC_NAME/g, func.name);
+ return eval("(" + source + ")");
+}
+
+function RunThreeWayTest(asmfunc, expect) {
+ var asm_source = asmfunc.toString();
+ var nonasm_source = asm_source.replace(new RegExp("use asm"), "");
+ var stdlib = {Math: Math};
+
+ var js_module = eval("(" + nonasm_source + ")")(stdlib);
+ print("Testing " + asmfunc.name + " (js)...");
+ expect(js_module);
+
+ print("Testing " + asmfunc.name + " (asm.js)...");
+ var asm_module = asmfunc(stdlib);
+ expect(asm_module);
+
+ print("Testing " + asmfunc.name + " (wasm)...");
+ var wasm_module = Wasm.instantiateModuleFromAsm(asm_source, stdlib);
+ expect(wasm_module);
+}
+
+const imul = Math.imul;
+
+function u32_add(a, b) {
+ a = a | 0;
+ b = b | 0;
+ return +(((a >>> 0) + (b >>> 0)) >>> 0);
+}
+
+function u32_sub(a, b) {
+ a = a | 0;
+ b = b | 0;
+ return +(((a >>> 0) - (b >>> 0)) >>> 0);
+}
+
+function u32_mul(a, b) {
+ a = a | 0;
+ b = b | 0;
+ return +imul(a >>> 0, b >>> 0);
+}
+
+function u32_div(a, b) {
+ a = a | 0;
+ b = b | 0;
+ return +(((a >>> 0) / (b >>> 0)) >>> 0);
+}
+
+function u32_mod(a, b) {
+ a = a | 0;
+ b = b | 0;
+ return +(((a >>> 0) % (b >>> 0)) >>> 0);
+}
+
+function u32_and(a, b) {
+ a = a | 0;
+ b = b | 0;
+ return +((a >>> 0) & (b >>> 0));
+}
+
+function u32_or(a, b) {
+ a = a | 0;
+ b = b | 0;
+ return +((a >>> 0) | (b >>> 0));
+}
+
+function u32_xor(a, b) {
+ a = a | 0;
+ b = b | 0;
+ return +((a >>> 0) ^ (b >>> 0));
+}
+
+function u32_shl(a, b) {
+ a = a | 0;
+ b = b | 0;
+ return +((a >>> 0) << (b >>> 0));
+}
+
+function u32_shr(a, b) {
+ a = a | 0;
+ b = b | 0;
+ return +((a >>> 0) >> (b >>> 0));
+}
+
+function u32_sar(a, b) {
+ a = a | 0;
+ b = b | 0;
+ return ((a >>> 0) >>> (b >>> 0)) | 0;
+}
+
+function u32_eq(a, b) {
+ a = a | 0;
+ b = b | 0;
+ if ((a >>> 0) == (b >>> 0)) {
+ return 1;
+ }
+ return 0;
+}
+
+function u32_ne(a, b) {
+ a = a | 0;
+ b = b | 0;
+ if ((a >>> 0) < (b >>> 0)) {
+ return 1;
+ }
+ return 0;
+}
+
+function u32_lt(a, b) {
+ a = a | 0;
+ b = b | 0;
+ if ((a >>> 0) < (b >>> 0)) {
+ return 1;
+ }
+ return 0;
+}
+
+function u32_lteq(a, b) {
+ a = a | 0;
+ b = b | 0;
+ if ((a >>> 0) <= (b >>> 0)) {
+ return 1;
+ }
+ return 0;
+}
+
+function u32_gt(a, b) {
+ a = a | 0;
+ b = b | 0;
+ if ((a >>> 0) > (b >>> 0)) {
+ return 1;
+ }
+ return 0;
+}
+
+function u32_gteq(a, b) {
+ a = a | 0;
+ b = b | 0;
+ if ((a >>> 0) >= (b >>> 0)) {
+ return 1;
+ }
+ return 0;
+}
+
+
+var inputs = [
+ 0, 1, 2, 3, 4,
+ 10, 20, 30, 31, 32, 33, 100, 2000,
+ 30000, 400000, 5000000,
+ 100000000, 2000000000,
+ 2147483646,
+ 2147483647,
+ 2147483648,
+ 2147483649,
+ 0x273a798e, 0x187937a3, 0xece3af83, 0x5495a16b, 0x0b668ecc, 0x11223344,
+ 0x0000009e, 0x00000043, 0x0000af73, 0x0000116b, 0x00658ecc, 0x002b3b4c,
+ 0x88776655, 0x70000000, 0x07200000, 0x7fffffff, 0x56123761, 0x7fffff00,
+ 0x761c4761, 0x80000000, 0x88888888, 0xa0000000, 0xdddddddd, 0xe0000000,
+ 0xeeeeeeee, 0xfffffffd, 0xf0000000, 0x007fffff, 0x003fffff, 0x001fffff,
+ 0x000fffff, 0x0007ffff, 0x0003ffff, 0x0001ffff, 0x0000ffff, 0x00007fff,
+ 0x00003fff, 0x00001fff, 0x00000fff, 0x000007ff, 0x000003ff, 0x000001ff,
+ -1, -2, -3, -4,
+ -10, -20, -30, -31, -32, -33, -100, -2000,
+ -30000, -400000, -5000000,
+ -100000000, -2000000000,
+ -2147483646,
+ -2147483647,
+ -2147483648,
+ -2147483649,
+];
+
+var funcs = [
+ u32_add,
+ u32_sub,
+ u32_div,
+ u32_mod,
+// TODO(titzer): u32_mul crashes turbofan in asm.js mode
+ u32_and,
+ u32_or,
+ u32_xor,
+ u32_shl,
+ u32_shr,
+ u32_sar,
+ u32_eq,
+ u32_ne,
+ u32_lt,
+ u32_lteq,
+ u32_gt,
+ u32_gteq,
+ // TODO(titzer): u32_min
+ // TODO(titzer): u32_max
+ // TODO(titzer): u32_abs
+];
+
+(function () {
+ for (func of funcs) {
+ RunThreeWayTest(WrapInAsmModule(func), function (module) {
+ for (a of inputs) {
+ for (b of inputs) {
+ var expected = func(a, b);
+ assertEquals(expected, module.main(a, b));
+ }
+ }
+ });
+ }
+
+})();
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm.js b/deps/v8/test/mjsunit/wasm/asm-wasm.js
index 3f936f5f21..2efb006436 100644
--- a/deps/v8/test/mjsunit/wasm/asm-wasm.js
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm.js
@@ -4,6 +4,12 @@
// Flags: --expose-wasm
+function assertWasm(expected, func, ffi) {
+ print("Testing " + func.name + "...");
+ assertEquals(expected, Wasm.instantiateModuleFromAsm(
+ func.toString(), ffi).caller());
+}
+
function EmptyTest() {
"use asm";
function caller() {
@@ -15,8 +21,7 @@ function EmptyTest() {
return {caller: caller};
}
-assertEquals(11, _WASMEXP_.instantiateModuleFromAsm(
- EmptyTest.toString()).caller());
+assertWasm(11, EmptyTest);
function IntTest() {
@@ -37,8 +42,7 @@ function IntTest() {
return {caller: caller};
}
-assertEquals(101, _WASMEXP_.instantiateModuleFromAsm(
- IntTest.toString()).caller());
+assertWasm(101,IntTest);
function Float64Test() {
@@ -63,8 +67,7 @@ function Float64Test() {
return {caller: caller};
}
-assertEquals(1, _WASMEXP_.instantiateModuleFromAsm(
- Float64Test.toString()).caller());
+assertWasm(1, Float64Test);
function BadModule() {
@@ -84,7 +87,7 @@ function BadModule() {
}
assertThrows(function() {
- _WASMEXP_.instantiateModuleFromAsm(BadModule.toString()).caller();
+ Wasm.instantiateModuleFromAsm(BadModule.toString()).caller();
});
@@ -105,8 +108,22 @@ function TestReturnInBlock() {
return {caller: caller};
}
-assertEquals(1, _WASMEXP_.instantiateModuleFromAsm(
- TestReturnInBlock.toString()).caller());
+assertWasm(1, TestReturnInBlock);
+
+
+function TestAddSimple() {
+ "use asm";
+
+ function caller() {
+ var x = 0;
+ x = (x + 1)|0;
+ return x|0;
+ }
+
+ return {caller: caller};
+}
+
+assertWasm(1, TestAddSimple);
function TestWhileSimple() {
@@ -123,8 +140,7 @@ function TestWhileSimple() {
return {caller: caller};
}
-assertEquals(5, _WASMEXP_.instantiateModuleFromAsm(
- TestWhileSimple.toString()).caller());
+assertWasm(5, TestWhileSimple);
function TestWhileWithoutBraces() {
@@ -140,8 +156,7 @@ function TestWhileWithoutBraces() {
return {caller: caller};
}
-assertEquals(4, _WASMEXP_.instantiateModuleFromAsm(
- TestWhileWithoutBraces.toString()).caller());
+assertWasm(4, TestWhileWithoutBraces);
function TestReturnInWhile() {
@@ -159,8 +174,7 @@ function TestReturnInWhile() {
return {caller: caller};
}
-assertEquals(6, _WASMEXP_.instantiateModuleFromAsm(
- TestReturnInWhile.toString()).caller());
+assertWasm(6, TestReturnInWhile);
function TestReturnInWhileWithoutBraces() {
@@ -176,9 +190,7 @@ function TestReturnInWhileWithoutBraces() {
return {caller: caller};
}
-assertEquals(
- 7, _WASMEXP_.instantiateModuleFromAsm(
- TestReturnInWhileWithoutBraces.toString()).caller());
+assertWasm(7, TestReturnInWhileWithoutBraces);
function TestBreakInWhile() {
@@ -194,8 +206,7 @@ function TestBreakInWhile() {
return {caller: caller};
}
-assertEquals(8, _WASMEXP_.instantiateModuleFromAsm(
- TestBreakInWhile.toString()).caller());
+assertWasm(8, TestBreakInWhile);
function TestBreakInNestedWhile() {
@@ -218,8 +229,7 @@ function TestBreakInNestedWhile() {
return {caller: caller};
}
-assertEquals(9, _WASMEXP_.instantiateModuleFromAsm(
- TestBreakInNestedWhile.toString()).caller());
+assertWasm(9, TestBreakInNestedWhile);
function TestBreakInBlock() {
@@ -240,8 +250,7 @@ function TestBreakInBlock() {
return {caller: caller};
}
-assertEquals(10, _WASMEXP_.instantiateModuleFromAsm(
- TestBreakInBlock.toString()).caller());
+assertWasm(10, TestBreakInBlock);
function TestBreakInNamedWhile() {
@@ -261,8 +270,7 @@ function TestBreakInNamedWhile() {
return {caller: caller};
}
-assertEquals(11, _WASMEXP_.instantiateModuleFromAsm(
- TestBreakInNamedWhile.toString()).caller());
+assertWasm(11, TestBreakInNamedWhile);
function TestContinue() {
@@ -284,8 +292,7 @@ function TestContinue() {
return {caller: caller};
}
-assertEquals(-5, _WASMEXP_.instantiateModuleFromAsm(
- TestContinue.toString()).caller());
+assertWasm(-5, TestContinue);
function TestContinueInNamedWhile() {
@@ -312,8 +319,7 @@ function TestContinueInNamedWhile() {
return {caller: caller};
}
-assertEquals(20, _WASMEXP_.instantiateModuleFromAsm(
- TestContinueInNamedWhile.toString()).caller());
+assertWasm(20, TestContinueInNamedWhile);
function TestNot() {
@@ -327,8 +333,7 @@ function TestNot() {
return {caller:caller};
}
-assertEquals(1, _WASMEXP_.instantiateModuleFromAsm(
- TestNot.toString()).caller());
+assertWasm(1, TestNot);
function TestNotEquals() {
@@ -345,8 +350,7 @@ function TestNotEquals() {
return {caller:caller};
}
-assertEquals(21, _WASMEXP_.instantiateModuleFromAsm(
- TestNotEquals.toString()).caller());
+assertWasm(21, TestNotEquals);
function TestUnsignedComparison() {
@@ -363,8 +367,7 @@ function TestUnsignedComparison() {
return {caller:caller};
}
-assertEquals(22, _WASMEXP_.instantiateModuleFromAsm(
- TestUnsignedComparison.toString()).caller());
+assertWasm(22, TestUnsignedComparison);
function TestMixedAdd() {
@@ -386,8 +389,7 @@ function TestMixedAdd() {
return {caller:caller};
}
-assertEquals(23, _WASMEXP_.instantiateModuleFromAsm(
- TestMixedAdd.toString()).caller());
+assertWasm(23, TestMixedAdd);
function TestInt32HeapAccess(stdlib, foreign, buffer) {
@@ -406,16 +408,14 @@ function TestInt32HeapAccess(stdlib, foreign, buffer) {
return {caller: caller};
}
-assertEquals(7, _WASMEXP_.instantiateModuleFromAsm(
- TestInt32HeapAccess.toString()).caller());
+assertWasm(7, TestInt32HeapAccess);
function TestInt32HeapAccessExternal() {
var memory = new ArrayBuffer(1024);
var memory_int32 = new Int32Array(memory);
- var module = _WASMEXP_.instantiateModuleFromAsm(
+ var module = Wasm.instantiateModuleFromAsm(
TestInt32HeapAccess.toString(), null, memory);
- module.__init__();
assertEquals(7, module.caller());
assertEquals(7, memory_int32[2]);
}
@@ -438,11 +438,10 @@ function TestHeapAccessIntTypes() {
code = code.replace(/>> 2/g, types[i][2]);
var memory = new ArrayBuffer(1024);
var memory_view = new types[i][0](memory);
- var module = _WASMEXP_.instantiateModuleFromAsm(code, null, memory);
- module.__init__();
+ var module = Wasm.instantiateModuleFromAsm(code, null, memory);
assertEquals(7, module.caller());
assertEquals(7, memory_view[2]);
- assertEquals(7, _WASMEXP_.instantiateModuleFromAsm(code).caller());
+ assertEquals(7, Wasm.instantiateModuleFromAsm(code).caller());
}
}
@@ -460,7 +459,6 @@ function TestFloatHeapAccess(stdlib, foreign, buffer) {
var j = 8;
var v = 6.0;
- // TODO(bradnelson): Add float32 when asm-wasm supports it.
f64[2] = v + 1.0;
f64[i >> 3] = +f64[2] + 1.0;
f64[j >> 3] = +f64[j >> 3] + 1.0;
@@ -471,16 +469,15 @@ function TestFloatHeapAccess(stdlib, foreign, buffer) {
return {caller: caller};
}
-assertEquals(1, _WASMEXP_.instantiateModuleFromAsm(
+assertEquals(1, Wasm.instantiateModuleFromAsm(
TestFloatHeapAccess.toString()).caller());
function TestFloatHeapAccessExternal() {
var memory = new ArrayBuffer(1024);
var memory_float64 = new Float64Array(memory);
- var module = _WASMEXP_.instantiateModuleFromAsm(
+ var module = Wasm.instantiateModuleFromAsm(
TestFloatHeapAccess.toString(), null, memory);
- module.__init__();
assertEquals(1, module.caller());
assertEquals(9.0, memory_float64[1]);
}
@@ -502,8 +499,7 @@ function TestConvertI32() {
return {caller:caller};
}
-assertEquals(24, _WASMEXP_.instantiateModuleFromAsm(
- TestConvertI32.toString()).caller());
+assertWasm(24, TestConvertI32);
function TestConvertF64FromInt() {
@@ -511,7 +507,7 @@ function TestConvertF64FromInt() {
function caller() {
var a = 1;
- if ((+(a + a)) > 1.5) {
+ if ((+((a + a)|0)) > 1.5) {
return 25;
}
return 0;
@@ -520,8 +516,7 @@ function TestConvertF64FromInt() {
return {caller:caller};
}
-assertEquals(25, _WASMEXP_.instantiateModuleFromAsm(
- TestConvertF64FromInt.toString()).caller());
+assertWasm(25, TestConvertF64FromInt);
function TestConvertF64FromUnsigned() {
@@ -530,7 +525,7 @@ function TestConvertF64FromUnsigned() {
function caller() {
var a = 0xffffffff;
if ((+(a>>>0)) > 0.0) {
- if((+a) < 0.0) {
+ if((+(a|0)) < 0.0) {
return 26;
}
}
@@ -540,8 +535,7 @@ function TestConvertF64FromUnsigned() {
return {caller:caller};
}
-assertEquals(26, _WASMEXP_.instantiateModuleFromAsm(
- TestConvertF64FromUnsigned.toString()).caller());
+assertWasm(26, TestConvertF64FromUnsigned);
function TestModInt() {
@@ -556,8 +550,7 @@ function TestModInt() {
return {caller:caller};
}
-assertEquals(-27, _WASMEXP_.instantiateModuleFromAsm(
- TestModInt.toString()).caller());
+assertWasm(-27,TestModInt);
function TestModUnsignedInt() {
@@ -572,8 +565,7 @@ function TestModUnsignedInt() {
return {caller:caller};
}
-assertEquals(8, _WASMEXP_.instantiateModuleFromAsm(
- TestModUnsignedInt.toString()).caller());
+assertWasm(8, TestModUnsignedInt);
function TestModDouble() {
@@ -591,13 +583,9 @@ function TestModDouble() {
return {caller:caller};
}
-assertEquals(28, _WASMEXP_.instantiateModuleFromAsm(
- TestModDouble.toString()).caller());
+assertWasm(28, TestModDouble);
-/*
-TODO: Fix parsing of negative doubles
- Fix code to use trunc instead of casts
function TestModDoubleNegative() {
"use asm";
@@ -613,11 +601,9 @@ function TestModDoubleNegative() {
return {caller:caller};
}
-assertEquals(28, _WASMEXP_.instantiateModuleFromAsm(
- TestModDoubleNegative.toString()).caller());
-*/
-
+assertWasm(28, TestModDoubleNegative);
+(function () {
function TestNamedFunctions() {
"use asm";
@@ -637,11 +623,12 @@ function TestNamedFunctions() {
add:add};
}
-var module = _WASMEXP_.instantiateModuleFromAsm(TestNamedFunctions.toString());
+var module = Wasm.instantiateModuleFromAsm(TestNamedFunctions.toString());
module.init();
assertEquals(77.5, module.add());
+})();
-
+(function () {
function TestGlobalsWithInit() {
"use asm";
@@ -655,10 +642,9 @@ function TestGlobalsWithInit() {
return {add:add};
}
-var module = _WASMEXP_.instantiateModuleFromAsm(TestGlobalsWithInit.toString());
-module.__init__();
+var module = Wasm.instantiateModuleFromAsm(TestGlobalsWithInit.toString());
assertEquals(77.5, module.add());
-
+})();
function TestForLoop() {
"use asm"
@@ -675,8 +661,7 @@ function TestForLoop() {
return {caller:caller};
}
-assertEquals(54, _WASMEXP_.instantiateModuleFromAsm(
- TestForLoop.toString()).caller());
+assertWasm(54, TestForLoop);
function TestForLoopWithoutInit() {
@@ -694,8 +679,7 @@ function TestForLoopWithoutInit() {
return {caller:caller};
}
-assertEquals(100, _WASMEXP_.instantiateModuleFromAsm(
- TestForLoopWithoutInit.toString()).caller());
+assertWasm(100,TestForLoopWithoutInit);
function TestForLoopWithoutCondition() {
@@ -716,8 +700,7 @@ function TestForLoopWithoutCondition() {
return {caller:caller};
}
-assertEquals(66, _WASMEXP_.instantiateModuleFromAsm(
- TestForLoopWithoutCondition.toString()).caller());
+assertWasm(66, TestForLoopWithoutCondition);
function TestForLoopWithoutNext() {
@@ -734,8 +717,7 @@ function TestForLoopWithoutNext() {
return {caller:caller};
}
-assertEquals(41, _WASMEXP_.instantiateModuleFromAsm(
- TestForLoopWithoutNext.toString()).caller());
+assertWasm(41, TestForLoopWithoutNext);
function TestForLoopWithoutBody() {
@@ -751,8 +733,7 @@ function TestForLoopWithoutBody() {
return {caller:caller};
}
-assertEquals(45, _WASMEXP_.instantiateModuleFromAsm(
- TestForLoopWithoutBody.toString()).caller());
+assertWasm(45, TestForLoopWithoutBody);
function TestDoWhile() {
@@ -771,8 +752,7 @@ function TestDoWhile() {
return {caller:caller};
}
-assertEquals(84, _WASMEXP_.instantiateModuleFromAsm(
- TestDoWhile.toString()).caller());
+assertWasm(84, TestDoWhile);
function TestConditional() {
@@ -786,8 +766,7 @@ function TestConditional() {
return {caller:caller};
}
-assertEquals(41, _WASMEXP_.instantiateModuleFromAsm(
- TestConditional.toString()).caller());
+assertWasm(41, TestConditional);
function TestSwitch() {
@@ -815,8 +794,7 @@ function TestSwitch() {
return {caller:caller};
}
-assertEquals(23, _WASMEXP_.instantiateModuleFromAsm(
- TestSwitch.toString()).caller());
+assertWasm(23, TestSwitch);
function TestSwitchFallthrough() {
@@ -838,8 +816,7 @@ function TestSwitchFallthrough() {
return {caller:caller};
}
-assertEquals(42, _WASMEXP_.instantiateModuleFromAsm(
- TestSwitchFallthrough.toString()).caller());
+assertWasm(42, TestSwitchFallthrough);
function TestNestedSwitch() {
@@ -865,10 +842,10 @@ function TestNestedSwitch() {
return {caller:caller};
}
-assertEquals(43, _WASMEXP_.instantiateModuleFromAsm(
- TestNestedSwitch.toString()).caller());
+assertWasm(43, TestNestedSwitch);
+(function () {
function TestInitFunctionWithNoGlobals() {
"use asm";
function caller() {
@@ -877,12 +854,12 @@ function TestInitFunctionWithNoGlobals() {
return {caller};
}
-var module = _WASMEXP_.instantiateModuleFromAsm(
+var module = Wasm.instantiateModuleFromAsm(
TestInitFunctionWithNoGlobals.toString());
-module.__init__();
assertEquals(51, module.caller());
+})();
-
+(function () {
function TestExportNameDifferentFromFunctionName() {
"use asm";
function caller() {
@@ -891,11 +868,10 @@ function TestExportNameDifferentFromFunctionName() {
return {alt_caller:caller};
}
-var module = _WASMEXP_.instantiateModuleFromAsm(
+var module = Wasm.instantiateModuleFromAsm(
TestExportNameDifferentFromFunctionName.toString());
-module.__init__();
assertEquals(55, module.alt_caller());
-
+})();
function TestFunctionTableSingleFunction() {
"use asm";
@@ -913,8 +889,7 @@ function TestFunctionTableSingleFunction() {
return {caller:caller};
}
-assertEquals(71, _WASMEXP_.instantiateModuleFromAsm(
- TestFunctionTableSingleFunction.toString()).caller());
+assertWasm(71, TestFunctionTableSingleFunction);
function TestFunctionTableMultipleFunctions() {
@@ -944,10 +919,10 @@ function TestFunctionTableMultipleFunctions() {
return {caller:caller};
}
-assertEquals(73, _WASMEXP_.instantiateModuleFromAsm(
- TestFunctionTableMultipleFunctions.toString()).caller());
+assertWasm(73, TestFunctionTableMultipleFunctions);
+(function () {
function TestFunctionTable() {
"use asm";
@@ -987,14 +962,14 @@ function TestFunctionTable() {
return {caller:caller};
}
-var module = _WASMEXP_.instantiateModuleFromAsm(TestFunctionTable.toString());
-module.__init__();
+var module = Wasm.instantiateModuleFromAsm(TestFunctionTable.toString());
assertEquals(55, module.caller(0, 0, 33, 22));
assertEquals(11, module.caller(0, 1, 33, 22));
assertEquals(9, module.caller(0, 2, 54, 45));
assertEquals(99, module.caller(0, 3, 54, 45));
assertEquals(23, module.caller(0, 4, 12, 11));
assertEquals(31, module.caller(1, 0, 30, 11));
+})();
function TestForeignFunctions() {
@@ -1033,10 +1008,9 @@ function TestForeignFunctions() {
var foreign = new ffi(23);
- var module = _WASMEXP_.instantiateModuleFromAsm(AsmModule.toString(),
+ var module = Wasm.instantiateModuleFromAsm(AsmModule.toString(),
foreign, null);
- module.__init__();
assertEquals(103, module.caller(23, 103));
}
@@ -1073,10 +1047,9 @@ function TestForeignFunctionMultipleUse() {
var foreign = new ffi();
- var module = _WASMEXP_.instantiateModuleFromAsm(AsmModule.toString(),
+ var module = Wasm.instantiateModuleFromAsm(AsmModule.toString(),
foreign, null);
- module.__init__();
assertEquals(89, module.caller(83, 83.25));
}
@@ -1112,9 +1085,9 @@ function TestForeignVariables() {
}
function TestCase(env, i1, f1, i2, f2) {
- var module = _WASMEXP_.instantiateModuleFromAsm(
+ print("Testing foreign variables...");
+ var module = Wasm.instantiateModuleFromAsm(
AsmModule.toString(), env);
- module.__init__();
assertEquals(i1, module.geti1());
assertEquals(f1, module.getf1());
assertEquals(i2, module.geti2());
@@ -1205,7 +1178,7 @@ TestForeignVariables();
return {load: load, iload: iload, store: store, storeb: storeb};
}
- var m = _WASMEXP_.instantiateModuleFromAsm(
+ var m = Wasm.instantiateModuleFromAsm(
TestByteHeapAccessCompat.toString());
m.store(0, 20);
m.store(4, 21);
@@ -1222,25 +1195,19 @@ TestForeignVariables();
})();
-(function TestGlobalBlock() {
- function Module(stdlib, foreign, buffer) {
- "use asm";
-
- var x = foreign.x | 0, y = foreign.y | 0;
+function TestGlobalBlock(stdlib, foreign, buffer) {
+ "use asm";
- function test() {
- return (x + y) | 0;
- }
+ var x = foreign.x | 0, y = foreign.y | 0;
- return {test: test};
+ function test() {
+ return (x + y) | 0;
}
- var m = _WASMEXP_.instantiateModuleFromAsm(
- Module.toString(), { x: 4, y: 11 });
- m.__init__();
- assertEquals(15, m.test());
-})();
+ return {caller: test};
+}
+assertWasm(15, TestGlobalBlock, { x: 4, y: 11 });
(function TestComma() {
function CommaModule() {
@@ -1261,92 +1228,268 @@ TestForeignVariables();
return {ifunc: ifunc, dfunc: dfunc};
}
- var m = _WASMEXP_.instantiateModuleFromAsm(CommaModule.toString());
+ var m = Wasm.instantiateModuleFromAsm(CommaModule.toString());
assertEquals(123, m.ifunc(456.7, 123));
assertEquals(123.4, m.dfunc(456, 123.4));
})();
-(function TestOr() {
- function Module() {
+function TestFloatAsDouble(stdlib) {
+ "use asm";
+ var fround = stdlib.Math.fround;
+ function func() {
+ var x = fround(1.0);
+ return +fround(x);
+ }
+ return {caller: func};
+}
+assertWasm(1, TestFloatAsDouble);
+
+
+function TestOr() {
+ "use asm";
+ function func() {
+ var x = 1;
+ var y = 2;
+ return (x | y) | 0;
+ }
+ return {caller: func};
+}
+
+assertWasm(3, TestOr);
+
+
+function TestAnd() {
+ "use asm";
+ function func() {
+ var x = 3;
+ var y = 2;
+ return (x & y) | 0;
+ }
+ return {caller: func};
+}
+
+assertWasm(2, TestAnd);
+
+
+function TestXor() {
+ "use asm";
+ function func() {
+ var x = 3;
+ var y = 2;
+ return (x ^ y) | 0;
+ }
+ return {caller: func};
+}
+
+assertWasm(1, TestXor);
+
+
+(function TestIntishAssignment() {
+ function Module(stdlib, foreign, heap) {
"use asm";
+ var HEAP32 = new stdlib.Int32Array(heap);
function func() {
- var x = 1;
- var y = 2;
- return (x | y) | 0;
+ var a = 1;
+ var b = 2;
+ HEAP32[0] = a + b;
+ return HEAP32[0] | 0;
}
return {func: func};
}
- var m = _WASMEXP_.instantiateModuleFromAsm(Module.toString());
+ var m = Wasm.instantiateModuleFromAsm(Module.toString());
assertEquals(3, m.func());
})();
-(function TestAnd() {
- function Module() {
+(function TestFloatishAssignment() {
+ function Module(stdlib, foreign, heap) {
"use asm";
+ var HEAPF32 = new stdlib.Float32Array(heap);
+ var fround = stdlib.Math.fround;
function func() {
- var x = 3;
- var y = 2;
- return (x & y) | 0;
+ var a = fround(1.0);
+ var b = fround(2.0);
+ HEAPF32[0] = a + b;
+ return +HEAPF32[0];
}
return {func: func};
}
- var m = _WASMEXP_.instantiateModuleFromAsm(Module.toString());
- assertEquals(2, m.func());
+ var m = Wasm.instantiateModuleFromAsm(Module.toString());
+ assertEquals(3, m.func());
})();
-(function TestXor() {
- function Module() {
+(function TestDoubleToFloatAssignment() {
+ function Module(stdlib, foreign, heap) {
+ "use asm";
+ var HEAPF32 = new stdlib.Float32Array(heap);
+ var fround = stdlib.Math.fround;
+ function func() {
+ var a = 1.23;
+ HEAPF32[0] = a;
+ return +HEAPF32[0];
+ }
+ return {func: func};
+ }
+
+ var m = Wasm.instantiateModuleFromAsm(Module.toString());
+ assertEquals(1.23, m.func());
+});
+
+
+(function TestIntegerMultiplyBothWays() {
+ function Module(stdlib, foreign, heap) {
"use asm";
function func() {
- var x = 3;
- var y = 2;
- return (x ^ y) | 0;
+ var a = 1;
+ return ((a * 3) + (4 * a)) | 0;
}
return {func: func};
}
- var m = _WASMEXP_.instantiateModuleFromAsm(Module.toString());
- assertEquals(1, m.func());
+ var m = Wasm.instantiateModuleFromAsm(Module.toString());
+ assertEquals(7, m.func());
})();
-(function TestIntishAssignment() {
+(function TestBadMultiplyIntish() {
function Module(stdlib, foreign, heap) {
"use asm";
- var HEAP32 = new stdlib.Int32Array(heap);
function func() {
var a = 1;
- var b = 2;
- HEAP32[0] = a + b;
- return HEAP32[0] | 0;
+ return ((a + a) * 4) | 0;
}
return {func: func};
}
-
- var m = _WASMEXP_.instantiateModuleFromAsm(Module.toString());
- assertEquals(3, m.func());
+ assertThrows(function() {
+ Wasm.instantiateModuleFromAsm(Module.toString());
+ });
})();
-(function TestFloatishAssignment() {
+(function TestBadCastFromInt() {
function Module(stdlib, foreign, heap) {
"use asm";
- var HEAPF32 = new stdlib.Float32Array(heap);
- var fround = stdlib.Math.fround;
function func() {
- var a = fround(1.0);
- var b = fround(2.0);
- HEAPF32[0] = a + b;
- return +HEAPF32[0];
+ var a = 1;
+ return +a;
}
return {func: func};
}
+ assertThrows(function() {
+ Wasm.instantiateModuleFromAsm(Module.toString());
+ });
+})();
- var m = _WASMEXP_.instantiateModuleFromAsm(Module.toString());
+
+(function TestAndNegative() {
+ function Module() {
+ "use asm";
+ function func() {
+ var x = 1;
+ var y = 2;
+ var z = 0;
+ z = x + y & -1;
+ return z | 0;
+ }
+ return {func: func};
+ }
+
+ var m = Wasm.instantiateModuleFromAsm(Module.toString());
assertEquals(3, m.func());
-}) // TODO(bradnelson): Enable when Math.fround implementation lands.
+})();
+
+
+(function TestNegativeDouble() {
+ function Module() {
+ "use asm";
+ function func() {
+ var x = -(34359738368.25);
+ var y = -2.5;
+ return +(x + y);
+ }
+ return {func: func};
+ }
+
+ var m = Wasm.instantiateModuleFromAsm(Module.toString());
+ assertEquals(-34359738370.75, m.func());
+})();
+
+
+(function TestBadAndDouble() {
+ function Module() {
+ "use asm";
+ function func() {
+ var x = 1.0;
+ var y = 2.0;
+ return (x & y) | 0;
+ }
+ return {func: func};
+ }
+
+ assertThrows(function() {
+ Wasm.instantiateModuleFromAsm(Module.toString());
+ });
+})();
+
+
+(function TestAndIntAndHeapValue() {
+ function Module(stdlib, foreign, buffer) {
+ "use asm";
+ var HEAP32 = new stdlib.Int32Array(buffer);
+ function func() {
+ var x = 0;
+ x = HEAP32[0] & -1;
+ return x | 0;
+ }
+ return {func: func};
+ }
+
+ var m = Wasm.instantiateModuleFromAsm(Module.toString());
+ assertEquals(0, m.func());
+})();
+
+(function TestOutOfBoundsConversion() {
+ function asmModule($a,$b,$c){'use asm';
+ function aaa() {
+ var f = 0.0;
+ var a = 0;
+ f = 5616315000.000001;
+ a = ~~f >>>0;
+ return a | 0;
+ }
+ return { main : aaa };
+ }
+ var wasm = Wasm.instantiateModuleFromAsm(asmModule.toString());
+ assertEquals(1321347704, wasm.main());
+})();
+
+(function TestUnsignedLiterals() {
+ function asmModule() {
+ "use asm";
+ function u0xffffffff() {
+ var f = 0xffffffff;
+ return +(f >>> 0);
+ }
+ function u0x80000000() {
+ var f = 0x80000000;
+ return +(f >>> 0);
+ }
+ function u0x87654321() {
+ var f = 0x87654321;
+ return +(f >>> 0);
+ }
+ return {
+ u0xffffffff: u0xffffffff,
+ u0x80000000: u0x80000000,
+ u0x87654321: u0x87654321,
+ };
+ }
+ var wasm = Wasm.instantiateModuleFromAsm(asmModule.toString());
+ assertEquals(0xffffffff, wasm.u0xffffffff());
+ assertEquals(0x80000000, wasm.u0x80000000());
+ assertEquals(0x87654321, wasm.u0x87654321());
+})();
diff --git a/deps/v8/test/mjsunit/wasm/calls.js b/deps/v8/test/mjsunit/wasm/calls.js
index 98ad657f52..11cc92a8ec 100644
--- a/deps/v8/test/mjsunit/wasm/calls.js
+++ b/deps/v8/test/mjsunit/wasm/calls.js
@@ -5,141 +5,100 @@
// Flags: --expose-wasm
load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+function assertModule(module, memsize) {
+ // Check the module exists.
+ assertFalse(module === undefined);
+ assertFalse(module === null);
+ assertFalse(module === 0);
+ assertEquals("object", typeof module);
+
+ // Check the memory is an ArrayBuffer.
+ var mem = module.exports.memory;
+ assertFalse(mem === undefined);
+ assertFalse(mem === null);
+ assertFalse(mem === 0);
+ assertEquals("object", typeof mem);
+ assertTrue(mem instanceof ArrayBuffer);
+ for (var i = 0; i < 4; i++) {
+ module.exports.memory = 0; // should be ignored
+ assertEquals(mem, module.exports.memory);
+ }
+
+ assertEquals(memsize, module.exports.memory.byteLength);
+}
-var module = (function () {
- var kBodySize = 5;
- var kNameOffset = 21 + kBodySize + 1;
-
- return _WASMEXP_.instantiateModule(bytes(
- // -- memory
- kDeclMemory,
- 12, 12, 1,
- // -- signatures
- kDeclSignatures, 1,
- 2, kAstI32, kAstI32, kAstI32, // int, int -> int
- // -- functions
- kDeclFunctions, 1,
- kDeclFunctionName | kDeclFunctionExport,
- 0, 0,
- kNameOffset, 0, 0, 0, // name offset
- kBodySize, 0,
- // -- body
- kExprI32Sub, // --
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
- kDeclEnd,
- 's', 'u', 'b', 0 // name
- ));
-})();
+function assertFunction(module, func) {
+ assertEquals("object", typeof module.exports);
+
+ var exp = module.exports[func];
+ assertFalse(exp === undefined);
+ assertFalse(exp === null);
+ assertFalse(exp === 0);
+ assertEquals("function", typeof exp);
-// Check the module exists.
-assertFalse(module === undefined);
-assertFalse(module === null);
-assertFalse(module === 0);
-assertEquals("object", typeof module);
-
-// Check the memory is an ArrayBuffer.
-var mem = module.memory;
-assertFalse(mem === undefined);
-assertFalse(mem === null);
-assertFalse(mem === 0);
-assertEquals("object", typeof mem);
-assertTrue(mem instanceof ArrayBuffer);
-for (var i = 0; i < 4; i++) {
- module.memory = 0; // should be ignored
- assertEquals(mem, module.memory);
+ return exp;
}
-assertEquals(4096, module.memory.byteLength);
-
-// Check the properties of the sub function.
-assertEquals("function", typeof module.sub);
-
-assertEquals(-55, module.sub(33, 88));
-assertEquals(-55555, module.sub(33333, 88888));
-assertEquals(-5555555, module.sub(3333333, 8888888));
-
-
-var module = (function() {
- var kBodySize = 1;
- var kNameOffset2 = 19 + kBodySize + 1;
-
- return _WASMEXP_.instantiateModule(bytes(
- // -- memory
- kDeclMemory,
- 12, 12, 1,
- // -- signatures
- kDeclSignatures, 1,
- 0, kAstStmt, // signature: void -> void
- // -- functions
- kDeclFunctions, 1,
- kDeclFunctionName | kDeclFunctionExport,
- 0, 0, // signature index
- kNameOffset2, 0, 0, 0, // name offset
- kBodySize, 0,
- kExprNop, // body
- kDeclEnd,
- 'n', 'o', 'p', 0 // name
- ));
+(function SubTest() {
+
+ var builder = new WasmModuleBuilder();
+
+ builder.addMemory(1, 1, true);
+ builder.addFunction("sub", [kAstI32, kAstI32, kAstI32])
+ .addBody([
+ kExprI32Sub, // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1]) // --
+ .exportFunc()
+
+ var module = builder.instantiate();
+ assertModule(module, kPageSize);
+
+ // Check the properties of the sub function.
+ var sub = assertFunction(module, "sub");
+ assertEquals(-55, sub(33, 88));
+ assertEquals(-55555, sub(33333, 88888));
+ assertEquals(-5555555, sub(3333333, 8888888));
})();
-// Check the module exists.
-assertFalse(module === undefined);
-assertFalse(module === null);
-assertFalse(module === 0);
-assertEquals("object", typeof module);
-
-// Check the memory is an ArrayBuffer.
-var mem = module.memory;
-assertFalse(mem === undefined);
-assertFalse(mem === null);
-assertFalse(mem === 0);
-assertEquals("object", typeof mem);
-assertTrue(mem instanceof ArrayBuffer);
-for (var i = 0; i < 4; i++) {
- module.memory = 0; // should be ignored
- assertEquals(mem, module.memory);
-}
-assertEquals(4096, module.memory.byteLength);
+(function NopTest() {
+
+ var builder = new WasmModuleBuilder();
-// Check the properties of the sub function.
-assertFalse(module.nop === undefined);
-assertFalse(module.nop === null);
-assertFalse(module.nop === 0);
-assertEquals("function", typeof module.nop);
+ var kPages = 2;
+ builder.addMemory(kPages, kPages, true);
+ builder.addFunction("nop", [kAstStmt])
+ .addBody([kExprNop])
+ .exportFunc();
+
+ var module = builder.instantiate();
+ assertModule(module, kPageSize * kPages);
+
+ var nop = assertFunction(module, "nop");
+ assertEquals(undefined, nop());
+})();
-assertEquals(undefined, module.nop());
(function testLt() {
- var kBodySize = 5;
- var kNameOffset = 21 + kBodySize + 1;
-
- var data = bytes(
- // -- memory
- kDeclMemory,
- 12, 12, 1,
- // -- signatures
- kDeclSignatures, 1,
- 2, kAstI32, kAstF64, kAstF64, // (f64,f64)->int
- // -- functions
- kDeclFunctions, 1,
- kDeclFunctionName | kDeclFunctionExport,
- 0, 0, // signature index
- kNameOffset, 0, 0, 0, // name offset
- kBodySize, 0,
- // -- body
- kExprF64Lt, // --
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
- kDeclEnd,
- 'f', 'l', 't', 0 // name
- );
-
- var module = _WASMEXP_.instantiateModule(data);
-
- assertEquals("function", typeof module.flt);
- assertEquals(1, module.flt(-2, -1));
- assertEquals(0, module.flt(7.3, 7.1));
- assertEquals(1, module.flt(7.1, 7.3));
+ var builder = new WasmModuleBuilder();
+
+ var kPages = 3;
+ builder.addMemory(kPages, kPages, true);
+ builder.addFunction("flt", [kAstI32, kAstF64, kAstF64])
+ .addBody([
+ kExprF64Lt, // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1]) // --
+ .exportFunc();
+
+ var module = builder.instantiate();
+ assertModule(module, kPageSize * kPages);
+
+ var flt = assertFunction(module, "flt");
+ assertEquals(1, flt(-2, -1));
+ assertEquals(0, flt(7.3, 7.1));
+ assertEquals(1, flt(7.1, 7.3));
})();
diff --git a/deps/v8/test/mjsunit/wasm/divrem-trap.js b/deps/v8/test/mjsunit/wasm/divrem-trap.js
index 9787ae34c9..976e4736bc 100644
--- a/deps/v8/test/mjsunit/wasm/divrem-trap.js
+++ b/deps/v8/test/mjsunit/wasm/divrem-trap.js
@@ -5,6 +5,7 @@
// Flags: --expose-wasm --expose-gc --allow-natives-syntax
load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
function assertTraps(code, msg) {
var threwException = true;
@@ -29,38 +30,18 @@ function assertTraps(code, msg) {
}
-function makeDivRem(opcode) {
- var kBodySize = 5;
- var kNameMainOffset = 6 + 11 + kBodySize + 1;
-
- var data = bytes(
- // signatures
- kDeclSignatures, 1,
- 2, kAstI32, kAstI32, kAstI32, // (int,int) -> int
- // -- main function
- kDeclFunctions, 1,
- kDeclFunctionName | kDeclFunctionExport,
- 0, 0,
- kNameMainOffset, 0, 0, 0, // name offset
- kBodySize, 0,
- // main body
- opcode, // --
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
- // names
- kDeclEnd,
- 'm', 'a', 'i', 'n', 0 // --
- );
-
- var module = _WASMEXP_.instantiateModule(data);
-
- assertEquals("function", typeof module.main);
-
- return module.main;
+function makeBinop(opcode) {
+ var builder = new WasmModuleBuilder();
+
+ builder.addFunction("main", [kAstI32, kAstI32, kAstI32])
+ .addBody([opcode, kExprGetLocal, 0, kExprGetLocal, 1])
+ .exportFunc();
+
+ return builder.instantiate().exports.main;
}
-var divs = makeDivRem(kExprI32DivS);
-var divu = makeDivRem(kExprI32DivU);
+var divs = makeBinop(kExprI32DivS);
+var divu = makeBinop(kExprI32DivU);
assertEquals( 33, divs( 333, 10));
assertEquals(-33, divs(-336, 10));
@@ -78,8 +59,8 @@ assertTraps(kTrapDivUnrepresentable, "divs(0x80000000, -1)");
assertEquals(0, divu(0x80000000, -1));
-var rems = makeDivRem(kExprI32RemS);
-var remu = makeDivRem(kExprI32RemU);
+var rems = makeBinop(kExprI32RemS);
+var remu = makeBinop(kExprI32RemU);
assertEquals( 3, rems( 333, 10));
assertEquals(-6, rems(-336, 10));
diff --git a/deps/v8/test/mjsunit/wasm/embenchen/README b/deps/v8/test/mjsunit/wasm/embenchen/README
new file mode 100644
index 0000000000..7bce3f4180
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/embenchen/README
@@ -0,0 +1,3 @@
+This directory contains versions of test/mjsunit/asm/embenchen
+modified to work with the current state of the asm->wasm
+conversion plumbing.
diff --git a/deps/v8/test/mjsunit/wasm/embenchen/box2d.js b/deps/v8/test/mjsunit/wasm/embenchen/box2d.js
new file mode 100644
index 0000000000..d9c78124d9
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/embenchen/box2d.js
@@ -0,0 +1,20325 @@
+// Modified embenchen to direct to asm-wasm.
+// Flags: --expose-wasm
+
+var EXPECTED_OUTPUT =
+ /frame averages: .+ \+- .+, range: .+ to .+ \n/;
+var Module = {
+ arguments: [1],
+ print: function(x) {Module.printBuffer += x + '\n';},
+ preRun: [function() {Module.printBuffer = ''}],
+ postRun: [function() {
+ assertTrue(EXPECTED_OUTPUT.test(Module.printBuffer));
+ }],
+};
+// The Module object: Our interface to the outside world. We import
+// and export values on it, and do the work to get that through
+// closure compiler if necessary. There are various ways Module can be used:
+// 1. Not defined. We create it here
+// 2. A function parameter, function(Module) { ..generated code.. }
+// 3. pre-run appended it, var Module = {}; ..generated code..
+// 4. External script tag defines var Module.
+// We need to do an eval in order to handle the closure compiler
+// case, where this code here is minified but Module was defined
+// elsewhere (e.g. case 4 above). We also need to check if Module
+// already exists (e.g. case 3 above).
+// Note that if you want to run closure, and also to use Module
+// after the generated code, you will need to define var Module = {};
+// before the code. Then that object will be used in the code, and you
+// can continue to use Module afterwards as well.
+var Module;
+if (!Module) Module = (typeof Module !== 'undefined' ? Module : null) || {};
+
+// Sometimes an existing Module object exists with properties
+// meant to overwrite the default module functionality. Here
+// we collect those properties and reapply _after_ we configure
+// the current environment's defaults to avoid having to be so
+// defensive during initialization.
+var moduleOverrides = {};
+for (var key in Module) {
+ if (Module.hasOwnProperty(key)) {
+ moduleOverrides[key] = Module[key];
+ }
+}
+
+// The environment setup code below is customized to use Module.
+// *** Environment setup code ***
+var ENVIRONMENT_IS_NODE = typeof process === 'object' && typeof require === 'function';
+var ENVIRONMENT_IS_WEB = typeof window === 'object';
+var ENVIRONMENT_IS_WORKER = typeof importScripts === 'function';
+var ENVIRONMENT_IS_SHELL = !ENVIRONMENT_IS_WEB && !ENVIRONMENT_IS_NODE && !ENVIRONMENT_IS_WORKER;
+
+if (ENVIRONMENT_IS_NODE) {
+ // Expose functionality in the same simple way that the shells work
+ // Note that we pollute the global namespace here, otherwise we break in node
+ if (!Module['print']) Module['print'] = function print(x) {
+ process['stdout'].write(x + '\n');
+ };
+ if (!Module['printErr']) Module['printErr'] = function printErr(x) {
+ process['stderr'].write(x + '\n');
+ };
+
+ var nodeFS = require('fs');
+ var nodePath = require('path');
+
+ Module['read'] = function read(filename, binary) {
+ filename = nodePath['normalize'](filename);
+ var ret = nodeFS['readFileSync'](filename);
+ // The path is absolute if the normalized version is the same as the resolved.
+ if (!ret && filename != nodePath['resolve'](filename)) {
+ filename = path.join(__dirname, '..', 'src', filename);
+ ret = nodeFS['readFileSync'](filename);
+ }
+ if (ret && !binary) ret = ret.toString();
+ return ret;
+ };
+
+ Module['readBinary'] = function readBinary(filename) { return Module['read'](filename, true) };
+
+ Module['load'] = function load(f) {
+ globalEval(read(f));
+ };
+
+ Module['arguments'] = process['argv'].slice(2);
+
+ module['exports'] = Module;
+}
+else if (ENVIRONMENT_IS_SHELL) {
+ if (!Module['print']) Module['print'] = print;
+ if (typeof printErr != 'undefined') Module['printErr'] = printErr; // not present in v8 or older sm
+
+ if (typeof read != 'undefined') {
+ Module['read'] = read;
+ } else {
+ Module['read'] = function read() { throw 'no read() available (jsc?)' };
+ }
+
+ Module['readBinary'] = function readBinary(f) {
+ return read(f, 'binary');
+ };
+
+ if (typeof scriptArgs != 'undefined') {
+ Module['arguments'] = scriptArgs;
+ } else if (typeof arguments != 'undefined') {
+ Module['arguments'] = arguments;
+ }
+
+ this['Module'] = Module;
+
+ eval("if (typeof gc === 'function' && gc.toString().indexOf('[native code]') > 0) var gc = undefined"); // wipe out the SpiderMonkey shell 'gc' function, which can confuse closure (uses it as a minified name, and it is then initted to a non-falsey value unexpectedly)
+}
+else if (ENVIRONMENT_IS_WEB || ENVIRONMENT_IS_WORKER) {
+ Module['read'] = function read(url) {
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, false);
+ xhr.send(null);
+ return xhr.responseText;
+ };
+
+ if (typeof arguments != 'undefined') {
+ Module['arguments'] = arguments;
+ }
+
+ if (typeof console !== 'undefined') {
+ if (!Module['print']) Module['print'] = function print(x) {
+ console.log(x);
+ };
+ if (!Module['printErr']) Module['printErr'] = function printErr(x) {
+ console.log(x);
+ };
+ } else {
+ // Probably a worker, and without console.log. We can do very little here...
+ var TRY_USE_DUMP = false;
+ if (!Module['print']) Module['print'] = (TRY_USE_DUMP && (typeof(dump) !== "undefined") ? (function(x) {
+ dump(x);
+ }) : (function(x) {
+ // self.postMessage(x); // enable this if you want stdout to be sent as messages
+ }));
+ }
+
+ if (ENVIRONMENT_IS_WEB) {
+ window['Module'] = Module;
+ } else {
+ Module['load'] = importScripts;
+ }
+}
+else {
+ // Unreachable because SHELL is dependant on the others
+ throw 'Unknown runtime environment. Where are we?';
+}
+
+function globalEval(x) {
+ eval.call(null, x);
+}
+if (!Module['load'] == 'undefined' && Module['read']) {
+ Module['load'] = function load(f) {
+ globalEval(Module['read'](f));
+ };
+}
+if (!Module['print']) {
+ Module['print'] = function(){};
+}
+if (!Module['printErr']) {
+ Module['printErr'] = Module['print'];
+}
+if (!Module['arguments']) {
+ Module['arguments'] = [];
+}
+// *** Environment setup code ***
+
+// Closure helpers
+Module.print = Module['print'];
+Module.printErr = Module['printErr'];
+
+// Callbacks
+Module['preRun'] = [];
+Module['postRun'] = [];
+
+// Merge back in the overrides
+for (var key in moduleOverrides) {
+ if (moduleOverrides.hasOwnProperty(key)) {
+ Module[key] = moduleOverrides[key];
+ }
+}
+
+
+
+// === Auto-generated preamble library stuff ===
+
+//========================================
+// Runtime code shared with compiler
+//========================================
+
+var Runtime = {
+ stackSave: function () {
+ return STACKTOP;
+ },
+ stackRestore: function (stackTop) {
+ STACKTOP = stackTop;
+ },
+ forceAlign: function (target, quantum) {
+ quantum = quantum || 4;
+ if (quantum == 1) return target;
+ if (isNumber(target) && isNumber(quantum)) {
+ return Math.ceil(target/quantum)*quantum;
+ } else if (isNumber(quantum) && isPowerOfTwo(quantum)) {
+ return '(((' +target + ')+' + (quantum-1) + ')&' + -quantum + ')';
+ }
+ return 'Math.ceil((' + target + ')/' + quantum + ')*' + quantum;
+ },
+ isNumberType: function (type) {
+ return type in Runtime.INT_TYPES || type in Runtime.FLOAT_TYPES;
+ },
+ isPointerType: function isPointerType(type) {
+ return type[type.length-1] == '*';
+},
+ isStructType: function isStructType(type) {
+ if (isPointerType(type)) return false;
+ if (isArrayType(type)) return true;
+ if (/<?\{ ?[^}]* ?\}>?/.test(type)) return true; // { i32, i8 } etc. - anonymous struct types
+ // See comment in isStructPointerType()
+ return type[0] == '%';
+},
+ INT_TYPES: {"i1":0,"i8":0,"i16":0,"i32":0,"i64":0},
+ FLOAT_TYPES: {"float":0,"double":0},
+ or64: function (x, y) {
+ var l = (x | 0) | (y | 0);
+ var h = (Math.round(x / 4294967296) | Math.round(y / 4294967296)) * 4294967296;
+ return l + h;
+ },
+ and64: function (x, y) {
+ var l = (x | 0) & (y | 0);
+ var h = (Math.round(x / 4294967296) & Math.round(y / 4294967296)) * 4294967296;
+ return l + h;
+ },
+ xor64: function (x, y) {
+ var l = (x | 0) ^ (y | 0);
+ var h = (Math.round(x / 4294967296) ^ Math.round(y / 4294967296)) * 4294967296;
+ return l + h;
+ },
+ getNativeTypeSize: function (type) {
+ switch (type) {
+ case 'i1': case 'i8': return 1;
+ case 'i16': return 2;
+ case 'i32': return 4;
+ case 'i64': return 8;
+ case 'float': return 4;
+ case 'double': return 8;
+ default: {
+ if (type[type.length-1] === '*') {
+ return Runtime.QUANTUM_SIZE; // A pointer
+ } else if (type[0] === 'i') {
+ var bits = parseInt(type.substr(1));
+ assert(bits % 8 === 0);
+ return bits/8;
+ } else {
+ return 0;
+ }
+ }
+ }
+ },
+ getNativeFieldSize: function (type) {
+ return Math.max(Runtime.getNativeTypeSize(type), Runtime.QUANTUM_SIZE);
+ },
+ dedup: function dedup(items, ident) {
+ var seen = {};
+ if (ident) {
+ return items.filter(function(item) {
+ if (seen[item[ident]]) return false;
+ seen[item[ident]] = true;
+ return true;
+ });
+ } else {
+ return items.filter(function(item) {
+ if (seen[item]) return false;
+ seen[item] = true;
+ return true;
+ });
+ }
+},
+ set: function set() {
+ var args = typeof arguments[0] === 'object' ? arguments[0] : arguments;
+ var ret = {};
+ for (var i = 0; i < args.length; i++) {
+ ret[args[i]] = 0;
+ }
+ return ret;
+},
+ STACK_ALIGN: 8,
+ getAlignSize: function (type, size, vararg) {
+ // we align i64s and doubles on 64-bit boundaries, unlike x86
+ if (!vararg && (type == 'i64' || type == 'double')) return 8;
+ if (!type) return Math.min(size, 8); // align structures internally to 64 bits
+ return Math.min(size || (type ? Runtime.getNativeFieldSize(type) : 0), Runtime.QUANTUM_SIZE);
+ },
+ calculateStructAlignment: function calculateStructAlignment(type) {
+ type.flatSize = 0;
+ type.alignSize = 0;
+ var diffs = [];
+ var prev = -1;
+ var index = 0;
+ type.flatIndexes = type.fields.map(function(field) {
+ index++;
+ var size, alignSize;
+ if (Runtime.isNumberType(field) || Runtime.isPointerType(field)) {
+ size = Runtime.getNativeTypeSize(field); // pack char; char; in structs, also char[X]s.
+ alignSize = Runtime.getAlignSize(field, size);
+ } else if (Runtime.isStructType(field)) {
+ if (field[1] === '0') {
+ // this is [0 x something]. When inside another structure like here, it must be at the end,
+ // and it adds no size
+ // XXX this happens in java-nbody for example... assert(index === type.fields.length, 'zero-length in the middle!');
+ size = 0;
+ if (Types.types[field]) {
+ alignSize = Runtime.getAlignSize(null, Types.types[field].alignSize);
+ } else {
+ alignSize = type.alignSize || QUANTUM_SIZE;
+ }
+ } else {
+ size = Types.types[field].flatSize;
+ alignSize = Runtime.getAlignSize(null, Types.types[field].alignSize);
+ }
+ } else if (field[0] == 'b') {
+ // bN, large number field, like a [N x i8]
+ size = field.substr(1)|0;
+ alignSize = 1;
+ } else if (field[0] === '<') {
+ // vector type
+ size = alignSize = Types.types[field].flatSize; // fully aligned
+ } else if (field[0] === 'i') {
+ // illegal integer field, that could not be legalized because it is an internal structure field
+ // it is ok to have such fields, if we just use them as markers of field size and nothing more complex
+ size = alignSize = parseInt(field.substr(1))/8;
+ assert(size % 1 === 0, 'cannot handle non-byte-size field ' + field);
+ } else {
+ assert(false, 'invalid type for calculateStructAlignment');
+ }
+ if (type.packed) alignSize = 1;
+ type.alignSize = Math.max(type.alignSize, alignSize);
+ var curr = Runtime.alignMemory(type.flatSize, alignSize); // if necessary, place this on aligned memory
+ type.flatSize = curr + size;
+ if (prev >= 0) {
+ diffs.push(curr-prev);
+ }
+ prev = curr;
+ return curr;
+ });
+ if (type.name_ && type.name_[0] === '[') {
+ // arrays have 2 elements, so we get the proper difference. then we scale here. that way we avoid
+ // allocating a potentially huge array for [999999 x i8] etc.
+ type.flatSize = parseInt(type.name_.substr(1))*type.flatSize/2;
+ }
+ type.flatSize = Runtime.alignMemory(type.flatSize, type.alignSize);
+ if (diffs.length == 0) {
+ type.flatFactor = type.flatSize;
+ } else if (Runtime.dedup(diffs).length == 1) {
+ type.flatFactor = diffs[0];
+ }
+ type.needsFlattening = (type.flatFactor != 1);
+ return type.flatIndexes;
+ },
+ generateStructInfo: function (struct, typeName, offset) {
+ var type, alignment;
+ if (typeName) {
+ offset = offset || 0;
+ type = (typeof Types === 'undefined' ? Runtime.typeInfo : Types.types)[typeName];
+ if (!type) return null;
+ if (type.fields.length != struct.length) {
+ printErr('Number of named fields must match the type for ' + typeName + ': possibly duplicate struct names. Cannot return structInfo');
+ return null;
+ }
+ alignment = type.flatIndexes;
+ } else {
+ var type = { fields: struct.map(function(item) { return item[0] }) };
+ alignment = Runtime.calculateStructAlignment(type);
+ }
+ var ret = {
+ __size__: type.flatSize
+ };
+ if (typeName) {
+ struct.forEach(function(item, i) {
+ if (typeof item === 'string') {
+ ret[item] = alignment[i] + offset;
+ } else {
+ // embedded struct
+ var key;
+ for (var k in item) key = k;
+ ret[key] = Runtime.generateStructInfo(item[key], type.fields[i], alignment[i]);
+ }
+ });
+ } else {
+ struct.forEach(function(item, i) {
+ ret[item[1]] = alignment[i];
+ });
+ }
+ return ret;
+ },
+ dynCall: function (sig, ptr, args) {
+ if (args && args.length) {
+ if (!args.splice) args = Array.prototype.slice.call(args);
+ args.splice(0, 0, ptr);
+ return Module['dynCall_' + sig].apply(null, args);
+ } else {
+ return Module['dynCall_' + sig].call(null, ptr);
+ }
+ },
+ functionPointers: [],
+ addFunction: function (func) {
+ for (var i = 0; i < Runtime.functionPointers.length; i++) {
+ if (!Runtime.functionPointers[i]) {
+ Runtime.functionPointers[i] = func;
+ return 2*(1 + i);
+ }
+ }
+ throw 'Finished up all reserved function pointers. Use a higher value for RESERVED_FUNCTION_POINTERS.';
+ },
+ removeFunction: function (index) {
+ Runtime.functionPointers[(index-2)/2] = null;
+ },
+ getAsmConst: function (code, numArgs) {
+ // code is a constant string on the heap, so we can cache these
+ if (!Runtime.asmConstCache) Runtime.asmConstCache = {};
+ var func = Runtime.asmConstCache[code];
+ if (func) return func;
+ var args = [];
+ for (var i = 0; i < numArgs; i++) {
+ args.push(String.fromCharCode(36) + i); // $0, $1 etc
+ }
+ var source = Pointer_stringify(code);
+ if (source[0] === '"') {
+ // tolerate EM_ASM("..code..") even though EM_ASM(..code..) is correct
+ if (source.indexOf('"', 1) === source.length-1) {
+ source = source.substr(1, source.length-2);
+ } else {
+ // something invalid happened, e.g. EM_ASM("..code($0)..", input)
+ abort('invalid EM_ASM input |' + source + '|. Please use EM_ASM(..code..) (no quotes) or EM_ASM({ ..code($0).. }, input) (to input values)');
+ }
+ }
+ try {
+ var evalled = eval('(function(' + args.join(',') + '){ ' + source + ' })'); // new Function does not allow upvars in node
+ } catch(e) {
+ Module.printErr('error in executing inline EM_ASM code: ' + e + ' on: \n\n' + source + '\n\nwith args |' + args + '| (make sure to use the right one out of EM_ASM, EM_ASM_ARGS, etc.)');
+ throw e;
+ }
+ return Runtime.asmConstCache[code] = evalled;
+ },
+ warnOnce: function (text) {
+ if (!Runtime.warnOnce.shown) Runtime.warnOnce.shown = {};
+ if (!Runtime.warnOnce.shown[text]) {
+ Runtime.warnOnce.shown[text] = 1;
+ Module.printErr(text);
+ }
+ },
+ funcWrappers: {},
+ getFuncWrapper: function (func, sig) {
+ assert(sig);
+ if (!Runtime.funcWrappers[func]) {
+ Runtime.funcWrappers[func] = function dynCall_wrapper() {
+ return Runtime.dynCall(sig, func, arguments);
+ };
+ }
+ return Runtime.funcWrappers[func];
+ },
+ UTF8Processor: function () {
+ var buffer = [];
+ var needed = 0;
+ this.processCChar = function (code) {
+ code = code & 0xFF;
+
+ if (buffer.length == 0) {
+ if ((code & 0x80) == 0x00) { // 0xxxxxxx
+ return String.fromCharCode(code);
+ }
+ buffer.push(code);
+ if ((code & 0xE0) == 0xC0) { // 110xxxxx
+ needed = 1;
+ } else if ((code & 0xF0) == 0xE0) { // 1110xxxx
+ needed = 2;
+ } else { // 11110xxx
+ needed = 3;
+ }
+ return '';
+ }
+
+ if (needed) {
+ buffer.push(code);
+ needed--;
+ if (needed > 0) return '';
+ }
+
+ var c1 = buffer[0];
+ var c2 = buffer[1];
+ var c3 = buffer[2];
+ var c4 = buffer[3];
+ var ret;
+ if (buffer.length == 2) {
+ ret = String.fromCharCode(((c1 & 0x1F) << 6) | (c2 & 0x3F));
+ } else if (buffer.length == 3) {
+ ret = String.fromCharCode(((c1 & 0x0F) << 12) | ((c2 & 0x3F) << 6) | (c3 & 0x3F));
+ } else {
+ // http://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
+ var codePoint = ((c1 & 0x07) << 18) | ((c2 & 0x3F) << 12) |
+ ((c3 & 0x3F) << 6) | (c4 & 0x3F);
+ ret = String.fromCharCode(
+ Math.floor((codePoint - 0x10000) / 0x400) + 0xD800,
+ (codePoint - 0x10000) % 0x400 + 0xDC00);
+ }
+ buffer.length = 0;
+ return ret;
+ }
+ this.processJSString = function processJSString(string) {
+ /* TODO: use TextEncoder when present,
+ var encoder = new TextEncoder();
+ encoder['encoding'] = "utf-8";
+ var utf8Array = encoder['encode'](aMsg.data);
+ */
+ string = unescape(encodeURIComponent(string));
+ var ret = [];
+ for (var i = 0; i < string.length; i++) {
+ ret.push(string.charCodeAt(i));
+ }
+ return ret;
+ }
+ },
+ getCompilerSetting: function (name) {
+ throw 'You must build with -s RETAIN_COMPILER_SETTINGS=1 for Runtime.getCompilerSetting or emscripten_get_compiler_setting to work';
+ },
+ stackAlloc: function (size) { var ret = STACKTOP;STACKTOP = (STACKTOP + size)|0;STACKTOP = (((STACKTOP)+7)&-8); return ret; },
+ staticAlloc: function (size) { var ret = STATICTOP;STATICTOP = (STATICTOP + size)|0;STATICTOP = (((STATICTOP)+7)&-8); return ret; },
+ dynamicAlloc: function (size) { var ret = DYNAMICTOP;DYNAMICTOP = (DYNAMICTOP + size)|0;DYNAMICTOP = (((DYNAMICTOP)+7)&-8); if (DYNAMICTOP >= TOTAL_MEMORY) enlargeMemory();; return ret; },
+ alignMemory: function (size,quantum) { var ret = size = Math.ceil((size)/(quantum ? quantum : 8))*(quantum ? quantum : 8); return ret; },
+ makeBigInt: function (low,high,unsigned) { var ret = (unsigned ? ((+((low>>>0)))+((+((high>>>0)))*(+4294967296))) : ((+((low>>>0)))+((+((high|0)))*(+4294967296)))); return ret; },
+ GLOBAL_BASE: 8,
+ QUANTUM_SIZE: 4,
+ __dummy__: 0
+}
+
+
+Module['Runtime'] = Runtime;
+
+
+
+
+
+
+
+
+
+//========================================
+// Runtime essentials
+//========================================
+
+var __THREW__ = 0; // Used in checking for thrown exceptions.
+
+var ABORT = false; // whether we are quitting the application. no code should run after this. set in exit() and abort()
+var EXITSTATUS = 0;
+
+var undef = 0;
+// tempInt is used for 32-bit signed values or smaller. tempBigInt is used
+// for 32-bit unsigned values or more than 32 bits. TODO: audit all uses of tempInt
+var tempValue, tempInt, tempBigInt, tempInt2, tempBigInt2, tempPair, tempBigIntI, tempBigIntR, tempBigIntS, tempBigIntP, tempBigIntD, tempDouble, tempFloat;
+var tempI64, tempI64b;
+var tempRet0, tempRet1, tempRet2, tempRet3, tempRet4, tempRet5, tempRet6, tempRet7, tempRet8, tempRet9;
+
+function assert(condition, text) {
+ if (!condition) {
+ abort('Assertion failed: ' + text);
+ }
+}
+
+var globalScope = this;
+
+// C calling interface. A convenient way to call C functions (in C files, or
+// defined with extern "C").
+//
+// Note: LLVM optimizations can inline and remove functions, after which you will not be
+// able to call them. Closure can also do so. To avoid that, add your function to
+// the exports using something like
+//
+// -s EXPORTED_FUNCTIONS='["_main", "_myfunc"]'
+//
+// @param ident The name of the C function (note that C++ functions will be name-mangled - use extern "C")
+// @param returnType The return type of the function, one of the JS types 'number', 'string' or 'array' (use 'number' for any C pointer, and
+// 'array' for JavaScript arrays and typed arrays; note that arrays are 8-bit).
+// @param argTypes An array of the types of arguments for the function (if there are no arguments, this can be ommitted). Types are as in returnType,
+// except that 'array' is not possible (there is no way for us to know the length of the array)
+// @param args An array of the arguments to the function, as native JS values (as in returnType)
+// Note that string arguments will be stored on the stack (the JS string will become a C string on the stack).
+// @return The return value, as a native JS value (as in returnType)
+function ccall(ident, returnType, argTypes, args) {
+ return ccallFunc(getCFunc(ident), returnType, argTypes, args);
+}
+Module["ccall"] = ccall;
+
+// Returns the C function with a specified identifier (for C++, you need to do manual name mangling)
+function getCFunc(ident) {
+ try {
+ var func = Module['_' + ident]; // closure exported function
+ if (!func) func = eval('_' + ident); // explicit lookup
+ } catch(e) {
+ }
+ assert(func, 'Cannot call unknown function ' + ident + ' (perhaps LLVM optimizations or closure removed it?)');
+ return func;
+}
+
+// Internal function that does a C call using a function, not an identifier
+function ccallFunc(func, returnType, argTypes, args) {
+ var stack = 0;
+ function toC(value, type) {
+ if (type == 'string') {
+ if (value === null || value === undefined || value === 0) return 0; // null string
+ value = intArrayFromString(value);
+ type = 'array';
+ }
+ if (type == 'array') {
+ if (!stack) stack = Runtime.stackSave();
+ var ret = Runtime.stackAlloc(value.length);
+ writeArrayToMemory(value, ret);
+ return ret;
+ }
+ return value;
+ }
+ function fromC(value, type) {
+ if (type == 'string') {
+ return Pointer_stringify(value);
+ }
+ assert(type != 'array');
+ return value;
+ }
+ var i = 0;
+ var cArgs = args ? args.map(function(arg) {
+ return toC(arg, argTypes[i++]);
+ }) : [];
+ var ret = fromC(func.apply(null, cArgs), returnType);
+ if (stack) Runtime.stackRestore(stack);
+ return ret;
+}
+
+// Returns a native JS wrapper for a C function. This is similar to ccall, but
+// returns a function you can call repeatedly in a normal way. For example:
+//
+// var my_function = cwrap('my_c_function', 'number', ['number', 'number']);
+// alert(my_function(5, 22));
+// alert(my_function(99, 12));
+//
+function cwrap(ident, returnType, argTypes) {
+ var func = getCFunc(ident);
+ return function() {
+ return ccallFunc(func, returnType, argTypes, Array.prototype.slice.call(arguments));
+ }
+}
+Module["cwrap"] = cwrap;
+
+// Sets a value in memory in a dynamic way at run-time. Uses the
+// type data. This is the same as makeSetValue, except that
+// makeSetValue is done at compile-time and generates the needed
+// code then, whereas this function picks the right code at
+// run-time.
+// Note that setValue and getValue only do *aligned* writes and reads!
+// Note that ccall uses JS types as for defining types, while setValue and
+// getValue need LLVM types ('i8', 'i32') - this is a lower-level operation
+function setValue(ptr, value, type, noSafe) {
+ type = type || 'i8';
+ if (type.charAt(type.length-1) === '*') type = 'i32'; // pointers are 32-bit
+ switch(type) {
+ case 'i1': HEAP8[(ptr)]=value; break;
+ case 'i8': HEAP8[(ptr)]=value; break;
+ case 'i16': HEAP16[((ptr)>>1)]=value; break;
+ case 'i32': HEAP32[((ptr)>>2)]=value; break;
+ case 'i64': (tempI64 = [value>>>0,(tempDouble=value,(+(Math_abs(tempDouble))) >= (+1) ? (tempDouble > (+0) ? ((Math_min((+(Math_floor((tempDouble)/(+4294967296)))), (+4294967295)))|0)>>>0 : (~~((+(Math_ceil((tempDouble - +(((~~(tempDouble)))>>>0))/(+4294967296))))))>>>0) : 0)],HEAP32[((ptr)>>2)]=tempI64[0],HEAP32[(((ptr)+(4))>>2)]=tempI64[1]); break;
+ case 'float': HEAPF32[((ptr)>>2)]=value; break;
+ case 'double': HEAPF64[((ptr)>>3)]=value; break;
+ default: abort('invalid type for setValue: ' + type);
+ }
+}
+Module['setValue'] = setValue;
+
+// Parallel to setValue.
+function getValue(ptr, type, noSafe) {
+ type = type || 'i8';
+ if (type.charAt(type.length-1) === '*') type = 'i32'; // pointers are 32-bit
+ switch(type) {
+ case 'i1': return HEAP8[(ptr)];
+ case 'i8': return HEAP8[(ptr)];
+ case 'i16': return HEAP16[((ptr)>>1)];
+ case 'i32': return HEAP32[((ptr)>>2)];
+ case 'i64': return HEAP32[((ptr)>>2)];
+ case 'float': return HEAPF32[((ptr)>>2)];
+ case 'double': return HEAPF64[((ptr)>>3)];
+ default: abort('invalid type for setValue: ' + type);
+ }
+ return null;
+}
+Module['getValue'] = getValue;
+
+var ALLOC_NORMAL = 0; // Tries to use _malloc()
+var ALLOC_STACK = 1; // Lives for the duration of the current function call
+var ALLOC_STATIC = 2; // Cannot be freed
+var ALLOC_DYNAMIC = 3; // Cannot be freed except through sbrk
+var ALLOC_NONE = 4; // Do not allocate
+Module['ALLOC_NORMAL'] = ALLOC_NORMAL;
+Module['ALLOC_STACK'] = ALLOC_STACK;
+Module['ALLOC_STATIC'] = ALLOC_STATIC;
+Module['ALLOC_DYNAMIC'] = ALLOC_DYNAMIC;
+Module['ALLOC_NONE'] = ALLOC_NONE;
+
+// allocate(): This is for internal use. You can use it yourself as well, but the interface
+// is a little tricky (see docs right below). The reason is that it is optimized
+// for multiple syntaxes to save space in generated code. So you should
+// normally not use allocate(), and instead allocate memory using _malloc(),
+// initialize it with setValue(), and so forth.
+// @slab: An array of data, or a number. If a number, then the size of the block to allocate,
+// in *bytes* (note that this is sometimes confusing: the next parameter does not
+// affect this!)
+// @types: Either an array of types, one for each byte (or 0 if no type at that position),
+// or a single type which is used for the entire block. This only matters if there
+// is initial data - if @slab is a number, then this does not matter at all and is
+// ignored.
+// @allocator: How to allocate memory, see ALLOC_*
+function allocate(slab, types, allocator, ptr) {
+ var zeroinit, size;
+ if (typeof slab === 'number') {
+ zeroinit = true;
+ size = slab;
+ } else {
+ zeroinit = false;
+ size = slab.length;
+ }
+
+ var singleType = typeof types === 'string' ? types : null;
+
+ var ret;
+ if (allocator == ALLOC_NONE) {
+ ret = ptr;
+ } else {
+ ret = [_malloc, Runtime.stackAlloc, Runtime.staticAlloc, Runtime.dynamicAlloc][allocator === undefined ? ALLOC_STATIC : allocator](Math.max(size, singleType ? 1 : types.length));
+ }
+
+ if (zeroinit) {
+ var ptr = ret, stop;
+ assert((ret & 3) == 0);
+ stop = ret + (size & ~3);
+ for (; ptr < stop; ptr += 4) {
+ HEAP32[((ptr)>>2)]=0;
+ }
+ stop = ret + size;
+ while (ptr < stop) {
+ HEAP8[((ptr++)|0)]=0;
+ }
+ return ret;
+ }
+
+ if (singleType === 'i8') {
+ if (slab.subarray || slab.slice) {
+ HEAPU8.set(slab, ret);
+ } else {
+ HEAPU8.set(new Uint8Array(slab), ret);
+ }
+ return ret;
+ }
+
+ var i = 0, type, typeSize, previousType;
+ while (i < size) {
+ var curr = slab[i];
+
+ if (typeof curr === 'function') {
+ curr = Runtime.getFunctionIndex(curr);
+ }
+
+ type = singleType || types[i];
+ if (type === 0) {
+ i++;
+ continue;
+ }
+
+ if (type == 'i64') type = 'i32'; // special case: we have one i32 here, and one i32 later
+
+ setValue(ret+i, curr, type);
+
+ // no need to look up size unless type changes, so cache it
+ if (previousType !== type) {
+ typeSize = Runtime.getNativeTypeSize(type);
+ previousType = type;
+ }
+ i += typeSize;
+ }
+
+ return ret;
+}
+Module['allocate'] = allocate;
+
+function Pointer_stringify(ptr, /* optional */ length) {
+ // TODO: use TextDecoder
+ // Find the length, and check for UTF while doing so
+ var hasUtf = false;
+ var t;
+ var i = 0;
+ while (1) {
+ t = HEAPU8[(((ptr)+(i))|0)];
+ if (t >= 128) hasUtf = true;
+ else if (t == 0 && !length) break;
+ i++;
+ if (length && i == length) break;
+ }
+ if (!length) length = i;
+
+ var ret = '';
+
+ if (!hasUtf) {
+ var MAX_CHUNK = 1024; // split up into chunks, because .apply on a huge string can overflow the stack
+ var curr;
+ while (length > 0) {
+ curr = String.fromCharCode.apply(String, HEAPU8.subarray(ptr, ptr + Math.min(length, MAX_CHUNK)));
+ ret = ret ? ret + curr : curr;
+ ptr += MAX_CHUNK;
+ length -= MAX_CHUNK;
+ }
+ return ret;
+ }
+
+ var utf8 = new Runtime.UTF8Processor();
+ for (i = 0; i < length; i++) {
+ t = HEAPU8[(((ptr)+(i))|0)];
+ ret += utf8.processCChar(t);
+ }
+ return ret;
+}
+Module['Pointer_stringify'] = Pointer_stringify;
+
+// Given a pointer 'ptr' to a null-terminated UTF16LE-encoded string in the emscripten HEAP, returns
+// a copy of that string as a Javascript String object.
+function UTF16ToString(ptr) {
+ var i = 0;
+
+ var str = '';
+ while (1) {
+ var codeUnit = HEAP16[(((ptr)+(i*2))>>1)];
+ if (codeUnit == 0)
+ return str;
+ ++i;
+ // fromCharCode constructs a character from a UTF-16 code unit, so we can pass the UTF16 string right through.
+ str += String.fromCharCode(codeUnit);
+ }
+}
+Module['UTF16ToString'] = UTF16ToString;
+
+// Copies the given Javascript String object 'str' to the emscripten HEAP at address 'outPtr',
+// null-terminated and encoded in UTF16LE form. The copy will require at most (str.length*2+1)*2 bytes of space in the HEAP.
+function stringToUTF16(str, outPtr) {
+ for(var i = 0; i < str.length; ++i) {
+ // charCodeAt returns a UTF-16 encoded code unit, so it can be directly written to the HEAP.
+ var codeUnit = str.charCodeAt(i); // possibly a lead surrogate
+ HEAP16[(((outPtr)+(i*2))>>1)]=codeUnit;
+ }
+ // Null-terminate the pointer to the HEAP.
+ HEAP16[(((outPtr)+(str.length*2))>>1)]=0;
+}
+Module['stringToUTF16'] = stringToUTF16;
+
+// Given a pointer 'ptr' to a null-terminated UTF32LE-encoded string in the emscripten HEAP, returns
+// a copy of that string as a Javascript String object.
+function UTF32ToString(ptr) {
+ var i = 0;
+
+ var str = '';
+ while (1) {
+ var utf32 = HEAP32[(((ptr)+(i*4))>>2)];
+ if (utf32 == 0)
+ return str;
+ ++i;
+ // Gotcha: fromCharCode constructs a character from a UTF-16 encoded code (pair), not from a Unicode code point! So encode the code point to UTF-16 for constructing.
+ if (utf32 >= 0x10000) {
+ var ch = utf32 - 0x10000;
+ str += String.fromCharCode(0xD800 | (ch >> 10), 0xDC00 | (ch & 0x3FF));
+ } else {
+ str += String.fromCharCode(utf32);
+ }
+ }
+}
+Module['UTF32ToString'] = UTF32ToString;
+
+// Copies the given Javascript String object 'str' to the emscripten HEAP at address 'outPtr',
+// null-terminated and encoded in UTF32LE form. The copy will require at most (str.length+1)*4 bytes of space in the HEAP,
+// but can use less, since str.length does not return the number of characters in the string, but the number of UTF-16 code units in the string.
+function stringToUTF32(str, outPtr) {
+ var iChar = 0;
+ for(var iCodeUnit = 0; iCodeUnit < str.length; ++iCodeUnit) {
+ // Gotcha: charCodeAt returns a 16-bit word that is a UTF-16 encoded code unit, not a Unicode code point of the character! We must decode the string to UTF-32 to the heap.
+ var codeUnit = str.charCodeAt(iCodeUnit); // possibly a lead surrogate
+ if (codeUnit >= 0xD800 && codeUnit <= 0xDFFF) {
+ var trailSurrogate = str.charCodeAt(++iCodeUnit);
+ codeUnit = 0x10000 + ((codeUnit & 0x3FF) << 10) | (trailSurrogate & 0x3FF);
+ }
+ HEAP32[(((outPtr)+(iChar*4))>>2)]=codeUnit;
+ ++iChar;
+ }
+ // Null-terminate the pointer to the HEAP.
+ HEAP32[(((outPtr)+(iChar*4))>>2)]=0;
+}
+Module['stringToUTF32'] = stringToUTF32;
+
+function demangle(func) {
+ var i = 3;
+ // params, etc.
+ var basicTypes = {
+ 'v': 'void',
+ 'b': 'bool',
+ 'c': 'char',
+ 's': 'short',
+ 'i': 'int',
+ 'l': 'long',
+ 'f': 'float',
+ 'd': 'double',
+ 'w': 'wchar_t',
+ 'a': 'signed char',
+ 'h': 'unsigned char',
+ 't': 'unsigned short',
+ 'j': 'unsigned int',
+ 'm': 'unsigned long',
+ 'x': 'long long',
+ 'y': 'unsigned long long',
+ 'z': '...'
+ };
+ var subs = [];
+ var first = true;
+ function dump(x) {
+ //return;
+ if (x) Module.print(x);
+ Module.print(func);
+ var pre = '';
+ for (var a = 0; a < i; a++) pre += ' ';
+ Module.print (pre + '^');
+ }
+ function parseNested() {
+ i++;
+ if (func[i] === 'K') i++; // ignore const
+ var parts = [];
+ while (func[i] !== 'E') {
+ if (func[i] === 'S') { // substitution
+ i++;
+ var next = func.indexOf('_', i);
+ var num = func.substring(i, next) || 0;
+ parts.push(subs[num] || '?');
+ i = next+1;
+ continue;
+ }
+ if (func[i] === 'C') { // constructor
+ parts.push(parts[parts.length-1]);
+ i += 2;
+ continue;
+ }
+ var size = parseInt(func.substr(i));
+ var pre = size.toString().length;
+ if (!size || !pre) { i--; break; } // counter i++ below us
+ var curr = func.substr(i + pre, size);
+ parts.push(curr);
+ subs.push(curr);
+ i += pre + size;
+ }
+ i++; // skip E
+ return parts;
+ }
+ function parse(rawList, limit, allowVoid) { // main parser
+ limit = limit || Infinity;
+ var ret = '', list = [];
+ function flushList() {
+ return '(' + list.join(', ') + ')';
+ }
+ var name;
+ if (func[i] === 'N') {
+ // namespaced N-E
+ name = parseNested().join('::');
+ limit--;
+ if (limit === 0) return rawList ? [name] : name;
+ } else {
+ // not namespaced
+ if (func[i] === 'K' || (first && func[i] === 'L')) i++; // ignore const and first 'L'
+ var size = parseInt(func.substr(i));
+ if (size) {
+ var pre = size.toString().length;
+ name = func.substr(i + pre, size);
+ i += pre + size;
+ }
+ }
+ first = false;
+ if (func[i] === 'I') {
+ i++;
+ var iList = parse(true);
+ var iRet = parse(true, 1, true);
+ ret += iRet[0] + ' ' + name + '<' + iList.join(', ') + '>';
+ } else {
+ ret = name;
+ }
+ paramLoop: while (i < func.length && limit-- > 0) {
+ //dump('paramLoop');
+ var c = func[i++];
+ if (c in basicTypes) {
+ list.push(basicTypes[c]);
+ } else {
+ switch (c) {
+ case 'P': list.push(parse(true, 1, true)[0] + '*'); break; // pointer
+ case 'R': list.push(parse(true, 1, true)[0] + '&'); break; // reference
+ case 'L': { // literal
+ i++; // skip basic type
+ var end = func.indexOf('E', i);
+ var size = end - i;
+ list.push(func.substr(i, size));
+ i += size + 2; // size + 'EE'
+ break;
+ }
+ case 'A': { // array
+ var size = parseInt(func.substr(i));
+ i += size.toString().length;
+ if (func[i] !== '_') throw '?';
+ i++; // skip _
+ list.push(parse(true, 1, true)[0] + ' [' + size + ']');
+ break;
+ }
+ case 'E': break paramLoop;
+ default: ret += '?' + c; break paramLoop;
+ }
+ }
+ }
+ if (!allowVoid && list.length === 1 && list[0] === 'void') list = []; // avoid (void)
+ if (rawList) {
+ if (ret) {
+ list.push(ret + '?');
+ }
+ return list;
+ } else {
+ return ret + flushList();
+ }
+ }
+ try {
+ // Special-case the entry point, since its name differs from other name mangling.
+ if (func == 'Object._main' || func == '_main') {
+ return 'main()';
+ }
+ if (typeof func === 'number') func = Pointer_stringify(func);
+ if (func[0] !== '_') return func;
+ if (func[1] !== '_') return func; // C function
+ if (func[2] !== 'Z') return func;
+ switch (func[3]) {
+ case 'n': return 'operator new()';
+ case 'd': return 'operator delete()';
+ }
+ return parse();
+ } catch(e) {
+ return func;
+ }
+}
+
+function demangleAll(text) {
+ return text.replace(/__Z[\w\d_]+/g, function(x) { var y = demangle(x); return x === y ? x : (x + ' [' + y + ']') });
+}
+
+function stackTrace() {
+ var stack = new Error().stack;
+ return stack ? demangleAll(stack) : '(no stack trace available)'; // Stack trace is not available at least on IE10 and Safari 6.
+}
+
+// Memory management
+
+var PAGE_SIZE = 4096;
+function alignMemoryPage(x) {
+ return (x+4095)&-4096;
+}
+
+var HEAP;
+var HEAP8, HEAPU8, HEAP16, HEAPU16, HEAP32, HEAPU32, HEAPF32, HEAPF64;
+
+var STATIC_BASE = 0, STATICTOP = 0, staticSealed = false; // static area
+var STACK_BASE = 0, STACKTOP = 0, STACK_MAX = 0; // stack area
+var DYNAMIC_BASE = 0, DYNAMICTOP = 0; // dynamic area handled by sbrk
+
+function enlargeMemory() {
+ abort('Cannot enlarge memory arrays. Either (1) compile with -s TOTAL_MEMORY=X with X higher than the current value ' + TOTAL_MEMORY + ', (2) compile with ALLOW_MEMORY_GROWTH which adjusts the size at runtime but prevents some optimizations, or (3) set Module.TOTAL_MEMORY before the program runs.');
+}
+
+var TOTAL_STACK = Module['TOTAL_STACK'] || 5242880;
+var TOTAL_MEMORY = Module['TOTAL_MEMORY'] || 134217728;
+var FAST_MEMORY = Module['FAST_MEMORY'] || 2097152;
+
+var totalMemory = 4096;
+while (totalMemory < TOTAL_MEMORY || totalMemory < 2*TOTAL_STACK) {
+ if (totalMemory < 16*1024*1024) {
+ totalMemory *= 2;
+ } else {
+ totalMemory += 16*1024*1024
+ }
+}
+if (totalMemory !== TOTAL_MEMORY) {
+ Module.printErr('increasing TOTAL_MEMORY to ' + totalMemory + ' to be more reasonable');
+ TOTAL_MEMORY = totalMemory;
+}
+
+// Initialize the runtime's memory
+// check for full engine support (use string 'subarray' to avoid closure compiler confusion)
+assert(typeof Int32Array !== 'undefined' && typeof Float64Array !== 'undefined' && !!(new Int32Array(1)['subarray']) && !!(new Int32Array(1)['set']),
+ 'JS engine does not provide full typed array support');
+
+var buffer = new ArrayBuffer(TOTAL_MEMORY);
+HEAP8 = new Int8Array(buffer);
+HEAP16 = new Int16Array(buffer);
+HEAP32 = new Int32Array(buffer);
+HEAPU8 = new Uint8Array(buffer);
+HEAPU16 = new Uint16Array(buffer);
+HEAPU32 = new Uint32Array(buffer);
+HEAPF32 = new Float32Array(buffer);
+HEAPF64 = new Float64Array(buffer);
+
+// Endianness check (note: assumes compiler arch was little-endian)
+HEAP32[0] = 255;
+assert(HEAPU8[0] === 255 && HEAPU8[3] === 0, 'Typed arrays 2 must be run on a little-endian system');
+
+Module['HEAP'] = HEAP;
+Module['HEAP8'] = HEAP8;
+Module['HEAP16'] = HEAP16;
+Module['HEAP32'] = HEAP32;
+Module['HEAPU8'] = HEAPU8;
+Module['HEAPU16'] = HEAPU16;
+Module['HEAPU32'] = HEAPU32;
+Module['HEAPF32'] = HEAPF32;
+Module['HEAPF64'] = HEAPF64;
+
+function callRuntimeCallbacks(callbacks) {
+ while(callbacks.length > 0) {
+ var callback = callbacks.shift();
+ if (typeof callback == 'function') {
+ callback();
+ continue;
+ }
+ var func = callback.func;
+ if (typeof func === 'number') {
+ if (callback.arg === undefined) {
+ Runtime.dynCall('v', func);
+ } else {
+ Runtime.dynCall('vi', func, [callback.arg]);
+ }
+ } else {
+ func(callback.arg === undefined ? null : callback.arg);
+ }
+ }
+}
+
+var __ATPRERUN__ = []; // functions called before the runtime is initialized
+var __ATINIT__ = []; // functions called during startup
+var __ATMAIN__ = []; // functions called when main() is to be run
+var __ATEXIT__ = []; // functions called during shutdown
+var __ATPOSTRUN__ = []; // functions called after the runtime has exited
+
+var runtimeInitialized = false;
+
+function preRun() {
+ // compatibility - merge in anything from Module['preRun'] at this time
+ if (Module['preRun']) {
+ if (typeof Module['preRun'] == 'function') Module['preRun'] = [Module['preRun']];
+ while (Module['preRun'].length) {
+ addOnPreRun(Module['preRun'].shift());
+ }
+ }
+ callRuntimeCallbacks(__ATPRERUN__);
+}
+
+function ensureInitRuntime() {
+ if (runtimeInitialized) return;
+ runtimeInitialized = true;
+ callRuntimeCallbacks(__ATINIT__);
+}
+
+function preMain() {
+ callRuntimeCallbacks(__ATMAIN__);
+}
+
+function exitRuntime() {
+ callRuntimeCallbacks(__ATEXIT__);
+}
+
+function postRun() {
+ // compatibility - merge in anything from Module['postRun'] at this time
+ if (Module['postRun']) {
+ if (typeof Module['postRun'] == 'function') Module['postRun'] = [Module['postRun']];
+ while (Module['postRun'].length) {
+ addOnPostRun(Module['postRun'].shift());
+ }
+ }
+ callRuntimeCallbacks(__ATPOSTRUN__);
+}
+
+function addOnPreRun(cb) {
+ __ATPRERUN__.unshift(cb);
+}
+Module['addOnPreRun'] = Module.addOnPreRun = addOnPreRun;
+
+function addOnInit(cb) {
+ __ATINIT__.unshift(cb);
+}
+Module['addOnInit'] = Module.addOnInit = addOnInit;
+
+function addOnPreMain(cb) {
+ __ATMAIN__.unshift(cb);
+}
+Module['addOnPreMain'] = Module.addOnPreMain = addOnPreMain;
+
+function addOnExit(cb) {
+ __ATEXIT__.unshift(cb);
+}
+Module['addOnExit'] = Module.addOnExit = addOnExit;
+
+function addOnPostRun(cb) {
+ __ATPOSTRUN__.unshift(cb);
+}
+Module['addOnPostRun'] = Module.addOnPostRun = addOnPostRun;
+
+// Tools
+
+// This processes a JS string into a C-line array of numbers, 0-terminated.
+// For LLVM-originating strings, see parser.js:parseLLVMString function
+function intArrayFromString(stringy, dontAddNull, length /* optional */) {
+ var ret = (new Runtime.UTF8Processor()).processJSString(stringy);
+ if (length) {
+ ret.length = length;
+ }
+ if (!dontAddNull) {
+ ret.push(0);
+ }
+ return ret;
+}
+Module['intArrayFromString'] = intArrayFromString;
+
+function intArrayToString(array) {
+ var ret = [];
+ for (var i = 0; i < array.length; i++) {
+ var chr = array[i];
+ if (chr > 0xFF) {
+ chr &= 0xFF;
+ }
+ ret.push(String.fromCharCode(chr));
+ }
+ return ret.join('');
+}
+Module['intArrayToString'] = intArrayToString;
+
+// Write a Javascript array to somewhere in the heap
+function writeStringToMemory(string, buffer, dontAddNull) {
+ var array = intArrayFromString(string, dontAddNull);
+ var i = 0;
+ while (i < array.length) {
+ var chr = array[i];
+ HEAP8[(((buffer)+(i))|0)]=chr;
+ i = i + 1;
+ }
+}
+Module['writeStringToMemory'] = writeStringToMemory;
+
+function writeArrayToMemory(array, buffer) {
+ for (var i = 0; i < array.length; i++) {
+ HEAP8[(((buffer)+(i))|0)]=array[i];
+ }
+}
+Module['writeArrayToMemory'] = writeArrayToMemory;
+
+function writeAsciiToMemory(str, buffer, dontAddNull) {
+ for (var i = 0; i < str.length; i++) {
+ HEAP8[(((buffer)+(i))|0)]=str.charCodeAt(i);
+ }
+ if (!dontAddNull) HEAP8[(((buffer)+(str.length))|0)]=0;
+}
+Module['writeAsciiToMemory'] = writeAsciiToMemory;
+
+function unSign(value, bits, ignore) {
+ if (value >= 0) {
+ return value;
+ }
+ return bits <= 32 ? 2*Math.abs(1 << (bits-1)) + value // Need some trickery, since if bits == 32, we are right at the limit of the bits JS uses in bitshifts
+ : Math.pow(2, bits) + value;
+}
+function reSign(value, bits, ignore) {
+ if (value <= 0) {
+ return value;
+ }
+ var half = bits <= 32 ? Math.abs(1 << (bits-1)) // abs is needed if bits == 32
+ : Math.pow(2, bits-1);
+ if (value >= half && (bits <= 32 || value > half)) { // for huge values, we can hit the precision limit and always get true here. so don't do that
+ // but, in general there is no perfect solution here. With 64-bit ints, we get rounding and errors
+ // TODO: In i64 mode 1, resign the two parts separately and safely
+ value = -2*half + value; // Cannot bitshift half, as it may be at the limit of the bits JS uses in bitshifts
+ }
+ return value;
+}
+
+// check for imul support, and also for correctness ( https://bugs.webkit.org/show_bug.cgi?id=126345 )
+if (!Math['imul'] || Math['imul'](0xffffffff, 5) !== -5) Math['imul'] = function imul(a, b) {
+ var ah = a >>> 16;
+ var al = a & 0xffff;
+ var bh = b >>> 16;
+ var bl = b & 0xffff;
+ return (al*bl + ((ah*bl + al*bh) << 16))|0;
+};
+Math.imul = Math['imul'];
+
+
+var Math_abs = Math.abs;
+var Math_cos = Math.cos;
+var Math_sin = Math.sin;
+var Math_tan = Math.tan;
+var Math_acos = Math.acos;
+var Math_asin = Math.asin;
+var Math_atan = Math.atan;
+var Math_atan2 = Math.atan2;
+var Math_exp = Math.exp;
+var Math_log = Math.log;
+var Math_sqrt = Math.sqrt;
+var Math_ceil = Math.ceil;
+var Math_floor = Math.floor;
+var Math_pow = Math.pow;
+var Math_imul = Math.imul;
+var Math_fround = Math.fround;
+var Math_min = Math.min;
+
+// A counter of dependencies for calling run(). If we need to
+// do asynchronous work before running, increment this and
+// decrement it. Incrementing must happen in a place like
+// PRE_RUN_ADDITIONS (used by emcc to add file preloading).
+// Note that you can add dependencies in preRun, even though
+// it happens right before run - run will be postponed until
+// the dependencies are met.
+var runDependencies = 0;
+var runDependencyWatcher = null;
+var dependenciesFulfilled = null; // overridden to take different actions when all run dependencies are fulfilled
+
+function addRunDependency(id) {
+ runDependencies++;
+ if (Module['monitorRunDependencies']) {
+ Module['monitorRunDependencies'](runDependencies);
+ }
+}
+Module['addRunDependency'] = addRunDependency;
+function removeRunDependency(id) {
+ runDependencies--;
+ if (Module['monitorRunDependencies']) {
+ Module['monitorRunDependencies'](runDependencies);
+ }
+ if (runDependencies == 0) {
+ if (runDependencyWatcher !== null) {
+ clearInterval(runDependencyWatcher);
+ runDependencyWatcher = null;
+ }
+ if (dependenciesFulfilled) {
+ var callback = dependenciesFulfilled;
+ dependenciesFulfilled = null;
+ callback(); // can add another dependenciesFulfilled
+ }
+ }
+}
+Module['removeRunDependency'] = removeRunDependency;
+
+Module["preloadedImages"] = {}; // maps url to image data
+Module["preloadedAudios"] = {}; // maps url to audio data
+
+
+var memoryInitializer = null;
+
+// === Body ===
+var __ZTVN10__cxxabiv117__class_type_infoE = 7024;
+var __ZTVN10__cxxabiv120__si_class_type_infoE = 7064;
+
+
+
+
+STATIC_BASE = 8;
+
+STATICTOP = STATIC_BASE + Runtime.alignMemory(7731);
+/* global initializers */ __ATINIT__.push();
+
+
+/* memory initializer */ allocate([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,232,118,72,0,0,0,0,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,101,114,114,111,114,58,32,37,100,92,110,0,0,0,0,0,102,114,97,109,101,32,97,118,101,114,97,103,101,115,58,32,37,46,51,102,32,43,45,32,37,46,51,102,44,32,114,97,110,103,101,58,32,37,46,51,102,32,116,111,32,37,46,51,102,32,10,0,0,0,0,0,105,102,32,40,77,111,100,117,108,101,46,114,101,112,111,114,116,67,111,109,112,108,101,116,105,111,110,41,32,77,111,100,117,108,101,46,114,101,112,111,114,116,67,111,109,112,108,101,116,105,111,110,40,41,0,0,114,101,115,112,111,110,115,105,118,101,32,109,97,105,110,32,108,111,111,112,0,0,0,0,0,0,0,0,56,1,0,0,1,0,0,0,2,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,49,49,98,50,69,100,103,101,83,104,97,112,101,0,0,0,55,98,50,83,104,97,112,101,0,0,0,0,0,0,0,0,120,27,0,0,32,1,0,0,160,27,0,0,16,1,0,0,48,1,0,0,0,0,0,0,66,111,120,50,68,47,67,111,108,108,105,115,105,111,110,47,83,104,97,112,101,115,47,98,50,80,111,108,121,103,111,110,83,104,97,112,101,46,99,112,112,0,0,0,0,0,0,0,48,46,48,102,32,60,61,32,108,111,119,101,114,32,38,38,32,108,111,119,101,114,32,60,61,32,105,110,112,117,116,46,109,97,120,70,114,97,99,116,105,111,110,0,0,0,0,0,82,97,121,67,97,115,116,0,109,95,118,101,114,116,101,120,67,111,117,110,116,32,62,61,32,51,0,0,0,0,0,0,67,111,109,112,117,116,101,77,97,115,115,0,0,0,0,0,97,114,101,97,32,62,32,49,46,49,57,50,48,57,50,57,48,101,45,48,55,70,0,0,0,0,0,0,48,2,0,0,3,0,0,0,4,0,0,0,2,0,0,0,2,0,0,0,2,0,0,0,2,0,0,0,2,0,0,0,2,0,0,0,49,52,98,50,80,111,108,121,103,111,110,83,104,97,112,101,0,0,0,0,0,0,0,0,160,27,0,0,24,2,0,0,48,1,0,0,0,0,0,0,16,0,0,0,32,0,0,0,64,0,0,0,96,0,0,0,128,0,0,0,160,0,0,0,192,0,0,0,224,0,0,0,0,1,0,0,64,1,0,0,128,1,0,0,192,1,0,0,0,2,0,0,128,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,106,32,60,32,98,50,95,98,108,111,99,107,83,105,122,101,115,0,0,0,0,0,0,0,66,111,120,50,68,47,67,111,109,109,111,110,47,98,50,66,108,111,99,107,65,108,108,111,99,97,116,111,114,46,99,112,112,0,0,0,0,0,0,0,98,50,66,108,111,99,107,65,108,108,111,99,97,116,111,114,0,0,0,0,0,0,0,0,48,32,60,32,115,105,122,101,0,0,0,0,0,0,0,0,65,108,108,111,99,97,116,101,0,0,0,0,0,0,0,0,48,32,60,61,32,105,110,100,101,120,32,38,38,32,105,110,100,101,120,32,60,32,98,50,95,98,108,111,99,107,83,105,122,101,115,0,0,0,0,0,98,108,111,99,107,67,111,117,110,116,32,42,32,98,108,111,99,107,83,105,122,101,32,60,61,32,98,50,95,99,104,117,110,107,83,105,122,101,0,0,70,114,101,101,0,0,0,0,98,100,45,62,112,111,115,105,116,105,111,110,46,73,115,86,97,108,105,100,40,41,0,0,66,111,120,50,68,47,68,121,110,97,109,105,99,115,47,98,50,66,111,100,121,46,99,112,112,0,0,0,0,0,0,0,98,50,66,111,100,121,0,0,98,100,45,62,108,105,110,101,97,114,86,101,108,111,99,105,116,121,46,73,115,86,97,108,105,100,40,41,0,0,0,0,98,50,73,115,86,97,108,105,100,40,98,100,45,62,97,110,103,108,101,41,0,0,0,0,98,50,73,115,86,97,108,105,100,40,98,100,45,62,97,110,103,117,108,97,114,86,101,108,111,99,105,116,121,41,0,0,98,50,73,115,86,97,108,105,100,40,98,100,45,62,97,110,103,117,108,97,114,68,97,109,112,105,110,103,41,32,38,38,32,98,100,45,62,97,110,103,117,108,97,114,68,97,109,112,105,110,103,32,62,61,32,48,46,48,102,0,0,0,0,0,98,50,73,115,86,97,108,105,100,40,98,100,45,62,108,105,110,101,97,114,68,97,109,112,105,110,103,41,32,38,38,32,98,100,45,62,108,105,110,101,97,114,68,97,109,112,105,110,103,32,62,61,32,48,46,48,102,0,0,0,0,0,0,0,109,95,119,111,114,108,100,45,62,73,115,76,111,99,107,101,100,40,41,32,61,61,32,102,97,108,115,101,0,0,0,0,67,114,101,97,116,101,70,105,120,116,117,114,101,0,0,0,109,95,116,121,112,101,32,61,61,32,98,50,95,100,121,110,97,109,105,99,66,111,100,121,0,0,0,0,0,0,0,0,82,101,115,101,116,77,97,115,115,68,97,116,97,0,0,0,109,95,73,32,62,32,48,46,48,102,0,0,0,0,0,0,0,10,0,0,0,0,0,0,240,7,0,0,0,0,0,0,48,32,60,61,32,112,114,111,120,121,73,100,32,38,38,32,112,114,111,120,121,73,100,32,60,32,109,95,110,111,100,101,67,97,112,97,99,105,116,121,0,0,0,0,0,0,0,0,46,47,66,111,120,50,68,47,67,111,108,108,105,115,105,111,110,47,98,50,68,121,110,97,109,105,99,84,114,101,101,46,104,0,0,0,0,0,0,0,71,101,116,85,115,101,114,68,97,116,97,0,0,0,0,0,71,101,116,70,97,116,65,65,66,66,0,0,0,0,0,0,0,0,0,0,32,8,0,0,5,0,0,0,6,0,0,0,1,0,0,0,2,0,0,0,1,0,0,0,2,0,0,0,49,55,98,50,67,111,110,116,97,99,116,76,105,115,116,101,110,101,114,0,0,0,0,0,120,27,0,0,8,8,0,0,109,95,112,114,111,120,121,67,111,117,110,116,32,61,61,32,48,0,0,0,0,0,0,0,66,111,120,50,68,47,68,121,110,97,109,105,99,115,47,98,50,70,105,120,116,117,114,101,46,99,112,112,0,0,0,0,67,114,101,97,116,101,80,114,111,120,105,101,115,0,0,0,73,115,76,111,99,107,101,100,40,41,32,61,61,32,102,97,108,115,101,0,0,0,0,0,66,111,120,50,68,47,68,121,110,97,109,105,99,115,47,98,50,87,111,114,108,100,46,99,112,112,0,0,0,0,0,0,67,114,101,97,116,101,66,111,100,121,0,0,0,0,0,0,98,45,62,73,115,65,99,116,105,118,101,40,41,32,61,61,32,116,114,117,101,0,0,0,83,111,108,118,101,0,0,0,115,116,97,99,107,67,111,117,110,116,32,60,32,115,116,97,99,107,83,105,122,101,0,0,116,121,112,101,65,32,61,61,32,98,50,95,100,121,110,97,109,105,99,66,111,100,121,32,124,124,32,116,121,112,101,66,32,61,61,32,98,50,95,100,121,110,97,109,105,99,66,111,100,121,0,0,0,0,0,0,83,111,108,118,101,84,79,73,0,0,0,0,0,0,0,0,97,108,112,104,97,48,32,60,32,49,46,48,102,0,0,0,46,47,66,111,120,50,68,47,67,111,109,109,111,110,47,98,50,77,97,116,104,46,104,0,65,100,118,97,110,99,101,0,109,95,106,111,105,110,116,67,111,117,110,116,32,60,32,109,95,106,111,105,110,116,67,97,112,97,99,105,116,121,0,0,46,47,66,111,120,50,68,47,68,121,110,97,109,105,99,115,47,98,50,73,115,108,97,110,100,46,104,0,0,0,0,0,65,100,100,0,0,0,0,0,109,95,99,111,110,116,97,99,116,67,111,117,110,116,32,60,32,109,95,99,111,110,116,97,99,116,67,97,112,97,99,105,116,121,0,0,0,0,0,0,109,95,98,111,100,121,67,111,117,110,116,32,60,32,109,95,98,111,100,121,67,97,112,97,99,105,116,121,0,0,0,0,0,0,0,0,40,10,0,0,7,0,0,0,8,0,0,0,3,0,0,0,0,0,0,0,49,53,98,50,67,111,110,116,97,99,116,70,105,108,116,101,114,0,0,0,0,0,0,0,120,27,0,0,16,10,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,48,32,60,61,32,105,110,100,101,120,32,38,38,32,105,110,100,101,120,32,60,32,99,104,97,105,110,45,62,109,95,99,111,117,110,116,0,0,0,0,66,111,120,50,68,47,67,111,108,108,105,115,105,111,110,47,98,50,68,105,115,116,97,110,99,101,46,99,112,112,0,0,83,101,116,0,0,0,0,0,102,97,108,115,101,0,0,0,98,50,68,105,115,116,97,110,99,101,0,0,0,0,0,0,71,101,116,77,101,116,114,105,99,0,0,0,0,0,0,0,71,101,116,87,105,116,110,101,115,115,80,111,105,110,116,115,0,0,0,0,0,0,0,0,48,32,60,61,32,105,110,100,101,120,32,38,38,32,105,110,100,101,120,32,60,32,109,95,99,111,117,110,116,0,0,0,46,47,66,111,120,50,68,47,67,111,108,108,105,115,105,111,110,47,98,50,68,105,115,116,97,110,99,101,46,104,0,0,71,101,116,86,101,114,116,101,120,0,0,0,0,0,0,0,71,101,116,67,108,111,115,101,115,116,80,111,105,110,116,0,99,97,99,104,101,45,62,99,111,117,110,116,32,60,61,32,51,0,0,0,0,0,0,0,82,101,97,100,67,97,99,104,101,0,0,0,0,0,0,0,109,95,110,111,100,101,67,111,117,110,116,32,61,61,32,109,95,110,111,100,101,67,97,112,97,99,105,116,121,0,0,0,66,111,120,50,68,47,67,111,108,108,105,115,105,111,110,47,98,50,68,121,110,97,109,105,99,84,114,101,101,46,99,112,112,0,0,0,0,0,0,0,65,108,108,111,99,97,116,101,78,111,100,101,0,0,0,0,48,32,60,61,32,110,111,100,101,73,100,32,38,38,32,110,111,100,101,73,100,32,60,32,109,95,110,111,100,101,67,97,112,97,99,105,116,121,0,0,70,114,101,101,78,111,100,101,0,0,0,0,0,0,0,0,48,32,60,32,109,95,110,111,100,101,67,111,117,110,116,0,48,32,60,61,32,112,114,111,120,121,73,100,32,38,38,32,112,114,111,120,121,73,100,32,60,32,109,95,110,111,100,101,67,97,112,97,99,105,116,121,0,0,0,0,0,0,0,0,109,95,110,111,100,101,115,91,112,114,111,120,121,73,100,93,46,73,115,76,101,97,102,40,41,0,0,0,0,0,0,0,77,111,118,101,80,114,111,120,121,0,0,0,0,0,0,0,99,104,105,108,100,49,32,33,61,32,40,45,49,41,0,0,73,110,115,101,114,116,76,101,97,102,0,0,0,0,0,0,99,104,105,108,100,50,32,33,61,32,40,45,49,41,0,0,105,65,32,33,61,32,40,45,49,41,0,0,0,0,0,0,66,97,108,97,110,99,101,0,48,32,60,61,32,105,66,32,38,38,32,105,66,32,60,32,109,95,110,111,100,101,67,97,112,97,99,105,116,121,0,0,48,32,60,61,32,105,67,32,38,38,32,105,67,32,60,32,109,95,110,111,100,101,67,97,112,97,99,105,116,121,0,0,48,32,60,61,32,105,70,32,38,38,32,105,70,32,60,32,109,95,110,111,100,101,67,97,112,97,99,105,116,121,0,0,48,32,60,61,32,105,71,32,38,38,32,105,71,32,60,32,109,95,110,111,100,101,67,97,112,97,99,105,116,121,0,0,109,95,110,111,100,101,115,91,67,45,62,112,97,114,101,110,116,93,46,99,104,105,108,100,50,32,61,61,32,105,65,0,48,32,60,61,32,105,68,32,38,38,32,105,68,32,60,32,109,95,110,111,100,101,67,97,112,97,99,105,116,121,0,0,48,32,60,61,32,105,69,32,38,38,32,105,69,32,60,32,109,95,110,111,100,101,67,97,112,97,99,105,116,121,0,0,109,95,110,111,100,101,115,91,66,45,62,112,97,114,101,110,116,93,46,99,104,105,108,100,50,32,61,61,32,105,65,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,116,97,114,103,101,116,32,62,32,116,111,108,101,114,97,110,99,101,0,0,0,0,0,0,66,111,120,50,68,47,67,111,108,108,105,115,105,111,110,47,98,50,84,105,109,101,79,102,73,109,112,97,99,116,46,99,112,112,0,0,0,0,0,0,98,50,84,105,109,101,79,102,73,109,112,97,99,116,0,0,102,97,108,115,101,0,0,0,69,118,97,108,117,97,116,101,0,0,0,0,0,0,0,0,48,32,60,61,32,105,110,100,101,120,32,38,38,32,105,110,100,101,120,32,60,32,109,95,99,111,117,110,116,0,0,0,46,47,66,111,120,50,68,47,67,111,108,108,105,115,105,111,110,47,98,50,68,105,115,116,97,110,99,101,46,104,0,0,71,101,116,86,101,114,116,101,120,0,0,0,0,0,0,0,70,105,110,100,77,105,110,83,101,112,97,114,97,116,105,111,110,0,0,0,0,0,0,0,48,32,60,32,99,111,117,110,116,32,38,38,32,99,111,117,110,116,32,60,32,51,0,0,73,110,105,116,105,97,108,105,122,101,0,0,0,0,0,0,0,0,0,0,0,0,0,0,109,95,105,110,100,101,120,32,61,61,32,48,0,0,0,0,66,111,120,50,68,47,67,111,109,109,111,110,47,98,50,83,116,97,99,107,65,108,108,111,99,97,116,111,114,46,99,112,112,0,0,0,0,0,0,0,126,98,50,83,116,97,99,107,65,108,108,111,99,97,116,111,114,0,0,0,0,0,0,0,109,95,101,110,116,114,121,67,111,117,110,116,32,61,61,32,48,0,0,0,0,0,0,0,109,95,101,110,116,114,121,67,111,117,110,116,32,60,32,98,50,95,109,97,120,83,116,97,99,107,69,110,116,114,105,101,115,0,0,0,0,0,0,0,65,108,108,111,99,97,116,101,0,0,0,0,0,0,0,0,109,95,101,110,116,114,121,67,111,117,110,116,32,62,32,48,0,0,0,0,0,0,0,0,70,114,101,101,0,0,0,0,112,32,61,61,32,101,110,116,114,121,45,62,100,97,116,97,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,48,32,60,61,32,116,121,112,101,49,32,38,38,32,116,121,112,101,49,32,60,32,98,50,83,104,97,112,101,58,58,101,95,116,121,112,101,67,111,117,110,116,0,0,0,0,0,0,66,111,120,50,68,47,68,121,110,97,109,105,99,115,47,67,111,110,116,97,99,116,115,47,98,50,67,111,110,116,97,99,116,46,99,112,112,0,0,0,48,32,60,61,32,116,121,112,101,50,32,38,38,32,116,121,112,101,50,32,60,32,98,50,83,104,97,112,101,58,58,101,95,116,121,112,101,67,111,117,110,116,0,0,0,0,0,0,67,114,101,97,116,101,0,0,115,95,105,110,105,116,105,97,108,105,122,101,100,32,61,61,32,116,114,117,101,0,0,0,68,101,115,116,114,111,121,0,48,32,60,61,32,116,121,112,101,65,32,38,38,32,116,121,112,101,66,32,60,32,98,50,83,104,97,112,101,58,58,101,95,116,121,112,101,67,111,117,110,116,0,0,0,0,0,0,0,0,0,0,120,17,0,0,1,0,0,0,9,0,0,0,10,0,0,0,0,0,0,0,57,98,50,67,111,110,116,97,99,116,0,0,0,0,0,0,120,27,0,0,104,17,0,0,0,0,0,0,104,18,0,0,3,0,0,0,11,0,0,0,12,0,0,0,0,0,0,0,109,95,102,105,120,116,117,114,101,65,45,62,71,101,116,84,121,112,101,40,41,32,61,61,32,98,50,83,104,97,112,101,58,58,101,95,101,100,103,101,0,0,0,0,0,0,0,0,66,111,120,50,68,47,68,121,110,97,109,105,99,115,47,67,111,110,116,97,99,116,115,47,98,50,69,100,103,101,65,110,100,67,105,114,99,108,101,67,111,110,116,97,99,116,46,99,112,112,0,0,0,0,0,0,98,50,69,100,103,101,65,110,100,67,105,114,99,108,101,67,111,110,116,97,99,116,0,0,109,95,102,105,120,116,117,114,101,66,45,62,71,101,116,84,121,112,101,40,41,32,61,61,32,98,50,83,104,97,112,101,58,58,101,95,99,105,114,99,108,101,0,0,0,0,0,0,50,50,98,50,69,100,103,101,65,110,100,67,105,114,99,108,101,67,111,110,116,97,99,116,0,0,0,0,0,0,0,0,160,27,0,0,72,18,0,0,120,17,0,0,0,0,0,0,0,0,0,0,96,19,0,0,4,0,0,0,13,0,0,0,14,0,0,0,0,0,0,0,109,95,102,105,120,116,117,114,101,65,45,62,71,101,116,84,121,112,101,40,41,32,61,61,32,98,50,83,104,97,112,101,58,58,101,95,101,100,103,101,0,0,0,0,0,0,0,0,66,111,120,50,68,47,68,121,110,97,109,105,99,115,47,67,111,110,116,97,99,116,115,47,98,50,69,100,103,101,65,110,100,80,111,108,121,103,111,110,67,111,110,116,97,99,116,46,99,112,112,0,0,0,0,0,98,50,69,100,103,101,65,110,100,80,111,108,121,103,111,110,67,111,110,116,97,99,116,0,109,95,102,105,120,116,117,114,101,66,45,62,71,101,116,84,121,112,101,40,41,32,61,61,32,98,50,83,104,97,112,101,58,58,101,95,112,111,108,121,103,111,110,0,0,0,0,0,50,51,98,50,69,100,103,101,65,110,100,80,111,108,121,103,111,110,67,111,110,116,97,99,116,0,0,0,0,0,0,0,160,27,0,0,64,19,0,0,120,17,0,0,0,0,0,0,0,0,0,0,96,20,0,0,5,0,0,0,15,0,0,0,16,0,0,0,0,0,0,0,109,95,102,105,120,116,117,114,101,65,45,62,71,101,116,84,121,112,101,40,41,32,61,61,32,98,50,83,104,97,112,101,58,58,101,95,112,111,108,121,103,111,110,0,0,0,0,0,66,111,120,50,68,47,68,121,110,97,109,105,99,115,47,67,111,110,116,97,99,116,115,47,98,50,80,111,108,121,103,111,110,65,110,100,67,105,114,99,108,101,67,111,110,116,97,99,116,46,99,112,112,0,0,0,98,50,80,111,108,121,103,111,110,65,110,100,67,105,114,99,108,101,67,111,110,116,97,99,116,0,0,0,0,0,0,0,109,95,102,105,120,116,117,114,101,66,45,62,71,101,116,84,121,112,101,40,41,32,61,61,32,98,50,83,104,97,112,101,58,58,101,95,99,105,114,99,108,101,0,0,0,0,0,0,50,53,98,50,80,111,108,121,103,111,110,65,110,100,67,105,114,99,108,101,67,111,110,116,97,99,116,0,0,0,0,0,160,27,0,0,64,20,0,0,120,17,0,0,0,0,0,0,0,0,0,0,72,21,0,0,6,0,0,0,17,0,0,0,18,0,0,0,0,0,0,0,109,95,102,105,120,116,117,114,101,65,45,62,71,101,116,84,121,112,101,40,41,32,61,61,32,98,50,83,104,97,112,101,58,58,101,95,112,111,108,121,103,111,110,0,0,0,0,0,66,111,120,50,68,47,68,121,110,97,109,105,99,115,47,67,111,110,116,97,99,116,115,47,98,50,80,111,108,121,103,111,110,67,111,110,116,97,99,116,46,99,112,112,0,0,0,0,98,50,80,111,108,121,103,111,110,67,111,110,116,97,99,116,0,0,0,0,0,0,0,0,109,95,102,105,120,116,117,114,101,66,45,62,71,101,116,84,121,112,101,40,41,32,61,61,32,98,50,83,104,97,112,101,58,58,101,95,112,111,108,121,103,111,110,0,0,0,0,0,49,54,98,50,80,111,108,121,103,111,110,67,111,110,116,97,99,116,0,0,0,0,0,0,160,27,0,0,48,21,0,0,120,17,0,0,0,0,0,0,116,111,105,73,110,100,101,120,65,32,60,32,109,95,98,111,100,121,67,111,117,110,116,0,66,111,120,50,68,47,68,121,110,97,109,105,99,115,47,98,50,73,115,108,97,110,100,46,99,112,112,0,0,0,0,0,83,111,108,118,101,84,79,73,0,0,0,0,0,0,0,0,116,111,105,73,110,100,101,120,66,32,60,32,109,95,98,111,100,121,67,111,117,110,116,0,100,101,110,32,62,32,48,46,48,102,0,0,0,0,0,0,66,111,120,50,68,47,67,111,108,108,105,115,105,111,110,47,98,50,67,111,108,108,105,100,101,69,100,103,101,46,99,112,112,0,0,0,0,0,0,0,98,50,67,111,108,108,105,100,101,69,100,103,101,65,110,100,67,105,114,99,108,101,0,0,48,32,60,61,32,101,100,103,101,49,32,38,38,32,101,100,103,101,49,32,60,32,112,111,108,121,49,45,62,109,95,118,101,114,116,101,120,67,111,117,110,116,0,0,0,0,0,0,66,111,120,50,68,47,67,111,108,108,105,115,105,111,110,47,98,50,67,111,108,108,105,100,101,80,111,108,121,103,111,110,46,99,112,112,0,0,0,0,98,50,70,105,110,100,73,110,99,105,100,101,110,116,69,100,103,101,0,0,0,0,0,0,98,50,69,100,103,101,83,101,112,97,114,97,116,105,111,110,0,0,0,0,0,0,0,0,0,0,0,0,120,23,0,0,7,0,0,0,19,0,0,0,20,0,0,0,0,0,0,0,109,95,102,105,120,116,117,114,101,65,45,62,71,101,116,84,121,112,101,40,41,32,61,61,32,98,50,83,104,97,112,101,58,58,101,95,99,104,97,105,110,0,0,0,0,0,0,0,66,111,120,50,68,47,68,121,110,97,109,105,99,115,47,67,111,110,116,97,99,116,115,47,98,50,67,104,97,105,110,65,110,100,67,105,114,99,108,101,67,111,110,116,97,99,116,46,99,112,112,0,0,0,0,0,98,50,67,104,97,105,110,65,110,100,67,105,114,99,108,101,67,111,110,116,97,99,116,0,109,95,102,105,120,116,117,114,101,66,45,62,71,101,116,84,121,112,101,40,41,32,61,61,32,98,50,83,104,97,112,101,58,58,101,95,99,105,114,99,108,101,0,0,0,0,0,0,50,51,98,50,67,104,97,105,110,65,110,100,67,105,114,99,108,101,67,111,110,116,97,99,116,0,0,0,0,0,0,0,160,27,0,0,88,23,0,0,120,17,0,0,0,0,0,0,0,0,0,0,120,24,0,0,8,0,0,0,21,0,0,0,22,0,0,0,0,0,0,0,109,95,102,105,120,116,117,114,101,65,45,62,71,101,116,84,121,112,101,40,41,32,61,61,32,98,50,83,104,97,112,101,58,58,101,95,99,104,97,105,110,0,0,0,0,0,0,0,66,111,120,50,68,47,68,121,110,97,109,105,99,115,47,67,111,110,116,97,99,116,115,47,98,50,67,104,97,105,110,65,110,100,80,111,108,121,103,111,110,67,111,110,116,97,99,116,46,99,112,112,0,0,0,0,98,50,67,104,97,105,110,65,110,100,80,111,108,121,103,111,110,67,111,110,116,97,99,116,0,0,0,0,0,0,0,0,109,95,102,105,120,116,117,114,101,66,45,62,71,101,116,84,121,112,101,40,41,32,61,61,32,98,50,83,104,97,112,101,58,58,101,95,112,111,108,121,103,111,110,0,0,0,0,0,50,52,98,50,67,104,97,105,110,65,110,100,80,111,108,121,103,111,110,67,111,110,116,97,99,116,0,0,0,0,0,0,160,27,0,0,88,24,0,0,120,17,0,0,0,0,0,0,0,0,0,0,88,25,0,0,9,0,0,0,23,0,0,0,24,0,0,0,0,0,0,0,109,95,102,105,120,116,117,114,101,65,45,62,71,101,116,84,121,112,101,40,41,32,61,61,32,98,50,83,104,97,112,101,58,58,101,95,99,105,114,99,108,101,0,0,0,0,0,0,66,111,120,50,68,47,68,121,110,97,109,105,99,115,47,67,111,110,116,97,99,116,115,47,98,50,67,105,114,99,108,101,67,111,110,116,97,99,116,46,99,112,112,0,0,0,0,0,98,50,67,105,114,99,108,101,67,111,110,116,97,99,116,0,109,95,102,105,120,116,117,114,101,66,45,62,71,101,116,84,121,112,101,40,41,32,61,61,32,98,50,83,104,97,112,101,58,58,101,95,99,105,114,99,108,101,0,0,0,0,0,0,49,53,98,50,67,105,114,99,108,101,67,111,110,116,97,99,116,0,0,0,0,0,0,0,160,27,0,0,64,25,0,0,120,17,0,0,0,0,0,0,112,111,105,110,116,67,111,117,110,116,32,62,32,48,0,0,66,111,120,50,68,47,68,121,110,97,109,105,99,115,47,67,111,110,116,97,99,116,115,47,98,50,67,111,110,116,97,99,116,83,111,108,118,101,114,46,99,112,112,0,0,0,0,0,98,50,67,111,110,116,97,99,116,83,111,108,118,101,114,0,109,97,110,105,102,111,108,100,45,62,112,111,105,110,116,67,111,117,110,116,32,62,32,48,0,0,0,0,0,0,0,0,73,110,105,116,105,97,108,105,122,101,86,101,108,111,99,105,116,121,67,111,110,115,116,114,97,105,110,116,115,0,0,0,112,111,105,110,116,67,111,117,110,116,32,61,61,32,49,32,124,124,32,112,111,105,110,116,67,111,117,110,116,32,61,61,32,50,0,0,0,0,0,0,83,111,108,118,101,86,101,108,111,99,105,116,121,67,111,110,115,116,114,97,105,110,116,115,0,0,0,0,0,0,0,0,97,46,120,32,62,61,32,48,46,48,102,32,38,38,32,97,46,121,32,62,61,32,48,46,48,102,0,0,0,0,0,0,112,99,45,62,112,111,105,110,116,67,111,117,110,116,32,62,32,48,0,0,0,0,0,0,73,110,105,116,105,97,108,105,122,101,0,0,0,0,0,0,66,111,120,50,68,47,67,111,108,108,105,115,105,111,110,47,83,104,97,112,101,115,47,98,50,67,104,97,105,110,83,104,97,112,101,46,99,112,112,0,48,32,60,61,32,105,110,100,101,120,32,38,38,32,105,110,100,101,120,32,60,32,109,95,99,111,117,110,116,32,45,32,49,0,0,0,0,0,0,0,71,101,116,67,104,105,108,100,69,100,103,101,0,0,0,0,83,116,57,116,121,112,101,95,105,110,102,111,0,0,0,0,120,27,0,0,232,26,0,0,78,49,48,95,95,99,120,120,97,98,105,118,49,49,54,95,95,115,104,105,109,95,116,121,112,101,95,105,110,102,111,69,0,0,0,0,0,0,0,0,160,27,0,0,0,27,0,0,248,26,0,0,0,0,0,0,78,49,48,95,95,99,120,120,97,98,105,118,49,49,55,95,95,99,108,97,115,115,95,116,121,112,101,95,105,110,102,111,69,0,0,0,0,0,0,0,160,27,0,0,56,27,0,0,40,27,0,0,0,0,0,0,0,0,0,0,96,27,0,0,25,0,0,0,26,0,0,0,27,0,0,0,28,0,0,0,4,0,0,0,1,0,0,0,1,0,0,0,10,0,0,0,0,0,0,0,232,27,0,0,25,0,0,0,29,0,0,0,27,0,0,0,28,0,0,0,4,0,0,0,2,0,0,0,2,0,0,0,11,0,0,0,78,49,48,95,95,99,120,120,97,98,105,118,49,50,48,95,95,115,105,95,99,108,97,115,115,95,116,121,112,101,95,105,110,102,111,69,0,0,0,0,160,27,0,0,192,27,0,0,96,27,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,40,30,0,0,30,0,0,0,31,0,0,0,3,0,0,0,0,0,0,0,115,116,100,58,58,98,97,100,95,97,108,108,111,99,0,0,83,116,57,98,97,100,95,97,108,108,111,99,0,0,0,0,160,27,0,0,24,30,0,0,0,0,0,0,0,0,0,0], "i8", ALLOC_NONE, Runtime.GLOBAL_BASE);
+
+
+
+
+var tempDoublePtr = Runtime.alignMemory(allocate(12, "i8", ALLOC_STATIC), 8);
+
+assert(tempDoublePtr % 8 == 0);
+
+function copyTempFloat(ptr) { // functions, because inlining this code increases code size too much
+
+ HEAP8[tempDoublePtr] = HEAP8[ptr];
+
+ HEAP8[tempDoublePtr+1] = HEAP8[ptr+1];
+
+ HEAP8[tempDoublePtr+2] = HEAP8[ptr+2];
+
+ HEAP8[tempDoublePtr+3] = HEAP8[ptr+3];
+
+}
+
+function copyTempDouble(ptr) {
+
+ HEAP8[tempDoublePtr] = HEAP8[ptr];
+
+ HEAP8[tempDoublePtr+1] = HEAP8[ptr+1];
+
+ HEAP8[tempDoublePtr+2] = HEAP8[ptr+2];
+
+ HEAP8[tempDoublePtr+3] = HEAP8[ptr+3];
+
+ HEAP8[tempDoublePtr+4] = HEAP8[ptr+4];
+
+ HEAP8[tempDoublePtr+5] = HEAP8[ptr+5];
+
+ HEAP8[tempDoublePtr+6] = HEAP8[ptr+6];
+
+ HEAP8[tempDoublePtr+7] = HEAP8[ptr+7];
+
+}
+
+
+ function _emscripten_set_main_loop(func, fps, simulateInfiniteLoop, arg) {
+ Module['noExitRuntime'] = true;
+
+ Browser.mainLoop.runner = function Browser_mainLoop_runner() {
+ if (ABORT) return;
+ if (Browser.mainLoop.queue.length > 0) {
+ var start = Date.now();
+ var blocker = Browser.mainLoop.queue.shift();
+ blocker.func(blocker.arg);
+ if (Browser.mainLoop.remainingBlockers) {
+ var remaining = Browser.mainLoop.remainingBlockers;
+ var next = remaining%1 == 0 ? remaining-1 : Math.floor(remaining);
+ if (blocker.counted) {
+ Browser.mainLoop.remainingBlockers = next;
+ } else {
+ // not counted, but move the progress along a tiny bit
+ next = next + 0.5; // do not steal all the next one's progress
+ Browser.mainLoop.remainingBlockers = (8*remaining + next)/9;
+ }
+ }
+ console.log('main loop blocker "' + blocker.name + '" took ' + (Date.now() - start) + ' ms'); //, left: ' + Browser.mainLoop.remainingBlockers);
+ Browser.mainLoop.updateStatus();
+ setTimeout(Browser.mainLoop.runner, 0);
+ return;
+ }
+ if (Browser.mainLoop.shouldPause) {
+ // catch pauses from non-main loop sources
+ Browser.mainLoop.paused = true;
+ Browser.mainLoop.shouldPause = false;
+ return;
+ }
+
+ // Signal GL rendering layer that processing of a new frame is about to start. This helps it optimize
+ // VBO double-buffering and reduce GPU stalls.
+
+ if (Browser.mainLoop.method === 'timeout' && Module.ctx) {
+ Module.printErr('Looks like you are rendering without using requestAnimationFrame for the main loop. You should use 0 for the frame rate in emscripten_set_main_loop in order to use requestAnimationFrame, as that can greatly improve your frame rates!');
+ Browser.mainLoop.method = ''; // just warn once per call to set main loop
+ }
+
+ if (Module['preMainLoop']) {
+ Module['preMainLoop']();
+ }
+
+ try {
+ if (typeof arg !== 'undefined') {
+ Runtime.dynCall('vi', func, [arg]);
+ } else {
+ Runtime.dynCall('v', func);
+ }
+ } catch (e) {
+ if (e instanceof ExitStatus) {
+ return;
+ } else {
+ if (e && typeof e === 'object' && e.stack) Module.printErr('exception thrown: ' + [e, e.stack]);
+ throw e;
+ }
+ }
+
+ if (Module['postMainLoop']) {
+ Module['postMainLoop']();
+ }
+
+ if (Browser.mainLoop.shouldPause) {
+ // catch pauses from the main loop itself
+ Browser.mainLoop.paused = true;
+ Browser.mainLoop.shouldPause = false;
+ return;
+ }
+ Browser.mainLoop.scheduler();
+ }
+ if (fps && fps > 0) {
+ Browser.mainLoop.scheduler = function Browser_mainLoop_scheduler() {
+ setTimeout(Browser.mainLoop.runner, 1000/fps); // doing this each time means that on exception, we stop
+ };
+ Browser.mainLoop.method = 'timeout';
+ } else {
+ Browser.mainLoop.scheduler = function Browser_mainLoop_scheduler() {
+ Browser.requestAnimationFrame(Browser.mainLoop.runner);
+ };
+ Browser.mainLoop.method = 'rAF';
+ }
+ Browser.mainLoop.scheduler();
+
+ if (simulateInfiniteLoop) {
+ throw 'SimulateInfiniteLoop';
+ }
+ }
+
+ var _cosf=Math_cos;
+
+ function ___cxa_pure_virtual() {
+ ABORT = true;
+ throw 'Pure virtual function called!';
+ }
+
+ function _time(ptr) {
+ var ret = Math.floor(Date.now()/1000);
+ if (ptr) {
+ HEAP32[((ptr)>>2)]=ret;
+ }
+ return ret;
+ }
+
+ function ___assert_fail(condition, filename, line, func) {
+ ABORT = true;
+ throw 'Assertion failed: ' + Pointer_stringify(condition) + ', at: ' + [filename ? Pointer_stringify(filename) : 'unknown filename', line, func ? Pointer_stringify(func) : 'unknown function'] + ' at ' + stackTrace();
+ }
+
+
+ function __ZSt18uncaught_exceptionv() { // std::uncaught_exception()
+ return !!__ZSt18uncaught_exceptionv.uncaught_exception;
+ }
+
+
+
+ function ___cxa_is_number_type(type) {
+ var isNumber = false;
+ try { if (type == __ZTIi) isNumber = true } catch(e){}
+ try { if (type == __ZTIj) isNumber = true } catch(e){}
+ try { if (type == __ZTIl) isNumber = true } catch(e){}
+ try { if (type == __ZTIm) isNumber = true } catch(e){}
+ try { if (type == __ZTIx) isNumber = true } catch(e){}
+ try { if (type == __ZTIy) isNumber = true } catch(e){}
+ try { if (type == __ZTIf) isNumber = true } catch(e){}
+ try { if (type == __ZTId) isNumber = true } catch(e){}
+ try { if (type == __ZTIe) isNumber = true } catch(e){}
+ try { if (type == __ZTIc) isNumber = true } catch(e){}
+ try { if (type == __ZTIa) isNumber = true } catch(e){}
+ try { if (type == __ZTIh) isNumber = true } catch(e){}
+ try { if (type == __ZTIs) isNumber = true } catch(e){}
+ try { if (type == __ZTIt) isNumber = true } catch(e){}
+ return isNumber;
+ }function ___cxa_does_inherit(definiteType, possibilityType, possibility) {
+ if (possibility == 0) return false;
+ if (possibilityType == 0 || possibilityType == definiteType)
+ return true;
+ var possibility_type_info;
+ if (___cxa_is_number_type(possibilityType)) {
+ possibility_type_info = possibilityType;
+ } else {
+ var possibility_type_infoAddr = HEAP32[((possibilityType)>>2)] - 8;
+ possibility_type_info = HEAP32[((possibility_type_infoAddr)>>2)];
+ }
+ switch (possibility_type_info) {
+ case 0: // possibility is a pointer
+ // See if definite type is a pointer
+ var definite_type_infoAddr = HEAP32[((definiteType)>>2)] - 8;
+ var definite_type_info = HEAP32[((definite_type_infoAddr)>>2)];
+ if (definite_type_info == 0) {
+ // Also a pointer; compare base types of pointers
+ var defPointerBaseAddr = definiteType+8;
+ var defPointerBaseType = HEAP32[((defPointerBaseAddr)>>2)];
+ var possPointerBaseAddr = possibilityType+8;
+ var possPointerBaseType = HEAP32[((possPointerBaseAddr)>>2)];
+ return ___cxa_does_inherit(defPointerBaseType, possPointerBaseType, possibility);
+ } else
+ return false; // one pointer and one non-pointer
+ case 1: // class with no base class
+ return false;
+ case 2: // class with base class
+ var parentTypeAddr = possibilityType + 8;
+ var parentType = HEAP32[((parentTypeAddr)>>2)];
+ return ___cxa_does_inherit(definiteType, parentType, possibility);
+ default:
+ return false; // some unencountered type
+ }
+ }
+
+
+
+ var ___cxa_last_thrown_exception=0;function ___resumeException(ptr) {
+ if (!___cxa_last_thrown_exception) { ___cxa_last_thrown_exception = ptr; }
+ throw ptr + " - Exception catching is disabled, this exception cannot be caught. Compile with -s DISABLE_EXCEPTION_CATCHING=0 or DISABLE_EXCEPTION_CATCHING=2 to catch.";
+ }
+
+ var ___cxa_exception_header_size=8;function ___cxa_find_matching_catch(thrown, throwntype) {
+ if (thrown == -1) thrown = ___cxa_last_thrown_exception;
+ header = thrown - ___cxa_exception_header_size;
+ if (throwntype == -1) throwntype = HEAP32[((header)>>2)];
+ var typeArray = Array.prototype.slice.call(arguments, 2);
+
+ // If throwntype is a pointer, this means a pointer has been
+ // thrown. When a pointer is thrown, actually what's thrown
+ // is a pointer to the pointer. We'll dereference it.
+ if (throwntype != 0 && !___cxa_is_number_type(throwntype)) {
+ var throwntypeInfoAddr= HEAP32[((throwntype)>>2)] - 8;
+ var throwntypeInfo= HEAP32[((throwntypeInfoAddr)>>2)];
+ if (throwntypeInfo == 0)
+ thrown = HEAP32[((thrown)>>2)];
+ }
+ // The different catch blocks are denoted by different types.
+ // Due to inheritance, those types may not precisely match the
+ // type of the thrown object. Find one which matches, and
+ // return the type of the catch block which should be called.
+ for (var i = 0; i < typeArray.length; i++) {
+ if (___cxa_does_inherit(typeArray[i], throwntype, thrown))
+ return ((asm["setTempRet0"](typeArray[i]),thrown)|0);
+ }
+ // Shouldn't happen unless we have bogus data in typeArray
+ // or encounter a type for which emscripten doesn't have suitable
+ // typeinfo defined. Best-efforts match just in case.
+ return ((asm["setTempRet0"](throwntype),thrown)|0);
+ }function ___cxa_throw(ptr, type, destructor) {
+ if (!___cxa_throw.initialized) {
+ try {
+ HEAP32[((__ZTVN10__cxxabiv119__pointer_type_infoE)>>2)]=0; // Workaround for libcxxabi integration bug
+ } catch(e){}
+ try {
+ HEAP32[((__ZTVN10__cxxabiv117__class_type_infoE)>>2)]=1; // Workaround for libcxxabi integration bug
+ } catch(e){}
+ try {
+ HEAP32[((__ZTVN10__cxxabiv120__si_class_type_infoE)>>2)]=2; // Workaround for libcxxabi integration bug
+ } catch(e){}
+ ___cxa_throw.initialized = true;
+ }
+ var header = ptr - ___cxa_exception_header_size;
+ HEAP32[((header)>>2)]=type;
+ HEAP32[(((header)+(4))>>2)]=destructor;
+ ___cxa_last_thrown_exception = ptr;
+ if (!("uncaught_exception" in __ZSt18uncaught_exceptionv)) {
+ __ZSt18uncaught_exceptionv.uncaught_exception = 1;
+ } else {
+ __ZSt18uncaught_exceptionv.uncaught_exception++;
+ }
+ throw ptr + " - Exception catching is disabled, this exception cannot be caught. Compile with -s DISABLE_EXCEPTION_CATCHING=0 or DISABLE_EXCEPTION_CATCHING=2 to catch.";
+ }
+
+
+ Module["_memset"] = _memset;
+
+
+
+ function __exit(status) {
+ // void _exit(int status);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/exit.html
+ Module['exit'](status);
+ }function _exit(status) {
+ __exit(status);
+ }function __ZSt9terminatev() {
+ _exit(-1234);
+ }
+
+ function _abort() {
+ Module['abort']();
+ }
+
+
+
+
+
+ var ERRNO_CODES={EPERM:1,ENOENT:2,ESRCH:3,EINTR:4,EIO:5,ENXIO:6,E2BIG:7,ENOEXEC:8,EBADF:9,ECHILD:10,EAGAIN:11,EWOULDBLOCK:11,ENOMEM:12,EACCES:13,EFAULT:14,ENOTBLK:15,EBUSY:16,EEXIST:17,EXDEV:18,ENODEV:19,ENOTDIR:20,EISDIR:21,EINVAL:22,ENFILE:23,EMFILE:24,ENOTTY:25,ETXTBSY:26,EFBIG:27,ENOSPC:28,ESPIPE:29,EROFS:30,EMLINK:31,EPIPE:32,EDOM:33,ERANGE:34,ENOMSG:42,EIDRM:43,ECHRNG:44,EL2NSYNC:45,EL3HLT:46,EL3RST:47,ELNRNG:48,EUNATCH:49,ENOCSI:50,EL2HLT:51,EDEADLK:35,ENOLCK:37,EBADE:52,EBADR:53,EXFULL:54,ENOANO:55,EBADRQC:56,EBADSLT:57,EDEADLOCK:35,EBFONT:59,ENOSTR:60,ENODATA:61,ETIME:62,ENOSR:63,ENONET:64,ENOPKG:65,EREMOTE:66,ENOLINK:67,EADV:68,ESRMNT:69,ECOMM:70,EPROTO:71,EMULTIHOP:72,EDOTDOT:73,EBADMSG:74,ENOTUNIQ:76,EBADFD:77,EREMCHG:78,ELIBACC:79,ELIBBAD:80,ELIBSCN:81,ELIBMAX:82,ELIBEXEC:83,ENOSYS:38,ENOTEMPTY:39,ENAMETOOLONG:36,ELOOP:40,EOPNOTSUPP:95,EPFNOSUPPORT:96,ECONNRESET:104,ENOBUFS:105,EAFNOSUPPORT:97,EPROTOTYPE:91,ENOTSOCK:88,ENOPROTOOPT:92,ESHUTDOWN:108,ECONNREFUSED:111,EADDRINUSE:98,ECONNABORTED:103,ENETUNREACH:101,ENETDOWN:100,ETIMEDOUT:110,EHOSTDOWN:112,EHOSTUNREACH:113,EINPROGRESS:115,EALREADY:114,EDESTADDRREQ:89,EMSGSIZE:90,EPROTONOSUPPORT:93,ESOCKTNOSUPPORT:94,EADDRNOTAVAIL:99,ENETRESET:102,EISCONN:106,ENOTCONN:107,ETOOMANYREFS:109,EUSERS:87,EDQUOT:122,ESTALE:116,ENOTSUP:95,ENOMEDIUM:123,EILSEQ:84,EOVERFLOW:75,ECANCELED:125,ENOTRECOVERABLE:131,EOWNERDEAD:130,ESTRPIPE:86};
+
+ var ERRNO_MESSAGES={0:"Success",1:"Not super-user",2:"No such file or directory",3:"No such process",4:"Interrupted system call",5:"I/O error",6:"No such device or address",7:"Arg list too long",8:"Exec format error",9:"Bad file number",10:"No children",11:"No more processes",12:"Not enough core",13:"Permission denied",14:"Bad address",15:"Block device required",16:"Mount device busy",17:"File exists",18:"Cross-device link",19:"No such device",20:"Not a directory",21:"Is a directory",22:"Invalid argument",23:"Too many open files in system",24:"Too many open files",25:"Not a typewriter",26:"Text file busy",27:"File too large",28:"No space left on device",29:"Illegal seek",30:"Read only file system",31:"Too many links",32:"Broken pipe",33:"Math arg out of domain of func",34:"Math result not representable",35:"File locking deadlock error",36:"File or path name too long",37:"No record locks available",38:"Function not implemented",39:"Directory not empty",40:"Too many symbolic links",42:"No message of desired type",43:"Identifier removed",44:"Channel number out of range",45:"Level 2 not synchronized",46:"Level 3 halted",47:"Level 3 reset",48:"Link number out of range",49:"Protocol driver not attached",50:"No CSI structure available",51:"Level 2 halted",52:"Invalid exchange",53:"Invalid request descriptor",54:"Exchange full",55:"No anode",56:"Invalid request code",57:"Invalid slot",59:"Bad font file fmt",60:"Device not a stream",61:"No data (for no delay io)",62:"Timer expired",63:"Out of streams resources",64:"Machine is not on the network",65:"Package not installed",66:"The object is remote",67:"The link has been severed",68:"Advertise error",69:"Srmount error",70:"Communication error on send",71:"Protocol error",72:"Multihop attempted",73:"Cross mount point (not really error)",74:"Trying to read unreadable message",75:"Value too large for defined data type",76:"Given log. name not unique",77:"f.d. invalid for this operation",78:"Remote address changed",79:"Can access a needed shared lib",80:"Accessing a corrupted shared lib",81:".lib section in a.out corrupted",82:"Attempting to link in too many libs",83:"Attempting to exec a shared library",84:"Illegal byte sequence",86:"Streams pipe error",87:"Too many users",88:"Socket operation on non-socket",89:"Destination address required",90:"Message too long",91:"Protocol wrong type for socket",92:"Protocol not available",93:"Unknown protocol",94:"Socket type not supported",95:"Not supported",96:"Protocol family not supported",97:"Address family not supported by protocol family",98:"Address already in use",99:"Address not available",100:"Network interface is not configured",101:"Network is unreachable",102:"Connection reset by network",103:"Connection aborted",104:"Connection reset by peer",105:"No buffer space available",106:"Socket is already connected",107:"Socket is not connected",108:"Can't send after socket shutdown",109:"Too many references",110:"Connection timed out",111:"Connection refused",112:"Host is down",113:"Host is unreachable",114:"Socket already connected",115:"Connection already in progress",116:"Stale file handle",122:"Quota exceeded",123:"No medium (in tape drive)",125:"Operation canceled",130:"Previous owner died",131:"State not recoverable"};
+
+
+ var ___errno_state=0;function ___setErrNo(value) {
+ // For convenient setting and returning of errno.
+ HEAP32[((___errno_state)>>2)]=value;
+ return value;
+ }
+
+ var PATH={splitPath:function (filename) {
+ var splitPathRe = /^(\/?|)([\s\S]*?)((?:\.{1,2}|[^\/]+?|)(\.[^.\/]*|))(?:[\/]*)$/;
+ return splitPathRe.exec(filename).slice(1);
+ },normalizeArray:function (parts, allowAboveRoot) {
+ // if the path tries to go above the root, `up` ends up > 0
+ var up = 0;
+ for (var i = parts.length - 1; i >= 0; i--) {
+ var last = parts[i];
+ if (last === '.') {
+ parts.splice(i, 1);
+ } else if (last === '..') {
+ parts.splice(i, 1);
+ up++;
+ } else if (up) {
+ parts.splice(i, 1);
+ up--;
+ }
+ }
+ // if the path is allowed to go above the root, restore leading ..s
+ if (allowAboveRoot) {
+ for (; up--; up) {
+ parts.unshift('..');
+ }
+ }
+ return parts;
+ },normalize:function (path) {
+ var isAbsolute = path.charAt(0) === '/',
+ trailingSlash = path.substr(-1) === '/';
+ // Normalize the path
+ path = PATH.normalizeArray(path.split('/').filter(function(p) {
+ return !!p;
+ }), !isAbsolute).join('/');
+ if (!path && !isAbsolute) {
+ path = '.';
+ }
+ if (path && trailingSlash) {
+ path += '/';
+ }
+ return (isAbsolute ? '/' : '') + path;
+ },dirname:function (path) {
+ var result = PATH.splitPath(path),
+ root = result[0],
+ dir = result[1];
+ if (!root && !dir) {
+ // No dirname whatsoever
+ return '.';
+ }
+ if (dir) {
+ // It has a dirname, strip trailing slash
+ dir = dir.substr(0, dir.length - 1);
+ }
+ return root + dir;
+ },basename:function (path) {
+ // EMSCRIPTEN return '/'' for '/', not an empty string
+ if (path === '/') return '/';
+ var lastSlash = path.lastIndexOf('/');
+ if (lastSlash === -1) return path;
+ return path.substr(lastSlash+1);
+ },extname:function (path) {
+ return PATH.splitPath(path)[3];
+ },join:function () {
+ var paths = Array.prototype.slice.call(arguments, 0);
+ return PATH.normalize(paths.join('/'));
+ },join2:function (l, r) {
+ return PATH.normalize(l + '/' + r);
+ },resolve:function () {
+ var resolvedPath = '',
+ resolvedAbsolute = false;
+ for (var i = arguments.length - 1; i >= -1 && !resolvedAbsolute; i--) {
+ var path = (i >= 0) ? arguments[i] : FS.cwd();
+ // Skip empty and invalid entries
+ if (typeof path !== 'string') {
+ throw new TypeError('Arguments to path.resolve must be strings');
+ } else if (!path) {
+ continue;
+ }
+ resolvedPath = path + '/' + resolvedPath;
+ resolvedAbsolute = path.charAt(0) === '/';
+ }
+ // At this point the path should be resolved to a full absolute path, but
+ // handle relative paths to be safe (might happen when process.cwd() fails)
+ resolvedPath = PATH.normalizeArray(resolvedPath.split('/').filter(function(p) {
+ return !!p;
+ }), !resolvedAbsolute).join('/');
+ return ((resolvedAbsolute ? '/' : '') + resolvedPath) || '.';
+ },relative:function (from, to) {
+ from = PATH.resolve(from).substr(1);
+ to = PATH.resolve(to).substr(1);
+ function trim(arr) {
+ var start = 0;
+ for (; start < arr.length; start++) {
+ if (arr[start] !== '') break;
+ }
+ var end = arr.length - 1;
+ for (; end >= 0; end--) {
+ if (arr[end] !== '') break;
+ }
+ if (start > end) return [];
+ return arr.slice(start, end - start + 1);
+ }
+ var fromParts = trim(from.split('/'));
+ var toParts = trim(to.split('/'));
+ var length = Math.min(fromParts.length, toParts.length);
+ var samePartsLength = length;
+ for (var i = 0; i < length; i++) {
+ if (fromParts[i] !== toParts[i]) {
+ samePartsLength = i;
+ break;
+ }
+ }
+ var outputParts = [];
+ for (var i = samePartsLength; i < fromParts.length; i++) {
+ outputParts.push('..');
+ }
+ outputParts = outputParts.concat(toParts.slice(samePartsLength));
+ return outputParts.join('/');
+ }};
+
+ var TTY={ttys:[],init:function () {
+ // https://github.com/kripken/emscripten/pull/1555
+ // if (ENVIRONMENT_IS_NODE) {
+ // // currently, FS.init does not distinguish if process.stdin is a file or TTY
+ // // device, it always assumes it's a TTY device. because of this, we're forcing
+ // // process.stdin to UTF8 encoding to at least make stdin reading compatible
+ // // with text files until FS.init can be refactored.
+ // process['stdin']['setEncoding']('utf8');
+ // }
+ },shutdown:function () {
+ // https://github.com/kripken/emscripten/pull/1555
+ // if (ENVIRONMENT_IS_NODE) {
+ // // inolen: any idea as to why node -e 'process.stdin.read()' wouldn't exit immediately (with process.stdin being a tty)?
+ // // isaacs: because now it's reading from the stream, you've expressed interest in it, so that read() kicks off a _read() which creates a ReadReq operation
+ // // inolen: I thought read() in that case was a synchronous operation that just grabbed some amount of buffered data if it exists?
+ // // isaacs: it is. but it also triggers a _read() call, which calls readStart() on the handle
+ // // isaacs: do process.stdin.pause() and i'd think it'd probably close the pending call
+ // process['stdin']['pause']();
+ // }
+ },register:function (dev, ops) {
+ TTY.ttys[dev] = { input: [], output: [], ops: ops };
+ FS.registerDevice(dev, TTY.stream_ops);
+ },stream_ops:{open:function (stream) {
+ var tty = TTY.ttys[stream.node.rdev];
+ if (!tty) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ stream.tty = tty;
+ stream.seekable = false;
+ },close:function (stream) {
+ // flush any pending line data
+ if (stream.tty.output.length) {
+ stream.tty.ops.put_char(stream.tty, 10);
+ }
+ },read:function (stream, buffer, offset, length, pos /* ignored */) {
+ if (!stream.tty || !stream.tty.ops.get_char) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENXIO);
+ }
+ var bytesRead = 0;
+ for (var i = 0; i < length; i++) {
+ var result;
+ try {
+ result = stream.tty.ops.get_char(stream.tty);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ if (result === undefined && bytesRead === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ if (result === null || result === undefined) break;
+ bytesRead++;
+ buffer[offset+i] = result;
+ }
+ if (bytesRead) {
+ stream.node.timestamp = Date.now();
+ }
+ return bytesRead;
+ },write:function (stream, buffer, offset, length, pos) {
+ if (!stream.tty || !stream.tty.ops.put_char) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENXIO);
+ }
+ for (var i = 0; i < length; i++) {
+ try {
+ stream.tty.ops.put_char(stream.tty, buffer[offset+i]);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ }
+ if (length) {
+ stream.node.timestamp = Date.now();
+ }
+ return i;
+ }},default_tty_ops:{get_char:function (tty) {
+ if (!tty.input.length) {
+ var result = null;
+ if (ENVIRONMENT_IS_NODE) {
+ result = process['stdin']['read']();
+ if (!result) {
+ if (process['stdin']['_readableState'] && process['stdin']['_readableState']['ended']) {
+ return null; // EOF
+ }
+ return undefined; // no data available
+ }
+ } else if (typeof window != 'undefined' &&
+ typeof window.prompt == 'function') {
+ // Browser.
+ result = window.prompt('Input: '); // returns null on cancel
+ if (result !== null) {
+ result += '\n';
+ }
+ } else if (typeof readline == 'function') {
+ // Command line.
+ result = readline();
+ if (result !== null) {
+ result += '\n';
+ }
+ }
+ if (!result) {
+ return null;
+ }
+ tty.input = intArrayFromString(result, true);
+ }
+ return tty.input.shift();
+ },put_char:function (tty, val) {
+ if (val === null || val === 10) {
+ Module['print'](tty.output.join(''));
+ tty.output = [];
+ } else {
+ tty.output.push(TTY.utf8.processCChar(val));
+ }
+ }},default_tty1_ops:{put_char:function (tty, val) {
+ if (val === null || val === 10) {
+ Module['printErr'](tty.output.join(''));
+ tty.output = [];
+ } else {
+ tty.output.push(TTY.utf8.processCChar(val));
+ }
+ }}};
+
+ var MEMFS={ops_table:null,CONTENT_OWNING:1,CONTENT_FLEXIBLE:2,CONTENT_FIXED:3,mount:function (mount) {
+ return MEMFS.createNode(null, '/', 16384 | 511 /* 0777 */, 0);
+ },createNode:function (parent, name, mode, dev) {
+ if (FS.isBlkdev(mode) || FS.isFIFO(mode)) {
+ // no supported
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (!MEMFS.ops_table) {
+ MEMFS.ops_table = {
+ dir: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr,
+ lookup: MEMFS.node_ops.lookup,
+ mknod: MEMFS.node_ops.mknod,
+ rename: MEMFS.node_ops.rename,
+ unlink: MEMFS.node_ops.unlink,
+ rmdir: MEMFS.node_ops.rmdir,
+ readdir: MEMFS.node_ops.readdir,
+ symlink: MEMFS.node_ops.symlink
+ },
+ stream: {
+ llseek: MEMFS.stream_ops.llseek
+ }
+ },
+ file: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr
+ },
+ stream: {
+ llseek: MEMFS.stream_ops.llseek,
+ read: MEMFS.stream_ops.read,
+ write: MEMFS.stream_ops.write,
+ allocate: MEMFS.stream_ops.allocate,
+ mmap: MEMFS.stream_ops.mmap
+ }
+ },
+ link: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr,
+ readlink: MEMFS.node_ops.readlink
+ },
+ stream: {}
+ },
+ chrdev: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr
+ },
+ stream: FS.chrdev_stream_ops
+ },
+ };
+ }
+ var node = FS.createNode(parent, name, mode, dev);
+ if (FS.isDir(node.mode)) {
+ node.node_ops = MEMFS.ops_table.dir.node;
+ node.stream_ops = MEMFS.ops_table.dir.stream;
+ node.contents = {};
+ } else if (FS.isFile(node.mode)) {
+ node.node_ops = MEMFS.ops_table.file.node;
+ node.stream_ops = MEMFS.ops_table.file.stream;
+ node.contents = [];
+ node.contentMode = MEMFS.CONTENT_FLEXIBLE;
+ } else if (FS.isLink(node.mode)) {
+ node.node_ops = MEMFS.ops_table.link.node;
+ node.stream_ops = MEMFS.ops_table.link.stream;
+ } else if (FS.isChrdev(node.mode)) {
+ node.node_ops = MEMFS.ops_table.chrdev.node;
+ node.stream_ops = MEMFS.ops_table.chrdev.stream;
+ }
+ node.timestamp = Date.now();
+ // add the new node to the parent
+ if (parent) {
+ parent.contents[name] = node;
+ }
+ return node;
+ },ensureFlexible:function (node) {
+ if (node.contentMode !== MEMFS.CONTENT_FLEXIBLE) {
+ var contents = node.contents;
+ node.contents = Array.prototype.slice.call(contents);
+ node.contentMode = MEMFS.CONTENT_FLEXIBLE;
+ }
+ },node_ops:{getattr:function (node) {
+ var attr = {};
+ // device numbers reuse inode numbers.
+ attr.dev = FS.isChrdev(node.mode) ? node.id : 1;
+ attr.ino = node.id;
+ attr.mode = node.mode;
+ attr.nlink = 1;
+ attr.uid = 0;
+ attr.gid = 0;
+ attr.rdev = node.rdev;
+ if (FS.isDir(node.mode)) {
+ attr.size = 4096;
+ } else if (FS.isFile(node.mode)) {
+ attr.size = node.contents.length;
+ } else if (FS.isLink(node.mode)) {
+ attr.size = node.link.length;
+ } else {
+ attr.size = 0;
+ }
+ attr.atime = new Date(node.timestamp);
+ attr.mtime = new Date(node.timestamp);
+ attr.ctime = new Date(node.timestamp);
+ // NOTE: In our implementation, st_blocks = Math.ceil(st_size/st_blksize),
+ // but this is not required by the standard.
+ attr.blksize = 4096;
+ attr.blocks = Math.ceil(attr.size / attr.blksize);
+ return attr;
+ },setattr:function (node, attr) {
+ if (attr.mode !== undefined) {
+ node.mode = attr.mode;
+ }
+ if (attr.timestamp !== undefined) {
+ node.timestamp = attr.timestamp;
+ }
+ if (attr.size !== undefined) {
+ MEMFS.ensureFlexible(node);
+ var contents = node.contents;
+ if (attr.size < contents.length) contents.length = attr.size;
+ else while (attr.size > contents.length) contents.push(0);
+ }
+ },lookup:function (parent, name) {
+ throw FS.genericErrors[ERRNO_CODES.ENOENT];
+ },mknod:function (parent, name, mode, dev) {
+ return MEMFS.createNode(parent, name, mode, dev);
+ },rename:function (old_node, new_dir, new_name) {
+ // if we're overwriting a directory at new_name, make sure it's empty.
+ if (FS.isDir(old_node.mode)) {
+ var new_node;
+ try {
+ new_node = FS.lookupNode(new_dir, new_name);
+ } catch (e) {
+ }
+ if (new_node) {
+ for (var i in new_node.contents) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTEMPTY);
+ }
+ }
+ }
+ // do the internal rewiring
+ delete old_node.parent.contents[old_node.name];
+ old_node.name = new_name;
+ new_dir.contents[new_name] = old_node;
+ old_node.parent = new_dir;
+ },unlink:function (parent, name) {
+ delete parent.contents[name];
+ },rmdir:function (parent, name) {
+ var node = FS.lookupNode(parent, name);
+ for (var i in node.contents) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTEMPTY);
+ }
+ delete parent.contents[name];
+ },readdir:function (node) {
+ var entries = ['.', '..']
+ for (var key in node.contents) {
+ if (!node.contents.hasOwnProperty(key)) {
+ continue;
+ }
+ entries.push(key);
+ }
+ return entries;
+ },symlink:function (parent, newname, oldpath) {
+ var node = MEMFS.createNode(parent, newname, 511 /* 0777 */ | 40960, 0);
+ node.link = oldpath;
+ return node;
+ },readlink:function (node) {
+ if (!FS.isLink(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ return node.link;
+ }},stream_ops:{read:function (stream, buffer, offset, length, position) {
+ var contents = stream.node.contents;
+ if (position >= contents.length)
+ return 0;
+ var size = Math.min(contents.length - position, length);
+ assert(size >= 0);
+ if (size > 8 && contents.subarray) { // non-trivial, and typed array
+ buffer.set(contents.subarray(position, position + size), offset);
+ } else
+ {
+ for (var i = 0; i < size; i++) {
+ buffer[offset + i] = contents[position + i];
+ }
+ }
+ return size;
+ },write:function (stream, buffer, offset, length, position, canOwn) {
+ var node = stream.node;
+ node.timestamp = Date.now();
+ var contents = node.contents;
+ if (length && contents.length === 0 && position === 0 && buffer.subarray) {
+ // just replace it with the new data
+ if (canOwn && offset === 0) {
+ node.contents = buffer; // this could be a subarray of Emscripten HEAP, or allocated from some other source.
+ node.contentMode = (buffer.buffer === HEAP8.buffer) ? MEMFS.CONTENT_OWNING : MEMFS.CONTENT_FIXED;
+ } else {
+ node.contents = new Uint8Array(buffer.subarray(offset, offset+length));
+ node.contentMode = MEMFS.CONTENT_FIXED;
+ }
+ return length;
+ }
+ MEMFS.ensureFlexible(node);
+ var contents = node.contents;
+ while (contents.length < position) contents.push(0);
+ for (var i = 0; i < length; i++) {
+ contents[position + i] = buffer[offset + i];
+ }
+ return length;
+ },llseek:function (stream, offset, whence) {
+ var position = offset;
+ if (whence === 1) { // SEEK_CUR.
+ position += stream.position;
+ } else if (whence === 2) { // SEEK_END.
+ if (FS.isFile(stream.node.mode)) {
+ position += stream.node.contents.length;
+ }
+ }
+ if (position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ stream.ungotten = [];
+ stream.position = position;
+ return position;
+ },allocate:function (stream, offset, length) {
+ MEMFS.ensureFlexible(stream.node);
+ var contents = stream.node.contents;
+ var limit = offset + length;
+ while (limit > contents.length) contents.push(0);
+ },mmap:function (stream, buffer, offset, length, position, prot, flags) {
+ if (!FS.isFile(stream.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ var ptr;
+ var allocated;
+ var contents = stream.node.contents;
+ // Only make a new copy when MAP_PRIVATE is specified.
+ if ( !(flags & 2) &&
+ (contents.buffer === buffer || contents.buffer === buffer.buffer) ) {
+ // We can't emulate MAP_SHARED when the file is not backed by the buffer
+ // we're mapping to (e.g. the HEAP buffer).
+ allocated = false;
+ ptr = contents.byteOffset;
+ } else {
+ // Try to avoid unnecessary slices.
+ if (position > 0 || position + length < contents.length) {
+ if (contents.subarray) {
+ contents = contents.subarray(position, position + length);
+ } else {
+ contents = Array.prototype.slice.call(contents, position, position + length);
+ }
+ }
+ allocated = true;
+ ptr = _malloc(length);
+ if (!ptr) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOMEM);
+ }
+ buffer.set(contents, ptr);
+ }
+ return { ptr: ptr, allocated: allocated };
+ }}};
+
+ var IDBFS={dbs:{},indexedDB:function () {
+ return window.indexedDB || window.mozIndexedDB || window.webkitIndexedDB || window.msIndexedDB;
+ },DB_VERSION:21,DB_STORE_NAME:"FILE_DATA",mount:function (mount) {
+ // reuse all of the core MEMFS functionality
+ return MEMFS.mount.apply(null, arguments);
+ },syncfs:function (mount, populate, callback) {
+ IDBFS.getLocalSet(mount, function(err, local) {
+ if (err) return callback(err);
+
+ IDBFS.getRemoteSet(mount, function(err, remote) {
+ if (err) return callback(err);
+
+ var src = populate ? remote : local;
+ var dst = populate ? local : remote;
+
+ IDBFS.reconcile(src, dst, callback);
+ });
+ });
+ },getDB:function (name, callback) {
+ // check the cache first
+ var db = IDBFS.dbs[name];
+ if (db) {
+ return callback(null, db);
+ }
+
+ var req;
+ try {
+ req = IDBFS.indexedDB().open(name, IDBFS.DB_VERSION);
+ } catch (e) {
+ return callback(e);
+ }
+ req.onupgradeneeded = function(e) {
+ var db = e.target.result;
+ var transaction = e.target.transaction;
+
+ var fileStore;
+
+ if (db.objectStoreNames.contains(IDBFS.DB_STORE_NAME)) {
+ fileStore = transaction.objectStore(IDBFS.DB_STORE_NAME);
+ } else {
+ fileStore = db.createObjectStore(IDBFS.DB_STORE_NAME);
+ }
+
+ fileStore.createIndex('timestamp', 'timestamp', { unique: false });
+ };
+ req.onsuccess = function() {
+ db = req.result;
+
+ // add to the cache
+ IDBFS.dbs[name] = db;
+ callback(null, db);
+ };
+ req.onerror = function() {
+ callback(this.error);
+ };
+ },getLocalSet:function (mount, callback) {
+ var entries = {};
+
+ function isRealDir(p) {
+ return p !== '.' && p !== '..';
+ };
+ function toAbsolute(root) {
+ return function(p) {
+ return PATH.join2(root, p);
+ }
+ };
+
+ var check = FS.readdir(mount.mountpoint).filter(isRealDir).map(toAbsolute(mount.mountpoint));
+
+ while (check.length) {
+ var path = check.pop();
+ var stat;
+
+ try {
+ stat = FS.stat(path);
+ } catch (e) {
+ return callback(e);
+ }
+
+ if (FS.isDir(stat.mode)) {
+ check.push.apply(check, FS.readdir(path).filter(isRealDir).map(toAbsolute(path)));
+ }
+
+ entries[path] = { timestamp: stat.mtime };
+ }
+
+ return callback(null, { type: 'local', entries: entries });
+ },getRemoteSet:function (mount, callback) {
+ var entries = {};
+
+ IDBFS.getDB(mount.mountpoint, function(err, db) {
+ if (err) return callback(err);
+
+ var transaction = db.transaction([IDBFS.DB_STORE_NAME], 'readonly');
+ transaction.onerror = function() { callback(this.error); };
+
+ var store = transaction.objectStore(IDBFS.DB_STORE_NAME);
+ var index = store.index('timestamp');
+
+ index.openKeyCursor().onsuccess = function(event) {
+ var cursor = event.target.result;
+
+ if (!cursor) {
+ return callback(null, { type: 'remote', db: db, entries: entries });
+ }
+
+ entries[cursor.primaryKey] = { timestamp: cursor.key };
+
+ cursor.continue();
+ };
+ });
+ },loadLocalEntry:function (path, callback) {
+ var stat, node;
+
+ try {
+ var lookup = FS.lookupPath(path);
+ node = lookup.node;
+ stat = FS.stat(path);
+ } catch (e) {
+ return callback(e);
+ }
+
+ if (FS.isDir(stat.mode)) {
+ return callback(null, { timestamp: stat.mtime, mode: stat.mode });
+ } else if (FS.isFile(stat.mode)) {
+ return callback(null, { timestamp: stat.mtime, mode: stat.mode, contents: node.contents });
+ } else {
+ return callback(new Error('node type not supported'));
+ }
+ },storeLocalEntry:function (path, entry, callback) {
+ try {
+ if (FS.isDir(entry.mode)) {
+ FS.mkdir(path, entry.mode);
+ } else if (FS.isFile(entry.mode)) {
+ FS.writeFile(path, entry.contents, { encoding: 'binary', canOwn: true });
+ } else {
+ return callback(new Error('node type not supported'));
+ }
+
+ FS.utime(path, entry.timestamp, entry.timestamp);
+ } catch (e) {
+ return callback(e);
+ }
+
+ callback(null);
+ },removeLocalEntry:function (path, callback) {
+ try {
+ var lookup = FS.lookupPath(path);
+ var stat = FS.stat(path);
+
+ if (FS.isDir(stat.mode)) {
+ FS.rmdir(path);
+ } else if (FS.isFile(stat.mode)) {
+ FS.unlink(path);
+ }
+ } catch (e) {
+ return callback(e);
+ }
+
+ callback(null);
+ },loadRemoteEntry:function (store, path, callback) {
+ var req = store.get(path);
+ req.onsuccess = function(event) { callback(null, event.target.result); };
+ req.onerror = function() { callback(this.error); };
+ },storeRemoteEntry:function (store, path, entry, callback) {
+ var req = store.put(entry, path);
+ req.onsuccess = function() { callback(null); };
+ req.onerror = function() { callback(this.error); };
+ },removeRemoteEntry:function (store, path, callback) {
+ var req = store.delete(path);
+ req.onsuccess = function() { callback(null); };
+ req.onerror = function() { callback(this.error); };
+ },reconcile:function (src, dst, callback) {
+ var total = 0;
+
+ var create = [];
+ Object.keys(src.entries).forEach(function (key) {
+ var e = src.entries[key];
+ var e2 = dst.entries[key];
+ if (!e2 || e.timestamp > e2.timestamp) {
+ create.push(key);
+ total++;
+ }
+ });
+
+ var remove = [];
+ Object.keys(dst.entries).forEach(function (key) {
+ var e = dst.entries[key];
+ var e2 = src.entries[key];
+ if (!e2) {
+ remove.push(key);
+ total++;
+ }
+ });
+
+ if (!total) {
+ return callback(null);
+ }
+
+ var errored = false;
+ var completed = 0;
+ var db = src.type === 'remote' ? src.db : dst.db;
+ var transaction = db.transaction([IDBFS.DB_STORE_NAME], 'readwrite');
+ var store = transaction.objectStore(IDBFS.DB_STORE_NAME);
+
+ function done(err) {
+ if (err) {
+ if (!done.errored) {
+ done.errored = true;
+ return callback(err);
+ }
+ return;
+ }
+ if (++completed >= total) {
+ return callback(null);
+ }
+ };
+
+ transaction.onerror = function() { done(this.error); };
+
+ // sort paths in ascending order so directory entries are created
+ // before the files inside them
+ create.sort().forEach(function (path) {
+ if (dst.type === 'local') {
+ IDBFS.loadRemoteEntry(store, path, function (err, entry) {
+ if (err) return done(err);
+ IDBFS.storeLocalEntry(path, entry, done);
+ });
+ } else {
+ IDBFS.loadLocalEntry(path, function (err, entry) {
+ if (err) return done(err);
+ IDBFS.storeRemoteEntry(store, path, entry, done);
+ });
+ }
+ });
+
+ // sort paths in descending order so files are deleted before their
+ // parent directories
+ remove.sort().reverse().forEach(function(path) {
+ if (dst.type === 'local') {
+ IDBFS.removeLocalEntry(path, done);
+ } else {
+ IDBFS.removeRemoteEntry(store, path, done);
+ }
+ });
+ }};
+
+ var NODEFS={isWindows:false,staticInit:function () {
+ NODEFS.isWindows = !!process.platform.match(/^win/);
+ },mount:function (mount) {
+ assert(ENVIRONMENT_IS_NODE);
+ return NODEFS.createNode(null, '/', NODEFS.getMode(mount.opts.root), 0);
+ },createNode:function (parent, name, mode, dev) {
+ if (!FS.isDir(mode) && !FS.isFile(mode) && !FS.isLink(mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var node = FS.createNode(parent, name, mode);
+ node.node_ops = NODEFS.node_ops;
+ node.stream_ops = NODEFS.stream_ops;
+ return node;
+ },getMode:function (path) {
+ var stat;
+ try {
+ stat = fs.lstatSync(path);
+ if (NODEFS.isWindows) {
+ // On Windows, directories return permission bits 'rw-rw-rw-', even though they have 'rwxrwxrwx', so
+ // propagate write bits to execute bits.
+ stat.mode = stat.mode | ((stat.mode & 146) >> 1);
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ return stat.mode;
+ },realPath:function (node) {
+ var parts = [];
+ while (node.parent !== node) {
+ parts.push(node.name);
+ node = node.parent;
+ }
+ parts.push(node.mount.opts.root);
+ parts.reverse();
+ return PATH.join.apply(null, parts);
+ },flagsToPermissionStringMap:{0:"r",1:"r+",2:"r+",64:"r",65:"r+",66:"r+",129:"rx+",193:"rx+",514:"w+",577:"w",578:"w+",705:"wx",706:"wx+",1024:"a",1025:"a",1026:"a+",1089:"a",1090:"a+",1153:"ax",1154:"ax+",1217:"ax",1218:"ax+",4096:"rs",4098:"rs+"},flagsToPermissionString:function (flags) {
+ if (flags in NODEFS.flagsToPermissionStringMap) {
+ return NODEFS.flagsToPermissionStringMap[flags];
+ } else {
+ return flags;
+ }
+ },node_ops:{getattr:function (node) {
+ var path = NODEFS.realPath(node);
+ var stat;
+ try {
+ stat = fs.lstatSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ // node.js v0.10.20 doesn't report blksize and blocks on Windows. Fake them with default blksize of 4096.
+ // See http://support.microsoft.com/kb/140365
+ if (NODEFS.isWindows && !stat.blksize) {
+ stat.blksize = 4096;
+ }
+ if (NODEFS.isWindows && !stat.blocks) {
+ stat.blocks = (stat.size+stat.blksize-1)/stat.blksize|0;
+ }
+ return {
+ dev: stat.dev,
+ ino: stat.ino,
+ mode: stat.mode,
+ nlink: stat.nlink,
+ uid: stat.uid,
+ gid: stat.gid,
+ rdev: stat.rdev,
+ size: stat.size,
+ atime: stat.atime,
+ mtime: stat.mtime,
+ ctime: stat.ctime,
+ blksize: stat.blksize,
+ blocks: stat.blocks
+ };
+ },setattr:function (node, attr) {
+ var path = NODEFS.realPath(node);
+ try {
+ if (attr.mode !== undefined) {
+ fs.chmodSync(path, attr.mode);
+ // update the common node structure mode as well
+ node.mode = attr.mode;
+ }
+ if (attr.timestamp !== undefined) {
+ var date = new Date(attr.timestamp);
+ fs.utimesSync(path, date, date);
+ }
+ if (attr.size !== undefined) {
+ fs.truncateSync(path, attr.size);
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },lookup:function (parent, name) {
+ var path = PATH.join2(NODEFS.realPath(parent), name);
+ var mode = NODEFS.getMode(path);
+ return NODEFS.createNode(parent, name, mode);
+ },mknod:function (parent, name, mode, dev) {
+ var node = NODEFS.createNode(parent, name, mode, dev);
+ // create the backing node for this in the fs root as well
+ var path = NODEFS.realPath(node);
+ try {
+ if (FS.isDir(node.mode)) {
+ fs.mkdirSync(path, node.mode);
+ } else {
+ fs.writeFileSync(path, '', { mode: node.mode });
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ return node;
+ },rename:function (oldNode, newDir, newName) {
+ var oldPath = NODEFS.realPath(oldNode);
+ var newPath = PATH.join2(NODEFS.realPath(newDir), newName);
+ try {
+ fs.renameSync(oldPath, newPath);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },unlink:function (parent, name) {
+ var path = PATH.join2(NODEFS.realPath(parent), name);
+ try {
+ fs.unlinkSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },rmdir:function (parent, name) {
+ var path = PATH.join2(NODEFS.realPath(parent), name);
+ try {
+ fs.rmdirSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },readdir:function (node) {
+ var path = NODEFS.realPath(node);
+ try {
+ return fs.readdirSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },symlink:function (parent, newName, oldPath) {
+ var newPath = PATH.join2(NODEFS.realPath(parent), newName);
+ try {
+ fs.symlinkSync(oldPath, newPath);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },readlink:function (node) {
+ var path = NODEFS.realPath(node);
+ try {
+ return fs.readlinkSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ }},stream_ops:{open:function (stream) {
+ var path = NODEFS.realPath(stream.node);
+ try {
+ if (FS.isFile(stream.node.mode)) {
+ stream.nfd = fs.openSync(path, NODEFS.flagsToPermissionString(stream.flags));
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },close:function (stream) {
+ try {
+ if (FS.isFile(stream.node.mode) && stream.nfd) {
+ fs.closeSync(stream.nfd);
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },read:function (stream, buffer, offset, length, position) {
+ // FIXME this is terrible.
+ var nbuffer = new Buffer(length);
+ var res;
+ try {
+ res = fs.readSync(stream.nfd, nbuffer, 0, length, position);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ if (res > 0) {
+ for (var i = 0; i < res; i++) {
+ buffer[offset + i] = nbuffer[i];
+ }
+ }
+ return res;
+ },write:function (stream, buffer, offset, length, position) {
+ // FIXME this is terrible.
+ var nbuffer = new Buffer(buffer.subarray(offset, offset + length));
+ var res;
+ try {
+ res = fs.writeSync(stream.nfd, nbuffer, 0, length, position);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ return res;
+ },llseek:function (stream, offset, whence) {
+ var position = offset;
+ if (whence === 1) { // SEEK_CUR.
+ position += stream.position;
+ } else if (whence === 2) { // SEEK_END.
+ if (FS.isFile(stream.node.mode)) {
+ try {
+ var stat = fs.fstatSync(stream.nfd);
+ position += stat.size;
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ }
+ }
+
+ if (position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+
+ stream.position = position;
+ return position;
+ }}};
+
+ var _stdin=allocate(1, "i32*", ALLOC_STATIC);
+
+ var _stdout=allocate(1, "i32*", ALLOC_STATIC);
+
+ var _stderr=allocate(1, "i32*", ALLOC_STATIC);
+
+ function _fflush(stream) {
+ // int fflush(FILE *stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fflush.html
+ // we don't currently perform any user-space buffering of data
+ }var FS={root:null,mounts:[],devices:[null],streams:[],nextInode:1,nameTable:null,currentPath:"/",initialized:false,ignorePermissions:true,ErrnoError:null,genericErrors:{},handleFSError:function (e) {
+ if (!(e instanceof FS.ErrnoError)) throw e + ' : ' + stackTrace();
+ return ___setErrNo(e.errno);
+ },lookupPath:function (path, opts) {
+ path = PATH.resolve(FS.cwd(), path);
+ opts = opts || {};
+
+ var defaults = {
+ follow_mount: true,
+ recurse_count: 0
+ };
+ for (var key in defaults) {
+ if (opts[key] === undefined) {
+ opts[key] = defaults[key];
+ }
+ }
+
+ if (opts.recurse_count > 8) { // max recursive lookup of 8
+ throw new FS.ErrnoError(ERRNO_CODES.ELOOP);
+ }
+
+ // split the path
+ var parts = PATH.normalizeArray(path.split('/').filter(function(p) {
+ return !!p;
+ }), false);
+
+ // start at the root
+ var current = FS.root;
+ var current_path = '/';
+
+ for (var i = 0; i < parts.length; i++) {
+ var islast = (i === parts.length-1);
+ if (islast && opts.parent) {
+ // stop resolving
+ break;
+ }
+
+ current = FS.lookupNode(current, parts[i]);
+ current_path = PATH.join2(current_path, parts[i]);
+
+ // jump to the mount's root node if this is a mountpoint
+ if (FS.isMountpoint(current)) {
+ if (!islast || (islast && opts.follow_mount)) {
+ current = current.mounted.root;
+ }
+ }
+
+ // by default, lookupPath will not follow a symlink if it is the final path component.
+ // setting opts.follow = true will override this behavior.
+ if (!islast || opts.follow) {
+ var count = 0;
+ while (FS.isLink(current.mode)) {
+ var link = FS.readlink(current_path);
+ current_path = PATH.resolve(PATH.dirname(current_path), link);
+
+ var lookup = FS.lookupPath(current_path, { recurse_count: opts.recurse_count });
+ current = lookup.node;
+
+ if (count++ > 40) { // limit max consecutive symlinks to 40 (SYMLOOP_MAX).
+ throw new FS.ErrnoError(ERRNO_CODES.ELOOP);
+ }
+ }
+ }
+ }
+
+ return { path: current_path, node: current };
+ },getPath:function (node) {
+ var path;
+ while (true) {
+ if (FS.isRoot(node)) {
+ var mount = node.mount.mountpoint;
+ if (!path) return mount;
+ return mount[mount.length-1] !== '/' ? mount + '/' + path : mount + path;
+ }
+ path = path ? node.name + '/' + path : node.name;
+ node = node.parent;
+ }
+ },hashName:function (parentid, name) {
+ var hash = 0;
+
+
+ for (var i = 0; i < name.length; i++) {
+ hash = ((hash << 5) - hash + name.charCodeAt(i)) | 0;
+ }
+ return ((parentid + hash) >>> 0) % FS.nameTable.length;
+ },hashAddNode:function (node) {
+ var hash = FS.hashName(node.parent.id, node.name);
+ node.name_next = FS.nameTable[hash];
+ FS.nameTable[hash] = node;
+ },hashRemoveNode:function (node) {
+ var hash = FS.hashName(node.parent.id, node.name);
+ if (FS.nameTable[hash] === node) {
+ FS.nameTable[hash] = node.name_next;
+ } else {
+ var current = FS.nameTable[hash];
+ while (current) {
+ if (current.name_next === node) {
+ current.name_next = node.name_next;
+ break;
+ }
+ current = current.name_next;
+ }
+ }
+ },lookupNode:function (parent, name) {
+ var err = FS.mayLookup(parent);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ var hash = FS.hashName(parent.id, name);
+ for (var node = FS.nameTable[hash]; node; node = node.name_next) {
+ var nodeName = node.name;
+ if (node.parent.id === parent.id && nodeName === name) {
+ return node;
+ }
+ }
+ // if we failed to find it in the cache, call into the VFS
+ return FS.lookup(parent, name);
+ },createNode:function (parent, name, mode, rdev) {
+ if (!FS.FSNode) {
+ FS.FSNode = function(parent, name, mode, rdev) {
+ if (!parent) {
+ parent = this; // root node sets parent to itself
+ }
+ this.parent = parent;
+ this.mount = parent.mount;
+ this.mounted = null;
+ this.id = FS.nextInode++;
+ this.name = name;
+ this.mode = mode;
+ this.node_ops = {};
+ this.stream_ops = {};
+ this.rdev = rdev;
+ };
+
+ FS.FSNode.prototype = {};
+
+ // compatibility
+ var readMode = 292 | 73;
+ var writeMode = 146;
+
+ // NOTE we must use Object.defineProperties instead of individual calls to
+ // Object.defineProperty in order to make closure compiler happy
+ Object.defineProperties(FS.FSNode.prototype, {
+ read: {
+ get: function() { return (this.mode & readMode) === readMode; },
+ set: function(val) { val ? this.mode |= readMode : this.mode &= ~readMode; }
+ },
+ write: {
+ get: function() { return (this.mode & writeMode) === writeMode; },
+ set: function(val) { val ? this.mode |= writeMode : this.mode &= ~writeMode; }
+ },
+ isFolder: {
+ get: function() { return FS.isDir(this.mode); },
+ },
+ isDevice: {
+ get: function() { return FS.isChrdev(this.mode); },
+ },
+ });
+ }
+
+ var node = new FS.FSNode(parent, name, mode, rdev);
+
+ FS.hashAddNode(node);
+
+ return node;
+ },destroyNode:function (node) {
+ FS.hashRemoveNode(node);
+ },isRoot:function (node) {
+ return node === node.parent;
+ },isMountpoint:function (node) {
+ return !!node.mounted;
+ },isFile:function (mode) {
+ return (mode & 61440) === 32768;
+ },isDir:function (mode) {
+ return (mode & 61440) === 16384;
+ },isLink:function (mode) {
+ return (mode & 61440) === 40960;
+ },isChrdev:function (mode) {
+ return (mode & 61440) === 8192;
+ },isBlkdev:function (mode) {
+ return (mode & 61440) === 24576;
+ },isFIFO:function (mode) {
+ return (mode & 61440) === 4096;
+ },isSocket:function (mode) {
+ return (mode & 49152) === 49152;
+ },flagModes:{"r":0,"rs":1052672,"r+":2,"w":577,"wx":705,"xw":705,"w+":578,"wx+":706,"xw+":706,"a":1089,"ax":1217,"xa":1217,"a+":1090,"ax+":1218,"xa+":1218},modeStringToFlags:function (str) {
+ var flags = FS.flagModes[str];
+ if (typeof flags === 'undefined') {
+ throw new Error('Unknown file open mode: ' + str);
+ }
+ return flags;
+ },flagsToPermissionString:function (flag) {
+ var accmode = flag & 2097155;
+ var perms = ['r', 'w', 'rw'][accmode];
+ if ((flag & 512)) {
+ perms += 'w';
+ }
+ return perms;
+ },nodePermissions:function (node, perms) {
+ if (FS.ignorePermissions) {
+ return 0;
+ }
+ // return 0 if any user, group or owner bits are set.
+ if (perms.indexOf('r') !== -1 && !(node.mode & 292)) {
+ return ERRNO_CODES.EACCES;
+ } else if (perms.indexOf('w') !== -1 && !(node.mode & 146)) {
+ return ERRNO_CODES.EACCES;
+ } else if (perms.indexOf('x') !== -1 && !(node.mode & 73)) {
+ return ERRNO_CODES.EACCES;
+ }
+ return 0;
+ },mayLookup:function (dir) {
+ return FS.nodePermissions(dir, 'x');
+ },mayCreate:function (dir, name) {
+ try {
+ var node = FS.lookupNode(dir, name);
+ return ERRNO_CODES.EEXIST;
+ } catch (e) {
+ }
+ return FS.nodePermissions(dir, 'wx');
+ },mayDelete:function (dir, name, isdir) {
+ var node;
+ try {
+ node = FS.lookupNode(dir, name);
+ } catch (e) {
+ return e.errno;
+ }
+ var err = FS.nodePermissions(dir, 'wx');
+ if (err) {
+ return err;
+ }
+ if (isdir) {
+ if (!FS.isDir(node.mode)) {
+ return ERRNO_CODES.ENOTDIR;
+ }
+ if (FS.isRoot(node) || FS.getPath(node) === FS.cwd()) {
+ return ERRNO_CODES.EBUSY;
+ }
+ } else {
+ if (FS.isDir(node.mode)) {
+ return ERRNO_CODES.EISDIR;
+ }
+ }
+ return 0;
+ },mayOpen:function (node, flags) {
+ if (!node) {
+ return ERRNO_CODES.ENOENT;
+ }
+ if (FS.isLink(node.mode)) {
+ return ERRNO_CODES.ELOOP;
+ } else if (FS.isDir(node.mode)) {
+ if ((flags & 2097155) !== 0 || // opening for write
+ (flags & 512)) {
+ return ERRNO_CODES.EISDIR;
+ }
+ }
+ return FS.nodePermissions(node, FS.flagsToPermissionString(flags));
+ },MAX_OPEN_FDS:4096,nextfd:function (fd_start, fd_end) {
+ fd_start = fd_start || 0;
+ fd_end = fd_end || FS.MAX_OPEN_FDS;
+ for (var fd = fd_start; fd <= fd_end; fd++) {
+ if (!FS.streams[fd]) {
+ return fd;
+ }
+ }
+ throw new FS.ErrnoError(ERRNO_CODES.EMFILE);
+ },getStream:function (fd) {
+ return FS.streams[fd];
+ },createStream:function (stream, fd_start, fd_end) {
+ if (!FS.FSStream) {
+ FS.FSStream = function(){};
+ FS.FSStream.prototype = {};
+ // compatibility
+ Object.defineProperties(FS.FSStream.prototype, {
+ object: {
+ get: function() { return this.node; },
+ set: function(val) { this.node = val; }
+ },
+ isRead: {
+ get: function() { return (this.flags & 2097155) !== 1; }
+ },
+ isWrite: {
+ get: function() { return (this.flags & 2097155) !== 0; }
+ },
+ isAppend: {
+ get: function() { return (this.flags & 1024); }
+ }
+ });
+ }
+ if (0) {
+ // reuse the object
+ stream.__proto__ = FS.FSStream.prototype;
+ } else {
+ var newStream = new FS.FSStream();
+ for (var p in stream) {
+ newStream[p] = stream[p];
+ }
+ stream = newStream;
+ }
+ var fd = FS.nextfd(fd_start, fd_end);
+ stream.fd = fd;
+ FS.streams[fd] = stream;
+ return stream;
+ },closeStream:function (fd) {
+ FS.streams[fd] = null;
+ },getStreamFromPtr:function (ptr) {
+ return FS.streams[ptr - 1];
+ },getPtrForStream:function (stream) {
+ return stream ? stream.fd + 1 : 0;
+ },chrdev_stream_ops:{open:function (stream) {
+ var device = FS.getDevice(stream.node.rdev);
+ // override node's stream ops with the device's
+ stream.stream_ops = device.stream_ops;
+ // forward the open call
+ if (stream.stream_ops.open) {
+ stream.stream_ops.open(stream);
+ }
+ },llseek:function () {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }},major:function (dev) {
+ return ((dev) >> 8);
+ },minor:function (dev) {
+ return ((dev) & 0xff);
+ },makedev:function (ma, mi) {
+ return ((ma) << 8 | (mi));
+ },registerDevice:function (dev, ops) {
+ FS.devices[dev] = { stream_ops: ops };
+ },getDevice:function (dev) {
+ return FS.devices[dev];
+ },getMounts:function (mount) {
+ var mounts = [];
+ var check = [mount];
+
+ while (check.length) {
+ var m = check.pop();
+
+ mounts.push(m);
+
+ check.push.apply(check, m.mounts);
+ }
+
+ return mounts;
+ },syncfs:function (populate, callback) {
+ if (typeof(populate) === 'function') {
+ callback = populate;
+ populate = false;
+ }
+
+ var mounts = FS.getMounts(FS.root.mount);
+ var completed = 0;
+
+ function done(err) {
+ if (err) {
+ if (!done.errored) {
+ done.errored = true;
+ return callback(err);
+ }
+ return;
+ }
+ if (++completed >= mounts.length) {
+ callback(null);
+ }
+ };
+
+ // sync all mounts
+ mounts.forEach(function (mount) {
+ if (!mount.type.syncfs) {
+ return done(null);
+ }
+ mount.type.syncfs(mount, populate, done);
+ });
+ },mount:function (type, opts, mountpoint) {
+ var root = mountpoint === '/';
+ var pseudo = !mountpoint;
+ var node;
+
+ if (root && FS.root) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ } else if (!root && !pseudo) {
+ var lookup = FS.lookupPath(mountpoint, { follow_mount: false });
+
+ mountpoint = lookup.path; // use the absolute path
+ node = lookup.node;
+
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+
+ if (!FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTDIR);
+ }
+ }
+
+ var mount = {
+ type: type,
+ opts: opts,
+ mountpoint: mountpoint,
+ mounts: []
+ };
+
+ // create a root node for the fs
+ var mountRoot = type.mount(mount);
+ mountRoot.mount = mount;
+ mount.root = mountRoot;
+
+ if (root) {
+ FS.root = mountRoot;
+ } else if (node) {
+ // set as a mountpoint
+ node.mounted = mount;
+
+ // add the new mount to the current mount's children
+ if (node.mount) {
+ node.mount.mounts.push(mount);
+ }
+ }
+
+ return mountRoot;
+ },unmount:function (mountpoint) {
+ var lookup = FS.lookupPath(mountpoint, { follow_mount: false });
+
+ if (!FS.isMountpoint(lookup.node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+
+ // destroy the nodes for this mount, and all its child mounts
+ var node = lookup.node;
+ var mount = node.mounted;
+ var mounts = FS.getMounts(mount);
+
+ Object.keys(FS.nameTable).forEach(function (hash) {
+ var current = FS.nameTable[hash];
+
+ while (current) {
+ var next = current.name_next;
+
+ if (mounts.indexOf(current.mount) !== -1) {
+ FS.destroyNode(current);
+ }
+
+ current = next;
+ }
+ });
+
+ // no longer a mountpoint
+ node.mounted = null;
+
+ // remove this mount from the child mounts
+ var idx = node.mount.mounts.indexOf(mount);
+ assert(idx !== -1);
+ node.mount.mounts.splice(idx, 1);
+ },lookup:function (parent, name) {
+ return parent.node_ops.lookup(parent, name);
+ },mknod:function (path, mode, dev) {
+ var lookup = FS.lookupPath(path, { parent: true });
+ var parent = lookup.node;
+ var name = PATH.basename(path);
+ var err = FS.mayCreate(parent, name);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.mknod) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ return parent.node_ops.mknod(parent, name, mode, dev);
+ },create:function (path, mode) {
+ mode = mode !== undefined ? mode : 438 /* 0666 */;
+ mode &= 4095;
+ mode |= 32768;
+ return FS.mknod(path, mode, 0);
+ },mkdir:function (path, mode) {
+ mode = mode !== undefined ? mode : 511 /* 0777 */;
+ mode &= 511 | 512;
+ mode |= 16384;
+ return FS.mknod(path, mode, 0);
+ },mkdev:function (path, mode, dev) {
+ if (typeof(dev) === 'undefined') {
+ dev = mode;
+ mode = 438 /* 0666 */;
+ }
+ mode |= 8192;
+ return FS.mknod(path, mode, dev);
+ },symlink:function (oldpath, newpath) {
+ var lookup = FS.lookupPath(newpath, { parent: true });
+ var parent = lookup.node;
+ var newname = PATH.basename(newpath);
+ var err = FS.mayCreate(parent, newname);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.symlink) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ return parent.node_ops.symlink(parent, newname, oldpath);
+ },rename:function (old_path, new_path) {
+ var old_dirname = PATH.dirname(old_path);
+ var new_dirname = PATH.dirname(new_path);
+ var old_name = PATH.basename(old_path);
+ var new_name = PATH.basename(new_path);
+ // parents must exist
+ var lookup, old_dir, new_dir;
+ try {
+ lookup = FS.lookupPath(old_path, { parent: true });
+ old_dir = lookup.node;
+ lookup = FS.lookupPath(new_path, { parent: true });
+ new_dir = lookup.node;
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ // need to be part of the same mount
+ if (old_dir.mount !== new_dir.mount) {
+ throw new FS.ErrnoError(ERRNO_CODES.EXDEV);
+ }
+ // source must exist
+ var old_node = FS.lookupNode(old_dir, old_name);
+ // old path should not be an ancestor of the new path
+ var relative = PATH.relative(old_path, new_dirname);
+ if (relative.charAt(0) !== '.') {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ // new path should not be an ancestor of the old path
+ relative = PATH.relative(new_path, old_dirname);
+ if (relative.charAt(0) !== '.') {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTEMPTY);
+ }
+ // see if the new path already exists
+ var new_node;
+ try {
+ new_node = FS.lookupNode(new_dir, new_name);
+ } catch (e) {
+ // not fatal
+ }
+ // early out if nothing needs to change
+ if (old_node === new_node) {
+ return;
+ }
+ // we'll need to delete the old entry
+ var isdir = FS.isDir(old_node.mode);
+ var err = FS.mayDelete(old_dir, old_name, isdir);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ // need delete permissions if we'll be overwriting.
+ // need create permissions if new doesn't already exist.
+ err = new_node ?
+ FS.mayDelete(new_dir, new_name, isdir) :
+ FS.mayCreate(new_dir, new_name);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!old_dir.node_ops.rename) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isMountpoint(old_node) || (new_node && FS.isMountpoint(new_node))) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ // if we are going to change the parent, check write permissions
+ if (new_dir !== old_dir) {
+ err = FS.nodePermissions(old_dir, 'w');
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ }
+ // remove the node from the lookup hash
+ FS.hashRemoveNode(old_node);
+ // do the underlying fs rename
+ try {
+ old_dir.node_ops.rename(old_node, new_dir, new_name);
+ } catch (e) {
+ throw e;
+ } finally {
+ // add the node back to the hash (in case node_ops.rename
+ // changed its name)
+ FS.hashAddNode(old_node);
+ }
+ },rmdir:function (path) {
+ var lookup = FS.lookupPath(path, { parent: true });
+ var parent = lookup.node;
+ var name = PATH.basename(path);
+ var node = FS.lookupNode(parent, name);
+ var err = FS.mayDelete(parent, name, true);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.rmdir) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ parent.node_ops.rmdir(parent, name);
+ FS.destroyNode(node);
+ },readdir:function (path) {
+ var lookup = FS.lookupPath(path, { follow: true });
+ var node = lookup.node;
+ if (!node.node_ops.readdir) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTDIR);
+ }
+ return node.node_ops.readdir(node);
+ },unlink:function (path) {
+ var lookup = FS.lookupPath(path, { parent: true });
+ var parent = lookup.node;
+ var name = PATH.basename(path);
+ var node = FS.lookupNode(parent, name);
+ var err = FS.mayDelete(parent, name, false);
+ if (err) {
+ // POSIX says unlink should set EPERM, not EISDIR
+ if (err === ERRNO_CODES.EISDIR) err = ERRNO_CODES.EPERM;
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.unlink) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ parent.node_ops.unlink(parent, name);
+ FS.destroyNode(node);
+ },readlink:function (path) {
+ var lookup = FS.lookupPath(path);
+ var link = lookup.node;
+ if (!link.node_ops.readlink) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ return link.node_ops.readlink(link);
+ },stat:function (path, dontFollow) {
+ var lookup = FS.lookupPath(path, { follow: !dontFollow });
+ var node = lookup.node;
+ if (!node.node_ops.getattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ return node.node_ops.getattr(node);
+ },lstat:function (path) {
+ return FS.stat(path, true);
+ },chmod:function (path, mode, dontFollow) {
+ var node;
+ if (typeof path === 'string') {
+ var lookup = FS.lookupPath(path, { follow: !dontFollow });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ if (!node.node_ops.setattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ node.node_ops.setattr(node, {
+ mode: (mode & 4095) | (node.mode & ~4095),
+ timestamp: Date.now()
+ });
+ },lchmod:function (path, mode) {
+ FS.chmod(path, mode, true);
+ },fchmod:function (fd, mode) {
+ var stream = FS.getStream(fd);
+ if (!stream) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ FS.chmod(stream.node, mode);
+ },chown:function (path, uid, gid, dontFollow) {
+ var node;
+ if (typeof path === 'string') {
+ var lookup = FS.lookupPath(path, { follow: !dontFollow });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ if (!node.node_ops.setattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ node.node_ops.setattr(node, {
+ timestamp: Date.now()
+ // we ignore the uid / gid for now
+ });
+ },lchown:function (path, uid, gid) {
+ FS.chown(path, uid, gid, true);
+ },fchown:function (fd, uid, gid) {
+ var stream = FS.getStream(fd);
+ if (!stream) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ FS.chown(stream.node, uid, gid);
+ },truncate:function (path, len) {
+ if (len < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var node;
+ if (typeof path === 'string') {
+ var lookup = FS.lookupPath(path, { follow: true });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ if (!node.node_ops.setattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EISDIR);
+ }
+ if (!FS.isFile(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var err = FS.nodePermissions(node, 'w');
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ node.node_ops.setattr(node, {
+ size: len,
+ timestamp: Date.now()
+ });
+ },ftruncate:function (fd, len) {
+ var stream = FS.getStream(fd);
+ if (!stream) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ FS.truncate(stream.node, len);
+ },utime:function (path, atime, mtime) {
+ var lookup = FS.lookupPath(path, { follow: true });
+ var node = lookup.node;
+ node.node_ops.setattr(node, {
+ timestamp: Math.max(atime, mtime)
+ });
+ },open:function (path, flags, mode, fd_start, fd_end) {
+ flags = typeof flags === 'string' ? FS.modeStringToFlags(flags) : flags;
+ mode = typeof mode === 'undefined' ? 438 /* 0666 */ : mode;
+ if ((flags & 64)) {
+ mode = (mode & 4095) | 32768;
+ } else {
+ mode = 0;
+ }
+ var node;
+ if (typeof path === 'object') {
+ node = path;
+ } else {
+ path = PATH.normalize(path);
+ try {
+ var lookup = FS.lookupPath(path, {
+ follow: !(flags & 131072)
+ });
+ node = lookup.node;
+ } catch (e) {
+ // ignore
+ }
+ }
+ // perhaps we need to create the node
+ if ((flags & 64)) {
+ if (node) {
+ // if O_CREAT and O_EXCL are set, error out if the node already exists
+ if ((flags & 128)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EEXIST);
+ }
+ } else {
+ // node doesn't exist, try to create it
+ node = FS.mknod(path, mode, 0);
+ }
+ }
+ if (!node) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOENT);
+ }
+ // can't truncate a device
+ if (FS.isChrdev(node.mode)) {
+ flags &= ~512;
+ }
+ // check permissions
+ var err = FS.mayOpen(node, flags);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ // do truncation if necessary
+ if ((flags & 512)) {
+ FS.truncate(node, 0);
+ }
+ // we've already handled these, don't pass down to the underlying vfs
+ flags &= ~(128 | 512);
+
+ // register the stream with the filesystem
+ var stream = FS.createStream({
+ node: node,
+ path: FS.getPath(node), // we want the absolute path to the node
+ flags: flags,
+ seekable: true,
+ position: 0,
+ stream_ops: node.stream_ops,
+ // used by the file family libc calls (fopen, fwrite, ferror, etc.)
+ ungotten: [],
+ error: false
+ }, fd_start, fd_end);
+ // call the new stream's open function
+ if (stream.stream_ops.open) {
+ stream.stream_ops.open(stream);
+ }
+ if (Module['logReadFiles'] && !(flags & 1)) {
+ if (!FS.readFiles) FS.readFiles = {};
+ if (!(path in FS.readFiles)) {
+ FS.readFiles[path] = 1;
+ Module['printErr']('read file: ' + path);
+ }
+ }
+ return stream;
+ },close:function (stream) {
+ try {
+ if (stream.stream_ops.close) {
+ stream.stream_ops.close(stream);
+ }
+ } catch (e) {
+ throw e;
+ } finally {
+ FS.closeStream(stream.fd);
+ }
+ },llseek:function (stream, offset, whence) {
+ if (!stream.seekable || !stream.stream_ops.llseek) {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }
+ return stream.stream_ops.llseek(stream, offset, whence);
+ },read:function (stream, buffer, offset, length, position) {
+ if (length < 0 || position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ if ((stream.flags & 2097155) === 1) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if (FS.isDir(stream.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EISDIR);
+ }
+ if (!stream.stream_ops.read) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var seeking = true;
+ if (typeof position === 'undefined') {
+ position = stream.position;
+ seeking = false;
+ } else if (!stream.seekable) {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }
+ var bytesRead = stream.stream_ops.read(stream, buffer, offset, length, position);
+ if (!seeking) stream.position += bytesRead;
+ return bytesRead;
+ },write:function (stream, buffer, offset, length, position, canOwn) {
+ if (length < 0 || position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if (FS.isDir(stream.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EISDIR);
+ }
+ if (!stream.stream_ops.write) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var seeking = true;
+ if (typeof position === 'undefined') {
+ position = stream.position;
+ seeking = false;
+ } else if (!stream.seekable) {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }
+ if (stream.flags & 1024) {
+ // seek to the end before writing in append mode
+ FS.llseek(stream, 0, 2);
+ }
+ var bytesWritten = stream.stream_ops.write(stream, buffer, offset, length, position, canOwn);
+ if (!seeking) stream.position += bytesWritten;
+ return bytesWritten;
+ },allocate:function (stream, offset, length) {
+ if (offset < 0 || length <= 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if (!FS.isFile(stream.node.mode) && !FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ if (!stream.stream_ops.allocate) {
+ throw new FS.ErrnoError(ERRNO_CODES.EOPNOTSUPP);
+ }
+ stream.stream_ops.allocate(stream, offset, length);
+ },mmap:function (stream, buffer, offset, length, position, prot, flags) {
+ // TODO if PROT is PROT_WRITE, make sure we have write access
+ if ((stream.flags & 2097155) === 1) {
+ throw new FS.ErrnoError(ERRNO_CODES.EACCES);
+ }
+ if (!stream.stream_ops.mmap) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ return stream.stream_ops.mmap(stream, buffer, offset, length, position, prot, flags);
+ },ioctl:function (stream, cmd, arg) {
+ if (!stream.stream_ops.ioctl) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTTY);
+ }
+ return stream.stream_ops.ioctl(stream, cmd, arg);
+ },readFile:function (path, opts) {
+ opts = opts || {};
+ opts.flags = opts.flags || 'r';
+ opts.encoding = opts.encoding || 'binary';
+ if (opts.encoding !== 'utf8' && opts.encoding !== 'binary') {
+ throw new Error('Invalid encoding type "' + opts.encoding + '"');
+ }
+ var ret;
+ var stream = FS.open(path, opts.flags);
+ var stat = FS.stat(path);
+ var length = stat.size;
+ var buf = new Uint8Array(length);
+ FS.read(stream, buf, 0, length, 0);
+ if (opts.encoding === 'utf8') {
+ ret = '';
+ var utf8 = new Runtime.UTF8Processor();
+ for (var i = 0; i < length; i++) {
+ ret += utf8.processCChar(buf[i]);
+ }
+ } else if (opts.encoding === 'binary') {
+ ret = buf;
+ }
+ FS.close(stream);
+ return ret;
+ },writeFile:function (path, data, opts) {
+ opts = opts || {};
+ opts.flags = opts.flags || 'w';
+ opts.encoding = opts.encoding || 'utf8';
+ if (opts.encoding !== 'utf8' && opts.encoding !== 'binary') {
+ throw new Error('Invalid encoding type "' + opts.encoding + '"');
+ }
+ var stream = FS.open(path, opts.flags, opts.mode);
+ if (opts.encoding === 'utf8') {
+ var utf8 = new Runtime.UTF8Processor();
+ var buf = new Uint8Array(utf8.processJSString(data));
+ FS.write(stream, buf, 0, buf.length, 0, opts.canOwn);
+ } else if (opts.encoding === 'binary') {
+ FS.write(stream, data, 0, data.length, 0, opts.canOwn);
+ }
+ FS.close(stream);
+ },cwd:function () {
+ return FS.currentPath;
+ },chdir:function (path) {
+ var lookup = FS.lookupPath(path, { follow: true });
+ if (!FS.isDir(lookup.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTDIR);
+ }
+ var err = FS.nodePermissions(lookup.node, 'x');
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ FS.currentPath = lookup.path;
+ },createDefaultDirectories:function () {
+ FS.mkdir('/tmp');
+ },createDefaultDevices:function () {
+ // create /dev
+ FS.mkdir('/dev');
+ // setup /dev/null
+ FS.registerDevice(FS.makedev(1, 3), {
+ read: function() { return 0; },
+ write: function() { return 0; }
+ });
+ FS.mkdev('/dev/null', FS.makedev(1, 3));
+ // setup /dev/tty and /dev/tty1
+ // stderr needs to print output using Module['printErr']
+ // so we register a second tty just for it.
+ TTY.register(FS.makedev(5, 0), TTY.default_tty_ops);
+ TTY.register(FS.makedev(6, 0), TTY.default_tty1_ops);
+ FS.mkdev('/dev/tty', FS.makedev(5, 0));
+ FS.mkdev('/dev/tty1', FS.makedev(6, 0));
+ // we're not going to emulate the actual shm device,
+ // just create the tmp dirs that reside in it commonly
+ FS.mkdir('/dev/shm');
+ FS.mkdir('/dev/shm/tmp');
+ },createStandardStreams:function () {
+ // TODO deprecate the old functionality of a single
+ // input / output callback and that utilizes FS.createDevice
+ // and instead require a unique set of stream ops
+
+ // by default, we symlink the standard streams to the
+ // default tty devices. however, if the standard streams
+ // have been overwritten we create a unique device for
+ // them instead.
+ if (Module['stdin']) {
+ FS.createDevice('/dev', 'stdin', Module['stdin']);
+ } else {
+ FS.symlink('/dev/tty', '/dev/stdin');
+ }
+ if (Module['stdout']) {
+ FS.createDevice('/dev', 'stdout', null, Module['stdout']);
+ } else {
+ FS.symlink('/dev/tty', '/dev/stdout');
+ }
+ if (Module['stderr']) {
+ FS.createDevice('/dev', 'stderr', null, Module['stderr']);
+ } else {
+ FS.symlink('/dev/tty1', '/dev/stderr');
+ }
+
+ // open default streams for the stdin, stdout and stderr devices
+ var stdin = FS.open('/dev/stdin', 'r');
+ HEAP32[((_stdin)>>2)]=FS.getPtrForStream(stdin);
+ assert(stdin.fd === 0, 'invalid handle for stdin (' + stdin.fd + ')');
+
+ var stdout = FS.open('/dev/stdout', 'w');
+ HEAP32[((_stdout)>>2)]=FS.getPtrForStream(stdout);
+ assert(stdout.fd === 1, 'invalid handle for stdout (' + stdout.fd + ')');
+
+ var stderr = FS.open('/dev/stderr', 'w');
+ HEAP32[((_stderr)>>2)]=FS.getPtrForStream(stderr);
+ assert(stderr.fd === 2, 'invalid handle for stderr (' + stderr.fd + ')');
+ },ensureErrnoError:function () {
+ if (FS.ErrnoError) return;
+ FS.ErrnoError = function ErrnoError(errno) {
+ this.errno = errno;
+ for (var key in ERRNO_CODES) {
+ if (ERRNO_CODES[key] === errno) {
+ this.code = key;
+ break;
+ }
+ }
+ this.message = ERRNO_MESSAGES[errno];
+ };
+ FS.ErrnoError.prototype = new Error();
+ FS.ErrnoError.prototype.constructor = FS.ErrnoError;
+ // Some errors may happen quite a bit, to avoid overhead we reuse them (and suffer a lack of stack info)
+ [ERRNO_CODES.ENOENT].forEach(function(code) {
+ FS.genericErrors[code] = new FS.ErrnoError(code);
+ FS.genericErrors[code].stack = '<generic error, no stack>';
+ });
+ },staticInit:function () {
+ FS.ensureErrnoError();
+
+ FS.nameTable = new Array(4096);
+
+ FS.mount(MEMFS, {}, '/');
+
+ FS.createDefaultDirectories();
+ FS.createDefaultDevices();
+ },init:function (input, output, error) {
+ assert(!FS.init.initialized, 'FS.init was previously called. If you want to initialize later with custom parameters, remove any earlier calls (note that one is automatically added to the generated code)');
+ FS.init.initialized = true;
+
+ FS.ensureErrnoError();
+
+ // Allow Module.stdin etc. to provide defaults, if none explicitly passed to us here
+ Module['stdin'] = input || Module['stdin'];
+ Module['stdout'] = output || Module['stdout'];
+ Module['stderr'] = error || Module['stderr'];
+
+ FS.createStandardStreams();
+ },quit:function () {
+ FS.init.initialized = false;
+ for (var i = 0; i < FS.streams.length; i++) {
+ var stream = FS.streams[i];
+ if (!stream) {
+ continue;
+ }
+ FS.close(stream);
+ }
+ },getMode:function (canRead, canWrite) {
+ var mode = 0;
+ if (canRead) mode |= 292 | 73;
+ if (canWrite) mode |= 146;
+ return mode;
+ },joinPath:function (parts, forceRelative) {
+ var path = PATH.join.apply(null, parts);
+ if (forceRelative && path[0] == '/') path = path.substr(1);
+ return path;
+ },absolutePath:function (relative, base) {
+ return PATH.resolve(base, relative);
+ },standardizePath:function (path) {
+ return PATH.normalize(path);
+ },findObject:function (path, dontResolveLastLink) {
+ var ret = FS.analyzePath(path, dontResolveLastLink);
+ if (ret.exists) {
+ return ret.object;
+ } else {
+ ___setErrNo(ret.error);
+ return null;
+ }
+ },analyzePath:function (path, dontResolveLastLink) {
+ // operate from within the context of the symlink's target
+ try {
+ var lookup = FS.lookupPath(path, { follow: !dontResolveLastLink });
+ path = lookup.path;
+ } catch (e) {
+ }
+ var ret = {
+ isRoot: false, exists: false, error: 0, name: null, path: null, object: null,
+ parentExists: false, parentPath: null, parentObject: null
+ };
+ try {
+ var lookup = FS.lookupPath(path, { parent: true });
+ ret.parentExists = true;
+ ret.parentPath = lookup.path;
+ ret.parentObject = lookup.node;
+ ret.name = PATH.basename(path);
+ lookup = FS.lookupPath(path, { follow: !dontResolveLastLink });
+ ret.exists = true;
+ ret.path = lookup.path;
+ ret.object = lookup.node;
+ ret.name = lookup.node.name;
+ ret.isRoot = lookup.path === '/';
+ } catch (e) {
+ ret.error = e.errno;
+ };
+ return ret;
+ },createFolder:function (parent, name, canRead, canWrite) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ var mode = FS.getMode(canRead, canWrite);
+ return FS.mkdir(path, mode);
+ },createPath:function (parent, path, canRead, canWrite) {
+ parent = typeof parent === 'string' ? parent : FS.getPath(parent);
+ var parts = path.split('/').reverse();
+ while (parts.length) {
+ var part = parts.pop();
+ if (!part) continue;
+ var current = PATH.join2(parent, part);
+ try {
+ FS.mkdir(current);
+ } catch (e) {
+ // ignore EEXIST
+ }
+ parent = current;
+ }
+ return current;
+ },createFile:function (parent, name, properties, canRead, canWrite) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ var mode = FS.getMode(canRead, canWrite);
+ return FS.create(path, mode);
+ },createDataFile:function (parent, name, data, canRead, canWrite, canOwn) {
+ var path = name ? PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name) : parent;
+ var mode = FS.getMode(canRead, canWrite);
+ var node = FS.create(path, mode);
+ if (data) {
+ if (typeof data === 'string') {
+ var arr = new Array(data.length);
+ for (var i = 0, len = data.length; i < len; ++i) arr[i] = data.charCodeAt(i);
+ data = arr;
+ }
+ // make sure we can write to the file
+ FS.chmod(node, mode | 146);
+ var stream = FS.open(node, 'w');
+ FS.write(stream, data, 0, data.length, 0, canOwn);
+ FS.close(stream);
+ FS.chmod(node, mode);
+ }
+ return node;
+ },createDevice:function (parent, name, input, output) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ var mode = FS.getMode(!!input, !!output);
+ if (!FS.createDevice.major) FS.createDevice.major = 64;
+ var dev = FS.makedev(FS.createDevice.major++, 0);
+ // Create a fake device that a set of stream ops to emulate
+ // the old behavior.
+ FS.registerDevice(dev, {
+ open: function(stream) {
+ stream.seekable = false;
+ },
+ close: function(stream) {
+ // flush any pending line data
+ if (output && output.buffer && output.buffer.length) {
+ output(10);
+ }
+ },
+ read: function(stream, buffer, offset, length, pos /* ignored */) {
+ var bytesRead = 0;
+ for (var i = 0; i < length; i++) {
+ var result;
+ try {
+ result = input();
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ if (result === undefined && bytesRead === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ if (result === null || result === undefined) break;
+ bytesRead++;
+ buffer[offset+i] = result;
+ }
+ if (bytesRead) {
+ stream.node.timestamp = Date.now();
+ }
+ return bytesRead;
+ },
+ write: function(stream, buffer, offset, length, pos) {
+ for (var i = 0; i < length; i++) {
+ try {
+ output(buffer[offset+i]);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ }
+ if (length) {
+ stream.node.timestamp = Date.now();
+ }
+ return i;
+ }
+ });
+ return FS.mkdev(path, mode, dev);
+ },createLink:function (parent, name, target, canRead, canWrite) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ return FS.symlink(target, path);
+ },forceLoadFile:function (obj) {
+ if (obj.isDevice || obj.isFolder || obj.link || obj.contents) return true;
+ var success = true;
+ if (typeof XMLHttpRequest !== 'undefined') {
+ throw new Error("Lazy loading should have been performed (contents set) in createLazyFile, but it was not. Lazy loading only works in web workers. Use --embed-file or --preload-file in emcc on the main thread.");
+ } else if (Module['read']) {
+ // Command-line.
+ try {
+ // WARNING: Can't read binary files in V8's d8 or tracemonkey's js, as
+ // read() will try to parse UTF8.
+ obj.contents = intArrayFromString(Module['read'](obj.url), true);
+ } catch (e) {
+ success = false;
+ }
+ } else {
+ throw new Error('Cannot load without read() or XMLHttpRequest.');
+ }
+ if (!success) ___setErrNo(ERRNO_CODES.EIO);
+ return success;
+ },createLazyFile:function (parent, name, url, canRead, canWrite) {
+ // Lazy chunked Uint8Array (implements get and length from Uint8Array). Actual getting is abstracted away for eventual reuse.
+ function LazyUint8Array() {
+ this.lengthKnown = false;
+ this.chunks = []; // Loaded chunks. Index is the chunk number
+ }
+ LazyUint8Array.prototype.get = function LazyUint8Array_get(idx) {
+ if (idx > this.length-1 || idx < 0) {
+ return undefined;
+ }
+ var chunkOffset = idx % this.chunkSize;
+ var chunkNum = Math.floor(idx / this.chunkSize);
+ return this.getter(chunkNum)[chunkOffset];
+ }
+ LazyUint8Array.prototype.setDataGetter = function LazyUint8Array_setDataGetter(getter) {
+ this.getter = getter;
+ }
+ LazyUint8Array.prototype.cacheLength = function LazyUint8Array_cacheLength() {
+ // Find length
+ var xhr = new XMLHttpRequest();
+ xhr.open('HEAD', url, false);
+ xhr.send(null);
+ if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". Status: " + xhr.status);
+ var datalength = Number(xhr.getResponseHeader("Content-length"));
+ var header;
+ var hasByteServing = (header = xhr.getResponseHeader("Accept-Ranges")) && header === "bytes";
+ var chunkSize = 1024*1024; // Chunk size in bytes
+
+ if (!hasByteServing) chunkSize = datalength;
+
+ // Function to get a range from the remote URL.
+ var doXHR = (function(from, to) {
+ if (from > to) throw new Error("invalid range (" + from + ", " + to + ") or no bytes requested!");
+ if (to > datalength-1) throw new Error("only " + datalength + " bytes available! programmer error!");
+
+ // TODO: Use mozResponseArrayBuffer, responseStream, etc. if available.
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, false);
+ if (datalength !== chunkSize) xhr.setRequestHeader("Range", "bytes=" + from + "-" + to);
+
+ // Some hints to the browser that we want binary data.
+ if (typeof Uint8Array != 'undefined') xhr.responseType = 'arraybuffer';
+ if (xhr.overrideMimeType) {
+ xhr.overrideMimeType('text/plain; charset=x-user-defined');
+ }
+
+ xhr.send(null);
+ if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". Status: " + xhr.status);
+ if (xhr.response !== undefined) {
+ return new Uint8Array(xhr.response || []);
+ } else {
+ return intArrayFromString(xhr.responseText || '', true);
+ }
+ });
+ var lazyArray = this;
+ lazyArray.setDataGetter(function(chunkNum) {
+ var start = chunkNum * chunkSize;
+ var end = (chunkNum+1) * chunkSize - 1; // including this byte
+ end = Math.min(end, datalength-1); // if datalength-1 is selected, this is the last block
+ if (typeof(lazyArray.chunks[chunkNum]) === "undefined") {
+ lazyArray.chunks[chunkNum] = doXHR(start, end);
+ }
+ if (typeof(lazyArray.chunks[chunkNum]) === "undefined") throw new Error("doXHR failed!");
+ return lazyArray.chunks[chunkNum];
+ });
+
+ this._length = datalength;
+ this._chunkSize = chunkSize;
+ this.lengthKnown = true;
+ }
+ if (typeof XMLHttpRequest !== 'undefined') {
+ if (!ENVIRONMENT_IS_WORKER) throw 'Cannot do synchronous binary XHRs outside webworkers in modern browsers. Use --embed-file or --preload-file in emcc';
+ var lazyArray = new LazyUint8Array();
+ Object.defineProperty(lazyArray, "length", {
+ get: function() {
+ if(!this.lengthKnown) {
+ this.cacheLength();
+ }
+ return this._length;
+ }
+ });
+ Object.defineProperty(lazyArray, "chunkSize", {
+ get: function() {
+ if(!this.lengthKnown) {
+ this.cacheLength();
+ }
+ return this._chunkSize;
+ }
+ });
+
+ var properties = { isDevice: false, contents: lazyArray };
+ } else {
+ var properties = { isDevice: false, url: url };
+ }
+
+ var node = FS.createFile(parent, name, properties, canRead, canWrite);
+ // This is a total hack, but I want to get this lazy file code out of the
+ // core of MEMFS. If we want to keep this lazy file concept I feel it should
+ // be its own thin LAZYFS proxying calls to MEMFS.
+ if (properties.contents) {
+ node.contents = properties.contents;
+ } else if (properties.url) {
+ node.contents = null;
+ node.url = properties.url;
+ }
+ // override each stream op with one that tries to force load the lazy file first
+ var stream_ops = {};
+ var keys = Object.keys(node.stream_ops);
+ keys.forEach(function(key) {
+ var fn = node.stream_ops[key];
+ stream_ops[key] = function forceLoadLazyFile() {
+ if (!FS.forceLoadFile(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ return fn.apply(null, arguments);
+ };
+ });
+ // use a custom read function
+ stream_ops.read = function stream_ops_read(stream, buffer, offset, length, position) {
+ if (!FS.forceLoadFile(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ var contents = stream.node.contents;
+ if (position >= contents.length)
+ return 0;
+ var size = Math.min(contents.length - position, length);
+ assert(size >= 0);
+ if (contents.slice) { // normal array
+ for (var i = 0; i < size; i++) {
+ buffer[offset + i] = contents[position + i];
+ }
+ } else {
+ for (var i = 0; i < size; i++) { // LazyUint8Array from sync binary XHR
+ buffer[offset + i] = contents.get(position + i);
+ }
+ }
+ return size;
+ };
+ node.stream_ops = stream_ops;
+ return node;
+ },createPreloadedFile:function (parent, name, url, canRead, canWrite, onload, onerror, dontCreateFile, canOwn) {
+ Browser.init();
+ // TODO we should allow people to just pass in a complete filename instead
+ // of parent and name being that we just join them anyways
+ var fullname = name ? PATH.resolve(PATH.join2(parent, name)) : parent;
+ function processData(byteArray) {
+ function finish(byteArray) {
+ if (!dontCreateFile) {
+ FS.createDataFile(parent, name, byteArray, canRead, canWrite, canOwn);
+ }
+ if (onload) onload();
+ removeRunDependency('cp ' + fullname);
+ }
+ var handled = false;
+ Module['preloadPlugins'].forEach(function(plugin) {
+ if (handled) return;
+ if (plugin['canHandle'](fullname)) {
+ plugin['handle'](byteArray, fullname, finish, function() {
+ if (onerror) onerror();
+ removeRunDependency('cp ' + fullname);
+ });
+ handled = true;
+ }
+ });
+ if (!handled) finish(byteArray);
+ }
+ addRunDependency('cp ' + fullname);
+ if (typeof url == 'string') {
+ Browser.asyncLoad(url, function(byteArray) {
+ processData(byteArray);
+ }, onerror);
+ } else {
+ processData(url);
+ }
+ },indexedDB:function () {
+ return window.indexedDB || window.mozIndexedDB || window.webkitIndexedDB || window.msIndexedDB;
+ },DB_NAME:function () {
+ return 'EM_FS_' + window.location.pathname;
+ },DB_VERSION:20,DB_STORE_NAME:"FILE_DATA",saveFilesToDB:function (paths, onload, onerror) {
+ onload = onload || function(){};
+ onerror = onerror || function(){};
+ var indexedDB = FS.indexedDB();
+ try {
+ var openRequest = indexedDB.open(FS.DB_NAME(), FS.DB_VERSION);
+ } catch (e) {
+ return onerror(e);
+ }
+ openRequest.onupgradeneeded = function openRequest_onupgradeneeded() {
+ console.log('creating db');
+ var db = openRequest.result;
+ db.createObjectStore(FS.DB_STORE_NAME);
+ };
+ openRequest.onsuccess = function openRequest_onsuccess() {
+ var db = openRequest.result;
+ var transaction = db.transaction([FS.DB_STORE_NAME], 'readwrite');
+ var files = transaction.objectStore(FS.DB_STORE_NAME);
+ var ok = 0, fail = 0, total = paths.length;
+ function finish() {
+ if (fail == 0) onload(); else onerror();
+ }
+ paths.forEach(function(path) {
+ var putRequest = files.put(FS.analyzePath(path).object.contents, path);
+ putRequest.onsuccess = function putRequest_onsuccess() { ok++; if (ok + fail == total) finish() };
+ putRequest.onerror = function putRequest_onerror() { fail++; if (ok + fail == total) finish() };
+ });
+ transaction.onerror = onerror;
+ };
+ openRequest.onerror = onerror;
+ },loadFilesFromDB:function (paths, onload, onerror) {
+ onload = onload || function(){};
+ onerror = onerror || function(){};
+ var indexedDB = FS.indexedDB();
+ try {
+ var openRequest = indexedDB.open(FS.DB_NAME(), FS.DB_VERSION);
+ } catch (e) {
+ return onerror(e);
+ }
+ openRequest.onupgradeneeded = onerror; // no database to load from
+ openRequest.onsuccess = function openRequest_onsuccess() {
+ var db = openRequest.result;
+ try {
+ var transaction = db.transaction([FS.DB_STORE_NAME], 'readonly');
+ } catch(e) {
+ onerror(e);
+ return;
+ }
+ var files = transaction.objectStore(FS.DB_STORE_NAME);
+ var ok = 0, fail = 0, total = paths.length;
+ function finish() {
+ if (fail == 0) onload(); else onerror();
+ }
+ paths.forEach(function(path) {
+ var getRequest = files.get(path);
+ getRequest.onsuccess = function getRequest_onsuccess() {
+ if (FS.analyzePath(path).exists) {
+ FS.unlink(path);
+ }
+ FS.createDataFile(PATH.dirname(path), PATH.basename(path), getRequest.result, true, true, true);
+ ok++;
+ if (ok + fail == total) finish();
+ };
+ getRequest.onerror = function getRequest_onerror() { fail++; if (ok + fail == total) finish() };
+ });
+ transaction.onerror = onerror;
+ };
+ openRequest.onerror = onerror;
+ }};
+
+
+
+
+ function _mkport() { throw 'TODO' }var SOCKFS={mount:function (mount) {
+ return FS.createNode(null, '/', 16384 | 511 /* 0777 */, 0);
+ },createSocket:function (family, type, protocol) {
+ var streaming = type == 1;
+ if (protocol) {
+ assert(streaming == (protocol == 6)); // if SOCK_STREAM, must be tcp
+ }
+
+ // create our internal socket structure
+ var sock = {
+ family: family,
+ type: type,
+ protocol: protocol,
+ server: null,
+ peers: {},
+ pending: [],
+ recv_queue: [],
+ sock_ops: SOCKFS.websocket_sock_ops
+ };
+
+ // create the filesystem node to store the socket structure
+ var name = SOCKFS.nextname();
+ var node = FS.createNode(SOCKFS.root, name, 49152, 0);
+ node.sock = sock;
+
+ // and the wrapping stream that enables library functions such
+ // as read and write to indirectly interact with the socket
+ var stream = FS.createStream({
+ path: name,
+ node: node,
+ flags: FS.modeStringToFlags('r+'),
+ seekable: false,
+ stream_ops: SOCKFS.stream_ops
+ });
+
+ // map the new stream to the socket structure (sockets have a 1:1
+ // relationship with a stream)
+ sock.stream = stream;
+
+ return sock;
+ },getSocket:function (fd) {
+ var stream = FS.getStream(fd);
+ if (!stream || !FS.isSocket(stream.node.mode)) {
+ return null;
+ }
+ return stream.node.sock;
+ },stream_ops:{poll:function (stream) {
+ var sock = stream.node.sock;
+ return sock.sock_ops.poll(sock);
+ },ioctl:function (stream, request, varargs) {
+ var sock = stream.node.sock;
+ return sock.sock_ops.ioctl(sock, request, varargs);
+ },read:function (stream, buffer, offset, length, position /* ignored */) {
+ var sock = stream.node.sock;
+ var msg = sock.sock_ops.recvmsg(sock, length);
+ if (!msg) {
+ // socket is closed
+ return 0;
+ }
+ buffer.set(msg.buffer, offset);
+ return msg.buffer.length;
+ },write:function (stream, buffer, offset, length, position /* ignored */) {
+ var sock = stream.node.sock;
+ return sock.sock_ops.sendmsg(sock, buffer, offset, length);
+ },close:function (stream) {
+ var sock = stream.node.sock;
+ sock.sock_ops.close(sock);
+ }},nextname:function () {
+ if (!SOCKFS.nextname.current) {
+ SOCKFS.nextname.current = 0;
+ }
+ return 'socket[' + (SOCKFS.nextname.current++) + ']';
+ },websocket_sock_ops:{createPeer:function (sock, addr, port) {
+ var ws;
+
+ if (typeof addr === 'object') {
+ ws = addr;
+ addr = null;
+ port = null;
+ }
+
+ if (ws) {
+ // for sockets that've already connected (e.g. we're the server)
+ // we can inspect the _socket property for the address
+ if (ws._socket) {
+ addr = ws._socket.remoteAddress;
+ port = ws._socket.remotePort;
+ }
+ // if we're just now initializing a connection to the remote,
+ // inspect the url property
+ else {
+ var result = /ws[s]?:\/\/([^:]+):(\d+)/.exec(ws.url);
+ if (!result) {
+ throw new Error('WebSocket URL must be in the format ws(s)://address:port');
+ }
+ addr = result[1];
+ port = parseInt(result[2], 10);
+ }
+ } else {
+ // create the actual websocket object and connect
+ try {
+ // runtimeConfig gets set to true if WebSocket runtime configuration is available.
+ var runtimeConfig = (Module['websocket'] && ('object' === typeof Module['websocket']));
+
+ // The default value is 'ws://' the replace is needed because the compiler replaces "//" comments with '#'
+ // comments without checking context, so we'd end up with ws:#, the replace swaps the "#" for "//" again.
+ var url = 'ws:#'.replace('#', '//');
+
+ if (runtimeConfig) {
+ if ('string' === typeof Module['websocket']['url']) {
+ url = Module['websocket']['url']; // Fetch runtime WebSocket URL config.
+ }
+ }
+
+ if (url === 'ws://' || url === 'wss://') { // Is the supplied URL config just a prefix, if so complete it.
+ url = url + addr + ':' + port;
+ }
+
+ // Make the WebSocket subprotocol (Sec-WebSocket-Protocol) default to binary if no configuration is set.
+ var subProtocols = 'binary'; // The default value is 'binary'
+
+ if (runtimeConfig) {
+ if ('string' === typeof Module['websocket']['subprotocol']) {
+ subProtocols = Module['websocket']['subprotocol']; // Fetch runtime WebSocket subprotocol config.
+ }
+ }
+
+ // The regex trims the string (removes spaces at the beginning and end, then splits the string by
+ // <any space>,<any space> into an Array. Whitespace removal is important for Websockify and ws.
+ subProtocols = subProtocols.replace(/^ +| +$/g,"").split(/ *, */);
+
+ // The node ws library API for specifying optional subprotocol is slightly different than the browser's.
+ var opts = ENVIRONMENT_IS_NODE ? {'protocol': subProtocols.toString()} : subProtocols;
+
+ // If node we use the ws library.
+ var WebSocket = ENVIRONMENT_IS_NODE ? require('ws') : window['WebSocket'];
+ ws = new WebSocket(url, opts);
+ ws.binaryType = 'arraybuffer';
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EHOSTUNREACH);
+ }
+ }
+
+
+ var peer = {
+ addr: addr,
+ port: port,
+ socket: ws,
+ dgram_send_queue: []
+ };
+
+ SOCKFS.websocket_sock_ops.addPeer(sock, peer);
+ SOCKFS.websocket_sock_ops.handlePeerEvents(sock, peer);
+
+ // if this is a bound dgram socket, send the port number first to allow
+ // us to override the ephemeral port reported to us by remotePort on the
+ // remote end.
+ if (sock.type === 2 && typeof sock.sport !== 'undefined') {
+ peer.dgram_send_queue.push(new Uint8Array([
+ 255, 255, 255, 255,
+ 'p'.charCodeAt(0), 'o'.charCodeAt(0), 'r'.charCodeAt(0), 't'.charCodeAt(0),
+ ((sock.sport & 0xff00) >> 8) , (sock.sport & 0xff)
+ ]));
+ }
+
+ return peer;
+ },getPeer:function (sock, addr, port) {
+ return sock.peers[addr + ':' + port];
+ },addPeer:function (sock, peer) {
+ sock.peers[peer.addr + ':' + peer.port] = peer;
+ },removePeer:function (sock, peer) {
+ delete sock.peers[peer.addr + ':' + peer.port];
+ },handlePeerEvents:function (sock, peer) {
+ var first = true;
+
+ var handleOpen = function () {
+ try {
+ var queued = peer.dgram_send_queue.shift();
+ while (queued) {
+ peer.socket.send(queued);
+ queued = peer.dgram_send_queue.shift();
+ }
+ } catch (e) {
+ // not much we can do here in the way of proper error handling as we've already
+ // lied and said this data was sent. shut it down.
+ peer.socket.close();
+ }
+ };
+
+ function handleMessage(data) {
+ assert(typeof data !== 'string' && data.byteLength !== undefined); // must receive an ArrayBuffer
+ data = new Uint8Array(data); // make a typed array view on the array buffer
+
+
+ // if this is the port message, override the peer's port with it
+ var wasfirst = first;
+ first = false;
+ if (wasfirst &&
+ data.length === 10 &&
+ data[0] === 255 && data[1] === 255 && data[2] === 255 && data[3] === 255 &&
+ data[4] === 'p'.charCodeAt(0) && data[5] === 'o'.charCodeAt(0) && data[6] === 'r'.charCodeAt(0) && data[7] === 't'.charCodeAt(0)) {
+ // update the peer's port and it's key in the peer map
+ var newport = ((data[8] << 8) | data[9]);
+ SOCKFS.websocket_sock_ops.removePeer(sock, peer);
+ peer.port = newport;
+ SOCKFS.websocket_sock_ops.addPeer(sock, peer);
+ return;
+ }
+
+ sock.recv_queue.push({ addr: peer.addr, port: peer.port, data: data });
+ };
+
+ if (ENVIRONMENT_IS_NODE) {
+ peer.socket.on('open', handleOpen);
+ peer.socket.on('message', function(data, flags) {
+ if (!flags.binary) {
+ return;
+ }
+ handleMessage((new Uint8Array(data)).buffer); // copy from node Buffer -> ArrayBuffer
+ });
+ peer.socket.on('error', function() {
+ // don't throw
+ });
+ } else {
+ peer.socket.onopen = handleOpen;
+ peer.socket.onmessage = function peer_socket_onmessage(event) {
+ handleMessage(event.data);
+ };
+ }
+ },poll:function (sock) {
+ if (sock.type === 1 && sock.server) {
+ // listen sockets should only say they're available for reading
+ // if there are pending clients.
+ return sock.pending.length ? (64 | 1) : 0;
+ }
+
+ var mask = 0;
+ var dest = sock.type === 1 ? // we only care about the socket state for connection-based sockets
+ SOCKFS.websocket_sock_ops.getPeer(sock, sock.daddr, sock.dport) :
+ null;
+
+ if (sock.recv_queue.length ||
+ !dest || // connection-less sockets are always ready to read
+ (dest && dest.socket.readyState === dest.socket.CLOSING) ||
+ (dest && dest.socket.readyState === dest.socket.CLOSED)) { // let recv return 0 once closed
+ mask |= (64 | 1);
+ }
+
+ if (!dest || // connection-less sockets are always ready to write
+ (dest && dest.socket.readyState === dest.socket.OPEN)) {
+ mask |= 4;
+ }
+
+ if ((dest && dest.socket.readyState === dest.socket.CLOSING) ||
+ (dest && dest.socket.readyState === dest.socket.CLOSED)) {
+ mask |= 16;
+ }
+
+ return mask;
+ },ioctl:function (sock, request, arg) {
+ switch (request) {
+ case 21531:
+ var bytes = 0;
+ if (sock.recv_queue.length) {
+ bytes = sock.recv_queue[0].data.length;
+ }
+ HEAP32[((arg)>>2)]=bytes;
+ return 0;
+ default:
+ return ERRNO_CODES.EINVAL;
+ }
+ },close:function (sock) {
+ // if we've spawned a listen server, close it
+ if (sock.server) {
+ try {
+ sock.server.close();
+ } catch (e) {
+ }
+ sock.server = null;
+ }
+ // close any peer connections
+ var peers = Object.keys(sock.peers);
+ for (var i = 0; i < peers.length; i++) {
+ var peer = sock.peers[peers[i]];
+ try {
+ peer.socket.close();
+ } catch (e) {
+ }
+ SOCKFS.websocket_sock_ops.removePeer(sock, peer);
+ }
+ return 0;
+ },bind:function (sock, addr, port) {
+ if (typeof sock.saddr !== 'undefined' || typeof sock.sport !== 'undefined') {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL); // already bound
+ }
+ sock.saddr = addr;
+ sock.sport = port || _mkport();
+ // in order to emulate dgram sockets, we need to launch a listen server when
+ // binding on a connection-less socket
+ // note: this is only required on the server side
+ if (sock.type === 2) {
+ // close the existing server if it exists
+ if (sock.server) {
+ sock.server.close();
+ sock.server = null;
+ }
+ // swallow error operation not supported error that occurs when binding in the
+ // browser where this isn't supported
+ try {
+ sock.sock_ops.listen(sock, 0);
+ } catch (e) {
+ if (!(e instanceof FS.ErrnoError)) throw e;
+ if (e.errno !== ERRNO_CODES.EOPNOTSUPP) throw e;
+ }
+ }
+ },connect:function (sock, addr, port) {
+ if (sock.server) {
+ throw new FS.ErrnoError(ERRNO_CODS.EOPNOTSUPP);
+ }
+
+ // TODO autobind
+ // if (!sock.addr && sock.type == 2) {
+ // }
+
+ // early out if we're already connected / in the middle of connecting
+ if (typeof sock.daddr !== 'undefined' && typeof sock.dport !== 'undefined') {
+ var dest = SOCKFS.websocket_sock_ops.getPeer(sock, sock.daddr, sock.dport);
+ if (dest) {
+ if (dest.socket.readyState === dest.socket.CONNECTING) {
+ throw new FS.ErrnoError(ERRNO_CODES.EALREADY);
+ } else {
+ throw new FS.ErrnoError(ERRNO_CODES.EISCONN);
+ }
+ }
+ }
+
+ // add the socket to our peer list and set our
+ // destination address / port to match
+ var peer = SOCKFS.websocket_sock_ops.createPeer(sock, addr, port);
+ sock.daddr = peer.addr;
+ sock.dport = peer.port;
+
+ // always "fail" in non-blocking mode
+ throw new FS.ErrnoError(ERRNO_CODES.EINPROGRESS);
+ },listen:function (sock, backlog) {
+ if (!ENVIRONMENT_IS_NODE) {
+ throw new FS.ErrnoError(ERRNO_CODES.EOPNOTSUPP);
+ }
+ if (sock.server) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL); // already listening
+ }
+ var WebSocketServer = require('ws').Server;
+ var host = sock.saddr;
+ sock.server = new WebSocketServer({
+ host: host,
+ port: sock.sport
+ // TODO support backlog
+ });
+
+ sock.server.on('connection', function(ws) {
+ if (sock.type === 1) {
+ var newsock = SOCKFS.createSocket(sock.family, sock.type, sock.protocol);
+
+ // create a peer on the new socket
+ var peer = SOCKFS.websocket_sock_ops.createPeer(newsock, ws);
+ newsock.daddr = peer.addr;
+ newsock.dport = peer.port;
+
+ // push to queue for accept to pick up
+ sock.pending.push(newsock);
+ } else {
+ // create a peer on the listen socket so calling sendto
+ // with the listen socket and an address will resolve
+ // to the correct client
+ SOCKFS.websocket_sock_ops.createPeer(sock, ws);
+ }
+ });
+ sock.server.on('closed', function() {
+ sock.server = null;
+ });
+ sock.server.on('error', function() {
+ // don't throw
+ });
+ },accept:function (listensock) {
+ if (!listensock.server) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var newsock = listensock.pending.shift();
+ newsock.stream.flags = listensock.stream.flags;
+ return newsock;
+ },getname:function (sock, peer) {
+ var addr, port;
+ if (peer) {
+ if (sock.daddr === undefined || sock.dport === undefined) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ }
+ addr = sock.daddr;
+ port = sock.dport;
+ } else {
+ // TODO saddr and sport will be set for bind()'d UDP sockets, but what
+ // should we be returning for TCP sockets that've been connect()'d?
+ addr = sock.saddr || 0;
+ port = sock.sport || 0;
+ }
+ return { addr: addr, port: port };
+ },sendmsg:function (sock, buffer, offset, length, addr, port) {
+ if (sock.type === 2) {
+ // connection-less sockets will honor the message address,
+ // and otherwise fall back to the bound destination address
+ if (addr === undefined || port === undefined) {
+ addr = sock.daddr;
+ port = sock.dport;
+ }
+ // if there was no address to fall back to, error out
+ if (addr === undefined || port === undefined) {
+ throw new FS.ErrnoError(ERRNO_CODES.EDESTADDRREQ);
+ }
+ } else {
+ // connection-based sockets will only use the bound
+ addr = sock.daddr;
+ port = sock.dport;
+ }
+
+ // find the peer for the destination address
+ var dest = SOCKFS.websocket_sock_ops.getPeer(sock, addr, port);
+
+ // early out if not connected with a connection-based socket
+ if (sock.type === 1) {
+ if (!dest || dest.socket.readyState === dest.socket.CLOSING || dest.socket.readyState === dest.socket.CLOSED) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ } else if (dest.socket.readyState === dest.socket.CONNECTING) {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ }
+
+ // create a copy of the incoming data to send, as the WebSocket API
+ // doesn't work entirely with an ArrayBufferView, it'll just send
+ // the entire underlying buffer
+ var data;
+ if (buffer instanceof Array || buffer instanceof ArrayBuffer) {
+ data = buffer.slice(offset, offset + length);
+ } else { // ArrayBufferView
+ data = buffer.buffer.slice(buffer.byteOffset + offset, buffer.byteOffset + offset + length);
+ }
+
+ // if we're emulating a connection-less dgram socket and don't have
+ // a cached connection, queue the buffer to send upon connect and
+ // lie, saying the data was sent now.
+ if (sock.type === 2) {
+ if (!dest || dest.socket.readyState !== dest.socket.OPEN) {
+ // if we're not connected, open a new connection
+ if (!dest || dest.socket.readyState === dest.socket.CLOSING || dest.socket.readyState === dest.socket.CLOSED) {
+ dest = SOCKFS.websocket_sock_ops.createPeer(sock, addr, port);
+ }
+ dest.dgram_send_queue.push(data);
+ return length;
+ }
+ }
+
+ try {
+ // send the actual data
+ dest.socket.send(data);
+ return length;
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ },recvmsg:function (sock, length) {
+ // http://pubs.opengroup.org/onlinepubs/7908799/xns/recvmsg.html
+ if (sock.type === 1 && sock.server) {
+ // tcp servers should not be recv()'ing on the listen socket
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ }
+
+ var queued = sock.recv_queue.shift();
+ if (!queued) {
+ if (sock.type === 1) {
+ var dest = SOCKFS.websocket_sock_ops.getPeer(sock, sock.daddr, sock.dport);
+
+ if (!dest) {
+ // if we have a destination address but are not connected, error out
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ }
+ else if (dest.socket.readyState === dest.socket.CLOSING || dest.socket.readyState === dest.socket.CLOSED) {
+ // return null if the socket has closed
+ return null;
+ }
+ else {
+ // else, our socket is in a valid state but truly has nothing available
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ } else {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ }
+
+ // queued.data will be an ArrayBuffer if it's unadulterated, but if it's
+ // requeued TCP data it'll be an ArrayBufferView
+ var queuedLength = queued.data.byteLength || queued.data.length;
+ var queuedOffset = queued.data.byteOffset || 0;
+ var queuedBuffer = queued.data.buffer || queued.data;
+ var bytesRead = Math.min(length, queuedLength);
+ var res = {
+ buffer: new Uint8Array(queuedBuffer, queuedOffset, bytesRead),
+ addr: queued.addr,
+ port: queued.port
+ };
+
+
+ // push back any unread data for TCP connections
+ if (sock.type === 1 && bytesRead < queuedLength) {
+ var bytesRemaining = queuedLength - bytesRead;
+ queued.data = new Uint8Array(queuedBuffer, queuedOffset + bytesRead, bytesRemaining);
+ sock.recv_queue.unshift(queued);
+ }
+
+ return res;
+ }}};function _send(fd, buf, len, flags) {
+ var sock = SOCKFS.getSocket(fd);
+ if (!sock) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+ // TODO honor flags
+ return _write(fd, buf, len);
+ }
+
+ function _pwrite(fildes, buf, nbyte, offset) {
+ // ssize_t pwrite(int fildes, const void *buf, size_t nbyte, off_t offset);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/write.html
+ var stream = FS.getStream(fildes);
+ if (!stream) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+ try {
+ var slab = HEAP8;
+ return FS.write(stream, slab, buf, nbyte, offset);
+ } catch (e) {
+ FS.handleFSError(e);
+ return -1;
+ }
+ }function _write(fildes, buf, nbyte) {
+ // ssize_t write(int fildes, const void *buf, size_t nbyte);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/write.html
+ var stream = FS.getStream(fildes);
+ if (!stream) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+
+
+ try {
+ var slab = HEAP8;
+ return FS.write(stream, slab, buf, nbyte);
+ } catch (e) {
+ FS.handleFSError(e);
+ return -1;
+ }
+ }
+
+ function _fileno(stream) {
+ // int fileno(FILE *stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fileno.html
+ stream = FS.getStreamFromPtr(stream);
+ if (!stream) return -1;
+ return stream.fd;
+ }function _fwrite(ptr, size, nitems, stream) {
+ // size_t fwrite(const void *restrict ptr, size_t size, size_t nitems, FILE *restrict stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fwrite.html
+ var bytesToWrite = nitems * size;
+ if (bytesToWrite == 0) return 0;
+ var fd = _fileno(stream);
+ var bytesWritten = _write(fd, ptr, bytesToWrite);
+ if (bytesWritten == -1) {
+ var streamObj = FS.getStreamFromPtr(stream);
+ if (streamObj) streamObj.error = true;
+ return 0;
+ } else {
+ return Math.floor(bytesWritten / size);
+ }
+ }
+
+
+
+ Module["_strlen"] = _strlen;
+
+ function __reallyNegative(x) {
+ return x < 0 || (x === 0 && (1/x) === -Infinity);
+ }function __formatString(format, varargs) {
+ var textIndex = format;
+ var argIndex = 0;
+ function getNextArg(type) {
+ // NOTE: Explicitly ignoring type safety. Otherwise this fails:
+ // int x = 4; printf("%c\n", (char)x);
+ var ret;
+ if (type === 'double') {
+ ret = HEAPF64[(((varargs)+(argIndex))>>3)];
+ } else if (type == 'i64') {
+ ret = [HEAP32[(((varargs)+(argIndex))>>2)],
+ HEAP32[(((varargs)+(argIndex+4))>>2)]];
+
+ } else {
+ type = 'i32'; // varargs are always i32, i64, or double
+ ret = HEAP32[(((varargs)+(argIndex))>>2)];
+ }
+ argIndex += Runtime.getNativeFieldSize(type);
+ return ret;
+ }
+
+ var ret = [];
+ var curr, next, currArg;
+ while(1) {
+ var startTextIndex = textIndex;
+ curr = HEAP8[(textIndex)];
+ if (curr === 0) break;
+ next = HEAP8[((textIndex+1)|0)];
+ if (curr == 37) {
+ // Handle flags.
+ var flagAlwaysSigned = false;
+ var flagLeftAlign = false;
+ var flagAlternative = false;
+ var flagZeroPad = false;
+ var flagPadSign = false;
+ flagsLoop: while (1) {
+ switch (next) {
+ case 43:
+ flagAlwaysSigned = true;
+ break;
+ case 45:
+ flagLeftAlign = true;
+ break;
+ case 35:
+ flagAlternative = true;
+ break;
+ case 48:
+ if (flagZeroPad) {
+ break flagsLoop;
+ } else {
+ flagZeroPad = true;
+ break;
+ }
+ case 32:
+ flagPadSign = true;
+ break;
+ default:
+ break flagsLoop;
+ }
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ }
+
+ // Handle width.
+ var width = 0;
+ if (next == 42) {
+ width = getNextArg('i32');
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ } else {
+ while (next >= 48 && next <= 57) {
+ width = width * 10 + (next - 48);
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ }
+ }
+
+ // Handle precision.
+ var precisionSet = false, precision = -1;
+ if (next == 46) {
+ precision = 0;
+ precisionSet = true;
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ if (next == 42) {
+ precision = getNextArg('i32');
+ textIndex++;
+ } else {
+ while(1) {
+ var precisionChr = HEAP8[((textIndex+1)|0)];
+ if (precisionChr < 48 ||
+ precisionChr > 57) break;
+ precision = precision * 10 + (precisionChr - 48);
+ textIndex++;
+ }
+ }
+ next = HEAP8[((textIndex+1)|0)];
+ }
+ if (precision < 0) {
+ precision = 6; // Standard default.
+ precisionSet = false;
+ }
+
+ // Handle integer sizes. WARNING: These assume a 32-bit architecture!
+ var argSize;
+ switch (String.fromCharCode(next)) {
+ case 'h':
+ var nextNext = HEAP8[((textIndex+2)|0)];
+ if (nextNext == 104) {
+ textIndex++;
+ argSize = 1; // char (actually i32 in varargs)
+ } else {
+ argSize = 2; // short (actually i32 in varargs)
+ }
+ break;
+ case 'l':
+ var nextNext = HEAP8[((textIndex+2)|0)];
+ if (nextNext == 108) {
+ textIndex++;
+ argSize = 8; // long long
+ } else {
+ argSize = 4; // long
+ }
+ break;
+ case 'L': // long long
+ case 'q': // int64_t
+ case 'j': // intmax_t
+ argSize = 8;
+ break;
+ case 'z': // size_t
+ case 't': // ptrdiff_t
+ case 'I': // signed ptrdiff_t or unsigned size_t
+ argSize = 4;
+ break;
+ default:
+ argSize = null;
+ }
+ if (argSize) textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+
+ // Handle type specifier.
+ switch (String.fromCharCode(next)) {
+ case 'd': case 'i': case 'u': case 'o': case 'x': case 'X': case 'p': {
+ // Integer.
+ var signed = next == 100 || next == 105;
+ argSize = argSize || 4;
+ var currArg = getNextArg('i' + (argSize * 8));
+ var argText;
+ // Flatten i64-1 [low, high] into a (slightly rounded) double
+ if (argSize == 8) {
+ currArg = Runtime.makeBigInt(currArg[0], currArg[1], next == 117);
+ }
+ // Truncate to requested size.
+ if (argSize <= 4) {
+ var limit = Math.pow(256, argSize) - 1;
+ currArg = (signed ? reSign : unSign)(currArg & limit, argSize * 8);
+ }
+ // Format the number.
+ var currAbsArg = Math.abs(currArg);
+ var prefix = '';
+ if (next == 100 || next == 105) {
+ argText = reSign(currArg, 8 * argSize, 1).toString(10);
+ } else if (next == 117) {
+ argText = unSign(currArg, 8 * argSize, 1).toString(10);
+ currArg = Math.abs(currArg);
+ } else if (next == 111) {
+ argText = (flagAlternative ? '0' : '') + currAbsArg.toString(8);
+ } else if (next == 120 || next == 88) {
+ prefix = (flagAlternative && currArg != 0) ? '0x' : '';
+ if (currArg < 0) {
+ // Represent negative numbers in hex as 2's complement.
+ currArg = -currArg;
+ argText = (currAbsArg - 1).toString(16);
+ var buffer = [];
+ for (var i = 0; i < argText.length; i++) {
+ buffer.push((0xF - parseInt(argText[i], 16)).toString(16));
+ }
+ argText = buffer.join('');
+ while (argText.length < argSize * 2) argText = 'f' + argText;
+ } else {
+ argText = currAbsArg.toString(16);
+ }
+ if (next == 88) {
+ prefix = prefix.toUpperCase();
+ argText = argText.toUpperCase();
+ }
+ } else if (next == 112) {
+ if (currAbsArg === 0) {
+ argText = '(nil)';
+ } else {
+ prefix = '0x';
+ argText = currAbsArg.toString(16);
+ }
+ }
+ if (precisionSet) {
+ while (argText.length < precision) {
+ argText = '0' + argText;
+ }
+ }
+
+ // Add sign if needed
+ if (currArg >= 0) {
+ if (flagAlwaysSigned) {
+ prefix = '+' + prefix;
+ } else if (flagPadSign) {
+ prefix = ' ' + prefix;
+ }
+ }
+
+ // Move sign to prefix so we zero-pad after the sign
+ if (argText.charAt(0) == '-') {
+ prefix = '-' + prefix;
+ argText = argText.substr(1);
+ }
+
+ // Add padding.
+ while (prefix.length + argText.length < width) {
+ if (flagLeftAlign) {
+ argText += ' ';
+ } else {
+ if (flagZeroPad) {
+ argText = '0' + argText;
+ } else {
+ prefix = ' ' + prefix;
+ }
+ }
+ }
+
+ // Insert the result into the buffer.
+ argText = prefix + argText;
+ argText.split('').forEach(function(chr) {
+ ret.push(chr.charCodeAt(0));
+ });
+ break;
+ }
+ case 'f': case 'F': case 'e': case 'E': case 'g': case 'G': {
+ // Float.
+ var currArg = getNextArg('double');
+ var argText;
+ if (isNaN(currArg)) {
+ argText = 'nan';
+ flagZeroPad = false;
+ } else if (!isFinite(currArg)) {
+ argText = (currArg < 0 ? '-' : '') + 'inf';
+ flagZeroPad = false;
+ } else {
+ var isGeneral = false;
+ var effectivePrecision = Math.min(precision, 20);
+
+ // Convert g/G to f/F or e/E, as per:
+ // http://pubs.opengroup.org/onlinepubs/9699919799/functions/printf.html
+ if (next == 103 || next == 71) {
+ isGeneral = true;
+ precision = precision || 1;
+ var exponent = parseInt(currArg.toExponential(effectivePrecision).split('e')[1], 10);
+ if (precision > exponent && exponent >= -4) {
+ next = ((next == 103) ? 'f' : 'F').charCodeAt(0);
+ precision -= exponent + 1;
+ } else {
+ next = ((next == 103) ? 'e' : 'E').charCodeAt(0);
+ precision--;
+ }
+ effectivePrecision = Math.min(precision, 20);
+ }
+
+ if (next == 101 || next == 69) {
+ argText = currArg.toExponential(effectivePrecision);
+ // Make sure the exponent has at least 2 digits.
+ if (/[eE][-+]\d$/.test(argText)) {
+ argText = argText.slice(0, -1) + '0' + argText.slice(-1);
+ }
+ } else if (next == 102 || next == 70) {
+ argText = currArg.toFixed(effectivePrecision);
+ if (currArg === 0 && __reallyNegative(currArg)) {
+ argText = '-' + argText;
+ }
+ }
+
+ var parts = argText.split('e');
+ if (isGeneral && !flagAlternative) {
+ // Discard trailing zeros and periods.
+ while (parts[0].length > 1 && parts[0].indexOf('.') != -1 &&
+ (parts[0].slice(-1) == '0' || parts[0].slice(-1) == '.')) {
+ parts[0] = parts[0].slice(0, -1);
+ }
+ } else {
+ // Make sure we have a period in alternative mode.
+ if (flagAlternative && argText.indexOf('.') == -1) parts[0] += '.';
+ // Zero pad until required precision.
+ while (precision > effectivePrecision++) parts[0] += '0';
+ }
+ argText = parts[0] + (parts.length > 1 ? 'e' + parts[1] : '');
+
+ // Capitalize 'E' if needed.
+ if (next == 69) argText = argText.toUpperCase();
+
+ // Add sign.
+ if (currArg >= 0) {
+ if (flagAlwaysSigned) {
+ argText = '+' + argText;
+ } else if (flagPadSign) {
+ argText = ' ' + argText;
+ }
+ }
+ }
+
+ // Add padding.
+ while (argText.length < width) {
+ if (flagLeftAlign) {
+ argText += ' ';
+ } else {
+ if (flagZeroPad && (argText[0] == '-' || argText[0] == '+')) {
+ argText = argText[0] + '0' + argText.slice(1);
+ } else {
+ argText = (flagZeroPad ? '0' : ' ') + argText;
+ }
+ }
+ }
+
+ // Adjust case.
+ if (next < 97) argText = argText.toUpperCase();
+
+ // Insert the result into the buffer.
+ argText.split('').forEach(function(chr) {
+ ret.push(chr.charCodeAt(0));
+ });
+ break;
+ }
+ case 's': {
+ // String.
+ var arg = getNextArg('i8*');
+ var argLength = arg ? _strlen(arg) : '(null)'.length;
+ if (precisionSet) argLength = Math.min(argLength, precision);
+ if (!flagLeftAlign) {
+ while (argLength < width--) {
+ ret.push(32);
+ }
+ }
+ if (arg) {
+ for (var i = 0; i < argLength; i++) {
+ ret.push(HEAPU8[((arg++)|0)]);
+ }
+ } else {
+ ret = ret.concat(intArrayFromString('(null)'.substr(0, argLength), true));
+ }
+ if (flagLeftAlign) {
+ while (argLength < width--) {
+ ret.push(32);
+ }
+ }
+ break;
+ }
+ case 'c': {
+ // Character.
+ if (flagLeftAlign) ret.push(getNextArg('i8'));
+ while (--width > 0) {
+ ret.push(32);
+ }
+ if (!flagLeftAlign) ret.push(getNextArg('i8'));
+ break;
+ }
+ case 'n': {
+ // Write the length written so far to the next parameter.
+ var ptr = getNextArg('i32*');
+ HEAP32[((ptr)>>2)]=ret.length;
+ break;
+ }
+ case '%': {
+ // Literal percent sign.
+ ret.push(curr);
+ break;
+ }
+ default: {
+ // Unknown specifiers remain untouched.
+ for (var i = startTextIndex; i < textIndex + 2; i++) {
+ ret.push(HEAP8[(i)]);
+ }
+ }
+ }
+ textIndex += 2;
+ // TODO: Support a/A (hex float) and m (last error) specifiers.
+ // TODO: Support %1${specifier} for arg selection.
+ } else {
+ ret.push(curr);
+ textIndex += 1;
+ }
+ }
+ return ret;
+ }function _fprintf(stream, format, varargs) {
+ // int fprintf(FILE *restrict stream, const char *restrict format, ...);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/printf.html
+ var result = __formatString(format, varargs);
+ var stack = Runtime.stackSave();
+ var ret = _fwrite(allocate(result, 'i8', ALLOC_STACK), 1, result.length, stream);
+ Runtime.stackRestore(stack);
+ return ret;
+ }function _printf(format, varargs) {
+ // int printf(const char *restrict format, ...);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/printf.html
+ var stdout = HEAP32[((_stdout)>>2)];
+ return _fprintf(stdout, format, varargs);
+ }
+
+ var _sinf=Math_sin;
+
+
+ var _sqrtf=Math_sqrt;
+
+ var _floorf=Math_floor;
+
+
+ function _fputs(s, stream) {
+ // int fputs(const char *restrict s, FILE *restrict stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fputs.html
+ var fd = _fileno(stream);
+ return _write(fd, s, _strlen(s));
+ }
+
+ function _fputc(c, stream) {
+ // int fputc(int c, FILE *stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fputc.html
+ var chr = unSign(c & 0xFF);
+ HEAP8[((_fputc.ret)|0)]=chr;
+ var fd = _fileno(stream);
+ var ret = _write(fd, _fputc.ret, 1);
+ if (ret == -1) {
+ var streamObj = FS.getStreamFromPtr(stream);
+ if (streamObj) streamObj.error = true;
+ return -1;
+ } else {
+ return chr;
+ }
+ }function _puts(s) {
+ // int puts(const char *s);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/puts.html
+ // NOTE: puts() always writes an extra newline.
+ var stdout = HEAP32[((_stdout)>>2)];
+ var ret = _fputs(s, stdout);
+ if (ret < 0) {
+ return ret;
+ } else {
+ var newlineRet = _fputc(10, stdout);
+ return (newlineRet < 0) ? -1 : ret + 1;
+ }
+ }
+
+ function _clock() {
+ if (_clock.start === undefined) _clock.start = Date.now();
+ return Math.floor((Date.now() - _clock.start) * (1000000/1000));
+ }
+
+
+ var ___cxa_caught_exceptions=[];function ___cxa_begin_catch(ptr) {
+ __ZSt18uncaught_exceptionv.uncaught_exception--;
+ ___cxa_caught_exceptions.push(___cxa_last_thrown_exception);
+ return ptr;
+ }
+
+ function ___errno_location() {
+ return ___errno_state;
+ }
+
+
+ function _emscripten_memcpy_big(dest, src, num) {
+ HEAPU8.set(HEAPU8.subarray(src, src+num), dest);
+ return dest;
+ }
+ Module["_memcpy"] = _memcpy;
+
+ function __ZNSt9exceptionD2Ev() {}
+
+ var Browser={mainLoop:{scheduler:null,method:"",shouldPause:false,paused:false,queue:[],pause:function () {
+ Browser.mainLoop.shouldPause = true;
+ },resume:function () {
+ if (Browser.mainLoop.paused) {
+ Browser.mainLoop.paused = false;
+ Browser.mainLoop.scheduler();
+ }
+ Browser.mainLoop.shouldPause = false;
+ },updateStatus:function () {
+ if (Module['setStatus']) {
+ var message = Module['statusMessage'] || 'Please wait...';
+ var remaining = Browser.mainLoop.remainingBlockers;
+ var expected = Browser.mainLoop.expectedBlockers;
+ if (remaining) {
+ if (remaining < expected) {
+ Module['setStatus'](message + ' (' + (expected - remaining) + '/' + expected + ')');
+ } else {
+ Module['setStatus'](message);
+ }
+ } else {
+ Module['setStatus']('');
+ }
+ }
+ }},isFullScreen:false,pointerLock:false,moduleContextCreatedCallbacks:[],workers:[],init:function () {
+ if (!Module["preloadPlugins"]) Module["preloadPlugins"] = []; // needs to exist even in workers
+
+ if (Browser.initted || ENVIRONMENT_IS_WORKER) return;
+ Browser.initted = true;
+
+ try {
+ new Blob();
+ Browser.hasBlobConstructor = true;
+ } catch(e) {
+ Browser.hasBlobConstructor = false;
+ console.log("warning: no blob constructor, cannot create blobs with mimetypes");
+ }
+ Browser.BlobBuilder = typeof MozBlobBuilder != "undefined" ? MozBlobBuilder : (typeof WebKitBlobBuilder != "undefined" ? WebKitBlobBuilder : (!Browser.hasBlobConstructor ? console.log("warning: no BlobBuilder") : null));
+ Browser.URLObject = typeof window != "undefined" ? (window.URL ? window.URL : window.webkitURL) : undefined;
+ if (!Module.noImageDecoding && typeof Browser.URLObject === 'undefined') {
+ console.log("warning: Browser does not support creating object URLs. Built-in browser image decoding will not be available.");
+ Module.noImageDecoding = true;
+ }
+
+ // Support for plugins that can process preloaded files. You can add more of these to
+ // your app by creating and appending to Module.preloadPlugins.
+ //
+ // Each plugin is asked if it can handle a file based on the file's name. If it can,
+ // it is given the file's raw data. When it is done, it calls a callback with the file's
+ // (possibly modified) data. For example, a plugin might decompress a file, or it
+ // might create some side data structure for use later (like an Image element, etc.).
+
+ var imagePlugin = {};
+ imagePlugin['canHandle'] = function imagePlugin_canHandle(name) {
+ return !Module.noImageDecoding && /\.(jpg|jpeg|png|bmp)$/i.test(name);
+ };
+ imagePlugin['handle'] = function imagePlugin_handle(byteArray, name, onload, onerror) {
+ var b = null;
+ if (Browser.hasBlobConstructor) {
+ try {
+ b = new Blob([byteArray], { type: Browser.getMimetype(name) });
+ if (b.size !== byteArray.length) { // Safari bug #118630
+ // Safari's Blob can only take an ArrayBuffer
+ b = new Blob([(new Uint8Array(byteArray)).buffer], { type: Browser.getMimetype(name) });
+ }
+ } catch(e) {
+ Runtime.warnOnce('Blob constructor present but fails: ' + e + '; falling back to blob builder');
+ }
+ }
+ if (!b) {
+ var bb = new Browser.BlobBuilder();
+ bb.append((new Uint8Array(byteArray)).buffer); // we need to pass a buffer, and must copy the array to get the right data range
+ b = bb.getBlob();
+ }
+ var url = Browser.URLObject.createObjectURL(b);
+ var img = new Image();
+ img.onload = function img_onload() {
+ assert(img.complete, 'Image ' + name + ' could not be decoded');
+ var canvas = document.createElement('canvas');
+ canvas.width = img.width;
+ canvas.height = img.height;
+ var ctx = canvas.getContext('2d');
+ ctx.drawImage(img, 0, 0);
+ Module["preloadedImages"][name] = canvas;
+ Browser.URLObject.revokeObjectURL(url);
+ if (onload) onload(byteArray);
+ };
+ img.onerror = function img_onerror(event) {
+ console.log('Image ' + url + ' could not be decoded');
+ if (onerror) onerror();
+ };
+ img.src = url;
+ };
+ Module['preloadPlugins'].push(imagePlugin);
+
+ var audioPlugin = {};
+ audioPlugin['canHandle'] = function audioPlugin_canHandle(name) {
+ return !Module.noAudioDecoding && name.substr(-4) in { '.ogg': 1, '.wav': 1, '.mp3': 1 };
+ };
+ audioPlugin['handle'] = function audioPlugin_handle(byteArray, name, onload, onerror) {
+ var done = false;
+ function finish(audio) {
+ if (done) return;
+ done = true;
+ Module["preloadedAudios"][name] = audio;
+ if (onload) onload(byteArray);
+ }
+ function fail() {
+ if (done) return;
+ done = true;
+ Module["preloadedAudios"][name] = new Audio(); // empty shim
+ if (onerror) onerror();
+ }
+ if (Browser.hasBlobConstructor) {
+ try {
+ var b = new Blob([byteArray], { type: Browser.getMimetype(name) });
+ } catch(e) {
+ return fail();
+ }
+ var url = Browser.URLObject.createObjectURL(b); // XXX we never revoke this!
+ var audio = new Audio();
+ audio.addEventListener('canplaythrough', function() { finish(audio) }, false); // use addEventListener due to chromium bug 124926
+ audio.onerror = function audio_onerror(event) {
+ if (done) return;
+ console.log('warning: browser could not fully decode audio ' + name + ', trying slower base64 approach');
+ function encode64(data) {
+ var BASE = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';
+ var PAD = '=';
+ var ret = '';
+ var leftchar = 0;
+ var leftbits = 0;
+ for (var i = 0; i < data.length; i++) {
+ leftchar = (leftchar << 8) | data[i];
+ leftbits += 8;
+ while (leftbits >= 6) {
+ var curr = (leftchar >> (leftbits-6)) & 0x3f;
+ leftbits -= 6;
+ ret += BASE[curr];
+ }
+ }
+ if (leftbits == 2) {
+ ret += BASE[(leftchar&3) << 4];
+ ret += PAD + PAD;
+ } else if (leftbits == 4) {
+ ret += BASE[(leftchar&0xf) << 2];
+ ret += PAD;
+ }
+ return ret;
+ }
+ audio.src = 'data:audio/x-' + name.substr(-3) + ';base64,' + encode64(byteArray);
+ finish(audio); // we don't wait for confirmation this worked - but it's worth trying
+ };
+ audio.src = url;
+ // workaround for chrome bug 124926 - we do not always get oncanplaythrough or onerror
+ Browser.safeSetTimeout(function() {
+ finish(audio); // try to use it even though it is not necessarily ready to play
+ }, 10000);
+ } else {
+ return fail();
+ }
+ };
+ Module['preloadPlugins'].push(audioPlugin);
+
+ // Canvas event setup
+
+ var canvas = Module['canvas'];
+
+ // forced aspect ratio can be enabled by defining 'forcedAspectRatio' on Module
+ // Module['forcedAspectRatio'] = 4 / 3;
+
+ canvas.requestPointerLock = canvas['requestPointerLock'] ||
+ canvas['mozRequestPointerLock'] ||
+ canvas['webkitRequestPointerLock'] ||
+ canvas['msRequestPointerLock'] ||
+ function(){};
+ canvas.exitPointerLock = document['exitPointerLock'] ||
+ document['mozExitPointerLock'] ||
+ document['webkitExitPointerLock'] ||
+ document['msExitPointerLock'] ||
+ function(){}; // no-op if function does not exist
+ canvas.exitPointerLock = canvas.exitPointerLock.bind(document);
+
+ function pointerLockChange() {
+ Browser.pointerLock = document['pointerLockElement'] === canvas ||
+ document['mozPointerLockElement'] === canvas ||
+ document['webkitPointerLockElement'] === canvas ||
+ document['msPointerLockElement'] === canvas;
+ }
+
+ document.addEventListener('pointerlockchange', pointerLockChange, false);
+ document.addEventListener('mozpointerlockchange', pointerLockChange, false);
+ document.addEventListener('webkitpointerlockchange', pointerLockChange, false);
+ document.addEventListener('mspointerlockchange', pointerLockChange, false);
+
+ if (Module['elementPointerLock']) {
+ canvas.addEventListener("click", function(ev) {
+ if (!Browser.pointerLock && canvas.requestPointerLock) {
+ canvas.requestPointerLock();
+ ev.preventDefault();
+ }
+ }, false);
+ }
+ },createContext:function (canvas, useWebGL, setInModule, webGLContextAttributes) {
+ var ctx;
+ var errorInfo = '?';
+ function onContextCreationError(event) {
+ errorInfo = event.statusMessage || errorInfo;
+ }
+ try {
+ if (useWebGL) {
+ var contextAttributes = {
+ antialias: false,
+ alpha: false
+ };
+
+ if (webGLContextAttributes) {
+ for (var attribute in webGLContextAttributes) {
+ contextAttributes[attribute] = webGLContextAttributes[attribute];
+ }
+ }
+
+
+ canvas.addEventListener('webglcontextcreationerror', onContextCreationError, false);
+ try {
+ ['experimental-webgl', 'webgl'].some(function(webglId) {
+ return ctx = canvas.getContext(webglId, contextAttributes);
+ });
+ } finally {
+ canvas.removeEventListener('webglcontextcreationerror', onContextCreationError, false);
+ }
+ } else {
+ ctx = canvas.getContext('2d');
+ }
+ if (!ctx) throw ':(';
+ } catch (e) {
+ Module.print('Could not create canvas: ' + [errorInfo, e]);
+ return null;
+ }
+ if (useWebGL) {
+ // Set the background of the WebGL canvas to black
+ canvas.style.backgroundColor = "black";
+
+ // Warn on context loss
+ canvas.addEventListener('webglcontextlost', function(event) {
+ alert('WebGL context lost. You will need to reload the page.');
+ }, false);
+ }
+ if (setInModule) {
+ GLctx = Module.ctx = ctx;
+ Module.useWebGL = useWebGL;
+ Browser.moduleContextCreatedCallbacks.forEach(function(callback) { callback() });
+ Browser.init();
+ }
+ return ctx;
+ },destroyContext:function (canvas, useWebGL, setInModule) {},fullScreenHandlersInstalled:false,lockPointer:undefined,resizeCanvas:undefined,requestFullScreen:function (lockPointer, resizeCanvas) {
+ Browser.lockPointer = lockPointer;
+ Browser.resizeCanvas = resizeCanvas;
+ if (typeof Browser.lockPointer === 'undefined') Browser.lockPointer = true;
+ if (typeof Browser.resizeCanvas === 'undefined') Browser.resizeCanvas = false;
+
+ var canvas = Module['canvas'];
+ function fullScreenChange() {
+ Browser.isFullScreen = false;
+ var canvasContainer = canvas.parentNode;
+ if ((document['webkitFullScreenElement'] || document['webkitFullscreenElement'] ||
+ document['mozFullScreenElement'] || document['mozFullscreenElement'] ||
+ document['fullScreenElement'] || document['fullscreenElement'] ||
+ document['msFullScreenElement'] || document['msFullscreenElement'] ||
+ document['webkitCurrentFullScreenElement']) === canvasContainer) {
+ canvas.cancelFullScreen = document['cancelFullScreen'] ||
+ document['mozCancelFullScreen'] ||
+ document['webkitCancelFullScreen'] ||
+ document['msExitFullscreen'] ||
+ document['exitFullscreen'] ||
+ function() {};
+ canvas.cancelFullScreen = canvas.cancelFullScreen.bind(document);
+ if (Browser.lockPointer) canvas.requestPointerLock();
+ Browser.isFullScreen = true;
+ if (Browser.resizeCanvas) Browser.setFullScreenCanvasSize();
+ } else {
+
+ // remove the full screen specific parent of the canvas again to restore the HTML structure from before going full screen
+ canvasContainer.parentNode.insertBefore(canvas, canvasContainer);
+ canvasContainer.parentNode.removeChild(canvasContainer);
+
+ if (Browser.resizeCanvas) Browser.setWindowedCanvasSize();
+ }
+ if (Module['onFullScreen']) Module['onFullScreen'](Browser.isFullScreen);
+ Browser.updateCanvasDimensions(canvas);
+ }
+
+ if (!Browser.fullScreenHandlersInstalled) {
+ Browser.fullScreenHandlersInstalled = true;
+ document.addEventListener('fullscreenchange', fullScreenChange, false);
+ document.addEventListener('mozfullscreenchange', fullScreenChange, false);
+ document.addEventListener('webkitfullscreenchange', fullScreenChange, false);
+ document.addEventListener('MSFullscreenChange', fullScreenChange, false);
+ }
+
+ // create a new parent to ensure the canvas has no siblings. this allows browsers to optimize full screen performance when its parent is the full screen root
+ var canvasContainer = document.createElement("div");
+ canvas.parentNode.insertBefore(canvasContainer, canvas);
+ canvasContainer.appendChild(canvas);
+
+ // use parent of canvas as full screen root to allow aspect ratio correction (Firefox stretches the root to screen size)
+ canvasContainer.requestFullScreen = canvasContainer['requestFullScreen'] ||
+ canvasContainer['mozRequestFullScreen'] ||
+ canvasContainer['msRequestFullscreen'] ||
+ (canvasContainer['webkitRequestFullScreen'] ? function() { canvasContainer['webkitRequestFullScreen'](Element['ALLOW_KEYBOARD_INPUT']) } : null);
+ canvasContainer.requestFullScreen();
+ },requestAnimationFrame:function requestAnimationFrame(func) {
+ if (typeof window === 'undefined') { // Provide fallback to setTimeout if window is undefined (e.g. in Node.js)
+ setTimeout(func, 1000/60);
+ } else {
+ if (!window.requestAnimationFrame) {
+ window.requestAnimationFrame = window['requestAnimationFrame'] ||
+ window['mozRequestAnimationFrame'] ||
+ window['webkitRequestAnimationFrame'] ||
+ window['msRequestAnimationFrame'] ||
+ window['oRequestAnimationFrame'] ||
+ window['setTimeout'];
+ }
+ window.requestAnimationFrame(func);
+ }
+ },safeCallback:function (func) {
+ return function() {
+ if (!ABORT) return func.apply(null, arguments);
+ };
+ },safeRequestAnimationFrame:function (func) {
+ return Browser.requestAnimationFrame(function() {
+ if (!ABORT) func();
+ });
+ },safeSetTimeout:function (func, timeout) {
+ return setTimeout(function() {
+ if (!ABORT) func();
+ }, timeout);
+ },safeSetInterval:function (func, timeout) {
+ return setInterval(function() {
+ if (!ABORT) func();
+ }, timeout);
+ },getMimetype:function (name) {
+ return {
+ 'jpg': 'image/jpeg',
+ 'jpeg': 'image/jpeg',
+ 'png': 'image/png',
+ 'bmp': 'image/bmp',
+ 'ogg': 'audio/ogg',
+ 'wav': 'audio/wav',
+ 'mp3': 'audio/mpeg'
+ }[name.substr(name.lastIndexOf('.')+1)];
+ },getUserMedia:function (func) {
+ if(!window.getUserMedia) {
+ window.getUserMedia = navigator['getUserMedia'] ||
+ navigator['mozGetUserMedia'];
+ }
+ window.getUserMedia(func);
+ },getMovementX:function (event) {
+ return event['movementX'] ||
+ event['mozMovementX'] ||
+ event['webkitMovementX'] ||
+ 0;
+ },getMovementY:function (event) {
+ return event['movementY'] ||
+ event['mozMovementY'] ||
+ event['webkitMovementY'] ||
+ 0;
+ },getMouseWheelDelta:function (event) {
+ return Math.max(-1, Math.min(1, event.type === 'DOMMouseScroll' ? event.detail : -event.wheelDelta));
+ },mouseX:0,mouseY:0,mouseMovementX:0,mouseMovementY:0,calculateMouseEvent:function (event) { // event should be mousemove, mousedown or mouseup
+ if (Browser.pointerLock) {
+ // When the pointer is locked, calculate the coordinates
+ // based on the movement of the mouse.
+ // Workaround for Firefox bug 764498
+ if (event.type != 'mousemove' &&
+ ('mozMovementX' in event)) {
+ Browser.mouseMovementX = Browser.mouseMovementY = 0;
+ } else {
+ Browser.mouseMovementX = Browser.getMovementX(event);
+ Browser.mouseMovementY = Browser.getMovementY(event);
+ }
+
+ // check if SDL is available
+ if (typeof SDL != "undefined") {
+ Browser.mouseX = SDL.mouseX + Browser.mouseMovementX;
+ Browser.mouseY = SDL.mouseY + Browser.mouseMovementY;
+ } else {
+ // just add the mouse delta to the current absolut mouse position
+ // FIXME: ideally this should be clamped against the canvas size and zero
+ Browser.mouseX += Browser.mouseMovementX;
+ Browser.mouseY += Browser.mouseMovementY;
+ }
+ } else {
+ // Otherwise, calculate the movement based on the changes
+ // in the coordinates.
+ var rect = Module["canvas"].getBoundingClientRect();
+ var x, y;
+
+ // Neither .scrollX or .pageXOffset are defined in a spec, but
+ // we prefer .scrollX because it is currently in a spec draft.
+ // (see: http://www.w3.org/TR/2013/WD-cssom-view-20131217/)
+ var scrollX = ((typeof window.scrollX !== 'undefined') ? window.scrollX : window.pageXOffset);
+ var scrollY = ((typeof window.scrollY !== 'undefined') ? window.scrollY : window.pageYOffset);
+ if (event.type == 'touchstart' ||
+ event.type == 'touchend' ||
+ event.type == 'touchmove') {
+ var t = event.touches.item(0);
+ if (t) {
+ x = t.pageX - (scrollX + rect.left);
+ y = t.pageY - (scrollY + rect.top);
+ } else {
+ return;
+ }
+ } else {
+ x = event.pageX - (scrollX + rect.left);
+ y = event.pageY - (scrollY + rect.top);
+ }
+
+ // the canvas might be CSS-scaled compared to its backbuffer;
+ // SDL-using content will want mouse coordinates in terms
+ // of backbuffer units.
+ var cw = Module["canvas"].width;
+ var ch = Module["canvas"].height;
+ x = x * (cw / rect.width);
+ y = y * (ch / rect.height);
+
+ Browser.mouseMovementX = x - Browser.mouseX;
+ Browser.mouseMovementY = y - Browser.mouseY;
+ Browser.mouseX = x;
+ Browser.mouseY = y;
+ }
+ },xhrLoad:function (url, onload, onerror) {
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, true);
+ xhr.responseType = 'arraybuffer';
+ xhr.onload = function xhr_onload() {
+ if (xhr.status == 200 || (xhr.status == 0 && xhr.response)) { // file URLs can return 0
+ onload(xhr.response);
+ } else {
+ onerror();
+ }
+ };
+ xhr.onerror = onerror;
+ xhr.send(null);
+ },asyncLoad:function (url, onload, onerror, noRunDep) {
+ Browser.xhrLoad(url, function(arrayBuffer) {
+ assert(arrayBuffer, 'Loading data file "' + url + '" failed (no arrayBuffer).');
+ onload(new Uint8Array(arrayBuffer));
+ if (!noRunDep) removeRunDependency('al ' + url);
+ }, function(event) {
+ if (onerror) {
+ onerror();
+ } else {
+ throw 'Loading data file "' + url + '" failed.';
+ }
+ });
+ if (!noRunDep) addRunDependency('al ' + url);
+ },resizeListeners:[],updateResizeListeners:function () {
+ var canvas = Module['canvas'];
+ Browser.resizeListeners.forEach(function(listener) {
+ listener(canvas.width, canvas.height);
+ });
+ },setCanvasSize:function (width, height, noUpdates) {
+ var canvas = Module['canvas'];
+ Browser.updateCanvasDimensions(canvas, width, height);
+ if (!noUpdates) Browser.updateResizeListeners();
+ },windowedWidth:0,windowedHeight:0,setFullScreenCanvasSize:function () {
+ // check if SDL is available
+ if (typeof SDL != "undefined") {
+ var flags = HEAPU32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)];
+ flags = flags | 0x00800000; // set SDL_FULLSCREEN flag
+ HEAP32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)]=flags
+ }
+ Browser.updateResizeListeners();
+ },setWindowedCanvasSize:function () {
+ // check if SDL is available
+ if (typeof SDL != "undefined") {
+ var flags = HEAPU32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)];
+ flags = flags & ~0x00800000; // clear SDL_FULLSCREEN flag
+ HEAP32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)]=flags
+ }
+ Browser.updateResizeListeners();
+ },updateCanvasDimensions:function (canvas, wNative, hNative) {
+ if (wNative && hNative) {
+ canvas.widthNative = wNative;
+ canvas.heightNative = hNative;
+ } else {
+ wNative = canvas.widthNative;
+ hNative = canvas.heightNative;
+ }
+ var w = wNative;
+ var h = hNative;
+ if (Module['forcedAspectRatio'] && Module['forcedAspectRatio'] > 0) {
+ if (w/h < Module['forcedAspectRatio']) {
+ w = Math.round(h * Module['forcedAspectRatio']);
+ } else {
+ h = Math.round(w / Module['forcedAspectRatio']);
+ }
+ }
+ if (((document['webkitFullScreenElement'] || document['webkitFullscreenElement'] ||
+ document['mozFullScreenElement'] || document['mozFullscreenElement'] ||
+ document['fullScreenElement'] || document['fullscreenElement'] ||
+ document['msFullScreenElement'] || document['msFullscreenElement'] ||
+ document['webkitCurrentFullScreenElement']) === canvas.parentNode) && (typeof screen != 'undefined')) {
+ var factor = Math.min(screen.width / w, screen.height / h);
+ w = Math.round(w * factor);
+ h = Math.round(h * factor);
+ }
+ if (Browser.resizeCanvas) {
+ if (canvas.width != w) canvas.width = w;
+ if (canvas.height != h) canvas.height = h;
+ if (typeof canvas.style != 'undefined') {
+ canvas.style.removeProperty( "width");
+ canvas.style.removeProperty("height");
+ }
+ } else {
+ if (canvas.width != wNative) canvas.width = wNative;
+ if (canvas.height != hNative) canvas.height = hNative;
+ if (typeof canvas.style != 'undefined') {
+ if (w != wNative || h != hNative) {
+ canvas.style.setProperty( "width", w + "px", "important");
+ canvas.style.setProperty("height", h + "px", "important");
+ } else {
+ canvas.style.removeProperty( "width");
+ canvas.style.removeProperty("height");
+ }
+ }
+ }
+ }};
+
+ function _sbrk(bytes) {
+ // Implement a Linux-like 'memory area' for our 'process'.
+ // Changes the size of the memory area by |bytes|; returns the
+ // address of the previous top ('break') of the memory area
+ // We control the "dynamic" memory - DYNAMIC_BASE to DYNAMICTOP
+ var self = _sbrk;
+ if (!self.called) {
+ DYNAMICTOP = alignMemoryPage(DYNAMICTOP); // make sure we start out aligned
+ self.called = true;
+ assert(Runtime.dynamicAlloc);
+ self.alloc = Runtime.dynamicAlloc;
+ Runtime.dynamicAlloc = function() { abort('cannot dynamically allocate, sbrk now has control') };
+ }
+ var ret = DYNAMICTOP;
+ if (bytes != 0) self.alloc(bytes);
+ return ret; // Previous break location.
+ }
+
+ function _sysconf(name) {
+ // long sysconf(int name);
+ // http://pubs.opengroup.org/onlinepubs/009695399/functions/sysconf.html
+ switch(name) {
+ case 30: return PAGE_SIZE;
+ case 132:
+ case 133:
+ case 12:
+ case 137:
+ case 138:
+ case 15:
+ case 235:
+ case 16:
+ case 17:
+ case 18:
+ case 19:
+ case 20:
+ case 149:
+ case 13:
+ case 10:
+ case 236:
+ case 153:
+ case 9:
+ case 21:
+ case 22:
+ case 159:
+ case 154:
+ case 14:
+ case 77:
+ case 78:
+ case 139:
+ case 80:
+ case 81:
+ case 79:
+ case 82:
+ case 68:
+ case 67:
+ case 164:
+ case 11:
+ case 29:
+ case 47:
+ case 48:
+ case 95:
+ case 52:
+ case 51:
+ case 46:
+ return 200809;
+ case 27:
+ case 246:
+ case 127:
+ case 128:
+ case 23:
+ case 24:
+ case 160:
+ case 161:
+ case 181:
+ case 182:
+ case 242:
+ case 183:
+ case 184:
+ case 243:
+ case 244:
+ case 245:
+ case 165:
+ case 178:
+ case 179:
+ case 49:
+ case 50:
+ case 168:
+ case 169:
+ case 175:
+ case 170:
+ case 171:
+ case 172:
+ case 97:
+ case 76:
+ case 32:
+ case 173:
+ case 35:
+ return -1;
+ case 176:
+ case 177:
+ case 7:
+ case 155:
+ case 8:
+ case 157:
+ case 125:
+ case 126:
+ case 92:
+ case 93:
+ case 129:
+ case 130:
+ case 131:
+ case 94:
+ case 91:
+ return 1;
+ case 74:
+ case 60:
+ case 69:
+ case 70:
+ case 4:
+ return 1024;
+ case 31:
+ case 42:
+ case 72:
+ return 32;
+ case 87:
+ case 26:
+ case 33:
+ return 2147483647;
+ case 34:
+ case 1:
+ return 47839;
+ case 38:
+ case 36:
+ return 99;
+ case 43:
+ case 37:
+ return 2048;
+ case 0: return 2097152;
+ case 3: return 65536;
+ case 28: return 32768;
+ case 44: return 32767;
+ case 75: return 16384;
+ case 39: return 1000;
+ case 89: return 700;
+ case 71: return 256;
+ case 40: return 255;
+ case 2: return 100;
+ case 180: return 64;
+ case 25: return 20;
+ case 5: return 16;
+ case 6: return 6;
+ case 73: return 4;
+ case 84: return 1;
+ }
+ ___setErrNo(ERRNO_CODES.EINVAL);
+ return -1;
+ }
+
+ function _emscripten_run_script(ptr) {
+ eval(Pointer_stringify(ptr));
+ }
+
+
+ function _malloc(bytes) {
+ /* Over-allocate to make sure it is byte-aligned by 8.
+ * This will leak memory, but this is only the dummy
+ * implementation (replaced by dlmalloc normally) so
+ * not an issue.
+ */
+ var ptr = Runtime.dynamicAlloc(bytes + 8);
+ return (ptr+8) & 0xFFFFFFF8;
+ }
+ Module["_malloc"] = _malloc;function ___cxa_allocate_exception(size) {
+ var ptr = _malloc(size + ___cxa_exception_header_size);
+ return ptr + ___cxa_exception_header_size;
+ }
+
+ function _emscripten_cancel_main_loop() {
+ Browser.mainLoop.scheduler = null;
+ Browser.mainLoop.shouldPause = true;
+ }
+
+ var __ZTISt9exception=allocate([allocate([1,0,0,0,0,0,0], "i8", ALLOC_STATIC)+8, 0], "i32", ALLOC_STATIC);
+FS.staticInit();__ATINIT__.unshift({ func: function() { if (!Module["noFSInit"] && !FS.init.initialized) FS.init() } });__ATMAIN__.push({ func: function() { FS.ignorePermissions = false } });__ATEXIT__.push({ func: function() { FS.quit() } });Module["FS_createFolder"] = FS.createFolder;Module["FS_createPath"] = FS.createPath;Module["FS_createDataFile"] = FS.createDataFile;Module["FS_createPreloadedFile"] = FS.createPreloadedFile;Module["FS_createLazyFile"] = FS.createLazyFile;Module["FS_createLink"] = FS.createLink;Module["FS_createDevice"] = FS.createDevice;
+___errno_state = Runtime.staticAlloc(4); HEAP32[((___errno_state)>>2)]=0;
+__ATINIT__.unshift({ func: function() { TTY.init() } });__ATEXIT__.push({ func: function() { TTY.shutdown() } });TTY.utf8 = new Runtime.UTF8Processor();
+if (ENVIRONMENT_IS_NODE) { var fs = require("fs"); NODEFS.staticInit(); }
+__ATINIT__.push({ func: function() { SOCKFS.root = FS.mount(SOCKFS, {}, null); } });
+_fputc.ret = allocate([0], "i8", ALLOC_STATIC);
+Module["requestFullScreen"] = function Module_requestFullScreen(lockPointer, resizeCanvas) { Browser.requestFullScreen(lockPointer, resizeCanvas) };
+ Module["requestAnimationFrame"] = function Module_requestAnimationFrame(func) { Browser.requestAnimationFrame(func) };
+ Module["setCanvasSize"] = function Module_setCanvasSize(width, height, noUpdates) { Browser.setCanvasSize(width, height, noUpdates) };
+ Module["pauseMainLoop"] = function Module_pauseMainLoop() { Browser.mainLoop.pause() };
+ Module["resumeMainLoop"] = function Module_resumeMainLoop() { Browser.mainLoop.resume() };
+ Module["getUserMedia"] = function Module_getUserMedia() { Browser.getUserMedia() }
+STACK_BASE = STACKTOP = Runtime.alignMemory(STATICTOP);
+
+staticSealed = true; // seal the static portion of memory
+
+STACK_MAX = STACK_BASE + 5242880;
+
+DYNAMIC_BASE = DYNAMICTOP = Runtime.alignMemory(STACK_MAX);
+
+assert(DYNAMIC_BASE < TOTAL_MEMORY, "TOTAL_MEMORY not big enough for stack");
+
+
+var Math_min = Math.min;
+function invoke_iiii(index,a1,a2,a3) {
+ try {
+ return Module["dynCall_iiii"](index,a1,a2,a3);
+ } catch(e) {
+ if (typeof e !== 'number' && e !== 'longjmp') throw e;
+ asm["setThrew"](1, 0);
+ }
+}
+
+function invoke_viiiii(index,a1,a2,a3,a4,a5) {
+ try {
+ Module["dynCall_viiiii"](index,a1,a2,a3,a4,a5);
+ } catch(e) {
+ if (typeof e !== 'number' && e !== 'longjmp') throw e;
+ asm["setThrew"](1, 0);
+ }
+}
+
+function invoke_vi(index,a1) {
+ try {
+ Module["dynCall_vi"](index,a1);
+ } catch(e) {
+ if (typeof e !== 'number' && e !== 'longjmp') throw e;
+ asm["setThrew"](1, 0);
+ }
+}
+
+function invoke_vii(index,a1,a2) {
+ try {
+ Module["dynCall_vii"](index,a1,a2);
+ } catch(e) {
+ if (typeof e !== 'number' && e !== 'longjmp') throw e;
+ asm["setThrew"](1, 0);
+ }
+}
+
+function invoke_ii(index,a1) {
+ try {
+ return Module["dynCall_ii"](index,a1);
+ } catch(e) {
+ if (typeof e !== 'number' && e !== 'longjmp') throw e;
+ asm["setThrew"](1, 0);
+ }
+}
+
+function invoke_viii(index,a1,a2,a3) {
+ try {
+ Module["dynCall_viii"](index,a1,a2,a3);
+ } catch(e) {
+ if (typeof e !== 'number' && e !== 'longjmp') throw e;
+ asm["setThrew"](1, 0);
+ }
+}
+
+function invoke_v(index) {
+ try {
+ Module["dynCall_v"](index);
+ } catch(e) {
+ if (typeof e !== 'number' && e !== 'longjmp') throw e;
+ asm["setThrew"](1, 0);
+ }
+}
+
+function invoke_viid(index,a1,a2,a3) {
+ try {
+ Module["dynCall_viid"](index,a1,a2,a3);
+ } catch(e) {
+ if (typeof e !== 'number' && e !== 'longjmp') throw e;
+ asm["setThrew"](1, 0);
+ }
+}
+
+function invoke_viiiiii(index,a1,a2,a3,a4,a5,a6) {
+ try {
+ Module["dynCall_viiiiii"](index,a1,a2,a3,a4,a5,a6);
+ } catch(e) {
+ if (typeof e !== 'number' && e !== 'longjmp') throw e;
+ asm["setThrew"](1, 0);
+ }
+}
+
+function invoke_iii(index,a1,a2) {
+ try {
+ return Module["dynCall_iii"](index,a1,a2);
+ } catch(e) {
+ if (typeof e !== 'number' && e !== 'longjmp') throw e;
+ asm["setThrew"](1, 0);
+ }
+}
+
+function invoke_iiiiii(index,a1,a2,a3,a4,a5) {
+ try {
+ return Module["dynCall_iiiiii"](index,a1,a2,a3,a4,a5);
+ } catch(e) {
+ if (typeof e !== 'number' && e !== 'longjmp') throw e;
+ asm["setThrew"](1, 0);
+ }
+}
+
+function invoke_viiii(index,a1,a2,a3,a4) {
+ try {
+ Module["dynCall_viiii"](index,a1,a2,a3,a4);
+ } catch(e) {
+ if (typeof e !== 'number' && e !== 'longjmp') throw e;
+ asm["setThrew"](1, 0);
+ }
+}
+
+function asmPrintInt(x, y) {
+ Module.print('int ' + x + ',' + y);// + ' ' + new Error().stack);
+}
+function asmPrintFloat(x, y) {
+ Module.print('float ' + x + ',' + y);// + ' ' + new Error().stack);
+}
+// EMSCRIPTEN_START_ASM
+var asm = Wasm.instantiateModuleFromAsm((function Module(global, env, buffer) {
+ 'use asm';
+ var HEAP8 = new global.Int8Array(buffer);
+ var HEAP16 = new global.Int16Array(buffer);
+ var HEAP32 = new global.Int32Array(buffer);
+ var HEAPU8 = new global.Uint8Array(buffer);
+ var HEAPU16 = new global.Uint16Array(buffer);
+ var HEAPU32 = new global.Uint32Array(buffer);
+ var HEAPF32 = new global.Float32Array(buffer);
+ var HEAPF64 = new global.Float64Array(buffer);
+
+ var STACKTOP=env.STACKTOP|0;
+ var STACK_MAX=env.STACK_MAX|0;
+ var tempDoublePtr=env.tempDoublePtr|0;
+ var ABORT=env.ABORT|0;
+ var __ZTISt9exception=env.__ZTISt9exception|0;
+
+ var __THREW__ = 0;
+ var threwValue = 0;
+ var setjmpId = 0;
+ var undef = 0;
+ var nan = +env.NaN, inf = +env.Infinity;
+ var tempInt = 0, tempBigInt = 0, tempBigIntP = 0, tempBigIntS = 0, tempBigIntR = 0.0, tempBigIntI = 0, tempBigIntD = 0, tempValue = 0, tempDouble = 0.0;
+
+ var tempRet0 = 0;
+ var tempRet1 = 0;
+ var tempRet2 = 0;
+ var tempRet3 = 0;
+ var tempRet4 = 0;
+ var tempRet5 = 0;
+ var tempRet6 = 0;
+ var tempRet7 = 0;
+ var tempRet8 = 0;
+ var tempRet9 = 0;
+ var Math_floor=global.Math.floor;
+ var Math_abs=global.Math.abs;
+ var Math_sqrt=global.Math.sqrt;
+ var Math_pow=global.Math.pow;
+ var Math_cos=global.Math.cos;
+ var Math_sin=global.Math.sin;
+ var Math_tan=global.Math.tan;
+ var Math_acos=global.Math.acos;
+ var Math_asin=global.Math.asin;
+ var Math_atan=global.Math.atan;
+ var Math_atan2=global.Math.atan2;
+ var Math_exp=global.Math.exp;
+ var Math_log=global.Math.log;
+ var Math_ceil=global.Math.ceil;
+ var Math_imul=global.Math.imul;
+ var abort=env.abort;
+ var assert=env.assert;
+ var asmPrintInt=env.asmPrintInt;
+ var asmPrintFloat=env.asmPrintFloat;
+ var Math_min=env.min;
+ var invoke_iiii=env.invoke_iiii;
+ var invoke_viiiii=env.invoke_viiiii;
+ var invoke_vi=env.invoke_vi;
+ var invoke_vii=env.invoke_vii;
+ var invoke_ii=env.invoke_ii;
+ var invoke_viii=env.invoke_viii;
+ var invoke_v=env.invoke_v;
+ var invoke_viid=env.invoke_viid;
+ var invoke_viiiiii=env.invoke_viiiiii;
+ var invoke_iii=env.invoke_iii;
+ var invoke_iiiiii=env.invoke_iiiiii;
+ var invoke_viiii=env.invoke_viiii;
+ var ___cxa_throw=env.___cxa_throw;
+ var _emscripten_run_script=env._emscripten_run_script;
+ var _cosf=env._cosf;
+ var _send=env._send;
+ var __ZSt9terminatev=env.__ZSt9terminatev;
+ var __reallyNegative=env.__reallyNegative;
+ var ___cxa_is_number_type=env.___cxa_is_number_type;
+ var ___assert_fail=env.___assert_fail;
+ var ___cxa_allocate_exception=env.___cxa_allocate_exception;
+ var ___cxa_find_matching_catch=env.___cxa_find_matching_catch;
+ var _fflush=env._fflush;
+ var _pwrite=env._pwrite;
+ var ___setErrNo=env.___setErrNo;
+ var _sbrk=env._sbrk;
+ var ___cxa_begin_catch=env.___cxa_begin_catch;
+ var _sinf=env._sinf;
+ var _fileno=env._fileno;
+ var ___resumeException=env.___resumeException;
+ var __ZSt18uncaught_exceptionv=env.__ZSt18uncaught_exceptionv;
+ var _sysconf=env._sysconf;
+ var _clock=env._clock;
+ var _emscripten_memcpy_big=env._emscripten_memcpy_big;
+ var _puts=env._puts;
+ var _mkport=env._mkport;
+ var _floorf=env._floorf;
+ var _sqrtf=env._sqrtf;
+ var _write=env._write;
+ var _emscripten_set_main_loop=env._emscripten_set_main_loop;
+ var ___errno_location=env.___errno_location;
+ var __ZNSt9exceptionD2Ev=env.__ZNSt9exceptionD2Ev;
+ var _printf=env._printf;
+ var ___cxa_does_inherit=env.___cxa_does_inherit;
+ var __exit=env.__exit;
+ var _fputc=env._fputc;
+ var _abort=env._abort;
+ var _fwrite=env._fwrite;
+ var _time=env._time;
+ var _fprintf=env._fprintf;
+ var _emscripten_cancel_main_loop=env._emscripten_cancel_main_loop;
+ var __formatString=env.__formatString;
+ var _fputs=env._fputs;
+ var _exit=env._exit;
+ var ___cxa_pure_virtual=env.___cxa_pure_virtual;
+ var tempFloat = 0.0;
+
+// EMSCRIPTEN_START_FUNCS
+function _malloc(i12) {
+ i12 = i12 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0, i23 = 0, i24 = 0, i25 = 0, i26 = 0, i27 = 0, i28 = 0, i29 = 0, i30 = 0, i31 = 0, i32 = 0;
+ i1 = STACKTOP;
+ do {
+ if (i12 >>> 0 < 245) {
+ if (i12 >>> 0 < 11) {
+ i12 = 16;
+ } else {
+ i12 = i12 + 11 & -8;
+ }
+ i20 = i12 >>> 3;
+ i18 = HEAP32[1790] | 0;
+ i21 = i18 >>> i20;
+ if ((i21 & 3 | 0) != 0) {
+ i6 = (i21 & 1 ^ 1) + i20 | 0;
+ i5 = i6 << 1;
+ i3 = 7200 + (i5 << 2) | 0;
+ i5 = 7200 + (i5 + 2 << 2) | 0;
+ i7 = HEAP32[i5 >> 2] | 0;
+ i2 = i7 + 8 | 0;
+ i4 = HEAP32[i2 >> 2] | 0;
+ do {
+ if ((i3 | 0) != (i4 | 0)) {
+ if (i4 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i8 = i4 + 12 | 0;
+ if ((HEAP32[i8 >> 2] | 0) == (i7 | 0)) {
+ HEAP32[i8 >> 2] = i3;
+ HEAP32[i5 >> 2] = i4;
+ break;
+ } else {
+ _abort();
+ }
+ } else {
+ HEAP32[1790] = i18 & ~(1 << i6);
+ }
+ } while (0);
+ i32 = i6 << 3;
+ HEAP32[i7 + 4 >> 2] = i32 | 3;
+ i32 = i7 + (i32 | 4) | 0;
+ HEAP32[i32 >> 2] = HEAP32[i32 >> 2] | 1;
+ i32 = i2;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ if (i12 >>> 0 > (HEAP32[7168 >> 2] | 0) >>> 0) {
+ if ((i21 | 0) != 0) {
+ i7 = 2 << i20;
+ i7 = i21 << i20 & (i7 | 0 - i7);
+ i7 = (i7 & 0 - i7) + -1 | 0;
+ i2 = i7 >>> 12 & 16;
+ i7 = i7 >>> i2;
+ i6 = i7 >>> 5 & 8;
+ i7 = i7 >>> i6;
+ i5 = i7 >>> 2 & 4;
+ i7 = i7 >>> i5;
+ i4 = i7 >>> 1 & 2;
+ i7 = i7 >>> i4;
+ i3 = i7 >>> 1 & 1;
+ i3 = (i6 | i2 | i5 | i4 | i3) + (i7 >>> i3) | 0;
+ i7 = i3 << 1;
+ i4 = 7200 + (i7 << 2) | 0;
+ i7 = 7200 + (i7 + 2 << 2) | 0;
+ i5 = HEAP32[i7 >> 2] | 0;
+ i2 = i5 + 8 | 0;
+ i6 = HEAP32[i2 >> 2] | 0;
+ do {
+ if ((i4 | 0) != (i6 | 0)) {
+ if (i6 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i8 = i6 + 12 | 0;
+ if ((HEAP32[i8 >> 2] | 0) == (i5 | 0)) {
+ HEAP32[i8 >> 2] = i4;
+ HEAP32[i7 >> 2] = i6;
+ break;
+ } else {
+ _abort();
+ }
+ } else {
+ HEAP32[1790] = i18 & ~(1 << i3);
+ }
+ } while (0);
+ i6 = i3 << 3;
+ i4 = i6 - i12 | 0;
+ HEAP32[i5 + 4 >> 2] = i12 | 3;
+ i3 = i5 + i12 | 0;
+ HEAP32[i5 + (i12 | 4) >> 2] = i4 | 1;
+ HEAP32[i5 + i6 >> 2] = i4;
+ i6 = HEAP32[7168 >> 2] | 0;
+ if ((i6 | 0) != 0) {
+ i5 = HEAP32[7180 >> 2] | 0;
+ i8 = i6 >>> 3;
+ i9 = i8 << 1;
+ i6 = 7200 + (i9 << 2) | 0;
+ i7 = HEAP32[1790] | 0;
+ i8 = 1 << i8;
+ if ((i7 & i8 | 0) != 0) {
+ i7 = 7200 + (i9 + 2 << 2) | 0;
+ i8 = HEAP32[i7 >> 2] | 0;
+ if (i8 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i28 = i7;
+ i27 = i8;
+ }
+ } else {
+ HEAP32[1790] = i7 | i8;
+ i28 = 7200 + (i9 + 2 << 2) | 0;
+ i27 = i6;
+ }
+ HEAP32[i28 >> 2] = i5;
+ HEAP32[i27 + 12 >> 2] = i5;
+ HEAP32[i5 + 8 >> 2] = i27;
+ HEAP32[i5 + 12 >> 2] = i6;
+ }
+ HEAP32[7168 >> 2] = i4;
+ HEAP32[7180 >> 2] = i3;
+ i32 = i2;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ i18 = HEAP32[7164 >> 2] | 0;
+ if ((i18 | 0) != 0) {
+ i2 = (i18 & 0 - i18) + -1 | 0;
+ i31 = i2 >>> 12 & 16;
+ i2 = i2 >>> i31;
+ i30 = i2 >>> 5 & 8;
+ i2 = i2 >>> i30;
+ i32 = i2 >>> 2 & 4;
+ i2 = i2 >>> i32;
+ i6 = i2 >>> 1 & 2;
+ i2 = i2 >>> i6;
+ i3 = i2 >>> 1 & 1;
+ i3 = HEAP32[7464 + ((i30 | i31 | i32 | i6 | i3) + (i2 >>> i3) << 2) >> 2] | 0;
+ i2 = (HEAP32[i3 + 4 >> 2] & -8) - i12 | 0;
+ i6 = i3;
+ while (1) {
+ i5 = HEAP32[i6 + 16 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ i5 = HEAP32[i6 + 20 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ break;
+ }
+ }
+ i6 = (HEAP32[i5 + 4 >> 2] & -8) - i12 | 0;
+ i4 = i6 >>> 0 < i2 >>> 0;
+ i2 = i4 ? i6 : i2;
+ i6 = i5;
+ i3 = i4 ? i5 : i3;
+ }
+ i6 = HEAP32[7176 >> 2] | 0;
+ if (i3 >>> 0 < i6 >>> 0) {
+ _abort();
+ }
+ i4 = i3 + i12 | 0;
+ if (!(i3 >>> 0 < i4 >>> 0)) {
+ _abort();
+ }
+ i5 = HEAP32[i3 + 24 >> 2] | 0;
+ i7 = HEAP32[i3 + 12 >> 2] | 0;
+ do {
+ if ((i7 | 0) == (i3 | 0)) {
+ i8 = i3 + 20 | 0;
+ i7 = HEAP32[i8 >> 2] | 0;
+ if ((i7 | 0) == 0) {
+ i8 = i3 + 16 | 0;
+ i7 = HEAP32[i8 >> 2] | 0;
+ if ((i7 | 0) == 0) {
+ i26 = 0;
+ break;
+ }
+ }
+ while (1) {
+ i10 = i7 + 20 | 0;
+ i9 = HEAP32[i10 >> 2] | 0;
+ if ((i9 | 0) != 0) {
+ i7 = i9;
+ i8 = i10;
+ continue;
+ }
+ i10 = i7 + 16 | 0;
+ i9 = HEAP32[i10 >> 2] | 0;
+ if ((i9 | 0) == 0) {
+ break;
+ } else {
+ i7 = i9;
+ i8 = i10;
+ }
+ }
+ if (i8 >>> 0 < i6 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i8 >> 2] = 0;
+ i26 = i7;
+ break;
+ }
+ } else {
+ i8 = HEAP32[i3 + 8 >> 2] | 0;
+ if (i8 >>> 0 < i6 >>> 0) {
+ _abort();
+ }
+ i6 = i8 + 12 | 0;
+ if ((HEAP32[i6 >> 2] | 0) != (i3 | 0)) {
+ _abort();
+ }
+ i9 = i7 + 8 | 0;
+ if ((HEAP32[i9 >> 2] | 0) == (i3 | 0)) {
+ HEAP32[i6 >> 2] = i7;
+ HEAP32[i9 >> 2] = i8;
+ i26 = i7;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ do {
+ if ((i5 | 0) != 0) {
+ i7 = HEAP32[i3 + 28 >> 2] | 0;
+ i6 = 7464 + (i7 << 2) | 0;
+ if ((i3 | 0) == (HEAP32[i6 >> 2] | 0)) {
+ HEAP32[i6 >> 2] = i26;
+ if ((i26 | 0) == 0) {
+ HEAP32[7164 >> 2] = HEAP32[7164 >> 2] & ~(1 << i7);
+ break;
+ }
+ } else {
+ if (i5 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i6 = i5 + 16 | 0;
+ if ((HEAP32[i6 >> 2] | 0) == (i3 | 0)) {
+ HEAP32[i6 >> 2] = i26;
+ } else {
+ HEAP32[i5 + 20 >> 2] = i26;
+ }
+ if ((i26 | 0) == 0) {
+ break;
+ }
+ }
+ if (i26 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i26 + 24 >> 2] = i5;
+ i5 = HEAP32[i3 + 16 >> 2] | 0;
+ do {
+ if ((i5 | 0) != 0) {
+ if (i5 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i26 + 16 >> 2] = i5;
+ HEAP32[i5 + 24 >> 2] = i26;
+ break;
+ }
+ }
+ } while (0);
+ i5 = HEAP32[i3 + 20 >> 2] | 0;
+ if ((i5 | 0) != 0) {
+ if (i5 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i26 + 20 >> 2] = i5;
+ HEAP32[i5 + 24 >> 2] = i26;
+ break;
+ }
+ }
+ }
+ } while (0);
+ if (i2 >>> 0 < 16) {
+ i32 = i2 + i12 | 0;
+ HEAP32[i3 + 4 >> 2] = i32 | 3;
+ i32 = i3 + (i32 + 4) | 0;
+ HEAP32[i32 >> 2] = HEAP32[i32 >> 2] | 1;
+ } else {
+ HEAP32[i3 + 4 >> 2] = i12 | 3;
+ HEAP32[i3 + (i12 | 4) >> 2] = i2 | 1;
+ HEAP32[i3 + (i2 + i12) >> 2] = i2;
+ i6 = HEAP32[7168 >> 2] | 0;
+ if ((i6 | 0) != 0) {
+ i5 = HEAP32[7180 >> 2] | 0;
+ i8 = i6 >>> 3;
+ i9 = i8 << 1;
+ i6 = 7200 + (i9 << 2) | 0;
+ i7 = HEAP32[1790] | 0;
+ i8 = 1 << i8;
+ if ((i7 & i8 | 0) != 0) {
+ i7 = 7200 + (i9 + 2 << 2) | 0;
+ i8 = HEAP32[i7 >> 2] | 0;
+ if (i8 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i25 = i7;
+ i24 = i8;
+ }
+ } else {
+ HEAP32[1790] = i7 | i8;
+ i25 = 7200 + (i9 + 2 << 2) | 0;
+ i24 = i6;
+ }
+ HEAP32[i25 >> 2] = i5;
+ HEAP32[i24 + 12 >> 2] = i5;
+ HEAP32[i5 + 8 >> 2] = i24;
+ HEAP32[i5 + 12 >> 2] = i6;
+ }
+ HEAP32[7168 >> 2] = i2;
+ HEAP32[7180 >> 2] = i4;
+ }
+ i32 = i3 + 8 | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ }
+ } else {
+ if (!(i12 >>> 0 > 4294967231)) {
+ i24 = i12 + 11 | 0;
+ i12 = i24 & -8;
+ i26 = HEAP32[7164 >> 2] | 0;
+ if ((i26 | 0) != 0) {
+ i25 = 0 - i12 | 0;
+ i24 = i24 >>> 8;
+ if ((i24 | 0) != 0) {
+ if (i12 >>> 0 > 16777215) {
+ i27 = 31;
+ } else {
+ i31 = (i24 + 1048320 | 0) >>> 16 & 8;
+ i32 = i24 << i31;
+ i30 = (i32 + 520192 | 0) >>> 16 & 4;
+ i32 = i32 << i30;
+ i27 = (i32 + 245760 | 0) >>> 16 & 2;
+ i27 = 14 - (i30 | i31 | i27) + (i32 << i27 >>> 15) | 0;
+ i27 = i12 >>> (i27 + 7 | 0) & 1 | i27 << 1;
+ }
+ } else {
+ i27 = 0;
+ }
+ i30 = HEAP32[7464 + (i27 << 2) >> 2] | 0;
+ L126 : do {
+ if ((i30 | 0) == 0) {
+ i29 = 0;
+ i24 = 0;
+ } else {
+ if ((i27 | 0) == 31) {
+ i24 = 0;
+ } else {
+ i24 = 25 - (i27 >>> 1) | 0;
+ }
+ i29 = 0;
+ i28 = i12 << i24;
+ i24 = 0;
+ while (1) {
+ i32 = HEAP32[i30 + 4 >> 2] & -8;
+ i31 = i32 - i12 | 0;
+ if (i31 >>> 0 < i25 >>> 0) {
+ if ((i32 | 0) == (i12 | 0)) {
+ i25 = i31;
+ i29 = i30;
+ i24 = i30;
+ break L126;
+ } else {
+ i25 = i31;
+ i24 = i30;
+ }
+ }
+ i31 = HEAP32[i30 + 20 >> 2] | 0;
+ i30 = HEAP32[i30 + (i28 >>> 31 << 2) + 16 >> 2] | 0;
+ i29 = (i31 | 0) == 0 | (i31 | 0) == (i30 | 0) ? i29 : i31;
+ if ((i30 | 0) == 0) {
+ break;
+ } else {
+ i28 = i28 << 1;
+ }
+ }
+ }
+ } while (0);
+ if ((i29 | 0) == 0 & (i24 | 0) == 0) {
+ i32 = 2 << i27;
+ i26 = i26 & (i32 | 0 - i32);
+ if ((i26 | 0) == 0) {
+ break;
+ }
+ i32 = (i26 & 0 - i26) + -1 | 0;
+ i28 = i32 >>> 12 & 16;
+ i32 = i32 >>> i28;
+ i27 = i32 >>> 5 & 8;
+ i32 = i32 >>> i27;
+ i30 = i32 >>> 2 & 4;
+ i32 = i32 >>> i30;
+ i31 = i32 >>> 1 & 2;
+ i32 = i32 >>> i31;
+ i29 = i32 >>> 1 & 1;
+ i29 = HEAP32[7464 + ((i27 | i28 | i30 | i31 | i29) + (i32 >>> i29) << 2) >> 2] | 0;
+ }
+ if ((i29 | 0) != 0) {
+ while (1) {
+ i27 = (HEAP32[i29 + 4 >> 2] & -8) - i12 | 0;
+ i26 = i27 >>> 0 < i25 >>> 0;
+ i25 = i26 ? i27 : i25;
+ i24 = i26 ? i29 : i24;
+ i26 = HEAP32[i29 + 16 >> 2] | 0;
+ if ((i26 | 0) != 0) {
+ i29 = i26;
+ continue;
+ }
+ i29 = HEAP32[i29 + 20 >> 2] | 0;
+ if ((i29 | 0) == 0) {
+ break;
+ }
+ }
+ }
+ if ((i24 | 0) != 0 ? i25 >>> 0 < ((HEAP32[7168 >> 2] | 0) - i12 | 0) >>> 0 : 0) {
+ i4 = HEAP32[7176 >> 2] | 0;
+ if (i24 >>> 0 < i4 >>> 0) {
+ _abort();
+ }
+ i2 = i24 + i12 | 0;
+ if (!(i24 >>> 0 < i2 >>> 0)) {
+ _abort();
+ }
+ i3 = HEAP32[i24 + 24 >> 2] | 0;
+ i6 = HEAP32[i24 + 12 >> 2] | 0;
+ do {
+ if ((i6 | 0) == (i24 | 0)) {
+ i6 = i24 + 20 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ i6 = i24 + 16 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ i22 = 0;
+ break;
+ }
+ }
+ while (1) {
+ i8 = i5 + 20 | 0;
+ i7 = HEAP32[i8 >> 2] | 0;
+ if ((i7 | 0) != 0) {
+ i5 = i7;
+ i6 = i8;
+ continue;
+ }
+ i7 = i5 + 16 | 0;
+ i8 = HEAP32[i7 >> 2] | 0;
+ if ((i8 | 0) == 0) {
+ break;
+ } else {
+ i5 = i8;
+ i6 = i7;
+ }
+ }
+ if (i6 >>> 0 < i4 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i6 >> 2] = 0;
+ i22 = i5;
+ break;
+ }
+ } else {
+ i5 = HEAP32[i24 + 8 >> 2] | 0;
+ if (i5 >>> 0 < i4 >>> 0) {
+ _abort();
+ }
+ i7 = i5 + 12 | 0;
+ if ((HEAP32[i7 >> 2] | 0) != (i24 | 0)) {
+ _abort();
+ }
+ i4 = i6 + 8 | 0;
+ if ((HEAP32[i4 >> 2] | 0) == (i24 | 0)) {
+ HEAP32[i7 >> 2] = i6;
+ HEAP32[i4 >> 2] = i5;
+ i22 = i6;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ do {
+ if ((i3 | 0) != 0) {
+ i4 = HEAP32[i24 + 28 >> 2] | 0;
+ i5 = 7464 + (i4 << 2) | 0;
+ if ((i24 | 0) == (HEAP32[i5 >> 2] | 0)) {
+ HEAP32[i5 >> 2] = i22;
+ if ((i22 | 0) == 0) {
+ HEAP32[7164 >> 2] = HEAP32[7164 >> 2] & ~(1 << i4);
+ break;
+ }
+ } else {
+ if (i3 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i4 = i3 + 16 | 0;
+ if ((HEAP32[i4 >> 2] | 0) == (i24 | 0)) {
+ HEAP32[i4 >> 2] = i22;
+ } else {
+ HEAP32[i3 + 20 >> 2] = i22;
+ }
+ if ((i22 | 0) == 0) {
+ break;
+ }
+ }
+ if (i22 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i22 + 24 >> 2] = i3;
+ i3 = HEAP32[i24 + 16 >> 2] | 0;
+ do {
+ if ((i3 | 0) != 0) {
+ if (i3 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i22 + 16 >> 2] = i3;
+ HEAP32[i3 + 24 >> 2] = i22;
+ break;
+ }
+ }
+ } while (0);
+ i3 = HEAP32[i24 + 20 >> 2] | 0;
+ if ((i3 | 0) != 0) {
+ if (i3 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i22 + 20 >> 2] = i3;
+ HEAP32[i3 + 24 >> 2] = i22;
+ break;
+ }
+ }
+ }
+ } while (0);
+ L204 : do {
+ if (!(i25 >>> 0 < 16)) {
+ HEAP32[i24 + 4 >> 2] = i12 | 3;
+ HEAP32[i24 + (i12 | 4) >> 2] = i25 | 1;
+ HEAP32[i24 + (i25 + i12) >> 2] = i25;
+ i4 = i25 >>> 3;
+ if (i25 >>> 0 < 256) {
+ i6 = i4 << 1;
+ i3 = 7200 + (i6 << 2) | 0;
+ i5 = HEAP32[1790] | 0;
+ i4 = 1 << i4;
+ if ((i5 & i4 | 0) != 0) {
+ i5 = 7200 + (i6 + 2 << 2) | 0;
+ i4 = HEAP32[i5 >> 2] | 0;
+ if (i4 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i21 = i5;
+ i20 = i4;
+ }
+ } else {
+ HEAP32[1790] = i5 | i4;
+ i21 = 7200 + (i6 + 2 << 2) | 0;
+ i20 = i3;
+ }
+ HEAP32[i21 >> 2] = i2;
+ HEAP32[i20 + 12 >> 2] = i2;
+ HEAP32[i24 + (i12 + 8) >> 2] = i20;
+ HEAP32[i24 + (i12 + 12) >> 2] = i3;
+ break;
+ }
+ i3 = i25 >>> 8;
+ if ((i3 | 0) != 0) {
+ if (i25 >>> 0 > 16777215) {
+ i3 = 31;
+ } else {
+ i31 = (i3 + 1048320 | 0) >>> 16 & 8;
+ i32 = i3 << i31;
+ i30 = (i32 + 520192 | 0) >>> 16 & 4;
+ i32 = i32 << i30;
+ i3 = (i32 + 245760 | 0) >>> 16 & 2;
+ i3 = 14 - (i30 | i31 | i3) + (i32 << i3 >>> 15) | 0;
+ i3 = i25 >>> (i3 + 7 | 0) & 1 | i3 << 1;
+ }
+ } else {
+ i3 = 0;
+ }
+ i6 = 7464 + (i3 << 2) | 0;
+ HEAP32[i24 + (i12 + 28) >> 2] = i3;
+ HEAP32[i24 + (i12 + 20) >> 2] = 0;
+ HEAP32[i24 + (i12 + 16) >> 2] = 0;
+ i4 = HEAP32[7164 >> 2] | 0;
+ i5 = 1 << i3;
+ if ((i4 & i5 | 0) == 0) {
+ HEAP32[7164 >> 2] = i4 | i5;
+ HEAP32[i6 >> 2] = i2;
+ HEAP32[i24 + (i12 + 24) >> 2] = i6;
+ HEAP32[i24 + (i12 + 12) >> 2] = i2;
+ HEAP32[i24 + (i12 + 8) >> 2] = i2;
+ break;
+ }
+ i4 = HEAP32[i6 >> 2] | 0;
+ if ((i3 | 0) == 31) {
+ i3 = 0;
+ } else {
+ i3 = 25 - (i3 >>> 1) | 0;
+ }
+ L225 : do {
+ if ((HEAP32[i4 + 4 >> 2] & -8 | 0) != (i25 | 0)) {
+ i3 = i25 << i3;
+ while (1) {
+ i6 = i4 + (i3 >>> 31 << 2) + 16 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ break;
+ }
+ if ((HEAP32[i5 + 4 >> 2] & -8 | 0) == (i25 | 0)) {
+ i18 = i5;
+ break L225;
+ } else {
+ i3 = i3 << 1;
+ i4 = i5;
+ }
+ }
+ if (i6 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i6 >> 2] = i2;
+ HEAP32[i24 + (i12 + 24) >> 2] = i4;
+ HEAP32[i24 + (i12 + 12) >> 2] = i2;
+ HEAP32[i24 + (i12 + 8) >> 2] = i2;
+ break L204;
+ }
+ } else {
+ i18 = i4;
+ }
+ } while (0);
+ i4 = i18 + 8 | 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ i5 = HEAP32[7176 >> 2] | 0;
+ if (i18 >>> 0 < i5 >>> 0) {
+ _abort();
+ }
+ if (i3 >>> 0 < i5 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i3 + 12 >> 2] = i2;
+ HEAP32[i4 >> 2] = i2;
+ HEAP32[i24 + (i12 + 8) >> 2] = i3;
+ HEAP32[i24 + (i12 + 12) >> 2] = i18;
+ HEAP32[i24 + (i12 + 24) >> 2] = 0;
+ break;
+ }
+ } else {
+ i32 = i25 + i12 | 0;
+ HEAP32[i24 + 4 >> 2] = i32 | 3;
+ i32 = i24 + (i32 + 4) | 0;
+ HEAP32[i32 >> 2] = HEAP32[i32 >> 2] | 1;
+ }
+ } while (0);
+ i32 = i24 + 8 | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ }
+ } else {
+ i12 = -1;
+ }
+ }
+ } while (0);
+ i18 = HEAP32[7168 >> 2] | 0;
+ if (!(i12 >>> 0 > i18 >>> 0)) {
+ i3 = i18 - i12 | 0;
+ i2 = HEAP32[7180 >> 2] | 0;
+ if (i3 >>> 0 > 15) {
+ HEAP32[7180 >> 2] = i2 + i12;
+ HEAP32[7168 >> 2] = i3;
+ HEAP32[i2 + (i12 + 4) >> 2] = i3 | 1;
+ HEAP32[i2 + i18 >> 2] = i3;
+ HEAP32[i2 + 4 >> 2] = i12 | 3;
+ } else {
+ HEAP32[7168 >> 2] = 0;
+ HEAP32[7180 >> 2] = 0;
+ HEAP32[i2 + 4 >> 2] = i18 | 3;
+ i32 = i2 + (i18 + 4) | 0;
+ HEAP32[i32 >> 2] = HEAP32[i32 >> 2] | 1;
+ }
+ i32 = i2 + 8 | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ i18 = HEAP32[7172 >> 2] | 0;
+ if (i12 >>> 0 < i18 >>> 0) {
+ i31 = i18 - i12 | 0;
+ HEAP32[7172 >> 2] = i31;
+ i32 = HEAP32[7184 >> 2] | 0;
+ HEAP32[7184 >> 2] = i32 + i12;
+ HEAP32[i32 + (i12 + 4) >> 2] = i31 | 1;
+ HEAP32[i32 + 4 >> 2] = i12 | 3;
+ i32 = i32 + 8 | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ do {
+ if ((HEAP32[1908] | 0) == 0) {
+ i18 = _sysconf(30) | 0;
+ if ((i18 + -1 & i18 | 0) == 0) {
+ HEAP32[7640 >> 2] = i18;
+ HEAP32[7636 >> 2] = i18;
+ HEAP32[7644 >> 2] = -1;
+ HEAP32[7648 >> 2] = -1;
+ HEAP32[7652 >> 2] = 0;
+ HEAP32[7604 >> 2] = 0;
+ HEAP32[1908] = (_time(0) | 0) & -16 ^ 1431655768;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ i20 = i12 + 48 | 0;
+ i25 = HEAP32[7640 >> 2] | 0;
+ i21 = i12 + 47 | 0;
+ i22 = i25 + i21 | 0;
+ i25 = 0 - i25 | 0;
+ i18 = i22 & i25;
+ if (!(i18 >>> 0 > i12 >>> 0)) {
+ i32 = 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ i24 = HEAP32[7600 >> 2] | 0;
+ if ((i24 | 0) != 0 ? (i31 = HEAP32[7592 >> 2] | 0, i32 = i31 + i18 | 0, i32 >>> 0 <= i31 >>> 0 | i32 >>> 0 > i24 >>> 0) : 0) {
+ i32 = 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ L269 : do {
+ if ((HEAP32[7604 >> 2] & 4 | 0) == 0) {
+ i26 = HEAP32[7184 >> 2] | 0;
+ L271 : do {
+ if ((i26 | 0) != 0) {
+ i24 = 7608 | 0;
+ while (1) {
+ i27 = HEAP32[i24 >> 2] | 0;
+ if (!(i27 >>> 0 > i26 >>> 0) ? (i23 = i24 + 4 | 0, (i27 + (HEAP32[i23 >> 2] | 0) | 0) >>> 0 > i26 >>> 0) : 0) {
+ break;
+ }
+ i24 = HEAP32[i24 + 8 >> 2] | 0;
+ if ((i24 | 0) == 0) {
+ i13 = 182;
+ break L271;
+ }
+ }
+ if ((i24 | 0) != 0) {
+ i25 = i22 - (HEAP32[7172 >> 2] | 0) & i25;
+ if (i25 >>> 0 < 2147483647) {
+ i13 = _sbrk(i25 | 0) | 0;
+ i26 = (i13 | 0) == ((HEAP32[i24 >> 2] | 0) + (HEAP32[i23 >> 2] | 0) | 0);
+ i22 = i13;
+ i24 = i25;
+ i23 = i26 ? i13 : -1;
+ i25 = i26 ? i25 : 0;
+ i13 = 191;
+ } else {
+ i25 = 0;
+ }
+ } else {
+ i13 = 182;
+ }
+ } else {
+ i13 = 182;
+ }
+ } while (0);
+ do {
+ if ((i13 | 0) == 182) {
+ i23 = _sbrk(0) | 0;
+ if ((i23 | 0) != (-1 | 0)) {
+ i24 = i23;
+ i22 = HEAP32[7636 >> 2] | 0;
+ i25 = i22 + -1 | 0;
+ if ((i25 & i24 | 0) == 0) {
+ i25 = i18;
+ } else {
+ i25 = i18 - i24 + (i25 + i24 & 0 - i22) | 0;
+ }
+ i24 = HEAP32[7592 >> 2] | 0;
+ i26 = i24 + i25 | 0;
+ if (i25 >>> 0 > i12 >>> 0 & i25 >>> 0 < 2147483647) {
+ i22 = HEAP32[7600 >> 2] | 0;
+ if ((i22 | 0) != 0 ? i26 >>> 0 <= i24 >>> 0 | i26 >>> 0 > i22 >>> 0 : 0) {
+ i25 = 0;
+ break;
+ }
+ i22 = _sbrk(i25 | 0) | 0;
+ i13 = (i22 | 0) == (i23 | 0);
+ i24 = i25;
+ i23 = i13 ? i23 : -1;
+ i25 = i13 ? i25 : 0;
+ i13 = 191;
+ } else {
+ i25 = 0;
+ }
+ } else {
+ i25 = 0;
+ }
+ }
+ } while (0);
+ L291 : do {
+ if ((i13 | 0) == 191) {
+ i13 = 0 - i24 | 0;
+ if ((i23 | 0) != (-1 | 0)) {
+ i17 = i23;
+ i14 = i25;
+ i13 = 202;
+ break L269;
+ }
+ do {
+ if ((i22 | 0) != (-1 | 0) & i24 >>> 0 < 2147483647 & i24 >>> 0 < i20 >>> 0 ? (i19 = HEAP32[7640 >> 2] | 0, i19 = i21 - i24 + i19 & 0 - i19, i19 >>> 0 < 2147483647) : 0) {
+ if ((_sbrk(i19 | 0) | 0) == (-1 | 0)) {
+ _sbrk(i13 | 0) | 0;
+ break L291;
+ } else {
+ i24 = i19 + i24 | 0;
+ break;
+ }
+ }
+ } while (0);
+ if ((i22 | 0) != (-1 | 0)) {
+ i17 = i22;
+ i14 = i24;
+ i13 = 202;
+ break L269;
+ }
+ }
+ } while (0);
+ HEAP32[7604 >> 2] = HEAP32[7604 >> 2] | 4;
+ i13 = 199;
+ } else {
+ i25 = 0;
+ i13 = 199;
+ }
+ } while (0);
+ if ((((i13 | 0) == 199 ? i18 >>> 0 < 2147483647 : 0) ? (i17 = _sbrk(i18 | 0) | 0, i16 = _sbrk(0) | 0, (i16 | 0) != (-1 | 0) & (i17 | 0) != (-1 | 0) & i17 >>> 0 < i16 >>> 0) : 0) ? (i15 = i16 - i17 | 0, i14 = i15 >>> 0 > (i12 + 40 | 0) >>> 0, i14) : 0) {
+ i14 = i14 ? i15 : i25;
+ i13 = 202;
+ }
+ if ((i13 | 0) == 202) {
+ i15 = (HEAP32[7592 >> 2] | 0) + i14 | 0;
+ HEAP32[7592 >> 2] = i15;
+ if (i15 >>> 0 > (HEAP32[7596 >> 2] | 0) >>> 0) {
+ HEAP32[7596 >> 2] = i15;
+ }
+ i15 = HEAP32[7184 >> 2] | 0;
+ L311 : do {
+ if ((i15 | 0) != 0) {
+ i21 = 7608 | 0;
+ while (1) {
+ i16 = HEAP32[i21 >> 2] | 0;
+ i19 = i21 + 4 | 0;
+ i20 = HEAP32[i19 >> 2] | 0;
+ if ((i17 | 0) == (i16 + i20 | 0)) {
+ i13 = 214;
+ break;
+ }
+ i18 = HEAP32[i21 + 8 >> 2] | 0;
+ if ((i18 | 0) == 0) {
+ break;
+ } else {
+ i21 = i18;
+ }
+ }
+ if (((i13 | 0) == 214 ? (HEAP32[i21 + 12 >> 2] & 8 | 0) == 0 : 0) ? i15 >>> 0 >= i16 >>> 0 & i15 >>> 0 < i17 >>> 0 : 0) {
+ HEAP32[i19 >> 2] = i20 + i14;
+ i2 = (HEAP32[7172 >> 2] | 0) + i14 | 0;
+ i3 = i15 + 8 | 0;
+ if ((i3 & 7 | 0) == 0) {
+ i3 = 0;
+ } else {
+ i3 = 0 - i3 & 7;
+ }
+ i32 = i2 - i3 | 0;
+ HEAP32[7184 >> 2] = i15 + i3;
+ HEAP32[7172 >> 2] = i32;
+ HEAP32[i15 + (i3 + 4) >> 2] = i32 | 1;
+ HEAP32[i15 + (i2 + 4) >> 2] = 40;
+ HEAP32[7188 >> 2] = HEAP32[7648 >> 2];
+ break;
+ }
+ if (i17 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ HEAP32[7176 >> 2] = i17;
+ }
+ i19 = i17 + i14 | 0;
+ i16 = 7608 | 0;
+ while (1) {
+ if ((HEAP32[i16 >> 2] | 0) == (i19 | 0)) {
+ i13 = 224;
+ break;
+ }
+ i18 = HEAP32[i16 + 8 >> 2] | 0;
+ if ((i18 | 0) == 0) {
+ break;
+ } else {
+ i16 = i18;
+ }
+ }
+ if ((i13 | 0) == 224 ? (HEAP32[i16 + 12 >> 2] & 8 | 0) == 0 : 0) {
+ HEAP32[i16 >> 2] = i17;
+ i6 = i16 + 4 | 0;
+ HEAP32[i6 >> 2] = (HEAP32[i6 >> 2] | 0) + i14;
+ i6 = i17 + 8 | 0;
+ if ((i6 & 7 | 0) == 0) {
+ i6 = 0;
+ } else {
+ i6 = 0 - i6 & 7;
+ }
+ i7 = i17 + (i14 + 8) | 0;
+ if ((i7 & 7 | 0) == 0) {
+ i13 = 0;
+ } else {
+ i13 = 0 - i7 & 7;
+ }
+ i15 = i17 + (i13 + i14) | 0;
+ i8 = i6 + i12 | 0;
+ i7 = i17 + i8 | 0;
+ i10 = i15 - (i17 + i6) - i12 | 0;
+ HEAP32[i17 + (i6 + 4) >> 2] = i12 | 3;
+ L348 : do {
+ if ((i15 | 0) != (HEAP32[7184 >> 2] | 0)) {
+ if ((i15 | 0) == (HEAP32[7180 >> 2] | 0)) {
+ i32 = (HEAP32[7168 >> 2] | 0) + i10 | 0;
+ HEAP32[7168 >> 2] = i32;
+ HEAP32[7180 >> 2] = i7;
+ HEAP32[i17 + (i8 + 4) >> 2] = i32 | 1;
+ HEAP32[i17 + (i32 + i8) >> 2] = i32;
+ break;
+ }
+ i12 = i14 + 4 | 0;
+ i18 = HEAP32[i17 + (i12 + i13) >> 2] | 0;
+ if ((i18 & 3 | 0) == 1) {
+ i11 = i18 & -8;
+ i16 = i18 >>> 3;
+ do {
+ if (!(i18 >>> 0 < 256)) {
+ i9 = HEAP32[i17 + ((i13 | 24) + i14) >> 2] | 0;
+ i19 = HEAP32[i17 + (i14 + 12 + i13) >> 2] | 0;
+ do {
+ if ((i19 | 0) == (i15 | 0)) {
+ i19 = i13 | 16;
+ i18 = i17 + (i12 + i19) | 0;
+ i16 = HEAP32[i18 >> 2] | 0;
+ if ((i16 | 0) == 0) {
+ i18 = i17 + (i19 + i14) | 0;
+ i16 = HEAP32[i18 >> 2] | 0;
+ if ((i16 | 0) == 0) {
+ i5 = 0;
+ break;
+ }
+ }
+ while (1) {
+ i20 = i16 + 20 | 0;
+ i19 = HEAP32[i20 >> 2] | 0;
+ if ((i19 | 0) != 0) {
+ i16 = i19;
+ i18 = i20;
+ continue;
+ }
+ i19 = i16 + 16 | 0;
+ i20 = HEAP32[i19 >> 2] | 0;
+ if ((i20 | 0) == 0) {
+ break;
+ } else {
+ i16 = i20;
+ i18 = i19;
+ }
+ }
+ if (i18 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i18 >> 2] = 0;
+ i5 = i16;
+ break;
+ }
+ } else {
+ i18 = HEAP32[i17 + ((i13 | 8) + i14) >> 2] | 0;
+ if (i18 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i16 = i18 + 12 | 0;
+ if ((HEAP32[i16 >> 2] | 0) != (i15 | 0)) {
+ _abort();
+ }
+ i20 = i19 + 8 | 0;
+ if ((HEAP32[i20 >> 2] | 0) == (i15 | 0)) {
+ HEAP32[i16 >> 2] = i19;
+ HEAP32[i20 >> 2] = i18;
+ i5 = i19;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ if ((i9 | 0) != 0) {
+ i16 = HEAP32[i17 + (i14 + 28 + i13) >> 2] | 0;
+ i18 = 7464 + (i16 << 2) | 0;
+ if ((i15 | 0) == (HEAP32[i18 >> 2] | 0)) {
+ HEAP32[i18 >> 2] = i5;
+ if ((i5 | 0) == 0) {
+ HEAP32[7164 >> 2] = HEAP32[7164 >> 2] & ~(1 << i16);
+ break;
+ }
+ } else {
+ if (i9 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i16 = i9 + 16 | 0;
+ if ((HEAP32[i16 >> 2] | 0) == (i15 | 0)) {
+ HEAP32[i16 >> 2] = i5;
+ } else {
+ HEAP32[i9 + 20 >> 2] = i5;
+ }
+ if ((i5 | 0) == 0) {
+ break;
+ }
+ }
+ if (i5 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i5 + 24 >> 2] = i9;
+ i15 = i13 | 16;
+ i9 = HEAP32[i17 + (i15 + i14) >> 2] | 0;
+ do {
+ if ((i9 | 0) != 0) {
+ if (i9 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i5 + 16 >> 2] = i9;
+ HEAP32[i9 + 24 >> 2] = i5;
+ break;
+ }
+ }
+ } while (0);
+ i9 = HEAP32[i17 + (i12 + i15) >> 2] | 0;
+ if ((i9 | 0) != 0) {
+ if (i9 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i5 + 20 >> 2] = i9;
+ HEAP32[i9 + 24 >> 2] = i5;
+ break;
+ }
+ }
+ }
+ } else {
+ i5 = HEAP32[i17 + ((i13 | 8) + i14) >> 2] | 0;
+ i12 = HEAP32[i17 + (i14 + 12 + i13) >> 2] | 0;
+ i18 = 7200 + (i16 << 1 << 2) | 0;
+ if ((i5 | 0) != (i18 | 0)) {
+ if (i5 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ if ((HEAP32[i5 + 12 >> 2] | 0) != (i15 | 0)) {
+ _abort();
+ }
+ }
+ if ((i12 | 0) == (i5 | 0)) {
+ HEAP32[1790] = HEAP32[1790] & ~(1 << i16);
+ break;
+ }
+ if ((i12 | 0) != (i18 | 0)) {
+ if (i12 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i16 = i12 + 8 | 0;
+ if ((HEAP32[i16 >> 2] | 0) == (i15 | 0)) {
+ i9 = i16;
+ } else {
+ _abort();
+ }
+ } else {
+ i9 = i12 + 8 | 0;
+ }
+ HEAP32[i5 + 12 >> 2] = i12;
+ HEAP32[i9 >> 2] = i5;
+ }
+ } while (0);
+ i15 = i17 + ((i11 | i13) + i14) | 0;
+ i10 = i11 + i10 | 0;
+ }
+ i5 = i15 + 4 | 0;
+ HEAP32[i5 >> 2] = HEAP32[i5 >> 2] & -2;
+ HEAP32[i17 + (i8 + 4) >> 2] = i10 | 1;
+ HEAP32[i17 + (i10 + i8) >> 2] = i10;
+ i5 = i10 >>> 3;
+ if (i10 >>> 0 < 256) {
+ i10 = i5 << 1;
+ i2 = 7200 + (i10 << 2) | 0;
+ i9 = HEAP32[1790] | 0;
+ i5 = 1 << i5;
+ if ((i9 & i5 | 0) != 0) {
+ i9 = 7200 + (i10 + 2 << 2) | 0;
+ i5 = HEAP32[i9 >> 2] | 0;
+ if (i5 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i3 = i9;
+ i4 = i5;
+ }
+ } else {
+ HEAP32[1790] = i9 | i5;
+ i3 = 7200 + (i10 + 2 << 2) | 0;
+ i4 = i2;
+ }
+ HEAP32[i3 >> 2] = i7;
+ HEAP32[i4 + 12 >> 2] = i7;
+ HEAP32[i17 + (i8 + 8) >> 2] = i4;
+ HEAP32[i17 + (i8 + 12) >> 2] = i2;
+ break;
+ }
+ i3 = i10 >>> 8;
+ if ((i3 | 0) != 0) {
+ if (i10 >>> 0 > 16777215) {
+ i3 = 31;
+ } else {
+ i31 = (i3 + 1048320 | 0) >>> 16 & 8;
+ i32 = i3 << i31;
+ i30 = (i32 + 520192 | 0) >>> 16 & 4;
+ i32 = i32 << i30;
+ i3 = (i32 + 245760 | 0) >>> 16 & 2;
+ i3 = 14 - (i30 | i31 | i3) + (i32 << i3 >>> 15) | 0;
+ i3 = i10 >>> (i3 + 7 | 0) & 1 | i3 << 1;
+ }
+ } else {
+ i3 = 0;
+ }
+ i4 = 7464 + (i3 << 2) | 0;
+ HEAP32[i17 + (i8 + 28) >> 2] = i3;
+ HEAP32[i17 + (i8 + 20) >> 2] = 0;
+ HEAP32[i17 + (i8 + 16) >> 2] = 0;
+ i9 = HEAP32[7164 >> 2] | 0;
+ i5 = 1 << i3;
+ if ((i9 & i5 | 0) == 0) {
+ HEAP32[7164 >> 2] = i9 | i5;
+ HEAP32[i4 >> 2] = i7;
+ HEAP32[i17 + (i8 + 24) >> 2] = i4;
+ HEAP32[i17 + (i8 + 12) >> 2] = i7;
+ HEAP32[i17 + (i8 + 8) >> 2] = i7;
+ break;
+ }
+ i4 = HEAP32[i4 >> 2] | 0;
+ if ((i3 | 0) == 31) {
+ i3 = 0;
+ } else {
+ i3 = 25 - (i3 >>> 1) | 0;
+ }
+ L444 : do {
+ if ((HEAP32[i4 + 4 >> 2] & -8 | 0) != (i10 | 0)) {
+ i3 = i10 << i3;
+ while (1) {
+ i5 = i4 + (i3 >>> 31 << 2) + 16 | 0;
+ i9 = HEAP32[i5 >> 2] | 0;
+ if ((i9 | 0) == 0) {
+ break;
+ }
+ if ((HEAP32[i9 + 4 >> 2] & -8 | 0) == (i10 | 0)) {
+ i2 = i9;
+ break L444;
+ } else {
+ i3 = i3 << 1;
+ i4 = i9;
+ }
+ }
+ if (i5 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i5 >> 2] = i7;
+ HEAP32[i17 + (i8 + 24) >> 2] = i4;
+ HEAP32[i17 + (i8 + 12) >> 2] = i7;
+ HEAP32[i17 + (i8 + 8) >> 2] = i7;
+ break L348;
+ }
+ } else {
+ i2 = i4;
+ }
+ } while (0);
+ i4 = i2 + 8 | 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ i5 = HEAP32[7176 >> 2] | 0;
+ if (i2 >>> 0 < i5 >>> 0) {
+ _abort();
+ }
+ if (i3 >>> 0 < i5 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i3 + 12 >> 2] = i7;
+ HEAP32[i4 >> 2] = i7;
+ HEAP32[i17 + (i8 + 8) >> 2] = i3;
+ HEAP32[i17 + (i8 + 12) >> 2] = i2;
+ HEAP32[i17 + (i8 + 24) >> 2] = 0;
+ break;
+ }
+ } else {
+ i32 = (HEAP32[7172 >> 2] | 0) + i10 | 0;
+ HEAP32[7172 >> 2] = i32;
+ HEAP32[7184 >> 2] = i7;
+ HEAP32[i17 + (i8 + 4) >> 2] = i32 | 1;
+ }
+ } while (0);
+ i32 = i17 + (i6 | 8) | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ i3 = 7608 | 0;
+ while (1) {
+ i2 = HEAP32[i3 >> 2] | 0;
+ if (!(i2 >>> 0 > i15 >>> 0) ? (i11 = HEAP32[i3 + 4 >> 2] | 0, i10 = i2 + i11 | 0, i10 >>> 0 > i15 >>> 0) : 0) {
+ break;
+ }
+ i3 = HEAP32[i3 + 8 >> 2] | 0;
+ }
+ i3 = i2 + (i11 + -39) | 0;
+ if ((i3 & 7 | 0) == 0) {
+ i3 = 0;
+ } else {
+ i3 = 0 - i3 & 7;
+ }
+ i2 = i2 + (i11 + -47 + i3) | 0;
+ i2 = i2 >>> 0 < (i15 + 16 | 0) >>> 0 ? i15 : i2;
+ i3 = i2 + 8 | 0;
+ i4 = i17 + 8 | 0;
+ if ((i4 & 7 | 0) == 0) {
+ i4 = 0;
+ } else {
+ i4 = 0 - i4 & 7;
+ }
+ i32 = i14 + -40 - i4 | 0;
+ HEAP32[7184 >> 2] = i17 + i4;
+ HEAP32[7172 >> 2] = i32;
+ HEAP32[i17 + (i4 + 4) >> 2] = i32 | 1;
+ HEAP32[i17 + (i14 + -36) >> 2] = 40;
+ HEAP32[7188 >> 2] = HEAP32[7648 >> 2];
+ HEAP32[i2 + 4 >> 2] = 27;
+ HEAP32[i3 + 0 >> 2] = HEAP32[7608 >> 2];
+ HEAP32[i3 + 4 >> 2] = HEAP32[7612 >> 2];
+ HEAP32[i3 + 8 >> 2] = HEAP32[7616 >> 2];
+ HEAP32[i3 + 12 >> 2] = HEAP32[7620 >> 2];
+ HEAP32[7608 >> 2] = i17;
+ HEAP32[7612 >> 2] = i14;
+ HEAP32[7620 >> 2] = 0;
+ HEAP32[7616 >> 2] = i3;
+ i4 = i2 + 28 | 0;
+ HEAP32[i4 >> 2] = 7;
+ if ((i2 + 32 | 0) >>> 0 < i10 >>> 0) {
+ while (1) {
+ i3 = i4 + 4 | 0;
+ HEAP32[i3 >> 2] = 7;
+ if ((i4 + 8 | 0) >>> 0 < i10 >>> 0) {
+ i4 = i3;
+ } else {
+ break;
+ }
+ }
+ }
+ if ((i2 | 0) != (i15 | 0)) {
+ i2 = i2 - i15 | 0;
+ i3 = i15 + (i2 + 4) | 0;
+ HEAP32[i3 >> 2] = HEAP32[i3 >> 2] & -2;
+ HEAP32[i15 + 4 >> 2] = i2 | 1;
+ HEAP32[i15 + i2 >> 2] = i2;
+ i3 = i2 >>> 3;
+ if (i2 >>> 0 < 256) {
+ i4 = i3 << 1;
+ i2 = 7200 + (i4 << 2) | 0;
+ i5 = HEAP32[1790] | 0;
+ i3 = 1 << i3;
+ if ((i5 & i3 | 0) != 0) {
+ i4 = 7200 + (i4 + 2 << 2) | 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ if (i3 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i7 = i4;
+ i8 = i3;
+ }
+ } else {
+ HEAP32[1790] = i5 | i3;
+ i7 = 7200 + (i4 + 2 << 2) | 0;
+ i8 = i2;
+ }
+ HEAP32[i7 >> 2] = i15;
+ HEAP32[i8 + 12 >> 2] = i15;
+ HEAP32[i15 + 8 >> 2] = i8;
+ HEAP32[i15 + 12 >> 2] = i2;
+ break;
+ }
+ i3 = i2 >>> 8;
+ if ((i3 | 0) != 0) {
+ if (i2 >>> 0 > 16777215) {
+ i3 = 31;
+ } else {
+ i31 = (i3 + 1048320 | 0) >>> 16 & 8;
+ i32 = i3 << i31;
+ i30 = (i32 + 520192 | 0) >>> 16 & 4;
+ i32 = i32 << i30;
+ i3 = (i32 + 245760 | 0) >>> 16 & 2;
+ i3 = 14 - (i30 | i31 | i3) + (i32 << i3 >>> 15) | 0;
+ i3 = i2 >>> (i3 + 7 | 0) & 1 | i3 << 1;
+ }
+ } else {
+ i3 = 0;
+ }
+ i7 = 7464 + (i3 << 2) | 0;
+ HEAP32[i15 + 28 >> 2] = i3;
+ HEAP32[i15 + 20 >> 2] = 0;
+ HEAP32[i15 + 16 >> 2] = 0;
+ i4 = HEAP32[7164 >> 2] | 0;
+ i5 = 1 << i3;
+ if ((i4 & i5 | 0) == 0) {
+ HEAP32[7164 >> 2] = i4 | i5;
+ HEAP32[i7 >> 2] = i15;
+ HEAP32[i15 + 24 >> 2] = i7;
+ HEAP32[i15 + 12 >> 2] = i15;
+ HEAP32[i15 + 8 >> 2] = i15;
+ break;
+ }
+ i4 = HEAP32[i7 >> 2] | 0;
+ if ((i3 | 0) == 31) {
+ i3 = 0;
+ } else {
+ i3 = 25 - (i3 >>> 1) | 0;
+ }
+ L499 : do {
+ if ((HEAP32[i4 + 4 >> 2] & -8 | 0) != (i2 | 0)) {
+ i3 = i2 << i3;
+ while (1) {
+ i7 = i4 + (i3 >>> 31 << 2) + 16 | 0;
+ i5 = HEAP32[i7 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ break;
+ }
+ if ((HEAP32[i5 + 4 >> 2] & -8 | 0) == (i2 | 0)) {
+ i6 = i5;
+ break L499;
+ } else {
+ i3 = i3 << 1;
+ i4 = i5;
+ }
+ }
+ if (i7 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i7 >> 2] = i15;
+ HEAP32[i15 + 24 >> 2] = i4;
+ HEAP32[i15 + 12 >> 2] = i15;
+ HEAP32[i15 + 8 >> 2] = i15;
+ break L311;
+ }
+ } else {
+ i6 = i4;
+ }
+ } while (0);
+ i4 = i6 + 8 | 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ i2 = HEAP32[7176 >> 2] | 0;
+ if (i6 >>> 0 < i2 >>> 0) {
+ _abort();
+ }
+ if (i3 >>> 0 < i2 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i3 + 12 >> 2] = i15;
+ HEAP32[i4 >> 2] = i15;
+ HEAP32[i15 + 8 >> 2] = i3;
+ HEAP32[i15 + 12 >> 2] = i6;
+ HEAP32[i15 + 24 >> 2] = 0;
+ break;
+ }
+ }
+ } else {
+ i32 = HEAP32[7176 >> 2] | 0;
+ if ((i32 | 0) == 0 | i17 >>> 0 < i32 >>> 0) {
+ HEAP32[7176 >> 2] = i17;
+ }
+ HEAP32[7608 >> 2] = i17;
+ HEAP32[7612 >> 2] = i14;
+ HEAP32[7620 >> 2] = 0;
+ HEAP32[7196 >> 2] = HEAP32[1908];
+ HEAP32[7192 >> 2] = -1;
+ i2 = 0;
+ do {
+ i32 = i2 << 1;
+ i31 = 7200 + (i32 << 2) | 0;
+ HEAP32[7200 + (i32 + 3 << 2) >> 2] = i31;
+ HEAP32[7200 + (i32 + 2 << 2) >> 2] = i31;
+ i2 = i2 + 1 | 0;
+ } while ((i2 | 0) != 32);
+ i2 = i17 + 8 | 0;
+ if ((i2 & 7 | 0) == 0) {
+ i2 = 0;
+ } else {
+ i2 = 0 - i2 & 7;
+ }
+ i32 = i14 + -40 - i2 | 0;
+ HEAP32[7184 >> 2] = i17 + i2;
+ HEAP32[7172 >> 2] = i32;
+ HEAP32[i17 + (i2 + 4) >> 2] = i32 | 1;
+ HEAP32[i17 + (i14 + -36) >> 2] = 40;
+ HEAP32[7188 >> 2] = HEAP32[7648 >> 2];
+ }
+ } while (0);
+ i2 = HEAP32[7172 >> 2] | 0;
+ if (i2 >>> 0 > i12 >>> 0) {
+ i31 = i2 - i12 | 0;
+ HEAP32[7172 >> 2] = i31;
+ i32 = HEAP32[7184 >> 2] | 0;
+ HEAP32[7184 >> 2] = i32 + i12;
+ HEAP32[i32 + (i12 + 4) >> 2] = i31 | 1;
+ HEAP32[i32 + 4 >> 2] = i12 | 3;
+ i32 = i32 + 8 | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ }
+ HEAP32[(___errno_location() | 0) >> 2] = 12;
+ i32 = 0;
+ STACKTOP = i1;
+ return i32 | 0;
+}
+function __ZN12b2EPCollider7CollideEP10b2ManifoldPK11b2EdgeShapeRK11b2TransformPK14b2PolygonShapeS7_(i12, i2, i16, i5, i8, i6) {
+ i12 = i12 | 0;
+ i2 = i2 | 0;
+ i16 = i16 | 0;
+ i5 = i5 | 0;
+ i8 = i8 | 0;
+ i6 = i6 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i7 = 0, i9 = 0, i10 = 0, i11 = 0, i13 = 0, i14 = 0, i15 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, d22 = 0.0, d23 = 0.0, d24 = 0.0, d25 = 0.0, d26 = 0.0, d27 = 0.0, d28 = 0.0, i29 = 0, d30 = 0.0, d31 = 0.0, d32 = 0.0, d33 = 0.0, i34 = 0, i35 = 0, d36 = 0.0, d37 = 0.0, i38 = 0, d39 = 0.0, i40 = 0, i41 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 144 | 0;
+ i18 = i1 + 128 | 0;
+ i11 = i1 + 24 | 0;
+ i9 = i1 + 72 | 0;
+ i10 = i1 + 48 | 0;
+ i3 = i1;
+ i4 = i12 + 132 | 0;
+ d28 = +HEAPF32[i5 + 12 >> 2];
+ d37 = +HEAPF32[i6 + 8 >> 2];
+ d23 = +HEAPF32[i5 + 8 >> 2];
+ d27 = +HEAPF32[i6 + 12 >> 2];
+ d22 = d28 * d37 - d23 * d27;
+ d27 = d37 * d23 + d28 * d27;
+ d37 = +d22;
+ d26 = +d27;
+ d25 = +HEAPF32[i6 >> 2] - +HEAPF32[i5 >> 2];
+ d36 = +HEAPF32[i6 + 4 >> 2] - +HEAPF32[i5 + 4 >> 2];
+ d24 = d28 * d25 + d23 * d36;
+ d25 = d28 * d36 - d23 * d25;
+ d23 = +d24;
+ d36 = +d25;
+ i5 = i4;
+ HEAPF32[i5 >> 2] = d23;
+ HEAPF32[i5 + 4 >> 2] = d36;
+ i5 = i12 + 140 | 0;
+ HEAPF32[i5 >> 2] = d37;
+ HEAPF32[i5 + 4 >> 2] = d26;
+ i5 = i12 + 144 | 0;
+ d26 = +HEAPF32[i8 + 12 >> 2];
+ i7 = i12 + 140 | 0;
+ d37 = +HEAPF32[i8 + 16 >> 2];
+ d24 = d24 + (d27 * d26 - d22 * d37);
+ i6 = i12 + 136 | 0;
+ d25 = d26 * d22 + d27 * d37 + d25;
+ d37 = +d24;
+ d27 = +d25;
+ i34 = i12 + 148 | 0;
+ HEAPF32[i34 >> 2] = d37;
+ HEAPF32[i34 + 4 >> 2] = d27;
+ i34 = i16 + 28 | 0;
+ i29 = HEAP32[i34 >> 2] | 0;
+ i34 = HEAP32[i34 + 4 >> 2] | 0;
+ i14 = i12 + 156 | 0;
+ HEAP32[i14 >> 2] = i29;
+ HEAP32[i14 + 4 >> 2] = i34;
+ i14 = i12 + 164 | 0;
+ i17 = i16 + 12 | 0;
+ i40 = HEAP32[i17 >> 2] | 0;
+ i17 = HEAP32[i17 + 4 >> 2] | 0;
+ i13 = i14;
+ HEAP32[i13 >> 2] = i40;
+ HEAP32[i13 + 4 >> 2] = i17;
+ i13 = i12 + 172 | 0;
+ i20 = i16 + 20 | 0;
+ i41 = HEAP32[i20 >> 2] | 0;
+ i20 = HEAP32[i20 + 4 >> 2] | 0;
+ i38 = i13;
+ HEAP32[i38 >> 2] = i41;
+ HEAP32[i38 + 4 >> 2] = i20;
+ i38 = i16 + 36 | 0;
+ i35 = HEAP32[i38 >> 2] | 0;
+ i38 = HEAP32[i38 + 4 >> 2] | 0;
+ i19 = i12 + 180 | 0;
+ HEAP32[i19 >> 2] = i35;
+ HEAP32[i19 + 4 >> 2] = i38;
+ i19 = (HEAP8[i16 + 44 | 0] | 0) != 0;
+ i21 = (HEAP8[i16 + 45 | 0] | 0) != 0;
+ d27 = (HEAP32[tempDoublePtr >> 2] = i41, +HEAPF32[tempDoublePtr >> 2]);
+ d37 = (HEAP32[tempDoublePtr >> 2] = i40, +HEAPF32[tempDoublePtr >> 2]);
+ d22 = d27 - d37;
+ d26 = (HEAP32[tempDoublePtr >> 2] = i20, +HEAPF32[tempDoublePtr >> 2]);
+ i20 = i12 + 168 | 0;
+ d36 = (HEAP32[tempDoublePtr >> 2] = i17, +HEAPF32[tempDoublePtr >> 2]);
+ d23 = d26 - d36;
+ d28 = +Math_sqrt(+(d22 * d22 + d23 * d23));
+ d33 = (HEAP32[tempDoublePtr >> 2] = i29, +HEAPF32[tempDoublePtr >> 2]);
+ d32 = (HEAP32[tempDoublePtr >> 2] = i34, +HEAPF32[tempDoublePtr >> 2]);
+ d31 = (HEAP32[tempDoublePtr >> 2] = i35, +HEAPF32[tempDoublePtr >> 2]);
+ d30 = (HEAP32[tempDoublePtr >> 2] = i38, +HEAPF32[tempDoublePtr >> 2]);
+ if (!(d28 < 1.1920928955078125e-7)) {
+ d39 = 1.0 / d28;
+ d22 = d22 * d39;
+ d23 = d23 * d39;
+ }
+ i16 = i12 + 196 | 0;
+ d28 = -d22;
+ HEAPF32[i16 >> 2] = d23;
+ i17 = i12 + 200 | 0;
+ HEAPF32[i17 >> 2] = d28;
+ d28 = (d24 - d37) * d23 + (d25 - d36) * d28;
+ if (i19) {
+ d37 = d37 - d33;
+ d36 = d36 - d32;
+ d39 = +Math_sqrt(+(d37 * d37 + d36 * d36));
+ if (!(d39 < 1.1920928955078125e-7)) {
+ d39 = 1.0 / d39;
+ d37 = d37 * d39;
+ d36 = d36 * d39;
+ }
+ d39 = -d37;
+ HEAPF32[i12 + 188 >> 2] = d36;
+ HEAPF32[i12 + 192 >> 2] = d39;
+ i29 = d23 * d37 - d22 * d36 >= 0.0;
+ d32 = (d24 - d33) * d36 + (d25 - d32) * d39;
+ } else {
+ i29 = 0;
+ d32 = 0.0;
+ }
+ L10 : do {
+ if (!i21) {
+ if (!i19) {
+ i41 = d28 >= 0.0;
+ HEAP8[i12 + 248 | 0] = i41 & 1;
+ i19 = i12 + 212 | 0;
+ if (i41) {
+ i15 = 64;
+ break;
+ } else {
+ i15 = 65;
+ break;
+ }
+ }
+ i19 = d32 >= 0.0;
+ if (i29) {
+ if (!i19) {
+ i41 = d28 >= 0.0;
+ HEAP8[i12 + 248 | 0] = i41 & 1;
+ i19 = i12 + 212 | 0;
+ if (!i41) {
+ d37 = +-d23;
+ d39 = +d22;
+ i38 = i19;
+ HEAPF32[i38 >> 2] = d37;
+ HEAPF32[i38 + 4 >> 2] = d39;
+ i38 = i16;
+ i40 = HEAP32[i38 >> 2] | 0;
+ i38 = HEAP32[i38 + 4 >> 2] | 0;
+ i41 = i12 + 228 | 0;
+ HEAP32[i41 >> 2] = i40;
+ HEAP32[i41 + 4 >> 2] = i38;
+ i41 = i12 + 236 | 0;
+ HEAPF32[i41 >> 2] = -(HEAP32[tempDoublePtr >> 2] = i40, +HEAPF32[tempDoublePtr >> 2]);
+ HEAPF32[i41 + 4 >> 2] = d39;
+ break;
+ }
+ } else {
+ HEAP8[i12 + 248 | 0] = 1;
+ i19 = i12 + 212 | 0;
+ }
+ i41 = i16;
+ i40 = HEAP32[i41 + 4 >> 2] | 0;
+ i38 = i19;
+ HEAP32[i38 >> 2] = HEAP32[i41 >> 2];
+ HEAP32[i38 + 4 >> 2] = i40;
+ i38 = i12 + 188 | 0;
+ i40 = HEAP32[i38 + 4 >> 2] | 0;
+ i41 = i12 + 228 | 0;
+ HEAP32[i41 >> 2] = HEAP32[i38 >> 2];
+ HEAP32[i41 + 4 >> 2] = i40;
+ d37 = +-+HEAPF32[i16 >> 2];
+ d39 = +-+HEAPF32[i17 >> 2];
+ i41 = i12 + 236 | 0;
+ HEAPF32[i41 >> 2] = d37;
+ HEAPF32[i41 + 4 >> 2] = d39;
+ break;
+ } else {
+ if (i19) {
+ i41 = d28 >= 0.0;
+ HEAP8[i12 + 248 | 0] = i41 & 1;
+ i19 = i12 + 212 | 0;
+ if (i41) {
+ i38 = i16;
+ i41 = HEAP32[i38 >> 2] | 0;
+ i38 = HEAP32[i38 + 4 >> 2] | 0;
+ i40 = i19;
+ HEAP32[i40 >> 2] = i41;
+ HEAP32[i40 + 4 >> 2] = i38;
+ i40 = i12 + 228 | 0;
+ HEAP32[i40 >> 2] = i41;
+ HEAP32[i40 + 4 >> 2] = i38;
+ d37 = +-(HEAP32[tempDoublePtr >> 2] = i41, +HEAPF32[tempDoublePtr >> 2]);
+ d39 = +d22;
+ i41 = i12 + 236 | 0;
+ HEAPF32[i41 >> 2] = d37;
+ HEAPF32[i41 + 4 >> 2] = d39;
+ break;
+ }
+ } else {
+ HEAP8[i12 + 248 | 0] = 0;
+ i19 = i12 + 212 | 0;
+ }
+ d39 = +-d23;
+ d37 = +d22;
+ i38 = i19;
+ HEAPF32[i38 >> 2] = d39;
+ HEAPF32[i38 + 4 >> 2] = d37;
+ i38 = i16;
+ i40 = HEAP32[i38 + 4 >> 2] | 0;
+ i41 = i12 + 228 | 0;
+ HEAP32[i41 >> 2] = HEAP32[i38 >> 2];
+ HEAP32[i41 + 4 >> 2] = i40;
+ d37 = +-+HEAPF32[i12 + 188 >> 2];
+ d39 = +-+HEAPF32[i12 + 192 >> 2];
+ i41 = i12 + 236 | 0;
+ HEAPF32[i41 >> 2] = d37;
+ HEAPF32[i41 + 4 >> 2] = d39;
+ break;
+ }
+ } else {
+ d33 = d31 - d27;
+ d31 = d30 - d26;
+ d30 = +Math_sqrt(+(d33 * d33 + d31 * d31));
+ if (d30 < 1.1920928955078125e-7) {
+ d30 = d33;
+ } else {
+ d39 = 1.0 / d30;
+ d30 = d33 * d39;
+ d31 = d31 * d39;
+ }
+ d39 = -d30;
+ i34 = i12 + 204 | 0;
+ HEAPF32[i34 >> 2] = d31;
+ i35 = i12 + 208 | 0;
+ HEAPF32[i35 >> 2] = d39;
+ i38 = d22 * d31 - d23 * d30 > 0.0;
+ d24 = (d24 - d27) * d31 + (d25 - d26) * d39;
+ if (!i19) {
+ i19 = d28 >= 0.0;
+ if (!i21) {
+ HEAP8[i12 + 248 | 0] = i19 & 1;
+ i15 = i12 + 212 | 0;
+ if (i19) {
+ i19 = i15;
+ i15 = 64;
+ break;
+ } else {
+ i19 = i15;
+ i15 = 65;
+ break;
+ }
+ }
+ if (i38) {
+ if (!i19) {
+ i41 = d24 >= 0.0;
+ HEAP8[i12 + 248 | 0] = i41 & 1;
+ i19 = i12 + 212 | 0;
+ if (!i41) {
+ d37 = +-d23;
+ d39 = +d22;
+ i38 = i19;
+ HEAPF32[i38 >> 2] = d37;
+ HEAPF32[i38 + 4 >> 2] = d39;
+ i38 = i12 + 228 | 0;
+ HEAPF32[i38 >> 2] = d37;
+ HEAPF32[i38 + 4 >> 2] = d39;
+ i38 = i16;
+ i40 = HEAP32[i38 + 4 >> 2] | 0;
+ i41 = i12 + 236 | 0;
+ HEAP32[i41 >> 2] = HEAP32[i38 >> 2];
+ HEAP32[i41 + 4 >> 2] = i40;
+ break;
+ }
+ } else {
+ HEAP8[i12 + 248 | 0] = 1;
+ i19 = i12 + 212 | 0;
+ }
+ i41 = i16;
+ i40 = HEAP32[i41 + 4 >> 2] | 0;
+ i38 = i19;
+ HEAP32[i38 >> 2] = HEAP32[i41 >> 2];
+ HEAP32[i38 + 4 >> 2] = i40;
+ d37 = +-+HEAPF32[i16 >> 2];
+ d39 = +-+HEAPF32[i17 >> 2];
+ i38 = i12 + 228 | 0;
+ HEAPF32[i38 >> 2] = d37;
+ HEAPF32[i38 + 4 >> 2] = d39;
+ i38 = i12 + 204 | 0;
+ i40 = HEAP32[i38 + 4 >> 2] | 0;
+ i41 = i12 + 236 | 0;
+ HEAP32[i41 >> 2] = HEAP32[i38 >> 2];
+ HEAP32[i41 + 4 >> 2] = i40;
+ break;
+ } else {
+ if (i19) {
+ i41 = d24 >= 0.0;
+ HEAP8[i12 + 248 | 0] = i41 & 1;
+ i19 = i12 + 212 | 0;
+ if (i41) {
+ i40 = i16;
+ i38 = HEAP32[i40 >> 2] | 0;
+ i40 = HEAP32[i40 + 4 >> 2] | 0;
+ i41 = i19;
+ HEAP32[i41 >> 2] = i38;
+ HEAP32[i41 + 4 >> 2] = i40;
+ d37 = +-(HEAP32[tempDoublePtr >> 2] = i38, +HEAPF32[tempDoublePtr >> 2]);
+ d39 = +d22;
+ i41 = i12 + 228 | 0;
+ HEAPF32[i41 >> 2] = d37;
+ HEAPF32[i41 + 4 >> 2] = d39;
+ i41 = i12 + 236 | 0;
+ HEAP32[i41 >> 2] = i38;
+ HEAP32[i41 + 4 >> 2] = i40;
+ break;
+ }
+ } else {
+ HEAP8[i12 + 248 | 0] = 0;
+ i19 = i12 + 212 | 0;
+ }
+ d39 = +-d23;
+ d37 = +d22;
+ i38 = i19;
+ HEAPF32[i38 >> 2] = d39;
+ HEAPF32[i38 + 4 >> 2] = d37;
+ d37 = +-+HEAPF32[i12 + 204 >> 2];
+ d39 = +-+HEAPF32[i12 + 208 >> 2];
+ i38 = i12 + 228 | 0;
+ HEAPF32[i38 >> 2] = d37;
+ HEAPF32[i38 + 4 >> 2] = d39;
+ i38 = i16;
+ i40 = HEAP32[i38 + 4 >> 2] | 0;
+ i41 = i12 + 236 | 0;
+ HEAP32[i41 >> 2] = HEAP32[i38 >> 2];
+ HEAP32[i41 + 4 >> 2] = i40;
+ break;
+ }
+ }
+ if (i29 & i38) {
+ if (!(d32 >= 0.0) & !(d28 >= 0.0)) {
+ i41 = d24 >= 0.0;
+ HEAP8[i12 + 248 | 0] = i41 & 1;
+ i19 = i12 + 212 | 0;
+ if (!i41) {
+ d37 = +-d23;
+ d39 = +d22;
+ i41 = i19;
+ HEAPF32[i41 >> 2] = d37;
+ HEAPF32[i41 + 4 >> 2] = d39;
+ i41 = i12 + 228 | 0;
+ HEAPF32[i41 >> 2] = d37;
+ HEAPF32[i41 + 4 >> 2] = d39;
+ i41 = i12 + 236 | 0;
+ HEAPF32[i41 >> 2] = d37;
+ HEAPF32[i41 + 4 >> 2] = d39;
+ break;
+ }
+ } else {
+ HEAP8[i12 + 248 | 0] = 1;
+ i19 = i12 + 212 | 0;
+ }
+ i38 = i16;
+ i40 = HEAP32[i38 + 4 >> 2] | 0;
+ i41 = i19;
+ HEAP32[i41 >> 2] = HEAP32[i38 >> 2];
+ HEAP32[i41 + 4 >> 2] = i40;
+ i41 = i12 + 188 | 0;
+ i40 = HEAP32[i41 + 4 >> 2] | 0;
+ i38 = i12 + 228 | 0;
+ HEAP32[i38 >> 2] = HEAP32[i41 >> 2];
+ HEAP32[i38 + 4 >> 2] = i40;
+ i38 = i12 + 204 | 0;
+ i40 = HEAP32[i38 + 4 >> 2] | 0;
+ i41 = i12 + 236 | 0;
+ HEAP32[i41 >> 2] = HEAP32[i38 >> 2];
+ HEAP32[i41 + 4 >> 2] = i40;
+ break;
+ }
+ if (i29) {
+ do {
+ if (!(d32 >= 0.0)) {
+ if (d28 >= 0.0) {
+ i41 = d24 >= 0.0;
+ HEAP8[i12 + 248 | 0] = i41 & 1;
+ i19 = i12 + 212 | 0;
+ if (i41) {
+ break;
+ }
+ } else {
+ HEAP8[i12 + 248 | 0] = 0;
+ i19 = i12 + 212 | 0;
+ }
+ d37 = +-d23;
+ d39 = +d22;
+ i41 = i19;
+ HEAPF32[i41 >> 2] = d37;
+ HEAPF32[i41 + 4 >> 2] = d39;
+ d39 = +-+HEAPF32[i34 >> 2];
+ d37 = +-+HEAPF32[i35 >> 2];
+ i41 = i12 + 228 | 0;
+ HEAPF32[i41 >> 2] = d39;
+ HEAPF32[i41 + 4 >> 2] = d37;
+ d37 = +-+HEAPF32[i16 >> 2];
+ d39 = +-+HEAPF32[i17 >> 2];
+ i41 = i12 + 236 | 0;
+ HEAPF32[i41 >> 2] = d37;
+ HEAPF32[i41 + 4 >> 2] = d39;
+ break L10;
+ } else {
+ HEAP8[i12 + 248 | 0] = 1;
+ i19 = i12 + 212 | 0;
+ }
+ } while (0);
+ i38 = i16;
+ i40 = HEAP32[i38 + 4 >> 2] | 0;
+ i41 = i19;
+ HEAP32[i41 >> 2] = HEAP32[i38 >> 2];
+ HEAP32[i41 + 4 >> 2] = i40;
+ i41 = i12 + 188 | 0;
+ i40 = HEAP32[i41 + 4 >> 2] | 0;
+ i38 = i12 + 228 | 0;
+ HEAP32[i38 >> 2] = HEAP32[i41 >> 2];
+ HEAP32[i38 + 4 >> 2] = i40;
+ i38 = i16;
+ i40 = HEAP32[i38 + 4 >> 2] | 0;
+ i41 = i12 + 236 | 0;
+ HEAP32[i41 >> 2] = HEAP32[i38 >> 2];
+ HEAP32[i41 + 4 >> 2] = i40;
+ break;
+ }
+ if (!i38) {
+ if (!(!(d32 >= 0.0) | !(d28 >= 0.0))) {
+ i41 = d24 >= 0.0;
+ HEAP8[i12 + 248 | 0] = i41 & 1;
+ i19 = i12 + 212 | 0;
+ if (i41) {
+ i40 = i16;
+ i38 = HEAP32[i40 >> 2] | 0;
+ i40 = HEAP32[i40 + 4 >> 2] | 0;
+ i41 = i19;
+ HEAP32[i41 >> 2] = i38;
+ HEAP32[i41 + 4 >> 2] = i40;
+ i41 = i12 + 228 | 0;
+ HEAP32[i41 >> 2] = i38;
+ HEAP32[i41 + 4 >> 2] = i40;
+ i41 = i12 + 236 | 0;
+ HEAP32[i41 >> 2] = i38;
+ HEAP32[i41 + 4 >> 2] = i40;
+ break;
+ }
+ } else {
+ HEAP8[i12 + 248 | 0] = 0;
+ i19 = i12 + 212 | 0;
+ }
+ d37 = +-d23;
+ d39 = +d22;
+ i41 = i19;
+ HEAPF32[i41 >> 2] = d37;
+ HEAPF32[i41 + 4 >> 2] = d39;
+ d39 = +-+HEAPF32[i34 >> 2];
+ d37 = +-+HEAPF32[i35 >> 2];
+ i41 = i12 + 228 | 0;
+ HEAPF32[i41 >> 2] = d39;
+ HEAPF32[i41 + 4 >> 2] = d37;
+ d37 = +-+HEAPF32[i12 + 188 >> 2];
+ d39 = +-+HEAPF32[i12 + 192 >> 2];
+ i41 = i12 + 236 | 0;
+ HEAPF32[i41 >> 2] = d37;
+ HEAPF32[i41 + 4 >> 2] = d39;
+ break;
+ }
+ do {
+ if (!(d24 >= 0.0)) {
+ if (d32 >= 0.0) {
+ i41 = d28 >= 0.0;
+ HEAP8[i12 + 248 | 0] = i41 & 1;
+ i19 = i12 + 212 | 0;
+ if (i41) {
+ break;
+ }
+ } else {
+ HEAP8[i12 + 248 | 0] = 0;
+ i19 = i12 + 212 | 0;
+ }
+ d37 = +-d23;
+ d39 = +d22;
+ i41 = i19;
+ HEAPF32[i41 >> 2] = d37;
+ HEAPF32[i41 + 4 >> 2] = d39;
+ d39 = +-+HEAPF32[i16 >> 2];
+ d37 = +-+HEAPF32[i17 >> 2];
+ i41 = i12 + 228 | 0;
+ HEAPF32[i41 >> 2] = d39;
+ HEAPF32[i41 + 4 >> 2] = d37;
+ d37 = +-+HEAPF32[i12 + 188 >> 2];
+ d39 = +-+HEAPF32[i12 + 192 >> 2];
+ i41 = i12 + 236 | 0;
+ HEAPF32[i41 >> 2] = d37;
+ HEAPF32[i41 + 4 >> 2] = d39;
+ break L10;
+ } else {
+ HEAP8[i12 + 248 | 0] = 1;
+ i19 = i12 + 212 | 0;
+ }
+ } while (0);
+ i38 = i16;
+ i40 = HEAP32[i38 + 4 >> 2] | 0;
+ i41 = i19;
+ HEAP32[i41 >> 2] = HEAP32[i38 >> 2];
+ HEAP32[i41 + 4 >> 2] = i40;
+ i41 = i16;
+ i40 = HEAP32[i41 + 4 >> 2] | 0;
+ i38 = i12 + 228 | 0;
+ HEAP32[i38 >> 2] = HEAP32[i41 >> 2];
+ HEAP32[i38 + 4 >> 2] = i40;
+ i38 = i12 + 204 | 0;
+ i40 = HEAP32[i38 + 4 >> 2] | 0;
+ i41 = i12 + 236 | 0;
+ HEAP32[i41 >> 2] = HEAP32[i38 >> 2];
+ HEAP32[i41 + 4 >> 2] = i40;
+ }
+ } while (0);
+ if ((i15 | 0) == 64) {
+ i38 = i16;
+ i41 = HEAP32[i38 >> 2] | 0;
+ i38 = HEAP32[i38 + 4 >> 2] | 0;
+ i40 = i19;
+ HEAP32[i40 >> 2] = i41;
+ HEAP32[i40 + 4 >> 2] = i38;
+ d37 = +-(HEAP32[tempDoublePtr >> 2] = i41, +HEAPF32[tempDoublePtr >> 2]);
+ d39 = +d22;
+ i41 = i12 + 228 | 0;
+ HEAPF32[i41 >> 2] = d37;
+ HEAPF32[i41 + 4 >> 2] = d39;
+ i41 = i12 + 236 | 0;
+ HEAPF32[i41 >> 2] = d37;
+ HEAPF32[i41 + 4 >> 2] = d39;
+ } else if ((i15 | 0) == 65) {
+ d37 = +-d23;
+ d39 = +d22;
+ i40 = i19;
+ HEAPF32[i40 >> 2] = d37;
+ HEAPF32[i40 + 4 >> 2] = d39;
+ i40 = i16;
+ i38 = HEAP32[i40 >> 2] | 0;
+ i40 = HEAP32[i40 + 4 >> 2] | 0;
+ i41 = i12 + 228 | 0;
+ HEAP32[i41 >> 2] = i38;
+ HEAP32[i41 + 4 >> 2] = i40;
+ i41 = i12 + 236 | 0;
+ HEAP32[i41 >> 2] = i38;
+ HEAP32[i41 + 4 >> 2] = i40;
+ }
+ i21 = i8 + 148 | 0;
+ i34 = i12 + 128 | 0;
+ HEAP32[i34 >> 2] = HEAP32[i21 >> 2];
+ if ((HEAP32[i21 >> 2] | 0) > 0) {
+ i19 = 0;
+ do {
+ d33 = +HEAPF32[i5 >> 2];
+ d37 = +HEAPF32[i8 + (i19 << 3) + 20 >> 2];
+ d39 = +HEAPF32[i7 >> 2];
+ d36 = +HEAPF32[i8 + (i19 << 3) + 24 >> 2];
+ d32 = +(+HEAPF32[i4 >> 2] + (d33 * d37 - d39 * d36));
+ d36 = +(d37 * d39 + d33 * d36 + +HEAPF32[i6 >> 2]);
+ i41 = i12 + (i19 << 3) | 0;
+ HEAPF32[i41 >> 2] = d32;
+ HEAPF32[i41 + 4 >> 2] = d36;
+ d36 = +HEAPF32[i5 >> 2];
+ d32 = +HEAPF32[i8 + (i19 << 3) + 84 >> 2];
+ d33 = +HEAPF32[i7 >> 2];
+ d39 = +HEAPF32[i8 + (i19 << 3) + 88 >> 2];
+ d37 = +(d36 * d32 - d33 * d39);
+ d39 = +(d32 * d33 + d36 * d39);
+ i41 = i12 + (i19 << 3) + 64 | 0;
+ HEAPF32[i41 >> 2] = d37;
+ HEAPF32[i41 + 4 >> 2] = d39;
+ i19 = i19 + 1 | 0;
+ } while ((i19 | 0) < (HEAP32[i21 >> 2] | 0));
+ }
+ i21 = i12 + 244 | 0;
+ HEAPF32[i21 >> 2] = .019999999552965164;
+ i19 = i2 + 60 | 0;
+ HEAP32[i19 >> 2] = 0;
+ i29 = i12 + 248 | 0;
+ i35 = HEAP32[i34 >> 2] | 0;
+ if ((i35 | 0) <= 0) {
+ STACKTOP = i1;
+ return;
+ }
+ d23 = +HEAPF32[i12 + 164 >> 2];
+ d26 = +HEAPF32[i20 >> 2];
+ d24 = +HEAPF32[i12 + 212 >> 2];
+ d27 = +HEAPF32[i12 + 216 >> 2];
+ d22 = 3.4028234663852886e+38;
+ i20 = 0;
+ do {
+ d25 = d24 * (+HEAPF32[i12 + (i20 << 3) >> 2] - d23) + d27 * (+HEAPF32[i12 + (i20 << 3) + 4 >> 2] - d26);
+ d22 = d25 < d22 ? d25 : d22;
+ i20 = i20 + 1 | 0;
+ } while ((i20 | 0) != (i35 | 0));
+ if (d22 > .019999999552965164) {
+ STACKTOP = i1;
+ return;
+ }
+ __ZN12b2EPCollider24ComputePolygonSeparationEv(i18, i12);
+ i20 = HEAP32[i18 >> 2] | 0;
+ if ((i20 | 0) != 0) {
+ d23 = +HEAPF32[i18 + 8 >> 2];
+ if (d23 > +HEAPF32[i21 >> 2]) {
+ STACKTOP = i1;
+ return;
+ }
+ if (d23 > d22 * .9800000190734863 + .0010000000474974513) {
+ i18 = HEAP32[i18 + 4 >> 2] | 0;
+ i35 = i2 + 56 | 0;
+ if ((i20 | 0) == 1) {
+ i18 = i11;
+ i15 = 77;
+ } else {
+ HEAP32[i35 >> 2] = 2;
+ i40 = i14;
+ i41 = HEAP32[i40 + 4 >> 2] | 0;
+ i38 = i11;
+ HEAP32[i38 >> 2] = HEAP32[i40 >> 2];
+ HEAP32[i38 + 4 >> 2] = i41;
+ i38 = i11 + 8 | 0;
+ HEAP8[i38] = 0;
+ i41 = i18 & 255;
+ HEAP8[i38 + 1 | 0] = i41;
+ HEAP8[i38 + 2 | 0] = 0;
+ HEAP8[i38 + 3 | 0] = 1;
+ i38 = i13;
+ i40 = HEAP32[i38 + 4 >> 2] | 0;
+ i13 = i11 + 12 | 0;
+ HEAP32[i13 >> 2] = HEAP32[i38 >> 2];
+ HEAP32[i13 + 4 >> 2] = i40;
+ i13 = i11 + 20 | 0;
+ HEAP8[i13] = 0;
+ HEAP8[i13 + 1 | 0] = i41;
+ HEAP8[i13 + 2 | 0] = 0;
+ HEAP8[i13 + 3 | 0] = 1;
+ HEAP32[i9 >> 2] = i18;
+ i13 = i18 + 1 | 0;
+ i16 = (i13 | 0) < (HEAP32[i34 >> 2] | 0) ? i13 : 0;
+ HEAP32[i9 + 4 >> 2] = i16;
+ i17 = i12 + (i18 << 3) | 0;
+ i13 = HEAP32[i17 >> 2] | 0;
+ i17 = HEAP32[i17 + 4 >> 2] | 0;
+ i29 = i9 + 8 | 0;
+ HEAP32[i29 >> 2] = i13;
+ HEAP32[i29 + 4 >> 2] = i17;
+ i16 = i12 + (i16 << 3) | 0;
+ i29 = HEAP32[i16 >> 2] | 0;
+ i16 = HEAP32[i16 + 4 >> 2] | 0;
+ i20 = i9 + 16 | 0;
+ HEAP32[i20 >> 2] = i29;
+ HEAP32[i20 + 4 >> 2] = i16;
+ i20 = i12 + (i18 << 3) + 64 | 0;
+ i12 = HEAP32[i20 >> 2] | 0;
+ i20 = HEAP32[i20 + 4 >> 2] | 0;
+ i14 = i9 + 24 | 0;
+ HEAP32[i14 >> 2] = i12;
+ HEAP32[i14 + 4 >> 2] = i20;
+ i14 = 0;
+ }
+ } else {
+ i15 = 75;
+ }
+ } else {
+ i15 = 75;
+ }
+ if ((i15 | 0) == 75) {
+ i18 = i11;
+ i35 = i2 + 56 | 0;
+ i15 = 77;
+ }
+ do {
+ if ((i15 | 0) == 77) {
+ HEAP32[i35 >> 2] = 1;
+ i15 = HEAP32[i34 >> 2] | 0;
+ if ((i15 | 0) > 1) {
+ d23 = +HEAPF32[i12 + 216 >> 2];
+ d22 = +HEAPF32[i12 + 212 >> 2];
+ i34 = 0;
+ d24 = d22 * +HEAPF32[i12 + 64 >> 2] + d23 * +HEAPF32[i12 + 68 >> 2];
+ i35 = 1;
+ while (1) {
+ d25 = d22 * +HEAPF32[i12 + (i35 << 3) + 64 >> 2] + d23 * +HEAPF32[i12 + (i35 << 3) + 68 >> 2];
+ i20 = d25 < d24;
+ i34 = i20 ? i35 : i34;
+ i35 = i35 + 1 | 0;
+ if ((i35 | 0) < (i15 | 0)) {
+ d24 = i20 ? d25 : d24;
+ } else {
+ break;
+ }
+ }
+ } else {
+ i34 = 0;
+ }
+ i20 = i34 + 1 | 0;
+ i40 = (i20 | 0) < (i15 | 0) ? i20 : 0;
+ i41 = i12 + (i34 << 3) | 0;
+ i38 = HEAP32[i41 + 4 >> 2] | 0;
+ i35 = i11;
+ HEAP32[i35 >> 2] = HEAP32[i41 >> 2];
+ HEAP32[i35 + 4 >> 2] = i38;
+ i35 = i11 + 8 | 0;
+ HEAP8[i35] = 0;
+ HEAP8[i35 + 1 | 0] = i34;
+ HEAP8[i35 + 2 | 0] = 1;
+ HEAP8[i35 + 3 | 0] = 0;
+ i35 = i12 + (i40 << 3) | 0;
+ i38 = HEAP32[i35 + 4 >> 2] | 0;
+ i41 = i11 + 12 | 0;
+ HEAP32[i41 >> 2] = HEAP32[i35 >> 2];
+ HEAP32[i41 + 4 >> 2] = i38;
+ i41 = i11 + 20 | 0;
+ HEAP8[i41] = 0;
+ HEAP8[i41 + 1 | 0] = i40;
+ HEAP8[i41 + 2 | 0] = 1;
+ HEAP8[i41 + 3 | 0] = 0;
+ if ((HEAP8[i29] | 0) == 0) {
+ HEAP32[i9 >> 2] = 1;
+ HEAP32[i9 + 4 >> 2] = 0;
+ i11 = i13;
+ i13 = HEAP32[i11 >> 2] | 0;
+ i11 = HEAP32[i11 + 4 >> 2] | 0;
+ i29 = i9 + 8 | 0;
+ HEAP32[i29 >> 2] = i13;
+ HEAP32[i29 + 4 >> 2] = i11;
+ i29 = HEAP32[i14 >> 2] | 0;
+ i14 = HEAP32[i14 + 4 >> 2] | 0;
+ i12 = i9 + 16 | 0;
+ HEAP32[i12 >> 2] = i29;
+ HEAP32[i12 + 4 >> 2] = i14;
+ i12 = (HEAPF32[tempDoublePtr >> 2] = -+HEAPF32[i16 >> 2], HEAP32[tempDoublePtr >> 2] | 0);
+ i20 = (HEAPF32[tempDoublePtr >> 2] = -+HEAPF32[i17 >> 2], HEAP32[tempDoublePtr >> 2] | 0);
+ i16 = i9 + 24 | 0;
+ HEAP32[i16 >> 2] = i12;
+ HEAP32[i16 + 4 >> 2] = i20;
+ i16 = i14;
+ i17 = i11;
+ i11 = i18;
+ i18 = 1;
+ i14 = 1;
+ break;
+ } else {
+ HEAP32[i9 >> 2] = 0;
+ HEAP32[i9 + 4 >> 2] = 1;
+ i17 = i14;
+ i11 = HEAP32[i17 >> 2] | 0;
+ i17 = HEAP32[i17 + 4 >> 2] | 0;
+ i29 = i9 + 8 | 0;
+ HEAP32[i29 >> 2] = i11;
+ HEAP32[i29 + 4 >> 2] = i17;
+ i29 = HEAP32[i13 >> 2] | 0;
+ i13 = HEAP32[i13 + 4 >> 2] | 0;
+ i20 = i9 + 16 | 0;
+ HEAP32[i20 >> 2] = i29;
+ HEAP32[i20 + 4 >> 2] = i13;
+ i20 = i16;
+ i12 = HEAP32[i20 >> 2] | 0;
+ i20 = HEAP32[i20 + 4 >> 2] | 0;
+ i16 = i9 + 24 | 0;
+ HEAP32[i16 >> 2] = i12;
+ HEAP32[i16 + 4 >> 2] = i20;
+ i16 = i13;
+ i13 = i11;
+ i11 = i18;
+ i18 = 0;
+ i14 = 1;
+ break;
+ }
+ }
+ } while (0);
+ d30 = (HEAP32[tempDoublePtr >> 2] = i20, +HEAPF32[tempDoublePtr >> 2]);
+ d39 = (HEAP32[tempDoublePtr >> 2] = i12, +HEAPF32[tempDoublePtr >> 2]);
+ d31 = (HEAP32[tempDoublePtr >> 2] = i13, +HEAPF32[tempDoublePtr >> 2]);
+ d32 = (HEAP32[tempDoublePtr >> 2] = i17, +HEAPF32[tempDoublePtr >> 2]);
+ d33 = (HEAP32[tempDoublePtr >> 2] = i29, +HEAPF32[tempDoublePtr >> 2]);
+ d37 = (HEAP32[tempDoublePtr >> 2] = i16, +HEAPF32[tempDoublePtr >> 2]);
+ i41 = i9 + 32 | 0;
+ i16 = i9 + 24 | 0;
+ i13 = i9 + 28 | 0;
+ d39 = -d39;
+ HEAPF32[i41 >> 2] = d30;
+ HEAPF32[i9 + 36 >> 2] = d39;
+ i20 = i9 + 44 | 0;
+ d36 = -d30;
+ i17 = i20;
+ HEAPF32[i17 >> 2] = d36;
+ HEAP32[i17 + 4 >> 2] = i12;
+ i17 = i9 + 8 | 0;
+ i15 = i9 + 12 | 0;
+ d39 = d30 * d31 + d32 * d39;
+ HEAPF32[i9 + 40 >> 2] = d39;
+ i29 = i9 + 52 | 0;
+ HEAPF32[i29 >> 2] = d33 * d36 + (HEAP32[tempDoublePtr >> 2] = i12, +HEAPF32[tempDoublePtr >> 2]) * d37;
+ if ((__Z19b2ClipSegmentToLineP12b2ClipVertexPKS_RK6b2Vec2fi(i10, i11, i41, d39, i18) | 0) < 2) {
+ STACKTOP = i1;
+ return;
+ }
+ if ((__Z19b2ClipSegmentToLineP12b2ClipVertexPKS_RK6b2Vec2fi(i3, i10, i20, +HEAPF32[i29 >> 2], HEAP32[i9 + 4 >> 2] | 0) | 0) < 2) {
+ STACKTOP = i1;
+ return;
+ }
+ i10 = i2 + 40 | 0;
+ if (i14) {
+ i40 = i16;
+ i41 = HEAP32[i40 >> 2] | 0;
+ i40 = HEAP32[i40 + 4 >> 2] | 0;
+ i35 = i10;
+ HEAP32[i35 >> 2] = i41;
+ HEAP32[i35 + 4 >> 2] = i40;
+ i35 = i17;
+ i40 = HEAP32[i35 >> 2] | 0;
+ i35 = HEAP32[i35 + 4 >> 2] | 0;
+ i38 = i2 + 48 | 0;
+ HEAP32[i38 >> 2] = i40;
+ HEAP32[i38 + 4 >> 2] = i35;
+ d23 = (HEAP32[tempDoublePtr >> 2] = i40, +HEAPF32[tempDoublePtr >> 2]);
+ d22 = (HEAP32[tempDoublePtr >> 2] = i41, +HEAPF32[tempDoublePtr >> 2]);
+ d24 = +HEAPF32[i15 >> 2];
+ d25 = +HEAPF32[i13 >> 2];
+ d28 = +HEAPF32[i3 >> 2];
+ d27 = +HEAPF32[i3 + 4 >> 2];
+ d26 = +HEAPF32[i21 >> 2];
+ if (!((d28 - d23) * d22 + (d27 - d24) * d25 <= d26)) {
+ d28 = d26;
+ i8 = 0;
+ } else {
+ d37 = d28 - +HEAPF32[i4 >> 2];
+ d36 = d27 - +HEAPF32[i6 >> 2];
+ d33 = +HEAPF32[i5 >> 2];
+ d28 = +HEAPF32[i7 >> 2];
+ d39 = +(d37 * d33 + d36 * d28);
+ d28 = +(d33 * d36 - d37 * d28);
+ i8 = i2;
+ HEAPF32[i8 >> 2] = d39;
+ HEAPF32[i8 + 4 >> 2] = d28;
+ HEAP32[i2 + 16 >> 2] = HEAP32[i3 + 8 >> 2];
+ d28 = +HEAPF32[i21 >> 2];
+ i8 = 1;
+ }
+ d26 = +HEAPF32[i3 + 12 >> 2];
+ d27 = +HEAPF32[i3 + 16 >> 2];
+ if ((d26 - d23) * d22 + (d27 - d24) * d25 <= d28) {
+ d36 = d26 - +HEAPF32[i4 >> 2];
+ d33 = d27 - +HEAPF32[i6 >> 2];
+ d32 = +HEAPF32[i5 >> 2];
+ d39 = +HEAPF32[i7 >> 2];
+ d37 = +(d36 * d32 + d33 * d39);
+ d39 = +(d32 * d33 - d36 * d39);
+ i41 = i2 + (i8 * 20 | 0) | 0;
+ HEAPF32[i41 >> 2] = d37;
+ HEAPF32[i41 + 4 >> 2] = d39;
+ HEAP32[i2 + (i8 * 20 | 0) + 16 >> 2] = HEAP32[i3 + 20 >> 2];
+ i8 = i8 + 1 | 0;
+ }
+ } else {
+ i38 = HEAP32[i9 >> 2] | 0;
+ i35 = i8 + (i38 << 3) + 84 | 0;
+ i41 = HEAP32[i35 + 4 >> 2] | 0;
+ i40 = i10;
+ HEAP32[i40 >> 2] = HEAP32[i35 >> 2];
+ HEAP32[i40 + 4 >> 2] = i41;
+ i38 = i8 + (i38 << 3) + 20 | 0;
+ i40 = HEAP32[i38 + 4 >> 2] | 0;
+ i41 = i2 + 48 | 0;
+ HEAP32[i41 >> 2] = HEAP32[i38 >> 2];
+ HEAP32[i41 + 4 >> 2] = i40;
+ d22 = +HEAPF32[i17 >> 2];
+ d23 = +HEAPF32[i16 >> 2];
+ d24 = +HEAPF32[i15 >> 2];
+ d25 = +HEAPF32[i13 >> 2];
+ d26 = +HEAPF32[i21 >> 2];
+ if (!((+HEAPF32[i3 >> 2] - d22) * d23 + (+HEAPF32[i3 + 4 >> 2] - d24) * d25 <= d26)) {
+ i8 = 0;
+ } else {
+ i40 = i3;
+ i8 = HEAP32[i40 + 4 >> 2] | 0;
+ i41 = i2;
+ HEAP32[i41 >> 2] = HEAP32[i40 >> 2];
+ HEAP32[i41 + 4 >> 2] = i8;
+ i41 = i3 + 8 | 0;
+ i8 = i2 + 16 | 0;
+ HEAP8[i8 + 2 | 0] = HEAP8[i41 + 3 | 0] | 0;
+ HEAP8[i8 + 3 | 0] = HEAP8[i41 + 2 | 0] | 0;
+ HEAP8[i8] = HEAP8[i41 + 1 | 0] | 0;
+ HEAP8[i8 + 1 | 0] = HEAP8[i41] | 0;
+ d26 = +HEAPF32[i21 >> 2];
+ i8 = 1;
+ }
+ i4 = i3 + 12 | 0;
+ if ((+HEAPF32[i4 >> 2] - d22) * d23 + (+HEAPF32[i3 + 16 >> 2] - d24) * d25 <= d26) {
+ i38 = i4;
+ i41 = HEAP32[i38 + 4 >> 2] | 0;
+ i40 = i2 + (i8 * 20 | 0) | 0;
+ HEAP32[i40 >> 2] = HEAP32[i38 >> 2];
+ HEAP32[i40 + 4 >> 2] = i41;
+ i40 = i3 + 20 | 0;
+ i41 = i2 + (i8 * 20 | 0) + 16 | 0;
+ HEAP8[i41 + 2 | 0] = HEAP8[i40 + 3 | 0] | 0;
+ HEAP8[i41 + 3 | 0] = HEAP8[i40 + 2 | 0] | 0;
+ HEAP8[i41] = HEAP8[i40 + 1 | 0] | 0;
+ HEAP8[i41 + 1 | 0] = HEAP8[i40] | 0;
+ i8 = i8 + 1 | 0;
+ }
+ }
+ HEAP32[i19 >> 2] = i8;
+ STACKTOP = i1;
+ return;
+}
+function __ZN7b2World8SolveTOIERK10b2TimeStep(i30, i11) {
+ i30 = i30 | 0;
+ i11 = i11 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0, i23 = 0, i24 = 0, i25 = 0, i26 = 0, i27 = 0, i28 = 0, i29 = 0, i31 = 0, i32 = 0, i33 = 0, i34 = 0, i35 = 0, i36 = 0, i37 = 0, i38 = 0, i39 = 0, i40 = 0, i41 = 0, d42 = 0.0, i43 = 0, i44 = 0, i45 = 0, i46 = 0, i47 = 0, i48 = 0, i49 = 0, i50 = 0, i51 = 0, i52 = 0, i53 = 0, i54 = 0, i55 = 0, i56 = 0, i57 = 0, i58 = 0, i59 = 0, i60 = 0, i61 = 0, i62 = 0, i63 = 0, i64 = 0, i65 = 0, i66 = 0, d67 = 0.0, d68 = 0.0, d69 = 0.0, d70 = 0.0, d71 = 0.0, d72 = 0.0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 336 | 0;
+ i3 = i1 + 284 | 0;
+ i6 = i1 + 152 | 0;
+ i5 = i1 + 144 | 0;
+ i4 = i1 + 108 | 0;
+ i8 = i1 + 72 | 0;
+ i7 = i1 + 64 | 0;
+ i14 = i1 + 24 | 0;
+ i9 = i1;
+ i10 = i30 + 102872 | 0;
+ i13 = i30 + 102944 | 0;
+ __ZN8b2IslandC2EiiiP16b2StackAllocatorP17b2ContactListener(i3, 64, 32, 0, i30 + 68 | 0, HEAP32[i13 >> 2] | 0);
+ i2 = i30 + 102995 | 0;
+ if ((HEAP8[i2] | 0) != 0) {
+ i15 = HEAP32[i30 + 102952 >> 2] | 0;
+ if ((i15 | 0) != 0) {
+ do {
+ i66 = i15 + 4 | 0;
+ HEAP16[i66 >> 1] = HEAP16[i66 >> 1] & 65534;
+ HEAPF32[i15 + 60 >> 2] = 0.0;
+ i15 = HEAP32[i15 + 96 >> 2] | 0;
+ } while ((i15 | 0) != 0);
+ }
+ i15 = i30 + 102932 | 0;
+ i16 = HEAP32[i15 >> 2] | 0;
+ if ((i16 | 0) != 0) {
+ do {
+ i66 = i16 + 4 | 0;
+ HEAP32[i66 >> 2] = HEAP32[i66 >> 2] & -34;
+ HEAP32[i16 + 128 >> 2] = 0;
+ HEAPF32[i16 + 132 >> 2] = 1.0;
+ i16 = HEAP32[i16 + 12 >> 2] | 0;
+ } while ((i16 | 0) != 0);
+ }
+ } else {
+ i15 = i30 + 102932 | 0;
+ }
+ i25 = i3 + 28 | 0;
+ i26 = i3 + 36 | 0;
+ i27 = i3 + 32 | 0;
+ i28 = i3 + 40 | 0;
+ i29 = i3 + 8 | 0;
+ i24 = i3 + 44 | 0;
+ i23 = i3 + 12 | 0;
+ i22 = i7 + 4 | 0;
+ i21 = i9 + 4 | 0;
+ i20 = i9 + 8 | 0;
+ i19 = i9 + 16 | 0;
+ i18 = i11 + 12 | 0;
+ i17 = i9 + 12 | 0;
+ i16 = i9 + 20 | 0;
+ i39 = i30 + 102994 | 0;
+ i37 = i6 + 16 | 0;
+ i36 = i6 + 20 | 0;
+ i35 = i6 + 24 | 0;
+ i34 = i6 + 44 | 0;
+ i33 = i6 + 48 | 0;
+ i32 = i6 + 52 | 0;
+ i41 = i6 + 28 | 0;
+ i31 = i6 + 56 | 0;
+ i40 = i6 + 92 | 0;
+ i30 = i6 + 128 | 0;
+ i38 = i5 + 4 | 0;
+ L11 : while (1) {
+ i47 = HEAP32[i15 >> 2] | 0;
+ if ((i47 | 0) == 0) {
+ i4 = 36;
+ break;
+ } else {
+ d42 = 1.0;
+ i44 = 0;
+ }
+ do {
+ i48 = i47 + 4 | 0;
+ i43 = HEAP32[i48 >> 2] | 0;
+ do {
+ if ((i43 & 4 | 0) != 0 ? (HEAP32[i47 + 128 >> 2] | 0) <= 8 : 0) {
+ if ((i43 & 32 | 0) == 0) {
+ i43 = HEAP32[i47 + 48 >> 2] | 0;
+ i45 = HEAP32[i47 + 52 >> 2] | 0;
+ if ((HEAP8[i43 + 38 | 0] | 0) != 0) {
+ break;
+ }
+ if ((HEAP8[i45 + 38 | 0] | 0) != 0) {
+ break;
+ }
+ i46 = HEAP32[i43 + 8 >> 2] | 0;
+ i50 = HEAP32[i45 + 8 >> 2] | 0;
+ i53 = HEAP32[i46 >> 2] | 0;
+ i52 = HEAP32[i50 >> 2] | 0;
+ if (!((i53 | 0) == 2 | (i52 | 0) == 2)) {
+ i4 = 16;
+ break L11;
+ }
+ i51 = HEAP16[i46 + 4 >> 1] | 0;
+ i49 = HEAP16[i50 + 4 >> 1] | 0;
+ if (!((i51 & 2) != 0 & (i53 | 0) != 0 | (i49 & 2) != 0 & (i52 | 0) != 0)) {
+ break;
+ }
+ if (!((i51 & 8) != 0 | (i53 | 0) != 2 | ((i49 & 8) != 0 | (i52 | 0) != 2))) {
+ break;
+ }
+ i51 = i46 + 28 | 0;
+ i52 = i46 + 60 | 0;
+ d68 = +HEAPF32[i52 >> 2];
+ i49 = i50 + 28 | 0;
+ i53 = i50 + 60 | 0;
+ d67 = +HEAPF32[i53 >> 2];
+ if (!(d68 < d67)) {
+ if (d67 < d68) {
+ if (!(d67 < 1.0)) {
+ i4 = 25;
+ break L11;
+ }
+ d67 = (d68 - d67) / (1.0 - d67);
+ i66 = i50 + 36 | 0;
+ d69 = 1.0 - d67;
+ d71 = +(+HEAPF32[i66 >> 2] * d69 + d67 * +HEAPF32[i50 + 44 >> 2]);
+ d70 = +(d69 * +HEAPF32[i50 + 40 >> 2] + d67 * +HEAPF32[i50 + 48 >> 2]);
+ HEAPF32[i66 >> 2] = d71;
+ HEAPF32[i66 + 4 >> 2] = d70;
+ i66 = i50 + 52 | 0;
+ HEAPF32[i66 >> 2] = d69 * +HEAPF32[i66 >> 2] + d67 * +HEAPF32[i50 + 56 >> 2];
+ HEAPF32[i53 >> 2] = d68;
+ d67 = d68;
+ } else {
+ d67 = d68;
+ }
+ } else {
+ if (!(d68 < 1.0)) {
+ i4 = 21;
+ break L11;
+ }
+ d71 = (d67 - d68) / (1.0 - d68);
+ i66 = i46 + 36 | 0;
+ d70 = 1.0 - d71;
+ d68 = +(+HEAPF32[i66 >> 2] * d70 + d71 * +HEAPF32[i46 + 44 >> 2]);
+ d69 = +(d70 * +HEAPF32[i46 + 40 >> 2] + d71 * +HEAPF32[i46 + 48 >> 2]);
+ HEAPF32[i66 >> 2] = d68;
+ HEAPF32[i66 + 4 >> 2] = d69;
+ i66 = i46 + 52 | 0;
+ HEAPF32[i66 >> 2] = d70 * +HEAPF32[i66 >> 2] + d71 * +HEAPF32[i46 + 56 >> 2];
+ HEAPF32[i52 >> 2] = d67;
+ }
+ if (!(d67 < 1.0)) {
+ i4 = 28;
+ break L11;
+ }
+ i66 = HEAP32[i47 + 56 >> 2] | 0;
+ i46 = HEAP32[i47 + 60 >> 2] | 0;
+ HEAP32[i37 >> 2] = 0;
+ HEAP32[i36 >> 2] = 0;
+ HEAPF32[i35 >> 2] = 0.0;
+ HEAP32[i34 >> 2] = 0;
+ HEAP32[i33 >> 2] = 0;
+ HEAPF32[i32 >> 2] = 0.0;
+ __ZN15b2DistanceProxy3SetEPK7b2Shapei(i6, HEAP32[i43 + 12 >> 2] | 0, i66);
+ __ZN15b2DistanceProxy3SetEPK7b2Shapei(i41, HEAP32[i45 + 12 >> 2] | 0, i46);
+ i43 = i31 + 0 | 0;
+ i45 = i51 + 0 | 0;
+ i46 = i43 + 36 | 0;
+ do {
+ HEAP32[i43 >> 2] = HEAP32[i45 >> 2];
+ i43 = i43 + 4 | 0;
+ i45 = i45 + 4 | 0;
+ } while ((i43 | 0) < (i46 | 0));
+ i43 = i40 + 0 | 0;
+ i45 = i49 + 0 | 0;
+ i46 = i43 + 36 | 0;
+ do {
+ HEAP32[i43 >> 2] = HEAP32[i45 >> 2];
+ i43 = i43 + 4 | 0;
+ i45 = i45 + 4 | 0;
+ } while ((i43 | 0) < (i46 | 0));
+ HEAPF32[i30 >> 2] = 1.0;
+ __Z14b2TimeOfImpactP11b2TOIOutputPK10b2TOIInput(i5, i6);
+ if ((HEAP32[i5 >> 2] | 0) == 3) {
+ d67 = d67 + (1.0 - d67) * +HEAPF32[i38 >> 2];
+ d67 = d67 < 1.0 ? d67 : 1.0;
+ } else {
+ d67 = 1.0;
+ }
+ HEAPF32[i47 + 132 >> 2] = d67;
+ HEAP32[i48 >> 2] = HEAP32[i48 >> 2] | 32;
+ } else {
+ d67 = +HEAPF32[i47 + 132 >> 2];
+ }
+ if (d67 < d42) {
+ d42 = d67;
+ i44 = i47;
+ }
+ }
+ } while (0);
+ i47 = HEAP32[i47 + 12 >> 2] | 0;
+ } while ((i47 | 0) != 0);
+ if ((i44 | 0) == 0 | d42 > .9999988079071045) {
+ i4 = 36;
+ break;
+ }
+ i47 = HEAP32[(HEAP32[i44 + 48 >> 2] | 0) + 8 >> 2] | 0;
+ i48 = HEAP32[(HEAP32[i44 + 52 >> 2] | 0) + 8 >> 2] | 0;
+ i49 = i47 + 28 | 0;
+ i43 = i4 + 0 | 0;
+ i45 = i49 + 0 | 0;
+ i46 = i43 + 36 | 0;
+ do {
+ HEAP32[i43 >> 2] = HEAP32[i45 >> 2];
+ i43 = i43 + 4 | 0;
+ i45 = i45 + 4 | 0;
+ } while ((i43 | 0) < (i46 | 0));
+ i50 = i48 + 28 | 0;
+ i43 = i8 + 0 | 0;
+ i45 = i50 + 0 | 0;
+ i46 = i43 + 36 | 0;
+ do {
+ HEAP32[i43 >> 2] = HEAP32[i45 >> 2];
+ i43 = i43 + 4 | 0;
+ i45 = i45 + 4 | 0;
+ } while ((i43 | 0) < (i46 | 0));
+ i43 = i47 + 60 | 0;
+ d67 = +HEAPF32[i43 >> 2];
+ if (!(d67 < 1.0)) {
+ i4 = 38;
+ break;
+ }
+ d70 = (d42 - d67) / (1.0 - d67);
+ i57 = i47 + 36 | 0;
+ d67 = 1.0 - d70;
+ i52 = i47 + 44 | 0;
+ i53 = i47 + 48 | 0;
+ d71 = +HEAPF32[i57 >> 2] * d67 + d70 * +HEAPF32[i52 >> 2];
+ d72 = d67 * +HEAPF32[i47 + 40 >> 2] + d70 * +HEAPF32[i53 >> 2];
+ d69 = +d71;
+ d68 = +d72;
+ HEAPF32[i57 >> 2] = d69;
+ HEAPF32[i57 + 4 >> 2] = d68;
+ i57 = i47 + 52 | 0;
+ i51 = i47 + 56 | 0;
+ d70 = d67 * +HEAPF32[i57 >> 2] + d70 * +HEAPF32[i51 >> 2];
+ HEAPF32[i57 >> 2] = d70;
+ HEAPF32[i43 >> 2] = d42;
+ i57 = i47 + 44 | 0;
+ HEAPF32[i57 >> 2] = d69;
+ HEAPF32[i57 + 4 >> 2] = d68;
+ HEAPF32[i51 >> 2] = d70;
+ d68 = +Math_sin(+d70);
+ i57 = i47 + 20 | 0;
+ HEAPF32[i57 >> 2] = d68;
+ d70 = +Math_cos(+d70);
+ i56 = i47 + 24 | 0;
+ HEAPF32[i56 >> 2] = d70;
+ i58 = i47 + 12 | 0;
+ i55 = i47 + 28 | 0;
+ d69 = +HEAPF32[i55 >> 2];
+ i54 = i47 + 32 | 0;
+ d67 = +HEAPF32[i54 >> 2];
+ d71 = +(d71 - (d70 * d69 - d68 * d67));
+ d67 = +(d72 - (d68 * d69 + d70 * d67));
+ i43 = i58;
+ HEAPF32[i43 >> 2] = d71;
+ HEAPF32[i43 + 4 >> 2] = d67;
+ i43 = i48 + 60 | 0;
+ d67 = +HEAPF32[i43 >> 2];
+ if (!(d67 < 1.0)) {
+ i4 = 40;
+ break;
+ }
+ d70 = (d42 - d67) / (1.0 - d67);
+ i64 = i48 + 36 | 0;
+ d72 = 1.0 - d70;
+ i61 = i48 + 44 | 0;
+ i60 = i48 + 48 | 0;
+ d71 = +HEAPF32[i64 >> 2] * d72 + d70 * +HEAPF32[i61 >> 2];
+ d67 = d72 * +HEAPF32[i48 + 40 >> 2] + d70 * +HEAPF32[i60 >> 2];
+ d69 = +d71;
+ d68 = +d67;
+ HEAPF32[i64 >> 2] = d69;
+ HEAPF32[i64 + 4 >> 2] = d68;
+ i64 = i48 + 52 | 0;
+ i59 = i48 + 56 | 0;
+ d70 = d72 * +HEAPF32[i64 >> 2] + d70 * +HEAPF32[i59 >> 2];
+ HEAPF32[i64 >> 2] = d70;
+ HEAPF32[i43 >> 2] = d42;
+ i64 = i48 + 44 | 0;
+ HEAPF32[i64 >> 2] = d69;
+ HEAPF32[i64 + 4 >> 2] = d68;
+ HEAPF32[i59 >> 2] = d70;
+ d68 = +Math_sin(+d70);
+ i64 = i48 + 20 | 0;
+ HEAPF32[i64 >> 2] = d68;
+ d70 = +Math_cos(+d70);
+ i63 = i48 + 24 | 0;
+ HEAPF32[i63 >> 2] = d70;
+ i65 = i48 + 12 | 0;
+ i62 = i48 + 28 | 0;
+ d69 = +HEAPF32[i62 >> 2];
+ i66 = i48 + 32 | 0;
+ d72 = +HEAPF32[i66 >> 2];
+ d71 = +(d71 - (d70 * d69 - d68 * d72));
+ d72 = +(d67 - (d68 * d69 + d70 * d72));
+ i43 = i65;
+ HEAPF32[i43 >> 2] = d71;
+ HEAPF32[i43 + 4 >> 2] = d72;
+ __ZN9b2Contact6UpdateEP17b2ContactListener(i44, HEAP32[i13 >> 2] | 0);
+ i43 = i44 + 4 | 0;
+ i45 = HEAP32[i43 >> 2] | 0;
+ HEAP32[i43 >> 2] = i45 & -33;
+ i46 = i44 + 128 | 0;
+ HEAP32[i46 >> 2] = (HEAP32[i46 >> 2] | 0) + 1;
+ if ((i45 & 6 | 0) != 6) {
+ HEAP32[i43 >> 2] = i45 & -37;
+ i43 = i49 + 0 | 0;
+ i45 = i4 + 0 | 0;
+ i46 = i43 + 36 | 0;
+ do {
+ HEAP32[i43 >> 2] = HEAP32[i45 >> 2];
+ i43 = i43 + 4 | 0;
+ i45 = i45 + 4 | 0;
+ } while ((i43 | 0) < (i46 | 0));
+ i43 = i50 + 0 | 0;
+ i45 = i8 + 0 | 0;
+ i46 = i43 + 36 | 0;
+ do {
+ HEAP32[i43 >> 2] = HEAP32[i45 >> 2];
+ i43 = i43 + 4 | 0;
+ i45 = i45 + 4 | 0;
+ } while ((i43 | 0) < (i46 | 0));
+ d69 = +HEAPF32[i51 >> 2];
+ d71 = +Math_sin(+d69);
+ HEAPF32[i57 >> 2] = d71;
+ d69 = +Math_cos(+d69);
+ HEAPF32[i56 >> 2] = d69;
+ d72 = +HEAPF32[i55 >> 2];
+ d70 = +HEAPF32[i54 >> 2];
+ d68 = +(+HEAPF32[i52 >> 2] - (d69 * d72 - d71 * d70));
+ d70 = +(+HEAPF32[i53 >> 2] - (d71 * d72 + d69 * d70));
+ HEAPF32[i58 >> 2] = d68;
+ HEAPF32[i58 + 4 >> 2] = d70;
+ d70 = +HEAPF32[i59 >> 2];
+ d68 = +Math_sin(+d70);
+ HEAPF32[i64 >> 2] = d68;
+ d70 = +Math_cos(+d70);
+ HEAPF32[i63 >> 2] = d70;
+ d69 = +HEAPF32[i62 >> 2];
+ d72 = +HEAPF32[i66 >> 2];
+ d71 = +(+HEAPF32[i61 >> 2] - (d70 * d69 - d68 * d72));
+ d72 = +(+HEAPF32[i60 >> 2] - (d68 * d69 + d70 * d72));
+ i66 = i65;
+ HEAPF32[i66 >> 2] = d71;
+ HEAPF32[i66 + 4 >> 2] = d72;
+ continue;
+ }
+ i45 = i47 + 4 | 0;
+ i46 = HEAPU16[i45 >> 1] | 0;
+ if ((i46 & 2 | 0) == 0) {
+ HEAP16[i45 >> 1] = i46 | 2;
+ HEAPF32[i47 + 144 >> 2] = 0.0;
+ }
+ i46 = i48 + 4 | 0;
+ i49 = HEAPU16[i46 >> 1] | 0;
+ if ((i49 & 2 | 0) == 0) {
+ HEAP16[i46 >> 1] = i49 | 2;
+ HEAPF32[i48 + 144 >> 2] = 0.0;
+ }
+ HEAP32[i25 >> 2] = 0;
+ HEAP32[i26 >> 2] = 0;
+ HEAP32[i27 >> 2] = 0;
+ if ((HEAP32[i28 >> 2] | 0) <= 0) {
+ i4 = 48;
+ break;
+ }
+ i49 = i47 + 8 | 0;
+ HEAP32[i49 >> 2] = 0;
+ i51 = HEAP32[i25 >> 2] | 0;
+ HEAP32[(HEAP32[i29 >> 2] | 0) + (i51 << 2) >> 2] = i47;
+ i51 = i51 + 1 | 0;
+ HEAP32[i25 >> 2] = i51;
+ if ((i51 | 0) >= (HEAP32[i28 >> 2] | 0)) {
+ i4 = 50;
+ break;
+ }
+ i50 = i48 + 8 | 0;
+ HEAP32[i50 >> 2] = i51;
+ i51 = HEAP32[i25 >> 2] | 0;
+ HEAP32[(HEAP32[i29 >> 2] | 0) + (i51 << 2) >> 2] = i48;
+ HEAP32[i25 >> 2] = i51 + 1;
+ i51 = HEAP32[i26 >> 2] | 0;
+ if ((i51 | 0) >= (HEAP32[i24 >> 2] | 0)) {
+ i4 = 52;
+ break;
+ }
+ HEAP32[i26 >> 2] = i51 + 1;
+ HEAP32[(HEAP32[i23 >> 2] | 0) + (i51 << 2) >> 2] = i44;
+ HEAP16[i45 >> 1] = HEAPU16[i45 >> 1] | 1;
+ HEAP16[i46 >> 1] = HEAPU16[i46 >> 1] | 1;
+ HEAP32[i43 >> 2] = HEAP32[i43 >> 2] | 1;
+ HEAP32[i7 >> 2] = i47;
+ HEAP32[i22 >> 2] = i48;
+ i44 = 1;
+ while (1) {
+ L58 : do {
+ if ((HEAP32[i47 >> 2] | 0) == 2 ? (i12 = HEAP32[i47 + 112 >> 2] | 0, (i12 | 0) != 0) : 0) {
+ i47 = i47 + 4 | 0;
+ i51 = i12;
+ do {
+ if ((HEAP32[i25 >> 2] | 0) == (HEAP32[i28 >> 2] | 0)) {
+ break L58;
+ }
+ if ((HEAP32[i26 >> 2] | 0) == (HEAP32[i24 >> 2] | 0)) {
+ break L58;
+ }
+ i52 = HEAP32[i51 + 4 >> 2] | 0;
+ i53 = i52 + 4 | 0;
+ do {
+ if ((HEAP32[i53 >> 2] & 1 | 0) == 0) {
+ i48 = HEAP32[i51 >> 2] | 0;
+ if (((HEAP32[i48 >> 2] | 0) == 2 ? (HEAP16[i47 >> 1] & 8) == 0 : 0) ? (HEAP16[i48 + 4 >> 1] & 8) == 0 : 0) {
+ break;
+ }
+ if ((HEAP8[(HEAP32[i52 + 48 >> 2] | 0) + 38 | 0] | 0) == 0 ? (HEAP8[(HEAP32[i52 + 52 >> 2] | 0) + 38 | 0] | 0) == 0 : 0) {
+ i54 = i48 + 28 | 0;
+ i43 = i14 + 0 | 0;
+ i45 = i54 + 0 | 0;
+ i46 = i43 + 36 | 0;
+ do {
+ HEAP32[i43 >> 2] = HEAP32[i45 >> 2];
+ i43 = i43 + 4 | 0;
+ i45 = i45 + 4 | 0;
+ } while ((i43 | 0) < (i46 | 0));
+ i43 = i48 + 4 | 0;
+ if ((HEAP16[i43 >> 1] & 1) == 0) {
+ i45 = i48 + 60 | 0;
+ d67 = +HEAPF32[i45 >> 2];
+ if (!(d67 < 1.0)) {
+ i4 = 67;
+ break L11;
+ }
+ d70 = (d42 - d67) / (1.0 - d67);
+ i65 = i48 + 36 | 0;
+ d72 = 1.0 - d70;
+ d71 = +HEAPF32[i65 >> 2] * d72 + d70 * +HEAPF32[i48 + 44 >> 2];
+ d67 = d72 * +HEAPF32[i48 + 40 >> 2] + d70 * +HEAPF32[i48 + 48 >> 2];
+ d69 = +d71;
+ d68 = +d67;
+ HEAPF32[i65 >> 2] = d69;
+ HEAPF32[i65 + 4 >> 2] = d68;
+ i65 = i48 + 52 | 0;
+ i66 = i48 + 56 | 0;
+ d70 = d72 * +HEAPF32[i65 >> 2] + d70 * +HEAPF32[i66 >> 2];
+ HEAPF32[i65 >> 2] = d70;
+ HEAPF32[i45 >> 2] = d42;
+ i65 = i48 + 44 | 0;
+ HEAPF32[i65 >> 2] = d69;
+ HEAPF32[i65 + 4 >> 2] = d68;
+ HEAPF32[i66 >> 2] = d70;
+ d68 = +Math_sin(+d70);
+ HEAPF32[i48 + 20 >> 2] = d68;
+ d70 = +Math_cos(+d70);
+ HEAPF32[i48 + 24 >> 2] = d70;
+ d69 = +HEAPF32[i48 + 28 >> 2];
+ d72 = +HEAPF32[i48 + 32 >> 2];
+ d71 = +(d71 - (d70 * d69 - d68 * d72));
+ d72 = +(d67 - (d68 * d69 + d70 * d72));
+ i66 = i48 + 12 | 0;
+ HEAPF32[i66 >> 2] = d71;
+ HEAPF32[i66 + 4 >> 2] = d72;
+ }
+ __ZN9b2Contact6UpdateEP17b2ContactListener(i52, HEAP32[i13 >> 2] | 0);
+ i45 = HEAP32[i53 >> 2] | 0;
+ if ((i45 & 4 | 0) == 0) {
+ i43 = i54 + 0 | 0;
+ i45 = i14 + 0 | 0;
+ i46 = i43 + 36 | 0;
+ do {
+ HEAP32[i43 >> 2] = HEAP32[i45 >> 2];
+ i43 = i43 + 4 | 0;
+ i45 = i45 + 4 | 0;
+ } while ((i43 | 0) < (i46 | 0));
+ d70 = +HEAPF32[i48 + 56 >> 2];
+ d68 = +Math_sin(+d70);
+ HEAPF32[i48 + 20 >> 2] = d68;
+ d70 = +Math_cos(+d70);
+ HEAPF32[i48 + 24 >> 2] = d70;
+ d69 = +HEAPF32[i48 + 28 >> 2];
+ d72 = +HEAPF32[i48 + 32 >> 2];
+ d71 = +(+HEAPF32[i48 + 44 >> 2] - (d70 * d69 - d68 * d72));
+ d72 = +(+HEAPF32[i48 + 48 >> 2] - (d68 * d69 + d70 * d72));
+ i66 = i48 + 12 | 0;
+ HEAPF32[i66 >> 2] = d71;
+ HEAPF32[i66 + 4 >> 2] = d72;
+ break;
+ }
+ if ((i45 & 2 | 0) == 0) {
+ i43 = i54 + 0 | 0;
+ i45 = i14 + 0 | 0;
+ i46 = i43 + 36 | 0;
+ do {
+ HEAP32[i43 >> 2] = HEAP32[i45 >> 2];
+ i43 = i43 + 4 | 0;
+ i45 = i45 + 4 | 0;
+ } while ((i43 | 0) < (i46 | 0));
+ d70 = +HEAPF32[i48 + 56 >> 2];
+ d68 = +Math_sin(+d70);
+ HEAPF32[i48 + 20 >> 2] = d68;
+ d70 = +Math_cos(+d70);
+ HEAPF32[i48 + 24 >> 2] = d70;
+ d69 = +HEAPF32[i48 + 28 >> 2];
+ d72 = +HEAPF32[i48 + 32 >> 2];
+ d71 = +(+HEAPF32[i48 + 44 >> 2] - (d70 * d69 - d68 * d72));
+ d72 = +(+HEAPF32[i48 + 48 >> 2] - (d68 * d69 + d70 * d72));
+ i66 = i48 + 12 | 0;
+ HEAPF32[i66 >> 2] = d71;
+ HEAPF32[i66 + 4 >> 2] = d72;
+ break;
+ }
+ HEAP32[i53 >> 2] = i45 | 1;
+ i45 = HEAP32[i26 >> 2] | 0;
+ if ((i45 | 0) >= (HEAP32[i24 >> 2] | 0)) {
+ i4 = 74;
+ break L11;
+ }
+ HEAP32[i26 >> 2] = i45 + 1;
+ HEAP32[(HEAP32[i23 >> 2] | 0) + (i45 << 2) >> 2] = i52;
+ i45 = HEAPU16[i43 >> 1] | 0;
+ if ((i45 & 1 | 0) == 0) {
+ HEAP16[i43 >> 1] = i45 | 1;
+ if ((HEAP32[i48 >> 2] | 0) != 0 ? (i45 & 2 | 0) == 0 : 0) {
+ HEAP16[i43 >> 1] = i45 | 3;
+ HEAPF32[i48 + 144 >> 2] = 0.0;
+ }
+ i43 = HEAP32[i25 >> 2] | 0;
+ if ((i43 | 0) >= (HEAP32[i28 >> 2] | 0)) {
+ i4 = 80;
+ break L11;
+ }
+ HEAP32[i48 + 8 >> 2] = i43;
+ i66 = HEAP32[i25 >> 2] | 0;
+ HEAP32[(HEAP32[i29 >> 2] | 0) + (i66 << 2) >> 2] = i48;
+ HEAP32[i25 >> 2] = i66 + 1;
+ }
+ }
+ }
+ } while (0);
+ i51 = HEAP32[i51 + 12 >> 2] | 0;
+ } while ((i51 | 0) != 0);
+ }
+ } while (0);
+ if ((i44 | 0) >= 2) {
+ break;
+ }
+ i47 = HEAP32[i7 + (i44 << 2) >> 2] | 0;
+ i44 = i44 + 1 | 0;
+ }
+ d72 = (1.0 - d42) * +HEAPF32[i11 >> 2];
+ HEAPF32[i9 >> 2] = d72;
+ HEAPF32[i21 >> 2] = 1.0 / d72;
+ HEAPF32[i20 >> 2] = 1.0;
+ HEAP32[i19 >> 2] = 20;
+ HEAP32[i17 >> 2] = HEAP32[i18 >> 2];
+ HEAP8[i16] = 0;
+ __ZN8b2Island8SolveTOIERK10b2TimeStepii(i3, i9, HEAP32[i49 >> 2] | 0, HEAP32[i50 >> 2] | 0);
+ i44 = HEAP32[i25 >> 2] | 0;
+ if ((i44 | 0) > 0) {
+ i43 = 0;
+ do {
+ i45 = HEAP32[(HEAP32[i29 >> 2] | 0) + (i43 << 2) >> 2] | 0;
+ i66 = i45 + 4 | 0;
+ HEAP16[i66 >> 1] = HEAP16[i66 >> 1] & 65534;
+ if ((HEAP32[i45 >> 2] | 0) == 2) {
+ __ZN6b2Body19SynchronizeFixturesEv(i45);
+ i44 = HEAP32[i45 + 112 >> 2] | 0;
+ if ((i44 | 0) != 0) {
+ do {
+ i66 = (HEAP32[i44 + 4 >> 2] | 0) + 4 | 0;
+ HEAP32[i66 >> 2] = HEAP32[i66 >> 2] & -34;
+ i44 = HEAP32[i44 + 12 >> 2] | 0;
+ } while ((i44 | 0) != 0);
+ }
+ i44 = HEAP32[i25 >> 2] | 0;
+ }
+ i43 = i43 + 1 | 0;
+ } while ((i43 | 0) < (i44 | 0));
+ }
+ __ZN16b2ContactManager15FindNewContactsEv(i10);
+ if ((HEAP8[i39] | 0) != 0) {
+ i4 = 92;
+ break;
+ }
+ }
+ if ((i4 | 0) == 16) {
+ ___assert_fail(2288, 2184, 641, 2344);
+ } else if ((i4 | 0) == 21) {
+ ___assert_fail(2360, 2376, 723, 2400);
+ } else if ((i4 | 0) == 25) {
+ ___assert_fail(2360, 2376, 723, 2400);
+ } else if ((i4 | 0) == 28) {
+ ___assert_fail(2360, 2184, 676, 2344);
+ } else if ((i4 | 0) == 36) {
+ HEAP8[i2] = 1;
+ __ZN8b2IslandD2Ev(i3);
+ STACKTOP = i1;
+ return;
+ } else if ((i4 | 0) == 38) {
+ ___assert_fail(2360, 2376, 723, 2400);
+ } else if ((i4 | 0) == 40) {
+ ___assert_fail(2360, 2376, 723, 2400);
+ } else if ((i4 | 0) == 48) {
+ ___assert_fail(2520, 2440, 54, 2472);
+ } else if ((i4 | 0) == 50) {
+ ___assert_fail(2520, 2440, 54, 2472);
+ } else if ((i4 | 0) == 52) {
+ ___assert_fail(2480, 2440, 62, 2472);
+ } else if ((i4 | 0) == 67) {
+ ___assert_fail(2360, 2376, 723, 2400);
+ } else if ((i4 | 0) == 74) {
+ ___assert_fail(2480, 2440, 62, 2472);
+ } else if ((i4 | 0) == 80) {
+ ___assert_fail(2520, 2440, 54, 2472);
+ } else if ((i4 | 0) == 92) {
+ HEAP8[i2] = 0;
+ __ZN8b2IslandD2Ev(i3);
+ STACKTOP = i1;
+ return;
+ }
+}
+function __ZNSt3__16__sortIRPFbRK6b2PairS3_EPS1_EEvT0_S8_T_(i5, i8, i1) {
+ i5 = i5 | 0;
+ i8 = i8 | 0;
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i6 = 0, i7 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i2 = i3;
+ L1 : while (1) {
+ i7 = i8;
+ i4 = i8 + -12 | 0;
+ L3 : while (1) {
+ i9 = i5;
+ i11 = i7 - i9 | 0;
+ switch ((i11 | 0) / 12 | 0 | 0) {
+ case 4:
+ {
+ i6 = 14;
+ break L1;
+ }
+ case 2:
+ {
+ i6 = 4;
+ break L1;
+ }
+ case 3:
+ {
+ i6 = 6;
+ break L1;
+ }
+ case 5:
+ {
+ i6 = 15;
+ break L1;
+ }
+ case 1:
+ case 0:
+ {
+ i6 = 67;
+ break L1;
+ }
+ default:
+ {}
+ }
+ if ((i11 | 0) < 372) {
+ i6 = 21;
+ break L1;
+ }
+ i12 = (i11 | 0) / 24 | 0;
+ i10 = i5 + (i12 * 12 | 0) | 0;
+ do {
+ if ((i11 | 0) > 11988) {
+ i14 = (i11 | 0) / 48 | 0;
+ i11 = i5 + (i14 * 12 | 0) | 0;
+ i14 = i5 + ((i14 + i12 | 0) * 12 | 0) | 0;
+ i12 = __ZNSt3__17__sort4IRPFbRK6b2PairS3_EPS1_EEjT0_S8_S8_S8_T_(i5, i11, i10, i14, i1) | 0;
+ if (FUNCTION_TABLE_iii[HEAP32[i1 >> 2] & 3](i4, i14) | 0) {
+ HEAP32[i2 + 0 >> 2] = HEAP32[i14 + 0 >> 2];
+ HEAP32[i2 + 4 >> 2] = HEAP32[i14 + 4 >> 2];
+ HEAP32[i2 + 8 >> 2] = HEAP32[i14 + 8 >> 2];
+ HEAP32[i14 + 0 >> 2] = HEAP32[i4 + 0 >> 2];
+ HEAP32[i14 + 4 >> 2] = HEAP32[i4 + 4 >> 2];
+ HEAP32[i14 + 8 >> 2] = HEAP32[i4 + 8 >> 2];
+ HEAP32[i4 + 0 >> 2] = HEAP32[i2 + 0 >> 2];
+ HEAP32[i4 + 4 >> 2] = HEAP32[i2 + 4 >> 2];
+ HEAP32[i4 + 8 >> 2] = HEAP32[i2 + 8 >> 2];
+ i13 = i12 + 1 | 0;
+ if (FUNCTION_TABLE_iii[HEAP32[i1 >> 2] & 3](i14, i10) | 0) {
+ HEAP32[i2 + 0 >> 2] = HEAP32[i10 + 0 >> 2];
+ HEAP32[i2 + 4 >> 2] = HEAP32[i10 + 4 >> 2];
+ HEAP32[i2 + 8 >> 2] = HEAP32[i10 + 8 >> 2];
+ HEAP32[i10 + 0 >> 2] = HEAP32[i14 + 0 >> 2];
+ HEAP32[i10 + 4 >> 2] = HEAP32[i14 + 4 >> 2];
+ HEAP32[i10 + 8 >> 2] = HEAP32[i14 + 8 >> 2];
+ HEAP32[i14 + 0 >> 2] = HEAP32[i2 + 0 >> 2];
+ HEAP32[i14 + 4 >> 2] = HEAP32[i2 + 4 >> 2];
+ HEAP32[i14 + 8 >> 2] = HEAP32[i2 + 8 >> 2];
+ i13 = i12 + 2 | 0;
+ if (FUNCTION_TABLE_iii[HEAP32[i1 >> 2] & 3](i10, i11) | 0) {
+ HEAP32[i2 + 0 >> 2] = HEAP32[i11 + 0 >> 2];
+ HEAP32[i2 + 4 >> 2] = HEAP32[i11 + 4 >> 2];
+ HEAP32[i2 + 8 >> 2] = HEAP32[i11 + 8 >> 2];
+ HEAP32[i11 + 0 >> 2] = HEAP32[i10 + 0 >> 2];
+ HEAP32[i11 + 4 >> 2] = HEAP32[i10 + 4 >> 2];
+ HEAP32[i11 + 8 >> 2] = HEAP32[i10 + 8 >> 2];
+ HEAP32[i10 + 0 >> 2] = HEAP32[i2 + 0 >> 2];
+ HEAP32[i10 + 4 >> 2] = HEAP32[i2 + 4 >> 2];
+ HEAP32[i10 + 8 >> 2] = HEAP32[i2 + 8 >> 2];
+ if (FUNCTION_TABLE_iii[HEAP32[i1 >> 2] & 3](i11, i5) | 0) {
+ HEAP32[i2 + 0 >> 2] = HEAP32[i5 + 0 >> 2];
+ HEAP32[i2 + 4 >> 2] = HEAP32[i5 + 4 >> 2];
+ HEAP32[i2 + 8 >> 2] = HEAP32[i5 + 8 >> 2];
+ HEAP32[i5 + 0 >> 2] = HEAP32[i11 + 0 >> 2];
+ HEAP32[i5 + 4 >> 2] = HEAP32[i11 + 4 >> 2];
+ HEAP32[i5 + 8 >> 2] = HEAP32[i11 + 8 >> 2];
+ HEAP32[i11 + 0 >> 2] = HEAP32[i2 + 0 >> 2];
+ HEAP32[i11 + 4 >> 2] = HEAP32[i2 + 4 >> 2];
+ HEAP32[i11 + 8 >> 2] = HEAP32[i2 + 8 >> 2];
+ i12 = i12 + 4 | 0;
+ } else {
+ i12 = i12 + 3 | 0;
+ }
+ } else {
+ i12 = i13;
+ }
+ } else {
+ i12 = i13;
+ }
+ }
+ } else {
+ i15 = FUNCTION_TABLE_iii[HEAP32[i1 >> 2] & 3](i10, i5) | 0;
+ i11 = FUNCTION_TABLE_iii[HEAP32[i1 >> 2] & 3](i4, i10) | 0;
+ if (!i15) {
+ if (!i11) {
+ i12 = 0;
+ break;
+ }
+ HEAP32[i2 + 0 >> 2] = HEAP32[i10 + 0 >> 2];
+ HEAP32[i2 + 4 >> 2] = HEAP32[i10 + 4 >> 2];
+ HEAP32[i2 + 8 >> 2] = HEAP32[i10 + 8 >> 2];
+ HEAP32[i10 + 0 >> 2] = HEAP32[i4 + 0 >> 2];
+ HEAP32[i10 + 4 >> 2] = HEAP32[i4 + 4 >> 2];
+ HEAP32[i10 + 8 >> 2] = HEAP32[i4 + 8 >> 2];
+ HEAP32[i4 + 0 >> 2] = HEAP32[i2 + 0 >> 2];
+ HEAP32[i4 + 4 >> 2] = HEAP32[i2 + 4 >> 2];
+ HEAP32[i4 + 8 >> 2] = HEAP32[i2 + 8 >> 2];
+ if (!(FUNCTION_TABLE_iii[HEAP32[i1 >> 2] & 3](i10, i5) | 0)) {
+ i12 = 1;
+ break;
+ }
+ HEAP32[i2 + 0 >> 2] = HEAP32[i5 + 0 >> 2];
+ HEAP32[i2 + 4 >> 2] = HEAP32[i5 + 4 >> 2];
+ HEAP32[i2 + 8 >> 2] = HEAP32[i5 + 8 >> 2];
+ HEAP32[i5 + 0 >> 2] = HEAP32[i10 + 0 >> 2];
+ HEAP32[i5 + 4 >> 2] = HEAP32[i10 + 4 >> 2];
+ HEAP32[i5 + 8 >> 2] = HEAP32[i10 + 8 >> 2];
+ HEAP32[i10 + 0 >> 2] = HEAP32[i2 + 0 >> 2];
+ HEAP32[i10 + 4 >> 2] = HEAP32[i2 + 4 >> 2];
+ HEAP32[i10 + 8 >> 2] = HEAP32[i2 + 8 >> 2];
+ i12 = 2;
+ break;
+ }
+ if (i11) {
+ HEAP32[i2 + 0 >> 2] = HEAP32[i5 + 0 >> 2];
+ HEAP32[i2 + 4 >> 2] = HEAP32[i5 + 4 >> 2];
+ HEAP32[i2 + 8 >> 2] = HEAP32[i5 + 8 >> 2];
+ HEAP32[i5 + 0 >> 2] = HEAP32[i4 + 0 >> 2];
+ HEAP32[i5 + 4 >> 2] = HEAP32[i4 + 4 >> 2];
+ HEAP32[i5 + 8 >> 2] = HEAP32[i4 + 8 >> 2];
+ HEAP32[i4 + 0 >> 2] = HEAP32[i2 + 0 >> 2];
+ HEAP32[i4 + 4 >> 2] = HEAP32[i2 + 4 >> 2];
+ HEAP32[i4 + 8 >> 2] = HEAP32[i2 + 8 >> 2];
+ i12 = 1;
+ break;
+ }
+ HEAP32[i2 + 0 >> 2] = HEAP32[i5 + 0 >> 2];
+ HEAP32[i2 + 4 >> 2] = HEAP32[i5 + 4 >> 2];
+ HEAP32[i2 + 8 >> 2] = HEAP32[i5 + 8 >> 2];
+ HEAP32[i5 + 0 >> 2] = HEAP32[i10 + 0 >> 2];
+ HEAP32[i5 + 4 >> 2] = HEAP32[i10 + 4 >> 2];
+ HEAP32[i5 + 8 >> 2] = HEAP32[i10 + 8 >> 2];
+ HEAP32[i10 + 0 >> 2] = HEAP32[i2 + 0 >> 2];
+ HEAP32[i10 + 4 >> 2] = HEAP32[i2 + 4 >> 2];
+ HEAP32[i10 + 8 >> 2] = HEAP32[i2 + 8 >> 2];
+ if (FUNCTION_TABLE_iii[HEAP32[i1 >> 2] & 3](i4, i10) | 0) {
+ HEAP32[i2 + 0 >> 2] = HEAP32[i10 + 0 >> 2];
+ HEAP32[i2 + 4 >> 2] = HEAP32[i10 + 4 >> 2];
+ HEAP32[i2 + 8 >> 2] = HEAP32[i10 + 8 >> 2];
+ HEAP32[i10 + 0 >> 2] = HEAP32[i4 + 0 >> 2];
+ HEAP32[i10 + 4 >> 2] = HEAP32[i4 + 4 >> 2];
+ HEAP32[i10 + 8 >> 2] = HEAP32[i4 + 8 >> 2];
+ HEAP32[i4 + 0 >> 2] = HEAP32[i2 + 0 >> 2];
+ HEAP32[i4 + 4 >> 2] = HEAP32[i2 + 4 >> 2];
+ HEAP32[i4 + 8 >> 2] = HEAP32[i2 + 8 >> 2];
+ i12 = 2;
+ } else {
+ i12 = 1;
+ }
+ }
+ } while (0);
+ do {
+ if (FUNCTION_TABLE_iii[HEAP32[i1 >> 2] & 3](i5, i10) | 0) {
+ i13 = i4;
+ } else {
+ i13 = i4;
+ while (1) {
+ i13 = i13 + -12 | 0;
+ if ((i5 | 0) == (i13 | 0)) {
+ break;
+ }
+ if (FUNCTION_TABLE_iii[HEAP32[i1 >> 2] & 3](i13, i10) | 0) {
+ i6 = 50;
+ break;
+ }
+ }
+ if ((i6 | 0) == 50) {
+ i6 = 0;
+ HEAP32[i2 + 0 >> 2] = HEAP32[i5 + 0 >> 2];
+ HEAP32[i2 + 4 >> 2] = HEAP32[i5 + 4 >> 2];
+ HEAP32[i2 + 8 >> 2] = HEAP32[i5 + 8 >> 2];
+ HEAP32[i5 + 0 >> 2] = HEAP32[i13 + 0 >> 2];
+ HEAP32[i5 + 4 >> 2] = HEAP32[i13 + 4 >> 2];
+ HEAP32[i5 + 8 >> 2] = HEAP32[i13 + 8 >> 2];
+ HEAP32[i13 + 0 >> 2] = HEAP32[i2 + 0 >> 2];
+ HEAP32[i13 + 4 >> 2] = HEAP32[i2 + 4 >> 2];
+ HEAP32[i13 + 8 >> 2] = HEAP32[i2 + 8 >> 2];
+ i12 = i12 + 1 | 0;
+ break;
+ }
+ i10 = i5 + 12 | 0;
+ if (!(FUNCTION_TABLE_iii[HEAP32[i1 >> 2] & 3](i5, i4) | 0)) {
+ if ((i10 | 0) == (i4 | 0)) {
+ i6 = 67;
+ break L1;
+ }
+ while (1) {
+ i9 = i10 + 12 | 0;
+ if (FUNCTION_TABLE_iii[HEAP32[i1 >> 2] & 3](i5, i10) | 0) {
+ break;
+ }
+ if ((i9 | 0) == (i4 | 0)) {
+ i6 = 67;
+ break L1;
+ } else {
+ i10 = i9;
+ }
+ }
+ HEAP32[i2 + 0 >> 2] = HEAP32[i10 + 0 >> 2];
+ HEAP32[i2 + 4 >> 2] = HEAP32[i10 + 4 >> 2];
+ HEAP32[i2 + 8 >> 2] = HEAP32[i10 + 8 >> 2];
+ HEAP32[i10 + 0 >> 2] = HEAP32[i4 + 0 >> 2];
+ HEAP32[i10 + 4 >> 2] = HEAP32[i4 + 4 >> 2];
+ HEAP32[i10 + 8 >> 2] = HEAP32[i4 + 8 >> 2];
+ HEAP32[i4 + 0 >> 2] = HEAP32[i2 + 0 >> 2];
+ HEAP32[i4 + 4 >> 2] = HEAP32[i2 + 4 >> 2];
+ HEAP32[i4 + 8 >> 2] = HEAP32[i2 + 8 >> 2];
+ i10 = i9;
+ }
+ if ((i10 | 0) == (i4 | 0)) {
+ i6 = 67;
+ break L1;
+ } else {
+ i9 = i4;
+ }
+ while (1) {
+ while (1) {
+ i11 = i10 + 12 | 0;
+ if (FUNCTION_TABLE_iii[HEAP32[i1 >> 2] & 3](i5, i10) | 0) {
+ break;
+ } else {
+ i10 = i11;
+ }
+ }
+ do {
+ i9 = i9 + -12 | 0;
+ } while (FUNCTION_TABLE_iii[HEAP32[i1 >> 2] & 3](i5, i9) | 0);
+ if (!(i10 >>> 0 < i9 >>> 0)) {
+ i5 = i10;
+ continue L3;
+ }
+ HEAP32[i2 + 0 >> 2] = HEAP32[i10 + 0 >> 2];
+ HEAP32[i2 + 4 >> 2] = HEAP32[i10 + 4 >> 2];
+ HEAP32[i2 + 8 >> 2] = HEAP32[i10 + 8 >> 2];
+ HEAP32[i10 + 0 >> 2] = HEAP32[i9 + 0 >> 2];
+ HEAP32[i10 + 4 >> 2] = HEAP32[i9 + 4 >> 2];
+ HEAP32[i10 + 8 >> 2] = HEAP32[i9 + 8 >> 2];
+ HEAP32[i9 + 0 >> 2] = HEAP32[i2 + 0 >> 2];
+ HEAP32[i9 + 4 >> 2] = HEAP32[i2 + 4 >> 2];
+ HEAP32[i9 + 8 >> 2] = HEAP32[i2 + 8 >> 2];
+ i10 = i11;
+ }
+ }
+ } while (0);
+ i11 = i5 + 12 | 0;
+ L47 : do {
+ if (i11 >>> 0 < i13 >>> 0) {
+ while (1) {
+ i15 = i11;
+ while (1) {
+ i11 = i15 + 12 | 0;
+ if (FUNCTION_TABLE_iii[HEAP32[i1 >> 2] & 3](i15, i10) | 0) {
+ i15 = i11;
+ } else {
+ i14 = i13;
+ break;
+ }
+ }
+ do {
+ i14 = i14 + -12 | 0;
+ } while (!(FUNCTION_TABLE_iii[HEAP32[i1 >> 2] & 3](i14, i10) | 0));
+ if (i15 >>> 0 > i14 >>> 0) {
+ i11 = i15;
+ break L47;
+ }
+ HEAP32[i2 + 0 >> 2] = HEAP32[i15 + 0 >> 2];
+ HEAP32[i2 + 4 >> 2] = HEAP32[i15 + 4 >> 2];
+ HEAP32[i2 + 8 >> 2] = HEAP32[i15 + 8 >> 2];
+ HEAP32[i15 + 0 >> 2] = HEAP32[i14 + 0 >> 2];
+ HEAP32[i15 + 4 >> 2] = HEAP32[i14 + 4 >> 2];
+ HEAP32[i15 + 8 >> 2] = HEAP32[i14 + 8 >> 2];
+ HEAP32[i14 + 0 >> 2] = HEAP32[i2 + 0 >> 2];
+ HEAP32[i14 + 4 >> 2] = HEAP32[i2 + 4 >> 2];
+ HEAP32[i14 + 8 >> 2] = HEAP32[i2 + 8 >> 2];
+ i13 = i14;
+ i10 = (i10 | 0) == (i15 | 0) ? i14 : i10;
+ i12 = i12 + 1 | 0;
+ }
+ }
+ } while (0);
+ if ((i11 | 0) != (i10 | 0) ? FUNCTION_TABLE_iii[HEAP32[i1 >> 2] & 3](i10, i11) | 0 : 0) {
+ HEAP32[i2 + 0 >> 2] = HEAP32[i11 + 0 >> 2];
+ HEAP32[i2 + 4 >> 2] = HEAP32[i11 + 4 >> 2];
+ HEAP32[i2 + 8 >> 2] = HEAP32[i11 + 8 >> 2];
+ HEAP32[i11 + 0 >> 2] = HEAP32[i10 + 0 >> 2];
+ HEAP32[i11 + 4 >> 2] = HEAP32[i10 + 4 >> 2];
+ HEAP32[i11 + 8 >> 2] = HEAP32[i10 + 8 >> 2];
+ HEAP32[i10 + 0 >> 2] = HEAP32[i2 + 0 >> 2];
+ HEAP32[i10 + 4 >> 2] = HEAP32[i2 + 4 >> 2];
+ HEAP32[i10 + 8 >> 2] = HEAP32[i2 + 8 >> 2];
+ i12 = i12 + 1 | 0;
+ }
+ if ((i12 | 0) == 0) {
+ i12 = __ZNSt3__127__insertion_sort_incompleteIRPFbRK6b2PairS3_EPS1_EEbT0_S8_T_(i5, i11, i1) | 0;
+ i10 = i11 + 12 | 0;
+ if (__ZNSt3__127__insertion_sort_incompleteIRPFbRK6b2PairS3_EPS1_EEbT0_S8_T_(i10, i8, i1) | 0) {
+ i6 = 62;
+ break;
+ }
+ if (i12) {
+ i5 = i10;
+ continue;
+ }
+ }
+ i15 = i11;
+ if ((i15 - i9 | 0) >= (i7 - i15 | 0)) {
+ i6 = 66;
+ break;
+ }
+ __ZNSt3__16__sortIRPFbRK6b2PairS3_EPS1_EEvT0_S8_T_(i5, i11, i1);
+ i5 = i11 + 12 | 0;
+ }
+ if ((i6 | 0) == 62) {
+ i6 = 0;
+ if (i12) {
+ i6 = 67;
+ break;
+ } else {
+ i8 = i11;
+ continue;
+ }
+ } else if ((i6 | 0) == 66) {
+ i6 = 0;
+ __ZNSt3__16__sortIRPFbRK6b2PairS3_EPS1_EEvT0_S8_T_(i11 + 12 | 0, i8, i1);
+ i8 = i11;
+ continue;
+ }
+ }
+ if ((i6 | 0) == 4) {
+ if (!(FUNCTION_TABLE_iii[HEAP32[i1 >> 2] & 3](i4, i5) | 0)) {
+ STACKTOP = i3;
+ return;
+ }
+ HEAP32[i2 + 0 >> 2] = HEAP32[i5 + 0 >> 2];
+ HEAP32[i2 + 4 >> 2] = HEAP32[i5 + 4 >> 2];
+ HEAP32[i2 + 8 >> 2] = HEAP32[i5 + 8 >> 2];
+ HEAP32[i5 + 0 >> 2] = HEAP32[i4 + 0 >> 2];
+ HEAP32[i5 + 4 >> 2] = HEAP32[i4 + 4 >> 2];
+ HEAP32[i5 + 8 >> 2] = HEAP32[i4 + 8 >> 2];
+ HEAP32[i4 + 0 >> 2] = HEAP32[i2 + 0 >> 2];
+ HEAP32[i4 + 4 >> 2] = HEAP32[i2 + 4 >> 2];
+ HEAP32[i4 + 8 >> 2] = HEAP32[i2 + 8 >> 2];
+ STACKTOP = i3;
+ return;
+ } else if ((i6 | 0) == 6) {
+ i6 = i5 + 12 | 0;
+ i15 = FUNCTION_TABLE_iii[HEAP32[i1 >> 2] & 3](i6, i5) | 0;
+ i7 = FUNCTION_TABLE_iii[HEAP32[i1 >> 2] & 3](i4, i6) | 0;
+ if (!i15) {
+ if (!i7) {
+ STACKTOP = i3;
+ return;
+ }
+ HEAP32[i2 + 0 >> 2] = HEAP32[i6 + 0 >> 2];
+ HEAP32[i2 + 4 >> 2] = HEAP32[i6 + 4 >> 2];
+ HEAP32[i2 + 8 >> 2] = HEAP32[i6 + 8 >> 2];
+ HEAP32[i6 + 0 >> 2] = HEAP32[i4 + 0 >> 2];
+ HEAP32[i6 + 4 >> 2] = HEAP32[i4 + 4 >> 2];
+ HEAP32[i6 + 8 >> 2] = HEAP32[i4 + 8 >> 2];
+ HEAP32[i4 + 0 >> 2] = HEAP32[i2 + 0 >> 2];
+ HEAP32[i4 + 4 >> 2] = HEAP32[i2 + 4 >> 2];
+ HEAP32[i4 + 8 >> 2] = HEAP32[i2 + 8 >> 2];
+ if (!(FUNCTION_TABLE_iii[HEAP32[i1 >> 2] & 3](i6, i5) | 0)) {
+ STACKTOP = i3;
+ return;
+ }
+ HEAP32[i2 + 0 >> 2] = HEAP32[i5 + 0 >> 2];
+ HEAP32[i2 + 4 >> 2] = HEAP32[i5 + 4 >> 2];
+ HEAP32[i2 + 8 >> 2] = HEAP32[i5 + 8 >> 2];
+ HEAP32[i5 + 0 >> 2] = HEAP32[i6 + 0 >> 2];
+ HEAP32[i5 + 4 >> 2] = HEAP32[i6 + 4 >> 2];
+ HEAP32[i5 + 8 >> 2] = HEAP32[i6 + 8 >> 2];
+ HEAP32[i6 + 0 >> 2] = HEAP32[i2 + 0 >> 2];
+ HEAP32[i6 + 4 >> 2] = HEAP32[i2 + 4 >> 2];
+ HEAP32[i6 + 8 >> 2] = HEAP32[i2 + 8 >> 2];
+ STACKTOP = i3;
+ return;
+ }
+ if (i7) {
+ HEAP32[i2 + 0 >> 2] = HEAP32[i5 + 0 >> 2];
+ HEAP32[i2 + 4 >> 2] = HEAP32[i5 + 4 >> 2];
+ HEAP32[i2 + 8 >> 2] = HEAP32[i5 + 8 >> 2];
+ HEAP32[i5 + 0 >> 2] = HEAP32[i4 + 0 >> 2];
+ HEAP32[i5 + 4 >> 2] = HEAP32[i4 + 4 >> 2];
+ HEAP32[i5 + 8 >> 2] = HEAP32[i4 + 8 >> 2];
+ HEAP32[i4 + 0 >> 2] = HEAP32[i2 + 0 >> 2];
+ HEAP32[i4 + 4 >> 2] = HEAP32[i2 + 4 >> 2];
+ HEAP32[i4 + 8 >> 2] = HEAP32[i2 + 8 >> 2];
+ STACKTOP = i3;
+ return;
+ }
+ HEAP32[i2 + 0 >> 2] = HEAP32[i5 + 0 >> 2];
+ HEAP32[i2 + 4 >> 2] = HEAP32[i5 + 4 >> 2];
+ HEAP32[i2 + 8 >> 2] = HEAP32[i5 + 8 >> 2];
+ HEAP32[i5 + 0 >> 2] = HEAP32[i6 + 0 >> 2];
+ HEAP32[i5 + 4 >> 2] = HEAP32[i6 + 4 >> 2];
+ HEAP32[i5 + 8 >> 2] = HEAP32[i6 + 8 >> 2];
+ HEAP32[i6 + 0 >> 2] = HEAP32[i2 + 0 >> 2];
+ HEAP32[i6 + 4 >> 2] = HEAP32[i2 + 4 >> 2];
+ HEAP32[i6 + 8 >> 2] = HEAP32[i2 + 8 >> 2];
+ if (!(FUNCTION_TABLE_iii[HEAP32[i1 >> 2] & 3](i4, i6) | 0)) {
+ STACKTOP = i3;
+ return;
+ }
+ HEAP32[i2 + 0 >> 2] = HEAP32[i6 + 0 >> 2];
+ HEAP32[i2 + 4 >> 2] = HEAP32[i6 + 4 >> 2];
+ HEAP32[i2 + 8 >> 2] = HEAP32[i6 + 8 >> 2];
+ HEAP32[i6 + 0 >> 2] = HEAP32[i4 + 0 >> 2];
+ HEAP32[i6 + 4 >> 2] = HEAP32[i4 + 4 >> 2];
+ HEAP32[i6 + 8 >> 2] = HEAP32[i4 + 8 >> 2];
+ HEAP32[i4 + 0 >> 2] = HEAP32[i2 + 0 >> 2];
+ HEAP32[i4 + 4 >> 2] = HEAP32[i2 + 4 >> 2];
+ HEAP32[i4 + 8 >> 2] = HEAP32[i2 + 8 >> 2];
+ STACKTOP = i3;
+ return;
+ } else if ((i6 | 0) == 14) {
+ __ZNSt3__17__sort4IRPFbRK6b2PairS3_EPS1_EEjT0_S8_S8_S8_T_(i5, i5 + 12 | 0, i5 + 24 | 0, i4, i1) | 0;
+ STACKTOP = i3;
+ return;
+ } else if ((i6 | 0) == 15) {
+ i6 = i5 + 12 | 0;
+ i7 = i5 + 24 | 0;
+ i8 = i5 + 36 | 0;
+ __ZNSt3__17__sort4IRPFbRK6b2PairS3_EPS1_EEjT0_S8_S8_S8_T_(i5, i6, i7, i8, i1) | 0;
+ if (!(FUNCTION_TABLE_iii[HEAP32[i1 >> 2] & 3](i4, i8) | 0)) {
+ STACKTOP = i3;
+ return;
+ }
+ HEAP32[i2 + 0 >> 2] = HEAP32[i8 + 0 >> 2];
+ HEAP32[i2 + 4 >> 2] = HEAP32[i8 + 4 >> 2];
+ HEAP32[i2 + 8 >> 2] = HEAP32[i8 + 8 >> 2];
+ HEAP32[i8 + 0 >> 2] = HEAP32[i4 + 0 >> 2];
+ HEAP32[i8 + 4 >> 2] = HEAP32[i4 + 4 >> 2];
+ HEAP32[i8 + 8 >> 2] = HEAP32[i4 + 8 >> 2];
+ HEAP32[i4 + 0 >> 2] = HEAP32[i2 + 0 >> 2];
+ HEAP32[i4 + 4 >> 2] = HEAP32[i2 + 4 >> 2];
+ HEAP32[i4 + 8 >> 2] = HEAP32[i2 + 8 >> 2];
+ if (!(FUNCTION_TABLE_iii[HEAP32[i1 >> 2] & 3](i8, i7) | 0)) {
+ STACKTOP = i3;
+ return;
+ }
+ HEAP32[i2 + 0 >> 2] = HEAP32[i7 + 0 >> 2];
+ HEAP32[i2 + 4 >> 2] = HEAP32[i7 + 4 >> 2];
+ HEAP32[i2 + 8 >> 2] = HEAP32[i7 + 8 >> 2];
+ HEAP32[i7 + 0 >> 2] = HEAP32[i8 + 0 >> 2];
+ HEAP32[i7 + 4 >> 2] = HEAP32[i8 + 4 >> 2];
+ HEAP32[i7 + 8 >> 2] = HEAP32[i8 + 8 >> 2];
+ HEAP32[i8 + 0 >> 2] = HEAP32[i2 + 0 >> 2];
+ HEAP32[i8 + 4 >> 2] = HEAP32[i2 + 4 >> 2];
+ HEAP32[i8 + 8 >> 2] = HEAP32[i2 + 8 >> 2];
+ if (!(FUNCTION_TABLE_iii[HEAP32[i1 >> 2] & 3](i7, i6) | 0)) {
+ STACKTOP = i3;
+ return;
+ }
+ HEAP32[i2 + 0 >> 2] = HEAP32[i6 + 0 >> 2];
+ HEAP32[i2 + 4 >> 2] = HEAP32[i6 + 4 >> 2];
+ HEAP32[i2 + 8 >> 2] = HEAP32[i6 + 8 >> 2];
+ HEAP32[i6 + 0 >> 2] = HEAP32[i7 + 0 >> 2];
+ HEAP32[i6 + 4 >> 2] = HEAP32[i7 + 4 >> 2];
+ HEAP32[i6 + 8 >> 2] = HEAP32[i7 + 8 >> 2];
+ HEAP32[i7 + 0 >> 2] = HEAP32[i2 + 0 >> 2];
+ HEAP32[i7 + 4 >> 2] = HEAP32[i2 + 4 >> 2];
+ HEAP32[i7 + 8 >> 2] = HEAP32[i2 + 8 >> 2];
+ if (!(FUNCTION_TABLE_iii[HEAP32[i1 >> 2] & 3](i6, i5) | 0)) {
+ STACKTOP = i3;
+ return;
+ }
+ HEAP32[i2 + 0 >> 2] = HEAP32[i5 + 0 >> 2];
+ HEAP32[i2 + 4 >> 2] = HEAP32[i5 + 4 >> 2];
+ HEAP32[i2 + 8 >> 2] = HEAP32[i5 + 8 >> 2];
+ HEAP32[i5 + 0 >> 2] = HEAP32[i6 + 0 >> 2];
+ HEAP32[i5 + 4 >> 2] = HEAP32[i6 + 4 >> 2];
+ HEAP32[i5 + 8 >> 2] = HEAP32[i6 + 8 >> 2];
+ HEAP32[i6 + 0 >> 2] = HEAP32[i2 + 0 >> 2];
+ HEAP32[i6 + 4 >> 2] = HEAP32[i2 + 4 >> 2];
+ HEAP32[i6 + 8 >> 2] = HEAP32[i2 + 8 >> 2];
+ STACKTOP = i3;
+ return;
+ } else if ((i6 | 0) == 21) {
+ __ZNSt3__118__insertion_sort_3IRPFbRK6b2PairS3_EPS1_EEvT0_S8_T_(i5, i8, i1);
+ STACKTOP = i3;
+ return;
+ } else if ((i6 | 0) == 67) {
+ STACKTOP = i3;
+ return;
+ }
+}
+function _free(i7) {
+ i7 = i7 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0;
+ i1 = STACKTOP;
+ if ((i7 | 0) == 0) {
+ STACKTOP = i1;
+ return;
+ }
+ i15 = i7 + -8 | 0;
+ i16 = HEAP32[7176 >> 2] | 0;
+ if (i15 >>> 0 < i16 >>> 0) {
+ _abort();
+ }
+ i13 = HEAP32[i7 + -4 >> 2] | 0;
+ i12 = i13 & 3;
+ if ((i12 | 0) == 1) {
+ _abort();
+ }
+ i8 = i13 & -8;
+ i6 = i7 + (i8 + -8) | 0;
+ do {
+ if ((i13 & 1 | 0) == 0) {
+ i19 = HEAP32[i15 >> 2] | 0;
+ if ((i12 | 0) == 0) {
+ STACKTOP = i1;
+ return;
+ }
+ i15 = -8 - i19 | 0;
+ i13 = i7 + i15 | 0;
+ i12 = i19 + i8 | 0;
+ if (i13 >>> 0 < i16 >>> 0) {
+ _abort();
+ }
+ if ((i13 | 0) == (HEAP32[7180 >> 2] | 0)) {
+ i2 = i7 + (i8 + -4) | 0;
+ if ((HEAP32[i2 >> 2] & 3 | 0) != 3) {
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ HEAP32[7168 >> 2] = i12;
+ HEAP32[i2 >> 2] = HEAP32[i2 >> 2] & -2;
+ HEAP32[i7 + (i15 + 4) >> 2] = i12 | 1;
+ HEAP32[i6 >> 2] = i12;
+ STACKTOP = i1;
+ return;
+ }
+ i18 = i19 >>> 3;
+ if (i19 >>> 0 < 256) {
+ i2 = HEAP32[i7 + (i15 + 8) >> 2] | 0;
+ i11 = HEAP32[i7 + (i15 + 12) >> 2] | 0;
+ i14 = 7200 + (i18 << 1 << 2) | 0;
+ if ((i2 | 0) != (i14 | 0)) {
+ if (i2 >>> 0 < i16 >>> 0) {
+ _abort();
+ }
+ if ((HEAP32[i2 + 12 >> 2] | 0) != (i13 | 0)) {
+ _abort();
+ }
+ }
+ if ((i11 | 0) == (i2 | 0)) {
+ HEAP32[1790] = HEAP32[1790] & ~(1 << i18);
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ if ((i11 | 0) != (i14 | 0)) {
+ if (i11 >>> 0 < i16 >>> 0) {
+ _abort();
+ }
+ i14 = i11 + 8 | 0;
+ if ((HEAP32[i14 >> 2] | 0) == (i13 | 0)) {
+ i17 = i14;
+ } else {
+ _abort();
+ }
+ } else {
+ i17 = i11 + 8 | 0;
+ }
+ HEAP32[i2 + 12 >> 2] = i11;
+ HEAP32[i17 >> 2] = i2;
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ i17 = HEAP32[i7 + (i15 + 24) >> 2] | 0;
+ i18 = HEAP32[i7 + (i15 + 12) >> 2] | 0;
+ do {
+ if ((i18 | 0) == (i13 | 0)) {
+ i19 = i7 + (i15 + 20) | 0;
+ i18 = HEAP32[i19 >> 2] | 0;
+ if ((i18 | 0) == 0) {
+ i19 = i7 + (i15 + 16) | 0;
+ i18 = HEAP32[i19 >> 2] | 0;
+ if ((i18 | 0) == 0) {
+ i14 = 0;
+ break;
+ }
+ }
+ while (1) {
+ i21 = i18 + 20 | 0;
+ i20 = HEAP32[i21 >> 2] | 0;
+ if ((i20 | 0) != 0) {
+ i18 = i20;
+ i19 = i21;
+ continue;
+ }
+ i20 = i18 + 16 | 0;
+ i21 = HEAP32[i20 >> 2] | 0;
+ if ((i21 | 0) == 0) {
+ break;
+ } else {
+ i18 = i21;
+ i19 = i20;
+ }
+ }
+ if (i19 >>> 0 < i16 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i19 >> 2] = 0;
+ i14 = i18;
+ break;
+ }
+ } else {
+ i19 = HEAP32[i7 + (i15 + 8) >> 2] | 0;
+ if (i19 >>> 0 < i16 >>> 0) {
+ _abort();
+ }
+ i16 = i19 + 12 | 0;
+ if ((HEAP32[i16 >> 2] | 0) != (i13 | 0)) {
+ _abort();
+ }
+ i20 = i18 + 8 | 0;
+ if ((HEAP32[i20 >> 2] | 0) == (i13 | 0)) {
+ HEAP32[i16 >> 2] = i18;
+ HEAP32[i20 >> 2] = i19;
+ i14 = i18;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ if ((i17 | 0) != 0) {
+ i18 = HEAP32[i7 + (i15 + 28) >> 2] | 0;
+ i16 = 7464 + (i18 << 2) | 0;
+ if ((i13 | 0) == (HEAP32[i16 >> 2] | 0)) {
+ HEAP32[i16 >> 2] = i14;
+ if ((i14 | 0) == 0) {
+ HEAP32[7164 >> 2] = HEAP32[7164 >> 2] & ~(1 << i18);
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ } else {
+ if (i17 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i16 = i17 + 16 | 0;
+ if ((HEAP32[i16 >> 2] | 0) == (i13 | 0)) {
+ HEAP32[i16 >> 2] = i14;
+ } else {
+ HEAP32[i17 + 20 >> 2] = i14;
+ }
+ if ((i14 | 0) == 0) {
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ }
+ if (i14 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i14 + 24 >> 2] = i17;
+ i16 = HEAP32[i7 + (i15 + 16) >> 2] | 0;
+ do {
+ if ((i16 | 0) != 0) {
+ if (i16 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i14 + 16 >> 2] = i16;
+ HEAP32[i16 + 24 >> 2] = i14;
+ break;
+ }
+ }
+ } while (0);
+ i15 = HEAP32[i7 + (i15 + 20) >> 2] | 0;
+ if ((i15 | 0) != 0) {
+ if (i15 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i14 + 20 >> 2] = i15;
+ HEAP32[i15 + 24 >> 2] = i14;
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ } else {
+ i2 = i13;
+ i11 = i12;
+ }
+ } else {
+ i2 = i13;
+ i11 = i12;
+ }
+ } else {
+ i2 = i15;
+ i11 = i8;
+ }
+ } while (0);
+ if (!(i2 >>> 0 < i6 >>> 0)) {
+ _abort();
+ }
+ i12 = i7 + (i8 + -4) | 0;
+ i13 = HEAP32[i12 >> 2] | 0;
+ if ((i13 & 1 | 0) == 0) {
+ _abort();
+ }
+ if ((i13 & 2 | 0) == 0) {
+ if ((i6 | 0) == (HEAP32[7184 >> 2] | 0)) {
+ i21 = (HEAP32[7172 >> 2] | 0) + i11 | 0;
+ HEAP32[7172 >> 2] = i21;
+ HEAP32[7184 >> 2] = i2;
+ HEAP32[i2 + 4 >> 2] = i21 | 1;
+ if ((i2 | 0) != (HEAP32[7180 >> 2] | 0)) {
+ STACKTOP = i1;
+ return;
+ }
+ HEAP32[7180 >> 2] = 0;
+ HEAP32[7168 >> 2] = 0;
+ STACKTOP = i1;
+ return;
+ }
+ if ((i6 | 0) == (HEAP32[7180 >> 2] | 0)) {
+ i21 = (HEAP32[7168 >> 2] | 0) + i11 | 0;
+ HEAP32[7168 >> 2] = i21;
+ HEAP32[7180 >> 2] = i2;
+ HEAP32[i2 + 4 >> 2] = i21 | 1;
+ HEAP32[i2 + i21 >> 2] = i21;
+ STACKTOP = i1;
+ return;
+ }
+ i11 = (i13 & -8) + i11 | 0;
+ i12 = i13 >>> 3;
+ do {
+ if (!(i13 >>> 0 < 256)) {
+ i10 = HEAP32[i7 + (i8 + 16) >> 2] | 0;
+ i15 = HEAP32[i7 + (i8 | 4) >> 2] | 0;
+ do {
+ if ((i15 | 0) == (i6 | 0)) {
+ i13 = i7 + (i8 + 12) | 0;
+ i12 = HEAP32[i13 >> 2] | 0;
+ if ((i12 | 0) == 0) {
+ i13 = i7 + (i8 + 8) | 0;
+ i12 = HEAP32[i13 >> 2] | 0;
+ if ((i12 | 0) == 0) {
+ i9 = 0;
+ break;
+ }
+ }
+ while (1) {
+ i14 = i12 + 20 | 0;
+ i15 = HEAP32[i14 >> 2] | 0;
+ if ((i15 | 0) != 0) {
+ i12 = i15;
+ i13 = i14;
+ continue;
+ }
+ i14 = i12 + 16 | 0;
+ i15 = HEAP32[i14 >> 2] | 0;
+ if ((i15 | 0) == 0) {
+ break;
+ } else {
+ i12 = i15;
+ i13 = i14;
+ }
+ }
+ if (i13 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i13 >> 2] = 0;
+ i9 = i12;
+ break;
+ }
+ } else {
+ i13 = HEAP32[i7 + i8 >> 2] | 0;
+ if (i13 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i14 = i13 + 12 | 0;
+ if ((HEAP32[i14 >> 2] | 0) != (i6 | 0)) {
+ _abort();
+ }
+ i12 = i15 + 8 | 0;
+ if ((HEAP32[i12 >> 2] | 0) == (i6 | 0)) {
+ HEAP32[i14 >> 2] = i15;
+ HEAP32[i12 >> 2] = i13;
+ i9 = i15;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ if ((i10 | 0) != 0) {
+ i12 = HEAP32[i7 + (i8 + 20) >> 2] | 0;
+ i13 = 7464 + (i12 << 2) | 0;
+ if ((i6 | 0) == (HEAP32[i13 >> 2] | 0)) {
+ HEAP32[i13 >> 2] = i9;
+ if ((i9 | 0) == 0) {
+ HEAP32[7164 >> 2] = HEAP32[7164 >> 2] & ~(1 << i12);
+ break;
+ }
+ } else {
+ if (i10 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i12 = i10 + 16 | 0;
+ if ((HEAP32[i12 >> 2] | 0) == (i6 | 0)) {
+ HEAP32[i12 >> 2] = i9;
+ } else {
+ HEAP32[i10 + 20 >> 2] = i9;
+ }
+ if ((i9 | 0) == 0) {
+ break;
+ }
+ }
+ if (i9 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i9 + 24 >> 2] = i10;
+ i6 = HEAP32[i7 + (i8 + 8) >> 2] | 0;
+ do {
+ if ((i6 | 0) != 0) {
+ if (i6 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i9 + 16 >> 2] = i6;
+ HEAP32[i6 + 24 >> 2] = i9;
+ break;
+ }
+ }
+ } while (0);
+ i6 = HEAP32[i7 + (i8 + 12) >> 2] | 0;
+ if ((i6 | 0) != 0) {
+ if (i6 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i9 + 20 >> 2] = i6;
+ HEAP32[i6 + 24 >> 2] = i9;
+ break;
+ }
+ }
+ }
+ } else {
+ i9 = HEAP32[i7 + i8 >> 2] | 0;
+ i7 = HEAP32[i7 + (i8 | 4) >> 2] | 0;
+ i8 = 7200 + (i12 << 1 << 2) | 0;
+ if ((i9 | 0) != (i8 | 0)) {
+ if (i9 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ if ((HEAP32[i9 + 12 >> 2] | 0) != (i6 | 0)) {
+ _abort();
+ }
+ }
+ if ((i7 | 0) == (i9 | 0)) {
+ HEAP32[1790] = HEAP32[1790] & ~(1 << i12);
+ break;
+ }
+ if ((i7 | 0) != (i8 | 0)) {
+ if (i7 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i8 = i7 + 8 | 0;
+ if ((HEAP32[i8 >> 2] | 0) == (i6 | 0)) {
+ i10 = i8;
+ } else {
+ _abort();
+ }
+ } else {
+ i10 = i7 + 8 | 0;
+ }
+ HEAP32[i9 + 12 >> 2] = i7;
+ HEAP32[i10 >> 2] = i9;
+ }
+ } while (0);
+ HEAP32[i2 + 4 >> 2] = i11 | 1;
+ HEAP32[i2 + i11 >> 2] = i11;
+ if ((i2 | 0) == (HEAP32[7180 >> 2] | 0)) {
+ HEAP32[7168 >> 2] = i11;
+ STACKTOP = i1;
+ return;
+ }
+ } else {
+ HEAP32[i12 >> 2] = i13 & -2;
+ HEAP32[i2 + 4 >> 2] = i11 | 1;
+ HEAP32[i2 + i11 >> 2] = i11;
+ }
+ i6 = i11 >>> 3;
+ if (i11 >>> 0 < 256) {
+ i7 = i6 << 1;
+ i3 = 7200 + (i7 << 2) | 0;
+ i8 = HEAP32[1790] | 0;
+ i6 = 1 << i6;
+ if ((i8 & i6 | 0) != 0) {
+ i6 = 7200 + (i7 + 2 << 2) | 0;
+ i7 = HEAP32[i6 >> 2] | 0;
+ if (i7 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i4 = i6;
+ i5 = i7;
+ }
+ } else {
+ HEAP32[1790] = i8 | i6;
+ i4 = 7200 + (i7 + 2 << 2) | 0;
+ i5 = i3;
+ }
+ HEAP32[i4 >> 2] = i2;
+ HEAP32[i5 + 12 >> 2] = i2;
+ HEAP32[i2 + 8 >> 2] = i5;
+ HEAP32[i2 + 12 >> 2] = i3;
+ STACKTOP = i1;
+ return;
+ }
+ i4 = i11 >>> 8;
+ if ((i4 | 0) != 0) {
+ if (i11 >>> 0 > 16777215) {
+ i4 = 31;
+ } else {
+ i20 = (i4 + 1048320 | 0) >>> 16 & 8;
+ i21 = i4 << i20;
+ i19 = (i21 + 520192 | 0) >>> 16 & 4;
+ i21 = i21 << i19;
+ i4 = (i21 + 245760 | 0) >>> 16 & 2;
+ i4 = 14 - (i19 | i20 | i4) + (i21 << i4 >>> 15) | 0;
+ i4 = i11 >>> (i4 + 7 | 0) & 1 | i4 << 1;
+ }
+ } else {
+ i4 = 0;
+ }
+ i5 = 7464 + (i4 << 2) | 0;
+ HEAP32[i2 + 28 >> 2] = i4;
+ HEAP32[i2 + 20 >> 2] = 0;
+ HEAP32[i2 + 16 >> 2] = 0;
+ i7 = HEAP32[7164 >> 2] | 0;
+ i6 = 1 << i4;
+ L199 : do {
+ if ((i7 & i6 | 0) != 0) {
+ i5 = HEAP32[i5 >> 2] | 0;
+ if ((i4 | 0) == 31) {
+ i4 = 0;
+ } else {
+ i4 = 25 - (i4 >>> 1) | 0;
+ }
+ L204 : do {
+ if ((HEAP32[i5 + 4 >> 2] & -8 | 0) != (i11 | 0)) {
+ i4 = i11 << i4;
+ i7 = i5;
+ while (1) {
+ i6 = i7 + (i4 >>> 31 << 2) + 16 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ break;
+ }
+ if ((HEAP32[i5 + 4 >> 2] & -8 | 0) == (i11 | 0)) {
+ i3 = i5;
+ break L204;
+ } else {
+ i4 = i4 << 1;
+ i7 = i5;
+ }
+ }
+ if (i6 >>> 0 < (HEAP32[7176 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i6 >> 2] = i2;
+ HEAP32[i2 + 24 >> 2] = i7;
+ HEAP32[i2 + 12 >> 2] = i2;
+ HEAP32[i2 + 8 >> 2] = i2;
+ break L199;
+ }
+ } else {
+ i3 = i5;
+ }
+ } while (0);
+ i5 = i3 + 8 | 0;
+ i4 = HEAP32[i5 >> 2] | 0;
+ i6 = HEAP32[7176 >> 2] | 0;
+ if (i3 >>> 0 < i6 >>> 0) {
+ _abort();
+ }
+ if (i4 >>> 0 < i6 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i4 + 12 >> 2] = i2;
+ HEAP32[i5 >> 2] = i2;
+ HEAP32[i2 + 8 >> 2] = i4;
+ HEAP32[i2 + 12 >> 2] = i3;
+ HEAP32[i2 + 24 >> 2] = 0;
+ break;
+ }
+ } else {
+ HEAP32[7164 >> 2] = i7 | i6;
+ HEAP32[i5 >> 2] = i2;
+ HEAP32[i2 + 24 >> 2] = i5;
+ HEAP32[i2 + 12 >> 2] = i2;
+ HEAP32[i2 + 8 >> 2] = i2;
+ }
+ } while (0);
+ i21 = (HEAP32[7192 >> 2] | 0) + -1 | 0;
+ HEAP32[7192 >> 2] = i21;
+ if ((i21 | 0) == 0) {
+ i2 = 7616 | 0;
+ } else {
+ STACKTOP = i1;
+ return;
+ }
+ while (1) {
+ i2 = HEAP32[i2 >> 2] | 0;
+ if ((i2 | 0) == 0) {
+ break;
+ } else {
+ i2 = i2 + 8 | 0;
+ }
+ }
+ HEAP32[7192 >> 2] = -1;
+ STACKTOP = i1;
+ return;
+}
+function __ZNSt3__127__insertion_sort_incompleteIRPFbRK6b2PairS3_EPS1_EEbT0_S8_T_(i3, i4, i2) {
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ i2 = i2 | 0;
+ var i1 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 32 | 0;
+ i7 = i1 + 12 | 0;
+ i6 = i1;
+ switch ((i4 - i3 | 0) / 12 | 0 | 0) {
+ case 5:
+ {
+ i6 = i3 + 12 | 0;
+ i8 = i3 + 24 | 0;
+ i5 = i3 + 36 | 0;
+ i4 = i4 + -12 | 0;
+ __ZNSt3__17__sort4IRPFbRK6b2PairS3_EPS1_EEjT0_S8_S8_S8_T_(i3, i6, i8, i5, i2) | 0;
+ if (!(FUNCTION_TABLE_iii[HEAP32[i2 >> 2] & 3](i4, i5) | 0)) {
+ i10 = 1;
+ STACKTOP = i1;
+ return i10 | 0;
+ }
+ HEAP32[i7 + 0 >> 2] = HEAP32[i5 + 0 >> 2];
+ HEAP32[i7 + 4 >> 2] = HEAP32[i5 + 4 >> 2];
+ HEAP32[i7 + 8 >> 2] = HEAP32[i5 + 8 >> 2];
+ HEAP32[i5 + 0 >> 2] = HEAP32[i4 + 0 >> 2];
+ HEAP32[i5 + 4 >> 2] = HEAP32[i4 + 4 >> 2];
+ HEAP32[i5 + 8 >> 2] = HEAP32[i4 + 8 >> 2];
+ HEAP32[i4 + 0 >> 2] = HEAP32[i7 + 0 >> 2];
+ HEAP32[i4 + 4 >> 2] = HEAP32[i7 + 4 >> 2];
+ HEAP32[i4 + 8 >> 2] = HEAP32[i7 + 8 >> 2];
+ if (!(FUNCTION_TABLE_iii[HEAP32[i2 >> 2] & 3](i5, i8) | 0)) {
+ i10 = 1;
+ STACKTOP = i1;
+ return i10 | 0;
+ }
+ HEAP32[i7 + 0 >> 2] = HEAP32[i8 + 0 >> 2];
+ HEAP32[i7 + 4 >> 2] = HEAP32[i8 + 4 >> 2];
+ HEAP32[i7 + 8 >> 2] = HEAP32[i8 + 8 >> 2];
+ HEAP32[i8 + 0 >> 2] = HEAP32[i5 + 0 >> 2];
+ HEAP32[i8 + 4 >> 2] = HEAP32[i5 + 4 >> 2];
+ HEAP32[i8 + 8 >> 2] = HEAP32[i5 + 8 >> 2];
+ HEAP32[i5 + 0 >> 2] = HEAP32[i7 + 0 >> 2];
+ HEAP32[i5 + 4 >> 2] = HEAP32[i7 + 4 >> 2];
+ HEAP32[i5 + 8 >> 2] = HEAP32[i7 + 8 >> 2];
+ if (!(FUNCTION_TABLE_iii[HEAP32[i2 >> 2] & 3](i8, i6) | 0)) {
+ i10 = 1;
+ STACKTOP = i1;
+ return i10 | 0;
+ }
+ HEAP32[i7 + 0 >> 2] = HEAP32[i6 + 0 >> 2];
+ HEAP32[i7 + 4 >> 2] = HEAP32[i6 + 4 >> 2];
+ HEAP32[i7 + 8 >> 2] = HEAP32[i6 + 8 >> 2];
+ HEAP32[i6 + 0 >> 2] = HEAP32[i8 + 0 >> 2];
+ HEAP32[i6 + 4 >> 2] = HEAP32[i8 + 4 >> 2];
+ HEAP32[i6 + 8 >> 2] = HEAP32[i8 + 8 >> 2];
+ HEAP32[i8 + 0 >> 2] = HEAP32[i7 + 0 >> 2];
+ HEAP32[i8 + 4 >> 2] = HEAP32[i7 + 4 >> 2];
+ HEAP32[i8 + 8 >> 2] = HEAP32[i7 + 8 >> 2];
+ if (!(FUNCTION_TABLE_iii[HEAP32[i2 >> 2] & 3](i6, i3) | 0)) {
+ i10 = 1;
+ STACKTOP = i1;
+ return i10 | 0;
+ }
+ HEAP32[i7 + 0 >> 2] = HEAP32[i3 + 0 >> 2];
+ HEAP32[i7 + 4 >> 2] = HEAP32[i3 + 4 >> 2];
+ HEAP32[i7 + 8 >> 2] = HEAP32[i3 + 8 >> 2];
+ HEAP32[i3 + 0 >> 2] = HEAP32[i6 + 0 >> 2];
+ HEAP32[i3 + 4 >> 2] = HEAP32[i6 + 4 >> 2];
+ HEAP32[i3 + 8 >> 2] = HEAP32[i6 + 8 >> 2];
+ HEAP32[i6 + 0 >> 2] = HEAP32[i7 + 0 >> 2];
+ HEAP32[i6 + 4 >> 2] = HEAP32[i7 + 4 >> 2];
+ HEAP32[i6 + 8 >> 2] = HEAP32[i7 + 8 >> 2];
+ i10 = 1;
+ STACKTOP = i1;
+ return i10 | 0;
+ }
+ case 4:
+ {
+ __ZNSt3__17__sort4IRPFbRK6b2PairS3_EPS1_EEjT0_S8_S8_S8_T_(i3, i3 + 12 | 0, i3 + 24 | 0, i4 + -12 | 0, i2) | 0;
+ i10 = 1;
+ STACKTOP = i1;
+ return i10 | 0;
+ }
+ case 3:
+ {
+ i5 = i3 + 12 | 0;
+ i4 = i4 + -12 | 0;
+ i10 = FUNCTION_TABLE_iii[HEAP32[i2 >> 2] & 3](i5, i3) | 0;
+ i6 = FUNCTION_TABLE_iii[HEAP32[i2 >> 2] & 3](i4, i5) | 0;
+ if (!i10) {
+ if (!i6) {
+ i10 = 1;
+ STACKTOP = i1;
+ return i10 | 0;
+ }
+ HEAP32[i7 + 0 >> 2] = HEAP32[i5 + 0 >> 2];
+ HEAP32[i7 + 4 >> 2] = HEAP32[i5 + 4 >> 2];
+ HEAP32[i7 + 8 >> 2] = HEAP32[i5 + 8 >> 2];
+ HEAP32[i5 + 0 >> 2] = HEAP32[i4 + 0 >> 2];
+ HEAP32[i5 + 4 >> 2] = HEAP32[i4 + 4 >> 2];
+ HEAP32[i5 + 8 >> 2] = HEAP32[i4 + 8 >> 2];
+ HEAP32[i4 + 0 >> 2] = HEAP32[i7 + 0 >> 2];
+ HEAP32[i4 + 4 >> 2] = HEAP32[i7 + 4 >> 2];
+ HEAP32[i4 + 8 >> 2] = HEAP32[i7 + 8 >> 2];
+ if (!(FUNCTION_TABLE_iii[HEAP32[i2 >> 2] & 3](i5, i3) | 0)) {
+ i10 = 1;
+ STACKTOP = i1;
+ return i10 | 0;
+ }
+ HEAP32[i7 + 0 >> 2] = HEAP32[i3 + 0 >> 2];
+ HEAP32[i7 + 4 >> 2] = HEAP32[i3 + 4 >> 2];
+ HEAP32[i7 + 8 >> 2] = HEAP32[i3 + 8 >> 2];
+ HEAP32[i3 + 0 >> 2] = HEAP32[i5 + 0 >> 2];
+ HEAP32[i3 + 4 >> 2] = HEAP32[i5 + 4 >> 2];
+ HEAP32[i3 + 8 >> 2] = HEAP32[i5 + 8 >> 2];
+ HEAP32[i5 + 0 >> 2] = HEAP32[i7 + 0 >> 2];
+ HEAP32[i5 + 4 >> 2] = HEAP32[i7 + 4 >> 2];
+ HEAP32[i5 + 8 >> 2] = HEAP32[i7 + 8 >> 2];
+ i10 = 1;
+ STACKTOP = i1;
+ return i10 | 0;
+ }
+ if (i6) {
+ HEAP32[i7 + 0 >> 2] = HEAP32[i3 + 0 >> 2];
+ HEAP32[i7 + 4 >> 2] = HEAP32[i3 + 4 >> 2];
+ HEAP32[i7 + 8 >> 2] = HEAP32[i3 + 8 >> 2];
+ HEAP32[i3 + 0 >> 2] = HEAP32[i4 + 0 >> 2];
+ HEAP32[i3 + 4 >> 2] = HEAP32[i4 + 4 >> 2];
+ HEAP32[i3 + 8 >> 2] = HEAP32[i4 + 8 >> 2];
+ HEAP32[i4 + 0 >> 2] = HEAP32[i7 + 0 >> 2];
+ HEAP32[i4 + 4 >> 2] = HEAP32[i7 + 4 >> 2];
+ HEAP32[i4 + 8 >> 2] = HEAP32[i7 + 8 >> 2];
+ i10 = 1;
+ STACKTOP = i1;
+ return i10 | 0;
+ }
+ HEAP32[i7 + 0 >> 2] = HEAP32[i3 + 0 >> 2];
+ HEAP32[i7 + 4 >> 2] = HEAP32[i3 + 4 >> 2];
+ HEAP32[i7 + 8 >> 2] = HEAP32[i3 + 8 >> 2];
+ HEAP32[i3 + 0 >> 2] = HEAP32[i5 + 0 >> 2];
+ HEAP32[i3 + 4 >> 2] = HEAP32[i5 + 4 >> 2];
+ HEAP32[i3 + 8 >> 2] = HEAP32[i5 + 8 >> 2];
+ HEAP32[i5 + 0 >> 2] = HEAP32[i7 + 0 >> 2];
+ HEAP32[i5 + 4 >> 2] = HEAP32[i7 + 4 >> 2];
+ HEAP32[i5 + 8 >> 2] = HEAP32[i7 + 8 >> 2];
+ if (!(FUNCTION_TABLE_iii[HEAP32[i2 >> 2] & 3](i4, i5) | 0)) {
+ i10 = 1;
+ STACKTOP = i1;
+ return i10 | 0;
+ }
+ HEAP32[i7 + 0 >> 2] = HEAP32[i5 + 0 >> 2];
+ HEAP32[i7 + 4 >> 2] = HEAP32[i5 + 4 >> 2];
+ HEAP32[i7 + 8 >> 2] = HEAP32[i5 + 8 >> 2];
+ HEAP32[i5 + 0 >> 2] = HEAP32[i4 + 0 >> 2];
+ HEAP32[i5 + 4 >> 2] = HEAP32[i4 + 4 >> 2];
+ HEAP32[i5 + 8 >> 2] = HEAP32[i4 + 8 >> 2];
+ HEAP32[i4 + 0 >> 2] = HEAP32[i7 + 0 >> 2];
+ HEAP32[i4 + 4 >> 2] = HEAP32[i7 + 4 >> 2];
+ HEAP32[i4 + 8 >> 2] = HEAP32[i7 + 8 >> 2];
+ i10 = 1;
+ STACKTOP = i1;
+ return i10 | 0;
+ }
+ case 2:
+ {
+ i4 = i4 + -12 | 0;
+ if (!(FUNCTION_TABLE_iii[HEAP32[i2 >> 2] & 3](i4, i3) | 0)) {
+ i10 = 1;
+ STACKTOP = i1;
+ return i10 | 0;
+ }
+ HEAP32[i7 + 0 >> 2] = HEAP32[i3 + 0 >> 2];
+ HEAP32[i7 + 4 >> 2] = HEAP32[i3 + 4 >> 2];
+ HEAP32[i7 + 8 >> 2] = HEAP32[i3 + 8 >> 2];
+ HEAP32[i3 + 0 >> 2] = HEAP32[i4 + 0 >> 2];
+ HEAP32[i3 + 4 >> 2] = HEAP32[i4 + 4 >> 2];
+ HEAP32[i3 + 8 >> 2] = HEAP32[i4 + 8 >> 2];
+ HEAP32[i4 + 0 >> 2] = HEAP32[i7 + 0 >> 2];
+ HEAP32[i4 + 4 >> 2] = HEAP32[i7 + 4 >> 2];
+ HEAP32[i4 + 8 >> 2] = HEAP32[i7 + 8 >> 2];
+ i10 = 1;
+ STACKTOP = i1;
+ return i10 | 0;
+ }
+ case 1:
+ case 0:
+ {
+ i10 = 1;
+ STACKTOP = i1;
+ return i10 | 0;
+ }
+ default:
+ {
+ i9 = i3 + 24 | 0;
+ i10 = i3 + 12 | 0;
+ i11 = FUNCTION_TABLE_iii[HEAP32[i2 >> 2] & 3](i10, i3) | 0;
+ i8 = FUNCTION_TABLE_iii[HEAP32[i2 >> 2] & 3](i9, i10) | 0;
+ do {
+ if (i11) {
+ if (i8) {
+ HEAP32[i7 + 0 >> 2] = HEAP32[i3 + 0 >> 2];
+ HEAP32[i7 + 4 >> 2] = HEAP32[i3 + 4 >> 2];
+ HEAP32[i7 + 8 >> 2] = HEAP32[i3 + 8 >> 2];
+ HEAP32[i3 + 0 >> 2] = HEAP32[i9 + 0 >> 2];
+ HEAP32[i3 + 4 >> 2] = HEAP32[i9 + 4 >> 2];
+ HEAP32[i3 + 8 >> 2] = HEAP32[i9 + 8 >> 2];
+ HEAP32[i9 + 0 >> 2] = HEAP32[i7 + 0 >> 2];
+ HEAP32[i9 + 4 >> 2] = HEAP32[i7 + 4 >> 2];
+ HEAP32[i9 + 8 >> 2] = HEAP32[i7 + 8 >> 2];
+ break;
+ }
+ HEAP32[i7 + 0 >> 2] = HEAP32[i3 + 0 >> 2];
+ HEAP32[i7 + 4 >> 2] = HEAP32[i3 + 4 >> 2];
+ HEAP32[i7 + 8 >> 2] = HEAP32[i3 + 8 >> 2];
+ HEAP32[i3 + 0 >> 2] = HEAP32[i10 + 0 >> 2];
+ HEAP32[i3 + 4 >> 2] = HEAP32[i10 + 4 >> 2];
+ HEAP32[i3 + 8 >> 2] = HEAP32[i10 + 8 >> 2];
+ HEAP32[i10 + 0 >> 2] = HEAP32[i7 + 0 >> 2];
+ HEAP32[i10 + 4 >> 2] = HEAP32[i7 + 4 >> 2];
+ HEAP32[i10 + 8 >> 2] = HEAP32[i7 + 8 >> 2];
+ if (FUNCTION_TABLE_iii[HEAP32[i2 >> 2] & 3](i9, i10) | 0) {
+ HEAP32[i7 + 0 >> 2] = HEAP32[i10 + 0 >> 2];
+ HEAP32[i7 + 4 >> 2] = HEAP32[i10 + 4 >> 2];
+ HEAP32[i7 + 8 >> 2] = HEAP32[i10 + 8 >> 2];
+ HEAP32[i10 + 0 >> 2] = HEAP32[i9 + 0 >> 2];
+ HEAP32[i10 + 4 >> 2] = HEAP32[i9 + 4 >> 2];
+ HEAP32[i10 + 8 >> 2] = HEAP32[i9 + 8 >> 2];
+ HEAP32[i9 + 0 >> 2] = HEAP32[i7 + 0 >> 2];
+ HEAP32[i9 + 4 >> 2] = HEAP32[i7 + 4 >> 2];
+ HEAP32[i9 + 8 >> 2] = HEAP32[i7 + 8 >> 2];
+ }
+ } else {
+ if (i8) {
+ HEAP32[i7 + 0 >> 2] = HEAP32[i10 + 0 >> 2];
+ HEAP32[i7 + 4 >> 2] = HEAP32[i10 + 4 >> 2];
+ HEAP32[i7 + 8 >> 2] = HEAP32[i10 + 8 >> 2];
+ HEAP32[i10 + 0 >> 2] = HEAP32[i9 + 0 >> 2];
+ HEAP32[i10 + 4 >> 2] = HEAP32[i9 + 4 >> 2];
+ HEAP32[i10 + 8 >> 2] = HEAP32[i9 + 8 >> 2];
+ HEAP32[i9 + 0 >> 2] = HEAP32[i7 + 0 >> 2];
+ HEAP32[i9 + 4 >> 2] = HEAP32[i7 + 4 >> 2];
+ HEAP32[i9 + 8 >> 2] = HEAP32[i7 + 8 >> 2];
+ if (FUNCTION_TABLE_iii[HEAP32[i2 >> 2] & 3](i10, i3) | 0) {
+ HEAP32[i7 + 0 >> 2] = HEAP32[i3 + 0 >> 2];
+ HEAP32[i7 + 4 >> 2] = HEAP32[i3 + 4 >> 2];
+ HEAP32[i7 + 8 >> 2] = HEAP32[i3 + 8 >> 2];
+ HEAP32[i3 + 0 >> 2] = HEAP32[i10 + 0 >> 2];
+ HEAP32[i3 + 4 >> 2] = HEAP32[i10 + 4 >> 2];
+ HEAP32[i3 + 8 >> 2] = HEAP32[i10 + 8 >> 2];
+ HEAP32[i10 + 0 >> 2] = HEAP32[i7 + 0 >> 2];
+ HEAP32[i10 + 4 >> 2] = HEAP32[i7 + 4 >> 2];
+ HEAP32[i10 + 8 >> 2] = HEAP32[i7 + 8 >> 2];
+ }
+ }
+ }
+ } while (0);
+ i7 = i3 + 36 | 0;
+ if ((i7 | 0) == (i4 | 0)) {
+ i11 = 1;
+ STACKTOP = i1;
+ return i11 | 0;
+ }
+ i8 = 0;
+ while (1) {
+ if (FUNCTION_TABLE_iii[HEAP32[i2 >> 2] & 3](i7, i9) | 0) {
+ HEAP32[i6 + 0 >> 2] = HEAP32[i7 + 0 >> 2];
+ HEAP32[i6 + 4 >> 2] = HEAP32[i7 + 4 >> 2];
+ HEAP32[i6 + 8 >> 2] = HEAP32[i7 + 8 >> 2];
+ i10 = i7;
+ while (1) {
+ HEAP32[i10 + 0 >> 2] = HEAP32[i9 + 0 >> 2];
+ HEAP32[i10 + 4 >> 2] = HEAP32[i9 + 4 >> 2];
+ HEAP32[i10 + 8 >> 2] = HEAP32[i9 + 8 >> 2];
+ if ((i9 | 0) == (i3 | 0)) {
+ break;
+ }
+ i10 = i9 + -12 | 0;
+ if (FUNCTION_TABLE_iii[HEAP32[i2 >> 2] & 3](i6, i10) | 0) {
+ i11 = i9;
+ i9 = i10;
+ i10 = i11;
+ } else {
+ break;
+ }
+ }
+ HEAP32[i9 + 0 >> 2] = HEAP32[i6 + 0 >> 2];
+ HEAP32[i9 + 4 >> 2] = HEAP32[i6 + 4 >> 2];
+ HEAP32[i9 + 8 >> 2] = HEAP32[i6 + 8 >> 2];
+ i8 = i8 + 1 | 0;
+ if ((i8 | 0) == 8) {
+ break;
+ }
+ }
+ i9 = i7 + 12 | 0;
+ if ((i9 | 0) == (i4 | 0)) {
+ i2 = 1;
+ i5 = 35;
+ break;
+ } else {
+ i11 = i7;
+ i7 = i9;
+ i9 = i11;
+ }
+ }
+ if ((i5 | 0) == 35) {
+ STACKTOP = i1;
+ return i2 | 0;
+ }
+ i11 = (i7 + 12 | 0) == (i4 | 0);
+ STACKTOP = i1;
+ return i11 | 0;
+ }
+ }
+ return 0;
+}
+function __ZN13b2DynamicTree7BalanceEi(i11, i6) {
+ i11 = i11 | 0;
+ i6 = i6 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i5 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, d19 = 0.0, i20 = 0, i21 = 0, d22 = 0.0, d23 = 0.0, d24 = 0.0, d25 = 0.0;
+ i1 = STACKTOP;
+ if ((i6 | 0) == -1) {
+ ___assert_fail(3216, 2944, 382, 3232);
+ }
+ i5 = HEAP32[i11 + 4 >> 2] | 0;
+ i13 = i5 + (i6 * 36 | 0) | 0;
+ i18 = i5 + (i6 * 36 | 0) + 24 | 0;
+ i8 = HEAP32[i18 >> 2] | 0;
+ if ((i8 | 0) == -1) {
+ i21 = i6;
+ STACKTOP = i1;
+ return i21 | 0;
+ }
+ i2 = i5 + (i6 * 36 | 0) + 32 | 0;
+ if ((HEAP32[i2 >> 2] | 0) < 2) {
+ i21 = i6;
+ STACKTOP = i1;
+ return i21 | 0;
+ }
+ i20 = i5 + (i6 * 36 | 0) + 28 | 0;
+ i7 = HEAP32[i20 >> 2] | 0;
+ if (!((i8 | 0) > -1)) {
+ ___assert_fail(3240, 2944, 392, 3232);
+ }
+ i12 = HEAP32[i11 + 12 >> 2] | 0;
+ if ((i8 | 0) >= (i12 | 0)) {
+ ___assert_fail(3240, 2944, 392, 3232);
+ }
+ if (!((i7 | 0) > -1 & (i7 | 0) < (i12 | 0))) {
+ ___assert_fail(3272, 2944, 393, 3232);
+ }
+ i9 = i5 + (i8 * 36 | 0) | 0;
+ i10 = i5 + (i7 * 36 | 0) | 0;
+ i3 = i5 + (i7 * 36 | 0) + 32 | 0;
+ i4 = i5 + (i8 * 36 | 0) + 32 | 0;
+ i14 = (HEAP32[i3 >> 2] | 0) - (HEAP32[i4 >> 2] | 0) | 0;
+ if ((i14 | 0) > 1) {
+ i21 = i5 + (i7 * 36 | 0) + 24 | 0;
+ i14 = HEAP32[i21 >> 2] | 0;
+ i18 = i5 + (i7 * 36 | 0) + 28 | 0;
+ i15 = HEAP32[i18 >> 2] | 0;
+ i16 = i5 + (i14 * 36 | 0) | 0;
+ i17 = i5 + (i15 * 36 | 0) | 0;
+ if (!((i14 | 0) > -1 & (i14 | 0) < (i12 | 0))) {
+ ___assert_fail(3304, 2944, 407, 3232);
+ }
+ if (!((i15 | 0) > -1 & (i15 | 0) < (i12 | 0))) {
+ ___assert_fail(3336, 2944, 408, 3232);
+ }
+ HEAP32[i21 >> 2] = i6;
+ i21 = i5 + (i6 * 36 | 0) + 20 | 0;
+ i12 = i5 + (i7 * 36 | 0) + 20 | 0;
+ HEAP32[i12 >> 2] = HEAP32[i21 >> 2];
+ HEAP32[i21 >> 2] = i7;
+ i12 = HEAP32[i12 >> 2] | 0;
+ do {
+ if (!((i12 | 0) == -1)) {
+ i11 = i5 + (i12 * 36 | 0) + 24 | 0;
+ if ((HEAP32[i11 >> 2] | 0) == (i6 | 0)) {
+ HEAP32[i11 >> 2] = i7;
+ break;
+ }
+ i11 = i5 + (i12 * 36 | 0) + 28 | 0;
+ if ((HEAP32[i11 >> 2] | 0) == (i6 | 0)) {
+ HEAP32[i11 >> 2] = i7;
+ break;
+ } else {
+ ___assert_fail(3368, 2944, 424, 3232);
+ }
+ } else {
+ HEAP32[i11 >> 2] = i7;
+ }
+ } while (0);
+ i11 = i5 + (i14 * 36 | 0) + 32 | 0;
+ i12 = i5 + (i15 * 36 | 0) + 32 | 0;
+ if ((HEAP32[i11 >> 2] | 0) > (HEAP32[i12 >> 2] | 0)) {
+ HEAP32[i18 >> 2] = i14;
+ HEAP32[i20 >> 2] = i15;
+ HEAP32[i5 + (i15 * 36 | 0) + 20 >> 2] = i6;
+ d19 = +HEAPF32[i9 >> 2];
+ d22 = +HEAPF32[i17 >> 2];
+ d19 = d19 < d22 ? d19 : d22;
+ d23 = +HEAPF32[i5 + (i8 * 36 | 0) + 4 >> 2];
+ d22 = +HEAPF32[i5 + (i15 * 36 | 0) + 4 >> 2];
+ d24 = +d19;
+ d23 = +(d23 < d22 ? d23 : d22);
+ i21 = i13;
+ HEAPF32[i21 >> 2] = d24;
+ HEAPF32[i21 + 4 >> 2] = d23;
+ d23 = +HEAPF32[i5 + (i8 * 36 | 0) + 8 >> 2];
+ d24 = +HEAPF32[i5 + (i15 * 36 | 0) + 8 >> 2];
+ d22 = +HEAPF32[i5 + (i8 * 36 | 0) + 12 >> 2];
+ d25 = +HEAPF32[i5 + (i15 * 36 | 0) + 12 >> 2];
+ d23 = +(d23 > d24 ? d23 : d24);
+ d24 = +(d22 > d25 ? d22 : d25);
+ i21 = i5 + (i6 * 36 | 0) + 8 | 0;
+ HEAPF32[i21 >> 2] = d23;
+ HEAPF32[i21 + 4 >> 2] = d24;
+ d24 = +HEAPF32[i16 >> 2];
+ d22 = +HEAPF32[i5 + (i6 * 36 | 0) + 4 >> 2];
+ d23 = +HEAPF32[i5 + (i14 * 36 | 0) + 4 >> 2];
+ d19 = +(d19 < d24 ? d19 : d24);
+ d22 = +(d22 < d23 ? d22 : d23);
+ i21 = i10;
+ HEAPF32[i21 >> 2] = d19;
+ HEAPF32[i21 + 4 >> 2] = d22;
+ d22 = +HEAPF32[i5 + (i6 * 36 | 0) + 8 >> 2];
+ d19 = +HEAPF32[i5 + (i14 * 36 | 0) + 8 >> 2];
+ d23 = +HEAPF32[i5 + (i6 * 36 | 0) + 12 >> 2];
+ d24 = +HEAPF32[i5 + (i14 * 36 | 0) + 12 >> 2];
+ d19 = +(d22 > d19 ? d22 : d19);
+ d25 = +(d23 > d24 ? d23 : d24);
+ i5 = i5 + (i7 * 36 | 0) + 8 | 0;
+ HEAPF32[i5 >> 2] = d19;
+ HEAPF32[i5 + 4 >> 2] = d25;
+ i4 = HEAP32[i4 >> 2] | 0;
+ i5 = HEAP32[i12 >> 2] | 0;
+ i4 = ((i4 | 0) > (i5 | 0) ? i4 : i5) + 1 | 0;
+ HEAP32[i2 >> 2] = i4;
+ i2 = HEAP32[i11 >> 2] | 0;
+ i2 = (i4 | 0) > (i2 | 0) ? i4 : i2;
+ } else {
+ HEAP32[i18 >> 2] = i15;
+ HEAP32[i20 >> 2] = i14;
+ HEAP32[i5 + (i14 * 36 | 0) + 20 >> 2] = i6;
+ d19 = +HEAPF32[i9 >> 2];
+ d22 = +HEAPF32[i16 >> 2];
+ d19 = d19 < d22 ? d19 : d22;
+ d23 = +HEAPF32[i5 + (i8 * 36 | 0) + 4 >> 2];
+ d24 = +HEAPF32[i5 + (i14 * 36 | 0) + 4 >> 2];
+ d22 = +d19;
+ d23 = +(d23 < d24 ? d23 : d24);
+ i21 = i13;
+ HEAPF32[i21 >> 2] = d22;
+ HEAPF32[i21 + 4 >> 2] = d23;
+ d23 = +HEAPF32[i5 + (i8 * 36 | 0) + 8 >> 2];
+ d24 = +HEAPF32[i5 + (i14 * 36 | 0) + 8 >> 2];
+ d22 = +HEAPF32[i5 + (i8 * 36 | 0) + 12 >> 2];
+ d25 = +HEAPF32[i5 + (i14 * 36 | 0) + 12 >> 2];
+ d23 = +(d23 > d24 ? d23 : d24);
+ d24 = +(d22 > d25 ? d22 : d25);
+ i21 = i5 + (i6 * 36 | 0) + 8 | 0;
+ HEAPF32[i21 >> 2] = d23;
+ HEAPF32[i21 + 4 >> 2] = d24;
+ d24 = +HEAPF32[i17 >> 2];
+ d22 = +HEAPF32[i5 + (i6 * 36 | 0) + 4 >> 2];
+ d23 = +HEAPF32[i5 + (i15 * 36 | 0) + 4 >> 2];
+ d19 = +(d19 < d24 ? d19 : d24);
+ d23 = +(d22 < d23 ? d22 : d23);
+ i21 = i10;
+ HEAPF32[i21 >> 2] = d19;
+ HEAPF32[i21 + 4 >> 2] = d23;
+ d23 = +HEAPF32[i5 + (i6 * 36 | 0) + 8 >> 2];
+ d19 = +HEAPF32[i5 + (i15 * 36 | 0) + 8 >> 2];
+ d22 = +HEAPF32[i5 + (i6 * 36 | 0) + 12 >> 2];
+ d24 = +HEAPF32[i5 + (i15 * 36 | 0) + 12 >> 2];
+ d19 = +(d23 > d19 ? d23 : d19);
+ d25 = +(d22 > d24 ? d22 : d24);
+ i5 = i5 + (i7 * 36 | 0) + 8 | 0;
+ HEAPF32[i5 >> 2] = d19;
+ HEAPF32[i5 + 4 >> 2] = d25;
+ i4 = HEAP32[i4 >> 2] | 0;
+ i5 = HEAP32[i11 >> 2] | 0;
+ i4 = ((i4 | 0) > (i5 | 0) ? i4 : i5) + 1 | 0;
+ HEAP32[i2 >> 2] = i4;
+ i2 = HEAP32[i12 >> 2] | 0;
+ i2 = (i4 | 0) > (i2 | 0) ? i4 : i2;
+ }
+ HEAP32[i3 >> 2] = i2 + 1;
+ i21 = i7;
+ STACKTOP = i1;
+ return i21 | 0;
+ }
+ if (!((i14 | 0) < -1)) {
+ i21 = i6;
+ STACKTOP = i1;
+ return i21 | 0;
+ }
+ i21 = i5 + (i8 * 36 | 0) + 24 | 0;
+ i14 = HEAP32[i21 >> 2] | 0;
+ i20 = i5 + (i8 * 36 | 0) + 28 | 0;
+ i15 = HEAP32[i20 >> 2] | 0;
+ i17 = i5 + (i14 * 36 | 0) | 0;
+ i16 = i5 + (i15 * 36 | 0) | 0;
+ if (!((i14 | 0) > -1 & (i14 | 0) < (i12 | 0))) {
+ ___assert_fail(3400, 2944, 467, 3232);
+ }
+ if (!((i15 | 0) > -1 & (i15 | 0) < (i12 | 0))) {
+ ___assert_fail(3432, 2944, 468, 3232);
+ }
+ HEAP32[i21 >> 2] = i6;
+ i21 = i5 + (i6 * 36 | 0) + 20 | 0;
+ i12 = i5 + (i8 * 36 | 0) + 20 | 0;
+ HEAP32[i12 >> 2] = HEAP32[i21 >> 2];
+ HEAP32[i21 >> 2] = i8;
+ i12 = HEAP32[i12 >> 2] | 0;
+ do {
+ if (!((i12 | 0) == -1)) {
+ i11 = i5 + (i12 * 36 | 0) + 24 | 0;
+ if ((HEAP32[i11 >> 2] | 0) == (i6 | 0)) {
+ HEAP32[i11 >> 2] = i8;
+ break;
+ }
+ i11 = i5 + (i12 * 36 | 0) + 28 | 0;
+ if ((HEAP32[i11 >> 2] | 0) == (i6 | 0)) {
+ HEAP32[i11 >> 2] = i8;
+ break;
+ } else {
+ ___assert_fail(3464, 2944, 484, 3232);
+ }
+ } else {
+ HEAP32[i11 >> 2] = i8;
+ }
+ } while (0);
+ i12 = i5 + (i14 * 36 | 0) + 32 | 0;
+ i11 = i5 + (i15 * 36 | 0) + 32 | 0;
+ if ((HEAP32[i12 >> 2] | 0) > (HEAP32[i11 >> 2] | 0)) {
+ HEAP32[i20 >> 2] = i14;
+ HEAP32[i18 >> 2] = i15;
+ HEAP32[i5 + (i15 * 36 | 0) + 20 >> 2] = i6;
+ d19 = +HEAPF32[i10 >> 2];
+ d22 = +HEAPF32[i16 >> 2];
+ d19 = d19 < d22 ? d19 : d22;
+ d23 = +HEAPF32[i5 + (i7 * 36 | 0) + 4 >> 2];
+ d22 = +HEAPF32[i5 + (i15 * 36 | 0) + 4 >> 2];
+ d24 = +d19;
+ d23 = +(d23 < d22 ? d23 : d22);
+ i21 = i13;
+ HEAPF32[i21 >> 2] = d24;
+ HEAPF32[i21 + 4 >> 2] = d23;
+ d23 = +HEAPF32[i5 + (i7 * 36 | 0) + 8 >> 2];
+ d22 = +HEAPF32[i5 + (i15 * 36 | 0) + 8 >> 2];
+ d24 = +HEAPF32[i5 + (i7 * 36 | 0) + 12 >> 2];
+ d25 = +HEAPF32[i5 + (i15 * 36 | 0) + 12 >> 2];
+ d22 = +(d23 > d22 ? d23 : d22);
+ d24 = +(d24 > d25 ? d24 : d25);
+ i21 = i5 + (i6 * 36 | 0) + 8 | 0;
+ HEAPF32[i21 >> 2] = d22;
+ HEAPF32[i21 + 4 >> 2] = d24;
+ d24 = +HEAPF32[i17 >> 2];
+ d23 = +HEAPF32[i5 + (i6 * 36 | 0) + 4 >> 2];
+ d22 = +HEAPF32[i5 + (i14 * 36 | 0) + 4 >> 2];
+ d19 = +(d19 < d24 ? d19 : d24);
+ d22 = +(d23 < d22 ? d23 : d22);
+ i21 = i9;
+ HEAPF32[i21 >> 2] = d19;
+ HEAPF32[i21 + 4 >> 2] = d22;
+ d22 = +HEAPF32[i5 + (i6 * 36 | 0) + 8 >> 2];
+ d23 = +HEAPF32[i5 + (i14 * 36 | 0) + 8 >> 2];
+ d19 = +HEAPF32[i5 + (i6 * 36 | 0) + 12 >> 2];
+ d24 = +HEAPF32[i5 + (i14 * 36 | 0) + 12 >> 2];
+ d22 = +(d22 > d23 ? d22 : d23);
+ d25 = +(d19 > d24 ? d19 : d24);
+ i5 = i5 + (i8 * 36 | 0) + 8 | 0;
+ HEAPF32[i5 >> 2] = d22;
+ HEAPF32[i5 + 4 >> 2] = d25;
+ i3 = HEAP32[i3 >> 2] | 0;
+ i5 = HEAP32[i11 >> 2] | 0;
+ i3 = ((i3 | 0) > (i5 | 0) ? i3 : i5) + 1 | 0;
+ HEAP32[i2 >> 2] = i3;
+ i2 = HEAP32[i12 >> 2] | 0;
+ i2 = (i3 | 0) > (i2 | 0) ? i3 : i2;
+ } else {
+ HEAP32[i20 >> 2] = i15;
+ HEAP32[i18 >> 2] = i14;
+ HEAP32[i5 + (i14 * 36 | 0) + 20 >> 2] = i6;
+ d19 = +HEAPF32[i10 >> 2];
+ d22 = +HEAPF32[i17 >> 2];
+ d19 = d19 < d22 ? d19 : d22;
+ d23 = +HEAPF32[i5 + (i7 * 36 | 0) + 4 >> 2];
+ d24 = +HEAPF32[i5 + (i14 * 36 | 0) + 4 >> 2];
+ d22 = +d19;
+ d24 = +(d23 < d24 ? d23 : d24);
+ i21 = i13;
+ HEAPF32[i21 >> 2] = d22;
+ HEAPF32[i21 + 4 >> 2] = d24;
+ d24 = +HEAPF32[i5 + (i7 * 36 | 0) + 8 >> 2];
+ d23 = +HEAPF32[i5 + (i14 * 36 | 0) + 8 >> 2];
+ d22 = +HEAPF32[i5 + (i7 * 36 | 0) + 12 >> 2];
+ d25 = +HEAPF32[i5 + (i14 * 36 | 0) + 12 >> 2];
+ d23 = +(d24 > d23 ? d24 : d23);
+ d24 = +(d22 > d25 ? d22 : d25);
+ i21 = i5 + (i6 * 36 | 0) + 8 | 0;
+ HEAPF32[i21 >> 2] = d23;
+ HEAPF32[i21 + 4 >> 2] = d24;
+ d24 = +HEAPF32[i16 >> 2];
+ d23 = +HEAPF32[i5 + (i6 * 36 | 0) + 4 >> 2];
+ d22 = +HEAPF32[i5 + (i15 * 36 | 0) + 4 >> 2];
+ d19 = +(d19 < d24 ? d19 : d24);
+ d22 = +(d23 < d22 ? d23 : d22);
+ i21 = i9;
+ HEAPF32[i21 >> 2] = d19;
+ HEAPF32[i21 + 4 >> 2] = d22;
+ d22 = +HEAPF32[i5 + (i6 * 36 | 0) + 8 >> 2];
+ d23 = +HEAPF32[i5 + (i15 * 36 | 0) + 8 >> 2];
+ d19 = +HEAPF32[i5 + (i6 * 36 | 0) + 12 >> 2];
+ d24 = +HEAPF32[i5 + (i15 * 36 | 0) + 12 >> 2];
+ d22 = +(d22 > d23 ? d22 : d23);
+ d25 = +(d19 > d24 ? d19 : d24);
+ i5 = i5 + (i8 * 36 | 0) + 8 | 0;
+ HEAPF32[i5 >> 2] = d22;
+ HEAPF32[i5 + 4 >> 2] = d25;
+ i3 = HEAP32[i3 >> 2] | 0;
+ i5 = HEAP32[i12 >> 2] | 0;
+ i3 = ((i3 | 0) > (i5 | 0) ? i3 : i5) + 1 | 0;
+ HEAP32[i2 >> 2] = i3;
+ i2 = HEAP32[i11 >> 2] | 0;
+ i2 = (i3 | 0) > (i2 | 0) ? i3 : i2;
+ }
+ HEAP32[i4 >> 2] = i2 + 1;
+ i21 = i8;
+ STACKTOP = i1;
+ return i21 | 0;
+}
+function __Z10b2DistanceP16b2DistanceOutputP14b2SimplexCachePK15b2DistanceInput(i2, i5, i3) {
+ i2 = i2 | 0;
+ i5 = i5 | 0;
+ i3 = i3 | 0;
+ var i1 = 0, i4 = 0, i6 = 0, d7 = 0.0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, d16 = 0.0, d17 = 0.0, d18 = 0.0, d19 = 0.0, i20 = 0, d21 = 0.0, d22 = 0.0, i23 = 0, d24 = 0.0, d25 = 0.0, i26 = 0, i27 = 0, i28 = 0, i29 = 0, i30 = 0, i31 = 0, i32 = 0, i33 = 0, i34 = 0, i35 = 0, d36 = 0.0, d37 = 0.0, d38 = 0.0, i39 = 0, i40 = 0, i41 = 0, i42 = 0, d43 = 0.0, d44 = 0.0, d45 = 0.0, i46 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 176 | 0;
+ i11 = i1 + 152 | 0;
+ i10 = i1 + 136 | 0;
+ i4 = i1 + 24 | 0;
+ i14 = i1 + 12 | 0;
+ i15 = i1;
+ HEAP32[652] = (HEAP32[652] | 0) + 1;
+ i9 = i3 + 28 | 0;
+ i31 = i3 + 56 | 0;
+ HEAP32[i11 + 0 >> 2] = HEAP32[i31 + 0 >> 2];
+ HEAP32[i11 + 4 >> 2] = HEAP32[i31 + 4 >> 2];
+ HEAP32[i11 + 8 >> 2] = HEAP32[i31 + 8 >> 2];
+ HEAP32[i11 + 12 >> 2] = HEAP32[i31 + 12 >> 2];
+ i31 = i3 + 72 | 0;
+ HEAP32[i10 + 0 >> 2] = HEAP32[i31 + 0 >> 2];
+ HEAP32[i10 + 4 >> 2] = HEAP32[i31 + 4 >> 2];
+ HEAP32[i10 + 8 >> 2] = HEAP32[i31 + 8 >> 2];
+ HEAP32[i10 + 12 >> 2] = HEAP32[i31 + 12 >> 2];
+ __ZN9b2Simplex9ReadCacheEPK14b2SimplexCachePK15b2DistanceProxyRK11b2TransformS5_S8_(i4, i5, i3, i11, i9, i10);
+ i9 = i4 + 108 | 0;
+ i31 = HEAP32[i9 >> 2] | 0;
+ if ((i31 | 0) == 3 | (i31 | 0) == 2 | (i31 | 0) == 1) {
+ i8 = i4 + 16 | 0;
+ i6 = i4 + 20 | 0;
+ d17 = +HEAPF32[i11 + 12 >> 2];
+ d18 = +HEAPF32[i11 + 8 >> 2];
+ i13 = i3 + 16 | 0;
+ i12 = i3 + 20 | 0;
+ d16 = +HEAPF32[i11 >> 2];
+ d21 = +HEAPF32[i11 + 4 >> 2];
+ d19 = +HEAPF32[i10 + 12 >> 2];
+ d22 = +HEAPF32[i10 + 8 >> 2];
+ i23 = i3 + 44 | 0;
+ i20 = i3 + 48 | 0;
+ d24 = +HEAPF32[i10 >> 2];
+ d25 = +HEAPF32[i10 + 4 >> 2];
+ i11 = i4 + 52 | 0;
+ i10 = i4 + 56 | 0;
+ i30 = i4 + 16 | 0;
+ i27 = i4 + 36 | 0;
+ i26 = i4 + 52 | 0;
+ i29 = i4 + 24 | 0;
+ i28 = i4 + 60 | 0;
+ i33 = 0;
+ L3 : while (1) {
+ i32 = (i31 | 0) > 0;
+ if (i32) {
+ i34 = 0;
+ do {
+ HEAP32[i14 + (i34 << 2) >> 2] = HEAP32[i4 + (i34 * 36 | 0) + 28 >> 2];
+ HEAP32[i15 + (i34 << 2) >> 2] = HEAP32[i4 + (i34 * 36 | 0) + 32 >> 2];
+ i34 = i34 + 1 | 0;
+ } while ((i34 | 0) != (i31 | 0));
+ }
+ do {
+ if ((i31 | 0) == 2) {
+ i46 = i30;
+ d45 = +HEAPF32[i46 >> 2];
+ d36 = +HEAPF32[i46 + 4 >> 2];
+ i46 = i26;
+ d38 = +HEAPF32[i46 >> 2];
+ d37 = +HEAPF32[i46 + 4 >> 2];
+ d43 = d38 - d45;
+ d44 = d37 - d36;
+ d36 = d45 * d43 + d36 * d44;
+ if (d36 >= -0.0) {
+ HEAPF32[i29 >> 2] = 1.0;
+ HEAP32[i9 >> 2] = 1;
+ i35 = 17;
+ break;
+ }
+ d37 = d38 * d43 + d37 * d44;
+ if (!(d37 <= 0.0)) {
+ d45 = 1.0 / (d37 - d36);
+ HEAPF32[i29 >> 2] = d37 * d45;
+ HEAPF32[i28 >> 2] = -(d36 * d45);
+ HEAP32[i9 >> 2] = 2;
+ i35 = 18;
+ break;
+ } else {
+ HEAPF32[i28 >> 2] = 1.0;
+ HEAP32[i9 >> 2] = 1;
+ i34 = i4 + 0 | 0;
+ i39 = i27 + 0 | 0;
+ i35 = i34 + 36 | 0;
+ do {
+ HEAP32[i34 >> 2] = HEAP32[i39 >> 2];
+ i34 = i34 + 4 | 0;
+ i39 = i39 + 4 | 0;
+ } while ((i34 | 0) < (i35 | 0));
+ i35 = 17;
+ break;
+ }
+ } else if ((i31 | 0) == 3) {
+ __ZN9b2Simplex6Solve3Ev(i4);
+ i34 = HEAP32[i9 >> 2] | 0;
+ if ((i34 | 0) == 1) {
+ i35 = 17;
+ } else if ((i34 | 0) == 0) {
+ i35 = 15;
+ break L3;
+ } else if ((i34 | 0) == 2) {
+ i35 = 18;
+ } else if ((i34 | 0) == 3) {
+ i35 = 42;
+ break L3;
+ } else {
+ i35 = 16;
+ break L3;
+ }
+ } else if ((i31 | 0) == 1) {
+ i35 = 17;
+ } else {
+ i35 = 13;
+ break L3;
+ }
+ } while (0);
+ do {
+ if ((i35 | 0) == 17) {
+ d36 = -+HEAPF32[i8 >> 2];
+ d37 = -+HEAPF32[i6 >> 2];
+ i34 = 1;
+ } else if ((i35 | 0) == 18) {
+ d44 = +HEAPF32[i8 >> 2];
+ d37 = +HEAPF32[i11 >> 2] - d44;
+ d45 = +HEAPF32[i6 >> 2];
+ d36 = +HEAPF32[i10 >> 2] - d45;
+ if (d44 * d36 - d37 * d45 > 0.0) {
+ d36 = -d36;
+ i34 = 2;
+ break;
+ } else {
+ d37 = -d37;
+ i34 = 2;
+ break;
+ }
+ }
+ } while (0);
+ if (d37 * d37 + d36 * d36 < 1.4210854715202004e-14) {
+ i35 = 42;
+ break;
+ }
+ i39 = i4 + (i34 * 36 | 0) | 0;
+ d44 = -d36;
+ d45 = -d37;
+ d43 = d17 * d44 + d18 * d45;
+ d44 = d17 * d45 - d18 * d44;
+ i40 = HEAP32[i13 >> 2] | 0;
+ i41 = HEAP32[i12 >> 2] | 0;
+ if ((i41 | 0) > 1) {
+ i42 = 0;
+ d45 = d44 * +HEAPF32[i40 + 4 >> 2] + d43 * +HEAPF32[i40 >> 2];
+ i46 = 1;
+ while (1) {
+ d38 = d43 * +HEAPF32[i40 + (i46 << 3) >> 2] + d44 * +HEAPF32[i40 + (i46 << 3) + 4 >> 2];
+ i35 = d38 > d45;
+ i42 = i35 ? i46 : i42;
+ i46 = i46 + 1 | 0;
+ if ((i46 | 0) == (i41 | 0)) {
+ break;
+ } else {
+ d45 = i35 ? d38 : d45;
+ }
+ }
+ i35 = i4 + (i34 * 36 | 0) + 28 | 0;
+ HEAP32[i35 >> 2] = i42;
+ if (!((i42 | 0) > -1)) {
+ i35 = 28;
+ break;
+ }
+ } else {
+ i35 = i4 + (i34 * 36 | 0) + 28 | 0;
+ HEAP32[i35 >> 2] = 0;
+ i42 = 0;
+ }
+ if ((i41 | 0) <= (i42 | 0)) {
+ i35 = 28;
+ break;
+ }
+ d45 = +HEAPF32[i40 + (i42 << 3) >> 2];
+ d43 = +HEAPF32[i40 + (i42 << 3) + 4 >> 2];
+ d38 = d16 + (d17 * d45 - d18 * d43);
+ d44 = +d38;
+ d43 = +(d45 * d18 + d17 * d43 + d21);
+ i40 = i39;
+ HEAPF32[i40 >> 2] = d44;
+ HEAPF32[i40 + 4 >> 2] = d43;
+ d43 = d36 * d19 + d37 * d22;
+ d44 = d37 * d19 - d36 * d22;
+ i40 = HEAP32[i23 >> 2] | 0;
+ i39 = HEAP32[i20 >> 2] | 0;
+ if ((i39 | 0) > 1) {
+ i41 = 0;
+ d37 = d44 * +HEAPF32[i40 + 4 >> 2] + d43 * +HEAPF32[i40 >> 2];
+ i42 = 1;
+ while (1) {
+ d36 = d43 * +HEAPF32[i40 + (i42 << 3) >> 2] + d44 * +HEAPF32[i40 + (i42 << 3) + 4 >> 2];
+ i46 = d36 > d37;
+ i41 = i46 ? i42 : i41;
+ i42 = i42 + 1 | 0;
+ if ((i42 | 0) == (i39 | 0)) {
+ break;
+ } else {
+ d37 = i46 ? d36 : d37;
+ }
+ }
+ i42 = i4 + (i34 * 36 | 0) + 32 | 0;
+ HEAP32[i42 >> 2] = i41;
+ if (!((i41 | 0) > -1)) {
+ i35 = 35;
+ break;
+ }
+ } else {
+ i42 = i4 + (i34 * 36 | 0) + 32 | 0;
+ HEAP32[i42 >> 2] = 0;
+ i41 = 0;
+ }
+ if ((i39 | 0) <= (i41 | 0)) {
+ i35 = 35;
+ break;
+ }
+ d37 = +HEAPF32[i40 + (i41 << 3) >> 2];
+ d45 = +HEAPF32[i40 + (i41 << 3) + 4 >> 2];
+ d44 = d24 + (d19 * d37 - d22 * d45);
+ d43 = +d44;
+ d45 = +(d37 * d22 + d19 * d45 + d25);
+ i46 = i4 + (i34 * 36 | 0) + 8 | 0;
+ HEAPF32[i46 >> 2] = d43;
+ HEAPF32[i46 + 4 >> 2] = d45;
+ d44 = +(d44 - d38);
+ d45 = +(+HEAPF32[i4 + (i34 * 36 | 0) + 12 >> 2] - +HEAPF32[i4 + (i34 * 36 | 0) + 4 >> 2]);
+ i46 = i4 + (i34 * 36 | 0) + 16 | 0;
+ HEAPF32[i46 >> 2] = d44;
+ HEAPF32[i46 + 4 >> 2] = d45;
+ i33 = i33 + 1 | 0;
+ HEAP32[654] = (HEAP32[654] | 0) + 1;
+ if (i32) {
+ i34 = HEAP32[i35 >> 2] | 0;
+ i32 = 0;
+ do {
+ if ((i34 | 0) == (HEAP32[i14 + (i32 << 2) >> 2] | 0) ? (HEAP32[i42 >> 2] | 0) == (HEAP32[i15 + (i32 << 2) >> 2] | 0) : 0) {
+ i35 = 42;
+ break L3;
+ }
+ i32 = i32 + 1 | 0;
+ } while ((i32 | 0) < (i31 | 0));
+ }
+ i31 = (HEAP32[i9 >> 2] | 0) + 1 | 0;
+ HEAP32[i9 >> 2] = i31;
+ if ((i33 | 0) >= 20) {
+ i35 = 42;
+ break;
+ }
+ }
+ if ((i35 | 0) == 13) {
+ ___assert_fail(2712, 2672, 498, 2720);
+ } else if ((i35 | 0) == 15) {
+ ___assert_fail(2712, 2672, 194, 2856);
+ } else if ((i35 | 0) == 16) {
+ ___assert_fail(2712, 2672, 207, 2856);
+ } else if ((i35 | 0) == 28) {
+ ___assert_fail(2776, 2808, 103, 2840);
+ } else if ((i35 | 0) == 35) {
+ ___assert_fail(2776, 2808, 103, 2840);
+ } else if ((i35 | 0) == 42) {
+ i12 = HEAP32[656] | 0;
+ HEAP32[656] = (i12 | 0) > (i33 | 0) ? i12 : i33;
+ i14 = i2 + 8 | 0;
+ __ZNK9b2Simplex16GetWitnessPointsEP6b2Vec2S1_(i4, i2, i14);
+ d44 = +HEAPF32[i2 >> 2] - +HEAPF32[i14 >> 2];
+ i13 = i2 + 4 | 0;
+ i12 = i2 + 12 | 0;
+ d45 = +HEAPF32[i13 >> 2] - +HEAPF32[i12 >> 2];
+ i15 = i2 + 16 | 0;
+ HEAPF32[i15 >> 2] = +Math_sqrt(+(d44 * d44 + d45 * d45));
+ HEAP32[i2 + 20 >> 2] = i33;
+ i9 = HEAP32[i9 >> 2] | 0;
+ if ((i9 | 0) == 2) {
+ d45 = +HEAPF32[i8 >> 2] - +HEAPF32[i11 >> 2];
+ d7 = +HEAPF32[i6 >> 2] - +HEAPF32[i10 >> 2];
+ d7 = +Math_sqrt(+(d45 * d45 + d7 * d7));
+ } else if ((i9 | 0) == 3) {
+ d7 = +HEAPF32[i8 >> 2];
+ d45 = +HEAPF32[i6 >> 2];
+ d7 = (+HEAPF32[i11 >> 2] - d7) * (+HEAPF32[i4 + 92 >> 2] - d45) - (+HEAPF32[i10 >> 2] - d45) * (+HEAPF32[i4 + 88 >> 2] - d7);
+ } else if ((i9 | 0) == 1) {
+ d7 = 0.0;
+ } else if ((i9 | 0) == 0) {
+ ___assert_fail(2712, 2672, 246, 2736);
+ } else {
+ ___assert_fail(2712, 2672, 259, 2736);
+ }
+ HEAPF32[i5 >> 2] = d7;
+ HEAP16[i5 + 4 >> 1] = i9;
+ i6 = 0;
+ do {
+ HEAP8[i5 + i6 + 6 | 0] = HEAP32[i4 + (i6 * 36 | 0) + 28 >> 2];
+ HEAP8[i5 + i6 + 9 | 0] = HEAP32[i4 + (i6 * 36 | 0) + 32 >> 2];
+ i6 = i6 + 1 | 0;
+ } while ((i6 | 0) < (i9 | 0));
+ if ((HEAP8[i3 + 88 | 0] | 0) == 0) {
+ STACKTOP = i1;
+ return;
+ }
+ d7 = +HEAPF32[i3 + 24 >> 2];
+ d16 = +HEAPF32[i3 + 52 >> 2];
+ d18 = +HEAPF32[i15 >> 2];
+ d17 = d7 + d16;
+ if (!(d18 > d17 & d18 > 1.1920928955078125e-7)) {
+ d44 = +((+HEAPF32[i2 >> 2] + +HEAPF32[i14 >> 2]) * .5);
+ d45 = +((+HEAPF32[i13 >> 2] + +HEAPF32[i12 >> 2]) * .5);
+ i46 = i2;
+ HEAPF32[i46 >> 2] = d44;
+ HEAPF32[i46 + 4 >> 2] = d45;
+ i46 = i14;
+ HEAPF32[i46 >> 2] = d44;
+ HEAPF32[i46 + 4 >> 2] = d45;
+ HEAPF32[i15 >> 2] = 0.0;
+ STACKTOP = i1;
+ return;
+ }
+ HEAPF32[i15 >> 2] = d18 - d17;
+ d18 = +HEAPF32[i14 >> 2];
+ d21 = +HEAPF32[i2 >> 2];
+ d24 = d18 - d21;
+ d17 = +HEAPF32[i12 >> 2];
+ d19 = +HEAPF32[i13 >> 2];
+ d22 = d17 - d19;
+ d25 = +Math_sqrt(+(d24 * d24 + d22 * d22));
+ if (!(d25 < 1.1920928955078125e-7)) {
+ d45 = 1.0 / d25;
+ d24 = d24 * d45;
+ d22 = d22 * d45;
+ }
+ HEAPF32[i2 >> 2] = d7 * d24 + d21;
+ HEAPF32[i13 >> 2] = d7 * d22 + d19;
+ HEAPF32[i14 >> 2] = d18 - d16 * d24;
+ HEAPF32[i12 >> 2] = d17 - d16 * d22;
+ STACKTOP = i1;
+ return;
+ }
+ } else if ((i31 | 0) == 0) {
+ ___assert_fail(2712, 2672, 194, 2856);
+ } else {
+ ___assert_fail(2712, 2672, 207, 2856);
+ }
+}
+function __ZN8b2Island5SolveEP9b2ProfileRK10b2TimeStepRK6b2Vec2b(i4, i8, i11, i17, i7) {
+ i4 = i4 | 0;
+ i8 = i8 | 0;
+ i11 = i11 | 0;
+ i17 = i17 | 0;
+ i7 = i7 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, d5 = 0.0, i6 = 0, i9 = 0, i10 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i18 = 0, i19 = 0, i20 = 0, d21 = 0.0, i22 = 0, d23 = 0.0, d24 = 0.0, d25 = 0.0, d26 = 0.0, d27 = 0.0, d28 = 0.0, d29 = 0.0, i30 = 0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 160 | 0;
+ i6 = i3 + 128 | 0;
+ i9 = i3 + 148 | 0;
+ i10 = i3 + 96 | 0;
+ i16 = i3 + 52 | 0;
+ i2 = i3;
+ __ZN7b2TimerC2Ev(i9);
+ d5 = +HEAPF32[i11 >> 2];
+ i1 = i4 + 28 | 0;
+ if ((HEAP32[i1 >> 2] | 0) > 0) {
+ i13 = i4 + 8 | 0;
+ i12 = i17 + 4 | 0;
+ i15 = i4 + 20 | 0;
+ i14 = i4 + 24 | 0;
+ i19 = 0;
+ do {
+ i22 = HEAP32[(HEAP32[i13 >> 2] | 0) + (i19 << 2) >> 2] | 0;
+ i18 = i22 + 44 | 0;
+ i20 = HEAP32[i18 >> 2] | 0;
+ i18 = HEAP32[i18 + 4 >> 2] | 0;
+ d21 = +HEAPF32[i22 + 56 >> 2];
+ i30 = i22 + 64 | 0;
+ d27 = +HEAPF32[i30 >> 2];
+ d24 = +HEAPF32[i30 + 4 >> 2];
+ d23 = +HEAPF32[i22 + 72 >> 2];
+ i30 = i22 + 36 | 0;
+ HEAP32[i30 >> 2] = i20;
+ HEAP32[i30 + 4 >> 2] = i18;
+ HEAPF32[i22 + 52 >> 2] = d21;
+ if ((HEAP32[i22 >> 2] | 0) == 2) {
+ d25 = +HEAPF32[i22 + 140 >> 2];
+ d26 = +HEAPF32[i22 + 120 >> 2];
+ d28 = 1.0 - d5 * +HEAPF32[i22 + 132 >> 2];
+ d28 = d28 < 1.0 ? d28 : 1.0;
+ d28 = d28 < 0.0 ? 0.0 : d28;
+ d29 = 1.0 - d5 * +HEAPF32[i22 + 136 >> 2];
+ d29 = d29 < 1.0 ? d29 : 1.0;
+ d27 = (d27 + d5 * (d25 * +HEAPF32[i17 >> 2] + d26 * +HEAPF32[i22 + 76 >> 2])) * d28;
+ d24 = (d24 + d5 * (d25 * +HEAPF32[i12 >> 2] + d26 * +HEAPF32[i22 + 80 >> 2])) * d28;
+ d23 = (d23 + d5 * +HEAPF32[i22 + 128 >> 2] * +HEAPF32[i22 + 84 >> 2]) * (d29 < 0.0 ? 0.0 : d29);
+ }
+ i30 = (HEAP32[i15 >> 2] | 0) + (i19 * 12 | 0) | 0;
+ HEAP32[i30 >> 2] = i20;
+ HEAP32[i30 + 4 >> 2] = i18;
+ HEAPF32[(HEAP32[i15 >> 2] | 0) + (i19 * 12 | 0) + 8 >> 2] = d21;
+ d28 = +d27;
+ d29 = +d24;
+ i30 = (HEAP32[i14 >> 2] | 0) + (i19 * 12 | 0) | 0;
+ HEAPF32[i30 >> 2] = d28;
+ HEAPF32[i30 + 4 >> 2] = d29;
+ HEAPF32[(HEAP32[i14 >> 2] | 0) + (i19 * 12 | 0) + 8 >> 2] = d23;
+ i19 = i19 + 1 | 0;
+ } while ((i19 | 0) < (HEAP32[i1 >> 2] | 0));
+ } else {
+ i14 = i4 + 24 | 0;
+ i15 = i4 + 20 | 0;
+ }
+ HEAP32[i10 + 0 >> 2] = HEAP32[i11 + 0 >> 2];
+ HEAP32[i10 + 4 >> 2] = HEAP32[i11 + 4 >> 2];
+ HEAP32[i10 + 8 >> 2] = HEAP32[i11 + 8 >> 2];
+ HEAP32[i10 + 12 >> 2] = HEAP32[i11 + 12 >> 2];
+ HEAP32[i10 + 16 >> 2] = HEAP32[i11 + 16 >> 2];
+ HEAP32[i10 + 20 >> 2] = HEAP32[i11 + 20 >> 2];
+ i22 = HEAP32[i15 >> 2] | 0;
+ HEAP32[i10 + 24 >> 2] = i22;
+ i30 = HEAP32[i14 >> 2] | 0;
+ HEAP32[i10 + 28 >> 2] = i30;
+ HEAP32[i16 + 0 >> 2] = HEAP32[i11 + 0 >> 2];
+ HEAP32[i16 + 4 >> 2] = HEAP32[i11 + 4 >> 2];
+ HEAP32[i16 + 8 >> 2] = HEAP32[i11 + 8 >> 2];
+ HEAP32[i16 + 12 >> 2] = HEAP32[i11 + 12 >> 2];
+ HEAP32[i16 + 16 >> 2] = HEAP32[i11 + 16 >> 2];
+ HEAP32[i16 + 20 >> 2] = HEAP32[i11 + 20 >> 2];
+ i13 = i4 + 12 | 0;
+ HEAP32[i16 + 24 >> 2] = HEAP32[i13 >> 2];
+ i12 = i4 + 36 | 0;
+ HEAP32[i16 + 28 >> 2] = HEAP32[i12 >> 2];
+ HEAP32[i16 + 32 >> 2] = i22;
+ HEAP32[i16 + 36 >> 2] = i30;
+ HEAP32[i16 + 40 >> 2] = HEAP32[i4 >> 2];
+ __ZN15b2ContactSolverC2EP18b2ContactSolverDef(i2, i16);
+ __ZN15b2ContactSolver29InitializeVelocityConstraintsEv(i2);
+ if ((HEAP8[i11 + 20 | 0] | 0) != 0) {
+ __ZN15b2ContactSolver9WarmStartEv(i2);
+ }
+ i16 = i4 + 32 | 0;
+ if ((HEAP32[i16 >> 2] | 0) > 0) {
+ i18 = i4 + 16 | 0;
+ i17 = 0;
+ do {
+ i30 = HEAP32[(HEAP32[i18 >> 2] | 0) + (i17 << 2) >> 2] | 0;
+ FUNCTION_TABLE_vii[HEAP32[(HEAP32[i30 >> 2] | 0) + 28 >> 2] & 15](i30, i10);
+ i17 = i17 + 1 | 0;
+ } while ((i17 | 0) < (HEAP32[i16 >> 2] | 0));
+ }
+ HEAPF32[i8 + 12 >> 2] = +__ZNK7b2Timer15GetMillisecondsEv(i9);
+ i17 = i11 + 12 | 0;
+ if ((HEAP32[i17 >> 2] | 0) > 0) {
+ i20 = i4 + 16 | 0;
+ i19 = 0;
+ do {
+ if ((HEAP32[i16 >> 2] | 0) > 0) {
+ i18 = 0;
+ do {
+ i30 = HEAP32[(HEAP32[i20 >> 2] | 0) + (i18 << 2) >> 2] | 0;
+ FUNCTION_TABLE_vii[HEAP32[(HEAP32[i30 >> 2] | 0) + 32 >> 2] & 15](i30, i10);
+ i18 = i18 + 1 | 0;
+ } while ((i18 | 0) < (HEAP32[i16 >> 2] | 0));
+ }
+ __ZN15b2ContactSolver24SolveVelocityConstraintsEv(i2);
+ i19 = i19 + 1 | 0;
+ } while ((i19 | 0) < (HEAP32[i17 >> 2] | 0));
+ }
+ __ZN15b2ContactSolver13StoreImpulsesEv(i2);
+ HEAPF32[i8 + 16 >> 2] = +__ZNK7b2Timer15GetMillisecondsEv(i9);
+ if ((HEAP32[i1 >> 2] | 0) > 0) {
+ i19 = HEAP32[i14 >> 2] | 0;
+ i18 = 0;
+ do {
+ i30 = HEAP32[i15 >> 2] | 0;
+ i17 = i30 + (i18 * 12 | 0) | 0;
+ i22 = i17;
+ d23 = +HEAPF32[i22 >> 2];
+ d21 = +HEAPF32[i22 + 4 >> 2];
+ d24 = +HEAPF32[i30 + (i18 * 12 | 0) + 8 >> 2];
+ i30 = i19 + (i18 * 12 | 0) | 0;
+ d26 = +HEAPF32[i30 >> 2];
+ d27 = +HEAPF32[i30 + 4 >> 2];
+ d25 = +HEAPF32[i19 + (i18 * 12 | 0) + 8 >> 2];
+ d29 = d5 * d26;
+ d28 = d5 * d27;
+ d28 = d29 * d29 + d28 * d28;
+ if (d28 > 4.0) {
+ d29 = 2.0 / +Math_sqrt(+d28);
+ d26 = d26 * d29;
+ d27 = d27 * d29;
+ }
+ d28 = d5 * d25;
+ if (d28 * d28 > 2.4674012660980225) {
+ if (!(d28 > 0.0)) {
+ d28 = -d28;
+ }
+ d25 = d25 * (1.5707963705062866 / d28);
+ }
+ d29 = +(d23 + d5 * d26);
+ d28 = +(d21 + d5 * d27);
+ i19 = i17;
+ HEAPF32[i19 >> 2] = d29;
+ HEAPF32[i19 + 4 >> 2] = d28;
+ HEAPF32[(HEAP32[i15 >> 2] | 0) + (i18 * 12 | 0) + 8 >> 2] = d24 + d5 * d25;
+ d28 = +d26;
+ d29 = +d27;
+ i19 = (HEAP32[i14 >> 2] | 0) + (i18 * 12 | 0) | 0;
+ HEAPF32[i19 >> 2] = d28;
+ HEAPF32[i19 + 4 >> 2] = d29;
+ i19 = HEAP32[i14 >> 2] | 0;
+ HEAPF32[i19 + (i18 * 12 | 0) + 8 >> 2] = d25;
+ i18 = i18 + 1 | 0;
+ } while ((i18 | 0) < (HEAP32[i1 >> 2] | 0));
+ }
+ i11 = i11 + 16 | 0;
+ L41 : do {
+ if ((HEAP32[i11 >> 2] | 0) > 0) {
+ i17 = i4 + 16 | 0;
+ i19 = 0;
+ while (1) {
+ i18 = __ZN15b2ContactSolver24SolvePositionConstraintsEv(i2) | 0;
+ if ((HEAP32[i16 >> 2] | 0) > 0) {
+ i20 = 0;
+ i22 = 1;
+ do {
+ i30 = HEAP32[(HEAP32[i17 >> 2] | 0) + (i20 << 2) >> 2] | 0;
+ i22 = i22 & (FUNCTION_TABLE_iii[HEAP32[(HEAP32[i30 >> 2] | 0) + 36 >> 2] & 3](i30, i10) | 0);
+ i20 = i20 + 1 | 0;
+ } while ((i20 | 0) < (HEAP32[i16 >> 2] | 0));
+ } else {
+ i22 = 1;
+ }
+ i19 = i19 + 1 | 0;
+ if (i18 & i22) {
+ i10 = 0;
+ break L41;
+ }
+ if ((i19 | 0) >= (HEAP32[i11 >> 2] | 0)) {
+ i10 = 1;
+ break;
+ }
+ }
+ } else {
+ i10 = 1;
+ }
+ } while (0);
+ if ((HEAP32[i1 >> 2] | 0) > 0) {
+ i11 = i4 + 8 | 0;
+ i16 = 0;
+ do {
+ i30 = HEAP32[(HEAP32[i11 >> 2] | 0) + (i16 << 2) >> 2] | 0;
+ i22 = (HEAP32[i15 >> 2] | 0) + (i16 * 12 | 0) | 0;
+ i20 = HEAP32[i22 >> 2] | 0;
+ i22 = HEAP32[i22 + 4 >> 2] | 0;
+ i17 = i30 + 44 | 0;
+ HEAP32[i17 >> 2] = i20;
+ HEAP32[i17 + 4 >> 2] = i22;
+ d27 = +HEAPF32[(HEAP32[i15 >> 2] | 0) + (i16 * 12 | 0) + 8 >> 2];
+ HEAPF32[i30 + 56 >> 2] = d27;
+ i17 = (HEAP32[i14 >> 2] | 0) + (i16 * 12 | 0) | 0;
+ i18 = HEAP32[i17 + 4 >> 2] | 0;
+ i19 = i30 + 64 | 0;
+ HEAP32[i19 >> 2] = HEAP32[i17 >> 2];
+ HEAP32[i19 + 4 >> 2] = i18;
+ HEAPF32[i30 + 72 >> 2] = +HEAPF32[(HEAP32[i14 >> 2] | 0) + (i16 * 12 | 0) + 8 >> 2];
+ d25 = +Math_sin(+d27);
+ HEAPF32[i30 + 20 >> 2] = d25;
+ d27 = +Math_cos(+d27);
+ HEAPF32[i30 + 24 >> 2] = d27;
+ d26 = +HEAPF32[i30 + 28 >> 2];
+ d29 = +HEAPF32[i30 + 32 >> 2];
+ d28 = (HEAP32[tempDoublePtr >> 2] = i20, +HEAPF32[tempDoublePtr >> 2]) - (d27 * d26 - d25 * d29);
+ d29 = (HEAP32[tempDoublePtr >> 2] = i22, +HEAPF32[tempDoublePtr >> 2]) - (d25 * d26 + d27 * d29);
+ d28 = +d28;
+ d29 = +d29;
+ i30 = i30 + 12 | 0;
+ HEAPF32[i30 >> 2] = d28;
+ HEAPF32[i30 + 4 >> 2] = d29;
+ i16 = i16 + 1 | 0;
+ } while ((i16 | 0) < (HEAP32[i1 >> 2] | 0));
+ }
+ HEAPF32[i8 + 20 >> 2] = +__ZNK7b2Timer15GetMillisecondsEv(i9);
+ i9 = HEAP32[i2 + 40 >> 2] | 0;
+ i8 = i4 + 4 | 0;
+ if ((HEAP32[i8 >> 2] | 0) != 0 ? (HEAP32[i12 >> 2] | 0) > 0 : 0) {
+ i11 = i6 + 16 | 0;
+ i14 = 0;
+ do {
+ i15 = HEAP32[(HEAP32[i13 >> 2] | 0) + (i14 << 2) >> 2] | 0;
+ i16 = HEAP32[i9 + (i14 * 152 | 0) + 144 >> 2] | 0;
+ HEAP32[i11 >> 2] = i16;
+ if ((i16 | 0) > 0) {
+ i17 = 0;
+ do {
+ HEAPF32[i6 + (i17 << 2) >> 2] = +HEAPF32[i9 + (i14 * 152 | 0) + (i17 * 36 | 0) + 16 >> 2];
+ HEAPF32[i6 + (i17 << 2) + 8 >> 2] = +HEAPF32[i9 + (i14 * 152 | 0) + (i17 * 36 | 0) + 20 >> 2];
+ i17 = i17 + 1 | 0;
+ } while ((i17 | 0) != (i16 | 0));
+ }
+ i30 = HEAP32[i8 >> 2] | 0;
+ FUNCTION_TABLE_viii[HEAP32[(HEAP32[i30 >> 2] | 0) + 20 >> 2] & 3](i30, i15, i6);
+ i14 = i14 + 1 | 0;
+ } while ((i14 | 0) < (HEAP32[i12 >> 2] | 0));
+ }
+ if (!i7) {
+ __ZN15b2ContactSolverD2Ev(i2);
+ STACKTOP = i3;
+ return;
+ }
+ i7 = HEAP32[i1 >> 2] | 0;
+ i6 = (i7 | 0) > 0;
+ if (i6) {
+ i8 = HEAP32[i4 + 8 >> 2] | 0;
+ i9 = 0;
+ d21 = 3.4028234663852886e+38;
+ do {
+ i11 = HEAP32[i8 + (i9 << 2) >> 2] | 0;
+ do {
+ if ((HEAP32[i11 >> 2] | 0) != 0) {
+ if ((!((HEAP16[i11 + 4 >> 1] & 4) == 0) ? (d29 = +HEAPF32[i11 + 72 >> 2], !(d29 * d29 > .001218469929881394)) : 0) ? (d28 = +HEAPF32[i11 + 64 >> 2], d29 = +HEAPF32[i11 + 68 >> 2], !(d28 * d28 + d29 * d29 > 9999999747378752.0e-20)) : 0) {
+ i30 = i11 + 144 | 0;
+ d23 = d5 + +HEAPF32[i30 >> 2];
+ HEAPF32[i30 >> 2] = d23;
+ d21 = d21 < d23 ? d21 : d23;
+ break;
+ }
+ HEAPF32[i11 + 144 >> 2] = 0.0;
+ d21 = 0.0;
+ }
+ } while (0);
+ i9 = i9 + 1 | 0;
+ } while ((i9 | 0) < (i7 | 0));
+ } else {
+ d21 = 3.4028234663852886e+38;
+ }
+ if (!(d21 >= .5) | i10 | i6 ^ 1) {
+ __ZN15b2ContactSolverD2Ev(i2);
+ STACKTOP = i3;
+ return;
+ }
+ i4 = i4 + 8 | 0;
+ i6 = 0;
+ do {
+ i30 = HEAP32[(HEAP32[i4 >> 2] | 0) + (i6 << 2) >> 2] | 0;
+ i22 = i30 + 4 | 0;
+ HEAP16[i22 >> 1] = HEAP16[i22 >> 1] & 65533;
+ HEAPF32[i30 + 144 >> 2] = 0.0;
+ i30 = i30 + 64 | 0;
+ HEAP32[i30 + 0 >> 2] = 0;
+ HEAP32[i30 + 4 >> 2] = 0;
+ HEAP32[i30 + 8 >> 2] = 0;
+ HEAP32[i30 + 12 >> 2] = 0;
+ HEAP32[i30 + 16 >> 2] = 0;
+ HEAP32[i30 + 20 >> 2] = 0;
+ i6 = i6 + 1 | 0;
+ } while ((i6 | 0) < (HEAP32[i1 >> 2] | 0));
+ __ZN15b2ContactSolverD2Ev(i2);
+ STACKTOP = i3;
+ return;
+}
+function __ZN15b2ContactSolver24SolveVelocityConstraintsEv(i4) {
+ i4 = i4 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, d9 = 0.0, d10 = 0.0, d11 = 0.0, d12 = 0.0, d13 = 0.0, d14 = 0.0, d15 = 0.0, d16 = 0.0, d17 = 0.0, d18 = 0.0, i19 = 0, d20 = 0.0, d21 = 0.0, i22 = 0, d23 = 0.0, d24 = 0.0, d25 = 0.0, d26 = 0.0, d27 = 0.0, d28 = 0.0, d29 = 0.0, d30 = 0.0, d31 = 0.0, i32 = 0, i33 = 0, d34 = 0.0, d35 = 0.0, d36 = 0.0, d37 = 0.0, d38 = 0.0, d39 = 0.0, d40 = 0.0, i41 = 0, i42 = 0, d43 = 0.0, d44 = 0.0;
+ i1 = STACKTOP;
+ i2 = i4 + 48 | 0;
+ if ((HEAP32[i2 >> 2] | 0) <= 0) {
+ STACKTOP = i1;
+ return;
+ }
+ i3 = i4 + 40 | 0;
+ i4 = i4 + 28 | 0;
+ i42 = HEAP32[i4 >> 2] | 0;
+ i5 = 0;
+ L4 : while (1) {
+ i19 = HEAP32[i3 >> 2] | 0;
+ i22 = i19 + (i5 * 152 | 0) | 0;
+ i8 = HEAP32[i19 + (i5 * 152 | 0) + 112 >> 2] | 0;
+ i6 = HEAP32[i19 + (i5 * 152 | 0) + 116 >> 2] | 0;
+ d12 = +HEAPF32[i19 + (i5 * 152 | 0) + 120 >> 2];
+ d10 = +HEAPF32[i19 + (i5 * 152 | 0) + 128 >> 2];
+ d11 = +HEAPF32[i19 + (i5 * 152 | 0) + 124 >> 2];
+ d9 = +HEAPF32[i19 + (i5 * 152 | 0) + 132 >> 2];
+ i32 = i19 + (i5 * 152 | 0) + 144 | 0;
+ i33 = HEAP32[i32 >> 2] | 0;
+ i7 = i42 + (i8 * 12 | 0) | 0;
+ i41 = i7;
+ d21 = +HEAPF32[i41 >> 2];
+ d20 = +HEAPF32[i41 + 4 >> 2];
+ i41 = i42 + (i6 * 12 | 0) | 0;
+ d14 = +HEAPF32[i41 >> 2];
+ d13 = +HEAPF32[i41 + 4 >> 2];
+ i41 = i19 + (i5 * 152 | 0) + 72 | 0;
+ d17 = +HEAPF32[i41 >> 2];
+ d16 = +HEAPF32[i41 + 4 >> 2];
+ d23 = -d17;
+ d24 = +HEAPF32[i19 + (i5 * 152 | 0) + 136 >> 2];
+ if ((i33 + -1 | 0) >>> 0 < 2) {
+ i41 = 0;
+ d18 = +HEAPF32[i42 + (i8 * 12 | 0) + 8 >> 2];
+ d15 = +HEAPF32[i42 + (i6 * 12 | 0) + 8 >> 2];
+ } else {
+ i2 = 4;
+ break;
+ }
+ do {
+ d30 = +HEAPF32[i19 + (i5 * 152 | 0) + (i41 * 36 | 0) + 12 >> 2];
+ d25 = +HEAPF32[i19 + (i5 * 152 | 0) + (i41 * 36 | 0) + 8 >> 2];
+ d26 = +HEAPF32[i19 + (i5 * 152 | 0) + (i41 * 36 | 0) + 4 >> 2];
+ d27 = +HEAPF32[i19 + (i5 * 152 | 0) + (i41 * 36 | 0) >> 2];
+ d34 = d24 * +HEAPF32[i19 + (i5 * 152 | 0) + (i41 * 36 | 0) + 16 >> 2];
+ i42 = i19 + (i5 * 152 | 0) + (i41 * 36 | 0) + 20 | 0;
+ d28 = +HEAPF32[i42 >> 2];
+ d31 = d28 - +HEAPF32[i19 + (i5 * 152 | 0) + (i41 * 36 | 0) + 28 >> 2] * (d16 * (d14 - d15 * d30 - d21 + d18 * d26) + (d13 + d15 * d25 - d20 - d18 * d27) * d23);
+ d29 = -d34;
+ d31 = d31 < d34 ? d31 : d34;
+ d40 = d31 < d29 ? d29 : d31;
+ d39 = d40 - d28;
+ HEAPF32[i42 >> 2] = d40;
+ d40 = d16 * d39;
+ d39 = d39 * d23;
+ d21 = d21 - d12 * d40;
+ d20 = d20 - d12 * d39;
+ d18 = d18 - d10 * (d27 * d39 - d26 * d40);
+ d14 = d14 + d11 * d40;
+ d13 = d13 + d11 * d39;
+ d15 = d15 + d9 * (d25 * d39 - d30 * d40);
+ i41 = i41 + 1 | 0;
+ } while ((i41 | 0) != (i33 | 0));
+ do {
+ if ((HEAP32[i32 >> 2] | 0) != 1) {
+ i32 = i19 + (i5 * 152 | 0) + 16 | 0;
+ d31 = +HEAPF32[i32 >> 2];
+ i33 = i19 + (i5 * 152 | 0) + 52 | 0;
+ d34 = +HEAPF32[i33 >> 2];
+ if (!(d31 >= 0.0) | !(d34 >= 0.0)) {
+ i2 = 9;
+ break L4;
+ }
+ d23 = +HEAPF32[i19 + (i5 * 152 | 0) + 12 >> 2];
+ d24 = +HEAPF32[i19 + (i5 * 152 | 0) + 8 >> 2];
+ d26 = +HEAPF32[i19 + (i5 * 152 | 0) + 4 >> 2];
+ d30 = +HEAPF32[i22 >> 2];
+ d27 = +HEAPF32[i19 + (i5 * 152 | 0) + 48 >> 2];
+ d25 = +HEAPF32[i19 + (i5 * 152 | 0) + 44 >> 2];
+ d28 = +HEAPF32[i19 + (i5 * 152 | 0) + 40 >> 2];
+ d29 = +HEAPF32[i19 + (i5 * 152 | 0) + 36 >> 2];
+ d37 = +HEAPF32[i19 + (i5 * 152 | 0) + 104 >> 2];
+ d38 = +HEAPF32[i19 + (i5 * 152 | 0) + 100 >> 2];
+ d35 = d17 * (d14 - d15 * d23 - d21 + d18 * d26) + d16 * (d13 + d15 * d24 - d20 - d18 * d30) - +HEAPF32[i19 + (i5 * 152 | 0) + 32 >> 2] - (d31 * +HEAPF32[i19 + (i5 * 152 | 0) + 96 >> 2] + d34 * d37);
+ d36 = d17 * (d14 - d15 * d27 - d21 + d18 * d28) + d16 * (d13 + d15 * d25 - d20 - d18 * d29) - +HEAPF32[i19 + (i5 * 152 | 0) + 68 >> 2] - (d31 * d38 + d34 * +HEAPF32[i19 + (i5 * 152 | 0) + 108 >> 2]);
+ d44 = +HEAPF32[i19 + (i5 * 152 | 0) + 80 >> 2] * d35 + +HEAPF32[i19 + (i5 * 152 | 0) + 88 >> 2] * d36;
+ d43 = d35 * +HEAPF32[i19 + (i5 * 152 | 0) + 84 >> 2] + d36 * +HEAPF32[i19 + (i5 * 152 | 0) + 92 >> 2];
+ d40 = -d44;
+ d39 = -d43;
+ if (!(!(d44 <= -0.0) | !(d43 <= -0.0))) {
+ d37 = d40 - d31;
+ d43 = d39 - d34;
+ d38 = d17 * d37;
+ d37 = d16 * d37;
+ d44 = d17 * d43;
+ d43 = d16 * d43;
+ d35 = d38 + d44;
+ d36 = d37 + d43;
+ HEAPF32[i32 >> 2] = d40;
+ HEAPF32[i33 >> 2] = d39;
+ d21 = d21 - d12 * d35;
+ d20 = d20 - d12 * d36;
+ d14 = d14 + d11 * d35;
+ d13 = d13 + d11 * d36;
+ d18 = d18 - d10 * (d30 * d37 - d26 * d38 + (d29 * d43 - d28 * d44));
+ d15 = d15 + d9 * (d24 * d37 - d23 * d38 + (d25 * d43 - d27 * d44));
+ break;
+ }
+ d44 = d35 * +HEAPF32[i19 + (i5 * 152 | 0) + 24 >> 2];
+ d39 = -d44;
+ if (d44 <= -0.0 ? d36 + d38 * d39 >= 0.0 : 0) {
+ d38 = d39 - d31;
+ d43 = 0.0 - d34;
+ d40 = d17 * d38;
+ d38 = d16 * d38;
+ d44 = d17 * d43;
+ d43 = d16 * d43;
+ d36 = d44 + d40;
+ d37 = d43 + d38;
+ HEAPF32[i32 >> 2] = d39;
+ HEAPF32[i33 >> 2] = 0.0;
+ d21 = d21 - d12 * d36;
+ d20 = d20 - d12 * d37;
+ d14 = d14 + d11 * d36;
+ d13 = d13 + d11 * d37;
+ d18 = d18 - d10 * (d38 * d30 - d40 * d26 + (d43 * d29 - d44 * d28));
+ d15 = d15 + d9 * (d38 * d24 - d40 * d23 + (d43 * d25 - d44 * d27));
+ break;
+ }
+ d44 = d36 * +HEAPF32[i19 + (i5 * 152 | 0) + 60 >> 2];
+ d38 = -d44;
+ if (d44 <= -0.0 ? d35 + d37 * d38 >= 0.0 : 0) {
+ d39 = 0.0 - d31;
+ d43 = d38 - d34;
+ d40 = d17 * d39;
+ d39 = d16 * d39;
+ d44 = d17 * d43;
+ d43 = d16 * d43;
+ d36 = d40 + d44;
+ d37 = d39 + d43;
+ HEAPF32[i32 >> 2] = 0.0;
+ HEAPF32[i33 >> 2] = d38;
+ d21 = d21 - d12 * d36;
+ d20 = d20 - d12 * d37;
+ d14 = d14 + d11 * d36;
+ d13 = d13 + d11 * d37;
+ d18 = d18 - d10 * (d39 * d30 - d40 * d26 + (d43 * d29 - d44 * d28));
+ d15 = d15 + d9 * (d39 * d24 - d40 * d23 + (d43 * d25 - d44 * d27));
+ break;
+ }
+ if (!(!(d35 >= 0.0) | !(d36 >= 0.0))) {
+ d39 = 0.0 - d31;
+ d43 = 0.0 - d34;
+ d40 = d17 * d39;
+ d39 = d16 * d39;
+ d44 = d17 * d43;
+ d43 = d16 * d43;
+ d37 = d40 + d44;
+ d38 = d39 + d43;
+ HEAPF32[i32 >> 2] = 0.0;
+ HEAPF32[i33 >> 2] = 0.0;
+ d21 = d21 - d12 * d37;
+ d20 = d20 - d12 * d38;
+ d14 = d14 + d11 * d37;
+ d13 = d13 + d11 * d38;
+ d18 = d18 - d10 * (d39 * d30 - d40 * d26 + (d43 * d29 - d44 * d28));
+ d15 = d15 + d9 * (d39 * d24 - d40 * d23 + (d43 * d25 - d44 * d27));
+ }
+ } else {
+ d23 = +HEAPF32[i19 + (i5 * 152 | 0) + 12 >> 2];
+ d24 = +HEAPF32[i19 + (i5 * 152 | 0) + 8 >> 2];
+ d25 = +HEAPF32[i19 + (i5 * 152 | 0) + 4 >> 2];
+ d26 = +HEAPF32[i22 >> 2];
+ i22 = i19 + (i5 * 152 | 0) + 16 | 0;
+ d27 = +HEAPF32[i22 >> 2];
+ d28 = d27 - +HEAPF32[i19 + (i5 * 152 | 0) + 24 >> 2] * (d17 * (d14 - d15 * d23 - d21 + d18 * d25) + d16 * (d13 + d15 * d24 - d20 - d18 * d26) - +HEAPF32[i19 + (i5 * 152 | 0) + 32 >> 2]);
+ d44 = d28 > 0.0 ? d28 : 0.0;
+ d43 = d44 - d27;
+ HEAPF32[i22 >> 2] = d44;
+ d44 = d17 * d43;
+ d43 = d16 * d43;
+ d21 = d21 - d12 * d44;
+ d20 = d20 - d12 * d43;
+ d14 = d14 + d11 * d44;
+ d13 = d13 + d11 * d43;
+ d18 = d18 - d10 * (d26 * d43 - d25 * d44);
+ d15 = d15 + d9 * (d24 * d43 - d23 * d44);
+ }
+ } while (0);
+ d44 = +d21;
+ d43 = +d20;
+ i42 = i7;
+ HEAPF32[i42 >> 2] = d44;
+ HEAPF32[i42 + 4 >> 2] = d43;
+ i42 = HEAP32[i4 >> 2] | 0;
+ HEAPF32[i42 + (i8 * 12 | 0) + 8 >> 2] = d18;
+ d43 = +d14;
+ d44 = +d13;
+ i42 = i42 + (i6 * 12 | 0) | 0;
+ HEAPF32[i42 >> 2] = d43;
+ HEAPF32[i42 + 4 >> 2] = d44;
+ i42 = HEAP32[i4 >> 2] | 0;
+ HEAPF32[i42 + (i6 * 12 | 0) + 8 >> 2] = d15;
+ i5 = i5 + 1 | 0;
+ if ((i5 | 0) >= (HEAP32[i2 >> 2] | 0)) {
+ i2 = 21;
+ break;
+ }
+ }
+ if ((i2 | 0) == 4) {
+ ___assert_fail(6648, 6520, 311, 6688);
+ } else if ((i2 | 0) == 9) {
+ ___assert_fail(6720, 6520, 406, 6688);
+ } else if ((i2 | 0) == 21) {
+ STACKTOP = i1;
+ return;
+ }
+}
+function __Z14b2TimeOfImpactP11b2TOIOutputPK10b2TOIInput(i3, i11) {
+ i3 = i3 | 0;
+ i11 = i11 | 0;
+ var i1 = 0, i2 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i12 = 0, i13 = 0, d14 = 0.0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0, i23 = 0, i24 = 0, i25 = 0, i26 = 0, i27 = 0, d28 = 0.0, i29 = 0, d30 = 0.0, d31 = 0.0, i32 = 0, i33 = 0, i34 = 0, i35 = 0, i36 = 0, i37 = 0, i38 = 0, i39 = 0, d40 = 0.0, i41 = 0, d42 = 0.0, d43 = 0.0, i44 = 0, i45 = 0, d46 = 0.0, i47 = 0, d48 = 0.0, d49 = 0.0, d50 = 0.0, d51 = 0.0, i52 = 0, d53 = 0.0, d54 = 0.0, d55 = 0.0, d56 = 0.0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 320 | 0;
+ i12 = i1 + 276 | 0;
+ i10 = i1 + 240 | 0;
+ i13 = i1 + 228 | 0;
+ i5 = i1 + 136 | 0;
+ i7 = i1 + 112 | 0;
+ i8 = i1 + 8 | 0;
+ i9 = i1 + 4 | 0;
+ i4 = i1;
+ HEAP32[874] = (HEAP32[874] | 0) + 1;
+ HEAP32[i3 >> 2] = 0;
+ i19 = i11 + 128 | 0;
+ i2 = i3 + 4 | 0;
+ HEAPF32[i2 >> 2] = +HEAPF32[i19 >> 2];
+ i6 = i11 + 28 | 0;
+ i16 = i12 + 0 | 0;
+ i15 = i11 + 56 | 0;
+ i17 = i16 + 36 | 0;
+ do {
+ HEAP32[i16 >> 2] = HEAP32[i15 >> 2];
+ i16 = i16 + 4 | 0;
+ i15 = i15 + 4 | 0;
+ } while ((i16 | 0) < (i17 | 0));
+ i16 = i10 + 0 | 0;
+ i15 = i11 + 92 | 0;
+ i17 = i16 + 36 | 0;
+ do {
+ HEAP32[i16 >> 2] = HEAP32[i15 >> 2];
+ i16 = i16 + 4 | 0;
+ i15 = i15 + 4 | 0;
+ } while ((i16 | 0) < (i17 | 0));
+ i15 = i12 + 24 | 0;
+ d42 = +HEAPF32[i15 >> 2];
+ d43 = +Math_floor(+(d42 / 6.2831854820251465)) * 6.2831854820251465;
+ d42 = d42 - d43;
+ HEAPF32[i15 >> 2] = d42;
+ i16 = i12 + 28 | 0;
+ d43 = +HEAPF32[i16 >> 2] - d43;
+ HEAPF32[i16 >> 2] = d43;
+ i17 = i10 + 24 | 0;
+ d46 = +HEAPF32[i17 >> 2];
+ d40 = +Math_floor(+(d46 / 6.2831854820251465)) * 6.2831854820251465;
+ d46 = d46 - d40;
+ HEAPF32[i17 >> 2] = d46;
+ i18 = i10 + 28 | 0;
+ d40 = +HEAPF32[i18 >> 2] - d40;
+ HEAPF32[i18 >> 2] = d40;
+ d14 = +HEAPF32[i19 >> 2];
+ d28 = +HEAPF32[i11 + 24 >> 2] + +HEAPF32[i11 + 52 >> 2] + -.014999999664723873;
+ d28 = d28 < .004999999888241291 ? .004999999888241291 : d28;
+ if (!(d28 > .0012499999720603228)) {
+ ___assert_fail(3536, 3560, 280, 3600);
+ }
+ HEAP16[i13 + 4 >> 1] = 0;
+ HEAP32[i5 + 0 >> 2] = HEAP32[i11 + 0 >> 2];
+ HEAP32[i5 + 4 >> 2] = HEAP32[i11 + 4 >> 2];
+ HEAP32[i5 + 8 >> 2] = HEAP32[i11 + 8 >> 2];
+ HEAP32[i5 + 12 >> 2] = HEAP32[i11 + 12 >> 2];
+ HEAP32[i5 + 16 >> 2] = HEAP32[i11 + 16 >> 2];
+ HEAP32[i5 + 20 >> 2] = HEAP32[i11 + 20 >> 2];
+ HEAP32[i5 + 24 >> 2] = HEAP32[i11 + 24 >> 2];
+ i38 = i5 + 28 | 0;
+ HEAP32[i38 + 0 >> 2] = HEAP32[i6 + 0 >> 2];
+ HEAP32[i38 + 4 >> 2] = HEAP32[i6 + 4 >> 2];
+ HEAP32[i38 + 8 >> 2] = HEAP32[i6 + 8 >> 2];
+ HEAP32[i38 + 12 >> 2] = HEAP32[i6 + 12 >> 2];
+ HEAP32[i38 + 16 >> 2] = HEAP32[i6 + 16 >> 2];
+ HEAP32[i38 + 20 >> 2] = HEAP32[i6 + 20 >> 2];
+ HEAP32[i38 + 24 >> 2] = HEAP32[i6 + 24 >> 2];
+ HEAP8[i5 + 88 | 0] = 0;
+ i38 = i12 + 8 | 0;
+ i27 = i12 + 12 | 0;
+ i29 = i12 + 16 | 0;
+ i22 = i12 + 20 | 0;
+ i32 = i12 + 4 | 0;
+ i34 = i10 + 8 | 0;
+ i36 = i10 + 12 | 0;
+ i35 = i10 + 16 | 0;
+ i37 = i10 + 20 | 0;
+ i33 = i10 + 4 | 0;
+ i26 = i5 + 56 | 0;
+ i25 = i5 + 64 | 0;
+ i24 = i5 + 68 | 0;
+ i23 = i5 + 72 | 0;
+ i20 = i5 + 80 | 0;
+ i19 = i5 + 84 | 0;
+ i21 = i7 + 16 | 0;
+ d30 = d28 + .0012499999720603228;
+ d31 = d28 + -.0012499999720603228;
+ d48 = d40;
+ i39 = 0;
+ d40 = 0.0;
+ L4 : while (1) {
+ d56 = 1.0 - d40;
+ d49 = d56 * d42 + d40 * d43;
+ d43 = +Math_sin(+d49);
+ d49 = +Math_cos(+d49);
+ d55 = +HEAPF32[i12 >> 2];
+ d54 = +HEAPF32[i32 >> 2];
+ d42 = d56 * d46 + d40 * d48;
+ d53 = +Math_sin(+d42);
+ d42 = +Math_cos(+d42);
+ d46 = +HEAPF32[i10 >> 2];
+ d51 = +HEAPF32[i33 >> 2];
+ d50 = d56 * +HEAPF32[i34 >> 2] + d40 * +HEAPF32[i35 >> 2] - (d42 * d46 - d53 * d51);
+ d51 = d56 * +HEAPF32[i36 >> 2] + d40 * +HEAPF32[i37 >> 2] - (d53 * d46 + d42 * d51);
+ d46 = +(d56 * +HEAPF32[i38 >> 2] + d40 * +HEAPF32[i29 >> 2] - (d49 * d55 - d43 * d54));
+ d48 = +(d56 * +HEAPF32[i27 >> 2] + d40 * +HEAPF32[i22 >> 2] - (d43 * d55 + d49 * d54));
+ i52 = i26;
+ HEAPF32[i52 >> 2] = d46;
+ HEAPF32[i52 + 4 >> 2] = d48;
+ HEAPF32[i25 >> 2] = d43;
+ HEAPF32[i24 >> 2] = d49;
+ d50 = +d50;
+ d51 = +d51;
+ i52 = i23;
+ HEAPF32[i52 >> 2] = d50;
+ HEAPF32[i52 + 4 >> 2] = d51;
+ HEAPF32[i20 >> 2] = d53;
+ HEAPF32[i19 >> 2] = d42;
+ __Z10b2DistanceP16b2DistanceOutputP14b2SimplexCachePK15b2DistanceInput(i7, i13, i5);
+ d42 = +HEAPF32[i21 >> 2];
+ if (d42 <= 0.0) {
+ i4 = 5;
+ break;
+ }
+ if (d42 < d30) {
+ i4 = 7;
+ break;
+ }
+ +__ZN20b2SeparationFunction10InitializeEPK14b2SimplexCachePK15b2DistanceProxyRK7b2SweepS5_S8_f(i8, i13, i11, i12, i6, i10, d40);
+ i41 = 0;
+ d42 = d14;
+ do {
+ d50 = +__ZNK20b2SeparationFunction17FindMinSeparationEPiS0_f(i8, i9, i4, d42);
+ if (d50 > d30) {
+ i4 = 10;
+ break L4;
+ }
+ if (d50 > d31) {
+ d40 = d42;
+ break;
+ }
+ i45 = HEAP32[i9 >> 2] | 0;
+ i44 = HEAP32[i4 >> 2] | 0;
+ d48 = +__ZNK20b2SeparationFunction8EvaluateEiif(i8, i45, i44, d40);
+ if (d48 < d31) {
+ i4 = 13;
+ break L4;
+ }
+ if (!(d48 <= d30)) {
+ d43 = d40;
+ d46 = d42;
+ i47 = 0;
+ } else {
+ i4 = 15;
+ break L4;
+ }
+ while (1) {
+ if ((i47 & 1 | 0) == 0) {
+ d49 = (d43 + d46) * .5;
+ } else {
+ d49 = d43 + (d28 - d48) * (d46 - d43) / (d50 - d48);
+ }
+ d51 = +__ZNK20b2SeparationFunction8EvaluateEiif(i8, i45, i44, d49);
+ d53 = d51 - d28;
+ if (!(d53 > 0.0)) {
+ d53 = -d53;
+ }
+ if (d53 < .0012499999720603228) {
+ d42 = d49;
+ break;
+ }
+ i52 = d51 > d28;
+ i47 = i47 + 1 | 0;
+ HEAP32[880] = (HEAP32[880] | 0) + 1;
+ if ((i47 | 0) == 50) {
+ i47 = 50;
+ break;
+ } else {
+ d43 = i52 ? d49 : d43;
+ d46 = i52 ? d46 : d49;
+ d48 = i52 ? d51 : d48;
+ d50 = i52 ? d50 : d51;
+ }
+ }
+ i44 = HEAP32[882] | 0;
+ HEAP32[882] = (i44 | 0) > (i47 | 0) ? i44 : i47;
+ i41 = i41 + 1 | 0;
+ } while ((i41 | 0) != 8);
+ i39 = i39 + 1 | 0;
+ HEAP32[876] = (HEAP32[876] | 0) + 1;
+ if ((i39 | 0) == 20) {
+ i4 = 27;
+ break;
+ }
+ d42 = +HEAPF32[i15 >> 2];
+ d43 = +HEAPF32[i16 >> 2];
+ d46 = +HEAPF32[i17 >> 2];
+ d48 = +HEAPF32[i18 >> 2];
+ }
+ if ((i4 | 0) == 5) {
+ HEAP32[i3 >> 2] = 2;
+ HEAPF32[i2 >> 2] = 0.0;
+ i2 = HEAP32[878] | 0;
+ i52 = (i2 | 0) > (i39 | 0);
+ i52 = i52 ? i2 : i39;
+ HEAP32[878] = i52;
+ STACKTOP = i1;
+ return;
+ } else if ((i4 | 0) == 7) {
+ HEAP32[i3 >> 2] = 3;
+ HEAPF32[i2 >> 2] = d40;
+ i2 = HEAP32[878] | 0;
+ i52 = (i2 | 0) > (i39 | 0);
+ i52 = i52 ? i2 : i39;
+ HEAP32[878] = i52;
+ STACKTOP = i1;
+ return;
+ } else if ((i4 | 0) == 10) {
+ HEAP32[i3 >> 2] = 4;
+ HEAPF32[i2 >> 2] = d14;
+ } else if ((i4 | 0) == 13) {
+ HEAP32[i3 >> 2] = 1;
+ HEAPF32[i2 >> 2] = d40;
+ } else if ((i4 | 0) == 15) {
+ HEAP32[i3 >> 2] = 3;
+ HEAPF32[i2 >> 2] = d40;
+ } else if ((i4 | 0) == 27) {
+ HEAP32[i3 >> 2] = 1;
+ HEAPF32[i2 >> 2] = d40;
+ i39 = 20;
+ i2 = HEAP32[878] | 0;
+ i52 = (i2 | 0) > (i39 | 0);
+ i52 = i52 ? i2 : i39;
+ HEAP32[878] = i52;
+ STACKTOP = i1;
+ return;
+ }
+ HEAP32[876] = (HEAP32[876] | 0) + 1;
+ i39 = i39 + 1 | 0;
+ i2 = HEAP32[878] | 0;
+ i52 = (i2 | 0) > (i39 | 0);
+ i52 = i52 ? i2 : i39;
+ HEAP32[878] = i52;
+ STACKTOP = i1;
+ return;
+}
+function __ZN7b2World5SolveERK10b2TimeStep(i5, i15) {
+ i5 = i5 | 0;
+ i15 = i15 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0, i23 = 0, i24 = 0, i25 = 0, i26 = 0, i27 = 0, i28 = 0, i29 = 0, i30 = 0, i31 = 0, i32 = 0, i33 = 0, i34 = 0, i35 = 0, i36 = 0, i37 = 0, i38 = 0, d39 = 0.0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 96 | 0;
+ i4 = i3 + 32 | 0;
+ i9 = i3;
+ i2 = i3 + 84 | 0;
+ i11 = i5 + 103008 | 0;
+ HEAPF32[i11 >> 2] = 0.0;
+ i14 = i5 + 103012 | 0;
+ HEAPF32[i14 >> 2] = 0.0;
+ i8 = i5 + 103016 | 0;
+ HEAPF32[i8 >> 2] = 0.0;
+ i16 = i5 + 102960 | 0;
+ i1 = i5 + 102872 | 0;
+ i6 = i5 + 68 | 0;
+ __ZN8b2IslandC2EiiiP16b2StackAllocatorP17b2ContactListener(i4, HEAP32[i16 >> 2] | 0, HEAP32[i5 + 102936 >> 2] | 0, HEAP32[i5 + 102964 >> 2] | 0, i6, HEAP32[i5 + 102944 >> 2] | 0);
+ i7 = i5 + 102952 | 0;
+ i17 = HEAP32[i7 >> 2] | 0;
+ if ((i17 | 0) != 0) {
+ do {
+ i38 = i17 + 4 | 0;
+ HEAP16[i38 >> 1] = HEAP16[i38 >> 1] & 65534;
+ i17 = HEAP32[i17 + 96 >> 2] | 0;
+ } while ((i17 | 0) != 0);
+ }
+ i17 = HEAP32[i5 + 102932 >> 2] | 0;
+ if ((i17 | 0) != 0) {
+ do {
+ i38 = i17 + 4 | 0;
+ HEAP32[i38 >> 2] = HEAP32[i38 >> 2] & -2;
+ i17 = HEAP32[i17 + 12 >> 2] | 0;
+ } while ((i17 | 0) != 0);
+ }
+ i17 = HEAP32[i5 + 102956 >> 2] | 0;
+ if ((i17 | 0) != 0) {
+ do {
+ HEAP8[i17 + 60 | 0] = 0;
+ i17 = HEAP32[i17 + 12 >> 2] | 0;
+ } while ((i17 | 0) != 0);
+ }
+ i24 = HEAP32[i16 >> 2] | 0;
+ i16 = __ZN16b2StackAllocator8AllocateEi(i6, i24 << 2) | 0;
+ i32 = HEAP32[i7 >> 2] | 0;
+ L13 : do {
+ if ((i32 | 0) != 0) {
+ i18 = i4 + 28 | 0;
+ i30 = i4 + 36 | 0;
+ i27 = i4 + 32 | 0;
+ i17 = i4 + 40 | 0;
+ i23 = i4 + 8 | 0;
+ i29 = i4 + 48 | 0;
+ i28 = i4 + 16 | 0;
+ i26 = i4 + 44 | 0;
+ i31 = i4 + 12 | 0;
+ i25 = i5 + 102968 | 0;
+ i22 = i5 + 102976 | 0;
+ i21 = i9 + 12 | 0;
+ i20 = i9 + 16 | 0;
+ i19 = i9 + 20 | 0;
+ L15 : while (1) {
+ i33 = i32 + 4 | 0;
+ i34 = HEAP16[i33 >> 1] | 0;
+ if ((i34 & 35) == 34 ? (HEAP32[i32 >> 2] | 0) != 0 : 0) {
+ HEAP32[i18 >> 2] = 0;
+ HEAP32[i30 >> 2] = 0;
+ HEAP32[i27 >> 2] = 0;
+ HEAP32[i16 >> 2] = i32;
+ HEAP16[i33 >> 1] = i34 & 65535 | 1;
+ i35 = 1;
+ do {
+ i35 = i35 + -1 | 0;
+ i33 = HEAP32[i16 + (i35 << 2) >> 2] | 0;
+ i34 = i33 + 4 | 0;
+ i36 = HEAP16[i34 >> 1] | 0;
+ if ((i36 & 32) == 0) {
+ i8 = 13;
+ break L15;
+ }
+ i37 = HEAP32[i18 >> 2] | 0;
+ if ((i37 | 0) >= (HEAP32[i17 >> 2] | 0)) {
+ i8 = 15;
+ break L15;
+ }
+ HEAP32[i33 + 8 >> 2] = i37;
+ i38 = HEAP32[i18 >> 2] | 0;
+ HEAP32[(HEAP32[i23 >> 2] | 0) + (i38 << 2) >> 2] = i33;
+ HEAP32[i18 >> 2] = i38 + 1;
+ i36 = i36 & 65535;
+ if ((i36 & 2 | 0) == 0) {
+ HEAP16[i34 >> 1] = i36 | 2;
+ HEAPF32[i33 + 144 >> 2] = 0.0;
+ }
+ if ((HEAP32[i33 >> 2] | 0) != 0) {
+ i34 = HEAP32[i33 + 112 >> 2] | 0;
+ if ((i34 | 0) != 0) {
+ do {
+ i38 = HEAP32[i34 + 4 >> 2] | 0;
+ i36 = i38 + 4 | 0;
+ if (((HEAP32[i36 >> 2] & 7 | 0) == 6 ? (HEAP8[(HEAP32[i38 + 48 >> 2] | 0) + 38 | 0] | 0) == 0 : 0) ? (HEAP8[(HEAP32[i38 + 52 >> 2] | 0) + 38 | 0] | 0) == 0 : 0) {
+ i37 = HEAP32[i30 >> 2] | 0;
+ if ((i37 | 0) >= (HEAP32[i26 >> 2] | 0)) {
+ i8 = 25;
+ break L15;
+ }
+ HEAP32[i30 >> 2] = i37 + 1;
+ HEAP32[(HEAP32[i31 >> 2] | 0) + (i37 << 2) >> 2] = i38;
+ HEAP32[i36 >> 2] = HEAP32[i36 >> 2] | 1;
+ i38 = HEAP32[i34 >> 2] | 0;
+ i36 = i38 + 4 | 0;
+ i37 = HEAP16[i36 >> 1] | 0;
+ if ((i37 & 1) == 0) {
+ if ((i35 | 0) >= (i24 | 0)) {
+ i8 = 28;
+ break L15;
+ }
+ HEAP32[i16 + (i35 << 2) >> 2] = i38;
+ HEAP16[i36 >> 1] = i37 & 65535 | 1;
+ i35 = i35 + 1 | 0;
+ }
+ }
+ i34 = HEAP32[i34 + 12 >> 2] | 0;
+ } while ((i34 | 0) != 0);
+ }
+ i33 = HEAP32[i33 + 108 >> 2] | 0;
+ if ((i33 | 0) != 0) {
+ do {
+ i37 = i33 + 4 | 0;
+ i36 = HEAP32[i37 >> 2] | 0;
+ if ((HEAP8[i36 + 60 | 0] | 0) == 0 ? (i10 = HEAP32[i33 >> 2] | 0, i13 = i10 + 4 | 0, i12 = HEAP16[i13 >> 1] | 0, !((i12 & 32) == 0)) : 0) {
+ i34 = HEAP32[i27 >> 2] | 0;
+ if ((i34 | 0) >= (HEAP32[i29 >> 2] | 0)) {
+ i8 = 35;
+ break L15;
+ }
+ HEAP32[i27 >> 2] = i34 + 1;
+ HEAP32[(HEAP32[i28 >> 2] | 0) + (i34 << 2) >> 2] = i36;
+ HEAP8[(HEAP32[i37 >> 2] | 0) + 60 | 0] = 1;
+ if ((i12 & 1) == 0) {
+ if ((i35 | 0) >= (i24 | 0)) {
+ i8 = 38;
+ break L15;
+ }
+ HEAP32[i16 + (i35 << 2) >> 2] = i10;
+ HEAP16[i13 >> 1] = i12 & 65535 | 1;
+ i35 = i35 + 1 | 0;
+ }
+ }
+ i33 = HEAP32[i33 + 12 >> 2] | 0;
+ } while ((i33 | 0) != 0);
+ }
+ }
+ } while ((i35 | 0) > 0);
+ __ZN8b2Island5SolveEP9b2ProfileRK10b2TimeStepRK6b2Vec2b(i4, i9, i15, i25, (HEAP8[i22] | 0) != 0);
+ HEAPF32[i11 >> 2] = +HEAPF32[i21 >> 2] + +HEAPF32[i11 >> 2];
+ HEAPF32[i14 >> 2] = +HEAPF32[i20 >> 2] + +HEAPF32[i14 >> 2];
+ HEAPF32[i8 >> 2] = +HEAPF32[i19 >> 2] + +HEAPF32[i8 >> 2];
+ i35 = HEAP32[i18 >> 2] | 0;
+ if ((i35 | 0) > 0) {
+ i33 = HEAP32[i23 >> 2] | 0;
+ i36 = 0;
+ do {
+ i34 = HEAP32[i33 + (i36 << 2) >> 2] | 0;
+ if ((HEAP32[i34 >> 2] | 0) == 0) {
+ i38 = i34 + 4 | 0;
+ HEAP16[i38 >> 1] = HEAP16[i38 >> 1] & 65534;
+ }
+ i36 = i36 + 1 | 0;
+ } while ((i36 | 0) < (i35 | 0));
+ }
+ }
+ i32 = HEAP32[i32 + 96 >> 2] | 0;
+ if ((i32 | 0) == 0) {
+ break L13;
+ }
+ }
+ if ((i8 | 0) == 13) {
+ ___assert_fail(2232, 2184, 445, 2256);
+ } else if ((i8 | 0) == 15) {
+ ___assert_fail(2520, 2440, 54, 2472);
+ } else if ((i8 | 0) == 25) {
+ ___assert_fail(2480, 2440, 62, 2472);
+ } else if ((i8 | 0) == 28) {
+ ___assert_fail(2264, 2184, 495, 2256);
+ } else if ((i8 | 0) == 35) {
+ ___assert_fail(2408, 2440, 68, 2472);
+ } else if ((i8 | 0) == 38) {
+ ___assert_fail(2264, 2184, 524, 2256);
+ }
+ }
+ } while (0);
+ __ZN16b2StackAllocator4FreeEPv(i6, i16);
+ __ZN7b2TimerC2Ev(i2);
+ i6 = HEAP32[i7 >> 2] | 0;
+ if ((i6 | 0) == 0) {
+ __ZN16b2ContactManager15FindNewContactsEv(i1);
+ d39 = +__ZNK7b2Timer15GetMillisecondsEv(i2);
+ i38 = i5 + 103020 | 0;
+ HEAPF32[i38 >> 2] = d39;
+ __ZN8b2IslandD2Ev(i4);
+ STACKTOP = i3;
+ return;
+ }
+ do {
+ if (!((HEAP16[i6 + 4 >> 1] & 1) == 0) ? (HEAP32[i6 >> 2] | 0) != 0 : 0) {
+ __ZN6b2Body19SynchronizeFixturesEv(i6);
+ }
+ i6 = HEAP32[i6 + 96 >> 2] | 0;
+ } while ((i6 | 0) != 0);
+ __ZN16b2ContactManager15FindNewContactsEv(i1);
+ d39 = +__ZNK7b2Timer15GetMillisecondsEv(i2);
+ i38 = i5 + 103020 | 0;
+ HEAPF32[i38 >> 2] = d39;
+ __ZN8b2IslandD2Ev(i4);
+ STACKTOP = i3;
+ return;
+}
+function __ZN15b2ContactSolver29InitializeVelocityConstraintsEv(i10) {
+ i10 = i10 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, d17 = 0.0, d18 = 0.0, d19 = 0.0, d20 = 0.0, d21 = 0.0, d22 = 0.0, d23 = 0.0, d24 = 0.0, d25 = 0.0, d26 = 0.0, d27 = 0.0, d28 = 0.0, d29 = 0.0, d30 = 0.0, i31 = 0, d32 = 0.0, i33 = 0, i34 = 0, i35 = 0, i36 = 0, i37 = 0, d38 = 0.0, d39 = 0.0, d40 = 0.0, d41 = 0.0, i42 = 0, d43 = 0.0, d44 = 0.0, d45 = 0.0, d46 = 0.0, d47 = 0.0, d48 = 0.0, i49 = 0, i50 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 64 | 0;
+ i8 = i1 + 40 | 0;
+ i3 = i1 + 24 | 0;
+ i5 = i1;
+ i4 = i10 + 48 | 0;
+ if ((HEAP32[i4 >> 2] | 0) <= 0) {
+ STACKTOP = i1;
+ return;
+ }
+ i9 = i10 + 40 | 0;
+ i2 = i10 + 36 | 0;
+ i7 = i10 + 44 | 0;
+ i6 = i10 + 24 | 0;
+ i13 = i10 + 28 | 0;
+ i14 = i8 + 8 | 0;
+ i12 = i8 + 12 | 0;
+ i11 = i3 + 8 | 0;
+ i10 = i3 + 12 | 0;
+ i16 = 0;
+ while (1) {
+ i15 = HEAP32[i9 >> 2] | 0;
+ i33 = HEAP32[i2 >> 2] | 0;
+ i31 = HEAP32[(HEAP32[i7 >> 2] | 0) + (HEAP32[i15 + (i16 * 152 | 0) + 148 >> 2] << 2) >> 2] | 0;
+ i35 = HEAP32[i15 + (i16 * 152 | 0) + 112 >> 2] | 0;
+ i42 = HEAP32[i15 + (i16 * 152 | 0) + 116 >> 2] | 0;
+ d30 = +HEAPF32[i15 + (i16 * 152 | 0) + 120 >> 2];
+ d24 = +HEAPF32[i15 + (i16 * 152 | 0) + 124 >> 2];
+ d17 = +HEAPF32[i15 + (i16 * 152 | 0) + 128 >> 2];
+ d18 = +HEAPF32[i15 + (i16 * 152 | 0) + 132 >> 2];
+ i36 = i33 + (i16 * 88 | 0) + 48 | 0;
+ d39 = +HEAPF32[i36 >> 2];
+ d40 = +HEAPF32[i36 + 4 >> 2];
+ i36 = i33 + (i16 * 88 | 0) + 56 | 0;
+ d41 = +HEAPF32[i36 >> 2];
+ d43 = +HEAPF32[i36 + 4 >> 2];
+ i36 = HEAP32[i6 >> 2] | 0;
+ i37 = i36 + (i35 * 12 | 0) | 0;
+ d26 = +HEAPF32[i37 >> 2];
+ d27 = +HEAPF32[i37 + 4 >> 2];
+ d32 = +HEAPF32[i36 + (i35 * 12 | 0) + 8 >> 2];
+ i37 = HEAP32[i13 >> 2] | 0;
+ i34 = i37 + (i35 * 12 | 0) | 0;
+ d22 = +HEAPF32[i34 >> 2];
+ d25 = +HEAPF32[i34 + 4 >> 2];
+ d23 = +HEAPF32[i37 + (i35 * 12 | 0) + 8 >> 2];
+ i35 = i36 + (i42 * 12 | 0) | 0;
+ d28 = +HEAPF32[i35 >> 2];
+ d29 = +HEAPF32[i35 + 4 >> 2];
+ d38 = +HEAPF32[i36 + (i42 * 12 | 0) + 8 >> 2];
+ i36 = i37 + (i42 * 12 | 0) | 0;
+ d20 = +HEAPF32[i36 >> 2];
+ d19 = +HEAPF32[i36 + 4 >> 2];
+ d21 = +HEAPF32[i37 + (i42 * 12 | 0) + 8 >> 2];
+ if ((HEAP32[i31 + 124 >> 2] | 0) <= 0) {
+ i2 = 4;
+ break;
+ }
+ d44 = +HEAPF32[i33 + (i16 * 88 | 0) + 80 >> 2];
+ d45 = +HEAPF32[i33 + (i16 * 88 | 0) + 76 >> 2];
+ d47 = +Math_sin(+d32);
+ HEAPF32[i14 >> 2] = d47;
+ d48 = +Math_cos(+d32);
+ HEAPF32[i12 >> 2] = d48;
+ d32 = +Math_sin(+d38);
+ HEAPF32[i11 >> 2] = d32;
+ d38 = +Math_cos(+d38);
+ HEAPF32[i10 >> 2] = d38;
+ d46 = +(d26 - (d39 * d48 - d40 * d47));
+ d40 = +(d27 - (d40 * d48 + d39 * d47));
+ i37 = i8;
+ HEAPF32[i37 >> 2] = d46;
+ HEAPF32[i37 + 4 >> 2] = d40;
+ d40 = +(d28 - (d41 * d38 - d43 * d32));
+ d43 = +(d29 - (d43 * d38 + d41 * d32));
+ i37 = i3;
+ HEAPF32[i37 >> 2] = d40;
+ HEAPF32[i37 + 4 >> 2] = d43;
+ __ZN15b2WorldManifold10InitializeEPK10b2ManifoldRK11b2TransformfS5_f(i5, i31 + 64 | 0, i8, d45, i3, d44);
+ i37 = i15 + (i16 * 152 | 0) + 72 | 0;
+ i42 = i5;
+ i33 = HEAP32[i42 + 4 >> 2] | 0;
+ i31 = i37;
+ HEAP32[i31 >> 2] = HEAP32[i42 >> 2];
+ HEAP32[i31 + 4 >> 2] = i33;
+ i31 = i15 + (i16 * 152 | 0) + 144 | 0;
+ i33 = HEAP32[i31 >> 2] | 0;
+ do {
+ if ((i33 | 0) > 0) {
+ i36 = i15 + (i16 * 152 | 0) + 76 | 0;
+ d32 = d30 + d24;
+ i35 = i15 + (i16 * 152 | 0) + 140 | 0;
+ i34 = 0;
+ do {
+ i49 = i5 + (i34 << 3) + 8 | 0;
+ d41 = +HEAPF32[i49 >> 2] - d26;
+ i42 = i5 + (i34 << 3) + 12 | 0;
+ d39 = +d41;
+ d40 = +(+HEAPF32[i42 >> 2] - d27);
+ i50 = i15 + (i16 * 152 | 0) + (i34 * 36 | 0) | 0;
+ HEAPF32[i50 >> 2] = d39;
+ HEAPF32[i50 + 4 >> 2] = d40;
+ d40 = +HEAPF32[i49 >> 2] - d28;
+ d39 = +d40;
+ d47 = +(+HEAPF32[i42 >> 2] - d29);
+ i42 = i15 + (i16 * 152 | 0) + (i34 * 36 | 0) + 8 | 0;
+ HEAPF32[i42 >> 2] = d39;
+ HEAPF32[i42 + 4 >> 2] = d47;
+ d47 = +HEAPF32[i36 >> 2];
+ d39 = +HEAPF32[i15 + (i16 * 152 | 0) + (i34 * 36 | 0) + 4 >> 2];
+ d43 = +HEAPF32[i37 >> 2];
+ d48 = d41 * d47 - d39 * d43;
+ d38 = +HEAPF32[i15 + (i16 * 152 | 0) + (i34 * 36 | 0) + 12 >> 2];
+ d43 = d47 * d40 - d43 * d38;
+ d43 = d32 + d48 * d17 * d48 + d43 * d18 * d43;
+ if (d43 > 0.0) {
+ d43 = 1.0 / d43;
+ } else {
+ d43 = 0.0;
+ }
+ HEAPF32[i15 + (i16 * 152 | 0) + (i34 * 36 | 0) + 24 >> 2] = d43;
+ d43 = +HEAPF32[i36 >> 2];
+ d47 = -+HEAPF32[i37 >> 2];
+ d48 = d41 * d47 - d43 * d39;
+ d43 = d40 * d47 - d43 * d38;
+ d43 = d32 + d48 * d17 * d48 + d43 * d18 * d43;
+ if (d43 > 0.0) {
+ d43 = 1.0 / d43;
+ } else {
+ d43 = 0.0;
+ }
+ HEAPF32[i15 + (i16 * 152 | 0) + (i34 * 36 | 0) + 28 >> 2] = d43;
+ i42 = i15 + (i16 * 152 | 0) + (i34 * 36 | 0) + 32 | 0;
+ HEAPF32[i42 >> 2] = 0.0;
+ d38 = +HEAPF32[i37 >> 2] * (d20 - d21 * d38 - d22 + d23 * d39) + +HEAPF32[i36 >> 2] * (d19 + d21 * d40 - d25 - d23 * d41);
+ if (d38 < -1.0) {
+ HEAPF32[i42 >> 2] = -(d38 * +HEAPF32[i35 >> 2]);
+ }
+ i34 = i34 + 1 | 0;
+ } while ((i34 | 0) != (i33 | 0));
+ if ((HEAP32[i31 >> 2] | 0) == 2) {
+ d45 = +HEAPF32[i15 + (i16 * 152 | 0) + 76 >> 2];
+ d20 = +HEAPF32[i37 >> 2];
+ d44 = +HEAPF32[i15 + (i16 * 152 | 0) >> 2] * d45 - +HEAPF32[i15 + (i16 * 152 | 0) + 4 >> 2] * d20;
+ d19 = d45 * +HEAPF32[i15 + (i16 * 152 | 0) + 8 >> 2] - d20 * +HEAPF32[i15 + (i16 * 152 | 0) + 12 >> 2];
+ d47 = d45 * +HEAPF32[i15 + (i16 * 152 | 0) + 36 >> 2] - d20 * +HEAPF32[i15 + (i16 * 152 | 0) + 40 >> 2];
+ d20 = d45 * +HEAPF32[i15 + (i16 * 152 | 0) + 44 >> 2] - d20 * +HEAPF32[i15 + (i16 * 152 | 0) + 48 >> 2];
+ d45 = d30 + d24;
+ d46 = d17 * d44;
+ d48 = d18 * d19;
+ d19 = d45 + d44 * d46 + d19 * d48;
+ d18 = d45 + d47 * d17 * d47 + d20 * d18 * d20;
+ d17 = d45 + d46 * d47 + d48 * d20;
+ d20 = d19 * d18 - d17 * d17;
+ if (!(d19 * d19 < d20 * 1.0e3)) {
+ HEAP32[i31 >> 2] = 1;
+ break;
+ }
+ HEAPF32[i15 + (i16 * 152 | 0) + 96 >> 2] = d19;
+ HEAPF32[i15 + (i16 * 152 | 0) + 100 >> 2] = d17;
+ HEAPF32[i15 + (i16 * 152 | 0) + 104 >> 2] = d17;
+ HEAPF32[i15 + (i16 * 152 | 0) + 108 >> 2] = d18;
+ if (d20 != 0.0) {
+ d20 = 1.0 / d20;
+ }
+ d48 = -(d20 * d17);
+ HEAPF32[i15 + (i16 * 152 | 0) + 80 >> 2] = d18 * d20;
+ HEAPF32[i15 + (i16 * 152 | 0) + 84 >> 2] = d48;
+ HEAPF32[i15 + (i16 * 152 | 0) + 88 >> 2] = d48;
+ HEAPF32[i15 + (i16 * 152 | 0) + 92 >> 2] = d19 * d20;
+ }
+ }
+ } while (0);
+ i16 = i16 + 1 | 0;
+ if ((i16 | 0) >= (HEAP32[i4 >> 2] | 0)) {
+ i2 = 21;
+ break;
+ }
+ }
+ if ((i2 | 0) == 4) {
+ ___assert_fail(6584, 6520, 168, 6616);
+ } else if ((i2 | 0) == 21) {
+ STACKTOP = i1;
+ return;
+ }
+}
+function __Z17b2CollidePolygonsP10b2ManifoldPK14b2PolygonShapeRK11b2TransformS3_S6_(i5, i27, i28, i24, i14) {
+ i5 = i5 | 0;
+ i27 = i27 | 0;
+ i28 = i28 | 0;
+ i24 = i24 | 0;
+ i14 = i14 | 0;
+ var i1 = 0, i2 = 0, d3 = 0.0, i4 = 0, d6 = 0.0, d7 = 0.0, d8 = 0.0, d9 = 0.0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, d15 = 0.0, d16 = 0.0, i17 = 0, d18 = 0.0, d19 = 0.0, i20 = 0, d21 = 0.0, d22 = 0.0, d23 = 0.0, d25 = 0.0, d26 = 0.0, d29 = 0.0, d30 = 0.0, i31 = 0, d32 = 0.0, i33 = 0, i34 = 0, d35 = 0.0, d36 = 0.0, d37 = 0.0, d38 = 0.0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 96 | 0;
+ i17 = i1 + 92 | 0;
+ i20 = i1 + 88 | 0;
+ i13 = i1;
+ i11 = i1 + 80 | 0;
+ i12 = i1 + 56 | 0;
+ i4 = i1 + 32 | 0;
+ i10 = i1 + 24 | 0;
+ i2 = i5 + 60 | 0;
+ HEAP32[i2 >> 2] = 0;
+ d3 = +HEAPF32[i27 + 8 >> 2] + +HEAPF32[i24 + 8 >> 2];
+ HEAP32[i17 >> 2] = 0;
+ d7 = +__ZL19b2FindMaxSeparationPiPK14b2PolygonShapeRK11b2TransformS2_S5_(i17, i27, i28, i24, i14);
+ if (d7 > d3) {
+ STACKTOP = i1;
+ return;
+ }
+ HEAP32[i20 >> 2] = 0;
+ d6 = +__ZL19b2FindMaxSeparationPiPK14b2PolygonShapeRK11b2TransformS2_S5_(i20, i24, i14, i27, i28);
+ if (d6 > d3) {
+ STACKTOP = i1;
+ return;
+ }
+ if (d6 > d7 * .9800000190734863 + .0010000000474974513) {
+ d18 = +HEAPF32[i14 >> 2];
+ d19 = +HEAPF32[i14 + 4 >> 2];
+ d15 = +HEAPF32[i14 + 8 >> 2];
+ d16 = +HEAPF32[i14 + 12 >> 2];
+ d9 = +HEAPF32[i28 >> 2];
+ d6 = +HEAPF32[i28 + 4 >> 2];
+ d7 = +HEAPF32[i28 + 8 >> 2];
+ d8 = +HEAPF32[i28 + 12 >> 2];
+ i17 = HEAP32[i20 >> 2] | 0;
+ HEAP32[i5 + 56 >> 2] = 2;
+ i14 = 1;
+ i20 = i24;
+ } else {
+ d18 = +HEAPF32[i28 >> 2];
+ d19 = +HEAPF32[i28 + 4 >> 2];
+ d15 = +HEAPF32[i28 + 8 >> 2];
+ d16 = +HEAPF32[i28 + 12 >> 2];
+ d9 = +HEAPF32[i14 >> 2];
+ d6 = +HEAPF32[i14 + 4 >> 2];
+ d7 = +HEAPF32[i14 + 8 >> 2];
+ d8 = +HEAPF32[i14 + 12 >> 2];
+ i17 = HEAP32[i17 >> 2] | 0;
+ HEAP32[i5 + 56 >> 2] = 1;
+ i14 = 0;
+ i20 = i27;
+ i27 = i24;
+ }
+ i28 = HEAP32[i27 + 148 >> 2] | 0;
+ if (!((i17 | 0) > -1)) {
+ ___assert_fail(5640, 5688, 151, 5728);
+ }
+ i24 = HEAP32[i20 + 148 >> 2] | 0;
+ if ((i24 | 0) <= (i17 | 0)) {
+ ___assert_fail(5640, 5688, 151, 5728);
+ }
+ d21 = +HEAPF32[i20 + (i17 << 3) + 84 >> 2];
+ d36 = +HEAPF32[i20 + (i17 << 3) + 88 >> 2];
+ d22 = d16 * d21 - d15 * d36;
+ d36 = d15 * d21 + d16 * d36;
+ d21 = d8 * d22 + d7 * d36;
+ d22 = d8 * d36 - d7 * d22;
+ if ((i28 | 0) > 0) {
+ i33 = 0;
+ i34 = 0;
+ d23 = 3.4028234663852886e+38;
+ while (1) {
+ d25 = d21 * +HEAPF32[i27 + (i33 << 3) + 84 >> 2] + d22 * +HEAPF32[i27 + (i33 << 3) + 88 >> 2];
+ i31 = d25 < d23;
+ i34 = i31 ? i33 : i34;
+ i33 = i33 + 1 | 0;
+ if ((i33 | 0) == (i28 | 0)) {
+ break;
+ } else {
+ d23 = i31 ? d25 : d23;
+ }
+ }
+ } else {
+ i34 = 0;
+ }
+ i31 = i34 + 1 | 0;
+ i33 = (i31 | 0) < (i28 | 0) ? i31 : 0;
+ d35 = +HEAPF32[i27 + (i34 << 3) + 20 >> 2];
+ d32 = +HEAPF32[i27 + (i34 << 3) + 24 >> 2];
+ d36 = +(d9 + (d8 * d35 - d7 * d32));
+ d32 = +(d6 + (d7 * d35 + d8 * d32));
+ i31 = i13;
+ HEAPF32[i31 >> 2] = d36;
+ HEAPF32[i31 + 4 >> 2] = d32;
+ i31 = i17 & 255;
+ i28 = i13 + 8 | 0;
+ HEAP8[i28] = i31;
+ HEAP8[i28 + 1 | 0] = i34;
+ HEAP8[i28 + 2 | 0] = 1;
+ HEAP8[i28 + 3 | 0] = 0;
+ d32 = +HEAPF32[i27 + (i33 << 3) + 20 >> 2];
+ d36 = +HEAPF32[i27 + (i33 << 3) + 24 >> 2];
+ d35 = +(d9 + (d8 * d32 - d7 * d36));
+ d36 = +(d6 + (d7 * d32 + d8 * d36));
+ i27 = i13 + 12 | 0;
+ HEAPF32[i27 >> 2] = d35;
+ HEAPF32[i27 + 4 >> 2] = d36;
+ i27 = i13 + 20 | 0;
+ HEAP8[i27] = i31;
+ HEAP8[i27 + 1 | 0] = i33;
+ HEAP8[i27 + 2 | 0] = 1;
+ HEAP8[i27 + 3 | 0] = 0;
+ i27 = i17 + 1 | 0;
+ i24 = (i27 | 0) < (i24 | 0) ? i27 : 0;
+ i34 = i20 + (i17 << 3) + 20 | 0;
+ d26 = +HEAPF32[i34 >> 2];
+ d25 = +HEAPF32[i34 + 4 >> 2];
+ i34 = i20 + (i24 << 3) + 20 | 0;
+ d30 = +HEAPF32[i34 >> 2];
+ d29 = +HEAPF32[i34 + 4 >> 2];
+ d32 = d30 - d26;
+ d35 = d29 - d25;
+ d21 = +Math_sqrt(+(d32 * d32 + d35 * d35));
+ if (!(d21 < 1.1920928955078125e-7)) {
+ d36 = 1.0 / d21;
+ d32 = d32 * d36;
+ d35 = d35 * d36;
+ }
+ d36 = d16 * d32 - d15 * d35;
+ d21 = d16 * d35 + d15 * d32;
+ HEAPF32[i11 >> 2] = d36;
+ HEAPF32[i11 + 4 >> 2] = d21;
+ d22 = -d36;
+ d38 = d18 + (d16 * d26 - d15 * d25);
+ d37 = d19 + (d15 * d26 + d16 * d25);
+ d23 = d38 * d21 + d37 * d22;
+ HEAPF32[i10 >> 2] = d22;
+ HEAPF32[i10 + 4 >> 2] = -d21;
+ if ((__Z19b2ClipSegmentToLineP12b2ClipVertexPKS_RK6b2Vec2fi(i12, i13, i10, d3 - (d38 * d36 + d37 * d21), i17) | 0) < 2) {
+ STACKTOP = i1;
+ return;
+ }
+ if ((__Z19b2ClipSegmentToLineP12b2ClipVertexPKS_RK6b2Vec2fi(i4, i12, i11, d3 + ((d18 + (d16 * d30 - d15 * d29)) * d36 + (d19 + (d15 * d30 + d16 * d29)) * d21), i24) | 0) < 2) {
+ STACKTOP = i1;
+ return;
+ }
+ d16 = +d35;
+ d15 = +-d32;
+ i10 = i5 + 40 | 0;
+ HEAPF32[i10 >> 2] = d16;
+ HEAPF32[i10 + 4 >> 2] = d15;
+ d15 = +((d26 + d30) * .5);
+ d16 = +((d25 + d29) * .5);
+ i10 = i5 + 48 | 0;
+ HEAPF32[i10 >> 2] = d15;
+ HEAPF32[i10 + 4 >> 2] = d16;
+ d16 = +HEAPF32[i4 >> 2];
+ d15 = +HEAPF32[i4 + 4 >> 2];
+ i10 = !(d21 * d16 + d15 * d22 - d23 <= d3);
+ if (i14 << 24 >> 24 == 0) {
+ if (i10) {
+ i10 = 0;
+ } else {
+ d38 = d16 - d9;
+ d36 = d15 - d6;
+ d37 = +(d8 * d38 + d7 * d36);
+ d38 = +(d8 * d36 - d7 * d38);
+ i10 = i5;
+ HEAPF32[i10 >> 2] = d37;
+ HEAPF32[i10 + 4 >> 2] = d38;
+ HEAP32[i5 + 16 >> 2] = HEAP32[i4 + 8 >> 2];
+ i10 = 1;
+ }
+ d16 = +HEAPF32[i4 + 12 >> 2];
+ d15 = +HEAPF32[i4 + 16 >> 2];
+ if (d21 * d16 + d15 * d22 - d23 <= d3) {
+ d38 = d16 - d9;
+ d36 = d15 - d6;
+ d37 = +(d8 * d38 + d7 * d36);
+ d38 = +(d8 * d36 - d7 * d38);
+ i34 = i5 + (i10 * 20 | 0) | 0;
+ HEAPF32[i34 >> 2] = d37;
+ HEAPF32[i34 + 4 >> 2] = d38;
+ HEAP32[i5 + (i10 * 20 | 0) + 16 >> 2] = HEAP32[i4 + 20 >> 2];
+ i10 = i10 + 1 | 0;
+ }
+ } else {
+ if (i10) {
+ i10 = 0;
+ } else {
+ d38 = d16 - d9;
+ d36 = d15 - d6;
+ d37 = +(d8 * d38 + d7 * d36);
+ d38 = +(d8 * d36 - d7 * d38);
+ i10 = i5;
+ HEAPF32[i10 >> 2] = d37;
+ HEAPF32[i10 + 4 >> 2] = d38;
+ i10 = i5 + 16 | 0;
+ i34 = HEAP32[i4 + 8 >> 2] | 0;
+ HEAP32[i10 >> 2] = i34;
+ HEAP8[i10] = i34 >>> 8;
+ HEAP8[i10 + 1 | 0] = i34;
+ HEAP8[i10 + 2 | 0] = i34 >>> 24;
+ HEAP8[i10 + 3 | 0] = i34 >>> 16;
+ i10 = 1;
+ }
+ d16 = +HEAPF32[i4 + 12 >> 2];
+ d15 = +HEAPF32[i4 + 16 >> 2];
+ if (d21 * d16 + d15 * d22 - d23 <= d3) {
+ d38 = d16 - d9;
+ d36 = d15 - d6;
+ d37 = +(d8 * d38 + d7 * d36);
+ d38 = +(d8 * d36 - d7 * d38);
+ i34 = i5 + (i10 * 20 | 0) | 0;
+ HEAPF32[i34 >> 2] = d37;
+ HEAPF32[i34 + 4 >> 2] = d38;
+ i34 = i5 + (i10 * 20 | 0) + 16 | 0;
+ i33 = HEAP32[i4 + 20 >> 2] | 0;
+ HEAP32[i34 >> 2] = i33;
+ HEAP8[i34] = i33 >>> 8;
+ HEAP8[i34 + 1 | 0] = i33;
+ HEAP8[i34 + 2 | 0] = i33 >>> 24;
+ HEAP8[i34 + 3 | 0] = i33 >>> 16;
+ i10 = i10 + 1 | 0;
+ }
+ }
+ HEAP32[i2 >> 2] = i10;
+ STACKTOP = i1;
+ return;
+}
+function __ZN8b2Island8SolveTOIERK10b2TimeStepii(i4, i11, i15, i18) {
+ i4 = i4 | 0;
+ i11 = i11 | 0;
+ i15 = i15 | 0;
+ i18 = i18 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, d12 = 0.0, d13 = 0.0, d14 = 0.0, d16 = 0.0, d17 = 0.0, d19 = 0.0, d20 = 0.0, d21 = 0.0, i22 = 0, i23 = 0, i24 = 0, i25 = 0, d26 = 0.0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 128 | 0;
+ i2 = i1 + 96 | 0;
+ i10 = i1 + 52 | 0;
+ i3 = i1;
+ i6 = i4 + 28 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ if ((i5 | 0) <= (i15 | 0)) {
+ ___assert_fail(5464, 5488, 386, 5520);
+ }
+ if ((i5 | 0) <= (i18 | 0)) {
+ ___assert_fail(5536, 5488, 387, 5520);
+ }
+ if ((i5 | 0) > 0) {
+ i9 = i4 + 8 | 0;
+ i8 = i4 + 20 | 0;
+ i7 = i4 + 24 | 0;
+ i22 = 0;
+ while (1) {
+ i23 = HEAP32[(HEAP32[i9 >> 2] | 0) + (i22 << 2) >> 2] | 0;
+ i5 = i23 + 44 | 0;
+ i24 = HEAP32[i5 + 4 >> 2] | 0;
+ i25 = (HEAP32[i8 >> 2] | 0) + (i22 * 12 | 0) | 0;
+ HEAP32[i25 >> 2] = HEAP32[i5 >> 2];
+ HEAP32[i25 + 4 >> 2] = i24;
+ HEAPF32[(HEAP32[i8 >> 2] | 0) + (i22 * 12 | 0) + 8 >> 2] = +HEAPF32[i23 + 56 >> 2];
+ i25 = i23 + 64 | 0;
+ i24 = HEAP32[i25 + 4 >> 2] | 0;
+ i5 = (HEAP32[i7 >> 2] | 0) + (i22 * 12 | 0) | 0;
+ HEAP32[i5 >> 2] = HEAP32[i25 >> 2];
+ HEAP32[i5 + 4 >> 2] = i24;
+ i5 = HEAP32[i7 >> 2] | 0;
+ HEAPF32[i5 + (i22 * 12 | 0) + 8 >> 2] = +HEAPF32[i23 + 72 >> 2];
+ i22 = i22 + 1 | 0;
+ if ((i22 | 0) >= (HEAP32[i6 >> 2] | 0)) {
+ i22 = i5;
+ break;
+ }
+ }
+ } else {
+ i8 = i4 + 20 | 0;
+ i22 = HEAP32[i4 + 24 >> 2] | 0;
+ }
+ i5 = i4 + 12 | 0;
+ HEAP32[i10 + 24 >> 2] = HEAP32[i5 >> 2];
+ i7 = i4 + 36 | 0;
+ HEAP32[i10 + 28 >> 2] = HEAP32[i7 >> 2];
+ HEAP32[i10 + 40 >> 2] = HEAP32[i4 >> 2];
+ HEAP32[i10 + 0 >> 2] = HEAP32[i11 + 0 >> 2];
+ HEAP32[i10 + 4 >> 2] = HEAP32[i11 + 4 >> 2];
+ HEAP32[i10 + 8 >> 2] = HEAP32[i11 + 8 >> 2];
+ HEAP32[i10 + 12 >> 2] = HEAP32[i11 + 12 >> 2];
+ HEAP32[i10 + 16 >> 2] = HEAP32[i11 + 16 >> 2];
+ HEAP32[i10 + 20 >> 2] = HEAP32[i11 + 20 >> 2];
+ HEAP32[i10 + 32 >> 2] = HEAP32[i8 >> 2];
+ i9 = i4 + 24 | 0;
+ HEAP32[i10 + 36 >> 2] = i22;
+ __ZN15b2ContactSolverC2EP18b2ContactSolverDef(i3, i10);
+ i10 = i11 + 16 | 0;
+ L13 : do {
+ if ((HEAP32[i10 >> 2] | 0) > 0) {
+ i22 = 0;
+ do {
+ i22 = i22 + 1 | 0;
+ if (__ZN15b2ContactSolver27SolveTOIPositionConstraintsEii(i3, i15, i18) | 0) {
+ break L13;
+ }
+ } while ((i22 | 0) < (HEAP32[i10 >> 2] | 0));
+ }
+ } while (0);
+ i10 = i4 + 8 | 0;
+ i24 = (HEAP32[i8 >> 2] | 0) + (i15 * 12 | 0) | 0;
+ i25 = HEAP32[i24 + 4 >> 2] | 0;
+ i23 = (HEAP32[(HEAP32[i10 >> 2] | 0) + (i15 << 2) >> 2] | 0) + 36 | 0;
+ HEAP32[i23 >> 2] = HEAP32[i24 >> 2];
+ HEAP32[i23 + 4 >> 2] = i25;
+ i23 = HEAP32[i8 >> 2] | 0;
+ i25 = HEAP32[i10 >> 2] | 0;
+ HEAPF32[(HEAP32[i25 + (i15 << 2) >> 2] | 0) + 52 >> 2] = +HEAPF32[i23 + (i15 * 12 | 0) + 8 >> 2];
+ i23 = i23 + (i18 * 12 | 0) | 0;
+ i24 = HEAP32[i23 + 4 >> 2] | 0;
+ i25 = (HEAP32[i25 + (i18 << 2) >> 2] | 0) + 36 | 0;
+ HEAP32[i25 >> 2] = HEAP32[i23 >> 2];
+ HEAP32[i25 + 4 >> 2] = i24;
+ HEAPF32[(HEAP32[(HEAP32[i10 >> 2] | 0) + (i18 << 2) >> 2] | 0) + 52 >> 2] = +HEAPF32[(HEAP32[i8 >> 2] | 0) + (i18 * 12 | 0) + 8 >> 2];
+ __ZN15b2ContactSolver29InitializeVelocityConstraintsEv(i3);
+ i18 = i11 + 12 | 0;
+ if ((HEAP32[i18 >> 2] | 0) > 0) {
+ i15 = 0;
+ do {
+ __ZN15b2ContactSolver24SolveVelocityConstraintsEv(i3);
+ i15 = i15 + 1 | 0;
+ } while ((i15 | 0) < (HEAP32[i18 >> 2] | 0));
+ }
+ d16 = +HEAPF32[i11 >> 2];
+ if ((HEAP32[i6 >> 2] | 0) > 0) {
+ i15 = 0;
+ do {
+ i25 = HEAP32[i8 >> 2] | 0;
+ i11 = i25 + (i15 * 12 | 0) | 0;
+ i24 = i11;
+ d12 = +HEAPF32[i24 >> 2];
+ d14 = +HEAPF32[i24 + 4 >> 2];
+ d13 = +HEAPF32[i25 + (i15 * 12 | 0) + 8 >> 2];
+ i25 = HEAP32[i9 >> 2] | 0;
+ i24 = i25 + (i15 * 12 | 0) | 0;
+ d19 = +HEAPF32[i24 >> 2];
+ d20 = +HEAPF32[i24 + 4 >> 2];
+ d17 = +HEAPF32[i25 + (i15 * 12 | 0) + 8 >> 2];
+ d26 = d16 * d19;
+ d21 = d16 * d20;
+ d21 = d26 * d26 + d21 * d21;
+ if (d21 > 4.0) {
+ d26 = 2.0 / +Math_sqrt(+d21);
+ d19 = d19 * d26;
+ d20 = d20 * d26;
+ }
+ d21 = d16 * d17;
+ if (d21 * d21 > 2.4674012660980225) {
+ if (!(d21 > 0.0)) {
+ d21 = -d21;
+ }
+ d17 = d17 * (1.5707963705062866 / d21);
+ }
+ d21 = d12 + d16 * d19;
+ d14 = d14 + d16 * d20;
+ d26 = d13 + d16 * d17;
+ d12 = +d21;
+ d13 = +d14;
+ i25 = i11;
+ HEAPF32[i25 >> 2] = d12;
+ HEAPF32[i25 + 4 >> 2] = d13;
+ HEAPF32[(HEAP32[i8 >> 2] | 0) + (i15 * 12 | 0) + 8 >> 2] = d26;
+ d19 = +d19;
+ d20 = +d20;
+ i25 = (HEAP32[i9 >> 2] | 0) + (i15 * 12 | 0) | 0;
+ HEAPF32[i25 >> 2] = d19;
+ HEAPF32[i25 + 4 >> 2] = d20;
+ HEAPF32[(HEAP32[i9 >> 2] | 0) + (i15 * 12 | 0) + 8 >> 2] = d17;
+ i25 = HEAP32[(HEAP32[i10 >> 2] | 0) + (i15 << 2) >> 2] | 0;
+ i24 = i25 + 44 | 0;
+ HEAPF32[i24 >> 2] = d12;
+ HEAPF32[i24 + 4 >> 2] = d13;
+ HEAPF32[i25 + 56 >> 2] = d26;
+ i24 = i25 + 64 | 0;
+ HEAPF32[i24 >> 2] = d19;
+ HEAPF32[i24 + 4 >> 2] = d20;
+ HEAPF32[i25 + 72 >> 2] = d17;
+ d17 = +Math_sin(+d26);
+ HEAPF32[i25 + 20 >> 2] = d17;
+ d20 = +Math_cos(+d26);
+ HEAPF32[i25 + 24 >> 2] = d20;
+ d19 = +HEAPF32[i25 + 28 >> 2];
+ d26 = +HEAPF32[i25 + 32 >> 2];
+ d21 = +(d21 - (d20 * d19 - d17 * d26));
+ d26 = +(d14 - (d17 * d19 + d20 * d26));
+ i25 = i25 + 12 | 0;
+ HEAPF32[i25 >> 2] = d21;
+ HEAPF32[i25 + 4 >> 2] = d26;
+ i15 = i15 + 1 | 0;
+ } while ((i15 | 0) < (HEAP32[i6 >> 2] | 0));
+ }
+ i6 = HEAP32[i3 + 40 >> 2] | 0;
+ i4 = i4 + 4 | 0;
+ if ((HEAP32[i4 >> 2] | 0) == 0) {
+ __ZN15b2ContactSolverD2Ev(i3);
+ STACKTOP = i1;
+ return;
+ }
+ if ((HEAP32[i7 >> 2] | 0) <= 0) {
+ __ZN15b2ContactSolverD2Ev(i3);
+ STACKTOP = i1;
+ return;
+ }
+ i8 = i2 + 16 | 0;
+ i9 = 0;
+ do {
+ i10 = HEAP32[(HEAP32[i5 >> 2] | 0) + (i9 << 2) >> 2] | 0;
+ i11 = HEAP32[i6 + (i9 * 152 | 0) + 144 >> 2] | 0;
+ HEAP32[i8 >> 2] = i11;
+ if ((i11 | 0) > 0) {
+ i15 = 0;
+ do {
+ HEAPF32[i2 + (i15 << 2) >> 2] = +HEAPF32[i6 + (i9 * 152 | 0) + (i15 * 36 | 0) + 16 >> 2];
+ HEAPF32[i2 + (i15 << 2) + 8 >> 2] = +HEAPF32[i6 + (i9 * 152 | 0) + (i15 * 36 | 0) + 20 >> 2];
+ i15 = i15 + 1 | 0;
+ } while ((i15 | 0) != (i11 | 0));
+ }
+ i25 = HEAP32[i4 >> 2] | 0;
+ FUNCTION_TABLE_viii[HEAP32[(HEAP32[i25 >> 2] | 0) + 20 >> 2] & 3](i25, i10, i2);
+ i9 = i9 + 1 | 0;
+ } while ((i9 | 0) < (HEAP32[i7 >> 2] | 0));
+ __ZN15b2ContactSolverD2Ev(i3);
+ STACKTOP = i1;
+ return;
+}
+function __ZN20b2SeparationFunction10InitializeEPK14b2SimplexCachePK15b2DistanceProxyRK7b2SweepS5_S8_f(i2, i11, i13, i21, i12, i24, d9) {
+ i2 = i2 | 0;
+ i11 = i11 | 0;
+ i13 = i13 | 0;
+ i21 = i21 | 0;
+ i12 = i12 | 0;
+ i24 = i24 | 0;
+ d9 = +d9;
+ var i1 = 0, d3 = 0.0, d4 = 0.0, d5 = 0.0, d6 = 0.0, d7 = 0.0, d8 = 0.0, d10 = 0.0, i14 = 0, d15 = 0.0, d16 = 0.0, d17 = 0.0, d18 = 0.0, d19 = 0.0, d20 = 0.0, d22 = 0.0, i23 = 0, i25 = 0, i26 = 0, i27 = 0, d28 = 0.0, d29 = 0.0;
+ i1 = STACKTOP;
+ HEAP32[i2 >> 2] = i13;
+ HEAP32[i2 + 4 >> 2] = i12;
+ i14 = HEAP16[i11 + 4 >> 1] | 0;
+ if (!(i14 << 16 >> 16 != 0 & (i14 & 65535) < 3)) {
+ ___assert_fail(3744, 3560, 50, 3768);
+ }
+ i23 = i2 + 8 | 0;
+ i25 = i23 + 0 | 0;
+ i27 = i21 + 0 | 0;
+ i26 = i25 + 36 | 0;
+ do {
+ HEAP32[i25 >> 2] = HEAP32[i27 >> 2];
+ i25 = i25 + 4 | 0;
+ i27 = i27 + 4 | 0;
+ } while ((i25 | 0) < (i26 | 0));
+ i21 = i2 + 44 | 0;
+ i25 = i21 + 0 | 0;
+ i27 = i24 + 0 | 0;
+ i26 = i25 + 36 | 0;
+ do {
+ HEAP32[i25 >> 2] = HEAP32[i27 >> 2];
+ i25 = i25 + 4 | 0;
+ i27 = i27 + 4 | 0;
+ } while ((i25 | 0) < (i26 | 0));
+ d19 = 1.0 - d9;
+ d4 = d19 * +HEAPF32[i2 + 32 >> 2] + +HEAPF32[i2 + 36 >> 2] * d9;
+ d3 = +Math_sin(+d4);
+ d4 = +Math_cos(+d4);
+ d7 = +HEAPF32[i23 >> 2];
+ d5 = +HEAPF32[i2 + 12 >> 2];
+ d8 = d19 * +HEAPF32[i2 + 16 >> 2] + +HEAPF32[i2 + 24 >> 2] * d9 - (d4 * d7 - d3 * d5);
+ d5 = d19 * +HEAPF32[i2 + 20 >> 2] + +HEAPF32[i2 + 28 >> 2] * d9 - (d3 * d7 + d4 * d5);
+ d7 = d19 * +HEAPF32[i2 + 68 >> 2] + +HEAPF32[i2 + 72 >> 2] * d9;
+ d6 = +Math_sin(+d7);
+ d7 = +Math_cos(+d7);
+ d20 = +HEAPF32[i21 >> 2];
+ d22 = +HEAPF32[i2 + 48 >> 2];
+ d10 = d19 * +HEAPF32[i2 + 52 >> 2] + +HEAPF32[i2 + 60 >> 2] * d9 - (d7 * d20 - d6 * d22);
+ d9 = d19 * +HEAPF32[i2 + 56 >> 2] + +HEAPF32[i2 + 64 >> 2] * d9 - (d6 * d20 + d7 * d22);
+ if (i14 << 16 >> 16 == 1) {
+ HEAP32[i2 + 80 >> 2] = 0;
+ i14 = HEAPU8[i11 + 6 | 0] | 0;
+ if ((HEAP32[i13 + 20 >> 2] | 0) <= (i14 | 0)) {
+ ___assert_fail(3640, 3672, 103, 3704);
+ }
+ i27 = (HEAP32[i13 + 16 >> 2] | 0) + (i14 << 3) | 0;
+ d15 = +HEAPF32[i27 >> 2];
+ d16 = +HEAPF32[i27 + 4 >> 2];
+ i11 = HEAPU8[i11 + 9 | 0] | 0;
+ if ((HEAP32[i12 + 20 >> 2] | 0) <= (i11 | 0)) {
+ ___assert_fail(3640, 3672, 103, 3704);
+ }
+ i11 = (HEAP32[i12 + 16 >> 2] | 0) + (i11 << 3) | 0;
+ d20 = +HEAPF32[i11 >> 2];
+ d22 = +HEAPF32[i11 + 4 >> 2];
+ i11 = i2 + 92 | 0;
+ d8 = d10 + (d7 * d20 - d6 * d22) - (d8 + (d4 * d15 - d3 * d16));
+ d4 = d9 + (d6 * d20 + d7 * d22) - (d5 + (d3 * d15 + d4 * d16));
+ d22 = +d8;
+ d3 = +d4;
+ i27 = i11;
+ HEAPF32[i27 >> 2] = d22;
+ HEAPF32[i27 + 4 >> 2] = d3;
+ d3 = +Math_sqrt(+(d8 * d8 + d4 * d4));
+ if (d3 < 1.1920928955078125e-7) {
+ d22 = 0.0;
+ STACKTOP = i1;
+ return +d22;
+ }
+ d22 = 1.0 / d3;
+ HEAPF32[i11 >> 2] = d8 * d22;
+ HEAPF32[i2 + 96 >> 2] = d4 * d22;
+ d22 = d3;
+ STACKTOP = i1;
+ return +d22;
+ }
+ i14 = i11 + 6 | 0;
+ i21 = i11 + 7 | 0;
+ i23 = i2 + 80 | 0;
+ if ((HEAP8[i14] | 0) == (HEAP8[i21] | 0)) {
+ HEAP32[i23 >> 2] = 2;
+ i23 = HEAPU8[i11 + 9 | 0] | 0;
+ i21 = HEAP32[i12 + 20 >> 2] | 0;
+ if ((i21 | 0) <= (i23 | 0)) {
+ ___assert_fail(3640, 3672, 103, 3704);
+ }
+ i12 = HEAP32[i12 + 16 >> 2] | 0;
+ i27 = i12 + (i23 << 3) | 0;
+ d16 = +HEAPF32[i27 >> 2];
+ d15 = +HEAPF32[i27 + 4 >> 2];
+ i11 = HEAPU8[i11 + 10 | 0] | 0;
+ if ((i21 | 0) <= (i11 | 0)) {
+ ___assert_fail(3640, 3672, 103, 3704);
+ }
+ i11 = i12 + (i11 << 3) | 0;
+ d20 = +HEAPF32[i11 >> 2];
+ d18 = +HEAPF32[i11 + 4 >> 2];
+ i11 = i2 + 92 | 0;
+ d22 = d20 - d16;
+ d19 = d18 - d15;
+ d17 = -d22;
+ d29 = +d19;
+ d28 = +d17;
+ i27 = i11;
+ HEAPF32[i27 >> 2] = d29;
+ HEAPF32[i27 + 4 >> 2] = d28;
+ d22 = +Math_sqrt(+(d19 * d19 + d22 * d22));
+ if (!(d22 < 1.1920928955078125e-7)) {
+ d29 = 1.0 / d22;
+ d19 = d19 * d29;
+ HEAPF32[i11 >> 2] = d19;
+ d17 = d29 * d17;
+ HEAPF32[i2 + 96 >> 2] = d17;
+ }
+ d16 = (d16 + d20) * .5;
+ d15 = (d15 + d18) * .5;
+ d28 = +d16;
+ d29 = +d15;
+ i2 = i2 + 84 | 0;
+ HEAPF32[i2 >> 2] = d28;
+ HEAPF32[i2 + 4 >> 2] = d29;
+ i2 = HEAPU8[i14] | 0;
+ if ((HEAP32[i13 + 20 >> 2] | 0) <= (i2 | 0)) {
+ ___assert_fail(3640, 3672, 103, 3704);
+ }
+ i27 = (HEAP32[i13 + 16 >> 2] | 0) + (i2 << 3) | 0;
+ d28 = +HEAPF32[i27 >> 2];
+ d29 = +HEAPF32[i27 + 4 >> 2];
+ d3 = (d7 * d19 - d6 * d17) * (d8 + (d4 * d28 - d3 * d29) - (d10 + (d7 * d16 - d6 * d15))) + (d6 * d19 + d7 * d17) * (d5 + (d3 * d28 + d4 * d29) - (d9 + (d6 * d16 + d7 * d15)));
+ if (!(d3 < 0.0)) {
+ d29 = d3;
+ STACKTOP = i1;
+ return +d29;
+ }
+ d28 = +-d19;
+ d29 = +-d17;
+ i27 = i11;
+ HEAPF32[i27 >> 2] = d28;
+ HEAPF32[i27 + 4 >> 2] = d29;
+ d29 = -d3;
+ STACKTOP = i1;
+ return +d29;
+ } else {
+ HEAP32[i23 >> 2] = 1;
+ i23 = HEAPU8[i14] | 0;
+ i14 = HEAP32[i13 + 20 >> 2] | 0;
+ if ((i14 | 0) <= (i23 | 0)) {
+ ___assert_fail(3640, 3672, 103, 3704);
+ }
+ i13 = HEAP32[i13 + 16 >> 2] | 0;
+ i27 = i13 + (i23 << 3) | 0;
+ d16 = +HEAPF32[i27 >> 2];
+ d15 = +HEAPF32[i27 + 4 >> 2];
+ i21 = HEAPU8[i21] | 0;
+ if ((i14 | 0) <= (i21 | 0)) {
+ ___assert_fail(3640, 3672, 103, 3704);
+ }
+ i13 = i13 + (i21 << 3) | 0;
+ d20 = +HEAPF32[i13 >> 2];
+ d18 = +HEAPF32[i13 + 4 >> 2];
+ i13 = i2 + 92 | 0;
+ d22 = d20 - d16;
+ d19 = d18 - d15;
+ d17 = -d22;
+ d28 = +d19;
+ d29 = +d17;
+ i27 = i13;
+ HEAPF32[i27 >> 2] = d28;
+ HEAPF32[i27 + 4 >> 2] = d29;
+ d22 = +Math_sqrt(+(d19 * d19 + d22 * d22));
+ if (!(d22 < 1.1920928955078125e-7)) {
+ d29 = 1.0 / d22;
+ d19 = d19 * d29;
+ HEAPF32[i13 >> 2] = d19;
+ d17 = d29 * d17;
+ HEAPF32[i2 + 96 >> 2] = d17;
+ }
+ d16 = (d16 + d20) * .5;
+ d15 = (d15 + d18) * .5;
+ d28 = +d16;
+ d29 = +d15;
+ i2 = i2 + 84 | 0;
+ HEAPF32[i2 >> 2] = d28;
+ HEAPF32[i2 + 4 >> 2] = d29;
+ i2 = HEAPU8[i11 + 9 | 0] | 0;
+ if ((HEAP32[i12 + 20 >> 2] | 0) <= (i2 | 0)) {
+ ___assert_fail(3640, 3672, 103, 3704);
+ }
+ i27 = (HEAP32[i12 + 16 >> 2] | 0) + (i2 << 3) | 0;
+ d28 = +HEAPF32[i27 >> 2];
+ d29 = +HEAPF32[i27 + 4 >> 2];
+ d3 = (d4 * d19 - d3 * d17) * (d10 + (d7 * d28 - d6 * d29) - (d8 + (d4 * d16 - d3 * d15))) + (d3 * d19 + d4 * d17) * (d9 + (d6 * d28 + d7 * d29) - (d5 + (d3 * d16 + d4 * d15)));
+ if (!(d3 < 0.0)) {
+ d29 = d3;
+ STACKTOP = i1;
+ return +d29;
+ }
+ d28 = +-d19;
+ d29 = +-d17;
+ i27 = i13;
+ HEAPF32[i27 >> 2] = d28;
+ HEAPF32[i27 + 4 >> 2] = d29;
+ d29 = -d3;
+ STACKTOP = i1;
+ return +d29;
+ }
+ return 0.0;
+}
+function __ZNK20b2SeparationFunction17FindMinSeparationEPiS0_f(i12, i10, i9, d5) {
+ i12 = i12 | 0;
+ i10 = i10 | 0;
+ i9 = i9 | 0;
+ d5 = +d5;
+ var i1 = 0, d2 = 0.0, d3 = 0.0, d4 = 0.0, d6 = 0.0, d7 = 0.0, d8 = 0.0, d11 = 0.0, d13 = 0.0, d14 = 0.0, i15 = 0, i16 = 0, d17 = 0.0, d18 = 0.0, i19 = 0, d20 = 0.0, d21 = 0.0, i22 = 0, d23 = 0.0, d24 = 0.0, i25 = 0, i26 = 0, i27 = 0;
+ i1 = STACKTOP;
+ d21 = 1.0 - d5;
+ d6 = d21 * +HEAPF32[i12 + 32 >> 2] + +HEAPF32[i12 + 36 >> 2] * d5;
+ d7 = +Math_sin(+d6);
+ d6 = +Math_cos(+d6);
+ d3 = +HEAPF32[i12 + 8 >> 2];
+ d8 = +HEAPF32[i12 + 12 >> 2];
+ d11 = d21 * +HEAPF32[i12 + 16 >> 2] + +HEAPF32[i12 + 24 >> 2] * d5 - (d6 * d3 - d7 * d8);
+ d8 = d21 * +HEAPF32[i12 + 20 >> 2] + +HEAPF32[i12 + 28 >> 2] * d5 - (d7 * d3 + d6 * d8);
+ d3 = d21 * +HEAPF32[i12 + 68 >> 2] + +HEAPF32[i12 + 72 >> 2] * d5;
+ d2 = +Math_sin(+d3);
+ d3 = +Math_cos(+d3);
+ d23 = +HEAPF32[i12 + 44 >> 2];
+ d24 = +HEAPF32[i12 + 48 >> 2];
+ d4 = d21 * +HEAPF32[i12 + 52 >> 2] + +HEAPF32[i12 + 60 >> 2] * d5 - (d3 * d23 - d2 * d24);
+ d5 = d21 * +HEAPF32[i12 + 56 >> 2] + +HEAPF32[i12 + 64 >> 2] * d5 - (d2 * d23 + d3 * d24);
+ i19 = HEAP32[i12 + 80 >> 2] | 0;
+ if ((i19 | 0) == 1) {
+ d23 = +HEAPF32[i12 + 92 >> 2];
+ d14 = +HEAPF32[i12 + 96 >> 2];
+ d13 = d6 * d23 - d7 * d14;
+ d14 = d7 * d23 + d6 * d14;
+ d23 = +HEAPF32[i12 + 84 >> 2];
+ d24 = +HEAPF32[i12 + 88 >> 2];
+ d11 = d11 + (d6 * d23 - d7 * d24);
+ d6 = d8 + (d7 * d23 + d6 * d24);
+ d7 = -d13;
+ d24 = -d14;
+ d8 = d3 * d7 + d2 * d24;
+ d7 = d3 * d24 - d2 * d7;
+ HEAP32[i10 >> 2] = -1;
+ i25 = i12 + 4 | 0;
+ i22 = HEAP32[i25 >> 2] | 0;
+ i19 = HEAP32[i22 + 16 >> 2] | 0;
+ i22 = HEAP32[i22 + 20 >> 2] | 0;
+ if ((i22 | 0) > 1) {
+ i10 = 0;
+ d18 = d7 * +HEAPF32[i19 + 4 >> 2] + d8 * +HEAPF32[i19 >> 2];
+ i12 = 1;
+ while (1) {
+ d17 = d8 * +HEAPF32[i19 + (i12 << 3) >> 2] + d7 * +HEAPF32[i19 + (i12 << 3) + 4 >> 2];
+ i16 = d17 > d18;
+ i10 = i16 ? i12 : i10;
+ i12 = i12 + 1 | 0;
+ if ((i12 | 0) == (i22 | 0)) {
+ break;
+ } else {
+ d18 = i16 ? d17 : d18;
+ }
+ }
+ HEAP32[i9 >> 2] = i10;
+ if ((i10 | 0) > -1) {
+ i15 = i10;
+ } else {
+ ___assert_fail(3640, 3672, 103, 3704);
+ }
+ } else {
+ HEAP32[i9 >> 2] = 0;
+ i15 = 0;
+ }
+ i9 = HEAP32[i25 >> 2] | 0;
+ if ((HEAP32[i9 + 20 >> 2] | 0) <= (i15 | 0)) {
+ ___assert_fail(3640, 3672, 103, 3704);
+ }
+ i27 = (HEAP32[i9 + 16 >> 2] | 0) + (i15 << 3) | 0;
+ d23 = +HEAPF32[i27 >> 2];
+ d24 = +HEAPF32[i27 + 4 >> 2];
+ d24 = d13 * (d4 + (d3 * d23 - d2 * d24) - d11) + d14 * (d5 + (d2 * d23 + d3 * d24) - d6);
+ STACKTOP = i1;
+ return +d24;
+ } else if ((i19 | 0) == 0) {
+ d13 = +HEAPF32[i12 + 92 >> 2];
+ d14 = +HEAPF32[i12 + 96 >> 2];
+ d21 = d6 * d13 + d7 * d14;
+ d24 = d6 * d14 - d7 * d13;
+ d17 = -d13;
+ d23 = -d14;
+ d18 = d3 * d17 + d2 * d23;
+ d17 = d3 * d23 - d2 * d17;
+ i15 = HEAP32[i12 >> 2] | 0;
+ i16 = HEAP32[i15 + 16 >> 2] | 0;
+ i15 = i15 + 20 | 0;
+ i19 = HEAP32[i15 >> 2] | 0;
+ if ((i19 | 0) > 1) {
+ i25 = 0;
+ d23 = d24 * +HEAPF32[i16 + 4 >> 2] + d21 * +HEAPF32[i16 >> 2];
+ i26 = 1;
+ while (1) {
+ d20 = d21 * +HEAPF32[i16 + (i26 << 3) >> 2] + d24 * +HEAPF32[i16 + (i26 << 3) + 4 >> 2];
+ i22 = d20 > d23;
+ i25 = i22 ? i26 : i25;
+ i26 = i26 + 1 | 0;
+ if ((i26 | 0) == (i19 | 0)) {
+ break;
+ } else {
+ d23 = i22 ? d20 : d23;
+ }
+ }
+ } else {
+ i25 = 0;
+ }
+ HEAP32[i10 >> 2] = i25;
+ i19 = HEAP32[i12 + 4 >> 2] | 0;
+ i12 = HEAP32[i19 + 16 >> 2] | 0;
+ i19 = i19 + 20 | 0;
+ i25 = HEAP32[i19 >> 2] | 0;
+ if ((i25 | 0) > 1) {
+ i27 = 0;
+ d20 = d17 * +HEAPF32[i12 + 4 >> 2] + d18 * +HEAPF32[i12 >> 2];
+ i26 = 1;
+ while (1) {
+ d21 = d18 * +HEAPF32[i12 + (i26 << 3) >> 2] + d17 * +HEAPF32[i12 + (i26 << 3) + 4 >> 2];
+ i22 = d21 > d20;
+ i27 = i22 ? i26 : i27;
+ i26 = i26 + 1 | 0;
+ if ((i26 | 0) == (i25 | 0)) {
+ break;
+ } else {
+ d20 = i22 ? d21 : d20;
+ }
+ }
+ } else {
+ i27 = 0;
+ }
+ HEAP32[i9 >> 2] = i27;
+ i9 = HEAP32[i10 >> 2] | 0;
+ if (!((i9 | 0) > -1)) {
+ ___assert_fail(3640, 3672, 103, 3704);
+ }
+ if ((HEAP32[i15 >> 2] | 0) <= (i9 | 0)) {
+ ___assert_fail(3640, 3672, 103, 3704);
+ }
+ i26 = i16 + (i9 << 3) | 0;
+ d18 = +HEAPF32[i26 >> 2];
+ d17 = +HEAPF32[i26 + 4 >> 2];
+ if (!((i27 | 0) > -1)) {
+ ___assert_fail(3640, 3672, 103, 3704);
+ }
+ if ((HEAP32[i19 >> 2] | 0) <= (i27 | 0)) {
+ ___assert_fail(3640, 3672, 103, 3704);
+ }
+ i27 = i12 + (i27 << 3) | 0;
+ d23 = +HEAPF32[i27 >> 2];
+ d24 = +HEAPF32[i27 + 4 >> 2];
+ d24 = d13 * (d4 + (d3 * d23 - d2 * d24) - (d11 + (d6 * d18 - d7 * d17))) + d14 * (d5 + (d2 * d23 + d3 * d24) - (d8 + (d7 * d18 + d6 * d17)));
+ STACKTOP = i1;
+ return +d24;
+ } else if ((i19 | 0) == 2) {
+ d23 = +HEAPF32[i12 + 92 >> 2];
+ d13 = +HEAPF32[i12 + 96 >> 2];
+ d14 = d3 * d23 - d2 * d13;
+ d13 = d2 * d23 + d3 * d13;
+ d23 = +HEAPF32[i12 + 84 >> 2];
+ d24 = +HEAPF32[i12 + 88 >> 2];
+ d4 = d4 + (d3 * d23 - d2 * d24);
+ d2 = d5 + (d2 * d23 + d3 * d24);
+ d3 = -d14;
+ d24 = -d13;
+ d5 = d6 * d3 + d7 * d24;
+ d3 = d6 * d24 - d7 * d3;
+ HEAP32[i9 >> 2] = -1;
+ i22 = HEAP32[i12 >> 2] | 0;
+ i15 = HEAP32[i22 + 16 >> 2] | 0;
+ i22 = HEAP32[i22 + 20 >> 2] | 0;
+ if ((i22 | 0) > 1) {
+ i9 = 0;
+ d17 = d3 * +HEAPF32[i15 + 4 >> 2] + d5 * +HEAPF32[i15 >> 2];
+ i19 = 1;
+ while (1) {
+ d18 = d5 * +HEAPF32[i15 + (i19 << 3) >> 2] + d3 * +HEAPF32[i15 + (i19 << 3) + 4 >> 2];
+ i25 = d18 > d17;
+ i9 = i25 ? i19 : i9;
+ i19 = i19 + 1 | 0;
+ if ((i19 | 0) == (i22 | 0)) {
+ break;
+ } else {
+ d17 = i25 ? d18 : d17;
+ }
+ }
+ HEAP32[i10 >> 2] = i9;
+ if ((i9 | 0) > -1) {
+ i16 = i9;
+ } else {
+ ___assert_fail(3640, 3672, 103, 3704);
+ }
+ } else {
+ HEAP32[i10 >> 2] = 0;
+ i16 = 0;
+ }
+ i9 = HEAP32[i12 >> 2] | 0;
+ if ((HEAP32[i9 + 20 >> 2] | 0) <= (i16 | 0)) {
+ ___assert_fail(3640, 3672, 103, 3704);
+ }
+ i27 = (HEAP32[i9 + 16 >> 2] | 0) + (i16 << 3) | 0;
+ d23 = +HEAPF32[i27 >> 2];
+ d24 = +HEAPF32[i27 + 4 >> 2];
+ d24 = d14 * (d11 + (d6 * d23 - d7 * d24) - d4) + d13 * (d8 + (d7 * d23 + d6 * d24) - d2);
+ STACKTOP = i1;
+ return +d24;
+ } else {
+ ___assert_fail(3616, 3560, 183, 3720);
+ }
+ return 0.0;
+}
+function __ZN13b2DynamicTree10InsertLeafEi(i3, i4) {
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ var i1 = 0, i2 = 0, d5 = 0.0, d6 = 0.0, d7 = 0.0, d8 = 0.0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, d13 = 0.0, d14 = 0.0, d15 = 0.0, d16 = 0.0, d17 = 0.0, d18 = 0.0, d19 = 0.0, d20 = 0.0, d21 = 0.0, d22 = 0.0, d23 = 0.0, i24 = 0;
+ i1 = STACKTOP;
+ i11 = i3 + 24 | 0;
+ HEAP32[i11 >> 2] = (HEAP32[i11 >> 2] | 0) + 1;
+ i11 = HEAP32[i3 >> 2] | 0;
+ if ((i11 | 0) == -1) {
+ HEAP32[i3 >> 2] = i4;
+ HEAP32[(HEAP32[i3 + 4 >> 2] | 0) + (i4 * 36 | 0) + 20 >> 2] = -1;
+ STACKTOP = i1;
+ return;
+ }
+ i2 = i3 + 4 | 0;
+ i9 = HEAP32[i2 >> 2] | 0;
+ d8 = +HEAPF32[i9 + (i4 * 36 | 0) >> 2];
+ d7 = +HEAPF32[i9 + (i4 * 36 | 0) + 4 >> 2];
+ d6 = +HEAPF32[i9 + (i4 * 36 | 0) + 8 >> 2];
+ d5 = +HEAPF32[i9 + (i4 * 36 | 0) + 12 >> 2];
+ i10 = HEAP32[i9 + (i11 * 36 | 0) + 24 >> 2] | 0;
+ L5 : do {
+ if (!((i10 | 0) == -1)) {
+ do {
+ i12 = HEAP32[i9 + (i11 * 36 | 0) + 28 >> 2] | 0;
+ d14 = +HEAPF32[i9 + (i11 * 36 | 0) + 8 >> 2];
+ d15 = +HEAPF32[i9 + (i11 * 36 | 0) >> 2];
+ d17 = +HEAPF32[i9 + (i11 * 36 | 0) + 12 >> 2];
+ d16 = +HEAPF32[i9 + (i11 * 36 | 0) + 4 >> 2];
+ d21 = ((d14 > d6 ? d14 : d6) - (d15 < d8 ? d15 : d8) + ((d17 > d5 ? d17 : d5) - (d16 < d7 ? d16 : d7))) * 2.0;
+ d13 = d21 * 2.0;
+ d14 = (d21 - (d14 - d15 + (d17 - d16)) * 2.0) * 2.0;
+ d21 = +HEAPF32[i9 + (i10 * 36 | 0) >> 2];
+ d16 = d8 < d21 ? d8 : d21;
+ d17 = +HEAPF32[i9 + (i10 * 36 | 0) + 4 >> 2];
+ d18 = d7 < d17 ? d7 : d17;
+ d19 = +HEAPF32[i9 + (i10 * 36 | 0) + 8 >> 2];
+ d20 = d6 > d19 ? d6 : d19;
+ d15 = +HEAPF32[i9 + (i10 * 36 | 0) + 12 >> 2];
+ d22 = d5 > d15 ? d5 : d15;
+ if ((HEAP32[i9 + (i10 * 36 | 0) + 24 >> 2] | 0) == -1) {
+ d15 = (d20 - d16 + (d22 - d18)) * 2.0;
+ } else {
+ d15 = (d20 - d16 + (d22 - d18)) * 2.0 - (d19 - d21 + (d15 - d17)) * 2.0;
+ }
+ d15 = d14 + d15;
+ d17 = +HEAPF32[i9 + (i12 * 36 | 0) >> 2];
+ d18 = d8 < d17 ? d8 : d17;
+ d23 = +HEAPF32[i9 + (i12 * 36 | 0) + 4 >> 2];
+ d22 = d7 < d23 ? d7 : d23;
+ d21 = +HEAPF32[i9 + (i12 * 36 | 0) + 8 >> 2];
+ d20 = d6 > d21 ? d6 : d21;
+ d19 = +HEAPF32[i9 + (i12 * 36 | 0) + 12 >> 2];
+ d16 = d5 > d19 ? d5 : d19;
+ if ((HEAP32[i9 + (i12 * 36 | 0) + 24 >> 2] | 0) == -1) {
+ d16 = (d20 - d18 + (d16 - d22)) * 2.0;
+ } else {
+ d16 = (d20 - d18 + (d16 - d22)) * 2.0 - (d21 - d17 + (d19 - d23)) * 2.0;
+ }
+ d14 = d14 + d16;
+ if (d13 < d15 & d13 < d14) {
+ break L5;
+ }
+ i11 = d15 < d14 ? i10 : i12;
+ i10 = HEAP32[i9 + (i11 * 36 | 0) + 24 >> 2] | 0;
+ } while (!((i10 | 0) == -1));
+ }
+ } while (0);
+ i9 = HEAP32[i9 + (i11 * 36 | 0) + 20 >> 2] | 0;
+ i10 = __ZN13b2DynamicTree12AllocateNodeEv(i3) | 0;
+ i12 = HEAP32[i2 >> 2] | 0;
+ HEAP32[i12 + (i10 * 36 | 0) + 20 >> 2] = i9;
+ HEAP32[i12 + (i10 * 36 | 0) + 16 >> 2] = 0;
+ i12 = HEAP32[i2 >> 2] | 0;
+ d14 = +HEAPF32[i12 + (i11 * 36 | 0) >> 2];
+ d13 = +HEAPF32[i12 + (i11 * 36 | 0) + 4 >> 2];
+ d8 = +(d8 < d14 ? d8 : d14);
+ d7 = +(d7 < d13 ? d7 : d13);
+ i24 = i12 + (i10 * 36 | 0) | 0;
+ HEAPF32[i24 >> 2] = d8;
+ HEAPF32[i24 + 4 >> 2] = d7;
+ d8 = +HEAPF32[i12 + (i11 * 36 | 0) + 8 >> 2];
+ d7 = +HEAPF32[i12 + (i11 * 36 | 0) + 12 >> 2];
+ d6 = +(d6 > d8 ? d6 : d8);
+ d23 = +(d5 > d7 ? d5 : d7);
+ i12 = i12 + (i10 * 36 | 0) + 8 | 0;
+ HEAPF32[i12 >> 2] = d6;
+ HEAPF32[i12 + 4 >> 2] = d23;
+ i12 = HEAP32[i2 >> 2] | 0;
+ HEAP32[i12 + (i10 * 36 | 0) + 32 >> 2] = (HEAP32[i12 + (i11 * 36 | 0) + 32 >> 2] | 0) + 1;
+ if ((i9 | 0) == -1) {
+ HEAP32[i12 + (i10 * 36 | 0) + 24 >> 2] = i11;
+ HEAP32[i12 + (i10 * 36 | 0) + 28 >> 2] = i4;
+ HEAP32[i12 + (i11 * 36 | 0) + 20 >> 2] = i10;
+ i24 = i12 + (i4 * 36 | 0) + 20 | 0;
+ HEAP32[i24 >> 2] = i10;
+ HEAP32[i3 >> 2] = i10;
+ i10 = HEAP32[i24 >> 2] | 0;
+ } else {
+ i24 = i12 + (i9 * 36 | 0) + 24 | 0;
+ if ((HEAP32[i24 >> 2] | 0) == (i11 | 0)) {
+ HEAP32[i24 >> 2] = i10;
+ } else {
+ HEAP32[i12 + (i9 * 36 | 0) + 28 >> 2] = i10;
+ }
+ HEAP32[i12 + (i10 * 36 | 0) + 24 >> 2] = i11;
+ HEAP32[i12 + (i10 * 36 | 0) + 28 >> 2] = i4;
+ HEAP32[i12 + (i11 * 36 | 0) + 20 >> 2] = i10;
+ HEAP32[i12 + (i4 * 36 | 0) + 20 >> 2] = i10;
+ }
+ if ((i10 | 0) == -1) {
+ STACKTOP = i1;
+ return;
+ }
+ while (1) {
+ i9 = __ZN13b2DynamicTree7BalanceEi(i3, i10) | 0;
+ i4 = HEAP32[i2 >> 2] | 0;
+ i11 = HEAP32[i4 + (i9 * 36 | 0) + 24 >> 2] | 0;
+ i10 = HEAP32[i4 + (i9 * 36 | 0) + 28 >> 2] | 0;
+ if ((i11 | 0) == -1) {
+ i2 = 20;
+ break;
+ }
+ if ((i10 | 0) == -1) {
+ i2 = 22;
+ break;
+ }
+ i12 = HEAP32[i4 + (i11 * 36 | 0) + 32 >> 2] | 0;
+ i24 = HEAP32[i4 + (i10 * 36 | 0) + 32 >> 2] | 0;
+ HEAP32[i4 + (i9 * 36 | 0) + 32 >> 2] = ((i12 | 0) > (i24 | 0) ? i12 : i24) + 1;
+ d7 = +HEAPF32[i4 + (i11 * 36 | 0) >> 2];
+ d8 = +HEAPF32[i4 + (i10 * 36 | 0) >> 2];
+ d5 = +HEAPF32[i4 + (i11 * 36 | 0) + 4 >> 2];
+ d6 = +HEAPF32[i4 + (i10 * 36 | 0) + 4 >> 2];
+ d7 = +(d7 < d8 ? d7 : d8);
+ d5 = +(d5 < d6 ? d5 : d6);
+ i24 = i4 + (i9 * 36 | 0) | 0;
+ HEAPF32[i24 >> 2] = d7;
+ HEAPF32[i24 + 4 >> 2] = d5;
+ d5 = +HEAPF32[i4 + (i11 * 36 | 0) + 8 >> 2];
+ d6 = +HEAPF32[i4 + (i10 * 36 | 0) + 8 >> 2];
+ d7 = +HEAPF32[i4 + (i11 * 36 | 0) + 12 >> 2];
+ d8 = +HEAPF32[i4 + (i10 * 36 | 0) + 12 >> 2];
+ d5 = +(d5 > d6 ? d5 : d6);
+ d23 = +(d7 > d8 ? d7 : d8);
+ i10 = i4 + (i9 * 36 | 0) + 8 | 0;
+ HEAPF32[i10 >> 2] = d5;
+ HEAPF32[i10 + 4 >> 2] = d23;
+ i10 = HEAP32[(HEAP32[i2 >> 2] | 0) + (i9 * 36 | 0) + 20 >> 2] | 0;
+ if ((i10 | 0) == -1) {
+ i2 = 24;
+ break;
+ }
+ }
+ if ((i2 | 0) == 20) {
+ ___assert_fail(3168, 2944, 307, 3184);
+ } else if ((i2 | 0) == 22) {
+ ___assert_fail(3200, 2944, 308, 3184);
+ } else if ((i2 | 0) == 24) {
+ STACKTOP = i1;
+ return;
+ }
+}
+function __ZN15b2ContactSolverC2EP18b2ContactSolverDef(i7, i5) {
+ i7 = i7 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i6 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, d15 = 0.0, d16 = 0.0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0;
+ i1 = STACKTOP;
+ HEAP32[i7 + 0 >> 2] = HEAP32[i5 + 0 >> 2];
+ HEAP32[i7 + 4 >> 2] = HEAP32[i5 + 4 >> 2];
+ HEAP32[i7 + 8 >> 2] = HEAP32[i5 + 8 >> 2];
+ HEAP32[i7 + 12 >> 2] = HEAP32[i5 + 12 >> 2];
+ HEAP32[i7 + 16 >> 2] = HEAP32[i5 + 16 >> 2];
+ HEAP32[i7 + 20 >> 2] = HEAP32[i5 + 20 >> 2];
+ i14 = HEAP32[i5 + 40 >> 2] | 0;
+ i9 = i7 + 32 | 0;
+ HEAP32[i9 >> 2] = i14;
+ i2 = HEAP32[i5 + 28 >> 2] | 0;
+ i4 = i7 + 48 | 0;
+ HEAP32[i4 >> 2] = i2;
+ i3 = i7 + 36 | 0;
+ HEAP32[i3 >> 2] = __ZN16b2StackAllocator8AllocateEi(i14, i2 * 88 | 0) | 0;
+ i2 = i7 + 40 | 0;
+ HEAP32[i2 >> 2] = __ZN16b2StackAllocator8AllocateEi(HEAP32[i9 >> 2] | 0, (HEAP32[i4 >> 2] | 0) * 152 | 0) | 0;
+ HEAP32[i7 + 24 >> 2] = HEAP32[i5 + 32 >> 2];
+ HEAP32[i7 + 28 >> 2] = HEAP32[i5 + 36 >> 2];
+ i9 = HEAP32[i5 + 24 >> 2] | 0;
+ i5 = i7 + 44 | 0;
+ HEAP32[i5 >> 2] = i9;
+ if ((HEAP32[i4 >> 2] | 0) <= 0) {
+ STACKTOP = i1;
+ return;
+ }
+ i6 = i7 + 20 | 0;
+ i7 = i7 + 8 | 0;
+ i8 = 0;
+ while (1) {
+ i10 = HEAP32[i9 + (i8 << 2) >> 2] | 0;
+ i11 = HEAP32[i10 + 48 >> 2] | 0;
+ i12 = HEAP32[i10 + 52 >> 2] | 0;
+ i14 = HEAP32[i11 + 8 >> 2] | 0;
+ i13 = HEAP32[i12 + 8 >> 2] | 0;
+ i9 = HEAP32[i10 + 124 >> 2] | 0;
+ if ((i9 | 0) <= 0) {
+ i2 = 4;
+ break;
+ }
+ d15 = +HEAPF32[(HEAP32[i12 + 12 >> 2] | 0) + 8 >> 2];
+ d16 = +HEAPF32[(HEAP32[i11 + 12 >> 2] | 0) + 8 >> 2];
+ i12 = HEAP32[i2 >> 2] | 0;
+ HEAPF32[i12 + (i8 * 152 | 0) + 136 >> 2] = +HEAPF32[i10 + 136 >> 2];
+ HEAPF32[i12 + (i8 * 152 | 0) + 140 >> 2] = +HEAPF32[i10 + 140 >> 2];
+ i22 = i14 + 8 | 0;
+ HEAP32[i12 + (i8 * 152 | 0) + 112 >> 2] = HEAP32[i22 >> 2];
+ i21 = i13 + 8 | 0;
+ HEAP32[i12 + (i8 * 152 | 0) + 116 >> 2] = HEAP32[i21 >> 2];
+ i19 = i14 + 120 | 0;
+ HEAPF32[i12 + (i8 * 152 | 0) + 120 >> 2] = +HEAPF32[i19 >> 2];
+ i20 = i13 + 120 | 0;
+ HEAPF32[i12 + (i8 * 152 | 0) + 124 >> 2] = +HEAPF32[i20 >> 2];
+ i18 = i14 + 128 | 0;
+ HEAPF32[i12 + (i8 * 152 | 0) + 128 >> 2] = +HEAPF32[i18 >> 2];
+ i17 = i13 + 128 | 0;
+ HEAPF32[i12 + (i8 * 152 | 0) + 132 >> 2] = +HEAPF32[i17 >> 2];
+ HEAP32[i12 + (i8 * 152 | 0) + 148 >> 2] = i8;
+ HEAP32[i12 + (i8 * 152 | 0) + 144 >> 2] = i9;
+ i11 = i12 + (i8 * 152 | 0) + 80 | 0;
+ HEAP32[i11 + 0 >> 2] = 0;
+ HEAP32[i11 + 4 >> 2] = 0;
+ HEAP32[i11 + 8 >> 2] = 0;
+ HEAP32[i11 + 12 >> 2] = 0;
+ HEAP32[i11 + 16 >> 2] = 0;
+ HEAP32[i11 + 20 >> 2] = 0;
+ HEAP32[i11 + 24 >> 2] = 0;
+ HEAP32[i11 + 28 >> 2] = 0;
+ i11 = HEAP32[i3 >> 2] | 0;
+ HEAP32[i11 + (i8 * 88 | 0) + 32 >> 2] = HEAP32[i22 >> 2];
+ HEAP32[i11 + (i8 * 88 | 0) + 36 >> 2] = HEAP32[i21 >> 2];
+ HEAPF32[i11 + (i8 * 88 | 0) + 40 >> 2] = +HEAPF32[i19 >> 2];
+ HEAPF32[i11 + (i8 * 88 | 0) + 44 >> 2] = +HEAPF32[i20 >> 2];
+ i20 = i14 + 28 | 0;
+ i14 = HEAP32[i20 + 4 >> 2] | 0;
+ i19 = i11 + (i8 * 88 | 0) + 48 | 0;
+ HEAP32[i19 >> 2] = HEAP32[i20 >> 2];
+ HEAP32[i19 + 4 >> 2] = i14;
+ i19 = i13 + 28 | 0;
+ i14 = HEAP32[i19 + 4 >> 2] | 0;
+ i13 = i11 + (i8 * 88 | 0) + 56 | 0;
+ HEAP32[i13 >> 2] = HEAP32[i19 >> 2];
+ HEAP32[i13 + 4 >> 2] = i14;
+ HEAPF32[i11 + (i8 * 88 | 0) + 64 >> 2] = +HEAPF32[i18 >> 2];
+ HEAPF32[i11 + (i8 * 88 | 0) + 68 >> 2] = +HEAPF32[i17 >> 2];
+ i13 = i10 + 104 | 0;
+ i14 = HEAP32[i13 + 4 >> 2] | 0;
+ i17 = i11 + (i8 * 88 | 0) + 16 | 0;
+ HEAP32[i17 >> 2] = HEAP32[i13 >> 2];
+ HEAP32[i17 + 4 >> 2] = i14;
+ i17 = i10 + 112 | 0;
+ i14 = HEAP32[i17 + 4 >> 2] | 0;
+ i13 = i11 + (i8 * 88 | 0) + 24 | 0;
+ HEAP32[i13 >> 2] = HEAP32[i17 >> 2];
+ HEAP32[i13 + 4 >> 2] = i14;
+ HEAP32[i11 + (i8 * 88 | 0) + 84 >> 2] = i9;
+ HEAPF32[i11 + (i8 * 88 | 0) + 76 >> 2] = d16;
+ HEAPF32[i11 + (i8 * 88 | 0) + 80 >> 2] = d15;
+ HEAP32[i11 + (i8 * 88 | 0) + 72 >> 2] = HEAP32[i10 + 120 >> 2];
+ i13 = 0;
+ do {
+ i14 = i10 + (i13 * 20 | 0) + 64 | 0;
+ if ((HEAP8[i6] | 0) == 0) {
+ HEAPF32[i12 + (i8 * 152 | 0) + (i13 * 36 | 0) + 16 >> 2] = 0.0;
+ HEAPF32[i12 + (i8 * 152 | 0) + (i13 * 36 | 0) + 20 >> 2] = 0.0;
+ } else {
+ HEAPF32[i12 + (i8 * 152 | 0) + (i13 * 36 | 0) + 16 >> 2] = +HEAPF32[i7 >> 2] * +HEAPF32[i10 + (i13 * 20 | 0) + 72 >> 2];
+ HEAPF32[i12 + (i8 * 152 | 0) + (i13 * 36 | 0) + 20 >> 2] = +HEAPF32[i7 >> 2] * +HEAPF32[i10 + (i13 * 20 | 0) + 76 >> 2];
+ }
+ i20 = i12 + (i8 * 152 | 0) + (i13 * 36 | 0) | 0;
+ HEAPF32[i12 + (i8 * 152 | 0) + (i13 * 36 | 0) + 24 >> 2] = 0.0;
+ HEAPF32[i12 + (i8 * 152 | 0) + (i13 * 36 | 0) + 28 >> 2] = 0.0;
+ HEAPF32[i12 + (i8 * 152 | 0) + (i13 * 36 | 0) + 32 >> 2] = 0.0;
+ i22 = i11 + (i8 * 88 | 0) + (i13 << 3) | 0;
+ HEAP32[i20 + 0 >> 2] = 0;
+ HEAP32[i20 + 4 >> 2] = 0;
+ HEAP32[i20 + 8 >> 2] = 0;
+ HEAP32[i20 + 12 >> 2] = 0;
+ i20 = i14;
+ i21 = HEAP32[i20 + 4 >> 2] | 0;
+ HEAP32[i22 >> 2] = HEAP32[i20 >> 2];
+ HEAP32[i22 + 4 >> 2] = i21;
+ i13 = i13 + 1 | 0;
+ } while ((i13 | 0) != (i9 | 0));
+ i8 = i8 + 1 | 0;
+ if ((i8 | 0) >= (HEAP32[i4 >> 2] | 0)) {
+ i2 = 12;
+ break;
+ }
+ i9 = HEAP32[i5 >> 2] | 0;
+ }
+ if ((i2 | 0) == 4) {
+ ___assert_fail(6504, 6520, 71, 6568);
+ } else if ((i2 | 0) == 12) {
+ STACKTOP = i1;
+ return;
+ }
+}
+function __Z25b2CollidePolygonAndCircleP10b2ManifoldPK14b2PolygonShapeRK11b2TransformPK13b2CircleShapeS6_(i1, i4, i11, i9, i10) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i11 = i11 | 0;
+ i9 = i9 | 0;
+ i10 = i10 | 0;
+ var i2 = 0, i3 = 0, i5 = 0, d6 = 0.0, d7 = 0.0, d8 = 0.0, i12 = 0, d13 = 0.0, d14 = 0.0, i15 = 0, d16 = 0.0, d17 = 0.0, d18 = 0.0, d19 = 0.0, d20 = 0.0, d21 = 0.0, i22 = 0;
+ i3 = STACKTOP;
+ i5 = i1 + 60 | 0;
+ HEAP32[i5 >> 2] = 0;
+ i2 = i9 + 12 | 0;
+ d20 = +HEAPF32[i10 + 12 >> 2];
+ d7 = +HEAPF32[i2 >> 2];
+ d6 = +HEAPF32[i10 + 8 >> 2];
+ d21 = +HEAPF32[i9 + 16 >> 2];
+ d8 = +HEAPF32[i10 >> 2] + (d20 * d7 - d6 * d21) - +HEAPF32[i11 >> 2];
+ d21 = d7 * d6 + d20 * d21 + +HEAPF32[i10 + 4 >> 2] - +HEAPF32[i11 + 4 >> 2];
+ d20 = +HEAPF32[i11 + 12 >> 2];
+ d6 = +HEAPF32[i11 + 8 >> 2];
+ d7 = d8 * d20 + d21 * d6;
+ d6 = d20 * d21 - d8 * d6;
+ d8 = +HEAPF32[i4 + 8 >> 2] + +HEAPF32[i9 + 8 >> 2];
+ i12 = HEAP32[i4 + 148 >> 2] | 0;
+ do {
+ if ((i12 | 0) > 0) {
+ i10 = 0;
+ i9 = 0;
+ d13 = -3.4028234663852886e+38;
+ while (1) {
+ d14 = (d7 - +HEAPF32[i4 + (i10 << 3) + 20 >> 2]) * +HEAPF32[i4 + (i10 << 3) + 84 >> 2] + (d6 - +HEAPF32[i4 + (i10 << 3) + 24 >> 2]) * +HEAPF32[i4 + (i10 << 3) + 88 >> 2];
+ if (d14 > d8) {
+ i10 = 19;
+ break;
+ }
+ i11 = d14 > d13;
+ d13 = i11 ? d14 : d13;
+ i9 = i11 ? i10 : i9;
+ i10 = i10 + 1 | 0;
+ if ((i10 | 0) >= (i12 | 0)) {
+ i10 = 4;
+ break;
+ }
+ }
+ if ((i10 | 0) == 4) {
+ i22 = d13 < 1.1920928955078125e-7;
+ break;
+ } else if ((i10 | 0) == 19) {
+ STACKTOP = i3;
+ return;
+ }
+ } else {
+ i9 = 0;
+ i22 = 1;
+ }
+ } while (0);
+ i15 = i9 + 1 | 0;
+ i11 = i4 + (i9 << 3) + 20 | 0;
+ i10 = HEAP32[i11 >> 2] | 0;
+ i11 = HEAP32[i11 + 4 >> 2] | 0;
+ d14 = (HEAP32[tempDoublePtr >> 2] = i10, +HEAPF32[tempDoublePtr >> 2]);
+ d13 = (HEAP32[tempDoublePtr >> 2] = i11, +HEAPF32[tempDoublePtr >> 2]);
+ i12 = i4 + (((i15 | 0) < (i12 | 0) ? i15 : 0) << 3) + 20 | 0;
+ i15 = HEAP32[i12 >> 2] | 0;
+ i12 = HEAP32[i12 + 4 >> 2] | 0;
+ d21 = (HEAP32[tempDoublePtr >> 2] = i15, +HEAPF32[tempDoublePtr >> 2]);
+ d18 = (HEAP32[tempDoublePtr >> 2] = i12, +HEAPF32[tempDoublePtr >> 2]);
+ if (i22) {
+ HEAP32[i5 >> 2] = 1;
+ HEAP32[i1 + 56 >> 2] = 1;
+ i22 = i4 + (i9 << 3) + 84 | 0;
+ i15 = HEAP32[i22 + 4 >> 2] | 0;
+ i12 = i1 + 40 | 0;
+ HEAP32[i12 >> 2] = HEAP32[i22 >> 2];
+ HEAP32[i12 + 4 >> 2] = i15;
+ d20 = +((d14 + d21) * .5);
+ d21 = +((d13 + d18) * .5);
+ i12 = i1 + 48 | 0;
+ HEAPF32[i12 >> 2] = d20;
+ HEAPF32[i12 + 4 >> 2] = d21;
+ i12 = i2;
+ i15 = HEAP32[i12 + 4 >> 2] | 0;
+ i22 = i1;
+ HEAP32[i22 >> 2] = HEAP32[i12 >> 2];
+ HEAP32[i22 + 4 >> 2] = i15;
+ HEAP32[i1 + 16 >> 2] = 0;
+ STACKTOP = i3;
+ return;
+ }
+ d16 = d7 - d14;
+ d20 = d6 - d13;
+ d19 = d7 - d21;
+ d17 = d6 - d18;
+ if (d16 * (d21 - d14) + d20 * (d18 - d13) <= 0.0) {
+ if (d16 * d16 + d20 * d20 > d8 * d8) {
+ STACKTOP = i3;
+ return;
+ }
+ HEAP32[i5 >> 2] = 1;
+ HEAP32[i1 + 56 >> 2] = 1;
+ i4 = i1 + 40 | 0;
+ d21 = +d16;
+ d6 = +d20;
+ i22 = i4;
+ HEAPF32[i22 >> 2] = d21;
+ HEAPF32[i22 + 4 >> 2] = d6;
+ d6 = +Math_sqrt(+(d16 * d16 + d20 * d20));
+ if (!(d6 < 1.1920928955078125e-7)) {
+ d21 = 1.0 / d6;
+ HEAPF32[i4 >> 2] = d16 * d21;
+ HEAPF32[i1 + 44 >> 2] = d20 * d21;
+ }
+ i12 = i1 + 48 | 0;
+ HEAP32[i12 >> 2] = i10;
+ HEAP32[i12 + 4 >> 2] = i11;
+ i12 = i2;
+ i15 = HEAP32[i12 + 4 >> 2] | 0;
+ i22 = i1;
+ HEAP32[i22 >> 2] = HEAP32[i12 >> 2];
+ HEAP32[i22 + 4 >> 2] = i15;
+ HEAP32[i1 + 16 >> 2] = 0;
+ STACKTOP = i3;
+ return;
+ }
+ if (!(d19 * (d14 - d21) + d17 * (d13 - d18) <= 0.0)) {
+ d14 = (d14 + d21) * .5;
+ d13 = (d13 + d18) * .5;
+ i10 = i4 + (i9 << 3) + 84 | 0;
+ if ((d7 - d14) * +HEAPF32[i10 >> 2] + (d6 - d13) * +HEAPF32[i4 + (i9 << 3) + 88 >> 2] > d8) {
+ STACKTOP = i3;
+ return;
+ }
+ HEAP32[i5 >> 2] = 1;
+ HEAP32[i1 + 56 >> 2] = 1;
+ i22 = i10;
+ i15 = HEAP32[i22 + 4 >> 2] | 0;
+ i12 = i1 + 40 | 0;
+ HEAP32[i12 >> 2] = HEAP32[i22 >> 2];
+ HEAP32[i12 + 4 >> 2] = i15;
+ d20 = +d14;
+ d21 = +d13;
+ i12 = i1 + 48 | 0;
+ HEAPF32[i12 >> 2] = d20;
+ HEAPF32[i12 + 4 >> 2] = d21;
+ i12 = i2;
+ i15 = HEAP32[i12 + 4 >> 2] | 0;
+ i22 = i1;
+ HEAP32[i22 >> 2] = HEAP32[i12 >> 2];
+ HEAP32[i22 + 4 >> 2] = i15;
+ HEAP32[i1 + 16 >> 2] = 0;
+ STACKTOP = i3;
+ return;
+ }
+ if (d19 * d19 + d17 * d17 > d8 * d8) {
+ STACKTOP = i3;
+ return;
+ }
+ HEAP32[i5 >> 2] = 1;
+ HEAP32[i1 + 56 >> 2] = 1;
+ i4 = i1 + 40 | 0;
+ d21 = +d19;
+ d6 = +d17;
+ i22 = i4;
+ HEAPF32[i22 >> 2] = d21;
+ HEAPF32[i22 + 4 >> 2] = d6;
+ d6 = +Math_sqrt(+(d19 * d19 + d17 * d17));
+ if (!(d6 < 1.1920928955078125e-7)) {
+ d21 = 1.0 / d6;
+ HEAPF32[i4 >> 2] = d19 * d21;
+ HEAPF32[i1 + 44 >> 2] = d17 * d21;
+ }
+ i22 = i1 + 48 | 0;
+ HEAP32[i22 >> 2] = i15;
+ HEAP32[i22 + 4 >> 2] = i12;
+ i12 = i2;
+ i15 = HEAP32[i12 + 4 >> 2] | 0;
+ i22 = i1;
+ HEAP32[i22 >> 2] = HEAP32[i12 >> 2];
+ HEAP32[i22 + 4 >> 2] = i15;
+ HEAP32[i1 + 16 >> 2] = 0;
+ STACKTOP = i3;
+ return;
+}
+function __ZN15b2WorldManifold10InitializeEPK10b2ManifoldRK11b2TransformfS5_f(i1, i5, i7, d4, i8, d3) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ i7 = i7 | 0;
+ d4 = +d4;
+ i8 = i8 | 0;
+ d3 = +d3;
+ var i2 = 0, i6 = 0, d9 = 0.0, d10 = 0.0, i11 = 0, i12 = 0, i13 = 0, d14 = 0.0, d15 = 0.0, i16 = 0, d17 = 0.0, d18 = 0.0, d19 = 0.0, i20 = 0, d21 = 0.0, d22 = 0.0;
+ i2 = STACKTOP;
+ i6 = i5 + 60 | 0;
+ if ((HEAP32[i6 >> 2] | 0) == 0) {
+ STACKTOP = i2;
+ return;
+ }
+ i11 = HEAP32[i5 + 56 >> 2] | 0;
+ if ((i11 | 0) == 2) {
+ i13 = i8 + 12 | 0;
+ d17 = +HEAPF32[i13 >> 2];
+ d18 = +HEAPF32[i5 + 40 >> 2];
+ i16 = i8 + 8 | 0;
+ d19 = +HEAPF32[i16 >> 2];
+ d15 = +HEAPF32[i5 + 44 >> 2];
+ d14 = d17 * d18 - d19 * d15;
+ d15 = d18 * d19 + d17 * d15;
+ d17 = +d14;
+ d19 = +d15;
+ i12 = i1;
+ HEAPF32[i12 >> 2] = d17;
+ HEAPF32[i12 + 4 >> 2] = d19;
+ d19 = +HEAPF32[i13 >> 2];
+ d17 = +HEAPF32[i5 + 48 >> 2];
+ d18 = +HEAPF32[i16 >> 2];
+ d10 = +HEAPF32[i5 + 52 >> 2];
+ d9 = +HEAPF32[i8 >> 2] + (d19 * d17 - d18 * d10);
+ d10 = d17 * d18 + d19 * d10 + +HEAPF32[i8 + 4 >> 2];
+ if ((HEAP32[i6 >> 2] | 0) > 0) {
+ i8 = i7 + 12 | 0;
+ i11 = i7 + 8 | 0;
+ i12 = i7 + 4 | 0;
+ i13 = i1 + 4 | 0;
+ i16 = 0;
+ do {
+ d18 = +HEAPF32[i8 >> 2];
+ d22 = +HEAPF32[i5 + (i16 * 20 | 0) >> 2];
+ d21 = +HEAPF32[i11 >> 2];
+ d17 = +HEAPF32[i5 + (i16 * 20 | 0) + 4 >> 2];
+ d19 = +HEAPF32[i7 >> 2] + (d18 * d22 - d21 * d17);
+ d17 = d22 * d21 + d18 * d17 + +HEAPF32[i12 >> 2];
+ d18 = d3 - (d14 * (d19 - d9) + (d17 - d10) * d15);
+ d19 = +((d19 - d14 * d4 + (d19 + d14 * d18)) * .5);
+ d14 = +((d17 - d15 * d4 + (d17 + d15 * d18)) * .5);
+ i20 = i1 + (i16 << 3) + 8 | 0;
+ HEAPF32[i20 >> 2] = d19;
+ HEAPF32[i20 + 4 >> 2] = d14;
+ i16 = i16 + 1 | 0;
+ d14 = +HEAPF32[i1 >> 2];
+ d15 = +HEAPF32[i13 >> 2];
+ } while ((i16 | 0) < (HEAP32[i6 >> 2] | 0));
+ }
+ d21 = +-d14;
+ d22 = +-d15;
+ i20 = i1;
+ HEAPF32[i20 >> 2] = d21;
+ HEAPF32[i20 + 4 >> 2] = d22;
+ STACKTOP = i2;
+ return;
+ } else if ((i11 | 0) == 1) {
+ i16 = i7 + 12 | 0;
+ d19 = +HEAPF32[i16 >> 2];
+ d21 = +HEAPF32[i5 + 40 >> 2];
+ i20 = i7 + 8 | 0;
+ d22 = +HEAPF32[i20 >> 2];
+ d15 = +HEAPF32[i5 + 44 >> 2];
+ d14 = d19 * d21 - d22 * d15;
+ d15 = d21 * d22 + d19 * d15;
+ d19 = +d14;
+ d22 = +d15;
+ i13 = i1;
+ HEAPF32[i13 >> 2] = d19;
+ HEAPF32[i13 + 4 >> 2] = d22;
+ d22 = +HEAPF32[i16 >> 2];
+ d19 = +HEAPF32[i5 + 48 >> 2];
+ d21 = +HEAPF32[i20 >> 2];
+ d10 = +HEAPF32[i5 + 52 >> 2];
+ d9 = +HEAPF32[i7 >> 2] + (d22 * d19 - d21 * d10);
+ d10 = d19 * d21 + d22 * d10 + +HEAPF32[i7 + 4 >> 2];
+ if ((HEAP32[i6 >> 2] | 0) <= 0) {
+ STACKTOP = i2;
+ return;
+ }
+ i12 = i8 + 12 | 0;
+ i11 = i8 + 8 | 0;
+ i7 = i8 + 4 | 0;
+ i13 = i1 + 4 | 0;
+ i16 = 0;
+ while (1) {
+ d22 = +HEAPF32[i12 >> 2];
+ d17 = +HEAPF32[i5 + (i16 * 20 | 0) >> 2];
+ d18 = +HEAPF32[i11 >> 2];
+ d19 = +HEAPF32[i5 + (i16 * 20 | 0) + 4 >> 2];
+ d21 = +HEAPF32[i8 >> 2] + (d22 * d17 - d18 * d19);
+ d19 = d17 * d18 + d22 * d19 + +HEAPF32[i7 >> 2];
+ d22 = d4 - (d14 * (d21 - d9) + (d19 - d10) * d15);
+ d21 = +((d21 - d14 * d3 + (d21 + d14 * d22)) * .5);
+ d22 = +((d19 - d15 * d3 + (d19 + d15 * d22)) * .5);
+ i20 = i1 + (i16 << 3) + 8 | 0;
+ HEAPF32[i20 >> 2] = d21;
+ HEAPF32[i20 + 4 >> 2] = d22;
+ i16 = i16 + 1 | 0;
+ if ((i16 | 0) >= (HEAP32[i6 >> 2] | 0)) {
+ break;
+ }
+ d14 = +HEAPF32[i1 >> 2];
+ d15 = +HEAPF32[i13 >> 2];
+ }
+ STACKTOP = i2;
+ return;
+ } else if ((i11 | 0) == 0) {
+ HEAPF32[i1 >> 2] = 1.0;
+ i6 = i1 + 4 | 0;
+ HEAPF32[i6 >> 2] = 0.0;
+ d21 = +HEAPF32[i7 + 12 >> 2];
+ d22 = +HEAPF32[i5 + 48 >> 2];
+ d19 = +HEAPF32[i7 + 8 >> 2];
+ d10 = +HEAPF32[i5 + 52 >> 2];
+ d9 = +HEAPF32[i7 >> 2] + (d21 * d22 - d19 * d10);
+ d10 = d22 * d19 + d21 * d10 + +HEAPF32[i7 + 4 >> 2];
+ d21 = +HEAPF32[i8 + 12 >> 2];
+ d19 = +HEAPF32[i5 >> 2];
+ d22 = +HEAPF32[i8 + 8 >> 2];
+ d15 = +HEAPF32[i5 + 4 >> 2];
+ d14 = +HEAPF32[i8 >> 2] + (d21 * d19 - d22 * d15);
+ d15 = d19 * d22 + d21 * d15 + +HEAPF32[i8 + 4 >> 2];
+ d21 = d9 - d14;
+ d22 = d10 - d15;
+ if (d21 * d21 + d22 * d22 > 1.4210854715202004e-14) {
+ d19 = d14 - d9;
+ d17 = d15 - d10;
+ d22 = +d19;
+ d18 = +d17;
+ i20 = i1;
+ HEAPF32[i20 >> 2] = d22;
+ HEAPF32[i20 + 4 >> 2] = d18;
+ d18 = +Math_sqrt(+(d19 * d19 + d17 * d17));
+ if (!(d18 < 1.1920928955078125e-7)) {
+ d22 = 1.0 / d18;
+ d19 = d19 * d22;
+ HEAPF32[i1 >> 2] = d19;
+ d17 = d17 * d22;
+ HEAPF32[i6 >> 2] = d17;
+ }
+ } else {
+ d19 = 1.0;
+ d17 = 0.0;
+ }
+ d21 = +((d9 + d19 * d4 + (d14 - d19 * d3)) * .5);
+ d22 = +((d10 + d17 * d4 + (d15 - d17 * d3)) * .5);
+ i20 = i1 + 8 | 0;
+ HEAPF32[i20 >> 2] = d21;
+ HEAPF32[i20 + 4 >> 2] = d22;
+ STACKTOP = i2;
+ return;
+ } else {
+ STACKTOP = i2;
+ return;
+ }
+}
+function _main(i3, i2) {
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ var i1 = 0, i4 = 0, i5 = 0, d6 = 0.0, d7 = 0.0, i8 = 0, i9 = 0, d10 = 0.0, d11 = 0.0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, d22 = 0.0, d23 = 0.0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 240 | 0;
+ i5 = i1;
+ i12 = i1 + 224 | 0;
+ i4 = i1 + 168 | 0;
+ i9 = i1 + 160 | 0;
+ i8 = i1 + 152 | 0;
+ L1 : do {
+ if ((i3 | 0) > 1) {
+ i14 = HEAP8[HEAP32[i2 + 4 >> 2] | 0] | 0;
+ switch (i14 | 0) {
+ case 49:
+ {
+ HEAP32[2] = 5;
+ HEAP32[4] = 35;
+ i15 = 35;
+ i14 = 5;
+ break L1;
+ }
+ case 50:
+ {
+ HEAP32[2] = 32;
+ HEAP32[4] = 161;
+ i15 = 161;
+ i14 = 32;
+ break L1;
+ }
+ case 51:
+ {
+ i13 = 5;
+ break L1;
+ }
+ case 52:
+ {
+ HEAP32[2] = 320;
+ HEAP32[4] = 2331;
+ i15 = 2331;
+ i14 = 320;
+ break L1;
+ }
+ case 53:
+ {
+ HEAP32[2] = 640;
+ HEAP32[4] = 5661;
+ i15 = 5661;
+ i14 = 640;
+ break L1;
+ }
+ case 48:
+ {
+ i20 = 0;
+ STACKTOP = i1;
+ return i20 | 0;
+ }
+ default:
+ {
+ HEAP32[i5 >> 2] = i14 + -48;
+ _printf(80, i5 | 0) | 0;
+ i20 = -1;
+ STACKTOP = i1;
+ return i20 | 0;
+ }
+ }
+ } else {
+ i13 = 5;
+ }
+ } while (0);
+ if ((i13 | 0) == 5) {
+ HEAP32[2] = 64;
+ HEAP32[4] = 333;
+ i15 = 333;
+ i14 = 64;
+ }
+ i13 = i15 + i14 | 0;
+ HEAP32[4] = i13;
+ HEAP32[2] = 0;
+ HEAP32[8] = __Znaj(i13 >>> 0 > 1073741823 ? -1 : i13 << 2) | 0;
+ HEAPF32[i12 >> 2] = 0.0;
+ HEAPF32[i12 + 4 >> 2] = -10.0;
+ i15 = __Znwj(103028) | 0;
+ __ZN7b2WorldC2ERK6b2Vec2(i15, i12);
+ HEAP32[6] = i15;
+ __ZN7b2World16SetAllowSleepingEb(i15, 0);
+ HEAP32[i5 + 44 >> 2] = 0;
+ i15 = i5 + 4 | 0;
+ i14 = i5 + 36 | 0;
+ HEAP32[i15 + 0 >> 2] = 0;
+ HEAP32[i15 + 4 >> 2] = 0;
+ HEAP32[i15 + 8 >> 2] = 0;
+ HEAP32[i15 + 12 >> 2] = 0;
+ HEAP32[i15 + 16 >> 2] = 0;
+ HEAP32[i15 + 20 >> 2] = 0;
+ HEAP32[i15 + 24 >> 2] = 0;
+ HEAP32[i15 + 28 >> 2] = 0;
+ HEAP8[i14] = 1;
+ HEAP8[i5 + 37 | 0] = 1;
+ HEAP8[i5 + 38 | 0] = 0;
+ HEAP8[i5 + 39 | 0] = 0;
+ HEAP32[i5 >> 2] = 0;
+ HEAP8[i5 + 40 | 0] = 1;
+ HEAPF32[i5 + 48 >> 2] = 1.0;
+ i14 = __ZN7b2World10CreateBodyEPK9b2BodyDef(HEAP32[6] | 0, i5) | 0;
+ HEAP32[i4 >> 2] = 240;
+ HEAP32[i4 + 4 >> 2] = 1;
+ HEAPF32[i4 + 8 >> 2] = .009999999776482582;
+ i15 = i4 + 28 | 0;
+ HEAP32[i15 + 0 >> 2] = 0;
+ HEAP32[i15 + 4 >> 2] = 0;
+ HEAP32[i15 + 8 >> 2] = 0;
+ HEAP32[i15 + 12 >> 2] = 0;
+ HEAP16[i15 + 16 >> 1] = 0;
+ HEAPF32[i9 >> 2] = -40.0;
+ HEAPF32[i9 + 4 >> 2] = 0.0;
+ HEAPF32[i8 >> 2] = 40.0;
+ HEAPF32[i8 + 4 >> 2] = 0.0;
+ __ZN11b2EdgeShape3SetERK6b2Vec2S2_(i4, i9, i8);
+ __ZN6b2Body13CreateFixtureEPK7b2Shapef(i14, i4, 0.0) | 0;
+ HEAP32[i5 >> 2] = 504;
+ HEAP32[i5 + 4 >> 2] = 2;
+ HEAPF32[i5 + 8 >> 2] = .009999999776482582;
+ HEAP32[i5 + 148 >> 2] = 0;
+ HEAPF32[i5 + 12 >> 2] = 0.0;
+ HEAPF32[i5 + 16 >> 2] = 0.0;
+ __ZN14b2PolygonShape8SetAsBoxEff(i5, .5, .5);
+ i14 = i4 + 44 | 0;
+ i15 = i4 + 4 | 0;
+ i8 = i4 + 36 | 0;
+ i17 = i4 + 37 | 0;
+ i18 = i4 + 38 | 0;
+ i19 = i4 + 39 | 0;
+ i20 = i4 + 40 | 0;
+ i13 = i4 + 48 | 0;
+ i12 = i4 + 4 | 0;
+ d11 = -7.0;
+ d10 = .75;
+ i9 = 0;
+ while (1) {
+ d7 = d11;
+ d6 = d10;
+ i16 = i9;
+ while (1) {
+ HEAP32[i14 >> 2] = 0;
+ HEAP32[i15 + 0 >> 2] = 0;
+ HEAP32[i15 + 4 >> 2] = 0;
+ HEAP32[i15 + 8 >> 2] = 0;
+ HEAP32[i15 + 12 >> 2] = 0;
+ HEAP32[i15 + 16 >> 2] = 0;
+ HEAP32[i15 + 20 >> 2] = 0;
+ HEAP32[i15 + 24 >> 2] = 0;
+ HEAP32[i15 + 28 >> 2] = 0;
+ HEAP8[i8] = 1;
+ HEAP8[i17] = 1;
+ HEAP8[i18] = 0;
+ HEAP8[i19] = 0;
+ HEAP8[i20] = 1;
+ HEAPF32[i13 >> 2] = 1.0;
+ HEAP32[i4 >> 2] = 2;
+ d23 = +d7;
+ d22 = +d6;
+ i21 = i12;
+ HEAPF32[i21 >> 2] = d23;
+ HEAPF32[i21 + 4 >> 2] = d22;
+ i21 = __ZN7b2World10CreateBodyEPK9b2BodyDef(HEAP32[6] | 0, i4) | 0;
+ __ZN6b2Body13CreateFixtureEPK7b2Shapef(i21, i5, 5.0) | 0;
+ HEAP32[14] = i21;
+ i16 = i16 + 1 | 0;
+ if ((i16 | 0) >= 40) {
+ break;
+ } else {
+ d7 = d7 + 1.125;
+ d6 = d6 + 0.0;
+ }
+ }
+ i9 = i9 + 1 | 0;
+ if ((i9 | 0) >= 40) {
+ break;
+ } else {
+ d11 = d11 + .5625;
+ d10 = d10 + 1.0;
+ }
+ }
+ if ((HEAP32[2] | 0) > 0) {
+ i4 = 0;
+ do {
+ __ZN7b2World4StepEfii(HEAP32[6] | 0, .01666666753590107, 3, 3);
+ i4 = i4 + 1 | 0;
+ } while ((i4 | 0) < (HEAP32[2] | 0));
+ }
+ if ((i3 | 0) > 2) {
+ i21 = (HEAP8[HEAP32[i2 + 8 >> 2] | 0] | 0) + -48 | 0;
+ HEAP32[18] = i21;
+ if ((i21 | 0) != 0) {
+ _puts(208) | 0;
+ _emscripten_set_main_loop(2, 60, 1);
+ i21 = 0;
+ STACKTOP = i1;
+ return i21 | 0;
+ }
+ } else {
+ HEAP32[18] = 0;
+ }
+ while (1) {
+ __Z4iterv();
+ if ((HEAP32[16] | 0) > (HEAP32[4] | 0)) {
+ i2 = 0;
+ break;
+ }
+ }
+ STACKTOP = i1;
+ return i2 | 0;
+}
+function __ZN9b2Simplex9ReadCacheEPK14b2SimplexCachePK15b2DistanceProxyRK11b2TransformS5_S8_(i2, i11, i10, i4, i3, i5) {
+ i2 = i2 | 0;
+ i11 = i11 | 0;
+ i10 = i10 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i6 = 0, i7 = 0, d8 = 0.0, i9 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0, i23 = 0, d24 = 0.0, d25 = 0.0, i26 = 0, d27 = 0.0, d28 = 0.0, d29 = 0.0, d30 = 0.0, d31 = 0.0, d32 = 0.0;
+ i1 = STACKTOP;
+ i13 = HEAP16[i11 + 4 >> 1] | 0;
+ if (!((i13 & 65535) < 4)) {
+ ___assert_fail(2872, 2672, 102, 2896);
+ }
+ i12 = i13 & 65535;
+ i6 = i2 + 108 | 0;
+ HEAP32[i6 >> 2] = i12;
+ L4 : do {
+ if (!(i13 << 16 >> 16 == 0)) {
+ i17 = i10 + 20 | 0;
+ i21 = i10 + 16 | 0;
+ i13 = i3 + 20 | 0;
+ i14 = i3 + 16 | 0;
+ i15 = i4 + 12 | 0;
+ i16 = i4 + 8 | 0;
+ i12 = i4 + 4 | 0;
+ i18 = i5 + 12 | 0;
+ i19 = i5 + 8 | 0;
+ i20 = i5 + 4 | 0;
+ i22 = 0;
+ while (1) {
+ i26 = HEAPU8[i11 + i22 + 6 | 0] | 0;
+ HEAP32[i2 + (i22 * 36 | 0) + 28 >> 2] = i26;
+ i23 = HEAPU8[i11 + i22 + 9 | 0] | 0;
+ HEAP32[i2 + (i22 * 36 | 0) + 32 >> 2] = i23;
+ if ((HEAP32[i17 >> 2] | 0) <= (i26 | 0)) {
+ i9 = 6;
+ break;
+ }
+ i26 = (HEAP32[i21 >> 2] | 0) + (i26 << 3) | 0;
+ d25 = +HEAPF32[i26 >> 2];
+ d24 = +HEAPF32[i26 + 4 >> 2];
+ if ((HEAP32[i13 >> 2] | 0) <= (i23 | 0)) {
+ i9 = 8;
+ break;
+ }
+ i23 = (HEAP32[i14 >> 2] | 0) + (i23 << 3) | 0;
+ d29 = +HEAPF32[i23 >> 2];
+ d31 = +HEAPF32[i23 + 4 >> 2];
+ d32 = +HEAPF32[i15 >> 2];
+ d30 = +HEAPF32[i16 >> 2];
+ d27 = +HEAPF32[i4 >> 2] + (d25 * d32 - d24 * d30);
+ d28 = +d27;
+ d30 = +(d24 * d32 + d25 * d30 + +HEAPF32[i12 >> 2]);
+ i23 = i2 + (i22 * 36 | 0) | 0;
+ HEAPF32[i23 >> 2] = d28;
+ HEAPF32[i23 + 4 >> 2] = d30;
+ d30 = +HEAPF32[i18 >> 2];
+ d25 = +HEAPF32[i19 >> 2];
+ d24 = +HEAPF32[i5 >> 2] + (d29 * d30 - d31 * d25);
+ d28 = +d24;
+ d25 = +(d31 * d30 + d29 * d25 + +HEAPF32[i20 >> 2]);
+ i23 = i2 + (i22 * 36 | 0) + 8 | 0;
+ HEAPF32[i23 >> 2] = d28;
+ HEAPF32[i23 + 4 >> 2] = d25;
+ d24 = +(d24 - d27);
+ d25 = +(+HEAPF32[i2 + (i22 * 36 | 0) + 12 >> 2] - +HEAPF32[i2 + (i22 * 36 | 0) + 4 >> 2]);
+ i23 = i2 + (i22 * 36 | 0) + 16 | 0;
+ HEAPF32[i23 >> 2] = d24;
+ HEAPF32[i23 + 4 >> 2] = d25;
+ HEAPF32[i2 + (i22 * 36 | 0) + 24 >> 2] = 0.0;
+ i22 = i22 + 1 | 0;
+ i23 = HEAP32[i6 >> 2] | 0;
+ if ((i22 | 0) >= (i23 | 0)) {
+ i7 = i23;
+ break L4;
+ }
+ }
+ if ((i9 | 0) == 6) {
+ ___assert_fail(2776, 2808, 103, 2840);
+ } else if ((i9 | 0) == 8) {
+ ___assert_fail(2776, 2808, 103, 2840);
+ }
+ } else {
+ i7 = i12;
+ }
+ } while (0);
+ do {
+ if ((i7 | 0) > 1) {
+ d24 = +HEAPF32[i11 >> 2];
+ if ((i7 | 0) == 2) {
+ d32 = +HEAPF32[i2 + 16 >> 2] - +HEAPF32[i2 + 52 >> 2];
+ d8 = +HEAPF32[i2 + 20 >> 2] - +HEAPF32[i2 + 56 >> 2];
+ d8 = +Math_sqrt(+(d32 * d32 + d8 * d8));
+ } else if ((i7 | 0) == 3) {
+ d8 = +HEAPF32[i2 + 16 >> 2];
+ d32 = +HEAPF32[i2 + 20 >> 2];
+ d8 = (+HEAPF32[i2 + 52 >> 2] - d8) * (+HEAPF32[i2 + 92 >> 2] - d32) - (+HEAPF32[i2 + 56 >> 2] - d32) * (+HEAPF32[i2 + 88 >> 2] - d8);
+ } else {
+ ___assert_fail(2712, 2672, 259, 2736);
+ }
+ if (!(d8 < d24 * .5) ? !(d24 * 2.0 < d8 | d8 < 1.1920928955078125e-7) : 0) {
+ i9 = 18;
+ break;
+ }
+ HEAP32[i6 >> 2] = 0;
+ } else {
+ i9 = 18;
+ }
+ } while (0);
+ if ((i9 | 0) == 18 ? (i7 | 0) != 0 : 0) {
+ STACKTOP = i1;
+ return;
+ }
+ HEAP32[i2 + 28 >> 2] = 0;
+ HEAP32[i2 + 32 >> 2] = 0;
+ if ((HEAP32[i10 + 20 >> 2] | 0) <= 0) {
+ ___assert_fail(2776, 2808, 103, 2840);
+ }
+ i26 = HEAP32[i10 + 16 >> 2] | 0;
+ d8 = +HEAPF32[i26 >> 2];
+ d24 = +HEAPF32[i26 + 4 >> 2];
+ if ((HEAP32[i3 + 20 >> 2] | 0) <= 0) {
+ ___assert_fail(2776, 2808, 103, 2840);
+ }
+ i26 = HEAP32[i3 + 16 >> 2] | 0;
+ d27 = +HEAPF32[i26 >> 2];
+ d25 = +HEAPF32[i26 + 4 >> 2];
+ d30 = +HEAPF32[i4 + 12 >> 2];
+ d32 = +HEAPF32[i4 + 8 >> 2];
+ d31 = +HEAPF32[i4 >> 2] + (d8 * d30 - d24 * d32);
+ d32 = d24 * d30 + d8 * d32 + +HEAPF32[i4 + 4 >> 2];
+ d30 = +d31;
+ d28 = +d32;
+ i26 = i2;
+ HEAPF32[i26 >> 2] = d30;
+ HEAPF32[i26 + 4 >> 2] = d28;
+ d28 = +HEAPF32[i5 + 12 >> 2];
+ d30 = +HEAPF32[i5 + 8 >> 2];
+ d29 = +HEAPF32[i5 >> 2] + (d27 * d28 - d25 * d30);
+ d30 = d25 * d28 + d27 * d30 + +HEAPF32[i5 + 4 >> 2];
+ d27 = +d29;
+ d28 = +d30;
+ i26 = i2 + 8 | 0;
+ HEAPF32[i26 >> 2] = d27;
+ HEAPF32[i26 + 4 >> 2] = d28;
+ d31 = +(d29 - d31);
+ d32 = +(d30 - d32);
+ i26 = i2 + 16 | 0;
+ HEAPF32[i26 >> 2] = d31;
+ HEAPF32[i26 + 4 >> 2] = d32;
+ HEAP32[i6 >> 2] = 1;
+ STACKTOP = i1;
+ return;
+}
+function __ZNSt3__17__sort4IRPFbRK6b2PairS3_EPS1_EEjT0_S8_S8_S8_T_(i6, i7, i5, i4, i1) {
+ i6 = i6 | 0;
+ i7 = i7 | 0;
+ i5 = i5 | 0;
+ i4 = i4 | 0;
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i8 = 0, i9 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = i2;
+ i9 = FUNCTION_TABLE_iii[HEAP32[i1 >> 2] & 3](i7, i6) | 0;
+ i8 = FUNCTION_TABLE_iii[HEAP32[i1 >> 2] & 3](i5, i7) | 0;
+ do {
+ if (i9) {
+ if (i8) {
+ HEAP32[i3 + 0 >> 2] = HEAP32[i6 + 0 >> 2];
+ HEAP32[i3 + 4 >> 2] = HEAP32[i6 + 4 >> 2];
+ HEAP32[i3 + 8 >> 2] = HEAP32[i6 + 8 >> 2];
+ HEAP32[i6 + 0 >> 2] = HEAP32[i5 + 0 >> 2];
+ HEAP32[i6 + 4 >> 2] = HEAP32[i5 + 4 >> 2];
+ HEAP32[i6 + 8 >> 2] = HEAP32[i5 + 8 >> 2];
+ HEAP32[i5 + 0 >> 2] = HEAP32[i3 + 0 >> 2];
+ HEAP32[i5 + 4 >> 2] = HEAP32[i3 + 4 >> 2];
+ HEAP32[i5 + 8 >> 2] = HEAP32[i3 + 8 >> 2];
+ i8 = 1;
+ break;
+ }
+ HEAP32[i3 + 0 >> 2] = HEAP32[i6 + 0 >> 2];
+ HEAP32[i3 + 4 >> 2] = HEAP32[i6 + 4 >> 2];
+ HEAP32[i3 + 8 >> 2] = HEAP32[i6 + 8 >> 2];
+ HEAP32[i6 + 0 >> 2] = HEAP32[i7 + 0 >> 2];
+ HEAP32[i6 + 4 >> 2] = HEAP32[i7 + 4 >> 2];
+ HEAP32[i6 + 8 >> 2] = HEAP32[i7 + 8 >> 2];
+ HEAP32[i7 + 0 >> 2] = HEAP32[i3 + 0 >> 2];
+ HEAP32[i7 + 4 >> 2] = HEAP32[i3 + 4 >> 2];
+ HEAP32[i7 + 8 >> 2] = HEAP32[i3 + 8 >> 2];
+ if (FUNCTION_TABLE_iii[HEAP32[i1 >> 2] & 3](i5, i7) | 0) {
+ HEAP32[i3 + 0 >> 2] = HEAP32[i7 + 0 >> 2];
+ HEAP32[i3 + 4 >> 2] = HEAP32[i7 + 4 >> 2];
+ HEAP32[i3 + 8 >> 2] = HEAP32[i7 + 8 >> 2];
+ HEAP32[i7 + 0 >> 2] = HEAP32[i5 + 0 >> 2];
+ HEAP32[i7 + 4 >> 2] = HEAP32[i5 + 4 >> 2];
+ HEAP32[i7 + 8 >> 2] = HEAP32[i5 + 8 >> 2];
+ HEAP32[i5 + 0 >> 2] = HEAP32[i3 + 0 >> 2];
+ HEAP32[i5 + 4 >> 2] = HEAP32[i3 + 4 >> 2];
+ HEAP32[i5 + 8 >> 2] = HEAP32[i3 + 8 >> 2];
+ i8 = 2;
+ } else {
+ i8 = 1;
+ }
+ } else {
+ if (i8) {
+ HEAP32[i3 + 0 >> 2] = HEAP32[i7 + 0 >> 2];
+ HEAP32[i3 + 4 >> 2] = HEAP32[i7 + 4 >> 2];
+ HEAP32[i3 + 8 >> 2] = HEAP32[i7 + 8 >> 2];
+ HEAP32[i7 + 0 >> 2] = HEAP32[i5 + 0 >> 2];
+ HEAP32[i7 + 4 >> 2] = HEAP32[i5 + 4 >> 2];
+ HEAP32[i7 + 8 >> 2] = HEAP32[i5 + 8 >> 2];
+ HEAP32[i5 + 0 >> 2] = HEAP32[i3 + 0 >> 2];
+ HEAP32[i5 + 4 >> 2] = HEAP32[i3 + 4 >> 2];
+ HEAP32[i5 + 8 >> 2] = HEAP32[i3 + 8 >> 2];
+ if (FUNCTION_TABLE_iii[HEAP32[i1 >> 2] & 3](i7, i6) | 0) {
+ HEAP32[i3 + 0 >> 2] = HEAP32[i6 + 0 >> 2];
+ HEAP32[i3 + 4 >> 2] = HEAP32[i6 + 4 >> 2];
+ HEAP32[i3 + 8 >> 2] = HEAP32[i6 + 8 >> 2];
+ HEAP32[i6 + 0 >> 2] = HEAP32[i7 + 0 >> 2];
+ HEAP32[i6 + 4 >> 2] = HEAP32[i7 + 4 >> 2];
+ HEAP32[i6 + 8 >> 2] = HEAP32[i7 + 8 >> 2];
+ HEAP32[i7 + 0 >> 2] = HEAP32[i3 + 0 >> 2];
+ HEAP32[i7 + 4 >> 2] = HEAP32[i3 + 4 >> 2];
+ HEAP32[i7 + 8 >> 2] = HEAP32[i3 + 8 >> 2];
+ i8 = 2;
+ } else {
+ i8 = 1;
+ }
+ } else {
+ i8 = 0;
+ }
+ }
+ } while (0);
+ if (!(FUNCTION_TABLE_iii[HEAP32[i1 >> 2] & 3](i4, i5) | 0)) {
+ i9 = i8;
+ STACKTOP = i2;
+ return i9 | 0;
+ }
+ HEAP32[i3 + 0 >> 2] = HEAP32[i5 + 0 >> 2];
+ HEAP32[i3 + 4 >> 2] = HEAP32[i5 + 4 >> 2];
+ HEAP32[i3 + 8 >> 2] = HEAP32[i5 + 8 >> 2];
+ HEAP32[i5 + 0 >> 2] = HEAP32[i4 + 0 >> 2];
+ HEAP32[i5 + 4 >> 2] = HEAP32[i4 + 4 >> 2];
+ HEAP32[i5 + 8 >> 2] = HEAP32[i4 + 8 >> 2];
+ HEAP32[i4 + 0 >> 2] = HEAP32[i3 + 0 >> 2];
+ HEAP32[i4 + 4 >> 2] = HEAP32[i3 + 4 >> 2];
+ HEAP32[i4 + 8 >> 2] = HEAP32[i3 + 8 >> 2];
+ if (!(FUNCTION_TABLE_iii[HEAP32[i1 >> 2] & 3](i5, i7) | 0)) {
+ i9 = i8 + 1 | 0;
+ STACKTOP = i2;
+ return i9 | 0;
+ }
+ HEAP32[i3 + 0 >> 2] = HEAP32[i7 + 0 >> 2];
+ HEAP32[i3 + 4 >> 2] = HEAP32[i7 + 4 >> 2];
+ HEAP32[i3 + 8 >> 2] = HEAP32[i7 + 8 >> 2];
+ HEAP32[i7 + 0 >> 2] = HEAP32[i5 + 0 >> 2];
+ HEAP32[i7 + 4 >> 2] = HEAP32[i5 + 4 >> 2];
+ HEAP32[i7 + 8 >> 2] = HEAP32[i5 + 8 >> 2];
+ HEAP32[i5 + 0 >> 2] = HEAP32[i3 + 0 >> 2];
+ HEAP32[i5 + 4 >> 2] = HEAP32[i3 + 4 >> 2];
+ HEAP32[i5 + 8 >> 2] = HEAP32[i3 + 8 >> 2];
+ if (!(FUNCTION_TABLE_iii[HEAP32[i1 >> 2] & 3](i7, i6) | 0)) {
+ i9 = i8 + 2 | 0;
+ STACKTOP = i2;
+ return i9 | 0;
+ }
+ HEAP32[i3 + 0 >> 2] = HEAP32[i6 + 0 >> 2];
+ HEAP32[i3 + 4 >> 2] = HEAP32[i6 + 4 >> 2];
+ HEAP32[i3 + 8 >> 2] = HEAP32[i6 + 8 >> 2];
+ HEAP32[i6 + 0 >> 2] = HEAP32[i7 + 0 >> 2];
+ HEAP32[i6 + 4 >> 2] = HEAP32[i7 + 4 >> 2];
+ HEAP32[i6 + 8 >> 2] = HEAP32[i7 + 8 >> 2];
+ HEAP32[i7 + 0 >> 2] = HEAP32[i3 + 0 >> 2];
+ HEAP32[i7 + 4 >> 2] = HEAP32[i3 + 4 >> 2];
+ HEAP32[i7 + 8 >> 2] = HEAP32[i3 + 8 >> 2];
+ i9 = i8 + 3 | 0;
+ STACKTOP = i2;
+ return i9 | 0;
+}
+function __ZN15b2ContactSolver27SolveTOIPositionConstraintsEii(i9, i2, i5) {
+ i9 = i9 | 0;
+ i2 = i2 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i6 = 0, i7 = 0, i8 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, d21 = 0.0, d22 = 0.0, d23 = 0.0, d24 = 0.0, d25 = 0.0, d26 = 0.0, d27 = 0.0, d28 = 0.0, d29 = 0.0, d30 = 0.0, d31 = 0.0, d32 = 0.0, d33 = 0.0, d34 = 0.0, d35 = 0.0, d36 = 0.0, i37 = 0, d38 = 0.0, d39 = 0.0, d40 = 0.0, d41 = 0.0, d42 = 0.0, d43 = 0.0, d44 = 0.0, d45 = 0.0, i46 = 0, d47 = 0.0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 64 | 0;
+ i8 = i1 + 40 | 0;
+ i3 = i1 + 24 | 0;
+ i4 = i1;
+ i6 = i9 + 48 | 0;
+ if ((HEAP32[i6 >> 2] | 0) <= 0) {
+ d45 = 0.0;
+ i37 = d45 >= -.007499999832361937;
+ STACKTOP = i1;
+ return i37 | 0;
+ }
+ i7 = i9 + 36 | 0;
+ i14 = i9 + 24 | 0;
+ i9 = i8 + 8 | 0;
+ i15 = i8 + 12 | 0;
+ i10 = i3 + 8 | 0;
+ i11 = i3 + 12 | 0;
+ i12 = i4 + 8 | 0;
+ i13 = i4 + 16 | 0;
+ i16 = 0;
+ d34 = 0.0;
+ do {
+ i37 = HEAP32[i7 >> 2] | 0;
+ i19 = i37 + (i16 * 88 | 0) | 0;
+ i17 = HEAP32[i37 + (i16 * 88 | 0) + 32 >> 2] | 0;
+ i18 = HEAP32[i37 + (i16 * 88 | 0) + 36 >> 2] | 0;
+ i20 = i37 + (i16 * 88 | 0) + 48 | 0;
+ d21 = +HEAPF32[i20 >> 2];
+ d22 = +HEAPF32[i20 + 4 >> 2];
+ i20 = i37 + (i16 * 88 | 0) + 56 | 0;
+ d23 = +HEAPF32[i20 >> 2];
+ d24 = +HEAPF32[i20 + 4 >> 2];
+ i20 = HEAP32[i37 + (i16 * 88 | 0) + 84 >> 2] | 0;
+ if ((i17 | 0) == (i2 | 0) | (i17 | 0) == (i5 | 0)) {
+ d26 = +HEAPF32[i37 + (i16 * 88 | 0) + 64 >> 2];
+ d27 = +HEAPF32[i37 + (i16 * 88 | 0) + 40 >> 2];
+ } else {
+ d26 = 0.0;
+ d27 = 0.0;
+ }
+ d25 = +HEAPF32[i37 + (i16 * 88 | 0) + 44 >> 2];
+ d28 = +HEAPF32[i37 + (i16 * 88 | 0) + 68 >> 2];
+ i37 = HEAP32[i14 >> 2] | 0;
+ i46 = i37 + (i17 * 12 | 0) | 0;
+ d33 = +HEAPF32[i46 >> 2];
+ d35 = +HEAPF32[i46 + 4 >> 2];
+ d29 = +HEAPF32[i37 + (i17 * 12 | 0) + 8 >> 2];
+ i46 = i37 + (i18 * 12 | 0) | 0;
+ d32 = +HEAPF32[i46 >> 2];
+ d36 = +HEAPF32[i46 + 4 >> 2];
+ d31 = +HEAPF32[i37 + (i18 * 12 | 0) + 8 >> 2];
+ if ((i20 | 0) > 0) {
+ d30 = d27 + d25;
+ i37 = 0;
+ do {
+ d38 = +Math_sin(+d29);
+ HEAPF32[i9 >> 2] = d38;
+ d44 = +Math_cos(+d29);
+ HEAPF32[i15 >> 2] = d44;
+ d43 = +Math_sin(+d31);
+ HEAPF32[i10 >> 2] = d43;
+ d41 = +Math_cos(+d31);
+ HEAPF32[i11 >> 2] = d41;
+ d40 = +(d33 - (d21 * d44 - d22 * d38));
+ d38 = +(d35 - (d22 * d44 + d21 * d38));
+ i46 = i8;
+ HEAPF32[i46 >> 2] = d40;
+ HEAPF32[i46 + 4 >> 2] = d38;
+ d38 = +(d32 - (d23 * d41 - d24 * d43));
+ d43 = +(d36 - (d24 * d41 + d23 * d43));
+ i46 = i3;
+ HEAPF32[i46 >> 2] = d38;
+ HEAPF32[i46 + 4 >> 2] = d43;
+ __ZN24b2PositionSolverManifold10InitializeEP27b2ContactPositionConstraintRK11b2TransformS4_i(i4, i19, i8, i3, i37);
+ i46 = i4;
+ d43 = +HEAPF32[i46 >> 2];
+ d38 = +HEAPF32[i46 + 4 >> 2];
+ i46 = i12;
+ d41 = +HEAPF32[i46 >> 2];
+ d40 = +HEAPF32[i46 + 4 >> 2];
+ d44 = +HEAPF32[i13 >> 2];
+ d39 = d41 - d33;
+ d42 = d40 - d35;
+ d41 = d41 - d32;
+ d40 = d40 - d36;
+ d34 = d34 < d44 ? d34 : d44;
+ d44 = (d44 + .004999999888241291) * .75;
+ d44 = d44 < 0.0 ? d44 : 0.0;
+ d45 = d38 * d39 - d43 * d42;
+ d47 = d38 * d41 - d43 * d40;
+ d45 = d47 * d28 * d47 + (d30 + d45 * d26 * d45);
+ if (d45 > 0.0) {
+ d44 = -(d44 < -.20000000298023224 ? -.20000000298023224 : d44) / d45;
+ } else {
+ d44 = 0.0;
+ }
+ d47 = d43 * d44;
+ d45 = d38 * d44;
+ d33 = d33 - d27 * d47;
+ d35 = d35 - d27 * d45;
+ d29 = d29 - d26 * (d39 * d45 - d42 * d47);
+ d32 = d32 + d25 * d47;
+ d36 = d36 + d25 * d45;
+ d31 = d31 + d28 * (d41 * d45 - d40 * d47);
+ i37 = i37 + 1 | 0;
+ } while ((i37 | 0) != (i20 | 0));
+ i37 = HEAP32[i14 >> 2] | 0;
+ }
+ d47 = +d33;
+ d45 = +d35;
+ i46 = i37 + (i17 * 12 | 0) | 0;
+ HEAPF32[i46 >> 2] = d47;
+ HEAPF32[i46 + 4 >> 2] = d45;
+ i46 = HEAP32[i14 >> 2] | 0;
+ HEAPF32[i46 + (i17 * 12 | 0) + 8 >> 2] = d29;
+ d45 = +d32;
+ d47 = +d36;
+ i46 = i46 + (i18 * 12 | 0) | 0;
+ HEAPF32[i46 >> 2] = d45;
+ HEAPF32[i46 + 4 >> 2] = d47;
+ HEAPF32[(HEAP32[i14 >> 2] | 0) + (i18 * 12 | 0) + 8 >> 2] = d31;
+ i16 = i16 + 1 | 0;
+ } while ((i16 | 0) < (HEAP32[i6 >> 2] | 0));
+ i46 = d34 >= -.007499999832361937;
+ STACKTOP = i1;
+ return i46 | 0;
+}
+function __ZN15b2ContactSolver24SolvePositionConstraintsEv(i7) {
+ i7 = i7 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, d17 = 0.0, d18 = 0.0, d19 = 0.0, d20 = 0.0, i21 = 0, d22 = 0.0, d23 = 0.0, d24 = 0.0, d25 = 0.0, i26 = 0, d27 = 0.0, d28 = 0.0, d29 = 0.0, d30 = 0.0, d31 = 0.0, d32 = 0.0, d33 = 0.0, d34 = 0.0, i35 = 0, d36 = 0.0, d37 = 0.0, d38 = 0.0, d39 = 0.0, d40 = 0.0, d41 = 0.0, d42 = 0.0, d43 = 0.0, i44 = 0, d45 = 0.0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 64 | 0;
+ i4 = i1 + 40 | 0;
+ i5 = i1 + 24 | 0;
+ i3 = i1;
+ i2 = i7 + 48 | 0;
+ if ((HEAP32[i2 >> 2] | 0) <= 0) {
+ d43 = 0.0;
+ i35 = d43 >= -.014999999664723873;
+ STACKTOP = i1;
+ return i35 | 0;
+ }
+ i6 = i7 + 36 | 0;
+ i9 = i7 + 24 | 0;
+ i13 = i4 + 8 | 0;
+ i7 = i4 + 12 | 0;
+ i8 = i5 + 8 | 0;
+ i12 = i5 + 12 | 0;
+ i10 = i3 + 8 | 0;
+ i11 = i3 + 16 | 0;
+ i35 = HEAP32[i9 >> 2] | 0;
+ i15 = 0;
+ d32 = 0.0;
+ do {
+ i21 = HEAP32[i6 >> 2] | 0;
+ i26 = i21 + (i15 * 88 | 0) | 0;
+ i16 = HEAP32[i21 + (i15 * 88 | 0) + 32 >> 2] | 0;
+ i14 = HEAP32[i21 + (i15 * 88 | 0) + 36 >> 2] | 0;
+ i44 = i21 + (i15 * 88 | 0) + 48 | 0;
+ d22 = +HEAPF32[i44 >> 2];
+ d23 = +HEAPF32[i44 + 4 >> 2];
+ d25 = +HEAPF32[i21 + (i15 * 88 | 0) + 40 >> 2];
+ d18 = +HEAPF32[i21 + (i15 * 88 | 0) + 64 >> 2];
+ i44 = i21 + (i15 * 88 | 0) + 56 | 0;
+ d24 = +HEAPF32[i44 >> 2];
+ d19 = +HEAPF32[i44 + 4 >> 2];
+ d17 = +HEAPF32[i21 + (i15 * 88 | 0) + 44 >> 2];
+ d20 = +HEAPF32[i21 + (i15 * 88 | 0) + 68 >> 2];
+ i21 = HEAP32[i21 + (i15 * 88 | 0) + 84 >> 2] | 0;
+ i44 = i35 + (i16 * 12 | 0) | 0;
+ d28 = +HEAPF32[i44 >> 2];
+ d33 = +HEAPF32[i44 + 4 >> 2];
+ d29 = +HEAPF32[i35 + (i16 * 12 | 0) + 8 >> 2];
+ i44 = i35 + (i14 * 12 | 0) | 0;
+ d30 = +HEAPF32[i44 >> 2];
+ d34 = +HEAPF32[i44 + 4 >> 2];
+ d31 = +HEAPF32[i35 + (i14 * 12 | 0) + 8 >> 2];
+ if ((i21 | 0) > 0) {
+ d27 = d25 + d17;
+ i35 = 0;
+ do {
+ d41 = +Math_sin(+d29);
+ HEAPF32[i13 >> 2] = d41;
+ d42 = +Math_cos(+d29);
+ HEAPF32[i7 >> 2] = d42;
+ d39 = +Math_sin(+d31);
+ HEAPF32[i8 >> 2] = d39;
+ d38 = +Math_cos(+d31);
+ HEAPF32[i12 >> 2] = d38;
+ d40 = +(d28 - (d22 * d42 - d23 * d41));
+ d41 = +(d33 - (d23 * d42 + d22 * d41));
+ i44 = i4;
+ HEAPF32[i44 >> 2] = d40;
+ HEAPF32[i44 + 4 >> 2] = d41;
+ d41 = +(d30 - (d24 * d38 - d19 * d39));
+ d39 = +(d34 - (d19 * d38 + d24 * d39));
+ i44 = i5;
+ HEAPF32[i44 >> 2] = d41;
+ HEAPF32[i44 + 4 >> 2] = d39;
+ __ZN24b2PositionSolverManifold10InitializeEP27b2ContactPositionConstraintRK11b2TransformS4_i(i3, i26, i4, i5, i35);
+ i44 = i3;
+ d39 = +HEAPF32[i44 >> 2];
+ d41 = +HEAPF32[i44 + 4 >> 2];
+ i44 = i10;
+ d38 = +HEAPF32[i44 >> 2];
+ d40 = +HEAPF32[i44 + 4 >> 2];
+ d42 = +HEAPF32[i11 >> 2];
+ d36 = d38 - d28;
+ d37 = d40 - d33;
+ d38 = d38 - d30;
+ d40 = d40 - d34;
+ d32 = d32 < d42 ? d32 : d42;
+ d42 = (d42 + .004999999888241291) * .20000000298023224;
+ d43 = d42 < 0.0 ? d42 : 0.0;
+ d42 = d41 * d36 - d39 * d37;
+ d45 = d41 * d38 - d39 * d40;
+ d42 = d45 * d20 * d45 + (d27 + d42 * d18 * d42);
+ if (d42 > 0.0) {
+ d42 = -(d43 < -.20000000298023224 ? -.20000000298023224 : d43) / d42;
+ } else {
+ d42 = 0.0;
+ }
+ d45 = d39 * d42;
+ d43 = d41 * d42;
+ d28 = d28 - d25 * d45;
+ d33 = d33 - d25 * d43;
+ d29 = d29 - d18 * (d36 * d43 - d37 * d45);
+ d30 = d30 + d17 * d45;
+ d34 = d34 + d17 * d43;
+ d31 = d31 + d20 * (d38 * d43 - d40 * d45);
+ i35 = i35 + 1 | 0;
+ } while ((i35 | 0) != (i21 | 0));
+ i35 = HEAP32[i9 >> 2] | 0;
+ }
+ d45 = +d28;
+ d43 = +d33;
+ i35 = i35 + (i16 * 12 | 0) | 0;
+ HEAPF32[i35 >> 2] = d45;
+ HEAPF32[i35 + 4 >> 2] = d43;
+ i35 = HEAP32[i9 >> 2] | 0;
+ HEAPF32[i35 + (i16 * 12 | 0) + 8 >> 2] = d29;
+ d43 = +d30;
+ d45 = +d34;
+ i35 = i35 + (i14 * 12 | 0) | 0;
+ HEAPF32[i35 >> 2] = d43;
+ HEAPF32[i35 + 4 >> 2] = d45;
+ i35 = HEAP32[i9 >> 2] | 0;
+ HEAPF32[i35 + (i14 * 12 | 0) + 8 >> 2] = d31;
+ i15 = i15 + 1 | 0;
+ } while ((i15 | 0) < (HEAP32[i2 >> 2] | 0));
+ i44 = d32 >= -.014999999664723873;
+ STACKTOP = i1;
+ return i44 | 0;
+}
+function __Z22b2CollideEdgeAndCircleP10b2ManifoldPK11b2EdgeShapeRK11b2TransformPK13b2CircleShapeS6_(i1, i7, i6, i22, i5) {
+ i1 = i1 | 0;
+ i7 = i7 | 0;
+ i6 = i6 | 0;
+ i22 = i22 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, d8 = 0.0, d9 = 0.0, d10 = 0.0, d11 = 0.0, d12 = 0.0, d13 = 0.0, i14 = 0, i15 = 0, d16 = 0.0, d17 = 0.0, d18 = 0.0, d19 = 0.0, d20 = 0.0, d21 = 0.0, d23 = 0.0, d24 = 0.0;
+ i4 = STACKTOP;
+ i2 = i1 + 60 | 0;
+ HEAP32[i2 >> 2] = 0;
+ i3 = i22 + 12 | 0;
+ d9 = +HEAPF32[i5 + 12 >> 2];
+ d23 = +HEAPF32[i3 >> 2];
+ d17 = +HEAPF32[i5 + 8 >> 2];
+ d18 = +HEAPF32[i22 + 16 >> 2];
+ d21 = +HEAPF32[i5 >> 2] + (d9 * d23 - d17 * d18) - +HEAPF32[i6 >> 2];
+ d18 = d23 * d17 + d9 * d18 + +HEAPF32[i5 + 4 >> 2] - +HEAPF32[i6 + 4 >> 2];
+ d9 = +HEAPF32[i6 + 12 >> 2];
+ d17 = +HEAPF32[i6 + 8 >> 2];
+ d23 = d21 * d9 + d18 * d17;
+ d17 = d9 * d18 - d21 * d17;
+ i6 = i7 + 12 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ i6 = HEAP32[i6 + 4 >> 2] | 0;
+ d21 = (HEAP32[tempDoublePtr >> 2] = i5, +HEAPF32[tempDoublePtr >> 2]);
+ d18 = (HEAP32[tempDoublePtr >> 2] = i6, +HEAPF32[tempDoublePtr >> 2]);
+ i15 = i7 + 20 | 0;
+ i14 = HEAP32[i15 >> 2] | 0;
+ i15 = HEAP32[i15 + 4 >> 2] | 0;
+ d9 = (HEAP32[tempDoublePtr >> 2] = i14, +HEAPF32[tempDoublePtr >> 2]);
+ d10 = (HEAP32[tempDoublePtr >> 2] = i15, +HEAPF32[tempDoublePtr >> 2]);
+ d8 = d9 - d21;
+ d16 = d10 - d18;
+ d19 = d8 * (d9 - d23) + d16 * (d10 - d17);
+ d13 = d23 - d21;
+ d12 = d17 - d18;
+ d20 = d13 * d8 + d12 * d16;
+ d11 = +HEAPF32[i7 + 8 >> 2] + +HEAPF32[i22 + 8 >> 2];
+ if (d20 <= 0.0) {
+ if (d13 * d13 + d12 * d12 > d11 * d11) {
+ STACKTOP = i4;
+ return;
+ }
+ if ((HEAP8[i7 + 44 | 0] | 0) != 0 ? (i22 = i7 + 28 | 0, d24 = +HEAPF32[i22 >> 2], (d21 - d23) * (d21 - d24) + (d18 - d17) * (d18 - +HEAPF32[i22 + 4 >> 2]) > 0.0) : 0) {
+ STACKTOP = i4;
+ return;
+ }
+ HEAP32[i2 >> 2] = 1;
+ HEAP32[i1 + 56 >> 2] = 0;
+ HEAPF32[i1 + 40 >> 2] = 0.0;
+ HEAPF32[i1 + 44 >> 2] = 0.0;
+ i14 = i1 + 48 | 0;
+ HEAP32[i14 >> 2] = i5;
+ HEAP32[i14 + 4 >> 2] = i6;
+ i14 = i1 + 16 | 0;
+ HEAP32[i14 >> 2] = 0;
+ HEAP8[i14] = 0;
+ HEAP8[i14 + 1 | 0] = 0;
+ HEAP8[i14 + 2 | 0] = 0;
+ HEAP8[i14 + 3 | 0] = 0;
+ i14 = i3;
+ i15 = HEAP32[i14 + 4 >> 2] | 0;
+ i22 = i1;
+ HEAP32[i22 >> 2] = HEAP32[i14 >> 2];
+ HEAP32[i22 + 4 >> 2] = i15;
+ STACKTOP = i4;
+ return;
+ }
+ if (d19 <= 0.0) {
+ d8 = d23 - d9;
+ d12 = d17 - d10;
+ if (d8 * d8 + d12 * d12 > d11 * d11) {
+ STACKTOP = i4;
+ return;
+ }
+ if ((HEAP8[i7 + 45 | 0] | 0) != 0 ? (i22 = i7 + 36 | 0, d24 = +HEAPF32[i22 >> 2], d8 * (d24 - d9) + d12 * (+HEAPF32[i22 + 4 >> 2] - d10) > 0.0) : 0) {
+ STACKTOP = i4;
+ return;
+ }
+ HEAP32[i2 >> 2] = 1;
+ HEAP32[i1 + 56 >> 2] = 0;
+ HEAPF32[i1 + 40 >> 2] = 0.0;
+ HEAPF32[i1 + 44 >> 2] = 0.0;
+ i22 = i1 + 48 | 0;
+ HEAP32[i22 >> 2] = i14;
+ HEAP32[i22 + 4 >> 2] = i15;
+ i14 = i1 + 16 | 0;
+ HEAP32[i14 >> 2] = 0;
+ HEAP8[i14] = 1;
+ HEAP8[i14 + 1 | 0] = 0;
+ HEAP8[i14 + 2 | 0] = 0;
+ HEAP8[i14 + 3 | 0] = 0;
+ i14 = i3;
+ i15 = HEAP32[i14 + 4 >> 2] | 0;
+ i22 = i1;
+ HEAP32[i22 >> 2] = HEAP32[i14 >> 2];
+ HEAP32[i22 + 4 >> 2] = i15;
+ STACKTOP = i4;
+ return;
+ }
+ d24 = d8 * d8 + d16 * d16;
+ if (!(d24 > 0.0)) {
+ ___assert_fail(5560, 5576, 127, 5616);
+ }
+ d24 = 1.0 / d24;
+ d23 = d23 - (d21 * d19 + d9 * d20) * d24;
+ d24 = d17 - (d18 * d19 + d10 * d20) * d24;
+ if (d23 * d23 + d24 * d24 > d11 * d11) {
+ STACKTOP = i4;
+ return;
+ }
+ d9 = -d16;
+ if (d8 * d12 + d13 * d9 < 0.0) {
+ d8 = -d8;
+ } else {
+ d16 = d9;
+ }
+ d9 = +Math_sqrt(+(d8 * d8 + d16 * d16));
+ if (!(d9 < 1.1920928955078125e-7)) {
+ d24 = 1.0 / d9;
+ d16 = d16 * d24;
+ d8 = d8 * d24;
+ }
+ HEAP32[i2 >> 2] = 1;
+ HEAP32[i1 + 56 >> 2] = 1;
+ d23 = +d16;
+ d24 = +d8;
+ i14 = i1 + 40 | 0;
+ HEAPF32[i14 >> 2] = d23;
+ HEAPF32[i14 + 4 >> 2] = d24;
+ i14 = i1 + 48 | 0;
+ HEAP32[i14 >> 2] = i5;
+ HEAP32[i14 + 4 >> 2] = i6;
+ i14 = i1 + 16 | 0;
+ HEAP32[i14 >> 2] = 0;
+ HEAP8[i14] = 0;
+ HEAP8[i14 + 1 | 0] = 0;
+ HEAP8[i14 + 2 | 0] = 1;
+ HEAP8[i14 + 3 | 0] = 0;
+ i14 = i3;
+ i15 = HEAP32[i14 + 4 >> 2] | 0;
+ i22 = i1;
+ HEAP32[i22 >> 2] = HEAP32[i14 >> 2];
+ HEAP32[i22 + 4 >> 2] = i15;
+ STACKTOP = i4;
+ return;
+}
+function __ZN6b2BodyC2EPK9b2BodyDefP7b2World(i1, i2, i5) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ i5 = i5 | 0;
+ var i3 = 0, i4 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, d13 = 0.0;
+ i3 = STACKTOP;
+ i9 = i2 + 4 | 0;
+ d13 = +HEAPF32[i9 >> 2];
+ if (!(d13 == d13 & 0.0 == 0.0 & d13 > -inf & d13 < inf)) {
+ ___assert_fail(1496, 1520, 27, 1552);
+ }
+ d13 = +HEAPF32[i2 + 8 >> 2];
+ if (!(d13 == d13 & 0.0 == 0.0 & d13 > -inf & d13 < inf)) {
+ ___assert_fail(1496, 1520, 27, 1552);
+ }
+ i6 = i2 + 16 | 0;
+ d13 = +HEAPF32[i6 >> 2];
+ if (!(d13 == d13 & 0.0 == 0.0 & d13 > -inf & d13 < inf)) {
+ ___assert_fail(1560, 1520, 28, 1552);
+ }
+ d13 = +HEAPF32[i2 + 20 >> 2];
+ if (!(d13 == d13 & 0.0 == 0.0 & d13 > -inf & d13 < inf)) {
+ ___assert_fail(1560, 1520, 28, 1552);
+ }
+ i7 = i2 + 12 | 0;
+ d13 = +HEAPF32[i7 >> 2];
+ if (!(d13 == d13 & 0.0 == 0.0 & d13 > -inf & d13 < inf)) {
+ ___assert_fail(1592, 1520, 29, 1552);
+ }
+ i8 = i2 + 24 | 0;
+ d13 = +HEAPF32[i8 >> 2];
+ if (!(d13 == d13 & 0.0 == 0.0 & d13 > -inf & d13 < inf)) {
+ ___assert_fail(1616, 1520, 30, 1552);
+ }
+ i4 = i2 + 32 | 0;
+ d13 = +HEAPF32[i4 >> 2];
+ if (!(d13 >= 0.0) | d13 == d13 & 0.0 == 0.0 & d13 > -inf & d13 < inf ^ 1) {
+ ___assert_fail(1648, 1520, 31, 1552);
+ }
+ i10 = i2 + 28 | 0;
+ d13 = +HEAPF32[i10 >> 2];
+ if (!(d13 >= 0.0) | d13 == d13 & 0.0 == 0.0 & d13 > -inf & d13 < inf ^ 1) {
+ ___assert_fail(1712, 1520, 32, 1552);
+ }
+ i11 = i1 + 4 | 0;
+ i12 = (HEAP8[i2 + 39 | 0] | 0) == 0 ? 0 : 8;
+ HEAP16[i11 >> 1] = i12;
+ if ((HEAP8[i2 + 38 | 0] | 0) != 0) {
+ i12 = (i12 & 65535 | 16) & 65535;
+ HEAP16[i11 >> 1] = i12;
+ }
+ if ((HEAP8[i2 + 36 | 0] | 0) != 0) {
+ i12 = (i12 & 65535 | 4) & 65535;
+ HEAP16[i11 >> 1] = i12;
+ }
+ if ((HEAP8[i2 + 37 | 0] | 0) != 0) {
+ i12 = (i12 & 65535 | 2) & 65535;
+ HEAP16[i11 >> 1] = i12;
+ }
+ if ((HEAP8[i2 + 40 | 0] | 0) != 0) {
+ HEAP16[i11 >> 1] = i12 & 65535 | 32;
+ }
+ HEAP32[i1 + 88 >> 2] = i5;
+ i11 = i9;
+ i12 = HEAP32[i11 >> 2] | 0;
+ i11 = HEAP32[i11 + 4 >> 2] | 0;
+ i9 = i1 + 12 | 0;
+ HEAP32[i9 >> 2] = i12;
+ HEAP32[i9 + 4 >> 2] = i11;
+ d13 = +HEAPF32[i7 >> 2];
+ HEAPF32[i1 + 20 >> 2] = +Math_sin(+d13);
+ HEAPF32[i1 + 24 >> 2] = +Math_cos(+d13);
+ HEAPF32[i1 + 28 >> 2] = 0.0;
+ HEAPF32[i1 + 32 >> 2] = 0.0;
+ i9 = i1 + 36 | 0;
+ HEAP32[i9 >> 2] = i12;
+ HEAP32[i9 + 4 >> 2] = i11;
+ i9 = i1 + 44 | 0;
+ HEAP32[i9 >> 2] = i12;
+ HEAP32[i9 + 4 >> 2] = i11;
+ HEAPF32[i1 + 52 >> 2] = +HEAPF32[i7 >> 2];
+ HEAPF32[i1 + 56 >> 2] = +HEAPF32[i7 >> 2];
+ HEAPF32[i1 + 60 >> 2] = 0.0;
+ HEAP32[i1 + 108 >> 2] = 0;
+ HEAP32[i1 + 112 >> 2] = 0;
+ HEAP32[i1 + 92 >> 2] = 0;
+ HEAP32[i1 + 96 >> 2] = 0;
+ i9 = i6;
+ i11 = HEAP32[i9 + 4 >> 2] | 0;
+ i12 = i1 + 64 | 0;
+ HEAP32[i12 >> 2] = HEAP32[i9 >> 2];
+ HEAP32[i12 + 4 >> 2] = i11;
+ HEAPF32[i1 + 72 >> 2] = +HEAPF32[i8 >> 2];
+ HEAPF32[i1 + 132 >> 2] = +HEAPF32[i10 >> 2];
+ HEAPF32[i1 + 136 >> 2] = +HEAPF32[i4 >> 2];
+ HEAPF32[i1 + 140 >> 2] = +HEAPF32[i2 + 48 >> 2];
+ HEAPF32[i1 + 76 >> 2] = 0.0;
+ HEAPF32[i1 + 80 >> 2] = 0.0;
+ HEAPF32[i1 + 84 >> 2] = 0.0;
+ HEAPF32[i1 + 144 >> 2] = 0.0;
+ i12 = HEAP32[i2 >> 2] | 0;
+ HEAP32[i1 >> 2] = i12;
+ i4 = i1 + 116 | 0;
+ if ((i12 | 0) == 2) {
+ HEAPF32[i4 >> 2] = 1.0;
+ HEAPF32[i1 + 120 >> 2] = 1.0;
+ i11 = i1 + 124 | 0;
+ HEAPF32[i11 >> 2] = 0.0;
+ i11 = i1 + 128 | 0;
+ HEAPF32[i11 >> 2] = 0.0;
+ i11 = i2 + 44 | 0;
+ i11 = HEAP32[i11 >> 2] | 0;
+ i12 = i1 + 148 | 0;
+ HEAP32[i12 >> 2] = i11;
+ i12 = i1 + 100 | 0;
+ HEAP32[i12 >> 2] = 0;
+ i12 = i1 + 104 | 0;
+ HEAP32[i12 >> 2] = 0;
+ STACKTOP = i3;
+ return;
+ } else {
+ HEAPF32[i4 >> 2] = 0.0;
+ HEAPF32[i1 + 120 >> 2] = 0.0;
+ i11 = i1 + 124 | 0;
+ HEAPF32[i11 >> 2] = 0.0;
+ i11 = i1 + 128 | 0;
+ HEAPF32[i11 >> 2] = 0.0;
+ i11 = i2 + 44 | 0;
+ i11 = HEAP32[i11 >> 2] | 0;
+ i12 = i1 + 148 | 0;
+ HEAP32[i12 >> 2] = i11;
+ i12 = i1 + 100 | 0;
+ HEAP32[i12 >> 2] = 0;
+ i12 = i1 + 104 | 0;
+ HEAP32[i12 >> 2] = 0;
+ STACKTOP = i3;
+ return;
+ }
+}
+function __ZN24b2PositionSolverManifold10InitializeEP27b2ContactPositionConstraintRK11b2TransformS4_i(i2, i1, i13, i12, i15) {
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ i13 = i13 | 0;
+ i12 = i12 | 0;
+ i15 = i15 | 0;
+ var i3 = 0, d4 = 0.0, d5 = 0.0, d6 = 0.0, d7 = 0.0, d8 = 0.0, d9 = 0.0, d10 = 0.0, d11 = 0.0, i14 = 0, d16 = 0.0, d17 = 0.0, d18 = 0.0, i19 = 0, i20 = 0;
+ i3 = STACKTOP;
+ if ((HEAP32[i1 + 84 >> 2] | 0) <= 0) {
+ ___assert_fail(6752, 6520, 617, 6776);
+ }
+ i14 = HEAP32[i1 + 72 >> 2] | 0;
+ if ((i14 | 0) == 1) {
+ i19 = i13 + 12 | 0;
+ d5 = +HEAPF32[i19 >> 2];
+ d6 = +HEAPF32[i1 + 16 >> 2];
+ i14 = i13 + 8 | 0;
+ d7 = +HEAPF32[i14 >> 2];
+ d9 = +HEAPF32[i1 + 20 >> 2];
+ d4 = d5 * d6 - d7 * d9;
+ d9 = d6 * d7 + d5 * d9;
+ d5 = +d4;
+ d7 = +d9;
+ i20 = i2;
+ HEAPF32[i20 >> 2] = d5;
+ HEAPF32[i20 + 4 >> 2] = d7;
+ d7 = +HEAPF32[i19 >> 2];
+ d5 = +HEAPF32[i1 + 24 >> 2];
+ d6 = +HEAPF32[i14 >> 2];
+ d8 = +HEAPF32[i1 + 28 >> 2];
+ d16 = +HEAPF32[i12 + 12 >> 2];
+ d18 = +HEAPF32[i1 + (i15 << 3) >> 2];
+ d17 = +HEAPF32[i12 + 8 >> 2];
+ d11 = +HEAPF32[i1 + (i15 << 3) + 4 >> 2];
+ d10 = +HEAPF32[i12 >> 2] + (d16 * d18 - d17 * d11);
+ d11 = d18 * d17 + d16 * d11 + +HEAPF32[i12 + 4 >> 2];
+ HEAPF32[i2 + 16 >> 2] = d4 * (d10 - (+HEAPF32[i13 >> 2] + (d7 * d5 - d6 * d8))) + (d11 - (d5 * d6 + d7 * d8 + +HEAPF32[i13 + 4 >> 2])) * d9 - +HEAPF32[i1 + 76 >> 2] - +HEAPF32[i1 + 80 >> 2];
+ d10 = +d10;
+ d11 = +d11;
+ i15 = i2 + 8 | 0;
+ HEAPF32[i15 >> 2] = d10;
+ HEAPF32[i15 + 4 >> 2] = d11;
+ STACKTOP = i3;
+ return;
+ } else if ((i14 | 0) == 2) {
+ i19 = i12 + 12 | 0;
+ d7 = +HEAPF32[i19 >> 2];
+ d8 = +HEAPF32[i1 + 16 >> 2];
+ i20 = i12 + 8 | 0;
+ d9 = +HEAPF32[i20 >> 2];
+ d18 = +HEAPF32[i1 + 20 >> 2];
+ d17 = d7 * d8 - d9 * d18;
+ d18 = d8 * d9 + d7 * d18;
+ d7 = +d17;
+ d9 = +d18;
+ i14 = i2;
+ HEAPF32[i14 >> 2] = d7;
+ HEAPF32[i14 + 4 >> 2] = d9;
+ d9 = +HEAPF32[i19 >> 2];
+ d7 = +HEAPF32[i1 + 24 >> 2];
+ d8 = +HEAPF32[i20 >> 2];
+ d10 = +HEAPF32[i1 + 28 >> 2];
+ d6 = +HEAPF32[i13 + 12 >> 2];
+ d4 = +HEAPF32[i1 + (i15 << 3) >> 2];
+ d5 = +HEAPF32[i13 + 8 >> 2];
+ d16 = +HEAPF32[i1 + (i15 << 3) + 4 >> 2];
+ d11 = +HEAPF32[i13 >> 2] + (d6 * d4 - d5 * d16);
+ d16 = d4 * d5 + d6 * d16 + +HEAPF32[i13 + 4 >> 2];
+ HEAPF32[i2 + 16 >> 2] = d17 * (d11 - (+HEAPF32[i12 >> 2] + (d9 * d7 - d8 * d10))) + (d16 - (d7 * d8 + d9 * d10 + +HEAPF32[i12 + 4 >> 2])) * d18 - +HEAPF32[i1 + 76 >> 2] - +HEAPF32[i1 + 80 >> 2];
+ d11 = +d11;
+ d16 = +d16;
+ i20 = i2 + 8 | 0;
+ HEAPF32[i20 >> 2] = d11;
+ HEAPF32[i20 + 4 >> 2] = d16;
+ d17 = +-d17;
+ d18 = +-d18;
+ i20 = i2;
+ HEAPF32[i20 >> 2] = d17;
+ HEAPF32[i20 + 4 >> 2] = d18;
+ STACKTOP = i3;
+ return;
+ } else if ((i14 | 0) == 0) {
+ d7 = +HEAPF32[i13 + 12 >> 2];
+ d8 = +HEAPF32[i1 + 24 >> 2];
+ d18 = +HEAPF32[i13 + 8 >> 2];
+ d6 = +HEAPF32[i1 + 28 >> 2];
+ d4 = +HEAPF32[i13 >> 2] + (d7 * d8 - d18 * d6);
+ d6 = d8 * d18 + d7 * d6 + +HEAPF32[i13 + 4 >> 2];
+ d7 = +HEAPF32[i12 + 12 >> 2];
+ d18 = +HEAPF32[i1 >> 2];
+ d8 = +HEAPF32[i12 + 8 >> 2];
+ d9 = +HEAPF32[i1 + 4 >> 2];
+ d5 = +HEAPF32[i12 >> 2] + (d7 * d18 - d8 * d9);
+ d9 = d18 * d8 + d7 * d9 + +HEAPF32[i12 + 4 >> 2];
+ d7 = d5 - d4;
+ d8 = d9 - d6;
+ d18 = +d7;
+ d10 = +d8;
+ i20 = i2;
+ HEAPF32[i20 >> 2] = d18;
+ HEAPF32[i20 + 4 >> 2] = d10;
+ d10 = +Math_sqrt(+(d7 * d7 + d8 * d8));
+ if (d10 < 1.1920928955078125e-7) {
+ d10 = d7;
+ d11 = d8;
+ } else {
+ d11 = 1.0 / d10;
+ d10 = d7 * d11;
+ HEAPF32[i2 >> 2] = d10;
+ d11 = d8 * d11;
+ HEAPF32[i2 + 4 >> 2] = d11;
+ }
+ d17 = +((d4 + d5) * .5);
+ d18 = +((d6 + d9) * .5);
+ i20 = i2 + 8 | 0;
+ HEAPF32[i20 >> 2] = d17;
+ HEAPF32[i20 + 4 >> 2] = d18;
+ HEAPF32[i2 + 16 >> 2] = d7 * d10 + d8 * d11 - +HEAPF32[i1 + 76 >> 2] - +HEAPF32[i1 + 80 >> 2];
+ STACKTOP = i3;
+ return;
+ } else {
+ STACKTOP = i3;
+ return;
+ }
+}
+function __ZNSt3__118__insertion_sort_3IRPFbRK6b2PairS3_EPS1_EEvT0_S8_T_(i5, i1, i2) {
+ i5 = i5 | 0;
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ var i3 = 0, i4 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0;
+ i4 = STACKTOP;
+ STACKTOP = STACKTOP + 32 | 0;
+ i6 = i4 + 12 | 0;
+ i3 = i4;
+ i7 = i5 + 24 | 0;
+ i8 = i5 + 12 | 0;
+ i10 = FUNCTION_TABLE_iii[HEAP32[i2 >> 2] & 3](i8, i5) | 0;
+ i9 = FUNCTION_TABLE_iii[HEAP32[i2 >> 2] & 3](i7, i8) | 0;
+ do {
+ if (i10) {
+ if (i9) {
+ HEAP32[i6 + 0 >> 2] = HEAP32[i5 + 0 >> 2];
+ HEAP32[i6 + 4 >> 2] = HEAP32[i5 + 4 >> 2];
+ HEAP32[i6 + 8 >> 2] = HEAP32[i5 + 8 >> 2];
+ HEAP32[i5 + 0 >> 2] = HEAP32[i7 + 0 >> 2];
+ HEAP32[i5 + 4 >> 2] = HEAP32[i7 + 4 >> 2];
+ HEAP32[i5 + 8 >> 2] = HEAP32[i7 + 8 >> 2];
+ HEAP32[i7 + 0 >> 2] = HEAP32[i6 + 0 >> 2];
+ HEAP32[i7 + 4 >> 2] = HEAP32[i6 + 4 >> 2];
+ HEAP32[i7 + 8 >> 2] = HEAP32[i6 + 8 >> 2];
+ break;
+ }
+ HEAP32[i6 + 0 >> 2] = HEAP32[i5 + 0 >> 2];
+ HEAP32[i6 + 4 >> 2] = HEAP32[i5 + 4 >> 2];
+ HEAP32[i6 + 8 >> 2] = HEAP32[i5 + 8 >> 2];
+ HEAP32[i5 + 0 >> 2] = HEAP32[i8 + 0 >> 2];
+ HEAP32[i5 + 4 >> 2] = HEAP32[i8 + 4 >> 2];
+ HEAP32[i5 + 8 >> 2] = HEAP32[i8 + 8 >> 2];
+ HEAP32[i8 + 0 >> 2] = HEAP32[i6 + 0 >> 2];
+ HEAP32[i8 + 4 >> 2] = HEAP32[i6 + 4 >> 2];
+ HEAP32[i8 + 8 >> 2] = HEAP32[i6 + 8 >> 2];
+ if (FUNCTION_TABLE_iii[HEAP32[i2 >> 2] & 3](i7, i8) | 0) {
+ HEAP32[i6 + 0 >> 2] = HEAP32[i8 + 0 >> 2];
+ HEAP32[i6 + 4 >> 2] = HEAP32[i8 + 4 >> 2];
+ HEAP32[i6 + 8 >> 2] = HEAP32[i8 + 8 >> 2];
+ HEAP32[i8 + 0 >> 2] = HEAP32[i7 + 0 >> 2];
+ HEAP32[i8 + 4 >> 2] = HEAP32[i7 + 4 >> 2];
+ HEAP32[i8 + 8 >> 2] = HEAP32[i7 + 8 >> 2];
+ HEAP32[i7 + 0 >> 2] = HEAP32[i6 + 0 >> 2];
+ HEAP32[i7 + 4 >> 2] = HEAP32[i6 + 4 >> 2];
+ HEAP32[i7 + 8 >> 2] = HEAP32[i6 + 8 >> 2];
+ }
+ } else {
+ if (i9) {
+ HEAP32[i6 + 0 >> 2] = HEAP32[i8 + 0 >> 2];
+ HEAP32[i6 + 4 >> 2] = HEAP32[i8 + 4 >> 2];
+ HEAP32[i6 + 8 >> 2] = HEAP32[i8 + 8 >> 2];
+ HEAP32[i8 + 0 >> 2] = HEAP32[i7 + 0 >> 2];
+ HEAP32[i8 + 4 >> 2] = HEAP32[i7 + 4 >> 2];
+ HEAP32[i8 + 8 >> 2] = HEAP32[i7 + 8 >> 2];
+ HEAP32[i7 + 0 >> 2] = HEAP32[i6 + 0 >> 2];
+ HEAP32[i7 + 4 >> 2] = HEAP32[i6 + 4 >> 2];
+ HEAP32[i7 + 8 >> 2] = HEAP32[i6 + 8 >> 2];
+ if (FUNCTION_TABLE_iii[HEAP32[i2 >> 2] & 3](i8, i5) | 0) {
+ HEAP32[i6 + 0 >> 2] = HEAP32[i5 + 0 >> 2];
+ HEAP32[i6 + 4 >> 2] = HEAP32[i5 + 4 >> 2];
+ HEAP32[i6 + 8 >> 2] = HEAP32[i5 + 8 >> 2];
+ HEAP32[i5 + 0 >> 2] = HEAP32[i8 + 0 >> 2];
+ HEAP32[i5 + 4 >> 2] = HEAP32[i8 + 4 >> 2];
+ HEAP32[i5 + 8 >> 2] = HEAP32[i8 + 8 >> 2];
+ HEAP32[i8 + 0 >> 2] = HEAP32[i6 + 0 >> 2];
+ HEAP32[i8 + 4 >> 2] = HEAP32[i6 + 4 >> 2];
+ HEAP32[i8 + 8 >> 2] = HEAP32[i6 + 8 >> 2];
+ }
+ }
+ }
+ } while (0);
+ i6 = i5 + 36 | 0;
+ if ((i6 | 0) == (i1 | 0)) {
+ STACKTOP = i4;
+ return;
+ }
+ while (1) {
+ if (FUNCTION_TABLE_iii[HEAP32[i2 >> 2] & 3](i6, i7) | 0) {
+ HEAP32[i3 + 0 >> 2] = HEAP32[i6 + 0 >> 2];
+ HEAP32[i3 + 4 >> 2] = HEAP32[i6 + 4 >> 2];
+ HEAP32[i3 + 8 >> 2] = HEAP32[i6 + 8 >> 2];
+ i8 = i6;
+ while (1) {
+ HEAP32[i8 + 0 >> 2] = HEAP32[i7 + 0 >> 2];
+ HEAP32[i8 + 4 >> 2] = HEAP32[i7 + 4 >> 2];
+ HEAP32[i8 + 8 >> 2] = HEAP32[i7 + 8 >> 2];
+ if ((i7 | 0) == (i5 | 0)) {
+ break;
+ }
+ i8 = i7 + -12 | 0;
+ if (FUNCTION_TABLE_iii[HEAP32[i2 >> 2] & 3](i3, i8) | 0) {
+ i10 = i7;
+ i7 = i8;
+ i8 = i10;
+ } else {
+ break;
+ }
+ }
+ HEAP32[i7 + 0 >> 2] = HEAP32[i3 + 0 >> 2];
+ HEAP32[i7 + 4 >> 2] = HEAP32[i3 + 4 >> 2];
+ HEAP32[i7 + 8 >> 2] = HEAP32[i3 + 8 >> 2];
+ }
+ i7 = i6 + 12 | 0;
+ if ((i7 | 0) == (i1 | 0)) {
+ break;
+ } else {
+ i10 = i6;
+ i6 = i7;
+ i7 = i10;
+ }
+ }
+ STACKTOP = i4;
+ return;
+}
+function __ZNK20b2SeparationFunction8EvaluateEiif(i10, i12, i11, d9) {
+ i10 = i10 | 0;
+ i12 = i12 | 0;
+ i11 = i11 | 0;
+ d9 = +d9;
+ var d1 = 0.0, d2 = 0.0, d3 = 0.0, d4 = 0.0, d5 = 0.0, d6 = 0.0, i7 = 0, d8 = 0.0, d13 = 0.0, d14 = 0.0, d15 = 0.0, d16 = 0.0, i17 = 0, d18 = 0.0, d19 = 0.0;
+ i7 = STACKTOP;
+ d14 = 1.0 - d9;
+ d3 = d14 * +HEAPF32[i10 + 32 >> 2] + +HEAPF32[i10 + 36 >> 2] * d9;
+ d4 = +Math_sin(+d3);
+ d3 = +Math_cos(+d3);
+ d5 = +HEAPF32[i10 + 8 >> 2];
+ d6 = +HEAPF32[i10 + 12 >> 2];
+ d2 = d14 * +HEAPF32[i10 + 16 >> 2] + +HEAPF32[i10 + 24 >> 2] * d9 - (d3 * d5 - d4 * d6);
+ d6 = d14 * +HEAPF32[i10 + 20 >> 2] + +HEAPF32[i10 + 28 >> 2] * d9 - (d4 * d5 + d3 * d6);
+ d5 = d14 * +HEAPF32[i10 + 68 >> 2] + +HEAPF32[i10 + 72 >> 2] * d9;
+ d1 = +Math_sin(+d5);
+ d5 = +Math_cos(+d5);
+ d15 = +HEAPF32[i10 + 44 >> 2];
+ d16 = +HEAPF32[i10 + 48 >> 2];
+ d8 = d14 * +HEAPF32[i10 + 52 >> 2] + +HEAPF32[i10 + 60 >> 2] * d9 - (d5 * d15 - d1 * d16);
+ d9 = d14 * +HEAPF32[i10 + 56 >> 2] + +HEAPF32[i10 + 64 >> 2] * d9 - (d1 * d15 + d5 * d16);
+ i17 = HEAP32[i10 + 80 >> 2] | 0;
+ if ((i17 | 0) == 0) {
+ d14 = +HEAPF32[i10 + 92 >> 2];
+ d13 = +HEAPF32[i10 + 96 >> 2];
+ i17 = HEAP32[i10 >> 2] | 0;
+ if (!((i12 | 0) > -1)) {
+ ___assert_fail(3640, 3672, 103, 3704);
+ }
+ if ((HEAP32[i17 + 20 >> 2] | 0) <= (i12 | 0)) {
+ ___assert_fail(3640, 3672, 103, 3704);
+ }
+ i17 = (HEAP32[i17 + 16 >> 2] | 0) + (i12 << 3) | 0;
+ d15 = +HEAPF32[i17 >> 2];
+ d16 = +HEAPF32[i17 + 4 >> 2];
+ i10 = HEAP32[i10 + 4 >> 2] | 0;
+ if (!((i11 | 0) > -1)) {
+ ___assert_fail(3640, 3672, 103, 3704);
+ }
+ if ((HEAP32[i10 + 20 >> 2] | 0) <= (i11 | 0)) {
+ ___assert_fail(3640, 3672, 103, 3704);
+ }
+ i17 = (HEAP32[i10 + 16 >> 2] | 0) + (i11 << 3) | 0;
+ d19 = +HEAPF32[i17 >> 2];
+ d18 = +HEAPF32[i17 + 4 >> 2];
+ d16 = d14 * (d8 + (d5 * d19 - d1 * d18) - (d2 + (d3 * d15 - d4 * d16))) + d13 * (d9 + (d1 * d19 + d5 * d18) - (d6 + (d4 * d15 + d3 * d16)));
+ STACKTOP = i7;
+ return +d16;
+ } else if ((i17 | 0) == 1) {
+ d14 = +HEAPF32[i10 + 92 >> 2];
+ d13 = +HEAPF32[i10 + 96 >> 2];
+ d16 = +HEAPF32[i10 + 84 >> 2];
+ d15 = +HEAPF32[i10 + 88 >> 2];
+ i10 = HEAP32[i10 + 4 >> 2] | 0;
+ if (!((i11 | 0) > -1)) {
+ ___assert_fail(3640, 3672, 103, 3704);
+ }
+ if ((HEAP32[i10 + 20 >> 2] | 0) <= (i11 | 0)) {
+ ___assert_fail(3640, 3672, 103, 3704);
+ }
+ i17 = (HEAP32[i10 + 16 >> 2] | 0) + (i11 << 3) | 0;
+ d18 = +HEAPF32[i17 >> 2];
+ d19 = +HEAPF32[i17 + 4 >> 2];
+ d19 = (d3 * d14 - d4 * d13) * (d8 + (d5 * d18 - d1 * d19) - (d2 + (d3 * d16 - d4 * d15))) + (d4 * d14 + d3 * d13) * (d9 + (d1 * d18 + d5 * d19) - (d6 + (d4 * d16 + d3 * d15)));
+ STACKTOP = i7;
+ return +d19;
+ } else if ((i17 | 0) == 2) {
+ d16 = +HEAPF32[i10 + 92 >> 2];
+ d15 = +HEAPF32[i10 + 96 >> 2];
+ d14 = +HEAPF32[i10 + 84 >> 2];
+ d13 = +HEAPF32[i10 + 88 >> 2];
+ i10 = HEAP32[i10 >> 2] | 0;
+ if (!((i12 | 0) > -1)) {
+ ___assert_fail(3640, 3672, 103, 3704);
+ }
+ if ((HEAP32[i10 + 20 >> 2] | 0) <= (i12 | 0)) {
+ ___assert_fail(3640, 3672, 103, 3704);
+ }
+ i17 = (HEAP32[i10 + 16 >> 2] | 0) + (i12 << 3) | 0;
+ d18 = +HEAPF32[i17 >> 2];
+ d19 = +HEAPF32[i17 + 4 >> 2];
+ d19 = (d5 * d16 - d1 * d15) * (d2 + (d3 * d18 - d4 * d19) - (d8 + (d5 * d14 - d1 * d13))) + (d1 * d16 + d5 * d15) * (d6 + (d4 * d18 + d3 * d19) - (d9 + (d1 * d14 + d5 * d13)));
+ STACKTOP = i7;
+ return +d19;
+ } else {
+ ___assert_fail(3616, 3560, 242, 3624);
+ }
+ return 0.0;
+}
+function __ZN6b2Body13ResetMassDataEv(i2) {
+ i2 = i2 | 0;
+ var d1 = 0.0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, d14 = 0.0, d15 = 0.0, d16 = 0.0, i17 = 0, d18 = 0.0, d19 = 0.0, i20 = 0, d21 = 0.0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i10 = i3;
+ i8 = i2 + 116 | 0;
+ i9 = i2 + 120 | 0;
+ i4 = i2 + 124 | 0;
+ i5 = i2 + 128 | 0;
+ i6 = i2 + 28 | 0;
+ HEAPF32[i6 >> 2] = 0.0;
+ HEAPF32[i2 + 32 >> 2] = 0.0;
+ HEAP32[i8 + 0 >> 2] = 0;
+ HEAP32[i8 + 4 >> 2] = 0;
+ HEAP32[i8 + 8 >> 2] = 0;
+ HEAP32[i8 + 12 >> 2] = 0;
+ i11 = HEAP32[i2 >> 2] | 0;
+ if ((i11 | 0) == 2) {
+ i17 = 3784;
+ d16 = +HEAPF32[i17 >> 2];
+ d18 = +HEAPF32[i17 + 4 >> 2];
+ i17 = HEAP32[i2 + 100 >> 2] | 0;
+ if ((i17 | 0) != 0) {
+ i11 = i10 + 4 | 0;
+ i12 = i10 + 8 | 0;
+ i13 = i10 + 12 | 0;
+ d14 = 0.0;
+ d15 = 0.0;
+ do {
+ d19 = +HEAPF32[i17 >> 2];
+ if (!(d19 == 0.0)) {
+ i20 = HEAP32[i17 + 12 >> 2] | 0;
+ FUNCTION_TABLE_viid[HEAP32[(HEAP32[i20 >> 2] | 0) + 28 >> 2] & 3](i20, i10, d19);
+ d14 = +HEAPF32[i10 >> 2];
+ d15 = d14 + +HEAPF32[i8 >> 2];
+ HEAPF32[i8 >> 2] = d15;
+ d16 = d16 + d14 * +HEAPF32[i11 >> 2];
+ d18 = d18 + d14 * +HEAPF32[i12 >> 2];
+ d14 = +HEAPF32[i13 >> 2] + +HEAPF32[i4 >> 2];
+ HEAPF32[i4 >> 2] = d14;
+ }
+ i17 = HEAP32[i17 + 4 >> 2] | 0;
+ } while ((i17 | 0) != 0);
+ if (d15 > 0.0) {
+ d19 = 1.0 / d15;
+ HEAPF32[i9 >> 2] = d19;
+ d16 = d16 * d19;
+ d18 = d18 * d19;
+ } else {
+ i7 = 11;
+ }
+ } else {
+ d14 = 0.0;
+ i7 = 11;
+ }
+ if ((i7 | 0) == 11) {
+ HEAPF32[i8 >> 2] = 1.0;
+ HEAPF32[i9 >> 2] = 1.0;
+ d15 = 1.0;
+ }
+ do {
+ if (d14 > 0.0 ? (HEAP16[i2 + 4 >> 1] & 16) == 0 : 0) {
+ d14 = d14 - (d18 * d18 + d16 * d16) * d15;
+ HEAPF32[i4 >> 2] = d14;
+ if (d14 > 0.0) {
+ d1 = 1.0 / d14;
+ break;
+ } else {
+ ___assert_fail(1872, 1520, 319, 1856);
+ }
+ } else {
+ i7 = 17;
+ }
+ } while (0);
+ if ((i7 | 0) == 17) {
+ HEAPF32[i4 >> 2] = 0.0;
+ d1 = 0.0;
+ }
+ HEAPF32[i5 >> 2] = d1;
+ i20 = i2 + 44 | 0;
+ i17 = i20;
+ d19 = +HEAPF32[i17 >> 2];
+ d14 = +HEAPF32[i17 + 4 >> 2];
+ d21 = +d16;
+ d1 = +d18;
+ i17 = i6;
+ HEAPF32[i17 >> 2] = d21;
+ HEAPF32[i17 + 4 >> 2] = d1;
+ d1 = +HEAPF32[i2 + 24 >> 2];
+ d21 = +HEAPF32[i2 + 20 >> 2];
+ d15 = +HEAPF32[i2 + 12 >> 2] + (d1 * d16 - d21 * d18);
+ d16 = d16 * d21 + d1 * d18 + +HEAPF32[i2 + 16 >> 2];
+ d1 = +d15;
+ d18 = +d16;
+ HEAPF32[i20 >> 2] = d1;
+ HEAPF32[i20 + 4 >> 2] = d18;
+ i20 = i2 + 36 | 0;
+ HEAPF32[i20 >> 2] = d1;
+ HEAPF32[i20 + 4 >> 2] = d18;
+ d18 = +HEAPF32[i2 + 72 >> 2];
+ i20 = i2 + 64 | 0;
+ HEAPF32[i20 >> 2] = +HEAPF32[i20 >> 2] - d18 * (d16 - d14);
+ i20 = i2 + 68 | 0;
+ HEAPF32[i20 >> 2] = d18 * (d15 - d19) + +HEAPF32[i20 >> 2];
+ STACKTOP = i3;
+ return;
+ } else if ((i11 | 0) == 1 | (i11 | 0) == 0) {
+ i17 = i2 + 12 | 0;
+ i13 = HEAP32[i17 >> 2] | 0;
+ i17 = HEAP32[i17 + 4 >> 2] | 0;
+ i20 = i2 + 36 | 0;
+ HEAP32[i20 >> 2] = i13;
+ HEAP32[i20 + 4 >> 2] = i17;
+ i20 = i2 + 44 | 0;
+ HEAP32[i20 >> 2] = i13;
+ HEAP32[i20 + 4 >> 2] = i17;
+ HEAPF32[i2 + 52 >> 2] = +HEAPF32[i2 + 56 >> 2];
+ STACKTOP = i3;
+ return;
+ } else {
+ ___assert_fail(1824, 1520, 284, 1856);
+ }
+}
+function __ZN9b2Contact6UpdateEP17b2ContactListener(i1, i4) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ var i2 = 0, i3 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 64 | 0;
+ i2 = i3;
+ i10 = i1 + 64 | 0;
+ i6 = i2 + 0 | 0;
+ i7 = i10 + 0 | 0;
+ i5 = i6 + 64 | 0;
+ do {
+ HEAP32[i6 >> 2] = HEAP32[i7 >> 2];
+ i6 = i6 + 4 | 0;
+ i7 = i7 + 4 | 0;
+ } while ((i6 | 0) < (i5 | 0));
+ i6 = i1 + 4 | 0;
+ i11 = HEAP32[i6 >> 2] | 0;
+ HEAP32[i6 >> 2] = i11 | 4;
+ i11 = i11 >>> 1;
+ i14 = HEAP32[i1 + 48 >> 2] | 0;
+ i15 = HEAP32[i1 + 52 >> 2] | 0;
+ i5 = (HEAP8[i15 + 38 | 0] | HEAP8[i14 + 38 | 0]) << 24 >> 24 != 0;
+ i8 = HEAP32[i14 + 8 >> 2] | 0;
+ i7 = HEAP32[i15 + 8 >> 2] | 0;
+ i12 = i8 + 12 | 0;
+ i13 = i7 + 12 | 0;
+ if (!i5) {
+ FUNCTION_TABLE_viiii[HEAP32[HEAP32[i1 >> 2] >> 2] & 15](i1, i10, i12, i13);
+ i12 = i1 + 124 | 0;
+ i10 = (HEAP32[i12 >> 2] | 0) > 0;
+ L4 : do {
+ if (i10) {
+ i19 = HEAP32[i2 + 60 >> 2] | 0;
+ if ((i19 | 0) > 0) {
+ i18 = 0;
+ } else {
+ i9 = 0;
+ while (1) {
+ HEAPF32[i1 + (i9 * 20 | 0) + 72 >> 2] = 0.0;
+ HEAPF32[i1 + (i9 * 20 | 0) + 76 >> 2] = 0.0;
+ i9 = i9 + 1 | 0;
+ if ((i9 | 0) >= (HEAP32[i12 >> 2] | 0)) {
+ break L4;
+ }
+ }
+ }
+ do {
+ i16 = i1 + (i18 * 20 | 0) + 72 | 0;
+ HEAPF32[i16 >> 2] = 0.0;
+ i15 = i1 + (i18 * 20 | 0) + 76 | 0;
+ HEAPF32[i15 >> 2] = 0.0;
+ i14 = HEAP32[i1 + (i18 * 20 | 0) + 80 >> 2] | 0;
+ i17 = 0;
+ while (1) {
+ i13 = i17 + 1 | 0;
+ if ((HEAP32[i2 + (i17 * 20 | 0) + 16 >> 2] | 0) == (i14 | 0)) {
+ i9 = 7;
+ break;
+ }
+ if ((i13 | 0) < (i19 | 0)) {
+ i17 = i13;
+ } else {
+ break;
+ }
+ }
+ if ((i9 | 0) == 7) {
+ i9 = 0;
+ HEAPF32[i16 >> 2] = +HEAPF32[i2 + (i17 * 20 | 0) + 8 >> 2];
+ HEAPF32[i15 >> 2] = +HEAPF32[i2 + (i17 * 20 | 0) + 12 >> 2];
+ }
+ i18 = i18 + 1 | 0;
+ } while ((i18 | 0) < (HEAP32[i12 >> 2] | 0));
+ }
+ } while (0);
+ i9 = i11 & 1;
+ if (i10 ^ (i9 | 0) != 0) {
+ i11 = i8 + 4 | 0;
+ i12 = HEAPU16[i11 >> 1] | 0;
+ if ((i12 & 2 | 0) == 0) {
+ HEAP16[i11 >> 1] = i12 | 2;
+ HEAPF32[i8 + 144 >> 2] = 0.0;
+ }
+ i8 = i7 + 4 | 0;
+ i11 = HEAPU16[i8 >> 1] | 0;
+ if ((i11 & 2 | 0) == 0) {
+ HEAP16[i8 >> 1] = i11 | 2;
+ HEAPF32[i7 + 144 >> 2] = 0.0;
+ }
+ }
+ } else {
+ i10 = __Z13b2TestOverlapPK7b2ShapeiS1_iRK11b2TransformS4_(HEAP32[i14 + 12 >> 2] | 0, HEAP32[i1 + 56 >> 2] | 0, HEAP32[i15 + 12 >> 2] | 0, HEAP32[i1 + 60 >> 2] | 0, i12, i13) | 0;
+ HEAP32[i1 + 124 >> 2] = 0;
+ i9 = i11 & 1;
+ }
+ i7 = HEAP32[i6 >> 2] | 0;
+ HEAP32[i6 >> 2] = i10 ? i7 | 2 : i7 & -3;
+ i8 = (i9 | 0) == 0;
+ i6 = i10 ^ 1;
+ i7 = (i4 | 0) == 0;
+ if (!(i8 ^ 1 | i6 | i7)) {
+ FUNCTION_TABLE_vii[HEAP32[(HEAP32[i4 >> 2] | 0) + 8 >> 2] & 15](i4, i1);
+ }
+ if (!(i8 | i10 | i7)) {
+ FUNCTION_TABLE_vii[HEAP32[(HEAP32[i4 >> 2] | 0) + 12 >> 2] & 15](i4, i1);
+ }
+ if (i5 | i6 | i7) {
+ STACKTOP = i3;
+ return;
+ }
+ FUNCTION_TABLE_viii[HEAP32[(HEAP32[i4 >> 2] | 0) + 16 >> 2] & 3](i4, i1, i2);
+ STACKTOP = i3;
+ return;
+}
+function __ZN13b2DynamicTree10RemoveLeafEi(i1, i12) {
+ i1 = i1 | 0;
+ i12 = i12 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, d8 = 0.0, d9 = 0.0, d10 = 0.0, d11 = 0.0, i13 = 0;
+ i2 = STACKTOP;
+ if ((HEAP32[i1 >> 2] | 0) == (i12 | 0)) {
+ HEAP32[i1 >> 2] = -1;
+ STACKTOP = i2;
+ return;
+ }
+ i3 = i1 + 4 | 0;
+ i5 = HEAP32[i3 >> 2] | 0;
+ i6 = HEAP32[i5 + (i12 * 36 | 0) + 20 >> 2] | 0;
+ i4 = i5 + (i6 * 36 | 0) + 20 | 0;
+ i7 = HEAP32[i4 >> 2] | 0;
+ i13 = HEAP32[i5 + (i6 * 36 | 0) + 24 >> 2] | 0;
+ if ((i13 | 0) == (i12 | 0)) {
+ i13 = HEAP32[i5 + (i6 * 36 | 0) + 28 >> 2] | 0;
+ }
+ if ((i7 | 0) == -1) {
+ HEAP32[i1 >> 2] = i13;
+ HEAP32[i5 + (i13 * 36 | 0) + 20 >> 2] = -1;
+ if (!((i6 | 0) > -1)) {
+ ___assert_fail(3e3, 2944, 97, 3040);
+ }
+ if ((HEAP32[i1 + 12 >> 2] | 0) <= (i6 | 0)) {
+ ___assert_fail(3e3, 2944, 97, 3040);
+ }
+ i3 = i1 + 8 | 0;
+ if ((HEAP32[i3 >> 2] | 0) <= 0) {
+ ___assert_fail(3056, 2944, 98, 3040);
+ }
+ i13 = i1 + 16 | 0;
+ HEAP32[i4 >> 2] = HEAP32[i13 >> 2];
+ HEAP32[i5 + (i6 * 36 | 0) + 32 >> 2] = -1;
+ HEAP32[i13 >> 2] = i6;
+ HEAP32[i3 >> 2] = (HEAP32[i3 >> 2] | 0) + -1;
+ STACKTOP = i2;
+ return;
+ }
+ i12 = i5 + (i7 * 36 | 0) + 24 | 0;
+ if ((HEAP32[i12 >> 2] | 0) == (i6 | 0)) {
+ HEAP32[i12 >> 2] = i13;
+ } else {
+ HEAP32[i5 + (i7 * 36 | 0) + 28 >> 2] = i13;
+ }
+ HEAP32[i5 + (i13 * 36 | 0) + 20 >> 2] = i7;
+ if (!((i6 | 0) > -1)) {
+ ___assert_fail(3e3, 2944, 97, 3040);
+ }
+ if ((HEAP32[i1 + 12 >> 2] | 0) <= (i6 | 0)) {
+ ___assert_fail(3e3, 2944, 97, 3040);
+ }
+ i12 = i1 + 8 | 0;
+ if ((HEAP32[i12 >> 2] | 0) <= 0) {
+ ___assert_fail(3056, 2944, 98, 3040);
+ }
+ i13 = i1 + 16 | 0;
+ HEAP32[i4 >> 2] = HEAP32[i13 >> 2];
+ HEAP32[i5 + (i6 * 36 | 0) + 32 >> 2] = -1;
+ HEAP32[i13 >> 2] = i6;
+ HEAP32[i12 >> 2] = (HEAP32[i12 >> 2] | 0) + -1;
+ do {
+ i4 = __ZN13b2DynamicTree7BalanceEi(i1, i7) | 0;
+ i7 = HEAP32[i3 >> 2] | 0;
+ i6 = HEAP32[i7 + (i4 * 36 | 0) + 24 >> 2] | 0;
+ i5 = HEAP32[i7 + (i4 * 36 | 0) + 28 >> 2] | 0;
+ d10 = +HEAPF32[i7 + (i6 * 36 | 0) >> 2];
+ d11 = +HEAPF32[i7 + (i5 * 36 | 0) >> 2];
+ d9 = +HEAPF32[i7 + (i6 * 36 | 0) + 4 >> 2];
+ d8 = +HEAPF32[i7 + (i5 * 36 | 0) + 4 >> 2];
+ d10 = +(d10 < d11 ? d10 : d11);
+ d11 = +(d9 < d8 ? d9 : d8);
+ i13 = i7 + (i4 * 36 | 0) | 0;
+ HEAPF32[i13 >> 2] = d10;
+ HEAPF32[i13 + 4 >> 2] = d11;
+ d11 = +HEAPF32[i7 + (i6 * 36 | 0) + 8 >> 2];
+ d10 = +HEAPF32[i7 + (i5 * 36 | 0) + 8 >> 2];
+ d9 = +HEAPF32[i7 + (i6 * 36 | 0) + 12 >> 2];
+ d8 = +HEAPF32[i7 + (i5 * 36 | 0) + 12 >> 2];
+ d10 = +(d11 > d10 ? d11 : d10);
+ d11 = +(d9 > d8 ? d9 : d8);
+ i7 = i7 + (i4 * 36 | 0) + 8 | 0;
+ HEAPF32[i7 >> 2] = d10;
+ HEAPF32[i7 + 4 >> 2] = d11;
+ i7 = HEAP32[i3 >> 2] | 0;
+ i6 = HEAP32[i7 + (i6 * 36 | 0) + 32 >> 2] | 0;
+ i5 = HEAP32[i7 + (i5 * 36 | 0) + 32 >> 2] | 0;
+ HEAP32[i7 + (i4 * 36 | 0) + 32 >> 2] = ((i6 | 0) > (i5 | 0) ? i6 : i5) + 1;
+ i7 = HEAP32[i7 + (i4 * 36 | 0) + 20 >> 2] | 0;
+ } while (!((i7 | 0) == -1));
+ STACKTOP = i2;
+ return;
+}
+function __ZN9b2Simplex6Solve3Ev(i7) {
+ i7 = i7 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, d4 = 0.0, d5 = 0.0, d6 = 0.0, d8 = 0.0, d9 = 0.0, d10 = 0.0, d11 = 0.0, d12 = 0.0, d13 = 0.0, d14 = 0.0, d15 = 0.0, d16 = 0.0, d17 = 0.0, d18 = 0.0, d19 = 0.0, d20 = 0.0, d21 = 0.0, i22 = 0;
+ i1 = STACKTOP;
+ i2 = i7 + 16 | 0;
+ d17 = +HEAPF32[i2 >> 2];
+ d15 = +HEAPF32[i2 + 4 >> 2];
+ i2 = i7 + 36 | 0;
+ i3 = i7 + 52 | 0;
+ d14 = +HEAPF32[i3 >> 2];
+ d16 = +HEAPF32[i3 + 4 >> 2];
+ i3 = i7 + 72 | 0;
+ i22 = i7 + 88 | 0;
+ d18 = +HEAPF32[i22 >> 2];
+ d11 = +HEAPF32[i22 + 4 >> 2];
+ d20 = d14 - d17;
+ d10 = d16 - d15;
+ d9 = d17 * d20 + d15 * d10;
+ d8 = d14 * d20 + d16 * d10;
+ d4 = d18 - d17;
+ d19 = d11 - d15;
+ d6 = d17 * d4 + d15 * d19;
+ d5 = d18 * d4 + d11 * d19;
+ d21 = d18 - d14;
+ d12 = d11 - d16;
+ d13 = d14 * d21 + d16 * d12;
+ d12 = d18 * d21 + d11 * d12;
+ d4 = d20 * d19 - d10 * d4;
+ d10 = (d14 * d11 - d16 * d18) * d4;
+ d11 = (d15 * d18 - d17 * d11) * d4;
+ d4 = (d17 * d16 - d15 * d14) * d4;
+ if (!(!(d9 >= -0.0) | !(d6 >= -0.0))) {
+ HEAPF32[i7 + 24 >> 2] = 1.0;
+ HEAP32[i7 + 108 >> 2] = 1;
+ STACKTOP = i1;
+ return;
+ }
+ if (!(!(d9 < -0.0) | !(d8 > 0.0) | !(d4 <= 0.0))) {
+ d21 = 1.0 / (d8 - d9);
+ HEAPF32[i7 + 24 >> 2] = d8 * d21;
+ HEAPF32[i7 + 60 >> 2] = -(d9 * d21);
+ HEAP32[i7 + 108 >> 2] = 2;
+ STACKTOP = i1;
+ return;
+ }
+ if (!(!(d6 < -0.0) | !(d5 > 0.0) | !(d11 <= 0.0))) {
+ d21 = 1.0 / (d5 - d6);
+ HEAPF32[i7 + 24 >> 2] = d5 * d21;
+ HEAPF32[i7 + 96 >> 2] = -(d6 * d21);
+ HEAP32[i7 + 108 >> 2] = 2;
+ i7 = i2 + 0 | 0;
+ i3 = i3 + 0 | 0;
+ i2 = i7 + 36 | 0;
+ do {
+ HEAP32[i7 >> 2] = HEAP32[i3 >> 2];
+ i7 = i7 + 4 | 0;
+ i3 = i3 + 4 | 0;
+ } while ((i7 | 0) < (i2 | 0));
+ STACKTOP = i1;
+ return;
+ }
+ if (!(!(d8 <= 0.0) | !(d13 >= -0.0))) {
+ HEAPF32[i7 + 60 >> 2] = 1.0;
+ HEAP32[i7 + 108 >> 2] = 1;
+ i7 = i7 + 0 | 0;
+ i3 = i2 + 0 | 0;
+ i2 = i7 + 36 | 0;
+ do {
+ HEAP32[i7 >> 2] = HEAP32[i3 >> 2];
+ i7 = i7 + 4 | 0;
+ i3 = i3 + 4 | 0;
+ } while ((i7 | 0) < (i2 | 0));
+ STACKTOP = i1;
+ return;
+ }
+ if (!(!(d5 <= 0.0) | !(d12 <= 0.0))) {
+ HEAPF32[i7 + 96 >> 2] = 1.0;
+ HEAP32[i7 + 108 >> 2] = 1;
+ i7 = i7 + 0 | 0;
+ i3 = i3 + 0 | 0;
+ i2 = i7 + 36 | 0;
+ do {
+ HEAP32[i7 >> 2] = HEAP32[i3 >> 2];
+ i7 = i7 + 4 | 0;
+ i3 = i3 + 4 | 0;
+ } while ((i7 | 0) < (i2 | 0));
+ STACKTOP = i1;
+ return;
+ }
+ if (!(d13 < -0.0) | !(d12 > 0.0) | !(d10 <= 0.0)) {
+ d21 = 1.0 / (d4 + (d10 + d11));
+ HEAPF32[i7 + 24 >> 2] = d10 * d21;
+ HEAPF32[i7 + 60 >> 2] = d11 * d21;
+ HEAPF32[i7 + 96 >> 2] = d4 * d21;
+ HEAP32[i7 + 108 >> 2] = 3;
+ STACKTOP = i1;
+ return;
+ } else {
+ d21 = 1.0 / (d12 - d13);
+ HEAPF32[i7 + 60 >> 2] = d12 * d21;
+ HEAPF32[i7 + 96 >> 2] = -(d13 * d21);
+ HEAP32[i7 + 108 >> 2] = 2;
+ i7 = i7 + 0 | 0;
+ i3 = i3 + 0 | 0;
+ i2 = i7 + 36 | 0;
+ do {
+ HEAP32[i7 >> 2] = HEAP32[i3 >> 2];
+ i7 = i7 + 4 | 0;
+ i3 = i3 + 4 | 0;
+ } while ((i7 | 0) < (i2 | 0));
+ STACKTOP = i1;
+ return;
+ }
+}
+function __ZN16b2ContactManager7CollideEv(i3) {
+ i3 = i3 | 0;
+ var i1 = 0, i2 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0;
+ i2 = STACKTOP;
+ i8 = HEAP32[i3 + 60 >> 2] | 0;
+ if ((i8 | 0) == 0) {
+ STACKTOP = i2;
+ return;
+ }
+ i7 = i3 + 12 | 0;
+ i6 = i3 + 4 | 0;
+ i5 = i3 + 72 | 0;
+ i4 = i3 + 68 | 0;
+ L4 : while (1) {
+ i12 = HEAP32[i8 + 48 >> 2] | 0;
+ i10 = HEAP32[i8 + 52 >> 2] | 0;
+ i11 = HEAP32[i8 + 56 >> 2] | 0;
+ i9 = HEAP32[i8 + 60 >> 2] | 0;
+ i15 = HEAP32[i12 + 8 >> 2] | 0;
+ i13 = HEAP32[i10 + 8 >> 2] | 0;
+ i16 = i8 + 4 | 0;
+ do {
+ if ((HEAP32[i16 >> 2] & 8 | 0) == 0) {
+ i1 = 11;
+ } else {
+ if (!(__ZNK6b2Body13ShouldCollideEPKS_(i13, i15) | 0)) {
+ i16 = HEAP32[i8 + 12 >> 2] | 0;
+ __ZN16b2ContactManager7DestroyEP9b2Contact(i3, i8);
+ i8 = i16;
+ break;
+ }
+ i14 = HEAP32[i4 >> 2] | 0;
+ if ((i14 | 0) != 0 ? !(FUNCTION_TABLE_iiii[HEAP32[(HEAP32[i14 >> 2] | 0) + 8 >> 2] & 7](i14, i12, i10) | 0) : 0) {
+ i16 = HEAP32[i8 + 12 >> 2] | 0;
+ __ZN16b2ContactManager7DestroyEP9b2Contact(i3, i8);
+ i8 = i16;
+ break;
+ }
+ HEAP32[i16 >> 2] = HEAP32[i16 >> 2] & -9;
+ i1 = 11;
+ }
+ } while (0);
+ do {
+ if ((i1 | 0) == 11) {
+ i1 = 0;
+ if ((HEAP16[i15 + 4 >> 1] & 2) == 0) {
+ i14 = 0;
+ } else {
+ i14 = (HEAP32[i15 >> 2] | 0) != 0;
+ }
+ if ((HEAP16[i13 + 4 >> 1] & 2) == 0) {
+ i13 = 0;
+ } else {
+ i13 = (HEAP32[i13 >> 2] | 0) != 0;
+ }
+ if (!(i14 | i13)) {
+ i8 = HEAP32[i8 + 12 >> 2] | 0;
+ break;
+ }
+ i11 = HEAP32[(HEAP32[i12 + 24 >> 2] | 0) + (i11 * 28 | 0) + 24 >> 2] | 0;
+ i9 = HEAP32[(HEAP32[i10 + 24 >> 2] | 0) + (i9 * 28 | 0) + 24 >> 2] | 0;
+ if (!((i11 | 0) > -1)) {
+ i1 = 19;
+ break L4;
+ }
+ i10 = HEAP32[i7 >> 2] | 0;
+ if ((i10 | 0) <= (i11 | 0)) {
+ i1 = 19;
+ break L4;
+ }
+ i12 = HEAP32[i6 >> 2] | 0;
+ if (!((i9 | 0) > -1 & (i10 | 0) > (i9 | 0))) {
+ i1 = 21;
+ break L4;
+ }
+ if (+HEAPF32[i12 + (i9 * 36 | 0) >> 2] - +HEAPF32[i12 + (i11 * 36 | 0) + 8 >> 2] > 0.0 | +HEAPF32[i12 + (i9 * 36 | 0) + 4 >> 2] - +HEAPF32[i12 + (i11 * 36 | 0) + 12 >> 2] > 0.0 | +HEAPF32[i12 + (i11 * 36 | 0) >> 2] - +HEAPF32[i12 + (i9 * 36 | 0) + 8 >> 2] > 0.0 | +HEAPF32[i12 + (i11 * 36 | 0) + 4 >> 2] - +HEAPF32[i12 + (i9 * 36 | 0) + 12 >> 2] > 0.0) {
+ i16 = HEAP32[i8 + 12 >> 2] | 0;
+ __ZN16b2ContactManager7DestroyEP9b2Contact(i3, i8);
+ i8 = i16;
+ break;
+ } else {
+ __ZN9b2Contact6UpdateEP17b2ContactListener(i8, HEAP32[i5 >> 2] | 0);
+ i8 = HEAP32[i8 + 12 >> 2] | 0;
+ break;
+ }
+ }
+ } while (0);
+ if ((i8 | 0) == 0) {
+ i1 = 25;
+ break;
+ }
+ }
+ if ((i1 | 0) == 19) {
+ ___assert_fail(1904, 1952, 159, 2008);
+ } else if ((i1 | 0) == 21) {
+ ___assert_fail(1904, 1952, 159, 2008);
+ } else if ((i1 | 0) == 25) {
+ STACKTOP = i2;
+ return;
+ }
+}
+function __ZN16b2ContactManager7AddPairEPvS0_(i1, i5, i6) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ i6 = i6 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0;
+ i2 = STACKTOP;
+ i4 = HEAP32[i5 + 16 >> 2] | 0;
+ i3 = HEAP32[i6 + 16 >> 2] | 0;
+ i5 = HEAP32[i5 + 20 >> 2] | 0;
+ i6 = HEAP32[i6 + 20 >> 2] | 0;
+ i8 = HEAP32[i4 + 8 >> 2] | 0;
+ i7 = HEAP32[i3 + 8 >> 2] | 0;
+ if ((i8 | 0) == (i7 | 0)) {
+ STACKTOP = i2;
+ return;
+ }
+ i10 = HEAP32[i7 + 112 >> 2] | 0;
+ L4 : do {
+ if ((i10 | 0) != 0) {
+ while (1) {
+ if ((HEAP32[i10 >> 2] | 0) == (i8 | 0)) {
+ i9 = HEAP32[i10 + 4 >> 2] | 0;
+ i12 = HEAP32[i9 + 48 >> 2] | 0;
+ i13 = HEAP32[i9 + 52 >> 2] | 0;
+ i11 = HEAP32[i9 + 56 >> 2] | 0;
+ i9 = HEAP32[i9 + 60 >> 2] | 0;
+ if ((i12 | 0) == (i4 | 0) & (i13 | 0) == (i3 | 0) & (i11 | 0) == (i5 | 0) & (i9 | 0) == (i6 | 0)) {
+ i9 = 22;
+ break;
+ }
+ if ((i12 | 0) == (i3 | 0) & (i13 | 0) == (i4 | 0) & (i11 | 0) == (i6 | 0) & (i9 | 0) == (i5 | 0)) {
+ i9 = 22;
+ break;
+ }
+ }
+ i10 = HEAP32[i10 + 12 >> 2] | 0;
+ if ((i10 | 0) == 0) {
+ break L4;
+ }
+ }
+ if ((i9 | 0) == 22) {
+ STACKTOP = i2;
+ return;
+ }
+ }
+ } while (0);
+ if (!(__ZNK6b2Body13ShouldCollideEPKS_(i7, i8) | 0)) {
+ STACKTOP = i2;
+ return;
+ }
+ i7 = HEAP32[i1 + 68 >> 2] | 0;
+ if ((i7 | 0) != 0 ? !(FUNCTION_TABLE_iiii[HEAP32[(HEAP32[i7 >> 2] | 0) + 8 >> 2] & 7](i7, i4, i3) | 0) : 0) {
+ STACKTOP = i2;
+ return;
+ }
+ i5 = __ZN9b2Contact6CreateEP9b2FixtureiS1_iP16b2BlockAllocator(i4, i5, i3, i6, HEAP32[i1 + 76 >> 2] | 0) | 0;
+ if ((i5 | 0) == 0) {
+ STACKTOP = i2;
+ return;
+ }
+ i4 = HEAP32[(HEAP32[i5 + 48 >> 2] | 0) + 8 >> 2] | 0;
+ i3 = HEAP32[(HEAP32[i5 + 52 >> 2] | 0) + 8 >> 2] | 0;
+ HEAP32[i5 + 8 >> 2] = 0;
+ i7 = i1 + 60 | 0;
+ HEAP32[i5 + 12 >> 2] = HEAP32[i7 >> 2];
+ i6 = HEAP32[i7 >> 2] | 0;
+ if ((i6 | 0) != 0) {
+ HEAP32[i6 + 8 >> 2] = i5;
+ }
+ HEAP32[i7 >> 2] = i5;
+ i8 = i5 + 16 | 0;
+ HEAP32[i5 + 20 >> 2] = i5;
+ HEAP32[i8 >> 2] = i3;
+ HEAP32[i5 + 24 >> 2] = 0;
+ i6 = i4 + 112 | 0;
+ HEAP32[i5 + 28 >> 2] = HEAP32[i6 >> 2];
+ i7 = HEAP32[i6 >> 2] | 0;
+ if ((i7 | 0) != 0) {
+ HEAP32[i7 + 8 >> 2] = i8;
+ }
+ HEAP32[i6 >> 2] = i8;
+ i6 = i5 + 32 | 0;
+ HEAP32[i5 + 36 >> 2] = i5;
+ HEAP32[i6 >> 2] = i4;
+ HEAP32[i5 + 40 >> 2] = 0;
+ i7 = i3 + 112 | 0;
+ HEAP32[i5 + 44 >> 2] = HEAP32[i7 >> 2];
+ i5 = HEAP32[i7 >> 2] | 0;
+ if ((i5 | 0) != 0) {
+ HEAP32[i5 + 8 >> 2] = i6;
+ }
+ HEAP32[i7 >> 2] = i6;
+ i5 = i4 + 4 | 0;
+ i6 = HEAPU16[i5 >> 1] | 0;
+ if ((i6 & 2 | 0) == 0) {
+ HEAP16[i5 >> 1] = i6 | 2;
+ HEAPF32[i4 + 144 >> 2] = 0.0;
+ }
+ i4 = i3 + 4 | 0;
+ i5 = HEAPU16[i4 >> 1] | 0;
+ if ((i5 & 2 | 0) == 0) {
+ HEAP16[i4 >> 1] = i5 | 2;
+ HEAPF32[i3 + 144 >> 2] = 0.0;
+ }
+ i13 = i1 + 64 | 0;
+ HEAP32[i13 >> 2] = (HEAP32[i13 >> 2] | 0) + 1;
+ STACKTOP = i2;
+ return;
+}
+function __ZN12b2BroadPhase11UpdatePairsI16b2ContactManagerEEvPT_(i5, i2) {
+ i5 = i5 | 0;
+ i2 = i2 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i6 = i3;
+ i1 = i5 + 52 | 0;
+ HEAP32[i1 >> 2] = 0;
+ i4 = i5 + 40 | 0;
+ i12 = HEAP32[i4 >> 2] | 0;
+ do {
+ if ((i12 | 0) > 0) {
+ i9 = i5 + 32 | 0;
+ i11 = i5 + 56 | 0;
+ i8 = i5 + 12 | 0;
+ i10 = i5 + 4 | 0;
+ i13 = 0;
+ while (1) {
+ i14 = HEAP32[(HEAP32[i9 >> 2] | 0) + (i13 << 2) >> 2] | 0;
+ HEAP32[i11 >> 2] = i14;
+ if (!((i14 | 0) == -1)) {
+ if (!((i14 | 0) > -1)) {
+ i8 = 6;
+ break;
+ }
+ if ((HEAP32[i8 >> 2] | 0) <= (i14 | 0)) {
+ i8 = 6;
+ break;
+ }
+ __ZNK13b2DynamicTree5QueryI12b2BroadPhaseEEvPT_RK6b2AABB(i5, i5, (HEAP32[i10 >> 2] | 0) + (i14 * 36 | 0) | 0);
+ i12 = HEAP32[i4 >> 2] | 0;
+ }
+ i13 = i13 + 1 | 0;
+ if ((i13 | 0) >= (i12 | 0)) {
+ i8 = 9;
+ break;
+ }
+ }
+ if ((i8 | 0) == 6) {
+ ___assert_fail(1904, 1952, 159, 2008);
+ } else if ((i8 | 0) == 9) {
+ i7 = HEAP32[i1 >> 2] | 0;
+ break;
+ }
+ } else {
+ i7 = 0;
+ }
+ } while (0);
+ HEAP32[i4 >> 2] = 0;
+ i4 = i5 + 44 | 0;
+ i14 = HEAP32[i4 >> 2] | 0;
+ HEAP32[i6 >> 2] = 3;
+ __ZNSt3__16__sortIRPFbRK6b2PairS3_EPS1_EEvT0_S8_T_(i14, i14 + (i7 * 12 | 0) | 0, i6);
+ if ((HEAP32[i1 >> 2] | 0) <= 0) {
+ STACKTOP = i3;
+ return;
+ }
+ i6 = i5 + 12 | 0;
+ i7 = i5 + 4 | 0;
+ i9 = 0;
+ L18 : while (1) {
+ i8 = HEAP32[i4 >> 2] | 0;
+ i5 = i8 + (i9 * 12 | 0) | 0;
+ i10 = HEAP32[i5 >> 2] | 0;
+ if (!((i10 | 0) > -1)) {
+ i8 = 14;
+ break;
+ }
+ i12 = HEAP32[i6 >> 2] | 0;
+ if ((i12 | 0) <= (i10 | 0)) {
+ i8 = 14;
+ break;
+ }
+ i11 = HEAP32[i7 >> 2] | 0;
+ i8 = i8 + (i9 * 12 | 0) + 4 | 0;
+ i13 = HEAP32[i8 >> 2] | 0;
+ if (!((i13 | 0) > -1 & (i12 | 0) > (i13 | 0))) {
+ i8 = 16;
+ break;
+ }
+ __ZN16b2ContactManager7AddPairEPvS0_(i2, HEAP32[i11 + (i10 * 36 | 0) + 16 >> 2] | 0, HEAP32[i11 + (i13 * 36 | 0) + 16 >> 2] | 0);
+ i10 = HEAP32[i1 >> 2] | 0;
+ while (1) {
+ i9 = i9 + 1 | 0;
+ if ((i9 | 0) >= (i10 | 0)) {
+ i8 = 21;
+ break L18;
+ }
+ i11 = HEAP32[i4 >> 2] | 0;
+ if ((HEAP32[i11 + (i9 * 12 | 0) >> 2] | 0) != (HEAP32[i5 >> 2] | 0)) {
+ continue L18;
+ }
+ if ((HEAP32[i11 + (i9 * 12 | 0) + 4 >> 2] | 0) != (HEAP32[i8 >> 2] | 0)) {
+ continue L18;
+ }
+ }
+ }
+ if ((i8 | 0) == 14) {
+ ___assert_fail(1904, 1952, 153, 1992);
+ } else if ((i8 | 0) == 16) {
+ ___assert_fail(1904, 1952, 153, 1992);
+ } else if ((i8 | 0) == 21) {
+ STACKTOP = i3;
+ return;
+ }
+}
+function __ZNK13b2DynamicTree5QueryI12b2BroadPhaseEEvPT_RK6b2AABB(i9, i4, i7) {
+ i9 = i9 | 0;
+ i4 = i4 | 0;
+ i7 = i7 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i5 = 0, i6 = 0, i8 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 1040 | 0;
+ i3 = i2;
+ i1 = i3 + 4 | 0;
+ HEAP32[i3 >> 2] = i1;
+ i5 = i3 + 1028 | 0;
+ HEAP32[i5 >> 2] = 0;
+ i6 = i3 + 1032 | 0;
+ HEAP32[i6 >> 2] = 256;
+ i14 = HEAP32[i3 >> 2] | 0;
+ HEAP32[i14 + (HEAP32[i5 >> 2] << 2) >> 2] = HEAP32[i9 >> 2];
+ i15 = HEAP32[i5 >> 2] | 0;
+ i16 = i15 + 1 | 0;
+ HEAP32[i5 >> 2] = i16;
+ L1 : do {
+ if ((i15 | 0) > -1) {
+ i9 = i9 + 4 | 0;
+ i11 = i7 + 4 | 0;
+ i12 = i7 + 8 | 0;
+ i10 = i7 + 12 | 0;
+ while (1) {
+ i16 = i16 + -1 | 0;
+ HEAP32[i5 >> 2] = i16;
+ i13 = HEAP32[i14 + (i16 << 2) >> 2] | 0;
+ do {
+ if (!((i13 | 0) == -1) ? (i8 = HEAP32[i9 >> 2] | 0, !(+HEAPF32[i7 >> 2] - +HEAPF32[i8 + (i13 * 36 | 0) + 8 >> 2] > 0.0 | +HEAPF32[i11 >> 2] - +HEAPF32[i8 + (i13 * 36 | 0) + 12 >> 2] > 0.0 | +HEAPF32[i8 + (i13 * 36 | 0) >> 2] - +HEAPF32[i12 >> 2] > 0.0 | +HEAPF32[i8 + (i13 * 36 | 0) + 4 >> 2] - +HEAPF32[i10 >> 2] > 0.0)) : 0) {
+ i15 = i8 + (i13 * 36 | 0) + 24 | 0;
+ if ((HEAP32[i15 >> 2] | 0) == -1) {
+ if (!(__ZN12b2BroadPhase13QueryCallbackEi(i4, i13) | 0)) {
+ break L1;
+ }
+ i16 = HEAP32[i5 >> 2] | 0;
+ break;
+ }
+ if ((i16 | 0) == (HEAP32[i6 >> 2] | 0) ? (HEAP32[i6 >> 2] = i16 << 1, i16 = __Z7b2Alloci(i16 << 3) | 0, HEAP32[i3 >> 2] = i16, _memcpy(i16 | 0, i14 | 0, HEAP32[i5 >> 2] << 2 | 0) | 0, (i14 | 0) != (i1 | 0)) : 0) {
+ __Z6b2FreePv(i14);
+ }
+ i14 = HEAP32[i3 >> 2] | 0;
+ HEAP32[i14 + (HEAP32[i5 >> 2] << 2) >> 2] = HEAP32[i15 >> 2];
+ i15 = (HEAP32[i5 >> 2] | 0) + 1 | 0;
+ HEAP32[i5 >> 2] = i15;
+ i13 = i8 + (i13 * 36 | 0) + 28 | 0;
+ if ((i15 | 0) == (HEAP32[i6 >> 2] | 0) ? (HEAP32[i6 >> 2] = i15 << 1, i16 = __Z7b2Alloci(i15 << 3) | 0, HEAP32[i3 >> 2] = i16, _memcpy(i16 | 0, i14 | 0, HEAP32[i5 >> 2] << 2 | 0) | 0, (i14 | 0) != (i1 | 0)) : 0) {
+ __Z6b2FreePv(i14);
+ }
+ HEAP32[(HEAP32[i3 >> 2] | 0) + (HEAP32[i5 >> 2] << 2) >> 2] = HEAP32[i13 >> 2];
+ i16 = (HEAP32[i5 >> 2] | 0) + 1 | 0;
+ HEAP32[i5 >> 2] = i16;
+ }
+ } while (0);
+ if ((i16 | 0) <= 0) {
+ break L1;
+ }
+ i14 = HEAP32[i3 >> 2] | 0;
+ }
+ }
+ } while (0);
+ i4 = HEAP32[i3 >> 2] | 0;
+ if ((i4 | 0) == (i1 | 0)) {
+ STACKTOP = i2;
+ return;
+ }
+ __Z6b2FreePv(i4);
+ HEAP32[i3 >> 2] = 0;
+ STACKTOP = i2;
+ return;
+}
+function __ZN15b2ContactSolver9WarmStartEv(i4) {
+ i4 = i4 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, d10 = 0.0, d11 = 0.0, d12 = 0.0, i13 = 0, d14 = 0.0, d15 = 0.0, d16 = 0.0, d17 = 0.0, d18 = 0.0, d19 = 0.0, d20 = 0.0, d21 = 0.0, i22 = 0, d23 = 0.0, i24 = 0, d25 = 0.0, d26 = 0.0, d27 = 0.0;
+ i1 = STACKTOP;
+ i2 = i4 + 48 | 0;
+ if ((HEAP32[i2 >> 2] | 0) <= 0) {
+ STACKTOP = i1;
+ return;
+ }
+ i3 = i4 + 40 | 0;
+ i5 = i4 + 28 | 0;
+ i22 = HEAP32[i5 >> 2] | 0;
+ i8 = 0;
+ do {
+ i9 = HEAP32[i3 >> 2] | 0;
+ i7 = HEAP32[i9 + (i8 * 152 | 0) + 112 >> 2] | 0;
+ i6 = HEAP32[i9 + (i8 * 152 | 0) + 116 >> 2] | 0;
+ d10 = +HEAPF32[i9 + (i8 * 152 | 0) + 120 >> 2];
+ d14 = +HEAPF32[i9 + (i8 * 152 | 0) + 128 >> 2];
+ d12 = +HEAPF32[i9 + (i8 * 152 | 0) + 124 >> 2];
+ d11 = +HEAPF32[i9 + (i8 * 152 | 0) + 132 >> 2];
+ i13 = HEAP32[i9 + (i8 * 152 | 0) + 144 >> 2] | 0;
+ i4 = i22 + (i7 * 12 | 0) | 0;
+ i24 = i4;
+ d17 = +HEAPF32[i24 >> 2];
+ d19 = +HEAPF32[i24 + 4 >> 2];
+ d20 = +HEAPF32[i22 + (i7 * 12 | 0) + 8 >> 2];
+ i24 = i22 + (i6 * 12 | 0) | 0;
+ d21 = +HEAPF32[i24 >> 2];
+ d23 = +HEAPF32[i24 + 4 >> 2];
+ d18 = +HEAPF32[i22 + (i6 * 12 | 0) + 8 >> 2];
+ i22 = i9 + (i8 * 152 | 0) + 72 | 0;
+ d15 = +HEAPF32[i22 >> 2];
+ d16 = +HEAPF32[i22 + 4 >> 2];
+ if ((i13 | 0) > 0) {
+ i22 = 0;
+ do {
+ d27 = +HEAPF32[i9 + (i8 * 152 | 0) + (i22 * 36 | 0) + 16 >> 2];
+ d25 = +HEAPF32[i9 + (i8 * 152 | 0) + (i22 * 36 | 0) + 20 >> 2];
+ d26 = d15 * d27 + d16 * d25;
+ d25 = d16 * d27 - d15 * d25;
+ d20 = d20 - d14 * (+HEAPF32[i9 + (i8 * 152 | 0) + (i22 * 36 | 0) >> 2] * d25 - +HEAPF32[i9 + (i8 * 152 | 0) + (i22 * 36 | 0) + 4 >> 2] * d26);
+ d17 = d17 - d10 * d26;
+ d19 = d19 - d10 * d25;
+ d18 = d18 + d11 * (d25 * +HEAPF32[i9 + (i8 * 152 | 0) + (i22 * 36 | 0) + 8 >> 2] - d26 * +HEAPF32[i9 + (i8 * 152 | 0) + (i22 * 36 | 0) + 12 >> 2]);
+ d21 = d21 + d12 * d26;
+ d23 = d23 + d12 * d25;
+ i22 = i22 + 1 | 0;
+ } while ((i22 | 0) != (i13 | 0));
+ }
+ d27 = +d17;
+ d26 = +d19;
+ i22 = i4;
+ HEAPF32[i22 >> 2] = d27;
+ HEAPF32[i22 + 4 >> 2] = d26;
+ i22 = HEAP32[i5 >> 2] | 0;
+ HEAPF32[i22 + (i7 * 12 | 0) + 8 >> 2] = d20;
+ d26 = +d21;
+ d27 = +d23;
+ i22 = i22 + (i6 * 12 | 0) | 0;
+ HEAPF32[i22 >> 2] = d26;
+ HEAPF32[i22 + 4 >> 2] = d27;
+ i22 = HEAP32[i5 >> 2] | 0;
+ HEAPF32[i22 + (i6 * 12 | 0) + 8 >> 2] = d18;
+ i8 = i8 + 1 | 0;
+ } while ((i8 | 0) < (HEAP32[i2 >> 2] | 0));
+ STACKTOP = i1;
+ return;
+}
+function __ZNK14b2PolygonShape7RayCastEP15b2RayCastOutputRK14b2RayCastInputRK11b2Transformi(i1, i5, i8, i7, i4) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ i8 = i8 | 0;
+ i7 = i7 | 0;
+ i4 = i4 | 0;
+ var i2 = 0, d3 = 0.0, i6 = 0, d9 = 0.0, d10 = 0.0, d11 = 0.0, d12 = 0.0, d13 = 0.0, i14 = 0, i15 = 0, i16 = 0, d17 = 0.0, d18 = 0.0, d19 = 0.0, d20 = 0.0;
+ i4 = STACKTOP;
+ d10 = +HEAPF32[i7 >> 2];
+ d9 = +HEAPF32[i8 >> 2] - d10;
+ d18 = +HEAPF32[i7 + 4 >> 2];
+ d11 = +HEAPF32[i8 + 4 >> 2] - d18;
+ i6 = i7 + 12 | 0;
+ d17 = +HEAPF32[i6 >> 2];
+ i7 = i7 + 8 | 0;
+ d19 = +HEAPF32[i7 >> 2];
+ d12 = d9 * d17 + d11 * d19;
+ d9 = d17 * d11 - d9 * d19;
+ d10 = +HEAPF32[i8 + 8 >> 2] - d10;
+ d18 = +HEAPF32[i8 + 12 >> 2] - d18;
+ d11 = d17 * d10 + d19 * d18 - d12;
+ d10 = d17 * d18 - d19 * d10 - d9;
+ i8 = i8 + 16 | 0;
+ i14 = HEAP32[i1 + 148 >> 2] | 0;
+ do {
+ if ((i14 | 0) > 0) {
+ i16 = 0;
+ i15 = -1;
+ d13 = 0.0;
+ d17 = +HEAPF32[i8 >> 2];
+ L3 : while (1) {
+ d20 = +HEAPF32[i1 + (i16 << 3) + 84 >> 2];
+ d19 = +HEAPF32[i1 + (i16 << 3) + 88 >> 2];
+ d18 = (+HEAPF32[i1 + (i16 << 3) + 20 >> 2] - d12) * d20 + (+HEAPF32[i1 + (i16 << 3) + 24 >> 2] - d9) * d19;
+ d19 = d11 * d20 + d10 * d19;
+ do {
+ if (d19 == 0.0) {
+ if (d18 < 0.0) {
+ i1 = 0;
+ i14 = 18;
+ break L3;
+ }
+ } else {
+ if (d19 < 0.0 ? d18 < d13 * d19 : 0) {
+ i15 = i16;
+ d13 = d18 / d19;
+ break;
+ }
+ if (d19 > 0.0 ? d18 < d17 * d19 : 0) {
+ d17 = d18 / d19;
+ }
+ }
+ } while (0);
+ i16 = i16 + 1 | 0;
+ if (d17 < d13) {
+ i1 = 0;
+ i14 = 18;
+ break;
+ }
+ if ((i16 | 0) >= (i14 | 0)) {
+ i14 = 13;
+ break;
+ }
+ }
+ if ((i14 | 0) == 13) {
+ if (d13 >= 0.0) {
+ i2 = i15;
+ d3 = d13;
+ break;
+ }
+ ___assert_fail(376, 328, 249, 424);
+ } else if ((i14 | 0) == 18) {
+ STACKTOP = i4;
+ return i1 | 0;
+ }
+ } else {
+ i2 = -1;
+ d3 = 0.0;
+ }
+ } while (0);
+ if (!(d3 <= +HEAPF32[i8 >> 2])) {
+ ___assert_fail(376, 328, 249, 424);
+ }
+ if (!((i2 | 0) > -1)) {
+ i16 = 0;
+ STACKTOP = i4;
+ return i16 | 0;
+ }
+ HEAPF32[i5 + 8 >> 2] = d3;
+ d18 = +HEAPF32[i6 >> 2];
+ d13 = +HEAPF32[i1 + (i2 << 3) + 84 >> 2];
+ d17 = +HEAPF32[i7 >> 2];
+ d20 = +HEAPF32[i1 + (i2 << 3) + 88 >> 2];
+ d19 = +(d18 * d13 - d17 * d20);
+ d20 = +(d13 * d17 + d18 * d20);
+ i16 = i5;
+ HEAPF32[i16 >> 2] = d19;
+ HEAPF32[i16 + 4 >> 2] = d20;
+ i16 = 1;
+ STACKTOP = i4;
+ return i16 | 0;
+}
+function __ZN7b2World4StepEfii(i1, d9, i11, i12) {
+ i1 = i1 | 0;
+ d9 = +d9;
+ i11 = i11 | 0;
+ i12 = i12 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i10 = 0, i13 = 0;
+ i4 = STACKTOP;
+ STACKTOP = STACKTOP + 32 | 0;
+ i3 = i4 + 27 | 0;
+ i5 = i4;
+ i8 = i4 + 26 | 0;
+ i10 = i4 + 25 | 0;
+ i7 = i4 + 24 | 0;
+ __ZN7b2TimerC2Ev(i3);
+ i2 = i1 + 102868 | 0;
+ i13 = HEAP32[i2 >> 2] | 0;
+ if ((i13 & 1 | 0) != 0) {
+ __ZN16b2ContactManager15FindNewContactsEv(i1 + 102872 | 0);
+ i13 = HEAP32[i2 >> 2] & -2;
+ HEAP32[i2 >> 2] = i13;
+ }
+ HEAP32[i2 >> 2] = i13 | 2;
+ HEAPF32[i5 >> 2] = d9;
+ HEAP32[i5 + 12 >> 2] = i11;
+ HEAP32[i5 + 16 >> 2] = i12;
+ if (d9 > 0.0) {
+ HEAPF32[i5 + 4 >> 2] = 1.0 / d9;
+ } else {
+ HEAPF32[i5 + 4 >> 2] = 0.0;
+ }
+ i11 = i1 + 102988 | 0;
+ HEAPF32[i5 + 8 >> 2] = +HEAPF32[i11 >> 2] * d9;
+ HEAP8[i5 + 20 | 0] = HEAP8[i1 + 102992 | 0] | 0;
+ __ZN7b2TimerC2Ev(i8);
+ __ZN16b2ContactManager7CollideEv(i1 + 102872 | 0);
+ HEAPF32[i1 + 103e3 >> 2] = +__ZNK7b2Timer15GetMillisecondsEv(i8);
+ if ((HEAP8[i1 + 102995 | 0] | 0) != 0 ? +HEAPF32[i5 >> 2] > 0.0 : 0) {
+ __ZN7b2TimerC2Ev(i10);
+ __ZN7b2World5SolveERK10b2TimeStep(i1, i5);
+ HEAPF32[i1 + 103004 >> 2] = +__ZNK7b2Timer15GetMillisecondsEv(i10);
+ }
+ if ((HEAP8[i1 + 102993 | 0] | 0) != 0) {
+ d9 = +HEAPF32[i5 >> 2];
+ if (d9 > 0.0) {
+ __ZN7b2TimerC2Ev(i7);
+ __ZN7b2World8SolveTOIERK10b2TimeStep(i1, i5);
+ HEAPF32[i1 + 103024 >> 2] = +__ZNK7b2Timer15GetMillisecondsEv(i7);
+ i6 = 12;
+ }
+ } else {
+ i6 = 12;
+ }
+ if ((i6 | 0) == 12) {
+ d9 = +HEAPF32[i5 >> 2];
+ }
+ if (d9 > 0.0) {
+ HEAPF32[i11 >> 2] = +HEAPF32[i5 + 4 >> 2];
+ }
+ i5 = HEAP32[i2 >> 2] | 0;
+ if ((i5 & 4 | 0) == 0) {
+ i13 = i5 & -3;
+ HEAP32[i2 >> 2] = i13;
+ d9 = +__ZNK7b2Timer15GetMillisecondsEv(i3);
+ i13 = i1 + 102996 | 0;
+ HEAPF32[i13 >> 2] = d9;
+ STACKTOP = i4;
+ return;
+ }
+ i6 = HEAP32[i1 + 102952 >> 2] | 0;
+ if ((i6 | 0) == 0) {
+ i13 = i5 & -3;
+ HEAP32[i2 >> 2] = i13;
+ d9 = +__ZNK7b2Timer15GetMillisecondsEv(i3);
+ i13 = i1 + 102996 | 0;
+ HEAPF32[i13 >> 2] = d9;
+ STACKTOP = i4;
+ return;
+ }
+ do {
+ HEAPF32[i6 + 76 >> 2] = 0.0;
+ HEAPF32[i6 + 80 >> 2] = 0.0;
+ HEAPF32[i6 + 84 >> 2] = 0.0;
+ i6 = HEAP32[i6 + 96 >> 2] | 0;
+ } while ((i6 | 0) != 0);
+ i13 = i5 & -3;
+ HEAP32[i2 >> 2] = i13;
+ d9 = +__ZNK7b2Timer15GetMillisecondsEv(i3);
+ i13 = i1 + 102996 | 0;
+ HEAPF32[i13 >> 2] = d9;
+ STACKTOP = i4;
+ return;
+}
+function __ZL19b2FindMaxSeparationPiPK14b2PolygonShapeRK11b2TransformS2_S5_(i1, i5, i6, i3, i4) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ i6 = i6 | 0;
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ var i2 = 0, i7 = 0, d8 = 0.0, d9 = 0.0, d10 = 0.0, d11 = 0.0, i12 = 0, i13 = 0, i14 = 0, d15 = 0.0, d16 = 0.0, d17 = 0.0, d18 = 0.0, d19 = 0.0;
+ i2 = STACKTOP;
+ i7 = HEAP32[i5 + 148 >> 2] | 0;
+ d17 = +HEAPF32[i4 + 12 >> 2];
+ d19 = +HEAPF32[i3 + 12 >> 2];
+ d18 = +HEAPF32[i4 + 8 >> 2];
+ d16 = +HEAPF32[i3 + 16 >> 2];
+ d15 = +HEAPF32[i6 + 12 >> 2];
+ d10 = +HEAPF32[i5 + 12 >> 2];
+ d8 = +HEAPF32[i6 + 8 >> 2];
+ d9 = +HEAPF32[i5 + 16 >> 2];
+ d11 = +HEAPF32[i4 >> 2] + (d17 * d19 - d18 * d16) - (+HEAPF32[i6 >> 2] + (d15 * d10 - d8 * d9));
+ d9 = d19 * d18 + d17 * d16 + +HEAPF32[i4 + 4 >> 2] - (d10 * d8 + d15 * d9 + +HEAPF32[i6 + 4 >> 2]);
+ d10 = d15 * d11 + d8 * d9;
+ d8 = d15 * d9 - d11 * d8;
+ if ((i7 | 0) > 0) {
+ i14 = 0;
+ i13 = 0;
+ d9 = -3.4028234663852886e+38;
+ while (1) {
+ d11 = d10 * +HEAPF32[i5 + (i13 << 3) + 84 >> 2] + d8 * +HEAPF32[i5 + (i13 << 3) + 88 >> 2];
+ i12 = d11 > d9;
+ i14 = i12 ? i13 : i14;
+ i13 = i13 + 1 | 0;
+ if ((i13 | 0) == (i7 | 0)) {
+ break;
+ } else {
+ d9 = i12 ? d11 : d9;
+ }
+ }
+ } else {
+ i14 = 0;
+ }
+ d9 = +__ZL16b2EdgeSeparationPK14b2PolygonShapeRK11b2TransformiS1_S4_(i5, i6, i14, i3, i4);
+ i12 = ((i14 | 0) > 0 ? i14 : i7) + -1 | 0;
+ d8 = +__ZL16b2EdgeSeparationPK14b2PolygonShapeRK11b2TransformiS1_S4_(i5, i6, i12, i3, i4);
+ i13 = i14 + 1 | 0;
+ i13 = (i13 | 0) < (i7 | 0) ? i13 : 0;
+ d10 = +__ZL16b2EdgeSeparationPK14b2PolygonShapeRK11b2TransformiS1_S4_(i5, i6, i13, i3, i4);
+ if (d8 > d9 & d8 > d10) {
+ while (1) {
+ i13 = ((i12 | 0) > 0 ? i12 : i7) + -1 | 0;
+ d9 = +__ZL16b2EdgeSeparationPK14b2PolygonShapeRK11b2TransformiS1_S4_(i5, i6, i13, i3, i4);
+ if (d9 > d8) {
+ i12 = i13;
+ d8 = d9;
+ } else {
+ break;
+ }
+ }
+ HEAP32[i1 >> 2] = i12;
+ STACKTOP = i2;
+ return +d8;
+ }
+ if (d10 > d9) {
+ i12 = i13;
+ d8 = d10;
+ } else {
+ d19 = d9;
+ HEAP32[i1 >> 2] = i14;
+ STACKTOP = i2;
+ return +d19;
+ }
+ while (1) {
+ i13 = i12 + 1 | 0;
+ i13 = (i13 | 0) < (i7 | 0) ? i13 : 0;
+ d9 = +__ZL16b2EdgeSeparationPK14b2PolygonShapeRK11b2TransformiS1_S4_(i5, i6, i13, i3, i4);
+ if (d9 > d8) {
+ i12 = i13;
+ d8 = d9;
+ } else {
+ break;
+ }
+ }
+ HEAP32[i1 >> 2] = i12;
+ STACKTOP = i2;
+ return +d8;
+}
+function __ZN9b2Fixture11SynchronizeEP12b2BroadPhaseRK11b2TransformS4_(i10, i8, i7, i2) {
+ i10 = i10 | 0;
+ i8 = i8 | 0;
+ i7 = i7 | 0;
+ i2 = i2 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i9 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0, d23 = 0.0, d24 = 0.0, d25 = 0.0, d26 = 0.0, i27 = 0;
+ i9 = STACKTOP;
+ STACKTOP = STACKTOP + 48 | 0;
+ i5 = i9 + 24 | 0;
+ i6 = i9 + 8 | 0;
+ i3 = i9;
+ i4 = i10 + 28 | 0;
+ if ((HEAP32[i4 >> 2] | 0) <= 0) {
+ STACKTOP = i9;
+ return;
+ }
+ i1 = i10 + 24 | 0;
+ i18 = i10 + 12 | 0;
+ i19 = i5 + 4 | 0;
+ i20 = i6 + 4 | 0;
+ i13 = i5 + 8 | 0;
+ i14 = i6 + 8 | 0;
+ i15 = i5 + 12 | 0;
+ i16 = i6 + 12 | 0;
+ i11 = i2 + 4 | 0;
+ i22 = i7 + 4 | 0;
+ i12 = i3 + 4 | 0;
+ i21 = 0;
+ do {
+ i10 = HEAP32[i1 >> 2] | 0;
+ i27 = HEAP32[i18 >> 2] | 0;
+ i17 = i10 + (i21 * 28 | 0) + 20 | 0;
+ FUNCTION_TABLE_viiii[HEAP32[(HEAP32[i27 >> 2] | 0) + 24 >> 2] & 15](i27, i5, i7, HEAP32[i17 >> 2] | 0);
+ i27 = HEAP32[i18 >> 2] | 0;
+ FUNCTION_TABLE_viiii[HEAP32[(HEAP32[i27 >> 2] | 0) + 24 >> 2] & 15](i27, i6, i2, HEAP32[i17 >> 2] | 0);
+ i17 = i10 + (i21 * 28 | 0) | 0;
+ d25 = +HEAPF32[i5 >> 2];
+ d26 = +HEAPF32[i6 >> 2];
+ d24 = +HEAPF32[i19 >> 2];
+ d23 = +HEAPF32[i20 >> 2];
+ d25 = +(d25 < d26 ? d25 : d26);
+ d26 = +(d24 < d23 ? d24 : d23);
+ i27 = i17;
+ HEAPF32[i27 >> 2] = d25;
+ HEAPF32[i27 + 4 >> 2] = d26;
+ d25 = +HEAPF32[i13 >> 2];
+ d26 = +HEAPF32[i14 >> 2];
+ d23 = +HEAPF32[i15 >> 2];
+ d24 = +HEAPF32[i16 >> 2];
+ d25 = +(d25 > d26 ? d25 : d26);
+ d26 = +(d23 > d24 ? d23 : d24);
+ i27 = i10 + (i21 * 28 | 0) + 8 | 0;
+ HEAPF32[i27 >> 2] = d25;
+ HEAPF32[i27 + 4 >> 2] = d26;
+ d26 = +HEAPF32[i11 >> 2] - +HEAPF32[i22 >> 2];
+ HEAPF32[i3 >> 2] = +HEAPF32[i2 >> 2] - +HEAPF32[i7 >> 2];
+ HEAPF32[i12 >> 2] = d26;
+ __ZN12b2BroadPhase9MoveProxyEiRK6b2AABBRK6b2Vec2(i8, HEAP32[i10 + (i21 * 28 | 0) + 24 >> 2] | 0, i17, i3);
+ i21 = i21 + 1 | 0;
+ } while ((i21 | 0) < (HEAP32[i4 >> 2] | 0));
+ STACKTOP = i9;
+ return;
+}
+function __ZN12b2EPCollider24ComputePolygonSeparationEv(i2, i9) {
+ i2 = i2 | 0;
+ i9 = i9 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i5 = 0, d6 = 0.0, d7 = 0.0, i8 = 0, d10 = 0.0, d11 = 0.0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, d16 = 0.0, d17 = 0.0, d18 = 0.0, d19 = 0.0, i20 = 0, d21 = 0.0, d22 = 0.0, d23 = 0.0, d24 = 0.0, d25 = 0.0, d26 = 0.0;
+ i15 = STACKTOP;
+ HEAP32[i2 >> 2] = 0;
+ i3 = i2 + 4 | 0;
+ HEAP32[i3 >> 2] = -1;
+ i4 = i2 + 8 | 0;
+ HEAPF32[i4 >> 2] = -3.4028234663852886e+38;
+ d7 = +HEAPF32[i9 + 216 >> 2];
+ d6 = +HEAPF32[i9 + 212 >> 2];
+ i5 = HEAP32[i9 + 128 >> 2] | 0;
+ if ((i5 | 0) <= 0) {
+ STACKTOP = i15;
+ return;
+ }
+ d17 = +HEAPF32[i9 + 164 >> 2];
+ d18 = +HEAPF32[i9 + 168 >> 2];
+ d11 = +HEAPF32[i9 + 172 >> 2];
+ d10 = +HEAPF32[i9 + 176 >> 2];
+ d16 = +HEAPF32[i9 + 244 >> 2];
+ i12 = i9 + 228 | 0;
+ i13 = i9 + 232 | 0;
+ i14 = i9 + 236 | 0;
+ i1 = i9 + 240 | 0;
+ d19 = -3.4028234663852886e+38;
+ i20 = 0;
+ while (1) {
+ d23 = +HEAPF32[i9 + (i20 << 3) + 64 >> 2];
+ d21 = -d23;
+ d22 = -+HEAPF32[i9 + (i20 << 3) + 68 >> 2];
+ d26 = +HEAPF32[i9 + (i20 << 3) >> 2];
+ d25 = +HEAPF32[i9 + (i20 << 3) + 4 >> 2];
+ d24 = (d26 - d17) * d21 + (d25 - d18) * d22;
+ d25 = (d26 - d11) * d21 + (d25 - d10) * d22;
+ d24 = d24 < d25 ? d24 : d25;
+ if (d24 > d16) {
+ break;
+ }
+ if (!(d7 * d23 + d6 * d22 >= 0.0)) {
+ if (!((d21 - +HEAPF32[i12 >> 2]) * d6 + (d22 - +HEAPF32[i13 >> 2]) * d7 < -.03490658849477768) & d24 > d19) {
+ i8 = 8;
+ }
+ } else {
+ if (!((d21 - +HEAPF32[i14 >> 2]) * d6 + (d22 - +HEAPF32[i1 >> 2]) * d7 < -.03490658849477768) & d24 > d19) {
+ i8 = 8;
+ }
+ }
+ if ((i8 | 0) == 8) {
+ i8 = 0;
+ HEAP32[i2 >> 2] = 2;
+ HEAP32[i3 >> 2] = i20;
+ HEAPF32[i4 >> 2] = d24;
+ d19 = d24;
+ }
+ i20 = i20 + 1 | 0;
+ if ((i20 | 0) >= (i5 | 0)) {
+ i8 = 10;
+ break;
+ }
+ }
+ if ((i8 | 0) == 10) {
+ STACKTOP = i15;
+ return;
+ }
+ HEAP32[i2 >> 2] = 2;
+ HEAP32[i3 >> 2] = i20;
+ HEAPF32[i4 >> 2] = d24;
+ STACKTOP = i15;
+ return;
+}
+function __ZNK11b2EdgeShape7RayCastEP15b2RayCastOutputRK14b2RayCastInputRK11b2Transformi(i17, i1, i2, i18, i3) {
+ i17 = i17 | 0;
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ i18 = i18 | 0;
+ i3 = i3 | 0;
+ var d4 = 0.0, d5 = 0.0, d6 = 0.0, d7 = 0.0, d8 = 0.0, d9 = 0.0, d10 = 0.0, d11 = 0.0, d12 = 0.0, d13 = 0.0, d14 = 0.0, d15 = 0.0, d16 = 0.0;
+ i3 = STACKTOP;
+ d6 = +HEAPF32[i18 >> 2];
+ d7 = +HEAPF32[i2 >> 2] - d6;
+ d9 = +HEAPF32[i18 + 4 >> 2];
+ d4 = +HEAPF32[i2 + 4 >> 2] - d9;
+ d11 = +HEAPF32[i18 + 12 >> 2];
+ d5 = +HEAPF32[i18 + 8 >> 2];
+ d8 = d7 * d11 + d4 * d5;
+ d7 = d11 * d4 - d7 * d5;
+ d6 = +HEAPF32[i2 + 8 >> 2] - d6;
+ d9 = +HEAPF32[i2 + 12 >> 2] - d9;
+ d4 = d11 * d6 + d5 * d9 - d8;
+ d6 = d11 * d9 - d5 * d6 - d7;
+ i18 = i17 + 12 | 0;
+ d5 = +HEAPF32[i18 >> 2];
+ d9 = +HEAPF32[i18 + 4 >> 2];
+ i18 = i17 + 20 | 0;
+ d11 = +HEAPF32[i18 >> 2];
+ d11 = d11 - d5;
+ d12 = +HEAPF32[i18 + 4 >> 2] - d9;
+ d15 = -d11;
+ d10 = d11 * d11 + d12 * d12;
+ d13 = +Math_sqrt(+d10);
+ if (d13 < 1.1920928955078125e-7) {
+ d13 = d12;
+ } else {
+ d16 = 1.0 / d13;
+ d13 = d12 * d16;
+ d15 = d16 * d15;
+ }
+ d14 = (d9 - d7) * d15 + (d5 - d8) * d13;
+ d16 = d6 * d15 + d4 * d13;
+ if (d16 == 0.0) {
+ i18 = 0;
+ STACKTOP = i3;
+ return i18 | 0;
+ }
+ d16 = d14 / d16;
+ if (d16 < 0.0) {
+ i18 = 0;
+ STACKTOP = i3;
+ return i18 | 0;
+ }
+ if (+HEAPF32[i2 + 16 >> 2] < d16 | d10 == 0.0) {
+ i18 = 0;
+ STACKTOP = i3;
+ return i18 | 0;
+ }
+ d12 = (d11 * (d8 + d4 * d16 - d5) + d12 * (d7 + d6 * d16 - d9)) / d10;
+ if (d12 < 0.0 | d12 > 1.0) {
+ i18 = 0;
+ STACKTOP = i3;
+ return i18 | 0;
+ }
+ HEAPF32[i1 + 8 >> 2] = d16;
+ if (d14 > 0.0) {
+ d14 = +-d13;
+ d16 = +-d15;
+ i18 = i1;
+ HEAPF32[i18 >> 2] = d14;
+ HEAPF32[i18 + 4 >> 2] = d16;
+ i18 = 1;
+ STACKTOP = i3;
+ return i18 | 0;
+ } else {
+ d14 = +d13;
+ d16 = +d15;
+ i18 = i1;
+ HEAPF32[i18 >> 2] = d14;
+ HEAPF32[i18 + 4 >> 2] = d16;
+ i18 = 1;
+ STACKTOP = i3;
+ return i18 | 0;
+ }
+ return 0;
+}
+function ___dynamic_cast(i7, i6, i11, i5) {
+ i7 = i7 | 0;
+ i6 = i6 | 0;
+ i11 = i11 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i8 = 0, i9 = 0, i10 = 0, i12 = 0, i13 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 64 | 0;
+ i2 = i1;
+ i3 = HEAP32[i7 >> 2] | 0;
+ i4 = i7 + (HEAP32[i3 + -8 >> 2] | 0) | 0;
+ i3 = HEAP32[i3 + -4 >> 2] | 0;
+ HEAP32[i2 >> 2] = i11;
+ HEAP32[i2 + 4 >> 2] = i7;
+ HEAP32[i2 + 8 >> 2] = i6;
+ HEAP32[i2 + 12 >> 2] = i5;
+ i9 = i2 + 16 | 0;
+ i10 = i2 + 20 | 0;
+ i6 = i2 + 24 | 0;
+ i8 = i2 + 28 | 0;
+ i5 = i2 + 32 | 0;
+ i7 = i2 + 40 | 0;
+ i12 = (i3 | 0) == (i11 | 0);
+ i13 = i9 + 0 | 0;
+ i11 = i13 + 36 | 0;
+ do {
+ HEAP32[i13 >> 2] = 0;
+ i13 = i13 + 4 | 0;
+ } while ((i13 | 0) < (i11 | 0));
+ HEAP16[i9 + 36 >> 1] = 0;
+ HEAP8[i9 + 38 | 0] = 0;
+ if (i12) {
+ HEAP32[i2 + 48 >> 2] = 1;
+ FUNCTION_TABLE_viiiiii[HEAP32[(HEAP32[i3 >> 2] | 0) + 20 >> 2] & 3](i3, i2, i4, i4, 1, 0);
+ i13 = (HEAP32[i6 >> 2] | 0) == 1 ? i4 : 0;
+ STACKTOP = i1;
+ return i13 | 0;
+ }
+ FUNCTION_TABLE_viiiii[HEAP32[(HEAP32[i3 >> 2] | 0) + 24 >> 2] & 3](i3, i2, i4, 1, 0);
+ i2 = HEAP32[i2 + 36 >> 2] | 0;
+ if ((i2 | 0) == 0) {
+ if ((HEAP32[i7 >> 2] | 0) != 1) {
+ i13 = 0;
+ STACKTOP = i1;
+ return i13 | 0;
+ }
+ if ((HEAP32[i8 >> 2] | 0) != 1) {
+ i13 = 0;
+ STACKTOP = i1;
+ return i13 | 0;
+ }
+ i13 = (HEAP32[i5 >> 2] | 0) == 1 ? HEAP32[i10 >> 2] | 0 : 0;
+ STACKTOP = i1;
+ return i13 | 0;
+ } else if ((i2 | 0) == 1) {
+ if ((HEAP32[i6 >> 2] | 0) != 1) {
+ if ((HEAP32[i7 >> 2] | 0) != 0) {
+ i13 = 0;
+ STACKTOP = i1;
+ return i13 | 0;
+ }
+ if ((HEAP32[i8 >> 2] | 0) != 1) {
+ i13 = 0;
+ STACKTOP = i1;
+ return i13 | 0;
+ }
+ if ((HEAP32[i5 >> 2] | 0) != 1) {
+ i13 = 0;
+ STACKTOP = i1;
+ return i13 | 0;
+ }
+ }
+ i13 = HEAP32[i9 >> 2] | 0;
+ STACKTOP = i1;
+ return i13 | 0;
+ } else {
+ i13 = 0;
+ STACKTOP = i1;
+ return i13 | 0;
+ }
+ return 0;
+}
+function __ZNK14b2PolygonShape11ComputeMassEP10b2MassDataf(i4, i1, d2) {
+ i4 = i4 | 0;
+ i1 = i1 | 0;
+ d2 = +d2;
+ var i3 = 0, i5 = 0, d6 = 0.0, d7 = 0.0, d8 = 0.0, d9 = 0.0, d10 = 0.0, d11 = 0.0, i12 = 0, d13 = 0.0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, d18 = 0.0, i19 = 0, d20 = 0.0, d21 = 0.0, d22 = 0.0, d23 = 0.0;
+ i3 = STACKTOP;
+ i5 = HEAP32[i4 + 148 >> 2] | 0;
+ if ((i5 | 0) > 2) {
+ d7 = 0.0;
+ d6 = 0.0;
+ i12 = 0;
+ } else {
+ ___assert_fail(432, 328, 306, 456);
+ }
+ do {
+ d6 = d6 + +HEAPF32[i4 + (i12 << 3) + 20 >> 2];
+ d7 = d7 + +HEAPF32[i4 + (i12 << 3) + 24 >> 2];
+ i12 = i12 + 1 | 0;
+ } while ((i12 | 0) < (i5 | 0));
+ d11 = 1.0 / +(i5 | 0);
+ d6 = d6 * d11;
+ d11 = d7 * d11;
+ i16 = i4 + 20 | 0;
+ i19 = i4 + 24 | 0;
+ d9 = 0.0;
+ d10 = 0.0;
+ d7 = 0.0;
+ d8 = 0.0;
+ i17 = 0;
+ do {
+ d18 = +HEAPF32[i4 + (i17 << 3) + 20 >> 2] - d6;
+ d13 = +HEAPF32[i4 + (i17 << 3) + 24 >> 2] - d11;
+ i17 = i17 + 1 | 0;
+ i12 = (i17 | 0) < (i5 | 0);
+ if (i12) {
+ i14 = i4 + (i17 << 3) + 20 | 0;
+ i15 = i4 + (i17 << 3) + 24 | 0;
+ } else {
+ i14 = i16;
+ i15 = i19;
+ }
+ d21 = +HEAPF32[i14 >> 2] - d6;
+ d20 = +HEAPF32[i15 >> 2] - d11;
+ d22 = d18 * d20 - d13 * d21;
+ d23 = d22 * .5;
+ d8 = d8 + d23;
+ d23 = d23 * .3333333432674408;
+ d9 = d9 + (d18 + d21) * d23;
+ d10 = d10 + (d13 + d20) * d23;
+ d7 = d7 + d22 * .0833333358168602 * (d21 * d21 + (d18 * d18 + d18 * d21) + (d20 * d20 + (d13 * d13 + d13 * d20)));
+ } while (i12);
+ d13 = d8 * d2;
+ HEAPF32[i1 >> 2] = d13;
+ if (d8 > 1.1920928955078125e-7) {
+ d23 = 1.0 / d8;
+ d22 = d9 * d23;
+ d23 = d10 * d23;
+ d20 = d6 + d22;
+ d21 = d11 + d23;
+ d11 = +d20;
+ d18 = +d21;
+ i19 = i1 + 4 | 0;
+ HEAPF32[i19 >> 2] = d11;
+ HEAPF32[i19 + 4 >> 2] = d18;
+ HEAPF32[i1 + 12 >> 2] = d7 * d2 + d13 * (d20 * d20 + d21 * d21 - (d22 * d22 + d23 * d23));
+ STACKTOP = i3;
+ return;
+ } else {
+ ___assert_fail(472, 328, 352, 456);
+ }
+}
+function __ZN16b2ContactManager7DestroyEP9b2Contact(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ var i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0;
+ i3 = STACKTOP;
+ i5 = HEAP32[(HEAP32[i2 + 48 >> 2] | 0) + 8 >> 2] | 0;
+ i4 = HEAP32[(HEAP32[i2 + 52 >> 2] | 0) + 8 >> 2] | 0;
+ i6 = HEAP32[i1 + 72 >> 2] | 0;
+ if ((i6 | 0) != 0 ? (HEAP32[i2 + 4 >> 2] & 2 | 0) != 0 : 0) {
+ FUNCTION_TABLE_vii[HEAP32[(HEAP32[i6 >> 2] | 0) + 12 >> 2] & 15](i6, i2);
+ }
+ i7 = i2 + 8 | 0;
+ i8 = HEAP32[i7 >> 2] | 0;
+ i6 = i2 + 12 | 0;
+ if ((i8 | 0) != 0) {
+ HEAP32[i8 + 12 >> 2] = HEAP32[i6 >> 2];
+ }
+ i8 = HEAP32[i6 >> 2] | 0;
+ if ((i8 | 0) != 0) {
+ HEAP32[i8 + 8 >> 2] = HEAP32[i7 >> 2];
+ }
+ i7 = i1 + 60 | 0;
+ if ((HEAP32[i7 >> 2] | 0) == (i2 | 0)) {
+ HEAP32[i7 >> 2] = HEAP32[i6 >> 2];
+ }
+ i7 = i2 + 24 | 0;
+ i8 = HEAP32[i7 >> 2] | 0;
+ i6 = i2 + 28 | 0;
+ if ((i8 | 0) != 0) {
+ HEAP32[i8 + 12 >> 2] = HEAP32[i6 >> 2];
+ }
+ i8 = HEAP32[i6 >> 2] | 0;
+ if ((i8 | 0) != 0) {
+ HEAP32[i8 + 8 >> 2] = HEAP32[i7 >> 2];
+ }
+ i5 = i5 + 112 | 0;
+ if ((i2 + 16 | 0) == (HEAP32[i5 >> 2] | 0)) {
+ HEAP32[i5 >> 2] = HEAP32[i6 >> 2];
+ }
+ i6 = i2 + 40 | 0;
+ i7 = HEAP32[i6 >> 2] | 0;
+ i5 = i2 + 44 | 0;
+ if ((i7 | 0) != 0) {
+ HEAP32[i7 + 12 >> 2] = HEAP32[i5 >> 2];
+ }
+ i7 = HEAP32[i5 >> 2] | 0;
+ if ((i7 | 0) != 0) {
+ HEAP32[i7 + 8 >> 2] = HEAP32[i6 >> 2];
+ }
+ i4 = i4 + 112 | 0;
+ if ((i2 + 32 | 0) != (HEAP32[i4 >> 2] | 0)) {
+ i8 = i1 + 76 | 0;
+ i8 = HEAP32[i8 >> 2] | 0;
+ __ZN9b2Contact7DestroyEPS_P16b2BlockAllocator(i2, i8);
+ i8 = i1 + 64 | 0;
+ i7 = HEAP32[i8 >> 2] | 0;
+ i7 = i7 + -1 | 0;
+ HEAP32[i8 >> 2] = i7;
+ STACKTOP = i3;
+ return;
+ }
+ HEAP32[i4 >> 2] = HEAP32[i5 >> 2];
+ i8 = i1 + 76 | 0;
+ i8 = HEAP32[i8 >> 2] | 0;
+ __ZN9b2Contact7DestroyEPS_P16b2BlockAllocator(i2, i8);
+ i8 = i1 + 64 | 0;
+ i7 = HEAP32[i8 >> 2] | 0;
+ i7 = i7 + -1 | 0;
+ HEAP32[i8 >> 2] = i7;
+ STACKTOP = i3;
+ return;
+}
+function __ZNK10__cxxabiv120__si_class_type_info16search_below_dstEPNS_19__dynamic_cast_infoEPKvib(i6, i3, i4, i8, i7) {
+ i6 = i6 | 0;
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ i8 = i8 | 0;
+ i7 = i7 | 0;
+ var i1 = 0, i2 = 0, i5 = 0, i9 = 0, i10 = 0;
+ i1 = STACKTOP;
+ if ((i6 | 0) == (HEAP32[i3 + 8 >> 2] | 0)) {
+ if ((HEAP32[i3 + 4 >> 2] | 0) != (i4 | 0)) {
+ STACKTOP = i1;
+ return;
+ }
+ i2 = i3 + 28 | 0;
+ if ((HEAP32[i2 >> 2] | 0) == 1) {
+ STACKTOP = i1;
+ return;
+ }
+ HEAP32[i2 >> 2] = i8;
+ STACKTOP = i1;
+ return;
+ }
+ if ((i6 | 0) != (HEAP32[i3 >> 2] | 0)) {
+ i9 = HEAP32[i6 + 8 >> 2] | 0;
+ FUNCTION_TABLE_viiiii[HEAP32[(HEAP32[i9 >> 2] | 0) + 24 >> 2] & 3](i9, i3, i4, i8, i7);
+ STACKTOP = i1;
+ return;
+ }
+ if ((HEAP32[i3 + 16 >> 2] | 0) != (i4 | 0) ? (i5 = i3 + 20 | 0, (HEAP32[i5 >> 2] | 0) != (i4 | 0)) : 0) {
+ HEAP32[i3 + 32 >> 2] = i8;
+ i8 = i3 + 44 | 0;
+ if ((HEAP32[i8 >> 2] | 0) == 4) {
+ STACKTOP = i1;
+ return;
+ }
+ i9 = i3 + 52 | 0;
+ HEAP8[i9] = 0;
+ i10 = i3 + 53 | 0;
+ HEAP8[i10] = 0;
+ i6 = HEAP32[i6 + 8 >> 2] | 0;
+ FUNCTION_TABLE_viiiiii[HEAP32[(HEAP32[i6 >> 2] | 0) + 20 >> 2] & 3](i6, i3, i4, i4, 1, i7);
+ if ((HEAP8[i10] | 0) != 0) {
+ if ((HEAP8[i9] | 0) == 0) {
+ i6 = 1;
+ i2 = 13;
+ }
+ } else {
+ i6 = 0;
+ i2 = 13;
+ }
+ do {
+ if ((i2 | 0) == 13) {
+ HEAP32[i5 >> 2] = i4;
+ i10 = i3 + 40 | 0;
+ HEAP32[i10 >> 2] = (HEAP32[i10 >> 2] | 0) + 1;
+ if ((HEAP32[i3 + 36 >> 2] | 0) == 1 ? (HEAP32[i3 + 24 >> 2] | 0) == 2 : 0) {
+ HEAP8[i3 + 54 | 0] = 1;
+ if (i6) {
+ break;
+ }
+ } else {
+ i2 = 16;
+ }
+ if ((i2 | 0) == 16 ? i6 : 0) {
+ break;
+ }
+ HEAP32[i8 >> 2] = 4;
+ STACKTOP = i1;
+ return;
+ }
+ } while (0);
+ HEAP32[i8 >> 2] = 3;
+ STACKTOP = i1;
+ return;
+ }
+ if ((i8 | 0) != 1) {
+ STACKTOP = i1;
+ return;
+ }
+ HEAP32[i3 + 32 >> 2] = 1;
+ STACKTOP = i1;
+ return;
+}
+function __ZN16b2BlockAllocator8AllocateEi(i4, i2) {
+ i4 = i4 | 0;
+ i2 = i2 | 0;
+ var i1 = 0, i3 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0;
+ i1 = STACKTOP;
+ if ((i2 | 0) == 0) {
+ i9 = 0;
+ STACKTOP = i1;
+ return i9 | 0;
+ }
+ if ((i2 | 0) <= 0) {
+ ___assert_fail(1376, 1312, 104, 1392);
+ }
+ if ((i2 | 0) > 640) {
+ i9 = __Z7b2Alloci(i2) | 0;
+ STACKTOP = i1;
+ return i9 | 0;
+ }
+ i9 = HEAP8[632 + i2 | 0] | 0;
+ i5 = i9 & 255;
+ if (!((i9 & 255) < 14)) {
+ ___assert_fail(1408, 1312, 112, 1392);
+ }
+ i2 = i4 + (i5 << 2) + 12 | 0;
+ i3 = HEAP32[i2 >> 2] | 0;
+ if ((i3 | 0) != 0) {
+ HEAP32[i2 >> 2] = HEAP32[i3 >> 2];
+ i9 = i3;
+ STACKTOP = i1;
+ return i9 | 0;
+ }
+ i3 = i4 + 4 | 0;
+ i6 = HEAP32[i3 >> 2] | 0;
+ i7 = i4 + 8 | 0;
+ if ((i6 | 0) == (HEAP32[i7 >> 2] | 0)) {
+ i9 = HEAP32[i4 >> 2] | 0;
+ i6 = i6 + 128 | 0;
+ HEAP32[i7 >> 2] = i6;
+ i6 = __Z7b2Alloci(i6 << 3) | 0;
+ HEAP32[i4 >> 2] = i6;
+ _memcpy(i6 | 0, i9 | 0, HEAP32[i3 >> 2] << 3 | 0) | 0;
+ _memset((HEAP32[i4 >> 2] | 0) + (HEAP32[i3 >> 2] << 3) | 0, 0, 1024) | 0;
+ __Z6b2FreePv(i9);
+ i6 = HEAP32[i3 >> 2] | 0;
+ }
+ i9 = HEAP32[i4 >> 2] | 0;
+ i7 = __Z7b2Alloci(16384) | 0;
+ i4 = i9 + (i6 << 3) + 4 | 0;
+ HEAP32[i4 >> 2] = i7;
+ i5 = HEAP32[576 + (i5 << 2) >> 2] | 0;
+ HEAP32[i9 + (i6 << 3) >> 2] = i5;
+ i6 = 16384 / (i5 | 0) | 0;
+ if ((Math_imul(i6, i5) | 0) >= 16385) {
+ ___assert_fail(1448, 1312, 140, 1392);
+ }
+ i6 = i6 + -1 | 0;
+ if ((i6 | 0) > 0) {
+ i9 = 0;
+ while (1) {
+ i8 = i9 + 1 | 0;
+ HEAP32[i7 + (Math_imul(i9, i5) | 0) >> 2] = i7 + (Math_imul(i8, i5) | 0);
+ i7 = HEAP32[i4 >> 2] | 0;
+ if ((i8 | 0) == (i6 | 0)) {
+ break;
+ } else {
+ i9 = i8;
+ }
+ }
+ }
+ HEAP32[i7 + (Math_imul(i6, i5) | 0) >> 2] = 0;
+ HEAP32[i2 >> 2] = HEAP32[HEAP32[i4 >> 2] >> 2];
+ HEAP32[i3 >> 2] = (HEAP32[i3 >> 2] | 0) + 1;
+ i9 = HEAP32[i4 >> 2] | 0;
+ STACKTOP = i1;
+ return i9 | 0;
+}
+function __ZN9b2Contact6CreateEP9b2FixtureiS1_iP16b2BlockAllocator(i4, i5, i1, i3, i6) {
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ i6 = i6 | 0;
+ var i2 = 0, i7 = 0, i8 = 0, i9 = 0;
+ i2 = STACKTOP;
+ if ((HEAP8[4200] | 0) == 0) {
+ HEAP32[1002] = 3;
+ HEAP32[4012 >> 2] = 3;
+ HEAP8[4016 | 0] = 1;
+ HEAP32[4104 >> 2] = 4;
+ HEAP32[4108 >> 2] = 4;
+ HEAP8[4112 | 0] = 1;
+ HEAP32[4032 >> 2] = 4;
+ HEAP32[4036 >> 2] = 4;
+ HEAP8[4040 | 0] = 0;
+ HEAP32[4128 >> 2] = 5;
+ HEAP32[4132 >> 2] = 5;
+ HEAP8[4136 | 0] = 1;
+ HEAP32[4056 >> 2] = 6;
+ HEAP32[4060 >> 2] = 6;
+ HEAP8[4064 | 0] = 1;
+ HEAP32[4020 >> 2] = 6;
+ HEAP32[4024 >> 2] = 6;
+ HEAP8[4028 | 0] = 0;
+ HEAP32[4080 >> 2] = 7;
+ HEAP32[4084 >> 2] = 7;
+ HEAP8[4088 | 0] = 1;
+ HEAP32[4116 >> 2] = 7;
+ HEAP32[4120 >> 2] = 7;
+ HEAP8[4124 | 0] = 0;
+ HEAP32[4152 >> 2] = 8;
+ HEAP32[4156 >> 2] = 8;
+ HEAP8[4160 | 0] = 1;
+ HEAP32[4044 >> 2] = 8;
+ HEAP32[4048 >> 2] = 8;
+ HEAP8[4052 | 0] = 0;
+ HEAP32[4176 >> 2] = 9;
+ HEAP32[4180 >> 2] = 9;
+ HEAP8[4184 | 0] = 1;
+ HEAP32[4140 >> 2] = 9;
+ HEAP32[4144 >> 2] = 9;
+ HEAP8[4148 | 0] = 0;
+ HEAP8[4200] = 1;
+ }
+ i7 = HEAP32[(HEAP32[i4 + 12 >> 2] | 0) + 4 >> 2] | 0;
+ i8 = HEAP32[(HEAP32[i1 + 12 >> 2] | 0) + 4 >> 2] | 0;
+ if (!(i7 >>> 0 < 4)) {
+ ___assert_fail(4208, 4256, 80, 4344);
+ }
+ if (!(i8 >>> 0 < 4)) {
+ ___assert_fail(4296, 4256, 81, 4344);
+ }
+ i9 = HEAP32[4008 + (i7 * 48 | 0) + (i8 * 12 | 0) >> 2] | 0;
+ if ((i9 | 0) == 0) {
+ i9 = 0;
+ STACKTOP = i2;
+ return i9 | 0;
+ }
+ if ((HEAP8[4008 + (i7 * 48 | 0) + (i8 * 12 | 0) + 8 | 0] | 0) == 0) {
+ i9 = FUNCTION_TABLE_iiiiii[i9 & 15](i1, i3, i4, i5, i6) | 0;
+ STACKTOP = i2;
+ return i9 | 0;
+ } else {
+ i9 = FUNCTION_TABLE_iiiiii[i9 & 15](i4, i5, i1, i3, i6) | 0;
+ STACKTOP = i2;
+ return i9 | 0;
+ }
+ return 0;
+}
+function __ZN13b2DynamicTree9MoveProxyEiRK6b2AABBRK6b2Vec2(i1, i2, i13, i9) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ i13 = i13 | 0;
+ i9 = i9 | 0;
+ var i3 = 0, i4 = 0, d5 = 0.0, d6 = 0.0, d7 = 0.0, d8 = 0.0, d10 = 0.0, d11 = 0.0, i12 = 0;
+ i4 = STACKTOP;
+ if (!((i2 | 0) > -1)) {
+ ___assert_fail(3072, 2944, 135, 3152);
+ }
+ if ((HEAP32[i1 + 12 >> 2] | 0) <= (i2 | 0)) {
+ ___assert_fail(3072, 2944, 135, 3152);
+ }
+ i3 = i1 + 4 | 0;
+ i12 = HEAP32[i3 >> 2] | 0;
+ if (!((HEAP32[i12 + (i2 * 36 | 0) + 24 >> 2] | 0) == -1)) {
+ ___assert_fail(3120, 2944, 137, 3152);
+ }
+ if (((+HEAPF32[i12 + (i2 * 36 | 0) >> 2] <= +HEAPF32[i13 >> 2] ? +HEAPF32[i12 + (i2 * 36 | 0) + 4 >> 2] <= +HEAPF32[i13 + 4 >> 2] : 0) ? +HEAPF32[i13 + 8 >> 2] <= +HEAPF32[i12 + (i2 * 36 | 0) + 8 >> 2] : 0) ? +HEAPF32[i13 + 12 >> 2] <= +HEAPF32[i12 + (i2 * 36 | 0) + 12 >> 2] : 0) {
+ i13 = 0;
+ STACKTOP = i4;
+ return i13 | 0;
+ }
+ __ZN13b2DynamicTree10RemoveLeafEi(i1, i2);
+ i12 = i13;
+ d6 = +HEAPF32[i12 >> 2];
+ d8 = +HEAPF32[i12 + 4 >> 2];
+ i13 = i13 + 8 | 0;
+ d10 = +HEAPF32[i13 >> 2];
+ d6 = d6 + -.10000000149011612;
+ d8 = d8 + -.10000000149011612;
+ d10 = d10 + .10000000149011612;
+ d5 = +HEAPF32[i13 + 4 >> 2] + .10000000149011612;
+ d11 = +HEAPF32[i9 >> 2] * 2.0;
+ d7 = +HEAPF32[i9 + 4 >> 2] * 2.0;
+ if (d11 < 0.0) {
+ d6 = d6 + d11;
+ } else {
+ d10 = d11 + d10;
+ }
+ if (d7 < 0.0) {
+ d8 = d8 + d7;
+ } else {
+ d5 = d7 + d5;
+ }
+ i13 = HEAP32[i3 >> 2] | 0;
+ d7 = +d6;
+ d11 = +d8;
+ i12 = i13 + (i2 * 36 | 0) | 0;
+ HEAPF32[i12 >> 2] = d7;
+ HEAPF32[i12 + 4 >> 2] = d11;
+ d10 = +d10;
+ d11 = +d5;
+ i13 = i13 + (i2 * 36 | 0) + 8 | 0;
+ HEAPF32[i13 >> 2] = d10;
+ HEAPF32[i13 + 4 >> 2] = d11;
+ __ZN13b2DynamicTree10InsertLeafEi(i1, i2);
+ i13 = 1;
+ STACKTOP = i4;
+ return i13 | 0;
+}
+function __ZNK9b2Simplex16GetWitnessPointsEP6b2Vec2S1_(i1, i4, i5) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i3 = 0, d6 = 0.0, d7 = 0.0, d8 = 0.0, i9 = 0, i10 = 0, d11 = 0.0;
+ i2 = STACKTOP;
+ i3 = HEAP32[i1 + 108 >> 2] | 0;
+ if ((i3 | 0) == 2) {
+ i9 = i1 + 24 | 0;
+ d7 = +HEAPF32[i9 >> 2];
+ i3 = i1 + 60 | 0;
+ d8 = +HEAPF32[i3 >> 2];
+ d6 = +(d7 * +HEAPF32[i1 >> 2] + d8 * +HEAPF32[i1 + 36 >> 2]);
+ d8 = +(d7 * +HEAPF32[i1 + 4 >> 2] + d8 * +HEAPF32[i1 + 40 >> 2]);
+ HEAPF32[i4 >> 2] = d6;
+ HEAPF32[i4 + 4 >> 2] = d8;
+ d8 = +HEAPF32[i9 >> 2];
+ d6 = +HEAPF32[i3 >> 2];
+ d7 = +(d8 * +HEAPF32[i1 + 8 >> 2] + d6 * +HEAPF32[i1 + 44 >> 2]);
+ d6 = +(d8 * +HEAPF32[i1 + 12 >> 2] + d6 * +HEAPF32[i1 + 48 >> 2]);
+ HEAPF32[i5 >> 2] = d7;
+ HEAPF32[i5 + 4 >> 2] = d6;
+ STACKTOP = i2;
+ return;
+ } else if ((i3 | 0) == 1) {
+ i10 = i1;
+ i9 = HEAP32[i10 + 4 >> 2] | 0;
+ i3 = i4;
+ HEAP32[i3 >> 2] = HEAP32[i10 >> 2];
+ HEAP32[i3 + 4 >> 2] = i9;
+ i3 = i1 + 8 | 0;
+ i4 = HEAP32[i3 + 4 >> 2] | 0;
+ i9 = i5;
+ HEAP32[i9 >> 2] = HEAP32[i3 >> 2];
+ HEAP32[i9 + 4 >> 2] = i4;
+ STACKTOP = i2;
+ return;
+ } else if ((i3 | 0) == 0) {
+ ___assert_fail(2712, 2672, 217, 2752);
+ } else if ((i3 | 0) == 3) {
+ d11 = +HEAPF32[i1 + 24 >> 2];
+ d6 = +HEAPF32[i1 + 60 >> 2];
+ d8 = +HEAPF32[i1 + 96 >> 2];
+ d7 = +(d11 * +HEAPF32[i1 >> 2] + d6 * +HEAPF32[i1 + 36 >> 2] + d8 * +HEAPF32[i1 + 72 >> 2]);
+ d8 = +(d11 * +HEAPF32[i1 + 4 >> 2] + d6 * +HEAPF32[i1 + 40 >> 2] + d8 * +HEAPF32[i1 + 76 >> 2]);
+ i10 = i4;
+ HEAPF32[i10 >> 2] = d7;
+ HEAPF32[i10 + 4 >> 2] = d8;
+ i10 = i5;
+ HEAPF32[i10 >> 2] = d7;
+ HEAPF32[i10 + 4 >> 2] = d8;
+ STACKTOP = i2;
+ return;
+ } else {
+ ___assert_fail(2712, 2672, 236, 2752);
+ }
+}
+function __ZNK12b2ChainShape12GetChildEdgeEP11b2EdgeShapei(i4, i3, i1) {
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i1 = i1 | 0;
+ var i2 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0;
+ i2 = STACKTOP;
+ if (!((i1 | 0) > -1)) {
+ ___assert_fail(6832, 6792, 89, 6872);
+ }
+ i5 = i4 + 16 | 0;
+ if (((HEAP32[i5 >> 2] | 0) + -1 | 0) <= (i1 | 0)) {
+ ___assert_fail(6832, 6792, 89, 6872);
+ }
+ HEAP32[i3 + 4 >> 2] = 1;
+ HEAPF32[i3 + 8 >> 2] = +HEAPF32[i4 + 8 >> 2];
+ i6 = i4 + 12 | 0;
+ i7 = (HEAP32[i6 >> 2] | 0) + (i1 << 3) | 0;
+ i8 = HEAP32[i7 + 4 >> 2] | 0;
+ i9 = i3 + 12 | 0;
+ HEAP32[i9 >> 2] = HEAP32[i7 >> 2];
+ HEAP32[i9 + 4 >> 2] = i8;
+ i9 = (HEAP32[i6 >> 2] | 0) + (i1 + 1 << 3) | 0;
+ i8 = HEAP32[i9 + 4 >> 2] | 0;
+ i7 = i3 + 20 | 0;
+ HEAP32[i7 >> 2] = HEAP32[i9 >> 2];
+ HEAP32[i7 + 4 >> 2] = i8;
+ i7 = i3 + 28 | 0;
+ if ((i1 | 0) > 0) {
+ i10 = (HEAP32[i6 >> 2] | 0) + (i1 + -1 << 3) | 0;
+ i8 = HEAP32[i10 + 4 >> 2] | 0;
+ i9 = i7;
+ HEAP32[i9 >> 2] = HEAP32[i10 >> 2];
+ HEAP32[i9 + 4 >> 2] = i8;
+ HEAP8[i3 + 44 | 0] = 1;
+ } else {
+ i8 = i4 + 20 | 0;
+ i9 = HEAP32[i8 + 4 >> 2] | 0;
+ i10 = i7;
+ HEAP32[i10 >> 2] = HEAP32[i8 >> 2];
+ HEAP32[i10 + 4 >> 2] = i9;
+ HEAP8[i3 + 44 | 0] = HEAP8[i4 + 36 | 0] | 0;
+ }
+ i7 = i3 + 36 | 0;
+ if (((HEAP32[i5 >> 2] | 0) + -2 | 0) > (i1 | 0)) {
+ i8 = (HEAP32[i6 >> 2] | 0) + (i1 + 2 << 3) | 0;
+ i9 = HEAP32[i8 + 4 >> 2] | 0;
+ i10 = i7;
+ HEAP32[i10 >> 2] = HEAP32[i8 >> 2];
+ HEAP32[i10 + 4 >> 2] = i9;
+ HEAP8[i3 + 45 | 0] = 1;
+ STACKTOP = i2;
+ return;
+ } else {
+ i8 = i4 + 28 | 0;
+ i9 = HEAP32[i8 + 4 >> 2] | 0;
+ i10 = i7;
+ HEAP32[i10 >> 2] = HEAP32[i8 >> 2];
+ HEAP32[i10 + 4 >> 2] = i9;
+ HEAP8[i3 + 45 | 0] = HEAP8[i4 + 37 | 0] | 0;
+ STACKTOP = i2;
+ return;
+ }
+}
+function __ZN15b2DistanceProxy3SetEPK7b2Shapei(i3, i1, i5) {
+ i3 = i3 | 0;
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i4 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0;
+ i2 = STACKTOP;
+ i4 = HEAP32[i1 + 4 >> 2] | 0;
+ if ((i4 | 0) == 1) {
+ HEAP32[i3 + 16 >> 2] = i1 + 12;
+ HEAP32[i3 + 20 >> 2] = 2;
+ HEAPF32[i3 + 24 >> 2] = +HEAPF32[i1 + 8 >> 2];
+ STACKTOP = i2;
+ return;
+ } else if ((i4 | 0) == 3) {
+ if (!((i5 | 0) > -1)) {
+ ___assert_fail(2632, 2672, 53, 2704);
+ }
+ i4 = i1 + 16 | 0;
+ if ((HEAP32[i4 >> 2] | 0) <= (i5 | 0)) {
+ ___assert_fail(2632, 2672, 53, 2704);
+ }
+ i7 = i1 + 12 | 0;
+ i9 = (HEAP32[i7 >> 2] | 0) + (i5 << 3) | 0;
+ i8 = HEAP32[i9 + 4 >> 2] | 0;
+ i6 = i3;
+ HEAP32[i6 >> 2] = HEAP32[i9 >> 2];
+ HEAP32[i6 + 4 >> 2] = i8;
+ i6 = i5 + 1 | 0;
+ i5 = i3 + 8 | 0;
+ i7 = HEAP32[i7 >> 2] | 0;
+ if ((i6 | 0) < (HEAP32[i4 >> 2] | 0)) {
+ i7 = i7 + (i6 << 3) | 0;
+ i8 = HEAP32[i7 + 4 >> 2] | 0;
+ i9 = i5;
+ HEAP32[i9 >> 2] = HEAP32[i7 >> 2];
+ HEAP32[i9 + 4 >> 2] = i8;
+ } else {
+ i8 = HEAP32[i7 + 4 >> 2] | 0;
+ i9 = i5;
+ HEAP32[i9 >> 2] = HEAP32[i7 >> 2];
+ HEAP32[i9 + 4 >> 2] = i8;
+ }
+ HEAP32[i3 + 16 >> 2] = i3;
+ HEAP32[i3 + 20 >> 2] = 2;
+ HEAPF32[i3 + 24 >> 2] = +HEAPF32[i1 + 8 >> 2];
+ STACKTOP = i2;
+ return;
+ } else if ((i4 | 0) == 2) {
+ HEAP32[i3 + 16 >> 2] = i1 + 20;
+ HEAP32[i3 + 20 >> 2] = HEAP32[i1 + 148 >> 2];
+ HEAPF32[i3 + 24 >> 2] = +HEAPF32[i1 + 8 >> 2];
+ STACKTOP = i2;
+ return;
+ } else if ((i4 | 0) == 0) {
+ HEAP32[i3 + 16 >> 2] = i1 + 12;
+ HEAP32[i3 + 20 >> 2] = 1;
+ HEAPF32[i3 + 24 >> 2] = +HEAPF32[i1 + 8 >> 2];
+ STACKTOP = i2;
+ return;
+ } else {
+ ___assert_fail(2712, 2672, 81, 2704);
+ }
+}
+function __ZL16b2EdgeSeparationPK14b2PolygonShapeRK11b2TransformiS1_S4_(i2, i7, i4, i5, i6) {
+ i2 = i2 | 0;
+ i7 = i7 | 0;
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ i6 = i6 | 0;
+ var d1 = 0.0, d3 = 0.0, d8 = 0.0, d9 = 0.0, d10 = 0.0, d11 = 0.0, i12 = 0, i13 = 0, d14 = 0.0, d15 = 0.0, d16 = 0.0, d17 = 0.0, i18 = 0, i19 = 0, i20 = 0;
+ i12 = STACKTOP;
+ i13 = HEAP32[i5 + 148 >> 2] | 0;
+ if (!((i4 | 0) > -1)) {
+ ___assert_fail(5640, 5688, 32, 5752);
+ }
+ if ((HEAP32[i2 + 148 >> 2] | 0) <= (i4 | 0)) {
+ ___assert_fail(5640, 5688, 32, 5752);
+ }
+ d11 = +HEAPF32[i7 + 12 >> 2];
+ d9 = +HEAPF32[i2 + (i4 << 3) + 84 >> 2];
+ d1 = +HEAPF32[i7 + 8 >> 2];
+ d3 = +HEAPF32[i2 + (i4 << 3) + 88 >> 2];
+ d8 = d11 * d9 - d1 * d3;
+ d3 = d9 * d1 + d11 * d3;
+ d9 = +HEAPF32[i6 + 12 >> 2];
+ d10 = +HEAPF32[i6 + 8 >> 2];
+ d16 = d9 * d8 + d10 * d3;
+ d14 = d9 * d3 - d8 * d10;
+ if ((i13 | 0) > 0) {
+ i19 = 0;
+ i20 = 0;
+ d15 = 3.4028234663852886e+38;
+ while (1) {
+ d17 = d16 * +HEAPF32[i5 + (i19 << 3) + 20 >> 2] + d14 * +HEAPF32[i5 + (i19 << 3) + 24 >> 2];
+ i18 = d17 < d15;
+ i20 = i18 ? i19 : i20;
+ i19 = i19 + 1 | 0;
+ if ((i19 | 0) == (i13 | 0)) {
+ break;
+ } else {
+ d15 = i18 ? d17 : d15;
+ }
+ }
+ } else {
+ i20 = 0;
+ }
+ d16 = +HEAPF32[i2 + (i4 << 3) + 20 >> 2];
+ d17 = +HEAPF32[i2 + (i4 << 3) + 24 >> 2];
+ d14 = +HEAPF32[i5 + (i20 << 3) + 20 >> 2];
+ d15 = +HEAPF32[i5 + (i20 << 3) + 24 >> 2];
+ STACKTOP = i12;
+ return +(d8 * (+HEAPF32[i6 >> 2] + (d9 * d14 - d10 * d15) - (+HEAPF32[i7 >> 2] + (d11 * d16 - d1 * d17))) + d3 * (d14 * d10 + d9 * d15 + +HEAPF32[i6 + 4 >> 2] - (d16 * d1 + d11 * d17 + +HEAPF32[i7 + 4 >> 2])));
+}
+function __Z4iterv() {
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, d5 = 0.0, d6 = 0.0, d7 = 0.0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 48 | 0;
+ i2 = i1;
+ i3 = i1 + 32 | 0;
+ i4 = HEAP32[16] | 0;
+ if ((i4 | 0) >= (HEAP32[4] | 0)) {
+ HEAP32[16] = i4 + 1;
+ __Z7measurePl(i3, HEAP32[8] | 0);
+ d7 = +HEAPF32[i3 + 4 >> 2];
+ d6 = +(HEAP32[10] | 0) / 1.0e6 * 1.0e3;
+ d5 = +(HEAP32[12] | 0) / 1.0e6 * 1.0e3;
+ HEAPF64[tempDoublePtr >> 3] = +HEAPF32[i3 >> 2];
+ HEAP32[i2 >> 2] = HEAP32[tempDoublePtr >> 2];
+ HEAP32[i2 + 4 >> 2] = HEAP32[tempDoublePtr + 4 >> 2];
+ i4 = i2 + 8 | 0;
+ HEAPF64[tempDoublePtr >> 3] = d7;
+ HEAP32[i4 >> 2] = HEAP32[tempDoublePtr >> 2];
+ HEAP32[i4 + 4 >> 2] = HEAP32[tempDoublePtr + 4 >> 2];
+ i4 = i2 + 16 | 0;
+ HEAPF64[tempDoublePtr >> 3] = d6;
+ HEAP32[i4 >> 2] = HEAP32[tempDoublePtr >> 2];
+ HEAP32[i4 + 4 >> 2] = HEAP32[tempDoublePtr + 4 >> 2];
+ i4 = i2 + 24 | 0;
+ HEAPF64[tempDoublePtr >> 3] = d5;
+ HEAP32[i4 >> 2] = HEAP32[tempDoublePtr >> 2];
+ HEAP32[i4 + 4 >> 2] = HEAP32[tempDoublePtr + 4 >> 2];
+ _printf(96, i2 | 0) | 0;
+ _emscripten_run_script(152);
+ if ((HEAP32[18] | 0) == 0) {
+ STACKTOP = i1;
+ return;
+ }
+ _emscripten_cancel_main_loop();
+ STACKTOP = i1;
+ return;
+ }
+ i3 = _clock() | 0;
+ __ZN7b2World4StepEfii(HEAP32[6] | 0, .01666666753590107, 3, 3);
+ i3 = (_clock() | 0) - i3 | 0;
+ i2 = HEAP32[16] | 0;
+ HEAP32[(HEAP32[8] | 0) + (i2 << 2) >> 2] = i3;
+ if ((i3 | 0) < (HEAP32[10] | 0)) {
+ HEAP32[10] = i3;
+ }
+ if ((i3 | 0) > (HEAP32[12] | 0)) {
+ HEAP32[12] = i3;
+ }
+ HEAP32[16] = i2 + 1;
+ STACKTOP = i1;
+ return;
+}
+function __ZN13b2DynamicTree12AllocateNodeEv(i5) {
+ i5 = i5 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i6 = 0, i7 = 0;
+ i1 = STACKTOP;
+ i2 = i5 + 16 | 0;
+ i3 = HEAP32[i2 >> 2] | 0;
+ if ((i3 | 0) == -1) {
+ i4 = i5 + 8 | 0;
+ i6 = HEAP32[i4 >> 2] | 0;
+ i3 = i5 + 12 | 0;
+ if ((i6 | 0) != (HEAP32[i3 >> 2] | 0)) {
+ ___assert_fail(2912, 2944, 61, 2984);
+ }
+ i5 = i5 + 4 | 0;
+ i7 = HEAP32[i5 >> 2] | 0;
+ HEAP32[i3 >> 2] = i6 << 1;
+ i6 = __Z7b2Alloci(i6 * 72 | 0) | 0;
+ HEAP32[i5 >> 2] = i6;
+ _memcpy(i6 | 0, i7 | 0, (HEAP32[i4 >> 2] | 0) * 36 | 0) | 0;
+ __Z6b2FreePv(i7);
+ i6 = HEAP32[i4 >> 2] | 0;
+ i7 = (HEAP32[i3 >> 2] | 0) + -1 | 0;
+ i5 = HEAP32[i5 >> 2] | 0;
+ if ((i6 | 0) < (i7 | 0)) {
+ i7 = i6;
+ while (1) {
+ i6 = i7 + 1 | 0;
+ HEAP32[i5 + (i7 * 36 | 0) + 20 >> 2] = i6;
+ HEAP32[i5 + (i7 * 36 | 0) + 32 >> 2] = -1;
+ i7 = (HEAP32[i3 >> 2] | 0) + -1 | 0;
+ if ((i6 | 0) < (i7 | 0)) {
+ i7 = i6;
+ } else {
+ break;
+ }
+ }
+ }
+ HEAP32[i5 + (i7 * 36 | 0) + 20 >> 2] = -1;
+ HEAP32[i5 + (((HEAP32[i3 >> 2] | 0) + -1 | 0) * 36 | 0) + 32 >> 2] = -1;
+ i3 = HEAP32[i4 >> 2] | 0;
+ HEAP32[i2 >> 2] = i3;
+ } else {
+ i4 = i5 + 8 | 0;
+ i5 = HEAP32[i5 + 4 >> 2] | 0;
+ }
+ i7 = i5 + (i3 * 36 | 0) + 20 | 0;
+ HEAP32[i2 >> 2] = HEAP32[i7 >> 2];
+ HEAP32[i7 >> 2] = -1;
+ HEAP32[i5 + (i3 * 36 | 0) + 24 >> 2] = -1;
+ HEAP32[i5 + (i3 * 36 | 0) + 28 >> 2] = -1;
+ HEAP32[i5 + (i3 * 36 | 0) + 32 >> 2] = 0;
+ HEAP32[i5 + (i3 * 36 | 0) + 16 >> 2] = 0;
+ HEAP32[i4 >> 2] = (HEAP32[i4 >> 2] | 0) + 1;
+ STACKTOP = i1;
+ return i3 | 0;
+}
+function __ZN9b2Fixture6CreateEP16b2BlockAllocatorP6b2BodyPK12b2FixtureDef(i1, i5, i4, i3) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i6 = 0, i7 = 0, d8 = 0.0;
+ i2 = STACKTOP;
+ HEAP32[i1 + 40 >> 2] = HEAP32[i3 + 4 >> 2];
+ HEAPF32[i1 + 16 >> 2] = +HEAPF32[i3 + 8 >> 2];
+ HEAPF32[i1 + 20 >> 2] = +HEAPF32[i3 + 12 >> 2];
+ HEAP32[i1 + 8 >> 2] = i4;
+ HEAP32[i1 + 4 >> 2] = 0;
+ i4 = i1 + 32 | 0;
+ i6 = i3 + 22 | 0;
+ HEAP16[i4 + 0 >> 1] = HEAP16[i6 + 0 >> 1] | 0;
+ HEAP16[i4 + 2 >> 1] = HEAP16[i6 + 2 >> 1] | 0;
+ HEAP16[i4 + 4 >> 1] = HEAP16[i6 + 4 >> 1] | 0;
+ HEAP8[i1 + 38 | 0] = HEAP8[i3 + 20 | 0] | 0;
+ i4 = HEAP32[i3 >> 2] | 0;
+ i4 = FUNCTION_TABLE_iii[HEAP32[(HEAP32[i4 >> 2] | 0) + 8 >> 2] & 3](i4, i5) | 0;
+ HEAP32[i1 + 12 >> 2] = i4;
+ i4 = FUNCTION_TABLE_ii[HEAP32[(HEAP32[i4 >> 2] | 0) + 12 >> 2] & 3](i4) | 0;
+ i6 = __ZN16b2BlockAllocator8AllocateEi(i5, i4 * 28 | 0) | 0;
+ i5 = i1 + 24 | 0;
+ HEAP32[i5 >> 2] = i6;
+ if ((i4 | 0) > 0) {
+ i7 = 0;
+ } else {
+ i7 = i1 + 28 | 0;
+ HEAP32[i7 >> 2] = 0;
+ i7 = i3 + 16 | 0;
+ d8 = +HEAPF32[i7 >> 2];
+ HEAPF32[i1 >> 2] = d8;
+ STACKTOP = i2;
+ return;
+ }
+ do {
+ HEAP32[i6 + (i7 * 28 | 0) + 16 >> 2] = 0;
+ i6 = HEAP32[i5 >> 2] | 0;
+ HEAP32[i6 + (i7 * 28 | 0) + 24 >> 2] = -1;
+ i7 = i7 + 1 | 0;
+ } while ((i7 | 0) != (i4 | 0));
+ i7 = i1 + 28 | 0;
+ HEAP32[i7 >> 2] = 0;
+ i7 = i3 + 16 | 0;
+ d8 = +HEAPF32[i7 >> 2];
+ HEAPF32[i1 >> 2] = d8;
+ STACKTOP = i2;
+ return;
+}
+function __Z19b2ClipSegmentToLineP12b2ClipVertexPKS_RK6b2Vec2fi(i4, i1, i5, d9, i2) {
+ i4 = i4 | 0;
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ d9 = +d9;
+ i2 = i2 | 0;
+ var i3 = 0, i6 = 0, d7 = 0.0, i8 = 0, i10 = 0, d11 = 0.0, d12 = 0.0, i13 = 0;
+ i3 = STACKTOP;
+ d12 = +HEAPF32[i5 >> 2];
+ d11 = +HEAPF32[i5 + 4 >> 2];
+ i5 = i1 + 4 | 0;
+ d7 = d12 * +HEAPF32[i1 >> 2] + d11 * +HEAPF32[i5 >> 2] - d9;
+ i6 = i1 + 12 | 0;
+ i8 = i1 + 16 | 0;
+ d9 = d12 * +HEAPF32[i6 >> 2] + d11 * +HEAPF32[i8 >> 2] - d9;
+ if (!(d7 <= 0.0)) {
+ i10 = 0;
+ } else {
+ HEAP32[i4 + 0 >> 2] = HEAP32[i1 + 0 >> 2];
+ HEAP32[i4 + 4 >> 2] = HEAP32[i1 + 4 >> 2];
+ HEAP32[i4 + 8 >> 2] = HEAP32[i1 + 8 >> 2];
+ i10 = 1;
+ }
+ if (d9 <= 0.0) {
+ i13 = i10 + 1 | 0;
+ i10 = i4 + (i10 * 12 | 0) | 0;
+ HEAP32[i10 + 0 >> 2] = HEAP32[i6 + 0 >> 2];
+ HEAP32[i10 + 4 >> 2] = HEAP32[i6 + 4 >> 2];
+ HEAP32[i10 + 8 >> 2] = HEAP32[i6 + 8 >> 2];
+ i10 = i13;
+ }
+ if (!(d7 * d9 < 0.0)) {
+ i13 = i10;
+ STACKTOP = i3;
+ return i13 | 0;
+ }
+ d9 = d7 / (d7 - d9);
+ d11 = +HEAPF32[i1 >> 2];
+ d12 = +HEAPF32[i5 >> 2];
+ d11 = +(d11 + d9 * (+HEAPF32[i6 >> 2] - d11));
+ d12 = +(d12 + d9 * (+HEAPF32[i8 >> 2] - d12));
+ i13 = i4 + (i10 * 12 | 0) | 0;
+ HEAPF32[i13 >> 2] = d11;
+ HEAPF32[i13 + 4 >> 2] = d12;
+ i13 = i4 + (i10 * 12 | 0) + 8 | 0;
+ HEAP8[i13] = i2;
+ HEAP8[i13 + 1 | 0] = HEAP8[i1 + 9 | 0] | 0;
+ HEAP8[i13 + 2 | 0] = 0;
+ HEAP8[i13 + 3 | 0] = 1;
+ i13 = i10 + 1 | 0;
+ STACKTOP = i3;
+ return i13 | 0;
+}
+function __Z16b2CollideCirclesP10b2ManifoldPK13b2CircleShapeRK11b2TransformS3_S6_(i1, i7, i8, i6, i9) {
+ i1 = i1 | 0;
+ i7 = i7 | 0;
+ i8 = i8 | 0;
+ i6 = i6 | 0;
+ i9 = i9 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, d10 = 0.0, d11 = 0.0, d12 = 0.0, d13 = 0.0, d14 = 0.0, d15 = 0.0, d16 = 0.0, d17 = 0.0, d18 = 0.0;
+ i2 = STACKTOP;
+ i4 = i1 + 60 | 0;
+ HEAP32[i4 >> 2] = 0;
+ i3 = i7 + 12 | 0;
+ d10 = +HEAPF32[i8 + 12 >> 2];
+ d14 = +HEAPF32[i3 >> 2];
+ d13 = +HEAPF32[i8 + 8 >> 2];
+ d11 = +HEAPF32[i7 + 16 >> 2];
+ i5 = i6 + 12 | 0;
+ d16 = +HEAPF32[i9 + 12 >> 2];
+ d18 = +HEAPF32[i5 >> 2];
+ d17 = +HEAPF32[i9 + 8 >> 2];
+ d15 = +HEAPF32[i6 + 16 >> 2];
+ d12 = +HEAPF32[i9 >> 2] + (d16 * d18 - d17 * d15) - (+HEAPF32[i8 >> 2] + (d10 * d14 - d13 * d11));
+ d11 = d18 * d17 + d16 * d15 + +HEAPF32[i9 + 4 >> 2] - (d14 * d13 + d10 * d11 + +HEAPF32[i8 + 4 >> 2]);
+ d10 = +HEAPF32[i7 + 8 >> 2] + +HEAPF32[i6 + 8 >> 2];
+ if (d12 * d12 + d11 * d11 > d10 * d10) {
+ STACKTOP = i2;
+ return;
+ }
+ HEAP32[i1 + 56 >> 2] = 0;
+ i9 = i3;
+ i8 = HEAP32[i9 + 4 >> 2] | 0;
+ i7 = i1 + 48 | 0;
+ HEAP32[i7 >> 2] = HEAP32[i9 >> 2];
+ HEAP32[i7 + 4 >> 2] = i8;
+ HEAPF32[i1 + 40 >> 2] = 0.0;
+ HEAPF32[i1 + 44 >> 2] = 0.0;
+ HEAP32[i4 >> 2] = 1;
+ i7 = i5;
+ i8 = HEAP32[i7 + 4 >> 2] | 0;
+ i9 = i1;
+ HEAP32[i9 >> 2] = HEAP32[i7 >> 2];
+ HEAP32[i9 + 4 >> 2] = i8;
+ HEAP32[i1 + 16 >> 2] = 0;
+ STACKTOP = i2;
+ return;
+}
+function __ZNK14b2PolygonShape11ComputeAABBEP6b2AABBRK11b2Transformi(i1, i2, i7, i3) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ i7 = i7 | 0;
+ i3 = i3 | 0;
+ var d4 = 0.0, d5 = 0.0, d6 = 0.0, d8 = 0.0, d9 = 0.0, d10 = 0.0, d11 = 0.0, d12 = 0.0, i13 = 0, d14 = 0.0, d15 = 0.0, d16 = 0.0;
+ i3 = STACKTOP;
+ d4 = +HEAPF32[i7 + 12 >> 2];
+ d15 = +HEAPF32[i1 + 20 >> 2];
+ d5 = +HEAPF32[i7 + 8 >> 2];
+ d12 = +HEAPF32[i1 + 24 >> 2];
+ d6 = +HEAPF32[i7 >> 2];
+ d9 = d6 + (d4 * d15 - d5 * d12);
+ d8 = +HEAPF32[i7 + 4 >> 2];
+ d12 = d15 * d5 + d4 * d12 + d8;
+ i7 = HEAP32[i1 + 148 >> 2] | 0;
+ if ((i7 | 0) > 1) {
+ d10 = d9;
+ d11 = d12;
+ i13 = 1;
+ do {
+ d16 = +HEAPF32[i1 + (i13 << 3) + 20 >> 2];
+ d14 = +HEAPF32[i1 + (i13 << 3) + 24 >> 2];
+ d15 = d6 + (d4 * d16 - d5 * d14);
+ d14 = d16 * d5 + d4 * d14 + d8;
+ d10 = d10 < d15 ? d10 : d15;
+ d11 = d11 < d14 ? d11 : d14;
+ d9 = d9 > d15 ? d9 : d15;
+ d12 = d12 > d14 ? d12 : d14;
+ i13 = i13 + 1 | 0;
+ } while ((i13 | 0) < (i7 | 0));
+ } else {
+ d11 = d12;
+ d10 = d9;
+ }
+ d16 = +HEAPF32[i1 + 8 >> 2];
+ d14 = +(d10 - d16);
+ d15 = +(d11 - d16);
+ i13 = i2;
+ HEAPF32[i13 >> 2] = d14;
+ HEAPF32[i13 + 4 >> 2] = d15;
+ d15 = +(d9 + d16);
+ d16 = +(d12 + d16);
+ i13 = i2 + 8 | 0;
+ HEAPF32[i13 >> 2] = d15;
+ HEAPF32[i13 + 4 >> 2] = d16;
+ STACKTOP = i3;
+ return;
+}
+function __ZNK10__cxxabiv120__si_class_type_info16search_above_dstEPNS_19__dynamic_cast_infoEPKvS4_ib(i5, i1, i4, i6, i3, i7) {
+ i5 = i5 | 0;
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i6 = i6 | 0;
+ i3 = i3 | 0;
+ i7 = i7 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ if ((i5 | 0) != (HEAP32[i1 + 8 >> 2] | 0)) {
+ i5 = HEAP32[i5 + 8 >> 2] | 0;
+ FUNCTION_TABLE_viiiiii[HEAP32[(HEAP32[i5 >> 2] | 0) + 20 >> 2] & 3](i5, i1, i4, i6, i3, i7);
+ STACKTOP = i2;
+ return;
+ }
+ HEAP8[i1 + 53 | 0] = 1;
+ if ((HEAP32[i1 + 4 >> 2] | 0) != (i6 | 0)) {
+ STACKTOP = i2;
+ return;
+ }
+ HEAP8[i1 + 52 | 0] = 1;
+ i5 = i1 + 16 | 0;
+ i6 = HEAP32[i5 >> 2] | 0;
+ if ((i6 | 0) == 0) {
+ HEAP32[i5 >> 2] = i4;
+ HEAP32[i1 + 24 >> 2] = i3;
+ HEAP32[i1 + 36 >> 2] = 1;
+ if (!((HEAP32[i1 + 48 >> 2] | 0) == 1 & (i3 | 0) == 1)) {
+ STACKTOP = i2;
+ return;
+ }
+ HEAP8[i1 + 54 | 0] = 1;
+ STACKTOP = i2;
+ return;
+ }
+ if ((i6 | 0) != (i4 | 0)) {
+ i7 = i1 + 36 | 0;
+ HEAP32[i7 >> 2] = (HEAP32[i7 >> 2] | 0) + 1;
+ HEAP8[i1 + 54 | 0] = 1;
+ STACKTOP = i2;
+ return;
+ }
+ i4 = i1 + 24 | 0;
+ i5 = HEAP32[i4 >> 2] | 0;
+ if ((i5 | 0) == 2) {
+ HEAP32[i4 >> 2] = i3;
+ } else {
+ i3 = i5;
+ }
+ if (!((HEAP32[i1 + 48 >> 2] | 0) == 1 & (i3 | 0) == 1)) {
+ STACKTOP = i2;
+ return;
+ }
+ HEAP8[i1 + 54 | 0] = 1;
+ STACKTOP = i2;
+ return;
+}
+function __ZN6b2Body13CreateFixtureEPK12b2FixtureDef(i1, i5) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i6 = 0;
+ i3 = STACKTOP;
+ i2 = i1 + 88 | 0;
+ i4 = HEAP32[i2 >> 2] | 0;
+ if ((HEAP32[i4 + 102868 >> 2] & 2 | 0) != 0) {
+ ___assert_fail(1776, 1520, 153, 1808);
+ }
+ i6 = __ZN16b2BlockAllocator8AllocateEi(i4, 44) | 0;
+ if ((i6 | 0) == 0) {
+ i6 = 0;
+ } else {
+ __ZN9b2FixtureC2Ev(i6);
+ }
+ __ZN9b2Fixture6CreateEP16b2BlockAllocatorP6b2BodyPK12b2FixtureDef(i6, i4, i1, i5);
+ if (!((HEAP16[i1 + 4 >> 1] & 32) == 0)) {
+ __ZN9b2Fixture13CreateProxiesEP12b2BroadPhaseRK11b2Transform(i6, (HEAP32[i2 >> 2] | 0) + 102872 | 0, i1 + 12 | 0);
+ }
+ i5 = i1 + 100 | 0;
+ HEAP32[i6 + 4 >> 2] = HEAP32[i5 >> 2];
+ HEAP32[i5 >> 2] = i6;
+ i5 = i1 + 104 | 0;
+ HEAP32[i5 >> 2] = (HEAP32[i5 >> 2] | 0) + 1;
+ HEAP32[i6 + 8 >> 2] = i1;
+ if (!(+HEAPF32[i6 >> 2] > 0.0)) {
+ i5 = HEAP32[i2 >> 2] | 0;
+ i5 = i5 + 102868 | 0;
+ i4 = HEAP32[i5 >> 2] | 0;
+ i4 = i4 | 1;
+ HEAP32[i5 >> 2] = i4;
+ STACKTOP = i3;
+ return i6 | 0;
+ }
+ __ZN6b2Body13ResetMassDataEv(i1);
+ i5 = HEAP32[i2 >> 2] | 0;
+ i5 = i5 + 102868 | 0;
+ i4 = HEAP32[i5 >> 2] | 0;
+ i4 = i4 | 1;
+ HEAP32[i5 >> 2] = i4;
+ STACKTOP = i3;
+ return i6 | 0;
+}
+function __Z13b2TestOverlapPK7b2ShapeiS1_iRK11b2TransformS4_(i6, i5, i4, i3, i2, i1) {
+ i6 = i6 | 0;
+ i5 = i5 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i7 = 0, i8 = 0, i9 = 0, i10 = 0;
+ i8 = STACKTOP;
+ STACKTOP = STACKTOP + 128 | 0;
+ i9 = i8 + 36 | 0;
+ i10 = i8 + 24 | 0;
+ i7 = i8;
+ HEAP32[i9 + 16 >> 2] = 0;
+ HEAP32[i9 + 20 >> 2] = 0;
+ HEAPF32[i9 + 24 >> 2] = 0.0;
+ HEAP32[i9 + 44 >> 2] = 0;
+ HEAP32[i9 + 48 >> 2] = 0;
+ HEAPF32[i9 + 52 >> 2] = 0.0;
+ __ZN15b2DistanceProxy3SetEPK7b2Shapei(i9, i6, i5);
+ __ZN15b2DistanceProxy3SetEPK7b2Shapei(i9 + 28 | 0, i4, i3);
+ i6 = i9 + 56 | 0;
+ HEAP32[i6 + 0 >> 2] = HEAP32[i2 + 0 >> 2];
+ HEAP32[i6 + 4 >> 2] = HEAP32[i2 + 4 >> 2];
+ HEAP32[i6 + 8 >> 2] = HEAP32[i2 + 8 >> 2];
+ HEAP32[i6 + 12 >> 2] = HEAP32[i2 + 12 >> 2];
+ i6 = i9 + 72 | 0;
+ HEAP32[i6 + 0 >> 2] = HEAP32[i1 + 0 >> 2];
+ HEAP32[i6 + 4 >> 2] = HEAP32[i1 + 4 >> 2];
+ HEAP32[i6 + 8 >> 2] = HEAP32[i1 + 8 >> 2];
+ HEAP32[i6 + 12 >> 2] = HEAP32[i1 + 12 >> 2];
+ HEAP8[i9 + 88 | 0] = 1;
+ HEAP16[i10 + 4 >> 1] = 0;
+ __Z10b2DistanceP16b2DistanceOutputP14b2SimplexCachePK15b2DistanceInput(i7, i10, i9);
+ STACKTOP = i8;
+ return +HEAPF32[i7 + 16 >> 2] < 11920928955078125.0e-22 | 0;
+}
+function __ZNK10__cxxabiv117__class_type_info16search_above_dstEPNS_19__dynamic_cast_infoEPKvS4_ib(i6, i1, i4, i5, i2, i3) {
+ i6 = i6 | 0;
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ i2 = i2 | 0;
+ i3 = i3 | 0;
+ i3 = STACKTOP;
+ if ((HEAP32[i1 + 8 >> 2] | 0) != (i6 | 0)) {
+ STACKTOP = i3;
+ return;
+ }
+ HEAP8[i1 + 53 | 0] = 1;
+ if ((HEAP32[i1 + 4 >> 2] | 0) != (i5 | 0)) {
+ STACKTOP = i3;
+ return;
+ }
+ HEAP8[i1 + 52 | 0] = 1;
+ i5 = i1 + 16 | 0;
+ i6 = HEAP32[i5 >> 2] | 0;
+ if ((i6 | 0) == 0) {
+ HEAP32[i5 >> 2] = i4;
+ HEAP32[i1 + 24 >> 2] = i2;
+ HEAP32[i1 + 36 >> 2] = 1;
+ if (!((HEAP32[i1 + 48 >> 2] | 0) == 1 & (i2 | 0) == 1)) {
+ STACKTOP = i3;
+ return;
+ }
+ HEAP8[i1 + 54 | 0] = 1;
+ STACKTOP = i3;
+ return;
+ }
+ if ((i6 | 0) != (i4 | 0)) {
+ i6 = i1 + 36 | 0;
+ HEAP32[i6 >> 2] = (HEAP32[i6 >> 2] | 0) + 1;
+ HEAP8[i1 + 54 | 0] = 1;
+ STACKTOP = i3;
+ return;
+ }
+ i4 = i1 + 24 | 0;
+ i5 = HEAP32[i4 >> 2] | 0;
+ if ((i5 | 0) == 2) {
+ HEAP32[i4 >> 2] = i2;
+ } else {
+ i2 = i5;
+ }
+ if (!((HEAP32[i1 + 48 >> 2] | 0) == 1 & (i2 | 0) == 1)) {
+ STACKTOP = i3;
+ return;
+ }
+ HEAP8[i1 + 54 | 0] = 1;
+ STACKTOP = i3;
+ return;
+}
+function __ZNK11b2EdgeShape5CloneEP16b2BlockAllocator(i1, i3) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i4 = 0, i5 = 0, i6 = 0;
+ i2 = STACKTOP;
+ i3 = __ZN16b2BlockAllocator8AllocateEi(i3, 48) | 0;
+ if ((i3 | 0) == 0) {
+ i3 = 0;
+ } else {
+ HEAP32[i3 >> 2] = 240;
+ HEAP32[i3 + 4 >> 2] = 1;
+ HEAPF32[i3 + 8 >> 2] = .009999999776482582;
+ i4 = i3 + 28 | 0;
+ HEAP32[i4 + 0 >> 2] = 0;
+ HEAP32[i4 + 4 >> 2] = 0;
+ HEAP32[i4 + 8 >> 2] = 0;
+ HEAP32[i4 + 12 >> 2] = 0;
+ HEAP16[i4 + 16 >> 1] = 0;
+ }
+ i6 = i1 + 4 | 0;
+ i5 = HEAP32[i6 + 4 >> 2] | 0;
+ i4 = i3 + 4 | 0;
+ HEAP32[i4 >> 2] = HEAP32[i6 >> 2];
+ HEAP32[i4 + 4 >> 2] = i5;
+ i4 = i3 + 12 | 0;
+ i1 = i1 + 12 | 0;
+ HEAP32[i4 + 0 >> 2] = HEAP32[i1 + 0 >> 2];
+ HEAP32[i4 + 4 >> 2] = HEAP32[i1 + 4 >> 2];
+ HEAP32[i4 + 8 >> 2] = HEAP32[i1 + 8 >> 2];
+ HEAP32[i4 + 12 >> 2] = HEAP32[i1 + 12 >> 2];
+ HEAP32[i4 + 16 >> 2] = HEAP32[i1 + 16 >> 2];
+ HEAP32[i4 + 20 >> 2] = HEAP32[i1 + 20 >> 2];
+ HEAP32[i4 + 24 >> 2] = HEAP32[i1 + 24 >> 2];
+ HEAP32[i4 + 28 >> 2] = HEAP32[i1 + 28 >> 2];
+ HEAP16[i4 + 32 >> 1] = HEAP16[i1 + 32 >> 1] | 0;
+ STACKTOP = i2;
+ return i3 | 0;
+}
+function __ZN7b2WorldC2ERK6b2Vec2(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ var i3 = 0, i4 = 0, i5 = 0, i6 = 0;
+ i3 = STACKTOP;
+ __ZN16b2BlockAllocatorC2Ev(i1);
+ __ZN16b2StackAllocatorC2Ev(i1 + 68 | 0);
+ __ZN16b2ContactManagerC2Ev(i1 + 102872 | 0);
+ i6 = i1 + 102968 | 0;
+ HEAP32[i1 + 102980 >> 2] = 0;
+ HEAP32[i1 + 102984 >> 2] = 0;
+ i4 = i1 + 102952 | 0;
+ i5 = i1 + 102992 | 0;
+ HEAP32[i4 + 0 >> 2] = 0;
+ HEAP32[i4 + 4 >> 2] = 0;
+ HEAP32[i4 + 8 >> 2] = 0;
+ HEAP32[i4 + 12 >> 2] = 0;
+ HEAP8[i5] = 1;
+ HEAP8[i1 + 102993 | 0] = 1;
+ HEAP8[i1 + 102994 | 0] = 0;
+ HEAP8[i1 + 102995 | 0] = 1;
+ HEAP8[i1 + 102976 | 0] = 1;
+ i5 = i2;
+ i4 = HEAP32[i5 + 4 >> 2] | 0;
+ i2 = i6;
+ HEAP32[i2 >> 2] = HEAP32[i5 >> 2];
+ HEAP32[i2 + 4 >> 2] = i4;
+ HEAP32[i1 + 102868 >> 2] = 4;
+ HEAPF32[i1 + 102988 >> 2] = 0.0;
+ HEAP32[i1 + 102948 >> 2] = i1;
+ i2 = i1 + 102996 | 0;
+ HEAP32[i2 + 0 >> 2] = 0;
+ HEAP32[i2 + 4 >> 2] = 0;
+ HEAP32[i2 + 8 >> 2] = 0;
+ HEAP32[i2 + 12 >> 2] = 0;
+ HEAP32[i2 + 16 >> 2] = 0;
+ HEAP32[i2 + 20 >> 2] = 0;
+ HEAP32[i2 + 24 >> 2] = 0;
+ HEAP32[i2 + 28 >> 2] = 0;
+ STACKTOP = i3;
+ return;
+}
+function __ZNK10__cxxabiv117__class_type_info16search_below_dstEPNS_19__dynamic_cast_infoEPKvib(i6, i3, i4, i1, i2) {
+ i6 = i6 | 0;
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ var i5 = 0;
+ i2 = STACKTOP;
+ if ((HEAP32[i3 + 8 >> 2] | 0) == (i6 | 0)) {
+ if ((HEAP32[i3 + 4 >> 2] | 0) != (i4 | 0)) {
+ STACKTOP = i2;
+ return;
+ }
+ i3 = i3 + 28 | 0;
+ if ((HEAP32[i3 >> 2] | 0) == 1) {
+ STACKTOP = i2;
+ return;
+ }
+ HEAP32[i3 >> 2] = i1;
+ STACKTOP = i2;
+ return;
+ }
+ if ((HEAP32[i3 >> 2] | 0) != (i6 | 0)) {
+ STACKTOP = i2;
+ return;
+ }
+ if ((HEAP32[i3 + 16 >> 2] | 0) != (i4 | 0) ? (i5 = i3 + 20 | 0, (HEAP32[i5 >> 2] | 0) != (i4 | 0)) : 0) {
+ HEAP32[i3 + 32 >> 2] = i1;
+ HEAP32[i5 >> 2] = i4;
+ i6 = i3 + 40 | 0;
+ HEAP32[i6 >> 2] = (HEAP32[i6 >> 2] | 0) + 1;
+ if ((HEAP32[i3 + 36 >> 2] | 0) == 1 ? (HEAP32[i3 + 24 >> 2] | 0) == 2 : 0) {
+ HEAP8[i3 + 54 | 0] = 1;
+ }
+ HEAP32[i3 + 44 >> 2] = 4;
+ STACKTOP = i2;
+ return;
+ }
+ if ((i1 | 0) != 1) {
+ STACKTOP = i2;
+ return;
+ }
+ HEAP32[i3 + 32 >> 2] = 1;
+ STACKTOP = i2;
+ return;
+}
+function __ZN9b2Contact7DestroyEPS_P16b2BlockAllocator(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ var i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0;
+ i3 = STACKTOP;
+ if ((HEAP8[4200] | 0) == 0) {
+ ___assert_fail(4352, 4256, 103, 4376);
+ }
+ i4 = HEAP32[i1 + 48 >> 2] | 0;
+ if ((HEAP32[i1 + 124 >> 2] | 0) > 0) {
+ i7 = HEAP32[i4 + 8 >> 2] | 0;
+ i6 = i7 + 4 | 0;
+ i5 = HEAPU16[i6 >> 1] | 0;
+ if ((i5 & 2 | 0) == 0) {
+ HEAP16[i6 >> 1] = i5 | 2;
+ HEAPF32[i7 + 144 >> 2] = 0.0;
+ }
+ i7 = HEAP32[i1 + 52 >> 2] | 0;
+ i6 = HEAP32[i7 + 8 >> 2] | 0;
+ i5 = i6 + 4 | 0;
+ i8 = HEAPU16[i5 >> 1] | 0;
+ if ((i8 & 2 | 0) == 0) {
+ HEAP16[i5 >> 1] = i8 | 2;
+ HEAPF32[i6 + 144 >> 2] = 0.0;
+ }
+ } else {
+ i7 = HEAP32[i1 + 52 >> 2] | 0;
+ }
+ i4 = HEAP32[(HEAP32[i4 + 12 >> 2] | 0) + 4 >> 2] | 0;
+ i5 = HEAP32[(HEAP32[i7 + 12 >> 2] | 0) + 4 >> 2] | 0;
+ if ((i4 | 0) > -1 & (i5 | 0) < 4) {
+ FUNCTION_TABLE_vii[HEAP32[4008 + (i4 * 48 | 0) + (i5 * 12 | 0) + 4 >> 2] & 15](i1, i2);
+ STACKTOP = i3;
+ return;
+ } else {
+ ___assert_fail(4384, 4256, 114, 4376);
+ }
+}
+function __ZN9b2Fixture13CreateProxiesEP12b2BroadPhaseRK11b2Transform(i5, i4, i1) {
+ i5 = i5 | 0;
+ i4 = i4 | 0;
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0;
+ i2 = STACKTOP;
+ i3 = i5 + 28 | 0;
+ if ((HEAP32[i3 >> 2] | 0) != 0) {
+ ___assert_fail(2088, 2112, 124, 2144);
+ }
+ i6 = i5 + 12 | 0;
+ i8 = HEAP32[i6 >> 2] | 0;
+ i8 = FUNCTION_TABLE_ii[HEAP32[(HEAP32[i8 >> 2] | 0) + 12 >> 2] & 3](i8) | 0;
+ HEAP32[i3 >> 2] = i8;
+ if ((i8 | 0) <= 0) {
+ STACKTOP = i2;
+ return;
+ }
+ i7 = i5 + 24 | 0;
+ i8 = 0;
+ do {
+ i9 = HEAP32[i7 >> 2] | 0;
+ i10 = i9 + (i8 * 28 | 0) | 0;
+ i11 = HEAP32[i6 >> 2] | 0;
+ FUNCTION_TABLE_viiii[HEAP32[(HEAP32[i11 >> 2] | 0) + 24 >> 2] & 15](i11, i10, i1, i8);
+ HEAP32[i9 + (i8 * 28 | 0) + 24 >> 2] = __ZN12b2BroadPhase11CreateProxyERK6b2AABBPv(i4, i10, i10) | 0;
+ HEAP32[i9 + (i8 * 28 | 0) + 16 >> 2] = i5;
+ HEAP32[i9 + (i8 * 28 | 0) + 20 >> 2] = i8;
+ i8 = i8 + 1 | 0;
+ } while ((i8 | 0) < (HEAP32[i3 >> 2] | 0));
+ STACKTOP = i2;
+ return;
+}
+function __ZNK10__cxxabiv117__class_type_info9can_catchEPKNS_16__shim_type_infoERPv(i1, i5, i4) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ i4 = i4 | 0;
+ var i2 = 0, i3 = 0, i6 = 0, i7 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 64 | 0;
+ i3 = i2;
+ if ((i1 | 0) == (i5 | 0)) {
+ i7 = 1;
+ STACKTOP = i2;
+ return i7 | 0;
+ }
+ if ((i5 | 0) == 0) {
+ i7 = 0;
+ STACKTOP = i2;
+ return i7 | 0;
+ }
+ i5 = ___dynamic_cast(i5, 6952, 7008, 0) | 0;
+ if ((i5 | 0) == 0) {
+ i7 = 0;
+ STACKTOP = i2;
+ return i7 | 0;
+ }
+ i7 = i3 + 0 | 0;
+ i6 = i7 + 56 | 0;
+ do {
+ HEAP32[i7 >> 2] = 0;
+ i7 = i7 + 4 | 0;
+ } while ((i7 | 0) < (i6 | 0));
+ HEAP32[i3 >> 2] = i5;
+ HEAP32[i3 + 8 >> 2] = i1;
+ HEAP32[i3 + 12 >> 2] = -1;
+ HEAP32[i3 + 48 >> 2] = 1;
+ FUNCTION_TABLE_viiii[HEAP32[(HEAP32[i5 >> 2] | 0) + 28 >> 2] & 15](i5, i3, HEAP32[i4 >> 2] | 0, 1);
+ if ((HEAP32[i3 + 24 >> 2] | 0) != 1) {
+ i7 = 0;
+ STACKTOP = i2;
+ return i7 | 0;
+ }
+ HEAP32[i4 >> 2] = HEAP32[i3 + 16 >> 2];
+ i7 = 1;
+ STACKTOP = i2;
+ return i7 | 0;
+}
+function __ZN8b2IslandC2EiiiP16b2StackAllocatorP17b2ContactListener(i1, i4, i3, i2, i5, i6) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i5 = i5 | 0;
+ i6 = i6 | 0;
+ var i7 = 0, i8 = 0;
+ i7 = STACKTOP;
+ i8 = i1 + 40 | 0;
+ HEAP32[i8 >> 2] = i4;
+ HEAP32[i1 + 44 >> 2] = i3;
+ HEAP32[i1 + 48 >> 2] = i2;
+ HEAP32[i1 + 28 >> 2] = 0;
+ HEAP32[i1 + 36 >> 2] = 0;
+ HEAP32[i1 + 32 >> 2] = 0;
+ HEAP32[i1 >> 2] = i5;
+ HEAP32[i1 + 4 >> 2] = i6;
+ HEAP32[i1 + 8 >> 2] = __ZN16b2StackAllocator8AllocateEi(i5, i4 << 2) | 0;
+ HEAP32[i1 + 12 >> 2] = __ZN16b2StackAllocator8AllocateEi(HEAP32[i1 >> 2] | 0, i3 << 2) | 0;
+ HEAP32[i1 + 16 >> 2] = __ZN16b2StackAllocator8AllocateEi(HEAP32[i1 >> 2] | 0, i2 << 2) | 0;
+ HEAP32[i1 + 24 >> 2] = __ZN16b2StackAllocator8AllocateEi(HEAP32[i1 >> 2] | 0, (HEAP32[i8 >> 2] | 0) * 12 | 0) | 0;
+ HEAP32[i1 + 20 >> 2] = __ZN16b2StackAllocator8AllocateEi(HEAP32[i1 >> 2] | 0, (HEAP32[i8 >> 2] | 0) * 12 | 0) | 0;
+ STACKTOP = i7;
+ return;
+}
+function __ZNK11b2EdgeShape11ComputeAABBEP6b2AABBRK11b2Transformi(i8, i1, i10, i2) {
+ i8 = i8 | 0;
+ i1 = i1 | 0;
+ i10 = i10 | 0;
+ i2 = i2 | 0;
+ var d3 = 0.0, d4 = 0.0, d5 = 0.0, d6 = 0.0, d7 = 0.0, d9 = 0.0, d11 = 0.0, d12 = 0.0;
+ i2 = STACKTOP;
+ d7 = +HEAPF32[i10 + 12 >> 2];
+ d9 = +HEAPF32[i8 + 12 >> 2];
+ d11 = +HEAPF32[i10 + 8 >> 2];
+ d3 = +HEAPF32[i8 + 16 >> 2];
+ d6 = +HEAPF32[i10 >> 2];
+ d5 = d6 + (d7 * d9 - d11 * d3);
+ d12 = +HEAPF32[i10 + 4 >> 2];
+ d3 = d9 * d11 + d7 * d3 + d12;
+ d9 = +HEAPF32[i8 + 20 >> 2];
+ d4 = +HEAPF32[i8 + 24 >> 2];
+ d6 = d6 + (d7 * d9 - d11 * d4);
+ d4 = d12 + (d11 * d9 + d7 * d4);
+ d7 = +HEAPF32[i8 + 8 >> 2];
+ d9 = +((d5 < d6 ? d5 : d6) - d7);
+ d12 = +((d3 < d4 ? d3 : d4) - d7);
+ i10 = i1;
+ HEAPF32[i10 >> 2] = d9;
+ HEAPF32[i10 + 4 >> 2] = d12;
+ d5 = +(d7 + (d5 > d6 ? d5 : d6));
+ d12 = +(d7 + (d3 > d4 ? d3 : d4));
+ i10 = i1 + 8 | 0;
+ HEAPF32[i10 >> 2] = d5;
+ HEAPF32[i10 + 4 >> 2] = d12;
+ STACKTOP = i2;
+ return;
+}
+function __ZNK14b2PolygonShape9TestPointERK11b2TransformRK6b2Vec2(i2, i3, i6) {
+ i2 = i2 | 0;
+ i3 = i3 | 0;
+ i6 = i6 | 0;
+ var i1 = 0, d4 = 0.0, d5 = 0.0, i7 = 0, d8 = 0.0, d9 = 0.0, d10 = 0.0;
+ i1 = STACKTOP;
+ d8 = +HEAPF32[i6 >> 2] - +HEAPF32[i3 >> 2];
+ d9 = +HEAPF32[i6 + 4 >> 2] - +HEAPF32[i3 + 4 >> 2];
+ d10 = +HEAPF32[i3 + 12 >> 2];
+ d5 = +HEAPF32[i3 + 8 >> 2];
+ d4 = d8 * d10 + d9 * d5;
+ d5 = d10 * d9 - d8 * d5;
+ i3 = HEAP32[i2 + 148 >> 2] | 0;
+ if ((i3 | 0) > 0) {
+ i6 = 0;
+ } else {
+ i7 = 1;
+ STACKTOP = i1;
+ return i7 | 0;
+ }
+ while (1) {
+ i7 = i6 + 1 | 0;
+ if ((d4 - +HEAPF32[i2 + (i6 << 3) + 20 >> 2]) * +HEAPF32[i2 + (i6 << 3) + 84 >> 2] + (d5 - +HEAPF32[i2 + (i6 << 3) + 24 >> 2]) * +HEAPF32[i2 + (i6 << 3) + 88 >> 2] > 0.0) {
+ i3 = 0;
+ i2 = 4;
+ break;
+ }
+ if ((i7 | 0) < (i3 | 0)) {
+ i6 = i7;
+ } else {
+ i3 = 1;
+ i2 = 4;
+ break;
+ }
+ }
+ if ((i2 | 0) == 4) {
+ STACKTOP = i1;
+ return i3 | 0;
+ }
+ return 0;
+}
+function __ZN16b2StackAllocator8AllocateEi(i4, i5) {
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i6 = 0, i7 = 0, i8 = 0;
+ i2 = STACKTOP;
+ i3 = i4 + 102796 | 0;
+ i6 = HEAP32[i3 >> 2] | 0;
+ if ((i6 | 0) >= 32) {
+ ___assert_fail(3896, 3808, 38, 3936);
+ }
+ i1 = i4 + (i6 * 12 | 0) + 102412 | 0;
+ HEAP32[i4 + (i6 * 12 | 0) + 102416 >> 2] = i5;
+ i7 = i4 + 102400 | 0;
+ i8 = HEAP32[i7 >> 2] | 0;
+ if ((i8 + i5 | 0) > 102400) {
+ HEAP32[i1 >> 2] = __Z7b2Alloci(i5) | 0;
+ HEAP8[i4 + (i6 * 12 | 0) + 102420 | 0] = 1;
+ } else {
+ HEAP32[i1 >> 2] = i4 + i8;
+ HEAP8[i4 + (i6 * 12 | 0) + 102420 | 0] = 0;
+ HEAP32[i7 >> 2] = (HEAP32[i7 >> 2] | 0) + i5;
+ }
+ i6 = i4 + 102404 | 0;
+ i5 = (HEAP32[i6 >> 2] | 0) + i5 | 0;
+ HEAP32[i6 >> 2] = i5;
+ i4 = i4 + 102408 | 0;
+ i6 = HEAP32[i4 >> 2] | 0;
+ HEAP32[i4 >> 2] = (i6 | 0) > (i5 | 0) ? i6 : i5;
+ HEAP32[i3 >> 2] = (HEAP32[i3 >> 2] | 0) + 1;
+ STACKTOP = i2;
+ return HEAP32[i1 >> 2] | 0;
+}
+function __ZN12b2BroadPhase13QueryCallbackEi(i5, i1) {
+ i5 = i5 | 0;
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i6 = 0, i7 = 0, i8 = 0;
+ i2 = STACKTOP;
+ i4 = i5 + 56 | 0;
+ i7 = HEAP32[i4 >> 2] | 0;
+ if ((i7 | 0) == (i1 | 0)) {
+ STACKTOP = i2;
+ return 1;
+ }
+ i3 = i5 + 52 | 0;
+ i6 = HEAP32[i3 >> 2] | 0;
+ i8 = i5 + 48 | 0;
+ i5 = i5 + 44 | 0;
+ if ((i6 | 0) == (HEAP32[i8 >> 2] | 0)) {
+ i7 = HEAP32[i5 >> 2] | 0;
+ HEAP32[i8 >> 2] = i6 << 1;
+ i6 = __Z7b2Alloci(i6 * 24 | 0) | 0;
+ HEAP32[i5 >> 2] = i6;
+ _memcpy(i6 | 0, i7 | 0, (HEAP32[i3 >> 2] | 0) * 12 | 0) | 0;
+ __Z6b2FreePv(i7);
+ i7 = HEAP32[i4 >> 2] | 0;
+ i6 = HEAP32[i3 >> 2] | 0;
+ }
+ i5 = HEAP32[i5 >> 2] | 0;
+ HEAP32[i5 + (i6 * 12 | 0) >> 2] = (i7 | 0) > (i1 | 0) ? i1 : i7;
+ i4 = HEAP32[i4 >> 2] | 0;
+ HEAP32[i5 + ((HEAP32[i3 >> 2] | 0) * 12 | 0) + 4 >> 2] = (i4 | 0) < (i1 | 0) ? i1 : i4;
+ HEAP32[i3 >> 2] = (HEAP32[i3 >> 2] | 0) + 1;
+ STACKTOP = i2;
+ return 1;
+}
+function __ZNK10__cxxabiv120__si_class_type_info27has_unambiguous_public_baseEPNS_19__dynamic_cast_infoEPvi(i5, i4, i3, i1) {
+ i5 = i5 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i1 = i1 | 0;
+ var i2 = 0, i6 = 0;
+ i2 = STACKTOP;
+ if ((i5 | 0) != (HEAP32[i4 + 8 >> 2] | 0)) {
+ i6 = HEAP32[i5 + 8 >> 2] | 0;
+ FUNCTION_TABLE_viiii[HEAP32[(HEAP32[i6 >> 2] | 0) + 28 >> 2] & 15](i6, i4, i3, i1);
+ STACKTOP = i2;
+ return;
+ }
+ i5 = i4 + 16 | 0;
+ i6 = HEAP32[i5 >> 2] | 0;
+ if ((i6 | 0) == 0) {
+ HEAP32[i5 >> 2] = i3;
+ HEAP32[i4 + 24 >> 2] = i1;
+ HEAP32[i4 + 36 >> 2] = 1;
+ STACKTOP = i2;
+ return;
+ }
+ if ((i6 | 0) != (i3 | 0)) {
+ i6 = i4 + 36 | 0;
+ HEAP32[i6 >> 2] = (HEAP32[i6 >> 2] | 0) + 1;
+ HEAP32[i4 + 24 >> 2] = 2;
+ HEAP8[i4 + 54 | 0] = 1;
+ STACKTOP = i2;
+ return;
+ }
+ i3 = i4 + 24 | 0;
+ if ((HEAP32[i3 >> 2] | 0) != 2) {
+ STACKTOP = i2;
+ return;
+ }
+ HEAP32[i3 >> 2] = i1;
+ STACKTOP = i2;
+ return;
+}
+function __ZN6b2Body19SynchronizeFixturesEv(i5) {
+ i5 = i5 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, d6 = 0.0, d7 = 0.0, d8 = 0.0, d9 = 0.0, d10 = 0.0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = i1;
+ d8 = +HEAPF32[i5 + 52 >> 2];
+ d9 = +Math_sin(+d8);
+ HEAPF32[i3 + 8 >> 2] = d9;
+ d8 = +Math_cos(+d8);
+ HEAPF32[i3 + 12 >> 2] = d8;
+ d10 = +HEAPF32[i5 + 28 >> 2];
+ d6 = +HEAPF32[i5 + 32 >> 2];
+ d7 = +(+HEAPF32[i5 + 36 >> 2] - (d8 * d10 - d9 * d6));
+ d6 = +(+HEAPF32[i5 + 40 >> 2] - (d10 * d9 + d8 * d6));
+ i2 = i3;
+ HEAPF32[i2 >> 2] = d7;
+ HEAPF32[i2 + 4 >> 2] = d6;
+ i2 = (HEAP32[i5 + 88 >> 2] | 0) + 102872 | 0;
+ i4 = HEAP32[i5 + 100 >> 2] | 0;
+ if ((i4 | 0) == 0) {
+ STACKTOP = i1;
+ return;
+ }
+ i5 = i5 + 12 | 0;
+ do {
+ __ZN9b2Fixture11SynchronizeEP12b2BroadPhaseRK11b2TransformS4_(i4, i2, i3, i5);
+ i4 = HEAP32[i4 + 4 >> 2] | 0;
+ } while ((i4 | 0) != 0);
+ STACKTOP = i1;
+ return;
+}
+function __ZN13b2DynamicTreeC2Ev(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0;
+ i4 = STACKTOP;
+ HEAP32[i1 >> 2] = -1;
+ i3 = i1 + 12 | 0;
+ HEAP32[i3 >> 2] = 16;
+ HEAP32[i1 + 8 >> 2] = 0;
+ i6 = __Z7b2Alloci(576) | 0;
+ i2 = i1 + 4 | 0;
+ HEAP32[i2 >> 2] = i6;
+ _memset(i6 | 0, 0, (HEAP32[i3 >> 2] | 0) * 36 | 0) | 0;
+ i6 = (HEAP32[i3 >> 2] | 0) + -1 | 0;
+ i2 = HEAP32[i2 >> 2] | 0;
+ if ((i6 | 0) > 0) {
+ i6 = 0;
+ while (1) {
+ i5 = i6 + 1 | 0;
+ HEAP32[i2 + (i6 * 36 | 0) + 20 >> 2] = i5;
+ HEAP32[i2 + (i6 * 36 | 0) + 32 >> 2] = -1;
+ i6 = (HEAP32[i3 >> 2] | 0) + -1 | 0;
+ if ((i5 | 0) < (i6 | 0)) {
+ i6 = i5;
+ } else {
+ break;
+ }
+ }
+ }
+ HEAP32[i2 + (i6 * 36 | 0) + 20 >> 2] = -1;
+ HEAP32[i2 + (((HEAP32[i3 >> 2] | 0) + -1 | 0) * 36 | 0) + 32 >> 2] = -1;
+ HEAP32[i1 + 16 >> 2] = 0;
+ HEAP32[i1 + 20 >> 2] = 0;
+ HEAP32[i1 + 24 >> 2] = 0;
+ STACKTOP = i4;
+ return;
+}
+function __Z7measurePl(i1, i9) {
+ i1 = i1 | 0;
+ i9 = i9 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, d5 = 0.0, d6 = 0.0, i7 = 0, d8 = 0.0, i10 = 0, d11 = 0.0;
+ i2 = STACKTOP;
+ i3 = HEAP32[4] | 0;
+ i4 = STACKTOP;
+ STACKTOP = STACKTOP + ((4 * i3 | 0) + 15 & -16) | 0;
+ i7 = (i3 | 0) > 0;
+ if (i7) {
+ i10 = 0;
+ d6 = 0.0;
+ do {
+ d8 = +(HEAP32[i9 + (i10 << 2) >> 2] | 0) / 1.0e6 * 1.0e3;
+ HEAPF32[i4 + (i10 << 2) >> 2] = d8;
+ d6 = d6 + d8;
+ i10 = i10 + 1 | 0;
+ } while ((i10 | 0) < (i3 | 0));
+ d5 = +(i3 | 0);
+ d6 = d6 / d5;
+ HEAPF32[i1 >> 2] = d6;
+ if (i7) {
+ i7 = 0;
+ d8 = 0.0;
+ do {
+ d11 = +HEAPF32[i4 + (i7 << 2) >> 2] - d6;
+ d8 = d8 + d11 * d11;
+ i7 = i7 + 1 | 0;
+ } while ((i7 | 0) < (i3 | 0));
+ } else {
+ d8 = 0.0;
+ }
+ } else {
+ d5 = +(i3 | 0);
+ HEAPF32[i1 >> 2] = 0.0 / d5;
+ d8 = 0.0;
+ }
+ HEAPF32[i1 + 4 >> 2] = +Math_sqrt(+(d8 / d5));
+ STACKTOP = i2;
+ return;
+}
+function __ZN13b2DynamicTree11CreateProxyERK6b2AABBPv(i1, i3, i2) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ var i4 = 0, i5 = 0, i6 = 0, d7 = 0.0, d8 = 0.0, i9 = 0;
+ i5 = STACKTOP;
+ i4 = __ZN13b2DynamicTree12AllocateNodeEv(i1) | 0;
+ i6 = i1 + 4 | 0;
+ d7 = +(+HEAPF32[i3 >> 2] + -.10000000149011612);
+ d8 = +(+HEAPF32[i3 + 4 >> 2] + -.10000000149011612);
+ i9 = (HEAP32[i6 >> 2] | 0) + (i4 * 36 | 0) | 0;
+ HEAPF32[i9 >> 2] = d7;
+ HEAPF32[i9 + 4 >> 2] = d8;
+ d8 = +(+HEAPF32[i3 + 8 >> 2] + .10000000149011612);
+ d7 = +(+HEAPF32[i3 + 12 >> 2] + .10000000149011612);
+ i3 = (HEAP32[i6 >> 2] | 0) + (i4 * 36 | 0) + 8 | 0;
+ HEAPF32[i3 >> 2] = d8;
+ HEAPF32[i3 + 4 >> 2] = d7;
+ HEAP32[(HEAP32[i6 >> 2] | 0) + (i4 * 36 | 0) + 16 >> 2] = i2;
+ HEAP32[(HEAP32[i6 >> 2] | 0) + (i4 * 36 | 0) + 32 >> 2] = 0;
+ __ZN13b2DynamicTree10InsertLeafEi(i1, i4);
+ STACKTOP = i5;
+ return i4 | 0;
+}
+function __ZN16b2BlockAllocatorC2Ev(i3) {
+ i3 = i3 | 0;
+ var i1 = 0, i2 = 0, i4 = 0, i5 = 0;
+ i2 = STACKTOP;
+ i4 = i3 + 8 | 0;
+ HEAP32[i4 >> 2] = 128;
+ HEAP32[i3 + 4 >> 2] = 0;
+ i5 = __Z7b2Alloci(1024) | 0;
+ HEAP32[i3 >> 2] = i5;
+ _memset(i5 | 0, 0, HEAP32[i4 >> 2] << 3 | 0) | 0;
+ i4 = i3 + 12 | 0;
+ i3 = i4 + 56 | 0;
+ do {
+ HEAP32[i4 >> 2] = 0;
+ i4 = i4 + 4 | 0;
+ } while ((i4 | 0) < (i3 | 0));
+ if ((HEAP8[1280] | 0) == 0) {
+ i3 = 1;
+ i4 = 0;
+ } else {
+ STACKTOP = i2;
+ return;
+ }
+ do {
+ if ((i4 | 0) >= 14) {
+ i1 = 3;
+ break;
+ }
+ if ((i3 | 0) > (HEAP32[576 + (i4 << 2) >> 2] | 0)) {
+ i4 = i4 + 1 | 0;
+ HEAP8[632 + i3 | 0] = i4;
+ } else {
+ HEAP8[632 + i3 | 0] = i4;
+ }
+ i3 = i3 + 1 | 0;
+ } while ((i3 | 0) < 641);
+ if ((i1 | 0) == 3) {
+ ___assert_fail(1288, 1312, 73, 1352);
+ }
+ HEAP8[1280] = 1;
+ STACKTOP = i2;
+ return;
+}
+function __ZN24b2ChainAndPolygonContact8EvaluateEP10b2ManifoldRK11b2TransformS4_(i2, i4, i3, i1) {
+ i2 = i2 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i1 = i1 | 0;
+ var i5 = 0, i6 = 0, i7 = 0, i8 = 0;
+ i5 = STACKTOP;
+ STACKTOP = STACKTOP + 48 | 0;
+ i6 = i5;
+ i7 = HEAP32[(HEAP32[i2 + 48 >> 2] | 0) + 12 >> 2] | 0;
+ HEAP32[i6 >> 2] = 240;
+ HEAP32[i6 + 4 >> 2] = 1;
+ HEAPF32[i6 + 8 >> 2] = .009999999776482582;
+ i8 = i6 + 28 | 0;
+ HEAP32[i8 + 0 >> 2] = 0;
+ HEAP32[i8 + 4 >> 2] = 0;
+ HEAP32[i8 + 8 >> 2] = 0;
+ HEAP32[i8 + 12 >> 2] = 0;
+ HEAP16[i8 + 16 >> 1] = 0;
+ __ZNK12b2ChainShape12GetChildEdgeEP11b2EdgeShapei(i7, i6, HEAP32[i2 + 56 >> 2] | 0);
+ __Z23b2CollideEdgeAndPolygonP10b2ManifoldPK11b2EdgeShapeRK11b2TransformPK14b2PolygonShapeS6_(i4, i6, i3, HEAP32[(HEAP32[i2 + 52 >> 2] | 0) + 12 >> 2] | 0, i1);
+ STACKTOP = i5;
+ return;
+}
+function __ZN23b2ChainAndCircleContact8EvaluateEP10b2ManifoldRK11b2TransformS4_(i2, i4, i3, i1) {
+ i2 = i2 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i1 = i1 | 0;
+ var i5 = 0, i6 = 0, i7 = 0, i8 = 0;
+ i5 = STACKTOP;
+ STACKTOP = STACKTOP + 48 | 0;
+ i6 = i5;
+ i7 = HEAP32[(HEAP32[i2 + 48 >> 2] | 0) + 12 >> 2] | 0;
+ HEAP32[i6 >> 2] = 240;
+ HEAP32[i6 + 4 >> 2] = 1;
+ HEAPF32[i6 + 8 >> 2] = .009999999776482582;
+ i8 = i6 + 28 | 0;
+ HEAP32[i8 + 0 >> 2] = 0;
+ HEAP32[i8 + 4 >> 2] = 0;
+ HEAP32[i8 + 8 >> 2] = 0;
+ HEAP32[i8 + 12 >> 2] = 0;
+ HEAP16[i8 + 16 >> 1] = 0;
+ __ZNK12b2ChainShape12GetChildEdgeEP11b2EdgeShapei(i7, i6, HEAP32[i2 + 56 >> 2] | 0);
+ __Z22b2CollideEdgeAndCircleP10b2ManifoldPK11b2EdgeShapeRK11b2TransformPK13b2CircleShapeS6_(i4, i6, i3, HEAP32[(HEAP32[i2 + 52 >> 2] | 0) + 12 >> 2] | 0, i1);
+ STACKTOP = i5;
+ return;
+}
+function __ZN15b2ContactSolver13StoreImpulsesEv(i4) {
+ i4 = i4 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0;
+ i1 = STACKTOP;
+ i2 = HEAP32[i4 + 48 >> 2] | 0;
+ if ((i2 | 0) <= 0) {
+ STACKTOP = i1;
+ return;
+ }
+ i3 = HEAP32[i4 + 40 >> 2] | 0;
+ i4 = HEAP32[i4 + 44 >> 2] | 0;
+ i5 = 0;
+ do {
+ i6 = HEAP32[i4 + (HEAP32[i3 + (i5 * 152 | 0) + 148 >> 2] << 2) >> 2] | 0;
+ i7 = HEAP32[i3 + (i5 * 152 | 0) + 144 >> 2] | 0;
+ if ((i7 | 0) > 0) {
+ i8 = 0;
+ do {
+ HEAPF32[i6 + (i8 * 20 | 0) + 72 >> 2] = +HEAPF32[i3 + (i5 * 152 | 0) + (i8 * 36 | 0) + 16 >> 2];
+ HEAPF32[i6 + (i8 * 20 | 0) + 76 >> 2] = +HEAPF32[i3 + (i5 * 152 | 0) + (i8 * 36 | 0) + 20 >> 2];
+ i8 = i8 + 1 | 0;
+ } while ((i8 | 0) < (i7 | 0));
+ }
+ i5 = i5 + 1 | 0;
+ } while ((i5 | 0) < (i2 | 0));
+ STACKTOP = i1;
+ return;
+}
+function __ZN16b2StackAllocator4FreeEPv(i1, i5) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i6 = 0;
+ i3 = STACKTOP;
+ i2 = i1 + 102796 | 0;
+ i4 = HEAP32[i2 >> 2] | 0;
+ if ((i4 | 0) <= 0) {
+ ___assert_fail(3952, 3808, 63, 3976);
+ }
+ i6 = i4 + -1 | 0;
+ if ((HEAP32[i1 + (i6 * 12 | 0) + 102412 >> 2] | 0) != (i5 | 0)) {
+ ___assert_fail(3984, 3808, 65, 3976);
+ }
+ if ((HEAP8[i1 + (i6 * 12 | 0) + 102420 | 0] | 0) == 0) {
+ i5 = i1 + (i6 * 12 | 0) + 102416 | 0;
+ i6 = i1 + 102400 | 0;
+ HEAP32[i6 >> 2] = (HEAP32[i6 >> 2] | 0) - (HEAP32[i5 >> 2] | 0);
+ } else {
+ __Z6b2FreePv(i5);
+ i5 = i1 + (i6 * 12 | 0) + 102416 | 0;
+ i4 = HEAP32[i2 >> 2] | 0;
+ }
+ i6 = i1 + 102404 | 0;
+ HEAP32[i6 >> 2] = (HEAP32[i6 >> 2] | 0) - (HEAP32[i5 >> 2] | 0);
+ HEAP32[i2 >> 2] = i4 + -1;
+ STACKTOP = i3;
+ return;
+}
+function __ZNK10__cxxabiv117__class_type_info27has_unambiguous_public_baseEPNS_19__dynamic_cast_infoEPvi(i5, i4, i3, i2) {
+ i5 = i5 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ var i1 = 0, i6 = 0;
+ i1 = STACKTOP;
+ if ((HEAP32[i4 + 8 >> 2] | 0) != (i5 | 0)) {
+ STACKTOP = i1;
+ return;
+ }
+ i5 = i4 + 16 | 0;
+ i6 = HEAP32[i5 >> 2] | 0;
+ if ((i6 | 0) == 0) {
+ HEAP32[i5 >> 2] = i3;
+ HEAP32[i4 + 24 >> 2] = i2;
+ HEAP32[i4 + 36 >> 2] = 1;
+ STACKTOP = i1;
+ return;
+ }
+ if ((i6 | 0) != (i3 | 0)) {
+ i6 = i4 + 36 | 0;
+ HEAP32[i6 >> 2] = (HEAP32[i6 >> 2] | 0) + 1;
+ HEAP32[i4 + 24 >> 2] = 2;
+ HEAP8[i4 + 54 | 0] = 1;
+ STACKTOP = i1;
+ return;
+ }
+ i3 = i4 + 24 | 0;
+ if ((HEAP32[i3 >> 2] | 0) != 2) {
+ STACKTOP = i1;
+ return;
+ }
+ HEAP32[i3 >> 2] = i2;
+ STACKTOP = i1;
+ return;
+}
+function __ZN12b2BroadPhase11CreateProxyERK6b2AABBPv(i2, i4, i3) {
+ i2 = i2 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ var i1 = 0, i5 = 0, i6 = 0, i7 = 0;
+ i1 = STACKTOP;
+ i3 = __ZN13b2DynamicTree11CreateProxyERK6b2AABBPv(i2, i4, i3) | 0;
+ i4 = i2 + 28 | 0;
+ HEAP32[i4 >> 2] = (HEAP32[i4 >> 2] | 0) + 1;
+ i4 = i2 + 40 | 0;
+ i5 = HEAP32[i4 >> 2] | 0;
+ i6 = i2 + 36 | 0;
+ i2 = i2 + 32 | 0;
+ if ((i5 | 0) == (HEAP32[i6 >> 2] | 0)) {
+ i7 = HEAP32[i2 >> 2] | 0;
+ HEAP32[i6 >> 2] = i5 << 1;
+ i5 = __Z7b2Alloci(i5 << 3) | 0;
+ HEAP32[i2 >> 2] = i5;
+ _memcpy(i5 | 0, i7 | 0, HEAP32[i4 >> 2] << 2 | 0) | 0;
+ __Z6b2FreePv(i7);
+ i5 = HEAP32[i4 >> 2] | 0;
+ }
+ HEAP32[(HEAP32[i2 >> 2] | 0) + (i5 << 2) >> 2] = i3;
+ HEAP32[i4 >> 2] = (HEAP32[i4 >> 2] | 0) + 1;
+ STACKTOP = i1;
+ return i3 | 0;
+}
+function __ZN9b2ContactC2EP9b2FixtureiS1_i(i1, i4, i6, i3, i5) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i6 = i6 | 0;
+ i3 = i3 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i7 = 0, d8 = 0.0, d9 = 0.0;
+ i2 = STACKTOP;
+ HEAP32[i1 >> 2] = 4440;
+ HEAP32[i1 + 4 >> 2] = 4;
+ HEAP32[i1 + 48 >> 2] = i4;
+ HEAP32[i1 + 52 >> 2] = i3;
+ HEAP32[i1 + 56 >> 2] = i6;
+ HEAP32[i1 + 60 >> 2] = i5;
+ HEAP32[i1 + 124 >> 2] = 0;
+ HEAP32[i1 + 128 >> 2] = 0;
+ i5 = i4 + 16 | 0;
+ i6 = i1 + 8 | 0;
+ i7 = i6 + 40 | 0;
+ do {
+ HEAP32[i6 >> 2] = 0;
+ i6 = i6 + 4 | 0;
+ } while ((i6 | 0) < (i7 | 0));
+ HEAPF32[i1 + 136 >> 2] = +Math_sqrt(+(+HEAPF32[i5 >> 2] * +HEAPF32[i3 + 16 >> 2]));
+ d8 = +HEAPF32[i4 + 20 >> 2];
+ d9 = +HEAPF32[i3 + 20 >> 2];
+ HEAPF32[i1 + 140 >> 2] = d8 > d9 ? d8 : d9;
+ STACKTOP = i2;
+ return;
+}
+function __ZN12b2BroadPhase9MoveProxyEiRK6b2AABBRK6b2Vec2(i3, i1, i5, i4) {
+ i3 = i3 | 0;
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ i4 = i4 | 0;
+ var i2 = 0, i6 = 0, i7 = 0;
+ i2 = STACKTOP;
+ if (!(__ZN13b2DynamicTree9MoveProxyEiRK6b2AABBRK6b2Vec2(i3, i1, i5, i4) | 0)) {
+ STACKTOP = i2;
+ return;
+ }
+ i4 = i3 + 40 | 0;
+ i5 = HEAP32[i4 >> 2] | 0;
+ i6 = i3 + 36 | 0;
+ i3 = i3 + 32 | 0;
+ if ((i5 | 0) == (HEAP32[i6 >> 2] | 0)) {
+ i7 = HEAP32[i3 >> 2] | 0;
+ HEAP32[i6 >> 2] = i5 << 1;
+ i5 = __Z7b2Alloci(i5 << 3) | 0;
+ HEAP32[i3 >> 2] = i5;
+ _memcpy(i5 | 0, i7 | 0, HEAP32[i4 >> 2] << 2 | 0) | 0;
+ __Z6b2FreePv(i7);
+ i5 = HEAP32[i4 >> 2] | 0;
+ }
+ HEAP32[(HEAP32[i3 >> 2] | 0) + (i5 << 2) >> 2] = i1;
+ HEAP32[i4 >> 2] = (HEAP32[i4 >> 2] | 0) + 1;
+ STACKTOP = i2;
+ return;
+}
+function __ZN24b2ChainAndPolygonContact6CreateEP9b2FixtureiS1_iP16b2BlockAllocator(i1, i3, i4, i5, i6) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ i6 = i6 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ i6 = __ZN16b2BlockAllocator8AllocateEi(i6, 144) | 0;
+ if ((i6 | 0) == 0) {
+ i6 = 0;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ __ZN9b2ContactC2EP9b2FixtureiS1_i(i6, i1, i3, i4, i5);
+ HEAP32[i6 >> 2] = 6032;
+ if ((HEAP32[(HEAP32[(HEAP32[i6 + 48 >> 2] | 0) + 12 >> 2] | 0) + 4 >> 2] | 0) != 3) {
+ ___assert_fail(6048, 6096, 43, 6152);
+ }
+ if ((HEAP32[(HEAP32[(HEAP32[i6 + 52 >> 2] | 0) + 12 >> 2] | 0) + 4 >> 2] | 0) == 2) {
+ STACKTOP = i2;
+ return i6 | 0;
+ } else {
+ ___assert_fail(6184, 6096, 44, 6152);
+ }
+ return 0;
+}
+function __ZN23b2ChainAndCircleContact6CreateEP9b2FixtureiS1_iP16b2BlockAllocator(i1, i3, i4, i5, i6) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ i6 = i6 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ i6 = __ZN16b2BlockAllocator8AllocateEi(i6, 144) | 0;
+ if ((i6 | 0) == 0) {
+ i6 = 0;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ __ZN9b2ContactC2EP9b2FixtureiS1_i(i6, i1, i3, i4, i5);
+ HEAP32[i6 >> 2] = 5784;
+ if ((HEAP32[(HEAP32[(HEAP32[i6 + 48 >> 2] | 0) + 12 >> 2] | 0) + 4 >> 2] | 0) != 3) {
+ ___assert_fail(5800, 5848, 43, 5904);
+ }
+ if ((HEAP32[(HEAP32[(HEAP32[i6 + 52 >> 2] | 0) + 12 >> 2] | 0) + 4 >> 2] | 0) == 0) {
+ STACKTOP = i2;
+ return i6 | 0;
+ } else {
+ ___assert_fail(5928, 5848, 44, 5904);
+ }
+ return 0;
+}
+function __ZN25b2PolygonAndCircleContact6CreateEP9b2FixtureiS1_iP16b2BlockAllocator(i1, i4, i2, i5, i3) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i2 = i2 | 0;
+ i5 = i5 | 0;
+ i3 = i3 | 0;
+ i4 = STACKTOP;
+ i3 = __ZN16b2BlockAllocator8AllocateEi(i3, 144) | 0;
+ if ((i3 | 0) == 0) {
+ i5 = 0;
+ STACKTOP = i4;
+ return i5 | 0;
+ }
+ __ZN9b2ContactC2EP9b2FixtureiS1_i(i3, i1, 0, i2, 0);
+ HEAP32[i3 >> 2] = 4984;
+ if ((HEAP32[(HEAP32[(HEAP32[i3 + 48 >> 2] | 0) + 12 >> 2] | 0) + 4 >> 2] | 0) != 2) {
+ ___assert_fail(5e3, 5048, 41, 5104);
+ }
+ if ((HEAP32[(HEAP32[(HEAP32[i3 + 52 >> 2] | 0) + 12 >> 2] | 0) + 4 >> 2] | 0) == 0) {
+ i5 = i3;
+ STACKTOP = i4;
+ return i5 | 0;
+ } else {
+ ___assert_fail(5136, 5048, 42, 5104);
+ }
+ return 0;
+}
+function __ZN23b2EdgeAndPolygonContact6CreateEP9b2FixtureiS1_iP16b2BlockAllocator(i1, i4, i2, i5, i3) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i2 = i2 | 0;
+ i5 = i5 | 0;
+ i3 = i3 | 0;
+ i4 = STACKTOP;
+ i3 = __ZN16b2BlockAllocator8AllocateEi(i3, 144) | 0;
+ if ((i3 | 0) == 0) {
+ i5 = 0;
+ STACKTOP = i4;
+ return i5 | 0;
+ }
+ __ZN9b2ContactC2EP9b2FixtureiS1_i(i3, i1, 0, i2, 0);
+ HEAP32[i3 >> 2] = 4736;
+ if ((HEAP32[(HEAP32[(HEAP32[i3 + 48 >> 2] | 0) + 12 >> 2] | 0) + 4 >> 2] | 0) != 1) {
+ ___assert_fail(4752, 4800, 41, 4856);
+ }
+ if ((HEAP32[(HEAP32[(HEAP32[i3 + 52 >> 2] | 0) + 12 >> 2] | 0) + 4 >> 2] | 0) == 2) {
+ i5 = i3;
+ STACKTOP = i4;
+ return i5 | 0;
+ } else {
+ ___assert_fail(4880, 4800, 42, 4856);
+ }
+ return 0;
+}
+function __ZN22b2EdgeAndCircleContact6CreateEP9b2FixtureiS1_iP16b2BlockAllocator(i1, i4, i2, i5, i3) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i2 = i2 | 0;
+ i5 = i5 | 0;
+ i3 = i3 | 0;
+ i4 = STACKTOP;
+ i3 = __ZN16b2BlockAllocator8AllocateEi(i3, 144) | 0;
+ if ((i3 | 0) == 0) {
+ i5 = 0;
+ STACKTOP = i4;
+ return i5 | 0;
+ }
+ __ZN9b2ContactC2EP9b2FixtureiS1_i(i3, i1, 0, i2, 0);
+ HEAP32[i3 >> 2] = 4488;
+ if ((HEAP32[(HEAP32[(HEAP32[i3 + 48 >> 2] | 0) + 12 >> 2] | 0) + 4 >> 2] | 0) != 1) {
+ ___assert_fail(4504, 4552, 41, 4608);
+ }
+ if ((HEAP32[(HEAP32[(HEAP32[i3 + 52 >> 2] | 0) + 12 >> 2] | 0) + 4 >> 2] | 0) == 0) {
+ i5 = i3;
+ STACKTOP = i4;
+ return i5 | 0;
+ } else {
+ ___assert_fail(4632, 4552, 42, 4608);
+ }
+ return 0;
+}
+function __ZN14b2PolygonShape8SetAsBoxEff(i1, d3, d2) {
+ i1 = i1 | 0;
+ d3 = +d3;
+ d2 = +d2;
+ var d4 = 0.0, d5 = 0.0;
+ HEAP32[i1 + 148 >> 2] = 4;
+ d4 = -d3;
+ d5 = -d2;
+ HEAPF32[i1 + 20 >> 2] = d4;
+ HEAPF32[i1 + 24 >> 2] = d5;
+ HEAPF32[i1 + 28 >> 2] = d3;
+ HEAPF32[i1 + 32 >> 2] = d5;
+ HEAPF32[i1 + 36 >> 2] = d3;
+ HEAPF32[i1 + 40 >> 2] = d2;
+ HEAPF32[i1 + 44 >> 2] = d4;
+ HEAPF32[i1 + 48 >> 2] = d2;
+ HEAPF32[i1 + 84 >> 2] = 0.0;
+ HEAPF32[i1 + 88 >> 2] = -1.0;
+ HEAPF32[i1 + 92 >> 2] = 1.0;
+ HEAPF32[i1 + 96 >> 2] = 0.0;
+ HEAPF32[i1 + 100 >> 2] = 0.0;
+ HEAPF32[i1 + 104 >> 2] = 1.0;
+ HEAPF32[i1 + 108 >> 2] = -1.0;
+ HEAPF32[i1 + 112 >> 2] = 0.0;
+ HEAPF32[i1 + 12 >> 2] = 0.0;
+ HEAPF32[i1 + 16 >> 2] = 0.0;
+ return;
+}
+function __ZN16b2PolygonContact6CreateEP9b2FixtureiS1_iP16b2BlockAllocator(i1, i4, i2, i5, i3) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i2 = i2 | 0;
+ i5 = i5 | 0;
+ i3 = i3 | 0;
+ i4 = STACKTOP;
+ i3 = __ZN16b2BlockAllocator8AllocateEi(i3, 144) | 0;
+ if ((i3 | 0) == 0) {
+ i5 = 0;
+ STACKTOP = i4;
+ return i5 | 0;
+ }
+ __ZN9b2ContactC2EP9b2FixtureiS1_i(i3, i1, 0, i2, 0);
+ HEAP32[i3 >> 2] = 5240;
+ if ((HEAP32[(HEAP32[(HEAP32[i3 + 48 >> 2] | 0) + 12 >> 2] | 0) + 4 >> 2] | 0) != 2) {
+ ___assert_fail(5256, 5304, 44, 5352);
+ }
+ if ((HEAP32[(HEAP32[(HEAP32[i3 + 52 >> 2] | 0) + 12 >> 2] | 0) + 4 >> 2] | 0) == 2) {
+ i5 = i3;
+ STACKTOP = i4;
+ return i5 | 0;
+ } else {
+ ___assert_fail(5376, 5304, 45, 5352);
+ }
+ return 0;
+}
+function __ZN15b2CircleContact6CreateEP9b2FixtureiS1_iP16b2BlockAllocator(i1, i4, i2, i5, i3) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i2 = i2 | 0;
+ i5 = i5 | 0;
+ i3 = i3 | 0;
+ i4 = STACKTOP;
+ i3 = __ZN16b2BlockAllocator8AllocateEi(i3, 144) | 0;
+ if ((i3 | 0) == 0) {
+ i5 = 0;
+ STACKTOP = i4;
+ return i5 | 0;
+ }
+ __ZN9b2ContactC2EP9b2FixtureiS1_i(i3, i1, 0, i2, 0);
+ HEAP32[i3 >> 2] = 6288;
+ if ((HEAP32[(HEAP32[(HEAP32[i3 + 48 >> 2] | 0) + 12 >> 2] | 0) + 4 >> 2] | 0) != 0) {
+ ___assert_fail(6304, 6352, 44, 6400);
+ }
+ if ((HEAP32[(HEAP32[(HEAP32[i3 + 52 >> 2] | 0) + 12 >> 2] | 0) + 4 >> 2] | 0) == 0) {
+ i5 = i3;
+ STACKTOP = i4;
+ return i5 | 0;
+ } else {
+ ___assert_fail(6416, 6352, 45, 6400);
+ }
+ return 0;
+}
+function __ZN7b2World10CreateBodyEPK9b2BodyDef(i1, i4) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ var i2 = 0, i3 = 0, i5 = 0;
+ i2 = STACKTOP;
+ if ((HEAP32[i1 + 102868 >> 2] & 2 | 0) != 0) {
+ ___assert_fail(2160, 2184, 109, 2216);
+ }
+ i3 = __ZN16b2BlockAllocator8AllocateEi(i1, 152) | 0;
+ if ((i3 | 0) == 0) {
+ i3 = 0;
+ } else {
+ __ZN6b2BodyC2EPK9b2BodyDefP7b2World(i3, i4, i1);
+ }
+ HEAP32[i3 + 92 >> 2] = 0;
+ i4 = i1 + 102952 | 0;
+ HEAP32[i3 + 96 >> 2] = HEAP32[i4 >> 2];
+ i5 = HEAP32[i4 >> 2] | 0;
+ if ((i5 | 0) != 0) {
+ HEAP32[i5 + 92 >> 2] = i3;
+ }
+ HEAP32[i4 >> 2] = i3;
+ i5 = i1 + 102960 | 0;
+ HEAP32[i5 >> 2] = (HEAP32[i5 >> 2] | 0) + 1;
+ STACKTOP = i2;
+ return i3 | 0;
+}
+function __ZNK6b2Body13ShouldCollideEPKS_(i4, i2) {
+ i4 = i4 | 0;
+ i2 = i2 | 0;
+ var i1 = 0, i3 = 0;
+ i1 = STACKTOP;
+ if ((HEAP32[i4 >> 2] | 0) != 2 ? (HEAP32[i2 >> 2] | 0) != 2 : 0) {
+ i2 = 0;
+ } else {
+ i3 = 3;
+ }
+ L3 : do {
+ if ((i3 | 0) == 3) {
+ i3 = HEAP32[i4 + 108 >> 2] | 0;
+ if ((i3 | 0) == 0) {
+ i2 = 1;
+ } else {
+ while (1) {
+ if ((HEAP32[i3 >> 2] | 0) == (i2 | 0) ? (HEAP8[(HEAP32[i3 + 4 >> 2] | 0) + 61 | 0] | 0) == 0 : 0) {
+ i2 = 0;
+ break L3;
+ }
+ i3 = HEAP32[i3 + 12 >> 2] | 0;
+ if ((i3 | 0) == 0) {
+ i2 = 1;
+ break;
+ }
+ }
+ }
+ }
+ } while (0);
+ STACKTOP = i1;
+ return i2 | 0;
+}
+function __ZNK14b2PolygonShape5CloneEP16b2BlockAllocator(i1, i3) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i4 = 0, i5 = 0, i6 = 0;
+ i2 = STACKTOP;
+ i3 = __ZN16b2BlockAllocator8AllocateEi(i3, 152) | 0;
+ if ((i3 | 0) == 0) {
+ i3 = 0;
+ } else {
+ HEAP32[i3 >> 2] = 504;
+ HEAP32[i3 + 4 >> 2] = 2;
+ HEAPF32[i3 + 8 >> 2] = .009999999776482582;
+ HEAP32[i3 + 148 >> 2] = 0;
+ HEAPF32[i3 + 12 >> 2] = 0.0;
+ HEAPF32[i3 + 16 >> 2] = 0.0;
+ }
+ i6 = i1 + 4 | 0;
+ i5 = HEAP32[i6 + 4 >> 2] | 0;
+ i4 = i3 + 4 | 0;
+ HEAP32[i4 >> 2] = HEAP32[i6 >> 2];
+ HEAP32[i4 + 4 >> 2] = i5;
+ _memcpy(i3 + 12 | 0, i1 + 12 | 0, 140) | 0;
+ STACKTOP = i2;
+ return i3 | 0;
+}
+function _memcpy(i3, i2, i1) {
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i4 = 0;
+ if ((i1 | 0) >= 4096) return _emscripten_memcpy_big(i3 | 0, i2 | 0, i1 | 0) | 0;
+ i4 = i3 | 0;
+ if ((i3 & 3) == (i2 & 3)) {
+ while (i3 & 3) {
+ if ((i1 | 0) == 0) return i4 | 0;
+ HEAP8[i3] = HEAP8[i2] | 0;
+ i3 = i3 + 1 | 0;
+ i2 = i2 + 1 | 0;
+ i1 = i1 - 1 | 0;
+ }
+ while ((i1 | 0) >= 4) {
+ HEAP32[i3 >> 2] = HEAP32[i2 >> 2];
+ i3 = i3 + 4 | 0;
+ i2 = i2 + 4 | 0;
+ i1 = i1 - 4 | 0;
+ }
+ }
+ while ((i1 | 0) > 0) {
+ HEAP8[i3] = HEAP8[i2] | 0;
+ i3 = i3 + 1 | 0;
+ i2 = i2 + 1 | 0;
+ i1 = i1 - 1 | 0;
+ }
+ return i4 | 0;
+}
+function __ZN7b2World16SetAllowSleepingEb(i2, i4) {
+ i2 = i2 | 0;
+ i4 = i4 | 0;
+ var i1 = 0, i3 = 0;
+ i1 = STACKTOP;
+ i3 = i2 + 102976 | 0;
+ if ((i4 & 1 | 0) == (HEAPU8[i3] | 0 | 0)) {
+ STACKTOP = i1;
+ return;
+ }
+ HEAP8[i3] = i4 & 1;
+ if (i4) {
+ STACKTOP = i1;
+ return;
+ }
+ i2 = HEAP32[i2 + 102952 >> 2] | 0;
+ if ((i2 | 0) == 0) {
+ STACKTOP = i1;
+ return;
+ }
+ do {
+ i3 = i2 + 4 | 0;
+ i4 = HEAPU16[i3 >> 1] | 0;
+ if ((i4 & 2 | 0) == 0) {
+ HEAP16[i3 >> 1] = i4 | 2;
+ HEAPF32[i2 + 144 >> 2] = 0.0;
+ }
+ i2 = HEAP32[i2 + 96 >> 2] | 0;
+ } while ((i2 | 0) != 0);
+ STACKTOP = i1;
+ return;
+}
+function __ZN16b2BlockAllocator4FreeEPvi(i3, i1, i4) {
+ i3 = i3 | 0;
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ if ((i4 | 0) == 0) {
+ STACKTOP = i2;
+ return;
+ }
+ if ((i4 | 0) <= 0) {
+ ___assert_fail(1376, 1312, 164, 1488);
+ }
+ if ((i4 | 0) > 640) {
+ __Z6b2FreePv(i1);
+ STACKTOP = i2;
+ return;
+ }
+ i4 = HEAP8[632 + i4 | 0] | 0;
+ if (!((i4 & 255) < 14)) {
+ ___assert_fail(1408, 1312, 173, 1488);
+ }
+ i4 = i3 + ((i4 & 255) << 2) + 12 | 0;
+ HEAP32[i1 >> 2] = HEAP32[i4 >> 2];
+ HEAP32[i4 >> 2] = i1;
+ STACKTOP = i2;
+ return;
+}
+function __ZN15b2ContactFilter13ShouldCollideEP9b2FixtureS1_(i3, i2, i1) {
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i4 = 0;
+ i3 = STACKTOP;
+ i4 = HEAP16[i2 + 36 >> 1] | 0;
+ if (!(i4 << 16 >> 16 != (HEAP16[i1 + 36 >> 1] | 0) | i4 << 16 >> 16 == 0)) {
+ i4 = i4 << 16 >> 16 > 0;
+ STACKTOP = i3;
+ return i4 | 0;
+ }
+ if ((HEAP16[i1 + 32 >> 1] & HEAP16[i2 + 34 >> 1]) << 16 >> 16 == 0) {
+ i4 = 0;
+ STACKTOP = i3;
+ return i4 | 0;
+ }
+ i4 = (HEAP16[i1 + 34 >> 1] & HEAP16[i2 + 32 >> 1]) << 16 >> 16 != 0;
+ STACKTOP = i3;
+ return i4 | 0;
+}
+function _memset(i1, i4, i3) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i5 = 0, i6 = 0, i7 = 0;
+ i2 = i1 + i3 | 0;
+ if ((i3 | 0) >= 20) {
+ i4 = i4 & 255;
+ i7 = i1 & 3;
+ i6 = i4 | i4 << 8 | i4 << 16 | i4 << 24;
+ i5 = i2 & ~3;
+ if (i7) {
+ i7 = i1 + 4 - i7 | 0;
+ while ((i1 | 0) < (i7 | 0)) {
+ HEAP8[i1] = i4;
+ i1 = i1 + 1 | 0;
+ }
+ }
+ while ((i1 | 0) < (i5 | 0)) {
+ HEAP32[i1 >> 2] = i6;
+ i1 = i1 + 4 | 0;
+ }
+ }
+ while ((i1 | 0) < (i2 | 0)) {
+ HEAP8[i1] = i4;
+ i1 = i1 + 1 | 0;
+ }
+ return i1 - i3 | 0;
+}
+function __ZN6b2Body13CreateFixtureEPK7b2Shapef(i1, i3, d2) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ d2 = +d2;
+ var i4 = 0, i5 = 0;
+ i4 = STACKTOP;
+ STACKTOP = STACKTOP + 32 | 0;
+ i5 = i4;
+ HEAP16[i5 + 22 >> 1] = 1;
+ HEAP16[i5 + 24 >> 1] = -1;
+ HEAP16[i5 + 26 >> 1] = 0;
+ HEAP32[i5 + 4 >> 2] = 0;
+ HEAPF32[i5 + 8 >> 2] = .20000000298023224;
+ HEAPF32[i5 + 12 >> 2] = 0.0;
+ HEAP8[i5 + 20 | 0] = 0;
+ HEAP32[i5 >> 2] = i3;
+ HEAPF32[i5 + 16 >> 2] = d2;
+ i3 = __ZN6b2Body13CreateFixtureEPK12b2FixtureDef(i1, i5) | 0;
+ STACKTOP = i4;
+ return i3 | 0;
+}
+function __Znwj(i2) {
+ i2 = i2 | 0;
+ var i1 = 0, i3 = 0;
+ i1 = STACKTOP;
+ i2 = (i2 | 0) == 0 ? 1 : i2;
+ while (1) {
+ i3 = _malloc(i2) | 0;
+ if ((i3 | 0) != 0) {
+ i2 = 6;
+ break;
+ }
+ i3 = HEAP32[1914] | 0;
+ HEAP32[1914] = i3 + 0;
+ if ((i3 | 0) == 0) {
+ i2 = 5;
+ break;
+ }
+ FUNCTION_TABLE_v[i3 & 3]();
+ }
+ if ((i2 | 0) == 5) {
+ i3 = ___cxa_allocate_exception(4) | 0;
+ HEAP32[i3 >> 2] = 7672;
+ ___cxa_throw(i3 | 0, 7720, 30);
+ } else if ((i2 | 0) == 6) {
+ STACKTOP = i1;
+ return i3 | 0;
+ }
+ return 0;
+}
+function __ZN8b2IslandD2Ev(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ __ZN16b2StackAllocator4FreeEPv(HEAP32[i1 >> 2] | 0, HEAP32[i1 + 20 >> 2] | 0);
+ __ZN16b2StackAllocator4FreeEPv(HEAP32[i1 >> 2] | 0, HEAP32[i1 + 24 >> 2] | 0);
+ __ZN16b2StackAllocator4FreeEPv(HEAP32[i1 >> 2] | 0, HEAP32[i1 + 16 >> 2] | 0);
+ __ZN16b2StackAllocator4FreeEPv(HEAP32[i1 >> 2] | 0, HEAP32[i1 + 12 >> 2] | 0);
+ __ZN16b2StackAllocator4FreeEPv(HEAP32[i1 >> 2] | 0, HEAP32[i1 + 8 >> 2] | 0);
+ STACKTOP = i2;
+ return;
+}
+function __ZN16b2BlockAllocatorD2Ev(i2) {
+ i2 = i2 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i5 = 0;
+ i1 = STACKTOP;
+ i3 = i2 + 4 | 0;
+ i4 = HEAP32[i2 >> 2] | 0;
+ if ((HEAP32[i3 >> 2] | 0) > 0) {
+ i5 = 0;
+ } else {
+ i5 = i4;
+ __Z6b2FreePv(i5);
+ STACKTOP = i1;
+ return;
+ }
+ do {
+ __Z6b2FreePv(HEAP32[i4 + (i5 << 3) + 4 >> 2] | 0);
+ i5 = i5 + 1 | 0;
+ i4 = HEAP32[i2 >> 2] | 0;
+ } while ((i5 | 0) < (HEAP32[i3 >> 2] | 0));
+ __Z6b2FreePv(i4);
+ STACKTOP = i1;
+ return;
+}
+function copyTempDouble(i1) {
+ i1 = i1 | 0;
+ HEAP8[tempDoublePtr] = HEAP8[i1];
+ HEAP8[tempDoublePtr + 1 | 0] = HEAP8[i1 + 1 | 0];
+ HEAP8[tempDoublePtr + 2 | 0] = HEAP8[i1 + 2 | 0];
+ HEAP8[tempDoublePtr + 3 | 0] = HEAP8[i1 + 3 | 0];
+ HEAP8[tempDoublePtr + 4 | 0] = HEAP8[i1 + 4 | 0];
+ HEAP8[tempDoublePtr + 5 | 0] = HEAP8[i1 + 5 | 0];
+ HEAP8[tempDoublePtr + 6 | 0] = HEAP8[i1 + 6 | 0];
+ HEAP8[tempDoublePtr + 7 | 0] = HEAP8[i1 + 7 | 0];
+}
+function __ZNK11b2EdgeShape11ComputeMassEP10b2MassDataf(i2, i1, d3) {
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ d3 = +d3;
+ var i4 = 0, d5 = 0.0;
+ i4 = STACKTOP;
+ HEAPF32[i1 >> 2] = 0.0;
+ d5 = +((+HEAPF32[i2 + 12 >> 2] + +HEAPF32[i2 + 20 >> 2]) * .5);
+ d3 = +((+HEAPF32[i2 + 16 >> 2] + +HEAPF32[i2 + 24 >> 2]) * .5);
+ i2 = i1 + 4 | 0;
+ HEAPF32[i2 >> 2] = d5;
+ HEAPF32[i2 + 4 >> 2] = d3;
+ HEAPF32[i1 + 12 >> 2] = 0.0;
+ STACKTOP = i4;
+ return;
+}
+function __ZN11b2EdgeShape3SetERK6b2Vec2S2_(i1, i3, i2) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ var i4 = 0, i5 = 0;
+ i5 = i3;
+ i3 = HEAP32[i5 + 4 >> 2] | 0;
+ i4 = i1 + 12 | 0;
+ HEAP32[i4 >> 2] = HEAP32[i5 >> 2];
+ HEAP32[i4 + 4 >> 2] = i3;
+ i4 = i2;
+ i2 = HEAP32[i4 + 4 >> 2] | 0;
+ i3 = i1 + 20 | 0;
+ HEAP32[i3 >> 2] = HEAP32[i4 >> 2];
+ HEAP32[i3 + 4 >> 2] = i2;
+ HEAP8[i1 + 44 | 0] = 0;
+ HEAP8[i1 + 45 | 0] = 0;
+ return;
+}
+function __ZN25b2PolygonAndCircleContact8EvaluateEP10b2ManifoldRK11b2TransformS4_(i2, i4, i3, i1) {
+ i2 = i2 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i1 = i1 | 0;
+ var i5 = 0;
+ i5 = STACKTOP;
+ __Z25b2CollidePolygonAndCircleP10b2ManifoldPK14b2PolygonShapeRK11b2TransformPK13b2CircleShapeS6_(i4, HEAP32[(HEAP32[i2 + 48 >> 2] | 0) + 12 >> 2] | 0, i3, HEAP32[(HEAP32[i2 + 52 >> 2] | 0) + 12 >> 2] | 0, i1);
+ STACKTOP = i5;
+ return;
+}
+function __ZN23b2EdgeAndPolygonContact8EvaluateEP10b2ManifoldRK11b2TransformS4_(i2, i4, i3, i1) {
+ i2 = i2 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i1 = i1 | 0;
+ var i5 = 0;
+ i5 = STACKTOP;
+ __Z23b2CollideEdgeAndPolygonP10b2ManifoldPK11b2EdgeShapeRK11b2TransformPK14b2PolygonShapeS6_(i4, HEAP32[(HEAP32[i2 + 48 >> 2] | 0) + 12 >> 2] | 0, i3, HEAP32[(HEAP32[i2 + 52 >> 2] | 0) + 12 >> 2] | 0, i1);
+ STACKTOP = i5;
+ return;
+}
+function __ZN22b2EdgeAndCircleContact8EvaluateEP10b2ManifoldRK11b2TransformS4_(i2, i4, i3, i1) {
+ i2 = i2 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i1 = i1 | 0;
+ var i5 = 0;
+ i5 = STACKTOP;
+ __Z22b2CollideEdgeAndCircleP10b2ManifoldPK11b2EdgeShapeRK11b2TransformPK13b2CircleShapeS6_(i4, HEAP32[(HEAP32[i2 + 48 >> 2] | 0) + 12 >> 2] | 0, i3, HEAP32[(HEAP32[i2 + 52 >> 2] | 0) + 12 >> 2] | 0, i1);
+ STACKTOP = i5;
+ return;
+}
+function __Z23b2CollideEdgeAndPolygonP10b2ManifoldPK11b2EdgeShapeRK11b2TransformPK14b2PolygonShapeS6_(i5, i4, i3, i2, i1) {
+ i5 = i5 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i6 = 0;
+ i6 = STACKTOP;
+ STACKTOP = STACKTOP + 256 | 0;
+ __ZN12b2EPCollider7CollideEP10b2ManifoldPK11b2EdgeShapeRK11b2TransformPK14b2PolygonShapeS7_(i6, i5, i4, i3, i2, i1);
+ STACKTOP = i6;
+ return;
+}
+function __ZN16b2PolygonContact8EvaluateEP10b2ManifoldRK11b2TransformS4_(i2, i4, i3, i1) {
+ i2 = i2 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i1 = i1 | 0;
+ var i5 = 0;
+ i5 = STACKTOP;
+ __Z17b2CollidePolygonsP10b2ManifoldPK14b2PolygonShapeRK11b2TransformS3_S6_(i4, HEAP32[(HEAP32[i2 + 48 >> 2] | 0) + 12 >> 2] | 0, i3, HEAP32[(HEAP32[i2 + 52 >> 2] | 0) + 12 >> 2] | 0, i1);
+ STACKTOP = i5;
+ return;
+}
+function __ZN15b2CircleContact8EvaluateEP10b2ManifoldRK11b2TransformS4_(i2, i4, i3, i1) {
+ i2 = i2 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i1 = i1 | 0;
+ var i5 = 0;
+ i5 = STACKTOP;
+ __Z16b2CollideCirclesP10b2ManifoldPK13b2CircleShapeRK11b2TransformS3_S6_(i4, HEAP32[(HEAP32[i2 + 48 >> 2] | 0) + 12 >> 2] | 0, i3, HEAP32[(HEAP32[i2 + 52 >> 2] | 0) + 12 >> 2] | 0, i1);
+ STACKTOP = i5;
+ return;
+}
+function __Z14b2PairLessThanRK6b2PairS1_(i2, i5) {
+ i2 = i2 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i3 = 0, i4 = 0;
+ i1 = STACKTOP;
+ i4 = HEAP32[i2 >> 2] | 0;
+ i3 = HEAP32[i5 >> 2] | 0;
+ if ((i4 | 0) >= (i3 | 0)) {
+ if ((i4 | 0) == (i3 | 0)) {
+ i2 = (HEAP32[i2 + 4 >> 2] | 0) < (HEAP32[i5 + 4 >> 2] | 0);
+ } else {
+ i2 = 0;
+ }
+ } else {
+ i2 = 1;
+ }
+ STACKTOP = i1;
+ return i2 | 0;
+}
+function __ZN9b2FixtureC2Ev(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ HEAP16[i1 + 32 >> 1] = 1;
+ HEAP16[i1 + 34 >> 1] = -1;
+ HEAP16[i1 + 36 >> 1] = 0;
+ HEAP32[i1 + 40 >> 2] = 0;
+ HEAP32[i1 + 24 >> 2] = 0;
+ HEAP32[i1 + 28 >> 2] = 0;
+ HEAP32[i1 + 0 >> 2] = 0;
+ HEAP32[i1 + 4 >> 2] = 0;
+ HEAP32[i1 + 8 >> 2] = 0;
+ HEAP32[i1 + 12 >> 2] = 0;
+ STACKTOP = i2;
+ return;
+}
+function __ZN12b2BroadPhaseC2Ev(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ __ZN13b2DynamicTreeC2Ev(i1);
+ HEAP32[i1 + 28 >> 2] = 0;
+ HEAP32[i1 + 48 >> 2] = 16;
+ HEAP32[i1 + 52 >> 2] = 0;
+ HEAP32[i1 + 44 >> 2] = __Z7b2Alloci(192) | 0;
+ HEAP32[i1 + 36 >> 2] = 16;
+ HEAP32[i1 + 40 >> 2] = 0;
+ HEAP32[i1 + 32 >> 2] = __Z7b2Alloci(64) | 0;
+ STACKTOP = i2;
+ return;
+}
+function __ZN16b2StackAllocatorD2Ev(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ if ((HEAP32[i1 + 102400 >> 2] | 0) != 0) {
+ ___assert_fail(3792, 3808, 32, 3848);
+ }
+ if ((HEAP32[i1 + 102796 >> 2] | 0) == 0) {
+ STACKTOP = i2;
+ return;
+ } else {
+ ___assert_fail(3872, 3808, 33, 3848);
+ }
+}
+function __ZN15b2ContactSolverD2Ev(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ i3 = i1 + 32 | 0;
+ __ZN16b2StackAllocator4FreeEPv(HEAP32[i3 >> 2] | 0, HEAP32[i1 + 40 >> 2] | 0);
+ __ZN16b2StackAllocator4FreeEPv(HEAP32[i3 >> 2] | 0, HEAP32[i1 + 36 >> 2] | 0);
+ STACKTOP = i2;
+ return;
+}
+function __ZN25b2PolygonAndCircleContact7DestroyEP9b2ContactP16b2BlockAllocator(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ var i3 = 0;
+ i3 = STACKTOP;
+ FUNCTION_TABLE_vi[HEAP32[(HEAP32[i1 >> 2] | 0) + 4 >> 2] & 31](i1);
+ __ZN16b2BlockAllocator4FreeEPvi(i2, i1, 144);
+ STACKTOP = i3;
+ return;
+}
+function __ZN24b2ChainAndPolygonContact7DestroyEP9b2ContactP16b2BlockAllocator(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ var i3 = 0;
+ i3 = STACKTOP;
+ FUNCTION_TABLE_vi[HEAP32[(HEAP32[i1 >> 2] | 0) + 4 >> 2] & 31](i1);
+ __ZN16b2BlockAllocator4FreeEPvi(i2, i1, 144);
+ STACKTOP = i3;
+ return;
+}
+function __ZN23b2EdgeAndPolygonContact7DestroyEP9b2ContactP16b2BlockAllocator(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ var i3 = 0;
+ i3 = STACKTOP;
+ FUNCTION_TABLE_vi[HEAP32[(HEAP32[i1 >> 2] | 0) + 4 >> 2] & 31](i1);
+ __ZN16b2BlockAllocator4FreeEPvi(i2, i1, 144);
+ STACKTOP = i3;
+ return;
+}
+function __ZN23b2ChainAndCircleContact7DestroyEP9b2ContactP16b2BlockAllocator(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ var i3 = 0;
+ i3 = STACKTOP;
+ FUNCTION_TABLE_vi[HEAP32[(HEAP32[i1 >> 2] | 0) + 4 >> 2] & 31](i1);
+ __ZN16b2BlockAllocator4FreeEPvi(i2, i1, 144);
+ STACKTOP = i3;
+ return;
+}
+function __ZN22b2EdgeAndCircleContact7DestroyEP9b2ContactP16b2BlockAllocator(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ var i3 = 0;
+ i3 = STACKTOP;
+ FUNCTION_TABLE_vi[HEAP32[(HEAP32[i1 >> 2] | 0) + 4 >> 2] & 31](i1);
+ __ZN16b2BlockAllocator4FreeEPvi(i2, i1, 144);
+ STACKTOP = i3;
+ return;
+}
+function __ZN16b2ContactManagerC2Ev(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ __ZN12b2BroadPhaseC2Ev(i1);
+ HEAP32[i1 + 60 >> 2] = 0;
+ HEAP32[i1 + 64 >> 2] = 0;
+ HEAP32[i1 + 68 >> 2] = 1888;
+ HEAP32[i1 + 72 >> 2] = 1896;
+ HEAP32[i1 + 76 >> 2] = 0;
+ STACKTOP = i2;
+ return;
+}
+function __ZN16b2PolygonContact7DestroyEP9b2ContactP16b2BlockAllocator(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ var i3 = 0;
+ i3 = STACKTOP;
+ FUNCTION_TABLE_vi[HEAP32[(HEAP32[i1 >> 2] | 0) + 4 >> 2] & 31](i1);
+ __ZN16b2BlockAllocator4FreeEPvi(i2, i1, 144);
+ STACKTOP = i3;
+ return;
+}
+function __ZN15b2CircleContact7DestroyEP9b2ContactP16b2BlockAllocator(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ var i3 = 0;
+ i3 = STACKTOP;
+ FUNCTION_TABLE_vi[HEAP32[(HEAP32[i1 >> 2] | 0) + 4 >> 2] & 31](i1);
+ __ZN16b2BlockAllocator4FreeEPvi(i2, i1, 144);
+ STACKTOP = i3;
+ return;
+}
+function dynCall_viiiiii(i7, i6, i5, i4, i3, i2, i1) {
+ i7 = i7 | 0;
+ i6 = i6 | 0;
+ i5 = i5 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ FUNCTION_TABLE_viiiiii[i7 & 3](i6 | 0, i5 | 0, i4 | 0, i3 | 0, i2 | 0, i1 | 0);
+}
+function copyTempFloat(i1) {
+ i1 = i1 | 0;
+ HEAP8[tempDoublePtr] = HEAP8[i1];
+ HEAP8[tempDoublePtr + 1 | 0] = HEAP8[i1 + 1 | 0];
+ HEAP8[tempDoublePtr + 2 | 0] = HEAP8[i1 + 2 | 0];
+ HEAP8[tempDoublePtr + 3 | 0] = HEAP8[i1 + 3 | 0];
+}
+function dynCall_iiiiii(i6, i5, i4, i3, i2, i1) {
+ i6 = i6 | 0;
+ i5 = i5 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ return FUNCTION_TABLE_iiiiii[i6 & 15](i5 | 0, i4 | 0, i3 | 0, i2 | 0, i1 | 0) | 0;
+}
+function dynCall_viiiii(i6, i5, i4, i3, i2, i1) {
+ i6 = i6 | 0;
+ i5 = i5 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ FUNCTION_TABLE_viiiii[i6 & 3](i5 | 0, i4 | 0, i3 | 0, i2 | 0, i1 | 0);
+}
+function __ZN16b2ContactManager15FindNewContactsEv(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ __ZN12b2BroadPhase11UpdatePairsI16b2ContactManagerEEvPT_(i1, i1);
+ STACKTOP = i2;
+ return;
+}
+function __ZN16b2StackAllocatorC2Ev(i1) {
+ i1 = i1 | 0;
+ HEAP32[i1 + 102400 >> 2] = 0;
+ HEAP32[i1 + 102404 >> 2] = 0;
+ HEAP32[i1 + 102408 >> 2] = 0;
+ HEAP32[i1 + 102796 >> 2] = 0;
+ return;
+}
+function dynCall_viiii(i5, i4, i3, i2, i1) {
+ i5 = i5 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ FUNCTION_TABLE_viiii[i5 & 15](i4 | 0, i3 | 0, i2 | 0, i1 | 0);
+}
+function dynCall_iiii(i4, i3, i2, i1) {
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ return FUNCTION_TABLE_iiii[i4 & 7](i3 | 0, i2 | 0, i1 | 0) | 0;
+}
+function dynCall_viii(i4, i3, i2, i1) {
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ FUNCTION_TABLE_viii[i4 & 3](i3 | 0, i2 | 0, i1 | 0);
+}
+function __ZNSt9bad_allocD0Ev(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ __ZNSt9exceptionD2Ev(i1 | 0);
+ __ZdlPv(i1);
+ STACKTOP = i2;
+ return;
+}
+function stackAlloc(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + i1 | 0;
+ STACKTOP = STACKTOP + 7 & -8;
+ return i2 | 0;
+}
+function __ZN13b2DynamicTreeD2Ev(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ __Z6b2FreePv(HEAP32[i1 + 4 >> 2] | 0);
+ STACKTOP = i2;
+ return;
+}
+function dynCall_viid(i4, i3, i2, d1) {
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ d1 = +d1;
+ FUNCTION_TABLE_viid[i4 & 3](i3 | 0, i2 | 0, +d1);
+}
+function __ZN17b2ContactListener9PostSolveEP9b2ContactPK16b2ContactImpulse(i1, i2, i3) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ i3 = i3 | 0;
+ return;
+}
+function __ZN10__cxxabiv120__si_class_type_infoD0Ev(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ __ZdlPv(i1);
+ STACKTOP = i2;
+ return;
+}
+function __ZN10__cxxabiv117__class_type_infoD0Ev(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ __ZdlPv(i1);
+ STACKTOP = i2;
+ return;
+}
+function __ZNSt9bad_allocD2Ev(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ __ZNSt9exceptionD2Ev(i1 | 0);
+ STACKTOP = i2;
+ return;
+}
+function dynCall_iii(i3, i2, i1) {
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ return FUNCTION_TABLE_iii[i3 & 3](i2 | 0, i1 | 0) | 0;
+}
+function b8(i1, i2, i3, i4, i5, i6) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ i6 = i6 | 0;
+ abort(8);
+}
+function __ZN25b2PolygonAndCircleContactD0Ev(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ __ZdlPv(i1);
+ STACKTOP = i2;
+ return;
+}
+function __ZN17b2ContactListener8PreSolveEP9b2ContactPK10b2Manifold(i1, i2, i3) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ i3 = i3 | 0;
+ return;
+}
+function __ZN24b2ChainAndPolygonContactD0Ev(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ __ZdlPv(i1);
+ STACKTOP = i2;
+ return;
+}
+function __ZN23b2EdgeAndPolygonContactD0Ev(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ __ZdlPv(i1);
+ STACKTOP = i2;
+ return;
+}
+function __ZN23b2ChainAndCircleContactD0Ev(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ __ZdlPv(i1);
+ STACKTOP = i2;
+ return;
+}
+function __ZNK11b2EdgeShape9TestPointERK11b2TransformRK6b2Vec2(i1, i2, i3) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ i3 = i3 | 0;
+ return 0;
+}
+function __ZN22b2EdgeAndCircleContactD0Ev(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ __ZdlPv(i1);
+ STACKTOP = i2;
+ return;
+}
+function __ZdlPv(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ if ((i1 | 0) != 0) {
+ _free(i1);
+ }
+ STACKTOP = i2;
+ return;
+}
+function b10(i1, i2, i3, i4, i5) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ abort(10);
+ return 0;
+}
+function _strlen(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = i1;
+ while (HEAP8[i2] | 0) {
+ i2 = i2 + 1 | 0;
+ }
+ return i2 - i1 | 0;
+}
+function __Z7b2Alloci(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ i1 = _malloc(i1) | 0;
+ STACKTOP = i2;
+ return i1 | 0;
+}
+function setThrew(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ if ((__THREW__ | 0) == 0) {
+ __THREW__ = i1;
+ threwValue = i2;
+ }
+}
+function __ZN17b2ContactListenerD0Ev(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ __ZdlPv(i1);
+ STACKTOP = i2;
+ return;
+}
+function __ZN16b2PolygonContactD0Ev(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ __ZdlPv(i1);
+ STACKTOP = i2;
+ return;
+}
+function dynCall_vii(i3, i2, i1) {
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ FUNCTION_TABLE_vii[i3 & 15](i2 | 0, i1 | 0);
+}
+function __ZN15b2ContactFilterD0Ev(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ __ZdlPv(i1);
+ STACKTOP = i2;
+ return;
+}
+function __ZN15b2CircleContactD0Ev(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ __ZdlPv(i1);
+ STACKTOP = i2;
+ return;
+}
+function __ZN14b2PolygonShapeD0Ev(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ __ZdlPv(i1);
+ STACKTOP = i2;
+ return;
+}
+function __Znaj(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ i1 = __Znwj(i1) | 0;
+ STACKTOP = i2;
+ return i1 | 0;
+}
+function __ZN11b2EdgeShapeD0Ev(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ __ZdlPv(i1);
+ STACKTOP = i2;
+ return;
+}
+function __ZN9b2ContactD0Ev(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ __ZdlPv(i1);
+ STACKTOP = i2;
+ return;
+}
+function b1(i1, i2, i3, i4, i5) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ abort(1);
+}
+function __Z6b2FreePv(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _free(i1);
+ STACKTOP = i2;
+ return;
+}
+function ___clang_call_terminate(i1) {
+ i1 = i1 | 0;
+ ___cxa_begin_catch(i1 | 0) | 0;
+ __ZSt9terminatev();
+}
+function __ZN17b2ContactListener12BeginContactEP9b2Contact(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ return;
+}
+function dynCall_ii(i2, i1) {
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ return FUNCTION_TABLE_ii[i2 & 3](i1 | 0) | 0;
+}
+function __ZN17b2ContactListener10EndContactEP9b2Contact(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ return;
+}
+function b11(i1, i2, i3, i4) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ abort(11);
+}
+function dynCall_vi(i2, i1) {
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ FUNCTION_TABLE_vi[i2 & 31](i1 | 0);
+}
+function b0(i1, i2, i3) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ i3 = i3 | 0;
+ abort(0);
+ return 0;
+}
+function __ZNK10__cxxabiv116__shim_type_info5noop2Ev(i1) {
+ i1 = i1 | 0;
+ return;
+}
+function __ZNK10__cxxabiv116__shim_type_info5noop1Ev(i1) {
+ i1 = i1 | 0;
+ return;
+}
+function b5(i1, i2, i3) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ i3 = i3 | 0;
+ abort(5);
+}
+function __ZNK14b2PolygonShape13GetChildCountEv(i1) {
+ i1 = i1 | 0;
+ return 1;
+}
+function __ZN10__cxxabiv116__shim_type_infoD2Ev(i1) {
+ i1 = i1 | 0;
+ return;
+}
+function b7(i1, i2, d3) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ d3 = +d3;
+ abort(7);
+}
+function __ZNK11b2EdgeShape13GetChildCountEv(i1) {
+ i1 = i1 | 0;
+ return 1;
+}
+function __ZNK7b2Timer15GetMillisecondsEv(i1) {
+ i1 = i1 | 0;
+ return 0.0;
+}
+function __ZN25b2PolygonAndCircleContactD1Ev(i1) {
+ i1 = i1 | 0;
+ return;
+}
+function __ZN24b2ChainAndPolygonContactD1Ev(i1) {
+ i1 = i1 | 0;
+ return;
+}
+function b9(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ abort(9);
+ return 0;
+}
+function __ZN23b2EdgeAndPolygonContactD1Ev(i1) {
+ i1 = i1 | 0;
+ return;
+}
+function __ZN23b2ChainAndCircleContactD1Ev(i1) {
+ i1 = i1 | 0;
+ return;
+}
+function __ZN22b2EdgeAndCircleContactD1Ev(i1) {
+ i1 = i1 | 0;
+ return;
+}
+function dynCall_v(i1) {
+ i1 = i1 | 0;
+ FUNCTION_TABLE_v[i1 & 3]();
+}
+function __ZNKSt9bad_alloc4whatEv(i1) {
+ i1 = i1 | 0;
+ return 7688;
+}
+function ___cxa_pure_virtual__wrapper() {
+ ___cxa_pure_virtual();
+}
+function __ZN17b2ContactListenerD1Ev(i1) {
+ i1 = i1 | 0;
+ return;
+}
+function __ZN16b2PolygonContactD1Ev(i1) {
+ i1 = i1 | 0;
+ return;
+}
+function __ZN15b2ContactFilterD1Ev(i1) {
+ i1 = i1 | 0;
+ return;
+}
+function __ZN15b2CircleContactD1Ev(i1) {
+ i1 = i1 | 0;
+ return;
+}
+function __ZN14b2PolygonShapeD1Ev(i1) {
+ i1 = i1 | 0;
+ return;
+}
+function b3(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ abort(3);
+}
+function runPostSets() {
+ HEAP32[1932] = __ZTISt9exception;
+}
+function __ZN11b2EdgeShapeD1Ev(i1) {
+ i1 = i1 | 0;
+ return;
+}
+function __ZNSt9type_infoD2Ev(i1) {
+ i1 = i1 | 0;
+ return;
+}
+function __ZN7b2Timer5ResetEv(i1) {
+ i1 = i1 | 0;
+ return;
+}
+function stackRestore(i1) {
+ i1 = i1 | 0;
+ STACKTOP = i1;
+}
+function setTempRet9(i1) {
+ i1 = i1 | 0;
+ tempRet9 = i1;
+}
+function setTempRet8(i1) {
+ i1 = i1 | 0;
+ tempRet8 = i1;
+}
+function setTempRet7(i1) {
+ i1 = i1 | 0;
+ tempRet7 = i1;
+}
+function setTempRet6(i1) {
+ i1 = i1 | 0;
+ tempRet6 = i1;
+}
+function setTempRet5(i1) {
+ i1 = i1 | 0;
+ tempRet5 = i1;
+}
+function setTempRet4(i1) {
+ i1 = i1 | 0;
+ tempRet4 = i1;
+}
+function setTempRet3(i1) {
+ i1 = i1 | 0;
+ tempRet3 = i1;
+}
+function setTempRet2(i1) {
+ i1 = i1 | 0;
+ tempRet2 = i1;
+}
+function setTempRet1(i1) {
+ i1 = i1 | 0;
+ tempRet1 = i1;
+}
+function setTempRet0(i1) {
+ i1 = i1 | 0;
+ tempRet0 = i1;
+}
+function __ZN9b2ContactD1Ev(i1) {
+ i1 = i1 | 0;
+ return;
+}
+function __ZN7b2TimerC2Ev(i1) {
+ i1 = i1 | 0;
+ return;
+}
+function b4(i1) {
+ i1 = i1 | 0;
+ abort(4);
+ return 0;
+}
+function stackSave() {
+ return STACKTOP | 0;
+}
+function b2(i1) {
+ i1 = i1 | 0;
+ abort(2);
+}
+function b6() {
+ abort(6);
+}
+
+// EMSCRIPTEN_END_FUNCS
+ var FUNCTION_TABLE_iiii = [b0,__ZNK11b2EdgeShape9TestPointERK11b2TransformRK6b2Vec2,__ZNK14b2PolygonShape9TestPointERK11b2TransformRK6b2Vec2,__ZN15b2ContactFilter13ShouldCollideEP9b2FixtureS1_,__ZNK10__cxxabiv117__class_type_info9can_catchEPKNS_16__shim_type_infoERPv,b0,b0,b0];
+ var FUNCTION_TABLE_viiiii = [b1,__ZNK10__cxxabiv117__class_type_info16search_below_dstEPNS_19__dynamic_cast_infoEPKvib,__ZNK10__cxxabiv120__si_class_type_info16search_below_dstEPNS_19__dynamic_cast_infoEPKvib,b1];
+ var FUNCTION_TABLE_vi = [b2,__ZN11b2EdgeShapeD1Ev,__ZN11b2EdgeShapeD0Ev,__ZN14b2PolygonShapeD1Ev,__ZN14b2PolygonShapeD0Ev,__ZN17b2ContactListenerD1Ev,__ZN17b2ContactListenerD0Ev,__ZN15b2ContactFilterD1Ev,__ZN15b2ContactFilterD0Ev,__ZN9b2ContactD1Ev,__ZN9b2ContactD0Ev,__ZN22b2EdgeAndCircleContactD1Ev,__ZN22b2EdgeAndCircleContactD0Ev,__ZN23b2EdgeAndPolygonContactD1Ev,__ZN23b2EdgeAndPolygonContactD0Ev,__ZN25b2PolygonAndCircleContactD1Ev,__ZN25b2PolygonAndCircleContactD0Ev,__ZN16b2PolygonContactD1Ev,__ZN16b2PolygonContactD0Ev,__ZN23b2ChainAndCircleContactD1Ev,__ZN23b2ChainAndCircleContactD0Ev,__ZN24b2ChainAndPolygonContactD1Ev,__ZN24b2ChainAndPolygonContactD0Ev,__ZN15b2CircleContactD1Ev,__ZN15b2CircleContactD0Ev,__ZN10__cxxabiv116__shim_type_infoD2Ev,__ZN10__cxxabiv117__class_type_infoD0Ev,__ZNK10__cxxabiv116__shim_type_info5noop1Ev,__ZNK10__cxxabiv116__shim_type_info5noop2Ev
+ ,__ZN10__cxxabiv120__si_class_type_infoD0Ev,__ZNSt9bad_allocD2Ev,__ZNSt9bad_allocD0Ev];
+ var FUNCTION_TABLE_vii = [b3,__ZN17b2ContactListener12BeginContactEP9b2Contact,__ZN17b2ContactListener10EndContactEP9b2Contact,__ZN15b2CircleContact7DestroyEP9b2ContactP16b2BlockAllocator,__ZN25b2PolygonAndCircleContact7DestroyEP9b2ContactP16b2BlockAllocator,__ZN16b2PolygonContact7DestroyEP9b2ContactP16b2BlockAllocator,__ZN22b2EdgeAndCircleContact7DestroyEP9b2ContactP16b2BlockAllocator,__ZN23b2EdgeAndPolygonContact7DestroyEP9b2ContactP16b2BlockAllocator,__ZN23b2ChainAndCircleContact7DestroyEP9b2ContactP16b2BlockAllocator,__ZN24b2ChainAndPolygonContact7DestroyEP9b2ContactP16b2BlockAllocator,b3,b3,b3,b3,b3,b3];
+ var FUNCTION_TABLE_ii = [b4,__ZNK11b2EdgeShape13GetChildCountEv,__ZNK14b2PolygonShape13GetChildCountEv,__ZNKSt9bad_alloc4whatEv];
+ var FUNCTION_TABLE_viii = [b5,__ZN17b2ContactListener8PreSolveEP9b2ContactPK10b2Manifold,__ZN17b2ContactListener9PostSolveEP9b2ContactPK16b2ContactImpulse,b5];
+ var FUNCTION_TABLE_v = [b6,___cxa_pure_virtual__wrapper,__Z4iterv,b6];
+ var FUNCTION_TABLE_viid = [b7,__ZNK11b2EdgeShape11ComputeMassEP10b2MassDataf,__ZNK14b2PolygonShape11ComputeMassEP10b2MassDataf,b7];
+ var FUNCTION_TABLE_viiiiii = [b8,__ZNK10__cxxabiv117__class_type_info16search_above_dstEPNS_19__dynamic_cast_infoEPKvS4_ib,__ZNK10__cxxabiv120__si_class_type_info16search_above_dstEPNS_19__dynamic_cast_infoEPKvS4_ib,b8];
+ var FUNCTION_TABLE_iii = [b9,__ZNK11b2EdgeShape5CloneEP16b2BlockAllocator,__ZNK14b2PolygonShape5CloneEP16b2BlockAllocator,__Z14b2PairLessThanRK6b2PairS1_];
+ var FUNCTION_TABLE_iiiiii = [b10,__ZNK11b2EdgeShape7RayCastEP15b2RayCastOutputRK14b2RayCastInputRK11b2Transformi,__ZNK14b2PolygonShape7RayCastEP15b2RayCastOutputRK14b2RayCastInputRK11b2Transformi,__ZN15b2CircleContact6CreateEP9b2FixtureiS1_iP16b2BlockAllocator,__ZN25b2PolygonAndCircleContact6CreateEP9b2FixtureiS1_iP16b2BlockAllocator,__ZN16b2PolygonContact6CreateEP9b2FixtureiS1_iP16b2BlockAllocator,__ZN22b2EdgeAndCircleContact6CreateEP9b2FixtureiS1_iP16b2BlockAllocator,__ZN23b2EdgeAndPolygonContact6CreateEP9b2FixtureiS1_iP16b2BlockAllocator,__ZN23b2ChainAndCircleContact6CreateEP9b2FixtureiS1_iP16b2BlockAllocator,__ZN24b2ChainAndPolygonContact6CreateEP9b2FixtureiS1_iP16b2BlockAllocator,b10,b10,b10,b10,b10,b10];
+ var FUNCTION_TABLE_viiii = [b11,__ZNK11b2EdgeShape11ComputeAABBEP6b2AABBRK11b2Transformi,__ZNK14b2PolygonShape11ComputeAABBEP6b2AABBRK11b2Transformi,__ZN22b2EdgeAndCircleContact8EvaluateEP10b2ManifoldRK11b2TransformS4_,__ZN23b2EdgeAndPolygonContact8EvaluateEP10b2ManifoldRK11b2TransformS4_,__ZN25b2PolygonAndCircleContact8EvaluateEP10b2ManifoldRK11b2TransformS4_,__ZN16b2PolygonContact8EvaluateEP10b2ManifoldRK11b2TransformS4_,__ZN23b2ChainAndCircleContact8EvaluateEP10b2ManifoldRK11b2TransformS4_,__ZN24b2ChainAndPolygonContact8EvaluateEP10b2ManifoldRK11b2TransformS4_,__ZN15b2CircleContact8EvaluateEP10b2ManifoldRK11b2TransformS4_,__ZNK10__cxxabiv117__class_type_info27has_unambiguous_public_baseEPNS_19__dynamic_cast_infoEPvi,__ZNK10__cxxabiv120__si_class_type_info27has_unambiguous_public_baseEPNS_19__dynamic_cast_infoEPvi,b11,b11,b11,b11];
+
+ return { _strlen: _strlen, _free: _free, _main: _main, _memset: _memset, _malloc: _malloc, _memcpy: _memcpy, runPostSets: runPostSets, stackAlloc: stackAlloc, stackSave: stackSave, stackRestore: stackRestore, setThrew: setThrew, setTempRet0: setTempRet0, setTempRet1: setTempRet1, setTempRet2: setTempRet2, setTempRet3: setTempRet3, setTempRet4: setTempRet4, setTempRet5: setTempRet5, setTempRet6: setTempRet6, setTempRet7: setTempRet7, setTempRet8: setTempRet8, setTempRet9: setTempRet9, dynCall_iiii: dynCall_iiii, dynCall_viiiii: dynCall_viiiii, dynCall_vi: dynCall_vi, dynCall_vii: dynCall_vii, dynCall_ii: dynCall_ii, dynCall_viii: dynCall_viii, dynCall_v: dynCall_v, dynCall_viid: dynCall_viid, dynCall_viiiiii: dynCall_viiiiii, dynCall_iii: dynCall_iii, dynCall_iiiiii: dynCall_iiiiii, dynCall_viiii: dynCall_viiii };
+}).toString(),
+// EMSCRIPTEN_END_ASM
+{ "abort": abort, "assert": assert, "asmPrintInt": asmPrintInt, "asmPrintFloat": asmPrintFloat, "min": Math_min, "invoke_iiii": invoke_iiii, "invoke_viiiii": invoke_viiiii, "invoke_vi": invoke_vi, "invoke_vii": invoke_vii, "invoke_ii": invoke_ii, "invoke_viii": invoke_viii, "invoke_v": invoke_v, "invoke_viid": invoke_viid, "invoke_viiiiii": invoke_viiiiii, "invoke_iii": invoke_iii, "invoke_iiiiii": invoke_iiiiii, "invoke_viiii": invoke_viiii, "___cxa_throw": ___cxa_throw, "_emscripten_run_script": _emscripten_run_script, "_cosf": _cosf, "_send": _send, "__ZSt9terminatev": __ZSt9terminatev, "__reallyNegative": __reallyNegative, "___cxa_is_number_type": ___cxa_is_number_type, "___assert_fail": ___assert_fail, "___cxa_allocate_exception": ___cxa_allocate_exception, "___cxa_find_matching_catch": ___cxa_find_matching_catch, "_fflush": _fflush, "_pwrite": _pwrite, "___setErrNo": ___setErrNo, "_sbrk": _sbrk, "___cxa_begin_catch": ___cxa_begin_catch, "_sinf": _sinf, "_fileno": _fileno, "___resumeException": ___resumeException, "__ZSt18uncaught_exceptionv": __ZSt18uncaught_exceptionv, "_sysconf": _sysconf, "_clock": _clock, "_emscripten_memcpy_big": _emscripten_memcpy_big, "_puts": _puts, "_mkport": _mkport, "_floorf": _floorf, "_sqrtf": _sqrtf, "_write": _write, "_emscripten_set_main_loop": _emscripten_set_main_loop, "___errno_location": ___errno_location, "__ZNSt9exceptionD2Ev": __ZNSt9exceptionD2Ev, "_printf": _printf, "___cxa_does_inherit": ___cxa_does_inherit, "__exit": __exit, "_fputc": _fputc, "_abort": _abort, "_fwrite": _fwrite, "_time": _time, "_fprintf": _fprintf, "_emscripten_cancel_main_loop": _emscripten_cancel_main_loop, "__formatString": __formatString, "_fputs": _fputs, "_exit": _exit, "___cxa_pure_virtual": ___cxa_pure_virtual, "STACKTOP": STACKTOP, "STACK_MAX": STACK_MAX, "tempDoublePtr": tempDoublePtr, "ABORT": ABORT, "NaN": NaN, "Infinity": Infinity, "__ZTISt9exception": __ZTISt9exception }, buffer);
+var _strlen = Module["_strlen"] = asm["_strlen"];
+var _free = Module["_free"] = asm["_free"];
+var _main = Module["_main"] = asm["_main"];
+var _memset = Module["_memset"] = asm["_memset"];
+var _malloc = Module["_malloc"] = asm["_malloc"];
+var _memcpy = Module["_memcpy"] = asm["_memcpy"];
+var runPostSets = Module["runPostSets"] = asm["runPostSets"];
+var dynCall_iiii = Module["dynCall_iiii"] = asm["dynCall_iiii"];
+var dynCall_viiiii = Module["dynCall_viiiii"] = asm["dynCall_viiiii"];
+var dynCall_vi = Module["dynCall_vi"] = asm["dynCall_vi"];
+var dynCall_vii = Module["dynCall_vii"] = asm["dynCall_vii"];
+var dynCall_ii = Module["dynCall_ii"] = asm["dynCall_ii"];
+var dynCall_viii = Module["dynCall_viii"] = asm["dynCall_viii"];
+var dynCall_v = Module["dynCall_v"] = asm["dynCall_v"];
+var dynCall_viid = Module["dynCall_viid"] = asm["dynCall_viid"];
+var dynCall_viiiiii = Module["dynCall_viiiiii"] = asm["dynCall_viiiiii"];
+var dynCall_iii = Module["dynCall_iii"] = asm["dynCall_iii"];
+var dynCall_iiiiii = Module["dynCall_iiiiii"] = asm["dynCall_iiiiii"];
+var dynCall_viiii = Module["dynCall_viiii"] = asm["dynCall_viiii"];
+
+Runtime.stackAlloc = function(size) { return asm['stackAlloc'](size) };
+Runtime.stackSave = function() { return asm['stackSave']() };
+Runtime.stackRestore = function(top) { asm['stackRestore'](top) };
+
+
+// Warning: printing of i64 values may be slightly rounded! No deep i64 math used, so precise i64 code not included
+var i64Math = null;
+
+// === Auto-generated postamble setup entry stuff ===
+
+if (memoryInitializer) {
+ if (ENVIRONMENT_IS_NODE || ENVIRONMENT_IS_SHELL) {
+ var data = Module['readBinary'](memoryInitializer);
+ HEAPU8.set(data, STATIC_BASE);
+ } else {
+ addRunDependency('memory initializer');
+ Browser.asyncLoad(memoryInitializer, function(data) {
+ HEAPU8.set(data, STATIC_BASE);
+ removeRunDependency('memory initializer');
+ }, function(data) {
+ throw 'could not load memory initializer ' + memoryInitializer;
+ });
+ }
+}
+
+function ExitStatus(status) {
+ this.name = "ExitStatus";
+ this.message = "Program terminated with exit(" + status + ")";
+ this.status = status;
+};
+ExitStatus.prototype = new Error();
+ExitStatus.prototype.constructor = ExitStatus;
+
+var initialStackTop;
+var preloadStartTime = null;
+var calledMain = false;
+
+dependenciesFulfilled = function runCaller() {
+ // If run has never been called, and we should call run (INVOKE_RUN is true, and Module.noInitialRun is not false)
+ if (!Module['calledRun'] && shouldRunNow) run([].concat(Module["arguments"]));
+ if (!Module['calledRun']) dependenciesFulfilled = runCaller; // try this again later, after new deps are fulfilled
+}
+
+Module['callMain'] = Module.callMain = function callMain(args) {
+ assert(runDependencies == 0, 'cannot call main when async dependencies remain! (listen on __ATMAIN__)');
+ assert(__ATPRERUN__.length == 0, 'cannot call main when preRun functions remain to be called');
+
+ args = args || [];
+
+ ensureInitRuntime();
+
+ var argc = args.length+1;
+ function pad() {
+ for (var i = 0; i < 4-1; i++) {
+ argv.push(0);
+ }
+ }
+ var argv = [allocate(intArrayFromString("/bin/this.program"), 'i8', ALLOC_NORMAL) ];
+ pad();
+ for (var i = 0; i < argc-1; i = i + 1) {
+ argv.push(allocate(intArrayFromString(args[i]), 'i8', ALLOC_NORMAL));
+ pad();
+ }
+ argv.push(0);
+ argv = allocate(argv, 'i32', ALLOC_NORMAL);
+
+ initialStackTop = STACKTOP;
+
+ try {
+
+ var ret = Module['_main'](argc, argv, 0);
+
+
+ // if we're not running an evented main loop, it's time to exit
+ if (!Module['noExitRuntime']) {
+ exit(ret);
+ }
+ }
+ catch(e) {
+ if (e instanceof ExitStatus) {
+ // exit() throws this once it's done to make sure execution
+ // has been stopped completely
+ return;
+ } else if (e == 'SimulateInfiniteLoop') {
+ // running an evented main loop, don't immediately exit
+ Module['noExitRuntime'] = true;
+ return;
+ } else {
+ if (e && typeof e === 'object' && e.stack) Module.printErr('exception thrown: ' + [e, e.stack]);
+ throw e;
+ }
+ } finally {
+ calledMain = true;
+ }
+}
+
+
+
+
+function run(args) {
+ args = args || Module['arguments'];
+
+ if (preloadStartTime === null) preloadStartTime = Date.now();
+
+ if (runDependencies > 0) {
+ Module.printErr('run() called, but dependencies remain, so not running');
+ return;
+ }
+
+ preRun();
+
+ if (runDependencies > 0) return; // a preRun added a dependency, run will be called later
+ if (Module['calledRun']) return; // run may have just been called through dependencies being fulfilled just in this very frame
+
+ function doRun() {
+ if (Module['calledRun']) return; // run may have just been called while the async setStatus time below was happening
+ Module['calledRun'] = true;
+
+ ensureInitRuntime();
+
+ preMain();
+
+ if (ENVIRONMENT_IS_WEB && preloadStartTime !== null) {
+ Module.printErr('pre-main prep time: ' + (Date.now() - preloadStartTime) + ' ms');
+ }
+
+ if (Module['_main'] && shouldRunNow) {
+ Module['callMain'](args);
+ }
+
+ postRun();
+ }
+
+ if (Module['setStatus']) {
+ Module['setStatus']('Running...');
+ setTimeout(function() {
+ setTimeout(function() {
+ Module['setStatus']('');
+ }, 1);
+ if (!ABORT) doRun();
+ }, 1);
+ } else {
+ doRun();
+ }
+}
+Module['run'] = Module.run = run;
+
+function exit(status) {
+ ABORT = true;
+ EXITSTATUS = status;
+ STACKTOP = initialStackTop;
+
+ // exit the runtime
+ exitRuntime();
+
+ // TODO We should handle this differently based on environment.
+ // In the browser, the best we can do is throw an exception
+ // to halt execution, but in node we could process.exit and
+ // I'd imagine SM shell would have something equivalent.
+ // This would let us set a proper exit status (which
+ // would be great for checking test exit statuses).
+ // https://github.com/kripken/emscripten/issues/1371
+
+ // throw an exception to halt the current execution
+ throw new ExitStatus(status);
+}
+Module['exit'] = Module.exit = exit;
+
+function abort(text) {
+ if (text) {
+ Module.print(text);
+ Module.printErr(text);
+ }
+
+ ABORT = true;
+ EXITSTATUS = 1;
+
+ var extra = '\nIf this abort() is unexpected, build with -s ASSERTIONS=1 which can give more information.';
+
+ throw 'abort() at ' + stackTrace() + extra;
+}
+Module['abort'] = Module.abort = abort;
+
+// {{PRE_RUN_ADDITIONS}}
+
+if (Module['preInit']) {
+ if (typeof Module['preInit'] == 'function') Module['preInit'] = [Module['preInit']];
+ while (Module['preInit'].length > 0) {
+ Module['preInit'].pop()();
+ }
+}
+
+// shouldRunNow refers to calling main(), not run().
+var shouldRunNow = true;
+if (Module['noInitialRun']) {
+ shouldRunNow = false;
+}
+
+
+run([].concat(Module["arguments"]));
diff --git a/deps/v8/test/mjsunit/wasm/embenchen/copy.js b/deps/v8/test/mjsunit/wasm/embenchen/copy.js
new file mode 100644
index 0000000000..70609aa242
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/embenchen/copy.js
@@ -0,0 +1,5979 @@
+// Modified embenchen to direct to asm-wasm.
+// Flags: --expose-wasm
+
+var EXPECTED_OUTPUT = 'sum:8930\n';
+var Module = {
+ arguments: [1],
+ print: function(x) {Module.printBuffer += x + '\n';},
+ preRun: [function() {Module.printBuffer = ''}],
+ postRun: [function() {
+ assertEquals(EXPECTED_OUTPUT, Module.printBuffer);
+ }],
+};
+// The Module object: Our interface to the outside world. We import
+// and export values on it, and do the work to get that through
+// closure compiler if necessary. There are various ways Module can be used:
+// 1. Not defined. We create it here
+// 2. A function parameter, function(Module) { ..generated code.. }
+// 3. pre-run appended it, var Module = {}; ..generated code..
+// 4. External script tag defines var Module.
+// We need to do an eval in order to handle the closure compiler
+// case, where this code here is minified but Module was defined
+// elsewhere (e.g. case 4 above). We also need to check if Module
+// already exists (e.g. case 3 above).
+// Note that if you want to run closure, and also to use Module
+// after the generated code, you will need to define var Module = {};
+// before the code. Then that object will be used in the code, and you
+// can continue to use Module afterwards as well.
+var Module;
+if (!Module) Module = (typeof Module !== 'undefined' ? Module : null) || {};
+
+// Sometimes an existing Module object exists with properties
+// meant to overwrite the default module functionality. Here
+// we collect those properties and reapply _after_ we configure
+// the current environment's defaults to avoid having to be so
+// defensive during initialization.
+var moduleOverrides = {};
+for (var key in Module) {
+ if (Module.hasOwnProperty(key)) {
+ moduleOverrides[key] = Module[key];
+ }
+}
+
+// The environment setup code below is customized to use Module.
+// *** Environment setup code ***
+var ENVIRONMENT_IS_NODE = typeof process === 'object' && typeof require === 'function';
+var ENVIRONMENT_IS_WEB = typeof window === 'object';
+var ENVIRONMENT_IS_WORKER = typeof importScripts === 'function';
+var ENVIRONMENT_IS_SHELL = !ENVIRONMENT_IS_WEB && !ENVIRONMENT_IS_NODE && !ENVIRONMENT_IS_WORKER;
+
+if (ENVIRONMENT_IS_NODE) {
+ // Expose functionality in the same simple way that the shells work
+ // Note that we pollute the global namespace here, otherwise we break in node
+ if (!Module['print']) Module['print'] = function print(x) {
+ process['stdout'].write(x + '\n');
+ };
+ if (!Module['printErr']) Module['printErr'] = function printErr(x) {
+ process['stderr'].write(x + '\n');
+ };
+
+ var nodeFS = require('fs');
+ var nodePath = require('path');
+
+ Module['read'] = function read(filename, binary) {
+ filename = nodePath['normalize'](filename);
+ var ret = nodeFS['readFileSync'](filename);
+ // The path is absolute if the normalized version is the same as the resolved.
+ if (!ret && filename != nodePath['resolve'](filename)) {
+ filename = path.join(__dirname, '..', 'src', filename);
+ ret = nodeFS['readFileSync'](filename);
+ }
+ if (ret && !binary) ret = ret.toString();
+ return ret;
+ };
+
+ Module['readBinary'] = function readBinary(filename) { return Module['read'](filename, true) };
+
+ Module['load'] = function load(f) {
+ globalEval(read(f));
+ };
+
+ Module['arguments'] = process['argv'].slice(2);
+
+ module['exports'] = Module;
+}
+else if (ENVIRONMENT_IS_SHELL) {
+ if (!Module['print']) Module['print'] = print;
+ if (typeof printErr != 'undefined') Module['printErr'] = printErr; // not present in v8 or older sm
+
+ if (typeof read != 'undefined') {
+ Module['read'] = read;
+ } else {
+ Module['read'] = function read() { throw 'no read() available (jsc?)' };
+ }
+
+ Module['readBinary'] = function readBinary(f) {
+ return read(f, 'binary');
+ };
+
+ if (typeof scriptArgs != 'undefined') {
+ Module['arguments'] = scriptArgs;
+ } else if (typeof arguments != 'undefined') {
+ Module['arguments'] = arguments;
+ }
+
+ this['Module'] = Module;
+
+ eval("if (typeof gc === 'function' && gc.toString().indexOf('[native code]') > 0) var gc = undefined"); // wipe out the SpiderMonkey shell 'gc' function, which can confuse closure (uses it as a minified name, and it is then initted to a non-falsey value unexpectedly)
+}
+else if (ENVIRONMENT_IS_WEB || ENVIRONMENT_IS_WORKER) {
+ Module['read'] = function read(url) {
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, false);
+ xhr.send(null);
+ return xhr.responseText;
+ };
+
+ if (typeof arguments != 'undefined') {
+ Module['arguments'] = arguments;
+ }
+
+ if (typeof console !== 'undefined') {
+ if (!Module['print']) Module['print'] = function print(x) {
+ console.log(x);
+ };
+ if (!Module['printErr']) Module['printErr'] = function printErr(x) {
+ console.log(x);
+ };
+ } else {
+ // Probably a worker, and without console.log. We can do very little here...
+ var TRY_USE_DUMP = false;
+ if (!Module['print']) Module['print'] = (TRY_USE_DUMP && (typeof(dump) !== "undefined") ? (function(x) {
+ dump(x);
+ }) : (function(x) {
+ // self.postMessage(x); // enable this if you want stdout to be sent as messages
+ }));
+ }
+
+ if (ENVIRONMENT_IS_WEB) {
+ window['Module'] = Module;
+ } else {
+ Module['load'] = importScripts;
+ }
+}
+else {
+ // Unreachable because SHELL is dependant on the others
+ throw 'Unknown runtime environment. Where are we?';
+}
+
+function globalEval(x) {
+ eval.call(null, x);
+}
+if (!Module['load'] == 'undefined' && Module['read']) {
+ Module['load'] = function load(f) {
+ globalEval(Module['read'](f));
+ };
+}
+if (!Module['print']) {
+ Module['print'] = function(){};
+}
+if (!Module['printErr']) {
+ Module['printErr'] = Module['print'];
+}
+if (!Module['arguments']) {
+ Module['arguments'] = [];
+}
+// *** Environment setup code ***
+
+// Closure helpers
+Module.print = Module['print'];
+Module.printErr = Module['printErr'];
+
+// Callbacks
+Module['preRun'] = [];
+Module['postRun'] = [];
+
+// Merge back in the overrides
+for (var key in moduleOverrides) {
+ if (moduleOverrides.hasOwnProperty(key)) {
+ Module[key] = moduleOverrides[key];
+ }
+}
+
+
+
+// === Auto-generated preamble library stuff ===
+
+//========================================
+// Runtime code shared with compiler
+//========================================
+
+var Runtime = {
+ stackSave: function () {
+ return STACKTOP;
+ },
+ stackRestore: function (stackTop) {
+ STACKTOP = stackTop;
+ },
+ forceAlign: function (target, quantum) {
+ quantum = quantum || 4;
+ if (quantum == 1) return target;
+ if (isNumber(target) && isNumber(quantum)) {
+ return Math.ceil(target/quantum)*quantum;
+ } else if (isNumber(quantum) && isPowerOfTwo(quantum)) {
+ return '(((' +target + ')+' + (quantum-1) + ')&' + -quantum + ')';
+ }
+ return 'Math.ceil((' + target + ')/' + quantum + ')*' + quantum;
+ },
+ isNumberType: function (type) {
+ return type in Runtime.INT_TYPES || type in Runtime.FLOAT_TYPES;
+ },
+ isPointerType: function isPointerType(type) {
+ return type[type.length-1] == '*';
+},
+ isStructType: function isStructType(type) {
+ if (isPointerType(type)) return false;
+ if (isArrayType(type)) return true;
+ if (/<?\{ ?[^}]* ?\}>?/.test(type)) return true; // { i32, i8 } etc. - anonymous struct types
+ // See comment in isStructPointerType()
+ return type[0] == '%';
+},
+ INT_TYPES: {"i1":0,"i8":0,"i16":0,"i32":0,"i64":0},
+ FLOAT_TYPES: {"float":0,"double":0},
+ or64: function (x, y) {
+ var l = (x | 0) | (y | 0);
+ var h = (Math.round(x / 4294967296) | Math.round(y / 4294967296)) * 4294967296;
+ return l + h;
+ },
+ and64: function (x, y) {
+ var l = (x | 0) & (y | 0);
+ var h = (Math.round(x / 4294967296) & Math.round(y / 4294967296)) * 4294967296;
+ return l + h;
+ },
+ xor64: function (x, y) {
+ var l = (x | 0) ^ (y | 0);
+ var h = (Math.round(x / 4294967296) ^ Math.round(y / 4294967296)) * 4294967296;
+ return l + h;
+ },
+ getNativeTypeSize: function (type) {
+ switch (type) {
+ case 'i1': case 'i8': return 1;
+ case 'i16': return 2;
+ case 'i32': return 4;
+ case 'i64': return 8;
+ case 'float': return 4;
+ case 'double': return 8;
+ default: {
+ if (type[type.length-1] === '*') {
+ return Runtime.QUANTUM_SIZE; // A pointer
+ } else if (type[0] === 'i') {
+ var bits = parseInt(type.substr(1));
+ assert(bits % 8 === 0);
+ return bits/8;
+ } else {
+ return 0;
+ }
+ }
+ }
+ },
+ getNativeFieldSize: function (type) {
+ return Math.max(Runtime.getNativeTypeSize(type), Runtime.QUANTUM_SIZE);
+ },
+ dedup: function dedup(items, ident) {
+ var seen = {};
+ if (ident) {
+ return items.filter(function(item) {
+ if (seen[item[ident]]) return false;
+ seen[item[ident]] = true;
+ return true;
+ });
+ } else {
+ return items.filter(function(item) {
+ if (seen[item]) return false;
+ seen[item] = true;
+ return true;
+ });
+ }
+},
+ set: function set() {
+ var args = typeof arguments[0] === 'object' ? arguments[0] : arguments;
+ var ret = {};
+ for (var i = 0; i < args.length; i++) {
+ ret[args[i]] = 0;
+ }
+ return ret;
+},
+ STACK_ALIGN: 8,
+ getAlignSize: function (type, size, vararg) {
+ // we align i64s and doubles on 64-bit boundaries, unlike x86
+ if (!vararg && (type == 'i64' || type == 'double')) return 8;
+ if (!type) return Math.min(size, 8); // align structures internally to 64 bits
+ return Math.min(size || (type ? Runtime.getNativeFieldSize(type) : 0), Runtime.QUANTUM_SIZE);
+ },
+ calculateStructAlignment: function calculateStructAlignment(type) {
+ type.flatSize = 0;
+ type.alignSize = 0;
+ var diffs = [];
+ var prev = -1;
+ var index = 0;
+ type.flatIndexes = type.fields.map(function(field) {
+ index++;
+ var size, alignSize;
+ if (Runtime.isNumberType(field) || Runtime.isPointerType(field)) {
+ size = Runtime.getNativeTypeSize(field); // pack char; char; in structs, also char[X]s.
+ alignSize = Runtime.getAlignSize(field, size);
+ } else if (Runtime.isStructType(field)) {
+ if (field[1] === '0') {
+ // this is [0 x something]. When inside another structure like here, it must be at the end,
+ // and it adds no size
+ // XXX this happens in java-nbody for example... assert(index === type.fields.length, 'zero-length in the middle!');
+ size = 0;
+ if (Types.types[field]) {
+ alignSize = Runtime.getAlignSize(null, Types.types[field].alignSize);
+ } else {
+ alignSize = type.alignSize || QUANTUM_SIZE;
+ }
+ } else {
+ size = Types.types[field].flatSize;
+ alignSize = Runtime.getAlignSize(null, Types.types[field].alignSize);
+ }
+ } else if (field[0] == 'b') {
+ // bN, large number field, like a [N x i8]
+ size = field.substr(1)|0;
+ alignSize = 1;
+ } else if (field[0] === '<') {
+ // vector type
+ size = alignSize = Types.types[field].flatSize; // fully aligned
+ } else if (field[0] === 'i') {
+ // illegal integer field, that could not be legalized because it is an internal structure field
+ // it is ok to have such fields, if we just use them as markers of field size and nothing more complex
+ size = alignSize = parseInt(field.substr(1))/8;
+ assert(size % 1 === 0, 'cannot handle non-byte-size field ' + field);
+ } else {
+ assert(false, 'invalid type for calculateStructAlignment');
+ }
+ if (type.packed) alignSize = 1;
+ type.alignSize = Math.max(type.alignSize, alignSize);
+ var curr = Runtime.alignMemory(type.flatSize, alignSize); // if necessary, place this on aligned memory
+ type.flatSize = curr + size;
+ if (prev >= 0) {
+ diffs.push(curr-prev);
+ }
+ prev = curr;
+ return curr;
+ });
+ if (type.name_ && type.name_[0] === '[') {
+ // arrays have 2 elements, so we get the proper difference. then we scale here. that way we avoid
+ // allocating a potentially huge array for [999999 x i8] etc.
+ type.flatSize = parseInt(type.name_.substr(1))*type.flatSize/2;
+ }
+ type.flatSize = Runtime.alignMemory(type.flatSize, type.alignSize);
+ if (diffs.length == 0) {
+ type.flatFactor = type.flatSize;
+ } else if (Runtime.dedup(diffs).length == 1) {
+ type.flatFactor = diffs[0];
+ }
+ type.needsFlattening = (type.flatFactor != 1);
+ return type.flatIndexes;
+ },
+ generateStructInfo: function (struct, typeName, offset) {
+ var type, alignment;
+ if (typeName) {
+ offset = offset || 0;
+ type = (typeof Types === 'undefined' ? Runtime.typeInfo : Types.types)[typeName];
+ if (!type) return null;
+ if (type.fields.length != struct.length) {
+ printErr('Number of named fields must match the type for ' + typeName + ': possibly duplicate struct names. Cannot return structInfo');
+ return null;
+ }
+ alignment = type.flatIndexes;
+ } else {
+ var type = { fields: struct.map(function(item) { return item[0] }) };
+ alignment = Runtime.calculateStructAlignment(type);
+ }
+ var ret = {
+ __size__: type.flatSize
+ };
+ if (typeName) {
+ struct.forEach(function(item, i) {
+ if (typeof item === 'string') {
+ ret[item] = alignment[i] + offset;
+ } else {
+ // embedded struct
+ var key;
+ for (var k in item) key = k;
+ ret[key] = Runtime.generateStructInfo(item[key], type.fields[i], alignment[i]);
+ }
+ });
+ } else {
+ struct.forEach(function(item, i) {
+ ret[item[1]] = alignment[i];
+ });
+ }
+ return ret;
+ },
+ dynCall: function (sig, ptr, args) {
+ if (args && args.length) {
+ if (!args.splice) args = Array.prototype.slice.call(args);
+ args.splice(0, 0, ptr);
+ return Module['dynCall_' + sig].apply(null, args);
+ } else {
+ return Module['dynCall_' + sig].call(null, ptr);
+ }
+ },
+ functionPointers: [],
+ addFunction: function (func) {
+ for (var i = 0; i < Runtime.functionPointers.length; i++) {
+ if (!Runtime.functionPointers[i]) {
+ Runtime.functionPointers[i] = func;
+ return 2*(1 + i);
+ }
+ }
+ throw 'Finished up all reserved function pointers. Use a higher value for RESERVED_FUNCTION_POINTERS.';
+ },
+ removeFunction: function (index) {
+ Runtime.functionPointers[(index-2)/2] = null;
+ },
+ getAsmConst: function (code, numArgs) {
+ // code is a constant string on the heap, so we can cache these
+ if (!Runtime.asmConstCache) Runtime.asmConstCache = {};
+ var func = Runtime.asmConstCache[code];
+ if (func) return func;
+ var args = [];
+ for (var i = 0; i < numArgs; i++) {
+ args.push(String.fromCharCode(36) + i); // $0, $1 etc
+ }
+ var source = Pointer_stringify(code);
+ if (source[0] === '"') {
+ // tolerate EM_ASM("..code..") even though EM_ASM(..code..) is correct
+ if (source.indexOf('"', 1) === source.length-1) {
+ source = source.substr(1, source.length-2);
+ } else {
+ // something invalid happened, e.g. EM_ASM("..code($0)..", input)
+ abort('invalid EM_ASM input |' + source + '|. Please use EM_ASM(..code..) (no quotes) or EM_ASM({ ..code($0).. }, input) (to input values)');
+ }
+ }
+ try {
+ var evalled = eval('(function(' + args.join(',') + '){ ' + source + ' })'); // new Function does not allow upvars in node
+ } catch(e) {
+ Module.printErr('error in executing inline EM_ASM code: ' + e + ' on: \n\n' + source + '\n\nwith args |' + args + '| (make sure to use the right one out of EM_ASM, EM_ASM_ARGS, etc.)');
+ throw e;
+ }
+ return Runtime.asmConstCache[code] = evalled;
+ },
+ warnOnce: function (text) {
+ if (!Runtime.warnOnce.shown) Runtime.warnOnce.shown = {};
+ if (!Runtime.warnOnce.shown[text]) {
+ Runtime.warnOnce.shown[text] = 1;
+ Module.printErr(text);
+ }
+ },
+ funcWrappers: {},
+ getFuncWrapper: function (func, sig) {
+ assert(sig);
+ if (!Runtime.funcWrappers[func]) {
+ Runtime.funcWrappers[func] = function dynCall_wrapper() {
+ return Runtime.dynCall(sig, func, arguments);
+ };
+ }
+ return Runtime.funcWrappers[func];
+ },
+ UTF8Processor: function () {
+ var buffer = [];
+ var needed = 0;
+ this.processCChar = function (code) {
+ code = code & 0xFF;
+
+ if (buffer.length == 0) {
+ if ((code & 0x80) == 0x00) { // 0xxxxxxx
+ return String.fromCharCode(code);
+ }
+ buffer.push(code);
+ if ((code & 0xE0) == 0xC0) { // 110xxxxx
+ needed = 1;
+ } else if ((code & 0xF0) == 0xE0) { // 1110xxxx
+ needed = 2;
+ } else { // 11110xxx
+ needed = 3;
+ }
+ return '';
+ }
+
+ if (needed) {
+ buffer.push(code);
+ needed--;
+ if (needed > 0) return '';
+ }
+
+ var c1 = buffer[0];
+ var c2 = buffer[1];
+ var c3 = buffer[2];
+ var c4 = buffer[3];
+ var ret;
+ if (buffer.length == 2) {
+ ret = String.fromCharCode(((c1 & 0x1F) << 6) | (c2 & 0x3F));
+ } else if (buffer.length == 3) {
+ ret = String.fromCharCode(((c1 & 0x0F) << 12) | ((c2 & 0x3F) << 6) | (c3 & 0x3F));
+ } else {
+ // http://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
+ var codePoint = ((c1 & 0x07) << 18) | ((c2 & 0x3F) << 12) |
+ ((c3 & 0x3F) << 6) | (c4 & 0x3F);
+ ret = String.fromCharCode(
+ Math.floor((codePoint - 0x10000) / 0x400) + 0xD800,
+ (codePoint - 0x10000) % 0x400 + 0xDC00);
+ }
+ buffer.length = 0;
+ return ret;
+ }
+ this.processJSString = function processJSString(string) {
+ /* TODO: use TextEncoder when present,
+ var encoder = new TextEncoder();
+ encoder['encoding'] = "utf-8";
+ var utf8Array = encoder['encode'](aMsg.data);
+ */
+ string = unescape(encodeURIComponent(string));
+ var ret = [];
+ for (var i = 0; i < string.length; i++) {
+ ret.push(string.charCodeAt(i));
+ }
+ return ret;
+ }
+ },
+ getCompilerSetting: function (name) {
+ throw 'You must build with -s RETAIN_COMPILER_SETTINGS=1 for Runtime.getCompilerSetting or emscripten_get_compiler_setting to work';
+ },
+ stackAlloc: function (size) { var ret = STACKTOP;STACKTOP = (STACKTOP + size)|0;STACKTOP = (((STACKTOP)+7)&-8); return ret; },
+ staticAlloc: function (size) { var ret = STATICTOP;STATICTOP = (STATICTOP + size)|0;STATICTOP = (((STATICTOP)+7)&-8); return ret; },
+ dynamicAlloc: function (size) { var ret = DYNAMICTOP;DYNAMICTOP = (DYNAMICTOP + size)|0;DYNAMICTOP = (((DYNAMICTOP)+7)&-8); if (DYNAMICTOP >= TOTAL_MEMORY) enlargeMemory();; return ret; },
+ alignMemory: function (size,quantum) { var ret = size = Math.ceil((size)/(quantum ? quantum : 8))*(quantum ? quantum : 8); return ret; },
+ makeBigInt: function (low,high,unsigned) { var ret = (unsigned ? ((+((low>>>0)))+((+((high>>>0)))*(+4294967296))) : ((+((low>>>0)))+((+((high|0)))*(+4294967296)))); return ret; },
+ GLOBAL_BASE: 8,
+ QUANTUM_SIZE: 4,
+ __dummy__: 0
+}
+
+
+Module['Runtime'] = Runtime;
+
+
+
+
+
+
+
+
+
+//========================================
+// Runtime essentials
+//========================================
+
+var __THREW__ = 0; // Used in checking for thrown exceptions.
+
+var ABORT = false; // whether we are quitting the application. no code should run after this. set in exit() and abort()
+var EXITSTATUS = 0;
+
+var undef = 0;
+// tempInt is used for 32-bit signed values or smaller. tempBigInt is used
+// for 32-bit unsigned values or more than 32 bits. TODO: audit all uses of tempInt
+var tempValue, tempInt, tempBigInt, tempInt2, tempBigInt2, tempPair, tempBigIntI, tempBigIntR, tempBigIntS, tempBigIntP, tempBigIntD, tempDouble, tempFloat;
+var tempI64, tempI64b;
+var tempRet0, tempRet1, tempRet2, tempRet3, tempRet4, tempRet5, tempRet6, tempRet7, tempRet8, tempRet9;
+
+function assert(condition, text) {
+ if (!condition) {
+ abort('Assertion failed: ' + text);
+ }
+}
+
+var globalScope = this;
+
+// C calling interface. A convenient way to call C functions (in C files, or
+// defined with extern "C").
+//
+// Note: LLVM optimizations can inline and remove functions, after which you will not be
+// able to call them. Closure can also do so. To avoid that, add your function to
+// the exports using something like
+//
+// -s EXPORTED_FUNCTIONS='["_main", "_myfunc"]'
+//
+// @param ident The name of the C function (note that C++ functions will be name-mangled - use extern "C")
+// @param returnType The return type of the function, one of the JS types 'number', 'string' or 'array' (use 'number' for any C pointer, and
+// 'array' for JavaScript arrays and typed arrays; note that arrays are 8-bit).
+// @param argTypes An array of the types of arguments for the function (if there are no arguments, this can be ommitted). Types are as in returnType,
+// except that 'array' is not possible (there is no way for us to know the length of the array)
+// @param args An array of the arguments to the function, as native JS values (as in returnType)
+// Note that string arguments will be stored on the stack (the JS string will become a C string on the stack).
+// @return The return value, as a native JS value (as in returnType)
+function ccall(ident, returnType, argTypes, args) {
+ return ccallFunc(getCFunc(ident), returnType, argTypes, args);
+}
+Module["ccall"] = ccall;
+
+// Returns the C function with a specified identifier (for C++, you need to do manual name mangling)
+function getCFunc(ident) {
+ try {
+ var func = Module['_' + ident]; // closure exported function
+ if (!func) func = eval('_' + ident); // explicit lookup
+ } catch(e) {
+ }
+ assert(func, 'Cannot call unknown function ' + ident + ' (perhaps LLVM optimizations or closure removed it?)');
+ return func;
+}
+
+// Internal function that does a C call using a function, not an identifier
+function ccallFunc(func, returnType, argTypes, args) {
+ var stack = 0;
+ function toC(value, type) {
+ if (type == 'string') {
+ if (value === null || value === undefined || value === 0) return 0; // null string
+ value = intArrayFromString(value);
+ type = 'array';
+ }
+ if (type == 'array') {
+ if (!stack) stack = Runtime.stackSave();
+ var ret = Runtime.stackAlloc(value.length);
+ writeArrayToMemory(value, ret);
+ return ret;
+ }
+ return value;
+ }
+ function fromC(value, type) {
+ if (type == 'string') {
+ return Pointer_stringify(value);
+ }
+ assert(type != 'array');
+ return value;
+ }
+ var i = 0;
+ var cArgs = args ? args.map(function(arg) {
+ return toC(arg, argTypes[i++]);
+ }) : [];
+ var ret = fromC(func.apply(null, cArgs), returnType);
+ if (stack) Runtime.stackRestore(stack);
+ return ret;
+}
+
+// Returns a native JS wrapper for a C function. This is similar to ccall, but
+// returns a function you can call repeatedly in a normal way. For example:
+//
+// var my_function = cwrap('my_c_function', 'number', ['number', 'number']);
+// alert(my_function(5, 22));
+// alert(my_function(99, 12));
+//
+function cwrap(ident, returnType, argTypes) {
+ var func = getCFunc(ident);
+ return function() {
+ return ccallFunc(func, returnType, argTypes, Array.prototype.slice.call(arguments));
+ }
+}
+Module["cwrap"] = cwrap;
+
+// Sets a value in memory in a dynamic way at run-time. Uses the
+// type data. This is the same as makeSetValue, except that
+// makeSetValue is done at compile-time and generates the needed
+// code then, whereas this function picks the right code at
+// run-time.
+// Note that setValue and getValue only do *aligned* writes and reads!
+// Note that ccall uses JS types as for defining types, while setValue and
+// getValue need LLVM types ('i8', 'i32') - this is a lower-level operation
+function setValue(ptr, value, type, noSafe) {
+ type = type || 'i8';
+ if (type.charAt(type.length-1) === '*') type = 'i32'; // pointers are 32-bit
+ switch(type) {
+ case 'i1': HEAP8[(ptr)]=value; break;
+ case 'i8': HEAP8[(ptr)]=value; break;
+ case 'i16': HEAP16[((ptr)>>1)]=value; break;
+ case 'i32': HEAP32[((ptr)>>2)]=value; break;
+ case 'i64': (tempI64 = [value>>>0,(tempDouble=value,(+(Math_abs(tempDouble))) >= (+1) ? (tempDouble > (+0) ? ((Math_min((+(Math_floor((tempDouble)/(+4294967296)))), (+4294967295)))|0)>>>0 : (~~((+(Math_ceil((tempDouble - +(((~~(tempDouble)))>>>0))/(+4294967296))))))>>>0) : 0)],HEAP32[((ptr)>>2)]=tempI64[0],HEAP32[(((ptr)+(4))>>2)]=tempI64[1]); break;
+ case 'float': HEAPF32[((ptr)>>2)]=value; break;
+ case 'double': HEAPF64[((ptr)>>3)]=value; break;
+ default: abort('invalid type for setValue: ' + type);
+ }
+}
+Module['setValue'] = setValue;
+
+// Parallel to setValue.
+function getValue(ptr, type, noSafe) {
+ type = type || 'i8';
+ if (type.charAt(type.length-1) === '*') type = 'i32'; // pointers are 32-bit
+ switch(type) {
+ case 'i1': return HEAP8[(ptr)];
+ case 'i8': return HEAP8[(ptr)];
+ case 'i16': return HEAP16[((ptr)>>1)];
+ case 'i32': return HEAP32[((ptr)>>2)];
+ case 'i64': return HEAP32[((ptr)>>2)];
+ case 'float': return HEAPF32[((ptr)>>2)];
+ case 'double': return HEAPF64[((ptr)>>3)];
+ default: abort('invalid type for setValue: ' + type);
+ }
+ return null;
+}
+Module['getValue'] = getValue;
+
+var ALLOC_NORMAL = 0; // Tries to use _malloc()
+var ALLOC_STACK = 1; // Lives for the duration of the current function call
+var ALLOC_STATIC = 2; // Cannot be freed
+var ALLOC_DYNAMIC = 3; // Cannot be freed except through sbrk
+var ALLOC_NONE = 4; // Do not allocate
+Module['ALLOC_NORMAL'] = ALLOC_NORMAL;
+Module['ALLOC_STACK'] = ALLOC_STACK;
+Module['ALLOC_STATIC'] = ALLOC_STATIC;
+Module['ALLOC_DYNAMIC'] = ALLOC_DYNAMIC;
+Module['ALLOC_NONE'] = ALLOC_NONE;
+
+// allocate(): This is for internal use. You can use it yourself as well, but the interface
+// is a little tricky (see docs right below). The reason is that it is optimized
+// for multiple syntaxes to save space in generated code. So you should
+// normally not use allocate(), and instead allocate memory using _malloc(),
+// initialize it with setValue(), and so forth.
+// @slab: An array of data, or a number. If a number, then the size of the block to allocate,
+// in *bytes* (note that this is sometimes confusing: the next parameter does not
+// affect this!)
+// @types: Either an array of types, one for each byte (or 0 if no type at that position),
+// or a single type which is used for the entire block. This only matters if there
+// is initial data - if @slab is a number, then this does not matter at all and is
+// ignored.
+// @allocator: How to allocate memory, see ALLOC_*
+function allocate(slab, types, allocator, ptr) {
+ var zeroinit, size;
+ if (typeof slab === 'number') {
+ zeroinit = true;
+ size = slab;
+ } else {
+ zeroinit = false;
+ size = slab.length;
+ }
+
+ var singleType = typeof types === 'string' ? types : null;
+
+ var ret;
+ if (allocator == ALLOC_NONE) {
+ ret = ptr;
+ } else {
+ ret = [_malloc, Runtime.stackAlloc, Runtime.staticAlloc, Runtime.dynamicAlloc][allocator === undefined ? ALLOC_STATIC : allocator](Math.max(size, singleType ? 1 : types.length));
+ }
+
+ if (zeroinit) {
+ var ptr = ret, stop;
+ assert((ret & 3) == 0);
+ stop = ret + (size & ~3);
+ for (; ptr < stop; ptr += 4) {
+ HEAP32[((ptr)>>2)]=0;
+ }
+ stop = ret + size;
+ while (ptr < stop) {
+ HEAP8[((ptr++)|0)]=0;
+ }
+ return ret;
+ }
+
+ if (singleType === 'i8') {
+ if (slab.subarray || slab.slice) {
+ HEAPU8.set(slab, ret);
+ } else {
+ HEAPU8.set(new Uint8Array(slab), ret);
+ }
+ return ret;
+ }
+
+ var i = 0, type, typeSize, previousType;
+ while (i < size) {
+ var curr = slab[i];
+
+ if (typeof curr === 'function') {
+ curr = Runtime.getFunctionIndex(curr);
+ }
+
+ type = singleType || types[i];
+ if (type === 0) {
+ i++;
+ continue;
+ }
+
+ if (type == 'i64') type = 'i32'; // special case: we have one i32 here, and one i32 later
+
+ setValue(ret+i, curr, type);
+
+ // no need to look up size unless type changes, so cache it
+ if (previousType !== type) {
+ typeSize = Runtime.getNativeTypeSize(type);
+ previousType = type;
+ }
+ i += typeSize;
+ }
+
+ return ret;
+}
+Module['allocate'] = allocate;
+
+function Pointer_stringify(ptr, /* optional */ length) {
+ // TODO: use TextDecoder
+ // Find the length, and check for UTF while doing so
+ var hasUtf = false;
+ var t;
+ var i = 0;
+ while (1) {
+ t = HEAPU8[(((ptr)+(i))|0)];
+ if (t >= 128) hasUtf = true;
+ else if (t == 0 && !length) break;
+ i++;
+ if (length && i == length) break;
+ }
+ if (!length) length = i;
+
+ var ret = '';
+
+ if (!hasUtf) {
+ var MAX_CHUNK = 1024; // split up into chunks, because .apply on a huge string can overflow the stack
+ var curr;
+ while (length > 0) {
+ curr = String.fromCharCode.apply(String, HEAPU8.subarray(ptr, ptr + Math.min(length, MAX_CHUNK)));
+ ret = ret ? ret + curr : curr;
+ ptr += MAX_CHUNK;
+ length -= MAX_CHUNK;
+ }
+ return ret;
+ }
+
+ var utf8 = new Runtime.UTF8Processor();
+ for (i = 0; i < length; i++) {
+ t = HEAPU8[(((ptr)+(i))|0)];
+ ret += utf8.processCChar(t);
+ }
+ return ret;
+}
+Module['Pointer_stringify'] = Pointer_stringify;
+
+// Given a pointer 'ptr' to a null-terminated UTF16LE-encoded string in the emscripten HEAP, returns
+// a copy of that string as a Javascript String object.
+function UTF16ToString(ptr) {
+ var i = 0;
+
+ var str = '';
+ while (1) {
+ var codeUnit = HEAP16[(((ptr)+(i*2))>>1)];
+ if (codeUnit == 0)
+ return str;
+ ++i;
+ // fromCharCode constructs a character from a UTF-16 code unit, so we can pass the UTF16 string right through.
+ str += String.fromCharCode(codeUnit);
+ }
+}
+Module['UTF16ToString'] = UTF16ToString;
+
+// Copies the given Javascript String object 'str' to the emscripten HEAP at address 'outPtr',
+// null-terminated and encoded in UTF16LE form. The copy will require at most (str.length*2+1)*2 bytes of space in the HEAP.
+function stringToUTF16(str, outPtr) {
+ for(var i = 0; i < str.length; ++i) {
+ // charCodeAt returns a UTF-16 encoded code unit, so it can be directly written to the HEAP.
+ var codeUnit = str.charCodeAt(i); // possibly a lead surrogate
+ HEAP16[(((outPtr)+(i*2))>>1)]=codeUnit;
+ }
+ // Null-terminate the pointer to the HEAP.
+ HEAP16[(((outPtr)+(str.length*2))>>1)]=0;
+}
+Module['stringToUTF16'] = stringToUTF16;
+
+// Given a pointer 'ptr' to a null-terminated UTF32LE-encoded string in the emscripten HEAP, returns
+// a copy of that string as a Javascript String object.
+function UTF32ToString(ptr) {
+ var i = 0;
+
+ var str = '';
+ while (1) {
+ var utf32 = HEAP32[(((ptr)+(i*4))>>2)];
+ if (utf32 == 0)
+ return str;
+ ++i;
+ // Gotcha: fromCharCode constructs a character from a UTF-16 encoded code (pair), not from a Unicode code point! So encode the code point to UTF-16 for constructing.
+ if (utf32 >= 0x10000) {
+ var ch = utf32 - 0x10000;
+ str += String.fromCharCode(0xD800 | (ch >> 10), 0xDC00 | (ch & 0x3FF));
+ } else {
+ str += String.fromCharCode(utf32);
+ }
+ }
+}
+Module['UTF32ToString'] = UTF32ToString;
+
+// Copies the given Javascript String object 'str' to the emscripten HEAP at address 'outPtr',
+// null-terminated and encoded in UTF32LE form. The copy will require at most (str.length+1)*4 bytes of space in the HEAP,
+// but can use less, since str.length does not return the number of characters in the string, but the number of UTF-16 code units in the string.
+function stringToUTF32(str, outPtr) {
+ var iChar = 0;
+ for(var iCodeUnit = 0; iCodeUnit < str.length; ++iCodeUnit) {
+ // Gotcha: charCodeAt returns a 16-bit word that is a UTF-16 encoded code unit, not a Unicode code point of the character! We must decode the string to UTF-32 to the heap.
+ var codeUnit = str.charCodeAt(iCodeUnit); // possibly a lead surrogate
+ if (codeUnit >= 0xD800 && codeUnit <= 0xDFFF) {
+ var trailSurrogate = str.charCodeAt(++iCodeUnit);
+ codeUnit = 0x10000 + ((codeUnit & 0x3FF) << 10) | (trailSurrogate & 0x3FF);
+ }
+ HEAP32[(((outPtr)+(iChar*4))>>2)]=codeUnit;
+ ++iChar;
+ }
+ // Null-terminate the pointer to the HEAP.
+ HEAP32[(((outPtr)+(iChar*4))>>2)]=0;
+}
+Module['stringToUTF32'] = stringToUTF32;
+
+function demangle(func) {
+ var i = 3;
+ // params, etc.
+ var basicTypes = {
+ 'v': 'void',
+ 'b': 'bool',
+ 'c': 'char',
+ 's': 'short',
+ 'i': 'int',
+ 'l': 'long',
+ 'f': 'float',
+ 'd': 'double',
+ 'w': 'wchar_t',
+ 'a': 'signed char',
+ 'h': 'unsigned char',
+ 't': 'unsigned short',
+ 'j': 'unsigned int',
+ 'm': 'unsigned long',
+ 'x': 'long long',
+ 'y': 'unsigned long long',
+ 'z': '...'
+ };
+ var subs = [];
+ var first = true;
+ function dump(x) {
+ //return;
+ if (x) Module.print(x);
+ Module.print(func);
+ var pre = '';
+ for (var a = 0; a < i; a++) pre += ' ';
+ Module.print (pre + '^');
+ }
+ function parseNested() {
+ i++;
+ if (func[i] === 'K') i++; // ignore const
+ var parts = [];
+ while (func[i] !== 'E') {
+ if (func[i] === 'S') { // substitution
+ i++;
+ var next = func.indexOf('_', i);
+ var num = func.substring(i, next) || 0;
+ parts.push(subs[num] || '?');
+ i = next+1;
+ continue;
+ }
+ if (func[i] === 'C') { // constructor
+ parts.push(parts[parts.length-1]);
+ i += 2;
+ continue;
+ }
+ var size = parseInt(func.substr(i));
+ var pre = size.toString().length;
+ if (!size || !pre) { i--; break; } // counter i++ below us
+ var curr = func.substr(i + pre, size);
+ parts.push(curr);
+ subs.push(curr);
+ i += pre + size;
+ }
+ i++; // skip E
+ return parts;
+ }
+ function parse(rawList, limit, allowVoid) { // main parser
+ limit = limit || Infinity;
+ var ret = '', list = [];
+ function flushList() {
+ return '(' + list.join(', ') + ')';
+ }
+ var name;
+ if (func[i] === 'N') {
+ // namespaced N-E
+ name = parseNested().join('::');
+ limit--;
+ if (limit === 0) return rawList ? [name] : name;
+ } else {
+ // not namespaced
+ if (func[i] === 'K' || (first && func[i] === 'L')) i++; // ignore const and first 'L'
+ var size = parseInt(func.substr(i));
+ if (size) {
+ var pre = size.toString().length;
+ name = func.substr(i + pre, size);
+ i += pre + size;
+ }
+ }
+ first = false;
+ if (func[i] === 'I') {
+ i++;
+ var iList = parse(true);
+ var iRet = parse(true, 1, true);
+ ret += iRet[0] + ' ' + name + '<' + iList.join(', ') + '>';
+ } else {
+ ret = name;
+ }
+ paramLoop: while (i < func.length && limit-- > 0) {
+ //dump('paramLoop');
+ var c = func[i++];
+ if (c in basicTypes) {
+ list.push(basicTypes[c]);
+ } else {
+ switch (c) {
+ case 'P': list.push(parse(true, 1, true)[0] + '*'); break; // pointer
+ case 'R': list.push(parse(true, 1, true)[0] + '&'); break; // reference
+ case 'L': { // literal
+ i++; // skip basic type
+ var end = func.indexOf('E', i);
+ var size = end - i;
+ list.push(func.substr(i, size));
+ i += size + 2; // size + 'EE'
+ break;
+ }
+ case 'A': { // array
+ var size = parseInt(func.substr(i));
+ i += size.toString().length;
+ if (func[i] !== '_') throw '?';
+ i++; // skip _
+ list.push(parse(true, 1, true)[0] + ' [' + size + ']');
+ break;
+ }
+ case 'E': break paramLoop;
+ default: ret += '?' + c; break paramLoop;
+ }
+ }
+ }
+ if (!allowVoid && list.length === 1 && list[0] === 'void') list = []; // avoid (void)
+ if (rawList) {
+ if (ret) {
+ list.push(ret + '?');
+ }
+ return list;
+ } else {
+ return ret + flushList();
+ }
+ }
+ try {
+ // Special-case the entry point, since its name differs from other name mangling.
+ if (func == 'Object._main' || func == '_main') {
+ return 'main()';
+ }
+ if (typeof func === 'number') func = Pointer_stringify(func);
+ if (func[0] !== '_') return func;
+ if (func[1] !== '_') return func; // C function
+ if (func[2] !== 'Z') return func;
+ switch (func[3]) {
+ case 'n': return 'operator new()';
+ case 'd': return 'operator delete()';
+ }
+ return parse();
+ } catch(e) {
+ return func;
+ }
+}
+
+function demangleAll(text) {
+ return text.replace(/__Z[\w\d_]+/g, function(x) { var y = demangle(x); return x === y ? x : (x + ' [' + y + ']') });
+}
+
+function stackTrace() {
+ var stack = new Error().stack;
+ return stack ? demangleAll(stack) : '(no stack trace available)'; // Stack trace is not available at least on IE10 and Safari 6.
+}
+
+// Memory management
+
+var PAGE_SIZE = 4096;
+function alignMemoryPage(x) {
+ return (x+4095)&-4096;
+}
+
+var HEAP;
+var HEAP8, HEAPU8, HEAP16, HEAPU16, HEAP32, HEAPU32, HEAPF32, HEAPF64;
+
+var STATIC_BASE = 0, STATICTOP = 0, staticSealed = false; // static area
+var STACK_BASE = 0, STACKTOP = 0, STACK_MAX = 0; // stack area
+var DYNAMIC_BASE = 0, DYNAMICTOP = 0; // dynamic area handled by sbrk
+
+function enlargeMemory() {
+ abort('Cannot enlarge memory arrays. Either (1) compile with -s TOTAL_MEMORY=X with X higher than the current value ' + TOTAL_MEMORY + ', (2) compile with ALLOW_MEMORY_GROWTH which adjusts the size at runtime but prevents some optimizations, or (3) set Module.TOTAL_MEMORY before the program runs.');
+}
+
+var TOTAL_STACK = Module['TOTAL_STACK'] || 5242880;
+var TOTAL_MEMORY = Module['TOTAL_MEMORY'] || 134217728;
+var FAST_MEMORY = Module['FAST_MEMORY'] || 2097152;
+
+var totalMemory = 4096;
+while (totalMemory < TOTAL_MEMORY || totalMemory < 2*TOTAL_STACK) {
+ if (totalMemory < 16*1024*1024) {
+ totalMemory *= 2;
+ } else {
+ totalMemory += 16*1024*1024
+ }
+}
+if (totalMemory !== TOTAL_MEMORY) {
+ Module.printErr('increasing TOTAL_MEMORY to ' + totalMemory + ' to be more reasonable');
+ TOTAL_MEMORY = totalMemory;
+}
+
+// Initialize the runtime's memory
+// check for full engine support (use string 'subarray' to avoid closure compiler confusion)
+assert(typeof Int32Array !== 'undefined' && typeof Float64Array !== 'undefined' && !!(new Int32Array(1)['subarray']) && !!(new Int32Array(1)['set']),
+ 'JS engine does not provide full typed array support');
+
+var buffer = new ArrayBuffer(TOTAL_MEMORY);
+HEAP8 = new Int8Array(buffer);
+HEAP16 = new Int16Array(buffer);
+HEAP32 = new Int32Array(buffer);
+HEAPU8 = new Uint8Array(buffer);
+HEAPU16 = new Uint16Array(buffer);
+HEAPU32 = new Uint32Array(buffer);
+HEAPF32 = new Float32Array(buffer);
+HEAPF64 = new Float64Array(buffer);
+
+// Endianness check (note: assumes compiler arch was little-endian)
+HEAP32[0] = 255;
+assert(HEAPU8[0] === 255 && HEAPU8[3] === 0, 'Typed arrays 2 must be run on a little-endian system');
+
+Module['HEAP'] = HEAP;
+Module['HEAP8'] = HEAP8;
+Module['HEAP16'] = HEAP16;
+Module['HEAP32'] = HEAP32;
+Module['HEAPU8'] = HEAPU8;
+Module['HEAPU16'] = HEAPU16;
+Module['HEAPU32'] = HEAPU32;
+Module['HEAPF32'] = HEAPF32;
+Module['HEAPF64'] = HEAPF64;
+
+function callRuntimeCallbacks(callbacks) {
+ while(callbacks.length > 0) {
+ var callback = callbacks.shift();
+ if (typeof callback == 'function') {
+ callback();
+ continue;
+ }
+ var func = callback.func;
+ if (typeof func === 'number') {
+ if (callback.arg === undefined) {
+ Runtime.dynCall('v', func);
+ } else {
+ Runtime.dynCall('vi', func, [callback.arg]);
+ }
+ } else {
+ func(callback.arg === undefined ? null : callback.arg);
+ }
+ }
+}
+
+var __ATPRERUN__ = []; // functions called before the runtime is initialized
+var __ATINIT__ = []; // functions called during startup
+var __ATMAIN__ = []; // functions called when main() is to be run
+var __ATEXIT__ = []; // functions called during shutdown
+var __ATPOSTRUN__ = []; // functions called after the runtime has exited
+
+var runtimeInitialized = false;
+
+function preRun() {
+ // compatibility - merge in anything from Module['preRun'] at this time
+ if (Module['preRun']) {
+ if (typeof Module['preRun'] == 'function') Module['preRun'] = [Module['preRun']];
+ while (Module['preRun'].length) {
+ addOnPreRun(Module['preRun'].shift());
+ }
+ }
+ callRuntimeCallbacks(__ATPRERUN__);
+}
+
+function ensureInitRuntime() {
+ if (runtimeInitialized) return;
+ runtimeInitialized = true;
+ callRuntimeCallbacks(__ATINIT__);
+}
+
+function preMain() {
+ callRuntimeCallbacks(__ATMAIN__);
+}
+
+function exitRuntime() {
+ callRuntimeCallbacks(__ATEXIT__);
+}
+
+function postRun() {
+ // compatibility - merge in anything from Module['postRun'] at this time
+ if (Module['postRun']) {
+ if (typeof Module['postRun'] == 'function') Module['postRun'] = [Module['postRun']];
+ while (Module['postRun'].length) {
+ addOnPostRun(Module['postRun'].shift());
+ }
+ }
+ callRuntimeCallbacks(__ATPOSTRUN__);
+}
+
+function addOnPreRun(cb) {
+ __ATPRERUN__.unshift(cb);
+}
+Module['addOnPreRun'] = Module.addOnPreRun = addOnPreRun;
+
+function addOnInit(cb) {
+ __ATINIT__.unshift(cb);
+}
+Module['addOnInit'] = Module.addOnInit = addOnInit;
+
+function addOnPreMain(cb) {
+ __ATMAIN__.unshift(cb);
+}
+Module['addOnPreMain'] = Module.addOnPreMain = addOnPreMain;
+
+function addOnExit(cb) {
+ __ATEXIT__.unshift(cb);
+}
+Module['addOnExit'] = Module.addOnExit = addOnExit;
+
+function addOnPostRun(cb) {
+ __ATPOSTRUN__.unshift(cb);
+}
+Module['addOnPostRun'] = Module.addOnPostRun = addOnPostRun;
+
+// Tools
+
+// This processes a JS string into a C-line array of numbers, 0-terminated.
+// For LLVM-originating strings, see parser.js:parseLLVMString function
+function intArrayFromString(stringy, dontAddNull, length /* optional */) {
+ var ret = (new Runtime.UTF8Processor()).processJSString(stringy);
+ if (length) {
+ ret.length = length;
+ }
+ if (!dontAddNull) {
+ ret.push(0);
+ }
+ return ret;
+}
+Module['intArrayFromString'] = intArrayFromString;
+
+function intArrayToString(array) {
+ var ret = [];
+ for (var i = 0; i < array.length; i++) {
+ var chr = array[i];
+ if (chr > 0xFF) {
+ chr &= 0xFF;
+ }
+ ret.push(String.fromCharCode(chr));
+ }
+ return ret.join('');
+}
+Module['intArrayToString'] = intArrayToString;
+
+// Write a Javascript array to somewhere in the heap
+function writeStringToMemory(string, buffer, dontAddNull) {
+ var array = intArrayFromString(string, dontAddNull);
+ var i = 0;
+ while (i < array.length) {
+ var chr = array[i];
+ HEAP8[(((buffer)+(i))|0)]=chr;
+ i = i + 1;
+ }
+}
+Module['writeStringToMemory'] = writeStringToMemory;
+
+function writeArrayToMemory(array, buffer) {
+ for (var i = 0; i < array.length; i++) {
+ HEAP8[(((buffer)+(i))|0)]=array[i];
+ }
+}
+Module['writeArrayToMemory'] = writeArrayToMemory;
+
+function writeAsciiToMemory(str, buffer, dontAddNull) {
+ for (var i = 0; i < str.length; i++) {
+ HEAP8[(((buffer)+(i))|0)]=str.charCodeAt(i);
+ }
+ if (!dontAddNull) HEAP8[(((buffer)+(str.length))|0)]=0;
+}
+Module['writeAsciiToMemory'] = writeAsciiToMemory;
+
+function unSign(value, bits, ignore) {
+ if (value >= 0) {
+ return value;
+ }
+ return bits <= 32 ? 2*Math.abs(1 << (bits-1)) + value // Need some trickery, since if bits == 32, we are right at the limit of the bits JS uses in bitshifts
+ : Math.pow(2, bits) + value;
+}
+function reSign(value, bits, ignore) {
+ if (value <= 0) {
+ return value;
+ }
+ var half = bits <= 32 ? Math.abs(1 << (bits-1)) // abs is needed if bits == 32
+ : Math.pow(2, bits-1);
+ if (value >= half && (bits <= 32 || value > half)) { // for huge values, we can hit the precision limit and always get true here. so don't do that
+ // but, in general there is no perfect solution here. With 64-bit ints, we get rounding and errors
+ // TODO: In i64 mode 1, resign the two parts separately and safely
+ value = -2*half + value; // Cannot bitshift half, as it may be at the limit of the bits JS uses in bitshifts
+ }
+ return value;
+}
+
+// check for imul support, and also for correctness ( https://bugs.webkit.org/show_bug.cgi?id=126345 )
+if (!Math['imul'] || Math['imul'](0xffffffff, 5) !== -5) Math['imul'] = function imul(a, b) {
+ var ah = a >>> 16;
+ var al = a & 0xffff;
+ var bh = b >>> 16;
+ var bl = b & 0xffff;
+ return (al*bl + ((ah*bl + al*bh) << 16))|0;
+};
+Math.imul = Math['imul'];
+
+
+var Math_abs = Math.abs;
+var Math_cos = Math.cos;
+var Math_sin = Math.sin;
+var Math_tan = Math.tan;
+var Math_acos = Math.acos;
+var Math_asin = Math.asin;
+var Math_atan = Math.atan;
+var Math_atan2 = Math.atan2;
+var Math_exp = Math.exp;
+var Math_log = Math.log;
+var Math_sqrt = Math.sqrt;
+var Math_ceil = Math.ceil;
+var Math_floor = Math.floor;
+var Math_pow = Math.pow;
+var Math_imul = Math.imul;
+var Math_fround = Math.fround;
+var Math_min = Math.min;
+
+// A counter of dependencies for calling run(). If we need to
+// do asynchronous work before running, increment this and
+// decrement it. Incrementing must happen in a place like
+// PRE_RUN_ADDITIONS (used by emcc to add file preloading).
+// Note that you can add dependencies in preRun, even though
+// it happens right before run - run will be postponed until
+// the dependencies are met.
+var runDependencies = 0;
+var runDependencyWatcher = null;
+var dependenciesFulfilled = null; // overridden to take different actions when all run dependencies are fulfilled
+
+function addRunDependency(id) {
+ runDependencies++;
+ if (Module['monitorRunDependencies']) {
+ Module['monitorRunDependencies'](runDependencies);
+ }
+}
+Module['addRunDependency'] = addRunDependency;
+function removeRunDependency(id) {
+ runDependencies--;
+ if (Module['monitorRunDependencies']) {
+ Module['monitorRunDependencies'](runDependencies);
+ }
+ if (runDependencies == 0) {
+ if (runDependencyWatcher !== null) {
+ clearInterval(runDependencyWatcher);
+ runDependencyWatcher = null;
+ }
+ if (dependenciesFulfilled) {
+ var callback = dependenciesFulfilled;
+ dependenciesFulfilled = null;
+ callback(); // can add another dependenciesFulfilled
+ }
+ }
+}
+Module['removeRunDependency'] = removeRunDependency;
+
+Module["preloadedImages"] = {}; // maps url to image data
+Module["preloadedAudios"] = {}; // maps url to audio data
+
+
+var memoryInitializer = null;
+
+// === Body ===
+
+
+
+
+
+STATIC_BASE = 8;
+
+STATICTOP = STATIC_BASE + Runtime.alignMemory(27);
+/* global initializers */ __ATINIT__.push();
+
+
+/* memory initializer */ allocate([101,114,114,111,114,58,32,37,100,92,110,0,0,0,0,0,115,117,109,58,37,100,10,0], "i8", ALLOC_NONE, Runtime.GLOBAL_BASE);
+
+
+
+
+var tempDoublePtr = Runtime.alignMemory(allocate(12, "i8", ALLOC_STATIC), 8);
+
+assert(tempDoublePtr % 8 == 0);
+
+function copyTempFloat(ptr) { // functions, because inlining this code increases code size too much
+
+ HEAP8[tempDoublePtr] = HEAP8[ptr];
+
+ HEAP8[tempDoublePtr+1] = HEAP8[ptr+1];
+
+ HEAP8[tempDoublePtr+2] = HEAP8[ptr+2];
+
+ HEAP8[tempDoublePtr+3] = HEAP8[ptr+3];
+
+}
+
+function copyTempDouble(ptr) {
+
+ HEAP8[tempDoublePtr] = HEAP8[ptr];
+
+ HEAP8[tempDoublePtr+1] = HEAP8[ptr+1];
+
+ HEAP8[tempDoublePtr+2] = HEAP8[ptr+2];
+
+ HEAP8[tempDoublePtr+3] = HEAP8[ptr+3];
+
+ HEAP8[tempDoublePtr+4] = HEAP8[ptr+4];
+
+ HEAP8[tempDoublePtr+5] = HEAP8[ptr+5];
+
+ HEAP8[tempDoublePtr+6] = HEAP8[ptr+6];
+
+ HEAP8[tempDoublePtr+7] = HEAP8[ptr+7];
+
+}
+
+
+ function _malloc(bytes) {
+ /* Over-allocate to make sure it is byte-aligned by 8.
+ * This will leak memory, but this is only the dummy
+ * implementation (replaced by dlmalloc normally) so
+ * not an issue.
+ */
+ var ptr = Runtime.dynamicAlloc(bytes + 8);
+ return (ptr+8) & 0xFFFFFFF8;
+ }
+ Module["_malloc"] = _malloc;
+
+
+
+
+ var ERRNO_CODES={EPERM:1,ENOENT:2,ESRCH:3,EINTR:4,EIO:5,ENXIO:6,E2BIG:7,ENOEXEC:8,EBADF:9,ECHILD:10,EAGAIN:11,EWOULDBLOCK:11,ENOMEM:12,EACCES:13,EFAULT:14,ENOTBLK:15,EBUSY:16,EEXIST:17,EXDEV:18,ENODEV:19,ENOTDIR:20,EISDIR:21,EINVAL:22,ENFILE:23,EMFILE:24,ENOTTY:25,ETXTBSY:26,EFBIG:27,ENOSPC:28,ESPIPE:29,EROFS:30,EMLINK:31,EPIPE:32,EDOM:33,ERANGE:34,ENOMSG:42,EIDRM:43,ECHRNG:44,EL2NSYNC:45,EL3HLT:46,EL3RST:47,ELNRNG:48,EUNATCH:49,ENOCSI:50,EL2HLT:51,EDEADLK:35,ENOLCK:37,EBADE:52,EBADR:53,EXFULL:54,ENOANO:55,EBADRQC:56,EBADSLT:57,EDEADLOCK:35,EBFONT:59,ENOSTR:60,ENODATA:61,ETIME:62,ENOSR:63,ENONET:64,ENOPKG:65,EREMOTE:66,ENOLINK:67,EADV:68,ESRMNT:69,ECOMM:70,EPROTO:71,EMULTIHOP:72,EDOTDOT:73,EBADMSG:74,ENOTUNIQ:76,EBADFD:77,EREMCHG:78,ELIBACC:79,ELIBBAD:80,ELIBSCN:81,ELIBMAX:82,ELIBEXEC:83,ENOSYS:38,ENOTEMPTY:39,ENAMETOOLONG:36,ELOOP:40,EOPNOTSUPP:95,EPFNOSUPPORT:96,ECONNRESET:104,ENOBUFS:105,EAFNOSUPPORT:97,EPROTOTYPE:91,ENOTSOCK:88,ENOPROTOOPT:92,ESHUTDOWN:108,ECONNREFUSED:111,EADDRINUSE:98,ECONNABORTED:103,ENETUNREACH:101,ENETDOWN:100,ETIMEDOUT:110,EHOSTDOWN:112,EHOSTUNREACH:113,EINPROGRESS:115,EALREADY:114,EDESTADDRREQ:89,EMSGSIZE:90,EPROTONOSUPPORT:93,ESOCKTNOSUPPORT:94,EADDRNOTAVAIL:99,ENETRESET:102,EISCONN:106,ENOTCONN:107,ETOOMANYREFS:109,EUSERS:87,EDQUOT:122,ESTALE:116,ENOTSUP:95,ENOMEDIUM:123,EILSEQ:84,EOVERFLOW:75,ECANCELED:125,ENOTRECOVERABLE:131,EOWNERDEAD:130,ESTRPIPE:86};
+
+ var ERRNO_MESSAGES={0:"Success",1:"Not super-user",2:"No such file or directory",3:"No such process",4:"Interrupted system call",5:"I/O error",6:"No such device or address",7:"Arg list too long",8:"Exec format error",9:"Bad file number",10:"No children",11:"No more processes",12:"Not enough core",13:"Permission denied",14:"Bad address",15:"Block device required",16:"Mount device busy",17:"File exists",18:"Cross-device link",19:"No such device",20:"Not a directory",21:"Is a directory",22:"Invalid argument",23:"Too many open files in system",24:"Too many open files",25:"Not a typewriter",26:"Text file busy",27:"File too large",28:"No space left on device",29:"Illegal seek",30:"Read only file system",31:"Too many links",32:"Broken pipe",33:"Math arg out of domain of func",34:"Math result not representable",35:"File locking deadlock error",36:"File or path name too long",37:"No record locks available",38:"Function not implemented",39:"Directory not empty",40:"Too many symbolic links",42:"No message of desired type",43:"Identifier removed",44:"Channel number out of range",45:"Level 2 not synchronized",46:"Level 3 halted",47:"Level 3 reset",48:"Link number out of range",49:"Protocol driver not attached",50:"No CSI structure available",51:"Level 2 halted",52:"Invalid exchange",53:"Invalid request descriptor",54:"Exchange full",55:"No anode",56:"Invalid request code",57:"Invalid slot",59:"Bad font file fmt",60:"Device not a stream",61:"No data (for no delay io)",62:"Timer expired",63:"Out of streams resources",64:"Machine is not on the network",65:"Package not installed",66:"The object is remote",67:"The link has been severed",68:"Advertise error",69:"Srmount error",70:"Communication error on send",71:"Protocol error",72:"Multihop attempted",73:"Cross mount point (not really error)",74:"Trying to read unreadable message",75:"Value too large for defined data type",76:"Given log. name not unique",77:"f.d. invalid for this operation",78:"Remote address changed",79:"Can access a needed shared lib",80:"Accessing a corrupted shared lib",81:".lib section in a.out corrupted",82:"Attempting to link in too many libs",83:"Attempting to exec a shared library",84:"Illegal byte sequence",86:"Streams pipe error",87:"Too many users",88:"Socket operation on non-socket",89:"Destination address required",90:"Message too long",91:"Protocol wrong type for socket",92:"Protocol not available",93:"Unknown protocol",94:"Socket type not supported",95:"Not supported",96:"Protocol family not supported",97:"Address family not supported by protocol family",98:"Address already in use",99:"Address not available",100:"Network interface is not configured",101:"Network is unreachable",102:"Connection reset by network",103:"Connection aborted",104:"Connection reset by peer",105:"No buffer space available",106:"Socket is already connected",107:"Socket is not connected",108:"Can't send after socket shutdown",109:"Too many references",110:"Connection timed out",111:"Connection refused",112:"Host is down",113:"Host is unreachable",114:"Socket already connected",115:"Connection already in progress",116:"Stale file handle",122:"Quota exceeded",123:"No medium (in tape drive)",125:"Operation canceled",130:"Previous owner died",131:"State not recoverable"};
+
+
+ var ___errno_state=0;function ___setErrNo(value) {
+ // For convenient setting and returning of errno.
+ HEAP32[((___errno_state)>>2)]=value;
+ return value;
+ }
+
+ var TTY={ttys:[],init:function () {
+ // https://github.com/kripken/emscripten/pull/1555
+ // if (ENVIRONMENT_IS_NODE) {
+ // // currently, FS.init does not distinguish if process.stdin is a file or TTY
+ // // device, it always assumes it's a TTY device. because of this, we're forcing
+ // // process.stdin to UTF8 encoding to at least make stdin reading compatible
+ // // with text files until FS.init can be refactored.
+ // process['stdin']['setEncoding']('utf8');
+ // }
+ },shutdown:function () {
+ // https://github.com/kripken/emscripten/pull/1555
+ // if (ENVIRONMENT_IS_NODE) {
+ // // inolen: any idea as to why node -e 'process.stdin.read()' wouldn't exit immediately (with process.stdin being a tty)?
+ // // isaacs: because now it's reading from the stream, you've expressed interest in it, so that read() kicks off a _read() which creates a ReadReq operation
+ // // inolen: I thought read() in that case was a synchronous operation that just grabbed some amount of buffered data if it exists?
+ // // isaacs: it is. but it also triggers a _read() call, which calls readStart() on the handle
+ // // isaacs: do process.stdin.pause() and i'd think it'd probably close the pending call
+ // process['stdin']['pause']();
+ // }
+ },register:function (dev, ops) {
+ TTY.ttys[dev] = { input: [], output: [], ops: ops };
+ FS.registerDevice(dev, TTY.stream_ops);
+ },stream_ops:{open:function (stream) {
+ var tty = TTY.ttys[stream.node.rdev];
+ if (!tty) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ stream.tty = tty;
+ stream.seekable = false;
+ },close:function (stream) {
+ // flush any pending line data
+ if (stream.tty.output.length) {
+ stream.tty.ops.put_char(stream.tty, 10);
+ }
+ },read:function (stream, buffer, offset, length, pos /* ignored */) {
+ if (!stream.tty || !stream.tty.ops.get_char) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENXIO);
+ }
+ var bytesRead = 0;
+ for (var i = 0; i < length; i++) {
+ var result;
+ try {
+ result = stream.tty.ops.get_char(stream.tty);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ if (result === undefined && bytesRead === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ if (result === null || result === undefined) break;
+ bytesRead++;
+ buffer[offset+i] = result;
+ }
+ if (bytesRead) {
+ stream.node.timestamp = Date.now();
+ }
+ return bytesRead;
+ },write:function (stream, buffer, offset, length, pos) {
+ if (!stream.tty || !stream.tty.ops.put_char) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENXIO);
+ }
+ for (var i = 0; i < length; i++) {
+ try {
+ stream.tty.ops.put_char(stream.tty, buffer[offset+i]);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ }
+ if (length) {
+ stream.node.timestamp = Date.now();
+ }
+ return i;
+ }},default_tty_ops:{get_char:function (tty) {
+ if (!tty.input.length) {
+ var result = null;
+ if (ENVIRONMENT_IS_NODE) {
+ result = process['stdin']['read']();
+ if (!result) {
+ if (process['stdin']['_readableState'] && process['stdin']['_readableState']['ended']) {
+ return null; // EOF
+ }
+ return undefined; // no data available
+ }
+ } else if (typeof window != 'undefined' &&
+ typeof window.prompt == 'function') {
+ // Browser.
+ result = window.prompt('Input: '); // returns null on cancel
+ if (result !== null) {
+ result += '\n';
+ }
+ } else if (typeof readline == 'function') {
+ // Command line.
+ result = readline();
+ if (result !== null) {
+ result += '\n';
+ }
+ }
+ if (!result) {
+ return null;
+ }
+ tty.input = intArrayFromString(result, true);
+ }
+ return tty.input.shift();
+ },put_char:function (tty, val) {
+ if (val === null || val === 10) {
+ Module['print'](tty.output.join(''));
+ tty.output = [];
+ } else {
+ tty.output.push(TTY.utf8.processCChar(val));
+ }
+ }},default_tty1_ops:{put_char:function (tty, val) {
+ if (val === null || val === 10) {
+ Module['printErr'](tty.output.join(''));
+ tty.output = [];
+ } else {
+ tty.output.push(TTY.utf8.processCChar(val));
+ }
+ }}};
+
+ var MEMFS={ops_table:null,CONTENT_OWNING:1,CONTENT_FLEXIBLE:2,CONTENT_FIXED:3,mount:function (mount) {
+ return MEMFS.createNode(null, '/', 16384 | 511 /* 0777 */, 0);
+ },createNode:function (parent, name, mode, dev) {
+ if (FS.isBlkdev(mode) || FS.isFIFO(mode)) {
+ // no supported
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (!MEMFS.ops_table) {
+ MEMFS.ops_table = {
+ dir: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr,
+ lookup: MEMFS.node_ops.lookup,
+ mknod: MEMFS.node_ops.mknod,
+ rename: MEMFS.node_ops.rename,
+ unlink: MEMFS.node_ops.unlink,
+ rmdir: MEMFS.node_ops.rmdir,
+ readdir: MEMFS.node_ops.readdir,
+ symlink: MEMFS.node_ops.symlink
+ },
+ stream: {
+ llseek: MEMFS.stream_ops.llseek
+ }
+ },
+ file: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr
+ },
+ stream: {
+ llseek: MEMFS.stream_ops.llseek,
+ read: MEMFS.stream_ops.read,
+ write: MEMFS.stream_ops.write,
+ allocate: MEMFS.stream_ops.allocate,
+ mmap: MEMFS.stream_ops.mmap
+ }
+ },
+ link: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr,
+ readlink: MEMFS.node_ops.readlink
+ },
+ stream: {}
+ },
+ chrdev: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr
+ },
+ stream: FS.chrdev_stream_ops
+ },
+ };
+ }
+ var node = FS.createNode(parent, name, mode, dev);
+ if (FS.isDir(node.mode)) {
+ node.node_ops = MEMFS.ops_table.dir.node;
+ node.stream_ops = MEMFS.ops_table.dir.stream;
+ node.contents = {};
+ } else if (FS.isFile(node.mode)) {
+ node.node_ops = MEMFS.ops_table.file.node;
+ node.stream_ops = MEMFS.ops_table.file.stream;
+ node.contents = [];
+ node.contentMode = MEMFS.CONTENT_FLEXIBLE;
+ } else if (FS.isLink(node.mode)) {
+ node.node_ops = MEMFS.ops_table.link.node;
+ node.stream_ops = MEMFS.ops_table.link.stream;
+ } else if (FS.isChrdev(node.mode)) {
+ node.node_ops = MEMFS.ops_table.chrdev.node;
+ node.stream_ops = MEMFS.ops_table.chrdev.stream;
+ }
+ node.timestamp = Date.now();
+ // add the new node to the parent
+ if (parent) {
+ parent.contents[name] = node;
+ }
+ return node;
+ },ensureFlexible:function (node) {
+ if (node.contentMode !== MEMFS.CONTENT_FLEXIBLE) {
+ var contents = node.contents;
+ node.contents = Array.prototype.slice.call(contents);
+ node.contentMode = MEMFS.CONTENT_FLEXIBLE;
+ }
+ },node_ops:{getattr:function (node) {
+ var attr = {};
+ // device numbers reuse inode numbers.
+ attr.dev = FS.isChrdev(node.mode) ? node.id : 1;
+ attr.ino = node.id;
+ attr.mode = node.mode;
+ attr.nlink = 1;
+ attr.uid = 0;
+ attr.gid = 0;
+ attr.rdev = node.rdev;
+ if (FS.isDir(node.mode)) {
+ attr.size = 4096;
+ } else if (FS.isFile(node.mode)) {
+ attr.size = node.contents.length;
+ } else if (FS.isLink(node.mode)) {
+ attr.size = node.link.length;
+ } else {
+ attr.size = 0;
+ }
+ attr.atime = new Date(node.timestamp);
+ attr.mtime = new Date(node.timestamp);
+ attr.ctime = new Date(node.timestamp);
+ // NOTE: In our implementation, st_blocks = Math.ceil(st_size/st_blksize),
+ // but this is not required by the standard.
+ attr.blksize = 4096;
+ attr.blocks = Math.ceil(attr.size / attr.blksize);
+ return attr;
+ },setattr:function (node, attr) {
+ if (attr.mode !== undefined) {
+ node.mode = attr.mode;
+ }
+ if (attr.timestamp !== undefined) {
+ node.timestamp = attr.timestamp;
+ }
+ if (attr.size !== undefined) {
+ MEMFS.ensureFlexible(node);
+ var contents = node.contents;
+ if (attr.size < contents.length) contents.length = attr.size;
+ else while (attr.size > contents.length) contents.push(0);
+ }
+ },lookup:function (parent, name) {
+ throw FS.genericErrors[ERRNO_CODES.ENOENT];
+ },mknod:function (parent, name, mode, dev) {
+ return MEMFS.createNode(parent, name, mode, dev);
+ },rename:function (old_node, new_dir, new_name) {
+ // if we're overwriting a directory at new_name, make sure it's empty.
+ if (FS.isDir(old_node.mode)) {
+ var new_node;
+ try {
+ new_node = FS.lookupNode(new_dir, new_name);
+ } catch (e) {
+ }
+ if (new_node) {
+ for (var i in new_node.contents) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTEMPTY);
+ }
+ }
+ }
+ // do the internal rewiring
+ delete old_node.parent.contents[old_node.name];
+ old_node.name = new_name;
+ new_dir.contents[new_name] = old_node;
+ old_node.parent = new_dir;
+ },unlink:function (parent, name) {
+ delete parent.contents[name];
+ },rmdir:function (parent, name) {
+ var node = FS.lookupNode(parent, name);
+ for (var i in node.contents) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTEMPTY);
+ }
+ delete parent.contents[name];
+ },readdir:function (node) {
+ var entries = ['.', '..']
+ for (var key in node.contents) {
+ if (!node.contents.hasOwnProperty(key)) {
+ continue;
+ }
+ entries.push(key);
+ }
+ return entries;
+ },symlink:function (parent, newname, oldpath) {
+ var node = MEMFS.createNode(parent, newname, 511 /* 0777 */ | 40960, 0);
+ node.link = oldpath;
+ return node;
+ },readlink:function (node) {
+ if (!FS.isLink(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ return node.link;
+ }},stream_ops:{read:function (stream, buffer, offset, length, position) {
+ var contents = stream.node.contents;
+ if (position >= contents.length)
+ return 0;
+ var size = Math.min(contents.length - position, length);
+ assert(size >= 0);
+ if (size > 8 && contents.subarray) { // non-trivial, and typed array
+ buffer.set(contents.subarray(position, position + size), offset);
+ } else
+ {
+ for (var i = 0; i < size; i++) {
+ buffer[offset + i] = contents[position + i];
+ }
+ }
+ return size;
+ },write:function (stream, buffer, offset, length, position, canOwn) {
+ var node = stream.node;
+ node.timestamp = Date.now();
+ var contents = node.contents;
+ if (length && contents.length === 0 && position === 0 && buffer.subarray) {
+ // just replace it with the new data
+ if (canOwn && offset === 0) {
+ node.contents = buffer; // this could be a subarray of Emscripten HEAP, or allocated from some other source.
+ node.contentMode = (buffer.buffer === HEAP8.buffer) ? MEMFS.CONTENT_OWNING : MEMFS.CONTENT_FIXED;
+ } else {
+ node.contents = new Uint8Array(buffer.subarray(offset, offset+length));
+ node.contentMode = MEMFS.CONTENT_FIXED;
+ }
+ return length;
+ }
+ MEMFS.ensureFlexible(node);
+ var contents = node.contents;
+ while (contents.length < position) contents.push(0);
+ for (var i = 0; i < length; i++) {
+ contents[position + i] = buffer[offset + i];
+ }
+ return length;
+ },llseek:function (stream, offset, whence) {
+ var position = offset;
+ if (whence === 1) { // SEEK_CUR.
+ position += stream.position;
+ } else if (whence === 2) { // SEEK_END.
+ if (FS.isFile(stream.node.mode)) {
+ position += stream.node.contents.length;
+ }
+ }
+ if (position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ stream.ungotten = [];
+ stream.position = position;
+ return position;
+ },allocate:function (stream, offset, length) {
+ MEMFS.ensureFlexible(stream.node);
+ var contents = stream.node.contents;
+ var limit = offset + length;
+ while (limit > contents.length) contents.push(0);
+ },mmap:function (stream, buffer, offset, length, position, prot, flags) {
+ if (!FS.isFile(stream.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ var ptr;
+ var allocated;
+ var contents = stream.node.contents;
+ // Only make a new copy when MAP_PRIVATE is specified.
+ if ( !(flags & 2) &&
+ (contents.buffer === buffer || contents.buffer === buffer.buffer) ) {
+ // We can't emulate MAP_SHARED when the file is not backed by the buffer
+ // we're mapping to (e.g. the HEAP buffer).
+ allocated = false;
+ ptr = contents.byteOffset;
+ } else {
+ // Try to avoid unnecessary slices.
+ if (position > 0 || position + length < contents.length) {
+ if (contents.subarray) {
+ contents = contents.subarray(position, position + length);
+ } else {
+ contents = Array.prototype.slice.call(contents, position, position + length);
+ }
+ }
+ allocated = true;
+ ptr = _malloc(length);
+ if (!ptr) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOMEM);
+ }
+ buffer.set(contents, ptr);
+ }
+ return { ptr: ptr, allocated: allocated };
+ }}};
+
+ var IDBFS={dbs:{},indexedDB:function () {
+ return window.indexedDB || window.mozIndexedDB || window.webkitIndexedDB || window.msIndexedDB;
+ },DB_VERSION:21,DB_STORE_NAME:"FILE_DATA",mount:function (mount) {
+ // reuse all of the core MEMFS functionality
+ return MEMFS.mount.apply(null, arguments);
+ },syncfs:function (mount, populate, callback) {
+ IDBFS.getLocalSet(mount, function(err, local) {
+ if (err) return callback(err);
+
+ IDBFS.getRemoteSet(mount, function(err, remote) {
+ if (err) return callback(err);
+
+ var src = populate ? remote : local;
+ var dst = populate ? local : remote;
+
+ IDBFS.reconcile(src, dst, callback);
+ });
+ });
+ },getDB:function (name, callback) {
+ // check the cache first
+ var db = IDBFS.dbs[name];
+ if (db) {
+ return callback(null, db);
+ }
+
+ var req;
+ try {
+ req = IDBFS.indexedDB().open(name, IDBFS.DB_VERSION);
+ } catch (e) {
+ return callback(e);
+ }
+ req.onupgradeneeded = function(e) {
+ var db = e.target.result;
+ var transaction = e.target.transaction;
+
+ var fileStore;
+
+ if (db.objectStoreNames.contains(IDBFS.DB_STORE_NAME)) {
+ fileStore = transaction.objectStore(IDBFS.DB_STORE_NAME);
+ } else {
+ fileStore = db.createObjectStore(IDBFS.DB_STORE_NAME);
+ }
+
+ fileStore.createIndex('timestamp', 'timestamp', { unique: false });
+ };
+ req.onsuccess = function() {
+ db = req.result;
+
+ // add to the cache
+ IDBFS.dbs[name] = db;
+ callback(null, db);
+ };
+ req.onerror = function() {
+ callback(this.error);
+ };
+ },getLocalSet:function (mount, callback) {
+ var entries = {};
+
+ function isRealDir(p) {
+ return p !== '.' && p !== '..';
+ };
+ function toAbsolute(root) {
+ return function(p) {
+ return PATH.join2(root, p);
+ }
+ };
+
+ var check = FS.readdir(mount.mountpoint).filter(isRealDir).map(toAbsolute(mount.mountpoint));
+
+ while (check.length) {
+ var path = check.pop();
+ var stat;
+
+ try {
+ stat = FS.stat(path);
+ } catch (e) {
+ return callback(e);
+ }
+
+ if (FS.isDir(stat.mode)) {
+ check.push.apply(check, FS.readdir(path).filter(isRealDir).map(toAbsolute(path)));
+ }
+
+ entries[path] = { timestamp: stat.mtime };
+ }
+
+ return callback(null, { type: 'local', entries: entries });
+ },getRemoteSet:function (mount, callback) {
+ var entries = {};
+
+ IDBFS.getDB(mount.mountpoint, function(err, db) {
+ if (err) return callback(err);
+
+ var transaction = db.transaction([IDBFS.DB_STORE_NAME], 'readonly');
+ transaction.onerror = function() { callback(this.error); };
+
+ var store = transaction.objectStore(IDBFS.DB_STORE_NAME);
+ var index = store.index('timestamp');
+
+ index.openKeyCursor().onsuccess = function(event) {
+ var cursor = event.target.result;
+
+ if (!cursor) {
+ return callback(null, { type: 'remote', db: db, entries: entries });
+ }
+
+ entries[cursor.primaryKey] = { timestamp: cursor.key };
+
+ cursor.continue();
+ };
+ });
+ },loadLocalEntry:function (path, callback) {
+ var stat, node;
+
+ try {
+ var lookup = FS.lookupPath(path);
+ node = lookup.node;
+ stat = FS.stat(path);
+ } catch (e) {
+ return callback(e);
+ }
+
+ if (FS.isDir(stat.mode)) {
+ return callback(null, { timestamp: stat.mtime, mode: stat.mode });
+ } else if (FS.isFile(stat.mode)) {
+ return callback(null, { timestamp: stat.mtime, mode: stat.mode, contents: node.contents });
+ } else {
+ return callback(new Error('node type not supported'));
+ }
+ },storeLocalEntry:function (path, entry, callback) {
+ try {
+ if (FS.isDir(entry.mode)) {
+ FS.mkdir(path, entry.mode);
+ } else if (FS.isFile(entry.mode)) {
+ FS.writeFile(path, entry.contents, { encoding: 'binary', canOwn: true });
+ } else {
+ return callback(new Error('node type not supported'));
+ }
+
+ FS.utime(path, entry.timestamp, entry.timestamp);
+ } catch (e) {
+ return callback(e);
+ }
+
+ callback(null);
+ },removeLocalEntry:function (path, callback) {
+ try {
+ var lookup = FS.lookupPath(path);
+ var stat = FS.stat(path);
+
+ if (FS.isDir(stat.mode)) {
+ FS.rmdir(path);
+ } else if (FS.isFile(stat.mode)) {
+ FS.unlink(path);
+ }
+ } catch (e) {
+ return callback(e);
+ }
+
+ callback(null);
+ },loadRemoteEntry:function (store, path, callback) {
+ var req = store.get(path);
+ req.onsuccess = function(event) { callback(null, event.target.result); };
+ req.onerror = function() { callback(this.error); };
+ },storeRemoteEntry:function (store, path, entry, callback) {
+ var req = store.put(entry, path);
+ req.onsuccess = function() { callback(null); };
+ req.onerror = function() { callback(this.error); };
+ },removeRemoteEntry:function (store, path, callback) {
+ var req = store.delete(path);
+ req.onsuccess = function() { callback(null); };
+ req.onerror = function() { callback(this.error); };
+ },reconcile:function (src, dst, callback) {
+ var total = 0;
+
+ var create = [];
+ Object.keys(src.entries).forEach(function (key) {
+ var e = src.entries[key];
+ var e2 = dst.entries[key];
+ if (!e2 || e.timestamp > e2.timestamp) {
+ create.push(key);
+ total++;
+ }
+ });
+
+ var remove = [];
+ Object.keys(dst.entries).forEach(function (key) {
+ var e = dst.entries[key];
+ var e2 = src.entries[key];
+ if (!e2) {
+ remove.push(key);
+ total++;
+ }
+ });
+
+ if (!total) {
+ return callback(null);
+ }
+
+ var errored = false;
+ var completed = 0;
+ var db = src.type === 'remote' ? src.db : dst.db;
+ var transaction = db.transaction([IDBFS.DB_STORE_NAME], 'readwrite');
+ var store = transaction.objectStore(IDBFS.DB_STORE_NAME);
+
+ function done(err) {
+ if (err) {
+ if (!done.errored) {
+ done.errored = true;
+ return callback(err);
+ }
+ return;
+ }
+ if (++completed >= total) {
+ return callback(null);
+ }
+ };
+
+ transaction.onerror = function() { done(this.error); };
+
+ // sort paths in ascending order so directory entries are created
+ // before the files inside them
+ create.sort().forEach(function (path) {
+ if (dst.type === 'local') {
+ IDBFS.loadRemoteEntry(store, path, function (err, entry) {
+ if (err) return done(err);
+ IDBFS.storeLocalEntry(path, entry, done);
+ });
+ } else {
+ IDBFS.loadLocalEntry(path, function (err, entry) {
+ if (err) return done(err);
+ IDBFS.storeRemoteEntry(store, path, entry, done);
+ });
+ }
+ });
+
+ // sort paths in descending order so files are deleted before their
+ // parent directories
+ remove.sort().reverse().forEach(function(path) {
+ if (dst.type === 'local') {
+ IDBFS.removeLocalEntry(path, done);
+ } else {
+ IDBFS.removeRemoteEntry(store, path, done);
+ }
+ });
+ }};
+
+ var NODEFS={isWindows:false,staticInit:function () {
+ NODEFS.isWindows = !!process.platform.match(/^win/);
+ },mount:function (mount) {
+ assert(ENVIRONMENT_IS_NODE);
+ return NODEFS.createNode(null, '/', NODEFS.getMode(mount.opts.root), 0);
+ },createNode:function (parent, name, mode, dev) {
+ if (!FS.isDir(mode) && !FS.isFile(mode) && !FS.isLink(mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var node = FS.createNode(parent, name, mode);
+ node.node_ops = NODEFS.node_ops;
+ node.stream_ops = NODEFS.stream_ops;
+ return node;
+ },getMode:function (path) {
+ var stat;
+ try {
+ stat = fs.lstatSync(path);
+ if (NODEFS.isWindows) {
+ // On Windows, directories return permission bits 'rw-rw-rw-', even though they have 'rwxrwxrwx', so
+ // propagate write bits to execute bits.
+ stat.mode = stat.mode | ((stat.mode & 146) >> 1);
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ return stat.mode;
+ },realPath:function (node) {
+ var parts = [];
+ while (node.parent !== node) {
+ parts.push(node.name);
+ node = node.parent;
+ }
+ parts.push(node.mount.opts.root);
+ parts.reverse();
+ return PATH.join.apply(null, parts);
+ },flagsToPermissionStringMap:{0:"r",1:"r+",2:"r+",64:"r",65:"r+",66:"r+",129:"rx+",193:"rx+",514:"w+",577:"w",578:"w+",705:"wx",706:"wx+",1024:"a",1025:"a",1026:"a+",1089:"a",1090:"a+",1153:"ax",1154:"ax+",1217:"ax",1218:"ax+",4096:"rs",4098:"rs+"},flagsToPermissionString:function (flags) {
+ if (flags in NODEFS.flagsToPermissionStringMap) {
+ return NODEFS.flagsToPermissionStringMap[flags];
+ } else {
+ return flags;
+ }
+ },node_ops:{getattr:function (node) {
+ var path = NODEFS.realPath(node);
+ var stat;
+ try {
+ stat = fs.lstatSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ // node.js v0.10.20 doesn't report blksize and blocks on Windows. Fake them with default blksize of 4096.
+ // See http://support.microsoft.com/kb/140365
+ if (NODEFS.isWindows && !stat.blksize) {
+ stat.blksize = 4096;
+ }
+ if (NODEFS.isWindows && !stat.blocks) {
+ stat.blocks = (stat.size+stat.blksize-1)/stat.blksize|0;
+ }
+ return {
+ dev: stat.dev,
+ ino: stat.ino,
+ mode: stat.mode,
+ nlink: stat.nlink,
+ uid: stat.uid,
+ gid: stat.gid,
+ rdev: stat.rdev,
+ size: stat.size,
+ atime: stat.atime,
+ mtime: stat.mtime,
+ ctime: stat.ctime,
+ blksize: stat.blksize,
+ blocks: stat.blocks
+ };
+ },setattr:function (node, attr) {
+ var path = NODEFS.realPath(node);
+ try {
+ if (attr.mode !== undefined) {
+ fs.chmodSync(path, attr.mode);
+ // update the common node structure mode as well
+ node.mode = attr.mode;
+ }
+ if (attr.timestamp !== undefined) {
+ var date = new Date(attr.timestamp);
+ fs.utimesSync(path, date, date);
+ }
+ if (attr.size !== undefined) {
+ fs.truncateSync(path, attr.size);
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },lookup:function (parent, name) {
+ var path = PATH.join2(NODEFS.realPath(parent), name);
+ var mode = NODEFS.getMode(path);
+ return NODEFS.createNode(parent, name, mode);
+ },mknod:function (parent, name, mode, dev) {
+ var node = NODEFS.createNode(parent, name, mode, dev);
+ // create the backing node for this in the fs root as well
+ var path = NODEFS.realPath(node);
+ try {
+ if (FS.isDir(node.mode)) {
+ fs.mkdirSync(path, node.mode);
+ } else {
+ fs.writeFileSync(path, '', { mode: node.mode });
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ return node;
+ },rename:function (oldNode, newDir, newName) {
+ var oldPath = NODEFS.realPath(oldNode);
+ var newPath = PATH.join2(NODEFS.realPath(newDir), newName);
+ try {
+ fs.renameSync(oldPath, newPath);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },unlink:function (parent, name) {
+ var path = PATH.join2(NODEFS.realPath(parent), name);
+ try {
+ fs.unlinkSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },rmdir:function (parent, name) {
+ var path = PATH.join2(NODEFS.realPath(parent), name);
+ try {
+ fs.rmdirSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },readdir:function (node) {
+ var path = NODEFS.realPath(node);
+ try {
+ return fs.readdirSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },symlink:function (parent, newName, oldPath) {
+ var newPath = PATH.join2(NODEFS.realPath(parent), newName);
+ try {
+ fs.symlinkSync(oldPath, newPath);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },readlink:function (node) {
+ var path = NODEFS.realPath(node);
+ try {
+ return fs.readlinkSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ }},stream_ops:{open:function (stream) {
+ var path = NODEFS.realPath(stream.node);
+ try {
+ if (FS.isFile(stream.node.mode)) {
+ stream.nfd = fs.openSync(path, NODEFS.flagsToPermissionString(stream.flags));
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },close:function (stream) {
+ try {
+ if (FS.isFile(stream.node.mode) && stream.nfd) {
+ fs.closeSync(stream.nfd);
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },read:function (stream, buffer, offset, length, position) {
+ // FIXME this is terrible.
+ var nbuffer = new Buffer(length);
+ var res;
+ try {
+ res = fs.readSync(stream.nfd, nbuffer, 0, length, position);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ if (res > 0) {
+ for (var i = 0; i < res; i++) {
+ buffer[offset + i] = nbuffer[i];
+ }
+ }
+ return res;
+ },write:function (stream, buffer, offset, length, position) {
+ // FIXME this is terrible.
+ var nbuffer = new Buffer(buffer.subarray(offset, offset + length));
+ var res;
+ try {
+ res = fs.writeSync(stream.nfd, nbuffer, 0, length, position);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ return res;
+ },llseek:function (stream, offset, whence) {
+ var position = offset;
+ if (whence === 1) { // SEEK_CUR.
+ position += stream.position;
+ } else if (whence === 2) { // SEEK_END.
+ if (FS.isFile(stream.node.mode)) {
+ try {
+ var stat = fs.fstatSync(stream.nfd);
+ position += stat.size;
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ }
+ }
+
+ if (position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+
+ stream.position = position;
+ return position;
+ }}};
+
+ var _stdin=allocate(1, "i32*", ALLOC_STATIC);
+
+ var _stdout=allocate(1, "i32*", ALLOC_STATIC);
+
+ var _stderr=allocate(1, "i32*", ALLOC_STATIC);
+
+ function _fflush(stream) {
+ // int fflush(FILE *stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fflush.html
+ // we don't currently perform any user-space buffering of data
+ }var FS={root:null,mounts:[],devices:[null],streams:[],nextInode:1,nameTable:null,currentPath:"/",initialized:false,ignorePermissions:true,ErrnoError:null,genericErrors:{},handleFSError:function (e) {
+ if (!(e instanceof FS.ErrnoError)) throw e + ' : ' + stackTrace();
+ return ___setErrNo(e.errno);
+ },lookupPath:function (path, opts) {
+ path = PATH.resolve(FS.cwd(), path);
+ opts = opts || {};
+
+ var defaults = {
+ follow_mount: true,
+ recurse_count: 0
+ };
+ for (var key in defaults) {
+ if (opts[key] === undefined) {
+ opts[key] = defaults[key];
+ }
+ }
+
+ if (opts.recurse_count > 8) { // max recursive lookup of 8
+ throw new FS.ErrnoError(ERRNO_CODES.ELOOP);
+ }
+
+ // split the path
+ var parts = PATH.normalizeArray(path.split('/').filter(function(p) {
+ return !!p;
+ }), false);
+
+ // start at the root
+ var current = FS.root;
+ var current_path = '/';
+
+ for (var i = 0; i < parts.length; i++) {
+ var islast = (i === parts.length-1);
+ if (islast && opts.parent) {
+ // stop resolving
+ break;
+ }
+
+ current = FS.lookupNode(current, parts[i]);
+ current_path = PATH.join2(current_path, parts[i]);
+
+ // jump to the mount's root node if this is a mountpoint
+ if (FS.isMountpoint(current)) {
+ if (!islast || (islast && opts.follow_mount)) {
+ current = current.mounted.root;
+ }
+ }
+
+ // by default, lookupPath will not follow a symlink if it is the final path component.
+ // setting opts.follow = true will override this behavior.
+ if (!islast || opts.follow) {
+ var count = 0;
+ while (FS.isLink(current.mode)) {
+ var link = FS.readlink(current_path);
+ current_path = PATH.resolve(PATH.dirname(current_path), link);
+
+ var lookup = FS.lookupPath(current_path, { recurse_count: opts.recurse_count });
+ current = lookup.node;
+
+ if (count++ > 40) { // limit max consecutive symlinks to 40 (SYMLOOP_MAX).
+ throw new FS.ErrnoError(ERRNO_CODES.ELOOP);
+ }
+ }
+ }
+ }
+
+ return { path: current_path, node: current };
+ },getPath:function (node) {
+ var path;
+ while (true) {
+ if (FS.isRoot(node)) {
+ var mount = node.mount.mountpoint;
+ if (!path) return mount;
+ return mount[mount.length-1] !== '/' ? mount + '/' + path : mount + path;
+ }
+ path = path ? node.name + '/' + path : node.name;
+ node = node.parent;
+ }
+ },hashName:function (parentid, name) {
+ var hash = 0;
+
+
+ for (var i = 0; i < name.length; i++) {
+ hash = ((hash << 5) - hash + name.charCodeAt(i)) | 0;
+ }
+ return ((parentid + hash) >>> 0) % FS.nameTable.length;
+ },hashAddNode:function (node) {
+ var hash = FS.hashName(node.parent.id, node.name);
+ node.name_next = FS.nameTable[hash];
+ FS.nameTable[hash] = node;
+ },hashRemoveNode:function (node) {
+ var hash = FS.hashName(node.parent.id, node.name);
+ if (FS.nameTable[hash] === node) {
+ FS.nameTable[hash] = node.name_next;
+ } else {
+ var current = FS.nameTable[hash];
+ while (current) {
+ if (current.name_next === node) {
+ current.name_next = node.name_next;
+ break;
+ }
+ current = current.name_next;
+ }
+ }
+ },lookupNode:function (parent, name) {
+ var err = FS.mayLookup(parent);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ var hash = FS.hashName(parent.id, name);
+ for (var node = FS.nameTable[hash]; node; node = node.name_next) {
+ var nodeName = node.name;
+ if (node.parent.id === parent.id && nodeName === name) {
+ return node;
+ }
+ }
+ // if we failed to find it in the cache, call into the VFS
+ return FS.lookup(parent, name);
+ },createNode:function (parent, name, mode, rdev) {
+ if (!FS.FSNode) {
+ FS.FSNode = function(parent, name, mode, rdev) {
+ if (!parent) {
+ parent = this; // root node sets parent to itself
+ }
+ this.parent = parent;
+ this.mount = parent.mount;
+ this.mounted = null;
+ this.id = FS.nextInode++;
+ this.name = name;
+ this.mode = mode;
+ this.node_ops = {};
+ this.stream_ops = {};
+ this.rdev = rdev;
+ };
+
+ FS.FSNode.prototype = {};
+
+ // compatibility
+ var readMode = 292 | 73;
+ var writeMode = 146;
+
+ // NOTE we must use Object.defineProperties instead of individual calls to
+ // Object.defineProperty in order to make closure compiler happy
+ Object.defineProperties(FS.FSNode.prototype, {
+ read: {
+ get: function() { return (this.mode & readMode) === readMode; },
+ set: function(val) { val ? this.mode |= readMode : this.mode &= ~readMode; }
+ },
+ write: {
+ get: function() { return (this.mode & writeMode) === writeMode; },
+ set: function(val) { val ? this.mode |= writeMode : this.mode &= ~writeMode; }
+ },
+ isFolder: {
+ get: function() { return FS.isDir(this.mode); },
+ },
+ isDevice: {
+ get: function() { return FS.isChrdev(this.mode); },
+ },
+ });
+ }
+
+ var node = new FS.FSNode(parent, name, mode, rdev);
+
+ FS.hashAddNode(node);
+
+ return node;
+ },destroyNode:function (node) {
+ FS.hashRemoveNode(node);
+ },isRoot:function (node) {
+ return node === node.parent;
+ },isMountpoint:function (node) {
+ return !!node.mounted;
+ },isFile:function (mode) {
+ return (mode & 61440) === 32768;
+ },isDir:function (mode) {
+ return (mode & 61440) === 16384;
+ },isLink:function (mode) {
+ return (mode & 61440) === 40960;
+ },isChrdev:function (mode) {
+ return (mode & 61440) === 8192;
+ },isBlkdev:function (mode) {
+ return (mode & 61440) === 24576;
+ },isFIFO:function (mode) {
+ return (mode & 61440) === 4096;
+ },isSocket:function (mode) {
+ return (mode & 49152) === 49152;
+ },flagModes:{"r":0,"rs":1052672,"r+":2,"w":577,"wx":705,"xw":705,"w+":578,"wx+":706,"xw+":706,"a":1089,"ax":1217,"xa":1217,"a+":1090,"ax+":1218,"xa+":1218},modeStringToFlags:function (str) {
+ var flags = FS.flagModes[str];
+ if (typeof flags === 'undefined') {
+ throw new Error('Unknown file open mode: ' + str);
+ }
+ return flags;
+ },flagsToPermissionString:function (flag) {
+ var accmode = flag & 2097155;
+ var perms = ['r', 'w', 'rw'][accmode];
+ if ((flag & 512)) {
+ perms += 'w';
+ }
+ return perms;
+ },nodePermissions:function (node, perms) {
+ if (FS.ignorePermissions) {
+ return 0;
+ }
+ // return 0 if any user, group or owner bits are set.
+ if (perms.indexOf('r') !== -1 && !(node.mode & 292)) {
+ return ERRNO_CODES.EACCES;
+ } else if (perms.indexOf('w') !== -1 && !(node.mode & 146)) {
+ return ERRNO_CODES.EACCES;
+ } else if (perms.indexOf('x') !== -1 && !(node.mode & 73)) {
+ return ERRNO_CODES.EACCES;
+ }
+ return 0;
+ },mayLookup:function (dir) {
+ return FS.nodePermissions(dir, 'x');
+ },mayCreate:function (dir, name) {
+ try {
+ var node = FS.lookupNode(dir, name);
+ return ERRNO_CODES.EEXIST;
+ } catch (e) {
+ }
+ return FS.nodePermissions(dir, 'wx');
+ },mayDelete:function (dir, name, isdir) {
+ var node;
+ try {
+ node = FS.lookupNode(dir, name);
+ } catch (e) {
+ return e.errno;
+ }
+ var err = FS.nodePermissions(dir, 'wx');
+ if (err) {
+ return err;
+ }
+ if (isdir) {
+ if (!FS.isDir(node.mode)) {
+ return ERRNO_CODES.ENOTDIR;
+ }
+ if (FS.isRoot(node) || FS.getPath(node) === FS.cwd()) {
+ return ERRNO_CODES.EBUSY;
+ }
+ } else {
+ if (FS.isDir(node.mode)) {
+ return ERRNO_CODES.EISDIR;
+ }
+ }
+ return 0;
+ },mayOpen:function (node, flags) {
+ if (!node) {
+ return ERRNO_CODES.ENOENT;
+ }
+ if (FS.isLink(node.mode)) {
+ return ERRNO_CODES.ELOOP;
+ } else if (FS.isDir(node.mode)) {
+ if ((flags & 2097155) !== 0 || // opening for write
+ (flags & 512)) {
+ return ERRNO_CODES.EISDIR;
+ }
+ }
+ return FS.nodePermissions(node, FS.flagsToPermissionString(flags));
+ },MAX_OPEN_FDS:4096,nextfd:function (fd_start, fd_end) {
+ fd_start = fd_start || 0;
+ fd_end = fd_end || FS.MAX_OPEN_FDS;
+ for (var fd = fd_start; fd <= fd_end; fd++) {
+ if (!FS.streams[fd]) {
+ return fd;
+ }
+ }
+ throw new FS.ErrnoError(ERRNO_CODES.EMFILE);
+ },getStream:function (fd) {
+ return FS.streams[fd];
+ },createStream:function (stream, fd_start, fd_end) {
+ if (!FS.FSStream) {
+ FS.FSStream = function(){};
+ FS.FSStream.prototype = {};
+ // compatibility
+ Object.defineProperties(FS.FSStream.prototype, {
+ object: {
+ get: function() { return this.node; },
+ set: function(val) { this.node = val; }
+ },
+ isRead: {
+ get: function() { return (this.flags & 2097155) !== 1; }
+ },
+ isWrite: {
+ get: function() { return (this.flags & 2097155) !== 0; }
+ },
+ isAppend: {
+ get: function() { return (this.flags & 1024); }
+ }
+ });
+ }
+ if (0) {
+ // reuse the object
+ stream.__proto__ = FS.FSStream.prototype;
+ } else {
+ var newStream = new FS.FSStream();
+ for (var p in stream) {
+ newStream[p] = stream[p];
+ }
+ stream = newStream;
+ }
+ var fd = FS.nextfd(fd_start, fd_end);
+ stream.fd = fd;
+ FS.streams[fd] = stream;
+ return stream;
+ },closeStream:function (fd) {
+ FS.streams[fd] = null;
+ },getStreamFromPtr:function (ptr) {
+ return FS.streams[ptr - 1];
+ },getPtrForStream:function (stream) {
+ return stream ? stream.fd + 1 : 0;
+ },chrdev_stream_ops:{open:function (stream) {
+ var device = FS.getDevice(stream.node.rdev);
+ // override node's stream ops with the device's
+ stream.stream_ops = device.stream_ops;
+ // forward the open call
+ if (stream.stream_ops.open) {
+ stream.stream_ops.open(stream);
+ }
+ },llseek:function () {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }},major:function (dev) {
+ return ((dev) >> 8);
+ },minor:function (dev) {
+ return ((dev) & 0xff);
+ },makedev:function (ma, mi) {
+ return ((ma) << 8 | (mi));
+ },registerDevice:function (dev, ops) {
+ FS.devices[dev] = { stream_ops: ops };
+ },getDevice:function (dev) {
+ return FS.devices[dev];
+ },getMounts:function (mount) {
+ var mounts = [];
+ var check = [mount];
+
+ while (check.length) {
+ var m = check.pop();
+
+ mounts.push(m);
+
+ check.push.apply(check, m.mounts);
+ }
+
+ return mounts;
+ },syncfs:function (populate, callback) {
+ if (typeof(populate) === 'function') {
+ callback = populate;
+ populate = false;
+ }
+
+ var mounts = FS.getMounts(FS.root.mount);
+ var completed = 0;
+
+ function done(err) {
+ if (err) {
+ if (!done.errored) {
+ done.errored = true;
+ return callback(err);
+ }
+ return;
+ }
+ if (++completed >= mounts.length) {
+ callback(null);
+ }
+ };
+
+ // sync all mounts
+ mounts.forEach(function (mount) {
+ if (!mount.type.syncfs) {
+ return done(null);
+ }
+ mount.type.syncfs(mount, populate, done);
+ });
+ },mount:function (type, opts, mountpoint) {
+ var root = mountpoint === '/';
+ var pseudo = !mountpoint;
+ var node;
+
+ if (root && FS.root) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ } else if (!root && !pseudo) {
+ var lookup = FS.lookupPath(mountpoint, { follow_mount: false });
+
+ mountpoint = lookup.path; // use the absolute path
+ node = lookup.node;
+
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+
+ if (!FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTDIR);
+ }
+ }
+
+ var mount = {
+ type: type,
+ opts: opts,
+ mountpoint: mountpoint,
+ mounts: []
+ };
+
+ // create a root node for the fs
+ var mountRoot = type.mount(mount);
+ mountRoot.mount = mount;
+ mount.root = mountRoot;
+
+ if (root) {
+ FS.root = mountRoot;
+ } else if (node) {
+ // set as a mountpoint
+ node.mounted = mount;
+
+ // add the new mount to the current mount's children
+ if (node.mount) {
+ node.mount.mounts.push(mount);
+ }
+ }
+
+ return mountRoot;
+ },unmount:function (mountpoint) {
+ var lookup = FS.lookupPath(mountpoint, { follow_mount: false });
+
+ if (!FS.isMountpoint(lookup.node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+
+ // destroy the nodes for this mount, and all its child mounts
+ var node = lookup.node;
+ var mount = node.mounted;
+ var mounts = FS.getMounts(mount);
+
+ Object.keys(FS.nameTable).forEach(function (hash) {
+ var current = FS.nameTable[hash];
+
+ while (current) {
+ var next = current.name_next;
+
+ if (mounts.indexOf(current.mount) !== -1) {
+ FS.destroyNode(current);
+ }
+
+ current = next;
+ }
+ });
+
+ // no longer a mountpoint
+ node.mounted = null;
+
+ // remove this mount from the child mounts
+ var idx = node.mount.mounts.indexOf(mount);
+ assert(idx !== -1);
+ node.mount.mounts.splice(idx, 1);
+ },lookup:function (parent, name) {
+ return parent.node_ops.lookup(parent, name);
+ },mknod:function (path, mode, dev) {
+ var lookup = FS.lookupPath(path, { parent: true });
+ var parent = lookup.node;
+ var name = PATH.basename(path);
+ var err = FS.mayCreate(parent, name);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.mknod) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ return parent.node_ops.mknod(parent, name, mode, dev);
+ },create:function (path, mode) {
+ mode = mode !== undefined ? mode : 438 /* 0666 */;
+ mode &= 4095;
+ mode |= 32768;
+ return FS.mknod(path, mode, 0);
+ },mkdir:function (path, mode) {
+ mode = mode !== undefined ? mode : 511 /* 0777 */;
+ mode &= 511 | 512;
+ mode |= 16384;
+ return FS.mknod(path, mode, 0);
+ },mkdev:function (path, mode, dev) {
+ if (typeof(dev) === 'undefined') {
+ dev = mode;
+ mode = 438 /* 0666 */;
+ }
+ mode |= 8192;
+ return FS.mknod(path, mode, dev);
+ },symlink:function (oldpath, newpath) {
+ var lookup = FS.lookupPath(newpath, { parent: true });
+ var parent = lookup.node;
+ var newname = PATH.basename(newpath);
+ var err = FS.mayCreate(parent, newname);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.symlink) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ return parent.node_ops.symlink(parent, newname, oldpath);
+ },rename:function (old_path, new_path) {
+ var old_dirname = PATH.dirname(old_path);
+ var new_dirname = PATH.dirname(new_path);
+ var old_name = PATH.basename(old_path);
+ var new_name = PATH.basename(new_path);
+ // parents must exist
+ var lookup, old_dir, new_dir;
+ try {
+ lookup = FS.lookupPath(old_path, { parent: true });
+ old_dir = lookup.node;
+ lookup = FS.lookupPath(new_path, { parent: true });
+ new_dir = lookup.node;
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ // need to be part of the same mount
+ if (old_dir.mount !== new_dir.mount) {
+ throw new FS.ErrnoError(ERRNO_CODES.EXDEV);
+ }
+ // source must exist
+ var old_node = FS.lookupNode(old_dir, old_name);
+ // old path should not be an ancestor of the new path
+ var relative = PATH.relative(old_path, new_dirname);
+ if (relative.charAt(0) !== '.') {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ // new path should not be an ancestor of the old path
+ relative = PATH.relative(new_path, old_dirname);
+ if (relative.charAt(0) !== '.') {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTEMPTY);
+ }
+ // see if the new path already exists
+ var new_node;
+ try {
+ new_node = FS.lookupNode(new_dir, new_name);
+ } catch (e) {
+ // not fatal
+ }
+ // early out if nothing needs to change
+ if (old_node === new_node) {
+ return;
+ }
+ // we'll need to delete the old entry
+ var isdir = FS.isDir(old_node.mode);
+ var err = FS.mayDelete(old_dir, old_name, isdir);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ // need delete permissions if we'll be overwriting.
+ // need create permissions if new doesn't already exist.
+ err = new_node ?
+ FS.mayDelete(new_dir, new_name, isdir) :
+ FS.mayCreate(new_dir, new_name);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!old_dir.node_ops.rename) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isMountpoint(old_node) || (new_node && FS.isMountpoint(new_node))) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ // if we are going to change the parent, check write permissions
+ if (new_dir !== old_dir) {
+ err = FS.nodePermissions(old_dir, 'w');
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ }
+ // remove the node from the lookup hash
+ FS.hashRemoveNode(old_node);
+ // do the underlying fs rename
+ try {
+ old_dir.node_ops.rename(old_node, new_dir, new_name);
+ } catch (e) {
+ throw e;
+ } finally {
+ // add the node back to the hash (in case node_ops.rename
+ // changed its name)
+ FS.hashAddNode(old_node);
+ }
+ },rmdir:function (path) {
+ var lookup = FS.lookupPath(path, { parent: true });
+ var parent = lookup.node;
+ var name = PATH.basename(path);
+ var node = FS.lookupNode(parent, name);
+ var err = FS.mayDelete(parent, name, true);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.rmdir) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ parent.node_ops.rmdir(parent, name);
+ FS.destroyNode(node);
+ },readdir:function (path) {
+ var lookup = FS.lookupPath(path, { follow: true });
+ var node = lookup.node;
+ if (!node.node_ops.readdir) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTDIR);
+ }
+ return node.node_ops.readdir(node);
+ },unlink:function (path) {
+ var lookup = FS.lookupPath(path, { parent: true });
+ var parent = lookup.node;
+ var name = PATH.basename(path);
+ var node = FS.lookupNode(parent, name);
+ var err = FS.mayDelete(parent, name, false);
+ if (err) {
+ // POSIX says unlink should set EPERM, not EISDIR
+ if (err === ERRNO_CODES.EISDIR) err = ERRNO_CODES.EPERM;
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.unlink) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ parent.node_ops.unlink(parent, name);
+ FS.destroyNode(node);
+ },readlink:function (path) {
+ var lookup = FS.lookupPath(path);
+ var link = lookup.node;
+ if (!link.node_ops.readlink) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ return link.node_ops.readlink(link);
+ },stat:function (path, dontFollow) {
+ var lookup = FS.lookupPath(path, { follow: !dontFollow });
+ var node = lookup.node;
+ if (!node.node_ops.getattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ return node.node_ops.getattr(node);
+ },lstat:function (path) {
+ return FS.stat(path, true);
+ },chmod:function (path, mode, dontFollow) {
+ var node;
+ if (typeof path === 'string') {
+ var lookup = FS.lookupPath(path, { follow: !dontFollow });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ if (!node.node_ops.setattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ node.node_ops.setattr(node, {
+ mode: (mode & 4095) | (node.mode & ~4095),
+ timestamp: Date.now()
+ });
+ },lchmod:function (path, mode) {
+ FS.chmod(path, mode, true);
+ },fchmod:function (fd, mode) {
+ var stream = FS.getStream(fd);
+ if (!stream) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ FS.chmod(stream.node, mode);
+ },chown:function (path, uid, gid, dontFollow) {
+ var node;
+ if (typeof path === 'string') {
+ var lookup = FS.lookupPath(path, { follow: !dontFollow });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ if (!node.node_ops.setattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ node.node_ops.setattr(node, {
+ timestamp: Date.now()
+ // we ignore the uid / gid for now
+ });
+ },lchown:function (path, uid, gid) {
+ FS.chown(path, uid, gid, true);
+ },fchown:function (fd, uid, gid) {
+ var stream = FS.getStream(fd);
+ if (!stream) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ FS.chown(stream.node, uid, gid);
+ },truncate:function (path, len) {
+ if (len < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var node;
+ if (typeof path === 'string') {
+ var lookup = FS.lookupPath(path, { follow: true });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ if (!node.node_ops.setattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EISDIR);
+ }
+ if (!FS.isFile(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var err = FS.nodePermissions(node, 'w');
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ node.node_ops.setattr(node, {
+ size: len,
+ timestamp: Date.now()
+ });
+ },ftruncate:function (fd, len) {
+ var stream = FS.getStream(fd);
+ if (!stream) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ FS.truncate(stream.node, len);
+ },utime:function (path, atime, mtime) {
+ var lookup = FS.lookupPath(path, { follow: true });
+ var node = lookup.node;
+ node.node_ops.setattr(node, {
+ timestamp: Math.max(atime, mtime)
+ });
+ },open:function (path, flags, mode, fd_start, fd_end) {
+ flags = typeof flags === 'string' ? FS.modeStringToFlags(flags) : flags;
+ mode = typeof mode === 'undefined' ? 438 /* 0666 */ : mode;
+ if ((flags & 64)) {
+ mode = (mode & 4095) | 32768;
+ } else {
+ mode = 0;
+ }
+ var node;
+ if (typeof path === 'object') {
+ node = path;
+ } else {
+ path = PATH.normalize(path);
+ try {
+ var lookup = FS.lookupPath(path, {
+ follow: !(flags & 131072)
+ });
+ node = lookup.node;
+ } catch (e) {
+ // ignore
+ }
+ }
+ // perhaps we need to create the node
+ if ((flags & 64)) {
+ if (node) {
+ // if O_CREAT and O_EXCL are set, error out if the node already exists
+ if ((flags & 128)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EEXIST);
+ }
+ } else {
+ // node doesn't exist, try to create it
+ node = FS.mknod(path, mode, 0);
+ }
+ }
+ if (!node) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOENT);
+ }
+ // can't truncate a device
+ if (FS.isChrdev(node.mode)) {
+ flags &= ~512;
+ }
+ // check permissions
+ var err = FS.mayOpen(node, flags);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ // do truncation if necessary
+ if ((flags & 512)) {
+ FS.truncate(node, 0);
+ }
+ // we've already handled these, don't pass down to the underlying vfs
+ flags &= ~(128 | 512);
+
+ // register the stream with the filesystem
+ var stream = FS.createStream({
+ node: node,
+ path: FS.getPath(node), // we want the absolute path to the node
+ flags: flags,
+ seekable: true,
+ position: 0,
+ stream_ops: node.stream_ops,
+ // used by the file family libc calls (fopen, fwrite, ferror, etc.)
+ ungotten: [],
+ error: false
+ }, fd_start, fd_end);
+ // call the new stream's open function
+ if (stream.stream_ops.open) {
+ stream.stream_ops.open(stream);
+ }
+ if (Module['logReadFiles'] && !(flags & 1)) {
+ if (!FS.readFiles) FS.readFiles = {};
+ if (!(path in FS.readFiles)) {
+ FS.readFiles[path] = 1;
+ Module['printErr']('read file: ' + path);
+ }
+ }
+ return stream;
+ },close:function (stream) {
+ try {
+ if (stream.stream_ops.close) {
+ stream.stream_ops.close(stream);
+ }
+ } catch (e) {
+ throw e;
+ } finally {
+ FS.closeStream(stream.fd);
+ }
+ },llseek:function (stream, offset, whence) {
+ if (!stream.seekable || !stream.stream_ops.llseek) {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }
+ return stream.stream_ops.llseek(stream, offset, whence);
+ },read:function (stream, buffer, offset, length, position) {
+ if (length < 0 || position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ if ((stream.flags & 2097155) === 1) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if (FS.isDir(stream.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EISDIR);
+ }
+ if (!stream.stream_ops.read) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var seeking = true;
+ if (typeof position === 'undefined') {
+ position = stream.position;
+ seeking = false;
+ } else if (!stream.seekable) {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }
+ var bytesRead = stream.stream_ops.read(stream, buffer, offset, length, position);
+ if (!seeking) stream.position += bytesRead;
+ return bytesRead;
+ },write:function (stream, buffer, offset, length, position, canOwn) {
+ if (length < 0 || position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if (FS.isDir(stream.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EISDIR);
+ }
+ if (!stream.stream_ops.write) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var seeking = true;
+ if (typeof position === 'undefined') {
+ position = stream.position;
+ seeking = false;
+ } else if (!stream.seekable) {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }
+ if (stream.flags & 1024) {
+ // seek to the end before writing in append mode
+ FS.llseek(stream, 0, 2);
+ }
+ var bytesWritten = stream.stream_ops.write(stream, buffer, offset, length, position, canOwn);
+ if (!seeking) stream.position += bytesWritten;
+ return bytesWritten;
+ },allocate:function (stream, offset, length) {
+ if (offset < 0 || length <= 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if (!FS.isFile(stream.node.mode) && !FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ if (!stream.stream_ops.allocate) {
+ throw new FS.ErrnoError(ERRNO_CODES.EOPNOTSUPP);
+ }
+ stream.stream_ops.allocate(stream, offset, length);
+ },mmap:function (stream, buffer, offset, length, position, prot, flags) {
+ // TODO if PROT is PROT_WRITE, make sure we have write access
+ if ((stream.flags & 2097155) === 1) {
+ throw new FS.ErrnoError(ERRNO_CODES.EACCES);
+ }
+ if (!stream.stream_ops.mmap) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ return stream.stream_ops.mmap(stream, buffer, offset, length, position, prot, flags);
+ },ioctl:function (stream, cmd, arg) {
+ if (!stream.stream_ops.ioctl) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTTY);
+ }
+ return stream.stream_ops.ioctl(stream, cmd, arg);
+ },readFile:function (path, opts) {
+ opts = opts || {};
+ opts.flags = opts.flags || 'r';
+ opts.encoding = opts.encoding || 'binary';
+ if (opts.encoding !== 'utf8' && opts.encoding !== 'binary') {
+ throw new Error('Invalid encoding type "' + opts.encoding + '"');
+ }
+ var ret;
+ var stream = FS.open(path, opts.flags);
+ var stat = FS.stat(path);
+ var length = stat.size;
+ var buf = new Uint8Array(length);
+ FS.read(stream, buf, 0, length, 0);
+ if (opts.encoding === 'utf8') {
+ ret = '';
+ var utf8 = new Runtime.UTF8Processor();
+ for (var i = 0; i < length; i++) {
+ ret += utf8.processCChar(buf[i]);
+ }
+ } else if (opts.encoding === 'binary') {
+ ret = buf;
+ }
+ FS.close(stream);
+ return ret;
+ },writeFile:function (path, data, opts) {
+ opts = opts || {};
+ opts.flags = opts.flags || 'w';
+ opts.encoding = opts.encoding || 'utf8';
+ if (opts.encoding !== 'utf8' && opts.encoding !== 'binary') {
+ throw new Error('Invalid encoding type "' + opts.encoding + '"');
+ }
+ var stream = FS.open(path, opts.flags, opts.mode);
+ if (opts.encoding === 'utf8') {
+ var utf8 = new Runtime.UTF8Processor();
+ var buf = new Uint8Array(utf8.processJSString(data));
+ FS.write(stream, buf, 0, buf.length, 0, opts.canOwn);
+ } else if (opts.encoding === 'binary') {
+ FS.write(stream, data, 0, data.length, 0, opts.canOwn);
+ }
+ FS.close(stream);
+ },cwd:function () {
+ return FS.currentPath;
+ },chdir:function (path) {
+ var lookup = FS.lookupPath(path, { follow: true });
+ if (!FS.isDir(lookup.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTDIR);
+ }
+ var err = FS.nodePermissions(lookup.node, 'x');
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ FS.currentPath = lookup.path;
+ },createDefaultDirectories:function () {
+ FS.mkdir('/tmp');
+ },createDefaultDevices:function () {
+ // create /dev
+ FS.mkdir('/dev');
+ // setup /dev/null
+ FS.registerDevice(FS.makedev(1, 3), {
+ read: function() { return 0; },
+ write: function() { return 0; }
+ });
+ FS.mkdev('/dev/null', FS.makedev(1, 3));
+ // setup /dev/tty and /dev/tty1
+ // stderr needs to print output using Module['printErr']
+ // so we register a second tty just for it.
+ TTY.register(FS.makedev(5, 0), TTY.default_tty_ops);
+ TTY.register(FS.makedev(6, 0), TTY.default_tty1_ops);
+ FS.mkdev('/dev/tty', FS.makedev(5, 0));
+ FS.mkdev('/dev/tty1', FS.makedev(6, 0));
+ // we're not going to emulate the actual shm device,
+ // just create the tmp dirs that reside in it commonly
+ FS.mkdir('/dev/shm');
+ FS.mkdir('/dev/shm/tmp');
+ },createStandardStreams:function () {
+ // TODO deprecate the old functionality of a single
+ // input / output callback and that utilizes FS.createDevice
+ // and instead require a unique set of stream ops
+
+ // by default, we symlink the standard streams to the
+ // default tty devices. however, if the standard streams
+ // have been overwritten we create a unique device for
+ // them instead.
+ if (Module['stdin']) {
+ FS.createDevice('/dev', 'stdin', Module['stdin']);
+ } else {
+ FS.symlink('/dev/tty', '/dev/stdin');
+ }
+ if (Module['stdout']) {
+ FS.createDevice('/dev', 'stdout', null, Module['stdout']);
+ } else {
+ FS.symlink('/dev/tty', '/dev/stdout');
+ }
+ if (Module['stderr']) {
+ FS.createDevice('/dev', 'stderr', null, Module['stderr']);
+ } else {
+ FS.symlink('/dev/tty1', '/dev/stderr');
+ }
+
+ // open default streams for the stdin, stdout and stderr devices
+ var stdin = FS.open('/dev/stdin', 'r');
+ HEAP32[((_stdin)>>2)]=FS.getPtrForStream(stdin);
+ assert(stdin.fd === 0, 'invalid handle for stdin (' + stdin.fd + ')');
+
+ var stdout = FS.open('/dev/stdout', 'w');
+ HEAP32[((_stdout)>>2)]=FS.getPtrForStream(stdout);
+ assert(stdout.fd === 1, 'invalid handle for stdout (' + stdout.fd + ')');
+
+ var stderr = FS.open('/dev/stderr', 'w');
+ HEAP32[((_stderr)>>2)]=FS.getPtrForStream(stderr);
+ assert(stderr.fd === 2, 'invalid handle for stderr (' + stderr.fd + ')');
+ },ensureErrnoError:function () {
+ if (FS.ErrnoError) return;
+ FS.ErrnoError = function ErrnoError(errno) {
+ this.errno = errno;
+ for (var key in ERRNO_CODES) {
+ if (ERRNO_CODES[key] === errno) {
+ this.code = key;
+ break;
+ }
+ }
+ this.message = ERRNO_MESSAGES[errno];
+ };
+ FS.ErrnoError.prototype = new Error();
+ FS.ErrnoError.prototype.constructor = FS.ErrnoError;
+ // Some errors may happen quite a bit, to avoid overhead we reuse them (and suffer a lack of stack info)
+ [ERRNO_CODES.ENOENT].forEach(function(code) {
+ FS.genericErrors[code] = new FS.ErrnoError(code);
+ FS.genericErrors[code].stack = '<generic error, no stack>';
+ });
+ },staticInit:function () {
+ FS.ensureErrnoError();
+
+ FS.nameTable = new Array(4096);
+
+ FS.mount(MEMFS, {}, '/');
+
+ FS.createDefaultDirectories();
+ FS.createDefaultDevices();
+ },init:function (input, output, error) {
+ assert(!FS.init.initialized, 'FS.init was previously called. If you want to initialize later with custom parameters, remove any earlier calls (note that one is automatically added to the generated code)');
+ FS.init.initialized = true;
+
+ FS.ensureErrnoError();
+
+ // Allow Module.stdin etc. to provide defaults, if none explicitly passed to us here
+ Module['stdin'] = input || Module['stdin'];
+ Module['stdout'] = output || Module['stdout'];
+ Module['stderr'] = error || Module['stderr'];
+
+ FS.createStandardStreams();
+ },quit:function () {
+ FS.init.initialized = false;
+ for (var i = 0; i < FS.streams.length; i++) {
+ var stream = FS.streams[i];
+ if (!stream) {
+ continue;
+ }
+ FS.close(stream);
+ }
+ },getMode:function (canRead, canWrite) {
+ var mode = 0;
+ if (canRead) mode |= 292 | 73;
+ if (canWrite) mode |= 146;
+ return mode;
+ },joinPath:function (parts, forceRelative) {
+ var path = PATH.join.apply(null, parts);
+ if (forceRelative && path[0] == '/') path = path.substr(1);
+ return path;
+ },absolutePath:function (relative, base) {
+ return PATH.resolve(base, relative);
+ },standardizePath:function (path) {
+ return PATH.normalize(path);
+ },findObject:function (path, dontResolveLastLink) {
+ var ret = FS.analyzePath(path, dontResolveLastLink);
+ if (ret.exists) {
+ return ret.object;
+ } else {
+ ___setErrNo(ret.error);
+ return null;
+ }
+ },analyzePath:function (path, dontResolveLastLink) {
+ // operate from within the context of the symlink's target
+ try {
+ var lookup = FS.lookupPath(path, { follow: !dontResolveLastLink });
+ path = lookup.path;
+ } catch (e) {
+ }
+ var ret = {
+ isRoot: false, exists: false, error: 0, name: null, path: null, object: null,
+ parentExists: false, parentPath: null, parentObject: null
+ };
+ try {
+ var lookup = FS.lookupPath(path, { parent: true });
+ ret.parentExists = true;
+ ret.parentPath = lookup.path;
+ ret.parentObject = lookup.node;
+ ret.name = PATH.basename(path);
+ lookup = FS.lookupPath(path, { follow: !dontResolveLastLink });
+ ret.exists = true;
+ ret.path = lookup.path;
+ ret.object = lookup.node;
+ ret.name = lookup.node.name;
+ ret.isRoot = lookup.path === '/';
+ } catch (e) {
+ ret.error = e.errno;
+ };
+ return ret;
+ },createFolder:function (parent, name, canRead, canWrite) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ var mode = FS.getMode(canRead, canWrite);
+ return FS.mkdir(path, mode);
+ },createPath:function (parent, path, canRead, canWrite) {
+ parent = typeof parent === 'string' ? parent : FS.getPath(parent);
+ var parts = path.split('/').reverse();
+ while (parts.length) {
+ var part = parts.pop();
+ if (!part) continue;
+ var current = PATH.join2(parent, part);
+ try {
+ FS.mkdir(current);
+ } catch (e) {
+ // ignore EEXIST
+ }
+ parent = current;
+ }
+ return current;
+ },createFile:function (parent, name, properties, canRead, canWrite) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ var mode = FS.getMode(canRead, canWrite);
+ return FS.create(path, mode);
+ },createDataFile:function (parent, name, data, canRead, canWrite, canOwn) {
+ var path = name ? PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name) : parent;
+ var mode = FS.getMode(canRead, canWrite);
+ var node = FS.create(path, mode);
+ if (data) {
+ if (typeof data === 'string') {
+ var arr = new Array(data.length);
+ for (var i = 0, len = data.length; i < len; ++i) arr[i] = data.charCodeAt(i);
+ data = arr;
+ }
+ // make sure we can write to the file
+ FS.chmod(node, mode | 146);
+ var stream = FS.open(node, 'w');
+ FS.write(stream, data, 0, data.length, 0, canOwn);
+ FS.close(stream);
+ FS.chmod(node, mode);
+ }
+ return node;
+ },createDevice:function (parent, name, input, output) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ var mode = FS.getMode(!!input, !!output);
+ if (!FS.createDevice.major) FS.createDevice.major = 64;
+ var dev = FS.makedev(FS.createDevice.major++, 0);
+ // Create a fake device that a set of stream ops to emulate
+ // the old behavior.
+ FS.registerDevice(dev, {
+ open: function(stream) {
+ stream.seekable = false;
+ },
+ close: function(stream) {
+ // flush any pending line data
+ if (output && output.buffer && output.buffer.length) {
+ output(10);
+ }
+ },
+ read: function(stream, buffer, offset, length, pos /* ignored */) {
+ var bytesRead = 0;
+ for (var i = 0; i < length; i++) {
+ var result;
+ try {
+ result = input();
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ if (result === undefined && bytesRead === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ if (result === null || result === undefined) break;
+ bytesRead++;
+ buffer[offset+i] = result;
+ }
+ if (bytesRead) {
+ stream.node.timestamp = Date.now();
+ }
+ return bytesRead;
+ },
+ write: function(stream, buffer, offset, length, pos) {
+ for (var i = 0; i < length; i++) {
+ try {
+ output(buffer[offset+i]);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ }
+ if (length) {
+ stream.node.timestamp = Date.now();
+ }
+ return i;
+ }
+ });
+ return FS.mkdev(path, mode, dev);
+ },createLink:function (parent, name, target, canRead, canWrite) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ return FS.symlink(target, path);
+ },forceLoadFile:function (obj) {
+ if (obj.isDevice || obj.isFolder || obj.link || obj.contents) return true;
+ var success = true;
+ if (typeof XMLHttpRequest !== 'undefined') {
+ throw new Error("Lazy loading should have been performed (contents set) in createLazyFile, but it was not. Lazy loading only works in web workers. Use --embed-file or --preload-file in emcc on the main thread.");
+ } else if (Module['read']) {
+ // Command-line.
+ try {
+ // WARNING: Can't read binary files in V8's d8 or tracemonkey's js, as
+ // read() will try to parse UTF8.
+ obj.contents = intArrayFromString(Module['read'](obj.url), true);
+ } catch (e) {
+ success = false;
+ }
+ } else {
+ throw new Error('Cannot load without read() or XMLHttpRequest.');
+ }
+ if (!success) ___setErrNo(ERRNO_CODES.EIO);
+ return success;
+ },createLazyFile:function (parent, name, url, canRead, canWrite) {
+ // Lazy chunked Uint8Array (implements get and length from Uint8Array). Actual getting is abstracted away for eventual reuse.
+ function LazyUint8Array() {
+ this.lengthKnown = false;
+ this.chunks = []; // Loaded chunks. Index is the chunk number
+ }
+ LazyUint8Array.prototype.get = function LazyUint8Array_get(idx) {
+ if (idx > this.length-1 || idx < 0) {
+ return undefined;
+ }
+ var chunkOffset = idx % this.chunkSize;
+ var chunkNum = Math.floor(idx / this.chunkSize);
+ return this.getter(chunkNum)[chunkOffset];
+ }
+ LazyUint8Array.prototype.setDataGetter = function LazyUint8Array_setDataGetter(getter) {
+ this.getter = getter;
+ }
+ LazyUint8Array.prototype.cacheLength = function LazyUint8Array_cacheLength() {
+ // Find length
+ var xhr = new XMLHttpRequest();
+ xhr.open('HEAD', url, false);
+ xhr.send(null);
+ if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". Status: " + xhr.status);
+ var datalength = Number(xhr.getResponseHeader("Content-length"));
+ var header;
+ var hasByteServing = (header = xhr.getResponseHeader("Accept-Ranges")) && header === "bytes";
+ var chunkSize = 1024*1024; // Chunk size in bytes
+
+ if (!hasByteServing) chunkSize = datalength;
+
+ // Function to get a range from the remote URL.
+ var doXHR = (function(from, to) {
+ if (from > to) throw new Error("invalid range (" + from + ", " + to + ") or no bytes requested!");
+ if (to > datalength-1) throw new Error("only " + datalength + " bytes available! programmer error!");
+
+ // TODO: Use mozResponseArrayBuffer, responseStream, etc. if available.
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, false);
+ if (datalength !== chunkSize) xhr.setRequestHeader("Range", "bytes=" + from + "-" + to);
+
+ // Some hints to the browser that we want binary data.
+ if (typeof Uint8Array != 'undefined') xhr.responseType = 'arraybuffer';
+ if (xhr.overrideMimeType) {
+ xhr.overrideMimeType('text/plain; charset=x-user-defined');
+ }
+
+ xhr.send(null);
+ if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". Status: " + xhr.status);
+ if (xhr.response !== undefined) {
+ return new Uint8Array(xhr.response || []);
+ } else {
+ return intArrayFromString(xhr.responseText || '', true);
+ }
+ });
+ var lazyArray = this;
+ lazyArray.setDataGetter(function(chunkNum) {
+ var start = chunkNum * chunkSize;
+ var end = (chunkNum+1) * chunkSize - 1; // including this byte
+ end = Math.min(end, datalength-1); // if datalength-1 is selected, this is the last block
+ if (typeof(lazyArray.chunks[chunkNum]) === "undefined") {
+ lazyArray.chunks[chunkNum] = doXHR(start, end);
+ }
+ if (typeof(lazyArray.chunks[chunkNum]) === "undefined") throw new Error("doXHR failed!");
+ return lazyArray.chunks[chunkNum];
+ });
+
+ this._length = datalength;
+ this._chunkSize = chunkSize;
+ this.lengthKnown = true;
+ }
+ if (typeof XMLHttpRequest !== 'undefined') {
+ if (!ENVIRONMENT_IS_WORKER) throw 'Cannot do synchronous binary XHRs outside webworkers in modern browsers. Use --embed-file or --preload-file in emcc';
+ var lazyArray = new LazyUint8Array();
+ Object.defineProperty(lazyArray, "length", {
+ get: function() {
+ if(!this.lengthKnown) {
+ this.cacheLength();
+ }
+ return this._length;
+ }
+ });
+ Object.defineProperty(lazyArray, "chunkSize", {
+ get: function() {
+ if(!this.lengthKnown) {
+ this.cacheLength();
+ }
+ return this._chunkSize;
+ }
+ });
+
+ var properties = { isDevice: false, contents: lazyArray };
+ } else {
+ var properties = { isDevice: false, url: url };
+ }
+
+ var node = FS.createFile(parent, name, properties, canRead, canWrite);
+ // This is a total hack, but I want to get this lazy file code out of the
+ // core of MEMFS. If we want to keep this lazy file concept I feel it should
+ // be its own thin LAZYFS proxying calls to MEMFS.
+ if (properties.contents) {
+ node.contents = properties.contents;
+ } else if (properties.url) {
+ node.contents = null;
+ node.url = properties.url;
+ }
+ // override each stream op with one that tries to force load the lazy file first
+ var stream_ops = {};
+ var keys = Object.keys(node.stream_ops);
+ keys.forEach(function(key) {
+ var fn = node.stream_ops[key];
+ stream_ops[key] = function forceLoadLazyFile() {
+ if (!FS.forceLoadFile(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ return fn.apply(null, arguments);
+ };
+ });
+ // use a custom read function
+ stream_ops.read = function stream_ops_read(stream, buffer, offset, length, position) {
+ if (!FS.forceLoadFile(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ var contents = stream.node.contents;
+ if (position >= contents.length)
+ return 0;
+ var size = Math.min(contents.length - position, length);
+ assert(size >= 0);
+ if (contents.slice) { // normal array
+ for (var i = 0; i < size; i++) {
+ buffer[offset + i] = contents[position + i];
+ }
+ } else {
+ for (var i = 0; i < size; i++) { // LazyUint8Array from sync binary XHR
+ buffer[offset + i] = contents.get(position + i);
+ }
+ }
+ return size;
+ };
+ node.stream_ops = stream_ops;
+ return node;
+ },createPreloadedFile:function (parent, name, url, canRead, canWrite, onload, onerror, dontCreateFile, canOwn) {
+ Browser.init();
+ // TODO we should allow people to just pass in a complete filename instead
+ // of parent and name being that we just join them anyways
+ var fullname = name ? PATH.resolve(PATH.join2(parent, name)) : parent;
+ function processData(byteArray) {
+ function finish(byteArray) {
+ if (!dontCreateFile) {
+ FS.createDataFile(parent, name, byteArray, canRead, canWrite, canOwn);
+ }
+ if (onload) onload();
+ removeRunDependency('cp ' + fullname);
+ }
+ var handled = false;
+ Module['preloadPlugins'].forEach(function(plugin) {
+ if (handled) return;
+ if (plugin['canHandle'](fullname)) {
+ plugin['handle'](byteArray, fullname, finish, function() {
+ if (onerror) onerror();
+ removeRunDependency('cp ' + fullname);
+ });
+ handled = true;
+ }
+ });
+ if (!handled) finish(byteArray);
+ }
+ addRunDependency('cp ' + fullname);
+ if (typeof url == 'string') {
+ Browser.asyncLoad(url, function(byteArray) {
+ processData(byteArray);
+ }, onerror);
+ } else {
+ processData(url);
+ }
+ },indexedDB:function () {
+ return window.indexedDB || window.mozIndexedDB || window.webkitIndexedDB || window.msIndexedDB;
+ },DB_NAME:function () {
+ return 'EM_FS_' + window.location.pathname;
+ },DB_VERSION:20,DB_STORE_NAME:"FILE_DATA",saveFilesToDB:function (paths, onload, onerror) {
+ onload = onload || function(){};
+ onerror = onerror || function(){};
+ var indexedDB = FS.indexedDB();
+ try {
+ var openRequest = indexedDB.open(FS.DB_NAME(), FS.DB_VERSION);
+ } catch (e) {
+ return onerror(e);
+ }
+ openRequest.onupgradeneeded = function openRequest_onupgradeneeded() {
+ console.log('creating db');
+ var db = openRequest.result;
+ db.createObjectStore(FS.DB_STORE_NAME);
+ };
+ openRequest.onsuccess = function openRequest_onsuccess() {
+ var db = openRequest.result;
+ var transaction = db.transaction([FS.DB_STORE_NAME], 'readwrite');
+ var files = transaction.objectStore(FS.DB_STORE_NAME);
+ var ok = 0, fail = 0, total = paths.length;
+ function finish() {
+ if (fail == 0) onload(); else onerror();
+ }
+ paths.forEach(function(path) {
+ var putRequest = files.put(FS.analyzePath(path).object.contents, path);
+ putRequest.onsuccess = function putRequest_onsuccess() { ok++; if (ok + fail == total) finish() };
+ putRequest.onerror = function putRequest_onerror() { fail++; if (ok + fail == total) finish() };
+ });
+ transaction.onerror = onerror;
+ };
+ openRequest.onerror = onerror;
+ },loadFilesFromDB:function (paths, onload, onerror) {
+ onload = onload || function(){};
+ onerror = onerror || function(){};
+ var indexedDB = FS.indexedDB();
+ try {
+ var openRequest = indexedDB.open(FS.DB_NAME(), FS.DB_VERSION);
+ } catch (e) {
+ return onerror(e);
+ }
+ openRequest.onupgradeneeded = onerror; // no database to load from
+ openRequest.onsuccess = function openRequest_onsuccess() {
+ var db = openRequest.result;
+ try {
+ var transaction = db.transaction([FS.DB_STORE_NAME], 'readonly');
+ } catch(e) {
+ onerror(e);
+ return;
+ }
+ var files = transaction.objectStore(FS.DB_STORE_NAME);
+ var ok = 0, fail = 0, total = paths.length;
+ function finish() {
+ if (fail == 0) onload(); else onerror();
+ }
+ paths.forEach(function(path) {
+ var getRequest = files.get(path);
+ getRequest.onsuccess = function getRequest_onsuccess() {
+ if (FS.analyzePath(path).exists) {
+ FS.unlink(path);
+ }
+ FS.createDataFile(PATH.dirname(path), PATH.basename(path), getRequest.result, true, true, true);
+ ok++;
+ if (ok + fail == total) finish();
+ };
+ getRequest.onerror = function getRequest_onerror() { fail++; if (ok + fail == total) finish() };
+ });
+ transaction.onerror = onerror;
+ };
+ openRequest.onerror = onerror;
+ }};var PATH={splitPath:function (filename) {
+ var splitPathRe = /^(\/?|)([\s\S]*?)((?:\.{1,2}|[^\/]+?|)(\.[^.\/]*|))(?:[\/]*)$/;
+ return splitPathRe.exec(filename).slice(1);
+ },normalizeArray:function (parts, allowAboveRoot) {
+ // if the path tries to go above the root, `up` ends up > 0
+ var up = 0;
+ for (var i = parts.length - 1; i >= 0; i--) {
+ var last = parts[i];
+ if (last === '.') {
+ parts.splice(i, 1);
+ } else if (last === '..') {
+ parts.splice(i, 1);
+ up++;
+ } else if (up) {
+ parts.splice(i, 1);
+ up--;
+ }
+ }
+ // if the path is allowed to go above the root, restore leading ..s
+ if (allowAboveRoot) {
+ for (; up--; up) {
+ parts.unshift('..');
+ }
+ }
+ return parts;
+ },normalize:function (path) {
+ var isAbsolute = path.charAt(0) === '/',
+ trailingSlash = path.substr(-1) === '/';
+ // Normalize the path
+ path = PATH.normalizeArray(path.split('/').filter(function(p) {
+ return !!p;
+ }), !isAbsolute).join('/');
+ if (!path && !isAbsolute) {
+ path = '.';
+ }
+ if (path && trailingSlash) {
+ path += '/';
+ }
+ return (isAbsolute ? '/' : '') + path;
+ },dirname:function (path) {
+ var result = PATH.splitPath(path),
+ root = result[0],
+ dir = result[1];
+ if (!root && !dir) {
+ // No dirname whatsoever
+ return '.';
+ }
+ if (dir) {
+ // It has a dirname, strip trailing slash
+ dir = dir.substr(0, dir.length - 1);
+ }
+ return root + dir;
+ },basename:function (path) {
+ // EMSCRIPTEN return '/'' for '/', not an empty string
+ if (path === '/') return '/';
+ var lastSlash = path.lastIndexOf('/');
+ if (lastSlash === -1) return path;
+ return path.substr(lastSlash+1);
+ },extname:function (path) {
+ return PATH.splitPath(path)[3];
+ },join:function () {
+ var paths = Array.prototype.slice.call(arguments, 0);
+ return PATH.normalize(paths.join('/'));
+ },join2:function (l, r) {
+ return PATH.normalize(l + '/' + r);
+ },resolve:function () {
+ var resolvedPath = '',
+ resolvedAbsolute = false;
+ for (var i = arguments.length - 1; i >= -1 && !resolvedAbsolute; i--) {
+ var path = (i >= 0) ? arguments[i] : FS.cwd();
+ // Skip empty and invalid entries
+ if (typeof path !== 'string') {
+ throw new TypeError('Arguments to path.resolve must be strings');
+ } else if (!path) {
+ continue;
+ }
+ resolvedPath = path + '/' + resolvedPath;
+ resolvedAbsolute = path.charAt(0) === '/';
+ }
+ // At this point the path should be resolved to a full absolute path, but
+ // handle relative paths to be safe (might happen when process.cwd() fails)
+ resolvedPath = PATH.normalizeArray(resolvedPath.split('/').filter(function(p) {
+ return !!p;
+ }), !resolvedAbsolute).join('/');
+ return ((resolvedAbsolute ? '/' : '') + resolvedPath) || '.';
+ },relative:function (from, to) {
+ from = PATH.resolve(from).substr(1);
+ to = PATH.resolve(to).substr(1);
+ function trim(arr) {
+ var start = 0;
+ for (; start < arr.length; start++) {
+ if (arr[start] !== '') break;
+ }
+ var end = arr.length - 1;
+ for (; end >= 0; end--) {
+ if (arr[end] !== '') break;
+ }
+ if (start > end) return [];
+ return arr.slice(start, end - start + 1);
+ }
+ var fromParts = trim(from.split('/'));
+ var toParts = trim(to.split('/'));
+ var length = Math.min(fromParts.length, toParts.length);
+ var samePartsLength = length;
+ for (var i = 0; i < length; i++) {
+ if (fromParts[i] !== toParts[i]) {
+ samePartsLength = i;
+ break;
+ }
+ }
+ var outputParts = [];
+ for (var i = samePartsLength; i < fromParts.length; i++) {
+ outputParts.push('..');
+ }
+ outputParts = outputParts.concat(toParts.slice(samePartsLength));
+ return outputParts.join('/');
+ }};var Browser={mainLoop:{scheduler:null,method:"",shouldPause:false,paused:false,queue:[],pause:function () {
+ Browser.mainLoop.shouldPause = true;
+ },resume:function () {
+ if (Browser.mainLoop.paused) {
+ Browser.mainLoop.paused = false;
+ Browser.mainLoop.scheduler();
+ }
+ Browser.mainLoop.shouldPause = false;
+ },updateStatus:function () {
+ if (Module['setStatus']) {
+ var message = Module['statusMessage'] || 'Please wait...';
+ var remaining = Browser.mainLoop.remainingBlockers;
+ var expected = Browser.mainLoop.expectedBlockers;
+ if (remaining) {
+ if (remaining < expected) {
+ Module['setStatus'](message + ' (' + (expected - remaining) + '/' + expected + ')');
+ } else {
+ Module['setStatus'](message);
+ }
+ } else {
+ Module['setStatus']('');
+ }
+ }
+ }},isFullScreen:false,pointerLock:false,moduleContextCreatedCallbacks:[],workers:[],init:function () {
+ if (!Module["preloadPlugins"]) Module["preloadPlugins"] = []; // needs to exist even in workers
+
+ if (Browser.initted || ENVIRONMENT_IS_WORKER) return;
+ Browser.initted = true;
+
+ try {
+ new Blob();
+ Browser.hasBlobConstructor = true;
+ } catch(e) {
+ Browser.hasBlobConstructor = false;
+ console.log("warning: no blob constructor, cannot create blobs with mimetypes");
+ }
+ Browser.BlobBuilder = typeof MozBlobBuilder != "undefined" ? MozBlobBuilder : (typeof WebKitBlobBuilder != "undefined" ? WebKitBlobBuilder : (!Browser.hasBlobConstructor ? console.log("warning: no BlobBuilder") : null));
+ Browser.URLObject = typeof window != "undefined" ? (window.URL ? window.URL : window.webkitURL) : undefined;
+ if (!Module.noImageDecoding && typeof Browser.URLObject === 'undefined') {
+ console.log("warning: Browser does not support creating object URLs. Built-in browser image decoding will not be available.");
+ Module.noImageDecoding = true;
+ }
+
+ // Support for plugins that can process preloaded files. You can add more of these to
+ // your app by creating and appending to Module.preloadPlugins.
+ //
+ // Each plugin is asked if it can handle a file based on the file's name. If it can,
+ // it is given the file's raw data. When it is done, it calls a callback with the file's
+ // (possibly modified) data. For example, a plugin might decompress a file, or it
+ // might create some side data structure for use later (like an Image element, etc.).
+
+ var imagePlugin = {};
+ imagePlugin['canHandle'] = function imagePlugin_canHandle(name) {
+ return !Module.noImageDecoding && /\.(jpg|jpeg|png|bmp)$/i.test(name);
+ };
+ imagePlugin['handle'] = function imagePlugin_handle(byteArray, name, onload, onerror) {
+ var b = null;
+ if (Browser.hasBlobConstructor) {
+ try {
+ b = new Blob([byteArray], { type: Browser.getMimetype(name) });
+ if (b.size !== byteArray.length) { // Safari bug #118630
+ // Safari's Blob can only take an ArrayBuffer
+ b = new Blob([(new Uint8Array(byteArray)).buffer], { type: Browser.getMimetype(name) });
+ }
+ } catch(e) {
+ Runtime.warnOnce('Blob constructor present but fails: ' + e + '; falling back to blob builder');
+ }
+ }
+ if (!b) {
+ var bb = new Browser.BlobBuilder();
+ bb.append((new Uint8Array(byteArray)).buffer); // we need to pass a buffer, and must copy the array to get the right data range
+ b = bb.getBlob();
+ }
+ var url = Browser.URLObject.createObjectURL(b);
+ var img = new Image();
+ img.onload = function img_onload() {
+ assert(img.complete, 'Image ' + name + ' could not be decoded');
+ var canvas = document.createElement('canvas');
+ canvas.width = img.width;
+ canvas.height = img.height;
+ var ctx = canvas.getContext('2d');
+ ctx.drawImage(img, 0, 0);
+ Module["preloadedImages"][name] = canvas;
+ Browser.URLObject.revokeObjectURL(url);
+ if (onload) onload(byteArray);
+ };
+ img.onerror = function img_onerror(event) {
+ console.log('Image ' + url + ' could not be decoded');
+ if (onerror) onerror();
+ };
+ img.src = url;
+ };
+ Module['preloadPlugins'].push(imagePlugin);
+
+ var audioPlugin = {};
+ audioPlugin['canHandle'] = function audioPlugin_canHandle(name) {
+ return !Module.noAudioDecoding && name.substr(-4) in { '.ogg': 1, '.wav': 1, '.mp3': 1 };
+ };
+ audioPlugin['handle'] = function audioPlugin_handle(byteArray, name, onload, onerror) {
+ var done = false;
+ function finish(audio) {
+ if (done) return;
+ done = true;
+ Module["preloadedAudios"][name] = audio;
+ if (onload) onload(byteArray);
+ }
+ function fail() {
+ if (done) return;
+ done = true;
+ Module["preloadedAudios"][name] = new Audio(); // empty shim
+ if (onerror) onerror();
+ }
+ if (Browser.hasBlobConstructor) {
+ try {
+ var b = new Blob([byteArray], { type: Browser.getMimetype(name) });
+ } catch(e) {
+ return fail();
+ }
+ var url = Browser.URLObject.createObjectURL(b); // XXX we never revoke this!
+ var audio = new Audio();
+ audio.addEventListener('canplaythrough', function() { finish(audio) }, false); // use addEventListener due to chromium bug 124926
+ audio.onerror = function audio_onerror(event) {
+ if (done) return;
+ console.log('warning: browser could not fully decode audio ' + name + ', trying slower base64 approach');
+ function encode64(data) {
+ var BASE = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';
+ var PAD = '=';
+ var ret = '';
+ var leftchar = 0;
+ var leftbits = 0;
+ for (var i = 0; i < data.length; i++) {
+ leftchar = (leftchar << 8) | data[i];
+ leftbits += 8;
+ while (leftbits >= 6) {
+ var curr = (leftchar >> (leftbits-6)) & 0x3f;
+ leftbits -= 6;
+ ret += BASE[curr];
+ }
+ }
+ if (leftbits == 2) {
+ ret += BASE[(leftchar&3) << 4];
+ ret += PAD + PAD;
+ } else if (leftbits == 4) {
+ ret += BASE[(leftchar&0xf) << 2];
+ ret += PAD;
+ }
+ return ret;
+ }
+ audio.src = 'data:audio/x-' + name.substr(-3) + ';base64,' + encode64(byteArray);
+ finish(audio); // we don't wait for confirmation this worked - but it's worth trying
+ };
+ audio.src = url;
+ // workaround for chrome bug 124926 - we do not always get oncanplaythrough or onerror
+ Browser.safeSetTimeout(function() {
+ finish(audio); // try to use it even though it is not necessarily ready to play
+ }, 10000);
+ } else {
+ return fail();
+ }
+ };
+ Module['preloadPlugins'].push(audioPlugin);
+
+ // Canvas event setup
+
+ var canvas = Module['canvas'];
+
+ // forced aspect ratio can be enabled by defining 'forcedAspectRatio' on Module
+ // Module['forcedAspectRatio'] = 4 / 3;
+
+ canvas.requestPointerLock = canvas['requestPointerLock'] ||
+ canvas['mozRequestPointerLock'] ||
+ canvas['webkitRequestPointerLock'] ||
+ canvas['msRequestPointerLock'] ||
+ function(){};
+ canvas.exitPointerLock = document['exitPointerLock'] ||
+ document['mozExitPointerLock'] ||
+ document['webkitExitPointerLock'] ||
+ document['msExitPointerLock'] ||
+ function(){}; // no-op if function does not exist
+ canvas.exitPointerLock = canvas.exitPointerLock.bind(document);
+
+ function pointerLockChange() {
+ Browser.pointerLock = document['pointerLockElement'] === canvas ||
+ document['mozPointerLockElement'] === canvas ||
+ document['webkitPointerLockElement'] === canvas ||
+ document['msPointerLockElement'] === canvas;
+ }
+
+ document.addEventListener('pointerlockchange', pointerLockChange, false);
+ document.addEventListener('mozpointerlockchange', pointerLockChange, false);
+ document.addEventListener('webkitpointerlockchange', pointerLockChange, false);
+ document.addEventListener('mspointerlockchange', pointerLockChange, false);
+
+ if (Module['elementPointerLock']) {
+ canvas.addEventListener("click", function(ev) {
+ if (!Browser.pointerLock && canvas.requestPointerLock) {
+ canvas.requestPointerLock();
+ ev.preventDefault();
+ }
+ }, false);
+ }
+ },createContext:function (canvas, useWebGL, setInModule, webGLContextAttributes) {
+ var ctx;
+ var errorInfo = '?';
+ function onContextCreationError(event) {
+ errorInfo = event.statusMessage || errorInfo;
+ }
+ try {
+ if (useWebGL) {
+ var contextAttributes = {
+ antialias: false,
+ alpha: false
+ };
+
+ if (webGLContextAttributes) {
+ for (var attribute in webGLContextAttributes) {
+ contextAttributes[attribute] = webGLContextAttributes[attribute];
+ }
+ }
+
+
+ canvas.addEventListener('webglcontextcreationerror', onContextCreationError, false);
+ try {
+ ['experimental-webgl', 'webgl'].some(function(webglId) {
+ return ctx = canvas.getContext(webglId, contextAttributes);
+ });
+ } finally {
+ canvas.removeEventListener('webglcontextcreationerror', onContextCreationError, false);
+ }
+ } else {
+ ctx = canvas.getContext('2d');
+ }
+ if (!ctx) throw ':(';
+ } catch (e) {
+ Module.print('Could not create canvas: ' + [errorInfo, e]);
+ return null;
+ }
+ if (useWebGL) {
+ // Set the background of the WebGL canvas to black
+ canvas.style.backgroundColor = "black";
+
+ // Warn on context loss
+ canvas.addEventListener('webglcontextlost', function(event) {
+ alert('WebGL context lost. You will need to reload the page.');
+ }, false);
+ }
+ if (setInModule) {
+ GLctx = Module.ctx = ctx;
+ Module.useWebGL = useWebGL;
+ Browser.moduleContextCreatedCallbacks.forEach(function(callback) { callback() });
+ Browser.init();
+ }
+ return ctx;
+ },destroyContext:function (canvas, useWebGL, setInModule) {},fullScreenHandlersInstalled:false,lockPointer:undefined,resizeCanvas:undefined,requestFullScreen:function (lockPointer, resizeCanvas) {
+ Browser.lockPointer = lockPointer;
+ Browser.resizeCanvas = resizeCanvas;
+ if (typeof Browser.lockPointer === 'undefined') Browser.lockPointer = true;
+ if (typeof Browser.resizeCanvas === 'undefined') Browser.resizeCanvas = false;
+
+ var canvas = Module['canvas'];
+ function fullScreenChange() {
+ Browser.isFullScreen = false;
+ var canvasContainer = canvas.parentNode;
+ if ((document['webkitFullScreenElement'] || document['webkitFullscreenElement'] ||
+ document['mozFullScreenElement'] || document['mozFullscreenElement'] ||
+ document['fullScreenElement'] || document['fullscreenElement'] ||
+ document['msFullScreenElement'] || document['msFullscreenElement'] ||
+ document['webkitCurrentFullScreenElement']) === canvasContainer) {
+ canvas.cancelFullScreen = document['cancelFullScreen'] ||
+ document['mozCancelFullScreen'] ||
+ document['webkitCancelFullScreen'] ||
+ document['msExitFullscreen'] ||
+ document['exitFullscreen'] ||
+ function() {};
+ canvas.cancelFullScreen = canvas.cancelFullScreen.bind(document);
+ if (Browser.lockPointer) canvas.requestPointerLock();
+ Browser.isFullScreen = true;
+ if (Browser.resizeCanvas) Browser.setFullScreenCanvasSize();
+ } else {
+
+ // remove the full screen specific parent of the canvas again to restore the HTML structure from before going full screen
+ canvasContainer.parentNode.insertBefore(canvas, canvasContainer);
+ canvasContainer.parentNode.removeChild(canvasContainer);
+
+ if (Browser.resizeCanvas) Browser.setWindowedCanvasSize();
+ }
+ if (Module['onFullScreen']) Module['onFullScreen'](Browser.isFullScreen);
+ Browser.updateCanvasDimensions(canvas);
+ }
+
+ if (!Browser.fullScreenHandlersInstalled) {
+ Browser.fullScreenHandlersInstalled = true;
+ document.addEventListener('fullscreenchange', fullScreenChange, false);
+ document.addEventListener('mozfullscreenchange', fullScreenChange, false);
+ document.addEventListener('webkitfullscreenchange', fullScreenChange, false);
+ document.addEventListener('MSFullscreenChange', fullScreenChange, false);
+ }
+
+ // create a new parent to ensure the canvas has no siblings. this allows browsers to optimize full screen performance when its parent is the full screen root
+ var canvasContainer = document.createElement("div");
+ canvas.parentNode.insertBefore(canvasContainer, canvas);
+ canvasContainer.appendChild(canvas);
+
+ // use parent of canvas as full screen root to allow aspect ratio correction (Firefox stretches the root to screen size)
+ canvasContainer.requestFullScreen = canvasContainer['requestFullScreen'] ||
+ canvasContainer['mozRequestFullScreen'] ||
+ canvasContainer['msRequestFullscreen'] ||
+ (canvasContainer['webkitRequestFullScreen'] ? function() { canvasContainer['webkitRequestFullScreen'](Element['ALLOW_KEYBOARD_INPUT']) } : null);
+ canvasContainer.requestFullScreen();
+ },requestAnimationFrame:function requestAnimationFrame(func) {
+ if (typeof window === 'undefined') { // Provide fallback to setTimeout if window is undefined (e.g. in Node.js)
+ setTimeout(func, 1000/60);
+ } else {
+ if (!window.requestAnimationFrame) {
+ window.requestAnimationFrame = window['requestAnimationFrame'] ||
+ window['mozRequestAnimationFrame'] ||
+ window['webkitRequestAnimationFrame'] ||
+ window['msRequestAnimationFrame'] ||
+ window['oRequestAnimationFrame'] ||
+ window['setTimeout'];
+ }
+ window.requestAnimationFrame(func);
+ }
+ },safeCallback:function (func) {
+ return function() {
+ if (!ABORT) return func.apply(null, arguments);
+ };
+ },safeRequestAnimationFrame:function (func) {
+ return Browser.requestAnimationFrame(function() {
+ if (!ABORT) func();
+ });
+ },safeSetTimeout:function (func, timeout) {
+ return setTimeout(function() {
+ if (!ABORT) func();
+ }, timeout);
+ },safeSetInterval:function (func, timeout) {
+ return setInterval(function() {
+ if (!ABORT) func();
+ }, timeout);
+ },getMimetype:function (name) {
+ return {
+ 'jpg': 'image/jpeg',
+ 'jpeg': 'image/jpeg',
+ 'png': 'image/png',
+ 'bmp': 'image/bmp',
+ 'ogg': 'audio/ogg',
+ 'wav': 'audio/wav',
+ 'mp3': 'audio/mpeg'
+ }[name.substr(name.lastIndexOf('.')+1)];
+ },getUserMedia:function (func) {
+ if(!window.getUserMedia) {
+ window.getUserMedia = navigator['getUserMedia'] ||
+ navigator['mozGetUserMedia'];
+ }
+ window.getUserMedia(func);
+ },getMovementX:function (event) {
+ return event['movementX'] ||
+ event['mozMovementX'] ||
+ event['webkitMovementX'] ||
+ 0;
+ },getMovementY:function (event) {
+ return event['movementY'] ||
+ event['mozMovementY'] ||
+ event['webkitMovementY'] ||
+ 0;
+ },getMouseWheelDelta:function (event) {
+ return Math.max(-1, Math.min(1, event.type === 'DOMMouseScroll' ? event.detail : -event.wheelDelta));
+ },mouseX:0,mouseY:0,mouseMovementX:0,mouseMovementY:0,calculateMouseEvent:function (event) { // event should be mousemove, mousedown or mouseup
+ if (Browser.pointerLock) {
+ // When the pointer is locked, calculate the coordinates
+ // based on the movement of the mouse.
+ // Workaround for Firefox bug 764498
+ if (event.type != 'mousemove' &&
+ ('mozMovementX' in event)) {
+ Browser.mouseMovementX = Browser.mouseMovementY = 0;
+ } else {
+ Browser.mouseMovementX = Browser.getMovementX(event);
+ Browser.mouseMovementY = Browser.getMovementY(event);
+ }
+
+ // check if SDL is available
+ if (typeof SDL != "undefined") {
+ Browser.mouseX = SDL.mouseX + Browser.mouseMovementX;
+ Browser.mouseY = SDL.mouseY + Browser.mouseMovementY;
+ } else {
+ // just add the mouse delta to the current absolut mouse position
+ // FIXME: ideally this should be clamped against the canvas size and zero
+ Browser.mouseX += Browser.mouseMovementX;
+ Browser.mouseY += Browser.mouseMovementY;
+ }
+ } else {
+ // Otherwise, calculate the movement based on the changes
+ // in the coordinates.
+ var rect = Module["canvas"].getBoundingClientRect();
+ var x, y;
+
+ // Neither .scrollX or .pageXOffset are defined in a spec, but
+ // we prefer .scrollX because it is currently in a spec draft.
+ // (see: http://www.w3.org/TR/2013/WD-cssom-view-20131217/)
+ var scrollX = ((typeof window.scrollX !== 'undefined') ? window.scrollX : window.pageXOffset);
+ var scrollY = ((typeof window.scrollY !== 'undefined') ? window.scrollY : window.pageYOffset);
+ if (event.type == 'touchstart' ||
+ event.type == 'touchend' ||
+ event.type == 'touchmove') {
+ var t = event.touches.item(0);
+ if (t) {
+ x = t.pageX - (scrollX + rect.left);
+ y = t.pageY - (scrollY + rect.top);
+ } else {
+ return;
+ }
+ } else {
+ x = event.pageX - (scrollX + rect.left);
+ y = event.pageY - (scrollY + rect.top);
+ }
+
+ // the canvas might be CSS-scaled compared to its backbuffer;
+ // SDL-using content will want mouse coordinates in terms
+ // of backbuffer units.
+ var cw = Module["canvas"].width;
+ var ch = Module["canvas"].height;
+ x = x * (cw / rect.width);
+ y = y * (ch / rect.height);
+
+ Browser.mouseMovementX = x - Browser.mouseX;
+ Browser.mouseMovementY = y - Browser.mouseY;
+ Browser.mouseX = x;
+ Browser.mouseY = y;
+ }
+ },xhrLoad:function (url, onload, onerror) {
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, true);
+ xhr.responseType = 'arraybuffer';
+ xhr.onload = function xhr_onload() {
+ if (xhr.status == 200 || (xhr.status == 0 && xhr.response)) { // file URLs can return 0
+ onload(xhr.response);
+ } else {
+ onerror();
+ }
+ };
+ xhr.onerror = onerror;
+ xhr.send(null);
+ },asyncLoad:function (url, onload, onerror, noRunDep) {
+ Browser.xhrLoad(url, function(arrayBuffer) {
+ assert(arrayBuffer, 'Loading data file "' + url + '" failed (no arrayBuffer).');
+ onload(new Uint8Array(arrayBuffer));
+ if (!noRunDep) removeRunDependency('al ' + url);
+ }, function(event) {
+ if (onerror) {
+ onerror();
+ } else {
+ throw 'Loading data file "' + url + '" failed.';
+ }
+ });
+ if (!noRunDep) addRunDependency('al ' + url);
+ },resizeListeners:[],updateResizeListeners:function () {
+ var canvas = Module['canvas'];
+ Browser.resizeListeners.forEach(function(listener) {
+ listener(canvas.width, canvas.height);
+ });
+ },setCanvasSize:function (width, height, noUpdates) {
+ var canvas = Module['canvas'];
+ Browser.updateCanvasDimensions(canvas, width, height);
+ if (!noUpdates) Browser.updateResizeListeners();
+ },windowedWidth:0,windowedHeight:0,setFullScreenCanvasSize:function () {
+ // check if SDL is available
+ if (typeof SDL != "undefined") {
+ var flags = HEAPU32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)];
+ flags = flags | 0x00800000; // set SDL_FULLSCREEN flag
+ HEAP32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)]=flags
+ }
+ Browser.updateResizeListeners();
+ },setWindowedCanvasSize:function () {
+ // check if SDL is available
+ if (typeof SDL != "undefined") {
+ var flags = HEAPU32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)];
+ flags = flags & ~0x00800000; // clear SDL_FULLSCREEN flag
+ HEAP32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)]=flags
+ }
+ Browser.updateResizeListeners();
+ },updateCanvasDimensions:function (canvas, wNative, hNative) {
+ if (wNative && hNative) {
+ canvas.widthNative = wNative;
+ canvas.heightNative = hNative;
+ } else {
+ wNative = canvas.widthNative;
+ hNative = canvas.heightNative;
+ }
+ var w = wNative;
+ var h = hNative;
+ if (Module['forcedAspectRatio'] && Module['forcedAspectRatio'] > 0) {
+ if (w/h < Module['forcedAspectRatio']) {
+ w = Math.round(h * Module['forcedAspectRatio']);
+ } else {
+ h = Math.round(w / Module['forcedAspectRatio']);
+ }
+ }
+ if (((document['webkitFullScreenElement'] || document['webkitFullscreenElement'] ||
+ document['mozFullScreenElement'] || document['mozFullscreenElement'] ||
+ document['fullScreenElement'] || document['fullscreenElement'] ||
+ document['msFullScreenElement'] || document['msFullscreenElement'] ||
+ document['webkitCurrentFullScreenElement']) === canvas.parentNode) && (typeof screen != 'undefined')) {
+ var factor = Math.min(screen.width / w, screen.height / h);
+ w = Math.round(w * factor);
+ h = Math.round(h * factor);
+ }
+ if (Browser.resizeCanvas) {
+ if (canvas.width != w) canvas.width = w;
+ if (canvas.height != h) canvas.height = h;
+ if (typeof canvas.style != 'undefined') {
+ canvas.style.removeProperty( "width");
+ canvas.style.removeProperty("height");
+ }
+ } else {
+ if (canvas.width != wNative) canvas.width = wNative;
+ if (canvas.height != hNative) canvas.height = hNative;
+ if (typeof canvas.style != 'undefined') {
+ if (w != wNative || h != hNative) {
+ canvas.style.setProperty( "width", w + "px", "important");
+ canvas.style.setProperty("height", h + "px", "important");
+ } else {
+ canvas.style.removeProperty( "width");
+ canvas.style.removeProperty("height");
+ }
+ }
+ }
+ }};
+
+
+
+
+
+
+
+ function _mkport() { throw 'TODO' }var SOCKFS={mount:function (mount) {
+ return FS.createNode(null, '/', 16384 | 511 /* 0777 */, 0);
+ },createSocket:function (family, type, protocol) {
+ var streaming = type == 1;
+ if (protocol) {
+ assert(streaming == (protocol == 6)); // if SOCK_STREAM, must be tcp
+ }
+
+ // create our internal socket structure
+ var sock = {
+ family: family,
+ type: type,
+ protocol: protocol,
+ server: null,
+ peers: {},
+ pending: [],
+ recv_queue: [],
+ sock_ops: SOCKFS.websocket_sock_ops
+ };
+
+ // create the filesystem node to store the socket structure
+ var name = SOCKFS.nextname();
+ var node = FS.createNode(SOCKFS.root, name, 49152, 0);
+ node.sock = sock;
+
+ // and the wrapping stream that enables library functions such
+ // as read and write to indirectly interact with the socket
+ var stream = FS.createStream({
+ path: name,
+ node: node,
+ flags: FS.modeStringToFlags('r+'),
+ seekable: false,
+ stream_ops: SOCKFS.stream_ops
+ });
+
+ // map the new stream to the socket structure (sockets have a 1:1
+ // relationship with a stream)
+ sock.stream = stream;
+
+ return sock;
+ },getSocket:function (fd) {
+ var stream = FS.getStream(fd);
+ if (!stream || !FS.isSocket(stream.node.mode)) {
+ return null;
+ }
+ return stream.node.sock;
+ },stream_ops:{poll:function (stream) {
+ var sock = stream.node.sock;
+ return sock.sock_ops.poll(sock);
+ },ioctl:function (stream, request, varargs) {
+ var sock = stream.node.sock;
+ return sock.sock_ops.ioctl(sock, request, varargs);
+ },read:function (stream, buffer, offset, length, position /* ignored */) {
+ var sock = stream.node.sock;
+ var msg = sock.sock_ops.recvmsg(sock, length);
+ if (!msg) {
+ // socket is closed
+ return 0;
+ }
+ buffer.set(msg.buffer, offset);
+ return msg.buffer.length;
+ },write:function (stream, buffer, offset, length, position /* ignored */) {
+ var sock = stream.node.sock;
+ return sock.sock_ops.sendmsg(sock, buffer, offset, length);
+ },close:function (stream) {
+ var sock = stream.node.sock;
+ sock.sock_ops.close(sock);
+ }},nextname:function () {
+ if (!SOCKFS.nextname.current) {
+ SOCKFS.nextname.current = 0;
+ }
+ return 'socket[' + (SOCKFS.nextname.current++) + ']';
+ },websocket_sock_ops:{createPeer:function (sock, addr, port) {
+ var ws;
+
+ if (typeof addr === 'object') {
+ ws = addr;
+ addr = null;
+ port = null;
+ }
+
+ if (ws) {
+ // for sockets that've already connected (e.g. we're the server)
+ // we can inspect the _socket property for the address
+ if (ws._socket) {
+ addr = ws._socket.remoteAddress;
+ port = ws._socket.remotePort;
+ }
+ // if we're just now initializing a connection to the remote,
+ // inspect the url property
+ else {
+ var result = /ws[s]?:\/\/([^:]+):(\d+)/.exec(ws.url);
+ if (!result) {
+ throw new Error('WebSocket URL must be in the format ws(s)://address:port');
+ }
+ addr = result[1];
+ port = parseInt(result[2], 10);
+ }
+ } else {
+ // create the actual websocket object and connect
+ try {
+ // runtimeConfig gets set to true if WebSocket runtime configuration is available.
+ var runtimeConfig = (Module['websocket'] && ('object' === typeof Module['websocket']));
+
+ // The default value is 'ws://' the replace is needed because the compiler replaces "//" comments with '#'
+ // comments without checking context, so we'd end up with ws:#, the replace swaps the "#" for "//" again.
+ var url = 'ws:#'.replace('#', '//');
+
+ if (runtimeConfig) {
+ if ('string' === typeof Module['websocket']['url']) {
+ url = Module['websocket']['url']; // Fetch runtime WebSocket URL config.
+ }
+ }
+
+ if (url === 'ws://' || url === 'wss://') { // Is the supplied URL config just a prefix, if so complete it.
+ url = url + addr + ':' + port;
+ }
+
+ // Make the WebSocket subprotocol (Sec-WebSocket-Protocol) default to binary if no configuration is set.
+ var subProtocols = 'binary'; // The default value is 'binary'
+
+ if (runtimeConfig) {
+ if ('string' === typeof Module['websocket']['subprotocol']) {
+ subProtocols = Module['websocket']['subprotocol']; // Fetch runtime WebSocket subprotocol config.
+ }
+ }
+
+ // The regex trims the string (removes spaces at the beginning and end, then splits the string by
+ // <any space>,<any space> into an Array. Whitespace removal is important for Websockify and ws.
+ subProtocols = subProtocols.replace(/^ +| +$/g,"").split(/ *, */);
+
+ // The node ws library API for specifying optional subprotocol is slightly different than the browser's.
+ var opts = ENVIRONMENT_IS_NODE ? {'protocol': subProtocols.toString()} : subProtocols;
+
+ // If node we use the ws library.
+ var WebSocket = ENVIRONMENT_IS_NODE ? require('ws') : window['WebSocket'];
+ ws = new WebSocket(url, opts);
+ ws.binaryType = 'arraybuffer';
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EHOSTUNREACH);
+ }
+ }
+
+
+ var peer = {
+ addr: addr,
+ port: port,
+ socket: ws,
+ dgram_send_queue: []
+ };
+
+ SOCKFS.websocket_sock_ops.addPeer(sock, peer);
+ SOCKFS.websocket_sock_ops.handlePeerEvents(sock, peer);
+
+ // if this is a bound dgram socket, send the port number first to allow
+ // us to override the ephemeral port reported to us by remotePort on the
+ // remote end.
+ if (sock.type === 2 && typeof sock.sport !== 'undefined') {
+ peer.dgram_send_queue.push(new Uint8Array([
+ 255, 255, 255, 255,
+ 'p'.charCodeAt(0), 'o'.charCodeAt(0), 'r'.charCodeAt(0), 't'.charCodeAt(0),
+ ((sock.sport & 0xff00) >> 8) , (sock.sport & 0xff)
+ ]));
+ }
+
+ return peer;
+ },getPeer:function (sock, addr, port) {
+ return sock.peers[addr + ':' + port];
+ },addPeer:function (sock, peer) {
+ sock.peers[peer.addr + ':' + peer.port] = peer;
+ },removePeer:function (sock, peer) {
+ delete sock.peers[peer.addr + ':' + peer.port];
+ },handlePeerEvents:function (sock, peer) {
+ var first = true;
+
+ var handleOpen = function () {
+ try {
+ var queued = peer.dgram_send_queue.shift();
+ while (queued) {
+ peer.socket.send(queued);
+ queued = peer.dgram_send_queue.shift();
+ }
+ } catch (e) {
+ // not much we can do here in the way of proper error handling as we've already
+ // lied and said this data was sent. shut it down.
+ peer.socket.close();
+ }
+ };
+
+ function handleMessage(data) {
+ assert(typeof data !== 'string' && data.byteLength !== undefined); // must receive an ArrayBuffer
+ data = new Uint8Array(data); // make a typed array view on the array buffer
+
+
+ // if this is the port message, override the peer's port with it
+ var wasfirst = first;
+ first = false;
+ if (wasfirst &&
+ data.length === 10 &&
+ data[0] === 255 && data[1] === 255 && data[2] === 255 && data[3] === 255 &&
+ data[4] === 'p'.charCodeAt(0) && data[5] === 'o'.charCodeAt(0) && data[6] === 'r'.charCodeAt(0) && data[7] === 't'.charCodeAt(0)) {
+ // update the peer's port and it's key in the peer map
+ var newport = ((data[8] << 8) | data[9]);
+ SOCKFS.websocket_sock_ops.removePeer(sock, peer);
+ peer.port = newport;
+ SOCKFS.websocket_sock_ops.addPeer(sock, peer);
+ return;
+ }
+
+ sock.recv_queue.push({ addr: peer.addr, port: peer.port, data: data });
+ };
+
+ if (ENVIRONMENT_IS_NODE) {
+ peer.socket.on('open', handleOpen);
+ peer.socket.on('message', function(data, flags) {
+ if (!flags.binary) {
+ return;
+ }
+ handleMessage((new Uint8Array(data)).buffer); // copy from node Buffer -> ArrayBuffer
+ });
+ peer.socket.on('error', function() {
+ // don't throw
+ });
+ } else {
+ peer.socket.onopen = handleOpen;
+ peer.socket.onmessage = function peer_socket_onmessage(event) {
+ handleMessage(event.data);
+ };
+ }
+ },poll:function (sock) {
+ if (sock.type === 1 && sock.server) {
+ // listen sockets should only say they're available for reading
+ // if there are pending clients.
+ return sock.pending.length ? (64 | 1) : 0;
+ }
+
+ var mask = 0;
+ var dest = sock.type === 1 ? // we only care about the socket state for connection-based sockets
+ SOCKFS.websocket_sock_ops.getPeer(sock, sock.daddr, sock.dport) :
+ null;
+
+ if (sock.recv_queue.length ||
+ !dest || // connection-less sockets are always ready to read
+ (dest && dest.socket.readyState === dest.socket.CLOSING) ||
+ (dest && dest.socket.readyState === dest.socket.CLOSED)) { // let recv return 0 once closed
+ mask |= (64 | 1);
+ }
+
+ if (!dest || // connection-less sockets are always ready to write
+ (dest && dest.socket.readyState === dest.socket.OPEN)) {
+ mask |= 4;
+ }
+
+ if ((dest && dest.socket.readyState === dest.socket.CLOSING) ||
+ (dest && dest.socket.readyState === dest.socket.CLOSED)) {
+ mask |= 16;
+ }
+
+ return mask;
+ },ioctl:function (sock, request, arg) {
+ switch (request) {
+ case 21531:
+ var bytes = 0;
+ if (sock.recv_queue.length) {
+ bytes = sock.recv_queue[0].data.length;
+ }
+ HEAP32[((arg)>>2)]=bytes;
+ return 0;
+ default:
+ return ERRNO_CODES.EINVAL;
+ }
+ },close:function (sock) {
+ // if we've spawned a listen server, close it
+ if (sock.server) {
+ try {
+ sock.server.close();
+ } catch (e) {
+ }
+ sock.server = null;
+ }
+ // close any peer connections
+ var peers = Object.keys(sock.peers);
+ for (var i = 0; i < peers.length; i++) {
+ var peer = sock.peers[peers[i]];
+ try {
+ peer.socket.close();
+ } catch (e) {
+ }
+ SOCKFS.websocket_sock_ops.removePeer(sock, peer);
+ }
+ return 0;
+ },bind:function (sock, addr, port) {
+ if (typeof sock.saddr !== 'undefined' || typeof sock.sport !== 'undefined') {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL); // already bound
+ }
+ sock.saddr = addr;
+ sock.sport = port || _mkport();
+ // in order to emulate dgram sockets, we need to launch a listen server when
+ // binding on a connection-less socket
+ // note: this is only required on the server side
+ if (sock.type === 2) {
+ // close the existing server if it exists
+ if (sock.server) {
+ sock.server.close();
+ sock.server = null;
+ }
+ // swallow error operation not supported error that occurs when binding in the
+ // browser where this isn't supported
+ try {
+ sock.sock_ops.listen(sock, 0);
+ } catch (e) {
+ if (!(e instanceof FS.ErrnoError)) throw e;
+ if (e.errno !== ERRNO_CODES.EOPNOTSUPP) throw e;
+ }
+ }
+ },connect:function (sock, addr, port) {
+ if (sock.server) {
+ throw new FS.ErrnoError(ERRNO_CODS.EOPNOTSUPP);
+ }
+
+ // TODO autobind
+ // if (!sock.addr && sock.type == 2) {
+ // }
+
+ // early out if we're already connected / in the middle of connecting
+ if (typeof sock.daddr !== 'undefined' && typeof sock.dport !== 'undefined') {
+ var dest = SOCKFS.websocket_sock_ops.getPeer(sock, sock.daddr, sock.dport);
+ if (dest) {
+ if (dest.socket.readyState === dest.socket.CONNECTING) {
+ throw new FS.ErrnoError(ERRNO_CODES.EALREADY);
+ } else {
+ throw new FS.ErrnoError(ERRNO_CODES.EISCONN);
+ }
+ }
+ }
+
+ // add the socket to our peer list and set our
+ // destination address / port to match
+ var peer = SOCKFS.websocket_sock_ops.createPeer(sock, addr, port);
+ sock.daddr = peer.addr;
+ sock.dport = peer.port;
+
+ // always "fail" in non-blocking mode
+ throw new FS.ErrnoError(ERRNO_CODES.EINPROGRESS);
+ },listen:function (sock, backlog) {
+ if (!ENVIRONMENT_IS_NODE) {
+ throw new FS.ErrnoError(ERRNO_CODES.EOPNOTSUPP);
+ }
+ if (sock.server) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL); // already listening
+ }
+ var WebSocketServer = require('ws').Server;
+ var host = sock.saddr;
+ sock.server = new WebSocketServer({
+ host: host,
+ port: sock.sport
+ // TODO support backlog
+ });
+
+ sock.server.on('connection', function(ws) {
+ if (sock.type === 1) {
+ var newsock = SOCKFS.createSocket(sock.family, sock.type, sock.protocol);
+
+ // create a peer on the new socket
+ var peer = SOCKFS.websocket_sock_ops.createPeer(newsock, ws);
+ newsock.daddr = peer.addr;
+ newsock.dport = peer.port;
+
+ // push to queue for accept to pick up
+ sock.pending.push(newsock);
+ } else {
+ // create a peer on the listen socket so calling sendto
+ // with the listen socket and an address will resolve
+ // to the correct client
+ SOCKFS.websocket_sock_ops.createPeer(sock, ws);
+ }
+ });
+ sock.server.on('closed', function() {
+ sock.server = null;
+ });
+ sock.server.on('error', function() {
+ // don't throw
+ });
+ },accept:function (listensock) {
+ if (!listensock.server) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var newsock = listensock.pending.shift();
+ newsock.stream.flags = listensock.stream.flags;
+ return newsock;
+ },getname:function (sock, peer) {
+ var addr, port;
+ if (peer) {
+ if (sock.daddr === undefined || sock.dport === undefined) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ }
+ addr = sock.daddr;
+ port = sock.dport;
+ } else {
+ // TODO saddr and sport will be set for bind()'d UDP sockets, but what
+ // should we be returning for TCP sockets that've been connect()'d?
+ addr = sock.saddr || 0;
+ port = sock.sport || 0;
+ }
+ return { addr: addr, port: port };
+ },sendmsg:function (sock, buffer, offset, length, addr, port) {
+ if (sock.type === 2) {
+ // connection-less sockets will honor the message address,
+ // and otherwise fall back to the bound destination address
+ if (addr === undefined || port === undefined) {
+ addr = sock.daddr;
+ port = sock.dport;
+ }
+ // if there was no address to fall back to, error out
+ if (addr === undefined || port === undefined) {
+ throw new FS.ErrnoError(ERRNO_CODES.EDESTADDRREQ);
+ }
+ } else {
+ // connection-based sockets will only use the bound
+ addr = sock.daddr;
+ port = sock.dport;
+ }
+
+ // find the peer for the destination address
+ var dest = SOCKFS.websocket_sock_ops.getPeer(sock, addr, port);
+
+ // early out if not connected with a connection-based socket
+ if (sock.type === 1) {
+ if (!dest || dest.socket.readyState === dest.socket.CLOSING || dest.socket.readyState === dest.socket.CLOSED) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ } else if (dest.socket.readyState === dest.socket.CONNECTING) {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ }
+
+ // create a copy of the incoming data to send, as the WebSocket API
+ // doesn't work entirely with an ArrayBufferView, it'll just send
+ // the entire underlying buffer
+ var data;
+ if (buffer instanceof Array || buffer instanceof ArrayBuffer) {
+ data = buffer.slice(offset, offset + length);
+ } else { // ArrayBufferView
+ data = buffer.buffer.slice(buffer.byteOffset + offset, buffer.byteOffset + offset + length);
+ }
+
+ // if we're emulating a connection-less dgram socket and don't have
+ // a cached connection, queue the buffer to send upon connect and
+ // lie, saying the data was sent now.
+ if (sock.type === 2) {
+ if (!dest || dest.socket.readyState !== dest.socket.OPEN) {
+ // if we're not connected, open a new connection
+ if (!dest || dest.socket.readyState === dest.socket.CLOSING || dest.socket.readyState === dest.socket.CLOSED) {
+ dest = SOCKFS.websocket_sock_ops.createPeer(sock, addr, port);
+ }
+ dest.dgram_send_queue.push(data);
+ return length;
+ }
+ }
+
+ try {
+ // send the actual data
+ dest.socket.send(data);
+ return length;
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ },recvmsg:function (sock, length) {
+ // http://pubs.opengroup.org/onlinepubs/7908799/xns/recvmsg.html
+ if (sock.type === 1 && sock.server) {
+ // tcp servers should not be recv()'ing on the listen socket
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ }
+
+ var queued = sock.recv_queue.shift();
+ if (!queued) {
+ if (sock.type === 1) {
+ var dest = SOCKFS.websocket_sock_ops.getPeer(sock, sock.daddr, sock.dport);
+
+ if (!dest) {
+ // if we have a destination address but are not connected, error out
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ }
+ else if (dest.socket.readyState === dest.socket.CLOSING || dest.socket.readyState === dest.socket.CLOSED) {
+ // return null if the socket has closed
+ return null;
+ }
+ else {
+ // else, our socket is in a valid state but truly has nothing available
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ } else {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ }
+
+ // queued.data will be an ArrayBuffer if it's unadulterated, but if it's
+ // requeued TCP data it'll be an ArrayBufferView
+ var queuedLength = queued.data.byteLength || queued.data.length;
+ var queuedOffset = queued.data.byteOffset || 0;
+ var queuedBuffer = queued.data.buffer || queued.data;
+ var bytesRead = Math.min(length, queuedLength);
+ var res = {
+ buffer: new Uint8Array(queuedBuffer, queuedOffset, bytesRead),
+ addr: queued.addr,
+ port: queued.port
+ };
+
+
+ // push back any unread data for TCP connections
+ if (sock.type === 1 && bytesRead < queuedLength) {
+ var bytesRemaining = queuedLength - bytesRead;
+ queued.data = new Uint8Array(queuedBuffer, queuedOffset + bytesRead, bytesRemaining);
+ sock.recv_queue.unshift(queued);
+ }
+
+ return res;
+ }}};function _send(fd, buf, len, flags) {
+ var sock = SOCKFS.getSocket(fd);
+ if (!sock) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+ // TODO honor flags
+ return _write(fd, buf, len);
+ }
+
+ function _pwrite(fildes, buf, nbyte, offset) {
+ // ssize_t pwrite(int fildes, const void *buf, size_t nbyte, off_t offset);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/write.html
+ var stream = FS.getStream(fildes);
+ if (!stream) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+ try {
+ var slab = HEAP8;
+ return FS.write(stream, slab, buf, nbyte, offset);
+ } catch (e) {
+ FS.handleFSError(e);
+ return -1;
+ }
+ }function _write(fildes, buf, nbyte) {
+ // ssize_t write(int fildes, const void *buf, size_t nbyte);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/write.html
+ var stream = FS.getStream(fildes);
+ if (!stream) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+
+
+ try {
+ var slab = HEAP8;
+ return FS.write(stream, slab, buf, nbyte);
+ } catch (e) {
+ FS.handleFSError(e);
+ return -1;
+ }
+ }
+
+ function _fileno(stream) {
+ // int fileno(FILE *stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fileno.html
+ stream = FS.getStreamFromPtr(stream);
+ if (!stream) return -1;
+ return stream.fd;
+ }function _fwrite(ptr, size, nitems, stream) {
+ // size_t fwrite(const void *restrict ptr, size_t size, size_t nitems, FILE *restrict stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fwrite.html
+ var bytesToWrite = nitems * size;
+ if (bytesToWrite == 0) return 0;
+ var fd = _fileno(stream);
+ var bytesWritten = _write(fd, ptr, bytesToWrite);
+ if (bytesWritten == -1) {
+ var streamObj = FS.getStreamFromPtr(stream);
+ if (streamObj) streamObj.error = true;
+ return 0;
+ } else {
+ return Math.floor(bytesWritten / size);
+ }
+ }
+
+
+
+ Module["_strlen"] = _strlen;
+
+ function __reallyNegative(x) {
+ return x < 0 || (x === 0 && (1/x) === -Infinity);
+ }function __formatString(format, varargs) {
+ var textIndex = format;
+ var argIndex = 0;
+ function getNextArg(type) {
+ // NOTE: Explicitly ignoring type safety. Otherwise this fails:
+ // int x = 4; printf("%c\n", (char)x);
+ var ret;
+ if (type === 'double') {
+ ret = HEAPF64[(((varargs)+(argIndex))>>3)];
+ } else if (type == 'i64') {
+ ret = [HEAP32[(((varargs)+(argIndex))>>2)],
+ HEAP32[(((varargs)+(argIndex+4))>>2)]];
+
+ } else {
+ type = 'i32'; // varargs are always i32, i64, or double
+ ret = HEAP32[(((varargs)+(argIndex))>>2)];
+ }
+ argIndex += Runtime.getNativeFieldSize(type);
+ return ret;
+ }
+
+ var ret = [];
+ var curr, next, currArg;
+ while(1) {
+ var startTextIndex = textIndex;
+ curr = HEAP8[(textIndex)];
+ if (curr === 0) break;
+ next = HEAP8[((textIndex+1)|0)];
+ if (curr == 37) {
+ // Handle flags.
+ var flagAlwaysSigned = false;
+ var flagLeftAlign = false;
+ var flagAlternative = false;
+ var flagZeroPad = false;
+ var flagPadSign = false;
+ flagsLoop: while (1) {
+ switch (next) {
+ case 43:
+ flagAlwaysSigned = true;
+ break;
+ case 45:
+ flagLeftAlign = true;
+ break;
+ case 35:
+ flagAlternative = true;
+ break;
+ case 48:
+ if (flagZeroPad) {
+ break flagsLoop;
+ } else {
+ flagZeroPad = true;
+ break;
+ }
+ case 32:
+ flagPadSign = true;
+ break;
+ default:
+ break flagsLoop;
+ }
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ }
+
+ // Handle width.
+ var width = 0;
+ if (next == 42) {
+ width = getNextArg('i32');
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ } else {
+ while (next >= 48 && next <= 57) {
+ width = width * 10 + (next - 48);
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ }
+ }
+
+ // Handle precision.
+ var precisionSet = false, precision = -1;
+ if (next == 46) {
+ precision = 0;
+ precisionSet = true;
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ if (next == 42) {
+ precision = getNextArg('i32');
+ textIndex++;
+ } else {
+ while(1) {
+ var precisionChr = HEAP8[((textIndex+1)|0)];
+ if (precisionChr < 48 ||
+ precisionChr > 57) break;
+ precision = precision * 10 + (precisionChr - 48);
+ textIndex++;
+ }
+ }
+ next = HEAP8[((textIndex+1)|0)];
+ }
+ if (precision < 0) {
+ precision = 6; // Standard default.
+ precisionSet = false;
+ }
+
+ // Handle integer sizes. WARNING: These assume a 32-bit architecture!
+ var argSize;
+ switch (String.fromCharCode(next)) {
+ case 'h':
+ var nextNext = HEAP8[((textIndex+2)|0)];
+ if (nextNext == 104) {
+ textIndex++;
+ argSize = 1; // char (actually i32 in varargs)
+ } else {
+ argSize = 2; // short (actually i32 in varargs)
+ }
+ break;
+ case 'l':
+ var nextNext = HEAP8[((textIndex+2)|0)];
+ if (nextNext == 108) {
+ textIndex++;
+ argSize = 8; // long long
+ } else {
+ argSize = 4; // long
+ }
+ break;
+ case 'L': // long long
+ case 'q': // int64_t
+ case 'j': // intmax_t
+ argSize = 8;
+ break;
+ case 'z': // size_t
+ case 't': // ptrdiff_t
+ case 'I': // signed ptrdiff_t or unsigned size_t
+ argSize = 4;
+ break;
+ default:
+ argSize = null;
+ }
+ if (argSize) textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+
+ // Handle type specifier.
+ switch (String.fromCharCode(next)) {
+ case 'd': case 'i': case 'u': case 'o': case 'x': case 'X': case 'p': {
+ // Integer.
+ var signed = next == 100 || next == 105;
+ argSize = argSize || 4;
+ var currArg = getNextArg('i' + (argSize * 8));
+ var argText;
+ // Flatten i64-1 [low, high] into a (slightly rounded) double
+ if (argSize == 8) {
+ currArg = Runtime.makeBigInt(currArg[0], currArg[1], next == 117);
+ }
+ // Truncate to requested size.
+ if (argSize <= 4) {
+ var limit = Math.pow(256, argSize) - 1;
+ currArg = (signed ? reSign : unSign)(currArg & limit, argSize * 8);
+ }
+ // Format the number.
+ var currAbsArg = Math.abs(currArg);
+ var prefix = '';
+ if (next == 100 || next == 105) {
+ argText = reSign(currArg, 8 * argSize, 1).toString(10);
+ } else if (next == 117) {
+ argText = unSign(currArg, 8 * argSize, 1).toString(10);
+ currArg = Math.abs(currArg);
+ } else if (next == 111) {
+ argText = (flagAlternative ? '0' : '') + currAbsArg.toString(8);
+ } else if (next == 120 || next == 88) {
+ prefix = (flagAlternative && currArg != 0) ? '0x' : '';
+ if (currArg < 0) {
+ // Represent negative numbers in hex as 2's complement.
+ currArg = -currArg;
+ argText = (currAbsArg - 1).toString(16);
+ var buffer = [];
+ for (var i = 0; i < argText.length; i++) {
+ buffer.push((0xF - parseInt(argText[i], 16)).toString(16));
+ }
+ argText = buffer.join('');
+ while (argText.length < argSize * 2) argText = 'f' + argText;
+ } else {
+ argText = currAbsArg.toString(16);
+ }
+ if (next == 88) {
+ prefix = prefix.toUpperCase();
+ argText = argText.toUpperCase();
+ }
+ } else if (next == 112) {
+ if (currAbsArg === 0) {
+ argText = '(nil)';
+ } else {
+ prefix = '0x';
+ argText = currAbsArg.toString(16);
+ }
+ }
+ if (precisionSet) {
+ while (argText.length < precision) {
+ argText = '0' + argText;
+ }
+ }
+
+ // Add sign if needed
+ if (currArg >= 0) {
+ if (flagAlwaysSigned) {
+ prefix = '+' + prefix;
+ } else if (flagPadSign) {
+ prefix = ' ' + prefix;
+ }
+ }
+
+ // Move sign to prefix so we zero-pad after the sign
+ if (argText.charAt(0) == '-') {
+ prefix = '-' + prefix;
+ argText = argText.substr(1);
+ }
+
+ // Add padding.
+ while (prefix.length + argText.length < width) {
+ if (flagLeftAlign) {
+ argText += ' ';
+ } else {
+ if (flagZeroPad) {
+ argText = '0' + argText;
+ } else {
+ prefix = ' ' + prefix;
+ }
+ }
+ }
+
+ // Insert the result into the buffer.
+ argText = prefix + argText;
+ argText.split('').forEach(function(chr) {
+ ret.push(chr.charCodeAt(0));
+ });
+ break;
+ }
+ case 'f': case 'F': case 'e': case 'E': case 'g': case 'G': {
+ // Float.
+ var currArg = getNextArg('double');
+ var argText;
+ if (isNaN(currArg)) {
+ argText = 'nan';
+ flagZeroPad = false;
+ } else if (!isFinite(currArg)) {
+ argText = (currArg < 0 ? '-' : '') + 'inf';
+ flagZeroPad = false;
+ } else {
+ var isGeneral = false;
+ var effectivePrecision = Math.min(precision, 20);
+
+ // Convert g/G to f/F or e/E, as per:
+ // http://pubs.opengroup.org/onlinepubs/9699919799/functions/printf.html
+ if (next == 103 || next == 71) {
+ isGeneral = true;
+ precision = precision || 1;
+ var exponent = parseInt(currArg.toExponential(effectivePrecision).split('e')[1], 10);
+ if (precision > exponent && exponent >= -4) {
+ next = ((next == 103) ? 'f' : 'F').charCodeAt(0);
+ precision -= exponent + 1;
+ } else {
+ next = ((next == 103) ? 'e' : 'E').charCodeAt(0);
+ precision--;
+ }
+ effectivePrecision = Math.min(precision, 20);
+ }
+
+ if (next == 101 || next == 69) {
+ argText = currArg.toExponential(effectivePrecision);
+ // Make sure the exponent has at least 2 digits.
+ if (/[eE][-+]\d$/.test(argText)) {
+ argText = argText.slice(0, -1) + '0' + argText.slice(-1);
+ }
+ } else if (next == 102 || next == 70) {
+ argText = currArg.toFixed(effectivePrecision);
+ if (currArg === 0 && __reallyNegative(currArg)) {
+ argText = '-' + argText;
+ }
+ }
+
+ var parts = argText.split('e');
+ if (isGeneral && !flagAlternative) {
+ // Discard trailing zeros and periods.
+ while (parts[0].length > 1 && parts[0].indexOf('.') != -1 &&
+ (parts[0].slice(-1) == '0' || parts[0].slice(-1) == '.')) {
+ parts[0] = parts[0].slice(0, -1);
+ }
+ } else {
+ // Make sure we have a period in alternative mode.
+ if (flagAlternative && argText.indexOf('.') == -1) parts[0] += '.';
+ // Zero pad until required precision.
+ while (precision > effectivePrecision++) parts[0] += '0';
+ }
+ argText = parts[0] + (parts.length > 1 ? 'e' + parts[1] : '');
+
+ // Capitalize 'E' if needed.
+ if (next == 69) argText = argText.toUpperCase();
+
+ // Add sign.
+ if (currArg >= 0) {
+ if (flagAlwaysSigned) {
+ argText = '+' + argText;
+ } else if (flagPadSign) {
+ argText = ' ' + argText;
+ }
+ }
+ }
+
+ // Add padding.
+ while (argText.length < width) {
+ if (flagLeftAlign) {
+ argText += ' ';
+ } else {
+ if (flagZeroPad && (argText[0] == '-' || argText[0] == '+')) {
+ argText = argText[0] + '0' + argText.slice(1);
+ } else {
+ argText = (flagZeroPad ? '0' : ' ') + argText;
+ }
+ }
+ }
+
+ // Adjust case.
+ if (next < 97) argText = argText.toUpperCase();
+
+ // Insert the result into the buffer.
+ argText.split('').forEach(function(chr) {
+ ret.push(chr.charCodeAt(0));
+ });
+ break;
+ }
+ case 's': {
+ // String.
+ var arg = getNextArg('i8*');
+ var argLength = arg ? _strlen(arg) : '(null)'.length;
+ if (precisionSet) argLength = Math.min(argLength, precision);
+ if (!flagLeftAlign) {
+ while (argLength < width--) {
+ ret.push(32);
+ }
+ }
+ if (arg) {
+ for (var i = 0; i < argLength; i++) {
+ ret.push(HEAPU8[((arg++)|0)]);
+ }
+ } else {
+ ret = ret.concat(intArrayFromString('(null)'.substr(0, argLength), true));
+ }
+ if (flagLeftAlign) {
+ while (argLength < width--) {
+ ret.push(32);
+ }
+ }
+ break;
+ }
+ case 'c': {
+ // Character.
+ if (flagLeftAlign) ret.push(getNextArg('i8'));
+ while (--width > 0) {
+ ret.push(32);
+ }
+ if (!flagLeftAlign) ret.push(getNextArg('i8'));
+ break;
+ }
+ case 'n': {
+ // Write the length written so far to the next parameter.
+ var ptr = getNextArg('i32*');
+ HEAP32[((ptr)>>2)]=ret.length;
+ break;
+ }
+ case '%': {
+ // Literal percent sign.
+ ret.push(curr);
+ break;
+ }
+ default: {
+ // Unknown specifiers remain untouched.
+ for (var i = startTextIndex; i < textIndex + 2; i++) {
+ ret.push(HEAP8[(i)]);
+ }
+ }
+ }
+ textIndex += 2;
+ // TODO: Support a/A (hex float) and m (last error) specifiers.
+ // TODO: Support %1${specifier} for arg selection.
+ } else {
+ ret.push(curr);
+ textIndex += 1;
+ }
+ }
+ return ret;
+ }function _fprintf(stream, format, varargs) {
+ // int fprintf(FILE *restrict stream, const char *restrict format, ...);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/printf.html
+ var result = __formatString(format, varargs);
+ var stack = Runtime.stackSave();
+ var ret = _fwrite(allocate(result, 'i8', ALLOC_STACK), 1, result.length, stream);
+ Runtime.stackRestore(stack);
+ return ret;
+ }function _printf(format, varargs) {
+ // int printf(const char *restrict format, ...);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/printf.html
+ var stdout = HEAP32[((_stdout)>>2)];
+ return _fprintf(stdout, format, varargs);
+ }
+
+
+ Module["_memset"] = _memset;
+
+
+
+ function _emscripten_memcpy_big(dest, src, num) {
+ HEAPU8.set(HEAPU8.subarray(src, src+num), dest);
+ return dest;
+ }
+ Module["_memcpy"] = _memcpy;
+
+ function _free() {
+ }
+ Module["_free"] = _free;
+Module["requestFullScreen"] = function Module_requestFullScreen(lockPointer, resizeCanvas) { Browser.requestFullScreen(lockPointer, resizeCanvas) };
+ Module["requestAnimationFrame"] = function Module_requestAnimationFrame(func) { Browser.requestAnimationFrame(func) };
+ Module["setCanvasSize"] = function Module_setCanvasSize(width, height, noUpdates) { Browser.setCanvasSize(width, height, noUpdates) };
+ Module["pauseMainLoop"] = function Module_pauseMainLoop() { Browser.mainLoop.pause() };
+ Module["resumeMainLoop"] = function Module_resumeMainLoop() { Browser.mainLoop.resume() };
+ Module["getUserMedia"] = function Module_getUserMedia() { Browser.getUserMedia() }
+FS.staticInit();__ATINIT__.unshift({ func: function() { if (!Module["noFSInit"] && !FS.init.initialized) FS.init() } });__ATMAIN__.push({ func: function() { FS.ignorePermissions = false } });__ATEXIT__.push({ func: function() { FS.quit() } });Module["FS_createFolder"] = FS.createFolder;Module["FS_createPath"] = FS.createPath;Module["FS_createDataFile"] = FS.createDataFile;Module["FS_createPreloadedFile"] = FS.createPreloadedFile;Module["FS_createLazyFile"] = FS.createLazyFile;Module["FS_createLink"] = FS.createLink;Module["FS_createDevice"] = FS.createDevice;
+___errno_state = Runtime.staticAlloc(4); HEAP32[((___errno_state)>>2)]=0;
+__ATINIT__.unshift({ func: function() { TTY.init() } });__ATEXIT__.push({ func: function() { TTY.shutdown() } });TTY.utf8 = new Runtime.UTF8Processor();
+if (ENVIRONMENT_IS_NODE) { var fs = require("fs"); NODEFS.staticInit(); }
+__ATINIT__.push({ func: function() { SOCKFS.root = FS.mount(SOCKFS, {}, null); } });
+STACK_BASE = STACKTOP = Runtime.alignMemory(STATICTOP);
+
+staticSealed = true; // seal the static portion of memory
+
+STACK_MAX = STACK_BASE + 5242880;
+
+DYNAMIC_BASE = DYNAMICTOP = Runtime.alignMemory(STACK_MAX);
+
+assert(DYNAMIC_BASE < TOTAL_MEMORY, "TOTAL_MEMORY not big enough for stack");
+
+
+var Math_min = Math.min;
+function asmPrintInt(x, y) {
+ Module.print('int ' + x + ',' + y);// + ' ' + new Error().stack);
+}
+function asmPrintFloat(x, y) {
+ Module.print('float ' + x + ',' + y);// + ' ' + new Error().stack);
+}
+// EMSCRIPTEN_START_ASM
+var asm = Wasm.instantiateModuleFromAsm((function Module(global, env, buffer) {
+ 'use asm';
+ var HEAP8 = new global.Int8Array(buffer);
+ var HEAP16 = new global.Int16Array(buffer);
+ var HEAP32 = new global.Int32Array(buffer);
+ var HEAPU8 = new global.Uint8Array(buffer);
+ var HEAPU16 = new global.Uint16Array(buffer);
+ var HEAPU32 = new global.Uint32Array(buffer);
+ var HEAPF32 = new global.Float32Array(buffer);
+ var HEAPF64 = new global.Float64Array(buffer);
+
+ var STACKTOP=env.STACKTOP|0;
+ var STACK_MAX=env.STACK_MAX|0;
+ var tempDoublePtr=env.tempDoublePtr|0;
+ var ABORT=env.ABORT|0;
+
+ var __THREW__ = 0;
+ var threwValue = 0;
+ var setjmpId = 0;
+ var undef = 0;
+ var nan = +env.NaN, inf = +env.Infinity;
+ var tempInt = 0, tempBigInt = 0, tempBigIntP = 0, tempBigIntS = 0, tempBigIntR = 0.0, tempBigIntI = 0, tempBigIntD = 0, tempValue = 0, tempDouble = 0.0;
+
+ var tempRet0 = 0;
+ var tempRet1 = 0;
+ var tempRet2 = 0;
+ var tempRet3 = 0;
+ var tempRet4 = 0;
+ var tempRet5 = 0;
+ var tempRet6 = 0;
+ var tempRet7 = 0;
+ var tempRet8 = 0;
+ var tempRet9 = 0;
+ var Math_floor=global.Math.floor;
+ var Math_abs=global.Math.abs;
+ var Math_sqrt=global.Math.sqrt;
+ var Math_pow=global.Math.pow;
+ var Math_cos=global.Math.cos;
+ var Math_sin=global.Math.sin;
+ var Math_tan=global.Math.tan;
+ var Math_acos=global.Math.acos;
+ var Math_asin=global.Math.asin;
+ var Math_atan=global.Math.atan;
+ var Math_atan2=global.Math.atan2;
+ var Math_exp=global.Math.exp;
+ var Math_log=global.Math.log;
+ var Math_ceil=global.Math.ceil;
+ var Math_imul=global.Math.imul;
+ var abort=env.abort;
+ var assert=env.assert;
+ var asmPrintInt=env.asmPrintInt;
+ var asmPrintFloat=env.asmPrintFloat;
+ var Math_min=env.min;
+ var _free=env._free;
+ var _emscripten_memcpy_big=env._emscripten_memcpy_big;
+ var _printf=env._printf;
+ var _send=env._send;
+ var _pwrite=env._pwrite;
+ var __reallyNegative=env.__reallyNegative;
+ var _fwrite=env._fwrite;
+ var _malloc=env._malloc;
+ var _mkport=env._mkport;
+ var _fprintf=env._fprintf;
+ var ___setErrNo=env.___setErrNo;
+ var __formatString=env.__formatString;
+ var _fileno=env._fileno;
+ var _fflush=env._fflush;
+ var _write=env._write;
+ var tempFloat = 0.0;
+
+// EMSCRIPTEN_START_FUNCS
+function _main(i3, i5) {
+ i3 = i3 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i2 = 0, i4 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i2 = i1;
+ L1 : do {
+ if ((i3 | 0) > 1) {
+ i3 = HEAP8[HEAP32[i5 + 4 >> 2] | 0] | 0;
+ switch (i3 | 0) {
+ case 50:
+ {
+ i3 = 625;
+ break L1;
+ }
+ case 51:
+ {
+ i4 = 4;
+ break L1;
+ }
+ case 52:
+ {
+ i3 = 6250;
+ break L1;
+ }
+ case 53:
+ {
+ i3 = 12500;
+ break L1;
+ }
+ case 49:
+ {
+ i3 = 75;
+ break L1;
+ }
+ case 48:
+ {
+ i12 = 0;
+ STACKTOP = i1;
+ return i12 | 0;
+ }
+ default:
+ {
+ HEAP32[i2 >> 2] = i3 + -48;
+ _printf(8, i2 | 0) | 0;
+ i12 = -1;
+ STACKTOP = i1;
+ return i12 | 0;
+ }
+ }
+ } else {
+ i4 = 4;
+ }
+ } while (0);
+ if ((i4 | 0) == 4) {
+ i3 = 1250;
+ }
+ i4 = 0;
+ i12 = 0;
+ do {
+ i9 = (i4 | 0) % 10 | 0;
+ i5 = i9 + i4 | 0;
+ i6 = (i4 | 0) % 255 | 0;
+ i8 = (i4 | 0) % 15 | 0;
+ i10 = ((i4 | 0) % 120 | 0 | 0) % 1024 | 0;
+ i11 = ((i4 | 0) % 1024 | 0) + i4 | 0;
+ i5 = ((i5 | 0) % 1024 | 0) + i5 | 0;
+ i8 = ((i8 | 0) % 1024 | 0) + i8 | 0;
+ i6 = (((i6 | 0) % 1024 | 0) + i6 + i10 | 0) % 1024 | 0;
+ i7 = 0;
+ do {
+ i17 = i7 << 1;
+ i14 = (i7 | 0) % 120 | 0;
+ i18 = (i17 | 0) % 1024 | 0;
+ i19 = (i9 + i7 | 0) % 1024 | 0;
+ i16 = ((i7 | 0) % 255 | 0 | 0) % 1024 | 0;
+ i15 = (i7 | 0) % 1024 | 0;
+ i13 = ((i7 | 0) % 15 | 0 | 0) % 1024 | 0;
+ i12 = (((i19 + i18 + i16 + i10 + i15 + i13 + ((i11 + i19 | 0) % 1024 | 0) + ((i5 + i18 | 0) % 1024 | 0) + ((i18 + i17 + i16 | 0) % 1024 | 0) + i6 + ((i8 + i15 | 0) % 1024 | 0) + ((((i14 | 0) % 1024 | 0) + i14 + i13 | 0) % 1024 | 0) | 0) % 100 | 0) + i12 | 0) % 10240 | 0;
+ i7 = i7 + 1 | 0;
+ } while ((i7 | 0) != 5e4);
+ i4 = i4 + 1 | 0;
+ } while ((i4 | 0) < (i3 | 0));
+ HEAP32[i2 >> 2] = i12;
+ _printf(24, i2 | 0) | 0;
+ i19 = 0;
+ STACKTOP = i1;
+ return i19 | 0;
+}
+function _memcpy(i3, i2, i1) {
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i4 = 0;
+ if ((i1 | 0) >= 4096) return _emscripten_memcpy_big(i3 | 0, i2 | 0, i1 | 0) | 0;
+ i4 = i3 | 0;
+ if ((i3 & 3) == (i2 & 3)) {
+ while (i3 & 3) {
+ if ((i1 | 0) == 0) return i4 | 0;
+ HEAP8[i3] = HEAP8[i2] | 0;
+ i3 = i3 + 1 | 0;
+ i2 = i2 + 1 | 0;
+ i1 = i1 - 1 | 0;
+ }
+ while ((i1 | 0) >= 4) {
+ HEAP32[i3 >> 2] = HEAP32[i2 >> 2];
+ i3 = i3 + 4 | 0;
+ i2 = i2 + 4 | 0;
+ i1 = i1 - 4 | 0;
+ }
+ }
+ while ((i1 | 0) > 0) {
+ HEAP8[i3] = HEAP8[i2] | 0;
+ i3 = i3 + 1 | 0;
+ i2 = i2 + 1 | 0;
+ i1 = i1 - 1 | 0;
+ }
+ return i4 | 0;
+}
+function _memset(i1, i4, i3) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i5 = 0, i6 = 0, i7 = 0;
+ i2 = i1 + i3 | 0;
+ if ((i3 | 0) >= 20) {
+ i4 = i4 & 255;
+ i7 = i1 & 3;
+ i6 = i4 | i4 << 8 | i4 << 16 | i4 << 24;
+ i5 = i2 & ~3;
+ if (i7) {
+ i7 = i1 + 4 - i7 | 0;
+ while ((i1 | 0) < (i7 | 0)) {
+ HEAP8[i1] = i4;
+ i1 = i1 + 1 | 0;
+ }
+ }
+ while ((i1 | 0) < (i5 | 0)) {
+ HEAP32[i1 >> 2] = i6;
+ i1 = i1 + 4 | 0;
+ }
+ }
+ while ((i1 | 0) < (i2 | 0)) {
+ HEAP8[i1] = i4;
+ i1 = i1 + 1 | 0;
+ }
+ return i1 - i3 | 0;
+}
+function copyTempDouble(i1) {
+ i1 = i1 | 0;
+ HEAP8[tempDoublePtr] = HEAP8[i1];
+ HEAP8[tempDoublePtr + 1 | 0] = HEAP8[i1 + 1 | 0];
+ HEAP8[tempDoublePtr + 2 | 0] = HEAP8[i1 + 2 | 0];
+ HEAP8[tempDoublePtr + 3 | 0] = HEAP8[i1 + 3 | 0];
+ HEAP8[tempDoublePtr + 4 | 0] = HEAP8[i1 + 4 | 0];
+ HEAP8[tempDoublePtr + 5 | 0] = HEAP8[i1 + 5 | 0];
+ HEAP8[tempDoublePtr + 6 | 0] = HEAP8[i1 + 6 | 0];
+ HEAP8[tempDoublePtr + 7 | 0] = HEAP8[i1 + 7 | 0];
+}
+function copyTempFloat(i1) {
+ i1 = i1 | 0;
+ HEAP8[tempDoublePtr] = HEAP8[i1];
+ HEAP8[tempDoublePtr + 1 | 0] = HEAP8[i1 + 1 | 0];
+ HEAP8[tempDoublePtr + 2 | 0] = HEAP8[i1 + 2 | 0];
+ HEAP8[tempDoublePtr + 3 | 0] = HEAP8[i1 + 3 | 0];
+}
+function runPostSets() {}
+function _strlen(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = i1;
+ while (HEAP8[i2] | 0) {
+ i2 = i2 + 1 | 0;
+ }
+ return i2 - i1 | 0;
+}
+function stackAlloc(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + i1 | 0;
+ STACKTOP = STACKTOP + 7 & -8;
+ return i2 | 0;
+}
+function setThrew(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ if ((__THREW__ | 0) == 0) {
+ __THREW__ = i1;
+ threwValue = i2;
+ }
+}
+function stackRestore(i1) {
+ i1 = i1 | 0;
+ STACKTOP = i1;
+}
+function setTempRet9(i1) {
+ i1 = i1 | 0;
+ tempRet9 = i1;
+}
+function setTempRet8(i1) {
+ i1 = i1 | 0;
+ tempRet8 = i1;
+}
+function setTempRet7(i1) {
+ i1 = i1 | 0;
+ tempRet7 = i1;
+}
+function setTempRet6(i1) {
+ i1 = i1 | 0;
+ tempRet6 = i1;
+}
+function setTempRet5(i1) {
+ i1 = i1 | 0;
+ tempRet5 = i1;
+}
+function setTempRet4(i1) {
+ i1 = i1 | 0;
+ tempRet4 = i1;
+}
+function setTempRet3(i1) {
+ i1 = i1 | 0;
+ tempRet3 = i1;
+}
+function setTempRet2(i1) {
+ i1 = i1 | 0;
+ tempRet2 = i1;
+}
+function setTempRet1(i1) {
+ i1 = i1 | 0;
+ tempRet1 = i1;
+}
+function setTempRet0(i1) {
+ i1 = i1 | 0;
+ tempRet0 = i1;
+}
+function stackSave() {
+ return STACKTOP | 0;
+}
+
+// EMSCRIPTEN_END_FUNCS
+
+
+ return { _strlen: _strlen, _memcpy: _memcpy, _main: _main, _memset: _memset, runPostSets: runPostSets, stackAlloc: stackAlloc, stackSave: stackSave, stackRestore: stackRestore, setThrew: setThrew, setTempRet0: setTempRet0, setTempRet1: setTempRet1, setTempRet2: setTempRet2, setTempRet3: setTempRet3, setTempRet4: setTempRet4, setTempRet5: setTempRet5, setTempRet6: setTempRet6, setTempRet7: setTempRet7, setTempRet8: setTempRet8, setTempRet9: setTempRet9 };
+}).toString(),
+// EMSCRIPTEN_END_ASM
+{ "Math": Math, "Int8Array": Int8Array, "Int16Array": Int16Array, "Int32Array": Int32Array, "Uint8Array": Uint8Array, "Uint16Array": Uint16Array, "Uint32Array": Uint32Array, "Float32Array": Float32Array, "Float64Array": Float64Array, "abort": abort, "assert": assert, "asmPrintInt": asmPrintInt, "asmPrintFloat": asmPrintFloat, "min": Math_min, "_free": _free, "_emscripten_memcpy_big": _emscripten_memcpy_big, "_printf": _printf, "_send": _send, "_pwrite": _pwrite, "__reallyNegative": __reallyNegative, "_fwrite": _fwrite, "_malloc": _malloc, "_mkport": _mkport, "_fprintf": _fprintf, "___setErrNo": ___setErrNo, "__formatString": __formatString, "_fileno": _fileno, "_fflush": _fflush, "_write": _write, "STACKTOP": STACKTOP, "STACK_MAX": STACK_MAX, "tempDoublePtr": tempDoublePtr, "ABORT": ABORT, "NaN": NaN, "Infinity": Infinity }, buffer);
+var _strlen = Module["_strlen"] = asm["_strlen"];
+var _memcpy = Module["_memcpy"] = asm["_memcpy"];
+var _main = Module["_main"] = asm["_main"];
+var _memset = Module["_memset"] = asm["_memset"];
+var runPostSets = Module["runPostSets"] = asm["runPostSets"];
+
+Runtime.stackAlloc = function(size) { return asm['stackAlloc'](size) };
+Runtime.stackSave = function() { return asm['stackSave']() };
+Runtime.stackRestore = function(top) { asm['stackRestore'](top) };
+
+
+// Warning: printing of i64 values may be slightly rounded! No deep i64 math used, so precise i64 code not included
+var i64Math = null;
+
+// === Auto-generated postamble setup entry stuff ===
+
+if (memoryInitializer) {
+ if (ENVIRONMENT_IS_NODE || ENVIRONMENT_IS_SHELL) {
+ var data = Module['readBinary'](memoryInitializer);
+ HEAPU8.set(data, STATIC_BASE);
+ } else {
+ addRunDependency('memory initializer');
+ Browser.asyncLoad(memoryInitializer, function(data) {
+ HEAPU8.set(data, STATIC_BASE);
+ removeRunDependency('memory initializer');
+ }, function(data) {
+ throw 'could not load memory initializer ' + memoryInitializer;
+ });
+ }
+}
+
+function ExitStatus(status) {
+ this.name = "ExitStatus";
+ this.message = "Program terminated with exit(" + status + ")";
+ this.status = status;
+};
+ExitStatus.prototype = new Error();
+ExitStatus.prototype.constructor = ExitStatus;
+
+var initialStackTop;
+var preloadStartTime = null;
+var calledMain = false;
+
+dependenciesFulfilled = function runCaller() {
+ // If run has never been called, and we should call run (INVOKE_RUN is true, and Module.noInitialRun is not false)
+ if (!Module['calledRun'] && shouldRunNow) run([].concat(Module["arguments"]));
+ if (!Module['calledRun']) dependenciesFulfilled = runCaller; // try this again later, after new deps are fulfilled
+}
+
+Module['callMain'] = Module.callMain = function callMain(args) {
+ assert(runDependencies == 0, 'cannot call main when async dependencies remain! (listen on __ATMAIN__)');
+ assert(__ATPRERUN__.length == 0, 'cannot call main when preRun functions remain to be called');
+
+ args = args || [];
+
+ ensureInitRuntime();
+
+ var argc = args.length+1;
+ function pad() {
+ for (var i = 0; i < 4-1; i++) {
+ argv.push(0);
+ }
+ }
+ var argv = [allocate(intArrayFromString("/bin/this.program"), 'i8', ALLOC_NORMAL) ];
+ pad();
+ for (var i = 0; i < argc-1; i = i + 1) {
+ argv.push(allocate(intArrayFromString(args[i]), 'i8', ALLOC_NORMAL));
+ pad();
+ }
+ argv.push(0);
+ argv = allocate(argv, 'i32', ALLOC_NORMAL);
+
+ initialStackTop = STACKTOP;
+
+ try {
+
+ var ret = Module['_main'](argc, argv, 0);
+
+
+ // if we're not running an evented main loop, it's time to exit
+ if (!Module['noExitRuntime']) {
+ exit(ret);
+ }
+ }
+ catch(e) {
+ if (e instanceof ExitStatus) {
+ // exit() throws this once it's done to make sure execution
+ // has been stopped completely
+ return;
+ } else if (e == 'SimulateInfiniteLoop') {
+ // running an evented main loop, don't immediately exit
+ Module['noExitRuntime'] = true;
+ return;
+ } else {
+ if (e && typeof e === 'object' && e.stack) Module.printErr('exception thrown: ' + [e, e.stack]);
+ throw e;
+ }
+ } finally {
+ calledMain = true;
+ }
+}
+
+
+
+
+function run(args) {
+ args = args || Module['arguments'];
+
+ if (preloadStartTime === null) preloadStartTime = Date.now();
+
+ if (runDependencies > 0) {
+ Module.printErr('run() called, but dependencies remain, so not running');
+ return;
+ }
+
+ preRun();
+
+ if (runDependencies > 0) return; // a preRun added a dependency, run will be called later
+ if (Module['calledRun']) return; // run may have just been called through dependencies being fulfilled just in this very frame
+
+ function doRun() {
+ if (Module['calledRun']) return; // run may have just been called while the async setStatus time below was happening
+ Module['calledRun'] = true;
+
+ ensureInitRuntime();
+
+ preMain();
+
+ if (ENVIRONMENT_IS_WEB && preloadStartTime !== null) {
+ Module.printErr('pre-main prep time: ' + (Date.now() - preloadStartTime) + ' ms');
+ }
+
+ if (Module['_main'] && shouldRunNow) {
+ Module['callMain'](args);
+ }
+
+ postRun();
+ }
+
+ if (Module['setStatus']) {
+ Module['setStatus']('Running...');
+ setTimeout(function() {
+ setTimeout(function() {
+ Module['setStatus']('');
+ }, 1);
+ if (!ABORT) doRun();
+ }, 1);
+ } else {
+ doRun();
+ }
+}
+Module['run'] = Module.run = run;
+
+function exit(status) {
+ ABORT = true;
+ EXITSTATUS = status;
+ STACKTOP = initialStackTop;
+
+ // exit the runtime
+ exitRuntime();
+
+ // TODO We should handle this differently based on environment.
+ // In the browser, the best we can do is throw an exception
+ // to halt execution, but in node we could process.exit and
+ // I'd imagine SM shell would have something equivalent.
+ // This would let us set a proper exit status (which
+ // would be great for checking test exit statuses).
+ // https://github.com/kripken/emscripten/issues/1371
+
+ // throw an exception to halt the current execution
+ throw new ExitStatus(status);
+}
+Module['exit'] = Module.exit = exit;
+
+function abort(text) {
+ if (text) {
+ Module.print(text);
+ Module.printErr(text);
+ }
+
+ ABORT = true;
+ EXITSTATUS = 1;
+
+ var extra = '\nIf this abort() is unexpected, build with -s ASSERTIONS=1 which can give more information.';
+
+ throw 'abort() at ' + stackTrace() + extra;
+}
+Module['abort'] = Module.abort = abort;
+
+// {{PRE_RUN_ADDITIONS}}
+
+if (Module['preInit']) {
+ if (typeof Module['preInit'] == 'function') Module['preInit'] = [Module['preInit']];
+ while (Module['preInit'].length > 0) {
+ Module['preInit'].pop()();
+ }
+}
+
+// shouldRunNow refers to calling main(), not run().
+var shouldRunNow = true;
+if (Module['noInitialRun']) {
+ shouldRunNow = false;
+}
+
+
+run([].concat(Module["arguments"]));
diff --git a/deps/v8/test/mjsunit/wasm/embenchen/corrections.js b/deps/v8/test/mjsunit/wasm/embenchen/corrections.js
new file mode 100644
index 0000000000..23bec5f5c7
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/embenchen/corrections.js
@@ -0,0 +1,5986 @@
+// Modified embenchen to direct to asm-wasm.
+// Flags: --expose-wasm
+
+var EXPECTED_OUTPUT = 'final: 40006013:58243.\n';
+var Module = {
+ arguments: [1],
+ print: function(x) {Module.printBuffer += x + '\n';},
+ preRun: [function() {Module.printBuffer = ''}],
+ postRun: [function() {
+ assertEquals(EXPECTED_OUTPUT, Module.printBuffer);
+ }],
+};
+// The Module object: Our interface to the outside world. We import
+// and export values on it, and do the work to get that through
+// closure compiler if necessary. There are various ways Module can be used:
+// 1. Not defined. We create it here
+// 2. A function parameter, function(Module) { ..generated code.. }
+// 3. pre-run appended it, var Module = {}; ..generated code..
+// 4. External script tag defines var Module.
+// We need to do an eval in order to handle the closure compiler
+// case, where this code here is minified but Module was defined
+// elsewhere (e.g. case 4 above). We also need to check if Module
+// already exists (e.g. case 3 above).
+// Note that if you want to run closure, and also to use Module
+// after the generated code, you will need to define var Module = {};
+// before the code. Then that object will be used in the code, and you
+// can continue to use Module afterwards as well.
+var Module;
+if (!Module) Module = (typeof Module !== 'undefined' ? Module : null) || {};
+
+// Sometimes an existing Module object exists with properties
+// meant to overwrite the default module functionality. Here
+// we collect those properties and reapply _after_ we configure
+// the current environment's defaults to avoid having to be so
+// defensive during initialization.
+var moduleOverrides = {};
+for (var key in Module) {
+ if (Module.hasOwnProperty(key)) {
+ moduleOverrides[key] = Module[key];
+ }
+}
+
+// The environment setup code below is customized to use Module.
+// *** Environment setup code ***
+var ENVIRONMENT_IS_NODE = typeof process === 'object' && typeof require === 'function';
+var ENVIRONMENT_IS_WEB = typeof window === 'object';
+var ENVIRONMENT_IS_WORKER = typeof importScripts === 'function';
+var ENVIRONMENT_IS_SHELL = !ENVIRONMENT_IS_WEB && !ENVIRONMENT_IS_NODE && !ENVIRONMENT_IS_WORKER;
+
+if (ENVIRONMENT_IS_NODE) {
+ // Expose functionality in the same simple way that the shells work
+ // Note that we pollute the global namespace here, otherwise we break in node
+ if (!Module['print']) Module['print'] = function print(x) {
+ process['stdout'].write(x + '\n');
+ };
+ if (!Module['printErr']) Module['printErr'] = function printErr(x) {
+ process['stderr'].write(x + '\n');
+ };
+
+ var nodeFS = require('fs');
+ var nodePath = require('path');
+
+ Module['read'] = function read(filename, binary) {
+ filename = nodePath['normalize'](filename);
+ var ret = nodeFS['readFileSync'](filename);
+ // The path is absolute if the normalized version is the same as the resolved.
+ if (!ret && filename != nodePath['resolve'](filename)) {
+ filename = path.join(__dirname, '..', 'src', filename);
+ ret = nodeFS['readFileSync'](filename);
+ }
+ if (ret && !binary) ret = ret.toString();
+ return ret;
+ };
+
+ Module['readBinary'] = function readBinary(filename) { return Module['read'](filename, true) };
+
+ Module['load'] = function load(f) {
+ globalEval(read(f));
+ };
+
+ Module['arguments'] = process['argv'].slice(2);
+
+ module['exports'] = Module;
+}
+else if (ENVIRONMENT_IS_SHELL) {
+ if (!Module['print']) Module['print'] = print;
+ if (typeof printErr != 'undefined') Module['printErr'] = printErr; // not present in v8 or older sm
+
+ if (typeof read != 'undefined') {
+ Module['read'] = read;
+ } else {
+ Module['read'] = function read() { throw 'no read() available (jsc?)' };
+ }
+
+ Module['readBinary'] = function readBinary(f) {
+ return read(f, 'binary');
+ };
+
+ if (typeof scriptArgs != 'undefined') {
+ Module['arguments'] = scriptArgs;
+ } else if (typeof arguments != 'undefined') {
+ Module['arguments'] = arguments;
+ }
+
+ this['Module'] = Module;
+
+ eval("if (typeof gc === 'function' && gc.toString().indexOf('[native code]') > 0) var gc = undefined"); // wipe out the SpiderMonkey shell 'gc' function, which can confuse closure (uses it as a minified name, and it is then initted to a non-falsey value unexpectedly)
+}
+else if (ENVIRONMENT_IS_WEB || ENVIRONMENT_IS_WORKER) {
+ Module['read'] = function read(url) {
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, false);
+ xhr.send(null);
+ return xhr.responseText;
+ };
+
+ if (typeof arguments != 'undefined') {
+ Module['arguments'] = arguments;
+ }
+
+ if (typeof console !== 'undefined') {
+ if (!Module['print']) Module['print'] = function print(x) {
+ console.log(x);
+ };
+ if (!Module['printErr']) Module['printErr'] = function printErr(x) {
+ console.log(x);
+ };
+ } else {
+ // Probably a worker, and without console.log. We can do very little here...
+ var TRY_USE_DUMP = false;
+ if (!Module['print']) Module['print'] = (TRY_USE_DUMP && (typeof(dump) !== "undefined") ? (function(x) {
+ dump(x);
+ }) : (function(x) {
+ // self.postMessage(x); // enable this if you want stdout to be sent as messages
+ }));
+ }
+
+ if (ENVIRONMENT_IS_WEB) {
+ window['Module'] = Module;
+ } else {
+ Module['load'] = importScripts;
+ }
+}
+else {
+ // Unreachable because SHELL is dependant on the others
+ throw 'Unknown runtime environment. Where are we?';
+}
+
+function globalEval(x) {
+ eval.call(null, x);
+}
+if (!Module['load'] == 'undefined' && Module['read']) {
+ Module['load'] = function load(f) {
+ globalEval(Module['read'](f));
+ };
+}
+if (!Module['print']) {
+ Module['print'] = function(){};
+}
+if (!Module['printErr']) {
+ Module['printErr'] = Module['print'];
+}
+if (!Module['arguments']) {
+ Module['arguments'] = [];
+}
+// *** Environment setup code ***
+
+// Closure helpers
+Module.print = Module['print'];
+Module.printErr = Module['printErr'];
+
+// Callbacks
+Module['preRun'] = [];
+Module['postRun'] = [];
+
+// Merge back in the overrides
+for (var key in moduleOverrides) {
+ if (moduleOverrides.hasOwnProperty(key)) {
+ Module[key] = moduleOverrides[key];
+ }
+}
+
+
+
+// === Auto-generated preamble library stuff ===
+
+//========================================
+// Runtime code shared with compiler
+//========================================
+
+var Runtime = {
+ stackSave: function () {
+ return STACKTOP;
+ },
+ stackRestore: function (stackTop) {
+ STACKTOP = stackTop;
+ },
+ forceAlign: function (target, quantum) {
+ quantum = quantum || 4;
+ if (quantum == 1) return target;
+ if (isNumber(target) && isNumber(quantum)) {
+ return Math.ceil(target/quantum)*quantum;
+ } else if (isNumber(quantum) && isPowerOfTwo(quantum)) {
+ return '(((' +target + ')+' + (quantum-1) + ')&' + -quantum + ')';
+ }
+ return 'Math.ceil((' + target + ')/' + quantum + ')*' + quantum;
+ },
+ isNumberType: function (type) {
+ return type in Runtime.INT_TYPES || type in Runtime.FLOAT_TYPES;
+ },
+ isPointerType: function isPointerType(type) {
+ return type[type.length-1] == '*';
+},
+ isStructType: function isStructType(type) {
+ if (isPointerType(type)) return false;
+ if (isArrayType(type)) return true;
+ if (/<?\{ ?[^}]* ?\}>?/.test(type)) return true; // { i32, i8 } etc. - anonymous struct types
+ // See comment in isStructPointerType()
+ return type[0] == '%';
+},
+ INT_TYPES: {"i1":0,"i8":0,"i16":0,"i32":0,"i64":0},
+ FLOAT_TYPES: {"float":0,"double":0},
+ or64: function (x, y) {
+ var l = (x | 0) | (y | 0);
+ var h = (Math.round(x / 4294967296) | Math.round(y / 4294967296)) * 4294967296;
+ return l + h;
+ },
+ and64: function (x, y) {
+ var l = (x | 0) & (y | 0);
+ var h = (Math.round(x / 4294967296) & Math.round(y / 4294967296)) * 4294967296;
+ return l + h;
+ },
+ xor64: function (x, y) {
+ var l = (x | 0) ^ (y | 0);
+ var h = (Math.round(x / 4294967296) ^ Math.round(y / 4294967296)) * 4294967296;
+ return l + h;
+ },
+ getNativeTypeSize: function (type) {
+ switch (type) {
+ case 'i1': case 'i8': return 1;
+ case 'i16': return 2;
+ case 'i32': return 4;
+ case 'i64': return 8;
+ case 'float': return 4;
+ case 'double': return 8;
+ default: {
+ if (type[type.length-1] === '*') {
+ return Runtime.QUANTUM_SIZE; // A pointer
+ } else if (type[0] === 'i') {
+ var bits = parseInt(type.substr(1));
+ assert(bits % 8 === 0);
+ return bits/8;
+ } else {
+ return 0;
+ }
+ }
+ }
+ },
+ getNativeFieldSize: function (type) {
+ return Math.max(Runtime.getNativeTypeSize(type), Runtime.QUANTUM_SIZE);
+ },
+ dedup: function dedup(items, ident) {
+ var seen = {};
+ if (ident) {
+ return items.filter(function(item) {
+ if (seen[item[ident]]) return false;
+ seen[item[ident]] = true;
+ return true;
+ });
+ } else {
+ return items.filter(function(item) {
+ if (seen[item]) return false;
+ seen[item] = true;
+ return true;
+ });
+ }
+},
+ set: function set() {
+ var args = typeof arguments[0] === 'object' ? arguments[0] : arguments;
+ var ret = {};
+ for (var i = 0; i < args.length; i++) {
+ ret[args[i]] = 0;
+ }
+ return ret;
+},
+ STACK_ALIGN: 8,
+ getAlignSize: function (type, size, vararg) {
+ // we align i64s and doubles on 64-bit boundaries, unlike x86
+ if (!vararg && (type == 'i64' || type == 'double')) return 8;
+ if (!type) return Math.min(size, 8); // align structures internally to 64 bits
+ return Math.min(size || (type ? Runtime.getNativeFieldSize(type) : 0), Runtime.QUANTUM_SIZE);
+ },
+ calculateStructAlignment: function calculateStructAlignment(type) {
+ type.flatSize = 0;
+ type.alignSize = 0;
+ var diffs = [];
+ var prev = -1;
+ var index = 0;
+ type.flatIndexes = type.fields.map(function(field) {
+ index++;
+ var size, alignSize;
+ if (Runtime.isNumberType(field) || Runtime.isPointerType(field)) {
+ size = Runtime.getNativeTypeSize(field); // pack char; char; in structs, also char[X]s.
+ alignSize = Runtime.getAlignSize(field, size);
+ } else if (Runtime.isStructType(field)) {
+ if (field[1] === '0') {
+ // this is [0 x something]. When inside another structure like here, it must be at the end,
+ // and it adds no size
+ // XXX this happens in java-nbody for example... assert(index === type.fields.length, 'zero-length in the middle!');
+ size = 0;
+ if (Types.types[field]) {
+ alignSize = Runtime.getAlignSize(null, Types.types[field].alignSize);
+ } else {
+ alignSize = type.alignSize || QUANTUM_SIZE;
+ }
+ } else {
+ size = Types.types[field].flatSize;
+ alignSize = Runtime.getAlignSize(null, Types.types[field].alignSize);
+ }
+ } else if (field[0] == 'b') {
+ // bN, large number field, like a [N x i8]
+ size = field.substr(1)|0;
+ alignSize = 1;
+ } else if (field[0] === '<') {
+ // vector type
+ size = alignSize = Types.types[field].flatSize; // fully aligned
+ } else if (field[0] === 'i') {
+ // illegal integer field, that could not be legalized because it is an internal structure field
+ // it is ok to have such fields, if we just use them as markers of field size and nothing more complex
+ size = alignSize = parseInt(field.substr(1))/8;
+ assert(size % 1 === 0, 'cannot handle non-byte-size field ' + field);
+ } else {
+ assert(false, 'invalid type for calculateStructAlignment');
+ }
+ if (type.packed) alignSize = 1;
+ type.alignSize = Math.max(type.alignSize, alignSize);
+ var curr = Runtime.alignMemory(type.flatSize, alignSize); // if necessary, place this on aligned memory
+ type.flatSize = curr + size;
+ if (prev >= 0) {
+ diffs.push(curr-prev);
+ }
+ prev = curr;
+ return curr;
+ });
+ if (type.name_ && type.name_[0] === '[') {
+ // arrays have 2 elements, so we get the proper difference. then we scale here. that way we avoid
+ // allocating a potentially huge array for [999999 x i8] etc.
+ type.flatSize = parseInt(type.name_.substr(1))*type.flatSize/2;
+ }
+ type.flatSize = Runtime.alignMemory(type.flatSize, type.alignSize);
+ if (diffs.length == 0) {
+ type.flatFactor = type.flatSize;
+ } else if (Runtime.dedup(diffs).length == 1) {
+ type.flatFactor = diffs[0];
+ }
+ type.needsFlattening = (type.flatFactor != 1);
+ return type.flatIndexes;
+ },
+ generateStructInfo: function (struct, typeName, offset) {
+ var type, alignment;
+ if (typeName) {
+ offset = offset || 0;
+ type = (typeof Types === 'undefined' ? Runtime.typeInfo : Types.types)[typeName];
+ if (!type) return null;
+ if (type.fields.length != struct.length) {
+ printErr('Number of named fields must match the type for ' + typeName + ': possibly duplicate struct names. Cannot return structInfo');
+ return null;
+ }
+ alignment = type.flatIndexes;
+ } else {
+ var type = { fields: struct.map(function(item) { return item[0] }) };
+ alignment = Runtime.calculateStructAlignment(type);
+ }
+ var ret = {
+ __size__: type.flatSize
+ };
+ if (typeName) {
+ struct.forEach(function(item, i) {
+ if (typeof item === 'string') {
+ ret[item] = alignment[i] + offset;
+ } else {
+ // embedded struct
+ var key;
+ for (var k in item) key = k;
+ ret[key] = Runtime.generateStructInfo(item[key], type.fields[i], alignment[i]);
+ }
+ });
+ } else {
+ struct.forEach(function(item, i) {
+ ret[item[1]] = alignment[i];
+ });
+ }
+ return ret;
+ },
+ dynCall: function (sig, ptr, args) {
+ if (args && args.length) {
+ if (!args.splice) args = Array.prototype.slice.call(args);
+ args.splice(0, 0, ptr);
+ return Module['dynCall_' + sig].apply(null, args);
+ } else {
+ return Module['dynCall_' + sig].call(null, ptr);
+ }
+ },
+ functionPointers: [],
+ addFunction: function (func) {
+ for (var i = 0; i < Runtime.functionPointers.length; i++) {
+ if (!Runtime.functionPointers[i]) {
+ Runtime.functionPointers[i] = func;
+ return 2*(1 + i);
+ }
+ }
+ throw 'Finished up all reserved function pointers. Use a higher value for RESERVED_FUNCTION_POINTERS.';
+ },
+ removeFunction: function (index) {
+ Runtime.functionPointers[(index-2)/2] = null;
+ },
+ getAsmConst: function (code, numArgs) {
+ // code is a constant string on the heap, so we can cache these
+ if (!Runtime.asmConstCache) Runtime.asmConstCache = {};
+ var func = Runtime.asmConstCache[code];
+ if (func) return func;
+ var args = [];
+ for (var i = 0; i < numArgs; i++) {
+ args.push(String.fromCharCode(36) + i); // $0, $1 etc
+ }
+ var source = Pointer_stringify(code);
+ if (source[0] === '"') {
+ // tolerate EM_ASM("..code..") even though EM_ASM(..code..) is correct
+ if (source.indexOf('"', 1) === source.length-1) {
+ source = source.substr(1, source.length-2);
+ } else {
+ // something invalid happened, e.g. EM_ASM("..code($0)..", input)
+ abort('invalid EM_ASM input |' + source + '|. Please use EM_ASM(..code..) (no quotes) or EM_ASM({ ..code($0).. }, input) (to input values)');
+ }
+ }
+ try {
+ var evalled = eval('(function(' + args.join(',') + '){ ' + source + ' })'); // new Function does not allow upvars in node
+ } catch(e) {
+ Module.printErr('error in executing inline EM_ASM code: ' + e + ' on: \n\n' + source + '\n\nwith args |' + args + '| (make sure to use the right one out of EM_ASM, EM_ASM_ARGS, etc.)');
+ throw e;
+ }
+ return Runtime.asmConstCache[code] = evalled;
+ },
+ warnOnce: function (text) {
+ if (!Runtime.warnOnce.shown) Runtime.warnOnce.shown = {};
+ if (!Runtime.warnOnce.shown[text]) {
+ Runtime.warnOnce.shown[text] = 1;
+ Module.printErr(text);
+ }
+ },
+ funcWrappers: {},
+ getFuncWrapper: function (func, sig) {
+ assert(sig);
+ if (!Runtime.funcWrappers[func]) {
+ Runtime.funcWrappers[func] = function dynCall_wrapper() {
+ return Runtime.dynCall(sig, func, arguments);
+ };
+ }
+ return Runtime.funcWrappers[func];
+ },
+ UTF8Processor: function () {
+ var buffer = [];
+ var needed = 0;
+ this.processCChar = function (code) {
+ code = code & 0xFF;
+
+ if (buffer.length == 0) {
+ if ((code & 0x80) == 0x00) { // 0xxxxxxx
+ return String.fromCharCode(code);
+ }
+ buffer.push(code);
+ if ((code & 0xE0) == 0xC0) { // 110xxxxx
+ needed = 1;
+ } else if ((code & 0xF0) == 0xE0) { // 1110xxxx
+ needed = 2;
+ } else { // 11110xxx
+ needed = 3;
+ }
+ return '';
+ }
+
+ if (needed) {
+ buffer.push(code);
+ needed--;
+ if (needed > 0) return '';
+ }
+
+ var c1 = buffer[0];
+ var c2 = buffer[1];
+ var c3 = buffer[2];
+ var c4 = buffer[3];
+ var ret;
+ if (buffer.length == 2) {
+ ret = String.fromCharCode(((c1 & 0x1F) << 6) | (c2 & 0x3F));
+ } else if (buffer.length == 3) {
+ ret = String.fromCharCode(((c1 & 0x0F) << 12) | ((c2 & 0x3F) << 6) | (c3 & 0x3F));
+ } else {
+ // http://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
+ var codePoint = ((c1 & 0x07) << 18) | ((c2 & 0x3F) << 12) |
+ ((c3 & 0x3F) << 6) | (c4 & 0x3F);
+ ret = String.fromCharCode(
+ Math.floor((codePoint - 0x10000) / 0x400) + 0xD800,
+ (codePoint - 0x10000) % 0x400 + 0xDC00);
+ }
+ buffer.length = 0;
+ return ret;
+ }
+ this.processJSString = function processJSString(string) {
+ /* TODO: use TextEncoder when present,
+ var encoder = new TextEncoder();
+ encoder['encoding'] = "utf-8";
+ var utf8Array = encoder['encode'](aMsg.data);
+ */
+ string = unescape(encodeURIComponent(string));
+ var ret = [];
+ for (var i = 0; i < string.length; i++) {
+ ret.push(string.charCodeAt(i));
+ }
+ return ret;
+ }
+ },
+ getCompilerSetting: function (name) {
+ throw 'You must build with -s RETAIN_COMPILER_SETTINGS=1 for Runtime.getCompilerSetting or emscripten_get_compiler_setting to work';
+ },
+ stackAlloc: function (size) { var ret = STACKTOP;STACKTOP = (STACKTOP + size)|0;STACKTOP = (((STACKTOP)+7)&-8); return ret; },
+ staticAlloc: function (size) { var ret = STATICTOP;STATICTOP = (STATICTOP + size)|0;STATICTOP = (((STATICTOP)+7)&-8); return ret; },
+ dynamicAlloc: function (size) { var ret = DYNAMICTOP;DYNAMICTOP = (DYNAMICTOP + size)|0;DYNAMICTOP = (((DYNAMICTOP)+7)&-8); if (DYNAMICTOP >= TOTAL_MEMORY) enlargeMemory();; return ret; },
+ alignMemory: function (size,quantum) { var ret = size = Math.ceil((size)/(quantum ? quantum : 8))*(quantum ? quantum : 8); return ret; },
+ makeBigInt: function (low,high,unsigned) { var ret = (unsigned ? ((+((low>>>0)))+((+((high>>>0)))*(+4294967296))) : ((+((low>>>0)))+((+((high|0)))*(+4294967296)))); return ret; },
+ GLOBAL_BASE: 8,
+ QUANTUM_SIZE: 4,
+ __dummy__: 0
+}
+
+
+Module['Runtime'] = Runtime;
+
+
+
+
+
+
+
+
+
+//========================================
+// Runtime essentials
+//========================================
+
+var __THREW__ = 0; // Used in checking for thrown exceptions.
+
+var ABORT = false; // whether we are quitting the application. no code should run after this. set in exit() and abort()
+var EXITSTATUS = 0;
+
+var undef = 0;
+// tempInt is used for 32-bit signed values or smaller. tempBigInt is used
+// for 32-bit unsigned values or more than 32 bits. TODO: audit all uses of tempInt
+var tempValue, tempInt, tempBigInt, tempInt2, tempBigInt2, tempPair, tempBigIntI, tempBigIntR, tempBigIntS, tempBigIntP, tempBigIntD, tempDouble, tempFloat;
+var tempI64, tempI64b;
+var tempRet0, tempRet1, tempRet2, tempRet3, tempRet4, tempRet5, tempRet6, tempRet7, tempRet8, tempRet9;
+
+function assert(condition, text) {
+ if (!condition) {
+ abort('Assertion failed: ' + text);
+ }
+}
+
+var globalScope = this;
+
+// C calling interface. A convenient way to call C functions (in C files, or
+// defined with extern "C").
+//
+// Note: LLVM optimizations can inline and remove functions, after which you will not be
+// able to call them. Closure can also do so. To avoid that, add your function to
+// the exports using something like
+//
+// -s EXPORTED_FUNCTIONS='["_main", "_myfunc"]'
+//
+// @param ident The name of the C function (note that C++ functions will be name-mangled - use extern "C")
+// @param returnType The return type of the function, one of the JS types 'number', 'string' or 'array' (use 'number' for any C pointer, and
+// 'array' for JavaScript arrays and typed arrays; note that arrays are 8-bit).
+// @param argTypes An array of the types of arguments for the function (if there are no arguments, this can be ommitted). Types are as in returnType,
+// except that 'array' is not possible (there is no way for us to know the length of the array)
+// @param args An array of the arguments to the function, as native JS values (as in returnType)
+// Note that string arguments will be stored on the stack (the JS string will become a C string on the stack).
+// @return The return value, as a native JS value (as in returnType)
+function ccall(ident, returnType, argTypes, args) {
+ return ccallFunc(getCFunc(ident), returnType, argTypes, args);
+}
+Module["ccall"] = ccall;
+
+// Returns the C function with a specified identifier (for C++, you need to do manual name mangling)
+function getCFunc(ident) {
+ try {
+ var func = Module['_' + ident]; // closure exported function
+ if (!func) func = eval('_' + ident); // explicit lookup
+ } catch(e) {
+ }
+ assert(func, 'Cannot call unknown function ' + ident + ' (perhaps LLVM optimizations or closure removed it?)');
+ return func;
+}
+
+// Internal function that does a C call using a function, not an identifier
+function ccallFunc(func, returnType, argTypes, args) {
+ var stack = 0;
+ function toC(value, type) {
+ if (type == 'string') {
+ if (value === null || value === undefined || value === 0) return 0; // null string
+ value = intArrayFromString(value);
+ type = 'array';
+ }
+ if (type == 'array') {
+ if (!stack) stack = Runtime.stackSave();
+ var ret = Runtime.stackAlloc(value.length);
+ writeArrayToMemory(value, ret);
+ return ret;
+ }
+ return value;
+ }
+ function fromC(value, type) {
+ if (type == 'string') {
+ return Pointer_stringify(value);
+ }
+ assert(type != 'array');
+ return value;
+ }
+ var i = 0;
+ var cArgs = args ? args.map(function(arg) {
+ return toC(arg, argTypes[i++]);
+ }) : [];
+ var ret = fromC(func.apply(null, cArgs), returnType);
+ if (stack) Runtime.stackRestore(stack);
+ return ret;
+}
+
+// Returns a native JS wrapper for a C function. This is similar to ccall, but
+// returns a function you can call repeatedly in a normal way. For example:
+//
+// var my_function = cwrap('my_c_function', 'number', ['number', 'number']);
+// alert(my_function(5, 22));
+// alert(my_function(99, 12));
+//
+function cwrap(ident, returnType, argTypes) {
+ var func = getCFunc(ident);
+ return function() {
+ return ccallFunc(func, returnType, argTypes, Array.prototype.slice.call(arguments));
+ }
+}
+Module["cwrap"] = cwrap;
+
+// Sets a value in memory in a dynamic way at run-time. Uses the
+// type data. This is the same as makeSetValue, except that
+// makeSetValue is done at compile-time and generates the needed
+// code then, whereas this function picks the right code at
+// run-time.
+// Note that setValue and getValue only do *aligned* writes and reads!
+// Note that ccall uses JS types as for defining types, while setValue and
+// getValue need LLVM types ('i8', 'i32') - this is a lower-level operation
+function setValue(ptr, value, type, noSafe) {
+ type = type || 'i8';
+ if (type.charAt(type.length-1) === '*') type = 'i32'; // pointers are 32-bit
+ switch(type) {
+ case 'i1': HEAP8[(ptr)]=value; break;
+ case 'i8': HEAP8[(ptr)]=value; break;
+ case 'i16': HEAP16[((ptr)>>1)]=value; break;
+ case 'i32': HEAP32[((ptr)>>2)]=value; break;
+ case 'i64': (tempI64 = [value>>>0,(tempDouble=value,(+(Math_abs(tempDouble))) >= (+1) ? (tempDouble > (+0) ? ((Math_min((+(Math_floor((tempDouble)/(+4294967296)))), (+4294967295)))|0)>>>0 : (~~((+(Math_ceil((tempDouble - +(((~~(tempDouble)))>>>0))/(+4294967296))))))>>>0) : 0)],HEAP32[((ptr)>>2)]=tempI64[0],HEAP32[(((ptr)+(4))>>2)]=tempI64[1]); break;
+ case 'float': HEAPF32[((ptr)>>2)]=value; break;
+ case 'double': HEAPF64[((ptr)>>3)]=value; break;
+ default: abort('invalid type for setValue: ' + type);
+ }
+}
+Module['setValue'] = setValue;
+
+// Parallel to setValue.
+function getValue(ptr, type, noSafe) {
+ type = type || 'i8';
+ if (type.charAt(type.length-1) === '*') type = 'i32'; // pointers are 32-bit
+ switch(type) {
+ case 'i1': return HEAP8[(ptr)];
+ case 'i8': return HEAP8[(ptr)];
+ case 'i16': return HEAP16[((ptr)>>1)];
+ case 'i32': return HEAP32[((ptr)>>2)];
+ case 'i64': return HEAP32[((ptr)>>2)];
+ case 'float': return HEAPF32[((ptr)>>2)];
+ case 'double': return HEAPF64[((ptr)>>3)];
+ default: abort('invalid type for setValue: ' + type);
+ }
+ return null;
+}
+Module['getValue'] = getValue;
+
+var ALLOC_NORMAL = 0; // Tries to use _malloc()
+var ALLOC_STACK = 1; // Lives for the duration of the current function call
+var ALLOC_STATIC = 2; // Cannot be freed
+var ALLOC_DYNAMIC = 3; // Cannot be freed except through sbrk
+var ALLOC_NONE = 4; // Do not allocate
+Module['ALLOC_NORMAL'] = ALLOC_NORMAL;
+Module['ALLOC_STACK'] = ALLOC_STACK;
+Module['ALLOC_STATIC'] = ALLOC_STATIC;
+Module['ALLOC_DYNAMIC'] = ALLOC_DYNAMIC;
+Module['ALLOC_NONE'] = ALLOC_NONE;
+
+// allocate(): This is for internal use. You can use it yourself as well, but the interface
+// is a little tricky (see docs right below). The reason is that it is optimized
+// for multiple syntaxes to save space in generated code. So you should
+// normally not use allocate(), and instead allocate memory using _malloc(),
+// initialize it with setValue(), and so forth.
+// @slab: An array of data, or a number. If a number, then the size of the block to allocate,
+// in *bytes* (note that this is sometimes confusing: the next parameter does not
+// affect this!)
+// @types: Either an array of types, one for each byte (or 0 if no type at that position),
+// or a single type which is used for the entire block. This only matters if there
+// is initial data - if @slab is a number, then this does not matter at all and is
+// ignored.
+// @allocator: How to allocate memory, see ALLOC_*
+function allocate(slab, types, allocator, ptr) {
+ var zeroinit, size;
+ if (typeof slab === 'number') {
+ zeroinit = true;
+ size = slab;
+ } else {
+ zeroinit = false;
+ size = slab.length;
+ }
+
+ var singleType = typeof types === 'string' ? types : null;
+
+ var ret;
+ if (allocator == ALLOC_NONE) {
+ ret = ptr;
+ } else {
+ ret = [_malloc, Runtime.stackAlloc, Runtime.staticAlloc, Runtime.dynamicAlloc][allocator === undefined ? ALLOC_STATIC : allocator](Math.max(size, singleType ? 1 : types.length));
+ }
+
+ if (zeroinit) {
+ var ptr = ret, stop;
+ assert((ret & 3) == 0);
+ stop = ret + (size & ~3);
+ for (; ptr < stop; ptr += 4) {
+ HEAP32[((ptr)>>2)]=0;
+ }
+ stop = ret + size;
+ while (ptr < stop) {
+ HEAP8[((ptr++)|0)]=0;
+ }
+ return ret;
+ }
+
+ if (singleType === 'i8') {
+ if (slab.subarray || slab.slice) {
+ HEAPU8.set(slab, ret);
+ } else {
+ HEAPU8.set(new Uint8Array(slab), ret);
+ }
+ return ret;
+ }
+
+ var i = 0, type, typeSize, previousType;
+ while (i < size) {
+ var curr = slab[i];
+
+ if (typeof curr === 'function') {
+ curr = Runtime.getFunctionIndex(curr);
+ }
+
+ type = singleType || types[i];
+ if (type === 0) {
+ i++;
+ continue;
+ }
+
+ if (type == 'i64') type = 'i32'; // special case: we have one i32 here, and one i32 later
+
+ setValue(ret+i, curr, type);
+
+ // no need to look up size unless type changes, so cache it
+ if (previousType !== type) {
+ typeSize = Runtime.getNativeTypeSize(type);
+ previousType = type;
+ }
+ i += typeSize;
+ }
+
+ return ret;
+}
+Module['allocate'] = allocate;
+
+function Pointer_stringify(ptr, /* optional */ length) {
+ // TODO: use TextDecoder
+ // Find the length, and check for UTF while doing so
+ var hasUtf = false;
+ var t;
+ var i = 0;
+ while (1) {
+ t = HEAPU8[(((ptr)+(i))|0)];
+ if (t >= 128) hasUtf = true;
+ else if (t == 0 && !length) break;
+ i++;
+ if (length && i == length) break;
+ }
+ if (!length) length = i;
+
+ var ret = '';
+
+ if (!hasUtf) {
+ var MAX_CHUNK = 1024; // split up into chunks, because .apply on a huge string can overflow the stack
+ var curr;
+ while (length > 0) {
+ curr = String.fromCharCode.apply(String, HEAPU8.subarray(ptr, ptr + Math.min(length, MAX_CHUNK)));
+ ret = ret ? ret + curr : curr;
+ ptr += MAX_CHUNK;
+ length -= MAX_CHUNK;
+ }
+ return ret;
+ }
+
+ var utf8 = new Runtime.UTF8Processor();
+ for (i = 0; i < length; i++) {
+ t = HEAPU8[(((ptr)+(i))|0)];
+ ret += utf8.processCChar(t);
+ }
+ return ret;
+}
+Module['Pointer_stringify'] = Pointer_stringify;
+
+// Given a pointer 'ptr' to a null-terminated UTF16LE-encoded string in the emscripten HEAP, returns
+// a copy of that string as a Javascript String object.
+function UTF16ToString(ptr) {
+ var i = 0;
+
+ var str = '';
+ while (1) {
+ var codeUnit = HEAP16[(((ptr)+(i*2))>>1)];
+ if (codeUnit == 0)
+ return str;
+ ++i;
+ // fromCharCode constructs a character from a UTF-16 code unit, so we can pass the UTF16 string right through.
+ str += String.fromCharCode(codeUnit);
+ }
+}
+Module['UTF16ToString'] = UTF16ToString;
+
+// Copies the given Javascript String object 'str' to the emscripten HEAP at address 'outPtr',
+// null-terminated and encoded in UTF16LE form. The copy will require at most (str.length*2+1)*2 bytes of space in the HEAP.
+function stringToUTF16(str, outPtr) {
+ for(var i = 0; i < str.length; ++i) {
+ // charCodeAt returns a UTF-16 encoded code unit, so it can be directly written to the HEAP.
+ var codeUnit = str.charCodeAt(i); // possibly a lead surrogate
+ HEAP16[(((outPtr)+(i*2))>>1)]=codeUnit;
+ }
+ // Null-terminate the pointer to the HEAP.
+ HEAP16[(((outPtr)+(str.length*2))>>1)]=0;
+}
+Module['stringToUTF16'] = stringToUTF16;
+
+// Given a pointer 'ptr' to a null-terminated UTF32LE-encoded string in the emscripten HEAP, returns
+// a copy of that string as a Javascript String object.
+function UTF32ToString(ptr) {
+ var i = 0;
+
+ var str = '';
+ while (1) {
+ var utf32 = HEAP32[(((ptr)+(i*4))>>2)];
+ if (utf32 == 0)
+ return str;
+ ++i;
+ // Gotcha: fromCharCode constructs a character from a UTF-16 encoded code (pair), not from a Unicode code point! So encode the code point to UTF-16 for constructing.
+ if (utf32 >= 0x10000) {
+ var ch = utf32 - 0x10000;
+ str += String.fromCharCode(0xD800 | (ch >> 10), 0xDC00 | (ch & 0x3FF));
+ } else {
+ str += String.fromCharCode(utf32);
+ }
+ }
+}
+Module['UTF32ToString'] = UTF32ToString;
+
+// Copies the given Javascript String object 'str' to the emscripten HEAP at address 'outPtr',
+// null-terminated and encoded in UTF32LE form. The copy will require at most (str.length+1)*4 bytes of space in the HEAP,
+// but can use less, since str.length does not return the number of characters in the string, but the number of UTF-16 code units in the string.
+function stringToUTF32(str, outPtr) {
+ var iChar = 0;
+ for(var iCodeUnit = 0; iCodeUnit < str.length; ++iCodeUnit) {
+ // Gotcha: charCodeAt returns a 16-bit word that is a UTF-16 encoded code unit, not a Unicode code point of the character! We must decode the string to UTF-32 to the heap.
+ var codeUnit = str.charCodeAt(iCodeUnit); // possibly a lead surrogate
+ if (codeUnit >= 0xD800 && codeUnit <= 0xDFFF) {
+ var trailSurrogate = str.charCodeAt(++iCodeUnit);
+ codeUnit = 0x10000 + ((codeUnit & 0x3FF) << 10) | (trailSurrogate & 0x3FF);
+ }
+ HEAP32[(((outPtr)+(iChar*4))>>2)]=codeUnit;
+ ++iChar;
+ }
+ // Null-terminate the pointer to the HEAP.
+ HEAP32[(((outPtr)+(iChar*4))>>2)]=0;
+}
+Module['stringToUTF32'] = stringToUTF32;
+
+function demangle(func) {
+ var i = 3;
+ // params, etc.
+ var basicTypes = {
+ 'v': 'void',
+ 'b': 'bool',
+ 'c': 'char',
+ 's': 'short',
+ 'i': 'int',
+ 'l': 'long',
+ 'f': 'float',
+ 'd': 'double',
+ 'w': 'wchar_t',
+ 'a': 'signed char',
+ 'h': 'unsigned char',
+ 't': 'unsigned short',
+ 'j': 'unsigned int',
+ 'm': 'unsigned long',
+ 'x': 'long long',
+ 'y': 'unsigned long long',
+ 'z': '...'
+ };
+ var subs = [];
+ var first = true;
+ function dump(x) {
+ //return;
+ if (x) Module.print(x);
+ Module.print(func);
+ var pre = '';
+ for (var a = 0; a < i; a++) pre += ' ';
+ Module.print (pre + '^');
+ }
+ function parseNested() {
+ i++;
+ if (func[i] === 'K') i++; // ignore const
+ var parts = [];
+ while (func[i] !== 'E') {
+ if (func[i] === 'S') { // substitution
+ i++;
+ var next = func.indexOf('_', i);
+ var num = func.substring(i, next) || 0;
+ parts.push(subs[num] || '?');
+ i = next+1;
+ continue;
+ }
+ if (func[i] === 'C') { // constructor
+ parts.push(parts[parts.length-1]);
+ i += 2;
+ continue;
+ }
+ var size = parseInt(func.substr(i));
+ var pre = size.toString().length;
+ if (!size || !pre) { i--; break; } // counter i++ below us
+ var curr = func.substr(i + pre, size);
+ parts.push(curr);
+ subs.push(curr);
+ i += pre + size;
+ }
+ i++; // skip E
+ return parts;
+ }
+ function parse(rawList, limit, allowVoid) { // main parser
+ limit = limit || Infinity;
+ var ret = '', list = [];
+ function flushList() {
+ return '(' + list.join(', ') + ')';
+ }
+ var name;
+ if (func[i] === 'N') {
+ // namespaced N-E
+ name = parseNested().join('::');
+ limit--;
+ if (limit === 0) return rawList ? [name] : name;
+ } else {
+ // not namespaced
+ if (func[i] === 'K' || (first && func[i] === 'L')) i++; // ignore const and first 'L'
+ var size = parseInt(func.substr(i));
+ if (size) {
+ var pre = size.toString().length;
+ name = func.substr(i + pre, size);
+ i += pre + size;
+ }
+ }
+ first = false;
+ if (func[i] === 'I') {
+ i++;
+ var iList = parse(true);
+ var iRet = parse(true, 1, true);
+ ret += iRet[0] + ' ' + name + '<' + iList.join(', ') + '>';
+ } else {
+ ret = name;
+ }
+ paramLoop: while (i < func.length && limit-- > 0) {
+ //dump('paramLoop');
+ var c = func[i++];
+ if (c in basicTypes) {
+ list.push(basicTypes[c]);
+ } else {
+ switch (c) {
+ case 'P': list.push(parse(true, 1, true)[0] + '*'); break; // pointer
+ case 'R': list.push(parse(true, 1, true)[0] + '&'); break; // reference
+ case 'L': { // literal
+ i++; // skip basic type
+ var end = func.indexOf('E', i);
+ var size = end - i;
+ list.push(func.substr(i, size));
+ i += size + 2; // size + 'EE'
+ break;
+ }
+ case 'A': { // array
+ var size = parseInt(func.substr(i));
+ i += size.toString().length;
+ if (func[i] !== '_') throw '?';
+ i++; // skip _
+ list.push(parse(true, 1, true)[0] + ' [' + size + ']');
+ break;
+ }
+ case 'E': break paramLoop;
+ default: ret += '?' + c; break paramLoop;
+ }
+ }
+ }
+ if (!allowVoid && list.length === 1 && list[0] === 'void') list = []; // avoid (void)
+ if (rawList) {
+ if (ret) {
+ list.push(ret + '?');
+ }
+ return list;
+ } else {
+ return ret + flushList();
+ }
+ }
+ try {
+ // Special-case the entry point, since its name differs from other name mangling.
+ if (func == 'Object._main' || func == '_main') {
+ return 'main()';
+ }
+ if (typeof func === 'number') func = Pointer_stringify(func);
+ if (func[0] !== '_') return func;
+ if (func[1] !== '_') return func; // C function
+ if (func[2] !== 'Z') return func;
+ switch (func[3]) {
+ case 'n': return 'operator new()';
+ case 'd': return 'operator delete()';
+ }
+ return parse();
+ } catch(e) {
+ return func;
+ }
+}
+
+function demangleAll(text) {
+ return text.replace(/__Z[\w\d_]+/g, function(x) { var y = demangle(x); return x === y ? x : (x + ' [' + y + ']') });
+}
+
+function stackTrace() {
+ var stack = new Error().stack;
+ return stack ? demangleAll(stack) : '(no stack trace available)'; // Stack trace is not available at least on IE10 and Safari 6.
+}
+
+// Memory management
+
+var PAGE_SIZE = 4096;
+function alignMemoryPage(x) {
+ return (x+4095)&-4096;
+}
+
+var HEAP;
+var HEAP8, HEAPU8, HEAP16, HEAPU16, HEAP32, HEAPU32, HEAPF32, HEAPF64;
+
+var STATIC_BASE = 0, STATICTOP = 0, staticSealed = false; // static area
+var STACK_BASE = 0, STACKTOP = 0, STACK_MAX = 0; // stack area
+var DYNAMIC_BASE = 0, DYNAMICTOP = 0; // dynamic area handled by sbrk
+
+function enlargeMemory() {
+ abort('Cannot enlarge memory arrays. Either (1) compile with -s TOTAL_MEMORY=X with X higher than the current value ' + TOTAL_MEMORY + ', (2) compile with ALLOW_MEMORY_GROWTH which adjusts the size at runtime but prevents some optimizations, or (3) set Module.TOTAL_MEMORY before the program runs.');
+}
+
+var TOTAL_STACK = Module['TOTAL_STACK'] || 5242880;
+var TOTAL_MEMORY = Module['TOTAL_MEMORY'] || 134217728;
+var FAST_MEMORY = Module['FAST_MEMORY'] || 2097152;
+
+var totalMemory = 4096;
+while (totalMemory < TOTAL_MEMORY || totalMemory < 2*TOTAL_STACK) {
+ if (totalMemory < 16*1024*1024) {
+ totalMemory *= 2;
+ } else {
+ totalMemory += 16*1024*1024
+ }
+}
+if (totalMemory !== TOTAL_MEMORY) {
+ Module.printErr('increasing TOTAL_MEMORY to ' + totalMemory + ' to be more reasonable');
+ TOTAL_MEMORY = totalMemory;
+}
+
+// Initialize the runtime's memory
+// check for full engine support (use string 'subarray' to avoid closure compiler confusion)
+assert(typeof Int32Array !== 'undefined' && typeof Float64Array !== 'undefined' && !!(new Int32Array(1)['subarray']) && !!(new Int32Array(1)['set']),
+ 'JS engine does not provide full typed array support');
+
+var buffer = new ArrayBuffer(TOTAL_MEMORY);
+HEAP8 = new Int8Array(buffer);
+HEAP16 = new Int16Array(buffer);
+HEAP32 = new Int32Array(buffer);
+HEAPU8 = new Uint8Array(buffer);
+HEAPU16 = new Uint16Array(buffer);
+HEAPU32 = new Uint32Array(buffer);
+HEAPF32 = new Float32Array(buffer);
+HEAPF64 = new Float64Array(buffer);
+
+// Endianness check (note: assumes compiler arch was little-endian)
+HEAP32[0] = 255;
+assert(HEAPU8[0] === 255 && HEAPU8[3] === 0, 'Typed arrays 2 must be run on a little-endian system');
+
+Module['HEAP'] = HEAP;
+Module['HEAP8'] = HEAP8;
+Module['HEAP16'] = HEAP16;
+Module['HEAP32'] = HEAP32;
+Module['HEAPU8'] = HEAPU8;
+Module['HEAPU16'] = HEAPU16;
+Module['HEAPU32'] = HEAPU32;
+Module['HEAPF32'] = HEAPF32;
+Module['HEAPF64'] = HEAPF64;
+
+function callRuntimeCallbacks(callbacks) {
+ while(callbacks.length > 0) {
+ var callback = callbacks.shift();
+ if (typeof callback == 'function') {
+ callback();
+ continue;
+ }
+ var func = callback.func;
+ if (typeof func === 'number') {
+ if (callback.arg === undefined) {
+ Runtime.dynCall('v', func);
+ } else {
+ Runtime.dynCall('vi', func, [callback.arg]);
+ }
+ } else {
+ func(callback.arg === undefined ? null : callback.arg);
+ }
+ }
+}
+
+var __ATPRERUN__ = []; // functions called before the runtime is initialized
+var __ATINIT__ = []; // functions called during startup
+var __ATMAIN__ = []; // functions called when main() is to be run
+var __ATEXIT__ = []; // functions called during shutdown
+var __ATPOSTRUN__ = []; // functions called after the runtime has exited
+
+var runtimeInitialized = false;
+
+function preRun() {
+ // compatibility - merge in anything from Module['preRun'] at this time
+ if (Module['preRun']) {
+ if (typeof Module['preRun'] == 'function') Module['preRun'] = [Module['preRun']];
+ while (Module['preRun'].length) {
+ addOnPreRun(Module['preRun'].shift());
+ }
+ }
+ callRuntimeCallbacks(__ATPRERUN__);
+}
+
+function ensureInitRuntime() {
+ if (runtimeInitialized) return;
+ runtimeInitialized = true;
+ callRuntimeCallbacks(__ATINIT__);
+}
+
+function preMain() {
+ callRuntimeCallbacks(__ATMAIN__);
+}
+
+function exitRuntime() {
+ callRuntimeCallbacks(__ATEXIT__);
+}
+
+function postRun() {
+ // compatibility - merge in anything from Module['postRun'] at this time
+ if (Module['postRun']) {
+ if (typeof Module['postRun'] == 'function') Module['postRun'] = [Module['postRun']];
+ while (Module['postRun'].length) {
+ addOnPostRun(Module['postRun'].shift());
+ }
+ }
+ callRuntimeCallbacks(__ATPOSTRUN__);
+}
+
+function addOnPreRun(cb) {
+ __ATPRERUN__.unshift(cb);
+}
+Module['addOnPreRun'] = Module.addOnPreRun = addOnPreRun;
+
+function addOnInit(cb) {
+ __ATINIT__.unshift(cb);
+}
+Module['addOnInit'] = Module.addOnInit = addOnInit;
+
+function addOnPreMain(cb) {
+ __ATMAIN__.unshift(cb);
+}
+Module['addOnPreMain'] = Module.addOnPreMain = addOnPreMain;
+
+function addOnExit(cb) {
+ __ATEXIT__.unshift(cb);
+}
+Module['addOnExit'] = Module.addOnExit = addOnExit;
+
+function addOnPostRun(cb) {
+ __ATPOSTRUN__.unshift(cb);
+}
+Module['addOnPostRun'] = Module.addOnPostRun = addOnPostRun;
+
+// Tools
+
+// This processes a JS string into a C-line array of numbers, 0-terminated.
+// For LLVM-originating strings, see parser.js:parseLLVMString function
+function intArrayFromString(stringy, dontAddNull, length /* optional */) {
+ var ret = (new Runtime.UTF8Processor()).processJSString(stringy);
+ if (length) {
+ ret.length = length;
+ }
+ if (!dontAddNull) {
+ ret.push(0);
+ }
+ return ret;
+}
+Module['intArrayFromString'] = intArrayFromString;
+
+function intArrayToString(array) {
+ var ret = [];
+ for (var i = 0; i < array.length; i++) {
+ var chr = array[i];
+ if (chr > 0xFF) {
+ chr &= 0xFF;
+ }
+ ret.push(String.fromCharCode(chr));
+ }
+ return ret.join('');
+}
+Module['intArrayToString'] = intArrayToString;
+
+// Write a Javascript array to somewhere in the heap
+function writeStringToMemory(string, buffer, dontAddNull) {
+ var array = intArrayFromString(string, dontAddNull);
+ var i = 0;
+ while (i < array.length) {
+ var chr = array[i];
+ HEAP8[(((buffer)+(i))|0)]=chr;
+ i = i + 1;
+ }
+}
+Module['writeStringToMemory'] = writeStringToMemory;
+
+function writeArrayToMemory(array, buffer) {
+ for (var i = 0; i < array.length; i++) {
+ HEAP8[(((buffer)+(i))|0)]=array[i];
+ }
+}
+Module['writeArrayToMemory'] = writeArrayToMemory;
+
+function writeAsciiToMemory(str, buffer, dontAddNull) {
+ for (var i = 0; i < str.length; i++) {
+ HEAP8[(((buffer)+(i))|0)]=str.charCodeAt(i);
+ }
+ if (!dontAddNull) HEAP8[(((buffer)+(str.length))|0)]=0;
+}
+Module['writeAsciiToMemory'] = writeAsciiToMemory;
+
+function unSign(value, bits, ignore) {
+ if (value >= 0) {
+ return value;
+ }
+ return bits <= 32 ? 2*Math.abs(1 << (bits-1)) + value // Need some trickery, since if bits == 32, we are right at the limit of the bits JS uses in bitshifts
+ : Math.pow(2, bits) + value;
+}
+function reSign(value, bits, ignore) {
+ if (value <= 0) {
+ return value;
+ }
+ var half = bits <= 32 ? Math.abs(1 << (bits-1)) // abs is needed if bits == 32
+ : Math.pow(2, bits-1);
+ if (value >= half && (bits <= 32 || value > half)) { // for huge values, we can hit the precision limit and always get true here. so don't do that
+ // but, in general there is no perfect solution here. With 64-bit ints, we get rounding and errors
+ // TODO: In i64 mode 1, resign the two parts separately and safely
+ value = -2*half + value; // Cannot bitshift half, as it may be at the limit of the bits JS uses in bitshifts
+ }
+ return value;
+}
+
+// check for imul support, and also for correctness ( https://bugs.webkit.org/show_bug.cgi?id=126345 )
+if (!Math['imul'] || Math['imul'](0xffffffff, 5) !== -5) Math['imul'] = function imul(a, b) {
+ var ah = a >>> 16;
+ var al = a & 0xffff;
+ var bh = b >>> 16;
+ var bl = b & 0xffff;
+ return (al*bl + ((ah*bl + al*bh) << 16))|0;
+};
+Math.imul = Math['imul'];
+
+
+var Math_abs = Math.abs;
+var Math_cos = Math.cos;
+var Math_sin = Math.sin;
+var Math_tan = Math.tan;
+var Math_acos = Math.acos;
+var Math_asin = Math.asin;
+var Math_atan = Math.atan;
+var Math_atan2 = Math.atan2;
+var Math_exp = Math.exp;
+var Math_log = Math.log;
+var Math_sqrt = Math.sqrt;
+var Math_ceil = Math.ceil;
+var Math_floor = Math.floor;
+var Math_pow = Math.pow;
+var Math_imul = Math.imul;
+var Math_fround = Math.fround;
+var Math_min = Math.min;
+
+// A counter of dependencies for calling run(). If we need to
+// do asynchronous work before running, increment this and
+// decrement it. Incrementing must happen in a place like
+// PRE_RUN_ADDITIONS (used by emcc to add file preloading).
+// Note that you can add dependencies in preRun, even though
+// it happens right before run - run will be postponed until
+// the dependencies are met.
+var runDependencies = 0;
+var runDependencyWatcher = null;
+var dependenciesFulfilled = null; // overridden to take different actions when all run dependencies are fulfilled
+
+function addRunDependency(id) {
+ runDependencies++;
+ if (Module['monitorRunDependencies']) {
+ Module['monitorRunDependencies'](runDependencies);
+ }
+}
+Module['addRunDependency'] = addRunDependency;
+function removeRunDependency(id) {
+ runDependencies--;
+ if (Module['monitorRunDependencies']) {
+ Module['monitorRunDependencies'](runDependencies);
+ }
+ if (runDependencies == 0) {
+ if (runDependencyWatcher !== null) {
+ clearInterval(runDependencyWatcher);
+ runDependencyWatcher = null;
+ }
+ if (dependenciesFulfilled) {
+ var callback = dependenciesFulfilled;
+ dependenciesFulfilled = null;
+ callback(); // can add another dependenciesFulfilled
+ }
+ }
+}
+Module['removeRunDependency'] = removeRunDependency;
+
+Module["preloadedImages"] = {}; // maps url to image data
+Module["preloadedAudios"] = {}; // maps url to audio data
+
+
+var memoryInitializer = null;
+
+// === Body ===
+
+
+
+
+
+STATIC_BASE = 8;
+
+STATICTOP = STATIC_BASE + Runtime.alignMemory(35);
+/* global initializers */ __ATINIT__.push();
+
+
+/* memory initializer */ allocate([101,114,114,111,114,58,32,37,100,92,110,0,0,0,0,0,102,105,110,97,108,58,32,37,100,58,37,100,46,10,0,0], "i8", ALLOC_NONE, Runtime.GLOBAL_BASE);
+
+
+
+
+var tempDoublePtr = Runtime.alignMemory(allocate(12, "i8", ALLOC_STATIC), 8);
+
+assert(tempDoublePtr % 8 == 0);
+
+function copyTempFloat(ptr) { // functions, because inlining this code increases code size too much
+
+ HEAP8[tempDoublePtr] = HEAP8[ptr];
+
+ HEAP8[tempDoublePtr+1] = HEAP8[ptr+1];
+
+ HEAP8[tempDoublePtr+2] = HEAP8[ptr+2];
+
+ HEAP8[tempDoublePtr+3] = HEAP8[ptr+3];
+
+}
+
+function copyTempDouble(ptr) {
+
+ HEAP8[tempDoublePtr] = HEAP8[ptr];
+
+ HEAP8[tempDoublePtr+1] = HEAP8[ptr+1];
+
+ HEAP8[tempDoublePtr+2] = HEAP8[ptr+2];
+
+ HEAP8[tempDoublePtr+3] = HEAP8[ptr+3];
+
+ HEAP8[tempDoublePtr+4] = HEAP8[ptr+4];
+
+ HEAP8[tempDoublePtr+5] = HEAP8[ptr+5];
+
+ HEAP8[tempDoublePtr+6] = HEAP8[ptr+6];
+
+ HEAP8[tempDoublePtr+7] = HEAP8[ptr+7];
+
+}
+
+
+ function _malloc(bytes) {
+ /* Over-allocate to make sure it is byte-aligned by 8.
+ * This will leak memory, but this is only the dummy
+ * implementation (replaced by dlmalloc normally) so
+ * not an issue.
+ */
+ var ptr = Runtime.dynamicAlloc(bytes + 8);
+ return (ptr+8) & 0xFFFFFFF8;
+ }
+ Module["_malloc"] = _malloc;
+
+
+
+
+ var ERRNO_CODES={EPERM:1,ENOENT:2,ESRCH:3,EINTR:4,EIO:5,ENXIO:6,E2BIG:7,ENOEXEC:8,EBADF:9,ECHILD:10,EAGAIN:11,EWOULDBLOCK:11,ENOMEM:12,EACCES:13,EFAULT:14,ENOTBLK:15,EBUSY:16,EEXIST:17,EXDEV:18,ENODEV:19,ENOTDIR:20,EISDIR:21,EINVAL:22,ENFILE:23,EMFILE:24,ENOTTY:25,ETXTBSY:26,EFBIG:27,ENOSPC:28,ESPIPE:29,EROFS:30,EMLINK:31,EPIPE:32,EDOM:33,ERANGE:34,ENOMSG:42,EIDRM:43,ECHRNG:44,EL2NSYNC:45,EL3HLT:46,EL3RST:47,ELNRNG:48,EUNATCH:49,ENOCSI:50,EL2HLT:51,EDEADLK:35,ENOLCK:37,EBADE:52,EBADR:53,EXFULL:54,ENOANO:55,EBADRQC:56,EBADSLT:57,EDEADLOCK:35,EBFONT:59,ENOSTR:60,ENODATA:61,ETIME:62,ENOSR:63,ENONET:64,ENOPKG:65,EREMOTE:66,ENOLINK:67,EADV:68,ESRMNT:69,ECOMM:70,EPROTO:71,EMULTIHOP:72,EDOTDOT:73,EBADMSG:74,ENOTUNIQ:76,EBADFD:77,EREMCHG:78,ELIBACC:79,ELIBBAD:80,ELIBSCN:81,ELIBMAX:82,ELIBEXEC:83,ENOSYS:38,ENOTEMPTY:39,ENAMETOOLONG:36,ELOOP:40,EOPNOTSUPP:95,EPFNOSUPPORT:96,ECONNRESET:104,ENOBUFS:105,EAFNOSUPPORT:97,EPROTOTYPE:91,ENOTSOCK:88,ENOPROTOOPT:92,ESHUTDOWN:108,ECONNREFUSED:111,EADDRINUSE:98,ECONNABORTED:103,ENETUNREACH:101,ENETDOWN:100,ETIMEDOUT:110,EHOSTDOWN:112,EHOSTUNREACH:113,EINPROGRESS:115,EALREADY:114,EDESTADDRREQ:89,EMSGSIZE:90,EPROTONOSUPPORT:93,ESOCKTNOSUPPORT:94,EADDRNOTAVAIL:99,ENETRESET:102,EISCONN:106,ENOTCONN:107,ETOOMANYREFS:109,EUSERS:87,EDQUOT:122,ESTALE:116,ENOTSUP:95,ENOMEDIUM:123,EILSEQ:84,EOVERFLOW:75,ECANCELED:125,ENOTRECOVERABLE:131,EOWNERDEAD:130,ESTRPIPE:86};
+
+ var ERRNO_MESSAGES={0:"Success",1:"Not super-user",2:"No such file or directory",3:"No such process",4:"Interrupted system call",5:"I/O error",6:"No such device or address",7:"Arg list too long",8:"Exec format error",9:"Bad file number",10:"No children",11:"No more processes",12:"Not enough core",13:"Permission denied",14:"Bad address",15:"Block device required",16:"Mount device busy",17:"File exists",18:"Cross-device link",19:"No such device",20:"Not a directory",21:"Is a directory",22:"Invalid argument",23:"Too many open files in system",24:"Too many open files",25:"Not a typewriter",26:"Text file busy",27:"File too large",28:"No space left on device",29:"Illegal seek",30:"Read only file system",31:"Too many links",32:"Broken pipe",33:"Math arg out of domain of func",34:"Math result not representable",35:"File locking deadlock error",36:"File or path name too long",37:"No record locks available",38:"Function not implemented",39:"Directory not empty",40:"Too many symbolic links",42:"No message of desired type",43:"Identifier removed",44:"Channel number out of range",45:"Level 2 not synchronized",46:"Level 3 halted",47:"Level 3 reset",48:"Link number out of range",49:"Protocol driver not attached",50:"No CSI structure available",51:"Level 2 halted",52:"Invalid exchange",53:"Invalid request descriptor",54:"Exchange full",55:"No anode",56:"Invalid request code",57:"Invalid slot",59:"Bad font file fmt",60:"Device not a stream",61:"No data (for no delay io)",62:"Timer expired",63:"Out of streams resources",64:"Machine is not on the network",65:"Package not installed",66:"The object is remote",67:"The link has been severed",68:"Advertise error",69:"Srmount error",70:"Communication error on send",71:"Protocol error",72:"Multihop attempted",73:"Cross mount point (not really error)",74:"Trying to read unreadable message",75:"Value too large for defined data type",76:"Given log. name not unique",77:"f.d. invalid for this operation",78:"Remote address changed",79:"Can access a needed shared lib",80:"Accessing a corrupted shared lib",81:".lib section in a.out corrupted",82:"Attempting to link in too many libs",83:"Attempting to exec a shared library",84:"Illegal byte sequence",86:"Streams pipe error",87:"Too many users",88:"Socket operation on non-socket",89:"Destination address required",90:"Message too long",91:"Protocol wrong type for socket",92:"Protocol not available",93:"Unknown protocol",94:"Socket type not supported",95:"Not supported",96:"Protocol family not supported",97:"Address family not supported by protocol family",98:"Address already in use",99:"Address not available",100:"Network interface is not configured",101:"Network is unreachable",102:"Connection reset by network",103:"Connection aborted",104:"Connection reset by peer",105:"No buffer space available",106:"Socket is already connected",107:"Socket is not connected",108:"Can't send after socket shutdown",109:"Too many references",110:"Connection timed out",111:"Connection refused",112:"Host is down",113:"Host is unreachable",114:"Socket already connected",115:"Connection already in progress",116:"Stale file handle",122:"Quota exceeded",123:"No medium (in tape drive)",125:"Operation canceled",130:"Previous owner died",131:"State not recoverable"};
+
+
+ var ___errno_state=0;function ___setErrNo(value) {
+ // For convenient setting and returning of errno.
+ HEAP32[((___errno_state)>>2)]=value;
+ return value;
+ }
+
+ var TTY={ttys:[],init:function () {
+ // https://github.com/kripken/emscripten/pull/1555
+ // if (ENVIRONMENT_IS_NODE) {
+ // // currently, FS.init does not distinguish if process.stdin is a file or TTY
+ // // device, it always assumes it's a TTY device. because of this, we're forcing
+ // // process.stdin to UTF8 encoding to at least make stdin reading compatible
+ // // with text files until FS.init can be refactored.
+ // process['stdin']['setEncoding']('utf8');
+ // }
+ },shutdown:function () {
+ // https://github.com/kripken/emscripten/pull/1555
+ // if (ENVIRONMENT_IS_NODE) {
+ // // inolen: any idea as to why node -e 'process.stdin.read()' wouldn't exit immediately (with process.stdin being a tty)?
+ // // isaacs: because now it's reading from the stream, you've expressed interest in it, so that read() kicks off a _read() which creates a ReadReq operation
+ // // inolen: I thought read() in that case was a synchronous operation that just grabbed some amount of buffered data if it exists?
+ // // isaacs: it is. but it also triggers a _read() call, which calls readStart() on the handle
+ // // isaacs: do process.stdin.pause() and i'd think it'd probably close the pending call
+ // process['stdin']['pause']();
+ // }
+ },register:function (dev, ops) {
+ TTY.ttys[dev] = { input: [], output: [], ops: ops };
+ FS.registerDevice(dev, TTY.stream_ops);
+ },stream_ops:{open:function (stream) {
+ var tty = TTY.ttys[stream.node.rdev];
+ if (!tty) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ stream.tty = tty;
+ stream.seekable = false;
+ },close:function (stream) {
+ // flush any pending line data
+ if (stream.tty.output.length) {
+ stream.tty.ops.put_char(stream.tty, 10);
+ }
+ },read:function (stream, buffer, offset, length, pos /* ignored */) {
+ if (!stream.tty || !stream.tty.ops.get_char) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENXIO);
+ }
+ var bytesRead = 0;
+ for (var i = 0; i < length; i++) {
+ var result;
+ try {
+ result = stream.tty.ops.get_char(stream.tty);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ if (result === undefined && bytesRead === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ if (result === null || result === undefined) break;
+ bytesRead++;
+ buffer[offset+i] = result;
+ }
+ if (bytesRead) {
+ stream.node.timestamp = Date.now();
+ }
+ return bytesRead;
+ },write:function (stream, buffer, offset, length, pos) {
+ if (!stream.tty || !stream.tty.ops.put_char) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENXIO);
+ }
+ for (var i = 0; i < length; i++) {
+ try {
+ stream.tty.ops.put_char(stream.tty, buffer[offset+i]);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ }
+ if (length) {
+ stream.node.timestamp = Date.now();
+ }
+ return i;
+ }},default_tty_ops:{get_char:function (tty) {
+ if (!tty.input.length) {
+ var result = null;
+ if (ENVIRONMENT_IS_NODE) {
+ result = process['stdin']['read']();
+ if (!result) {
+ if (process['stdin']['_readableState'] && process['stdin']['_readableState']['ended']) {
+ return null; // EOF
+ }
+ return undefined; // no data available
+ }
+ } else if (typeof window != 'undefined' &&
+ typeof window.prompt == 'function') {
+ // Browser.
+ result = window.prompt('Input: '); // returns null on cancel
+ if (result !== null) {
+ result += '\n';
+ }
+ } else if (typeof readline == 'function') {
+ // Command line.
+ result = readline();
+ if (result !== null) {
+ result += '\n';
+ }
+ }
+ if (!result) {
+ return null;
+ }
+ tty.input = intArrayFromString(result, true);
+ }
+ return tty.input.shift();
+ },put_char:function (tty, val) {
+ if (val === null || val === 10) {
+ Module['print'](tty.output.join(''));
+ tty.output = [];
+ } else {
+ tty.output.push(TTY.utf8.processCChar(val));
+ }
+ }},default_tty1_ops:{put_char:function (tty, val) {
+ if (val === null || val === 10) {
+ Module['printErr'](tty.output.join(''));
+ tty.output = [];
+ } else {
+ tty.output.push(TTY.utf8.processCChar(val));
+ }
+ }}};
+
+ var MEMFS={ops_table:null,CONTENT_OWNING:1,CONTENT_FLEXIBLE:2,CONTENT_FIXED:3,mount:function (mount) {
+ return MEMFS.createNode(null, '/', 16384 | 511 /* 0777 */, 0);
+ },createNode:function (parent, name, mode, dev) {
+ if (FS.isBlkdev(mode) || FS.isFIFO(mode)) {
+ // no supported
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (!MEMFS.ops_table) {
+ MEMFS.ops_table = {
+ dir: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr,
+ lookup: MEMFS.node_ops.lookup,
+ mknod: MEMFS.node_ops.mknod,
+ rename: MEMFS.node_ops.rename,
+ unlink: MEMFS.node_ops.unlink,
+ rmdir: MEMFS.node_ops.rmdir,
+ readdir: MEMFS.node_ops.readdir,
+ symlink: MEMFS.node_ops.symlink
+ },
+ stream: {
+ llseek: MEMFS.stream_ops.llseek
+ }
+ },
+ file: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr
+ },
+ stream: {
+ llseek: MEMFS.stream_ops.llseek,
+ read: MEMFS.stream_ops.read,
+ write: MEMFS.stream_ops.write,
+ allocate: MEMFS.stream_ops.allocate,
+ mmap: MEMFS.stream_ops.mmap
+ }
+ },
+ link: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr,
+ readlink: MEMFS.node_ops.readlink
+ },
+ stream: {}
+ },
+ chrdev: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr
+ },
+ stream: FS.chrdev_stream_ops
+ },
+ };
+ }
+ var node = FS.createNode(parent, name, mode, dev);
+ if (FS.isDir(node.mode)) {
+ node.node_ops = MEMFS.ops_table.dir.node;
+ node.stream_ops = MEMFS.ops_table.dir.stream;
+ node.contents = {};
+ } else if (FS.isFile(node.mode)) {
+ node.node_ops = MEMFS.ops_table.file.node;
+ node.stream_ops = MEMFS.ops_table.file.stream;
+ node.contents = [];
+ node.contentMode = MEMFS.CONTENT_FLEXIBLE;
+ } else if (FS.isLink(node.mode)) {
+ node.node_ops = MEMFS.ops_table.link.node;
+ node.stream_ops = MEMFS.ops_table.link.stream;
+ } else if (FS.isChrdev(node.mode)) {
+ node.node_ops = MEMFS.ops_table.chrdev.node;
+ node.stream_ops = MEMFS.ops_table.chrdev.stream;
+ }
+ node.timestamp = Date.now();
+ // add the new node to the parent
+ if (parent) {
+ parent.contents[name] = node;
+ }
+ return node;
+ },ensureFlexible:function (node) {
+ if (node.contentMode !== MEMFS.CONTENT_FLEXIBLE) {
+ var contents = node.contents;
+ node.contents = Array.prototype.slice.call(contents);
+ node.contentMode = MEMFS.CONTENT_FLEXIBLE;
+ }
+ },node_ops:{getattr:function (node) {
+ var attr = {};
+ // device numbers reuse inode numbers.
+ attr.dev = FS.isChrdev(node.mode) ? node.id : 1;
+ attr.ino = node.id;
+ attr.mode = node.mode;
+ attr.nlink = 1;
+ attr.uid = 0;
+ attr.gid = 0;
+ attr.rdev = node.rdev;
+ if (FS.isDir(node.mode)) {
+ attr.size = 4096;
+ } else if (FS.isFile(node.mode)) {
+ attr.size = node.contents.length;
+ } else if (FS.isLink(node.mode)) {
+ attr.size = node.link.length;
+ } else {
+ attr.size = 0;
+ }
+ attr.atime = new Date(node.timestamp);
+ attr.mtime = new Date(node.timestamp);
+ attr.ctime = new Date(node.timestamp);
+ // NOTE: In our implementation, st_blocks = Math.ceil(st_size/st_blksize),
+ // but this is not required by the standard.
+ attr.blksize = 4096;
+ attr.blocks = Math.ceil(attr.size / attr.blksize);
+ return attr;
+ },setattr:function (node, attr) {
+ if (attr.mode !== undefined) {
+ node.mode = attr.mode;
+ }
+ if (attr.timestamp !== undefined) {
+ node.timestamp = attr.timestamp;
+ }
+ if (attr.size !== undefined) {
+ MEMFS.ensureFlexible(node);
+ var contents = node.contents;
+ if (attr.size < contents.length) contents.length = attr.size;
+ else while (attr.size > contents.length) contents.push(0);
+ }
+ },lookup:function (parent, name) {
+ throw FS.genericErrors[ERRNO_CODES.ENOENT];
+ },mknod:function (parent, name, mode, dev) {
+ return MEMFS.createNode(parent, name, mode, dev);
+ },rename:function (old_node, new_dir, new_name) {
+ // if we're overwriting a directory at new_name, make sure it's empty.
+ if (FS.isDir(old_node.mode)) {
+ var new_node;
+ try {
+ new_node = FS.lookupNode(new_dir, new_name);
+ } catch (e) {
+ }
+ if (new_node) {
+ for (var i in new_node.contents) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTEMPTY);
+ }
+ }
+ }
+ // do the internal rewiring
+ delete old_node.parent.contents[old_node.name];
+ old_node.name = new_name;
+ new_dir.contents[new_name] = old_node;
+ old_node.parent = new_dir;
+ },unlink:function (parent, name) {
+ delete parent.contents[name];
+ },rmdir:function (parent, name) {
+ var node = FS.lookupNode(parent, name);
+ for (var i in node.contents) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTEMPTY);
+ }
+ delete parent.contents[name];
+ },readdir:function (node) {
+ var entries = ['.', '..']
+ for (var key in node.contents) {
+ if (!node.contents.hasOwnProperty(key)) {
+ continue;
+ }
+ entries.push(key);
+ }
+ return entries;
+ },symlink:function (parent, newname, oldpath) {
+ var node = MEMFS.createNode(parent, newname, 511 /* 0777 */ | 40960, 0);
+ node.link = oldpath;
+ return node;
+ },readlink:function (node) {
+ if (!FS.isLink(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ return node.link;
+ }},stream_ops:{read:function (stream, buffer, offset, length, position) {
+ var contents = stream.node.contents;
+ if (position >= contents.length)
+ return 0;
+ var size = Math.min(contents.length - position, length);
+ assert(size >= 0);
+ if (size > 8 && contents.subarray) { // non-trivial, and typed array
+ buffer.set(contents.subarray(position, position + size), offset);
+ } else
+ {
+ for (var i = 0; i < size; i++) {
+ buffer[offset + i] = contents[position + i];
+ }
+ }
+ return size;
+ },write:function (stream, buffer, offset, length, position, canOwn) {
+ var node = stream.node;
+ node.timestamp = Date.now();
+ var contents = node.contents;
+ if (length && contents.length === 0 && position === 0 && buffer.subarray) {
+ // just replace it with the new data
+ if (canOwn && offset === 0) {
+ node.contents = buffer; // this could be a subarray of Emscripten HEAP, or allocated from some other source.
+ node.contentMode = (buffer.buffer === HEAP8.buffer) ? MEMFS.CONTENT_OWNING : MEMFS.CONTENT_FIXED;
+ } else {
+ node.contents = new Uint8Array(buffer.subarray(offset, offset+length));
+ node.contentMode = MEMFS.CONTENT_FIXED;
+ }
+ return length;
+ }
+ MEMFS.ensureFlexible(node);
+ var contents = node.contents;
+ while (contents.length < position) contents.push(0);
+ for (var i = 0; i < length; i++) {
+ contents[position + i] = buffer[offset + i];
+ }
+ return length;
+ },llseek:function (stream, offset, whence) {
+ var position = offset;
+ if (whence === 1) { // SEEK_CUR.
+ position += stream.position;
+ } else if (whence === 2) { // SEEK_END.
+ if (FS.isFile(stream.node.mode)) {
+ position += stream.node.contents.length;
+ }
+ }
+ if (position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ stream.ungotten = [];
+ stream.position = position;
+ return position;
+ },allocate:function (stream, offset, length) {
+ MEMFS.ensureFlexible(stream.node);
+ var contents = stream.node.contents;
+ var limit = offset + length;
+ while (limit > contents.length) contents.push(0);
+ },mmap:function (stream, buffer, offset, length, position, prot, flags) {
+ if (!FS.isFile(stream.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ var ptr;
+ var allocated;
+ var contents = stream.node.contents;
+ // Only make a new copy when MAP_PRIVATE is specified.
+ if ( !(flags & 2) &&
+ (contents.buffer === buffer || contents.buffer === buffer.buffer) ) {
+ // We can't emulate MAP_SHARED when the file is not backed by the buffer
+ // we're mapping to (e.g. the HEAP buffer).
+ allocated = false;
+ ptr = contents.byteOffset;
+ } else {
+ // Try to avoid unnecessary slices.
+ if (position > 0 || position + length < contents.length) {
+ if (contents.subarray) {
+ contents = contents.subarray(position, position + length);
+ } else {
+ contents = Array.prototype.slice.call(contents, position, position + length);
+ }
+ }
+ allocated = true;
+ ptr = _malloc(length);
+ if (!ptr) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOMEM);
+ }
+ buffer.set(contents, ptr);
+ }
+ return { ptr: ptr, allocated: allocated };
+ }}};
+
+ var IDBFS={dbs:{},indexedDB:function () {
+ return window.indexedDB || window.mozIndexedDB || window.webkitIndexedDB || window.msIndexedDB;
+ },DB_VERSION:21,DB_STORE_NAME:"FILE_DATA",mount:function (mount) {
+ // reuse all of the core MEMFS functionality
+ return MEMFS.mount.apply(null, arguments);
+ },syncfs:function (mount, populate, callback) {
+ IDBFS.getLocalSet(mount, function(err, local) {
+ if (err) return callback(err);
+
+ IDBFS.getRemoteSet(mount, function(err, remote) {
+ if (err) return callback(err);
+
+ var src = populate ? remote : local;
+ var dst = populate ? local : remote;
+
+ IDBFS.reconcile(src, dst, callback);
+ });
+ });
+ },getDB:function (name, callback) {
+ // check the cache first
+ var db = IDBFS.dbs[name];
+ if (db) {
+ return callback(null, db);
+ }
+
+ var req;
+ try {
+ req = IDBFS.indexedDB().open(name, IDBFS.DB_VERSION);
+ } catch (e) {
+ return callback(e);
+ }
+ req.onupgradeneeded = function(e) {
+ var db = e.target.result;
+ var transaction = e.target.transaction;
+
+ var fileStore;
+
+ if (db.objectStoreNames.contains(IDBFS.DB_STORE_NAME)) {
+ fileStore = transaction.objectStore(IDBFS.DB_STORE_NAME);
+ } else {
+ fileStore = db.createObjectStore(IDBFS.DB_STORE_NAME);
+ }
+
+ fileStore.createIndex('timestamp', 'timestamp', { unique: false });
+ };
+ req.onsuccess = function() {
+ db = req.result;
+
+ // add to the cache
+ IDBFS.dbs[name] = db;
+ callback(null, db);
+ };
+ req.onerror = function() {
+ callback(this.error);
+ };
+ },getLocalSet:function (mount, callback) {
+ var entries = {};
+
+ function isRealDir(p) {
+ return p !== '.' && p !== '..';
+ };
+ function toAbsolute(root) {
+ return function(p) {
+ return PATH.join2(root, p);
+ }
+ };
+
+ var check = FS.readdir(mount.mountpoint).filter(isRealDir).map(toAbsolute(mount.mountpoint));
+
+ while (check.length) {
+ var path = check.pop();
+ var stat;
+
+ try {
+ stat = FS.stat(path);
+ } catch (e) {
+ return callback(e);
+ }
+
+ if (FS.isDir(stat.mode)) {
+ check.push.apply(check, FS.readdir(path).filter(isRealDir).map(toAbsolute(path)));
+ }
+
+ entries[path] = { timestamp: stat.mtime };
+ }
+
+ return callback(null, { type: 'local', entries: entries });
+ },getRemoteSet:function (mount, callback) {
+ var entries = {};
+
+ IDBFS.getDB(mount.mountpoint, function(err, db) {
+ if (err) return callback(err);
+
+ var transaction = db.transaction([IDBFS.DB_STORE_NAME], 'readonly');
+ transaction.onerror = function() { callback(this.error); };
+
+ var store = transaction.objectStore(IDBFS.DB_STORE_NAME);
+ var index = store.index('timestamp');
+
+ index.openKeyCursor().onsuccess = function(event) {
+ var cursor = event.target.result;
+
+ if (!cursor) {
+ return callback(null, { type: 'remote', db: db, entries: entries });
+ }
+
+ entries[cursor.primaryKey] = { timestamp: cursor.key };
+
+ cursor.continue();
+ };
+ });
+ },loadLocalEntry:function (path, callback) {
+ var stat, node;
+
+ try {
+ var lookup = FS.lookupPath(path);
+ node = lookup.node;
+ stat = FS.stat(path);
+ } catch (e) {
+ return callback(e);
+ }
+
+ if (FS.isDir(stat.mode)) {
+ return callback(null, { timestamp: stat.mtime, mode: stat.mode });
+ } else if (FS.isFile(stat.mode)) {
+ return callback(null, { timestamp: stat.mtime, mode: stat.mode, contents: node.contents });
+ } else {
+ return callback(new Error('node type not supported'));
+ }
+ },storeLocalEntry:function (path, entry, callback) {
+ try {
+ if (FS.isDir(entry.mode)) {
+ FS.mkdir(path, entry.mode);
+ } else if (FS.isFile(entry.mode)) {
+ FS.writeFile(path, entry.contents, { encoding: 'binary', canOwn: true });
+ } else {
+ return callback(new Error('node type not supported'));
+ }
+
+ FS.utime(path, entry.timestamp, entry.timestamp);
+ } catch (e) {
+ return callback(e);
+ }
+
+ callback(null);
+ },removeLocalEntry:function (path, callback) {
+ try {
+ var lookup = FS.lookupPath(path);
+ var stat = FS.stat(path);
+
+ if (FS.isDir(stat.mode)) {
+ FS.rmdir(path);
+ } else if (FS.isFile(stat.mode)) {
+ FS.unlink(path);
+ }
+ } catch (e) {
+ return callback(e);
+ }
+
+ callback(null);
+ },loadRemoteEntry:function (store, path, callback) {
+ var req = store.get(path);
+ req.onsuccess = function(event) { callback(null, event.target.result); };
+ req.onerror = function() { callback(this.error); };
+ },storeRemoteEntry:function (store, path, entry, callback) {
+ var req = store.put(entry, path);
+ req.onsuccess = function() { callback(null); };
+ req.onerror = function() { callback(this.error); };
+ },removeRemoteEntry:function (store, path, callback) {
+ var req = store.delete(path);
+ req.onsuccess = function() { callback(null); };
+ req.onerror = function() { callback(this.error); };
+ },reconcile:function (src, dst, callback) {
+ var total = 0;
+
+ var create = [];
+ Object.keys(src.entries).forEach(function (key) {
+ var e = src.entries[key];
+ var e2 = dst.entries[key];
+ if (!e2 || e.timestamp > e2.timestamp) {
+ create.push(key);
+ total++;
+ }
+ });
+
+ var remove = [];
+ Object.keys(dst.entries).forEach(function (key) {
+ var e = dst.entries[key];
+ var e2 = src.entries[key];
+ if (!e2) {
+ remove.push(key);
+ total++;
+ }
+ });
+
+ if (!total) {
+ return callback(null);
+ }
+
+ var errored = false;
+ var completed = 0;
+ var db = src.type === 'remote' ? src.db : dst.db;
+ var transaction = db.transaction([IDBFS.DB_STORE_NAME], 'readwrite');
+ var store = transaction.objectStore(IDBFS.DB_STORE_NAME);
+
+ function done(err) {
+ if (err) {
+ if (!done.errored) {
+ done.errored = true;
+ return callback(err);
+ }
+ return;
+ }
+ if (++completed >= total) {
+ return callback(null);
+ }
+ };
+
+ transaction.onerror = function() { done(this.error); };
+
+ // sort paths in ascending order so directory entries are created
+ // before the files inside them
+ create.sort().forEach(function (path) {
+ if (dst.type === 'local') {
+ IDBFS.loadRemoteEntry(store, path, function (err, entry) {
+ if (err) return done(err);
+ IDBFS.storeLocalEntry(path, entry, done);
+ });
+ } else {
+ IDBFS.loadLocalEntry(path, function (err, entry) {
+ if (err) return done(err);
+ IDBFS.storeRemoteEntry(store, path, entry, done);
+ });
+ }
+ });
+
+ // sort paths in descending order so files are deleted before their
+ // parent directories
+ remove.sort().reverse().forEach(function(path) {
+ if (dst.type === 'local') {
+ IDBFS.removeLocalEntry(path, done);
+ } else {
+ IDBFS.removeRemoteEntry(store, path, done);
+ }
+ });
+ }};
+
+ var NODEFS={isWindows:false,staticInit:function () {
+ NODEFS.isWindows = !!process.platform.match(/^win/);
+ },mount:function (mount) {
+ assert(ENVIRONMENT_IS_NODE);
+ return NODEFS.createNode(null, '/', NODEFS.getMode(mount.opts.root), 0);
+ },createNode:function (parent, name, mode, dev) {
+ if (!FS.isDir(mode) && !FS.isFile(mode) && !FS.isLink(mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var node = FS.createNode(parent, name, mode);
+ node.node_ops = NODEFS.node_ops;
+ node.stream_ops = NODEFS.stream_ops;
+ return node;
+ },getMode:function (path) {
+ var stat;
+ try {
+ stat = fs.lstatSync(path);
+ if (NODEFS.isWindows) {
+ // On Windows, directories return permission bits 'rw-rw-rw-', even though they have 'rwxrwxrwx', so
+ // propagate write bits to execute bits.
+ stat.mode = stat.mode | ((stat.mode & 146) >> 1);
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ return stat.mode;
+ },realPath:function (node) {
+ var parts = [];
+ while (node.parent !== node) {
+ parts.push(node.name);
+ node = node.parent;
+ }
+ parts.push(node.mount.opts.root);
+ parts.reverse();
+ return PATH.join.apply(null, parts);
+ },flagsToPermissionStringMap:{0:"r",1:"r+",2:"r+",64:"r",65:"r+",66:"r+",129:"rx+",193:"rx+",514:"w+",577:"w",578:"w+",705:"wx",706:"wx+",1024:"a",1025:"a",1026:"a+",1089:"a",1090:"a+",1153:"ax",1154:"ax+",1217:"ax",1218:"ax+",4096:"rs",4098:"rs+"},flagsToPermissionString:function (flags) {
+ if (flags in NODEFS.flagsToPermissionStringMap) {
+ return NODEFS.flagsToPermissionStringMap[flags];
+ } else {
+ return flags;
+ }
+ },node_ops:{getattr:function (node) {
+ var path = NODEFS.realPath(node);
+ var stat;
+ try {
+ stat = fs.lstatSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ // node.js v0.10.20 doesn't report blksize and blocks on Windows. Fake them with default blksize of 4096.
+ // See http://support.microsoft.com/kb/140365
+ if (NODEFS.isWindows && !stat.blksize) {
+ stat.blksize = 4096;
+ }
+ if (NODEFS.isWindows && !stat.blocks) {
+ stat.blocks = (stat.size+stat.blksize-1)/stat.blksize|0;
+ }
+ return {
+ dev: stat.dev,
+ ino: stat.ino,
+ mode: stat.mode,
+ nlink: stat.nlink,
+ uid: stat.uid,
+ gid: stat.gid,
+ rdev: stat.rdev,
+ size: stat.size,
+ atime: stat.atime,
+ mtime: stat.mtime,
+ ctime: stat.ctime,
+ blksize: stat.blksize,
+ blocks: stat.blocks
+ };
+ },setattr:function (node, attr) {
+ var path = NODEFS.realPath(node);
+ try {
+ if (attr.mode !== undefined) {
+ fs.chmodSync(path, attr.mode);
+ // update the common node structure mode as well
+ node.mode = attr.mode;
+ }
+ if (attr.timestamp !== undefined) {
+ var date = new Date(attr.timestamp);
+ fs.utimesSync(path, date, date);
+ }
+ if (attr.size !== undefined) {
+ fs.truncateSync(path, attr.size);
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },lookup:function (parent, name) {
+ var path = PATH.join2(NODEFS.realPath(parent), name);
+ var mode = NODEFS.getMode(path);
+ return NODEFS.createNode(parent, name, mode);
+ },mknod:function (parent, name, mode, dev) {
+ var node = NODEFS.createNode(parent, name, mode, dev);
+ // create the backing node for this in the fs root as well
+ var path = NODEFS.realPath(node);
+ try {
+ if (FS.isDir(node.mode)) {
+ fs.mkdirSync(path, node.mode);
+ } else {
+ fs.writeFileSync(path, '', { mode: node.mode });
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ return node;
+ },rename:function (oldNode, newDir, newName) {
+ var oldPath = NODEFS.realPath(oldNode);
+ var newPath = PATH.join2(NODEFS.realPath(newDir), newName);
+ try {
+ fs.renameSync(oldPath, newPath);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },unlink:function (parent, name) {
+ var path = PATH.join2(NODEFS.realPath(parent), name);
+ try {
+ fs.unlinkSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },rmdir:function (parent, name) {
+ var path = PATH.join2(NODEFS.realPath(parent), name);
+ try {
+ fs.rmdirSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },readdir:function (node) {
+ var path = NODEFS.realPath(node);
+ try {
+ return fs.readdirSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },symlink:function (parent, newName, oldPath) {
+ var newPath = PATH.join2(NODEFS.realPath(parent), newName);
+ try {
+ fs.symlinkSync(oldPath, newPath);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },readlink:function (node) {
+ var path = NODEFS.realPath(node);
+ try {
+ return fs.readlinkSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ }},stream_ops:{open:function (stream) {
+ var path = NODEFS.realPath(stream.node);
+ try {
+ if (FS.isFile(stream.node.mode)) {
+ stream.nfd = fs.openSync(path, NODEFS.flagsToPermissionString(stream.flags));
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },close:function (stream) {
+ try {
+ if (FS.isFile(stream.node.mode) && stream.nfd) {
+ fs.closeSync(stream.nfd);
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },read:function (stream, buffer, offset, length, position) {
+ // FIXME this is terrible.
+ var nbuffer = new Buffer(length);
+ var res;
+ try {
+ res = fs.readSync(stream.nfd, nbuffer, 0, length, position);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ if (res > 0) {
+ for (var i = 0; i < res; i++) {
+ buffer[offset + i] = nbuffer[i];
+ }
+ }
+ return res;
+ },write:function (stream, buffer, offset, length, position) {
+ // FIXME this is terrible.
+ var nbuffer = new Buffer(buffer.subarray(offset, offset + length));
+ var res;
+ try {
+ res = fs.writeSync(stream.nfd, nbuffer, 0, length, position);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ return res;
+ },llseek:function (stream, offset, whence) {
+ var position = offset;
+ if (whence === 1) { // SEEK_CUR.
+ position += stream.position;
+ } else if (whence === 2) { // SEEK_END.
+ if (FS.isFile(stream.node.mode)) {
+ try {
+ var stat = fs.fstatSync(stream.nfd);
+ position += stat.size;
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ }
+ }
+
+ if (position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+
+ stream.position = position;
+ return position;
+ }}};
+
+ var _stdin=allocate(1, "i32*", ALLOC_STATIC);
+
+ var _stdout=allocate(1, "i32*", ALLOC_STATIC);
+
+ var _stderr=allocate(1, "i32*", ALLOC_STATIC);
+
+ function _fflush(stream) {
+ // int fflush(FILE *stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fflush.html
+ // we don't currently perform any user-space buffering of data
+ }var FS={root:null,mounts:[],devices:[null],streams:[],nextInode:1,nameTable:null,currentPath:"/",initialized:false,ignorePermissions:true,ErrnoError:null,genericErrors:{},handleFSError:function (e) {
+ if (!(e instanceof FS.ErrnoError)) throw e + ' : ' + stackTrace();
+ return ___setErrNo(e.errno);
+ },lookupPath:function (path, opts) {
+ path = PATH.resolve(FS.cwd(), path);
+ opts = opts || {};
+
+ var defaults = {
+ follow_mount: true,
+ recurse_count: 0
+ };
+ for (var key in defaults) {
+ if (opts[key] === undefined) {
+ opts[key] = defaults[key];
+ }
+ }
+
+ if (opts.recurse_count > 8) { // max recursive lookup of 8
+ throw new FS.ErrnoError(ERRNO_CODES.ELOOP);
+ }
+
+ // split the path
+ var parts = PATH.normalizeArray(path.split('/').filter(function(p) {
+ return !!p;
+ }), false);
+
+ // start at the root
+ var current = FS.root;
+ var current_path = '/';
+
+ for (var i = 0; i < parts.length; i++) {
+ var islast = (i === parts.length-1);
+ if (islast && opts.parent) {
+ // stop resolving
+ break;
+ }
+
+ current = FS.lookupNode(current, parts[i]);
+ current_path = PATH.join2(current_path, parts[i]);
+
+ // jump to the mount's root node if this is a mountpoint
+ if (FS.isMountpoint(current)) {
+ if (!islast || (islast && opts.follow_mount)) {
+ current = current.mounted.root;
+ }
+ }
+
+ // by default, lookupPath will not follow a symlink if it is the final path component.
+ // setting opts.follow = true will override this behavior.
+ if (!islast || opts.follow) {
+ var count = 0;
+ while (FS.isLink(current.mode)) {
+ var link = FS.readlink(current_path);
+ current_path = PATH.resolve(PATH.dirname(current_path), link);
+
+ var lookup = FS.lookupPath(current_path, { recurse_count: opts.recurse_count });
+ current = lookup.node;
+
+ if (count++ > 40) { // limit max consecutive symlinks to 40 (SYMLOOP_MAX).
+ throw new FS.ErrnoError(ERRNO_CODES.ELOOP);
+ }
+ }
+ }
+ }
+
+ return { path: current_path, node: current };
+ },getPath:function (node) {
+ var path;
+ while (true) {
+ if (FS.isRoot(node)) {
+ var mount = node.mount.mountpoint;
+ if (!path) return mount;
+ return mount[mount.length-1] !== '/' ? mount + '/' + path : mount + path;
+ }
+ path = path ? node.name + '/' + path : node.name;
+ node = node.parent;
+ }
+ },hashName:function (parentid, name) {
+ var hash = 0;
+
+
+ for (var i = 0; i < name.length; i++) {
+ hash = ((hash << 5) - hash + name.charCodeAt(i)) | 0;
+ }
+ return ((parentid + hash) >>> 0) % FS.nameTable.length;
+ },hashAddNode:function (node) {
+ var hash = FS.hashName(node.parent.id, node.name);
+ node.name_next = FS.nameTable[hash];
+ FS.nameTable[hash] = node;
+ },hashRemoveNode:function (node) {
+ var hash = FS.hashName(node.parent.id, node.name);
+ if (FS.nameTable[hash] === node) {
+ FS.nameTable[hash] = node.name_next;
+ } else {
+ var current = FS.nameTable[hash];
+ while (current) {
+ if (current.name_next === node) {
+ current.name_next = node.name_next;
+ break;
+ }
+ current = current.name_next;
+ }
+ }
+ },lookupNode:function (parent, name) {
+ var err = FS.mayLookup(parent);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ var hash = FS.hashName(parent.id, name);
+ for (var node = FS.nameTable[hash]; node; node = node.name_next) {
+ var nodeName = node.name;
+ if (node.parent.id === parent.id && nodeName === name) {
+ return node;
+ }
+ }
+ // if we failed to find it in the cache, call into the VFS
+ return FS.lookup(parent, name);
+ },createNode:function (parent, name, mode, rdev) {
+ if (!FS.FSNode) {
+ FS.FSNode = function(parent, name, mode, rdev) {
+ if (!parent) {
+ parent = this; // root node sets parent to itself
+ }
+ this.parent = parent;
+ this.mount = parent.mount;
+ this.mounted = null;
+ this.id = FS.nextInode++;
+ this.name = name;
+ this.mode = mode;
+ this.node_ops = {};
+ this.stream_ops = {};
+ this.rdev = rdev;
+ };
+
+ FS.FSNode.prototype = {};
+
+ // compatibility
+ var readMode = 292 | 73;
+ var writeMode = 146;
+
+ // NOTE we must use Object.defineProperties instead of individual calls to
+ // Object.defineProperty in order to make closure compiler happy
+ Object.defineProperties(FS.FSNode.prototype, {
+ read: {
+ get: function() { return (this.mode & readMode) === readMode; },
+ set: function(val) { val ? this.mode |= readMode : this.mode &= ~readMode; }
+ },
+ write: {
+ get: function() { return (this.mode & writeMode) === writeMode; },
+ set: function(val) { val ? this.mode |= writeMode : this.mode &= ~writeMode; }
+ },
+ isFolder: {
+ get: function() { return FS.isDir(this.mode); },
+ },
+ isDevice: {
+ get: function() { return FS.isChrdev(this.mode); },
+ },
+ });
+ }
+
+ var node = new FS.FSNode(parent, name, mode, rdev);
+
+ FS.hashAddNode(node);
+
+ return node;
+ },destroyNode:function (node) {
+ FS.hashRemoveNode(node);
+ },isRoot:function (node) {
+ return node === node.parent;
+ },isMountpoint:function (node) {
+ return !!node.mounted;
+ },isFile:function (mode) {
+ return (mode & 61440) === 32768;
+ },isDir:function (mode) {
+ return (mode & 61440) === 16384;
+ },isLink:function (mode) {
+ return (mode & 61440) === 40960;
+ },isChrdev:function (mode) {
+ return (mode & 61440) === 8192;
+ },isBlkdev:function (mode) {
+ return (mode & 61440) === 24576;
+ },isFIFO:function (mode) {
+ return (mode & 61440) === 4096;
+ },isSocket:function (mode) {
+ return (mode & 49152) === 49152;
+ },flagModes:{"r":0,"rs":1052672,"r+":2,"w":577,"wx":705,"xw":705,"w+":578,"wx+":706,"xw+":706,"a":1089,"ax":1217,"xa":1217,"a+":1090,"ax+":1218,"xa+":1218},modeStringToFlags:function (str) {
+ var flags = FS.flagModes[str];
+ if (typeof flags === 'undefined') {
+ throw new Error('Unknown file open mode: ' + str);
+ }
+ return flags;
+ },flagsToPermissionString:function (flag) {
+ var accmode = flag & 2097155;
+ var perms = ['r', 'w', 'rw'][accmode];
+ if ((flag & 512)) {
+ perms += 'w';
+ }
+ return perms;
+ },nodePermissions:function (node, perms) {
+ if (FS.ignorePermissions) {
+ return 0;
+ }
+ // return 0 if any user, group or owner bits are set.
+ if (perms.indexOf('r') !== -1 && !(node.mode & 292)) {
+ return ERRNO_CODES.EACCES;
+ } else if (perms.indexOf('w') !== -1 && !(node.mode & 146)) {
+ return ERRNO_CODES.EACCES;
+ } else if (perms.indexOf('x') !== -1 && !(node.mode & 73)) {
+ return ERRNO_CODES.EACCES;
+ }
+ return 0;
+ },mayLookup:function (dir) {
+ return FS.nodePermissions(dir, 'x');
+ },mayCreate:function (dir, name) {
+ try {
+ var node = FS.lookupNode(dir, name);
+ return ERRNO_CODES.EEXIST;
+ } catch (e) {
+ }
+ return FS.nodePermissions(dir, 'wx');
+ },mayDelete:function (dir, name, isdir) {
+ var node;
+ try {
+ node = FS.lookupNode(dir, name);
+ } catch (e) {
+ return e.errno;
+ }
+ var err = FS.nodePermissions(dir, 'wx');
+ if (err) {
+ return err;
+ }
+ if (isdir) {
+ if (!FS.isDir(node.mode)) {
+ return ERRNO_CODES.ENOTDIR;
+ }
+ if (FS.isRoot(node) || FS.getPath(node) === FS.cwd()) {
+ return ERRNO_CODES.EBUSY;
+ }
+ } else {
+ if (FS.isDir(node.mode)) {
+ return ERRNO_CODES.EISDIR;
+ }
+ }
+ return 0;
+ },mayOpen:function (node, flags) {
+ if (!node) {
+ return ERRNO_CODES.ENOENT;
+ }
+ if (FS.isLink(node.mode)) {
+ return ERRNO_CODES.ELOOP;
+ } else if (FS.isDir(node.mode)) {
+ if ((flags & 2097155) !== 0 || // opening for write
+ (flags & 512)) {
+ return ERRNO_CODES.EISDIR;
+ }
+ }
+ return FS.nodePermissions(node, FS.flagsToPermissionString(flags));
+ },MAX_OPEN_FDS:4096,nextfd:function (fd_start, fd_end) {
+ fd_start = fd_start || 0;
+ fd_end = fd_end || FS.MAX_OPEN_FDS;
+ for (var fd = fd_start; fd <= fd_end; fd++) {
+ if (!FS.streams[fd]) {
+ return fd;
+ }
+ }
+ throw new FS.ErrnoError(ERRNO_CODES.EMFILE);
+ },getStream:function (fd) {
+ return FS.streams[fd];
+ },createStream:function (stream, fd_start, fd_end) {
+ if (!FS.FSStream) {
+ FS.FSStream = function(){};
+ FS.FSStream.prototype = {};
+ // compatibility
+ Object.defineProperties(FS.FSStream.prototype, {
+ object: {
+ get: function() { return this.node; },
+ set: function(val) { this.node = val; }
+ },
+ isRead: {
+ get: function() { return (this.flags & 2097155) !== 1; }
+ },
+ isWrite: {
+ get: function() { return (this.flags & 2097155) !== 0; }
+ },
+ isAppend: {
+ get: function() { return (this.flags & 1024); }
+ }
+ });
+ }
+ if (0) {
+ // reuse the object
+ stream.__proto__ = FS.FSStream.prototype;
+ } else {
+ var newStream = new FS.FSStream();
+ for (var p in stream) {
+ newStream[p] = stream[p];
+ }
+ stream = newStream;
+ }
+ var fd = FS.nextfd(fd_start, fd_end);
+ stream.fd = fd;
+ FS.streams[fd] = stream;
+ return stream;
+ },closeStream:function (fd) {
+ FS.streams[fd] = null;
+ },getStreamFromPtr:function (ptr) {
+ return FS.streams[ptr - 1];
+ },getPtrForStream:function (stream) {
+ return stream ? stream.fd + 1 : 0;
+ },chrdev_stream_ops:{open:function (stream) {
+ var device = FS.getDevice(stream.node.rdev);
+ // override node's stream ops with the device's
+ stream.stream_ops = device.stream_ops;
+ // forward the open call
+ if (stream.stream_ops.open) {
+ stream.stream_ops.open(stream);
+ }
+ },llseek:function () {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }},major:function (dev) {
+ return ((dev) >> 8);
+ },minor:function (dev) {
+ return ((dev) & 0xff);
+ },makedev:function (ma, mi) {
+ return ((ma) << 8 | (mi));
+ },registerDevice:function (dev, ops) {
+ FS.devices[dev] = { stream_ops: ops };
+ },getDevice:function (dev) {
+ return FS.devices[dev];
+ },getMounts:function (mount) {
+ var mounts = [];
+ var check = [mount];
+
+ while (check.length) {
+ var m = check.pop();
+
+ mounts.push(m);
+
+ check.push.apply(check, m.mounts);
+ }
+
+ return mounts;
+ },syncfs:function (populate, callback) {
+ if (typeof(populate) === 'function') {
+ callback = populate;
+ populate = false;
+ }
+
+ var mounts = FS.getMounts(FS.root.mount);
+ var completed = 0;
+
+ function done(err) {
+ if (err) {
+ if (!done.errored) {
+ done.errored = true;
+ return callback(err);
+ }
+ return;
+ }
+ if (++completed >= mounts.length) {
+ callback(null);
+ }
+ };
+
+ // sync all mounts
+ mounts.forEach(function (mount) {
+ if (!mount.type.syncfs) {
+ return done(null);
+ }
+ mount.type.syncfs(mount, populate, done);
+ });
+ },mount:function (type, opts, mountpoint) {
+ var root = mountpoint === '/';
+ var pseudo = !mountpoint;
+ var node;
+
+ if (root && FS.root) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ } else if (!root && !pseudo) {
+ var lookup = FS.lookupPath(mountpoint, { follow_mount: false });
+
+ mountpoint = lookup.path; // use the absolute path
+ node = lookup.node;
+
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+
+ if (!FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTDIR);
+ }
+ }
+
+ var mount = {
+ type: type,
+ opts: opts,
+ mountpoint: mountpoint,
+ mounts: []
+ };
+
+ // create a root node for the fs
+ var mountRoot = type.mount(mount);
+ mountRoot.mount = mount;
+ mount.root = mountRoot;
+
+ if (root) {
+ FS.root = mountRoot;
+ } else if (node) {
+ // set as a mountpoint
+ node.mounted = mount;
+
+ // add the new mount to the current mount's children
+ if (node.mount) {
+ node.mount.mounts.push(mount);
+ }
+ }
+
+ return mountRoot;
+ },unmount:function (mountpoint) {
+ var lookup = FS.lookupPath(mountpoint, { follow_mount: false });
+
+ if (!FS.isMountpoint(lookup.node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+
+ // destroy the nodes for this mount, and all its child mounts
+ var node = lookup.node;
+ var mount = node.mounted;
+ var mounts = FS.getMounts(mount);
+
+ Object.keys(FS.nameTable).forEach(function (hash) {
+ var current = FS.nameTable[hash];
+
+ while (current) {
+ var next = current.name_next;
+
+ if (mounts.indexOf(current.mount) !== -1) {
+ FS.destroyNode(current);
+ }
+
+ current = next;
+ }
+ });
+
+ // no longer a mountpoint
+ node.mounted = null;
+
+ // remove this mount from the child mounts
+ var idx = node.mount.mounts.indexOf(mount);
+ assert(idx !== -1);
+ node.mount.mounts.splice(idx, 1);
+ },lookup:function (parent, name) {
+ return parent.node_ops.lookup(parent, name);
+ },mknod:function (path, mode, dev) {
+ var lookup = FS.lookupPath(path, { parent: true });
+ var parent = lookup.node;
+ var name = PATH.basename(path);
+ var err = FS.mayCreate(parent, name);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.mknod) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ return parent.node_ops.mknod(parent, name, mode, dev);
+ },create:function (path, mode) {
+ mode = mode !== undefined ? mode : 438 /* 0666 */;
+ mode &= 4095;
+ mode |= 32768;
+ return FS.mknod(path, mode, 0);
+ },mkdir:function (path, mode) {
+ mode = mode !== undefined ? mode : 511 /* 0777 */;
+ mode &= 511 | 512;
+ mode |= 16384;
+ return FS.mknod(path, mode, 0);
+ },mkdev:function (path, mode, dev) {
+ if (typeof(dev) === 'undefined') {
+ dev = mode;
+ mode = 438 /* 0666 */;
+ }
+ mode |= 8192;
+ return FS.mknod(path, mode, dev);
+ },symlink:function (oldpath, newpath) {
+ var lookup = FS.lookupPath(newpath, { parent: true });
+ var parent = lookup.node;
+ var newname = PATH.basename(newpath);
+ var err = FS.mayCreate(parent, newname);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.symlink) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ return parent.node_ops.symlink(parent, newname, oldpath);
+ },rename:function (old_path, new_path) {
+ var old_dirname = PATH.dirname(old_path);
+ var new_dirname = PATH.dirname(new_path);
+ var old_name = PATH.basename(old_path);
+ var new_name = PATH.basename(new_path);
+ // parents must exist
+ var lookup, old_dir, new_dir;
+ try {
+ lookup = FS.lookupPath(old_path, { parent: true });
+ old_dir = lookup.node;
+ lookup = FS.lookupPath(new_path, { parent: true });
+ new_dir = lookup.node;
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ // need to be part of the same mount
+ if (old_dir.mount !== new_dir.mount) {
+ throw new FS.ErrnoError(ERRNO_CODES.EXDEV);
+ }
+ // source must exist
+ var old_node = FS.lookupNode(old_dir, old_name);
+ // old path should not be an ancestor of the new path
+ var relative = PATH.relative(old_path, new_dirname);
+ if (relative.charAt(0) !== '.') {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ // new path should not be an ancestor of the old path
+ relative = PATH.relative(new_path, old_dirname);
+ if (relative.charAt(0) !== '.') {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTEMPTY);
+ }
+ // see if the new path already exists
+ var new_node;
+ try {
+ new_node = FS.lookupNode(new_dir, new_name);
+ } catch (e) {
+ // not fatal
+ }
+ // early out if nothing needs to change
+ if (old_node === new_node) {
+ return;
+ }
+ // we'll need to delete the old entry
+ var isdir = FS.isDir(old_node.mode);
+ var err = FS.mayDelete(old_dir, old_name, isdir);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ // need delete permissions if we'll be overwriting.
+ // need create permissions if new doesn't already exist.
+ err = new_node ?
+ FS.mayDelete(new_dir, new_name, isdir) :
+ FS.mayCreate(new_dir, new_name);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!old_dir.node_ops.rename) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isMountpoint(old_node) || (new_node && FS.isMountpoint(new_node))) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ // if we are going to change the parent, check write permissions
+ if (new_dir !== old_dir) {
+ err = FS.nodePermissions(old_dir, 'w');
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ }
+ // remove the node from the lookup hash
+ FS.hashRemoveNode(old_node);
+ // do the underlying fs rename
+ try {
+ old_dir.node_ops.rename(old_node, new_dir, new_name);
+ } catch (e) {
+ throw e;
+ } finally {
+ // add the node back to the hash (in case node_ops.rename
+ // changed its name)
+ FS.hashAddNode(old_node);
+ }
+ },rmdir:function (path) {
+ var lookup = FS.lookupPath(path, { parent: true });
+ var parent = lookup.node;
+ var name = PATH.basename(path);
+ var node = FS.lookupNode(parent, name);
+ var err = FS.mayDelete(parent, name, true);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.rmdir) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ parent.node_ops.rmdir(parent, name);
+ FS.destroyNode(node);
+ },readdir:function (path) {
+ var lookup = FS.lookupPath(path, { follow: true });
+ var node = lookup.node;
+ if (!node.node_ops.readdir) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTDIR);
+ }
+ return node.node_ops.readdir(node);
+ },unlink:function (path) {
+ var lookup = FS.lookupPath(path, { parent: true });
+ var parent = lookup.node;
+ var name = PATH.basename(path);
+ var node = FS.lookupNode(parent, name);
+ var err = FS.mayDelete(parent, name, false);
+ if (err) {
+ // POSIX says unlink should set EPERM, not EISDIR
+ if (err === ERRNO_CODES.EISDIR) err = ERRNO_CODES.EPERM;
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.unlink) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ parent.node_ops.unlink(parent, name);
+ FS.destroyNode(node);
+ },readlink:function (path) {
+ var lookup = FS.lookupPath(path);
+ var link = lookup.node;
+ if (!link.node_ops.readlink) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ return link.node_ops.readlink(link);
+ },stat:function (path, dontFollow) {
+ var lookup = FS.lookupPath(path, { follow: !dontFollow });
+ var node = lookup.node;
+ if (!node.node_ops.getattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ return node.node_ops.getattr(node);
+ },lstat:function (path) {
+ return FS.stat(path, true);
+ },chmod:function (path, mode, dontFollow) {
+ var node;
+ if (typeof path === 'string') {
+ var lookup = FS.lookupPath(path, { follow: !dontFollow });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ if (!node.node_ops.setattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ node.node_ops.setattr(node, {
+ mode: (mode & 4095) | (node.mode & ~4095),
+ timestamp: Date.now()
+ });
+ },lchmod:function (path, mode) {
+ FS.chmod(path, mode, true);
+ },fchmod:function (fd, mode) {
+ var stream = FS.getStream(fd);
+ if (!stream) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ FS.chmod(stream.node, mode);
+ },chown:function (path, uid, gid, dontFollow) {
+ var node;
+ if (typeof path === 'string') {
+ var lookup = FS.lookupPath(path, { follow: !dontFollow });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ if (!node.node_ops.setattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ node.node_ops.setattr(node, {
+ timestamp: Date.now()
+ // we ignore the uid / gid for now
+ });
+ },lchown:function (path, uid, gid) {
+ FS.chown(path, uid, gid, true);
+ },fchown:function (fd, uid, gid) {
+ var stream = FS.getStream(fd);
+ if (!stream) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ FS.chown(stream.node, uid, gid);
+ },truncate:function (path, len) {
+ if (len < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var node;
+ if (typeof path === 'string') {
+ var lookup = FS.lookupPath(path, { follow: true });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ if (!node.node_ops.setattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EISDIR);
+ }
+ if (!FS.isFile(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var err = FS.nodePermissions(node, 'w');
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ node.node_ops.setattr(node, {
+ size: len,
+ timestamp: Date.now()
+ });
+ },ftruncate:function (fd, len) {
+ var stream = FS.getStream(fd);
+ if (!stream) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ FS.truncate(stream.node, len);
+ },utime:function (path, atime, mtime) {
+ var lookup = FS.lookupPath(path, { follow: true });
+ var node = lookup.node;
+ node.node_ops.setattr(node, {
+ timestamp: Math.max(atime, mtime)
+ });
+ },open:function (path, flags, mode, fd_start, fd_end) {
+ flags = typeof flags === 'string' ? FS.modeStringToFlags(flags) : flags;
+ mode = typeof mode === 'undefined' ? 438 /* 0666 */ : mode;
+ if ((flags & 64)) {
+ mode = (mode & 4095) | 32768;
+ } else {
+ mode = 0;
+ }
+ var node;
+ if (typeof path === 'object') {
+ node = path;
+ } else {
+ path = PATH.normalize(path);
+ try {
+ var lookup = FS.lookupPath(path, {
+ follow: !(flags & 131072)
+ });
+ node = lookup.node;
+ } catch (e) {
+ // ignore
+ }
+ }
+ // perhaps we need to create the node
+ if ((flags & 64)) {
+ if (node) {
+ // if O_CREAT and O_EXCL are set, error out if the node already exists
+ if ((flags & 128)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EEXIST);
+ }
+ } else {
+ // node doesn't exist, try to create it
+ node = FS.mknod(path, mode, 0);
+ }
+ }
+ if (!node) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOENT);
+ }
+ // can't truncate a device
+ if (FS.isChrdev(node.mode)) {
+ flags &= ~512;
+ }
+ // check permissions
+ var err = FS.mayOpen(node, flags);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ // do truncation if necessary
+ if ((flags & 512)) {
+ FS.truncate(node, 0);
+ }
+ // we've already handled these, don't pass down to the underlying vfs
+ flags &= ~(128 | 512);
+
+ // register the stream with the filesystem
+ var stream = FS.createStream({
+ node: node,
+ path: FS.getPath(node), // we want the absolute path to the node
+ flags: flags,
+ seekable: true,
+ position: 0,
+ stream_ops: node.stream_ops,
+ // used by the file family libc calls (fopen, fwrite, ferror, etc.)
+ ungotten: [],
+ error: false
+ }, fd_start, fd_end);
+ // call the new stream's open function
+ if (stream.stream_ops.open) {
+ stream.stream_ops.open(stream);
+ }
+ if (Module['logReadFiles'] && !(flags & 1)) {
+ if (!FS.readFiles) FS.readFiles = {};
+ if (!(path in FS.readFiles)) {
+ FS.readFiles[path] = 1;
+ Module['printErr']('read file: ' + path);
+ }
+ }
+ return stream;
+ },close:function (stream) {
+ try {
+ if (stream.stream_ops.close) {
+ stream.stream_ops.close(stream);
+ }
+ } catch (e) {
+ throw e;
+ } finally {
+ FS.closeStream(stream.fd);
+ }
+ },llseek:function (stream, offset, whence) {
+ if (!stream.seekable || !stream.stream_ops.llseek) {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }
+ return stream.stream_ops.llseek(stream, offset, whence);
+ },read:function (stream, buffer, offset, length, position) {
+ if (length < 0 || position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ if ((stream.flags & 2097155) === 1) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if (FS.isDir(stream.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EISDIR);
+ }
+ if (!stream.stream_ops.read) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var seeking = true;
+ if (typeof position === 'undefined') {
+ position = stream.position;
+ seeking = false;
+ } else if (!stream.seekable) {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }
+ var bytesRead = stream.stream_ops.read(stream, buffer, offset, length, position);
+ if (!seeking) stream.position += bytesRead;
+ return bytesRead;
+ },write:function (stream, buffer, offset, length, position, canOwn) {
+ if (length < 0 || position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if (FS.isDir(stream.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EISDIR);
+ }
+ if (!stream.stream_ops.write) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var seeking = true;
+ if (typeof position === 'undefined') {
+ position = stream.position;
+ seeking = false;
+ } else if (!stream.seekable) {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }
+ if (stream.flags & 1024) {
+ // seek to the end before writing in append mode
+ FS.llseek(stream, 0, 2);
+ }
+ var bytesWritten = stream.stream_ops.write(stream, buffer, offset, length, position, canOwn);
+ if (!seeking) stream.position += bytesWritten;
+ return bytesWritten;
+ },allocate:function (stream, offset, length) {
+ if (offset < 0 || length <= 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if (!FS.isFile(stream.node.mode) && !FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ if (!stream.stream_ops.allocate) {
+ throw new FS.ErrnoError(ERRNO_CODES.EOPNOTSUPP);
+ }
+ stream.stream_ops.allocate(stream, offset, length);
+ },mmap:function (stream, buffer, offset, length, position, prot, flags) {
+ // TODO if PROT is PROT_WRITE, make sure we have write access
+ if ((stream.flags & 2097155) === 1) {
+ throw new FS.ErrnoError(ERRNO_CODES.EACCES);
+ }
+ if (!stream.stream_ops.mmap) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ return stream.stream_ops.mmap(stream, buffer, offset, length, position, prot, flags);
+ },ioctl:function (stream, cmd, arg) {
+ if (!stream.stream_ops.ioctl) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTTY);
+ }
+ return stream.stream_ops.ioctl(stream, cmd, arg);
+ },readFile:function (path, opts) {
+ opts = opts || {};
+ opts.flags = opts.flags || 'r';
+ opts.encoding = opts.encoding || 'binary';
+ if (opts.encoding !== 'utf8' && opts.encoding !== 'binary') {
+ throw new Error('Invalid encoding type "' + opts.encoding + '"');
+ }
+ var ret;
+ var stream = FS.open(path, opts.flags);
+ var stat = FS.stat(path);
+ var length = stat.size;
+ var buf = new Uint8Array(length);
+ FS.read(stream, buf, 0, length, 0);
+ if (opts.encoding === 'utf8') {
+ ret = '';
+ var utf8 = new Runtime.UTF8Processor();
+ for (var i = 0; i < length; i++) {
+ ret += utf8.processCChar(buf[i]);
+ }
+ } else if (opts.encoding === 'binary') {
+ ret = buf;
+ }
+ FS.close(stream);
+ return ret;
+ },writeFile:function (path, data, opts) {
+ opts = opts || {};
+ opts.flags = opts.flags || 'w';
+ opts.encoding = opts.encoding || 'utf8';
+ if (opts.encoding !== 'utf8' && opts.encoding !== 'binary') {
+ throw new Error('Invalid encoding type "' + opts.encoding + '"');
+ }
+ var stream = FS.open(path, opts.flags, opts.mode);
+ if (opts.encoding === 'utf8') {
+ var utf8 = new Runtime.UTF8Processor();
+ var buf = new Uint8Array(utf8.processJSString(data));
+ FS.write(stream, buf, 0, buf.length, 0, opts.canOwn);
+ } else if (opts.encoding === 'binary') {
+ FS.write(stream, data, 0, data.length, 0, opts.canOwn);
+ }
+ FS.close(stream);
+ },cwd:function () {
+ return FS.currentPath;
+ },chdir:function (path) {
+ var lookup = FS.lookupPath(path, { follow: true });
+ if (!FS.isDir(lookup.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTDIR);
+ }
+ var err = FS.nodePermissions(lookup.node, 'x');
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ FS.currentPath = lookup.path;
+ },createDefaultDirectories:function () {
+ FS.mkdir('/tmp');
+ },createDefaultDevices:function () {
+ // create /dev
+ FS.mkdir('/dev');
+ // setup /dev/null
+ FS.registerDevice(FS.makedev(1, 3), {
+ read: function() { return 0; },
+ write: function() { return 0; }
+ });
+ FS.mkdev('/dev/null', FS.makedev(1, 3));
+ // setup /dev/tty and /dev/tty1
+ // stderr needs to print output using Module['printErr']
+ // so we register a second tty just for it.
+ TTY.register(FS.makedev(5, 0), TTY.default_tty_ops);
+ TTY.register(FS.makedev(6, 0), TTY.default_tty1_ops);
+ FS.mkdev('/dev/tty', FS.makedev(5, 0));
+ FS.mkdev('/dev/tty1', FS.makedev(6, 0));
+ // we're not going to emulate the actual shm device,
+ // just create the tmp dirs that reside in it commonly
+ FS.mkdir('/dev/shm');
+ FS.mkdir('/dev/shm/tmp');
+ },createStandardStreams:function () {
+ // TODO deprecate the old functionality of a single
+ // input / output callback and that utilizes FS.createDevice
+ // and instead require a unique set of stream ops
+
+ // by default, we symlink the standard streams to the
+ // default tty devices. however, if the standard streams
+ // have been overwritten we create a unique device for
+ // them instead.
+ if (Module['stdin']) {
+ FS.createDevice('/dev', 'stdin', Module['stdin']);
+ } else {
+ FS.symlink('/dev/tty', '/dev/stdin');
+ }
+ if (Module['stdout']) {
+ FS.createDevice('/dev', 'stdout', null, Module['stdout']);
+ } else {
+ FS.symlink('/dev/tty', '/dev/stdout');
+ }
+ if (Module['stderr']) {
+ FS.createDevice('/dev', 'stderr', null, Module['stderr']);
+ } else {
+ FS.symlink('/dev/tty1', '/dev/stderr');
+ }
+
+ // open default streams for the stdin, stdout and stderr devices
+ var stdin = FS.open('/dev/stdin', 'r');
+ HEAP32[((_stdin)>>2)]=FS.getPtrForStream(stdin);
+ assert(stdin.fd === 0, 'invalid handle for stdin (' + stdin.fd + ')');
+
+ var stdout = FS.open('/dev/stdout', 'w');
+ HEAP32[((_stdout)>>2)]=FS.getPtrForStream(stdout);
+ assert(stdout.fd === 1, 'invalid handle for stdout (' + stdout.fd + ')');
+
+ var stderr = FS.open('/dev/stderr', 'w');
+ HEAP32[((_stderr)>>2)]=FS.getPtrForStream(stderr);
+ assert(stderr.fd === 2, 'invalid handle for stderr (' + stderr.fd + ')');
+ },ensureErrnoError:function () {
+ if (FS.ErrnoError) return;
+ FS.ErrnoError = function ErrnoError(errno) {
+ this.errno = errno;
+ for (var key in ERRNO_CODES) {
+ if (ERRNO_CODES[key] === errno) {
+ this.code = key;
+ break;
+ }
+ }
+ this.message = ERRNO_MESSAGES[errno];
+ };
+ FS.ErrnoError.prototype = new Error();
+ FS.ErrnoError.prototype.constructor = FS.ErrnoError;
+ // Some errors may happen quite a bit, to avoid overhead we reuse them (and suffer a lack of stack info)
+ [ERRNO_CODES.ENOENT].forEach(function(code) {
+ FS.genericErrors[code] = new FS.ErrnoError(code);
+ FS.genericErrors[code].stack = '<generic error, no stack>';
+ });
+ },staticInit:function () {
+ FS.ensureErrnoError();
+
+ FS.nameTable = new Array(4096);
+
+ FS.mount(MEMFS, {}, '/');
+
+ FS.createDefaultDirectories();
+ FS.createDefaultDevices();
+ },init:function (input, output, error) {
+ assert(!FS.init.initialized, 'FS.init was previously called. If you want to initialize later with custom parameters, remove any earlier calls (note that one is automatically added to the generated code)');
+ FS.init.initialized = true;
+
+ FS.ensureErrnoError();
+
+ // Allow Module.stdin etc. to provide defaults, if none explicitly passed to us here
+ Module['stdin'] = input || Module['stdin'];
+ Module['stdout'] = output || Module['stdout'];
+ Module['stderr'] = error || Module['stderr'];
+
+ FS.createStandardStreams();
+ },quit:function () {
+ FS.init.initialized = false;
+ for (var i = 0; i < FS.streams.length; i++) {
+ var stream = FS.streams[i];
+ if (!stream) {
+ continue;
+ }
+ FS.close(stream);
+ }
+ },getMode:function (canRead, canWrite) {
+ var mode = 0;
+ if (canRead) mode |= 292 | 73;
+ if (canWrite) mode |= 146;
+ return mode;
+ },joinPath:function (parts, forceRelative) {
+ var path = PATH.join.apply(null, parts);
+ if (forceRelative && path[0] == '/') path = path.substr(1);
+ return path;
+ },absolutePath:function (relative, base) {
+ return PATH.resolve(base, relative);
+ },standardizePath:function (path) {
+ return PATH.normalize(path);
+ },findObject:function (path, dontResolveLastLink) {
+ var ret = FS.analyzePath(path, dontResolveLastLink);
+ if (ret.exists) {
+ return ret.object;
+ } else {
+ ___setErrNo(ret.error);
+ return null;
+ }
+ },analyzePath:function (path, dontResolveLastLink) {
+ // operate from within the context of the symlink's target
+ try {
+ var lookup = FS.lookupPath(path, { follow: !dontResolveLastLink });
+ path = lookup.path;
+ } catch (e) {
+ }
+ var ret = {
+ isRoot: false, exists: false, error: 0, name: null, path: null, object: null,
+ parentExists: false, parentPath: null, parentObject: null
+ };
+ try {
+ var lookup = FS.lookupPath(path, { parent: true });
+ ret.parentExists = true;
+ ret.parentPath = lookup.path;
+ ret.parentObject = lookup.node;
+ ret.name = PATH.basename(path);
+ lookup = FS.lookupPath(path, { follow: !dontResolveLastLink });
+ ret.exists = true;
+ ret.path = lookup.path;
+ ret.object = lookup.node;
+ ret.name = lookup.node.name;
+ ret.isRoot = lookup.path === '/';
+ } catch (e) {
+ ret.error = e.errno;
+ };
+ return ret;
+ },createFolder:function (parent, name, canRead, canWrite) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ var mode = FS.getMode(canRead, canWrite);
+ return FS.mkdir(path, mode);
+ },createPath:function (parent, path, canRead, canWrite) {
+ parent = typeof parent === 'string' ? parent : FS.getPath(parent);
+ var parts = path.split('/').reverse();
+ while (parts.length) {
+ var part = parts.pop();
+ if (!part) continue;
+ var current = PATH.join2(parent, part);
+ try {
+ FS.mkdir(current);
+ } catch (e) {
+ // ignore EEXIST
+ }
+ parent = current;
+ }
+ return current;
+ },createFile:function (parent, name, properties, canRead, canWrite) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ var mode = FS.getMode(canRead, canWrite);
+ return FS.create(path, mode);
+ },createDataFile:function (parent, name, data, canRead, canWrite, canOwn) {
+ var path = name ? PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name) : parent;
+ var mode = FS.getMode(canRead, canWrite);
+ var node = FS.create(path, mode);
+ if (data) {
+ if (typeof data === 'string') {
+ var arr = new Array(data.length);
+ for (var i = 0, len = data.length; i < len; ++i) arr[i] = data.charCodeAt(i);
+ data = arr;
+ }
+ // make sure we can write to the file
+ FS.chmod(node, mode | 146);
+ var stream = FS.open(node, 'w');
+ FS.write(stream, data, 0, data.length, 0, canOwn);
+ FS.close(stream);
+ FS.chmod(node, mode);
+ }
+ return node;
+ },createDevice:function (parent, name, input, output) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ var mode = FS.getMode(!!input, !!output);
+ if (!FS.createDevice.major) FS.createDevice.major = 64;
+ var dev = FS.makedev(FS.createDevice.major++, 0);
+ // Create a fake device that a set of stream ops to emulate
+ // the old behavior.
+ FS.registerDevice(dev, {
+ open: function(stream) {
+ stream.seekable = false;
+ },
+ close: function(stream) {
+ // flush any pending line data
+ if (output && output.buffer && output.buffer.length) {
+ output(10);
+ }
+ },
+ read: function(stream, buffer, offset, length, pos /* ignored */) {
+ var bytesRead = 0;
+ for (var i = 0; i < length; i++) {
+ var result;
+ try {
+ result = input();
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ if (result === undefined && bytesRead === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ if (result === null || result === undefined) break;
+ bytesRead++;
+ buffer[offset+i] = result;
+ }
+ if (bytesRead) {
+ stream.node.timestamp = Date.now();
+ }
+ return bytesRead;
+ },
+ write: function(stream, buffer, offset, length, pos) {
+ for (var i = 0; i < length; i++) {
+ try {
+ output(buffer[offset+i]);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ }
+ if (length) {
+ stream.node.timestamp = Date.now();
+ }
+ return i;
+ }
+ });
+ return FS.mkdev(path, mode, dev);
+ },createLink:function (parent, name, target, canRead, canWrite) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ return FS.symlink(target, path);
+ },forceLoadFile:function (obj) {
+ if (obj.isDevice || obj.isFolder || obj.link || obj.contents) return true;
+ var success = true;
+ if (typeof XMLHttpRequest !== 'undefined') {
+ throw new Error("Lazy loading should have been performed (contents set) in createLazyFile, but it was not. Lazy loading only works in web workers. Use --embed-file or --preload-file in emcc on the main thread.");
+ } else if (Module['read']) {
+ // Command-line.
+ try {
+ // WARNING: Can't read binary files in V8's d8 or tracemonkey's js, as
+ // read() will try to parse UTF8.
+ obj.contents = intArrayFromString(Module['read'](obj.url), true);
+ } catch (e) {
+ success = false;
+ }
+ } else {
+ throw new Error('Cannot load without read() or XMLHttpRequest.');
+ }
+ if (!success) ___setErrNo(ERRNO_CODES.EIO);
+ return success;
+ },createLazyFile:function (parent, name, url, canRead, canWrite) {
+ // Lazy chunked Uint8Array (implements get and length from Uint8Array). Actual getting is abstracted away for eventual reuse.
+ function LazyUint8Array() {
+ this.lengthKnown = false;
+ this.chunks = []; // Loaded chunks. Index is the chunk number
+ }
+ LazyUint8Array.prototype.get = function LazyUint8Array_get(idx) {
+ if (idx > this.length-1 || idx < 0) {
+ return undefined;
+ }
+ var chunkOffset = idx % this.chunkSize;
+ var chunkNum = Math.floor(idx / this.chunkSize);
+ return this.getter(chunkNum)[chunkOffset];
+ }
+ LazyUint8Array.prototype.setDataGetter = function LazyUint8Array_setDataGetter(getter) {
+ this.getter = getter;
+ }
+ LazyUint8Array.prototype.cacheLength = function LazyUint8Array_cacheLength() {
+ // Find length
+ var xhr = new XMLHttpRequest();
+ xhr.open('HEAD', url, false);
+ xhr.send(null);
+ if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". Status: " + xhr.status);
+ var datalength = Number(xhr.getResponseHeader("Content-length"));
+ var header;
+ var hasByteServing = (header = xhr.getResponseHeader("Accept-Ranges")) && header === "bytes";
+ var chunkSize = 1024*1024; // Chunk size in bytes
+
+ if (!hasByteServing) chunkSize = datalength;
+
+ // Function to get a range from the remote URL.
+ var doXHR = (function(from, to) {
+ if (from > to) throw new Error("invalid range (" + from + ", " + to + ") or no bytes requested!");
+ if (to > datalength-1) throw new Error("only " + datalength + " bytes available! programmer error!");
+
+ // TODO: Use mozResponseArrayBuffer, responseStream, etc. if available.
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, false);
+ if (datalength !== chunkSize) xhr.setRequestHeader("Range", "bytes=" + from + "-" + to);
+
+ // Some hints to the browser that we want binary data.
+ if (typeof Uint8Array != 'undefined') xhr.responseType = 'arraybuffer';
+ if (xhr.overrideMimeType) {
+ xhr.overrideMimeType('text/plain; charset=x-user-defined');
+ }
+
+ xhr.send(null);
+ if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". Status: " + xhr.status);
+ if (xhr.response !== undefined) {
+ return new Uint8Array(xhr.response || []);
+ } else {
+ return intArrayFromString(xhr.responseText || '', true);
+ }
+ });
+ var lazyArray = this;
+ lazyArray.setDataGetter(function(chunkNum) {
+ var start = chunkNum * chunkSize;
+ var end = (chunkNum+1) * chunkSize - 1; // including this byte
+ end = Math.min(end, datalength-1); // if datalength-1 is selected, this is the last block
+ if (typeof(lazyArray.chunks[chunkNum]) === "undefined") {
+ lazyArray.chunks[chunkNum] = doXHR(start, end);
+ }
+ if (typeof(lazyArray.chunks[chunkNum]) === "undefined") throw new Error("doXHR failed!");
+ return lazyArray.chunks[chunkNum];
+ });
+
+ this._length = datalength;
+ this._chunkSize = chunkSize;
+ this.lengthKnown = true;
+ }
+ if (typeof XMLHttpRequest !== 'undefined') {
+ if (!ENVIRONMENT_IS_WORKER) throw 'Cannot do synchronous binary XHRs outside webworkers in modern browsers. Use --embed-file or --preload-file in emcc';
+ var lazyArray = new LazyUint8Array();
+ Object.defineProperty(lazyArray, "length", {
+ get: function() {
+ if(!this.lengthKnown) {
+ this.cacheLength();
+ }
+ return this._length;
+ }
+ });
+ Object.defineProperty(lazyArray, "chunkSize", {
+ get: function() {
+ if(!this.lengthKnown) {
+ this.cacheLength();
+ }
+ return this._chunkSize;
+ }
+ });
+
+ var properties = { isDevice: false, contents: lazyArray };
+ } else {
+ var properties = { isDevice: false, url: url };
+ }
+
+ var node = FS.createFile(parent, name, properties, canRead, canWrite);
+ // This is a total hack, but I want to get this lazy file code out of the
+ // core of MEMFS. If we want to keep this lazy file concept I feel it should
+ // be its own thin LAZYFS proxying calls to MEMFS.
+ if (properties.contents) {
+ node.contents = properties.contents;
+ } else if (properties.url) {
+ node.contents = null;
+ node.url = properties.url;
+ }
+ // override each stream op with one that tries to force load the lazy file first
+ var stream_ops = {};
+ var keys = Object.keys(node.stream_ops);
+ keys.forEach(function(key) {
+ var fn = node.stream_ops[key];
+ stream_ops[key] = function forceLoadLazyFile() {
+ if (!FS.forceLoadFile(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ return fn.apply(null, arguments);
+ };
+ });
+ // use a custom read function
+ stream_ops.read = function stream_ops_read(stream, buffer, offset, length, position) {
+ if (!FS.forceLoadFile(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ var contents = stream.node.contents;
+ if (position >= contents.length)
+ return 0;
+ var size = Math.min(contents.length - position, length);
+ assert(size >= 0);
+ if (contents.slice) { // normal array
+ for (var i = 0; i < size; i++) {
+ buffer[offset + i] = contents[position + i];
+ }
+ } else {
+ for (var i = 0; i < size; i++) { // LazyUint8Array from sync binary XHR
+ buffer[offset + i] = contents.get(position + i);
+ }
+ }
+ return size;
+ };
+ node.stream_ops = stream_ops;
+ return node;
+ },createPreloadedFile:function (parent, name, url, canRead, canWrite, onload, onerror, dontCreateFile, canOwn) {
+ Browser.init();
+ // TODO we should allow people to just pass in a complete filename instead
+ // of parent and name being that we just join them anyways
+ var fullname = name ? PATH.resolve(PATH.join2(parent, name)) : parent;
+ function processData(byteArray) {
+ function finish(byteArray) {
+ if (!dontCreateFile) {
+ FS.createDataFile(parent, name, byteArray, canRead, canWrite, canOwn);
+ }
+ if (onload) onload();
+ removeRunDependency('cp ' + fullname);
+ }
+ var handled = false;
+ Module['preloadPlugins'].forEach(function(plugin) {
+ if (handled) return;
+ if (plugin['canHandle'](fullname)) {
+ plugin['handle'](byteArray, fullname, finish, function() {
+ if (onerror) onerror();
+ removeRunDependency('cp ' + fullname);
+ });
+ handled = true;
+ }
+ });
+ if (!handled) finish(byteArray);
+ }
+ addRunDependency('cp ' + fullname);
+ if (typeof url == 'string') {
+ Browser.asyncLoad(url, function(byteArray) {
+ processData(byteArray);
+ }, onerror);
+ } else {
+ processData(url);
+ }
+ },indexedDB:function () {
+ return window.indexedDB || window.mozIndexedDB || window.webkitIndexedDB || window.msIndexedDB;
+ },DB_NAME:function () {
+ return 'EM_FS_' + window.location.pathname;
+ },DB_VERSION:20,DB_STORE_NAME:"FILE_DATA",saveFilesToDB:function (paths, onload, onerror) {
+ onload = onload || function(){};
+ onerror = onerror || function(){};
+ var indexedDB = FS.indexedDB();
+ try {
+ var openRequest = indexedDB.open(FS.DB_NAME(), FS.DB_VERSION);
+ } catch (e) {
+ return onerror(e);
+ }
+ openRequest.onupgradeneeded = function openRequest_onupgradeneeded() {
+ console.log('creating db');
+ var db = openRequest.result;
+ db.createObjectStore(FS.DB_STORE_NAME);
+ };
+ openRequest.onsuccess = function openRequest_onsuccess() {
+ var db = openRequest.result;
+ var transaction = db.transaction([FS.DB_STORE_NAME], 'readwrite');
+ var files = transaction.objectStore(FS.DB_STORE_NAME);
+ var ok = 0, fail = 0, total = paths.length;
+ function finish() {
+ if (fail == 0) onload(); else onerror();
+ }
+ paths.forEach(function(path) {
+ var putRequest = files.put(FS.analyzePath(path).object.contents, path);
+ putRequest.onsuccess = function putRequest_onsuccess() { ok++; if (ok + fail == total) finish() };
+ putRequest.onerror = function putRequest_onerror() { fail++; if (ok + fail == total) finish() };
+ });
+ transaction.onerror = onerror;
+ };
+ openRequest.onerror = onerror;
+ },loadFilesFromDB:function (paths, onload, onerror) {
+ onload = onload || function(){};
+ onerror = onerror || function(){};
+ var indexedDB = FS.indexedDB();
+ try {
+ var openRequest = indexedDB.open(FS.DB_NAME(), FS.DB_VERSION);
+ } catch (e) {
+ return onerror(e);
+ }
+ openRequest.onupgradeneeded = onerror; // no database to load from
+ openRequest.onsuccess = function openRequest_onsuccess() {
+ var db = openRequest.result;
+ try {
+ var transaction = db.transaction([FS.DB_STORE_NAME], 'readonly');
+ } catch(e) {
+ onerror(e);
+ return;
+ }
+ var files = transaction.objectStore(FS.DB_STORE_NAME);
+ var ok = 0, fail = 0, total = paths.length;
+ function finish() {
+ if (fail == 0) onload(); else onerror();
+ }
+ paths.forEach(function(path) {
+ var getRequest = files.get(path);
+ getRequest.onsuccess = function getRequest_onsuccess() {
+ if (FS.analyzePath(path).exists) {
+ FS.unlink(path);
+ }
+ FS.createDataFile(PATH.dirname(path), PATH.basename(path), getRequest.result, true, true, true);
+ ok++;
+ if (ok + fail == total) finish();
+ };
+ getRequest.onerror = function getRequest_onerror() { fail++; if (ok + fail == total) finish() };
+ });
+ transaction.onerror = onerror;
+ };
+ openRequest.onerror = onerror;
+ }};var PATH={splitPath:function (filename) {
+ var splitPathRe = /^(\/?|)([\s\S]*?)((?:\.{1,2}|[^\/]+?|)(\.[^.\/]*|))(?:[\/]*)$/;
+ return splitPathRe.exec(filename).slice(1);
+ },normalizeArray:function (parts, allowAboveRoot) {
+ // if the path tries to go above the root, `up` ends up > 0
+ var up = 0;
+ for (var i = parts.length - 1; i >= 0; i--) {
+ var last = parts[i];
+ if (last === '.') {
+ parts.splice(i, 1);
+ } else if (last === '..') {
+ parts.splice(i, 1);
+ up++;
+ } else if (up) {
+ parts.splice(i, 1);
+ up--;
+ }
+ }
+ // if the path is allowed to go above the root, restore leading ..s
+ if (allowAboveRoot) {
+ for (; up--; up) {
+ parts.unshift('..');
+ }
+ }
+ return parts;
+ },normalize:function (path) {
+ var isAbsolute = path.charAt(0) === '/',
+ trailingSlash = path.substr(-1) === '/';
+ // Normalize the path
+ path = PATH.normalizeArray(path.split('/').filter(function(p) {
+ return !!p;
+ }), !isAbsolute).join('/');
+ if (!path && !isAbsolute) {
+ path = '.';
+ }
+ if (path && trailingSlash) {
+ path += '/';
+ }
+ return (isAbsolute ? '/' : '') + path;
+ },dirname:function (path) {
+ var result = PATH.splitPath(path),
+ root = result[0],
+ dir = result[1];
+ if (!root && !dir) {
+ // No dirname whatsoever
+ return '.';
+ }
+ if (dir) {
+ // It has a dirname, strip trailing slash
+ dir = dir.substr(0, dir.length - 1);
+ }
+ return root + dir;
+ },basename:function (path) {
+ // EMSCRIPTEN return '/'' for '/', not an empty string
+ if (path === '/') return '/';
+ var lastSlash = path.lastIndexOf('/');
+ if (lastSlash === -1) return path;
+ return path.substr(lastSlash+1);
+ },extname:function (path) {
+ return PATH.splitPath(path)[3];
+ },join:function () {
+ var paths = Array.prototype.slice.call(arguments, 0);
+ return PATH.normalize(paths.join('/'));
+ },join2:function (l, r) {
+ return PATH.normalize(l + '/' + r);
+ },resolve:function () {
+ var resolvedPath = '',
+ resolvedAbsolute = false;
+ for (var i = arguments.length - 1; i >= -1 && !resolvedAbsolute; i--) {
+ var path = (i >= 0) ? arguments[i] : FS.cwd();
+ // Skip empty and invalid entries
+ if (typeof path !== 'string') {
+ throw new TypeError('Arguments to path.resolve must be strings');
+ } else if (!path) {
+ continue;
+ }
+ resolvedPath = path + '/' + resolvedPath;
+ resolvedAbsolute = path.charAt(0) === '/';
+ }
+ // At this point the path should be resolved to a full absolute path, but
+ // handle relative paths to be safe (might happen when process.cwd() fails)
+ resolvedPath = PATH.normalizeArray(resolvedPath.split('/').filter(function(p) {
+ return !!p;
+ }), !resolvedAbsolute).join('/');
+ return ((resolvedAbsolute ? '/' : '') + resolvedPath) || '.';
+ },relative:function (from, to) {
+ from = PATH.resolve(from).substr(1);
+ to = PATH.resolve(to).substr(1);
+ function trim(arr) {
+ var start = 0;
+ for (; start < arr.length; start++) {
+ if (arr[start] !== '') break;
+ }
+ var end = arr.length - 1;
+ for (; end >= 0; end--) {
+ if (arr[end] !== '') break;
+ }
+ if (start > end) return [];
+ return arr.slice(start, end - start + 1);
+ }
+ var fromParts = trim(from.split('/'));
+ var toParts = trim(to.split('/'));
+ var length = Math.min(fromParts.length, toParts.length);
+ var samePartsLength = length;
+ for (var i = 0; i < length; i++) {
+ if (fromParts[i] !== toParts[i]) {
+ samePartsLength = i;
+ break;
+ }
+ }
+ var outputParts = [];
+ for (var i = samePartsLength; i < fromParts.length; i++) {
+ outputParts.push('..');
+ }
+ outputParts = outputParts.concat(toParts.slice(samePartsLength));
+ return outputParts.join('/');
+ }};var Browser={mainLoop:{scheduler:null,method:"",shouldPause:false,paused:false,queue:[],pause:function () {
+ Browser.mainLoop.shouldPause = true;
+ },resume:function () {
+ if (Browser.mainLoop.paused) {
+ Browser.mainLoop.paused = false;
+ Browser.mainLoop.scheduler();
+ }
+ Browser.mainLoop.shouldPause = false;
+ },updateStatus:function () {
+ if (Module['setStatus']) {
+ var message = Module['statusMessage'] || 'Please wait...';
+ var remaining = Browser.mainLoop.remainingBlockers;
+ var expected = Browser.mainLoop.expectedBlockers;
+ if (remaining) {
+ if (remaining < expected) {
+ Module['setStatus'](message + ' (' + (expected - remaining) + '/' + expected + ')');
+ } else {
+ Module['setStatus'](message);
+ }
+ } else {
+ Module['setStatus']('');
+ }
+ }
+ }},isFullScreen:false,pointerLock:false,moduleContextCreatedCallbacks:[],workers:[],init:function () {
+ if (!Module["preloadPlugins"]) Module["preloadPlugins"] = []; // needs to exist even in workers
+
+ if (Browser.initted || ENVIRONMENT_IS_WORKER) return;
+ Browser.initted = true;
+
+ try {
+ new Blob();
+ Browser.hasBlobConstructor = true;
+ } catch(e) {
+ Browser.hasBlobConstructor = false;
+ console.log("warning: no blob constructor, cannot create blobs with mimetypes");
+ }
+ Browser.BlobBuilder = typeof MozBlobBuilder != "undefined" ? MozBlobBuilder : (typeof WebKitBlobBuilder != "undefined" ? WebKitBlobBuilder : (!Browser.hasBlobConstructor ? console.log("warning: no BlobBuilder") : null));
+ Browser.URLObject = typeof window != "undefined" ? (window.URL ? window.URL : window.webkitURL) : undefined;
+ if (!Module.noImageDecoding && typeof Browser.URLObject === 'undefined') {
+ console.log("warning: Browser does not support creating object URLs. Built-in browser image decoding will not be available.");
+ Module.noImageDecoding = true;
+ }
+
+ // Support for plugins that can process preloaded files. You can add more of these to
+ // your app by creating and appending to Module.preloadPlugins.
+ //
+ // Each plugin is asked if it can handle a file based on the file's name. If it can,
+ // it is given the file's raw data. When it is done, it calls a callback with the file's
+ // (possibly modified) data. For example, a plugin might decompress a file, or it
+ // might create some side data structure for use later (like an Image element, etc.).
+
+ var imagePlugin = {};
+ imagePlugin['canHandle'] = function imagePlugin_canHandle(name) {
+ return !Module.noImageDecoding && /\.(jpg|jpeg|png|bmp)$/i.test(name);
+ };
+ imagePlugin['handle'] = function imagePlugin_handle(byteArray, name, onload, onerror) {
+ var b = null;
+ if (Browser.hasBlobConstructor) {
+ try {
+ b = new Blob([byteArray], { type: Browser.getMimetype(name) });
+ if (b.size !== byteArray.length) { // Safari bug #118630
+ // Safari's Blob can only take an ArrayBuffer
+ b = new Blob([(new Uint8Array(byteArray)).buffer], { type: Browser.getMimetype(name) });
+ }
+ } catch(e) {
+ Runtime.warnOnce('Blob constructor present but fails: ' + e + '; falling back to blob builder');
+ }
+ }
+ if (!b) {
+ var bb = new Browser.BlobBuilder();
+ bb.append((new Uint8Array(byteArray)).buffer); // we need to pass a buffer, and must copy the array to get the right data range
+ b = bb.getBlob();
+ }
+ var url = Browser.URLObject.createObjectURL(b);
+ var img = new Image();
+ img.onload = function img_onload() {
+ assert(img.complete, 'Image ' + name + ' could not be decoded');
+ var canvas = document.createElement('canvas');
+ canvas.width = img.width;
+ canvas.height = img.height;
+ var ctx = canvas.getContext('2d');
+ ctx.drawImage(img, 0, 0);
+ Module["preloadedImages"][name] = canvas;
+ Browser.URLObject.revokeObjectURL(url);
+ if (onload) onload(byteArray);
+ };
+ img.onerror = function img_onerror(event) {
+ console.log('Image ' + url + ' could not be decoded');
+ if (onerror) onerror();
+ };
+ img.src = url;
+ };
+ Module['preloadPlugins'].push(imagePlugin);
+
+ var audioPlugin = {};
+ audioPlugin['canHandle'] = function audioPlugin_canHandle(name) {
+ return !Module.noAudioDecoding && name.substr(-4) in { '.ogg': 1, '.wav': 1, '.mp3': 1 };
+ };
+ audioPlugin['handle'] = function audioPlugin_handle(byteArray, name, onload, onerror) {
+ var done = false;
+ function finish(audio) {
+ if (done) return;
+ done = true;
+ Module["preloadedAudios"][name] = audio;
+ if (onload) onload(byteArray);
+ }
+ function fail() {
+ if (done) return;
+ done = true;
+ Module["preloadedAudios"][name] = new Audio(); // empty shim
+ if (onerror) onerror();
+ }
+ if (Browser.hasBlobConstructor) {
+ try {
+ var b = new Blob([byteArray], { type: Browser.getMimetype(name) });
+ } catch(e) {
+ return fail();
+ }
+ var url = Browser.URLObject.createObjectURL(b); // XXX we never revoke this!
+ var audio = new Audio();
+ audio.addEventListener('canplaythrough', function() { finish(audio) }, false); // use addEventListener due to chromium bug 124926
+ audio.onerror = function audio_onerror(event) {
+ if (done) return;
+ console.log('warning: browser could not fully decode audio ' + name + ', trying slower base64 approach');
+ function encode64(data) {
+ var BASE = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';
+ var PAD = '=';
+ var ret = '';
+ var leftchar = 0;
+ var leftbits = 0;
+ for (var i = 0; i < data.length; i++) {
+ leftchar = (leftchar << 8) | data[i];
+ leftbits += 8;
+ while (leftbits >= 6) {
+ var curr = (leftchar >> (leftbits-6)) & 0x3f;
+ leftbits -= 6;
+ ret += BASE[curr];
+ }
+ }
+ if (leftbits == 2) {
+ ret += BASE[(leftchar&3) << 4];
+ ret += PAD + PAD;
+ } else if (leftbits == 4) {
+ ret += BASE[(leftchar&0xf) << 2];
+ ret += PAD;
+ }
+ return ret;
+ }
+ audio.src = 'data:audio/x-' + name.substr(-3) + ';base64,' + encode64(byteArray);
+ finish(audio); // we don't wait for confirmation this worked - but it's worth trying
+ };
+ audio.src = url;
+ // workaround for chrome bug 124926 - we do not always get oncanplaythrough or onerror
+ Browser.safeSetTimeout(function() {
+ finish(audio); // try to use it even though it is not necessarily ready to play
+ }, 10000);
+ } else {
+ return fail();
+ }
+ };
+ Module['preloadPlugins'].push(audioPlugin);
+
+ // Canvas event setup
+
+ var canvas = Module['canvas'];
+
+ // forced aspect ratio can be enabled by defining 'forcedAspectRatio' on Module
+ // Module['forcedAspectRatio'] = 4 / 3;
+
+ canvas.requestPointerLock = canvas['requestPointerLock'] ||
+ canvas['mozRequestPointerLock'] ||
+ canvas['webkitRequestPointerLock'] ||
+ canvas['msRequestPointerLock'] ||
+ function(){};
+ canvas.exitPointerLock = document['exitPointerLock'] ||
+ document['mozExitPointerLock'] ||
+ document['webkitExitPointerLock'] ||
+ document['msExitPointerLock'] ||
+ function(){}; // no-op if function does not exist
+ canvas.exitPointerLock = canvas.exitPointerLock.bind(document);
+
+ function pointerLockChange() {
+ Browser.pointerLock = document['pointerLockElement'] === canvas ||
+ document['mozPointerLockElement'] === canvas ||
+ document['webkitPointerLockElement'] === canvas ||
+ document['msPointerLockElement'] === canvas;
+ }
+
+ document.addEventListener('pointerlockchange', pointerLockChange, false);
+ document.addEventListener('mozpointerlockchange', pointerLockChange, false);
+ document.addEventListener('webkitpointerlockchange', pointerLockChange, false);
+ document.addEventListener('mspointerlockchange', pointerLockChange, false);
+
+ if (Module['elementPointerLock']) {
+ canvas.addEventListener("click", function(ev) {
+ if (!Browser.pointerLock && canvas.requestPointerLock) {
+ canvas.requestPointerLock();
+ ev.preventDefault();
+ }
+ }, false);
+ }
+ },createContext:function (canvas, useWebGL, setInModule, webGLContextAttributes) {
+ var ctx;
+ var errorInfo = '?';
+ function onContextCreationError(event) {
+ errorInfo = event.statusMessage || errorInfo;
+ }
+ try {
+ if (useWebGL) {
+ var contextAttributes = {
+ antialias: false,
+ alpha: false
+ };
+
+ if (webGLContextAttributes) {
+ for (var attribute in webGLContextAttributes) {
+ contextAttributes[attribute] = webGLContextAttributes[attribute];
+ }
+ }
+
+
+ canvas.addEventListener('webglcontextcreationerror', onContextCreationError, false);
+ try {
+ ['experimental-webgl', 'webgl'].some(function(webglId) {
+ return ctx = canvas.getContext(webglId, contextAttributes);
+ });
+ } finally {
+ canvas.removeEventListener('webglcontextcreationerror', onContextCreationError, false);
+ }
+ } else {
+ ctx = canvas.getContext('2d');
+ }
+ if (!ctx) throw ':(';
+ } catch (e) {
+ Module.print('Could not create canvas: ' + [errorInfo, e]);
+ return null;
+ }
+ if (useWebGL) {
+ // Set the background of the WebGL canvas to black
+ canvas.style.backgroundColor = "black";
+
+ // Warn on context loss
+ canvas.addEventListener('webglcontextlost', function(event) {
+ alert('WebGL context lost. You will need to reload the page.');
+ }, false);
+ }
+ if (setInModule) {
+ GLctx = Module.ctx = ctx;
+ Module.useWebGL = useWebGL;
+ Browser.moduleContextCreatedCallbacks.forEach(function(callback) { callback() });
+ Browser.init();
+ }
+ return ctx;
+ },destroyContext:function (canvas, useWebGL, setInModule) {},fullScreenHandlersInstalled:false,lockPointer:undefined,resizeCanvas:undefined,requestFullScreen:function (lockPointer, resizeCanvas) {
+ Browser.lockPointer = lockPointer;
+ Browser.resizeCanvas = resizeCanvas;
+ if (typeof Browser.lockPointer === 'undefined') Browser.lockPointer = true;
+ if (typeof Browser.resizeCanvas === 'undefined') Browser.resizeCanvas = false;
+
+ var canvas = Module['canvas'];
+ function fullScreenChange() {
+ Browser.isFullScreen = false;
+ var canvasContainer = canvas.parentNode;
+ if ((document['webkitFullScreenElement'] || document['webkitFullscreenElement'] ||
+ document['mozFullScreenElement'] || document['mozFullscreenElement'] ||
+ document['fullScreenElement'] || document['fullscreenElement'] ||
+ document['msFullScreenElement'] || document['msFullscreenElement'] ||
+ document['webkitCurrentFullScreenElement']) === canvasContainer) {
+ canvas.cancelFullScreen = document['cancelFullScreen'] ||
+ document['mozCancelFullScreen'] ||
+ document['webkitCancelFullScreen'] ||
+ document['msExitFullscreen'] ||
+ document['exitFullscreen'] ||
+ function() {};
+ canvas.cancelFullScreen = canvas.cancelFullScreen.bind(document);
+ if (Browser.lockPointer) canvas.requestPointerLock();
+ Browser.isFullScreen = true;
+ if (Browser.resizeCanvas) Browser.setFullScreenCanvasSize();
+ } else {
+
+ // remove the full screen specific parent of the canvas again to restore the HTML structure from before going full screen
+ canvasContainer.parentNode.insertBefore(canvas, canvasContainer);
+ canvasContainer.parentNode.removeChild(canvasContainer);
+
+ if (Browser.resizeCanvas) Browser.setWindowedCanvasSize();
+ }
+ if (Module['onFullScreen']) Module['onFullScreen'](Browser.isFullScreen);
+ Browser.updateCanvasDimensions(canvas);
+ }
+
+ if (!Browser.fullScreenHandlersInstalled) {
+ Browser.fullScreenHandlersInstalled = true;
+ document.addEventListener('fullscreenchange', fullScreenChange, false);
+ document.addEventListener('mozfullscreenchange', fullScreenChange, false);
+ document.addEventListener('webkitfullscreenchange', fullScreenChange, false);
+ document.addEventListener('MSFullscreenChange', fullScreenChange, false);
+ }
+
+ // create a new parent to ensure the canvas has no siblings. this allows browsers to optimize full screen performance when its parent is the full screen root
+ var canvasContainer = document.createElement("div");
+ canvas.parentNode.insertBefore(canvasContainer, canvas);
+ canvasContainer.appendChild(canvas);
+
+ // use parent of canvas as full screen root to allow aspect ratio correction (Firefox stretches the root to screen size)
+ canvasContainer.requestFullScreen = canvasContainer['requestFullScreen'] ||
+ canvasContainer['mozRequestFullScreen'] ||
+ canvasContainer['msRequestFullscreen'] ||
+ (canvasContainer['webkitRequestFullScreen'] ? function() { canvasContainer['webkitRequestFullScreen'](Element['ALLOW_KEYBOARD_INPUT']) } : null);
+ canvasContainer.requestFullScreen();
+ },requestAnimationFrame:function requestAnimationFrame(func) {
+ if (typeof window === 'undefined') { // Provide fallback to setTimeout if window is undefined (e.g. in Node.js)
+ setTimeout(func, 1000/60);
+ } else {
+ if (!window.requestAnimationFrame) {
+ window.requestAnimationFrame = window['requestAnimationFrame'] ||
+ window['mozRequestAnimationFrame'] ||
+ window['webkitRequestAnimationFrame'] ||
+ window['msRequestAnimationFrame'] ||
+ window['oRequestAnimationFrame'] ||
+ window['setTimeout'];
+ }
+ window.requestAnimationFrame(func);
+ }
+ },safeCallback:function (func) {
+ return function() {
+ if (!ABORT) return func.apply(null, arguments);
+ };
+ },safeRequestAnimationFrame:function (func) {
+ return Browser.requestAnimationFrame(function() {
+ if (!ABORT) func();
+ });
+ },safeSetTimeout:function (func, timeout) {
+ return setTimeout(function() {
+ if (!ABORT) func();
+ }, timeout);
+ },safeSetInterval:function (func, timeout) {
+ return setInterval(function() {
+ if (!ABORT) func();
+ }, timeout);
+ },getMimetype:function (name) {
+ return {
+ 'jpg': 'image/jpeg',
+ 'jpeg': 'image/jpeg',
+ 'png': 'image/png',
+ 'bmp': 'image/bmp',
+ 'ogg': 'audio/ogg',
+ 'wav': 'audio/wav',
+ 'mp3': 'audio/mpeg'
+ }[name.substr(name.lastIndexOf('.')+1)];
+ },getUserMedia:function (func) {
+ if(!window.getUserMedia) {
+ window.getUserMedia = navigator['getUserMedia'] ||
+ navigator['mozGetUserMedia'];
+ }
+ window.getUserMedia(func);
+ },getMovementX:function (event) {
+ return event['movementX'] ||
+ event['mozMovementX'] ||
+ event['webkitMovementX'] ||
+ 0;
+ },getMovementY:function (event) {
+ return event['movementY'] ||
+ event['mozMovementY'] ||
+ event['webkitMovementY'] ||
+ 0;
+ },getMouseWheelDelta:function (event) {
+ return Math.max(-1, Math.min(1, event.type === 'DOMMouseScroll' ? event.detail : -event.wheelDelta));
+ },mouseX:0,mouseY:0,mouseMovementX:0,mouseMovementY:0,calculateMouseEvent:function (event) { // event should be mousemove, mousedown or mouseup
+ if (Browser.pointerLock) {
+ // When the pointer is locked, calculate the coordinates
+ // based on the movement of the mouse.
+ // Workaround for Firefox bug 764498
+ if (event.type != 'mousemove' &&
+ ('mozMovementX' in event)) {
+ Browser.mouseMovementX = Browser.mouseMovementY = 0;
+ } else {
+ Browser.mouseMovementX = Browser.getMovementX(event);
+ Browser.mouseMovementY = Browser.getMovementY(event);
+ }
+
+ // check if SDL is available
+ if (typeof SDL != "undefined") {
+ Browser.mouseX = SDL.mouseX + Browser.mouseMovementX;
+ Browser.mouseY = SDL.mouseY + Browser.mouseMovementY;
+ } else {
+ // just add the mouse delta to the current absolut mouse position
+ // FIXME: ideally this should be clamped against the canvas size and zero
+ Browser.mouseX += Browser.mouseMovementX;
+ Browser.mouseY += Browser.mouseMovementY;
+ }
+ } else {
+ // Otherwise, calculate the movement based on the changes
+ // in the coordinates.
+ var rect = Module["canvas"].getBoundingClientRect();
+ var x, y;
+
+ // Neither .scrollX or .pageXOffset are defined in a spec, but
+ // we prefer .scrollX because it is currently in a spec draft.
+ // (see: http://www.w3.org/TR/2013/WD-cssom-view-20131217/)
+ var scrollX = ((typeof window.scrollX !== 'undefined') ? window.scrollX : window.pageXOffset);
+ var scrollY = ((typeof window.scrollY !== 'undefined') ? window.scrollY : window.pageYOffset);
+ if (event.type == 'touchstart' ||
+ event.type == 'touchend' ||
+ event.type == 'touchmove') {
+ var t = event.touches.item(0);
+ if (t) {
+ x = t.pageX - (scrollX + rect.left);
+ y = t.pageY - (scrollY + rect.top);
+ } else {
+ return;
+ }
+ } else {
+ x = event.pageX - (scrollX + rect.left);
+ y = event.pageY - (scrollY + rect.top);
+ }
+
+ // the canvas might be CSS-scaled compared to its backbuffer;
+ // SDL-using content will want mouse coordinates in terms
+ // of backbuffer units.
+ var cw = Module["canvas"].width;
+ var ch = Module["canvas"].height;
+ x = x * (cw / rect.width);
+ y = y * (ch / rect.height);
+
+ Browser.mouseMovementX = x - Browser.mouseX;
+ Browser.mouseMovementY = y - Browser.mouseY;
+ Browser.mouseX = x;
+ Browser.mouseY = y;
+ }
+ },xhrLoad:function (url, onload, onerror) {
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, true);
+ xhr.responseType = 'arraybuffer';
+ xhr.onload = function xhr_onload() {
+ if (xhr.status == 200 || (xhr.status == 0 && xhr.response)) { // file URLs can return 0
+ onload(xhr.response);
+ } else {
+ onerror();
+ }
+ };
+ xhr.onerror = onerror;
+ xhr.send(null);
+ },asyncLoad:function (url, onload, onerror, noRunDep) {
+ Browser.xhrLoad(url, function(arrayBuffer) {
+ assert(arrayBuffer, 'Loading data file "' + url + '" failed (no arrayBuffer).');
+ onload(new Uint8Array(arrayBuffer));
+ if (!noRunDep) removeRunDependency('al ' + url);
+ }, function(event) {
+ if (onerror) {
+ onerror();
+ } else {
+ throw 'Loading data file "' + url + '" failed.';
+ }
+ });
+ if (!noRunDep) addRunDependency('al ' + url);
+ },resizeListeners:[],updateResizeListeners:function () {
+ var canvas = Module['canvas'];
+ Browser.resizeListeners.forEach(function(listener) {
+ listener(canvas.width, canvas.height);
+ });
+ },setCanvasSize:function (width, height, noUpdates) {
+ var canvas = Module['canvas'];
+ Browser.updateCanvasDimensions(canvas, width, height);
+ if (!noUpdates) Browser.updateResizeListeners();
+ },windowedWidth:0,windowedHeight:0,setFullScreenCanvasSize:function () {
+ // check if SDL is available
+ if (typeof SDL != "undefined") {
+ var flags = HEAPU32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)];
+ flags = flags | 0x00800000; // set SDL_FULLSCREEN flag
+ HEAP32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)]=flags
+ }
+ Browser.updateResizeListeners();
+ },setWindowedCanvasSize:function () {
+ // check if SDL is available
+ if (typeof SDL != "undefined") {
+ var flags = HEAPU32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)];
+ flags = flags & ~0x00800000; // clear SDL_FULLSCREEN flag
+ HEAP32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)]=flags
+ }
+ Browser.updateResizeListeners();
+ },updateCanvasDimensions:function (canvas, wNative, hNative) {
+ if (wNative && hNative) {
+ canvas.widthNative = wNative;
+ canvas.heightNative = hNative;
+ } else {
+ wNative = canvas.widthNative;
+ hNative = canvas.heightNative;
+ }
+ var w = wNative;
+ var h = hNative;
+ if (Module['forcedAspectRatio'] && Module['forcedAspectRatio'] > 0) {
+ if (w/h < Module['forcedAspectRatio']) {
+ w = Math.round(h * Module['forcedAspectRatio']);
+ } else {
+ h = Math.round(w / Module['forcedAspectRatio']);
+ }
+ }
+ if (((document['webkitFullScreenElement'] || document['webkitFullscreenElement'] ||
+ document['mozFullScreenElement'] || document['mozFullscreenElement'] ||
+ document['fullScreenElement'] || document['fullscreenElement'] ||
+ document['msFullScreenElement'] || document['msFullscreenElement'] ||
+ document['webkitCurrentFullScreenElement']) === canvas.parentNode) && (typeof screen != 'undefined')) {
+ var factor = Math.min(screen.width / w, screen.height / h);
+ w = Math.round(w * factor);
+ h = Math.round(h * factor);
+ }
+ if (Browser.resizeCanvas) {
+ if (canvas.width != w) canvas.width = w;
+ if (canvas.height != h) canvas.height = h;
+ if (typeof canvas.style != 'undefined') {
+ canvas.style.removeProperty( "width");
+ canvas.style.removeProperty("height");
+ }
+ } else {
+ if (canvas.width != wNative) canvas.width = wNative;
+ if (canvas.height != hNative) canvas.height = hNative;
+ if (typeof canvas.style != 'undefined') {
+ if (w != wNative || h != hNative) {
+ canvas.style.setProperty( "width", w + "px", "important");
+ canvas.style.setProperty("height", h + "px", "important");
+ } else {
+ canvas.style.removeProperty( "width");
+ canvas.style.removeProperty("height");
+ }
+ }
+ }
+ }};
+
+
+
+
+
+
+
+ function _mkport() { throw 'TODO' }var SOCKFS={mount:function (mount) {
+ return FS.createNode(null, '/', 16384 | 511 /* 0777 */, 0);
+ },createSocket:function (family, type, protocol) {
+ var streaming = type == 1;
+ if (protocol) {
+ assert(streaming == (protocol == 6)); // if SOCK_STREAM, must be tcp
+ }
+
+ // create our internal socket structure
+ var sock = {
+ family: family,
+ type: type,
+ protocol: protocol,
+ server: null,
+ peers: {},
+ pending: [],
+ recv_queue: [],
+ sock_ops: SOCKFS.websocket_sock_ops
+ };
+
+ // create the filesystem node to store the socket structure
+ var name = SOCKFS.nextname();
+ var node = FS.createNode(SOCKFS.root, name, 49152, 0);
+ node.sock = sock;
+
+ // and the wrapping stream that enables library functions such
+ // as read and write to indirectly interact with the socket
+ var stream = FS.createStream({
+ path: name,
+ node: node,
+ flags: FS.modeStringToFlags('r+'),
+ seekable: false,
+ stream_ops: SOCKFS.stream_ops
+ });
+
+ // map the new stream to the socket structure (sockets have a 1:1
+ // relationship with a stream)
+ sock.stream = stream;
+
+ return sock;
+ },getSocket:function (fd) {
+ var stream = FS.getStream(fd);
+ if (!stream || !FS.isSocket(stream.node.mode)) {
+ return null;
+ }
+ return stream.node.sock;
+ },stream_ops:{poll:function (stream) {
+ var sock = stream.node.sock;
+ return sock.sock_ops.poll(sock);
+ },ioctl:function (stream, request, varargs) {
+ var sock = stream.node.sock;
+ return sock.sock_ops.ioctl(sock, request, varargs);
+ },read:function (stream, buffer, offset, length, position /* ignored */) {
+ var sock = stream.node.sock;
+ var msg = sock.sock_ops.recvmsg(sock, length);
+ if (!msg) {
+ // socket is closed
+ return 0;
+ }
+ buffer.set(msg.buffer, offset);
+ return msg.buffer.length;
+ },write:function (stream, buffer, offset, length, position /* ignored */) {
+ var sock = stream.node.sock;
+ return sock.sock_ops.sendmsg(sock, buffer, offset, length);
+ },close:function (stream) {
+ var sock = stream.node.sock;
+ sock.sock_ops.close(sock);
+ }},nextname:function () {
+ if (!SOCKFS.nextname.current) {
+ SOCKFS.nextname.current = 0;
+ }
+ return 'socket[' + (SOCKFS.nextname.current++) + ']';
+ },websocket_sock_ops:{createPeer:function (sock, addr, port) {
+ var ws;
+
+ if (typeof addr === 'object') {
+ ws = addr;
+ addr = null;
+ port = null;
+ }
+
+ if (ws) {
+ // for sockets that've already connected (e.g. we're the server)
+ // we can inspect the _socket property for the address
+ if (ws._socket) {
+ addr = ws._socket.remoteAddress;
+ port = ws._socket.remotePort;
+ }
+ // if we're just now initializing a connection to the remote,
+ // inspect the url property
+ else {
+ var result = /ws[s]?:\/\/([^:]+):(\d+)/.exec(ws.url);
+ if (!result) {
+ throw new Error('WebSocket URL must be in the format ws(s)://address:port');
+ }
+ addr = result[1];
+ port = parseInt(result[2], 10);
+ }
+ } else {
+ // create the actual websocket object and connect
+ try {
+ // runtimeConfig gets set to true if WebSocket runtime configuration is available.
+ var runtimeConfig = (Module['websocket'] && ('object' === typeof Module['websocket']));
+
+ // The default value is 'ws://' the replace is needed because the compiler replaces "//" comments with '#'
+ // comments without checking context, so we'd end up with ws:#, the replace swaps the "#" for "//" again.
+ var url = 'ws:#'.replace('#', '//');
+
+ if (runtimeConfig) {
+ if ('string' === typeof Module['websocket']['url']) {
+ url = Module['websocket']['url']; // Fetch runtime WebSocket URL config.
+ }
+ }
+
+ if (url === 'ws://' || url === 'wss://') { // Is the supplied URL config just a prefix, if so complete it.
+ url = url + addr + ':' + port;
+ }
+
+ // Make the WebSocket subprotocol (Sec-WebSocket-Protocol) default to binary if no configuration is set.
+ var subProtocols = 'binary'; // The default value is 'binary'
+
+ if (runtimeConfig) {
+ if ('string' === typeof Module['websocket']['subprotocol']) {
+ subProtocols = Module['websocket']['subprotocol']; // Fetch runtime WebSocket subprotocol config.
+ }
+ }
+
+ // The regex trims the string (removes spaces at the beginning and end, then splits the string by
+ // <any space>,<any space> into an Array. Whitespace removal is important for Websockify and ws.
+ subProtocols = subProtocols.replace(/^ +| +$/g,"").split(/ *, */);
+
+ // The node ws library API for specifying optional subprotocol is slightly different than the browser's.
+ var opts = ENVIRONMENT_IS_NODE ? {'protocol': subProtocols.toString()} : subProtocols;
+
+ // If node we use the ws library.
+ var WebSocket = ENVIRONMENT_IS_NODE ? require('ws') : window['WebSocket'];
+ ws = new WebSocket(url, opts);
+ ws.binaryType = 'arraybuffer';
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EHOSTUNREACH);
+ }
+ }
+
+
+ var peer = {
+ addr: addr,
+ port: port,
+ socket: ws,
+ dgram_send_queue: []
+ };
+
+ SOCKFS.websocket_sock_ops.addPeer(sock, peer);
+ SOCKFS.websocket_sock_ops.handlePeerEvents(sock, peer);
+
+ // if this is a bound dgram socket, send the port number first to allow
+ // us to override the ephemeral port reported to us by remotePort on the
+ // remote end.
+ if (sock.type === 2 && typeof sock.sport !== 'undefined') {
+ peer.dgram_send_queue.push(new Uint8Array([
+ 255, 255, 255, 255,
+ 'p'.charCodeAt(0), 'o'.charCodeAt(0), 'r'.charCodeAt(0), 't'.charCodeAt(0),
+ ((sock.sport & 0xff00) >> 8) , (sock.sport & 0xff)
+ ]));
+ }
+
+ return peer;
+ },getPeer:function (sock, addr, port) {
+ return sock.peers[addr + ':' + port];
+ },addPeer:function (sock, peer) {
+ sock.peers[peer.addr + ':' + peer.port] = peer;
+ },removePeer:function (sock, peer) {
+ delete sock.peers[peer.addr + ':' + peer.port];
+ },handlePeerEvents:function (sock, peer) {
+ var first = true;
+
+ var handleOpen = function () {
+ try {
+ var queued = peer.dgram_send_queue.shift();
+ while (queued) {
+ peer.socket.send(queued);
+ queued = peer.dgram_send_queue.shift();
+ }
+ } catch (e) {
+ // not much we can do here in the way of proper error handling as we've already
+ // lied and said this data was sent. shut it down.
+ peer.socket.close();
+ }
+ };
+
+ function handleMessage(data) {
+ assert(typeof data !== 'string' && data.byteLength !== undefined); // must receive an ArrayBuffer
+ data = new Uint8Array(data); // make a typed array view on the array buffer
+
+
+ // if this is the port message, override the peer's port with it
+ var wasfirst = first;
+ first = false;
+ if (wasfirst &&
+ data.length === 10 &&
+ data[0] === 255 && data[1] === 255 && data[2] === 255 && data[3] === 255 &&
+ data[4] === 'p'.charCodeAt(0) && data[5] === 'o'.charCodeAt(0) && data[6] === 'r'.charCodeAt(0) && data[7] === 't'.charCodeAt(0)) {
+ // update the peer's port and it's key in the peer map
+ var newport = ((data[8] << 8) | data[9]);
+ SOCKFS.websocket_sock_ops.removePeer(sock, peer);
+ peer.port = newport;
+ SOCKFS.websocket_sock_ops.addPeer(sock, peer);
+ return;
+ }
+
+ sock.recv_queue.push({ addr: peer.addr, port: peer.port, data: data });
+ };
+
+ if (ENVIRONMENT_IS_NODE) {
+ peer.socket.on('open', handleOpen);
+ peer.socket.on('message', function(data, flags) {
+ if (!flags.binary) {
+ return;
+ }
+ handleMessage((new Uint8Array(data)).buffer); // copy from node Buffer -> ArrayBuffer
+ });
+ peer.socket.on('error', function() {
+ // don't throw
+ });
+ } else {
+ peer.socket.onopen = handleOpen;
+ peer.socket.onmessage = function peer_socket_onmessage(event) {
+ handleMessage(event.data);
+ };
+ }
+ },poll:function (sock) {
+ if (sock.type === 1 && sock.server) {
+ // listen sockets should only say they're available for reading
+ // if there are pending clients.
+ return sock.pending.length ? (64 | 1) : 0;
+ }
+
+ var mask = 0;
+ var dest = sock.type === 1 ? // we only care about the socket state for connection-based sockets
+ SOCKFS.websocket_sock_ops.getPeer(sock, sock.daddr, sock.dport) :
+ null;
+
+ if (sock.recv_queue.length ||
+ !dest || // connection-less sockets are always ready to read
+ (dest && dest.socket.readyState === dest.socket.CLOSING) ||
+ (dest && dest.socket.readyState === dest.socket.CLOSED)) { // let recv return 0 once closed
+ mask |= (64 | 1);
+ }
+
+ if (!dest || // connection-less sockets are always ready to write
+ (dest && dest.socket.readyState === dest.socket.OPEN)) {
+ mask |= 4;
+ }
+
+ if ((dest && dest.socket.readyState === dest.socket.CLOSING) ||
+ (dest && dest.socket.readyState === dest.socket.CLOSED)) {
+ mask |= 16;
+ }
+
+ return mask;
+ },ioctl:function (sock, request, arg) {
+ switch (request) {
+ case 21531:
+ var bytes = 0;
+ if (sock.recv_queue.length) {
+ bytes = sock.recv_queue[0].data.length;
+ }
+ HEAP32[((arg)>>2)]=bytes;
+ return 0;
+ default:
+ return ERRNO_CODES.EINVAL;
+ }
+ },close:function (sock) {
+ // if we've spawned a listen server, close it
+ if (sock.server) {
+ try {
+ sock.server.close();
+ } catch (e) {
+ }
+ sock.server = null;
+ }
+ // close any peer connections
+ var peers = Object.keys(sock.peers);
+ for (var i = 0; i < peers.length; i++) {
+ var peer = sock.peers[peers[i]];
+ try {
+ peer.socket.close();
+ } catch (e) {
+ }
+ SOCKFS.websocket_sock_ops.removePeer(sock, peer);
+ }
+ return 0;
+ },bind:function (sock, addr, port) {
+ if (typeof sock.saddr !== 'undefined' || typeof sock.sport !== 'undefined') {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL); // already bound
+ }
+ sock.saddr = addr;
+ sock.sport = port || _mkport();
+ // in order to emulate dgram sockets, we need to launch a listen server when
+ // binding on a connection-less socket
+ // note: this is only required on the server side
+ if (sock.type === 2) {
+ // close the existing server if it exists
+ if (sock.server) {
+ sock.server.close();
+ sock.server = null;
+ }
+ // swallow error operation not supported error that occurs when binding in the
+ // browser where this isn't supported
+ try {
+ sock.sock_ops.listen(sock, 0);
+ } catch (e) {
+ if (!(e instanceof FS.ErrnoError)) throw e;
+ if (e.errno !== ERRNO_CODES.EOPNOTSUPP) throw e;
+ }
+ }
+ },connect:function (sock, addr, port) {
+ if (sock.server) {
+ throw new FS.ErrnoError(ERRNO_CODS.EOPNOTSUPP);
+ }
+
+ // TODO autobind
+ // if (!sock.addr && sock.type == 2) {
+ // }
+
+ // early out if we're already connected / in the middle of connecting
+ if (typeof sock.daddr !== 'undefined' && typeof sock.dport !== 'undefined') {
+ var dest = SOCKFS.websocket_sock_ops.getPeer(sock, sock.daddr, sock.dport);
+ if (dest) {
+ if (dest.socket.readyState === dest.socket.CONNECTING) {
+ throw new FS.ErrnoError(ERRNO_CODES.EALREADY);
+ } else {
+ throw new FS.ErrnoError(ERRNO_CODES.EISCONN);
+ }
+ }
+ }
+
+ // add the socket to our peer list and set our
+ // destination address / port to match
+ var peer = SOCKFS.websocket_sock_ops.createPeer(sock, addr, port);
+ sock.daddr = peer.addr;
+ sock.dport = peer.port;
+
+ // always "fail" in non-blocking mode
+ throw new FS.ErrnoError(ERRNO_CODES.EINPROGRESS);
+ },listen:function (sock, backlog) {
+ if (!ENVIRONMENT_IS_NODE) {
+ throw new FS.ErrnoError(ERRNO_CODES.EOPNOTSUPP);
+ }
+ if (sock.server) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL); // already listening
+ }
+ var WebSocketServer = require('ws').Server;
+ var host = sock.saddr;
+ sock.server = new WebSocketServer({
+ host: host,
+ port: sock.sport
+ // TODO support backlog
+ });
+
+ sock.server.on('connection', function(ws) {
+ if (sock.type === 1) {
+ var newsock = SOCKFS.createSocket(sock.family, sock.type, sock.protocol);
+
+ // create a peer on the new socket
+ var peer = SOCKFS.websocket_sock_ops.createPeer(newsock, ws);
+ newsock.daddr = peer.addr;
+ newsock.dport = peer.port;
+
+ // push to queue for accept to pick up
+ sock.pending.push(newsock);
+ } else {
+ // create a peer on the listen socket so calling sendto
+ // with the listen socket and an address will resolve
+ // to the correct client
+ SOCKFS.websocket_sock_ops.createPeer(sock, ws);
+ }
+ });
+ sock.server.on('closed', function() {
+ sock.server = null;
+ });
+ sock.server.on('error', function() {
+ // don't throw
+ });
+ },accept:function (listensock) {
+ if (!listensock.server) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var newsock = listensock.pending.shift();
+ newsock.stream.flags = listensock.stream.flags;
+ return newsock;
+ },getname:function (sock, peer) {
+ var addr, port;
+ if (peer) {
+ if (sock.daddr === undefined || sock.dport === undefined) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ }
+ addr = sock.daddr;
+ port = sock.dport;
+ } else {
+ // TODO saddr and sport will be set for bind()'d UDP sockets, but what
+ // should we be returning for TCP sockets that've been connect()'d?
+ addr = sock.saddr || 0;
+ port = sock.sport || 0;
+ }
+ return { addr: addr, port: port };
+ },sendmsg:function (sock, buffer, offset, length, addr, port) {
+ if (sock.type === 2) {
+ // connection-less sockets will honor the message address,
+ // and otherwise fall back to the bound destination address
+ if (addr === undefined || port === undefined) {
+ addr = sock.daddr;
+ port = sock.dport;
+ }
+ // if there was no address to fall back to, error out
+ if (addr === undefined || port === undefined) {
+ throw new FS.ErrnoError(ERRNO_CODES.EDESTADDRREQ);
+ }
+ } else {
+ // connection-based sockets will only use the bound
+ addr = sock.daddr;
+ port = sock.dport;
+ }
+
+ // find the peer for the destination address
+ var dest = SOCKFS.websocket_sock_ops.getPeer(sock, addr, port);
+
+ // early out if not connected with a connection-based socket
+ if (sock.type === 1) {
+ if (!dest || dest.socket.readyState === dest.socket.CLOSING || dest.socket.readyState === dest.socket.CLOSED) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ } else if (dest.socket.readyState === dest.socket.CONNECTING) {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ }
+
+ // create a copy of the incoming data to send, as the WebSocket API
+ // doesn't work entirely with an ArrayBufferView, it'll just send
+ // the entire underlying buffer
+ var data;
+ if (buffer instanceof Array || buffer instanceof ArrayBuffer) {
+ data = buffer.slice(offset, offset + length);
+ } else { // ArrayBufferView
+ data = buffer.buffer.slice(buffer.byteOffset + offset, buffer.byteOffset + offset + length);
+ }
+
+ // if we're emulating a connection-less dgram socket and don't have
+ // a cached connection, queue the buffer to send upon connect and
+ // lie, saying the data was sent now.
+ if (sock.type === 2) {
+ if (!dest || dest.socket.readyState !== dest.socket.OPEN) {
+ // if we're not connected, open a new connection
+ if (!dest || dest.socket.readyState === dest.socket.CLOSING || dest.socket.readyState === dest.socket.CLOSED) {
+ dest = SOCKFS.websocket_sock_ops.createPeer(sock, addr, port);
+ }
+ dest.dgram_send_queue.push(data);
+ return length;
+ }
+ }
+
+ try {
+ // send the actual data
+ dest.socket.send(data);
+ return length;
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ },recvmsg:function (sock, length) {
+ // http://pubs.opengroup.org/onlinepubs/7908799/xns/recvmsg.html
+ if (sock.type === 1 && sock.server) {
+ // tcp servers should not be recv()'ing on the listen socket
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ }
+
+ var queued = sock.recv_queue.shift();
+ if (!queued) {
+ if (sock.type === 1) {
+ var dest = SOCKFS.websocket_sock_ops.getPeer(sock, sock.daddr, sock.dport);
+
+ if (!dest) {
+ // if we have a destination address but are not connected, error out
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ }
+ else if (dest.socket.readyState === dest.socket.CLOSING || dest.socket.readyState === dest.socket.CLOSED) {
+ // return null if the socket has closed
+ return null;
+ }
+ else {
+ // else, our socket is in a valid state but truly has nothing available
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ } else {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ }
+
+ // queued.data will be an ArrayBuffer if it's unadulterated, but if it's
+ // requeued TCP data it'll be an ArrayBufferView
+ var queuedLength = queued.data.byteLength || queued.data.length;
+ var queuedOffset = queued.data.byteOffset || 0;
+ var queuedBuffer = queued.data.buffer || queued.data;
+ var bytesRead = Math.min(length, queuedLength);
+ var res = {
+ buffer: new Uint8Array(queuedBuffer, queuedOffset, bytesRead),
+ addr: queued.addr,
+ port: queued.port
+ };
+
+
+ // push back any unread data for TCP connections
+ if (sock.type === 1 && bytesRead < queuedLength) {
+ var bytesRemaining = queuedLength - bytesRead;
+ queued.data = new Uint8Array(queuedBuffer, queuedOffset + bytesRead, bytesRemaining);
+ sock.recv_queue.unshift(queued);
+ }
+
+ return res;
+ }}};function _send(fd, buf, len, flags) {
+ var sock = SOCKFS.getSocket(fd);
+ if (!sock) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+ // TODO honor flags
+ return _write(fd, buf, len);
+ }
+
+ function _pwrite(fildes, buf, nbyte, offset) {
+ // ssize_t pwrite(int fildes, const void *buf, size_t nbyte, off_t offset);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/write.html
+ var stream = FS.getStream(fildes);
+ if (!stream) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+ try {
+ var slab = HEAP8;
+ return FS.write(stream, slab, buf, nbyte, offset);
+ } catch (e) {
+ FS.handleFSError(e);
+ return -1;
+ }
+ }function _write(fildes, buf, nbyte) {
+ // ssize_t write(int fildes, const void *buf, size_t nbyte);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/write.html
+ var stream = FS.getStream(fildes);
+ if (!stream) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+
+
+ try {
+ var slab = HEAP8;
+ return FS.write(stream, slab, buf, nbyte);
+ } catch (e) {
+ FS.handleFSError(e);
+ return -1;
+ }
+ }
+
+ function _fileno(stream) {
+ // int fileno(FILE *stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fileno.html
+ stream = FS.getStreamFromPtr(stream);
+ if (!stream) return -1;
+ return stream.fd;
+ }function _fwrite(ptr, size, nitems, stream) {
+ // size_t fwrite(const void *restrict ptr, size_t size, size_t nitems, FILE *restrict stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fwrite.html
+ var bytesToWrite = nitems * size;
+ if (bytesToWrite == 0) return 0;
+ var fd = _fileno(stream);
+ var bytesWritten = _write(fd, ptr, bytesToWrite);
+ if (bytesWritten == -1) {
+ var streamObj = FS.getStreamFromPtr(stream);
+ if (streamObj) streamObj.error = true;
+ return 0;
+ } else {
+ return Math.floor(bytesWritten / size);
+ }
+ }
+
+
+
+ Module["_strlen"] = _strlen;
+
+ function __reallyNegative(x) {
+ return x < 0 || (x === 0 && (1/x) === -Infinity);
+ }function __formatString(format, varargs) {
+ var textIndex = format;
+ var argIndex = 0;
+ function getNextArg(type) {
+ // NOTE: Explicitly ignoring type safety. Otherwise this fails:
+ // int x = 4; printf("%c\n", (char)x);
+ var ret;
+ if (type === 'double') {
+ ret = HEAPF64[(((varargs)+(argIndex))>>3)];
+ } else if (type == 'i64') {
+ ret = [HEAP32[(((varargs)+(argIndex))>>2)],
+ HEAP32[(((varargs)+(argIndex+4))>>2)]];
+
+ } else {
+ type = 'i32'; // varargs are always i32, i64, or double
+ ret = HEAP32[(((varargs)+(argIndex))>>2)];
+ }
+ argIndex += Runtime.getNativeFieldSize(type);
+ return ret;
+ }
+
+ var ret = [];
+ var curr, next, currArg;
+ while(1) {
+ var startTextIndex = textIndex;
+ curr = HEAP8[(textIndex)];
+ if (curr === 0) break;
+ next = HEAP8[((textIndex+1)|0)];
+ if (curr == 37) {
+ // Handle flags.
+ var flagAlwaysSigned = false;
+ var flagLeftAlign = false;
+ var flagAlternative = false;
+ var flagZeroPad = false;
+ var flagPadSign = false;
+ flagsLoop: while (1) {
+ switch (next) {
+ case 43:
+ flagAlwaysSigned = true;
+ break;
+ case 45:
+ flagLeftAlign = true;
+ break;
+ case 35:
+ flagAlternative = true;
+ break;
+ case 48:
+ if (flagZeroPad) {
+ break flagsLoop;
+ } else {
+ flagZeroPad = true;
+ break;
+ }
+ case 32:
+ flagPadSign = true;
+ break;
+ default:
+ break flagsLoop;
+ }
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ }
+
+ // Handle width.
+ var width = 0;
+ if (next == 42) {
+ width = getNextArg('i32');
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ } else {
+ while (next >= 48 && next <= 57) {
+ width = width * 10 + (next - 48);
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ }
+ }
+
+ // Handle precision.
+ var precisionSet = false, precision = -1;
+ if (next == 46) {
+ precision = 0;
+ precisionSet = true;
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ if (next == 42) {
+ precision = getNextArg('i32');
+ textIndex++;
+ } else {
+ while(1) {
+ var precisionChr = HEAP8[((textIndex+1)|0)];
+ if (precisionChr < 48 ||
+ precisionChr > 57) break;
+ precision = precision * 10 + (precisionChr - 48);
+ textIndex++;
+ }
+ }
+ next = HEAP8[((textIndex+1)|0)];
+ }
+ if (precision < 0) {
+ precision = 6; // Standard default.
+ precisionSet = false;
+ }
+
+ // Handle integer sizes. WARNING: These assume a 32-bit architecture!
+ var argSize;
+ switch (String.fromCharCode(next)) {
+ case 'h':
+ var nextNext = HEAP8[((textIndex+2)|0)];
+ if (nextNext == 104) {
+ textIndex++;
+ argSize = 1; // char (actually i32 in varargs)
+ } else {
+ argSize = 2; // short (actually i32 in varargs)
+ }
+ break;
+ case 'l':
+ var nextNext = HEAP8[((textIndex+2)|0)];
+ if (nextNext == 108) {
+ textIndex++;
+ argSize = 8; // long long
+ } else {
+ argSize = 4; // long
+ }
+ break;
+ case 'L': // long long
+ case 'q': // int64_t
+ case 'j': // intmax_t
+ argSize = 8;
+ break;
+ case 'z': // size_t
+ case 't': // ptrdiff_t
+ case 'I': // signed ptrdiff_t or unsigned size_t
+ argSize = 4;
+ break;
+ default:
+ argSize = null;
+ }
+ if (argSize) textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+
+ // Handle type specifier.
+ switch (String.fromCharCode(next)) {
+ case 'd': case 'i': case 'u': case 'o': case 'x': case 'X': case 'p': {
+ // Integer.
+ var signed = next == 100 || next == 105;
+ argSize = argSize || 4;
+ var currArg = getNextArg('i' + (argSize * 8));
+ var argText;
+ // Flatten i64-1 [low, high] into a (slightly rounded) double
+ if (argSize == 8) {
+ currArg = Runtime.makeBigInt(currArg[0], currArg[1], next == 117);
+ }
+ // Truncate to requested size.
+ if (argSize <= 4) {
+ var limit = Math.pow(256, argSize) - 1;
+ currArg = (signed ? reSign : unSign)(currArg & limit, argSize * 8);
+ }
+ // Format the number.
+ var currAbsArg = Math.abs(currArg);
+ var prefix = '';
+ if (next == 100 || next == 105) {
+ argText = reSign(currArg, 8 * argSize, 1).toString(10);
+ } else if (next == 117) {
+ argText = unSign(currArg, 8 * argSize, 1).toString(10);
+ currArg = Math.abs(currArg);
+ } else if (next == 111) {
+ argText = (flagAlternative ? '0' : '') + currAbsArg.toString(8);
+ } else if (next == 120 || next == 88) {
+ prefix = (flagAlternative && currArg != 0) ? '0x' : '';
+ if (currArg < 0) {
+ // Represent negative numbers in hex as 2's complement.
+ currArg = -currArg;
+ argText = (currAbsArg - 1).toString(16);
+ var buffer = [];
+ for (var i = 0; i < argText.length; i++) {
+ buffer.push((0xF - parseInt(argText[i], 16)).toString(16));
+ }
+ argText = buffer.join('');
+ while (argText.length < argSize * 2) argText = 'f' + argText;
+ } else {
+ argText = currAbsArg.toString(16);
+ }
+ if (next == 88) {
+ prefix = prefix.toUpperCase();
+ argText = argText.toUpperCase();
+ }
+ } else if (next == 112) {
+ if (currAbsArg === 0) {
+ argText = '(nil)';
+ } else {
+ prefix = '0x';
+ argText = currAbsArg.toString(16);
+ }
+ }
+ if (precisionSet) {
+ while (argText.length < precision) {
+ argText = '0' + argText;
+ }
+ }
+
+ // Add sign if needed
+ if (currArg >= 0) {
+ if (flagAlwaysSigned) {
+ prefix = '+' + prefix;
+ } else if (flagPadSign) {
+ prefix = ' ' + prefix;
+ }
+ }
+
+ // Move sign to prefix so we zero-pad after the sign
+ if (argText.charAt(0) == '-') {
+ prefix = '-' + prefix;
+ argText = argText.substr(1);
+ }
+
+ // Add padding.
+ while (prefix.length + argText.length < width) {
+ if (flagLeftAlign) {
+ argText += ' ';
+ } else {
+ if (flagZeroPad) {
+ argText = '0' + argText;
+ } else {
+ prefix = ' ' + prefix;
+ }
+ }
+ }
+
+ // Insert the result into the buffer.
+ argText = prefix + argText;
+ argText.split('').forEach(function(chr) {
+ ret.push(chr.charCodeAt(0));
+ });
+ break;
+ }
+ case 'f': case 'F': case 'e': case 'E': case 'g': case 'G': {
+ // Float.
+ var currArg = getNextArg('double');
+ var argText;
+ if (isNaN(currArg)) {
+ argText = 'nan';
+ flagZeroPad = false;
+ } else if (!isFinite(currArg)) {
+ argText = (currArg < 0 ? '-' : '') + 'inf';
+ flagZeroPad = false;
+ } else {
+ var isGeneral = false;
+ var effectivePrecision = Math.min(precision, 20);
+
+ // Convert g/G to f/F or e/E, as per:
+ // http://pubs.opengroup.org/onlinepubs/9699919799/functions/printf.html
+ if (next == 103 || next == 71) {
+ isGeneral = true;
+ precision = precision || 1;
+ var exponent = parseInt(currArg.toExponential(effectivePrecision).split('e')[1], 10);
+ if (precision > exponent && exponent >= -4) {
+ next = ((next == 103) ? 'f' : 'F').charCodeAt(0);
+ precision -= exponent + 1;
+ } else {
+ next = ((next == 103) ? 'e' : 'E').charCodeAt(0);
+ precision--;
+ }
+ effectivePrecision = Math.min(precision, 20);
+ }
+
+ if (next == 101 || next == 69) {
+ argText = currArg.toExponential(effectivePrecision);
+ // Make sure the exponent has at least 2 digits.
+ if (/[eE][-+]\d$/.test(argText)) {
+ argText = argText.slice(0, -1) + '0' + argText.slice(-1);
+ }
+ } else if (next == 102 || next == 70) {
+ argText = currArg.toFixed(effectivePrecision);
+ if (currArg === 0 && __reallyNegative(currArg)) {
+ argText = '-' + argText;
+ }
+ }
+
+ var parts = argText.split('e');
+ if (isGeneral && !flagAlternative) {
+ // Discard trailing zeros and periods.
+ while (parts[0].length > 1 && parts[0].indexOf('.') != -1 &&
+ (parts[0].slice(-1) == '0' || parts[0].slice(-1) == '.')) {
+ parts[0] = parts[0].slice(0, -1);
+ }
+ } else {
+ // Make sure we have a period in alternative mode.
+ if (flagAlternative && argText.indexOf('.') == -1) parts[0] += '.';
+ // Zero pad until required precision.
+ while (precision > effectivePrecision++) parts[0] += '0';
+ }
+ argText = parts[0] + (parts.length > 1 ? 'e' + parts[1] : '');
+
+ // Capitalize 'E' if needed.
+ if (next == 69) argText = argText.toUpperCase();
+
+ // Add sign.
+ if (currArg >= 0) {
+ if (flagAlwaysSigned) {
+ argText = '+' + argText;
+ } else if (flagPadSign) {
+ argText = ' ' + argText;
+ }
+ }
+ }
+
+ // Add padding.
+ while (argText.length < width) {
+ if (flagLeftAlign) {
+ argText += ' ';
+ } else {
+ if (flagZeroPad && (argText[0] == '-' || argText[0] == '+')) {
+ argText = argText[0] + '0' + argText.slice(1);
+ } else {
+ argText = (flagZeroPad ? '0' : ' ') + argText;
+ }
+ }
+ }
+
+ // Adjust case.
+ if (next < 97) argText = argText.toUpperCase();
+
+ // Insert the result into the buffer.
+ argText.split('').forEach(function(chr) {
+ ret.push(chr.charCodeAt(0));
+ });
+ break;
+ }
+ case 's': {
+ // String.
+ var arg = getNextArg('i8*');
+ var argLength = arg ? _strlen(arg) : '(null)'.length;
+ if (precisionSet) argLength = Math.min(argLength, precision);
+ if (!flagLeftAlign) {
+ while (argLength < width--) {
+ ret.push(32);
+ }
+ }
+ if (arg) {
+ for (var i = 0; i < argLength; i++) {
+ ret.push(HEAPU8[((arg++)|0)]);
+ }
+ } else {
+ ret = ret.concat(intArrayFromString('(null)'.substr(0, argLength), true));
+ }
+ if (flagLeftAlign) {
+ while (argLength < width--) {
+ ret.push(32);
+ }
+ }
+ break;
+ }
+ case 'c': {
+ // Character.
+ if (flagLeftAlign) ret.push(getNextArg('i8'));
+ while (--width > 0) {
+ ret.push(32);
+ }
+ if (!flagLeftAlign) ret.push(getNextArg('i8'));
+ break;
+ }
+ case 'n': {
+ // Write the length written so far to the next parameter.
+ var ptr = getNextArg('i32*');
+ HEAP32[((ptr)>>2)]=ret.length;
+ break;
+ }
+ case '%': {
+ // Literal percent sign.
+ ret.push(curr);
+ break;
+ }
+ default: {
+ // Unknown specifiers remain untouched.
+ for (var i = startTextIndex; i < textIndex + 2; i++) {
+ ret.push(HEAP8[(i)]);
+ }
+ }
+ }
+ textIndex += 2;
+ // TODO: Support a/A (hex float) and m (last error) specifiers.
+ // TODO: Support %1${specifier} for arg selection.
+ } else {
+ ret.push(curr);
+ textIndex += 1;
+ }
+ }
+ return ret;
+ }function _fprintf(stream, format, varargs) {
+ // int fprintf(FILE *restrict stream, const char *restrict format, ...);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/printf.html
+ var result = __formatString(format, varargs);
+ var stack = Runtime.stackSave();
+ var ret = _fwrite(allocate(result, 'i8', ALLOC_STACK), 1, result.length, stream);
+ Runtime.stackRestore(stack);
+ return ret;
+ }function _printf(format, varargs) {
+ // int printf(const char *restrict format, ...);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/printf.html
+ var stdout = HEAP32[((_stdout)>>2)];
+ return _fprintf(stdout, format, varargs);
+ }
+
+
+ Module["_memset"] = _memset;
+
+
+
+ function _emscripten_memcpy_big(dest, src, num) {
+ HEAPU8.set(HEAPU8.subarray(src, src+num), dest);
+ return dest;
+ }
+ Module["_memcpy"] = _memcpy;
+
+ function _free() {
+ }
+ Module["_free"] = _free;
+Module["requestFullScreen"] = function Module_requestFullScreen(lockPointer, resizeCanvas) { Browser.requestFullScreen(lockPointer, resizeCanvas) };
+ Module["requestAnimationFrame"] = function Module_requestAnimationFrame(func) { Browser.requestAnimationFrame(func) };
+ Module["setCanvasSize"] = function Module_setCanvasSize(width, height, noUpdates) { Browser.setCanvasSize(width, height, noUpdates) };
+ Module["pauseMainLoop"] = function Module_pauseMainLoop() { Browser.mainLoop.pause() };
+ Module["resumeMainLoop"] = function Module_resumeMainLoop() { Browser.mainLoop.resume() };
+ Module["getUserMedia"] = function Module_getUserMedia() { Browser.getUserMedia() }
+FS.staticInit();__ATINIT__.unshift({ func: function() { if (!Module["noFSInit"] && !FS.init.initialized) FS.init() } });__ATMAIN__.push({ func: function() { FS.ignorePermissions = false } });__ATEXIT__.push({ func: function() { FS.quit() } });Module["FS_createFolder"] = FS.createFolder;Module["FS_createPath"] = FS.createPath;Module["FS_createDataFile"] = FS.createDataFile;Module["FS_createPreloadedFile"] = FS.createPreloadedFile;Module["FS_createLazyFile"] = FS.createLazyFile;Module["FS_createLink"] = FS.createLink;Module["FS_createDevice"] = FS.createDevice;
+___errno_state = Runtime.staticAlloc(4); HEAP32[((___errno_state)>>2)]=0;
+__ATINIT__.unshift({ func: function() { TTY.init() } });__ATEXIT__.push({ func: function() { TTY.shutdown() } });TTY.utf8 = new Runtime.UTF8Processor();
+if (ENVIRONMENT_IS_NODE) { var fs = require("fs"); NODEFS.staticInit(); }
+__ATINIT__.push({ func: function() { SOCKFS.root = FS.mount(SOCKFS, {}, null); } });
+STACK_BASE = STACKTOP = Runtime.alignMemory(STATICTOP);
+
+staticSealed = true; // seal the static portion of memory
+
+STACK_MAX = STACK_BASE + 5242880;
+
+DYNAMIC_BASE = DYNAMICTOP = Runtime.alignMemory(STACK_MAX);
+
+assert(DYNAMIC_BASE < TOTAL_MEMORY, "TOTAL_MEMORY not big enough for stack");
+
+
+var Math_min = Math.min;
+function asmPrintInt(x, y) {
+ Module.print('int ' + x + ',' + y);// + ' ' + new Error().stack);
+}
+function asmPrintFloat(x, y) {
+ Module.print('float ' + x + ',' + y);// + ' ' + new Error().stack);
+}
+// EMSCRIPTEN_START_ASM
+var asm = Wasm.instantiateModuleFromAsm((function Module(global, env, buffer) {
+ 'use asm';
+ var HEAP8 = new global.Int8Array(buffer);
+ var HEAP16 = new global.Int16Array(buffer);
+ var HEAP32 = new global.Int32Array(buffer);
+ var HEAPU8 = new global.Uint8Array(buffer);
+ var HEAPU16 = new global.Uint16Array(buffer);
+ var HEAPU32 = new global.Uint32Array(buffer);
+ var HEAPF32 = new global.Float32Array(buffer);
+ var HEAPF64 = new global.Float64Array(buffer);
+
+ var STACKTOP=env.STACKTOP|0;
+ var STACK_MAX=env.STACK_MAX|0;
+ var tempDoublePtr=env.tempDoublePtr|0;
+ var ABORT=env.ABORT|0;
+
+ var __THREW__ = 0;
+ var threwValue = 0;
+ var setjmpId = 0;
+ var undef = 0;
+ var nan = +env.NaN, inf = +env.Infinity;
+ var tempInt = 0, tempBigInt = 0, tempBigIntP = 0, tempBigIntS = 0, tempBigIntR = 0.0, tempBigIntI = 0, tempBigIntD = 0, tempValue = 0, tempDouble = 0.0;
+
+ var tempRet0 = 0;
+ var tempRet1 = 0;
+ var tempRet2 = 0;
+ var tempRet3 = 0;
+ var tempRet4 = 0;
+ var tempRet5 = 0;
+ var tempRet6 = 0;
+ var tempRet7 = 0;
+ var tempRet8 = 0;
+ var tempRet9 = 0;
+ var Math_floor=global.Math.floor;
+ var Math_abs=global.Math.abs;
+ var Math_sqrt=global.Math.sqrt;
+ var Math_pow=global.Math.pow;
+ var Math_cos=global.Math.cos;
+ var Math_sin=global.Math.sin;
+ var Math_tan=global.Math.tan;
+ var Math_acos=global.Math.acos;
+ var Math_asin=global.Math.asin;
+ var Math_atan=global.Math.atan;
+ var Math_atan2=global.Math.atan2;
+ var Math_exp=global.Math.exp;
+ var Math_log=global.Math.log;
+ var Math_ceil=global.Math.ceil;
+ var Math_imul=global.Math.imul;
+ var abort=env.abort;
+ var assert=env.assert;
+ var asmPrintInt=env.asmPrintInt;
+ var asmPrintFloat=env.asmPrintFloat;
+ var Math_min=env.min;
+ var _free=env._free;
+ var _emscripten_memcpy_big=env._emscripten_memcpy_big;
+ var _printf=env._printf;
+ var _send=env._send;
+ var _pwrite=env._pwrite;
+ var __reallyNegative=env.__reallyNegative;
+ var _fwrite=env._fwrite;
+ var _malloc=env._malloc;
+ var _mkport=env._mkport;
+ var _fprintf=env._fprintf;
+ var ___setErrNo=env.___setErrNo;
+ var __formatString=env.__formatString;
+ var _fileno=env._fileno;
+ var _fflush=env._fflush;
+ var _write=env._write;
+ var tempFloat = 0.0;
+
+// EMSCRIPTEN_START_FUNCS
+function _main(i3, i5) {
+ i3 = i3 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i2 = 0, i4 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i2 = i1;
+ L1 : do {
+ if ((i3 | 0) > 1) {
+ i3 = HEAP8[HEAP32[i5 + 4 >> 2] | 0] | 0;
+ switch (i3 | 0) {
+ case 50:
+ {
+ i3 = 3500;
+ break L1;
+ }
+ case 51:
+ {
+ i4 = 4;
+ break L1;
+ }
+ case 52:
+ {
+ i3 = 35e3;
+ break L1;
+ }
+ case 53:
+ {
+ i3 = 7e4;
+ break L1;
+ }
+ case 49:
+ {
+ i3 = 550;
+ break L1;
+ }
+ case 48:
+ {
+ i11 = 0;
+ STACKTOP = i1;
+ return i11 | 0;
+ }
+ default:
+ {
+ HEAP32[i2 >> 2] = i3 + -48;
+ _printf(8, i2 | 0) | 0;
+ i11 = -1;
+ STACKTOP = i1;
+ return i11 | 0;
+ }
+ }
+ } else {
+ i4 = 4;
+ }
+ } while (0);
+ if ((i4 | 0) == 4) {
+ i3 = 7e3;
+ }
+ i11 = 0;
+ i8 = 0;
+ i5 = 0;
+ while (1) {
+ i6 = ((i5 | 0) % 5 | 0) + 1 | 0;
+ i4 = ((i5 | 0) % 3 | 0) + 1 | 0;
+ i7 = 0;
+ while (1) {
+ i11 = ((i7 | 0) / (i6 | 0) | 0) + i11 | 0;
+ if (i11 >>> 0 > 1e3) {
+ i11 = (i11 >>> 0) / (i4 >>> 0) | 0;
+ }
+ if ((i7 & 3 | 0) == 0) {
+ i11 = i11 + (Math_imul((i7 & 7 | 0) == 0 ? 1 : -1, i7) | 0) | 0;
+ }
+ i10 = i11 << 16 >> 16;
+ i10 = (Math_imul(i10, i10) | 0) & 255;
+ i9 = i10 + (i8 & 65535) | 0;
+ i7 = i7 + 1 | 0;
+ if ((i7 | 0) == 2e4) {
+ break;
+ } else {
+ i8 = i9;
+ }
+ }
+ i5 = i5 + 1 | 0;
+ if ((i5 | 0) < (i3 | 0)) {
+ i8 = i9;
+ } else {
+ break;
+ }
+ }
+ HEAP32[i2 >> 2] = i11;
+ HEAP32[i2 + 4 >> 2] = i8 + i10 & 65535;
+ _printf(24, i2 | 0) | 0;
+ i11 = 0;
+ STACKTOP = i1;
+ return i11 | 0;
+}
+function _memcpy(i3, i2, i1) {
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i4 = 0;
+ if ((i1 | 0) >= 4096) return _emscripten_memcpy_big(i3 | 0, i2 | 0, i1 | 0) | 0;
+ i4 = i3 | 0;
+ if ((i3 & 3) == (i2 & 3)) {
+ while (i3 & 3) {
+ if ((i1 | 0) == 0) return i4 | 0;
+ HEAP8[i3] = HEAP8[i2] | 0;
+ i3 = i3 + 1 | 0;
+ i2 = i2 + 1 | 0;
+ i1 = i1 - 1 | 0;
+ }
+ while ((i1 | 0) >= 4) {
+ HEAP32[i3 >> 2] = HEAP32[i2 >> 2];
+ i3 = i3 + 4 | 0;
+ i2 = i2 + 4 | 0;
+ i1 = i1 - 4 | 0;
+ }
+ }
+ while ((i1 | 0) > 0) {
+ HEAP8[i3] = HEAP8[i2] | 0;
+ i3 = i3 + 1 | 0;
+ i2 = i2 + 1 | 0;
+ i1 = i1 - 1 | 0;
+ }
+ return i4 | 0;
+}
+function _memset(i1, i4, i3) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i5 = 0, i6 = 0, i7 = 0;
+ i2 = i1 + i3 | 0;
+ if ((i3 | 0) >= 20) {
+ i4 = i4 & 255;
+ i7 = i1 & 3;
+ i6 = i4 | i4 << 8 | i4 << 16 | i4 << 24;
+ i5 = i2 & ~3;
+ if (i7) {
+ i7 = i1 + 4 - i7 | 0;
+ while ((i1 | 0) < (i7 | 0)) {
+ HEAP8[i1] = i4;
+ i1 = i1 + 1 | 0;
+ }
+ }
+ while ((i1 | 0) < (i5 | 0)) {
+ HEAP32[i1 >> 2] = i6;
+ i1 = i1 + 4 | 0;
+ }
+ }
+ while ((i1 | 0) < (i2 | 0)) {
+ HEAP8[i1] = i4;
+ i1 = i1 + 1 | 0;
+ }
+ return i1 - i3 | 0;
+}
+function copyTempDouble(i1) {
+ i1 = i1 | 0;
+ HEAP8[tempDoublePtr] = HEAP8[i1];
+ HEAP8[tempDoublePtr + 1 | 0] = HEAP8[i1 + 1 | 0];
+ HEAP8[tempDoublePtr + 2 | 0] = HEAP8[i1 + 2 | 0];
+ HEAP8[tempDoublePtr + 3 | 0] = HEAP8[i1 + 3 | 0];
+ HEAP8[tempDoublePtr + 4 | 0] = HEAP8[i1 + 4 | 0];
+ HEAP8[tempDoublePtr + 5 | 0] = HEAP8[i1 + 5 | 0];
+ HEAP8[tempDoublePtr + 6 | 0] = HEAP8[i1 + 6 | 0];
+ HEAP8[tempDoublePtr + 7 | 0] = HEAP8[i1 + 7 | 0];
+}
+function copyTempFloat(i1) {
+ i1 = i1 | 0;
+ HEAP8[tempDoublePtr] = HEAP8[i1];
+ HEAP8[tempDoublePtr + 1 | 0] = HEAP8[i1 + 1 | 0];
+ HEAP8[tempDoublePtr + 2 | 0] = HEAP8[i1 + 2 | 0];
+ HEAP8[tempDoublePtr + 3 | 0] = HEAP8[i1 + 3 | 0];
+}
+function runPostSets() {}
+function _strlen(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = i1;
+ while (HEAP8[i2] | 0) {
+ i2 = i2 + 1 | 0;
+ }
+ return i2 - i1 | 0;
+}
+function stackAlloc(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + i1 | 0;
+ STACKTOP = STACKTOP + 7 & -8;
+ return i2 | 0;
+}
+function setThrew(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ if ((__THREW__ | 0) == 0) {
+ __THREW__ = i1;
+ threwValue = i2;
+ }
+}
+function stackRestore(i1) {
+ i1 = i1 | 0;
+ STACKTOP = i1;
+}
+function setTempRet9(i1) {
+ i1 = i1 | 0;
+ tempRet9 = i1;
+}
+function setTempRet8(i1) {
+ i1 = i1 | 0;
+ tempRet8 = i1;
+}
+function setTempRet7(i1) {
+ i1 = i1 | 0;
+ tempRet7 = i1;
+}
+function setTempRet6(i1) {
+ i1 = i1 | 0;
+ tempRet6 = i1;
+}
+function setTempRet5(i1) {
+ i1 = i1 | 0;
+ tempRet5 = i1;
+}
+function setTempRet4(i1) {
+ i1 = i1 | 0;
+ tempRet4 = i1;
+}
+function setTempRet3(i1) {
+ i1 = i1 | 0;
+ tempRet3 = i1;
+}
+function setTempRet2(i1) {
+ i1 = i1 | 0;
+ tempRet2 = i1;
+}
+function setTempRet1(i1) {
+ i1 = i1 | 0;
+ tempRet1 = i1;
+}
+function setTempRet0(i1) {
+ i1 = i1 | 0;
+ tempRet0 = i1;
+}
+function stackSave() {
+ return STACKTOP | 0;
+}
+
+// EMSCRIPTEN_END_FUNCS
+
+
+ return { _strlen: _strlen, _memcpy: _memcpy, _main: _main, _memset: _memset, runPostSets: runPostSets, stackAlloc: stackAlloc, stackSave: stackSave, stackRestore: stackRestore, setThrew: setThrew, setTempRet0: setTempRet0, setTempRet1: setTempRet1, setTempRet2: setTempRet2, setTempRet3: setTempRet3, setTempRet4: setTempRet4, setTempRet5: setTempRet5, setTempRet6: setTempRet6, setTempRet7: setTempRet7, setTempRet8: setTempRet8, setTempRet9: setTempRet9 };
+}).toString(),
+// EMSCRIPTEN_END_ASM
+{ "Math": Math, "Int8Array": Int8Array, "Int16Array": Int16Array, "Int32Array": Int32Array, "Uint8Array": Uint8Array, "Uint16Array": Uint16Array, "Uint32Array": Uint32Array, "Float32Array": Float32Array, "Float64Array": Float64Array, "abort": abort, "assert": assert, "asmPrintInt": asmPrintInt, "asmPrintFloat": asmPrintFloat, "min": Math_min, "_free": _free, "_emscripten_memcpy_big": _emscripten_memcpy_big, "_printf": _printf, "_send": _send, "_pwrite": _pwrite, "__reallyNegative": __reallyNegative, "_fwrite": _fwrite, "_malloc": _malloc, "_mkport": _mkport, "_fprintf": _fprintf, "___setErrNo": ___setErrNo, "__formatString": __formatString, "_fileno": _fileno, "_fflush": _fflush, "_write": _write, "STACKTOP": STACKTOP, "STACK_MAX": STACK_MAX, "tempDoublePtr": tempDoublePtr, "ABORT": ABORT, "NaN": NaN, "Infinity": Infinity }, buffer);
+var _strlen = Module["_strlen"] = asm["_strlen"];
+var _memcpy = Module["_memcpy"] = asm["_memcpy"];
+var _main = Module["_main"] = asm["_main"];
+var _memset = Module["_memset"] = asm["_memset"];
+var runPostSets = Module["runPostSets"] = asm["runPostSets"];
+
+Runtime.stackAlloc = function(size) { return asm['stackAlloc'](size) };
+Runtime.stackSave = function() { return asm['stackSave']() };
+Runtime.stackRestore = function(top) { asm['stackRestore'](top) };
+
+
+// Warning: printing of i64 values may be slightly rounded! No deep i64 math used, so precise i64 code not included
+var i64Math = null;
+
+// === Auto-generated postamble setup entry stuff ===
+
+if (memoryInitializer) {
+ if (ENVIRONMENT_IS_NODE || ENVIRONMENT_IS_SHELL) {
+ var data = Module['readBinary'](memoryInitializer);
+ HEAPU8.set(data, STATIC_BASE);
+ } else {
+ addRunDependency('memory initializer');
+ Browser.asyncLoad(memoryInitializer, function(data) {
+ HEAPU8.set(data, STATIC_BASE);
+ removeRunDependency('memory initializer');
+ }, function(data) {
+ throw 'could not load memory initializer ' + memoryInitializer;
+ });
+ }
+}
+
+function ExitStatus(status) {
+ this.name = "ExitStatus";
+ this.message = "Program terminated with exit(" + status + ")";
+ this.status = status;
+};
+ExitStatus.prototype = new Error();
+ExitStatus.prototype.constructor = ExitStatus;
+
+var initialStackTop;
+var preloadStartTime = null;
+var calledMain = false;
+
+dependenciesFulfilled = function runCaller() {
+ // If run has never been called, and we should call run (INVOKE_RUN is true, and Module.noInitialRun is not false)
+ if (!Module['calledRun'] && shouldRunNow) run([].concat(Module["arguments"]));
+ if (!Module['calledRun']) dependenciesFulfilled = runCaller; // try this again later, after new deps are fulfilled
+}
+
+Module['callMain'] = Module.callMain = function callMain(args) {
+ assert(runDependencies == 0, 'cannot call main when async dependencies remain! (listen on __ATMAIN__)');
+ assert(__ATPRERUN__.length == 0, 'cannot call main when preRun functions remain to be called');
+
+ args = args || [];
+
+ ensureInitRuntime();
+
+ var argc = args.length+1;
+ function pad() {
+ for (var i = 0; i < 4-1; i++) {
+ argv.push(0);
+ }
+ }
+ var argv = [allocate(intArrayFromString("/bin/this.program"), 'i8', ALLOC_NORMAL) ];
+ pad();
+ for (var i = 0; i < argc-1; i = i + 1) {
+ argv.push(allocate(intArrayFromString(args[i]), 'i8', ALLOC_NORMAL));
+ pad();
+ }
+ argv.push(0);
+ argv = allocate(argv, 'i32', ALLOC_NORMAL);
+
+ initialStackTop = STACKTOP;
+
+ try {
+
+ var ret = Module['_main'](argc, argv, 0);
+
+
+ // if we're not running an evented main loop, it's time to exit
+ if (!Module['noExitRuntime']) {
+ exit(ret);
+ }
+ }
+ catch(e) {
+ if (e instanceof ExitStatus) {
+ // exit() throws this once it's done to make sure execution
+ // has been stopped completely
+ return;
+ } else if (e == 'SimulateInfiniteLoop') {
+ // running an evented main loop, don't immediately exit
+ Module['noExitRuntime'] = true;
+ return;
+ } else {
+ if (e && typeof e === 'object' && e.stack) Module.printErr('exception thrown: ' + [e, e.stack]);
+ throw e;
+ }
+ } finally {
+ calledMain = true;
+ }
+}
+
+
+
+
+function run(args) {
+ args = args || Module['arguments'];
+
+ if (preloadStartTime === null) preloadStartTime = Date.now();
+
+ if (runDependencies > 0) {
+ Module.printErr('run() called, but dependencies remain, so not running');
+ return;
+ }
+
+ preRun();
+
+ if (runDependencies > 0) return; // a preRun added a dependency, run will be called later
+ if (Module['calledRun']) return; // run may have just been called through dependencies being fulfilled just in this very frame
+
+ function doRun() {
+ if (Module['calledRun']) return; // run may have just been called while the async setStatus time below was happening
+ Module['calledRun'] = true;
+
+ ensureInitRuntime();
+
+ preMain();
+
+ if (ENVIRONMENT_IS_WEB && preloadStartTime !== null) {
+ Module.printErr('pre-main prep time: ' + (Date.now() - preloadStartTime) + ' ms');
+ }
+
+ if (Module['_main'] && shouldRunNow) {
+ Module['callMain'](args);
+ }
+
+ postRun();
+ }
+
+ if (Module['setStatus']) {
+ Module['setStatus']('Running...');
+ setTimeout(function() {
+ setTimeout(function() {
+ Module['setStatus']('');
+ }, 1);
+ if (!ABORT) doRun();
+ }, 1);
+ } else {
+ doRun();
+ }
+}
+Module['run'] = Module.run = run;
+
+function exit(status) {
+ ABORT = true;
+ EXITSTATUS = status;
+ STACKTOP = initialStackTop;
+
+ // exit the runtime
+ exitRuntime();
+
+ // TODO We should handle this differently based on environment.
+ // In the browser, the best we can do is throw an exception
+ // to halt execution, but in node we could process.exit and
+ // I'd imagine SM shell would have something equivalent.
+ // This would let us set a proper exit status (which
+ // would be great for checking test exit statuses).
+ // https://github.com/kripken/emscripten/issues/1371
+
+ // throw an exception to halt the current execution
+ throw new ExitStatus(status);
+}
+Module['exit'] = Module.exit = exit;
+
+function abort(text) {
+ if (text) {
+ Module.print(text);
+ Module.printErr(text);
+ }
+
+ ABORT = true;
+ EXITSTATUS = 1;
+
+ var extra = '\nIf this abort() is unexpected, build with -s ASSERTIONS=1 which can give more information.';
+
+ throw 'abort() at ' + stackTrace() + extra;
+}
+Module['abort'] = Module.abort = abort;
+
+// {{PRE_RUN_ADDITIONS}}
+
+if (Module['preInit']) {
+ if (typeof Module['preInit'] == 'function') Module['preInit'] = [Module['preInit']];
+ while (Module['preInit'].length > 0) {
+ Module['preInit'].pop()();
+ }
+}
+
+// shouldRunNow refers to calling main(), not run().
+var shouldRunNow = true;
+if (Module['noInitialRun']) {
+ shouldRunNow = false;
+}
+
+
+run([].concat(Module["arguments"]));
diff --git a/deps/v8/test/mjsunit/wasm/embenchen/fannkuch.js b/deps/v8/test/mjsunit/wasm/embenchen/fannkuch.js
new file mode 100644
index 0000000000..8c03a344f2
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/embenchen/fannkuch.js
@@ -0,0 +1,8438 @@
+// Modified embenchen to direct to asm-wasm.
+// Flags: --expose-wasm
+
+var EXPECTED_OUTPUT =
+ '123456789\n' +
+ '213456789\n' +
+ '231456789\n' +
+ '321456789\n' +
+ '312456789\n' +
+ '132456789\n' +
+ '234156789\n' +
+ '324156789\n' +
+ '342156789\n' +
+ '432156789\n' +
+ '423156789\n' +
+ '243156789\n' +
+ '341256789\n' +
+ '431256789\n' +
+ '413256789\n' +
+ '143256789\n' +
+ '134256789\n' +
+ '314256789\n' +
+ '412356789\n' +
+ '142356789\n' +
+ '124356789\n' +
+ '214356789\n' +
+ '241356789\n' +
+ '421356789\n' +
+ '234516789\n' +
+ '324516789\n' +
+ '342516789\n' +
+ '432516789\n' +
+ '423516789\n' +
+ '243516789\n' +
+ 'Pfannkuchen(9) = 30.\n';
+var Module = {
+ arguments: [1],
+ print: function(x) {Module.printBuffer += x + '\n';},
+ preRun: [function() {Module.printBuffer = ''}],
+ postRun: [function() {
+ assertEquals(EXPECTED_OUTPUT, Module.printBuffer);
+ }],
+};
+// The Module object: Our interface to the outside world. We import
+// and export values on it, and do the work to get that through
+// closure compiler if necessary. There are various ways Module can be used:
+// 1. Not defined. We create it here
+// 2. A function parameter, function(Module) { ..generated code.. }
+// 3. pre-run appended it, var Module = {}; ..generated code..
+// 4. External script tag defines var Module.
+// We need to do an eval in order to handle the closure compiler
+// case, where this code here is minified but Module was defined
+// elsewhere (e.g. case 4 above). We also need to check if Module
+// already exists (e.g. case 3 above).
+// Note that if you want to run closure, and also to use Module
+// after the generated code, you will need to define var Module = {};
+// before the code. Then that object will be used in the code, and you
+// can continue to use Module afterwards as well.
+var Module;
+if (!Module) Module = (typeof Module !== 'undefined' ? Module : null) || {};
+
+// Sometimes an existing Module object exists with properties
+// meant to overwrite the default module functionality. Here
+// we collect those properties and reapply _after_ we configure
+// the current environment's defaults to avoid having to be so
+// defensive during initialization.
+var moduleOverrides = {};
+for (var key in Module) {
+ if (Module.hasOwnProperty(key)) {
+ moduleOverrides[key] = Module[key];
+ }
+}
+
+// The environment setup code below is customized to use Module.
+// *** Environment setup code ***
+var ENVIRONMENT_IS_NODE = typeof process === 'object' && typeof require === 'function';
+var ENVIRONMENT_IS_WEB = typeof window === 'object';
+var ENVIRONMENT_IS_WORKER = typeof importScripts === 'function';
+var ENVIRONMENT_IS_SHELL = !ENVIRONMENT_IS_WEB && !ENVIRONMENT_IS_NODE && !ENVIRONMENT_IS_WORKER;
+
+if (ENVIRONMENT_IS_NODE) {
+ // Expose functionality in the same simple way that the shells work
+ // Note that we pollute the global namespace here, otherwise we break in node
+ if (!Module['print']) Module['print'] = function print(x) {
+ process['stdout'].write(x + '\n');
+ };
+ if (!Module['printErr']) Module['printErr'] = function printErr(x) {
+ process['stderr'].write(x + '\n');
+ };
+
+ var nodeFS = require('fs');
+ var nodePath = require('path');
+
+ Module['read'] = function read(filename, binary) {
+ filename = nodePath['normalize'](filename);
+ var ret = nodeFS['readFileSync'](filename);
+ // The path is absolute if the normalized version is the same as the resolved.
+ if (!ret && filename != nodePath['resolve'](filename)) {
+ filename = path.join(__dirname, '..', 'src', filename);
+ ret = nodeFS['readFileSync'](filename);
+ }
+ if (ret && !binary) ret = ret.toString();
+ return ret;
+ };
+
+ Module['readBinary'] = function readBinary(filename) { return Module['read'](filename, true) };
+
+ Module['load'] = function load(f) {
+ globalEval(read(f));
+ };
+
+ Module['arguments'] = process['argv'].slice(2);
+
+ module['exports'] = Module;
+}
+else if (ENVIRONMENT_IS_SHELL) {
+ if (!Module['print']) Module['print'] = print;
+ if (typeof printErr != 'undefined') Module['printErr'] = printErr; // not present in v8 or older sm
+
+ if (typeof read != 'undefined') {
+ Module['read'] = read;
+ } else {
+ Module['read'] = function read() { throw 'no read() available (jsc?)' };
+ }
+
+ Module['readBinary'] = function readBinary(f) {
+ return read(f, 'binary');
+ };
+
+ if (typeof scriptArgs != 'undefined') {
+ Module['arguments'] = scriptArgs;
+ } else if (typeof arguments != 'undefined') {
+ Module['arguments'] = arguments;
+ }
+
+ this['Module'] = Module;
+
+ eval("if (typeof gc === 'function' && gc.toString().indexOf('[native code]') > 0) var gc = undefined"); // wipe out the SpiderMonkey shell 'gc' function, which can confuse closure (uses it as a minified name, and it is then initted to a non-falsey value unexpectedly)
+}
+else if (ENVIRONMENT_IS_WEB || ENVIRONMENT_IS_WORKER) {
+ Module['read'] = function read(url) {
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, false);
+ xhr.send(null);
+ return xhr.responseText;
+ };
+
+ if (typeof arguments != 'undefined') {
+ Module['arguments'] = arguments;
+ }
+
+ if (typeof console !== 'undefined') {
+ if (!Module['print']) Module['print'] = function print(x) {
+ console.log(x);
+ };
+ if (!Module['printErr']) Module['printErr'] = function printErr(x) {
+ console.log(x);
+ };
+ } else {
+ // Probably a worker, and without console.log. We can do very little here...
+ var TRY_USE_DUMP = false;
+ if (!Module['print']) Module['print'] = (TRY_USE_DUMP && (typeof(dump) !== "undefined") ? (function(x) {
+ dump(x);
+ }) : (function(x) {
+ // self.postMessage(x); // enable this if you want stdout to be sent as messages
+ }));
+ }
+
+ if (ENVIRONMENT_IS_WEB) {
+ window['Module'] = Module;
+ } else {
+ Module['load'] = importScripts;
+ }
+}
+else {
+ // Unreachable because SHELL is dependant on the others
+ throw 'Unknown runtime environment. Where are we?';
+}
+
+function globalEval(x) {
+ eval.call(null, x);
+}
+if (!Module['load'] == 'undefined' && Module['read']) {
+ Module['load'] = function load(f) {
+ globalEval(Module['read'](f));
+ };
+}
+if (!Module['print']) {
+ Module['print'] = function(){};
+}
+if (!Module['printErr']) {
+ Module['printErr'] = Module['print'];
+}
+if (!Module['arguments']) {
+ Module['arguments'] = [];
+}
+// *** Environment setup code ***
+
+// Closure helpers
+Module.print = Module['print'];
+Module.printErr = Module['printErr'];
+
+// Callbacks
+Module['preRun'] = [];
+Module['postRun'] = [];
+
+// Merge back in the overrides
+for (var key in moduleOverrides) {
+ if (moduleOverrides.hasOwnProperty(key)) {
+ Module[key] = moduleOverrides[key];
+ }
+}
+
+
+
+// === Auto-generated preamble library stuff ===
+
+//========================================
+// Runtime code shared with compiler
+//========================================
+
+var Runtime = {
+ stackSave: function () {
+ return STACKTOP;
+ },
+ stackRestore: function (stackTop) {
+ STACKTOP = stackTop;
+ },
+ forceAlign: function (target, quantum) {
+ quantum = quantum || 4;
+ if (quantum == 1) return target;
+ if (isNumber(target) && isNumber(quantum)) {
+ return Math.ceil(target/quantum)*quantum;
+ } else if (isNumber(quantum) && isPowerOfTwo(quantum)) {
+ return '(((' +target + ')+' + (quantum-1) + ')&' + -quantum + ')';
+ }
+ return 'Math.ceil((' + target + ')/' + quantum + ')*' + quantum;
+ },
+ isNumberType: function (type) {
+ return type in Runtime.INT_TYPES || type in Runtime.FLOAT_TYPES;
+ },
+ isPointerType: function isPointerType(type) {
+ return type[type.length-1] == '*';
+},
+ isStructType: function isStructType(type) {
+ if (isPointerType(type)) return false;
+ if (isArrayType(type)) return true;
+ if (/<?\{ ?[^}]* ?\}>?/.test(type)) return true; // { i32, i8 } etc. - anonymous struct types
+ // See comment in isStructPointerType()
+ return type[0] == '%';
+},
+ INT_TYPES: {"i1":0,"i8":0,"i16":0,"i32":0,"i64":0},
+ FLOAT_TYPES: {"float":0,"double":0},
+ or64: function (x, y) {
+ var l = (x | 0) | (y | 0);
+ var h = (Math.round(x / 4294967296) | Math.round(y / 4294967296)) * 4294967296;
+ return l + h;
+ },
+ and64: function (x, y) {
+ var l = (x | 0) & (y | 0);
+ var h = (Math.round(x / 4294967296) & Math.round(y / 4294967296)) * 4294967296;
+ return l + h;
+ },
+ xor64: function (x, y) {
+ var l = (x | 0) ^ (y | 0);
+ var h = (Math.round(x / 4294967296) ^ Math.round(y / 4294967296)) * 4294967296;
+ return l + h;
+ },
+ getNativeTypeSize: function (type) {
+ switch (type) {
+ case 'i1': case 'i8': return 1;
+ case 'i16': return 2;
+ case 'i32': return 4;
+ case 'i64': return 8;
+ case 'float': return 4;
+ case 'double': return 8;
+ default: {
+ if (type[type.length-1] === '*') {
+ return Runtime.QUANTUM_SIZE; // A pointer
+ } else if (type[0] === 'i') {
+ var bits = parseInt(type.substr(1));
+ assert(bits % 8 === 0);
+ return bits/8;
+ } else {
+ return 0;
+ }
+ }
+ }
+ },
+ getNativeFieldSize: function (type) {
+ return Math.max(Runtime.getNativeTypeSize(type), Runtime.QUANTUM_SIZE);
+ },
+ dedup: function dedup(items, ident) {
+ var seen = {};
+ if (ident) {
+ return items.filter(function(item) {
+ if (seen[item[ident]]) return false;
+ seen[item[ident]] = true;
+ return true;
+ });
+ } else {
+ return items.filter(function(item) {
+ if (seen[item]) return false;
+ seen[item] = true;
+ return true;
+ });
+ }
+},
+ set: function set() {
+ var args = typeof arguments[0] === 'object' ? arguments[0] : arguments;
+ var ret = {};
+ for (var i = 0; i < args.length; i++) {
+ ret[args[i]] = 0;
+ }
+ return ret;
+},
+ STACK_ALIGN: 8,
+ getAlignSize: function (type, size, vararg) {
+ // we align i64s and doubles on 64-bit boundaries, unlike x86
+ if (!vararg && (type == 'i64' || type == 'double')) return 8;
+ if (!type) return Math.min(size, 8); // align structures internally to 64 bits
+ return Math.min(size || (type ? Runtime.getNativeFieldSize(type) : 0), Runtime.QUANTUM_SIZE);
+ },
+ calculateStructAlignment: function calculateStructAlignment(type) {
+ type.flatSize = 0;
+ type.alignSize = 0;
+ var diffs = [];
+ var prev = -1;
+ var index = 0;
+ type.flatIndexes = type.fields.map(function(field) {
+ index++;
+ var size, alignSize;
+ if (Runtime.isNumberType(field) || Runtime.isPointerType(field)) {
+ size = Runtime.getNativeTypeSize(field); // pack char; char; in structs, also char[X]s.
+ alignSize = Runtime.getAlignSize(field, size);
+ } else if (Runtime.isStructType(field)) {
+ if (field[1] === '0') {
+ // this is [0 x something]. When inside another structure like here, it must be at the end,
+ // and it adds no size
+ // XXX this happens in java-nbody for example... assert(index === type.fields.length, 'zero-length in the middle!');
+ size = 0;
+ if (Types.types[field]) {
+ alignSize = Runtime.getAlignSize(null, Types.types[field].alignSize);
+ } else {
+ alignSize = type.alignSize || QUANTUM_SIZE;
+ }
+ } else {
+ size = Types.types[field].flatSize;
+ alignSize = Runtime.getAlignSize(null, Types.types[field].alignSize);
+ }
+ } else if (field[0] == 'b') {
+ // bN, large number field, like a [N x i8]
+ size = field.substr(1)|0;
+ alignSize = 1;
+ } else if (field[0] === '<') {
+ // vector type
+ size = alignSize = Types.types[field].flatSize; // fully aligned
+ } else if (field[0] === 'i') {
+ // illegal integer field, that could not be legalized because it is an internal structure field
+ // it is ok to have such fields, if we just use them as markers of field size and nothing more complex
+ size = alignSize = parseInt(field.substr(1))/8;
+ assert(size % 1 === 0, 'cannot handle non-byte-size field ' + field);
+ } else {
+ assert(false, 'invalid type for calculateStructAlignment');
+ }
+ if (type.packed) alignSize = 1;
+ type.alignSize = Math.max(type.alignSize, alignSize);
+ var curr = Runtime.alignMemory(type.flatSize, alignSize); // if necessary, place this on aligned memory
+ type.flatSize = curr + size;
+ if (prev >= 0) {
+ diffs.push(curr-prev);
+ }
+ prev = curr;
+ return curr;
+ });
+ if (type.name_ && type.name_[0] === '[') {
+ // arrays have 2 elements, so we get the proper difference. then we scale here. that way we avoid
+ // allocating a potentially huge array for [999999 x i8] etc.
+ type.flatSize = parseInt(type.name_.substr(1))*type.flatSize/2;
+ }
+ type.flatSize = Runtime.alignMemory(type.flatSize, type.alignSize);
+ if (diffs.length == 0) {
+ type.flatFactor = type.flatSize;
+ } else if (Runtime.dedup(diffs).length == 1) {
+ type.flatFactor = diffs[0];
+ }
+ type.needsFlattening = (type.flatFactor != 1);
+ return type.flatIndexes;
+ },
+ generateStructInfo: function (struct, typeName, offset) {
+ var type, alignment;
+ if (typeName) {
+ offset = offset || 0;
+ type = (typeof Types === 'undefined' ? Runtime.typeInfo : Types.types)[typeName];
+ if (!type) return null;
+ if (type.fields.length != struct.length) {
+ printErr('Number of named fields must match the type for ' + typeName + ': possibly duplicate struct names. Cannot return structInfo');
+ return null;
+ }
+ alignment = type.flatIndexes;
+ } else {
+ var type = { fields: struct.map(function(item) { return item[0] }) };
+ alignment = Runtime.calculateStructAlignment(type);
+ }
+ var ret = {
+ __size__: type.flatSize
+ };
+ if (typeName) {
+ struct.forEach(function(item, i) {
+ if (typeof item === 'string') {
+ ret[item] = alignment[i] + offset;
+ } else {
+ // embedded struct
+ var key;
+ for (var k in item) key = k;
+ ret[key] = Runtime.generateStructInfo(item[key], type.fields[i], alignment[i]);
+ }
+ });
+ } else {
+ struct.forEach(function(item, i) {
+ ret[item[1]] = alignment[i];
+ });
+ }
+ return ret;
+ },
+ dynCall: function (sig, ptr, args) {
+ if (args && args.length) {
+ if (!args.splice) args = Array.prototype.slice.call(args);
+ args.splice(0, 0, ptr);
+ return Module['dynCall_' + sig].apply(null, args);
+ } else {
+ return Module['dynCall_' + sig].call(null, ptr);
+ }
+ },
+ functionPointers: [],
+ addFunction: function (func) {
+ for (var i = 0; i < Runtime.functionPointers.length; i++) {
+ if (!Runtime.functionPointers[i]) {
+ Runtime.functionPointers[i] = func;
+ return 2*(1 + i);
+ }
+ }
+ throw 'Finished up all reserved function pointers. Use a higher value for RESERVED_FUNCTION_POINTERS.';
+ },
+ removeFunction: function (index) {
+ Runtime.functionPointers[(index-2)/2] = null;
+ },
+ getAsmConst: function (code, numArgs) {
+ // code is a constant string on the heap, so we can cache these
+ if (!Runtime.asmConstCache) Runtime.asmConstCache = {};
+ var func = Runtime.asmConstCache[code];
+ if (func) return func;
+ var args = [];
+ for (var i = 0; i < numArgs; i++) {
+ args.push(String.fromCharCode(36) + i); // $0, $1 etc
+ }
+ var source = Pointer_stringify(code);
+ if (source[0] === '"') {
+ // tolerate EM_ASM("..code..") even though EM_ASM(..code..) is correct
+ if (source.indexOf('"', 1) === source.length-1) {
+ source = source.substr(1, source.length-2);
+ } else {
+ // something invalid happened, e.g. EM_ASM("..code($0)..", input)
+ abort('invalid EM_ASM input |' + source + '|. Please use EM_ASM(..code..) (no quotes) or EM_ASM({ ..code($0).. }, input) (to input values)');
+ }
+ }
+ try {
+ var evalled = eval('(function(' + args.join(',') + '){ ' + source + ' })'); // new Function does not allow upvars in node
+ } catch(e) {
+ Module.printErr('error in executing inline EM_ASM code: ' + e + ' on: \n\n' + source + '\n\nwith args |' + args + '| (make sure to use the right one out of EM_ASM, EM_ASM_ARGS, etc.)');
+ throw e;
+ }
+ return Runtime.asmConstCache[code] = evalled;
+ },
+ warnOnce: function (text) {
+ if (!Runtime.warnOnce.shown) Runtime.warnOnce.shown = {};
+ if (!Runtime.warnOnce.shown[text]) {
+ Runtime.warnOnce.shown[text] = 1;
+ Module.printErr(text);
+ }
+ },
+ funcWrappers: {},
+ getFuncWrapper: function (func, sig) {
+ assert(sig);
+ if (!Runtime.funcWrappers[func]) {
+ Runtime.funcWrappers[func] = function dynCall_wrapper() {
+ return Runtime.dynCall(sig, func, arguments);
+ };
+ }
+ return Runtime.funcWrappers[func];
+ },
+ UTF8Processor: function () {
+ var buffer = [];
+ var needed = 0;
+ this.processCChar = function (code) {
+ code = code & 0xFF;
+
+ if (buffer.length == 0) {
+ if ((code & 0x80) == 0x00) { // 0xxxxxxx
+ return String.fromCharCode(code);
+ }
+ buffer.push(code);
+ if ((code & 0xE0) == 0xC0) { // 110xxxxx
+ needed = 1;
+ } else if ((code & 0xF0) == 0xE0) { // 1110xxxx
+ needed = 2;
+ } else { // 11110xxx
+ needed = 3;
+ }
+ return '';
+ }
+
+ if (needed) {
+ buffer.push(code);
+ needed--;
+ if (needed > 0) return '';
+ }
+
+ var c1 = buffer[0];
+ var c2 = buffer[1];
+ var c3 = buffer[2];
+ var c4 = buffer[3];
+ var ret;
+ if (buffer.length == 2) {
+ ret = String.fromCharCode(((c1 & 0x1F) << 6) | (c2 & 0x3F));
+ } else if (buffer.length == 3) {
+ ret = String.fromCharCode(((c1 & 0x0F) << 12) | ((c2 & 0x3F) << 6) | (c3 & 0x3F));
+ } else {
+ // http://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
+ var codePoint = ((c1 & 0x07) << 18) | ((c2 & 0x3F) << 12) |
+ ((c3 & 0x3F) << 6) | (c4 & 0x3F);
+ ret = String.fromCharCode(
+ Math.floor((codePoint - 0x10000) / 0x400) + 0xD800,
+ (codePoint - 0x10000) % 0x400 + 0xDC00);
+ }
+ buffer.length = 0;
+ return ret;
+ }
+ this.processJSString = function processJSString(string) {
+ /* TODO: use TextEncoder when present,
+ var encoder = new TextEncoder();
+ encoder['encoding'] = "utf-8";
+ var utf8Array = encoder['encode'](aMsg.data);
+ */
+ string = unescape(encodeURIComponent(string));
+ var ret = [];
+ for (var i = 0; i < string.length; i++) {
+ ret.push(string.charCodeAt(i));
+ }
+ return ret;
+ }
+ },
+ getCompilerSetting: function (name) {
+ throw 'You must build with -s RETAIN_COMPILER_SETTINGS=1 for Runtime.getCompilerSetting or emscripten_get_compiler_setting to work';
+ },
+ stackAlloc: function (size) { var ret = STACKTOP;STACKTOP = (STACKTOP + size)|0;STACKTOP = (((STACKTOP)+7)&-8); return ret; },
+ staticAlloc: function (size) { var ret = STATICTOP;STATICTOP = (STATICTOP + size)|0;STATICTOP = (((STATICTOP)+7)&-8); return ret; },
+ dynamicAlloc: function (size) { var ret = DYNAMICTOP;DYNAMICTOP = (DYNAMICTOP + size)|0;DYNAMICTOP = (((DYNAMICTOP)+7)&-8); if (DYNAMICTOP >= TOTAL_MEMORY) enlargeMemory();; return ret; },
+ alignMemory: function (size,quantum) { var ret = size = Math.ceil((size)/(quantum ? quantum : 8))*(quantum ? quantum : 8); return ret; },
+ makeBigInt: function (low,high,unsigned) { var ret = (unsigned ? ((+((low>>>0)))+((+((high>>>0)))*(+4294967296))) : ((+((low>>>0)))+((+((high|0)))*(+4294967296)))); return ret; },
+ GLOBAL_BASE: 8,
+ QUANTUM_SIZE: 4,
+ __dummy__: 0
+}
+
+
+Module['Runtime'] = Runtime;
+
+
+
+
+
+
+
+
+
+//========================================
+// Runtime essentials
+//========================================
+
+var __THREW__ = 0; // Used in checking for thrown exceptions.
+
+var ABORT = false; // whether we are quitting the application. no code should run after this. set in exit() and abort()
+var EXITSTATUS = 0;
+
+var undef = 0;
+// tempInt is used for 32-bit signed values or smaller. tempBigInt is used
+// for 32-bit unsigned values or more than 32 bits. TODO: audit all uses of tempInt
+var tempValue, tempInt, tempBigInt, tempInt2, tempBigInt2, tempPair, tempBigIntI, tempBigIntR, tempBigIntS, tempBigIntP, tempBigIntD, tempDouble, tempFloat;
+var tempI64, tempI64b;
+var tempRet0, tempRet1, tempRet2, tempRet3, tempRet4, tempRet5, tempRet6, tempRet7, tempRet8, tempRet9;
+
+function assert(condition, text) {
+ if (!condition) {
+ abort('Assertion failed: ' + text);
+ }
+}
+
+var globalScope = this;
+
+// C calling interface. A convenient way to call C functions (in C files, or
+// defined with extern "C").
+//
+// Note: LLVM optimizations can inline and remove functions, after which you will not be
+// able to call them. Closure can also do so. To avoid that, add your function to
+// the exports using something like
+//
+// -s EXPORTED_FUNCTIONS='["_main", "_myfunc"]'
+//
+// @param ident The name of the C function (note that C++ functions will be name-mangled - use extern "C")
+// @param returnType The return type of the function, one of the JS types 'number', 'string' or 'array' (use 'number' for any C pointer, and
+// 'array' for JavaScript arrays and typed arrays; note that arrays are 8-bit).
+// @param argTypes An array of the types of arguments for the function (if there are no arguments, this can be ommitted). Types are as in returnType,
+// except that 'array' is not possible (there is no way for us to know the length of the array)
+// @param args An array of the arguments to the function, as native JS values (as in returnType)
+// Note that string arguments will be stored on the stack (the JS string will become a C string on the stack).
+// @return The return value, as a native JS value (as in returnType)
+function ccall(ident, returnType, argTypes, args) {
+ return ccallFunc(getCFunc(ident), returnType, argTypes, args);
+}
+Module["ccall"] = ccall;
+
+// Returns the C function with a specified identifier (for C++, you need to do manual name mangling)
+function getCFunc(ident) {
+ try {
+ var func = Module['_' + ident]; // closure exported function
+ if (!func) func = eval('_' + ident); // explicit lookup
+ } catch(e) {
+ }
+ assert(func, 'Cannot call unknown function ' + ident + ' (perhaps LLVM optimizations or closure removed it?)');
+ return func;
+}
+
+// Internal function that does a C call using a function, not an identifier
+function ccallFunc(func, returnType, argTypes, args) {
+ var stack = 0;
+ function toC(value, type) {
+ if (type == 'string') {
+ if (value === null || value === undefined || value === 0) return 0; // null string
+ value = intArrayFromString(value);
+ type = 'array';
+ }
+ if (type == 'array') {
+ if (!stack) stack = Runtime.stackSave();
+ var ret = Runtime.stackAlloc(value.length);
+ writeArrayToMemory(value, ret);
+ return ret;
+ }
+ return value;
+ }
+ function fromC(value, type) {
+ if (type == 'string') {
+ return Pointer_stringify(value);
+ }
+ assert(type != 'array');
+ return value;
+ }
+ var i = 0;
+ var cArgs = args ? args.map(function(arg) {
+ return toC(arg, argTypes[i++]);
+ }) : [];
+ var ret = fromC(func.apply(null, cArgs), returnType);
+ if (stack) Runtime.stackRestore(stack);
+ return ret;
+}
+
+// Returns a native JS wrapper for a C function. This is similar to ccall, but
+// returns a function you can call repeatedly in a normal way. For example:
+//
+// var my_function = cwrap('my_c_function', 'number', ['number', 'number']);
+// alert(my_function(5, 22));
+// alert(my_function(99, 12));
+//
+function cwrap(ident, returnType, argTypes) {
+ var func = getCFunc(ident);
+ return function() {
+ return ccallFunc(func, returnType, argTypes, Array.prototype.slice.call(arguments));
+ }
+}
+Module["cwrap"] = cwrap;
+
+// Sets a value in memory in a dynamic way at run-time. Uses the
+// type data. This is the same as makeSetValue, except that
+// makeSetValue is done at compile-time and generates the needed
+// code then, whereas this function picks the right code at
+// run-time.
+// Note that setValue and getValue only do *aligned* writes and reads!
+// Note that ccall uses JS types as for defining types, while setValue and
+// getValue need LLVM types ('i8', 'i32') - this is a lower-level operation
+function setValue(ptr, value, type, noSafe) {
+ type = type || 'i8';
+ if (type.charAt(type.length-1) === '*') type = 'i32'; // pointers are 32-bit
+ switch(type) {
+ case 'i1': HEAP8[(ptr)]=value; break;
+ case 'i8': HEAP8[(ptr)]=value; break;
+ case 'i16': HEAP16[((ptr)>>1)]=value; break;
+ case 'i32': HEAP32[((ptr)>>2)]=value; break;
+ case 'i64': (tempI64 = [value>>>0,(tempDouble=value,(+(Math_abs(tempDouble))) >= (+1) ? (tempDouble > (+0) ? ((Math_min((+(Math_floor((tempDouble)/(+4294967296)))), (+4294967295)))|0)>>>0 : (~~((+(Math_ceil((tempDouble - +(((~~(tempDouble)))>>>0))/(+4294967296))))))>>>0) : 0)],HEAP32[((ptr)>>2)]=tempI64[0],HEAP32[(((ptr)+(4))>>2)]=tempI64[1]); break;
+ case 'float': HEAPF32[((ptr)>>2)]=value; break;
+ case 'double': HEAPF64[((ptr)>>3)]=value; break;
+ default: abort('invalid type for setValue: ' + type);
+ }
+}
+Module['setValue'] = setValue;
+
+// Parallel to setValue.
+function getValue(ptr, type, noSafe) {
+ type = type || 'i8';
+ if (type.charAt(type.length-1) === '*') type = 'i32'; // pointers are 32-bit
+ switch(type) {
+ case 'i1': return HEAP8[(ptr)];
+ case 'i8': return HEAP8[(ptr)];
+ case 'i16': return HEAP16[((ptr)>>1)];
+ case 'i32': return HEAP32[((ptr)>>2)];
+ case 'i64': return HEAP32[((ptr)>>2)];
+ case 'float': return HEAPF32[((ptr)>>2)];
+ case 'double': return HEAPF64[((ptr)>>3)];
+ default: abort('invalid type for setValue: ' + type);
+ }
+ return null;
+}
+Module['getValue'] = getValue;
+
+var ALLOC_NORMAL = 0; // Tries to use _malloc()
+var ALLOC_STACK = 1; // Lives for the duration of the current function call
+var ALLOC_STATIC = 2; // Cannot be freed
+var ALLOC_DYNAMIC = 3; // Cannot be freed except through sbrk
+var ALLOC_NONE = 4; // Do not allocate
+Module['ALLOC_NORMAL'] = ALLOC_NORMAL;
+Module['ALLOC_STACK'] = ALLOC_STACK;
+Module['ALLOC_STATIC'] = ALLOC_STATIC;
+Module['ALLOC_DYNAMIC'] = ALLOC_DYNAMIC;
+Module['ALLOC_NONE'] = ALLOC_NONE;
+
+// allocate(): This is for internal use. You can use it yourself as well, but the interface
+// is a little tricky (see docs right below). The reason is that it is optimized
+// for multiple syntaxes to save space in generated code. So you should
+// normally not use allocate(), and instead allocate memory using _malloc(),
+// initialize it with setValue(), and so forth.
+// @slab: An array of data, or a number. If a number, then the size of the block to allocate,
+// in *bytes* (note that this is sometimes confusing: the next parameter does not
+// affect this!)
+// @types: Either an array of types, one for each byte (or 0 if no type at that position),
+// or a single type which is used for the entire block. This only matters if there
+// is initial data - if @slab is a number, then this does not matter at all and is
+// ignored.
+// @allocator: How to allocate memory, see ALLOC_*
+function allocate(slab, types, allocator, ptr) {
+ var zeroinit, size;
+ if (typeof slab === 'number') {
+ zeroinit = true;
+ size = slab;
+ } else {
+ zeroinit = false;
+ size = slab.length;
+ }
+
+ var singleType = typeof types === 'string' ? types : null;
+
+ var ret;
+ if (allocator == ALLOC_NONE) {
+ ret = ptr;
+ } else {
+ ret = [_malloc, Runtime.stackAlloc, Runtime.staticAlloc, Runtime.dynamicAlloc][allocator === undefined ? ALLOC_STATIC : allocator](Math.max(size, singleType ? 1 : types.length));
+ }
+
+ if (zeroinit) {
+ var ptr = ret, stop;
+ assert((ret & 3) == 0);
+ stop = ret + (size & ~3);
+ for (; ptr < stop; ptr += 4) {
+ HEAP32[((ptr)>>2)]=0;
+ }
+ stop = ret + size;
+ while (ptr < stop) {
+ HEAP8[((ptr++)|0)]=0;
+ }
+ return ret;
+ }
+
+ if (singleType === 'i8') {
+ if (slab.subarray || slab.slice) {
+ HEAPU8.set(slab, ret);
+ } else {
+ HEAPU8.set(new Uint8Array(slab), ret);
+ }
+ return ret;
+ }
+
+ var i = 0, type, typeSize, previousType;
+ while (i < size) {
+ var curr = slab[i];
+
+ if (typeof curr === 'function') {
+ curr = Runtime.getFunctionIndex(curr);
+ }
+
+ type = singleType || types[i];
+ if (type === 0) {
+ i++;
+ continue;
+ }
+
+ if (type == 'i64') type = 'i32'; // special case: we have one i32 here, and one i32 later
+
+ setValue(ret+i, curr, type);
+
+ // no need to look up size unless type changes, so cache it
+ if (previousType !== type) {
+ typeSize = Runtime.getNativeTypeSize(type);
+ previousType = type;
+ }
+ i += typeSize;
+ }
+
+ return ret;
+}
+Module['allocate'] = allocate;
+
+function Pointer_stringify(ptr, /* optional */ length) {
+ // TODO: use TextDecoder
+ // Find the length, and check for UTF while doing so
+ var hasUtf = false;
+ var t;
+ var i = 0;
+ while (1) {
+ t = HEAPU8[(((ptr)+(i))|0)];
+ if (t >= 128) hasUtf = true;
+ else if (t == 0 && !length) break;
+ i++;
+ if (length && i == length) break;
+ }
+ if (!length) length = i;
+
+ var ret = '';
+
+ if (!hasUtf) {
+ var MAX_CHUNK = 1024; // split up into chunks, because .apply on a huge string can overflow the stack
+ var curr;
+ while (length > 0) {
+ curr = String.fromCharCode.apply(String, HEAPU8.subarray(ptr, ptr + Math.min(length, MAX_CHUNK)));
+ ret = ret ? ret + curr : curr;
+ ptr += MAX_CHUNK;
+ length -= MAX_CHUNK;
+ }
+ return ret;
+ }
+
+ var utf8 = new Runtime.UTF8Processor();
+ for (i = 0; i < length; i++) {
+ t = HEAPU8[(((ptr)+(i))|0)];
+ ret += utf8.processCChar(t);
+ }
+ return ret;
+}
+Module['Pointer_stringify'] = Pointer_stringify;
+
+// Given a pointer 'ptr' to a null-terminated UTF16LE-encoded string in the emscripten HEAP, returns
+// a copy of that string as a Javascript String object.
+function UTF16ToString(ptr) {
+ var i = 0;
+
+ var str = '';
+ while (1) {
+ var codeUnit = HEAP16[(((ptr)+(i*2))>>1)];
+ if (codeUnit == 0)
+ return str;
+ ++i;
+ // fromCharCode constructs a character from a UTF-16 code unit, so we can pass the UTF16 string right through.
+ str += String.fromCharCode(codeUnit);
+ }
+}
+Module['UTF16ToString'] = UTF16ToString;
+
+// Copies the given Javascript String object 'str' to the emscripten HEAP at address 'outPtr',
+// null-terminated and encoded in UTF16LE form. The copy will require at most (str.length*2+1)*2 bytes of space in the HEAP.
+function stringToUTF16(str, outPtr) {
+ for(var i = 0; i < str.length; ++i) {
+ // charCodeAt returns a UTF-16 encoded code unit, so it can be directly written to the HEAP.
+ var codeUnit = str.charCodeAt(i); // possibly a lead surrogate
+ HEAP16[(((outPtr)+(i*2))>>1)]=codeUnit;
+ }
+ // Null-terminate the pointer to the HEAP.
+ HEAP16[(((outPtr)+(str.length*2))>>1)]=0;
+}
+Module['stringToUTF16'] = stringToUTF16;
+
+// Given a pointer 'ptr' to a null-terminated UTF32LE-encoded string in the emscripten HEAP, returns
+// a copy of that string as a Javascript String object.
+function UTF32ToString(ptr) {
+ var i = 0;
+
+ var str = '';
+ while (1) {
+ var utf32 = HEAP32[(((ptr)+(i*4))>>2)];
+ if (utf32 == 0)
+ return str;
+ ++i;
+ // Gotcha: fromCharCode constructs a character from a UTF-16 encoded code (pair), not from a Unicode code point! So encode the code point to UTF-16 for constructing.
+ if (utf32 >= 0x10000) {
+ var ch = utf32 - 0x10000;
+ str += String.fromCharCode(0xD800 | (ch >> 10), 0xDC00 | (ch & 0x3FF));
+ } else {
+ str += String.fromCharCode(utf32);
+ }
+ }
+}
+Module['UTF32ToString'] = UTF32ToString;
+
+// Copies the given Javascript String object 'str' to the emscripten HEAP at address 'outPtr',
+// null-terminated and encoded in UTF32LE form. The copy will require at most (str.length+1)*4 bytes of space in the HEAP,
+// but can use less, since str.length does not return the number of characters in the string, but the number of UTF-16 code units in the string.
+function stringToUTF32(str, outPtr) {
+ var iChar = 0;
+ for(var iCodeUnit = 0; iCodeUnit < str.length; ++iCodeUnit) {
+ // Gotcha: charCodeAt returns a 16-bit word that is a UTF-16 encoded code unit, not a Unicode code point of the character! We must decode the string to UTF-32 to the heap.
+ var codeUnit = str.charCodeAt(iCodeUnit); // possibly a lead surrogate
+ if (codeUnit >= 0xD800 && codeUnit <= 0xDFFF) {
+ var trailSurrogate = str.charCodeAt(++iCodeUnit);
+ codeUnit = 0x10000 + ((codeUnit & 0x3FF) << 10) | (trailSurrogate & 0x3FF);
+ }
+ HEAP32[(((outPtr)+(iChar*4))>>2)]=codeUnit;
+ ++iChar;
+ }
+ // Null-terminate the pointer to the HEAP.
+ HEAP32[(((outPtr)+(iChar*4))>>2)]=0;
+}
+Module['stringToUTF32'] = stringToUTF32;
+
+function demangle(func) {
+ var i = 3;
+ // params, etc.
+ var basicTypes = {
+ 'v': 'void',
+ 'b': 'bool',
+ 'c': 'char',
+ 's': 'short',
+ 'i': 'int',
+ 'l': 'long',
+ 'f': 'float',
+ 'd': 'double',
+ 'w': 'wchar_t',
+ 'a': 'signed char',
+ 'h': 'unsigned char',
+ 't': 'unsigned short',
+ 'j': 'unsigned int',
+ 'm': 'unsigned long',
+ 'x': 'long long',
+ 'y': 'unsigned long long',
+ 'z': '...'
+ };
+ var subs = [];
+ var first = true;
+ function dump(x) {
+ //return;
+ if (x) Module.print(x);
+ Module.print(func);
+ var pre = '';
+ for (var a = 0; a < i; a++) pre += ' ';
+ Module.print (pre + '^');
+ }
+ function parseNested() {
+ i++;
+ if (func[i] === 'K') i++; // ignore const
+ var parts = [];
+ while (func[i] !== 'E') {
+ if (func[i] === 'S') { // substitution
+ i++;
+ var next = func.indexOf('_', i);
+ var num = func.substring(i, next) || 0;
+ parts.push(subs[num] || '?');
+ i = next+1;
+ continue;
+ }
+ if (func[i] === 'C') { // constructor
+ parts.push(parts[parts.length-1]);
+ i += 2;
+ continue;
+ }
+ var size = parseInt(func.substr(i));
+ var pre = size.toString().length;
+ if (!size || !pre) { i--; break; } // counter i++ below us
+ var curr = func.substr(i + pre, size);
+ parts.push(curr);
+ subs.push(curr);
+ i += pre + size;
+ }
+ i++; // skip E
+ return parts;
+ }
+ function parse(rawList, limit, allowVoid) { // main parser
+ limit = limit || Infinity;
+ var ret = '', list = [];
+ function flushList() {
+ return '(' + list.join(', ') + ')';
+ }
+ var name;
+ if (func[i] === 'N') {
+ // namespaced N-E
+ name = parseNested().join('::');
+ limit--;
+ if (limit === 0) return rawList ? [name] : name;
+ } else {
+ // not namespaced
+ if (func[i] === 'K' || (first && func[i] === 'L')) i++; // ignore const and first 'L'
+ var size = parseInt(func.substr(i));
+ if (size) {
+ var pre = size.toString().length;
+ name = func.substr(i + pre, size);
+ i += pre + size;
+ }
+ }
+ first = false;
+ if (func[i] === 'I') {
+ i++;
+ var iList = parse(true);
+ var iRet = parse(true, 1, true);
+ ret += iRet[0] + ' ' + name + '<' + iList.join(', ') + '>';
+ } else {
+ ret = name;
+ }
+ paramLoop: while (i < func.length && limit-- > 0) {
+ //dump('paramLoop');
+ var c = func[i++];
+ if (c in basicTypes) {
+ list.push(basicTypes[c]);
+ } else {
+ switch (c) {
+ case 'P': list.push(parse(true, 1, true)[0] + '*'); break; // pointer
+ case 'R': list.push(parse(true, 1, true)[0] + '&'); break; // reference
+ case 'L': { // literal
+ i++; // skip basic type
+ var end = func.indexOf('E', i);
+ var size = end - i;
+ list.push(func.substr(i, size));
+ i += size + 2; // size + 'EE'
+ break;
+ }
+ case 'A': { // array
+ var size = parseInt(func.substr(i));
+ i += size.toString().length;
+ if (func[i] !== '_') throw '?';
+ i++; // skip _
+ list.push(parse(true, 1, true)[0] + ' [' + size + ']');
+ break;
+ }
+ case 'E': break paramLoop;
+ default: ret += '?' + c; break paramLoop;
+ }
+ }
+ }
+ if (!allowVoid && list.length === 1 && list[0] === 'void') list = []; // avoid (void)
+ if (rawList) {
+ if (ret) {
+ list.push(ret + '?');
+ }
+ return list;
+ } else {
+ return ret + flushList();
+ }
+ }
+ try {
+ // Special-case the entry point, since its name differs from other name mangling.
+ if (func == 'Object._main' || func == '_main') {
+ return 'main()';
+ }
+ if (typeof func === 'number') func = Pointer_stringify(func);
+ if (func[0] !== '_') return func;
+ if (func[1] !== '_') return func; // C function
+ if (func[2] !== 'Z') return func;
+ switch (func[3]) {
+ case 'n': return 'operator new()';
+ case 'd': return 'operator delete()';
+ }
+ return parse();
+ } catch(e) {
+ return func;
+ }
+}
+
+function demangleAll(text) {
+ return text.replace(/__Z[\w\d_]+/g, function(x) { var y = demangle(x); return x === y ? x : (x + ' [' + y + ']') });
+}
+
+function stackTrace() {
+ var stack = new Error().stack;
+ return stack ? demangleAll(stack) : '(no stack trace available)'; // Stack trace is not available at least on IE10 and Safari 6.
+}
+
+// Memory management
+
+var PAGE_SIZE = 4096;
+function alignMemoryPage(x) {
+ return (x+4095)&-4096;
+}
+
+var HEAP;
+var HEAP8, HEAPU8, HEAP16, HEAPU16, HEAP32, HEAPU32, HEAPF32, HEAPF64;
+
+var STATIC_BASE = 0, STATICTOP = 0, staticSealed = false; // static area
+var STACK_BASE = 0, STACKTOP = 0, STACK_MAX = 0; // stack area
+var DYNAMIC_BASE = 0, DYNAMICTOP = 0; // dynamic area handled by sbrk
+
+function enlargeMemory() {
+ abort('Cannot enlarge memory arrays. Either (1) compile with -s TOTAL_MEMORY=X with X higher than the current value ' + TOTAL_MEMORY + ', (2) compile with ALLOW_MEMORY_GROWTH which adjusts the size at runtime but prevents some optimizations, or (3) set Module.TOTAL_MEMORY before the program runs.');
+}
+
+var TOTAL_STACK = Module['TOTAL_STACK'] || 5242880;
+var TOTAL_MEMORY = Module['TOTAL_MEMORY'] || 134217728;
+var FAST_MEMORY = Module['FAST_MEMORY'] || 2097152;
+
+var totalMemory = 4096;
+while (totalMemory < TOTAL_MEMORY || totalMemory < 2*TOTAL_STACK) {
+ if (totalMemory < 16*1024*1024) {
+ totalMemory *= 2;
+ } else {
+ totalMemory += 16*1024*1024
+ }
+}
+if (totalMemory !== TOTAL_MEMORY) {
+ Module.printErr('increasing TOTAL_MEMORY to ' + totalMemory + ' to be more reasonable');
+ TOTAL_MEMORY = totalMemory;
+}
+
+// Initialize the runtime's memory
+// check for full engine support (use string 'subarray' to avoid closure compiler confusion)
+assert(typeof Int32Array !== 'undefined' && typeof Float64Array !== 'undefined' && !!(new Int32Array(1)['subarray']) && !!(new Int32Array(1)['set']),
+ 'JS engine does not provide full typed array support');
+
+var buffer = new ArrayBuffer(TOTAL_MEMORY);
+HEAP8 = new Int8Array(buffer);
+HEAP16 = new Int16Array(buffer);
+HEAP32 = new Int32Array(buffer);
+HEAPU8 = new Uint8Array(buffer);
+HEAPU16 = new Uint16Array(buffer);
+HEAPU32 = new Uint32Array(buffer);
+HEAPF32 = new Float32Array(buffer);
+HEAPF64 = new Float64Array(buffer);
+
+// Endianness check (note: assumes compiler arch was little-endian)
+HEAP32[0] = 255;
+assert(HEAPU8[0] === 255 && HEAPU8[3] === 0, 'Typed arrays 2 must be run on a little-endian system');
+
+Module['HEAP'] = HEAP;
+Module['HEAP8'] = HEAP8;
+Module['HEAP16'] = HEAP16;
+Module['HEAP32'] = HEAP32;
+Module['HEAPU8'] = HEAPU8;
+Module['HEAPU16'] = HEAPU16;
+Module['HEAPU32'] = HEAPU32;
+Module['HEAPF32'] = HEAPF32;
+Module['HEAPF64'] = HEAPF64;
+
+function callRuntimeCallbacks(callbacks) {
+ while(callbacks.length > 0) {
+ var callback = callbacks.shift();
+ if (typeof callback == 'function') {
+ callback();
+ continue;
+ }
+ var func = callback.func;
+ if (typeof func === 'number') {
+ if (callback.arg === undefined) {
+ Runtime.dynCall('v', func);
+ } else {
+ Runtime.dynCall('vi', func, [callback.arg]);
+ }
+ } else {
+ func(callback.arg === undefined ? null : callback.arg);
+ }
+ }
+}
+
+var __ATPRERUN__ = []; // functions called before the runtime is initialized
+var __ATINIT__ = []; // functions called during startup
+var __ATMAIN__ = []; // functions called when main() is to be run
+var __ATEXIT__ = []; // functions called during shutdown
+var __ATPOSTRUN__ = []; // functions called after the runtime has exited
+
+var runtimeInitialized = false;
+
+function preRun() {
+ // compatibility - merge in anything from Module['preRun'] at this time
+ if (Module['preRun']) {
+ if (typeof Module['preRun'] == 'function') Module['preRun'] = [Module['preRun']];
+ while (Module['preRun'].length) {
+ addOnPreRun(Module['preRun'].shift());
+ }
+ }
+ callRuntimeCallbacks(__ATPRERUN__);
+}
+
+function ensureInitRuntime() {
+ if (runtimeInitialized) return;
+ runtimeInitialized = true;
+ callRuntimeCallbacks(__ATINIT__);
+}
+
+function preMain() {
+ callRuntimeCallbacks(__ATMAIN__);
+}
+
+function exitRuntime() {
+ callRuntimeCallbacks(__ATEXIT__);
+}
+
+function postRun() {
+ // compatibility - merge in anything from Module['postRun'] at this time
+ if (Module['postRun']) {
+ if (typeof Module['postRun'] == 'function') Module['postRun'] = [Module['postRun']];
+ while (Module['postRun'].length) {
+ addOnPostRun(Module['postRun'].shift());
+ }
+ }
+ callRuntimeCallbacks(__ATPOSTRUN__);
+}
+
+function addOnPreRun(cb) {
+ __ATPRERUN__.unshift(cb);
+}
+Module['addOnPreRun'] = Module.addOnPreRun = addOnPreRun;
+
+function addOnInit(cb) {
+ __ATINIT__.unshift(cb);
+}
+Module['addOnInit'] = Module.addOnInit = addOnInit;
+
+function addOnPreMain(cb) {
+ __ATMAIN__.unshift(cb);
+}
+Module['addOnPreMain'] = Module.addOnPreMain = addOnPreMain;
+
+function addOnExit(cb) {
+ __ATEXIT__.unshift(cb);
+}
+Module['addOnExit'] = Module.addOnExit = addOnExit;
+
+function addOnPostRun(cb) {
+ __ATPOSTRUN__.unshift(cb);
+}
+Module['addOnPostRun'] = Module.addOnPostRun = addOnPostRun;
+
+// Tools
+
+// This processes a JS string into a C-line array of numbers, 0-terminated.
+// For LLVM-originating strings, see parser.js:parseLLVMString function
+function intArrayFromString(stringy, dontAddNull, length /* optional */) {
+ var ret = (new Runtime.UTF8Processor()).processJSString(stringy);
+ if (length) {
+ ret.length = length;
+ }
+ if (!dontAddNull) {
+ ret.push(0);
+ }
+ return ret;
+}
+Module['intArrayFromString'] = intArrayFromString;
+
+function intArrayToString(array) {
+ var ret = [];
+ for (var i = 0; i < array.length; i++) {
+ var chr = array[i];
+ if (chr > 0xFF) {
+ chr &= 0xFF;
+ }
+ ret.push(String.fromCharCode(chr));
+ }
+ return ret.join('');
+}
+Module['intArrayToString'] = intArrayToString;
+
+// Write a Javascript array to somewhere in the heap
+function writeStringToMemory(string, buffer, dontAddNull) {
+ var array = intArrayFromString(string, dontAddNull);
+ var i = 0;
+ while (i < array.length) {
+ var chr = array[i];
+ HEAP8[(((buffer)+(i))|0)]=chr;
+ i = i + 1;
+ }
+}
+Module['writeStringToMemory'] = writeStringToMemory;
+
+function writeArrayToMemory(array, buffer) {
+ for (var i = 0; i < array.length; i++) {
+ HEAP8[(((buffer)+(i))|0)]=array[i];
+ }
+}
+Module['writeArrayToMemory'] = writeArrayToMemory;
+
+function writeAsciiToMemory(str, buffer, dontAddNull) {
+ for (var i = 0; i < str.length; i++) {
+ HEAP8[(((buffer)+(i))|0)]=str.charCodeAt(i);
+ }
+ if (!dontAddNull) HEAP8[(((buffer)+(str.length))|0)]=0;
+}
+Module['writeAsciiToMemory'] = writeAsciiToMemory;
+
+function unSign(value, bits, ignore) {
+ if (value >= 0) {
+ return value;
+ }
+ return bits <= 32 ? 2*Math.abs(1 << (bits-1)) + value // Need some trickery, since if bits == 32, we are right at the limit of the bits JS uses in bitshifts
+ : Math.pow(2, bits) + value;
+}
+function reSign(value, bits, ignore) {
+ if (value <= 0) {
+ return value;
+ }
+ var half = bits <= 32 ? Math.abs(1 << (bits-1)) // abs is needed if bits == 32
+ : Math.pow(2, bits-1);
+ if (value >= half && (bits <= 32 || value > half)) { // for huge values, we can hit the precision limit and always get true here. so don't do that
+ // but, in general there is no perfect solution here. With 64-bit ints, we get rounding and errors
+ // TODO: In i64 mode 1, resign the two parts separately and safely
+ value = -2*half + value; // Cannot bitshift half, as it may be at the limit of the bits JS uses in bitshifts
+ }
+ return value;
+}
+
+// check for imul support, and also for correctness ( https://bugs.webkit.org/show_bug.cgi?id=126345 )
+if (!Math['imul'] || Math['imul'](0xffffffff, 5) !== -5) Math['imul'] = function imul(a, b) {
+ var ah = a >>> 16;
+ var al = a & 0xffff;
+ var bh = b >>> 16;
+ var bl = b & 0xffff;
+ return (al*bl + ((ah*bl + al*bh) << 16))|0;
+};
+Math.imul = Math['imul'];
+
+
+var Math_abs = Math.abs;
+var Math_cos = Math.cos;
+var Math_sin = Math.sin;
+var Math_tan = Math.tan;
+var Math_acos = Math.acos;
+var Math_asin = Math.asin;
+var Math_atan = Math.atan;
+var Math_atan2 = Math.atan2;
+var Math_exp = Math.exp;
+var Math_log = Math.log;
+var Math_sqrt = Math.sqrt;
+var Math_ceil = Math.ceil;
+var Math_floor = Math.floor;
+var Math_pow = Math.pow;
+var Math_imul = Math.imul;
+var Math_fround = Math.fround;
+var Math_min = Math.min;
+
+// A counter of dependencies for calling run(). If we need to
+// do asynchronous work before running, increment this and
+// decrement it. Incrementing must happen in a place like
+// PRE_RUN_ADDITIONS (used by emcc to add file preloading).
+// Note that you can add dependencies in preRun, even though
+// it happens right before run - run will be postponed until
+// the dependencies are met.
+var runDependencies = 0;
+var runDependencyWatcher = null;
+var dependenciesFulfilled = null; // overridden to take different actions when all run dependencies are fulfilled
+
+function addRunDependency(id) {
+ runDependencies++;
+ if (Module['monitorRunDependencies']) {
+ Module['monitorRunDependencies'](runDependencies);
+ }
+}
+Module['addRunDependency'] = addRunDependency;
+function removeRunDependency(id) {
+ runDependencies--;
+ if (Module['monitorRunDependencies']) {
+ Module['monitorRunDependencies'](runDependencies);
+ }
+ if (runDependencies == 0) {
+ if (runDependencyWatcher !== null) {
+ clearInterval(runDependencyWatcher);
+ runDependencyWatcher = null;
+ }
+ if (dependenciesFulfilled) {
+ var callback = dependenciesFulfilled;
+ dependenciesFulfilled = null;
+ callback(); // can add another dependenciesFulfilled
+ }
+ }
+}
+Module['removeRunDependency'] = removeRunDependency;
+
+Module["preloadedImages"] = {}; // maps url to image data
+Module["preloadedAudios"] = {}; // maps url to audio data
+
+
+var memoryInitializer = null;
+
+// === Body ===
+
+
+
+
+
+STATIC_BASE = 8;
+
+STATICTOP = STATIC_BASE + Runtime.alignMemory(547);
+/* global initializers */ __ATINIT__.push();
+
+
+/* memory initializer */ allocate([101,114,114,111,114,58,32,37,100,10,0,0,0,0,0,0,80,102,97,110,110,107,117,99,104,101,110,40,37,100,41,32,61,32,37,100,46,10,0,0,37,100,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], "i8", ALLOC_NONE, Runtime.GLOBAL_BASE);
+
+
+
+
+var tempDoublePtr = Runtime.alignMemory(allocate(12, "i8", ALLOC_STATIC), 8);
+
+assert(tempDoublePtr % 8 == 0);
+
+function copyTempFloat(ptr) { // functions, because inlining this code increases code size too much
+
+ HEAP8[tempDoublePtr] = HEAP8[ptr];
+
+ HEAP8[tempDoublePtr+1] = HEAP8[ptr+1];
+
+ HEAP8[tempDoublePtr+2] = HEAP8[ptr+2];
+
+ HEAP8[tempDoublePtr+3] = HEAP8[ptr+3];
+
+}
+
+function copyTempDouble(ptr) {
+
+ HEAP8[tempDoublePtr] = HEAP8[ptr];
+
+ HEAP8[tempDoublePtr+1] = HEAP8[ptr+1];
+
+ HEAP8[tempDoublePtr+2] = HEAP8[ptr+2];
+
+ HEAP8[tempDoublePtr+3] = HEAP8[ptr+3];
+
+ HEAP8[tempDoublePtr+4] = HEAP8[ptr+4];
+
+ HEAP8[tempDoublePtr+5] = HEAP8[ptr+5];
+
+ HEAP8[tempDoublePtr+6] = HEAP8[ptr+6];
+
+ HEAP8[tempDoublePtr+7] = HEAP8[ptr+7];
+
+}
+
+
+
+
+
+
+ var ERRNO_CODES={EPERM:1,ENOENT:2,ESRCH:3,EINTR:4,EIO:5,ENXIO:6,E2BIG:7,ENOEXEC:8,EBADF:9,ECHILD:10,EAGAIN:11,EWOULDBLOCK:11,ENOMEM:12,EACCES:13,EFAULT:14,ENOTBLK:15,EBUSY:16,EEXIST:17,EXDEV:18,ENODEV:19,ENOTDIR:20,EISDIR:21,EINVAL:22,ENFILE:23,EMFILE:24,ENOTTY:25,ETXTBSY:26,EFBIG:27,ENOSPC:28,ESPIPE:29,EROFS:30,EMLINK:31,EPIPE:32,EDOM:33,ERANGE:34,ENOMSG:42,EIDRM:43,ECHRNG:44,EL2NSYNC:45,EL3HLT:46,EL3RST:47,ELNRNG:48,EUNATCH:49,ENOCSI:50,EL2HLT:51,EDEADLK:35,ENOLCK:37,EBADE:52,EBADR:53,EXFULL:54,ENOANO:55,EBADRQC:56,EBADSLT:57,EDEADLOCK:35,EBFONT:59,ENOSTR:60,ENODATA:61,ETIME:62,ENOSR:63,ENONET:64,ENOPKG:65,EREMOTE:66,ENOLINK:67,EADV:68,ESRMNT:69,ECOMM:70,EPROTO:71,EMULTIHOP:72,EDOTDOT:73,EBADMSG:74,ENOTUNIQ:76,EBADFD:77,EREMCHG:78,ELIBACC:79,ELIBBAD:80,ELIBSCN:81,ELIBMAX:82,ELIBEXEC:83,ENOSYS:38,ENOTEMPTY:39,ENAMETOOLONG:36,ELOOP:40,EOPNOTSUPP:95,EPFNOSUPPORT:96,ECONNRESET:104,ENOBUFS:105,EAFNOSUPPORT:97,EPROTOTYPE:91,ENOTSOCK:88,ENOPROTOOPT:92,ESHUTDOWN:108,ECONNREFUSED:111,EADDRINUSE:98,ECONNABORTED:103,ENETUNREACH:101,ENETDOWN:100,ETIMEDOUT:110,EHOSTDOWN:112,EHOSTUNREACH:113,EINPROGRESS:115,EALREADY:114,EDESTADDRREQ:89,EMSGSIZE:90,EPROTONOSUPPORT:93,ESOCKTNOSUPPORT:94,EADDRNOTAVAIL:99,ENETRESET:102,EISCONN:106,ENOTCONN:107,ETOOMANYREFS:109,EUSERS:87,EDQUOT:122,ESTALE:116,ENOTSUP:95,ENOMEDIUM:123,EILSEQ:84,EOVERFLOW:75,ECANCELED:125,ENOTRECOVERABLE:131,EOWNERDEAD:130,ESTRPIPE:86};
+
+ var ERRNO_MESSAGES={0:"Success",1:"Not super-user",2:"No such file or directory",3:"No such process",4:"Interrupted system call",5:"I/O error",6:"No such device or address",7:"Arg list too long",8:"Exec format error",9:"Bad file number",10:"No children",11:"No more processes",12:"Not enough core",13:"Permission denied",14:"Bad address",15:"Block device required",16:"Mount device busy",17:"File exists",18:"Cross-device link",19:"No such device",20:"Not a directory",21:"Is a directory",22:"Invalid argument",23:"Too many open files in system",24:"Too many open files",25:"Not a typewriter",26:"Text file busy",27:"File too large",28:"No space left on device",29:"Illegal seek",30:"Read only file system",31:"Too many links",32:"Broken pipe",33:"Math arg out of domain of func",34:"Math result not representable",35:"File locking deadlock error",36:"File or path name too long",37:"No record locks available",38:"Function not implemented",39:"Directory not empty",40:"Too many symbolic links",42:"No message of desired type",43:"Identifier removed",44:"Channel number out of range",45:"Level 2 not synchronized",46:"Level 3 halted",47:"Level 3 reset",48:"Link number out of range",49:"Protocol driver not attached",50:"No CSI structure available",51:"Level 2 halted",52:"Invalid exchange",53:"Invalid request descriptor",54:"Exchange full",55:"No anode",56:"Invalid request code",57:"Invalid slot",59:"Bad font file fmt",60:"Device not a stream",61:"No data (for no delay io)",62:"Timer expired",63:"Out of streams resources",64:"Machine is not on the network",65:"Package not installed",66:"The object is remote",67:"The link has been severed",68:"Advertise error",69:"Srmount error",70:"Communication error on send",71:"Protocol error",72:"Multihop attempted",73:"Cross mount point (not really error)",74:"Trying to read unreadable message",75:"Value too large for defined data type",76:"Given log. name not unique",77:"f.d. invalid for this operation",78:"Remote address changed",79:"Can access a needed shared lib",80:"Accessing a corrupted shared lib",81:".lib section in a.out corrupted",82:"Attempting to link in too many libs",83:"Attempting to exec a shared library",84:"Illegal byte sequence",86:"Streams pipe error",87:"Too many users",88:"Socket operation on non-socket",89:"Destination address required",90:"Message too long",91:"Protocol wrong type for socket",92:"Protocol not available",93:"Unknown protocol",94:"Socket type not supported",95:"Not supported",96:"Protocol family not supported",97:"Address family not supported by protocol family",98:"Address already in use",99:"Address not available",100:"Network interface is not configured",101:"Network is unreachable",102:"Connection reset by network",103:"Connection aborted",104:"Connection reset by peer",105:"No buffer space available",106:"Socket is already connected",107:"Socket is not connected",108:"Can't send after socket shutdown",109:"Too many references",110:"Connection timed out",111:"Connection refused",112:"Host is down",113:"Host is unreachable",114:"Socket already connected",115:"Connection already in progress",116:"Stale file handle",122:"Quota exceeded",123:"No medium (in tape drive)",125:"Operation canceled",130:"Previous owner died",131:"State not recoverable"};
+
+
+ var ___errno_state=0;function ___setErrNo(value) {
+ // For convenient setting and returning of errno.
+ HEAP32[((___errno_state)>>2)]=value;
+ return value;
+ }
+
+ var PATH={splitPath:function (filename) {
+ var splitPathRe = /^(\/?|)([\s\S]*?)((?:\.{1,2}|[^\/]+?|)(\.[^.\/]*|))(?:[\/]*)$/;
+ return splitPathRe.exec(filename).slice(1);
+ },normalizeArray:function (parts, allowAboveRoot) {
+ // if the path tries to go above the root, `up` ends up > 0
+ var up = 0;
+ for (var i = parts.length - 1; i >= 0; i--) {
+ var last = parts[i];
+ if (last === '.') {
+ parts.splice(i, 1);
+ } else if (last === '..') {
+ parts.splice(i, 1);
+ up++;
+ } else if (up) {
+ parts.splice(i, 1);
+ up--;
+ }
+ }
+ // if the path is allowed to go above the root, restore leading ..s
+ if (allowAboveRoot) {
+ for (; up--; up) {
+ parts.unshift('..');
+ }
+ }
+ return parts;
+ },normalize:function (path) {
+ var isAbsolute = path.charAt(0) === '/',
+ trailingSlash = path.substr(-1) === '/';
+ // Normalize the path
+ path = PATH.normalizeArray(path.split('/').filter(function(p) {
+ return !!p;
+ }), !isAbsolute).join('/');
+ if (!path && !isAbsolute) {
+ path = '.';
+ }
+ if (path && trailingSlash) {
+ path += '/';
+ }
+ return (isAbsolute ? '/' : '') + path;
+ },dirname:function (path) {
+ var result = PATH.splitPath(path),
+ root = result[0],
+ dir = result[1];
+ if (!root && !dir) {
+ // No dirname whatsoever
+ return '.';
+ }
+ if (dir) {
+ // It has a dirname, strip trailing slash
+ dir = dir.substr(0, dir.length - 1);
+ }
+ return root + dir;
+ },basename:function (path) {
+ // EMSCRIPTEN return '/'' for '/', not an empty string
+ if (path === '/') return '/';
+ var lastSlash = path.lastIndexOf('/');
+ if (lastSlash === -1) return path;
+ return path.substr(lastSlash+1);
+ },extname:function (path) {
+ return PATH.splitPath(path)[3];
+ },join:function () {
+ var paths = Array.prototype.slice.call(arguments, 0);
+ return PATH.normalize(paths.join('/'));
+ },join2:function (l, r) {
+ return PATH.normalize(l + '/' + r);
+ },resolve:function () {
+ var resolvedPath = '',
+ resolvedAbsolute = false;
+ for (var i = arguments.length - 1; i >= -1 && !resolvedAbsolute; i--) {
+ var path = (i >= 0) ? arguments[i] : FS.cwd();
+ // Skip empty and invalid entries
+ if (typeof path !== 'string') {
+ throw new TypeError('Arguments to path.resolve must be strings');
+ } else if (!path) {
+ continue;
+ }
+ resolvedPath = path + '/' + resolvedPath;
+ resolvedAbsolute = path.charAt(0) === '/';
+ }
+ // At this point the path should be resolved to a full absolute path, but
+ // handle relative paths to be safe (might happen when process.cwd() fails)
+ resolvedPath = PATH.normalizeArray(resolvedPath.split('/').filter(function(p) {
+ return !!p;
+ }), !resolvedAbsolute).join('/');
+ return ((resolvedAbsolute ? '/' : '') + resolvedPath) || '.';
+ },relative:function (from, to) {
+ from = PATH.resolve(from).substr(1);
+ to = PATH.resolve(to).substr(1);
+ function trim(arr) {
+ var start = 0;
+ for (; start < arr.length; start++) {
+ if (arr[start] !== '') break;
+ }
+ var end = arr.length - 1;
+ for (; end >= 0; end--) {
+ if (arr[end] !== '') break;
+ }
+ if (start > end) return [];
+ return arr.slice(start, end - start + 1);
+ }
+ var fromParts = trim(from.split('/'));
+ var toParts = trim(to.split('/'));
+ var length = Math.min(fromParts.length, toParts.length);
+ var samePartsLength = length;
+ for (var i = 0; i < length; i++) {
+ if (fromParts[i] !== toParts[i]) {
+ samePartsLength = i;
+ break;
+ }
+ }
+ var outputParts = [];
+ for (var i = samePartsLength; i < fromParts.length; i++) {
+ outputParts.push('..');
+ }
+ outputParts = outputParts.concat(toParts.slice(samePartsLength));
+ return outputParts.join('/');
+ }};
+
+ var TTY={ttys:[],init:function () {
+ // https://github.com/kripken/emscripten/pull/1555
+ // if (ENVIRONMENT_IS_NODE) {
+ // // currently, FS.init does not distinguish if process.stdin is a file or TTY
+ // // device, it always assumes it's a TTY device. because of this, we're forcing
+ // // process.stdin to UTF8 encoding to at least make stdin reading compatible
+ // // with text files until FS.init can be refactored.
+ // process['stdin']['setEncoding']('utf8');
+ // }
+ },shutdown:function () {
+ // https://github.com/kripken/emscripten/pull/1555
+ // if (ENVIRONMENT_IS_NODE) {
+ // // inolen: any idea as to why node -e 'process.stdin.read()' wouldn't exit immediately (with process.stdin being a tty)?
+ // // isaacs: because now it's reading from the stream, you've expressed interest in it, so that read() kicks off a _read() which creates a ReadReq operation
+ // // inolen: I thought read() in that case was a synchronous operation that just grabbed some amount of buffered data if it exists?
+ // // isaacs: it is. but it also triggers a _read() call, which calls readStart() on the handle
+ // // isaacs: do process.stdin.pause() and i'd think it'd probably close the pending call
+ // process['stdin']['pause']();
+ // }
+ },register:function (dev, ops) {
+ TTY.ttys[dev] = { input: [], output: [], ops: ops };
+ FS.registerDevice(dev, TTY.stream_ops);
+ },stream_ops:{open:function (stream) {
+ var tty = TTY.ttys[stream.node.rdev];
+ if (!tty) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ stream.tty = tty;
+ stream.seekable = false;
+ },close:function (stream) {
+ // flush any pending line data
+ if (stream.tty.output.length) {
+ stream.tty.ops.put_char(stream.tty, 10);
+ }
+ },read:function (stream, buffer, offset, length, pos /* ignored */) {
+ if (!stream.tty || !stream.tty.ops.get_char) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENXIO);
+ }
+ var bytesRead = 0;
+ for (var i = 0; i < length; i++) {
+ var result;
+ try {
+ result = stream.tty.ops.get_char(stream.tty);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ if (result === undefined && bytesRead === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ if (result === null || result === undefined) break;
+ bytesRead++;
+ buffer[offset+i] = result;
+ }
+ if (bytesRead) {
+ stream.node.timestamp = Date.now();
+ }
+ return bytesRead;
+ },write:function (stream, buffer, offset, length, pos) {
+ if (!stream.tty || !stream.tty.ops.put_char) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENXIO);
+ }
+ for (var i = 0; i < length; i++) {
+ try {
+ stream.tty.ops.put_char(stream.tty, buffer[offset+i]);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ }
+ if (length) {
+ stream.node.timestamp = Date.now();
+ }
+ return i;
+ }},default_tty_ops:{get_char:function (tty) {
+ if (!tty.input.length) {
+ var result = null;
+ if (ENVIRONMENT_IS_NODE) {
+ result = process['stdin']['read']();
+ if (!result) {
+ if (process['stdin']['_readableState'] && process['stdin']['_readableState']['ended']) {
+ return null; // EOF
+ }
+ return undefined; // no data available
+ }
+ } else if (typeof window != 'undefined' &&
+ typeof window.prompt == 'function') {
+ // Browser.
+ result = window.prompt('Input: '); // returns null on cancel
+ if (result !== null) {
+ result += '\n';
+ }
+ } else if (typeof readline == 'function') {
+ // Command line.
+ result = readline();
+ if (result !== null) {
+ result += '\n';
+ }
+ }
+ if (!result) {
+ return null;
+ }
+ tty.input = intArrayFromString(result, true);
+ }
+ return tty.input.shift();
+ },put_char:function (tty, val) {
+ if (val === null || val === 10) {
+ Module['print'](tty.output.join(''));
+ tty.output = [];
+ } else {
+ tty.output.push(TTY.utf8.processCChar(val));
+ }
+ }},default_tty1_ops:{put_char:function (tty, val) {
+ if (val === null || val === 10) {
+ Module['printErr'](tty.output.join(''));
+ tty.output = [];
+ } else {
+ tty.output.push(TTY.utf8.processCChar(val));
+ }
+ }}};
+
+ var MEMFS={ops_table:null,CONTENT_OWNING:1,CONTENT_FLEXIBLE:2,CONTENT_FIXED:3,mount:function (mount) {
+ return MEMFS.createNode(null, '/', 16384 | 511 /* 0777 */, 0);
+ },createNode:function (parent, name, mode, dev) {
+ if (FS.isBlkdev(mode) || FS.isFIFO(mode)) {
+ // no supported
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (!MEMFS.ops_table) {
+ MEMFS.ops_table = {
+ dir: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr,
+ lookup: MEMFS.node_ops.lookup,
+ mknod: MEMFS.node_ops.mknod,
+ rename: MEMFS.node_ops.rename,
+ unlink: MEMFS.node_ops.unlink,
+ rmdir: MEMFS.node_ops.rmdir,
+ readdir: MEMFS.node_ops.readdir,
+ symlink: MEMFS.node_ops.symlink
+ },
+ stream: {
+ llseek: MEMFS.stream_ops.llseek
+ }
+ },
+ file: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr
+ },
+ stream: {
+ llseek: MEMFS.stream_ops.llseek,
+ read: MEMFS.stream_ops.read,
+ write: MEMFS.stream_ops.write,
+ allocate: MEMFS.stream_ops.allocate,
+ mmap: MEMFS.stream_ops.mmap
+ }
+ },
+ link: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr,
+ readlink: MEMFS.node_ops.readlink
+ },
+ stream: {}
+ },
+ chrdev: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr
+ },
+ stream: FS.chrdev_stream_ops
+ },
+ };
+ }
+ var node = FS.createNode(parent, name, mode, dev);
+ if (FS.isDir(node.mode)) {
+ node.node_ops = MEMFS.ops_table.dir.node;
+ node.stream_ops = MEMFS.ops_table.dir.stream;
+ node.contents = {};
+ } else if (FS.isFile(node.mode)) {
+ node.node_ops = MEMFS.ops_table.file.node;
+ node.stream_ops = MEMFS.ops_table.file.stream;
+ node.contents = [];
+ node.contentMode = MEMFS.CONTENT_FLEXIBLE;
+ } else if (FS.isLink(node.mode)) {
+ node.node_ops = MEMFS.ops_table.link.node;
+ node.stream_ops = MEMFS.ops_table.link.stream;
+ } else if (FS.isChrdev(node.mode)) {
+ node.node_ops = MEMFS.ops_table.chrdev.node;
+ node.stream_ops = MEMFS.ops_table.chrdev.stream;
+ }
+ node.timestamp = Date.now();
+ // add the new node to the parent
+ if (parent) {
+ parent.contents[name] = node;
+ }
+ return node;
+ },ensureFlexible:function (node) {
+ if (node.contentMode !== MEMFS.CONTENT_FLEXIBLE) {
+ var contents = node.contents;
+ node.contents = Array.prototype.slice.call(contents);
+ node.contentMode = MEMFS.CONTENT_FLEXIBLE;
+ }
+ },node_ops:{getattr:function (node) {
+ var attr = {};
+ // device numbers reuse inode numbers.
+ attr.dev = FS.isChrdev(node.mode) ? node.id : 1;
+ attr.ino = node.id;
+ attr.mode = node.mode;
+ attr.nlink = 1;
+ attr.uid = 0;
+ attr.gid = 0;
+ attr.rdev = node.rdev;
+ if (FS.isDir(node.mode)) {
+ attr.size = 4096;
+ } else if (FS.isFile(node.mode)) {
+ attr.size = node.contents.length;
+ } else if (FS.isLink(node.mode)) {
+ attr.size = node.link.length;
+ } else {
+ attr.size = 0;
+ }
+ attr.atime = new Date(node.timestamp);
+ attr.mtime = new Date(node.timestamp);
+ attr.ctime = new Date(node.timestamp);
+ // NOTE: In our implementation, st_blocks = Math.ceil(st_size/st_blksize),
+ // but this is not required by the standard.
+ attr.blksize = 4096;
+ attr.blocks = Math.ceil(attr.size / attr.blksize);
+ return attr;
+ },setattr:function (node, attr) {
+ if (attr.mode !== undefined) {
+ node.mode = attr.mode;
+ }
+ if (attr.timestamp !== undefined) {
+ node.timestamp = attr.timestamp;
+ }
+ if (attr.size !== undefined) {
+ MEMFS.ensureFlexible(node);
+ var contents = node.contents;
+ if (attr.size < contents.length) contents.length = attr.size;
+ else while (attr.size > contents.length) contents.push(0);
+ }
+ },lookup:function (parent, name) {
+ throw FS.genericErrors[ERRNO_CODES.ENOENT];
+ },mknod:function (parent, name, mode, dev) {
+ return MEMFS.createNode(parent, name, mode, dev);
+ },rename:function (old_node, new_dir, new_name) {
+ // if we're overwriting a directory at new_name, make sure it's empty.
+ if (FS.isDir(old_node.mode)) {
+ var new_node;
+ try {
+ new_node = FS.lookupNode(new_dir, new_name);
+ } catch (e) {
+ }
+ if (new_node) {
+ for (var i in new_node.contents) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTEMPTY);
+ }
+ }
+ }
+ // do the internal rewiring
+ delete old_node.parent.contents[old_node.name];
+ old_node.name = new_name;
+ new_dir.contents[new_name] = old_node;
+ old_node.parent = new_dir;
+ },unlink:function (parent, name) {
+ delete parent.contents[name];
+ },rmdir:function (parent, name) {
+ var node = FS.lookupNode(parent, name);
+ for (var i in node.contents) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTEMPTY);
+ }
+ delete parent.contents[name];
+ },readdir:function (node) {
+ var entries = ['.', '..']
+ for (var key in node.contents) {
+ if (!node.contents.hasOwnProperty(key)) {
+ continue;
+ }
+ entries.push(key);
+ }
+ return entries;
+ },symlink:function (parent, newname, oldpath) {
+ var node = MEMFS.createNode(parent, newname, 511 /* 0777 */ | 40960, 0);
+ node.link = oldpath;
+ return node;
+ },readlink:function (node) {
+ if (!FS.isLink(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ return node.link;
+ }},stream_ops:{read:function (stream, buffer, offset, length, position) {
+ var contents = stream.node.contents;
+ if (position >= contents.length)
+ return 0;
+ var size = Math.min(contents.length - position, length);
+ assert(size >= 0);
+ if (size > 8 && contents.subarray) { // non-trivial, and typed array
+ buffer.set(contents.subarray(position, position + size), offset);
+ } else
+ {
+ for (var i = 0; i < size; i++) {
+ buffer[offset + i] = contents[position + i];
+ }
+ }
+ return size;
+ },write:function (stream, buffer, offset, length, position, canOwn) {
+ var node = stream.node;
+ node.timestamp = Date.now();
+ var contents = node.contents;
+ if (length && contents.length === 0 && position === 0 && buffer.subarray) {
+ // just replace it with the new data
+ if (canOwn && offset === 0) {
+ node.contents = buffer; // this could be a subarray of Emscripten HEAP, or allocated from some other source.
+ node.contentMode = (buffer.buffer === HEAP8.buffer) ? MEMFS.CONTENT_OWNING : MEMFS.CONTENT_FIXED;
+ } else {
+ node.contents = new Uint8Array(buffer.subarray(offset, offset+length));
+ node.contentMode = MEMFS.CONTENT_FIXED;
+ }
+ return length;
+ }
+ MEMFS.ensureFlexible(node);
+ var contents = node.contents;
+ while (contents.length < position) contents.push(0);
+ for (var i = 0; i < length; i++) {
+ contents[position + i] = buffer[offset + i];
+ }
+ return length;
+ },llseek:function (stream, offset, whence) {
+ var position = offset;
+ if (whence === 1) { // SEEK_CUR.
+ position += stream.position;
+ } else if (whence === 2) { // SEEK_END.
+ if (FS.isFile(stream.node.mode)) {
+ position += stream.node.contents.length;
+ }
+ }
+ if (position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ stream.ungotten = [];
+ stream.position = position;
+ return position;
+ },allocate:function (stream, offset, length) {
+ MEMFS.ensureFlexible(stream.node);
+ var contents = stream.node.contents;
+ var limit = offset + length;
+ while (limit > contents.length) contents.push(0);
+ },mmap:function (stream, buffer, offset, length, position, prot, flags) {
+ if (!FS.isFile(stream.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ var ptr;
+ var allocated;
+ var contents = stream.node.contents;
+ // Only make a new copy when MAP_PRIVATE is specified.
+ if ( !(flags & 2) &&
+ (contents.buffer === buffer || contents.buffer === buffer.buffer) ) {
+ // We can't emulate MAP_SHARED when the file is not backed by the buffer
+ // we're mapping to (e.g. the HEAP buffer).
+ allocated = false;
+ ptr = contents.byteOffset;
+ } else {
+ // Try to avoid unnecessary slices.
+ if (position > 0 || position + length < contents.length) {
+ if (contents.subarray) {
+ contents = contents.subarray(position, position + length);
+ } else {
+ contents = Array.prototype.slice.call(contents, position, position + length);
+ }
+ }
+ allocated = true;
+ ptr = _malloc(length);
+ if (!ptr) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOMEM);
+ }
+ buffer.set(contents, ptr);
+ }
+ return { ptr: ptr, allocated: allocated };
+ }}};
+
+ var IDBFS={dbs:{},indexedDB:function () {
+ return window.indexedDB || window.mozIndexedDB || window.webkitIndexedDB || window.msIndexedDB;
+ },DB_VERSION:21,DB_STORE_NAME:"FILE_DATA",mount:function (mount) {
+ // reuse all of the core MEMFS functionality
+ return MEMFS.mount.apply(null, arguments);
+ },syncfs:function (mount, populate, callback) {
+ IDBFS.getLocalSet(mount, function(err, local) {
+ if (err) return callback(err);
+
+ IDBFS.getRemoteSet(mount, function(err, remote) {
+ if (err) return callback(err);
+
+ var src = populate ? remote : local;
+ var dst = populate ? local : remote;
+
+ IDBFS.reconcile(src, dst, callback);
+ });
+ });
+ },getDB:function (name, callback) {
+ // check the cache first
+ var db = IDBFS.dbs[name];
+ if (db) {
+ return callback(null, db);
+ }
+
+ var req;
+ try {
+ req = IDBFS.indexedDB().open(name, IDBFS.DB_VERSION);
+ } catch (e) {
+ return callback(e);
+ }
+ req.onupgradeneeded = function(e) {
+ var db = e.target.result;
+ var transaction = e.target.transaction;
+
+ var fileStore;
+
+ if (db.objectStoreNames.contains(IDBFS.DB_STORE_NAME)) {
+ fileStore = transaction.objectStore(IDBFS.DB_STORE_NAME);
+ } else {
+ fileStore = db.createObjectStore(IDBFS.DB_STORE_NAME);
+ }
+
+ fileStore.createIndex('timestamp', 'timestamp', { unique: false });
+ };
+ req.onsuccess = function() {
+ db = req.result;
+
+ // add to the cache
+ IDBFS.dbs[name] = db;
+ callback(null, db);
+ };
+ req.onerror = function() {
+ callback(this.error);
+ };
+ },getLocalSet:function (mount, callback) {
+ var entries = {};
+
+ function isRealDir(p) {
+ return p !== '.' && p !== '..';
+ };
+ function toAbsolute(root) {
+ return function(p) {
+ return PATH.join2(root, p);
+ }
+ };
+
+ var check = FS.readdir(mount.mountpoint).filter(isRealDir).map(toAbsolute(mount.mountpoint));
+
+ while (check.length) {
+ var path = check.pop();
+ var stat;
+
+ try {
+ stat = FS.stat(path);
+ } catch (e) {
+ return callback(e);
+ }
+
+ if (FS.isDir(stat.mode)) {
+ check.push.apply(check, FS.readdir(path).filter(isRealDir).map(toAbsolute(path)));
+ }
+
+ entries[path] = { timestamp: stat.mtime };
+ }
+
+ return callback(null, { type: 'local', entries: entries });
+ },getRemoteSet:function (mount, callback) {
+ var entries = {};
+
+ IDBFS.getDB(mount.mountpoint, function(err, db) {
+ if (err) return callback(err);
+
+ var transaction = db.transaction([IDBFS.DB_STORE_NAME], 'readonly');
+ transaction.onerror = function() { callback(this.error); };
+
+ var store = transaction.objectStore(IDBFS.DB_STORE_NAME);
+ var index = store.index('timestamp');
+
+ index.openKeyCursor().onsuccess = function(event) {
+ var cursor = event.target.result;
+
+ if (!cursor) {
+ return callback(null, { type: 'remote', db: db, entries: entries });
+ }
+
+ entries[cursor.primaryKey] = { timestamp: cursor.key };
+
+ cursor.continue();
+ };
+ });
+ },loadLocalEntry:function (path, callback) {
+ var stat, node;
+
+ try {
+ var lookup = FS.lookupPath(path);
+ node = lookup.node;
+ stat = FS.stat(path);
+ } catch (e) {
+ return callback(e);
+ }
+
+ if (FS.isDir(stat.mode)) {
+ return callback(null, { timestamp: stat.mtime, mode: stat.mode });
+ } else if (FS.isFile(stat.mode)) {
+ return callback(null, { timestamp: stat.mtime, mode: stat.mode, contents: node.contents });
+ } else {
+ return callback(new Error('node type not supported'));
+ }
+ },storeLocalEntry:function (path, entry, callback) {
+ try {
+ if (FS.isDir(entry.mode)) {
+ FS.mkdir(path, entry.mode);
+ } else if (FS.isFile(entry.mode)) {
+ FS.writeFile(path, entry.contents, { encoding: 'binary', canOwn: true });
+ } else {
+ return callback(new Error('node type not supported'));
+ }
+
+ FS.utime(path, entry.timestamp, entry.timestamp);
+ } catch (e) {
+ return callback(e);
+ }
+
+ callback(null);
+ },removeLocalEntry:function (path, callback) {
+ try {
+ var lookup = FS.lookupPath(path);
+ var stat = FS.stat(path);
+
+ if (FS.isDir(stat.mode)) {
+ FS.rmdir(path);
+ } else if (FS.isFile(stat.mode)) {
+ FS.unlink(path);
+ }
+ } catch (e) {
+ return callback(e);
+ }
+
+ callback(null);
+ },loadRemoteEntry:function (store, path, callback) {
+ var req = store.get(path);
+ req.onsuccess = function(event) { callback(null, event.target.result); };
+ req.onerror = function() { callback(this.error); };
+ },storeRemoteEntry:function (store, path, entry, callback) {
+ var req = store.put(entry, path);
+ req.onsuccess = function() { callback(null); };
+ req.onerror = function() { callback(this.error); };
+ },removeRemoteEntry:function (store, path, callback) {
+ var req = store.delete(path);
+ req.onsuccess = function() { callback(null); };
+ req.onerror = function() { callback(this.error); };
+ },reconcile:function (src, dst, callback) {
+ var total = 0;
+
+ var create = [];
+ Object.keys(src.entries).forEach(function (key) {
+ var e = src.entries[key];
+ var e2 = dst.entries[key];
+ if (!e2 || e.timestamp > e2.timestamp) {
+ create.push(key);
+ total++;
+ }
+ });
+
+ var remove = [];
+ Object.keys(dst.entries).forEach(function (key) {
+ var e = dst.entries[key];
+ var e2 = src.entries[key];
+ if (!e2) {
+ remove.push(key);
+ total++;
+ }
+ });
+
+ if (!total) {
+ return callback(null);
+ }
+
+ var errored = false;
+ var completed = 0;
+ var db = src.type === 'remote' ? src.db : dst.db;
+ var transaction = db.transaction([IDBFS.DB_STORE_NAME], 'readwrite');
+ var store = transaction.objectStore(IDBFS.DB_STORE_NAME);
+
+ function done(err) {
+ if (err) {
+ if (!done.errored) {
+ done.errored = true;
+ return callback(err);
+ }
+ return;
+ }
+ if (++completed >= total) {
+ return callback(null);
+ }
+ };
+
+ transaction.onerror = function() { done(this.error); };
+
+ // sort paths in ascending order so directory entries are created
+ // before the files inside them
+ create.sort().forEach(function (path) {
+ if (dst.type === 'local') {
+ IDBFS.loadRemoteEntry(store, path, function (err, entry) {
+ if (err) return done(err);
+ IDBFS.storeLocalEntry(path, entry, done);
+ });
+ } else {
+ IDBFS.loadLocalEntry(path, function (err, entry) {
+ if (err) return done(err);
+ IDBFS.storeRemoteEntry(store, path, entry, done);
+ });
+ }
+ });
+
+ // sort paths in descending order so files are deleted before their
+ // parent directories
+ remove.sort().reverse().forEach(function(path) {
+ if (dst.type === 'local') {
+ IDBFS.removeLocalEntry(path, done);
+ } else {
+ IDBFS.removeRemoteEntry(store, path, done);
+ }
+ });
+ }};
+
+ var NODEFS={isWindows:false,staticInit:function () {
+ NODEFS.isWindows = !!process.platform.match(/^win/);
+ },mount:function (mount) {
+ assert(ENVIRONMENT_IS_NODE);
+ return NODEFS.createNode(null, '/', NODEFS.getMode(mount.opts.root), 0);
+ },createNode:function (parent, name, mode, dev) {
+ if (!FS.isDir(mode) && !FS.isFile(mode) && !FS.isLink(mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var node = FS.createNode(parent, name, mode);
+ node.node_ops = NODEFS.node_ops;
+ node.stream_ops = NODEFS.stream_ops;
+ return node;
+ },getMode:function (path) {
+ var stat;
+ try {
+ stat = fs.lstatSync(path);
+ if (NODEFS.isWindows) {
+ // On Windows, directories return permission bits 'rw-rw-rw-', even though they have 'rwxrwxrwx', so
+ // propagate write bits to execute bits.
+ stat.mode = stat.mode | ((stat.mode & 146) >> 1);
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ return stat.mode;
+ },realPath:function (node) {
+ var parts = [];
+ while (node.parent !== node) {
+ parts.push(node.name);
+ node = node.parent;
+ }
+ parts.push(node.mount.opts.root);
+ parts.reverse();
+ return PATH.join.apply(null, parts);
+ },flagsToPermissionStringMap:{0:"r",1:"r+",2:"r+",64:"r",65:"r+",66:"r+",129:"rx+",193:"rx+",514:"w+",577:"w",578:"w+",705:"wx",706:"wx+",1024:"a",1025:"a",1026:"a+",1089:"a",1090:"a+",1153:"ax",1154:"ax+",1217:"ax",1218:"ax+",4096:"rs",4098:"rs+"},flagsToPermissionString:function (flags) {
+ if (flags in NODEFS.flagsToPermissionStringMap) {
+ return NODEFS.flagsToPermissionStringMap[flags];
+ } else {
+ return flags;
+ }
+ },node_ops:{getattr:function (node) {
+ var path = NODEFS.realPath(node);
+ var stat;
+ try {
+ stat = fs.lstatSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ // node.js v0.10.20 doesn't report blksize and blocks on Windows. Fake them with default blksize of 4096.
+ // See http://support.microsoft.com/kb/140365
+ if (NODEFS.isWindows && !stat.blksize) {
+ stat.blksize = 4096;
+ }
+ if (NODEFS.isWindows && !stat.blocks) {
+ stat.blocks = (stat.size+stat.blksize-1)/stat.blksize|0;
+ }
+ return {
+ dev: stat.dev,
+ ino: stat.ino,
+ mode: stat.mode,
+ nlink: stat.nlink,
+ uid: stat.uid,
+ gid: stat.gid,
+ rdev: stat.rdev,
+ size: stat.size,
+ atime: stat.atime,
+ mtime: stat.mtime,
+ ctime: stat.ctime,
+ blksize: stat.blksize,
+ blocks: stat.blocks
+ };
+ },setattr:function (node, attr) {
+ var path = NODEFS.realPath(node);
+ try {
+ if (attr.mode !== undefined) {
+ fs.chmodSync(path, attr.mode);
+ // update the common node structure mode as well
+ node.mode = attr.mode;
+ }
+ if (attr.timestamp !== undefined) {
+ var date = new Date(attr.timestamp);
+ fs.utimesSync(path, date, date);
+ }
+ if (attr.size !== undefined) {
+ fs.truncateSync(path, attr.size);
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },lookup:function (parent, name) {
+ var path = PATH.join2(NODEFS.realPath(parent), name);
+ var mode = NODEFS.getMode(path);
+ return NODEFS.createNode(parent, name, mode);
+ },mknod:function (parent, name, mode, dev) {
+ var node = NODEFS.createNode(parent, name, mode, dev);
+ // create the backing node for this in the fs root as well
+ var path = NODEFS.realPath(node);
+ try {
+ if (FS.isDir(node.mode)) {
+ fs.mkdirSync(path, node.mode);
+ } else {
+ fs.writeFileSync(path, '', { mode: node.mode });
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ return node;
+ },rename:function (oldNode, newDir, newName) {
+ var oldPath = NODEFS.realPath(oldNode);
+ var newPath = PATH.join2(NODEFS.realPath(newDir), newName);
+ try {
+ fs.renameSync(oldPath, newPath);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },unlink:function (parent, name) {
+ var path = PATH.join2(NODEFS.realPath(parent), name);
+ try {
+ fs.unlinkSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },rmdir:function (parent, name) {
+ var path = PATH.join2(NODEFS.realPath(parent), name);
+ try {
+ fs.rmdirSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },readdir:function (node) {
+ var path = NODEFS.realPath(node);
+ try {
+ return fs.readdirSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },symlink:function (parent, newName, oldPath) {
+ var newPath = PATH.join2(NODEFS.realPath(parent), newName);
+ try {
+ fs.symlinkSync(oldPath, newPath);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },readlink:function (node) {
+ var path = NODEFS.realPath(node);
+ try {
+ return fs.readlinkSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ }},stream_ops:{open:function (stream) {
+ var path = NODEFS.realPath(stream.node);
+ try {
+ if (FS.isFile(stream.node.mode)) {
+ stream.nfd = fs.openSync(path, NODEFS.flagsToPermissionString(stream.flags));
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },close:function (stream) {
+ try {
+ if (FS.isFile(stream.node.mode) && stream.nfd) {
+ fs.closeSync(stream.nfd);
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },read:function (stream, buffer, offset, length, position) {
+ // FIXME this is terrible.
+ var nbuffer = new Buffer(length);
+ var res;
+ try {
+ res = fs.readSync(stream.nfd, nbuffer, 0, length, position);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ if (res > 0) {
+ for (var i = 0; i < res; i++) {
+ buffer[offset + i] = nbuffer[i];
+ }
+ }
+ return res;
+ },write:function (stream, buffer, offset, length, position) {
+ // FIXME this is terrible.
+ var nbuffer = new Buffer(buffer.subarray(offset, offset + length));
+ var res;
+ try {
+ res = fs.writeSync(stream.nfd, nbuffer, 0, length, position);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ return res;
+ },llseek:function (stream, offset, whence) {
+ var position = offset;
+ if (whence === 1) { // SEEK_CUR.
+ position += stream.position;
+ } else if (whence === 2) { // SEEK_END.
+ if (FS.isFile(stream.node.mode)) {
+ try {
+ var stat = fs.fstatSync(stream.nfd);
+ position += stat.size;
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ }
+ }
+
+ if (position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+
+ stream.position = position;
+ return position;
+ }}};
+
+ var _stdin=allocate(1, "i32*", ALLOC_STATIC);
+
+ var _stdout=allocate(1, "i32*", ALLOC_STATIC);
+
+ var _stderr=allocate(1, "i32*", ALLOC_STATIC);
+
+ function _fflush(stream) {
+ // int fflush(FILE *stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fflush.html
+ // we don't currently perform any user-space buffering of data
+ }var FS={root:null,mounts:[],devices:[null],streams:[],nextInode:1,nameTable:null,currentPath:"/",initialized:false,ignorePermissions:true,ErrnoError:null,genericErrors:{},handleFSError:function (e) {
+ if (!(e instanceof FS.ErrnoError)) throw e + ' : ' + stackTrace();
+ return ___setErrNo(e.errno);
+ },lookupPath:function (path, opts) {
+ path = PATH.resolve(FS.cwd(), path);
+ opts = opts || {};
+
+ var defaults = {
+ follow_mount: true,
+ recurse_count: 0
+ };
+ for (var key in defaults) {
+ if (opts[key] === undefined) {
+ opts[key] = defaults[key];
+ }
+ }
+
+ if (opts.recurse_count > 8) { // max recursive lookup of 8
+ throw new FS.ErrnoError(ERRNO_CODES.ELOOP);
+ }
+
+ // split the path
+ var parts = PATH.normalizeArray(path.split('/').filter(function(p) {
+ return !!p;
+ }), false);
+
+ // start at the root
+ var current = FS.root;
+ var current_path = '/';
+
+ for (var i = 0; i < parts.length; i++) {
+ var islast = (i === parts.length-1);
+ if (islast && opts.parent) {
+ // stop resolving
+ break;
+ }
+
+ current = FS.lookupNode(current, parts[i]);
+ current_path = PATH.join2(current_path, parts[i]);
+
+ // jump to the mount's root node if this is a mountpoint
+ if (FS.isMountpoint(current)) {
+ if (!islast || (islast && opts.follow_mount)) {
+ current = current.mounted.root;
+ }
+ }
+
+ // by default, lookupPath will not follow a symlink if it is the final path component.
+ // setting opts.follow = true will override this behavior.
+ if (!islast || opts.follow) {
+ var count = 0;
+ while (FS.isLink(current.mode)) {
+ var link = FS.readlink(current_path);
+ current_path = PATH.resolve(PATH.dirname(current_path), link);
+
+ var lookup = FS.lookupPath(current_path, { recurse_count: opts.recurse_count });
+ current = lookup.node;
+
+ if (count++ > 40) { // limit max consecutive symlinks to 40 (SYMLOOP_MAX).
+ throw new FS.ErrnoError(ERRNO_CODES.ELOOP);
+ }
+ }
+ }
+ }
+
+ return { path: current_path, node: current };
+ },getPath:function (node) {
+ var path;
+ while (true) {
+ if (FS.isRoot(node)) {
+ var mount = node.mount.mountpoint;
+ if (!path) return mount;
+ return mount[mount.length-1] !== '/' ? mount + '/' + path : mount + path;
+ }
+ path = path ? node.name + '/' + path : node.name;
+ node = node.parent;
+ }
+ },hashName:function (parentid, name) {
+ var hash = 0;
+
+
+ for (var i = 0; i < name.length; i++) {
+ hash = ((hash << 5) - hash + name.charCodeAt(i)) | 0;
+ }
+ return ((parentid + hash) >>> 0) % FS.nameTable.length;
+ },hashAddNode:function (node) {
+ var hash = FS.hashName(node.parent.id, node.name);
+ node.name_next = FS.nameTable[hash];
+ FS.nameTable[hash] = node;
+ },hashRemoveNode:function (node) {
+ var hash = FS.hashName(node.parent.id, node.name);
+ if (FS.nameTable[hash] === node) {
+ FS.nameTable[hash] = node.name_next;
+ } else {
+ var current = FS.nameTable[hash];
+ while (current) {
+ if (current.name_next === node) {
+ current.name_next = node.name_next;
+ break;
+ }
+ current = current.name_next;
+ }
+ }
+ },lookupNode:function (parent, name) {
+ var err = FS.mayLookup(parent);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ var hash = FS.hashName(parent.id, name);
+ for (var node = FS.nameTable[hash]; node; node = node.name_next) {
+ var nodeName = node.name;
+ if (node.parent.id === parent.id && nodeName === name) {
+ return node;
+ }
+ }
+ // if we failed to find it in the cache, call into the VFS
+ return FS.lookup(parent, name);
+ },createNode:function (parent, name, mode, rdev) {
+ if (!FS.FSNode) {
+ FS.FSNode = function(parent, name, mode, rdev) {
+ if (!parent) {
+ parent = this; // root node sets parent to itself
+ }
+ this.parent = parent;
+ this.mount = parent.mount;
+ this.mounted = null;
+ this.id = FS.nextInode++;
+ this.name = name;
+ this.mode = mode;
+ this.node_ops = {};
+ this.stream_ops = {};
+ this.rdev = rdev;
+ };
+
+ FS.FSNode.prototype = {};
+
+ // compatibility
+ var readMode = 292 | 73;
+ var writeMode = 146;
+
+ // NOTE we must use Object.defineProperties instead of individual calls to
+ // Object.defineProperty in order to make closure compiler happy
+ Object.defineProperties(FS.FSNode.prototype, {
+ read: {
+ get: function() { return (this.mode & readMode) === readMode; },
+ set: function(val) { val ? this.mode |= readMode : this.mode &= ~readMode; }
+ },
+ write: {
+ get: function() { return (this.mode & writeMode) === writeMode; },
+ set: function(val) { val ? this.mode |= writeMode : this.mode &= ~writeMode; }
+ },
+ isFolder: {
+ get: function() { return FS.isDir(this.mode); },
+ },
+ isDevice: {
+ get: function() { return FS.isChrdev(this.mode); },
+ },
+ });
+ }
+
+ var node = new FS.FSNode(parent, name, mode, rdev);
+
+ FS.hashAddNode(node);
+
+ return node;
+ },destroyNode:function (node) {
+ FS.hashRemoveNode(node);
+ },isRoot:function (node) {
+ return node === node.parent;
+ },isMountpoint:function (node) {
+ return !!node.mounted;
+ },isFile:function (mode) {
+ return (mode & 61440) === 32768;
+ },isDir:function (mode) {
+ return (mode & 61440) === 16384;
+ },isLink:function (mode) {
+ return (mode & 61440) === 40960;
+ },isChrdev:function (mode) {
+ return (mode & 61440) === 8192;
+ },isBlkdev:function (mode) {
+ return (mode & 61440) === 24576;
+ },isFIFO:function (mode) {
+ return (mode & 61440) === 4096;
+ },isSocket:function (mode) {
+ return (mode & 49152) === 49152;
+ },flagModes:{"r":0,"rs":1052672,"r+":2,"w":577,"wx":705,"xw":705,"w+":578,"wx+":706,"xw+":706,"a":1089,"ax":1217,"xa":1217,"a+":1090,"ax+":1218,"xa+":1218},modeStringToFlags:function (str) {
+ var flags = FS.flagModes[str];
+ if (typeof flags === 'undefined') {
+ throw new Error('Unknown file open mode: ' + str);
+ }
+ return flags;
+ },flagsToPermissionString:function (flag) {
+ var accmode = flag & 2097155;
+ var perms = ['r', 'w', 'rw'][accmode];
+ if ((flag & 512)) {
+ perms += 'w';
+ }
+ return perms;
+ },nodePermissions:function (node, perms) {
+ if (FS.ignorePermissions) {
+ return 0;
+ }
+ // return 0 if any user, group or owner bits are set.
+ if (perms.indexOf('r') !== -1 && !(node.mode & 292)) {
+ return ERRNO_CODES.EACCES;
+ } else if (perms.indexOf('w') !== -1 && !(node.mode & 146)) {
+ return ERRNO_CODES.EACCES;
+ } else if (perms.indexOf('x') !== -1 && !(node.mode & 73)) {
+ return ERRNO_CODES.EACCES;
+ }
+ return 0;
+ },mayLookup:function (dir) {
+ return FS.nodePermissions(dir, 'x');
+ },mayCreate:function (dir, name) {
+ try {
+ var node = FS.lookupNode(dir, name);
+ return ERRNO_CODES.EEXIST;
+ } catch (e) {
+ }
+ return FS.nodePermissions(dir, 'wx');
+ },mayDelete:function (dir, name, isdir) {
+ var node;
+ try {
+ node = FS.lookupNode(dir, name);
+ } catch (e) {
+ return e.errno;
+ }
+ var err = FS.nodePermissions(dir, 'wx');
+ if (err) {
+ return err;
+ }
+ if (isdir) {
+ if (!FS.isDir(node.mode)) {
+ return ERRNO_CODES.ENOTDIR;
+ }
+ if (FS.isRoot(node) || FS.getPath(node) === FS.cwd()) {
+ return ERRNO_CODES.EBUSY;
+ }
+ } else {
+ if (FS.isDir(node.mode)) {
+ return ERRNO_CODES.EISDIR;
+ }
+ }
+ return 0;
+ },mayOpen:function (node, flags) {
+ if (!node) {
+ return ERRNO_CODES.ENOENT;
+ }
+ if (FS.isLink(node.mode)) {
+ return ERRNO_CODES.ELOOP;
+ } else if (FS.isDir(node.mode)) {
+ if ((flags & 2097155) !== 0 || // opening for write
+ (flags & 512)) {
+ return ERRNO_CODES.EISDIR;
+ }
+ }
+ return FS.nodePermissions(node, FS.flagsToPermissionString(flags));
+ },MAX_OPEN_FDS:4096,nextfd:function (fd_start, fd_end) {
+ fd_start = fd_start || 0;
+ fd_end = fd_end || FS.MAX_OPEN_FDS;
+ for (var fd = fd_start; fd <= fd_end; fd++) {
+ if (!FS.streams[fd]) {
+ return fd;
+ }
+ }
+ throw new FS.ErrnoError(ERRNO_CODES.EMFILE);
+ },getStream:function (fd) {
+ return FS.streams[fd];
+ },createStream:function (stream, fd_start, fd_end) {
+ if (!FS.FSStream) {
+ FS.FSStream = function(){};
+ FS.FSStream.prototype = {};
+ // compatibility
+ Object.defineProperties(FS.FSStream.prototype, {
+ object: {
+ get: function() { return this.node; },
+ set: function(val) { this.node = val; }
+ },
+ isRead: {
+ get: function() { return (this.flags & 2097155) !== 1; }
+ },
+ isWrite: {
+ get: function() { return (this.flags & 2097155) !== 0; }
+ },
+ isAppend: {
+ get: function() { return (this.flags & 1024); }
+ }
+ });
+ }
+ if (0) {
+ // reuse the object
+ stream.__proto__ = FS.FSStream.prototype;
+ } else {
+ var newStream = new FS.FSStream();
+ for (var p in stream) {
+ newStream[p] = stream[p];
+ }
+ stream = newStream;
+ }
+ var fd = FS.nextfd(fd_start, fd_end);
+ stream.fd = fd;
+ FS.streams[fd] = stream;
+ return stream;
+ },closeStream:function (fd) {
+ FS.streams[fd] = null;
+ },getStreamFromPtr:function (ptr) {
+ return FS.streams[ptr - 1];
+ },getPtrForStream:function (stream) {
+ return stream ? stream.fd + 1 : 0;
+ },chrdev_stream_ops:{open:function (stream) {
+ var device = FS.getDevice(stream.node.rdev);
+ // override node's stream ops with the device's
+ stream.stream_ops = device.stream_ops;
+ // forward the open call
+ if (stream.stream_ops.open) {
+ stream.stream_ops.open(stream);
+ }
+ },llseek:function () {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }},major:function (dev) {
+ return ((dev) >> 8);
+ },minor:function (dev) {
+ return ((dev) & 0xff);
+ },makedev:function (ma, mi) {
+ return ((ma) << 8 | (mi));
+ },registerDevice:function (dev, ops) {
+ FS.devices[dev] = { stream_ops: ops };
+ },getDevice:function (dev) {
+ return FS.devices[dev];
+ },getMounts:function (mount) {
+ var mounts = [];
+ var check = [mount];
+
+ while (check.length) {
+ var m = check.pop();
+
+ mounts.push(m);
+
+ check.push.apply(check, m.mounts);
+ }
+
+ return mounts;
+ },syncfs:function (populate, callback) {
+ if (typeof(populate) === 'function') {
+ callback = populate;
+ populate = false;
+ }
+
+ var mounts = FS.getMounts(FS.root.mount);
+ var completed = 0;
+
+ function done(err) {
+ if (err) {
+ if (!done.errored) {
+ done.errored = true;
+ return callback(err);
+ }
+ return;
+ }
+ if (++completed >= mounts.length) {
+ callback(null);
+ }
+ };
+
+ // sync all mounts
+ mounts.forEach(function (mount) {
+ if (!mount.type.syncfs) {
+ return done(null);
+ }
+ mount.type.syncfs(mount, populate, done);
+ });
+ },mount:function (type, opts, mountpoint) {
+ var root = mountpoint === '/';
+ var pseudo = !mountpoint;
+ var node;
+
+ if (root && FS.root) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ } else if (!root && !pseudo) {
+ var lookup = FS.lookupPath(mountpoint, { follow_mount: false });
+
+ mountpoint = lookup.path; // use the absolute path
+ node = lookup.node;
+
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+
+ if (!FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTDIR);
+ }
+ }
+
+ var mount = {
+ type: type,
+ opts: opts,
+ mountpoint: mountpoint,
+ mounts: []
+ };
+
+ // create a root node for the fs
+ var mountRoot = type.mount(mount);
+ mountRoot.mount = mount;
+ mount.root = mountRoot;
+
+ if (root) {
+ FS.root = mountRoot;
+ } else if (node) {
+ // set as a mountpoint
+ node.mounted = mount;
+
+ // add the new mount to the current mount's children
+ if (node.mount) {
+ node.mount.mounts.push(mount);
+ }
+ }
+
+ return mountRoot;
+ },unmount:function (mountpoint) {
+ var lookup = FS.lookupPath(mountpoint, { follow_mount: false });
+
+ if (!FS.isMountpoint(lookup.node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+
+ // destroy the nodes for this mount, and all its child mounts
+ var node = lookup.node;
+ var mount = node.mounted;
+ var mounts = FS.getMounts(mount);
+
+ Object.keys(FS.nameTable).forEach(function (hash) {
+ var current = FS.nameTable[hash];
+
+ while (current) {
+ var next = current.name_next;
+
+ if (mounts.indexOf(current.mount) !== -1) {
+ FS.destroyNode(current);
+ }
+
+ current = next;
+ }
+ });
+
+ // no longer a mountpoint
+ node.mounted = null;
+
+ // remove this mount from the child mounts
+ var idx = node.mount.mounts.indexOf(mount);
+ assert(idx !== -1);
+ node.mount.mounts.splice(idx, 1);
+ },lookup:function (parent, name) {
+ return parent.node_ops.lookup(parent, name);
+ },mknod:function (path, mode, dev) {
+ var lookup = FS.lookupPath(path, { parent: true });
+ var parent = lookup.node;
+ var name = PATH.basename(path);
+ var err = FS.mayCreate(parent, name);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.mknod) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ return parent.node_ops.mknod(parent, name, mode, dev);
+ },create:function (path, mode) {
+ mode = mode !== undefined ? mode : 438 /* 0666 */;
+ mode &= 4095;
+ mode |= 32768;
+ return FS.mknod(path, mode, 0);
+ },mkdir:function (path, mode) {
+ mode = mode !== undefined ? mode : 511 /* 0777 */;
+ mode &= 511 | 512;
+ mode |= 16384;
+ return FS.mknod(path, mode, 0);
+ },mkdev:function (path, mode, dev) {
+ if (typeof(dev) === 'undefined') {
+ dev = mode;
+ mode = 438 /* 0666 */;
+ }
+ mode |= 8192;
+ return FS.mknod(path, mode, dev);
+ },symlink:function (oldpath, newpath) {
+ var lookup = FS.lookupPath(newpath, { parent: true });
+ var parent = lookup.node;
+ var newname = PATH.basename(newpath);
+ var err = FS.mayCreate(parent, newname);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.symlink) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ return parent.node_ops.symlink(parent, newname, oldpath);
+ },rename:function (old_path, new_path) {
+ var old_dirname = PATH.dirname(old_path);
+ var new_dirname = PATH.dirname(new_path);
+ var old_name = PATH.basename(old_path);
+ var new_name = PATH.basename(new_path);
+ // parents must exist
+ var lookup, old_dir, new_dir;
+ try {
+ lookup = FS.lookupPath(old_path, { parent: true });
+ old_dir = lookup.node;
+ lookup = FS.lookupPath(new_path, { parent: true });
+ new_dir = lookup.node;
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ // need to be part of the same mount
+ if (old_dir.mount !== new_dir.mount) {
+ throw new FS.ErrnoError(ERRNO_CODES.EXDEV);
+ }
+ // source must exist
+ var old_node = FS.lookupNode(old_dir, old_name);
+ // old path should not be an ancestor of the new path
+ var relative = PATH.relative(old_path, new_dirname);
+ if (relative.charAt(0) !== '.') {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ // new path should not be an ancestor of the old path
+ relative = PATH.relative(new_path, old_dirname);
+ if (relative.charAt(0) !== '.') {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTEMPTY);
+ }
+ // see if the new path already exists
+ var new_node;
+ try {
+ new_node = FS.lookupNode(new_dir, new_name);
+ } catch (e) {
+ // not fatal
+ }
+ // early out if nothing needs to change
+ if (old_node === new_node) {
+ return;
+ }
+ // we'll need to delete the old entry
+ var isdir = FS.isDir(old_node.mode);
+ var err = FS.mayDelete(old_dir, old_name, isdir);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ // need delete permissions if we'll be overwriting.
+ // need create permissions if new doesn't already exist.
+ err = new_node ?
+ FS.mayDelete(new_dir, new_name, isdir) :
+ FS.mayCreate(new_dir, new_name);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!old_dir.node_ops.rename) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isMountpoint(old_node) || (new_node && FS.isMountpoint(new_node))) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ // if we are going to change the parent, check write permissions
+ if (new_dir !== old_dir) {
+ err = FS.nodePermissions(old_dir, 'w');
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ }
+ // remove the node from the lookup hash
+ FS.hashRemoveNode(old_node);
+ // do the underlying fs rename
+ try {
+ old_dir.node_ops.rename(old_node, new_dir, new_name);
+ } catch (e) {
+ throw e;
+ } finally {
+ // add the node back to the hash (in case node_ops.rename
+ // changed its name)
+ FS.hashAddNode(old_node);
+ }
+ },rmdir:function (path) {
+ var lookup = FS.lookupPath(path, { parent: true });
+ var parent = lookup.node;
+ var name = PATH.basename(path);
+ var node = FS.lookupNode(parent, name);
+ var err = FS.mayDelete(parent, name, true);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.rmdir) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ parent.node_ops.rmdir(parent, name);
+ FS.destroyNode(node);
+ },readdir:function (path) {
+ var lookup = FS.lookupPath(path, { follow: true });
+ var node = lookup.node;
+ if (!node.node_ops.readdir) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTDIR);
+ }
+ return node.node_ops.readdir(node);
+ },unlink:function (path) {
+ var lookup = FS.lookupPath(path, { parent: true });
+ var parent = lookup.node;
+ var name = PATH.basename(path);
+ var node = FS.lookupNode(parent, name);
+ var err = FS.mayDelete(parent, name, false);
+ if (err) {
+ // POSIX says unlink should set EPERM, not EISDIR
+ if (err === ERRNO_CODES.EISDIR) err = ERRNO_CODES.EPERM;
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.unlink) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ parent.node_ops.unlink(parent, name);
+ FS.destroyNode(node);
+ },readlink:function (path) {
+ var lookup = FS.lookupPath(path);
+ var link = lookup.node;
+ if (!link.node_ops.readlink) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ return link.node_ops.readlink(link);
+ },stat:function (path, dontFollow) {
+ var lookup = FS.lookupPath(path, { follow: !dontFollow });
+ var node = lookup.node;
+ if (!node.node_ops.getattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ return node.node_ops.getattr(node);
+ },lstat:function (path) {
+ return FS.stat(path, true);
+ },chmod:function (path, mode, dontFollow) {
+ var node;
+ if (typeof path === 'string') {
+ var lookup = FS.lookupPath(path, { follow: !dontFollow });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ if (!node.node_ops.setattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ node.node_ops.setattr(node, {
+ mode: (mode & 4095) | (node.mode & ~4095),
+ timestamp: Date.now()
+ });
+ },lchmod:function (path, mode) {
+ FS.chmod(path, mode, true);
+ },fchmod:function (fd, mode) {
+ var stream = FS.getStream(fd);
+ if (!stream) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ FS.chmod(stream.node, mode);
+ },chown:function (path, uid, gid, dontFollow) {
+ var node;
+ if (typeof path === 'string') {
+ var lookup = FS.lookupPath(path, { follow: !dontFollow });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ if (!node.node_ops.setattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ node.node_ops.setattr(node, {
+ timestamp: Date.now()
+ // we ignore the uid / gid for now
+ });
+ },lchown:function (path, uid, gid) {
+ FS.chown(path, uid, gid, true);
+ },fchown:function (fd, uid, gid) {
+ var stream = FS.getStream(fd);
+ if (!stream) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ FS.chown(stream.node, uid, gid);
+ },truncate:function (path, len) {
+ if (len < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var node;
+ if (typeof path === 'string') {
+ var lookup = FS.lookupPath(path, { follow: true });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ if (!node.node_ops.setattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EISDIR);
+ }
+ if (!FS.isFile(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var err = FS.nodePermissions(node, 'w');
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ node.node_ops.setattr(node, {
+ size: len,
+ timestamp: Date.now()
+ });
+ },ftruncate:function (fd, len) {
+ var stream = FS.getStream(fd);
+ if (!stream) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ FS.truncate(stream.node, len);
+ },utime:function (path, atime, mtime) {
+ var lookup = FS.lookupPath(path, { follow: true });
+ var node = lookup.node;
+ node.node_ops.setattr(node, {
+ timestamp: Math.max(atime, mtime)
+ });
+ },open:function (path, flags, mode, fd_start, fd_end) {
+ flags = typeof flags === 'string' ? FS.modeStringToFlags(flags) : flags;
+ mode = typeof mode === 'undefined' ? 438 /* 0666 */ : mode;
+ if ((flags & 64)) {
+ mode = (mode & 4095) | 32768;
+ } else {
+ mode = 0;
+ }
+ var node;
+ if (typeof path === 'object') {
+ node = path;
+ } else {
+ path = PATH.normalize(path);
+ try {
+ var lookup = FS.lookupPath(path, {
+ follow: !(flags & 131072)
+ });
+ node = lookup.node;
+ } catch (e) {
+ // ignore
+ }
+ }
+ // perhaps we need to create the node
+ if ((flags & 64)) {
+ if (node) {
+ // if O_CREAT and O_EXCL are set, error out if the node already exists
+ if ((flags & 128)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EEXIST);
+ }
+ } else {
+ // node doesn't exist, try to create it
+ node = FS.mknod(path, mode, 0);
+ }
+ }
+ if (!node) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOENT);
+ }
+ // can't truncate a device
+ if (FS.isChrdev(node.mode)) {
+ flags &= ~512;
+ }
+ // check permissions
+ var err = FS.mayOpen(node, flags);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ // do truncation if necessary
+ if ((flags & 512)) {
+ FS.truncate(node, 0);
+ }
+ // we've already handled these, don't pass down to the underlying vfs
+ flags &= ~(128 | 512);
+
+ // register the stream with the filesystem
+ var stream = FS.createStream({
+ node: node,
+ path: FS.getPath(node), // we want the absolute path to the node
+ flags: flags,
+ seekable: true,
+ position: 0,
+ stream_ops: node.stream_ops,
+ // used by the file family libc calls (fopen, fwrite, ferror, etc.)
+ ungotten: [],
+ error: false
+ }, fd_start, fd_end);
+ // call the new stream's open function
+ if (stream.stream_ops.open) {
+ stream.stream_ops.open(stream);
+ }
+ if (Module['logReadFiles'] && !(flags & 1)) {
+ if (!FS.readFiles) FS.readFiles = {};
+ if (!(path in FS.readFiles)) {
+ FS.readFiles[path] = 1;
+ Module['printErr']('read file: ' + path);
+ }
+ }
+ return stream;
+ },close:function (stream) {
+ try {
+ if (stream.stream_ops.close) {
+ stream.stream_ops.close(stream);
+ }
+ } catch (e) {
+ throw e;
+ } finally {
+ FS.closeStream(stream.fd);
+ }
+ },llseek:function (stream, offset, whence) {
+ if (!stream.seekable || !stream.stream_ops.llseek) {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }
+ return stream.stream_ops.llseek(stream, offset, whence);
+ },read:function (stream, buffer, offset, length, position) {
+ if (length < 0 || position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ if ((stream.flags & 2097155) === 1) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if (FS.isDir(stream.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EISDIR);
+ }
+ if (!stream.stream_ops.read) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var seeking = true;
+ if (typeof position === 'undefined') {
+ position = stream.position;
+ seeking = false;
+ } else if (!stream.seekable) {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }
+ var bytesRead = stream.stream_ops.read(stream, buffer, offset, length, position);
+ if (!seeking) stream.position += bytesRead;
+ return bytesRead;
+ },write:function (stream, buffer, offset, length, position, canOwn) {
+ if (length < 0 || position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if (FS.isDir(stream.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EISDIR);
+ }
+ if (!stream.stream_ops.write) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var seeking = true;
+ if (typeof position === 'undefined') {
+ position = stream.position;
+ seeking = false;
+ } else if (!stream.seekable) {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }
+ if (stream.flags & 1024) {
+ // seek to the end before writing in append mode
+ FS.llseek(stream, 0, 2);
+ }
+ var bytesWritten = stream.stream_ops.write(stream, buffer, offset, length, position, canOwn);
+ if (!seeking) stream.position += bytesWritten;
+ return bytesWritten;
+ },allocate:function (stream, offset, length) {
+ if (offset < 0 || length <= 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if (!FS.isFile(stream.node.mode) && !FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ if (!stream.stream_ops.allocate) {
+ throw new FS.ErrnoError(ERRNO_CODES.EOPNOTSUPP);
+ }
+ stream.stream_ops.allocate(stream, offset, length);
+ },mmap:function (stream, buffer, offset, length, position, prot, flags) {
+ // TODO if PROT is PROT_WRITE, make sure we have write access
+ if ((stream.flags & 2097155) === 1) {
+ throw new FS.ErrnoError(ERRNO_CODES.EACCES);
+ }
+ if (!stream.stream_ops.mmap) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ return stream.stream_ops.mmap(stream, buffer, offset, length, position, prot, flags);
+ },ioctl:function (stream, cmd, arg) {
+ if (!stream.stream_ops.ioctl) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTTY);
+ }
+ return stream.stream_ops.ioctl(stream, cmd, arg);
+ },readFile:function (path, opts) {
+ opts = opts || {};
+ opts.flags = opts.flags || 'r';
+ opts.encoding = opts.encoding || 'binary';
+ if (opts.encoding !== 'utf8' && opts.encoding !== 'binary') {
+ throw new Error('Invalid encoding type "' + opts.encoding + '"');
+ }
+ var ret;
+ var stream = FS.open(path, opts.flags);
+ var stat = FS.stat(path);
+ var length = stat.size;
+ var buf = new Uint8Array(length);
+ FS.read(stream, buf, 0, length, 0);
+ if (opts.encoding === 'utf8') {
+ ret = '';
+ var utf8 = new Runtime.UTF8Processor();
+ for (var i = 0; i < length; i++) {
+ ret += utf8.processCChar(buf[i]);
+ }
+ } else if (opts.encoding === 'binary') {
+ ret = buf;
+ }
+ FS.close(stream);
+ return ret;
+ },writeFile:function (path, data, opts) {
+ opts = opts || {};
+ opts.flags = opts.flags || 'w';
+ opts.encoding = opts.encoding || 'utf8';
+ if (opts.encoding !== 'utf8' && opts.encoding !== 'binary') {
+ throw new Error('Invalid encoding type "' + opts.encoding + '"');
+ }
+ var stream = FS.open(path, opts.flags, opts.mode);
+ if (opts.encoding === 'utf8') {
+ var utf8 = new Runtime.UTF8Processor();
+ var buf = new Uint8Array(utf8.processJSString(data));
+ FS.write(stream, buf, 0, buf.length, 0, opts.canOwn);
+ } else if (opts.encoding === 'binary') {
+ FS.write(stream, data, 0, data.length, 0, opts.canOwn);
+ }
+ FS.close(stream);
+ },cwd:function () {
+ return FS.currentPath;
+ },chdir:function (path) {
+ var lookup = FS.lookupPath(path, { follow: true });
+ if (!FS.isDir(lookup.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTDIR);
+ }
+ var err = FS.nodePermissions(lookup.node, 'x');
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ FS.currentPath = lookup.path;
+ },createDefaultDirectories:function () {
+ FS.mkdir('/tmp');
+ },createDefaultDevices:function () {
+ // create /dev
+ FS.mkdir('/dev');
+ // setup /dev/null
+ FS.registerDevice(FS.makedev(1, 3), {
+ read: function() { return 0; },
+ write: function() { return 0; }
+ });
+ FS.mkdev('/dev/null', FS.makedev(1, 3));
+ // setup /dev/tty and /dev/tty1
+ // stderr needs to print output using Module['printErr']
+ // so we register a second tty just for it.
+ TTY.register(FS.makedev(5, 0), TTY.default_tty_ops);
+ TTY.register(FS.makedev(6, 0), TTY.default_tty1_ops);
+ FS.mkdev('/dev/tty', FS.makedev(5, 0));
+ FS.mkdev('/dev/tty1', FS.makedev(6, 0));
+ // we're not going to emulate the actual shm device,
+ // just create the tmp dirs that reside in it commonly
+ FS.mkdir('/dev/shm');
+ FS.mkdir('/dev/shm/tmp');
+ },createStandardStreams:function () {
+ // TODO deprecate the old functionality of a single
+ // input / output callback and that utilizes FS.createDevice
+ // and instead require a unique set of stream ops
+
+ // by default, we symlink the standard streams to the
+ // default tty devices. however, if the standard streams
+ // have been overwritten we create a unique device for
+ // them instead.
+ if (Module['stdin']) {
+ FS.createDevice('/dev', 'stdin', Module['stdin']);
+ } else {
+ FS.symlink('/dev/tty', '/dev/stdin');
+ }
+ if (Module['stdout']) {
+ FS.createDevice('/dev', 'stdout', null, Module['stdout']);
+ } else {
+ FS.symlink('/dev/tty', '/dev/stdout');
+ }
+ if (Module['stderr']) {
+ FS.createDevice('/dev', 'stderr', null, Module['stderr']);
+ } else {
+ FS.symlink('/dev/tty1', '/dev/stderr');
+ }
+
+ // open default streams for the stdin, stdout and stderr devices
+ var stdin = FS.open('/dev/stdin', 'r');
+ HEAP32[((_stdin)>>2)]=FS.getPtrForStream(stdin);
+ assert(stdin.fd === 0, 'invalid handle for stdin (' + stdin.fd + ')');
+
+ var stdout = FS.open('/dev/stdout', 'w');
+ HEAP32[((_stdout)>>2)]=FS.getPtrForStream(stdout);
+ assert(stdout.fd === 1, 'invalid handle for stdout (' + stdout.fd + ')');
+
+ var stderr = FS.open('/dev/stderr', 'w');
+ HEAP32[((_stderr)>>2)]=FS.getPtrForStream(stderr);
+ assert(stderr.fd === 2, 'invalid handle for stderr (' + stderr.fd + ')');
+ },ensureErrnoError:function () {
+ if (FS.ErrnoError) return;
+ FS.ErrnoError = function ErrnoError(errno) {
+ this.errno = errno;
+ for (var key in ERRNO_CODES) {
+ if (ERRNO_CODES[key] === errno) {
+ this.code = key;
+ break;
+ }
+ }
+ this.message = ERRNO_MESSAGES[errno];
+ };
+ FS.ErrnoError.prototype = new Error();
+ FS.ErrnoError.prototype.constructor = FS.ErrnoError;
+ // Some errors may happen quite a bit, to avoid overhead we reuse them (and suffer a lack of stack info)
+ [ERRNO_CODES.ENOENT].forEach(function(code) {
+ FS.genericErrors[code] = new FS.ErrnoError(code);
+ FS.genericErrors[code].stack = '<generic error, no stack>';
+ });
+ },staticInit:function () {
+ FS.ensureErrnoError();
+
+ FS.nameTable = new Array(4096);
+
+ FS.mount(MEMFS, {}, '/');
+
+ FS.createDefaultDirectories();
+ FS.createDefaultDevices();
+ },init:function (input, output, error) {
+ assert(!FS.init.initialized, 'FS.init was previously called. If you want to initialize later with custom parameters, remove any earlier calls (note that one is automatically added to the generated code)');
+ FS.init.initialized = true;
+
+ FS.ensureErrnoError();
+
+ // Allow Module.stdin etc. to provide defaults, if none explicitly passed to us here
+ Module['stdin'] = input || Module['stdin'];
+ Module['stdout'] = output || Module['stdout'];
+ Module['stderr'] = error || Module['stderr'];
+
+ FS.createStandardStreams();
+ },quit:function () {
+ FS.init.initialized = false;
+ for (var i = 0; i < FS.streams.length; i++) {
+ var stream = FS.streams[i];
+ if (!stream) {
+ continue;
+ }
+ FS.close(stream);
+ }
+ },getMode:function (canRead, canWrite) {
+ var mode = 0;
+ if (canRead) mode |= 292 | 73;
+ if (canWrite) mode |= 146;
+ return mode;
+ },joinPath:function (parts, forceRelative) {
+ var path = PATH.join.apply(null, parts);
+ if (forceRelative && path[0] == '/') path = path.substr(1);
+ return path;
+ },absolutePath:function (relative, base) {
+ return PATH.resolve(base, relative);
+ },standardizePath:function (path) {
+ return PATH.normalize(path);
+ },findObject:function (path, dontResolveLastLink) {
+ var ret = FS.analyzePath(path, dontResolveLastLink);
+ if (ret.exists) {
+ return ret.object;
+ } else {
+ ___setErrNo(ret.error);
+ return null;
+ }
+ },analyzePath:function (path, dontResolveLastLink) {
+ // operate from within the context of the symlink's target
+ try {
+ var lookup = FS.lookupPath(path, { follow: !dontResolveLastLink });
+ path = lookup.path;
+ } catch (e) {
+ }
+ var ret = {
+ isRoot: false, exists: false, error: 0, name: null, path: null, object: null,
+ parentExists: false, parentPath: null, parentObject: null
+ };
+ try {
+ var lookup = FS.lookupPath(path, { parent: true });
+ ret.parentExists = true;
+ ret.parentPath = lookup.path;
+ ret.parentObject = lookup.node;
+ ret.name = PATH.basename(path);
+ lookup = FS.lookupPath(path, { follow: !dontResolveLastLink });
+ ret.exists = true;
+ ret.path = lookup.path;
+ ret.object = lookup.node;
+ ret.name = lookup.node.name;
+ ret.isRoot = lookup.path === '/';
+ } catch (e) {
+ ret.error = e.errno;
+ };
+ return ret;
+ },createFolder:function (parent, name, canRead, canWrite) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ var mode = FS.getMode(canRead, canWrite);
+ return FS.mkdir(path, mode);
+ },createPath:function (parent, path, canRead, canWrite) {
+ parent = typeof parent === 'string' ? parent : FS.getPath(parent);
+ var parts = path.split('/').reverse();
+ while (parts.length) {
+ var part = parts.pop();
+ if (!part) continue;
+ var current = PATH.join2(parent, part);
+ try {
+ FS.mkdir(current);
+ } catch (e) {
+ // ignore EEXIST
+ }
+ parent = current;
+ }
+ return current;
+ },createFile:function (parent, name, properties, canRead, canWrite) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ var mode = FS.getMode(canRead, canWrite);
+ return FS.create(path, mode);
+ },createDataFile:function (parent, name, data, canRead, canWrite, canOwn) {
+ var path = name ? PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name) : parent;
+ var mode = FS.getMode(canRead, canWrite);
+ var node = FS.create(path, mode);
+ if (data) {
+ if (typeof data === 'string') {
+ var arr = new Array(data.length);
+ for (var i = 0, len = data.length; i < len; ++i) arr[i] = data.charCodeAt(i);
+ data = arr;
+ }
+ // make sure we can write to the file
+ FS.chmod(node, mode | 146);
+ var stream = FS.open(node, 'w');
+ FS.write(stream, data, 0, data.length, 0, canOwn);
+ FS.close(stream);
+ FS.chmod(node, mode);
+ }
+ return node;
+ },createDevice:function (parent, name, input, output) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ var mode = FS.getMode(!!input, !!output);
+ if (!FS.createDevice.major) FS.createDevice.major = 64;
+ var dev = FS.makedev(FS.createDevice.major++, 0);
+ // Create a fake device that a set of stream ops to emulate
+ // the old behavior.
+ FS.registerDevice(dev, {
+ open: function(stream) {
+ stream.seekable = false;
+ },
+ close: function(stream) {
+ // flush any pending line data
+ if (output && output.buffer && output.buffer.length) {
+ output(10);
+ }
+ },
+ read: function(stream, buffer, offset, length, pos /* ignored */) {
+ var bytesRead = 0;
+ for (var i = 0; i < length; i++) {
+ var result;
+ try {
+ result = input();
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ if (result === undefined && bytesRead === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ if (result === null || result === undefined) break;
+ bytesRead++;
+ buffer[offset+i] = result;
+ }
+ if (bytesRead) {
+ stream.node.timestamp = Date.now();
+ }
+ return bytesRead;
+ },
+ write: function(stream, buffer, offset, length, pos) {
+ for (var i = 0; i < length; i++) {
+ try {
+ output(buffer[offset+i]);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ }
+ if (length) {
+ stream.node.timestamp = Date.now();
+ }
+ return i;
+ }
+ });
+ return FS.mkdev(path, mode, dev);
+ },createLink:function (parent, name, target, canRead, canWrite) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ return FS.symlink(target, path);
+ },forceLoadFile:function (obj) {
+ if (obj.isDevice || obj.isFolder || obj.link || obj.contents) return true;
+ var success = true;
+ if (typeof XMLHttpRequest !== 'undefined') {
+ throw new Error("Lazy loading should have been performed (contents set) in createLazyFile, but it was not. Lazy loading only works in web workers. Use --embed-file or --preload-file in emcc on the main thread.");
+ } else if (Module['read']) {
+ // Command-line.
+ try {
+ // WARNING: Can't read binary files in V8's d8 or tracemonkey's js, as
+ // read() will try to parse UTF8.
+ obj.contents = intArrayFromString(Module['read'](obj.url), true);
+ } catch (e) {
+ success = false;
+ }
+ } else {
+ throw new Error('Cannot load without read() or XMLHttpRequest.');
+ }
+ if (!success) ___setErrNo(ERRNO_CODES.EIO);
+ return success;
+ },createLazyFile:function (parent, name, url, canRead, canWrite) {
+ // Lazy chunked Uint8Array (implements get and length from Uint8Array). Actual getting is abstracted away for eventual reuse.
+ function LazyUint8Array() {
+ this.lengthKnown = false;
+ this.chunks = []; // Loaded chunks. Index is the chunk number
+ }
+ LazyUint8Array.prototype.get = function LazyUint8Array_get(idx) {
+ if (idx > this.length-1 || idx < 0) {
+ return undefined;
+ }
+ var chunkOffset = idx % this.chunkSize;
+ var chunkNum = Math.floor(idx / this.chunkSize);
+ return this.getter(chunkNum)[chunkOffset];
+ }
+ LazyUint8Array.prototype.setDataGetter = function LazyUint8Array_setDataGetter(getter) {
+ this.getter = getter;
+ }
+ LazyUint8Array.prototype.cacheLength = function LazyUint8Array_cacheLength() {
+ // Find length
+ var xhr = new XMLHttpRequest();
+ xhr.open('HEAD', url, false);
+ xhr.send(null);
+ if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". Status: " + xhr.status);
+ var datalength = Number(xhr.getResponseHeader("Content-length"));
+ var header;
+ var hasByteServing = (header = xhr.getResponseHeader("Accept-Ranges")) && header === "bytes";
+ var chunkSize = 1024*1024; // Chunk size in bytes
+
+ if (!hasByteServing) chunkSize = datalength;
+
+ // Function to get a range from the remote URL.
+ var doXHR = (function(from, to) {
+ if (from > to) throw new Error("invalid range (" + from + ", " + to + ") or no bytes requested!");
+ if (to > datalength-1) throw new Error("only " + datalength + " bytes available! programmer error!");
+
+ // TODO: Use mozResponseArrayBuffer, responseStream, etc. if available.
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, false);
+ if (datalength !== chunkSize) xhr.setRequestHeader("Range", "bytes=" + from + "-" + to);
+
+ // Some hints to the browser that we want binary data.
+ if (typeof Uint8Array != 'undefined') xhr.responseType = 'arraybuffer';
+ if (xhr.overrideMimeType) {
+ xhr.overrideMimeType('text/plain; charset=x-user-defined');
+ }
+
+ xhr.send(null);
+ if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". Status: " + xhr.status);
+ if (xhr.response !== undefined) {
+ return new Uint8Array(xhr.response || []);
+ } else {
+ return intArrayFromString(xhr.responseText || '', true);
+ }
+ });
+ var lazyArray = this;
+ lazyArray.setDataGetter(function(chunkNum) {
+ var start = chunkNum * chunkSize;
+ var end = (chunkNum+1) * chunkSize - 1; // including this byte
+ end = Math.min(end, datalength-1); // if datalength-1 is selected, this is the last block
+ if (typeof(lazyArray.chunks[chunkNum]) === "undefined") {
+ lazyArray.chunks[chunkNum] = doXHR(start, end);
+ }
+ if (typeof(lazyArray.chunks[chunkNum]) === "undefined") throw new Error("doXHR failed!");
+ return lazyArray.chunks[chunkNum];
+ });
+
+ this._length = datalength;
+ this._chunkSize = chunkSize;
+ this.lengthKnown = true;
+ }
+ if (typeof XMLHttpRequest !== 'undefined') {
+ if (!ENVIRONMENT_IS_WORKER) throw 'Cannot do synchronous binary XHRs outside webworkers in modern browsers. Use --embed-file or --preload-file in emcc';
+ var lazyArray = new LazyUint8Array();
+ Object.defineProperty(lazyArray, "length", {
+ get: function() {
+ if(!this.lengthKnown) {
+ this.cacheLength();
+ }
+ return this._length;
+ }
+ });
+ Object.defineProperty(lazyArray, "chunkSize", {
+ get: function() {
+ if(!this.lengthKnown) {
+ this.cacheLength();
+ }
+ return this._chunkSize;
+ }
+ });
+
+ var properties = { isDevice: false, contents: lazyArray };
+ } else {
+ var properties = { isDevice: false, url: url };
+ }
+
+ var node = FS.createFile(parent, name, properties, canRead, canWrite);
+ // This is a total hack, but I want to get this lazy file code out of the
+ // core of MEMFS. If we want to keep this lazy file concept I feel it should
+ // be its own thin LAZYFS proxying calls to MEMFS.
+ if (properties.contents) {
+ node.contents = properties.contents;
+ } else if (properties.url) {
+ node.contents = null;
+ node.url = properties.url;
+ }
+ // override each stream op with one that tries to force load the lazy file first
+ var stream_ops = {};
+ var keys = Object.keys(node.stream_ops);
+ keys.forEach(function(key) {
+ var fn = node.stream_ops[key];
+ stream_ops[key] = function forceLoadLazyFile() {
+ if (!FS.forceLoadFile(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ return fn.apply(null, arguments);
+ };
+ });
+ // use a custom read function
+ stream_ops.read = function stream_ops_read(stream, buffer, offset, length, position) {
+ if (!FS.forceLoadFile(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ var contents = stream.node.contents;
+ if (position >= contents.length)
+ return 0;
+ var size = Math.min(contents.length - position, length);
+ assert(size >= 0);
+ if (contents.slice) { // normal array
+ for (var i = 0; i < size; i++) {
+ buffer[offset + i] = contents[position + i];
+ }
+ } else {
+ for (var i = 0; i < size; i++) { // LazyUint8Array from sync binary XHR
+ buffer[offset + i] = contents.get(position + i);
+ }
+ }
+ return size;
+ };
+ node.stream_ops = stream_ops;
+ return node;
+ },createPreloadedFile:function (parent, name, url, canRead, canWrite, onload, onerror, dontCreateFile, canOwn) {
+ Browser.init();
+ // TODO we should allow people to just pass in a complete filename instead
+ // of parent and name being that we just join them anyways
+ var fullname = name ? PATH.resolve(PATH.join2(parent, name)) : parent;
+ function processData(byteArray) {
+ function finish(byteArray) {
+ if (!dontCreateFile) {
+ FS.createDataFile(parent, name, byteArray, canRead, canWrite, canOwn);
+ }
+ if (onload) onload();
+ removeRunDependency('cp ' + fullname);
+ }
+ var handled = false;
+ Module['preloadPlugins'].forEach(function(plugin) {
+ if (handled) return;
+ if (plugin['canHandle'](fullname)) {
+ plugin['handle'](byteArray, fullname, finish, function() {
+ if (onerror) onerror();
+ removeRunDependency('cp ' + fullname);
+ });
+ handled = true;
+ }
+ });
+ if (!handled) finish(byteArray);
+ }
+ addRunDependency('cp ' + fullname);
+ if (typeof url == 'string') {
+ Browser.asyncLoad(url, function(byteArray) {
+ processData(byteArray);
+ }, onerror);
+ } else {
+ processData(url);
+ }
+ },indexedDB:function () {
+ return window.indexedDB || window.mozIndexedDB || window.webkitIndexedDB || window.msIndexedDB;
+ },DB_NAME:function () {
+ return 'EM_FS_' + window.location.pathname;
+ },DB_VERSION:20,DB_STORE_NAME:"FILE_DATA",saveFilesToDB:function (paths, onload, onerror) {
+ onload = onload || function(){};
+ onerror = onerror || function(){};
+ var indexedDB = FS.indexedDB();
+ try {
+ var openRequest = indexedDB.open(FS.DB_NAME(), FS.DB_VERSION);
+ } catch (e) {
+ return onerror(e);
+ }
+ openRequest.onupgradeneeded = function openRequest_onupgradeneeded() {
+ console.log('creating db');
+ var db = openRequest.result;
+ db.createObjectStore(FS.DB_STORE_NAME);
+ };
+ openRequest.onsuccess = function openRequest_onsuccess() {
+ var db = openRequest.result;
+ var transaction = db.transaction([FS.DB_STORE_NAME], 'readwrite');
+ var files = transaction.objectStore(FS.DB_STORE_NAME);
+ var ok = 0, fail = 0, total = paths.length;
+ function finish() {
+ if (fail == 0) onload(); else onerror();
+ }
+ paths.forEach(function(path) {
+ var putRequest = files.put(FS.analyzePath(path).object.contents, path);
+ putRequest.onsuccess = function putRequest_onsuccess() { ok++; if (ok + fail == total) finish() };
+ putRequest.onerror = function putRequest_onerror() { fail++; if (ok + fail == total) finish() };
+ });
+ transaction.onerror = onerror;
+ };
+ openRequest.onerror = onerror;
+ },loadFilesFromDB:function (paths, onload, onerror) {
+ onload = onload || function(){};
+ onerror = onerror || function(){};
+ var indexedDB = FS.indexedDB();
+ try {
+ var openRequest = indexedDB.open(FS.DB_NAME(), FS.DB_VERSION);
+ } catch (e) {
+ return onerror(e);
+ }
+ openRequest.onupgradeneeded = onerror; // no database to load from
+ openRequest.onsuccess = function openRequest_onsuccess() {
+ var db = openRequest.result;
+ try {
+ var transaction = db.transaction([FS.DB_STORE_NAME], 'readonly');
+ } catch(e) {
+ onerror(e);
+ return;
+ }
+ var files = transaction.objectStore(FS.DB_STORE_NAME);
+ var ok = 0, fail = 0, total = paths.length;
+ function finish() {
+ if (fail == 0) onload(); else onerror();
+ }
+ paths.forEach(function(path) {
+ var getRequest = files.get(path);
+ getRequest.onsuccess = function getRequest_onsuccess() {
+ if (FS.analyzePath(path).exists) {
+ FS.unlink(path);
+ }
+ FS.createDataFile(PATH.dirname(path), PATH.basename(path), getRequest.result, true, true, true);
+ ok++;
+ if (ok + fail == total) finish();
+ };
+ getRequest.onerror = function getRequest_onerror() { fail++; if (ok + fail == total) finish() };
+ });
+ transaction.onerror = onerror;
+ };
+ openRequest.onerror = onerror;
+ }};
+
+
+
+
+ function _mkport() { throw 'TODO' }var SOCKFS={mount:function (mount) {
+ return FS.createNode(null, '/', 16384 | 511 /* 0777 */, 0);
+ },createSocket:function (family, type, protocol) {
+ var streaming = type == 1;
+ if (protocol) {
+ assert(streaming == (protocol == 6)); // if SOCK_STREAM, must be tcp
+ }
+
+ // create our internal socket structure
+ var sock = {
+ family: family,
+ type: type,
+ protocol: protocol,
+ server: null,
+ peers: {},
+ pending: [],
+ recv_queue: [],
+ sock_ops: SOCKFS.websocket_sock_ops
+ };
+
+ // create the filesystem node to store the socket structure
+ var name = SOCKFS.nextname();
+ var node = FS.createNode(SOCKFS.root, name, 49152, 0);
+ node.sock = sock;
+
+ // and the wrapping stream that enables library functions such
+ // as read and write to indirectly interact with the socket
+ var stream = FS.createStream({
+ path: name,
+ node: node,
+ flags: FS.modeStringToFlags('r+'),
+ seekable: false,
+ stream_ops: SOCKFS.stream_ops
+ });
+
+ // map the new stream to the socket structure (sockets have a 1:1
+ // relationship with a stream)
+ sock.stream = stream;
+
+ return sock;
+ },getSocket:function (fd) {
+ var stream = FS.getStream(fd);
+ if (!stream || !FS.isSocket(stream.node.mode)) {
+ return null;
+ }
+ return stream.node.sock;
+ },stream_ops:{poll:function (stream) {
+ var sock = stream.node.sock;
+ return sock.sock_ops.poll(sock);
+ },ioctl:function (stream, request, varargs) {
+ var sock = stream.node.sock;
+ return sock.sock_ops.ioctl(sock, request, varargs);
+ },read:function (stream, buffer, offset, length, position /* ignored */) {
+ var sock = stream.node.sock;
+ var msg = sock.sock_ops.recvmsg(sock, length);
+ if (!msg) {
+ // socket is closed
+ return 0;
+ }
+ buffer.set(msg.buffer, offset);
+ return msg.buffer.length;
+ },write:function (stream, buffer, offset, length, position /* ignored */) {
+ var sock = stream.node.sock;
+ return sock.sock_ops.sendmsg(sock, buffer, offset, length);
+ },close:function (stream) {
+ var sock = stream.node.sock;
+ sock.sock_ops.close(sock);
+ }},nextname:function () {
+ if (!SOCKFS.nextname.current) {
+ SOCKFS.nextname.current = 0;
+ }
+ return 'socket[' + (SOCKFS.nextname.current++) + ']';
+ },websocket_sock_ops:{createPeer:function (sock, addr, port) {
+ var ws;
+
+ if (typeof addr === 'object') {
+ ws = addr;
+ addr = null;
+ port = null;
+ }
+
+ if (ws) {
+ // for sockets that've already connected (e.g. we're the server)
+ // we can inspect the _socket property for the address
+ if (ws._socket) {
+ addr = ws._socket.remoteAddress;
+ port = ws._socket.remotePort;
+ }
+ // if we're just now initializing a connection to the remote,
+ // inspect the url property
+ else {
+ var result = /ws[s]?:\/\/([^:]+):(\d+)/.exec(ws.url);
+ if (!result) {
+ throw new Error('WebSocket URL must be in the format ws(s)://address:port');
+ }
+ addr = result[1];
+ port = parseInt(result[2], 10);
+ }
+ } else {
+ // create the actual websocket object and connect
+ try {
+ // runtimeConfig gets set to true if WebSocket runtime configuration is available.
+ var runtimeConfig = (Module['websocket'] && ('object' === typeof Module['websocket']));
+
+ // The default value is 'ws://' the replace is needed because the compiler replaces "//" comments with '#'
+ // comments without checking context, so we'd end up with ws:#, the replace swaps the "#" for "//" again.
+ var url = 'ws:#'.replace('#', '//');
+
+ if (runtimeConfig) {
+ if ('string' === typeof Module['websocket']['url']) {
+ url = Module['websocket']['url']; // Fetch runtime WebSocket URL config.
+ }
+ }
+
+ if (url === 'ws://' || url === 'wss://') { // Is the supplied URL config just a prefix, if so complete it.
+ url = url + addr + ':' + port;
+ }
+
+ // Make the WebSocket subprotocol (Sec-WebSocket-Protocol) default to binary if no configuration is set.
+ var subProtocols = 'binary'; // The default value is 'binary'
+
+ if (runtimeConfig) {
+ if ('string' === typeof Module['websocket']['subprotocol']) {
+ subProtocols = Module['websocket']['subprotocol']; // Fetch runtime WebSocket subprotocol config.
+ }
+ }
+
+ // The regex trims the string (removes spaces at the beginning and end, then splits the string by
+ // <any space>,<any space> into an Array. Whitespace removal is important for Websockify and ws.
+ subProtocols = subProtocols.replace(/^ +| +$/g,"").split(/ *, */);
+
+ // The node ws library API for specifying optional subprotocol is slightly different than the browser's.
+ var opts = ENVIRONMENT_IS_NODE ? {'protocol': subProtocols.toString()} : subProtocols;
+
+ // If node we use the ws library.
+ var WebSocket = ENVIRONMENT_IS_NODE ? require('ws') : window['WebSocket'];
+ ws = new WebSocket(url, opts);
+ ws.binaryType = 'arraybuffer';
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EHOSTUNREACH);
+ }
+ }
+
+
+ var peer = {
+ addr: addr,
+ port: port,
+ socket: ws,
+ dgram_send_queue: []
+ };
+
+ SOCKFS.websocket_sock_ops.addPeer(sock, peer);
+ SOCKFS.websocket_sock_ops.handlePeerEvents(sock, peer);
+
+ // if this is a bound dgram socket, send the port number first to allow
+ // us to override the ephemeral port reported to us by remotePort on the
+ // remote end.
+ if (sock.type === 2 && typeof sock.sport !== 'undefined') {
+ peer.dgram_send_queue.push(new Uint8Array([
+ 255, 255, 255, 255,
+ 'p'.charCodeAt(0), 'o'.charCodeAt(0), 'r'.charCodeAt(0), 't'.charCodeAt(0),
+ ((sock.sport & 0xff00) >> 8) , (sock.sport & 0xff)
+ ]));
+ }
+
+ return peer;
+ },getPeer:function (sock, addr, port) {
+ return sock.peers[addr + ':' + port];
+ },addPeer:function (sock, peer) {
+ sock.peers[peer.addr + ':' + peer.port] = peer;
+ },removePeer:function (sock, peer) {
+ delete sock.peers[peer.addr + ':' + peer.port];
+ },handlePeerEvents:function (sock, peer) {
+ var first = true;
+
+ var handleOpen = function () {
+ try {
+ var queued = peer.dgram_send_queue.shift();
+ while (queued) {
+ peer.socket.send(queued);
+ queued = peer.dgram_send_queue.shift();
+ }
+ } catch (e) {
+ // not much we can do here in the way of proper error handling as we've already
+ // lied and said this data was sent. shut it down.
+ peer.socket.close();
+ }
+ };
+
+ function handleMessage(data) {
+ assert(typeof data !== 'string' && data.byteLength !== undefined); // must receive an ArrayBuffer
+ data = new Uint8Array(data); // make a typed array view on the array buffer
+
+
+ // if this is the port message, override the peer's port with it
+ var wasfirst = first;
+ first = false;
+ if (wasfirst &&
+ data.length === 10 &&
+ data[0] === 255 && data[1] === 255 && data[2] === 255 && data[3] === 255 &&
+ data[4] === 'p'.charCodeAt(0) && data[5] === 'o'.charCodeAt(0) && data[6] === 'r'.charCodeAt(0) && data[7] === 't'.charCodeAt(0)) {
+ // update the peer's port and it's key in the peer map
+ var newport = ((data[8] << 8) | data[9]);
+ SOCKFS.websocket_sock_ops.removePeer(sock, peer);
+ peer.port = newport;
+ SOCKFS.websocket_sock_ops.addPeer(sock, peer);
+ return;
+ }
+
+ sock.recv_queue.push({ addr: peer.addr, port: peer.port, data: data });
+ };
+
+ if (ENVIRONMENT_IS_NODE) {
+ peer.socket.on('open', handleOpen);
+ peer.socket.on('message', function(data, flags) {
+ if (!flags.binary) {
+ return;
+ }
+ handleMessage((new Uint8Array(data)).buffer); // copy from node Buffer -> ArrayBuffer
+ });
+ peer.socket.on('error', function() {
+ // don't throw
+ });
+ } else {
+ peer.socket.onopen = handleOpen;
+ peer.socket.onmessage = function peer_socket_onmessage(event) {
+ handleMessage(event.data);
+ };
+ }
+ },poll:function (sock) {
+ if (sock.type === 1 && sock.server) {
+ // listen sockets should only say they're available for reading
+ // if there are pending clients.
+ return sock.pending.length ? (64 | 1) : 0;
+ }
+
+ var mask = 0;
+ var dest = sock.type === 1 ? // we only care about the socket state for connection-based sockets
+ SOCKFS.websocket_sock_ops.getPeer(sock, sock.daddr, sock.dport) :
+ null;
+
+ if (sock.recv_queue.length ||
+ !dest || // connection-less sockets are always ready to read
+ (dest && dest.socket.readyState === dest.socket.CLOSING) ||
+ (dest && dest.socket.readyState === dest.socket.CLOSED)) { // let recv return 0 once closed
+ mask |= (64 | 1);
+ }
+
+ if (!dest || // connection-less sockets are always ready to write
+ (dest && dest.socket.readyState === dest.socket.OPEN)) {
+ mask |= 4;
+ }
+
+ if ((dest && dest.socket.readyState === dest.socket.CLOSING) ||
+ (dest && dest.socket.readyState === dest.socket.CLOSED)) {
+ mask |= 16;
+ }
+
+ return mask;
+ },ioctl:function (sock, request, arg) {
+ switch (request) {
+ case 21531:
+ var bytes = 0;
+ if (sock.recv_queue.length) {
+ bytes = sock.recv_queue[0].data.length;
+ }
+ HEAP32[((arg)>>2)]=bytes;
+ return 0;
+ default:
+ return ERRNO_CODES.EINVAL;
+ }
+ },close:function (sock) {
+ // if we've spawned a listen server, close it
+ if (sock.server) {
+ try {
+ sock.server.close();
+ } catch (e) {
+ }
+ sock.server = null;
+ }
+ // close any peer connections
+ var peers = Object.keys(sock.peers);
+ for (var i = 0; i < peers.length; i++) {
+ var peer = sock.peers[peers[i]];
+ try {
+ peer.socket.close();
+ } catch (e) {
+ }
+ SOCKFS.websocket_sock_ops.removePeer(sock, peer);
+ }
+ return 0;
+ },bind:function (sock, addr, port) {
+ if (typeof sock.saddr !== 'undefined' || typeof sock.sport !== 'undefined') {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL); // already bound
+ }
+ sock.saddr = addr;
+ sock.sport = port || _mkport();
+ // in order to emulate dgram sockets, we need to launch a listen server when
+ // binding on a connection-less socket
+ // note: this is only required on the server side
+ if (sock.type === 2) {
+ // close the existing server if it exists
+ if (sock.server) {
+ sock.server.close();
+ sock.server = null;
+ }
+ // swallow error operation not supported error that occurs when binding in the
+ // browser where this isn't supported
+ try {
+ sock.sock_ops.listen(sock, 0);
+ } catch (e) {
+ if (!(e instanceof FS.ErrnoError)) throw e;
+ if (e.errno !== ERRNO_CODES.EOPNOTSUPP) throw e;
+ }
+ }
+ },connect:function (sock, addr, port) {
+ if (sock.server) {
+ throw new FS.ErrnoError(ERRNO_CODS.EOPNOTSUPP);
+ }
+
+ // TODO autobind
+ // if (!sock.addr && sock.type == 2) {
+ // }
+
+ // early out if we're already connected / in the middle of connecting
+ if (typeof sock.daddr !== 'undefined' && typeof sock.dport !== 'undefined') {
+ var dest = SOCKFS.websocket_sock_ops.getPeer(sock, sock.daddr, sock.dport);
+ if (dest) {
+ if (dest.socket.readyState === dest.socket.CONNECTING) {
+ throw new FS.ErrnoError(ERRNO_CODES.EALREADY);
+ } else {
+ throw new FS.ErrnoError(ERRNO_CODES.EISCONN);
+ }
+ }
+ }
+
+ // add the socket to our peer list and set our
+ // destination address / port to match
+ var peer = SOCKFS.websocket_sock_ops.createPeer(sock, addr, port);
+ sock.daddr = peer.addr;
+ sock.dport = peer.port;
+
+ // always "fail" in non-blocking mode
+ throw new FS.ErrnoError(ERRNO_CODES.EINPROGRESS);
+ },listen:function (sock, backlog) {
+ if (!ENVIRONMENT_IS_NODE) {
+ throw new FS.ErrnoError(ERRNO_CODES.EOPNOTSUPP);
+ }
+ if (sock.server) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL); // already listening
+ }
+ var WebSocketServer = require('ws').Server;
+ var host = sock.saddr;
+ sock.server = new WebSocketServer({
+ host: host,
+ port: sock.sport
+ // TODO support backlog
+ });
+
+ sock.server.on('connection', function(ws) {
+ if (sock.type === 1) {
+ var newsock = SOCKFS.createSocket(sock.family, sock.type, sock.protocol);
+
+ // create a peer on the new socket
+ var peer = SOCKFS.websocket_sock_ops.createPeer(newsock, ws);
+ newsock.daddr = peer.addr;
+ newsock.dport = peer.port;
+
+ // push to queue for accept to pick up
+ sock.pending.push(newsock);
+ } else {
+ // create a peer on the listen socket so calling sendto
+ // with the listen socket and an address will resolve
+ // to the correct client
+ SOCKFS.websocket_sock_ops.createPeer(sock, ws);
+ }
+ });
+ sock.server.on('closed', function() {
+ sock.server = null;
+ });
+ sock.server.on('error', function() {
+ // don't throw
+ });
+ },accept:function (listensock) {
+ if (!listensock.server) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var newsock = listensock.pending.shift();
+ newsock.stream.flags = listensock.stream.flags;
+ return newsock;
+ },getname:function (sock, peer) {
+ var addr, port;
+ if (peer) {
+ if (sock.daddr === undefined || sock.dport === undefined) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ }
+ addr = sock.daddr;
+ port = sock.dport;
+ } else {
+ // TODO saddr and sport will be set for bind()'d UDP sockets, but what
+ // should we be returning for TCP sockets that've been connect()'d?
+ addr = sock.saddr || 0;
+ port = sock.sport || 0;
+ }
+ return { addr: addr, port: port };
+ },sendmsg:function (sock, buffer, offset, length, addr, port) {
+ if (sock.type === 2) {
+ // connection-less sockets will honor the message address,
+ // and otherwise fall back to the bound destination address
+ if (addr === undefined || port === undefined) {
+ addr = sock.daddr;
+ port = sock.dport;
+ }
+ // if there was no address to fall back to, error out
+ if (addr === undefined || port === undefined) {
+ throw new FS.ErrnoError(ERRNO_CODES.EDESTADDRREQ);
+ }
+ } else {
+ // connection-based sockets will only use the bound
+ addr = sock.daddr;
+ port = sock.dport;
+ }
+
+ // find the peer for the destination address
+ var dest = SOCKFS.websocket_sock_ops.getPeer(sock, addr, port);
+
+ // early out if not connected with a connection-based socket
+ if (sock.type === 1) {
+ if (!dest || dest.socket.readyState === dest.socket.CLOSING || dest.socket.readyState === dest.socket.CLOSED) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ } else if (dest.socket.readyState === dest.socket.CONNECTING) {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ }
+
+ // create a copy of the incoming data to send, as the WebSocket API
+ // doesn't work entirely with an ArrayBufferView, it'll just send
+ // the entire underlying buffer
+ var data;
+ if (buffer instanceof Array || buffer instanceof ArrayBuffer) {
+ data = buffer.slice(offset, offset + length);
+ } else { // ArrayBufferView
+ data = buffer.buffer.slice(buffer.byteOffset + offset, buffer.byteOffset + offset + length);
+ }
+
+ // if we're emulating a connection-less dgram socket and don't have
+ // a cached connection, queue the buffer to send upon connect and
+ // lie, saying the data was sent now.
+ if (sock.type === 2) {
+ if (!dest || dest.socket.readyState !== dest.socket.OPEN) {
+ // if we're not connected, open a new connection
+ if (!dest || dest.socket.readyState === dest.socket.CLOSING || dest.socket.readyState === dest.socket.CLOSED) {
+ dest = SOCKFS.websocket_sock_ops.createPeer(sock, addr, port);
+ }
+ dest.dgram_send_queue.push(data);
+ return length;
+ }
+ }
+
+ try {
+ // send the actual data
+ dest.socket.send(data);
+ return length;
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ },recvmsg:function (sock, length) {
+ // http://pubs.opengroup.org/onlinepubs/7908799/xns/recvmsg.html
+ if (sock.type === 1 && sock.server) {
+ // tcp servers should not be recv()'ing on the listen socket
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ }
+
+ var queued = sock.recv_queue.shift();
+ if (!queued) {
+ if (sock.type === 1) {
+ var dest = SOCKFS.websocket_sock_ops.getPeer(sock, sock.daddr, sock.dport);
+
+ if (!dest) {
+ // if we have a destination address but are not connected, error out
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ }
+ else if (dest.socket.readyState === dest.socket.CLOSING || dest.socket.readyState === dest.socket.CLOSED) {
+ // return null if the socket has closed
+ return null;
+ }
+ else {
+ // else, our socket is in a valid state but truly has nothing available
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ } else {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ }
+
+ // queued.data will be an ArrayBuffer if it's unadulterated, but if it's
+ // requeued TCP data it'll be an ArrayBufferView
+ var queuedLength = queued.data.byteLength || queued.data.length;
+ var queuedOffset = queued.data.byteOffset || 0;
+ var queuedBuffer = queued.data.buffer || queued.data;
+ var bytesRead = Math.min(length, queuedLength);
+ var res = {
+ buffer: new Uint8Array(queuedBuffer, queuedOffset, bytesRead),
+ addr: queued.addr,
+ port: queued.port
+ };
+
+
+ // push back any unread data for TCP connections
+ if (sock.type === 1 && bytesRead < queuedLength) {
+ var bytesRemaining = queuedLength - bytesRead;
+ queued.data = new Uint8Array(queuedBuffer, queuedOffset + bytesRead, bytesRemaining);
+ sock.recv_queue.unshift(queued);
+ }
+
+ return res;
+ }}};function _send(fd, buf, len, flags) {
+ var sock = SOCKFS.getSocket(fd);
+ if (!sock) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+ // TODO honor flags
+ return _write(fd, buf, len);
+ }
+
+ function _pwrite(fildes, buf, nbyte, offset) {
+ // ssize_t pwrite(int fildes, const void *buf, size_t nbyte, off_t offset);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/write.html
+ var stream = FS.getStream(fildes);
+ if (!stream) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+ try {
+ var slab = HEAP8;
+ return FS.write(stream, slab, buf, nbyte, offset);
+ } catch (e) {
+ FS.handleFSError(e);
+ return -1;
+ }
+ }function _write(fildes, buf, nbyte) {
+ // ssize_t write(int fildes, const void *buf, size_t nbyte);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/write.html
+ var stream = FS.getStream(fildes);
+ if (!stream) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+
+
+ try {
+ var slab = HEAP8;
+ return FS.write(stream, slab, buf, nbyte);
+ } catch (e) {
+ FS.handleFSError(e);
+ return -1;
+ }
+ }
+
+ function _fileno(stream) {
+ // int fileno(FILE *stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fileno.html
+ stream = FS.getStreamFromPtr(stream);
+ if (!stream) return -1;
+ return stream.fd;
+ }function _fwrite(ptr, size, nitems, stream) {
+ // size_t fwrite(const void *restrict ptr, size_t size, size_t nitems, FILE *restrict stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fwrite.html
+ var bytesToWrite = nitems * size;
+ if (bytesToWrite == 0) return 0;
+ var fd = _fileno(stream);
+ var bytesWritten = _write(fd, ptr, bytesToWrite);
+ if (bytesWritten == -1) {
+ var streamObj = FS.getStreamFromPtr(stream);
+ if (streamObj) streamObj.error = true;
+ return 0;
+ } else {
+ return Math.floor(bytesWritten / size);
+ }
+ }
+
+
+
+ Module["_strlen"] = _strlen;
+
+ function __reallyNegative(x) {
+ return x < 0 || (x === 0 && (1/x) === -Infinity);
+ }function __formatString(format, varargs) {
+ var textIndex = format;
+ var argIndex = 0;
+ function getNextArg(type) {
+ // NOTE: Explicitly ignoring type safety. Otherwise this fails:
+ // int x = 4; printf("%c\n", (char)x);
+ var ret;
+ if (type === 'double') {
+ ret = HEAPF64[(((varargs)+(argIndex))>>3)];
+ } else if (type == 'i64') {
+ ret = [HEAP32[(((varargs)+(argIndex))>>2)],
+ HEAP32[(((varargs)+(argIndex+4))>>2)]];
+
+ } else {
+ type = 'i32'; // varargs are always i32, i64, or double
+ ret = HEAP32[(((varargs)+(argIndex))>>2)];
+ }
+ argIndex += Runtime.getNativeFieldSize(type);
+ return ret;
+ }
+
+ var ret = [];
+ var curr, next, currArg;
+ while(1) {
+ var startTextIndex = textIndex;
+ curr = HEAP8[(textIndex)];
+ if (curr === 0) break;
+ next = HEAP8[((textIndex+1)|0)];
+ if (curr == 37) {
+ // Handle flags.
+ var flagAlwaysSigned = false;
+ var flagLeftAlign = false;
+ var flagAlternative = false;
+ var flagZeroPad = false;
+ var flagPadSign = false;
+ flagsLoop: while (1) {
+ switch (next) {
+ case 43:
+ flagAlwaysSigned = true;
+ break;
+ case 45:
+ flagLeftAlign = true;
+ break;
+ case 35:
+ flagAlternative = true;
+ break;
+ case 48:
+ if (flagZeroPad) {
+ break flagsLoop;
+ } else {
+ flagZeroPad = true;
+ break;
+ }
+ case 32:
+ flagPadSign = true;
+ break;
+ default:
+ break flagsLoop;
+ }
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ }
+
+ // Handle width.
+ var width = 0;
+ if (next == 42) {
+ width = getNextArg('i32');
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ } else {
+ while (next >= 48 && next <= 57) {
+ width = width * 10 + (next - 48);
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ }
+ }
+
+ // Handle precision.
+ var precisionSet = false, precision = -1;
+ if (next == 46) {
+ precision = 0;
+ precisionSet = true;
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ if (next == 42) {
+ precision = getNextArg('i32');
+ textIndex++;
+ } else {
+ while(1) {
+ var precisionChr = HEAP8[((textIndex+1)|0)];
+ if (precisionChr < 48 ||
+ precisionChr > 57) break;
+ precision = precision * 10 + (precisionChr - 48);
+ textIndex++;
+ }
+ }
+ next = HEAP8[((textIndex+1)|0)];
+ }
+ if (precision < 0) {
+ precision = 6; // Standard default.
+ precisionSet = false;
+ }
+
+ // Handle integer sizes. WARNING: These assume a 32-bit architecture!
+ var argSize;
+ switch (String.fromCharCode(next)) {
+ case 'h':
+ var nextNext = HEAP8[((textIndex+2)|0)];
+ if (nextNext == 104) {
+ textIndex++;
+ argSize = 1; // char (actually i32 in varargs)
+ } else {
+ argSize = 2; // short (actually i32 in varargs)
+ }
+ break;
+ case 'l':
+ var nextNext = HEAP8[((textIndex+2)|0)];
+ if (nextNext == 108) {
+ textIndex++;
+ argSize = 8; // long long
+ } else {
+ argSize = 4; // long
+ }
+ break;
+ case 'L': // long long
+ case 'q': // int64_t
+ case 'j': // intmax_t
+ argSize = 8;
+ break;
+ case 'z': // size_t
+ case 't': // ptrdiff_t
+ case 'I': // signed ptrdiff_t or unsigned size_t
+ argSize = 4;
+ break;
+ default:
+ argSize = null;
+ }
+ if (argSize) textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+
+ // Handle type specifier.
+ switch (String.fromCharCode(next)) {
+ case 'd': case 'i': case 'u': case 'o': case 'x': case 'X': case 'p': {
+ // Integer.
+ var signed = next == 100 || next == 105;
+ argSize = argSize || 4;
+ var currArg = getNextArg('i' + (argSize * 8));
+ var argText;
+ // Flatten i64-1 [low, high] into a (slightly rounded) double
+ if (argSize == 8) {
+ currArg = Runtime.makeBigInt(currArg[0], currArg[1], next == 117);
+ }
+ // Truncate to requested size.
+ if (argSize <= 4) {
+ var limit = Math.pow(256, argSize) - 1;
+ currArg = (signed ? reSign : unSign)(currArg & limit, argSize * 8);
+ }
+ // Format the number.
+ var currAbsArg = Math.abs(currArg);
+ var prefix = '';
+ if (next == 100 || next == 105) {
+ argText = reSign(currArg, 8 * argSize, 1).toString(10);
+ } else if (next == 117) {
+ argText = unSign(currArg, 8 * argSize, 1).toString(10);
+ currArg = Math.abs(currArg);
+ } else if (next == 111) {
+ argText = (flagAlternative ? '0' : '') + currAbsArg.toString(8);
+ } else if (next == 120 || next == 88) {
+ prefix = (flagAlternative && currArg != 0) ? '0x' : '';
+ if (currArg < 0) {
+ // Represent negative numbers in hex as 2's complement.
+ currArg = -currArg;
+ argText = (currAbsArg - 1).toString(16);
+ var buffer = [];
+ for (var i = 0; i < argText.length; i++) {
+ buffer.push((0xF - parseInt(argText[i], 16)).toString(16));
+ }
+ argText = buffer.join('');
+ while (argText.length < argSize * 2) argText = 'f' + argText;
+ } else {
+ argText = currAbsArg.toString(16);
+ }
+ if (next == 88) {
+ prefix = prefix.toUpperCase();
+ argText = argText.toUpperCase();
+ }
+ } else if (next == 112) {
+ if (currAbsArg === 0) {
+ argText = '(nil)';
+ } else {
+ prefix = '0x';
+ argText = currAbsArg.toString(16);
+ }
+ }
+ if (precisionSet) {
+ while (argText.length < precision) {
+ argText = '0' + argText;
+ }
+ }
+
+ // Add sign if needed
+ if (currArg >= 0) {
+ if (flagAlwaysSigned) {
+ prefix = '+' + prefix;
+ } else if (flagPadSign) {
+ prefix = ' ' + prefix;
+ }
+ }
+
+ // Move sign to prefix so we zero-pad after the sign
+ if (argText.charAt(0) == '-') {
+ prefix = '-' + prefix;
+ argText = argText.substr(1);
+ }
+
+ // Add padding.
+ while (prefix.length + argText.length < width) {
+ if (flagLeftAlign) {
+ argText += ' ';
+ } else {
+ if (flagZeroPad) {
+ argText = '0' + argText;
+ } else {
+ prefix = ' ' + prefix;
+ }
+ }
+ }
+
+ // Insert the result into the buffer.
+ argText = prefix + argText;
+ argText.split('').forEach(function(chr) {
+ ret.push(chr.charCodeAt(0));
+ });
+ break;
+ }
+ case 'f': case 'F': case 'e': case 'E': case 'g': case 'G': {
+ // Float.
+ var currArg = getNextArg('double');
+ var argText;
+ if (isNaN(currArg)) {
+ argText = 'nan';
+ flagZeroPad = false;
+ } else if (!isFinite(currArg)) {
+ argText = (currArg < 0 ? '-' : '') + 'inf';
+ flagZeroPad = false;
+ } else {
+ var isGeneral = false;
+ var effectivePrecision = Math.min(precision, 20);
+
+ // Convert g/G to f/F or e/E, as per:
+ // http://pubs.opengroup.org/onlinepubs/9699919799/functions/printf.html
+ if (next == 103 || next == 71) {
+ isGeneral = true;
+ precision = precision || 1;
+ var exponent = parseInt(currArg.toExponential(effectivePrecision).split('e')[1], 10);
+ if (precision > exponent && exponent >= -4) {
+ next = ((next == 103) ? 'f' : 'F').charCodeAt(0);
+ precision -= exponent + 1;
+ } else {
+ next = ((next == 103) ? 'e' : 'E').charCodeAt(0);
+ precision--;
+ }
+ effectivePrecision = Math.min(precision, 20);
+ }
+
+ if (next == 101 || next == 69) {
+ argText = currArg.toExponential(effectivePrecision);
+ // Make sure the exponent has at least 2 digits.
+ if (/[eE][-+]\d$/.test(argText)) {
+ argText = argText.slice(0, -1) + '0' + argText.slice(-1);
+ }
+ } else if (next == 102 || next == 70) {
+ argText = currArg.toFixed(effectivePrecision);
+ if (currArg === 0 && __reallyNegative(currArg)) {
+ argText = '-' + argText;
+ }
+ }
+
+ var parts = argText.split('e');
+ if (isGeneral && !flagAlternative) {
+ // Discard trailing zeros and periods.
+ while (parts[0].length > 1 && parts[0].indexOf('.') != -1 &&
+ (parts[0].slice(-1) == '0' || parts[0].slice(-1) == '.')) {
+ parts[0] = parts[0].slice(0, -1);
+ }
+ } else {
+ // Make sure we have a period in alternative mode.
+ if (flagAlternative && argText.indexOf('.') == -1) parts[0] += '.';
+ // Zero pad until required precision.
+ while (precision > effectivePrecision++) parts[0] += '0';
+ }
+ argText = parts[0] + (parts.length > 1 ? 'e' + parts[1] : '');
+
+ // Capitalize 'E' if needed.
+ if (next == 69) argText = argText.toUpperCase();
+
+ // Add sign.
+ if (currArg >= 0) {
+ if (flagAlwaysSigned) {
+ argText = '+' + argText;
+ } else if (flagPadSign) {
+ argText = ' ' + argText;
+ }
+ }
+ }
+
+ // Add padding.
+ while (argText.length < width) {
+ if (flagLeftAlign) {
+ argText += ' ';
+ } else {
+ if (flagZeroPad && (argText[0] == '-' || argText[0] == '+')) {
+ argText = argText[0] + '0' + argText.slice(1);
+ } else {
+ argText = (flagZeroPad ? '0' : ' ') + argText;
+ }
+ }
+ }
+
+ // Adjust case.
+ if (next < 97) argText = argText.toUpperCase();
+
+ // Insert the result into the buffer.
+ argText.split('').forEach(function(chr) {
+ ret.push(chr.charCodeAt(0));
+ });
+ break;
+ }
+ case 's': {
+ // String.
+ var arg = getNextArg('i8*');
+ var argLength = arg ? _strlen(arg) : '(null)'.length;
+ if (precisionSet) argLength = Math.min(argLength, precision);
+ if (!flagLeftAlign) {
+ while (argLength < width--) {
+ ret.push(32);
+ }
+ }
+ if (arg) {
+ for (var i = 0; i < argLength; i++) {
+ ret.push(HEAPU8[((arg++)|0)]);
+ }
+ } else {
+ ret = ret.concat(intArrayFromString('(null)'.substr(0, argLength), true));
+ }
+ if (flagLeftAlign) {
+ while (argLength < width--) {
+ ret.push(32);
+ }
+ }
+ break;
+ }
+ case 'c': {
+ // Character.
+ if (flagLeftAlign) ret.push(getNextArg('i8'));
+ while (--width > 0) {
+ ret.push(32);
+ }
+ if (!flagLeftAlign) ret.push(getNextArg('i8'));
+ break;
+ }
+ case 'n': {
+ // Write the length written so far to the next parameter.
+ var ptr = getNextArg('i32*');
+ HEAP32[((ptr)>>2)]=ret.length;
+ break;
+ }
+ case '%': {
+ // Literal percent sign.
+ ret.push(curr);
+ break;
+ }
+ default: {
+ // Unknown specifiers remain untouched.
+ for (var i = startTextIndex; i < textIndex + 2; i++) {
+ ret.push(HEAP8[(i)]);
+ }
+ }
+ }
+ textIndex += 2;
+ // TODO: Support a/A (hex float) and m (last error) specifiers.
+ // TODO: Support %1${specifier} for arg selection.
+ } else {
+ ret.push(curr);
+ textIndex += 1;
+ }
+ }
+ return ret;
+ }function _fprintf(stream, format, varargs) {
+ // int fprintf(FILE *restrict stream, const char *restrict format, ...);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/printf.html
+ var result = __formatString(format, varargs);
+ var stack = Runtime.stackSave();
+ var ret = _fwrite(allocate(result, 'i8', ALLOC_STACK), 1, result.length, stream);
+ Runtime.stackRestore(stack);
+ return ret;
+ }function _printf(format, varargs) {
+ // int printf(const char *restrict format, ...);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/printf.html
+ var stdout = HEAP32[((_stdout)>>2)];
+ return _fprintf(stdout, format, varargs);
+ }
+
+
+ function _fputc(c, stream) {
+ // int fputc(int c, FILE *stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fputc.html
+ var chr = unSign(c & 0xFF);
+ HEAP8[((_fputc.ret)|0)]=chr;
+ var fd = _fileno(stream);
+ var ret = _write(fd, _fputc.ret, 1);
+ if (ret == -1) {
+ var streamObj = FS.getStreamFromPtr(stream);
+ if (streamObj) streamObj.error = true;
+ return -1;
+ } else {
+ return chr;
+ }
+ }function _putchar(c) {
+ // int putchar(int c);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/putchar.html
+ return _fputc(c, HEAP32[((_stdout)>>2)]);
+ }
+
+ function _sbrk(bytes) {
+ // Implement a Linux-like 'memory area' for our 'process'.
+ // Changes the size of the memory area by |bytes|; returns the
+ // address of the previous top ('break') of the memory area
+ // We control the "dynamic" memory - DYNAMIC_BASE to DYNAMICTOP
+ var self = _sbrk;
+ if (!self.called) {
+ DYNAMICTOP = alignMemoryPage(DYNAMICTOP); // make sure we start out aligned
+ self.called = true;
+ assert(Runtime.dynamicAlloc);
+ self.alloc = Runtime.dynamicAlloc;
+ Runtime.dynamicAlloc = function() { abort('cannot dynamically allocate, sbrk now has control') };
+ }
+ var ret = DYNAMICTOP;
+ if (bytes != 0) self.alloc(bytes);
+ return ret; // Previous break location.
+ }
+
+ function _sysconf(name) {
+ // long sysconf(int name);
+ // http://pubs.opengroup.org/onlinepubs/009695399/functions/sysconf.html
+ switch(name) {
+ case 30: return PAGE_SIZE;
+ case 132:
+ case 133:
+ case 12:
+ case 137:
+ case 138:
+ case 15:
+ case 235:
+ case 16:
+ case 17:
+ case 18:
+ case 19:
+ case 20:
+ case 149:
+ case 13:
+ case 10:
+ case 236:
+ case 153:
+ case 9:
+ case 21:
+ case 22:
+ case 159:
+ case 154:
+ case 14:
+ case 77:
+ case 78:
+ case 139:
+ case 80:
+ case 81:
+ case 79:
+ case 82:
+ case 68:
+ case 67:
+ case 164:
+ case 11:
+ case 29:
+ case 47:
+ case 48:
+ case 95:
+ case 52:
+ case 51:
+ case 46:
+ return 200809;
+ case 27:
+ case 246:
+ case 127:
+ case 128:
+ case 23:
+ case 24:
+ case 160:
+ case 161:
+ case 181:
+ case 182:
+ case 242:
+ case 183:
+ case 184:
+ case 243:
+ case 244:
+ case 245:
+ case 165:
+ case 178:
+ case 179:
+ case 49:
+ case 50:
+ case 168:
+ case 169:
+ case 175:
+ case 170:
+ case 171:
+ case 172:
+ case 97:
+ case 76:
+ case 32:
+ case 173:
+ case 35:
+ return -1;
+ case 176:
+ case 177:
+ case 7:
+ case 155:
+ case 8:
+ case 157:
+ case 125:
+ case 126:
+ case 92:
+ case 93:
+ case 129:
+ case 130:
+ case 131:
+ case 94:
+ case 91:
+ return 1;
+ case 74:
+ case 60:
+ case 69:
+ case 70:
+ case 4:
+ return 1024;
+ case 31:
+ case 42:
+ case 72:
+ return 32;
+ case 87:
+ case 26:
+ case 33:
+ return 2147483647;
+ case 34:
+ case 1:
+ return 47839;
+ case 38:
+ case 36:
+ return 99;
+ case 43:
+ case 37:
+ return 2048;
+ case 0: return 2097152;
+ case 3: return 65536;
+ case 28: return 32768;
+ case 44: return 32767;
+ case 75: return 16384;
+ case 39: return 1000;
+ case 89: return 700;
+ case 71: return 256;
+ case 40: return 255;
+ case 2: return 100;
+ case 180: return 64;
+ case 25: return 20;
+ case 5: return 16;
+ case 6: return 6;
+ case 73: return 4;
+ case 84: return 1;
+ }
+ ___setErrNo(ERRNO_CODES.EINVAL);
+ return -1;
+ }
+
+
+ Module["_memset"] = _memset;
+
+ function ___errno_location() {
+ return ___errno_state;
+ }
+
+ function _abort() {
+ Module['abort']();
+ }
+
+ var Browser={mainLoop:{scheduler:null,method:"",shouldPause:false,paused:false,queue:[],pause:function () {
+ Browser.mainLoop.shouldPause = true;
+ },resume:function () {
+ if (Browser.mainLoop.paused) {
+ Browser.mainLoop.paused = false;
+ Browser.mainLoop.scheduler();
+ }
+ Browser.mainLoop.shouldPause = false;
+ },updateStatus:function () {
+ if (Module['setStatus']) {
+ var message = Module['statusMessage'] || 'Please wait...';
+ var remaining = Browser.mainLoop.remainingBlockers;
+ var expected = Browser.mainLoop.expectedBlockers;
+ if (remaining) {
+ if (remaining < expected) {
+ Module['setStatus'](message + ' (' + (expected - remaining) + '/' + expected + ')');
+ } else {
+ Module['setStatus'](message);
+ }
+ } else {
+ Module['setStatus']('');
+ }
+ }
+ }},isFullScreen:false,pointerLock:false,moduleContextCreatedCallbacks:[],workers:[],init:function () {
+ if (!Module["preloadPlugins"]) Module["preloadPlugins"] = []; // needs to exist even in workers
+
+ if (Browser.initted || ENVIRONMENT_IS_WORKER) return;
+ Browser.initted = true;
+
+ try {
+ new Blob();
+ Browser.hasBlobConstructor = true;
+ } catch(e) {
+ Browser.hasBlobConstructor = false;
+ console.log("warning: no blob constructor, cannot create blobs with mimetypes");
+ }
+ Browser.BlobBuilder = typeof MozBlobBuilder != "undefined" ? MozBlobBuilder : (typeof WebKitBlobBuilder != "undefined" ? WebKitBlobBuilder : (!Browser.hasBlobConstructor ? console.log("warning: no BlobBuilder") : null));
+ Browser.URLObject = typeof window != "undefined" ? (window.URL ? window.URL : window.webkitURL) : undefined;
+ if (!Module.noImageDecoding && typeof Browser.URLObject === 'undefined') {
+ console.log("warning: Browser does not support creating object URLs. Built-in browser image decoding will not be available.");
+ Module.noImageDecoding = true;
+ }
+
+ // Support for plugins that can process preloaded files. You can add more of these to
+ // your app by creating and appending to Module.preloadPlugins.
+ //
+ // Each plugin is asked if it can handle a file based on the file's name. If it can,
+ // it is given the file's raw data. When it is done, it calls a callback with the file's
+ // (possibly modified) data. For example, a plugin might decompress a file, or it
+ // might create some side data structure for use later (like an Image element, etc.).
+
+ var imagePlugin = {};
+ imagePlugin['canHandle'] = function imagePlugin_canHandle(name) {
+ return !Module.noImageDecoding && /\.(jpg|jpeg|png|bmp)$/i.test(name);
+ };
+ imagePlugin['handle'] = function imagePlugin_handle(byteArray, name, onload, onerror) {
+ var b = null;
+ if (Browser.hasBlobConstructor) {
+ try {
+ b = new Blob([byteArray], { type: Browser.getMimetype(name) });
+ if (b.size !== byteArray.length) { // Safari bug #118630
+ // Safari's Blob can only take an ArrayBuffer
+ b = new Blob([(new Uint8Array(byteArray)).buffer], { type: Browser.getMimetype(name) });
+ }
+ } catch(e) {
+ Runtime.warnOnce('Blob constructor present but fails: ' + e + '; falling back to blob builder');
+ }
+ }
+ if (!b) {
+ var bb = new Browser.BlobBuilder();
+ bb.append((new Uint8Array(byteArray)).buffer); // we need to pass a buffer, and must copy the array to get the right data range
+ b = bb.getBlob();
+ }
+ var url = Browser.URLObject.createObjectURL(b);
+ var img = new Image();
+ img.onload = function img_onload() {
+ assert(img.complete, 'Image ' + name + ' could not be decoded');
+ var canvas = document.createElement('canvas');
+ canvas.width = img.width;
+ canvas.height = img.height;
+ var ctx = canvas.getContext('2d');
+ ctx.drawImage(img, 0, 0);
+ Module["preloadedImages"][name] = canvas;
+ Browser.URLObject.revokeObjectURL(url);
+ if (onload) onload(byteArray);
+ };
+ img.onerror = function img_onerror(event) {
+ console.log('Image ' + url + ' could not be decoded');
+ if (onerror) onerror();
+ };
+ img.src = url;
+ };
+ Module['preloadPlugins'].push(imagePlugin);
+
+ var audioPlugin = {};
+ audioPlugin['canHandle'] = function audioPlugin_canHandle(name) {
+ return !Module.noAudioDecoding && name.substr(-4) in { '.ogg': 1, '.wav': 1, '.mp3': 1 };
+ };
+ audioPlugin['handle'] = function audioPlugin_handle(byteArray, name, onload, onerror) {
+ var done = false;
+ function finish(audio) {
+ if (done) return;
+ done = true;
+ Module["preloadedAudios"][name] = audio;
+ if (onload) onload(byteArray);
+ }
+ function fail() {
+ if (done) return;
+ done = true;
+ Module["preloadedAudios"][name] = new Audio(); // empty shim
+ if (onerror) onerror();
+ }
+ if (Browser.hasBlobConstructor) {
+ try {
+ var b = new Blob([byteArray], { type: Browser.getMimetype(name) });
+ } catch(e) {
+ return fail();
+ }
+ var url = Browser.URLObject.createObjectURL(b); // XXX we never revoke this!
+ var audio = new Audio();
+ audio.addEventListener('canplaythrough', function() { finish(audio) }, false); // use addEventListener due to chromium bug 124926
+ audio.onerror = function audio_onerror(event) {
+ if (done) return;
+ console.log('warning: browser could not fully decode audio ' + name + ', trying slower base64 approach');
+ function encode64(data) {
+ var BASE = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';
+ var PAD = '=';
+ var ret = '';
+ var leftchar = 0;
+ var leftbits = 0;
+ for (var i = 0; i < data.length; i++) {
+ leftchar = (leftchar << 8) | data[i];
+ leftbits += 8;
+ while (leftbits >= 6) {
+ var curr = (leftchar >> (leftbits-6)) & 0x3f;
+ leftbits -= 6;
+ ret += BASE[curr];
+ }
+ }
+ if (leftbits == 2) {
+ ret += BASE[(leftchar&3) << 4];
+ ret += PAD + PAD;
+ } else if (leftbits == 4) {
+ ret += BASE[(leftchar&0xf) << 2];
+ ret += PAD;
+ }
+ return ret;
+ }
+ audio.src = 'data:audio/x-' + name.substr(-3) + ';base64,' + encode64(byteArray);
+ finish(audio); // we don't wait for confirmation this worked - but it's worth trying
+ };
+ audio.src = url;
+ // workaround for chrome bug 124926 - we do not always get oncanplaythrough or onerror
+ Browser.safeSetTimeout(function() {
+ finish(audio); // try to use it even though it is not necessarily ready to play
+ }, 10000);
+ } else {
+ return fail();
+ }
+ };
+ Module['preloadPlugins'].push(audioPlugin);
+
+ // Canvas event setup
+
+ var canvas = Module['canvas'];
+
+ // forced aspect ratio can be enabled by defining 'forcedAspectRatio' on Module
+ // Module['forcedAspectRatio'] = 4 / 3;
+
+ canvas.requestPointerLock = canvas['requestPointerLock'] ||
+ canvas['mozRequestPointerLock'] ||
+ canvas['webkitRequestPointerLock'] ||
+ canvas['msRequestPointerLock'] ||
+ function(){};
+ canvas.exitPointerLock = document['exitPointerLock'] ||
+ document['mozExitPointerLock'] ||
+ document['webkitExitPointerLock'] ||
+ document['msExitPointerLock'] ||
+ function(){}; // no-op if function does not exist
+ canvas.exitPointerLock = canvas.exitPointerLock.bind(document);
+
+ function pointerLockChange() {
+ Browser.pointerLock = document['pointerLockElement'] === canvas ||
+ document['mozPointerLockElement'] === canvas ||
+ document['webkitPointerLockElement'] === canvas ||
+ document['msPointerLockElement'] === canvas;
+ }
+
+ document.addEventListener('pointerlockchange', pointerLockChange, false);
+ document.addEventListener('mozpointerlockchange', pointerLockChange, false);
+ document.addEventListener('webkitpointerlockchange', pointerLockChange, false);
+ document.addEventListener('mspointerlockchange', pointerLockChange, false);
+
+ if (Module['elementPointerLock']) {
+ canvas.addEventListener("click", function(ev) {
+ if (!Browser.pointerLock && canvas.requestPointerLock) {
+ canvas.requestPointerLock();
+ ev.preventDefault();
+ }
+ }, false);
+ }
+ },createContext:function (canvas, useWebGL, setInModule, webGLContextAttributes) {
+ var ctx;
+ var errorInfo = '?';
+ function onContextCreationError(event) {
+ errorInfo = event.statusMessage || errorInfo;
+ }
+ try {
+ if (useWebGL) {
+ var contextAttributes = {
+ antialias: false,
+ alpha: false
+ };
+
+ if (webGLContextAttributes) {
+ for (var attribute in webGLContextAttributes) {
+ contextAttributes[attribute] = webGLContextAttributes[attribute];
+ }
+ }
+
+
+ canvas.addEventListener('webglcontextcreationerror', onContextCreationError, false);
+ try {
+ ['experimental-webgl', 'webgl'].some(function(webglId) {
+ return ctx = canvas.getContext(webglId, contextAttributes);
+ });
+ } finally {
+ canvas.removeEventListener('webglcontextcreationerror', onContextCreationError, false);
+ }
+ } else {
+ ctx = canvas.getContext('2d');
+ }
+ if (!ctx) throw ':(';
+ } catch (e) {
+ Module.print('Could not create canvas: ' + [errorInfo, e]);
+ return null;
+ }
+ if (useWebGL) {
+ // Set the background of the WebGL canvas to black
+ canvas.style.backgroundColor = "black";
+
+ // Warn on context loss
+ canvas.addEventListener('webglcontextlost', function(event) {
+ alert('WebGL context lost. You will need to reload the page.');
+ }, false);
+ }
+ if (setInModule) {
+ GLctx = Module.ctx = ctx;
+ Module.useWebGL = useWebGL;
+ Browser.moduleContextCreatedCallbacks.forEach(function(callback) { callback() });
+ Browser.init();
+ }
+ return ctx;
+ },destroyContext:function (canvas, useWebGL, setInModule) {},fullScreenHandlersInstalled:false,lockPointer:undefined,resizeCanvas:undefined,requestFullScreen:function (lockPointer, resizeCanvas) {
+ Browser.lockPointer = lockPointer;
+ Browser.resizeCanvas = resizeCanvas;
+ if (typeof Browser.lockPointer === 'undefined') Browser.lockPointer = true;
+ if (typeof Browser.resizeCanvas === 'undefined') Browser.resizeCanvas = false;
+
+ var canvas = Module['canvas'];
+ function fullScreenChange() {
+ Browser.isFullScreen = false;
+ var canvasContainer = canvas.parentNode;
+ if ((document['webkitFullScreenElement'] || document['webkitFullscreenElement'] ||
+ document['mozFullScreenElement'] || document['mozFullscreenElement'] ||
+ document['fullScreenElement'] || document['fullscreenElement'] ||
+ document['msFullScreenElement'] || document['msFullscreenElement'] ||
+ document['webkitCurrentFullScreenElement']) === canvasContainer) {
+ canvas.cancelFullScreen = document['cancelFullScreen'] ||
+ document['mozCancelFullScreen'] ||
+ document['webkitCancelFullScreen'] ||
+ document['msExitFullscreen'] ||
+ document['exitFullscreen'] ||
+ function() {};
+ canvas.cancelFullScreen = canvas.cancelFullScreen.bind(document);
+ if (Browser.lockPointer) canvas.requestPointerLock();
+ Browser.isFullScreen = true;
+ if (Browser.resizeCanvas) Browser.setFullScreenCanvasSize();
+ } else {
+
+ // remove the full screen specific parent of the canvas again to restore the HTML structure from before going full screen
+ canvasContainer.parentNode.insertBefore(canvas, canvasContainer);
+ canvasContainer.parentNode.removeChild(canvasContainer);
+
+ if (Browser.resizeCanvas) Browser.setWindowedCanvasSize();
+ }
+ if (Module['onFullScreen']) Module['onFullScreen'](Browser.isFullScreen);
+ Browser.updateCanvasDimensions(canvas);
+ }
+
+ if (!Browser.fullScreenHandlersInstalled) {
+ Browser.fullScreenHandlersInstalled = true;
+ document.addEventListener('fullscreenchange', fullScreenChange, false);
+ document.addEventListener('mozfullscreenchange', fullScreenChange, false);
+ document.addEventListener('webkitfullscreenchange', fullScreenChange, false);
+ document.addEventListener('MSFullscreenChange', fullScreenChange, false);
+ }
+
+ // create a new parent to ensure the canvas has no siblings. this allows browsers to optimize full screen performance when its parent is the full screen root
+ var canvasContainer = document.createElement("div");
+ canvas.parentNode.insertBefore(canvasContainer, canvas);
+ canvasContainer.appendChild(canvas);
+
+ // use parent of canvas as full screen root to allow aspect ratio correction (Firefox stretches the root to screen size)
+ canvasContainer.requestFullScreen = canvasContainer['requestFullScreen'] ||
+ canvasContainer['mozRequestFullScreen'] ||
+ canvasContainer['msRequestFullscreen'] ||
+ (canvasContainer['webkitRequestFullScreen'] ? function() { canvasContainer['webkitRequestFullScreen'](Element['ALLOW_KEYBOARD_INPUT']) } : null);
+ canvasContainer.requestFullScreen();
+ },requestAnimationFrame:function requestAnimationFrame(func) {
+ if (typeof window === 'undefined') { // Provide fallback to setTimeout if window is undefined (e.g. in Node.js)
+ setTimeout(func, 1000/60);
+ } else {
+ if (!window.requestAnimationFrame) {
+ window.requestAnimationFrame = window['requestAnimationFrame'] ||
+ window['mozRequestAnimationFrame'] ||
+ window['webkitRequestAnimationFrame'] ||
+ window['msRequestAnimationFrame'] ||
+ window['oRequestAnimationFrame'] ||
+ window['setTimeout'];
+ }
+ window.requestAnimationFrame(func);
+ }
+ },safeCallback:function (func) {
+ return function() {
+ if (!ABORT) return func.apply(null, arguments);
+ };
+ },safeRequestAnimationFrame:function (func) {
+ return Browser.requestAnimationFrame(function() {
+ if (!ABORT) func();
+ });
+ },safeSetTimeout:function (func, timeout) {
+ return setTimeout(function() {
+ if (!ABORT) func();
+ }, timeout);
+ },safeSetInterval:function (func, timeout) {
+ return setInterval(function() {
+ if (!ABORT) func();
+ }, timeout);
+ },getMimetype:function (name) {
+ return {
+ 'jpg': 'image/jpeg',
+ 'jpeg': 'image/jpeg',
+ 'png': 'image/png',
+ 'bmp': 'image/bmp',
+ 'ogg': 'audio/ogg',
+ 'wav': 'audio/wav',
+ 'mp3': 'audio/mpeg'
+ }[name.substr(name.lastIndexOf('.')+1)];
+ },getUserMedia:function (func) {
+ if(!window.getUserMedia) {
+ window.getUserMedia = navigator['getUserMedia'] ||
+ navigator['mozGetUserMedia'];
+ }
+ window.getUserMedia(func);
+ },getMovementX:function (event) {
+ return event['movementX'] ||
+ event['mozMovementX'] ||
+ event['webkitMovementX'] ||
+ 0;
+ },getMovementY:function (event) {
+ return event['movementY'] ||
+ event['mozMovementY'] ||
+ event['webkitMovementY'] ||
+ 0;
+ },getMouseWheelDelta:function (event) {
+ return Math.max(-1, Math.min(1, event.type === 'DOMMouseScroll' ? event.detail : -event.wheelDelta));
+ },mouseX:0,mouseY:0,mouseMovementX:0,mouseMovementY:0,calculateMouseEvent:function (event) { // event should be mousemove, mousedown or mouseup
+ if (Browser.pointerLock) {
+ // When the pointer is locked, calculate the coordinates
+ // based on the movement of the mouse.
+ // Workaround for Firefox bug 764498
+ if (event.type != 'mousemove' &&
+ ('mozMovementX' in event)) {
+ Browser.mouseMovementX = Browser.mouseMovementY = 0;
+ } else {
+ Browser.mouseMovementX = Browser.getMovementX(event);
+ Browser.mouseMovementY = Browser.getMovementY(event);
+ }
+
+ // check if SDL is available
+ if (typeof SDL != "undefined") {
+ Browser.mouseX = SDL.mouseX + Browser.mouseMovementX;
+ Browser.mouseY = SDL.mouseY + Browser.mouseMovementY;
+ } else {
+ // just add the mouse delta to the current absolut mouse position
+ // FIXME: ideally this should be clamped against the canvas size and zero
+ Browser.mouseX += Browser.mouseMovementX;
+ Browser.mouseY += Browser.mouseMovementY;
+ }
+ } else {
+ // Otherwise, calculate the movement based on the changes
+ // in the coordinates.
+ var rect = Module["canvas"].getBoundingClientRect();
+ var x, y;
+
+ // Neither .scrollX or .pageXOffset are defined in a spec, but
+ // we prefer .scrollX because it is currently in a spec draft.
+ // (see: http://www.w3.org/TR/2013/WD-cssom-view-20131217/)
+ var scrollX = ((typeof window.scrollX !== 'undefined') ? window.scrollX : window.pageXOffset);
+ var scrollY = ((typeof window.scrollY !== 'undefined') ? window.scrollY : window.pageYOffset);
+ if (event.type == 'touchstart' ||
+ event.type == 'touchend' ||
+ event.type == 'touchmove') {
+ var t = event.touches.item(0);
+ if (t) {
+ x = t.pageX - (scrollX + rect.left);
+ y = t.pageY - (scrollY + rect.top);
+ } else {
+ return;
+ }
+ } else {
+ x = event.pageX - (scrollX + rect.left);
+ y = event.pageY - (scrollY + rect.top);
+ }
+
+ // the canvas might be CSS-scaled compared to its backbuffer;
+ // SDL-using content will want mouse coordinates in terms
+ // of backbuffer units.
+ var cw = Module["canvas"].width;
+ var ch = Module["canvas"].height;
+ x = x * (cw / rect.width);
+ y = y * (ch / rect.height);
+
+ Browser.mouseMovementX = x - Browser.mouseX;
+ Browser.mouseMovementY = y - Browser.mouseY;
+ Browser.mouseX = x;
+ Browser.mouseY = y;
+ }
+ },xhrLoad:function (url, onload, onerror) {
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, true);
+ xhr.responseType = 'arraybuffer';
+ xhr.onload = function xhr_onload() {
+ if (xhr.status == 200 || (xhr.status == 0 && xhr.response)) { // file URLs can return 0
+ onload(xhr.response);
+ } else {
+ onerror();
+ }
+ };
+ xhr.onerror = onerror;
+ xhr.send(null);
+ },asyncLoad:function (url, onload, onerror, noRunDep) {
+ Browser.xhrLoad(url, function(arrayBuffer) {
+ assert(arrayBuffer, 'Loading data file "' + url + '" failed (no arrayBuffer).');
+ onload(new Uint8Array(arrayBuffer));
+ if (!noRunDep) removeRunDependency('al ' + url);
+ }, function(event) {
+ if (onerror) {
+ onerror();
+ } else {
+ throw 'Loading data file "' + url + '" failed.';
+ }
+ });
+ if (!noRunDep) addRunDependency('al ' + url);
+ },resizeListeners:[],updateResizeListeners:function () {
+ var canvas = Module['canvas'];
+ Browser.resizeListeners.forEach(function(listener) {
+ listener(canvas.width, canvas.height);
+ });
+ },setCanvasSize:function (width, height, noUpdates) {
+ var canvas = Module['canvas'];
+ Browser.updateCanvasDimensions(canvas, width, height);
+ if (!noUpdates) Browser.updateResizeListeners();
+ },windowedWidth:0,windowedHeight:0,setFullScreenCanvasSize:function () {
+ // check if SDL is available
+ if (typeof SDL != "undefined") {
+ var flags = HEAPU32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)];
+ flags = flags | 0x00800000; // set SDL_FULLSCREEN flag
+ HEAP32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)]=flags
+ }
+ Browser.updateResizeListeners();
+ },setWindowedCanvasSize:function () {
+ // check if SDL is available
+ if (typeof SDL != "undefined") {
+ var flags = HEAPU32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)];
+ flags = flags & ~0x00800000; // clear SDL_FULLSCREEN flag
+ HEAP32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)]=flags
+ }
+ Browser.updateResizeListeners();
+ },updateCanvasDimensions:function (canvas, wNative, hNative) {
+ if (wNative && hNative) {
+ canvas.widthNative = wNative;
+ canvas.heightNative = hNative;
+ } else {
+ wNative = canvas.widthNative;
+ hNative = canvas.heightNative;
+ }
+ var w = wNative;
+ var h = hNative;
+ if (Module['forcedAspectRatio'] && Module['forcedAspectRatio'] > 0) {
+ if (w/h < Module['forcedAspectRatio']) {
+ w = Math.round(h * Module['forcedAspectRatio']);
+ } else {
+ h = Math.round(w / Module['forcedAspectRatio']);
+ }
+ }
+ if (((document['webkitFullScreenElement'] || document['webkitFullscreenElement'] ||
+ document['mozFullScreenElement'] || document['mozFullscreenElement'] ||
+ document['fullScreenElement'] || document['fullscreenElement'] ||
+ document['msFullScreenElement'] || document['msFullscreenElement'] ||
+ document['webkitCurrentFullScreenElement']) === canvas.parentNode) && (typeof screen != 'undefined')) {
+ var factor = Math.min(screen.width / w, screen.height / h);
+ w = Math.round(w * factor);
+ h = Math.round(h * factor);
+ }
+ if (Browser.resizeCanvas) {
+ if (canvas.width != w) canvas.width = w;
+ if (canvas.height != h) canvas.height = h;
+ if (typeof canvas.style != 'undefined') {
+ canvas.style.removeProperty( "width");
+ canvas.style.removeProperty("height");
+ }
+ } else {
+ if (canvas.width != wNative) canvas.width = wNative;
+ if (canvas.height != hNative) canvas.height = hNative;
+ if (typeof canvas.style != 'undefined') {
+ if (w != wNative || h != hNative) {
+ canvas.style.setProperty( "width", w + "px", "important");
+ canvas.style.setProperty("height", h + "px", "important");
+ } else {
+ canvas.style.removeProperty( "width");
+ canvas.style.removeProperty("height");
+ }
+ }
+ }
+ }};
+
+ function _time(ptr) {
+ var ret = Math.floor(Date.now()/1000);
+ if (ptr) {
+ HEAP32[((ptr)>>2)]=ret;
+ }
+ return ret;
+ }
+
+
+
+ function _emscripten_memcpy_big(dest, src, num) {
+ HEAPU8.set(HEAPU8.subarray(src, src+num), dest);
+ return dest;
+ }
+ Module["_memcpy"] = _memcpy;
+FS.staticInit();__ATINIT__.unshift({ func: function() { if (!Module["noFSInit"] && !FS.init.initialized) FS.init() } });__ATMAIN__.push({ func: function() { FS.ignorePermissions = false } });__ATEXIT__.push({ func: function() { FS.quit() } });Module["FS_createFolder"] = FS.createFolder;Module["FS_createPath"] = FS.createPath;Module["FS_createDataFile"] = FS.createDataFile;Module["FS_createPreloadedFile"] = FS.createPreloadedFile;Module["FS_createLazyFile"] = FS.createLazyFile;Module["FS_createLink"] = FS.createLink;Module["FS_createDevice"] = FS.createDevice;
+___errno_state = Runtime.staticAlloc(4); HEAP32[((___errno_state)>>2)]=0;
+__ATINIT__.unshift({ func: function() { TTY.init() } });__ATEXIT__.push({ func: function() { TTY.shutdown() } });TTY.utf8 = new Runtime.UTF8Processor();
+if (ENVIRONMENT_IS_NODE) { var fs = require("fs"); NODEFS.staticInit(); }
+__ATINIT__.push({ func: function() { SOCKFS.root = FS.mount(SOCKFS, {}, null); } });
+_fputc.ret = allocate([0], "i8", ALLOC_STATIC);
+Module["requestFullScreen"] = function Module_requestFullScreen(lockPointer, resizeCanvas) { Browser.requestFullScreen(lockPointer, resizeCanvas) };
+ Module["requestAnimationFrame"] = function Module_requestAnimationFrame(func) { Browser.requestAnimationFrame(func) };
+ Module["setCanvasSize"] = function Module_setCanvasSize(width, height, noUpdates) { Browser.setCanvasSize(width, height, noUpdates) };
+ Module["pauseMainLoop"] = function Module_pauseMainLoop() { Browser.mainLoop.pause() };
+ Module["resumeMainLoop"] = function Module_resumeMainLoop() { Browser.mainLoop.resume() };
+ Module["getUserMedia"] = function Module_getUserMedia() { Browser.getUserMedia() }
+STACK_BASE = STACKTOP = Runtime.alignMemory(STATICTOP);
+
+staticSealed = true; // seal the static portion of memory
+
+STACK_MAX = STACK_BASE + 5242880;
+
+DYNAMIC_BASE = DYNAMICTOP = Runtime.alignMemory(STACK_MAX);
+
+assert(DYNAMIC_BASE < TOTAL_MEMORY, "TOTAL_MEMORY not big enough for stack");
+
+
+var Math_min = Math.min;
+function asmPrintInt(x, y) {
+ Module.print('int ' + x + ',' + y);// + ' ' + new Error().stack);
+}
+function asmPrintFloat(x, y) {
+ Module.print('float ' + x + ',' + y);// + ' ' + new Error().stack);
+}
+// EMSCRIPTEN_START_ASM
+var asm = Wasm.instantiateModuleFromAsm((function Module(global, env, buffer) {
+ 'use asm';
+ var HEAP8 = new global.Int8Array(buffer);
+ var HEAP16 = new global.Int16Array(buffer);
+ var HEAP32 = new global.Int32Array(buffer);
+ var HEAPU8 = new global.Uint8Array(buffer);
+ var HEAPU16 = new global.Uint16Array(buffer);
+ var HEAPU32 = new global.Uint32Array(buffer);
+ var HEAPF32 = new global.Float32Array(buffer);
+ var HEAPF64 = new global.Float64Array(buffer);
+
+ var STACKTOP=env.STACKTOP|0;
+ var STACK_MAX=env.STACK_MAX|0;
+ var tempDoublePtr=env.tempDoublePtr|0;
+ var ABORT=env.ABORT|0;
+
+ var __THREW__ = 0;
+ var threwValue = 0;
+ var setjmpId = 0;
+ var undef = 0;
+ var nan = +env.NaN, inf = +env.Infinity;
+ var tempInt = 0, tempBigInt = 0, tempBigIntP = 0, tempBigIntS = 0, tempBigIntR = 0.0, tempBigIntI = 0, tempBigIntD = 0, tempValue = 0, tempDouble = 0.0;
+
+ var tempRet0 = 0;
+ var tempRet1 = 0;
+ var tempRet2 = 0;
+ var tempRet3 = 0;
+ var tempRet4 = 0;
+ var tempRet5 = 0;
+ var tempRet6 = 0;
+ var tempRet7 = 0;
+ var tempRet8 = 0;
+ var tempRet9 = 0;
+ var Math_floor=global.Math.floor;
+ var Math_abs=global.Math.abs;
+ var Math_sqrt=global.Math.sqrt;
+ var Math_pow=global.Math.pow;
+ var Math_cos=global.Math.cos;
+ var Math_sin=global.Math.sin;
+ var Math_tan=global.Math.tan;
+ var Math_acos=global.Math.acos;
+ var Math_asin=global.Math.asin;
+ var Math_atan=global.Math.atan;
+ var Math_atan2=global.Math.atan2;
+ var Math_exp=global.Math.exp;
+ var Math_log=global.Math.log;
+ var Math_ceil=global.Math.ceil;
+ var Math_imul=global.Math.imul;
+ var abort=env.abort;
+ var assert=env.assert;
+ var asmPrintInt=env.asmPrintInt;
+ var asmPrintFloat=env.asmPrintFloat;
+ var Math_min=env.min;
+ var _fflush=env._fflush;
+ var _emscripten_memcpy_big=env._emscripten_memcpy_big;
+ var _putchar=env._putchar;
+ var _fputc=env._fputc;
+ var _send=env._send;
+ var _pwrite=env._pwrite;
+ var _abort=env._abort;
+ var __reallyNegative=env.__reallyNegative;
+ var _fwrite=env._fwrite;
+ var _sbrk=env._sbrk;
+ var _mkport=env._mkport;
+ var _fprintf=env._fprintf;
+ var ___setErrNo=env.___setErrNo;
+ var __formatString=env.__formatString;
+ var _fileno=env._fileno;
+ var _printf=env._printf;
+ var _time=env._time;
+ var _sysconf=env._sysconf;
+ var _write=env._write;
+ var ___errno_location=env.___errno_location;
+ var tempFloat = 0.0;
+
+// EMSCRIPTEN_START_FUNCS
+function _malloc(i12) {
+ i12 = i12 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0, i23 = 0, i24 = 0, i25 = 0, i26 = 0, i27 = 0, i28 = 0, i29 = 0, i30 = 0, i31 = 0, i32 = 0;
+ i1 = STACKTOP;
+ do {
+ if (i12 >>> 0 < 245) {
+ if (i12 >>> 0 < 11) {
+ i12 = 16;
+ } else {
+ i12 = i12 + 11 & -8;
+ }
+ i20 = i12 >>> 3;
+ i18 = HEAP32[14] | 0;
+ i21 = i18 >>> i20;
+ if ((i21 & 3 | 0) != 0) {
+ i6 = (i21 & 1 ^ 1) + i20 | 0;
+ i5 = i6 << 1;
+ i3 = 96 + (i5 << 2) | 0;
+ i5 = 96 + (i5 + 2 << 2) | 0;
+ i7 = HEAP32[i5 >> 2] | 0;
+ i2 = i7 + 8 | 0;
+ i4 = HEAP32[i2 >> 2] | 0;
+ do {
+ if ((i3 | 0) != (i4 | 0)) {
+ if (i4 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i8 = i4 + 12 | 0;
+ if ((HEAP32[i8 >> 2] | 0) == (i7 | 0)) {
+ HEAP32[i8 >> 2] = i3;
+ HEAP32[i5 >> 2] = i4;
+ break;
+ } else {
+ _abort();
+ }
+ } else {
+ HEAP32[14] = i18 & ~(1 << i6);
+ }
+ } while (0);
+ i32 = i6 << 3;
+ HEAP32[i7 + 4 >> 2] = i32 | 3;
+ i32 = i7 + (i32 | 4) | 0;
+ HEAP32[i32 >> 2] = HEAP32[i32 >> 2] | 1;
+ i32 = i2;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ if (i12 >>> 0 > (HEAP32[64 >> 2] | 0) >>> 0) {
+ if ((i21 | 0) != 0) {
+ i7 = 2 << i20;
+ i7 = i21 << i20 & (i7 | 0 - i7);
+ i7 = (i7 & 0 - i7) + -1 | 0;
+ i2 = i7 >>> 12 & 16;
+ i7 = i7 >>> i2;
+ i6 = i7 >>> 5 & 8;
+ i7 = i7 >>> i6;
+ i5 = i7 >>> 2 & 4;
+ i7 = i7 >>> i5;
+ i4 = i7 >>> 1 & 2;
+ i7 = i7 >>> i4;
+ i3 = i7 >>> 1 & 1;
+ i3 = (i6 | i2 | i5 | i4 | i3) + (i7 >>> i3) | 0;
+ i7 = i3 << 1;
+ i4 = 96 + (i7 << 2) | 0;
+ i7 = 96 + (i7 + 2 << 2) | 0;
+ i5 = HEAP32[i7 >> 2] | 0;
+ i2 = i5 + 8 | 0;
+ i6 = HEAP32[i2 >> 2] | 0;
+ do {
+ if ((i4 | 0) != (i6 | 0)) {
+ if (i6 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i8 = i6 + 12 | 0;
+ if ((HEAP32[i8 >> 2] | 0) == (i5 | 0)) {
+ HEAP32[i8 >> 2] = i4;
+ HEAP32[i7 >> 2] = i6;
+ break;
+ } else {
+ _abort();
+ }
+ } else {
+ HEAP32[14] = i18 & ~(1 << i3);
+ }
+ } while (0);
+ i6 = i3 << 3;
+ i4 = i6 - i12 | 0;
+ HEAP32[i5 + 4 >> 2] = i12 | 3;
+ i3 = i5 + i12 | 0;
+ HEAP32[i5 + (i12 | 4) >> 2] = i4 | 1;
+ HEAP32[i5 + i6 >> 2] = i4;
+ i6 = HEAP32[64 >> 2] | 0;
+ if ((i6 | 0) != 0) {
+ i5 = HEAP32[76 >> 2] | 0;
+ i8 = i6 >>> 3;
+ i9 = i8 << 1;
+ i6 = 96 + (i9 << 2) | 0;
+ i7 = HEAP32[14] | 0;
+ i8 = 1 << i8;
+ if ((i7 & i8 | 0) != 0) {
+ i7 = 96 + (i9 + 2 << 2) | 0;
+ i8 = HEAP32[i7 >> 2] | 0;
+ if (i8 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i28 = i7;
+ i27 = i8;
+ }
+ } else {
+ HEAP32[14] = i7 | i8;
+ i28 = 96 + (i9 + 2 << 2) | 0;
+ i27 = i6;
+ }
+ HEAP32[i28 >> 2] = i5;
+ HEAP32[i27 + 12 >> 2] = i5;
+ HEAP32[i5 + 8 >> 2] = i27;
+ HEAP32[i5 + 12 >> 2] = i6;
+ }
+ HEAP32[64 >> 2] = i4;
+ HEAP32[76 >> 2] = i3;
+ i32 = i2;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ i18 = HEAP32[60 >> 2] | 0;
+ if ((i18 | 0) != 0) {
+ i2 = (i18 & 0 - i18) + -1 | 0;
+ i31 = i2 >>> 12 & 16;
+ i2 = i2 >>> i31;
+ i30 = i2 >>> 5 & 8;
+ i2 = i2 >>> i30;
+ i32 = i2 >>> 2 & 4;
+ i2 = i2 >>> i32;
+ i6 = i2 >>> 1 & 2;
+ i2 = i2 >>> i6;
+ i3 = i2 >>> 1 & 1;
+ i3 = HEAP32[360 + ((i30 | i31 | i32 | i6 | i3) + (i2 >>> i3) << 2) >> 2] | 0;
+ i2 = (HEAP32[i3 + 4 >> 2] & -8) - i12 | 0;
+ i6 = i3;
+ while (1) {
+ i5 = HEAP32[i6 + 16 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ i5 = HEAP32[i6 + 20 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ break;
+ }
+ }
+ i6 = (HEAP32[i5 + 4 >> 2] & -8) - i12 | 0;
+ i4 = i6 >>> 0 < i2 >>> 0;
+ i2 = i4 ? i6 : i2;
+ i6 = i5;
+ i3 = i4 ? i5 : i3;
+ }
+ i6 = HEAP32[72 >> 2] | 0;
+ if (i3 >>> 0 < i6 >>> 0) {
+ _abort();
+ }
+ i4 = i3 + i12 | 0;
+ if (!(i3 >>> 0 < i4 >>> 0)) {
+ _abort();
+ }
+ i5 = HEAP32[i3 + 24 >> 2] | 0;
+ i7 = HEAP32[i3 + 12 >> 2] | 0;
+ do {
+ if ((i7 | 0) == (i3 | 0)) {
+ i8 = i3 + 20 | 0;
+ i7 = HEAP32[i8 >> 2] | 0;
+ if ((i7 | 0) == 0) {
+ i8 = i3 + 16 | 0;
+ i7 = HEAP32[i8 >> 2] | 0;
+ if ((i7 | 0) == 0) {
+ i26 = 0;
+ break;
+ }
+ }
+ while (1) {
+ i10 = i7 + 20 | 0;
+ i9 = HEAP32[i10 >> 2] | 0;
+ if ((i9 | 0) != 0) {
+ i7 = i9;
+ i8 = i10;
+ continue;
+ }
+ i10 = i7 + 16 | 0;
+ i9 = HEAP32[i10 >> 2] | 0;
+ if ((i9 | 0) == 0) {
+ break;
+ } else {
+ i7 = i9;
+ i8 = i10;
+ }
+ }
+ if (i8 >>> 0 < i6 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i8 >> 2] = 0;
+ i26 = i7;
+ break;
+ }
+ } else {
+ i8 = HEAP32[i3 + 8 >> 2] | 0;
+ if (i8 >>> 0 < i6 >>> 0) {
+ _abort();
+ }
+ i6 = i8 + 12 | 0;
+ if ((HEAP32[i6 >> 2] | 0) != (i3 | 0)) {
+ _abort();
+ }
+ i9 = i7 + 8 | 0;
+ if ((HEAP32[i9 >> 2] | 0) == (i3 | 0)) {
+ HEAP32[i6 >> 2] = i7;
+ HEAP32[i9 >> 2] = i8;
+ i26 = i7;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ do {
+ if ((i5 | 0) != 0) {
+ i7 = HEAP32[i3 + 28 >> 2] | 0;
+ i6 = 360 + (i7 << 2) | 0;
+ if ((i3 | 0) == (HEAP32[i6 >> 2] | 0)) {
+ HEAP32[i6 >> 2] = i26;
+ if ((i26 | 0) == 0) {
+ HEAP32[60 >> 2] = HEAP32[60 >> 2] & ~(1 << i7);
+ break;
+ }
+ } else {
+ if (i5 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i6 = i5 + 16 | 0;
+ if ((HEAP32[i6 >> 2] | 0) == (i3 | 0)) {
+ HEAP32[i6 >> 2] = i26;
+ } else {
+ HEAP32[i5 + 20 >> 2] = i26;
+ }
+ if ((i26 | 0) == 0) {
+ break;
+ }
+ }
+ if (i26 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i26 + 24 >> 2] = i5;
+ i5 = HEAP32[i3 + 16 >> 2] | 0;
+ do {
+ if ((i5 | 0) != 0) {
+ if (i5 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i26 + 16 >> 2] = i5;
+ HEAP32[i5 + 24 >> 2] = i26;
+ break;
+ }
+ }
+ } while (0);
+ i5 = HEAP32[i3 + 20 >> 2] | 0;
+ if ((i5 | 0) != 0) {
+ if (i5 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i26 + 20 >> 2] = i5;
+ HEAP32[i5 + 24 >> 2] = i26;
+ break;
+ }
+ }
+ }
+ } while (0);
+ if (i2 >>> 0 < 16) {
+ i32 = i2 + i12 | 0;
+ HEAP32[i3 + 4 >> 2] = i32 | 3;
+ i32 = i3 + (i32 + 4) | 0;
+ HEAP32[i32 >> 2] = HEAP32[i32 >> 2] | 1;
+ } else {
+ HEAP32[i3 + 4 >> 2] = i12 | 3;
+ HEAP32[i3 + (i12 | 4) >> 2] = i2 | 1;
+ HEAP32[i3 + (i2 + i12) >> 2] = i2;
+ i6 = HEAP32[64 >> 2] | 0;
+ if ((i6 | 0) != 0) {
+ i5 = HEAP32[76 >> 2] | 0;
+ i8 = i6 >>> 3;
+ i9 = i8 << 1;
+ i6 = 96 + (i9 << 2) | 0;
+ i7 = HEAP32[14] | 0;
+ i8 = 1 << i8;
+ if ((i7 & i8 | 0) != 0) {
+ i7 = 96 + (i9 + 2 << 2) | 0;
+ i8 = HEAP32[i7 >> 2] | 0;
+ if (i8 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i25 = i7;
+ i24 = i8;
+ }
+ } else {
+ HEAP32[14] = i7 | i8;
+ i25 = 96 + (i9 + 2 << 2) | 0;
+ i24 = i6;
+ }
+ HEAP32[i25 >> 2] = i5;
+ HEAP32[i24 + 12 >> 2] = i5;
+ HEAP32[i5 + 8 >> 2] = i24;
+ HEAP32[i5 + 12 >> 2] = i6;
+ }
+ HEAP32[64 >> 2] = i2;
+ HEAP32[76 >> 2] = i4;
+ }
+ i32 = i3 + 8 | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ }
+ } else {
+ if (!(i12 >>> 0 > 4294967231)) {
+ i24 = i12 + 11 | 0;
+ i12 = i24 & -8;
+ i26 = HEAP32[60 >> 2] | 0;
+ if ((i26 | 0) != 0) {
+ i25 = 0 - i12 | 0;
+ i24 = i24 >>> 8;
+ if ((i24 | 0) != 0) {
+ if (i12 >>> 0 > 16777215) {
+ i27 = 31;
+ } else {
+ i31 = (i24 + 1048320 | 0) >>> 16 & 8;
+ i32 = i24 << i31;
+ i30 = (i32 + 520192 | 0) >>> 16 & 4;
+ i32 = i32 << i30;
+ i27 = (i32 + 245760 | 0) >>> 16 & 2;
+ i27 = 14 - (i30 | i31 | i27) + (i32 << i27 >>> 15) | 0;
+ i27 = i12 >>> (i27 + 7 | 0) & 1 | i27 << 1;
+ }
+ } else {
+ i27 = 0;
+ }
+ i30 = HEAP32[360 + (i27 << 2) >> 2] | 0;
+ L126 : do {
+ if ((i30 | 0) == 0) {
+ i29 = 0;
+ i24 = 0;
+ } else {
+ if ((i27 | 0) == 31) {
+ i24 = 0;
+ } else {
+ i24 = 25 - (i27 >>> 1) | 0;
+ }
+ i29 = 0;
+ i28 = i12 << i24;
+ i24 = 0;
+ while (1) {
+ i32 = HEAP32[i30 + 4 >> 2] & -8;
+ i31 = i32 - i12 | 0;
+ if (i31 >>> 0 < i25 >>> 0) {
+ if ((i32 | 0) == (i12 | 0)) {
+ i25 = i31;
+ i29 = i30;
+ i24 = i30;
+ break L126;
+ } else {
+ i25 = i31;
+ i24 = i30;
+ }
+ }
+ i31 = HEAP32[i30 + 20 >> 2] | 0;
+ i30 = HEAP32[i30 + (i28 >>> 31 << 2) + 16 >> 2] | 0;
+ i29 = (i31 | 0) == 0 | (i31 | 0) == (i30 | 0) ? i29 : i31;
+ if ((i30 | 0) == 0) {
+ break;
+ } else {
+ i28 = i28 << 1;
+ }
+ }
+ }
+ } while (0);
+ if ((i29 | 0) == 0 & (i24 | 0) == 0) {
+ i32 = 2 << i27;
+ i26 = i26 & (i32 | 0 - i32);
+ if ((i26 | 0) == 0) {
+ break;
+ }
+ i32 = (i26 & 0 - i26) + -1 | 0;
+ i28 = i32 >>> 12 & 16;
+ i32 = i32 >>> i28;
+ i27 = i32 >>> 5 & 8;
+ i32 = i32 >>> i27;
+ i30 = i32 >>> 2 & 4;
+ i32 = i32 >>> i30;
+ i31 = i32 >>> 1 & 2;
+ i32 = i32 >>> i31;
+ i29 = i32 >>> 1 & 1;
+ i29 = HEAP32[360 + ((i27 | i28 | i30 | i31 | i29) + (i32 >>> i29) << 2) >> 2] | 0;
+ }
+ if ((i29 | 0) != 0) {
+ while (1) {
+ i27 = (HEAP32[i29 + 4 >> 2] & -8) - i12 | 0;
+ i26 = i27 >>> 0 < i25 >>> 0;
+ i25 = i26 ? i27 : i25;
+ i24 = i26 ? i29 : i24;
+ i26 = HEAP32[i29 + 16 >> 2] | 0;
+ if ((i26 | 0) != 0) {
+ i29 = i26;
+ continue;
+ }
+ i29 = HEAP32[i29 + 20 >> 2] | 0;
+ if ((i29 | 0) == 0) {
+ break;
+ }
+ }
+ }
+ if ((i24 | 0) != 0 ? i25 >>> 0 < ((HEAP32[64 >> 2] | 0) - i12 | 0) >>> 0 : 0) {
+ i4 = HEAP32[72 >> 2] | 0;
+ if (i24 >>> 0 < i4 >>> 0) {
+ _abort();
+ }
+ i2 = i24 + i12 | 0;
+ if (!(i24 >>> 0 < i2 >>> 0)) {
+ _abort();
+ }
+ i3 = HEAP32[i24 + 24 >> 2] | 0;
+ i6 = HEAP32[i24 + 12 >> 2] | 0;
+ do {
+ if ((i6 | 0) == (i24 | 0)) {
+ i6 = i24 + 20 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ i6 = i24 + 16 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ i22 = 0;
+ break;
+ }
+ }
+ while (1) {
+ i8 = i5 + 20 | 0;
+ i7 = HEAP32[i8 >> 2] | 0;
+ if ((i7 | 0) != 0) {
+ i5 = i7;
+ i6 = i8;
+ continue;
+ }
+ i7 = i5 + 16 | 0;
+ i8 = HEAP32[i7 >> 2] | 0;
+ if ((i8 | 0) == 0) {
+ break;
+ } else {
+ i5 = i8;
+ i6 = i7;
+ }
+ }
+ if (i6 >>> 0 < i4 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i6 >> 2] = 0;
+ i22 = i5;
+ break;
+ }
+ } else {
+ i5 = HEAP32[i24 + 8 >> 2] | 0;
+ if (i5 >>> 0 < i4 >>> 0) {
+ _abort();
+ }
+ i7 = i5 + 12 | 0;
+ if ((HEAP32[i7 >> 2] | 0) != (i24 | 0)) {
+ _abort();
+ }
+ i4 = i6 + 8 | 0;
+ if ((HEAP32[i4 >> 2] | 0) == (i24 | 0)) {
+ HEAP32[i7 >> 2] = i6;
+ HEAP32[i4 >> 2] = i5;
+ i22 = i6;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ do {
+ if ((i3 | 0) != 0) {
+ i4 = HEAP32[i24 + 28 >> 2] | 0;
+ i5 = 360 + (i4 << 2) | 0;
+ if ((i24 | 0) == (HEAP32[i5 >> 2] | 0)) {
+ HEAP32[i5 >> 2] = i22;
+ if ((i22 | 0) == 0) {
+ HEAP32[60 >> 2] = HEAP32[60 >> 2] & ~(1 << i4);
+ break;
+ }
+ } else {
+ if (i3 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i4 = i3 + 16 | 0;
+ if ((HEAP32[i4 >> 2] | 0) == (i24 | 0)) {
+ HEAP32[i4 >> 2] = i22;
+ } else {
+ HEAP32[i3 + 20 >> 2] = i22;
+ }
+ if ((i22 | 0) == 0) {
+ break;
+ }
+ }
+ if (i22 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i22 + 24 >> 2] = i3;
+ i3 = HEAP32[i24 + 16 >> 2] | 0;
+ do {
+ if ((i3 | 0) != 0) {
+ if (i3 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i22 + 16 >> 2] = i3;
+ HEAP32[i3 + 24 >> 2] = i22;
+ break;
+ }
+ }
+ } while (0);
+ i3 = HEAP32[i24 + 20 >> 2] | 0;
+ if ((i3 | 0) != 0) {
+ if (i3 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i22 + 20 >> 2] = i3;
+ HEAP32[i3 + 24 >> 2] = i22;
+ break;
+ }
+ }
+ }
+ } while (0);
+ L204 : do {
+ if (!(i25 >>> 0 < 16)) {
+ HEAP32[i24 + 4 >> 2] = i12 | 3;
+ HEAP32[i24 + (i12 | 4) >> 2] = i25 | 1;
+ HEAP32[i24 + (i25 + i12) >> 2] = i25;
+ i4 = i25 >>> 3;
+ if (i25 >>> 0 < 256) {
+ i6 = i4 << 1;
+ i3 = 96 + (i6 << 2) | 0;
+ i5 = HEAP32[14] | 0;
+ i4 = 1 << i4;
+ if ((i5 & i4 | 0) != 0) {
+ i5 = 96 + (i6 + 2 << 2) | 0;
+ i4 = HEAP32[i5 >> 2] | 0;
+ if (i4 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i21 = i5;
+ i20 = i4;
+ }
+ } else {
+ HEAP32[14] = i5 | i4;
+ i21 = 96 + (i6 + 2 << 2) | 0;
+ i20 = i3;
+ }
+ HEAP32[i21 >> 2] = i2;
+ HEAP32[i20 + 12 >> 2] = i2;
+ HEAP32[i24 + (i12 + 8) >> 2] = i20;
+ HEAP32[i24 + (i12 + 12) >> 2] = i3;
+ break;
+ }
+ i3 = i25 >>> 8;
+ if ((i3 | 0) != 0) {
+ if (i25 >>> 0 > 16777215) {
+ i3 = 31;
+ } else {
+ i31 = (i3 + 1048320 | 0) >>> 16 & 8;
+ i32 = i3 << i31;
+ i30 = (i32 + 520192 | 0) >>> 16 & 4;
+ i32 = i32 << i30;
+ i3 = (i32 + 245760 | 0) >>> 16 & 2;
+ i3 = 14 - (i30 | i31 | i3) + (i32 << i3 >>> 15) | 0;
+ i3 = i25 >>> (i3 + 7 | 0) & 1 | i3 << 1;
+ }
+ } else {
+ i3 = 0;
+ }
+ i6 = 360 + (i3 << 2) | 0;
+ HEAP32[i24 + (i12 + 28) >> 2] = i3;
+ HEAP32[i24 + (i12 + 20) >> 2] = 0;
+ HEAP32[i24 + (i12 + 16) >> 2] = 0;
+ i4 = HEAP32[60 >> 2] | 0;
+ i5 = 1 << i3;
+ if ((i4 & i5 | 0) == 0) {
+ HEAP32[60 >> 2] = i4 | i5;
+ HEAP32[i6 >> 2] = i2;
+ HEAP32[i24 + (i12 + 24) >> 2] = i6;
+ HEAP32[i24 + (i12 + 12) >> 2] = i2;
+ HEAP32[i24 + (i12 + 8) >> 2] = i2;
+ break;
+ }
+ i4 = HEAP32[i6 >> 2] | 0;
+ if ((i3 | 0) == 31) {
+ i3 = 0;
+ } else {
+ i3 = 25 - (i3 >>> 1) | 0;
+ }
+ L225 : do {
+ if ((HEAP32[i4 + 4 >> 2] & -8 | 0) != (i25 | 0)) {
+ i3 = i25 << i3;
+ while (1) {
+ i6 = i4 + (i3 >>> 31 << 2) + 16 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ break;
+ }
+ if ((HEAP32[i5 + 4 >> 2] & -8 | 0) == (i25 | 0)) {
+ i18 = i5;
+ break L225;
+ } else {
+ i3 = i3 << 1;
+ i4 = i5;
+ }
+ }
+ if (i6 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i6 >> 2] = i2;
+ HEAP32[i24 + (i12 + 24) >> 2] = i4;
+ HEAP32[i24 + (i12 + 12) >> 2] = i2;
+ HEAP32[i24 + (i12 + 8) >> 2] = i2;
+ break L204;
+ }
+ } else {
+ i18 = i4;
+ }
+ } while (0);
+ i4 = i18 + 8 | 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ i5 = HEAP32[72 >> 2] | 0;
+ if (i18 >>> 0 < i5 >>> 0) {
+ _abort();
+ }
+ if (i3 >>> 0 < i5 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i3 + 12 >> 2] = i2;
+ HEAP32[i4 >> 2] = i2;
+ HEAP32[i24 + (i12 + 8) >> 2] = i3;
+ HEAP32[i24 + (i12 + 12) >> 2] = i18;
+ HEAP32[i24 + (i12 + 24) >> 2] = 0;
+ break;
+ }
+ } else {
+ i32 = i25 + i12 | 0;
+ HEAP32[i24 + 4 >> 2] = i32 | 3;
+ i32 = i24 + (i32 + 4) | 0;
+ HEAP32[i32 >> 2] = HEAP32[i32 >> 2] | 1;
+ }
+ } while (0);
+ i32 = i24 + 8 | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ }
+ } else {
+ i12 = -1;
+ }
+ }
+ } while (0);
+ i18 = HEAP32[64 >> 2] | 0;
+ if (!(i12 >>> 0 > i18 >>> 0)) {
+ i3 = i18 - i12 | 0;
+ i2 = HEAP32[76 >> 2] | 0;
+ if (i3 >>> 0 > 15) {
+ HEAP32[76 >> 2] = i2 + i12;
+ HEAP32[64 >> 2] = i3;
+ HEAP32[i2 + (i12 + 4) >> 2] = i3 | 1;
+ HEAP32[i2 + i18 >> 2] = i3;
+ HEAP32[i2 + 4 >> 2] = i12 | 3;
+ } else {
+ HEAP32[64 >> 2] = 0;
+ HEAP32[76 >> 2] = 0;
+ HEAP32[i2 + 4 >> 2] = i18 | 3;
+ i32 = i2 + (i18 + 4) | 0;
+ HEAP32[i32 >> 2] = HEAP32[i32 >> 2] | 1;
+ }
+ i32 = i2 + 8 | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ i18 = HEAP32[68 >> 2] | 0;
+ if (i12 >>> 0 < i18 >>> 0) {
+ i31 = i18 - i12 | 0;
+ HEAP32[68 >> 2] = i31;
+ i32 = HEAP32[80 >> 2] | 0;
+ HEAP32[80 >> 2] = i32 + i12;
+ HEAP32[i32 + (i12 + 4) >> 2] = i31 | 1;
+ HEAP32[i32 + 4 >> 2] = i12 | 3;
+ i32 = i32 + 8 | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ do {
+ if ((HEAP32[132] | 0) == 0) {
+ i18 = _sysconf(30) | 0;
+ if ((i18 + -1 & i18 | 0) == 0) {
+ HEAP32[536 >> 2] = i18;
+ HEAP32[532 >> 2] = i18;
+ HEAP32[540 >> 2] = -1;
+ HEAP32[544 >> 2] = -1;
+ HEAP32[548 >> 2] = 0;
+ HEAP32[500 >> 2] = 0;
+ HEAP32[132] = (_time(0) | 0) & -16 ^ 1431655768;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ i20 = i12 + 48 | 0;
+ i25 = HEAP32[536 >> 2] | 0;
+ i21 = i12 + 47 | 0;
+ i22 = i25 + i21 | 0;
+ i25 = 0 - i25 | 0;
+ i18 = i22 & i25;
+ if (!(i18 >>> 0 > i12 >>> 0)) {
+ i32 = 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ i24 = HEAP32[496 >> 2] | 0;
+ if ((i24 | 0) != 0 ? (i31 = HEAP32[488 >> 2] | 0, i32 = i31 + i18 | 0, i32 >>> 0 <= i31 >>> 0 | i32 >>> 0 > i24 >>> 0) : 0) {
+ i32 = 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ L269 : do {
+ if ((HEAP32[500 >> 2] & 4 | 0) == 0) {
+ i26 = HEAP32[80 >> 2] | 0;
+ L271 : do {
+ if ((i26 | 0) != 0) {
+ i24 = 504 | 0;
+ while (1) {
+ i27 = HEAP32[i24 >> 2] | 0;
+ if (!(i27 >>> 0 > i26 >>> 0) ? (i23 = i24 + 4 | 0, (i27 + (HEAP32[i23 >> 2] | 0) | 0) >>> 0 > i26 >>> 0) : 0) {
+ break;
+ }
+ i24 = HEAP32[i24 + 8 >> 2] | 0;
+ if ((i24 | 0) == 0) {
+ i13 = 182;
+ break L271;
+ }
+ }
+ if ((i24 | 0) != 0) {
+ i25 = i22 - (HEAP32[68 >> 2] | 0) & i25;
+ if (i25 >>> 0 < 2147483647) {
+ i13 = _sbrk(i25 | 0) | 0;
+ i26 = (i13 | 0) == ((HEAP32[i24 >> 2] | 0) + (HEAP32[i23 >> 2] | 0) | 0);
+ i22 = i13;
+ i24 = i25;
+ i23 = i26 ? i13 : -1;
+ i25 = i26 ? i25 : 0;
+ i13 = 191;
+ } else {
+ i25 = 0;
+ }
+ } else {
+ i13 = 182;
+ }
+ } else {
+ i13 = 182;
+ }
+ } while (0);
+ do {
+ if ((i13 | 0) == 182) {
+ i23 = _sbrk(0) | 0;
+ if ((i23 | 0) != (-1 | 0)) {
+ i24 = i23;
+ i22 = HEAP32[532 >> 2] | 0;
+ i25 = i22 + -1 | 0;
+ if ((i25 & i24 | 0) == 0) {
+ i25 = i18;
+ } else {
+ i25 = i18 - i24 + (i25 + i24 & 0 - i22) | 0;
+ }
+ i24 = HEAP32[488 >> 2] | 0;
+ i26 = i24 + i25 | 0;
+ if (i25 >>> 0 > i12 >>> 0 & i25 >>> 0 < 2147483647) {
+ i22 = HEAP32[496 >> 2] | 0;
+ if ((i22 | 0) != 0 ? i26 >>> 0 <= i24 >>> 0 | i26 >>> 0 > i22 >>> 0 : 0) {
+ i25 = 0;
+ break;
+ }
+ i22 = _sbrk(i25 | 0) | 0;
+ i13 = (i22 | 0) == (i23 | 0);
+ i24 = i25;
+ i23 = i13 ? i23 : -1;
+ i25 = i13 ? i25 : 0;
+ i13 = 191;
+ } else {
+ i25 = 0;
+ }
+ } else {
+ i25 = 0;
+ }
+ }
+ } while (0);
+ L291 : do {
+ if ((i13 | 0) == 191) {
+ i13 = 0 - i24 | 0;
+ if ((i23 | 0) != (-1 | 0)) {
+ i17 = i23;
+ i14 = i25;
+ i13 = 202;
+ break L269;
+ }
+ do {
+ if ((i22 | 0) != (-1 | 0) & i24 >>> 0 < 2147483647 & i24 >>> 0 < i20 >>> 0 ? (i19 = HEAP32[536 >> 2] | 0, i19 = i21 - i24 + i19 & 0 - i19, i19 >>> 0 < 2147483647) : 0) {
+ if ((_sbrk(i19 | 0) | 0) == (-1 | 0)) {
+ _sbrk(i13 | 0) | 0;
+ break L291;
+ } else {
+ i24 = i19 + i24 | 0;
+ break;
+ }
+ }
+ } while (0);
+ if ((i22 | 0) != (-1 | 0)) {
+ i17 = i22;
+ i14 = i24;
+ i13 = 202;
+ break L269;
+ }
+ }
+ } while (0);
+ HEAP32[500 >> 2] = HEAP32[500 >> 2] | 4;
+ i13 = 199;
+ } else {
+ i25 = 0;
+ i13 = 199;
+ }
+ } while (0);
+ if ((((i13 | 0) == 199 ? i18 >>> 0 < 2147483647 : 0) ? (i17 = _sbrk(i18 | 0) | 0, i16 = _sbrk(0) | 0, (i16 | 0) != (-1 | 0) & (i17 | 0) != (-1 | 0) & i17 >>> 0 < i16 >>> 0) : 0) ? (i15 = i16 - i17 | 0, i14 = i15 >>> 0 > (i12 + 40 | 0) >>> 0, i14) : 0) {
+ i14 = i14 ? i15 : i25;
+ i13 = 202;
+ }
+ if ((i13 | 0) == 202) {
+ i15 = (HEAP32[488 >> 2] | 0) + i14 | 0;
+ HEAP32[488 >> 2] = i15;
+ if (i15 >>> 0 > (HEAP32[492 >> 2] | 0) >>> 0) {
+ HEAP32[492 >> 2] = i15;
+ }
+ i15 = HEAP32[80 >> 2] | 0;
+ L311 : do {
+ if ((i15 | 0) != 0) {
+ i21 = 504 | 0;
+ while (1) {
+ i16 = HEAP32[i21 >> 2] | 0;
+ i19 = i21 + 4 | 0;
+ i20 = HEAP32[i19 >> 2] | 0;
+ if ((i17 | 0) == (i16 + i20 | 0)) {
+ i13 = 214;
+ break;
+ }
+ i18 = HEAP32[i21 + 8 >> 2] | 0;
+ if ((i18 | 0) == 0) {
+ break;
+ } else {
+ i21 = i18;
+ }
+ }
+ if (((i13 | 0) == 214 ? (HEAP32[i21 + 12 >> 2] & 8 | 0) == 0 : 0) ? i15 >>> 0 >= i16 >>> 0 & i15 >>> 0 < i17 >>> 0 : 0) {
+ HEAP32[i19 >> 2] = i20 + i14;
+ i2 = (HEAP32[68 >> 2] | 0) + i14 | 0;
+ i3 = i15 + 8 | 0;
+ if ((i3 & 7 | 0) == 0) {
+ i3 = 0;
+ } else {
+ i3 = 0 - i3 & 7;
+ }
+ i32 = i2 - i3 | 0;
+ HEAP32[80 >> 2] = i15 + i3;
+ HEAP32[68 >> 2] = i32;
+ HEAP32[i15 + (i3 + 4) >> 2] = i32 | 1;
+ HEAP32[i15 + (i2 + 4) >> 2] = 40;
+ HEAP32[84 >> 2] = HEAP32[544 >> 2];
+ break;
+ }
+ if (i17 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ HEAP32[72 >> 2] = i17;
+ }
+ i19 = i17 + i14 | 0;
+ i16 = 504 | 0;
+ while (1) {
+ if ((HEAP32[i16 >> 2] | 0) == (i19 | 0)) {
+ i13 = 224;
+ break;
+ }
+ i18 = HEAP32[i16 + 8 >> 2] | 0;
+ if ((i18 | 0) == 0) {
+ break;
+ } else {
+ i16 = i18;
+ }
+ }
+ if ((i13 | 0) == 224 ? (HEAP32[i16 + 12 >> 2] & 8 | 0) == 0 : 0) {
+ HEAP32[i16 >> 2] = i17;
+ i6 = i16 + 4 | 0;
+ HEAP32[i6 >> 2] = (HEAP32[i6 >> 2] | 0) + i14;
+ i6 = i17 + 8 | 0;
+ if ((i6 & 7 | 0) == 0) {
+ i6 = 0;
+ } else {
+ i6 = 0 - i6 & 7;
+ }
+ i7 = i17 + (i14 + 8) | 0;
+ if ((i7 & 7 | 0) == 0) {
+ i13 = 0;
+ } else {
+ i13 = 0 - i7 & 7;
+ }
+ i15 = i17 + (i13 + i14) | 0;
+ i8 = i6 + i12 | 0;
+ i7 = i17 + i8 | 0;
+ i10 = i15 - (i17 + i6) - i12 | 0;
+ HEAP32[i17 + (i6 + 4) >> 2] = i12 | 3;
+ L348 : do {
+ if ((i15 | 0) != (HEAP32[80 >> 2] | 0)) {
+ if ((i15 | 0) == (HEAP32[76 >> 2] | 0)) {
+ i32 = (HEAP32[64 >> 2] | 0) + i10 | 0;
+ HEAP32[64 >> 2] = i32;
+ HEAP32[76 >> 2] = i7;
+ HEAP32[i17 + (i8 + 4) >> 2] = i32 | 1;
+ HEAP32[i17 + (i32 + i8) >> 2] = i32;
+ break;
+ }
+ i12 = i14 + 4 | 0;
+ i18 = HEAP32[i17 + (i12 + i13) >> 2] | 0;
+ if ((i18 & 3 | 0) == 1) {
+ i11 = i18 & -8;
+ i16 = i18 >>> 3;
+ do {
+ if (!(i18 >>> 0 < 256)) {
+ i9 = HEAP32[i17 + ((i13 | 24) + i14) >> 2] | 0;
+ i19 = HEAP32[i17 + (i14 + 12 + i13) >> 2] | 0;
+ do {
+ if ((i19 | 0) == (i15 | 0)) {
+ i19 = i13 | 16;
+ i18 = i17 + (i12 + i19) | 0;
+ i16 = HEAP32[i18 >> 2] | 0;
+ if ((i16 | 0) == 0) {
+ i18 = i17 + (i19 + i14) | 0;
+ i16 = HEAP32[i18 >> 2] | 0;
+ if ((i16 | 0) == 0) {
+ i5 = 0;
+ break;
+ }
+ }
+ while (1) {
+ i20 = i16 + 20 | 0;
+ i19 = HEAP32[i20 >> 2] | 0;
+ if ((i19 | 0) != 0) {
+ i16 = i19;
+ i18 = i20;
+ continue;
+ }
+ i19 = i16 + 16 | 0;
+ i20 = HEAP32[i19 >> 2] | 0;
+ if ((i20 | 0) == 0) {
+ break;
+ } else {
+ i16 = i20;
+ i18 = i19;
+ }
+ }
+ if (i18 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i18 >> 2] = 0;
+ i5 = i16;
+ break;
+ }
+ } else {
+ i18 = HEAP32[i17 + ((i13 | 8) + i14) >> 2] | 0;
+ if (i18 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i16 = i18 + 12 | 0;
+ if ((HEAP32[i16 >> 2] | 0) != (i15 | 0)) {
+ _abort();
+ }
+ i20 = i19 + 8 | 0;
+ if ((HEAP32[i20 >> 2] | 0) == (i15 | 0)) {
+ HEAP32[i16 >> 2] = i19;
+ HEAP32[i20 >> 2] = i18;
+ i5 = i19;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ if ((i9 | 0) != 0) {
+ i16 = HEAP32[i17 + (i14 + 28 + i13) >> 2] | 0;
+ i18 = 360 + (i16 << 2) | 0;
+ if ((i15 | 0) == (HEAP32[i18 >> 2] | 0)) {
+ HEAP32[i18 >> 2] = i5;
+ if ((i5 | 0) == 0) {
+ HEAP32[60 >> 2] = HEAP32[60 >> 2] & ~(1 << i16);
+ break;
+ }
+ } else {
+ if (i9 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i16 = i9 + 16 | 0;
+ if ((HEAP32[i16 >> 2] | 0) == (i15 | 0)) {
+ HEAP32[i16 >> 2] = i5;
+ } else {
+ HEAP32[i9 + 20 >> 2] = i5;
+ }
+ if ((i5 | 0) == 0) {
+ break;
+ }
+ }
+ if (i5 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i5 + 24 >> 2] = i9;
+ i15 = i13 | 16;
+ i9 = HEAP32[i17 + (i15 + i14) >> 2] | 0;
+ do {
+ if ((i9 | 0) != 0) {
+ if (i9 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i5 + 16 >> 2] = i9;
+ HEAP32[i9 + 24 >> 2] = i5;
+ break;
+ }
+ }
+ } while (0);
+ i9 = HEAP32[i17 + (i12 + i15) >> 2] | 0;
+ if ((i9 | 0) != 0) {
+ if (i9 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i5 + 20 >> 2] = i9;
+ HEAP32[i9 + 24 >> 2] = i5;
+ break;
+ }
+ }
+ }
+ } else {
+ i5 = HEAP32[i17 + ((i13 | 8) + i14) >> 2] | 0;
+ i12 = HEAP32[i17 + (i14 + 12 + i13) >> 2] | 0;
+ i18 = 96 + (i16 << 1 << 2) | 0;
+ if ((i5 | 0) != (i18 | 0)) {
+ if (i5 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ if ((HEAP32[i5 + 12 >> 2] | 0) != (i15 | 0)) {
+ _abort();
+ }
+ }
+ if ((i12 | 0) == (i5 | 0)) {
+ HEAP32[14] = HEAP32[14] & ~(1 << i16);
+ break;
+ }
+ if ((i12 | 0) != (i18 | 0)) {
+ if (i12 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i16 = i12 + 8 | 0;
+ if ((HEAP32[i16 >> 2] | 0) == (i15 | 0)) {
+ i9 = i16;
+ } else {
+ _abort();
+ }
+ } else {
+ i9 = i12 + 8 | 0;
+ }
+ HEAP32[i5 + 12 >> 2] = i12;
+ HEAP32[i9 >> 2] = i5;
+ }
+ } while (0);
+ i15 = i17 + ((i11 | i13) + i14) | 0;
+ i10 = i11 + i10 | 0;
+ }
+ i5 = i15 + 4 | 0;
+ HEAP32[i5 >> 2] = HEAP32[i5 >> 2] & -2;
+ HEAP32[i17 + (i8 + 4) >> 2] = i10 | 1;
+ HEAP32[i17 + (i10 + i8) >> 2] = i10;
+ i5 = i10 >>> 3;
+ if (i10 >>> 0 < 256) {
+ i10 = i5 << 1;
+ i2 = 96 + (i10 << 2) | 0;
+ i9 = HEAP32[14] | 0;
+ i5 = 1 << i5;
+ if ((i9 & i5 | 0) != 0) {
+ i9 = 96 + (i10 + 2 << 2) | 0;
+ i5 = HEAP32[i9 >> 2] | 0;
+ if (i5 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i3 = i9;
+ i4 = i5;
+ }
+ } else {
+ HEAP32[14] = i9 | i5;
+ i3 = 96 + (i10 + 2 << 2) | 0;
+ i4 = i2;
+ }
+ HEAP32[i3 >> 2] = i7;
+ HEAP32[i4 + 12 >> 2] = i7;
+ HEAP32[i17 + (i8 + 8) >> 2] = i4;
+ HEAP32[i17 + (i8 + 12) >> 2] = i2;
+ break;
+ }
+ i3 = i10 >>> 8;
+ if ((i3 | 0) != 0) {
+ if (i10 >>> 0 > 16777215) {
+ i3 = 31;
+ } else {
+ i31 = (i3 + 1048320 | 0) >>> 16 & 8;
+ i32 = i3 << i31;
+ i30 = (i32 + 520192 | 0) >>> 16 & 4;
+ i32 = i32 << i30;
+ i3 = (i32 + 245760 | 0) >>> 16 & 2;
+ i3 = 14 - (i30 | i31 | i3) + (i32 << i3 >>> 15) | 0;
+ i3 = i10 >>> (i3 + 7 | 0) & 1 | i3 << 1;
+ }
+ } else {
+ i3 = 0;
+ }
+ i4 = 360 + (i3 << 2) | 0;
+ HEAP32[i17 + (i8 + 28) >> 2] = i3;
+ HEAP32[i17 + (i8 + 20) >> 2] = 0;
+ HEAP32[i17 + (i8 + 16) >> 2] = 0;
+ i9 = HEAP32[60 >> 2] | 0;
+ i5 = 1 << i3;
+ if ((i9 & i5 | 0) == 0) {
+ HEAP32[60 >> 2] = i9 | i5;
+ HEAP32[i4 >> 2] = i7;
+ HEAP32[i17 + (i8 + 24) >> 2] = i4;
+ HEAP32[i17 + (i8 + 12) >> 2] = i7;
+ HEAP32[i17 + (i8 + 8) >> 2] = i7;
+ break;
+ }
+ i4 = HEAP32[i4 >> 2] | 0;
+ if ((i3 | 0) == 31) {
+ i3 = 0;
+ } else {
+ i3 = 25 - (i3 >>> 1) | 0;
+ }
+ L444 : do {
+ if ((HEAP32[i4 + 4 >> 2] & -8 | 0) != (i10 | 0)) {
+ i3 = i10 << i3;
+ while (1) {
+ i5 = i4 + (i3 >>> 31 << 2) + 16 | 0;
+ i9 = HEAP32[i5 >> 2] | 0;
+ if ((i9 | 0) == 0) {
+ break;
+ }
+ if ((HEAP32[i9 + 4 >> 2] & -8 | 0) == (i10 | 0)) {
+ i2 = i9;
+ break L444;
+ } else {
+ i3 = i3 << 1;
+ i4 = i9;
+ }
+ }
+ if (i5 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i5 >> 2] = i7;
+ HEAP32[i17 + (i8 + 24) >> 2] = i4;
+ HEAP32[i17 + (i8 + 12) >> 2] = i7;
+ HEAP32[i17 + (i8 + 8) >> 2] = i7;
+ break L348;
+ }
+ } else {
+ i2 = i4;
+ }
+ } while (0);
+ i4 = i2 + 8 | 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ i5 = HEAP32[72 >> 2] | 0;
+ if (i2 >>> 0 < i5 >>> 0) {
+ _abort();
+ }
+ if (i3 >>> 0 < i5 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i3 + 12 >> 2] = i7;
+ HEAP32[i4 >> 2] = i7;
+ HEAP32[i17 + (i8 + 8) >> 2] = i3;
+ HEAP32[i17 + (i8 + 12) >> 2] = i2;
+ HEAP32[i17 + (i8 + 24) >> 2] = 0;
+ break;
+ }
+ } else {
+ i32 = (HEAP32[68 >> 2] | 0) + i10 | 0;
+ HEAP32[68 >> 2] = i32;
+ HEAP32[80 >> 2] = i7;
+ HEAP32[i17 + (i8 + 4) >> 2] = i32 | 1;
+ }
+ } while (0);
+ i32 = i17 + (i6 | 8) | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ i3 = 504 | 0;
+ while (1) {
+ i2 = HEAP32[i3 >> 2] | 0;
+ if (!(i2 >>> 0 > i15 >>> 0) ? (i11 = HEAP32[i3 + 4 >> 2] | 0, i10 = i2 + i11 | 0, i10 >>> 0 > i15 >>> 0) : 0) {
+ break;
+ }
+ i3 = HEAP32[i3 + 8 >> 2] | 0;
+ }
+ i3 = i2 + (i11 + -39) | 0;
+ if ((i3 & 7 | 0) == 0) {
+ i3 = 0;
+ } else {
+ i3 = 0 - i3 & 7;
+ }
+ i2 = i2 + (i11 + -47 + i3) | 0;
+ i2 = i2 >>> 0 < (i15 + 16 | 0) >>> 0 ? i15 : i2;
+ i3 = i2 + 8 | 0;
+ i4 = i17 + 8 | 0;
+ if ((i4 & 7 | 0) == 0) {
+ i4 = 0;
+ } else {
+ i4 = 0 - i4 & 7;
+ }
+ i32 = i14 + -40 - i4 | 0;
+ HEAP32[80 >> 2] = i17 + i4;
+ HEAP32[68 >> 2] = i32;
+ HEAP32[i17 + (i4 + 4) >> 2] = i32 | 1;
+ HEAP32[i17 + (i14 + -36) >> 2] = 40;
+ HEAP32[84 >> 2] = HEAP32[544 >> 2];
+ HEAP32[i2 + 4 >> 2] = 27;
+ HEAP32[i3 + 0 >> 2] = HEAP32[504 >> 2];
+ HEAP32[i3 + 4 >> 2] = HEAP32[508 >> 2];
+ HEAP32[i3 + 8 >> 2] = HEAP32[512 >> 2];
+ HEAP32[i3 + 12 >> 2] = HEAP32[516 >> 2];
+ HEAP32[504 >> 2] = i17;
+ HEAP32[508 >> 2] = i14;
+ HEAP32[516 >> 2] = 0;
+ HEAP32[512 >> 2] = i3;
+ i4 = i2 + 28 | 0;
+ HEAP32[i4 >> 2] = 7;
+ if ((i2 + 32 | 0) >>> 0 < i10 >>> 0) {
+ while (1) {
+ i3 = i4 + 4 | 0;
+ HEAP32[i3 >> 2] = 7;
+ if ((i4 + 8 | 0) >>> 0 < i10 >>> 0) {
+ i4 = i3;
+ } else {
+ break;
+ }
+ }
+ }
+ if ((i2 | 0) != (i15 | 0)) {
+ i2 = i2 - i15 | 0;
+ i3 = i15 + (i2 + 4) | 0;
+ HEAP32[i3 >> 2] = HEAP32[i3 >> 2] & -2;
+ HEAP32[i15 + 4 >> 2] = i2 | 1;
+ HEAP32[i15 + i2 >> 2] = i2;
+ i3 = i2 >>> 3;
+ if (i2 >>> 0 < 256) {
+ i4 = i3 << 1;
+ i2 = 96 + (i4 << 2) | 0;
+ i5 = HEAP32[14] | 0;
+ i3 = 1 << i3;
+ if ((i5 & i3 | 0) != 0) {
+ i4 = 96 + (i4 + 2 << 2) | 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ if (i3 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i7 = i4;
+ i8 = i3;
+ }
+ } else {
+ HEAP32[14] = i5 | i3;
+ i7 = 96 + (i4 + 2 << 2) | 0;
+ i8 = i2;
+ }
+ HEAP32[i7 >> 2] = i15;
+ HEAP32[i8 + 12 >> 2] = i15;
+ HEAP32[i15 + 8 >> 2] = i8;
+ HEAP32[i15 + 12 >> 2] = i2;
+ break;
+ }
+ i3 = i2 >>> 8;
+ if ((i3 | 0) != 0) {
+ if (i2 >>> 0 > 16777215) {
+ i3 = 31;
+ } else {
+ i31 = (i3 + 1048320 | 0) >>> 16 & 8;
+ i32 = i3 << i31;
+ i30 = (i32 + 520192 | 0) >>> 16 & 4;
+ i32 = i32 << i30;
+ i3 = (i32 + 245760 | 0) >>> 16 & 2;
+ i3 = 14 - (i30 | i31 | i3) + (i32 << i3 >>> 15) | 0;
+ i3 = i2 >>> (i3 + 7 | 0) & 1 | i3 << 1;
+ }
+ } else {
+ i3 = 0;
+ }
+ i7 = 360 + (i3 << 2) | 0;
+ HEAP32[i15 + 28 >> 2] = i3;
+ HEAP32[i15 + 20 >> 2] = 0;
+ HEAP32[i15 + 16 >> 2] = 0;
+ i4 = HEAP32[60 >> 2] | 0;
+ i5 = 1 << i3;
+ if ((i4 & i5 | 0) == 0) {
+ HEAP32[60 >> 2] = i4 | i5;
+ HEAP32[i7 >> 2] = i15;
+ HEAP32[i15 + 24 >> 2] = i7;
+ HEAP32[i15 + 12 >> 2] = i15;
+ HEAP32[i15 + 8 >> 2] = i15;
+ break;
+ }
+ i4 = HEAP32[i7 >> 2] | 0;
+ if ((i3 | 0) == 31) {
+ i3 = 0;
+ } else {
+ i3 = 25 - (i3 >>> 1) | 0;
+ }
+ L499 : do {
+ if ((HEAP32[i4 + 4 >> 2] & -8 | 0) != (i2 | 0)) {
+ i3 = i2 << i3;
+ while (1) {
+ i7 = i4 + (i3 >>> 31 << 2) + 16 | 0;
+ i5 = HEAP32[i7 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ break;
+ }
+ if ((HEAP32[i5 + 4 >> 2] & -8 | 0) == (i2 | 0)) {
+ i6 = i5;
+ break L499;
+ } else {
+ i3 = i3 << 1;
+ i4 = i5;
+ }
+ }
+ if (i7 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i7 >> 2] = i15;
+ HEAP32[i15 + 24 >> 2] = i4;
+ HEAP32[i15 + 12 >> 2] = i15;
+ HEAP32[i15 + 8 >> 2] = i15;
+ break L311;
+ }
+ } else {
+ i6 = i4;
+ }
+ } while (0);
+ i4 = i6 + 8 | 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ i2 = HEAP32[72 >> 2] | 0;
+ if (i6 >>> 0 < i2 >>> 0) {
+ _abort();
+ }
+ if (i3 >>> 0 < i2 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i3 + 12 >> 2] = i15;
+ HEAP32[i4 >> 2] = i15;
+ HEAP32[i15 + 8 >> 2] = i3;
+ HEAP32[i15 + 12 >> 2] = i6;
+ HEAP32[i15 + 24 >> 2] = 0;
+ break;
+ }
+ }
+ } else {
+ i32 = HEAP32[72 >> 2] | 0;
+ if ((i32 | 0) == 0 | i17 >>> 0 < i32 >>> 0) {
+ HEAP32[72 >> 2] = i17;
+ }
+ HEAP32[504 >> 2] = i17;
+ HEAP32[508 >> 2] = i14;
+ HEAP32[516 >> 2] = 0;
+ HEAP32[92 >> 2] = HEAP32[132];
+ HEAP32[88 >> 2] = -1;
+ i2 = 0;
+ do {
+ i32 = i2 << 1;
+ i31 = 96 + (i32 << 2) | 0;
+ HEAP32[96 + (i32 + 3 << 2) >> 2] = i31;
+ HEAP32[96 + (i32 + 2 << 2) >> 2] = i31;
+ i2 = i2 + 1 | 0;
+ } while ((i2 | 0) != 32);
+ i2 = i17 + 8 | 0;
+ if ((i2 & 7 | 0) == 0) {
+ i2 = 0;
+ } else {
+ i2 = 0 - i2 & 7;
+ }
+ i32 = i14 + -40 - i2 | 0;
+ HEAP32[80 >> 2] = i17 + i2;
+ HEAP32[68 >> 2] = i32;
+ HEAP32[i17 + (i2 + 4) >> 2] = i32 | 1;
+ HEAP32[i17 + (i14 + -36) >> 2] = 40;
+ HEAP32[84 >> 2] = HEAP32[544 >> 2];
+ }
+ } while (0);
+ i2 = HEAP32[68 >> 2] | 0;
+ if (i2 >>> 0 > i12 >>> 0) {
+ i31 = i2 - i12 | 0;
+ HEAP32[68 >> 2] = i31;
+ i32 = HEAP32[80 >> 2] | 0;
+ HEAP32[80 >> 2] = i32 + i12;
+ HEAP32[i32 + (i12 + 4) >> 2] = i31 | 1;
+ HEAP32[i32 + 4 >> 2] = i12 | 3;
+ i32 = i32 + 8 | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ }
+ HEAP32[(___errno_location() | 0) >> 2] = 12;
+ i32 = 0;
+ STACKTOP = i1;
+ return i32 | 0;
+}
+function _free(i7) {
+ i7 = i7 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0;
+ i1 = STACKTOP;
+ if ((i7 | 0) == 0) {
+ STACKTOP = i1;
+ return;
+ }
+ i15 = i7 + -8 | 0;
+ i16 = HEAP32[72 >> 2] | 0;
+ if (i15 >>> 0 < i16 >>> 0) {
+ _abort();
+ }
+ i13 = HEAP32[i7 + -4 >> 2] | 0;
+ i12 = i13 & 3;
+ if ((i12 | 0) == 1) {
+ _abort();
+ }
+ i8 = i13 & -8;
+ i6 = i7 + (i8 + -8) | 0;
+ do {
+ if ((i13 & 1 | 0) == 0) {
+ i19 = HEAP32[i15 >> 2] | 0;
+ if ((i12 | 0) == 0) {
+ STACKTOP = i1;
+ return;
+ }
+ i15 = -8 - i19 | 0;
+ i13 = i7 + i15 | 0;
+ i12 = i19 + i8 | 0;
+ if (i13 >>> 0 < i16 >>> 0) {
+ _abort();
+ }
+ if ((i13 | 0) == (HEAP32[76 >> 2] | 0)) {
+ i2 = i7 + (i8 + -4) | 0;
+ if ((HEAP32[i2 >> 2] & 3 | 0) != 3) {
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ HEAP32[64 >> 2] = i12;
+ HEAP32[i2 >> 2] = HEAP32[i2 >> 2] & -2;
+ HEAP32[i7 + (i15 + 4) >> 2] = i12 | 1;
+ HEAP32[i6 >> 2] = i12;
+ STACKTOP = i1;
+ return;
+ }
+ i18 = i19 >>> 3;
+ if (i19 >>> 0 < 256) {
+ i2 = HEAP32[i7 + (i15 + 8) >> 2] | 0;
+ i11 = HEAP32[i7 + (i15 + 12) >> 2] | 0;
+ i14 = 96 + (i18 << 1 << 2) | 0;
+ if ((i2 | 0) != (i14 | 0)) {
+ if (i2 >>> 0 < i16 >>> 0) {
+ _abort();
+ }
+ if ((HEAP32[i2 + 12 >> 2] | 0) != (i13 | 0)) {
+ _abort();
+ }
+ }
+ if ((i11 | 0) == (i2 | 0)) {
+ HEAP32[14] = HEAP32[14] & ~(1 << i18);
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ if ((i11 | 0) != (i14 | 0)) {
+ if (i11 >>> 0 < i16 >>> 0) {
+ _abort();
+ }
+ i14 = i11 + 8 | 0;
+ if ((HEAP32[i14 >> 2] | 0) == (i13 | 0)) {
+ i17 = i14;
+ } else {
+ _abort();
+ }
+ } else {
+ i17 = i11 + 8 | 0;
+ }
+ HEAP32[i2 + 12 >> 2] = i11;
+ HEAP32[i17 >> 2] = i2;
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ i17 = HEAP32[i7 + (i15 + 24) >> 2] | 0;
+ i18 = HEAP32[i7 + (i15 + 12) >> 2] | 0;
+ do {
+ if ((i18 | 0) == (i13 | 0)) {
+ i19 = i7 + (i15 + 20) | 0;
+ i18 = HEAP32[i19 >> 2] | 0;
+ if ((i18 | 0) == 0) {
+ i19 = i7 + (i15 + 16) | 0;
+ i18 = HEAP32[i19 >> 2] | 0;
+ if ((i18 | 0) == 0) {
+ i14 = 0;
+ break;
+ }
+ }
+ while (1) {
+ i21 = i18 + 20 | 0;
+ i20 = HEAP32[i21 >> 2] | 0;
+ if ((i20 | 0) != 0) {
+ i18 = i20;
+ i19 = i21;
+ continue;
+ }
+ i20 = i18 + 16 | 0;
+ i21 = HEAP32[i20 >> 2] | 0;
+ if ((i21 | 0) == 0) {
+ break;
+ } else {
+ i18 = i21;
+ i19 = i20;
+ }
+ }
+ if (i19 >>> 0 < i16 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i19 >> 2] = 0;
+ i14 = i18;
+ break;
+ }
+ } else {
+ i19 = HEAP32[i7 + (i15 + 8) >> 2] | 0;
+ if (i19 >>> 0 < i16 >>> 0) {
+ _abort();
+ }
+ i16 = i19 + 12 | 0;
+ if ((HEAP32[i16 >> 2] | 0) != (i13 | 0)) {
+ _abort();
+ }
+ i20 = i18 + 8 | 0;
+ if ((HEAP32[i20 >> 2] | 0) == (i13 | 0)) {
+ HEAP32[i16 >> 2] = i18;
+ HEAP32[i20 >> 2] = i19;
+ i14 = i18;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ if ((i17 | 0) != 0) {
+ i18 = HEAP32[i7 + (i15 + 28) >> 2] | 0;
+ i16 = 360 + (i18 << 2) | 0;
+ if ((i13 | 0) == (HEAP32[i16 >> 2] | 0)) {
+ HEAP32[i16 >> 2] = i14;
+ if ((i14 | 0) == 0) {
+ HEAP32[60 >> 2] = HEAP32[60 >> 2] & ~(1 << i18);
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ } else {
+ if (i17 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i16 = i17 + 16 | 0;
+ if ((HEAP32[i16 >> 2] | 0) == (i13 | 0)) {
+ HEAP32[i16 >> 2] = i14;
+ } else {
+ HEAP32[i17 + 20 >> 2] = i14;
+ }
+ if ((i14 | 0) == 0) {
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ }
+ if (i14 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i14 + 24 >> 2] = i17;
+ i16 = HEAP32[i7 + (i15 + 16) >> 2] | 0;
+ do {
+ if ((i16 | 0) != 0) {
+ if (i16 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i14 + 16 >> 2] = i16;
+ HEAP32[i16 + 24 >> 2] = i14;
+ break;
+ }
+ }
+ } while (0);
+ i15 = HEAP32[i7 + (i15 + 20) >> 2] | 0;
+ if ((i15 | 0) != 0) {
+ if (i15 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i14 + 20 >> 2] = i15;
+ HEAP32[i15 + 24 >> 2] = i14;
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ } else {
+ i2 = i13;
+ i11 = i12;
+ }
+ } else {
+ i2 = i13;
+ i11 = i12;
+ }
+ } else {
+ i2 = i15;
+ i11 = i8;
+ }
+ } while (0);
+ if (!(i2 >>> 0 < i6 >>> 0)) {
+ _abort();
+ }
+ i12 = i7 + (i8 + -4) | 0;
+ i13 = HEAP32[i12 >> 2] | 0;
+ if ((i13 & 1 | 0) == 0) {
+ _abort();
+ }
+ if ((i13 & 2 | 0) == 0) {
+ if ((i6 | 0) == (HEAP32[80 >> 2] | 0)) {
+ i21 = (HEAP32[68 >> 2] | 0) + i11 | 0;
+ HEAP32[68 >> 2] = i21;
+ HEAP32[80 >> 2] = i2;
+ HEAP32[i2 + 4 >> 2] = i21 | 1;
+ if ((i2 | 0) != (HEAP32[76 >> 2] | 0)) {
+ STACKTOP = i1;
+ return;
+ }
+ HEAP32[76 >> 2] = 0;
+ HEAP32[64 >> 2] = 0;
+ STACKTOP = i1;
+ return;
+ }
+ if ((i6 | 0) == (HEAP32[76 >> 2] | 0)) {
+ i21 = (HEAP32[64 >> 2] | 0) + i11 | 0;
+ HEAP32[64 >> 2] = i21;
+ HEAP32[76 >> 2] = i2;
+ HEAP32[i2 + 4 >> 2] = i21 | 1;
+ HEAP32[i2 + i21 >> 2] = i21;
+ STACKTOP = i1;
+ return;
+ }
+ i11 = (i13 & -8) + i11 | 0;
+ i12 = i13 >>> 3;
+ do {
+ if (!(i13 >>> 0 < 256)) {
+ i10 = HEAP32[i7 + (i8 + 16) >> 2] | 0;
+ i15 = HEAP32[i7 + (i8 | 4) >> 2] | 0;
+ do {
+ if ((i15 | 0) == (i6 | 0)) {
+ i13 = i7 + (i8 + 12) | 0;
+ i12 = HEAP32[i13 >> 2] | 0;
+ if ((i12 | 0) == 0) {
+ i13 = i7 + (i8 + 8) | 0;
+ i12 = HEAP32[i13 >> 2] | 0;
+ if ((i12 | 0) == 0) {
+ i9 = 0;
+ break;
+ }
+ }
+ while (1) {
+ i14 = i12 + 20 | 0;
+ i15 = HEAP32[i14 >> 2] | 0;
+ if ((i15 | 0) != 0) {
+ i12 = i15;
+ i13 = i14;
+ continue;
+ }
+ i14 = i12 + 16 | 0;
+ i15 = HEAP32[i14 >> 2] | 0;
+ if ((i15 | 0) == 0) {
+ break;
+ } else {
+ i12 = i15;
+ i13 = i14;
+ }
+ }
+ if (i13 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i13 >> 2] = 0;
+ i9 = i12;
+ break;
+ }
+ } else {
+ i13 = HEAP32[i7 + i8 >> 2] | 0;
+ if (i13 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i14 = i13 + 12 | 0;
+ if ((HEAP32[i14 >> 2] | 0) != (i6 | 0)) {
+ _abort();
+ }
+ i12 = i15 + 8 | 0;
+ if ((HEAP32[i12 >> 2] | 0) == (i6 | 0)) {
+ HEAP32[i14 >> 2] = i15;
+ HEAP32[i12 >> 2] = i13;
+ i9 = i15;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ if ((i10 | 0) != 0) {
+ i12 = HEAP32[i7 + (i8 + 20) >> 2] | 0;
+ i13 = 360 + (i12 << 2) | 0;
+ if ((i6 | 0) == (HEAP32[i13 >> 2] | 0)) {
+ HEAP32[i13 >> 2] = i9;
+ if ((i9 | 0) == 0) {
+ HEAP32[60 >> 2] = HEAP32[60 >> 2] & ~(1 << i12);
+ break;
+ }
+ } else {
+ if (i10 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i12 = i10 + 16 | 0;
+ if ((HEAP32[i12 >> 2] | 0) == (i6 | 0)) {
+ HEAP32[i12 >> 2] = i9;
+ } else {
+ HEAP32[i10 + 20 >> 2] = i9;
+ }
+ if ((i9 | 0) == 0) {
+ break;
+ }
+ }
+ if (i9 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i9 + 24 >> 2] = i10;
+ i6 = HEAP32[i7 + (i8 + 8) >> 2] | 0;
+ do {
+ if ((i6 | 0) != 0) {
+ if (i6 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i9 + 16 >> 2] = i6;
+ HEAP32[i6 + 24 >> 2] = i9;
+ break;
+ }
+ }
+ } while (0);
+ i6 = HEAP32[i7 + (i8 + 12) >> 2] | 0;
+ if ((i6 | 0) != 0) {
+ if (i6 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i9 + 20 >> 2] = i6;
+ HEAP32[i6 + 24 >> 2] = i9;
+ break;
+ }
+ }
+ }
+ } else {
+ i9 = HEAP32[i7 + i8 >> 2] | 0;
+ i7 = HEAP32[i7 + (i8 | 4) >> 2] | 0;
+ i8 = 96 + (i12 << 1 << 2) | 0;
+ if ((i9 | 0) != (i8 | 0)) {
+ if (i9 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ if ((HEAP32[i9 + 12 >> 2] | 0) != (i6 | 0)) {
+ _abort();
+ }
+ }
+ if ((i7 | 0) == (i9 | 0)) {
+ HEAP32[14] = HEAP32[14] & ~(1 << i12);
+ break;
+ }
+ if ((i7 | 0) != (i8 | 0)) {
+ if (i7 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i8 = i7 + 8 | 0;
+ if ((HEAP32[i8 >> 2] | 0) == (i6 | 0)) {
+ i10 = i8;
+ } else {
+ _abort();
+ }
+ } else {
+ i10 = i7 + 8 | 0;
+ }
+ HEAP32[i9 + 12 >> 2] = i7;
+ HEAP32[i10 >> 2] = i9;
+ }
+ } while (0);
+ HEAP32[i2 + 4 >> 2] = i11 | 1;
+ HEAP32[i2 + i11 >> 2] = i11;
+ if ((i2 | 0) == (HEAP32[76 >> 2] | 0)) {
+ HEAP32[64 >> 2] = i11;
+ STACKTOP = i1;
+ return;
+ }
+ } else {
+ HEAP32[i12 >> 2] = i13 & -2;
+ HEAP32[i2 + 4 >> 2] = i11 | 1;
+ HEAP32[i2 + i11 >> 2] = i11;
+ }
+ i6 = i11 >>> 3;
+ if (i11 >>> 0 < 256) {
+ i7 = i6 << 1;
+ i3 = 96 + (i7 << 2) | 0;
+ i8 = HEAP32[14] | 0;
+ i6 = 1 << i6;
+ if ((i8 & i6 | 0) != 0) {
+ i6 = 96 + (i7 + 2 << 2) | 0;
+ i7 = HEAP32[i6 >> 2] | 0;
+ if (i7 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i4 = i6;
+ i5 = i7;
+ }
+ } else {
+ HEAP32[14] = i8 | i6;
+ i4 = 96 + (i7 + 2 << 2) | 0;
+ i5 = i3;
+ }
+ HEAP32[i4 >> 2] = i2;
+ HEAP32[i5 + 12 >> 2] = i2;
+ HEAP32[i2 + 8 >> 2] = i5;
+ HEAP32[i2 + 12 >> 2] = i3;
+ STACKTOP = i1;
+ return;
+ }
+ i4 = i11 >>> 8;
+ if ((i4 | 0) != 0) {
+ if (i11 >>> 0 > 16777215) {
+ i4 = 31;
+ } else {
+ i20 = (i4 + 1048320 | 0) >>> 16 & 8;
+ i21 = i4 << i20;
+ i19 = (i21 + 520192 | 0) >>> 16 & 4;
+ i21 = i21 << i19;
+ i4 = (i21 + 245760 | 0) >>> 16 & 2;
+ i4 = 14 - (i19 | i20 | i4) + (i21 << i4 >>> 15) | 0;
+ i4 = i11 >>> (i4 + 7 | 0) & 1 | i4 << 1;
+ }
+ } else {
+ i4 = 0;
+ }
+ i5 = 360 + (i4 << 2) | 0;
+ HEAP32[i2 + 28 >> 2] = i4;
+ HEAP32[i2 + 20 >> 2] = 0;
+ HEAP32[i2 + 16 >> 2] = 0;
+ i7 = HEAP32[60 >> 2] | 0;
+ i6 = 1 << i4;
+ L199 : do {
+ if ((i7 & i6 | 0) != 0) {
+ i5 = HEAP32[i5 >> 2] | 0;
+ if ((i4 | 0) == 31) {
+ i4 = 0;
+ } else {
+ i4 = 25 - (i4 >>> 1) | 0;
+ }
+ L205 : do {
+ if ((HEAP32[i5 + 4 >> 2] & -8 | 0) != (i11 | 0)) {
+ i4 = i11 << i4;
+ i7 = i5;
+ while (1) {
+ i6 = i7 + (i4 >>> 31 << 2) + 16 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ break;
+ }
+ if ((HEAP32[i5 + 4 >> 2] & -8 | 0) == (i11 | 0)) {
+ i3 = i5;
+ break L205;
+ } else {
+ i4 = i4 << 1;
+ i7 = i5;
+ }
+ }
+ if (i6 >>> 0 < (HEAP32[72 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i6 >> 2] = i2;
+ HEAP32[i2 + 24 >> 2] = i7;
+ HEAP32[i2 + 12 >> 2] = i2;
+ HEAP32[i2 + 8 >> 2] = i2;
+ break L199;
+ }
+ } else {
+ i3 = i5;
+ }
+ } while (0);
+ i5 = i3 + 8 | 0;
+ i4 = HEAP32[i5 >> 2] | 0;
+ i6 = HEAP32[72 >> 2] | 0;
+ if (i3 >>> 0 < i6 >>> 0) {
+ _abort();
+ }
+ if (i4 >>> 0 < i6 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i4 + 12 >> 2] = i2;
+ HEAP32[i5 >> 2] = i2;
+ HEAP32[i2 + 8 >> 2] = i4;
+ HEAP32[i2 + 12 >> 2] = i3;
+ HEAP32[i2 + 24 >> 2] = 0;
+ break;
+ }
+ } else {
+ HEAP32[60 >> 2] = i7 | i6;
+ HEAP32[i5 >> 2] = i2;
+ HEAP32[i2 + 24 >> 2] = i5;
+ HEAP32[i2 + 12 >> 2] = i2;
+ HEAP32[i2 + 8 >> 2] = i2;
+ }
+ } while (0);
+ i21 = (HEAP32[88 >> 2] | 0) + -1 | 0;
+ HEAP32[88 >> 2] = i21;
+ if ((i21 | 0) == 0) {
+ i2 = 512 | 0;
+ } else {
+ STACKTOP = i1;
+ return;
+ }
+ while (1) {
+ i2 = HEAP32[i2 >> 2] | 0;
+ if ((i2 | 0) == 0) {
+ break;
+ } else {
+ i2 = i2 + 8 | 0;
+ }
+ }
+ HEAP32[88 >> 2] = -1;
+ STACKTOP = i1;
+ return;
+}
+function __Z15fannkuch_workerPv(i9) {
+ i9 = i9 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0;
+ i3 = STACKTOP;
+ i7 = HEAP32[i9 + 4 >> 2] | 0;
+ i6 = i7 << 2;
+ i5 = _malloc(i6) | 0;
+ i2 = _malloc(i6) | 0;
+ i6 = _malloc(i6) | 0;
+ i10 = (i7 | 0) > 0;
+ if (i10) {
+ i8 = 0;
+ do {
+ HEAP32[i5 + (i8 << 2) >> 2] = i8;
+ i8 = i8 + 1 | 0;
+ } while ((i8 | 0) != (i7 | 0));
+ i8 = i7 + -1 | 0;
+ i17 = HEAP32[i9 >> 2] | 0;
+ HEAP32[i5 + (i17 << 2) >> 2] = i8;
+ i9 = i5 + (i8 << 2) | 0;
+ HEAP32[i9 >> 2] = i17;
+ if (i10) {
+ i10 = i7 << 2;
+ i11 = 0;
+ i12 = i7;
+ L7 : while (1) {
+ if ((i12 | 0) > 1) {
+ while (1) {
+ i13 = i12 + -1 | 0;
+ HEAP32[i6 + (i13 << 2) >> 2] = i12;
+ if ((i13 | 0) > 1) {
+ i12 = i13;
+ } else {
+ i12 = 1;
+ break;
+ }
+ }
+ }
+ i13 = HEAP32[i5 >> 2] | 0;
+ if ((i13 | 0) != 0 ? (HEAP32[i9 >> 2] | 0) != (i8 | 0) : 0) {
+ _memcpy(i2 | 0, i5 | 0, i10 | 0) | 0;
+ i15 = 0;
+ i14 = HEAP32[i2 >> 2] | 0;
+ while (1) {
+ i17 = i14 + -1 | 0;
+ if ((i17 | 0) > 1) {
+ i16 = 1;
+ do {
+ i20 = i2 + (i16 << 2) | 0;
+ i19 = HEAP32[i20 >> 2] | 0;
+ i18 = i2 + (i17 << 2) | 0;
+ HEAP32[i20 >> 2] = HEAP32[i18 >> 2];
+ HEAP32[i18 >> 2] = i19;
+ i16 = i16 + 1 | 0;
+ i17 = i17 + -1 | 0;
+ } while ((i16 | 0) < (i17 | 0));
+ }
+ i15 = i15 + 1 | 0;
+ i20 = i2 + (i14 << 2) | 0;
+ i16 = HEAP32[i20 >> 2] | 0;
+ HEAP32[i20 >> 2] = i14;
+ if ((i16 | 0) == 0) {
+ break;
+ } else {
+ i14 = i16;
+ }
+ }
+ i11 = (i11 | 0) < (i15 | 0) ? i15 : i11;
+ }
+ if ((i12 | 0) >= (i8 | 0)) {
+ i8 = 34;
+ break;
+ }
+ while (1) {
+ if ((i12 | 0) > 0) {
+ i14 = 0;
+ while (1) {
+ i15 = i14 + 1 | 0;
+ HEAP32[i5 + (i14 << 2) >> 2] = HEAP32[i5 + (i15 << 2) >> 2];
+ if ((i15 | 0) == (i12 | 0)) {
+ i14 = i12;
+ break;
+ } else {
+ i14 = i15;
+ }
+ }
+ } else {
+ i14 = 0;
+ }
+ HEAP32[i5 + (i14 << 2) >> 2] = i13;
+ i14 = i6 + (i12 << 2) | 0;
+ i20 = (HEAP32[i14 >> 2] | 0) + -1 | 0;
+ HEAP32[i14 >> 2] = i20;
+ i14 = i12 + 1 | 0;
+ if ((i20 | 0) > 0) {
+ continue L7;
+ }
+ if ((i14 | 0) >= (i8 | 0)) {
+ i8 = 34;
+ break L7;
+ }
+ i13 = HEAP32[i5 >> 2] | 0;
+ i12 = i14;
+ }
+ }
+ if ((i8 | 0) == 34) {
+ _free(i5);
+ _free(i2);
+ _free(i6);
+ STACKTOP = i3;
+ return i11 | 0;
+ }
+ } else {
+ i1 = i9;
+ i4 = i8;
+ }
+ } else {
+ i4 = i7 + -1 | 0;
+ i20 = HEAP32[i9 >> 2] | 0;
+ HEAP32[i5 + (i20 << 2) >> 2] = i4;
+ i1 = i5 + (i4 << 2) | 0;
+ HEAP32[i1 >> 2] = i20;
+ }
+ i11 = 0;
+ L36 : while (1) {
+ if ((i7 | 0) > 1) {
+ while (1) {
+ i8 = i7 + -1 | 0;
+ HEAP32[i6 + (i8 << 2) >> 2] = i7;
+ if ((i8 | 0) > 1) {
+ i7 = i8;
+ } else {
+ i7 = 1;
+ break;
+ }
+ }
+ }
+ i8 = HEAP32[i5 >> 2] | 0;
+ if ((i8 | 0) != 0 ? (HEAP32[i1 >> 2] | 0) != (i4 | 0) : 0) {
+ i10 = 0;
+ i9 = HEAP32[i2 >> 2] | 0;
+ while (1) {
+ i13 = i9 + -1 | 0;
+ if ((i13 | 0) > 1) {
+ i12 = 1;
+ do {
+ i18 = i2 + (i12 << 2) | 0;
+ i19 = HEAP32[i18 >> 2] | 0;
+ i20 = i2 + (i13 << 2) | 0;
+ HEAP32[i18 >> 2] = HEAP32[i20 >> 2];
+ HEAP32[i20 >> 2] = i19;
+ i12 = i12 + 1 | 0;
+ i13 = i13 + -1 | 0;
+ } while ((i12 | 0) < (i13 | 0));
+ }
+ i10 = i10 + 1 | 0;
+ i20 = i2 + (i9 << 2) | 0;
+ i12 = HEAP32[i20 >> 2] | 0;
+ HEAP32[i20 >> 2] = i9;
+ if ((i12 | 0) == 0) {
+ break;
+ } else {
+ i9 = i12;
+ }
+ }
+ i11 = (i11 | 0) < (i10 | 0) ? i10 : i11;
+ }
+ if ((i7 | 0) >= (i4 | 0)) {
+ i8 = 34;
+ break;
+ }
+ while (1) {
+ if ((i7 | 0) > 0) {
+ i9 = 0;
+ while (1) {
+ i10 = i9 + 1 | 0;
+ HEAP32[i5 + (i9 << 2) >> 2] = HEAP32[i5 + (i10 << 2) >> 2];
+ if ((i10 | 0) == (i7 | 0)) {
+ i9 = i7;
+ break;
+ } else {
+ i9 = i10;
+ }
+ }
+ } else {
+ i9 = 0;
+ }
+ HEAP32[i5 + (i9 << 2) >> 2] = i8;
+ i9 = i6 + (i7 << 2) | 0;
+ i20 = (HEAP32[i9 >> 2] | 0) + -1 | 0;
+ HEAP32[i9 >> 2] = i20;
+ i9 = i7 + 1 | 0;
+ if ((i20 | 0) > 0) {
+ continue L36;
+ }
+ if ((i9 | 0) >= (i4 | 0)) {
+ i8 = 34;
+ break L36;
+ }
+ i8 = HEAP32[i5 >> 2] | 0;
+ i7 = i9;
+ }
+ }
+ if ((i8 | 0) == 34) {
+ _free(i5);
+ _free(i2);
+ _free(i6);
+ STACKTOP = i3;
+ return i11 | 0;
+ }
+ return 0;
+}
+function _main(i3, i5) {
+ i3 = i3 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i2 = 0, i4 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i1 = i2;
+ L1 : do {
+ if ((i3 | 0) > 1) {
+ i3 = HEAP8[HEAP32[i5 + 4 >> 2] | 0] | 0;
+ switch (i3 | 0) {
+ case 50:
+ {
+ i3 = 10;
+ break L1;
+ }
+ case 51:
+ {
+ i4 = 4;
+ break L1;
+ }
+ case 52:
+ {
+ i3 = 11;
+ break L1;
+ }
+ case 53:
+ {
+ i3 = 12;
+ break L1;
+ }
+ case 49:
+ {
+ i3 = 9;
+ break L1;
+ }
+ case 48:
+ {
+ i11 = 0;
+ STACKTOP = i2;
+ return i11 | 0;
+ }
+ default:
+ {
+ HEAP32[i1 >> 2] = i3 + -48;
+ _printf(8, i1 | 0) | 0;
+ i11 = -1;
+ STACKTOP = i2;
+ return i11 | 0;
+ }
+ }
+ } else {
+ i4 = 4;
+ }
+ } while (0);
+ if ((i4 | 0) == 4) {
+ i3 = 11;
+ }
+ i5 = i3 + -1 | 0;
+ i6 = 0;
+ i7 = 0;
+ while (1) {
+ i4 = _malloc(12) | 0;
+ HEAP32[i4 >> 2] = i7;
+ HEAP32[i4 + 4 >> 2] = i3;
+ HEAP32[i4 + 8 >> 2] = i6;
+ i7 = i7 + 1 | 0;
+ if ((i7 | 0) == (i5 | 0)) {
+ break;
+ } else {
+ i6 = i4;
+ }
+ }
+ i5 = i3 << 2;
+ i6 = _malloc(i5) | 0;
+ i5 = _malloc(i5) | 0;
+ i7 = 0;
+ do {
+ HEAP32[i6 + (i7 << 2) >> 2] = i7;
+ i7 = i7 + 1 | 0;
+ } while ((i7 | 0) != (i3 | 0));
+ i8 = i3;
+ i7 = 30;
+ L19 : do {
+ i9 = 0;
+ do {
+ HEAP32[i1 >> 2] = (HEAP32[i6 + (i9 << 2) >> 2] | 0) + 1;
+ _printf(48, i1 | 0) | 0;
+ i9 = i9 + 1 | 0;
+ } while ((i9 | 0) != (i3 | 0));
+ _putchar(10) | 0;
+ i7 = i7 + -1 | 0;
+ if ((i8 | 0) <= 1) {
+ if ((i8 | 0) == (i3 | 0)) {
+ break;
+ }
+ } else {
+ while (1) {
+ i9 = i8 + -1 | 0;
+ HEAP32[i5 + (i9 << 2) >> 2] = i8;
+ if ((i9 | 0) > 1) {
+ i8 = i9;
+ } else {
+ i8 = 1;
+ break;
+ }
+ }
+ }
+ while (1) {
+ i9 = HEAP32[i6 >> 2] | 0;
+ if ((i8 | 0) > 0) {
+ i11 = 0;
+ while (1) {
+ i10 = i11 + 1 | 0;
+ HEAP32[i6 + (i11 << 2) >> 2] = HEAP32[i6 + (i10 << 2) >> 2];
+ if ((i10 | 0) == (i8 | 0)) {
+ i10 = i8;
+ break;
+ } else {
+ i11 = i10;
+ }
+ }
+ } else {
+ i10 = 0;
+ }
+ HEAP32[i6 + (i10 << 2) >> 2] = i9;
+ i9 = i5 + (i8 << 2) | 0;
+ i11 = (HEAP32[i9 >> 2] | 0) + -1 | 0;
+ HEAP32[i9 >> 2] = i11;
+ i9 = i8 + 1 | 0;
+ if ((i11 | 0) > 0) {
+ break;
+ }
+ if ((i9 | 0) == (i3 | 0)) {
+ break L19;
+ } else {
+ i8 = i9;
+ }
+ }
+ } while ((i7 | 0) != 0);
+ _free(i6);
+ _free(i5);
+ if ((i4 | 0) == 0) {
+ i5 = 0;
+ } else {
+ i5 = 0;
+ while (1) {
+ i6 = __Z15fannkuch_workerPv(i4) | 0;
+ i5 = (i5 | 0) < (i6 | 0) ? i6 : i5;
+ i6 = HEAP32[i4 + 8 >> 2] | 0;
+ _free(i4);
+ if ((i6 | 0) == 0) {
+ break;
+ } else {
+ i4 = i6;
+ }
+ }
+ }
+ HEAP32[i1 >> 2] = i3;
+ HEAP32[i1 + 4 >> 2] = i5;
+ _printf(24, i1 | 0) | 0;
+ i11 = 0;
+ STACKTOP = i2;
+ return i11 | 0;
+}
+function _memcpy(i3, i2, i1) {
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i4 = 0;
+ if ((i1 | 0) >= 4096) return _emscripten_memcpy_big(i3 | 0, i2 | 0, i1 | 0) | 0;
+ i4 = i3 | 0;
+ if ((i3 & 3) == (i2 & 3)) {
+ while (i3 & 3) {
+ if ((i1 | 0) == 0) return i4 | 0;
+ HEAP8[i3] = HEAP8[i2] | 0;
+ i3 = i3 + 1 | 0;
+ i2 = i2 + 1 | 0;
+ i1 = i1 - 1 | 0;
+ }
+ while ((i1 | 0) >= 4) {
+ HEAP32[i3 >> 2] = HEAP32[i2 >> 2];
+ i3 = i3 + 4 | 0;
+ i2 = i2 + 4 | 0;
+ i1 = i1 - 4 | 0;
+ }
+ }
+ while ((i1 | 0) > 0) {
+ HEAP8[i3] = HEAP8[i2] | 0;
+ i3 = i3 + 1 | 0;
+ i2 = i2 + 1 | 0;
+ i1 = i1 - 1 | 0;
+ }
+ return i4 | 0;
+}
+function _memset(i1, i4, i3) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i5 = 0, i6 = 0, i7 = 0;
+ i2 = i1 + i3 | 0;
+ if ((i3 | 0) >= 20) {
+ i4 = i4 & 255;
+ i7 = i1 & 3;
+ i6 = i4 | i4 << 8 | i4 << 16 | i4 << 24;
+ i5 = i2 & ~3;
+ if (i7) {
+ i7 = i1 + 4 - i7 | 0;
+ while ((i1 | 0) < (i7 | 0)) {
+ HEAP8[i1] = i4;
+ i1 = i1 + 1 | 0;
+ }
+ }
+ while ((i1 | 0) < (i5 | 0)) {
+ HEAP32[i1 >> 2] = i6;
+ i1 = i1 + 4 | 0;
+ }
+ }
+ while ((i1 | 0) < (i2 | 0)) {
+ HEAP8[i1] = i4;
+ i1 = i1 + 1 | 0;
+ }
+ return i1 - i3 | 0;
+}
+function copyTempDouble(i1) {
+ i1 = i1 | 0;
+ HEAP8[tempDoublePtr] = HEAP8[i1];
+ HEAP8[tempDoublePtr + 1 | 0] = HEAP8[i1 + 1 | 0];
+ HEAP8[tempDoublePtr + 2 | 0] = HEAP8[i1 + 2 | 0];
+ HEAP8[tempDoublePtr + 3 | 0] = HEAP8[i1 + 3 | 0];
+ HEAP8[tempDoublePtr + 4 | 0] = HEAP8[i1 + 4 | 0];
+ HEAP8[tempDoublePtr + 5 | 0] = HEAP8[i1 + 5 | 0];
+ HEAP8[tempDoublePtr + 6 | 0] = HEAP8[i1 + 6 | 0];
+ HEAP8[tempDoublePtr + 7 | 0] = HEAP8[i1 + 7 | 0];
+}
+function copyTempFloat(i1) {
+ i1 = i1 | 0;
+ HEAP8[tempDoublePtr] = HEAP8[i1];
+ HEAP8[tempDoublePtr + 1 | 0] = HEAP8[i1 + 1 | 0];
+ HEAP8[tempDoublePtr + 2 | 0] = HEAP8[i1 + 2 | 0];
+ HEAP8[tempDoublePtr + 3 | 0] = HEAP8[i1 + 3 | 0];
+}
+function runPostSets() {}
+function _strlen(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = i1;
+ while (HEAP8[i2] | 0) {
+ i2 = i2 + 1 | 0;
+ }
+ return i2 - i1 | 0;
+}
+function stackAlloc(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + i1 | 0;
+ STACKTOP = STACKTOP + 7 & -8;
+ return i2 | 0;
+}
+function setThrew(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ if ((__THREW__ | 0) == 0) {
+ __THREW__ = i1;
+ threwValue = i2;
+ }
+}
+function stackRestore(i1) {
+ i1 = i1 | 0;
+ STACKTOP = i1;
+}
+function setTempRet9(i1) {
+ i1 = i1 | 0;
+ tempRet9 = i1;
+}
+function setTempRet8(i1) {
+ i1 = i1 | 0;
+ tempRet8 = i1;
+}
+function setTempRet7(i1) {
+ i1 = i1 | 0;
+ tempRet7 = i1;
+}
+function setTempRet6(i1) {
+ i1 = i1 | 0;
+ tempRet6 = i1;
+}
+function setTempRet5(i1) {
+ i1 = i1 | 0;
+ tempRet5 = i1;
+}
+function setTempRet4(i1) {
+ i1 = i1 | 0;
+ tempRet4 = i1;
+}
+function setTempRet3(i1) {
+ i1 = i1 | 0;
+ tempRet3 = i1;
+}
+function setTempRet2(i1) {
+ i1 = i1 | 0;
+ tempRet2 = i1;
+}
+function setTempRet1(i1) {
+ i1 = i1 | 0;
+ tempRet1 = i1;
+}
+function setTempRet0(i1) {
+ i1 = i1 | 0;
+ tempRet0 = i1;
+}
+function stackSave() {
+ return STACKTOP | 0;
+}
+
+// EMSCRIPTEN_END_FUNCS
+
+
+ return { _strlen: _strlen, _free: _free, _main: _main, _memset: _memset, _malloc: _malloc, _memcpy: _memcpy, runPostSets: runPostSets, stackAlloc: stackAlloc, stackSave: stackSave, stackRestore: stackRestore, setThrew: setThrew, setTempRet0: setTempRet0, setTempRet1: setTempRet1, setTempRet2: setTempRet2, setTempRet3: setTempRet3, setTempRet4: setTempRet4, setTempRet5: setTempRet5, setTempRet6: setTempRet6, setTempRet7: setTempRet7, setTempRet8: setTempRet8, setTempRet9: setTempRet9 };
+}).toString(),
+// EMSCRIPTEN_END_ASM
+{ "Math": Math, "Int8Array": Int8Array, "Int16Array": Int16Array, "Int32Array": Int32Array, "Uint8Array": Uint8Array, "Uint16Array": Uint16Array, "Uint32Array": Uint32Array, "Float32Array": Float32Array, "Float64Array": Float64Array, "abort": abort, "assert": assert, "asmPrintInt": asmPrintInt, "asmPrintFloat": asmPrintFloat, "min": Math_min, "_fflush": _fflush, "_emscripten_memcpy_big": _emscripten_memcpy_big, "_putchar": _putchar, "_fputc": _fputc, "_send": _send, "_pwrite": _pwrite, "_abort": _abort, "__reallyNegative": __reallyNegative, "_fwrite": _fwrite, "_sbrk": _sbrk, "_mkport": _mkport, "_fprintf": _fprintf, "___setErrNo": ___setErrNo, "__formatString": __formatString, "_fileno": _fileno, "_printf": _printf, "_time": _time, "_sysconf": _sysconf, "_write": _write, "___errno_location": ___errno_location, "STACKTOP": STACKTOP, "STACK_MAX": STACK_MAX, "tempDoublePtr": tempDoublePtr, "ABORT": ABORT, "NaN": NaN, "Infinity": Infinity }, buffer);
+var _strlen = Module["_strlen"] = asm["_strlen"];
+var _free = Module["_free"] = asm["_free"];
+var _main = Module["_main"] = asm["_main"];
+var _memset = Module["_memset"] = asm["_memset"];
+var _malloc = Module["_malloc"] = asm["_malloc"];
+var _memcpy = Module["_memcpy"] = asm["_memcpy"];
+var runPostSets = Module["runPostSets"] = asm["runPostSets"];
+
+Runtime.stackAlloc = function(size) { return asm['stackAlloc'](size) };
+Runtime.stackSave = function() { return asm['stackSave']() };
+Runtime.stackRestore = function(top) { asm['stackRestore'](top) };
+
+
+// Warning: printing of i64 values may be slightly rounded! No deep i64 math used, so precise i64 code not included
+var i64Math = null;
+
+// === Auto-generated postamble setup entry stuff ===
+
+if (memoryInitializer) {
+ if (ENVIRONMENT_IS_NODE || ENVIRONMENT_IS_SHELL) {
+ var data = Module['readBinary'](memoryInitializer);
+ HEAPU8.set(data, STATIC_BASE);
+ } else {
+ addRunDependency('memory initializer');
+ Browser.asyncLoad(memoryInitializer, function(data) {
+ HEAPU8.set(data, STATIC_BASE);
+ removeRunDependency('memory initializer');
+ }, function(data) {
+ throw 'could not load memory initializer ' + memoryInitializer;
+ });
+ }
+}
+
+function ExitStatus(status) {
+ this.name = "ExitStatus";
+ this.message = "Program terminated with exit(" + status + ")";
+ this.status = status;
+};
+ExitStatus.prototype = new Error();
+ExitStatus.prototype.constructor = ExitStatus;
+
+var initialStackTop;
+var preloadStartTime = null;
+var calledMain = false;
+
+dependenciesFulfilled = function runCaller() {
+ // If run has never been called, and we should call run (INVOKE_RUN is true, and Module.noInitialRun is not false)
+ if (!Module['calledRun'] && shouldRunNow) run([].concat(Module["arguments"]));
+ if (!Module['calledRun']) dependenciesFulfilled = runCaller; // try this again later, after new deps are fulfilled
+}
+
+Module['callMain'] = Module.callMain = function callMain(args) {
+ assert(runDependencies == 0, 'cannot call main when async dependencies remain! (listen on __ATMAIN__)');
+ assert(__ATPRERUN__.length == 0, 'cannot call main when preRun functions remain to be called');
+
+ args = args || [];
+
+ ensureInitRuntime();
+
+ var argc = args.length+1;
+ function pad() {
+ for (var i = 0; i < 4-1; i++) {
+ argv.push(0);
+ }
+ }
+ var argv = [allocate(intArrayFromString("/bin/this.program"), 'i8', ALLOC_NORMAL) ];
+ pad();
+ for (var i = 0; i < argc-1; i = i + 1) {
+ argv.push(allocate(intArrayFromString(args[i]), 'i8', ALLOC_NORMAL));
+ pad();
+ }
+ argv.push(0);
+ argv = allocate(argv, 'i32', ALLOC_NORMAL);
+
+ initialStackTop = STACKTOP;
+
+ try {
+
+ var ret = Module['_main'](argc, argv, 0);
+
+
+ // if we're not running an evented main loop, it's time to exit
+ if (!Module['noExitRuntime']) {
+ exit(ret);
+ }
+ }
+ catch(e) {
+ if (e instanceof ExitStatus) {
+ // exit() throws this once it's done to make sure execution
+ // has been stopped completely
+ return;
+ } else if (e == 'SimulateInfiniteLoop') {
+ // running an evented main loop, don't immediately exit
+ Module['noExitRuntime'] = true;
+ return;
+ } else {
+ if (e && typeof e === 'object' && e.stack) Module.printErr('exception thrown: ' + [e, e.stack]);
+ throw e;
+ }
+ } finally {
+ calledMain = true;
+ }
+}
+
+
+
+
+function run(args) {
+ args = args || Module['arguments'];
+
+ if (preloadStartTime === null) preloadStartTime = Date.now();
+
+ if (runDependencies > 0) {
+ Module.printErr('run() called, but dependencies remain, so not running');
+ return;
+ }
+
+ preRun();
+
+ if (runDependencies > 0) return; // a preRun added a dependency, run will be called later
+ if (Module['calledRun']) return; // run may have just been called through dependencies being fulfilled just in this very frame
+
+ function doRun() {
+ if (Module['calledRun']) return; // run may have just been called while the async setStatus time below was happening
+ Module['calledRun'] = true;
+
+ ensureInitRuntime();
+
+ preMain();
+
+ if (ENVIRONMENT_IS_WEB && preloadStartTime !== null) {
+ Module.printErr('pre-main prep time: ' + (Date.now() - preloadStartTime) + ' ms');
+ }
+
+ if (Module['_main'] && shouldRunNow) {
+ Module['callMain'](args);
+ }
+
+ postRun();
+ }
+
+ if (Module['setStatus']) {
+ Module['setStatus']('Running...');
+ setTimeout(function() {
+ setTimeout(function() {
+ Module['setStatus']('');
+ }, 1);
+ if (!ABORT) doRun();
+ }, 1);
+ } else {
+ doRun();
+ }
+}
+Module['run'] = Module.run = run;
+
+function exit(status) {
+ ABORT = true;
+ EXITSTATUS = status;
+ STACKTOP = initialStackTop;
+
+ // exit the runtime
+ exitRuntime();
+
+ // TODO We should handle this differently based on environment.
+ // In the browser, the best we can do is throw an exception
+ // to halt execution, but in node we could process.exit and
+ // I'd imagine SM shell would have something equivalent.
+ // This would let us set a proper exit status (which
+ // would be great for checking test exit statuses).
+ // https://github.com/kripken/emscripten/issues/1371
+
+ // throw an exception to halt the current execution
+ throw new ExitStatus(status);
+}
+Module['exit'] = Module.exit = exit;
+
+function abort(text) {
+ if (text) {
+ Module.print(text);
+ Module.printErr(text);
+ }
+
+ ABORT = true;
+ EXITSTATUS = 1;
+
+ var extra = '\nIf this abort() is unexpected, build with -s ASSERTIONS=1 which can give more information.';
+
+ throw 'abort() at ' + stackTrace() + extra;
+}
+Module['abort'] = Module.abort = abort;
+
+// {{PRE_RUN_ADDITIONS}}
+
+if (Module['preInit']) {
+ if (typeof Module['preInit'] == 'function') Module['preInit'] = [Module['preInit']];
+ while (Module['preInit'].length > 0) {
+ Module['preInit'].pop()();
+ }
+}
+
+// shouldRunNow refers to calling main(), not run().
+var shouldRunNow = true;
+if (Module['noInitialRun']) {
+ shouldRunNow = false;
+}
+
+
+run([].concat(Module["arguments"]));
diff --git a/deps/v8/test/mjsunit/wasm/embenchen/fasta.js b/deps/v8/test/mjsunit/wasm/embenchen/fasta.js
new file mode 100644
index 0000000000..1cd47fa1db
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/embenchen/fasta.js
@@ -0,0 +1,8608 @@
+// Modified embenchen to direct to asm-wasm.
+// Flags: --expose-wasm
+
+var EXPECTED_OUTPUT =
+ 'GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGA\n' +
+ 'TCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACT\n' +
+ 'AAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAG\n' +
+ 'GCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCG\n' +
+ 'CCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGT\n' +
+ 'GGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCA\n' +
+ 'GGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAA\n' +
+ 'TTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAG\n' +
+ 'AATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCA\n' +
+ 'GCCTGGGCGA\n';
+var Module = {
+ arguments: [1],
+ print: function(x) {Module.printBuffer += x + '\n';},
+ preRun: [function() {Module.printBuffer = ''}],
+ postRun: [function() {
+ assertEquals(EXPECTED_OUTPUT, Module.printBuffer);
+ }],
+};
+// The Module object: Our interface to the outside world. We import
+// and export values on it, and do the work to get that through
+// closure compiler if necessary. There are various ways Module can be used:
+// 1. Not defined. We create it here
+// 2. A function parameter, function(Module) { ..generated code.. }
+// 3. pre-run appended it, var Module = {}; ..generated code..
+// 4. External script tag defines var Module.
+// We need to do an eval in order to handle the closure compiler
+// case, where this code here is minified but Module was defined
+// elsewhere (e.g. case 4 above). We also need to check if Module
+// already exists (e.g. case 3 above).
+// Note that if you want to run closure, and also to use Module
+// after the generated code, you will need to define var Module = {};
+// before the code. Then that object will be used in the code, and you
+// can continue to use Module afterwards as well.
+var Module;
+if (!Module) Module = (typeof Module !== 'undefined' ? Module : null) || {};
+
+// Sometimes an existing Module object exists with properties
+// meant to overwrite the default module functionality. Here
+// we collect those properties and reapply _after_ we configure
+// the current environment's defaults to avoid having to be so
+// defensive during initialization.
+var moduleOverrides = {};
+for (var key in Module) {
+ if (Module.hasOwnProperty(key)) {
+ moduleOverrides[key] = Module[key];
+ }
+}
+
+// The environment setup code below is customized to use Module.
+// *** Environment setup code ***
+var ENVIRONMENT_IS_NODE = typeof process === 'object' && typeof require === 'function';
+var ENVIRONMENT_IS_WEB = typeof window === 'object';
+var ENVIRONMENT_IS_WORKER = typeof importScripts === 'function';
+var ENVIRONMENT_IS_SHELL = !ENVIRONMENT_IS_WEB && !ENVIRONMENT_IS_NODE && !ENVIRONMENT_IS_WORKER;
+
+if (ENVIRONMENT_IS_NODE) {
+ // Expose functionality in the same simple way that the shells work
+ // Note that we pollute the global namespace here, otherwise we break in node
+ if (!Module['print']) Module['print'] = function print(x) {
+ process['stdout'].write(x + '\n');
+ };
+ if (!Module['printErr']) Module['printErr'] = function printErr(x) {
+ process['stderr'].write(x + '\n');
+ };
+
+ var nodeFS = require('fs');
+ var nodePath = require('path');
+
+ Module['read'] = function read(filename, binary) {
+ filename = nodePath['normalize'](filename);
+ var ret = nodeFS['readFileSync'](filename);
+ // The path is absolute if the normalized version is the same as the resolved.
+ if (!ret && filename != nodePath['resolve'](filename)) {
+ filename = path.join(__dirname, '..', 'src', filename);
+ ret = nodeFS['readFileSync'](filename);
+ }
+ if (ret && !binary) ret = ret.toString();
+ return ret;
+ };
+
+ Module['readBinary'] = function readBinary(filename) { return Module['read'](filename, true) };
+
+ Module['load'] = function load(f) {
+ globalEval(read(f));
+ };
+
+ Module['arguments'] = process['argv'].slice(2);
+
+ module['exports'] = Module;
+}
+else if (ENVIRONMENT_IS_SHELL) {
+ if (!Module['print']) Module['print'] = print;
+ if (typeof printErr != 'undefined') Module['printErr'] = printErr; // not present in v8 or older sm
+
+ if (typeof read != 'undefined') {
+ Module['read'] = read;
+ } else {
+ Module['read'] = function read() { throw 'no read() available (jsc?)' };
+ }
+
+ Module['readBinary'] = function readBinary(f) {
+ return read(f, 'binary');
+ };
+
+ if (typeof scriptArgs != 'undefined') {
+ Module['arguments'] = scriptArgs;
+ } else if (typeof arguments != 'undefined') {
+ Module['arguments'] = arguments;
+ }
+
+ this['Module'] = Module;
+
+ eval("if (typeof gc === 'function' && gc.toString().indexOf('[native code]') > 0) var gc = undefined"); // wipe out the SpiderMonkey shell 'gc' function, which can confuse closure (uses it as a minified name, and it is then initted to a non-falsey value unexpectedly)
+}
+else if (ENVIRONMENT_IS_WEB || ENVIRONMENT_IS_WORKER) {
+ Module['read'] = function read(url) {
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, false);
+ xhr.send(null);
+ return xhr.responseText;
+ };
+
+ if (typeof arguments != 'undefined') {
+ Module['arguments'] = arguments;
+ }
+
+ if (typeof console !== 'undefined') {
+ if (!Module['print']) Module['print'] = function print(x) {
+ console.log(x);
+ };
+ if (!Module['printErr']) Module['printErr'] = function printErr(x) {
+ console.log(x);
+ };
+ } else {
+ // Probably a worker, and without console.log. We can do very little here...
+ var TRY_USE_DUMP = false;
+ if (!Module['print']) Module['print'] = (TRY_USE_DUMP && (typeof(dump) !== "undefined") ? (function(x) {
+ dump(x);
+ }) : (function(x) {
+ // self.postMessage(x); // enable this if you want stdout to be sent as messages
+ }));
+ }
+
+ if (ENVIRONMENT_IS_WEB) {
+ window['Module'] = Module;
+ } else {
+ Module['load'] = importScripts;
+ }
+}
+else {
+ // Unreachable because SHELL is dependant on the others
+ throw 'Unknown runtime environment. Where are we?';
+}
+
+function globalEval(x) {
+ eval.call(null, x);
+}
+if (!Module['load'] == 'undefined' && Module['read']) {
+ Module['load'] = function load(f) {
+ globalEval(Module['read'](f));
+ };
+}
+if (!Module['print']) {
+ Module['print'] = function(){};
+}
+if (!Module['printErr']) {
+ Module['printErr'] = Module['print'];
+}
+if (!Module['arguments']) {
+ Module['arguments'] = [];
+}
+// *** Environment setup code ***
+
+// Closure helpers
+Module.print = Module['print'];
+Module.printErr = Module['printErr'];
+
+// Callbacks
+Module['preRun'] = [];
+Module['postRun'] = [];
+
+// Merge back in the overrides
+for (var key in moduleOverrides) {
+ if (moduleOverrides.hasOwnProperty(key)) {
+ Module[key] = moduleOverrides[key];
+ }
+}
+
+
+
+// === Auto-generated preamble library stuff ===
+
+//========================================
+// Runtime code shared with compiler
+//========================================
+
+var Runtime = {
+ stackSave: function () {
+ return STACKTOP;
+ },
+ stackRestore: function (stackTop) {
+ STACKTOP = stackTop;
+ },
+ forceAlign: function (target, quantum) {
+ quantum = quantum || 4;
+ if (quantum == 1) return target;
+ if (isNumber(target) && isNumber(quantum)) {
+ return Math.ceil(target/quantum)*quantum;
+ } else if (isNumber(quantum) && isPowerOfTwo(quantum)) {
+ return '(((' +target + ')+' + (quantum-1) + ')&' + -quantum + ')';
+ }
+ return 'Math.ceil((' + target + ')/' + quantum + ')*' + quantum;
+ },
+ isNumberType: function (type) {
+ return type in Runtime.INT_TYPES || type in Runtime.FLOAT_TYPES;
+ },
+ isPointerType: function isPointerType(type) {
+ return type[type.length-1] == '*';
+},
+ isStructType: function isStructType(type) {
+ if (isPointerType(type)) return false;
+ if (isArrayType(type)) return true;
+ if (/<?\{ ?[^}]* ?\}>?/.test(type)) return true; // { i32, i8 } etc. - anonymous struct types
+ // See comment in isStructPointerType()
+ return type[0] == '%';
+},
+ INT_TYPES: {"i1":0,"i8":0,"i16":0,"i32":0,"i64":0},
+ FLOAT_TYPES: {"float":0,"double":0},
+ or64: function (x, y) {
+ var l = (x | 0) | (y | 0);
+ var h = (Math.round(x / 4294967296) | Math.round(y / 4294967296)) * 4294967296;
+ return l + h;
+ },
+ and64: function (x, y) {
+ var l = (x | 0) & (y | 0);
+ var h = (Math.round(x / 4294967296) & Math.round(y / 4294967296)) * 4294967296;
+ return l + h;
+ },
+ xor64: function (x, y) {
+ var l = (x | 0) ^ (y | 0);
+ var h = (Math.round(x / 4294967296) ^ Math.round(y / 4294967296)) * 4294967296;
+ return l + h;
+ },
+ getNativeTypeSize: function (type) {
+ switch (type) {
+ case 'i1': case 'i8': return 1;
+ case 'i16': return 2;
+ case 'i32': return 4;
+ case 'i64': return 8;
+ case 'float': return 4;
+ case 'double': return 8;
+ default: {
+ if (type[type.length-1] === '*') {
+ return Runtime.QUANTUM_SIZE; // A pointer
+ } else if (type[0] === 'i') {
+ var bits = parseInt(type.substr(1));
+ assert(bits % 8 === 0);
+ return bits/8;
+ } else {
+ return 0;
+ }
+ }
+ }
+ },
+ getNativeFieldSize: function (type) {
+ return Math.max(Runtime.getNativeTypeSize(type), Runtime.QUANTUM_SIZE);
+ },
+ dedup: function dedup(items, ident) {
+ var seen = {};
+ if (ident) {
+ return items.filter(function(item) {
+ if (seen[item[ident]]) return false;
+ seen[item[ident]] = true;
+ return true;
+ });
+ } else {
+ return items.filter(function(item) {
+ if (seen[item]) return false;
+ seen[item] = true;
+ return true;
+ });
+ }
+},
+ set: function set() {
+ var args = typeof arguments[0] === 'object' ? arguments[0] : arguments;
+ var ret = {};
+ for (var i = 0; i < args.length; i++) {
+ ret[args[i]] = 0;
+ }
+ return ret;
+},
+ STACK_ALIGN: 8,
+ getAlignSize: function (type, size, vararg) {
+ // we align i64s and doubles on 64-bit boundaries, unlike x86
+ if (!vararg && (type == 'i64' || type == 'double')) return 8;
+ if (!type) return Math.min(size, 8); // align structures internally to 64 bits
+ return Math.min(size || (type ? Runtime.getNativeFieldSize(type) : 0), Runtime.QUANTUM_SIZE);
+ },
+ calculateStructAlignment: function calculateStructAlignment(type) {
+ type.flatSize = 0;
+ type.alignSize = 0;
+ var diffs = [];
+ var prev = -1;
+ var index = 0;
+ type.flatIndexes = type.fields.map(function(field) {
+ index++;
+ var size, alignSize;
+ if (Runtime.isNumberType(field) || Runtime.isPointerType(field)) {
+ size = Runtime.getNativeTypeSize(field); // pack char; char; in structs, also char[X]s.
+ alignSize = Runtime.getAlignSize(field, size);
+ } else if (Runtime.isStructType(field)) {
+ if (field[1] === '0') {
+ // this is [0 x something]. When inside another structure like here, it must be at the end,
+ // and it adds no size
+ // XXX this happens in java-nbody for example... assert(index === type.fields.length, 'zero-length in the middle!');
+ size = 0;
+ if (Types.types[field]) {
+ alignSize = Runtime.getAlignSize(null, Types.types[field].alignSize);
+ } else {
+ alignSize = type.alignSize || QUANTUM_SIZE;
+ }
+ } else {
+ size = Types.types[field].flatSize;
+ alignSize = Runtime.getAlignSize(null, Types.types[field].alignSize);
+ }
+ } else if (field[0] == 'b') {
+ // bN, large number field, like a [N x i8]
+ size = field.substr(1)|0;
+ alignSize = 1;
+ } else if (field[0] === '<') {
+ // vector type
+ size = alignSize = Types.types[field].flatSize; // fully aligned
+ } else if (field[0] === 'i') {
+ // illegal integer field, that could not be legalized because it is an internal structure field
+ // it is ok to have such fields, if we just use them as markers of field size and nothing more complex
+ size = alignSize = parseInt(field.substr(1))/8;
+ assert(size % 1 === 0, 'cannot handle non-byte-size field ' + field);
+ } else {
+ assert(false, 'invalid type for calculateStructAlignment');
+ }
+ if (type.packed) alignSize = 1;
+ type.alignSize = Math.max(type.alignSize, alignSize);
+ var curr = Runtime.alignMemory(type.flatSize, alignSize); // if necessary, place this on aligned memory
+ type.flatSize = curr + size;
+ if (prev >= 0) {
+ diffs.push(curr-prev);
+ }
+ prev = curr;
+ return curr;
+ });
+ if (type.name_ && type.name_[0] === '[') {
+ // arrays have 2 elements, so we get the proper difference. then we scale here. that way we avoid
+ // allocating a potentially huge array for [999999 x i8] etc.
+ type.flatSize = parseInt(type.name_.substr(1))*type.flatSize/2;
+ }
+ type.flatSize = Runtime.alignMemory(type.flatSize, type.alignSize);
+ if (diffs.length == 0) {
+ type.flatFactor = type.flatSize;
+ } else if (Runtime.dedup(diffs).length == 1) {
+ type.flatFactor = diffs[0];
+ }
+ type.needsFlattening = (type.flatFactor != 1);
+ return type.flatIndexes;
+ },
+ generateStructInfo: function (struct, typeName, offset) {
+ var type, alignment;
+ if (typeName) {
+ offset = offset || 0;
+ type = (typeof Types === 'undefined' ? Runtime.typeInfo : Types.types)[typeName];
+ if (!type) return null;
+ if (type.fields.length != struct.length) {
+ printErr('Number of named fields must match the type for ' + typeName + ': possibly duplicate struct names. Cannot return structInfo');
+ return null;
+ }
+ alignment = type.flatIndexes;
+ } else {
+ var type = { fields: struct.map(function(item) { return item[0] }) };
+ alignment = Runtime.calculateStructAlignment(type);
+ }
+ var ret = {
+ __size__: type.flatSize
+ };
+ if (typeName) {
+ struct.forEach(function(item, i) {
+ if (typeof item === 'string') {
+ ret[item] = alignment[i] + offset;
+ } else {
+ // embedded struct
+ var key;
+ for (var k in item) key = k;
+ ret[key] = Runtime.generateStructInfo(item[key], type.fields[i], alignment[i]);
+ }
+ });
+ } else {
+ struct.forEach(function(item, i) {
+ ret[item[1]] = alignment[i];
+ });
+ }
+ return ret;
+ },
+ dynCall: function (sig, ptr, args) {
+ if (args && args.length) {
+ if (!args.splice) args = Array.prototype.slice.call(args);
+ args.splice(0, 0, ptr);
+ return Module['dynCall_' + sig].apply(null, args);
+ } else {
+ return Module['dynCall_' + sig].call(null, ptr);
+ }
+ },
+ functionPointers: [],
+ addFunction: function (func) {
+ for (var i = 0; i < Runtime.functionPointers.length; i++) {
+ if (!Runtime.functionPointers[i]) {
+ Runtime.functionPointers[i] = func;
+ return 2*(1 + i);
+ }
+ }
+ throw 'Finished up all reserved function pointers. Use a higher value for RESERVED_FUNCTION_POINTERS.';
+ },
+ removeFunction: function (index) {
+ Runtime.functionPointers[(index-2)/2] = null;
+ },
+ getAsmConst: function (code, numArgs) {
+ // code is a constant string on the heap, so we can cache these
+ if (!Runtime.asmConstCache) Runtime.asmConstCache = {};
+ var func = Runtime.asmConstCache[code];
+ if (func) return func;
+ var args = [];
+ for (var i = 0; i < numArgs; i++) {
+ args.push(String.fromCharCode(36) + i); // $0, $1 etc
+ }
+ var source = Pointer_stringify(code);
+ if (source[0] === '"') {
+ // tolerate EM_ASM("..code..") even though EM_ASM(..code..) is correct
+ if (source.indexOf('"', 1) === source.length-1) {
+ source = source.substr(1, source.length-2);
+ } else {
+ // something invalid happened, e.g. EM_ASM("..code($0)..", input)
+ abort('invalid EM_ASM input |' + source + '|. Please use EM_ASM(..code..) (no quotes) or EM_ASM({ ..code($0).. }, input) (to input values)');
+ }
+ }
+ try {
+ var evalled = eval('(function(' + args.join(',') + '){ ' + source + ' })'); // new Function does not allow upvars in node
+ } catch(e) {
+ Module.printErr('error in executing inline EM_ASM code: ' + e + ' on: \n\n' + source + '\n\nwith args |' + args + '| (make sure to use the right one out of EM_ASM, EM_ASM_ARGS, etc.)');
+ throw e;
+ }
+ return Runtime.asmConstCache[code] = evalled;
+ },
+ warnOnce: function (text) {
+ if (!Runtime.warnOnce.shown) Runtime.warnOnce.shown = {};
+ if (!Runtime.warnOnce.shown[text]) {
+ Runtime.warnOnce.shown[text] = 1;
+ Module.printErr(text);
+ }
+ },
+ funcWrappers: {},
+ getFuncWrapper: function (func, sig) {
+ assert(sig);
+ if (!Runtime.funcWrappers[func]) {
+ Runtime.funcWrappers[func] = function dynCall_wrapper() {
+ return Runtime.dynCall(sig, func, arguments);
+ };
+ }
+ return Runtime.funcWrappers[func];
+ },
+ UTF8Processor: function () {
+ var buffer = [];
+ var needed = 0;
+ this.processCChar = function (code) {
+ code = code & 0xFF;
+
+ if (buffer.length == 0) {
+ if ((code & 0x80) == 0x00) { // 0xxxxxxx
+ return String.fromCharCode(code);
+ }
+ buffer.push(code);
+ if ((code & 0xE0) == 0xC0) { // 110xxxxx
+ needed = 1;
+ } else if ((code & 0xF0) == 0xE0) { // 1110xxxx
+ needed = 2;
+ } else { // 11110xxx
+ needed = 3;
+ }
+ return '';
+ }
+
+ if (needed) {
+ buffer.push(code);
+ needed--;
+ if (needed > 0) return '';
+ }
+
+ var c1 = buffer[0];
+ var c2 = buffer[1];
+ var c3 = buffer[2];
+ var c4 = buffer[3];
+ var ret;
+ if (buffer.length == 2) {
+ ret = String.fromCharCode(((c1 & 0x1F) << 6) | (c2 & 0x3F));
+ } else if (buffer.length == 3) {
+ ret = String.fromCharCode(((c1 & 0x0F) << 12) | ((c2 & 0x3F) << 6) | (c3 & 0x3F));
+ } else {
+ // http://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
+ var codePoint = ((c1 & 0x07) << 18) | ((c2 & 0x3F) << 12) |
+ ((c3 & 0x3F) << 6) | (c4 & 0x3F);
+ ret = String.fromCharCode(
+ Math.floor((codePoint - 0x10000) / 0x400) + 0xD800,
+ (codePoint - 0x10000) % 0x400 + 0xDC00);
+ }
+ buffer.length = 0;
+ return ret;
+ }
+ this.processJSString = function processJSString(string) {
+ /* TODO: use TextEncoder when present,
+ var encoder = new TextEncoder();
+ encoder['encoding'] = "utf-8";
+ var utf8Array = encoder['encode'](aMsg.data);
+ */
+ string = unescape(encodeURIComponent(string));
+ var ret = [];
+ for (var i = 0; i < string.length; i++) {
+ ret.push(string.charCodeAt(i));
+ }
+ return ret;
+ }
+ },
+ getCompilerSetting: function (name) {
+ throw 'You must build with -s RETAIN_COMPILER_SETTINGS=1 for Runtime.getCompilerSetting or emscripten_get_compiler_setting to work';
+ },
+ stackAlloc: function (size) { var ret = STACKTOP;STACKTOP = (STACKTOP + size)|0;STACKTOP = (((STACKTOP)+7)&-8); return ret; },
+ staticAlloc: function (size) { var ret = STATICTOP;STATICTOP = (STATICTOP + size)|0;STATICTOP = (((STATICTOP)+7)&-8); return ret; },
+ dynamicAlloc: function (size) { var ret = DYNAMICTOP;DYNAMICTOP = (DYNAMICTOP + size)|0;DYNAMICTOP = (((DYNAMICTOP)+7)&-8); if (DYNAMICTOP >= TOTAL_MEMORY) enlargeMemory();; return ret; },
+ alignMemory: function (size,quantum) { var ret = size = Math.ceil((size)/(quantum ? quantum : 8))*(quantum ? quantum : 8); return ret; },
+ makeBigInt: function (low,high,unsigned) { var ret = (unsigned ? ((+((low>>>0)))+((+((high>>>0)))*(+4294967296))) : ((+((low>>>0)))+((+((high|0)))*(+4294967296)))); return ret; },
+ GLOBAL_BASE: 8,
+ QUANTUM_SIZE: 4,
+ __dummy__: 0
+}
+
+
+Module['Runtime'] = Runtime;
+
+
+
+
+
+
+
+
+
+//========================================
+// Runtime essentials
+//========================================
+
+var __THREW__ = 0; // Used in checking for thrown exceptions.
+
+var ABORT = false; // whether we are quitting the application. no code should run after this. set in exit() and abort()
+var EXITSTATUS = 0;
+
+var undef = 0;
+// tempInt is used for 32-bit signed values or smaller. tempBigInt is used
+// for 32-bit unsigned values or more than 32 bits. TODO: audit all uses of tempInt
+var tempValue, tempInt, tempBigInt, tempInt2, tempBigInt2, tempPair, tempBigIntI, tempBigIntR, tempBigIntS, tempBigIntP, tempBigIntD, tempDouble, tempFloat;
+var tempI64, tempI64b;
+var tempRet0, tempRet1, tempRet2, tempRet3, tempRet4, tempRet5, tempRet6, tempRet7, tempRet8, tempRet9;
+
+function assert(condition, text) {
+ if (!condition) {
+ abort('Assertion failed: ' + text);
+ }
+}
+
+var globalScope = this;
+
+// C calling interface. A convenient way to call C functions (in C files, or
+// defined with extern "C").
+//
+// Note: LLVM optimizations can inline and remove functions, after which you will not be
+// able to call them. Closure can also do so. To avoid that, add your function to
+// the exports using something like
+//
+// -s EXPORTED_FUNCTIONS='["_main", "_myfunc"]'
+//
+// @param ident The name of the C function (note that C++ functions will be name-mangled - use extern "C")
+// @param returnType The return type of the function, one of the JS types 'number', 'string' or 'array' (use 'number' for any C pointer, and
+// 'array' for JavaScript arrays and typed arrays; note that arrays are 8-bit).
+// @param argTypes An array of the types of arguments for the function (if there are no arguments, this can be ommitted). Types are as in returnType,
+// except that 'array' is not possible (there is no way for us to know the length of the array)
+// @param args An array of the arguments to the function, as native JS values (as in returnType)
+// Note that string arguments will be stored on the stack (the JS string will become a C string on the stack).
+// @return The return value, as a native JS value (as in returnType)
+function ccall(ident, returnType, argTypes, args) {
+ return ccallFunc(getCFunc(ident), returnType, argTypes, args);
+}
+Module["ccall"] = ccall;
+
+// Returns the C function with a specified identifier (for C++, you need to do manual name mangling)
+function getCFunc(ident) {
+ try {
+ var func = Module['_' + ident]; // closure exported function
+ if (!func) func = eval('_' + ident); // explicit lookup
+ } catch(e) {
+ }
+ assert(func, 'Cannot call unknown function ' + ident + ' (perhaps LLVM optimizations or closure removed it?)');
+ return func;
+}
+
+// Internal function that does a C call using a function, not an identifier
+function ccallFunc(func, returnType, argTypes, args) {
+ var stack = 0;
+ function toC(value, type) {
+ if (type == 'string') {
+ if (value === null || value === undefined || value === 0) return 0; // null string
+ value = intArrayFromString(value);
+ type = 'array';
+ }
+ if (type == 'array') {
+ if (!stack) stack = Runtime.stackSave();
+ var ret = Runtime.stackAlloc(value.length);
+ writeArrayToMemory(value, ret);
+ return ret;
+ }
+ return value;
+ }
+ function fromC(value, type) {
+ if (type == 'string') {
+ return Pointer_stringify(value);
+ }
+ assert(type != 'array');
+ return value;
+ }
+ var i = 0;
+ var cArgs = args ? args.map(function(arg) {
+ return toC(arg, argTypes[i++]);
+ }) : [];
+ var ret = fromC(func.apply(null, cArgs), returnType);
+ if (stack) Runtime.stackRestore(stack);
+ return ret;
+}
+
+// Returns a native JS wrapper for a C function. This is similar to ccall, but
+// returns a function you can call repeatedly in a normal way. For example:
+//
+// var my_function = cwrap('my_c_function', 'number', ['number', 'number']);
+// alert(my_function(5, 22));
+// alert(my_function(99, 12));
+//
+function cwrap(ident, returnType, argTypes) {
+ var func = getCFunc(ident);
+ return function() {
+ return ccallFunc(func, returnType, argTypes, Array.prototype.slice.call(arguments));
+ }
+}
+Module["cwrap"] = cwrap;
+
+// Sets a value in memory in a dynamic way at run-time. Uses the
+// type data. This is the same as makeSetValue, except that
+// makeSetValue is done at compile-time and generates the needed
+// code then, whereas this function picks the right code at
+// run-time.
+// Note that setValue and getValue only do *aligned* writes and reads!
+// Note that ccall uses JS types as for defining types, while setValue and
+// getValue need LLVM types ('i8', 'i32') - this is a lower-level operation
+function setValue(ptr, value, type, noSafe) {
+ type = type || 'i8';
+ if (type.charAt(type.length-1) === '*') type = 'i32'; // pointers are 32-bit
+ switch(type) {
+ case 'i1': HEAP8[(ptr)]=value; break;
+ case 'i8': HEAP8[(ptr)]=value; break;
+ case 'i16': HEAP16[((ptr)>>1)]=value; break;
+ case 'i32': HEAP32[((ptr)>>2)]=value; break;
+ case 'i64': (tempI64 = [value>>>0,(tempDouble=value,(+(Math_abs(tempDouble))) >= (+1) ? (tempDouble > (+0) ? ((Math_min((+(Math_floor((tempDouble)/(+4294967296)))), (+4294967295)))|0)>>>0 : (~~((+(Math_ceil((tempDouble - +(((~~(tempDouble)))>>>0))/(+4294967296))))))>>>0) : 0)],HEAP32[((ptr)>>2)]=tempI64[0],HEAP32[(((ptr)+(4))>>2)]=tempI64[1]); break;
+ case 'float': HEAPF32[((ptr)>>2)]=value; break;
+ case 'double': HEAPF64[((ptr)>>3)]=value; break;
+ default: abort('invalid type for setValue: ' + type);
+ }
+}
+Module['setValue'] = setValue;
+
+// Parallel to setValue.
+function getValue(ptr, type, noSafe) {
+ type = type || 'i8';
+ if (type.charAt(type.length-1) === '*') type = 'i32'; // pointers are 32-bit
+ switch(type) {
+ case 'i1': return HEAP8[(ptr)];
+ case 'i8': return HEAP8[(ptr)];
+ case 'i16': return HEAP16[((ptr)>>1)];
+ case 'i32': return HEAP32[((ptr)>>2)];
+ case 'i64': return HEAP32[((ptr)>>2)];
+ case 'float': return HEAPF32[((ptr)>>2)];
+ case 'double': return HEAPF64[((ptr)>>3)];
+ default: abort('invalid type for setValue: ' + type);
+ }
+ return null;
+}
+Module['getValue'] = getValue;
+
+var ALLOC_NORMAL = 0; // Tries to use _malloc()
+var ALLOC_STACK = 1; // Lives for the duration of the current function call
+var ALLOC_STATIC = 2; // Cannot be freed
+var ALLOC_DYNAMIC = 3; // Cannot be freed except through sbrk
+var ALLOC_NONE = 4; // Do not allocate
+Module['ALLOC_NORMAL'] = ALLOC_NORMAL;
+Module['ALLOC_STACK'] = ALLOC_STACK;
+Module['ALLOC_STATIC'] = ALLOC_STATIC;
+Module['ALLOC_DYNAMIC'] = ALLOC_DYNAMIC;
+Module['ALLOC_NONE'] = ALLOC_NONE;
+
+// allocate(): This is for internal use. You can use it yourself as well, but the interface
+// is a little tricky (see docs right below). The reason is that it is optimized
+// for multiple syntaxes to save space in generated code. So you should
+// normally not use allocate(), and instead allocate memory using _malloc(),
+// initialize it with setValue(), and so forth.
+// @slab: An array of data, or a number. If a number, then the size of the block to allocate,
+// in *bytes* (note that this is sometimes confusing: the next parameter does not
+// affect this!)
+// @types: Either an array of types, one for each byte (or 0 if no type at that position),
+// or a single type which is used for the entire block. This only matters if there
+// is initial data - if @slab is a number, then this does not matter at all and is
+// ignored.
+// @allocator: How to allocate memory, see ALLOC_*
+function allocate(slab, types, allocator, ptr) {
+ var zeroinit, size;
+ if (typeof slab === 'number') {
+ zeroinit = true;
+ size = slab;
+ } else {
+ zeroinit = false;
+ size = slab.length;
+ }
+
+ var singleType = typeof types === 'string' ? types : null;
+
+ var ret;
+ if (allocator == ALLOC_NONE) {
+ ret = ptr;
+ } else {
+ ret = [_malloc, Runtime.stackAlloc, Runtime.staticAlloc, Runtime.dynamicAlloc][allocator === undefined ? ALLOC_STATIC : allocator](Math.max(size, singleType ? 1 : types.length));
+ }
+
+ if (zeroinit) {
+ var ptr = ret, stop;
+ assert((ret & 3) == 0);
+ stop = ret + (size & ~3);
+ for (; ptr < stop; ptr += 4) {
+ HEAP32[((ptr)>>2)]=0;
+ }
+ stop = ret + size;
+ while (ptr < stop) {
+ HEAP8[((ptr++)|0)]=0;
+ }
+ return ret;
+ }
+
+ if (singleType === 'i8') {
+ if (slab.subarray || slab.slice) {
+ HEAPU8.set(slab, ret);
+ } else {
+ HEAPU8.set(new Uint8Array(slab), ret);
+ }
+ return ret;
+ }
+
+ var i = 0, type, typeSize, previousType;
+ while (i < size) {
+ var curr = slab[i];
+
+ if (typeof curr === 'function') {
+ curr = Runtime.getFunctionIndex(curr);
+ }
+
+ type = singleType || types[i];
+ if (type === 0) {
+ i++;
+ continue;
+ }
+
+ if (type == 'i64') type = 'i32'; // special case: we have one i32 here, and one i32 later
+
+ setValue(ret+i, curr, type);
+
+ // no need to look up size unless type changes, so cache it
+ if (previousType !== type) {
+ typeSize = Runtime.getNativeTypeSize(type);
+ previousType = type;
+ }
+ i += typeSize;
+ }
+
+ return ret;
+}
+Module['allocate'] = allocate;
+
+function Pointer_stringify(ptr, /* optional */ length) {
+ // TODO: use TextDecoder
+ // Find the length, and check for UTF while doing so
+ var hasUtf = false;
+ var t;
+ var i = 0;
+ while (1) {
+ t = HEAPU8[(((ptr)+(i))|0)];
+ if (t >= 128) hasUtf = true;
+ else if (t == 0 && !length) break;
+ i++;
+ if (length && i == length) break;
+ }
+ if (!length) length = i;
+
+ var ret = '';
+
+ if (!hasUtf) {
+ var MAX_CHUNK = 1024; // split up into chunks, because .apply on a huge string can overflow the stack
+ var curr;
+ while (length > 0) {
+ curr = String.fromCharCode.apply(String, HEAPU8.subarray(ptr, ptr + Math.min(length, MAX_CHUNK)));
+ ret = ret ? ret + curr : curr;
+ ptr += MAX_CHUNK;
+ length -= MAX_CHUNK;
+ }
+ return ret;
+ }
+
+ var utf8 = new Runtime.UTF8Processor();
+ for (i = 0; i < length; i++) {
+ t = HEAPU8[(((ptr)+(i))|0)];
+ ret += utf8.processCChar(t);
+ }
+ return ret;
+}
+Module['Pointer_stringify'] = Pointer_stringify;
+
+// Given a pointer 'ptr' to a null-terminated UTF16LE-encoded string in the emscripten HEAP, returns
+// a copy of that string as a Javascript String object.
+function UTF16ToString(ptr) {
+ var i = 0;
+
+ var str = '';
+ while (1) {
+ var codeUnit = HEAP16[(((ptr)+(i*2))>>1)];
+ if (codeUnit == 0)
+ return str;
+ ++i;
+ // fromCharCode constructs a character from a UTF-16 code unit, so we can pass the UTF16 string right through.
+ str += String.fromCharCode(codeUnit);
+ }
+}
+Module['UTF16ToString'] = UTF16ToString;
+
+// Copies the given Javascript String object 'str' to the emscripten HEAP at address 'outPtr',
+// null-terminated and encoded in UTF16LE form. The copy will require at most (str.length*2+1)*2 bytes of space in the HEAP.
+function stringToUTF16(str, outPtr) {
+ for(var i = 0; i < str.length; ++i) {
+ // charCodeAt returns a UTF-16 encoded code unit, so it can be directly written to the HEAP.
+ var codeUnit = str.charCodeAt(i); // possibly a lead surrogate
+ HEAP16[(((outPtr)+(i*2))>>1)]=codeUnit;
+ }
+ // Null-terminate the pointer to the HEAP.
+ HEAP16[(((outPtr)+(str.length*2))>>1)]=0;
+}
+Module['stringToUTF16'] = stringToUTF16;
+
+// Given a pointer 'ptr' to a null-terminated UTF32LE-encoded string in the emscripten HEAP, returns
+// a copy of that string as a Javascript String object.
+function UTF32ToString(ptr) {
+ var i = 0;
+
+ var str = '';
+ while (1) {
+ var utf32 = HEAP32[(((ptr)+(i*4))>>2)];
+ if (utf32 == 0)
+ return str;
+ ++i;
+ // Gotcha: fromCharCode constructs a character from a UTF-16 encoded code (pair), not from a Unicode code point! So encode the code point to UTF-16 for constructing.
+ if (utf32 >= 0x10000) {
+ var ch = utf32 - 0x10000;
+ str += String.fromCharCode(0xD800 | (ch >> 10), 0xDC00 | (ch & 0x3FF));
+ } else {
+ str += String.fromCharCode(utf32);
+ }
+ }
+}
+Module['UTF32ToString'] = UTF32ToString;
+
+// Copies the given Javascript String object 'str' to the emscripten HEAP at address 'outPtr',
+// null-terminated and encoded in UTF32LE form. The copy will require at most (str.length+1)*4 bytes of space in the HEAP,
+// but can use less, since str.length does not return the number of characters in the string, but the number of UTF-16 code units in the string.
+function stringToUTF32(str, outPtr) {
+ var iChar = 0;
+ for(var iCodeUnit = 0; iCodeUnit < str.length; ++iCodeUnit) {
+ // Gotcha: charCodeAt returns a 16-bit word that is a UTF-16 encoded code unit, not a Unicode code point of the character! We must decode the string to UTF-32 to the heap.
+ var codeUnit = str.charCodeAt(iCodeUnit); // possibly a lead surrogate
+ if (codeUnit >= 0xD800 && codeUnit <= 0xDFFF) {
+ var trailSurrogate = str.charCodeAt(++iCodeUnit);
+ codeUnit = 0x10000 + ((codeUnit & 0x3FF) << 10) | (trailSurrogate & 0x3FF);
+ }
+ HEAP32[(((outPtr)+(iChar*4))>>2)]=codeUnit;
+ ++iChar;
+ }
+ // Null-terminate the pointer to the HEAP.
+ HEAP32[(((outPtr)+(iChar*4))>>2)]=0;
+}
+Module['stringToUTF32'] = stringToUTF32;
+
+function demangle(func) {
+ var i = 3;
+ // params, etc.
+ var basicTypes = {
+ 'v': 'void',
+ 'b': 'bool',
+ 'c': 'char',
+ 's': 'short',
+ 'i': 'int',
+ 'l': 'long',
+ 'f': 'float',
+ 'd': 'double',
+ 'w': 'wchar_t',
+ 'a': 'signed char',
+ 'h': 'unsigned char',
+ 't': 'unsigned short',
+ 'j': 'unsigned int',
+ 'm': 'unsigned long',
+ 'x': 'long long',
+ 'y': 'unsigned long long',
+ 'z': '...'
+ };
+ var subs = [];
+ var first = true;
+ function dump(x) {
+ //return;
+ if (x) Module.print(x);
+ Module.print(func);
+ var pre = '';
+ for (var a = 0; a < i; a++) pre += ' ';
+ Module.print (pre + '^');
+ }
+ function parseNested() {
+ i++;
+ if (func[i] === 'K') i++; // ignore const
+ var parts = [];
+ while (func[i] !== 'E') {
+ if (func[i] === 'S') { // substitution
+ i++;
+ var next = func.indexOf('_', i);
+ var num = func.substring(i, next) || 0;
+ parts.push(subs[num] || '?');
+ i = next+1;
+ continue;
+ }
+ if (func[i] === 'C') { // constructor
+ parts.push(parts[parts.length-1]);
+ i += 2;
+ continue;
+ }
+ var size = parseInt(func.substr(i));
+ var pre = size.toString().length;
+ if (!size || !pre) { i--; break; } // counter i++ below us
+ var curr = func.substr(i + pre, size);
+ parts.push(curr);
+ subs.push(curr);
+ i += pre + size;
+ }
+ i++; // skip E
+ return parts;
+ }
+ function parse(rawList, limit, allowVoid) { // main parser
+ limit = limit || Infinity;
+ var ret = '', list = [];
+ function flushList() {
+ return '(' + list.join(', ') + ')';
+ }
+ var name;
+ if (func[i] === 'N') {
+ // namespaced N-E
+ name = parseNested().join('::');
+ limit--;
+ if (limit === 0) return rawList ? [name] : name;
+ } else {
+ // not namespaced
+ if (func[i] === 'K' || (first && func[i] === 'L')) i++; // ignore const and first 'L'
+ var size = parseInt(func.substr(i));
+ if (size) {
+ var pre = size.toString().length;
+ name = func.substr(i + pre, size);
+ i += pre + size;
+ }
+ }
+ first = false;
+ if (func[i] === 'I') {
+ i++;
+ var iList = parse(true);
+ var iRet = parse(true, 1, true);
+ ret += iRet[0] + ' ' + name + '<' + iList.join(', ') + '>';
+ } else {
+ ret = name;
+ }
+ paramLoop: while (i < func.length && limit-- > 0) {
+ //dump('paramLoop');
+ var c = func[i++];
+ if (c in basicTypes) {
+ list.push(basicTypes[c]);
+ } else {
+ switch (c) {
+ case 'P': list.push(parse(true, 1, true)[0] + '*'); break; // pointer
+ case 'R': list.push(parse(true, 1, true)[0] + '&'); break; // reference
+ case 'L': { // literal
+ i++; // skip basic type
+ var end = func.indexOf('E', i);
+ var size = end - i;
+ list.push(func.substr(i, size));
+ i += size + 2; // size + 'EE'
+ break;
+ }
+ case 'A': { // array
+ var size = parseInt(func.substr(i));
+ i += size.toString().length;
+ if (func[i] !== '_') throw '?';
+ i++; // skip _
+ list.push(parse(true, 1, true)[0] + ' [' + size + ']');
+ break;
+ }
+ case 'E': break paramLoop;
+ default: ret += '?' + c; break paramLoop;
+ }
+ }
+ }
+ if (!allowVoid && list.length === 1 && list[0] === 'void') list = []; // avoid (void)
+ if (rawList) {
+ if (ret) {
+ list.push(ret + '?');
+ }
+ return list;
+ } else {
+ return ret + flushList();
+ }
+ }
+ try {
+ // Special-case the entry point, since its name differs from other name mangling.
+ if (func == 'Object._main' || func == '_main') {
+ return 'main()';
+ }
+ if (typeof func === 'number') func = Pointer_stringify(func);
+ if (func[0] !== '_') return func;
+ if (func[1] !== '_') return func; // C function
+ if (func[2] !== 'Z') return func;
+ switch (func[3]) {
+ case 'n': return 'operator new()';
+ case 'd': return 'operator delete()';
+ }
+ return parse();
+ } catch(e) {
+ return func;
+ }
+}
+
+function demangleAll(text) {
+ return text.replace(/__Z[\w\d_]+/g, function(x) { var y = demangle(x); return x === y ? x : (x + ' [' + y + ']') });
+}
+
+function stackTrace() {
+ var stack = new Error().stack;
+ return stack ? demangleAll(stack) : '(no stack trace available)'; // Stack trace is not available at least on IE10 and Safari 6.
+}
+
+// Memory management
+
+var PAGE_SIZE = 4096;
+function alignMemoryPage(x) {
+ return (x+4095)&-4096;
+}
+
+var HEAP;
+var HEAP8, HEAPU8, HEAP16, HEAPU16, HEAP32, HEAPU32, HEAPF32, HEAPF64;
+
+var STATIC_BASE = 0, STATICTOP = 0, staticSealed = false; // static area
+var STACK_BASE = 0, STACKTOP = 0, STACK_MAX = 0; // stack area
+var DYNAMIC_BASE = 0, DYNAMICTOP = 0; // dynamic area handled by sbrk
+
+function enlargeMemory() {
+ abort('Cannot enlarge memory arrays. Either (1) compile with -s TOTAL_MEMORY=X with X higher than the current value ' + TOTAL_MEMORY + ', (2) compile with ALLOW_MEMORY_GROWTH which adjusts the size at runtime but prevents some optimizations, or (3) set Module.TOTAL_MEMORY before the program runs.');
+}
+
+var TOTAL_STACK = Module['TOTAL_STACK'] || 5242880;
+var TOTAL_MEMORY = Module['TOTAL_MEMORY'] || 134217728;
+var FAST_MEMORY = Module['FAST_MEMORY'] || 2097152;
+
+var totalMemory = 4096;
+while (totalMemory < TOTAL_MEMORY || totalMemory < 2*TOTAL_STACK) {
+ if (totalMemory < 16*1024*1024) {
+ totalMemory *= 2;
+ } else {
+ totalMemory += 16*1024*1024
+ }
+}
+if (totalMemory !== TOTAL_MEMORY) {
+ Module.printErr('increasing TOTAL_MEMORY to ' + totalMemory + ' to be more reasonable');
+ TOTAL_MEMORY = totalMemory;
+}
+
+// Initialize the runtime's memory
+// check for full engine support (use string 'subarray' to avoid closure compiler confusion)
+assert(typeof Int32Array !== 'undefined' && typeof Float64Array !== 'undefined' && !!(new Int32Array(1)['subarray']) && !!(new Int32Array(1)['set']),
+ 'JS engine does not provide full typed array support');
+
+var buffer = new ArrayBuffer(TOTAL_MEMORY);
+HEAP8 = new Int8Array(buffer);
+HEAP16 = new Int16Array(buffer);
+HEAP32 = new Int32Array(buffer);
+HEAPU8 = new Uint8Array(buffer);
+HEAPU16 = new Uint16Array(buffer);
+HEAPU32 = new Uint32Array(buffer);
+HEAPF32 = new Float32Array(buffer);
+HEAPF64 = new Float64Array(buffer);
+
+// Endianness check (note: assumes compiler arch was little-endian)
+HEAP32[0] = 255;
+assert(HEAPU8[0] === 255 && HEAPU8[3] === 0, 'Typed arrays 2 must be run on a little-endian system');
+
+Module['HEAP'] = HEAP;
+Module['HEAP8'] = HEAP8;
+Module['HEAP16'] = HEAP16;
+Module['HEAP32'] = HEAP32;
+Module['HEAPU8'] = HEAPU8;
+Module['HEAPU16'] = HEAPU16;
+Module['HEAPU32'] = HEAPU32;
+Module['HEAPF32'] = HEAPF32;
+Module['HEAPF64'] = HEAPF64;
+
+function callRuntimeCallbacks(callbacks) {
+ while(callbacks.length > 0) {
+ var callback = callbacks.shift();
+ if (typeof callback == 'function') {
+ callback();
+ continue;
+ }
+ var func = callback.func;
+ if (typeof func === 'number') {
+ if (callback.arg === undefined) {
+ Runtime.dynCall('v', func);
+ } else {
+ Runtime.dynCall('vi', func, [callback.arg]);
+ }
+ } else {
+ func(callback.arg === undefined ? null : callback.arg);
+ }
+ }
+}
+
+var __ATPRERUN__ = []; // functions called before the runtime is initialized
+var __ATINIT__ = []; // functions called during startup
+var __ATMAIN__ = []; // functions called when main() is to be run
+var __ATEXIT__ = []; // functions called during shutdown
+var __ATPOSTRUN__ = []; // functions called after the runtime has exited
+
+var runtimeInitialized = false;
+
+function preRun() {
+ // compatibility - merge in anything from Module['preRun'] at this time
+ if (Module['preRun']) {
+ if (typeof Module['preRun'] == 'function') Module['preRun'] = [Module['preRun']];
+ while (Module['preRun'].length) {
+ addOnPreRun(Module['preRun'].shift());
+ }
+ }
+ callRuntimeCallbacks(__ATPRERUN__);
+}
+
+function ensureInitRuntime() {
+ if (runtimeInitialized) return;
+ runtimeInitialized = true;
+ callRuntimeCallbacks(__ATINIT__);
+}
+
+function preMain() {
+ callRuntimeCallbacks(__ATMAIN__);
+}
+
+function exitRuntime() {
+ callRuntimeCallbacks(__ATEXIT__);
+}
+
+function postRun() {
+ // compatibility - merge in anything from Module['postRun'] at this time
+ if (Module['postRun']) {
+ if (typeof Module['postRun'] == 'function') Module['postRun'] = [Module['postRun']];
+ while (Module['postRun'].length) {
+ addOnPostRun(Module['postRun'].shift());
+ }
+ }
+ callRuntimeCallbacks(__ATPOSTRUN__);
+}
+
+function addOnPreRun(cb) {
+ __ATPRERUN__.unshift(cb);
+}
+Module['addOnPreRun'] = Module.addOnPreRun = addOnPreRun;
+
+function addOnInit(cb) {
+ __ATINIT__.unshift(cb);
+}
+Module['addOnInit'] = Module.addOnInit = addOnInit;
+
+function addOnPreMain(cb) {
+ __ATMAIN__.unshift(cb);
+}
+Module['addOnPreMain'] = Module.addOnPreMain = addOnPreMain;
+
+function addOnExit(cb) {
+ __ATEXIT__.unshift(cb);
+}
+Module['addOnExit'] = Module.addOnExit = addOnExit;
+
+function addOnPostRun(cb) {
+ __ATPOSTRUN__.unshift(cb);
+}
+Module['addOnPostRun'] = Module.addOnPostRun = addOnPostRun;
+
+// Tools
+
+// This processes a JS string into a C-line array of numbers, 0-terminated.
+// For LLVM-originating strings, see parser.js:parseLLVMString function
+function intArrayFromString(stringy, dontAddNull, length /* optional */) {
+ var ret = (new Runtime.UTF8Processor()).processJSString(stringy);
+ if (length) {
+ ret.length = length;
+ }
+ if (!dontAddNull) {
+ ret.push(0);
+ }
+ return ret;
+}
+Module['intArrayFromString'] = intArrayFromString;
+
+function intArrayToString(array) {
+ var ret = [];
+ for (var i = 0; i < array.length; i++) {
+ var chr = array[i];
+ if (chr > 0xFF) {
+ chr &= 0xFF;
+ }
+ ret.push(String.fromCharCode(chr));
+ }
+ return ret.join('');
+}
+Module['intArrayToString'] = intArrayToString;
+
+// Write a Javascript array to somewhere in the heap
+function writeStringToMemory(string, buffer, dontAddNull) {
+ var array = intArrayFromString(string, dontAddNull);
+ var i = 0;
+ while (i < array.length) {
+ var chr = array[i];
+ HEAP8[(((buffer)+(i))|0)]=chr;
+ i = i + 1;
+ }
+}
+Module['writeStringToMemory'] = writeStringToMemory;
+
+function writeArrayToMemory(array, buffer) {
+ for (var i = 0; i < array.length; i++) {
+ HEAP8[(((buffer)+(i))|0)]=array[i];
+ }
+}
+Module['writeArrayToMemory'] = writeArrayToMemory;
+
+function writeAsciiToMemory(str, buffer, dontAddNull) {
+ for (var i = 0; i < str.length; i++) {
+ HEAP8[(((buffer)+(i))|0)]=str.charCodeAt(i);
+ }
+ if (!dontAddNull) HEAP8[(((buffer)+(str.length))|0)]=0;
+}
+Module['writeAsciiToMemory'] = writeAsciiToMemory;
+
+function unSign(value, bits, ignore) {
+ if (value >= 0) {
+ return value;
+ }
+ return bits <= 32 ? 2*Math.abs(1 << (bits-1)) + value // Need some trickery, since if bits == 32, we are right at the limit of the bits JS uses in bitshifts
+ : Math.pow(2, bits) + value;
+}
+function reSign(value, bits, ignore) {
+ if (value <= 0) {
+ return value;
+ }
+ var half = bits <= 32 ? Math.abs(1 << (bits-1)) // abs is needed if bits == 32
+ : Math.pow(2, bits-1);
+ if (value >= half && (bits <= 32 || value > half)) { // for huge values, we can hit the precision limit and always get true here. so don't do that
+ // but, in general there is no perfect solution here. With 64-bit ints, we get rounding and errors
+ // TODO: In i64 mode 1, resign the two parts separately and safely
+ value = -2*half + value; // Cannot bitshift half, as it may be at the limit of the bits JS uses in bitshifts
+ }
+ return value;
+}
+
+// check for imul support, and also for correctness ( https://bugs.webkit.org/show_bug.cgi?id=126345 )
+if (!Math['imul'] || Math['imul'](0xffffffff, 5) !== -5) Math['imul'] = function imul(a, b) {
+ var ah = a >>> 16;
+ var al = a & 0xffff;
+ var bh = b >>> 16;
+ var bl = b & 0xffff;
+ return (al*bl + ((ah*bl + al*bh) << 16))|0;
+};
+Math.imul = Math['imul'];
+
+
+var Math_abs = Math.abs;
+var Math_cos = Math.cos;
+var Math_sin = Math.sin;
+var Math_tan = Math.tan;
+var Math_acos = Math.acos;
+var Math_asin = Math.asin;
+var Math_atan = Math.atan;
+var Math_atan2 = Math.atan2;
+var Math_exp = Math.exp;
+var Math_log = Math.log;
+var Math_sqrt = Math.sqrt;
+var Math_ceil = Math.ceil;
+var Math_floor = Math.floor;
+var Math_pow = Math.pow;
+var Math_imul = Math.imul;
+var Math_fround = Math.fround;
+var Math_min = Math.min;
+
+// A counter of dependencies for calling run(). If we need to
+// do asynchronous work before running, increment this and
+// decrement it. Incrementing must happen in a place like
+// PRE_RUN_ADDITIONS (used by emcc to add file preloading).
+// Note that you can add dependencies in preRun, even though
+// it happens right before run - run will be postponed until
+// the dependencies are met.
+var runDependencies = 0;
+var runDependencyWatcher = null;
+var dependenciesFulfilled = null; // overridden to take different actions when all run dependencies are fulfilled
+
+function addRunDependency(id) {
+ runDependencies++;
+ if (Module['monitorRunDependencies']) {
+ Module['monitorRunDependencies'](runDependencies);
+ }
+}
+Module['addRunDependency'] = addRunDependency;
+function removeRunDependency(id) {
+ runDependencies--;
+ if (Module['monitorRunDependencies']) {
+ Module['monitorRunDependencies'](runDependencies);
+ }
+ if (runDependencies == 0) {
+ if (runDependencyWatcher !== null) {
+ clearInterval(runDependencyWatcher);
+ runDependencyWatcher = null;
+ }
+ if (dependenciesFulfilled) {
+ var callback = dependenciesFulfilled;
+ dependenciesFulfilled = null;
+ callback(); // can add another dependenciesFulfilled
+ }
+ }
+}
+Module['removeRunDependency'] = removeRunDependency;
+
+Module["preloadedImages"] = {}; // maps url to image data
+Module["preloadedAudios"] = {}; // maps url to audio data
+
+
+var memoryInitializer = null;
+
+// === Body ===
+
+
+
+
+
+STATIC_BASE = 8;
+
+STATICTOP = STATIC_BASE + Runtime.alignMemory(1155);
+/* global initializers */ __ATINIT__.push();
+
+
+/* memory initializer */ allocate([38,2,0,0,0,0,0,0,42,0,0,0,0,0,0,0,97,0,0,0,113,61,138,62,0,0,0,0,99,0,0,0,143,194,245,61,0,0,0,0,103,0,0,0,143,194,245,61,0,0,0,0,116,0,0,0,113,61,138,62,0,0,0,0,66,0,0,0,10,215,163,60,0,0,0,0,68,0,0,0,10,215,163,60,0,0,0,0,72,0,0,0,10,215,163,60,0,0,0,0,75,0,0,0,10,215,163,60,0,0,0,0,77,0,0,0,10,215,163,60,0,0,0,0,78,0,0,0,10,215,163,60,0,0,0,0,82,0,0,0,10,215,163,60,0,0,0,0,83,0,0,0,10,215,163,60,0,0,0,0,86,0,0,0,10,215,163,60,0,0,0,0,87,0,0,0,10,215,163,60,0,0,0,0,89,0,0,0,10,215,163,60,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,97,0,0,0,233,28,155,62,0,0,0,0,99,0,0,0,114,189,74,62,0,0,0,0,103,0,0,0,215,73,74,62,0,0,0,0,116,0,0,0,114,95,154,62,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,101,114,114,111,114,58,32,37,100,10,0,0,0,0,0,0,71,71,67,67,71,71,71,67,71,67,71,71,84,71,71,67,84,67,65,67,71,67,67,84,71,84,65,65,84,67,67,67,65,71,67,65,67,84,84,84,71,71,71,65,71,71,67,67,71,65,71,71,67,71,71,71,67,71,71,65,84,67,65,67,67,84,71,65,71,71,84,67,65,71,71,65,71,84,84,67,71,65,71,65,67,67,65,71,67,67,84,71,71,67,67,65,65,67,65,84,71,71,84,71,65,65,65,67,67,67,67,71,84,67,84,67,84,65,67,84,65,65,65,65,65,84,65,67,65,65,65,65,65,84,84,65,71,67,67,71,71,71,67,71,84,71,71,84,71,71,67,71,67,71,67,71,67,67,84,71,84,65,65,84,67,67,67,65,71,67,84,65,67,84,67,71,71,71,65,71,71,67,84,71,65,71,71,67,65,71,71,65,71,65,65,84,67,71,67,84,84,71,65,65,67,67,67,71,71,71,65,71,71,67,71,71,65,71,71,84,84,71,67,65,71,84,71,65,71,67,67,71,65,71,65,84,67,71,67,71,67,67,65,67,84,71,67,65,67,84,67,67,65,71,67,67,84,71,71,71,67,71,65,67,65,71,65,71,67,71,65,71,65,67,84,67,67,71,84,67,84,67,65,65,65,65,65,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,120,4,0,0,1,0,0,0,2,0,0,0,1,0,0,0,0,0,0,0,115,116,100,58,58,98,97,100,95,97,108,108,111,99,0,0,83,116,57,98,97,100,95,97,108,108,111,99,0,0,0,0,8,0,0,0,104,4,0,0,0,0,0,0,0,0,0,0], "i8", ALLOC_NONE, Runtime.GLOBAL_BASE);
+
+
+
+
+var tempDoublePtr = Runtime.alignMemory(allocate(12, "i8", ALLOC_STATIC), 8);
+
+assert(tempDoublePtr % 8 == 0);
+
+function copyTempFloat(ptr) { // functions, because inlining this code increases code size too much
+
+ HEAP8[tempDoublePtr] = HEAP8[ptr];
+
+ HEAP8[tempDoublePtr+1] = HEAP8[ptr+1];
+
+ HEAP8[tempDoublePtr+2] = HEAP8[ptr+2];
+
+ HEAP8[tempDoublePtr+3] = HEAP8[ptr+3];
+
+}
+
+function copyTempDouble(ptr) {
+
+ HEAP8[tempDoublePtr] = HEAP8[ptr];
+
+ HEAP8[tempDoublePtr+1] = HEAP8[ptr+1];
+
+ HEAP8[tempDoublePtr+2] = HEAP8[ptr+2];
+
+ HEAP8[tempDoublePtr+3] = HEAP8[ptr+3];
+
+ HEAP8[tempDoublePtr+4] = HEAP8[ptr+4];
+
+ HEAP8[tempDoublePtr+5] = HEAP8[ptr+5];
+
+ HEAP8[tempDoublePtr+6] = HEAP8[ptr+6];
+
+ HEAP8[tempDoublePtr+7] = HEAP8[ptr+7];
+
+}
+
+
+
+
+ var ___errno_state=0;function ___setErrNo(value) {
+ // For convenient setting and returning of errno.
+ HEAP32[((___errno_state)>>2)]=value;
+ return value;
+ }
+
+ var ERRNO_CODES={EPERM:1,ENOENT:2,ESRCH:3,EINTR:4,EIO:5,ENXIO:6,E2BIG:7,ENOEXEC:8,EBADF:9,ECHILD:10,EAGAIN:11,EWOULDBLOCK:11,ENOMEM:12,EACCES:13,EFAULT:14,ENOTBLK:15,EBUSY:16,EEXIST:17,EXDEV:18,ENODEV:19,ENOTDIR:20,EISDIR:21,EINVAL:22,ENFILE:23,EMFILE:24,ENOTTY:25,ETXTBSY:26,EFBIG:27,ENOSPC:28,ESPIPE:29,EROFS:30,EMLINK:31,EPIPE:32,EDOM:33,ERANGE:34,ENOMSG:42,EIDRM:43,ECHRNG:44,EL2NSYNC:45,EL3HLT:46,EL3RST:47,ELNRNG:48,EUNATCH:49,ENOCSI:50,EL2HLT:51,EDEADLK:35,ENOLCK:37,EBADE:52,EBADR:53,EXFULL:54,ENOANO:55,EBADRQC:56,EBADSLT:57,EDEADLOCK:35,EBFONT:59,ENOSTR:60,ENODATA:61,ETIME:62,ENOSR:63,ENONET:64,ENOPKG:65,EREMOTE:66,ENOLINK:67,EADV:68,ESRMNT:69,ECOMM:70,EPROTO:71,EMULTIHOP:72,EDOTDOT:73,EBADMSG:74,ENOTUNIQ:76,EBADFD:77,EREMCHG:78,ELIBACC:79,ELIBBAD:80,ELIBSCN:81,ELIBMAX:82,ELIBEXEC:83,ENOSYS:38,ENOTEMPTY:39,ENAMETOOLONG:36,ELOOP:40,EOPNOTSUPP:95,EPFNOSUPPORT:96,ECONNRESET:104,ENOBUFS:105,EAFNOSUPPORT:97,EPROTOTYPE:91,ENOTSOCK:88,ENOPROTOOPT:92,ESHUTDOWN:108,ECONNREFUSED:111,EADDRINUSE:98,ECONNABORTED:103,ENETUNREACH:101,ENETDOWN:100,ETIMEDOUT:110,EHOSTDOWN:112,EHOSTUNREACH:113,EINPROGRESS:115,EALREADY:114,EDESTADDRREQ:89,EMSGSIZE:90,EPROTONOSUPPORT:93,ESOCKTNOSUPPORT:94,EADDRNOTAVAIL:99,ENETRESET:102,EISCONN:106,ENOTCONN:107,ETOOMANYREFS:109,EUSERS:87,EDQUOT:122,ESTALE:116,ENOTSUP:95,ENOMEDIUM:123,EILSEQ:84,EOVERFLOW:75,ECANCELED:125,ENOTRECOVERABLE:131,EOWNERDEAD:130,ESTRPIPE:86};function _sysconf(name) {
+ // long sysconf(int name);
+ // http://pubs.opengroup.org/onlinepubs/009695399/functions/sysconf.html
+ switch(name) {
+ case 30: return PAGE_SIZE;
+ case 132:
+ case 133:
+ case 12:
+ case 137:
+ case 138:
+ case 15:
+ case 235:
+ case 16:
+ case 17:
+ case 18:
+ case 19:
+ case 20:
+ case 149:
+ case 13:
+ case 10:
+ case 236:
+ case 153:
+ case 9:
+ case 21:
+ case 22:
+ case 159:
+ case 154:
+ case 14:
+ case 77:
+ case 78:
+ case 139:
+ case 80:
+ case 81:
+ case 79:
+ case 82:
+ case 68:
+ case 67:
+ case 164:
+ case 11:
+ case 29:
+ case 47:
+ case 48:
+ case 95:
+ case 52:
+ case 51:
+ case 46:
+ return 200809;
+ case 27:
+ case 246:
+ case 127:
+ case 128:
+ case 23:
+ case 24:
+ case 160:
+ case 161:
+ case 181:
+ case 182:
+ case 242:
+ case 183:
+ case 184:
+ case 243:
+ case 244:
+ case 245:
+ case 165:
+ case 178:
+ case 179:
+ case 49:
+ case 50:
+ case 168:
+ case 169:
+ case 175:
+ case 170:
+ case 171:
+ case 172:
+ case 97:
+ case 76:
+ case 32:
+ case 173:
+ case 35:
+ return -1;
+ case 176:
+ case 177:
+ case 7:
+ case 155:
+ case 8:
+ case 157:
+ case 125:
+ case 126:
+ case 92:
+ case 93:
+ case 129:
+ case 130:
+ case 131:
+ case 94:
+ case 91:
+ return 1;
+ case 74:
+ case 60:
+ case 69:
+ case 70:
+ case 4:
+ return 1024;
+ case 31:
+ case 42:
+ case 72:
+ return 32;
+ case 87:
+ case 26:
+ case 33:
+ return 2147483647;
+ case 34:
+ case 1:
+ return 47839;
+ case 38:
+ case 36:
+ return 99;
+ case 43:
+ case 37:
+ return 2048;
+ case 0: return 2097152;
+ case 3: return 65536;
+ case 28: return 32768;
+ case 44: return 32767;
+ case 75: return 16384;
+ case 39: return 1000;
+ case 89: return 700;
+ case 71: return 256;
+ case 40: return 255;
+ case 2: return 100;
+ case 180: return 64;
+ case 25: return 20;
+ case 5: return 16;
+ case 6: return 6;
+ case 73: return 4;
+ case 84: return 1;
+ }
+ ___setErrNo(ERRNO_CODES.EINVAL);
+ return -1;
+ }
+
+
+ function __ZSt18uncaught_exceptionv() { // std::uncaught_exception()
+ return !!__ZSt18uncaught_exceptionv.uncaught_exception;
+ }
+
+
+
+ function ___cxa_is_number_type(type) {
+ var isNumber = false;
+ try { if (type == __ZTIi) isNumber = true } catch(e){}
+ try { if (type == __ZTIj) isNumber = true } catch(e){}
+ try { if (type == __ZTIl) isNumber = true } catch(e){}
+ try { if (type == __ZTIm) isNumber = true } catch(e){}
+ try { if (type == __ZTIx) isNumber = true } catch(e){}
+ try { if (type == __ZTIy) isNumber = true } catch(e){}
+ try { if (type == __ZTIf) isNumber = true } catch(e){}
+ try { if (type == __ZTId) isNumber = true } catch(e){}
+ try { if (type == __ZTIe) isNumber = true } catch(e){}
+ try { if (type == __ZTIc) isNumber = true } catch(e){}
+ try { if (type == __ZTIa) isNumber = true } catch(e){}
+ try { if (type == __ZTIh) isNumber = true } catch(e){}
+ try { if (type == __ZTIs) isNumber = true } catch(e){}
+ try { if (type == __ZTIt) isNumber = true } catch(e){}
+ return isNumber;
+ }function ___cxa_does_inherit(definiteType, possibilityType, possibility) {
+ if (possibility == 0) return false;
+ if (possibilityType == 0 || possibilityType == definiteType)
+ return true;
+ var possibility_type_info;
+ if (___cxa_is_number_type(possibilityType)) {
+ possibility_type_info = possibilityType;
+ } else {
+ var possibility_type_infoAddr = HEAP32[((possibilityType)>>2)] - 8;
+ possibility_type_info = HEAP32[((possibility_type_infoAddr)>>2)];
+ }
+ switch (possibility_type_info) {
+ case 0: // possibility is a pointer
+ // See if definite type is a pointer
+ var definite_type_infoAddr = HEAP32[((definiteType)>>2)] - 8;
+ var definite_type_info = HEAP32[((definite_type_infoAddr)>>2)];
+ if (definite_type_info == 0) {
+ // Also a pointer; compare base types of pointers
+ var defPointerBaseAddr = definiteType+8;
+ var defPointerBaseType = HEAP32[((defPointerBaseAddr)>>2)];
+ var possPointerBaseAddr = possibilityType+8;
+ var possPointerBaseType = HEAP32[((possPointerBaseAddr)>>2)];
+ return ___cxa_does_inherit(defPointerBaseType, possPointerBaseType, possibility);
+ } else
+ return false; // one pointer and one non-pointer
+ case 1: // class with no base class
+ return false;
+ case 2: // class with base class
+ var parentTypeAddr = possibilityType + 8;
+ var parentType = HEAP32[((parentTypeAddr)>>2)];
+ return ___cxa_does_inherit(definiteType, parentType, possibility);
+ default:
+ return false; // some unencountered type
+ }
+ }
+
+
+
+ var ___cxa_last_thrown_exception=0;function ___resumeException(ptr) {
+ if (!___cxa_last_thrown_exception) { ___cxa_last_thrown_exception = ptr; }
+ throw ptr + " - Exception catching is disabled, this exception cannot be caught. Compile with -s DISABLE_EXCEPTION_CATCHING=0 or DISABLE_EXCEPTION_CATCHING=2 to catch.";
+ }
+
+ var ___cxa_exception_header_size=8;function ___cxa_find_matching_catch(thrown, throwntype) {
+ if (thrown == -1) thrown = ___cxa_last_thrown_exception;
+ header = thrown - ___cxa_exception_header_size;
+ if (throwntype == -1) throwntype = HEAP32[((header)>>2)];
+ var typeArray = Array.prototype.slice.call(arguments, 2);
+
+ // If throwntype is a pointer, this means a pointer has been
+ // thrown. When a pointer is thrown, actually what's thrown
+ // is a pointer to the pointer. We'll dereference it.
+ if (throwntype != 0 && !___cxa_is_number_type(throwntype)) {
+ var throwntypeInfoAddr= HEAP32[((throwntype)>>2)] - 8;
+ var throwntypeInfo= HEAP32[((throwntypeInfoAddr)>>2)];
+ if (throwntypeInfo == 0)
+ thrown = HEAP32[((thrown)>>2)];
+ }
+ // The different catch blocks are denoted by different types.
+ // Due to inheritance, those types may not precisely match the
+ // type of the thrown object. Find one which matches, and
+ // return the type of the catch block which should be called.
+ for (var i = 0; i < typeArray.length; i++) {
+ if (___cxa_does_inherit(typeArray[i], throwntype, thrown))
+ return ((asm["setTempRet0"](typeArray[i]),thrown)|0);
+ }
+ // Shouldn't happen unless we have bogus data in typeArray
+ // or encounter a type for which emscripten doesn't have suitable
+ // typeinfo defined. Best-efforts match just in case.
+ return ((asm["setTempRet0"](throwntype),thrown)|0);
+ }function ___cxa_throw(ptr, type, destructor) {
+ if (!___cxa_throw.initialized) {
+ try {
+ HEAP32[((__ZTVN10__cxxabiv119__pointer_type_infoE)>>2)]=0; // Workaround for libcxxabi integration bug
+ } catch(e){}
+ try {
+ HEAP32[((__ZTVN10__cxxabiv117__class_type_infoE)>>2)]=1; // Workaround for libcxxabi integration bug
+ } catch(e){}
+ try {
+ HEAP32[((__ZTVN10__cxxabiv120__si_class_type_infoE)>>2)]=2; // Workaround for libcxxabi integration bug
+ } catch(e){}
+ ___cxa_throw.initialized = true;
+ }
+ var header = ptr - ___cxa_exception_header_size;
+ HEAP32[((header)>>2)]=type;
+ HEAP32[(((header)+(4))>>2)]=destructor;
+ ___cxa_last_thrown_exception = ptr;
+ if (!("uncaught_exception" in __ZSt18uncaught_exceptionv)) {
+ __ZSt18uncaught_exceptionv.uncaught_exception = 1;
+ } else {
+ __ZSt18uncaught_exceptionv.uncaught_exception++;
+ }
+ throw ptr + " - Exception catching is disabled, this exception cannot be caught. Compile with -s DISABLE_EXCEPTION_CATCHING=0 or DISABLE_EXCEPTION_CATCHING=2 to catch.";
+ }
+
+
+ Module["_memset"] = _memset;
+
+ function _abort() {
+ Module['abort']();
+ }
+
+
+
+
+
+ var ERRNO_MESSAGES={0:"Success",1:"Not super-user",2:"No such file or directory",3:"No such process",4:"Interrupted system call",5:"I/O error",6:"No such device or address",7:"Arg list too long",8:"Exec format error",9:"Bad file number",10:"No children",11:"No more processes",12:"Not enough core",13:"Permission denied",14:"Bad address",15:"Block device required",16:"Mount device busy",17:"File exists",18:"Cross-device link",19:"No such device",20:"Not a directory",21:"Is a directory",22:"Invalid argument",23:"Too many open files in system",24:"Too many open files",25:"Not a typewriter",26:"Text file busy",27:"File too large",28:"No space left on device",29:"Illegal seek",30:"Read only file system",31:"Too many links",32:"Broken pipe",33:"Math arg out of domain of func",34:"Math result not representable",35:"File locking deadlock error",36:"File or path name too long",37:"No record locks available",38:"Function not implemented",39:"Directory not empty",40:"Too many symbolic links",42:"No message of desired type",43:"Identifier removed",44:"Channel number out of range",45:"Level 2 not synchronized",46:"Level 3 halted",47:"Level 3 reset",48:"Link number out of range",49:"Protocol driver not attached",50:"No CSI structure available",51:"Level 2 halted",52:"Invalid exchange",53:"Invalid request descriptor",54:"Exchange full",55:"No anode",56:"Invalid request code",57:"Invalid slot",59:"Bad font file fmt",60:"Device not a stream",61:"No data (for no delay io)",62:"Timer expired",63:"Out of streams resources",64:"Machine is not on the network",65:"Package not installed",66:"The object is remote",67:"The link has been severed",68:"Advertise error",69:"Srmount error",70:"Communication error on send",71:"Protocol error",72:"Multihop attempted",73:"Cross mount point (not really error)",74:"Trying to read unreadable message",75:"Value too large for defined data type",76:"Given log. name not unique",77:"f.d. invalid for this operation",78:"Remote address changed",79:"Can access a needed shared lib",80:"Accessing a corrupted shared lib",81:".lib section in a.out corrupted",82:"Attempting to link in too many libs",83:"Attempting to exec a shared library",84:"Illegal byte sequence",86:"Streams pipe error",87:"Too many users",88:"Socket operation on non-socket",89:"Destination address required",90:"Message too long",91:"Protocol wrong type for socket",92:"Protocol not available",93:"Unknown protocol",94:"Socket type not supported",95:"Not supported",96:"Protocol family not supported",97:"Address family not supported by protocol family",98:"Address already in use",99:"Address not available",100:"Network interface is not configured",101:"Network is unreachable",102:"Connection reset by network",103:"Connection aborted",104:"Connection reset by peer",105:"No buffer space available",106:"Socket is already connected",107:"Socket is not connected",108:"Can't send after socket shutdown",109:"Too many references",110:"Connection timed out",111:"Connection refused",112:"Host is down",113:"Host is unreachable",114:"Socket already connected",115:"Connection already in progress",116:"Stale file handle",122:"Quota exceeded",123:"No medium (in tape drive)",125:"Operation canceled",130:"Previous owner died",131:"State not recoverable"};
+
+ var PATH={splitPath:function (filename) {
+ var splitPathRe = /^(\/?|)([\s\S]*?)((?:\.{1,2}|[^\/]+?|)(\.[^.\/]*|))(?:[\/]*)$/;
+ return splitPathRe.exec(filename).slice(1);
+ },normalizeArray:function (parts, allowAboveRoot) {
+ // if the path tries to go above the root, `up` ends up > 0
+ var up = 0;
+ for (var i = parts.length - 1; i >= 0; i--) {
+ var last = parts[i];
+ if (last === '.') {
+ parts.splice(i, 1);
+ } else if (last === '..') {
+ parts.splice(i, 1);
+ up++;
+ } else if (up) {
+ parts.splice(i, 1);
+ up--;
+ }
+ }
+ // if the path is allowed to go above the root, restore leading ..s
+ if (allowAboveRoot) {
+ for (; up--; up) {
+ parts.unshift('..');
+ }
+ }
+ return parts;
+ },normalize:function (path) {
+ var isAbsolute = path.charAt(0) === '/',
+ trailingSlash = path.substr(-1) === '/';
+ // Normalize the path
+ path = PATH.normalizeArray(path.split('/').filter(function(p) {
+ return !!p;
+ }), !isAbsolute).join('/');
+ if (!path && !isAbsolute) {
+ path = '.';
+ }
+ if (path && trailingSlash) {
+ path += '/';
+ }
+ return (isAbsolute ? '/' : '') + path;
+ },dirname:function (path) {
+ var result = PATH.splitPath(path),
+ root = result[0],
+ dir = result[1];
+ if (!root && !dir) {
+ // No dirname whatsoever
+ return '.';
+ }
+ if (dir) {
+ // It has a dirname, strip trailing slash
+ dir = dir.substr(0, dir.length - 1);
+ }
+ return root + dir;
+ },basename:function (path) {
+ // EMSCRIPTEN return '/'' for '/', not an empty string
+ if (path === '/') return '/';
+ var lastSlash = path.lastIndexOf('/');
+ if (lastSlash === -1) return path;
+ return path.substr(lastSlash+1);
+ },extname:function (path) {
+ return PATH.splitPath(path)[3];
+ },join:function () {
+ var paths = Array.prototype.slice.call(arguments, 0);
+ return PATH.normalize(paths.join('/'));
+ },join2:function (l, r) {
+ return PATH.normalize(l + '/' + r);
+ },resolve:function () {
+ var resolvedPath = '',
+ resolvedAbsolute = false;
+ for (var i = arguments.length - 1; i >= -1 && !resolvedAbsolute; i--) {
+ var path = (i >= 0) ? arguments[i] : FS.cwd();
+ // Skip empty and invalid entries
+ if (typeof path !== 'string') {
+ throw new TypeError('Arguments to path.resolve must be strings');
+ } else if (!path) {
+ continue;
+ }
+ resolvedPath = path + '/' + resolvedPath;
+ resolvedAbsolute = path.charAt(0) === '/';
+ }
+ // At this point the path should be resolved to a full absolute path, but
+ // handle relative paths to be safe (might happen when process.cwd() fails)
+ resolvedPath = PATH.normalizeArray(resolvedPath.split('/').filter(function(p) {
+ return !!p;
+ }), !resolvedAbsolute).join('/');
+ return ((resolvedAbsolute ? '/' : '') + resolvedPath) || '.';
+ },relative:function (from, to) {
+ from = PATH.resolve(from).substr(1);
+ to = PATH.resolve(to).substr(1);
+ function trim(arr) {
+ var start = 0;
+ for (; start < arr.length; start++) {
+ if (arr[start] !== '') break;
+ }
+ var end = arr.length - 1;
+ for (; end >= 0; end--) {
+ if (arr[end] !== '') break;
+ }
+ if (start > end) return [];
+ return arr.slice(start, end - start + 1);
+ }
+ var fromParts = trim(from.split('/'));
+ var toParts = trim(to.split('/'));
+ var length = Math.min(fromParts.length, toParts.length);
+ var samePartsLength = length;
+ for (var i = 0; i < length; i++) {
+ if (fromParts[i] !== toParts[i]) {
+ samePartsLength = i;
+ break;
+ }
+ }
+ var outputParts = [];
+ for (var i = samePartsLength; i < fromParts.length; i++) {
+ outputParts.push('..');
+ }
+ outputParts = outputParts.concat(toParts.slice(samePartsLength));
+ return outputParts.join('/');
+ }};
+
+ var TTY={ttys:[],init:function () {
+ // https://github.com/kripken/emscripten/pull/1555
+ // if (ENVIRONMENT_IS_NODE) {
+ // // currently, FS.init does not distinguish if process.stdin is a file or TTY
+ // // device, it always assumes it's a TTY device. because of this, we're forcing
+ // // process.stdin to UTF8 encoding to at least make stdin reading compatible
+ // // with text files until FS.init can be refactored.
+ // process['stdin']['setEncoding']('utf8');
+ // }
+ },shutdown:function () {
+ // https://github.com/kripken/emscripten/pull/1555
+ // if (ENVIRONMENT_IS_NODE) {
+ // // inolen: any idea as to why node -e 'process.stdin.read()' wouldn't exit immediately (with process.stdin being a tty)?
+ // // isaacs: because now it's reading from the stream, you've expressed interest in it, so that read() kicks off a _read() which creates a ReadReq operation
+ // // inolen: I thought read() in that case was a synchronous operation that just grabbed some amount of buffered data if it exists?
+ // // isaacs: it is. but it also triggers a _read() call, which calls readStart() on the handle
+ // // isaacs: do process.stdin.pause() and i'd think it'd probably close the pending call
+ // process['stdin']['pause']();
+ // }
+ },register:function (dev, ops) {
+ TTY.ttys[dev] = { input: [], output: [], ops: ops };
+ FS.registerDevice(dev, TTY.stream_ops);
+ },stream_ops:{open:function (stream) {
+ var tty = TTY.ttys[stream.node.rdev];
+ if (!tty) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ stream.tty = tty;
+ stream.seekable = false;
+ },close:function (stream) {
+ // flush any pending line data
+ if (stream.tty.output.length) {
+ stream.tty.ops.put_char(stream.tty, 10);
+ }
+ },read:function (stream, buffer, offset, length, pos /* ignored */) {
+ if (!stream.tty || !stream.tty.ops.get_char) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENXIO);
+ }
+ var bytesRead = 0;
+ for (var i = 0; i < length; i++) {
+ var result;
+ try {
+ result = stream.tty.ops.get_char(stream.tty);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ if (result === undefined && bytesRead === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ if (result === null || result === undefined) break;
+ bytesRead++;
+ buffer[offset+i] = result;
+ }
+ if (bytesRead) {
+ stream.node.timestamp = Date.now();
+ }
+ return bytesRead;
+ },write:function (stream, buffer, offset, length, pos) {
+ if (!stream.tty || !stream.tty.ops.put_char) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENXIO);
+ }
+ for (var i = 0; i < length; i++) {
+ try {
+ stream.tty.ops.put_char(stream.tty, buffer[offset+i]);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ }
+ if (length) {
+ stream.node.timestamp = Date.now();
+ }
+ return i;
+ }},default_tty_ops:{get_char:function (tty) {
+ if (!tty.input.length) {
+ var result = null;
+ if (ENVIRONMENT_IS_NODE) {
+ result = process['stdin']['read']();
+ if (!result) {
+ if (process['stdin']['_readableState'] && process['stdin']['_readableState']['ended']) {
+ return null; // EOF
+ }
+ return undefined; // no data available
+ }
+ } else if (typeof window != 'undefined' &&
+ typeof window.prompt == 'function') {
+ // Browser.
+ result = window.prompt('Input: '); // returns null on cancel
+ if (result !== null) {
+ result += '\n';
+ }
+ } else if (typeof readline == 'function') {
+ // Command line.
+ result = readline();
+ if (result !== null) {
+ result += '\n';
+ }
+ }
+ if (!result) {
+ return null;
+ }
+ tty.input = intArrayFromString(result, true);
+ }
+ return tty.input.shift();
+ },put_char:function (tty, val) {
+ if (val === null || val === 10) {
+ Module['print'](tty.output.join(''));
+ tty.output = [];
+ } else {
+ tty.output.push(TTY.utf8.processCChar(val));
+ }
+ }},default_tty1_ops:{put_char:function (tty, val) {
+ if (val === null || val === 10) {
+ Module['printErr'](tty.output.join(''));
+ tty.output = [];
+ } else {
+ tty.output.push(TTY.utf8.processCChar(val));
+ }
+ }}};
+
+ var MEMFS={ops_table:null,CONTENT_OWNING:1,CONTENT_FLEXIBLE:2,CONTENT_FIXED:3,mount:function (mount) {
+ return MEMFS.createNode(null, '/', 16384 | 511 /* 0777 */, 0);
+ },createNode:function (parent, name, mode, dev) {
+ if (FS.isBlkdev(mode) || FS.isFIFO(mode)) {
+ // no supported
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (!MEMFS.ops_table) {
+ MEMFS.ops_table = {
+ dir: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr,
+ lookup: MEMFS.node_ops.lookup,
+ mknod: MEMFS.node_ops.mknod,
+ rename: MEMFS.node_ops.rename,
+ unlink: MEMFS.node_ops.unlink,
+ rmdir: MEMFS.node_ops.rmdir,
+ readdir: MEMFS.node_ops.readdir,
+ symlink: MEMFS.node_ops.symlink
+ },
+ stream: {
+ llseek: MEMFS.stream_ops.llseek
+ }
+ },
+ file: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr
+ },
+ stream: {
+ llseek: MEMFS.stream_ops.llseek,
+ read: MEMFS.stream_ops.read,
+ write: MEMFS.stream_ops.write,
+ allocate: MEMFS.stream_ops.allocate,
+ mmap: MEMFS.stream_ops.mmap
+ }
+ },
+ link: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr,
+ readlink: MEMFS.node_ops.readlink
+ },
+ stream: {}
+ },
+ chrdev: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr
+ },
+ stream: FS.chrdev_stream_ops
+ },
+ };
+ }
+ var node = FS.createNode(parent, name, mode, dev);
+ if (FS.isDir(node.mode)) {
+ node.node_ops = MEMFS.ops_table.dir.node;
+ node.stream_ops = MEMFS.ops_table.dir.stream;
+ node.contents = {};
+ } else if (FS.isFile(node.mode)) {
+ node.node_ops = MEMFS.ops_table.file.node;
+ node.stream_ops = MEMFS.ops_table.file.stream;
+ node.contents = [];
+ node.contentMode = MEMFS.CONTENT_FLEXIBLE;
+ } else if (FS.isLink(node.mode)) {
+ node.node_ops = MEMFS.ops_table.link.node;
+ node.stream_ops = MEMFS.ops_table.link.stream;
+ } else if (FS.isChrdev(node.mode)) {
+ node.node_ops = MEMFS.ops_table.chrdev.node;
+ node.stream_ops = MEMFS.ops_table.chrdev.stream;
+ }
+ node.timestamp = Date.now();
+ // add the new node to the parent
+ if (parent) {
+ parent.contents[name] = node;
+ }
+ return node;
+ },ensureFlexible:function (node) {
+ if (node.contentMode !== MEMFS.CONTENT_FLEXIBLE) {
+ var contents = node.contents;
+ node.contents = Array.prototype.slice.call(contents);
+ node.contentMode = MEMFS.CONTENT_FLEXIBLE;
+ }
+ },node_ops:{getattr:function (node) {
+ var attr = {};
+ // device numbers reuse inode numbers.
+ attr.dev = FS.isChrdev(node.mode) ? node.id : 1;
+ attr.ino = node.id;
+ attr.mode = node.mode;
+ attr.nlink = 1;
+ attr.uid = 0;
+ attr.gid = 0;
+ attr.rdev = node.rdev;
+ if (FS.isDir(node.mode)) {
+ attr.size = 4096;
+ } else if (FS.isFile(node.mode)) {
+ attr.size = node.contents.length;
+ } else if (FS.isLink(node.mode)) {
+ attr.size = node.link.length;
+ } else {
+ attr.size = 0;
+ }
+ attr.atime = new Date(node.timestamp);
+ attr.mtime = new Date(node.timestamp);
+ attr.ctime = new Date(node.timestamp);
+ // NOTE: In our implementation, st_blocks = Math.ceil(st_size/st_blksize),
+ // but this is not required by the standard.
+ attr.blksize = 4096;
+ attr.blocks = Math.ceil(attr.size / attr.blksize);
+ return attr;
+ },setattr:function (node, attr) {
+ if (attr.mode !== undefined) {
+ node.mode = attr.mode;
+ }
+ if (attr.timestamp !== undefined) {
+ node.timestamp = attr.timestamp;
+ }
+ if (attr.size !== undefined) {
+ MEMFS.ensureFlexible(node);
+ var contents = node.contents;
+ if (attr.size < contents.length) contents.length = attr.size;
+ else while (attr.size > contents.length) contents.push(0);
+ }
+ },lookup:function (parent, name) {
+ throw FS.genericErrors[ERRNO_CODES.ENOENT];
+ },mknod:function (parent, name, mode, dev) {
+ return MEMFS.createNode(parent, name, mode, dev);
+ },rename:function (old_node, new_dir, new_name) {
+ // if we're overwriting a directory at new_name, make sure it's empty.
+ if (FS.isDir(old_node.mode)) {
+ var new_node;
+ try {
+ new_node = FS.lookupNode(new_dir, new_name);
+ } catch (e) {
+ }
+ if (new_node) {
+ for (var i in new_node.contents) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTEMPTY);
+ }
+ }
+ }
+ // do the internal rewiring
+ delete old_node.parent.contents[old_node.name];
+ old_node.name = new_name;
+ new_dir.contents[new_name] = old_node;
+ old_node.parent = new_dir;
+ },unlink:function (parent, name) {
+ delete parent.contents[name];
+ },rmdir:function (parent, name) {
+ var node = FS.lookupNode(parent, name);
+ for (var i in node.contents) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTEMPTY);
+ }
+ delete parent.contents[name];
+ },readdir:function (node) {
+ var entries = ['.', '..']
+ for (var key in node.contents) {
+ if (!node.contents.hasOwnProperty(key)) {
+ continue;
+ }
+ entries.push(key);
+ }
+ return entries;
+ },symlink:function (parent, newname, oldpath) {
+ var node = MEMFS.createNode(parent, newname, 511 /* 0777 */ | 40960, 0);
+ node.link = oldpath;
+ return node;
+ },readlink:function (node) {
+ if (!FS.isLink(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ return node.link;
+ }},stream_ops:{read:function (stream, buffer, offset, length, position) {
+ var contents = stream.node.contents;
+ if (position >= contents.length)
+ return 0;
+ var size = Math.min(contents.length - position, length);
+ assert(size >= 0);
+ if (size > 8 && contents.subarray) { // non-trivial, and typed array
+ buffer.set(contents.subarray(position, position + size), offset);
+ } else
+ {
+ for (var i = 0; i < size; i++) {
+ buffer[offset + i] = contents[position + i];
+ }
+ }
+ return size;
+ },write:function (stream, buffer, offset, length, position, canOwn) {
+ var node = stream.node;
+ node.timestamp = Date.now();
+ var contents = node.contents;
+ if (length && contents.length === 0 && position === 0 && buffer.subarray) {
+ // just replace it with the new data
+ if (canOwn && offset === 0) {
+ node.contents = buffer; // this could be a subarray of Emscripten HEAP, or allocated from some other source.
+ node.contentMode = (buffer.buffer === HEAP8.buffer) ? MEMFS.CONTENT_OWNING : MEMFS.CONTENT_FIXED;
+ } else {
+ node.contents = new Uint8Array(buffer.subarray(offset, offset+length));
+ node.contentMode = MEMFS.CONTENT_FIXED;
+ }
+ return length;
+ }
+ MEMFS.ensureFlexible(node);
+ var contents = node.contents;
+ while (contents.length < position) contents.push(0);
+ for (var i = 0; i < length; i++) {
+ contents[position + i] = buffer[offset + i];
+ }
+ return length;
+ },llseek:function (stream, offset, whence) {
+ var position = offset;
+ if (whence === 1) { // SEEK_CUR.
+ position += stream.position;
+ } else if (whence === 2) { // SEEK_END.
+ if (FS.isFile(stream.node.mode)) {
+ position += stream.node.contents.length;
+ }
+ }
+ if (position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ stream.ungotten = [];
+ stream.position = position;
+ return position;
+ },allocate:function (stream, offset, length) {
+ MEMFS.ensureFlexible(stream.node);
+ var contents = stream.node.contents;
+ var limit = offset + length;
+ while (limit > contents.length) contents.push(0);
+ },mmap:function (stream, buffer, offset, length, position, prot, flags) {
+ if (!FS.isFile(stream.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ var ptr;
+ var allocated;
+ var contents = stream.node.contents;
+ // Only make a new copy when MAP_PRIVATE is specified.
+ if ( !(flags & 2) &&
+ (contents.buffer === buffer || contents.buffer === buffer.buffer) ) {
+ // We can't emulate MAP_SHARED when the file is not backed by the buffer
+ // we're mapping to (e.g. the HEAP buffer).
+ allocated = false;
+ ptr = contents.byteOffset;
+ } else {
+ // Try to avoid unnecessary slices.
+ if (position > 0 || position + length < contents.length) {
+ if (contents.subarray) {
+ contents = contents.subarray(position, position + length);
+ } else {
+ contents = Array.prototype.slice.call(contents, position, position + length);
+ }
+ }
+ allocated = true;
+ ptr = _malloc(length);
+ if (!ptr) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOMEM);
+ }
+ buffer.set(contents, ptr);
+ }
+ return { ptr: ptr, allocated: allocated };
+ }}};
+
+ var IDBFS={dbs:{},indexedDB:function () {
+ return window.indexedDB || window.mozIndexedDB || window.webkitIndexedDB || window.msIndexedDB;
+ },DB_VERSION:21,DB_STORE_NAME:"FILE_DATA",mount:function (mount) {
+ // reuse all of the core MEMFS functionality
+ return MEMFS.mount.apply(null, arguments);
+ },syncfs:function (mount, populate, callback) {
+ IDBFS.getLocalSet(mount, function(err, local) {
+ if (err) return callback(err);
+
+ IDBFS.getRemoteSet(mount, function(err, remote) {
+ if (err) return callback(err);
+
+ var src = populate ? remote : local;
+ var dst = populate ? local : remote;
+
+ IDBFS.reconcile(src, dst, callback);
+ });
+ });
+ },getDB:function (name, callback) {
+ // check the cache first
+ var db = IDBFS.dbs[name];
+ if (db) {
+ return callback(null, db);
+ }
+
+ var req;
+ try {
+ req = IDBFS.indexedDB().open(name, IDBFS.DB_VERSION);
+ } catch (e) {
+ return callback(e);
+ }
+ req.onupgradeneeded = function(e) {
+ var db = e.target.result;
+ var transaction = e.target.transaction;
+
+ var fileStore;
+
+ if (db.objectStoreNames.contains(IDBFS.DB_STORE_NAME)) {
+ fileStore = transaction.objectStore(IDBFS.DB_STORE_NAME);
+ } else {
+ fileStore = db.createObjectStore(IDBFS.DB_STORE_NAME);
+ }
+
+ fileStore.createIndex('timestamp', 'timestamp', { unique: false });
+ };
+ req.onsuccess = function() {
+ db = req.result;
+
+ // add to the cache
+ IDBFS.dbs[name] = db;
+ callback(null, db);
+ };
+ req.onerror = function() {
+ callback(this.error);
+ };
+ },getLocalSet:function (mount, callback) {
+ var entries = {};
+
+ function isRealDir(p) {
+ return p !== '.' && p !== '..';
+ };
+ function toAbsolute(root) {
+ return function(p) {
+ return PATH.join2(root, p);
+ }
+ };
+
+ var check = FS.readdir(mount.mountpoint).filter(isRealDir).map(toAbsolute(mount.mountpoint));
+
+ while (check.length) {
+ var path = check.pop();
+ var stat;
+
+ try {
+ stat = FS.stat(path);
+ } catch (e) {
+ return callback(e);
+ }
+
+ if (FS.isDir(stat.mode)) {
+ check.push.apply(check, FS.readdir(path).filter(isRealDir).map(toAbsolute(path)));
+ }
+
+ entries[path] = { timestamp: stat.mtime };
+ }
+
+ return callback(null, { type: 'local', entries: entries });
+ },getRemoteSet:function (mount, callback) {
+ var entries = {};
+
+ IDBFS.getDB(mount.mountpoint, function(err, db) {
+ if (err) return callback(err);
+
+ var transaction = db.transaction([IDBFS.DB_STORE_NAME], 'readonly');
+ transaction.onerror = function() { callback(this.error); };
+
+ var store = transaction.objectStore(IDBFS.DB_STORE_NAME);
+ var index = store.index('timestamp');
+
+ index.openKeyCursor().onsuccess = function(event) {
+ var cursor = event.target.result;
+
+ if (!cursor) {
+ return callback(null, { type: 'remote', db: db, entries: entries });
+ }
+
+ entries[cursor.primaryKey] = { timestamp: cursor.key };
+
+ cursor.continue();
+ };
+ });
+ },loadLocalEntry:function (path, callback) {
+ var stat, node;
+
+ try {
+ var lookup = FS.lookupPath(path);
+ node = lookup.node;
+ stat = FS.stat(path);
+ } catch (e) {
+ return callback(e);
+ }
+
+ if (FS.isDir(stat.mode)) {
+ return callback(null, { timestamp: stat.mtime, mode: stat.mode });
+ } else if (FS.isFile(stat.mode)) {
+ return callback(null, { timestamp: stat.mtime, mode: stat.mode, contents: node.contents });
+ } else {
+ return callback(new Error('node type not supported'));
+ }
+ },storeLocalEntry:function (path, entry, callback) {
+ try {
+ if (FS.isDir(entry.mode)) {
+ FS.mkdir(path, entry.mode);
+ } else if (FS.isFile(entry.mode)) {
+ FS.writeFile(path, entry.contents, { encoding: 'binary', canOwn: true });
+ } else {
+ return callback(new Error('node type not supported'));
+ }
+
+ FS.utime(path, entry.timestamp, entry.timestamp);
+ } catch (e) {
+ return callback(e);
+ }
+
+ callback(null);
+ },removeLocalEntry:function (path, callback) {
+ try {
+ var lookup = FS.lookupPath(path);
+ var stat = FS.stat(path);
+
+ if (FS.isDir(stat.mode)) {
+ FS.rmdir(path);
+ } else if (FS.isFile(stat.mode)) {
+ FS.unlink(path);
+ }
+ } catch (e) {
+ return callback(e);
+ }
+
+ callback(null);
+ },loadRemoteEntry:function (store, path, callback) {
+ var req = store.get(path);
+ req.onsuccess = function(event) { callback(null, event.target.result); };
+ req.onerror = function() { callback(this.error); };
+ },storeRemoteEntry:function (store, path, entry, callback) {
+ var req = store.put(entry, path);
+ req.onsuccess = function() { callback(null); };
+ req.onerror = function() { callback(this.error); };
+ },removeRemoteEntry:function (store, path, callback) {
+ var req = store.delete(path);
+ req.onsuccess = function() { callback(null); };
+ req.onerror = function() { callback(this.error); };
+ },reconcile:function (src, dst, callback) {
+ var total = 0;
+
+ var create = [];
+ Object.keys(src.entries).forEach(function (key) {
+ var e = src.entries[key];
+ var e2 = dst.entries[key];
+ if (!e2 || e.timestamp > e2.timestamp) {
+ create.push(key);
+ total++;
+ }
+ });
+
+ var remove = [];
+ Object.keys(dst.entries).forEach(function (key) {
+ var e = dst.entries[key];
+ var e2 = src.entries[key];
+ if (!e2) {
+ remove.push(key);
+ total++;
+ }
+ });
+
+ if (!total) {
+ return callback(null);
+ }
+
+ var errored = false;
+ var completed = 0;
+ var db = src.type === 'remote' ? src.db : dst.db;
+ var transaction = db.transaction([IDBFS.DB_STORE_NAME], 'readwrite');
+ var store = transaction.objectStore(IDBFS.DB_STORE_NAME);
+
+ function done(err) {
+ if (err) {
+ if (!done.errored) {
+ done.errored = true;
+ return callback(err);
+ }
+ return;
+ }
+ if (++completed >= total) {
+ return callback(null);
+ }
+ };
+
+ transaction.onerror = function() { done(this.error); };
+
+ // sort paths in ascending order so directory entries are created
+ // before the files inside them
+ create.sort().forEach(function (path) {
+ if (dst.type === 'local') {
+ IDBFS.loadRemoteEntry(store, path, function (err, entry) {
+ if (err) return done(err);
+ IDBFS.storeLocalEntry(path, entry, done);
+ });
+ } else {
+ IDBFS.loadLocalEntry(path, function (err, entry) {
+ if (err) return done(err);
+ IDBFS.storeRemoteEntry(store, path, entry, done);
+ });
+ }
+ });
+
+ // sort paths in descending order so files are deleted before their
+ // parent directories
+ remove.sort().reverse().forEach(function(path) {
+ if (dst.type === 'local') {
+ IDBFS.removeLocalEntry(path, done);
+ } else {
+ IDBFS.removeRemoteEntry(store, path, done);
+ }
+ });
+ }};
+
+ var NODEFS={isWindows:false,staticInit:function () {
+ NODEFS.isWindows = !!process.platform.match(/^win/);
+ },mount:function (mount) {
+ assert(ENVIRONMENT_IS_NODE);
+ return NODEFS.createNode(null, '/', NODEFS.getMode(mount.opts.root), 0);
+ },createNode:function (parent, name, mode, dev) {
+ if (!FS.isDir(mode) && !FS.isFile(mode) && !FS.isLink(mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var node = FS.createNode(parent, name, mode);
+ node.node_ops = NODEFS.node_ops;
+ node.stream_ops = NODEFS.stream_ops;
+ return node;
+ },getMode:function (path) {
+ var stat;
+ try {
+ stat = fs.lstatSync(path);
+ if (NODEFS.isWindows) {
+ // On Windows, directories return permission bits 'rw-rw-rw-', even though they have 'rwxrwxrwx', so
+ // propagate write bits to execute bits.
+ stat.mode = stat.mode | ((stat.mode & 146) >> 1);
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ return stat.mode;
+ },realPath:function (node) {
+ var parts = [];
+ while (node.parent !== node) {
+ parts.push(node.name);
+ node = node.parent;
+ }
+ parts.push(node.mount.opts.root);
+ parts.reverse();
+ return PATH.join.apply(null, parts);
+ },flagsToPermissionStringMap:{0:"r",1:"r+",2:"r+",64:"r",65:"r+",66:"r+",129:"rx+",193:"rx+",514:"w+",577:"w",578:"w+",705:"wx",706:"wx+",1024:"a",1025:"a",1026:"a+",1089:"a",1090:"a+",1153:"ax",1154:"ax+",1217:"ax",1218:"ax+",4096:"rs",4098:"rs+"},flagsToPermissionString:function (flags) {
+ if (flags in NODEFS.flagsToPermissionStringMap) {
+ return NODEFS.flagsToPermissionStringMap[flags];
+ } else {
+ return flags;
+ }
+ },node_ops:{getattr:function (node) {
+ var path = NODEFS.realPath(node);
+ var stat;
+ try {
+ stat = fs.lstatSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ // node.js v0.10.20 doesn't report blksize and blocks on Windows. Fake them with default blksize of 4096.
+ // See http://support.microsoft.com/kb/140365
+ if (NODEFS.isWindows && !stat.blksize) {
+ stat.blksize = 4096;
+ }
+ if (NODEFS.isWindows && !stat.blocks) {
+ stat.blocks = (stat.size+stat.blksize-1)/stat.blksize|0;
+ }
+ return {
+ dev: stat.dev,
+ ino: stat.ino,
+ mode: stat.mode,
+ nlink: stat.nlink,
+ uid: stat.uid,
+ gid: stat.gid,
+ rdev: stat.rdev,
+ size: stat.size,
+ atime: stat.atime,
+ mtime: stat.mtime,
+ ctime: stat.ctime,
+ blksize: stat.blksize,
+ blocks: stat.blocks
+ };
+ },setattr:function (node, attr) {
+ var path = NODEFS.realPath(node);
+ try {
+ if (attr.mode !== undefined) {
+ fs.chmodSync(path, attr.mode);
+ // update the common node structure mode as well
+ node.mode = attr.mode;
+ }
+ if (attr.timestamp !== undefined) {
+ var date = new Date(attr.timestamp);
+ fs.utimesSync(path, date, date);
+ }
+ if (attr.size !== undefined) {
+ fs.truncateSync(path, attr.size);
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },lookup:function (parent, name) {
+ var path = PATH.join2(NODEFS.realPath(parent), name);
+ var mode = NODEFS.getMode(path);
+ return NODEFS.createNode(parent, name, mode);
+ },mknod:function (parent, name, mode, dev) {
+ var node = NODEFS.createNode(parent, name, mode, dev);
+ // create the backing node for this in the fs root as well
+ var path = NODEFS.realPath(node);
+ try {
+ if (FS.isDir(node.mode)) {
+ fs.mkdirSync(path, node.mode);
+ } else {
+ fs.writeFileSync(path, '', { mode: node.mode });
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ return node;
+ },rename:function (oldNode, newDir, newName) {
+ var oldPath = NODEFS.realPath(oldNode);
+ var newPath = PATH.join2(NODEFS.realPath(newDir), newName);
+ try {
+ fs.renameSync(oldPath, newPath);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },unlink:function (parent, name) {
+ var path = PATH.join2(NODEFS.realPath(parent), name);
+ try {
+ fs.unlinkSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },rmdir:function (parent, name) {
+ var path = PATH.join2(NODEFS.realPath(parent), name);
+ try {
+ fs.rmdirSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },readdir:function (node) {
+ var path = NODEFS.realPath(node);
+ try {
+ return fs.readdirSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },symlink:function (parent, newName, oldPath) {
+ var newPath = PATH.join2(NODEFS.realPath(parent), newName);
+ try {
+ fs.symlinkSync(oldPath, newPath);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },readlink:function (node) {
+ var path = NODEFS.realPath(node);
+ try {
+ return fs.readlinkSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ }},stream_ops:{open:function (stream) {
+ var path = NODEFS.realPath(stream.node);
+ try {
+ if (FS.isFile(stream.node.mode)) {
+ stream.nfd = fs.openSync(path, NODEFS.flagsToPermissionString(stream.flags));
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },close:function (stream) {
+ try {
+ if (FS.isFile(stream.node.mode) && stream.nfd) {
+ fs.closeSync(stream.nfd);
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },read:function (stream, buffer, offset, length, position) {
+ // FIXME this is terrible.
+ var nbuffer = new Buffer(length);
+ var res;
+ try {
+ res = fs.readSync(stream.nfd, nbuffer, 0, length, position);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ if (res > 0) {
+ for (var i = 0; i < res; i++) {
+ buffer[offset + i] = nbuffer[i];
+ }
+ }
+ return res;
+ },write:function (stream, buffer, offset, length, position) {
+ // FIXME this is terrible.
+ var nbuffer = new Buffer(buffer.subarray(offset, offset + length));
+ var res;
+ try {
+ res = fs.writeSync(stream.nfd, nbuffer, 0, length, position);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ return res;
+ },llseek:function (stream, offset, whence) {
+ var position = offset;
+ if (whence === 1) { // SEEK_CUR.
+ position += stream.position;
+ } else if (whence === 2) { // SEEK_END.
+ if (FS.isFile(stream.node.mode)) {
+ try {
+ var stat = fs.fstatSync(stream.nfd);
+ position += stat.size;
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ }
+ }
+
+ if (position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+
+ stream.position = position;
+ return position;
+ }}};
+
+ var _stdin=allocate(1, "i32*", ALLOC_STATIC);
+
+ var _stdout=allocate(1, "i32*", ALLOC_STATIC);
+
+ var _stderr=allocate(1, "i32*", ALLOC_STATIC);
+
+ function _fflush(stream) {
+ // int fflush(FILE *stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fflush.html
+ // we don't currently perform any user-space buffering of data
+ }var FS={root:null,mounts:[],devices:[null],streams:[],nextInode:1,nameTable:null,currentPath:"/",initialized:false,ignorePermissions:true,ErrnoError:null,genericErrors:{},handleFSError:function (e) {
+ if (!(e instanceof FS.ErrnoError)) throw e + ' : ' + stackTrace();
+ return ___setErrNo(e.errno);
+ },lookupPath:function (path, opts) {
+ path = PATH.resolve(FS.cwd(), path);
+ opts = opts || {};
+
+ var defaults = {
+ follow_mount: true,
+ recurse_count: 0
+ };
+ for (var key in defaults) {
+ if (opts[key] === undefined) {
+ opts[key] = defaults[key];
+ }
+ }
+
+ if (opts.recurse_count > 8) { // max recursive lookup of 8
+ throw new FS.ErrnoError(ERRNO_CODES.ELOOP);
+ }
+
+ // split the path
+ var parts = PATH.normalizeArray(path.split('/').filter(function(p) {
+ return !!p;
+ }), false);
+
+ // start at the root
+ var current = FS.root;
+ var current_path = '/';
+
+ for (var i = 0; i < parts.length; i++) {
+ var islast = (i === parts.length-1);
+ if (islast && opts.parent) {
+ // stop resolving
+ break;
+ }
+
+ current = FS.lookupNode(current, parts[i]);
+ current_path = PATH.join2(current_path, parts[i]);
+
+ // jump to the mount's root node if this is a mountpoint
+ if (FS.isMountpoint(current)) {
+ if (!islast || (islast && opts.follow_mount)) {
+ current = current.mounted.root;
+ }
+ }
+
+ // by default, lookupPath will not follow a symlink if it is the final path component.
+ // setting opts.follow = true will override this behavior.
+ if (!islast || opts.follow) {
+ var count = 0;
+ while (FS.isLink(current.mode)) {
+ var link = FS.readlink(current_path);
+ current_path = PATH.resolve(PATH.dirname(current_path), link);
+
+ var lookup = FS.lookupPath(current_path, { recurse_count: opts.recurse_count });
+ current = lookup.node;
+
+ if (count++ > 40) { // limit max consecutive symlinks to 40 (SYMLOOP_MAX).
+ throw new FS.ErrnoError(ERRNO_CODES.ELOOP);
+ }
+ }
+ }
+ }
+
+ return { path: current_path, node: current };
+ },getPath:function (node) {
+ var path;
+ while (true) {
+ if (FS.isRoot(node)) {
+ var mount = node.mount.mountpoint;
+ if (!path) return mount;
+ return mount[mount.length-1] !== '/' ? mount + '/' + path : mount + path;
+ }
+ path = path ? node.name + '/' + path : node.name;
+ node = node.parent;
+ }
+ },hashName:function (parentid, name) {
+ var hash = 0;
+
+
+ for (var i = 0; i < name.length; i++) {
+ hash = ((hash << 5) - hash + name.charCodeAt(i)) | 0;
+ }
+ return ((parentid + hash) >>> 0) % FS.nameTable.length;
+ },hashAddNode:function (node) {
+ var hash = FS.hashName(node.parent.id, node.name);
+ node.name_next = FS.nameTable[hash];
+ FS.nameTable[hash] = node;
+ },hashRemoveNode:function (node) {
+ var hash = FS.hashName(node.parent.id, node.name);
+ if (FS.nameTable[hash] === node) {
+ FS.nameTable[hash] = node.name_next;
+ } else {
+ var current = FS.nameTable[hash];
+ while (current) {
+ if (current.name_next === node) {
+ current.name_next = node.name_next;
+ break;
+ }
+ current = current.name_next;
+ }
+ }
+ },lookupNode:function (parent, name) {
+ var err = FS.mayLookup(parent);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ var hash = FS.hashName(parent.id, name);
+ for (var node = FS.nameTable[hash]; node; node = node.name_next) {
+ var nodeName = node.name;
+ if (node.parent.id === parent.id && nodeName === name) {
+ return node;
+ }
+ }
+ // if we failed to find it in the cache, call into the VFS
+ return FS.lookup(parent, name);
+ },createNode:function (parent, name, mode, rdev) {
+ if (!FS.FSNode) {
+ FS.FSNode = function(parent, name, mode, rdev) {
+ if (!parent) {
+ parent = this; // root node sets parent to itself
+ }
+ this.parent = parent;
+ this.mount = parent.mount;
+ this.mounted = null;
+ this.id = FS.nextInode++;
+ this.name = name;
+ this.mode = mode;
+ this.node_ops = {};
+ this.stream_ops = {};
+ this.rdev = rdev;
+ };
+
+ FS.FSNode.prototype = {};
+
+ // compatibility
+ var readMode = 292 | 73;
+ var writeMode = 146;
+
+ // NOTE we must use Object.defineProperties instead of individual calls to
+ // Object.defineProperty in order to make closure compiler happy
+ Object.defineProperties(FS.FSNode.prototype, {
+ read: {
+ get: function() { return (this.mode & readMode) === readMode; },
+ set: function(val) { val ? this.mode |= readMode : this.mode &= ~readMode; }
+ },
+ write: {
+ get: function() { return (this.mode & writeMode) === writeMode; },
+ set: function(val) { val ? this.mode |= writeMode : this.mode &= ~writeMode; }
+ },
+ isFolder: {
+ get: function() { return FS.isDir(this.mode); },
+ },
+ isDevice: {
+ get: function() { return FS.isChrdev(this.mode); },
+ },
+ });
+ }
+
+ var node = new FS.FSNode(parent, name, mode, rdev);
+
+ FS.hashAddNode(node);
+
+ return node;
+ },destroyNode:function (node) {
+ FS.hashRemoveNode(node);
+ },isRoot:function (node) {
+ return node === node.parent;
+ },isMountpoint:function (node) {
+ return !!node.mounted;
+ },isFile:function (mode) {
+ return (mode & 61440) === 32768;
+ },isDir:function (mode) {
+ return (mode & 61440) === 16384;
+ },isLink:function (mode) {
+ return (mode & 61440) === 40960;
+ },isChrdev:function (mode) {
+ return (mode & 61440) === 8192;
+ },isBlkdev:function (mode) {
+ return (mode & 61440) === 24576;
+ },isFIFO:function (mode) {
+ return (mode & 61440) === 4096;
+ },isSocket:function (mode) {
+ return (mode & 49152) === 49152;
+ },flagModes:{"r":0,"rs":1052672,"r+":2,"w":577,"wx":705,"xw":705,"w+":578,"wx+":706,"xw+":706,"a":1089,"ax":1217,"xa":1217,"a+":1090,"ax+":1218,"xa+":1218},modeStringToFlags:function (str) {
+ var flags = FS.flagModes[str];
+ if (typeof flags === 'undefined') {
+ throw new Error('Unknown file open mode: ' + str);
+ }
+ return flags;
+ },flagsToPermissionString:function (flag) {
+ var accmode = flag & 2097155;
+ var perms = ['r', 'w', 'rw'][accmode];
+ if ((flag & 512)) {
+ perms += 'w';
+ }
+ return perms;
+ },nodePermissions:function (node, perms) {
+ if (FS.ignorePermissions) {
+ return 0;
+ }
+ // return 0 if any user, group or owner bits are set.
+ if (perms.indexOf('r') !== -1 && !(node.mode & 292)) {
+ return ERRNO_CODES.EACCES;
+ } else if (perms.indexOf('w') !== -1 && !(node.mode & 146)) {
+ return ERRNO_CODES.EACCES;
+ } else if (perms.indexOf('x') !== -1 && !(node.mode & 73)) {
+ return ERRNO_CODES.EACCES;
+ }
+ return 0;
+ },mayLookup:function (dir) {
+ return FS.nodePermissions(dir, 'x');
+ },mayCreate:function (dir, name) {
+ try {
+ var node = FS.lookupNode(dir, name);
+ return ERRNO_CODES.EEXIST;
+ } catch (e) {
+ }
+ return FS.nodePermissions(dir, 'wx');
+ },mayDelete:function (dir, name, isdir) {
+ var node;
+ try {
+ node = FS.lookupNode(dir, name);
+ } catch (e) {
+ return e.errno;
+ }
+ var err = FS.nodePermissions(dir, 'wx');
+ if (err) {
+ return err;
+ }
+ if (isdir) {
+ if (!FS.isDir(node.mode)) {
+ return ERRNO_CODES.ENOTDIR;
+ }
+ if (FS.isRoot(node) || FS.getPath(node) === FS.cwd()) {
+ return ERRNO_CODES.EBUSY;
+ }
+ } else {
+ if (FS.isDir(node.mode)) {
+ return ERRNO_CODES.EISDIR;
+ }
+ }
+ return 0;
+ },mayOpen:function (node, flags) {
+ if (!node) {
+ return ERRNO_CODES.ENOENT;
+ }
+ if (FS.isLink(node.mode)) {
+ return ERRNO_CODES.ELOOP;
+ } else if (FS.isDir(node.mode)) {
+ if ((flags & 2097155) !== 0 || // opening for write
+ (flags & 512)) {
+ return ERRNO_CODES.EISDIR;
+ }
+ }
+ return FS.nodePermissions(node, FS.flagsToPermissionString(flags));
+ },MAX_OPEN_FDS:4096,nextfd:function (fd_start, fd_end) {
+ fd_start = fd_start || 0;
+ fd_end = fd_end || FS.MAX_OPEN_FDS;
+ for (var fd = fd_start; fd <= fd_end; fd++) {
+ if (!FS.streams[fd]) {
+ return fd;
+ }
+ }
+ throw new FS.ErrnoError(ERRNO_CODES.EMFILE);
+ },getStream:function (fd) {
+ return FS.streams[fd];
+ },createStream:function (stream, fd_start, fd_end) {
+ if (!FS.FSStream) {
+ FS.FSStream = function(){};
+ FS.FSStream.prototype = {};
+ // compatibility
+ Object.defineProperties(FS.FSStream.prototype, {
+ object: {
+ get: function() { return this.node; },
+ set: function(val) { this.node = val; }
+ },
+ isRead: {
+ get: function() { return (this.flags & 2097155) !== 1; }
+ },
+ isWrite: {
+ get: function() { return (this.flags & 2097155) !== 0; }
+ },
+ isAppend: {
+ get: function() { return (this.flags & 1024); }
+ }
+ });
+ }
+ if (0) {
+ // reuse the object
+ stream.__proto__ = FS.FSStream.prototype;
+ } else {
+ var newStream = new FS.FSStream();
+ for (var p in stream) {
+ newStream[p] = stream[p];
+ }
+ stream = newStream;
+ }
+ var fd = FS.nextfd(fd_start, fd_end);
+ stream.fd = fd;
+ FS.streams[fd] = stream;
+ return stream;
+ },closeStream:function (fd) {
+ FS.streams[fd] = null;
+ },getStreamFromPtr:function (ptr) {
+ return FS.streams[ptr - 1];
+ },getPtrForStream:function (stream) {
+ return stream ? stream.fd + 1 : 0;
+ },chrdev_stream_ops:{open:function (stream) {
+ var device = FS.getDevice(stream.node.rdev);
+ // override node's stream ops with the device's
+ stream.stream_ops = device.stream_ops;
+ // forward the open call
+ if (stream.stream_ops.open) {
+ stream.stream_ops.open(stream);
+ }
+ },llseek:function () {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }},major:function (dev) {
+ return ((dev) >> 8);
+ },minor:function (dev) {
+ return ((dev) & 0xff);
+ },makedev:function (ma, mi) {
+ return ((ma) << 8 | (mi));
+ },registerDevice:function (dev, ops) {
+ FS.devices[dev] = { stream_ops: ops };
+ },getDevice:function (dev) {
+ return FS.devices[dev];
+ },getMounts:function (mount) {
+ var mounts = [];
+ var check = [mount];
+
+ while (check.length) {
+ var m = check.pop();
+
+ mounts.push(m);
+
+ check.push.apply(check, m.mounts);
+ }
+
+ return mounts;
+ },syncfs:function (populate, callback) {
+ if (typeof(populate) === 'function') {
+ callback = populate;
+ populate = false;
+ }
+
+ var mounts = FS.getMounts(FS.root.mount);
+ var completed = 0;
+
+ function done(err) {
+ if (err) {
+ if (!done.errored) {
+ done.errored = true;
+ return callback(err);
+ }
+ return;
+ }
+ if (++completed >= mounts.length) {
+ callback(null);
+ }
+ };
+
+ // sync all mounts
+ mounts.forEach(function (mount) {
+ if (!mount.type.syncfs) {
+ return done(null);
+ }
+ mount.type.syncfs(mount, populate, done);
+ });
+ },mount:function (type, opts, mountpoint) {
+ var root = mountpoint === '/';
+ var pseudo = !mountpoint;
+ var node;
+
+ if (root && FS.root) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ } else if (!root && !pseudo) {
+ var lookup = FS.lookupPath(mountpoint, { follow_mount: false });
+
+ mountpoint = lookup.path; // use the absolute path
+ node = lookup.node;
+
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+
+ if (!FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTDIR);
+ }
+ }
+
+ var mount = {
+ type: type,
+ opts: opts,
+ mountpoint: mountpoint,
+ mounts: []
+ };
+
+ // create a root node for the fs
+ var mountRoot = type.mount(mount);
+ mountRoot.mount = mount;
+ mount.root = mountRoot;
+
+ if (root) {
+ FS.root = mountRoot;
+ } else if (node) {
+ // set as a mountpoint
+ node.mounted = mount;
+
+ // add the new mount to the current mount's children
+ if (node.mount) {
+ node.mount.mounts.push(mount);
+ }
+ }
+
+ return mountRoot;
+ },unmount:function (mountpoint) {
+ var lookup = FS.lookupPath(mountpoint, { follow_mount: false });
+
+ if (!FS.isMountpoint(lookup.node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+
+ // destroy the nodes for this mount, and all its child mounts
+ var node = lookup.node;
+ var mount = node.mounted;
+ var mounts = FS.getMounts(mount);
+
+ Object.keys(FS.nameTable).forEach(function (hash) {
+ var current = FS.nameTable[hash];
+
+ while (current) {
+ var next = current.name_next;
+
+ if (mounts.indexOf(current.mount) !== -1) {
+ FS.destroyNode(current);
+ }
+
+ current = next;
+ }
+ });
+
+ // no longer a mountpoint
+ node.mounted = null;
+
+ // remove this mount from the child mounts
+ var idx = node.mount.mounts.indexOf(mount);
+ assert(idx !== -1);
+ node.mount.mounts.splice(idx, 1);
+ },lookup:function (parent, name) {
+ return parent.node_ops.lookup(parent, name);
+ },mknod:function (path, mode, dev) {
+ var lookup = FS.lookupPath(path, { parent: true });
+ var parent = lookup.node;
+ var name = PATH.basename(path);
+ var err = FS.mayCreate(parent, name);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.mknod) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ return parent.node_ops.mknod(parent, name, mode, dev);
+ },create:function (path, mode) {
+ mode = mode !== undefined ? mode : 438 /* 0666 */;
+ mode &= 4095;
+ mode |= 32768;
+ return FS.mknod(path, mode, 0);
+ },mkdir:function (path, mode) {
+ mode = mode !== undefined ? mode : 511 /* 0777 */;
+ mode &= 511 | 512;
+ mode |= 16384;
+ return FS.mknod(path, mode, 0);
+ },mkdev:function (path, mode, dev) {
+ if (typeof(dev) === 'undefined') {
+ dev = mode;
+ mode = 438 /* 0666 */;
+ }
+ mode |= 8192;
+ return FS.mknod(path, mode, dev);
+ },symlink:function (oldpath, newpath) {
+ var lookup = FS.lookupPath(newpath, { parent: true });
+ var parent = lookup.node;
+ var newname = PATH.basename(newpath);
+ var err = FS.mayCreate(parent, newname);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.symlink) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ return parent.node_ops.symlink(parent, newname, oldpath);
+ },rename:function (old_path, new_path) {
+ var old_dirname = PATH.dirname(old_path);
+ var new_dirname = PATH.dirname(new_path);
+ var old_name = PATH.basename(old_path);
+ var new_name = PATH.basename(new_path);
+ // parents must exist
+ var lookup, old_dir, new_dir;
+ try {
+ lookup = FS.lookupPath(old_path, { parent: true });
+ old_dir = lookup.node;
+ lookup = FS.lookupPath(new_path, { parent: true });
+ new_dir = lookup.node;
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ // need to be part of the same mount
+ if (old_dir.mount !== new_dir.mount) {
+ throw new FS.ErrnoError(ERRNO_CODES.EXDEV);
+ }
+ // source must exist
+ var old_node = FS.lookupNode(old_dir, old_name);
+ // old path should not be an ancestor of the new path
+ var relative = PATH.relative(old_path, new_dirname);
+ if (relative.charAt(0) !== '.') {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ // new path should not be an ancestor of the old path
+ relative = PATH.relative(new_path, old_dirname);
+ if (relative.charAt(0) !== '.') {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTEMPTY);
+ }
+ // see if the new path already exists
+ var new_node;
+ try {
+ new_node = FS.lookupNode(new_dir, new_name);
+ } catch (e) {
+ // not fatal
+ }
+ // early out if nothing needs to change
+ if (old_node === new_node) {
+ return;
+ }
+ // we'll need to delete the old entry
+ var isdir = FS.isDir(old_node.mode);
+ var err = FS.mayDelete(old_dir, old_name, isdir);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ // need delete permissions if we'll be overwriting.
+ // need create permissions if new doesn't already exist.
+ err = new_node ?
+ FS.mayDelete(new_dir, new_name, isdir) :
+ FS.mayCreate(new_dir, new_name);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!old_dir.node_ops.rename) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isMountpoint(old_node) || (new_node && FS.isMountpoint(new_node))) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ // if we are going to change the parent, check write permissions
+ if (new_dir !== old_dir) {
+ err = FS.nodePermissions(old_dir, 'w');
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ }
+ // remove the node from the lookup hash
+ FS.hashRemoveNode(old_node);
+ // do the underlying fs rename
+ try {
+ old_dir.node_ops.rename(old_node, new_dir, new_name);
+ } catch (e) {
+ throw e;
+ } finally {
+ // add the node back to the hash (in case node_ops.rename
+ // changed its name)
+ FS.hashAddNode(old_node);
+ }
+ },rmdir:function (path) {
+ var lookup = FS.lookupPath(path, { parent: true });
+ var parent = lookup.node;
+ var name = PATH.basename(path);
+ var node = FS.lookupNode(parent, name);
+ var err = FS.mayDelete(parent, name, true);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.rmdir) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ parent.node_ops.rmdir(parent, name);
+ FS.destroyNode(node);
+ },readdir:function (path) {
+ var lookup = FS.lookupPath(path, { follow: true });
+ var node = lookup.node;
+ if (!node.node_ops.readdir) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTDIR);
+ }
+ return node.node_ops.readdir(node);
+ },unlink:function (path) {
+ var lookup = FS.lookupPath(path, { parent: true });
+ var parent = lookup.node;
+ var name = PATH.basename(path);
+ var node = FS.lookupNode(parent, name);
+ var err = FS.mayDelete(parent, name, false);
+ if (err) {
+ // POSIX says unlink should set EPERM, not EISDIR
+ if (err === ERRNO_CODES.EISDIR) err = ERRNO_CODES.EPERM;
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.unlink) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ parent.node_ops.unlink(parent, name);
+ FS.destroyNode(node);
+ },readlink:function (path) {
+ var lookup = FS.lookupPath(path);
+ var link = lookup.node;
+ if (!link.node_ops.readlink) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ return link.node_ops.readlink(link);
+ },stat:function (path, dontFollow) {
+ var lookup = FS.lookupPath(path, { follow: !dontFollow });
+ var node = lookup.node;
+ if (!node.node_ops.getattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ return node.node_ops.getattr(node);
+ },lstat:function (path) {
+ return FS.stat(path, true);
+ },chmod:function (path, mode, dontFollow) {
+ var node;
+ if (typeof path === 'string') {
+ var lookup = FS.lookupPath(path, { follow: !dontFollow });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ if (!node.node_ops.setattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ node.node_ops.setattr(node, {
+ mode: (mode & 4095) | (node.mode & ~4095),
+ timestamp: Date.now()
+ });
+ },lchmod:function (path, mode) {
+ FS.chmod(path, mode, true);
+ },fchmod:function (fd, mode) {
+ var stream = FS.getStream(fd);
+ if (!stream) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ FS.chmod(stream.node, mode);
+ },chown:function (path, uid, gid, dontFollow) {
+ var node;
+ if (typeof path === 'string') {
+ var lookup = FS.lookupPath(path, { follow: !dontFollow });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ if (!node.node_ops.setattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ node.node_ops.setattr(node, {
+ timestamp: Date.now()
+ // we ignore the uid / gid for now
+ });
+ },lchown:function (path, uid, gid) {
+ FS.chown(path, uid, gid, true);
+ },fchown:function (fd, uid, gid) {
+ var stream = FS.getStream(fd);
+ if (!stream) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ FS.chown(stream.node, uid, gid);
+ },truncate:function (path, len) {
+ if (len < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var node;
+ if (typeof path === 'string') {
+ var lookup = FS.lookupPath(path, { follow: true });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ if (!node.node_ops.setattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EISDIR);
+ }
+ if (!FS.isFile(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var err = FS.nodePermissions(node, 'w');
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ node.node_ops.setattr(node, {
+ size: len,
+ timestamp: Date.now()
+ });
+ },ftruncate:function (fd, len) {
+ var stream = FS.getStream(fd);
+ if (!stream) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ FS.truncate(stream.node, len);
+ },utime:function (path, atime, mtime) {
+ var lookup = FS.lookupPath(path, { follow: true });
+ var node = lookup.node;
+ node.node_ops.setattr(node, {
+ timestamp: Math.max(atime, mtime)
+ });
+ },open:function (path, flags, mode, fd_start, fd_end) {
+ flags = typeof flags === 'string' ? FS.modeStringToFlags(flags) : flags;
+ mode = typeof mode === 'undefined' ? 438 /* 0666 */ : mode;
+ if ((flags & 64)) {
+ mode = (mode & 4095) | 32768;
+ } else {
+ mode = 0;
+ }
+ var node;
+ if (typeof path === 'object') {
+ node = path;
+ } else {
+ path = PATH.normalize(path);
+ try {
+ var lookup = FS.lookupPath(path, {
+ follow: !(flags & 131072)
+ });
+ node = lookup.node;
+ } catch (e) {
+ // ignore
+ }
+ }
+ // perhaps we need to create the node
+ if ((flags & 64)) {
+ if (node) {
+ // if O_CREAT and O_EXCL are set, error out if the node already exists
+ if ((flags & 128)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EEXIST);
+ }
+ } else {
+ // node doesn't exist, try to create it
+ node = FS.mknod(path, mode, 0);
+ }
+ }
+ if (!node) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOENT);
+ }
+ // can't truncate a device
+ if (FS.isChrdev(node.mode)) {
+ flags &= ~512;
+ }
+ // check permissions
+ var err = FS.mayOpen(node, flags);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ // do truncation if necessary
+ if ((flags & 512)) {
+ FS.truncate(node, 0);
+ }
+ // we've already handled these, don't pass down to the underlying vfs
+ flags &= ~(128 | 512);
+
+ // register the stream with the filesystem
+ var stream = FS.createStream({
+ node: node,
+ path: FS.getPath(node), // we want the absolute path to the node
+ flags: flags,
+ seekable: true,
+ position: 0,
+ stream_ops: node.stream_ops,
+ // used by the file family libc calls (fopen, fwrite, ferror, etc.)
+ ungotten: [],
+ error: false
+ }, fd_start, fd_end);
+ // call the new stream's open function
+ if (stream.stream_ops.open) {
+ stream.stream_ops.open(stream);
+ }
+ if (Module['logReadFiles'] && !(flags & 1)) {
+ if (!FS.readFiles) FS.readFiles = {};
+ if (!(path in FS.readFiles)) {
+ FS.readFiles[path] = 1;
+ Module['printErr']('read file: ' + path);
+ }
+ }
+ return stream;
+ },close:function (stream) {
+ try {
+ if (stream.stream_ops.close) {
+ stream.stream_ops.close(stream);
+ }
+ } catch (e) {
+ throw e;
+ } finally {
+ FS.closeStream(stream.fd);
+ }
+ },llseek:function (stream, offset, whence) {
+ if (!stream.seekable || !stream.stream_ops.llseek) {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }
+ return stream.stream_ops.llseek(stream, offset, whence);
+ },read:function (stream, buffer, offset, length, position) {
+ if (length < 0 || position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ if ((stream.flags & 2097155) === 1) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if (FS.isDir(stream.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EISDIR);
+ }
+ if (!stream.stream_ops.read) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var seeking = true;
+ if (typeof position === 'undefined') {
+ position = stream.position;
+ seeking = false;
+ } else if (!stream.seekable) {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }
+ var bytesRead = stream.stream_ops.read(stream, buffer, offset, length, position);
+ if (!seeking) stream.position += bytesRead;
+ return bytesRead;
+ },write:function (stream, buffer, offset, length, position, canOwn) {
+ if (length < 0 || position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if (FS.isDir(stream.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EISDIR);
+ }
+ if (!stream.stream_ops.write) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var seeking = true;
+ if (typeof position === 'undefined') {
+ position = stream.position;
+ seeking = false;
+ } else if (!stream.seekable) {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }
+ if (stream.flags & 1024) {
+ // seek to the end before writing in append mode
+ FS.llseek(stream, 0, 2);
+ }
+ var bytesWritten = stream.stream_ops.write(stream, buffer, offset, length, position, canOwn);
+ if (!seeking) stream.position += bytesWritten;
+ return bytesWritten;
+ },allocate:function (stream, offset, length) {
+ if (offset < 0 || length <= 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if (!FS.isFile(stream.node.mode) && !FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ if (!stream.stream_ops.allocate) {
+ throw new FS.ErrnoError(ERRNO_CODES.EOPNOTSUPP);
+ }
+ stream.stream_ops.allocate(stream, offset, length);
+ },mmap:function (stream, buffer, offset, length, position, prot, flags) {
+ // TODO if PROT is PROT_WRITE, make sure we have write access
+ if ((stream.flags & 2097155) === 1) {
+ throw new FS.ErrnoError(ERRNO_CODES.EACCES);
+ }
+ if (!stream.stream_ops.mmap) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ return stream.stream_ops.mmap(stream, buffer, offset, length, position, prot, flags);
+ },ioctl:function (stream, cmd, arg) {
+ if (!stream.stream_ops.ioctl) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTTY);
+ }
+ return stream.stream_ops.ioctl(stream, cmd, arg);
+ },readFile:function (path, opts) {
+ opts = opts || {};
+ opts.flags = opts.flags || 'r';
+ opts.encoding = opts.encoding || 'binary';
+ if (opts.encoding !== 'utf8' && opts.encoding !== 'binary') {
+ throw new Error('Invalid encoding type "' + opts.encoding + '"');
+ }
+ var ret;
+ var stream = FS.open(path, opts.flags);
+ var stat = FS.stat(path);
+ var length = stat.size;
+ var buf = new Uint8Array(length);
+ FS.read(stream, buf, 0, length, 0);
+ if (opts.encoding === 'utf8') {
+ ret = '';
+ var utf8 = new Runtime.UTF8Processor();
+ for (var i = 0; i < length; i++) {
+ ret += utf8.processCChar(buf[i]);
+ }
+ } else if (opts.encoding === 'binary') {
+ ret = buf;
+ }
+ FS.close(stream);
+ return ret;
+ },writeFile:function (path, data, opts) {
+ opts = opts || {};
+ opts.flags = opts.flags || 'w';
+ opts.encoding = opts.encoding || 'utf8';
+ if (opts.encoding !== 'utf8' && opts.encoding !== 'binary') {
+ throw new Error('Invalid encoding type "' + opts.encoding + '"');
+ }
+ var stream = FS.open(path, opts.flags, opts.mode);
+ if (opts.encoding === 'utf8') {
+ var utf8 = new Runtime.UTF8Processor();
+ var buf = new Uint8Array(utf8.processJSString(data));
+ FS.write(stream, buf, 0, buf.length, 0, opts.canOwn);
+ } else if (opts.encoding === 'binary') {
+ FS.write(stream, data, 0, data.length, 0, opts.canOwn);
+ }
+ FS.close(stream);
+ },cwd:function () {
+ return FS.currentPath;
+ },chdir:function (path) {
+ var lookup = FS.lookupPath(path, { follow: true });
+ if (!FS.isDir(lookup.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTDIR);
+ }
+ var err = FS.nodePermissions(lookup.node, 'x');
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ FS.currentPath = lookup.path;
+ },createDefaultDirectories:function () {
+ FS.mkdir('/tmp');
+ },createDefaultDevices:function () {
+ // create /dev
+ FS.mkdir('/dev');
+ // setup /dev/null
+ FS.registerDevice(FS.makedev(1, 3), {
+ read: function() { return 0; },
+ write: function() { return 0; }
+ });
+ FS.mkdev('/dev/null', FS.makedev(1, 3));
+ // setup /dev/tty and /dev/tty1
+ // stderr needs to print output using Module['printErr']
+ // so we register a second tty just for it.
+ TTY.register(FS.makedev(5, 0), TTY.default_tty_ops);
+ TTY.register(FS.makedev(6, 0), TTY.default_tty1_ops);
+ FS.mkdev('/dev/tty', FS.makedev(5, 0));
+ FS.mkdev('/dev/tty1', FS.makedev(6, 0));
+ // we're not going to emulate the actual shm device,
+ // just create the tmp dirs that reside in it commonly
+ FS.mkdir('/dev/shm');
+ FS.mkdir('/dev/shm/tmp');
+ },createStandardStreams:function () {
+ // TODO deprecate the old functionality of a single
+ // input / output callback and that utilizes FS.createDevice
+ // and instead require a unique set of stream ops
+
+ // by default, we symlink the standard streams to the
+ // default tty devices. however, if the standard streams
+ // have been overwritten we create a unique device for
+ // them instead.
+ if (Module['stdin']) {
+ FS.createDevice('/dev', 'stdin', Module['stdin']);
+ } else {
+ FS.symlink('/dev/tty', '/dev/stdin');
+ }
+ if (Module['stdout']) {
+ FS.createDevice('/dev', 'stdout', null, Module['stdout']);
+ } else {
+ FS.symlink('/dev/tty', '/dev/stdout');
+ }
+ if (Module['stderr']) {
+ FS.createDevice('/dev', 'stderr', null, Module['stderr']);
+ } else {
+ FS.symlink('/dev/tty1', '/dev/stderr');
+ }
+
+ // open default streams for the stdin, stdout and stderr devices
+ var stdin = FS.open('/dev/stdin', 'r');
+ HEAP32[((_stdin)>>2)]=FS.getPtrForStream(stdin);
+ assert(stdin.fd === 0, 'invalid handle for stdin (' + stdin.fd + ')');
+
+ var stdout = FS.open('/dev/stdout', 'w');
+ HEAP32[((_stdout)>>2)]=FS.getPtrForStream(stdout);
+ assert(stdout.fd === 1, 'invalid handle for stdout (' + stdout.fd + ')');
+
+ var stderr = FS.open('/dev/stderr', 'w');
+ HEAP32[((_stderr)>>2)]=FS.getPtrForStream(stderr);
+ assert(stderr.fd === 2, 'invalid handle for stderr (' + stderr.fd + ')');
+ },ensureErrnoError:function () {
+ if (FS.ErrnoError) return;
+ FS.ErrnoError = function ErrnoError(errno) {
+ this.errno = errno;
+ for (var key in ERRNO_CODES) {
+ if (ERRNO_CODES[key] === errno) {
+ this.code = key;
+ break;
+ }
+ }
+ this.message = ERRNO_MESSAGES[errno];
+ };
+ FS.ErrnoError.prototype = new Error();
+ FS.ErrnoError.prototype.constructor = FS.ErrnoError;
+ // Some errors may happen quite a bit, to avoid overhead we reuse them (and suffer a lack of stack info)
+ [ERRNO_CODES.ENOENT].forEach(function(code) {
+ FS.genericErrors[code] = new FS.ErrnoError(code);
+ FS.genericErrors[code].stack = '<generic error, no stack>';
+ });
+ },staticInit:function () {
+ FS.ensureErrnoError();
+
+ FS.nameTable = new Array(4096);
+
+ FS.mount(MEMFS, {}, '/');
+
+ FS.createDefaultDirectories();
+ FS.createDefaultDevices();
+ },init:function (input, output, error) {
+ assert(!FS.init.initialized, 'FS.init was previously called. If you want to initialize later with custom parameters, remove any earlier calls (note that one is automatically added to the generated code)');
+ FS.init.initialized = true;
+
+ FS.ensureErrnoError();
+
+ // Allow Module.stdin etc. to provide defaults, if none explicitly passed to us here
+ Module['stdin'] = input || Module['stdin'];
+ Module['stdout'] = output || Module['stdout'];
+ Module['stderr'] = error || Module['stderr'];
+
+ FS.createStandardStreams();
+ },quit:function () {
+ FS.init.initialized = false;
+ for (var i = 0; i < FS.streams.length; i++) {
+ var stream = FS.streams[i];
+ if (!stream) {
+ continue;
+ }
+ FS.close(stream);
+ }
+ },getMode:function (canRead, canWrite) {
+ var mode = 0;
+ if (canRead) mode |= 292 | 73;
+ if (canWrite) mode |= 146;
+ return mode;
+ },joinPath:function (parts, forceRelative) {
+ var path = PATH.join.apply(null, parts);
+ if (forceRelative && path[0] == '/') path = path.substr(1);
+ return path;
+ },absolutePath:function (relative, base) {
+ return PATH.resolve(base, relative);
+ },standardizePath:function (path) {
+ return PATH.normalize(path);
+ },findObject:function (path, dontResolveLastLink) {
+ var ret = FS.analyzePath(path, dontResolveLastLink);
+ if (ret.exists) {
+ return ret.object;
+ } else {
+ ___setErrNo(ret.error);
+ return null;
+ }
+ },analyzePath:function (path, dontResolveLastLink) {
+ // operate from within the context of the symlink's target
+ try {
+ var lookup = FS.lookupPath(path, { follow: !dontResolveLastLink });
+ path = lookup.path;
+ } catch (e) {
+ }
+ var ret = {
+ isRoot: false, exists: false, error: 0, name: null, path: null, object: null,
+ parentExists: false, parentPath: null, parentObject: null
+ };
+ try {
+ var lookup = FS.lookupPath(path, { parent: true });
+ ret.parentExists = true;
+ ret.parentPath = lookup.path;
+ ret.parentObject = lookup.node;
+ ret.name = PATH.basename(path);
+ lookup = FS.lookupPath(path, { follow: !dontResolveLastLink });
+ ret.exists = true;
+ ret.path = lookup.path;
+ ret.object = lookup.node;
+ ret.name = lookup.node.name;
+ ret.isRoot = lookup.path === '/';
+ } catch (e) {
+ ret.error = e.errno;
+ };
+ return ret;
+ },createFolder:function (parent, name, canRead, canWrite) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ var mode = FS.getMode(canRead, canWrite);
+ return FS.mkdir(path, mode);
+ },createPath:function (parent, path, canRead, canWrite) {
+ parent = typeof parent === 'string' ? parent : FS.getPath(parent);
+ var parts = path.split('/').reverse();
+ while (parts.length) {
+ var part = parts.pop();
+ if (!part) continue;
+ var current = PATH.join2(parent, part);
+ try {
+ FS.mkdir(current);
+ } catch (e) {
+ // ignore EEXIST
+ }
+ parent = current;
+ }
+ return current;
+ },createFile:function (parent, name, properties, canRead, canWrite) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ var mode = FS.getMode(canRead, canWrite);
+ return FS.create(path, mode);
+ },createDataFile:function (parent, name, data, canRead, canWrite, canOwn) {
+ var path = name ? PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name) : parent;
+ var mode = FS.getMode(canRead, canWrite);
+ var node = FS.create(path, mode);
+ if (data) {
+ if (typeof data === 'string') {
+ var arr = new Array(data.length);
+ for (var i = 0, len = data.length; i < len; ++i) arr[i] = data.charCodeAt(i);
+ data = arr;
+ }
+ // make sure we can write to the file
+ FS.chmod(node, mode | 146);
+ var stream = FS.open(node, 'w');
+ FS.write(stream, data, 0, data.length, 0, canOwn);
+ FS.close(stream);
+ FS.chmod(node, mode);
+ }
+ return node;
+ },createDevice:function (parent, name, input, output) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ var mode = FS.getMode(!!input, !!output);
+ if (!FS.createDevice.major) FS.createDevice.major = 64;
+ var dev = FS.makedev(FS.createDevice.major++, 0);
+ // Create a fake device that a set of stream ops to emulate
+ // the old behavior.
+ FS.registerDevice(dev, {
+ open: function(stream) {
+ stream.seekable = false;
+ },
+ close: function(stream) {
+ // flush any pending line data
+ if (output && output.buffer && output.buffer.length) {
+ output(10);
+ }
+ },
+ read: function(stream, buffer, offset, length, pos /* ignored */) {
+ var bytesRead = 0;
+ for (var i = 0; i < length; i++) {
+ var result;
+ try {
+ result = input();
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ if (result === undefined && bytesRead === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ if (result === null || result === undefined) break;
+ bytesRead++;
+ buffer[offset+i] = result;
+ }
+ if (bytesRead) {
+ stream.node.timestamp = Date.now();
+ }
+ return bytesRead;
+ },
+ write: function(stream, buffer, offset, length, pos) {
+ for (var i = 0; i < length; i++) {
+ try {
+ output(buffer[offset+i]);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ }
+ if (length) {
+ stream.node.timestamp = Date.now();
+ }
+ return i;
+ }
+ });
+ return FS.mkdev(path, mode, dev);
+ },createLink:function (parent, name, target, canRead, canWrite) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ return FS.symlink(target, path);
+ },forceLoadFile:function (obj) {
+ if (obj.isDevice || obj.isFolder || obj.link || obj.contents) return true;
+ var success = true;
+ if (typeof XMLHttpRequest !== 'undefined') {
+ throw new Error("Lazy loading should have been performed (contents set) in createLazyFile, but it was not. Lazy loading only works in web workers. Use --embed-file or --preload-file in emcc on the main thread.");
+ } else if (Module['read']) {
+ // Command-line.
+ try {
+ // WARNING: Can't read binary files in V8's d8 or tracemonkey's js, as
+ // read() will try to parse UTF8.
+ obj.contents = intArrayFromString(Module['read'](obj.url), true);
+ } catch (e) {
+ success = false;
+ }
+ } else {
+ throw new Error('Cannot load without read() or XMLHttpRequest.');
+ }
+ if (!success) ___setErrNo(ERRNO_CODES.EIO);
+ return success;
+ },createLazyFile:function (parent, name, url, canRead, canWrite) {
+ // Lazy chunked Uint8Array (implements get and length from Uint8Array). Actual getting is abstracted away for eventual reuse.
+ function LazyUint8Array() {
+ this.lengthKnown = false;
+ this.chunks = []; // Loaded chunks. Index is the chunk number
+ }
+ LazyUint8Array.prototype.get = function LazyUint8Array_get(idx) {
+ if (idx > this.length-1 || idx < 0) {
+ return undefined;
+ }
+ var chunkOffset = idx % this.chunkSize;
+ var chunkNum = Math.floor(idx / this.chunkSize);
+ return this.getter(chunkNum)[chunkOffset];
+ }
+ LazyUint8Array.prototype.setDataGetter = function LazyUint8Array_setDataGetter(getter) {
+ this.getter = getter;
+ }
+ LazyUint8Array.prototype.cacheLength = function LazyUint8Array_cacheLength() {
+ // Find length
+ var xhr = new XMLHttpRequest();
+ xhr.open('HEAD', url, false);
+ xhr.send(null);
+ if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". Status: " + xhr.status);
+ var datalength = Number(xhr.getResponseHeader("Content-length"));
+ var header;
+ var hasByteServing = (header = xhr.getResponseHeader("Accept-Ranges")) && header === "bytes";
+ var chunkSize = 1024*1024; // Chunk size in bytes
+
+ if (!hasByteServing) chunkSize = datalength;
+
+ // Function to get a range from the remote URL.
+ var doXHR = (function(from, to) {
+ if (from > to) throw new Error("invalid range (" + from + ", " + to + ") or no bytes requested!");
+ if (to > datalength-1) throw new Error("only " + datalength + " bytes available! programmer error!");
+
+ // TODO: Use mozResponseArrayBuffer, responseStream, etc. if available.
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, false);
+ if (datalength !== chunkSize) xhr.setRequestHeader("Range", "bytes=" + from + "-" + to);
+
+ // Some hints to the browser that we want binary data.
+ if (typeof Uint8Array != 'undefined') xhr.responseType = 'arraybuffer';
+ if (xhr.overrideMimeType) {
+ xhr.overrideMimeType('text/plain; charset=x-user-defined');
+ }
+
+ xhr.send(null);
+ if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". Status: " + xhr.status);
+ if (xhr.response !== undefined) {
+ return new Uint8Array(xhr.response || []);
+ } else {
+ return intArrayFromString(xhr.responseText || '', true);
+ }
+ });
+ var lazyArray = this;
+ lazyArray.setDataGetter(function(chunkNum) {
+ var start = chunkNum * chunkSize;
+ var end = (chunkNum+1) * chunkSize - 1; // including this byte
+ end = Math.min(end, datalength-1); // if datalength-1 is selected, this is the last block
+ if (typeof(lazyArray.chunks[chunkNum]) === "undefined") {
+ lazyArray.chunks[chunkNum] = doXHR(start, end);
+ }
+ if (typeof(lazyArray.chunks[chunkNum]) === "undefined") throw new Error("doXHR failed!");
+ return lazyArray.chunks[chunkNum];
+ });
+
+ this._length = datalength;
+ this._chunkSize = chunkSize;
+ this.lengthKnown = true;
+ }
+ if (typeof XMLHttpRequest !== 'undefined') {
+ if (!ENVIRONMENT_IS_WORKER) throw 'Cannot do synchronous binary XHRs outside webworkers in modern browsers. Use --embed-file or --preload-file in emcc';
+ var lazyArray = new LazyUint8Array();
+ Object.defineProperty(lazyArray, "length", {
+ get: function() {
+ if(!this.lengthKnown) {
+ this.cacheLength();
+ }
+ return this._length;
+ }
+ });
+ Object.defineProperty(lazyArray, "chunkSize", {
+ get: function() {
+ if(!this.lengthKnown) {
+ this.cacheLength();
+ }
+ return this._chunkSize;
+ }
+ });
+
+ var properties = { isDevice: false, contents: lazyArray };
+ } else {
+ var properties = { isDevice: false, url: url };
+ }
+
+ var node = FS.createFile(parent, name, properties, canRead, canWrite);
+ // This is a total hack, but I want to get this lazy file code out of the
+ // core of MEMFS. If we want to keep this lazy file concept I feel it should
+ // be its own thin LAZYFS proxying calls to MEMFS.
+ if (properties.contents) {
+ node.contents = properties.contents;
+ } else if (properties.url) {
+ node.contents = null;
+ node.url = properties.url;
+ }
+ // override each stream op with one that tries to force load the lazy file first
+ var stream_ops = {};
+ var keys = Object.keys(node.stream_ops);
+ keys.forEach(function(key) {
+ var fn = node.stream_ops[key];
+ stream_ops[key] = function forceLoadLazyFile() {
+ if (!FS.forceLoadFile(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ return fn.apply(null, arguments);
+ };
+ });
+ // use a custom read function
+ stream_ops.read = function stream_ops_read(stream, buffer, offset, length, position) {
+ if (!FS.forceLoadFile(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ var contents = stream.node.contents;
+ if (position >= contents.length)
+ return 0;
+ var size = Math.min(contents.length - position, length);
+ assert(size >= 0);
+ if (contents.slice) { // normal array
+ for (var i = 0; i < size; i++) {
+ buffer[offset + i] = contents[position + i];
+ }
+ } else {
+ for (var i = 0; i < size; i++) { // LazyUint8Array from sync binary XHR
+ buffer[offset + i] = contents.get(position + i);
+ }
+ }
+ return size;
+ };
+ node.stream_ops = stream_ops;
+ return node;
+ },createPreloadedFile:function (parent, name, url, canRead, canWrite, onload, onerror, dontCreateFile, canOwn) {
+ Browser.init();
+ // TODO we should allow people to just pass in a complete filename instead
+ // of parent and name being that we just join them anyways
+ var fullname = name ? PATH.resolve(PATH.join2(parent, name)) : parent;
+ function processData(byteArray) {
+ function finish(byteArray) {
+ if (!dontCreateFile) {
+ FS.createDataFile(parent, name, byteArray, canRead, canWrite, canOwn);
+ }
+ if (onload) onload();
+ removeRunDependency('cp ' + fullname);
+ }
+ var handled = false;
+ Module['preloadPlugins'].forEach(function(plugin) {
+ if (handled) return;
+ if (plugin['canHandle'](fullname)) {
+ plugin['handle'](byteArray, fullname, finish, function() {
+ if (onerror) onerror();
+ removeRunDependency('cp ' + fullname);
+ });
+ handled = true;
+ }
+ });
+ if (!handled) finish(byteArray);
+ }
+ addRunDependency('cp ' + fullname);
+ if (typeof url == 'string') {
+ Browser.asyncLoad(url, function(byteArray) {
+ processData(byteArray);
+ }, onerror);
+ } else {
+ processData(url);
+ }
+ },indexedDB:function () {
+ return window.indexedDB || window.mozIndexedDB || window.webkitIndexedDB || window.msIndexedDB;
+ },DB_NAME:function () {
+ return 'EM_FS_' + window.location.pathname;
+ },DB_VERSION:20,DB_STORE_NAME:"FILE_DATA",saveFilesToDB:function (paths, onload, onerror) {
+ onload = onload || function(){};
+ onerror = onerror || function(){};
+ var indexedDB = FS.indexedDB();
+ try {
+ var openRequest = indexedDB.open(FS.DB_NAME(), FS.DB_VERSION);
+ } catch (e) {
+ return onerror(e);
+ }
+ openRequest.onupgradeneeded = function openRequest_onupgradeneeded() {
+ console.log('creating db');
+ var db = openRequest.result;
+ db.createObjectStore(FS.DB_STORE_NAME);
+ };
+ openRequest.onsuccess = function openRequest_onsuccess() {
+ var db = openRequest.result;
+ var transaction = db.transaction([FS.DB_STORE_NAME], 'readwrite');
+ var files = transaction.objectStore(FS.DB_STORE_NAME);
+ var ok = 0, fail = 0, total = paths.length;
+ function finish() {
+ if (fail == 0) onload(); else onerror();
+ }
+ paths.forEach(function(path) {
+ var putRequest = files.put(FS.analyzePath(path).object.contents, path);
+ putRequest.onsuccess = function putRequest_onsuccess() { ok++; if (ok + fail == total) finish() };
+ putRequest.onerror = function putRequest_onerror() { fail++; if (ok + fail == total) finish() };
+ });
+ transaction.onerror = onerror;
+ };
+ openRequest.onerror = onerror;
+ },loadFilesFromDB:function (paths, onload, onerror) {
+ onload = onload || function(){};
+ onerror = onerror || function(){};
+ var indexedDB = FS.indexedDB();
+ try {
+ var openRequest = indexedDB.open(FS.DB_NAME(), FS.DB_VERSION);
+ } catch (e) {
+ return onerror(e);
+ }
+ openRequest.onupgradeneeded = onerror; // no database to load from
+ openRequest.onsuccess = function openRequest_onsuccess() {
+ var db = openRequest.result;
+ try {
+ var transaction = db.transaction([FS.DB_STORE_NAME], 'readonly');
+ } catch(e) {
+ onerror(e);
+ return;
+ }
+ var files = transaction.objectStore(FS.DB_STORE_NAME);
+ var ok = 0, fail = 0, total = paths.length;
+ function finish() {
+ if (fail == 0) onload(); else onerror();
+ }
+ paths.forEach(function(path) {
+ var getRequest = files.get(path);
+ getRequest.onsuccess = function getRequest_onsuccess() {
+ if (FS.analyzePath(path).exists) {
+ FS.unlink(path);
+ }
+ FS.createDataFile(PATH.dirname(path), PATH.basename(path), getRequest.result, true, true, true);
+ ok++;
+ if (ok + fail == total) finish();
+ };
+ getRequest.onerror = function getRequest_onerror() { fail++; if (ok + fail == total) finish() };
+ });
+ transaction.onerror = onerror;
+ };
+ openRequest.onerror = onerror;
+ }};
+
+
+
+
+ function _mkport() { throw 'TODO' }var SOCKFS={mount:function (mount) {
+ return FS.createNode(null, '/', 16384 | 511 /* 0777 */, 0);
+ },createSocket:function (family, type, protocol) {
+ var streaming = type == 1;
+ if (protocol) {
+ assert(streaming == (protocol == 6)); // if SOCK_STREAM, must be tcp
+ }
+
+ // create our internal socket structure
+ var sock = {
+ family: family,
+ type: type,
+ protocol: protocol,
+ server: null,
+ peers: {},
+ pending: [],
+ recv_queue: [],
+ sock_ops: SOCKFS.websocket_sock_ops
+ };
+
+ // create the filesystem node to store the socket structure
+ var name = SOCKFS.nextname();
+ var node = FS.createNode(SOCKFS.root, name, 49152, 0);
+ node.sock = sock;
+
+ // and the wrapping stream that enables library functions such
+ // as read and write to indirectly interact with the socket
+ var stream = FS.createStream({
+ path: name,
+ node: node,
+ flags: FS.modeStringToFlags('r+'),
+ seekable: false,
+ stream_ops: SOCKFS.stream_ops
+ });
+
+ // map the new stream to the socket structure (sockets have a 1:1
+ // relationship with a stream)
+ sock.stream = stream;
+
+ return sock;
+ },getSocket:function (fd) {
+ var stream = FS.getStream(fd);
+ if (!stream || !FS.isSocket(stream.node.mode)) {
+ return null;
+ }
+ return stream.node.sock;
+ },stream_ops:{poll:function (stream) {
+ var sock = stream.node.sock;
+ return sock.sock_ops.poll(sock);
+ },ioctl:function (stream, request, varargs) {
+ var sock = stream.node.sock;
+ return sock.sock_ops.ioctl(sock, request, varargs);
+ },read:function (stream, buffer, offset, length, position /* ignored */) {
+ var sock = stream.node.sock;
+ var msg = sock.sock_ops.recvmsg(sock, length);
+ if (!msg) {
+ // socket is closed
+ return 0;
+ }
+ buffer.set(msg.buffer, offset);
+ return msg.buffer.length;
+ },write:function (stream, buffer, offset, length, position /* ignored */) {
+ var sock = stream.node.sock;
+ return sock.sock_ops.sendmsg(sock, buffer, offset, length);
+ },close:function (stream) {
+ var sock = stream.node.sock;
+ sock.sock_ops.close(sock);
+ }},nextname:function () {
+ if (!SOCKFS.nextname.current) {
+ SOCKFS.nextname.current = 0;
+ }
+ return 'socket[' + (SOCKFS.nextname.current++) + ']';
+ },websocket_sock_ops:{createPeer:function (sock, addr, port) {
+ var ws;
+
+ if (typeof addr === 'object') {
+ ws = addr;
+ addr = null;
+ port = null;
+ }
+
+ if (ws) {
+ // for sockets that've already connected (e.g. we're the server)
+ // we can inspect the _socket property for the address
+ if (ws._socket) {
+ addr = ws._socket.remoteAddress;
+ port = ws._socket.remotePort;
+ }
+ // if we're just now initializing a connection to the remote,
+ // inspect the url property
+ else {
+ var result = /ws[s]?:\/\/([^:]+):(\d+)/.exec(ws.url);
+ if (!result) {
+ throw new Error('WebSocket URL must be in the format ws(s)://address:port');
+ }
+ addr = result[1];
+ port = parseInt(result[2], 10);
+ }
+ } else {
+ // create the actual websocket object and connect
+ try {
+ // runtimeConfig gets set to true if WebSocket runtime configuration is available.
+ var runtimeConfig = (Module['websocket'] && ('object' === typeof Module['websocket']));
+
+ // The default value is 'ws://' the replace is needed because the compiler replaces "//" comments with '#'
+ // comments without checking context, so we'd end up with ws:#, the replace swaps the "#" for "//" again.
+ var url = 'ws:#'.replace('#', '//');
+
+ if (runtimeConfig) {
+ if ('string' === typeof Module['websocket']['url']) {
+ url = Module['websocket']['url']; // Fetch runtime WebSocket URL config.
+ }
+ }
+
+ if (url === 'ws://' || url === 'wss://') { // Is the supplied URL config just a prefix, if so complete it.
+ url = url + addr + ':' + port;
+ }
+
+ // Make the WebSocket subprotocol (Sec-WebSocket-Protocol) default to binary if no configuration is set.
+ var subProtocols = 'binary'; // The default value is 'binary'
+
+ if (runtimeConfig) {
+ if ('string' === typeof Module['websocket']['subprotocol']) {
+ subProtocols = Module['websocket']['subprotocol']; // Fetch runtime WebSocket subprotocol config.
+ }
+ }
+
+ // The regex trims the string (removes spaces at the beginning and end, then splits the string by
+ // <any space>,<any space> into an Array. Whitespace removal is important for Websockify and ws.
+ subProtocols = subProtocols.replace(/^ +| +$/g,"").split(/ *, */);
+
+ // The node ws library API for specifying optional subprotocol is slightly different than the browser's.
+ var opts = ENVIRONMENT_IS_NODE ? {'protocol': subProtocols.toString()} : subProtocols;
+
+ // If node we use the ws library.
+ var WebSocket = ENVIRONMENT_IS_NODE ? require('ws') : window['WebSocket'];
+ ws = new WebSocket(url, opts);
+ ws.binaryType = 'arraybuffer';
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EHOSTUNREACH);
+ }
+ }
+
+
+ var peer = {
+ addr: addr,
+ port: port,
+ socket: ws,
+ dgram_send_queue: []
+ };
+
+ SOCKFS.websocket_sock_ops.addPeer(sock, peer);
+ SOCKFS.websocket_sock_ops.handlePeerEvents(sock, peer);
+
+ // if this is a bound dgram socket, send the port number first to allow
+ // us to override the ephemeral port reported to us by remotePort on the
+ // remote end.
+ if (sock.type === 2 && typeof sock.sport !== 'undefined') {
+ peer.dgram_send_queue.push(new Uint8Array([
+ 255, 255, 255, 255,
+ 'p'.charCodeAt(0), 'o'.charCodeAt(0), 'r'.charCodeAt(0), 't'.charCodeAt(0),
+ ((sock.sport & 0xff00) >> 8) , (sock.sport & 0xff)
+ ]));
+ }
+
+ return peer;
+ },getPeer:function (sock, addr, port) {
+ return sock.peers[addr + ':' + port];
+ },addPeer:function (sock, peer) {
+ sock.peers[peer.addr + ':' + peer.port] = peer;
+ },removePeer:function (sock, peer) {
+ delete sock.peers[peer.addr + ':' + peer.port];
+ },handlePeerEvents:function (sock, peer) {
+ var first = true;
+
+ var handleOpen = function () {
+ try {
+ var queued = peer.dgram_send_queue.shift();
+ while (queued) {
+ peer.socket.send(queued);
+ queued = peer.dgram_send_queue.shift();
+ }
+ } catch (e) {
+ // not much we can do here in the way of proper error handling as we've already
+ // lied and said this data was sent. shut it down.
+ peer.socket.close();
+ }
+ };
+
+ function handleMessage(data) {
+ assert(typeof data !== 'string' && data.byteLength !== undefined); // must receive an ArrayBuffer
+ data = new Uint8Array(data); // make a typed array view on the array buffer
+
+
+ // if this is the port message, override the peer's port with it
+ var wasfirst = first;
+ first = false;
+ if (wasfirst &&
+ data.length === 10 &&
+ data[0] === 255 && data[1] === 255 && data[2] === 255 && data[3] === 255 &&
+ data[4] === 'p'.charCodeAt(0) && data[5] === 'o'.charCodeAt(0) && data[6] === 'r'.charCodeAt(0) && data[7] === 't'.charCodeAt(0)) {
+ // update the peer's port and it's key in the peer map
+ var newport = ((data[8] << 8) | data[9]);
+ SOCKFS.websocket_sock_ops.removePeer(sock, peer);
+ peer.port = newport;
+ SOCKFS.websocket_sock_ops.addPeer(sock, peer);
+ return;
+ }
+
+ sock.recv_queue.push({ addr: peer.addr, port: peer.port, data: data });
+ };
+
+ if (ENVIRONMENT_IS_NODE) {
+ peer.socket.on('open', handleOpen);
+ peer.socket.on('message', function(data, flags) {
+ if (!flags.binary) {
+ return;
+ }
+ handleMessage((new Uint8Array(data)).buffer); // copy from node Buffer -> ArrayBuffer
+ });
+ peer.socket.on('error', function() {
+ // don't throw
+ });
+ } else {
+ peer.socket.onopen = handleOpen;
+ peer.socket.onmessage = function peer_socket_onmessage(event) {
+ handleMessage(event.data);
+ };
+ }
+ },poll:function (sock) {
+ if (sock.type === 1 && sock.server) {
+ // listen sockets should only say they're available for reading
+ // if there are pending clients.
+ return sock.pending.length ? (64 | 1) : 0;
+ }
+
+ var mask = 0;
+ var dest = sock.type === 1 ? // we only care about the socket state for connection-based sockets
+ SOCKFS.websocket_sock_ops.getPeer(sock, sock.daddr, sock.dport) :
+ null;
+
+ if (sock.recv_queue.length ||
+ !dest || // connection-less sockets are always ready to read
+ (dest && dest.socket.readyState === dest.socket.CLOSING) ||
+ (dest && dest.socket.readyState === dest.socket.CLOSED)) { // let recv return 0 once closed
+ mask |= (64 | 1);
+ }
+
+ if (!dest || // connection-less sockets are always ready to write
+ (dest && dest.socket.readyState === dest.socket.OPEN)) {
+ mask |= 4;
+ }
+
+ if ((dest && dest.socket.readyState === dest.socket.CLOSING) ||
+ (dest && dest.socket.readyState === dest.socket.CLOSED)) {
+ mask |= 16;
+ }
+
+ return mask;
+ },ioctl:function (sock, request, arg) {
+ switch (request) {
+ case 21531:
+ var bytes = 0;
+ if (sock.recv_queue.length) {
+ bytes = sock.recv_queue[0].data.length;
+ }
+ HEAP32[((arg)>>2)]=bytes;
+ return 0;
+ default:
+ return ERRNO_CODES.EINVAL;
+ }
+ },close:function (sock) {
+ // if we've spawned a listen server, close it
+ if (sock.server) {
+ try {
+ sock.server.close();
+ } catch (e) {
+ }
+ sock.server = null;
+ }
+ // close any peer connections
+ var peers = Object.keys(sock.peers);
+ for (var i = 0; i < peers.length; i++) {
+ var peer = sock.peers[peers[i]];
+ try {
+ peer.socket.close();
+ } catch (e) {
+ }
+ SOCKFS.websocket_sock_ops.removePeer(sock, peer);
+ }
+ return 0;
+ },bind:function (sock, addr, port) {
+ if (typeof sock.saddr !== 'undefined' || typeof sock.sport !== 'undefined') {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL); // already bound
+ }
+ sock.saddr = addr;
+ sock.sport = port || _mkport();
+ // in order to emulate dgram sockets, we need to launch a listen server when
+ // binding on a connection-less socket
+ // note: this is only required on the server side
+ if (sock.type === 2) {
+ // close the existing server if it exists
+ if (sock.server) {
+ sock.server.close();
+ sock.server = null;
+ }
+ // swallow error operation not supported error that occurs when binding in the
+ // browser where this isn't supported
+ try {
+ sock.sock_ops.listen(sock, 0);
+ } catch (e) {
+ if (!(e instanceof FS.ErrnoError)) throw e;
+ if (e.errno !== ERRNO_CODES.EOPNOTSUPP) throw e;
+ }
+ }
+ },connect:function (sock, addr, port) {
+ if (sock.server) {
+ throw new FS.ErrnoError(ERRNO_CODS.EOPNOTSUPP);
+ }
+
+ // TODO autobind
+ // if (!sock.addr && sock.type == 2) {
+ // }
+
+ // early out if we're already connected / in the middle of connecting
+ if (typeof sock.daddr !== 'undefined' && typeof sock.dport !== 'undefined') {
+ var dest = SOCKFS.websocket_sock_ops.getPeer(sock, sock.daddr, sock.dport);
+ if (dest) {
+ if (dest.socket.readyState === dest.socket.CONNECTING) {
+ throw new FS.ErrnoError(ERRNO_CODES.EALREADY);
+ } else {
+ throw new FS.ErrnoError(ERRNO_CODES.EISCONN);
+ }
+ }
+ }
+
+ // add the socket to our peer list and set our
+ // destination address / port to match
+ var peer = SOCKFS.websocket_sock_ops.createPeer(sock, addr, port);
+ sock.daddr = peer.addr;
+ sock.dport = peer.port;
+
+ // always "fail" in non-blocking mode
+ throw new FS.ErrnoError(ERRNO_CODES.EINPROGRESS);
+ },listen:function (sock, backlog) {
+ if (!ENVIRONMENT_IS_NODE) {
+ throw new FS.ErrnoError(ERRNO_CODES.EOPNOTSUPP);
+ }
+ if (sock.server) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL); // already listening
+ }
+ var WebSocketServer = require('ws').Server;
+ var host = sock.saddr;
+ sock.server = new WebSocketServer({
+ host: host,
+ port: sock.sport
+ // TODO support backlog
+ });
+
+ sock.server.on('connection', function(ws) {
+ if (sock.type === 1) {
+ var newsock = SOCKFS.createSocket(sock.family, sock.type, sock.protocol);
+
+ // create a peer on the new socket
+ var peer = SOCKFS.websocket_sock_ops.createPeer(newsock, ws);
+ newsock.daddr = peer.addr;
+ newsock.dport = peer.port;
+
+ // push to queue for accept to pick up
+ sock.pending.push(newsock);
+ } else {
+ // create a peer on the listen socket so calling sendto
+ // with the listen socket and an address will resolve
+ // to the correct client
+ SOCKFS.websocket_sock_ops.createPeer(sock, ws);
+ }
+ });
+ sock.server.on('closed', function() {
+ sock.server = null;
+ });
+ sock.server.on('error', function() {
+ // don't throw
+ });
+ },accept:function (listensock) {
+ if (!listensock.server) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var newsock = listensock.pending.shift();
+ newsock.stream.flags = listensock.stream.flags;
+ return newsock;
+ },getname:function (sock, peer) {
+ var addr, port;
+ if (peer) {
+ if (sock.daddr === undefined || sock.dport === undefined) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ }
+ addr = sock.daddr;
+ port = sock.dport;
+ } else {
+ // TODO saddr and sport will be set for bind()'d UDP sockets, but what
+ // should we be returning for TCP sockets that've been connect()'d?
+ addr = sock.saddr || 0;
+ port = sock.sport || 0;
+ }
+ return { addr: addr, port: port };
+ },sendmsg:function (sock, buffer, offset, length, addr, port) {
+ if (sock.type === 2) {
+ // connection-less sockets will honor the message address,
+ // and otherwise fall back to the bound destination address
+ if (addr === undefined || port === undefined) {
+ addr = sock.daddr;
+ port = sock.dport;
+ }
+ // if there was no address to fall back to, error out
+ if (addr === undefined || port === undefined) {
+ throw new FS.ErrnoError(ERRNO_CODES.EDESTADDRREQ);
+ }
+ } else {
+ // connection-based sockets will only use the bound
+ addr = sock.daddr;
+ port = sock.dport;
+ }
+
+ // find the peer for the destination address
+ var dest = SOCKFS.websocket_sock_ops.getPeer(sock, addr, port);
+
+ // early out if not connected with a connection-based socket
+ if (sock.type === 1) {
+ if (!dest || dest.socket.readyState === dest.socket.CLOSING || dest.socket.readyState === dest.socket.CLOSED) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ } else if (dest.socket.readyState === dest.socket.CONNECTING) {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ }
+
+ // create a copy of the incoming data to send, as the WebSocket API
+ // doesn't work entirely with an ArrayBufferView, it'll just send
+ // the entire underlying buffer
+ var data;
+ if (buffer instanceof Array || buffer instanceof ArrayBuffer) {
+ data = buffer.slice(offset, offset + length);
+ } else { // ArrayBufferView
+ data = buffer.buffer.slice(buffer.byteOffset + offset, buffer.byteOffset + offset + length);
+ }
+
+ // if we're emulating a connection-less dgram socket and don't have
+ // a cached connection, queue the buffer to send upon connect and
+ // lie, saying the data was sent now.
+ if (sock.type === 2) {
+ if (!dest || dest.socket.readyState !== dest.socket.OPEN) {
+ // if we're not connected, open a new connection
+ if (!dest || dest.socket.readyState === dest.socket.CLOSING || dest.socket.readyState === dest.socket.CLOSED) {
+ dest = SOCKFS.websocket_sock_ops.createPeer(sock, addr, port);
+ }
+ dest.dgram_send_queue.push(data);
+ return length;
+ }
+ }
+
+ try {
+ // send the actual data
+ dest.socket.send(data);
+ return length;
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ },recvmsg:function (sock, length) {
+ // http://pubs.opengroup.org/onlinepubs/7908799/xns/recvmsg.html
+ if (sock.type === 1 && sock.server) {
+ // tcp servers should not be recv()'ing on the listen socket
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ }
+
+ var queued = sock.recv_queue.shift();
+ if (!queued) {
+ if (sock.type === 1) {
+ var dest = SOCKFS.websocket_sock_ops.getPeer(sock, sock.daddr, sock.dport);
+
+ if (!dest) {
+ // if we have a destination address but are not connected, error out
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ }
+ else if (dest.socket.readyState === dest.socket.CLOSING || dest.socket.readyState === dest.socket.CLOSED) {
+ // return null if the socket has closed
+ return null;
+ }
+ else {
+ // else, our socket is in a valid state but truly has nothing available
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ } else {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ }
+
+ // queued.data will be an ArrayBuffer if it's unadulterated, but if it's
+ // requeued TCP data it'll be an ArrayBufferView
+ var queuedLength = queued.data.byteLength || queued.data.length;
+ var queuedOffset = queued.data.byteOffset || 0;
+ var queuedBuffer = queued.data.buffer || queued.data;
+ var bytesRead = Math.min(length, queuedLength);
+ var res = {
+ buffer: new Uint8Array(queuedBuffer, queuedOffset, bytesRead),
+ addr: queued.addr,
+ port: queued.port
+ };
+
+
+ // push back any unread data for TCP connections
+ if (sock.type === 1 && bytesRead < queuedLength) {
+ var bytesRemaining = queuedLength - bytesRead;
+ queued.data = new Uint8Array(queuedBuffer, queuedOffset + bytesRead, bytesRemaining);
+ sock.recv_queue.unshift(queued);
+ }
+
+ return res;
+ }}};function _send(fd, buf, len, flags) {
+ var sock = SOCKFS.getSocket(fd);
+ if (!sock) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+ // TODO honor flags
+ return _write(fd, buf, len);
+ }
+
+ function _pwrite(fildes, buf, nbyte, offset) {
+ // ssize_t pwrite(int fildes, const void *buf, size_t nbyte, off_t offset);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/write.html
+ var stream = FS.getStream(fildes);
+ if (!stream) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+ try {
+ var slab = HEAP8;
+ return FS.write(stream, slab, buf, nbyte, offset);
+ } catch (e) {
+ FS.handleFSError(e);
+ return -1;
+ }
+ }function _write(fildes, buf, nbyte) {
+ // ssize_t write(int fildes, const void *buf, size_t nbyte);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/write.html
+ var stream = FS.getStream(fildes);
+ if (!stream) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+
+
+ try {
+ var slab = HEAP8;
+ return FS.write(stream, slab, buf, nbyte);
+ } catch (e) {
+ FS.handleFSError(e);
+ return -1;
+ }
+ }
+
+ function _fileno(stream) {
+ // int fileno(FILE *stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fileno.html
+ stream = FS.getStreamFromPtr(stream);
+ if (!stream) return -1;
+ return stream.fd;
+ }function _fwrite(ptr, size, nitems, stream) {
+ // size_t fwrite(const void *restrict ptr, size_t size, size_t nitems, FILE *restrict stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fwrite.html
+ var bytesToWrite = nitems * size;
+ if (bytesToWrite == 0) return 0;
+ var fd = _fileno(stream);
+ var bytesWritten = _write(fd, ptr, bytesToWrite);
+ if (bytesWritten == -1) {
+ var streamObj = FS.getStreamFromPtr(stream);
+ if (streamObj) streamObj.error = true;
+ return 0;
+ } else {
+ return Math.floor(bytesWritten / size);
+ }
+ }
+
+
+
+ Module["_strlen"] = _strlen;
+
+ function __reallyNegative(x) {
+ return x < 0 || (x === 0 && (1/x) === -Infinity);
+ }function __formatString(format, varargs) {
+ var textIndex = format;
+ var argIndex = 0;
+ function getNextArg(type) {
+ // NOTE: Explicitly ignoring type safety. Otherwise this fails:
+ // int x = 4; printf("%c\n", (char)x);
+ var ret;
+ if (type === 'double') {
+ ret = HEAPF64[(((varargs)+(argIndex))>>3)];
+ } else if (type == 'i64') {
+ ret = [HEAP32[(((varargs)+(argIndex))>>2)],
+ HEAP32[(((varargs)+(argIndex+4))>>2)]];
+
+ } else {
+ type = 'i32'; // varargs are always i32, i64, or double
+ ret = HEAP32[(((varargs)+(argIndex))>>2)];
+ }
+ argIndex += Runtime.getNativeFieldSize(type);
+ return ret;
+ }
+
+ var ret = [];
+ var curr, next, currArg;
+ while(1) {
+ var startTextIndex = textIndex;
+ curr = HEAP8[(textIndex)];
+ if (curr === 0) break;
+ next = HEAP8[((textIndex+1)|0)];
+ if (curr == 37) {
+ // Handle flags.
+ var flagAlwaysSigned = false;
+ var flagLeftAlign = false;
+ var flagAlternative = false;
+ var flagZeroPad = false;
+ var flagPadSign = false;
+ flagsLoop: while (1) {
+ switch (next) {
+ case 43:
+ flagAlwaysSigned = true;
+ break;
+ case 45:
+ flagLeftAlign = true;
+ break;
+ case 35:
+ flagAlternative = true;
+ break;
+ case 48:
+ if (flagZeroPad) {
+ break flagsLoop;
+ } else {
+ flagZeroPad = true;
+ break;
+ }
+ case 32:
+ flagPadSign = true;
+ break;
+ default:
+ break flagsLoop;
+ }
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ }
+
+ // Handle width.
+ var width = 0;
+ if (next == 42) {
+ width = getNextArg('i32');
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ } else {
+ while (next >= 48 && next <= 57) {
+ width = width * 10 + (next - 48);
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ }
+ }
+
+ // Handle precision.
+ var precisionSet = false, precision = -1;
+ if (next == 46) {
+ precision = 0;
+ precisionSet = true;
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ if (next == 42) {
+ precision = getNextArg('i32');
+ textIndex++;
+ } else {
+ while(1) {
+ var precisionChr = HEAP8[((textIndex+1)|0)];
+ if (precisionChr < 48 ||
+ precisionChr > 57) break;
+ precision = precision * 10 + (precisionChr - 48);
+ textIndex++;
+ }
+ }
+ next = HEAP8[((textIndex+1)|0)];
+ }
+ if (precision < 0) {
+ precision = 6; // Standard default.
+ precisionSet = false;
+ }
+
+ // Handle integer sizes. WARNING: These assume a 32-bit architecture!
+ var argSize;
+ switch (String.fromCharCode(next)) {
+ case 'h':
+ var nextNext = HEAP8[((textIndex+2)|0)];
+ if (nextNext == 104) {
+ textIndex++;
+ argSize = 1; // char (actually i32 in varargs)
+ } else {
+ argSize = 2; // short (actually i32 in varargs)
+ }
+ break;
+ case 'l':
+ var nextNext = HEAP8[((textIndex+2)|0)];
+ if (nextNext == 108) {
+ textIndex++;
+ argSize = 8; // long long
+ } else {
+ argSize = 4; // long
+ }
+ break;
+ case 'L': // long long
+ case 'q': // int64_t
+ case 'j': // intmax_t
+ argSize = 8;
+ break;
+ case 'z': // size_t
+ case 't': // ptrdiff_t
+ case 'I': // signed ptrdiff_t or unsigned size_t
+ argSize = 4;
+ break;
+ default:
+ argSize = null;
+ }
+ if (argSize) textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+
+ // Handle type specifier.
+ switch (String.fromCharCode(next)) {
+ case 'd': case 'i': case 'u': case 'o': case 'x': case 'X': case 'p': {
+ // Integer.
+ var signed = next == 100 || next == 105;
+ argSize = argSize || 4;
+ var currArg = getNextArg('i' + (argSize * 8));
+ var argText;
+ // Flatten i64-1 [low, high] into a (slightly rounded) double
+ if (argSize == 8) {
+ currArg = Runtime.makeBigInt(currArg[0], currArg[1], next == 117);
+ }
+ // Truncate to requested size.
+ if (argSize <= 4) {
+ var limit = Math.pow(256, argSize) - 1;
+ currArg = (signed ? reSign : unSign)(currArg & limit, argSize * 8);
+ }
+ // Format the number.
+ var currAbsArg = Math.abs(currArg);
+ var prefix = '';
+ if (next == 100 || next == 105) {
+ argText = reSign(currArg, 8 * argSize, 1).toString(10);
+ } else if (next == 117) {
+ argText = unSign(currArg, 8 * argSize, 1).toString(10);
+ currArg = Math.abs(currArg);
+ } else if (next == 111) {
+ argText = (flagAlternative ? '0' : '') + currAbsArg.toString(8);
+ } else if (next == 120 || next == 88) {
+ prefix = (flagAlternative && currArg != 0) ? '0x' : '';
+ if (currArg < 0) {
+ // Represent negative numbers in hex as 2's complement.
+ currArg = -currArg;
+ argText = (currAbsArg - 1).toString(16);
+ var buffer = [];
+ for (var i = 0; i < argText.length; i++) {
+ buffer.push((0xF - parseInt(argText[i], 16)).toString(16));
+ }
+ argText = buffer.join('');
+ while (argText.length < argSize * 2) argText = 'f' + argText;
+ } else {
+ argText = currAbsArg.toString(16);
+ }
+ if (next == 88) {
+ prefix = prefix.toUpperCase();
+ argText = argText.toUpperCase();
+ }
+ } else if (next == 112) {
+ if (currAbsArg === 0) {
+ argText = '(nil)';
+ } else {
+ prefix = '0x';
+ argText = currAbsArg.toString(16);
+ }
+ }
+ if (precisionSet) {
+ while (argText.length < precision) {
+ argText = '0' + argText;
+ }
+ }
+
+ // Add sign if needed
+ if (currArg >= 0) {
+ if (flagAlwaysSigned) {
+ prefix = '+' + prefix;
+ } else if (flagPadSign) {
+ prefix = ' ' + prefix;
+ }
+ }
+
+ // Move sign to prefix so we zero-pad after the sign
+ if (argText.charAt(0) == '-') {
+ prefix = '-' + prefix;
+ argText = argText.substr(1);
+ }
+
+ // Add padding.
+ while (prefix.length + argText.length < width) {
+ if (flagLeftAlign) {
+ argText += ' ';
+ } else {
+ if (flagZeroPad) {
+ argText = '0' + argText;
+ } else {
+ prefix = ' ' + prefix;
+ }
+ }
+ }
+
+ // Insert the result into the buffer.
+ argText = prefix + argText;
+ argText.split('').forEach(function(chr) {
+ ret.push(chr.charCodeAt(0));
+ });
+ break;
+ }
+ case 'f': case 'F': case 'e': case 'E': case 'g': case 'G': {
+ // Float.
+ var currArg = getNextArg('double');
+ var argText;
+ if (isNaN(currArg)) {
+ argText = 'nan';
+ flagZeroPad = false;
+ } else if (!isFinite(currArg)) {
+ argText = (currArg < 0 ? '-' : '') + 'inf';
+ flagZeroPad = false;
+ } else {
+ var isGeneral = false;
+ var effectivePrecision = Math.min(precision, 20);
+
+ // Convert g/G to f/F or e/E, as per:
+ // http://pubs.opengroup.org/onlinepubs/9699919799/functions/printf.html
+ if (next == 103 || next == 71) {
+ isGeneral = true;
+ precision = precision || 1;
+ var exponent = parseInt(currArg.toExponential(effectivePrecision).split('e')[1], 10);
+ if (precision > exponent && exponent >= -4) {
+ next = ((next == 103) ? 'f' : 'F').charCodeAt(0);
+ precision -= exponent + 1;
+ } else {
+ next = ((next == 103) ? 'e' : 'E').charCodeAt(0);
+ precision--;
+ }
+ effectivePrecision = Math.min(precision, 20);
+ }
+
+ if (next == 101 || next == 69) {
+ argText = currArg.toExponential(effectivePrecision);
+ // Make sure the exponent has at least 2 digits.
+ if (/[eE][-+]\d$/.test(argText)) {
+ argText = argText.slice(0, -1) + '0' + argText.slice(-1);
+ }
+ } else if (next == 102 || next == 70) {
+ argText = currArg.toFixed(effectivePrecision);
+ if (currArg === 0 && __reallyNegative(currArg)) {
+ argText = '-' + argText;
+ }
+ }
+
+ var parts = argText.split('e');
+ if (isGeneral && !flagAlternative) {
+ // Discard trailing zeros and periods.
+ while (parts[0].length > 1 && parts[0].indexOf('.') != -1 &&
+ (parts[0].slice(-1) == '0' || parts[0].slice(-1) == '.')) {
+ parts[0] = parts[0].slice(0, -1);
+ }
+ } else {
+ // Make sure we have a period in alternative mode.
+ if (flagAlternative && argText.indexOf('.') == -1) parts[0] += '.';
+ // Zero pad until required precision.
+ while (precision > effectivePrecision++) parts[0] += '0';
+ }
+ argText = parts[0] + (parts.length > 1 ? 'e' + parts[1] : '');
+
+ // Capitalize 'E' if needed.
+ if (next == 69) argText = argText.toUpperCase();
+
+ // Add sign.
+ if (currArg >= 0) {
+ if (flagAlwaysSigned) {
+ argText = '+' + argText;
+ } else if (flagPadSign) {
+ argText = ' ' + argText;
+ }
+ }
+ }
+
+ // Add padding.
+ while (argText.length < width) {
+ if (flagLeftAlign) {
+ argText += ' ';
+ } else {
+ if (flagZeroPad && (argText[0] == '-' || argText[0] == '+')) {
+ argText = argText[0] + '0' + argText.slice(1);
+ } else {
+ argText = (flagZeroPad ? '0' : ' ') + argText;
+ }
+ }
+ }
+
+ // Adjust case.
+ if (next < 97) argText = argText.toUpperCase();
+
+ // Insert the result into the buffer.
+ argText.split('').forEach(function(chr) {
+ ret.push(chr.charCodeAt(0));
+ });
+ break;
+ }
+ case 's': {
+ // String.
+ var arg = getNextArg('i8*');
+ var argLength = arg ? _strlen(arg) : '(null)'.length;
+ if (precisionSet) argLength = Math.min(argLength, precision);
+ if (!flagLeftAlign) {
+ while (argLength < width--) {
+ ret.push(32);
+ }
+ }
+ if (arg) {
+ for (var i = 0; i < argLength; i++) {
+ ret.push(HEAPU8[((arg++)|0)]);
+ }
+ } else {
+ ret = ret.concat(intArrayFromString('(null)'.substr(0, argLength), true));
+ }
+ if (flagLeftAlign) {
+ while (argLength < width--) {
+ ret.push(32);
+ }
+ }
+ break;
+ }
+ case 'c': {
+ // Character.
+ if (flagLeftAlign) ret.push(getNextArg('i8'));
+ while (--width > 0) {
+ ret.push(32);
+ }
+ if (!flagLeftAlign) ret.push(getNextArg('i8'));
+ break;
+ }
+ case 'n': {
+ // Write the length written so far to the next parameter.
+ var ptr = getNextArg('i32*');
+ HEAP32[((ptr)>>2)]=ret.length;
+ break;
+ }
+ case '%': {
+ // Literal percent sign.
+ ret.push(curr);
+ break;
+ }
+ default: {
+ // Unknown specifiers remain untouched.
+ for (var i = startTextIndex; i < textIndex + 2; i++) {
+ ret.push(HEAP8[(i)]);
+ }
+ }
+ }
+ textIndex += 2;
+ // TODO: Support a/A (hex float) and m (last error) specifiers.
+ // TODO: Support %1${specifier} for arg selection.
+ } else {
+ ret.push(curr);
+ textIndex += 1;
+ }
+ }
+ return ret;
+ }function _fprintf(stream, format, varargs) {
+ // int fprintf(FILE *restrict stream, const char *restrict format, ...);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/printf.html
+ var result = __formatString(format, varargs);
+ var stack = Runtime.stackSave();
+ var ret = _fwrite(allocate(result, 'i8', ALLOC_STACK), 1, result.length, stream);
+ Runtime.stackRestore(stack);
+ return ret;
+ }function _printf(format, varargs) {
+ // int printf(const char *restrict format, ...);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/printf.html
+ var stdout = HEAP32[((_stdout)>>2)];
+ return _fprintf(stdout, format, varargs);
+ }
+
+
+
+ function _emscripten_memcpy_big(dest, src, num) {
+ HEAPU8.set(HEAPU8.subarray(src, src+num), dest);
+ return dest;
+ }
+ Module["_memcpy"] = _memcpy;
+
+
+ function _fputs(s, stream) {
+ // int fputs(const char *restrict s, FILE *restrict stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fputs.html
+ var fd = _fileno(stream);
+ return _write(fd, s, _strlen(s));
+ }
+
+ function _fputc(c, stream) {
+ // int fputc(int c, FILE *stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fputc.html
+ var chr = unSign(c & 0xFF);
+ HEAP8[((_fputc.ret)|0)]=chr;
+ var fd = _fileno(stream);
+ var ret = _write(fd, _fputc.ret, 1);
+ if (ret == -1) {
+ var streamObj = FS.getStreamFromPtr(stream);
+ if (streamObj) streamObj.error = true;
+ return -1;
+ } else {
+ return chr;
+ }
+ }function _puts(s) {
+ // int puts(const char *s);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/puts.html
+ // NOTE: puts() always writes an extra newline.
+ var stdout = HEAP32[((_stdout)>>2)];
+ var ret = _fputs(s, stdout);
+ if (ret < 0) {
+ return ret;
+ } else {
+ var newlineRet = _fputc(10, stdout);
+ return (newlineRet < 0) ? -1 : ret + 1;
+ }
+ }
+
+ function _sbrk(bytes) {
+ // Implement a Linux-like 'memory area' for our 'process'.
+ // Changes the size of the memory area by |bytes|; returns the
+ // address of the previous top ('break') of the memory area
+ // We control the "dynamic" memory - DYNAMIC_BASE to DYNAMICTOP
+ var self = _sbrk;
+ if (!self.called) {
+ DYNAMICTOP = alignMemoryPage(DYNAMICTOP); // make sure we start out aligned
+ self.called = true;
+ assert(Runtime.dynamicAlloc);
+ self.alloc = Runtime.dynamicAlloc;
+ Runtime.dynamicAlloc = function() { abort('cannot dynamically allocate, sbrk now has control') };
+ }
+ var ret = DYNAMICTOP;
+ if (bytes != 0) self.alloc(bytes);
+ return ret; // Previous break location.
+ }
+
+ function ___errno_location() {
+ return ___errno_state;
+ }
+
+ function __ZNSt9exceptionD2Ev() {}
+
+ var Browser={mainLoop:{scheduler:null,method:"",shouldPause:false,paused:false,queue:[],pause:function () {
+ Browser.mainLoop.shouldPause = true;
+ },resume:function () {
+ if (Browser.mainLoop.paused) {
+ Browser.mainLoop.paused = false;
+ Browser.mainLoop.scheduler();
+ }
+ Browser.mainLoop.shouldPause = false;
+ },updateStatus:function () {
+ if (Module['setStatus']) {
+ var message = Module['statusMessage'] || 'Please wait...';
+ var remaining = Browser.mainLoop.remainingBlockers;
+ var expected = Browser.mainLoop.expectedBlockers;
+ if (remaining) {
+ if (remaining < expected) {
+ Module['setStatus'](message + ' (' + (expected - remaining) + '/' + expected + ')');
+ } else {
+ Module['setStatus'](message);
+ }
+ } else {
+ Module['setStatus']('');
+ }
+ }
+ }},isFullScreen:false,pointerLock:false,moduleContextCreatedCallbacks:[],workers:[],init:function () {
+ if (!Module["preloadPlugins"]) Module["preloadPlugins"] = []; // needs to exist even in workers
+
+ if (Browser.initted || ENVIRONMENT_IS_WORKER) return;
+ Browser.initted = true;
+
+ try {
+ new Blob();
+ Browser.hasBlobConstructor = true;
+ } catch(e) {
+ Browser.hasBlobConstructor = false;
+ console.log("warning: no blob constructor, cannot create blobs with mimetypes");
+ }
+ Browser.BlobBuilder = typeof MozBlobBuilder != "undefined" ? MozBlobBuilder : (typeof WebKitBlobBuilder != "undefined" ? WebKitBlobBuilder : (!Browser.hasBlobConstructor ? console.log("warning: no BlobBuilder") : null));
+ Browser.URLObject = typeof window != "undefined" ? (window.URL ? window.URL : window.webkitURL) : undefined;
+ if (!Module.noImageDecoding && typeof Browser.URLObject === 'undefined') {
+ console.log("warning: Browser does not support creating object URLs. Built-in browser image decoding will not be available.");
+ Module.noImageDecoding = true;
+ }
+
+ // Support for plugins that can process preloaded files. You can add more of these to
+ // your app by creating and appending to Module.preloadPlugins.
+ //
+ // Each plugin is asked if it can handle a file based on the file's name. If it can,
+ // it is given the file's raw data. When it is done, it calls a callback with the file's
+ // (possibly modified) data. For example, a plugin might decompress a file, or it
+ // might create some side data structure for use later (like an Image element, etc.).
+
+ var imagePlugin = {};
+ imagePlugin['canHandle'] = function imagePlugin_canHandle(name) {
+ return !Module.noImageDecoding && /\.(jpg|jpeg|png|bmp)$/i.test(name);
+ };
+ imagePlugin['handle'] = function imagePlugin_handle(byteArray, name, onload, onerror) {
+ var b = null;
+ if (Browser.hasBlobConstructor) {
+ try {
+ b = new Blob([byteArray], { type: Browser.getMimetype(name) });
+ if (b.size !== byteArray.length) { // Safari bug #118630
+ // Safari's Blob can only take an ArrayBuffer
+ b = new Blob([(new Uint8Array(byteArray)).buffer], { type: Browser.getMimetype(name) });
+ }
+ } catch(e) {
+ Runtime.warnOnce('Blob constructor present but fails: ' + e + '; falling back to blob builder');
+ }
+ }
+ if (!b) {
+ var bb = new Browser.BlobBuilder();
+ bb.append((new Uint8Array(byteArray)).buffer); // we need to pass a buffer, and must copy the array to get the right data range
+ b = bb.getBlob();
+ }
+ var url = Browser.URLObject.createObjectURL(b);
+ var img = new Image();
+ img.onload = function img_onload() {
+ assert(img.complete, 'Image ' + name + ' could not be decoded');
+ var canvas = document.createElement('canvas');
+ canvas.width = img.width;
+ canvas.height = img.height;
+ var ctx = canvas.getContext('2d');
+ ctx.drawImage(img, 0, 0);
+ Module["preloadedImages"][name] = canvas;
+ Browser.URLObject.revokeObjectURL(url);
+ if (onload) onload(byteArray);
+ };
+ img.onerror = function img_onerror(event) {
+ console.log('Image ' + url + ' could not be decoded');
+ if (onerror) onerror();
+ };
+ img.src = url;
+ };
+ Module['preloadPlugins'].push(imagePlugin);
+
+ var audioPlugin = {};
+ audioPlugin['canHandle'] = function audioPlugin_canHandle(name) {
+ return !Module.noAudioDecoding && name.substr(-4) in { '.ogg': 1, '.wav': 1, '.mp3': 1 };
+ };
+ audioPlugin['handle'] = function audioPlugin_handle(byteArray, name, onload, onerror) {
+ var done = false;
+ function finish(audio) {
+ if (done) return;
+ done = true;
+ Module["preloadedAudios"][name] = audio;
+ if (onload) onload(byteArray);
+ }
+ function fail() {
+ if (done) return;
+ done = true;
+ Module["preloadedAudios"][name] = new Audio(); // empty shim
+ if (onerror) onerror();
+ }
+ if (Browser.hasBlobConstructor) {
+ try {
+ var b = new Blob([byteArray], { type: Browser.getMimetype(name) });
+ } catch(e) {
+ return fail();
+ }
+ var url = Browser.URLObject.createObjectURL(b); // XXX we never revoke this!
+ var audio = new Audio();
+ audio.addEventListener('canplaythrough', function() { finish(audio) }, false); // use addEventListener due to chromium bug 124926
+ audio.onerror = function audio_onerror(event) {
+ if (done) return;
+ console.log('warning: browser could not fully decode audio ' + name + ', trying slower base64 approach');
+ function encode64(data) {
+ var BASE = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';
+ var PAD = '=';
+ var ret = '';
+ var leftchar = 0;
+ var leftbits = 0;
+ for (var i = 0; i < data.length; i++) {
+ leftchar = (leftchar << 8) | data[i];
+ leftbits += 8;
+ while (leftbits >= 6) {
+ var curr = (leftchar >> (leftbits-6)) & 0x3f;
+ leftbits -= 6;
+ ret += BASE[curr];
+ }
+ }
+ if (leftbits == 2) {
+ ret += BASE[(leftchar&3) << 4];
+ ret += PAD + PAD;
+ } else if (leftbits == 4) {
+ ret += BASE[(leftchar&0xf) << 2];
+ ret += PAD;
+ }
+ return ret;
+ }
+ audio.src = 'data:audio/x-' + name.substr(-3) + ';base64,' + encode64(byteArray);
+ finish(audio); // we don't wait for confirmation this worked - but it's worth trying
+ };
+ audio.src = url;
+ // workaround for chrome bug 124926 - we do not always get oncanplaythrough or onerror
+ Browser.safeSetTimeout(function() {
+ finish(audio); // try to use it even though it is not necessarily ready to play
+ }, 10000);
+ } else {
+ return fail();
+ }
+ };
+ Module['preloadPlugins'].push(audioPlugin);
+
+ // Canvas event setup
+
+ var canvas = Module['canvas'];
+
+ // forced aspect ratio can be enabled by defining 'forcedAspectRatio' on Module
+ // Module['forcedAspectRatio'] = 4 / 3;
+
+ canvas.requestPointerLock = canvas['requestPointerLock'] ||
+ canvas['mozRequestPointerLock'] ||
+ canvas['webkitRequestPointerLock'] ||
+ canvas['msRequestPointerLock'] ||
+ function(){};
+ canvas.exitPointerLock = document['exitPointerLock'] ||
+ document['mozExitPointerLock'] ||
+ document['webkitExitPointerLock'] ||
+ document['msExitPointerLock'] ||
+ function(){}; // no-op if function does not exist
+ canvas.exitPointerLock = canvas.exitPointerLock.bind(document);
+
+ function pointerLockChange() {
+ Browser.pointerLock = document['pointerLockElement'] === canvas ||
+ document['mozPointerLockElement'] === canvas ||
+ document['webkitPointerLockElement'] === canvas ||
+ document['msPointerLockElement'] === canvas;
+ }
+
+ document.addEventListener('pointerlockchange', pointerLockChange, false);
+ document.addEventListener('mozpointerlockchange', pointerLockChange, false);
+ document.addEventListener('webkitpointerlockchange', pointerLockChange, false);
+ document.addEventListener('mspointerlockchange', pointerLockChange, false);
+
+ if (Module['elementPointerLock']) {
+ canvas.addEventListener("click", function(ev) {
+ if (!Browser.pointerLock && canvas.requestPointerLock) {
+ canvas.requestPointerLock();
+ ev.preventDefault();
+ }
+ }, false);
+ }
+ },createContext:function (canvas, useWebGL, setInModule, webGLContextAttributes) {
+ var ctx;
+ var errorInfo = '?';
+ function onContextCreationError(event) {
+ errorInfo = event.statusMessage || errorInfo;
+ }
+ try {
+ if (useWebGL) {
+ var contextAttributes = {
+ antialias: false,
+ alpha: false
+ };
+
+ if (webGLContextAttributes) {
+ for (var attribute in webGLContextAttributes) {
+ contextAttributes[attribute] = webGLContextAttributes[attribute];
+ }
+ }
+
+
+ canvas.addEventListener('webglcontextcreationerror', onContextCreationError, false);
+ try {
+ ['experimental-webgl', 'webgl'].some(function(webglId) {
+ return ctx = canvas.getContext(webglId, contextAttributes);
+ });
+ } finally {
+ canvas.removeEventListener('webglcontextcreationerror', onContextCreationError, false);
+ }
+ } else {
+ ctx = canvas.getContext('2d');
+ }
+ if (!ctx) throw ':(';
+ } catch (e) {
+ Module.print('Could not create canvas: ' + [errorInfo, e]);
+ return null;
+ }
+ if (useWebGL) {
+ // Set the background of the WebGL canvas to black
+ canvas.style.backgroundColor = "black";
+
+ // Warn on context loss
+ canvas.addEventListener('webglcontextlost', function(event) {
+ alert('WebGL context lost. You will need to reload the page.');
+ }, false);
+ }
+ if (setInModule) {
+ GLctx = Module.ctx = ctx;
+ Module.useWebGL = useWebGL;
+ Browser.moduleContextCreatedCallbacks.forEach(function(callback) { callback() });
+ Browser.init();
+ }
+ return ctx;
+ },destroyContext:function (canvas, useWebGL, setInModule) {},fullScreenHandlersInstalled:false,lockPointer:undefined,resizeCanvas:undefined,requestFullScreen:function (lockPointer, resizeCanvas) {
+ Browser.lockPointer = lockPointer;
+ Browser.resizeCanvas = resizeCanvas;
+ if (typeof Browser.lockPointer === 'undefined') Browser.lockPointer = true;
+ if (typeof Browser.resizeCanvas === 'undefined') Browser.resizeCanvas = false;
+
+ var canvas = Module['canvas'];
+ function fullScreenChange() {
+ Browser.isFullScreen = false;
+ var canvasContainer = canvas.parentNode;
+ if ((document['webkitFullScreenElement'] || document['webkitFullscreenElement'] ||
+ document['mozFullScreenElement'] || document['mozFullscreenElement'] ||
+ document['fullScreenElement'] || document['fullscreenElement'] ||
+ document['msFullScreenElement'] || document['msFullscreenElement'] ||
+ document['webkitCurrentFullScreenElement']) === canvasContainer) {
+ canvas.cancelFullScreen = document['cancelFullScreen'] ||
+ document['mozCancelFullScreen'] ||
+ document['webkitCancelFullScreen'] ||
+ document['msExitFullscreen'] ||
+ document['exitFullscreen'] ||
+ function() {};
+ canvas.cancelFullScreen = canvas.cancelFullScreen.bind(document);
+ if (Browser.lockPointer) canvas.requestPointerLock();
+ Browser.isFullScreen = true;
+ if (Browser.resizeCanvas) Browser.setFullScreenCanvasSize();
+ } else {
+
+ // remove the full screen specific parent of the canvas again to restore the HTML structure from before going full screen
+ canvasContainer.parentNode.insertBefore(canvas, canvasContainer);
+ canvasContainer.parentNode.removeChild(canvasContainer);
+
+ if (Browser.resizeCanvas) Browser.setWindowedCanvasSize();
+ }
+ if (Module['onFullScreen']) Module['onFullScreen'](Browser.isFullScreen);
+ Browser.updateCanvasDimensions(canvas);
+ }
+
+ if (!Browser.fullScreenHandlersInstalled) {
+ Browser.fullScreenHandlersInstalled = true;
+ document.addEventListener('fullscreenchange', fullScreenChange, false);
+ document.addEventListener('mozfullscreenchange', fullScreenChange, false);
+ document.addEventListener('webkitfullscreenchange', fullScreenChange, false);
+ document.addEventListener('MSFullscreenChange', fullScreenChange, false);
+ }
+
+ // create a new parent to ensure the canvas has no siblings. this allows browsers to optimize full screen performance when its parent is the full screen root
+ var canvasContainer = document.createElement("div");
+ canvas.parentNode.insertBefore(canvasContainer, canvas);
+ canvasContainer.appendChild(canvas);
+
+ // use parent of canvas as full screen root to allow aspect ratio correction (Firefox stretches the root to screen size)
+ canvasContainer.requestFullScreen = canvasContainer['requestFullScreen'] ||
+ canvasContainer['mozRequestFullScreen'] ||
+ canvasContainer['msRequestFullscreen'] ||
+ (canvasContainer['webkitRequestFullScreen'] ? function() { canvasContainer['webkitRequestFullScreen'](Element['ALLOW_KEYBOARD_INPUT']) } : null);
+ canvasContainer.requestFullScreen();
+ },requestAnimationFrame:function requestAnimationFrame(func) {
+ if (typeof window === 'undefined') { // Provide fallback to setTimeout if window is undefined (e.g. in Node.js)
+ setTimeout(func, 1000/60);
+ } else {
+ if (!window.requestAnimationFrame) {
+ window.requestAnimationFrame = window['requestAnimationFrame'] ||
+ window['mozRequestAnimationFrame'] ||
+ window['webkitRequestAnimationFrame'] ||
+ window['msRequestAnimationFrame'] ||
+ window['oRequestAnimationFrame'] ||
+ window['setTimeout'];
+ }
+ window.requestAnimationFrame(func);
+ }
+ },safeCallback:function (func) {
+ return function() {
+ if (!ABORT) return func.apply(null, arguments);
+ };
+ },safeRequestAnimationFrame:function (func) {
+ return Browser.requestAnimationFrame(function() {
+ if (!ABORT) func();
+ });
+ },safeSetTimeout:function (func, timeout) {
+ return setTimeout(function() {
+ if (!ABORT) func();
+ }, timeout);
+ },safeSetInterval:function (func, timeout) {
+ return setInterval(function() {
+ if (!ABORT) func();
+ }, timeout);
+ },getMimetype:function (name) {
+ return {
+ 'jpg': 'image/jpeg',
+ 'jpeg': 'image/jpeg',
+ 'png': 'image/png',
+ 'bmp': 'image/bmp',
+ 'ogg': 'audio/ogg',
+ 'wav': 'audio/wav',
+ 'mp3': 'audio/mpeg'
+ }[name.substr(name.lastIndexOf('.')+1)];
+ },getUserMedia:function (func) {
+ if(!window.getUserMedia) {
+ window.getUserMedia = navigator['getUserMedia'] ||
+ navigator['mozGetUserMedia'];
+ }
+ window.getUserMedia(func);
+ },getMovementX:function (event) {
+ return event['movementX'] ||
+ event['mozMovementX'] ||
+ event['webkitMovementX'] ||
+ 0;
+ },getMovementY:function (event) {
+ return event['movementY'] ||
+ event['mozMovementY'] ||
+ event['webkitMovementY'] ||
+ 0;
+ },getMouseWheelDelta:function (event) {
+ return Math.max(-1, Math.min(1, event.type === 'DOMMouseScroll' ? event.detail : -event.wheelDelta));
+ },mouseX:0,mouseY:0,mouseMovementX:0,mouseMovementY:0,calculateMouseEvent:function (event) { // event should be mousemove, mousedown or mouseup
+ if (Browser.pointerLock) {
+ // When the pointer is locked, calculate the coordinates
+ // based on the movement of the mouse.
+ // Workaround for Firefox bug 764498
+ if (event.type != 'mousemove' &&
+ ('mozMovementX' in event)) {
+ Browser.mouseMovementX = Browser.mouseMovementY = 0;
+ } else {
+ Browser.mouseMovementX = Browser.getMovementX(event);
+ Browser.mouseMovementY = Browser.getMovementY(event);
+ }
+
+ // check if SDL is available
+ if (typeof SDL != "undefined") {
+ Browser.mouseX = SDL.mouseX + Browser.mouseMovementX;
+ Browser.mouseY = SDL.mouseY + Browser.mouseMovementY;
+ } else {
+ // just add the mouse delta to the current absolut mouse position
+ // FIXME: ideally this should be clamped against the canvas size and zero
+ Browser.mouseX += Browser.mouseMovementX;
+ Browser.mouseY += Browser.mouseMovementY;
+ }
+ } else {
+ // Otherwise, calculate the movement based on the changes
+ // in the coordinates.
+ var rect = Module["canvas"].getBoundingClientRect();
+ var x, y;
+
+ // Neither .scrollX or .pageXOffset are defined in a spec, but
+ // we prefer .scrollX because it is currently in a spec draft.
+ // (see: http://www.w3.org/TR/2013/WD-cssom-view-20131217/)
+ var scrollX = ((typeof window.scrollX !== 'undefined') ? window.scrollX : window.pageXOffset);
+ var scrollY = ((typeof window.scrollY !== 'undefined') ? window.scrollY : window.pageYOffset);
+ if (event.type == 'touchstart' ||
+ event.type == 'touchend' ||
+ event.type == 'touchmove') {
+ var t = event.touches.item(0);
+ if (t) {
+ x = t.pageX - (scrollX + rect.left);
+ y = t.pageY - (scrollY + rect.top);
+ } else {
+ return;
+ }
+ } else {
+ x = event.pageX - (scrollX + rect.left);
+ y = event.pageY - (scrollY + rect.top);
+ }
+
+ // the canvas might be CSS-scaled compared to its backbuffer;
+ // SDL-using content will want mouse coordinates in terms
+ // of backbuffer units.
+ var cw = Module["canvas"].width;
+ var ch = Module["canvas"].height;
+ x = x * (cw / rect.width);
+ y = y * (ch / rect.height);
+
+ Browser.mouseMovementX = x - Browser.mouseX;
+ Browser.mouseMovementY = y - Browser.mouseY;
+ Browser.mouseX = x;
+ Browser.mouseY = y;
+ }
+ },xhrLoad:function (url, onload, onerror) {
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, true);
+ xhr.responseType = 'arraybuffer';
+ xhr.onload = function xhr_onload() {
+ if (xhr.status == 200 || (xhr.status == 0 && xhr.response)) { // file URLs can return 0
+ onload(xhr.response);
+ } else {
+ onerror();
+ }
+ };
+ xhr.onerror = onerror;
+ xhr.send(null);
+ },asyncLoad:function (url, onload, onerror, noRunDep) {
+ Browser.xhrLoad(url, function(arrayBuffer) {
+ assert(arrayBuffer, 'Loading data file "' + url + '" failed (no arrayBuffer).');
+ onload(new Uint8Array(arrayBuffer));
+ if (!noRunDep) removeRunDependency('al ' + url);
+ }, function(event) {
+ if (onerror) {
+ onerror();
+ } else {
+ throw 'Loading data file "' + url + '" failed.';
+ }
+ });
+ if (!noRunDep) addRunDependency('al ' + url);
+ },resizeListeners:[],updateResizeListeners:function () {
+ var canvas = Module['canvas'];
+ Browser.resizeListeners.forEach(function(listener) {
+ listener(canvas.width, canvas.height);
+ });
+ },setCanvasSize:function (width, height, noUpdates) {
+ var canvas = Module['canvas'];
+ Browser.updateCanvasDimensions(canvas, width, height);
+ if (!noUpdates) Browser.updateResizeListeners();
+ },windowedWidth:0,windowedHeight:0,setFullScreenCanvasSize:function () {
+ // check if SDL is available
+ if (typeof SDL != "undefined") {
+ var flags = HEAPU32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)];
+ flags = flags | 0x00800000; // set SDL_FULLSCREEN flag
+ HEAP32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)]=flags
+ }
+ Browser.updateResizeListeners();
+ },setWindowedCanvasSize:function () {
+ // check if SDL is available
+ if (typeof SDL != "undefined") {
+ var flags = HEAPU32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)];
+ flags = flags & ~0x00800000; // clear SDL_FULLSCREEN flag
+ HEAP32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)]=flags
+ }
+ Browser.updateResizeListeners();
+ },updateCanvasDimensions:function (canvas, wNative, hNative) {
+ if (wNative && hNative) {
+ canvas.widthNative = wNative;
+ canvas.heightNative = hNative;
+ } else {
+ wNative = canvas.widthNative;
+ hNative = canvas.heightNative;
+ }
+ var w = wNative;
+ var h = hNative;
+ if (Module['forcedAspectRatio'] && Module['forcedAspectRatio'] > 0) {
+ if (w/h < Module['forcedAspectRatio']) {
+ w = Math.round(h * Module['forcedAspectRatio']);
+ } else {
+ h = Math.round(w / Module['forcedAspectRatio']);
+ }
+ }
+ if (((document['webkitFullScreenElement'] || document['webkitFullscreenElement'] ||
+ document['mozFullScreenElement'] || document['mozFullscreenElement'] ||
+ document['fullScreenElement'] || document['fullscreenElement'] ||
+ document['msFullScreenElement'] || document['msFullscreenElement'] ||
+ document['webkitCurrentFullScreenElement']) === canvas.parentNode) && (typeof screen != 'undefined')) {
+ var factor = Math.min(screen.width / w, screen.height / h);
+ w = Math.round(w * factor);
+ h = Math.round(h * factor);
+ }
+ if (Browser.resizeCanvas) {
+ if (canvas.width != w) canvas.width = w;
+ if (canvas.height != h) canvas.height = h;
+ if (typeof canvas.style != 'undefined') {
+ canvas.style.removeProperty( "width");
+ canvas.style.removeProperty("height");
+ }
+ } else {
+ if (canvas.width != wNative) canvas.width = wNative;
+ if (canvas.height != hNative) canvas.height = hNative;
+ if (typeof canvas.style != 'undefined') {
+ if (w != wNative || h != hNative) {
+ canvas.style.setProperty( "width", w + "px", "important");
+ canvas.style.setProperty("height", h + "px", "important");
+ } else {
+ canvas.style.removeProperty( "width");
+ canvas.style.removeProperty("height");
+ }
+ }
+ }
+ }};
+
+ function _time(ptr) {
+ var ret = Math.floor(Date.now()/1000);
+ if (ptr) {
+ HEAP32[((ptr)>>2)]=ret;
+ }
+ return ret;
+ }
+
+
+ function _malloc(bytes) {
+ /* Over-allocate to make sure it is byte-aligned by 8.
+ * This will leak memory, but this is only the dummy
+ * implementation (replaced by dlmalloc normally) so
+ * not an issue.
+ */
+ var ptr = Runtime.dynamicAlloc(bytes + 8);
+ return (ptr+8) & 0xFFFFFFF8;
+ }
+ Module["_malloc"] = _malloc;function ___cxa_allocate_exception(size) {
+ var ptr = _malloc(size + ___cxa_exception_header_size);
+ return ptr + ___cxa_exception_header_size;
+ }
+
+ var __ZTISt9exception=allocate([allocate([1,0,0,0,0,0,0], "i8", ALLOC_STATIC)+8, 0], "i32", ALLOC_STATIC);
+
+ function __ZTVN10__cxxabiv120__si_class_type_infoE() {
+ Module['printErr']('missing function: _ZTVN10__cxxabiv120__si_class_type_infoE'); abort(-1);
+ }
+___errno_state = Runtime.staticAlloc(4); HEAP32[((___errno_state)>>2)]=0;
+FS.staticInit();__ATINIT__.unshift({ func: function() { if (!Module["noFSInit"] && !FS.init.initialized) FS.init() } });__ATMAIN__.push({ func: function() { FS.ignorePermissions = false } });__ATEXIT__.push({ func: function() { FS.quit() } });Module["FS_createFolder"] = FS.createFolder;Module["FS_createPath"] = FS.createPath;Module["FS_createDataFile"] = FS.createDataFile;Module["FS_createPreloadedFile"] = FS.createPreloadedFile;Module["FS_createLazyFile"] = FS.createLazyFile;Module["FS_createLink"] = FS.createLink;Module["FS_createDevice"] = FS.createDevice;
+__ATINIT__.unshift({ func: function() { TTY.init() } });__ATEXIT__.push({ func: function() { TTY.shutdown() } });TTY.utf8 = new Runtime.UTF8Processor();
+if (ENVIRONMENT_IS_NODE) { var fs = require("fs"); NODEFS.staticInit(); }
+__ATINIT__.push({ func: function() { SOCKFS.root = FS.mount(SOCKFS, {}, null); } });
+_fputc.ret = allocate([0], "i8", ALLOC_STATIC);
+Module["requestFullScreen"] = function Module_requestFullScreen(lockPointer, resizeCanvas) { Browser.requestFullScreen(lockPointer, resizeCanvas) };
+ Module["requestAnimationFrame"] = function Module_requestAnimationFrame(func) { Browser.requestAnimationFrame(func) };
+ Module["setCanvasSize"] = function Module_setCanvasSize(width, height, noUpdates) { Browser.setCanvasSize(width, height, noUpdates) };
+ Module["pauseMainLoop"] = function Module_pauseMainLoop() { Browser.mainLoop.pause() };
+ Module["resumeMainLoop"] = function Module_resumeMainLoop() { Browser.mainLoop.resume() };
+ Module["getUserMedia"] = function Module_getUserMedia() { Browser.getUserMedia() }
+STACK_BASE = STACKTOP = Runtime.alignMemory(STATICTOP);
+
+staticSealed = true; // seal the static portion of memory
+
+STACK_MAX = STACK_BASE + 5242880;
+
+DYNAMIC_BASE = DYNAMICTOP = Runtime.alignMemory(STACK_MAX);
+
+assert(DYNAMIC_BASE < TOTAL_MEMORY, "TOTAL_MEMORY not big enough for stack");
+
+
+var Math_min = Math.min;
+function invoke_ii(index,a1) {
+ try {
+ return Module["dynCall_ii"](index,a1);
+ } catch(e) {
+ if (typeof e !== 'number' && e !== 'longjmp') throw e;
+ asm["setThrew"](1, 0);
+ }
+}
+
+function invoke_vi(index,a1) {
+ try {
+ Module["dynCall_vi"](index,a1);
+ } catch(e) {
+ if (typeof e !== 'number' && e !== 'longjmp') throw e;
+ asm["setThrew"](1, 0);
+ }
+}
+
+function invoke_v(index) {
+ try {
+ Module["dynCall_v"](index);
+ } catch(e) {
+ if (typeof e !== 'number' && e !== 'longjmp') throw e;
+ asm["setThrew"](1, 0);
+ }
+}
+
+function asmPrintInt(x, y) {
+ Module.print('int ' + x + ',' + y);// + ' ' + new Error().stack);
+}
+function asmPrintFloat(x, y) {
+ Module.print('float ' + x + ',' + y);// + ' ' + new Error().stack);
+}
+// EMSCRIPTEN_START_ASM
+var asm = Wasm.instantiateModuleFromAsm((function Module(global, env, buffer) {
+ 'use asm';
+ var HEAP8 = new global.Int8Array(buffer);
+ var HEAP16 = new global.Int16Array(buffer);
+ var HEAP32 = new global.Int32Array(buffer);
+ var HEAPU8 = new global.Uint8Array(buffer);
+ var HEAPU16 = new global.Uint16Array(buffer);
+ var HEAPU32 = new global.Uint32Array(buffer);
+ var HEAPF32 = new global.Float32Array(buffer);
+ var HEAPF64 = new global.Float64Array(buffer);
+
+ var STACKTOP=env.STACKTOP|0;
+ var STACK_MAX=env.STACK_MAX|0;
+ var tempDoublePtr=env.tempDoublePtr|0;
+ var ABORT=env.ABORT|0;
+ var __ZTISt9exception=env.__ZTISt9exception|0;
+ var __ZTVN10__cxxabiv120__si_class_type_infoE=env.__ZTVN10__cxxabiv120__si_class_type_infoE|0;
+
+ var __THREW__ = 0;
+ var threwValue = 0;
+ var setjmpId = 0;
+ var undef = 0;
+ var nan = +env.NaN, inf = +env.Infinity;
+ var tempInt = 0, tempBigInt = 0, tempBigIntP = 0, tempBigIntS = 0, tempBigIntR = 0.0, tempBigIntI = 0, tempBigIntD = 0, tempValue = 0, tempDouble = 0.0;
+
+ var tempRet0 = 0;
+ var tempRet1 = 0;
+ var tempRet2 = 0;
+ var tempRet3 = 0;
+ var tempRet4 = 0;
+ var tempRet5 = 0;
+ var tempRet6 = 0;
+ var tempRet7 = 0;
+ var tempRet8 = 0;
+ var tempRet9 = 0;
+ var Math_floor=global.Math.floor;
+ var Math_abs=global.Math.abs;
+ var Math_sqrt=global.Math.sqrt;
+ var Math_pow=global.Math.pow;
+ var Math_cos=global.Math.cos;
+ var Math_sin=global.Math.sin;
+ var Math_tan=global.Math.tan;
+ var Math_acos=global.Math.acos;
+ var Math_asin=global.Math.asin;
+ var Math_atan=global.Math.atan;
+ var Math_atan2=global.Math.atan2;
+ var Math_exp=global.Math.exp;
+ var Math_log=global.Math.log;
+ var Math_ceil=global.Math.ceil;
+ var Math_imul=global.Math.imul;
+ var abort=env.abort;
+ var assert=env.assert;
+ var asmPrintInt=env.asmPrintInt;
+ var asmPrintFloat=env.asmPrintFloat;
+ var Math_min=env.min;
+ var invoke_ii=env.invoke_ii;
+ var invoke_vi=env.invoke_vi;
+ var invoke_v=env.invoke_v;
+ var _send=env._send;
+ var ___setErrNo=env.___setErrNo;
+ var ___cxa_is_number_type=env.___cxa_is_number_type;
+ var ___cxa_allocate_exception=env.___cxa_allocate_exception;
+ var ___cxa_find_matching_catch=env.___cxa_find_matching_catch;
+ var _fflush=env._fflush;
+ var _time=env._time;
+ var _pwrite=env._pwrite;
+ var __reallyNegative=env.__reallyNegative;
+ var _sbrk=env._sbrk;
+ var _emscripten_memcpy_big=env._emscripten_memcpy_big;
+ var _fileno=env._fileno;
+ var ___resumeException=env.___resumeException;
+ var __ZSt18uncaught_exceptionv=env.__ZSt18uncaught_exceptionv;
+ var _sysconf=env._sysconf;
+ var _puts=env._puts;
+ var _mkport=env._mkport;
+ var _write=env._write;
+ var ___errno_location=env.___errno_location;
+ var __ZNSt9exceptionD2Ev=env.__ZNSt9exceptionD2Ev;
+ var _fputc=env._fputc;
+ var ___cxa_throw=env.___cxa_throw;
+ var _abort=env._abort;
+ var _fwrite=env._fwrite;
+ var ___cxa_does_inherit=env.___cxa_does_inherit;
+ var _fprintf=env._fprintf;
+ var __formatString=env.__formatString;
+ var _fputs=env._fputs;
+ var _printf=env._printf;
+ var tempFloat = 0.0;
+
+// EMSCRIPTEN_START_FUNCS
+function _malloc(i12) {
+ i12 = i12 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0, i23 = 0, i24 = 0, i25 = 0, i26 = 0, i27 = 0, i28 = 0, i29 = 0, i30 = 0, i31 = 0, i32 = 0;
+ i1 = STACKTOP;
+ do {
+ if (i12 >>> 0 < 245) {
+ if (i12 >>> 0 < 11) {
+ i12 = 16;
+ } else {
+ i12 = i12 + 11 & -8;
+ }
+ i20 = i12 >>> 3;
+ i18 = HEAP32[146] | 0;
+ i21 = i18 >>> i20;
+ if ((i21 & 3 | 0) != 0) {
+ i6 = (i21 & 1 ^ 1) + i20 | 0;
+ i5 = i6 << 1;
+ i3 = 624 + (i5 << 2) | 0;
+ i5 = 624 + (i5 + 2 << 2) | 0;
+ i7 = HEAP32[i5 >> 2] | 0;
+ i2 = i7 + 8 | 0;
+ i4 = HEAP32[i2 >> 2] | 0;
+ do {
+ if ((i3 | 0) != (i4 | 0)) {
+ if (i4 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i8 = i4 + 12 | 0;
+ if ((HEAP32[i8 >> 2] | 0) == (i7 | 0)) {
+ HEAP32[i8 >> 2] = i3;
+ HEAP32[i5 >> 2] = i4;
+ break;
+ } else {
+ _abort();
+ }
+ } else {
+ HEAP32[146] = i18 & ~(1 << i6);
+ }
+ } while (0);
+ i32 = i6 << 3;
+ HEAP32[i7 + 4 >> 2] = i32 | 3;
+ i32 = i7 + (i32 | 4) | 0;
+ HEAP32[i32 >> 2] = HEAP32[i32 >> 2] | 1;
+ i32 = i2;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ if (i12 >>> 0 > (HEAP32[592 >> 2] | 0) >>> 0) {
+ if ((i21 | 0) != 0) {
+ i7 = 2 << i20;
+ i7 = i21 << i20 & (i7 | 0 - i7);
+ i7 = (i7 & 0 - i7) + -1 | 0;
+ i2 = i7 >>> 12 & 16;
+ i7 = i7 >>> i2;
+ i6 = i7 >>> 5 & 8;
+ i7 = i7 >>> i6;
+ i5 = i7 >>> 2 & 4;
+ i7 = i7 >>> i5;
+ i4 = i7 >>> 1 & 2;
+ i7 = i7 >>> i4;
+ i3 = i7 >>> 1 & 1;
+ i3 = (i6 | i2 | i5 | i4 | i3) + (i7 >>> i3) | 0;
+ i7 = i3 << 1;
+ i4 = 624 + (i7 << 2) | 0;
+ i7 = 624 + (i7 + 2 << 2) | 0;
+ i5 = HEAP32[i7 >> 2] | 0;
+ i2 = i5 + 8 | 0;
+ i6 = HEAP32[i2 >> 2] | 0;
+ do {
+ if ((i4 | 0) != (i6 | 0)) {
+ if (i6 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i8 = i6 + 12 | 0;
+ if ((HEAP32[i8 >> 2] | 0) == (i5 | 0)) {
+ HEAP32[i8 >> 2] = i4;
+ HEAP32[i7 >> 2] = i6;
+ break;
+ } else {
+ _abort();
+ }
+ } else {
+ HEAP32[146] = i18 & ~(1 << i3);
+ }
+ } while (0);
+ i6 = i3 << 3;
+ i4 = i6 - i12 | 0;
+ HEAP32[i5 + 4 >> 2] = i12 | 3;
+ i3 = i5 + i12 | 0;
+ HEAP32[i5 + (i12 | 4) >> 2] = i4 | 1;
+ HEAP32[i5 + i6 >> 2] = i4;
+ i6 = HEAP32[592 >> 2] | 0;
+ if ((i6 | 0) != 0) {
+ i5 = HEAP32[604 >> 2] | 0;
+ i8 = i6 >>> 3;
+ i9 = i8 << 1;
+ i6 = 624 + (i9 << 2) | 0;
+ i7 = HEAP32[146] | 0;
+ i8 = 1 << i8;
+ if ((i7 & i8 | 0) != 0) {
+ i7 = 624 + (i9 + 2 << 2) | 0;
+ i8 = HEAP32[i7 >> 2] | 0;
+ if (i8 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i28 = i7;
+ i27 = i8;
+ }
+ } else {
+ HEAP32[146] = i7 | i8;
+ i28 = 624 + (i9 + 2 << 2) | 0;
+ i27 = i6;
+ }
+ HEAP32[i28 >> 2] = i5;
+ HEAP32[i27 + 12 >> 2] = i5;
+ HEAP32[i5 + 8 >> 2] = i27;
+ HEAP32[i5 + 12 >> 2] = i6;
+ }
+ HEAP32[592 >> 2] = i4;
+ HEAP32[604 >> 2] = i3;
+ i32 = i2;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ i18 = HEAP32[588 >> 2] | 0;
+ if ((i18 | 0) != 0) {
+ i2 = (i18 & 0 - i18) + -1 | 0;
+ i31 = i2 >>> 12 & 16;
+ i2 = i2 >>> i31;
+ i30 = i2 >>> 5 & 8;
+ i2 = i2 >>> i30;
+ i32 = i2 >>> 2 & 4;
+ i2 = i2 >>> i32;
+ i6 = i2 >>> 1 & 2;
+ i2 = i2 >>> i6;
+ i3 = i2 >>> 1 & 1;
+ i3 = HEAP32[888 + ((i30 | i31 | i32 | i6 | i3) + (i2 >>> i3) << 2) >> 2] | 0;
+ i2 = (HEAP32[i3 + 4 >> 2] & -8) - i12 | 0;
+ i6 = i3;
+ while (1) {
+ i5 = HEAP32[i6 + 16 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ i5 = HEAP32[i6 + 20 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ break;
+ }
+ }
+ i6 = (HEAP32[i5 + 4 >> 2] & -8) - i12 | 0;
+ i4 = i6 >>> 0 < i2 >>> 0;
+ i2 = i4 ? i6 : i2;
+ i6 = i5;
+ i3 = i4 ? i5 : i3;
+ }
+ i6 = HEAP32[600 >> 2] | 0;
+ if (i3 >>> 0 < i6 >>> 0) {
+ _abort();
+ }
+ i4 = i3 + i12 | 0;
+ if (!(i3 >>> 0 < i4 >>> 0)) {
+ _abort();
+ }
+ i5 = HEAP32[i3 + 24 >> 2] | 0;
+ i7 = HEAP32[i3 + 12 >> 2] | 0;
+ do {
+ if ((i7 | 0) == (i3 | 0)) {
+ i8 = i3 + 20 | 0;
+ i7 = HEAP32[i8 >> 2] | 0;
+ if ((i7 | 0) == 0) {
+ i8 = i3 + 16 | 0;
+ i7 = HEAP32[i8 >> 2] | 0;
+ if ((i7 | 0) == 0) {
+ i26 = 0;
+ break;
+ }
+ }
+ while (1) {
+ i10 = i7 + 20 | 0;
+ i9 = HEAP32[i10 >> 2] | 0;
+ if ((i9 | 0) != 0) {
+ i7 = i9;
+ i8 = i10;
+ continue;
+ }
+ i10 = i7 + 16 | 0;
+ i9 = HEAP32[i10 >> 2] | 0;
+ if ((i9 | 0) == 0) {
+ break;
+ } else {
+ i7 = i9;
+ i8 = i10;
+ }
+ }
+ if (i8 >>> 0 < i6 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i8 >> 2] = 0;
+ i26 = i7;
+ break;
+ }
+ } else {
+ i8 = HEAP32[i3 + 8 >> 2] | 0;
+ if (i8 >>> 0 < i6 >>> 0) {
+ _abort();
+ }
+ i6 = i8 + 12 | 0;
+ if ((HEAP32[i6 >> 2] | 0) != (i3 | 0)) {
+ _abort();
+ }
+ i9 = i7 + 8 | 0;
+ if ((HEAP32[i9 >> 2] | 0) == (i3 | 0)) {
+ HEAP32[i6 >> 2] = i7;
+ HEAP32[i9 >> 2] = i8;
+ i26 = i7;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ do {
+ if ((i5 | 0) != 0) {
+ i7 = HEAP32[i3 + 28 >> 2] | 0;
+ i6 = 888 + (i7 << 2) | 0;
+ if ((i3 | 0) == (HEAP32[i6 >> 2] | 0)) {
+ HEAP32[i6 >> 2] = i26;
+ if ((i26 | 0) == 0) {
+ HEAP32[588 >> 2] = HEAP32[588 >> 2] & ~(1 << i7);
+ break;
+ }
+ } else {
+ if (i5 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i6 = i5 + 16 | 0;
+ if ((HEAP32[i6 >> 2] | 0) == (i3 | 0)) {
+ HEAP32[i6 >> 2] = i26;
+ } else {
+ HEAP32[i5 + 20 >> 2] = i26;
+ }
+ if ((i26 | 0) == 0) {
+ break;
+ }
+ }
+ if (i26 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i26 + 24 >> 2] = i5;
+ i5 = HEAP32[i3 + 16 >> 2] | 0;
+ do {
+ if ((i5 | 0) != 0) {
+ if (i5 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i26 + 16 >> 2] = i5;
+ HEAP32[i5 + 24 >> 2] = i26;
+ break;
+ }
+ }
+ } while (0);
+ i5 = HEAP32[i3 + 20 >> 2] | 0;
+ if ((i5 | 0) != 0) {
+ if (i5 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i26 + 20 >> 2] = i5;
+ HEAP32[i5 + 24 >> 2] = i26;
+ break;
+ }
+ }
+ }
+ } while (0);
+ if (i2 >>> 0 < 16) {
+ i32 = i2 + i12 | 0;
+ HEAP32[i3 + 4 >> 2] = i32 | 3;
+ i32 = i3 + (i32 + 4) | 0;
+ HEAP32[i32 >> 2] = HEAP32[i32 >> 2] | 1;
+ } else {
+ HEAP32[i3 + 4 >> 2] = i12 | 3;
+ HEAP32[i3 + (i12 | 4) >> 2] = i2 | 1;
+ HEAP32[i3 + (i2 + i12) >> 2] = i2;
+ i6 = HEAP32[592 >> 2] | 0;
+ if ((i6 | 0) != 0) {
+ i5 = HEAP32[604 >> 2] | 0;
+ i8 = i6 >>> 3;
+ i9 = i8 << 1;
+ i6 = 624 + (i9 << 2) | 0;
+ i7 = HEAP32[146] | 0;
+ i8 = 1 << i8;
+ if ((i7 & i8 | 0) != 0) {
+ i7 = 624 + (i9 + 2 << 2) | 0;
+ i8 = HEAP32[i7 >> 2] | 0;
+ if (i8 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i25 = i7;
+ i24 = i8;
+ }
+ } else {
+ HEAP32[146] = i7 | i8;
+ i25 = 624 + (i9 + 2 << 2) | 0;
+ i24 = i6;
+ }
+ HEAP32[i25 >> 2] = i5;
+ HEAP32[i24 + 12 >> 2] = i5;
+ HEAP32[i5 + 8 >> 2] = i24;
+ HEAP32[i5 + 12 >> 2] = i6;
+ }
+ HEAP32[592 >> 2] = i2;
+ HEAP32[604 >> 2] = i4;
+ }
+ i32 = i3 + 8 | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ }
+ } else {
+ if (!(i12 >>> 0 > 4294967231)) {
+ i24 = i12 + 11 | 0;
+ i12 = i24 & -8;
+ i26 = HEAP32[588 >> 2] | 0;
+ if ((i26 | 0) != 0) {
+ i25 = 0 - i12 | 0;
+ i24 = i24 >>> 8;
+ if ((i24 | 0) != 0) {
+ if (i12 >>> 0 > 16777215) {
+ i27 = 31;
+ } else {
+ i31 = (i24 + 1048320 | 0) >>> 16 & 8;
+ i32 = i24 << i31;
+ i30 = (i32 + 520192 | 0) >>> 16 & 4;
+ i32 = i32 << i30;
+ i27 = (i32 + 245760 | 0) >>> 16 & 2;
+ i27 = 14 - (i30 | i31 | i27) + (i32 << i27 >>> 15) | 0;
+ i27 = i12 >>> (i27 + 7 | 0) & 1 | i27 << 1;
+ }
+ } else {
+ i27 = 0;
+ }
+ i30 = HEAP32[888 + (i27 << 2) >> 2] | 0;
+ L126 : do {
+ if ((i30 | 0) == 0) {
+ i29 = 0;
+ i24 = 0;
+ } else {
+ if ((i27 | 0) == 31) {
+ i24 = 0;
+ } else {
+ i24 = 25 - (i27 >>> 1) | 0;
+ }
+ i29 = 0;
+ i28 = i12 << i24;
+ i24 = 0;
+ while (1) {
+ i32 = HEAP32[i30 + 4 >> 2] & -8;
+ i31 = i32 - i12 | 0;
+ if (i31 >>> 0 < i25 >>> 0) {
+ if ((i32 | 0) == (i12 | 0)) {
+ i25 = i31;
+ i29 = i30;
+ i24 = i30;
+ break L126;
+ } else {
+ i25 = i31;
+ i24 = i30;
+ }
+ }
+ i31 = HEAP32[i30 + 20 >> 2] | 0;
+ i30 = HEAP32[i30 + (i28 >>> 31 << 2) + 16 >> 2] | 0;
+ i29 = (i31 | 0) == 0 | (i31 | 0) == (i30 | 0) ? i29 : i31;
+ if ((i30 | 0) == 0) {
+ break;
+ } else {
+ i28 = i28 << 1;
+ }
+ }
+ }
+ } while (0);
+ if ((i29 | 0) == 0 & (i24 | 0) == 0) {
+ i32 = 2 << i27;
+ i26 = i26 & (i32 | 0 - i32);
+ if ((i26 | 0) == 0) {
+ break;
+ }
+ i32 = (i26 & 0 - i26) + -1 | 0;
+ i28 = i32 >>> 12 & 16;
+ i32 = i32 >>> i28;
+ i27 = i32 >>> 5 & 8;
+ i32 = i32 >>> i27;
+ i30 = i32 >>> 2 & 4;
+ i32 = i32 >>> i30;
+ i31 = i32 >>> 1 & 2;
+ i32 = i32 >>> i31;
+ i29 = i32 >>> 1 & 1;
+ i29 = HEAP32[888 + ((i27 | i28 | i30 | i31 | i29) + (i32 >>> i29) << 2) >> 2] | 0;
+ }
+ if ((i29 | 0) != 0) {
+ while (1) {
+ i27 = (HEAP32[i29 + 4 >> 2] & -8) - i12 | 0;
+ i26 = i27 >>> 0 < i25 >>> 0;
+ i25 = i26 ? i27 : i25;
+ i24 = i26 ? i29 : i24;
+ i26 = HEAP32[i29 + 16 >> 2] | 0;
+ if ((i26 | 0) != 0) {
+ i29 = i26;
+ continue;
+ }
+ i29 = HEAP32[i29 + 20 >> 2] | 0;
+ if ((i29 | 0) == 0) {
+ break;
+ }
+ }
+ }
+ if ((i24 | 0) != 0 ? i25 >>> 0 < ((HEAP32[592 >> 2] | 0) - i12 | 0) >>> 0 : 0) {
+ i4 = HEAP32[600 >> 2] | 0;
+ if (i24 >>> 0 < i4 >>> 0) {
+ _abort();
+ }
+ i2 = i24 + i12 | 0;
+ if (!(i24 >>> 0 < i2 >>> 0)) {
+ _abort();
+ }
+ i3 = HEAP32[i24 + 24 >> 2] | 0;
+ i6 = HEAP32[i24 + 12 >> 2] | 0;
+ do {
+ if ((i6 | 0) == (i24 | 0)) {
+ i6 = i24 + 20 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ i6 = i24 + 16 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ i22 = 0;
+ break;
+ }
+ }
+ while (1) {
+ i8 = i5 + 20 | 0;
+ i7 = HEAP32[i8 >> 2] | 0;
+ if ((i7 | 0) != 0) {
+ i5 = i7;
+ i6 = i8;
+ continue;
+ }
+ i7 = i5 + 16 | 0;
+ i8 = HEAP32[i7 >> 2] | 0;
+ if ((i8 | 0) == 0) {
+ break;
+ } else {
+ i5 = i8;
+ i6 = i7;
+ }
+ }
+ if (i6 >>> 0 < i4 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i6 >> 2] = 0;
+ i22 = i5;
+ break;
+ }
+ } else {
+ i5 = HEAP32[i24 + 8 >> 2] | 0;
+ if (i5 >>> 0 < i4 >>> 0) {
+ _abort();
+ }
+ i7 = i5 + 12 | 0;
+ if ((HEAP32[i7 >> 2] | 0) != (i24 | 0)) {
+ _abort();
+ }
+ i4 = i6 + 8 | 0;
+ if ((HEAP32[i4 >> 2] | 0) == (i24 | 0)) {
+ HEAP32[i7 >> 2] = i6;
+ HEAP32[i4 >> 2] = i5;
+ i22 = i6;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ do {
+ if ((i3 | 0) != 0) {
+ i4 = HEAP32[i24 + 28 >> 2] | 0;
+ i5 = 888 + (i4 << 2) | 0;
+ if ((i24 | 0) == (HEAP32[i5 >> 2] | 0)) {
+ HEAP32[i5 >> 2] = i22;
+ if ((i22 | 0) == 0) {
+ HEAP32[588 >> 2] = HEAP32[588 >> 2] & ~(1 << i4);
+ break;
+ }
+ } else {
+ if (i3 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i4 = i3 + 16 | 0;
+ if ((HEAP32[i4 >> 2] | 0) == (i24 | 0)) {
+ HEAP32[i4 >> 2] = i22;
+ } else {
+ HEAP32[i3 + 20 >> 2] = i22;
+ }
+ if ((i22 | 0) == 0) {
+ break;
+ }
+ }
+ if (i22 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i22 + 24 >> 2] = i3;
+ i3 = HEAP32[i24 + 16 >> 2] | 0;
+ do {
+ if ((i3 | 0) != 0) {
+ if (i3 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i22 + 16 >> 2] = i3;
+ HEAP32[i3 + 24 >> 2] = i22;
+ break;
+ }
+ }
+ } while (0);
+ i3 = HEAP32[i24 + 20 >> 2] | 0;
+ if ((i3 | 0) != 0) {
+ if (i3 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i22 + 20 >> 2] = i3;
+ HEAP32[i3 + 24 >> 2] = i22;
+ break;
+ }
+ }
+ }
+ } while (0);
+ L204 : do {
+ if (!(i25 >>> 0 < 16)) {
+ HEAP32[i24 + 4 >> 2] = i12 | 3;
+ HEAP32[i24 + (i12 | 4) >> 2] = i25 | 1;
+ HEAP32[i24 + (i25 + i12) >> 2] = i25;
+ i4 = i25 >>> 3;
+ if (i25 >>> 0 < 256) {
+ i6 = i4 << 1;
+ i3 = 624 + (i6 << 2) | 0;
+ i5 = HEAP32[146] | 0;
+ i4 = 1 << i4;
+ if ((i5 & i4 | 0) != 0) {
+ i5 = 624 + (i6 + 2 << 2) | 0;
+ i4 = HEAP32[i5 >> 2] | 0;
+ if (i4 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i21 = i5;
+ i20 = i4;
+ }
+ } else {
+ HEAP32[146] = i5 | i4;
+ i21 = 624 + (i6 + 2 << 2) | 0;
+ i20 = i3;
+ }
+ HEAP32[i21 >> 2] = i2;
+ HEAP32[i20 + 12 >> 2] = i2;
+ HEAP32[i24 + (i12 + 8) >> 2] = i20;
+ HEAP32[i24 + (i12 + 12) >> 2] = i3;
+ break;
+ }
+ i3 = i25 >>> 8;
+ if ((i3 | 0) != 0) {
+ if (i25 >>> 0 > 16777215) {
+ i3 = 31;
+ } else {
+ i31 = (i3 + 1048320 | 0) >>> 16 & 8;
+ i32 = i3 << i31;
+ i30 = (i32 + 520192 | 0) >>> 16 & 4;
+ i32 = i32 << i30;
+ i3 = (i32 + 245760 | 0) >>> 16 & 2;
+ i3 = 14 - (i30 | i31 | i3) + (i32 << i3 >>> 15) | 0;
+ i3 = i25 >>> (i3 + 7 | 0) & 1 | i3 << 1;
+ }
+ } else {
+ i3 = 0;
+ }
+ i6 = 888 + (i3 << 2) | 0;
+ HEAP32[i24 + (i12 + 28) >> 2] = i3;
+ HEAP32[i24 + (i12 + 20) >> 2] = 0;
+ HEAP32[i24 + (i12 + 16) >> 2] = 0;
+ i4 = HEAP32[588 >> 2] | 0;
+ i5 = 1 << i3;
+ if ((i4 & i5 | 0) == 0) {
+ HEAP32[588 >> 2] = i4 | i5;
+ HEAP32[i6 >> 2] = i2;
+ HEAP32[i24 + (i12 + 24) >> 2] = i6;
+ HEAP32[i24 + (i12 + 12) >> 2] = i2;
+ HEAP32[i24 + (i12 + 8) >> 2] = i2;
+ break;
+ }
+ i4 = HEAP32[i6 >> 2] | 0;
+ if ((i3 | 0) == 31) {
+ i3 = 0;
+ } else {
+ i3 = 25 - (i3 >>> 1) | 0;
+ }
+ L225 : do {
+ if ((HEAP32[i4 + 4 >> 2] & -8 | 0) != (i25 | 0)) {
+ i3 = i25 << i3;
+ while (1) {
+ i6 = i4 + (i3 >>> 31 << 2) + 16 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ break;
+ }
+ if ((HEAP32[i5 + 4 >> 2] & -8 | 0) == (i25 | 0)) {
+ i18 = i5;
+ break L225;
+ } else {
+ i3 = i3 << 1;
+ i4 = i5;
+ }
+ }
+ if (i6 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i6 >> 2] = i2;
+ HEAP32[i24 + (i12 + 24) >> 2] = i4;
+ HEAP32[i24 + (i12 + 12) >> 2] = i2;
+ HEAP32[i24 + (i12 + 8) >> 2] = i2;
+ break L204;
+ }
+ } else {
+ i18 = i4;
+ }
+ } while (0);
+ i4 = i18 + 8 | 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ i5 = HEAP32[600 >> 2] | 0;
+ if (i18 >>> 0 < i5 >>> 0) {
+ _abort();
+ }
+ if (i3 >>> 0 < i5 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i3 + 12 >> 2] = i2;
+ HEAP32[i4 >> 2] = i2;
+ HEAP32[i24 + (i12 + 8) >> 2] = i3;
+ HEAP32[i24 + (i12 + 12) >> 2] = i18;
+ HEAP32[i24 + (i12 + 24) >> 2] = 0;
+ break;
+ }
+ } else {
+ i32 = i25 + i12 | 0;
+ HEAP32[i24 + 4 >> 2] = i32 | 3;
+ i32 = i24 + (i32 + 4) | 0;
+ HEAP32[i32 >> 2] = HEAP32[i32 >> 2] | 1;
+ }
+ } while (0);
+ i32 = i24 + 8 | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ }
+ } else {
+ i12 = -1;
+ }
+ }
+ } while (0);
+ i18 = HEAP32[592 >> 2] | 0;
+ if (!(i12 >>> 0 > i18 >>> 0)) {
+ i3 = i18 - i12 | 0;
+ i2 = HEAP32[604 >> 2] | 0;
+ if (i3 >>> 0 > 15) {
+ HEAP32[604 >> 2] = i2 + i12;
+ HEAP32[592 >> 2] = i3;
+ HEAP32[i2 + (i12 + 4) >> 2] = i3 | 1;
+ HEAP32[i2 + i18 >> 2] = i3;
+ HEAP32[i2 + 4 >> 2] = i12 | 3;
+ } else {
+ HEAP32[592 >> 2] = 0;
+ HEAP32[604 >> 2] = 0;
+ HEAP32[i2 + 4 >> 2] = i18 | 3;
+ i32 = i2 + (i18 + 4) | 0;
+ HEAP32[i32 >> 2] = HEAP32[i32 >> 2] | 1;
+ }
+ i32 = i2 + 8 | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ i18 = HEAP32[596 >> 2] | 0;
+ if (i12 >>> 0 < i18 >>> 0) {
+ i31 = i18 - i12 | 0;
+ HEAP32[596 >> 2] = i31;
+ i32 = HEAP32[608 >> 2] | 0;
+ HEAP32[608 >> 2] = i32 + i12;
+ HEAP32[i32 + (i12 + 4) >> 2] = i31 | 1;
+ HEAP32[i32 + 4 >> 2] = i12 | 3;
+ i32 = i32 + 8 | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ do {
+ if ((HEAP32[264] | 0) == 0) {
+ i18 = _sysconf(30) | 0;
+ if ((i18 + -1 & i18 | 0) == 0) {
+ HEAP32[1064 >> 2] = i18;
+ HEAP32[1060 >> 2] = i18;
+ HEAP32[1068 >> 2] = -1;
+ HEAP32[1072 >> 2] = -1;
+ HEAP32[1076 >> 2] = 0;
+ HEAP32[1028 >> 2] = 0;
+ HEAP32[264] = (_time(0) | 0) & -16 ^ 1431655768;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ i20 = i12 + 48 | 0;
+ i25 = HEAP32[1064 >> 2] | 0;
+ i21 = i12 + 47 | 0;
+ i22 = i25 + i21 | 0;
+ i25 = 0 - i25 | 0;
+ i18 = i22 & i25;
+ if (!(i18 >>> 0 > i12 >>> 0)) {
+ i32 = 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ i24 = HEAP32[1024 >> 2] | 0;
+ if ((i24 | 0) != 0 ? (i31 = HEAP32[1016 >> 2] | 0, i32 = i31 + i18 | 0, i32 >>> 0 <= i31 >>> 0 | i32 >>> 0 > i24 >>> 0) : 0) {
+ i32 = 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ L269 : do {
+ if ((HEAP32[1028 >> 2] & 4 | 0) == 0) {
+ i26 = HEAP32[608 >> 2] | 0;
+ L271 : do {
+ if ((i26 | 0) != 0) {
+ i24 = 1032 | 0;
+ while (1) {
+ i27 = HEAP32[i24 >> 2] | 0;
+ if (!(i27 >>> 0 > i26 >>> 0) ? (i23 = i24 + 4 | 0, (i27 + (HEAP32[i23 >> 2] | 0) | 0) >>> 0 > i26 >>> 0) : 0) {
+ break;
+ }
+ i24 = HEAP32[i24 + 8 >> 2] | 0;
+ if ((i24 | 0) == 0) {
+ i13 = 182;
+ break L271;
+ }
+ }
+ if ((i24 | 0) != 0) {
+ i25 = i22 - (HEAP32[596 >> 2] | 0) & i25;
+ if (i25 >>> 0 < 2147483647) {
+ i13 = _sbrk(i25 | 0) | 0;
+ i26 = (i13 | 0) == ((HEAP32[i24 >> 2] | 0) + (HEAP32[i23 >> 2] | 0) | 0);
+ i22 = i13;
+ i24 = i25;
+ i23 = i26 ? i13 : -1;
+ i25 = i26 ? i25 : 0;
+ i13 = 191;
+ } else {
+ i25 = 0;
+ }
+ } else {
+ i13 = 182;
+ }
+ } else {
+ i13 = 182;
+ }
+ } while (0);
+ do {
+ if ((i13 | 0) == 182) {
+ i23 = _sbrk(0) | 0;
+ if ((i23 | 0) != (-1 | 0)) {
+ i24 = i23;
+ i22 = HEAP32[1060 >> 2] | 0;
+ i25 = i22 + -1 | 0;
+ if ((i25 & i24 | 0) == 0) {
+ i25 = i18;
+ } else {
+ i25 = i18 - i24 + (i25 + i24 & 0 - i22) | 0;
+ }
+ i24 = HEAP32[1016 >> 2] | 0;
+ i26 = i24 + i25 | 0;
+ if (i25 >>> 0 > i12 >>> 0 & i25 >>> 0 < 2147483647) {
+ i22 = HEAP32[1024 >> 2] | 0;
+ if ((i22 | 0) != 0 ? i26 >>> 0 <= i24 >>> 0 | i26 >>> 0 > i22 >>> 0 : 0) {
+ i25 = 0;
+ break;
+ }
+ i22 = _sbrk(i25 | 0) | 0;
+ i13 = (i22 | 0) == (i23 | 0);
+ i24 = i25;
+ i23 = i13 ? i23 : -1;
+ i25 = i13 ? i25 : 0;
+ i13 = 191;
+ } else {
+ i25 = 0;
+ }
+ } else {
+ i25 = 0;
+ }
+ }
+ } while (0);
+ L291 : do {
+ if ((i13 | 0) == 191) {
+ i13 = 0 - i24 | 0;
+ if ((i23 | 0) != (-1 | 0)) {
+ i17 = i23;
+ i14 = i25;
+ i13 = 202;
+ break L269;
+ }
+ do {
+ if ((i22 | 0) != (-1 | 0) & i24 >>> 0 < 2147483647 & i24 >>> 0 < i20 >>> 0 ? (i19 = HEAP32[1064 >> 2] | 0, i19 = i21 - i24 + i19 & 0 - i19, i19 >>> 0 < 2147483647) : 0) {
+ if ((_sbrk(i19 | 0) | 0) == (-1 | 0)) {
+ _sbrk(i13 | 0) | 0;
+ break L291;
+ } else {
+ i24 = i19 + i24 | 0;
+ break;
+ }
+ }
+ } while (0);
+ if ((i22 | 0) != (-1 | 0)) {
+ i17 = i22;
+ i14 = i24;
+ i13 = 202;
+ break L269;
+ }
+ }
+ } while (0);
+ HEAP32[1028 >> 2] = HEAP32[1028 >> 2] | 4;
+ i13 = 199;
+ } else {
+ i25 = 0;
+ i13 = 199;
+ }
+ } while (0);
+ if ((((i13 | 0) == 199 ? i18 >>> 0 < 2147483647 : 0) ? (i17 = _sbrk(i18 | 0) | 0, i16 = _sbrk(0) | 0, (i16 | 0) != (-1 | 0) & (i17 | 0) != (-1 | 0) & i17 >>> 0 < i16 >>> 0) : 0) ? (i15 = i16 - i17 | 0, i14 = i15 >>> 0 > (i12 + 40 | 0) >>> 0, i14) : 0) {
+ i14 = i14 ? i15 : i25;
+ i13 = 202;
+ }
+ if ((i13 | 0) == 202) {
+ i15 = (HEAP32[1016 >> 2] | 0) + i14 | 0;
+ HEAP32[1016 >> 2] = i15;
+ if (i15 >>> 0 > (HEAP32[1020 >> 2] | 0) >>> 0) {
+ HEAP32[1020 >> 2] = i15;
+ }
+ i15 = HEAP32[608 >> 2] | 0;
+ L311 : do {
+ if ((i15 | 0) != 0) {
+ i21 = 1032 | 0;
+ while (1) {
+ i16 = HEAP32[i21 >> 2] | 0;
+ i19 = i21 + 4 | 0;
+ i20 = HEAP32[i19 >> 2] | 0;
+ if ((i17 | 0) == (i16 + i20 | 0)) {
+ i13 = 214;
+ break;
+ }
+ i18 = HEAP32[i21 + 8 >> 2] | 0;
+ if ((i18 | 0) == 0) {
+ break;
+ } else {
+ i21 = i18;
+ }
+ }
+ if (((i13 | 0) == 214 ? (HEAP32[i21 + 12 >> 2] & 8 | 0) == 0 : 0) ? i15 >>> 0 >= i16 >>> 0 & i15 >>> 0 < i17 >>> 0 : 0) {
+ HEAP32[i19 >> 2] = i20 + i14;
+ i2 = (HEAP32[596 >> 2] | 0) + i14 | 0;
+ i3 = i15 + 8 | 0;
+ if ((i3 & 7 | 0) == 0) {
+ i3 = 0;
+ } else {
+ i3 = 0 - i3 & 7;
+ }
+ i32 = i2 - i3 | 0;
+ HEAP32[608 >> 2] = i15 + i3;
+ HEAP32[596 >> 2] = i32;
+ HEAP32[i15 + (i3 + 4) >> 2] = i32 | 1;
+ HEAP32[i15 + (i2 + 4) >> 2] = 40;
+ HEAP32[612 >> 2] = HEAP32[1072 >> 2];
+ break;
+ }
+ if (i17 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ HEAP32[600 >> 2] = i17;
+ }
+ i19 = i17 + i14 | 0;
+ i16 = 1032 | 0;
+ while (1) {
+ if ((HEAP32[i16 >> 2] | 0) == (i19 | 0)) {
+ i13 = 224;
+ break;
+ }
+ i18 = HEAP32[i16 + 8 >> 2] | 0;
+ if ((i18 | 0) == 0) {
+ break;
+ } else {
+ i16 = i18;
+ }
+ }
+ if ((i13 | 0) == 224 ? (HEAP32[i16 + 12 >> 2] & 8 | 0) == 0 : 0) {
+ HEAP32[i16 >> 2] = i17;
+ i6 = i16 + 4 | 0;
+ HEAP32[i6 >> 2] = (HEAP32[i6 >> 2] | 0) + i14;
+ i6 = i17 + 8 | 0;
+ if ((i6 & 7 | 0) == 0) {
+ i6 = 0;
+ } else {
+ i6 = 0 - i6 & 7;
+ }
+ i7 = i17 + (i14 + 8) | 0;
+ if ((i7 & 7 | 0) == 0) {
+ i13 = 0;
+ } else {
+ i13 = 0 - i7 & 7;
+ }
+ i15 = i17 + (i13 + i14) | 0;
+ i8 = i6 + i12 | 0;
+ i7 = i17 + i8 | 0;
+ i10 = i15 - (i17 + i6) - i12 | 0;
+ HEAP32[i17 + (i6 + 4) >> 2] = i12 | 3;
+ L348 : do {
+ if ((i15 | 0) != (HEAP32[608 >> 2] | 0)) {
+ if ((i15 | 0) == (HEAP32[604 >> 2] | 0)) {
+ i32 = (HEAP32[592 >> 2] | 0) + i10 | 0;
+ HEAP32[592 >> 2] = i32;
+ HEAP32[604 >> 2] = i7;
+ HEAP32[i17 + (i8 + 4) >> 2] = i32 | 1;
+ HEAP32[i17 + (i32 + i8) >> 2] = i32;
+ break;
+ }
+ i12 = i14 + 4 | 0;
+ i18 = HEAP32[i17 + (i12 + i13) >> 2] | 0;
+ if ((i18 & 3 | 0) == 1) {
+ i11 = i18 & -8;
+ i16 = i18 >>> 3;
+ do {
+ if (!(i18 >>> 0 < 256)) {
+ i9 = HEAP32[i17 + ((i13 | 24) + i14) >> 2] | 0;
+ i19 = HEAP32[i17 + (i14 + 12 + i13) >> 2] | 0;
+ do {
+ if ((i19 | 0) == (i15 | 0)) {
+ i19 = i13 | 16;
+ i18 = i17 + (i12 + i19) | 0;
+ i16 = HEAP32[i18 >> 2] | 0;
+ if ((i16 | 0) == 0) {
+ i18 = i17 + (i19 + i14) | 0;
+ i16 = HEAP32[i18 >> 2] | 0;
+ if ((i16 | 0) == 0) {
+ i5 = 0;
+ break;
+ }
+ }
+ while (1) {
+ i20 = i16 + 20 | 0;
+ i19 = HEAP32[i20 >> 2] | 0;
+ if ((i19 | 0) != 0) {
+ i16 = i19;
+ i18 = i20;
+ continue;
+ }
+ i19 = i16 + 16 | 0;
+ i20 = HEAP32[i19 >> 2] | 0;
+ if ((i20 | 0) == 0) {
+ break;
+ } else {
+ i16 = i20;
+ i18 = i19;
+ }
+ }
+ if (i18 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i18 >> 2] = 0;
+ i5 = i16;
+ break;
+ }
+ } else {
+ i18 = HEAP32[i17 + ((i13 | 8) + i14) >> 2] | 0;
+ if (i18 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i16 = i18 + 12 | 0;
+ if ((HEAP32[i16 >> 2] | 0) != (i15 | 0)) {
+ _abort();
+ }
+ i20 = i19 + 8 | 0;
+ if ((HEAP32[i20 >> 2] | 0) == (i15 | 0)) {
+ HEAP32[i16 >> 2] = i19;
+ HEAP32[i20 >> 2] = i18;
+ i5 = i19;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ if ((i9 | 0) != 0) {
+ i16 = HEAP32[i17 + (i14 + 28 + i13) >> 2] | 0;
+ i18 = 888 + (i16 << 2) | 0;
+ if ((i15 | 0) == (HEAP32[i18 >> 2] | 0)) {
+ HEAP32[i18 >> 2] = i5;
+ if ((i5 | 0) == 0) {
+ HEAP32[588 >> 2] = HEAP32[588 >> 2] & ~(1 << i16);
+ break;
+ }
+ } else {
+ if (i9 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i16 = i9 + 16 | 0;
+ if ((HEAP32[i16 >> 2] | 0) == (i15 | 0)) {
+ HEAP32[i16 >> 2] = i5;
+ } else {
+ HEAP32[i9 + 20 >> 2] = i5;
+ }
+ if ((i5 | 0) == 0) {
+ break;
+ }
+ }
+ if (i5 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i5 + 24 >> 2] = i9;
+ i15 = i13 | 16;
+ i9 = HEAP32[i17 + (i15 + i14) >> 2] | 0;
+ do {
+ if ((i9 | 0) != 0) {
+ if (i9 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i5 + 16 >> 2] = i9;
+ HEAP32[i9 + 24 >> 2] = i5;
+ break;
+ }
+ }
+ } while (0);
+ i9 = HEAP32[i17 + (i12 + i15) >> 2] | 0;
+ if ((i9 | 0) != 0) {
+ if (i9 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i5 + 20 >> 2] = i9;
+ HEAP32[i9 + 24 >> 2] = i5;
+ break;
+ }
+ }
+ }
+ } else {
+ i5 = HEAP32[i17 + ((i13 | 8) + i14) >> 2] | 0;
+ i12 = HEAP32[i17 + (i14 + 12 + i13) >> 2] | 0;
+ i18 = 624 + (i16 << 1 << 2) | 0;
+ if ((i5 | 0) != (i18 | 0)) {
+ if (i5 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ if ((HEAP32[i5 + 12 >> 2] | 0) != (i15 | 0)) {
+ _abort();
+ }
+ }
+ if ((i12 | 0) == (i5 | 0)) {
+ HEAP32[146] = HEAP32[146] & ~(1 << i16);
+ break;
+ }
+ if ((i12 | 0) != (i18 | 0)) {
+ if (i12 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i16 = i12 + 8 | 0;
+ if ((HEAP32[i16 >> 2] | 0) == (i15 | 0)) {
+ i9 = i16;
+ } else {
+ _abort();
+ }
+ } else {
+ i9 = i12 + 8 | 0;
+ }
+ HEAP32[i5 + 12 >> 2] = i12;
+ HEAP32[i9 >> 2] = i5;
+ }
+ } while (0);
+ i15 = i17 + ((i11 | i13) + i14) | 0;
+ i10 = i11 + i10 | 0;
+ }
+ i5 = i15 + 4 | 0;
+ HEAP32[i5 >> 2] = HEAP32[i5 >> 2] & -2;
+ HEAP32[i17 + (i8 + 4) >> 2] = i10 | 1;
+ HEAP32[i17 + (i10 + i8) >> 2] = i10;
+ i5 = i10 >>> 3;
+ if (i10 >>> 0 < 256) {
+ i10 = i5 << 1;
+ i2 = 624 + (i10 << 2) | 0;
+ i9 = HEAP32[146] | 0;
+ i5 = 1 << i5;
+ if ((i9 & i5 | 0) != 0) {
+ i9 = 624 + (i10 + 2 << 2) | 0;
+ i5 = HEAP32[i9 >> 2] | 0;
+ if (i5 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i3 = i9;
+ i4 = i5;
+ }
+ } else {
+ HEAP32[146] = i9 | i5;
+ i3 = 624 + (i10 + 2 << 2) | 0;
+ i4 = i2;
+ }
+ HEAP32[i3 >> 2] = i7;
+ HEAP32[i4 + 12 >> 2] = i7;
+ HEAP32[i17 + (i8 + 8) >> 2] = i4;
+ HEAP32[i17 + (i8 + 12) >> 2] = i2;
+ break;
+ }
+ i3 = i10 >>> 8;
+ if ((i3 | 0) != 0) {
+ if (i10 >>> 0 > 16777215) {
+ i3 = 31;
+ } else {
+ i31 = (i3 + 1048320 | 0) >>> 16 & 8;
+ i32 = i3 << i31;
+ i30 = (i32 + 520192 | 0) >>> 16 & 4;
+ i32 = i32 << i30;
+ i3 = (i32 + 245760 | 0) >>> 16 & 2;
+ i3 = 14 - (i30 | i31 | i3) + (i32 << i3 >>> 15) | 0;
+ i3 = i10 >>> (i3 + 7 | 0) & 1 | i3 << 1;
+ }
+ } else {
+ i3 = 0;
+ }
+ i4 = 888 + (i3 << 2) | 0;
+ HEAP32[i17 + (i8 + 28) >> 2] = i3;
+ HEAP32[i17 + (i8 + 20) >> 2] = 0;
+ HEAP32[i17 + (i8 + 16) >> 2] = 0;
+ i9 = HEAP32[588 >> 2] | 0;
+ i5 = 1 << i3;
+ if ((i9 & i5 | 0) == 0) {
+ HEAP32[588 >> 2] = i9 | i5;
+ HEAP32[i4 >> 2] = i7;
+ HEAP32[i17 + (i8 + 24) >> 2] = i4;
+ HEAP32[i17 + (i8 + 12) >> 2] = i7;
+ HEAP32[i17 + (i8 + 8) >> 2] = i7;
+ break;
+ }
+ i4 = HEAP32[i4 >> 2] | 0;
+ if ((i3 | 0) == 31) {
+ i3 = 0;
+ } else {
+ i3 = 25 - (i3 >>> 1) | 0;
+ }
+ L444 : do {
+ if ((HEAP32[i4 + 4 >> 2] & -8 | 0) != (i10 | 0)) {
+ i3 = i10 << i3;
+ while (1) {
+ i5 = i4 + (i3 >>> 31 << 2) + 16 | 0;
+ i9 = HEAP32[i5 >> 2] | 0;
+ if ((i9 | 0) == 0) {
+ break;
+ }
+ if ((HEAP32[i9 + 4 >> 2] & -8 | 0) == (i10 | 0)) {
+ i2 = i9;
+ break L444;
+ } else {
+ i3 = i3 << 1;
+ i4 = i9;
+ }
+ }
+ if (i5 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i5 >> 2] = i7;
+ HEAP32[i17 + (i8 + 24) >> 2] = i4;
+ HEAP32[i17 + (i8 + 12) >> 2] = i7;
+ HEAP32[i17 + (i8 + 8) >> 2] = i7;
+ break L348;
+ }
+ } else {
+ i2 = i4;
+ }
+ } while (0);
+ i4 = i2 + 8 | 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ i5 = HEAP32[600 >> 2] | 0;
+ if (i2 >>> 0 < i5 >>> 0) {
+ _abort();
+ }
+ if (i3 >>> 0 < i5 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i3 + 12 >> 2] = i7;
+ HEAP32[i4 >> 2] = i7;
+ HEAP32[i17 + (i8 + 8) >> 2] = i3;
+ HEAP32[i17 + (i8 + 12) >> 2] = i2;
+ HEAP32[i17 + (i8 + 24) >> 2] = 0;
+ break;
+ }
+ } else {
+ i32 = (HEAP32[596 >> 2] | 0) + i10 | 0;
+ HEAP32[596 >> 2] = i32;
+ HEAP32[608 >> 2] = i7;
+ HEAP32[i17 + (i8 + 4) >> 2] = i32 | 1;
+ }
+ } while (0);
+ i32 = i17 + (i6 | 8) | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ i3 = 1032 | 0;
+ while (1) {
+ i2 = HEAP32[i3 >> 2] | 0;
+ if (!(i2 >>> 0 > i15 >>> 0) ? (i11 = HEAP32[i3 + 4 >> 2] | 0, i10 = i2 + i11 | 0, i10 >>> 0 > i15 >>> 0) : 0) {
+ break;
+ }
+ i3 = HEAP32[i3 + 8 >> 2] | 0;
+ }
+ i3 = i2 + (i11 + -39) | 0;
+ if ((i3 & 7 | 0) == 0) {
+ i3 = 0;
+ } else {
+ i3 = 0 - i3 & 7;
+ }
+ i2 = i2 + (i11 + -47 + i3) | 0;
+ i2 = i2 >>> 0 < (i15 + 16 | 0) >>> 0 ? i15 : i2;
+ i3 = i2 + 8 | 0;
+ i4 = i17 + 8 | 0;
+ if ((i4 & 7 | 0) == 0) {
+ i4 = 0;
+ } else {
+ i4 = 0 - i4 & 7;
+ }
+ i32 = i14 + -40 - i4 | 0;
+ HEAP32[608 >> 2] = i17 + i4;
+ HEAP32[596 >> 2] = i32;
+ HEAP32[i17 + (i4 + 4) >> 2] = i32 | 1;
+ HEAP32[i17 + (i14 + -36) >> 2] = 40;
+ HEAP32[612 >> 2] = HEAP32[1072 >> 2];
+ HEAP32[i2 + 4 >> 2] = 27;
+ HEAP32[i3 + 0 >> 2] = HEAP32[1032 >> 2];
+ HEAP32[i3 + 4 >> 2] = HEAP32[1036 >> 2];
+ HEAP32[i3 + 8 >> 2] = HEAP32[1040 >> 2];
+ HEAP32[i3 + 12 >> 2] = HEAP32[1044 >> 2];
+ HEAP32[1032 >> 2] = i17;
+ HEAP32[1036 >> 2] = i14;
+ HEAP32[1044 >> 2] = 0;
+ HEAP32[1040 >> 2] = i3;
+ i4 = i2 + 28 | 0;
+ HEAP32[i4 >> 2] = 7;
+ if ((i2 + 32 | 0) >>> 0 < i10 >>> 0) {
+ while (1) {
+ i3 = i4 + 4 | 0;
+ HEAP32[i3 >> 2] = 7;
+ if ((i4 + 8 | 0) >>> 0 < i10 >>> 0) {
+ i4 = i3;
+ } else {
+ break;
+ }
+ }
+ }
+ if ((i2 | 0) != (i15 | 0)) {
+ i2 = i2 - i15 | 0;
+ i3 = i15 + (i2 + 4) | 0;
+ HEAP32[i3 >> 2] = HEAP32[i3 >> 2] & -2;
+ HEAP32[i15 + 4 >> 2] = i2 | 1;
+ HEAP32[i15 + i2 >> 2] = i2;
+ i3 = i2 >>> 3;
+ if (i2 >>> 0 < 256) {
+ i4 = i3 << 1;
+ i2 = 624 + (i4 << 2) | 0;
+ i5 = HEAP32[146] | 0;
+ i3 = 1 << i3;
+ if ((i5 & i3 | 0) != 0) {
+ i4 = 624 + (i4 + 2 << 2) | 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ if (i3 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i7 = i4;
+ i8 = i3;
+ }
+ } else {
+ HEAP32[146] = i5 | i3;
+ i7 = 624 + (i4 + 2 << 2) | 0;
+ i8 = i2;
+ }
+ HEAP32[i7 >> 2] = i15;
+ HEAP32[i8 + 12 >> 2] = i15;
+ HEAP32[i15 + 8 >> 2] = i8;
+ HEAP32[i15 + 12 >> 2] = i2;
+ break;
+ }
+ i3 = i2 >>> 8;
+ if ((i3 | 0) != 0) {
+ if (i2 >>> 0 > 16777215) {
+ i3 = 31;
+ } else {
+ i31 = (i3 + 1048320 | 0) >>> 16 & 8;
+ i32 = i3 << i31;
+ i30 = (i32 + 520192 | 0) >>> 16 & 4;
+ i32 = i32 << i30;
+ i3 = (i32 + 245760 | 0) >>> 16 & 2;
+ i3 = 14 - (i30 | i31 | i3) + (i32 << i3 >>> 15) | 0;
+ i3 = i2 >>> (i3 + 7 | 0) & 1 | i3 << 1;
+ }
+ } else {
+ i3 = 0;
+ }
+ i7 = 888 + (i3 << 2) | 0;
+ HEAP32[i15 + 28 >> 2] = i3;
+ HEAP32[i15 + 20 >> 2] = 0;
+ HEAP32[i15 + 16 >> 2] = 0;
+ i4 = HEAP32[588 >> 2] | 0;
+ i5 = 1 << i3;
+ if ((i4 & i5 | 0) == 0) {
+ HEAP32[588 >> 2] = i4 | i5;
+ HEAP32[i7 >> 2] = i15;
+ HEAP32[i15 + 24 >> 2] = i7;
+ HEAP32[i15 + 12 >> 2] = i15;
+ HEAP32[i15 + 8 >> 2] = i15;
+ break;
+ }
+ i4 = HEAP32[i7 >> 2] | 0;
+ if ((i3 | 0) == 31) {
+ i3 = 0;
+ } else {
+ i3 = 25 - (i3 >>> 1) | 0;
+ }
+ L499 : do {
+ if ((HEAP32[i4 + 4 >> 2] & -8 | 0) != (i2 | 0)) {
+ i3 = i2 << i3;
+ while (1) {
+ i7 = i4 + (i3 >>> 31 << 2) + 16 | 0;
+ i5 = HEAP32[i7 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ break;
+ }
+ if ((HEAP32[i5 + 4 >> 2] & -8 | 0) == (i2 | 0)) {
+ i6 = i5;
+ break L499;
+ } else {
+ i3 = i3 << 1;
+ i4 = i5;
+ }
+ }
+ if (i7 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i7 >> 2] = i15;
+ HEAP32[i15 + 24 >> 2] = i4;
+ HEAP32[i15 + 12 >> 2] = i15;
+ HEAP32[i15 + 8 >> 2] = i15;
+ break L311;
+ }
+ } else {
+ i6 = i4;
+ }
+ } while (0);
+ i4 = i6 + 8 | 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ i2 = HEAP32[600 >> 2] | 0;
+ if (i6 >>> 0 < i2 >>> 0) {
+ _abort();
+ }
+ if (i3 >>> 0 < i2 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i3 + 12 >> 2] = i15;
+ HEAP32[i4 >> 2] = i15;
+ HEAP32[i15 + 8 >> 2] = i3;
+ HEAP32[i15 + 12 >> 2] = i6;
+ HEAP32[i15 + 24 >> 2] = 0;
+ break;
+ }
+ }
+ } else {
+ i32 = HEAP32[600 >> 2] | 0;
+ if ((i32 | 0) == 0 | i17 >>> 0 < i32 >>> 0) {
+ HEAP32[600 >> 2] = i17;
+ }
+ HEAP32[1032 >> 2] = i17;
+ HEAP32[1036 >> 2] = i14;
+ HEAP32[1044 >> 2] = 0;
+ HEAP32[620 >> 2] = HEAP32[264];
+ HEAP32[616 >> 2] = -1;
+ i2 = 0;
+ do {
+ i32 = i2 << 1;
+ i31 = 624 + (i32 << 2) | 0;
+ HEAP32[624 + (i32 + 3 << 2) >> 2] = i31;
+ HEAP32[624 + (i32 + 2 << 2) >> 2] = i31;
+ i2 = i2 + 1 | 0;
+ } while ((i2 | 0) != 32);
+ i2 = i17 + 8 | 0;
+ if ((i2 & 7 | 0) == 0) {
+ i2 = 0;
+ } else {
+ i2 = 0 - i2 & 7;
+ }
+ i32 = i14 + -40 - i2 | 0;
+ HEAP32[608 >> 2] = i17 + i2;
+ HEAP32[596 >> 2] = i32;
+ HEAP32[i17 + (i2 + 4) >> 2] = i32 | 1;
+ HEAP32[i17 + (i14 + -36) >> 2] = 40;
+ HEAP32[612 >> 2] = HEAP32[1072 >> 2];
+ }
+ } while (0);
+ i2 = HEAP32[596 >> 2] | 0;
+ if (i2 >>> 0 > i12 >>> 0) {
+ i31 = i2 - i12 | 0;
+ HEAP32[596 >> 2] = i31;
+ i32 = HEAP32[608 >> 2] | 0;
+ HEAP32[608 >> 2] = i32 + i12;
+ HEAP32[i32 + (i12 + 4) >> 2] = i31 | 1;
+ HEAP32[i32 + 4 >> 2] = i12 | 3;
+ i32 = i32 + 8 | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ }
+ HEAP32[(___errno_location() | 0) >> 2] = 12;
+ i32 = 0;
+ STACKTOP = i1;
+ return i32 | 0;
+}
+function _free(i7) {
+ i7 = i7 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0;
+ i1 = STACKTOP;
+ if ((i7 | 0) == 0) {
+ STACKTOP = i1;
+ return;
+ }
+ i15 = i7 + -8 | 0;
+ i16 = HEAP32[600 >> 2] | 0;
+ if (i15 >>> 0 < i16 >>> 0) {
+ _abort();
+ }
+ i13 = HEAP32[i7 + -4 >> 2] | 0;
+ i12 = i13 & 3;
+ if ((i12 | 0) == 1) {
+ _abort();
+ }
+ i8 = i13 & -8;
+ i6 = i7 + (i8 + -8) | 0;
+ do {
+ if ((i13 & 1 | 0) == 0) {
+ i19 = HEAP32[i15 >> 2] | 0;
+ if ((i12 | 0) == 0) {
+ STACKTOP = i1;
+ return;
+ }
+ i15 = -8 - i19 | 0;
+ i13 = i7 + i15 | 0;
+ i12 = i19 + i8 | 0;
+ if (i13 >>> 0 < i16 >>> 0) {
+ _abort();
+ }
+ if ((i13 | 0) == (HEAP32[604 >> 2] | 0)) {
+ i2 = i7 + (i8 + -4) | 0;
+ if ((HEAP32[i2 >> 2] & 3 | 0) != 3) {
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ HEAP32[592 >> 2] = i12;
+ HEAP32[i2 >> 2] = HEAP32[i2 >> 2] & -2;
+ HEAP32[i7 + (i15 + 4) >> 2] = i12 | 1;
+ HEAP32[i6 >> 2] = i12;
+ STACKTOP = i1;
+ return;
+ }
+ i18 = i19 >>> 3;
+ if (i19 >>> 0 < 256) {
+ i2 = HEAP32[i7 + (i15 + 8) >> 2] | 0;
+ i11 = HEAP32[i7 + (i15 + 12) >> 2] | 0;
+ i14 = 624 + (i18 << 1 << 2) | 0;
+ if ((i2 | 0) != (i14 | 0)) {
+ if (i2 >>> 0 < i16 >>> 0) {
+ _abort();
+ }
+ if ((HEAP32[i2 + 12 >> 2] | 0) != (i13 | 0)) {
+ _abort();
+ }
+ }
+ if ((i11 | 0) == (i2 | 0)) {
+ HEAP32[146] = HEAP32[146] & ~(1 << i18);
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ if ((i11 | 0) != (i14 | 0)) {
+ if (i11 >>> 0 < i16 >>> 0) {
+ _abort();
+ }
+ i14 = i11 + 8 | 0;
+ if ((HEAP32[i14 >> 2] | 0) == (i13 | 0)) {
+ i17 = i14;
+ } else {
+ _abort();
+ }
+ } else {
+ i17 = i11 + 8 | 0;
+ }
+ HEAP32[i2 + 12 >> 2] = i11;
+ HEAP32[i17 >> 2] = i2;
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ i17 = HEAP32[i7 + (i15 + 24) >> 2] | 0;
+ i18 = HEAP32[i7 + (i15 + 12) >> 2] | 0;
+ do {
+ if ((i18 | 0) == (i13 | 0)) {
+ i19 = i7 + (i15 + 20) | 0;
+ i18 = HEAP32[i19 >> 2] | 0;
+ if ((i18 | 0) == 0) {
+ i19 = i7 + (i15 + 16) | 0;
+ i18 = HEAP32[i19 >> 2] | 0;
+ if ((i18 | 0) == 0) {
+ i14 = 0;
+ break;
+ }
+ }
+ while (1) {
+ i21 = i18 + 20 | 0;
+ i20 = HEAP32[i21 >> 2] | 0;
+ if ((i20 | 0) != 0) {
+ i18 = i20;
+ i19 = i21;
+ continue;
+ }
+ i20 = i18 + 16 | 0;
+ i21 = HEAP32[i20 >> 2] | 0;
+ if ((i21 | 0) == 0) {
+ break;
+ } else {
+ i18 = i21;
+ i19 = i20;
+ }
+ }
+ if (i19 >>> 0 < i16 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i19 >> 2] = 0;
+ i14 = i18;
+ break;
+ }
+ } else {
+ i19 = HEAP32[i7 + (i15 + 8) >> 2] | 0;
+ if (i19 >>> 0 < i16 >>> 0) {
+ _abort();
+ }
+ i16 = i19 + 12 | 0;
+ if ((HEAP32[i16 >> 2] | 0) != (i13 | 0)) {
+ _abort();
+ }
+ i20 = i18 + 8 | 0;
+ if ((HEAP32[i20 >> 2] | 0) == (i13 | 0)) {
+ HEAP32[i16 >> 2] = i18;
+ HEAP32[i20 >> 2] = i19;
+ i14 = i18;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ if ((i17 | 0) != 0) {
+ i18 = HEAP32[i7 + (i15 + 28) >> 2] | 0;
+ i16 = 888 + (i18 << 2) | 0;
+ if ((i13 | 0) == (HEAP32[i16 >> 2] | 0)) {
+ HEAP32[i16 >> 2] = i14;
+ if ((i14 | 0) == 0) {
+ HEAP32[588 >> 2] = HEAP32[588 >> 2] & ~(1 << i18);
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ } else {
+ if (i17 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i16 = i17 + 16 | 0;
+ if ((HEAP32[i16 >> 2] | 0) == (i13 | 0)) {
+ HEAP32[i16 >> 2] = i14;
+ } else {
+ HEAP32[i17 + 20 >> 2] = i14;
+ }
+ if ((i14 | 0) == 0) {
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ }
+ if (i14 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i14 + 24 >> 2] = i17;
+ i16 = HEAP32[i7 + (i15 + 16) >> 2] | 0;
+ do {
+ if ((i16 | 0) != 0) {
+ if (i16 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i14 + 16 >> 2] = i16;
+ HEAP32[i16 + 24 >> 2] = i14;
+ break;
+ }
+ }
+ } while (0);
+ i15 = HEAP32[i7 + (i15 + 20) >> 2] | 0;
+ if ((i15 | 0) != 0) {
+ if (i15 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i14 + 20 >> 2] = i15;
+ HEAP32[i15 + 24 >> 2] = i14;
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ } else {
+ i2 = i13;
+ i11 = i12;
+ }
+ } else {
+ i2 = i13;
+ i11 = i12;
+ }
+ } else {
+ i2 = i15;
+ i11 = i8;
+ }
+ } while (0);
+ if (!(i2 >>> 0 < i6 >>> 0)) {
+ _abort();
+ }
+ i12 = i7 + (i8 + -4) | 0;
+ i13 = HEAP32[i12 >> 2] | 0;
+ if ((i13 & 1 | 0) == 0) {
+ _abort();
+ }
+ if ((i13 & 2 | 0) == 0) {
+ if ((i6 | 0) == (HEAP32[608 >> 2] | 0)) {
+ i21 = (HEAP32[596 >> 2] | 0) + i11 | 0;
+ HEAP32[596 >> 2] = i21;
+ HEAP32[608 >> 2] = i2;
+ HEAP32[i2 + 4 >> 2] = i21 | 1;
+ if ((i2 | 0) != (HEAP32[604 >> 2] | 0)) {
+ STACKTOP = i1;
+ return;
+ }
+ HEAP32[604 >> 2] = 0;
+ HEAP32[592 >> 2] = 0;
+ STACKTOP = i1;
+ return;
+ }
+ if ((i6 | 0) == (HEAP32[604 >> 2] | 0)) {
+ i21 = (HEAP32[592 >> 2] | 0) + i11 | 0;
+ HEAP32[592 >> 2] = i21;
+ HEAP32[604 >> 2] = i2;
+ HEAP32[i2 + 4 >> 2] = i21 | 1;
+ HEAP32[i2 + i21 >> 2] = i21;
+ STACKTOP = i1;
+ return;
+ }
+ i11 = (i13 & -8) + i11 | 0;
+ i12 = i13 >>> 3;
+ do {
+ if (!(i13 >>> 0 < 256)) {
+ i10 = HEAP32[i7 + (i8 + 16) >> 2] | 0;
+ i15 = HEAP32[i7 + (i8 | 4) >> 2] | 0;
+ do {
+ if ((i15 | 0) == (i6 | 0)) {
+ i13 = i7 + (i8 + 12) | 0;
+ i12 = HEAP32[i13 >> 2] | 0;
+ if ((i12 | 0) == 0) {
+ i13 = i7 + (i8 + 8) | 0;
+ i12 = HEAP32[i13 >> 2] | 0;
+ if ((i12 | 0) == 0) {
+ i9 = 0;
+ break;
+ }
+ }
+ while (1) {
+ i14 = i12 + 20 | 0;
+ i15 = HEAP32[i14 >> 2] | 0;
+ if ((i15 | 0) != 0) {
+ i12 = i15;
+ i13 = i14;
+ continue;
+ }
+ i14 = i12 + 16 | 0;
+ i15 = HEAP32[i14 >> 2] | 0;
+ if ((i15 | 0) == 0) {
+ break;
+ } else {
+ i12 = i15;
+ i13 = i14;
+ }
+ }
+ if (i13 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i13 >> 2] = 0;
+ i9 = i12;
+ break;
+ }
+ } else {
+ i13 = HEAP32[i7 + i8 >> 2] | 0;
+ if (i13 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i14 = i13 + 12 | 0;
+ if ((HEAP32[i14 >> 2] | 0) != (i6 | 0)) {
+ _abort();
+ }
+ i12 = i15 + 8 | 0;
+ if ((HEAP32[i12 >> 2] | 0) == (i6 | 0)) {
+ HEAP32[i14 >> 2] = i15;
+ HEAP32[i12 >> 2] = i13;
+ i9 = i15;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ if ((i10 | 0) != 0) {
+ i12 = HEAP32[i7 + (i8 + 20) >> 2] | 0;
+ i13 = 888 + (i12 << 2) | 0;
+ if ((i6 | 0) == (HEAP32[i13 >> 2] | 0)) {
+ HEAP32[i13 >> 2] = i9;
+ if ((i9 | 0) == 0) {
+ HEAP32[588 >> 2] = HEAP32[588 >> 2] & ~(1 << i12);
+ break;
+ }
+ } else {
+ if (i10 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i12 = i10 + 16 | 0;
+ if ((HEAP32[i12 >> 2] | 0) == (i6 | 0)) {
+ HEAP32[i12 >> 2] = i9;
+ } else {
+ HEAP32[i10 + 20 >> 2] = i9;
+ }
+ if ((i9 | 0) == 0) {
+ break;
+ }
+ }
+ if (i9 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i9 + 24 >> 2] = i10;
+ i6 = HEAP32[i7 + (i8 + 8) >> 2] | 0;
+ do {
+ if ((i6 | 0) != 0) {
+ if (i6 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i9 + 16 >> 2] = i6;
+ HEAP32[i6 + 24 >> 2] = i9;
+ break;
+ }
+ }
+ } while (0);
+ i6 = HEAP32[i7 + (i8 + 12) >> 2] | 0;
+ if ((i6 | 0) != 0) {
+ if (i6 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i9 + 20 >> 2] = i6;
+ HEAP32[i6 + 24 >> 2] = i9;
+ break;
+ }
+ }
+ }
+ } else {
+ i9 = HEAP32[i7 + i8 >> 2] | 0;
+ i7 = HEAP32[i7 + (i8 | 4) >> 2] | 0;
+ i8 = 624 + (i12 << 1 << 2) | 0;
+ if ((i9 | 0) != (i8 | 0)) {
+ if (i9 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ if ((HEAP32[i9 + 12 >> 2] | 0) != (i6 | 0)) {
+ _abort();
+ }
+ }
+ if ((i7 | 0) == (i9 | 0)) {
+ HEAP32[146] = HEAP32[146] & ~(1 << i12);
+ break;
+ }
+ if ((i7 | 0) != (i8 | 0)) {
+ if (i7 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i8 = i7 + 8 | 0;
+ if ((HEAP32[i8 >> 2] | 0) == (i6 | 0)) {
+ i10 = i8;
+ } else {
+ _abort();
+ }
+ } else {
+ i10 = i7 + 8 | 0;
+ }
+ HEAP32[i9 + 12 >> 2] = i7;
+ HEAP32[i10 >> 2] = i9;
+ }
+ } while (0);
+ HEAP32[i2 + 4 >> 2] = i11 | 1;
+ HEAP32[i2 + i11 >> 2] = i11;
+ if ((i2 | 0) == (HEAP32[604 >> 2] | 0)) {
+ HEAP32[592 >> 2] = i11;
+ STACKTOP = i1;
+ return;
+ }
+ } else {
+ HEAP32[i12 >> 2] = i13 & -2;
+ HEAP32[i2 + 4 >> 2] = i11 | 1;
+ HEAP32[i2 + i11 >> 2] = i11;
+ }
+ i6 = i11 >>> 3;
+ if (i11 >>> 0 < 256) {
+ i7 = i6 << 1;
+ i3 = 624 + (i7 << 2) | 0;
+ i8 = HEAP32[146] | 0;
+ i6 = 1 << i6;
+ if ((i8 & i6 | 0) != 0) {
+ i6 = 624 + (i7 + 2 << 2) | 0;
+ i7 = HEAP32[i6 >> 2] | 0;
+ if (i7 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i4 = i6;
+ i5 = i7;
+ }
+ } else {
+ HEAP32[146] = i8 | i6;
+ i4 = 624 + (i7 + 2 << 2) | 0;
+ i5 = i3;
+ }
+ HEAP32[i4 >> 2] = i2;
+ HEAP32[i5 + 12 >> 2] = i2;
+ HEAP32[i2 + 8 >> 2] = i5;
+ HEAP32[i2 + 12 >> 2] = i3;
+ STACKTOP = i1;
+ return;
+ }
+ i4 = i11 >>> 8;
+ if ((i4 | 0) != 0) {
+ if (i11 >>> 0 > 16777215) {
+ i4 = 31;
+ } else {
+ i20 = (i4 + 1048320 | 0) >>> 16 & 8;
+ i21 = i4 << i20;
+ i19 = (i21 + 520192 | 0) >>> 16 & 4;
+ i21 = i21 << i19;
+ i4 = (i21 + 245760 | 0) >>> 16 & 2;
+ i4 = 14 - (i19 | i20 | i4) + (i21 << i4 >>> 15) | 0;
+ i4 = i11 >>> (i4 + 7 | 0) & 1 | i4 << 1;
+ }
+ } else {
+ i4 = 0;
+ }
+ i5 = 888 + (i4 << 2) | 0;
+ HEAP32[i2 + 28 >> 2] = i4;
+ HEAP32[i2 + 20 >> 2] = 0;
+ HEAP32[i2 + 16 >> 2] = 0;
+ i7 = HEAP32[588 >> 2] | 0;
+ i6 = 1 << i4;
+ L199 : do {
+ if ((i7 & i6 | 0) != 0) {
+ i5 = HEAP32[i5 >> 2] | 0;
+ if ((i4 | 0) == 31) {
+ i4 = 0;
+ } else {
+ i4 = 25 - (i4 >>> 1) | 0;
+ }
+ L204 : do {
+ if ((HEAP32[i5 + 4 >> 2] & -8 | 0) != (i11 | 0)) {
+ i4 = i11 << i4;
+ i7 = i5;
+ while (1) {
+ i6 = i7 + (i4 >>> 31 << 2) + 16 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ break;
+ }
+ if ((HEAP32[i5 + 4 >> 2] & -8 | 0) == (i11 | 0)) {
+ i3 = i5;
+ break L204;
+ } else {
+ i4 = i4 << 1;
+ i7 = i5;
+ }
+ }
+ if (i6 >>> 0 < (HEAP32[600 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i6 >> 2] = i2;
+ HEAP32[i2 + 24 >> 2] = i7;
+ HEAP32[i2 + 12 >> 2] = i2;
+ HEAP32[i2 + 8 >> 2] = i2;
+ break L199;
+ }
+ } else {
+ i3 = i5;
+ }
+ } while (0);
+ i5 = i3 + 8 | 0;
+ i4 = HEAP32[i5 >> 2] | 0;
+ i6 = HEAP32[600 >> 2] | 0;
+ if (i3 >>> 0 < i6 >>> 0) {
+ _abort();
+ }
+ if (i4 >>> 0 < i6 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i4 + 12 >> 2] = i2;
+ HEAP32[i5 >> 2] = i2;
+ HEAP32[i2 + 8 >> 2] = i4;
+ HEAP32[i2 + 12 >> 2] = i3;
+ HEAP32[i2 + 24 >> 2] = 0;
+ break;
+ }
+ } else {
+ HEAP32[588 >> 2] = i7 | i6;
+ HEAP32[i5 >> 2] = i2;
+ HEAP32[i2 + 24 >> 2] = i5;
+ HEAP32[i2 + 12 >> 2] = i2;
+ HEAP32[i2 + 8 >> 2] = i2;
+ }
+ } while (0);
+ i21 = (HEAP32[616 >> 2] | 0) + -1 | 0;
+ HEAP32[616 >> 2] = i21;
+ if ((i21 | 0) == 0) {
+ i2 = 1040 | 0;
+ } else {
+ STACKTOP = i1;
+ return;
+ }
+ while (1) {
+ i2 = HEAP32[i2 >> 2] | 0;
+ if ((i2 | 0) == 0) {
+ break;
+ } else {
+ i2 = i2 + 8 | 0;
+ }
+ }
+ HEAP32[616 >> 2] = -1;
+ STACKTOP = i1;
+ return;
+}
+function _main(i7, i8) {
+ i7 = i7 | 0;
+ i8 = i8 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, d9 = 0.0, d10 = 0.0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 4272 | 0;
+ i3 = i2;
+ i5 = i2 + 4248 | 0;
+ i4 = i2 + 2128 | 0;
+ i1 = i2 + 8 | 0;
+ L1 : do {
+ if ((i7 | 0) > 1) {
+ i7 = HEAP8[HEAP32[i8 + 4 >> 2] | 0] | 0;
+ switch (i7 | 0) {
+ case 50:
+ {
+ i3 = 95e5;
+ break L1;
+ }
+ case 51:
+ {
+ i6 = 4;
+ break L1;
+ }
+ case 52:
+ {
+ i3 = 95e6;
+ break L1;
+ }
+ case 53:
+ {
+ i3 = 19e7;
+ break L1;
+ }
+ case 49:
+ {
+ i3 = 95e4;
+ break L1;
+ }
+ case 48:
+ {
+ i8 = 0;
+ STACKTOP = i2;
+ return i8 | 0;
+ }
+ default:
+ {
+ HEAP32[i3 >> 2] = i7 + -48;
+ _printf(280, i3 | 0) | 0;
+ i8 = -1;
+ STACKTOP = i2;
+ return i8 | 0;
+ }
+ }
+ } else {
+ i6 = 4;
+ }
+ } while (0);
+ if ((i6 | 0) == 4) {
+ i3 = 19e6;
+ }
+ HEAP32[i5 + 8 >> 2] = 0;
+ HEAP32[i5 + 4 >> 2] = 287;
+ i8 = __Znaj(347) | 0;
+ HEAP32[i5 >> 2] = i8;
+ _memcpy(i8 | 0, 296, 287) | 0;
+ i8 = i8 + 287 | 0;
+ i7 = 296 | 0;
+ i6 = i8 + 60 | 0;
+ do {
+ HEAP8[i8] = HEAP8[i7] | 0;
+ i8 = i8 + 1 | 0;
+ i7 = i7 + 1 | 0;
+ } while ((i8 | 0) < (i6 | 0));
+ i7 = i3 << 1;
+ while (1) {
+ i6 = i7 >>> 0 < 60 ? i7 : 60;
+ __ZN14RotatingString5writeEj(i5, i6);
+ if ((i7 | 0) == (i6 | 0)) {
+ break;
+ } else {
+ i7 = i7 - i6 | 0;
+ }
+ }
+ i5 = HEAP32[i5 >> 2] | 0;
+ if ((i5 | 0) != 0) {
+ __ZdaPv(i5);
+ }
+ if ((HEAP32[6] | 0) == 0) {
+ i6 = 24;
+ i5 = 0;
+ } else {
+ i5 = 24;
+ d9 = 0.0;
+ while (1) {
+ i6 = i5 + 4 | 0;
+ d9 = d9 + +HEAPF32[i6 >> 2];
+ d10 = d9 < 1.0 ? d9 : 1.0;
+ HEAPF32[i6 >> 2] = d10;
+ HEAP32[i5 + 8 >> 2] = ~~(d10 * 512.0) >>> 0;
+ i5 = i5 + 12 | 0;
+ if ((HEAP32[i5 >> 2] | 0) == 0) {
+ i6 = 24;
+ i5 = 0;
+ break;
+ }
+ }
+ }
+ do {
+ while (1) {
+ i8 = HEAP32[i6 + 8 >> 2] | 0;
+ if (i5 >>> 0 > i8 >>> 0 & (i8 | 0) != 0) {
+ i6 = i6 + 12 | 0;
+ } else {
+ break;
+ }
+ }
+ HEAP32[i4 + (i5 << 2) >> 2] = i6;
+ i5 = i5 + 1 | 0;
+ } while ((i5 | 0) != 513);
+ HEAP32[i4 + 2116 >> 2] = 0;
+ __Z9makeFastaI10RandomizedEvPKcS2_jRT_(0, 0, i3 * 3 | 0, i4);
+ if ((HEAP32[54] | 0) == 0) {
+ i5 = 216;
+ i4 = 0;
+ } else {
+ i5 = 216;
+ d9 = 0.0;
+ while (1) {
+ i4 = i5 + 4 | 0;
+ d9 = d9 + +HEAPF32[i4 >> 2];
+ d10 = d9 < 1.0 ? d9 : 1.0;
+ HEAPF32[i4 >> 2] = d10;
+ HEAP32[i5 + 8 >> 2] = ~~(d10 * 512.0) >>> 0;
+ i5 = i5 + 12 | 0;
+ if ((HEAP32[i5 >> 2] | 0) == 0) {
+ i5 = 216;
+ i4 = 0;
+ break;
+ }
+ }
+ }
+ do {
+ while (1) {
+ i8 = HEAP32[i5 + 8 >> 2] | 0;
+ if (i4 >>> 0 > i8 >>> 0 & (i8 | 0) != 0) {
+ i5 = i5 + 12 | 0;
+ } else {
+ break;
+ }
+ }
+ HEAP32[i1 + (i4 << 2) >> 2] = i5;
+ i4 = i4 + 1 | 0;
+ } while ((i4 | 0) != 513);
+ HEAP32[i1 + 2116 >> 2] = 0;
+ __Z9makeFastaI10RandomizedEvPKcS2_jRT_(0, 0, i3 * 5 | 0, i1);
+ i8 = 0;
+ STACKTOP = i2;
+ return i8 | 0;
+}
+function __Z9makeFastaI10RandomizedEvPKcS2_jRT_(i3, i2, i6, i1) {
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i6 = i6 | 0;
+ i1 = i1 | 0;
+ var i4 = 0, i5 = 0, i7 = 0, d8 = 0.0, i9 = 0;
+ i2 = STACKTOP;
+ if ((i6 | 0) == 0) {
+ STACKTOP = i2;
+ return;
+ }
+ i4 = i1 + 2116 | 0;
+ i3 = i1 + 2052 | 0;
+ while (1) {
+ i5 = i6 >>> 0 < 60 ? i6 : 60;
+ if ((i5 | 0) != 0) {
+ i7 = 0;
+ do {
+ i9 = ((((HEAP32[4] | 0) * 3877 | 0) + 29573 | 0) >>> 0) % 139968 | 0;
+ HEAP32[4] = i9;
+ d8 = +(i9 >>> 0) / 139968.0;
+ i9 = HEAP32[i1 + (~~(d8 * 512.0) >>> 0 << 2) >> 2] | 0;
+ while (1) {
+ if (+HEAPF32[i9 + 4 >> 2] < d8) {
+ i9 = i9 + 12 | 0;
+ } else {
+ break;
+ }
+ }
+ HEAP8[i1 + i7 + 2052 | 0] = HEAP32[i9 >> 2];
+ i7 = i7 + 1 | 0;
+ } while ((i7 | 0) != (i5 | 0));
+ }
+ HEAP8[i1 + i5 + 2052 | 0] = 10;
+ i9 = i5 + 1 | 0;
+ HEAP8[i1 + i9 + 2052 | 0] = 0;
+ HEAP32[i4 >> 2] = i9;
+ i9 = _strlen(i3 | 0) | 0;
+ i7 = HEAP32[2] | 0;
+ if ((i9 | 0) > (i7 | 0)) {
+ if ((i7 | 0) > 0) {
+ HEAP8[i1 + i7 + 2052 | 0] = 0;
+ _puts(i3 | 0) | 0;
+ HEAP8[i1 + (HEAP32[2] | 0) + 2052 | 0] = 122;
+ HEAP32[2] = 0;
+ }
+ } else {
+ _puts(i3 | 0) | 0;
+ HEAP32[2] = (HEAP32[2] | 0) - i9;
+ }
+ if ((i6 | 0) == (i5 | 0)) {
+ break;
+ } else {
+ i6 = i6 - i5 | 0;
+ }
+ }
+ STACKTOP = i2;
+ return;
+}
+function __ZN14RotatingString5writeEj(i3, i4) {
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ var i1 = 0, i2 = 0, i5 = 0, i6 = 0, i7 = 0;
+ i1 = STACKTOP;
+ i5 = __Znaj(i4 + 2 | 0) | 0;
+ i2 = i3 + 8 | 0;
+ _memcpy(i5 | 0, (HEAP32[i3 >> 2] | 0) + (HEAP32[i2 >> 2] | 0) | 0, i4 | 0) | 0;
+ HEAP8[i5 + i4 | 0] = 0;
+ i7 = _strlen(i5 | 0) | 0;
+ i6 = HEAP32[2] | 0;
+ if ((i7 | 0) > (i6 | 0)) {
+ if ((i6 | 0) > 0) {
+ HEAP8[i5 + i6 | 0] = 0;
+ _puts(i5 | 0) | 0;
+ HEAP32[2] = 0;
+ i6 = 6;
+ } else {
+ i6 = 5;
+ }
+ } else {
+ _puts(i5 | 0) | 0;
+ HEAP32[2] = (HEAP32[2] | 0) - i7;
+ i6 = 5;
+ }
+ if ((i6 | 0) == 5 ? (i5 | 0) != 0 : 0) {
+ i6 = 6;
+ }
+ if ((i6 | 0) == 6) {
+ __ZdlPv(i5);
+ }
+ i4 = (HEAP32[i2 >> 2] | 0) + i4 | 0;
+ HEAP32[i2 >> 2] = i4;
+ i3 = HEAP32[i3 + 4 >> 2] | 0;
+ if (!(i4 >>> 0 > i3 >>> 0)) {
+ STACKTOP = i1;
+ return;
+ }
+ HEAP32[i2 >> 2] = i4 - i3;
+ STACKTOP = i1;
+ return;
+}
+function _memcpy(i3, i2, i1) {
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i4 = 0;
+ if ((i1 | 0) >= 4096) return _emscripten_memcpy_big(i3 | 0, i2 | 0, i1 | 0) | 0;
+ i4 = i3 | 0;
+ if ((i3 & 3) == (i2 & 3)) {
+ while (i3 & 3) {
+ if ((i1 | 0) == 0) return i4 | 0;
+ HEAP8[i3] = HEAP8[i2] | 0;
+ i3 = i3 + 1 | 0;
+ i2 = i2 + 1 | 0;
+ i1 = i1 - 1 | 0;
+ }
+ while ((i1 | 0) >= 4) {
+ HEAP32[i3 >> 2] = HEAP32[i2 >> 2];
+ i3 = i3 + 4 | 0;
+ i2 = i2 + 4 | 0;
+ i1 = i1 - 4 | 0;
+ }
+ }
+ while ((i1 | 0) > 0) {
+ HEAP8[i3] = HEAP8[i2] | 0;
+ i3 = i3 + 1 | 0;
+ i2 = i2 + 1 | 0;
+ i1 = i1 - 1 | 0;
+ }
+ return i4 | 0;
+}
+function _memset(i1, i4, i3) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i5 = 0, i6 = 0, i7 = 0;
+ i2 = i1 + i3 | 0;
+ if ((i3 | 0) >= 20) {
+ i4 = i4 & 255;
+ i7 = i1 & 3;
+ i6 = i4 | i4 << 8 | i4 << 16 | i4 << 24;
+ i5 = i2 & ~3;
+ if (i7) {
+ i7 = i1 + 4 - i7 | 0;
+ while ((i1 | 0) < (i7 | 0)) {
+ HEAP8[i1] = i4;
+ i1 = i1 + 1 | 0;
+ }
+ }
+ while ((i1 | 0) < (i5 | 0)) {
+ HEAP32[i1 >> 2] = i6;
+ i1 = i1 + 4 | 0;
+ }
+ }
+ while ((i1 | 0) < (i2 | 0)) {
+ HEAP8[i1] = i4;
+ i1 = i1 + 1 | 0;
+ }
+ return i1 - i3 | 0;
+}
+function __Znwj(i2) {
+ i2 = i2 | 0;
+ var i1 = 0, i3 = 0;
+ i1 = STACKTOP;
+ i2 = (i2 | 0) == 0 ? 1 : i2;
+ while (1) {
+ i3 = _malloc(i2) | 0;
+ if ((i3 | 0) != 0) {
+ i2 = 6;
+ break;
+ }
+ i3 = HEAP32[270] | 0;
+ HEAP32[270] = i3 + 0;
+ if ((i3 | 0) == 0) {
+ i2 = 5;
+ break;
+ }
+ FUNCTION_TABLE_v[i3 & 0]();
+ }
+ if ((i2 | 0) == 5) {
+ i3 = ___cxa_allocate_exception(4) | 0;
+ HEAP32[i3 >> 2] = 1096;
+ ___cxa_throw(i3 | 0, 1144, 1);
+ } else if ((i2 | 0) == 6) {
+ STACKTOP = i1;
+ return i3 | 0;
+ }
+ return 0;
+}
+function copyTempDouble(i1) {
+ i1 = i1 | 0;
+ HEAP8[tempDoublePtr] = HEAP8[i1];
+ HEAP8[tempDoublePtr + 1 | 0] = HEAP8[i1 + 1 | 0];
+ HEAP8[tempDoublePtr + 2 | 0] = HEAP8[i1 + 2 | 0];
+ HEAP8[tempDoublePtr + 3 | 0] = HEAP8[i1 + 3 | 0];
+ HEAP8[tempDoublePtr + 4 | 0] = HEAP8[i1 + 4 | 0];
+ HEAP8[tempDoublePtr + 5 | 0] = HEAP8[i1 + 5 | 0];
+ HEAP8[tempDoublePtr + 6 | 0] = HEAP8[i1 + 6 | 0];
+ HEAP8[tempDoublePtr + 7 | 0] = HEAP8[i1 + 7 | 0];
+}
+function copyTempFloat(i1) {
+ i1 = i1 | 0;
+ HEAP8[tempDoublePtr] = HEAP8[i1];
+ HEAP8[tempDoublePtr + 1 | 0] = HEAP8[i1 + 1 | 0];
+ HEAP8[tempDoublePtr + 2 | 0] = HEAP8[i1 + 2 | 0];
+ HEAP8[tempDoublePtr + 3 | 0] = HEAP8[i1 + 3 | 0];
+}
+function __ZNSt9bad_allocD0Ev(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ __ZNSt9exceptionD2Ev(i1 | 0);
+ __ZdlPv(i1);
+ STACKTOP = i2;
+ return;
+}
+function stackAlloc(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + i1 | 0;
+ STACKTOP = STACKTOP + 7 & -8;
+ return i2 | 0;
+}
+function __ZNSt9bad_allocD2Ev(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ __ZNSt9exceptionD2Ev(i1 | 0);
+ STACKTOP = i2;
+ return;
+}
+function __ZdlPv(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ if ((i1 | 0) != 0) {
+ _free(i1);
+ }
+ STACKTOP = i2;
+ return;
+}
+function _strlen(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = i1;
+ while (HEAP8[i2] | 0) {
+ i2 = i2 + 1 | 0;
+ }
+ return i2 - i1 | 0;
+}
+function setThrew(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ if ((__THREW__ | 0) == 0) {
+ __THREW__ = i1;
+ threwValue = i2;
+ }
+}
+function __Znaj(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ i1 = __Znwj(i1) | 0;
+ STACKTOP = i2;
+ return i1 | 0;
+}
+function runPostSets() {
+ HEAP32[286] = __ZTVN10__cxxabiv120__si_class_type_infoE;
+ HEAP32[288] = __ZTISt9exception;
+}
+function dynCall_ii(i2, i1) {
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ return FUNCTION_TABLE_ii[i2 & 1](i1 | 0) | 0;
+}
+function __ZdaPv(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ __ZdlPv(i1);
+ STACKTOP = i2;
+ return;
+}
+function dynCall_vi(i2, i1) {
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ FUNCTION_TABLE_vi[i2 & 3](i1 | 0);
+}
+function dynCall_v(i1) {
+ i1 = i1 | 0;
+ FUNCTION_TABLE_v[i1 & 0]();
+}
+function __ZNKSt9bad_alloc4whatEv(i1) {
+ i1 = i1 | 0;
+ return 1112;
+}
+function stackRestore(i1) {
+ i1 = i1 | 0;
+ STACKTOP = i1;
+}
+function setTempRet9(i1) {
+ i1 = i1 | 0;
+ tempRet9 = i1;
+}
+function setTempRet8(i1) {
+ i1 = i1 | 0;
+ tempRet8 = i1;
+}
+function setTempRet7(i1) {
+ i1 = i1 | 0;
+ tempRet7 = i1;
+}
+function setTempRet6(i1) {
+ i1 = i1 | 0;
+ tempRet6 = i1;
+}
+function setTempRet5(i1) {
+ i1 = i1 | 0;
+ tempRet5 = i1;
+}
+function setTempRet4(i1) {
+ i1 = i1 | 0;
+ tempRet4 = i1;
+}
+function setTempRet3(i1) {
+ i1 = i1 | 0;
+ tempRet3 = i1;
+}
+function setTempRet2(i1) {
+ i1 = i1 | 0;
+ tempRet2 = i1;
+}
+function setTempRet1(i1) {
+ i1 = i1 | 0;
+ tempRet1 = i1;
+}
+function setTempRet0(i1) {
+ i1 = i1 | 0;
+ tempRet0 = i1;
+}
+function b0(i1) {
+ i1 = i1 | 0;
+ abort(0);
+ return 0;
+}
+function stackSave() {
+ return STACKTOP | 0;
+}
+function b1(i1) {
+ i1 = i1 | 0;
+ abort(1);
+}
+function b2() {
+ abort(2);
+}
+
+// EMSCRIPTEN_END_FUNCS
+ var FUNCTION_TABLE_ii = [b0,__ZNKSt9bad_alloc4whatEv];
+ var FUNCTION_TABLE_vi = [b1,__ZNSt9bad_allocD2Ev,__ZNSt9bad_allocD0Ev,b1];
+ var FUNCTION_TABLE_v = [b2];
+
+ return { _strlen: _strlen, _free: _free, _main: _main, _memset: _memset, _malloc: _malloc, _memcpy: _memcpy, runPostSets: runPostSets, stackAlloc: stackAlloc, stackSave: stackSave, stackRestore: stackRestore, setThrew: setThrew, setTempRet0: setTempRet0, setTempRet1: setTempRet1, setTempRet2: setTempRet2, setTempRet3: setTempRet3, setTempRet4: setTempRet4, setTempRet5: setTempRet5, setTempRet6: setTempRet6, setTempRet7: setTempRet7, setTempRet8: setTempRet8, setTempRet9: setTempRet9, dynCall_ii: dynCall_ii, dynCall_vi: dynCall_vi, dynCall_v: dynCall_v };
+}).toString(),
+// EMSCRIPTEN_END_ASM
+{ "Math": Math, "Int8Array": Int8Array, "Int16Array": Int16Array, "Int32Array": Int32Array, "Uint8Array": Uint8Array, "Uint16Array": Uint16Array, "Uint32Array": Uint32Array, "Float32Array": Float32Array, "Float64Array": Float64Array, "abort": abort, "assert": assert, "asmPrintInt": asmPrintInt, "asmPrintFloat": asmPrintFloat, "min": Math_min, "invoke_ii": invoke_ii, "invoke_vi": invoke_vi, "invoke_v": invoke_v, "_send": _send, "___setErrNo": ___setErrNo, "___cxa_is_number_type": ___cxa_is_number_type, "___cxa_allocate_exception": ___cxa_allocate_exception, "___cxa_find_matching_catch": ___cxa_find_matching_catch, "_fflush": _fflush, "_time": _time, "_pwrite": _pwrite, "__reallyNegative": __reallyNegative, "_sbrk": _sbrk, "_emscripten_memcpy_big": _emscripten_memcpy_big, "_fileno": _fileno, "___resumeException": ___resumeException, "__ZSt18uncaught_exceptionv": __ZSt18uncaught_exceptionv, "_sysconf": _sysconf, "_puts": _puts, "_mkport": _mkport, "_write": _write, "___errno_location": ___errno_location, "__ZNSt9exceptionD2Ev": __ZNSt9exceptionD2Ev, "_fputc": _fputc, "___cxa_throw": ___cxa_throw, "_abort": _abort, "_fwrite": _fwrite, "___cxa_does_inherit": ___cxa_does_inherit, "_fprintf": _fprintf, "__formatString": __formatString, "_fputs": _fputs, "_printf": _printf, "STACKTOP": STACKTOP, "STACK_MAX": STACK_MAX, "tempDoublePtr": tempDoublePtr, "ABORT": ABORT, "NaN": NaN, "Infinity": Infinity, "__ZTISt9exception": __ZTISt9exception, "__ZTVN10__cxxabiv120__si_class_type_infoE": __ZTVN10__cxxabiv120__si_class_type_infoE }, buffer);
+var _strlen = Module["_strlen"] = asm["_strlen"];
+var _free = Module["_free"] = asm["_free"];
+var _main = Module["_main"] = asm["_main"];
+var _memset = Module["_memset"] = asm["_memset"];
+var _malloc = Module["_malloc"] = asm["_malloc"];
+var _memcpy = Module["_memcpy"] = asm["_memcpy"];
+var runPostSets = Module["runPostSets"] = asm["runPostSets"];
+var dynCall_ii = Module["dynCall_ii"] = asm["dynCall_ii"];
+var dynCall_vi = Module["dynCall_vi"] = asm["dynCall_vi"];
+var dynCall_v = Module["dynCall_v"] = asm["dynCall_v"];
+
+Runtime.stackAlloc = function(size) { return asm['stackAlloc'](size) };
+Runtime.stackSave = function() { return asm['stackSave']() };
+Runtime.stackRestore = function(top) { asm['stackRestore'](top) };
+
+
+// Warning: printing of i64 values may be slightly rounded! No deep i64 math used, so precise i64 code not included
+var i64Math = null;
+
+// === Auto-generated postamble setup entry stuff ===
+
+if (memoryInitializer) {
+ if (ENVIRONMENT_IS_NODE || ENVIRONMENT_IS_SHELL) {
+ var data = Module['readBinary'](memoryInitializer);
+ HEAPU8.set(data, STATIC_BASE);
+ } else {
+ addRunDependency('memory initializer');
+ Browser.asyncLoad(memoryInitializer, function(data) {
+ HEAPU8.set(data, STATIC_BASE);
+ removeRunDependency('memory initializer');
+ }, function(data) {
+ throw 'could not load memory initializer ' + memoryInitializer;
+ });
+ }
+}
+
+function ExitStatus(status) {
+ this.name = "ExitStatus";
+ this.message = "Program terminated with exit(" + status + ")";
+ this.status = status;
+};
+ExitStatus.prototype = new Error();
+ExitStatus.prototype.constructor = ExitStatus;
+
+var initialStackTop;
+var preloadStartTime = null;
+var calledMain = false;
+
+dependenciesFulfilled = function runCaller() {
+ // If run has never been called, and we should call run (INVOKE_RUN is true, and Module.noInitialRun is not false)
+ if (!Module['calledRun'] && shouldRunNow) run([].concat(Module["arguments"]));
+ if (!Module['calledRun']) dependenciesFulfilled = runCaller; // try this again later, after new deps are fulfilled
+}
+
+Module['callMain'] = Module.callMain = function callMain(args) {
+ assert(runDependencies == 0, 'cannot call main when async dependencies remain! (listen on __ATMAIN__)');
+ assert(__ATPRERUN__.length == 0, 'cannot call main when preRun functions remain to be called');
+
+ args = args || [];
+
+ ensureInitRuntime();
+
+ var argc = args.length+1;
+ function pad() {
+ for (var i = 0; i < 4-1; i++) {
+ argv.push(0);
+ }
+ }
+ var argv = [allocate(intArrayFromString("/bin/this.program"), 'i8', ALLOC_NORMAL) ];
+ pad();
+ for (var i = 0; i < argc-1; i = i + 1) {
+ argv.push(allocate(intArrayFromString(args[i]), 'i8', ALLOC_NORMAL));
+ pad();
+ }
+ argv.push(0);
+ argv = allocate(argv, 'i32', ALLOC_NORMAL);
+
+ initialStackTop = STACKTOP;
+
+ try {
+
+ var ret = Module['_main'](argc, argv, 0);
+
+
+ // if we're not running an evented main loop, it's time to exit
+ if (!Module['noExitRuntime']) {
+ exit(ret);
+ }
+ }
+ catch(e) {
+ if (e instanceof ExitStatus) {
+ // exit() throws this once it's done to make sure execution
+ // has been stopped completely
+ return;
+ } else if (e == 'SimulateInfiniteLoop') {
+ // running an evented main loop, don't immediately exit
+ Module['noExitRuntime'] = true;
+ return;
+ } else {
+ if (e && typeof e === 'object' && e.stack) Module.printErr('exception thrown: ' + [e, e.stack]);
+ throw e;
+ }
+ } finally {
+ calledMain = true;
+ }
+}
+
+
+
+
+function run(args) {
+ args = args || Module['arguments'];
+
+ if (preloadStartTime === null) preloadStartTime = Date.now();
+
+ if (runDependencies > 0) {
+ Module.printErr('run() called, but dependencies remain, so not running');
+ return;
+ }
+
+ preRun();
+
+ if (runDependencies > 0) return; // a preRun added a dependency, run will be called later
+ if (Module['calledRun']) return; // run may have just been called through dependencies being fulfilled just in this very frame
+
+ function doRun() {
+ if (Module['calledRun']) return; // run may have just been called while the async setStatus time below was happening
+ Module['calledRun'] = true;
+
+ ensureInitRuntime();
+
+ preMain();
+
+ if (ENVIRONMENT_IS_WEB && preloadStartTime !== null) {
+ Module.printErr('pre-main prep time: ' + (Date.now() - preloadStartTime) + ' ms');
+ }
+
+ if (Module['_main'] && shouldRunNow) {
+ Module['callMain'](args);
+ }
+
+ postRun();
+ }
+
+ if (Module['setStatus']) {
+ Module['setStatus']('Running...');
+ setTimeout(function() {
+ setTimeout(function() {
+ Module['setStatus']('');
+ }, 1);
+ if (!ABORT) doRun();
+ }, 1);
+ } else {
+ doRun();
+ }
+}
+Module['run'] = Module.run = run;
+
+function exit(status) {
+ ABORT = true;
+ EXITSTATUS = status;
+ STACKTOP = initialStackTop;
+
+ // exit the runtime
+ exitRuntime();
+
+ // TODO We should handle this differently based on environment.
+ // In the browser, the best we can do is throw an exception
+ // to halt execution, but in node we could process.exit and
+ // I'd imagine SM shell would have something equivalent.
+ // This would let us set a proper exit status (which
+ // would be great for checking test exit statuses).
+ // https://github.com/kripken/emscripten/issues/1371
+
+ // throw an exception to halt the current execution
+ throw new ExitStatus(status);
+}
+Module['exit'] = Module.exit = exit;
+
+function abort(text) {
+ if (text) {
+ Module.print(text);
+ Module.printErr(text);
+ }
+
+ ABORT = true;
+ EXITSTATUS = 1;
+
+ var extra = '\nIf this abort() is unexpected, build with -s ASSERTIONS=1 which can give more information.';
+
+ throw 'abort() at ' + stackTrace() + extra;
+}
+Module['abort'] = Module.abort = abort;
+
+// {{PRE_RUN_ADDITIONS}}
+
+if (Module['preInit']) {
+ if (typeof Module['preInit'] == 'function') Module['preInit'] = [Module['preInit']];
+ while (Module['preInit'].length > 0) {
+ Module['preInit'].pop()();
+ }
+}
+
+// shouldRunNow refers to calling main(), not run().
+var shouldRunNow = true;
+if (Module['noInitialRun']) {
+ shouldRunNow = false;
+}
+
+
+run([].concat(Module["arguments"]));
diff --git a/deps/v8/test/mjsunit/wasm/embenchen/lua_binarytrees.js b/deps/v8/test/mjsunit/wasm/embenchen/lua_binarytrees.js
new file mode 100644
index 0000000000..a5f8228b82
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/embenchen/lua_binarytrees.js
@@ -0,0 +1,42713 @@
+// Modified embenchen to direct to asm-wasm.
+// Flags: --expose-wasm
+
+var EXPECTED_OUTPUT =
+ 'stretch tree of depth 10\t check: -1\n' +
+ '1448\t trees of depth 4\t check: -1448\n' +
+ '362\t trees of depth 6\t check: -362\n' +
+ '90\t trees of depth 8\t check: -90\n' +
+ 'long lived tree of depth 9\t check: -1\n';
+var Module = {
+ arguments: [1],
+ print: function(x) {Module.printBuffer += x + '\n';},
+ preRun: [function() {Module.printBuffer = ''}],
+ postRun: [function() {
+ assertEquals(EXPECTED_OUTPUT, Module.printBuffer);
+ }],
+};
+
+var Module;
+if (typeof Module === 'undefined') Module = eval('(function() { try { return Module || {} } catch(e) { return {} } })()');
+if (!Module.expectedDataFileDownloads) {
+ Module.expectedDataFileDownloads = 0;
+ Module.finishedDataFileDownloads = 0;
+}
+Module.expectedDataFileDownloads++;
+(function() {
+
+ function runWithFS() {
+
+function assert(check, msg) {
+ if (!check) throw msg + new Error().stack;
+}
+Module['FS_createDataFile']('/', 'binarytrees.lua', [45, 45, 32, 84, 104, 101, 32, 67, 111, 109, 112, 117, 116, 101, 114, 32, 76, 97, 110, 103, 117, 97, 103, 101, 32, 66, 101, 110, 99, 104, 109, 97, 114, 107, 115, 32, 71, 97, 109, 101, 10, 45, 45, 32, 104, 116, 116, 112, 58, 47, 47, 98, 101, 110, 99, 104, 109, 97, 114, 107, 115, 103, 97, 109, 101, 46, 97, 108, 105, 111, 116, 104, 46, 100, 101, 98, 105, 97, 110, 46, 111, 114, 103, 47, 10, 45, 45, 32, 99, 111, 110, 116, 114, 105, 98, 117, 116, 101, 100, 32, 98, 121, 32, 77, 105, 107, 101, 32, 80, 97, 108, 108, 10, 10, 108, 111, 99, 97, 108, 32, 102, 117, 110, 99, 116, 105, 111, 110, 32, 66, 111, 116, 116, 111, 109, 85, 112, 84, 114, 101, 101, 40, 105, 116, 101, 109, 44, 32, 100, 101, 112, 116, 104, 41, 10, 32, 32, 105, 102, 32, 100, 101, 112, 116, 104, 32, 62, 32, 48, 32, 116, 104, 101, 110, 10, 32, 32, 32, 32, 108, 111, 99, 97, 108, 32, 105, 32, 61, 32, 105, 116, 101, 109, 32, 43, 32, 105, 116, 101, 109, 10, 32, 32, 32, 32, 100, 101, 112, 116, 104, 32, 61, 32, 100, 101, 112, 116, 104, 32, 45, 32, 49, 10, 32, 32, 32, 32, 108, 111, 99, 97, 108, 32, 108, 101, 102, 116, 44, 32, 114, 105, 103, 104, 116, 32, 61, 32, 66, 111, 116, 116, 111, 109, 85, 112, 84, 114, 101, 101, 40, 105, 45, 49, 44, 32, 100, 101, 112, 116, 104, 41, 44, 32, 66, 111, 116, 116, 111, 109, 85, 112, 84, 114, 101, 101, 40, 105, 44, 32, 100, 101, 112, 116, 104, 41, 10, 32, 32, 32, 32, 114, 101, 116, 117, 114, 110, 32, 123, 32, 105, 116, 101, 109, 44, 32, 108, 101, 102, 116, 44, 32, 114, 105, 103, 104, 116, 32, 125, 10, 32, 32, 101, 108, 115, 101, 10, 32, 32, 32, 32, 114, 101, 116, 117, 114, 110, 32, 123, 32, 105, 116, 101, 109, 32, 125, 10, 32, 32, 101, 110, 100, 10, 101, 110, 100, 10, 10, 108, 111, 99, 97, 108, 32, 102, 117, 110, 99, 116, 105, 111, 110, 32, 73, 116, 101, 109, 67, 104, 101, 99, 107, 40, 116, 114, 101, 101, 41, 10, 32, 32, 105, 102, 32, 116, 114, 101, 101, 91, 50, 93, 32, 116, 104, 101, 110, 10, 32, 32, 32, 32, 114, 101, 116, 117, 114, 110, 32, 116, 114, 101, 101, 91, 49, 93, 32, 43, 32, 73, 116, 101, 109, 67, 104, 101, 99, 107, 40, 116, 114, 101, 101, 91, 50, 93, 41, 32, 45, 32, 73, 116, 101, 109, 67, 104, 101, 99, 107, 40, 116, 114, 101, 101, 91, 51, 93, 41, 10, 32, 32, 101, 108, 115, 101, 10, 32, 32, 32, 32, 114, 101, 116, 117, 114, 110, 32, 116, 114, 101, 101, 91, 49, 93, 10, 32, 32, 101, 110, 100, 10, 101, 110, 100, 10, 10, 108, 111, 99, 97, 108, 32, 78, 32, 61, 32, 116, 111, 110, 117, 109, 98, 101, 114, 40, 97, 114, 103, 32, 97, 110, 100, 32, 97, 114, 103, 91, 49, 93, 41, 32, 111, 114, 32, 52, 10, 10, 105, 102, 32, 78, 32, 61, 61, 32, 48, 32, 116, 104, 101, 110, 10, 32, 32, 78, 32, 61, 32, 48, 10, 101, 108, 115, 101, 105, 102, 32, 78, 32, 61, 61, 32, 49, 32, 116, 104, 101, 110, 10, 32, 32, 78, 32, 61, 32, 57, 46, 53, 10, 101, 108, 115, 101, 105, 102, 32, 78, 32, 61, 61, 32, 50, 32, 116, 104, 101, 110, 10, 32, 32, 78, 32, 61, 32, 49, 49, 46, 57, 57, 10, 101, 108, 115, 101, 105, 102, 32, 78, 32, 61, 61, 32, 51, 32, 116, 104, 101, 110, 10, 32, 32, 78, 32, 61, 32, 49, 50, 46, 56, 53, 10, 101, 108, 115, 101, 105, 102, 32, 78, 32, 61, 61, 32, 52, 32, 116, 104, 101, 110, 10, 32, 32, 78, 32, 61, 32, 49, 52, 46, 55, 50, 10, 101, 108, 115, 101, 105, 102, 32, 78, 32, 61, 61, 32, 53, 32, 116, 104, 101, 110, 10, 32, 32, 78, 32, 61, 32, 49, 53, 46, 56, 50, 10, 101, 110, 100, 10, 10, 108, 111, 99, 97, 108, 32, 109, 105, 110, 100, 101, 112, 116, 104, 32, 61, 32, 52, 10, 108, 111, 99, 97, 108, 32, 109, 97, 120, 100, 101, 112, 116, 104, 32, 61, 32, 109, 105, 110, 100, 101, 112, 116, 104, 32, 43, 32, 50, 10, 105, 102, 32, 109, 97, 120, 100, 101, 112, 116, 104, 32, 60, 32, 78, 32, 116, 104, 101, 110, 32, 109, 97, 120, 100, 101, 112, 116, 104, 32, 61, 32, 78, 32, 101, 110, 100, 10, 10, 100, 111, 10, 32, 32, 108, 111, 99, 97, 108, 32, 115, 116, 114, 101, 116, 99, 104, 100, 101, 112, 116, 104, 32, 61, 32, 109, 97, 120, 100, 101, 112, 116, 104, 32, 43, 32, 49, 10, 32, 32, 108, 111, 99, 97, 108, 32, 115, 116, 114, 101, 116, 99, 104, 116, 114, 101, 101, 32, 61, 32, 66, 111, 116, 116, 111, 109, 85, 112, 84, 114, 101, 101, 40, 48, 44, 32, 115, 116, 114, 101, 116, 99, 104, 100, 101, 112, 116, 104, 41, 10, 32, 32, 105, 111, 46, 119, 114, 105, 116, 101, 40, 115, 116, 114, 105, 110, 103, 46, 102, 111, 114, 109, 97, 116, 40, 34, 115, 116, 114, 101, 116, 99, 104, 32, 116, 114, 101, 101, 32, 111, 102, 32, 100, 101, 112, 116, 104, 32, 37, 100, 92, 116, 32, 99, 104, 101, 99, 107, 58, 32, 37, 100, 92, 110, 34, 44, 10, 32, 32, 32, 32, 115, 116, 114, 101, 116, 99, 104, 100, 101, 112, 116, 104, 44, 32, 73, 116, 101, 109, 67, 104, 101, 99, 107, 40, 115, 116, 114, 101, 116, 99, 104, 116, 114, 101, 101, 41, 41, 41, 10, 101, 110, 100, 10, 10, 108, 111, 99, 97, 108, 32, 108, 111, 110, 103, 108, 105, 118, 101, 100, 116, 114, 101, 101, 32, 61, 32, 66, 111, 116, 116, 111, 109, 85, 112, 84, 114, 101, 101, 40, 48, 44, 32, 109, 97, 120, 100, 101, 112, 116, 104, 41, 10, 10, 102, 111, 114, 32, 100, 101, 112, 116, 104, 61, 109, 105, 110, 100, 101, 112, 116, 104, 44, 109, 97, 120, 100, 101, 112, 116, 104, 44, 50, 32, 100, 111, 10, 32, 32, 108, 111, 99, 97, 108, 32, 105, 116, 101, 114, 97, 116, 105, 111, 110, 115, 32, 61, 32, 50, 32, 94, 32, 40, 109, 97, 120, 100, 101, 112, 116, 104, 32, 45, 32, 100, 101, 112, 116, 104, 32, 43, 32, 109, 105, 110, 100, 101, 112, 116, 104, 41, 10, 32, 32, 108, 111, 99, 97, 108, 32, 99, 104, 101, 99, 107, 32, 61, 32, 48, 10, 32, 32, 102, 111, 114, 32, 105, 61, 49, 44, 105, 116, 101, 114, 97, 116, 105, 111, 110, 115, 32, 100, 111, 10, 32, 32, 32, 32, 99, 104, 101, 99, 107, 32, 61, 32, 99, 104, 101, 99, 107, 32, 43, 32, 73, 116, 101, 109, 67, 104, 101, 99, 107, 40, 66, 111, 116, 116, 111, 109, 85, 112, 84, 114, 101, 101, 40, 49, 44, 32, 100, 101, 112, 116, 104, 41, 41, 32, 43, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 73, 116, 101, 109, 67, 104, 101, 99, 107, 40, 66, 111, 116, 116, 111, 109, 85, 112, 84, 114, 101, 101, 40, 45, 49, 44, 32, 100, 101, 112, 116, 104, 41, 41, 10, 32, 32, 101, 110, 100, 10, 32, 32, 105, 111, 46, 119, 114, 105, 116, 101, 40, 115, 116, 114, 105, 110, 103, 46, 102, 111, 114, 109, 97, 116, 40, 34, 37, 100, 92, 116, 32, 116, 114, 101, 101, 115, 32, 111, 102, 32, 100, 101, 112, 116, 104, 32, 37, 100, 92, 116, 32, 99, 104, 101, 99, 107, 58, 32, 37, 100, 92, 110, 34, 44, 10, 32, 32, 32, 32, 105, 116, 101, 114, 97, 116, 105, 111, 110, 115, 42, 50, 44, 32, 100, 101, 112, 116, 104, 44, 32, 99, 104, 101, 99, 107, 41, 41, 10, 101, 110, 100, 10, 10, 105, 111, 46, 119, 114, 105, 116, 101, 40, 115, 116, 114, 105, 110, 103, 46, 102, 111, 114, 109, 97, 116, 40, 34, 108, 111, 110, 103, 32, 108, 105, 118, 101, 100, 32, 116, 114, 101, 101, 32, 111, 102, 32, 100, 101, 112, 116, 104, 32, 37, 100, 92, 116, 32, 99, 104, 101, 99, 107, 58, 32, 37, 100, 92, 110, 34, 44, 10, 32, 32, 109, 97, 120, 100, 101, 112, 116, 104, 44, 32, 73, 116, 101, 109, 67, 104, 101, 99, 107, 40, 108, 111, 110, 103, 108, 105, 118, 101, 100, 116, 114, 101, 101, 41, 41, 41, 10], true, true);
+
+ }
+ if (Module['calledRun']) {
+ runWithFS();
+ } else {
+ if (!Module['preRun']) Module['preRun'] = [];
+ Module["preRun"].push(runWithFS); // FS is not initialized yet, wait for it
+ }
+
+})();
+
+// The Module object: Our interface to the outside world. We import
+// and export values on it, and do the work to get that through
+// closure compiler if necessary. There are various ways Module can be used:
+// 1. Not defined. We create it here
+// 2. A function parameter, function(Module) { ..generated code.. }
+// 3. pre-run appended it, var Module = {}; ..generated code..
+// 4. External script tag defines var Module.
+// We need to do an eval in order to handle the closure compiler
+// case, where this code here is minified but Module was defined
+// elsewhere (e.g. case 4 above). We also need to check if Module
+// already exists (e.g. case 3 above).
+// Note that if you want to run closure, and also to use Module
+// after the generated code, you will need to define var Module = {};
+// before the code. Then that object will be used in the code, and you
+// can continue to use Module afterwards as well.
+var Module;
+if (!Module) Module = (typeof Module !== 'undefined' ? Module : null) || {};
+
+// Sometimes an existing Module object exists with properties
+// meant to overwrite the default module functionality. Here
+// we collect those properties and reapply _after_ we configure
+// the current environment's defaults to avoid having to be so
+// defensive during initialization.
+var moduleOverrides = {};
+for (var key in Module) {
+ if (Module.hasOwnProperty(key)) {
+ moduleOverrides[key] = Module[key];
+ }
+}
+
+// The environment setup code below is customized to use Module.
+// *** Environment setup code ***
+var ENVIRONMENT_IS_NODE = typeof process === 'object' && typeof require === 'function';
+var ENVIRONMENT_IS_WEB = typeof window === 'object';
+var ENVIRONMENT_IS_WORKER = typeof importScripts === 'function';
+var ENVIRONMENT_IS_SHELL = !ENVIRONMENT_IS_WEB && !ENVIRONMENT_IS_NODE && !ENVIRONMENT_IS_WORKER;
+
+if (ENVIRONMENT_IS_NODE) {
+ // Expose functionality in the same simple way that the shells work
+ // Note that we pollute the global namespace here, otherwise we break in node
+ if (!Module['print']) Module['print'] = function print(x) {
+ process['stdout'].write(x + '\n');
+ };
+ if (!Module['printErr']) Module['printErr'] = function printErr(x) {
+ process['stderr'].write(x + '\n');
+ };
+
+ var nodeFS = require('fs');
+ var nodePath = require('path');
+
+ Module['read'] = function read(filename, binary) {
+ filename = nodePath['normalize'](filename);
+ var ret = nodeFS['readFileSync'](filename);
+ // The path is absolute if the normalized version is the same as the resolved.
+ if (!ret && filename != nodePath['resolve'](filename)) {
+ filename = path.join(__dirname, '..', 'src', filename);
+ ret = nodeFS['readFileSync'](filename);
+ }
+ if (ret && !binary) ret = ret.toString();
+ return ret;
+ };
+
+ Module['readBinary'] = function readBinary(filename) { return Module['read'](filename, true) };
+
+ Module['load'] = function load(f) {
+ globalEval(read(f));
+ };
+
+ Module['arguments'] = process['argv'].slice(2);
+
+ module['exports'] = Module;
+}
+else if (ENVIRONMENT_IS_SHELL) {
+ if (!Module['print']) Module['print'] = print;
+ if (typeof printErr != 'undefined') Module['printErr'] = printErr; // not present in v8 or older sm
+
+ if (typeof read != 'undefined') {
+ Module['read'] = read;
+ } else {
+ Module['read'] = function read() { throw 'no read() available (jsc?)' };
+ }
+
+ Module['readBinary'] = function readBinary(f) {
+ return read(f, 'binary');
+ };
+
+ if (typeof scriptArgs != 'undefined') {
+ Module['arguments'] = scriptArgs;
+ } else if (typeof arguments != 'undefined') {
+ Module['arguments'] = arguments;
+ }
+
+ this['Module'] = Module;
+
+ eval("if (typeof gc === 'function' && gc.toString().indexOf('[native code]') > 0) var gc = undefined"); // wipe out the SpiderMonkey shell 'gc' function, which can confuse closure (uses it as a minified name, and it is then initted to a non-falsey value unexpectedly)
+}
+else if (ENVIRONMENT_IS_WEB || ENVIRONMENT_IS_WORKER) {
+ Module['read'] = function read(url) {
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, false);
+ xhr.send(null);
+ return xhr.responseText;
+ };
+
+ if (typeof arguments != 'undefined') {
+ Module['arguments'] = arguments;
+ }
+
+ if (typeof console !== 'undefined') {
+ if (!Module['print']) Module['print'] = function print(x) {
+ console.log(x);
+ };
+ if (!Module['printErr']) Module['printErr'] = function printErr(x) {
+ console.log(x);
+ };
+ } else {
+ // Probably a worker, and without console.log. We can do very little here...
+ var TRY_USE_DUMP = false;
+ if (!Module['print']) Module['print'] = (TRY_USE_DUMP && (typeof(dump) !== "undefined") ? (function(x) {
+ dump(x);
+ }) : (function(x) {
+ // self.postMessage(x); // enable this if you want stdout to be sent as messages
+ }));
+ }
+
+ if (ENVIRONMENT_IS_WEB) {
+ window['Module'] = Module;
+ } else {
+ Module['load'] = importScripts;
+ }
+}
+else {
+ // Unreachable because SHELL is dependant on the others
+ throw 'Unknown runtime environment. Where are we?';
+}
+
+function globalEval(x) {
+ eval.call(null, x);
+}
+if (!Module['load'] == 'undefined' && Module['read']) {
+ Module['load'] = function load(f) {
+ globalEval(Module['read'](f));
+ };
+}
+if (!Module['print']) {
+ Module['print'] = function(){};
+}
+if (!Module['printErr']) {
+ Module['printErr'] = Module['print'];
+}
+if (!Module['arguments']) {
+ Module['arguments'] = [];
+}
+// *** Environment setup code ***
+
+// Closure helpers
+Module.print = Module['print'];
+Module.printErr = Module['printErr'];
+
+// Callbacks
+Module['preRun'] = [];
+Module['postRun'] = [];
+
+// Merge back in the overrides
+for (var key in moduleOverrides) {
+ if (moduleOverrides.hasOwnProperty(key)) {
+ Module[key] = moduleOverrides[key];
+ }
+}
+
+
+
+// === Auto-generated preamble library stuff ===
+
+//========================================
+// Runtime code shared with compiler
+//========================================
+
+var Runtime = {
+ stackSave: function () {
+ return STACKTOP;
+ },
+ stackRestore: function (stackTop) {
+ STACKTOP = stackTop;
+ },
+ forceAlign: function (target, quantum) {
+ quantum = quantum || 4;
+ if (quantum == 1) return target;
+ if (isNumber(target) && isNumber(quantum)) {
+ return Math.ceil(target/quantum)*quantum;
+ } else if (isNumber(quantum) && isPowerOfTwo(quantum)) {
+ return '(((' +target + ')+' + (quantum-1) + ')&' + -quantum + ')';
+ }
+ return 'Math.ceil((' + target + ')/' + quantum + ')*' + quantum;
+ },
+ isNumberType: function (type) {
+ return type in Runtime.INT_TYPES || type in Runtime.FLOAT_TYPES;
+ },
+ isPointerType: function isPointerType(type) {
+ return type[type.length-1] == '*';
+},
+ isStructType: function isStructType(type) {
+ if (isPointerType(type)) return false;
+ if (isArrayType(type)) return true;
+ if (/<?\{ ?[^}]* ?\}>?/.test(type)) return true; // { i32, i8 } etc. - anonymous struct types
+ // See comment in isStructPointerType()
+ return type[0] == '%';
+},
+ INT_TYPES: {"i1":0,"i8":0,"i16":0,"i32":0,"i64":0},
+ FLOAT_TYPES: {"float":0,"double":0},
+ or64: function (x, y) {
+ var l = (x | 0) | (y | 0);
+ var h = (Math.round(x / 4294967296) | Math.round(y / 4294967296)) * 4294967296;
+ return l + h;
+ },
+ and64: function (x, y) {
+ var l = (x | 0) & (y | 0);
+ var h = (Math.round(x / 4294967296) & Math.round(y / 4294967296)) * 4294967296;
+ return l + h;
+ },
+ xor64: function (x, y) {
+ var l = (x | 0) ^ (y | 0);
+ var h = (Math.round(x / 4294967296) ^ Math.round(y / 4294967296)) * 4294967296;
+ return l + h;
+ },
+ getNativeTypeSize: function (type) {
+ switch (type) {
+ case 'i1': case 'i8': return 1;
+ case 'i16': return 2;
+ case 'i32': return 4;
+ case 'i64': return 8;
+ case 'float': return 4;
+ case 'double': return 8;
+ default: {
+ if (type[type.length-1] === '*') {
+ return Runtime.QUANTUM_SIZE; // A pointer
+ } else if (type[0] === 'i') {
+ var bits = parseInt(type.substr(1));
+ assert(bits % 8 === 0);
+ return bits/8;
+ } else {
+ return 0;
+ }
+ }
+ }
+ },
+ getNativeFieldSize: function (type) {
+ return Math.max(Runtime.getNativeTypeSize(type), Runtime.QUANTUM_SIZE);
+ },
+ dedup: function dedup(items, ident) {
+ var seen = {};
+ if (ident) {
+ return items.filter(function(item) {
+ if (seen[item[ident]]) return false;
+ seen[item[ident]] = true;
+ return true;
+ });
+ } else {
+ return items.filter(function(item) {
+ if (seen[item]) return false;
+ seen[item] = true;
+ return true;
+ });
+ }
+},
+ set: function set() {
+ var args = typeof arguments[0] === 'object' ? arguments[0] : arguments;
+ var ret = {};
+ for (var i = 0; i < args.length; i++) {
+ ret[args[i]] = 0;
+ }
+ return ret;
+},
+ STACK_ALIGN: 8,
+ getAlignSize: function (type, size, vararg) {
+ // we align i64s and doubles on 64-bit boundaries, unlike x86
+ if (!vararg && (type == 'i64' || type == 'double')) return 8;
+ if (!type) return Math.min(size, 8); // align structures internally to 64 bits
+ return Math.min(size || (type ? Runtime.getNativeFieldSize(type) : 0), Runtime.QUANTUM_SIZE);
+ },
+ calculateStructAlignment: function calculateStructAlignment(type) {
+ type.flatSize = 0;
+ type.alignSize = 0;
+ var diffs = [];
+ var prev = -1;
+ var index = 0;
+ type.flatIndexes = type.fields.map(function(field) {
+ index++;
+ var size, alignSize;
+ if (Runtime.isNumberType(field) || Runtime.isPointerType(field)) {
+ size = Runtime.getNativeTypeSize(field); // pack char; char; in structs, also char[X]s.
+ alignSize = Runtime.getAlignSize(field, size);
+ } else if (Runtime.isStructType(field)) {
+ if (field[1] === '0') {
+ // this is [0 x something]. When inside another structure like here, it must be at the end,
+ // and it adds no size
+ // XXX this happens in java-nbody for example... assert(index === type.fields.length, 'zero-length in the middle!');
+ size = 0;
+ if (Types.types[field]) {
+ alignSize = Runtime.getAlignSize(null, Types.types[field].alignSize);
+ } else {
+ alignSize = type.alignSize || QUANTUM_SIZE;
+ }
+ } else {
+ size = Types.types[field].flatSize;
+ alignSize = Runtime.getAlignSize(null, Types.types[field].alignSize);
+ }
+ } else if (field[0] == 'b') {
+ // bN, large number field, like a [N x i8]
+ size = field.substr(1)|0;
+ alignSize = 1;
+ } else if (field[0] === '<') {
+ // vector type
+ size = alignSize = Types.types[field].flatSize; // fully aligned
+ } else if (field[0] === 'i') {
+ // illegal integer field, that could not be legalized because it is an internal structure field
+ // it is ok to have such fields, if we just use them as markers of field size and nothing more complex
+ size = alignSize = parseInt(field.substr(1))/8;
+ assert(size % 1 === 0, 'cannot handle non-byte-size field ' + field);
+ } else {
+ assert(false, 'invalid type for calculateStructAlignment');
+ }
+ if (type.packed) alignSize = 1;
+ type.alignSize = Math.max(type.alignSize, alignSize);
+ var curr = Runtime.alignMemory(type.flatSize, alignSize); // if necessary, place this on aligned memory
+ type.flatSize = curr + size;
+ if (prev >= 0) {
+ diffs.push(curr-prev);
+ }
+ prev = curr;
+ return curr;
+ });
+ if (type.name_ && type.name_[0] === '[') {
+ // arrays have 2 elements, so we get the proper difference. then we scale here. that way we avoid
+ // allocating a potentially huge array for [999999 x i8] etc.
+ type.flatSize = parseInt(type.name_.substr(1))*type.flatSize/2;
+ }
+ type.flatSize = Runtime.alignMemory(type.flatSize, type.alignSize);
+ if (diffs.length == 0) {
+ type.flatFactor = type.flatSize;
+ } else if (Runtime.dedup(diffs).length == 1) {
+ type.flatFactor = diffs[0];
+ }
+ type.needsFlattening = (type.flatFactor != 1);
+ return type.flatIndexes;
+ },
+ generateStructInfo: function (struct, typeName, offset) {
+ var type, alignment;
+ if (typeName) {
+ offset = offset || 0;
+ type = (typeof Types === 'undefined' ? Runtime.typeInfo : Types.types)[typeName];
+ if (!type) return null;
+ if (type.fields.length != struct.length) {
+ printErr('Number of named fields must match the type for ' + typeName + ': possibly duplicate struct names. Cannot return structInfo');
+ return null;
+ }
+ alignment = type.flatIndexes;
+ } else {
+ var type = { fields: struct.map(function(item) { return item[0] }) };
+ alignment = Runtime.calculateStructAlignment(type);
+ }
+ var ret = {
+ __size__: type.flatSize
+ };
+ if (typeName) {
+ struct.forEach(function(item, i) {
+ if (typeof item === 'string') {
+ ret[item] = alignment[i] + offset;
+ } else {
+ // embedded struct
+ var key;
+ for (var k in item) key = k;
+ ret[key] = Runtime.generateStructInfo(item[key], type.fields[i], alignment[i]);
+ }
+ });
+ } else {
+ struct.forEach(function(item, i) {
+ ret[item[1]] = alignment[i];
+ });
+ }
+ return ret;
+ },
+ dynCall: function (sig, ptr, args) {
+ if (args && args.length) {
+ if (!args.splice) args = Array.prototype.slice.call(args);
+ args.splice(0, 0, ptr);
+ return Module['dynCall_' + sig].apply(null, args);
+ } else {
+ return Module['dynCall_' + sig].call(null, ptr);
+ }
+ },
+ functionPointers: [],
+ addFunction: function (func) {
+ for (var i = 0; i < Runtime.functionPointers.length; i++) {
+ if (!Runtime.functionPointers[i]) {
+ Runtime.functionPointers[i] = func;
+ return 2*(1 + i);
+ }
+ }
+ throw 'Finished up all reserved function pointers. Use a higher value for RESERVED_FUNCTION_POINTERS.';
+ },
+ removeFunction: function (index) {
+ Runtime.functionPointers[(index-2)/2] = null;
+ },
+ getAsmConst: function (code, numArgs) {
+ // code is a constant string on the heap, so we can cache these
+ if (!Runtime.asmConstCache) Runtime.asmConstCache = {};
+ var func = Runtime.asmConstCache[code];
+ if (func) return func;
+ var args = [];
+ for (var i = 0; i < numArgs; i++) {
+ args.push(String.fromCharCode(36) + i); // $0, $1 etc
+ }
+ var source = Pointer_stringify(code);
+ if (source[0] === '"') {
+ // tolerate EM_ASM("..code..") even though EM_ASM(..code..) is correct
+ if (source.indexOf('"', 1) === source.length-1) {
+ source = source.substr(1, source.length-2);
+ } else {
+ // something invalid happened, e.g. EM_ASM("..code($0)..", input)
+ abort('invalid EM_ASM input |' + source + '|. Please use EM_ASM(..code..) (no quotes) or EM_ASM({ ..code($0).. }, input) (to input values)');
+ }
+ }
+ try {
+ var evalled = eval('(function(' + args.join(',') + '){ ' + source + ' })'); // new Function does not allow upvars in node
+ } catch(e) {
+ Module.printErr('error in executing inline EM_ASM code: ' + e + ' on: \n\n' + source + '\n\nwith args |' + args + '| (make sure to use the right one out of EM_ASM, EM_ASM_ARGS, etc.)');
+ throw e;
+ }
+ return Runtime.asmConstCache[code] = evalled;
+ },
+ warnOnce: function (text) {
+ if (!Runtime.warnOnce.shown) Runtime.warnOnce.shown = {};
+ if (!Runtime.warnOnce.shown[text]) {
+ Runtime.warnOnce.shown[text] = 1;
+ Module.printErr(text);
+ }
+ },
+ funcWrappers: {},
+ getFuncWrapper: function (func, sig) {
+ assert(sig);
+ if (!Runtime.funcWrappers[func]) {
+ Runtime.funcWrappers[func] = function dynCall_wrapper() {
+ return Runtime.dynCall(sig, func, arguments);
+ };
+ }
+ return Runtime.funcWrappers[func];
+ },
+ UTF8Processor: function () {
+ var buffer = [];
+ var needed = 0;
+ this.processCChar = function (code) {
+ code = code & 0xFF;
+
+ if (buffer.length == 0) {
+ if ((code & 0x80) == 0x00) { // 0xxxxxxx
+ return String.fromCharCode(code);
+ }
+ buffer.push(code);
+ if ((code & 0xE0) == 0xC0) { // 110xxxxx
+ needed = 1;
+ } else if ((code & 0xF0) == 0xE0) { // 1110xxxx
+ needed = 2;
+ } else { // 11110xxx
+ needed = 3;
+ }
+ return '';
+ }
+
+ if (needed) {
+ buffer.push(code);
+ needed--;
+ if (needed > 0) return '';
+ }
+
+ var c1 = buffer[0];
+ var c2 = buffer[1];
+ var c3 = buffer[2];
+ var c4 = buffer[3];
+ var ret;
+ if (buffer.length == 2) {
+ ret = String.fromCharCode(((c1 & 0x1F) << 6) | (c2 & 0x3F));
+ } else if (buffer.length == 3) {
+ ret = String.fromCharCode(((c1 & 0x0F) << 12) | ((c2 & 0x3F) << 6) | (c3 & 0x3F));
+ } else {
+ // http://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
+ var codePoint = ((c1 & 0x07) << 18) | ((c2 & 0x3F) << 12) |
+ ((c3 & 0x3F) << 6) | (c4 & 0x3F);
+ ret = String.fromCharCode(
+ Math.floor((codePoint - 0x10000) / 0x400) + 0xD800,
+ (codePoint - 0x10000) % 0x400 + 0xDC00);
+ }
+ buffer.length = 0;
+ return ret;
+ }
+ this.processJSString = function processJSString(string) {
+ /* TODO: use TextEncoder when present,
+ var encoder = new TextEncoder();
+ encoder['encoding'] = "utf-8";
+ var utf8Array = encoder['encode'](aMsg.data);
+ */
+ string = unescape(encodeURIComponent(string));
+ var ret = [];
+ for (var i = 0; i < string.length; i++) {
+ ret.push(string.charCodeAt(i));
+ }
+ return ret;
+ }
+ },
+ getCompilerSetting: function (name) {
+ throw 'You must build with -s RETAIN_COMPILER_SETTINGS=1 for Runtime.getCompilerSetting or emscripten_get_compiler_setting to work';
+ },
+ stackAlloc: function (size) { var ret = STACKTOP;STACKTOP = (STACKTOP + size)|0;STACKTOP = (((STACKTOP)+7)&-8); return ret; },
+ staticAlloc: function (size) { var ret = STATICTOP;STATICTOP = (STATICTOP + size)|0;STATICTOP = (((STATICTOP)+7)&-8); return ret; },
+ dynamicAlloc: function (size) { var ret = DYNAMICTOP;DYNAMICTOP = (DYNAMICTOP + size)|0;DYNAMICTOP = (((DYNAMICTOP)+7)&-8); if (DYNAMICTOP >= TOTAL_MEMORY) enlargeMemory();; return ret; },
+ alignMemory: function (size,quantum) { var ret = size = Math.ceil((size)/(quantum ? quantum : 8))*(quantum ? quantum : 8); return ret; },
+ makeBigInt: function (low,high,unsigned) { var ret = (unsigned ? ((+((low>>>0)))+((+((high>>>0)))*(+4294967296))) : ((+((low>>>0)))+((+((high|0)))*(+4294967296)))); return ret; },
+ GLOBAL_BASE: 8,
+ QUANTUM_SIZE: 4,
+ __dummy__: 0
+}
+
+
+Module['Runtime'] = Runtime;
+
+
+
+
+
+
+
+
+
+//========================================
+// Runtime essentials
+//========================================
+
+var __THREW__ = 0; // Used in checking for thrown exceptions.
+
+var ABORT = false; // whether we are quitting the application. no code should run after this. set in exit() and abort()
+var EXITSTATUS = 0;
+
+var undef = 0;
+// tempInt is used for 32-bit signed values or smaller. tempBigInt is used
+// for 32-bit unsigned values or more than 32 bits. TODO: audit all uses of tempInt
+var tempValue, tempInt, tempBigInt, tempInt2, tempBigInt2, tempPair, tempBigIntI, tempBigIntR, tempBigIntS, tempBigIntP, tempBigIntD, tempDouble, tempFloat;
+var tempI64, tempI64b;
+var tempRet0, tempRet1, tempRet2, tempRet3, tempRet4, tempRet5, tempRet6, tempRet7, tempRet8, tempRet9;
+
+function assert(condition, text) {
+ if (!condition) {
+ abort('Assertion failed: ' + text);
+ }
+}
+
+var globalScope = this;
+
+// C calling interface. A convenient way to call C functions (in C files, or
+// defined with extern "C").
+//
+// Note: LLVM optimizations can inline and remove functions, after which you will not be
+// able to call them. Closure can also do so. To avoid that, add your function to
+// the exports using something like
+//
+// -s EXPORTED_FUNCTIONS='["_main", "_myfunc"]'
+//
+// @param ident The name of the C function (note that C++ functions will be name-mangled - use extern "C")
+// @param returnType The return type of the function, one of the JS types 'number', 'string' or 'array' (use 'number' for any C pointer, and
+// 'array' for JavaScript arrays and typed arrays; note that arrays are 8-bit).
+// @param argTypes An array of the types of arguments for the function (if there are no arguments, this can be ommitted). Types are as in returnType,
+// except that 'array' is not possible (there is no way for us to know the length of the array)
+// @param args An array of the arguments to the function, as native JS values (as in returnType)
+// Note that string arguments will be stored on the stack (the JS string will become a C string on the stack).
+// @return The return value, as a native JS value (as in returnType)
+function ccall(ident, returnType, argTypes, args) {
+ return ccallFunc(getCFunc(ident), returnType, argTypes, args);
+}
+Module["ccall"] = ccall;
+
+// Returns the C function with a specified identifier (for C++, you need to do manual name mangling)
+function getCFunc(ident) {
+ try {
+ var func = Module['_' + ident]; // closure exported function
+ if (!func) func = eval('_' + ident); // explicit lookup
+ } catch(e) {
+ }
+ assert(func, 'Cannot call unknown function ' + ident + ' (perhaps LLVM optimizations or closure removed it?)');
+ return func;
+}
+
+// Internal function that does a C call using a function, not an identifier
+function ccallFunc(func, returnType, argTypes, args) {
+ var stack = 0;
+ function toC(value, type) {
+ if (type == 'string') {
+ if (value === null || value === undefined || value === 0) return 0; // null string
+ value = intArrayFromString(value);
+ type = 'array';
+ }
+ if (type == 'array') {
+ if (!stack) stack = Runtime.stackSave();
+ var ret = Runtime.stackAlloc(value.length);
+ writeArrayToMemory(value, ret);
+ return ret;
+ }
+ return value;
+ }
+ function fromC(value, type) {
+ if (type == 'string') {
+ return Pointer_stringify(value);
+ }
+ assert(type != 'array');
+ return value;
+ }
+ var i = 0;
+ var cArgs = args ? args.map(function(arg) {
+ return toC(arg, argTypes[i++]);
+ }) : [];
+ var ret = fromC(func.apply(null, cArgs), returnType);
+ if (stack) Runtime.stackRestore(stack);
+ return ret;
+}
+
+// Returns a native JS wrapper for a C function. This is similar to ccall, but
+// returns a function you can call repeatedly in a normal way. For example:
+//
+// var my_function = cwrap('my_c_function', 'number', ['number', 'number']);
+// alert(my_function(5, 22));
+// alert(my_function(99, 12));
+//
+function cwrap(ident, returnType, argTypes) {
+ var func = getCFunc(ident);
+ return function() {
+ return ccallFunc(func, returnType, argTypes, Array.prototype.slice.call(arguments));
+ }
+}
+Module["cwrap"] = cwrap;
+
+// Sets a value in memory in a dynamic way at run-time. Uses the
+// type data. This is the same as makeSetValue, except that
+// makeSetValue is done at compile-time and generates the needed
+// code then, whereas this function picks the right code at
+// run-time.
+// Note that setValue and getValue only do *aligned* writes and reads!
+// Note that ccall uses JS types as for defining types, while setValue and
+// getValue need LLVM types ('i8', 'i32') - this is a lower-level operation
+function setValue(ptr, value, type, noSafe) {
+ type = type || 'i8';
+ if (type.charAt(type.length-1) === '*') type = 'i32'; // pointers are 32-bit
+ switch(type) {
+ case 'i1': HEAP8[(ptr)]=value; break;
+ case 'i8': HEAP8[(ptr)]=value; break;
+ case 'i16': HEAP16[((ptr)>>1)]=value; break;
+ case 'i32': HEAP32[((ptr)>>2)]=value; break;
+ case 'i64': (tempI64 = [value>>>0,(tempDouble=value,(+(Math_abs(tempDouble))) >= (+1) ? (tempDouble > (+0) ? ((Math_min((+(Math_floor((tempDouble)/(+4294967296)))), (+4294967295)))|0)>>>0 : (~~((+(Math_ceil((tempDouble - +(((~~(tempDouble)))>>>0))/(+4294967296))))))>>>0) : 0)],HEAP32[((ptr)>>2)]=tempI64[0],HEAP32[(((ptr)+(4))>>2)]=tempI64[1]); break;
+ case 'float': HEAPF32[((ptr)>>2)]=value; break;
+ case 'double': HEAPF64[((ptr)>>3)]=value; break;
+ default: abort('invalid type for setValue: ' + type);
+ }
+}
+Module['setValue'] = setValue;
+
+// Parallel to setValue.
+function getValue(ptr, type, noSafe) {
+ type = type || 'i8';
+ if (type.charAt(type.length-1) === '*') type = 'i32'; // pointers are 32-bit
+ switch(type) {
+ case 'i1': return HEAP8[(ptr)];
+ case 'i8': return HEAP8[(ptr)];
+ case 'i16': return HEAP16[((ptr)>>1)];
+ case 'i32': return HEAP32[((ptr)>>2)];
+ case 'i64': return HEAP32[((ptr)>>2)];
+ case 'float': return HEAPF32[((ptr)>>2)];
+ case 'double': return HEAPF64[((ptr)>>3)];
+ default: abort('invalid type for setValue: ' + type);
+ }
+ return null;
+}
+Module['getValue'] = getValue;
+
+var ALLOC_NORMAL = 0; // Tries to use _malloc()
+var ALLOC_STACK = 1; // Lives for the duration of the current function call
+var ALLOC_STATIC = 2; // Cannot be freed
+var ALLOC_DYNAMIC = 3; // Cannot be freed except through sbrk
+var ALLOC_NONE = 4; // Do not allocate
+Module['ALLOC_NORMAL'] = ALLOC_NORMAL;
+Module['ALLOC_STACK'] = ALLOC_STACK;
+Module['ALLOC_STATIC'] = ALLOC_STATIC;
+Module['ALLOC_DYNAMIC'] = ALLOC_DYNAMIC;
+Module['ALLOC_NONE'] = ALLOC_NONE;
+
+// allocate(): This is for internal use. You can use it yourself as well, but the interface
+// is a little tricky (see docs right below). The reason is that it is optimized
+// for multiple syntaxes to save space in generated code. So you should
+// normally not use allocate(), and instead allocate memory using _malloc(),
+// initialize it with setValue(), and so forth.
+// @slab: An array of data, or a number. If a number, then the size of the block to allocate,
+// in *bytes* (note that this is sometimes confusing: the next parameter does not
+// affect this!)
+// @types: Either an array of types, one for each byte (or 0 if no type at that position),
+// or a single type which is used for the entire block. This only matters if there
+// is initial data - if @slab is a number, then this does not matter at all and is
+// ignored.
+// @allocator: How to allocate memory, see ALLOC_*
+function allocate(slab, types, allocator, ptr) {
+ var zeroinit, size;
+ if (typeof slab === 'number') {
+ zeroinit = true;
+ size = slab;
+ } else {
+ zeroinit = false;
+ size = slab.length;
+ }
+
+ var singleType = typeof types === 'string' ? types : null;
+
+ var ret;
+ if (allocator == ALLOC_NONE) {
+ ret = ptr;
+ } else {
+ ret = [_malloc, Runtime.stackAlloc, Runtime.staticAlloc, Runtime.dynamicAlloc][allocator === undefined ? ALLOC_STATIC : allocator](Math.max(size, singleType ? 1 : types.length));
+ }
+
+ if (zeroinit) {
+ var ptr = ret, stop;
+ assert((ret & 3) == 0);
+ stop = ret + (size & ~3);
+ for (; ptr < stop; ptr += 4) {
+ HEAP32[((ptr)>>2)]=0;
+ }
+ stop = ret + size;
+ while (ptr < stop) {
+ HEAP8[((ptr++)|0)]=0;
+ }
+ return ret;
+ }
+
+ if (singleType === 'i8') {
+ if (slab.subarray || slab.slice) {
+ HEAPU8.set(slab, ret);
+ } else {
+ HEAPU8.set(new Uint8Array(slab), ret);
+ }
+ return ret;
+ }
+
+ var i = 0, type, typeSize, previousType;
+ while (i < size) {
+ var curr = slab[i];
+
+ if (typeof curr === 'function') {
+ curr = Runtime.getFunctionIndex(curr);
+ }
+
+ type = singleType || types[i];
+ if (type === 0) {
+ i++;
+ continue;
+ }
+
+ if (type == 'i64') type = 'i32'; // special case: we have one i32 here, and one i32 later
+
+ setValue(ret+i, curr, type);
+
+ // no need to look up size unless type changes, so cache it
+ if (previousType !== type) {
+ typeSize = Runtime.getNativeTypeSize(type);
+ previousType = type;
+ }
+ i += typeSize;
+ }
+
+ return ret;
+}
+Module['allocate'] = allocate;
+
+function Pointer_stringify(ptr, /* optional */ length) {
+ // TODO: use TextDecoder
+ // Find the length, and check for UTF while doing so
+ var hasUtf = false;
+ var t;
+ var i = 0;
+ while (1) {
+ t = HEAPU8[(((ptr)+(i))|0)];
+ if (t >= 128) hasUtf = true;
+ else if (t == 0 && !length) break;
+ i++;
+ if (length && i == length) break;
+ }
+ if (!length) length = i;
+
+ var ret = '';
+
+ if (!hasUtf) {
+ var MAX_CHUNK = 1024; // split up into chunks, because .apply on a huge string can overflow the stack
+ var curr;
+ while (length > 0) {
+ curr = String.fromCharCode.apply(String, HEAPU8.subarray(ptr, ptr + Math.min(length, MAX_CHUNK)));
+ ret = ret ? ret + curr : curr;
+ ptr += MAX_CHUNK;
+ length -= MAX_CHUNK;
+ }
+ return ret;
+ }
+
+ var utf8 = new Runtime.UTF8Processor();
+ for (i = 0; i < length; i++) {
+ t = HEAPU8[(((ptr)+(i))|0)];
+ ret += utf8.processCChar(t);
+ }
+ return ret;
+}
+Module['Pointer_stringify'] = Pointer_stringify;
+
+// Given a pointer 'ptr' to a null-terminated UTF16LE-encoded string in the emscripten HEAP, returns
+// a copy of that string as a Javascript String object.
+function UTF16ToString(ptr) {
+ var i = 0;
+
+ var str = '';
+ while (1) {
+ var codeUnit = HEAP16[(((ptr)+(i*2))>>1)];
+ if (codeUnit == 0)
+ return str;
+ ++i;
+ // fromCharCode constructs a character from a UTF-16 code unit, so we can pass the UTF16 string right through.
+ str += String.fromCharCode(codeUnit);
+ }
+}
+Module['UTF16ToString'] = UTF16ToString;
+
+// Copies the given Javascript String object 'str' to the emscripten HEAP at address 'outPtr',
+// null-terminated and encoded in UTF16LE form. The copy will require at most (str.length*2+1)*2 bytes of space in the HEAP.
+function stringToUTF16(str, outPtr) {
+ for(var i = 0; i < str.length; ++i) {
+ // charCodeAt returns a UTF-16 encoded code unit, so it can be directly written to the HEAP.
+ var codeUnit = str.charCodeAt(i); // possibly a lead surrogate
+ HEAP16[(((outPtr)+(i*2))>>1)]=codeUnit;
+ }
+ // Null-terminate the pointer to the HEAP.
+ HEAP16[(((outPtr)+(str.length*2))>>1)]=0;
+}
+Module['stringToUTF16'] = stringToUTF16;
+
+// Given a pointer 'ptr' to a null-terminated UTF32LE-encoded string in the emscripten HEAP, returns
+// a copy of that string as a Javascript String object.
+function UTF32ToString(ptr) {
+ var i = 0;
+
+ var str = '';
+ while (1) {
+ var utf32 = HEAP32[(((ptr)+(i*4))>>2)];
+ if (utf32 == 0)
+ return str;
+ ++i;
+ // Gotcha: fromCharCode constructs a character from a UTF-16 encoded code (pair), not from a Unicode code point! So encode the code point to UTF-16 for constructing.
+ if (utf32 >= 0x10000) {
+ var ch = utf32 - 0x10000;
+ str += String.fromCharCode(0xD800 | (ch >> 10), 0xDC00 | (ch & 0x3FF));
+ } else {
+ str += String.fromCharCode(utf32);
+ }
+ }
+}
+Module['UTF32ToString'] = UTF32ToString;
+
+// Copies the given Javascript String object 'str' to the emscripten HEAP at address 'outPtr',
+// null-terminated and encoded in UTF32LE form. The copy will require at most (str.length+1)*4 bytes of space in the HEAP,
+// but can use less, since str.length does not return the number of characters in the string, but the number of UTF-16 code units in the string.
+function stringToUTF32(str, outPtr) {
+ var iChar = 0;
+ for(var iCodeUnit = 0; iCodeUnit < str.length; ++iCodeUnit) {
+ // Gotcha: charCodeAt returns a 16-bit word that is a UTF-16 encoded code unit, not a Unicode code point of the character! We must decode the string to UTF-32 to the heap.
+ var codeUnit = str.charCodeAt(iCodeUnit); // possibly a lead surrogate
+ if (codeUnit >= 0xD800 && codeUnit <= 0xDFFF) {
+ var trailSurrogate = str.charCodeAt(++iCodeUnit);
+ codeUnit = 0x10000 + ((codeUnit & 0x3FF) << 10) | (trailSurrogate & 0x3FF);
+ }
+ HEAP32[(((outPtr)+(iChar*4))>>2)]=codeUnit;
+ ++iChar;
+ }
+ // Null-terminate the pointer to the HEAP.
+ HEAP32[(((outPtr)+(iChar*4))>>2)]=0;
+}
+Module['stringToUTF32'] = stringToUTF32;
+
+function demangle(func) {
+ var i = 3;
+ // params, etc.
+ var basicTypes = {
+ 'v': 'void',
+ 'b': 'bool',
+ 'c': 'char',
+ 's': 'short',
+ 'i': 'int',
+ 'l': 'long',
+ 'f': 'float',
+ 'd': 'double',
+ 'w': 'wchar_t',
+ 'a': 'signed char',
+ 'h': 'unsigned char',
+ 't': 'unsigned short',
+ 'j': 'unsigned int',
+ 'm': 'unsigned long',
+ 'x': 'long long',
+ 'y': 'unsigned long long',
+ 'z': '...'
+ };
+ var subs = [];
+ var first = true;
+ function dump(x) {
+ //return;
+ if (x) Module.print(x);
+ Module.print(func);
+ var pre = '';
+ for (var a = 0; a < i; a++) pre += ' ';
+ Module.print (pre + '^');
+ }
+ function parseNested() {
+ i++;
+ if (func[i] === 'K') i++; // ignore const
+ var parts = [];
+ while (func[i] !== 'E') {
+ if (func[i] === 'S') { // substitution
+ i++;
+ var next = func.indexOf('_', i);
+ var num = func.substring(i, next) || 0;
+ parts.push(subs[num] || '?');
+ i = next+1;
+ continue;
+ }
+ if (func[i] === 'C') { // constructor
+ parts.push(parts[parts.length-1]);
+ i += 2;
+ continue;
+ }
+ var size = parseInt(func.substr(i));
+ var pre = size.toString().length;
+ if (!size || !pre) { i--; break; } // counter i++ below us
+ var curr = func.substr(i + pre, size);
+ parts.push(curr);
+ subs.push(curr);
+ i += pre + size;
+ }
+ i++; // skip E
+ return parts;
+ }
+ function parse(rawList, limit, allowVoid) { // main parser
+ limit = limit || Infinity;
+ var ret = '', list = [];
+ function flushList() {
+ return '(' + list.join(', ') + ')';
+ }
+ var name;
+ if (func[i] === 'N') {
+ // namespaced N-E
+ name = parseNested().join('::');
+ limit--;
+ if (limit === 0) return rawList ? [name] : name;
+ } else {
+ // not namespaced
+ if (func[i] === 'K' || (first && func[i] === 'L')) i++; // ignore const and first 'L'
+ var size = parseInt(func.substr(i));
+ if (size) {
+ var pre = size.toString().length;
+ name = func.substr(i + pre, size);
+ i += pre + size;
+ }
+ }
+ first = false;
+ if (func[i] === 'I') {
+ i++;
+ var iList = parse(true);
+ var iRet = parse(true, 1, true);
+ ret += iRet[0] + ' ' + name + '<' + iList.join(', ') + '>';
+ } else {
+ ret = name;
+ }
+ paramLoop: while (i < func.length && limit-- > 0) {
+ //dump('paramLoop');
+ var c = func[i++];
+ if (c in basicTypes) {
+ list.push(basicTypes[c]);
+ } else {
+ switch (c) {
+ case 'P': list.push(parse(true, 1, true)[0] + '*'); break; // pointer
+ case 'R': list.push(parse(true, 1, true)[0] + '&'); break; // reference
+ case 'L': { // literal
+ i++; // skip basic type
+ var end = func.indexOf('E', i);
+ var size = end - i;
+ list.push(func.substr(i, size));
+ i += size + 2; // size + 'EE'
+ break;
+ }
+ case 'A': { // array
+ var size = parseInt(func.substr(i));
+ i += size.toString().length;
+ if (func[i] !== '_') throw '?';
+ i++; // skip _
+ list.push(parse(true, 1, true)[0] + ' [' + size + ']');
+ break;
+ }
+ case 'E': break paramLoop;
+ default: ret += '?' + c; break paramLoop;
+ }
+ }
+ }
+ if (!allowVoid && list.length === 1 && list[0] === 'void') list = []; // avoid (void)
+ if (rawList) {
+ if (ret) {
+ list.push(ret + '?');
+ }
+ return list;
+ } else {
+ return ret + flushList();
+ }
+ }
+ try {
+ // Special-case the entry point, since its name differs from other name mangling.
+ if (func == 'Object._main' || func == '_main') {
+ return 'main()';
+ }
+ if (typeof func === 'number') func = Pointer_stringify(func);
+ if (func[0] !== '_') return func;
+ if (func[1] !== '_') return func; // C function
+ if (func[2] !== 'Z') return func;
+ switch (func[3]) {
+ case 'n': return 'operator new()';
+ case 'd': return 'operator delete()';
+ }
+ return parse();
+ } catch(e) {
+ return func;
+ }
+}
+
+function demangleAll(text) {
+ return text.replace(/__Z[\w\d_]+/g, function(x) { var y = demangle(x); return x === y ? x : (x + ' [' + y + ']') });
+}
+
+function stackTrace() {
+ var stack = new Error().stack;
+ return stack ? demangleAll(stack) : '(no stack trace available)'; // Stack trace is not available at least on IE10 and Safari 6.
+}
+
+// Memory management
+
+var PAGE_SIZE = 4096;
+function alignMemoryPage(x) {
+ return (x+4095)&-4096;
+}
+
+var HEAP;
+var HEAP8, HEAPU8, HEAP16, HEAPU16, HEAP32, HEAPU32, HEAPF32, HEAPF64;
+
+var STATIC_BASE = 0, STATICTOP = 0, staticSealed = false; // static area
+var STACK_BASE = 0, STACKTOP = 0, STACK_MAX = 0; // stack area
+var DYNAMIC_BASE = 0, DYNAMICTOP = 0; // dynamic area handled by sbrk
+
+function enlargeMemory() {
+ abort('Cannot enlarge memory arrays. Either (1) compile with -s TOTAL_MEMORY=X with X higher than the current value ' + TOTAL_MEMORY + ', (2) compile with ALLOW_MEMORY_GROWTH which adjusts the size at runtime but prevents some optimizations, or (3) set Module.TOTAL_MEMORY before the program runs.');
+}
+
+var TOTAL_STACK = Module['TOTAL_STACK'] || 5242880;
+var TOTAL_MEMORY = Module['TOTAL_MEMORY'] || 134217728;
+var FAST_MEMORY = Module['FAST_MEMORY'] || 2097152;
+
+var totalMemory = 4096;
+while (totalMemory < TOTAL_MEMORY || totalMemory < 2*TOTAL_STACK) {
+ if (totalMemory < 16*1024*1024) {
+ totalMemory *= 2;
+ } else {
+ totalMemory += 16*1024*1024
+ }
+}
+if (totalMemory !== TOTAL_MEMORY) {
+ Module.printErr('increasing TOTAL_MEMORY to ' + totalMemory + ' to be more reasonable');
+ TOTAL_MEMORY = totalMemory;
+}
+
+// Initialize the runtime's memory
+// check for full engine support (use string 'subarray' to avoid closure compiler confusion)
+assert(typeof Int32Array !== 'undefined' && typeof Float64Array !== 'undefined' && !!(new Int32Array(1)['subarray']) && !!(new Int32Array(1)['set']),
+ 'JS engine does not provide full typed array support');
+
+var buffer = new ArrayBuffer(TOTAL_MEMORY);
+HEAP8 = new Int8Array(buffer);
+HEAP16 = new Int16Array(buffer);
+HEAP32 = new Int32Array(buffer);
+HEAPU8 = new Uint8Array(buffer);
+HEAPU16 = new Uint16Array(buffer);
+HEAPU32 = new Uint32Array(buffer);
+HEAPF32 = new Float32Array(buffer);
+HEAPF64 = new Float64Array(buffer);
+
+// Endianness check (note: assumes compiler arch was little-endian)
+HEAP32[0] = 255;
+assert(HEAPU8[0] === 255 && HEAPU8[3] === 0, 'Typed arrays 2 must be run on a little-endian system');
+
+Module['HEAP'] = HEAP;
+Module['HEAP8'] = HEAP8;
+Module['HEAP16'] = HEAP16;
+Module['HEAP32'] = HEAP32;
+Module['HEAPU8'] = HEAPU8;
+Module['HEAPU16'] = HEAPU16;
+Module['HEAPU32'] = HEAPU32;
+Module['HEAPF32'] = HEAPF32;
+Module['HEAPF64'] = HEAPF64;
+
+function callRuntimeCallbacks(callbacks) {
+ while(callbacks.length > 0) {
+ var callback = callbacks.shift();
+ if (typeof callback == 'function') {
+ callback();
+ continue;
+ }
+ var func = callback.func;
+ if (typeof func === 'number') {
+ if (callback.arg === undefined) {
+ Runtime.dynCall('v', func);
+ } else {
+ Runtime.dynCall('vi', func, [callback.arg]);
+ }
+ } else {
+ func(callback.arg === undefined ? null : callback.arg);
+ }
+ }
+}
+
+var __ATPRERUN__ = []; // functions called before the runtime is initialized
+var __ATINIT__ = []; // functions called during startup
+var __ATMAIN__ = []; // functions called when main() is to be run
+var __ATEXIT__ = []; // functions called during shutdown
+var __ATPOSTRUN__ = []; // functions called after the runtime has exited
+
+var runtimeInitialized = false;
+
+function preRun() {
+ // compatibility - merge in anything from Module['preRun'] at this time
+ if (Module['preRun']) {
+ if (typeof Module['preRun'] == 'function') Module['preRun'] = [Module['preRun']];
+ while (Module['preRun'].length) {
+ addOnPreRun(Module['preRun'].shift());
+ }
+ }
+ callRuntimeCallbacks(__ATPRERUN__);
+}
+
+function ensureInitRuntime() {
+ if (runtimeInitialized) return;
+ runtimeInitialized = true;
+ callRuntimeCallbacks(__ATINIT__);
+}
+
+function preMain() {
+ callRuntimeCallbacks(__ATMAIN__);
+}
+
+function exitRuntime() {
+ callRuntimeCallbacks(__ATEXIT__);
+}
+
+function postRun() {
+ // compatibility - merge in anything from Module['postRun'] at this time
+ if (Module['postRun']) {
+ if (typeof Module['postRun'] == 'function') Module['postRun'] = [Module['postRun']];
+ while (Module['postRun'].length) {
+ addOnPostRun(Module['postRun'].shift());
+ }
+ }
+ callRuntimeCallbacks(__ATPOSTRUN__);
+}
+
+function addOnPreRun(cb) {
+ __ATPRERUN__.unshift(cb);
+}
+Module['addOnPreRun'] = Module.addOnPreRun = addOnPreRun;
+
+function addOnInit(cb) {
+ __ATINIT__.unshift(cb);
+}
+Module['addOnInit'] = Module.addOnInit = addOnInit;
+
+function addOnPreMain(cb) {
+ __ATMAIN__.unshift(cb);
+}
+Module['addOnPreMain'] = Module.addOnPreMain = addOnPreMain;
+
+function addOnExit(cb) {
+ __ATEXIT__.unshift(cb);
+}
+Module['addOnExit'] = Module.addOnExit = addOnExit;
+
+function addOnPostRun(cb) {
+ __ATPOSTRUN__.unshift(cb);
+}
+Module['addOnPostRun'] = Module.addOnPostRun = addOnPostRun;
+
+// Tools
+
+// This processes a JS string into a C-line array of numbers, 0-terminated.
+// For LLVM-originating strings, see parser.js:parseLLVMString function
+function intArrayFromString(stringy, dontAddNull, length /* optional */) {
+ var ret = (new Runtime.UTF8Processor()).processJSString(stringy);
+ if (length) {
+ ret.length = length;
+ }
+ if (!dontAddNull) {
+ ret.push(0);
+ }
+ return ret;
+}
+Module['intArrayFromString'] = intArrayFromString;
+
+function intArrayToString(array) {
+ var ret = [];
+ for (var i = 0; i < array.length; i++) {
+ var chr = array[i];
+ if (chr > 0xFF) {
+ chr &= 0xFF;
+ }
+ ret.push(String.fromCharCode(chr));
+ }
+ return ret.join('');
+}
+Module['intArrayToString'] = intArrayToString;
+
+// Write a Javascript array to somewhere in the heap
+function writeStringToMemory(string, buffer, dontAddNull) {
+ var array = intArrayFromString(string, dontAddNull);
+ var i = 0;
+ while (i < array.length) {
+ var chr = array[i];
+ HEAP8[(((buffer)+(i))|0)]=chr;
+ i = i + 1;
+ }
+}
+Module['writeStringToMemory'] = writeStringToMemory;
+
+function writeArrayToMemory(array, buffer) {
+ for (var i = 0; i < array.length; i++) {
+ HEAP8[(((buffer)+(i))|0)]=array[i];
+ }
+}
+Module['writeArrayToMemory'] = writeArrayToMemory;
+
+function writeAsciiToMemory(str, buffer, dontAddNull) {
+ for (var i = 0; i < str.length; i++) {
+ HEAP8[(((buffer)+(i))|0)]=str.charCodeAt(i);
+ }
+ if (!dontAddNull) HEAP8[(((buffer)+(str.length))|0)]=0;
+}
+Module['writeAsciiToMemory'] = writeAsciiToMemory;
+
+function unSign(value, bits, ignore) {
+ if (value >= 0) {
+ return value;
+ }
+ return bits <= 32 ? 2*Math.abs(1 << (bits-1)) + value // Need some trickery, since if bits == 32, we are right at the limit of the bits JS uses in bitshifts
+ : Math.pow(2, bits) + value;
+}
+function reSign(value, bits, ignore) {
+ if (value <= 0) {
+ return value;
+ }
+ var half = bits <= 32 ? Math.abs(1 << (bits-1)) // abs is needed if bits == 32
+ : Math.pow(2, bits-1);
+ if (value >= half && (bits <= 32 || value > half)) { // for huge values, we can hit the precision limit and always get true here. so don't do that
+ // but, in general there is no perfect solution here. With 64-bit ints, we get rounding and errors
+ // TODO: In i64 mode 1, resign the two parts separately and safely
+ value = -2*half + value; // Cannot bitshift half, as it may be at the limit of the bits JS uses in bitshifts
+ }
+ return value;
+}
+
+// check for imul support, and also for correctness ( https://bugs.webkit.org/show_bug.cgi?id=126345 )
+if (!Math['imul'] || Math['imul'](0xffffffff, 5) !== -5) Math['imul'] = function imul(a, b) {
+ var ah = a >>> 16;
+ var al = a & 0xffff;
+ var bh = b >>> 16;
+ var bl = b & 0xffff;
+ return (al*bl + ((ah*bl + al*bh) << 16))|0;
+};
+Math.imul = Math['imul'];
+
+
+var Math_abs = Math.abs;
+var Math_cos = Math.cos;
+var Math_sin = Math.sin;
+var Math_tan = Math.tan;
+var Math_acos = Math.acos;
+var Math_asin = Math.asin;
+var Math_atan = Math.atan;
+var Math_atan2 = Math.atan2;
+var Math_exp = Math.exp;
+var Math_log = Math.log;
+var Math_sqrt = Math.sqrt;
+var Math_ceil = Math.ceil;
+var Math_floor = Math.floor;
+var Math_pow = Math.pow;
+var Math_imul = Math.imul;
+var Math_fround = Math.fround;
+var Math_min = Math.min;
+
+// A counter of dependencies for calling run(). If we need to
+// do asynchronous work before running, increment this and
+// decrement it. Incrementing must happen in a place like
+// PRE_RUN_ADDITIONS (used by emcc to add file preloading).
+// Note that you can add dependencies in preRun, even though
+// it happens right before run - run will be postponed until
+// the dependencies are met.
+var runDependencies = 0;
+var runDependencyWatcher = null;
+var dependenciesFulfilled = null; // overridden to take different actions when all run dependencies are fulfilled
+
+function addRunDependency(id) {
+ runDependencies++;
+ if (Module['monitorRunDependencies']) {
+ Module['monitorRunDependencies'](runDependencies);
+ }
+}
+Module['addRunDependency'] = addRunDependency;
+function removeRunDependency(id) {
+ runDependencies--;
+ if (Module['monitorRunDependencies']) {
+ Module['monitorRunDependencies'](runDependencies);
+ }
+ if (runDependencies == 0) {
+ if (runDependencyWatcher !== null) {
+ clearInterval(runDependencyWatcher);
+ runDependencyWatcher = null;
+ }
+ if (dependenciesFulfilled) {
+ var callback = dependenciesFulfilled;
+ dependenciesFulfilled = null;
+ callback(); // can add another dependenciesFulfilled
+ }
+ }
+}
+Module['removeRunDependency'] = removeRunDependency;
+
+Module["preloadedImages"] = {}; // maps url to image data
+Module["preloadedAudios"] = {}; // maps url to audio data
+
+
+var memoryInitializer = null;
+
+// === Body ===
+
+
+
+
+
+STATIC_BASE = 8;
+
+STATICTOP = STATIC_BASE + Runtime.alignMemory(13467);
+/* global initializers */ __ATINIT__.push();
+
+
+/* memory initializer */ allocate([99,97,110,110,111,116,32,99,114,101,97,116,101,32,115,116,97,116,101,58,32,110,111,116,32,101,110,111,117,103,104,32,109,101,109,111,114,121,0,0,40,101,114,114,111,114,32,111,98,106,101,99,116,32,105,115,32,110,111,116,32,97,32,115,116,114,105,110,103,41,0,0,88,0,0,0,0,0,0,0,108,117,97,0,0,0,0,0,76,85,65,95,78,79,69,78,86,0,0,0,0,0,0,0,116,111,111,32,109,97,110,121,32,114,101,115,117,108,116,115,32,116,111,32,112,114,105,110,116,0,0,0,0,0,0,0,112,114,105,110,116,0,0,0,101,114,114,111,114,32,99,97,108,108,105,110,103,32,39,112,114,105,110,116,39,32,40,37,115,41,0,0,0,0,0,0,10,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,105,110,116,101,114,114,117,112,116,101,100,33,0,0,0,0,95,95,116,111,115,116,114,105,110,103,0,0,0,0,0,0,40,110,111,32,101,114,114,111,114,32,109,101,115,115,97,103,101,41,0,0,0,0,0,0,61,115,116,100,105,110,0,0,60,101,111,102,62,0,0,0,114,101,116,117,114,110,32,37,115,0,0,0,0,0,0,0,95,80,82,79,77,80,84,0,95,80,82,79,77,80,84,50,0,0,0,0,0,0,0,0,62,32,0,0,0,0,0,0,62,62,32,0,0,0,0,0,97,114,103,0,0,0,0,0,45,0,0,0,0,0,0,0,45,45,0,0,0,0,0,0,116,111,111,32,109,97,110,121,32,97,114,103,117,109,101,110,116,115,32,116,111,32,115,99,114,105,112,116,0,0,0,0,61,40,99,111,109,109,97,110,100,32,108,105,110,101,41,0,114,101,113,117,105,114,101,0,61,76,85,65,95,73,78,73,84,95,53,95,50,0,0,0,61,76,85,65,95,73,78,73,84,0,0,0,0,0,0,0,76,117,97,32,53,46,50,46,50,32,32,67,111,112,121,114,105,103,104,116,32,40,67,41,32,49,57,57,52,45,50,48,49,51,32,76,117,97,46,111,114,103,44,32,80,85,67,45,82,105,111,0,0,0,0,0,37,115,58,32,0,0,0,0,39,37,115,39,32,110,101,101,100,115,32,97,114,103,117,109,101,110,116,10,0,0,0,0,117,110,114,101,99,111,103,110,105,122,101,100,32,111,112,116,105,111,110,32,39,37,115,39,10,0,0,0,0,0,0,0,117,115,97,103,101,58,32,37,115,32,91,111,112,116,105,111,110,115,93,32,91,115,99,114,105,112,116,32,91,97,114,103,115,93,93,10,65,118,97,105,108,97,98,108,101,32,111,112,116,105,111,110,115,32,97,114,101,58,10,32,32,45,101,32,115,116,97,116,32,32,101,120,101,99,117,116,101,32,115,116,114,105,110,103,32,39,115,116,97,116,39,10,32,32,45,105,32,32,32,32,32,32,32,101,110,116,101,114,32,105,110,116,101,114,97,99,116,105,118,101,32,109,111,100,101,32,97,102,116,101,114,32,101,120,101,99,117,116,105,110,103,32,39,115,99,114,105,112,116,39,10,32,32,45,108,32,110,97,109,101,32,32,114,101,113,117,105,114,101,32,108,105,98,114,97,114,121,32,39,110,97,109,101,39,10,32,32,45,118,32,32,32,32,32,32,32,115,104,111,119,32,118,101,114,115,105,111,110,32,105,110,102,111,114,109,97,116,105,111,110,10,32,32,45,69,32,32,32,32,32,32,32,105,103,110,111,114,101,32,101,110,118,105,114,111,110,109,101,110,116,32,118,97,114,105,97,98,108,101,115,10,32,32,45,45,32,32,32,32,32,32,32,115,116,111,112,32,104,97,110,100,108,105,110,103,32,111,112,116,105,111,110,115,10,32,32,45,32,32,32,32,32,32,32,32,115,116,111,112,32,104,97,110,100,108,105,110,103,32,111,112,116,105,111,110,115,32,97,110,100,32,101,120,101,99,117,116,101,32,115,116,100,105,110,10,0,0,0,0,0,0,0,37,115,10,0,0,0,0,0,0,0,0,0,0,96,127,64,63,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,37,115,10,0,0,0,0,0,115,116,97,99,107,32,116,114,97,99,101,98,97,99,107,58,0,0,0,0,0,0,0,0,10,9,46,46,46,0,0,0,83,108,110,116,0,0,0,0,10,9,37,115,58,0,0,0,37,100,58,0,0,0,0,0,32,105,110,32,0,0,0,0,10,9,40,46,46,46,116,97,105,108,32,99,97,108,108,115,46,46,46,41,0,0,0,0,98,97,100,32,97,114,103,117,109,101,110,116,32,35,37,100,32,40,37,115,41,0,0,0,110,0,0,0,0,0,0,0,109,101,116,104,111,100,0,0,99,97,108,108,105,110,103,32,39,37,115,39,32,111,110,32,98,97,100,32,115,101,108,102,32,40,37,115,41,0,0,0,63,0,0,0,0,0,0,0,98,97,100,32,97,114,103,117,109,101,110,116,32,35,37,100,32,116,111,32,39,37,115,39,32,40,37,115,41,0,0,0,83,108,0,0,0,0,0,0,37,115,58,37,100,58,32,0,0,0,0,0,0,0,0,0,37,115,58,32,37,115,0,0,101,120,105,116,0,0,0,0,105,110,118,97,108,105,100,32,111,112,116,105,111,110,32,39,37,115,39,0,0,0,0,0,115,116,97,99,107,32,111,118,101,114,102,108,111,119,32,40,37,115,41,0,0,0,0,0,115,116,97,99,107,32,111,118,101,114,102,108,111,119,0,0,118,97,108,117,101,32,101,120,112,101,99,116,101,100,0,0,98,117,102,102,101,114,32,116,111,111,32,108,97,114,103,101,0,0,0,0,0,0,0,0,61,115,116,100,105,110,0,0,64,37,115,0,0,0,0,0,114,0,0,0,0,0,0,0,111,112,101,110,0,0,0,0,114,98,0,0,0,0,0,0,114,101,111,112,101,110,0,0,114,101,97,100,0,0,0,0,111,98,106,101,99,116,32,108,101,110,103,116,104,32,105,115,32,110,111,116,32,97,32,110,117,109,98,101,114,0,0,0,95,95,116,111,115,116,114,105,110,103,0,0,0,0,0,0,116,114,117,101,0,0,0,0,102,97,108,115,101,0,0,0,110,105,108,0,0,0,0,0,37,115,58,32,37,112,0,0,95,76,79,65,68,69,68,0,110,97,109,101,32,99,111,110,102,108,105,99,116,32,102,111,114,32,109,111,100,117,108,101,32,39,37,115,39,0,0,0,116,111,111,32,109,97,110,121,32,117,112,118,97,108,117,101,115,0,0,0,0,0,0,0,109,117,108,116,105,112,108,101,32,76,117,97,32,86,77,115,32,100,101,116,101,99,116,101,100,0,0,0,0,0,0,0,118,101,114,115,105,111,110,32,109,105,115,109,97,116,99,104,58,32,97,112,112,46,32,110,101,101,100,115,32,37,102,44,32,76,117,97,32,99,111,114,101,32,112,114,111,118,105,100,101,115,32,37,102,0,0,0,98,97,100,32,99,111,110,118,101,114,115,105,111,110,32,110,117,109,98,101,114,45,62,105,110,116,59,32,109,117,115,116,32,114,101,99,111,109,112,105,108,101,32,76,117,97,32,119,105,116,104,32,112,114,111,112,101,114,32,115,101,116,116,105,110,103,115,0,0,0,0,0,80,65,78,73,67,58,32,117,110,112,114,111,116,101,99,116,101,100,32,101,114,114,111,114,32,105,110,32,99,97,108,108,32,116,111,32,76,117,97,32,65,80,73,32,40,37,115,41,10,0,0,0,0,0,0,0,239,187,191,0,0,0,0,0,99,97,110,110,111,116,32,37,115,32,37,115,58,32,37,115,0,0,0,0,0,0,0,0,37,115,32,101,120,112,101,99,116,101,100,44,32,103,111,116,32,37,115,0,0,0,0,0,102,0,0,0,0,0,0,0,46,0,0,0,0,0,0,0,102,117,110,99,116,105,111,110,32,39,37,115,39,0,0,0,109,97,105,110,32,99,104,117,110,107,0,0,0,0,0,0,102,117,110,99,116,105,111,110,32,60,37,115,58,37,100,62,0,0,0,0,0,0,0,0,97,116,116,101,109,112,116,32,116,111,32,37,115,32,37,115,32,39,37,115,39,32,40,97,32,37,115,32,118,97,108,117,101,41,0,0,0,0,0,0,97,116,116,101,109,112,116,32,116,111,32,37,115,32,97,32,37,115,32,118,97,108,117,101,0,0,0,0,0,0,0,0,99,111,110,99,97,116,101,110,97,116,101,0,0,0,0,0,112,101,114,102,111,114,109,32,97,114,105,116,104,109,101,116,105,99,32,111,110,0,0,0,97,116,116,101,109,112,116,32,116,111,32,99,111,109,112,97,114,101,32,116,119,111,32,37,115,32,118,97,108,117,101,115,0,0,0,0,0,0,0,0,97,116,116,101,109,112,116,32,116,111,32,99,111,109,112,97,114,101,32,37,115,32,119,105,116,104,32,37,115,0,0,0,37,115,58,37,100,58,32,37,115,0,0,0,0,0,0,0,108,111,99,97,108,0,0,0,95,69,78,86,0,0,0,0,103,108,111,98,97,108,0,0,102,105,101,108,100,0,0,0,117,112,118,97,108,117,101,0,99,111,110,115,116,97,110,116,0,0,0,0,0,0,0,0,109,101,116,104,111,100,0,0,63,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,102,111,114,32,105,116,101,114,97,116,111,114,0,0,0,0,109,101,116,97,109,101,116,104,111,100,0,0,0,0,0,0,61,91,67,93,0,0,0,0,67,0,0,0,0,0,0,0,61,63,0,0,0,0,0,0,109,97,105,110,0,0,0,0,76,117,97,0,0,0,0,0,40,42,116,101,109,112,111,114,97,114,121,41,0,0,0,0,40,42,118,97,114,97,114,103,41,0,0,0,0,0,0,0,115,116,97,99,107,32,111,118,101,114,102,108,111,119,0,0,67,32,115,116,97,99,107,32,111,118,101,114,102,108,111,119,0,0,0,0,0,0,0,0,97,116,116,101,109,112,116,32,116,111,32,121,105,101,108,100,32,97,99,114,111,115,115,32,97,32,67,45,99,97,108,108,32,98,111,117,110,100,97,114,121,0,0,0,0,0,0,0,97,116,116,101,109,112,116,32,116,111,32,121,105,101,108,100,32,102,114,111,109,32,111,117,116,115,105,100,101,32,97,32,99,111,114,111,117,116,105,110,101,0,0,0,0,0,0,0,98,105,110,97,114,121,0,0,116,101,120,116,0,0,0,0,97,116,116,101,109,112,116,32,116,111,32,108,111,97,100,32,97,32,37,115,32,99,104,117,110,107,32,40,109,111,100,101,32,105,115,32,39,37,115,39,41,0,0,0,0,0,0,0,101,114,114,111,114,32,105,110,32,101,114,114,111,114,32,104,97,110,100,108,105,110,103,0,99,97,110,110,111,116,32,114,101,115,117,109,101,32,110,111,110,45,115,117,115,112,101,110,100,101,100,32,99,111,114,111,117,116,105,110,101,0,0,0,99,97,110,110,111,116,32,114,101,115,117,109,101,32,100,101,97,100,32,99,111,114,111,117,116,105,110,101,0,0,0,0,99,97,108,108,0,0,0,0,110,111,32,109,101,115,115,97,103,101,0,0,0,0,0,0,101,114,114,111,114,32,105,110,32,95,95,103,99,32,109,101,116,97,109,101,116,104,111,100,32,40,37,115,41,0,0,0,95,80,82,69,76,79,65,68,0,0,0,0,0,0,0,0,95,71,0,0,0,0,0,0,112,97,99,107,97,103,101,0,99,111,114,111,117,116,105,110,101,0,0,0,0,0,0,0,116,97,98,108,101,0,0,0,105,111,0,0,0,0,0,0,111,115,0,0,0,0,0,0,115,116,114,105,110,103,0,0,98,105,116,51,50,0,0,0,109,97,116,104,0,0,0,0,100,101,98,117,103,0,0,0,144,11,0,0,1,0,0,0,152,11,0,0,2,0,0,0,48,13,0,0,3,0,0,0,160,11,0,0,4,0,0,0,56,13,0,0,5,0,0,0,64,13,0,0,6,0,0,0,72,13,0,0,7,0,0,0,168,11,0,0,8,0,0,0,80,13,0,0,9,0,0,0,88,13,0,0,10,0,0,0,192,11,0,0,11,0,0,0,0,0,0,0,0,0,0,0,95,73,79,95,105,110,112,117,116,0,0,0,0,0,0,0,115,116,100,105,110,0,0,0,95,73,79,95,111,117,116,112,117,116,0,0,0,0,0,0,115,116,100,111,117,116,0,0,115,116,100,101,114,114,0,0,70,73,76,69,42,0,0,0,99,97,110,110,111,116,32,99,108,111,115,101,32,115,116,97,110,100,97,114,100,32,102,105,108,101,0,0,0,0,0,0,95,95,105,110,100,101,120,0,144,11,0,0,1,0,0,0,152,11,0,0,12,0,0,0,160,11,0,0,13,0,0,0,168,11,0,0,14,0,0,0,176,11,0,0,15,0,0,0,184,11,0,0,16,0,0,0,192,11,0,0,17,0,0,0,200,11,0,0,18,0,0,0,208,11,0,0,19,0,0,0,0,0,0,0,0,0,0,0,99,108,111,115,101,0,0,0,102,108,117,115,104,0,0,0,108,105,110,101,115,0,0,0,114,101,97,100,0,0,0,0,115,101,101,107,0,0,0,0,115,101,116,118,98,117,102,0,119,114,105,116,101,0,0,0,95,95,103,99,0,0,0,0,95,95,116,111,115,116,114,105,110,103,0,0,0,0,0,0,102,105,108,101,32,40,99,108,111,115,101,100,41,0,0,0,102,105,108,101,32,40,37,112,41,0,0,0,0,0,0,0,37,46,49,52,103,0,0,0,97,116,116,101,109,112,116,32,116,111,32,117,115,101,32,97,32,99,108,111,115,101,100,32,102,105,108,101,0,0,0,0,2,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,72,12,0,0,80,12,0,0,88,12,0,0,0,0,0,0,110,111,0,0,0,0,0,0,102,117,108,108,0,0,0,0,108,105,110,101,0,0,0,0,0,0,0,0,1,0,0,0,2,0,0,0,0,0,0,0,128,12,0,0,136,12,0,0,144,12,0,0,0,0,0,0,115,101,116,0,0,0,0,0,99,117,114,0,0,0,0,0,101,110,100,0,0,0,0,0,110,111,116,32,97,110,32,105,110,116,101,103,101,114,32,105,110,32,112,114,111,112,101,114,32,114,97,110,103,101,0,0,116,111,111,32,109,97,110,121,32,97,114,103,117,109,101,110,116,115,0,0,0,0,0,0,105,110,118,97,108,105,100,32,111,112,116,105,111,110,0,0,105,110,118,97,108,105,100,32,102,111,114,109,97,116,0,0,37,108,102,0,0,0,0,0,116,111,111,32,109,97,110,121,32,111,112,116,105,111,110,115,0,0,0,0,0,0,0,0,102,105,108,101,32,105,115,32,97,108,114,101,97,100,121,32,99,108,111,115,101,100,0,0,37,115,0,0,0,0,0,0,105,110,112,117,116,0,0,0,111,112,101,110,0,0,0,0,111,117,116,112,117,116,0,0,112,111,112,101,110,0,0,0,116,109,112,102,105,108,101,0,116,121,112,101,0,0,0,0,115,116,97,110,100,97,114,100,32,37,115,32,102,105,108,101,32,105,115,32,99,108,111,115,101,100,0,0,0,0,0,0,99,108,111,115,101,100,32,102,105,108,101,0,0,0,0,0,102,105,108,101,0,0,0,0,114,0,0,0,0,0,0,0,39,112,111,112,101,110,39,32,110,111,116,32,115,117,112,112,111,114,116,101,100,0,0,0,119,0,0,0,0,0,0,0,99,97,110,110,111,116,32,111,112,101,110,32,102,105,108,101,32,39,37,115,39,32,40,37,115,41,0,0,0,0,0,0,114,119,97,0,0,0,0,0,105,110,118,97,108,105,100,32,109,111,100,101,0,0,0,0,240,14,0,0,20,0,0,0,248,14,0,0,21,0,0,0,0,15,0,0,22,0,0,0,8,15,0,0,23,0,0,0,16,15,0,0,24,0,0,0,24,15,0,0,25,0,0,0,32,15,0,0,26,0,0,0,40,15,0,0,27,0,0,0,48,15,0,0,28,0,0,0,56,15,0,0,29,0,0,0,64,15,0,0,30,0,0,0,72,15,0,0,31,0,0,0,80,15,0,0,32,0,0,0,88,15,0,0,33,0,0,0,96,15,0,0,34,0,0,0,104,15,0,0,35,0,0,0,112,15,0,0,36,0,0,0,120,15,0,0,37,0,0,0,128,15,0,0,38,0,0,0,136,15,0,0,39,0,0,0,144,15,0,0,40,0,0,0,152,15,0,0,41,0,0,0,160,15,0,0,42,0,0,0,176,15,0,0,43,0,0,0,184,15,0,0,44,0,0,0,192,15,0,0,45,0,0,0,200,15,0,0,46,0,0,0,208,15,0,0,47,0,0,0,0,0,0,0,0,0,0,0,112,105,0,0,0,0,0,0,104,117,103,101,0,0,0,0,97,98,115,0,0,0,0,0,97,99,111,115,0,0,0,0,97,115,105,110,0,0,0,0,97,116,97,110,50,0,0,0,97,116,97,110,0,0,0,0,99,101,105,108,0,0,0,0,99,111,115,104,0,0,0,0,99,111,115,0,0,0,0,0,100,101,103,0,0,0,0,0,101,120,112,0,0,0,0,0,102,108,111,111,114,0,0,0,102,109,111,100,0,0,0,0,102,114,101,120,112,0,0,0,108,100,101,120,112,0,0,0,108,111,103,49,48,0,0,0,108,111,103,0,0,0,0,0,109,97,120,0,0,0,0,0,109,105,110,0,0,0,0,0,109,111,100,102,0,0,0,0,112,111,119,0,0,0,0,0,114,97,100,0,0,0,0,0,114,97,110,100,111,109,0,0,114,97,110,100,111,109,115,101,101,100,0,0,0,0,0,0,115,105,110,104,0,0,0,0,115,105,110,0,0,0,0,0,115,113,114,116,0,0,0,0,116,97,110,104,0,0,0,0,116,97,110,0,0,0,0,0,105,110,116,101,114,118,97,108,32,105,115,32,101,109,112,116,121,0,0,0,0,0,0,0,119,114,111,110,103,32,110,117,109,98,101,114,32,111,102,32,97,114,103,117,109,101,110,116,115,0,0,0,0,0,0,0,116,111,111,32,109,97,110,121,32,37,115,32,40,108,105,109,105,116,32,105,115,32,37,100,41,0,0,0,0,0,0,0,109,101,109,111,114,121,32,97,108,108,111,99,97,116,105,111,110,32,101,114,114,111,114,58,32,98,108,111,99,107,32,116,111,111,32,98,105,103,0,0,95,67,76,73,66,83,0,0,95,95,103,99,0,0,0,0,16,20,0,0,48,0,0,0,24,20,0,0,49,0,0,0,40,20,0,0,50,0,0,0,0,0,0,0,0,0,0,0,108,111,97,100,101,114,115,0,115,101,97,114,99,104,101,114,115,0,0,0,0,0,0,0,112,97,116,104,0,0,0,0,76,85,65,95,80,65,84,72,95,53,95,50,0,0,0,0,76,85,65,95,80,65,84,72,0,0,0,0,0,0,0,0,47,117,115,114,47,108,111,99,97,108,47,115,104,97,114,101,47,108,117,97,47,53,46,50,47,63,46,108,117,97,59,47,117,115,114,47,108,111,99,97,108,47,115,104,97,114,101,47,108,117,97,47,53,46,50,47,63,47,105,110,105,116,46,108,117,97,59,47,117,115,114,47,108,111,99,97,108,47,108,105,98,47,108,117,97,47,53,46,50,47,63,46,108,117,97,59,47,117,115,114,47,108,111,99,97,108,47,108,105,98,47,108,117,97,47,53,46,50,47,63,47,105,110,105,116,46,108,117,97,59,46,47,63,46,108,117,97,0,0,0,0,0,0,0,99,112,97,116,104,0,0,0,76,85,65,95,67,80,65,84,72,95,53,95,50,0,0,0,76,85,65,95,67,80,65,84,72,0,0,0,0,0,0,0,47,117,115,114,47,108,111,99,97,108,47,108,105,98,47,108,117,97,47,53,46,50,47,63,46,115,111,59,47,117,115,114,47,108,111,99,97,108,47,108,105,98,47,108,117,97,47,53,46,50,47,108,111,97,100,97,108,108,46,115,111,59,46,47,63,46,115,111,0,0,0,0,47,10,59,10,63,10,33,10,45,10,0,0,0,0,0,0,99,111,110,102,105,103,0,0,95,76,79,65,68,69,68,0,108,111,97,100,101,100,0,0,95,80,82,69,76,79,65,68,0,0,0,0,0,0,0,0,112,114,101,108,111,97,100,0,32,18,0,0,51,0,0,0,40,18,0,0,52,0,0,0,0,0,0,0,0,0,0,0,109,111,100,117,108,101,0,0,114,101,113,117,105,114,101,0,39,112,97,99,107,97,103,101,46,115,101,97,114,99,104,101,114,115,39,32,109,117,115,116,32,98,101,32,97,32,116,97,98,108,101,0,0,0,0,0,109,111,100,117,108,101,32,39,37,115,39,32,110,111,116,32,102,111,117,110,100,58,37,115,0,0,0,0,0,0,0,0,95,78,65,77,69,0,0,0,102,0,0,0,0,0,0,0,39,109,111,100,117,108,101,39,32,110,111,116,32,99,97,108,108,101,100,32,102,114,111,109,32,97,32,76,117,97,32,102,117,110,99,116,105,111,110,0,95,77,0,0,0,0,0,0,95,80,65,67,75,65,71,69,0,0,0,0,0,0,0,0,59,59,0,0,0,0,0,0,59,1,59,0,0,0,0,0,1,0,0,0,0,0,0,0,76,85,65,95,78,79,69,78,86,0,0,0,0,0,0,0,47,0,0,0,0,0,0,0,10,9,110,111,32,109,111,100,117,108,101,32,39,37,115,39,32,105,110,32,102,105,108,101,32,39,37,115,39,0,0,0,101,114,114,111,114,32,108,111,97,100,105,110,103,32,109,111,100,117,108,101,32,39,37,115,39,32,102,114,111,109,32,102,105,108,101,32,39,37,115,39,58,10,9,37,115,0,0,0,46,0,0,0,0,0,0,0,95,0,0,0,0,0,0,0,108,117,97,111,112,101,110,95,37,115,0,0,0,0,0,0,100,121,110,97,109,105,99,32,108,105,98,114,97,114,105,101,115,32,110,111,116,32,101,110,97,98,108,101,100,59,32,99,104,101,99,107,32,121,111,117,114,32,76,117,97,32,105,110,115,116,97,108,108,97,116,105,111,110,0,0,0,0,0,0,39,112,97,99,107,97,103,101,46,37,115,39,32,109,117,115,116,32,98,101,32,97,32,115,116,114,105,110,103,0,0,0,63,0,0,0,0,0,0,0,10,9,110,111,32,102,105,108,101,32,39,37,115,39,0,0,114,0,0,0,0,0,0,0,10,9,110,111,32,102,105,101,108,100,32,112,97,99,107,97,103,101,46,112,114,101,108,111,97,100,91,39,37,115,39,93,0,0,0,0,0,0,0,0,108,111,97,100,108,105,98,0,115,101,97,114,99,104,112,97,116,104,0,0,0,0,0,0,115,101,101,97,108,108,0,0,95,95,105,110,100,101,120,0,97,98,115,101,110,116,0,0,105,110,105,116,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,110,78,0,0,0,0,0,0,120,88,0,0,0,0,0,0,40,110,117,108,108,41,0,0,37,112,0,0,0,0,0,0,37,0,0,0,0,0,0,0,105,110,118,97,108,105,100,32,111,112,116,105,111,110,32,39,37,37,37,99,39,32,116,111,32,39,108,117,97,95,112,117,115,104,102,115,116,114,105,110,103,39,0,0,0,0,0,0,46,46,46,0,0,0,0,0,91,115,116,114,105,110,103,32,34,0,0,0,0,0,0,0,34,93,0,0,0,0,0,0,96,113,65,84,80,80,92,108,60,16,60,84,108,124,124,124,124,124,124,96,96,96,104,34,188,188,188,132,228,84,84,16,98,98,4,98,20,81,80,23,88,22,0,0,53,0,0,0,96,22,0,0,54,0,0,0,104,22,0,0,55,0,0,0,120,22,0,0,56,0,0,0,128,22,0,0,57,0,0,0,136,22,0,0,58,0,0,0,144,22,0,0,59,0,0,0,152,22,0,0,60,0,0,0,160,22,0,0,61,0,0,0,176,22,0,0,62,0,0,0,184,22,0,0,63,0,0,0,0,0,0,0,0,0,0,0,99,108,111,99,107,0,0,0,100,97,116,101,0,0,0,0,100,105,102,102,116,105,109,101,0,0,0,0,0,0,0,0,101,120,101,99,117,116,101,0,101,120,105,116,0,0,0,0,103,101,116,101,110,118,0,0,114,101,109,111,118,101,0,0,114,101,110,97,109,101,0,0,115,101,116,108,111,99,97,108,101,0,0,0,0,0,0,0,116,105,109,101,0,0,0,0,116,109,112,110,97,109,101,0,117,110,97,98,108,101,32,116,111,32,103,101,110,101,114,97,116,101,32,97,32,117,110,105,113,117,101,32,102,105,108,101,110,97,109,101,0,0,0,0,115,101,99,0,0,0,0,0,109,105,110,0,0,0,0,0,104,111,117,114,0,0,0,0,100,97,121,0,0,0,0,0,109,111,110,116,104,0,0,0,121,101,97,114,0,0,0,0,105,115,100,115,116,0,0,0,102,105,101,108,100,32,39,37,115,39,32,109,105,115,115,105,110,103,32,105,110,32,100,97,116,101,32,116,97,98,108,101,0,0,0,0,0,0,0,0,6,0,0,0,3,0,0,0,0,0,0,0,4,0,0,0,1,0,0,0,2,0,0,0,128,23,0,0,136,23,0,0,144,23,0,0,152,23,0,0,168,23,0,0,176,22,0,0,0,0,0,0,0,0,0,0,97,108,108,0,0,0,0,0,99,111,108,108,97,116,101,0,99,116,121,112,101,0,0,0,109,111,110,101,116,97,114,121,0,0,0,0,0,0,0,0,110,117,109,101,114,105,99,0,37,99,0,0,0,0,0,0,42,116,0,0,0,0,0,0,119,100,97,121,0,0,0,0,121,100,97,121,0,0,0,0,97,65,98,66,99,100,72,73,106,109,77,112,83,85,119,87,120,88,121,89,122,37,0,0,105,110,118,97,108,105,100,32,99,111,110,118,101,114,115,105,111,110,32,115,112,101,99,105,102,105,101,114,32,39,37,37,37,115,39,0,0,0,0,0,60,37,115,62,32,97,116,32,108,105,110,101,32,37,100,32,110,111,116,32,105,110,115,105,100,101,32,97,32,108,111,111,112,0,0,0,0,0,0,0,110,111,32,118,105,115,105,98,108,101,32,108,97,98,101,108,32,39,37,115,39,32,102,111,114,32,60,103,111,116,111,62,32,97,116,32,108,105,110,101,32,37,100,0,0,0,0,0,60,103,111,116,111,32,37,115,62,32,97,116,32,108,105,110,101,32,37,100,32,106,117,109,112,115,32,105,110,116,111,32,116,104,101,32,115,99,111,112,101,32,111,102,32,108,111,99,97,108,32,39,37,115,39,0,98,114,101,97,107,0,0,0,108,97,98,101,108,115,47,103,111,116,111,115,0,0,0,0,37,115,32,101,120,112,101,99,116,101,100,0,0,0,0,0,115,121,110,116,97,120,32,101,114,114,111,114,0,0,0,0,67,32,108,101,118,101,108,115,0,0,0,0,0,0,0,0,6,6,6,6,7,7,7,7,7,7,10,9,5,4,3,3,3,3,3,3,3,3,3,3,3,3,2,2,1,1,0,0,99,97,110,110,111,116,32,117,115,101,32,39,46,46,46,39,32,111,117,116,115,105,100,101,32,97,32,118,97,114,97,114,103,32,102,117,110,99,116,105,111,110,0,0,0,0,0,0,115,101,108,102,0,0,0,0,60,110,97,109,101,62,32,111,114,32,39,46,46,46,39,32,101,120,112,101,99,116,101,100,0,0,0,0,0,0,0,0,108,111,99,97,108,32,118,97,114,105,97,98,108,101,115,0,102,117,110,99,116,105,111,110,115,0,0,0,0,0,0,0,105,116,101,109,115,32,105,110,32,97,32,99,111,110,115,116,114,117,99,116,111,114,0,0,109,97,105,110,32,102,117,110,99,116,105,111,110,0,0,0,102,117,110,99,116,105,111,110,32,97,116,32,108,105,110,101,32,37,100,0,0,0,0,0,116,111,111,32,109,97,110,121,32,37,115,32,40,108,105,109,105,116,32,105,115,32,37,100,41,32,105,110,32,37,115,0,102,117,110,99,116,105,111,110,32,97,114,103,117,109,101,110,116,115,32,101,120,112,101,99,116,101,100,0,0,0,0,0,117,110,101,120,112,101,99,116,101,100,32,115,121,109,98,111,108,0,0,0,0,0,0,0,108,97,98,101,108,32,39,37,115,39,32,97,108,114,101,97,100,121,32,100,101,102,105,110,101,100,32,111,110,32,108,105,110,101,32,37,100,0,0,0,39,61,39,32,111,114,32,39,105,110,39,32,101,120,112,101,99,116,101,100,0,0,0,0,40,102,111,114,32,103,101,110,101,114,97,116,111,114,41,0,40,102,111,114,32,115,116,97,116,101,41,0,0,0,0,0,40,102,111,114,32,99,111,110,116,114,111,108,41,0,0,0,40,102,111,114,32,105,110,100,101,120,41,0,0,0,0,0,40,102,111,114,32,108,105,109,105,116,41,0,0,0,0,0,40,102,111,114,32,115,116,101,112,41,0,0,0,0,0,0,37,115,32,101,120,112,101,99,116,101,100,32,40,116,111,32,99,108,111,115,101,32,37,115,32,97,116,32,108,105,110,101,32,37,100,41,0,0,0,0,117,112,118,97,108,117,101,115,0,0,0,0,0,0,0,0,110,111,116,32,101,110,111,117,103,104,32,109,101,109,111,114,121,0,0,0,0,0,0,0,144,27,0,0,64,0,0,0,152,27,0,0,65,0,0,0,160,27,0,0,66,0,0,0,168,27,0,0,67,0,0,0,176,27,0,0,68,0,0,0,184,27,0,0,69,0,0,0,192,27,0,0,70,0,0,0,200,27,0,0,71,0,0,0,208,27,0,0,72,0,0,0,216,27,0,0,73,0,0,0,224,27,0,0,74,0,0,0,232,27,0,0,75,0,0,0,240,27,0,0,76,0,0,0,248,27,0,0,77,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,95,95,105,110,100,101,120,0,98,121,116,101,0,0,0,0,99,104,97,114,0,0,0,0,100,117,109,112,0,0,0,0,102,105,110,100,0,0,0,0,102,111,114,109,97,116,0,0,103,109,97,116,99,104,0,0,103,115,117,98,0,0,0,0,108,101,110,0,0,0,0,0,108,111,119,101,114,0,0,0,109,97,116,99,104,0,0,0,114,101,112,0,0,0,0,0,114,101,118,101,114,115,101,0,115,117,98,0,0,0,0,0,117,112,112,101,114,0,0,0,114,101,115,117,108,116,105,110,103,32,115,116,114,105,110,103,32,116,111,111,32,108,97,114,103,101,0,0,0,0,0,0,116,111,111,32,109,97,110,121,32,99,97,112,116,117,114,101,115,0,0,0,0,0,0,0,105,110,118,97,108,105,100,32,99,97,112,116,117,114,101,32,105,110,100,101,120,0,0,0,117,110,102,105,110,105,115,104,101,100,32,99,97,112,116,117,114,101,0,0,0,0,0,0,112,97,116,116,101,114,110,32,116,111,111,32,99,111,109,112,108,101,120,0,0,0,0,0,109,105,115,115,105,110,103,32,39,91,39,32,97,102,116,101,114,32,39,37,37,102,39,32,105,110,32,112,97,116,116,101,114,110,0,0,0,0,0,0,105,110,118,97,108,105,100,32,99,97,112,116,117,114,101,32,105,110,100,101,120,32,37,37,37,100,0,0,0,0,0,0,109,97,108,102,111,114,109,101,100,32,112,97,116,116,101,114,110,32,40,101,110,100,115,32,119,105,116,104,32,39,37,37,39,41,0,0,0,0,0,0,109,97,108,102,111,114,109,101,100,32,112,97,116,116,101,114,110,32,40,109,105,115,115,105,110,103,32,39,93,39,41,0,109,97,108,102,111,114,109,101,100,32,112,97,116,116,101,114,110,32,40,109,105,115,115,105,110,103,32,97,114,103,117,109,101,110,116,115,32,116,111,32,39,37,37,98,39,41,0,0,105,110,118,97,108,105,100,32,112,97,116,116,101,114,110,32,99,97,112,116,117,114,101,0,94,36,42,43,63,46,40,91,37,45,0,0,0,0,0,0,115,116,114,105,110,103,47,102,117,110,99,116,105,111,110,47,116,97,98,108,101,32,101,120,112,101,99,116,101,100,0,0,105,110,118,97,108,105,100,32,114,101,112,108,97,99,101,109,101,110,116,32,118,97,108,117,101,32,40,97,32,37,115,41,0,0,0,0,0,0,0,0,105,110,118,97,108,105,100,32,117,115,101,32,111,102,32,39,37,99,39,32,105,110,32,114,101,112,108,97,99,101,109,101,110,116,32,115,116,114,105,110,103,0,0,0,0,0,0,0,110,111,32,118,97,108,117,101,0,0,0,0,0,0,0,0,110,111,116,32,97,32,110,117,109,98,101,114,32,105,110,32,112,114,111,112,101,114,32,114,97,110,103,101,0,0,0,0,110,111,116,32,97,32,110,111,110,45,110,101,103,97,116,105,118,101,32,110,117,109,98,101,114,32,105,110,32,112,114,111,112,101,114,32,114,97,110,103,101,0,0,0,0,0,0,0,105,110,118,97,108,105,100,32,111,112,116,105,111,110,32,39,37,37,37,99,39,32,116,111,32,39,102,111,114,109,97,116,39,0,0,0,0,0,0,0,92,37,100,0,0,0,0,0,92,37,48,51,100,0,0,0,45,43,32,35,48,0,0,0,105,110,118,97,108,105,100,32,102,111,114,109,97,116,32,40,114,101,112,101,97,116,101,100,32,102,108,97,103,115,41,0,105,110,118,97,108,105,100,32,102,111,114,109,97,116,32,40,119,105,100,116,104,32,111,114,32,112,114,101,99,105,115,105,111,110,32,116,111,111,32,108,111,110,103,41,0,0,0,0,117,110,97,98,108,101,32,116,111,32,100,117,109,112,32,103,105,118,101,110,32,102,117,110,99,116,105,111,110,0,0,0,118,97,108,117,101,32,111,117,116,32,111,102,32,114,97,110,103,101,0,0,0,0,0,0,115,116,114,105,110,103,32,115,108,105,99,101,32,116,111,111,32,108,111,110,103,0,0,0,116,97,98,108,101,32,105,110,100,101,120,32,105,115,32,110,105,108,0,0,0,0,0,0,116,97,98,108,101,32,105,110,100,101,120,32,105,115,32,78,97,78,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,116,97,98,108,101,32,111,118,101,114,102,108,111,119,0,0,105,110,118,97,108,105,100,32,107,101,121,32,116,111,32,39,110,101,120,116,39,0,0,0,224,31,0,0,78,0,0,0,232,31,0,0,79,0,0,0,240,31,0,0,80,0,0,0,248,31,0,0,81,0,0,0,216,31,0,0,82,0,0,0,0,32,0,0,83,0,0,0,8,32,0,0,84,0,0,0,0,0,0,0,0,0,0,0,117,110,112,97,99,107,0,0,99,111,110,99,97,116,0,0,109,97,120,110,0,0,0,0,105,110,115,101,114,116,0,0,112,97,99,107,0,0,0,0,114,101,109,111,118,101,0,0,115,111,114,116,0,0,0,0,0,0,0,0,0,0,0,0,105,110,118,97,108,105,100,32,111,114,100,101,114,32,102,117,110,99,116,105,111,110,32,102,111,114,32,115,111,114,116,105,110,103,0,0,0,0,0,0,112,111,115,105,116,105,111,110,32,111,117,116,32,111,102,32,98,111,117,110,100,115,0,0,116,111,111,32,109,97,110,121,32,114,101,115,117,108,116,115,32,116,111,32,117,110,112,97,99,107,0,0,0,0,0,0,110,0,0,0,0,0,0,0,119,114,111,110,103,32,110,117,109,98,101,114,32,111,102,32,97,114,103,117,109,101,110,116,115,32,116,111,32,39,105,110,115,101,114,116,39,0,0,0,105,110,118,97,108,105,100,32,118,97,108,117,101,32,40,37,115,41,32,97,116,32,105,110,100,101,120,32,37,100,32,105,110,32,116,97,98,108,101,32,102,111,114,32,39,99,111,110,99,97,116,39,0,0,0,0,110,111,32,118,97,108,117,101,0,0,0,0,0,0,0,0,110,105,108,0,0,0,0,0,98,111,111,108,101,97,110,0,117,115,101,114,100,97,116,97,0,0,0,0,0,0,0,0,110,117,109,98,101,114,0,0,115,116,114,105,110,103,0,0,116,97,98,108,101,0,0,0,102,117,110,99,116,105,111,110,0,0,0,0,0,0,0,0,116,104,114,101,97,100,0,0,112,114,111,116,111,0,0,0,117,112,118,97,108,0,0,0,224,32,0,0,240,32,0,0,248,32,0,0,0,33,0,0,16,33,0,0,24,33,0,0,32,33,0,0,40,33,0,0,0,33,0,0,56,33,0,0,64,33,0,0,72,33,0,0,200,33,0,0,208,33,0,0,224,33,0,0,232,33,0,0,240,33,0,0,248,33,0,0,0,34,0,0,8,34,0,0,16,34,0,0,24,34,0,0,32,34,0,0,40,34,0,0,48,34,0,0,56,34,0,0,64,34,0,0,72,34,0,0,88,34,0,0,0,0,0,0,95,95,105,110,100,101,120,0,95,95,110,101,119,105,110,100,101,120,0,0,0,0,0,0,95,95,103,99,0,0,0,0,95,95,109,111,100,101,0,0,95,95,108,101,110,0,0,0,95,95,101,113,0,0,0,0,95,95,97,100,100,0,0,0,95,95,115,117,98,0,0,0,95,95,109,117,108,0,0,0,95,95,100,105,118,0,0,0,95,95,109,111,100,0,0,0,95,95,112,111,119,0,0,0,95,95,117,110,109,0,0,0,95,95,108,116,0,0,0,0,95,95,108,101,0,0,0,0,95,95,99,111,110,99,97,116,0,0,0,0,0,0,0,0,95,95,99,97,108,108,0,0,98,105,110,97,114,121,32,115,116,114,105,110,103,0,0,0,25,147,13,10,26,10,0,0,116,114,117,110,99,97,116,101,100,0,0,0,0,0,0,0,37,115,58,32,37,115,32,112,114,101,99,111,109,112,105,108,101,100,32,99,104,117,110,107,0,0,0,0,0,0,0,0,99,111,114,114,117,112,116,101,100,0,0,0,0,0,0,0,110,111,116,32,97,0,0,0,118,101,114,115,105,111,110,32,109,105,115,109,97,116,99,104,32,105,110,0,0,0,0,0,105,110,99,111,109,112,97,116,105,98,108,101,0,0,0,0,37,46,49,52,103,0,0,0,105,110,100,101,120,0,0,0,108,111,111,112,32,105,110,32,103,101,116,116,97,98,108,101,0,0,0,0,0,0,0,0,108,111,111,112,32,105,110,32,115,101,116,116,97,98,108,101,0,0,0,0,0,0,0,0,115,116,114,105,110,103,32,108,101,110,103,116,104,32,111,118,101,114,102,108,111,119,0,0,103,101,116,32,108,101,110,103,116,104,32,111,102,0,0,0,39,102,111,114,39,32,105,110,105,116,105,97,108,32,118,97,108,117,101,32,109,117,115,116,32,98,101,32,97,32,110,117,109,98,101,114,0,0,0,0,39,102,111,114,39,32,108,105,109,105,116,32,109,117,115,116,32,98,101,32,97,32,110,117,109,98,101,114,0,0,0,0,39,102,111,114,39,32,115,116,101,112,32,109,117,115,116,32,98,101,32,97,32,110,117,109,98,101,114,0,0,0,0,0,95,71,0,0,0,0,0,0,152,36,0,0,85,0,0,0,160,36,0,0,86,0,0,0,176,36,0,0,87,0,0,0,184,36,0,0,88,0,0,0,192,36,0,0,89,0,0,0,208,36,0,0,90,0,0,0,216,36,0,0,91,0,0,0,232,36,0,0,92,0,0,0,240,36,0,0,92,0,0,0,0,37,0,0,93,0,0,0,8,37,0,0,94,0,0,0,16,37,0,0,95,0,0,0,24,37,0,0,96,0,0,0,32,37,0,0,97,0,0,0,48,37,0,0,98,0,0,0,56,37,0,0,99,0,0,0,64,37,0,0,100,0,0,0,72,37,0,0,101,0,0,0,80,37,0,0,102,0,0,0,96,37,0,0,103,0,0,0,112,37,0,0,104,0,0,0,128,37,0,0,105,0,0,0,136,37,0,0,106,0,0,0,0,0,0,0,0,0,0,0,76,117,97,32,53,46,50,0,95,86,69,82,83,73,79,78,0,0,0,0,0,0,0,0,97,115,115,101,114,116,0,0,99,111,108,108,101,99,116,103,97,114,98,97,103,101,0,0,100,111,102,105,108,101,0,0,101,114,114,111,114,0,0,0,103,101,116,109,101,116,97,116,97,98,108,101,0,0,0,0,105,112,97,105,114,115,0,0,108,111,97,100,102,105,108,101,0,0,0,0,0,0,0,0,108,111,97,100,0,0,0,0,108,111,97,100,115,116,114,105,110,103,0,0,0,0,0,0,110,101,120,116,0,0,0,0,112,97,105,114,115,0,0,0,112,99,97,108,108,0,0,0,112,114,105,110,116,0,0,0,114,97,119,101,113,117,97,108,0,0,0,0,0,0,0,0,114,97,119,108,101,110,0,0,114,97,119,103,101,116,0,0,114,97,119,115,101,116,0,0,115,101,108,101,99,116,0,0,115,101,116,109,101,116,97,116,97,98,108,101,0,0,0,0,116,111,110,117,109,98,101,114,0,0,0,0,0,0,0,0,116,111,115,116,114,105,110,103,0,0,0,0,0,0,0,0,116,121,112,101,0,0,0,0,120,112,99,97,108,108,0,0,118,97,108,117,101,32,101,120,112,101,99,116,101,100,0,0,115,116,97,99,107,32,111,118,101,114,102,108,111,119,0,0,98,97,115,101,32,111,117,116,32,111,102,32,114,97,110,103,101,0,0,0,0,0,0,0,32,12,10,13,9,11,0,0,110,105,108,32,111,114,32,116,97,98,108,101,32,101,120,112,101,99,116,101,100,0,0,0,95,95,109,101,116,97,116,97,98,108,101,0,0,0,0,0,99,97,110,110,111,116,32,99,104,97,110,103,101,32,97,32,112,114,111,116,101,99,116,101,100,32,109,101,116,97,116,97,98,108,101,0,0,0,0,0,105,110,100,101,120,32,111,117,116,32,111,102,32,114,97,110,103,101,0,0,0,0,0,0,116,97,98,108,101,32,111,114,32,115,116,114,105,110,103,32,101,120,112,101,99,116,101,100,0,0,0,0,0,0,0,0,39,116,111,115,116,114,105,110,103,39,32,109,117,115,116,32,114,101,116,117,114,110,32,97,32,115,116,114,105,110,103,32,116,111,32,39,112,114,105,110,116,39,0,0,0,0,0,0,95,95,112,97,105,114,115,0,98,116,0,0,0,0,0,0,61,40,108,111,97,100,41,0,116,111,111,32,109,97,110,121,32,110,101,115,116,101,100,32,102,117,110,99,116,105,111,110,115,0,0,0,0,0,0,0,114,101,97,100,101,114,32,102,117,110,99,116,105,111,110,32,109,117,115,116,32,114,101,116,117,114,110,32,97,32,115,116,114,105,110,103,0,0,0,0,95,95,105,112,97,105,114,115,0,0,0,0,0,0,0,0,40,39,0,0,48,39,0,0,56,39,0,0,64,39,0,0,72,39,0,0,80,39,0,0,96,39,0,0,112,39,0,0,128,39,0,0,144,39,0,0,160,39,0,0,0,0,0,0,115,116,111,112,0,0,0,0,114,101,115,116,97,114,116,0,99,111,108,108,101,99,116,0,99,111,117,110,116,0,0,0,115,116,101,112,0,0,0,0,115,101,116,112,97,117,115,101,0,0,0,0,0,0,0,0,115,101,116,115,116,101,112,109,117,108,0,0,0,0,0,0,115,101,116,109,97,106,111,114,105,110,99,0,0,0,0,0,105,115,114,117,110,110,105,110,103,0,0,0,0,0,0,0,103,101,110,101,114,97,116,105,111,110,97,108,0,0,0,0,105,110,99,114,101,109,101,110,116,97,108,0,0,0,0,0,0,0,0,0,1,0,0,0,2,0,0,0,3,0,0,0,5,0,0,0,6,0,0,0,7,0,0,0,8,0,0,0,9,0,0,0,10,0,0,0,11,0,0,0,0,0,0,0,37,115,0,0,0,0,0,0,97,115,115,101,114,116,105,111,110,32,102,97,105,108,101,100,33,0,0,0,0,0,0,0,104,40,0,0,107], "i8", ALLOC_NONE, Runtime.GLOBAL_BASE);
+/* memory initializer */ allocate([112,40,0,0,108,0,0,0,120,40,0,0,109,0,0,0,128,40,0,0,110,0,0,0,136,40,0,0,111,0,0,0,144,40,0,0,112,0,0,0,152,40,0,0,113,0,0,0,160,40,0,0,114,0,0,0,168,40,0,0,115,0,0,0,176,40,0,0,116,0,0,0,184,40,0,0,117,0,0,0,192,40,0,0,118,0,0,0,0,0,0,0,0,0,0,0,97,114,115,104,105,102,116,0,98,97,110,100,0,0,0,0,98,110,111,116,0,0,0,0,98,111,114,0,0,0,0,0,98,120,111,114,0,0,0,0,98,116,101,115,116,0,0,0,101,120,116,114,97,99,116,0,108,114,111,116,97,116,101,0,108,115,104,105,102,116,0,0,114,101,112,108,97,99,101,0,114,114,111,116,97,116,101,0,114,115,104,105,102,116,0,0,102,105,101,108,100,32,99,97,110,110,111,116,32,98,101,32,110,101,103,97,116,105,118,101,0,0,0,0,0,0,0,0,119,105,100,116,104,32,109,117,115,116,32,98,101,32,112,111,115,105,116,105,118,101,0,0,116,114,121,105,110,103,32,116,111,32,97,99,99,101,115,115,32,110,111,110,45,101,120,105,115,116,101,110,116,32,98,105,116,115,0,0,0,0,0,0,102,117,110,99,116,105,111,110,32,111,114,32,101,120,112,114,101,115,115,105,111,110,32,116,111,111,32,99,111,109,112,108,101,120,0,0,0,0,0,0,99,111,110,115,116,114,117,99,116,111,114,32,116,111,111,32,108,111,110,103,0,0,0,0,99,111,110,115,116,97,110,116,115,0,0,0,0,0,0,0,111,112,99,111,100,101,115,0,99,111,110,116,114,111,108,32,115,116,114,117,99,116,117,114,101,32,116,111,111,32,108,111,110,103,0,0,0,0,0,0,216,41,0,0,119,0,0,0,224,41,0,0,120,0,0,0,232,41,0,0,121,0,0,0,240,41,0,0,122,0,0,0,248,41,0,0,123,0,0,0,0,42,0,0,124,0,0,0,0,0,0,0,0,0,0,0,99,114,101,97,116,101,0,0,114,101,115,117,109,101,0,0,114,117,110,110,105,110,103,0,115,116,97,116,117,115,0,0,119,114,97,112,0,0,0,0,121,105,101,108,100,0,0,0,116,111,111,32,109,97,110,121,32,97,114,103,117,109,101,110,116,115,32,116,111,32,114,101,115,117,109,101,0,0,0,0,99,97,110,110,111,116,32,114,101,115,117,109,101,32,100,101,97,100,32,99,111,114,111,117,116,105,110,101,0,0,0,0,116,111,111,32,109,97,110,121,32,114,101,115,117,108,116,115,32,116,111,32,114,101,115,117,109,101,0,0,0,0,0,0,99,111,114,111,117,116,105,110,101,32,101,120,112,101,99,116,101,100,0,0,0,0,0,0,115,117,115,112,101,110,100,101,100,0,0,0,0,0,0,0,110,111,114,109,97,108,0,0,100,101,97,100,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8,8,8,8,8,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,12,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,22,22,22,22,22,22,22,22,22,22,4,4,4,4,4,4,4,21,21,21,21,21,21,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,4,4,4,4,5,4,21,21,21,21,21,21,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,4,4,4,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,48,44,0,0,125,0,0,0,56,44,0,0,126,0,0,0,72,44,0,0,127,0,0,0,80,44,0,0,128,0,0,0,88,44,0,0,129,0,0,0,104,44,0,0,130,0,0,0,120,44,0,0,131,0,0,0,136,44,0,0,132,0,0,0,152,44,0,0,133,0,0,0,168,44,0,0,134,0,0,0,184,44,0,0,135,0,0,0,200,44,0,0,136,0,0,0,208,44,0,0,137,0,0,0,224,44,0,0,138,0,0,0,240,44,0,0,139,0,0,0,0,45,0,0,140,0,0,0,0,0,0,0,0,0,0,0,100,101,98,117,103,0,0,0,103,101,116,117,115,101,114,118,97,108,117,101,0,0,0,0,103,101,116,104,111,111,107,0,103,101,116,105,110,102,111,0,103,101,116,108,111,99,97,108,0,0,0,0,0,0,0,0,103,101,116,114,101,103,105,115,116,114,121,0,0,0,0,0,103,101,116,109,101,116,97,116,97,98,108,101,0,0,0,0,103,101,116,117,112,118,97,108,117,101,0,0,0,0,0,0,117,112,118,97,108,117,101,106,111,105,110,0,0,0,0,0,117,112,118,97,108,117,101,105,100,0,0,0,0,0,0,0,115,101,116,117,115,101,114,118,97,108,117,101,0,0,0,0,115,101,116,104,111,111,107,0,115,101,116,108,111,99,97,108,0,0,0,0,0,0,0,0,115,101,116,109,101,116,97,116,97,98,108,101,0,0,0,0,115,101,116,117,112,118,97,108,117,101,0,0,0,0,0,0,116,114,97,99,101,98,97,99,107,0,0,0,0,0,0,0,110,105,108,32,111,114,32,116,97,98,108,101,32,101,120,112,101,99,116,101,100,0,0,0,108,101,118,101,108,32,111,117,116,32,111,102,32,114,97,110,103,101,0,0,0,0,0,0,95,72,75,69,89,0,0,0,107,0,0,0,0,0,0,0,95,95,109,111,100,101,0,0,112,45,0,0,120,45,0,0,128,45,0,0,136,45,0,0,144,45,0,0,0,0,0,0,99,97,108,108,0,0,0,0,114,101,116,117,114,110,0,0,108,105,110,101,0,0,0,0,99,111,117,110,116,0,0,0,116,97,105,108,32,99,97,108,108,0,0,0,0,0,0,0,102,117,108,108,32,117,115,101,114,100,97,116,97,32,101,120,112,101,99,116,101,100,44,32,103,111,116,32,108,105,103,104,116,32,117,115,101,114,100,97,116,97,0,0,0,0,0,0,62,117,0,0,0,0,0,0,105,110,118,97,108,105,100,32,117,112,118,97,108,117,101,32,105,110,100,101,120,0,0,0,76,117,97,32,102,117,110,99,116,105,111,110,32,101,120,112,101,99,116,101,100,0,0,0,102,108,110,83,116,117,0,0,62,37,115,0,0,0,0,0,102,117,110,99,116,105,111,110,32,111,114,32,108,101,118,101,108,32,101,120,112,101,99,116,101,100,0,0,0,0,0,0,105,110,118,97,108,105,100,32,111,112,116,105,111,110,0,0,115,111,117,114,99,101,0,0,115,104,111,114,116,95,115,114,99,0,0,0,0,0,0,0,108,105,110,101,100,101,102,105,110,101,100,0,0,0,0,0,108,97,115,116,108,105,110,101,100,101,102,105,110,101,100,0,119,104,97,116,0,0,0,0,99,117,114,114,101,110,116,108,105,110,101,0,0,0,0,0,110,117,112,115,0,0,0,0,110,112,97,114,97,109,115,0,105,115,118,97,114,97,114,103,0,0,0,0,0,0,0,0,110,97,109,101,0,0,0,0,110,97,109,101,119,104,97,116,0,0,0,0,0,0,0,0,105,115,116,97,105,108,99,97,108,108,0,0,0,0,0,0,97,99,116,105,118,101,108,105,110,101,115,0,0,0,0,0,102,117,110,99,0,0,0,0,101,120,116,101,114,110,97,108,32,104,111,111,107,0,0,0,108,117,97,95,100,101,98,117,103,62,32,0,0,0,0,0,99,111,110,116,10,0,0,0,61,40,100,101,98,117,103,32,99,111,109,109,97,110,100,41,0,0,0,0,0,0,0,0,37,115,10,0,0,0,0,0,80,49,0,0,88,49,0,0,96,49,0,0,104,49,0,0,112,49,0,0,120,49,0,0,128,49,0,0,136,49,0,0,144,49,0,0,160,49,0,0,168,49,0,0,176,49,0,0,184,49,0,0,192,49,0,0,200,49,0,0,208,49,0,0,216,49,0,0,224,49,0,0,232,49,0,0,240,49,0,0,248,49,0,0,0,50,0,0,8,50,0,0,16,50,0,0,24,50,0,0,32,50,0,0,40,50,0,0,48,50,0,0,56,50,0,0,64,50,0,0,72,50,0,0,88,50,0,0,96,50,0,0,0,0,0,0,39,37,99,39,0,0,0,0,99,104,97,114,40,37,100,41,0,0,0,0,0,0,0,0,39,37,115,39,0,0,0,0,95,69,78,86,0,0,0,0,105,110,118,97,108,105,100,32,108,111,110,103,32,115,116,114,105,110,103,32,100,101,108,105,109,105,116,101,114,0,0,0,46,0,0,0,0,0,0,0,69,101,0,0,0,0,0,0,88,120,0,0,0,0,0,0,80,112,0,0,0,0,0,0,43,45,0,0,0,0,0,0,109,97,108,102,111,114,109,101,100,32,110,117,109,98,101,114,0,0,0,0,0,0,0,0,108,101,120,105,99,97,108,32,101,108,101,109,101,110,116,32,116,111,111,32,108,111,110,103,0,0,0,0,0,0,0,0,117,110,102,105,110,105,115,104,101,100,32,115,116,114,105,110,103,0,0,0,0,0,0,0,105,110,118,97,108,105,100,32,101,115,99,97,112,101,32,115,101,113,117,101,110,99,101,0,100,101,99,105,109,97,108,32,101,115,99,97,112,101,32,116,111,111,32,108,97,114,103,101,0,0,0,0,0,0,0,0,104,101,120,97,100,101,99,105,109,97,108,32,100,105,103,105,116,32,101,120,112,101,99,116,101,100,0,0,0,0,0,0,117,110,102,105,110,105,115,104,101,100,32,108,111,110,103,32,115,116,114,105,110,103,0,0,117,110,102,105,110,105,115,104,101,100,32,108,111,110,103,32,99,111,109,109,101,110,116,0,99,104,117,110,107,32,104,97,115,32,116,111,111,32,109,97,110,121,32,108,105,110,101,115,0,0,0,0,0,0,0,0,37,115,58,37,100,58,32,37,115,0,0,0,0,0,0,0,37,115,32,110,101,97,114,32,37,115,0,0,0,0,0,0,97,110,100,0,0,0,0,0,98,114,101,97,107,0,0,0,100,111,0,0,0,0,0,0,101,108,115,101,0,0,0,0,101,108,115,101,105,102,0,0,101,110,100,0,0,0,0,0,102,97,108,115,101,0,0,0,102,111,114,0,0,0,0,0,102,117,110,99,116,105,111,110,0,0,0,0,0,0,0,0,103,111,116,111,0,0,0,0,105,102,0,0,0,0,0,0,105,110,0,0,0,0,0,0,108,111,99,97,108,0,0,0,110,105,108,0,0,0,0,0,110,111,116,0,0,0,0,0,111,114,0,0,0,0,0,0,114,101,112,101,97,116,0,0,114,101,116,117,114,110,0,0,116,104,101,110,0,0,0,0,116,114,117,101,0,0,0,0,117,110,116,105,108,0,0,0,119,104,105,108,101,0,0,0,46,46,0,0,0,0,0,0,46,46,46,0,0,0,0,0,61,61,0,0,0,0,0,0,62,61,0,0,0,0,0,0,60,61,0,0,0,0,0,0,126,61,0,0,0,0,0,0,58,58,0,0,0,0,0,0,60,101,111,102,62,0,0,0,60,110,117,109,98,101,114,62,0,0,0,0,0,0,0,0,60,110,97,109,101,62,0,0,60,115,116,114,105,110,103,62,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,105,110,102,105,110,105,116,121,0,0,0,0,0,0,0,0,110,97,110,0,0,0,0,0,95,112,137,0,255,9,47,15,10,0,0,0,100,0,0,0,232,3,0,0,16,39,0,0,160,134,1,0,64,66,15,0,128,150,152,0,0,225,245,5], "i8", ALLOC_NONE, Runtime.GLOBAL_BASE+10240);
+
+
+
+
+var tempDoublePtr = Runtime.alignMemory(allocate(12, "i8", ALLOC_STATIC), 8);
+
+assert(tempDoublePtr % 8 == 0);
+
+function copyTempFloat(ptr) { // functions, because inlining this code increases code size too much
+
+ HEAP8[tempDoublePtr] = HEAP8[ptr];
+
+ HEAP8[tempDoublePtr+1] = HEAP8[ptr+1];
+
+ HEAP8[tempDoublePtr+2] = HEAP8[ptr+2];
+
+ HEAP8[tempDoublePtr+3] = HEAP8[ptr+3];
+
+}
+
+function copyTempDouble(ptr) {
+
+ HEAP8[tempDoublePtr] = HEAP8[ptr];
+
+ HEAP8[tempDoublePtr+1] = HEAP8[ptr+1];
+
+ HEAP8[tempDoublePtr+2] = HEAP8[ptr+2];
+
+ HEAP8[tempDoublePtr+3] = HEAP8[ptr+3];
+
+ HEAP8[tempDoublePtr+4] = HEAP8[ptr+4];
+
+ HEAP8[tempDoublePtr+5] = HEAP8[ptr+5];
+
+ HEAP8[tempDoublePtr+6] = HEAP8[ptr+6];
+
+ HEAP8[tempDoublePtr+7] = HEAP8[ptr+7];
+
+}
+
+
+
+
+ Module["_rand_r"] = _rand_r;
+
+ var ___rand_seed=allocate([0x0273459b, 0, 0, 0], "i32", ALLOC_STATIC);
+ Module["_rand"] = _rand;
+
+
+
+ var ERRNO_CODES={EPERM:1,ENOENT:2,ESRCH:3,EINTR:4,EIO:5,ENXIO:6,E2BIG:7,ENOEXEC:8,EBADF:9,ECHILD:10,EAGAIN:11,EWOULDBLOCK:11,ENOMEM:12,EACCES:13,EFAULT:14,ENOTBLK:15,EBUSY:16,EEXIST:17,EXDEV:18,ENODEV:19,ENOTDIR:20,EISDIR:21,EINVAL:22,ENFILE:23,EMFILE:24,ENOTTY:25,ETXTBSY:26,EFBIG:27,ENOSPC:28,ESPIPE:29,EROFS:30,EMLINK:31,EPIPE:32,EDOM:33,ERANGE:34,ENOMSG:42,EIDRM:43,ECHRNG:44,EL2NSYNC:45,EL3HLT:46,EL3RST:47,ELNRNG:48,EUNATCH:49,ENOCSI:50,EL2HLT:51,EDEADLK:35,ENOLCK:37,EBADE:52,EBADR:53,EXFULL:54,ENOANO:55,EBADRQC:56,EBADSLT:57,EDEADLOCK:35,EBFONT:59,ENOSTR:60,ENODATA:61,ETIME:62,ENOSR:63,ENONET:64,ENOPKG:65,EREMOTE:66,ENOLINK:67,EADV:68,ESRMNT:69,ECOMM:70,EPROTO:71,EMULTIHOP:72,EDOTDOT:73,EBADMSG:74,ENOTUNIQ:76,EBADFD:77,EREMCHG:78,ELIBACC:79,ELIBBAD:80,ELIBSCN:81,ELIBMAX:82,ELIBEXEC:83,ENOSYS:38,ENOTEMPTY:39,ENAMETOOLONG:36,ELOOP:40,EOPNOTSUPP:95,EPFNOSUPPORT:96,ECONNRESET:104,ENOBUFS:105,EAFNOSUPPORT:97,EPROTOTYPE:91,ENOTSOCK:88,ENOPROTOOPT:92,ESHUTDOWN:108,ECONNREFUSED:111,EADDRINUSE:98,ECONNABORTED:103,ENETUNREACH:101,ENETDOWN:100,ETIMEDOUT:110,EHOSTDOWN:112,EHOSTUNREACH:113,EINPROGRESS:115,EALREADY:114,EDESTADDRREQ:89,EMSGSIZE:90,EPROTONOSUPPORT:93,ESOCKTNOSUPPORT:94,EADDRNOTAVAIL:99,ENETRESET:102,EISCONN:106,ENOTCONN:107,ETOOMANYREFS:109,EUSERS:87,EDQUOT:122,ESTALE:116,ENOTSUP:95,ENOMEDIUM:123,EILSEQ:84,EOVERFLOW:75,ECANCELED:125,ENOTRECOVERABLE:131,EOWNERDEAD:130,ESTRPIPE:86};
+
+ var ERRNO_MESSAGES={0:"Success",1:"Not super-user",2:"No such file or directory",3:"No such process",4:"Interrupted system call",5:"I/O error",6:"No such device or address",7:"Arg list too long",8:"Exec format error",9:"Bad file number",10:"No children",11:"No more processes",12:"Not enough core",13:"Permission denied",14:"Bad address",15:"Block device required",16:"Mount device busy",17:"File exists",18:"Cross-device link",19:"No such device",20:"Not a directory",21:"Is a directory",22:"Invalid argument",23:"Too many open files in system",24:"Too many open files",25:"Not a typewriter",26:"Text file busy",27:"File too large",28:"No space left on device",29:"Illegal seek",30:"Read only file system",31:"Too many links",32:"Broken pipe",33:"Math arg out of domain of func",34:"Math result not representable",35:"File locking deadlock error",36:"File or path name too long",37:"No record locks available",38:"Function not implemented",39:"Directory not empty",40:"Too many symbolic links",42:"No message of desired type",43:"Identifier removed",44:"Channel number out of range",45:"Level 2 not synchronized",46:"Level 3 halted",47:"Level 3 reset",48:"Link number out of range",49:"Protocol driver not attached",50:"No CSI structure available",51:"Level 2 halted",52:"Invalid exchange",53:"Invalid request descriptor",54:"Exchange full",55:"No anode",56:"Invalid request code",57:"Invalid slot",59:"Bad font file fmt",60:"Device not a stream",61:"No data (for no delay io)",62:"Timer expired",63:"Out of streams resources",64:"Machine is not on the network",65:"Package not installed",66:"The object is remote",67:"The link has been severed",68:"Advertise error",69:"Srmount error",70:"Communication error on send",71:"Protocol error",72:"Multihop attempted",73:"Cross mount point (not really error)",74:"Trying to read unreadable message",75:"Value too large for defined data type",76:"Given log. name not unique",77:"f.d. invalid for this operation",78:"Remote address changed",79:"Can access a needed shared lib",80:"Accessing a corrupted shared lib",81:".lib section in a.out corrupted",82:"Attempting to link in too many libs",83:"Attempting to exec a shared library",84:"Illegal byte sequence",86:"Streams pipe error",87:"Too many users",88:"Socket operation on non-socket",89:"Destination address required",90:"Message too long",91:"Protocol wrong type for socket",92:"Protocol not available",93:"Unknown protocol",94:"Socket type not supported",95:"Not supported",96:"Protocol family not supported",97:"Address family not supported by protocol family",98:"Address already in use",99:"Address not available",100:"Network interface is not configured",101:"Network is unreachable",102:"Connection reset by network",103:"Connection aborted",104:"Connection reset by peer",105:"No buffer space available",106:"Socket is already connected",107:"Socket is not connected",108:"Can't send after socket shutdown",109:"Too many references",110:"Connection timed out",111:"Connection refused",112:"Host is down",113:"Host is unreachable",114:"Socket already connected",115:"Connection already in progress",116:"Stale file handle",122:"Quota exceeded",123:"No medium (in tape drive)",125:"Operation canceled",130:"Previous owner died",131:"State not recoverable"};
+
+
+ var ___errno_state=0;function ___setErrNo(value) {
+ // For convenient setting and returning of errno.
+ HEAP32[((___errno_state)>>2)]=value;
+ return value;
+ }
+
+ var PATH={splitPath:function (filename) {
+ var splitPathRe = /^(\/?|)([\s\S]*?)((?:\.{1,2}|[^\/]+?|)(\.[^.\/]*|))(?:[\/]*)$/;
+ return splitPathRe.exec(filename).slice(1);
+ },normalizeArray:function (parts, allowAboveRoot) {
+ // if the path tries to go above the root, `up` ends up > 0
+ var up = 0;
+ for (var i = parts.length - 1; i >= 0; i--) {
+ var last = parts[i];
+ if (last === '.') {
+ parts.splice(i, 1);
+ } else if (last === '..') {
+ parts.splice(i, 1);
+ up++;
+ } else if (up) {
+ parts.splice(i, 1);
+ up--;
+ }
+ }
+ // if the path is allowed to go above the root, restore leading ..s
+ if (allowAboveRoot) {
+ for (; up--; up) {
+ parts.unshift('..');
+ }
+ }
+ return parts;
+ },normalize:function (path) {
+ var isAbsolute = path.charAt(0) === '/',
+ trailingSlash = path.substr(-1) === '/';
+ // Normalize the path
+ path = PATH.normalizeArray(path.split('/').filter(function(p) {
+ return !!p;
+ }), !isAbsolute).join('/');
+ if (!path && !isAbsolute) {
+ path = '.';
+ }
+ if (path && trailingSlash) {
+ path += '/';
+ }
+ return (isAbsolute ? '/' : '') + path;
+ },dirname:function (path) {
+ var result = PATH.splitPath(path),
+ root = result[0],
+ dir = result[1];
+ if (!root && !dir) {
+ // No dirname whatsoever
+ return '.';
+ }
+ if (dir) {
+ // It has a dirname, strip trailing slash
+ dir = dir.substr(0, dir.length - 1);
+ }
+ return root + dir;
+ },basename:function (path) {
+ // EMSCRIPTEN return '/'' for '/', not an empty string
+ if (path === '/') return '/';
+ var lastSlash = path.lastIndexOf('/');
+ if (lastSlash === -1) return path;
+ return path.substr(lastSlash+1);
+ },extname:function (path) {
+ return PATH.splitPath(path)[3];
+ },join:function () {
+ var paths = Array.prototype.slice.call(arguments, 0);
+ return PATH.normalize(paths.join('/'));
+ },join2:function (l, r) {
+ return PATH.normalize(l + '/' + r);
+ },resolve:function () {
+ var resolvedPath = '',
+ resolvedAbsolute = false;
+ for (var i = arguments.length - 1; i >= -1 && !resolvedAbsolute; i--) {
+ var path = (i >= 0) ? arguments[i] : FS.cwd();
+ // Skip empty and invalid entries
+ if (typeof path !== 'string') {
+ throw new TypeError('Arguments to path.resolve must be strings');
+ } else if (!path) {
+ continue;
+ }
+ resolvedPath = path + '/' + resolvedPath;
+ resolvedAbsolute = path.charAt(0) === '/';
+ }
+ // At this point the path should be resolved to a full absolute path, but
+ // handle relative paths to be safe (might happen when process.cwd() fails)
+ resolvedPath = PATH.normalizeArray(resolvedPath.split('/').filter(function(p) {
+ return !!p;
+ }), !resolvedAbsolute).join('/');
+ return ((resolvedAbsolute ? '/' : '') + resolvedPath) || '.';
+ },relative:function (from, to) {
+ from = PATH.resolve(from).substr(1);
+ to = PATH.resolve(to).substr(1);
+ function trim(arr) {
+ var start = 0;
+ for (; start < arr.length; start++) {
+ if (arr[start] !== '') break;
+ }
+ var end = arr.length - 1;
+ for (; end >= 0; end--) {
+ if (arr[end] !== '') break;
+ }
+ if (start > end) return [];
+ return arr.slice(start, end - start + 1);
+ }
+ var fromParts = trim(from.split('/'));
+ var toParts = trim(to.split('/'));
+ var length = Math.min(fromParts.length, toParts.length);
+ var samePartsLength = length;
+ for (var i = 0; i < length; i++) {
+ if (fromParts[i] !== toParts[i]) {
+ samePartsLength = i;
+ break;
+ }
+ }
+ var outputParts = [];
+ for (var i = samePartsLength; i < fromParts.length; i++) {
+ outputParts.push('..');
+ }
+ outputParts = outputParts.concat(toParts.slice(samePartsLength));
+ return outputParts.join('/');
+ }};
+
+ var TTY={ttys:[],init:function () {
+ // https://github.com/kripken/emscripten/pull/1555
+ // if (ENVIRONMENT_IS_NODE) {
+ // // currently, FS.init does not distinguish if process.stdin is a file or TTY
+ // // device, it always assumes it's a TTY device. because of this, we're forcing
+ // // process.stdin to UTF8 encoding to at least make stdin reading compatible
+ // // with text files until FS.init can be refactored.
+ // process['stdin']['setEncoding']('utf8');
+ // }
+ },shutdown:function () {
+ // https://github.com/kripken/emscripten/pull/1555
+ // if (ENVIRONMENT_IS_NODE) {
+ // // inolen: any idea as to why node -e 'process.stdin.read()' wouldn't exit immediately (with process.stdin being a tty)?
+ // // isaacs: because now it's reading from the stream, you've expressed interest in it, so that read() kicks off a _read() which creates a ReadReq operation
+ // // inolen: I thought read() in that case was a synchronous operation that just grabbed some amount of buffered data if it exists?
+ // // isaacs: it is. but it also triggers a _read() call, which calls readStart() on the handle
+ // // isaacs: do process.stdin.pause() and i'd think it'd probably close the pending call
+ // process['stdin']['pause']();
+ // }
+ },register:function (dev, ops) {
+ TTY.ttys[dev] = { input: [], output: [], ops: ops };
+ FS.registerDevice(dev, TTY.stream_ops);
+ },stream_ops:{open:function (stream) {
+ var tty = TTY.ttys[stream.node.rdev];
+ if (!tty) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ stream.tty = tty;
+ stream.seekable = false;
+ },close:function (stream) {
+ // flush any pending line data
+ if (stream.tty.output.length) {
+ stream.tty.ops.put_char(stream.tty, 10);
+ }
+ },read:function (stream, buffer, offset, length, pos /* ignored */) {
+ if (!stream.tty || !stream.tty.ops.get_char) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENXIO);
+ }
+ var bytesRead = 0;
+ for (var i = 0; i < length; i++) {
+ var result;
+ try {
+ result = stream.tty.ops.get_char(stream.tty);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ if (result === undefined && bytesRead === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ if (result === null || result === undefined) break;
+ bytesRead++;
+ buffer[offset+i] = result;
+ }
+ if (bytesRead) {
+ stream.node.timestamp = Date.now();
+ }
+ return bytesRead;
+ },write:function (stream, buffer, offset, length, pos) {
+ if (!stream.tty || !stream.tty.ops.put_char) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENXIO);
+ }
+ for (var i = 0; i < length; i++) {
+ try {
+ stream.tty.ops.put_char(stream.tty, buffer[offset+i]);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ }
+ if (length) {
+ stream.node.timestamp = Date.now();
+ }
+ return i;
+ }},default_tty_ops:{get_char:function (tty) {
+ if (!tty.input.length) {
+ var result = null;
+ if (ENVIRONMENT_IS_NODE) {
+ result = process['stdin']['read']();
+ if (!result) {
+ if (process['stdin']['_readableState'] && process['stdin']['_readableState']['ended']) {
+ return null; // EOF
+ }
+ return undefined; // no data available
+ }
+ } else if (typeof window != 'undefined' &&
+ typeof window.prompt == 'function') {
+ // Browser.
+ result = window.prompt('Input: '); // returns null on cancel
+ if (result !== null) {
+ result += '\n';
+ }
+ } else if (typeof readline == 'function') {
+ // Command line.
+ result = readline();
+ if (result !== null) {
+ result += '\n';
+ }
+ }
+ if (!result) {
+ return null;
+ }
+ tty.input = intArrayFromString(result, true);
+ }
+ return tty.input.shift();
+ },put_char:function (tty, val) {
+ if (val === null || val === 10) {
+ Module['print'](tty.output.join(''));
+ tty.output = [];
+ } else {
+ tty.output.push(TTY.utf8.processCChar(val));
+ }
+ }},default_tty1_ops:{put_char:function (tty, val) {
+ if (val === null || val === 10) {
+ Module['printErr'](tty.output.join(''));
+ tty.output = [];
+ } else {
+ tty.output.push(TTY.utf8.processCChar(val));
+ }
+ }}};
+
+ var MEMFS={ops_table:null,CONTENT_OWNING:1,CONTENT_FLEXIBLE:2,CONTENT_FIXED:3,mount:function (mount) {
+ return MEMFS.createNode(null, '/', 16384 | 511 /* 0777 */, 0);
+ },createNode:function (parent, name, mode, dev) {
+ if (FS.isBlkdev(mode) || FS.isFIFO(mode)) {
+ // no supported
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (!MEMFS.ops_table) {
+ MEMFS.ops_table = {
+ dir: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr,
+ lookup: MEMFS.node_ops.lookup,
+ mknod: MEMFS.node_ops.mknod,
+ rename: MEMFS.node_ops.rename,
+ unlink: MEMFS.node_ops.unlink,
+ rmdir: MEMFS.node_ops.rmdir,
+ readdir: MEMFS.node_ops.readdir,
+ symlink: MEMFS.node_ops.symlink
+ },
+ stream: {
+ llseek: MEMFS.stream_ops.llseek
+ }
+ },
+ file: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr
+ },
+ stream: {
+ llseek: MEMFS.stream_ops.llseek,
+ read: MEMFS.stream_ops.read,
+ write: MEMFS.stream_ops.write,
+ allocate: MEMFS.stream_ops.allocate,
+ mmap: MEMFS.stream_ops.mmap
+ }
+ },
+ link: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr,
+ readlink: MEMFS.node_ops.readlink
+ },
+ stream: {}
+ },
+ chrdev: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr
+ },
+ stream: FS.chrdev_stream_ops
+ },
+ };
+ }
+ var node = FS.createNode(parent, name, mode, dev);
+ if (FS.isDir(node.mode)) {
+ node.node_ops = MEMFS.ops_table.dir.node;
+ node.stream_ops = MEMFS.ops_table.dir.stream;
+ node.contents = {};
+ } else if (FS.isFile(node.mode)) {
+ node.node_ops = MEMFS.ops_table.file.node;
+ node.stream_ops = MEMFS.ops_table.file.stream;
+ node.contents = [];
+ node.contentMode = MEMFS.CONTENT_FLEXIBLE;
+ } else if (FS.isLink(node.mode)) {
+ node.node_ops = MEMFS.ops_table.link.node;
+ node.stream_ops = MEMFS.ops_table.link.stream;
+ } else if (FS.isChrdev(node.mode)) {
+ node.node_ops = MEMFS.ops_table.chrdev.node;
+ node.stream_ops = MEMFS.ops_table.chrdev.stream;
+ }
+ node.timestamp = Date.now();
+ // add the new node to the parent
+ if (parent) {
+ parent.contents[name] = node;
+ }
+ return node;
+ },ensureFlexible:function (node) {
+ if (node.contentMode !== MEMFS.CONTENT_FLEXIBLE) {
+ var contents = node.contents;
+ node.contents = Array.prototype.slice.call(contents);
+ node.contentMode = MEMFS.CONTENT_FLEXIBLE;
+ }
+ },node_ops:{getattr:function (node) {
+ var attr = {};
+ // device numbers reuse inode numbers.
+ attr.dev = FS.isChrdev(node.mode) ? node.id : 1;
+ attr.ino = node.id;
+ attr.mode = node.mode;
+ attr.nlink = 1;
+ attr.uid = 0;
+ attr.gid = 0;
+ attr.rdev = node.rdev;
+ if (FS.isDir(node.mode)) {
+ attr.size = 4096;
+ } else if (FS.isFile(node.mode)) {
+ attr.size = node.contents.length;
+ } else if (FS.isLink(node.mode)) {
+ attr.size = node.link.length;
+ } else {
+ attr.size = 0;
+ }
+ attr.atime = new Date(node.timestamp);
+ attr.mtime = new Date(node.timestamp);
+ attr.ctime = new Date(node.timestamp);
+ // NOTE: In our implementation, st_blocks = Math.ceil(st_size/st_blksize),
+ // but this is not required by the standard.
+ attr.blksize = 4096;
+ attr.blocks = Math.ceil(attr.size / attr.blksize);
+ return attr;
+ },setattr:function (node, attr) {
+ if (attr.mode !== undefined) {
+ node.mode = attr.mode;
+ }
+ if (attr.timestamp !== undefined) {
+ node.timestamp = attr.timestamp;
+ }
+ if (attr.size !== undefined) {
+ MEMFS.ensureFlexible(node);
+ var contents = node.contents;
+ if (attr.size < contents.length) contents.length = attr.size;
+ else while (attr.size > contents.length) contents.push(0);
+ }
+ },lookup:function (parent, name) {
+ throw FS.genericErrors[ERRNO_CODES.ENOENT];
+ },mknod:function (parent, name, mode, dev) {
+ return MEMFS.createNode(parent, name, mode, dev);
+ },rename:function (old_node, new_dir, new_name) {
+ // if we're overwriting a directory at new_name, make sure it's empty.
+ if (FS.isDir(old_node.mode)) {
+ var new_node;
+ try {
+ new_node = FS.lookupNode(new_dir, new_name);
+ } catch (e) {
+ }
+ if (new_node) {
+ for (var i in new_node.contents) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTEMPTY);
+ }
+ }
+ }
+ // do the internal rewiring
+ delete old_node.parent.contents[old_node.name];
+ old_node.name = new_name;
+ new_dir.contents[new_name] = old_node;
+ old_node.parent = new_dir;
+ },unlink:function (parent, name) {
+ delete parent.contents[name];
+ },rmdir:function (parent, name) {
+ var node = FS.lookupNode(parent, name);
+ for (var i in node.contents) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTEMPTY);
+ }
+ delete parent.contents[name];
+ },readdir:function (node) {
+ var entries = ['.', '..']
+ for (var key in node.contents) {
+ if (!node.contents.hasOwnProperty(key)) {
+ continue;
+ }
+ entries.push(key);
+ }
+ return entries;
+ },symlink:function (parent, newname, oldpath) {
+ var node = MEMFS.createNode(parent, newname, 511 /* 0777 */ | 40960, 0);
+ node.link = oldpath;
+ return node;
+ },readlink:function (node) {
+ if (!FS.isLink(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ return node.link;
+ }},stream_ops:{read:function (stream, buffer, offset, length, position) {
+ var contents = stream.node.contents;
+ if (position >= contents.length)
+ return 0;
+ var size = Math.min(contents.length - position, length);
+ assert(size >= 0);
+ if (size > 8 && contents.subarray) { // non-trivial, and typed array
+ buffer.set(contents.subarray(position, position + size), offset);
+ } else
+ {
+ for (var i = 0; i < size; i++) {
+ buffer[offset + i] = contents[position + i];
+ }
+ }
+ return size;
+ },write:function (stream, buffer, offset, length, position, canOwn) {
+ var node = stream.node;
+ node.timestamp = Date.now();
+ var contents = node.contents;
+ if (length && contents.length === 0 && position === 0 && buffer.subarray) {
+ // just replace it with the new data
+ if (canOwn && offset === 0) {
+ node.contents = buffer; // this could be a subarray of Emscripten HEAP, or allocated from some other source.
+ node.contentMode = (buffer.buffer === HEAP8.buffer) ? MEMFS.CONTENT_OWNING : MEMFS.CONTENT_FIXED;
+ } else {
+ node.contents = new Uint8Array(buffer.subarray(offset, offset+length));
+ node.contentMode = MEMFS.CONTENT_FIXED;
+ }
+ return length;
+ }
+ MEMFS.ensureFlexible(node);
+ var contents = node.contents;
+ while (contents.length < position) contents.push(0);
+ for (var i = 0; i < length; i++) {
+ contents[position + i] = buffer[offset + i];
+ }
+ return length;
+ },llseek:function (stream, offset, whence) {
+ var position = offset;
+ if (whence === 1) { // SEEK_CUR.
+ position += stream.position;
+ } else if (whence === 2) { // SEEK_END.
+ if (FS.isFile(stream.node.mode)) {
+ position += stream.node.contents.length;
+ }
+ }
+ if (position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ stream.ungotten = [];
+ stream.position = position;
+ return position;
+ },allocate:function (stream, offset, length) {
+ MEMFS.ensureFlexible(stream.node);
+ var contents = stream.node.contents;
+ var limit = offset + length;
+ while (limit > contents.length) contents.push(0);
+ },mmap:function (stream, buffer, offset, length, position, prot, flags) {
+ if (!FS.isFile(stream.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ var ptr;
+ var allocated;
+ var contents = stream.node.contents;
+ // Only make a new copy when MAP_PRIVATE is specified.
+ if ( !(flags & 2) &&
+ (contents.buffer === buffer || contents.buffer === buffer.buffer) ) {
+ // We can't emulate MAP_SHARED when the file is not backed by the buffer
+ // we're mapping to (e.g. the HEAP buffer).
+ allocated = false;
+ ptr = contents.byteOffset;
+ } else {
+ // Try to avoid unnecessary slices.
+ if (position > 0 || position + length < contents.length) {
+ if (contents.subarray) {
+ contents = contents.subarray(position, position + length);
+ } else {
+ contents = Array.prototype.slice.call(contents, position, position + length);
+ }
+ }
+ allocated = true;
+ ptr = _malloc(length);
+ if (!ptr) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOMEM);
+ }
+ buffer.set(contents, ptr);
+ }
+ return { ptr: ptr, allocated: allocated };
+ }}};
+
+ var IDBFS={dbs:{},indexedDB:function () {
+ return window.indexedDB || window.mozIndexedDB || window.webkitIndexedDB || window.msIndexedDB;
+ },DB_VERSION:21,DB_STORE_NAME:"FILE_DATA",mount:function (mount) {
+ // reuse all of the core MEMFS functionality
+ return MEMFS.mount.apply(null, arguments);
+ },syncfs:function (mount, populate, callback) {
+ IDBFS.getLocalSet(mount, function(err, local) {
+ if (err) return callback(err);
+
+ IDBFS.getRemoteSet(mount, function(err, remote) {
+ if (err) return callback(err);
+
+ var src = populate ? remote : local;
+ var dst = populate ? local : remote;
+
+ IDBFS.reconcile(src, dst, callback);
+ });
+ });
+ },getDB:function (name, callback) {
+ // check the cache first
+ var db = IDBFS.dbs[name];
+ if (db) {
+ return callback(null, db);
+ }
+
+ var req;
+ try {
+ req = IDBFS.indexedDB().open(name, IDBFS.DB_VERSION);
+ } catch (e) {
+ return callback(e);
+ }
+ req.onupgradeneeded = function(e) {
+ var db = e.target.result;
+ var transaction = e.target.transaction;
+
+ var fileStore;
+
+ if (db.objectStoreNames.contains(IDBFS.DB_STORE_NAME)) {
+ fileStore = transaction.objectStore(IDBFS.DB_STORE_NAME);
+ } else {
+ fileStore = db.createObjectStore(IDBFS.DB_STORE_NAME);
+ }
+
+ fileStore.createIndex('timestamp', 'timestamp', { unique: false });
+ };
+ req.onsuccess = function() {
+ db = req.result;
+
+ // add to the cache
+ IDBFS.dbs[name] = db;
+ callback(null, db);
+ };
+ req.onerror = function() {
+ callback(this.error);
+ };
+ },getLocalSet:function (mount, callback) {
+ var entries = {};
+
+ function isRealDir(p) {
+ return p !== '.' && p !== '..';
+ };
+ function toAbsolute(root) {
+ return function(p) {
+ return PATH.join2(root, p);
+ }
+ };
+
+ var check = FS.readdir(mount.mountpoint).filter(isRealDir).map(toAbsolute(mount.mountpoint));
+
+ while (check.length) {
+ var path = check.pop();
+ var stat;
+
+ try {
+ stat = FS.stat(path);
+ } catch (e) {
+ return callback(e);
+ }
+
+ if (FS.isDir(stat.mode)) {
+ check.push.apply(check, FS.readdir(path).filter(isRealDir).map(toAbsolute(path)));
+ }
+
+ entries[path] = { timestamp: stat.mtime };
+ }
+
+ return callback(null, { type: 'local', entries: entries });
+ },getRemoteSet:function (mount, callback) {
+ var entries = {};
+
+ IDBFS.getDB(mount.mountpoint, function(err, db) {
+ if (err) return callback(err);
+
+ var transaction = db.transaction([IDBFS.DB_STORE_NAME], 'readonly');
+ transaction.onerror = function() { callback(this.error); };
+
+ var store = transaction.objectStore(IDBFS.DB_STORE_NAME);
+ var index = store.index('timestamp');
+
+ index.openKeyCursor().onsuccess = function(event) {
+ var cursor = event.target.result;
+
+ if (!cursor) {
+ return callback(null, { type: 'remote', db: db, entries: entries });
+ }
+
+ entries[cursor.primaryKey] = { timestamp: cursor.key };
+
+ cursor.continue();
+ };
+ });
+ },loadLocalEntry:function (path, callback) {
+ var stat, node;
+
+ try {
+ var lookup = FS.lookupPath(path);
+ node = lookup.node;
+ stat = FS.stat(path);
+ } catch (e) {
+ return callback(e);
+ }
+
+ if (FS.isDir(stat.mode)) {
+ return callback(null, { timestamp: stat.mtime, mode: stat.mode });
+ } else if (FS.isFile(stat.mode)) {
+ return callback(null, { timestamp: stat.mtime, mode: stat.mode, contents: node.contents });
+ } else {
+ return callback(new Error('node type not supported'));
+ }
+ },storeLocalEntry:function (path, entry, callback) {
+ try {
+ if (FS.isDir(entry.mode)) {
+ FS.mkdir(path, entry.mode);
+ } else if (FS.isFile(entry.mode)) {
+ FS.writeFile(path, entry.contents, { encoding: 'binary', canOwn: true });
+ } else {
+ return callback(new Error('node type not supported'));
+ }
+
+ FS.utime(path, entry.timestamp, entry.timestamp);
+ } catch (e) {
+ return callback(e);
+ }
+
+ callback(null);
+ },removeLocalEntry:function (path, callback) {
+ try {
+ var lookup = FS.lookupPath(path);
+ var stat = FS.stat(path);
+
+ if (FS.isDir(stat.mode)) {
+ FS.rmdir(path);
+ } else if (FS.isFile(stat.mode)) {
+ FS.unlink(path);
+ }
+ } catch (e) {
+ return callback(e);
+ }
+
+ callback(null);
+ },loadRemoteEntry:function (store, path, callback) {
+ var req = store.get(path);
+ req.onsuccess = function(event) { callback(null, event.target.result); };
+ req.onerror = function() { callback(this.error); };
+ },storeRemoteEntry:function (store, path, entry, callback) {
+ var req = store.put(entry, path);
+ req.onsuccess = function() { callback(null); };
+ req.onerror = function() { callback(this.error); };
+ },removeRemoteEntry:function (store, path, callback) {
+ var req = store.delete(path);
+ req.onsuccess = function() { callback(null); };
+ req.onerror = function() { callback(this.error); };
+ },reconcile:function (src, dst, callback) {
+ var total = 0;
+
+ var create = [];
+ Object.keys(src.entries).forEach(function (key) {
+ var e = src.entries[key];
+ var e2 = dst.entries[key];
+ if (!e2 || e.timestamp > e2.timestamp) {
+ create.push(key);
+ total++;
+ }
+ });
+
+ var remove = [];
+ Object.keys(dst.entries).forEach(function (key) {
+ var e = dst.entries[key];
+ var e2 = src.entries[key];
+ if (!e2) {
+ remove.push(key);
+ total++;
+ }
+ });
+
+ if (!total) {
+ return callback(null);
+ }
+
+ var errored = false;
+ var completed = 0;
+ var db = src.type === 'remote' ? src.db : dst.db;
+ var transaction = db.transaction([IDBFS.DB_STORE_NAME], 'readwrite');
+ var store = transaction.objectStore(IDBFS.DB_STORE_NAME);
+
+ function done(err) {
+ if (err) {
+ if (!done.errored) {
+ done.errored = true;
+ return callback(err);
+ }
+ return;
+ }
+ if (++completed >= total) {
+ return callback(null);
+ }
+ };
+
+ transaction.onerror = function() { done(this.error); };
+
+ // sort paths in ascending order so directory entries are created
+ // before the files inside them
+ create.sort().forEach(function (path) {
+ if (dst.type === 'local') {
+ IDBFS.loadRemoteEntry(store, path, function (err, entry) {
+ if (err) return done(err);
+ IDBFS.storeLocalEntry(path, entry, done);
+ });
+ } else {
+ IDBFS.loadLocalEntry(path, function (err, entry) {
+ if (err) return done(err);
+ IDBFS.storeRemoteEntry(store, path, entry, done);
+ });
+ }
+ });
+
+ // sort paths in descending order so files are deleted before their
+ // parent directories
+ remove.sort().reverse().forEach(function(path) {
+ if (dst.type === 'local') {
+ IDBFS.removeLocalEntry(path, done);
+ } else {
+ IDBFS.removeRemoteEntry(store, path, done);
+ }
+ });
+ }};
+
+ var NODEFS={isWindows:false,staticInit:function () {
+ NODEFS.isWindows = !!process.platform.match(/^win/);
+ },mount:function (mount) {
+ assert(ENVIRONMENT_IS_NODE);
+ return NODEFS.createNode(null, '/', NODEFS.getMode(mount.opts.root), 0);
+ },createNode:function (parent, name, mode, dev) {
+ if (!FS.isDir(mode) && !FS.isFile(mode) && !FS.isLink(mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var node = FS.createNode(parent, name, mode);
+ node.node_ops = NODEFS.node_ops;
+ node.stream_ops = NODEFS.stream_ops;
+ return node;
+ },getMode:function (path) {
+ var stat;
+ try {
+ stat = fs.lstatSync(path);
+ if (NODEFS.isWindows) {
+ // On Windows, directories return permission bits 'rw-rw-rw-', even though they have 'rwxrwxrwx', so
+ // propagate write bits to execute bits.
+ stat.mode = stat.mode | ((stat.mode & 146) >> 1);
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ return stat.mode;
+ },realPath:function (node) {
+ var parts = [];
+ while (node.parent !== node) {
+ parts.push(node.name);
+ node = node.parent;
+ }
+ parts.push(node.mount.opts.root);
+ parts.reverse();
+ return PATH.join.apply(null, parts);
+ },flagsToPermissionStringMap:{0:"r",1:"r+",2:"r+",64:"r",65:"r+",66:"r+",129:"rx+",193:"rx+",514:"w+",577:"w",578:"w+",705:"wx",706:"wx+",1024:"a",1025:"a",1026:"a+",1089:"a",1090:"a+",1153:"ax",1154:"ax+",1217:"ax",1218:"ax+",4096:"rs",4098:"rs+"},flagsToPermissionString:function (flags) {
+ if (flags in NODEFS.flagsToPermissionStringMap) {
+ return NODEFS.flagsToPermissionStringMap[flags];
+ } else {
+ return flags;
+ }
+ },node_ops:{getattr:function (node) {
+ var path = NODEFS.realPath(node);
+ var stat;
+ try {
+ stat = fs.lstatSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ // node.js v0.10.20 doesn't report blksize and blocks on Windows. Fake them with default blksize of 4096.
+ // See http://support.microsoft.com/kb/140365
+ if (NODEFS.isWindows && !stat.blksize) {
+ stat.blksize = 4096;
+ }
+ if (NODEFS.isWindows && !stat.blocks) {
+ stat.blocks = (stat.size+stat.blksize-1)/stat.blksize|0;
+ }
+ return {
+ dev: stat.dev,
+ ino: stat.ino,
+ mode: stat.mode,
+ nlink: stat.nlink,
+ uid: stat.uid,
+ gid: stat.gid,
+ rdev: stat.rdev,
+ size: stat.size,
+ atime: stat.atime,
+ mtime: stat.mtime,
+ ctime: stat.ctime,
+ blksize: stat.blksize,
+ blocks: stat.blocks
+ };
+ },setattr:function (node, attr) {
+ var path = NODEFS.realPath(node);
+ try {
+ if (attr.mode !== undefined) {
+ fs.chmodSync(path, attr.mode);
+ // update the common node structure mode as well
+ node.mode = attr.mode;
+ }
+ if (attr.timestamp !== undefined) {
+ var date = new Date(attr.timestamp);
+ fs.utimesSync(path, date, date);
+ }
+ if (attr.size !== undefined) {
+ fs.truncateSync(path, attr.size);
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },lookup:function (parent, name) {
+ var path = PATH.join2(NODEFS.realPath(parent), name);
+ var mode = NODEFS.getMode(path);
+ return NODEFS.createNode(parent, name, mode);
+ },mknod:function (parent, name, mode, dev) {
+ var node = NODEFS.createNode(parent, name, mode, dev);
+ // create the backing node for this in the fs root as well
+ var path = NODEFS.realPath(node);
+ try {
+ if (FS.isDir(node.mode)) {
+ fs.mkdirSync(path, node.mode);
+ } else {
+ fs.writeFileSync(path, '', { mode: node.mode });
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ return node;
+ },rename:function (oldNode, newDir, newName) {
+ var oldPath = NODEFS.realPath(oldNode);
+ var newPath = PATH.join2(NODEFS.realPath(newDir), newName);
+ try {
+ fs.renameSync(oldPath, newPath);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },unlink:function (parent, name) {
+ var path = PATH.join2(NODEFS.realPath(parent), name);
+ try {
+ fs.unlinkSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },rmdir:function (parent, name) {
+ var path = PATH.join2(NODEFS.realPath(parent), name);
+ try {
+ fs.rmdirSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },readdir:function (node) {
+ var path = NODEFS.realPath(node);
+ try {
+ return fs.readdirSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },symlink:function (parent, newName, oldPath) {
+ var newPath = PATH.join2(NODEFS.realPath(parent), newName);
+ try {
+ fs.symlinkSync(oldPath, newPath);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },readlink:function (node) {
+ var path = NODEFS.realPath(node);
+ try {
+ return fs.readlinkSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ }},stream_ops:{open:function (stream) {
+ var path = NODEFS.realPath(stream.node);
+ try {
+ if (FS.isFile(stream.node.mode)) {
+ stream.nfd = fs.openSync(path, NODEFS.flagsToPermissionString(stream.flags));
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },close:function (stream) {
+ try {
+ if (FS.isFile(stream.node.mode) && stream.nfd) {
+ fs.closeSync(stream.nfd);
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },read:function (stream, buffer, offset, length, position) {
+ // FIXME this is terrible.
+ var nbuffer = new Buffer(length);
+ var res;
+ try {
+ res = fs.readSync(stream.nfd, nbuffer, 0, length, position);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ if (res > 0) {
+ for (var i = 0; i < res; i++) {
+ buffer[offset + i] = nbuffer[i];
+ }
+ }
+ return res;
+ },write:function (stream, buffer, offset, length, position) {
+ // FIXME this is terrible.
+ var nbuffer = new Buffer(buffer.subarray(offset, offset + length));
+ var res;
+ try {
+ res = fs.writeSync(stream.nfd, nbuffer, 0, length, position);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ return res;
+ },llseek:function (stream, offset, whence) {
+ var position = offset;
+ if (whence === 1) { // SEEK_CUR.
+ position += stream.position;
+ } else if (whence === 2) { // SEEK_END.
+ if (FS.isFile(stream.node.mode)) {
+ try {
+ var stat = fs.fstatSync(stream.nfd);
+ position += stat.size;
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ }
+ }
+
+ if (position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+
+ stream.position = position;
+ return position;
+ }}};
+
+ var _stdin=allocate(1, "i32*", ALLOC_STATIC);
+
+ var _stdout=allocate(1, "i32*", ALLOC_STATIC);
+
+ var _stderr=allocate(1, "i32*", ALLOC_STATIC);
+
+ function _fflush(stream) {
+ // int fflush(FILE *stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fflush.html
+ // we don't currently perform any user-space buffering of data
+ }var FS={root:null,mounts:[],devices:[null],streams:[],nextInode:1,nameTable:null,currentPath:"/",initialized:false,ignorePermissions:true,ErrnoError:null,genericErrors:{},handleFSError:function (e) {
+ if (!(e instanceof FS.ErrnoError)) throw e + ' : ' + stackTrace();
+ return ___setErrNo(e.errno);
+ },lookupPath:function (path, opts) {
+ path = PATH.resolve(FS.cwd(), path);
+ opts = opts || {};
+
+ var defaults = {
+ follow_mount: true,
+ recurse_count: 0
+ };
+ for (var key in defaults) {
+ if (opts[key] === undefined) {
+ opts[key] = defaults[key];
+ }
+ }
+
+ if (opts.recurse_count > 8) { // max recursive lookup of 8
+ throw new FS.ErrnoError(ERRNO_CODES.ELOOP);
+ }
+
+ // split the path
+ var parts = PATH.normalizeArray(path.split('/').filter(function(p) {
+ return !!p;
+ }), false);
+
+ // start at the root
+ var current = FS.root;
+ var current_path = '/';
+
+ for (var i = 0; i < parts.length; i++) {
+ var islast = (i === parts.length-1);
+ if (islast && opts.parent) {
+ // stop resolving
+ break;
+ }
+
+ current = FS.lookupNode(current, parts[i]);
+ current_path = PATH.join2(current_path, parts[i]);
+
+ // jump to the mount's root node if this is a mountpoint
+ if (FS.isMountpoint(current)) {
+ if (!islast || (islast && opts.follow_mount)) {
+ current = current.mounted.root;
+ }
+ }
+
+ // by default, lookupPath will not follow a symlink if it is the final path component.
+ // setting opts.follow = true will override this behavior.
+ if (!islast || opts.follow) {
+ var count = 0;
+ while (FS.isLink(current.mode)) {
+ var link = FS.readlink(current_path);
+ current_path = PATH.resolve(PATH.dirname(current_path), link);
+
+ var lookup = FS.lookupPath(current_path, { recurse_count: opts.recurse_count });
+ current = lookup.node;
+
+ if (count++ > 40) { // limit max consecutive symlinks to 40 (SYMLOOP_MAX).
+ throw new FS.ErrnoError(ERRNO_CODES.ELOOP);
+ }
+ }
+ }
+ }
+
+ return { path: current_path, node: current };
+ },getPath:function (node) {
+ var path;
+ while (true) {
+ if (FS.isRoot(node)) {
+ var mount = node.mount.mountpoint;
+ if (!path) return mount;
+ return mount[mount.length-1] !== '/' ? mount + '/' + path : mount + path;
+ }
+ path = path ? node.name + '/' + path : node.name;
+ node = node.parent;
+ }
+ },hashName:function (parentid, name) {
+ var hash = 0;
+
+
+ for (var i = 0; i < name.length; i++) {
+ hash = ((hash << 5) - hash + name.charCodeAt(i)) | 0;
+ }
+ return ((parentid + hash) >>> 0) % FS.nameTable.length;
+ },hashAddNode:function (node) {
+ var hash = FS.hashName(node.parent.id, node.name);
+ node.name_next = FS.nameTable[hash];
+ FS.nameTable[hash] = node;
+ },hashRemoveNode:function (node) {
+ var hash = FS.hashName(node.parent.id, node.name);
+ if (FS.nameTable[hash] === node) {
+ FS.nameTable[hash] = node.name_next;
+ } else {
+ var current = FS.nameTable[hash];
+ while (current) {
+ if (current.name_next === node) {
+ current.name_next = node.name_next;
+ break;
+ }
+ current = current.name_next;
+ }
+ }
+ },lookupNode:function (parent, name) {
+ var err = FS.mayLookup(parent);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ var hash = FS.hashName(parent.id, name);
+ for (var node = FS.nameTable[hash]; node; node = node.name_next) {
+ var nodeName = node.name;
+ if (node.parent.id === parent.id && nodeName === name) {
+ return node;
+ }
+ }
+ // if we failed to find it in the cache, call into the VFS
+ return FS.lookup(parent, name);
+ },createNode:function (parent, name, mode, rdev) {
+ if (!FS.FSNode) {
+ FS.FSNode = function(parent, name, mode, rdev) {
+ if (!parent) {
+ parent = this; // root node sets parent to itself
+ }
+ this.parent = parent;
+ this.mount = parent.mount;
+ this.mounted = null;
+ this.id = FS.nextInode++;
+ this.name = name;
+ this.mode = mode;
+ this.node_ops = {};
+ this.stream_ops = {};
+ this.rdev = rdev;
+ };
+
+ FS.FSNode.prototype = {};
+
+ // compatibility
+ var readMode = 292 | 73;
+ var writeMode = 146;
+
+ // NOTE we must use Object.defineProperties instead of individual calls to
+ // Object.defineProperty in order to make closure compiler happy
+ Object.defineProperties(FS.FSNode.prototype, {
+ read: {
+ get: function() { return (this.mode & readMode) === readMode; },
+ set: function(val) { val ? this.mode |= readMode : this.mode &= ~readMode; }
+ },
+ write: {
+ get: function() { return (this.mode & writeMode) === writeMode; },
+ set: function(val) { val ? this.mode |= writeMode : this.mode &= ~writeMode; }
+ },
+ isFolder: {
+ get: function() { return FS.isDir(this.mode); },
+ },
+ isDevice: {
+ get: function() { return FS.isChrdev(this.mode); },
+ },
+ });
+ }
+
+ var node = new FS.FSNode(parent, name, mode, rdev);
+
+ FS.hashAddNode(node);
+
+ return node;
+ },destroyNode:function (node) {
+ FS.hashRemoveNode(node);
+ },isRoot:function (node) {
+ return node === node.parent;
+ },isMountpoint:function (node) {
+ return !!node.mounted;
+ },isFile:function (mode) {
+ return (mode & 61440) === 32768;
+ },isDir:function (mode) {
+ return (mode & 61440) === 16384;
+ },isLink:function (mode) {
+ return (mode & 61440) === 40960;
+ },isChrdev:function (mode) {
+ return (mode & 61440) === 8192;
+ },isBlkdev:function (mode) {
+ return (mode & 61440) === 24576;
+ },isFIFO:function (mode) {
+ return (mode & 61440) === 4096;
+ },isSocket:function (mode) {
+ return (mode & 49152) === 49152;
+ },flagModes:{"r":0,"rs":1052672,"r+":2,"w":577,"wx":705,"xw":705,"w+":578,"wx+":706,"xw+":706,"a":1089,"ax":1217,"xa":1217,"a+":1090,"ax+":1218,"xa+":1218},modeStringToFlags:function (str) {
+ var flags = FS.flagModes[str];
+ if (typeof flags === 'undefined') {
+ throw new Error('Unknown file open mode: ' + str);
+ }
+ return flags;
+ },flagsToPermissionString:function (flag) {
+ var accmode = flag & 2097155;
+ var perms = ['r', 'w', 'rw'][accmode];
+ if ((flag & 512)) {
+ perms += 'w';
+ }
+ return perms;
+ },nodePermissions:function (node, perms) {
+ if (FS.ignorePermissions) {
+ return 0;
+ }
+ // return 0 if any user, group or owner bits are set.
+ if (perms.indexOf('r') !== -1 && !(node.mode & 292)) {
+ return ERRNO_CODES.EACCES;
+ } else if (perms.indexOf('w') !== -1 && !(node.mode & 146)) {
+ return ERRNO_CODES.EACCES;
+ } else if (perms.indexOf('x') !== -1 && !(node.mode & 73)) {
+ return ERRNO_CODES.EACCES;
+ }
+ return 0;
+ },mayLookup:function (dir) {
+ return FS.nodePermissions(dir, 'x');
+ },mayCreate:function (dir, name) {
+ try {
+ var node = FS.lookupNode(dir, name);
+ return ERRNO_CODES.EEXIST;
+ } catch (e) {
+ }
+ return FS.nodePermissions(dir, 'wx');
+ },mayDelete:function (dir, name, isdir) {
+ var node;
+ try {
+ node = FS.lookupNode(dir, name);
+ } catch (e) {
+ return e.errno;
+ }
+ var err = FS.nodePermissions(dir, 'wx');
+ if (err) {
+ return err;
+ }
+ if (isdir) {
+ if (!FS.isDir(node.mode)) {
+ return ERRNO_CODES.ENOTDIR;
+ }
+ if (FS.isRoot(node) || FS.getPath(node) === FS.cwd()) {
+ return ERRNO_CODES.EBUSY;
+ }
+ } else {
+ if (FS.isDir(node.mode)) {
+ return ERRNO_CODES.EISDIR;
+ }
+ }
+ return 0;
+ },mayOpen:function (node, flags) {
+ if (!node) {
+ return ERRNO_CODES.ENOENT;
+ }
+ if (FS.isLink(node.mode)) {
+ return ERRNO_CODES.ELOOP;
+ } else if (FS.isDir(node.mode)) {
+ if ((flags & 2097155) !== 0 || // opening for write
+ (flags & 512)) {
+ return ERRNO_CODES.EISDIR;
+ }
+ }
+ return FS.nodePermissions(node, FS.flagsToPermissionString(flags));
+ },MAX_OPEN_FDS:4096,nextfd:function (fd_start, fd_end) {
+ fd_start = fd_start || 0;
+ fd_end = fd_end || FS.MAX_OPEN_FDS;
+ for (var fd = fd_start; fd <= fd_end; fd++) {
+ if (!FS.streams[fd]) {
+ return fd;
+ }
+ }
+ throw new FS.ErrnoError(ERRNO_CODES.EMFILE);
+ },getStream:function (fd) {
+ return FS.streams[fd];
+ },createStream:function (stream, fd_start, fd_end) {
+ if (!FS.FSStream) {
+ FS.FSStream = function(){};
+ FS.FSStream.prototype = {};
+ // compatibility
+ Object.defineProperties(FS.FSStream.prototype, {
+ object: {
+ get: function() { return this.node; },
+ set: function(val) { this.node = val; }
+ },
+ isRead: {
+ get: function() { return (this.flags & 2097155) !== 1; }
+ },
+ isWrite: {
+ get: function() { return (this.flags & 2097155) !== 0; }
+ },
+ isAppend: {
+ get: function() { return (this.flags & 1024); }
+ }
+ });
+ }
+ if (0) {
+ // reuse the object
+ stream.__proto__ = FS.FSStream.prototype;
+ } else {
+ var newStream = new FS.FSStream();
+ for (var p in stream) {
+ newStream[p] = stream[p];
+ }
+ stream = newStream;
+ }
+ var fd = FS.nextfd(fd_start, fd_end);
+ stream.fd = fd;
+ FS.streams[fd] = stream;
+ return stream;
+ },closeStream:function (fd) {
+ FS.streams[fd] = null;
+ },getStreamFromPtr:function (ptr) {
+ return FS.streams[ptr - 1];
+ },getPtrForStream:function (stream) {
+ return stream ? stream.fd + 1 : 0;
+ },chrdev_stream_ops:{open:function (stream) {
+ var device = FS.getDevice(stream.node.rdev);
+ // override node's stream ops with the device's
+ stream.stream_ops = device.stream_ops;
+ // forward the open call
+ if (stream.stream_ops.open) {
+ stream.stream_ops.open(stream);
+ }
+ },llseek:function () {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }},major:function (dev) {
+ return ((dev) >> 8);
+ },minor:function (dev) {
+ return ((dev) & 0xff);
+ },makedev:function (ma, mi) {
+ return ((ma) << 8 | (mi));
+ },registerDevice:function (dev, ops) {
+ FS.devices[dev] = { stream_ops: ops };
+ },getDevice:function (dev) {
+ return FS.devices[dev];
+ },getMounts:function (mount) {
+ var mounts = [];
+ var check = [mount];
+
+ while (check.length) {
+ var m = check.pop();
+
+ mounts.push(m);
+
+ check.push.apply(check, m.mounts);
+ }
+
+ return mounts;
+ },syncfs:function (populate, callback) {
+ if (typeof(populate) === 'function') {
+ callback = populate;
+ populate = false;
+ }
+
+ var mounts = FS.getMounts(FS.root.mount);
+ var completed = 0;
+
+ function done(err) {
+ if (err) {
+ if (!done.errored) {
+ done.errored = true;
+ return callback(err);
+ }
+ return;
+ }
+ if (++completed >= mounts.length) {
+ callback(null);
+ }
+ };
+
+ // sync all mounts
+ mounts.forEach(function (mount) {
+ if (!mount.type.syncfs) {
+ return done(null);
+ }
+ mount.type.syncfs(mount, populate, done);
+ });
+ },mount:function (type, opts, mountpoint) {
+ var root = mountpoint === '/';
+ var pseudo = !mountpoint;
+ var node;
+
+ if (root && FS.root) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ } else if (!root && !pseudo) {
+ var lookup = FS.lookupPath(mountpoint, { follow_mount: false });
+
+ mountpoint = lookup.path; // use the absolute path
+ node = lookup.node;
+
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+
+ if (!FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTDIR);
+ }
+ }
+
+ var mount = {
+ type: type,
+ opts: opts,
+ mountpoint: mountpoint,
+ mounts: []
+ };
+
+ // create a root node for the fs
+ var mountRoot = type.mount(mount);
+ mountRoot.mount = mount;
+ mount.root = mountRoot;
+
+ if (root) {
+ FS.root = mountRoot;
+ } else if (node) {
+ // set as a mountpoint
+ node.mounted = mount;
+
+ // add the new mount to the current mount's children
+ if (node.mount) {
+ node.mount.mounts.push(mount);
+ }
+ }
+
+ return mountRoot;
+ },unmount:function (mountpoint) {
+ var lookup = FS.lookupPath(mountpoint, { follow_mount: false });
+
+ if (!FS.isMountpoint(lookup.node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+
+ // destroy the nodes for this mount, and all its child mounts
+ var node = lookup.node;
+ var mount = node.mounted;
+ var mounts = FS.getMounts(mount);
+
+ Object.keys(FS.nameTable).forEach(function (hash) {
+ var current = FS.nameTable[hash];
+
+ while (current) {
+ var next = current.name_next;
+
+ if (mounts.indexOf(current.mount) !== -1) {
+ FS.destroyNode(current);
+ }
+
+ current = next;
+ }
+ });
+
+ // no longer a mountpoint
+ node.mounted = null;
+
+ // remove this mount from the child mounts
+ var idx = node.mount.mounts.indexOf(mount);
+ assert(idx !== -1);
+ node.mount.mounts.splice(idx, 1);
+ },lookup:function (parent, name) {
+ return parent.node_ops.lookup(parent, name);
+ },mknod:function (path, mode, dev) {
+ var lookup = FS.lookupPath(path, { parent: true });
+ var parent = lookup.node;
+ var name = PATH.basename(path);
+ var err = FS.mayCreate(parent, name);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.mknod) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ return parent.node_ops.mknod(parent, name, mode, dev);
+ },create:function (path, mode) {
+ mode = mode !== undefined ? mode : 438 /* 0666 */;
+ mode &= 4095;
+ mode |= 32768;
+ return FS.mknod(path, mode, 0);
+ },mkdir:function (path, mode) {
+ mode = mode !== undefined ? mode : 511 /* 0777 */;
+ mode &= 511 | 512;
+ mode |= 16384;
+ return FS.mknod(path, mode, 0);
+ },mkdev:function (path, mode, dev) {
+ if (typeof(dev) === 'undefined') {
+ dev = mode;
+ mode = 438 /* 0666 */;
+ }
+ mode |= 8192;
+ return FS.mknod(path, mode, dev);
+ },symlink:function (oldpath, newpath) {
+ var lookup = FS.lookupPath(newpath, { parent: true });
+ var parent = lookup.node;
+ var newname = PATH.basename(newpath);
+ var err = FS.mayCreate(parent, newname);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.symlink) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ return parent.node_ops.symlink(parent, newname, oldpath);
+ },rename:function (old_path, new_path) {
+ var old_dirname = PATH.dirname(old_path);
+ var new_dirname = PATH.dirname(new_path);
+ var old_name = PATH.basename(old_path);
+ var new_name = PATH.basename(new_path);
+ // parents must exist
+ var lookup, old_dir, new_dir;
+ try {
+ lookup = FS.lookupPath(old_path, { parent: true });
+ old_dir = lookup.node;
+ lookup = FS.lookupPath(new_path, { parent: true });
+ new_dir = lookup.node;
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ // need to be part of the same mount
+ if (old_dir.mount !== new_dir.mount) {
+ throw new FS.ErrnoError(ERRNO_CODES.EXDEV);
+ }
+ // source must exist
+ var old_node = FS.lookupNode(old_dir, old_name);
+ // old path should not be an ancestor of the new path
+ var relative = PATH.relative(old_path, new_dirname);
+ if (relative.charAt(0) !== '.') {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ // new path should not be an ancestor of the old path
+ relative = PATH.relative(new_path, old_dirname);
+ if (relative.charAt(0) !== '.') {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTEMPTY);
+ }
+ // see if the new path already exists
+ var new_node;
+ try {
+ new_node = FS.lookupNode(new_dir, new_name);
+ } catch (e) {
+ // not fatal
+ }
+ // early out if nothing needs to change
+ if (old_node === new_node) {
+ return;
+ }
+ // we'll need to delete the old entry
+ var isdir = FS.isDir(old_node.mode);
+ var err = FS.mayDelete(old_dir, old_name, isdir);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ // need delete permissions if we'll be overwriting.
+ // need create permissions if new doesn't already exist.
+ err = new_node ?
+ FS.mayDelete(new_dir, new_name, isdir) :
+ FS.mayCreate(new_dir, new_name);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!old_dir.node_ops.rename) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isMountpoint(old_node) || (new_node && FS.isMountpoint(new_node))) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ // if we are going to change the parent, check write permissions
+ if (new_dir !== old_dir) {
+ err = FS.nodePermissions(old_dir, 'w');
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ }
+ // remove the node from the lookup hash
+ FS.hashRemoveNode(old_node);
+ // do the underlying fs rename
+ try {
+ old_dir.node_ops.rename(old_node, new_dir, new_name);
+ } catch (e) {
+ throw e;
+ } finally {
+ // add the node back to the hash (in case node_ops.rename
+ // changed its name)
+ FS.hashAddNode(old_node);
+ }
+ },rmdir:function (path) {
+ var lookup = FS.lookupPath(path, { parent: true });
+ var parent = lookup.node;
+ var name = PATH.basename(path);
+ var node = FS.lookupNode(parent, name);
+ var err = FS.mayDelete(parent, name, true);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.rmdir) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ parent.node_ops.rmdir(parent, name);
+ FS.destroyNode(node);
+ },readdir:function (path) {
+ var lookup = FS.lookupPath(path, { follow: true });
+ var node = lookup.node;
+ if (!node.node_ops.readdir) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTDIR);
+ }
+ return node.node_ops.readdir(node);
+ },unlink:function (path) {
+ var lookup = FS.lookupPath(path, { parent: true });
+ var parent = lookup.node;
+ var name = PATH.basename(path);
+ var node = FS.lookupNode(parent, name);
+ var err = FS.mayDelete(parent, name, false);
+ if (err) {
+ // POSIX says unlink should set EPERM, not EISDIR
+ if (err === ERRNO_CODES.EISDIR) err = ERRNO_CODES.EPERM;
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.unlink) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ parent.node_ops.unlink(parent, name);
+ FS.destroyNode(node);
+ },readlink:function (path) {
+ var lookup = FS.lookupPath(path);
+ var link = lookup.node;
+ if (!link.node_ops.readlink) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ return link.node_ops.readlink(link);
+ },stat:function (path, dontFollow) {
+ var lookup = FS.lookupPath(path, { follow: !dontFollow });
+ var node = lookup.node;
+ if (!node.node_ops.getattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ return node.node_ops.getattr(node);
+ },lstat:function (path) {
+ return FS.stat(path, true);
+ },chmod:function (path, mode, dontFollow) {
+ var node;
+ if (typeof path === 'string') {
+ var lookup = FS.lookupPath(path, { follow: !dontFollow });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ if (!node.node_ops.setattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ node.node_ops.setattr(node, {
+ mode: (mode & 4095) | (node.mode & ~4095),
+ timestamp: Date.now()
+ });
+ },lchmod:function (path, mode) {
+ FS.chmod(path, mode, true);
+ },fchmod:function (fd, mode) {
+ var stream = FS.getStream(fd);
+ if (!stream) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ FS.chmod(stream.node, mode);
+ },chown:function (path, uid, gid, dontFollow) {
+ var node;
+ if (typeof path === 'string') {
+ var lookup = FS.lookupPath(path, { follow: !dontFollow });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ if (!node.node_ops.setattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ node.node_ops.setattr(node, {
+ timestamp: Date.now()
+ // we ignore the uid / gid for now
+ });
+ },lchown:function (path, uid, gid) {
+ FS.chown(path, uid, gid, true);
+ },fchown:function (fd, uid, gid) {
+ var stream = FS.getStream(fd);
+ if (!stream) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ FS.chown(stream.node, uid, gid);
+ },truncate:function (path, len) {
+ if (len < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var node;
+ if (typeof path === 'string') {
+ var lookup = FS.lookupPath(path, { follow: true });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ if (!node.node_ops.setattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EISDIR);
+ }
+ if (!FS.isFile(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var err = FS.nodePermissions(node, 'w');
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ node.node_ops.setattr(node, {
+ size: len,
+ timestamp: Date.now()
+ });
+ },ftruncate:function (fd, len) {
+ var stream = FS.getStream(fd);
+ if (!stream) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ FS.truncate(stream.node, len);
+ },utime:function (path, atime, mtime) {
+ var lookup = FS.lookupPath(path, { follow: true });
+ var node = lookup.node;
+ node.node_ops.setattr(node, {
+ timestamp: Math.max(atime, mtime)
+ });
+ },open:function (path, flags, mode, fd_start, fd_end) {
+ flags = typeof flags === 'string' ? FS.modeStringToFlags(flags) : flags;
+ mode = typeof mode === 'undefined' ? 438 /* 0666 */ : mode;
+ if ((flags & 64)) {
+ mode = (mode & 4095) | 32768;
+ } else {
+ mode = 0;
+ }
+ var node;
+ if (typeof path === 'object') {
+ node = path;
+ } else {
+ path = PATH.normalize(path);
+ try {
+ var lookup = FS.lookupPath(path, {
+ follow: !(flags & 131072)
+ });
+ node = lookup.node;
+ } catch (e) {
+ // ignore
+ }
+ }
+ // perhaps we need to create the node
+ if ((flags & 64)) {
+ if (node) {
+ // if O_CREAT and O_EXCL are set, error out if the node already exists
+ if ((flags & 128)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EEXIST);
+ }
+ } else {
+ // node doesn't exist, try to create it
+ node = FS.mknod(path, mode, 0);
+ }
+ }
+ if (!node) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOENT);
+ }
+ // can't truncate a device
+ if (FS.isChrdev(node.mode)) {
+ flags &= ~512;
+ }
+ // check permissions
+ var err = FS.mayOpen(node, flags);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ // do truncation if necessary
+ if ((flags & 512)) {
+ FS.truncate(node, 0);
+ }
+ // we've already handled these, don't pass down to the underlying vfs
+ flags &= ~(128 | 512);
+
+ // register the stream with the filesystem
+ var stream = FS.createStream({
+ node: node,
+ path: FS.getPath(node), // we want the absolute path to the node
+ flags: flags,
+ seekable: true,
+ position: 0,
+ stream_ops: node.stream_ops,
+ // used by the file family libc calls (fopen, fwrite, ferror, etc.)
+ ungotten: [],
+ error: false
+ }, fd_start, fd_end);
+ // call the new stream's open function
+ if (stream.stream_ops.open) {
+ stream.stream_ops.open(stream);
+ }
+ if (Module['logReadFiles'] && !(flags & 1)) {
+ if (!FS.readFiles) FS.readFiles = {};
+ if (!(path in FS.readFiles)) {
+ FS.readFiles[path] = 1;
+ Module['printErr']('read file: ' + path);
+ }
+ }
+ return stream;
+ },close:function (stream) {
+ try {
+ if (stream.stream_ops.close) {
+ stream.stream_ops.close(stream);
+ }
+ } catch (e) {
+ throw e;
+ } finally {
+ FS.closeStream(stream.fd);
+ }
+ },llseek:function (stream, offset, whence) {
+ if (!stream.seekable || !stream.stream_ops.llseek) {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }
+ return stream.stream_ops.llseek(stream, offset, whence);
+ },read:function (stream, buffer, offset, length, position) {
+ if (length < 0 || position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ if ((stream.flags & 2097155) === 1) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if (FS.isDir(stream.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EISDIR);
+ }
+ if (!stream.stream_ops.read) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var seeking = true;
+ if (typeof position === 'undefined') {
+ position = stream.position;
+ seeking = false;
+ } else if (!stream.seekable) {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }
+ var bytesRead = stream.stream_ops.read(stream, buffer, offset, length, position);
+ if (!seeking) stream.position += bytesRead;
+ return bytesRead;
+ },write:function (stream, buffer, offset, length, position, canOwn) {
+ if (length < 0 || position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if (FS.isDir(stream.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EISDIR);
+ }
+ if (!stream.stream_ops.write) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var seeking = true;
+ if (typeof position === 'undefined') {
+ position = stream.position;
+ seeking = false;
+ } else if (!stream.seekable) {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }
+ if (stream.flags & 1024) {
+ // seek to the end before writing in append mode
+ FS.llseek(stream, 0, 2);
+ }
+ var bytesWritten = stream.stream_ops.write(stream, buffer, offset, length, position, canOwn);
+ if (!seeking) stream.position += bytesWritten;
+ return bytesWritten;
+ },allocate:function (stream, offset, length) {
+ if (offset < 0 || length <= 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if (!FS.isFile(stream.node.mode) && !FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ if (!stream.stream_ops.allocate) {
+ throw new FS.ErrnoError(ERRNO_CODES.EOPNOTSUPP);
+ }
+ stream.stream_ops.allocate(stream, offset, length);
+ },mmap:function (stream, buffer, offset, length, position, prot, flags) {
+ // TODO if PROT is PROT_WRITE, make sure we have write access
+ if ((stream.flags & 2097155) === 1) {
+ throw new FS.ErrnoError(ERRNO_CODES.EACCES);
+ }
+ if (!stream.stream_ops.mmap) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ return stream.stream_ops.mmap(stream, buffer, offset, length, position, prot, flags);
+ },ioctl:function (stream, cmd, arg) {
+ if (!stream.stream_ops.ioctl) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTTY);
+ }
+ return stream.stream_ops.ioctl(stream, cmd, arg);
+ },readFile:function (path, opts) {
+ opts = opts || {};
+ opts.flags = opts.flags || 'r';
+ opts.encoding = opts.encoding || 'binary';
+ if (opts.encoding !== 'utf8' && opts.encoding !== 'binary') {
+ throw new Error('Invalid encoding type "' + opts.encoding + '"');
+ }
+ var ret;
+ var stream = FS.open(path, opts.flags);
+ var stat = FS.stat(path);
+ var length = stat.size;
+ var buf = new Uint8Array(length);
+ FS.read(stream, buf, 0, length, 0);
+ if (opts.encoding === 'utf8') {
+ ret = '';
+ var utf8 = new Runtime.UTF8Processor();
+ for (var i = 0; i < length; i++) {
+ ret += utf8.processCChar(buf[i]);
+ }
+ } else if (opts.encoding === 'binary') {
+ ret = buf;
+ }
+ FS.close(stream);
+ return ret;
+ },writeFile:function (path, data, opts) {
+ opts = opts || {};
+ opts.flags = opts.flags || 'w';
+ opts.encoding = opts.encoding || 'utf8';
+ if (opts.encoding !== 'utf8' && opts.encoding !== 'binary') {
+ throw new Error('Invalid encoding type "' + opts.encoding + '"');
+ }
+ var stream = FS.open(path, opts.flags, opts.mode);
+ if (opts.encoding === 'utf8') {
+ var utf8 = new Runtime.UTF8Processor();
+ var buf = new Uint8Array(utf8.processJSString(data));
+ FS.write(stream, buf, 0, buf.length, 0, opts.canOwn);
+ } else if (opts.encoding === 'binary') {
+ FS.write(stream, data, 0, data.length, 0, opts.canOwn);
+ }
+ FS.close(stream);
+ },cwd:function () {
+ return FS.currentPath;
+ },chdir:function (path) {
+ var lookup = FS.lookupPath(path, { follow: true });
+ if (!FS.isDir(lookup.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTDIR);
+ }
+ var err = FS.nodePermissions(lookup.node, 'x');
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ FS.currentPath = lookup.path;
+ },createDefaultDirectories:function () {
+ FS.mkdir('/tmp');
+ },createDefaultDevices:function () {
+ // create /dev
+ FS.mkdir('/dev');
+ // setup /dev/null
+ FS.registerDevice(FS.makedev(1, 3), {
+ read: function() { return 0; },
+ write: function() { return 0; }
+ });
+ FS.mkdev('/dev/null', FS.makedev(1, 3));
+ // setup /dev/tty and /dev/tty1
+ // stderr needs to print output using Module['printErr']
+ // so we register a second tty just for it.
+ TTY.register(FS.makedev(5, 0), TTY.default_tty_ops);
+ TTY.register(FS.makedev(6, 0), TTY.default_tty1_ops);
+ FS.mkdev('/dev/tty', FS.makedev(5, 0));
+ FS.mkdev('/dev/tty1', FS.makedev(6, 0));
+ // we're not going to emulate the actual shm device,
+ // just create the tmp dirs that reside in it commonly
+ FS.mkdir('/dev/shm');
+ FS.mkdir('/dev/shm/tmp');
+ },createStandardStreams:function () {
+ // TODO deprecate the old functionality of a single
+ // input / output callback and that utilizes FS.createDevice
+ // and instead require a unique set of stream ops
+
+ // by default, we symlink the standard streams to the
+ // default tty devices. however, if the standard streams
+ // have been overwritten we create a unique device for
+ // them instead.
+ if (Module['stdin']) {
+ FS.createDevice('/dev', 'stdin', Module['stdin']);
+ } else {
+ FS.symlink('/dev/tty', '/dev/stdin');
+ }
+ if (Module['stdout']) {
+ FS.createDevice('/dev', 'stdout', null, Module['stdout']);
+ } else {
+ FS.symlink('/dev/tty', '/dev/stdout');
+ }
+ if (Module['stderr']) {
+ FS.createDevice('/dev', 'stderr', null, Module['stderr']);
+ } else {
+ FS.symlink('/dev/tty1', '/dev/stderr');
+ }
+
+ // open default streams for the stdin, stdout and stderr devices
+ var stdin = FS.open('/dev/stdin', 'r');
+ HEAP32[((_stdin)>>2)]=FS.getPtrForStream(stdin);
+ assert(stdin.fd === 0, 'invalid handle for stdin (' + stdin.fd + ')');
+
+ var stdout = FS.open('/dev/stdout', 'w');
+ HEAP32[((_stdout)>>2)]=FS.getPtrForStream(stdout);
+ assert(stdout.fd === 1, 'invalid handle for stdout (' + stdout.fd + ')');
+
+ var stderr = FS.open('/dev/stderr', 'w');
+ HEAP32[((_stderr)>>2)]=FS.getPtrForStream(stderr);
+ assert(stderr.fd === 2, 'invalid handle for stderr (' + stderr.fd + ')');
+ },ensureErrnoError:function () {
+ if (FS.ErrnoError) return;
+ FS.ErrnoError = function ErrnoError(errno) {
+ this.errno = errno;
+ for (var key in ERRNO_CODES) {
+ if (ERRNO_CODES[key] === errno) {
+ this.code = key;
+ break;
+ }
+ }
+ this.message = ERRNO_MESSAGES[errno];
+ };
+ FS.ErrnoError.prototype = new Error();
+ FS.ErrnoError.prototype.constructor = FS.ErrnoError;
+ // Some errors may happen quite a bit, to avoid overhead we reuse them (and suffer a lack of stack info)
+ [ERRNO_CODES.ENOENT].forEach(function(code) {
+ FS.genericErrors[code] = new FS.ErrnoError(code);
+ FS.genericErrors[code].stack = '<generic error, no stack>';
+ });
+ },staticInit:function () {
+ FS.ensureErrnoError();
+
+ FS.nameTable = new Array(4096);
+
+ FS.mount(MEMFS, {}, '/');
+
+ FS.createDefaultDirectories();
+ FS.createDefaultDevices();
+ },init:function (input, output, error) {
+ assert(!FS.init.initialized, 'FS.init was previously called. If you want to initialize later with custom parameters, remove any earlier calls (note that one is automatically added to the generated code)');
+ FS.init.initialized = true;
+
+ FS.ensureErrnoError();
+
+ // Allow Module.stdin etc. to provide defaults, if none explicitly passed to us here
+ Module['stdin'] = input || Module['stdin'];
+ Module['stdout'] = output || Module['stdout'];
+ Module['stderr'] = error || Module['stderr'];
+
+ FS.createStandardStreams();
+ },quit:function () {
+ FS.init.initialized = false;
+ for (var i = 0; i < FS.streams.length; i++) {
+ var stream = FS.streams[i];
+ if (!stream) {
+ continue;
+ }
+ FS.close(stream);
+ }
+ },getMode:function (canRead, canWrite) {
+ var mode = 0;
+ if (canRead) mode |= 292 | 73;
+ if (canWrite) mode |= 146;
+ return mode;
+ },joinPath:function (parts, forceRelative) {
+ var path = PATH.join.apply(null, parts);
+ if (forceRelative && path[0] == '/') path = path.substr(1);
+ return path;
+ },absolutePath:function (relative, base) {
+ return PATH.resolve(base, relative);
+ },standardizePath:function (path) {
+ return PATH.normalize(path);
+ },findObject:function (path, dontResolveLastLink) {
+ var ret = FS.analyzePath(path, dontResolveLastLink);
+ if (ret.exists) {
+ return ret.object;
+ } else {
+ ___setErrNo(ret.error);
+ return null;
+ }
+ },analyzePath:function (path, dontResolveLastLink) {
+ // operate from within the context of the symlink's target
+ try {
+ var lookup = FS.lookupPath(path, { follow: !dontResolveLastLink });
+ path = lookup.path;
+ } catch (e) {
+ }
+ var ret = {
+ isRoot: false, exists: false, error: 0, name: null, path: null, object: null,
+ parentExists: false, parentPath: null, parentObject: null
+ };
+ try {
+ var lookup = FS.lookupPath(path, { parent: true });
+ ret.parentExists = true;
+ ret.parentPath = lookup.path;
+ ret.parentObject = lookup.node;
+ ret.name = PATH.basename(path);
+ lookup = FS.lookupPath(path, { follow: !dontResolveLastLink });
+ ret.exists = true;
+ ret.path = lookup.path;
+ ret.object = lookup.node;
+ ret.name = lookup.node.name;
+ ret.isRoot = lookup.path === '/';
+ } catch (e) {
+ ret.error = e.errno;
+ };
+ return ret;
+ },createFolder:function (parent, name, canRead, canWrite) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ var mode = FS.getMode(canRead, canWrite);
+ return FS.mkdir(path, mode);
+ },createPath:function (parent, path, canRead, canWrite) {
+ parent = typeof parent === 'string' ? parent : FS.getPath(parent);
+ var parts = path.split('/').reverse();
+ while (parts.length) {
+ var part = parts.pop();
+ if (!part) continue;
+ var current = PATH.join2(parent, part);
+ try {
+ FS.mkdir(current);
+ } catch (e) {
+ // ignore EEXIST
+ }
+ parent = current;
+ }
+ return current;
+ },createFile:function (parent, name, properties, canRead, canWrite) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ var mode = FS.getMode(canRead, canWrite);
+ return FS.create(path, mode);
+ },createDataFile:function (parent, name, data, canRead, canWrite, canOwn) {
+ var path = name ? PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name) : parent;
+ var mode = FS.getMode(canRead, canWrite);
+ var node = FS.create(path, mode);
+ if (data) {
+ if (typeof data === 'string') {
+ var arr = new Array(data.length);
+ for (var i = 0, len = data.length; i < len; ++i) arr[i] = data.charCodeAt(i);
+ data = arr;
+ }
+ // make sure we can write to the file
+ FS.chmod(node, mode | 146);
+ var stream = FS.open(node, 'w');
+ FS.write(stream, data, 0, data.length, 0, canOwn);
+ FS.close(stream);
+ FS.chmod(node, mode);
+ }
+ return node;
+ },createDevice:function (parent, name, input, output) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ var mode = FS.getMode(!!input, !!output);
+ if (!FS.createDevice.major) FS.createDevice.major = 64;
+ var dev = FS.makedev(FS.createDevice.major++, 0);
+ // Create a fake device that a set of stream ops to emulate
+ // the old behavior.
+ FS.registerDevice(dev, {
+ open: function(stream) {
+ stream.seekable = false;
+ },
+ close: function(stream) {
+ // flush any pending line data
+ if (output && output.buffer && output.buffer.length) {
+ output(10);
+ }
+ },
+ read: function(stream, buffer, offset, length, pos /* ignored */) {
+ var bytesRead = 0;
+ for (var i = 0; i < length; i++) {
+ var result;
+ try {
+ result = input();
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ if (result === undefined && bytesRead === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ if (result === null || result === undefined) break;
+ bytesRead++;
+ buffer[offset+i] = result;
+ }
+ if (bytesRead) {
+ stream.node.timestamp = Date.now();
+ }
+ return bytesRead;
+ },
+ write: function(stream, buffer, offset, length, pos) {
+ for (var i = 0; i < length; i++) {
+ try {
+ output(buffer[offset+i]);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ }
+ if (length) {
+ stream.node.timestamp = Date.now();
+ }
+ return i;
+ }
+ });
+ return FS.mkdev(path, mode, dev);
+ },createLink:function (parent, name, target, canRead, canWrite) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ return FS.symlink(target, path);
+ },forceLoadFile:function (obj) {
+ if (obj.isDevice || obj.isFolder || obj.link || obj.contents) return true;
+ var success = true;
+ if (typeof XMLHttpRequest !== 'undefined') {
+ throw new Error("Lazy loading should have been performed (contents set) in createLazyFile, but it was not. Lazy loading only works in web workers. Use --embed-file or --preload-file in emcc on the main thread.");
+ } else if (Module['read']) {
+ // Command-line.
+ try {
+ // WARNING: Can't read binary files in V8's d8 or tracemonkey's js, as
+ // read() will try to parse UTF8.
+ obj.contents = intArrayFromString(Module['read'](obj.url), true);
+ } catch (e) {
+ success = false;
+ }
+ } else {
+ throw new Error('Cannot load without read() or XMLHttpRequest.');
+ }
+ if (!success) ___setErrNo(ERRNO_CODES.EIO);
+ return success;
+ },createLazyFile:function (parent, name, url, canRead, canWrite) {
+ // Lazy chunked Uint8Array (implements get and length from Uint8Array). Actual getting is abstracted away for eventual reuse.
+ function LazyUint8Array() {
+ this.lengthKnown = false;
+ this.chunks = []; // Loaded chunks. Index is the chunk number
+ }
+ LazyUint8Array.prototype.get = function LazyUint8Array_get(idx) {
+ if (idx > this.length-1 || idx < 0) {
+ return undefined;
+ }
+ var chunkOffset = idx % this.chunkSize;
+ var chunkNum = Math.floor(idx / this.chunkSize);
+ return this.getter(chunkNum)[chunkOffset];
+ }
+ LazyUint8Array.prototype.setDataGetter = function LazyUint8Array_setDataGetter(getter) {
+ this.getter = getter;
+ }
+ LazyUint8Array.prototype.cacheLength = function LazyUint8Array_cacheLength() {
+ // Find length
+ var xhr = new XMLHttpRequest();
+ xhr.open('HEAD', url, false);
+ xhr.send(null);
+ if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". Status: " + xhr.status);
+ var datalength = Number(xhr.getResponseHeader("Content-length"));
+ var header;
+ var hasByteServing = (header = xhr.getResponseHeader("Accept-Ranges")) && header === "bytes";
+ var chunkSize = 1024*1024; // Chunk size in bytes
+
+ if (!hasByteServing) chunkSize = datalength;
+
+ // Function to get a range from the remote URL.
+ var doXHR = (function(from, to) {
+ if (from > to) throw new Error("invalid range (" + from + ", " + to + ") or no bytes requested!");
+ if (to > datalength-1) throw new Error("only " + datalength + " bytes available! programmer error!");
+
+ // TODO: Use mozResponseArrayBuffer, responseStream, etc. if available.
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, false);
+ if (datalength !== chunkSize) xhr.setRequestHeader("Range", "bytes=" + from + "-" + to);
+
+ // Some hints to the browser that we want binary data.
+ if (typeof Uint8Array != 'undefined') xhr.responseType = 'arraybuffer';
+ if (xhr.overrideMimeType) {
+ xhr.overrideMimeType('text/plain; charset=x-user-defined');
+ }
+
+ xhr.send(null);
+ if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". Status: " + xhr.status);
+ if (xhr.response !== undefined) {
+ return new Uint8Array(xhr.response || []);
+ } else {
+ return intArrayFromString(xhr.responseText || '', true);
+ }
+ });
+ var lazyArray = this;
+ lazyArray.setDataGetter(function(chunkNum) {
+ var start = chunkNum * chunkSize;
+ var end = (chunkNum+1) * chunkSize - 1; // including this byte
+ end = Math.min(end, datalength-1); // if datalength-1 is selected, this is the last block
+ if (typeof(lazyArray.chunks[chunkNum]) === "undefined") {
+ lazyArray.chunks[chunkNum] = doXHR(start, end);
+ }
+ if (typeof(lazyArray.chunks[chunkNum]) === "undefined") throw new Error("doXHR failed!");
+ return lazyArray.chunks[chunkNum];
+ });
+
+ this._length = datalength;
+ this._chunkSize = chunkSize;
+ this.lengthKnown = true;
+ }
+ if (typeof XMLHttpRequest !== 'undefined') {
+ if (!ENVIRONMENT_IS_WORKER) throw 'Cannot do synchronous binary XHRs outside webworkers in modern browsers. Use --embed-file or --preload-file in emcc';
+ var lazyArray = new LazyUint8Array();
+ Object.defineProperty(lazyArray, "length", {
+ get: function() {
+ if(!this.lengthKnown) {
+ this.cacheLength();
+ }
+ return this._length;
+ }
+ });
+ Object.defineProperty(lazyArray, "chunkSize", {
+ get: function() {
+ if(!this.lengthKnown) {
+ this.cacheLength();
+ }
+ return this._chunkSize;
+ }
+ });
+
+ var properties = { isDevice: false, contents: lazyArray };
+ } else {
+ var properties = { isDevice: false, url: url };
+ }
+
+ var node = FS.createFile(parent, name, properties, canRead, canWrite);
+ // This is a total hack, but I want to get this lazy file code out of the
+ // core of MEMFS. If we want to keep this lazy file concept I feel it should
+ // be its own thin LAZYFS proxying calls to MEMFS.
+ if (properties.contents) {
+ node.contents = properties.contents;
+ } else if (properties.url) {
+ node.contents = null;
+ node.url = properties.url;
+ }
+ // override each stream op with one that tries to force load the lazy file first
+ var stream_ops = {};
+ var keys = Object.keys(node.stream_ops);
+ keys.forEach(function(key) {
+ var fn = node.stream_ops[key];
+ stream_ops[key] = function forceLoadLazyFile() {
+ if (!FS.forceLoadFile(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ return fn.apply(null, arguments);
+ };
+ });
+ // use a custom read function
+ stream_ops.read = function stream_ops_read(stream, buffer, offset, length, position) {
+ if (!FS.forceLoadFile(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ var contents = stream.node.contents;
+ if (position >= contents.length)
+ return 0;
+ var size = Math.min(contents.length - position, length);
+ assert(size >= 0);
+ if (contents.slice) { // normal array
+ for (var i = 0; i < size; i++) {
+ buffer[offset + i] = contents[position + i];
+ }
+ } else {
+ for (var i = 0; i < size; i++) { // LazyUint8Array from sync binary XHR
+ buffer[offset + i] = contents.get(position + i);
+ }
+ }
+ return size;
+ };
+ node.stream_ops = stream_ops;
+ return node;
+ },createPreloadedFile:function (parent, name, url, canRead, canWrite, onload, onerror, dontCreateFile, canOwn) {
+ Browser.init();
+ // TODO we should allow people to just pass in a complete filename instead
+ // of parent and name being that we just join them anyways
+ var fullname = name ? PATH.resolve(PATH.join2(parent, name)) : parent;
+ function processData(byteArray) {
+ function finish(byteArray) {
+ if (!dontCreateFile) {
+ FS.createDataFile(parent, name, byteArray, canRead, canWrite, canOwn);
+ }
+ if (onload) onload();
+ removeRunDependency('cp ' + fullname);
+ }
+ var handled = false;
+ Module['preloadPlugins'].forEach(function(plugin) {
+ if (handled) return;
+ if (plugin['canHandle'](fullname)) {
+ plugin['handle'](byteArray, fullname, finish, function() {
+ if (onerror) onerror();
+ removeRunDependency('cp ' + fullname);
+ });
+ handled = true;
+ }
+ });
+ if (!handled) finish(byteArray);
+ }
+ addRunDependency('cp ' + fullname);
+ if (typeof url == 'string') {
+ Browser.asyncLoad(url, function(byteArray) {
+ processData(byteArray);
+ }, onerror);
+ } else {
+ processData(url);
+ }
+ },indexedDB:function () {
+ return window.indexedDB || window.mozIndexedDB || window.webkitIndexedDB || window.msIndexedDB;
+ },DB_NAME:function () {
+ return 'EM_FS_' + window.location.pathname;
+ },DB_VERSION:20,DB_STORE_NAME:"FILE_DATA",saveFilesToDB:function (paths, onload, onerror) {
+ onload = onload || function(){};
+ onerror = onerror || function(){};
+ var indexedDB = FS.indexedDB();
+ try {
+ var openRequest = indexedDB.open(FS.DB_NAME(), FS.DB_VERSION);
+ } catch (e) {
+ return onerror(e);
+ }
+ openRequest.onupgradeneeded = function openRequest_onupgradeneeded() {
+ console.log('creating db');
+ var db = openRequest.result;
+ db.createObjectStore(FS.DB_STORE_NAME);
+ };
+ openRequest.onsuccess = function openRequest_onsuccess() {
+ var db = openRequest.result;
+ var transaction = db.transaction([FS.DB_STORE_NAME], 'readwrite');
+ var files = transaction.objectStore(FS.DB_STORE_NAME);
+ var ok = 0, fail = 0, total = paths.length;
+ function finish() {
+ if (fail == 0) onload(); else onerror();
+ }
+ paths.forEach(function(path) {
+ var putRequest = files.put(FS.analyzePath(path).object.contents, path);
+ putRequest.onsuccess = function putRequest_onsuccess() { ok++; if (ok + fail == total) finish() };
+ putRequest.onerror = function putRequest_onerror() { fail++; if (ok + fail == total) finish() };
+ });
+ transaction.onerror = onerror;
+ };
+ openRequest.onerror = onerror;
+ },loadFilesFromDB:function (paths, onload, onerror) {
+ onload = onload || function(){};
+ onerror = onerror || function(){};
+ var indexedDB = FS.indexedDB();
+ try {
+ var openRequest = indexedDB.open(FS.DB_NAME(), FS.DB_VERSION);
+ } catch (e) {
+ return onerror(e);
+ }
+ openRequest.onupgradeneeded = onerror; // no database to load from
+ openRequest.onsuccess = function openRequest_onsuccess() {
+ var db = openRequest.result;
+ try {
+ var transaction = db.transaction([FS.DB_STORE_NAME], 'readonly');
+ } catch(e) {
+ onerror(e);
+ return;
+ }
+ var files = transaction.objectStore(FS.DB_STORE_NAME);
+ var ok = 0, fail = 0, total = paths.length;
+ function finish() {
+ if (fail == 0) onload(); else onerror();
+ }
+ paths.forEach(function(path) {
+ var getRequest = files.get(path);
+ getRequest.onsuccess = function getRequest_onsuccess() {
+ if (FS.analyzePath(path).exists) {
+ FS.unlink(path);
+ }
+ FS.createDataFile(PATH.dirname(path), PATH.basename(path), getRequest.result, true, true, true);
+ ok++;
+ if (ok + fail == total) finish();
+ };
+ getRequest.onerror = function getRequest_onerror() { fail++; if (ok + fail == total) finish() };
+ });
+ transaction.onerror = onerror;
+ };
+ openRequest.onerror = onerror;
+ }};
+
+ function _lseek(fildes, offset, whence) {
+ // off_t lseek(int fildes, off_t offset, int whence);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/lseek.html
+ var stream = FS.getStream(fildes);
+ if (!stream) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+ try {
+ return FS.llseek(stream, offset, whence);
+ } catch (e) {
+ FS.handleFSError(e);
+ return -1;
+ }
+ }
+
+ function _fileno(stream) {
+ // int fileno(FILE *stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fileno.html
+ stream = FS.getStreamFromPtr(stream);
+ if (!stream) return -1;
+ return stream.fd;
+ }function _fseek(stream, offset, whence) {
+ // int fseek(FILE *stream, long offset, int whence);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fseek.html
+ var fd = _fileno(stream);
+ var ret = _lseek(fd, offset, whence);
+ if (ret == -1) {
+ return -1;
+ }
+ stream = FS.getStreamFromPtr(stream);
+ stream.eof = false;
+ return 0;
+ }
+
+
+ Module["_i64Subtract"] = _i64Subtract;
+
+
+ Module["_i64Add"] = _i64Add;
+
+ function _setlocale(category, locale) {
+ if (!_setlocale.ret) _setlocale.ret = allocate([0], 'i8', ALLOC_NORMAL);
+ return _setlocale.ret;
+ }
+
+
+ function _close(fildes) {
+ // int close(int fildes);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/close.html
+ var stream = FS.getStream(fildes);
+ if (!stream) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+ try {
+ FS.close(stream);
+ return 0;
+ } catch (e) {
+ FS.handleFSError(e);
+ return -1;
+ }
+ }
+
+ function _fsync(fildes) {
+ // int fsync(int fildes);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fsync.html
+ var stream = FS.getStream(fildes);
+ if (stream) {
+ // We write directly to the file system, so there's nothing to do here.
+ return 0;
+ } else {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+ }function _fclose(stream) {
+ // int fclose(FILE *stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fclose.html
+ var fd = _fileno(stream);
+ _fsync(fd);
+ return _close(fd);
+ }
+
+
+
+
+
+ function _mkport() { throw 'TODO' }var SOCKFS={mount:function (mount) {
+ return FS.createNode(null, '/', 16384 | 511 /* 0777 */, 0);
+ },createSocket:function (family, type, protocol) {
+ var streaming = type == 1;
+ if (protocol) {
+ assert(streaming == (protocol == 6)); // if SOCK_STREAM, must be tcp
+ }
+
+ // create our internal socket structure
+ var sock = {
+ family: family,
+ type: type,
+ protocol: protocol,
+ server: null,
+ peers: {},
+ pending: [],
+ recv_queue: [],
+ sock_ops: SOCKFS.websocket_sock_ops
+ };
+
+ // create the filesystem node to store the socket structure
+ var name = SOCKFS.nextname();
+ var node = FS.createNode(SOCKFS.root, name, 49152, 0);
+ node.sock = sock;
+
+ // and the wrapping stream that enables library functions such
+ // as read and write to indirectly interact with the socket
+ var stream = FS.createStream({
+ path: name,
+ node: node,
+ flags: FS.modeStringToFlags('r+'),
+ seekable: false,
+ stream_ops: SOCKFS.stream_ops
+ });
+
+ // map the new stream to the socket structure (sockets have a 1:1
+ // relationship with a stream)
+ sock.stream = stream;
+
+ return sock;
+ },getSocket:function (fd) {
+ var stream = FS.getStream(fd);
+ if (!stream || !FS.isSocket(stream.node.mode)) {
+ return null;
+ }
+ return stream.node.sock;
+ },stream_ops:{poll:function (stream) {
+ var sock = stream.node.sock;
+ return sock.sock_ops.poll(sock);
+ },ioctl:function (stream, request, varargs) {
+ var sock = stream.node.sock;
+ return sock.sock_ops.ioctl(sock, request, varargs);
+ },read:function (stream, buffer, offset, length, position /* ignored */) {
+ var sock = stream.node.sock;
+ var msg = sock.sock_ops.recvmsg(sock, length);
+ if (!msg) {
+ // socket is closed
+ return 0;
+ }
+ buffer.set(msg.buffer, offset);
+ return msg.buffer.length;
+ },write:function (stream, buffer, offset, length, position /* ignored */) {
+ var sock = stream.node.sock;
+ return sock.sock_ops.sendmsg(sock, buffer, offset, length);
+ },close:function (stream) {
+ var sock = stream.node.sock;
+ sock.sock_ops.close(sock);
+ }},nextname:function () {
+ if (!SOCKFS.nextname.current) {
+ SOCKFS.nextname.current = 0;
+ }
+ return 'socket[' + (SOCKFS.nextname.current++) + ']';
+ },websocket_sock_ops:{createPeer:function (sock, addr, port) {
+ var ws;
+
+ if (typeof addr === 'object') {
+ ws = addr;
+ addr = null;
+ port = null;
+ }
+
+ if (ws) {
+ // for sockets that've already connected (e.g. we're the server)
+ // we can inspect the _socket property for the address
+ if (ws._socket) {
+ addr = ws._socket.remoteAddress;
+ port = ws._socket.remotePort;
+ }
+ // if we're just now initializing a connection to the remote,
+ // inspect the url property
+ else {
+ var result = /ws[s]?:\/\/([^:]+):(\d+)/.exec(ws.url);
+ if (!result) {
+ throw new Error('WebSocket URL must be in the format ws(s)://address:port');
+ }
+ addr = result[1];
+ port = parseInt(result[2], 10);
+ }
+ } else {
+ // create the actual websocket object and connect
+ try {
+ // runtimeConfig gets set to true if WebSocket runtime configuration is available.
+ var runtimeConfig = (Module['websocket'] && ('object' === typeof Module['websocket']));
+
+ // The default value is 'ws://' the replace is needed because the compiler replaces "//" comments with '#'
+ // comments without checking context, so we'd end up with ws:#, the replace swaps the "#" for "//" again.
+ var url = 'ws:#'.replace('#', '//');
+
+ if (runtimeConfig) {
+ if ('string' === typeof Module['websocket']['url']) {
+ url = Module['websocket']['url']; // Fetch runtime WebSocket URL config.
+ }
+ }
+
+ if (url === 'ws://' || url === 'wss://') { // Is the supplied URL config just a prefix, if so complete it.
+ url = url + addr + ':' + port;
+ }
+
+ // Make the WebSocket subprotocol (Sec-WebSocket-Protocol) default to binary if no configuration is set.
+ var subProtocols = 'binary'; // The default value is 'binary'
+
+ if (runtimeConfig) {
+ if ('string' === typeof Module['websocket']['subprotocol']) {
+ subProtocols = Module['websocket']['subprotocol']; // Fetch runtime WebSocket subprotocol config.
+ }
+ }
+
+ // The regex trims the string (removes spaces at the beginning and end, then splits the string by
+ // <any space>,<any space> into an Array. Whitespace removal is important for Websockify and ws.
+ subProtocols = subProtocols.replace(/^ +| +$/g,"").split(/ *, */);
+
+ // The node ws library API for specifying optional subprotocol is slightly different than the browser's.
+ var opts = ENVIRONMENT_IS_NODE ? {'protocol': subProtocols.toString()} : subProtocols;
+
+ // If node we use the ws library.
+ var WebSocket = ENVIRONMENT_IS_NODE ? require('ws') : window['WebSocket'];
+ ws = new WebSocket(url, opts);
+ ws.binaryType = 'arraybuffer';
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EHOSTUNREACH);
+ }
+ }
+
+
+ var peer = {
+ addr: addr,
+ port: port,
+ socket: ws,
+ dgram_send_queue: []
+ };
+
+ SOCKFS.websocket_sock_ops.addPeer(sock, peer);
+ SOCKFS.websocket_sock_ops.handlePeerEvents(sock, peer);
+
+ // if this is a bound dgram socket, send the port number first to allow
+ // us to override the ephemeral port reported to us by remotePort on the
+ // remote end.
+ if (sock.type === 2 && typeof sock.sport !== 'undefined') {
+ peer.dgram_send_queue.push(new Uint8Array([
+ 255, 255, 255, 255,
+ 'p'.charCodeAt(0), 'o'.charCodeAt(0), 'r'.charCodeAt(0), 't'.charCodeAt(0),
+ ((sock.sport & 0xff00) >> 8) , (sock.sport & 0xff)
+ ]));
+ }
+
+ return peer;
+ },getPeer:function (sock, addr, port) {
+ return sock.peers[addr + ':' + port];
+ },addPeer:function (sock, peer) {
+ sock.peers[peer.addr + ':' + peer.port] = peer;
+ },removePeer:function (sock, peer) {
+ delete sock.peers[peer.addr + ':' + peer.port];
+ },handlePeerEvents:function (sock, peer) {
+ var first = true;
+
+ var handleOpen = function () {
+ try {
+ var queued = peer.dgram_send_queue.shift();
+ while (queued) {
+ peer.socket.send(queued);
+ queued = peer.dgram_send_queue.shift();
+ }
+ } catch (e) {
+ // not much we can do here in the way of proper error handling as we've already
+ // lied and said this data was sent. shut it down.
+ peer.socket.close();
+ }
+ };
+
+ function handleMessage(data) {
+ assert(typeof data !== 'string' && data.byteLength !== undefined); // must receive an ArrayBuffer
+ data = new Uint8Array(data); // make a typed array view on the array buffer
+
+
+ // if this is the port message, override the peer's port with it
+ var wasfirst = first;
+ first = false;
+ if (wasfirst &&
+ data.length === 10 &&
+ data[0] === 255 && data[1] === 255 && data[2] === 255 && data[3] === 255 &&
+ data[4] === 'p'.charCodeAt(0) && data[5] === 'o'.charCodeAt(0) && data[6] === 'r'.charCodeAt(0) && data[7] === 't'.charCodeAt(0)) {
+ // update the peer's port and it's key in the peer map
+ var newport = ((data[8] << 8) | data[9]);
+ SOCKFS.websocket_sock_ops.removePeer(sock, peer);
+ peer.port = newport;
+ SOCKFS.websocket_sock_ops.addPeer(sock, peer);
+ return;
+ }
+
+ sock.recv_queue.push({ addr: peer.addr, port: peer.port, data: data });
+ };
+
+ if (ENVIRONMENT_IS_NODE) {
+ peer.socket.on('open', handleOpen);
+ peer.socket.on('message', function(data, flags) {
+ if (!flags.binary) {
+ return;
+ }
+ handleMessage((new Uint8Array(data)).buffer); // copy from node Buffer -> ArrayBuffer
+ });
+ peer.socket.on('error', function() {
+ // don't throw
+ });
+ } else {
+ peer.socket.onopen = handleOpen;
+ peer.socket.onmessage = function peer_socket_onmessage(event) {
+ handleMessage(event.data);
+ };
+ }
+ },poll:function (sock) {
+ if (sock.type === 1 && sock.server) {
+ // listen sockets should only say they're available for reading
+ // if there are pending clients.
+ return sock.pending.length ? (64 | 1) : 0;
+ }
+
+ var mask = 0;
+ var dest = sock.type === 1 ? // we only care about the socket state for connection-based sockets
+ SOCKFS.websocket_sock_ops.getPeer(sock, sock.daddr, sock.dport) :
+ null;
+
+ if (sock.recv_queue.length ||
+ !dest || // connection-less sockets are always ready to read
+ (dest && dest.socket.readyState === dest.socket.CLOSING) ||
+ (dest && dest.socket.readyState === dest.socket.CLOSED)) { // let recv return 0 once closed
+ mask |= (64 | 1);
+ }
+
+ if (!dest || // connection-less sockets are always ready to write
+ (dest && dest.socket.readyState === dest.socket.OPEN)) {
+ mask |= 4;
+ }
+
+ if ((dest && dest.socket.readyState === dest.socket.CLOSING) ||
+ (dest && dest.socket.readyState === dest.socket.CLOSED)) {
+ mask |= 16;
+ }
+
+ return mask;
+ },ioctl:function (sock, request, arg) {
+ switch (request) {
+ case 21531:
+ var bytes = 0;
+ if (sock.recv_queue.length) {
+ bytes = sock.recv_queue[0].data.length;
+ }
+ HEAP32[((arg)>>2)]=bytes;
+ return 0;
+ default:
+ return ERRNO_CODES.EINVAL;
+ }
+ },close:function (sock) {
+ // if we've spawned a listen server, close it
+ if (sock.server) {
+ try {
+ sock.server.close();
+ } catch (e) {
+ }
+ sock.server = null;
+ }
+ // close any peer connections
+ var peers = Object.keys(sock.peers);
+ for (var i = 0; i < peers.length; i++) {
+ var peer = sock.peers[peers[i]];
+ try {
+ peer.socket.close();
+ } catch (e) {
+ }
+ SOCKFS.websocket_sock_ops.removePeer(sock, peer);
+ }
+ return 0;
+ },bind:function (sock, addr, port) {
+ if (typeof sock.saddr !== 'undefined' || typeof sock.sport !== 'undefined') {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL); // already bound
+ }
+ sock.saddr = addr;
+ sock.sport = port || _mkport();
+ // in order to emulate dgram sockets, we need to launch a listen server when
+ // binding on a connection-less socket
+ // note: this is only required on the server side
+ if (sock.type === 2) {
+ // close the existing server if it exists
+ if (sock.server) {
+ sock.server.close();
+ sock.server = null;
+ }
+ // swallow error operation not supported error that occurs when binding in the
+ // browser where this isn't supported
+ try {
+ sock.sock_ops.listen(sock, 0);
+ } catch (e) {
+ if (!(e instanceof FS.ErrnoError)) throw e;
+ if (e.errno !== ERRNO_CODES.EOPNOTSUPP) throw e;
+ }
+ }
+ },connect:function (sock, addr, port) {
+ if (sock.server) {
+ throw new FS.ErrnoError(ERRNO_CODS.EOPNOTSUPP);
+ }
+
+ // TODO autobind
+ // if (!sock.addr && sock.type == 2) {
+ // }
+
+ // early out if we're already connected / in the middle of connecting
+ if (typeof sock.daddr !== 'undefined' && typeof sock.dport !== 'undefined') {
+ var dest = SOCKFS.websocket_sock_ops.getPeer(sock, sock.daddr, sock.dport);
+ if (dest) {
+ if (dest.socket.readyState === dest.socket.CONNECTING) {
+ throw new FS.ErrnoError(ERRNO_CODES.EALREADY);
+ } else {
+ throw new FS.ErrnoError(ERRNO_CODES.EISCONN);
+ }
+ }
+ }
+
+ // add the socket to our peer list and set our
+ // destination address / port to match
+ var peer = SOCKFS.websocket_sock_ops.createPeer(sock, addr, port);
+ sock.daddr = peer.addr;
+ sock.dport = peer.port;
+
+ // always "fail" in non-blocking mode
+ throw new FS.ErrnoError(ERRNO_CODES.EINPROGRESS);
+ },listen:function (sock, backlog) {
+ if (!ENVIRONMENT_IS_NODE) {
+ throw new FS.ErrnoError(ERRNO_CODES.EOPNOTSUPP);
+ }
+ if (sock.server) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL); // already listening
+ }
+ var WebSocketServer = require('ws').Server;
+ var host = sock.saddr;
+ sock.server = new WebSocketServer({
+ host: host,
+ port: sock.sport
+ // TODO support backlog
+ });
+
+ sock.server.on('connection', function(ws) {
+ if (sock.type === 1) {
+ var newsock = SOCKFS.createSocket(sock.family, sock.type, sock.protocol);
+
+ // create a peer on the new socket
+ var peer = SOCKFS.websocket_sock_ops.createPeer(newsock, ws);
+ newsock.daddr = peer.addr;
+ newsock.dport = peer.port;
+
+ // push to queue for accept to pick up
+ sock.pending.push(newsock);
+ } else {
+ // create a peer on the listen socket so calling sendto
+ // with the listen socket and an address will resolve
+ // to the correct client
+ SOCKFS.websocket_sock_ops.createPeer(sock, ws);
+ }
+ });
+ sock.server.on('closed', function() {
+ sock.server = null;
+ });
+ sock.server.on('error', function() {
+ // don't throw
+ });
+ },accept:function (listensock) {
+ if (!listensock.server) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var newsock = listensock.pending.shift();
+ newsock.stream.flags = listensock.stream.flags;
+ return newsock;
+ },getname:function (sock, peer) {
+ var addr, port;
+ if (peer) {
+ if (sock.daddr === undefined || sock.dport === undefined) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ }
+ addr = sock.daddr;
+ port = sock.dport;
+ } else {
+ // TODO saddr and sport will be set for bind()'d UDP sockets, but what
+ // should we be returning for TCP sockets that've been connect()'d?
+ addr = sock.saddr || 0;
+ port = sock.sport || 0;
+ }
+ return { addr: addr, port: port };
+ },sendmsg:function (sock, buffer, offset, length, addr, port) {
+ if (sock.type === 2) {
+ // connection-less sockets will honor the message address,
+ // and otherwise fall back to the bound destination address
+ if (addr === undefined || port === undefined) {
+ addr = sock.daddr;
+ port = sock.dport;
+ }
+ // if there was no address to fall back to, error out
+ if (addr === undefined || port === undefined) {
+ throw new FS.ErrnoError(ERRNO_CODES.EDESTADDRREQ);
+ }
+ } else {
+ // connection-based sockets will only use the bound
+ addr = sock.daddr;
+ port = sock.dport;
+ }
+
+ // find the peer for the destination address
+ var dest = SOCKFS.websocket_sock_ops.getPeer(sock, addr, port);
+
+ // early out if not connected with a connection-based socket
+ if (sock.type === 1) {
+ if (!dest || dest.socket.readyState === dest.socket.CLOSING || dest.socket.readyState === dest.socket.CLOSED) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ } else if (dest.socket.readyState === dest.socket.CONNECTING) {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ }
+
+ // create a copy of the incoming data to send, as the WebSocket API
+ // doesn't work entirely with an ArrayBufferView, it'll just send
+ // the entire underlying buffer
+ var data;
+ if (buffer instanceof Array || buffer instanceof ArrayBuffer) {
+ data = buffer.slice(offset, offset + length);
+ } else { // ArrayBufferView
+ data = buffer.buffer.slice(buffer.byteOffset + offset, buffer.byteOffset + offset + length);
+ }
+
+ // if we're emulating a connection-less dgram socket and don't have
+ // a cached connection, queue the buffer to send upon connect and
+ // lie, saying the data was sent now.
+ if (sock.type === 2) {
+ if (!dest || dest.socket.readyState !== dest.socket.OPEN) {
+ // if we're not connected, open a new connection
+ if (!dest || dest.socket.readyState === dest.socket.CLOSING || dest.socket.readyState === dest.socket.CLOSED) {
+ dest = SOCKFS.websocket_sock_ops.createPeer(sock, addr, port);
+ }
+ dest.dgram_send_queue.push(data);
+ return length;
+ }
+ }
+
+ try {
+ // send the actual data
+ dest.socket.send(data);
+ return length;
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ },recvmsg:function (sock, length) {
+ // http://pubs.opengroup.org/onlinepubs/7908799/xns/recvmsg.html
+ if (sock.type === 1 && sock.server) {
+ // tcp servers should not be recv()'ing on the listen socket
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ }
+
+ var queued = sock.recv_queue.shift();
+ if (!queued) {
+ if (sock.type === 1) {
+ var dest = SOCKFS.websocket_sock_ops.getPeer(sock, sock.daddr, sock.dport);
+
+ if (!dest) {
+ // if we have a destination address but are not connected, error out
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ }
+ else if (dest.socket.readyState === dest.socket.CLOSING || dest.socket.readyState === dest.socket.CLOSED) {
+ // return null if the socket has closed
+ return null;
+ }
+ else {
+ // else, our socket is in a valid state but truly has nothing available
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ } else {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ }
+
+ // queued.data will be an ArrayBuffer if it's unadulterated, but if it's
+ // requeued TCP data it'll be an ArrayBufferView
+ var queuedLength = queued.data.byteLength || queued.data.length;
+ var queuedOffset = queued.data.byteOffset || 0;
+ var queuedBuffer = queued.data.buffer || queued.data;
+ var bytesRead = Math.min(length, queuedLength);
+ var res = {
+ buffer: new Uint8Array(queuedBuffer, queuedOffset, bytesRead),
+ addr: queued.addr,
+ port: queued.port
+ };
+
+
+ // push back any unread data for TCP connections
+ if (sock.type === 1 && bytesRead < queuedLength) {
+ var bytesRemaining = queuedLength - bytesRead;
+ queued.data = new Uint8Array(queuedBuffer, queuedOffset + bytesRead, bytesRemaining);
+ sock.recv_queue.unshift(queued);
+ }
+
+ return res;
+ }}};function _recv(fd, buf, len, flags) {
+ var sock = SOCKFS.getSocket(fd);
+ if (!sock) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+ // TODO honor flags
+ return _read(fd, buf, len);
+ }
+
+ function _pread(fildes, buf, nbyte, offset) {
+ // ssize_t pread(int fildes, void *buf, size_t nbyte, off_t offset);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/read.html
+ var stream = FS.getStream(fildes);
+ if (!stream) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+ try {
+ var slab = HEAP8;
+ return FS.read(stream, slab, buf, nbyte, offset);
+ } catch (e) {
+ FS.handleFSError(e);
+ return -1;
+ }
+ }function _read(fildes, buf, nbyte) {
+ // ssize_t read(int fildes, void *buf, size_t nbyte);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/read.html
+ var stream = FS.getStream(fildes);
+ if (!stream) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+
+
+ try {
+ var slab = HEAP8;
+ return FS.read(stream, slab, buf, nbyte);
+ } catch (e) {
+ FS.handleFSError(e);
+ return -1;
+ }
+ }function _fread(ptr, size, nitems, stream) {
+ // size_t fread(void *restrict ptr, size_t size, size_t nitems, FILE *restrict stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fread.html
+ var bytesToRead = nitems * size;
+ if (bytesToRead == 0) {
+ return 0;
+ }
+ var bytesRead = 0;
+ var streamObj = FS.getStreamFromPtr(stream);
+ if (!streamObj) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return 0;
+ }
+ while (streamObj.ungotten.length && bytesToRead > 0) {
+ HEAP8[((ptr++)|0)]=streamObj.ungotten.pop();
+ bytesToRead--;
+ bytesRead++;
+ }
+ var err = _read(streamObj.fd, ptr, bytesToRead);
+ if (err == -1) {
+ if (streamObj) streamObj.error = true;
+ return 0;
+ }
+ bytesRead += err;
+ if (bytesRead < bytesToRead) streamObj.eof = true;
+ return Math.floor(bytesRead / size);
+ }
+
+ function _toupper(chr) {
+ if (chr >= 97 && chr <= 122) {
+ return chr - 97 + 65;
+ } else {
+ return chr;
+ }
+ }
+
+
+
+ function _open(path, oflag, varargs) {
+ // int open(const char *path, int oflag, ...);
+ // http://pubs.opengroup.org/onlinepubs/009695399/functions/open.html
+ var mode = HEAP32[((varargs)>>2)];
+ path = Pointer_stringify(path);
+ try {
+ var stream = FS.open(path, oflag, mode);
+ return stream.fd;
+ } catch (e) {
+ FS.handleFSError(e);
+ return -1;
+ }
+ }function _fopen(filename, mode) {
+ // FILE *fopen(const char *restrict filename, const char *restrict mode);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fopen.html
+ var flags;
+ mode = Pointer_stringify(mode);
+ if (mode[0] == 'r') {
+ if (mode.indexOf('+') != -1) {
+ flags = 2;
+ } else {
+ flags = 0;
+ }
+ } else if (mode[0] == 'w') {
+ if (mode.indexOf('+') != -1) {
+ flags = 2;
+ } else {
+ flags = 1;
+ }
+ flags |= 64;
+ flags |= 512;
+ } else if (mode[0] == 'a') {
+ if (mode.indexOf('+') != -1) {
+ flags = 2;
+ } else {
+ flags = 1;
+ }
+ flags |= 64;
+ flags |= 1024;
+ } else {
+ ___setErrNo(ERRNO_CODES.EINVAL);
+ return 0;
+ }
+ var fd = _open(filename, flags, allocate([0x1FF, 0, 0, 0], 'i32', ALLOC_STACK)); // All creation permissions.
+ return fd === -1 ? 0 : FS.getPtrForStream(FS.getStream(fd));
+ }
+
+ var _emscripten_check_longjmp=true;
+
+
+
+ function _send(fd, buf, len, flags) {
+ var sock = SOCKFS.getSocket(fd);
+ if (!sock) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+ // TODO honor flags
+ return _write(fd, buf, len);
+ }
+
+ function _pwrite(fildes, buf, nbyte, offset) {
+ // ssize_t pwrite(int fildes, const void *buf, size_t nbyte, off_t offset);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/write.html
+ var stream = FS.getStream(fildes);
+ if (!stream) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+ try {
+ var slab = HEAP8;
+ return FS.write(stream, slab, buf, nbyte, offset);
+ } catch (e) {
+ FS.handleFSError(e);
+ return -1;
+ }
+ }function _write(fildes, buf, nbyte) {
+ // ssize_t write(int fildes, const void *buf, size_t nbyte);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/write.html
+ var stream = FS.getStream(fildes);
+ if (!stream) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+
+
+ try {
+ var slab = HEAP8;
+ return FS.write(stream, slab, buf, nbyte);
+ } catch (e) {
+ FS.handleFSError(e);
+ return -1;
+ }
+ }function _fputc(c, stream) {
+ // int fputc(int c, FILE *stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fputc.html
+ var chr = unSign(c & 0xFF);
+ HEAP8[((_fputc.ret)|0)]=chr;
+ var fd = _fileno(stream);
+ var ret = _write(fd, _fputc.ret, 1);
+ if (ret == -1) {
+ var streamObj = FS.getStreamFromPtr(stream);
+ if (streamObj) streamObj.error = true;
+ return -1;
+ } else {
+ return chr;
+ }
+ }
+
+ var _log=Math_log;
+
+ var _emscripten_postinvoke=true;
+
+
+ function _putchar(c) {
+ // int putchar(int c);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/putchar.html
+ return _fputc(c, HEAP32[((_stdout)>>2)]);
+ }
+ Module["_saveSetjmp"] = _saveSetjmp;
+
+ function _fwrite(ptr, size, nitems, stream) {
+ // size_t fwrite(const void *restrict ptr, size_t size, size_t nitems, FILE *restrict stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fwrite.html
+ var bytesToWrite = nitems * size;
+ if (bytesToWrite == 0) return 0;
+ var fd = _fileno(stream);
+ var bytesWritten = _write(fd, ptr, bytesToWrite);
+ if (bytesWritten == -1) {
+ var streamObj = FS.getStreamFromPtr(stream);
+ if (streamObj) streamObj.error = true;
+ return 0;
+ } else {
+ return Math.floor(bytesWritten / size);
+ }
+ }
+
+ function _system(command) {
+ // int system(const char *command);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/system.html
+ // Can't call external programs.
+ ___setErrNo(ERRNO_CODES.EAGAIN);
+ return -1;
+ }
+
+ function _frexp(x, exp_addr) {
+ var sig = 0, exp_ = 0;
+ if (x !== 0) {
+ var sign = 1;
+ if (x < 0) {
+ x = -x;
+ sign = -1;
+ }
+ var raw_exp = Math.log(x)/Math.log(2);
+ exp_ = Math.ceil(raw_exp);
+ if (exp_ === raw_exp) exp_ += 1;
+ sig = sign*x/Math.pow(2, exp_);
+ }
+ HEAP32[((exp_addr)>>2)]=exp_;
+ return sig;
+ }
+
+
+
+ var _tzname=allocate(8, "i32*", ALLOC_STATIC);
+
+ var _daylight=allocate(1, "i32*", ALLOC_STATIC);
+
+ var _timezone=allocate(1, "i32*", ALLOC_STATIC);function _tzset() {
+ // TODO: Use (malleable) environment variables instead of system settings.
+ if (_tzset.called) return;
+ _tzset.called = true;
+
+ HEAP32[((_timezone)>>2)]=-(new Date()).getTimezoneOffset() * 60;
+
+ var winter = new Date(2000, 0, 1);
+ var summer = new Date(2000, 6, 1);
+ HEAP32[((_daylight)>>2)]=Number(winter.getTimezoneOffset() != summer.getTimezoneOffset());
+
+ var winterName = 'GMT'; // XXX do not rely on browser timezone info, it is very unpredictable | winter.toString().match(/\(([A-Z]+)\)/)[1];
+ var summerName = 'GMT'; // XXX do not rely on browser timezone info, it is very unpredictable | summer.toString().match(/\(([A-Z]+)\)/)[1];
+ var winterNamePtr = allocate(intArrayFromString(winterName), 'i8', ALLOC_NORMAL);
+ var summerNamePtr = allocate(intArrayFromString(summerName), 'i8', ALLOC_NORMAL);
+ HEAP32[((_tzname)>>2)]=winterNamePtr;
+ HEAP32[(((_tzname)+(4))>>2)]=summerNamePtr;
+ }function _mktime(tmPtr) {
+ _tzset();
+ var year = HEAP32[(((tmPtr)+(20))>>2)];
+ var timestamp = new Date(year >= 1900 ? year : year + 1900,
+ HEAP32[(((tmPtr)+(16))>>2)],
+ HEAP32[(((tmPtr)+(12))>>2)],
+ HEAP32[(((tmPtr)+(8))>>2)],
+ HEAP32[(((tmPtr)+(4))>>2)],
+ HEAP32[((tmPtr)>>2)],
+ 0).getTime() / 1000;
+ HEAP32[(((tmPtr)+(24))>>2)]=new Date(timestamp).getDay();
+ var yday = Math.round((timestamp - (new Date(year, 0, 1)).getTime()) / (1000 * 60 * 60 * 24));
+ HEAP32[(((tmPtr)+(28))>>2)]=yday;
+ return timestamp;
+ }
+
+ function _isalpha(chr) {
+ return (chr >= 97 && chr <= 122) ||
+ (chr >= 65 && chr <= 90);
+ }
+
+
+ function _malloc(bytes) {
+ /* Over-allocate to make sure it is byte-aligned by 8.
+ * This will leak memory, but this is only the dummy
+ * implementation (replaced by dlmalloc normally) so
+ * not an issue.
+ */
+ var ptr = Runtime.dynamicAlloc(bytes + 8);
+ return (ptr+8) & 0xFFFFFFF8;
+ }
+ Module["_malloc"] = _malloc;function _tmpnam(s, dir, prefix) {
+ // char *tmpnam(char *s);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/tmpnam.html
+ // NOTE: The dir and prefix arguments are for internal use only.
+ var folder = FS.findObject(dir || '/tmp');
+ if (!folder || !folder.isFolder) {
+ dir = '/tmp';
+ folder = FS.findObject(dir);
+ if (!folder || !folder.isFolder) return 0;
+ }
+ var name = prefix || 'file';
+ do {
+ name += String.fromCharCode(65 + Math.floor(Math.random() * 25));
+ } while (name in folder.contents);
+ var result = dir + '/' + name;
+ if (!_tmpnam.buffer) _tmpnam.buffer = _malloc(256);
+ if (!s) s = _tmpnam.buffer;
+ writeAsciiToMemory(result, s);
+ return s;
+ }
+
+ var Browser={mainLoop:{scheduler:null,method:"",shouldPause:false,paused:false,queue:[],pause:function () {
+ Browser.mainLoop.shouldPause = true;
+ },resume:function () {
+ if (Browser.mainLoop.paused) {
+ Browser.mainLoop.paused = false;
+ Browser.mainLoop.scheduler();
+ }
+ Browser.mainLoop.shouldPause = false;
+ },updateStatus:function () {
+ if (Module['setStatus']) {
+ var message = Module['statusMessage'] || 'Please wait...';
+ var remaining = Browser.mainLoop.remainingBlockers;
+ var expected = Browser.mainLoop.expectedBlockers;
+ if (remaining) {
+ if (remaining < expected) {
+ Module['setStatus'](message + ' (' + (expected - remaining) + '/' + expected + ')');
+ } else {
+ Module['setStatus'](message);
+ }
+ } else {
+ Module['setStatus']('');
+ }
+ }
+ }},isFullScreen:false,pointerLock:false,moduleContextCreatedCallbacks:[],workers:[],init:function () {
+ if (!Module["preloadPlugins"]) Module["preloadPlugins"] = []; // needs to exist even in workers
+
+ if (Browser.initted || ENVIRONMENT_IS_WORKER) return;
+ Browser.initted = true;
+
+ try {
+ new Blob();
+ Browser.hasBlobConstructor = true;
+ } catch(e) {
+ Browser.hasBlobConstructor = false;
+ console.log("warning: no blob constructor, cannot create blobs with mimetypes");
+ }
+ Browser.BlobBuilder = typeof MozBlobBuilder != "undefined" ? MozBlobBuilder : (typeof WebKitBlobBuilder != "undefined" ? WebKitBlobBuilder : (!Browser.hasBlobConstructor ? console.log("warning: no BlobBuilder") : null));
+ Browser.URLObject = typeof window != "undefined" ? (window.URL ? window.URL : window.webkitURL) : undefined;
+ if (!Module.noImageDecoding && typeof Browser.URLObject === 'undefined') {
+ console.log("warning: Browser does not support creating object URLs. Built-in browser image decoding will not be available.");
+ Module.noImageDecoding = true;
+ }
+
+ // Support for plugins that can process preloaded files. You can add more of these to
+ // your app by creating and appending to Module.preloadPlugins.
+ //
+ // Each plugin is asked if it can handle a file based on the file's name. If it can,
+ // it is given the file's raw data. When it is done, it calls a callback with the file's
+ // (possibly modified) data. For example, a plugin might decompress a file, or it
+ // might create some side data structure for use later (like an Image element, etc.).
+
+ var imagePlugin = {};
+ imagePlugin['canHandle'] = function imagePlugin_canHandle(name) {
+ return !Module.noImageDecoding && /\.(jpg|jpeg|png|bmp)$/i.test(name);
+ };
+ imagePlugin['handle'] = function imagePlugin_handle(byteArray, name, onload, onerror) {
+ var b = null;
+ if (Browser.hasBlobConstructor) {
+ try {
+ b = new Blob([byteArray], { type: Browser.getMimetype(name) });
+ if (b.size !== byteArray.length) { // Safari bug #118630
+ // Safari's Blob can only take an ArrayBuffer
+ b = new Blob([(new Uint8Array(byteArray)).buffer], { type: Browser.getMimetype(name) });
+ }
+ } catch(e) {
+ Runtime.warnOnce('Blob constructor present but fails: ' + e + '; falling back to blob builder');
+ }
+ }
+ if (!b) {
+ var bb = new Browser.BlobBuilder();
+ bb.append((new Uint8Array(byteArray)).buffer); // we need to pass a buffer, and must copy the array to get the right data range
+ b = bb.getBlob();
+ }
+ var url = Browser.URLObject.createObjectURL(b);
+ var img = new Image();
+ img.onload = function img_onload() {
+ assert(img.complete, 'Image ' + name + ' could not be decoded');
+ var canvas = document.createElement('canvas');
+ canvas.width = img.width;
+ canvas.height = img.height;
+ var ctx = canvas.getContext('2d');
+ ctx.drawImage(img, 0, 0);
+ Module["preloadedImages"][name] = canvas;
+ Browser.URLObject.revokeObjectURL(url);
+ if (onload) onload(byteArray);
+ };
+ img.onerror = function img_onerror(event) {
+ console.log('Image ' + url + ' could not be decoded');
+ if (onerror) onerror();
+ };
+ img.src = url;
+ };
+ Module['preloadPlugins'].push(imagePlugin);
+
+ var audioPlugin = {};
+ audioPlugin['canHandle'] = function audioPlugin_canHandle(name) {
+ return !Module.noAudioDecoding && name.substr(-4) in { '.ogg': 1, '.wav': 1, '.mp3': 1 };
+ };
+ audioPlugin['handle'] = function audioPlugin_handle(byteArray, name, onload, onerror) {
+ var done = false;
+ function finish(audio) {
+ if (done) return;
+ done = true;
+ Module["preloadedAudios"][name] = audio;
+ if (onload) onload(byteArray);
+ }
+ function fail() {
+ if (done) return;
+ done = true;
+ Module["preloadedAudios"][name] = new Audio(); // empty shim
+ if (onerror) onerror();
+ }
+ if (Browser.hasBlobConstructor) {
+ try {
+ var b = new Blob([byteArray], { type: Browser.getMimetype(name) });
+ } catch(e) {
+ return fail();
+ }
+ var url = Browser.URLObject.createObjectURL(b); // XXX we never revoke this!
+ var audio = new Audio();
+ audio.addEventListener('canplaythrough', function() { finish(audio) }, false); // use addEventListener due to chromium bug 124926
+ audio.onerror = function audio_onerror(event) {
+ if (done) return;
+ console.log('warning: browser could not fully decode audio ' + name + ', trying slower base64 approach');
+ function encode64(data) {
+ var BASE = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';
+ var PAD = '=';
+ var ret = '';
+ var leftchar = 0;
+ var leftbits = 0;
+ for (var i = 0; i < data.length; i++) {
+ leftchar = (leftchar << 8) | data[i];
+ leftbits += 8;
+ while (leftbits >= 6) {
+ var curr = (leftchar >> (leftbits-6)) & 0x3f;
+ leftbits -= 6;
+ ret += BASE[curr];
+ }
+ }
+ if (leftbits == 2) {
+ ret += BASE[(leftchar&3) << 4];
+ ret += PAD + PAD;
+ } else if (leftbits == 4) {
+ ret += BASE[(leftchar&0xf) << 2];
+ ret += PAD;
+ }
+ return ret;
+ }
+ audio.src = 'data:audio/x-' + name.substr(-3) + ';base64,' + encode64(byteArray);
+ finish(audio); // we don't wait for confirmation this worked - but it's worth trying
+ };
+ audio.src = url;
+ // workaround for chrome bug 124926 - we do not always get oncanplaythrough or onerror
+ Browser.safeSetTimeout(function() {
+ finish(audio); // try to use it even though it is not necessarily ready to play
+ }, 10000);
+ } else {
+ return fail();
+ }
+ };
+ Module['preloadPlugins'].push(audioPlugin);
+
+ // Canvas event setup
+
+ var canvas = Module['canvas'];
+
+ // forced aspect ratio can be enabled by defining 'forcedAspectRatio' on Module
+ // Module['forcedAspectRatio'] = 4 / 3;
+
+ canvas.requestPointerLock = canvas['requestPointerLock'] ||
+ canvas['mozRequestPointerLock'] ||
+ canvas['webkitRequestPointerLock'] ||
+ canvas['msRequestPointerLock'] ||
+ function(){};
+ canvas.exitPointerLock = document['exitPointerLock'] ||
+ document['mozExitPointerLock'] ||
+ document['webkitExitPointerLock'] ||
+ document['msExitPointerLock'] ||
+ function(){}; // no-op if function does not exist
+ canvas.exitPointerLock = canvas.exitPointerLock.bind(document);
+
+ function pointerLockChange() {
+ Browser.pointerLock = document['pointerLockElement'] === canvas ||
+ document['mozPointerLockElement'] === canvas ||
+ document['webkitPointerLockElement'] === canvas ||
+ document['msPointerLockElement'] === canvas;
+ }
+
+ document.addEventListener('pointerlockchange', pointerLockChange, false);
+ document.addEventListener('mozpointerlockchange', pointerLockChange, false);
+ document.addEventListener('webkitpointerlockchange', pointerLockChange, false);
+ document.addEventListener('mspointerlockchange', pointerLockChange, false);
+
+ if (Module['elementPointerLock']) {
+ canvas.addEventListener("click", function(ev) {
+ if (!Browser.pointerLock && canvas.requestPointerLock) {
+ canvas.requestPointerLock();
+ ev.preventDefault();
+ }
+ }, false);
+ }
+ },createContext:function (canvas, useWebGL, setInModule, webGLContextAttributes) {
+ var ctx;
+ var errorInfo = '?';
+ function onContextCreationError(event) {
+ errorInfo = event.statusMessage || errorInfo;
+ }
+ try {
+ if (useWebGL) {
+ var contextAttributes = {
+ antialias: false,
+ alpha: false
+ };
+
+ if (webGLContextAttributes) {
+ for (var attribute in webGLContextAttributes) {
+ contextAttributes[attribute] = webGLContextAttributes[attribute];
+ }
+ }
+
+
+ canvas.addEventListener('webglcontextcreationerror', onContextCreationError, false);
+ try {
+ ['experimental-webgl', 'webgl'].some(function(webglId) {
+ return ctx = canvas.getContext(webglId, contextAttributes);
+ });
+ } finally {
+ canvas.removeEventListener('webglcontextcreationerror', onContextCreationError, false);
+ }
+ } else {
+ ctx = canvas.getContext('2d');
+ }
+ if (!ctx) throw ':(';
+ } catch (e) {
+ Module.print('Could not create canvas: ' + [errorInfo, e]);
+ return null;
+ }
+ if (useWebGL) {
+ // Set the background of the WebGL canvas to black
+ canvas.style.backgroundColor = "black";
+
+ // Warn on context loss
+ canvas.addEventListener('webglcontextlost', function(event) {
+ alert('WebGL context lost. You will need to reload the page.');
+ }, false);
+ }
+ if (setInModule) {
+ GLctx = Module.ctx = ctx;
+ Module.useWebGL = useWebGL;
+ Browser.moduleContextCreatedCallbacks.forEach(function(callback) { callback() });
+ Browser.init();
+ }
+ return ctx;
+ },destroyContext:function (canvas, useWebGL, setInModule) {},fullScreenHandlersInstalled:false,lockPointer:undefined,resizeCanvas:undefined,requestFullScreen:function (lockPointer, resizeCanvas) {
+ Browser.lockPointer = lockPointer;
+ Browser.resizeCanvas = resizeCanvas;
+ if (typeof Browser.lockPointer === 'undefined') Browser.lockPointer = true;
+ if (typeof Browser.resizeCanvas === 'undefined') Browser.resizeCanvas = false;
+
+ var canvas = Module['canvas'];
+ function fullScreenChange() {
+ Browser.isFullScreen = false;
+ var canvasContainer = canvas.parentNode;
+ if ((document['webkitFullScreenElement'] || document['webkitFullscreenElement'] ||
+ document['mozFullScreenElement'] || document['mozFullscreenElement'] ||
+ document['fullScreenElement'] || document['fullscreenElement'] ||
+ document['msFullScreenElement'] || document['msFullscreenElement'] ||
+ document['webkitCurrentFullScreenElement']) === canvasContainer) {
+ canvas.cancelFullScreen = document['cancelFullScreen'] ||
+ document['mozCancelFullScreen'] ||
+ document['webkitCancelFullScreen'] ||
+ document['msExitFullscreen'] ||
+ document['exitFullscreen'] ||
+ function() {};
+ canvas.cancelFullScreen = canvas.cancelFullScreen.bind(document);
+ if (Browser.lockPointer) canvas.requestPointerLock();
+ Browser.isFullScreen = true;
+ if (Browser.resizeCanvas) Browser.setFullScreenCanvasSize();
+ } else {
+
+ // remove the full screen specific parent of the canvas again to restore the HTML structure from before going full screen
+ canvasContainer.parentNode.insertBefore(canvas, canvasContainer);
+ canvasContainer.parentNode.removeChild(canvasContainer);
+
+ if (Browser.resizeCanvas) Browser.setWindowedCanvasSize();
+ }
+ if (Module['onFullScreen']) Module['onFullScreen'](Browser.isFullScreen);
+ Browser.updateCanvasDimensions(canvas);
+ }
+
+ if (!Browser.fullScreenHandlersInstalled) {
+ Browser.fullScreenHandlersInstalled = true;
+ document.addEventListener('fullscreenchange', fullScreenChange, false);
+ document.addEventListener('mozfullscreenchange', fullScreenChange, false);
+ document.addEventListener('webkitfullscreenchange', fullScreenChange, false);
+ document.addEventListener('MSFullscreenChange', fullScreenChange, false);
+ }
+
+ // create a new parent to ensure the canvas has no siblings. this allows browsers to optimize full screen performance when its parent is the full screen root
+ var canvasContainer = document.createElement("div");
+ canvas.parentNode.insertBefore(canvasContainer, canvas);
+ canvasContainer.appendChild(canvas);
+
+ // use parent of canvas as full screen root to allow aspect ratio correction (Firefox stretches the root to screen size)
+ canvasContainer.requestFullScreen = canvasContainer['requestFullScreen'] ||
+ canvasContainer['mozRequestFullScreen'] ||
+ canvasContainer['msRequestFullscreen'] ||
+ (canvasContainer['webkitRequestFullScreen'] ? function() { canvasContainer['webkitRequestFullScreen'](Element['ALLOW_KEYBOARD_INPUT']) } : null);
+ canvasContainer.requestFullScreen();
+ },requestAnimationFrame:function requestAnimationFrame(func) {
+ if (typeof window === 'undefined') { // Provide fallback to setTimeout if window is undefined (e.g. in Node.js)
+ setTimeout(func, 1000/60);
+ } else {
+ if (!window.requestAnimationFrame) {
+ window.requestAnimationFrame = window['requestAnimationFrame'] ||
+ window['mozRequestAnimationFrame'] ||
+ window['webkitRequestAnimationFrame'] ||
+ window['msRequestAnimationFrame'] ||
+ window['oRequestAnimationFrame'] ||
+ window['setTimeout'];
+ }
+ window.requestAnimationFrame(func);
+ }
+ },safeCallback:function (func) {
+ return function() {
+ if (!ABORT) return func.apply(null, arguments);
+ };
+ },safeRequestAnimationFrame:function (func) {
+ return Browser.requestAnimationFrame(function() {
+ if (!ABORT) func();
+ });
+ },safeSetTimeout:function (func, timeout) {
+ return setTimeout(function() {
+ if (!ABORT) func();
+ }, timeout);
+ },safeSetInterval:function (func, timeout) {
+ return setInterval(function() {
+ if (!ABORT) func();
+ }, timeout);
+ },getMimetype:function (name) {
+ return {
+ 'jpg': 'image/jpeg',
+ 'jpeg': 'image/jpeg',
+ 'png': 'image/png',
+ 'bmp': 'image/bmp',
+ 'ogg': 'audio/ogg',
+ 'wav': 'audio/wav',
+ 'mp3': 'audio/mpeg'
+ }[name.substr(name.lastIndexOf('.')+1)];
+ },getUserMedia:function (func) {
+ if(!window.getUserMedia) {
+ window.getUserMedia = navigator['getUserMedia'] ||
+ navigator['mozGetUserMedia'];
+ }
+ window.getUserMedia(func);
+ },getMovementX:function (event) {
+ return event['movementX'] ||
+ event['mozMovementX'] ||
+ event['webkitMovementX'] ||
+ 0;
+ },getMovementY:function (event) {
+ return event['movementY'] ||
+ event['mozMovementY'] ||
+ event['webkitMovementY'] ||
+ 0;
+ },getMouseWheelDelta:function (event) {
+ return Math.max(-1, Math.min(1, event.type === 'DOMMouseScroll' ? event.detail : -event.wheelDelta));
+ },mouseX:0,mouseY:0,mouseMovementX:0,mouseMovementY:0,calculateMouseEvent:function (event) { // event should be mousemove, mousedown or mouseup
+ if (Browser.pointerLock) {
+ // When the pointer is locked, calculate the coordinates
+ // based on the movement of the mouse.
+ // Workaround for Firefox bug 764498
+ if (event.type != 'mousemove' &&
+ ('mozMovementX' in event)) {
+ Browser.mouseMovementX = Browser.mouseMovementY = 0;
+ } else {
+ Browser.mouseMovementX = Browser.getMovementX(event);
+ Browser.mouseMovementY = Browser.getMovementY(event);
+ }
+
+ // check if SDL is available
+ if (typeof SDL != "undefined") {
+ Browser.mouseX = SDL.mouseX + Browser.mouseMovementX;
+ Browser.mouseY = SDL.mouseY + Browser.mouseMovementY;
+ } else {
+ // just add the mouse delta to the current absolut mouse position
+ // FIXME: ideally this should be clamped against the canvas size and zero
+ Browser.mouseX += Browser.mouseMovementX;
+ Browser.mouseY += Browser.mouseMovementY;
+ }
+ } else {
+ // Otherwise, calculate the movement based on the changes
+ // in the coordinates.
+ var rect = Module["canvas"].getBoundingClientRect();
+ var x, y;
+
+ // Neither .scrollX or .pageXOffset are defined in a spec, but
+ // we prefer .scrollX because it is currently in a spec draft.
+ // (see: http://www.w3.org/TR/2013/WD-cssom-view-20131217/)
+ var scrollX = ((typeof window.scrollX !== 'undefined') ? window.scrollX : window.pageXOffset);
+ var scrollY = ((typeof window.scrollY !== 'undefined') ? window.scrollY : window.pageYOffset);
+ if (event.type == 'touchstart' ||
+ event.type == 'touchend' ||
+ event.type == 'touchmove') {
+ var t = event.touches.item(0);
+ if (t) {
+ x = t.pageX - (scrollX + rect.left);
+ y = t.pageY - (scrollY + rect.top);
+ } else {
+ return;
+ }
+ } else {
+ x = event.pageX - (scrollX + rect.left);
+ y = event.pageY - (scrollY + rect.top);
+ }
+
+ // the canvas might be CSS-scaled compared to its backbuffer;
+ // SDL-using content will want mouse coordinates in terms
+ // of backbuffer units.
+ var cw = Module["canvas"].width;
+ var ch = Module["canvas"].height;
+ x = x * (cw / rect.width);
+ y = y * (ch / rect.height);
+
+ Browser.mouseMovementX = x - Browser.mouseX;
+ Browser.mouseMovementY = y - Browser.mouseY;
+ Browser.mouseX = x;
+ Browser.mouseY = y;
+ }
+ },xhrLoad:function (url, onload, onerror) {
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, true);
+ xhr.responseType = 'arraybuffer';
+ xhr.onload = function xhr_onload() {
+ if (xhr.status == 200 || (xhr.status == 0 && xhr.response)) { // file URLs can return 0
+ onload(xhr.response);
+ } else {
+ onerror();
+ }
+ };
+ xhr.onerror = onerror;
+ xhr.send(null);
+ },asyncLoad:function (url, onload, onerror, noRunDep) {
+ Browser.xhrLoad(url, function(arrayBuffer) {
+ assert(arrayBuffer, 'Loading data file "' + url + '" failed (no arrayBuffer).');
+ onload(new Uint8Array(arrayBuffer));
+ if (!noRunDep) removeRunDependency('al ' + url);
+ }, function(event) {
+ if (onerror) {
+ onerror();
+ } else {
+ throw 'Loading data file "' + url + '" failed.';
+ }
+ });
+ if (!noRunDep) addRunDependency('al ' + url);
+ },resizeListeners:[],updateResizeListeners:function () {
+ var canvas = Module['canvas'];
+ Browser.resizeListeners.forEach(function(listener) {
+ listener(canvas.width, canvas.height);
+ });
+ },setCanvasSize:function (width, height, noUpdates) {
+ var canvas = Module['canvas'];
+ Browser.updateCanvasDimensions(canvas, width, height);
+ if (!noUpdates) Browser.updateResizeListeners();
+ },windowedWidth:0,windowedHeight:0,setFullScreenCanvasSize:function () {
+ // check if SDL is available
+ if (typeof SDL != "undefined") {
+ var flags = HEAPU32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)];
+ flags = flags | 0x00800000; // set SDL_FULLSCREEN flag
+ HEAP32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)]=flags
+ }
+ Browser.updateResizeListeners();
+ },setWindowedCanvasSize:function () {
+ // check if SDL is available
+ if (typeof SDL != "undefined") {
+ var flags = HEAPU32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)];
+ flags = flags & ~0x00800000; // clear SDL_FULLSCREEN flag
+ HEAP32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)]=flags
+ }
+ Browser.updateResizeListeners();
+ },updateCanvasDimensions:function (canvas, wNative, hNative) {
+ if (wNative && hNative) {
+ canvas.widthNative = wNative;
+ canvas.heightNative = hNative;
+ } else {
+ wNative = canvas.widthNative;
+ hNative = canvas.heightNative;
+ }
+ var w = wNative;
+ var h = hNative;
+ if (Module['forcedAspectRatio'] && Module['forcedAspectRatio'] > 0) {
+ if (w/h < Module['forcedAspectRatio']) {
+ w = Math.round(h * Module['forcedAspectRatio']);
+ } else {
+ h = Math.round(w / Module['forcedAspectRatio']);
+ }
+ }
+ if (((document['webkitFullScreenElement'] || document['webkitFullscreenElement'] ||
+ document['mozFullScreenElement'] || document['mozFullscreenElement'] ||
+ document['fullScreenElement'] || document['fullscreenElement'] ||
+ document['msFullScreenElement'] || document['msFullscreenElement'] ||
+ document['webkitCurrentFullScreenElement']) === canvas.parentNode) && (typeof screen != 'undefined')) {
+ var factor = Math.min(screen.width / w, screen.height / h);
+ w = Math.round(w * factor);
+ h = Math.round(h * factor);
+ }
+ if (Browser.resizeCanvas) {
+ if (canvas.width != w) canvas.width = w;
+ if (canvas.height != h) canvas.height = h;
+ if (typeof canvas.style != 'undefined') {
+ canvas.style.removeProperty( "width");
+ canvas.style.removeProperty("height");
+ }
+ } else {
+ if (canvas.width != wNative) canvas.width = wNative;
+ if (canvas.height != hNative) canvas.height = hNative;
+ if (typeof canvas.style != 'undefined') {
+ if (w != wNative || h != hNative) {
+ canvas.style.setProperty( "width", w + "px", "important");
+ canvas.style.setProperty("height", h + "px", "important");
+ } else {
+ canvas.style.removeProperty( "width");
+ canvas.style.removeProperty("height");
+ }
+ }
+ }
+ }};
+
+ function _log10(x) {
+ return Math.log(x) / Math.LN10;
+ }
+
+ function _isspace(chr) {
+ return (chr == 32) || (chr >= 9 && chr <= 13);
+ }
+
+
+ var ___tm_current=allocate(44, "i8", ALLOC_STATIC);
+
+
+ var ___tm_timezone=allocate(intArrayFromString("GMT"), "i8", ALLOC_STATIC);function _localtime_r(time, tmPtr) {
+ _tzset();
+ var date = new Date(HEAP32[((time)>>2)]*1000);
+ HEAP32[((tmPtr)>>2)]=date.getSeconds();
+ HEAP32[(((tmPtr)+(4))>>2)]=date.getMinutes();
+ HEAP32[(((tmPtr)+(8))>>2)]=date.getHours();
+ HEAP32[(((tmPtr)+(12))>>2)]=date.getDate();
+ HEAP32[(((tmPtr)+(16))>>2)]=date.getMonth();
+ HEAP32[(((tmPtr)+(20))>>2)]=date.getFullYear()-1900;
+ HEAP32[(((tmPtr)+(24))>>2)]=date.getDay();
+
+ var start = new Date(date.getFullYear(), 0, 1);
+ var yday = Math.floor((date.getTime() - start.getTime()) / (1000 * 60 * 60 * 24));
+ HEAP32[(((tmPtr)+(28))>>2)]=yday;
+ HEAP32[(((tmPtr)+(36))>>2)]=start.getTimezoneOffset() * 60;
+
+ var dst = Number(start.getTimezoneOffset() != date.getTimezoneOffset());
+ HEAP32[(((tmPtr)+(32))>>2)]=dst;
+
+ HEAP32[(((tmPtr)+(40))>>2)]=___tm_timezone;
+
+ return tmPtr;
+ }function _localtime(time) {
+ return _localtime_r(time, ___tm_current);
+ }
+
+ function _srand(seed) {
+ HEAP32[((___rand_seed)>>2)]=seed
+ }
+
+ var _emscripten_prep_setjmp=true;
+
+
+
+
+ Module["_testSetjmp"] = _testSetjmp;function _longjmp(env, value) {
+ asm['setThrew'](env, value || 1);
+ throw 'longjmp';
+ }function _emscripten_longjmp(env, value) {
+ _longjmp(env, value);
+ }
+
+ var _ceil=Math_ceil;
+
+
+ function _emscripten_memcpy_big(dest, src, num) {
+ HEAPU8.set(HEAPU8.subarray(src, src+num), dest);
+ return dest;
+ }
+ Module["_memcpy"] = _memcpy;
+
+ var _llvm_pow_f64=Math_pow;
+
+
+
+ Module["_strlen"] = _strlen;function _fputs(s, stream) {
+ // int fputs(const char *restrict s, FILE *restrict stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fputs.html
+ var fd = _fileno(stream);
+ return _write(fd, s, _strlen(s));
+ }
+
+ function _sbrk(bytes) {
+ // Implement a Linux-like 'memory area' for our 'process'.
+ // Changes the size of the memory area by |bytes|; returns the
+ // address of the previous top ('break') of the memory area
+ // We control the "dynamic" memory - DYNAMIC_BASE to DYNAMICTOP
+ var self = _sbrk;
+ if (!self.called) {
+ DYNAMICTOP = alignMemoryPage(DYNAMICTOP); // make sure we start out aligned
+ self.called = true;
+ assert(Runtime.dynamicAlloc);
+ self.alloc = Runtime.dynamicAlloc;
+ Runtime.dynamicAlloc = function() { abort('cannot dynamically allocate, sbrk now has control') };
+ }
+ var ret = DYNAMICTOP;
+ if (bytes != 0) self.alloc(bytes);
+ return ret; // Previous break location.
+ }
+
+
+ function _sinh(x) {
+ var p = Math.pow(Math.E, x);
+ return (p - (1 / p)) / 2;
+ }
+
+ function _cosh(x) {
+ var p = Math.pow(Math.E, x);
+ return (p + (1 / p)) / 2;
+ }function _tanh(x) {
+ return _sinh(x) / _cosh(x);
+ }
+
+ function _signal(sig, func) {
+ // TODO
+ return 0;
+ }
+
+
+
+ function __getFloat(text) {
+ return /^[+-]?[0-9]*\.?[0-9]+([eE][+-]?[0-9]+)?/.exec(text);
+ }function __scanString(format, get, unget, varargs) {
+ if (!__scanString.whiteSpace) {
+ __scanString.whiteSpace = {};
+ __scanString.whiteSpace[32] = 1;
+ __scanString.whiteSpace[9] = 1;
+ __scanString.whiteSpace[10] = 1;
+ __scanString.whiteSpace[11] = 1;
+ __scanString.whiteSpace[12] = 1;
+ __scanString.whiteSpace[13] = 1;
+ }
+ // Supports %x, %4x, %d.%d, %lld, %s, %f, %lf.
+ // TODO: Support all format specifiers.
+ format = Pointer_stringify(format);
+ var soFar = 0;
+ if (format.indexOf('%n') >= 0) {
+ // need to track soFar
+ var _get = get;
+ get = function get() {
+ soFar++;
+ return _get();
+ }
+ var _unget = unget;
+ unget = function unget() {
+ soFar--;
+ return _unget();
+ }
+ }
+ var formatIndex = 0;
+ var argsi = 0;
+ var fields = 0;
+ var argIndex = 0;
+ var next;
+
+ mainLoop:
+ for (var formatIndex = 0; formatIndex < format.length;) {
+ if (format[formatIndex] === '%' && format[formatIndex+1] == 'n') {
+ var argPtr = HEAP32[(((varargs)+(argIndex))>>2)];
+ argIndex += Runtime.getAlignSize('void*', null, true);
+ HEAP32[((argPtr)>>2)]=soFar;
+ formatIndex += 2;
+ continue;
+ }
+
+ if (format[formatIndex] === '%') {
+ var nextC = format.indexOf('c', formatIndex+1);
+ if (nextC > 0) {
+ var maxx = 1;
+ if (nextC > formatIndex+1) {
+ var sub = format.substring(formatIndex+1, nextC);
+ maxx = parseInt(sub);
+ if (maxx != sub) maxx = 0;
+ }
+ if (maxx) {
+ var argPtr = HEAP32[(((varargs)+(argIndex))>>2)];
+ argIndex += Runtime.getAlignSize('void*', null, true);
+ fields++;
+ for (var i = 0; i < maxx; i++) {
+ next = get();
+ HEAP8[((argPtr++)|0)]=next;
+ if (next === 0) return i > 0 ? fields : fields-1; // we failed to read the full length of this field
+ }
+ formatIndex += nextC - formatIndex + 1;
+ continue;
+ }
+ }
+ }
+
+ // handle %[...]
+ if (format[formatIndex] === '%' && format.indexOf('[', formatIndex+1) > 0) {
+ var match = /\%([0-9]*)\[(\^)?(\]?[^\]]*)\]/.exec(format.substring(formatIndex));
+ if (match) {
+ var maxNumCharacters = parseInt(match[1]) || Infinity;
+ var negateScanList = (match[2] === '^');
+ var scanList = match[3];
+
+ // expand "middle" dashs into character sets
+ var middleDashMatch;
+ while ((middleDashMatch = /([^\-])\-([^\-])/.exec(scanList))) {
+ var rangeStartCharCode = middleDashMatch[1].charCodeAt(0);
+ var rangeEndCharCode = middleDashMatch[2].charCodeAt(0);
+ for (var expanded = ''; rangeStartCharCode <= rangeEndCharCode; expanded += String.fromCharCode(rangeStartCharCode++));
+ scanList = scanList.replace(middleDashMatch[1] + '-' + middleDashMatch[2], expanded);
+ }
+
+ var argPtr = HEAP32[(((varargs)+(argIndex))>>2)];
+ argIndex += Runtime.getAlignSize('void*', null, true);
+ fields++;
+
+ for (var i = 0; i < maxNumCharacters; i++) {
+ next = get();
+ if (negateScanList) {
+ if (scanList.indexOf(String.fromCharCode(next)) < 0) {
+ HEAP8[((argPtr++)|0)]=next;
+ } else {
+ unget();
+ break;
+ }
+ } else {
+ if (scanList.indexOf(String.fromCharCode(next)) >= 0) {
+ HEAP8[((argPtr++)|0)]=next;
+ } else {
+ unget();
+ break;
+ }
+ }
+ }
+
+ // write out null-terminating character
+ HEAP8[((argPtr++)|0)]=0;
+ formatIndex += match[0].length;
+
+ continue;
+ }
+ }
+ // remove whitespace
+ while (1) {
+ next = get();
+ if (next == 0) return fields;
+ if (!(next in __scanString.whiteSpace)) break;
+ }
+ unget();
+
+ if (format[formatIndex] === '%') {
+ formatIndex++;
+ var suppressAssignment = false;
+ if (format[formatIndex] == '*') {
+ suppressAssignment = true;
+ formatIndex++;
+ }
+ var maxSpecifierStart = formatIndex;
+ while (format[formatIndex].charCodeAt(0) >= 48 &&
+ format[formatIndex].charCodeAt(0) <= 57) {
+ formatIndex++;
+ }
+ var max_;
+ if (formatIndex != maxSpecifierStart) {
+ max_ = parseInt(format.slice(maxSpecifierStart, formatIndex), 10);
+ }
+ var long_ = false;
+ var half = false;
+ var longLong = false;
+ if (format[formatIndex] == 'l') {
+ long_ = true;
+ formatIndex++;
+ if (format[formatIndex] == 'l') {
+ longLong = true;
+ formatIndex++;
+ }
+ } else if (format[formatIndex] == 'h') {
+ half = true;
+ formatIndex++;
+ }
+ var type = format[formatIndex];
+ formatIndex++;
+ var curr = 0;
+ var buffer = [];
+ // Read characters according to the format. floats are trickier, they may be in an unfloat state in the middle, then be a valid float later
+ if (type == 'f' || type == 'e' || type == 'g' ||
+ type == 'F' || type == 'E' || type == 'G') {
+ next = get();
+ while (next > 0 && (!(next in __scanString.whiteSpace))) {
+ buffer.push(String.fromCharCode(next));
+ next = get();
+ }
+ var m = __getFloat(buffer.join(''));
+ var last = m ? m[0].length : 0;
+ for (var i = 0; i < buffer.length - last + 1; i++) {
+ unget();
+ }
+ buffer.length = last;
+ } else {
+ next = get();
+ var first = true;
+
+ // Strip the optional 0x prefix for %x.
+ if ((type == 'x' || type == 'X') && (next == 48)) {
+ var peek = get();
+ if (peek == 120 || peek == 88) {
+ next = get();
+ } else {
+ unget();
+ }
+ }
+
+ while ((curr < max_ || isNaN(max_)) && next > 0) {
+ if (!(next in __scanString.whiteSpace) && // stop on whitespace
+ (type == 's' ||
+ ((type === 'd' || type == 'u' || type == 'i') && ((next >= 48 && next <= 57) ||
+ (first && next == 45))) ||
+ ((type === 'x' || type === 'X') && (next >= 48 && next <= 57 ||
+ next >= 97 && next <= 102 ||
+ next >= 65 && next <= 70))) &&
+ (formatIndex >= format.length || next !== format[formatIndex].charCodeAt(0))) { // Stop when we read something that is coming up
+ buffer.push(String.fromCharCode(next));
+ next = get();
+ curr++;
+ first = false;
+ } else {
+ break;
+ }
+ }
+ unget();
+ }
+ if (buffer.length === 0) return 0; // Failure.
+ if (suppressAssignment) continue;
+
+ var text = buffer.join('');
+ var argPtr = HEAP32[(((varargs)+(argIndex))>>2)];
+ argIndex += Runtime.getAlignSize('void*', null, true);
+ switch (type) {
+ case 'd': case 'u': case 'i':
+ if (half) {
+ HEAP16[((argPtr)>>1)]=parseInt(text, 10);
+ } else if (longLong) {
+ (tempI64 = [parseInt(text, 10)>>>0,(tempDouble=parseInt(text, 10),(+(Math_abs(tempDouble))) >= (+1) ? (tempDouble > (+0) ? ((Math_min((+(Math_floor((tempDouble)/(+4294967296)))), (+4294967295)))|0)>>>0 : (~~((+(Math_ceil((tempDouble - +(((~~(tempDouble)))>>>0))/(+4294967296))))))>>>0) : 0)],HEAP32[((argPtr)>>2)]=tempI64[0],HEAP32[(((argPtr)+(4))>>2)]=tempI64[1]);
+ } else {
+ HEAP32[((argPtr)>>2)]=parseInt(text, 10);
+ }
+ break;
+ case 'X':
+ case 'x':
+ HEAP32[((argPtr)>>2)]=parseInt(text, 16);
+ break;
+ case 'F':
+ case 'f':
+ case 'E':
+ case 'e':
+ case 'G':
+ case 'g':
+ case 'E':
+ // fallthrough intended
+ if (long_) {
+ HEAPF64[((argPtr)>>3)]=parseFloat(text);
+ } else {
+ HEAPF32[((argPtr)>>2)]=parseFloat(text);
+ }
+ break;
+ case 's':
+ var array = intArrayFromString(text);
+ for (var j = 0; j < array.length; j++) {
+ HEAP8[(((argPtr)+(j))|0)]=array[j];
+ }
+ break;
+ }
+ fields++;
+ } else if (format[formatIndex].charCodeAt(0) in __scanString.whiteSpace) {
+ next = get();
+ while (next in __scanString.whiteSpace) {
+ if (next <= 0) break mainLoop; // End of input.
+ next = get();
+ }
+ unget(next);
+ formatIndex++;
+ } else {
+ // Not a specifier.
+ next = get();
+ if (format[formatIndex].charCodeAt(0) !== next) {
+ unget(next);
+ break mainLoop;
+ }
+ formatIndex++;
+ }
+ }
+ return fields;
+ }
+
+ function _fgetc(stream) {
+ // int fgetc(FILE *stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fgetc.html
+ var streamObj = FS.getStreamFromPtr(stream);
+ if (!streamObj) return -1;
+ if (streamObj.eof || streamObj.error) return -1;
+ var ret = _fread(_fgetc.ret, 1, 1, stream);
+ if (ret == 0) {
+ return -1;
+ } else if (ret == -1) {
+ streamObj.error = true;
+ return -1;
+ } else {
+ return HEAPU8[((_fgetc.ret)|0)];
+ }
+ }
+
+ function _ungetc(c, stream) {
+ // int ungetc(int c, FILE *stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/ungetc.html
+ stream = FS.getStreamFromPtr(stream);
+ if (!stream) {
+ return -1;
+ }
+ if (c === -1) {
+ // do nothing for EOF character
+ return c;
+ }
+ c = unSign(c & 0xFF);
+ stream.ungotten.push(c);
+ stream.eof = false;
+ return c;
+ }function _fscanf(stream, format, varargs) {
+ // int fscanf(FILE *restrict stream, const char *restrict format, ... );
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/scanf.html
+ var streamObj = FS.getStreamFromPtr(stream);
+ if (!streamObj) {
+ return -1;
+ }
+ var buffer = [];
+ function get() {
+ var c = _fgetc(stream);
+ buffer.push(c);
+ return c;
+ };
+ function unget() {
+ _ungetc(buffer.pop(), stream);
+ };
+ return __scanString(format, get, unget, varargs);
+ }
+
+ var _emscripten_preinvoke=true;
+
+ function _localeconv() {
+ // %struct.timeval = type { char* decimal point, other stuff... }
+ // var indexes = Runtime.calculateStructAlignment({ fields: ['i32', 'i32'] });
+ var me = _localeconv;
+ if (!me.ret) {
+ // These are defaults from the "C" locale
+ me.ret = allocate([
+ allocate(intArrayFromString('.'), 'i8', ALLOC_NORMAL),0,0,0, // decimal_point
+ allocate(intArrayFromString(''), 'i8', ALLOC_NORMAL),0,0,0, // thousands_sep
+ allocate(intArrayFromString(''), 'i8', ALLOC_NORMAL),0,0,0, // grouping
+ allocate(intArrayFromString(''), 'i8', ALLOC_NORMAL),0,0,0, // int_curr_symbol
+ allocate(intArrayFromString(''), 'i8', ALLOC_NORMAL),0,0,0, // currency_symbol
+ allocate(intArrayFromString(''), 'i8', ALLOC_NORMAL),0,0,0, // mon_decimal_point
+ allocate(intArrayFromString(''), 'i8', ALLOC_NORMAL),0,0,0, // mon_thousands_sep
+ allocate(intArrayFromString(''), 'i8', ALLOC_NORMAL),0,0,0, // mon_grouping
+ allocate(intArrayFromString(''), 'i8', ALLOC_NORMAL),0,0,0, // positive_sign
+ allocate(intArrayFromString(''), 'i8', ALLOC_NORMAL),0,0,0 // negative_sign
+ ], 'i8*', ALLOC_NORMAL); // Allocate strings in lconv, still don't allocate chars
+ }
+ return me.ret;
+ }
+
+
+ function _unlink(path) {
+ // int unlink(const char *path);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/unlink.html
+ path = Pointer_stringify(path);
+ try {
+ FS.unlink(path);
+ return 0;
+ } catch (e) {
+ FS.handleFSError(e);
+ return -1;
+ }
+ }
+
+ function _rmdir(path) {
+ // int rmdir(const char *path);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/rmdir.html
+ path = Pointer_stringify(path);
+ try {
+ FS.rmdir(path);
+ return 0;
+ } catch (e) {
+ FS.handleFSError(e);
+ return -1;
+ }
+ }function _remove(path) {
+ // int remove(const char *path);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/remove.html
+ var ret = _unlink(path);
+ if (ret == -1) ret = _rmdir(path);
+ return ret;
+ }
+
+ function _freopen(filename, mode, stream) {
+ // FILE *freopen(const char *restrict filename, const char *restrict mode, FILE *restrict stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/freopen.html
+ if (!filename) {
+ var streamObj = FS.getStreamFromPtr(stream);
+ if (!streamObj) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return 0;
+ }
+ if (_freopen.buffer) _free(_freopen.buffer);
+ filename = intArrayFromString(streamObj.path);
+ filename = allocate(filename, 'i8', ALLOC_NORMAL);
+ }
+ _fclose(stream);
+ return _fopen(filename, mode);
+ }
+
+
+ function _rename(old_path, new_path) {
+ // int rename(const char *old, const char *new);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/rename.html
+ old_path = Pointer_stringify(old_path);
+ new_path = Pointer_stringify(new_path);
+ try {
+ FS.rename(old_path, new_path);
+ return 0;
+ } catch (e) {
+ FS.handleFSError(e);
+ return -1;
+ }
+ }
+
+ function _tmpfile() {
+ // FILE *tmpfile(void);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/tmpfile.html
+ // TODO: Delete the created file on closing.
+ if (_tmpfile.mode) {
+ _tmpfile.mode = allocate(intArrayFromString('w+'), 'i8', ALLOC_NORMAL);
+ }
+ return _fopen(_tmpnam(0), _tmpfile.mode);
+ }
+
+ function _sysconf(name) {
+ // long sysconf(int name);
+ // http://pubs.opengroup.org/onlinepubs/009695399/functions/sysconf.html
+ switch(name) {
+ case 30: return PAGE_SIZE;
+ case 132:
+ case 133:
+ case 12:
+ case 137:
+ case 138:
+ case 15:
+ case 235:
+ case 16:
+ case 17:
+ case 18:
+ case 19:
+ case 20:
+ case 149:
+ case 13:
+ case 10:
+ case 236:
+ case 153:
+ case 9:
+ case 21:
+ case 22:
+ case 159:
+ case 154:
+ case 14:
+ case 77:
+ case 78:
+ case 139:
+ case 80:
+ case 81:
+ case 79:
+ case 82:
+ case 68:
+ case 67:
+ case 164:
+ case 11:
+ case 29:
+ case 47:
+ case 48:
+ case 95:
+ case 52:
+ case 51:
+ case 46:
+ return 200809;
+ case 27:
+ case 246:
+ case 127:
+ case 128:
+ case 23:
+ case 24:
+ case 160:
+ case 161:
+ case 181:
+ case 182:
+ case 242:
+ case 183:
+ case 184:
+ case 243:
+ case 244:
+ case 245:
+ case 165:
+ case 178:
+ case 179:
+ case 49:
+ case 50:
+ case 168:
+ case 169:
+ case 175:
+ case 170:
+ case 171:
+ case 172:
+ case 97:
+ case 76:
+ case 32:
+ case 173:
+ case 35:
+ return -1;
+ case 176:
+ case 177:
+ case 7:
+ case 155:
+ case 8:
+ case 157:
+ case 125:
+ case 126:
+ case 92:
+ case 93:
+ case 129:
+ case 130:
+ case 131:
+ case 94:
+ case 91:
+ return 1;
+ case 74:
+ case 60:
+ case 69:
+ case 70:
+ case 4:
+ return 1024;
+ case 31:
+ case 42:
+ case 72:
+ return 32;
+ case 87:
+ case 26:
+ case 33:
+ return 2147483647;
+ case 34:
+ case 1:
+ return 47839;
+ case 38:
+ case 36:
+ return 99;
+ case 43:
+ case 37:
+ return 2048;
+ case 0: return 2097152;
+ case 3: return 65536;
+ case 28: return 32768;
+ case 44: return 32767;
+ case 75: return 16384;
+ case 39: return 1000;
+ case 89: return 700;
+ case 71: return 256;
+ case 40: return 255;
+ case 2: return 100;
+ case 180: return 64;
+ case 25: return 20;
+ case 5: return 16;
+ case 6: return 6;
+ case 73: return 4;
+ case 84: return 1;
+ }
+ ___setErrNo(ERRNO_CODES.EINVAL);
+ return -1;
+ }
+
+
+ function ___errno_location() {
+ return ___errno_state;
+ }
+
+
+ Module["_memset"] = _memset;
+
+
+
+ Module["_bitshift64Shl"] = _bitshift64Shl;
+
+ function _abort() {
+ Module['abort']();
+ }
+
+
+
+ function __reallyNegative(x) {
+ return x < 0 || (x === 0 && (1/x) === -Infinity);
+ }function __formatString(format, varargs) {
+ var textIndex = format;
+ var argIndex = 0;
+ function getNextArg(type) {
+ // NOTE: Explicitly ignoring type safety. Otherwise this fails:
+ // int x = 4; printf("%c\n", (char)x);
+ var ret;
+ if (type === 'double') {
+ ret = HEAPF64[(((varargs)+(argIndex))>>3)];
+ } else if (type == 'i64') {
+ ret = [HEAP32[(((varargs)+(argIndex))>>2)],
+ HEAP32[(((varargs)+(argIndex+4))>>2)]];
+
+ } else {
+ type = 'i32'; // varargs are always i32, i64, or double
+ ret = HEAP32[(((varargs)+(argIndex))>>2)];
+ }
+ argIndex += Runtime.getNativeFieldSize(type);
+ return ret;
+ }
+
+ var ret = [];
+ var curr, next, currArg;
+ while(1) {
+ var startTextIndex = textIndex;
+ curr = HEAP8[(textIndex)];
+ if (curr === 0) break;
+ next = HEAP8[((textIndex+1)|0)];
+ if (curr == 37) {
+ // Handle flags.
+ var flagAlwaysSigned = false;
+ var flagLeftAlign = false;
+ var flagAlternative = false;
+ var flagZeroPad = false;
+ var flagPadSign = false;
+ flagsLoop: while (1) {
+ switch (next) {
+ case 43:
+ flagAlwaysSigned = true;
+ break;
+ case 45:
+ flagLeftAlign = true;
+ break;
+ case 35:
+ flagAlternative = true;
+ break;
+ case 48:
+ if (flagZeroPad) {
+ break flagsLoop;
+ } else {
+ flagZeroPad = true;
+ break;
+ }
+ case 32:
+ flagPadSign = true;
+ break;
+ default:
+ break flagsLoop;
+ }
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ }
+
+ // Handle width.
+ var width = 0;
+ if (next == 42) {
+ width = getNextArg('i32');
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ } else {
+ while (next >= 48 && next <= 57) {
+ width = width * 10 + (next - 48);
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ }
+ }
+
+ // Handle precision.
+ var precisionSet = false, precision = -1;
+ if (next == 46) {
+ precision = 0;
+ precisionSet = true;
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ if (next == 42) {
+ precision = getNextArg('i32');
+ textIndex++;
+ } else {
+ while(1) {
+ var precisionChr = HEAP8[((textIndex+1)|0)];
+ if (precisionChr < 48 ||
+ precisionChr > 57) break;
+ precision = precision * 10 + (precisionChr - 48);
+ textIndex++;
+ }
+ }
+ next = HEAP8[((textIndex+1)|0)];
+ }
+ if (precision < 0) {
+ precision = 6; // Standard default.
+ precisionSet = false;
+ }
+
+ // Handle integer sizes. WARNING: These assume a 32-bit architecture!
+ var argSize;
+ switch (String.fromCharCode(next)) {
+ case 'h':
+ var nextNext = HEAP8[((textIndex+2)|0)];
+ if (nextNext == 104) {
+ textIndex++;
+ argSize = 1; // char (actually i32 in varargs)
+ } else {
+ argSize = 2; // short (actually i32 in varargs)
+ }
+ break;
+ case 'l':
+ var nextNext = HEAP8[((textIndex+2)|0)];
+ if (nextNext == 108) {
+ textIndex++;
+ argSize = 8; // long long
+ } else {
+ argSize = 4; // long
+ }
+ break;
+ case 'L': // long long
+ case 'q': // int64_t
+ case 'j': // intmax_t
+ argSize = 8;
+ break;
+ case 'z': // size_t
+ case 't': // ptrdiff_t
+ case 'I': // signed ptrdiff_t or unsigned size_t
+ argSize = 4;
+ break;
+ default:
+ argSize = null;
+ }
+ if (argSize) textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+
+ // Handle type specifier.
+ switch (String.fromCharCode(next)) {
+ case 'd': case 'i': case 'u': case 'o': case 'x': case 'X': case 'p': {
+ // Integer.
+ var signed = next == 100 || next == 105;
+ argSize = argSize || 4;
+ var currArg = getNextArg('i' + (argSize * 8));
+ var origArg = currArg;
+ var argText;
+ // Flatten i64-1 [low, high] into a (slightly rounded) double
+ if (argSize == 8) {
+ currArg = Runtime.makeBigInt(currArg[0], currArg[1], next == 117);
+ }
+ // Truncate to requested size.
+ if (argSize <= 4) {
+ var limit = Math.pow(256, argSize) - 1;
+ currArg = (signed ? reSign : unSign)(currArg & limit, argSize * 8);
+ }
+ // Format the number.
+ var currAbsArg = Math.abs(currArg);
+ var prefix = '';
+ if (next == 100 || next == 105) {
+ if (argSize == 8 && i64Math) argText = i64Math.stringify(origArg[0], origArg[1], null); else
+ argText = reSign(currArg, 8 * argSize, 1).toString(10);
+ } else if (next == 117) {
+ if (argSize == 8 && i64Math) argText = i64Math.stringify(origArg[0], origArg[1], true); else
+ argText = unSign(currArg, 8 * argSize, 1).toString(10);
+ currArg = Math.abs(currArg);
+ } else if (next == 111) {
+ argText = (flagAlternative ? '0' : '') + currAbsArg.toString(8);
+ } else if (next == 120 || next == 88) {
+ prefix = (flagAlternative && currArg != 0) ? '0x' : '';
+ if (argSize == 8 && i64Math) {
+ if (origArg[1]) {
+ argText = (origArg[1]>>>0).toString(16);
+ var lower = (origArg[0]>>>0).toString(16);
+ while (lower.length < 8) lower = '0' + lower;
+ argText += lower;
+ } else {
+ argText = (origArg[0]>>>0).toString(16);
+ }
+ } else
+ if (currArg < 0) {
+ // Represent negative numbers in hex as 2's complement.
+ currArg = -currArg;
+ argText = (currAbsArg - 1).toString(16);
+ var buffer = [];
+ for (var i = 0; i < argText.length; i++) {
+ buffer.push((0xF - parseInt(argText[i], 16)).toString(16));
+ }
+ argText = buffer.join('');
+ while (argText.length < argSize * 2) argText = 'f' + argText;
+ } else {
+ argText = currAbsArg.toString(16);
+ }
+ if (next == 88) {
+ prefix = prefix.toUpperCase();
+ argText = argText.toUpperCase();
+ }
+ } else if (next == 112) {
+ if (currAbsArg === 0) {
+ argText = '(nil)';
+ } else {
+ prefix = '0x';
+ argText = currAbsArg.toString(16);
+ }
+ }
+ if (precisionSet) {
+ while (argText.length < precision) {
+ argText = '0' + argText;
+ }
+ }
+
+ // Add sign if needed
+ if (currArg >= 0) {
+ if (flagAlwaysSigned) {
+ prefix = '+' + prefix;
+ } else if (flagPadSign) {
+ prefix = ' ' + prefix;
+ }
+ }
+
+ // Move sign to prefix so we zero-pad after the sign
+ if (argText.charAt(0) == '-') {
+ prefix = '-' + prefix;
+ argText = argText.substr(1);
+ }
+
+ // Add padding.
+ while (prefix.length + argText.length < width) {
+ if (flagLeftAlign) {
+ argText += ' ';
+ } else {
+ if (flagZeroPad) {
+ argText = '0' + argText;
+ } else {
+ prefix = ' ' + prefix;
+ }
+ }
+ }
+
+ // Insert the result into the buffer.
+ argText = prefix + argText;
+ argText.split('').forEach(function(chr) {
+ ret.push(chr.charCodeAt(0));
+ });
+ break;
+ }
+ case 'f': case 'F': case 'e': case 'E': case 'g': case 'G': {
+ // Float.
+ var currArg = getNextArg('double');
+ var argText;
+ if (isNaN(currArg)) {
+ argText = 'nan';
+ flagZeroPad = false;
+ } else if (!isFinite(currArg)) {
+ argText = (currArg < 0 ? '-' : '') + 'inf';
+ flagZeroPad = false;
+ } else {
+ var isGeneral = false;
+ var effectivePrecision = Math.min(precision, 20);
+
+ // Convert g/G to f/F or e/E, as per:
+ // http://pubs.opengroup.org/onlinepubs/9699919799/functions/printf.html
+ if (next == 103 || next == 71) {
+ isGeneral = true;
+ precision = precision || 1;
+ var exponent = parseInt(currArg.toExponential(effectivePrecision).split('e')[1], 10);
+ if (precision > exponent && exponent >= -4) {
+ next = ((next == 103) ? 'f' : 'F').charCodeAt(0);
+ precision -= exponent + 1;
+ } else {
+ next = ((next == 103) ? 'e' : 'E').charCodeAt(0);
+ precision--;
+ }
+ effectivePrecision = Math.min(precision, 20);
+ }
+
+ if (next == 101 || next == 69) {
+ argText = currArg.toExponential(effectivePrecision);
+ // Make sure the exponent has at least 2 digits.
+ if (/[eE][-+]\d$/.test(argText)) {
+ argText = argText.slice(0, -1) + '0' + argText.slice(-1);
+ }
+ } else if (next == 102 || next == 70) {
+ argText = currArg.toFixed(effectivePrecision);
+ if (currArg === 0 && __reallyNegative(currArg)) {
+ argText = '-' + argText;
+ }
+ }
+
+ var parts = argText.split('e');
+ if (isGeneral && !flagAlternative) {
+ // Discard trailing zeros and periods.
+ while (parts[0].length > 1 && parts[0].indexOf('.') != -1 &&
+ (parts[0].slice(-1) == '0' || parts[0].slice(-1) == '.')) {
+ parts[0] = parts[0].slice(0, -1);
+ }
+ } else {
+ // Make sure we have a period in alternative mode.
+ if (flagAlternative && argText.indexOf('.') == -1) parts[0] += '.';
+ // Zero pad until required precision.
+ while (precision > effectivePrecision++) parts[0] += '0';
+ }
+ argText = parts[0] + (parts.length > 1 ? 'e' + parts[1] : '');
+
+ // Capitalize 'E' if needed.
+ if (next == 69) argText = argText.toUpperCase();
+
+ // Add sign.
+ if (currArg >= 0) {
+ if (flagAlwaysSigned) {
+ argText = '+' + argText;
+ } else if (flagPadSign) {
+ argText = ' ' + argText;
+ }
+ }
+ }
+
+ // Add padding.
+ while (argText.length < width) {
+ if (flagLeftAlign) {
+ argText += ' ';
+ } else {
+ if (flagZeroPad && (argText[0] == '-' || argText[0] == '+')) {
+ argText = argText[0] + '0' + argText.slice(1);
+ } else {
+ argText = (flagZeroPad ? '0' : ' ') + argText;
+ }
+ }
+ }
+
+ // Adjust case.
+ if (next < 97) argText = argText.toUpperCase();
+
+ // Insert the result into the buffer.
+ argText.split('').forEach(function(chr) {
+ ret.push(chr.charCodeAt(0));
+ });
+ break;
+ }
+ case 's': {
+ // String.
+ var arg = getNextArg('i8*');
+ var argLength = arg ? _strlen(arg) : '(null)'.length;
+ if (precisionSet) argLength = Math.min(argLength, precision);
+ if (!flagLeftAlign) {
+ while (argLength < width--) {
+ ret.push(32);
+ }
+ }
+ if (arg) {
+ for (var i = 0; i < argLength; i++) {
+ ret.push(HEAPU8[((arg++)|0)]);
+ }
+ } else {
+ ret = ret.concat(intArrayFromString('(null)'.substr(0, argLength), true));
+ }
+ if (flagLeftAlign) {
+ while (argLength < width--) {
+ ret.push(32);
+ }
+ }
+ break;
+ }
+ case 'c': {
+ // Character.
+ if (flagLeftAlign) ret.push(getNextArg('i8'));
+ while (--width > 0) {
+ ret.push(32);
+ }
+ if (!flagLeftAlign) ret.push(getNextArg('i8'));
+ break;
+ }
+ case 'n': {
+ // Write the length written so far to the next parameter.
+ var ptr = getNextArg('i32*');
+ HEAP32[((ptr)>>2)]=ret.length;
+ break;
+ }
+ case '%': {
+ // Literal percent sign.
+ ret.push(curr);
+ break;
+ }
+ default: {
+ // Unknown specifiers remain untouched.
+ for (var i = startTextIndex; i < textIndex + 2; i++) {
+ ret.push(HEAP8[(i)]);
+ }
+ }
+ }
+ textIndex += 2;
+ // TODO: Support a/A (hex float) and m (last error) specifiers.
+ // TODO: Support %1${specifier} for arg selection.
+ } else {
+ ret.push(curr);
+ textIndex += 1;
+ }
+ }
+ return ret;
+ }function _fprintf(stream, format, varargs) {
+ // int fprintf(FILE *restrict stream, const char *restrict format, ...);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/printf.html
+ var result = __formatString(format, varargs);
+ var stack = Runtime.stackSave();
+ var ret = _fwrite(allocate(result, 'i8', ALLOC_STACK), 1, result.length, stream);
+ Runtime.stackRestore(stack);
+ return ret;
+ }
+
+ function _fgets(s, n, stream) {
+ // char *fgets(char *restrict s, int n, FILE *restrict stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fgets.html
+ var streamObj = FS.getStreamFromPtr(stream);
+ if (!streamObj) return 0;
+ if (streamObj.error || streamObj.eof) return 0;
+ var byte_;
+ for (var i = 0; i < n - 1 && byte_ != 10; i++) {
+ byte_ = _fgetc(stream);
+ if (byte_ == -1) {
+ if (streamObj.error || (streamObj.eof && i == 0)) return 0;
+ else if (streamObj.eof) break;
+ }
+ HEAP8[(((s)+(i))|0)]=byte_;
+ }
+ HEAP8[(((s)+(i))|0)]=0;
+ return s;
+ }
+
+ var _tan=Math_tan;
+
+ function _ispunct(chr) {
+ return (chr >= 33 && chr <= 47) ||
+ (chr >= 58 && chr <= 64) ||
+ (chr >= 91 && chr <= 96) ||
+ (chr >= 123 && chr <= 126);
+ }
+
+ function _feof(stream) {
+ // int feof(FILE *stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/feof.html
+ stream = FS.getStreamFromPtr(stream);
+ return Number(stream && stream.eof);
+ }
+
+
+ Module["_tolower"] = _tolower;
+
+ var _asin=Math_asin;
+
+ function _clearerr(stream) {
+ // void clearerr(FILE *stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/clearerr.html
+ stream = FS.getStreamFromPtr(stream);
+ if (!stream) {
+ return;
+ }
+ stream.eof = false;
+ stream.error = false;
+ }
+
+ var _fabs=Math_abs;
+
+ function _clock() {
+ if (_clock.start === undefined) _clock.start = Date.now();
+ return Math.floor((Date.now() - _clock.start) * (1000000/1000));
+ }
+
+
+ var _getc=_fgetc;
+
+ function _modf(x, intpart) {
+ HEAPF64[((intpart)>>3)]=Math.floor(x);
+ return x - HEAPF64[((intpart)>>3)];
+ }
+
+ var _sqrt=Math_sqrt;
+
+ function _isxdigit(chr) {
+ return (chr >= 48 && chr <= 57) ||
+ (chr >= 97 && chr <= 102) ||
+ (chr >= 65 && chr <= 70);
+ }
+
+ function _ftell(stream) {
+ // long ftell(FILE *stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/ftell.html
+ stream = FS.getStreamFromPtr(stream);
+ if (!stream) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+ if (FS.isChrdev(stream.node.mode)) {
+ ___setErrNo(ERRNO_CODES.ESPIPE);
+ return -1;
+ } else {
+ return stream.position;
+ }
+ }
+
+
+ function __exit(status) {
+ // void _exit(int status);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/exit.html
+ Module['exit'](status);
+ }function _exit(status) {
+ __exit(status);
+ }
+
+
+ function _snprintf(s, n, format, varargs) {
+ // int snprintf(char *restrict s, size_t n, const char *restrict format, ...);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/printf.html
+ var result = __formatString(format, varargs);
+ var limit = (n === undefined) ? result.length
+ : Math.min(result.length, Math.max(n - 1, 0));
+ if (s < 0) {
+ s = -s;
+ var buf = _malloc(limit+1);
+ HEAP32[((s)>>2)]=buf;
+ s = buf;
+ }
+ for (var i = 0; i < limit; i++) {
+ HEAP8[(((s)+(i))|0)]=result[i];
+ }
+ if (limit < n || (n === undefined)) HEAP8[(((s)+(i))|0)]=0;
+ return result.length;
+ }function _sprintf(s, format, varargs) {
+ // int sprintf(char *restrict s, const char *restrict format, ...);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/printf.html
+ return _snprintf(s, undefined, format, varargs);
+ }
+
+ var _emscripten_get_longjmp_result=true;
+
+ var _sin=Math_sin;
+
+
+ function _fmod(x, y) {
+ return x % y;
+ }var _fmodl=_fmod;
+
+
+
+ var _atan=Math_atan;
+
+ function _ferror(stream) {
+ // int ferror(FILE *stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/ferror.html
+ stream = FS.getStreamFromPtr(stream);
+ return Number(stream && stream.error);
+ }
+
+ function _time(ptr) {
+ var ret = Math.floor(Date.now()/1000);
+ if (ptr) {
+ HEAP32[((ptr)>>2)]=ret;
+ }
+ return ret;
+ }
+
+ function _copysign(a, b) {
+ return __reallyNegative(a) === __reallyNegative(b) ? a : -a;
+ }
+
+
+ function _gmtime_r(time, tmPtr) {
+ var date = new Date(HEAP32[((time)>>2)]*1000);
+ HEAP32[((tmPtr)>>2)]=date.getUTCSeconds();
+ HEAP32[(((tmPtr)+(4))>>2)]=date.getUTCMinutes();
+ HEAP32[(((tmPtr)+(8))>>2)]=date.getUTCHours();
+ HEAP32[(((tmPtr)+(12))>>2)]=date.getUTCDate();
+ HEAP32[(((tmPtr)+(16))>>2)]=date.getUTCMonth();
+ HEAP32[(((tmPtr)+(20))>>2)]=date.getUTCFullYear()-1900;
+ HEAP32[(((tmPtr)+(24))>>2)]=date.getUTCDay();
+ HEAP32[(((tmPtr)+(36))>>2)]=0;
+ HEAP32[(((tmPtr)+(32))>>2)]=0;
+ var start = new Date(date); // define date using UTC, start from Jan 01 00:00:00 UTC
+ start.setUTCDate(1);
+ start.setUTCMonth(0);
+ start.setUTCHours(0);
+ start.setUTCMinutes(0);
+ start.setUTCSeconds(0);
+ start.setUTCMilliseconds(0);
+ var yday = Math.floor((date.getTime() - start.getTime()) / (1000 * 60 * 60 * 24));
+ HEAP32[(((tmPtr)+(28))>>2)]=yday;
+ HEAP32[(((tmPtr)+(40))>>2)]=___tm_timezone;
+
+ return tmPtr;
+ }function _gmtime(time) {
+ return _gmtime_r(time, ___tm_current);
+ }
+
+ function _isgraph(chr) {
+ return 0x20 < chr && chr < 0x7F;
+ }
+
+
+
+ function _strerror_r(errnum, strerrbuf, buflen) {
+ if (errnum in ERRNO_MESSAGES) {
+ if (ERRNO_MESSAGES[errnum].length > buflen - 1) {
+ return ___setErrNo(ERRNO_CODES.ERANGE);
+ } else {
+ var msg = ERRNO_MESSAGES[errnum];
+ writeAsciiToMemory(msg, strerrbuf);
+ return 0;
+ }
+ } else {
+ return ___setErrNo(ERRNO_CODES.EINVAL);
+ }
+ }function _strerror(errnum) {
+ if (!_strerror.buffer) _strerror.buffer = _malloc(256);
+ _strerror_r(errnum, _strerror.buffer, 256);
+ return _strerror.buffer;
+ }
+
+
+
+
+
+ var _environ=allocate(1, "i32*", ALLOC_STATIC);var ___environ=_environ;function ___buildEnvironment(env) {
+ // WARNING: Arbitrary limit!
+ var MAX_ENV_VALUES = 64;
+ var TOTAL_ENV_SIZE = 1024;
+
+ // Statically allocate memory for the environment.
+ var poolPtr;
+ var envPtr;
+ if (!___buildEnvironment.called) {
+ ___buildEnvironment.called = true;
+ // Set default values. Use string keys for Closure Compiler compatibility.
+ ENV['USER'] = 'root';
+ ENV['PATH'] = '/';
+ ENV['PWD'] = '/';
+ ENV['HOME'] = '/home/emscripten';
+ ENV['LANG'] = 'en_US.UTF-8';
+ ENV['_'] = './this.program';
+ // Allocate memory.
+ poolPtr = allocate(TOTAL_ENV_SIZE, 'i8', ALLOC_STATIC);
+ envPtr = allocate(MAX_ENV_VALUES * 4,
+ 'i8*', ALLOC_STATIC);
+ HEAP32[((envPtr)>>2)]=poolPtr;
+ HEAP32[((_environ)>>2)]=envPtr;
+ } else {
+ envPtr = HEAP32[((_environ)>>2)];
+ poolPtr = HEAP32[((envPtr)>>2)];
+ }
+
+ // Collect key=value lines.
+ var strings = [];
+ var totalSize = 0;
+ for (var key in env) {
+ if (typeof env[key] === 'string') {
+ var line = key + '=' + env[key];
+ strings.push(line);
+ totalSize += line.length;
+ }
+ }
+ if (totalSize > TOTAL_ENV_SIZE) {
+ throw new Error('Environment size exceeded TOTAL_ENV_SIZE!');
+ }
+
+ // Make new.
+ var ptrSize = 4;
+ for (var i = 0; i < strings.length; i++) {
+ var line = strings[i];
+ writeAsciiToMemory(line, poolPtr);
+ HEAP32[(((envPtr)+(i * ptrSize))>>2)]=poolPtr;
+ poolPtr += line.length + 1;
+ }
+ HEAP32[(((envPtr)+(strings.length * ptrSize))>>2)]=0;
+ }var ENV={};function _getenv(name) {
+ // char *getenv(const char *name);
+ // http://pubs.opengroup.org/onlinepubs/009695399/functions/getenv.html
+ if (name === 0) return 0;
+ name = Pointer_stringify(name);
+ if (!ENV.hasOwnProperty(name)) return 0;
+
+ if (_getenv.ret) _free(_getenv.ret);
+ _getenv.ret = allocate(intArrayFromString(ENV[name]), 'i8', ALLOC_NORMAL);
+ return _getenv.ret;
+ }
+
+ var _emscripten_setjmp=true;
+
+ var _cos=Math_cos;
+
+ function _isalnum(chr) {
+ return (chr >= 48 && chr <= 57) ||
+ (chr >= 97 && chr <= 122) ||
+ (chr >= 65 && chr <= 90);
+ }
+
+ var _BItoD=true;
+
+ function _difftime(time1, time0) {
+ return time1 - time0;
+ }
+
+ var _floor=Math_floor;
+
+ function _iscntrl(chr) {
+ return (0 <= chr && chr <= 0x1F) || chr === 0x7F;
+ }
+
+ var _atan2=Math_atan2;
+
+ function _setvbuf(stream, buf, type, size) {
+ // int setvbuf(FILE *restrict stream, char *restrict buf, int type, size_t size);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/setvbuf.html
+ // TODO: Implement custom buffering.
+ return 0;
+ }
+
+ var _exp=Math_exp;
+
+ var _copysignl=_copysign;
+
+ function _islower(chr) {
+ return chr >= 97 && chr <= 122;
+ }
+
+ var _acos=Math_acos;
+
+ function _isupper(chr) {
+ return chr >= 65 && chr <= 90;
+ }
+
+
+ function __isLeapYear(year) {
+ return year%4 === 0 && (year%100 !== 0 || year%400 === 0);
+ }
+
+ function __arraySum(array, index) {
+ var sum = 0;
+ for (var i = 0; i <= index; sum += array[i++]);
+ return sum;
+ }
+
+
+ var __MONTH_DAYS_LEAP=[31,29,31,30,31,30,31,31,30,31,30,31];
+
+ var __MONTH_DAYS_REGULAR=[31,28,31,30,31,30,31,31,30,31,30,31];function __addDays(date, days) {
+ var newDate = new Date(date.getTime());
+ while(days > 0) {
+ var leap = __isLeapYear(newDate.getFullYear());
+ var currentMonth = newDate.getMonth();
+ var daysInCurrentMonth = (leap ? __MONTH_DAYS_LEAP : __MONTH_DAYS_REGULAR)[currentMonth];
+
+ if (days > daysInCurrentMonth-newDate.getDate()) {
+ // we spill over to next month
+ days -= (daysInCurrentMonth-newDate.getDate()+1);
+ newDate.setDate(1);
+ if (currentMonth < 11) {
+ newDate.setMonth(currentMonth+1)
+ } else {
+ newDate.setMonth(0);
+ newDate.setFullYear(newDate.getFullYear()+1);
+ }
+ } else {
+ // we stay in current month
+ newDate.setDate(newDate.getDate()+days);
+ return newDate;
+ }
+ }
+
+ return newDate;
+ }function _strftime(s, maxsize, format, tm) {
+ // size_t strftime(char *restrict s, size_t maxsize, const char *restrict format, const struct tm *restrict timeptr);
+ // http://pubs.opengroup.org/onlinepubs/009695399/functions/strftime.html
+
+ var date = {
+ tm_sec: HEAP32[((tm)>>2)],
+ tm_min: HEAP32[(((tm)+(4))>>2)],
+ tm_hour: HEAP32[(((tm)+(8))>>2)],
+ tm_mday: HEAP32[(((tm)+(12))>>2)],
+ tm_mon: HEAP32[(((tm)+(16))>>2)],
+ tm_year: HEAP32[(((tm)+(20))>>2)],
+ tm_wday: HEAP32[(((tm)+(24))>>2)],
+ tm_yday: HEAP32[(((tm)+(28))>>2)],
+ tm_isdst: HEAP32[(((tm)+(32))>>2)]
+ };
+
+ var pattern = Pointer_stringify(format);
+
+ // expand format
+ var EXPANSION_RULES_1 = {
+ '%c': '%a %b %d %H:%M:%S %Y', // Replaced by the locale's appropriate date and time representation - e.g., Mon Aug 3 14:02:01 2013
+ '%D': '%m/%d/%y', // Equivalent to %m / %d / %y
+ '%F': '%Y-%m-%d', // Equivalent to %Y - %m - %d
+ '%h': '%b', // Equivalent to %b
+ '%r': '%I:%M:%S %p', // Replaced by the time in a.m. and p.m. notation
+ '%R': '%H:%M', // Replaced by the time in 24-hour notation
+ '%T': '%H:%M:%S', // Replaced by the time
+ '%x': '%m/%d/%y', // Replaced by the locale's appropriate date representation
+ '%X': '%H:%M:%S', // Replaced by the locale's appropriate date representation
+ };
+ for (var rule in EXPANSION_RULES_1) {
+ pattern = pattern.replace(new RegExp(rule, 'g'), EXPANSION_RULES_1[rule]);
+ }
+
+ var WEEKDAYS = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday'];
+ var MONTHS = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'];
+
+ function leadingSomething(value, digits, character) {
+ var str = typeof value === 'number' ? value.toString() : (value || '');
+ while (str.length < digits) {
+ str = character[0]+str;
+ }
+ return str;
+ };
+
+ function leadingNulls(value, digits) {
+ return leadingSomething(value, digits, '0');
+ };
+
+ function compareByDay(date1, date2) {
+ function sgn(value) {
+ return value < 0 ? -1 : (value > 0 ? 1 : 0);
+ };
+
+ var compare;
+ if ((compare = sgn(date1.getFullYear()-date2.getFullYear())) === 0) {
+ if ((compare = sgn(date1.getMonth()-date2.getMonth())) === 0) {
+ compare = sgn(date1.getDate()-date2.getDate());
+ }
+ }
+ return compare;
+ };
+
+ function getFirstWeekStartDate(janFourth) {
+ switch (janFourth.getDay()) {
+ case 0: // Sunday
+ return new Date(janFourth.getFullYear()-1, 11, 29);
+ case 1: // Monday
+ return janFourth;
+ case 2: // Tuesday
+ return new Date(janFourth.getFullYear(), 0, 3);
+ case 3: // Wednesday
+ return new Date(janFourth.getFullYear(), 0, 2);
+ case 4: // Thursday
+ return new Date(janFourth.getFullYear(), 0, 1);
+ case 5: // Friday
+ return new Date(janFourth.getFullYear()-1, 11, 31);
+ case 6: // Saturday
+ return new Date(janFourth.getFullYear()-1, 11, 30);
+ }
+ };
+
+ function getWeekBasedYear(date) {
+ var thisDate = __addDays(new Date(date.tm_year+1900, 0, 1), date.tm_yday);
+
+ var janFourthThisYear = new Date(thisDate.getFullYear(), 0, 4);
+ var janFourthNextYear = new Date(thisDate.getFullYear()+1, 0, 4);
+
+ var firstWeekStartThisYear = getFirstWeekStartDate(janFourthThisYear);
+ var firstWeekStartNextYear = getFirstWeekStartDate(janFourthNextYear);
+
+ if (compareByDay(firstWeekStartThisYear, thisDate) <= 0) {
+ // this date is after the start of the first week of this year
+ if (compareByDay(firstWeekStartNextYear, thisDate) <= 0) {
+ return thisDate.getFullYear()+1;
+ } else {
+ return thisDate.getFullYear();
+ }
+ } else {
+ return thisDate.getFullYear()-1;
+ }
+ };
+
+ var EXPANSION_RULES_2 = {
+ '%a': function(date) {
+ return WEEKDAYS[date.tm_wday].substring(0,3);
+ },
+ '%A': function(date) {
+ return WEEKDAYS[date.tm_wday];
+ },
+ '%b': function(date) {
+ return MONTHS[date.tm_mon].substring(0,3);
+ },
+ '%B': function(date) {
+ return MONTHS[date.tm_mon];
+ },
+ '%C': function(date) {
+ var year = date.tm_year+1900;
+ return leadingNulls(Math.floor(year/100),2);
+ },
+ '%d': function(date) {
+ return leadingNulls(date.tm_mday, 2);
+ },
+ '%e': function(date) {
+ return leadingSomething(date.tm_mday, 2, ' ');
+ },
+ '%g': function(date) {
+ // %g, %G, and %V give values according to the ISO 8601:2000 standard week-based year.
+ // In this system, weeks begin on a Monday and week 1 of the year is the week that includes
+ // January 4th, which is also the week that includes the first Thursday of the year, and
+ // is also the first week that contains at least four days in the year.
+ // If the first Monday of January is the 2nd, 3rd, or 4th, the preceding days are part of
+ // the last week of the preceding year; thus, for Saturday 2nd January 1999,
+ // %G is replaced by 1998 and %V is replaced by 53. If December 29th, 30th,
+ // or 31st is a Monday, it and any following days are part of week 1 of the following year.
+ // Thus, for Tuesday 30th December 1997, %G is replaced by 1998 and %V is replaced by 01.
+
+ return getWeekBasedYear(date).toString().substring(2);
+ },
+ '%G': function(date) {
+ return getWeekBasedYear(date);
+ },
+ '%H': function(date) {
+ return leadingNulls(date.tm_hour, 2);
+ },
+ '%I': function(date) {
+ return leadingNulls(date.tm_hour < 13 ? date.tm_hour : date.tm_hour-12, 2);
+ },
+ '%j': function(date) {
+ // Day of the year (001-366)
+ return leadingNulls(date.tm_mday+__arraySum(__isLeapYear(date.tm_year+1900) ? __MONTH_DAYS_LEAP : __MONTH_DAYS_REGULAR, date.tm_mon-1), 3);
+ },
+ '%m': function(date) {
+ return leadingNulls(date.tm_mon+1, 2);
+ },
+ '%M': function(date) {
+ return leadingNulls(date.tm_min, 2);
+ },
+ '%n': function() {
+ return '\n';
+ },
+ '%p': function(date) {
+ if (date.tm_hour > 0 && date.tm_hour < 13) {
+ return 'AM';
+ } else {
+ return 'PM';
+ }
+ },
+ '%S': function(date) {
+ return leadingNulls(date.tm_sec, 2);
+ },
+ '%t': function() {
+ return '\t';
+ },
+ '%u': function(date) {
+ var day = new Date(date.tm_year+1900, date.tm_mon+1, date.tm_mday, 0, 0, 0, 0);
+ return day.getDay() || 7;
+ },
+ '%U': function(date) {
+ // Replaced by the week number of the year as a decimal number [00,53].
+ // The first Sunday of January is the first day of week 1;
+ // days in the new year before this are in week 0. [ tm_year, tm_wday, tm_yday]
+ var janFirst = new Date(date.tm_year+1900, 0, 1);
+ var firstSunday = janFirst.getDay() === 0 ? janFirst : __addDays(janFirst, 7-janFirst.getDay());
+ var endDate = new Date(date.tm_year+1900, date.tm_mon, date.tm_mday);
+
+ // is target date after the first Sunday?
+ if (compareByDay(firstSunday, endDate) < 0) {
+ // calculate difference in days between first Sunday and endDate
+ var februaryFirstUntilEndMonth = __arraySum(__isLeapYear(endDate.getFullYear()) ? __MONTH_DAYS_LEAP : __MONTH_DAYS_REGULAR, endDate.getMonth()-1)-31;
+ var firstSundayUntilEndJanuary = 31-firstSunday.getDate();
+ var days = firstSundayUntilEndJanuary+februaryFirstUntilEndMonth+endDate.getDate();
+ return leadingNulls(Math.ceil(days/7), 2);
+ }
+
+ return compareByDay(firstSunday, janFirst) === 0 ? '01': '00';
+ },
+ '%V': function(date) {
+ // Replaced by the week number of the year (Monday as the first day of the week)
+ // as a decimal number [01,53]. If the week containing 1 January has four
+ // or more days in the new year, then it is considered week 1.
+ // Otherwise, it is the last week of the previous year, and the next week is week 1.
+ // Both January 4th and the first Thursday of January are always in week 1. [ tm_year, tm_wday, tm_yday]
+ var janFourthThisYear = new Date(date.tm_year+1900, 0, 4);
+ var janFourthNextYear = new Date(date.tm_year+1901, 0, 4);
+
+ var firstWeekStartThisYear = getFirstWeekStartDate(janFourthThisYear);
+ var firstWeekStartNextYear = getFirstWeekStartDate(janFourthNextYear);
+
+ var endDate = __addDays(new Date(date.tm_year+1900, 0, 1), date.tm_yday);
+
+ if (compareByDay(endDate, firstWeekStartThisYear) < 0) {
+ // if given date is before this years first week, then it belongs to the 53rd week of last year
+ return '53';
+ }
+
+ if (compareByDay(firstWeekStartNextYear, endDate) <= 0) {
+ // if given date is after next years first week, then it belongs to the 01th week of next year
+ return '01';
+ }
+
+ // given date is in between CW 01..53 of this calendar year
+ var daysDifference;
+ if (firstWeekStartThisYear.getFullYear() < date.tm_year+1900) {
+ // first CW of this year starts last year
+ daysDifference = date.tm_yday+32-firstWeekStartThisYear.getDate()
+ } else {
+ // first CW of this year starts this year
+ daysDifference = date.tm_yday+1-firstWeekStartThisYear.getDate();
+ }
+ return leadingNulls(Math.ceil(daysDifference/7), 2);
+ },
+ '%w': function(date) {
+ var day = new Date(date.tm_year+1900, date.tm_mon+1, date.tm_mday, 0, 0, 0, 0);
+ return day.getDay();
+ },
+ '%W': function(date) {
+ // Replaced by the week number of the year as a decimal number [00,53].
+ // The first Monday of January is the first day of week 1;
+ // days in the new year before this are in week 0. [ tm_year, tm_wday, tm_yday]
+ var janFirst = new Date(date.tm_year, 0, 1);
+ var firstMonday = janFirst.getDay() === 1 ? janFirst : __addDays(janFirst, janFirst.getDay() === 0 ? 1 : 7-janFirst.getDay()+1);
+ var endDate = new Date(date.tm_year+1900, date.tm_mon, date.tm_mday);
+
+ // is target date after the first Monday?
+ if (compareByDay(firstMonday, endDate) < 0) {
+ var februaryFirstUntilEndMonth = __arraySum(__isLeapYear(endDate.getFullYear()) ? __MONTH_DAYS_LEAP : __MONTH_DAYS_REGULAR, endDate.getMonth()-1)-31;
+ var firstMondayUntilEndJanuary = 31-firstMonday.getDate();
+ var days = firstMondayUntilEndJanuary+februaryFirstUntilEndMonth+endDate.getDate();
+ return leadingNulls(Math.ceil(days/7), 2);
+ }
+ return compareByDay(firstMonday, janFirst) === 0 ? '01': '00';
+ },
+ '%y': function(date) {
+ // Replaced by the last two digits of the year as a decimal number [00,99]. [ tm_year]
+ return (date.tm_year+1900).toString().substring(2);
+ },
+ '%Y': function(date) {
+ // Replaced by the year as a decimal number (for example, 1997). [ tm_year]
+ return date.tm_year+1900;
+ },
+ '%z': function(date) {
+ // Replaced by the offset from UTC in the ISO 8601:2000 standard format ( +hhmm or -hhmm ),
+ // or by no characters if no timezone is determinable.
+ // For example, "-0430" means 4 hours 30 minutes behind UTC (west of Greenwich).
+ // If tm_isdst is zero, the standard time offset is used.
+ // If tm_isdst is greater than zero, the daylight savings time offset is used.
+ // If tm_isdst is negative, no characters are returned.
+ // FIXME: we cannot determine time zone (or can we?)
+ return '';
+ },
+ '%Z': function(date) {
+ // Replaced by the timezone name or abbreviation, or by no bytes if no timezone information exists. [ tm_isdst]
+ // FIXME: we cannot determine time zone (or can we?)
+ return '';
+ },
+ '%%': function() {
+ return '%';
+ }
+ };
+ for (var rule in EXPANSION_RULES_2) {
+ if (pattern.indexOf(rule) >= 0) {
+ pattern = pattern.replace(new RegExp(rule, 'g'), EXPANSION_RULES_2[rule](date));
+ }
+ }
+
+ var bytes = intArrayFromString(pattern, false);
+ if (bytes.length > maxsize) {
+ return 0;
+ }
+
+ writeArrayToMemory(bytes, s);
+ return bytes.length-1;
+ }
+
+
+
+FS.staticInit();__ATINIT__.unshift({ func: function() { if (!Module["noFSInit"] && !FS.init.initialized) FS.init() } });__ATMAIN__.push({ func: function() { FS.ignorePermissions = false } });__ATEXIT__.push({ func: function() { FS.quit() } });Module["FS_createFolder"] = FS.createFolder;Module["FS_createPath"] = FS.createPath;Module["FS_createDataFile"] = FS.createDataFile;Module["FS_createPreloadedFile"] = FS.createPreloadedFile;Module["FS_createLazyFile"] = FS.createLazyFile;Module["FS_createLink"] = FS.createLink;Module["FS_createDevice"] = FS.createDevice;
+___errno_state = Runtime.staticAlloc(4); HEAP32[((___errno_state)>>2)]=0;
+__ATINIT__.unshift({ func: function() { TTY.init() } });__ATEXIT__.push({ func: function() { TTY.shutdown() } });TTY.utf8 = new Runtime.UTF8Processor();
+if (ENVIRONMENT_IS_NODE) { var fs = require("fs"); NODEFS.staticInit(); }
+__ATINIT__.push({ func: function() { SOCKFS.root = FS.mount(SOCKFS, {}, null); } });
+_fputc.ret = allocate([0], "i8", ALLOC_STATIC);
+Module["requestFullScreen"] = function Module_requestFullScreen(lockPointer, resizeCanvas) { Browser.requestFullScreen(lockPointer, resizeCanvas) };
+ Module["requestAnimationFrame"] = function Module_requestAnimationFrame(func) { Browser.requestAnimationFrame(func) };
+ Module["setCanvasSize"] = function Module_setCanvasSize(width, height, noUpdates) { Browser.setCanvasSize(width, height, noUpdates) };
+ Module["pauseMainLoop"] = function Module_pauseMainLoop() { Browser.mainLoop.pause() };
+ Module["resumeMainLoop"] = function Module_resumeMainLoop() { Browser.mainLoop.resume() };
+ Module["getUserMedia"] = function Module_getUserMedia() { Browser.getUserMedia() }
+_fgetc.ret = allocate([0], "i8", ALLOC_STATIC);
+___buildEnvironment(ENV);
+STACK_BASE = STACKTOP = Runtime.alignMemory(STATICTOP);
+
+staticSealed = true; // seal the static portion of memory
+
+STACK_MAX = STACK_BASE + 5242880;
+
+DYNAMIC_BASE = DYNAMICTOP = Runtime.alignMemory(STACK_MAX);
+
+assert(DYNAMIC_BASE < TOTAL_MEMORY, "TOTAL_MEMORY not big enough for stack");
+
+ var ctlz_i8 = allocate([8,7,6,6,5,5,5,5,4,4,4,4,4,4,4,4,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], "i8", ALLOC_DYNAMIC);
+ var cttz_i8 = allocate([8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0], "i8", ALLOC_DYNAMIC);
+
+var Math_min = Math.min;
+function invoke_iiii(index,a1,a2,a3) {
+ try {
+ return Module["dynCall_iiii"](index,a1,a2,a3);
+ } catch(e) {
+ if (typeof e !== 'number' && e !== 'longjmp') throw e;
+ asm["setThrew"](1, 0);
+ }
+}
+
+function invoke_vi(index,a1) {
+ try {
+ Module["dynCall_vi"](index,a1);
+ } catch(e) {
+ if (typeof e !== 'number' && e !== 'longjmp') throw e;
+ asm["setThrew"](1, 0);
+ }
+}
+
+function invoke_vii(index,a1,a2) {
+ try {
+ Module["dynCall_vii"](index,a1,a2);
+ } catch(e) {
+ if (typeof e !== 'number' && e !== 'longjmp') throw e;
+ asm["setThrew"](1, 0);
+ }
+}
+
+function invoke_ii(index,a1) {
+ try {
+ return Module["dynCall_ii"](index,a1);
+ } catch(e) {
+ if (typeof e !== 'number' && e !== 'longjmp') throw e;
+ asm["setThrew"](1, 0);
+ }
+}
+
+function invoke_iiiii(index,a1,a2,a3,a4) {
+ try {
+ return Module["dynCall_iiiii"](index,a1,a2,a3,a4);
+ } catch(e) {
+ if (typeof e !== 'number' && e !== 'longjmp') throw e;
+ asm["setThrew"](1, 0);
+ }
+}
+
+function invoke_iii(index,a1,a2) {
+ try {
+ return Module["dynCall_iii"](index,a1,a2);
+ } catch(e) {
+ if (typeof e !== 'number' && e !== 'longjmp') throw e;
+ asm["setThrew"](1, 0);
+ }
+}
+
+function asmPrintInt(x, y) {
+ Module.print('int ' + x + ',' + y);// + ' ' + new Error().stack);
+}
+function asmPrintFloat(x, y) {
+ Module.print('float ' + x + ',' + y);// + ' ' + new Error().stack);
+}
+// EMSCRIPTEN_START_ASM
+var asm = Wasm.instantiateModuleFromAsm((function Module(global, env, buffer) {
+ 'use asm';
+ var HEAP8 = new global.Int8Array(buffer);
+ var HEAP16 = new global.Int16Array(buffer);
+ var HEAP32 = new global.Int32Array(buffer);
+ var HEAPU8 = new global.Uint8Array(buffer);
+ var HEAPU16 = new global.Uint16Array(buffer);
+ var HEAPU32 = new global.Uint32Array(buffer);
+ var HEAPF32 = new global.Float32Array(buffer);
+ var HEAPF64 = new global.Float64Array(buffer);
+
+ var STACKTOP=env.STACKTOP|0;
+ var STACK_MAX=env.STACK_MAX|0;
+ var tempDoublePtr=env.tempDoublePtr|0;
+ var ABORT=env.ABORT|0;
+ var cttz_i8=env.cttz_i8|0;
+ var ctlz_i8=env.ctlz_i8|0;
+ var ___rand_seed=env.___rand_seed|0;
+ var _stderr=env._stderr|0;
+ var _stdin=env._stdin|0;
+ var _stdout=env._stdout|0;
+
+ var __THREW__ = 0;
+ var threwValue = 0;
+ var setjmpId = 0;
+ var undef = 0;
+ var nan = +env.NaN, inf = +env.Infinity;
+ var tempInt = 0, tempBigInt = 0, tempBigIntP = 0, tempBigIntS = 0, tempBigIntR = 0.0, tempBigIntI = 0, tempBigIntD = 0, tempValue = 0, tempDouble = 0.0;
+
+ var tempRet0 = 0;
+ var tempRet1 = 0;
+ var tempRet2 = 0;
+ var tempRet3 = 0;
+ var tempRet4 = 0;
+ var tempRet5 = 0;
+ var tempRet6 = 0;
+ var tempRet7 = 0;
+ var tempRet8 = 0;
+ var tempRet9 = 0;
+ var Math_floor=global.Math.floor;
+ var Math_abs=global.Math.abs;
+ var Math_sqrt=global.Math.sqrt;
+ var Math_pow=global.Math.pow;
+ var Math_cos=global.Math.cos;
+ var Math_sin=global.Math.sin;
+ var Math_tan=global.Math.tan;
+ var Math_acos=global.Math.acos;
+ var Math_asin=global.Math.asin;
+ var Math_atan=global.Math.atan;
+ var Math_atan2=global.Math.atan2;
+ var Math_exp=global.Math.exp;
+ var Math_log=global.Math.log;
+ var Math_ceil=global.Math.ceil;
+ var Math_imul=global.Math.imul;
+ var abort=env.abort;
+ var assert=env.assert;
+ var asmPrintInt=env.asmPrintInt;
+ var asmPrintFloat=env.asmPrintFloat;
+ var Math_min=env.min;
+ var invoke_iiii=env.invoke_iiii;
+ var invoke_vi=env.invoke_vi;
+ var invoke_vii=env.invoke_vii;
+ var invoke_ii=env.invoke_ii;
+ var invoke_iiiii=env.invoke_iiiii;
+ var invoke_iii=env.invoke_iii;
+ var _isalnum=env._isalnum;
+ var _fabs=env._fabs;
+ var _frexp=env._frexp;
+ var _exp=env._exp;
+ var _fread=env._fread;
+ var __reallyNegative=env.__reallyNegative;
+ var _longjmp=env._longjmp;
+ var __addDays=env.__addDays;
+ var _fsync=env._fsync;
+ var _signal=env._signal;
+ var _rename=env._rename;
+ var _sbrk=env._sbrk;
+ var _emscripten_memcpy_big=env._emscripten_memcpy_big;
+ var _sinh=env._sinh;
+ var _sysconf=env._sysconf;
+ var _close=env._close;
+ var _ferror=env._ferror;
+ var _clock=env._clock;
+ var _cos=env._cos;
+ var _tanh=env._tanh;
+ var _unlink=env._unlink;
+ var _write=env._write;
+ var __isLeapYear=env.__isLeapYear;
+ var _ftell=env._ftell;
+ var _isupper=env._isupper;
+ var _gmtime_r=env._gmtime_r;
+ var _islower=env._islower;
+ var _tmpnam=env._tmpnam;
+ var _tmpfile=env._tmpfile;
+ var _send=env._send;
+ var _abort=env._abort;
+ var _setvbuf=env._setvbuf;
+ var _atan2=env._atan2;
+ var _setlocale=env._setlocale;
+ var _isgraph=env._isgraph;
+ var _modf=env._modf;
+ var _strerror_r=env._strerror_r;
+ var _fscanf=env._fscanf;
+ var ___setErrNo=env.___setErrNo;
+ var _isalpha=env._isalpha;
+ var _srand=env._srand;
+ var _mktime=env._mktime;
+ var _putchar=env._putchar;
+ var _gmtime=env._gmtime;
+ var _localeconv=env._localeconv;
+ var _sprintf=env._sprintf;
+ var _localtime=env._localtime;
+ var _read=env._read;
+ var _fwrite=env._fwrite;
+ var _time=env._time;
+ var _fprintf=env._fprintf;
+ var _exit=env._exit;
+ var _freopen=env._freopen;
+ var _llvm_pow_f64=env._llvm_pow_f64;
+ var _fgetc=env._fgetc;
+ var _fmod=env._fmod;
+ var _lseek=env._lseek;
+ var _rmdir=env._rmdir;
+ var _asin=env._asin;
+ var _floor=env._floor;
+ var _pwrite=env._pwrite;
+ var _localtime_r=env._localtime_r;
+ var _tzset=env._tzset;
+ var _open=env._open;
+ var _remove=env._remove;
+ var _snprintf=env._snprintf;
+ var __scanString=env.__scanString;
+ var _strftime=env._strftime;
+ var _fseek=env._fseek;
+ var _iscntrl=env._iscntrl;
+ var _isxdigit=env._isxdigit;
+ var _fclose=env._fclose;
+ var _log=env._log;
+ var _recv=env._recv;
+ var _tan=env._tan;
+ var _copysign=env._copysign;
+ var __getFloat=env.__getFloat;
+ var _fputc=env._fputc;
+ var _ispunct=env._ispunct;
+ var _ceil=env._ceil;
+ var _isspace=env._isspace;
+ var _fopen=env._fopen;
+ var _sin=env._sin;
+ var _acos=env._acos;
+ var _cosh=env._cosh;
+ var ___buildEnvironment=env.___buildEnvironment;
+ var _difftime=env._difftime;
+ var _ungetc=env._ungetc;
+ var _system=env._system;
+ var _fflush=env._fflush;
+ var _log10=env._log10;
+ var _fileno=env._fileno;
+ var __exit=env.__exit;
+ var __arraySum=env.__arraySum;
+ var _fgets=env._fgets;
+ var _atan=env._atan;
+ var _pread=env._pread;
+ var _mkport=env._mkport;
+ var _toupper=env._toupper;
+ var _feof=env._feof;
+ var ___errno_location=env.___errno_location;
+ var _clearerr=env._clearerr;
+ var _getenv=env._getenv;
+ var _strerror=env._strerror;
+ var _emscripten_longjmp=env._emscripten_longjmp;
+ var __formatString=env.__formatString;
+ var _fputs=env._fputs;
+ var _sqrt=env._sqrt;
+ var tempFloat = 0.0;
+
+// EMSCRIPTEN_START_FUNCS
+function _malloc(i12) {
+ i12 = i12 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0, i23 = 0, i24 = 0, i25 = 0, i26 = 0, i27 = 0, i28 = 0, i29 = 0, i30 = 0, i31 = 0, i32 = 0;
+ i1 = STACKTOP;
+ do {
+ if (i12 >>> 0 < 245) {
+ if (i12 >>> 0 < 11) {
+ i12 = 16;
+ } else {
+ i12 = i12 + 11 & -8;
+ }
+ i20 = i12 >>> 3;
+ i18 = HEAP32[3228] | 0;
+ i21 = i18 >>> i20;
+ if ((i21 & 3 | 0) != 0) {
+ i6 = (i21 & 1 ^ 1) + i20 | 0;
+ i5 = i6 << 1;
+ i3 = 12952 + (i5 << 2) | 0;
+ i5 = 12952 + (i5 + 2 << 2) | 0;
+ i7 = HEAP32[i5 >> 2] | 0;
+ i2 = i7 + 8 | 0;
+ i4 = HEAP32[i2 >> 2] | 0;
+ do {
+ if ((i3 | 0) != (i4 | 0)) {
+ if (i4 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i8 = i4 + 12 | 0;
+ if ((HEAP32[i8 >> 2] | 0) == (i7 | 0)) {
+ HEAP32[i8 >> 2] = i3;
+ HEAP32[i5 >> 2] = i4;
+ break;
+ } else {
+ _abort();
+ }
+ } else {
+ HEAP32[3228] = i18 & ~(1 << i6);
+ }
+ } while (0);
+ i32 = i6 << 3;
+ HEAP32[i7 + 4 >> 2] = i32 | 3;
+ i32 = i7 + (i32 | 4) | 0;
+ HEAP32[i32 >> 2] = HEAP32[i32 >> 2] | 1;
+ i32 = i2;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ if (i12 >>> 0 > (HEAP32[12920 >> 2] | 0) >>> 0) {
+ if ((i21 | 0) != 0) {
+ i7 = 2 << i20;
+ i7 = i21 << i20 & (i7 | 0 - i7);
+ i7 = (i7 & 0 - i7) + -1 | 0;
+ i2 = i7 >>> 12 & 16;
+ i7 = i7 >>> i2;
+ i6 = i7 >>> 5 & 8;
+ i7 = i7 >>> i6;
+ i5 = i7 >>> 2 & 4;
+ i7 = i7 >>> i5;
+ i4 = i7 >>> 1 & 2;
+ i7 = i7 >>> i4;
+ i3 = i7 >>> 1 & 1;
+ i3 = (i6 | i2 | i5 | i4 | i3) + (i7 >>> i3) | 0;
+ i7 = i3 << 1;
+ i4 = 12952 + (i7 << 2) | 0;
+ i7 = 12952 + (i7 + 2 << 2) | 0;
+ i5 = HEAP32[i7 >> 2] | 0;
+ i2 = i5 + 8 | 0;
+ i6 = HEAP32[i2 >> 2] | 0;
+ do {
+ if ((i4 | 0) != (i6 | 0)) {
+ if (i6 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i8 = i6 + 12 | 0;
+ if ((HEAP32[i8 >> 2] | 0) == (i5 | 0)) {
+ HEAP32[i8 >> 2] = i4;
+ HEAP32[i7 >> 2] = i6;
+ break;
+ } else {
+ _abort();
+ }
+ } else {
+ HEAP32[3228] = i18 & ~(1 << i3);
+ }
+ } while (0);
+ i6 = i3 << 3;
+ i4 = i6 - i12 | 0;
+ HEAP32[i5 + 4 >> 2] = i12 | 3;
+ i3 = i5 + i12 | 0;
+ HEAP32[i5 + (i12 | 4) >> 2] = i4 | 1;
+ HEAP32[i5 + i6 >> 2] = i4;
+ i6 = HEAP32[12920 >> 2] | 0;
+ if ((i6 | 0) != 0) {
+ i5 = HEAP32[12932 >> 2] | 0;
+ i8 = i6 >>> 3;
+ i9 = i8 << 1;
+ i6 = 12952 + (i9 << 2) | 0;
+ i7 = HEAP32[3228] | 0;
+ i8 = 1 << i8;
+ if ((i7 & i8 | 0) != 0) {
+ i7 = 12952 + (i9 + 2 << 2) | 0;
+ i8 = HEAP32[i7 >> 2] | 0;
+ if (i8 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i28 = i7;
+ i27 = i8;
+ }
+ } else {
+ HEAP32[3228] = i7 | i8;
+ i28 = 12952 + (i9 + 2 << 2) | 0;
+ i27 = i6;
+ }
+ HEAP32[i28 >> 2] = i5;
+ HEAP32[i27 + 12 >> 2] = i5;
+ HEAP32[i5 + 8 >> 2] = i27;
+ HEAP32[i5 + 12 >> 2] = i6;
+ }
+ HEAP32[12920 >> 2] = i4;
+ HEAP32[12932 >> 2] = i3;
+ i32 = i2;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ i18 = HEAP32[12916 >> 2] | 0;
+ if ((i18 | 0) != 0) {
+ i2 = (i18 & 0 - i18) + -1 | 0;
+ i31 = i2 >>> 12 & 16;
+ i2 = i2 >>> i31;
+ i30 = i2 >>> 5 & 8;
+ i2 = i2 >>> i30;
+ i32 = i2 >>> 2 & 4;
+ i2 = i2 >>> i32;
+ i6 = i2 >>> 1 & 2;
+ i2 = i2 >>> i6;
+ i3 = i2 >>> 1 & 1;
+ i3 = HEAP32[13216 + ((i30 | i31 | i32 | i6 | i3) + (i2 >>> i3) << 2) >> 2] | 0;
+ i2 = (HEAP32[i3 + 4 >> 2] & -8) - i12 | 0;
+ i6 = i3;
+ while (1) {
+ i5 = HEAP32[i6 + 16 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ i5 = HEAP32[i6 + 20 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ break;
+ }
+ }
+ i6 = (HEAP32[i5 + 4 >> 2] & -8) - i12 | 0;
+ i4 = i6 >>> 0 < i2 >>> 0;
+ i2 = i4 ? i6 : i2;
+ i6 = i5;
+ i3 = i4 ? i5 : i3;
+ }
+ i6 = HEAP32[12928 >> 2] | 0;
+ if (i3 >>> 0 < i6 >>> 0) {
+ _abort();
+ }
+ i4 = i3 + i12 | 0;
+ if (!(i3 >>> 0 < i4 >>> 0)) {
+ _abort();
+ }
+ i5 = HEAP32[i3 + 24 >> 2] | 0;
+ i7 = HEAP32[i3 + 12 >> 2] | 0;
+ do {
+ if ((i7 | 0) == (i3 | 0)) {
+ i8 = i3 + 20 | 0;
+ i7 = HEAP32[i8 >> 2] | 0;
+ if ((i7 | 0) == 0) {
+ i8 = i3 + 16 | 0;
+ i7 = HEAP32[i8 >> 2] | 0;
+ if ((i7 | 0) == 0) {
+ i26 = 0;
+ break;
+ }
+ }
+ while (1) {
+ i10 = i7 + 20 | 0;
+ i9 = HEAP32[i10 >> 2] | 0;
+ if ((i9 | 0) != 0) {
+ i7 = i9;
+ i8 = i10;
+ continue;
+ }
+ i10 = i7 + 16 | 0;
+ i9 = HEAP32[i10 >> 2] | 0;
+ if ((i9 | 0) == 0) {
+ break;
+ } else {
+ i7 = i9;
+ i8 = i10;
+ }
+ }
+ if (i8 >>> 0 < i6 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i8 >> 2] = 0;
+ i26 = i7;
+ break;
+ }
+ } else {
+ i8 = HEAP32[i3 + 8 >> 2] | 0;
+ if (i8 >>> 0 < i6 >>> 0) {
+ _abort();
+ }
+ i6 = i8 + 12 | 0;
+ if ((HEAP32[i6 >> 2] | 0) != (i3 | 0)) {
+ _abort();
+ }
+ i9 = i7 + 8 | 0;
+ if ((HEAP32[i9 >> 2] | 0) == (i3 | 0)) {
+ HEAP32[i6 >> 2] = i7;
+ HEAP32[i9 >> 2] = i8;
+ i26 = i7;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ do {
+ if ((i5 | 0) != 0) {
+ i7 = HEAP32[i3 + 28 >> 2] | 0;
+ i6 = 13216 + (i7 << 2) | 0;
+ if ((i3 | 0) == (HEAP32[i6 >> 2] | 0)) {
+ HEAP32[i6 >> 2] = i26;
+ if ((i26 | 0) == 0) {
+ HEAP32[12916 >> 2] = HEAP32[12916 >> 2] & ~(1 << i7);
+ break;
+ }
+ } else {
+ if (i5 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i6 = i5 + 16 | 0;
+ if ((HEAP32[i6 >> 2] | 0) == (i3 | 0)) {
+ HEAP32[i6 >> 2] = i26;
+ } else {
+ HEAP32[i5 + 20 >> 2] = i26;
+ }
+ if ((i26 | 0) == 0) {
+ break;
+ }
+ }
+ if (i26 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i26 + 24 >> 2] = i5;
+ i5 = HEAP32[i3 + 16 >> 2] | 0;
+ do {
+ if ((i5 | 0) != 0) {
+ if (i5 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i26 + 16 >> 2] = i5;
+ HEAP32[i5 + 24 >> 2] = i26;
+ break;
+ }
+ }
+ } while (0);
+ i5 = HEAP32[i3 + 20 >> 2] | 0;
+ if ((i5 | 0) != 0) {
+ if (i5 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i26 + 20 >> 2] = i5;
+ HEAP32[i5 + 24 >> 2] = i26;
+ break;
+ }
+ }
+ }
+ } while (0);
+ if (i2 >>> 0 < 16) {
+ i32 = i2 + i12 | 0;
+ HEAP32[i3 + 4 >> 2] = i32 | 3;
+ i32 = i3 + (i32 + 4) | 0;
+ HEAP32[i32 >> 2] = HEAP32[i32 >> 2] | 1;
+ } else {
+ HEAP32[i3 + 4 >> 2] = i12 | 3;
+ HEAP32[i3 + (i12 | 4) >> 2] = i2 | 1;
+ HEAP32[i3 + (i2 + i12) >> 2] = i2;
+ i6 = HEAP32[12920 >> 2] | 0;
+ if ((i6 | 0) != 0) {
+ i5 = HEAP32[12932 >> 2] | 0;
+ i8 = i6 >>> 3;
+ i9 = i8 << 1;
+ i6 = 12952 + (i9 << 2) | 0;
+ i7 = HEAP32[3228] | 0;
+ i8 = 1 << i8;
+ if ((i7 & i8 | 0) != 0) {
+ i7 = 12952 + (i9 + 2 << 2) | 0;
+ i8 = HEAP32[i7 >> 2] | 0;
+ if (i8 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i25 = i7;
+ i24 = i8;
+ }
+ } else {
+ HEAP32[3228] = i7 | i8;
+ i25 = 12952 + (i9 + 2 << 2) | 0;
+ i24 = i6;
+ }
+ HEAP32[i25 >> 2] = i5;
+ HEAP32[i24 + 12 >> 2] = i5;
+ HEAP32[i5 + 8 >> 2] = i24;
+ HEAP32[i5 + 12 >> 2] = i6;
+ }
+ HEAP32[12920 >> 2] = i2;
+ HEAP32[12932 >> 2] = i4;
+ }
+ i32 = i3 + 8 | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ }
+ } else {
+ if (!(i12 >>> 0 > 4294967231)) {
+ i24 = i12 + 11 | 0;
+ i12 = i24 & -8;
+ i26 = HEAP32[12916 >> 2] | 0;
+ if ((i26 | 0) != 0) {
+ i25 = 0 - i12 | 0;
+ i24 = i24 >>> 8;
+ if ((i24 | 0) != 0) {
+ if (i12 >>> 0 > 16777215) {
+ i27 = 31;
+ } else {
+ i31 = (i24 + 1048320 | 0) >>> 16 & 8;
+ i32 = i24 << i31;
+ i30 = (i32 + 520192 | 0) >>> 16 & 4;
+ i32 = i32 << i30;
+ i27 = (i32 + 245760 | 0) >>> 16 & 2;
+ i27 = 14 - (i30 | i31 | i27) + (i32 << i27 >>> 15) | 0;
+ i27 = i12 >>> (i27 + 7 | 0) & 1 | i27 << 1;
+ }
+ } else {
+ i27 = 0;
+ }
+ i30 = HEAP32[13216 + (i27 << 2) >> 2] | 0;
+ L126 : do {
+ if ((i30 | 0) == 0) {
+ i29 = 0;
+ i24 = 0;
+ } else {
+ if ((i27 | 0) == 31) {
+ i24 = 0;
+ } else {
+ i24 = 25 - (i27 >>> 1) | 0;
+ }
+ i29 = 0;
+ i28 = i12 << i24;
+ i24 = 0;
+ while (1) {
+ i32 = HEAP32[i30 + 4 >> 2] & -8;
+ i31 = i32 - i12 | 0;
+ if (i31 >>> 0 < i25 >>> 0) {
+ if ((i32 | 0) == (i12 | 0)) {
+ i25 = i31;
+ i29 = i30;
+ i24 = i30;
+ break L126;
+ } else {
+ i25 = i31;
+ i24 = i30;
+ }
+ }
+ i31 = HEAP32[i30 + 20 >> 2] | 0;
+ i30 = HEAP32[i30 + (i28 >>> 31 << 2) + 16 >> 2] | 0;
+ i29 = (i31 | 0) == 0 | (i31 | 0) == (i30 | 0) ? i29 : i31;
+ if ((i30 | 0) == 0) {
+ break;
+ } else {
+ i28 = i28 << 1;
+ }
+ }
+ }
+ } while (0);
+ if ((i29 | 0) == 0 & (i24 | 0) == 0) {
+ i32 = 2 << i27;
+ i26 = i26 & (i32 | 0 - i32);
+ if ((i26 | 0) == 0) {
+ break;
+ }
+ i32 = (i26 & 0 - i26) + -1 | 0;
+ i28 = i32 >>> 12 & 16;
+ i32 = i32 >>> i28;
+ i27 = i32 >>> 5 & 8;
+ i32 = i32 >>> i27;
+ i30 = i32 >>> 2 & 4;
+ i32 = i32 >>> i30;
+ i31 = i32 >>> 1 & 2;
+ i32 = i32 >>> i31;
+ i29 = i32 >>> 1 & 1;
+ i29 = HEAP32[13216 + ((i27 | i28 | i30 | i31 | i29) + (i32 >>> i29) << 2) >> 2] | 0;
+ }
+ if ((i29 | 0) != 0) {
+ while (1) {
+ i27 = (HEAP32[i29 + 4 >> 2] & -8) - i12 | 0;
+ i26 = i27 >>> 0 < i25 >>> 0;
+ i25 = i26 ? i27 : i25;
+ i24 = i26 ? i29 : i24;
+ i26 = HEAP32[i29 + 16 >> 2] | 0;
+ if ((i26 | 0) != 0) {
+ i29 = i26;
+ continue;
+ }
+ i29 = HEAP32[i29 + 20 >> 2] | 0;
+ if ((i29 | 0) == 0) {
+ break;
+ }
+ }
+ }
+ if ((i24 | 0) != 0 ? i25 >>> 0 < ((HEAP32[12920 >> 2] | 0) - i12 | 0) >>> 0 : 0) {
+ i4 = HEAP32[12928 >> 2] | 0;
+ if (i24 >>> 0 < i4 >>> 0) {
+ _abort();
+ }
+ i2 = i24 + i12 | 0;
+ if (!(i24 >>> 0 < i2 >>> 0)) {
+ _abort();
+ }
+ i3 = HEAP32[i24 + 24 >> 2] | 0;
+ i6 = HEAP32[i24 + 12 >> 2] | 0;
+ do {
+ if ((i6 | 0) == (i24 | 0)) {
+ i6 = i24 + 20 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ i6 = i24 + 16 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ i22 = 0;
+ break;
+ }
+ }
+ while (1) {
+ i8 = i5 + 20 | 0;
+ i7 = HEAP32[i8 >> 2] | 0;
+ if ((i7 | 0) != 0) {
+ i5 = i7;
+ i6 = i8;
+ continue;
+ }
+ i7 = i5 + 16 | 0;
+ i8 = HEAP32[i7 >> 2] | 0;
+ if ((i8 | 0) == 0) {
+ break;
+ } else {
+ i5 = i8;
+ i6 = i7;
+ }
+ }
+ if (i6 >>> 0 < i4 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i6 >> 2] = 0;
+ i22 = i5;
+ break;
+ }
+ } else {
+ i5 = HEAP32[i24 + 8 >> 2] | 0;
+ if (i5 >>> 0 < i4 >>> 0) {
+ _abort();
+ }
+ i7 = i5 + 12 | 0;
+ if ((HEAP32[i7 >> 2] | 0) != (i24 | 0)) {
+ _abort();
+ }
+ i4 = i6 + 8 | 0;
+ if ((HEAP32[i4 >> 2] | 0) == (i24 | 0)) {
+ HEAP32[i7 >> 2] = i6;
+ HEAP32[i4 >> 2] = i5;
+ i22 = i6;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ do {
+ if ((i3 | 0) != 0) {
+ i4 = HEAP32[i24 + 28 >> 2] | 0;
+ i5 = 13216 + (i4 << 2) | 0;
+ if ((i24 | 0) == (HEAP32[i5 >> 2] | 0)) {
+ HEAP32[i5 >> 2] = i22;
+ if ((i22 | 0) == 0) {
+ HEAP32[12916 >> 2] = HEAP32[12916 >> 2] & ~(1 << i4);
+ break;
+ }
+ } else {
+ if (i3 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i4 = i3 + 16 | 0;
+ if ((HEAP32[i4 >> 2] | 0) == (i24 | 0)) {
+ HEAP32[i4 >> 2] = i22;
+ } else {
+ HEAP32[i3 + 20 >> 2] = i22;
+ }
+ if ((i22 | 0) == 0) {
+ break;
+ }
+ }
+ if (i22 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i22 + 24 >> 2] = i3;
+ i3 = HEAP32[i24 + 16 >> 2] | 0;
+ do {
+ if ((i3 | 0) != 0) {
+ if (i3 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i22 + 16 >> 2] = i3;
+ HEAP32[i3 + 24 >> 2] = i22;
+ break;
+ }
+ }
+ } while (0);
+ i3 = HEAP32[i24 + 20 >> 2] | 0;
+ if ((i3 | 0) != 0) {
+ if (i3 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i22 + 20 >> 2] = i3;
+ HEAP32[i3 + 24 >> 2] = i22;
+ break;
+ }
+ }
+ }
+ } while (0);
+ L204 : do {
+ if (!(i25 >>> 0 < 16)) {
+ HEAP32[i24 + 4 >> 2] = i12 | 3;
+ HEAP32[i24 + (i12 | 4) >> 2] = i25 | 1;
+ HEAP32[i24 + (i25 + i12) >> 2] = i25;
+ i4 = i25 >>> 3;
+ if (i25 >>> 0 < 256) {
+ i6 = i4 << 1;
+ i3 = 12952 + (i6 << 2) | 0;
+ i5 = HEAP32[3228] | 0;
+ i4 = 1 << i4;
+ if ((i5 & i4 | 0) != 0) {
+ i5 = 12952 + (i6 + 2 << 2) | 0;
+ i4 = HEAP32[i5 >> 2] | 0;
+ if (i4 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i21 = i5;
+ i20 = i4;
+ }
+ } else {
+ HEAP32[3228] = i5 | i4;
+ i21 = 12952 + (i6 + 2 << 2) | 0;
+ i20 = i3;
+ }
+ HEAP32[i21 >> 2] = i2;
+ HEAP32[i20 + 12 >> 2] = i2;
+ HEAP32[i24 + (i12 + 8) >> 2] = i20;
+ HEAP32[i24 + (i12 + 12) >> 2] = i3;
+ break;
+ }
+ i3 = i25 >>> 8;
+ if ((i3 | 0) != 0) {
+ if (i25 >>> 0 > 16777215) {
+ i3 = 31;
+ } else {
+ i31 = (i3 + 1048320 | 0) >>> 16 & 8;
+ i32 = i3 << i31;
+ i30 = (i32 + 520192 | 0) >>> 16 & 4;
+ i32 = i32 << i30;
+ i3 = (i32 + 245760 | 0) >>> 16 & 2;
+ i3 = 14 - (i30 | i31 | i3) + (i32 << i3 >>> 15) | 0;
+ i3 = i25 >>> (i3 + 7 | 0) & 1 | i3 << 1;
+ }
+ } else {
+ i3 = 0;
+ }
+ i6 = 13216 + (i3 << 2) | 0;
+ HEAP32[i24 + (i12 + 28) >> 2] = i3;
+ HEAP32[i24 + (i12 + 20) >> 2] = 0;
+ HEAP32[i24 + (i12 + 16) >> 2] = 0;
+ i4 = HEAP32[12916 >> 2] | 0;
+ i5 = 1 << i3;
+ if ((i4 & i5 | 0) == 0) {
+ HEAP32[12916 >> 2] = i4 | i5;
+ HEAP32[i6 >> 2] = i2;
+ HEAP32[i24 + (i12 + 24) >> 2] = i6;
+ HEAP32[i24 + (i12 + 12) >> 2] = i2;
+ HEAP32[i24 + (i12 + 8) >> 2] = i2;
+ break;
+ }
+ i4 = HEAP32[i6 >> 2] | 0;
+ if ((i3 | 0) == 31) {
+ i3 = 0;
+ } else {
+ i3 = 25 - (i3 >>> 1) | 0;
+ }
+ L225 : do {
+ if ((HEAP32[i4 + 4 >> 2] & -8 | 0) != (i25 | 0)) {
+ i3 = i25 << i3;
+ while (1) {
+ i6 = i4 + (i3 >>> 31 << 2) + 16 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ break;
+ }
+ if ((HEAP32[i5 + 4 >> 2] & -8 | 0) == (i25 | 0)) {
+ i18 = i5;
+ break L225;
+ } else {
+ i3 = i3 << 1;
+ i4 = i5;
+ }
+ }
+ if (i6 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i6 >> 2] = i2;
+ HEAP32[i24 + (i12 + 24) >> 2] = i4;
+ HEAP32[i24 + (i12 + 12) >> 2] = i2;
+ HEAP32[i24 + (i12 + 8) >> 2] = i2;
+ break L204;
+ }
+ } else {
+ i18 = i4;
+ }
+ } while (0);
+ i4 = i18 + 8 | 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ i5 = HEAP32[12928 >> 2] | 0;
+ if (i18 >>> 0 < i5 >>> 0) {
+ _abort();
+ }
+ if (i3 >>> 0 < i5 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i3 + 12 >> 2] = i2;
+ HEAP32[i4 >> 2] = i2;
+ HEAP32[i24 + (i12 + 8) >> 2] = i3;
+ HEAP32[i24 + (i12 + 12) >> 2] = i18;
+ HEAP32[i24 + (i12 + 24) >> 2] = 0;
+ break;
+ }
+ } else {
+ i32 = i25 + i12 | 0;
+ HEAP32[i24 + 4 >> 2] = i32 | 3;
+ i32 = i24 + (i32 + 4) | 0;
+ HEAP32[i32 >> 2] = HEAP32[i32 >> 2] | 1;
+ }
+ } while (0);
+ i32 = i24 + 8 | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ }
+ } else {
+ i12 = -1;
+ }
+ }
+ } while (0);
+ i18 = HEAP32[12920 >> 2] | 0;
+ if (!(i12 >>> 0 > i18 >>> 0)) {
+ i3 = i18 - i12 | 0;
+ i2 = HEAP32[12932 >> 2] | 0;
+ if (i3 >>> 0 > 15) {
+ HEAP32[12932 >> 2] = i2 + i12;
+ HEAP32[12920 >> 2] = i3;
+ HEAP32[i2 + (i12 + 4) >> 2] = i3 | 1;
+ HEAP32[i2 + i18 >> 2] = i3;
+ HEAP32[i2 + 4 >> 2] = i12 | 3;
+ } else {
+ HEAP32[12920 >> 2] = 0;
+ HEAP32[12932 >> 2] = 0;
+ HEAP32[i2 + 4 >> 2] = i18 | 3;
+ i32 = i2 + (i18 + 4) | 0;
+ HEAP32[i32 >> 2] = HEAP32[i32 >> 2] | 1;
+ }
+ i32 = i2 + 8 | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ i18 = HEAP32[12924 >> 2] | 0;
+ if (i12 >>> 0 < i18 >>> 0) {
+ i31 = i18 - i12 | 0;
+ HEAP32[12924 >> 2] = i31;
+ i32 = HEAP32[12936 >> 2] | 0;
+ HEAP32[12936 >> 2] = i32 + i12;
+ HEAP32[i32 + (i12 + 4) >> 2] = i31 | 1;
+ HEAP32[i32 + 4 >> 2] = i12 | 3;
+ i32 = i32 + 8 | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ do {
+ if ((HEAP32[3346] | 0) == 0) {
+ i18 = _sysconf(30) | 0;
+ if ((i18 + -1 & i18 | 0) == 0) {
+ HEAP32[13392 >> 2] = i18;
+ HEAP32[13388 >> 2] = i18;
+ HEAP32[13396 >> 2] = -1;
+ HEAP32[13400 >> 2] = -1;
+ HEAP32[13404 >> 2] = 0;
+ HEAP32[13356 >> 2] = 0;
+ HEAP32[3346] = (_time(0) | 0) & -16 ^ 1431655768;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ i20 = i12 + 48 | 0;
+ i25 = HEAP32[13392 >> 2] | 0;
+ i21 = i12 + 47 | 0;
+ i22 = i25 + i21 | 0;
+ i25 = 0 - i25 | 0;
+ i18 = i22 & i25;
+ if (!(i18 >>> 0 > i12 >>> 0)) {
+ i32 = 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ i24 = HEAP32[13352 >> 2] | 0;
+ if ((i24 | 0) != 0 ? (i31 = HEAP32[13344 >> 2] | 0, i32 = i31 + i18 | 0, i32 >>> 0 <= i31 >>> 0 | i32 >>> 0 > i24 >>> 0) : 0) {
+ i32 = 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ L269 : do {
+ if ((HEAP32[13356 >> 2] & 4 | 0) == 0) {
+ i26 = HEAP32[12936 >> 2] | 0;
+ L271 : do {
+ if ((i26 | 0) != 0) {
+ i24 = 13360 | 0;
+ while (1) {
+ i27 = HEAP32[i24 >> 2] | 0;
+ if (!(i27 >>> 0 > i26 >>> 0) ? (i23 = i24 + 4 | 0, (i27 + (HEAP32[i23 >> 2] | 0) | 0) >>> 0 > i26 >>> 0) : 0) {
+ break;
+ }
+ i24 = HEAP32[i24 + 8 >> 2] | 0;
+ if ((i24 | 0) == 0) {
+ i13 = 182;
+ break L271;
+ }
+ }
+ if ((i24 | 0) != 0) {
+ i25 = i22 - (HEAP32[12924 >> 2] | 0) & i25;
+ if (i25 >>> 0 < 2147483647) {
+ i13 = _sbrk(i25 | 0) | 0;
+ i26 = (i13 | 0) == ((HEAP32[i24 >> 2] | 0) + (HEAP32[i23 >> 2] | 0) | 0);
+ i22 = i13;
+ i24 = i25;
+ i23 = i26 ? i13 : -1;
+ i25 = i26 ? i25 : 0;
+ i13 = 191;
+ } else {
+ i25 = 0;
+ }
+ } else {
+ i13 = 182;
+ }
+ } else {
+ i13 = 182;
+ }
+ } while (0);
+ do {
+ if ((i13 | 0) == 182) {
+ i23 = _sbrk(0) | 0;
+ if ((i23 | 0) != (-1 | 0)) {
+ i24 = i23;
+ i22 = HEAP32[13388 >> 2] | 0;
+ i25 = i22 + -1 | 0;
+ if ((i25 & i24 | 0) == 0) {
+ i25 = i18;
+ } else {
+ i25 = i18 - i24 + (i25 + i24 & 0 - i22) | 0;
+ }
+ i24 = HEAP32[13344 >> 2] | 0;
+ i26 = i24 + i25 | 0;
+ if (i25 >>> 0 > i12 >>> 0 & i25 >>> 0 < 2147483647) {
+ i22 = HEAP32[13352 >> 2] | 0;
+ if ((i22 | 0) != 0 ? i26 >>> 0 <= i24 >>> 0 | i26 >>> 0 > i22 >>> 0 : 0) {
+ i25 = 0;
+ break;
+ }
+ i22 = _sbrk(i25 | 0) | 0;
+ i13 = (i22 | 0) == (i23 | 0);
+ i24 = i25;
+ i23 = i13 ? i23 : -1;
+ i25 = i13 ? i25 : 0;
+ i13 = 191;
+ } else {
+ i25 = 0;
+ }
+ } else {
+ i25 = 0;
+ }
+ }
+ } while (0);
+ L291 : do {
+ if ((i13 | 0) == 191) {
+ i13 = 0 - i24 | 0;
+ if ((i23 | 0) != (-1 | 0)) {
+ i17 = i23;
+ i14 = i25;
+ i13 = 202;
+ break L269;
+ }
+ do {
+ if ((i22 | 0) != (-1 | 0) & i24 >>> 0 < 2147483647 & i24 >>> 0 < i20 >>> 0 ? (i19 = HEAP32[13392 >> 2] | 0, i19 = i21 - i24 + i19 & 0 - i19, i19 >>> 0 < 2147483647) : 0) {
+ if ((_sbrk(i19 | 0) | 0) == (-1 | 0)) {
+ _sbrk(i13 | 0) | 0;
+ break L291;
+ } else {
+ i24 = i19 + i24 | 0;
+ break;
+ }
+ }
+ } while (0);
+ if ((i22 | 0) != (-1 | 0)) {
+ i17 = i22;
+ i14 = i24;
+ i13 = 202;
+ break L269;
+ }
+ }
+ } while (0);
+ HEAP32[13356 >> 2] = HEAP32[13356 >> 2] | 4;
+ i13 = 199;
+ } else {
+ i25 = 0;
+ i13 = 199;
+ }
+ } while (0);
+ if ((((i13 | 0) == 199 ? i18 >>> 0 < 2147483647 : 0) ? (i17 = _sbrk(i18 | 0) | 0, i16 = _sbrk(0) | 0, (i16 | 0) != (-1 | 0) & (i17 | 0) != (-1 | 0) & i17 >>> 0 < i16 >>> 0) : 0) ? (i15 = i16 - i17 | 0, i14 = i15 >>> 0 > (i12 + 40 | 0) >>> 0, i14) : 0) {
+ i14 = i14 ? i15 : i25;
+ i13 = 202;
+ }
+ if ((i13 | 0) == 202) {
+ i15 = (HEAP32[13344 >> 2] | 0) + i14 | 0;
+ HEAP32[13344 >> 2] = i15;
+ if (i15 >>> 0 > (HEAP32[13348 >> 2] | 0) >>> 0) {
+ HEAP32[13348 >> 2] = i15;
+ }
+ i15 = HEAP32[12936 >> 2] | 0;
+ L311 : do {
+ if ((i15 | 0) != 0) {
+ i21 = 13360 | 0;
+ while (1) {
+ i16 = HEAP32[i21 >> 2] | 0;
+ i19 = i21 + 4 | 0;
+ i20 = HEAP32[i19 >> 2] | 0;
+ if ((i17 | 0) == (i16 + i20 | 0)) {
+ i13 = 214;
+ break;
+ }
+ i18 = HEAP32[i21 + 8 >> 2] | 0;
+ if ((i18 | 0) == 0) {
+ break;
+ } else {
+ i21 = i18;
+ }
+ }
+ if (((i13 | 0) == 214 ? (HEAP32[i21 + 12 >> 2] & 8 | 0) == 0 : 0) ? i15 >>> 0 >= i16 >>> 0 & i15 >>> 0 < i17 >>> 0 : 0) {
+ HEAP32[i19 >> 2] = i20 + i14;
+ i2 = (HEAP32[12924 >> 2] | 0) + i14 | 0;
+ i3 = i15 + 8 | 0;
+ if ((i3 & 7 | 0) == 0) {
+ i3 = 0;
+ } else {
+ i3 = 0 - i3 & 7;
+ }
+ i32 = i2 - i3 | 0;
+ HEAP32[12936 >> 2] = i15 + i3;
+ HEAP32[12924 >> 2] = i32;
+ HEAP32[i15 + (i3 + 4) >> 2] = i32 | 1;
+ HEAP32[i15 + (i2 + 4) >> 2] = 40;
+ HEAP32[12940 >> 2] = HEAP32[13400 >> 2];
+ break;
+ }
+ if (i17 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ HEAP32[12928 >> 2] = i17;
+ }
+ i19 = i17 + i14 | 0;
+ i16 = 13360 | 0;
+ while (1) {
+ if ((HEAP32[i16 >> 2] | 0) == (i19 | 0)) {
+ i13 = 224;
+ break;
+ }
+ i18 = HEAP32[i16 + 8 >> 2] | 0;
+ if ((i18 | 0) == 0) {
+ break;
+ } else {
+ i16 = i18;
+ }
+ }
+ if ((i13 | 0) == 224 ? (HEAP32[i16 + 12 >> 2] & 8 | 0) == 0 : 0) {
+ HEAP32[i16 >> 2] = i17;
+ i6 = i16 + 4 | 0;
+ HEAP32[i6 >> 2] = (HEAP32[i6 >> 2] | 0) + i14;
+ i6 = i17 + 8 | 0;
+ if ((i6 & 7 | 0) == 0) {
+ i6 = 0;
+ } else {
+ i6 = 0 - i6 & 7;
+ }
+ i7 = i17 + (i14 + 8) | 0;
+ if ((i7 & 7 | 0) == 0) {
+ i13 = 0;
+ } else {
+ i13 = 0 - i7 & 7;
+ }
+ i15 = i17 + (i13 + i14) | 0;
+ i8 = i6 + i12 | 0;
+ i7 = i17 + i8 | 0;
+ i10 = i15 - (i17 + i6) - i12 | 0;
+ HEAP32[i17 + (i6 + 4) >> 2] = i12 | 3;
+ L348 : do {
+ if ((i15 | 0) != (HEAP32[12936 >> 2] | 0)) {
+ if ((i15 | 0) == (HEAP32[12932 >> 2] | 0)) {
+ i32 = (HEAP32[12920 >> 2] | 0) + i10 | 0;
+ HEAP32[12920 >> 2] = i32;
+ HEAP32[12932 >> 2] = i7;
+ HEAP32[i17 + (i8 + 4) >> 2] = i32 | 1;
+ HEAP32[i17 + (i32 + i8) >> 2] = i32;
+ break;
+ }
+ i12 = i14 + 4 | 0;
+ i18 = HEAP32[i17 + (i12 + i13) >> 2] | 0;
+ if ((i18 & 3 | 0) == 1) {
+ i11 = i18 & -8;
+ i16 = i18 >>> 3;
+ do {
+ if (!(i18 >>> 0 < 256)) {
+ i9 = HEAP32[i17 + ((i13 | 24) + i14) >> 2] | 0;
+ i19 = HEAP32[i17 + (i14 + 12 + i13) >> 2] | 0;
+ do {
+ if ((i19 | 0) == (i15 | 0)) {
+ i19 = i13 | 16;
+ i18 = i17 + (i12 + i19) | 0;
+ i16 = HEAP32[i18 >> 2] | 0;
+ if ((i16 | 0) == 0) {
+ i18 = i17 + (i19 + i14) | 0;
+ i16 = HEAP32[i18 >> 2] | 0;
+ if ((i16 | 0) == 0) {
+ i5 = 0;
+ break;
+ }
+ }
+ while (1) {
+ i20 = i16 + 20 | 0;
+ i19 = HEAP32[i20 >> 2] | 0;
+ if ((i19 | 0) != 0) {
+ i16 = i19;
+ i18 = i20;
+ continue;
+ }
+ i19 = i16 + 16 | 0;
+ i20 = HEAP32[i19 >> 2] | 0;
+ if ((i20 | 0) == 0) {
+ break;
+ } else {
+ i16 = i20;
+ i18 = i19;
+ }
+ }
+ if (i18 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i18 >> 2] = 0;
+ i5 = i16;
+ break;
+ }
+ } else {
+ i18 = HEAP32[i17 + ((i13 | 8) + i14) >> 2] | 0;
+ if (i18 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i16 = i18 + 12 | 0;
+ if ((HEAP32[i16 >> 2] | 0) != (i15 | 0)) {
+ _abort();
+ }
+ i20 = i19 + 8 | 0;
+ if ((HEAP32[i20 >> 2] | 0) == (i15 | 0)) {
+ HEAP32[i16 >> 2] = i19;
+ HEAP32[i20 >> 2] = i18;
+ i5 = i19;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ if ((i9 | 0) != 0) {
+ i16 = HEAP32[i17 + (i14 + 28 + i13) >> 2] | 0;
+ i18 = 13216 + (i16 << 2) | 0;
+ if ((i15 | 0) == (HEAP32[i18 >> 2] | 0)) {
+ HEAP32[i18 >> 2] = i5;
+ if ((i5 | 0) == 0) {
+ HEAP32[12916 >> 2] = HEAP32[12916 >> 2] & ~(1 << i16);
+ break;
+ }
+ } else {
+ if (i9 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i16 = i9 + 16 | 0;
+ if ((HEAP32[i16 >> 2] | 0) == (i15 | 0)) {
+ HEAP32[i16 >> 2] = i5;
+ } else {
+ HEAP32[i9 + 20 >> 2] = i5;
+ }
+ if ((i5 | 0) == 0) {
+ break;
+ }
+ }
+ if (i5 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i5 + 24 >> 2] = i9;
+ i15 = i13 | 16;
+ i9 = HEAP32[i17 + (i15 + i14) >> 2] | 0;
+ do {
+ if ((i9 | 0) != 0) {
+ if (i9 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i5 + 16 >> 2] = i9;
+ HEAP32[i9 + 24 >> 2] = i5;
+ break;
+ }
+ }
+ } while (0);
+ i9 = HEAP32[i17 + (i12 + i15) >> 2] | 0;
+ if ((i9 | 0) != 0) {
+ if (i9 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i5 + 20 >> 2] = i9;
+ HEAP32[i9 + 24 >> 2] = i5;
+ break;
+ }
+ }
+ }
+ } else {
+ i5 = HEAP32[i17 + ((i13 | 8) + i14) >> 2] | 0;
+ i12 = HEAP32[i17 + (i14 + 12 + i13) >> 2] | 0;
+ i18 = 12952 + (i16 << 1 << 2) | 0;
+ if ((i5 | 0) != (i18 | 0)) {
+ if (i5 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ if ((HEAP32[i5 + 12 >> 2] | 0) != (i15 | 0)) {
+ _abort();
+ }
+ }
+ if ((i12 | 0) == (i5 | 0)) {
+ HEAP32[3228] = HEAP32[3228] & ~(1 << i16);
+ break;
+ }
+ if ((i12 | 0) != (i18 | 0)) {
+ if (i12 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i16 = i12 + 8 | 0;
+ if ((HEAP32[i16 >> 2] | 0) == (i15 | 0)) {
+ i9 = i16;
+ } else {
+ _abort();
+ }
+ } else {
+ i9 = i12 + 8 | 0;
+ }
+ HEAP32[i5 + 12 >> 2] = i12;
+ HEAP32[i9 >> 2] = i5;
+ }
+ } while (0);
+ i15 = i17 + ((i11 | i13) + i14) | 0;
+ i10 = i11 + i10 | 0;
+ }
+ i5 = i15 + 4 | 0;
+ HEAP32[i5 >> 2] = HEAP32[i5 >> 2] & -2;
+ HEAP32[i17 + (i8 + 4) >> 2] = i10 | 1;
+ HEAP32[i17 + (i10 + i8) >> 2] = i10;
+ i5 = i10 >>> 3;
+ if (i10 >>> 0 < 256) {
+ i10 = i5 << 1;
+ i2 = 12952 + (i10 << 2) | 0;
+ i9 = HEAP32[3228] | 0;
+ i5 = 1 << i5;
+ if ((i9 & i5 | 0) != 0) {
+ i9 = 12952 + (i10 + 2 << 2) | 0;
+ i5 = HEAP32[i9 >> 2] | 0;
+ if (i5 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i3 = i9;
+ i4 = i5;
+ }
+ } else {
+ HEAP32[3228] = i9 | i5;
+ i3 = 12952 + (i10 + 2 << 2) | 0;
+ i4 = i2;
+ }
+ HEAP32[i3 >> 2] = i7;
+ HEAP32[i4 + 12 >> 2] = i7;
+ HEAP32[i17 + (i8 + 8) >> 2] = i4;
+ HEAP32[i17 + (i8 + 12) >> 2] = i2;
+ break;
+ }
+ i3 = i10 >>> 8;
+ if ((i3 | 0) != 0) {
+ if (i10 >>> 0 > 16777215) {
+ i3 = 31;
+ } else {
+ i31 = (i3 + 1048320 | 0) >>> 16 & 8;
+ i32 = i3 << i31;
+ i30 = (i32 + 520192 | 0) >>> 16 & 4;
+ i32 = i32 << i30;
+ i3 = (i32 + 245760 | 0) >>> 16 & 2;
+ i3 = 14 - (i30 | i31 | i3) + (i32 << i3 >>> 15) | 0;
+ i3 = i10 >>> (i3 + 7 | 0) & 1 | i3 << 1;
+ }
+ } else {
+ i3 = 0;
+ }
+ i4 = 13216 + (i3 << 2) | 0;
+ HEAP32[i17 + (i8 + 28) >> 2] = i3;
+ HEAP32[i17 + (i8 + 20) >> 2] = 0;
+ HEAP32[i17 + (i8 + 16) >> 2] = 0;
+ i9 = HEAP32[12916 >> 2] | 0;
+ i5 = 1 << i3;
+ if ((i9 & i5 | 0) == 0) {
+ HEAP32[12916 >> 2] = i9 | i5;
+ HEAP32[i4 >> 2] = i7;
+ HEAP32[i17 + (i8 + 24) >> 2] = i4;
+ HEAP32[i17 + (i8 + 12) >> 2] = i7;
+ HEAP32[i17 + (i8 + 8) >> 2] = i7;
+ break;
+ }
+ i4 = HEAP32[i4 >> 2] | 0;
+ if ((i3 | 0) == 31) {
+ i3 = 0;
+ } else {
+ i3 = 25 - (i3 >>> 1) | 0;
+ }
+ L445 : do {
+ if ((HEAP32[i4 + 4 >> 2] & -8 | 0) != (i10 | 0)) {
+ i3 = i10 << i3;
+ while (1) {
+ i5 = i4 + (i3 >>> 31 << 2) + 16 | 0;
+ i9 = HEAP32[i5 >> 2] | 0;
+ if ((i9 | 0) == 0) {
+ break;
+ }
+ if ((HEAP32[i9 + 4 >> 2] & -8 | 0) == (i10 | 0)) {
+ i2 = i9;
+ break L445;
+ } else {
+ i3 = i3 << 1;
+ i4 = i9;
+ }
+ }
+ if (i5 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i5 >> 2] = i7;
+ HEAP32[i17 + (i8 + 24) >> 2] = i4;
+ HEAP32[i17 + (i8 + 12) >> 2] = i7;
+ HEAP32[i17 + (i8 + 8) >> 2] = i7;
+ break L348;
+ }
+ } else {
+ i2 = i4;
+ }
+ } while (0);
+ i4 = i2 + 8 | 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ i5 = HEAP32[12928 >> 2] | 0;
+ if (i2 >>> 0 < i5 >>> 0) {
+ _abort();
+ }
+ if (i3 >>> 0 < i5 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i3 + 12 >> 2] = i7;
+ HEAP32[i4 >> 2] = i7;
+ HEAP32[i17 + (i8 + 8) >> 2] = i3;
+ HEAP32[i17 + (i8 + 12) >> 2] = i2;
+ HEAP32[i17 + (i8 + 24) >> 2] = 0;
+ break;
+ }
+ } else {
+ i32 = (HEAP32[12924 >> 2] | 0) + i10 | 0;
+ HEAP32[12924 >> 2] = i32;
+ HEAP32[12936 >> 2] = i7;
+ HEAP32[i17 + (i8 + 4) >> 2] = i32 | 1;
+ }
+ } while (0);
+ i32 = i17 + (i6 | 8) | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ i3 = 13360 | 0;
+ while (1) {
+ i2 = HEAP32[i3 >> 2] | 0;
+ if (!(i2 >>> 0 > i15 >>> 0) ? (i11 = HEAP32[i3 + 4 >> 2] | 0, i10 = i2 + i11 | 0, i10 >>> 0 > i15 >>> 0) : 0) {
+ break;
+ }
+ i3 = HEAP32[i3 + 8 >> 2] | 0;
+ }
+ i3 = i2 + (i11 + -39) | 0;
+ if ((i3 & 7 | 0) == 0) {
+ i3 = 0;
+ } else {
+ i3 = 0 - i3 & 7;
+ }
+ i2 = i2 + (i11 + -47 + i3) | 0;
+ i2 = i2 >>> 0 < (i15 + 16 | 0) >>> 0 ? i15 : i2;
+ i3 = i2 + 8 | 0;
+ i4 = i17 + 8 | 0;
+ if ((i4 & 7 | 0) == 0) {
+ i4 = 0;
+ } else {
+ i4 = 0 - i4 & 7;
+ }
+ i32 = i14 + -40 - i4 | 0;
+ HEAP32[12936 >> 2] = i17 + i4;
+ HEAP32[12924 >> 2] = i32;
+ HEAP32[i17 + (i4 + 4) >> 2] = i32 | 1;
+ HEAP32[i17 + (i14 + -36) >> 2] = 40;
+ HEAP32[12940 >> 2] = HEAP32[13400 >> 2];
+ HEAP32[i2 + 4 >> 2] = 27;
+ HEAP32[i3 + 0 >> 2] = HEAP32[13360 >> 2];
+ HEAP32[i3 + 4 >> 2] = HEAP32[13364 >> 2];
+ HEAP32[i3 + 8 >> 2] = HEAP32[13368 >> 2];
+ HEAP32[i3 + 12 >> 2] = HEAP32[13372 >> 2];
+ HEAP32[13360 >> 2] = i17;
+ HEAP32[13364 >> 2] = i14;
+ HEAP32[13372 >> 2] = 0;
+ HEAP32[13368 >> 2] = i3;
+ i4 = i2 + 28 | 0;
+ HEAP32[i4 >> 2] = 7;
+ if ((i2 + 32 | 0) >>> 0 < i10 >>> 0) {
+ while (1) {
+ i3 = i4 + 4 | 0;
+ HEAP32[i3 >> 2] = 7;
+ if ((i4 + 8 | 0) >>> 0 < i10 >>> 0) {
+ i4 = i3;
+ } else {
+ break;
+ }
+ }
+ }
+ if ((i2 | 0) != (i15 | 0)) {
+ i2 = i2 - i15 | 0;
+ i3 = i15 + (i2 + 4) | 0;
+ HEAP32[i3 >> 2] = HEAP32[i3 >> 2] & -2;
+ HEAP32[i15 + 4 >> 2] = i2 | 1;
+ HEAP32[i15 + i2 >> 2] = i2;
+ i3 = i2 >>> 3;
+ if (i2 >>> 0 < 256) {
+ i4 = i3 << 1;
+ i2 = 12952 + (i4 << 2) | 0;
+ i5 = HEAP32[3228] | 0;
+ i3 = 1 << i3;
+ if ((i5 & i3 | 0) != 0) {
+ i4 = 12952 + (i4 + 2 << 2) | 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ if (i3 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i7 = i4;
+ i8 = i3;
+ }
+ } else {
+ HEAP32[3228] = i5 | i3;
+ i7 = 12952 + (i4 + 2 << 2) | 0;
+ i8 = i2;
+ }
+ HEAP32[i7 >> 2] = i15;
+ HEAP32[i8 + 12 >> 2] = i15;
+ HEAP32[i15 + 8 >> 2] = i8;
+ HEAP32[i15 + 12 >> 2] = i2;
+ break;
+ }
+ i3 = i2 >>> 8;
+ if ((i3 | 0) != 0) {
+ if (i2 >>> 0 > 16777215) {
+ i3 = 31;
+ } else {
+ i31 = (i3 + 1048320 | 0) >>> 16 & 8;
+ i32 = i3 << i31;
+ i30 = (i32 + 520192 | 0) >>> 16 & 4;
+ i32 = i32 << i30;
+ i3 = (i32 + 245760 | 0) >>> 16 & 2;
+ i3 = 14 - (i30 | i31 | i3) + (i32 << i3 >>> 15) | 0;
+ i3 = i2 >>> (i3 + 7 | 0) & 1 | i3 << 1;
+ }
+ } else {
+ i3 = 0;
+ }
+ i7 = 13216 + (i3 << 2) | 0;
+ HEAP32[i15 + 28 >> 2] = i3;
+ HEAP32[i15 + 20 >> 2] = 0;
+ HEAP32[i15 + 16 >> 2] = 0;
+ i4 = HEAP32[12916 >> 2] | 0;
+ i5 = 1 << i3;
+ if ((i4 & i5 | 0) == 0) {
+ HEAP32[12916 >> 2] = i4 | i5;
+ HEAP32[i7 >> 2] = i15;
+ HEAP32[i15 + 24 >> 2] = i7;
+ HEAP32[i15 + 12 >> 2] = i15;
+ HEAP32[i15 + 8 >> 2] = i15;
+ break;
+ }
+ i4 = HEAP32[i7 >> 2] | 0;
+ if ((i3 | 0) == 31) {
+ i3 = 0;
+ } else {
+ i3 = 25 - (i3 >>> 1) | 0;
+ }
+ L499 : do {
+ if ((HEAP32[i4 + 4 >> 2] & -8 | 0) != (i2 | 0)) {
+ i3 = i2 << i3;
+ while (1) {
+ i7 = i4 + (i3 >>> 31 << 2) + 16 | 0;
+ i5 = HEAP32[i7 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ break;
+ }
+ if ((HEAP32[i5 + 4 >> 2] & -8 | 0) == (i2 | 0)) {
+ i6 = i5;
+ break L499;
+ } else {
+ i3 = i3 << 1;
+ i4 = i5;
+ }
+ }
+ if (i7 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i7 >> 2] = i15;
+ HEAP32[i15 + 24 >> 2] = i4;
+ HEAP32[i15 + 12 >> 2] = i15;
+ HEAP32[i15 + 8 >> 2] = i15;
+ break L311;
+ }
+ } else {
+ i6 = i4;
+ }
+ } while (0);
+ i4 = i6 + 8 | 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ i2 = HEAP32[12928 >> 2] | 0;
+ if (i6 >>> 0 < i2 >>> 0) {
+ _abort();
+ }
+ if (i3 >>> 0 < i2 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i3 + 12 >> 2] = i15;
+ HEAP32[i4 >> 2] = i15;
+ HEAP32[i15 + 8 >> 2] = i3;
+ HEAP32[i15 + 12 >> 2] = i6;
+ HEAP32[i15 + 24 >> 2] = 0;
+ break;
+ }
+ }
+ } else {
+ i32 = HEAP32[12928 >> 2] | 0;
+ if ((i32 | 0) == 0 | i17 >>> 0 < i32 >>> 0) {
+ HEAP32[12928 >> 2] = i17;
+ }
+ HEAP32[13360 >> 2] = i17;
+ HEAP32[13364 >> 2] = i14;
+ HEAP32[13372 >> 2] = 0;
+ HEAP32[12948 >> 2] = HEAP32[3346];
+ HEAP32[12944 >> 2] = -1;
+ i2 = 0;
+ do {
+ i32 = i2 << 1;
+ i31 = 12952 + (i32 << 2) | 0;
+ HEAP32[12952 + (i32 + 3 << 2) >> 2] = i31;
+ HEAP32[12952 + (i32 + 2 << 2) >> 2] = i31;
+ i2 = i2 + 1 | 0;
+ } while ((i2 | 0) != 32);
+ i2 = i17 + 8 | 0;
+ if ((i2 & 7 | 0) == 0) {
+ i2 = 0;
+ } else {
+ i2 = 0 - i2 & 7;
+ }
+ i32 = i14 + -40 - i2 | 0;
+ HEAP32[12936 >> 2] = i17 + i2;
+ HEAP32[12924 >> 2] = i32;
+ HEAP32[i17 + (i2 + 4) >> 2] = i32 | 1;
+ HEAP32[i17 + (i14 + -36) >> 2] = 40;
+ HEAP32[12940 >> 2] = HEAP32[13400 >> 2];
+ }
+ } while (0);
+ i2 = HEAP32[12924 >> 2] | 0;
+ if (i2 >>> 0 > i12 >>> 0) {
+ i31 = i2 - i12 | 0;
+ HEAP32[12924 >> 2] = i31;
+ i32 = HEAP32[12936 >> 2] | 0;
+ HEAP32[12936 >> 2] = i32 + i12;
+ HEAP32[i32 + (i12 + 4) >> 2] = i31 | 1;
+ HEAP32[i32 + 4 >> 2] = i12 | 3;
+ i32 = i32 + 8 | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ }
+ HEAP32[(___errno_location() | 0) >> 2] = 12;
+ i32 = 0;
+ STACKTOP = i1;
+ return i32 | 0;
+}
+function _llex(i2, i3) {
+ i2 = i2 | 0;
+ i3 = i3 | 0;
+ var i1 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0, i23 = 0, i24 = 0, i25 = 0, i26 = 0, i27 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i12 = i1;
+ i4 = i2 + 60 | 0;
+ HEAP32[(HEAP32[i4 >> 2] | 0) + 4 >> 2] = 0;
+ i5 = i2 + 56 | 0;
+ L1 : while (1) {
+ i13 = HEAP32[i2 >> 2] | 0;
+ L3 : while (1) {
+ switch (i13 | 0) {
+ case 11:
+ case 9:
+ case 12:
+ case 32:
+ {
+ break;
+ }
+ case 91:
+ {
+ i9 = 25;
+ break L1;
+ }
+ case 62:
+ {
+ i9 = 45;
+ break L1;
+ }
+ case 46:
+ {
+ i9 = 161;
+ break L1;
+ }
+ case 13:
+ case 10:
+ {
+ i9 = 4;
+ break L3;
+ }
+ case 45:
+ {
+ break L3;
+ }
+ case 61:
+ {
+ i9 = 29;
+ break L1;
+ }
+ case 39:
+ case 34:
+ {
+ i9 = 69;
+ break L1;
+ }
+ case 126:
+ {
+ i9 = 53;
+ break L1;
+ }
+ case 60:
+ {
+ i9 = 37;
+ break L1;
+ }
+ case 58:
+ {
+ i9 = 61;
+ break L1;
+ }
+ case 57:
+ case 56:
+ case 55:
+ case 54:
+ case 53:
+ case 52:
+ case 51:
+ case 50:
+ case 49:
+ case 48:
+ {
+ i20 = i13;
+ break L1;
+ }
+ case -1:
+ {
+ i2 = 286;
+ i9 = 306;
+ break L1;
+ }
+ default:
+ {
+ i9 = 283;
+ break L1;
+ }
+ }
+ i13 = HEAP32[i5 >> 2] | 0;
+ i27 = HEAP32[i13 >> 2] | 0;
+ HEAP32[i13 >> 2] = i27 + -1;
+ if ((i27 | 0) == 0) {
+ i13 = _luaZ_fill(i13) | 0;
+ } else {
+ i27 = i13 + 4 | 0;
+ i13 = HEAP32[i27 >> 2] | 0;
+ HEAP32[i27 >> 2] = i13 + 1;
+ i13 = HEAPU8[i13] | 0;
+ }
+ HEAP32[i2 >> 2] = i13;
+ }
+ if ((i9 | 0) == 4) {
+ i9 = 0;
+ _inclinenumber(i2);
+ continue;
+ }
+ i13 = HEAP32[i5 >> 2] | 0;
+ i27 = HEAP32[i13 >> 2] | 0;
+ HEAP32[i13 >> 2] = i27 + -1;
+ if ((i27 | 0) == 0) {
+ i13 = _luaZ_fill(i13) | 0;
+ } else {
+ i27 = i13 + 4 | 0;
+ i13 = HEAP32[i27 >> 2] | 0;
+ HEAP32[i27 >> 2] = i13 + 1;
+ i13 = HEAPU8[i13] | 0;
+ }
+ HEAP32[i2 >> 2] = i13;
+ if ((i13 | 0) != 45) {
+ i2 = 45;
+ i9 = 306;
+ break;
+ }
+ i13 = HEAP32[i5 >> 2] | 0;
+ i27 = HEAP32[i13 >> 2] | 0;
+ HEAP32[i13 >> 2] = i27 + -1;
+ if ((i27 | 0) == 0) {
+ i13 = _luaZ_fill(i13) | 0;
+ } else {
+ i27 = i13 + 4 | 0;
+ i13 = HEAP32[i27 >> 2] | 0;
+ HEAP32[i27 >> 2] = i13 + 1;
+ i13 = HEAPU8[i13] | 0;
+ }
+ HEAP32[i2 >> 2] = i13;
+ do {
+ if ((i13 | 0) == 91) {
+ i13 = _skip_sep(i2) | 0;
+ HEAP32[(HEAP32[i4 >> 2] | 0) + 4 >> 2] = 0;
+ if ((i13 | 0) > -1) {
+ _read_long_string(i2, 0, i13);
+ HEAP32[(HEAP32[i4 >> 2] | 0) + 4 >> 2] = 0;
+ continue L1;
+ } else {
+ i13 = HEAP32[i2 >> 2] | 0;
+ break;
+ }
+ }
+ } while (0);
+ while (1) {
+ if ((i13 | 0) == -1 | (i13 | 0) == 13 | (i13 | 0) == 10) {
+ continue L1;
+ }
+ i13 = HEAP32[i5 >> 2] | 0;
+ i27 = HEAP32[i13 >> 2] | 0;
+ HEAP32[i13 >> 2] = i27 + -1;
+ if ((i27 | 0) == 0) {
+ i13 = _luaZ_fill(i13) | 0;
+ } else {
+ i27 = i13 + 4 | 0;
+ i13 = HEAP32[i27 >> 2] | 0;
+ HEAP32[i27 >> 2] = i13 + 1;
+ i13 = HEAPU8[i13] | 0;
+ }
+ HEAP32[i2 >> 2] = i13;
+ }
+ }
+ if ((i9 | 0) == 25) {
+ i9 = _skip_sep(i2) | 0;
+ if ((i9 | 0) > -1) {
+ _read_long_string(i2, i3, i9);
+ i27 = 289;
+ STACKTOP = i1;
+ return i27 | 0;
+ }
+ if ((i9 | 0) == -1) {
+ i27 = 91;
+ STACKTOP = i1;
+ return i27 | 0;
+ } else {
+ _lexerror(i2, 12272, 289);
+ }
+ } else if ((i9 | 0) == 29) {
+ i3 = HEAP32[i5 >> 2] | 0;
+ i27 = HEAP32[i3 >> 2] | 0;
+ HEAP32[i3 >> 2] = i27 + -1;
+ if ((i27 | 0) == 0) {
+ i3 = _luaZ_fill(i3) | 0;
+ } else {
+ i27 = i3 + 4 | 0;
+ i3 = HEAP32[i27 >> 2] | 0;
+ HEAP32[i27 >> 2] = i3 + 1;
+ i3 = HEAPU8[i3] | 0;
+ }
+ HEAP32[i2 >> 2] = i3;
+ if ((i3 | 0) != 61) {
+ i27 = 61;
+ STACKTOP = i1;
+ return i27 | 0;
+ }
+ i3 = HEAP32[i5 >> 2] | 0;
+ i27 = HEAP32[i3 >> 2] | 0;
+ HEAP32[i3 >> 2] = i27 + -1;
+ if ((i27 | 0) == 0) {
+ i3 = _luaZ_fill(i3) | 0;
+ } else {
+ i27 = i3 + 4 | 0;
+ i3 = HEAP32[i27 >> 2] | 0;
+ HEAP32[i27 >> 2] = i3 + 1;
+ i3 = HEAPU8[i3] | 0;
+ }
+ HEAP32[i2 >> 2] = i3;
+ i27 = 281;
+ STACKTOP = i1;
+ return i27 | 0;
+ } else if ((i9 | 0) == 37) {
+ i3 = HEAP32[i5 >> 2] | 0;
+ i27 = HEAP32[i3 >> 2] | 0;
+ HEAP32[i3 >> 2] = i27 + -1;
+ if ((i27 | 0) == 0) {
+ i3 = _luaZ_fill(i3) | 0;
+ } else {
+ i27 = i3 + 4 | 0;
+ i3 = HEAP32[i27 >> 2] | 0;
+ HEAP32[i27 >> 2] = i3 + 1;
+ i3 = HEAPU8[i3] | 0;
+ }
+ HEAP32[i2 >> 2] = i3;
+ if ((i3 | 0) != 61) {
+ i27 = 60;
+ STACKTOP = i1;
+ return i27 | 0;
+ }
+ i3 = HEAP32[i5 >> 2] | 0;
+ i27 = HEAP32[i3 >> 2] | 0;
+ HEAP32[i3 >> 2] = i27 + -1;
+ if ((i27 | 0) == 0) {
+ i3 = _luaZ_fill(i3) | 0;
+ } else {
+ i27 = i3 + 4 | 0;
+ i3 = HEAP32[i27 >> 2] | 0;
+ HEAP32[i27 >> 2] = i3 + 1;
+ i3 = HEAPU8[i3] | 0;
+ }
+ HEAP32[i2 >> 2] = i3;
+ i27 = 283;
+ STACKTOP = i1;
+ return i27 | 0;
+ } else if ((i9 | 0) == 45) {
+ i3 = HEAP32[i5 >> 2] | 0;
+ i27 = HEAP32[i3 >> 2] | 0;
+ HEAP32[i3 >> 2] = i27 + -1;
+ if ((i27 | 0) == 0) {
+ i3 = _luaZ_fill(i3) | 0;
+ } else {
+ i27 = i3 + 4 | 0;
+ i3 = HEAP32[i27 >> 2] | 0;
+ HEAP32[i27 >> 2] = i3 + 1;
+ i3 = HEAPU8[i3] | 0;
+ }
+ HEAP32[i2 >> 2] = i3;
+ if ((i3 | 0) != 61) {
+ i27 = 62;
+ STACKTOP = i1;
+ return i27 | 0;
+ }
+ i3 = HEAP32[i5 >> 2] | 0;
+ i27 = HEAP32[i3 >> 2] | 0;
+ HEAP32[i3 >> 2] = i27 + -1;
+ if ((i27 | 0) == 0) {
+ i3 = _luaZ_fill(i3) | 0;
+ } else {
+ i27 = i3 + 4 | 0;
+ i3 = HEAP32[i27 >> 2] | 0;
+ HEAP32[i27 >> 2] = i3 + 1;
+ i3 = HEAPU8[i3] | 0;
+ }
+ HEAP32[i2 >> 2] = i3;
+ i27 = 282;
+ STACKTOP = i1;
+ return i27 | 0;
+ } else if ((i9 | 0) == 53) {
+ i3 = HEAP32[i5 >> 2] | 0;
+ i27 = HEAP32[i3 >> 2] | 0;
+ HEAP32[i3 >> 2] = i27 + -1;
+ if ((i27 | 0) == 0) {
+ i3 = _luaZ_fill(i3) | 0;
+ } else {
+ i27 = i3 + 4 | 0;
+ i3 = HEAP32[i27 >> 2] | 0;
+ HEAP32[i27 >> 2] = i3 + 1;
+ i3 = HEAPU8[i3] | 0;
+ }
+ HEAP32[i2 >> 2] = i3;
+ if ((i3 | 0) != 61) {
+ i27 = 126;
+ STACKTOP = i1;
+ return i27 | 0;
+ }
+ i3 = HEAP32[i5 >> 2] | 0;
+ i27 = HEAP32[i3 >> 2] | 0;
+ HEAP32[i3 >> 2] = i27 + -1;
+ if ((i27 | 0) == 0) {
+ i3 = _luaZ_fill(i3) | 0;
+ } else {
+ i27 = i3 + 4 | 0;
+ i3 = HEAP32[i27 >> 2] | 0;
+ HEAP32[i27 >> 2] = i3 + 1;
+ i3 = HEAPU8[i3] | 0;
+ }
+ HEAP32[i2 >> 2] = i3;
+ i27 = 284;
+ STACKTOP = i1;
+ return i27 | 0;
+ } else if ((i9 | 0) == 61) {
+ i3 = HEAP32[i5 >> 2] | 0;
+ i27 = HEAP32[i3 >> 2] | 0;
+ HEAP32[i3 >> 2] = i27 + -1;
+ if ((i27 | 0) == 0) {
+ i3 = _luaZ_fill(i3) | 0;
+ } else {
+ i27 = i3 + 4 | 0;
+ i3 = HEAP32[i27 >> 2] | 0;
+ HEAP32[i27 >> 2] = i3 + 1;
+ i3 = HEAPU8[i3] | 0;
+ }
+ HEAP32[i2 >> 2] = i3;
+ if ((i3 | 0) != 58) {
+ i27 = 58;
+ STACKTOP = i1;
+ return i27 | 0;
+ }
+ i3 = HEAP32[i5 >> 2] | 0;
+ i27 = HEAP32[i3 >> 2] | 0;
+ HEAP32[i3 >> 2] = i27 + -1;
+ if ((i27 | 0) == 0) {
+ i3 = _luaZ_fill(i3) | 0;
+ } else {
+ i27 = i3 + 4 | 0;
+ i3 = HEAP32[i27 >> 2] | 0;
+ HEAP32[i27 >> 2] = i3 + 1;
+ i3 = HEAPU8[i3] | 0;
+ }
+ HEAP32[i2 >> 2] = i3;
+ i27 = 285;
+ STACKTOP = i1;
+ return i27 | 0;
+ } else if ((i9 | 0) == 69) {
+ i14 = HEAP32[i4 >> 2] | 0;
+ i7 = i14 + 4 | 0;
+ i15 = HEAP32[i7 >> 2] | 0;
+ i8 = i14 + 8 | 0;
+ i6 = HEAP32[i8 >> 2] | 0;
+ do {
+ if ((i15 + 1 | 0) >>> 0 > i6 >>> 0) {
+ if (i6 >>> 0 > 2147483645) {
+ _lexerror(i2, 12368, 0);
+ }
+ i16 = i6 << 1;
+ i15 = HEAP32[i2 + 52 >> 2] | 0;
+ if ((i16 | 0) == -2) {
+ _luaM_toobig(i15);
+ } else {
+ i24 = _luaM_realloc_(i15, HEAP32[i14 >> 2] | 0, i6, i16) | 0;
+ HEAP32[i14 >> 2] = i24;
+ HEAP32[i8 >> 2] = i16;
+ i23 = HEAP32[i7 >> 2] | 0;
+ break;
+ }
+ } else {
+ i23 = i15;
+ i24 = HEAP32[i14 >> 2] | 0;
+ }
+ } while (0);
+ i6 = i13 & 255;
+ HEAP32[i7 >> 2] = i23 + 1;
+ HEAP8[i24 + i23 | 0] = i6;
+ i7 = HEAP32[i5 >> 2] | 0;
+ i27 = HEAP32[i7 >> 2] | 0;
+ HEAP32[i7 >> 2] = i27 + -1;
+ if ((i27 | 0) == 0) {
+ i14 = _luaZ_fill(i7) | 0;
+ } else {
+ i27 = i7 + 4 | 0;
+ i14 = HEAP32[i27 >> 2] | 0;
+ HEAP32[i27 >> 2] = i14 + 1;
+ i14 = HEAPU8[i14] | 0;
+ }
+ HEAP32[i2 >> 2] = i14;
+ L139 : do {
+ if ((i14 | 0) != (i13 | 0)) {
+ i7 = i2 + 52 | 0;
+ L141 : while (1) {
+ L143 : do {
+ if ((i14 | 0) == 92) {
+ i8 = HEAP32[i5 >> 2] | 0;
+ i27 = HEAP32[i8 >> 2] | 0;
+ HEAP32[i8 >> 2] = i27 + -1;
+ if ((i27 | 0) == 0) {
+ i8 = _luaZ_fill(i8) | 0;
+ } else {
+ i27 = i8 + 4 | 0;
+ i8 = HEAP32[i27 >> 2] | 0;
+ HEAP32[i27 >> 2] = i8 + 1;
+ i8 = HEAPU8[i8] | 0;
+ }
+ HEAP32[i2 >> 2] = i8;
+ switch (i8 | 0) {
+ case 13:
+ case 10:
+ {
+ _inclinenumber(i2);
+ i8 = 10;
+ break;
+ }
+ case 39:
+ case 34:
+ case 92:
+ {
+ i9 = 124;
+ break;
+ }
+ case 122:
+ {
+ i8 = HEAP32[i5 >> 2] | 0;
+ i27 = HEAP32[i8 >> 2] | 0;
+ HEAP32[i8 >> 2] = i27 + -1;
+ if ((i27 | 0) == 0) {
+ i14 = _luaZ_fill(i8) | 0;
+ } else {
+ i27 = i8 + 4 | 0;
+ i14 = HEAP32[i27 >> 2] | 0;
+ HEAP32[i27 >> 2] = i14 + 1;
+ i14 = HEAPU8[i14] | 0;
+ }
+ HEAP32[i2 >> 2] = i14;
+ if ((HEAP8[i14 + 10913 | 0] & 8) == 0) {
+ break L143;
+ }
+ while (1) {
+ if ((i14 | 0) == 13 | (i14 | 0) == 10) {
+ _inclinenumber(i2);
+ i14 = HEAP32[i2 >> 2] | 0;
+ } else {
+ i8 = HEAP32[i5 >> 2] | 0;
+ i27 = HEAP32[i8 >> 2] | 0;
+ HEAP32[i8 >> 2] = i27 + -1;
+ if ((i27 | 0) == 0) {
+ i14 = _luaZ_fill(i8) | 0;
+ } else {
+ i27 = i8 + 4 | 0;
+ i14 = HEAP32[i27 >> 2] | 0;
+ HEAP32[i27 >> 2] = i14 + 1;
+ i14 = HEAPU8[i14] | 0;
+ }
+ HEAP32[i2 >> 2] = i14;
+ }
+ if ((HEAP8[i14 + 10913 | 0] & 8) == 0) {
+ break L143;
+ }
+ }
+ }
+ case 118:
+ {
+ i8 = 11;
+ i9 = 124;
+ break;
+ }
+ case 120:
+ {
+ HEAP32[i12 >> 2] = 120;
+ i14 = 1;
+ i8 = 0;
+ while (1) {
+ i9 = HEAP32[i5 >> 2] | 0;
+ i27 = HEAP32[i9 >> 2] | 0;
+ HEAP32[i9 >> 2] = i27 + -1;
+ if ((i27 | 0) == 0) {
+ i9 = _luaZ_fill(i9) | 0;
+ } else {
+ i27 = i9 + 4 | 0;
+ i9 = HEAP32[i27 >> 2] | 0;
+ HEAP32[i27 >> 2] = i9 + 1;
+ i9 = HEAPU8[i9] | 0;
+ }
+ HEAP32[i2 >> 2] = i9;
+ HEAP32[i12 + (i14 << 2) >> 2] = i9;
+ if ((HEAP8[i9 + 10913 | 0] & 16) == 0) {
+ i9 = 100;
+ break L141;
+ }
+ i8 = (_luaO_hexavalue(i9) | 0) + (i8 << 4) | 0;
+ i14 = i14 + 1 | 0;
+ if ((i14 | 0) >= 3) {
+ i9 = 124;
+ break;
+ }
+ }
+ break;
+ }
+ case -1:
+ {
+ i14 = -1;
+ break L143;
+ }
+ case 98:
+ {
+ i8 = 8;
+ i9 = 124;
+ break;
+ }
+ case 102:
+ {
+ i8 = 12;
+ i9 = 124;
+ break;
+ }
+ case 110:
+ {
+ i8 = 10;
+ i9 = 124;
+ break;
+ }
+ case 114:
+ {
+ i8 = 13;
+ i9 = 124;
+ break;
+ }
+ case 116:
+ {
+ i8 = 9;
+ i9 = 124;
+ break;
+ }
+ case 97:
+ {
+ i8 = 7;
+ i9 = 124;
+ break;
+ }
+ default:
+ {
+ if ((HEAP8[i8 + 10913 | 0] & 2) == 0) {
+ i9 = 116;
+ break L141;
+ } else {
+ i15 = i8;
+ i14 = 0;
+ i8 = 0;
+ }
+ do {
+ if ((HEAP8[i15 + 10913 | 0] & 2) == 0) {
+ break;
+ }
+ HEAP32[i12 + (i14 << 2) >> 2] = i15;
+ i8 = i15 + -48 + (i8 * 10 | 0) | 0;
+ i15 = HEAP32[i5 >> 2] | 0;
+ i27 = HEAP32[i15 >> 2] | 0;
+ HEAP32[i15 >> 2] = i27 + -1;
+ if ((i27 | 0) == 0) {
+ i15 = _luaZ_fill(i15) | 0;
+ } else {
+ i27 = i15 + 4 | 0;
+ i15 = HEAP32[i27 >> 2] | 0;
+ HEAP32[i27 >> 2] = i15 + 1;
+ i15 = HEAPU8[i15] | 0;
+ }
+ HEAP32[i2 >> 2] = i15;
+ i14 = i14 + 1 | 0;
+ } while ((i14 | 0) < 3);
+ if ((i8 | 0) > 255) {
+ i9 = 123;
+ break L141;
+ }
+ }
+ }
+ if ((i9 | 0) == 124) {
+ i9 = 0;
+ i14 = HEAP32[i5 >> 2] | 0;
+ i27 = HEAP32[i14 >> 2] | 0;
+ HEAP32[i14 >> 2] = i27 + -1;
+ if ((i27 | 0) == 0) {
+ i14 = _luaZ_fill(i14) | 0;
+ } else {
+ i27 = i14 + 4 | 0;
+ i14 = HEAP32[i27 >> 2] | 0;
+ HEAP32[i27 >> 2] = i14 + 1;
+ i14 = HEAPU8[i14] | 0;
+ }
+ HEAP32[i2 >> 2] = i14;
+ }
+ i15 = HEAP32[i4 >> 2] | 0;
+ i14 = i15 + 4 | 0;
+ i18 = HEAP32[i14 >> 2] | 0;
+ i16 = i15 + 8 | 0;
+ i17 = HEAP32[i16 >> 2] | 0;
+ if ((i18 + 1 | 0) >>> 0 > i17 >>> 0) {
+ if (i17 >>> 0 > 2147483645) {
+ i9 = 131;
+ break L141;
+ }
+ i18 = i17 << 1;
+ i19 = HEAP32[i7 >> 2] | 0;
+ if ((i18 | 0) == -2) {
+ i9 = 133;
+ break L141;
+ }
+ i27 = _luaM_realloc_(i19, HEAP32[i15 >> 2] | 0, i17, i18) | 0;
+ HEAP32[i15 >> 2] = i27;
+ HEAP32[i16 >> 2] = i18;
+ i18 = HEAP32[i14 >> 2] | 0;
+ i15 = i27;
+ } else {
+ i15 = HEAP32[i15 >> 2] | 0;
+ }
+ HEAP32[i14 >> 2] = i18 + 1;
+ HEAP8[i15 + i18 | 0] = i8;
+ i14 = HEAP32[i2 >> 2] | 0;
+ } else if ((i14 | 0) == -1) {
+ i9 = 82;
+ break L141;
+ } else if ((i14 | 0) == 13 | (i14 | 0) == 10) {
+ i9 = 83;
+ break L141;
+ } else {
+ i15 = HEAP32[i4 >> 2] | 0;
+ i8 = i15 + 4 | 0;
+ i18 = HEAP32[i8 >> 2] | 0;
+ i17 = i15 + 8 | 0;
+ i16 = HEAP32[i17 >> 2] | 0;
+ if ((i18 + 1 | 0) >>> 0 > i16 >>> 0) {
+ if (i16 >>> 0 > 2147483645) {
+ i9 = 139;
+ break L141;
+ }
+ i19 = i16 << 1;
+ i18 = HEAP32[i7 >> 2] | 0;
+ if ((i19 | 0) == -2) {
+ i9 = 141;
+ break L141;
+ }
+ i27 = _luaM_realloc_(i18, HEAP32[i15 >> 2] | 0, i16, i19) | 0;
+ HEAP32[i15 >> 2] = i27;
+ HEAP32[i17 >> 2] = i19;
+ i18 = HEAP32[i8 >> 2] | 0;
+ i15 = i27;
+ } else {
+ i15 = HEAP32[i15 >> 2] | 0;
+ }
+ HEAP32[i8 >> 2] = i18 + 1;
+ HEAP8[i15 + i18 | 0] = i14;
+ i8 = HEAP32[i5 >> 2] | 0;
+ i27 = HEAP32[i8 >> 2] | 0;
+ HEAP32[i8 >> 2] = i27 + -1;
+ if ((i27 | 0) == 0) {
+ i14 = _luaZ_fill(i8) | 0;
+ } else {
+ i27 = i8 + 4 | 0;
+ i14 = HEAP32[i27 >> 2] | 0;
+ HEAP32[i27 >> 2] = i14 + 1;
+ i14 = HEAPU8[i14] | 0;
+ }
+ HEAP32[i2 >> 2] = i14;
+ }
+ } while (0);
+ if ((i14 | 0) == (i13 | 0)) {
+ break L139;
+ }
+ }
+ if ((i9 | 0) == 82) {
+ _lexerror(i2, 12400, 286);
+ } else if ((i9 | 0) == 83) {
+ _lexerror(i2, 12400, 289);
+ } else if ((i9 | 0) == 100) {
+ _escerror(i2, i12, i14 + 1 | 0, 12480);
+ } else if ((i9 | 0) == 116) {
+ _escerror(i2, i2, 1, 12424);
+ } else if ((i9 | 0) == 123) {
+ _escerror(i2, i12, i14, 12448);
+ } else if ((i9 | 0) == 131) {
+ _lexerror(i2, 12368, 0);
+ } else if ((i9 | 0) == 133) {
+ _luaM_toobig(i19);
+ } else if ((i9 | 0) == 139) {
+ _lexerror(i2, 12368, 0);
+ } else if ((i9 | 0) == 141) {
+ _luaM_toobig(i18);
+ }
+ }
+ } while (0);
+ i7 = HEAP32[i4 >> 2] | 0;
+ i8 = i7 + 4 | 0;
+ i13 = HEAP32[i8 >> 2] | 0;
+ i12 = i7 + 8 | 0;
+ i9 = HEAP32[i12 >> 2] | 0;
+ do {
+ if ((i13 + 1 | 0) >>> 0 > i9 >>> 0) {
+ if (i9 >>> 0 > 2147483645) {
+ _lexerror(i2, 12368, 0);
+ }
+ i14 = i9 << 1;
+ i13 = HEAP32[i2 + 52 >> 2] | 0;
+ if ((i14 | 0) == -2) {
+ _luaM_toobig(i13);
+ } else {
+ i11 = _luaM_realloc_(i13, HEAP32[i7 >> 2] | 0, i9, i14) | 0;
+ HEAP32[i7 >> 2] = i11;
+ HEAP32[i12 >> 2] = i14;
+ i10 = HEAP32[i8 >> 2] | 0;
+ break;
+ }
+ } else {
+ i10 = i13;
+ i11 = HEAP32[i7 >> 2] | 0;
+ }
+ } while (0);
+ HEAP32[i8 >> 2] = i10 + 1;
+ HEAP8[i11 + i10 | 0] = i6;
+ i5 = HEAP32[i5 >> 2] | 0;
+ i27 = HEAP32[i5 >> 2] | 0;
+ HEAP32[i5 >> 2] = i27 + -1;
+ if ((i27 | 0) == 0) {
+ i5 = _luaZ_fill(i5) | 0;
+ } else {
+ i27 = i5 + 4 | 0;
+ i5 = HEAP32[i27 >> 2] | 0;
+ HEAP32[i27 >> 2] = i5 + 1;
+ i5 = HEAPU8[i5] | 0;
+ }
+ HEAP32[i2 >> 2] = i5;
+ i5 = HEAP32[i4 >> 2] | 0;
+ i4 = HEAP32[i2 + 52 >> 2] | 0;
+ i5 = _luaS_newlstr(i4, (HEAP32[i5 >> 2] | 0) + 1 | 0, (HEAP32[i5 + 4 >> 2] | 0) + -2 | 0) | 0;
+ i6 = i4 + 8 | 0;
+ i7 = HEAP32[i6 >> 2] | 0;
+ HEAP32[i6 >> 2] = i7 + 16;
+ HEAP32[i7 >> 2] = i5;
+ HEAP32[i7 + 8 >> 2] = HEAPU8[i5 + 4 | 0] | 64;
+ i7 = _luaH_set(i4, HEAP32[(HEAP32[i2 + 48 >> 2] | 0) + 4 >> 2] | 0, (HEAP32[i6 >> 2] | 0) + -16 | 0) | 0;
+ i2 = i7 + 8 | 0;
+ if ((HEAP32[i2 >> 2] | 0) == 0 ? (HEAP32[i7 >> 2] = 1, HEAP32[i2 >> 2] = 1, (HEAP32[(HEAP32[i4 + 12 >> 2] | 0) + 12 >> 2] | 0) > 0) : 0) {
+ _luaC_step(i4);
+ }
+ HEAP32[i6 >> 2] = (HEAP32[i6 >> 2] | 0) + -16;
+ HEAP32[i3 >> 2] = i5;
+ i27 = 289;
+ STACKTOP = i1;
+ return i27 | 0;
+ } else if ((i9 | 0) == 161) {
+ i10 = HEAP32[i4 >> 2] | 0;
+ i9 = i10 + 4 | 0;
+ i13 = HEAP32[i9 >> 2] | 0;
+ i12 = i10 + 8 | 0;
+ i11 = HEAP32[i12 >> 2] | 0;
+ do {
+ if ((i13 + 1 | 0) >>> 0 > i11 >>> 0) {
+ if (i11 >>> 0 > 2147483645) {
+ _lexerror(i2, 12368, 0);
+ }
+ i13 = i11 << 1;
+ i20 = HEAP32[i2 + 52 >> 2] | 0;
+ if ((i13 | 0) == -2) {
+ _luaM_toobig(i20);
+ } else {
+ i25 = _luaM_realloc_(i20, HEAP32[i10 >> 2] | 0, i11, i13) | 0;
+ HEAP32[i10 >> 2] = i25;
+ HEAP32[i12 >> 2] = i13;
+ i26 = HEAP32[i9 >> 2] | 0;
+ break;
+ }
+ } else {
+ i26 = i13;
+ i25 = HEAP32[i10 >> 2] | 0;
+ }
+ } while (0);
+ HEAP32[i9 >> 2] = i26 + 1;
+ HEAP8[i25 + i26 | 0] = 46;
+ i9 = HEAP32[i5 >> 2] | 0;
+ i27 = HEAP32[i9 >> 2] | 0;
+ HEAP32[i9 >> 2] = i27 + -1;
+ if ((i27 | 0) == 0) {
+ i20 = _luaZ_fill(i9) | 0;
+ } else {
+ i27 = i9 + 4 | 0;
+ i20 = HEAP32[i27 >> 2] | 0;
+ HEAP32[i27 >> 2] = i20 + 1;
+ i20 = HEAPU8[i20] | 0;
+ }
+ HEAP32[i2 >> 2] = i20;
+ if ((i20 | 0) != 0 ? (_memchr(12304, i20, 2) | 0) != 0 : 0) {
+ i6 = HEAP32[i4 >> 2] | 0;
+ i3 = i6 + 4 | 0;
+ i9 = HEAP32[i3 >> 2] | 0;
+ i8 = i6 + 8 | 0;
+ i7 = HEAP32[i8 >> 2] | 0;
+ do {
+ if ((i9 + 1 | 0) >>> 0 > i7 >>> 0) {
+ if (i7 >>> 0 > 2147483645) {
+ _lexerror(i2, 12368, 0);
+ }
+ i9 = i7 << 1;
+ i10 = HEAP32[i2 + 52 >> 2] | 0;
+ if ((i9 | 0) == -2) {
+ _luaM_toobig(i10);
+ } else {
+ i21 = _luaM_realloc_(i10, HEAP32[i6 >> 2] | 0, i7, i9) | 0;
+ HEAP32[i6 >> 2] = i21;
+ HEAP32[i8 >> 2] = i9;
+ i22 = HEAP32[i3 >> 2] | 0;
+ break;
+ }
+ } else {
+ i22 = i9;
+ i21 = HEAP32[i6 >> 2] | 0;
+ }
+ } while (0);
+ HEAP32[i3 >> 2] = i22 + 1;
+ HEAP8[i21 + i22 | 0] = i20;
+ i3 = HEAP32[i5 >> 2] | 0;
+ i27 = HEAP32[i3 >> 2] | 0;
+ HEAP32[i3 >> 2] = i27 + -1;
+ if ((i27 | 0) == 0) {
+ i3 = _luaZ_fill(i3) | 0;
+ } else {
+ i27 = i3 + 4 | 0;
+ i3 = HEAP32[i27 >> 2] | 0;
+ HEAP32[i27 >> 2] = i3 + 1;
+ i3 = HEAPU8[i3] | 0;
+ }
+ HEAP32[i2 >> 2] = i3;
+ if ((i3 | 0) == 0) {
+ i27 = 279;
+ STACKTOP = i1;
+ return i27 | 0;
+ }
+ if ((_memchr(12304, i3, 2) | 0) == 0) {
+ i27 = 279;
+ STACKTOP = i1;
+ return i27 | 0;
+ }
+ i6 = HEAP32[i4 >> 2] | 0;
+ i7 = i6 + 4 | 0;
+ i9 = HEAP32[i7 >> 2] | 0;
+ i8 = i6 + 8 | 0;
+ i4 = HEAP32[i8 >> 2] | 0;
+ do {
+ if ((i9 + 1 | 0) >>> 0 > i4 >>> 0) {
+ if (i4 >>> 0 > 2147483645) {
+ _lexerror(i2, 12368, 0);
+ }
+ i10 = i4 << 1;
+ i9 = HEAP32[i2 + 52 >> 2] | 0;
+ if ((i10 | 0) == -2) {
+ _luaM_toobig(i9);
+ } else {
+ i18 = _luaM_realloc_(i9, HEAP32[i6 >> 2] | 0, i4, i10) | 0;
+ HEAP32[i6 >> 2] = i18;
+ HEAP32[i8 >> 2] = i10;
+ i19 = HEAP32[i7 >> 2] | 0;
+ break;
+ }
+ } else {
+ i19 = i9;
+ i18 = HEAP32[i6 >> 2] | 0;
+ }
+ } while (0);
+ HEAP32[i7 >> 2] = i19 + 1;
+ HEAP8[i18 + i19 | 0] = i3;
+ i3 = HEAP32[i5 >> 2] | 0;
+ i27 = HEAP32[i3 >> 2] | 0;
+ HEAP32[i3 >> 2] = i27 + -1;
+ if ((i27 | 0) == 0) {
+ i3 = _luaZ_fill(i3) | 0;
+ } else {
+ i27 = i3 + 4 | 0;
+ i3 = HEAP32[i27 >> 2] | 0;
+ HEAP32[i27 >> 2] = i3 + 1;
+ i3 = HEAPU8[i3] | 0;
+ }
+ HEAP32[i2 >> 2] = i3;
+ i27 = 280;
+ STACKTOP = i1;
+ return i27 | 0;
+ }
+ if ((HEAP8[i20 + 10913 | 0] & 2) == 0) {
+ i27 = 46;
+ STACKTOP = i1;
+ return i27 | 0;
+ }
+ } else if ((i9 | 0) == 283) {
+ if ((HEAP8[i13 + 10913 | 0] & 1) == 0) {
+ i3 = HEAP32[i5 >> 2] | 0;
+ i27 = HEAP32[i3 >> 2] | 0;
+ HEAP32[i3 >> 2] = i27 + -1;
+ if ((i27 | 0) == 0) {
+ i3 = _luaZ_fill(i3) | 0;
+ } else {
+ i27 = i3 + 4 | 0;
+ i3 = HEAP32[i27 >> 2] | 0;
+ HEAP32[i27 >> 2] = i3 + 1;
+ i3 = HEAPU8[i3] | 0;
+ }
+ HEAP32[i2 >> 2] = i3;
+ i27 = i13;
+ STACKTOP = i1;
+ return i27 | 0;
+ }
+ i10 = i2 + 52 | 0;
+ while (1) {
+ i11 = HEAP32[i4 >> 2] | 0;
+ i9 = i11 + 4 | 0;
+ i12 = HEAP32[i9 >> 2] | 0;
+ i19 = i11 + 8 | 0;
+ i18 = HEAP32[i19 >> 2] | 0;
+ if ((i12 + 1 | 0) >>> 0 > i18 >>> 0) {
+ if (i18 >>> 0 > 2147483645) {
+ i9 = 288;
+ break;
+ }
+ i21 = i18 << 1;
+ i12 = HEAP32[i10 >> 2] | 0;
+ if ((i21 | 0) == -2) {
+ i9 = 290;
+ break;
+ }
+ i27 = _luaM_realloc_(i12, HEAP32[i11 >> 2] | 0, i18, i21) | 0;
+ HEAP32[i11 >> 2] = i27;
+ HEAP32[i19 >> 2] = i21;
+ i12 = HEAP32[i9 >> 2] | 0;
+ i11 = i27;
+ } else {
+ i11 = HEAP32[i11 >> 2] | 0;
+ }
+ HEAP32[i9 >> 2] = i12 + 1;
+ HEAP8[i11 + i12 | 0] = i13;
+ i9 = HEAP32[i5 >> 2] | 0;
+ i27 = HEAP32[i9 >> 2] | 0;
+ HEAP32[i9 >> 2] = i27 + -1;
+ if ((i27 | 0) == 0) {
+ i13 = _luaZ_fill(i9) | 0;
+ } else {
+ i27 = i9 + 4 | 0;
+ i13 = HEAP32[i27 >> 2] | 0;
+ HEAP32[i27 >> 2] = i13 + 1;
+ i13 = HEAPU8[i13] | 0;
+ }
+ HEAP32[i2 >> 2] = i13;
+ if ((HEAP8[i13 + 10913 | 0] & 3) == 0) {
+ i9 = 296;
+ break;
+ }
+ }
+ if ((i9 | 0) == 288) {
+ _lexerror(i2, 12368, 0);
+ } else if ((i9 | 0) == 290) {
+ _luaM_toobig(i12);
+ } else if ((i9 | 0) == 296) {
+ i6 = HEAP32[i4 >> 2] | 0;
+ i4 = HEAP32[i10 >> 2] | 0;
+ i6 = _luaS_newlstr(i4, HEAP32[i6 >> 2] | 0, HEAP32[i6 + 4 >> 2] | 0) | 0;
+ i7 = i4 + 8 | 0;
+ i8 = HEAP32[i7 >> 2] | 0;
+ HEAP32[i7 >> 2] = i8 + 16;
+ HEAP32[i8 >> 2] = i6;
+ i5 = i6 + 4 | 0;
+ HEAP32[i8 + 8 >> 2] = HEAPU8[i5] | 64;
+ i8 = _luaH_set(i4, HEAP32[(HEAP32[i2 + 48 >> 2] | 0) + 4 >> 2] | 0, (HEAP32[i7 >> 2] | 0) + -16 | 0) | 0;
+ i2 = i8 + 8 | 0;
+ if ((HEAP32[i2 >> 2] | 0) == 0 ? (HEAP32[i8 >> 2] = 1, HEAP32[i2 >> 2] = 1, (HEAP32[(HEAP32[i4 + 12 >> 2] | 0) + 12 >> 2] | 0) > 0) : 0) {
+ _luaC_step(i4);
+ }
+ HEAP32[i7 >> 2] = (HEAP32[i7 >> 2] | 0) + -16;
+ HEAP32[i3 >> 2] = i6;
+ if ((HEAP8[i5] | 0) != 4) {
+ i27 = 288;
+ STACKTOP = i1;
+ return i27 | 0;
+ }
+ i2 = HEAP8[i6 + 6 | 0] | 0;
+ if (i2 << 24 >> 24 == 0) {
+ i27 = 288;
+ STACKTOP = i1;
+ return i27 | 0;
+ }
+ i27 = i2 & 255 | 256;
+ STACKTOP = i1;
+ return i27 | 0;
+ }
+ } else if ((i9 | 0) == 306) {
+ STACKTOP = i1;
+ return i2 | 0;
+ }
+ i9 = HEAP32[i4 >> 2] | 0;
+ i12 = i9 + 4 | 0;
+ i13 = HEAP32[i12 >> 2] | 0;
+ i11 = i9 + 8 | 0;
+ i10 = HEAP32[i11 >> 2] | 0;
+ do {
+ if ((i13 + 1 | 0) >>> 0 > i10 >>> 0) {
+ if (i10 >>> 0 > 2147483645) {
+ _lexerror(i2, 12368, 0);
+ }
+ i18 = i10 << 1;
+ i13 = HEAP32[i2 + 52 >> 2] | 0;
+ if ((i18 | 0) == -2) {
+ _luaM_toobig(i13);
+ } else {
+ i16 = _luaM_realloc_(i13, HEAP32[i9 >> 2] | 0, i10, i18) | 0;
+ HEAP32[i9 >> 2] = i16;
+ HEAP32[i11 >> 2] = i18;
+ i17 = HEAP32[i12 >> 2] | 0;
+ break;
+ }
+ } else {
+ i17 = i13;
+ i16 = HEAP32[i9 >> 2] | 0;
+ }
+ } while (0);
+ HEAP32[i12 >> 2] = i17 + 1;
+ HEAP8[i16 + i17 | 0] = i20;
+ i9 = HEAP32[i5 >> 2] | 0;
+ i27 = HEAP32[i9 >> 2] | 0;
+ HEAP32[i9 >> 2] = i27 + -1;
+ if ((i27 | 0) == 0) {
+ i9 = _luaZ_fill(i9) | 0;
+ } else {
+ i27 = i9 + 4 | 0;
+ i9 = HEAP32[i27 >> 2] | 0;
+ HEAP32[i27 >> 2] = i9 + 1;
+ i9 = HEAPU8[i9] | 0;
+ }
+ HEAP32[i2 >> 2] = i9;
+ if ((i20 | 0) == 48) {
+ if ((i9 | 0) != 0) {
+ if ((_memchr(12320, i9, 3) | 0) == 0) {
+ i15 = i9;
+ i9 = 12312;
+ } else {
+ i10 = HEAP32[i4 >> 2] | 0;
+ i13 = i10 + 4 | 0;
+ i16 = HEAP32[i13 >> 2] | 0;
+ i11 = i10 + 8 | 0;
+ i12 = HEAP32[i11 >> 2] | 0;
+ do {
+ if ((i16 + 1 | 0) >>> 0 > i12 >>> 0) {
+ if (i12 >>> 0 > 2147483645) {
+ _lexerror(i2, 12368, 0);
+ }
+ i17 = i12 << 1;
+ i16 = HEAP32[i2 + 52 >> 2] | 0;
+ if ((i17 | 0) == -2) {
+ _luaM_toobig(i16);
+ } else {
+ i15 = _luaM_realloc_(i16, HEAP32[i10 >> 2] | 0, i12, i17) | 0;
+ HEAP32[i10 >> 2] = i15;
+ HEAP32[i11 >> 2] = i17;
+ i14 = HEAP32[i13 >> 2] | 0;
+ break;
+ }
+ } else {
+ i14 = i16;
+ i15 = HEAP32[i10 >> 2] | 0;
+ }
+ } while (0);
+ HEAP32[i13 >> 2] = i14 + 1;
+ HEAP8[i15 + i14 | 0] = i9;
+ i9 = HEAP32[i5 >> 2] | 0;
+ i27 = HEAP32[i9 >> 2] | 0;
+ HEAP32[i9 >> 2] = i27 + -1;
+ if ((i27 | 0) == 0) {
+ i15 = _luaZ_fill(i9) | 0;
+ } else {
+ i27 = i9 + 4 | 0;
+ i15 = HEAP32[i27 >> 2] | 0;
+ HEAP32[i27 >> 2] = i15 + 1;
+ i15 = HEAPU8[i15] | 0;
+ }
+ HEAP32[i2 >> 2] = i15;
+ i9 = 12328;
+ }
+ } else {
+ i15 = 0;
+ i9 = 12312;
+ }
+ } else {
+ i15 = i9;
+ i9 = 12312;
+ }
+ i10 = i2 + 52 | 0;
+ while (1) {
+ if ((i15 | 0) != 0) {
+ if ((_memchr(i9, i15, 3) | 0) != 0) {
+ i12 = HEAP32[i4 >> 2] | 0;
+ i11 = i12 + 4 | 0;
+ i16 = HEAP32[i11 >> 2] | 0;
+ i14 = i12 + 8 | 0;
+ i13 = HEAP32[i14 >> 2] | 0;
+ if ((i16 + 1 | 0) >>> 0 > i13 >>> 0) {
+ if (i13 >>> 0 > 2147483645) {
+ i9 = 227;
+ break;
+ }
+ i17 = i13 << 1;
+ i16 = HEAP32[i10 >> 2] | 0;
+ if ((i17 | 0) == -2) {
+ i9 = 229;
+ break;
+ }
+ i27 = _luaM_realloc_(i16, HEAP32[i12 >> 2] | 0, i13, i17) | 0;
+ HEAP32[i12 >> 2] = i27;
+ HEAP32[i14 >> 2] = i17;
+ i16 = HEAP32[i11 >> 2] | 0;
+ i12 = i27;
+ } else {
+ i12 = HEAP32[i12 >> 2] | 0;
+ }
+ HEAP32[i11 >> 2] = i16 + 1;
+ HEAP8[i12 + i16 | 0] = i15;
+ i11 = HEAP32[i5 >> 2] | 0;
+ i27 = HEAP32[i11 >> 2] | 0;
+ HEAP32[i11 >> 2] = i27 + -1;
+ if ((i27 | 0) == 0) {
+ i15 = _luaZ_fill(i11) | 0;
+ } else {
+ i27 = i11 + 4 | 0;
+ i15 = HEAP32[i27 >> 2] | 0;
+ HEAP32[i27 >> 2] = i15 + 1;
+ i15 = HEAPU8[i15] | 0;
+ }
+ HEAP32[i2 >> 2] = i15;
+ if ((i15 | 0) != 0) {
+ if ((_memchr(12336, i15, 3) | 0) != 0) {
+ i12 = HEAP32[i4 >> 2] | 0;
+ i11 = i12 + 4 | 0;
+ i16 = HEAP32[i11 >> 2] | 0;
+ i14 = i12 + 8 | 0;
+ i13 = HEAP32[i14 >> 2] | 0;
+ if ((i16 + 1 | 0) >>> 0 > i13 >>> 0) {
+ if (i13 >>> 0 > 2147483645) {
+ i9 = 239;
+ break;
+ }
+ i17 = i13 << 1;
+ i16 = HEAP32[i10 >> 2] | 0;
+ if ((i17 | 0) == -2) {
+ i9 = 241;
+ break;
+ }
+ i27 = _luaM_realloc_(i16, HEAP32[i12 >> 2] | 0, i13, i17) | 0;
+ HEAP32[i12 >> 2] = i27;
+ HEAP32[i14 >> 2] = i17;
+ i16 = HEAP32[i11 >> 2] | 0;
+ i12 = i27;
+ } else {
+ i12 = HEAP32[i12 >> 2] | 0;
+ }
+ HEAP32[i11 >> 2] = i16 + 1;
+ HEAP8[i12 + i16 | 0] = i15;
+ i11 = HEAP32[i5 >> 2] | 0;
+ i27 = HEAP32[i11 >> 2] | 0;
+ HEAP32[i11 >> 2] = i27 + -1;
+ if ((i27 | 0) == 0) {
+ i15 = _luaZ_fill(i11) | 0;
+ } else {
+ i27 = i11 + 4 | 0;
+ i15 = HEAP32[i27 >> 2] | 0;
+ HEAP32[i27 >> 2] = i15 + 1;
+ i15 = HEAPU8[i15] | 0;
+ }
+ HEAP32[i2 >> 2] = i15;
+ }
+ } else {
+ i15 = 0;
+ }
+ }
+ } else {
+ i15 = 0;
+ }
+ i12 = HEAP32[i4 >> 2] | 0;
+ i11 = i12 + 4 | 0;
+ i17 = HEAP32[i11 >> 2] | 0;
+ i14 = i12 + 8 | 0;
+ i13 = HEAP32[i14 >> 2] | 0;
+ i16 = (i17 + 1 | 0) >>> 0 > i13 >>> 0;
+ if (!((HEAP8[i15 + 10913 | 0] & 16) != 0 | (i15 | 0) == 46)) {
+ i9 = 259;
+ break;
+ }
+ if (i16) {
+ if (i13 >>> 0 > 2147483645) {
+ i9 = 251;
+ break;
+ }
+ i17 = i13 << 1;
+ i16 = HEAP32[i10 >> 2] | 0;
+ if ((i17 | 0) == -2) {
+ i9 = 253;
+ break;
+ }
+ i27 = _luaM_realloc_(i16, HEAP32[i12 >> 2] | 0, i13, i17) | 0;
+ HEAP32[i12 >> 2] = i27;
+ HEAP32[i14 >> 2] = i17;
+ i17 = HEAP32[i11 >> 2] | 0;
+ i12 = i27;
+ } else {
+ i12 = HEAP32[i12 >> 2] | 0;
+ }
+ HEAP32[i11 >> 2] = i17 + 1;
+ HEAP8[i12 + i17 | 0] = i15;
+ i11 = HEAP32[i5 >> 2] | 0;
+ i27 = HEAP32[i11 >> 2] | 0;
+ HEAP32[i11 >> 2] = i27 + -1;
+ if ((i27 | 0) == 0) {
+ i15 = _luaZ_fill(i11) | 0;
+ } else {
+ i27 = i11 + 4 | 0;
+ i15 = HEAP32[i27 >> 2] | 0;
+ HEAP32[i27 >> 2] = i15 + 1;
+ i15 = HEAPU8[i15] | 0;
+ }
+ HEAP32[i2 >> 2] = i15;
+ }
+ if ((i9 | 0) == 227) {
+ _lexerror(i2, 12368, 0);
+ } else if ((i9 | 0) == 229) {
+ _luaM_toobig(i16);
+ } else if ((i9 | 0) == 239) {
+ _lexerror(i2, 12368, 0);
+ } else if ((i9 | 0) == 241) {
+ _luaM_toobig(i16);
+ } else if ((i9 | 0) == 251) {
+ _lexerror(i2, 12368, 0);
+ } else if ((i9 | 0) == 253) {
+ _luaM_toobig(i16);
+ } else if ((i9 | 0) == 259) {
+ do {
+ if (i16) {
+ if (i13 >>> 0 > 2147483645) {
+ _lexerror(i2, 12368, 0);
+ }
+ i5 = i13 << 1;
+ i9 = HEAP32[i10 >> 2] | 0;
+ if ((i5 | 0) == -2) {
+ _luaM_toobig(i9);
+ } else {
+ i7 = _luaM_realloc_(i9, HEAP32[i12 >> 2] | 0, i13, i5) | 0;
+ HEAP32[i12 >> 2] = i7;
+ HEAP32[i14 >> 2] = i5;
+ i8 = HEAP32[i11 >> 2] | 0;
+ break;
+ }
+ } else {
+ i8 = i17;
+ i7 = HEAP32[i12 >> 2] | 0;
+ }
+ } while (0);
+ HEAP32[i11 >> 2] = i8 + 1;
+ HEAP8[i7 + i8 | 0] = 0;
+ i5 = i2 + 76 | 0;
+ i7 = HEAP8[i5] | 0;
+ i10 = HEAP32[i4 >> 2] | 0;
+ i8 = HEAP32[i10 >> 2] | 0;
+ i10 = HEAP32[i10 + 4 >> 2] | 0;
+ if ((i10 | 0) == 0) {
+ i7 = -1;
+ } else {
+ do {
+ i10 = i10 + -1 | 0;
+ i9 = i8 + i10 | 0;
+ if ((HEAP8[i9] | 0) == 46) {
+ HEAP8[i9] = i7;
+ }
+ } while ((i10 | 0) != 0);
+ i7 = HEAP32[i4 >> 2] | 0;
+ i8 = HEAP32[i7 >> 2] | 0;
+ i7 = (HEAP32[i7 + 4 >> 2] | 0) + -1 | 0;
+ }
+ if ((_luaO_str2d(i8, i7, i3) | 0) != 0) {
+ i27 = 287;
+ STACKTOP = i1;
+ return i27 | 0;
+ }
+ i9 = HEAP8[i5] | 0;
+ i8 = HEAP8[HEAP32[(_localeconv() | 0) >> 2] | 0] | 0;
+ HEAP8[i5] = i8;
+ i10 = HEAP32[i4 >> 2] | 0;
+ i7 = HEAP32[i10 >> 2] | 0;
+ i10 = HEAP32[i10 + 4 >> 2] | 0;
+ if ((i10 | 0) == 0) {
+ i8 = -1;
+ } else {
+ do {
+ i10 = i10 + -1 | 0;
+ i11 = i7 + i10 | 0;
+ if ((HEAP8[i11] | 0) == i9 << 24 >> 24) {
+ HEAP8[i11] = i8;
+ }
+ } while ((i10 | 0) != 0);
+ i8 = HEAP32[i4 >> 2] | 0;
+ i7 = HEAP32[i8 >> 2] | 0;
+ i8 = (HEAP32[i8 + 4 >> 2] | 0) + -1 | 0;
+ }
+ if ((_luaO_str2d(i7, i8, i3) | 0) != 0) {
+ i27 = 287;
+ STACKTOP = i1;
+ return i27 | 0;
+ }
+ i1 = HEAP8[i5] | 0;
+ i4 = HEAP32[i4 >> 2] | 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ i4 = HEAP32[i4 + 4 >> 2] | 0;
+ if ((i4 | 0) == 0) {
+ _lexerror(i2, 12344, 287);
+ } else {
+ i6 = i4;
+ }
+ do {
+ i6 = i6 + -1 | 0;
+ i4 = i3 + i6 | 0;
+ if ((HEAP8[i4] | 0) == i1 << 24 >> 24) {
+ HEAP8[i4] = 46;
+ }
+ } while ((i6 | 0) != 0);
+ _lexerror(i2, 12344, 287);
+ }
+ return 0;
+}
+function _luaV_execute(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0, i23 = 0, i24 = 0, i25 = 0, i26 = 0, i27 = 0, i28 = 0, i29 = 0, i30 = 0, i31 = 0, i32 = 0, i33 = 0, i34 = 0, i35 = 0, i36 = 0, d37 = 0.0, d38 = 0.0, d39 = 0.0;
+ i12 = STACKTOP;
+ STACKTOP = STACKTOP + 32 | 0;
+ i13 = i12 + 24 | 0;
+ i10 = i12 + 16 | 0;
+ i9 = i12 + 8 | 0;
+ i8 = i12;
+ i3 = i1 + 16 | 0;
+ i4 = i1 + 40 | 0;
+ i6 = i1 + 12 | 0;
+ i5 = i1 + 8 | 0;
+ i11 = i1 + 24 | 0;
+ i17 = i1 + 48 | 0;
+ i2 = i1 + 20 | 0;
+ i16 = i1 + 6 | 0;
+ i7 = i1 + 44 | 0;
+ i19 = HEAP32[i3 >> 2] | 0;
+ L1 : while (1) {
+ i22 = HEAP32[HEAP32[i19 >> 2] >> 2] | 0;
+ i18 = i22 + 12 | 0;
+ i23 = HEAP32[(HEAP32[i18 >> 2] | 0) + 8 >> 2] | 0;
+ i20 = i19 + 24 | 0;
+ i21 = i19 + 28 | 0;
+ i22 = i22 + 16 | 0;
+ i24 = i19 + 4 | 0;
+ i25 = HEAP32[i20 >> 2] | 0;
+ L3 : while (1) {
+ i28 = HEAP32[i21 >> 2] | 0;
+ HEAP32[i21 >> 2] = i28 + 4;
+ i28 = HEAP32[i28 >> 2] | 0;
+ i27 = HEAP8[i4] | 0;
+ do {
+ if (!((i27 & 12) == 0)) {
+ i26 = (HEAP32[i17 >> 2] | 0) + -1 | 0;
+ HEAP32[i17 >> 2] = i26;
+ i26 = (i26 | 0) == 0;
+ if (!i26 ? (i27 & 4) == 0 : 0) {
+ break;
+ }
+ i25 = HEAP32[i3 >> 2] | 0;
+ i29 = i27 & 255;
+ if ((i29 & 8 | 0) == 0 | i26 ^ 1) {
+ i27 = 0;
+ } else {
+ HEAP32[i17 >> 2] = HEAP32[i7 >> 2];
+ i27 = 1;
+ }
+ i26 = i25 + 18 | 0;
+ i30 = HEAPU8[i26] | 0;
+ if ((i30 & 128 | 0) == 0) {
+ if (i27) {
+ _luaD_hook(i1, 3, -1);
+ }
+ do {
+ if ((i29 & 4 | 0) == 0) {
+ i29 = i25 + 28 | 0;
+ } else {
+ i34 = HEAP32[(HEAP32[HEAP32[i25 >> 2] >> 2] | 0) + 12 >> 2] | 0;
+ i29 = i25 + 28 | 0;
+ i32 = HEAP32[i29 >> 2] | 0;
+ i35 = HEAP32[i34 + 12 >> 2] | 0;
+ i33 = (i32 - i35 >> 2) + -1 | 0;
+ i34 = HEAP32[i34 + 20 >> 2] | 0;
+ i31 = (i34 | 0) == 0;
+ if (i31) {
+ i30 = 0;
+ } else {
+ i30 = HEAP32[i34 + (i33 << 2) >> 2] | 0;
+ }
+ if ((i33 | 0) != 0 ? (i14 = HEAP32[i2 >> 2] | 0, i32 >>> 0 > i14 >>> 0) : 0) {
+ if (i31) {
+ i31 = 0;
+ } else {
+ i31 = HEAP32[i34 + ((i14 - i35 >> 2) + -1 << 2) >> 2] | 0;
+ }
+ if ((i30 | 0) == (i31 | 0)) {
+ break;
+ }
+ }
+ _luaD_hook(i1, 2, i30);
+ }
+ } while (0);
+ HEAP32[i2 >> 2] = HEAP32[i29 >> 2];
+ if ((HEAP8[i16] | 0) == 1) {
+ i15 = 23;
+ break L1;
+ }
+ } else {
+ HEAP8[i26] = i30 & 127;
+ }
+ i25 = HEAP32[i20 >> 2] | 0;
+ }
+ } while (0);
+ i26 = i28 >>> 6 & 255;
+ i27 = i25 + (i26 << 4) | 0;
+ switch (i28 & 63 | 0) {
+ case 9:
+ {
+ i28 = HEAP32[i22 + (i28 >>> 23 << 2) >> 2] | 0;
+ i35 = HEAP32[i28 + 8 >> 2] | 0;
+ i33 = i27;
+ i34 = HEAP32[i33 + 4 >> 2] | 0;
+ i36 = i35;
+ HEAP32[i36 >> 2] = HEAP32[i33 >> 2];
+ HEAP32[i36 + 4 >> 2] = i34;
+ i36 = i25 + (i26 << 4) + 8 | 0;
+ HEAP32[i35 + 8 >> 2] = HEAP32[i36 >> 2];
+ if ((HEAP32[i36 >> 2] & 64 | 0) == 0) {
+ continue L3;
+ }
+ i26 = HEAP32[i27 >> 2] | 0;
+ if ((HEAP8[i26 + 5 | 0] & 3) == 0) {
+ continue L3;
+ }
+ if ((HEAP8[i28 + 5 | 0] & 4) == 0) {
+ continue L3;
+ }
+ _luaC_barrier_(i1, i28, i26);
+ continue L3;
+ }
+ case 10:
+ {
+ i26 = i28 >>> 23;
+ if ((i26 & 256 | 0) == 0) {
+ i26 = i25 + (i26 << 4) | 0;
+ } else {
+ i26 = i23 + ((i26 & 255) << 4) | 0;
+ }
+ i28 = i28 >>> 14;
+ if ((i28 & 256 | 0) == 0) {
+ i25 = i25 + ((i28 & 511) << 4) | 0;
+ } else {
+ i25 = i23 + ((i28 & 255) << 4) | 0;
+ }
+ _luaV_settable(i1, i27, i26, i25);
+ i25 = HEAP32[i20 >> 2] | 0;
+ continue L3;
+ }
+ case 17:
+ {
+ i29 = i28 >>> 23;
+ if ((i29 & 256 | 0) == 0) {
+ i29 = i25 + (i29 << 4) | 0;
+ } else {
+ i29 = i23 + ((i29 & 255) << 4) | 0;
+ }
+ i28 = i28 >>> 14;
+ if ((i28 & 256 | 0) == 0) {
+ i28 = i25 + ((i28 & 511) << 4) | 0;
+ } else {
+ i28 = i23 + ((i28 & 255) << 4) | 0;
+ }
+ if ((HEAP32[i29 + 8 >> 2] | 0) == 3 ? (HEAP32[i28 + 8 >> 2] | 0) == 3 : 0) {
+ d37 = +HEAPF64[i29 >> 3];
+ d38 = +HEAPF64[i28 >> 3];
+ HEAPF64[i27 >> 3] = d37 - d38 * +Math_floor(+(d37 / d38));
+ HEAP32[i25 + (i26 << 4) + 8 >> 2] = 3;
+ continue L3;
+ }
+ _luaV_arith(i1, i27, i29, i28, 10);
+ i25 = HEAP32[i20 >> 2] | 0;
+ continue L3;
+ }
+ case 23:
+ {
+ if ((i26 | 0) != 0) {
+ _luaF_close(i1, (HEAP32[i20 >> 2] | 0) + (i26 + -1 << 4) | 0);
+ }
+ HEAP32[i21 >> 2] = (HEAP32[i21 >> 2] | 0) + ((i28 >>> 14) + -131071 << 2);
+ continue L3;
+ }
+ case 24:
+ {
+ i27 = i28 >>> 23;
+ if ((i27 & 256 | 0) == 0) {
+ i27 = i25 + (i27 << 4) | 0;
+ } else {
+ i27 = i23 + ((i27 & 255) << 4) | 0;
+ }
+ i28 = i28 >>> 14;
+ if ((i28 & 256 | 0) == 0) {
+ i25 = i25 + ((i28 & 511) << 4) | 0;
+ } else {
+ i25 = i23 + ((i28 & 255) << 4) | 0;
+ }
+ if ((HEAP32[i27 + 8 >> 2] | 0) == (HEAP32[i25 + 8 >> 2] | 0)) {
+ i27 = (_luaV_equalobj_(i1, i27, i25) | 0) != 0;
+ } else {
+ i27 = 0;
+ }
+ i25 = HEAP32[i21 >> 2] | 0;
+ if ((i27 & 1 | 0) == (i26 | 0)) {
+ i26 = HEAP32[i25 >> 2] | 0;
+ i27 = i26 >>> 6 & 255;
+ if ((i27 | 0) != 0) {
+ _luaF_close(i1, (HEAP32[i20 >> 2] | 0) + (i27 + -1 << 4) | 0);
+ i25 = HEAP32[i21 >> 2] | 0;
+ }
+ i25 = i25 + ((i26 >>> 14) + -131070 << 2) | 0;
+ } else {
+ i25 = i25 + 4 | 0;
+ }
+ HEAP32[i21 >> 2] = i25;
+ i25 = HEAP32[i20 >> 2] | 0;
+ continue L3;
+ }
+ case 18:
+ {
+ i29 = i28 >>> 23;
+ if ((i29 & 256 | 0) == 0) {
+ i29 = i25 + (i29 << 4) | 0;
+ } else {
+ i29 = i23 + ((i29 & 255) << 4) | 0;
+ }
+ i28 = i28 >>> 14;
+ if ((i28 & 256 | 0) == 0) {
+ i28 = i25 + ((i28 & 511) << 4) | 0;
+ } else {
+ i28 = i23 + ((i28 & 255) << 4) | 0;
+ }
+ if ((HEAP32[i29 + 8 >> 2] | 0) == 3 ? (HEAP32[i28 + 8 >> 2] | 0) == 3 : 0) {
+ HEAPF64[i27 >> 3] = +Math_pow(+(+HEAPF64[i29 >> 3]), +(+HEAPF64[i28 >> 3]));
+ HEAP32[i25 + (i26 << 4) + 8 >> 2] = 3;
+ continue L3;
+ }
+ _luaV_arith(i1, i27, i29, i28, 11);
+ i25 = HEAP32[i20 >> 2] | 0;
+ continue L3;
+ }
+ case 1:
+ {
+ i36 = i28 >>> 14;
+ i33 = i23 + (i36 << 4) | 0;
+ i34 = HEAP32[i33 + 4 >> 2] | 0;
+ i35 = i27;
+ HEAP32[i35 >> 2] = HEAP32[i33 >> 2];
+ HEAP32[i35 + 4 >> 2] = i34;
+ HEAP32[i25 + (i26 << 4) + 8 >> 2] = HEAP32[i23 + (i36 << 4) + 8 >> 2];
+ continue L3;
+ }
+ case 0:
+ {
+ i36 = i28 >>> 23;
+ i33 = i25 + (i36 << 4) | 0;
+ i34 = HEAP32[i33 + 4 >> 2] | 0;
+ i35 = i27;
+ HEAP32[i35 >> 2] = HEAP32[i33 >> 2];
+ HEAP32[i35 + 4 >> 2] = i34;
+ HEAP32[i25 + (i26 << 4) + 8 >> 2] = HEAP32[i25 + (i36 << 4) + 8 >> 2];
+ continue L3;
+ }
+ case 2:
+ {
+ i36 = HEAP32[i21 >> 2] | 0;
+ HEAP32[i21 >> 2] = i36 + 4;
+ i36 = (HEAP32[i36 >> 2] | 0) >>> 6;
+ i33 = i23 + (i36 << 4) | 0;
+ i34 = HEAP32[i33 + 4 >> 2] | 0;
+ i35 = i27;
+ HEAP32[i35 >> 2] = HEAP32[i33 >> 2];
+ HEAP32[i35 + 4 >> 2] = i34;
+ HEAP32[i25 + (i26 << 4) + 8 >> 2] = HEAP32[i23 + (i36 << 4) + 8 >> 2];
+ continue L3;
+ }
+ case 5:
+ {
+ i36 = HEAP32[(HEAP32[i22 + (i28 >>> 23 << 2) >> 2] | 0) + 8 >> 2] | 0;
+ i33 = i36;
+ i34 = HEAP32[i33 + 4 >> 2] | 0;
+ i35 = i27;
+ HEAP32[i35 >> 2] = HEAP32[i33 >> 2];
+ HEAP32[i35 + 4 >> 2] = i34;
+ HEAP32[i25 + (i26 << 4) + 8 >> 2] = HEAP32[i36 + 8 >> 2];
+ continue L3;
+ }
+ case 3:
+ {
+ HEAP32[i27 >> 2] = i28 >>> 23;
+ HEAP32[i25 + (i26 << 4) + 8 >> 2] = 1;
+ if ((i28 & 8372224 | 0) == 0) {
+ continue L3;
+ }
+ HEAP32[i21 >> 2] = (HEAP32[i21 >> 2] | 0) + 4;
+ continue L3;
+ }
+ case 7:
+ {
+ i26 = i28 >>> 14;
+ if ((i26 & 256 | 0) == 0) {
+ i26 = i25 + ((i26 & 511) << 4) | 0;
+ } else {
+ i26 = i23 + ((i26 & 255) << 4) | 0;
+ }
+ _luaV_gettable(i1, i25 + (i28 >>> 23 << 4) | 0, i26, i27);
+ i25 = HEAP32[i20 >> 2] | 0;
+ continue L3;
+ }
+ case 12:
+ {
+ i36 = i28 >>> 23;
+ i29 = i25 + (i36 << 4) | 0;
+ i26 = i26 + 1 | 0;
+ i33 = i29;
+ i34 = HEAP32[i33 + 4 >> 2] | 0;
+ i35 = i25 + (i26 << 4) | 0;
+ HEAP32[i35 >> 2] = HEAP32[i33 >> 2];
+ HEAP32[i35 + 4 >> 2] = i34;
+ HEAP32[i25 + (i26 << 4) + 8 >> 2] = HEAP32[i25 + (i36 << 4) + 8 >> 2];
+ i26 = i28 >>> 14;
+ if ((i26 & 256 | 0) == 0) {
+ i25 = i25 + ((i26 & 511) << 4) | 0;
+ } else {
+ i25 = i23 + ((i26 & 255) << 4) | 0;
+ }
+ _luaV_gettable(i1, i29, i25, i27);
+ i25 = HEAP32[i20 >> 2] | 0;
+ continue L3;
+ }
+ case 13:
+ {
+ i29 = i28 >>> 23;
+ if ((i29 & 256 | 0) == 0) {
+ i29 = i25 + (i29 << 4) | 0;
+ } else {
+ i29 = i23 + ((i29 & 255) << 4) | 0;
+ }
+ i28 = i28 >>> 14;
+ if ((i28 & 256 | 0) == 0) {
+ i28 = i25 + ((i28 & 511) << 4) | 0;
+ } else {
+ i28 = i23 + ((i28 & 255) << 4) | 0;
+ }
+ if ((HEAP32[i29 + 8 >> 2] | 0) == 3 ? (HEAP32[i28 + 8 >> 2] | 0) == 3 : 0) {
+ HEAPF64[i27 >> 3] = +HEAPF64[i29 >> 3] + +HEAPF64[i28 >> 3];
+ HEAP32[i25 + (i26 << 4) + 8 >> 2] = 3;
+ continue L3;
+ }
+ _luaV_arith(i1, i27, i29, i28, 6);
+ i25 = HEAP32[i20 >> 2] | 0;
+ continue L3;
+ }
+ case 14:
+ {
+ i29 = i28 >>> 23;
+ if ((i29 & 256 | 0) == 0) {
+ i29 = i25 + (i29 << 4) | 0;
+ } else {
+ i29 = i23 + ((i29 & 255) << 4) | 0;
+ }
+ i28 = i28 >>> 14;
+ if ((i28 & 256 | 0) == 0) {
+ i28 = i25 + ((i28 & 511) << 4) | 0;
+ } else {
+ i28 = i23 + ((i28 & 255) << 4) | 0;
+ }
+ if ((HEAP32[i29 + 8 >> 2] | 0) == 3 ? (HEAP32[i28 + 8 >> 2] | 0) == 3 : 0) {
+ HEAPF64[i27 >> 3] = +HEAPF64[i29 >> 3] - +HEAPF64[i28 >> 3];
+ HEAP32[i25 + (i26 << 4) + 8 >> 2] = 3;
+ continue L3;
+ }
+ _luaV_arith(i1, i27, i29, i28, 7);
+ i25 = HEAP32[i20 >> 2] | 0;
+ continue L3;
+ }
+ case 6:
+ {
+ i26 = i28 >>> 14;
+ if ((i26 & 256 | 0) == 0) {
+ i25 = i25 + ((i26 & 511) << 4) | 0;
+ } else {
+ i25 = i23 + ((i26 & 255) << 4) | 0;
+ }
+ _luaV_gettable(i1, HEAP32[(HEAP32[i22 + (i28 >>> 23 << 2) >> 2] | 0) + 8 >> 2] | 0, i25, i27);
+ i25 = HEAP32[i20 >> 2] | 0;
+ continue L3;
+ }
+ case 4:
+ {
+ i26 = i28 >>> 23;
+ while (1) {
+ HEAP32[i27 + 8 >> 2] = 0;
+ if ((i26 | 0) == 0) {
+ continue L3;
+ } else {
+ i26 = i26 + -1 | 0;
+ i27 = i27 + 16 | 0;
+ }
+ }
+ }
+ case 8:
+ {
+ i27 = i28 >>> 23;
+ if ((i27 & 256 | 0) == 0) {
+ i27 = i25 + (i27 << 4) | 0;
+ } else {
+ i27 = i23 + ((i27 & 255) << 4) | 0;
+ }
+ i28 = i28 >>> 14;
+ if ((i28 & 256 | 0) == 0) {
+ i25 = i25 + ((i28 & 511) << 4) | 0;
+ } else {
+ i25 = i23 + ((i28 & 255) << 4) | 0;
+ }
+ _luaV_settable(i1, HEAP32[(HEAP32[i22 + (i26 << 2) >> 2] | 0) + 8 >> 2] | 0, i27, i25);
+ i25 = HEAP32[i20 >> 2] | 0;
+ continue L3;
+ }
+ case 11:
+ {
+ i29 = i28 >>> 23;
+ i28 = i28 >>> 14 & 511;
+ i30 = _luaH_new(i1) | 0;
+ HEAP32[i27 >> 2] = i30;
+ HEAP32[i25 + (i26 << 4) + 8 >> 2] = 69;
+ if ((i28 | i29 | 0) != 0) {
+ i36 = _luaO_fb2int(i29) | 0;
+ _luaH_resize(i1, i30, i36, _luaO_fb2int(i28) | 0);
+ }
+ if ((HEAP32[(HEAP32[i6 >> 2] | 0) + 12 >> 2] | 0) > 0) {
+ HEAP32[i5 >> 2] = i25 + (i26 + 1 << 4);
+ _luaC_step(i1);
+ HEAP32[i5 >> 2] = HEAP32[i24 >> 2];
+ }
+ i25 = HEAP32[i20 >> 2] | 0;
+ continue L3;
+ }
+ case 19:
+ {
+ i36 = i28 >>> 23;
+ i28 = i25 + (i36 << 4) | 0;
+ if ((HEAP32[i25 + (i36 << 4) + 8 >> 2] | 0) == 3) {
+ HEAPF64[i27 >> 3] = -+HEAPF64[i28 >> 3];
+ HEAP32[i25 + (i26 << 4) + 8 >> 2] = 3;
+ continue L3;
+ } else {
+ _luaV_arith(i1, i27, i28, i28, 12);
+ i25 = HEAP32[i20 >> 2] | 0;
+ continue L3;
+ }
+ }
+ case 15:
+ {
+ i29 = i28 >>> 23;
+ if ((i29 & 256 | 0) == 0) {
+ i29 = i25 + (i29 << 4) | 0;
+ } else {
+ i29 = i23 + ((i29 & 255) << 4) | 0;
+ }
+ i28 = i28 >>> 14;
+ if ((i28 & 256 | 0) == 0) {
+ i28 = i25 + ((i28 & 511) << 4) | 0;
+ } else {
+ i28 = i23 + ((i28 & 255) << 4) | 0;
+ }
+ if ((HEAP32[i29 + 8 >> 2] | 0) == 3 ? (HEAP32[i28 + 8 >> 2] | 0) == 3 : 0) {
+ HEAPF64[i27 >> 3] = +HEAPF64[i29 >> 3] * +HEAPF64[i28 >> 3];
+ HEAP32[i25 + (i26 << 4) + 8 >> 2] = 3;
+ continue L3;
+ }
+ _luaV_arith(i1, i27, i29, i28, 8);
+ i25 = HEAP32[i20 >> 2] | 0;
+ continue L3;
+ }
+ case 16:
+ {
+ i29 = i28 >>> 23;
+ if ((i29 & 256 | 0) == 0) {
+ i29 = i25 + (i29 << 4) | 0;
+ } else {
+ i29 = i23 + ((i29 & 255) << 4) | 0;
+ }
+ i28 = i28 >>> 14;
+ if ((i28 & 256 | 0) == 0) {
+ i28 = i25 + ((i28 & 511) << 4) | 0;
+ } else {
+ i28 = i23 + ((i28 & 255) << 4) | 0;
+ }
+ if ((HEAP32[i29 + 8 >> 2] | 0) == 3 ? (HEAP32[i28 + 8 >> 2] | 0) == 3 : 0) {
+ HEAPF64[i27 >> 3] = +HEAPF64[i29 >> 3] / +HEAPF64[i28 >> 3];
+ HEAP32[i25 + (i26 << 4) + 8 >> 2] = 3;
+ continue L3;
+ }
+ _luaV_arith(i1, i27, i29, i28, 9);
+ i25 = HEAP32[i20 >> 2] | 0;
+ continue L3;
+ }
+ case 20:
+ {
+ i29 = i28 >>> 23;
+ i28 = HEAP32[i25 + (i29 << 4) + 8 >> 2] | 0;
+ if ((i28 | 0) != 0) {
+ if ((i28 | 0) == 1) {
+ i28 = (HEAP32[i25 + (i29 << 4) >> 2] | 0) == 0;
+ } else {
+ i28 = 0;
+ }
+ } else {
+ i28 = 1;
+ }
+ HEAP32[i27 >> 2] = i28 & 1;
+ HEAP32[i25 + (i26 << 4) + 8 >> 2] = 1;
+ continue L3;
+ }
+ case 21:
+ {
+ _luaV_objlen(i1, i27, i25 + (i28 >>> 23 << 4) | 0);
+ i25 = HEAP32[i20 >> 2] | 0;
+ continue L3;
+ }
+ case 22:
+ {
+ i27 = i28 >>> 23;
+ i28 = i28 >>> 14 & 511;
+ HEAP32[i5 >> 2] = i25 + (i28 + 1 << 4);
+ _luaV_concat(i1, 1 - i27 + i28 | 0);
+ i25 = HEAP32[i20 >> 2] | 0;
+ i28 = i25 + (i27 << 4) | 0;
+ i34 = i28;
+ i35 = HEAP32[i34 + 4 >> 2] | 0;
+ i36 = i25 + (i26 << 4) | 0;
+ HEAP32[i36 >> 2] = HEAP32[i34 >> 2];
+ HEAP32[i36 + 4 >> 2] = i35;
+ HEAP32[i25 + (i26 << 4) + 8 >> 2] = HEAP32[i25 + (i27 << 4) + 8 >> 2];
+ if ((HEAP32[(HEAP32[i6 >> 2] | 0) + 12 >> 2] | 0) > 0) {
+ if (!(i26 >>> 0 < i27 >>> 0)) {
+ i28 = i25 + (i26 + 1 << 4) | 0;
+ }
+ HEAP32[i5 >> 2] = i28;
+ _luaC_step(i1);
+ HEAP32[i5 >> 2] = HEAP32[i24 >> 2];
+ }
+ i25 = HEAP32[i20 >> 2] | 0;
+ HEAP32[i5 >> 2] = HEAP32[i24 >> 2];
+ continue L3;
+ }
+ case 25:
+ {
+ i27 = i28 >>> 23;
+ if ((i27 & 256 | 0) == 0) {
+ i27 = i25 + (i27 << 4) | 0;
+ } else {
+ i27 = i23 + ((i27 & 255) << 4) | 0;
+ }
+ i28 = i28 >>> 14;
+ if ((i28 & 256 | 0) == 0) {
+ i25 = i25 + ((i28 & 511) << 4) | 0;
+ } else {
+ i25 = i23 + ((i28 & 255) << 4) | 0;
+ }
+ i36 = (_luaV_lessthan(i1, i27, i25) | 0) == (i26 | 0);
+ i26 = HEAP32[i21 >> 2] | 0;
+ if (i36) {
+ i25 = HEAP32[i26 >> 2] | 0;
+ i27 = i25 >>> 6 & 255;
+ if ((i27 | 0) != 0) {
+ _luaF_close(i1, (HEAP32[i20 >> 2] | 0) + (i27 + -1 << 4) | 0);
+ i26 = HEAP32[i21 >> 2] | 0;
+ }
+ i25 = i26 + ((i25 >>> 14) + -131070 << 2) | 0;
+ } else {
+ i25 = i26 + 4 | 0;
+ }
+ HEAP32[i21 >> 2] = i25;
+ i25 = HEAP32[i20 >> 2] | 0;
+ continue L3;
+ }
+ case 27:
+ {
+ i29 = HEAP32[i25 + (i26 << 4) + 8 >> 2] | 0;
+ i26 = (i29 | 0) == 0;
+ if ((i28 & 8372224 | 0) == 0) {
+ if (!i26) {
+ if (!((i29 | 0) == 1 ? (HEAP32[i27 >> 2] | 0) == 0 : 0)) {
+ i15 = 192;
+ }
+ }
+ } else {
+ if (!i26) {
+ if ((i29 | 0) == 1 ? (HEAP32[i27 >> 2] | 0) == 0 : 0) {
+ i15 = 192;
+ }
+ } else {
+ i15 = 192;
+ }
+ }
+ if ((i15 | 0) == 192) {
+ i15 = 0;
+ HEAP32[i21 >> 2] = (HEAP32[i21 >> 2] | 0) + 4;
+ continue L3;
+ }
+ i27 = HEAP32[i21 >> 2] | 0;
+ i26 = HEAP32[i27 >> 2] | 0;
+ i28 = i26 >>> 6 & 255;
+ if ((i28 | 0) != 0) {
+ _luaF_close(i1, (HEAP32[i20 >> 2] | 0) + (i28 + -1 << 4) | 0);
+ i27 = HEAP32[i21 >> 2] | 0;
+ }
+ HEAP32[i21 >> 2] = i27 + ((i26 >>> 14) + -131070 << 2);
+ continue L3;
+ }
+ case 26:
+ {
+ i27 = i28 >>> 23;
+ if ((i27 & 256 | 0) == 0) {
+ i27 = i25 + (i27 << 4) | 0;
+ } else {
+ i27 = i23 + ((i27 & 255) << 4) | 0;
+ }
+ i28 = i28 >>> 14;
+ if ((i28 & 256 | 0) == 0) {
+ i25 = i25 + ((i28 & 511) << 4) | 0;
+ } else {
+ i25 = i23 + ((i28 & 255) << 4) | 0;
+ }
+ i36 = (_luaV_lessequal(i1, i27, i25) | 0) == (i26 | 0);
+ i26 = HEAP32[i21 >> 2] | 0;
+ if (i36) {
+ i25 = HEAP32[i26 >> 2] | 0;
+ i27 = i25 >>> 6 & 255;
+ if ((i27 | 0) != 0) {
+ _luaF_close(i1, (HEAP32[i20 >> 2] | 0) + (i27 + -1 << 4) | 0);
+ i26 = HEAP32[i21 >> 2] | 0;
+ }
+ i25 = i26 + ((i25 >>> 14) + -131070 << 2) | 0;
+ } else {
+ i25 = i26 + 4 | 0;
+ }
+ HEAP32[i21 >> 2] = i25;
+ i25 = HEAP32[i20 >> 2] | 0;
+ continue L3;
+ }
+ case 28:
+ {
+ i30 = i28 >>> 23;
+ i29 = i25 + (i30 << 4) | 0;
+ i30 = HEAP32[i25 + (i30 << 4) + 8 >> 2] | 0;
+ i31 = (i30 | 0) == 0;
+ if ((i28 & 8372224 | 0) == 0) {
+ if (!i31) {
+ if (!((i30 | 0) == 1 ? (HEAP32[i29 >> 2] | 0) == 0 : 0)) {
+ i15 = 203;
+ }
+ }
+ } else {
+ if (!i31) {
+ if ((i30 | 0) == 1 ? (HEAP32[i29 >> 2] | 0) == 0 : 0) {
+ i15 = 203;
+ }
+ } else {
+ i15 = 203;
+ }
+ }
+ if ((i15 | 0) == 203) {
+ i15 = 0;
+ HEAP32[i21 >> 2] = (HEAP32[i21 >> 2] | 0) + 4;
+ continue L3;
+ }
+ i36 = i29;
+ i28 = HEAP32[i36 + 4 >> 2] | 0;
+ HEAP32[i27 >> 2] = HEAP32[i36 >> 2];
+ HEAP32[i27 + 4 >> 2] = i28;
+ HEAP32[i25 + (i26 << 4) + 8 >> 2] = i30;
+ i27 = HEAP32[i21 >> 2] | 0;
+ i26 = HEAP32[i27 >> 2] | 0;
+ i28 = i26 >>> 6 & 255;
+ if ((i28 | 0) != 0) {
+ _luaF_close(i1, (HEAP32[i20 >> 2] | 0) + (i28 + -1 << 4) | 0);
+ i27 = HEAP32[i21 >> 2] | 0;
+ }
+ HEAP32[i21 >> 2] = i27 + ((i26 >>> 14) + -131070 << 2);
+ continue L3;
+ }
+ case 30:
+ {
+ i28 = i28 >>> 23;
+ if ((i28 | 0) != 0) {
+ HEAP32[i5 >> 2] = i25 + (i26 + i28 << 4);
+ }
+ if ((_luaD_precall(i1, i27, -1) | 0) == 0) {
+ i15 = 218;
+ break L3;
+ }
+ i25 = HEAP32[i20 >> 2] | 0;
+ continue L3;
+ }
+ case 29:
+ {
+ i29 = i28 >>> 23;
+ i28 = i28 >>> 14 & 511;
+ if ((i29 | 0) != 0) {
+ HEAP32[i5 >> 2] = i25 + (i26 + i29 << 4);
+ }
+ if ((_luaD_precall(i1, i27, i28 + -1 | 0) | 0) == 0) {
+ i15 = 213;
+ break L3;
+ }
+ if ((i28 | 0) != 0) {
+ HEAP32[i5 >> 2] = HEAP32[i24 >> 2];
+ }
+ i25 = HEAP32[i20 >> 2] | 0;
+ continue L3;
+ }
+ case 32:
+ {
+ d39 = +HEAPF64[i25 + (i26 + 2 << 4) >> 3];
+ d38 = d39 + +HEAPF64[i27 >> 3];
+ d37 = +HEAPF64[i25 + (i26 + 1 << 4) >> 3];
+ if (d39 > 0.0) {
+ if (!(d38 <= d37)) {
+ continue L3;
+ }
+ } else {
+ if (!(d37 <= d38)) {
+ continue L3;
+ }
+ }
+ HEAP32[i21 >> 2] = (HEAP32[i21 >> 2] | 0) + ((i28 >>> 14) + -131071 << 2);
+ HEAPF64[i27 >> 3] = d38;
+ HEAP32[i25 + (i26 << 4) + 8 >> 2] = 3;
+ i36 = i26 + 3 | 0;
+ HEAPF64[i25 + (i36 << 4) >> 3] = d38;
+ HEAP32[i25 + (i36 << 4) + 8 >> 2] = 3;
+ continue L3;
+ }
+ case 33:
+ {
+ i32 = i26 + 1 | 0;
+ i30 = i25 + (i32 << 4) | 0;
+ i31 = i26 + 2 | 0;
+ i29 = i25 + (i31 << 4) | 0;
+ i26 = i25 + (i26 << 4) + 8 | 0;
+ i33 = HEAP32[i26 >> 2] | 0;
+ if ((i33 | 0) != 3) {
+ if ((i33 & 15 | 0) != 4) {
+ i15 = 239;
+ break L1;
+ }
+ i36 = HEAP32[i27 >> 2] | 0;
+ if ((_luaO_str2d(i36 + 16 | 0, HEAP32[i36 + 12 >> 2] | 0, i8) | 0) == 0) {
+ i15 = 239;
+ break L1;
+ }
+ HEAPF64[i27 >> 3] = +HEAPF64[i8 >> 3];
+ HEAP32[i26 >> 2] = 3;
+ if ((i27 | 0) == 0) {
+ i15 = 239;
+ break L1;
+ }
+ }
+ i33 = i25 + (i32 << 4) + 8 | 0;
+ i32 = HEAP32[i33 >> 2] | 0;
+ if ((i32 | 0) != 3) {
+ if ((i32 & 15 | 0) != 4) {
+ i15 = 244;
+ break L1;
+ }
+ i36 = HEAP32[i30 >> 2] | 0;
+ if ((_luaO_str2d(i36 + 16 | 0, HEAP32[i36 + 12 >> 2] | 0, i9) | 0) == 0) {
+ i15 = 244;
+ break L1;
+ }
+ HEAPF64[i30 >> 3] = +HEAPF64[i9 >> 3];
+ HEAP32[i33 >> 2] = 3;
+ }
+ i31 = i25 + (i31 << 4) + 8 | 0;
+ i30 = HEAP32[i31 >> 2] | 0;
+ if ((i30 | 0) != 3) {
+ if ((i30 & 15 | 0) != 4) {
+ i15 = 249;
+ break L1;
+ }
+ i36 = HEAP32[i29 >> 2] | 0;
+ if ((_luaO_str2d(i36 + 16 | 0, HEAP32[i36 + 12 >> 2] | 0, i10) | 0) == 0) {
+ i15 = 249;
+ break L1;
+ }
+ HEAPF64[i29 >> 3] = +HEAPF64[i10 >> 3];
+ HEAP32[i31 >> 2] = 3;
+ }
+ HEAPF64[i27 >> 3] = +HEAPF64[i27 >> 3] - +HEAPF64[i29 >> 3];
+ HEAP32[i26 >> 2] = 3;
+ HEAP32[i21 >> 2] = (HEAP32[i21 >> 2] | 0) + ((i28 >>> 14) + -131071 << 2);
+ continue L3;
+ }
+ case 31:
+ {
+ i15 = 223;
+ break L3;
+ }
+ case 34:
+ {
+ i35 = i26 + 3 | 0;
+ i36 = i25 + (i35 << 4) | 0;
+ i33 = i26 + 2 | 0;
+ i34 = i26 + 5 | 0;
+ i32 = i25 + (i33 << 4) | 0;
+ i31 = HEAP32[i32 + 4 >> 2] | 0;
+ i30 = i25 + (i34 << 4) | 0;
+ HEAP32[i30 >> 2] = HEAP32[i32 >> 2];
+ HEAP32[i30 + 4 >> 2] = i31;
+ HEAP32[i25 + (i34 << 4) + 8 >> 2] = HEAP32[i25 + (i33 << 4) + 8 >> 2];
+ i34 = i26 + 1 | 0;
+ i33 = i26 + 4 | 0;
+ i30 = i25 + (i34 << 4) | 0;
+ i31 = HEAP32[i30 + 4 >> 2] | 0;
+ i32 = i25 + (i33 << 4) | 0;
+ HEAP32[i32 >> 2] = HEAP32[i30 >> 2];
+ HEAP32[i32 + 4 >> 2] = i31;
+ HEAP32[i25 + (i33 << 4) + 8 >> 2] = HEAP32[i25 + (i34 << 4) + 8 >> 2];
+ i33 = i27;
+ i34 = HEAP32[i33 + 4 >> 2] | 0;
+ i27 = i36;
+ HEAP32[i27 >> 2] = HEAP32[i33 >> 2];
+ HEAP32[i27 + 4 >> 2] = i34;
+ HEAP32[i25 + (i35 << 4) + 8 >> 2] = HEAP32[i25 + (i26 << 4) + 8 >> 2];
+ HEAP32[i5 >> 2] = i25 + (i26 + 6 << 4);
+ _luaD_call(i1, i36, i28 >>> 14 & 511, 1);
+ i36 = HEAP32[i20 >> 2] | 0;
+ HEAP32[i5 >> 2] = HEAP32[i24 >> 2];
+ i27 = HEAP32[i21 >> 2] | 0;
+ HEAP32[i21 >> 2] = i27 + 4;
+ i27 = HEAP32[i27 >> 2] | 0;
+ i25 = i36;
+ i28 = i27;
+ i27 = i36 + ((i27 >>> 6 & 255) << 4) | 0;
+ break;
+ }
+ case 35:
+ {
+ break;
+ }
+ case 36:
+ {
+ i29 = i28 >>> 23;
+ i28 = i28 >>> 14 & 511;
+ if ((i29 | 0) == 0) {
+ i29 = ((HEAP32[i5 >> 2] | 0) - i27 >> 4) + -1 | 0;
+ }
+ if ((i28 | 0) == 0) {
+ i28 = HEAP32[i21 >> 2] | 0;
+ HEAP32[i21 >> 2] = i28 + 4;
+ i28 = (HEAP32[i28 >> 2] | 0) >>> 6;
+ }
+ i27 = HEAP32[i27 >> 2] | 0;
+ i30 = i29 + -50 + (i28 * 50 | 0) | 0;
+ if ((i30 | 0) > (HEAP32[i27 + 28 >> 2] | 0)) {
+ _luaH_resizearray(i1, i27, i30);
+ }
+ if ((i29 | 0) > 0) {
+ i28 = i27 + 5 | 0;
+ while (1) {
+ i36 = i29 + i26 | 0;
+ i32 = i25 + (i36 << 4) | 0;
+ i31 = i30 + -1 | 0;
+ _luaH_setint(i1, i27, i30, i32);
+ if (((HEAP32[i25 + (i36 << 4) + 8 >> 2] & 64 | 0) != 0 ? !((HEAP8[(HEAP32[i32 >> 2] | 0) + 5 | 0] & 3) == 0) : 0) ? !((HEAP8[i28] & 4) == 0) : 0) {
+ _luaC_barrierback_(i1, i27);
+ }
+ i29 = i29 + -1 | 0;
+ if ((i29 | 0) > 0) {
+ i30 = i31;
+ } else {
+ break;
+ }
+ }
+ }
+ HEAP32[i5 >> 2] = HEAP32[i24 >> 2];
+ continue L3;
+ }
+ case 37:
+ {
+ i29 = HEAP32[(HEAP32[(HEAP32[i18 >> 2] | 0) + 16 >> 2] | 0) + (i28 >>> 14 << 2) >> 2] | 0;
+ i28 = i29 + 32 | 0;
+ i33 = HEAP32[i28 >> 2] | 0;
+ i30 = HEAP32[i29 + 40 >> 2] | 0;
+ i31 = HEAP32[i29 + 28 >> 2] | 0;
+ L323 : do {
+ if ((i33 | 0) == 0) {
+ i15 = 276;
+ } else {
+ if ((i30 | 0) > 0) {
+ i34 = i33 + 16 | 0;
+ i32 = 0;
+ while (1) {
+ i35 = HEAPU8[i31 + (i32 << 3) + 5 | 0] | 0;
+ if ((HEAP8[i31 + (i32 << 3) + 4 | 0] | 0) == 0) {
+ i36 = HEAP32[(HEAP32[i22 + (i35 << 2) >> 2] | 0) + 8 >> 2] | 0;
+ } else {
+ i36 = i25 + (i35 << 4) | 0;
+ }
+ i35 = i32 + 1 | 0;
+ if ((HEAP32[(HEAP32[i34 + (i32 << 2) >> 2] | 0) + 8 >> 2] | 0) != (i36 | 0)) {
+ i15 = 276;
+ break L323;
+ }
+ if ((i35 | 0) < (i30 | 0)) {
+ i32 = i35;
+ } else {
+ break;
+ }
+ }
+ }
+ HEAP32[i27 >> 2] = i33;
+ HEAP32[i25 + (i26 << 4) + 8 >> 2] = 70;
+ }
+ } while (0);
+ if ((i15 | 0) == 276) {
+ i15 = 0;
+ i32 = _luaF_newLclosure(i1, i30) | 0;
+ HEAP32[i32 + 12 >> 2] = i29;
+ HEAP32[i27 >> 2] = i32;
+ HEAP32[i25 + (i26 << 4) + 8 >> 2] = 70;
+ if ((i30 | 0) > 0) {
+ i27 = i32 + 16 | 0;
+ i34 = 0;
+ do {
+ i33 = HEAPU8[i31 + (i34 << 3) + 5 | 0] | 0;
+ if ((HEAP8[i31 + (i34 << 3) + 4 | 0] | 0) == 0) {
+ HEAP32[i27 + (i34 << 2) >> 2] = HEAP32[i22 + (i33 << 2) >> 2];
+ } else {
+ HEAP32[i27 + (i34 << 2) >> 2] = _luaF_findupval(i1, i25 + (i33 << 4) | 0) | 0;
+ }
+ i34 = i34 + 1 | 0;
+ } while ((i34 | 0) != (i30 | 0));
+ }
+ if (!((HEAP8[i29 + 5 | 0] & 4) == 0)) {
+ _luaC_barrierproto_(i1, i29, i32);
+ }
+ HEAP32[i28 >> 2] = i32;
+ }
+ if ((HEAP32[(HEAP32[i6 >> 2] | 0) + 12 >> 2] | 0) > 0) {
+ HEAP32[i5 >> 2] = i25 + (i26 + 1 << 4);
+ _luaC_step(i1);
+ HEAP32[i5 >> 2] = HEAP32[i24 >> 2];
+ }
+ i25 = HEAP32[i20 >> 2] | 0;
+ continue L3;
+ }
+ case 38:
+ {
+ i36 = i28 >>> 23;
+ i29 = i36 + -1 | 0;
+ i30 = (i25 - (HEAP32[i19 >> 2] | 0) >> 4) - (HEAPU8[(HEAP32[i18 >> 2] | 0) + 76 | 0] | 0) | 0;
+ i28 = i30 + -1 | 0;
+ if ((i36 | 0) == 0) {
+ if (((HEAP32[i11 >> 2] | 0) - (HEAP32[i5 >> 2] | 0) >> 4 | 0) <= (i28 | 0)) {
+ _luaD_growstack(i1, i28);
+ }
+ i27 = HEAP32[i20 >> 2] | 0;
+ HEAP32[i5 >> 2] = i27 + (i28 + i26 << 4);
+ i29 = i28;
+ i25 = i27;
+ i27 = i27 + (i26 << 4) | 0;
+ }
+ if ((i29 | 0) <= 0) {
+ continue L3;
+ }
+ i26 = 1 - i30 | 0;
+ i30 = 0;
+ while (1) {
+ if ((i30 | 0) < (i28 | 0)) {
+ i36 = i30 + i26 | 0;
+ i33 = i25 + (i36 << 4) | 0;
+ i34 = HEAP32[i33 + 4 >> 2] | 0;
+ i35 = i27 + (i30 << 4) | 0;
+ HEAP32[i35 >> 2] = HEAP32[i33 >> 2];
+ HEAP32[i35 + 4 >> 2] = i34;
+ HEAP32[i27 + (i30 << 4) + 8 >> 2] = HEAP32[i25 + (i36 << 4) + 8 >> 2];
+ } else {
+ HEAP32[i27 + (i30 << 4) + 8 >> 2] = 0;
+ }
+ i30 = i30 + 1 | 0;
+ if ((i30 | 0) == (i29 | 0)) {
+ continue L3;
+ }
+ }
+ }
+ default:
+ {
+ continue L3;
+ }
+ }
+ i26 = HEAP32[i27 + 24 >> 2] | 0;
+ if ((i26 | 0) == 0) {
+ continue;
+ }
+ i34 = i27 + 16 | 0;
+ i35 = HEAP32[i34 + 4 >> 2] | 0;
+ i36 = i27;
+ HEAP32[i36 >> 2] = HEAP32[i34 >> 2];
+ HEAP32[i36 + 4 >> 2] = i35;
+ HEAP32[i27 + 8 >> 2] = i26;
+ HEAP32[i21 >> 2] = (HEAP32[i21 >> 2] | 0) + ((i28 >>> 14) + -131071 << 2);
+ }
+ if ((i15 | 0) == 213) {
+ i15 = 0;
+ i19 = HEAP32[i3 >> 2] | 0;
+ i36 = i19 + 18 | 0;
+ HEAP8[i36] = HEAPU8[i36] | 4;
+ continue;
+ } else if ((i15 | 0) == 218) {
+ i15 = 0;
+ i22 = HEAP32[i3 >> 2] | 0;
+ i19 = HEAP32[i22 + 8 >> 2] | 0;
+ i23 = HEAP32[i22 >> 2] | 0;
+ i24 = HEAP32[i19 >> 2] | 0;
+ i20 = i22 + 24 | 0;
+ i21 = (HEAP32[i20 >> 2] | 0) + (HEAPU8[(HEAP32[(HEAP32[i23 >> 2] | 0) + 12 >> 2] | 0) + 76 | 0] << 4) | 0;
+ if ((HEAP32[(HEAP32[i18 >> 2] | 0) + 56 >> 2] | 0) > 0) {
+ _luaF_close(i1, HEAP32[i19 + 24 >> 2] | 0);
+ }
+ if (i23 >>> 0 < i21 >>> 0) {
+ i25 = i23;
+ i18 = 0;
+ do {
+ i34 = i25;
+ i35 = HEAP32[i34 + 4 >> 2] | 0;
+ i36 = i24 + (i18 << 4) | 0;
+ HEAP32[i36 >> 2] = HEAP32[i34 >> 2];
+ HEAP32[i36 + 4 >> 2] = i35;
+ HEAP32[i24 + (i18 << 4) + 8 >> 2] = HEAP32[i23 + (i18 << 4) + 8 >> 2];
+ i18 = i18 + 1 | 0;
+ i25 = i23 + (i18 << 4) | 0;
+ } while (i25 >>> 0 < i21 >>> 0);
+ }
+ i36 = i23;
+ HEAP32[i19 + 24 >> 2] = i24 + ((HEAP32[i20 >> 2] | 0) - i36 >> 4 << 4);
+ i36 = i24 + ((HEAP32[i5 >> 2] | 0) - i36 >> 4 << 4) | 0;
+ HEAP32[i5 >> 2] = i36;
+ HEAP32[i19 + 4 >> 2] = i36;
+ HEAP32[i19 + 28 >> 2] = HEAP32[i22 + 28 >> 2];
+ i36 = i19 + 18 | 0;
+ HEAP8[i36] = HEAPU8[i36] | 64;
+ HEAP32[i3 >> 2] = i19;
+ continue;
+ } else if ((i15 | 0) == 223) {
+ i15 = 0;
+ i20 = i28 >>> 23;
+ if ((i20 | 0) != 0) {
+ HEAP32[i5 >> 2] = i25 + (i20 + -1 + i26 << 4);
+ }
+ if ((HEAP32[(HEAP32[i18 >> 2] | 0) + 56 >> 2] | 0) > 0) {
+ _luaF_close(i1, i25);
+ }
+ i18 = _luaD_poscall(i1, i27) | 0;
+ if ((HEAP8[i19 + 18 | 0] & 4) == 0) {
+ i15 = 228;
+ break;
+ }
+ i19 = HEAP32[i3 >> 2] | 0;
+ if ((i18 | 0) == 0) {
+ continue;
+ }
+ HEAP32[i5 >> 2] = HEAP32[i19 + 4 >> 2];
+ continue;
+ }
+ }
+ if ((i15 | 0) == 23) {
+ if (!i27) {
+ i36 = HEAP32[i29 >> 2] | 0;
+ i36 = i36 + -4 | 0;
+ HEAP32[i29 >> 2] = i36;
+ i36 = HEAP8[i26] | 0;
+ i36 = i36 & 255;
+ i36 = i36 | 128;
+ i36 = i36 & 255;
+ HEAP8[i26] = i36;
+ i36 = HEAP32[i5 >> 2] | 0;
+ i36 = i36 + -16 | 0;
+ HEAP32[i25 >> 2] = i36;
+ _luaD_throw(i1, 1);
+ }
+ HEAP32[i17 >> 2] = 1;
+ i36 = HEAP32[i29 >> 2] | 0;
+ i36 = i36 + -4 | 0;
+ HEAP32[i29 >> 2] = i36;
+ i36 = HEAP8[i26] | 0;
+ i36 = i36 & 255;
+ i36 = i36 | 128;
+ i36 = i36 & 255;
+ HEAP8[i26] = i36;
+ i36 = HEAP32[i5 >> 2] | 0;
+ i36 = i36 + -16 | 0;
+ HEAP32[i25 >> 2] = i36;
+ _luaD_throw(i1, 1);
+ } else if ((i15 | 0) == 228) {
+ STACKTOP = i12;
+ return;
+ } else if ((i15 | 0) == 239) {
+ _luaG_runerror(i1, 9040, i13);
+ } else if ((i15 | 0) == 244) {
+ _luaG_runerror(i1, 9080, i13);
+ } else if ((i15 | 0) == 249) {
+ _luaG_runerror(i1, 9112, i13);
+ }
+}
+function ___floatscan(i8, i2, i11) {
+ i8 = i8 | 0;
+ i2 = i2 | 0;
+ i11 = i11 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i9 = 0, i10 = 0, i12 = 0, i13 = 0, d14 = 0.0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0, i23 = 0, i24 = 0, i25 = 0, i26 = 0, i27 = 0, d28 = 0.0, i29 = 0, d30 = 0.0, d31 = 0.0, d32 = 0.0, d33 = 0.0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 512 | 0;
+ i5 = i1;
+ if ((i2 | 0) == 1) {
+ i2 = 53;
+ i3 = -1074;
+ } else if ((i2 | 0) == 2) {
+ i2 = 53;
+ i3 = -1074;
+ } else if ((i2 | 0) == 0) {
+ i2 = 24;
+ i3 = -149;
+ } else {
+ d31 = 0.0;
+ STACKTOP = i1;
+ return +d31;
+ }
+ i9 = i8 + 4 | 0;
+ i10 = i8 + 100 | 0;
+ do {
+ i4 = HEAP32[i9 >> 2] | 0;
+ if (i4 >>> 0 < (HEAP32[i10 >> 2] | 0) >>> 0) {
+ HEAP32[i9 >> 2] = i4 + 1;
+ i21 = HEAPU8[i4] | 0;
+ } else {
+ i21 = ___shgetc(i8) | 0;
+ }
+ } while ((_isspace(i21 | 0) | 0) != 0);
+ do {
+ if ((i21 | 0) == 43 | (i21 | 0) == 45) {
+ i4 = 1 - (((i21 | 0) == 45) << 1) | 0;
+ i7 = HEAP32[i9 >> 2] | 0;
+ if (i7 >>> 0 < (HEAP32[i10 >> 2] | 0) >>> 0) {
+ HEAP32[i9 >> 2] = i7 + 1;
+ i21 = HEAPU8[i7] | 0;
+ break;
+ } else {
+ i21 = ___shgetc(i8) | 0;
+ break;
+ }
+ } else {
+ i4 = 1;
+ }
+ } while (0);
+ i7 = 0;
+ do {
+ if ((i21 | 32 | 0) != (HEAP8[13408 + i7 | 0] | 0)) {
+ break;
+ }
+ do {
+ if (i7 >>> 0 < 7) {
+ i12 = HEAP32[i9 >> 2] | 0;
+ if (i12 >>> 0 < (HEAP32[i10 >> 2] | 0) >>> 0) {
+ HEAP32[i9 >> 2] = i12 + 1;
+ i21 = HEAPU8[i12] | 0;
+ break;
+ } else {
+ i21 = ___shgetc(i8) | 0;
+ break;
+ }
+ }
+ } while (0);
+ i7 = i7 + 1 | 0;
+ } while (i7 >>> 0 < 8);
+ do {
+ if ((i7 | 0) == 3) {
+ i13 = 23;
+ } else if ((i7 | 0) != 8) {
+ i12 = (i11 | 0) == 0;
+ if (!(i7 >>> 0 < 4 | i12)) {
+ if ((i7 | 0) == 8) {
+ break;
+ } else {
+ i13 = 23;
+ break;
+ }
+ }
+ L34 : do {
+ if ((i7 | 0) == 0) {
+ i7 = 0;
+ do {
+ if ((i21 | 32 | 0) != (HEAP8[13424 + i7 | 0] | 0)) {
+ break L34;
+ }
+ do {
+ if (i7 >>> 0 < 2) {
+ i15 = HEAP32[i9 >> 2] | 0;
+ if (i15 >>> 0 < (HEAP32[i10 >> 2] | 0) >>> 0) {
+ HEAP32[i9 >> 2] = i15 + 1;
+ i21 = HEAPU8[i15] | 0;
+ break;
+ } else {
+ i21 = ___shgetc(i8) | 0;
+ break;
+ }
+ }
+ } while (0);
+ i7 = i7 + 1 | 0;
+ } while (i7 >>> 0 < 3);
+ }
+ } while (0);
+ if ((i7 | 0) == 0) {
+ do {
+ if ((i21 | 0) == 48) {
+ i7 = HEAP32[i9 >> 2] | 0;
+ if (i7 >>> 0 < (HEAP32[i10 >> 2] | 0) >>> 0) {
+ HEAP32[i9 >> 2] = i7 + 1;
+ i7 = HEAPU8[i7] | 0;
+ } else {
+ i7 = ___shgetc(i8) | 0;
+ }
+ if ((i7 | 32 | 0) != 120) {
+ if ((HEAP32[i10 >> 2] | 0) == 0) {
+ i21 = 48;
+ break;
+ }
+ HEAP32[i9 >> 2] = (HEAP32[i9 >> 2] | 0) + -1;
+ i21 = 48;
+ break;
+ }
+ i5 = HEAP32[i9 >> 2] | 0;
+ if (i5 >>> 0 < (HEAP32[i10 >> 2] | 0) >>> 0) {
+ HEAP32[i9 >> 2] = i5 + 1;
+ i21 = HEAPU8[i5] | 0;
+ i19 = 0;
+ } else {
+ i21 = ___shgetc(i8) | 0;
+ i19 = 0;
+ }
+ while (1) {
+ if ((i21 | 0) == 46) {
+ i13 = 70;
+ break;
+ } else if ((i21 | 0) != 48) {
+ i5 = 0;
+ i7 = 0;
+ i15 = 0;
+ i16 = 0;
+ i18 = 0;
+ i20 = 0;
+ d28 = 1.0;
+ i17 = 0;
+ d14 = 0.0;
+ break;
+ }
+ i5 = HEAP32[i9 >> 2] | 0;
+ if (i5 >>> 0 < (HEAP32[i10 >> 2] | 0) >>> 0) {
+ HEAP32[i9 >> 2] = i5 + 1;
+ i21 = HEAPU8[i5] | 0;
+ i19 = 1;
+ continue;
+ } else {
+ i21 = ___shgetc(i8) | 0;
+ i19 = 1;
+ continue;
+ }
+ }
+ L66 : do {
+ if ((i13 | 0) == 70) {
+ i5 = HEAP32[i9 >> 2] | 0;
+ if (i5 >>> 0 < (HEAP32[i10 >> 2] | 0) >>> 0) {
+ HEAP32[i9 >> 2] = i5 + 1;
+ i21 = HEAPU8[i5] | 0;
+ } else {
+ i21 = ___shgetc(i8) | 0;
+ }
+ if ((i21 | 0) == 48) {
+ i15 = -1;
+ i16 = -1;
+ while (1) {
+ i5 = HEAP32[i9 >> 2] | 0;
+ if (i5 >>> 0 < (HEAP32[i10 >> 2] | 0) >>> 0) {
+ HEAP32[i9 >> 2] = i5 + 1;
+ i21 = HEAPU8[i5] | 0;
+ } else {
+ i21 = ___shgetc(i8) | 0;
+ }
+ if ((i21 | 0) != 48) {
+ i5 = 0;
+ i7 = 0;
+ i19 = 1;
+ i18 = 1;
+ i20 = 0;
+ d28 = 1.0;
+ i17 = 0;
+ d14 = 0.0;
+ break L66;
+ }
+ i29 = _i64Add(i15 | 0, i16 | 0, -1, -1) | 0;
+ i15 = i29;
+ i16 = tempRet0;
+ }
+ } else {
+ i5 = 0;
+ i7 = 0;
+ i15 = 0;
+ i16 = 0;
+ i18 = 1;
+ i20 = 0;
+ d28 = 1.0;
+ i17 = 0;
+ d14 = 0.0;
+ }
+ }
+ } while (0);
+ L79 : while (1) {
+ i24 = i21 + -48 | 0;
+ do {
+ if (!(i24 >>> 0 < 10)) {
+ i23 = i21 | 32;
+ i22 = (i21 | 0) == 46;
+ if (!((i23 + -97 | 0) >>> 0 < 6 | i22)) {
+ break L79;
+ }
+ if (i22) {
+ if ((i18 | 0) == 0) {
+ i15 = i7;
+ i16 = i5;
+ i18 = 1;
+ break;
+ } else {
+ i21 = 46;
+ break L79;
+ }
+ } else {
+ i24 = (i21 | 0) > 57 ? i23 + -87 | 0 : i24;
+ i13 = 84;
+ break;
+ }
+ } else {
+ i13 = 84;
+ }
+ } while (0);
+ if ((i13 | 0) == 84) {
+ i13 = 0;
+ do {
+ if (!((i5 | 0) < 0 | (i5 | 0) == 0 & i7 >>> 0 < 8)) {
+ if ((i5 | 0) < 0 | (i5 | 0) == 0 & i7 >>> 0 < 14) {
+ d31 = d28 * .0625;
+ d30 = d31;
+ d14 = d14 + d31 * +(i24 | 0);
+ break;
+ }
+ if ((i24 | 0) != 0 & (i20 | 0) == 0) {
+ i20 = 1;
+ d30 = d28;
+ d14 = d14 + d28 * .5;
+ } else {
+ d30 = d28;
+ }
+ } else {
+ d30 = d28;
+ i17 = i24 + (i17 << 4) | 0;
+ }
+ } while (0);
+ i7 = _i64Add(i7 | 0, i5 | 0, 1, 0) | 0;
+ i5 = tempRet0;
+ i19 = 1;
+ d28 = d30;
+ }
+ i21 = HEAP32[i9 >> 2] | 0;
+ if (i21 >>> 0 < (HEAP32[i10 >> 2] | 0) >>> 0) {
+ HEAP32[i9 >> 2] = i21 + 1;
+ i21 = HEAPU8[i21] | 0;
+ continue;
+ } else {
+ i21 = ___shgetc(i8) | 0;
+ continue;
+ }
+ }
+ if ((i19 | 0) == 0) {
+ i2 = (HEAP32[i10 >> 2] | 0) == 0;
+ if (!i2) {
+ HEAP32[i9 >> 2] = (HEAP32[i9 >> 2] | 0) + -1;
+ }
+ if (!i12) {
+ if (!i2 ? (i6 = HEAP32[i9 >> 2] | 0, HEAP32[i9 >> 2] = i6 + -1, (i18 | 0) != 0) : 0) {
+ HEAP32[i9 >> 2] = i6 + -2;
+ }
+ } else {
+ ___shlim(i8, 0);
+ }
+ d31 = +(i4 | 0) * 0.0;
+ STACKTOP = i1;
+ return +d31;
+ }
+ i13 = (i18 | 0) == 0;
+ i6 = i13 ? i7 : i15;
+ i13 = i13 ? i5 : i16;
+ if ((i5 | 0) < 0 | (i5 | 0) == 0 & i7 >>> 0 < 8) {
+ do {
+ i17 = i17 << 4;
+ i7 = _i64Add(i7 | 0, i5 | 0, 1, 0) | 0;
+ i5 = tempRet0;
+ } while ((i5 | 0) < 0 | (i5 | 0) == 0 & i7 >>> 0 < 8);
+ }
+ do {
+ if ((i21 | 32 | 0) == 112) {
+ i7 = _scanexp(i8, i11) | 0;
+ i5 = tempRet0;
+ if ((i7 | 0) == 0 & (i5 | 0) == -2147483648) {
+ if (i12) {
+ ___shlim(i8, 0);
+ d31 = 0.0;
+ STACKTOP = i1;
+ return +d31;
+ } else {
+ if ((HEAP32[i10 >> 2] | 0) == 0) {
+ i7 = 0;
+ i5 = 0;
+ break;
+ }
+ HEAP32[i9 >> 2] = (HEAP32[i9 >> 2] | 0) + -1;
+ i7 = 0;
+ i5 = 0;
+ break;
+ }
+ }
+ } else {
+ if ((HEAP32[i10 >> 2] | 0) == 0) {
+ i7 = 0;
+ i5 = 0;
+ } else {
+ HEAP32[i9 >> 2] = (HEAP32[i9 >> 2] | 0) + -1;
+ i7 = 0;
+ i5 = 0;
+ }
+ }
+ } while (0);
+ i6 = _bitshift64Shl(i6 | 0, i13 | 0, 2) | 0;
+ i6 = _i64Add(i6 | 0, tempRet0 | 0, -32, -1) | 0;
+ i5 = _i64Add(i6 | 0, tempRet0 | 0, i7 | 0, i5 | 0) | 0;
+ i6 = tempRet0;
+ if ((i17 | 0) == 0) {
+ d31 = +(i4 | 0) * 0.0;
+ STACKTOP = i1;
+ return +d31;
+ }
+ if ((i6 | 0) > 0 | (i6 | 0) == 0 & i5 >>> 0 > (0 - i3 | 0) >>> 0) {
+ HEAP32[(___errno_location() | 0) >> 2] = 34;
+ d31 = +(i4 | 0) * 1.7976931348623157e+308 * 1.7976931348623157e+308;
+ STACKTOP = i1;
+ return +d31;
+ }
+ i29 = i3 + -106 | 0;
+ i27 = ((i29 | 0) < 0) << 31 >> 31;
+ if ((i6 | 0) < (i27 | 0) | (i6 | 0) == (i27 | 0) & i5 >>> 0 < i29 >>> 0) {
+ HEAP32[(___errno_location() | 0) >> 2] = 34;
+ d31 = +(i4 | 0) * 2.2250738585072014e-308 * 2.2250738585072014e-308;
+ STACKTOP = i1;
+ return +d31;
+ }
+ if ((i17 | 0) > -1) {
+ do {
+ i17 = i17 << 1;
+ if (!(d14 >= .5)) {
+ d28 = d14;
+ } else {
+ d28 = d14 + -1.0;
+ i17 = i17 | 1;
+ }
+ d14 = d14 + d28;
+ i5 = _i64Add(i5 | 0, i6 | 0, -1, -1) | 0;
+ i6 = tempRet0;
+ } while ((i17 | 0) > -1);
+ }
+ i3 = _i64Subtract(32, 0, i3 | 0, ((i3 | 0) < 0) << 31 >> 31 | 0) | 0;
+ i3 = _i64Add(i5 | 0, i6 | 0, i3 | 0, tempRet0 | 0) | 0;
+ i29 = tempRet0;
+ if (0 > (i29 | 0) | 0 == (i29 | 0) & i2 >>> 0 > i3 >>> 0) {
+ i2 = (i3 | 0) < 0 ? 0 : i3;
+ }
+ if ((i2 | 0) < 53) {
+ d28 = +(i4 | 0);
+ d30 = +_copysign(+(+_scalbn(1.0, 84 - i2 | 0)), +d28);
+ if ((i2 | 0) < 32 & d14 != 0.0) {
+ i29 = i17 & 1;
+ i17 = (i29 ^ 1) + i17 | 0;
+ d14 = (i29 | 0) == 0 ? 0.0 : d14;
+ }
+ } else {
+ d28 = +(i4 | 0);
+ d30 = 0.0;
+ }
+ d14 = d28 * d14 + (d30 + d28 * +(i17 >>> 0)) - d30;
+ if (!(d14 != 0.0)) {
+ HEAP32[(___errno_location() | 0) >> 2] = 34;
+ }
+ d31 = +_scalbnl(d14, i5);
+ STACKTOP = i1;
+ return +d31;
+ }
+ } while (0);
+ i7 = i3 + i2 | 0;
+ i6 = 0 - i7 | 0;
+ i20 = 0;
+ while (1) {
+ if ((i21 | 0) == 46) {
+ i13 = 139;
+ break;
+ } else if ((i21 | 0) != 48) {
+ i25 = 0;
+ i22 = 0;
+ i19 = 0;
+ break;
+ }
+ i15 = HEAP32[i9 >> 2] | 0;
+ if (i15 >>> 0 < (HEAP32[i10 >> 2] | 0) >>> 0) {
+ HEAP32[i9 >> 2] = i15 + 1;
+ i21 = HEAPU8[i15] | 0;
+ i20 = 1;
+ continue;
+ } else {
+ i21 = ___shgetc(i8) | 0;
+ i20 = 1;
+ continue;
+ }
+ }
+ L168 : do {
+ if ((i13 | 0) == 139) {
+ i15 = HEAP32[i9 >> 2] | 0;
+ if (i15 >>> 0 < (HEAP32[i10 >> 2] | 0) >>> 0) {
+ HEAP32[i9 >> 2] = i15 + 1;
+ i21 = HEAPU8[i15] | 0;
+ } else {
+ i21 = ___shgetc(i8) | 0;
+ }
+ if ((i21 | 0) == 48) {
+ i25 = -1;
+ i22 = -1;
+ while (1) {
+ i15 = HEAP32[i9 >> 2] | 0;
+ if (i15 >>> 0 < (HEAP32[i10 >> 2] | 0) >>> 0) {
+ HEAP32[i9 >> 2] = i15 + 1;
+ i21 = HEAPU8[i15] | 0;
+ } else {
+ i21 = ___shgetc(i8) | 0;
+ }
+ if ((i21 | 0) != 48) {
+ i20 = 1;
+ i19 = 1;
+ break L168;
+ }
+ i29 = _i64Add(i25 | 0, i22 | 0, -1, -1) | 0;
+ i25 = i29;
+ i22 = tempRet0;
+ }
+ } else {
+ i25 = 0;
+ i22 = 0;
+ i19 = 1;
+ }
+ }
+ } while (0);
+ HEAP32[i5 >> 2] = 0;
+ i26 = i21 + -48 | 0;
+ i27 = (i21 | 0) == 46;
+ L182 : do {
+ if (i26 >>> 0 < 10 | i27) {
+ i15 = i5 + 496 | 0;
+ i24 = 0;
+ i23 = 0;
+ i18 = 0;
+ i17 = 0;
+ i16 = 0;
+ while (1) {
+ do {
+ if (i27) {
+ if ((i19 | 0) == 0) {
+ i25 = i24;
+ i22 = i23;
+ i19 = 1;
+ } else {
+ break L182;
+ }
+ } else {
+ i27 = _i64Add(i24 | 0, i23 | 0, 1, 0) | 0;
+ i23 = tempRet0;
+ i29 = (i21 | 0) != 48;
+ if ((i17 | 0) >= 125) {
+ if (!i29) {
+ i24 = i27;
+ break;
+ }
+ HEAP32[i15 >> 2] = HEAP32[i15 >> 2] | 1;
+ i24 = i27;
+ break;
+ }
+ i20 = i5 + (i17 << 2) | 0;
+ if ((i18 | 0) != 0) {
+ i26 = i21 + -48 + ((HEAP32[i20 >> 2] | 0) * 10 | 0) | 0;
+ }
+ HEAP32[i20 >> 2] = i26;
+ i18 = i18 + 1 | 0;
+ i21 = (i18 | 0) == 9;
+ i24 = i27;
+ i20 = 1;
+ i18 = i21 ? 0 : i18;
+ i17 = (i21 & 1) + i17 | 0;
+ i16 = i29 ? i27 : i16;
+ }
+ } while (0);
+ i21 = HEAP32[i9 >> 2] | 0;
+ if (i21 >>> 0 < (HEAP32[i10 >> 2] | 0) >>> 0) {
+ HEAP32[i9 >> 2] = i21 + 1;
+ i21 = HEAPU8[i21] | 0;
+ } else {
+ i21 = ___shgetc(i8) | 0;
+ }
+ i26 = i21 + -48 | 0;
+ i27 = (i21 | 0) == 46;
+ if (!(i26 >>> 0 < 10 | i27)) {
+ i13 = 162;
+ break;
+ }
+ }
+ } else {
+ i24 = 0;
+ i23 = 0;
+ i18 = 0;
+ i17 = 0;
+ i16 = 0;
+ i13 = 162;
+ }
+ } while (0);
+ if ((i13 | 0) == 162) {
+ i13 = (i19 | 0) == 0;
+ i25 = i13 ? i24 : i25;
+ i22 = i13 ? i23 : i22;
+ }
+ i13 = (i20 | 0) != 0;
+ if (i13 ? (i21 | 32 | 0) == 101 : 0) {
+ i15 = _scanexp(i8, i11) | 0;
+ i11 = tempRet0;
+ do {
+ if ((i15 | 0) == 0 & (i11 | 0) == -2147483648) {
+ if (i12) {
+ ___shlim(i8, 0);
+ d31 = 0.0;
+ STACKTOP = i1;
+ return +d31;
+ } else {
+ if ((HEAP32[i10 >> 2] | 0) == 0) {
+ i15 = 0;
+ i11 = 0;
+ break;
+ }
+ HEAP32[i9 >> 2] = (HEAP32[i9 >> 2] | 0) + -1;
+ i15 = 0;
+ i11 = 0;
+ break;
+ }
+ }
+ } while (0);
+ i9 = _i64Add(i15 | 0, i11 | 0, i25 | 0, i22 | 0) | 0;
+ i22 = tempRet0;
+ } else {
+ if ((i21 | 0) > -1 ? (HEAP32[i10 >> 2] | 0) != 0 : 0) {
+ HEAP32[i9 >> 2] = (HEAP32[i9 >> 2] | 0) + -1;
+ i9 = i25;
+ } else {
+ i9 = i25;
+ }
+ }
+ if (!i13) {
+ HEAP32[(___errno_location() | 0) >> 2] = 22;
+ ___shlim(i8, 0);
+ d31 = 0.0;
+ STACKTOP = i1;
+ return +d31;
+ }
+ i8 = HEAP32[i5 >> 2] | 0;
+ if ((i8 | 0) == 0) {
+ d31 = +(i4 | 0) * 0.0;
+ STACKTOP = i1;
+ return +d31;
+ }
+ do {
+ if ((i9 | 0) == (i24 | 0) & (i22 | 0) == (i23 | 0) & ((i23 | 0) < 0 | (i23 | 0) == 0 & i24 >>> 0 < 10)) {
+ if (!(i2 >>> 0 > 30) ? (i8 >>> i2 | 0) != 0 : 0) {
+ break;
+ }
+ d31 = +(i4 | 0) * +(i8 >>> 0);
+ STACKTOP = i1;
+ return +d31;
+ }
+ } while (0);
+ i29 = (i3 | 0) / -2 | 0;
+ i27 = ((i29 | 0) < 0) << 31 >> 31;
+ if ((i22 | 0) > (i27 | 0) | (i22 | 0) == (i27 | 0) & i9 >>> 0 > i29 >>> 0) {
+ HEAP32[(___errno_location() | 0) >> 2] = 34;
+ d31 = +(i4 | 0) * 1.7976931348623157e+308 * 1.7976931348623157e+308;
+ STACKTOP = i1;
+ return +d31;
+ }
+ i29 = i3 + -106 | 0;
+ i27 = ((i29 | 0) < 0) << 31 >> 31;
+ if ((i22 | 0) < (i27 | 0) | (i22 | 0) == (i27 | 0) & i9 >>> 0 < i29 >>> 0) {
+ HEAP32[(___errno_location() | 0) >> 2] = 34;
+ d31 = +(i4 | 0) * 2.2250738585072014e-308 * 2.2250738585072014e-308;
+ STACKTOP = i1;
+ return +d31;
+ }
+ if ((i18 | 0) != 0) {
+ if ((i18 | 0) < 9) {
+ i8 = i5 + (i17 << 2) | 0;
+ i10 = HEAP32[i8 >> 2] | 0;
+ do {
+ i10 = i10 * 10 | 0;
+ i18 = i18 + 1 | 0;
+ } while ((i18 | 0) != 9);
+ HEAP32[i8 >> 2] = i10;
+ }
+ i17 = i17 + 1 | 0;
+ }
+ do {
+ if ((i16 | 0) < 9 ? (i16 | 0) <= (i9 | 0) & (i9 | 0) < 18 : 0) {
+ if ((i9 | 0) == 9) {
+ d31 = +(i4 | 0) * +((HEAP32[i5 >> 2] | 0) >>> 0);
+ STACKTOP = i1;
+ return +d31;
+ }
+ if ((i9 | 0) < 9) {
+ d31 = +(i4 | 0) * +((HEAP32[i5 >> 2] | 0) >>> 0) / +(HEAP32[13440 + (8 - i9 << 2) >> 2] | 0);
+ STACKTOP = i1;
+ return +d31;
+ }
+ i10 = i2 + 27 + (Math_imul(i9, -3) | 0) | 0;
+ i8 = HEAP32[i5 >> 2] | 0;
+ if ((i10 | 0) <= 30 ? (i8 >>> i10 | 0) != 0 : 0) {
+ break;
+ }
+ d31 = +(i4 | 0) * +(i8 >>> 0) * +(HEAP32[13440 + (i9 + -10 << 2) >> 2] | 0);
+ STACKTOP = i1;
+ return +d31;
+ }
+ } while (0);
+ i8 = (i9 | 0) % 9 | 0;
+ if ((i8 | 0) == 0) {
+ i8 = 0;
+ i10 = 0;
+ } else {
+ i11 = (i9 | 0) > -1 ? i8 : i8 + 9 | 0;
+ i12 = HEAP32[13440 + (8 - i11 << 2) >> 2] | 0;
+ if ((i17 | 0) != 0) {
+ i10 = 1e9 / (i12 | 0) | 0;
+ i8 = 0;
+ i16 = 0;
+ i15 = 0;
+ while (1) {
+ i27 = i5 + (i15 << 2) | 0;
+ i13 = HEAP32[i27 >> 2] | 0;
+ i29 = ((i13 >>> 0) / (i12 >>> 0) | 0) + i16 | 0;
+ HEAP32[i27 >> 2] = i29;
+ i16 = Math_imul((i13 >>> 0) % (i12 >>> 0) | 0, i10) | 0;
+ i13 = i15 + 1 | 0;
+ if ((i15 | 0) == (i8 | 0) & (i29 | 0) == 0) {
+ i8 = i13 & 127;
+ i9 = i9 + -9 | 0;
+ }
+ if ((i13 | 0) == (i17 | 0)) {
+ break;
+ } else {
+ i15 = i13;
+ }
+ }
+ if ((i16 | 0) != 0) {
+ HEAP32[i5 + (i17 << 2) >> 2] = i16;
+ i17 = i17 + 1 | 0;
+ }
+ } else {
+ i8 = 0;
+ i17 = 0;
+ }
+ i10 = 0;
+ i9 = 9 - i11 + i9 | 0;
+ }
+ L280 : while (1) {
+ i11 = i5 + (i8 << 2) | 0;
+ if ((i9 | 0) < 18) {
+ do {
+ i13 = 0;
+ i11 = i17 + 127 | 0;
+ while (1) {
+ i11 = i11 & 127;
+ i12 = i5 + (i11 << 2) | 0;
+ i15 = _bitshift64Shl(HEAP32[i12 >> 2] | 0, 0, 29) | 0;
+ i15 = _i64Add(i15 | 0, tempRet0 | 0, i13 | 0, 0) | 0;
+ i13 = tempRet0;
+ if (i13 >>> 0 > 0 | (i13 | 0) == 0 & i15 >>> 0 > 1e9) {
+ i29 = ___udivdi3(i15 | 0, i13 | 0, 1e9, 0) | 0;
+ i15 = ___uremdi3(i15 | 0, i13 | 0, 1e9, 0) | 0;
+ i13 = i29;
+ } else {
+ i13 = 0;
+ }
+ HEAP32[i12 >> 2] = i15;
+ i12 = (i11 | 0) == (i8 | 0);
+ if (!((i11 | 0) != (i17 + 127 & 127 | 0) | i12)) {
+ i17 = (i15 | 0) == 0 ? i11 : i17;
+ }
+ if (i12) {
+ break;
+ } else {
+ i11 = i11 + -1 | 0;
+ }
+ }
+ i10 = i10 + -29 | 0;
+ } while ((i13 | 0) == 0);
+ } else {
+ if ((i9 | 0) != 18) {
+ break;
+ }
+ do {
+ if (!((HEAP32[i11 >> 2] | 0) >>> 0 < 9007199)) {
+ i9 = 18;
+ break L280;
+ }
+ i13 = 0;
+ i12 = i17 + 127 | 0;
+ while (1) {
+ i12 = i12 & 127;
+ i15 = i5 + (i12 << 2) | 0;
+ i16 = _bitshift64Shl(HEAP32[i15 >> 2] | 0, 0, 29) | 0;
+ i16 = _i64Add(i16 | 0, tempRet0 | 0, i13 | 0, 0) | 0;
+ i13 = tempRet0;
+ if (i13 >>> 0 > 0 | (i13 | 0) == 0 & i16 >>> 0 > 1e9) {
+ i29 = ___udivdi3(i16 | 0, i13 | 0, 1e9, 0) | 0;
+ i16 = ___uremdi3(i16 | 0, i13 | 0, 1e9, 0) | 0;
+ i13 = i29;
+ } else {
+ i13 = 0;
+ }
+ HEAP32[i15 >> 2] = i16;
+ i15 = (i12 | 0) == (i8 | 0);
+ if (!((i12 | 0) != (i17 + 127 & 127 | 0) | i15)) {
+ i17 = (i16 | 0) == 0 ? i12 : i17;
+ }
+ if (i15) {
+ break;
+ } else {
+ i12 = i12 + -1 | 0;
+ }
+ }
+ i10 = i10 + -29 | 0;
+ } while ((i13 | 0) == 0);
+ }
+ i8 = i8 + 127 & 127;
+ if ((i8 | 0) == (i17 | 0)) {
+ i29 = i17 + 127 & 127;
+ i17 = i5 + ((i17 + 126 & 127) << 2) | 0;
+ HEAP32[i17 >> 2] = HEAP32[i17 >> 2] | HEAP32[i5 + (i29 << 2) >> 2];
+ i17 = i29;
+ }
+ HEAP32[i5 + (i8 << 2) >> 2] = i13;
+ i9 = i9 + 9 | 0;
+ }
+ L311 : while (1) {
+ i11 = i17 + 1 & 127;
+ i12 = i5 + ((i17 + 127 & 127) << 2) | 0;
+ while (1) {
+ i15 = (i9 | 0) == 18;
+ i13 = (i9 | 0) > 27 ? 9 : 1;
+ while (1) {
+ i16 = 0;
+ while (1) {
+ i18 = i16 + i8 & 127;
+ if ((i18 | 0) == (i17 | 0)) {
+ i16 = 2;
+ break;
+ }
+ i18 = HEAP32[i5 + (i18 << 2) >> 2] | 0;
+ i19 = HEAP32[13432 + (i16 << 2) >> 2] | 0;
+ if (i18 >>> 0 < i19 >>> 0) {
+ i16 = 2;
+ break;
+ }
+ i20 = i16 + 1 | 0;
+ if (i18 >>> 0 > i19 >>> 0) {
+ break;
+ }
+ if ((i20 | 0) < 2) {
+ i16 = i20;
+ } else {
+ i16 = i20;
+ break;
+ }
+ }
+ if ((i16 | 0) == 2 & i15) {
+ break L311;
+ }
+ i10 = i13 + i10 | 0;
+ if ((i8 | 0) == (i17 | 0)) {
+ i8 = i17;
+ } else {
+ break;
+ }
+ }
+ i15 = (1 << i13) + -1 | 0;
+ i19 = 1e9 >>> i13;
+ i18 = i8;
+ i16 = 0;
+ do {
+ i27 = i5 + (i8 << 2) | 0;
+ i29 = HEAP32[i27 >> 2] | 0;
+ i20 = (i29 >>> i13) + i16 | 0;
+ HEAP32[i27 >> 2] = i20;
+ i16 = Math_imul(i29 & i15, i19) | 0;
+ i20 = (i8 | 0) == (i18 | 0) & (i20 | 0) == 0;
+ i8 = i8 + 1 & 127;
+ i9 = i20 ? i9 + -9 | 0 : i9;
+ i18 = i20 ? i8 : i18;
+ } while ((i8 | 0) != (i17 | 0));
+ if ((i16 | 0) == 0) {
+ i8 = i18;
+ continue;
+ }
+ if ((i11 | 0) != (i18 | 0)) {
+ break;
+ }
+ HEAP32[i12 >> 2] = HEAP32[i12 >> 2] | 1;
+ i8 = i18;
+ }
+ HEAP32[i5 + (i17 << 2) >> 2] = i16;
+ i8 = i18;
+ i17 = i11;
+ }
+ i9 = i8 & 127;
+ if ((i9 | 0) == (i17 | 0)) {
+ HEAP32[i5 + (i11 + -1 << 2) >> 2] = 0;
+ i17 = i11;
+ }
+ d28 = +((HEAP32[i5 + (i9 << 2) >> 2] | 0) >>> 0);
+ i9 = i8 + 1 & 127;
+ if ((i9 | 0) == (i17 | 0)) {
+ i17 = i17 + 1 & 127;
+ HEAP32[i5 + (i17 + -1 << 2) >> 2] = 0;
+ }
+ d14 = +(i4 | 0);
+ d30 = d14 * (d28 * 1.0e9 + +((HEAP32[i5 + (i9 << 2) >> 2] | 0) >>> 0));
+ i4 = i10 + 53 | 0;
+ i3 = i4 - i3 | 0;
+ if ((i3 | 0) < (i2 | 0)) {
+ i2 = (i3 | 0) < 0 ? 0 : i3;
+ i9 = 1;
+ } else {
+ i9 = 0;
+ }
+ if ((i2 | 0) < 53) {
+ d33 = +_copysign(+(+_scalbn(1.0, 105 - i2 | 0)), +d30);
+ d32 = +_fmod(+d30, +(+_scalbn(1.0, 53 - i2 | 0)));
+ d28 = d33;
+ d31 = d32;
+ d30 = d33 + (d30 - d32);
+ } else {
+ d28 = 0.0;
+ d31 = 0.0;
+ }
+ i11 = i8 + 2 & 127;
+ if ((i11 | 0) != (i17 | 0)) {
+ i5 = HEAP32[i5 + (i11 << 2) >> 2] | 0;
+ do {
+ if (!(i5 >>> 0 < 5e8)) {
+ if (i5 >>> 0 > 5e8) {
+ d31 = d14 * .75 + d31;
+ break;
+ }
+ if ((i8 + 3 & 127 | 0) == (i17 | 0)) {
+ d31 = d14 * .5 + d31;
+ break;
+ } else {
+ d31 = d14 * .75 + d31;
+ break;
+ }
+ } else {
+ if ((i5 | 0) == 0 ? (i8 + 3 & 127 | 0) == (i17 | 0) : 0) {
+ break;
+ }
+ d31 = d14 * .25 + d31;
+ }
+ } while (0);
+ if ((53 - i2 | 0) > 1 ? !(+_fmod(+d31, 1.0) != 0.0) : 0) {
+ d31 = d31 + 1.0;
+ }
+ }
+ d14 = d30 + d31 - d28;
+ do {
+ if ((i4 & 2147483647 | 0) > (-2 - i7 | 0)) {
+ if (+Math_abs(+d14) >= 9007199254740992.0) {
+ i9 = (i9 | 0) != 0 & (i2 | 0) == (i3 | 0) ? 0 : i9;
+ i10 = i10 + 1 | 0;
+ d14 = d14 * .5;
+ }
+ if ((i10 + 50 | 0) <= (i6 | 0) ? !((i9 | 0) != 0 & d31 != 0.0) : 0) {
+ break;
+ }
+ HEAP32[(___errno_location() | 0) >> 2] = 34;
+ }
+ } while (0);
+ d33 = +_scalbnl(d14, i10);
+ STACKTOP = i1;
+ return +d33;
+ } else if ((i7 | 0) == 3) {
+ i2 = HEAP32[i9 >> 2] | 0;
+ if (i2 >>> 0 < (HEAP32[i10 >> 2] | 0) >>> 0) {
+ HEAP32[i9 >> 2] = i2 + 1;
+ i2 = HEAPU8[i2] | 0;
+ } else {
+ i2 = ___shgetc(i8) | 0;
+ }
+ if ((i2 | 0) == 40) {
+ i2 = 1;
+ } else {
+ if ((HEAP32[i10 >> 2] | 0) == 0) {
+ d33 = nan;
+ STACKTOP = i1;
+ return +d33;
+ }
+ HEAP32[i9 >> 2] = (HEAP32[i9 >> 2] | 0) + -1;
+ d33 = nan;
+ STACKTOP = i1;
+ return +d33;
+ }
+ while (1) {
+ i3 = HEAP32[i9 >> 2] | 0;
+ if (i3 >>> 0 < (HEAP32[i10 >> 2] | 0) >>> 0) {
+ HEAP32[i9 >> 2] = i3 + 1;
+ i3 = HEAPU8[i3] | 0;
+ } else {
+ i3 = ___shgetc(i8) | 0;
+ }
+ if (!((i3 + -48 | 0) >>> 0 < 10 | (i3 + -65 | 0) >>> 0 < 26) ? !((i3 + -97 | 0) >>> 0 < 26 | (i3 | 0) == 95) : 0) {
+ break;
+ }
+ i2 = i2 + 1 | 0;
+ }
+ if ((i3 | 0) == 41) {
+ d33 = nan;
+ STACKTOP = i1;
+ return +d33;
+ }
+ i3 = (HEAP32[i10 >> 2] | 0) == 0;
+ if (!i3) {
+ HEAP32[i9 >> 2] = (HEAP32[i9 >> 2] | 0) + -1;
+ }
+ if (i12) {
+ HEAP32[(___errno_location() | 0) >> 2] = 22;
+ ___shlim(i8, 0);
+ d33 = 0.0;
+ STACKTOP = i1;
+ return +d33;
+ }
+ if ((i2 | 0) == 0 | i3) {
+ d33 = nan;
+ STACKTOP = i1;
+ return +d33;
+ }
+ while (1) {
+ i2 = i2 + -1 | 0;
+ HEAP32[i9 >> 2] = (HEAP32[i9 >> 2] | 0) + -1;
+ if ((i2 | 0) == 0) {
+ d14 = nan;
+ break;
+ }
+ }
+ STACKTOP = i1;
+ return +d14;
+ } else {
+ if ((HEAP32[i10 >> 2] | 0) != 0) {
+ HEAP32[i9 >> 2] = (HEAP32[i9 >> 2] | 0) + -1;
+ }
+ HEAP32[(___errno_location() | 0) >> 2] = 22;
+ ___shlim(i8, 0);
+ d33 = 0.0;
+ STACKTOP = i1;
+ return +d33;
+ }
+ }
+ } while (0);
+ if ((i13 | 0) == 23) {
+ i2 = (HEAP32[i10 >> 2] | 0) == 0;
+ if (!i2) {
+ HEAP32[i9 >> 2] = (HEAP32[i9 >> 2] | 0) + -1;
+ }
+ if (!(i7 >>> 0 < 4 | (i11 | 0) == 0 | i2)) {
+ do {
+ HEAP32[i9 >> 2] = (HEAP32[i9 >> 2] | 0) + -1;
+ i7 = i7 + -1 | 0;
+ } while (i7 >>> 0 > 3);
+ }
+ }
+ d33 = +(i4 | 0) * inf;
+ STACKTOP = i1;
+ return +d33;
+}
+function _statement(i4) {
+ i4 = i4 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0, i23 = 0, i24 = 0, i25 = 0, i26 = 0, i27 = 0, i28 = 0, i29 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 160 | 0;
+ i8 = i2 + 120 | 0;
+ i24 = i2 + 96 | 0;
+ i15 = i2 + 72 | 0;
+ i25 = i2 + 48 | 0;
+ i20 = i2 + 24 | 0;
+ i21 = i2;
+ i19 = i4 + 4 | 0;
+ i6 = HEAP32[i19 >> 2] | 0;
+ i3 = i4 + 48 | 0;
+ i9 = HEAP32[i3 >> 2] | 0;
+ i1 = i4 + 52 | 0;
+ i26 = (HEAP32[i1 >> 2] | 0) + 38 | 0;
+ i27 = (HEAP16[i26 >> 1] | 0) + 1 << 16 >> 16;
+ HEAP16[i26 >> 1] = i27;
+ if ((i27 & 65535) > 200) {
+ i27 = i9 + 12 | 0;
+ i26 = HEAP32[(HEAP32[i27 >> 2] | 0) + 52 >> 2] | 0;
+ i5 = HEAP32[(HEAP32[i9 >> 2] | 0) + 64 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ i29 = 6552;
+ HEAP32[i8 >> 2] = 6360;
+ i28 = i8 + 4 | 0;
+ HEAP32[i28 >> 2] = 200;
+ i28 = i8 + 8 | 0;
+ HEAP32[i28 >> 2] = i29;
+ i28 = _luaO_pushfstring(i26, 6592, i8) | 0;
+ i29 = HEAP32[i27 >> 2] | 0;
+ _luaX_syntaxerror(i29, i28);
+ }
+ HEAP32[i8 >> 2] = i5;
+ i28 = _luaO_pushfstring(i26, 6568, i8) | 0;
+ HEAP32[i8 >> 2] = 6360;
+ i29 = i8 + 4 | 0;
+ HEAP32[i29 >> 2] = 200;
+ i29 = i8 + 8 | 0;
+ HEAP32[i29 >> 2] = i28;
+ i29 = _luaO_pushfstring(i26, 6592, i8) | 0;
+ i28 = HEAP32[i27 >> 2] | 0;
+ _luaX_syntaxerror(i28, i29);
+ }
+ i5 = i4 + 16 | 0;
+ L8 : do {
+ switch (HEAP32[i5 >> 2] | 0) {
+ case 59:
+ {
+ _luaX_next(i4);
+ break;
+ }
+ case 267:
+ {
+ HEAP32[i21 >> 2] = -1;
+ _test_then_block(i4, i21);
+ while (1) {
+ i8 = HEAP32[i5 >> 2] | 0;
+ if ((i8 | 0) == 260) {
+ i7 = 10;
+ break;
+ } else if ((i8 | 0) != 261) {
+ break;
+ }
+ _test_then_block(i4, i21);
+ }
+ if ((i7 | 0) == 10) {
+ _luaX_next(i4);
+ i7 = HEAP32[i3 >> 2] | 0;
+ HEAP8[i20 + 10 | 0] = 0;
+ HEAP8[i20 + 8 | 0] = HEAP8[i7 + 46 | 0] | 0;
+ i29 = HEAP32[(HEAP32[i7 + 12 >> 2] | 0) + 64 >> 2] | 0;
+ HEAP16[i20 + 4 >> 1] = HEAP32[i29 + 28 >> 2];
+ HEAP16[i20 + 6 >> 1] = HEAP32[i29 + 16 >> 2];
+ HEAP8[i20 + 9 | 0] = 0;
+ i29 = i7 + 16 | 0;
+ HEAP32[i20 >> 2] = HEAP32[i29 >> 2];
+ HEAP32[i29 >> 2] = i20;
+ L16 : do {
+ i8 = HEAP32[i5 >> 2] | 0;
+ switch (i8 | 0) {
+ case 277:
+ case 286:
+ case 262:
+ case 261:
+ case 260:
+ {
+ break L16;
+ }
+ default:
+ {}
+ }
+ _statement(i4);
+ } while ((i8 | 0) != 274);
+ _leaveblock(i7);
+ }
+ _check_match(i4, 262, 267, i6);
+ _luaK_patchtohere(i9, HEAP32[i21 >> 2] | 0);
+ break;
+ }
+ case 259:
+ {
+ _luaX_next(i4);
+ i7 = HEAP32[i3 >> 2] | 0;
+ HEAP8[i20 + 10 | 0] = 0;
+ HEAP8[i20 + 8 | 0] = HEAP8[i7 + 46 | 0] | 0;
+ i29 = HEAP32[(HEAP32[i7 + 12 >> 2] | 0) + 64 >> 2] | 0;
+ HEAP16[i20 + 4 >> 1] = HEAP32[i29 + 28 >> 2];
+ HEAP16[i20 + 6 >> 1] = HEAP32[i29 + 16 >> 2];
+ HEAP8[i20 + 9 | 0] = 0;
+ i29 = i7 + 16 | 0;
+ HEAP32[i20 >> 2] = HEAP32[i29 >> 2];
+ HEAP32[i29 >> 2] = i20;
+ L22 : do {
+ i8 = HEAP32[i5 >> 2] | 0;
+ switch (i8 | 0) {
+ case 277:
+ case 286:
+ case 262:
+ case 261:
+ case 260:
+ {
+ break L22;
+ }
+ default:
+ {}
+ }
+ _statement(i4);
+ } while ((i8 | 0) != 274);
+ _leaveblock(i7);
+ _check_match(i4, 262, 259, i6);
+ break;
+ }
+ case 269:
+ {
+ _luaX_next(i4);
+ i6 = HEAP32[i5 >> 2] | 0;
+ if ((i6 | 0) == 265) {
+ _luaX_next(i4);
+ i7 = HEAP32[i3 >> 2] | 0;
+ if ((HEAP32[i5 >> 2] | 0) == 288) {
+ i29 = HEAP32[i4 + 24 >> 2] | 0;
+ _luaX_next(i4);
+ _new_localvar(i4, i29);
+ i29 = HEAP32[i3 >> 2] | 0;
+ i27 = i29 + 46 | 0;
+ i28 = (HEAPU8[i27] | 0) + 1 | 0;
+ HEAP8[i27] = i28;
+ HEAP32[(HEAP32[(HEAP32[i29 >> 2] | 0) + 24 >> 2] | 0) + ((HEAP16[(HEAP32[HEAP32[(HEAP32[i29 + 12 >> 2] | 0) + 64 >> 2] >> 2] | 0) + ((i28 & 255) + -1 + (HEAP32[i29 + 40 >> 2] | 0) << 1) >> 1] | 0) * 12 | 0) + 4 >> 2] = HEAP32[i29 + 20 >> 2];
+ _body(i4, i25, 0, HEAP32[i19 >> 2] | 0);
+ HEAP32[(HEAP32[(HEAP32[i7 >> 2] | 0) + 24 >> 2] | 0) + ((HEAP16[(HEAP32[HEAP32[(HEAP32[i7 + 12 >> 2] | 0) + 64 >> 2] >> 2] | 0) + ((HEAP32[i7 + 40 >> 2] | 0) + (HEAP32[i25 + 8 >> 2] | 0) << 1) >> 1] | 0) * 12 | 0) + 4 >> 2] = HEAP32[i7 + 20 >> 2];
+ break L8;
+ } else {
+ _error_expected(i4, 288);
+ }
+ }
+ if ((i6 | 0) != 288) {
+ _error_expected(i4, 288);
+ }
+ i7 = i4 + 24 | 0;
+ i6 = 1;
+ while (1) {
+ i8 = HEAP32[i7 >> 2] | 0;
+ _luaX_next(i4);
+ _new_localvar(i4, i8);
+ i8 = HEAP32[i5 >> 2] | 0;
+ if ((i8 | 0) == 61) {
+ i7 = 81;
+ break;
+ } else if ((i8 | 0) != 44) {
+ i7 = 83;
+ break;
+ }
+ _luaX_next(i4);
+ if ((HEAP32[i5 >> 2] | 0) == 288) {
+ i6 = i6 + 1 | 0;
+ } else {
+ i7 = 78;
+ break;
+ }
+ }
+ do {
+ if ((i7 | 0) == 78) {
+ _error_expected(i4, 288);
+ } else if ((i7 | 0) == 81) {
+ _luaX_next(i4);
+ _subexpr(i4, i15, 0) | 0;
+ if ((HEAP32[i5 >> 2] | 0) == 44) {
+ i8 = 1;
+ do {
+ _luaX_next(i4);
+ _luaK_exp2nextreg(HEAP32[i3 >> 2] | 0, i15);
+ _subexpr(i4, i15, 0) | 0;
+ i8 = i8 + 1 | 0;
+ } while ((HEAP32[i5 >> 2] | 0) == 44);
+ } else {
+ i8 = 1;
+ }
+ i5 = HEAP32[i15 >> 2] | 0;
+ i4 = HEAP32[i3 >> 2] | 0;
+ i8 = i6 - i8 | 0;
+ if ((i5 | 0) == 0) {
+ i17 = i8;
+ i18 = i4;
+ i7 = 88;
+ break;
+ } else if (!((i5 | 0) == 13 | (i5 | 0) == 12)) {
+ _luaK_exp2nextreg(i4, i15);
+ i17 = i8;
+ i18 = i4;
+ i7 = 88;
+ break;
+ }
+ i5 = i8 + 1 | 0;
+ i5 = (i5 | 0) < 0 ? 0 : i5;
+ _luaK_setreturns(i4, i15, i5);
+ if ((i5 | 0) > 1) {
+ _luaK_reserveregs(i4, i5 + -1 | 0);
+ }
+ } else if ((i7 | 0) == 83) {
+ HEAP32[i15 >> 2] = 0;
+ i17 = i6;
+ i18 = HEAP32[i3 >> 2] | 0;
+ i7 = 88;
+ }
+ } while (0);
+ if ((i7 | 0) == 88 ? (i17 | 0) > 0 : 0) {
+ i29 = HEAPU8[i18 + 48 | 0] | 0;
+ _luaK_reserveregs(i18, i17);
+ _luaK_nil(i18, i29, i17);
+ }
+ i5 = HEAP32[i3 >> 2] | 0;
+ i4 = i5 + 46 | 0;
+ i7 = (HEAPU8[i4] | 0) + i6 | 0;
+ HEAP8[i4] = i7;
+ if ((i6 | 0) != 0 ? (i11 = i5 + 20 | 0, i14 = i5 + 40 | 0, i12 = HEAP32[(HEAP32[i5 >> 2] | 0) + 24 >> 2] | 0, i13 = HEAP32[HEAP32[(HEAP32[i5 + 12 >> 2] | 0) + 64 >> 2] >> 2] | 0, HEAP32[i12 + ((HEAP16[i13 + ((i7 & 255) - i6 + (HEAP32[i14 >> 2] | 0) << 1) >> 1] | 0) * 12 | 0) + 4 >> 2] = HEAP32[i11 >> 2], i16 = i6 + -1 | 0, (i16 | 0) != 0) : 0) {
+ do {
+ HEAP32[i12 + ((HEAP16[i13 + ((HEAPU8[i4] | 0) - i16 + (HEAP32[i14 >> 2] | 0) << 1) >> 1] | 0) * 12 | 0) + 4 >> 2] = HEAP32[i11 >> 2];
+ i16 = i16 + -1 | 0;
+ } while ((i16 | 0) != 0);
+ }
+ break;
+ }
+ case 264:
+ {
+ HEAP8[i24 + 10 | 0] = 1;
+ HEAP8[i24 + 8 | 0] = HEAP8[i9 + 46 | 0] | 0;
+ i29 = HEAP32[(HEAP32[i9 + 12 >> 2] | 0) + 64 >> 2] | 0;
+ HEAP16[i24 + 4 >> 1] = HEAP32[i29 + 28 >> 2];
+ HEAP16[i24 + 6 >> 1] = HEAP32[i29 + 16 >> 2];
+ HEAP8[i24 + 9 | 0] = 0;
+ i29 = i9 + 16 | 0;
+ HEAP32[i24 >> 2] = HEAP32[i29 >> 2];
+ HEAP32[i29 >> 2] = i24;
+ _luaX_next(i4);
+ if ((HEAP32[i5 >> 2] | 0) != 288) {
+ _error_expected(i4, 288);
+ }
+ i14 = i4 + 24 | 0;
+ i13 = HEAP32[i14 >> 2] | 0;
+ _luaX_next(i4);
+ i11 = HEAP32[i5 >> 2] | 0;
+ if ((i11 | 0) == 268 | (i11 | 0) == 44) {
+ i12 = HEAP32[i3 >> 2] | 0;
+ i11 = HEAPU8[i12 + 48 | 0] | 0;
+ _new_localvar(i4, _luaX_newstring(i4, 6744, 15) | 0);
+ _new_localvar(i4, _luaX_newstring(i4, 6760, 11) | 0);
+ _new_localvar(i4, _luaX_newstring(i4, 6776, 13) | 0);
+ _new_localvar(i4, i13);
+ i13 = HEAP32[i5 >> 2] | 0;
+ do {
+ if ((i13 | 0) == 44) {
+ i15 = 4;
+ while (1) {
+ _luaX_next(i4);
+ if ((HEAP32[i5 >> 2] | 0) != 288) {
+ i7 = 40;
+ break;
+ }
+ i13 = HEAP32[i14 >> 2] | 0;
+ _luaX_next(i4);
+ _new_localvar(i4, i13);
+ i13 = HEAP32[i5 >> 2] | 0;
+ if ((i13 | 0) == 44) {
+ i15 = i15 + 1 | 0;
+ } else {
+ i7 = 42;
+ break;
+ }
+ }
+ if ((i7 | 0) == 40) {
+ _error_expected(i4, 288);
+ } else if ((i7 | 0) == 42) {
+ i22 = i13;
+ i10 = i15 + -2 | 0;
+ break;
+ }
+ } else {
+ i22 = i13;
+ i10 = 1;
+ }
+ } while (0);
+ if ((i22 | 0) != 268) {
+ _error_expected(i4, 268);
+ }
+ _luaX_next(i4);
+ i13 = HEAP32[i19 >> 2] | 0;
+ _subexpr(i4, i8, 0) | 0;
+ if ((HEAP32[i5 >> 2] | 0) == 44) {
+ i14 = 1;
+ do {
+ _luaX_next(i4);
+ _luaK_exp2nextreg(HEAP32[i3 >> 2] | 0, i8);
+ _subexpr(i4, i8, 0) | 0;
+ i14 = i14 + 1 | 0;
+ } while ((HEAP32[i5 >> 2] | 0) == 44);
+ } else {
+ i14 = 1;
+ }
+ i5 = HEAP32[i3 >> 2] | 0;
+ i14 = 3 - i14 | 0;
+ i15 = HEAP32[i8 >> 2] | 0;
+ if ((i15 | 0) == 0) {
+ i7 = 51;
+ } else if ((i15 | 0) == 13 | (i15 | 0) == 12) {
+ i15 = i14 + 1 | 0;
+ i15 = (i15 | 0) < 0 ? 0 : i15;
+ _luaK_setreturns(i5, i8, i15);
+ if ((i15 | 0) > 1) {
+ _luaK_reserveregs(i5, i15 + -1 | 0);
+ }
+ } else {
+ _luaK_exp2nextreg(i5, i8);
+ i7 = 51;
+ }
+ if ((i7 | 0) == 51 ? (i14 | 0) > 0 : 0) {
+ i29 = HEAPU8[i5 + 48 | 0] | 0;
+ _luaK_reserveregs(i5, i14);
+ _luaK_nil(i5, i29, i14);
+ }
+ _luaK_checkstack(i12, 3);
+ _forbody(i4, i11, i13, i10, 0);
+ } else if ((i11 | 0) == 61) {
+ i11 = HEAP32[i3 >> 2] | 0;
+ i7 = i11 + 48 | 0;
+ i10 = HEAPU8[i7] | 0;
+ _new_localvar(i4, _luaX_newstring(i4, 6792, 11) | 0);
+ _new_localvar(i4, _luaX_newstring(i4, 6808, 11) | 0);
+ _new_localvar(i4, _luaX_newstring(i4, 6824, 10) | 0);
+ _new_localvar(i4, i13);
+ if ((HEAP32[i5 >> 2] | 0) != 61) {
+ _error_expected(i4, 61);
+ }
+ _luaX_next(i4);
+ _subexpr(i4, i8, 0) | 0;
+ _luaK_exp2nextreg(HEAP32[i3 >> 2] | 0, i8);
+ if ((HEAP32[i5 >> 2] | 0) != 44) {
+ _error_expected(i4, 44);
+ }
+ _luaX_next(i4);
+ _subexpr(i4, i8, 0) | 0;
+ _luaK_exp2nextreg(HEAP32[i3 >> 2] | 0, i8);
+ if ((HEAP32[i5 >> 2] | 0) == 44) {
+ _luaX_next(i4);
+ _subexpr(i4, i8, 0) | 0;
+ _luaK_exp2nextreg(HEAP32[i3 >> 2] | 0, i8);
+ } else {
+ i29 = HEAPU8[i7] | 0;
+ _luaK_codek(i11, i29, _luaK_numberK(i11, 1.0) | 0) | 0;
+ _luaK_reserveregs(i11, 1);
+ }
+ _forbody(i4, i10, i6, 1, 1);
+ } else {
+ _luaX_syntaxerror(i4, 6720);
+ }
+ _check_match(i4, 262, 264, i6);
+ _leaveblock(i9);
+ break;
+ }
+ case 265:
+ {
+ _luaX_next(i4);
+ if ((HEAP32[i5 >> 2] | 0) != 288) {
+ _error_expected(i4, 288);
+ }
+ i8 = HEAP32[i4 + 24 >> 2] | 0;
+ _luaX_next(i4);
+ i9 = HEAP32[i3 >> 2] | 0;
+ if ((_singlevaraux(i9, i8, i20, 1) | 0) == 0) {
+ _singlevaraux(i9, HEAP32[i4 + 72 >> 2] | 0, i20, 1) | 0;
+ i29 = _luaK_stringK(HEAP32[i3 >> 2] | 0, i8) | 0;
+ HEAP32[i25 + 16 >> 2] = -1;
+ HEAP32[i25 + 20 >> 2] = -1;
+ HEAP32[i25 >> 2] = 4;
+ HEAP32[i25 + 8 >> 2] = i29;
+ _luaK_indexed(i9, i20, i25);
+ }
+ while (1) {
+ i8 = HEAP32[i5 >> 2] | 0;
+ if ((i8 | 0) == 58) {
+ i7 = 70;
+ break;
+ } else if ((i8 | 0) != 46) {
+ i5 = 0;
+ break;
+ }
+ _fieldsel(i4, i20);
+ }
+ if ((i7 | 0) == 70) {
+ _fieldsel(i4, i20);
+ i5 = 1;
+ }
+ _body(i4, i21, i5, i6);
+ _luaK_storevar(HEAP32[i3 >> 2] | 0, i20, i21);
+ _luaK_fixline(HEAP32[i3 >> 2] | 0, i6);
+ break;
+ }
+ case 278:
+ {
+ _luaX_next(i4);
+ i7 = _luaK_getlabel(i9) | 0;
+ _subexpr(i4, i20, 0) | 0;
+ if ((HEAP32[i20 >> 2] | 0) == 1) {
+ HEAP32[i20 >> 2] = 3;
+ }
+ _luaK_goiftrue(HEAP32[i3 >> 2] | 0, i20);
+ i8 = HEAP32[i20 + 20 >> 2] | 0;
+ HEAP8[i21 + 10 | 0] = 1;
+ HEAP8[i21 + 8 | 0] = HEAP8[i9 + 46 | 0] | 0;
+ i29 = HEAP32[(HEAP32[i9 + 12 >> 2] | 0) + 64 >> 2] | 0;
+ HEAP16[i21 + 4 >> 1] = HEAP32[i29 + 28 >> 2];
+ HEAP16[i21 + 6 >> 1] = HEAP32[i29 + 16 >> 2];
+ HEAP8[i21 + 9 | 0] = 0;
+ i29 = i9 + 16 | 0;
+ HEAP32[i21 >> 2] = HEAP32[i29 >> 2];
+ HEAP32[i29 >> 2] = i21;
+ if ((HEAP32[i5 >> 2] | 0) != 259) {
+ _error_expected(i4, 259);
+ }
+ _luaX_next(i4);
+ i10 = HEAP32[i3 >> 2] | 0;
+ HEAP8[i20 + 10 | 0] = 0;
+ HEAP8[i20 + 8 | 0] = HEAP8[i10 + 46 | 0] | 0;
+ i29 = HEAP32[(HEAP32[i10 + 12 >> 2] | 0) + 64 >> 2] | 0;
+ HEAP16[i20 + 4 >> 1] = HEAP32[i29 + 28 >> 2];
+ HEAP16[i20 + 6 >> 1] = HEAP32[i29 + 16 >> 2];
+ HEAP8[i20 + 9 | 0] = 0;
+ i29 = i10 + 16 | 0;
+ HEAP32[i20 >> 2] = HEAP32[i29 >> 2];
+ HEAP32[i29 >> 2] = i20;
+ L119 : do {
+ i11 = HEAP32[i5 >> 2] | 0;
+ switch (i11 | 0) {
+ case 277:
+ case 286:
+ case 262:
+ case 261:
+ case 260:
+ {
+ break L119;
+ }
+ default:
+ {}
+ }
+ _statement(i4);
+ } while ((i11 | 0) != 274);
+ _leaveblock(i10);
+ _luaK_patchlist(i9, _luaK_jump(i9) | 0, i7);
+ _check_match(i4, 262, 278, i6);
+ _leaveblock(i9);
+ _luaK_patchtohere(i9, i8);
+ break;
+ }
+ case 273:
+ {
+ i7 = _luaK_getlabel(i9) | 0;
+ HEAP8[i24 + 10 | 0] = 1;
+ i28 = i9 + 46 | 0;
+ HEAP8[i24 + 8 | 0] = HEAP8[i28] | 0;
+ i11 = i9 + 12 | 0;
+ i29 = HEAP32[(HEAP32[i11 >> 2] | 0) + 64 >> 2] | 0;
+ HEAP16[i24 + 4 >> 1] = HEAP32[i29 + 28 >> 2];
+ HEAP16[i24 + 6 >> 1] = HEAP32[i29 + 16 >> 2];
+ HEAP8[i24 + 9 | 0] = 0;
+ i29 = i9 + 16 | 0;
+ HEAP32[i24 >> 2] = HEAP32[i29 >> 2];
+ HEAP32[i29 >> 2] = i24;
+ HEAP8[i15 + 10 | 0] = 0;
+ i10 = i15 + 8 | 0;
+ HEAP8[i10] = HEAP8[i28] | 0;
+ i11 = HEAP32[(HEAP32[i11 >> 2] | 0) + 64 >> 2] | 0;
+ HEAP16[i15 + 4 >> 1] = HEAP32[i11 + 28 >> 2];
+ HEAP16[i15 + 6 >> 1] = HEAP32[i11 + 16 >> 2];
+ i11 = i15 + 9 | 0;
+ HEAP8[i11] = 0;
+ HEAP32[i15 >> 2] = HEAP32[i29 >> 2];
+ HEAP32[i29 >> 2] = i15;
+ _luaX_next(i4);
+ L124 : do {
+ i12 = HEAP32[i5 >> 2] | 0;
+ switch (i12 | 0) {
+ case 277:
+ case 286:
+ case 262:
+ case 261:
+ case 260:
+ {
+ break L124;
+ }
+ default:
+ {}
+ }
+ _statement(i4);
+ } while ((i12 | 0) != 274);
+ _check_match(i4, 277, 273, i6);
+ _subexpr(i4, i8, 0) | 0;
+ if ((HEAP32[i8 >> 2] | 0) == 1) {
+ HEAP32[i8 >> 2] = 3;
+ }
+ _luaK_goiftrue(HEAP32[i3 >> 2] | 0, i8);
+ i4 = HEAP32[i8 + 20 >> 2] | 0;
+ if ((HEAP8[i11] | 0) != 0) {
+ _luaK_patchclose(i9, i4, HEAPU8[i10] | 0);
+ }
+ _leaveblock(i9);
+ _luaK_patchlist(i9, i4, i7);
+ _leaveblock(i9);
+ break;
+ }
+ case 285:
+ {
+ _luaX_next(i4);
+ if ((HEAP32[i5 >> 2] | 0) != 288) {
+ _error_expected(i4, 288);
+ }
+ i10 = HEAP32[i4 + 24 >> 2] | 0;
+ _luaX_next(i4);
+ i15 = HEAP32[i3 >> 2] | 0;
+ i9 = i4 + 64 | 0;
+ i14 = HEAP32[i9 >> 2] | 0;
+ i12 = i14 + 24 | 0;
+ i11 = i15 + 16 | 0;
+ i16 = HEAP16[(HEAP32[i11 >> 2] | 0) + 4 >> 1] | 0;
+ i13 = i14 + 28 | 0;
+ L138 : do {
+ if ((i16 | 0) < (HEAP32[i13 >> 2] | 0)) {
+ while (1) {
+ i17 = i16 + 1 | 0;
+ if ((_luaS_eqstr(i10, HEAP32[(HEAP32[i12 >> 2] | 0) + (i16 << 4) >> 2] | 0) | 0) != 0) {
+ break;
+ }
+ if ((i17 | 0) < (HEAP32[i13 >> 2] | 0)) {
+ i16 = i17;
+ } else {
+ break L138;
+ }
+ }
+ i28 = i15 + 12 | 0;
+ i29 = HEAP32[(HEAP32[i28 >> 2] | 0) + 52 >> 2] | 0;
+ i27 = HEAP32[(HEAP32[i12 >> 2] | 0) + (i16 << 4) + 8 >> 2] | 0;
+ HEAP32[i8 >> 2] = i10 + 16;
+ HEAP32[i8 + 4 >> 2] = i27;
+ i29 = _luaO_pushfstring(i29, 6680, i8) | 0;
+ _semerror(HEAP32[i28 >> 2] | 0, i29);
+ }
+ } while (0);
+ if ((HEAP32[i5 >> 2] | 0) != 285) {
+ _error_expected(i4, 285);
+ }
+ _luaX_next(i4);
+ i8 = HEAP32[i15 + 20 >> 2] | 0;
+ i15 = HEAP32[i13 >> 2] | 0;
+ i14 = i14 + 32 | 0;
+ if ((i15 | 0) < (HEAP32[i14 >> 2] | 0)) {
+ i14 = HEAP32[i12 >> 2] | 0;
+ } else {
+ i14 = _luaM_growaux_(HEAP32[i1 >> 2] | 0, HEAP32[i12 >> 2] | 0, i14, 16, 32767, 6312) | 0;
+ HEAP32[i12 >> 2] = i14;
+ }
+ HEAP32[i14 + (i15 << 4) >> 2] = i10;
+ i29 = HEAP32[i12 >> 2] | 0;
+ HEAP32[i29 + (i15 << 4) + 8 >> 2] = i6;
+ HEAP8[i29 + (i15 << 4) + 12 | 0] = HEAP8[(HEAP32[i3 >> 2] | 0) + 46 | 0] | 0;
+ HEAP32[(HEAP32[i12 >> 2] | 0) + (i15 << 4) + 4 >> 2] = i8;
+ HEAP32[i13 >> 2] = (HEAP32[i13 >> 2] | 0) + 1;
+ L152 : while (1) {
+ switch (HEAP32[i5 >> 2] | 0) {
+ case 285:
+ case 59:
+ {
+ break;
+ }
+ case 286:
+ case 262:
+ case 261:
+ case 260:
+ {
+ i7 = 108;
+ break L152;
+ }
+ default:
+ {
+ break L152;
+ }
+ }
+ _statement(i4);
+ }
+ if ((i7 | 0) == 108) {
+ HEAP8[(HEAP32[i12 >> 2] | 0) + (i15 << 4) + 12 | 0] = HEAP8[(HEAP32[i11 >> 2] | 0) + 8 | 0] | 0;
+ }
+ i5 = (HEAP32[i12 >> 2] | 0) + (i15 << 4) | 0;
+ i8 = HEAP32[i9 >> 2] | 0;
+ i7 = HEAP16[(HEAP32[(HEAP32[i3 >> 2] | 0) + 16 >> 2] | 0) + 6 >> 1] | 0;
+ i6 = i8 + 16 | 0;
+ if ((i7 | 0) < (HEAP32[i6 >> 2] | 0)) {
+ i8 = i8 + 12 | 0;
+ do {
+ while (1) {
+ if ((_luaS_eqstr(HEAP32[(HEAP32[i8 >> 2] | 0) + (i7 << 4) >> 2] | 0, HEAP32[i5 >> 2] | 0) | 0) == 0) {
+ break;
+ }
+ _closegoto(i4, i7, i5);
+ if ((i7 | 0) >= (HEAP32[i6 >> 2] | 0)) {
+ break L8;
+ }
+ }
+ i7 = i7 + 1 | 0;
+ } while ((i7 | 0) < (HEAP32[i6 >> 2] | 0));
+ }
+ break;
+ }
+ case 274:
+ {
+ _luaX_next(i4);
+ i6 = HEAP32[i3 >> 2] | 0;
+ L166 : do {
+ switch (HEAP32[i5 >> 2] | 0) {
+ case 59:
+ case 277:
+ case 286:
+ case 262:
+ case 261:
+ case 260:
+ {
+ i8 = 0;
+ i7 = 0;
+ break;
+ }
+ default:
+ {
+ _subexpr(i4, i24, 0) | 0;
+ if ((HEAP32[i5 >> 2] | 0) == 44) {
+ i7 = 1;
+ do {
+ _luaX_next(i4);
+ _luaK_exp2nextreg(HEAP32[i3 >> 2] | 0, i24);
+ _subexpr(i4, i24, 0) | 0;
+ i7 = i7 + 1 | 0;
+ } while ((HEAP32[i5 >> 2] | 0) == 44);
+ } else {
+ i7 = 1;
+ }
+ if (!(((HEAP32[i24 >> 2] | 0) + -12 | 0) >>> 0 < 2)) {
+ if ((i7 | 0) == 1) {
+ i8 = _luaK_exp2anyreg(i6, i24) | 0;
+ i7 = 1;
+ break L166;
+ } else {
+ _luaK_exp2nextreg(i6, i24);
+ i8 = HEAPU8[i6 + 46 | 0] | 0;
+ break L166;
+ }
+ } else {
+ _luaK_setreturns(i6, i24, -1);
+ if ((HEAP32[i24 >> 2] | 0) == 12 & (i7 | 0) == 1) {
+ i29 = (HEAP32[(HEAP32[i6 >> 2] | 0) + 12 >> 2] | 0) + (HEAP32[i24 + 8 >> 2] << 2) | 0;
+ HEAP32[i29 >> 2] = HEAP32[i29 >> 2] & -64 | 30;
+ }
+ i8 = HEAPU8[i6 + 46 | 0] | 0;
+ i7 = -1;
+ break L166;
+ }
+ }
+ }
+ } while (0);
+ _luaK_ret(i6, i8, i7);
+ if ((HEAP32[i5 >> 2] | 0) == 59) {
+ _luaX_next(i4);
+ }
+ break;
+ }
+ case 266:
+ case 258:
+ {
+ i6 = _luaK_jump(i9) | 0;
+ i7 = HEAP32[i19 >> 2] | 0;
+ i29 = (HEAP32[i5 >> 2] | 0) == 266;
+ _luaX_next(i4);
+ do {
+ if (i29) {
+ if ((HEAP32[i5 >> 2] | 0) == 288) {
+ i23 = HEAP32[i4 + 24 >> 2] | 0;
+ _luaX_next(i4);
+ break;
+ } else {
+ _error_expected(i4, 288);
+ }
+ } else {
+ i23 = _luaS_new(HEAP32[i1 >> 2] | 0, 6304) | 0;
+ }
+ } while (0);
+ i10 = HEAP32[i4 + 64 >> 2] | 0;
+ i9 = i10 + 12 | 0;
+ i5 = i10 + 16 | 0;
+ i8 = HEAP32[i5 >> 2] | 0;
+ i10 = i10 + 20 | 0;
+ if ((i8 | 0) < (HEAP32[i10 >> 2] | 0)) {
+ i10 = HEAP32[i9 >> 2] | 0;
+ } else {
+ i10 = _luaM_growaux_(HEAP32[i1 >> 2] | 0, HEAP32[i9 >> 2] | 0, i10, 16, 32767, 6312) | 0;
+ HEAP32[i9 >> 2] = i10;
+ }
+ HEAP32[i10 + (i8 << 4) >> 2] = i23;
+ i29 = HEAP32[i9 >> 2] | 0;
+ HEAP32[i29 + (i8 << 4) + 8 >> 2] = i7;
+ HEAP8[i29 + (i8 << 4) + 12 | 0] = HEAP8[(HEAP32[i3 >> 2] | 0) + 46 | 0] | 0;
+ HEAP32[(HEAP32[i9 >> 2] | 0) + (i8 << 4) + 4 >> 2] = i6;
+ HEAP32[i5 >> 2] = (HEAP32[i5 >> 2] | 0) + 1;
+ _findlabel(i4, i8) | 0;
+ break;
+ }
+ default:
+ {
+ i6 = i8 + 8 | 0;
+ _suffixedexp(i4, i6);
+ i29 = HEAP32[i5 >> 2] | 0;
+ if ((i29 | 0) == 44 | (i29 | 0) == 61) {
+ HEAP32[i8 >> 2] = 0;
+ _assignment(i4, i8, 1);
+ break L8;
+ }
+ if ((HEAP32[i6 >> 2] | 0) == 12) {
+ i29 = (HEAP32[(HEAP32[i9 >> 2] | 0) + 12 >> 2] | 0) + (HEAP32[i8 + 16 >> 2] << 2) | 0;
+ HEAP32[i29 >> 2] = HEAP32[i29 >> 2] & -8372225 | 16384;
+ break L8;
+ } else {
+ _luaX_syntaxerror(i4, 6344);
+ }
+ }
+ }
+ } while (0);
+ i29 = HEAP32[i3 >> 2] | 0;
+ HEAP8[i29 + 48 | 0] = HEAP8[i29 + 46 | 0] | 0;
+ i29 = (HEAP32[i1 >> 2] | 0) + 38 | 0;
+ HEAP16[i29 >> 1] = (HEAP16[i29 >> 1] | 0) + -1 << 16 >> 16;
+ STACKTOP = i2;
+ return;
+}
+function _match(i1, i12, i11) {
+ i1 = i1 | 0;
+ i12 = i12 | 0;
+ i11 = i11 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0, i23 = 0, i24 = 0, i25 = 0, i26 = 0, i27 = 0, i28 = 0, i29 = 0, i30 = 0, i31 = 0, i32 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i8 = i2;
+ i32 = HEAP32[i1 >> 2] | 0;
+ HEAP32[i1 >> 2] = i32 + -1;
+ if ((i32 | 0) == 0) {
+ _luaL_error(HEAP32[i1 + 16 >> 2] | 0, 7272, i8) | 0;
+ }
+ i14 = i1 + 12 | 0;
+ i22 = HEAP32[i14 >> 2] | 0;
+ L4 : do {
+ if ((i22 | 0) != (i11 | 0)) {
+ i3 = i1 + 8 | 0;
+ i9 = i1 + 16 | 0;
+ i16 = i1 + 4 | 0;
+ i10 = i1 + 20 | 0;
+ L6 : while (1) {
+ i19 = i12 + 1 | 0;
+ i20 = i12 + -1 | 0;
+ L8 : while (1) {
+ i23 = HEAP8[i11] | 0;
+ i21 = i23 << 24 >> 24;
+ L10 : do {
+ if ((i21 | 0) == 36) {
+ i7 = i11 + 1 | 0;
+ if ((i7 | 0) == (i22 | 0)) {
+ i7 = 23;
+ break L6;
+ } else {
+ i22 = i7;
+ i21 = i7;
+ i7 = 89;
+ }
+ } else if ((i21 | 0) == 37) {
+ i21 = i11 + 1 | 0;
+ i23 = HEAP8[i21] | 0;
+ switch (i23 << 24 >> 24 | 0) {
+ case 57:
+ case 56:
+ case 55:
+ case 54:
+ case 53:
+ case 52:
+ case 51:
+ case 50:
+ case 49:
+ case 48:
+ {
+ i7 = 69;
+ break L8;
+ }
+ case 98:
+ {
+ i7 = 25;
+ break L8;
+ }
+ case 102:
+ {
+ break;
+ }
+ default:
+ {
+ if ((i21 | 0) == (i22 | 0)) {
+ _luaL_error(HEAP32[i9 >> 2] | 0, 7368, i8) | 0;
+ }
+ i22 = i11 + 2 | 0;
+ i7 = 89;
+ break L10;
+ }
+ }
+ i22 = i11 + 2 | 0;
+ if ((HEAP8[i22] | 0) == 91) {
+ i21 = 91;
+ } else {
+ _luaL_error(HEAP32[i9 >> 2] | 0, 7296, i8) | 0;
+ i21 = HEAP8[i22] | 0;
+ }
+ i23 = i11 + 3 | 0;
+ i21 = i21 << 24 >> 24;
+ if ((i21 | 0) == 91) {
+ i21 = (HEAP8[i23] | 0) == 94 ? i11 + 4 | 0 : i23;
+ while (1) {
+ if ((i21 | 0) == (HEAP32[i14 >> 2] | 0)) {
+ _luaL_error(HEAP32[i9 >> 2] | 0, 7408, i8) | 0;
+ }
+ i11 = i21 + 1 | 0;
+ if ((HEAP8[i21] | 0) == 37) {
+ i11 = i11 >>> 0 < (HEAP32[i14 >> 2] | 0) >>> 0 ? i21 + 2 | 0 : i11;
+ }
+ if ((HEAP8[i11] | 0) == 93) {
+ break;
+ } else {
+ i21 = i11;
+ }
+ }
+ i11 = i11 + 1 | 0;
+ } else if ((i21 | 0) == 37) {
+ if ((i23 | 0) == (HEAP32[i14 >> 2] | 0)) {
+ _luaL_error(HEAP32[i9 >> 2] | 0, 7368, i8) | 0;
+ }
+ i11 = i11 + 4 | 0;
+ } else {
+ i11 = i23;
+ }
+ if ((i12 | 0) == (HEAP32[i16 >> 2] | 0)) {
+ i25 = 0;
+ } else {
+ i25 = HEAP8[i20] | 0;
+ }
+ i24 = i25 & 255;
+ i21 = i11 + -1 | 0;
+ i26 = (HEAP8[i23] | 0) == 94;
+ i28 = i26 ? i23 : i22;
+ i27 = i26 & 1;
+ i26 = i27 ^ 1;
+ i30 = i28 + 1 | 0;
+ L41 : do {
+ if (i30 >>> 0 < i21 >>> 0) {
+ while (1) {
+ i32 = HEAP8[i30] | 0;
+ i29 = i28 + 2 | 0;
+ i31 = HEAP8[i29] | 0;
+ do {
+ if (i32 << 24 >> 24 == 37) {
+ if ((_match_class(i24, i31 & 255) | 0) == 0) {
+ i28 = i29;
+ } else {
+ break L41;
+ }
+ } else {
+ if (i31 << 24 >> 24 == 45 ? (i18 = i28 + 3 | 0, i18 >>> 0 < i21 >>> 0) : 0) {
+ if ((i32 & 255) > (i25 & 255)) {
+ i28 = i18;
+ break;
+ }
+ if ((HEAPU8[i18] | 0) < (i25 & 255)) {
+ i28 = i18;
+ break;
+ } else {
+ break L41;
+ }
+ }
+ if (i32 << 24 >> 24 == i25 << 24 >> 24) {
+ break L41;
+ } else {
+ i28 = i30;
+ }
+ }
+ } while (0);
+ i30 = i28 + 1 | 0;
+ if (!(i30 >>> 0 < i21 >>> 0)) {
+ i26 = i27;
+ break;
+ }
+ }
+ } else {
+ i26 = i27;
+ }
+ } while (0);
+ if ((i26 | 0) != 0) {
+ i12 = 0;
+ break L4;
+ }
+ i24 = HEAP8[i12] | 0;
+ i25 = i24 & 255;
+ i27 = (HEAP8[i23] | 0) == 94;
+ i26 = i27 ? i23 : i22;
+ i22 = i27 & 1;
+ i23 = i22 ^ 1;
+ i30 = i26 + 1 | 0;
+ L55 : do {
+ if (i30 >>> 0 < i21 >>> 0) {
+ do {
+ i29 = HEAP8[i30] | 0;
+ i28 = i26 + 2 | 0;
+ i27 = HEAP8[i28] | 0;
+ do {
+ if (i29 << 24 >> 24 == 37) {
+ if ((_match_class(i25, i27 & 255) | 0) == 0) {
+ i26 = i28;
+ } else {
+ i22 = i23;
+ break L55;
+ }
+ } else {
+ if (i27 << 24 >> 24 == 45 ? (i17 = i26 + 3 | 0, i17 >>> 0 < i21 >>> 0) : 0) {
+ if ((i29 & 255) > (i24 & 255)) {
+ i26 = i17;
+ break;
+ }
+ if ((HEAPU8[i17] | 0) < (i24 & 255)) {
+ i26 = i17;
+ break;
+ } else {
+ i22 = i23;
+ break L55;
+ }
+ }
+ if (i29 << 24 >> 24 == i24 << 24 >> 24) {
+ i22 = i23;
+ break L55;
+ } else {
+ i26 = i30;
+ }
+ }
+ } while (0);
+ i30 = i26 + 1 | 0;
+ } while (i30 >>> 0 < i21 >>> 0);
+ }
+ } while (0);
+ if ((i22 | 0) == 0) {
+ i12 = 0;
+ break L4;
+ }
+ } else if ((i21 | 0) == 40) {
+ i7 = 7;
+ break L6;
+ } else if ((i21 | 0) != 41) {
+ i21 = i11 + 1 | 0;
+ if (i23 << 24 >> 24 == 91) {
+ i7 = (HEAP8[i21] | 0) == 94 ? i11 + 2 | 0 : i21;
+ while (1) {
+ if ((i7 | 0) == (i22 | 0)) {
+ _luaL_error(HEAP32[i9 >> 2] | 0, 7408, i8) | 0;
+ }
+ i22 = i7 + 1 | 0;
+ if ((HEAP8[i7] | 0) == 37) {
+ i7 = i22 >>> 0 < (HEAP32[i14 >> 2] | 0) >>> 0 ? i7 + 2 | 0 : i22;
+ } else {
+ i7 = i22;
+ }
+ if ((HEAP8[i7] | 0) == 93) {
+ break;
+ }
+ i22 = HEAP32[i14 >> 2] | 0;
+ }
+ i22 = i7 + 1 | 0;
+ i7 = 89;
+ } else {
+ i22 = i21;
+ i7 = 89;
+ }
+ } else {
+ i7 = 16;
+ break L6;
+ }
+ } while (0);
+ L80 : do {
+ if ((i7 | 0) == 89) {
+ i7 = 0;
+ do {
+ if ((HEAP32[i3 >> 2] | 0) >>> 0 > i12 >>> 0) {
+ i23 = HEAP8[i12] | 0;
+ i24 = i23 & 255;
+ i26 = HEAP8[i11] | 0;
+ i25 = i26 << 24 >> 24;
+ L85 : do {
+ if ((i25 | 0) == 46) {
+ i23 = HEAP8[i22] | 0;
+ } else if ((i25 | 0) == 37) {
+ i25 = _match_class(i24, HEAPU8[i21] | 0) | 0;
+ i7 = 104;
+ } else if ((i25 | 0) == 91) {
+ i7 = i22 + -1 | 0;
+ i25 = (HEAP8[i21] | 0) == 94;
+ i27 = i25 ? i21 : i11;
+ i26 = i25 & 1;
+ i25 = i26 ^ 1;
+ i30 = i27 + 1 | 0;
+ if (i30 >>> 0 < i7 >>> 0) {
+ while (1) {
+ i31 = HEAP8[i30] | 0;
+ i29 = i27 + 2 | 0;
+ i28 = HEAP8[i29] | 0;
+ do {
+ if (i31 << 24 >> 24 == 37) {
+ if ((_match_class(i24, i28 & 255) | 0) == 0) {
+ i27 = i29;
+ } else {
+ i7 = 104;
+ break L85;
+ }
+ } else {
+ if (i28 << 24 >> 24 == 45 ? (i13 = i27 + 3 | 0, i13 >>> 0 < i7 >>> 0) : 0) {
+ if ((i31 & 255) > (i23 & 255)) {
+ i27 = i13;
+ break;
+ }
+ if ((HEAPU8[i13] | 0) < (i23 & 255)) {
+ i27 = i13;
+ break;
+ } else {
+ i7 = 104;
+ break L85;
+ }
+ }
+ if (i31 << 24 >> 24 == i23 << 24 >> 24) {
+ i7 = 104;
+ break L85;
+ } else {
+ i27 = i30;
+ }
+ }
+ } while (0);
+ i30 = i27 + 1 | 0;
+ if (!(i30 >>> 0 < i7 >>> 0)) {
+ i25 = i26;
+ i7 = 104;
+ break;
+ }
+ }
+ } else {
+ i25 = i26;
+ i7 = 104;
+ }
+ } else {
+ i25 = i26 << 24 >> 24 == i23 << 24 >> 24 | 0;
+ i7 = 104;
+ }
+ } while (0);
+ if ((i7 | 0) == 104) {
+ i7 = 0;
+ i23 = HEAP8[i22] | 0;
+ if ((i25 | 0) == 0) {
+ break;
+ }
+ }
+ i23 = i23 << 24 >> 24;
+ if ((i23 | 0) == 45) {
+ i7 = 109;
+ break L6;
+ } else if ((i23 | 0) == 42) {
+ i7 = 112;
+ break L6;
+ } else if ((i23 | 0) == 43) {
+ break L6;
+ } else if ((i23 | 0) != 63) {
+ i12 = i19;
+ i11 = i22;
+ break L8;
+ }
+ i11 = i22 + 1 | 0;
+ i21 = _match(i1, i19, i11) | 0;
+ if ((i21 | 0) == 0) {
+ break L80;
+ } else {
+ i12 = i21;
+ break L4;
+ }
+ } else {
+ i23 = HEAP8[i22] | 0;
+ }
+ } while (0);
+ if (!(i23 << 24 >> 24 == 45 | i23 << 24 >> 24 == 63 | i23 << 24 >> 24 == 42)) {
+ i12 = 0;
+ break L4;
+ }
+ i11 = i22 + 1 | 0;
+ }
+ } while (0);
+ i22 = HEAP32[i14 >> 2] | 0;
+ if ((i11 | 0) == (i22 | 0)) {
+ break L4;
+ }
+ }
+ if ((i7 | 0) == 25) {
+ i7 = 0;
+ i21 = i11 + 2 | 0;
+ if (!((i22 + -1 | 0) >>> 0 > i21 >>> 0)) {
+ _luaL_error(HEAP32[i9 >> 2] | 0, 7440, i8) | 0;
+ }
+ i20 = HEAP8[i12] | 0;
+ if (!(i20 << 24 >> 24 == (HEAP8[i21] | 0))) {
+ i12 = 0;
+ break L4;
+ }
+ i21 = HEAP8[i11 + 3 | 0] | 0;
+ i22 = HEAP32[i3 >> 2] | 0;
+ if (i19 >>> 0 < i22 >>> 0) {
+ i24 = 1;
+ } else {
+ i12 = 0;
+ break L4;
+ }
+ while (1) {
+ i23 = HEAP8[i19] | 0;
+ if (i23 << 24 >> 24 == i21 << 24 >> 24) {
+ i24 = i24 + -1 | 0;
+ if ((i24 | 0) == 0) {
+ break;
+ }
+ } else {
+ i24 = (i23 << 24 >> 24 == i20 << 24 >> 24) + i24 | 0;
+ }
+ i12 = i19 + 1 | 0;
+ if (i12 >>> 0 < i22 >>> 0) {
+ i32 = i19;
+ i19 = i12;
+ i12 = i32;
+ } else {
+ i12 = 0;
+ break L4;
+ }
+ }
+ i12 = i12 + 2 | 0;
+ i11 = i11 + 4 | 0;
+ } else if ((i7 | 0) == 69) {
+ i7 = 0;
+ i20 = i23 & 255;
+ i19 = i20 + -49 | 0;
+ if (((i19 | 0) >= 0 ? (i19 | 0) < (HEAP32[i10 >> 2] | 0) : 0) ? (i15 = HEAP32[i1 + (i19 << 3) + 28 >> 2] | 0, !((i15 | 0) == -1)) : 0) {
+ i20 = i15;
+ } else {
+ i19 = HEAP32[i9 >> 2] | 0;
+ HEAP32[i8 >> 2] = i20 + -48;
+ i20 = _luaL_error(i19, 7336, i8) | 0;
+ i19 = i20;
+ i20 = HEAP32[i1 + (i20 << 3) + 28 >> 2] | 0;
+ }
+ if (((HEAP32[i3 >> 2] | 0) - i12 | 0) >>> 0 < i20 >>> 0) {
+ i12 = 0;
+ break L4;
+ }
+ if ((_memcmp(HEAP32[i1 + (i19 << 3) + 24 >> 2] | 0, i12, i20) | 0) != 0) {
+ i12 = 0;
+ break L4;
+ }
+ i12 = i12 + i20 | 0;
+ if ((i12 | 0) == 0) {
+ i12 = 0;
+ break L4;
+ }
+ i11 = i11 + 2 | 0;
+ }
+ i22 = HEAP32[i14 >> 2] | 0;
+ if ((i11 | 0) == (i22 | 0)) {
+ break L4;
+ }
+ }
+ if ((i7 | 0) == 7) {
+ i3 = i11 + 1 | 0;
+ if ((HEAP8[i3] | 0) == 41) {
+ i3 = HEAP32[i10 >> 2] | 0;
+ if ((i3 | 0) > 31) {
+ _luaL_error(HEAP32[i9 >> 2] | 0, 7200, i8) | 0;
+ }
+ HEAP32[i1 + (i3 << 3) + 24 >> 2] = i12;
+ HEAP32[i1 + (i3 << 3) + 28 >> 2] = -2;
+ HEAP32[i10 >> 2] = i3 + 1;
+ i12 = _match(i1, i12, i11 + 2 | 0) | 0;
+ if ((i12 | 0) != 0) {
+ break;
+ }
+ HEAP32[i10 >> 2] = (HEAP32[i10 >> 2] | 0) + -1;
+ i12 = 0;
+ break;
+ } else {
+ i4 = HEAP32[i10 >> 2] | 0;
+ if ((i4 | 0) > 31) {
+ _luaL_error(HEAP32[i9 >> 2] | 0, 7200, i8) | 0;
+ }
+ HEAP32[i1 + (i4 << 3) + 24 >> 2] = i12;
+ HEAP32[i1 + (i4 << 3) + 28 >> 2] = -1;
+ HEAP32[i10 >> 2] = i4 + 1;
+ i12 = _match(i1, i12, i3) | 0;
+ if ((i12 | 0) != 0) {
+ break;
+ }
+ HEAP32[i10 >> 2] = (HEAP32[i10 >> 2] | 0) + -1;
+ i12 = 0;
+ break;
+ }
+ } else if ((i7 | 0) == 16) {
+ i3 = i11 + 1 | 0;
+ i5 = HEAP32[i10 >> 2] | 0;
+ while (1) {
+ i4 = i5 + -1 | 0;
+ if ((i5 | 0) <= 0) {
+ i7 = 19;
+ break;
+ }
+ if ((HEAP32[i1 + (i4 << 3) + 28 >> 2] | 0) == -1) {
+ break;
+ } else {
+ i5 = i4;
+ }
+ }
+ if ((i7 | 0) == 19) {
+ i4 = _luaL_error(HEAP32[i9 >> 2] | 0, 7488, i8) | 0;
+ }
+ i5 = i1 + (i4 << 3) + 28 | 0;
+ HEAP32[i5 >> 2] = i12 - (HEAP32[i1 + (i4 << 3) + 24 >> 2] | 0);
+ i12 = _match(i1, i12, i3) | 0;
+ if ((i12 | 0) != 0) {
+ break;
+ }
+ HEAP32[i5 >> 2] = -1;
+ i12 = 0;
+ break;
+ } else if ((i7 | 0) == 23) {
+ i12 = (i12 | 0) == (HEAP32[i3 >> 2] | 0) ? i12 : 0;
+ break;
+ } else if ((i7 | 0) == 109) {
+ i4 = i22 + 1 | 0;
+ i8 = _match(i1, i12, i4) | 0;
+ if ((i8 | 0) != 0) {
+ i12 = i8;
+ break;
+ }
+ i8 = i22 + -1 | 0;
+ while (1) {
+ if (!((HEAP32[i3 >> 2] | 0) >>> 0 > i12 >>> 0)) {
+ i12 = 0;
+ break L4;
+ }
+ i9 = HEAP8[i12] | 0;
+ i10 = i9 & 255;
+ i14 = HEAP8[i11] | 0;
+ i13 = i14 << 24 >> 24;
+ L139 : do {
+ if ((i13 | 0) == 91) {
+ i6 = (HEAP8[i21] | 0) == 94;
+ i13 = i6 ? i21 : i11;
+ i6 = i6 & 1;
+ i7 = i6 ^ 1;
+ i14 = i13 + 1 | 0;
+ if (i14 >>> 0 < i8 >>> 0) {
+ while (1) {
+ i17 = HEAP8[i14] | 0;
+ i15 = i13 + 2 | 0;
+ i16 = HEAP8[i15] | 0;
+ do {
+ if (i17 << 24 >> 24 == 37) {
+ if ((_match_class(i10, i16 & 255) | 0) == 0) {
+ i13 = i15;
+ } else {
+ i6 = i7;
+ i7 = 147;
+ break L139;
+ }
+ } else {
+ if (i16 << 24 >> 24 == 45 ? (i5 = i13 + 3 | 0, i5 >>> 0 < i8 >>> 0) : 0) {
+ if ((i17 & 255) > (i9 & 255)) {
+ i13 = i5;
+ break;
+ }
+ if ((HEAPU8[i5] | 0) < (i9 & 255)) {
+ i13 = i5;
+ break;
+ } else {
+ i6 = i7;
+ i7 = 147;
+ break L139;
+ }
+ }
+ if (i17 << 24 >> 24 == i9 << 24 >> 24) {
+ i6 = i7;
+ i7 = 147;
+ break L139;
+ } else {
+ i13 = i14;
+ }
+ }
+ } while (0);
+ i14 = i13 + 1 | 0;
+ if (!(i14 >>> 0 < i8 >>> 0)) {
+ i7 = 147;
+ break;
+ }
+ }
+ } else {
+ i7 = 147;
+ }
+ } else if ((i13 | 0) == 37) {
+ i6 = _match_class(i10, HEAPU8[i21] | 0) | 0;
+ i7 = 147;
+ } else if ((i13 | 0) != 46) {
+ i6 = i14 << 24 >> 24 == i9 << 24 >> 24 | 0;
+ i7 = 147;
+ }
+ } while (0);
+ if ((i7 | 0) == 147 ? (i7 = 0, (i6 | 0) == 0) : 0) {
+ i12 = 0;
+ break L4;
+ }
+ i9 = i12 + 1 | 0;
+ i12 = _match(i1, i9, i4) | 0;
+ if ((i12 | 0) == 0) {
+ i12 = i9;
+ } else {
+ break L4;
+ }
+ }
+ } else if ((i7 | 0) == 112) {
+ i19 = i12;
+ }
+ i10 = HEAP32[i3 >> 2] | 0;
+ if (i10 >>> 0 > i19 >>> 0) {
+ i5 = i22 + -1 | 0;
+ i8 = i19;
+ i6 = 0;
+ do {
+ i8 = HEAP8[i8] | 0;
+ i9 = i8 & 255;
+ i13 = HEAP8[i11] | 0;
+ i12 = i13 << 24 >> 24;
+ L183 : do {
+ if ((i12 | 0) == 37) {
+ i10 = _match_class(i9, HEAPU8[i21] | 0) | 0;
+ i7 = 129;
+ } else if ((i12 | 0) == 91) {
+ i7 = (HEAP8[i21] | 0) == 94;
+ i12 = i7 ? i21 : i11;
+ i10 = i7 & 1;
+ i7 = i10 ^ 1;
+ i13 = i12 + 1 | 0;
+ if (i13 >>> 0 < i5 >>> 0) {
+ while (1) {
+ i14 = HEAP8[i13] | 0;
+ i16 = i12 + 2 | 0;
+ i15 = HEAP8[i16] | 0;
+ do {
+ if (i14 << 24 >> 24 == 37) {
+ if ((_match_class(i9, i15 & 255) | 0) == 0) {
+ i12 = i16;
+ } else {
+ i10 = i7;
+ i7 = 129;
+ break L183;
+ }
+ } else {
+ if (i15 << 24 >> 24 == 45 ? (i4 = i12 + 3 | 0, i4 >>> 0 < i5 >>> 0) : 0) {
+ if ((i14 & 255) > (i8 & 255)) {
+ i12 = i4;
+ break;
+ }
+ if ((HEAPU8[i4] | 0) < (i8 & 255)) {
+ i12 = i4;
+ break;
+ } else {
+ i10 = i7;
+ i7 = 129;
+ break L183;
+ }
+ }
+ if (i14 << 24 >> 24 == i8 << 24 >> 24) {
+ i10 = i7;
+ i7 = 129;
+ break L183;
+ } else {
+ i12 = i13;
+ }
+ }
+ } while (0);
+ i13 = i12 + 1 | 0;
+ if (!(i13 >>> 0 < i5 >>> 0)) {
+ i7 = 129;
+ break;
+ }
+ }
+ } else {
+ i7 = 129;
+ }
+ } else if ((i12 | 0) != 46) {
+ i10 = i13 << 24 >> 24 == i8 << 24 >> 24 | 0;
+ i7 = 129;
+ }
+ } while (0);
+ if ((i7 | 0) == 129) {
+ i7 = 0;
+ if ((i10 | 0) == 0) {
+ break;
+ }
+ i10 = HEAP32[i3 >> 2] | 0;
+ }
+ i6 = i6 + 1 | 0;
+ i8 = i19 + i6 | 0;
+ } while (i10 >>> 0 > i8 >>> 0);
+ if (!((i6 | 0) > -1)) {
+ i12 = 0;
+ break;
+ }
+ } else {
+ i6 = 0;
+ }
+ i3 = i22 + 1 | 0;
+ while (1) {
+ i12 = _match(i1, i19 + i6 | 0, i3) | 0;
+ if ((i12 | 0) != 0) {
+ break L4;
+ }
+ if ((i6 | 0) > 0) {
+ i6 = i6 + -1 | 0;
+ } else {
+ i12 = 0;
+ break;
+ }
+ }
+ }
+ } while (0);
+ HEAP32[i1 >> 2] = (HEAP32[i1 >> 2] | 0) + 1;
+ STACKTOP = i2;
+ return i12 | 0;
+}
+function _free(i7) {
+ i7 = i7 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0;
+ i1 = STACKTOP;
+ if ((i7 | 0) == 0) {
+ STACKTOP = i1;
+ return;
+ }
+ i15 = i7 + -8 | 0;
+ i16 = HEAP32[12928 >> 2] | 0;
+ if (i15 >>> 0 < i16 >>> 0) {
+ _abort();
+ }
+ i13 = HEAP32[i7 + -4 >> 2] | 0;
+ i12 = i13 & 3;
+ if ((i12 | 0) == 1) {
+ _abort();
+ }
+ i8 = i13 & -8;
+ i6 = i7 + (i8 + -8) | 0;
+ do {
+ if ((i13 & 1 | 0) == 0) {
+ i19 = HEAP32[i15 >> 2] | 0;
+ if ((i12 | 0) == 0) {
+ STACKTOP = i1;
+ return;
+ }
+ i15 = -8 - i19 | 0;
+ i13 = i7 + i15 | 0;
+ i12 = i19 + i8 | 0;
+ if (i13 >>> 0 < i16 >>> 0) {
+ _abort();
+ }
+ if ((i13 | 0) == (HEAP32[12932 >> 2] | 0)) {
+ i2 = i7 + (i8 + -4) | 0;
+ if ((HEAP32[i2 >> 2] & 3 | 0) != 3) {
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ HEAP32[12920 >> 2] = i12;
+ HEAP32[i2 >> 2] = HEAP32[i2 >> 2] & -2;
+ HEAP32[i7 + (i15 + 4) >> 2] = i12 | 1;
+ HEAP32[i6 >> 2] = i12;
+ STACKTOP = i1;
+ return;
+ }
+ i18 = i19 >>> 3;
+ if (i19 >>> 0 < 256) {
+ i2 = HEAP32[i7 + (i15 + 8) >> 2] | 0;
+ i11 = HEAP32[i7 + (i15 + 12) >> 2] | 0;
+ i14 = 12952 + (i18 << 1 << 2) | 0;
+ if ((i2 | 0) != (i14 | 0)) {
+ if (i2 >>> 0 < i16 >>> 0) {
+ _abort();
+ }
+ if ((HEAP32[i2 + 12 >> 2] | 0) != (i13 | 0)) {
+ _abort();
+ }
+ }
+ if ((i11 | 0) == (i2 | 0)) {
+ HEAP32[3228] = HEAP32[3228] & ~(1 << i18);
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ if ((i11 | 0) != (i14 | 0)) {
+ if (i11 >>> 0 < i16 >>> 0) {
+ _abort();
+ }
+ i14 = i11 + 8 | 0;
+ if ((HEAP32[i14 >> 2] | 0) == (i13 | 0)) {
+ i17 = i14;
+ } else {
+ _abort();
+ }
+ } else {
+ i17 = i11 + 8 | 0;
+ }
+ HEAP32[i2 + 12 >> 2] = i11;
+ HEAP32[i17 >> 2] = i2;
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ i17 = HEAP32[i7 + (i15 + 24) >> 2] | 0;
+ i18 = HEAP32[i7 + (i15 + 12) >> 2] | 0;
+ do {
+ if ((i18 | 0) == (i13 | 0)) {
+ i19 = i7 + (i15 + 20) | 0;
+ i18 = HEAP32[i19 >> 2] | 0;
+ if ((i18 | 0) == 0) {
+ i19 = i7 + (i15 + 16) | 0;
+ i18 = HEAP32[i19 >> 2] | 0;
+ if ((i18 | 0) == 0) {
+ i14 = 0;
+ break;
+ }
+ }
+ while (1) {
+ i21 = i18 + 20 | 0;
+ i20 = HEAP32[i21 >> 2] | 0;
+ if ((i20 | 0) != 0) {
+ i18 = i20;
+ i19 = i21;
+ continue;
+ }
+ i20 = i18 + 16 | 0;
+ i21 = HEAP32[i20 >> 2] | 0;
+ if ((i21 | 0) == 0) {
+ break;
+ } else {
+ i18 = i21;
+ i19 = i20;
+ }
+ }
+ if (i19 >>> 0 < i16 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i19 >> 2] = 0;
+ i14 = i18;
+ break;
+ }
+ } else {
+ i19 = HEAP32[i7 + (i15 + 8) >> 2] | 0;
+ if (i19 >>> 0 < i16 >>> 0) {
+ _abort();
+ }
+ i16 = i19 + 12 | 0;
+ if ((HEAP32[i16 >> 2] | 0) != (i13 | 0)) {
+ _abort();
+ }
+ i20 = i18 + 8 | 0;
+ if ((HEAP32[i20 >> 2] | 0) == (i13 | 0)) {
+ HEAP32[i16 >> 2] = i18;
+ HEAP32[i20 >> 2] = i19;
+ i14 = i18;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ if ((i17 | 0) != 0) {
+ i18 = HEAP32[i7 + (i15 + 28) >> 2] | 0;
+ i16 = 13216 + (i18 << 2) | 0;
+ if ((i13 | 0) == (HEAP32[i16 >> 2] | 0)) {
+ HEAP32[i16 >> 2] = i14;
+ if ((i14 | 0) == 0) {
+ HEAP32[12916 >> 2] = HEAP32[12916 >> 2] & ~(1 << i18);
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ } else {
+ if (i17 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i16 = i17 + 16 | 0;
+ if ((HEAP32[i16 >> 2] | 0) == (i13 | 0)) {
+ HEAP32[i16 >> 2] = i14;
+ } else {
+ HEAP32[i17 + 20 >> 2] = i14;
+ }
+ if ((i14 | 0) == 0) {
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ }
+ if (i14 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i14 + 24 >> 2] = i17;
+ i16 = HEAP32[i7 + (i15 + 16) >> 2] | 0;
+ do {
+ if ((i16 | 0) != 0) {
+ if (i16 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i14 + 16 >> 2] = i16;
+ HEAP32[i16 + 24 >> 2] = i14;
+ break;
+ }
+ }
+ } while (0);
+ i15 = HEAP32[i7 + (i15 + 20) >> 2] | 0;
+ if ((i15 | 0) != 0) {
+ if (i15 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i14 + 20 >> 2] = i15;
+ HEAP32[i15 + 24 >> 2] = i14;
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ } else {
+ i2 = i13;
+ i11 = i12;
+ }
+ } else {
+ i2 = i13;
+ i11 = i12;
+ }
+ } else {
+ i2 = i15;
+ i11 = i8;
+ }
+ } while (0);
+ if (!(i2 >>> 0 < i6 >>> 0)) {
+ _abort();
+ }
+ i12 = i7 + (i8 + -4) | 0;
+ i13 = HEAP32[i12 >> 2] | 0;
+ if ((i13 & 1 | 0) == 0) {
+ _abort();
+ }
+ if ((i13 & 2 | 0) == 0) {
+ if ((i6 | 0) == (HEAP32[12936 >> 2] | 0)) {
+ i21 = (HEAP32[12924 >> 2] | 0) + i11 | 0;
+ HEAP32[12924 >> 2] = i21;
+ HEAP32[12936 >> 2] = i2;
+ HEAP32[i2 + 4 >> 2] = i21 | 1;
+ if ((i2 | 0) != (HEAP32[12932 >> 2] | 0)) {
+ STACKTOP = i1;
+ return;
+ }
+ HEAP32[12932 >> 2] = 0;
+ HEAP32[12920 >> 2] = 0;
+ STACKTOP = i1;
+ return;
+ }
+ if ((i6 | 0) == (HEAP32[12932 >> 2] | 0)) {
+ i21 = (HEAP32[12920 >> 2] | 0) + i11 | 0;
+ HEAP32[12920 >> 2] = i21;
+ HEAP32[12932 >> 2] = i2;
+ HEAP32[i2 + 4 >> 2] = i21 | 1;
+ HEAP32[i2 + i21 >> 2] = i21;
+ STACKTOP = i1;
+ return;
+ }
+ i11 = (i13 & -8) + i11 | 0;
+ i12 = i13 >>> 3;
+ do {
+ if (!(i13 >>> 0 < 256)) {
+ i10 = HEAP32[i7 + (i8 + 16) >> 2] | 0;
+ i15 = HEAP32[i7 + (i8 | 4) >> 2] | 0;
+ do {
+ if ((i15 | 0) == (i6 | 0)) {
+ i13 = i7 + (i8 + 12) | 0;
+ i12 = HEAP32[i13 >> 2] | 0;
+ if ((i12 | 0) == 0) {
+ i13 = i7 + (i8 + 8) | 0;
+ i12 = HEAP32[i13 >> 2] | 0;
+ if ((i12 | 0) == 0) {
+ i9 = 0;
+ break;
+ }
+ }
+ while (1) {
+ i14 = i12 + 20 | 0;
+ i15 = HEAP32[i14 >> 2] | 0;
+ if ((i15 | 0) != 0) {
+ i12 = i15;
+ i13 = i14;
+ continue;
+ }
+ i14 = i12 + 16 | 0;
+ i15 = HEAP32[i14 >> 2] | 0;
+ if ((i15 | 0) == 0) {
+ break;
+ } else {
+ i12 = i15;
+ i13 = i14;
+ }
+ }
+ if (i13 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i13 >> 2] = 0;
+ i9 = i12;
+ break;
+ }
+ } else {
+ i13 = HEAP32[i7 + i8 >> 2] | 0;
+ if (i13 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i14 = i13 + 12 | 0;
+ if ((HEAP32[i14 >> 2] | 0) != (i6 | 0)) {
+ _abort();
+ }
+ i12 = i15 + 8 | 0;
+ if ((HEAP32[i12 >> 2] | 0) == (i6 | 0)) {
+ HEAP32[i14 >> 2] = i15;
+ HEAP32[i12 >> 2] = i13;
+ i9 = i15;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ if ((i10 | 0) != 0) {
+ i12 = HEAP32[i7 + (i8 + 20) >> 2] | 0;
+ i13 = 13216 + (i12 << 2) | 0;
+ if ((i6 | 0) == (HEAP32[i13 >> 2] | 0)) {
+ HEAP32[i13 >> 2] = i9;
+ if ((i9 | 0) == 0) {
+ HEAP32[12916 >> 2] = HEAP32[12916 >> 2] & ~(1 << i12);
+ break;
+ }
+ } else {
+ if (i10 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i12 = i10 + 16 | 0;
+ if ((HEAP32[i12 >> 2] | 0) == (i6 | 0)) {
+ HEAP32[i12 >> 2] = i9;
+ } else {
+ HEAP32[i10 + 20 >> 2] = i9;
+ }
+ if ((i9 | 0) == 0) {
+ break;
+ }
+ }
+ if (i9 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i9 + 24 >> 2] = i10;
+ i6 = HEAP32[i7 + (i8 + 8) >> 2] | 0;
+ do {
+ if ((i6 | 0) != 0) {
+ if (i6 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i9 + 16 >> 2] = i6;
+ HEAP32[i6 + 24 >> 2] = i9;
+ break;
+ }
+ }
+ } while (0);
+ i6 = HEAP32[i7 + (i8 + 12) >> 2] | 0;
+ if ((i6 | 0) != 0) {
+ if (i6 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i9 + 20 >> 2] = i6;
+ HEAP32[i6 + 24 >> 2] = i9;
+ break;
+ }
+ }
+ }
+ } else {
+ i9 = HEAP32[i7 + i8 >> 2] | 0;
+ i7 = HEAP32[i7 + (i8 | 4) >> 2] | 0;
+ i8 = 12952 + (i12 << 1 << 2) | 0;
+ if ((i9 | 0) != (i8 | 0)) {
+ if (i9 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ if ((HEAP32[i9 + 12 >> 2] | 0) != (i6 | 0)) {
+ _abort();
+ }
+ }
+ if ((i7 | 0) == (i9 | 0)) {
+ HEAP32[3228] = HEAP32[3228] & ~(1 << i12);
+ break;
+ }
+ if ((i7 | 0) != (i8 | 0)) {
+ if (i7 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i8 = i7 + 8 | 0;
+ if ((HEAP32[i8 >> 2] | 0) == (i6 | 0)) {
+ i10 = i8;
+ } else {
+ _abort();
+ }
+ } else {
+ i10 = i7 + 8 | 0;
+ }
+ HEAP32[i9 + 12 >> 2] = i7;
+ HEAP32[i10 >> 2] = i9;
+ }
+ } while (0);
+ HEAP32[i2 + 4 >> 2] = i11 | 1;
+ HEAP32[i2 + i11 >> 2] = i11;
+ if ((i2 | 0) == (HEAP32[12932 >> 2] | 0)) {
+ HEAP32[12920 >> 2] = i11;
+ STACKTOP = i1;
+ return;
+ }
+ } else {
+ HEAP32[i12 >> 2] = i13 & -2;
+ HEAP32[i2 + 4 >> 2] = i11 | 1;
+ HEAP32[i2 + i11 >> 2] = i11;
+ }
+ i6 = i11 >>> 3;
+ if (i11 >>> 0 < 256) {
+ i7 = i6 << 1;
+ i3 = 12952 + (i7 << 2) | 0;
+ i8 = HEAP32[3228] | 0;
+ i6 = 1 << i6;
+ if ((i8 & i6 | 0) != 0) {
+ i6 = 12952 + (i7 + 2 << 2) | 0;
+ i7 = HEAP32[i6 >> 2] | 0;
+ if (i7 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i4 = i6;
+ i5 = i7;
+ }
+ } else {
+ HEAP32[3228] = i8 | i6;
+ i4 = 12952 + (i7 + 2 << 2) | 0;
+ i5 = i3;
+ }
+ HEAP32[i4 >> 2] = i2;
+ HEAP32[i5 + 12 >> 2] = i2;
+ HEAP32[i2 + 8 >> 2] = i5;
+ HEAP32[i2 + 12 >> 2] = i3;
+ STACKTOP = i1;
+ return;
+ }
+ i4 = i11 >>> 8;
+ if ((i4 | 0) != 0) {
+ if (i11 >>> 0 > 16777215) {
+ i4 = 31;
+ } else {
+ i20 = (i4 + 1048320 | 0) >>> 16 & 8;
+ i21 = i4 << i20;
+ i19 = (i21 + 520192 | 0) >>> 16 & 4;
+ i21 = i21 << i19;
+ i4 = (i21 + 245760 | 0) >>> 16 & 2;
+ i4 = 14 - (i19 | i20 | i4) + (i21 << i4 >>> 15) | 0;
+ i4 = i11 >>> (i4 + 7 | 0) & 1 | i4 << 1;
+ }
+ } else {
+ i4 = 0;
+ }
+ i5 = 13216 + (i4 << 2) | 0;
+ HEAP32[i2 + 28 >> 2] = i4;
+ HEAP32[i2 + 20 >> 2] = 0;
+ HEAP32[i2 + 16 >> 2] = 0;
+ i7 = HEAP32[12916 >> 2] | 0;
+ i6 = 1 << i4;
+ L199 : do {
+ if ((i7 & i6 | 0) != 0) {
+ i5 = HEAP32[i5 >> 2] | 0;
+ if ((i4 | 0) == 31) {
+ i4 = 0;
+ } else {
+ i4 = 25 - (i4 >>> 1) | 0;
+ }
+ L204 : do {
+ if ((HEAP32[i5 + 4 >> 2] & -8 | 0) != (i11 | 0)) {
+ i4 = i11 << i4;
+ i7 = i5;
+ while (1) {
+ i6 = i7 + (i4 >>> 31 << 2) + 16 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ break;
+ }
+ if ((HEAP32[i5 + 4 >> 2] & -8 | 0) == (i11 | 0)) {
+ i3 = i5;
+ break L204;
+ } else {
+ i4 = i4 << 1;
+ i7 = i5;
+ }
+ }
+ if (i6 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i6 >> 2] = i2;
+ HEAP32[i2 + 24 >> 2] = i7;
+ HEAP32[i2 + 12 >> 2] = i2;
+ HEAP32[i2 + 8 >> 2] = i2;
+ break L199;
+ }
+ } else {
+ i3 = i5;
+ }
+ } while (0);
+ i5 = i3 + 8 | 0;
+ i4 = HEAP32[i5 >> 2] | 0;
+ i6 = HEAP32[12928 >> 2] | 0;
+ if (i3 >>> 0 < i6 >>> 0) {
+ _abort();
+ }
+ if (i4 >>> 0 < i6 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i4 + 12 >> 2] = i2;
+ HEAP32[i5 >> 2] = i2;
+ HEAP32[i2 + 8 >> 2] = i4;
+ HEAP32[i2 + 12 >> 2] = i3;
+ HEAP32[i2 + 24 >> 2] = 0;
+ break;
+ }
+ } else {
+ HEAP32[12916 >> 2] = i7 | i6;
+ HEAP32[i5 >> 2] = i2;
+ HEAP32[i2 + 24 >> 2] = i5;
+ HEAP32[i2 + 12 >> 2] = i2;
+ HEAP32[i2 + 8 >> 2] = i2;
+ }
+ } while (0);
+ i21 = (HEAP32[12944 >> 2] | 0) + -1 | 0;
+ HEAP32[12944 >> 2] = i21;
+ if ((i21 | 0) == 0) {
+ i2 = 13368 | 0;
+ } else {
+ STACKTOP = i1;
+ return;
+ }
+ while (1) {
+ i2 = HEAP32[i2 >> 2] | 0;
+ if ((i2 | 0) == 0) {
+ break;
+ } else {
+ i2 = i2 + 8 | 0;
+ }
+ }
+ HEAP32[12944 >> 2] = -1;
+ STACKTOP = i1;
+ return;
+}
+function _dispose_chunk(i6, i7) {
+ i6 = i6 | 0;
+ i7 = i7 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i5 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0;
+ i1 = STACKTOP;
+ i5 = i6 + i7 | 0;
+ i10 = HEAP32[i6 + 4 >> 2] | 0;
+ do {
+ if ((i10 & 1 | 0) == 0) {
+ i14 = HEAP32[i6 >> 2] | 0;
+ if ((i10 & 3 | 0) == 0) {
+ STACKTOP = i1;
+ return;
+ }
+ i10 = i6 + (0 - i14) | 0;
+ i11 = i14 + i7 | 0;
+ i15 = HEAP32[12928 >> 2] | 0;
+ if (i10 >>> 0 < i15 >>> 0) {
+ _abort();
+ }
+ if ((i10 | 0) == (HEAP32[12932 >> 2] | 0)) {
+ i2 = i6 + (i7 + 4) | 0;
+ if ((HEAP32[i2 >> 2] & 3 | 0) != 3) {
+ i2 = i10;
+ i12 = i11;
+ break;
+ }
+ HEAP32[12920 >> 2] = i11;
+ HEAP32[i2 >> 2] = HEAP32[i2 >> 2] & -2;
+ HEAP32[i6 + (4 - i14) >> 2] = i11 | 1;
+ HEAP32[i5 >> 2] = i11;
+ STACKTOP = i1;
+ return;
+ }
+ i17 = i14 >>> 3;
+ if (i14 >>> 0 < 256) {
+ i2 = HEAP32[i6 + (8 - i14) >> 2] | 0;
+ i12 = HEAP32[i6 + (12 - i14) >> 2] | 0;
+ i13 = 12952 + (i17 << 1 << 2) | 0;
+ if ((i2 | 0) != (i13 | 0)) {
+ if (i2 >>> 0 < i15 >>> 0) {
+ _abort();
+ }
+ if ((HEAP32[i2 + 12 >> 2] | 0) != (i10 | 0)) {
+ _abort();
+ }
+ }
+ if ((i12 | 0) == (i2 | 0)) {
+ HEAP32[3228] = HEAP32[3228] & ~(1 << i17);
+ i2 = i10;
+ i12 = i11;
+ break;
+ }
+ if ((i12 | 0) != (i13 | 0)) {
+ if (i12 >>> 0 < i15 >>> 0) {
+ _abort();
+ }
+ i13 = i12 + 8 | 0;
+ if ((HEAP32[i13 >> 2] | 0) == (i10 | 0)) {
+ i16 = i13;
+ } else {
+ _abort();
+ }
+ } else {
+ i16 = i12 + 8 | 0;
+ }
+ HEAP32[i2 + 12 >> 2] = i12;
+ HEAP32[i16 >> 2] = i2;
+ i2 = i10;
+ i12 = i11;
+ break;
+ }
+ i16 = HEAP32[i6 + (24 - i14) >> 2] | 0;
+ i18 = HEAP32[i6 + (12 - i14) >> 2] | 0;
+ do {
+ if ((i18 | 0) == (i10 | 0)) {
+ i19 = 16 - i14 | 0;
+ i18 = i6 + (i19 + 4) | 0;
+ i17 = HEAP32[i18 >> 2] | 0;
+ if ((i17 | 0) == 0) {
+ i18 = i6 + i19 | 0;
+ i17 = HEAP32[i18 >> 2] | 0;
+ if ((i17 | 0) == 0) {
+ i13 = 0;
+ break;
+ }
+ }
+ while (1) {
+ i19 = i17 + 20 | 0;
+ i20 = HEAP32[i19 >> 2] | 0;
+ if ((i20 | 0) != 0) {
+ i17 = i20;
+ i18 = i19;
+ continue;
+ }
+ i20 = i17 + 16 | 0;
+ i19 = HEAP32[i20 >> 2] | 0;
+ if ((i19 | 0) == 0) {
+ break;
+ } else {
+ i17 = i19;
+ i18 = i20;
+ }
+ }
+ if (i18 >>> 0 < i15 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i18 >> 2] = 0;
+ i13 = i17;
+ break;
+ }
+ } else {
+ i17 = HEAP32[i6 + (8 - i14) >> 2] | 0;
+ if (i17 >>> 0 < i15 >>> 0) {
+ _abort();
+ }
+ i19 = i17 + 12 | 0;
+ if ((HEAP32[i19 >> 2] | 0) != (i10 | 0)) {
+ _abort();
+ }
+ i15 = i18 + 8 | 0;
+ if ((HEAP32[i15 >> 2] | 0) == (i10 | 0)) {
+ HEAP32[i19 >> 2] = i18;
+ HEAP32[i15 >> 2] = i17;
+ i13 = i18;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ if ((i16 | 0) != 0) {
+ i15 = HEAP32[i6 + (28 - i14) >> 2] | 0;
+ i17 = 13216 + (i15 << 2) | 0;
+ if ((i10 | 0) == (HEAP32[i17 >> 2] | 0)) {
+ HEAP32[i17 >> 2] = i13;
+ if ((i13 | 0) == 0) {
+ HEAP32[12916 >> 2] = HEAP32[12916 >> 2] & ~(1 << i15);
+ i2 = i10;
+ i12 = i11;
+ break;
+ }
+ } else {
+ if (i16 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i15 = i16 + 16 | 0;
+ if ((HEAP32[i15 >> 2] | 0) == (i10 | 0)) {
+ HEAP32[i15 >> 2] = i13;
+ } else {
+ HEAP32[i16 + 20 >> 2] = i13;
+ }
+ if ((i13 | 0) == 0) {
+ i2 = i10;
+ i12 = i11;
+ break;
+ }
+ }
+ if (i13 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i13 + 24 >> 2] = i16;
+ i14 = 16 - i14 | 0;
+ i15 = HEAP32[i6 + i14 >> 2] | 0;
+ do {
+ if ((i15 | 0) != 0) {
+ if (i15 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i13 + 16 >> 2] = i15;
+ HEAP32[i15 + 24 >> 2] = i13;
+ break;
+ }
+ }
+ } while (0);
+ i14 = HEAP32[i6 + (i14 + 4) >> 2] | 0;
+ if ((i14 | 0) != 0) {
+ if (i14 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i13 + 20 >> 2] = i14;
+ HEAP32[i14 + 24 >> 2] = i13;
+ i2 = i10;
+ i12 = i11;
+ break;
+ }
+ } else {
+ i2 = i10;
+ i12 = i11;
+ }
+ } else {
+ i2 = i10;
+ i12 = i11;
+ }
+ } else {
+ i2 = i6;
+ i12 = i7;
+ }
+ } while (0);
+ i10 = HEAP32[12928 >> 2] | 0;
+ if (i5 >>> 0 < i10 >>> 0) {
+ _abort();
+ }
+ i11 = i6 + (i7 + 4) | 0;
+ i13 = HEAP32[i11 >> 2] | 0;
+ if ((i13 & 2 | 0) == 0) {
+ if ((i5 | 0) == (HEAP32[12936 >> 2] | 0)) {
+ i20 = (HEAP32[12924 >> 2] | 0) + i12 | 0;
+ HEAP32[12924 >> 2] = i20;
+ HEAP32[12936 >> 2] = i2;
+ HEAP32[i2 + 4 >> 2] = i20 | 1;
+ if ((i2 | 0) != (HEAP32[12932 >> 2] | 0)) {
+ STACKTOP = i1;
+ return;
+ }
+ HEAP32[12932 >> 2] = 0;
+ HEAP32[12920 >> 2] = 0;
+ STACKTOP = i1;
+ return;
+ }
+ if ((i5 | 0) == (HEAP32[12932 >> 2] | 0)) {
+ i20 = (HEAP32[12920 >> 2] | 0) + i12 | 0;
+ HEAP32[12920 >> 2] = i20;
+ HEAP32[12932 >> 2] = i2;
+ HEAP32[i2 + 4 >> 2] = i20 | 1;
+ HEAP32[i2 + i20 >> 2] = i20;
+ STACKTOP = i1;
+ return;
+ }
+ i12 = (i13 & -8) + i12 | 0;
+ i11 = i13 >>> 3;
+ do {
+ if (!(i13 >>> 0 < 256)) {
+ i9 = HEAP32[i6 + (i7 + 24) >> 2] | 0;
+ i11 = HEAP32[i6 + (i7 + 12) >> 2] | 0;
+ do {
+ if ((i11 | 0) == (i5 | 0)) {
+ i13 = i6 + (i7 + 20) | 0;
+ i11 = HEAP32[i13 >> 2] | 0;
+ if ((i11 | 0) == 0) {
+ i13 = i6 + (i7 + 16) | 0;
+ i11 = HEAP32[i13 >> 2] | 0;
+ if ((i11 | 0) == 0) {
+ i8 = 0;
+ break;
+ }
+ }
+ while (1) {
+ i15 = i11 + 20 | 0;
+ i14 = HEAP32[i15 >> 2] | 0;
+ if ((i14 | 0) != 0) {
+ i11 = i14;
+ i13 = i15;
+ continue;
+ }
+ i14 = i11 + 16 | 0;
+ i15 = HEAP32[i14 >> 2] | 0;
+ if ((i15 | 0) == 0) {
+ break;
+ } else {
+ i11 = i15;
+ i13 = i14;
+ }
+ }
+ if (i13 >>> 0 < i10 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i13 >> 2] = 0;
+ i8 = i11;
+ break;
+ }
+ } else {
+ i13 = HEAP32[i6 + (i7 + 8) >> 2] | 0;
+ if (i13 >>> 0 < i10 >>> 0) {
+ _abort();
+ }
+ i10 = i13 + 12 | 0;
+ if ((HEAP32[i10 >> 2] | 0) != (i5 | 0)) {
+ _abort();
+ }
+ i14 = i11 + 8 | 0;
+ if ((HEAP32[i14 >> 2] | 0) == (i5 | 0)) {
+ HEAP32[i10 >> 2] = i11;
+ HEAP32[i14 >> 2] = i13;
+ i8 = i11;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ if ((i9 | 0) != 0) {
+ i10 = HEAP32[i6 + (i7 + 28) >> 2] | 0;
+ i11 = 13216 + (i10 << 2) | 0;
+ if ((i5 | 0) == (HEAP32[i11 >> 2] | 0)) {
+ HEAP32[i11 >> 2] = i8;
+ if ((i8 | 0) == 0) {
+ HEAP32[12916 >> 2] = HEAP32[12916 >> 2] & ~(1 << i10);
+ break;
+ }
+ } else {
+ if (i9 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i10 = i9 + 16 | 0;
+ if ((HEAP32[i10 >> 2] | 0) == (i5 | 0)) {
+ HEAP32[i10 >> 2] = i8;
+ } else {
+ HEAP32[i9 + 20 >> 2] = i8;
+ }
+ if ((i8 | 0) == 0) {
+ break;
+ }
+ }
+ if (i8 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i8 + 24 >> 2] = i9;
+ i5 = HEAP32[i6 + (i7 + 16) >> 2] | 0;
+ do {
+ if ((i5 | 0) != 0) {
+ if (i5 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i8 + 16 >> 2] = i5;
+ HEAP32[i5 + 24 >> 2] = i8;
+ break;
+ }
+ }
+ } while (0);
+ i5 = HEAP32[i6 + (i7 + 20) >> 2] | 0;
+ if ((i5 | 0) != 0) {
+ if (i5 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i8 + 20 >> 2] = i5;
+ HEAP32[i5 + 24 >> 2] = i8;
+ break;
+ }
+ }
+ }
+ } else {
+ i8 = HEAP32[i6 + (i7 + 8) >> 2] | 0;
+ i6 = HEAP32[i6 + (i7 + 12) >> 2] | 0;
+ i7 = 12952 + (i11 << 1 << 2) | 0;
+ if ((i8 | 0) != (i7 | 0)) {
+ if (i8 >>> 0 < i10 >>> 0) {
+ _abort();
+ }
+ if ((HEAP32[i8 + 12 >> 2] | 0) != (i5 | 0)) {
+ _abort();
+ }
+ }
+ if ((i6 | 0) == (i8 | 0)) {
+ HEAP32[3228] = HEAP32[3228] & ~(1 << i11);
+ break;
+ }
+ if ((i6 | 0) != (i7 | 0)) {
+ if (i6 >>> 0 < i10 >>> 0) {
+ _abort();
+ }
+ i7 = i6 + 8 | 0;
+ if ((HEAP32[i7 >> 2] | 0) == (i5 | 0)) {
+ i9 = i7;
+ } else {
+ _abort();
+ }
+ } else {
+ i9 = i6 + 8 | 0;
+ }
+ HEAP32[i8 + 12 >> 2] = i6;
+ HEAP32[i9 >> 2] = i8;
+ }
+ } while (0);
+ HEAP32[i2 + 4 >> 2] = i12 | 1;
+ HEAP32[i2 + i12 >> 2] = i12;
+ if ((i2 | 0) == (HEAP32[12932 >> 2] | 0)) {
+ HEAP32[12920 >> 2] = i12;
+ STACKTOP = i1;
+ return;
+ }
+ } else {
+ HEAP32[i11 >> 2] = i13 & -2;
+ HEAP32[i2 + 4 >> 2] = i12 | 1;
+ HEAP32[i2 + i12 >> 2] = i12;
+ }
+ i6 = i12 >>> 3;
+ if (i12 >>> 0 < 256) {
+ i7 = i6 << 1;
+ i5 = 12952 + (i7 << 2) | 0;
+ i8 = HEAP32[3228] | 0;
+ i6 = 1 << i6;
+ if ((i8 & i6 | 0) != 0) {
+ i7 = 12952 + (i7 + 2 << 2) | 0;
+ i6 = HEAP32[i7 >> 2] | 0;
+ if (i6 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i4 = i7;
+ i3 = i6;
+ }
+ } else {
+ HEAP32[3228] = i8 | i6;
+ i4 = 12952 + (i7 + 2 << 2) | 0;
+ i3 = i5;
+ }
+ HEAP32[i4 >> 2] = i2;
+ HEAP32[i3 + 12 >> 2] = i2;
+ HEAP32[i2 + 8 >> 2] = i3;
+ HEAP32[i2 + 12 >> 2] = i5;
+ STACKTOP = i1;
+ return;
+ }
+ i3 = i12 >>> 8;
+ if ((i3 | 0) != 0) {
+ if (i12 >>> 0 > 16777215) {
+ i3 = 31;
+ } else {
+ i19 = (i3 + 1048320 | 0) >>> 16 & 8;
+ i20 = i3 << i19;
+ i18 = (i20 + 520192 | 0) >>> 16 & 4;
+ i20 = i20 << i18;
+ i3 = (i20 + 245760 | 0) >>> 16 & 2;
+ i3 = 14 - (i18 | i19 | i3) + (i20 << i3 >>> 15) | 0;
+ i3 = i12 >>> (i3 + 7 | 0) & 1 | i3 << 1;
+ }
+ } else {
+ i3 = 0;
+ }
+ i6 = 13216 + (i3 << 2) | 0;
+ HEAP32[i2 + 28 >> 2] = i3;
+ HEAP32[i2 + 20 >> 2] = 0;
+ HEAP32[i2 + 16 >> 2] = 0;
+ i5 = HEAP32[12916 >> 2] | 0;
+ i4 = 1 << i3;
+ if ((i5 & i4 | 0) == 0) {
+ HEAP32[12916 >> 2] = i5 | i4;
+ HEAP32[i6 >> 2] = i2;
+ HEAP32[i2 + 24 >> 2] = i6;
+ HEAP32[i2 + 12 >> 2] = i2;
+ HEAP32[i2 + 8 >> 2] = i2;
+ STACKTOP = i1;
+ return;
+ }
+ i4 = HEAP32[i6 >> 2] | 0;
+ if ((i3 | 0) == 31) {
+ i3 = 0;
+ } else {
+ i3 = 25 - (i3 >>> 1) | 0;
+ }
+ L194 : do {
+ if ((HEAP32[i4 + 4 >> 2] & -8 | 0) != (i12 | 0)) {
+ i3 = i12 << i3;
+ i6 = i4;
+ while (1) {
+ i5 = i6 + (i3 >>> 31 << 2) + 16 | 0;
+ i4 = HEAP32[i5 >> 2] | 0;
+ if ((i4 | 0) == 0) {
+ break;
+ }
+ if ((HEAP32[i4 + 4 >> 2] & -8 | 0) == (i12 | 0)) {
+ break L194;
+ } else {
+ i3 = i3 << 1;
+ i6 = i4;
+ }
+ }
+ if (i5 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i5 >> 2] = i2;
+ HEAP32[i2 + 24 >> 2] = i6;
+ HEAP32[i2 + 12 >> 2] = i2;
+ HEAP32[i2 + 8 >> 2] = i2;
+ STACKTOP = i1;
+ return;
+ }
+ } while (0);
+ i3 = i4 + 8 | 0;
+ i6 = HEAP32[i3 >> 2] | 0;
+ i5 = HEAP32[12928 >> 2] | 0;
+ if (i4 >>> 0 < i5 >>> 0) {
+ _abort();
+ }
+ if (i6 >>> 0 < i5 >>> 0) {
+ _abort();
+ }
+ HEAP32[i6 + 12 >> 2] = i2;
+ HEAP32[i3 >> 2] = i2;
+ HEAP32[i2 + 8 >> 2] = i6;
+ HEAP32[i2 + 12 >> 2] = i4;
+ HEAP32[i2 + 24 >> 2] = 0;
+ STACKTOP = i1;
+ return;
+}
+function _singlestep(i2) {
+ i2 = i2 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i14 = i1;
+ i3 = i2 + 12 | 0;
+ i8 = HEAP32[i3 >> 2] | 0;
+ i6 = i8 + 61 | 0;
+ switch (HEAPU8[i6] | 0) {
+ case 0:
+ {
+ if ((HEAP32[i8 + 84 >> 2] | 0) != 0) {
+ i21 = i8 + 16 | 0;
+ i22 = HEAP32[i21 >> 2] | 0;
+ _propagatemark(i8);
+ i22 = (HEAP32[i21 >> 2] | 0) - i22 | 0;
+ STACKTOP = i1;
+ return i22 | 0;
+ }
+ HEAP8[i6] = 1;
+ i6 = i8 + 20 | 0;
+ HEAP32[i6 >> 2] = HEAP32[i8 + 16 >> 2];
+ i8 = HEAP32[i3 >> 2] | 0;
+ i7 = i8 + 16 | 0;
+ i14 = HEAP32[i7 >> 2] | 0;
+ if ((i2 | 0) != 0 ? !((HEAP8[i2 + 5 | 0] & 3) == 0) : 0) {
+ _reallymarkobject(i8, i2);
+ }
+ if ((HEAP32[i8 + 48 >> 2] & 64 | 0) != 0 ? (i13 = HEAP32[i8 + 40 >> 2] | 0, !((HEAP8[i13 + 5 | 0] & 3) == 0)) : 0) {
+ _reallymarkobject(i8, i13);
+ }
+ _markmt(i8);
+ i13 = i8 + 112 | 0;
+ i15 = HEAP32[i8 + 132 >> 2] | 0;
+ if ((i15 | 0) != (i13 | 0)) {
+ do {
+ if (((HEAP8[i15 + 5 | 0] & 7) == 0 ? (i12 = HEAP32[i15 + 8 >> 2] | 0, (HEAP32[i12 + 8 >> 2] & 64 | 0) != 0) : 0) ? (i11 = HEAP32[i12 >> 2] | 0, !((HEAP8[i11 + 5 | 0] & 3) == 0)) : 0) {
+ _reallymarkobject(i8, i11);
+ }
+ i15 = HEAP32[i15 + 20 >> 2] | 0;
+ } while ((i15 | 0) != (i13 | 0));
+ }
+ i16 = i8 + 84 | 0;
+ if ((HEAP32[i16 >> 2] | 0) != 0) {
+ do {
+ _propagatemark(i8);
+ } while ((HEAP32[i16 >> 2] | 0) != 0);
+ }
+ i17 = (HEAP32[i7 >> 2] | 0) - i14 | 0;
+ i11 = i8 + 92 | 0;
+ i12 = HEAP32[i11 >> 2] | 0;
+ i21 = i8 + 88 | 0;
+ i22 = HEAP32[i21 >> 2] | 0;
+ i15 = i8 + 96 | 0;
+ i13 = HEAP32[i15 >> 2] | 0;
+ HEAP32[i15 >> 2] = 0;
+ HEAP32[i21 >> 2] = 0;
+ HEAP32[i11 >> 2] = 0;
+ HEAP32[i16 >> 2] = i22;
+ if ((i22 | 0) != 0) {
+ do {
+ _propagatemark(i8);
+ } while ((HEAP32[i16 >> 2] | 0) != 0);
+ }
+ HEAP32[i16 >> 2] = i12;
+ if ((i12 | 0) != 0) {
+ do {
+ _propagatemark(i8);
+ } while ((HEAP32[i16 >> 2] | 0) != 0);
+ }
+ HEAP32[i16 >> 2] = i13;
+ if ((i13 | 0) != 0) {
+ do {
+ _propagatemark(i8);
+ } while ((HEAP32[i16 >> 2] | 0) != 0);
+ }
+ i18 = HEAP32[i7 >> 2] | 0;
+ while (1) {
+ i13 = HEAP32[i15 >> 2] | 0;
+ HEAP32[i15 >> 2] = 0;
+ i12 = 0;
+ L42 : while (1) {
+ i14 = i13;
+ while (1) {
+ if ((i14 | 0) == 0) {
+ break L42;
+ }
+ i13 = HEAP32[i14 + 24 >> 2] | 0;
+ if ((_traverseephemeron(i8, i14) | 0) == 0) {
+ i14 = i13;
+ } else {
+ break;
+ }
+ }
+ if ((HEAP32[i16 >> 2] | 0) == 0) {
+ i12 = 1;
+ continue;
+ }
+ while (1) {
+ _propagatemark(i8);
+ if ((HEAP32[i16 >> 2] | 0) == 0) {
+ i12 = 1;
+ continue L42;
+ }
+ }
+ }
+ if ((i12 | 0) == 0) {
+ break;
+ }
+ }
+ _clearvalues(i8, HEAP32[i11 >> 2] | 0, 0);
+ i14 = i8 + 100 | 0;
+ _clearvalues(i8, HEAP32[i14 >> 2] | 0, 0);
+ i13 = HEAP32[i11 >> 2] | 0;
+ i12 = HEAP32[i14 >> 2] | 0;
+ i21 = HEAP32[i7 >> 2] | 0;
+ i20 = HEAP32[i3 >> 2] | 0;
+ i19 = i20 + 104 | 0;
+ while (1) {
+ i22 = HEAP32[i19 >> 2] | 0;
+ if ((i22 | 0) == 0) {
+ break;
+ } else {
+ i19 = i22;
+ }
+ }
+ i17 = i17 - i18 + i21 | 0;
+ i20 = i20 + 72 | 0;
+ i21 = HEAP32[i20 >> 2] | 0;
+ L55 : do {
+ if ((i21 | 0) != 0) {
+ while (1) {
+ i18 = i21;
+ while (1) {
+ i22 = i18 + 5 | 0;
+ i21 = HEAP8[i22] | 0;
+ if ((i21 & 3) == 0) {
+ break;
+ }
+ HEAP8[i22] = i21 & 255 | 8;
+ HEAP32[i20 >> 2] = HEAP32[i18 >> 2];
+ HEAP32[i18 >> 2] = HEAP32[i19 >> 2];
+ HEAP32[i19 >> 2] = i18;
+ i19 = HEAP32[i20 >> 2] | 0;
+ if ((i19 | 0) == 0) {
+ break L55;
+ } else {
+ i22 = i18;
+ i18 = i19;
+ i19 = i22;
+ }
+ }
+ i21 = HEAP32[i18 >> 2] | 0;
+ if ((i21 | 0) == 0) {
+ break;
+ } else {
+ i20 = i18;
+ }
+ }
+ }
+ } while (0);
+ i19 = HEAP32[i8 + 104 >> 2] | 0;
+ if ((i19 | 0) != 0) {
+ i18 = i8 + 60 | 0;
+ do {
+ i22 = i19 + 5 | 0;
+ HEAP8[i22] = HEAP8[i18] & 3 | HEAP8[i22] & 184;
+ _reallymarkobject(i8, i19);
+ i19 = HEAP32[i19 >> 2] | 0;
+ } while ((i19 | 0) != 0);
+ }
+ if ((HEAP32[i16 >> 2] | 0) != 0) {
+ do {
+ _propagatemark(i8);
+ } while ((HEAP32[i16 >> 2] | 0) != 0);
+ }
+ i18 = HEAP32[i7 >> 2] | 0;
+ while (1) {
+ i20 = HEAP32[i15 >> 2] | 0;
+ HEAP32[i15 >> 2] = 0;
+ i19 = 0;
+ L74 : while (1) {
+ i21 = i20;
+ while (1) {
+ if ((i21 | 0) == 0) {
+ break L74;
+ }
+ i20 = HEAP32[i21 + 24 >> 2] | 0;
+ if ((_traverseephemeron(i8, i21) | 0) == 0) {
+ i21 = i20;
+ } else {
+ break;
+ }
+ }
+ if ((HEAP32[i16 >> 2] | 0) == 0) {
+ i19 = 1;
+ continue;
+ }
+ while (1) {
+ _propagatemark(i8);
+ if ((HEAP32[i16 >> 2] | 0) == 0) {
+ i19 = 1;
+ continue L74;
+ }
+ }
+ }
+ if ((i19 | 0) == 0) {
+ break;
+ }
+ }
+ i16 = i17 - i18 | 0;
+ i15 = HEAP32[i15 >> 2] | 0;
+ if ((i15 | 0) != 0) {
+ do {
+ i22 = 1 << HEAPU8[i15 + 7 | 0];
+ i19 = HEAP32[i15 + 16 >> 2] | 0;
+ i17 = i19 + (i22 << 5) | 0;
+ if ((i22 | 0) > 0) {
+ do {
+ i18 = i19 + 8 | 0;
+ do {
+ if ((HEAP32[i18 >> 2] | 0) != 0 ? (i9 = i19 + 24 | 0, i10 = HEAP32[i9 >> 2] | 0, (i10 & 64 | 0) != 0) : 0) {
+ i20 = HEAP32[i19 + 16 >> 2] | 0;
+ if ((i10 & 15 | 0) == 4) {
+ if ((i20 | 0) == 0) {
+ break;
+ }
+ if ((HEAP8[i20 + 5 | 0] & 3) == 0) {
+ break;
+ }
+ _reallymarkobject(i8, i20);
+ break;
+ } else {
+ i20 = i20 + 5 | 0;
+ if ((HEAP8[i20] & 3) == 0) {
+ break;
+ }
+ HEAP32[i18 >> 2] = 0;
+ if ((HEAP8[i20] & 3) == 0) {
+ break;
+ }
+ HEAP32[i9 >> 2] = 11;
+ break;
+ }
+ }
+ } while (0);
+ i19 = i19 + 32 | 0;
+ } while (i19 >>> 0 < i17 >>> 0);
+ }
+ i15 = HEAP32[i15 + 24 >> 2] | 0;
+ } while ((i15 | 0) != 0);
+ }
+ i10 = HEAP32[i14 >> 2] | 0;
+ if ((i10 | 0) != 0) {
+ do {
+ i22 = 1 << HEAPU8[i10 + 7 | 0];
+ i17 = HEAP32[i10 + 16 >> 2] | 0;
+ i9 = i17 + (i22 << 5) | 0;
+ if ((i22 | 0) > 0) {
+ do {
+ i15 = i17 + 8 | 0;
+ do {
+ if ((HEAP32[i15 >> 2] | 0) != 0 ? (i5 = i17 + 24 | 0, i4 = HEAP32[i5 >> 2] | 0, (i4 & 64 | 0) != 0) : 0) {
+ i18 = HEAP32[i17 + 16 >> 2] | 0;
+ if ((i4 & 15 | 0) == 4) {
+ if ((i18 | 0) == 0) {
+ break;
+ }
+ if ((HEAP8[i18 + 5 | 0] & 3) == 0) {
+ break;
+ }
+ _reallymarkobject(i8, i18);
+ break;
+ } else {
+ i18 = i18 + 5 | 0;
+ if ((HEAP8[i18] & 3) == 0) {
+ break;
+ }
+ HEAP32[i15 >> 2] = 0;
+ if ((HEAP8[i18] & 3) == 0) {
+ break;
+ }
+ HEAP32[i5 >> 2] = 11;
+ break;
+ }
+ }
+ } while (0);
+ i17 = i17 + 32 | 0;
+ } while (i17 >>> 0 < i9 >>> 0);
+ }
+ i10 = HEAP32[i10 + 24 >> 2] | 0;
+ } while ((i10 | 0) != 0);
+ }
+ _clearvalues(i8, HEAP32[i11 >> 2] | 0, i13);
+ _clearvalues(i8, HEAP32[i14 >> 2] | 0, i12);
+ i4 = i8 + 60 | 0;
+ HEAP8[i4] = HEAPU8[i4] ^ 3;
+ i4 = i16 + (HEAP32[i7 >> 2] | 0) | 0;
+ HEAP32[i6 >> 2] = (HEAP32[i6 >> 2] | 0) + i4;
+ i3 = HEAP32[i3 >> 2] | 0;
+ HEAP8[i3 + 61 | 0] = 2;
+ HEAP32[i3 + 64 >> 2] = 0;
+ i7 = i3 + 72 | 0;
+ i5 = 0;
+ do {
+ i5 = i5 + 1 | 0;
+ i6 = _sweeplist(i2, i7, 1) | 0;
+ } while ((i6 | 0) == (i7 | 0));
+ HEAP32[i3 + 80 >> 2] = i6;
+ i6 = i3 + 68 | 0;
+ i7 = 0;
+ do {
+ i7 = i7 + 1 | 0;
+ i8 = _sweeplist(i2, i6, 1) | 0;
+ } while ((i8 | 0) == (i6 | 0));
+ HEAP32[i3 + 76 >> 2] = i8;
+ i22 = ((i7 + i5 | 0) * 5 | 0) + i4 | 0;
+ STACKTOP = i1;
+ return i22 | 0;
+ }
+ case 2:
+ {
+ i3 = i8 + 64 | 0;
+ i4 = i8 + 32 | 0;
+ i8 = i8 + 24 | 0;
+ i5 = 0;
+ while (1) {
+ i10 = HEAP32[i3 >> 2] | 0;
+ i11 = i10 + i5 | 0;
+ i9 = HEAP32[i4 >> 2] | 0;
+ if ((i11 | 0) >= (i9 | 0)) {
+ i2 = i10;
+ break;
+ }
+ _sweeplist(i2, (HEAP32[i8 >> 2] | 0) + (i11 << 2) | 0, -3) | 0;
+ i5 = i5 + 1 | 0;
+ if ((i5 | 0) >= 80) {
+ i7 = 96;
+ break;
+ }
+ }
+ if ((i7 | 0) == 96) {
+ i2 = HEAP32[i3 >> 2] | 0;
+ i9 = HEAP32[i4 >> 2] | 0;
+ }
+ i22 = i2 + i5 | 0;
+ HEAP32[i3 >> 2] = i22;
+ if ((i22 | 0) >= (i9 | 0)) {
+ HEAP8[i6] = 3;
+ }
+ i22 = i5 * 5 | 0;
+ STACKTOP = i1;
+ return i22 | 0;
+ }
+ case 5:
+ {
+ i2 = i8 + 16 | 0;
+ HEAP32[i2 >> 2] = HEAP32[i8 + 32 >> 2] << 2;
+ i22 = i8 + 84 | 0;
+ i3 = i8 + 172 | 0;
+ HEAP32[i22 + 0 >> 2] = 0;
+ HEAP32[i22 + 4 >> 2] = 0;
+ HEAP32[i22 + 8 >> 2] = 0;
+ HEAP32[i22 + 12 >> 2] = 0;
+ HEAP32[i22 + 16 >> 2] = 0;
+ i3 = HEAP32[i3 >> 2] | 0;
+ if ((i3 | 0) != 0 ? !((HEAP8[i3 + 5 | 0] & 3) == 0) : 0) {
+ _reallymarkobject(i8, i3);
+ }
+ if ((HEAP32[i8 + 48 >> 2] & 64 | 0) != 0 ? (i15 = HEAP32[i8 + 40 >> 2] | 0, !((HEAP8[i15 + 5 | 0] & 3) == 0)) : 0) {
+ _reallymarkobject(i8, i15);
+ }
+ _markmt(i8);
+ i4 = HEAP32[i8 + 104 >> 2] | 0;
+ if ((i4 | 0) != 0) {
+ i3 = i8 + 60 | 0;
+ do {
+ i22 = i4 + 5 | 0;
+ HEAP8[i22] = HEAP8[i3] & 3 | HEAP8[i22] & 184;
+ _reallymarkobject(i8, i4);
+ i4 = HEAP32[i4 >> 2] | 0;
+ } while ((i4 | 0) != 0);
+ }
+ HEAP8[i6] = 0;
+ i22 = HEAP32[i2 >> 2] | 0;
+ STACKTOP = i1;
+ return i22 | 0;
+ }
+ case 3:
+ {
+ i3 = i8 + 80 | 0;
+ i4 = HEAP32[i3 >> 2] | 0;
+ if ((i4 | 0) == 0) {
+ HEAP8[i6] = 4;
+ i22 = 0;
+ STACKTOP = i1;
+ return i22 | 0;
+ } else {
+ HEAP32[i3 >> 2] = _sweeplist(i2, i4, 80) | 0;
+ i22 = 400;
+ STACKTOP = i1;
+ return i22 | 0;
+ }
+ }
+ case 4:
+ {
+ i4 = i8 + 76 | 0;
+ i5 = HEAP32[i4 >> 2] | 0;
+ if ((i5 | 0) != 0) {
+ HEAP32[i4 >> 2] = _sweeplist(i2, i5, 80) | 0;
+ i22 = 400;
+ STACKTOP = i1;
+ return i22 | 0;
+ }
+ HEAP32[i14 >> 2] = HEAP32[i8 + 172 >> 2];
+ _sweeplist(i2, i14, 1) | 0;
+ i3 = HEAP32[i3 >> 2] | 0;
+ if ((HEAP8[i3 + 62 | 0] | 0) != 1) {
+ i4 = (HEAP32[i3 + 32 >> 2] | 0) / 2 | 0;
+ if ((HEAP32[i3 + 28 >> 2] | 0) >>> 0 < i4 >>> 0) {
+ _luaS_resize(i2, i4);
+ }
+ i21 = i3 + 144 | 0;
+ i22 = i3 + 152 | 0;
+ HEAP32[i21 >> 2] = _luaM_realloc_(i2, HEAP32[i21 >> 2] | 0, HEAP32[i22 >> 2] | 0, 0) | 0;
+ HEAP32[i22 >> 2] = 0;
+ }
+ HEAP8[i6] = 5;
+ i22 = 5;
+ STACKTOP = i1;
+ return i22 | 0;
+ }
+ default:
+ {
+ i22 = 0;
+ STACKTOP = i1;
+ return i22 | 0;
+ }
+ }
+ return 0;
+}
+function _pmain(i3) {
+ i3 = i3 | 0;
+ var i1 = 0, i2 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i2 = i1;
+ i7 = _lua_tointegerx(i3, 1, 0) | 0;
+ i4 = _lua_touserdata(i3, 2) | 0;
+ i5 = HEAP32[i4 >> 2] | 0;
+ if ((i5 | 0) != 0 ? (HEAP8[i5] | 0) != 0 : 0) {
+ HEAP32[20] = i5;
+ }
+ i12 = HEAP32[i4 + 4 >> 2] | 0;
+ do {
+ if ((i12 | 0) == 0) {
+ i5 = 0;
+ i6 = 0;
+ i8 = 0;
+ i9 = 1;
+ i10 = 1;
+ } else {
+ i9 = 0;
+ i8 = 0;
+ i11 = 0;
+ i6 = 0;
+ i5 = 1;
+ L6 : while (1) {
+ if ((HEAP8[i12] | 0) != 45) {
+ i10 = 18;
+ break;
+ }
+ switch (HEAP8[i12 + 1 | 0] | 0) {
+ case 108:
+ {
+ i10 = 12;
+ break;
+ }
+ case 69:
+ {
+ i9 = 1;
+ break;
+ }
+ case 45:
+ {
+ i10 = 7;
+ break L6;
+ }
+ case 105:
+ {
+ if ((HEAP8[i12 + 2 | 0] | 0) == 0) {
+ i11 = 1;
+ i6 = 1;
+ } else {
+ i5 = -1;
+ break L6;
+ }
+ break;
+ }
+ case 101:
+ {
+ i8 = 1;
+ i10 = 12;
+ break;
+ }
+ case 118:
+ {
+ if ((HEAP8[i12 + 2 | 0] | 0) == 0) {
+ i11 = 1;
+ } else {
+ i5 = -1;
+ break L6;
+ }
+ break;
+ }
+ case 0:
+ {
+ i10 = 18;
+ break L6;
+ }
+ default:
+ {
+ i10 = 16;
+ break L6;
+ }
+ }
+ if ((i10 | 0) == 12) {
+ i10 = 0;
+ if ((HEAP8[i12 + 2 | 0] | 0) == 0) {
+ i12 = i5 + 1 | 0;
+ i13 = HEAP32[i4 + (i12 << 2) >> 2] | 0;
+ if ((i13 | 0) == 0) {
+ i10 = 15;
+ break;
+ }
+ if ((HEAP8[i13] | 0) == 45) {
+ i10 = 15;
+ break;
+ } else {
+ i5 = i12;
+ }
+ }
+ }
+ i5 = i5 + 1 | 0;
+ i12 = HEAP32[i4 + (i5 << 2) >> 2] | 0;
+ if ((i12 | 0) == 0) {
+ i5 = 0;
+ i12 = i9;
+ i10 = 23;
+ break;
+ }
+ }
+ if ((i10 | 0) == 7) {
+ if ((HEAP8[i12 + 2 | 0] | 0) == 0) {
+ i5 = i5 + 1 | 0;
+ i5 = (HEAP32[i4 + (i5 << 2) >> 2] | 0) == 0 ? 0 : i5;
+ i10 = 18;
+ } else {
+ i5 = -1;
+ }
+ } else if ((i10 | 0) == 15) {
+ i5 = 0 - i5 | 0;
+ i10 = 18;
+ } else if ((i10 | 0) == 16) {
+ i5 = 0 - i5 | 0;
+ i10 = 18;
+ }
+ if ((i10 | 0) == 18) {
+ if ((i5 | 0) >= 0) {
+ i12 = i9;
+ i10 = 23;
+ }
+ }
+ if ((i10 | 0) == 23) {
+ if ((i11 | 0) == 0) {
+ i9 = 1;
+ } else {
+ i9 = HEAP32[_stdout >> 2] | 0;
+ _fwrite(440, 1, 51, i9 | 0) | 0;
+ _fputc(10, i9 | 0) | 0;
+ _fflush(i9 | 0) | 0;
+ i9 = 0;
+ }
+ if ((i12 | 0) == 0) {
+ i10 = 1;
+ break;
+ }
+ _lua_pushboolean(i3, 1);
+ _lua_setfield(i3, -1001e3, 96);
+ i10 = 0;
+ break;
+ }
+ i3 = HEAP32[i4 + (0 - i5 << 2) >> 2] | 0;
+ i4 = HEAP32[_stderr >> 2] | 0;
+ HEAP32[i2 >> 2] = HEAP32[20];
+ _fprintf(i4 | 0, 496, i2 | 0) | 0;
+ _fflush(i4 | 0) | 0;
+ i13 = HEAP8[i3 + 1 | 0] | 0;
+ if (i13 << 24 >> 24 == 108 | i13 << 24 >> 24 == 101) {
+ HEAP32[i2 >> 2] = i3;
+ _fprintf(i4 | 0, 504, i2 | 0) | 0;
+ _fflush(i4 | 0) | 0;
+ } else {
+ HEAP32[i2 >> 2] = i3;
+ _fprintf(i4 | 0, 528, i2 | 0) | 0;
+ _fflush(i4 | 0) | 0;
+ }
+ HEAP32[i2 >> 2] = HEAP32[20];
+ _fprintf(i4 | 0, 560, i2 | 0) | 0;
+ _fflush(i4 | 0) | 0;
+ i13 = 0;
+ STACKTOP = i1;
+ return i13 | 0;
+ }
+ } while (0);
+ _luaL_checkversion_(i3, 502.0);
+ _lua_gc(i3, 0, 0) | 0;
+ _luaL_openlibs(i3);
+ _lua_gc(i3, 1, 0) | 0;
+ do {
+ if (i10) {
+ i10 = _getenv(409 | 0) | 0;
+ if ((i10 | 0) == 0) {
+ i10 = _getenv(425 | 0) | 0;
+ if ((i10 | 0) == 0) {
+ break;
+ } else {
+ i11 = 424;
+ }
+ } else {
+ i11 = 408;
+ }
+ if ((HEAP8[i10] | 0) == 64) {
+ i13 = _luaL_loadfilex(i3, i10 + 1 | 0, 0) | 0;
+ if ((i13 | 0) == 0) {
+ i12 = _lua_gettop(i3) | 0;
+ _lua_pushcclosure(i3, 142, 0);
+ _lua_insert(i3, i12);
+ HEAP32[48] = i3;
+ _signal(2, 1) | 0;
+ i13 = _lua_pcallk(i3, 0, 0, i12, 0, 0) | 0;
+ _signal(2, 0) | 0;
+ _lua_remove(i3, i12);
+ if ((i13 | 0) == 0) {
+ break;
+ }
+ }
+ if ((_lua_type(i3, -1) | 0) == 0) {
+ i13 = 0;
+ STACKTOP = i1;
+ return i13 | 0;
+ }
+ i11 = _lua_tolstring(i3, -1, 0) | 0;
+ i12 = HEAP32[20] | 0;
+ i10 = HEAP32[_stderr >> 2] | 0;
+ if ((i12 | 0) != 0) {
+ HEAP32[i2 >> 2] = i12;
+ _fprintf(i10 | 0, 496, i2 | 0) | 0;
+ _fflush(i10 | 0) | 0;
+ }
+ HEAP32[i2 >> 2] = (i11 | 0) == 0 ? 48 : i11;
+ _fprintf(i10 | 0, 912, i2 | 0) | 0;
+ _fflush(i10 | 0) | 0;
+ _lua_settop(i3, -2);
+ _lua_gc(i3, 2, 0) | 0;
+ } else {
+ i13 = _luaL_loadbufferx(i3, i10, _strlen(i10 | 0) | 0, i11, 0) | 0;
+ if ((i13 | 0) == 0) {
+ i12 = _lua_gettop(i3) | 0;
+ _lua_pushcclosure(i3, 142, 0);
+ _lua_insert(i3, i12);
+ HEAP32[48] = i3;
+ _signal(2, 1) | 0;
+ i13 = _lua_pcallk(i3, 0, 0, i12, 0, 0) | 0;
+ _signal(2, 0) | 0;
+ _lua_remove(i3, i12);
+ if ((i13 | 0) == 0) {
+ break;
+ }
+ }
+ if ((_lua_type(i3, -1) | 0) == 0) {
+ i13 = 0;
+ STACKTOP = i1;
+ return i13 | 0;
+ }
+ i11 = _lua_tolstring(i3, -1, 0) | 0;
+ i10 = HEAP32[20] | 0;
+ i12 = HEAP32[_stderr >> 2] | 0;
+ if ((i10 | 0) != 0) {
+ HEAP32[i2 >> 2] = i10;
+ _fprintf(i12 | 0, 496, i2 | 0) | 0;
+ _fflush(i12 | 0) | 0;
+ }
+ HEAP32[i2 >> 2] = (i11 | 0) == 0 ? 48 : i11;
+ _fprintf(i12 | 0, 912, i2 | 0) | 0;
+ _fflush(i12 | 0) | 0;
+ _lua_settop(i3, -2);
+ _lua_gc(i3, 2, 0) | 0;
+ }
+ if ((i13 | 0) != 0) {
+ i13 = 0;
+ STACKTOP = i1;
+ return i13 | 0;
+ }
+ }
+ } while (0);
+ i7 = (i5 | 0) > 0 ? i5 : i7;
+ L67 : do {
+ if ((i7 | 0) > 1) {
+ i10 = 1;
+ while (1) {
+ i11 = HEAP32[i4 + (i10 << 2) >> 2] | 0;
+ i12 = HEAP8[i11 + 1 | 0] | 0;
+ if ((i12 | 0) == 108) {
+ i11 = i11 + 2 | 0;
+ if ((HEAP8[i11] | 0) == 0) {
+ i10 = i10 + 1 | 0;
+ i11 = HEAP32[i4 + (i10 << 2) >> 2] | 0;
+ }
+ _lua_getglobal(i3, 400);
+ _lua_pushstring(i3, i11) | 0;
+ i12 = (_lua_gettop(i3) | 0) + -1 | 0;
+ _lua_pushcclosure(i3, 142, 0);
+ _lua_insert(i3, i12);
+ HEAP32[48] = i3;
+ _signal(2, 1) | 0;
+ i13 = _lua_pcallk(i3, 1, 1, i12, 0, 0) | 0;
+ _signal(2, 0) | 0;
+ _lua_remove(i3, i12);
+ if ((i13 | 0) != 0) {
+ i10 = 58;
+ break;
+ }
+ _lua_setglobal(i3, i11);
+ } else if ((i12 | 0) == 101) {
+ i11 = i11 + 2 | 0;
+ if ((HEAP8[i11] | 0) == 0) {
+ i10 = i10 + 1 | 0;
+ i11 = HEAP32[i4 + (i10 << 2) >> 2] | 0;
+ }
+ if ((_luaL_loadbufferx(i3, i11, _strlen(i11 | 0) | 0, 384, 0) | 0) != 0) {
+ i10 = 50;
+ break;
+ }
+ i12 = _lua_gettop(i3) | 0;
+ _lua_pushcclosure(i3, 142, 0);
+ _lua_insert(i3, i12);
+ HEAP32[48] = i3;
+ _signal(2, 1) | 0;
+ i13 = _lua_pcallk(i3, 0, 0, i12, 0, 0) | 0;
+ _signal(2, 0) | 0;
+ _lua_remove(i3, i12);
+ if ((i13 | 0) != 0) {
+ i10 = 50;
+ break;
+ }
+ }
+ i10 = i10 + 1 | 0;
+ if ((i10 | 0) >= (i7 | 0)) {
+ break L67;
+ }
+ }
+ if ((i10 | 0) == 50) {
+ if ((_lua_type(i3, -1) | 0) == 0) {
+ i13 = 0;
+ STACKTOP = i1;
+ return i13 | 0;
+ }
+ i5 = _lua_tolstring(i3, -1, 0) | 0;
+ i6 = HEAP32[20] | 0;
+ i4 = HEAP32[_stderr >> 2] | 0;
+ if ((i6 | 0) != 0) {
+ HEAP32[i2 >> 2] = i6;
+ _fprintf(i4 | 0, 496, i2 | 0) | 0;
+ _fflush(i4 | 0) | 0;
+ }
+ HEAP32[i2 >> 2] = (i5 | 0) == 0 ? 48 : i5;
+ _fprintf(i4 | 0, 912, i2 | 0) | 0;
+ _fflush(i4 | 0) | 0;
+ _lua_settop(i3, -2);
+ _lua_gc(i3, 2, 0) | 0;
+ i13 = 0;
+ STACKTOP = i1;
+ return i13 | 0;
+ } else if ((i10 | 0) == 58) {
+ if ((_lua_type(i3, -1) | 0) == 0) {
+ i13 = 0;
+ STACKTOP = i1;
+ return i13 | 0;
+ }
+ i5 = _lua_tolstring(i3, -1, 0) | 0;
+ i6 = HEAP32[20] | 0;
+ i4 = HEAP32[_stderr >> 2] | 0;
+ if ((i6 | 0) != 0) {
+ HEAP32[i2 >> 2] = i6;
+ _fprintf(i4 | 0, 496, i2 | 0) | 0;
+ _fflush(i4 | 0) | 0;
+ }
+ HEAP32[i2 >> 2] = (i5 | 0) == 0 ? 48 : i5;
+ _fprintf(i4 | 0, 912, i2 | 0) | 0;
+ _fflush(i4 | 0) | 0;
+ _lua_settop(i3, -2);
+ _lua_gc(i3, 2, 0) | 0;
+ i13 = 0;
+ STACKTOP = i1;
+ return i13 | 0;
+ }
+ }
+ } while (0);
+ do {
+ if ((i5 | 0) != 0) {
+ i10 = 0;
+ while (1) {
+ if ((HEAP32[i4 + (i10 << 2) >> 2] | 0) == 0) {
+ break;
+ } else {
+ i10 = i10 + 1 | 0;
+ }
+ }
+ i11 = i5 + 1 | 0;
+ i7 = i10 - i11 | 0;
+ _luaL_checkstack(i3, i7 + 3 | 0, 352);
+ if ((i11 | 0) < (i10 | 0)) {
+ i12 = i11;
+ do {
+ _lua_pushstring(i3, HEAP32[i4 + (i12 << 2) >> 2] | 0) | 0;
+ i12 = i12 + 1 | 0;
+ } while ((i12 | 0) != (i10 | 0));
+ }
+ _lua_createtable(i3, i7, i11);
+ if ((i10 | 0) > 0) {
+ i11 = 0;
+ do {
+ _lua_pushstring(i3, HEAP32[i4 + (i11 << 2) >> 2] | 0) | 0;
+ _lua_rawseti(i3, -2, i11 - i5 | 0);
+ i11 = i11 + 1 | 0;
+ } while ((i11 | 0) != (i10 | 0));
+ }
+ _lua_setglobal(i3, 328);
+ i10 = HEAP32[i4 + (i5 << 2) >> 2] | 0;
+ if ((_strcmp(i10, 336) | 0) == 0) {
+ i13 = (_strcmp(HEAP32[i4 + (i5 + -1 << 2) >> 2] | 0, 344) | 0) == 0;
+ i10 = i13 ? i10 : 0;
+ }
+ i10 = _luaL_loadfilex(i3, i10, 0) | 0;
+ i4 = ~i7;
+ _lua_insert(i3, i4);
+ if ((i10 | 0) == 0) {
+ i13 = (_lua_gettop(i3) | 0) - i7 | 0;
+ _lua_pushcclosure(i3, 142, 0);
+ _lua_insert(i3, i13);
+ HEAP32[48] = i3;
+ _signal(2, 1) | 0;
+ i10 = _lua_pcallk(i3, i7, -1, i13, 0, 0) | 0;
+ _signal(2, 0) | 0;
+ _lua_remove(i3, i13);
+ if ((i10 | 0) == 0) {
+ break;
+ }
+ } else {
+ _lua_settop(i3, i4);
+ }
+ if ((_lua_type(i3, -1) | 0) != 0) {
+ i7 = _lua_tolstring(i3, -1, 0) | 0;
+ i11 = HEAP32[20] | 0;
+ i4 = HEAP32[_stderr >> 2] | 0;
+ if ((i11 | 0) != 0) {
+ HEAP32[i2 >> 2] = i11;
+ _fprintf(i4 | 0, 496, i2 | 0) | 0;
+ _fflush(i4 | 0) | 0;
+ }
+ HEAP32[i2 >> 2] = (i7 | 0) == 0 ? 48 : i7;
+ _fprintf(i4 | 0, 912, i2 | 0) | 0;
+ _fflush(i4 | 0) | 0;
+ _lua_settop(i3, -2);
+ _lua_gc(i3, 2, 0) | 0;
+ }
+ if ((i10 | 0) != 0) {
+ i13 = 0;
+ STACKTOP = i1;
+ return i13 | 0;
+ }
+ }
+ } while (0);
+ if ((i6 | 0) == 0) {
+ if (!((i8 | i5 | 0) != 0 | i9 ^ 1)) {
+ i13 = HEAP32[_stdout >> 2] | 0;
+ _fwrite(440, 1, 51, i13 | 0) | 0;
+ _fputc(10, i13 | 0) | 0;
+ _fflush(i13 | 0) | 0;
+ _dotty(i3);
+ }
+ } else {
+ _dotty(i3);
+ }
+ _lua_pushboolean(i3, 1);
+ i13 = 1;
+ STACKTOP = i1;
+ return i13 | 0;
+}
+function _DumpFunction(i6, i2) {
+ i6 = i6 | 0;
+ i2 = i2 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i5 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0, i23 = 0, i24 = 0, i25 = 0, i26 = 0, i27 = 0, i28 = 0, i29 = 0, i30 = 0;
+ i5 = STACKTOP;
+ STACKTOP = STACKTOP + 64 | 0;
+ i17 = i5 + 56 | 0;
+ i19 = i5 + 52 | 0;
+ i20 = i5 + 48 | 0;
+ i18 = i5;
+ i21 = i5 + 60 | 0;
+ i22 = i5 + 44 | 0;
+ i1 = i5 + 40 | 0;
+ i16 = i5 + 36 | 0;
+ i23 = i5 + 32 | 0;
+ i3 = i5 + 28 | 0;
+ i7 = i5 + 24 | 0;
+ i8 = i5 + 20 | 0;
+ i9 = i5 + 16 | 0;
+ i10 = i5 + 12 | 0;
+ i12 = i5 + 8 | 0;
+ HEAP32[i17 >> 2] = HEAP32[i6 + 64 >> 2];
+ i4 = i2 + 16 | 0;
+ i28 = HEAP32[i4 >> 2] | 0;
+ if ((i28 | 0) == 0) {
+ i28 = FUNCTION_TABLE_iiiii[HEAP32[i2 + 4 >> 2] & 3](HEAP32[i2 >> 2] | 0, i17, 4, HEAP32[i2 + 8 >> 2] | 0) | 0;
+ HEAP32[i4 >> 2] = i28;
+ }
+ HEAP32[i17 >> 2] = HEAP32[i6 + 68 >> 2];
+ if ((i28 | 0) == 0) {
+ i28 = FUNCTION_TABLE_iiiii[HEAP32[i2 + 4 >> 2] & 3](HEAP32[i2 >> 2] | 0, i17, 4, HEAP32[i2 + 8 >> 2] | 0) | 0;
+ HEAP32[i4 >> 2] = i28;
+ }
+ HEAP8[i17] = HEAP8[i6 + 76 | 0] | 0;
+ if ((i28 | 0) == 0) {
+ i28 = FUNCTION_TABLE_iiiii[HEAP32[i2 + 4 >> 2] & 3](HEAP32[i2 >> 2] | 0, i17, 1, HEAP32[i2 + 8 >> 2] | 0) | 0;
+ HEAP32[i4 >> 2] = i28;
+ }
+ HEAP8[i17] = HEAP8[i6 + 77 | 0] | 0;
+ if ((i28 | 0) == 0) {
+ i28 = FUNCTION_TABLE_iiiii[HEAP32[i2 + 4 >> 2] & 3](HEAP32[i2 >> 2] | 0, i17, 1, HEAP32[i2 + 8 >> 2] | 0) | 0;
+ HEAP32[i4 >> 2] = i28;
+ }
+ HEAP8[i17] = HEAP8[i6 + 78 | 0] | 0;
+ if ((i28 | 0) == 0) {
+ i28 = FUNCTION_TABLE_iiiii[HEAP32[i2 + 4 >> 2] & 3](HEAP32[i2 >> 2] | 0, i17, 1, HEAP32[i2 + 8 >> 2] | 0) | 0;
+ HEAP32[i4 >> 2] = i28;
+ }
+ i25 = HEAP32[i6 + 12 >> 2] | 0;
+ i24 = HEAP32[i6 + 48 >> 2] | 0;
+ HEAP32[i23 >> 2] = i24;
+ if ((i28 | 0) == 0) {
+ i26 = i2 + 4 | 0;
+ i27 = i2 + 8 | 0;
+ i28 = FUNCTION_TABLE_iiiii[HEAP32[i26 >> 2] & 3](HEAP32[i2 >> 2] | 0, i23, 4, HEAP32[i27 >> 2] | 0) | 0;
+ HEAP32[i4 >> 2] = i28;
+ if ((i28 | 0) == 0) {
+ i28 = FUNCTION_TABLE_iiiii[HEAP32[i26 >> 2] & 3](HEAP32[i2 >> 2] | 0, i25, i24 << 2, HEAP32[i27 >> 2] | 0) | 0;
+ HEAP32[i4 >> 2] = i28;
+ i25 = HEAP32[i6 + 44 >> 2] | 0;
+ HEAP32[i22 >> 2] = i25;
+ if ((i28 | 0) == 0) {
+ i28 = FUNCTION_TABLE_iiiii[HEAP32[i2 + 4 >> 2] & 3](HEAP32[i2 >> 2] | 0, i22, 4, HEAP32[i2 + 8 >> 2] | 0) | 0;
+ HEAP32[i4 >> 2] = i28;
+ }
+ } else {
+ i11 = 13;
+ }
+ } else {
+ i11 = 13;
+ }
+ if ((i11 | 0) == 13) {
+ i25 = HEAP32[i6 + 44 >> 2] | 0;
+ HEAP32[i22 >> 2] = i25;
+ }
+ if ((i25 | 0) > 0) {
+ i24 = i6 + 8 | 0;
+ i23 = i2 + 4 | 0;
+ i22 = i2 + 8 | 0;
+ i26 = 0;
+ do {
+ i30 = HEAP32[i24 >> 2] | 0;
+ i27 = i30 + (i26 << 4) | 0;
+ i30 = i30 + (i26 << 4) + 8 | 0;
+ i29 = HEAP32[i30 >> 2] | 0;
+ HEAP8[i17] = i29 & 15;
+ if ((i28 | 0) == 0) {
+ i28 = FUNCTION_TABLE_iiiii[HEAP32[i23 >> 2] & 3](HEAP32[i2 >> 2] | 0, i17, 1, HEAP32[i22 >> 2] | 0) | 0;
+ HEAP32[i4 >> 2] = i28;
+ i29 = HEAP32[i30 >> 2] | 0;
+ }
+ i29 = i29 & 15;
+ do {
+ if ((i29 | 0) == 3) {
+ HEAPF64[i18 >> 3] = +HEAPF64[i27 >> 3];
+ if ((i28 | 0) == 0) {
+ i28 = FUNCTION_TABLE_iiiii[HEAP32[i23 >> 2] & 3](HEAP32[i2 >> 2] | 0, i18, 8, HEAP32[i22 >> 2] | 0) | 0;
+ HEAP32[i4 >> 2] = i28;
+ }
+ } else if ((i29 | 0) == 1) {
+ HEAP8[i21] = HEAP32[i27 >> 2];
+ if ((i28 | 0) == 0) {
+ i28 = FUNCTION_TABLE_iiiii[HEAP32[i23 >> 2] & 3](HEAP32[i2 >> 2] | 0, i21, 1, HEAP32[i22 >> 2] | 0) | 0;
+ HEAP32[i4 >> 2] = i28;
+ }
+ } else if ((i29 | 0) == 4) {
+ i27 = HEAP32[i27 >> 2] | 0;
+ if ((i27 | 0) == 0) {
+ HEAP32[i19 >> 2] = 0;
+ if ((i28 | 0) != 0) {
+ break;
+ }
+ i28 = FUNCTION_TABLE_iiiii[HEAP32[i23 >> 2] & 3](HEAP32[i2 >> 2] | 0, i19, 4, HEAP32[i22 >> 2] | 0) | 0;
+ HEAP32[i4 >> 2] = i28;
+ break;
+ }
+ HEAP32[i20 >> 2] = (HEAP32[i27 + 12 >> 2] | 0) + 1;
+ if ((i28 | 0) == 0) {
+ i28 = FUNCTION_TABLE_iiiii[HEAP32[i23 >> 2] & 3](HEAP32[i2 >> 2] | 0, i20, 4, HEAP32[i22 >> 2] | 0) | 0;
+ HEAP32[i4 >> 2] = i28;
+ if ((i28 | 0) == 0) {
+ i28 = FUNCTION_TABLE_iiiii[HEAP32[i23 >> 2] & 3](HEAP32[i2 >> 2] | 0, i27 + 16 | 0, HEAP32[i20 >> 2] | 0, HEAP32[i22 >> 2] | 0) | 0;
+ HEAP32[i4 >> 2] = i28;
+ }
+ }
+ }
+ } while (0);
+ i26 = i26 + 1 | 0;
+ } while ((i26 | 0) != (i25 | 0));
+ }
+ i18 = HEAP32[i6 + 56 >> 2] | 0;
+ HEAP32[i17 >> 2] = i18;
+ if ((i28 | 0) == 0) {
+ i28 = FUNCTION_TABLE_iiiii[HEAP32[i2 + 4 >> 2] & 3](HEAP32[i2 >> 2] | 0, i17, 4, HEAP32[i2 + 8 >> 2] | 0) | 0;
+ HEAP32[i4 >> 2] = i28;
+ }
+ if ((i18 | 0) > 0) {
+ i17 = i6 + 16 | 0;
+ i19 = 0;
+ do {
+ _DumpFunction(HEAP32[(HEAP32[i17 >> 2] | 0) + (i19 << 2) >> 2] | 0, i2);
+ i19 = i19 + 1 | 0;
+ } while ((i19 | 0) != (i18 | 0));
+ i28 = HEAP32[i4 >> 2] | 0;
+ }
+ i17 = i6 + 40 | 0;
+ i18 = HEAP32[i17 >> 2] | 0;
+ HEAP32[i16 >> 2] = i18;
+ if ((i28 | 0) == 0) {
+ i28 = FUNCTION_TABLE_iiiii[HEAP32[i2 + 4 >> 2] & 3](HEAP32[i2 >> 2] | 0, i16, 4, HEAP32[i2 + 8 >> 2] | 0) | 0;
+ HEAP32[i4 >> 2] = i28;
+ }
+ if ((i18 | 0) > 0) {
+ i19 = i6 + 28 | 0;
+ i16 = i2 + 4 | 0;
+ i20 = i2 + 8 | 0;
+ i21 = 0;
+ do {
+ i22 = HEAP32[i19 >> 2] | 0;
+ HEAP8[i1] = HEAP8[i22 + (i21 << 3) + 4 | 0] | 0;
+ if ((i28 | 0) == 0) {
+ i28 = FUNCTION_TABLE_iiiii[HEAP32[i16 >> 2] & 3](HEAP32[i2 >> 2] | 0, i1, 1, HEAP32[i20 >> 2] | 0) | 0;
+ HEAP32[i4 >> 2] = i28;
+ i22 = HEAP32[i19 >> 2] | 0;
+ }
+ HEAP8[i1] = HEAP8[i22 + (i21 << 3) + 5 | 0] | 0;
+ if ((i28 | 0) == 0) {
+ i28 = FUNCTION_TABLE_iiiii[HEAP32[i16 >> 2] & 3](HEAP32[i2 >> 2] | 0, i1, 1, HEAP32[i20 >> 2] | 0) | 0;
+ HEAP32[i4 >> 2] = i28;
+ }
+ i21 = i21 + 1 | 0;
+ } while ((i21 | 0) != (i18 | 0));
+ }
+ i16 = i2 + 12 | 0;
+ if ((HEAP32[i16 >> 2] | 0) == 0 ? (i13 = HEAP32[i6 + 36 >> 2] | 0, (i13 | 0) != 0) : 0) {
+ HEAP32[i12 >> 2] = (HEAP32[i13 + 12 >> 2] | 0) + 1;
+ if ((i28 | 0) == 0 ? (i14 = i2 + 4 | 0, i15 = i2 + 8 | 0, i30 = FUNCTION_TABLE_iiiii[HEAP32[i14 >> 2] & 3](HEAP32[i2 >> 2] | 0, i12, 4, HEAP32[i15 >> 2] | 0) | 0, HEAP32[i4 >> 2] = i30, (i30 | 0) == 0) : 0) {
+ HEAP32[i4 >> 2] = FUNCTION_TABLE_iiiii[HEAP32[i14 >> 2] & 3](HEAP32[i2 >> 2] | 0, i13 + 16 | 0, HEAP32[i12 >> 2] | 0, HEAP32[i15 >> 2] | 0) | 0;
+ }
+ } else {
+ i12 = i10;
+ i11 = 50;
+ }
+ if ((i11 | 0) == 50) {
+ HEAP32[i10 >> 2] = 0;
+ if ((i28 | 0) == 0) {
+ HEAP32[i4 >> 2] = FUNCTION_TABLE_iiiii[HEAP32[i2 + 4 >> 2] & 3](HEAP32[i2 >> 2] | 0, i12, 4, HEAP32[i2 + 8 >> 2] | 0) | 0;
+ }
+ }
+ if ((HEAP32[i16 >> 2] | 0) == 0) {
+ i11 = HEAP32[i6 + 52 >> 2] | 0;
+ } else {
+ i11 = 0;
+ }
+ i10 = HEAP32[i6 + 20 >> 2] | 0;
+ HEAP32[i9 >> 2] = i11;
+ i14 = HEAP32[i4 >> 2] | 0;
+ if ((i14 | 0) == 0) {
+ i12 = i2 + 4 | 0;
+ i13 = i2 + 8 | 0;
+ i14 = FUNCTION_TABLE_iiiii[HEAP32[i12 >> 2] & 3](HEAP32[i2 >> 2] | 0, i9, 4, HEAP32[i13 >> 2] | 0) | 0;
+ HEAP32[i4 >> 2] = i14;
+ if ((i14 | 0) == 0) {
+ i14 = FUNCTION_TABLE_iiiii[HEAP32[i12 >> 2] & 3](HEAP32[i2 >> 2] | 0, i10, i11 << 2, HEAP32[i13 >> 2] | 0) | 0;
+ HEAP32[i4 >> 2] = i14;
+ }
+ }
+ if ((HEAP32[i16 >> 2] | 0) == 0) {
+ i9 = HEAP32[i6 + 60 >> 2] | 0;
+ } else {
+ i9 = 0;
+ }
+ HEAP32[i8 >> 2] = i9;
+ if ((i14 | 0) == 0) {
+ i14 = FUNCTION_TABLE_iiiii[HEAP32[i2 + 4 >> 2] & 3](HEAP32[i2 >> 2] | 0, i8, 4, HEAP32[i2 + 8 >> 2] | 0) | 0;
+ HEAP32[i4 >> 2] = i14;
+ }
+ if ((i9 | 0) > 0) {
+ i10 = i6 + 24 | 0;
+ i11 = i2 + 4 | 0;
+ i8 = i2 + 8 | 0;
+ i12 = 0;
+ do {
+ i13 = HEAP32[(HEAP32[i10 >> 2] | 0) + (i12 * 12 | 0) >> 2] | 0;
+ if ((i13 | 0) == 0) {
+ HEAP32[i1 >> 2] = 0;
+ if ((i14 | 0) == 0) {
+ i14 = FUNCTION_TABLE_iiiii[HEAP32[i11 >> 2] & 3](HEAP32[i2 >> 2] | 0, i1, 4, HEAP32[i8 >> 2] | 0) | 0;
+ HEAP32[i4 >> 2] = i14;
+ }
+ } else {
+ HEAP32[i3 >> 2] = (HEAP32[i13 + 12 >> 2] | 0) + 1;
+ if ((i14 | 0) == 0) {
+ i14 = FUNCTION_TABLE_iiiii[HEAP32[i11 >> 2] & 3](HEAP32[i2 >> 2] | 0, i3, 4, HEAP32[i8 >> 2] | 0) | 0;
+ HEAP32[i4 >> 2] = i14;
+ if ((i14 | 0) == 0) {
+ i14 = FUNCTION_TABLE_iiiii[HEAP32[i11 >> 2] & 3](HEAP32[i2 >> 2] | 0, i13 + 16 | 0, HEAP32[i3 >> 2] | 0, HEAP32[i8 >> 2] | 0) | 0;
+ HEAP32[i4 >> 2] = i14;
+ }
+ }
+ }
+ i13 = HEAP32[i10 >> 2] | 0;
+ HEAP32[i1 >> 2] = HEAP32[i13 + (i12 * 12 | 0) + 4 >> 2];
+ if ((i14 | 0) == 0) {
+ i14 = FUNCTION_TABLE_iiiii[HEAP32[i11 >> 2] & 3](HEAP32[i2 >> 2] | 0, i1, 4, HEAP32[i8 >> 2] | 0) | 0;
+ HEAP32[i4 >> 2] = i14;
+ i13 = HEAP32[i10 >> 2] | 0;
+ }
+ HEAP32[i1 >> 2] = HEAP32[i13 + (i12 * 12 | 0) + 8 >> 2];
+ if ((i14 | 0) == 0) {
+ i14 = FUNCTION_TABLE_iiiii[HEAP32[i11 >> 2] & 3](HEAP32[i2 >> 2] | 0, i1, 4, HEAP32[i8 >> 2] | 0) | 0;
+ HEAP32[i4 >> 2] = i14;
+ }
+ i12 = i12 + 1 | 0;
+ } while ((i12 | 0) != (i9 | 0));
+ }
+ if ((HEAP32[i16 >> 2] | 0) == 0) {
+ i8 = HEAP32[i17 >> 2] | 0;
+ } else {
+ i8 = 0;
+ }
+ HEAP32[i7 >> 2] = i8;
+ if ((i14 | 0) == 0) {
+ i14 = FUNCTION_TABLE_iiiii[HEAP32[i2 + 4 >> 2] & 3](HEAP32[i2 >> 2] | 0, i7, 4, HEAP32[i2 + 8 >> 2] | 0) | 0;
+ HEAP32[i4 >> 2] = i14;
+ }
+ if ((i8 | 0) <= 0) {
+ STACKTOP = i5;
+ return;
+ }
+ i7 = i6 + 28 | 0;
+ i6 = i2 + 4 | 0;
+ i9 = i2 + 8 | 0;
+ i10 = 0;
+ do {
+ i11 = HEAP32[(HEAP32[i7 >> 2] | 0) + (i10 << 3) >> 2] | 0;
+ if ((i11 | 0) == 0) {
+ HEAP32[i1 >> 2] = 0;
+ if ((i14 | 0) == 0) {
+ i14 = FUNCTION_TABLE_iiiii[HEAP32[i6 >> 2] & 3](HEAP32[i2 >> 2] | 0, i1, 4, HEAP32[i9 >> 2] | 0) | 0;
+ HEAP32[i4 >> 2] = i14;
+ }
+ } else {
+ HEAP32[i3 >> 2] = (HEAP32[i11 + 12 >> 2] | 0) + 1;
+ if ((i14 | 0) == 0) {
+ i14 = FUNCTION_TABLE_iiiii[HEAP32[i6 >> 2] & 3](HEAP32[i2 >> 2] | 0, i3, 4, HEAP32[i9 >> 2] | 0) | 0;
+ HEAP32[i4 >> 2] = i14;
+ if ((i14 | 0) == 0) {
+ i14 = FUNCTION_TABLE_iiiii[HEAP32[i6 >> 2] & 3](HEAP32[i2 >> 2] | 0, i11 + 16 | 0, HEAP32[i3 >> 2] | 0, HEAP32[i9 >> 2] | 0) | 0;
+ HEAP32[i4 >> 2] = i14;
+ }
+ }
+ }
+ i10 = i10 + 1 | 0;
+ } while ((i10 | 0) != (i8 | 0));
+ STACKTOP = i5;
+ return;
+}
+function _LoadFunction(i2, i6) {
+ i2 = i2 | 0;
+ i6 = i6 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i5 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = i1;
+ i5 = i1 + 8 | 0;
+ i4 = i2 + 4 | 0;
+ if ((_luaZ_read(HEAP32[i4 >> 2] | 0, i3, 4) | 0) != 0) {
+ _error(i2, 8824);
+ }
+ i8 = HEAP32[i3 >> 2] | 0;
+ if ((i8 | 0) < 0) {
+ _error(i2, 8872);
+ }
+ HEAP32[i6 + 64 >> 2] = i8;
+ if ((_luaZ_read(HEAP32[i4 >> 2] | 0, i3, 4) | 0) != 0) {
+ _error(i2, 8824);
+ }
+ i8 = HEAP32[i3 >> 2] | 0;
+ if ((i8 | 0) < 0) {
+ _error(i2, 8872);
+ }
+ HEAP32[i6 + 68 >> 2] = i8;
+ if ((_luaZ_read(HEAP32[i4 >> 2] | 0, i3, 1) | 0) != 0) {
+ _error(i2, 8824);
+ }
+ HEAP8[i6 + 76 | 0] = HEAP8[i3] | 0;
+ if ((_luaZ_read(HEAP32[i4 >> 2] | 0, i3, 1) | 0) != 0) {
+ _error(i2, 8824);
+ }
+ HEAP8[i6 + 77 | 0] = HEAP8[i3] | 0;
+ if ((_luaZ_read(HEAP32[i4 >> 2] | 0, i3, 1) | 0) != 0) {
+ _error(i2, 8824);
+ }
+ HEAP8[i6 + 78 | 0] = HEAP8[i3] | 0;
+ if ((_luaZ_read(HEAP32[i4 >> 2] | 0, i3, 4) | 0) != 0) {
+ _error(i2, 8824);
+ }
+ i9 = HEAP32[i3 >> 2] | 0;
+ if ((i9 | 0) < 0) {
+ _error(i2, 8872);
+ }
+ i8 = HEAP32[i2 >> 2] | 0;
+ if ((i9 + 1 | 0) >>> 0 > 1073741823) {
+ _luaM_toobig(i8);
+ }
+ i14 = i9 << 2;
+ i13 = _luaM_realloc_(i8, 0, 0, i14) | 0;
+ HEAP32[i6 + 12 >> 2] = i13;
+ HEAP32[i6 + 48 >> 2] = i9;
+ if ((_luaZ_read(HEAP32[i4 >> 2] | 0, i13, i14) | 0) != 0) {
+ _error(i2, 8824);
+ }
+ if ((_luaZ_read(HEAP32[i4 >> 2] | 0, i3, 4) | 0) != 0) {
+ _error(i2, 8824);
+ }
+ i8 = HEAP32[i3 >> 2] | 0;
+ if ((i8 | 0) < 0) {
+ _error(i2, 8872);
+ }
+ i9 = HEAP32[i2 >> 2] | 0;
+ if ((i8 + 1 | 0) >>> 0 > 268435455) {
+ _luaM_toobig(i9);
+ }
+ i11 = _luaM_realloc_(i9, 0, 0, i8 << 4) | 0;
+ i9 = i6 + 8 | 0;
+ HEAP32[i9 >> 2] = i11;
+ HEAP32[i6 + 44 >> 2] = i8;
+ i12 = (i8 | 0) > 0;
+ L43 : do {
+ if (i12) {
+ i10 = 0;
+ do {
+ HEAP32[i11 + (i10 << 4) + 8 >> 2] = 0;
+ i10 = i10 + 1 | 0;
+ } while ((i10 | 0) != (i8 | 0));
+ if (i12) {
+ i10 = i2 + 8 | 0;
+ i13 = 0;
+ while (1) {
+ i12 = i11 + (i13 << 4) | 0;
+ if ((_luaZ_read(HEAP32[i4 >> 2] | 0, i3, 1) | 0) != 0) {
+ i9 = 34;
+ break;
+ }
+ i14 = HEAP8[i3] | 0;
+ if ((i14 | 0) == 4) {
+ if ((_luaZ_read(HEAP32[i4 >> 2] | 0, i3, 4) | 0) != 0) {
+ i9 = 44;
+ break;
+ }
+ i14 = HEAP32[i3 >> 2] | 0;
+ if ((i14 | 0) == 0) {
+ i14 = 0;
+ } else {
+ i14 = _luaZ_openspace(HEAP32[i2 >> 2] | 0, HEAP32[i10 >> 2] | 0, i14) | 0;
+ if ((_luaZ_read(HEAP32[i4 >> 2] | 0, i14, HEAP32[i3 >> 2] | 0) | 0) != 0) {
+ i9 = 47;
+ break;
+ }
+ i14 = _luaS_newlstr(HEAP32[i2 >> 2] | 0, i14, (HEAP32[i3 >> 2] | 0) + -1 | 0) | 0;
+ }
+ HEAP32[i12 >> 2] = i14;
+ HEAP32[i11 + (i13 << 4) + 8 >> 2] = HEAPU8[i14 + 4 | 0] | 64;
+ } else if ((i14 | 0) == 1) {
+ if ((_luaZ_read(HEAP32[i4 >> 2] | 0, i3, 1) | 0) != 0) {
+ i9 = 38;
+ break;
+ }
+ HEAP32[i12 >> 2] = HEAP8[i3] | 0;
+ HEAP32[i11 + (i13 << 4) + 8 >> 2] = 1;
+ } else if ((i14 | 0) == 3) {
+ if ((_luaZ_read(HEAP32[i4 >> 2] | 0, i3, 8) | 0) != 0) {
+ i9 = 41;
+ break;
+ }
+ HEAPF64[i12 >> 3] = +HEAPF64[i3 >> 3];
+ HEAP32[i11 + (i13 << 4) + 8 >> 2] = 3;
+ } else if ((i14 | 0) == 0) {
+ HEAP32[i11 + (i13 << 4) + 8 >> 2] = 0;
+ }
+ i13 = i13 + 1 | 0;
+ if ((i13 | 0) >= (i8 | 0)) {
+ break L43;
+ }
+ i11 = HEAP32[i9 >> 2] | 0;
+ }
+ if ((i9 | 0) == 34) {
+ _error(i2, 8824);
+ } else if ((i9 | 0) == 38) {
+ _error(i2, 8824);
+ } else if ((i9 | 0) == 41) {
+ _error(i2, 8824);
+ } else if ((i9 | 0) == 44) {
+ _error(i2, 8824);
+ } else if ((i9 | 0) == 47) {
+ _error(i2, 8824);
+ }
+ }
+ }
+ } while (0);
+ if ((_luaZ_read(HEAP32[i4 >> 2] | 0, i3, 4) | 0) != 0) {
+ _error(i2, 8824);
+ }
+ i8 = HEAP32[i3 >> 2] | 0;
+ if ((i8 | 0) < 0) {
+ _error(i2, 8872);
+ }
+ i9 = HEAP32[i2 >> 2] | 0;
+ if ((i8 + 1 | 0) >>> 0 > 1073741823) {
+ _luaM_toobig(i9);
+ }
+ i11 = _luaM_realloc_(i9, 0, 0, i8 << 2) | 0;
+ i9 = i6 + 16 | 0;
+ HEAP32[i9 >> 2] = i11;
+ HEAP32[i6 + 56 >> 2] = i8;
+ i10 = (i8 | 0) > 0;
+ if (i10) {
+ i12 = 0;
+ while (1) {
+ HEAP32[i11 + (i12 << 2) >> 2] = 0;
+ i12 = i12 + 1 | 0;
+ if ((i12 | 0) == (i8 | 0)) {
+ break;
+ }
+ i11 = HEAP32[i9 >> 2] | 0;
+ }
+ if (i10) {
+ i10 = 0;
+ do {
+ i14 = _luaF_newproto(HEAP32[i2 >> 2] | 0) | 0;
+ HEAP32[(HEAP32[i9 >> 2] | 0) + (i10 << 2) >> 2] = i14;
+ _LoadFunction(i2, HEAP32[(HEAP32[i9 >> 2] | 0) + (i10 << 2) >> 2] | 0);
+ i10 = i10 + 1 | 0;
+ } while ((i10 | 0) != (i8 | 0));
+ }
+ }
+ if ((_luaZ_read(HEAP32[i4 >> 2] | 0, i3, 4) | 0) != 0) {
+ _error(i2, 8824);
+ }
+ i9 = HEAP32[i3 >> 2] | 0;
+ if ((i9 | 0) < 0) {
+ _error(i2, 8872);
+ }
+ i8 = HEAP32[i2 >> 2] | 0;
+ if ((i9 + 1 | 0) >>> 0 > 536870911) {
+ _luaM_toobig(i8);
+ }
+ i10 = _luaM_realloc_(i8, 0, 0, i9 << 3) | 0;
+ i8 = i6 + 28 | 0;
+ HEAP32[i8 >> 2] = i10;
+ HEAP32[i6 + 40 >> 2] = i9;
+ L98 : do {
+ if ((i9 | 0) > 0) {
+ HEAP32[i10 >> 2] = 0;
+ if ((i9 | 0) == 1) {
+ i10 = 0;
+ } else {
+ i10 = 1;
+ while (1) {
+ HEAP32[(HEAP32[i8 >> 2] | 0) + (i10 << 3) >> 2] = 0;
+ i10 = i10 + 1 | 0;
+ if ((i10 | 0) == (i9 | 0)) {
+ i10 = 0;
+ break;
+ }
+ }
+ }
+ while (1) {
+ if ((_luaZ_read(HEAP32[i4 >> 2] | 0, i3, 1) | 0) != 0) {
+ i9 = 73;
+ break;
+ }
+ HEAP8[(HEAP32[i8 >> 2] | 0) + (i10 << 3) + 4 | 0] = HEAP8[i3] | 0;
+ if ((_luaZ_read(HEAP32[i4 >> 2] | 0, i3, 1) | 0) != 0) {
+ i9 = 75;
+ break;
+ }
+ HEAP8[(HEAP32[i8 >> 2] | 0) + (i10 << 3) + 5 | 0] = HEAP8[i3] | 0;
+ i10 = i10 + 1 | 0;
+ if ((i10 | 0) >= (i9 | 0)) {
+ break L98;
+ }
+ }
+ if ((i9 | 0) == 73) {
+ _error(i2, 8824);
+ } else if ((i9 | 0) == 75) {
+ _error(i2, 8824);
+ }
+ }
+ } while (0);
+ if ((_luaZ_read(HEAP32[i4 >> 2] | 0, i3, 4) | 0) != 0) {
+ _error(i2, 8824);
+ }
+ i9 = HEAP32[i3 >> 2] | 0;
+ do {
+ if ((i9 | 0) != 0) {
+ i9 = _luaZ_openspace(HEAP32[i2 >> 2] | 0, HEAP32[i2 + 8 >> 2] | 0, i9) | 0;
+ if ((_luaZ_read(HEAP32[i4 >> 2] | 0, i9, HEAP32[i3 >> 2] | 0) | 0) == 0) {
+ i7 = _luaS_newlstr(HEAP32[i2 >> 2] | 0, i9, (HEAP32[i3 >> 2] | 0) + -1 | 0) | 0;
+ break;
+ } else {
+ _error(i2, 8824);
+ }
+ } else {
+ i7 = 0;
+ }
+ } while (0);
+ HEAP32[i6 + 36 >> 2] = i7;
+ if ((_luaZ_read(HEAP32[i4 >> 2] | 0, i3, 4) | 0) != 0) {
+ _error(i2, 8824);
+ }
+ i7 = HEAP32[i3 >> 2] | 0;
+ if ((i7 | 0) < 0) {
+ _error(i2, 8872);
+ }
+ i9 = HEAP32[i2 >> 2] | 0;
+ if ((i7 + 1 | 0) >>> 0 > 1073741823) {
+ _luaM_toobig(i9);
+ }
+ i14 = i7 << 2;
+ i13 = _luaM_realloc_(i9, 0, 0, i14) | 0;
+ HEAP32[i6 + 20 >> 2] = i13;
+ HEAP32[i6 + 52 >> 2] = i7;
+ if ((_luaZ_read(HEAP32[i4 >> 2] | 0, i13, i14) | 0) != 0) {
+ _error(i2, 8824);
+ }
+ if ((_luaZ_read(HEAP32[i4 >> 2] | 0, i3, 4) | 0) != 0) {
+ _error(i2, 8824);
+ }
+ i7 = HEAP32[i3 >> 2] | 0;
+ if ((i7 | 0) < 0) {
+ _error(i2, 8872);
+ }
+ i9 = HEAP32[i2 >> 2] | 0;
+ if ((i7 + 1 | 0) >>> 0 > 357913941) {
+ _luaM_toobig(i9);
+ }
+ i10 = _luaM_realloc_(i9, 0, 0, i7 * 12 | 0) | 0;
+ i9 = i6 + 24 | 0;
+ HEAP32[i9 >> 2] = i10;
+ HEAP32[i6 + 60 >> 2] = i7;
+ L141 : do {
+ if ((i7 | 0) > 0) {
+ HEAP32[i10 >> 2] = 0;
+ if ((i7 | 0) != 1) {
+ i6 = 1;
+ do {
+ HEAP32[(HEAP32[i9 >> 2] | 0) + (i6 * 12 | 0) >> 2] = 0;
+ i6 = i6 + 1 | 0;
+ } while ((i6 | 0) != (i7 | 0));
+ }
+ i6 = i2 + 8 | 0;
+ i10 = 0;
+ while (1) {
+ if ((_luaZ_read(HEAP32[i4 >> 2] | 0, i3, 4) | 0) != 0) {
+ i9 = 102;
+ break;
+ }
+ i11 = HEAP32[i3 >> 2] | 0;
+ if ((i11 | 0) == 0) {
+ i11 = 0;
+ } else {
+ i11 = _luaZ_openspace(HEAP32[i2 >> 2] | 0, HEAP32[i6 >> 2] | 0, i11) | 0;
+ if ((_luaZ_read(HEAP32[i4 >> 2] | 0, i11, HEAP32[i3 >> 2] | 0) | 0) != 0) {
+ i9 = 105;
+ break;
+ }
+ i11 = _luaS_newlstr(HEAP32[i2 >> 2] | 0, i11, (HEAP32[i3 >> 2] | 0) + -1 | 0) | 0;
+ }
+ HEAP32[(HEAP32[i9 >> 2] | 0) + (i10 * 12 | 0) >> 2] = i11;
+ if ((_luaZ_read(HEAP32[i4 >> 2] | 0, i3, 4) | 0) != 0) {
+ i9 = 108;
+ break;
+ }
+ i11 = HEAP32[i3 >> 2] | 0;
+ if ((i11 | 0) < 0) {
+ i9 = 110;
+ break;
+ }
+ HEAP32[(HEAP32[i9 >> 2] | 0) + (i10 * 12 | 0) + 4 >> 2] = i11;
+ if ((_luaZ_read(HEAP32[i4 >> 2] | 0, i3, 4) | 0) != 0) {
+ i9 = 112;
+ break;
+ }
+ i11 = HEAP32[i3 >> 2] | 0;
+ if ((i11 | 0) < 0) {
+ i9 = 114;
+ break;
+ }
+ HEAP32[(HEAP32[i9 >> 2] | 0) + (i10 * 12 | 0) + 8 >> 2] = i11;
+ i10 = i10 + 1 | 0;
+ if ((i10 | 0) >= (i7 | 0)) {
+ break L141;
+ }
+ }
+ if ((i9 | 0) == 102) {
+ _error(i2, 8824);
+ } else if ((i9 | 0) == 105) {
+ _error(i2, 8824);
+ } else if ((i9 | 0) == 108) {
+ _error(i2, 8824);
+ } else if ((i9 | 0) == 110) {
+ _error(i2, 8872);
+ } else if ((i9 | 0) == 112) {
+ _error(i2, 8824);
+ } else if ((i9 | 0) == 114) {
+ _error(i2, 8872);
+ }
+ }
+ } while (0);
+ if ((_luaZ_read(HEAP32[i4 >> 2] | 0, i5, 4) | 0) != 0) {
+ _error(i2, 8824);
+ }
+ i6 = HEAP32[i5 >> 2] | 0;
+ if ((i6 | 0) < 0) {
+ _error(i2, 8872);
+ }
+ if ((i6 | 0) <= 0) {
+ STACKTOP = i1;
+ return;
+ }
+ i5 = i2 + 8 | 0;
+ i7 = 0;
+ while (1) {
+ if ((_luaZ_read(HEAP32[i4 >> 2] | 0, i3, 4) | 0) != 0) {
+ i9 = 123;
+ break;
+ }
+ i9 = HEAP32[i3 >> 2] | 0;
+ if ((i9 | 0) == 0) {
+ i9 = 0;
+ } else {
+ i9 = _luaZ_openspace(HEAP32[i2 >> 2] | 0, HEAP32[i5 >> 2] | 0, i9) | 0;
+ if ((_luaZ_read(HEAP32[i4 >> 2] | 0, i9, HEAP32[i3 >> 2] | 0) | 0) != 0) {
+ i9 = 126;
+ break;
+ }
+ i9 = _luaS_newlstr(HEAP32[i2 >> 2] | 0, i9, (HEAP32[i3 >> 2] | 0) + -1 | 0) | 0;
+ }
+ HEAP32[(HEAP32[i8 >> 2] | 0) + (i7 << 3) >> 2] = i9;
+ i7 = i7 + 1 | 0;
+ if ((i7 | 0) >= (i6 | 0)) {
+ i9 = 129;
+ break;
+ }
+ }
+ if ((i9 | 0) == 123) {
+ _error(i2, 8824);
+ } else if ((i9 | 0) == 126) {
+ _error(i2, 8824);
+ } else if ((i9 | 0) == 129) {
+ STACKTOP = i1;
+ return;
+ }
+}
+function _exp2reg(i4, i1, i7) {
+ i4 = i4 | 0;
+ i1 = i1 | 0;
+ i7 = i7 | 0;
+ var i2 = 0, i3 = 0, i5 = 0, i6 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0, i23 = 0, i24 = 0, i25 = 0;
+ i5 = STACKTOP;
+ _discharge2reg(i4, i1, i7);
+ i6 = i1 + 16 | 0;
+ do {
+ if ((HEAP32[i1 >> 2] | 0) == 10 ? (i10 = HEAP32[i1 + 8 >> 2] | 0, !((i10 | 0) == -1)) : 0) {
+ i22 = HEAP32[i6 >> 2] | 0;
+ if ((i22 | 0) == -1) {
+ HEAP32[i6 >> 2] = i10;
+ break;
+ }
+ i20 = HEAP32[(HEAP32[i4 >> 2] | 0) + 12 >> 2] | 0;
+ while (1) {
+ i19 = i20 + (i22 << 2) | 0;
+ i21 = HEAP32[i19 >> 2] | 0;
+ i23 = (i21 >>> 14) + -131071 | 0;
+ if ((i23 | 0) == -1) {
+ break;
+ }
+ i23 = i22 + 1 + i23 | 0;
+ if ((i23 | 0) == -1) {
+ break;
+ } else {
+ i22 = i23;
+ }
+ }
+ i10 = i10 + ~i22 | 0;
+ if ((((i10 | 0) > -1 ? i10 : 0 - i10 | 0) | 0) > 131071) {
+ _luaX_syntaxerror(HEAP32[i4 + 12 >> 2] | 0, 10624);
+ } else {
+ HEAP32[i19 >> 2] = (i10 << 14) + 2147467264 | i21 & 16383;
+ break;
+ }
+ }
+ } while (0);
+ i21 = HEAP32[i6 >> 2] | 0;
+ i10 = i1 + 20 | 0;
+ i19 = HEAP32[i10 >> 2] | 0;
+ if ((i21 | 0) == (i19 | 0)) {
+ HEAP32[i6 >> 2] = -1;
+ HEAP32[i10 >> 2] = -1;
+ i25 = i1 + 8 | 0;
+ HEAP32[i25 >> 2] = i7;
+ HEAP32[i1 >> 2] = 6;
+ STACKTOP = i5;
+ return;
+ }
+ L18 : do {
+ if ((i21 | 0) == -1) {
+ i18 = 20;
+ } else {
+ i20 = HEAP32[(HEAP32[i4 >> 2] | 0) + 12 >> 2] | 0;
+ while (1) {
+ i23 = i20 + (i21 << 2) | 0;
+ if ((i21 | 0) > 0 ? (i18 = HEAP32[i20 + (i21 + -1 << 2) >> 2] | 0, (HEAP8[5584 + (i18 & 63) | 0] | 0) < 0) : 0) {
+ i22 = i18;
+ } else {
+ i22 = HEAP32[i23 >> 2] | 0;
+ }
+ if ((i22 & 63 | 0) != 28) {
+ i18 = 28;
+ break L18;
+ }
+ i22 = ((HEAP32[i23 >> 2] | 0) >>> 14) + -131071 | 0;
+ if ((i22 | 0) == -1) {
+ i18 = 20;
+ break L18;
+ }
+ i21 = i21 + 1 + i22 | 0;
+ if ((i21 | 0) == -1) {
+ i18 = 20;
+ break;
+ }
+ }
+ }
+ } while (0);
+ L29 : do {
+ if ((i18 | 0) == 20) {
+ if ((i19 | 0) == -1) {
+ i15 = -1;
+ i8 = -1;
+ } else {
+ i20 = HEAP32[(HEAP32[i4 >> 2] | 0) + 12 >> 2] | 0;
+ while (1) {
+ i21 = i20 + (i19 << 2) | 0;
+ if ((i19 | 0) > 0 ? (i17 = HEAP32[i20 + (i19 + -1 << 2) >> 2] | 0, (HEAP8[5584 + (i17 & 63) | 0] | 0) < 0) : 0) {
+ i22 = i17;
+ } else {
+ i22 = HEAP32[i21 >> 2] | 0;
+ }
+ if ((i22 & 63 | 0) != 28) {
+ i18 = 28;
+ break L29;
+ }
+ i21 = ((HEAP32[i21 >> 2] | 0) >>> 14) + -131071 | 0;
+ if ((i21 | 0) == -1) {
+ i15 = -1;
+ i8 = -1;
+ break L29;
+ }
+ i19 = i19 + 1 + i21 | 0;
+ if ((i19 | 0) == -1) {
+ i15 = -1;
+ i8 = -1;
+ break;
+ }
+ }
+ }
+ }
+ } while (0);
+ do {
+ if ((i18 | 0) == 28) {
+ i17 = i4 + 28 | 0;
+ do {
+ if ((HEAP32[i1 >> 2] | 0) != 10) {
+ i21 = HEAP32[i17 >> 2] | 0;
+ HEAP32[i17 >> 2] = -1;
+ i18 = _luaK_code(i4, 2147450903) | 0;
+ if (!((i21 | 0) == -1)) {
+ if (!((i18 | 0) == -1)) {
+ i23 = HEAP32[(HEAP32[i4 >> 2] | 0) + 12 >> 2] | 0;
+ i22 = i18;
+ while (1) {
+ i20 = i23 + (i22 << 2) | 0;
+ i19 = HEAP32[i20 >> 2] | 0;
+ i24 = (i19 >>> 14) + -131071 | 0;
+ if ((i24 | 0) == -1) {
+ break;
+ }
+ i24 = i22 + 1 + i24 | 0;
+ if ((i24 | 0) == -1) {
+ break;
+ } else {
+ i22 = i24;
+ }
+ }
+ i21 = i21 + ~i22 | 0;
+ if ((((i21 | 0) > -1 ? i21 : 0 - i21 | 0) | 0) > 131071) {
+ _luaX_syntaxerror(HEAP32[i4 + 12 >> 2] | 0, 10624);
+ } else {
+ HEAP32[i20 >> 2] = (i21 << 14) + 2147467264 | i19 & 16383;
+ i16 = i18;
+ break;
+ }
+ } else {
+ i16 = i21;
+ }
+ } else {
+ i16 = i18;
+ }
+ } else {
+ i16 = -1;
+ }
+ } while (0);
+ i24 = i4 + 20 | 0;
+ i25 = i4 + 24 | 0;
+ HEAP32[i25 >> 2] = HEAP32[i24 >> 2];
+ i19 = i7 << 6;
+ i18 = _luaK_code(i4, i19 | 16387) | 0;
+ HEAP32[i25 >> 2] = HEAP32[i24 >> 2];
+ i19 = _luaK_code(i4, i19 | 8388611) | 0;
+ HEAP32[i25 >> 2] = HEAP32[i24 >> 2];
+ if (!((i16 | 0) == -1)) {
+ i22 = HEAP32[i17 >> 2] | 0;
+ if ((i22 | 0) == -1) {
+ HEAP32[i17 >> 2] = i16;
+ i15 = i18;
+ i8 = i19;
+ break;
+ }
+ i17 = HEAP32[(HEAP32[i4 >> 2] | 0) + 12 >> 2] | 0;
+ while (1) {
+ i21 = i17 + (i22 << 2) | 0;
+ i20 = HEAP32[i21 >> 2] | 0;
+ i23 = (i20 >>> 14) + -131071 | 0;
+ if ((i23 | 0) == -1) {
+ break;
+ }
+ i23 = i22 + 1 + i23 | 0;
+ if ((i23 | 0) == -1) {
+ break;
+ } else {
+ i22 = i23;
+ }
+ }
+ i16 = i16 + ~i22 | 0;
+ if ((((i16 | 0) > -1 ? i16 : 0 - i16 | 0) | 0) > 131071) {
+ _luaX_syntaxerror(HEAP32[i4 + 12 >> 2] | 0, 10624);
+ } else {
+ HEAP32[i21 >> 2] = (i16 << 14) + 2147467264 | i20 & 16383;
+ i15 = i18;
+ i8 = i19;
+ break;
+ }
+ } else {
+ i15 = i18;
+ i8 = i19;
+ }
+ }
+ } while (0);
+ i16 = HEAP32[i4 + 20 >> 2] | 0;
+ HEAP32[i4 + 24 >> 2] = i16;
+ i22 = HEAP32[i10 >> 2] | 0;
+ L67 : do {
+ if (!((i22 | 0) == -1)) {
+ i19 = (i7 | 0) == 255;
+ i17 = i7 << 6 & 16320;
+ i18 = HEAP32[(HEAP32[i4 >> 2] | 0) + 12 >> 2] | 0;
+ while (1) {
+ i20 = i18 + (i22 << 2) | 0;
+ i23 = HEAP32[i20 >> 2] | 0;
+ i21 = (i23 >>> 14) + -131071 | 0;
+ if ((i21 | 0) == -1) {
+ i21 = -1;
+ } else {
+ i21 = i22 + 1 + i21 | 0;
+ }
+ if ((i22 | 0) > 0 ? (i14 = i18 + (i22 + -1 << 2) | 0, i13 = HEAP32[i14 >> 2] | 0, (HEAP8[5584 + (i13 & 63) | 0] | 0) < 0) : 0) {
+ i24 = i14;
+ i25 = i13;
+ } else {
+ i24 = i20;
+ i25 = i23;
+ }
+ if ((i25 & 63 | 0) == 28) {
+ i23 = i25 >>> 23;
+ if (i19 | (i23 | 0) == (i7 | 0)) {
+ i23 = i25 & 8372224 | i23 << 6 | 27;
+ } else {
+ i23 = i25 & -16321 | i17;
+ }
+ HEAP32[i24 >> 2] = i23;
+ i22 = i16 + ~i22 | 0;
+ if ((((i22 | 0) > -1 ? i22 : 0 - i22 | 0) | 0) > 131071) {
+ i18 = 58;
+ break;
+ }
+ i22 = HEAP32[i20 >> 2] & 16383 | (i22 << 14) + 2147467264;
+ } else {
+ i22 = i15 + ~i22 | 0;
+ if ((((i22 | 0) > -1 ? i22 : 0 - i22 | 0) | 0) > 131071) {
+ i18 = 61;
+ break;
+ }
+ i22 = i23 & 16383 | (i22 << 14) + 2147467264;
+ }
+ HEAP32[i20 >> 2] = i22;
+ if ((i21 | 0) == -1) {
+ break L67;
+ } else {
+ i22 = i21;
+ }
+ }
+ if ((i18 | 0) == 58) {
+ _luaX_syntaxerror(HEAP32[i4 + 12 >> 2] | 0, 10624);
+ } else if ((i18 | 0) == 61) {
+ _luaX_syntaxerror(HEAP32[i4 + 12 >> 2] | 0, 10624);
+ }
+ }
+ } while (0);
+ i20 = HEAP32[i6 >> 2] | 0;
+ if ((i20 | 0) == -1) {
+ HEAP32[i6 >> 2] = -1;
+ HEAP32[i10 >> 2] = -1;
+ i25 = i1 + 8 | 0;
+ HEAP32[i25 >> 2] = i7;
+ HEAP32[i1 >> 2] = 6;
+ STACKTOP = i5;
+ return;
+ }
+ i13 = i7 << 6;
+ i15 = i13 & 16320;
+ i14 = HEAP32[(HEAP32[i4 >> 2] | 0) + 12 >> 2] | 0;
+ if ((i7 | 0) == 255) {
+ while (1) {
+ i17 = i14 + (i20 << 2) | 0;
+ i19 = HEAP32[i17 >> 2] | 0;
+ i18 = (i19 >>> 14) + -131071 | 0;
+ if ((i18 | 0) == -1) {
+ i18 = -1;
+ } else {
+ i18 = i20 + 1 + i18 | 0;
+ }
+ if ((i20 | 0) > 0 ? (i12 = i14 + (i20 + -1 << 2) | 0, i11 = HEAP32[i12 >> 2] | 0, (HEAP8[5584 + (i11 & 63) | 0] | 0) < 0) : 0) {
+ i22 = i12;
+ i21 = i11;
+ } else {
+ i22 = i17;
+ i21 = i19;
+ }
+ if ((i21 & 63 | 0) == 28) {
+ HEAP32[i22 >> 2] = i21 & 8372224 | i21 >>> 23 << 6 | 27;
+ i19 = i16 + ~i20 | 0;
+ if ((((i19 | 0) > -1 ? i19 : 0 - i19 | 0) | 0) > 131071) {
+ i18 = 87;
+ break;
+ }
+ i19 = HEAP32[i17 >> 2] & 16383 | (i19 << 14) + 2147467264;
+ } else {
+ i20 = i8 + ~i20 | 0;
+ if ((((i20 | 0) > -1 ? i20 : 0 - i20 | 0) | 0) > 131071) {
+ i18 = 90;
+ break;
+ }
+ i19 = i19 & 16383 | (i20 << 14) + 2147467264;
+ }
+ HEAP32[i17 >> 2] = i19;
+ if ((i18 | 0) == -1) {
+ i18 = 93;
+ break;
+ } else {
+ i20 = i18;
+ }
+ }
+ if ((i18 | 0) == 87) {
+ i25 = i4 + 12 | 0;
+ i25 = HEAP32[i25 >> 2] | 0;
+ _luaX_syntaxerror(i25, 10624);
+ } else if ((i18 | 0) == 90) {
+ i25 = i4 + 12 | 0;
+ i25 = HEAP32[i25 >> 2] | 0;
+ _luaX_syntaxerror(i25, 10624);
+ } else if ((i18 | 0) == 93) {
+ HEAP32[i6 >> 2] = -1;
+ HEAP32[i10 >> 2] = -1;
+ i25 = i1 + 8 | 0;
+ HEAP32[i25 >> 2] = i7;
+ HEAP32[i1 >> 2] = 6;
+ STACKTOP = i5;
+ return;
+ }
+ } else {
+ i9 = i20;
+ }
+ while (1) {
+ i11 = i14 + (i9 << 2) | 0;
+ i17 = HEAP32[i11 >> 2] | 0;
+ i12 = (i17 >>> 14) + -131071 | 0;
+ if ((i12 | 0) == -1) {
+ i12 = -1;
+ } else {
+ i12 = i9 + 1 + i12 | 0;
+ }
+ if ((i9 | 0) > 0 ? (i3 = i14 + (i9 + -1 << 2) | 0, i2 = HEAP32[i3 >> 2] | 0, (HEAP8[5584 + (i2 & 63) | 0] | 0) < 0) : 0) {
+ i18 = i3;
+ i19 = i2;
+ } else {
+ i18 = i11;
+ i19 = i17;
+ }
+ if ((i19 & 63 | 0) == 28) {
+ if ((i19 >>> 23 | 0) == (i7 | 0)) {
+ i17 = i19 & 8372224 | i13 | 27;
+ } else {
+ i17 = i19 & -16321 | i15;
+ }
+ HEAP32[i18 >> 2] = i17;
+ i9 = i16 + ~i9 | 0;
+ if ((((i9 | 0) > -1 ? i9 : 0 - i9 | 0) | 0) > 131071) {
+ i18 = 87;
+ break;
+ }
+ i9 = HEAP32[i11 >> 2] & 16383 | (i9 << 14) + 2147467264;
+ } else {
+ i9 = i8 + ~i9 | 0;
+ if ((((i9 | 0) > -1 ? i9 : 0 - i9 | 0) | 0) > 131071) {
+ i18 = 90;
+ break;
+ }
+ i9 = i17 & 16383 | (i9 << 14) + 2147467264;
+ }
+ HEAP32[i11 >> 2] = i9;
+ if ((i12 | 0) == -1) {
+ i18 = 93;
+ break;
+ } else {
+ i9 = i12;
+ }
+ }
+ if ((i18 | 0) == 87) {
+ i25 = i4 + 12 | 0;
+ i25 = HEAP32[i25 >> 2] | 0;
+ _luaX_syntaxerror(i25, 10624);
+ } else if ((i18 | 0) == 90) {
+ i25 = i4 + 12 | 0;
+ i25 = HEAP32[i25 >> 2] | 0;
+ _luaX_syntaxerror(i25, 10624);
+ } else if ((i18 | 0) == 93) {
+ HEAP32[i6 >> 2] = -1;
+ HEAP32[i10 >> 2] = -1;
+ i25 = i1 + 8 | 0;
+ HEAP32[i25 >> 2] = i7;
+ HEAP32[i1 >> 2] = 6;
+ STACKTOP = i5;
+ return;
+ }
+}
+function _propagatemark(i2) {
+ i2 = i2 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0;
+ i1 = STACKTOP;
+ i15 = i2 + 84 | 0;
+ i3 = HEAP32[i15 >> 2] | 0;
+ i10 = i3 + 5 | 0;
+ HEAP8[i10] = HEAPU8[i10] | 4;
+ switch (HEAPU8[i3 + 4 | 0] | 0) {
+ case 5:
+ {
+ i9 = i3 + 24 | 0;
+ HEAP32[i15 >> 2] = HEAP32[i9 >> 2];
+ i15 = i3 + 8 | 0;
+ i14 = HEAP32[i15 >> 2] | 0;
+ do {
+ if ((i14 | 0) != 0) {
+ if ((HEAP8[i14 + 6 | 0] & 8) == 0) {
+ i11 = _luaT_gettm(i14, 3, HEAP32[i2 + 196 >> 2] | 0) | 0;
+ i14 = HEAP32[i15 >> 2] | 0;
+ if ((i14 | 0) != 0) {
+ i6 = 5;
+ }
+ } else {
+ i11 = 0;
+ i6 = 5;
+ }
+ if ((i6 | 0) == 5) {
+ if (!((HEAP8[i14 + 5 | 0] & 3) == 0)) {
+ _reallymarkobject(i2, i14);
+ }
+ }
+ if (((i11 | 0) != 0 ? (HEAP32[i11 + 8 >> 2] & 15 | 0) == 4 : 0) ? (i13 = (HEAP32[i11 >> 2] | 0) + 16 | 0, i12 = _strchr(i13, 107) | 0, i12 = (i12 | 0) != 0, i13 = (_strchr(i13, 118) | 0) == 0, !(i13 & (i12 ^ 1))) : 0) {
+ HEAP8[i10] = HEAP8[i10] & 251;
+ if (i12) {
+ if (i13) {
+ _traverseephemeron(i2, i3) | 0;
+ break;
+ } else {
+ i15 = i2 + 100 | 0;
+ HEAP32[i9 >> 2] = HEAP32[i15 >> 2];
+ HEAP32[i15 >> 2] = i3;
+ break;
+ }
+ }
+ i15 = 1 << HEAPU8[i3 + 7 | 0];
+ i5 = HEAP32[i3 + 16 >> 2] | 0;
+ i4 = i5 + (i15 << 5) | 0;
+ i8 = (HEAP32[i3 + 28 >> 2] | 0) > 0 | 0;
+ if ((i15 | 0) > 0) {
+ do {
+ i12 = i5 + 8 | 0;
+ i10 = i5 + 24 | 0;
+ i11 = (HEAP32[i10 >> 2] & 64 | 0) == 0;
+ do {
+ if ((HEAP32[i12 >> 2] | 0) == 0) {
+ if (!i11 ? !((HEAP8[(HEAP32[i5 + 16 >> 2] | 0) + 5 | 0] & 3) == 0) : 0) {
+ HEAP32[i10 >> 2] = 11;
+ }
+ } else {
+ if (!i11 ? (i7 = HEAP32[i5 + 16 >> 2] | 0, !((HEAP8[i7 + 5 | 0] & 3) == 0)) : 0) {
+ _reallymarkobject(i2, i7);
+ }
+ if ((i8 | 0) == 0) {
+ i10 = HEAP32[i12 >> 2] | 0;
+ if ((i10 & 64 | 0) != 0) {
+ i8 = HEAP32[i5 >> 2] | 0;
+ if ((i10 & 15 | 0) != 4) {
+ i8 = (HEAP8[i8 + 5 | 0] & 3) != 0 | 0;
+ break;
+ }
+ if ((i8 | 0) != 0 ? !((HEAP8[i8 + 5 | 0] & 3) == 0) : 0) {
+ _reallymarkobject(i2, i8);
+ i8 = 0;
+ } else {
+ i8 = 0;
+ }
+ } else {
+ i8 = 0;
+ }
+ }
+ }
+ } while (0);
+ i5 = i5 + 32 | 0;
+ } while (i5 >>> 0 < i4 >>> 0);
+ }
+ if ((i8 | 0) == 0) {
+ i15 = i2 + 88 | 0;
+ HEAP32[i9 >> 2] = HEAP32[i15 >> 2];
+ HEAP32[i15 >> 2] = i3;
+ break;
+ } else {
+ i15 = i2 + 92 | 0;
+ HEAP32[i9 >> 2] = HEAP32[i15 >> 2];
+ HEAP32[i15 >> 2] = i3;
+ break;
+ }
+ } else {
+ i6 = 33;
+ }
+ } else {
+ i6 = 33;
+ }
+ } while (0);
+ if ((i6 | 0) == 33) {
+ i7 = i3 + 16 | 0;
+ i10 = HEAP32[i7 >> 2] | 0;
+ i6 = i10 + (1 << HEAPU8[i3 + 7 | 0] << 5) | 0;
+ i9 = i3 + 28 | 0;
+ i13 = HEAP32[i9 >> 2] | 0;
+ if ((i13 | 0) > 0) {
+ i10 = i3 + 12 | 0;
+ i11 = 0;
+ do {
+ i12 = HEAP32[i10 >> 2] | 0;
+ if ((HEAP32[i12 + (i11 << 4) + 8 >> 2] & 64 | 0) != 0 ? (i8 = HEAP32[i12 + (i11 << 4) >> 2] | 0, !((HEAP8[i8 + 5 | 0] & 3) == 0)) : 0) {
+ _reallymarkobject(i2, i8);
+ i13 = HEAP32[i9 >> 2] | 0;
+ }
+ i11 = i11 + 1 | 0;
+ } while ((i11 | 0) < (i13 | 0));
+ i7 = HEAP32[i7 >> 2] | 0;
+ } else {
+ i7 = i10;
+ }
+ if (i7 >>> 0 < i6 >>> 0) {
+ do {
+ i10 = i7 + 8 | 0;
+ i11 = HEAP32[i10 >> 2] | 0;
+ i9 = i7 + 24 | 0;
+ i8 = (HEAP32[i9 >> 2] & 64 | 0) == 0;
+ if ((i11 | 0) == 0) {
+ if (!i8 ? !((HEAP8[(HEAP32[i7 + 16 >> 2] | 0) + 5 | 0] & 3) == 0) : 0) {
+ HEAP32[i9 >> 2] = 11;
+ }
+ } else {
+ if (!i8 ? (i5 = HEAP32[i7 + 16 >> 2] | 0, !((HEAP8[i5 + 5 | 0] & 3) == 0)) : 0) {
+ _reallymarkobject(i2, i5);
+ i11 = HEAP32[i10 >> 2] | 0;
+ }
+ if ((i11 & 64 | 0) != 0 ? (i4 = HEAP32[i7 >> 2] | 0, !((HEAP8[i4 + 5 | 0] & 3) == 0)) : 0) {
+ _reallymarkobject(i2, i4);
+ }
+ }
+ i7 = i7 + 32 | 0;
+ } while (i7 >>> 0 < i6 >>> 0);
+ }
+ }
+ i3 = (HEAP32[i3 + 28 >> 2] << 4) + 32 + (32 << HEAPU8[i3 + 7 | 0]) | 0;
+ break;
+ }
+ case 8:
+ {
+ i7 = i3 + 60 | 0;
+ HEAP32[i15 >> 2] = HEAP32[i7 >> 2];
+ i4 = i2 + 88 | 0;
+ HEAP32[i7 >> 2] = HEAP32[i4 >> 2];
+ HEAP32[i4 >> 2] = i3;
+ HEAP8[i10] = HEAP8[i10] & 251;
+ i4 = i3 + 28 | 0;
+ i7 = HEAP32[i4 >> 2] | 0;
+ if ((i7 | 0) == 0) {
+ i3 = 1;
+ } else {
+ i5 = i3 + 8 | 0;
+ i6 = HEAP32[i5 >> 2] | 0;
+ if (i7 >>> 0 < i6 >>> 0) {
+ do {
+ if ((HEAP32[i7 + 8 >> 2] & 64 | 0) != 0 ? (i11 = HEAP32[i7 >> 2] | 0, !((HEAP8[i11 + 5 | 0] & 3) == 0)) : 0) {
+ _reallymarkobject(i2, i11);
+ i6 = HEAP32[i5 >> 2] | 0;
+ }
+ i7 = i7 + 16 | 0;
+ } while (i7 >>> 0 < i6 >>> 0);
+ }
+ if ((HEAP8[i2 + 61 | 0] | 0) == 1) {
+ i3 = i3 + 32 | 0;
+ i4 = (HEAP32[i4 >> 2] | 0) + (HEAP32[i3 >> 2] << 4) | 0;
+ if (i7 >>> 0 < i4 >>> 0) {
+ do {
+ HEAP32[i7 + 8 >> 2] = 0;
+ i7 = i7 + 16 | 0;
+ } while (i7 >>> 0 < i4 >>> 0);
+ }
+ } else {
+ i3 = i3 + 32 | 0;
+ }
+ i3 = (HEAP32[i3 >> 2] << 4) + 112 | 0;
+ }
+ break;
+ }
+ case 9:
+ {
+ HEAP32[i15 >> 2] = HEAP32[i3 + 72 >> 2];
+ i5 = i3 + 32 | 0;
+ i4 = HEAP32[i5 >> 2] | 0;
+ if ((i4 | 0) != 0 ? !((HEAP8[i4 + 5 | 0] & 3) == 0) : 0) {
+ HEAP32[i5 >> 2] = 0;
+ }
+ i4 = HEAP32[i3 + 36 >> 2] | 0;
+ if ((i4 | 0) != 0 ? !((HEAP8[i4 + 5 | 0] & 3) == 0) : 0) {
+ _reallymarkobject(i2, i4);
+ }
+ i4 = i3 + 44 | 0;
+ i8 = HEAP32[i4 >> 2] | 0;
+ if ((i8 | 0) > 0) {
+ i5 = i3 + 8 | 0;
+ i6 = 0;
+ do {
+ i7 = HEAP32[i5 >> 2] | 0;
+ if ((HEAP32[i7 + (i6 << 4) + 8 >> 2] & 64 | 0) != 0 ? (i9 = HEAP32[i7 + (i6 << 4) >> 2] | 0, !((HEAP8[i9 + 5 | 0] & 3) == 0)) : 0) {
+ _reallymarkobject(i2, i9);
+ i8 = HEAP32[i4 >> 2] | 0;
+ }
+ i6 = i6 + 1 | 0;
+ } while ((i6 | 0) < (i8 | 0));
+ }
+ i5 = i3 + 40 | 0;
+ i8 = HEAP32[i5 >> 2] | 0;
+ if ((i8 | 0) > 0) {
+ i6 = i3 + 28 | 0;
+ i7 = 0;
+ do {
+ i9 = HEAP32[(HEAP32[i6 >> 2] | 0) + (i7 << 3) >> 2] | 0;
+ if ((i9 | 0) != 0 ? !((HEAP8[i9 + 5 | 0] & 3) == 0) : 0) {
+ _reallymarkobject(i2, i9);
+ i8 = HEAP32[i5 >> 2] | 0;
+ }
+ i7 = i7 + 1 | 0;
+ } while ((i7 | 0) < (i8 | 0));
+ }
+ i6 = i3 + 56 | 0;
+ i8 = HEAP32[i6 >> 2] | 0;
+ if ((i8 | 0) > 0) {
+ i7 = i3 + 16 | 0;
+ i9 = 0;
+ do {
+ i10 = HEAP32[(HEAP32[i7 >> 2] | 0) + (i9 << 2) >> 2] | 0;
+ if ((i10 | 0) != 0 ? !((HEAP8[i10 + 5 | 0] & 3) == 0) : 0) {
+ _reallymarkobject(i2, i10);
+ i8 = HEAP32[i6 >> 2] | 0;
+ }
+ i9 = i9 + 1 | 0;
+ } while ((i9 | 0) < (i8 | 0));
+ }
+ i7 = i3 + 60 | 0;
+ i11 = HEAP32[i7 >> 2] | 0;
+ if ((i11 | 0) > 0) {
+ i8 = i3 + 24 | 0;
+ i9 = 0;
+ do {
+ i10 = HEAP32[(HEAP32[i8 >> 2] | 0) + (i9 * 12 | 0) >> 2] | 0;
+ if ((i10 | 0) != 0 ? !((HEAP8[i10 + 5 | 0] & 3) == 0) : 0) {
+ _reallymarkobject(i2, i10);
+ i11 = HEAP32[i7 >> 2] | 0;
+ }
+ i9 = i9 + 1 | 0;
+ } while ((i9 | 0) < (i11 | 0));
+ i8 = HEAP32[i6 >> 2] | 0;
+ }
+ i3 = (i11 * 12 | 0) + 80 + (HEAP32[i4 >> 2] << 4) + (HEAP32[i5 >> 2] << 3) + ((HEAP32[i3 + 48 >> 2] | 0) + i8 + (HEAP32[i3 + 52 >> 2] | 0) << 2) | 0;
+ break;
+ }
+ case 38:
+ {
+ HEAP32[i15 >> 2] = HEAP32[i3 + 8 >> 2];
+ i4 = i3 + 6 | 0;
+ i5 = HEAP8[i4] | 0;
+ if (i5 << 24 >> 24 == 0) {
+ i7 = i5 & 255;
+ } else {
+ i6 = 0;
+ do {
+ if ((HEAP32[i3 + (i6 << 4) + 24 >> 2] & 64 | 0) != 0 ? (i14 = HEAP32[i3 + (i6 << 4) + 16 >> 2] | 0, !((HEAP8[i14 + 5 | 0] & 3) == 0)) : 0) {
+ _reallymarkobject(i2, i14);
+ i5 = HEAP8[i4] | 0;
+ }
+ i6 = i6 + 1 | 0;
+ i7 = i5 & 255;
+ } while ((i6 | 0) < (i7 | 0));
+ }
+ i3 = (i7 << 4) + 16 | 0;
+ break;
+ }
+ case 6:
+ {
+ HEAP32[i15 >> 2] = HEAP32[i3 + 8 >> 2];
+ i4 = HEAP32[i3 + 12 >> 2] | 0;
+ if ((i4 | 0) != 0 ? !((HEAP8[i4 + 5 | 0] & 3) == 0) : 0) {
+ _reallymarkobject(i2, i4);
+ }
+ i4 = i3 + 6 | 0;
+ i6 = HEAP8[i4] | 0;
+ if (i6 << 24 >> 24 == 0) {
+ i7 = i6 & 255;
+ } else {
+ i5 = 0;
+ do {
+ i7 = HEAP32[i3 + (i5 << 2) + 16 >> 2] | 0;
+ if ((i7 | 0) != 0 ? !((HEAP8[i7 + 5 | 0] & 3) == 0) : 0) {
+ _reallymarkobject(i2, i7);
+ i6 = HEAP8[i4] | 0;
+ }
+ i5 = i5 + 1 | 0;
+ i7 = i6 & 255;
+ } while ((i5 | 0) < (i7 | 0));
+ }
+ i3 = (i7 << 2) + 16 | 0;
+ break;
+ }
+ default:
+ {
+ STACKTOP = i1;
+ return;
+ }
+ }
+ i15 = i2 + 16 | 0;
+ HEAP32[i15 >> 2] = (HEAP32[i15 >> 2] | 0) + i3;
+ STACKTOP = i1;
+ return;
+}
+function _strstr(i8, i4) {
+ i8 = i8 | 0;
+ i4 = i4 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i5 = 0, i6 = 0, i7 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 1056 | 0;
+ i6 = i1 + 1024 | 0;
+ i2 = i1;
+ i10 = HEAP8[i4] | 0;
+ if (i10 << 24 >> 24 == 0) {
+ i20 = i8;
+ STACKTOP = i1;
+ return i20 | 0;
+ }
+ i8 = _strchr(i8, i10 << 24 >> 24) | 0;
+ if ((i8 | 0) == 0) {
+ i20 = 0;
+ STACKTOP = i1;
+ return i20 | 0;
+ }
+ i13 = HEAP8[i4 + 1 | 0] | 0;
+ if (i13 << 24 >> 24 == 0) {
+ i20 = i8;
+ STACKTOP = i1;
+ return i20 | 0;
+ }
+ i11 = i8 + 1 | 0;
+ i9 = HEAP8[i11] | 0;
+ if (i9 << 24 >> 24 == 0) {
+ i20 = 0;
+ STACKTOP = i1;
+ return i20 | 0;
+ }
+ i15 = HEAP8[i4 + 2 | 0] | 0;
+ if (i15 << 24 >> 24 == 0) {
+ i2 = i13 & 255 | (i10 & 255) << 8;
+ i3 = i9;
+ i4 = HEAPU8[i8] << 8 | i9 & 255;
+ while (1) {
+ i5 = i4 & 65535;
+ if ((i5 | 0) == (i2 | 0)) {
+ break;
+ }
+ i11 = i11 + 1 | 0;
+ i4 = HEAP8[i11] | 0;
+ if (i4 << 24 >> 24 == 0) {
+ i3 = 0;
+ break;
+ } else {
+ i3 = i4;
+ i4 = i4 & 255 | i5 << 8;
+ }
+ }
+ i20 = i3 << 24 >> 24 == 0 ? 0 : i11 + -1 | 0;
+ STACKTOP = i1;
+ return i20 | 0;
+ }
+ i16 = i8 + 2 | 0;
+ i11 = HEAP8[i16] | 0;
+ if (i11 << 24 >> 24 == 0) {
+ i20 = 0;
+ STACKTOP = i1;
+ return i20 | 0;
+ }
+ i18 = HEAP8[i4 + 3 | 0] | 0;
+ if (i18 << 24 >> 24 == 0) {
+ i2 = (i13 & 255) << 16 | (i10 & 255) << 24 | (i15 & 255) << 8;
+ i4 = (i11 & 255) << 8 | (i9 & 255) << 16 | HEAPU8[i8] << 24;
+ if ((i4 | 0) == (i2 | 0)) {
+ i3 = 0;
+ } else {
+ do {
+ i16 = i16 + 1 | 0;
+ i3 = HEAP8[i16] | 0;
+ i4 = (i3 & 255 | i4) << 8;
+ i3 = i3 << 24 >> 24 == 0;
+ } while (!(i3 | (i4 | 0) == (i2 | 0)));
+ }
+ i20 = i3 ? 0 : i16 + -2 | 0;
+ STACKTOP = i1;
+ return i20 | 0;
+ }
+ i16 = i8 + 3 | 0;
+ i17 = HEAP8[i16] | 0;
+ if (i17 << 24 >> 24 == 0) {
+ i20 = 0;
+ STACKTOP = i1;
+ return i20 | 0;
+ }
+ if ((HEAP8[i4 + 4 | 0] | 0) == 0) {
+ i2 = (i13 & 255) << 16 | (i10 & 255) << 24 | (i15 & 255) << 8 | i18 & 255;
+ i3 = (i11 & 255) << 8 | (i9 & 255) << 16 | i17 & 255 | HEAPU8[i8] << 24;
+ if ((i3 | 0) == (i2 | 0)) {
+ i4 = 0;
+ } else {
+ do {
+ i16 = i16 + 1 | 0;
+ i4 = HEAP8[i16] | 0;
+ i3 = i4 & 255 | i3 << 8;
+ i4 = i4 << 24 >> 24 == 0;
+ } while (!(i4 | (i3 | 0) == (i2 | 0)));
+ }
+ i20 = i4 ? 0 : i16 + -3 | 0;
+ STACKTOP = i1;
+ return i20 | 0;
+ }
+ HEAP32[i6 + 0 >> 2] = 0;
+ HEAP32[i6 + 4 >> 2] = 0;
+ HEAP32[i6 + 8 >> 2] = 0;
+ HEAP32[i6 + 12 >> 2] = 0;
+ HEAP32[i6 + 16 >> 2] = 0;
+ HEAP32[i6 + 20 >> 2] = 0;
+ HEAP32[i6 + 24 >> 2] = 0;
+ HEAP32[i6 + 28 >> 2] = 0;
+ i9 = 0;
+ while (1) {
+ if ((HEAP8[i8 + i9 | 0] | 0) == 0) {
+ i14 = 0;
+ i12 = 80;
+ break;
+ }
+ i20 = i10 & 255;
+ i3 = i6 + (i20 >>> 5 << 2) | 0;
+ HEAP32[i3 >> 2] = HEAP32[i3 >> 2] | 1 << (i20 & 31);
+ i3 = i9 + 1 | 0;
+ HEAP32[i2 + (i20 << 2) >> 2] = i3;
+ i10 = HEAP8[i4 + i3 | 0] | 0;
+ if (i10 << 24 >> 24 == 0) {
+ break;
+ } else {
+ i9 = i3;
+ }
+ }
+ if ((i12 | 0) == 80) {
+ STACKTOP = i1;
+ return i14 | 0;
+ }
+ L49 : do {
+ if (i3 >>> 0 > 1) {
+ i14 = 1;
+ i11 = -1;
+ i12 = 0;
+ L50 : while (1) {
+ i10 = 1;
+ while (1) {
+ i13 = i14;
+ L54 : while (1) {
+ i14 = 1;
+ while (1) {
+ i15 = HEAP8[i4 + (i14 + i11) | 0] | 0;
+ i16 = HEAP8[i4 + i13 | 0] | 0;
+ if (!(i15 << 24 >> 24 == i16 << 24 >> 24)) {
+ break L54;
+ }
+ i15 = i14 + 1 | 0;
+ if ((i14 | 0) == (i10 | 0)) {
+ break;
+ }
+ i13 = i15 + i12 | 0;
+ if (i13 >>> 0 < i3 >>> 0) {
+ i14 = i15;
+ } else {
+ break L50;
+ }
+ }
+ i12 = i12 + i10 | 0;
+ i13 = i12 + 1 | 0;
+ if (!(i13 >>> 0 < i3 >>> 0)) {
+ break L50;
+ }
+ }
+ i10 = i13 - i11 | 0;
+ if (!((i15 & 255) > (i16 & 255))) {
+ break;
+ }
+ i14 = i13 + 1 | 0;
+ if (i14 >>> 0 < i3 >>> 0) {
+ i12 = i13;
+ } else {
+ break L50;
+ }
+ }
+ i14 = i12 + 2 | 0;
+ if (i14 >>> 0 < i3 >>> 0) {
+ i11 = i12;
+ i12 = i12 + 1 | 0;
+ } else {
+ i11 = i12;
+ i10 = 1;
+ break;
+ }
+ }
+ i16 = 1;
+ i12 = -1;
+ i14 = 0;
+ while (1) {
+ i13 = 1;
+ while (1) {
+ i15 = i16;
+ L69 : while (1) {
+ i18 = 1;
+ while (1) {
+ i17 = HEAP8[i4 + (i18 + i12) | 0] | 0;
+ i16 = HEAP8[i4 + i15 | 0] | 0;
+ if (!(i17 << 24 >> 24 == i16 << 24 >> 24)) {
+ break L69;
+ }
+ i16 = i18 + 1 | 0;
+ if ((i18 | 0) == (i13 | 0)) {
+ break;
+ }
+ i15 = i16 + i14 | 0;
+ if (i15 >>> 0 < i3 >>> 0) {
+ i18 = i16;
+ } else {
+ i14 = i12;
+ break L49;
+ }
+ }
+ i14 = i14 + i13 | 0;
+ i15 = i14 + 1 | 0;
+ if (!(i15 >>> 0 < i3 >>> 0)) {
+ i14 = i12;
+ break L49;
+ }
+ }
+ i13 = i15 - i12 | 0;
+ if (!((i17 & 255) < (i16 & 255))) {
+ break;
+ }
+ i16 = i15 + 1 | 0;
+ if (i16 >>> 0 < i3 >>> 0) {
+ i14 = i15;
+ } else {
+ i14 = i12;
+ break L49;
+ }
+ }
+ i16 = i14 + 2 | 0;
+ if (i16 >>> 0 < i3 >>> 0) {
+ i12 = i14;
+ i14 = i14 + 1 | 0;
+ } else {
+ i13 = 1;
+ break;
+ }
+ }
+ } else {
+ i11 = -1;
+ i14 = -1;
+ i10 = 1;
+ i13 = 1;
+ }
+ } while (0);
+ i15 = (i14 + 1 | 0) >>> 0 > (i11 + 1 | 0) >>> 0;
+ i12 = i15 ? i13 : i10;
+ i11 = i15 ? i14 : i11;
+ i10 = i11 + 1 | 0;
+ if ((_memcmp(i4, i4 + i12 | 0, i10) | 0) == 0) {
+ i15 = i3 - i12 | 0;
+ i16 = i3 | 63;
+ if ((i3 | 0) != (i12 | 0)) {
+ i14 = i8;
+ i13 = 0;
+ i17 = i8;
+ L82 : while (1) {
+ i18 = i14;
+ do {
+ if ((i17 - i18 | 0) >>> 0 < i3 >>> 0) {
+ i19 = _memchr(i17, 0, i16) | 0;
+ if ((i19 | 0) != 0) {
+ if ((i19 - i18 | 0) >>> 0 < i3 >>> 0) {
+ i14 = 0;
+ i12 = 80;
+ break L82;
+ } else {
+ i17 = i19;
+ break;
+ }
+ } else {
+ i17 = i17 + i16 | 0;
+ break;
+ }
+ }
+ } while (0);
+ i18 = HEAPU8[i14 + i9 | 0] | 0;
+ if ((1 << (i18 & 31) & HEAP32[i6 + (i18 >>> 5 << 2) >> 2] | 0) == 0) {
+ i14 = i14 + i3 | 0;
+ i13 = 0;
+ continue;
+ }
+ i20 = HEAP32[i2 + (i18 << 2) >> 2] | 0;
+ i18 = i3 - i20 | 0;
+ if ((i3 | 0) != (i20 | 0)) {
+ i14 = i14 + ((i13 | 0) != 0 & i18 >>> 0 < i12 >>> 0 ? i15 : i18) | 0;
+ i13 = 0;
+ continue;
+ }
+ i20 = i10 >>> 0 > i13 >>> 0 ? i10 : i13;
+ i18 = HEAP8[i4 + i20 | 0] | 0;
+ L96 : do {
+ if (i18 << 24 >> 24 == 0) {
+ i19 = i10;
+ } else {
+ while (1) {
+ i19 = i20 + 1 | 0;
+ if (!(i18 << 24 >> 24 == (HEAP8[i14 + i20 | 0] | 0))) {
+ break;
+ }
+ i18 = HEAP8[i4 + i19 | 0] | 0;
+ if (i18 << 24 >> 24 == 0) {
+ i19 = i10;
+ break L96;
+ } else {
+ i20 = i19;
+ }
+ }
+ i14 = i14 + (i20 - i11) | 0;
+ i13 = 0;
+ continue L82;
+ }
+ } while (0);
+ while (1) {
+ if (!(i19 >>> 0 > i13 >>> 0)) {
+ break;
+ }
+ i18 = i19 + -1 | 0;
+ if ((HEAP8[i4 + i18 | 0] | 0) == (HEAP8[i14 + i18 | 0] | 0)) {
+ i19 = i18;
+ } else {
+ break;
+ }
+ }
+ if ((i19 | 0) == (i13 | 0)) {
+ i12 = 80;
+ break;
+ }
+ i14 = i14 + i12 | 0;
+ i13 = i15;
+ }
+ if ((i12 | 0) == 80) {
+ STACKTOP = i1;
+ return i14 | 0;
+ }
+ } else {
+ i5 = i16;
+ i7 = i3;
+ }
+ } else {
+ i7 = i3 - i11 + -1 | 0;
+ i5 = i3 | 63;
+ i7 = (i11 >>> 0 > i7 >>> 0 ? i11 : i7) + 1 | 0;
+ }
+ i12 = i4 + i10 | 0;
+ i14 = i8;
+ L111 : while (1) {
+ i13 = i14;
+ do {
+ if ((i8 - i13 | 0) >>> 0 < i3 >>> 0) {
+ i15 = _memchr(i8, 0, i5) | 0;
+ if ((i15 | 0) != 0) {
+ if ((i15 - i13 | 0) >>> 0 < i3 >>> 0) {
+ i14 = 0;
+ i12 = 80;
+ break L111;
+ } else {
+ i8 = i15;
+ break;
+ }
+ } else {
+ i8 = i8 + i5 | 0;
+ break;
+ }
+ }
+ } while (0);
+ i13 = HEAPU8[i14 + i9 | 0] | 0;
+ if ((1 << (i13 & 31) & HEAP32[i6 + (i13 >>> 5 << 2) >> 2] | 0) == 0) {
+ i14 = i14 + i3 | 0;
+ continue;
+ }
+ i13 = HEAP32[i2 + (i13 << 2) >> 2] | 0;
+ if ((i3 | 0) != (i13 | 0)) {
+ i14 = i14 + (i3 - i13) | 0;
+ continue;
+ }
+ i15 = HEAP8[i12] | 0;
+ L125 : do {
+ if (i15 << 24 >> 24 == 0) {
+ i13 = i10;
+ } else {
+ i16 = i10;
+ while (1) {
+ i13 = i16 + 1 | 0;
+ if (!(i15 << 24 >> 24 == (HEAP8[i14 + i16 | 0] | 0))) {
+ break;
+ }
+ i15 = HEAP8[i4 + i13 | 0] | 0;
+ if (i15 << 24 >> 24 == 0) {
+ i13 = i10;
+ break L125;
+ } else {
+ i16 = i13;
+ }
+ }
+ i14 = i14 + (i16 - i11) | 0;
+ continue L111;
+ }
+ } while (0);
+ do {
+ if ((i13 | 0) == 0) {
+ i12 = 80;
+ break L111;
+ }
+ i13 = i13 + -1 | 0;
+ } while ((HEAP8[i4 + i13 | 0] | 0) == (HEAP8[i14 + i13 | 0] | 0));
+ i14 = i14 + i7 | 0;
+ }
+ if ((i12 | 0) == 80) {
+ STACKTOP = i1;
+ return i14 | 0;
+ }
+ return 0;
+}
+function _str_format(i2) {
+ i2 = i2 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, d21 = 0.0, i22 = 0;
+ i12 = STACKTOP;
+ STACKTOP = STACKTOP + 1104 | 0;
+ i4 = i12;
+ i7 = i12 + 1060 | 0;
+ i9 = i12 + 1082 | 0;
+ i20 = i12 + 1056 | 0;
+ i10 = i12 + 16 | 0;
+ i5 = i12 + 1064 | 0;
+ i6 = i12 + 8 | 0;
+ i8 = _lua_gettop(i2) | 0;
+ i16 = _luaL_checklstring(i2, 1, i20) | 0;
+ i20 = HEAP32[i20 >> 2] | 0;
+ i3 = i16 + i20 | 0;
+ _luaL_buffinit(i2, i10);
+ L1 : do {
+ if ((i20 | 0) > 0) {
+ i1 = i10 + 8 | 0;
+ i13 = i10 + 4 | 0;
+ i14 = i5 + 1 | 0;
+ i19 = 1;
+ L3 : while (1) {
+ while (1) {
+ i15 = HEAP8[i16] | 0;
+ if (i15 << 24 >> 24 == 37) {
+ i18 = i16 + 1 | 0;
+ if ((HEAP8[i18] | 0) != 37) {
+ break;
+ }
+ i15 = HEAP32[i1 >> 2] | 0;
+ if (i15 >>> 0 < (HEAP32[i13 >> 2] | 0) >>> 0) {
+ i17 = 37;
+ } else {
+ _luaL_prepbuffsize(i10, 1) | 0;
+ i15 = HEAP32[i1 >> 2] | 0;
+ i17 = HEAP8[i18] | 0;
+ }
+ HEAP32[i1 >> 2] = i15 + 1;
+ HEAP8[(HEAP32[i10 >> 2] | 0) + i15 | 0] = i17;
+ i16 = i16 + 2 | 0;
+ } else {
+ i17 = HEAP32[i1 >> 2] | 0;
+ if (!(i17 >>> 0 < (HEAP32[i13 >> 2] | 0) >>> 0)) {
+ _luaL_prepbuffsize(i10, 1) | 0;
+ i17 = HEAP32[i1 >> 2] | 0;
+ i15 = HEAP8[i16] | 0;
+ }
+ HEAP32[i1 >> 2] = i17 + 1;
+ HEAP8[(HEAP32[i10 >> 2] | 0) + i17 | 0] = i15;
+ i16 = i16 + 1 | 0;
+ }
+ if (!(i16 >>> 0 < i3 >>> 0)) {
+ break L1;
+ }
+ }
+ i17 = _luaL_prepbuffsize(i10, 512) | 0;
+ i15 = i19 + 1 | 0;
+ if ((i19 | 0) >= (i8 | 0)) {
+ _luaL_argerror(i2, i15, 7648) | 0;
+ }
+ i19 = HEAP8[i18] | 0;
+ L22 : do {
+ if (i19 << 24 >> 24 == 0) {
+ i19 = 0;
+ i20 = i18;
+ } else {
+ i20 = i18;
+ while (1) {
+ i16 = i20 + 1 | 0;
+ if ((_memchr(7800, i19 << 24 >> 24, 6) | 0) == 0) {
+ break L22;
+ }
+ i19 = HEAP8[i16] | 0;
+ if (i19 << 24 >> 24 == 0) {
+ i19 = 0;
+ i20 = i16;
+ break;
+ } else {
+ i20 = i16;
+ }
+ }
+ }
+ } while (0);
+ i16 = i18;
+ if ((i20 - i16 | 0) >>> 0 > 5) {
+ _luaL_error(i2, 7808, i4) | 0;
+ i19 = HEAP8[i20] | 0;
+ }
+ i19 = ((i19 & 255) + -48 | 0) >>> 0 < 10 ? i20 + 1 | 0 : i20;
+ i19 = ((HEAPU8[i19] | 0) + -48 | 0) >>> 0 < 10 ? i19 + 1 | 0 : i19;
+ i20 = HEAP8[i19] | 0;
+ if (i20 << 24 >> 24 == 46) {
+ i20 = i19 + 1 | 0;
+ i19 = ((HEAPU8[i20] | 0) + -48 | 0) >>> 0 < 10 ? i19 + 2 | 0 : i20;
+ i19 = ((HEAPU8[i19] | 0) + -48 | 0) >>> 0 < 10 ? i19 + 1 | 0 : i19;
+ i20 = HEAP8[i19] | 0;
+ }
+ if (((i20 & 255) + -48 | 0) >>> 0 < 10) {
+ _luaL_error(i2, 7840, i4) | 0;
+ }
+ HEAP8[i5] = 37;
+ i16 = i19 - i16 | 0;
+ _memcpy(i14 | 0, i18 | 0, i16 + 1 | 0) | 0;
+ HEAP8[i5 + (i16 + 2) | 0] = 0;
+ i16 = i19 + 1 | 0;
+ i18 = HEAP8[i19] | 0;
+ L36 : do {
+ switch (i18 | 0) {
+ case 115:
+ {
+ i18 = _luaL_tolstring(i2, i15, i6) | 0;
+ if ((_strchr(i5, 46) | 0) == 0 ? (HEAP32[i6 >> 2] | 0) >>> 0 > 99 : 0) {
+ _luaL_addvalue(i10);
+ i17 = 0;
+ break L36;
+ }
+ HEAP32[i4 >> 2] = i18;
+ i17 = _sprintf(i17 | 0, i5 | 0, i4 | 0) | 0;
+ _lua_settop(i2, -2);
+ break;
+ }
+ case 88:
+ case 120:
+ case 117:
+ case 111:
+ {
+ d21 = +_luaL_checknumber(i2, i15);
+ i18 = ~~d21 >>> 0;
+ d21 = d21 - +(i18 >>> 0);
+ if (!(d21 > -1.0 & d21 < 1.0)) {
+ _luaL_argerror(i2, i15, 7696) | 0;
+ }
+ i20 = _strlen(i5 | 0) | 0;
+ i22 = i5 + (i20 + -1) | 0;
+ i19 = HEAP8[i22] | 0;
+ HEAP8[i22] = 108;
+ HEAP8[i22 + 1 | 0] = 0;
+ HEAP8[i5 + i20 | 0] = i19;
+ HEAP8[i5 + (i20 + 1) | 0] = 0;
+ HEAP32[i4 >> 2] = i18;
+ i17 = _sprintf(i17 | 0, i5 | 0, i4 | 0) | 0;
+ break;
+ }
+ case 99:
+ {
+ HEAP32[i4 >> 2] = _luaL_checkinteger(i2, i15) | 0;
+ i17 = _sprintf(i17 | 0, i5 | 0, i4 | 0) | 0;
+ break;
+ }
+ case 113:
+ {
+ i17 = _luaL_checklstring(i2, i15, i7) | 0;
+ i18 = HEAP32[i1 >> 2] | 0;
+ if (!(i18 >>> 0 < (HEAP32[i13 >> 2] | 0) >>> 0)) {
+ _luaL_prepbuffsize(i10, 1) | 0;
+ i18 = HEAP32[i1 >> 2] | 0;
+ }
+ HEAP32[i1 >> 2] = i18 + 1;
+ HEAP8[(HEAP32[i10 >> 2] | 0) + i18 | 0] = 34;
+ i22 = HEAP32[i7 >> 2] | 0;
+ HEAP32[i7 >> 2] = i22 + -1;
+ if ((i22 | 0) != 0) {
+ while (1) {
+ i18 = HEAP8[i17] | 0;
+ do {
+ if (i18 << 24 >> 24 == 10 | i18 << 24 >> 24 == 92 | i18 << 24 >> 24 == 34) {
+ i18 = HEAP32[i1 >> 2] | 0;
+ if (!(i18 >>> 0 < (HEAP32[i13 >> 2] | 0) >>> 0)) {
+ _luaL_prepbuffsize(i10, 1) | 0;
+ i18 = HEAP32[i1 >> 2] | 0;
+ }
+ HEAP32[i1 >> 2] = i18 + 1;
+ HEAP8[(HEAP32[i10 >> 2] | 0) + i18 | 0] = 92;
+ i18 = HEAP32[i1 >> 2] | 0;
+ if (!(i18 >>> 0 < (HEAP32[i13 >> 2] | 0) >>> 0)) {
+ _luaL_prepbuffsize(i10, 1) | 0;
+ i18 = HEAP32[i1 >> 2] | 0;
+ }
+ i22 = HEAP8[i17] | 0;
+ HEAP32[i1 >> 2] = i18 + 1;
+ HEAP8[(HEAP32[i10 >> 2] | 0) + i18 | 0] = i22;
+ } else if (i18 << 24 >> 24 == 0) {
+ i18 = 0;
+ i11 = 44;
+ } else {
+ if ((_iscntrl(i18 & 255 | 0) | 0) != 0) {
+ i18 = HEAP8[i17] | 0;
+ i11 = 44;
+ break;
+ }
+ i18 = HEAP32[i1 >> 2] | 0;
+ if (!(i18 >>> 0 < (HEAP32[i13 >> 2] | 0) >>> 0)) {
+ _luaL_prepbuffsize(i10, 1) | 0;
+ i18 = HEAP32[i1 >> 2] | 0;
+ }
+ i22 = HEAP8[i17] | 0;
+ HEAP32[i1 >> 2] = i18 + 1;
+ HEAP8[(HEAP32[i10 >> 2] | 0) + i18 | 0] = i22;
+ }
+ } while (0);
+ if ((i11 | 0) == 44) {
+ i11 = 0;
+ i18 = i18 & 255;
+ if (((HEAPU8[i17 + 1 | 0] | 0) + -48 | 0) >>> 0 < 10) {
+ HEAP32[i4 >> 2] = i18;
+ _sprintf(i9 | 0, 7792, i4 | 0) | 0;
+ } else {
+ HEAP32[i4 >> 2] = i18;
+ _sprintf(i9 | 0, 7784, i4 | 0) | 0;
+ }
+ _luaL_addstring(i10, i9);
+ }
+ i22 = HEAP32[i7 >> 2] | 0;
+ HEAP32[i7 >> 2] = i22 + -1;
+ if ((i22 | 0) == 0) {
+ break;
+ } else {
+ i17 = i17 + 1 | 0;
+ }
+ }
+ }
+ i17 = HEAP32[i1 >> 2] | 0;
+ if (!(i17 >>> 0 < (HEAP32[i13 >> 2] | 0) >>> 0)) {
+ _luaL_prepbuffsize(i10, 1) | 0;
+ i17 = HEAP32[i1 >> 2] | 0;
+ }
+ HEAP32[i1 >> 2] = i17 + 1;
+ HEAP8[(HEAP32[i10 >> 2] | 0) + i17 | 0] = 34;
+ i17 = 0;
+ break;
+ }
+ case 71:
+ case 103:
+ case 102:
+ case 69:
+ case 101:
+ {
+ HEAP8[i5 + (_strlen(i5 | 0) | 0) | 0] = 0;
+ d21 = +_luaL_checknumber(i2, i15);
+ HEAPF64[tempDoublePtr >> 3] = d21;
+ HEAP32[i4 >> 2] = HEAP32[tempDoublePtr >> 2];
+ HEAP32[i4 + 4 >> 2] = HEAP32[tempDoublePtr + 4 >> 2];
+ i17 = _sprintf(i17 | 0, i5 | 0, i4 | 0) | 0;
+ break;
+ }
+ case 105:
+ case 100:
+ {
+ d21 = +_luaL_checknumber(i2, i15);
+ i18 = ~~d21;
+ d21 = d21 - +(i18 | 0);
+ if (!(d21 > -1.0 & d21 < 1.0)) {
+ _luaL_argerror(i2, i15, 7664) | 0;
+ }
+ i22 = _strlen(i5 | 0) | 0;
+ i19 = i5 + (i22 + -1) | 0;
+ i20 = HEAP8[i19] | 0;
+ HEAP8[i19] = 108;
+ HEAP8[i19 + 1 | 0] = 0;
+ HEAP8[i5 + i22 | 0] = i20;
+ HEAP8[i5 + (i22 + 1) | 0] = 0;
+ HEAP32[i4 >> 2] = i18;
+ i17 = _sprintf(i17 | 0, i5 | 0, i4 | 0) | 0;
+ break;
+ }
+ default:
+ {
+ break L3;
+ }
+ }
+ } while (0);
+ HEAP32[i1 >> 2] = (HEAP32[i1 >> 2] | 0) + i17;
+ if (i16 >>> 0 < i3 >>> 0) {
+ i19 = i15;
+ } else {
+ break L1;
+ }
+ }
+ HEAP32[i4 >> 2] = i18;
+ i22 = _luaL_error(i2, 7744, i4) | 0;
+ STACKTOP = i12;
+ return i22 | 0;
+ }
+ } while (0);
+ _luaL_pushresult(i10);
+ i22 = 1;
+ STACKTOP = i12;
+ return i22 | 0;
+}
+function _luaD_precall(i3, i17, i4) {
+ i3 = i3 | 0;
+ i17 = i17 | 0;
+ i4 = i4 | 0;
+ var i1 = 0, i2 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i8 = i1;
+ i6 = i3 + 28 | 0;
+ i2 = i3 + 8 | 0;
+ i13 = i3 + 24 | 0;
+ i14 = i3 + 32 | 0;
+ while (1) {
+ i15 = HEAP32[i6 >> 2] | 0;
+ i16 = i17;
+ i12 = i15;
+ i5 = i16 - i12 | 0;
+ i11 = HEAP32[i17 + 8 >> 2] & 63;
+ if ((i11 | 0) == 38) {
+ i11 = 4;
+ break;
+ } else if ((i11 | 0) == 22) {
+ i11 = 3;
+ break;
+ } else if ((i11 | 0) == 6) {
+ i11 = 31;
+ break;
+ }
+ i11 = _luaT_gettmbyobj(i3, i17, 16) | 0;
+ i15 = i16 - (HEAP32[i6 >> 2] | 0) | 0;
+ i16 = i11 + 8 | 0;
+ if ((HEAP32[i16 >> 2] & 15 | 0) != 6) {
+ i11 = 54;
+ break;
+ }
+ i19 = HEAP32[i2 >> 2] | 0;
+ if (i19 >>> 0 > i17 >>> 0) {
+ while (1) {
+ i18 = i19 + -16 | 0;
+ i22 = i18;
+ i21 = HEAP32[i22 + 4 >> 2] | 0;
+ i20 = i19;
+ HEAP32[i20 >> 2] = HEAP32[i22 >> 2];
+ HEAP32[i20 + 4 >> 2] = i21;
+ HEAP32[i19 + 8 >> 2] = HEAP32[i19 + -8 >> 2];
+ if (i18 >>> 0 > i17 >>> 0) {
+ i19 = i18;
+ } else {
+ break;
+ }
+ }
+ i19 = HEAP32[i2 >> 2] | 0;
+ }
+ i17 = i19 + 16 | 0;
+ HEAP32[i2 >> 2] = i17;
+ if (((HEAP32[i13 >> 2] | 0) - i17 | 0) < 16) {
+ i18 = HEAP32[i14 >> 2] | 0;
+ if ((i18 | 0) > 1e6) {
+ i11 = 60;
+ break;
+ }
+ i17 = (i17 - (HEAP32[i6 >> 2] | 0) >> 4) + 5 | 0;
+ i18 = i18 << 1;
+ i18 = (i18 | 0) > 1e6 ? 1e6 : i18;
+ i17 = (i18 | 0) < (i17 | 0) ? i17 : i18;
+ if ((i17 | 0) > 1e6) {
+ i11 = 62;
+ break;
+ }
+ _luaD_reallocstack(i3, i17);
+ }
+ i22 = HEAP32[i6 >> 2] | 0;
+ i17 = i22 + i15 | 0;
+ i19 = i11;
+ i20 = HEAP32[i19 + 4 >> 2] | 0;
+ i21 = i17;
+ HEAP32[i21 >> 2] = HEAP32[i19 >> 2];
+ HEAP32[i21 + 4 >> 2] = i20;
+ HEAP32[i22 + (i15 + 8) >> 2] = HEAP32[i16 >> 2];
+ }
+ if ((i11 | 0) == 3) {
+ i10 = i17;
+ } else if ((i11 | 0) == 4) {
+ i10 = (HEAP32[i17 >> 2] | 0) + 12 | 0;
+ } else if ((i11 | 0) == 31) {
+ i10 = HEAP32[(HEAP32[i17 >> 2] | 0) + 12 >> 2] | 0;
+ i18 = HEAP32[i2 >> 2] | 0;
+ i16 = i18;
+ i11 = i10 + 78 | 0;
+ i17 = HEAPU8[i11] | 0;
+ do {
+ if (((HEAP32[i13 >> 2] | 0) - i16 >> 4 | 0) <= (i17 | 0)) {
+ i13 = HEAP32[i14 >> 2] | 0;
+ if ((i13 | 0) > 1e6) {
+ _luaD_throw(i3, 6);
+ }
+ i12 = i17 + 5 + (i16 - i12 >> 4) | 0;
+ i13 = i13 << 1;
+ i13 = (i13 | 0) > 1e6 ? 1e6 : i13;
+ i12 = (i13 | 0) < (i12 | 0) ? i12 : i13;
+ if ((i12 | 0) > 1e6) {
+ _luaD_reallocstack(i3, 1000200);
+ _luaG_runerror(i3, 2224, i8);
+ } else {
+ _luaD_reallocstack(i3, i12);
+ i7 = HEAP32[i6 >> 2] | 0;
+ i9 = HEAP32[i2 >> 2] | 0;
+ break;
+ }
+ } else {
+ i7 = i15;
+ i9 = i18;
+ }
+ } while (0);
+ i6 = i7 + i5 | 0;
+ i22 = i9 - i6 >> 4;
+ i12 = i22 + -1 | 0;
+ i8 = i10 + 76 | 0;
+ i13 = HEAP8[i8] | 0;
+ if ((i22 | 0) > (i13 & 255 | 0)) {
+ i8 = i13;
+ } else {
+ i13 = i9;
+ while (1) {
+ i9 = i13 + 16 | 0;
+ HEAP32[i2 >> 2] = i9;
+ HEAP32[i13 + 8 >> 2] = 0;
+ i12 = i12 + 1 | 0;
+ i13 = HEAP8[i8] | 0;
+ if ((i12 | 0) < (i13 & 255 | 0)) {
+ i13 = i9;
+ } else {
+ i8 = i13;
+ break;
+ }
+ }
+ }
+ if ((HEAP8[i10 + 77 | 0] | 0) != 0) {
+ i5 = i8 & 255;
+ if (!(i8 << 24 >> 24 == 0) ? (i22 = 0 - i12 | 0, HEAP32[i2 >> 2] = i9 + 16, i19 = i9 + (i22 << 4) | 0, i20 = HEAP32[i19 + 4 >> 2] | 0, i21 = i9, HEAP32[i21 >> 2] = HEAP32[i19 >> 2], HEAP32[i21 + 4 >> 2] = i20, i22 = i9 + (i22 << 4) + 8 | 0, HEAP32[i9 + 8 >> 2] = HEAP32[i22 >> 2], HEAP32[i22 >> 2] = 0, (i8 & 255) > 1) : 0) {
+ i7 = 1;
+ do {
+ i21 = HEAP32[i2 >> 2] | 0;
+ i22 = i7 - i12 | 0;
+ HEAP32[i2 >> 2] = i21 + 16;
+ i18 = i9 + (i22 << 4) | 0;
+ i19 = HEAP32[i18 + 4 >> 2] | 0;
+ i20 = i21;
+ HEAP32[i20 >> 2] = HEAP32[i18 >> 2];
+ HEAP32[i20 + 4 >> 2] = i19;
+ i22 = i9 + (i22 << 4) + 8 | 0;
+ HEAP32[i21 + 8 >> 2] = HEAP32[i22 >> 2];
+ HEAP32[i22 >> 2] = 0;
+ i7 = i7 + 1 | 0;
+ } while ((i7 | 0) < (i5 | 0));
+ }
+ } else {
+ i9 = i7 + (i5 + 16) | 0;
+ }
+ i7 = i3 + 16 | 0;
+ i5 = HEAP32[(HEAP32[i7 >> 2] | 0) + 12 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ i5 = _luaE_extendCI(i3) | 0;
+ }
+ HEAP32[i7 >> 2] = i5;
+ HEAP16[i5 + 16 >> 1] = i4;
+ HEAP32[i5 >> 2] = i6;
+ HEAP32[i5 + 24 >> 2] = i9;
+ i22 = i9 + (HEAPU8[i11] << 4) | 0;
+ HEAP32[i5 + 4 >> 2] = i22;
+ i4 = i5 + 28 | 0;
+ HEAP32[i4 >> 2] = HEAP32[i10 + 12 >> 2];
+ i6 = i5 + 18 | 0;
+ HEAP8[i6] = 1;
+ HEAP32[i2 >> 2] = i22;
+ if ((HEAP32[(HEAP32[i3 + 12 >> 2] | 0) + 12 >> 2] | 0) > 0) {
+ _luaC_step(i3);
+ }
+ if ((HEAP8[i3 + 40 | 0] & 1) == 0) {
+ i22 = 0;
+ STACKTOP = i1;
+ return i22 | 0;
+ }
+ HEAP32[i4 >> 2] = (HEAP32[i4 >> 2] | 0) + 4;
+ i2 = HEAP32[i5 + 8 >> 2] | 0;
+ if (!((HEAP8[i2 + 18 | 0] & 1) == 0) ? (HEAP32[(HEAP32[i2 + 28 >> 2] | 0) + -4 >> 2] & 63 | 0) == 30 : 0) {
+ HEAP8[i6] = HEAPU8[i6] | 64;
+ i2 = 4;
+ } else {
+ i2 = 0;
+ }
+ _luaD_hook(i3, i2, -1);
+ HEAP32[i4 >> 2] = (HEAP32[i4 >> 2] | 0) + -4;
+ i22 = 0;
+ STACKTOP = i1;
+ return i22 | 0;
+ } else if ((i11 | 0) == 54) {
+ _luaG_typeerror(i3, i17, 2520);
+ } else if ((i11 | 0) == 60) {
+ _luaD_throw(i3, 6);
+ } else if ((i11 | 0) == 62) {
+ _luaD_reallocstack(i3, 1000200);
+ _luaG_runerror(i3, 2224, i8);
+ }
+ i7 = HEAP32[i10 >> 2] | 0;
+ i9 = HEAP32[i2 >> 2] | 0;
+ do {
+ if (((HEAP32[i13 >> 2] | 0) - i9 | 0) < 336) {
+ i10 = HEAP32[i14 >> 2] | 0;
+ if ((i10 | 0) > 1e6) {
+ _luaD_throw(i3, 6);
+ }
+ i9 = (i9 - i12 >> 4) + 25 | 0;
+ i10 = i10 << 1;
+ i10 = (i10 | 0) > 1e6 ? 1e6 : i10;
+ i9 = (i10 | 0) < (i9 | 0) ? i9 : i10;
+ if ((i9 | 0) > 1e6) {
+ _luaD_reallocstack(i3, 1000200);
+ _luaG_runerror(i3, 2224, i8);
+ } else {
+ _luaD_reallocstack(i3, i9);
+ break;
+ }
+ }
+ } while (0);
+ i8 = i3 + 16 | 0;
+ i9 = HEAP32[(HEAP32[i8 >> 2] | 0) + 12 >> 2] | 0;
+ if ((i9 | 0) == 0) {
+ i9 = _luaE_extendCI(i3) | 0;
+ }
+ HEAP32[i8 >> 2] = i9;
+ HEAP16[i9 + 16 >> 1] = i4;
+ HEAP32[i9 >> 2] = (HEAP32[i6 >> 2] | 0) + i5;
+ HEAP32[i9 + 4 >> 2] = (HEAP32[i2 >> 2] | 0) + 320;
+ HEAP8[i9 + 18 | 0] = 0;
+ if ((HEAP32[(HEAP32[i3 + 12 >> 2] | 0) + 12 >> 2] | 0) > 0) {
+ _luaC_step(i3);
+ }
+ i5 = i3 + 40 | 0;
+ if (!((HEAP8[i5] & 1) == 0)) {
+ _luaD_hook(i3, 0, -1);
+ }
+ i7 = FUNCTION_TABLE_ii[i7 & 255](i3) | 0;
+ i7 = (HEAP32[i2 >> 2] | 0) + (0 - i7 << 4) | 0;
+ i4 = HEAP32[i8 >> 2] | 0;
+ i5 = HEAPU8[i5] | 0;
+ if ((i5 & 6 | 0) == 0) {
+ i5 = i7;
+ i6 = i4 + 8 | 0;
+ } else {
+ if ((i5 & 2 | 0) == 0) {
+ i5 = i7;
+ } else {
+ i5 = i7 - (HEAP32[i6 >> 2] | 0) | 0;
+ _luaD_hook(i3, 1, -1);
+ i5 = (HEAP32[i6 >> 2] | 0) + i5 | 0;
+ }
+ i6 = i4 + 8 | 0;
+ HEAP32[i3 + 20 >> 2] = HEAP32[(HEAP32[i6 >> 2] | 0) + 28 >> 2];
+ }
+ i3 = HEAP32[i4 >> 2] | 0;
+ i4 = HEAP16[i4 + 16 >> 1] | 0;
+ HEAP32[i8 >> 2] = HEAP32[i6 >> 2];
+ L82 : do {
+ if (!(i4 << 16 >> 16 == 0)) {
+ i4 = i4 << 16 >> 16;
+ while (1) {
+ if (!(i5 >>> 0 < (HEAP32[i2 >> 2] | 0) >>> 0)) {
+ break;
+ }
+ i6 = i3 + 16 | 0;
+ i20 = i5;
+ i21 = HEAP32[i20 + 4 >> 2] | 0;
+ i22 = i3;
+ HEAP32[i22 >> 2] = HEAP32[i20 >> 2];
+ HEAP32[i22 + 4 >> 2] = i21;
+ HEAP32[i3 + 8 >> 2] = HEAP32[i5 + 8 >> 2];
+ i4 = i4 + -1 | 0;
+ if ((i4 | 0) == 0) {
+ i3 = i6;
+ break L82;
+ }
+ i5 = i5 + 16 | 0;
+ i3 = i6;
+ }
+ if ((i4 | 0) > 0) {
+ i5 = i4;
+ i6 = i3;
+ while (1) {
+ i5 = i5 + -1 | 0;
+ HEAP32[i6 + 8 >> 2] = 0;
+ if ((i5 | 0) <= 0) {
+ break;
+ } else {
+ i6 = i6 + 16 | 0;
+ }
+ }
+ i3 = i3 + (i4 << 4) | 0;
+ }
+ }
+ } while (0);
+ HEAP32[i2 >> 2] = i3;
+ i22 = 1;
+ STACKTOP = i1;
+ return i22 | 0;
+}
+function _lua_getinfo(i1, i6, i29) {
+ i1 = i1 | 0;
+ i6 = i6 | 0;
+ i29 = i29 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0, i23 = 0, i24 = 0, i25 = 0, i26 = 0, i27 = 0, i28 = 0, i30 = 0, i31 = 0, i32 = 0, i33 = 0, i34 = 0, i35 = 0, i36 = 0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i2 = i3;
+ if ((HEAP8[i6] | 0) == 62) {
+ i10 = i1 + 8 | 0;
+ i7 = (HEAP32[i10 >> 2] | 0) + -16 | 0;
+ HEAP32[i10 >> 2] = i7;
+ i6 = i6 + 1 | 0;
+ i10 = 0;
+ } else {
+ i7 = HEAP32[i29 + 96 >> 2] | 0;
+ i10 = i7;
+ i7 = HEAP32[i7 >> 2] | 0;
+ }
+ i8 = i7 + 8 | 0;
+ if ((HEAP32[i8 >> 2] & 31 | 0) == 6) {
+ i9 = HEAP32[i7 >> 2] | 0;
+ } else {
+ i9 = 0;
+ }
+ i34 = HEAP8[i6] | 0;
+ L8 : do {
+ if (i34 << 24 >> 24 == 0) {
+ i33 = 1;
+ } else {
+ i12 = (i9 | 0) == 0;
+ i27 = i29 + 16 | 0;
+ i28 = i29 + 24 | 0;
+ i21 = i29 + 28 | 0;
+ i25 = i29 + 12 | 0;
+ i26 = i29 + 36 | 0;
+ i19 = i9 + 4 | 0;
+ i24 = i9 + 12 | 0;
+ i18 = (i10 | 0) == 0;
+ i23 = i29 + 20 | 0;
+ i17 = i10 + 18 | 0;
+ i22 = i10 + 28 | 0;
+ i15 = i29 + 32 | 0;
+ i14 = i29 + 34 | 0;
+ i13 = i29 + 33 | 0;
+ i11 = i9 + 6 | 0;
+ i16 = i29 + 35 | 0;
+ i20 = i29 + 8 | 0;
+ i30 = i29 + 4 | 0;
+ i29 = i10 + 8 | 0;
+ i31 = i1 + 12 | 0;
+ i32 = i6;
+ i33 = 1;
+ while (1) {
+ L12 : do {
+ switch (i34 << 24 >> 24 | 0) {
+ case 116:
+ {
+ if (i18) {
+ i34 = 0;
+ } else {
+ i34 = HEAPU8[i17] & 64;
+ }
+ HEAP8[i16] = i34;
+ break;
+ }
+ case 110:
+ {
+ L18 : do {
+ if ((!i18 ? (HEAP8[i17] & 64) == 0 : 0) ? (i5 = HEAP32[i29 >> 2] | 0, !((HEAP8[i5 + 18 | 0] & 1) == 0)) : 0) {
+ i36 = HEAP32[(HEAP32[HEAP32[i5 >> 2] >> 2] | 0) + 12 >> 2] | 0;
+ i35 = HEAP32[i36 + 12 >> 2] | 0;
+ i34 = ((HEAP32[i5 + 28 >> 2] | 0) - i35 >> 2) + -1 | 0;
+ i35 = HEAP32[i35 + (i34 << 2) >> 2] | 0;
+ switch (i35 & 63 | 0) {
+ case 10:
+ case 8:
+ {
+ i34 = 1;
+ i4 = 46;
+ break;
+ }
+ case 24:
+ {
+ i34 = 5;
+ i4 = 46;
+ break;
+ }
+ case 13:
+ {
+ i34 = 6;
+ i4 = 46;
+ break;
+ }
+ case 14:
+ {
+ i34 = 7;
+ i4 = 46;
+ break;
+ }
+ case 15:
+ {
+ i34 = 8;
+ i4 = 46;
+ break;
+ }
+ case 16:
+ {
+ i34 = 9;
+ i4 = 46;
+ break;
+ }
+ case 17:
+ {
+ i34 = 10;
+ i4 = 46;
+ break;
+ }
+ case 18:
+ {
+ i34 = 11;
+ i4 = 46;
+ break;
+ }
+ case 19:
+ {
+ i34 = 12;
+ i4 = 46;
+ break;
+ }
+ case 21:
+ {
+ i34 = 4;
+ i4 = 46;
+ break;
+ }
+ case 25:
+ {
+ i34 = 13;
+ i4 = 46;
+ break;
+ }
+ case 26:
+ {
+ i34 = 14;
+ i4 = 46;
+ break;
+ }
+ case 22:
+ {
+ i34 = 15;
+ i4 = 46;
+ break;
+ }
+ case 7:
+ case 6:
+ case 12:
+ {
+ i34 = 0;
+ i4 = 46;
+ break;
+ }
+ case 34:
+ {
+ i34 = 2120;
+ i35 = 2120;
+ break;
+ }
+ case 30:
+ case 29:
+ {
+ i36 = _getobjname(i36, i34, i35 >>> 6 & 255, i30) | 0;
+ HEAP32[i20 >> 2] = i36;
+ if ((i36 | 0) == 0) {
+ break L18;
+ } else {
+ break L12;
+ }
+ }
+ default:
+ {
+ i4 = 47;
+ break L18;
+ }
+ }
+ if ((i4 | 0) == 46) {
+ i4 = 0;
+ i34 = (HEAP32[(HEAP32[i31 >> 2] | 0) + (i34 << 2) + 184 >> 2] | 0) + 16 | 0;
+ i35 = 2136;
+ }
+ HEAP32[i30 >> 2] = i34;
+ HEAP32[i20 >> 2] = i35;
+ break L12;
+ } else {
+ i4 = 47;
+ }
+ } while (0);
+ if ((i4 | 0) == 47) {
+ i4 = 0;
+ HEAP32[i20 >> 2] = 0;
+ }
+ HEAP32[i20 >> 2] = 2112;
+ HEAP32[i30 >> 2] = 0;
+ break;
+ }
+ case 108:
+ {
+ if (!i18 ? !((HEAP8[i17] & 1) == 0) : 0) {
+ i35 = HEAP32[(HEAP32[HEAP32[i10 >> 2] >> 2] | 0) + 12 >> 2] | 0;
+ i34 = HEAP32[i35 + 20 >> 2] | 0;
+ if ((i34 | 0) == 0) {
+ i34 = 0;
+ } else {
+ i34 = HEAP32[i34 + (((HEAP32[i22 >> 2] | 0) - (HEAP32[i35 + 12 >> 2] | 0) >> 2) + -1 << 2) >> 2] | 0;
+ }
+ } else {
+ i34 = -1;
+ }
+ HEAP32[i23 >> 2] = i34;
+ break;
+ }
+ case 83:
+ {
+ if (!i12 ? (HEAP8[i19] | 0) != 38 : 0) {
+ i34 = HEAP32[i24 >> 2] | 0;
+ i35 = HEAP32[i34 + 36 >> 2] | 0;
+ if ((i35 | 0) == 0) {
+ i35 = 2168;
+ } else {
+ i35 = i35 + 16 | 0;
+ }
+ HEAP32[i27 >> 2] = i35;
+ i36 = HEAP32[i34 + 64 >> 2] | 0;
+ HEAP32[i28 >> 2] = i36;
+ HEAP32[i21 >> 2] = HEAP32[i34 + 68 >> 2];
+ i34 = (i36 | 0) == 0 ? 2176 : 2184;
+ } else {
+ HEAP32[i27 >> 2] = 2152;
+ HEAP32[i28 >> 2] = -1;
+ HEAP32[i21 >> 2] = -1;
+ i35 = 2152;
+ i34 = 2160;
+ }
+ HEAP32[i25 >> 2] = i34;
+ _luaO_chunkid(i26, i35, 60);
+ break;
+ }
+ case 117:
+ {
+ if (!i12) {
+ HEAP8[i15] = HEAP8[i11] | 0;
+ if ((HEAP8[i19] | 0) != 38) {
+ HEAP8[i14] = HEAP8[(HEAP32[i24 >> 2] | 0) + 77 | 0] | 0;
+ HEAP8[i13] = HEAP8[(HEAP32[i24 >> 2] | 0) + 76 | 0] | 0;
+ break L12;
+ }
+ } else {
+ HEAP8[i15] = 0;
+ }
+ HEAP8[i14] = 1;
+ HEAP8[i13] = 0;
+ break;
+ }
+ case 102:
+ case 76:
+ {
+ break;
+ }
+ default:
+ {
+ i33 = 0;
+ }
+ }
+ } while (0);
+ i32 = i32 + 1 | 0;
+ i34 = HEAP8[i32] | 0;
+ if (i34 << 24 >> 24 == 0) {
+ break L8;
+ }
+ }
+ }
+ } while (0);
+ if ((_strchr(i6, 102) | 0) != 0) {
+ i36 = i1 + 8 | 0;
+ i35 = HEAP32[i36 >> 2] | 0;
+ i31 = i7;
+ i32 = HEAP32[i31 + 4 >> 2] | 0;
+ i34 = i35;
+ HEAP32[i34 >> 2] = HEAP32[i31 >> 2];
+ HEAP32[i34 + 4 >> 2] = i32;
+ HEAP32[i35 + 8 >> 2] = HEAP32[i8 >> 2];
+ HEAP32[i36 >> 2] = (HEAP32[i36 >> 2] | 0) + 16;
+ }
+ if ((_strchr(i6, 76) | 0) == 0) {
+ STACKTOP = i3;
+ return i33 | 0;
+ }
+ if ((i9 | 0) != 0 ? (HEAP8[i9 + 4 | 0] | 0) != 38 : 0) {
+ i6 = i9 + 12 | 0;
+ i5 = HEAP32[(HEAP32[i6 >> 2] | 0) + 20 >> 2] | 0;
+ i4 = _luaH_new(i1) | 0;
+ i36 = i1 + 8 | 0;
+ i35 = HEAP32[i36 >> 2] | 0;
+ HEAP32[i35 >> 2] = i4;
+ HEAP32[i35 + 8 >> 2] = 69;
+ HEAP32[i36 >> 2] = (HEAP32[i36 >> 2] | 0) + 16;
+ HEAP32[i2 >> 2] = 1;
+ HEAP32[i2 + 8 >> 2] = 1;
+ if ((HEAP32[(HEAP32[i6 >> 2] | 0) + 52 >> 2] | 0) > 0) {
+ i7 = 0;
+ } else {
+ STACKTOP = i3;
+ return i33 | 0;
+ }
+ do {
+ _luaH_setint(i1, i4, HEAP32[i5 + (i7 << 2) >> 2] | 0, i2);
+ i7 = i7 + 1 | 0;
+ } while ((i7 | 0) < (HEAP32[(HEAP32[i6 >> 2] | 0) + 52 >> 2] | 0));
+ STACKTOP = i3;
+ return i33 | 0;
+ }
+ i36 = i1 + 8 | 0;
+ i35 = HEAP32[i36 >> 2] | 0;
+ HEAP32[i35 + 8 >> 2] = 0;
+ HEAP32[i36 >> 2] = i35 + 16;
+ STACKTOP = i3;
+ return i33 | 0;
+}
+function _read_long_string(i3, i1, i5) {
+ i3 = i3 | 0;
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i4 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0;
+ i2 = STACKTOP;
+ i14 = HEAP32[i3 >> 2] | 0;
+ i4 = i3 + 60 | 0;
+ i13 = HEAP32[i4 >> 2] | 0;
+ i15 = i13 + 4 | 0;
+ i16 = HEAP32[i15 >> 2] | 0;
+ i10 = i13 + 8 | 0;
+ i12 = HEAP32[i10 >> 2] | 0;
+ do {
+ if ((i16 + 1 | 0) >>> 0 > i12 >>> 0) {
+ if (i12 >>> 0 > 2147483645) {
+ _lexerror(i3, 12368, 0);
+ }
+ i16 = i12 << 1;
+ i17 = HEAP32[i3 + 52 >> 2] | 0;
+ if ((i16 | 0) == -2) {
+ _luaM_toobig(i17);
+ } else {
+ i8 = _luaM_realloc_(i17, HEAP32[i13 >> 2] | 0, i12, i16) | 0;
+ HEAP32[i13 >> 2] = i8;
+ HEAP32[i10 >> 2] = i16;
+ i9 = HEAP32[i15 >> 2] | 0;
+ break;
+ }
+ } else {
+ i9 = i16;
+ i8 = HEAP32[i13 >> 2] | 0;
+ }
+ } while (0);
+ HEAP32[i15 >> 2] = i9 + 1;
+ HEAP8[i8 + i9 | 0] = i14;
+ i9 = i3 + 56 | 0;
+ i8 = HEAP32[i9 >> 2] | 0;
+ i18 = HEAP32[i8 >> 2] | 0;
+ HEAP32[i8 >> 2] = i18 + -1;
+ if ((i18 | 0) == 0) {
+ i12 = _luaZ_fill(i8) | 0;
+ } else {
+ i18 = i8 + 4 | 0;
+ i12 = HEAP32[i18 >> 2] | 0;
+ HEAP32[i18 >> 2] = i12 + 1;
+ i12 = HEAPU8[i12] | 0;
+ }
+ HEAP32[i3 >> 2] = i12;
+ if ((i12 | 0) == 13 | (i12 | 0) == 10) {
+ _inclinenumber(i3);
+ i11 = 13;
+ }
+ L17 : while (1) {
+ if ((i11 | 0) == 13) {
+ i11 = 0;
+ i12 = HEAP32[i3 >> 2] | 0;
+ }
+ i8 = (i1 | 0) == 0;
+ i10 = i3 + 52 | 0;
+ L21 : do {
+ if (i8) {
+ while (1) {
+ if ((i12 | 0) == 13 | (i12 | 0) == 10) {
+ break L21;
+ } else if ((i12 | 0) == 93) {
+ i11 = 22;
+ break L21;
+ } else if ((i12 | 0) == -1) {
+ i11 = 21;
+ break L17;
+ }
+ i12 = HEAP32[i9 >> 2] | 0;
+ i18 = HEAP32[i12 >> 2] | 0;
+ HEAP32[i12 >> 2] = i18 + -1;
+ if ((i18 | 0) == 0) {
+ i12 = _luaZ_fill(i12) | 0;
+ } else {
+ i18 = i12 + 4 | 0;
+ i12 = HEAP32[i18 >> 2] | 0;
+ HEAP32[i18 >> 2] = i12 + 1;
+ i12 = HEAPU8[i12] | 0;
+ }
+ HEAP32[i3 >> 2] = i12;
+ }
+ } else {
+ while (1) {
+ if ((i12 | 0) == 13 | (i12 | 0) == 10) {
+ break L21;
+ } else if ((i12 | 0) == 93) {
+ i11 = 22;
+ break L21;
+ } else if ((i12 | 0) == -1) {
+ i11 = 21;
+ break L17;
+ }
+ i14 = HEAP32[i4 >> 2] | 0;
+ i13 = i14 + 4 | 0;
+ i17 = HEAP32[i13 >> 2] | 0;
+ i16 = i14 + 8 | 0;
+ i15 = HEAP32[i16 >> 2] | 0;
+ if ((i17 + 1 | 0) >>> 0 > i15 >>> 0) {
+ if (i15 >>> 0 > 2147483645) {
+ i11 = 46;
+ break L17;
+ }
+ i17 = i15 << 1;
+ i18 = HEAP32[i10 >> 2] | 0;
+ if ((i17 | 0) == -2) {
+ i11 = 48;
+ break L17;
+ }
+ i18 = _luaM_realloc_(i18, HEAP32[i14 >> 2] | 0, i15, i17) | 0;
+ HEAP32[i14 >> 2] = i18;
+ HEAP32[i16 >> 2] = i17;
+ i17 = HEAP32[i13 >> 2] | 0;
+ i14 = i18;
+ } else {
+ i14 = HEAP32[i14 >> 2] | 0;
+ }
+ HEAP32[i13 >> 2] = i17 + 1;
+ HEAP8[i14 + i17 | 0] = i12;
+ i12 = HEAP32[i9 >> 2] | 0;
+ i18 = HEAP32[i12 >> 2] | 0;
+ HEAP32[i12 >> 2] = i18 + -1;
+ if ((i18 | 0) == 0) {
+ i12 = _luaZ_fill(i12) | 0;
+ } else {
+ i18 = i12 + 4 | 0;
+ i12 = HEAP32[i18 >> 2] | 0;
+ HEAP32[i18 >> 2] = i12 + 1;
+ i12 = HEAPU8[i12] | 0;
+ }
+ HEAP32[i3 >> 2] = i12;
+ }
+ }
+ } while (0);
+ if ((i11 | 0) == 22) {
+ if ((_skip_sep(i3) | 0) == (i5 | 0)) {
+ i11 = 23;
+ break;
+ } else {
+ i11 = 13;
+ continue;
+ }
+ }
+ i12 = HEAP32[i4 >> 2] | 0;
+ i11 = i12 + 4 | 0;
+ i15 = HEAP32[i11 >> 2] | 0;
+ i14 = i12 + 8 | 0;
+ i13 = HEAP32[i14 >> 2] | 0;
+ if ((i15 + 1 | 0) >>> 0 > i13 >>> 0) {
+ if (i13 >>> 0 > 2147483645) {
+ i11 = 37;
+ break;
+ }
+ i15 = i13 << 1;
+ i10 = HEAP32[i10 >> 2] | 0;
+ if ((i15 | 0) == -2) {
+ i11 = 39;
+ break;
+ }
+ i10 = _luaM_realloc_(i10, HEAP32[i12 >> 2] | 0, i13, i15) | 0;
+ HEAP32[i12 >> 2] = i10;
+ HEAP32[i14 >> 2] = i15;
+ i15 = HEAP32[i11 >> 2] | 0;
+ } else {
+ i10 = HEAP32[i12 >> 2] | 0;
+ }
+ HEAP32[i11 >> 2] = i15 + 1;
+ HEAP8[i10 + i15 | 0] = 10;
+ _inclinenumber(i3);
+ if (!i8) {
+ i11 = 13;
+ continue;
+ }
+ HEAP32[(HEAP32[i4 >> 2] | 0) + 4 >> 2] = 0;
+ i11 = 13;
+ }
+ if ((i11 | 0) == 21) {
+ _lexerror(i3, (i1 | 0) != 0 ? 12512 : 12536, 286);
+ } else if ((i11 | 0) == 23) {
+ i15 = HEAP32[i3 >> 2] | 0;
+ i13 = HEAP32[i4 >> 2] | 0;
+ i14 = i13 + 4 | 0;
+ i16 = HEAP32[i14 >> 2] | 0;
+ i11 = i13 + 8 | 0;
+ i12 = HEAP32[i11 >> 2] | 0;
+ do {
+ if ((i16 + 1 | 0) >>> 0 > i12 >>> 0) {
+ if (i12 >>> 0 > 2147483645) {
+ _lexerror(i3, 12368, 0);
+ }
+ i17 = i12 << 1;
+ i16 = HEAP32[i10 >> 2] | 0;
+ if ((i17 | 0) == -2) {
+ _luaM_toobig(i16);
+ } else {
+ i6 = _luaM_realloc_(i16, HEAP32[i13 >> 2] | 0, i12, i17) | 0;
+ HEAP32[i13 >> 2] = i6;
+ HEAP32[i11 >> 2] = i17;
+ i7 = HEAP32[i14 >> 2] | 0;
+ break;
+ }
+ } else {
+ i7 = i16;
+ i6 = HEAP32[i13 >> 2] | 0;
+ }
+ } while (0);
+ HEAP32[i14 >> 2] = i7 + 1;
+ HEAP8[i6 + i7 | 0] = i15;
+ i6 = HEAP32[i9 >> 2] | 0;
+ i18 = HEAP32[i6 >> 2] | 0;
+ HEAP32[i6 >> 2] = i18 + -1;
+ if ((i18 | 0) == 0) {
+ i6 = _luaZ_fill(i6) | 0;
+ } else {
+ i18 = i6 + 4 | 0;
+ i6 = HEAP32[i18 >> 2] | 0;
+ HEAP32[i18 >> 2] = i6 + 1;
+ i6 = HEAPU8[i6] | 0;
+ }
+ HEAP32[i3 >> 2] = i6;
+ if (i8) {
+ STACKTOP = i2;
+ return;
+ }
+ i4 = HEAP32[i4 >> 2] | 0;
+ i5 = i5 + 2 | 0;
+ i6 = HEAP32[i10 >> 2] | 0;
+ i5 = _luaS_newlstr(i6, (HEAP32[i4 >> 2] | 0) + i5 | 0, (HEAP32[i4 + 4 >> 2] | 0) - (i5 << 1) | 0) | 0;
+ i4 = i6 + 8 | 0;
+ i7 = HEAP32[i4 >> 2] | 0;
+ HEAP32[i4 >> 2] = i7 + 16;
+ HEAP32[i7 >> 2] = i5;
+ HEAP32[i7 + 8 >> 2] = HEAPU8[i5 + 4 | 0] | 0 | 64;
+ i7 = _luaH_set(i6, HEAP32[(HEAP32[i3 + 48 >> 2] | 0) + 4 >> 2] | 0, (HEAP32[i4 >> 2] | 0) + -16 | 0) | 0;
+ i3 = i7 + 8 | 0;
+ if ((HEAP32[i3 >> 2] | 0) == 0 ? (HEAP32[i7 >> 2] = 1, HEAP32[i3 >> 2] = 1, (HEAP32[(HEAP32[i6 + 12 >> 2] | 0) + 12 >> 2] | 0) > 0) : 0) {
+ _luaC_step(i6);
+ }
+ HEAP32[i4 >> 2] = (HEAP32[i4 >> 2] | 0) + -16;
+ HEAP32[i1 >> 2] = i5;
+ STACKTOP = i2;
+ return;
+ } else if ((i11 | 0) == 37) {
+ _lexerror(i3, 12368, 0);
+ } else if ((i11 | 0) == 39) {
+ _luaM_toobig(i10);
+ } else if ((i11 | 0) == 46) {
+ _lexerror(i3, 12368, 0);
+ } else if ((i11 | 0) == 48) {
+ _luaM_toobig(i18);
+ }
+}
+function _try_realloc_chunk(i1, i3) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0;
+ i2 = STACKTOP;
+ i4 = i1 + 4 | 0;
+ i6 = HEAP32[i4 >> 2] | 0;
+ i8 = i6 & -8;
+ i5 = i1 + i8 | 0;
+ i10 = HEAP32[12928 >> 2] | 0;
+ if (i1 >>> 0 < i10 >>> 0) {
+ _abort();
+ }
+ i12 = i6 & 3;
+ if (!((i12 | 0) != 1 & i1 >>> 0 < i5 >>> 0)) {
+ _abort();
+ }
+ i7 = i1 + (i8 | 4) | 0;
+ i13 = HEAP32[i7 >> 2] | 0;
+ if ((i13 & 1 | 0) == 0) {
+ _abort();
+ }
+ if ((i12 | 0) == 0) {
+ if (i3 >>> 0 < 256) {
+ i15 = 0;
+ STACKTOP = i2;
+ return i15 | 0;
+ }
+ if (!(i8 >>> 0 < (i3 + 4 | 0) >>> 0) ? !((i8 - i3 | 0) >>> 0 > HEAP32[13392 >> 2] << 1 >>> 0) : 0) {
+ i15 = i1;
+ STACKTOP = i2;
+ return i15 | 0;
+ }
+ i15 = 0;
+ STACKTOP = i2;
+ return i15 | 0;
+ }
+ if (!(i8 >>> 0 < i3 >>> 0)) {
+ i5 = i8 - i3 | 0;
+ if (!(i5 >>> 0 > 15)) {
+ i15 = i1;
+ STACKTOP = i2;
+ return i15 | 0;
+ }
+ HEAP32[i4 >> 2] = i6 & 1 | i3 | 2;
+ HEAP32[i1 + (i3 + 4) >> 2] = i5 | 3;
+ HEAP32[i7 >> 2] = HEAP32[i7 >> 2] | 1;
+ _dispose_chunk(i1 + i3 | 0, i5);
+ i15 = i1;
+ STACKTOP = i2;
+ return i15 | 0;
+ }
+ if ((i5 | 0) == (HEAP32[12936 >> 2] | 0)) {
+ i5 = (HEAP32[12924 >> 2] | 0) + i8 | 0;
+ if (!(i5 >>> 0 > i3 >>> 0)) {
+ i15 = 0;
+ STACKTOP = i2;
+ return i15 | 0;
+ }
+ i15 = i5 - i3 | 0;
+ HEAP32[i4 >> 2] = i6 & 1 | i3 | 2;
+ HEAP32[i1 + (i3 + 4) >> 2] = i15 | 1;
+ HEAP32[12936 >> 2] = i1 + i3;
+ HEAP32[12924 >> 2] = i15;
+ i15 = i1;
+ STACKTOP = i2;
+ return i15 | 0;
+ }
+ if ((i5 | 0) == (HEAP32[12932 >> 2] | 0)) {
+ i7 = (HEAP32[12920 >> 2] | 0) + i8 | 0;
+ if (i7 >>> 0 < i3 >>> 0) {
+ i15 = 0;
+ STACKTOP = i2;
+ return i15 | 0;
+ }
+ i5 = i7 - i3 | 0;
+ if (i5 >>> 0 > 15) {
+ HEAP32[i4 >> 2] = i6 & 1 | i3 | 2;
+ HEAP32[i1 + (i3 + 4) >> 2] = i5 | 1;
+ HEAP32[i1 + i7 >> 2] = i5;
+ i15 = i1 + (i7 + 4) | 0;
+ HEAP32[i15 >> 2] = HEAP32[i15 >> 2] & -2;
+ i3 = i1 + i3 | 0;
+ } else {
+ HEAP32[i4 >> 2] = i6 & 1 | i7 | 2;
+ i3 = i1 + (i7 + 4) | 0;
+ HEAP32[i3 >> 2] = HEAP32[i3 >> 2] | 1;
+ i3 = 0;
+ i5 = 0;
+ }
+ HEAP32[12920 >> 2] = i5;
+ HEAP32[12932 >> 2] = i3;
+ i15 = i1;
+ STACKTOP = i2;
+ return i15 | 0;
+ }
+ if ((i13 & 2 | 0) != 0) {
+ i15 = 0;
+ STACKTOP = i2;
+ return i15 | 0;
+ }
+ i7 = (i13 & -8) + i8 | 0;
+ if (i7 >>> 0 < i3 >>> 0) {
+ i15 = 0;
+ STACKTOP = i2;
+ return i15 | 0;
+ }
+ i6 = i7 - i3 | 0;
+ i12 = i13 >>> 3;
+ do {
+ if (!(i13 >>> 0 < 256)) {
+ i11 = HEAP32[i1 + (i8 + 24) >> 2] | 0;
+ i13 = HEAP32[i1 + (i8 + 12) >> 2] | 0;
+ do {
+ if ((i13 | 0) == (i5 | 0)) {
+ i13 = i1 + (i8 + 20) | 0;
+ i12 = HEAP32[i13 >> 2] | 0;
+ if ((i12 | 0) == 0) {
+ i13 = i1 + (i8 + 16) | 0;
+ i12 = HEAP32[i13 >> 2] | 0;
+ if ((i12 | 0) == 0) {
+ i9 = 0;
+ break;
+ }
+ }
+ while (1) {
+ i15 = i12 + 20 | 0;
+ i14 = HEAP32[i15 >> 2] | 0;
+ if ((i14 | 0) != 0) {
+ i12 = i14;
+ i13 = i15;
+ continue;
+ }
+ i15 = i12 + 16 | 0;
+ i14 = HEAP32[i15 >> 2] | 0;
+ if ((i14 | 0) == 0) {
+ break;
+ } else {
+ i12 = i14;
+ i13 = i15;
+ }
+ }
+ if (i13 >>> 0 < i10 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i13 >> 2] = 0;
+ i9 = i12;
+ break;
+ }
+ } else {
+ i12 = HEAP32[i1 + (i8 + 8) >> 2] | 0;
+ if (i12 >>> 0 < i10 >>> 0) {
+ _abort();
+ }
+ i14 = i12 + 12 | 0;
+ if ((HEAP32[i14 >> 2] | 0) != (i5 | 0)) {
+ _abort();
+ }
+ i10 = i13 + 8 | 0;
+ if ((HEAP32[i10 >> 2] | 0) == (i5 | 0)) {
+ HEAP32[i14 >> 2] = i13;
+ HEAP32[i10 >> 2] = i12;
+ i9 = i13;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ if ((i11 | 0) != 0) {
+ i10 = HEAP32[i1 + (i8 + 28) >> 2] | 0;
+ i12 = 13216 + (i10 << 2) | 0;
+ if ((i5 | 0) == (HEAP32[i12 >> 2] | 0)) {
+ HEAP32[i12 >> 2] = i9;
+ if ((i9 | 0) == 0) {
+ HEAP32[12916 >> 2] = HEAP32[12916 >> 2] & ~(1 << i10);
+ break;
+ }
+ } else {
+ if (i11 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i10 = i11 + 16 | 0;
+ if ((HEAP32[i10 >> 2] | 0) == (i5 | 0)) {
+ HEAP32[i10 >> 2] = i9;
+ } else {
+ HEAP32[i11 + 20 >> 2] = i9;
+ }
+ if ((i9 | 0) == 0) {
+ break;
+ }
+ }
+ if (i9 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i9 + 24 >> 2] = i11;
+ i5 = HEAP32[i1 + (i8 + 16) >> 2] | 0;
+ do {
+ if ((i5 | 0) != 0) {
+ if (i5 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i9 + 16 >> 2] = i5;
+ HEAP32[i5 + 24 >> 2] = i9;
+ break;
+ }
+ }
+ } while (0);
+ i5 = HEAP32[i1 + (i8 + 20) >> 2] | 0;
+ if ((i5 | 0) != 0) {
+ if (i5 >>> 0 < (HEAP32[12928 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i9 + 20 >> 2] = i5;
+ HEAP32[i5 + 24 >> 2] = i9;
+ break;
+ }
+ }
+ }
+ } else {
+ i9 = HEAP32[i1 + (i8 + 8) >> 2] | 0;
+ i8 = HEAP32[i1 + (i8 + 12) >> 2] | 0;
+ i13 = 12952 + (i12 << 1 << 2) | 0;
+ if ((i9 | 0) != (i13 | 0)) {
+ if (i9 >>> 0 < i10 >>> 0) {
+ _abort();
+ }
+ if ((HEAP32[i9 + 12 >> 2] | 0) != (i5 | 0)) {
+ _abort();
+ }
+ }
+ if ((i8 | 0) == (i9 | 0)) {
+ HEAP32[3228] = HEAP32[3228] & ~(1 << i12);
+ break;
+ }
+ if ((i8 | 0) != (i13 | 0)) {
+ if (i8 >>> 0 < i10 >>> 0) {
+ _abort();
+ }
+ i10 = i8 + 8 | 0;
+ if ((HEAP32[i10 >> 2] | 0) == (i5 | 0)) {
+ i11 = i10;
+ } else {
+ _abort();
+ }
+ } else {
+ i11 = i8 + 8 | 0;
+ }
+ HEAP32[i9 + 12 >> 2] = i8;
+ HEAP32[i11 >> 2] = i9;
+ }
+ } while (0);
+ if (i6 >>> 0 < 16) {
+ HEAP32[i4 >> 2] = i7 | HEAP32[i4 >> 2] & 1 | 2;
+ i15 = i1 + (i7 | 4) | 0;
+ HEAP32[i15 >> 2] = HEAP32[i15 >> 2] | 1;
+ i15 = i1;
+ STACKTOP = i2;
+ return i15 | 0;
+ } else {
+ HEAP32[i4 >> 2] = HEAP32[i4 >> 2] & 1 | i3 | 2;
+ HEAP32[i1 + (i3 + 4) >> 2] = i6 | 3;
+ i15 = i1 + (i7 | 4) | 0;
+ HEAP32[i15 >> 2] = HEAP32[i15 >> 2] | 1;
+ _dispose_chunk(i1 + i3 | 0, i6);
+ i15 = i1;
+ STACKTOP = i2;
+ return i15 | 0;
+ }
+ return 0;
+}
+function _luaK_posfix(i3, i16, i1, i4, i14) {
+ i3 = i3 | 0;
+ i16 = i16 | 0;
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i14 = i14 | 0;
+ var i2 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i15 = 0;
+ i2 = STACKTOP;
+ switch (i16 | 0) {
+ case 14:
+ {
+ _luaK_dischargevars(i3, i4);
+ i6 = i4 + 16 | 0;
+ i5 = HEAP32[i1 + 16 >> 2] | 0;
+ do {
+ if (!((i5 | 0) == -1)) {
+ i9 = HEAP32[i6 >> 2] | 0;
+ if ((i9 | 0) == -1) {
+ HEAP32[i6 >> 2] = i5;
+ break;
+ }
+ i7 = HEAP32[(HEAP32[i3 >> 2] | 0) + 12 >> 2] | 0;
+ while (1) {
+ i6 = i7 + (i9 << 2) | 0;
+ i8 = HEAP32[i6 >> 2] | 0;
+ i10 = (i8 >>> 14) + -131071 | 0;
+ if ((i10 | 0) == -1) {
+ break;
+ }
+ i10 = i9 + 1 + i10 | 0;
+ if ((i10 | 0) == -1) {
+ break;
+ } else {
+ i9 = i10;
+ }
+ }
+ i5 = i5 + ~i9 | 0;
+ if ((((i5 | 0) > -1 ? i5 : 0 - i5 | 0) | 0) > 131071) {
+ _luaX_syntaxerror(HEAP32[i3 + 12 >> 2] | 0, 10624);
+ } else {
+ HEAP32[i6 >> 2] = (i5 << 14) + 2147467264 | i8 & 16383;
+ break;
+ }
+ }
+ } while (0);
+ HEAP32[i1 + 0 >> 2] = HEAP32[i4 + 0 >> 2];
+ HEAP32[i1 + 4 >> 2] = HEAP32[i4 + 4 >> 2];
+ HEAP32[i1 + 8 >> 2] = HEAP32[i4 + 8 >> 2];
+ HEAP32[i1 + 12 >> 2] = HEAP32[i4 + 12 >> 2];
+ HEAP32[i1 + 16 >> 2] = HEAP32[i4 + 16 >> 2];
+ HEAP32[i1 + 20 >> 2] = HEAP32[i4 + 20 >> 2];
+ STACKTOP = i2;
+ return;
+ }
+ case 13:
+ {
+ _luaK_dischargevars(i3, i4);
+ i6 = i4 + 20 | 0;
+ i5 = HEAP32[i1 + 20 >> 2] | 0;
+ do {
+ if (!((i5 | 0) == -1)) {
+ i9 = HEAP32[i6 >> 2] | 0;
+ if ((i9 | 0) == -1) {
+ HEAP32[i6 >> 2] = i5;
+ break;
+ }
+ i7 = HEAP32[(HEAP32[i3 >> 2] | 0) + 12 >> 2] | 0;
+ while (1) {
+ i8 = i7 + (i9 << 2) | 0;
+ i6 = HEAP32[i8 >> 2] | 0;
+ i10 = (i6 >>> 14) + -131071 | 0;
+ if ((i10 | 0) == -1) {
+ break;
+ }
+ i10 = i9 + 1 + i10 | 0;
+ if ((i10 | 0) == -1) {
+ break;
+ } else {
+ i9 = i10;
+ }
+ }
+ i5 = i5 + ~i9 | 0;
+ if ((((i5 | 0) > -1 ? i5 : 0 - i5 | 0) | 0) > 131071) {
+ _luaX_syntaxerror(HEAP32[i3 + 12 >> 2] | 0, 10624);
+ } else {
+ HEAP32[i8 >> 2] = (i5 << 14) + 2147467264 | i6 & 16383;
+ break;
+ }
+ }
+ } while (0);
+ HEAP32[i1 + 0 >> 2] = HEAP32[i4 + 0 >> 2];
+ HEAP32[i1 + 4 >> 2] = HEAP32[i4 + 4 >> 2];
+ HEAP32[i1 + 8 >> 2] = HEAP32[i4 + 8 >> 2];
+ HEAP32[i1 + 12 >> 2] = HEAP32[i4 + 12 >> 2];
+ HEAP32[i1 + 16 >> 2] = HEAP32[i4 + 16 >> 2];
+ HEAP32[i1 + 20 >> 2] = HEAP32[i4 + 20 >> 2];
+ STACKTOP = i2;
+ return;
+ }
+ case 6:
+ {
+ i12 = i4 + 16 | 0;
+ i13 = i4 + 20 | 0;
+ i16 = (HEAP32[i12 >> 2] | 0) == (HEAP32[i13 >> 2] | 0);
+ _luaK_dischargevars(i3, i4);
+ do {
+ if (!i16) {
+ if ((HEAP32[i4 >> 2] | 0) == 6) {
+ i10 = HEAP32[i4 + 8 >> 2] | 0;
+ if ((HEAP32[i12 >> 2] | 0) == (HEAP32[i13 >> 2] | 0)) {
+ break;
+ }
+ if ((i10 | 0) >= (HEAPU8[i3 + 46 | 0] | 0 | 0)) {
+ _exp2reg(i3, i4, i10);
+ break;
+ }
+ }
+ _luaK_exp2nextreg(i3, i4);
+ }
+ } while (0);
+ if ((HEAP32[i4 >> 2] | 0) == 11 ? (i5 = i4 + 8 | 0, i7 = HEAP32[i5 >> 2] | 0, i8 = (HEAP32[i3 >> 2] | 0) + 12 | 0, i9 = HEAP32[i8 >> 2] | 0, i6 = HEAP32[i9 + (i7 << 2) >> 2] | 0, (i6 & 63 | 0) == 22) : 0) {
+ i4 = i1 + 8 | 0;
+ if (((HEAP32[i1 >> 2] | 0) == 6 ? (i11 = HEAP32[i4 >> 2] | 0, (i11 & 256 | 0) == 0) : 0) ? (HEAPU8[i3 + 46 | 0] | 0 | 0) <= (i11 | 0) : 0) {
+ i6 = i3 + 48 | 0;
+ HEAP8[i6] = (HEAP8[i6] | 0) + -1 << 24 >> 24;
+ i6 = HEAP32[i5 >> 2] | 0;
+ i16 = HEAP32[i8 >> 2] | 0;
+ i9 = i16;
+ i7 = i6;
+ i6 = HEAP32[i16 + (i6 << 2) >> 2] | 0;
+ }
+ HEAP32[i9 + (i7 << 2) >> 2] = HEAP32[i4 >> 2] << 23 | i6 & 8388607;
+ HEAP32[i1 >> 2] = 11;
+ HEAP32[i4 >> 2] = HEAP32[i5 >> 2];
+ STACKTOP = i2;
+ return;
+ }
+ _luaK_exp2nextreg(i3, i4);
+ _codearith(i3, 22, i1, i4, i14);
+ STACKTOP = i2;
+ return;
+ }
+ case 9:
+ case 8:
+ case 7:
+ {
+ i7 = i16 + 17 | 0;
+ i6 = _luaK_exp2RK(i3, i1) | 0;
+ i5 = _luaK_exp2RK(i3, i4) | 0;
+ if (((HEAP32[i4 >> 2] | 0) == 6 ? (i15 = HEAP32[i4 + 8 >> 2] | 0, (i15 & 256 | 0) == 0) : 0) ? (HEAPU8[i3 + 46 | 0] | 0 | 0) <= (i15 | 0) : 0) {
+ i16 = i3 + 48 | 0;
+ HEAP8[i16] = (HEAP8[i16] | 0) + -1 << 24 >> 24;
+ }
+ i4 = i1 + 8 | 0;
+ if (((HEAP32[i1 >> 2] | 0) == 6 ? (i10 = HEAP32[i4 >> 2] | 0, (i10 & 256 | 0) == 0) : 0) ? (HEAPU8[i3 + 46 | 0] | 0 | 0) <= (i10 | 0) : 0) {
+ i16 = i3 + 48 | 0;
+ HEAP8[i16] = (HEAP8[i16] | 0) + -1 << 24 >> 24;
+ }
+ HEAP32[i4 >> 2] = _condjump(i3, i7, 1, i6, i5) | 0;
+ HEAP32[i1 >> 2] = 10;
+ STACKTOP = i2;
+ return;
+ }
+ case 12:
+ case 11:
+ case 10:
+ {
+ i7 = i16 + 14 | 0;
+ i6 = _luaK_exp2RK(i3, i1) | 0;
+ i5 = _luaK_exp2RK(i3, i4) | 0;
+ if (((HEAP32[i4 >> 2] | 0) == 6 ? (i13 = HEAP32[i4 + 8 >> 2] | 0, (i13 & 256 | 0) == 0) : 0) ? (HEAPU8[i3 + 46 | 0] | 0 | 0) <= (i13 | 0) : 0) {
+ i16 = i3 + 48 | 0;
+ HEAP8[i16] = (HEAP8[i16] | 0) + -1 << 24 >> 24;
+ }
+ i4 = i1 + 8 | 0;
+ if (((HEAP32[i1 >> 2] | 0) == 6 ? (i12 = HEAP32[i4 >> 2] | 0, (i12 & 256 | 0) == 0) : 0) ? (HEAPU8[i3 + 46 | 0] | 0 | 0) <= (i12 | 0) : 0) {
+ i16 = i3 + 48 | 0;
+ HEAP8[i16] = (HEAP8[i16] | 0) + -1 << 24 >> 24;
+ }
+ i8 = (i7 | 0) == 24;
+ HEAP32[i4 >> 2] = _condjump(i3, i7, i8 & 1 ^ 1, i8 ? i6 : i5, i8 ? i5 : i6) | 0;
+ HEAP32[i1 >> 2] = 10;
+ STACKTOP = i2;
+ return;
+ }
+ case 5:
+ case 4:
+ case 3:
+ case 2:
+ case 1:
+ case 0:
+ {
+ _codearith(i3, i16 + 13 | 0, i1, i4, i14);
+ STACKTOP = i2;
+ return;
+ }
+ default:
+ {
+ STACKTOP = i2;
+ return;
+ }
+ }
+}
+function _body(i1, i4, i13, i5) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i13 = i13 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i3 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0, i23 = 0, i24 = 0;
+ i6 = STACKTOP;
+ STACKTOP = STACKTOP + 64 | 0;
+ i3 = i6 + 12 | 0;
+ i14 = i6;
+ i2 = i1 + 48 | 0;
+ i19 = HEAP32[i2 >> 2] | 0;
+ i18 = i1 + 52 | 0;
+ i17 = HEAP32[i18 >> 2] | 0;
+ i16 = HEAP32[i19 >> 2] | 0;
+ i19 = i19 + 36 | 0;
+ i23 = i16 + 56 | 0;
+ i24 = HEAP32[i23 >> 2] | 0;
+ i15 = i16 + 16 | 0;
+ if (((HEAP32[i19 >> 2] | 0) >= (i24 | 0) ? (i21 = _luaM_growaux_(i17, HEAP32[i15 >> 2] | 0, i23, 4, 262143, 6512) | 0, HEAP32[i15 >> 2] = i21, i20 = HEAP32[i23 >> 2] | 0, (i24 | 0) < (i20 | 0)) : 0) ? (i22 = i24 + 1 | 0, HEAP32[i21 + (i24 << 2) >> 2] = 0, (i22 | 0) < (i20 | 0)) : 0) {
+ while (1) {
+ i21 = i22 + 1 | 0;
+ HEAP32[(HEAP32[i15 >> 2] | 0) + (i22 << 2) >> 2] = 0;
+ if ((i21 | 0) == (i20 | 0)) {
+ break;
+ } else {
+ i22 = i21;
+ }
+ }
+ }
+ i20 = _luaF_newproto(i17) | 0;
+ i24 = HEAP32[i19 >> 2] | 0;
+ HEAP32[i19 >> 2] = i24 + 1;
+ HEAP32[(HEAP32[i15 >> 2] | 0) + (i24 << 2) >> 2] = i20;
+ if (!((HEAP8[i20 + 5 | 0] & 3) == 0) ? !((HEAP8[i16 + 5 | 0] & 4) == 0) : 0) {
+ _luaC_barrier_(i17, i16, i20);
+ }
+ HEAP32[i3 >> 2] = i20;
+ HEAP32[i20 + 64 >> 2] = i5;
+ i16 = HEAP32[i18 >> 2] | 0;
+ HEAP32[i3 + 8 >> 2] = HEAP32[i2 >> 2];
+ i17 = i3 + 12 | 0;
+ HEAP32[i17 >> 2] = i1;
+ HEAP32[i2 >> 2] = i3;
+ HEAP32[i3 + 20 >> 2] = 0;
+ HEAP32[i3 + 24 >> 2] = 0;
+ HEAP32[i3 + 28 >> 2] = -1;
+ HEAP32[i3 + 32 >> 2] = 0;
+ HEAP32[i3 + 36 >> 2] = 0;
+ i22 = i3 + 44 | 0;
+ i15 = i1 + 64 | 0;
+ HEAP32[i22 + 0 >> 2] = 0;
+ HEAP8[i22 + 4 | 0] = 0;
+ HEAP32[i3 + 40 >> 2] = HEAP32[(HEAP32[i15 >> 2] | 0) + 4 >> 2];
+ i15 = i3 + 16 | 0;
+ HEAP32[i15 >> 2] = 0;
+ HEAP32[i20 + 36 >> 2] = HEAP32[i1 + 68 >> 2];
+ HEAP8[i20 + 78 | 0] = 2;
+ i22 = _luaH_new(i16) | 0;
+ HEAP32[i3 + 4 >> 2] = i22;
+ i23 = i16 + 8 | 0;
+ i24 = HEAP32[i23 >> 2] | 0;
+ HEAP32[i24 >> 2] = i22;
+ HEAP32[i24 + 8 >> 2] = 69;
+ i24 = (HEAP32[i23 >> 2] | 0) + 16 | 0;
+ HEAP32[i23 >> 2] = i24;
+ if (((HEAP32[i16 + 24 >> 2] | 0) - i24 | 0) < 16) {
+ _luaD_growstack(i16, 0);
+ }
+ HEAP8[i14 + 10 | 0] = 0;
+ HEAP8[i14 + 8 | 0] = HEAP8[i3 + 46 | 0] | 0;
+ i24 = HEAP32[(HEAP32[i17 >> 2] | 0) + 64 >> 2] | 0;
+ HEAP16[i14 + 4 >> 1] = HEAP32[i24 + 28 >> 2];
+ HEAP16[i14 + 6 >> 1] = HEAP32[i24 + 16 >> 2];
+ HEAP8[i14 + 9 | 0] = 0;
+ HEAP32[i14 >> 2] = HEAP32[i15 >> 2];
+ HEAP32[i15 >> 2] = i14;
+ i14 = i1 + 16 | 0;
+ if ((HEAP32[i14 >> 2] | 0) != 40) {
+ _error_expected(i1, 40);
+ }
+ _luaX_next(i1);
+ if ((i13 | 0) != 0) {
+ _new_localvar(i1, _luaX_newstring(i1, 6456, 4) | 0);
+ i24 = HEAP32[i2 >> 2] | 0;
+ i22 = i24 + 46 | 0;
+ i23 = (HEAPU8[i22] | 0) + 1 | 0;
+ HEAP8[i22] = i23;
+ HEAP32[(HEAP32[(HEAP32[i24 >> 2] | 0) + 24 >> 2] | 0) + ((HEAP16[(HEAP32[HEAP32[(HEAP32[i24 + 12 >> 2] | 0) + 64 >> 2] >> 2] | 0) + ((i23 & 255) + -1 + (HEAP32[i24 + 40 >> 2] | 0) << 1) >> 1] | 0) * 12 | 0) + 4 >> 2] = HEAP32[i24 + 20 >> 2];
+ }
+ i13 = HEAP32[i2 >> 2] | 0;
+ i15 = HEAP32[i13 >> 2] | 0;
+ i16 = i15 + 77 | 0;
+ HEAP8[i16] = 0;
+ i19 = HEAP32[i14 >> 2] | 0;
+ L20 : do {
+ if ((i19 | 0) != 41) {
+ i17 = i1 + 24 | 0;
+ i18 = 0;
+ while (1) {
+ if ((i19 | 0) == 280) {
+ i17 = 18;
+ break;
+ } else if ((i19 | 0) != 288) {
+ i17 = 19;
+ break;
+ }
+ i24 = HEAP32[i17 >> 2] | 0;
+ _luaX_next(i1);
+ _new_localvar(i1, i24);
+ i18 = i18 + 1 | 0;
+ if ((HEAP8[i16] | 0) != 0) {
+ i11 = i18;
+ break L20;
+ }
+ if ((HEAP32[i14 >> 2] | 0) != 44) {
+ i11 = i18;
+ break L20;
+ }
+ _luaX_next(i1);
+ i19 = HEAP32[i14 >> 2] | 0;
+ }
+ if ((i17 | 0) == 18) {
+ _luaX_next(i1);
+ HEAP8[i16] = 1;
+ i11 = i18;
+ break;
+ } else if ((i17 | 0) == 19) {
+ _luaX_syntaxerror(i1, 6464);
+ }
+ } else {
+ i11 = 0;
+ }
+ } while (0);
+ i18 = HEAP32[i2 >> 2] | 0;
+ i16 = i18 + 46 | 0;
+ i17 = (HEAPU8[i16] | 0) + i11 | 0;
+ HEAP8[i16] = i17;
+ if ((i11 | 0) != 0 ? (i8 = i18 + 20 | 0, i9 = i18 + 40 | 0, i7 = HEAP32[(HEAP32[i18 >> 2] | 0) + 24 >> 2] | 0, i10 = HEAP32[HEAP32[(HEAP32[i18 + 12 >> 2] | 0) + 64 >> 2] >> 2] | 0, HEAP32[i7 + ((HEAP16[i10 + ((i17 & 255) - i11 + (HEAP32[i9 >> 2] | 0) << 1) >> 1] | 0) * 12 | 0) + 4 >> 2] = HEAP32[i8 >> 2], i12 = i11 + -1 | 0, (i12 | 0) != 0) : 0) {
+ do {
+ HEAP32[i7 + ((HEAP16[i10 + ((HEAPU8[i16] | 0) - i12 + (HEAP32[i9 >> 2] | 0) << 1) >> 1] | 0) * 12 | 0) + 4 >> 2] = HEAP32[i8 >> 2];
+ i12 = i12 + -1 | 0;
+ } while ((i12 | 0) != 0);
+ }
+ i24 = i13 + 46 | 0;
+ HEAP8[i15 + 76 | 0] = HEAP8[i24] | 0;
+ _luaK_reserveregs(i13, HEAPU8[i24] | 0);
+ if ((HEAP32[i14 >> 2] | 0) != 41) {
+ _error_expected(i1, 41);
+ }
+ _luaX_next(i1);
+ L39 : while (1) {
+ i7 = HEAP32[i14 >> 2] | 0;
+ switch (i7 | 0) {
+ case 277:
+ case 286:
+ case 262:
+ case 261:
+ case 260:
+ {
+ i17 = 30;
+ break L39;
+ }
+ default:
+ {}
+ }
+ _statement(i1);
+ if ((i7 | 0) == 274) {
+ i17 = 30;
+ break;
+ }
+ }
+ if ((i17 | 0) == 30) {
+ HEAP32[(HEAP32[i3 >> 2] | 0) + 68 >> 2] = HEAP32[i1 + 4 >> 2];
+ _check_match(i1, 262, 265, i5);
+ i24 = HEAP32[(HEAP32[i2 >> 2] | 0) + 8 >> 2] | 0;
+ i23 = _luaK_codeABx(i24, 37, 0, (HEAP32[i24 + 36 >> 2] | 0) + -1 | 0) | 0;
+ HEAP32[i4 + 16 >> 2] = -1;
+ HEAP32[i4 + 20 >> 2] = -1;
+ HEAP32[i4 >> 2] = 11;
+ HEAP32[i4 + 8 >> 2] = i23;
+ _luaK_exp2nextreg(i24, i4);
+ _close_func(i1);
+ STACKTOP = i6;
+ return;
+ }
+}
+function _luaH_newkey(i3, i2, i1) {
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, d21 = 0.0;
+ i4 = STACKTOP;
+ STACKTOP = STACKTOP + 144 | 0;
+ i8 = i4 + 8 | 0;
+ i10 = i4;
+ i5 = i4 + 16 | 0;
+ i6 = i1 + 8 | 0;
+ i11 = HEAP32[i6 >> 2] | 0;
+ if ((i11 | 0) == 0) {
+ _luaG_runerror(i3, 7968, i8);
+ } else if ((i11 | 0) == 3) {
+ i15 = 3;
+ }
+ if ((i15 | 0) == 3 ? (d21 = +HEAPF64[i1 >> 3], !(d21 == d21 & 0.0 == 0.0)) : 0) {
+ _luaG_runerror(i3, 7992, i8);
+ }
+ i13 = _mainposition(i2, i1) | 0;
+ i14 = i13 + 8 | 0;
+ do {
+ if ((HEAP32[i14 >> 2] | 0) != 0 | (i13 | 0) == 8016) {
+ i18 = i2 + 20 | 0;
+ i11 = i2 + 16 | 0;
+ i17 = HEAP32[i11 >> 2] | 0;
+ i16 = HEAP32[i18 >> 2] | 0;
+ while (1) {
+ if (!(i16 >>> 0 > i17 >>> 0)) {
+ break;
+ }
+ i12 = i16 + -32 | 0;
+ HEAP32[i18 >> 2] = i12;
+ if ((HEAP32[i16 + -8 >> 2] | 0) == 0) {
+ i15 = 37;
+ break;
+ } else {
+ i16 = i12;
+ }
+ }
+ if ((i15 | 0) == 37) {
+ i5 = _mainposition(i2, i13 + 16 | 0) | 0;
+ if ((i5 | 0) == (i13 | 0)) {
+ i20 = i13 + 28 | 0;
+ HEAP32[i16 + -4 >> 2] = HEAP32[i20 >> 2];
+ HEAP32[i20 >> 2] = i12;
+ break;
+ } else {
+ i7 = i5;
+ }
+ do {
+ i5 = i7 + 28 | 0;
+ i7 = HEAP32[i5 >> 2] | 0;
+ } while ((i7 | 0) != (i13 | 0));
+ HEAP32[i5 >> 2] = i12;
+ HEAP32[i12 + 0 >> 2] = HEAP32[i13 + 0 >> 2];
+ HEAP32[i12 + 4 >> 2] = HEAP32[i13 + 4 >> 2];
+ HEAP32[i12 + 8 >> 2] = HEAP32[i13 + 8 >> 2];
+ HEAP32[i12 + 12 >> 2] = HEAP32[i13 + 12 >> 2];
+ HEAP32[i12 + 16 >> 2] = HEAP32[i13 + 16 >> 2];
+ HEAP32[i12 + 20 >> 2] = HEAP32[i13 + 20 >> 2];
+ HEAP32[i12 + 24 >> 2] = HEAP32[i13 + 24 >> 2];
+ HEAP32[i12 + 28 >> 2] = HEAP32[i13 + 28 >> 2];
+ HEAP32[i13 + 28 >> 2] = 0;
+ HEAP32[i14 >> 2] = 0;
+ i12 = i13;
+ break;
+ }
+ i13 = i5 + 0 | 0;
+ i12 = i13 + 124 | 0;
+ do {
+ HEAP32[i13 >> 2] = 0;
+ i13 = i13 + 4 | 0;
+ } while ((i13 | 0) < (i12 | 0));
+ i15 = i2 + 12 | 0;
+ i13 = HEAP32[i2 + 28 >> 2] | 0;
+ i12 = 0;
+ i20 = 1;
+ i16 = 0;
+ i14 = 1;
+ while (1) {
+ if ((i14 | 0) > (i13 | 0)) {
+ if ((i20 | 0) > (i13 | 0)) {
+ break;
+ } else {
+ i19 = i13;
+ }
+ } else {
+ i19 = i14;
+ }
+ if ((i20 | 0) > (i19 | 0)) {
+ i18 = i20;
+ i17 = 0;
+ } else {
+ i18 = HEAP32[i15 >> 2] | 0;
+ i17 = 0;
+ while (1) {
+ i17 = ((HEAP32[i18 + (i20 + -1 << 4) + 8 >> 2] | 0) != 0) + i17 | 0;
+ if ((i20 | 0) >= (i19 | 0)) {
+ break;
+ } else {
+ i20 = i20 + 1 | 0;
+ }
+ }
+ i18 = i19 + 1 | 0;
+ }
+ i20 = i5 + (i16 << 2) | 0;
+ HEAP32[i20 >> 2] = (HEAP32[i20 >> 2] | 0) + i17;
+ i12 = i17 + i12 | 0;
+ i16 = i16 + 1 | 0;
+ if ((i16 | 0) < 31) {
+ i20 = i18;
+ i14 = i14 << 1;
+ } else {
+ break;
+ }
+ }
+ i14 = 0;
+ i15 = 1 << (HEAPU8[i2 + 7 | 0] | 0);
+ i13 = 0;
+ L32 : while (1) {
+ i16 = i15;
+ while (1) {
+ i15 = i16 + -1 | 0;
+ if ((i16 | 0) == 0) {
+ break L32;
+ }
+ i16 = HEAP32[i11 >> 2] | 0;
+ if ((HEAP32[i16 + (i15 << 5) + 8 >> 2] | 0) == 0) {
+ i16 = i15;
+ } else {
+ break;
+ }
+ }
+ if (((HEAP32[i16 + (i15 << 5) + 24 >> 2] | 0) == 3 ? (d21 = +HEAPF64[i16 + (i15 << 5) + 16 >> 3], HEAPF64[i10 >> 3] = d21 + 6755399441055744.0, i9 = HEAP32[i10 >> 2] | 0, +(i9 | 0) == d21) : 0) ? (i9 + -1 | 0) >>> 0 < 1073741824 : 0) {
+ i16 = i5 + ((_luaO_ceillog2(i9) | 0) << 2) | 0;
+ HEAP32[i16 >> 2] = (HEAP32[i16 >> 2] | 0) + 1;
+ i16 = 1;
+ } else {
+ i16 = 0;
+ }
+ i14 = i16 + i14 | 0;
+ i13 = i13 + 1 | 0;
+ }
+ i9 = i14 + i12 | 0;
+ if (((HEAP32[i6 >> 2] | 0) == 3 ? (d21 = +HEAPF64[i1 >> 3], HEAPF64[i8 >> 3] = d21 + 6755399441055744.0, i7 = HEAP32[i8 >> 2] | 0, +(i7 | 0) == d21) : 0) ? (i7 + -1 | 0) >>> 0 < 1073741824 : 0) {
+ i6 = i5 + ((_luaO_ceillog2(i7) | 0) << 2) | 0;
+ HEAP32[i6 >> 2] = (HEAP32[i6 >> 2] | 0) + 1;
+ i6 = 1;
+ } else {
+ i6 = 0;
+ }
+ i7 = i9 + i6 | 0;
+ L49 : do {
+ if ((i7 | 0) > 0) {
+ i14 = 0;
+ i10 = 0;
+ i6 = 0;
+ i8 = 0;
+ i11 = 0;
+ i9 = 1;
+ while (1) {
+ i15 = HEAP32[i5 + (i6 << 2) >> 2] | 0;
+ if ((i15 | 0) > 0) {
+ i15 = i15 + i10 | 0;
+ i14 = (i15 | 0) > (i14 | 0);
+ i10 = i15;
+ i8 = i14 ? i9 : i8;
+ i11 = i14 ? i15 : i11;
+ }
+ if ((i10 | 0) == (i7 | 0)) {
+ break L49;
+ }
+ i9 = i9 << 1;
+ i14 = (i9 | 0) / 2 | 0;
+ if ((i14 | 0) < (i7 | 0)) {
+ i6 = i6 + 1 | 0;
+ } else {
+ break;
+ }
+ }
+ } else {
+ i8 = 0;
+ i11 = 0;
+ }
+ } while (0);
+ _luaH_resize(i3, i2, i8, i12 + 1 + i13 - i11 | 0);
+ i5 = _luaH_get(i2, i1) | 0;
+ if ((i5 | 0) != 5192) {
+ i20 = i5;
+ STACKTOP = i4;
+ return i20 | 0;
+ }
+ i20 = _luaH_newkey(i3, i2, i1) | 0;
+ STACKTOP = i4;
+ return i20 | 0;
+ } else {
+ i12 = i13;
+ }
+ } while (0);
+ i18 = i1;
+ i19 = HEAP32[i18 + 4 >> 2] | 0;
+ i20 = i12 + 16 | 0;
+ HEAP32[i20 >> 2] = HEAP32[i18 >> 2];
+ HEAP32[i20 + 4 >> 2] = i19;
+ HEAP32[i12 + 24 >> 2] = HEAP32[i6 >> 2];
+ if (((HEAP32[i6 >> 2] & 64 | 0) != 0 ? !((HEAP8[(HEAP32[i1 >> 2] | 0) + 5 | 0] & 3) == 0) : 0) ? !((HEAP8[i2 + 5 | 0] & 4) == 0) : 0) {
+ _luaC_barrierback_(i3, i2);
+ }
+ i20 = i12;
+ STACKTOP = i4;
+ return i20 | 0;
+}
+function _luaV_concat(i7, i10) {
+ i7 = i7 | 0;
+ i10 = i10 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i8 = 0, i9 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0;
+ i5 = STACKTOP;
+ STACKTOP = STACKTOP + 48 | 0;
+ i9 = i5;
+ i8 = i5 + 8 | 0;
+ i6 = i7 + 8 | 0;
+ i2 = i7 + 12 | 0;
+ i3 = i7 + 28 | 0;
+ i4 = i7 + 16 | 0;
+ i11 = HEAP32[i6 >> 2] | 0;
+ L1 : while (1) {
+ i14 = i11 + -32 | 0;
+ i12 = i11 + -24 | 0;
+ i17 = HEAP32[i12 >> 2] | 0;
+ i13 = i11 + -16 | 0;
+ do {
+ if ((i17 & 15 | 0) == 4 | (i17 | 0) == 3) {
+ i15 = i11 + -8 | 0;
+ i16 = HEAP32[i15 >> 2] | 0;
+ if ((i16 & 15 | 0) == 4) {
+ i16 = i13;
+ } else {
+ if ((i16 | 0) != 3) {
+ i1 = 7;
+ break;
+ }
+ HEAPF64[tempDoublePtr >> 3] = +HEAPF64[i13 >> 3];
+ HEAP32[i9 >> 2] = HEAP32[tempDoublePtr >> 2];
+ HEAP32[i9 + 4 >> 2] = HEAP32[tempDoublePtr + 4 >> 2];
+ i16 = _luaS_newlstr(i7, i8, _sprintf(i8 | 0, 8936, i9 | 0) | 0) | 0;
+ HEAP32[i13 >> 2] = i16;
+ HEAP32[i15 >> 2] = HEAPU8[i16 + 4 | 0] | 0 | 64;
+ i16 = i13;
+ i17 = HEAP32[i12 >> 2] | 0;
+ }
+ i16 = HEAP32[(HEAP32[i16 >> 2] | 0) + 12 >> 2] | 0;
+ i18 = (i17 & 15 | 0) == 4;
+ if ((i16 | 0) == 0) {
+ if (i18) {
+ i12 = 2;
+ break;
+ }
+ if ((i17 | 0) != 3) {
+ i12 = 2;
+ break;
+ }
+ HEAPF64[tempDoublePtr >> 3] = +HEAPF64[i14 >> 3];
+ HEAP32[i9 >> 2] = HEAP32[tempDoublePtr >> 2];
+ HEAP32[i9 + 4 >> 2] = HEAP32[tempDoublePtr + 4 >> 2];
+ i18 = _luaS_newlstr(i7, i8, _sprintf(i8 | 0, 8936, i9 | 0) | 0) | 0;
+ HEAP32[i14 >> 2] = i18;
+ HEAP32[i12 >> 2] = HEAPU8[i18 + 4 | 0] | 0 | 64;
+ i12 = 2;
+ break;
+ }
+ if (i18 ? (HEAP32[(HEAP32[i14 >> 2] | 0) + 12 >> 2] | 0) == 0 : 0) {
+ i16 = i13;
+ i17 = HEAP32[i16 + 4 >> 2] | 0;
+ i18 = i14;
+ HEAP32[i18 >> 2] = HEAP32[i16 >> 2];
+ HEAP32[i18 + 4 >> 2] = i17;
+ HEAP32[i12 >> 2] = HEAP32[i15 >> 2];
+ i12 = 2;
+ break;
+ }
+ L19 : do {
+ if ((i10 | 0) > 1) {
+ i12 = 1;
+ do {
+ i15 = ~i12;
+ i14 = i11 + (i15 << 4) | 0;
+ i15 = i11 + (i15 << 4) + 8 | 0;
+ i13 = HEAP32[i15 >> 2] | 0;
+ if ((i13 & 15 | 0) != 4) {
+ if ((i13 | 0) != 3) {
+ break L19;
+ }
+ HEAPF64[tempDoublePtr >> 3] = +HEAPF64[i14 >> 3];
+ HEAP32[i9 >> 2] = HEAP32[tempDoublePtr >> 2];
+ HEAP32[i9 + 4 >> 2] = HEAP32[tempDoublePtr + 4 >> 2];
+ i18 = _luaS_newlstr(i7, i8, _sprintf(i8 | 0, 8936, i9 | 0) | 0) | 0;
+ HEAP32[i14 >> 2] = i18;
+ HEAP32[i15 >> 2] = HEAPU8[i18 + 4 | 0] | 0 | 64;
+ }
+ i13 = HEAP32[(HEAP32[i14 >> 2] | 0) + 12 >> 2] | 0;
+ if (!(i13 >>> 0 < (-3 - i16 | 0) >>> 0)) {
+ i1 = 24;
+ break L1;
+ }
+ i16 = i13 + i16 | 0;
+ i12 = i12 + 1 | 0;
+ } while ((i12 | 0) < (i10 | 0));
+ } else {
+ i12 = 1;
+ }
+ } while (0);
+ i14 = _luaZ_openspace(i7, (HEAP32[i2 >> 2] | 0) + 144 | 0, i16) | 0;
+ i15 = i12;
+ i13 = 0;
+ do {
+ i17 = HEAP32[i11 + (0 - i15 << 4) >> 2] | 0;
+ i18 = HEAP32[i17 + 12 >> 2] | 0;
+ _memcpy(i14 + i13 | 0, i17 + 16 | 0, i18 | 0) | 0;
+ i13 = i18 + i13 | 0;
+ i15 = i15 + -1 | 0;
+ } while ((i15 | 0) > 0);
+ i18 = 0 - i12 | 0;
+ i17 = _luaS_newlstr(i7, i14, i13) | 0;
+ HEAP32[i11 + (i18 << 4) >> 2] = i17;
+ HEAP32[i11 + (i18 << 4) + 8 >> 2] = HEAPU8[i17 + 4 | 0] | 0 | 64;
+ } else {
+ i1 = 7;
+ }
+ } while (0);
+ if ((i1 | 0) == 7) {
+ i1 = 0;
+ i15 = _luaT_gettmbyobj(i7, i14, 15) | 0;
+ if ((HEAP32[i15 + 8 >> 2] | 0) == 0) {
+ i15 = _luaT_gettmbyobj(i7, i13, 15) | 0;
+ if ((HEAP32[i15 + 8 >> 2] | 0) == 0) {
+ i1 = 10;
+ break;
+ }
+ }
+ i18 = i14 - (HEAP32[i3 >> 2] | 0) | 0;
+ i16 = HEAP32[i6 >> 2] | 0;
+ HEAP32[i6 >> 2] = i16 + 16;
+ i20 = i15;
+ i19 = HEAP32[i20 + 4 >> 2] | 0;
+ i17 = i16;
+ HEAP32[i17 >> 2] = HEAP32[i20 >> 2];
+ HEAP32[i17 + 4 >> 2] = i19;
+ HEAP32[i16 + 8 >> 2] = HEAP32[i15 + 8 >> 2];
+ i15 = HEAP32[i6 >> 2] | 0;
+ HEAP32[i6 >> 2] = i15 + 16;
+ i16 = i14;
+ i17 = HEAP32[i16 + 4 >> 2] | 0;
+ i14 = i15;
+ HEAP32[i14 >> 2] = HEAP32[i16 >> 2];
+ HEAP32[i14 + 4 >> 2] = i17;
+ HEAP32[i15 + 8 >> 2] = HEAP32[i12 >> 2];
+ i12 = HEAP32[i6 >> 2] | 0;
+ HEAP32[i6 >> 2] = i12 + 16;
+ i15 = i13;
+ i14 = HEAP32[i15 + 4 >> 2] | 0;
+ i17 = i12;
+ HEAP32[i17 >> 2] = HEAP32[i15 >> 2];
+ HEAP32[i17 + 4 >> 2] = i14;
+ HEAP32[i12 + 8 >> 2] = HEAP32[i11 + -8 >> 2];
+ _luaD_call(i7, (HEAP32[i6 >> 2] | 0) + -48 | 0, 1, HEAP8[(HEAP32[i4 >> 2] | 0) + 18 | 0] & 1);
+ i12 = HEAP32[i3 >> 2] | 0;
+ i17 = HEAP32[i6 >> 2] | 0;
+ i14 = i17 + -16 | 0;
+ HEAP32[i6 >> 2] = i14;
+ i15 = HEAP32[i14 + 4 >> 2] | 0;
+ i16 = i12 + i18 | 0;
+ HEAP32[i16 >> 2] = HEAP32[i14 >> 2];
+ HEAP32[i16 + 4 >> 2] = i15;
+ HEAP32[i12 + (i18 + 8) >> 2] = HEAP32[i17 + -8 >> 2];
+ i12 = 2;
+ }
+ i10 = i10 + 1 - i12 | 0;
+ i11 = (HEAP32[i6 >> 2] | 0) + (1 - i12 << 4) | 0;
+ HEAP32[i6 >> 2] = i11;
+ if ((i10 | 0) <= 1) {
+ i1 = 30;
+ break;
+ }
+ }
+ if ((i1 | 0) == 10) {
+ _luaG_concaterror(i7, i14, i13);
+ } else if ((i1 | 0) == 24) {
+ _luaG_runerror(i7, 9e3, i9);
+ } else if ((i1 | 0) == 30) {
+ STACKTOP = i5;
+ return;
+ }
+}
+function _str_gsub(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0, i23 = 0, i24 = 0, i25 = 0, i26 = 0, i27 = 0, i28 = 0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 1344 | 0;
+ i4 = i3;
+ i5 = i3 + 1336 | 0;
+ i14 = i3 + 1332 | 0;
+ i10 = i3 + 1328 | 0;
+ i6 = i3 + 1048 | 0;
+ i2 = i3 + 8 | 0;
+ i20 = _luaL_checklstring(i1, 1, i14) | 0;
+ i13 = _luaL_checklstring(i1, 2, i10) | 0;
+ i8 = _lua_type(i1, 3) | 0;
+ i9 = _luaL_optinteger(i1, 4, (HEAP32[i14 >> 2] | 0) + 1 | 0) | 0;
+ i7 = (HEAP8[i13] | 0) == 94;
+ if (!((i8 + -3 | 0) >>> 0 < 2 | (i8 | 0) == 6 | (i8 | 0) == 5)) {
+ _luaL_argerror(i1, 3, 7528) | 0;
+ }
+ _luaL_buffinit(i1, i2);
+ if (i7) {
+ i15 = (HEAP32[i10 >> 2] | 0) + -1 | 0;
+ HEAP32[i10 >> 2] = i15;
+ i13 = i13 + 1 | 0;
+ } else {
+ i15 = HEAP32[i10 >> 2] | 0;
+ }
+ i11 = i6 + 16 | 0;
+ HEAP32[i11 >> 2] = i1;
+ HEAP32[i6 >> 2] = 200;
+ i12 = i6 + 4 | 0;
+ HEAP32[i12 >> 2] = i20;
+ i10 = i6 + 8 | 0;
+ HEAP32[i10 >> 2] = i20 + (HEAP32[i14 >> 2] | 0);
+ HEAP32[i6 + 12 >> 2] = i13 + i15;
+ i14 = i6 + 20 | 0;
+ i15 = i2 + 8 | 0;
+ i18 = i2 + 4 | 0;
+ i16 = i6 + 28 | 0;
+ i17 = i6 + 24 | 0;
+ i22 = 0;
+ while (1) {
+ if (!(i22 >>> 0 < i9 >>> 0)) {
+ i19 = 48;
+ break;
+ }
+ HEAP32[i14 >> 2] = 0;
+ i21 = _match(i6, i20, i13) | 0;
+ if ((i21 | 0) != 0) {
+ i22 = i22 + 1 | 0;
+ i23 = HEAP32[i11 >> 2] | 0;
+ if ((i8 | 0) == 5) {
+ do {
+ if ((HEAP32[i14 >> 2] | 0) > 0) {
+ i24 = HEAP32[i16 >> 2] | 0;
+ if (!((i24 | 0) == -1)) {
+ i25 = HEAP32[i17 >> 2] | 0;
+ if ((i24 | 0) == -2) {
+ _lua_pushinteger(i23, i25 + 1 - (HEAP32[i12 >> 2] | 0) | 0);
+ break;
+ } else {
+ i19 = i23;
+ }
+ } else {
+ _luaL_error(i23, 7248, i4) | 0;
+ i19 = HEAP32[i11 >> 2] | 0;
+ i25 = HEAP32[i17 >> 2] | 0;
+ }
+ _lua_pushlstring(i19, i25, i24) | 0;
+ } else {
+ _lua_pushlstring(i23, i20, i21 - i20 | 0) | 0;
+ }
+ } while (0);
+ _lua_gettable(i23, 3);
+ i19 = 37;
+ } else if ((i8 | 0) != 6) {
+ i24 = _lua_tolstring(i23, 3, i5) | 0;
+ if ((HEAP32[i5 >> 2] | 0) != 0) {
+ i23 = i21 - i20 | 0;
+ i25 = 0;
+ do {
+ i26 = i24 + i25 | 0;
+ i27 = HEAP8[i26] | 0;
+ do {
+ if (i27 << 24 >> 24 == 37) {
+ i25 = i25 + 1 | 0;
+ i26 = i24 + i25 | 0;
+ i28 = HEAP8[i26] | 0;
+ i27 = i28 << 24 >> 24;
+ if (((i28 & 255) + -48 | 0) >>> 0 < 10) {
+ if (i28 << 24 >> 24 == 48) {
+ _luaL_addlstring(i2, i20, i23);
+ break;
+ } else {
+ _push_onecapture(i6, i27 + -49 | 0, i20, i21);
+ _luaL_addvalue(i2);
+ break;
+ }
+ }
+ if (!(i28 << 24 >> 24 == 37)) {
+ i28 = HEAP32[i11 >> 2] | 0;
+ HEAP32[i4 >> 2] = 37;
+ _luaL_error(i28, 7600, i4) | 0;
+ }
+ i27 = HEAP32[i15 >> 2] | 0;
+ if (!(i27 >>> 0 < (HEAP32[i18 >> 2] | 0) >>> 0)) {
+ _luaL_prepbuffsize(i2, 1) | 0;
+ i27 = HEAP32[i15 >> 2] | 0;
+ }
+ i28 = HEAP8[i26] | 0;
+ HEAP32[i15 >> 2] = i27 + 1;
+ HEAP8[(HEAP32[i2 >> 2] | 0) + i27 | 0] = i28;
+ } else {
+ i28 = HEAP32[i15 >> 2] | 0;
+ if (!(i28 >>> 0 < (HEAP32[i18 >> 2] | 0) >>> 0)) {
+ _luaL_prepbuffsize(i2, 1) | 0;
+ i28 = HEAP32[i15 >> 2] | 0;
+ i27 = HEAP8[i26] | 0;
+ }
+ HEAP32[i15 >> 2] = i28 + 1;
+ HEAP8[(HEAP32[i2 >> 2] | 0) + i28 | 0] = i27;
+ }
+ } while (0);
+ i25 = i25 + 1 | 0;
+ } while (i25 >>> 0 < (HEAP32[i5 >> 2] | 0) >>> 0);
+ }
+ } else {
+ _lua_pushvalue(i23, 3);
+ i19 = HEAP32[i14 >> 2] | 0;
+ i19 = (i19 | 0) != 0 | (i20 | 0) == 0 ? i19 : 1;
+ _luaL_checkstack(HEAP32[i11 >> 2] | 0, i19, 7200);
+ if ((i19 | 0) > 0) {
+ i24 = 0;
+ do {
+ _push_onecapture(i6, i24, i20, i21);
+ i24 = i24 + 1 | 0;
+ } while ((i24 | 0) != (i19 | 0));
+ }
+ _lua_callk(i23, i19, 1, 0, 0);
+ i19 = 37;
+ }
+ if ((i19 | 0) == 37) {
+ i19 = 0;
+ if ((_lua_toboolean(i23, -1) | 0) != 0) {
+ if ((_lua_isstring(i23, -1) | 0) == 0) {
+ HEAP32[i4 >> 2] = _lua_typename(i23, _lua_type(i23, -1) | 0) | 0;
+ _luaL_error(i23, 7560, i4) | 0;
+ }
+ } else {
+ _lua_settop(i23, -2);
+ _lua_pushlstring(i23, i20, i21 - i20 | 0) | 0;
+ }
+ _luaL_addvalue(i2);
+ }
+ if (i21 >>> 0 > i20 >>> 0) {
+ i20 = i21;
+ } else {
+ i19 = 43;
+ }
+ } else {
+ i19 = 43;
+ }
+ if ((i19 | 0) == 43) {
+ i19 = 0;
+ if (!(i20 >>> 0 < (HEAP32[i10 >> 2] | 0) >>> 0)) {
+ i19 = 48;
+ break;
+ }
+ i21 = HEAP32[i15 >> 2] | 0;
+ if (!(i21 >>> 0 < (HEAP32[i18 >> 2] | 0) >>> 0)) {
+ _luaL_prepbuffsize(i2, 1) | 0;
+ i21 = HEAP32[i15 >> 2] | 0;
+ }
+ i28 = HEAP8[i20] | 0;
+ HEAP32[i15 >> 2] = i21 + 1;
+ HEAP8[(HEAP32[i2 >> 2] | 0) + i21 | 0] = i28;
+ i20 = i20 + 1 | 0;
+ }
+ if (i7) {
+ i19 = 48;
+ break;
+ }
+ }
+ if ((i19 | 0) == 48) {
+ _luaL_addlstring(i2, i20, (HEAP32[i10 >> 2] | 0) - i20 | 0);
+ _luaL_pushresult(i2);
+ _lua_pushinteger(i1, i22);
+ STACKTOP = i3;
+ return 2;
+ }
+ return 0;
+}
+function _constructor(i11, i13) {
+ i11 = i11 | 0;
+ i13 = i13 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i12 = 0, i14 = 0, i15 = 0, i16 = 0;
+ i5 = STACKTOP;
+ STACKTOP = STACKTOP + 64 | 0;
+ i10 = i5 + 40 | 0;
+ i8 = i5;
+ i12 = i11 + 48 | 0;
+ i6 = HEAP32[i12 >> 2] | 0;
+ i9 = HEAP32[i11 + 4 >> 2] | 0;
+ i2 = _luaK_codeABC(i6, 11, 0, 0, 0) | 0;
+ i7 = i8 + 36 | 0;
+ HEAP32[i7 >> 2] = 0;
+ i4 = i8 + 28 | 0;
+ HEAP32[i4 >> 2] = 0;
+ i3 = i8 + 32 | 0;
+ HEAP32[i3 >> 2] = 0;
+ i1 = i8 + 24 | 0;
+ HEAP32[i1 >> 2] = i13;
+ HEAP32[i13 + 16 >> 2] = -1;
+ HEAP32[i13 + 20 >> 2] = -1;
+ HEAP32[i13 >> 2] = 11;
+ HEAP32[i13 + 8 >> 2] = i2;
+ HEAP32[i8 + 16 >> 2] = -1;
+ HEAP32[i8 + 20 >> 2] = -1;
+ HEAP32[i8 >> 2] = 0;
+ HEAP32[i8 + 8 >> 2] = 0;
+ _luaK_exp2nextreg(HEAP32[i12 >> 2] | 0, i13);
+ i13 = i11 + 16 | 0;
+ if ((HEAP32[i13 >> 2] | 0) != 123) {
+ _error_expected(i11, 123);
+ }
+ _luaX_next(i11);
+ L4 : do {
+ if ((HEAP32[i13 >> 2] | 0) != 125) {
+ L5 : while (1) {
+ if ((HEAP32[i8 >> 2] | 0) != 0 ? (_luaK_exp2nextreg(i6, i8), HEAP32[i8 >> 2] = 0, (HEAP32[i7 >> 2] | 0) == 50) : 0) {
+ _luaK_setlist(i6, HEAP32[(HEAP32[i1 >> 2] | 0) + 8 >> 2] | 0, HEAP32[i3 >> 2] | 0, 50);
+ HEAP32[i7 >> 2] = 0;
+ }
+ i14 = HEAP32[i13 >> 2] | 0;
+ do {
+ if ((i14 | 0) == 288) {
+ if ((_luaX_lookahead(i11) | 0) == 61) {
+ _recfield(i11, i8);
+ break;
+ }
+ _subexpr(i11, i8, 0) | 0;
+ i14 = HEAP32[i12 >> 2] | 0;
+ i15 = HEAP32[i3 >> 2] | 0;
+ if ((i15 | 0) > 2147483645) {
+ i12 = 10;
+ break L5;
+ }
+ HEAP32[i3 >> 2] = i15 + 1;
+ HEAP32[i7 >> 2] = (HEAP32[i7 >> 2] | 0) + 1;
+ } else if ((i14 | 0) == 91) {
+ _recfield(i11, i8);
+ } else {
+ _subexpr(i11, i8, 0) | 0;
+ i14 = HEAP32[i12 >> 2] | 0;
+ i15 = HEAP32[i3 >> 2] | 0;
+ if ((i15 | 0) > 2147483645) {
+ i12 = 17;
+ break L5;
+ }
+ HEAP32[i3 >> 2] = i15 + 1;
+ HEAP32[i7 >> 2] = (HEAP32[i7 >> 2] | 0) + 1;
+ }
+ } while (0);
+ i14 = HEAP32[i13 >> 2] | 0;
+ if ((i14 | 0) == 44) {
+ _luaX_next(i11);
+ } else if ((i14 | 0) == 59) {
+ _luaX_next(i11);
+ } else {
+ break L4;
+ }
+ if ((HEAP32[i13 >> 2] | 0) == 125) {
+ break L4;
+ }
+ }
+ if ((i12 | 0) == 10) {
+ i12 = i14 + 12 | 0;
+ i13 = HEAP32[(HEAP32[i12 >> 2] | 0) + 52 >> 2] | 0;
+ i14 = HEAP32[(HEAP32[i14 >> 2] | 0) + 64 >> 2] | 0;
+ if ((i14 | 0) == 0) {
+ i16 = 6552;
+ HEAP32[i10 >> 2] = 6528;
+ i15 = i10 + 4 | 0;
+ HEAP32[i15 >> 2] = 2147483645;
+ i15 = i10 + 8 | 0;
+ HEAP32[i15 >> 2] = i16;
+ i15 = _luaO_pushfstring(i13, 6592, i10) | 0;
+ i16 = HEAP32[i12 >> 2] | 0;
+ _luaX_syntaxerror(i16, i15);
+ }
+ HEAP32[i10 >> 2] = i14;
+ i15 = _luaO_pushfstring(i13, 6568, i10) | 0;
+ HEAP32[i10 >> 2] = 6528;
+ i16 = i10 + 4 | 0;
+ HEAP32[i16 >> 2] = 2147483645;
+ i16 = i10 + 8 | 0;
+ HEAP32[i16 >> 2] = i15;
+ i16 = _luaO_pushfstring(i13, 6592, i10) | 0;
+ i15 = HEAP32[i12 >> 2] | 0;
+ _luaX_syntaxerror(i15, i16);
+ } else if ((i12 | 0) == 17) {
+ i13 = i14 + 12 | 0;
+ i12 = HEAP32[(HEAP32[i13 >> 2] | 0) + 52 >> 2] | 0;
+ i14 = HEAP32[(HEAP32[i14 >> 2] | 0) + 64 >> 2] | 0;
+ if ((i14 | 0) == 0) {
+ i15 = 6552;
+ HEAP32[i10 >> 2] = 6528;
+ i16 = i10 + 4 | 0;
+ HEAP32[i16 >> 2] = 2147483645;
+ i16 = i10 + 8 | 0;
+ HEAP32[i16 >> 2] = i15;
+ i16 = _luaO_pushfstring(i12, 6592, i10) | 0;
+ i15 = HEAP32[i13 >> 2] | 0;
+ _luaX_syntaxerror(i15, i16);
+ }
+ HEAP32[i10 >> 2] = i14;
+ i15 = _luaO_pushfstring(i12, 6568, i10) | 0;
+ HEAP32[i10 >> 2] = 6528;
+ i16 = i10 + 4 | 0;
+ HEAP32[i16 >> 2] = 2147483645;
+ i16 = i10 + 8 | 0;
+ HEAP32[i16 >> 2] = i15;
+ i16 = _luaO_pushfstring(i12, 6592, i10) | 0;
+ i15 = HEAP32[i13 >> 2] | 0;
+ _luaX_syntaxerror(i15, i16);
+ }
+ }
+ } while (0);
+ _check_match(i11, 125, 123, i9);
+ i9 = HEAP32[i7 >> 2] | 0;
+ do {
+ if ((i9 | 0) != 0) {
+ i10 = HEAP32[i8 >> 2] | 0;
+ if ((i10 | 0) != 0) if ((i10 | 0) == 13 | (i10 | 0) == 12) {
+ _luaK_setreturns(i6, i8, -1);
+ _luaK_setlist(i6, HEAP32[(HEAP32[i1 >> 2] | 0) + 8 >> 2] | 0, HEAP32[i3 >> 2] | 0, -1);
+ HEAP32[i3 >> 2] = (HEAP32[i3 >> 2] | 0) + -1;
+ break;
+ } else {
+ _luaK_exp2nextreg(i6, i8);
+ i9 = HEAP32[i7 >> 2] | 0;
+ }
+ _luaK_setlist(i6, HEAP32[(HEAP32[i1 >> 2] | 0) + 8 >> 2] | 0, HEAP32[i3 >> 2] | 0, i9);
+ }
+ } while (0);
+ i16 = HEAP32[(HEAP32[(HEAP32[i6 >> 2] | 0) + 12 >> 2] | 0) + (i2 << 2) >> 2] & 8388607;
+ i16 = (_luaO_int2fb(HEAP32[i3 >> 2] | 0) | 0) << 23 | i16;
+ HEAP32[(HEAP32[(HEAP32[i6 >> 2] | 0) + 12 >> 2] | 0) + (i2 << 2) >> 2] = i16;
+ i16 = (_luaO_int2fb(HEAP32[i4 >> 2] | 0) | 0) << 14 & 8372224 | i16 & -8372225;
+ HEAP32[(HEAP32[(HEAP32[i6 >> 2] | 0) + 12 >> 2] | 0) + (i2 << 2) >> 2] = i16;
+ STACKTOP = i5;
+ return;
+}
+function _luaK_prefix(i4, i14, i7, i13) {
+ i4 = i4 | 0;
+ i14 = i14 | 0;
+ i7 = i7 | 0;
+ i13 = i13 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i5 = 0, i6 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 32 | 0;
+ i12 = i1;
+ HEAP32[i12 + 20 >> 2] = -1;
+ HEAP32[i12 + 16 >> 2] = -1;
+ HEAP32[i12 >> 2] = 5;
+ HEAPF64[i12 + 8 >> 3] = 0.0;
+ if ((i14 | 0) == 1) {
+ _luaK_dischargevars(i4, i7);
+ switch (HEAP32[i7 >> 2] | 0) {
+ case 2:
+ case 5:
+ case 4:
+ {
+ HEAP32[i7 >> 2] = 3;
+ break;
+ }
+ case 10:
+ {
+ i13 = HEAP32[(HEAP32[i4 >> 2] | 0) + 12 >> 2] | 0;
+ i12 = HEAP32[i7 + 8 >> 2] | 0;
+ i10 = i13 + (i12 << 2) | 0;
+ if (!((i12 | 0) > 0 ? (i11 = i13 + (i12 + -1 << 2) | 0, i9 = HEAP32[i11 >> 2] | 0, (HEAP8[5584 + (i9 & 63) | 0] | 0) < 0) : 0)) {
+ i11 = i10;
+ i9 = HEAP32[i10 >> 2] | 0;
+ }
+ HEAP32[i11 >> 2] = ((i9 & 16320 | 0) == 0) << 6 | i9 & -16321;
+ break;
+ }
+ case 6:
+ {
+ i8 = 25;
+ break;
+ }
+ case 3:
+ case 1:
+ {
+ HEAP32[i7 >> 2] = 2;
+ break;
+ }
+ case 11:
+ {
+ i12 = i4 + 48 | 0;
+ i8 = HEAP8[i12] | 0;
+ i11 = (i8 & 255) + 1 | 0;
+ i9 = (HEAP32[i4 >> 2] | 0) + 78 | 0;
+ do {
+ if (i11 >>> 0 > (HEAPU8[i9] | 0) >>> 0) {
+ if (i11 >>> 0 > 249) {
+ _luaX_syntaxerror(HEAP32[i4 + 12 >> 2] | 0, 10536);
+ } else {
+ HEAP8[i9] = i11;
+ i10 = HEAP8[i12] | 0;
+ break;
+ }
+ } else {
+ i10 = i8;
+ }
+ } while (0);
+ i14 = (i10 & 255) + 1 | 0;
+ HEAP8[i12] = i14;
+ _discharge2reg(i4, i7, (i14 & 255) + -1 | 0);
+ if ((HEAP32[i7 >> 2] | 0) == 6) {
+ i8 = 25;
+ } else {
+ i9 = i7 + 8 | 0;
+ i8 = 28;
+ }
+ break;
+ }
+ default:
+ {}
+ }
+ if ((i8 | 0) == 25) {
+ i8 = i7 + 8 | 0;
+ i9 = HEAP32[i8 >> 2] | 0;
+ if ((i9 & 256 | 0) == 0 ? (HEAPU8[i4 + 46 | 0] | 0) <= (i9 | 0) : 0) {
+ i9 = i4 + 48 | 0;
+ HEAP8[i9] = (HEAP8[i9] | 0) + -1 << 24 >> 24;
+ i9 = i8;
+ i8 = 28;
+ } else {
+ i9 = i8;
+ i8 = 28;
+ }
+ }
+ if ((i8 | 0) == 28) {
+ HEAP32[i9 >> 2] = _luaK_code(i4, HEAP32[i9 >> 2] << 23 | 20) | 0;
+ HEAP32[i7 >> 2] = 11;
+ }
+ i14 = i7 + 20 | 0;
+ i8 = HEAP32[i14 >> 2] | 0;
+ i7 = i7 + 16 | 0;
+ i9 = HEAP32[i7 >> 2] | 0;
+ HEAP32[i14 >> 2] = i9;
+ HEAP32[i7 >> 2] = i8;
+ if (!((i9 | 0) == -1)) {
+ i8 = HEAP32[(HEAP32[i4 >> 2] | 0) + 12 >> 2] | 0;
+ do {
+ i12 = i8 + (i9 << 2) | 0;
+ if ((i9 | 0) > 0 ? (i5 = i8 + (i9 + -1 << 2) | 0, i6 = HEAP32[i5 >> 2] | 0, (HEAP8[5584 + (i6 & 63) | 0] | 0) < 0) : 0) {
+ i10 = i5;
+ i11 = i6;
+ } else {
+ i10 = i12;
+ i11 = HEAP32[i12 >> 2] | 0;
+ }
+ if ((i11 & 63 | 0) == 28) {
+ HEAP32[i10 >> 2] = i11 & 8372224 | i11 >>> 23 << 6 | 27;
+ }
+ i10 = ((HEAP32[i12 >> 2] | 0) >>> 14) + -131071 | 0;
+ if ((i10 | 0) == -1) {
+ break;
+ }
+ i9 = i9 + 1 + i10 | 0;
+ } while (!((i9 | 0) == -1));
+ i8 = HEAP32[i7 >> 2] | 0;
+ }
+ if ((i8 | 0) == -1) {
+ STACKTOP = i1;
+ return;
+ }
+ i4 = HEAP32[(HEAP32[i4 >> 2] | 0) + 12 >> 2] | 0;
+ while (1) {
+ i6 = i4 + (i8 << 2) | 0;
+ if ((i8 | 0) > 0 ? (i2 = i4 + (i8 + -1 << 2) | 0, i3 = HEAP32[i2 >> 2] | 0, (HEAP8[5584 + (i3 & 63) | 0] | 0) < 0) : 0) {
+ i7 = i2;
+ i5 = i3;
+ } else {
+ i7 = i6;
+ i5 = HEAP32[i6 >> 2] | 0;
+ }
+ if ((i5 & 63 | 0) == 28) {
+ HEAP32[i7 >> 2] = i5 & 8372224 | i5 >>> 23 << 6 | 27;
+ }
+ i5 = ((HEAP32[i6 >> 2] | 0) >>> 14) + -131071 | 0;
+ if ((i5 | 0) == -1) {
+ i8 = 54;
+ break;
+ }
+ i8 = i8 + 1 + i5 | 0;
+ if ((i8 | 0) == -1) {
+ i8 = 54;
+ break;
+ }
+ }
+ if ((i8 | 0) == 54) {
+ STACKTOP = i1;
+ return;
+ }
+ } else if ((i14 | 0) == 0) {
+ if (((HEAP32[i7 >> 2] | 0) == 5 ? (HEAP32[i7 + 16 >> 2] | 0) == -1 : 0) ? (HEAP32[i7 + 20 >> 2] | 0) == -1 : 0) {
+ i14 = i7 + 8 | 0;
+ HEAPF64[i14 >> 3] = -+HEAPF64[i14 >> 3];
+ STACKTOP = i1;
+ return;
+ }
+ _luaK_dischargevars(i4, i7);
+ if ((HEAP32[i7 >> 2] | 0) == 6) {
+ i2 = HEAP32[i7 + 8 >> 2] | 0;
+ if ((HEAP32[i7 + 16 >> 2] | 0) != (HEAP32[i7 + 20 >> 2] | 0)) {
+ if ((i2 | 0) < (HEAPU8[i4 + 46 | 0] | 0)) {
+ i8 = 10;
+ } else {
+ _exp2reg(i4, i7, i2);
+ }
+ }
+ } else {
+ i8 = 10;
+ }
+ if ((i8 | 0) == 10) {
+ _luaK_exp2nextreg(i4, i7);
+ }
+ _codearith(i4, 19, i7, i12, i13);
+ STACKTOP = i1;
+ return;
+ } else if ((i14 | 0) == 2) {
+ _luaK_dischargevars(i4, i7);
+ if ((HEAP32[i7 >> 2] | 0) == 6) {
+ i2 = HEAP32[i7 + 8 >> 2] | 0;
+ if ((HEAP32[i7 + 16 >> 2] | 0) != (HEAP32[i7 + 20 >> 2] | 0)) {
+ if ((i2 | 0) < (HEAPU8[i4 + 46 | 0] | 0)) {
+ i8 = 52;
+ } else {
+ _exp2reg(i4, i7, i2);
+ }
+ }
+ } else {
+ i8 = 52;
+ }
+ if ((i8 | 0) == 52) {
+ _luaK_exp2nextreg(i4, i7);
+ }
+ _codearith(i4, 21, i7, i12, i13);
+ STACKTOP = i1;
+ return;
+ } else {
+ STACKTOP = i1;
+ return;
+ }
+}
+function _subexpr(i6, i3, i7) {
+ i6 = i6 | 0;
+ i3 = i3 | 0;
+ i7 = i7 | 0;
+ var i1 = 0, i2 = 0, i4 = 0, i5 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 48 | 0;
+ i11 = i2 + 24 | 0;
+ i5 = i2;
+ i4 = i6 + 48 | 0;
+ i9 = HEAP32[i4 >> 2] | 0;
+ i1 = i6 + 52 | 0;
+ i12 = (HEAP32[i1 >> 2] | 0) + 38 | 0;
+ i13 = (HEAP16[i12 >> 1] | 0) + 1 << 16 >> 16;
+ HEAP16[i12 >> 1] = i13;
+ if ((i13 & 65535) > 200) {
+ i10 = i9 + 12 | 0;
+ i12 = HEAP32[(HEAP32[i10 >> 2] | 0) + 52 >> 2] | 0;
+ i13 = HEAP32[(HEAP32[i9 >> 2] | 0) + 64 >> 2] | 0;
+ if ((i13 | 0) == 0) {
+ i15 = 6552;
+ HEAP32[i11 >> 2] = 6360;
+ i14 = i11 + 4 | 0;
+ HEAP32[i14 >> 2] = 200;
+ i14 = i11 + 8 | 0;
+ HEAP32[i14 >> 2] = i15;
+ i14 = _luaO_pushfstring(i12, 6592, i11) | 0;
+ i15 = HEAP32[i10 >> 2] | 0;
+ _luaX_syntaxerror(i15, i14);
+ }
+ HEAP32[i11 >> 2] = i13;
+ i14 = _luaO_pushfstring(i12, 6568, i11) | 0;
+ HEAP32[i11 >> 2] = 6360;
+ i15 = i11 + 4 | 0;
+ HEAP32[i15 >> 2] = 200;
+ i15 = i11 + 8 | 0;
+ HEAP32[i15 >> 2] = i14;
+ i15 = _luaO_pushfstring(i12, 6592, i11) | 0;
+ i14 = HEAP32[i10 >> 2] | 0;
+ _luaX_syntaxerror(i14, i15);
+ }
+ i10 = i6 + 16 | 0;
+ L8 : do {
+ switch (HEAP32[i10 >> 2] | 0) {
+ case 287:
+ {
+ HEAP32[i3 + 16 >> 2] = -1;
+ HEAP32[i3 + 20 >> 2] = -1;
+ HEAP32[i3 >> 2] = 5;
+ HEAP32[i3 + 8 >> 2] = 0;
+ HEAPF64[i3 + 8 >> 3] = +HEAPF64[i6 + 24 >> 3];
+ i8 = 20;
+ break;
+ }
+ case 271:
+ {
+ i9 = 1;
+ i8 = 8;
+ break;
+ }
+ case 289:
+ {
+ i8 = _luaK_stringK(i9, HEAP32[i6 + 24 >> 2] | 0) | 0;
+ HEAP32[i3 + 16 >> 2] = -1;
+ HEAP32[i3 + 20 >> 2] = -1;
+ HEAP32[i3 >> 2] = 4;
+ HEAP32[i3 + 8 >> 2] = i8;
+ i8 = 20;
+ break;
+ }
+ case 265:
+ {
+ _luaX_next(i6);
+ _body(i6, i3, 0, HEAP32[i6 + 4 >> 2] | 0);
+ break;
+ }
+ case 276:
+ {
+ HEAP32[i3 + 16 >> 2] = -1;
+ HEAP32[i3 + 20 >> 2] = -1;
+ HEAP32[i3 >> 2] = 2;
+ HEAP32[i3 + 8 >> 2] = 0;
+ i8 = 20;
+ break;
+ }
+ case 45:
+ {
+ i9 = 0;
+ i8 = 8;
+ break;
+ }
+ case 35:
+ {
+ i9 = 2;
+ i8 = 8;
+ break;
+ }
+ case 123:
+ {
+ _constructor(i6, i3);
+ break;
+ }
+ case 263:
+ {
+ HEAP32[i3 + 16 >> 2] = -1;
+ HEAP32[i3 + 20 >> 2] = -1;
+ HEAP32[i3 >> 2] = 3;
+ HEAP32[i3 + 8 >> 2] = 0;
+ i8 = 20;
+ break;
+ }
+ case 280:
+ {
+ if ((HEAP8[(HEAP32[i9 >> 2] | 0) + 77 | 0] | 0) == 0) {
+ _luaX_syntaxerror(i6, 6408);
+ } else {
+ i8 = _luaK_codeABC(i9, 38, 0, 1, 0) | 0;
+ HEAP32[i3 + 16 >> 2] = -1;
+ HEAP32[i3 + 20 >> 2] = -1;
+ HEAP32[i3 >> 2] = 13;
+ HEAP32[i3 + 8 >> 2] = i8;
+ i8 = 20;
+ break L8;
+ }
+ break;
+ }
+ case 270:
+ {
+ HEAP32[i3 + 16 >> 2] = -1;
+ HEAP32[i3 + 20 >> 2] = -1;
+ HEAP32[i3 >> 2] = 1;
+ HEAP32[i3 + 8 >> 2] = 0;
+ i8 = 20;
+ break;
+ }
+ default:
+ {
+ _suffixedexp(i6, i3);
+ }
+ }
+ } while (0);
+ if ((i8 | 0) == 8) {
+ i15 = HEAP32[i6 + 4 >> 2] | 0;
+ _luaX_next(i6);
+ _subexpr(i6, i3, 8) | 0;
+ _luaK_prefix(HEAP32[i4 >> 2] | 0, i9, i3, i15);
+ } else if ((i8 | 0) == 20) {
+ _luaX_next(i6);
+ }
+ switch (HEAP32[i10 >> 2] | 0) {
+ case 257:
+ {
+ i9 = 13;
+ break;
+ }
+ case 272:
+ {
+ i9 = 14;
+ break;
+ }
+ case 47:
+ {
+ i9 = 3;
+ break;
+ }
+ case 37:
+ {
+ i9 = 4;
+ break;
+ }
+ case 43:
+ {
+ i9 = 0;
+ break;
+ }
+ case 284:
+ {
+ i9 = 10;
+ break;
+ }
+ case 281:
+ {
+ i9 = 7;
+ break;
+ }
+ case 62:
+ {
+ i9 = 11;
+ break;
+ }
+ case 282:
+ {
+ i9 = 12;
+ break;
+ }
+ case 45:
+ {
+ i9 = 1;
+ break;
+ }
+ case 42:
+ {
+ i9 = 2;
+ break;
+ }
+ case 60:
+ {
+ i9 = 8;
+ break;
+ }
+ case 283:
+ {
+ i9 = 9;
+ break;
+ }
+ case 94:
+ {
+ i9 = 5;
+ break;
+ }
+ case 279:
+ {
+ i9 = 6;
+ break;
+ }
+ default:
+ {
+ i15 = 15;
+ i14 = HEAP32[i1 >> 2] | 0;
+ i14 = i14 + 38 | 0;
+ i13 = HEAP16[i14 >> 1] | 0;
+ i13 = i13 + -1 << 16 >> 16;
+ HEAP16[i14 >> 1] = i13;
+ STACKTOP = i2;
+ return i15 | 0;
+ }
+ }
+ i8 = i6 + 4 | 0;
+ while (1) {
+ if ((HEAPU8[6376 + (i9 << 1) | 0] | 0) <= (i7 | 0)) {
+ i8 = 39;
+ break;
+ }
+ i15 = HEAP32[i8 >> 2] | 0;
+ _luaX_next(i6);
+ _luaK_infix(HEAP32[i4 >> 2] | 0, i9, i3);
+ i10 = _subexpr(i6, i5, HEAPU8[6377 + (i9 << 1) | 0] | 0) | 0;
+ _luaK_posfix(HEAP32[i4 >> 2] | 0, i9, i3, i5, i15);
+ if ((i10 | 0) == 15) {
+ i9 = 15;
+ i8 = 39;
+ break;
+ } else {
+ i9 = i10;
+ }
+ }
+ if ((i8 | 0) == 39) {
+ i15 = HEAP32[i1 >> 2] | 0;
+ i15 = i15 + 38 | 0;
+ i14 = HEAP16[i15 >> 1] | 0;
+ i14 = i14 + -1 << 16 >> 16;
+ HEAP16[i15 >> 1] = i14;
+ STACKTOP = i2;
+ return i9 | 0;
+ }
+ return 0;
+}
+function _luaV_lessequal(i5, i3, i2) {
+ i5 = i5 | 0;
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ var i1 = 0, i4 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0;
+ i1 = STACKTOP;
+ i4 = i3 + 8 | 0;
+ i7 = HEAP32[i4 >> 2] | 0;
+ if ((i7 | 0) == 3) {
+ if ((HEAP32[i2 + 8 >> 2] | 0) == 3) {
+ i9 = +HEAPF64[i3 >> 3] <= +HEAPF64[i2 >> 3] | 0;
+ STACKTOP = i1;
+ return i9 | 0;
+ }
+ } else {
+ if ((i7 & 15 | 0) == 4 ? (HEAP32[i2 + 8 >> 2] & 15 | 0) == 4 : 0) {
+ i3 = HEAP32[i3 >> 2] | 0;
+ i6 = HEAP32[i2 >> 2] | 0;
+ i4 = i3 + 16 | 0;
+ i5 = i6 + 16 | 0;
+ i7 = _strcmp(i4, i5) | 0;
+ L8 : do {
+ if ((i7 | 0) == 0) {
+ i2 = HEAP32[i3 + 12 >> 2] | 0;
+ i3 = HEAP32[i6 + 12 >> 2] | 0;
+ i6 = i5;
+ while (1) {
+ i5 = _strlen(i4 | 0) | 0;
+ i7 = (i5 | 0) == (i2 | 0);
+ if ((i5 | 0) == (i3 | 0)) {
+ break;
+ }
+ if (i7) {
+ i7 = -1;
+ break L8;
+ }
+ i5 = i5 + 1 | 0;
+ i4 = i4 + i5 | 0;
+ i6 = i6 + i5 | 0;
+ i7 = _strcmp(i4, i6) | 0;
+ if ((i7 | 0) == 0) {
+ i2 = i2 - i5 | 0;
+ i3 = i3 - i5 | 0;
+ } else {
+ break L8;
+ }
+ }
+ i7 = i7 & 1 ^ 1;
+ }
+ } while (0);
+ i9 = (i7 | 0) < 1 | 0;
+ STACKTOP = i1;
+ return i9 | 0;
+ }
+ }
+ i7 = i5 + 8 | 0;
+ i8 = HEAP32[i7 >> 2] | 0;
+ i9 = _luaT_gettmbyobj(i5, i3, 14) | 0;
+ if ((HEAP32[i9 + 8 >> 2] | 0) == 0) {
+ i9 = _luaT_gettmbyobj(i5, i2, 14) | 0;
+ if ((HEAP32[i9 + 8 >> 2] | 0) == 0) {
+ i8 = HEAP32[i7 >> 2] | 0;
+ i9 = _luaT_gettmbyobj(i5, i2, 13) | 0;
+ if ((HEAP32[i9 + 8 >> 2] | 0) == 0) {
+ i9 = _luaT_gettmbyobj(i5, i3, 13) | 0;
+ if ((HEAP32[i9 + 8 >> 2] | 0) == 0) {
+ _luaG_ordererror(i5, i3, i2);
+ } else {
+ i6 = i9;
+ }
+ } else {
+ i6 = i9;
+ }
+ i10 = i5 + 28 | 0;
+ i9 = i8 - (HEAP32[i10 >> 2] | 0) | 0;
+ i8 = HEAP32[i7 >> 2] | 0;
+ HEAP32[i7 >> 2] = i8 + 16;
+ i13 = i6;
+ i11 = HEAP32[i13 + 4 >> 2] | 0;
+ i12 = i8;
+ HEAP32[i12 >> 2] = HEAP32[i13 >> 2];
+ HEAP32[i12 + 4 >> 2] = i11;
+ HEAP32[i8 + 8 >> 2] = HEAP32[i6 + 8 >> 2];
+ i8 = HEAP32[i7 >> 2] | 0;
+ HEAP32[i7 >> 2] = i8 + 16;
+ i12 = i2;
+ i11 = HEAP32[i12 + 4 >> 2] | 0;
+ i6 = i8;
+ HEAP32[i6 >> 2] = HEAP32[i12 >> 2];
+ HEAP32[i6 + 4 >> 2] = i11;
+ HEAP32[i8 + 8 >> 2] = HEAP32[i2 + 8 >> 2];
+ i2 = HEAP32[i7 >> 2] | 0;
+ HEAP32[i7 >> 2] = i2 + 16;
+ i8 = i3;
+ i6 = HEAP32[i8 + 4 >> 2] | 0;
+ i3 = i2;
+ HEAP32[i3 >> 2] = HEAP32[i8 >> 2];
+ HEAP32[i3 + 4 >> 2] = i6;
+ HEAP32[i2 + 8 >> 2] = HEAP32[i4 >> 2];
+ _luaD_call(i5, (HEAP32[i7 >> 2] | 0) + -48 | 0, 1, HEAP8[(HEAP32[i5 + 16 >> 2] | 0) + 18 | 0] & 1);
+ i3 = HEAP32[i10 >> 2] | 0;
+ i2 = HEAP32[i7 >> 2] | 0;
+ i5 = i2 + -16 | 0;
+ HEAP32[i7 >> 2] = i5;
+ i6 = HEAP32[i5 + 4 >> 2] | 0;
+ i8 = i3 + i9 | 0;
+ HEAP32[i8 >> 2] = HEAP32[i5 >> 2];
+ HEAP32[i8 + 4 >> 2] = i6;
+ HEAP32[i3 + (i9 + 8) >> 2] = HEAP32[i2 + -8 >> 2];
+ i3 = HEAP32[i7 >> 2] | 0;
+ i2 = HEAP32[i3 + 8 >> 2] | 0;
+ if ((i2 | 0) != 0) {
+ if ((i2 | 0) == 1) {
+ i2 = (HEAP32[i3 >> 2] | 0) != 0;
+ } else {
+ i2 = 1;
+ }
+ } else {
+ i2 = 0;
+ }
+ i13 = i2 & 1 ^ 1;
+ STACKTOP = i1;
+ return i13 | 0;
+ }
+ }
+ i10 = i5 + 28 | 0;
+ i13 = i8 - (HEAP32[i10 >> 2] | 0) | 0;
+ i11 = HEAP32[i7 >> 2] | 0;
+ HEAP32[i7 >> 2] = i11 + 16;
+ i6 = i9;
+ i8 = HEAP32[i6 + 4 >> 2] | 0;
+ i12 = i11;
+ HEAP32[i12 >> 2] = HEAP32[i6 >> 2];
+ HEAP32[i12 + 4 >> 2] = i8;
+ HEAP32[i11 + 8 >> 2] = HEAP32[i9 + 8 >> 2];
+ i9 = HEAP32[i7 >> 2] | 0;
+ HEAP32[i7 >> 2] = i9 + 16;
+ i11 = i3;
+ i12 = HEAP32[i11 + 4 >> 2] | 0;
+ i3 = i9;
+ HEAP32[i3 >> 2] = HEAP32[i11 >> 2];
+ HEAP32[i3 + 4 >> 2] = i12;
+ HEAP32[i9 + 8 >> 2] = HEAP32[i4 >> 2];
+ i3 = HEAP32[i7 >> 2] | 0;
+ HEAP32[i7 >> 2] = i3 + 16;
+ i9 = i2;
+ i12 = HEAP32[i9 + 4 >> 2] | 0;
+ i11 = i3;
+ HEAP32[i11 >> 2] = HEAP32[i9 >> 2];
+ HEAP32[i11 + 4 >> 2] = i12;
+ HEAP32[i3 + 8 >> 2] = HEAP32[i2 + 8 >> 2];
+ _luaD_call(i5, (HEAP32[i7 >> 2] | 0) + -48 | 0, 1, HEAP8[(HEAP32[i5 + 16 >> 2] | 0) + 18 | 0] & 1);
+ i2 = HEAP32[i10 >> 2] | 0;
+ i3 = HEAP32[i7 >> 2] | 0;
+ i10 = i3 + -16 | 0;
+ HEAP32[i7 >> 2] = i10;
+ i11 = HEAP32[i10 + 4 >> 2] | 0;
+ i12 = i2 + i13 | 0;
+ HEAP32[i12 >> 2] = HEAP32[i10 >> 2];
+ HEAP32[i12 + 4 >> 2] = i11;
+ HEAP32[i2 + (i13 + 8) >> 2] = HEAP32[i3 + -8 >> 2];
+ i2 = HEAP32[i7 >> 2] | 0;
+ i3 = HEAP32[i2 + 8 >> 2] | 0;
+ if ((i3 | 0) != 0) {
+ if ((i3 | 0) == 1) {
+ i2 = (HEAP32[i2 >> 2] | 0) != 0;
+ } else {
+ i2 = 1;
+ }
+ } else {
+ i2 = 0;
+ }
+ i13 = i2 & 1;
+ STACKTOP = i1;
+ return i13 | 0;
+}
+function ___udivmoddi4(i6, i8, i2, i4, i1) {
+ i6 = i6 | 0;
+ i8 = i8 | 0;
+ i2 = i2 | 0;
+ i4 = i4 | 0;
+ i1 = i1 | 0;
+ var i3 = 0, i5 = 0, i7 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0;
+ i5 = i6;
+ i9 = i8;
+ i7 = i9;
+ i10 = i2;
+ i3 = i4;
+ i11 = i3;
+ if ((i7 | 0) == 0) {
+ i2 = (i1 | 0) != 0;
+ if ((i11 | 0) == 0) {
+ if (i2) {
+ HEAP32[i1 >> 2] = (i5 >>> 0) % (i10 >>> 0);
+ HEAP32[i1 + 4 >> 2] = 0;
+ }
+ i11 = 0;
+ i12 = (i5 >>> 0) / (i10 >>> 0) >>> 0;
+ return (tempRet0 = i11, i12) | 0;
+ } else {
+ if (!i2) {
+ i11 = 0;
+ i12 = 0;
+ return (tempRet0 = i11, i12) | 0;
+ }
+ HEAP32[i1 >> 2] = i6 | 0;
+ HEAP32[i1 + 4 >> 2] = i8 & 0;
+ i11 = 0;
+ i12 = 0;
+ return (tempRet0 = i11, i12) | 0;
+ }
+ }
+ i12 = (i11 | 0) == 0;
+ do {
+ if ((i10 | 0) != 0) {
+ if (!i12) {
+ i10 = (_llvm_ctlz_i32(i11 | 0) | 0) - (_llvm_ctlz_i32(i7 | 0) | 0) | 0;
+ if (i10 >>> 0 <= 31) {
+ i11 = i10 + 1 | 0;
+ i12 = 31 - i10 | 0;
+ i8 = i10 - 31 >> 31;
+ i9 = i11;
+ i6 = i5 >>> (i11 >>> 0) & i8 | i7 << i12;
+ i8 = i7 >>> (i11 >>> 0) & i8;
+ i11 = 0;
+ i7 = i5 << i12;
+ break;
+ }
+ if ((i1 | 0) == 0) {
+ i11 = 0;
+ i12 = 0;
+ return (tempRet0 = i11, i12) | 0;
+ }
+ HEAP32[i1 >> 2] = i6 | 0;
+ HEAP32[i1 + 4 >> 2] = i9 | i8 & 0;
+ i11 = 0;
+ i12 = 0;
+ return (tempRet0 = i11, i12) | 0;
+ }
+ i11 = i10 - 1 | 0;
+ if ((i11 & i10 | 0) != 0) {
+ i12 = (_llvm_ctlz_i32(i10 | 0) | 0) + 33 - (_llvm_ctlz_i32(i7 | 0) | 0) | 0;
+ i15 = 64 - i12 | 0;
+ i10 = 32 - i12 | 0;
+ i13 = i10 >> 31;
+ i14 = i12 - 32 | 0;
+ i8 = i14 >> 31;
+ i9 = i12;
+ i6 = i10 - 1 >> 31 & i7 >>> (i14 >>> 0) | (i7 << i10 | i5 >>> (i12 >>> 0)) & i8;
+ i8 = i8 & i7 >>> (i12 >>> 0);
+ i11 = i5 << i15 & i13;
+ i7 = (i7 << i15 | i5 >>> (i14 >>> 0)) & i13 | i5 << i10 & i12 - 33 >> 31;
+ break;
+ }
+ if ((i1 | 0) != 0) {
+ HEAP32[i1 >> 2] = i11 & i5;
+ HEAP32[i1 + 4 >> 2] = 0;
+ }
+ if ((i10 | 0) == 1) {
+ i14 = i9 | i8 & 0;
+ i15 = i6 | 0 | 0;
+ return (tempRet0 = i14, i15) | 0;
+ } else {
+ i15 = _llvm_cttz_i32(i10 | 0) | 0;
+ i14 = i7 >>> (i15 >>> 0) | 0;
+ i15 = i7 << 32 - i15 | i5 >>> (i15 >>> 0) | 0;
+ return (tempRet0 = i14, i15) | 0;
+ }
+ } else {
+ if (i12) {
+ if ((i1 | 0) != 0) {
+ HEAP32[i1 >> 2] = (i7 >>> 0) % (i10 >>> 0);
+ HEAP32[i1 + 4 >> 2] = 0;
+ }
+ i14 = 0;
+ i15 = (i7 >>> 0) / (i10 >>> 0) >>> 0;
+ return (tempRet0 = i14, i15) | 0;
+ }
+ if ((i5 | 0) == 0) {
+ if ((i1 | 0) != 0) {
+ HEAP32[i1 >> 2] = 0;
+ HEAP32[i1 + 4 >> 2] = (i7 >>> 0) % (i11 >>> 0);
+ }
+ i14 = 0;
+ i15 = (i7 >>> 0) / (i11 >>> 0) >>> 0;
+ return (tempRet0 = i14, i15) | 0;
+ }
+ i10 = i11 - 1 | 0;
+ if ((i10 & i11 | 0) == 0) {
+ if ((i1 | 0) != 0) {
+ HEAP32[i1 >> 2] = i6 | 0;
+ HEAP32[i1 + 4 >> 2] = i10 & i7 | i8 & 0;
+ }
+ i14 = 0;
+ i15 = i7 >>> ((_llvm_cttz_i32(i11 | 0) | 0) >>> 0);
+ return (tempRet0 = i14, i15) | 0;
+ }
+ i10 = (_llvm_ctlz_i32(i11 | 0) | 0) - (_llvm_ctlz_i32(i7 | 0) | 0) | 0;
+ if (i10 >>> 0 <= 30) {
+ i8 = i10 + 1 | 0;
+ i15 = 31 - i10 | 0;
+ i9 = i8;
+ i6 = i7 << i15 | i5 >>> (i8 >>> 0);
+ i8 = i7 >>> (i8 >>> 0);
+ i11 = 0;
+ i7 = i5 << i15;
+ break;
+ }
+ if ((i1 | 0) == 0) {
+ i14 = 0;
+ i15 = 0;
+ return (tempRet0 = i14, i15) | 0;
+ }
+ HEAP32[i1 >> 2] = i6 | 0;
+ HEAP32[i1 + 4 >> 2] = i9 | i8 & 0;
+ i14 = 0;
+ i15 = 0;
+ return (tempRet0 = i14, i15) | 0;
+ }
+ } while (0);
+ if ((i9 | 0) == 0) {
+ i12 = i6;
+ i2 = 0;
+ i6 = 0;
+ } else {
+ i2 = i2 | 0 | 0;
+ i3 = i3 | i4 & 0;
+ i4 = _i64Add(i2, i3, -1, -1) | 0;
+ i5 = tempRet0;
+ i10 = i8;
+ i12 = i6;
+ i6 = 0;
+ while (1) {
+ i8 = i11 >>> 31 | i7 << 1;
+ i11 = i6 | i11 << 1;
+ i7 = i12 << 1 | i7 >>> 31 | 0;
+ i10 = i12 >>> 31 | i10 << 1 | 0;
+ _i64Subtract(i4, i5, i7, i10) | 0;
+ i12 = tempRet0;
+ i15 = i12 >> 31 | ((i12 | 0) < 0 ? -1 : 0) << 1;
+ i6 = i15 & 1;
+ i12 = _i64Subtract(i7, i10, i15 & i2, (((i12 | 0) < 0 ? -1 : 0) >> 31 | ((i12 | 0) < 0 ? -1 : 0) << 1) & i3) | 0;
+ i10 = tempRet0;
+ i9 = i9 - 1 | 0;
+ if ((i9 | 0) == 0) {
+ break;
+ } else {
+ i7 = i8;
+ }
+ }
+ i7 = i8;
+ i8 = i10;
+ i2 = 0;
+ }
+ i3 = 0;
+ if ((i1 | 0) != 0) {
+ HEAP32[i1 >> 2] = i12;
+ HEAP32[i1 + 4 >> 2] = i8;
+ }
+ i14 = (i11 | 0) >>> 31 | (i7 | i3) << 1 | (i3 << 1 | i11 >>> 31) & 0 | i2;
+ i15 = (i11 << 1 | 0 >>> 31) & -2 | i6;
+ return (tempRet0 = i14, i15) | 0;
+}
+function _leaveblock(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i5 = i3;
+ i7 = i1 + 16 | 0;
+ i4 = HEAP32[i7 >> 2] | 0;
+ i2 = i1 + 12 | 0;
+ i6 = HEAP32[i2 >> 2] | 0;
+ if ((HEAP32[i4 >> 2] | 0) != 0 ? (HEAP8[i4 + 9 | 0] | 0) != 0 : 0) {
+ i16 = _luaK_jump(i1) | 0;
+ _luaK_patchclose(i1, i16, HEAPU8[i4 + 8 | 0] | 0);
+ _luaK_patchtohere(i1, i16);
+ }
+ L5 : do {
+ if ((HEAP8[i4 + 10 | 0] | 0) != 0) {
+ i15 = i6 + 52 | 0;
+ i14 = _luaS_new(HEAP32[i15 >> 2] | 0, 6304) | 0;
+ i13 = i6 + 64 | 0;
+ i16 = HEAP32[i13 >> 2] | 0;
+ i10 = i16 + 24 | 0;
+ i8 = i6 + 48 | 0;
+ i11 = HEAP32[(HEAP32[i8 >> 2] | 0) + 20 >> 2] | 0;
+ i12 = i16 + 28 | 0;
+ i9 = HEAP32[i12 >> 2] | 0;
+ i16 = i16 + 32 | 0;
+ if ((i9 | 0) < (HEAP32[i16 >> 2] | 0)) {
+ i15 = HEAP32[i10 >> 2] | 0;
+ } else {
+ i15 = _luaM_growaux_(HEAP32[i15 >> 2] | 0, HEAP32[i10 >> 2] | 0, i16, 16, 32767, 6312) | 0;
+ HEAP32[i10 >> 2] = i15;
+ }
+ HEAP32[i15 + (i9 << 4) >> 2] = i14;
+ i16 = HEAP32[i10 >> 2] | 0;
+ HEAP32[i16 + (i9 << 4) + 8 >> 2] = 0;
+ HEAP8[i16 + (i9 << 4) + 12 | 0] = HEAP8[(HEAP32[i8 >> 2] | 0) + 46 | 0] | 0;
+ HEAP32[(HEAP32[i10 >> 2] | 0) + (i9 << 4) + 4 >> 2] = i11;
+ HEAP32[i12 >> 2] = (HEAP32[i12 >> 2] | 0) + 1;
+ i10 = HEAP32[i13 >> 2] | 0;
+ i9 = (HEAP32[i10 + 24 >> 2] | 0) + (i9 << 4) | 0;
+ i11 = HEAP16[(HEAP32[(HEAP32[i8 >> 2] | 0) + 16 >> 2] | 0) + 6 >> 1] | 0;
+ i8 = i10 + 16 | 0;
+ if ((i11 | 0) < (HEAP32[i8 >> 2] | 0)) {
+ i10 = i10 + 12 | 0;
+ do {
+ while (1) {
+ if ((_luaS_eqstr(HEAP32[(HEAP32[i10 >> 2] | 0) + (i11 << 4) >> 2] | 0, HEAP32[i9 >> 2] | 0) | 0) == 0) {
+ break;
+ }
+ _closegoto(i6, i11, i9);
+ if ((i11 | 0) >= (HEAP32[i8 >> 2] | 0)) {
+ break L5;
+ }
+ }
+ i11 = i11 + 1 | 0;
+ } while ((i11 | 0) < (HEAP32[i8 >> 2] | 0));
+ }
+ }
+ } while (0);
+ HEAP32[i7 >> 2] = HEAP32[i4 >> 2];
+ i7 = i4 + 8 | 0;
+ i9 = HEAP8[i7] | 0;
+ i10 = i1 + 46 | 0;
+ i8 = (HEAP32[i2 >> 2] | 0) + 64 | 0;
+ i14 = (HEAP32[i8 >> 2] | 0) + 4 | 0;
+ HEAP32[i14 >> 2] = (i9 & 255) - (HEAPU8[i10] | 0) + (HEAP32[i14 >> 2] | 0);
+ i14 = HEAP8[i10] | 0;
+ if ((i14 & 255) > (i9 & 255)) {
+ i13 = i1 + 20 | 0;
+ i11 = i1 + 40 | 0;
+ i12 = (HEAP32[i1 >> 2] | 0) + 24 | 0;
+ do {
+ i16 = HEAP32[i13 >> 2] | 0;
+ i14 = i14 + -1 << 24 >> 24;
+ HEAP8[i10] = i14;
+ HEAP32[(HEAP32[i12 >> 2] | 0) + ((HEAP16[(HEAP32[HEAP32[i8 >> 2] >> 2] | 0) + ((HEAP32[i11 >> 2] | 0) + (i14 & 255) << 1) >> 1] | 0) * 12 | 0) + 8 >> 2] = i16;
+ i14 = HEAP8[i10] | 0;
+ } while ((i14 & 255) > (i9 & 255));
+ }
+ HEAP8[i1 + 48 | 0] = i14;
+ i10 = HEAP32[i6 + 64 >> 2] | 0;
+ HEAP32[i10 + 28 >> 2] = HEAP16[i4 + 4 >> 1] | 0;
+ i9 = HEAP16[i4 + 6 >> 1] | 0;
+ if ((HEAP32[i4 >> 2] | 0) == 0) {
+ if ((i9 | 0) >= (HEAP32[i10 + 16 >> 2] | 0)) {
+ STACKTOP = i3;
+ return;
+ }
+ i10 = HEAP32[i10 + 12 >> 2] | 0;
+ i11 = HEAP32[i10 + (i9 << 4) >> 2] | 0;
+ if ((HEAP8[i11 + 4 | 0] | 0) != 4) {
+ i16 = 6200;
+ i15 = i6 + 52 | 0;
+ i15 = HEAP32[i15 >> 2] | 0;
+ i14 = i11 + 16 | 0;
+ i13 = i10 + (i9 << 4) + 8 | 0;
+ i13 = HEAP32[i13 >> 2] | 0;
+ HEAP32[i5 >> 2] = i14;
+ i14 = i5 + 4 | 0;
+ HEAP32[i14 >> 2] = i13;
+ i16 = _luaO_pushfstring(i15, i16, i5) | 0;
+ _semerror(i6, i16);
+ }
+ i16 = (HEAP8[i11 + 6 | 0] | 0) != 0 ? 6160 : 6200;
+ i15 = i6 + 52 | 0;
+ i15 = HEAP32[i15 >> 2] | 0;
+ i14 = i11 + 16 | 0;
+ i13 = i10 + (i9 << 4) + 8 | 0;
+ i13 = HEAP32[i13 >> 2] | 0;
+ HEAP32[i5 >> 2] = i14;
+ i14 = i5 + 4 | 0;
+ HEAP32[i14 >> 2] = i13;
+ i16 = _luaO_pushfstring(i15, i16, i5) | 0;
+ _semerror(i6, i16);
+ }
+ i6 = HEAP32[i8 >> 2] | 0;
+ i5 = i6 + 16 | 0;
+ if ((i9 | 0) >= (HEAP32[i5 >> 2] | 0)) {
+ STACKTOP = i3;
+ return;
+ }
+ i6 = i6 + 12 | 0;
+ i4 = i4 + 9 | 0;
+ do {
+ i10 = HEAP32[i6 >> 2] | 0;
+ i8 = i10 + (i9 << 4) + 12 | 0;
+ i11 = HEAP8[i7] | 0;
+ i12 = i11 & 255;
+ if ((HEAPU8[i8] | 0) > (i11 & 255)) {
+ if ((HEAP8[i4] | 0) != 0) {
+ _luaK_patchclose(i1, HEAP32[i10 + (i9 << 4) + 4 >> 2] | 0, i12);
+ i11 = HEAP8[i7] | 0;
+ }
+ HEAP8[i8] = i11;
+ }
+ i9 = ((_findlabel(HEAP32[i2 >> 2] | 0, i9) | 0) == 0) + i9 | 0;
+ } while ((i9 | 0) < (HEAP32[i5 >> 2] | 0));
+ STACKTOP = i3;
+ return;
+}
+function _getobjname(i3, i7, i9, i2) {
+ i3 = i3 | 0;
+ i7 = i7 | 0;
+ i9 = i9 | 0;
+ i2 = i2 | 0;
+ var i1 = 0, i4 = 0, i5 = 0, i6 = 0, i8 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0;
+ i1 = STACKTOP;
+ i4 = i3 + 12 | 0;
+ L1 : while (1) {
+ i13 = _luaF_getlocalname(i3, i9 + 1 | 0, i7) | 0;
+ HEAP32[i2 >> 2] = i13;
+ if ((i13 | 0) != 0) {
+ i2 = 2040;
+ i4 = 42;
+ break;
+ }
+ if ((i7 | 0) <= 0) {
+ i2 = 0;
+ i4 = 42;
+ break;
+ }
+ i6 = HEAP32[i4 >> 2] | 0;
+ i8 = 0;
+ i5 = -1;
+ do {
+ i12 = HEAP32[i6 + (i8 << 2) >> 2] | 0;
+ i13 = i12 & 63;
+ i11 = i12 >>> 6 & 255;
+ switch (i13 | 0) {
+ case 27:
+ {
+ i10 = i8;
+ i5 = (i11 | 0) == (i9 | 0) ? i8 : i5;
+ break;
+ }
+ case 30:
+ case 29:
+ {
+ i10 = i8;
+ i5 = (i11 | 0) > (i9 | 0) ? i5 : i8;
+ break;
+ }
+ case 23:
+ {
+ i10 = (i12 >>> 14) + -131071 | 0;
+ i13 = i8 + 1 + i10 | 0;
+ i10 = ((i8 | 0) >= (i13 | 0) | (i13 | 0) > (i7 | 0) ? 0 : i10) + i8 | 0;
+ break;
+ }
+ case 4:
+ {
+ if ((i11 | 0) > (i9 | 0)) {
+ i10 = i8;
+ } else {
+ i10 = i8;
+ i5 = (i11 + (i12 >>> 23) | 0) < (i9 | 0) ? i5 : i8;
+ }
+ break;
+ }
+ case 34:
+ {
+ i10 = i8;
+ i5 = (i11 + 2 | 0) > (i9 | 0) ? i5 : i8;
+ break;
+ }
+ default:
+ {
+ i10 = i8;
+ i5 = (HEAP8[5584 + i13 | 0] & 64) != 0 & (i11 | 0) == (i9 | 0) ? i8 : i5;
+ }
+ }
+ i8 = i10 + 1 | 0;
+ } while ((i8 | 0) < (i7 | 0));
+ if ((i5 | 0) == -1) {
+ i2 = 0;
+ i4 = 42;
+ break;
+ }
+ i7 = HEAP32[i6 + (i5 << 2) >> 2] | 0;
+ i9 = i7 & 63;
+ switch (i9 | 0) {
+ case 0:
+ {
+ break;
+ }
+ case 7:
+ case 6:
+ {
+ i4 = 17;
+ break L1;
+ }
+ case 5:
+ {
+ i4 = 29;
+ break L1;
+ }
+ case 1:
+ {
+ i4 = 32;
+ break L1;
+ }
+ case 2:
+ {
+ i4 = 33;
+ break L1;
+ }
+ case 12:
+ {
+ i4 = 36;
+ break L1;
+ }
+ default:
+ {
+ i2 = 0;
+ i4 = 42;
+ break L1;
+ }
+ }
+ i9 = i7 >>> 23;
+ if (i9 >>> 0 < (i7 >>> 6 & 255) >>> 0) {
+ i7 = i5;
+ } else {
+ i2 = 0;
+ i4 = 42;
+ break;
+ }
+ }
+ if ((i4 | 0) == 17) {
+ i6 = i7 >>> 14;
+ i8 = i6 & 511;
+ i7 = i7 >>> 23;
+ if ((i9 | 0) != 7) {
+ i7 = HEAP32[(HEAP32[i3 + 28 >> 2] | 0) + (i7 << 3) >> 2] | 0;
+ if ((i7 | 0) == 0) {
+ i7 = 2104;
+ } else {
+ i7 = i7 + 16 | 0;
+ }
+ } else {
+ i7 = _luaF_getlocalname(i3, i7 + 1 | 0, i5) | 0;
+ }
+ if ((i6 & 256 | 0) == 0) {
+ i3 = _getobjname(i3, i5, i8, i2) | 0;
+ if (!((i3 | 0) != 0 ? (HEAP8[i3] | 0) == 99 : 0)) {
+ i4 = 26;
+ }
+ } else {
+ i5 = i6 & 255;
+ i3 = HEAP32[i3 + 8 >> 2] | 0;
+ if ((HEAP32[i3 + (i5 << 4) + 8 >> 2] & 15 | 0) == 4) {
+ HEAP32[i2 >> 2] = (HEAP32[i3 + (i5 << 4) >> 2] | 0) + 16;
+ } else {
+ i4 = 26;
+ }
+ }
+ if ((i4 | 0) == 26) {
+ HEAP32[i2 >> 2] = 2104;
+ }
+ if ((i7 | 0) == 0) {
+ i13 = 2064;
+ STACKTOP = i1;
+ return i13 | 0;
+ }
+ i13 = (_strcmp(i7, 2048) | 0) == 0;
+ i13 = i13 ? 2056 : 2064;
+ STACKTOP = i1;
+ return i13 | 0;
+ } else if ((i4 | 0) == 29) {
+ i3 = HEAP32[(HEAP32[i3 + 28 >> 2] | 0) + (i7 >>> 23 << 3) >> 2] | 0;
+ if ((i3 | 0) == 0) {
+ i3 = 2104;
+ } else {
+ i3 = i3 + 16 | 0;
+ }
+ HEAP32[i2 >> 2] = i3;
+ i13 = 2072;
+ STACKTOP = i1;
+ return i13 | 0;
+ } else if ((i4 | 0) == 32) {
+ i5 = i7 >>> 14;
+ } else if ((i4 | 0) == 33) {
+ i5 = (HEAP32[i6 + (i5 + 1 << 2) >> 2] | 0) >>> 6;
+ } else if ((i4 | 0) == 36) {
+ i4 = i7 >>> 14;
+ if ((i4 & 256 | 0) == 0) {
+ i3 = _getobjname(i3, i5, i4 & 511, i2) | 0;
+ if ((i3 | 0) != 0 ? (HEAP8[i3] | 0) == 99 : 0) {
+ i13 = 2096;
+ STACKTOP = i1;
+ return i13 | 0;
+ }
+ } else {
+ i4 = i4 & 255;
+ i3 = HEAP32[i3 + 8 >> 2] | 0;
+ if ((HEAP32[i3 + (i4 << 4) + 8 >> 2] & 15 | 0) == 4) {
+ HEAP32[i2 >> 2] = (HEAP32[i3 + (i4 << 4) >> 2] | 0) + 16;
+ i13 = 2096;
+ STACKTOP = i1;
+ return i13 | 0;
+ }
+ }
+ HEAP32[i2 >> 2] = 2104;
+ i13 = 2096;
+ STACKTOP = i1;
+ return i13 | 0;
+ } else if ((i4 | 0) == 42) {
+ STACKTOP = i1;
+ return i2 | 0;
+ }
+ i3 = HEAP32[i3 + 8 >> 2] | 0;
+ if ((HEAP32[i3 + (i5 << 4) + 8 >> 2] & 15 | 0) != 4) {
+ i13 = 0;
+ STACKTOP = i1;
+ return i13 | 0;
+ }
+ HEAP32[i2 >> 2] = (HEAP32[i3 + (i5 << 4) >> 2] | 0) + 16;
+ i13 = 2080;
+ STACKTOP = i1;
+ return i13 | 0;
+}
+function _assignment(i2, i16, i5) {
+ i2 = i2 | 0;
+ i16 = i16 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 80 | 0;
+ i6 = i3 + 56 | 0;
+ i1 = i3 + 32 | 0;
+ i8 = i3;
+ i4 = i16 + 8 | 0;
+ if (!(((HEAP32[i4 >> 2] | 0) + -7 | 0) >>> 0 < 3)) {
+ _luaX_syntaxerror(i2, 6344);
+ }
+ i13 = i2 + 16 | 0;
+ i14 = HEAP32[i13 >> 2] | 0;
+ do {
+ if ((i14 | 0) == 44) {
+ _luaX_next(i2);
+ HEAP32[i8 >> 2] = i16;
+ i14 = i8 + 8 | 0;
+ _suffixedexp(i2, i14);
+ i15 = i2 + 48 | 0;
+ if ((HEAP32[i14 >> 2] | 0) != 9 ? (i10 = HEAP32[i15 >> 2] | 0, i11 = HEAP8[i10 + 48 | 0] | 0, i9 = i11 & 255, (i16 | 0) != 0) : 0) {
+ i13 = i8 + 16 | 0;
+ i12 = i11 & 255;
+ i18 = 0;
+ do {
+ if ((HEAP32[i16 + 8 >> 2] | 0) == 9) {
+ i17 = i16 + 16 | 0;
+ i19 = i17 + 3 | 0;
+ i20 = HEAPU8[i19] | 0;
+ i21 = HEAP32[i14 >> 2] | 0;
+ if ((i20 | 0) == (i21 | 0)) {
+ i21 = i17 + 2 | 0;
+ if ((HEAPU8[i21] | 0) == (HEAP32[i13 >> 2] | 0)) {
+ HEAP8[i19] = 7;
+ HEAP8[i21] = i11;
+ i20 = HEAP32[i14 >> 2] | 0;
+ i18 = 1;
+ }
+ } else {
+ i20 = i21;
+ }
+ if ((i20 | 0) == 7 ? (HEAP16[i17 >> 1] | 0) == (HEAP32[i13 >> 2] | 0) : 0) {
+ HEAP16[i17 >> 1] = i12;
+ i18 = 1;
+ }
+ }
+ i16 = HEAP32[i16 >> 2] | 0;
+ } while ((i16 | 0) != 0);
+ if ((i18 | 0) != 0) {
+ _luaK_codeABC(i10, (HEAP32[i14 >> 2] | 0) == 7 ? 0 : 5, i9, HEAP32[i13 >> 2] | 0, 0) | 0;
+ _luaK_reserveregs(i10, 1);
+ }
+ }
+ i9 = HEAP32[i15 >> 2] | 0;
+ if (((HEAPU16[(HEAP32[i2 + 52 >> 2] | 0) + 38 >> 1] | 0) + i5 | 0) <= 200) {
+ _assignment(i2, i8, i5 + 1 | 0);
+ i7 = i1;
+ break;
+ }
+ i8 = i9 + 12 | 0;
+ i5 = HEAP32[(HEAP32[i8 >> 2] | 0) + 52 >> 2] | 0;
+ i9 = HEAP32[(HEAP32[i9 >> 2] | 0) + 64 >> 2] | 0;
+ if ((i9 | 0) == 0) {
+ i20 = 6552;
+ HEAP32[i6 >> 2] = 6360;
+ i21 = i6 + 4 | 0;
+ HEAP32[i21 >> 2] = 200;
+ i21 = i6 + 8 | 0;
+ HEAP32[i21 >> 2] = i20;
+ i21 = _luaO_pushfstring(i5, 6592, i6) | 0;
+ i20 = HEAP32[i8 >> 2] | 0;
+ _luaX_syntaxerror(i20, i21);
+ }
+ HEAP32[i6 >> 2] = i9;
+ i20 = _luaO_pushfstring(i5, 6568, i6) | 0;
+ HEAP32[i6 >> 2] = 6360;
+ i21 = i6 + 4 | 0;
+ HEAP32[i21 >> 2] = 200;
+ i21 = i6 + 8 | 0;
+ HEAP32[i21 >> 2] = i20;
+ i21 = _luaO_pushfstring(i5, 6592, i6) | 0;
+ i20 = HEAP32[i8 >> 2] | 0;
+ _luaX_syntaxerror(i20, i21);
+ } else if ((i14 | 0) == 61) {
+ _luaX_next(i2);
+ _subexpr(i2, i1, 0) | 0;
+ i6 = i2 + 48 | 0;
+ if ((HEAP32[i13 >> 2] | 0) == 44) {
+ i9 = 1;
+ do {
+ _luaX_next(i2);
+ _luaK_exp2nextreg(HEAP32[i6 >> 2] | 0, i1);
+ _subexpr(i2, i1, 0) | 0;
+ i9 = i9 + 1 | 0;
+ } while ((HEAP32[i13 >> 2] | 0) == 44);
+ } else {
+ i9 = 1;
+ }
+ i8 = HEAP32[i6 >> 2] | 0;
+ if ((i9 | 0) == (i5 | 0)) {
+ _luaK_setoneret(i8, i1);
+ _luaK_storevar(HEAP32[i6 >> 2] | 0, i4, i1);
+ STACKTOP = i3;
+ return;
+ }
+ i7 = i5 - i9 | 0;
+ i10 = HEAP32[i1 >> 2] | 0;
+ if ((i10 | 0) == 13 | (i10 | 0) == 12) {
+ i10 = i7 + 1 | 0;
+ i10 = (i10 | 0) < 0 ? 0 : i10;
+ _luaK_setreturns(i8, i1, i10);
+ if ((i10 | 0) > 1) {
+ _luaK_reserveregs(i8, i10 + -1 | 0);
+ }
+ } else if ((i10 | 0) == 0) {
+ i12 = 30;
+ } else {
+ _luaK_exp2nextreg(i8, i1);
+ i12 = 30;
+ }
+ if ((i12 | 0) == 30 ? (i7 | 0) > 0 : 0) {
+ i21 = HEAPU8[i8 + 48 | 0] | 0;
+ _luaK_reserveregs(i8, i7);
+ _luaK_nil(i8, i21, i7);
+ }
+ if ((i9 | 0) > (i5 | 0)) {
+ i21 = (HEAP32[i6 >> 2] | 0) + 48 | 0;
+ HEAP8[i21] = i7 + (HEAPU8[i21] | 0);
+ i7 = i1;
+ } else {
+ i7 = i1;
+ }
+ } else {
+ _error_expected(i2, 61);
+ }
+ } while (0);
+ i21 = HEAP32[i2 + 48 >> 2] | 0;
+ i20 = (HEAPU8[i21 + 48 | 0] | 0) + -1 | 0;
+ HEAP32[i1 + 16 >> 2] = -1;
+ HEAP32[i1 + 20 >> 2] = -1;
+ HEAP32[i7 >> 2] = 6;
+ HEAP32[i1 + 8 >> 2] = i20;
+ _luaK_storevar(i21, i4, i1);
+ STACKTOP = i3;
+ return;
+}
+function _str_find_aux(i3, i7) {
+ i3 = i3 | 0;
+ i7 = i7 | 0;
+ var i1 = 0, i2 = 0, i4 = 0, i5 = 0, i6 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 288 | 0;
+ i9 = i1 + 284 | 0;
+ i5 = i1 + 280 | 0;
+ i4 = i1;
+ i2 = _luaL_checklstring(i3, 1, i9) | 0;
+ i8 = _luaL_checklstring(i3, 2, i5) | 0;
+ i12 = _luaL_optinteger(i3, 3, 1) | 0;
+ i10 = HEAP32[i9 >> 2] | 0;
+ if (!((i12 | 0) > -1)) {
+ if (i10 >>> 0 < (0 - i12 | 0) >>> 0) {
+ i12 = 1;
+ } else {
+ i12 = i12 + 1 + i10 | 0;
+ i6 = 4;
+ }
+ } else {
+ i6 = 4;
+ }
+ if ((i6 | 0) == 4) {
+ if ((i12 | 0) != 0) {
+ if (i12 >>> 0 > (i10 + 1 | 0) >>> 0) {
+ _lua_pushnil(i3);
+ i13 = 1;
+ STACKTOP = i1;
+ return i13 | 0;
+ }
+ } else {
+ i12 = 1;
+ }
+ }
+ i7 = (i7 | 0) != 0;
+ L10 : do {
+ if (i7) {
+ i13 = (_lua_toboolean(i3, 4) | 0) == 0;
+ i10 = HEAP32[i5 >> 2] | 0;
+ if (i13) {
+ i11 = 0;
+ do {
+ i13 = i8 + i11 | 0;
+ if ((_strpbrk(i13, 7512) | 0) != 0) {
+ i6 = 20;
+ break L10;
+ }
+ i11 = i11 + 1 + (_strlen(i13 | 0) | 0) | 0;
+ } while (!(i11 >>> 0 > i10 >>> 0));
+ }
+ i11 = i2 + (i12 + -1) | 0;
+ i9 = (HEAP32[i9 >> 2] | 0) - i12 + 1 | 0;
+ L17 : do {
+ if ((i10 | 0) == 0) {
+ if ((i11 | 0) == 0) {
+ break L10;
+ }
+ } else {
+ if (i10 >>> 0 > i9 >>> 0) {
+ break L10;
+ }
+ i4 = i10 + -1 | 0;
+ if ((i4 | 0) == (i9 | 0)) {
+ break L10;
+ }
+ i7 = HEAP8[i8] | 0;
+ i8 = i8 + 1 | 0;
+ i9 = i9 - i4 | 0;
+ i12 = i11;
+ while (1) {
+ i11 = _memchr(i12, i7, i9) | 0;
+ if ((i11 | 0) == 0) {
+ break L10;
+ }
+ i10 = i11 + 1 | 0;
+ if ((_memcmp(i10, i8, i4) | 0) == 0) {
+ break L17;
+ }
+ i11 = i10;
+ i9 = i12 + i9 | 0;
+ if ((i9 | 0) == (i11 | 0)) {
+ break L10;
+ } else {
+ i9 = i9 - i11 | 0;
+ i12 = i10;
+ }
+ }
+ }
+ } while (0);
+ i13 = i11 - i2 | 0;
+ _lua_pushinteger(i3, i13 + 1 | 0);
+ _lua_pushinteger(i3, i13 + (HEAP32[i5 >> 2] | 0) | 0);
+ i13 = 2;
+ STACKTOP = i1;
+ return i13 | 0;
+ } else {
+ i6 = 20;
+ }
+ } while (0);
+ L28 : do {
+ if ((i6 | 0) == 20) {
+ i6 = i2 + (i12 + -1) | 0;
+ i10 = (HEAP8[i8] | 0) == 94;
+ if (i10) {
+ i12 = (HEAP32[i5 >> 2] | 0) + -1 | 0;
+ HEAP32[i5 >> 2] = i12;
+ i8 = i8 + 1 | 0;
+ } else {
+ i12 = HEAP32[i5 >> 2] | 0;
+ }
+ i5 = i4 + 16 | 0;
+ HEAP32[i5 >> 2] = i3;
+ HEAP32[i4 >> 2] = 200;
+ HEAP32[i4 + 4 >> 2] = i2;
+ i11 = i4 + 8 | 0;
+ HEAP32[i11 >> 2] = i2 + (HEAP32[i9 >> 2] | 0);
+ HEAP32[i4 + 12 >> 2] = i8 + i12;
+ i9 = i4 + 20 | 0;
+ L34 : do {
+ if (i10) {
+ HEAP32[i9 >> 2] = 0;
+ i8 = _match(i4, i6, i8) | 0;
+ if ((i8 | 0) == 0) {
+ break L28;
+ }
+ } else {
+ while (1) {
+ HEAP32[i9 >> 2] = 0;
+ i10 = _match(i4, i6, i8) | 0;
+ if ((i10 | 0) != 0) {
+ i8 = i10;
+ break L34;
+ }
+ if (!(i6 >>> 0 < (HEAP32[i11 >> 2] | 0) >>> 0)) {
+ break L28;
+ }
+ i6 = i6 + 1 | 0;
+ }
+ }
+ } while (0);
+ if (i7) {
+ _lua_pushinteger(i3, 1 - i2 + i6 | 0);
+ _lua_pushinteger(i3, i8 - i2 | 0);
+ i2 = HEAP32[i9 >> 2] | 0;
+ _luaL_checkstack(HEAP32[i5 >> 2] | 0, i2, 7200);
+ if ((i2 | 0) > 0) {
+ i3 = 0;
+ do {
+ _push_onecapture(i4, i3, 0, 0);
+ i3 = i3 + 1 | 0;
+ } while ((i3 | 0) != (i2 | 0));
+ }
+ i13 = i2 + 2 | 0;
+ STACKTOP = i1;
+ return i13 | 0;
+ } else {
+ i3 = HEAP32[i9 >> 2] | 0;
+ i2 = (i3 | 0) != 0 | (i6 | 0) == 0 ? i3 : 1;
+ _luaL_checkstack(HEAP32[i5 >> 2] | 0, i2, 7200);
+ if ((i2 | 0) > 0) {
+ i3 = 0;
+ } else {
+ i13 = i3;
+ STACKTOP = i1;
+ return i13 | 0;
+ }
+ do {
+ _push_onecapture(i4, i3, i6, i8);
+ i3 = i3 + 1 | 0;
+ } while ((i3 | 0) != (i2 | 0));
+ STACKTOP = i1;
+ return i2 | 0;
+ }
+ }
+ } while (0);
+ _lua_pushnil(i3);
+ i13 = 1;
+ STACKTOP = i1;
+ return i13 | 0;
+}
+function _luaO_pushvfstring(i2, i13, i10) {
+ i2 = i2 | 0;
+ i13 = i13 | 0;
+ i10 = i10 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i11 = 0, i12 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, d18 = 0.0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 48 | 0;
+ i7 = i3;
+ i9 = i3 + 32 | 0;
+ i8 = i3 + 8 | 0;
+ i14 = _strchr(i13, 37) | 0;
+ i6 = i2 + 24 | 0;
+ i4 = i2 + 8 | 0;
+ i15 = HEAP32[i4 >> 2] | 0;
+ i17 = (HEAP32[i6 >> 2] | 0) - i15 | 0;
+ L1 : do {
+ if ((i14 | 0) == 0) {
+ i5 = i13;
+ i11 = i17;
+ i12 = i15;
+ i1 = 0;
+ } else {
+ i16 = 0;
+ L3 : while (1) {
+ if ((i17 | 0) < 48) {
+ _luaD_growstack(i2, 2);
+ i15 = HEAP32[i4 >> 2] | 0;
+ }
+ HEAP32[i4 >> 2] = i15 + 16;
+ i13 = _luaS_newlstr(i2, i13, i14 - i13 | 0) | 0;
+ HEAP32[i15 >> 2] = i13;
+ HEAP32[i15 + 8 >> 2] = HEAPU8[i13 + 4 | 0] | 64;
+ i13 = HEAP8[i14 + 1 | 0] | 0;
+ switch (i13 | 0) {
+ case 115:
+ {
+ i17 = HEAP32[i10 >> 2] | 0;
+ i13 = HEAP32[i17 >> 2] | 0;
+ HEAP32[i10 >> 2] = i17 + 4;
+ i13 = (i13 | 0) == 0 ? 5480 : i13;
+ i15 = _strlen(i13 | 0) | 0;
+ i17 = HEAP32[i4 >> 2] | 0;
+ HEAP32[i4 >> 2] = i17 + 16;
+ i15 = _luaS_newlstr(i2, i13, i15) | 0;
+ HEAP32[i17 >> 2] = i15;
+ HEAP32[i17 + 8 >> 2] = HEAPU8[i15 + 4 | 0] | 64;
+ break;
+ }
+ case 100:
+ {
+ i17 = HEAP32[i4 >> 2] | 0;
+ HEAP32[i4 >> 2] = i17 + 16;
+ i13 = HEAP32[i10 >> 2] | 0;
+ i15 = HEAP32[i13 >> 2] | 0;
+ HEAP32[i10 >> 2] = i13 + 4;
+ HEAPF64[i17 >> 3] = +(i15 | 0);
+ HEAP32[i17 + 8 >> 2] = 3;
+ break;
+ }
+ case 37:
+ {
+ i17 = HEAP32[i4 >> 2] | 0;
+ HEAP32[i4 >> 2] = i17 + 16;
+ i15 = _luaS_newlstr(i2, 5496, 1) | 0;
+ HEAP32[i17 >> 2] = i15;
+ HEAP32[i17 + 8 >> 2] = HEAPU8[i15 + 4 | 0] | 64;
+ break;
+ }
+ case 99:
+ {
+ i15 = HEAP32[i10 >> 2] | 0;
+ i17 = HEAP32[i15 >> 2] | 0;
+ HEAP32[i10 >> 2] = i15 + 4;
+ HEAP8[i9] = i17;
+ i17 = HEAP32[i4 >> 2] | 0;
+ HEAP32[i4 >> 2] = i17 + 16;
+ i15 = _luaS_newlstr(i2, i9, 1) | 0;
+ HEAP32[i17 >> 2] = i15;
+ HEAP32[i17 + 8 >> 2] = HEAPU8[i15 + 4 | 0] | 64;
+ break;
+ }
+ case 102:
+ {
+ i17 = HEAP32[i4 >> 2] | 0;
+ HEAP32[i4 >> 2] = i17 + 16;
+ i15 = HEAP32[i10 >> 2] | 0;
+ d18 = +HEAPF64[i15 >> 3];
+ HEAP32[i10 >> 2] = i15 + 8;
+ HEAPF64[i17 >> 3] = d18;
+ HEAP32[i17 + 8 >> 2] = 3;
+ break;
+ }
+ case 112:
+ {
+ i17 = HEAP32[i10 >> 2] | 0;
+ i15 = HEAP32[i17 >> 2] | 0;
+ HEAP32[i10 >> 2] = i17 + 4;
+ HEAP32[i7 >> 2] = i15;
+ i15 = _sprintf(i8 | 0, 5488, i7 | 0) | 0;
+ i17 = HEAP32[i4 >> 2] | 0;
+ HEAP32[i4 >> 2] = i17 + 16;
+ i15 = _luaS_newlstr(i2, i8, i15) | 0;
+ HEAP32[i17 >> 2] = i15;
+ HEAP32[i17 + 8 >> 2] = HEAPU8[i15 + 4 | 0] | 64;
+ break;
+ }
+ default:
+ {
+ break L3;
+ }
+ }
+ i16 = i16 + 2 | 0;
+ i13 = i14 + 2 | 0;
+ i14 = _strchr(i13, 37) | 0;
+ i15 = HEAP32[i4 >> 2] | 0;
+ i17 = (HEAP32[i6 >> 2] | 0) - i15 | 0;
+ if ((i14 | 0) == 0) {
+ i5 = i13;
+ i11 = i17;
+ i12 = i15;
+ i1 = i16;
+ break L1;
+ }
+ }
+ HEAP32[i7 >> 2] = i13;
+ _luaG_runerror(i2, 5504, i7);
+ }
+ } while (0);
+ if ((i11 | 0) < 32) {
+ _luaD_growstack(i2, 1);
+ i12 = HEAP32[i4 >> 2] | 0;
+ }
+ i17 = _strlen(i5 | 0) | 0;
+ HEAP32[i4 >> 2] = i12 + 16;
+ i17 = _luaS_newlstr(i2, i5, i17) | 0;
+ HEAP32[i12 >> 2] = i17;
+ HEAP32[i12 + 8 >> 2] = HEAPU8[i17 + 4 | 0] | 64;
+ if ((i1 | 0) <= 0) {
+ i17 = HEAP32[i4 >> 2] | 0;
+ i17 = i17 + -16 | 0;
+ i17 = HEAP32[i17 >> 2] | 0;
+ i17 = i17 + 16 | 0;
+ STACKTOP = i3;
+ return i17 | 0;
+ }
+ _luaV_concat(i2, i1 | 1);
+ i17 = HEAP32[i4 >> 2] | 0;
+ i17 = i17 + -16 | 0;
+ i17 = HEAP32[i17 >> 2] | 0;
+ i17 = i17 + 16 | 0;
+ STACKTOP = i3;
+ return i17 | 0;
+}
+function _luaH_getn(i6) {
+ i6 = i6 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i5 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, d11 = 0.0, i12 = 0, i13 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i2 = i1;
+ i3 = i6 + 28 | 0;
+ i12 = HEAP32[i3 >> 2] | 0;
+ if ((i12 | 0) != 0 ? (i4 = HEAP32[i6 + 12 >> 2] | 0, (HEAP32[i4 + (i12 + -1 << 4) + 8 >> 2] | 0) == 0) : 0) {
+ if (i12 >>> 0 > 1) {
+ i10 = 0;
+ } else {
+ i13 = 0;
+ STACKTOP = i1;
+ return i13 | 0;
+ }
+ do {
+ i2 = (i10 + i12 | 0) >>> 1;
+ i3 = (HEAP32[i4 + (i2 + -1 << 4) + 8 >> 2] | 0) == 0;
+ i12 = i3 ? i2 : i12;
+ i10 = i3 ? i10 : i2;
+ } while ((i12 - i10 | 0) >>> 0 > 1);
+ STACKTOP = i1;
+ return i10 | 0;
+ }
+ i4 = i6 + 16 | 0;
+ if ((HEAP32[i4 >> 2] | 0) == 8016) {
+ i13 = i12;
+ STACKTOP = i1;
+ return i13 | 0;
+ }
+ i5 = i6 + 12 | 0;
+ i6 = i6 + 7 | 0;
+ i9 = i2 + 4 | 0;
+ i8 = i12 + 1 | 0;
+ i13 = i12;
+ i10 = i12;
+ while (1) {
+ i12 = i8 + -1 | 0;
+ L15 : do {
+ if (i12 >>> 0 < i13 >>> 0) {
+ i12 = (HEAP32[i5 >> 2] | 0) + (i12 << 4) | 0;
+ } else {
+ d11 = +(i8 | 0);
+ HEAPF64[i2 >> 3] = d11 + 1.0;
+ i13 = (HEAP32[i9 >> 2] | 0) + (HEAP32[i2 >> 2] | 0) | 0;
+ if ((i13 | 0) < 0) {
+ i12 = 0 - i13 | 0;
+ i13 = (i13 | 0) == (i12 | 0) ? 0 : i12;
+ }
+ i12 = (HEAP32[i4 >> 2] | 0) + (((i13 | 0) % ((1 << (HEAPU8[i6] | 0)) + -1 | 1 | 0) | 0) << 5) | 0;
+ while (1) {
+ if ((HEAP32[i12 + 24 >> 2] | 0) == 3 ? +HEAPF64[i12 + 16 >> 3] == d11 : 0) {
+ break;
+ }
+ i12 = HEAP32[i12 + 28 >> 2] | 0;
+ if ((i12 | 0) == 0) {
+ i12 = 5192;
+ break L15;
+ }
+ }
+ }
+ } while (0);
+ if ((HEAP32[i12 + 8 >> 2] | 0) == 0) {
+ break;
+ }
+ i10 = i8 << 1;
+ if (i10 >>> 0 > 2147483645) {
+ i7 = 21;
+ break;
+ }
+ i12 = i8;
+ i8 = i10;
+ i13 = HEAP32[i3 >> 2] | 0;
+ i10 = i12;
+ }
+ if ((i7 | 0) == 21) {
+ i8 = i2 + 4 | 0;
+ i7 = 1;
+ while (1) {
+ i10 = i7 + -1 | 0;
+ L34 : do {
+ if (i10 >>> 0 < (HEAP32[i3 >> 2] | 0) >>> 0) {
+ i9 = (HEAP32[i5 >> 2] | 0) + (i10 << 4) | 0;
+ } else {
+ d11 = +(i7 | 0);
+ HEAPF64[i2 >> 3] = d11 + 1.0;
+ i9 = (HEAP32[i8 >> 2] | 0) + (HEAP32[i2 >> 2] | 0) | 0;
+ if ((i9 | 0) < 0) {
+ i12 = 0 - i9 | 0;
+ i9 = (i9 | 0) == (i12 | 0) ? 0 : i12;
+ }
+ i9 = (HEAP32[i4 >> 2] | 0) + (((i9 | 0) % ((1 << (HEAPU8[i6] | 0)) + -1 | 1 | 0) | 0) << 5) | 0;
+ while (1) {
+ if ((HEAP32[i9 + 24 >> 2] | 0) == 3 ? +HEAPF64[i9 + 16 >> 3] == d11 : 0) {
+ break;
+ }
+ i9 = HEAP32[i9 + 28 >> 2] | 0;
+ if ((i9 | 0) == 0) {
+ i9 = 5192;
+ break L34;
+ }
+ }
+ }
+ } while (0);
+ if ((HEAP32[i9 + 8 >> 2] | 0) == 0) {
+ break;
+ }
+ i7 = i7 + 1 | 0;
+ }
+ STACKTOP = i1;
+ return i10 | 0;
+ }
+ if (!((i8 - i10 | 0) >>> 0 > 1)) {
+ i13 = i10;
+ STACKTOP = i1;
+ return i13 | 0;
+ }
+ i7 = i2 + 4 | 0;
+ do {
+ i9 = (i8 + i10 | 0) >>> 1;
+ i12 = i9 + -1 | 0;
+ L55 : do {
+ if (i12 >>> 0 < (HEAP32[i3 >> 2] | 0) >>> 0) {
+ i12 = (HEAP32[i5 >> 2] | 0) + (i12 << 4) | 0;
+ } else {
+ d11 = +(i9 | 0);
+ HEAPF64[i2 >> 3] = d11 + 1.0;
+ i13 = (HEAP32[i7 >> 2] | 0) + (HEAP32[i2 >> 2] | 0) | 0;
+ if ((i13 | 0) < 0) {
+ i12 = 0 - i13 | 0;
+ i13 = (i13 | 0) == (i12 | 0) ? 0 : i12;
+ }
+ i12 = (HEAP32[i4 >> 2] | 0) + (((i13 | 0) % ((1 << (HEAPU8[i6] | 0)) + -1 | 1 | 0) | 0) << 5) | 0;
+ while (1) {
+ if ((HEAP32[i12 + 24 >> 2] | 0) == 3 ? +HEAPF64[i12 + 16 >> 3] == d11 : 0) {
+ break;
+ }
+ i12 = HEAP32[i12 + 28 >> 2] | 0;
+ if ((i12 | 0) == 0) {
+ i12 = 5192;
+ break L55;
+ }
+ }
+ }
+ } while (0);
+ i12 = (HEAP32[i12 + 8 >> 2] | 0) == 0;
+ i8 = i12 ? i9 : i8;
+ i10 = i12 ? i10 : i9;
+ } while ((i8 - i10 | 0) >>> 0 > 1);
+ STACKTOP = i1;
+ return i10 | 0;
+}
+function _lua_resume(i4, i3, i7) {
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i7 = i7 | 0;
+ var i1 = 0, i2 = 0, i5 = 0, i6 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0;
+ i1 = STACKTOP;
+ if ((i3 | 0) == 0) {
+ i5 = 1;
+ } else {
+ i5 = (HEAPU16[i3 + 38 >> 1] | 0) + 1 & 65535;
+ }
+ i3 = i4 + 38 | 0;
+ HEAP16[i3 >> 1] = i5;
+ i5 = i4 + 36 | 0;
+ HEAP16[i5 >> 1] = 0;
+ i6 = i4 + 8 | 0;
+ i13 = _luaD_rawrunprotected(i4, 4, (HEAP32[i6 >> 2] | 0) + (0 - i7 << 4) | 0) | 0;
+ if ((i13 | 0) == -1) {
+ i18 = 2;
+ HEAP16[i5 >> 1] = 1;
+ i17 = HEAP16[i3 >> 1] | 0;
+ i17 = i17 + -1 << 16 >> 16;
+ HEAP16[i3 >> 1] = i17;
+ STACKTOP = i1;
+ return i18 | 0;
+ }
+ if (!(i13 >>> 0 > 1)) {
+ i18 = i13;
+ HEAP16[i5 >> 1] = 1;
+ i17 = HEAP16[i3 >> 1] | 0;
+ i17 = i17 + -1 << 16 >> 16;
+ HEAP16[i3 >> 1] = i17;
+ STACKTOP = i1;
+ return i18 | 0;
+ }
+ i7 = i4 + 16 | 0;
+ i12 = i4 + 28 | 0;
+ i11 = i4 + 41 | 0;
+ i10 = i4 + 68 | 0;
+ i9 = i4 + 32 | 0;
+ i8 = i4 + 12 | 0;
+ L10 : while (1) {
+ i15 = HEAP32[i7 >> 2] | 0;
+ if ((i15 | 0) == 0) {
+ break;
+ }
+ while (1) {
+ i14 = i15 + 18 | 0;
+ if (!((HEAP8[i14] & 16) == 0)) {
+ break;
+ }
+ i15 = HEAP32[i15 + 8 >> 2] | 0;
+ if ((i15 | 0) == 0) {
+ break L10;
+ }
+ }
+ i16 = HEAP32[i12 >> 2] | 0;
+ i17 = HEAP32[i15 + 20 >> 2] | 0;
+ i18 = i16 + i17 | 0;
+ _luaF_close(i4, i18);
+ if ((i13 | 0) == 4) {
+ i19 = HEAP32[(HEAP32[i8 >> 2] | 0) + 180 >> 2] | 0;
+ HEAP32[i18 >> 2] = i19;
+ HEAP32[i16 + (i17 + 8) >> 2] = HEAPU8[i19 + 4 | 0] | 0 | 64;
+ } else if ((i13 | 0) == 6) {
+ i19 = _luaS_newlstr(i4, 2424, 23) | 0;
+ HEAP32[i18 >> 2] = i19;
+ HEAP32[i16 + (i17 + 8) >> 2] = HEAPU8[i19 + 4 | 0] | 0 | 64;
+ } else {
+ i19 = HEAP32[i6 >> 2] | 0;
+ i21 = i19 + -16 | 0;
+ i20 = HEAP32[i21 + 4 >> 2] | 0;
+ HEAP32[i18 >> 2] = HEAP32[i21 >> 2];
+ HEAP32[i18 + 4 >> 2] = i20;
+ HEAP32[i16 + (i17 + 8) >> 2] = HEAP32[i19 + -8 >> 2];
+ }
+ i17 = i16 + (i17 + 16) | 0;
+ HEAP32[i6 >> 2] = i17;
+ HEAP32[i7 >> 2] = i15;
+ HEAP8[i11] = HEAP8[i15 + 36 | 0] | 0;
+ HEAP16[i5 >> 1] = 0;
+ if ((i15 | 0) != 0) {
+ i16 = i15;
+ do {
+ i18 = HEAP32[i16 + 4 >> 2] | 0;
+ i17 = i17 >>> 0 < i18 >>> 0 ? i18 : i17;
+ i16 = HEAP32[i16 + 8 >> 2] | 0;
+ } while ((i16 | 0) != 0);
+ }
+ i16 = i17 - (HEAP32[i12 >> 2] | 0) | 0;
+ i17 = (i16 >> 4) + 1 | 0;
+ i17 = ((i17 | 0) / 8 | 0) + 10 + i17 | 0;
+ i17 = (i17 | 0) > 1e6 ? 1e6 : i17;
+ if ((i16 | 0) <= 15999984 ? (i17 | 0) < (HEAP32[i9 >> 2] | 0) : 0) {
+ _luaD_reallocstack(i4, i17);
+ }
+ HEAP32[i10 >> 2] = HEAP32[i15 + 32 >> 2];
+ HEAP8[i14] = HEAPU8[i14] | 0 | 32;
+ HEAP8[i15 + 37 | 0] = i13;
+ i13 = _luaD_rawrunprotected(i4, 5, 0) | 0;
+ if (!(i13 >>> 0 > 1)) {
+ i2 = 24;
+ break;
+ }
+ }
+ if ((i2 | 0) == 24) {
+ HEAP16[i5 >> 1] = 1;
+ i21 = HEAP16[i3 >> 1] | 0;
+ i21 = i21 + -1 << 16 >> 16;
+ HEAP16[i3 >> 1] = i21;
+ STACKTOP = i1;
+ return i13 | 0;
+ }
+ HEAP8[i4 + 6 | 0] = i13;
+ i2 = HEAP32[i6 >> 2] | 0;
+ if ((i13 | 0) == 4) {
+ i21 = HEAP32[(HEAP32[i8 >> 2] | 0) + 180 >> 2] | 0;
+ HEAP32[i2 >> 2] = i21;
+ HEAP32[i2 + 8 >> 2] = HEAPU8[i21 + 4 | 0] | 0 | 64;
+ } else if ((i13 | 0) == 6) {
+ i21 = _luaS_newlstr(i4, 2424, 23) | 0;
+ HEAP32[i2 >> 2] = i21;
+ HEAP32[i2 + 8 >> 2] = HEAPU8[i21 + 4 | 0] | 0 | 64;
+ } else {
+ i19 = i2 + -16 | 0;
+ i20 = HEAP32[i19 + 4 >> 2] | 0;
+ i21 = i2;
+ HEAP32[i21 >> 2] = HEAP32[i19 >> 2];
+ HEAP32[i21 + 4 >> 2] = i20;
+ HEAP32[i2 + 8 >> 2] = HEAP32[i2 + -8 >> 2];
+ }
+ i21 = i2 + 16 | 0;
+ HEAP32[i6 >> 2] = i21;
+ HEAP32[(HEAP32[i7 >> 2] | 0) + 4 >> 2] = i21;
+ i21 = i13;
+ HEAP16[i5 >> 1] = 1;
+ i20 = HEAP16[i3 >> 1] | 0;
+ i20 = i20 + -1 << 16 >> 16;
+ HEAP16[i3 >> 1] = i20;
+ STACKTOP = i1;
+ return i21 | 0;
+}
+function _luaK_goiftrue(i1, i3) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0;
+ i2 = STACKTOP;
+ _luaK_dischargevars(i1, i3);
+ i12 = HEAP32[i3 >> 2] | 0;
+ do {
+ if ((i12 | 0) == 10) {
+ i9 = HEAP32[(HEAP32[i1 >> 2] | 0) + 12 >> 2] | 0;
+ i5 = i3 + 8 | 0;
+ i8 = HEAP32[i5 >> 2] | 0;
+ i7 = i9 + (i8 << 2) | 0;
+ if (!((i8 | 0) > 0 ? (i10 = i9 + (i8 + -1 << 2) | 0, i6 = HEAP32[i10 >> 2] | 0, (HEAP8[5584 + (i6 & 63) | 0] | 0) < 0) : 0)) {
+ i10 = i7;
+ i6 = HEAP32[i7 >> 2] | 0;
+ }
+ HEAP32[i10 >> 2] = ((i6 & 16320 | 0) == 0) << 6 | i6 & -16321;
+ i5 = HEAP32[i5 >> 2] | 0;
+ i8 = 18;
+ } else if (!((i12 | 0) == 2 | (i12 | 0) == 5 | (i12 | 0) == 4)) {
+ i5 = i3 + 8 | 0;
+ if ((i12 | 0) == 6) {
+ i8 = 14;
+ } else if ((i12 | 0) == 11 ? (i11 = HEAP32[(HEAP32[(HEAP32[i1 >> 2] | 0) + 12 >> 2] | 0) + (HEAP32[i5 >> 2] << 2) >> 2] | 0, (i11 & 63 | 0) == 20) : 0) {
+ i5 = i1 + 20 | 0;
+ HEAP32[i5 >> 2] = (HEAP32[i5 >> 2] | 0) + -1;
+ i5 = _condjump(i1, 27, i11 >>> 23, 0, 1) | 0;
+ i8 = 18;
+ break;
+ } else {
+ i8 = 9;
+ }
+ if ((i8 | 0) == 9) {
+ i12 = i1 + 48 | 0;
+ i10 = HEAP8[i12] | 0;
+ i11 = (i10 & 255) + 1 | 0;
+ i6 = (HEAP32[i1 >> 2] | 0) + 78 | 0;
+ do {
+ if (i11 >>> 0 > (HEAPU8[i6] | 0) >>> 0) {
+ if (i11 >>> 0 > 249) {
+ _luaX_syntaxerror(HEAP32[i1 + 12 >> 2] | 0, 10536);
+ } else {
+ HEAP8[i6] = i11;
+ i9 = HEAP8[i12] | 0;
+ break;
+ }
+ } else {
+ i9 = i10;
+ }
+ } while (0);
+ i11 = (i9 & 255) + 1 | 0;
+ HEAP8[i12] = i11;
+ _discharge2reg(i1, i3, (i11 & 255) + -1 | 0);
+ if ((HEAP32[i3 >> 2] | 0) == 6) {
+ i8 = 14;
+ }
+ }
+ if (((i8 | 0) == 14 ? (i7 = HEAP32[i5 >> 2] | 0, (i7 & 256 | 0) == 0) : 0) ? (HEAPU8[i1 + 46 | 0] | 0) <= (i7 | 0) : 0) {
+ i12 = i1 + 48 | 0;
+ HEAP8[i12] = (HEAP8[i12] | 0) + -1 << 24 >> 24;
+ }
+ i5 = _condjump(i1, 28, 255, HEAP32[i5 >> 2] | 0, 0) | 0;
+ i8 = 18;
+ }
+ } while (0);
+ do {
+ if ((i8 | 0) == 18 ? (i4 = i3 + 20 | 0, !((i5 | 0) == -1)) : 0) {
+ i8 = HEAP32[i4 >> 2] | 0;
+ if ((i8 | 0) == -1) {
+ HEAP32[i4 >> 2] = i5;
+ break;
+ }
+ i4 = HEAP32[(HEAP32[i1 >> 2] | 0) + 12 >> 2] | 0;
+ while (1) {
+ i7 = i4 + (i8 << 2) | 0;
+ i6 = HEAP32[i7 >> 2] | 0;
+ i9 = (i6 >>> 14) + -131071 | 0;
+ if ((i9 | 0) == -1) {
+ break;
+ }
+ i9 = i8 + 1 + i9 | 0;
+ if ((i9 | 0) == -1) {
+ break;
+ } else {
+ i8 = i9;
+ }
+ }
+ i4 = i5 + ~i8 | 0;
+ if ((((i4 | 0) > -1 ? i4 : 0 - i4 | 0) | 0) > 131071) {
+ _luaX_syntaxerror(HEAP32[i1 + 12 >> 2] | 0, 10624);
+ } else {
+ HEAP32[i7 >> 2] = (i4 << 14) + 2147467264 | i6 & 16383;
+ break;
+ }
+ }
+ } while (0);
+ i3 = i3 + 16 | 0;
+ i4 = HEAP32[i3 >> 2] | 0;
+ HEAP32[i1 + 24 >> 2] = HEAP32[i1 + 20 >> 2];
+ i5 = i1 + 28 | 0;
+ if ((i4 | 0) == -1) {
+ HEAP32[i3 >> 2] = -1;
+ STACKTOP = i2;
+ return;
+ }
+ i8 = HEAP32[i5 >> 2] | 0;
+ if ((i8 | 0) == -1) {
+ HEAP32[i5 >> 2] = i4;
+ HEAP32[i3 >> 2] = -1;
+ STACKTOP = i2;
+ return;
+ }
+ i7 = HEAP32[(HEAP32[i1 >> 2] | 0) + 12 >> 2] | 0;
+ while (1) {
+ i5 = i7 + (i8 << 2) | 0;
+ i6 = HEAP32[i5 >> 2] | 0;
+ i9 = (i6 >>> 14) + -131071 | 0;
+ if ((i9 | 0) == -1) {
+ break;
+ }
+ i9 = i8 + 1 + i9 | 0;
+ if ((i9 | 0) == -1) {
+ break;
+ } else {
+ i8 = i9;
+ }
+ }
+ i4 = i4 + ~i8 | 0;
+ if ((((i4 | 0) > -1 ? i4 : 0 - i4 | 0) | 0) > 131071) {
+ _luaX_syntaxerror(HEAP32[i1 + 12 >> 2] | 0, 10624);
+ }
+ HEAP32[i5 >> 2] = (i4 << 14) + 2147467264 | i6 & 16383;
+ HEAP32[i3 >> 2] = -1;
+ STACKTOP = i2;
+ return;
+}
+function _luaO_str2d(i1, i3, i5) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i4 = 0, i6 = 0, i7 = 0, i8 = 0, d9 = 0.0, i10 = 0, i11 = 0, i12 = 0, i13 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i4 = i2;
+ if ((_strpbrk(i1, 5464) | 0) != 0) {
+ i13 = 0;
+ STACKTOP = i2;
+ return i13 | 0;
+ }
+ do {
+ if ((_strpbrk(i1, 5472) | 0) == 0) {
+ d9 = +_strtod(i1, i4);
+ i10 = HEAP32[i4 >> 2] | 0;
+ } else {
+ HEAP32[i4 >> 2] = i1;
+ i8 = i1;
+ while (1) {
+ i6 = HEAP8[i8] | 0;
+ i10 = i8 + 1 | 0;
+ if ((HEAP8[(i6 & 255) + 10913 | 0] & 8) == 0) {
+ break;
+ } else {
+ i8 = i10;
+ }
+ }
+ if (i6 << 24 >> 24 == 43) {
+ i6 = 0;
+ i8 = i10;
+ } else if (i6 << 24 >> 24 == 45) {
+ i6 = 1;
+ i8 = i10;
+ } else {
+ i6 = 0;
+ }
+ if ((HEAP8[i8] | 0) == 48 ? (i13 = HEAP8[i8 + 1 | 0] | 0, i13 << 24 >> 24 == 88 | i13 << 24 >> 24 == 120) : 0) {
+ i10 = i8 + 2 | 0;
+ i8 = HEAP8[i10] | 0;
+ i12 = i8 & 255;
+ i11 = HEAP8[i12 + 10913 | 0] | 0;
+ if ((i11 & 16) == 0) {
+ d9 = 0.0;
+ i11 = i8;
+ i8 = 0;
+ } else {
+ d9 = 0.0;
+ i8 = 0;
+ while (1) {
+ if ((i11 & 2) == 0) {
+ i11 = (i12 | 32) + -87 | 0;
+ } else {
+ i11 = i12 + -48 | 0;
+ }
+ d9 = d9 * 16.0 + +(i11 | 0);
+ i8 = i8 + 1 | 0;
+ i10 = i10 + 1 | 0;
+ i13 = HEAP8[i10] | 0;
+ i12 = i13 & 255;
+ i11 = HEAP8[i12 + 10913 | 0] | 0;
+ if ((i11 & 16) == 0) {
+ i11 = i13;
+ break;
+ }
+ }
+ }
+ if (i11 << 24 >> 24 == 46) {
+ i10 = i10 + 1 | 0;
+ i13 = HEAPU8[i10] | 0;
+ i11 = HEAP8[i13 + 10913 | 0] | 0;
+ if ((i11 & 16) == 0) {
+ i12 = 0;
+ } else {
+ i12 = 0;
+ do {
+ if ((i11 & 2) == 0) {
+ i11 = (i13 | 32) + -87 | 0;
+ } else {
+ i11 = i13 + -48 | 0;
+ }
+ d9 = d9 * 16.0 + +(i11 | 0);
+ i12 = i12 + 1 | 0;
+ i10 = i10 + 1 | 0;
+ i13 = HEAPU8[i10] | 0;
+ i11 = HEAP8[i13 + 10913 | 0] | 0;
+ } while (!((i11 & 16) == 0));
+ }
+ } else {
+ i12 = 0;
+ }
+ if ((i12 | i8 | 0) != 0) {
+ i8 = Math_imul(i12, -4) | 0;
+ HEAP32[i4 >> 2] = i10;
+ i13 = HEAP8[i10] | 0;
+ if (i13 << 24 >> 24 == 80 | i13 << 24 >> 24 == 112) {
+ i13 = i10 + 1 | 0;
+ i11 = HEAP8[i13] | 0;
+ if (i11 << 24 >> 24 == 45) {
+ i11 = 1;
+ i13 = i10 + 2 | 0;
+ } else if (i11 << 24 >> 24 == 43) {
+ i11 = 0;
+ i13 = i10 + 2 | 0;
+ } else {
+ i11 = 0;
+ }
+ i12 = HEAP8[i13] | 0;
+ if (!((HEAP8[(i12 & 255) + 10913 | 0] & 2) == 0)) {
+ i10 = i13;
+ i7 = 0;
+ do {
+ i10 = i10 + 1 | 0;
+ i7 = (i12 << 24 >> 24) + -48 + (i7 * 10 | 0) | 0;
+ i12 = HEAP8[i10] | 0;
+ } while (!((HEAP8[(i12 & 255) + 10913 | 0] & 2) == 0));
+ i8 = ((i11 | 0) == 0 ? i7 : 0 - i7 | 0) + i8 | 0;
+ i7 = 29;
+ }
+ } else {
+ i7 = 29;
+ }
+ if ((i7 | 0) == 29) {
+ HEAP32[i4 >> 2] = i10;
+ }
+ if ((i6 | 0) != 0) {
+ d9 = -d9;
+ }
+ d9 = +_ldexp(d9, i8);
+ break;
+ }
+ }
+ HEAPF64[i5 >> 3] = 0.0;
+ i13 = 0;
+ STACKTOP = i2;
+ return i13 | 0;
+ }
+ } while (0);
+ HEAPF64[i5 >> 3] = d9;
+ if ((i10 | 0) == (i1 | 0)) {
+ i13 = 0;
+ STACKTOP = i2;
+ return i13 | 0;
+ }
+ if (!((HEAP8[(HEAPU8[i10] | 0) + 10913 | 0] & 8) == 0)) {
+ do {
+ i10 = i10 + 1 | 0;
+ } while (!((HEAP8[(HEAPU8[i10] | 0) + 10913 | 0] & 8) == 0));
+ HEAP32[i4 >> 2] = i10;
+ }
+ i13 = (i10 | 0) == (i1 + i3 | 0) | 0;
+ STACKTOP = i2;
+ return i13 | 0;
+}
+function _luaV_equalobj_(i2, i4, i5) {
+ i2 = i2 | 0;
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i3 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0;
+ i1 = STACKTOP;
+ i3 = i4 + 8 | 0;
+ L1 : do {
+ switch (HEAP32[i3 >> 2] & 63 | 0) {
+ case 7:
+ {
+ i6 = HEAP32[i4 >> 2] | 0;
+ i7 = HEAP32[i5 >> 2] | 0;
+ if ((i6 | 0) == (i7 | 0)) {
+ i7 = 1;
+ STACKTOP = i1;
+ return i7 | 0;
+ }
+ if ((i2 | 0) == 0) {
+ i7 = 0;
+ STACKTOP = i1;
+ return i7 | 0;
+ } else {
+ i6 = _get_equalTM(i2, HEAP32[i6 + 8 >> 2] | 0, HEAP32[i7 + 8 >> 2] | 0) | 0;
+ break L1;
+ }
+ }
+ case 5:
+ {
+ i7 = HEAP32[i4 >> 2] | 0;
+ i6 = HEAP32[i5 >> 2] | 0;
+ if ((i7 | 0) == (i6 | 0)) {
+ i7 = 1;
+ STACKTOP = i1;
+ return i7 | 0;
+ }
+ if ((i2 | 0) == 0) {
+ i7 = 0;
+ STACKTOP = i1;
+ return i7 | 0;
+ } else {
+ i6 = _get_equalTM(i2, HEAP32[i7 + 8 >> 2] | 0, HEAP32[i6 + 8 >> 2] | 0) | 0;
+ break L1;
+ }
+ }
+ case 4:
+ {
+ i7 = (HEAP32[i4 >> 2] | 0) == (HEAP32[i5 >> 2] | 0) | 0;
+ STACKTOP = i1;
+ return i7 | 0;
+ }
+ case 20:
+ {
+ i7 = _luaS_eqlngstr(HEAP32[i4 >> 2] | 0, HEAP32[i5 >> 2] | 0) | 0;
+ STACKTOP = i1;
+ return i7 | 0;
+ }
+ case 3:
+ {
+ i7 = +HEAPF64[i4 >> 3] == +HEAPF64[i5 >> 3] | 0;
+ STACKTOP = i1;
+ return i7 | 0;
+ }
+ case 1:
+ {
+ i7 = (HEAP32[i4 >> 2] | 0) == (HEAP32[i5 >> 2] | 0) | 0;
+ STACKTOP = i1;
+ return i7 | 0;
+ }
+ case 22:
+ {
+ i7 = (HEAP32[i4 >> 2] | 0) == (HEAP32[i5 >> 2] | 0) | 0;
+ STACKTOP = i1;
+ return i7 | 0;
+ }
+ case 2:
+ {
+ i7 = (HEAP32[i4 >> 2] | 0) == (HEAP32[i5 >> 2] | 0) | 0;
+ STACKTOP = i1;
+ return i7 | 0;
+ }
+ case 0:
+ {
+ i7 = 1;
+ STACKTOP = i1;
+ return i7 | 0;
+ }
+ default:
+ {
+ i7 = (HEAP32[i4 >> 2] | 0) == (HEAP32[i5 >> 2] | 0) | 0;
+ STACKTOP = i1;
+ return i7 | 0;
+ }
+ }
+ } while (0);
+ if ((i6 | 0) == 0) {
+ i7 = 0;
+ STACKTOP = i1;
+ return i7 | 0;
+ }
+ i7 = i2 + 8 | 0;
+ i10 = HEAP32[i7 >> 2] | 0;
+ i9 = i2 + 28 | 0;
+ i8 = i10 - (HEAP32[i9 >> 2] | 0) | 0;
+ HEAP32[i7 >> 2] = i10 + 16;
+ i13 = i6;
+ i12 = HEAP32[i13 + 4 >> 2] | 0;
+ i11 = i10;
+ HEAP32[i11 >> 2] = HEAP32[i13 >> 2];
+ HEAP32[i11 + 4 >> 2] = i12;
+ HEAP32[i10 + 8 >> 2] = HEAP32[i6 + 8 >> 2];
+ i10 = HEAP32[i7 >> 2] | 0;
+ HEAP32[i7 >> 2] = i10 + 16;
+ i11 = i4;
+ i4 = HEAP32[i11 + 4 >> 2] | 0;
+ i6 = i10;
+ HEAP32[i6 >> 2] = HEAP32[i11 >> 2];
+ HEAP32[i6 + 4 >> 2] = i4;
+ HEAP32[i10 + 8 >> 2] = HEAP32[i3 >> 2];
+ i3 = HEAP32[i7 >> 2] | 0;
+ HEAP32[i7 >> 2] = i3 + 16;
+ i10 = i5;
+ i6 = HEAP32[i10 + 4 >> 2] | 0;
+ i4 = i3;
+ HEAP32[i4 >> 2] = HEAP32[i10 >> 2];
+ HEAP32[i4 + 4 >> 2] = i6;
+ HEAP32[i3 + 8 >> 2] = HEAP32[i5 + 8 >> 2];
+ _luaD_call(i2, (HEAP32[i7 >> 2] | 0) + -48 | 0, 1, HEAP8[(HEAP32[i2 + 16 >> 2] | 0) + 18 | 0] & 1);
+ i2 = HEAP32[i9 >> 2] | 0;
+ i3 = HEAP32[i7 >> 2] | 0;
+ i4 = i3 + -16 | 0;
+ HEAP32[i7 >> 2] = i4;
+ i5 = HEAP32[i4 + 4 >> 2] | 0;
+ i6 = i2 + i8 | 0;
+ HEAP32[i6 >> 2] = HEAP32[i4 >> 2];
+ HEAP32[i6 + 4 >> 2] = i5;
+ HEAP32[i2 + (i8 + 8) >> 2] = HEAP32[i3 + -8 >> 2];
+ i2 = HEAP32[i7 >> 2] | 0;
+ i3 = HEAP32[i2 + 8 >> 2] | 0;
+ if ((i3 | 0) != 0) {
+ if ((i3 | 0) == 1) {
+ i2 = (HEAP32[i2 >> 2] | 0) != 0;
+ } else {
+ i2 = 1;
+ }
+ } else {
+ i2 = 0;
+ }
+ i13 = i2 & 1;
+ STACKTOP = i1;
+ return i13 | 0;
+}
+function _forbody(i1, i5, i6, i4, i9) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ i6 = i6 | 0;
+ i4 = i4 | 0;
+ i9 = i9 | 0;
+ var i2 = 0, i3 = 0, i7 = 0, i8 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 32 | 0;
+ i8 = i3 + 12 | 0;
+ i19 = i3;
+ i11 = i1 + 48 | 0;
+ i7 = HEAP32[i11 >> 2] | 0;
+ i18 = i7 + 46 | 0;
+ i22 = (HEAPU8[i18] | 0) + 3 | 0;
+ HEAP8[i18] = i22;
+ i21 = i7 + 20 | 0;
+ i17 = i7 + 12 | 0;
+ i2 = i7 + 40 | 0;
+ i20 = HEAP32[(HEAP32[i7 >> 2] | 0) + 24 >> 2] | 0;
+ i10 = HEAP32[HEAP32[(HEAP32[i17 >> 2] | 0) + 64 >> 2] >> 2] | 0;
+ HEAP32[i20 + ((HEAP16[i10 + ((i22 & 255) + -3 + (HEAP32[i2 >> 2] | 0) << 1) >> 1] | 0) * 12 | 0) + 4 >> 2] = HEAP32[i21 >> 2];
+ HEAP32[i20 + ((HEAP16[i10 + ((HEAPU8[i18] | 0) + -2 + (HEAP32[i2 >> 2] | 0) << 1) >> 1] | 0) * 12 | 0) + 4 >> 2] = HEAP32[i21 >> 2];
+ HEAP32[i20 + ((HEAP16[i10 + ((HEAPU8[i18] | 0) + -1 + (HEAP32[i2 >> 2] | 0) << 1) >> 1] | 0) * 12 | 0) + 4 >> 2] = HEAP32[i21 >> 2];
+ i2 = i1 + 16 | 0;
+ if ((HEAP32[i2 >> 2] | 0) != 259) {
+ _error_expected(i1, 259);
+ }
+ _luaX_next(i1);
+ i10 = (i9 | 0) != 0;
+ if (i10) {
+ i9 = _luaK_codeABx(i7, 33, i5, 131070) | 0;
+ } else {
+ i9 = _luaK_jump(i7) | 0;
+ }
+ HEAP8[i19 + 10 | 0] = 0;
+ HEAP8[i19 + 8 | 0] = HEAP8[i18] | 0;
+ i17 = HEAP32[(HEAP32[i17 >> 2] | 0) + 64 >> 2] | 0;
+ HEAP16[i19 + 4 >> 1] = HEAP32[i17 + 28 >> 2];
+ HEAP16[i19 + 6 >> 1] = HEAP32[i17 + 16 >> 2];
+ HEAP8[i19 + 9 | 0] = 0;
+ i17 = i7 + 16 | 0;
+ HEAP32[i19 >> 2] = HEAP32[i17 >> 2];
+ HEAP32[i17 >> 2] = i19;
+ i19 = HEAP32[i11 >> 2] | 0;
+ i17 = i19 + 46 | 0;
+ i18 = (HEAPU8[i17] | 0) + i4 | 0;
+ HEAP8[i17] = i18;
+ if ((i4 | 0) != 0 ? (i13 = i19 + 20 | 0, i12 = i19 + 40 | 0, i14 = HEAP32[(HEAP32[i19 >> 2] | 0) + 24 >> 2] | 0, i15 = HEAP32[HEAP32[(HEAP32[i19 + 12 >> 2] | 0) + 64 >> 2] >> 2] | 0, HEAP32[i14 + ((HEAP16[i15 + ((i18 & 255) - i4 + (HEAP32[i12 >> 2] | 0) << 1) >> 1] | 0) * 12 | 0) + 4 >> 2] = HEAP32[i13 >> 2], i16 = i4 + -1 | 0, (i16 | 0) != 0) : 0) {
+ do {
+ HEAP32[i14 + ((HEAP16[i15 + ((HEAPU8[i17] | 0) - i16 + (HEAP32[i12 >> 2] | 0) << 1) >> 1] | 0) * 12 | 0) + 4 >> 2] = HEAP32[i13 >> 2];
+ i16 = i16 + -1 | 0;
+ } while ((i16 | 0) != 0);
+ }
+ _luaK_reserveregs(i7, i4);
+ i11 = HEAP32[i11 >> 2] | 0;
+ HEAP8[i8 + 10 | 0] = 0;
+ HEAP8[i8 + 8 | 0] = HEAP8[i11 + 46 | 0] | 0;
+ i22 = HEAP32[(HEAP32[i11 + 12 >> 2] | 0) + 64 >> 2] | 0;
+ HEAP16[i8 + 4 >> 1] = HEAP32[i22 + 28 >> 2];
+ HEAP16[i8 + 6 >> 1] = HEAP32[i22 + 16 >> 2];
+ HEAP8[i8 + 9 | 0] = 0;
+ i22 = i11 + 16 | 0;
+ HEAP32[i8 >> 2] = HEAP32[i22 >> 2];
+ HEAP32[i22 >> 2] = i8;
+ L13 : do {
+ i8 = HEAP32[i2 >> 2] | 0;
+ switch (i8 | 0) {
+ case 277:
+ case 286:
+ case 262:
+ case 261:
+ case 260:
+ {
+ break L13;
+ }
+ default:
+ {}
+ }
+ _statement(i1);
+ } while ((i8 | 0) != 274);
+ _leaveblock(i11);
+ _leaveblock(i7);
+ _luaK_patchtohere(i7, i9);
+ if (i10) {
+ i21 = _luaK_codeABx(i7, 32, i5, 131070) | 0;
+ i22 = i9 + 1 | 0;
+ _luaK_patchlist(i7, i21, i22);
+ _luaK_fixline(i7, i6);
+ STACKTOP = i3;
+ return;
+ } else {
+ _luaK_codeABC(i7, 34, i5, 0, i4) | 0;
+ _luaK_fixline(i7, i6);
+ i21 = _luaK_codeABx(i7, 35, i5 + 2 | 0, 131070) | 0;
+ i22 = i9 + 1 | 0;
+ _luaK_patchlist(i7, i21, i22);
+ _luaK_fixline(i7, i6);
+ STACKTOP = i3;
+ return;
+ }
+}
+function _dotty(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0;
+ i6 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = i6;
+ i4 = i6 + 4 | 0;
+ i2 = HEAP32[20] | 0;
+ HEAP32[20] = 0;
+ _lua_settop(i1, 0);
+ if ((_pushline(i1, 1) | 0) == 0) {
+ _lua_settop(i1, 0);
+ i10 = HEAP32[_stdout >> 2] | 0;
+ _fputc(10, i10 | 0) | 0;
+ _fflush(i10 | 0) | 0;
+ HEAP32[20] = i2;
+ STACKTOP = i6;
+ return;
+ }
+ i5 = HEAP32[_stderr >> 2] | 0;
+ L4 : while (1) {
+ i8 = _lua_tolstring(i1, 1, i4) | 0;
+ i8 = _luaL_loadbufferx(i1, i8, HEAP32[i4 >> 2] | 0, 256, 0) | 0;
+ L6 : do {
+ if ((i8 | 0) == 3) {
+ while (1) {
+ i8 = _lua_tolstring(i1, -1, i3) | 0;
+ i7 = HEAP32[i3 >> 2] | 0;
+ if (!(i7 >>> 0 > 4)) {
+ break;
+ }
+ if ((_strcmp(i8 + (i7 + -5) | 0, 264) | 0) != 0) {
+ break;
+ }
+ _lua_settop(i1, -2);
+ if ((_pushline(i1, 0) | 0) == 0) {
+ i7 = 23;
+ break L4;
+ }
+ _lua_pushlstring(i1, 184, 1) | 0;
+ _lua_insert(i1, -2);
+ _lua_concat(i1, 3);
+ i8 = _lua_tolstring(i1, 1, i4) | 0;
+ i8 = _luaL_loadbufferx(i1, i8, HEAP32[i4 >> 2] | 0, 256, 0) | 0;
+ if ((i8 | 0) != 3) {
+ i7 = 9;
+ break L6;
+ }
+ }
+ _lua_remove(i1, 1);
+ i8 = 3;
+ i7 = 10;
+ } else {
+ i7 = 9;
+ }
+ } while (0);
+ do {
+ if ((i7 | 0) == 9) {
+ _lua_remove(i1, 1);
+ if ((i8 | 0) == -1) {
+ i7 = 23;
+ break L4;
+ } else if ((i8 | 0) != 0) {
+ i7 = 10;
+ break;
+ }
+ i9 = _lua_gettop(i1) | 0;
+ _lua_pushcclosure(i1, 142, 0);
+ _lua_insert(i1, i9);
+ HEAP32[48] = i1;
+ _signal(2, 1) | 0;
+ i10 = _lua_pcallk(i1, 0, -1, i9, 0, 0) | 0;
+ _signal(2, 0) | 0;
+ _lua_remove(i1, i9);
+ if ((i10 | 0) == 0) {
+ i7 = 17;
+ } else {
+ i9 = 0;
+ i7 = 12;
+ }
+ }
+ } while (0);
+ if ((i7 | 0) == 10) {
+ i9 = (i8 | 0) == 0;
+ i7 = 12;
+ }
+ do {
+ if ((i7 | 0) == 12) {
+ i7 = 0;
+ if ((_lua_type(i1, -1) | 0) == 0) {
+ if (i9) {
+ i7 = 17;
+ break;
+ } else {
+ break;
+ }
+ }
+ i10 = _lua_tolstring(i1, -1, 0) | 0;
+ i8 = HEAP32[20] | 0;
+ if ((i8 | 0) != 0) {
+ HEAP32[i3 >> 2] = i8;
+ _fprintf(i5 | 0, 496, i3 | 0) | 0;
+ _fflush(i5 | 0) | 0;
+ }
+ HEAP32[i3 >> 2] = (i10 | 0) == 0 ? 48 : i10;
+ _fprintf(i5 | 0, 912, i3 | 0) | 0;
+ _fflush(i5 | 0) | 0;
+ _lua_settop(i1, -2);
+ _lua_gc(i1, 2, 0) | 0;
+ if (i9) {
+ i7 = 17;
+ }
+ }
+ } while (0);
+ if (((i7 | 0) == 17 ? (0, (_lua_gettop(i1) | 0) > 0) : 0) ? (_luaL_checkstack(i1, 20, 112), _lua_getglobal(i1, 144), _lua_insert(i1, 1), (_lua_pcallk(i1, (_lua_gettop(i1) | 0) + -1 | 0, 0, 0, 0, 0) | 0) != 0) : 0) {
+ i7 = HEAP32[20] | 0;
+ HEAP32[i3 >> 2] = _lua_tolstring(i1, -1, 0) | 0;
+ i8 = _lua_pushfstring(i1, 152, i3) | 0;
+ if ((i7 | 0) != 0) {
+ HEAP32[i3 >> 2] = i7;
+ _fprintf(i5 | 0, 496, i3 | 0) | 0;
+ _fflush(i5 | 0) | 0;
+ }
+ HEAP32[i3 >> 2] = i8;
+ _fprintf(i5 | 0, 912, i3 | 0) | 0;
+ _fflush(i5 | 0) | 0;
+ }
+ _lua_settop(i1, 0);
+ if ((_pushline(i1, 1) | 0) == 0) {
+ i7 = 23;
+ break;
+ }
+ }
+ if ((i7 | 0) == 23) {
+ _lua_settop(i1, 0);
+ i10 = HEAP32[_stdout >> 2] | 0;
+ _fputc(10, i10 | 0) | 0;
+ _fflush(i10 | 0) | 0;
+ HEAP32[20] = i2;
+ STACKTOP = i6;
+ return;
+ }
+}
+function _test_then_block(i5, i1) {
+ i5 = i5 | 0;
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 48 | 0;
+ i10 = i2 + 24 | 0;
+ i9 = i2;
+ i8 = i5 + 48 | 0;
+ i4 = HEAP32[i8 >> 2] | 0;
+ _luaX_next(i5);
+ _subexpr(i5, i9, 0) | 0;
+ i3 = i5 + 16 | 0;
+ if ((HEAP32[i3 >> 2] | 0) != 275) {
+ _error_expected(i5, 275);
+ }
+ _luaX_next(i5);
+ i14 = HEAP32[i3 >> 2] | 0;
+ do {
+ if ((i14 | 0) == 258 | (i14 | 0) == 266) {
+ _luaK_goiffalse(HEAP32[i8 >> 2] | 0, i9);
+ HEAP8[i10 + 10 | 0] = 0;
+ HEAP8[i10 + 8 | 0] = HEAP8[i4 + 46 | 0] | 0;
+ i11 = HEAP32[(HEAP32[i4 + 12 >> 2] | 0) + 64 >> 2] | 0;
+ HEAP16[i10 + 4 >> 1] = HEAP32[i11 + 28 >> 2];
+ HEAP16[i10 + 6 >> 1] = HEAP32[i11 + 16 >> 2];
+ HEAP8[i10 + 9 | 0] = 0;
+ i11 = i4 + 16 | 0;
+ HEAP32[i10 >> 2] = HEAP32[i11 >> 2];
+ HEAP32[i11 >> 2] = i10;
+ i11 = HEAP32[i9 + 16 >> 2] | 0;
+ i10 = HEAP32[i5 + 4 >> 2] | 0;
+ i14 = (HEAP32[i3 >> 2] | 0) == 266;
+ _luaX_next(i5);
+ do {
+ if (i14) {
+ if ((HEAP32[i3 >> 2] | 0) == 288) {
+ i7 = HEAP32[i5 + 24 >> 2] | 0;
+ _luaX_next(i5);
+ break;
+ } else {
+ _error_expected(i5, 288);
+ }
+ } else {
+ i7 = _luaS_new(HEAP32[i5 + 52 >> 2] | 0, 6304) | 0;
+ }
+ } while (0);
+ i14 = HEAP32[i5 + 64 >> 2] | 0;
+ i12 = i14 + 12 | 0;
+ i13 = i14 + 16 | 0;
+ i9 = HEAP32[i13 >> 2] | 0;
+ i14 = i14 + 20 | 0;
+ if ((i9 | 0) < (HEAP32[i14 >> 2] | 0)) {
+ i14 = HEAP32[i12 >> 2] | 0;
+ } else {
+ i14 = _luaM_growaux_(HEAP32[i5 + 52 >> 2] | 0, HEAP32[i12 >> 2] | 0, i14, 16, 32767, 6312) | 0;
+ HEAP32[i12 >> 2] = i14;
+ }
+ HEAP32[i14 + (i9 << 4) >> 2] = i7;
+ i14 = HEAP32[i12 >> 2] | 0;
+ HEAP32[i14 + (i9 << 4) + 8 >> 2] = i10;
+ HEAP8[i14 + (i9 << 4) + 12 | 0] = HEAP8[(HEAP32[i8 >> 2] | 0) + 46 | 0] | 0;
+ HEAP32[(HEAP32[i12 >> 2] | 0) + (i9 << 4) + 4 >> 2] = i11;
+ HEAP32[i13 >> 2] = (HEAP32[i13 >> 2] | 0) + 1;
+ _findlabel(i5, i9) | 0;
+ L18 : while (1) {
+ switch (HEAP32[i3 >> 2] | 0) {
+ case 286:
+ case 262:
+ case 261:
+ case 260:
+ {
+ break L18;
+ }
+ case 285:
+ case 59:
+ {
+ break;
+ }
+ default:
+ {
+ i6 = 16;
+ break L18;
+ }
+ }
+ _statement(i5);
+ }
+ if ((i6 | 0) == 16) {
+ i6 = _luaK_jump(i4) | 0;
+ break;
+ }
+ _leaveblock(i4);
+ STACKTOP = i2;
+ return;
+ } else {
+ _luaK_goiftrue(HEAP32[i8 >> 2] | 0, i9);
+ HEAP8[i10 + 10 | 0] = 0;
+ HEAP8[i10 + 8 | 0] = HEAP8[i4 + 46 | 0] | 0;
+ i6 = HEAP32[(HEAP32[i4 + 12 >> 2] | 0) + 64 >> 2] | 0;
+ HEAP16[i10 + 4 >> 1] = HEAP32[i6 + 28 >> 2];
+ HEAP16[i10 + 6 >> 1] = HEAP32[i6 + 16 >> 2];
+ HEAP8[i10 + 9 | 0] = 0;
+ i6 = i4 + 16 | 0;
+ HEAP32[i10 >> 2] = HEAP32[i6 >> 2];
+ HEAP32[i6 >> 2] = i10;
+ i6 = HEAP32[i9 + 20 >> 2] | 0;
+ }
+ } while (0);
+ L26 : do {
+ i7 = HEAP32[i3 >> 2] | 0;
+ switch (i7 | 0) {
+ case 277:
+ case 286:
+ case 262:
+ case 261:
+ case 260:
+ {
+ break L26;
+ }
+ default:
+ {}
+ }
+ _statement(i5);
+ } while ((i7 | 0) != 274);
+ _leaveblock(i4);
+ if (((HEAP32[i3 >> 2] | 0) + -260 | 0) >>> 0 < 2) {
+ _luaK_concat(i4, i1, _luaK_jump(i4) | 0);
+ }
+ _luaK_patchtohere(i4, i6);
+ STACKTOP = i2;
+ return;
+}
+function _luaL_gsub(i2, i13, i11, i10) {
+ i2 = i2 | 0;
+ i13 = i13 | 0;
+ i11 = i11 | 0;
+ i10 = i10 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i12 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 1056 | 0;
+ i8 = i1;
+ i4 = i1 + 8 | 0;
+ i9 = _strlen(i11 | 0) | 0;
+ i6 = i4 + 12 | 0;
+ HEAP32[i6 >> 2] = i2;
+ i3 = i4 + 16 | 0;
+ HEAP32[i4 >> 2] = i3;
+ i5 = i4 + 8 | 0;
+ HEAP32[i5 >> 2] = 0;
+ i7 = i4 + 4 | 0;
+ HEAP32[i7 >> 2] = 1024;
+ i12 = _strstr(i13, i11) | 0;
+ if ((i12 | 0) == 0) {
+ i14 = 0;
+ i17 = 1024;
+ i16 = i2;
+ } else {
+ i14 = 0;
+ i17 = 1024;
+ i16 = i2;
+ do {
+ i15 = i12 - i13 | 0;
+ if ((i17 - i14 | 0) >>> 0 < i15 >>> 0) {
+ i17 = i17 << 1;
+ i17 = (i17 - i14 | 0) >>> 0 < i15 >>> 0 ? i14 + i15 | 0 : i17;
+ if (i17 >>> 0 < i14 >>> 0 | (i17 - i14 | 0) >>> 0 < i15 >>> 0) {
+ _luaL_error(i16, 1272, i8) | 0;
+ }
+ i14 = _lua_newuserdata(i16, i17) | 0;
+ _memcpy(i14 | 0, HEAP32[i4 >> 2] | 0, HEAP32[i5 >> 2] | 0) | 0;
+ if ((HEAP32[i4 >> 2] | 0) != (i3 | 0)) {
+ _lua_remove(i16, -2);
+ }
+ HEAP32[i4 >> 2] = i14;
+ HEAP32[i7 >> 2] = i17;
+ i16 = i14;
+ i14 = HEAP32[i5 >> 2] | 0;
+ } else {
+ i16 = HEAP32[i4 >> 2] | 0;
+ }
+ _memcpy(i16 + i14 | 0, i13 | 0, i15 | 0) | 0;
+ i15 = (HEAP32[i5 >> 2] | 0) + i15 | 0;
+ HEAP32[i5 >> 2] = i15;
+ i13 = _strlen(i10 | 0) | 0;
+ i14 = HEAP32[i6 >> 2] | 0;
+ i16 = HEAP32[i7 >> 2] | 0;
+ if ((i16 - i15 | 0) >>> 0 < i13 >>> 0) {
+ i16 = i16 << 1;
+ i16 = (i16 - i15 | 0) >>> 0 < i13 >>> 0 ? i15 + i13 | 0 : i16;
+ if (i16 >>> 0 < i15 >>> 0 | (i16 - i15 | 0) >>> 0 < i13 >>> 0) {
+ _luaL_error(i14, 1272, i8) | 0;
+ }
+ i15 = _lua_newuserdata(i14, i16) | 0;
+ _memcpy(i15 | 0, HEAP32[i4 >> 2] | 0, HEAP32[i5 >> 2] | 0) | 0;
+ if ((HEAP32[i4 >> 2] | 0) != (i3 | 0)) {
+ _lua_remove(i14, -2);
+ }
+ HEAP32[i4 >> 2] = i15;
+ HEAP32[i7 >> 2] = i16;
+ i14 = i15;
+ i15 = HEAP32[i5 >> 2] | 0;
+ } else {
+ i14 = HEAP32[i4 >> 2] | 0;
+ }
+ _memcpy(i14 + i15 | 0, i10 | 0, i13 | 0) | 0;
+ i14 = (HEAP32[i5 >> 2] | 0) + i13 | 0;
+ HEAP32[i5 >> 2] = i14;
+ i13 = i12 + i9 | 0;
+ i12 = _strstr(i13, i11) | 0;
+ i16 = HEAP32[i6 >> 2] | 0;
+ i17 = HEAP32[i7 >> 2] | 0;
+ } while ((i12 | 0) != 0);
+ }
+ i9 = _strlen(i13 | 0) | 0;
+ if ((i17 - i14 | 0) >>> 0 < i9 >>> 0) {
+ i10 = i17 << 1;
+ i10 = (i10 - i14 | 0) >>> 0 < i9 >>> 0 ? i14 + i9 | 0 : i10;
+ if (i10 >>> 0 < i14 >>> 0 | (i10 - i14 | 0) >>> 0 < i9 >>> 0) {
+ _luaL_error(i16, 1272, i8) | 0;
+ }
+ i8 = _lua_newuserdata(i16, i10) | 0;
+ _memcpy(i8 | 0, HEAP32[i4 >> 2] | 0, HEAP32[i5 >> 2] | 0) | 0;
+ if ((HEAP32[i4 >> 2] | 0) != (i3 | 0)) {
+ _lua_remove(i16, -2);
+ }
+ HEAP32[i4 >> 2] = i8;
+ HEAP32[i7 >> 2] = i10;
+ i14 = HEAP32[i5 >> 2] | 0;
+ } else {
+ i8 = HEAP32[i4 >> 2] | 0;
+ }
+ _memcpy(i8 + i14 | 0, i13 | 0, i9 | 0) | 0;
+ i17 = (HEAP32[i5 >> 2] | 0) + i9 | 0;
+ HEAP32[i5 >> 2] = i17;
+ i5 = HEAP32[i6 >> 2] | 0;
+ _lua_pushlstring(i5, HEAP32[i4 >> 2] | 0, i17) | 0;
+ if ((HEAP32[i4 >> 2] | 0) == (i3 | 0)) {
+ i17 = _lua_tolstring(i2, -1, 0) | 0;
+ STACKTOP = i1;
+ return i17 | 0;
+ }
+ _lua_remove(i5, -2);
+ i17 = _lua_tolstring(i2, -1, 0) | 0;
+ STACKTOP = i1;
+ return i17 | 0;
+}
+function _luaK_goiffalse(i1, i3) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0;
+ i2 = STACKTOP;
+ _luaK_dischargevars(i1, i3);
+ i9 = HEAP32[i3 >> 2] | 0;
+ do {
+ if ((i9 | 0) == 10) {
+ i4 = HEAP32[i3 + 8 >> 2] | 0;
+ i8 = 15;
+ } else if (!((i9 | 0) == 3 | (i9 | 0) == 1)) {
+ i4 = i3 + 8 | 0;
+ if ((i9 | 0) == 6) {
+ i8 = 11;
+ } else if ((i9 | 0) == 11 ? (i10 = HEAP32[(HEAP32[(HEAP32[i1 >> 2] | 0) + 12 >> 2] | 0) + (HEAP32[i4 >> 2] << 2) >> 2] | 0, (i10 & 63 | 0) == 20) : 0) {
+ i4 = i1 + 20 | 0;
+ HEAP32[i4 >> 2] = (HEAP32[i4 >> 2] | 0) + -1;
+ i4 = _condjump(i1, 27, i10 >>> 23, 0, 0) | 0;
+ i8 = 15;
+ break;
+ } else {
+ i8 = 6;
+ }
+ if ((i8 | 0) == 6) {
+ i9 = i1 + 48 | 0;
+ i11 = HEAP8[i9] | 0;
+ i10 = (i11 & 255) + 1 | 0;
+ i12 = (HEAP32[i1 >> 2] | 0) + 78 | 0;
+ do {
+ if (i10 >>> 0 > (HEAPU8[i12] | 0) >>> 0) {
+ if (i10 >>> 0 > 249) {
+ _luaX_syntaxerror(HEAP32[i1 + 12 >> 2] | 0, 10536);
+ } else {
+ HEAP8[i12] = i10;
+ i7 = HEAP8[i9] | 0;
+ break;
+ }
+ } else {
+ i7 = i11;
+ }
+ } while (0);
+ i12 = (i7 & 255) + 1 | 0;
+ HEAP8[i9] = i12;
+ _discharge2reg(i1, i3, (i12 & 255) + -1 | 0);
+ if ((HEAP32[i3 >> 2] | 0) == 6) {
+ i8 = 11;
+ }
+ }
+ if (((i8 | 0) == 11 ? (i6 = HEAP32[i4 >> 2] | 0, (i6 & 256 | 0) == 0) : 0) ? (HEAPU8[i1 + 46 | 0] | 0 | 0) <= (i6 | 0) : 0) {
+ i12 = i1 + 48 | 0;
+ HEAP8[i12] = (HEAP8[i12] | 0) + -1 << 24 >> 24;
+ }
+ i4 = _condjump(i1, 28, 255, HEAP32[i4 >> 2] | 0, 1) | 0;
+ i8 = 15;
+ }
+ } while (0);
+ do {
+ if ((i8 | 0) == 15 ? (i5 = i3 + 16 | 0, !((i4 | 0) == -1)) : 0) {
+ i8 = HEAP32[i5 >> 2] | 0;
+ if ((i8 | 0) == -1) {
+ HEAP32[i5 >> 2] = i4;
+ break;
+ }
+ i5 = HEAP32[(HEAP32[i1 >> 2] | 0) + 12 >> 2] | 0;
+ while (1) {
+ i7 = i5 + (i8 << 2) | 0;
+ i6 = HEAP32[i7 >> 2] | 0;
+ i9 = (i6 >>> 14) + -131071 | 0;
+ if ((i9 | 0) == -1) {
+ break;
+ }
+ i9 = i8 + 1 + i9 | 0;
+ if ((i9 | 0) == -1) {
+ break;
+ } else {
+ i8 = i9;
+ }
+ }
+ i4 = i4 + ~i8 | 0;
+ if ((((i4 | 0) > -1 ? i4 : 0 - i4 | 0) | 0) > 131071) {
+ _luaX_syntaxerror(HEAP32[i1 + 12 >> 2] | 0, 10624);
+ } else {
+ HEAP32[i7 >> 2] = (i4 << 14) + 2147467264 | i6 & 16383;
+ break;
+ }
+ }
+ } while (0);
+ i3 = i3 + 20 | 0;
+ i4 = HEAP32[i3 >> 2] | 0;
+ HEAP32[i1 + 24 >> 2] = HEAP32[i1 + 20 >> 2];
+ i5 = i1 + 28 | 0;
+ if ((i4 | 0) == -1) {
+ HEAP32[i3 >> 2] = -1;
+ STACKTOP = i2;
+ return;
+ }
+ i8 = HEAP32[i5 >> 2] | 0;
+ if ((i8 | 0) == -1) {
+ HEAP32[i5 >> 2] = i4;
+ HEAP32[i3 >> 2] = -1;
+ STACKTOP = i2;
+ return;
+ }
+ i7 = HEAP32[(HEAP32[i1 >> 2] | 0) + 12 >> 2] | 0;
+ while (1) {
+ i5 = i7 + (i8 << 2) | 0;
+ i6 = HEAP32[i5 >> 2] | 0;
+ i9 = (i6 >>> 14) + -131071 | 0;
+ if ((i9 | 0) == -1) {
+ break;
+ }
+ i9 = i8 + 1 + i9 | 0;
+ if ((i9 | 0) == -1) {
+ break;
+ } else {
+ i8 = i9;
+ }
+ }
+ i4 = i4 + ~i8 | 0;
+ if ((((i4 | 0) > -1 ? i4 : 0 - i4 | 0) | 0) > 131071) {
+ _luaX_syntaxerror(HEAP32[i1 + 12 >> 2] | 0, 10624);
+ }
+ HEAP32[i5 >> 2] = (i4 << 14) + 2147467264 | i6 & 16383;
+ HEAP32[i3 >> 2] = -1;
+ STACKTOP = i2;
+ return;
+}
+function _luaV_settable(i2, i11, i7, i9) {
+ i2 = i2 | 0;
+ i11 = i11 | 0;
+ i7 = i7 | 0;
+ i9 = i9 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i8 = 0, i10 = 0, i12 = 0, i13 = 0, i14 = 0;
+ i6 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i5 = i6;
+ i4 = i2 + 12 | 0;
+ i3 = i11;
+ i13 = HEAP32[i11 + 8 >> 2] | 0;
+ i12 = 0;
+ while (1) {
+ i11 = i3 + 8 | 0;
+ if ((i13 | 0) != 69) {
+ i14 = _luaT_gettmbyobj(i2, i3, 1) | 0;
+ i13 = HEAP32[i14 + 8 >> 2] | 0;
+ if ((i13 | 0) == 0) {
+ i1 = 16;
+ break;
+ }
+ } else {
+ i8 = HEAP32[i3 >> 2] | 0;
+ i13 = _luaH_get(i8, i7) | 0;
+ if ((HEAP32[i13 + 8 >> 2] | 0) != 0) {
+ i10 = i13;
+ break;
+ }
+ i14 = HEAP32[i8 + 8 >> 2] | 0;
+ if ((i14 | 0) == 0) {
+ i1 = 9;
+ break;
+ }
+ if (!((HEAP8[i14 + 6 | 0] & 2) == 0)) {
+ i1 = 9;
+ break;
+ }
+ i14 = _luaT_gettm(i14, 1, HEAP32[(HEAP32[i4 >> 2] | 0) + 188 >> 2] | 0) | 0;
+ if ((i14 | 0) == 0) {
+ i1 = 9;
+ break;
+ }
+ i13 = HEAP32[i14 + 8 >> 2] | 0;
+ }
+ i12 = i12 + 1 | 0;
+ if ((i13 & 15 | 0) == 6) {
+ i1 = 18;
+ break;
+ }
+ if ((i12 | 0) < 100) {
+ i3 = i14;
+ } else {
+ i1 = 19;
+ break;
+ }
+ }
+ if ((i1 | 0) == 9) {
+ if ((i13 | 0) == 5192) {
+ i10 = _luaH_newkey(i2, i8, i7) | 0;
+ } else {
+ i10 = i13;
+ }
+ } else if ((i1 | 0) == 16) {
+ _luaG_typeerror(i2, i3, 8944);
+ } else if ((i1 | 0) == 18) {
+ i13 = i2 + 8 | 0;
+ i8 = HEAP32[i13 >> 2] | 0;
+ HEAP32[i13 >> 2] = i8 + 16;
+ i5 = i14;
+ i12 = HEAP32[i5 + 4 >> 2] | 0;
+ i10 = i8;
+ HEAP32[i10 >> 2] = HEAP32[i5 >> 2];
+ HEAP32[i10 + 4 >> 2] = i12;
+ HEAP32[i8 + 8 >> 2] = HEAP32[i14 + 8 >> 2];
+ i14 = HEAP32[i13 >> 2] | 0;
+ HEAP32[i13 >> 2] = i14 + 16;
+ i8 = i3;
+ i10 = HEAP32[i8 + 4 >> 2] | 0;
+ i12 = i14;
+ HEAP32[i12 >> 2] = HEAP32[i8 >> 2];
+ HEAP32[i12 + 4 >> 2] = i10;
+ HEAP32[i14 + 8 >> 2] = HEAP32[i11 >> 2];
+ i14 = HEAP32[i13 >> 2] | 0;
+ HEAP32[i13 >> 2] = i14 + 16;
+ i12 = i7;
+ i11 = HEAP32[i12 + 4 >> 2] | 0;
+ i10 = i14;
+ HEAP32[i10 >> 2] = HEAP32[i12 >> 2];
+ HEAP32[i10 + 4 >> 2] = i11;
+ HEAP32[i14 + 8 >> 2] = HEAP32[i7 + 8 >> 2];
+ i14 = HEAP32[i13 >> 2] | 0;
+ HEAP32[i13 >> 2] = i14 + 16;
+ i10 = i9;
+ i11 = HEAP32[i10 + 4 >> 2] | 0;
+ i12 = i14;
+ HEAP32[i12 >> 2] = HEAP32[i10 >> 2];
+ HEAP32[i12 + 4 >> 2] = i11;
+ HEAP32[i14 + 8 >> 2] = HEAP32[i9 + 8 >> 2];
+ _luaD_call(i2, (HEAP32[i13 >> 2] | 0) + -64 | 0, 0, HEAP8[(HEAP32[i2 + 16 >> 2] | 0) + 18 | 0] & 1);
+ STACKTOP = i6;
+ return;
+ } else if ((i1 | 0) == 19) {
+ _luaG_runerror(i2, 8976, i5);
+ }
+ i12 = i9;
+ i13 = HEAP32[i12 + 4 >> 2] | 0;
+ i14 = i10;
+ HEAP32[i14 >> 2] = HEAP32[i12 >> 2];
+ HEAP32[i14 + 4 >> 2] = i13;
+ i14 = i9 + 8 | 0;
+ HEAP32[i10 + 8 >> 2] = HEAP32[i14 >> 2];
+ HEAP8[i8 + 6 | 0] = 0;
+ if ((HEAP32[i14 >> 2] & 64 | 0) == 0) {
+ STACKTOP = i6;
+ return;
+ }
+ if ((HEAP8[(HEAP32[i9 >> 2] | 0) + 5 | 0] & 3) == 0) {
+ STACKTOP = i6;
+ return;
+ }
+ if ((HEAP8[i8 + 5 | 0] & 4) == 0) {
+ STACKTOP = i6;
+ return;
+ }
+ _luaC_barrierback_(i2, i8);
+ STACKTOP = i6;
+ return;
+}
+function _luaK_code(i4, i5) {
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0;
+ i2 = STACKTOP;
+ i1 = HEAP32[i4 >> 2] | 0;
+ i7 = i4 + 28 | 0;
+ i15 = HEAP32[i7 >> 2] | 0;
+ i3 = i4 + 20 | 0;
+ i8 = HEAP32[i3 >> 2] | 0;
+ do {
+ if (!((i15 | 0) == -1)) {
+ i11 = HEAP32[i1 + 12 >> 2] | 0;
+ while (1) {
+ i12 = i11 + (i15 << 2) | 0;
+ i14 = HEAP32[i12 >> 2] | 0;
+ i13 = (i14 >>> 14) + -131071 | 0;
+ if ((i13 | 0) == -1) {
+ i13 = -1;
+ } else {
+ i13 = i15 + 1 + i13 | 0;
+ }
+ if ((i15 | 0) > 0 ? (i9 = i11 + (i15 + -1 << 2) | 0, i10 = HEAP32[i9 >> 2] | 0, (HEAP8[5584 + (i10 & 63) | 0] | 0) < 0) : 0) {
+ i17 = i9;
+ i16 = i10;
+ } else {
+ i17 = i12;
+ i16 = i14;
+ }
+ if ((i16 & 63 | 0) == 28) {
+ HEAP32[i17 >> 2] = i16 & 8372224 | i16 >>> 23 << 6 | 27;
+ i14 = i8 + ~i15 | 0;
+ if ((((i14 | 0) > -1 ? i14 : 0 - i14 | 0) | 0) > 131071) {
+ i8 = 10;
+ break;
+ }
+ i14 = HEAP32[i12 >> 2] & 16383 | (i14 << 14) + 2147467264;
+ } else {
+ i15 = i8 + ~i15 | 0;
+ if ((((i15 | 0) > -1 ? i15 : 0 - i15 | 0) | 0) > 131071) {
+ i8 = 13;
+ break;
+ }
+ i14 = (i15 << 14) + 2147467264 | i14 & 16383;
+ }
+ HEAP32[i12 >> 2] = i14;
+ if ((i13 | 0) == -1) {
+ i8 = 16;
+ break;
+ } else {
+ i15 = i13;
+ }
+ }
+ if ((i8 | 0) == 10) {
+ _luaX_syntaxerror(HEAP32[i4 + 12 >> 2] | 0, 10624);
+ } else if ((i8 | 0) == 13) {
+ _luaX_syntaxerror(HEAP32[i4 + 12 >> 2] | 0, 10624);
+ } else if ((i8 | 0) == 16) {
+ i6 = HEAP32[i3 >> 2] | 0;
+ break;
+ }
+ } else {
+ i6 = i8;
+ }
+ } while (0);
+ HEAP32[i7 >> 2] = -1;
+ i7 = i1 + 48 | 0;
+ if ((i6 | 0) < (HEAP32[i7 >> 2] | 0)) {
+ i7 = i1 + 12 | 0;
+ } else {
+ i6 = i1 + 12 | 0;
+ HEAP32[i6 >> 2] = _luaM_growaux_(HEAP32[(HEAP32[i4 + 12 >> 2] | 0) + 52 >> 2] | 0, HEAP32[i6 >> 2] | 0, i7, 4, 2147483645, 10616) | 0;
+ i7 = i6;
+ i6 = HEAP32[i3 >> 2] | 0;
+ }
+ HEAP32[(HEAP32[i7 >> 2] | 0) + (i6 << 2) >> 2] = i5;
+ i5 = HEAP32[i3 >> 2] | 0;
+ i6 = i1 + 52 | 0;
+ i4 = i4 + 12 | 0;
+ if ((i5 | 0) < (HEAP32[i6 >> 2] | 0)) {
+ i15 = i1 + 20 | 0;
+ i17 = i5;
+ i16 = HEAP32[i4 >> 2] | 0;
+ i16 = i16 + 8 | 0;
+ i16 = HEAP32[i16 >> 2] | 0;
+ i15 = HEAP32[i15 >> 2] | 0;
+ i17 = i15 + (i17 << 2) | 0;
+ HEAP32[i17 >> 2] = i16;
+ i17 = HEAP32[i3 >> 2] | 0;
+ i16 = i17 + 1 | 0;
+ HEAP32[i3 >> 2] = i16;
+ STACKTOP = i2;
+ return i17 | 0;
+ } else {
+ i15 = i1 + 20 | 0;
+ HEAP32[i15 >> 2] = _luaM_growaux_(HEAP32[(HEAP32[i4 >> 2] | 0) + 52 >> 2] | 0, HEAP32[i15 >> 2] | 0, i6, 4, 2147483645, 10616) | 0;
+ i17 = HEAP32[i3 >> 2] | 0;
+ i16 = HEAP32[i4 >> 2] | 0;
+ i16 = i16 + 8 | 0;
+ i16 = HEAP32[i16 >> 2] | 0;
+ i15 = HEAP32[i15 >> 2] | 0;
+ i17 = i15 + (i17 << 2) | 0;
+ HEAP32[i17 >> 2] = i16;
+ i17 = HEAP32[i3 >> 2] | 0;
+ i16 = i17 + 1 | 0;
+ HEAP32[i3 >> 2] = i16;
+ STACKTOP = i2;
+ return i17 | 0;
+ }
+ return 0;
+}
+function _luaH_next(i9, i5, i2) {
+ i9 = i9 | 0;
+ i5 = i5 | 0;
+ i2 = i2 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i6 = 0, i7 = 0, i8 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, d14 = 0.0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i8 = i1 + 8 | 0;
+ i11 = i1;
+ i3 = i2 + 8 | 0;
+ i10 = HEAP32[i3 >> 2] | 0;
+ do {
+ if ((i10 | 0) != 0) {
+ if ((((i10 | 0) == 3 ? (d14 = +HEAPF64[i2 >> 3], HEAPF64[i11 >> 3] = d14 + 6755399441055744.0, i12 = HEAP32[i11 >> 2] | 0, +(i12 | 0) == d14) : 0) ? (i12 | 0) > 0 : 0) ? (i13 = HEAP32[i5 + 28 >> 2] | 0, (i12 | 0) <= (i13 | 0)) : 0) {
+ i6 = i13;
+ i7 = i12 + -1 | 0;
+ break;
+ }
+ i10 = _mainposition(i5, i2) | 0;
+ while (1) {
+ i4 = i10 + 16 | 0;
+ i11 = i10 + 24 | 0;
+ i12 = HEAP32[i11 >> 2] | 0;
+ if ((i12 | 0) == (HEAP32[i3 >> 2] | 0)) {
+ if ((_luaV_equalobj_(0, i4, i2) | 0) != 0) {
+ i4 = 15;
+ break;
+ }
+ i12 = HEAP32[i11 >> 2] | 0;
+ }
+ if (((i12 | 0) == 11 ? (HEAP32[i3 >> 2] & 64 | 0) != 0 : 0) ? (HEAP32[i4 >> 2] | 0) == (HEAP32[i2 >> 2] | 0) : 0) {
+ i4 = 15;
+ break;
+ }
+ i10 = HEAP32[i10 + 28 >> 2] | 0;
+ if ((i10 | 0) == 0) {
+ i4 = 18;
+ break;
+ }
+ }
+ if ((i4 | 0) == 15) {
+ i7 = HEAP32[i5 + 28 >> 2] | 0;
+ i6 = i7;
+ i7 = (i10 - (HEAP32[i5 + 16 >> 2] | 0) >> 5) + i7 | 0;
+ break;
+ } else if ((i4 | 0) == 18) {
+ _luaG_runerror(i9, 8064, i8);
+ }
+ } else {
+ i6 = HEAP32[i5 + 28 >> 2] | 0;
+ i7 = -1;
+ }
+ } while (0);
+ i8 = i5 + 12 | 0;
+ while (1) {
+ i9 = i7 + 1 | 0;
+ if ((i9 | 0) >= (i6 | 0)) {
+ break;
+ }
+ i11 = HEAP32[i8 >> 2] | 0;
+ i10 = i11 + (i9 << 4) + 8 | 0;
+ if ((HEAP32[i10 >> 2] | 0) == 0) {
+ i7 = i9;
+ } else {
+ i4 = 21;
+ break;
+ }
+ }
+ if ((i4 | 0) == 21) {
+ HEAPF64[i2 >> 3] = +(i7 + 2 | 0);
+ HEAP32[i3 >> 2] = 3;
+ i11 = i11 + (i9 << 4) | 0;
+ i12 = HEAP32[i11 + 4 >> 2] | 0;
+ i13 = i2 + 16 | 0;
+ HEAP32[i13 >> 2] = HEAP32[i11 >> 2];
+ HEAP32[i13 + 4 >> 2] = i12;
+ HEAP32[i2 + 24 >> 2] = HEAP32[i10 >> 2];
+ i13 = 1;
+ STACKTOP = i1;
+ return i13 | 0;
+ }
+ i8 = i9 - i6 | 0;
+ i6 = 1 << (HEAPU8[i5 + 7 | 0] | 0);
+ if ((i8 | 0) >= (i6 | 0)) {
+ i13 = 0;
+ STACKTOP = i1;
+ return i13 | 0;
+ }
+ i7 = i5 + 16 | 0;
+ i5 = HEAP32[i7 >> 2] | 0;
+ while (1) {
+ i9 = i8 + 1 | 0;
+ if ((HEAP32[i5 + (i8 << 5) + 8 >> 2] | 0) != 0) {
+ break;
+ }
+ if ((i9 | 0) < (i6 | 0)) {
+ i8 = i9;
+ } else {
+ i2 = 0;
+ i4 = 27;
+ break;
+ }
+ }
+ if ((i4 | 0) == 27) {
+ STACKTOP = i1;
+ return i2 | 0;
+ }
+ i11 = i5 + (i8 << 5) + 16 | 0;
+ i10 = HEAP32[i11 + 4 >> 2] | 0;
+ i13 = i2;
+ HEAP32[i13 >> 2] = HEAP32[i11 >> 2];
+ HEAP32[i13 + 4 >> 2] = i10;
+ HEAP32[i3 >> 2] = HEAP32[i5 + (i8 << 5) + 24 >> 2];
+ i13 = HEAP32[i7 >> 2] | 0;
+ i10 = i13 + (i8 << 5) | 0;
+ i11 = HEAP32[i10 + 4 >> 2] | 0;
+ i12 = i2 + 16 | 0;
+ HEAP32[i12 >> 2] = HEAP32[i10 >> 2];
+ HEAP32[i12 + 4 >> 2] = i11;
+ HEAP32[i2 + 24 >> 2] = HEAP32[i13 + (i8 << 5) + 8 >> 2];
+ i13 = 1;
+ STACKTOP = i1;
+ return i13 | 0;
+}
+function _g_read(i1, i3, i2) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ var i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0;
+ i4 = STACKTOP;
+ STACKTOP = STACKTOP + 1056 | 0;
+ i7 = i4 + 8 | 0;
+ i9 = i4;
+ i10 = _lua_gettop(i1) | 0;
+ _clearerr(i3 | 0);
+ L1 : do {
+ if ((i10 | 0) == 1) {
+ i11 = i2 + 1 | 0;
+ i12 = _read_line(i1, i3, 1) | 0;
+ } else {
+ _luaL_checkstack(i1, i10 + 19 | 0, 3256);
+ i6 = i7 + 8 | 0;
+ i5 = i7 + 8 | 0;
+ i10 = i10 + -2 | 0;
+ i11 = i2;
+ L4 : while (1) {
+ do {
+ if ((_lua_type(i1, i11) | 0) == 3) {
+ i12 = _lua_tointegerx(i1, i11, 0) | 0;
+ if ((i12 | 0) == 0) {
+ i12 = _fgetc(i3 | 0) | 0;
+ _ungetc(i12 | 0, i3 | 0) | 0;
+ _lua_pushlstring(i1, 0, 0) | 0;
+ i12 = (i12 | 0) != -1 | 0;
+ break;
+ } else {
+ _luaL_buffinit(i1, i7);
+ i12 = _fread(_luaL_prepbuffsize(i7, i12) | 0, 1, i12 | 0, i3 | 0) | 0;
+ HEAP32[i6 >> 2] = (HEAP32[i6 >> 2] | 0) + i12;
+ _luaL_pushresult(i7);
+ i12 = (i12 | 0) != 0 | 0;
+ break;
+ }
+ } else {
+ i12 = _lua_tolstring(i1, i11, 0) | 0;
+ if (!((i12 | 0) != 0 ? (HEAP8[i12] | 0) == 42 : 0)) {
+ _luaL_argerror(i1, i11, 3280) | 0;
+ }
+ i12 = HEAP8[i12 + 1 | 0] | 0;
+ if ((i12 | 0) == 110) {
+ HEAP32[i7 >> 2] = i9;
+ if ((_fscanf(i3 | 0, 3312, i7 | 0) | 0) != 1) {
+ i8 = 14;
+ break L4;
+ }
+ _lua_pushnumber(i1, +HEAPF64[i9 >> 3]);
+ i12 = 1;
+ break;
+ } else if ((i12 | 0) == 108) {
+ i12 = _read_line(i1, i3, 1) | 0;
+ break;
+ } else if ((i12 | 0) == 76) {
+ i12 = _read_line(i1, i3, 0) | 0;
+ break;
+ } else if ((i12 | 0) == 97) {
+ _luaL_buffinit(i1, i7);
+ i12 = _fread(_luaL_prepbuffsize(i7, 1024) | 0, 1, 1024, i3 | 0) | 0;
+ HEAP32[i5 >> 2] = (HEAP32[i5 >> 2] | 0) + i12;
+ if (!(i12 >>> 0 < 1024)) {
+ i12 = 1024;
+ do {
+ i12 = i12 << (i12 >>> 0 < 1073741824);
+ i13 = _fread(_luaL_prepbuffsize(i7, i12) | 0, 1, i12 | 0, i3 | 0) | 0;
+ HEAP32[i5 >> 2] = (HEAP32[i5 >> 2] | 0) + i13;
+ } while (!(i13 >>> 0 < i12 >>> 0));
+ }
+ _luaL_pushresult(i7);
+ i12 = 1;
+ break;
+ } else {
+ break L4;
+ }
+ }
+ } while (0);
+ i11 = i11 + 1 | 0;
+ if ((i10 | 0) == 0 | (i12 | 0) == 0) {
+ break L1;
+ } else {
+ i10 = i10 + -1 | 0;
+ }
+ }
+ if ((i8 | 0) == 14) {
+ _lua_pushnil(i1);
+ i11 = i11 + 1 | 0;
+ i12 = 0;
+ break;
+ }
+ i13 = _luaL_argerror(i1, i11, 3296) | 0;
+ STACKTOP = i4;
+ return i13 | 0;
+ }
+ } while (0);
+ if ((_ferror(i3 | 0) | 0) != 0) {
+ i13 = _luaL_fileresult(i1, 0, 0) | 0;
+ STACKTOP = i4;
+ return i13 | 0;
+ }
+ if ((i12 | 0) == 0) {
+ _lua_settop(i1, -2);
+ _lua_pushnil(i1);
+ }
+ i13 = i11 - i2 | 0;
+ STACKTOP = i4;
+ return i13 | 0;
+}
+function _luaY_parser(i8, i12, i10, i11, i9, i13) {
+ i8 = i8 | 0;
+ i12 = i12 | 0;
+ i10 = i10 | 0;
+ i11 = i11 | 0;
+ i9 = i9 | 0;
+ i13 = i13 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i14 = 0, i15 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 176 | 0;
+ i5 = i2 + 156 | 0;
+ i7 = i2 + 80 | 0;
+ i4 = i2;
+ i6 = i2 + 104 | 0;
+ i3 = _luaF_newLclosure(i8, 1) | 0;
+ i15 = i8 + 8 | 0;
+ i14 = HEAP32[i15 >> 2] | 0;
+ HEAP32[i14 >> 2] = i3;
+ HEAP32[i14 + 8 >> 2] = 70;
+ i14 = (HEAP32[i15 >> 2] | 0) + 16 | 0;
+ HEAP32[i15 >> 2] = i14;
+ if (((HEAP32[i8 + 24 >> 2] | 0) - i14 | 0) < 16) {
+ _luaD_growstack(i8, 0);
+ }
+ i14 = _luaF_newproto(i8) | 0;
+ HEAP32[i3 + 12 >> 2] = i14;
+ HEAP32[i6 >> 2] = i14;
+ i9 = _luaS_new(i8, i9) | 0;
+ HEAP32[(HEAP32[i6 >> 2] | 0) + 36 >> 2] = i9;
+ HEAP32[i4 + 60 >> 2] = i10;
+ i9 = i4 + 64 | 0;
+ HEAP32[i9 >> 2] = i11;
+ HEAP32[i11 + 28 >> 2] = 0;
+ HEAP32[i11 + 16 >> 2] = 0;
+ HEAP32[i11 + 4 >> 2] = 0;
+ _luaX_setinput(i8, i4, i12, HEAP32[(HEAP32[i6 >> 2] | 0) + 36 >> 2] | 0, i13);
+ i10 = HEAP32[i4 + 52 >> 2] | 0;
+ i13 = i4 + 48 | 0;
+ HEAP32[i6 + 8 >> 2] = HEAP32[i13 >> 2];
+ i8 = i6 + 12 | 0;
+ HEAP32[i8 >> 2] = i4;
+ HEAP32[i13 >> 2] = i6;
+ HEAP32[i6 + 20 >> 2] = 0;
+ HEAP32[i6 + 24 >> 2] = 0;
+ HEAP32[i6 + 28 >> 2] = -1;
+ HEAP32[i6 + 32 >> 2] = 0;
+ HEAP32[i6 + 36 >> 2] = 0;
+ i13 = i6 + 44 | 0;
+ HEAP32[i13 + 0 >> 2] = 0;
+ HEAP8[i13 + 4 | 0] = 0;
+ HEAP32[i6 + 40 >> 2] = HEAP32[(HEAP32[i9 >> 2] | 0) + 4 >> 2];
+ i9 = i6 + 16 | 0;
+ HEAP32[i9 >> 2] = 0;
+ i13 = HEAP32[i6 >> 2] | 0;
+ HEAP32[i13 + 36 >> 2] = HEAP32[i4 + 68 >> 2];
+ HEAP8[i13 + 78 | 0] = 2;
+ i13 = _luaH_new(i10) | 0;
+ HEAP32[i6 + 4 >> 2] = i13;
+ i14 = i10 + 8 | 0;
+ i15 = HEAP32[i14 >> 2] | 0;
+ HEAP32[i15 >> 2] = i13;
+ HEAP32[i15 + 8 >> 2] = 69;
+ i15 = (HEAP32[i14 >> 2] | 0) + 16 | 0;
+ HEAP32[i14 >> 2] = i15;
+ if (((HEAP32[i10 + 24 >> 2] | 0) - i15 | 0) < 16) {
+ _luaD_growstack(i10, 0);
+ }
+ HEAP8[i5 + 10 | 0] = 0;
+ HEAP8[i5 + 8 | 0] = HEAP8[i6 + 46 | 0] | 0;
+ i15 = HEAP32[(HEAP32[i8 >> 2] | 0) + 64 >> 2] | 0;
+ HEAP16[i5 + 4 >> 1] = HEAP32[i15 + 28 >> 2];
+ HEAP16[i5 + 6 >> 1] = HEAP32[i15 + 16 >> 2];
+ HEAP8[i5 + 9 | 0] = 0;
+ HEAP32[i5 >> 2] = HEAP32[i9 >> 2];
+ HEAP32[i9 >> 2] = i5;
+ HEAP8[(HEAP32[i6 >> 2] | 0) + 77 | 0] = 1;
+ HEAP32[i7 + 16 >> 2] = -1;
+ HEAP32[i7 + 20 >> 2] = -1;
+ HEAP32[i7 >> 2] = 7;
+ HEAP32[i7 + 8 >> 2] = 0;
+ _newupvalue(i6, HEAP32[i4 + 72 >> 2] | 0, i7) | 0;
+ _luaX_next(i4);
+ i5 = i4 + 16 | 0;
+ L7 : while (1) {
+ i6 = HEAP32[i5 >> 2] | 0;
+ switch (i6 | 0) {
+ case 277:
+ case 286:
+ case 262:
+ case 261:
+ case 260:
+ {
+ break L7;
+ }
+ default:
+ {}
+ }
+ _statement(i4);
+ if ((i6 | 0) == 274) {
+ i1 = 8;
+ break;
+ }
+ }
+ if ((i1 | 0) == 8) {
+ i6 = HEAP32[i5 >> 2] | 0;
+ }
+ if ((i6 | 0) == 286) {
+ _close_func(i4);
+ STACKTOP = i2;
+ return i3 | 0;
+ } else {
+ _error_expected(i4, 286);
+ }
+ return 0;
+}
+function _luaV_lessthan(i5, i4, i2) {
+ i5 = i5 | 0;
+ i4 = i4 | 0;
+ i2 = i2 | 0;
+ var i1 = 0, i3 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0;
+ i1 = STACKTOP;
+ i6 = i4 + 8 | 0;
+ i7 = HEAP32[i6 >> 2] | 0;
+ if ((i7 | 0) == 3) {
+ if ((HEAP32[i2 + 8 >> 2] | 0) == 3) {
+ i9 = +HEAPF64[i4 >> 3] < +HEAPF64[i2 >> 3] | 0;
+ STACKTOP = i1;
+ return i9 | 0;
+ }
+ } else {
+ if ((i7 & 15 | 0) == 4 ? (HEAP32[i2 + 8 >> 2] & 15 | 0) == 4 : 0) {
+ i6 = HEAP32[i4 >> 2] | 0;
+ i4 = HEAP32[i2 >> 2] | 0;
+ i3 = i6 + 16 | 0;
+ i5 = i4 + 16 | 0;
+ i7 = _strcmp(i3, i5) | 0;
+ L8 : do {
+ if ((i7 | 0) == 0) {
+ i2 = HEAP32[i6 + 12 >> 2] | 0;
+ i4 = HEAP32[i4 + 12 >> 2] | 0;
+ while (1) {
+ i7 = _strlen(i3 | 0) | 0;
+ i6 = (i7 | 0) == (i2 | 0);
+ if ((i7 | 0) == (i4 | 0)) {
+ break;
+ }
+ if (i6) {
+ i7 = -1;
+ break L8;
+ }
+ i6 = i7 + 1 | 0;
+ i3 = i3 + i6 | 0;
+ i5 = i5 + i6 | 0;
+ i7 = _strcmp(i3, i5) | 0;
+ if ((i7 | 0) == 0) {
+ i2 = i2 - i6 | 0;
+ i4 = i4 - i6 | 0;
+ } else {
+ break L8;
+ }
+ }
+ i7 = i6 & 1 ^ 1;
+ }
+ } while (0);
+ i9 = i7 >>> 31;
+ STACKTOP = i1;
+ return i9 | 0;
+ }
+ }
+ i8 = i5 + 8 | 0;
+ i7 = HEAP32[i8 >> 2] | 0;
+ i9 = _luaT_gettmbyobj(i5, i4, 13) | 0;
+ if ((HEAP32[i9 + 8 >> 2] | 0) == 0) {
+ i9 = _luaT_gettmbyobj(i5, i2, 13) | 0;
+ if ((HEAP32[i9 + 8 >> 2] | 0) == 0) {
+ _luaG_ordererror(i5, i4, i2);
+ } else {
+ i3 = i9;
+ }
+ } else {
+ i3 = i9;
+ }
+ i10 = i5 + 28 | 0;
+ i9 = i7 - (HEAP32[i10 >> 2] | 0) | 0;
+ i11 = HEAP32[i8 >> 2] | 0;
+ HEAP32[i8 >> 2] = i11 + 16;
+ i13 = i3;
+ i12 = HEAP32[i13 + 4 >> 2] | 0;
+ i7 = i11;
+ HEAP32[i7 >> 2] = HEAP32[i13 >> 2];
+ HEAP32[i7 + 4 >> 2] = i12;
+ HEAP32[i11 + 8 >> 2] = HEAP32[i3 + 8 >> 2];
+ i3 = HEAP32[i8 >> 2] | 0;
+ HEAP32[i8 >> 2] = i3 + 16;
+ i11 = i4;
+ i7 = HEAP32[i11 + 4 >> 2] | 0;
+ i4 = i3;
+ HEAP32[i4 >> 2] = HEAP32[i11 >> 2];
+ HEAP32[i4 + 4 >> 2] = i7;
+ HEAP32[i3 + 8 >> 2] = HEAP32[i6 >> 2];
+ i3 = HEAP32[i8 >> 2] | 0;
+ HEAP32[i8 >> 2] = i3 + 16;
+ i4 = i2;
+ i7 = HEAP32[i4 + 4 >> 2] | 0;
+ i6 = i3;
+ HEAP32[i6 >> 2] = HEAP32[i4 >> 2];
+ HEAP32[i6 + 4 >> 2] = i7;
+ HEAP32[i3 + 8 >> 2] = HEAP32[i2 + 8 >> 2];
+ _luaD_call(i5, (HEAP32[i8 >> 2] | 0) + -48 | 0, 1, HEAP8[(HEAP32[i5 + 16 >> 2] | 0) + 18 | 0] & 1);
+ i2 = HEAP32[i10 >> 2] | 0;
+ i3 = HEAP32[i8 >> 2] | 0;
+ i5 = i3 + -16 | 0;
+ HEAP32[i8 >> 2] = i5;
+ i6 = HEAP32[i5 + 4 >> 2] | 0;
+ i7 = i2 + i9 | 0;
+ HEAP32[i7 >> 2] = HEAP32[i5 >> 2];
+ HEAP32[i7 + 4 >> 2] = i6;
+ HEAP32[i2 + (i9 + 8) >> 2] = HEAP32[i3 + -8 >> 2];
+ i2 = HEAP32[i8 >> 2] | 0;
+ i3 = HEAP32[i2 + 8 >> 2] | 0;
+ if ((i3 | 0) != 0) {
+ if ((i3 | 0) == 1) {
+ i2 = (HEAP32[i2 >> 2] | 0) != 0;
+ } else {
+ i2 = 1;
+ }
+ } else {
+ i2 = 0;
+ }
+ i13 = i2 & 1;
+ STACKTOP = i1;
+ return i13 | 0;
+}
+function _discharge2reg(i4, i3, i1) {
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i1 = i1 | 0;
+ var i2 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, d11 = 0.0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 32 | 0;
+ i9 = i2 + 16 | 0;
+ i8 = i2;
+ _luaK_dischargevars(i4, i3);
+ i10 = HEAP32[i3 >> 2] | 0;
+ L1 : do {
+ switch (i10 | 0) {
+ case 5:
+ {
+ d11 = +HEAPF64[i3 + 8 >> 3];
+ HEAPF64[i9 >> 3] = d11;
+ i5 = HEAP32[(HEAP32[i4 + 12 >> 2] | 0) + 52 >> 2] | 0;
+ HEAPF64[i8 >> 3] = d11;
+ HEAP32[i8 + 8 >> 2] = 3;
+ if (d11 != d11 | 0.0 != 0.0 | d11 == 0.0) {
+ i10 = i5 + 8 | 0;
+ i7 = HEAP32[i10 >> 2] | 0;
+ HEAP32[i10 >> 2] = i7 + 16;
+ i5 = _luaS_newlstr(i5, i9, 8) | 0;
+ HEAP32[i7 >> 2] = i5;
+ HEAP32[i7 + 8 >> 2] = HEAPU8[i5 + 4 | 0] | 0 | 64;
+ i5 = _addk(i4, (HEAP32[i10 >> 2] | 0) + -16 | 0, i8) | 0;
+ HEAP32[i10 >> 2] = (HEAP32[i10 >> 2] | 0) + -16;
+ } else {
+ i5 = _addk(i4, i8, i8) | 0;
+ }
+ i6 = i1 << 6;
+ if ((i5 | 0) < 262144) {
+ _luaK_code(i4, i6 | i5 << 14 | 1) | 0;
+ break L1;
+ } else {
+ _luaK_code(i4, i6 | 2) | 0;
+ _luaK_code(i4, i5 << 6 | 39) | 0;
+ break L1;
+ }
+ }
+ case 2:
+ case 3:
+ {
+ _luaK_code(i4, i1 << 6 | ((i10 | 0) == 2) << 23 | 3) | 0;
+ break;
+ }
+ case 4:
+ {
+ i6 = HEAP32[i3 + 8 >> 2] | 0;
+ i5 = i1 << 6;
+ if ((i6 | 0) < 262144) {
+ _luaK_code(i4, i5 | i6 << 14 | 1) | 0;
+ break L1;
+ } else {
+ _luaK_code(i4, i5 | 2) | 0;
+ _luaK_code(i4, i6 << 6 | 39) | 0;
+ break L1;
+ }
+ }
+ case 1:
+ {
+ i9 = i1 + 1 | 0;
+ i8 = HEAP32[i4 + 20 >> 2] | 0;
+ do {
+ if ((i8 | 0) > (HEAP32[i4 + 24 >> 2] | 0) ? (i5 = (HEAP32[(HEAP32[i4 >> 2] | 0) + 12 >> 2] | 0) + (i8 + -1 << 2) | 0, i6 = HEAP32[i5 >> 2] | 0, (i6 & 63 | 0) == 4) : 0) {
+ i10 = i6 >>> 6 & 255;
+ i8 = i10 + (i6 >>> 23) | 0;
+ if (!((i10 | 0) <= (i1 | 0) ? (i8 + 1 | 0) >= (i1 | 0) : 0)) {
+ i7 = 6;
+ }
+ if ((i7 | 0) == 6 ? (i10 | 0) < (i1 | 0) | (i10 | 0) > (i9 | 0) : 0) {
+ break;
+ }
+ i4 = (i10 | 0) < (i1 | 0) ? i10 : i1;
+ HEAP32[i5 >> 2] = i4 << 6 & 16320 | i6 & 8372287 | ((i8 | 0) > (i1 | 0) ? i8 : i1) - i4 << 23;
+ break L1;
+ }
+ } while (0);
+ _luaK_code(i4, i1 << 6 | 4) | 0;
+ break;
+ }
+ case 6:
+ {
+ i5 = HEAP32[i3 + 8 >> 2] | 0;
+ if ((i5 | 0) != (i1 | 0)) {
+ _luaK_code(i4, i5 << 23 | i1 << 6) | 0;
+ }
+ break;
+ }
+ case 11:
+ {
+ i10 = (HEAP32[(HEAP32[i4 >> 2] | 0) + 12 >> 2] | 0) + (HEAP32[i3 + 8 >> 2] << 2) | 0;
+ HEAP32[i10 >> 2] = HEAP32[i10 >> 2] & -16321 | i1 << 6 & 16320;
+ break;
+ }
+ default:
+ {
+ STACKTOP = i2;
+ return;
+ }
+ }
+ } while (0);
+ HEAP32[i3 + 8 >> 2] = i1;
+ HEAP32[i3 >> 2] = 6;
+ STACKTOP = i2;
+ return;
+}
+function _unroll(i3, i4) {
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ var i1 = 0, i2 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0;
+ i9 = STACKTOP;
+ i11 = i3 + 16 | 0;
+ i13 = HEAP32[i11 >> 2] | 0;
+ i5 = i3 + 72 | 0;
+ if ((i13 | 0) == (i5 | 0)) {
+ STACKTOP = i9;
+ return;
+ }
+ i6 = i3 + 8 | 0;
+ i10 = i3 + 40 | 0;
+ i7 = i3 + 20 | 0;
+ i8 = i3 + 28 | 0;
+ i4 = i3 + 68 | 0;
+ do {
+ i12 = i13 + 18 | 0;
+ i14 = HEAP8[i12] | 0;
+ if ((i14 & 1) == 0) {
+ i14 = i14 & 255;
+ if ((i14 & 16 | 0) != 0) {
+ HEAP8[i12] = i14 & 239;
+ HEAP32[i4 >> 2] = HEAP32[i13 + 32 >> 2];
+ }
+ if ((HEAP16[i13 + 16 >> 1] | 0) == -1 ? (i2 = (HEAP32[i11 >> 2] | 0) + 4 | 0, i1 = HEAP32[i6 >> 2] | 0, (HEAP32[i2 >> 2] | 0) >>> 0 < i1 >>> 0) : 0) {
+ HEAP32[i2 >> 2] = i1;
+ }
+ i14 = HEAP8[i12] | 0;
+ if ((i14 & 32) == 0) {
+ HEAP8[i13 + 37 | 0] = 1;
+ }
+ HEAP8[i12] = i14 & 199 | 8;
+ i14 = FUNCTION_TABLE_ii[HEAP32[i13 + 28 >> 2] & 255](i3) | 0;
+ i14 = (HEAP32[i6 >> 2] | 0) + (0 - i14 << 4) | 0;
+ i13 = HEAP32[i11 >> 2] | 0;
+ i12 = HEAPU8[i10] | 0;
+ if ((i12 & 6 | 0) == 0) {
+ i15 = i13 + 8 | 0;
+ } else {
+ if ((i12 & 2 | 0) != 0) {
+ i14 = i14 - (HEAP32[i8 >> 2] | 0) | 0;
+ _luaD_hook(i3, 1, -1);
+ i14 = (HEAP32[i8 >> 2] | 0) + i14 | 0;
+ }
+ i15 = i13 + 8 | 0;
+ HEAP32[i7 >> 2] = HEAP32[(HEAP32[i15 >> 2] | 0) + 28 >> 2];
+ }
+ i12 = HEAP32[i13 >> 2] | 0;
+ i13 = HEAP16[i13 + 16 >> 1] | 0;
+ HEAP32[i11 >> 2] = HEAP32[i15 >> 2];
+ L25 : do {
+ if (!(i13 << 16 >> 16 == 0)) {
+ i15 = i13 << 16 >> 16;
+ if (i14 >>> 0 < (HEAP32[i6 >> 2] | 0) >>> 0) {
+ i13 = i14;
+ i14 = i15;
+ i15 = i12;
+ while (1) {
+ i12 = i15 + 16 | 0;
+ i18 = i13;
+ i17 = HEAP32[i18 + 4 >> 2] | 0;
+ i16 = i15;
+ HEAP32[i16 >> 2] = HEAP32[i18 >> 2];
+ HEAP32[i16 + 4 >> 2] = i17;
+ HEAP32[i15 + 8 >> 2] = HEAP32[i13 + 8 >> 2];
+ i14 = i14 + -1 | 0;
+ i13 = i13 + 16 | 0;
+ if ((i14 | 0) == 0) {
+ break L25;
+ }
+ if (i13 >>> 0 < (HEAP32[i6 >> 2] | 0) >>> 0) {
+ i15 = i12;
+ } else {
+ i13 = i14;
+ break;
+ }
+ }
+ } else {
+ i13 = i15;
+ }
+ if ((i13 | 0) > 0) {
+ i14 = i13;
+ i15 = i12;
+ while (1) {
+ i14 = i14 + -1 | 0;
+ HEAP32[i15 + 8 >> 2] = 0;
+ if ((i14 | 0) <= 0) {
+ break;
+ } else {
+ i15 = i15 + 16 | 0;
+ }
+ }
+ i12 = i12 + (i13 << 4) | 0;
+ }
+ }
+ } while (0);
+ HEAP32[i6 >> 2] = i12;
+ } else {
+ _luaV_finishOp(i3);
+ _luaV_execute(i3);
+ }
+ i13 = HEAP32[i11 >> 2] | 0;
+ } while ((i13 | 0) != (i5 | 0));
+ STACKTOP = i9;
+ return;
+}
+function _traverseephemeron(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ var i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0;
+ i3 = STACKTOP;
+ i11 = i2 + 16 | 0;
+ i9 = HEAP32[i11 >> 2] | 0;
+ i5 = i9 + (1 << (HEAPU8[i2 + 7 | 0] | 0) << 5) | 0;
+ i10 = i2 + 28 | 0;
+ i13 = HEAP32[i10 >> 2] | 0;
+ if ((i13 | 0) > 0) {
+ i9 = i2 + 12 | 0;
+ i12 = 0;
+ i8 = 0;
+ do {
+ i14 = HEAP32[i9 >> 2] | 0;
+ if ((HEAP32[i14 + (i12 << 4) + 8 >> 2] & 64 | 0) != 0 ? (i7 = HEAP32[i14 + (i12 << 4) >> 2] | 0, !((HEAP8[i7 + 5 | 0] & 3) == 0)) : 0) {
+ _reallymarkobject(i1, i7);
+ i13 = HEAP32[i10 >> 2] | 0;
+ i8 = 1;
+ }
+ i12 = i12 + 1 | 0;
+ } while ((i12 | 0) < (i13 | 0));
+ i9 = HEAP32[i11 >> 2] | 0;
+ } else {
+ i8 = 0;
+ }
+ if (i9 >>> 0 < i5 >>> 0) {
+ i7 = 0;
+ i10 = 0;
+ do {
+ i11 = i9 + 8 | 0;
+ i12 = HEAP32[i11 >> 2] | 0;
+ i14 = i9 + 24 | 0;
+ i13 = HEAP32[i14 >> 2] | 0;
+ i15 = (i13 & 64 | 0) == 0;
+ L14 : do {
+ if ((i12 | 0) == 0) {
+ if (!i15 ? !((HEAP8[(HEAP32[i9 + 16 >> 2] | 0) + 5 | 0] & 3) == 0) : 0) {
+ HEAP32[i14 >> 2] = 11;
+ }
+ } else {
+ do {
+ if (i15) {
+ i6 = i12;
+ i4 = 18;
+ } else {
+ i14 = HEAP32[i9 + 16 >> 2] | 0;
+ if ((i13 & 15 | 0) == 4) {
+ if ((i14 | 0) == 0) {
+ i6 = i12;
+ i4 = 18;
+ break;
+ }
+ if ((HEAP8[i14 + 5 | 0] & 3) == 0) {
+ i6 = i12;
+ i4 = 18;
+ break;
+ }
+ _reallymarkobject(i1, i14);
+ i6 = HEAP32[i11 >> 2] | 0;
+ i4 = 18;
+ break;
+ }
+ i11 = (i12 & 64 | 0) == 0;
+ if ((HEAP8[i14 + 5 | 0] & 3) == 0) {
+ if (i11) {
+ break L14;
+ } else {
+ break;
+ }
+ }
+ if (i11) {
+ i7 = 1;
+ break L14;
+ }
+ i7 = 1;
+ i10 = (HEAP8[(HEAP32[i9 >> 2] | 0) + 5 | 0] & 3) == 0 ? i10 : 1;
+ break L14;
+ }
+ } while (0);
+ if ((i4 | 0) == 18 ? (i4 = 0, (i6 & 64 | 0) == 0) : 0) {
+ break;
+ }
+ i11 = HEAP32[i9 >> 2] | 0;
+ if (!((HEAP8[i11 + 5 | 0] & 3) == 0)) {
+ _reallymarkobject(i1, i11);
+ i8 = 1;
+ }
+ }
+ } while (0);
+ i9 = i9 + 32 | 0;
+ } while (i9 >>> 0 < i5 >>> 0);
+ if ((i10 | 0) != 0) {
+ i15 = i1 + 96 | 0;
+ HEAP32[i2 + 24 >> 2] = HEAP32[i15 >> 2];
+ HEAP32[i15 >> 2] = i2;
+ i15 = i8;
+ STACKTOP = i3;
+ return i15 | 0;
+ }
+ if ((i7 | 0) != 0) {
+ i15 = i1 + 100 | 0;
+ HEAP32[i2 + 24 >> 2] = HEAP32[i15 >> 2];
+ HEAP32[i15 >> 2] = i2;
+ i15 = i8;
+ STACKTOP = i3;
+ return i15 | 0;
+ }
+ }
+ i15 = i1 + 88 | 0;
+ HEAP32[i2 + 24 >> 2] = HEAP32[i15 >> 2];
+ HEAP32[i15 >> 2] = i2;
+ i15 = i8;
+ STACKTOP = i3;
+ return i15 | 0;
+}
+function _luaV_gettable(i2, i7, i5, i1) {
+ i2 = i2 | 0;
+ i7 = i7 | 0;
+ i5 = i5 | 0;
+ i1 = i1 | 0;
+ var i3 = 0, i4 = 0, i6 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0;
+ i6 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i4 = i6;
+ i8 = i2 + 12 | 0;
+ i3 = i7;
+ i10 = HEAP32[i7 + 8 >> 2] | 0;
+ i9 = 0;
+ while (1) {
+ i7 = i3 + 8 | 0;
+ if ((i10 | 0) != 69) {
+ i12 = _luaT_gettmbyobj(i2, i3, 0) | 0;
+ i10 = HEAP32[i12 + 8 >> 2] | 0;
+ if ((i10 | 0) == 0) {
+ i8 = 11;
+ break;
+ }
+ } else {
+ i12 = HEAP32[i3 >> 2] | 0;
+ i11 = _luaH_get(i12, i5) | 0;
+ i10 = i11 + 8 | 0;
+ if ((HEAP32[i10 >> 2] | 0) != 0) {
+ i8 = 9;
+ break;
+ }
+ i12 = HEAP32[i12 + 8 >> 2] | 0;
+ if ((i12 | 0) == 0) {
+ i8 = 9;
+ break;
+ }
+ if (!((HEAP8[i12 + 6 | 0] & 1) == 0)) {
+ i8 = 9;
+ break;
+ }
+ i12 = _luaT_gettm(i12, 0, HEAP32[(HEAP32[i8 >> 2] | 0) + 184 >> 2] | 0) | 0;
+ if ((i12 | 0) == 0) {
+ i8 = 9;
+ break;
+ }
+ i10 = HEAP32[i12 + 8 >> 2] | 0;
+ }
+ i9 = i9 + 1 | 0;
+ if ((i10 & 15 | 0) == 6) {
+ i8 = 13;
+ break;
+ }
+ if ((i9 | 0) < 100) {
+ i3 = i12;
+ } else {
+ i8 = 14;
+ break;
+ }
+ }
+ if ((i8 | 0) == 9) {
+ i9 = i11;
+ i11 = HEAP32[i9 + 4 >> 2] | 0;
+ i12 = i1;
+ HEAP32[i12 >> 2] = HEAP32[i9 >> 2];
+ HEAP32[i12 + 4 >> 2] = i11;
+ HEAP32[i1 + 8 >> 2] = HEAP32[i10 >> 2];
+ STACKTOP = i6;
+ return;
+ } else if ((i8 | 0) == 11) {
+ _luaG_typeerror(i2, i3, 8944);
+ } else if ((i8 | 0) == 13) {
+ i10 = i2 + 28 | 0;
+ i11 = i1 - (HEAP32[i10 >> 2] | 0) | 0;
+ i8 = i2 + 8 | 0;
+ i9 = HEAP32[i8 >> 2] | 0;
+ HEAP32[i8 >> 2] = i9 + 16;
+ i13 = i12;
+ i1 = HEAP32[i13 + 4 >> 2] | 0;
+ i4 = i9;
+ HEAP32[i4 >> 2] = HEAP32[i13 >> 2];
+ HEAP32[i4 + 4 >> 2] = i1;
+ HEAP32[i9 + 8 >> 2] = HEAP32[i12 + 8 >> 2];
+ i12 = HEAP32[i8 >> 2] | 0;
+ HEAP32[i8 >> 2] = i12 + 16;
+ i9 = HEAP32[i3 + 4 >> 2] | 0;
+ i4 = i12;
+ HEAP32[i4 >> 2] = HEAP32[i3 >> 2];
+ HEAP32[i4 + 4 >> 2] = i9;
+ HEAP32[i12 + 8 >> 2] = HEAP32[i7 >> 2];
+ i12 = HEAP32[i8 >> 2] | 0;
+ HEAP32[i8 >> 2] = i12 + 16;
+ i4 = i5;
+ i9 = HEAP32[i4 + 4 >> 2] | 0;
+ i7 = i12;
+ HEAP32[i7 >> 2] = HEAP32[i4 >> 2];
+ HEAP32[i7 + 4 >> 2] = i9;
+ HEAP32[i12 + 8 >> 2] = HEAP32[i5 + 8 >> 2];
+ _luaD_call(i2, (HEAP32[i8 >> 2] | 0) + -48 | 0, 1, HEAP8[(HEAP32[i2 + 16 >> 2] | 0) + 18 | 0] & 1);
+ i12 = HEAP32[i10 >> 2] | 0;
+ i10 = HEAP32[i8 >> 2] | 0;
+ i7 = i10 + -16 | 0;
+ HEAP32[i8 >> 2] = i7;
+ i8 = HEAP32[i7 + 4 >> 2] | 0;
+ i9 = i12 + i11 | 0;
+ HEAP32[i9 >> 2] = HEAP32[i7 >> 2];
+ HEAP32[i9 + 4 >> 2] = i8;
+ HEAP32[i12 + (i11 + 8) >> 2] = HEAP32[i10 + -8 >> 2];
+ STACKTOP = i6;
+ return;
+ } else if ((i8 | 0) == 14) {
+ _luaG_runerror(i2, 8952, i4);
+ }
+}
+function _db_getinfo(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 112 | 0;
+ i3 = i2;
+ if ((_lua_type(i1, 1) | 0) == 8) {
+ i4 = _lua_tothread(i1, 1) | 0;
+ i7 = 1;
+ } else {
+ i4 = i1;
+ i7 = 0;
+ }
+ i5 = i7 | 2;
+ i6 = _luaL_optlstring(i1, i5, 11784, 0) | 0;
+ i7 = i7 + 1 | 0;
+ do {
+ if ((_lua_isnumber(i1, i7) | 0) != 0) {
+ if ((_lua_getstack(i4, _lua_tointegerx(i1, i7, 0) | 0, i3) | 0) == 0) {
+ _lua_pushnil(i1);
+ i7 = 1;
+ STACKTOP = i2;
+ return i7 | 0;
+ }
+ } else {
+ if ((_lua_type(i1, i7) | 0) == 6) {
+ HEAP32[i3 >> 2] = i6;
+ _lua_pushfstring(i1, 11792, i3) | 0;
+ i6 = _lua_tolstring(i1, -1, 0) | 0;
+ _lua_pushvalue(i1, i7);
+ _lua_xmove(i1, i4, 1);
+ break;
+ }
+ i7 = _luaL_argerror(i1, i7, 11800) | 0;
+ STACKTOP = i2;
+ return i7 | 0;
+ }
+ } while (0);
+ if ((_lua_getinfo(i4, i6, i3) | 0) == 0) {
+ i7 = _luaL_argerror(i1, i5, 11832) | 0;
+ STACKTOP = i2;
+ return i7 | 0;
+ }
+ _lua_createtable(i1, 0, 2);
+ if ((_strchr(i6, 83) | 0) != 0) {
+ _lua_pushstring(i1, HEAP32[i3 + 16 >> 2] | 0) | 0;
+ _lua_setfield(i1, -2, 11848);
+ _lua_pushstring(i1, i3 + 36 | 0) | 0;
+ _lua_setfield(i1, -2, 11856);
+ _lua_pushinteger(i1, HEAP32[i3 + 24 >> 2] | 0);
+ _lua_setfield(i1, -2, 11872);
+ _lua_pushinteger(i1, HEAP32[i3 + 28 >> 2] | 0);
+ _lua_setfield(i1, -2, 11888);
+ _lua_pushstring(i1, HEAP32[i3 + 12 >> 2] | 0) | 0;
+ _lua_setfield(i1, -2, 11904);
+ }
+ if ((_strchr(i6, 108) | 0) != 0) {
+ _lua_pushinteger(i1, HEAP32[i3 + 20 >> 2] | 0);
+ _lua_setfield(i1, -2, 11912);
+ }
+ if ((_strchr(i6, 117) | 0) != 0) {
+ _lua_pushinteger(i1, HEAPU8[i3 + 32 | 0] | 0);
+ _lua_setfield(i1, -2, 11928);
+ _lua_pushinteger(i1, HEAPU8[i3 + 33 | 0] | 0);
+ _lua_setfield(i1, -2, 11936);
+ _lua_pushboolean(i1, HEAP8[i3 + 34 | 0] | 0);
+ _lua_setfield(i1, -2, 11944);
+ }
+ if ((_strchr(i6, 110) | 0) != 0) {
+ _lua_pushstring(i1, HEAP32[i3 + 4 >> 2] | 0) | 0;
+ _lua_setfield(i1, -2, 11960);
+ _lua_pushstring(i1, HEAP32[i3 + 8 >> 2] | 0) | 0;
+ _lua_setfield(i1, -2, 11968);
+ }
+ if ((_strchr(i6, 116) | 0) != 0) {
+ _lua_pushboolean(i1, HEAP8[i3 + 35 | 0] | 0);
+ _lua_setfield(i1, -2, 11984);
+ }
+ if ((_strchr(i6, 76) | 0) != 0) {
+ if ((i4 | 0) == (i1 | 0)) {
+ _lua_pushvalue(i1, -2);
+ _lua_remove(i1, -3);
+ } else {
+ _lua_xmove(i4, i1, 1);
+ }
+ _lua_setfield(i1, -2, 12e3);
+ }
+ if ((_strchr(i6, 102) | 0) == 0) {
+ i7 = 1;
+ STACKTOP = i2;
+ return i7 | 0;
+ }
+ if ((i4 | 0) == (i1 | 0)) {
+ _lua_pushvalue(i1, -2);
+ _lua_remove(i1, -3);
+ } else {
+ _lua_xmove(i4, i1, 1);
+ }
+ _lua_setfield(i1, -2, 12016);
+ i7 = 1;
+ STACKTOP = i2;
+ return i7 | 0;
+}
+function _luaL_traceback(i4, i1, i9, i7) {
+ i4 = i4 | 0;
+ i1 = i1 | 0;
+ i9 = i9 | 0;
+ i7 = i7 | 0;
+ var i2 = 0, i3 = 0, i5 = 0, i6 = 0, i8 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 208 | 0;
+ i6 = i3;
+ i5 = i3 + 100 | 0;
+ i2 = _lua_gettop(i4) | 0;
+ i8 = 1;
+ i10 = 1;
+ while (1) {
+ if ((_lua_getstack(i1, i8, i6) | 0) == 0) {
+ break;
+ } else {
+ i10 = i8;
+ i8 = i8 << 1;
+ }
+ }
+ if ((i10 | 0) < (i8 | 0)) {
+ while (1) {
+ i11 = (i8 + i10 | 0) / 2 | 0;
+ i12 = (_lua_getstack(i1, i11, i6) | 0) == 0;
+ i8 = i12 ? i11 : i8;
+ i10 = i12 ? i10 : i11 + 1 | 0;
+ if ((i10 | 0) >= (i8 | 0)) {
+ i10 = i8;
+ break;
+ }
+ }
+ } else {
+ i10 = i8;
+ }
+ i8 = (i10 + -1 | 0) > 22 ? 12 : 0;
+ if ((i9 | 0) != 0) {
+ HEAP32[i6 >> 2] = i9;
+ _lua_pushfstring(i4, 944, i6) | 0;
+ }
+ _lua_pushlstring(i4, 952, 16) | 0;
+ if ((_lua_getstack(i1, i7, i5) | 0) == 0) {
+ i17 = _lua_gettop(i4) | 0;
+ i17 = i17 - i2 | 0;
+ _lua_concat(i4, i17);
+ STACKTOP = i3;
+ return;
+ }
+ i10 = i10 + -11 | 0;
+ i13 = i5 + 36 | 0;
+ i9 = i5 + 20 | 0;
+ i16 = i5 + 8 | 0;
+ i12 = i5 + 12 | 0;
+ i15 = i5 + 24 | 0;
+ i14 = i5 + 35 | 0;
+ i11 = i5 + 4 | 0;
+ do {
+ i7 = i7 + 1 | 0;
+ if ((i7 | 0) == (i8 | 0)) {
+ _lua_pushlstring(i4, 976, 5) | 0;
+ i7 = i10;
+ } else {
+ _lua_getinfo(i1, 984, i5) | 0;
+ HEAP32[i6 >> 2] = i13;
+ _lua_pushfstring(i4, 992, i6) | 0;
+ i17 = HEAP32[i9 >> 2] | 0;
+ if ((i17 | 0) > 0) {
+ HEAP32[i6 >> 2] = i17;
+ _lua_pushfstring(i4, 1e3, i6) | 0;
+ }
+ _lua_pushlstring(i4, 1008, 4) | 0;
+ do {
+ if ((HEAP8[HEAP32[i16 >> 2] | 0] | 0) == 0) {
+ i17 = HEAP8[HEAP32[i12 >> 2] | 0] | 0;
+ if (i17 << 24 >> 24 == 109) {
+ _lua_pushlstring(i4, 1800, 10) | 0;
+ break;
+ } else if (i17 << 24 >> 24 == 67) {
+ if ((_pushglobalfuncname(i4, i5) | 0) == 0) {
+ _lua_pushlstring(i4, 1112, 1) | 0;
+ break;
+ } else {
+ HEAP32[i6 >> 2] = _lua_tolstring(i4, -1, 0) | 0;
+ _lua_pushfstring(i4, 1784, i6) | 0;
+ _lua_remove(i4, -2);
+ break;
+ }
+ } else {
+ i17 = HEAP32[i15 >> 2] | 0;
+ HEAP32[i6 >> 2] = i13;
+ HEAP32[i6 + 4 >> 2] = i17;
+ _lua_pushfstring(i4, 1816, i6) | 0;
+ break;
+ }
+ } else {
+ HEAP32[i6 >> 2] = HEAP32[i11 >> 2];
+ _lua_pushfstring(i4, 1784, i6) | 0;
+ }
+ } while (0);
+ if ((HEAP8[i14] | 0) != 0) {
+ _lua_pushlstring(i4, 1016, 20) | 0;
+ }
+ _lua_concat(i4, (_lua_gettop(i4) | 0) - i2 | 0);
+ }
+ } while ((_lua_getstack(i1, i7, i5) | 0) != 0);
+ i17 = _lua_gettop(i4) | 0;
+ i17 = i17 - i2 | 0;
+ _lua_concat(i4, i17);
+ STACKTOP = i3;
+ return;
+}
+function _luaK_exp2RK(i3, i1) {
+ i3 = i3 | 0;
+ i1 = i1 | 0;
+ var i2 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, d11 = 0.0, i12 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 32 | 0;
+ i7 = i2 + 16 | 0;
+ i6 = i2;
+ i4 = i1 + 16 | 0;
+ i5 = i1 + 20 | 0;
+ i10 = (HEAP32[i4 >> 2] | 0) == (HEAP32[i5 >> 2] | 0);
+ _luaK_dischargevars(i3, i1);
+ do {
+ if (!i10) {
+ if ((HEAP32[i1 >> 2] | 0) == 6) {
+ i10 = HEAP32[i1 + 8 >> 2] | 0;
+ if ((HEAP32[i4 >> 2] | 0) == (HEAP32[i5 >> 2] | 0)) {
+ break;
+ }
+ if ((i10 | 0) >= (HEAPU8[i3 + 46 | 0] | 0 | 0)) {
+ _exp2reg(i3, i1, i10);
+ break;
+ }
+ }
+ _luaK_exp2nextreg(i3, i1);
+ }
+ } while (0);
+ i10 = HEAP32[i1 >> 2] | 0;
+ switch (i10 | 0) {
+ case 4:
+ {
+ i8 = HEAP32[i1 + 8 >> 2] | 0;
+ i9 = 18;
+ break;
+ }
+ case 1:
+ case 3:
+ case 2:
+ {
+ if ((HEAP32[i3 + 32 >> 2] | 0) < 256) {
+ if ((i10 | 0) == 1) {
+ HEAP32[i6 + 8 >> 2] = 0;
+ HEAP32[i7 >> 2] = HEAP32[i3 + 4 >> 2];
+ HEAP32[i7 + 8 >> 2] = 69;
+ i3 = _addk(i3, i7, i6) | 0;
+ } else {
+ HEAP32[i7 >> 2] = (i10 | 0) == 2;
+ HEAP32[i7 + 8 >> 2] = 1;
+ i3 = _addk(i3, i7, i7) | 0;
+ }
+ HEAP32[i1 + 8 >> 2] = i3;
+ HEAP32[i1 >> 2] = 4;
+ i10 = i3 | 256;
+ STACKTOP = i2;
+ return i10 | 0;
+ }
+ break;
+ }
+ case 5:
+ {
+ i9 = i1 + 8 | 0;
+ d11 = +HEAPF64[i9 >> 3];
+ HEAPF64[i7 >> 3] = d11;
+ i8 = HEAP32[(HEAP32[i3 + 12 >> 2] | 0) + 52 >> 2] | 0;
+ HEAPF64[i6 >> 3] = d11;
+ HEAP32[i6 + 8 >> 2] = 3;
+ if (d11 != d11 | 0.0 != 0.0 | d11 == 0.0) {
+ i10 = i8 + 8 | 0;
+ i12 = HEAP32[i10 >> 2] | 0;
+ HEAP32[i10 >> 2] = i12 + 16;
+ i8 = _luaS_newlstr(i8, i7, 8) | 0;
+ HEAP32[i12 >> 2] = i8;
+ HEAP32[i12 + 8 >> 2] = HEAPU8[i8 + 4 | 0] | 0 | 64;
+ i8 = _addk(i3, (HEAP32[i10 >> 2] | 0) + -16 | 0, i6) | 0;
+ HEAP32[i10 >> 2] = (HEAP32[i10 >> 2] | 0) + -16;
+ } else {
+ i8 = _addk(i3, i6, i6) | 0;
+ }
+ HEAP32[i9 >> 2] = i8;
+ HEAP32[i1 >> 2] = 4;
+ i9 = 18;
+ break;
+ }
+ default:
+ {}
+ }
+ if ((i9 | 0) == 18 ? (i8 | 0) < 256 : 0) {
+ i12 = i8 | 256;
+ STACKTOP = i2;
+ return i12 | 0;
+ }
+ _luaK_dischargevars(i3, i1);
+ if ((HEAP32[i1 >> 2] | 0) == 6) {
+ i7 = i1 + 8 | 0;
+ i6 = HEAP32[i7 >> 2] | 0;
+ if ((HEAP32[i4 >> 2] | 0) == (HEAP32[i5 >> 2] | 0)) {
+ i12 = i6;
+ STACKTOP = i2;
+ return i12 | 0;
+ }
+ if ((i6 | 0) >= (HEAPU8[i3 + 46 | 0] | 0 | 0)) {
+ _exp2reg(i3, i1, i6);
+ i12 = HEAP32[i7 >> 2] | 0;
+ STACKTOP = i2;
+ return i12 | 0;
+ }
+ } else {
+ i7 = i1 + 8 | 0;
+ }
+ _luaK_exp2nextreg(i3, i1);
+ i12 = HEAP32[i7 >> 2] | 0;
+ STACKTOP = i2;
+ return i12 | 0;
+}
+function _os_date(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 1264 | 0;
+ i4 = i2;
+ i7 = i2 + 1048 | 0;
+ i6 = i2 + 1256 | 0;
+ i3 = i2 + 8 | 0;
+ i5 = i2 + 1056 | 0;
+ i12 = _luaL_optlstring(i1, 1, 6064, 0) | 0;
+ if ((_lua_type(i1, 2) | 0) < 1) {
+ i8 = _time(0) | 0;
+ } else {
+ i8 = ~~+_luaL_checknumber(i1, 2);
+ }
+ HEAP32[i7 >> 2] = i8;
+ if ((HEAP8[i12] | 0) == 33) {
+ i12 = i12 + 1 | 0;
+ i10 = _gmtime(i7 | 0) | 0;
+ } else {
+ i10 = _localtime(i7 | 0) | 0;
+ }
+ if ((i10 | 0) == 0) {
+ _lua_pushnil(i1);
+ STACKTOP = i2;
+ return 1;
+ }
+ if ((_strcmp(i12, 6072) | 0) == 0) {
+ _lua_createtable(i1, 0, 9);
+ _lua_pushinteger(i1, HEAP32[i10 >> 2] | 0);
+ _lua_setfield(i1, -2, 5864);
+ _lua_pushinteger(i1, HEAP32[i10 + 4 >> 2] | 0);
+ _lua_setfield(i1, -2, 5872);
+ _lua_pushinteger(i1, HEAP32[i10 + 8 >> 2] | 0);
+ _lua_setfield(i1, -2, 5880);
+ _lua_pushinteger(i1, HEAP32[i10 + 12 >> 2] | 0);
+ _lua_setfield(i1, -2, 5888);
+ _lua_pushinteger(i1, (HEAP32[i10 + 16 >> 2] | 0) + 1 | 0);
+ _lua_setfield(i1, -2, 5896);
+ _lua_pushinteger(i1, (HEAP32[i10 + 20 >> 2] | 0) + 1900 | 0);
+ _lua_setfield(i1, -2, 5904);
+ _lua_pushinteger(i1, (HEAP32[i10 + 24 >> 2] | 0) + 1 | 0);
+ _lua_setfield(i1, -2, 6080);
+ _lua_pushinteger(i1, (HEAP32[i10 + 28 >> 2] | 0) + 1 | 0);
+ _lua_setfield(i1, -2, 6088);
+ i3 = HEAP32[i10 + 32 >> 2] | 0;
+ if ((i3 | 0) < 0) {
+ STACKTOP = i2;
+ return 1;
+ }
+ _lua_pushboolean(i1, i3);
+ _lua_setfield(i1, -2, 5912);
+ STACKTOP = i2;
+ return 1;
+ }
+ HEAP8[i6] = 37;
+ _luaL_buffinit(i1, i3);
+ i11 = i3 + 8 | 0;
+ i9 = i3 + 4 | 0;
+ i8 = i6 + 1 | 0;
+ i7 = i6 + 2 | 0;
+ while (1) {
+ i14 = HEAP8[i12] | 0;
+ if (i14 << 24 >> 24 == 0) {
+ break;
+ } else if (!(i14 << 24 >> 24 == 37)) {
+ i13 = HEAP32[i11 >> 2] | 0;
+ if (!(i13 >>> 0 < (HEAP32[i9 >> 2] | 0) >>> 0)) {
+ _luaL_prepbuffsize(i3, 1) | 0;
+ i13 = HEAP32[i11 >> 2] | 0;
+ i14 = HEAP8[i12] | 0;
+ }
+ HEAP32[i11 >> 2] = i13 + 1;
+ HEAP8[(HEAP32[i3 >> 2] | 0) + i13 | 0] = i14;
+ i12 = i12 + 1 | 0;
+ continue;
+ }
+ i13 = i12 + 1 | 0;
+ i12 = i12 + 2 | 0;
+ i14 = HEAP8[i13] | 0;
+ if (!(i14 << 24 >> 24 == 0) ? (_memchr(6096, i14 << 24 >> 24, 23) | 0) != 0 : 0) {
+ HEAP8[i8] = i14;
+ HEAP8[i7] = 0;
+ } else {
+ HEAP32[i4 >> 2] = i13;
+ _luaL_argerror(i1, 1, _lua_pushfstring(i1, 6120, i4) | 0) | 0;
+ i12 = i13;
+ }
+ _luaL_addlstring(i3, i5, _strftime(i5 | 0, 200, i6 | 0, i10 | 0) | 0);
+ }
+ _luaL_pushresult(i3);
+ STACKTOP = i2;
+ return 1;
+}
+function _luaV_finishOp(i3) {
+ i3 = i3 | 0;
+ var i1 = 0, i2 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0;
+ i1 = STACKTOP;
+ i8 = HEAP32[i3 + 16 >> 2] | 0;
+ i7 = i8 + 24 | 0;
+ i4 = HEAP32[i7 >> 2] | 0;
+ i5 = i8 + 28 | 0;
+ i2 = HEAP32[(HEAP32[i5 >> 2] | 0) + -4 >> 2] | 0;
+ i6 = i2 & 63;
+ switch (i6 | 0) {
+ case 34:
+ {
+ HEAP32[i3 + 8 >> 2] = HEAP32[i8 + 4 >> 2];
+ STACKTOP = i1;
+ return;
+ }
+ case 24:
+ case 25:
+ case 26:
+ {
+ i7 = i3 + 8 | 0;
+ i8 = HEAP32[i7 >> 2] | 0;
+ i9 = HEAP32[i8 + -8 >> 2] | 0;
+ if ((i9 | 0) != 0) {
+ if ((i9 | 0) == 1) {
+ i9 = (HEAP32[i8 + -16 >> 2] | 0) == 0;
+ } else {
+ i9 = 0;
+ }
+ } else {
+ i9 = 1;
+ }
+ i9 = i9 & 1;
+ i10 = i9 ^ 1;
+ HEAP32[i7 >> 2] = i8 + -16;
+ if ((i6 | 0) == 26) {
+ i8 = (HEAP32[(_luaT_gettmbyobj(i3, i4 + (i2 >>> 23 << 4) | 0, 14) | 0) + 8 >> 2] | 0) == 0;
+ i10 = i8 ? i9 : i10;
+ }
+ if ((i10 | 0) == (i2 >>> 6 & 255 | 0)) {
+ STACKTOP = i1;
+ return;
+ }
+ HEAP32[i5 >> 2] = (HEAP32[i5 >> 2] | 0) + 4;
+ STACKTOP = i1;
+ return;
+ }
+ case 22:
+ {
+ i5 = i3 + 8 | 0;
+ i10 = HEAP32[i5 >> 2] | 0;
+ i6 = i10 + -32 | 0;
+ i4 = i6 - (i4 + (i2 >>> 23 << 4)) | 0;
+ i12 = i10 + -16 | 0;
+ i11 = HEAP32[i12 + 4 >> 2] | 0;
+ i9 = i10 + -48 | 0;
+ HEAP32[i9 >> 2] = HEAP32[i12 >> 2];
+ HEAP32[i9 + 4 >> 2] = i11;
+ HEAP32[i10 + -40 >> 2] = HEAP32[i10 + -8 >> 2];
+ if ((i4 | 0) > 16) {
+ HEAP32[i5 >> 2] = i6;
+ _luaV_concat(i3, i4 >> 4);
+ }
+ i10 = HEAP32[i5 >> 2] | 0;
+ i11 = HEAP32[i7 >> 2] | 0;
+ i12 = i2 >>> 6 & 255;
+ i6 = i10 + -16 | 0;
+ i7 = HEAP32[i6 + 4 >> 2] | 0;
+ i9 = i11 + (i12 << 4) | 0;
+ HEAP32[i9 >> 2] = HEAP32[i6 >> 2];
+ HEAP32[i9 + 4 >> 2] = i7;
+ HEAP32[i11 + (i12 << 4) + 8 >> 2] = HEAP32[i10 + -8 >> 2];
+ HEAP32[i5 >> 2] = HEAP32[i8 + 4 >> 2];
+ STACKTOP = i1;
+ return;
+ }
+ case 12:
+ case 7:
+ case 6:
+ case 21:
+ case 19:
+ case 18:
+ case 17:
+ case 16:
+ case 15:
+ case 14:
+ case 13:
+ {
+ i12 = i3 + 8 | 0;
+ i11 = HEAP32[i12 >> 2] | 0;
+ i8 = i11 + -16 | 0;
+ HEAP32[i12 >> 2] = i8;
+ i12 = i2 >>> 6 & 255;
+ i9 = HEAP32[i8 + 4 >> 2] | 0;
+ i10 = i4 + (i12 << 4) | 0;
+ HEAP32[i10 >> 2] = HEAP32[i8 >> 2];
+ HEAP32[i10 + 4 >> 2] = i9;
+ HEAP32[i4 + (i12 << 4) + 8 >> 2] = HEAP32[i11 + -8 >> 2];
+ STACKTOP = i1;
+ return;
+ }
+ case 29:
+ {
+ if ((i2 & 8372224 | 0) == 0) {
+ STACKTOP = i1;
+ return;
+ }
+ HEAP32[i3 + 8 >> 2] = HEAP32[i8 + 4 >> 2];
+ STACKTOP = i1;
+ return;
+ }
+ default:
+ {
+ STACKTOP = i1;
+ return;
+ }
+ }
+}
+function _auxsort(i2, i4, i5) {
+ i2 = i2 | 0;
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i3 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = i1;
+ if ((i4 | 0) >= (i5 | 0)) {
+ STACKTOP = i1;
+ return;
+ }
+ while (1) {
+ _lua_rawgeti(i2, 1, i4);
+ _lua_rawgeti(i2, 1, i5);
+ if ((_sort_comp(i2, -1, -2) | 0) == 0) {
+ _lua_settop(i2, -3);
+ } else {
+ _lua_rawseti(i2, 1, i4);
+ _lua_rawseti(i2, 1, i5);
+ }
+ i6 = i5 - i4 | 0;
+ if ((i6 | 0) == 1) {
+ i2 = 24;
+ break;
+ }
+ i7 = (i5 + i4 | 0) / 2 | 0;
+ _lua_rawgeti(i2, 1, i7);
+ _lua_rawgeti(i2, 1, i4);
+ do {
+ if ((_sort_comp(i2, -2, -1) | 0) == 0) {
+ _lua_settop(i2, -2);
+ _lua_rawgeti(i2, 1, i5);
+ if ((_sort_comp(i2, -1, -2) | 0) == 0) {
+ _lua_settop(i2, -3);
+ break;
+ } else {
+ _lua_rawseti(i2, 1, i7);
+ _lua_rawseti(i2, 1, i5);
+ break;
+ }
+ } else {
+ _lua_rawseti(i2, 1, i7);
+ _lua_rawseti(i2, 1, i4);
+ }
+ } while (0);
+ if ((i6 | 0) == 2) {
+ i2 = 24;
+ break;
+ }
+ _lua_rawgeti(i2, 1, i7);
+ _lua_pushvalue(i2, -1);
+ i6 = i5 + -1 | 0;
+ _lua_rawgeti(i2, 1, i6);
+ _lua_rawseti(i2, 1, i7);
+ _lua_rawseti(i2, 1, i6);
+ i7 = i4;
+ i9 = i6;
+ while (1) {
+ i8 = i7 + 1 | 0;
+ _lua_rawgeti(i2, 1, i8);
+ if ((_sort_comp(i2, -1, -2) | 0) != 0) {
+ i7 = i8;
+ while (1) {
+ if ((i7 | 0) >= (i5 | 0)) {
+ _luaL_error(i2, 8216, i3) | 0;
+ }
+ _lua_settop(i2, -2);
+ i8 = i7 + 1 | 0;
+ _lua_rawgeti(i2, 1, i8);
+ if ((_sort_comp(i2, -1, -2) | 0) == 0) {
+ break;
+ } else {
+ i7 = i8;
+ }
+ }
+ }
+ i10 = i9 + -1 | 0;
+ _lua_rawgeti(i2, 1, i10);
+ if ((_sort_comp(i2, -3, -1) | 0) != 0) {
+ i9 = i10;
+ while (1) {
+ if ((i9 | 0) <= (i4 | 0)) {
+ _luaL_error(i2, 8216, i3) | 0;
+ }
+ _lua_settop(i2, -2);
+ i10 = i9 + -1 | 0;
+ _lua_rawgeti(i2, 1, i10);
+ if ((_sort_comp(i2, -3, -1) | 0) == 0) {
+ break;
+ } else {
+ i9 = i10;
+ }
+ }
+ }
+ if ((i9 | 0) <= (i8 | 0)) {
+ break;
+ }
+ _lua_rawseti(i2, 1, i8);
+ _lua_rawseti(i2, 1, i10);
+ i7 = i8;
+ i9 = i10;
+ }
+ _lua_settop(i2, -4);
+ _lua_rawgeti(i2, 1, i6);
+ _lua_rawgeti(i2, 1, i8);
+ _lua_rawseti(i2, 1, i6);
+ _lua_rawseti(i2, 1, i8);
+ i8 = (i8 - i4 | 0) < (i5 - i8 | 0);
+ i9 = i7 + 2 | 0;
+ i10 = i8 ? i9 : i4;
+ i6 = i8 ? i5 : i7;
+ _auxsort(i2, i8 ? i4 : i9, i8 ? i7 : i5);
+ if ((i10 | 0) < (i6 | 0)) {
+ i4 = i10;
+ i5 = i6;
+ } else {
+ i2 = 24;
+ break;
+ }
+ }
+ if ((i2 | 0) == 24) {
+ STACKTOP = i1;
+ return;
+ }
+}
+function _skip_sep(i3) {
+ i3 = i3 | 0;
+ var i1 = 0, i2 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0;
+ i1 = STACKTOP;
+ i2 = HEAP32[i3 >> 2] | 0;
+ i4 = i3 + 60 | 0;
+ i10 = HEAP32[i4 >> 2] | 0;
+ i8 = i10 + 4 | 0;
+ i11 = HEAP32[i8 >> 2] | 0;
+ i7 = i10 + 8 | 0;
+ i5 = HEAP32[i7 >> 2] | 0;
+ do {
+ if ((i11 + 1 | 0) >>> 0 > i5 >>> 0) {
+ if (i5 >>> 0 > 2147483645) {
+ _lexerror(i3, 12368, 0);
+ }
+ i12 = i5 << 1;
+ i11 = HEAP32[i3 + 52 >> 2] | 0;
+ if ((i12 | 0) == -2) {
+ _luaM_toobig(i11);
+ } else {
+ i9 = _luaM_realloc_(i11, HEAP32[i10 >> 2] | 0, i5, i12) | 0;
+ HEAP32[i10 >> 2] = i9;
+ HEAP32[i7 >> 2] = i12;
+ i6 = HEAP32[i8 >> 2] | 0;
+ break;
+ }
+ } else {
+ i6 = i11;
+ i9 = HEAP32[i10 >> 2] | 0;
+ }
+ } while (0);
+ HEAP32[i8 >> 2] = i6 + 1;
+ HEAP8[i9 + i6 | 0] = i2;
+ i5 = i3 + 56 | 0;
+ i6 = HEAP32[i5 >> 2] | 0;
+ i13 = HEAP32[i6 >> 2] | 0;
+ HEAP32[i6 >> 2] = i13 + -1;
+ if ((i13 | 0) == 0) {
+ i6 = _luaZ_fill(i6) | 0;
+ } else {
+ i13 = i6 + 4 | 0;
+ i6 = HEAP32[i13 >> 2] | 0;
+ HEAP32[i13 >> 2] = i6 + 1;
+ i6 = HEAPU8[i6] | 0;
+ }
+ HEAP32[i3 >> 2] = i6;
+ if ((i6 | 0) != 61) {
+ i12 = i6;
+ i13 = 0;
+ i12 = (i12 | 0) != (i2 | 0);
+ i12 = i12 << 31 >> 31;
+ i13 = i12 ^ i13;
+ STACKTOP = i1;
+ return i13 | 0;
+ }
+ i6 = i3 + 52 | 0;
+ i7 = 0;
+ while (1) {
+ i9 = HEAP32[i4 >> 2] | 0;
+ i8 = i9 + 4 | 0;
+ i10 = HEAP32[i8 >> 2] | 0;
+ i11 = i9 + 8 | 0;
+ i12 = HEAP32[i11 >> 2] | 0;
+ if ((i10 + 1 | 0) >>> 0 > i12 >>> 0) {
+ if (i12 >>> 0 > 2147483645) {
+ i4 = 16;
+ break;
+ }
+ i13 = i12 << 1;
+ i10 = HEAP32[i6 >> 2] | 0;
+ if ((i13 | 0) == -2) {
+ i4 = 18;
+ break;
+ }
+ i12 = _luaM_realloc_(i10, HEAP32[i9 >> 2] | 0, i12, i13) | 0;
+ HEAP32[i9 >> 2] = i12;
+ HEAP32[i11 >> 2] = i13;
+ i10 = HEAP32[i8 >> 2] | 0;
+ i9 = i12;
+ } else {
+ i9 = HEAP32[i9 >> 2] | 0;
+ }
+ HEAP32[i8 >> 2] = i10 + 1;
+ HEAP8[i9 + i10 | 0] = 61;
+ i8 = HEAP32[i5 >> 2] | 0;
+ i13 = HEAP32[i8 >> 2] | 0;
+ HEAP32[i8 >> 2] = i13 + -1;
+ if ((i13 | 0) == 0) {
+ i8 = _luaZ_fill(i8) | 0;
+ } else {
+ i13 = i8 + 4 | 0;
+ i8 = HEAP32[i13 >> 2] | 0;
+ HEAP32[i13 >> 2] = i8 + 1;
+ i8 = HEAPU8[i8] | 0;
+ }
+ HEAP32[i3 >> 2] = i8;
+ i7 = i7 + 1 | 0;
+ if ((i8 | 0) != 61) {
+ i4 = 24;
+ break;
+ }
+ }
+ if ((i4 | 0) == 16) {
+ _lexerror(i3, 12368, 0);
+ } else if ((i4 | 0) == 18) {
+ _luaM_toobig(i10);
+ } else if ((i4 | 0) == 24) {
+ i13 = (i8 | 0) != (i2 | 0);
+ i13 = i13 << 31 >> 31;
+ i13 = i13 ^ i7;
+ STACKTOP = i1;
+ return i13 | 0;
+ }
+ return 0;
+}
+function _luaV_arith(i8, i2, i3, i5, i4) {
+ i8 = i8 | 0;
+ i2 = i2 | 0;
+ i3 = i3 | 0;
+ i5 = i5 | 0;
+ i4 = i4 | 0;
+ var i1 = 0, i6 = 0, i7 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, d14 = 0.0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 32 | 0;
+ i9 = i1 + 24 | 0;
+ i13 = i1 + 16 | 0;
+ i12 = i1;
+ i6 = i3 + 8 | 0;
+ i11 = HEAP32[i6 >> 2] | 0;
+ if ((i11 | 0) != 3) {
+ if ((i11 & 15 | 0) == 4 ? (i11 = HEAP32[i3 >> 2] | 0, (_luaO_str2d(i11 + 16 | 0, HEAP32[i11 + 12 >> 2] | 0, i13) | 0) != 0) : 0) {
+ HEAPF64[i12 >> 3] = +HEAPF64[i13 >> 3];
+ HEAP32[i12 + 8 >> 2] = 3;
+ i10 = 5;
+ }
+ } else {
+ i12 = i3;
+ i10 = 5;
+ }
+ do {
+ if ((i10 | 0) == 5) {
+ i10 = HEAP32[i5 + 8 >> 2] | 0;
+ if ((i10 | 0) == 3) {
+ if ((i5 | 0) == 0) {
+ break;
+ }
+ d14 = +HEAPF64[i5 >> 3];
+ } else {
+ if ((i10 & 15 | 0) != 4) {
+ break;
+ }
+ i13 = HEAP32[i5 >> 2] | 0;
+ if ((_luaO_str2d(i13 + 16 | 0, HEAP32[i13 + 12 >> 2] | 0, i9) | 0) == 0) {
+ break;
+ }
+ d14 = +HEAPF64[i9 >> 3];
+ }
+ HEAPF64[i2 >> 3] = +_luaO_arith(i4 + -6 | 0, +HEAPF64[i12 >> 3], d14);
+ HEAP32[i2 + 8 >> 2] = 3;
+ STACKTOP = i1;
+ return;
+ }
+ } while (0);
+ i9 = _luaT_gettmbyobj(i8, i3, i4) | 0;
+ if ((HEAP32[i9 + 8 >> 2] | 0) == 0) {
+ i4 = _luaT_gettmbyobj(i8, i5, i4) | 0;
+ if ((HEAP32[i4 + 8 >> 2] | 0) == 0) {
+ _luaG_aritherror(i8, i3, i5);
+ } else {
+ i7 = i4;
+ }
+ } else {
+ i7 = i9;
+ }
+ i12 = i8 + 28 | 0;
+ i13 = i2 - (HEAP32[i12 >> 2] | 0) | 0;
+ i9 = i8 + 8 | 0;
+ i11 = HEAP32[i9 >> 2] | 0;
+ HEAP32[i9 >> 2] = i11 + 16;
+ i2 = i7;
+ i10 = HEAP32[i2 + 4 >> 2] | 0;
+ i4 = i11;
+ HEAP32[i4 >> 2] = HEAP32[i2 >> 2];
+ HEAP32[i4 + 4 >> 2] = i10;
+ HEAP32[i11 + 8 >> 2] = HEAP32[i7 + 8 >> 2];
+ i11 = HEAP32[i9 >> 2] | 0;
+ HEAP32[i9 >> 2] = i11 + 16;
+ i4 = i3;
+ i10 = HEAP32[i4 + 4 >> 2] | 0;
+ i7 = i11;
+ HEAP32[i7 >> 2] = HEAP32[i4 >> 2];
+ HEAP32[i7 + 4 >> 2] = i10;
+ HEAP32[i11 + 8 >> 2] = HEAP32[i6 >> 2];
+ i11 = HEAP32[i9 >> 2] | 0;
+ HEAP32[i9 >> 2] = i11 + 16;
+ i6 = i5;
+ i7 = HEAP32[i6 + 4 >> 2] | 0;
+ i10 = i11;
+ HEAP32[i10 >> 2] = HEAP32[i6 >> 2];
+ HEAP32[i10 + 4 >> 2] = i7;
+ HEAP32[i11 + 8 >> 2] = HEAP32[i5 + 8 >> 2];
+ _luaD_call(i8, (HEAP32[i9 >> 2] | 0) + -48 | 0, 1, HEAP8[(HEAP32[i8 + 16 >> 2] | 0) + 18 | 0] & 1);
+ i12 = HEAP32[i12 >> 2] | 0;
+ i11 = HEAP32[i9 >> 2] | 0;
+ i8 = i11 + -16 | 0;
+ HEAP32[i9 >> 2] = i8;
+ i9 = HEAP32[i8 + 4 >> 2] | 0;
+ i10 = i12 + i13 | 0;
+ HEAP32[i10 >> 2] = HEAP32[i8 >> 2];
+ HEAP32[i10 + 4 >> 2] = i9;
+ HEAP32[i12 + (i13 + 8) >> 2] = HEAP32[i11 + -8 >> 2];
+ STACKTOP = i1;
+ return;
+}
+function _new_localvar(i1, i8) {
+ i1 = i1 | 0;
+ i8 = i8 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i4 = i3;
+ i5 = HEAP32[i1 + 48 >> 2] | 0;
+ i2 = HEAP32[i1 + 64 >> 2] | 0;
+ i7 = HEAP32[i5 >> 2] | 0;
+ i10 = i7 + 60 | 0;
+ i11 = HEAP32[i10 >> 2] | 0;
+ i6 = i5 + 44 | 0;
+ if ((HEAP16[i6 >> 1] | 0) < (i11 | 0)) {
+ i9 = i7 + 24 | 0;
+ i10 = i11;
+ } else {
+ i9 = i7 + 24 | 0;
+ HEAP32[i9 >> 2] = _luaM_growaux_(HEAP32[i1 + 52 >> 2] | 0, HEAP32[i9 >> 2] | 0, i10, 12, 32767, 6496) | 0;
+ i10 = HEAP32[i10 >> 2] | 0;
+ }
+ if ((i11 | 0) < (i10 | 0)) {
+ i12 = i11;
+ while (1) {
+ i11 = i12 + 1 | 0;
+ HEAP32[(HEAP32[i9 >> 2] | 0) + (i12 * 12 | 0) >> 2] = 0;
+ if ((i11 | 0) == (i10 | 0)) {
+ break;
+ } else {
+ i12 = i11;
+ }
+ }
+ }
+ i10 = HEAP16[i6 >> 1] | 0;
+ HEAP32[(HEAP32[i9 >> 2] | 0) + ((i10 << 16 >> 16) * 12 | 0) >> 2] = i8;
+ if (!((HEAP8[i8 + 5 | 0] & 3) == 0) ? !((HEAP8[i7 + 5 | 0] & 4) == 0) : 0) {
+ _luaC_barrier_(HEAP32[i1 + 52 >> 2] | 0, i7, i8);
+ i7 = HEAP16[i6 >> 1] | 0;
+ } else {
+ i7 = i10;
+ }
+ HEAP16[i6 >> 1] = i7 + 1 << 16 >> 16;
+ i6 = i2 + 4 | 0;
+ i8 = HEAP32[i6 >> 2] | 0;
+ if ((i8 + 1 - (HEAP32[i5 + 40 >> 2] | 0) | 0) > 200) {
+ i10 = i5 + 12 | 0;
+ i9 = HEAP32[(HEAP32[i10 >> 2] | 0) + 52 >> 2] | 0;
+ i5 = HEAP32[(HEAP32[i5 >> 2] | 0) + 64 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ i11 = 6552;
+ HEAP32[i4 >> 2] = 6496;
+ i12 = i4 + 4 | 0;
+ HEAP32[i12 >> 2] = 200;
+ i12 = i4 + 8 | 0;
+ HEAP32[i12 >> 2] = i11;
+ i12 = _luaO_pushfstring(i9, 6592, i4) | 0;
+ i11 = HEAP32[i10 >> 2] | 0;
+ _luaX_syntaxerror(i11, i12);
+ }
+ HEAP32[i4 >> 2] = i5;
+ i11 = _luaO_pushfstring(i9, 6568, i4) | 0;
+ HEAP32[i4 >> 2] = 6496;
+ i12 = i4 + 4 | 0;
+ HEAP32[i12 >> 2] = 200;
+ i12 = i4 + 8 | 0;
+ HEAP32[i12 >> 2] = i11;
+ i12 = _luaO_pushfstring(i9, 6592, i4) | 0;
+ i11 = HEAP32[i10 >> 2] | 0;
+ _luaX_syntaxerror(i11, i12);
+ }
+ i4 = i2 + 8 | 0;
+ if ((i8 + 2 | 0) > (HEAP32[i4 >> 2] | 0)) {
+ i11 = _luaM_growaux_(HEAP32[i1 + 52 >> 2] | 0, HEAP32[i2 >> 2] | 0, i4, 2, 2147483645, 6496) | 0;
+ HEAP32[i2 >> 2] = i11;
+ i12 = HEAP32[i6 >> 2] | 0;
+ i10 = i12 + 1 | 0;
+ HEAP32[i6 >> 2] = i10;
+ i12 = i11 + (i12 << 1) | 0;
+ HEAP16[i12 >> 1] = i7;
+ STACKTOP = i3;
+ return;
+ } else {
+ i12 = i8;
+ i11 = HEAP32[i2 >> 2] | 0;
+ i10 = i12 + 1 | 0;
+ HEAP32[i6 >> 2] = i10;
+ i12 = i11 + (i12 << 1) | 0;
+ HEAP16[i12 >> 1] = i7;
+ STACKTOP = i3;
+ return;
+ }
+}
+function _luaC_fullgc(i1, i5) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0;
+ i2 = STACKTOP;
+ i4 = i1 + 12 | 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ i6 = i3 + 62 | 0;
+ i8 = HEAP8[i6] | 0;
+ i5 = (i5 | 0) != 0;
+ if (!i5) {
+ HEAP8[i6] = 0;
+ i9 = (HEAP32[i4 >> 2] | 0) + 104 | 0;
+ i10 = HEAP32[i9 >> 2] | 0;
+ if ((i10 | 0) != 0) {
+ do {
+ i11 = i10 + 5 | 0;
+ HEAP8[i11] = HEAP8[i11] & 191;
+ _GCTM(i1, 1);
+ i10 = HEAP32[i9 >> 2] | 0;
+ } while ((i10 | 0) != 0);
+ if ((HEAP8[i6] | 0) == 2) {
+ i9 = 7;
+ } else {
+ i9 = 6;
+ }
+ } else {
+ i9 = 6;
+ }
+ } else {
+ HEAP8[i6] = 1;
+ i9 = 6;
+ }
+ if ((i9 | 0) == 6 ? (HEAPU8[i3 + 61 | 0] | 0) < 2 : 0) {
+ i9 = 7;
+ }
+ if ((i9 | 0) == 7) {
+ i9 = HEAP32[i4 >> 2] | 0;
+ HEAP8[i9 + 61 | 0] = 2;
+ HEAP32[i9 + 64 >> 2] = 0;
+ i10 = i9 + 72 | 0;
+ do {
+ i11 = _sweeplist(i1, i10, 1) | 0;
+ } while ((i11 | 0) == (i10 | 0));
+ HEAP32[i9 + 80 >> 2] = i11;
+ i11 = i9 + 68 | 0;
+ do {
+ i10 = _sweeplist(i1, i11, 1) | 0;
+ } while ((i10 | 0) == (i11 | 0));
+ HEAP32[i9 + 76 >> 2] = i10;
+ }
+ i11 = HEAP32[i4 >> 2] | 0;
+ i9 = i11 + 61 | 0;
+ if ((HEAP8[i9] | 0) == 5) {
+ i9 = 5;
+ } else {
+ do {
+ _singlestep(i1) | 0;
+ } while ((HEAP8[i9] | 0) != 5);
+ i9 = HEAP32[i4 >> 2] | 0;
+ i11 = i9;
+ i9 = HEAP8[i9 + 61 | 0] | 0;
+ }
+ i10 = i11 + 61 | 0;
+ if ((1 << (i9 & 255) & -33 | 0) == 0) {
+ do {
+ _singlestep(i1) | 0;
+ } while ((1 << HEAPU8[i10] & -33 | 0) == 0);
+ i9 = HEAP32[i4 >> 2] | 0;
+ i11 = i9;
+ i9 = HEAP8[i9 + 61 | 0] | 0;
+ }
+ i10 = i11 + 61 | 0;
+ if (!(i9 << 24 >> 24 == 5)) {
+ do {
+ _singlestep(i1) | 0;
+ } while ((HEAP8[i10] | 0) != 5);
+ }
+ if (i8 << 24 >> 24 == 2 ? (i7 = (HEAP32[i4 >> 2] | 0) + 61 | 0, (HEAP8[i7] | 0) != 0) : 0) {
+ do {
+ _singlestep(i1) | 0;
+ } while ((HEAP8[i7] | 0) != 0);
+ }
+ HEAP8[i6] = i8;
+ i6 = HEAP32[i3 + 8 >> 2] | 0;
+ i7 = HEAP32[i3 + 12 >> 2] | 0;
+ i8 = (i7 + i6 | 0) / 100 | 0;
+ i9 = HEAP32[i3 + 156 >> 2] | 0;
+ if ((i9 | 0) < (2147483644 / (i8 | 0) | 0 | 0)) {
+ i8 = Math_imul(i9, i8) | 0;
+ } else {
+ i8 = 2147483644;
+ }
+ _luaE_setdebt(i3, i6 - i8 + i7 | 0);
+ if (i5) {
+ STACKTOP = i2;
+ return;
+ }
+ i3 = (HEAP32[i4 >> 2] | 0) + 104 | 0;
+ i4 = HEAP32[i3 >> 2] | 0;
+ if ((i4 | 0) == 0) {
+ STACKTOP = i2;
+ return;
+ }
+ do {
+ i11 = i4 + 5 | 0;
+ HEAP8[i11] = HEAP8[i11] & 191;
+ _GCTM(i1, 1);
+ i4 = HEAP32[i3 >> 2] | 0;
+ } while ((i4 | 0) != 0);
+ STACKTOP = i2;
+ return;
+}
+function _scanexp(i3, i6) {
+ i3 = i3 | 0;
+ i6 = i6 | 0;
+ var i1 = 0, i2 = 0, i4 = 0, i5 = 0, i7 = 0, i8 = 0;
+ i1 = STACKTOP;
+ i2 = i3 + 4 | 0;
+ i5 = HEAP32[i2 >> 2] | 0;
+ i4 = i3 + 100 | 0;
+ if (i5 >>> 0 < (HEAP32[i4 >> 2] | 0) >>> 0) {
+ HEAP32[i2 >> 2] = i5 + 1;
+ i8 = HEAPU8[i5] | 0;
+ } else {
+ i8 = ___shgetc(i3) | 0;
+ }
+ if ((i8 | 0) == 43 | (i8 | 0) == 45) {
+ i5 = (i8 | 0) == 45 | 0;
+ i7 = HEAP32[i2 >> 2] | 0;
+ if (i7 >>> 0 < (HEAP32[i4 >> 2] | 0) >>> 0) {
+ HEAP32[i2 >> 2] = i7 + 1;
+ i8 = HEAPU8[i7] | 0;
+ } else {
+ i8 = ___shgetc(i3) | 0;
+ }
+ if (!((i8 + -48 | 0) >>> 0 < 10 | (i6 | 0) == 0) ? (HEAP32[i4 >> 2] | 0) != 0 : 0) {
+ HEAP32[i2 >> 2] = (HEAP32[i2 >> 2] | 0) + -1;
+ }
+ } else {
+ i5 = 0;
+ }
+ if ((i8 + -48 | 0) >>> 0 > 9) {
+ if ((HEAP32[i4 >> 2] | 0) == 0) {
+ i7 = -2147483648;
+ i8 = 0;
+ tempRet0 = i7;
+ STACKTOP = i1;
+ return i8 | 0;
+ }
+ HEAP32[i2 >> 2] = (HEAP32[i2 >> 2] | 0) + -1;
+ i7 = -2147483648;
+ i8 = 0;
+ tempRet0 = i7;
+ STACKTOP = i1;
+ return i8 | 0;
+ } else {
+ i6 = 0;
+ }
+ while (1) {
+ i6 = i8 + -48 + i6 | 0;
+ i7 = HEAP32[i2 >> 2] | 0;
+ if (i7 >>> 0 < (HEAP32[i4 >> 2] | 0) >>> 0) {
+ HEAP32[i2 >> 2] = i7 + 1;
+ i8 = HEAPU8[i7] | 0;
+ } else {
+ i8 = ___shgetc(i3) | 0;
+ }
+ if (!((i8 + -48 | 0) >>> 0 < 10 & (i6 | 0) < 214748364)) {
+ break;
+ }
+ i6 = i6 * 10 | 0;
+ }
+ i7 = ((i6 | 0) < 0) << 31 >> 31;
+ if ((i8 + -48 | 0) >>> 0 < 10) {
+ do {
+ i7 = ___muldi3(i6 | 0, i7 | 0, 10, 0) | 0;
+ i6 = tempRet0;
+ i8 = _i64Add(i8 | 0, ((i8 | 0) < 0) << 31 >> 31 | 0, -48, -1) | 0;
+ i6 = _i64Add(i8 | 0, tempRet0 | 0, i7 | 0, i6 | 0) | 0;
+ i7 = tempRet0;
+ i8 = HEAP32[i2 >> 2] | 0;
+ if (i8 >>> 0 < (HEAP32[i4 >> 2] | 0) >>> 0) {
+ HEAP32[i2 >> 2] = i8 + 1;
+ i8 = HEAPU8[i8] | 0;
+ } else {
+ i8 = ___shgetc(i3) | 0;
+ }
+ } while ((i8 + -48 | 0) >>> 0 < 10 & ((i7 | 0) < 21474836 | (i7 | 0) == 21474836 & i6 >>> 0 < 2061584302));
+ }
+ if ((i8 + -48 | 0) >>> 0 < 10) {
+ do {
+ i8 = HEAP32[i2 >> 2] | 0;
+ if (i8 >>> 0 < (HEAP32[i4 >> 2] | 0) >>> 0) {
+ HEAP32[i2 >> 2] = i8 + 1;
+ i8 = HEAPU8[i8] | 0;
+ } else {
+ i8 = ___shgetc(i3) | 0;
+ }
+ } while ((i8 + -48 | 0) >>> 0 < 10);
+ }
+ if ((HEAP32[i4 >> 2] | 0) != 0) {
+ HEAP32[i2 >> 2] = (HEAP32[i2 >> 2] | 0) + -1;
+ }
+ i3 = (i5 | 0) != 0;
+ i2 = _i64Subtract(0, 0, i6 | 0, i7 | 0) | 0;
+ i4 = i3 ? tempRet0 : i7;
+ i8 = i3 ? i2 : i6;
+ tempRet0 = i4;
+ STACKTOP = i1;
+ return i8 | 0;
+}
+function _sweeplist(i3, i8, i9) {
+ i3 = i3 | 0;
+ i8 = i8 | 0;
+ i9 = i9 | 0;
+ var i1 = 0, i2 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i10 = 0, i11 = 0, i12 = 0;
+ i1 = STACKTOP;
+ i5 = i3 + 12 | 0;
+ i7 = HEAP32[i5 >> 2] | 0;
+ i6 = HEAPU8[i7 + 60 | 0] | 0;
+ i2 = i6 ^ 3;
+ i7 = (HEAP8[i7 + 62 | 0] | 0) == 2;
+ i4 = i7 ? 255 : 184;
+ i6 = i7 ? 64 : i6 & 3;
+ i7 = i7 ? 64 : 0;
+ i10 = HEAP32[i8 >> 2] | 0;
+ L1 : do {
+ if ((i10 | 0) == 0) {
+ i10 = 0;
+ } else {
+ i11 = i9;
+ L2 : while (1) {
+ i9 = i11 + -1 | 0;
+ if ((i11 | 0) == 0) {
+ break L1;
+ }
+ i11 = i10 + 5 | 0;
+ i12 = HEAPU8[i11] | 0;
+ L5 : do {
+ if (((i12 ^ 3) & i2 | 0) == 0) {
+ HEAP32[i8 >> 2] = HEAP32[i10 >> 2];
+ switch (HEAPU8[i10 + 4 | 0] | 0) {
+ case 4:
+ {
+ i12 = (HEAP32[i5 >> 2] | 0) + 28 | 0;
+ HEAP32[i12 >> 2] = (HEAP32[i12 >> 2] | 0) + -1;
+ break;
+ }
+ case 38:
+ {
+ _luaM_realloc_(i3, i10, (HEAPU8[i10 + 6 | 0] << 4) + 16 | 0, 0) | 0;
+ break L5;
+ }
+ case 6:
+ {
+ _luaM_realloc_(i3, i10, (HEAPU8[i10 + 6 | 0] << 2) + 16 | 0, 0) | 0;
+ break L5;
+ }
+ case 20:
+ {
+ break;
+ }
+ case 5:
+ {
+ _luaH_free(i3, i10);
+ break L5;
+ }
+ case 10:
+ {
+ _luaF_freeupval(i3, i10);
+ break L5;
+ }
+ case 8:
+ {
+ _luaE_freethread(i3, i10);
+ break L5;
+ }
+ case 9:
+ {
+ _luaF_freeproto(i3, i10);
+ break L5;
+ }
+ case 7:
+ {
+ _luaM_realloc_(i3, i10, (HEAP32[i10 + 16 >> 2] | 0) + 24 | 0, 0) | 0;
+ break L5;
+ }
+ default:
+ {
+ break L5;
+ }
+ }
+ _luaM_realloc_(i3, i10, (HEAP32[i10 + 12 >> 2] | 0) + 17 | 0, 0) | 0;
+ } else {
+ if ((i12 & i7 | 0) != 0) {
+ i2 = 0;
+ break L2;
+ }
+ if (((HEAP8[i10 + 4 | 0] | 0) == 8 ? (HEAP32[i10 + 28 >> 2] | 0) != 0 : 0) ? (_sweeplist(i3, i10 + 56 | 0, -3) | 0, _luaE_freeCI(i10), (HEAP8[(HEAP32[i5 >> 2] | 0) + 62 | 0] | 0) != 1) : 0) {
+ _luaD_shrinkstack(i10);
+ }
+ HEAP8[i11] = i12 & i4 | i6;
+ i8 = i10;
+ }
+ } while (0);
+ i10 = HEAP32[i8 >> 2] | 0;
+ if ((i10 | 0) == 0) {
+ i10 = 0;
+ break L1;
+ } else {
+ i11 = i9;
+ }
+ }
+ STACKTOP = i1;
+ return i2 | 0;
+ }
+ } while (0);
+ i12 = (i10 | 0) == 0 ? 0 : i8;
+ STACKTOP = i1;
+ return i12 | 0;
+}
+function _resume(i1, i6) {
+ i1 = i1 | 0;
+ i6 = i6 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0;
+ i2 = STACKTOP;
+ i3 = i1 + 16 | 0;
+ i5 = HEAP32[i3 >> 2] | 0;
+ if ((HEAPU16[i1 + 38 >> 1] | 0) > 199) {
+ _resume_error(i1, 2240, i6);
+ }
+ i4 = i1 + 6 | 0;
+ i7 = HEAP8[i4] | 0;
+ if (i7 << 24 >> 24 == 0) {
+ if ((i5 | 0) != (i1 + 72 | 0)) {
+ _resume_error(i1, 2448, i6);
+ }
+ if ((_luaD_precall(i1, i6 + -16 | 0, -1) | 0) != 0) {
+ STACKTOP = i2;
+ return;
+ }
+ _luaV_execute(i1);
+ STACKTOP = i2;
+ return;
+ } else if (i7 << 24 >> 24 == 1) {
+ HEAP8[i4] = 0;
+ i4 = i1 + 28 | 0;
+ HEAP32[i5 >> 2] = (HEAP32[i4 >> 2] | 0) + (HEAP32[i5 + 20 >> 2] | 0);
+ i8 = i5 + 18 | 0;
+ i7 = HEAP8[i8] | 0;
+ if ((i7 & 1) == 0) {
+ i9 = HEAP32[i5 + 28 >> 2] | 0;
+ if ((i9 | 0) != 0) {
+ HEAP8[i5 + 37 | 0] = 1;
+ HEAP8[i8] = i7 & 255 | 8;
+ i6 = FUNCTION_TABLE_ii[i9 & 255](i1) | 0;
+ i6 = (HEAP32[i1 + 8 >> 2] | 0) + (0 - i6 << 4) | 0;
+ }
+ i5 = HEAP32[i3 >> 2] | 0;
+ i7 = HEAPU8[i1 + 40 | 0] | 0;
+ if ((i7 & 6 | 0) == 0) {
+ i7 = i5 + 8 | 0;
+ } else {
+ if ((i7 & 2 | 0) != 0) {
+ i6 = i6 - (HEAP32[i4 >> 2] | 0) | 0;
+ _luaD_hook(i1, 1, -1);
+ i6 = (HEAP32[i4 >> 2] | 0) + i6 | 0;
+ }
+ i7 = i5 + 8 | 0;
+ HEAP32[i1 + 20 >> 2] = HEAP32[(HEAP32[i7 >> 2] | 0) + 28 >> 2];
+ }
+ i4 = HEAP32[i5 >> 2] | 0;
+ i5 = HEAP16[i5 + 16 >> 1] | 0;
+ HEAP32[i3 >> 2] = HEAP32[i7 >> 2];
+ i3 = i1 + 8 | 0;
+ L27 : do {
+ if (!(i5 << 16 >> 16 == 0)) {
+ i5 = i5 << 16 >> 16;
+ while (1) {
+ if (!(i6 >>> 0 < (HEAP32[i3 >> 2] | 0) >>> 0)) {
+ break;
+ }
+ i7 = i4 + 16 | 0;
+ i10 = i6;
+ i8 = HEAP32[i10 + 4 >> 2] | 0;
+ i9 = i4;
+ HEAP32[i9 >> 2] = HEAP32[i10 >> 2];
+ HEAP32[i9 + 4 >> 2] = i8;
+ HEAP32[i4 + 8 >> 2] = HEAP32[i6 + 8 >> 2];
+ i5 = i5 + -1 | 0;
+ if ((i5 | 0) == 0) {
+ i4 = i7;
+ break L27;
+ }
+ i6 = i6 + 16 | 0;
+ i4 = i7;
+ }
+ if ((i5 | 0) > 0) {
+ i7 = i5;
+ i6 = i4;
+ while (1) {
+ i7 = i7 + -1 | 0;
+ HEAP32[i6 + 8 >> 2] = 0;
+ if ((i7 | 0) <= 0) {
+ break;
+ } else {
+ i6 = i6 + 16 | 0;
+ }
+ }
+ i4 = i4 + (i5 << 4) | 0;
+ }
+ }
+ } while (0);
+ HEAP32[i3 >> 2] = i4;
+ } else {
+ _luaV_execute(i1);
+ }
+ _unroll(i1, 0);
+ STACKTOP = i2;
+ return;
+ } else {
+ _resume_error(i1, 2488, i6);
+ }
+}
+function _lua_setupvalue(i1, i5, i3) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i4 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0;
+ i2 = STACKTOP;
+ i6 = HEAP32[i1 + 16 >> 2] | 0;
+ do {
+ if ((i5 | 0) <= 0) {
+ if (!((i5 | 0) < -1000999)) {
+ i5 = (HEAP32[i1 + 8 >> 2] | 0) + (i5 << 4) | 0;
+ break;
+ }
+ if ((i5 | 0) == -1001e3) {
+ i5 = (HEAP32[i1 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i5 = -1001e3 - i5 | 0;
+ i6 = HEAP32[i6 >> 2] | 0;
+ if ((HEAP32[i6 + 8 >> 2] | 0) != 22 ? (i4 = HEAP32[i6 >> 2] | 0, (i5 | 0) <= (HEAPU8[i4 + 6 | 0] | 0 | 0)) : 0) {
+ i5 = i4 + (i5 + -1 << 4) + 16 | 0;
+ } else {
+ i5 = 5192;
+ }
+ } else {
+ i4 = (HEAP32[i6 >> 2] | 0) + (i5 << 4) | 0;
+ i5 = i4 >>> 0 < (HEAP32[i1 + 8 >> 2] | 0) >>> 0 ? i4 : 5192;
+ }
+ } while (0);
+ i4 = HEAP32[i5 + 8 >> 2] & 63;
+ do {
+ if ((i4 | 0) == 6) {
+ i5 = HEAP32[i5 >> 2] | 0;
+ i4 = HEAP32[i5 + 12 >> 2] | 0;
+ if ((i3 | 0) <= 0) {
+ i6 = 0;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ if ((HEAP32[i4 + 40 >> 2] | 0) < (i3 | 0)) {
+ i6 = 0;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ i6 = i3 + -1 | 0;
+ i3 = HEAP32[i5 + 16 + (i6 << 2) >> 2] | 0;
+ i5 = HEAP32[i3 + 8 >> 2] | 0;
+ i4 = HEAP32[(HEAP32[i4 + 28 >> 2] | 0) + (i6 << 3) >> 2] | 0;
+ if ((i4 | 0) == 0) {
+ i4 = 936;
+ } else {
+ i4 = i4 + 16 | 0;
+ }
+ } else if ((i4 | 0) == 38) {
+ i6 = HEAP32[i5 >> 2] | 0;
+ if ((i3 | 0) <= 0) {
+ i6 = 0;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ if ((HEAPU8[i6 + 6 | 0] | 0 | 0) >= (i3 | 0)) {
+ i4 = 936;
+ i5 = i6 + (i3 + -1 << 4) + 16 | 0;
+ i3 = i6;
+ break;
+ } else {
+ i6 = 0;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ } else {
+ i6 = 0;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ } while (0);
+ i6 = i1 + 8 | 0;
+ i7 = HEAP32[i6 >> 2] | 0;
+ i10 = i7 + -16 | 0;
+ HEAP32[i6 >> 2] = i10;
+ i9 = HEAP32[i10 + 4 >> 2] | 0;
+ i8 = i5;
+ HEAP32[i8 >> 2] = HEAP32[i10 >> 2];
+ HEAP32[i8 + 4 >> 2] = i9;
+ HEAP32[i5 + 8 >> 2] = HEAP32[i7 + -8 >> 2];
+ i5 = HEAP32[i6 >> 2] | 0;
+ if ((HEAP32[i5 + 8 >> 2] & 64 | 0) == 0) {
+ i10 = i4;
+ STACKTOP = i2;
+ return i10 | 0;
+ }
+ i5 = HEAP32[i5 >> 2] | 0;
+ if ((HEAP8[i5 + 5 | 0] & 3) == 0) {
+ i10 = i4;
+ STACKTOP = i2;
+ return i10 | 0;
+ }
+ if ((HEAP8[i3 + 5 | 0] & 4) == 0) {
+ i10 = i4;
+ STACKTOP = i2;
+ return i10 | 0;
+ }
+ _luaC_barrier_(i1, i3, i5);
+ i10 = i4;
+ STACKTOP = i2;
+ return i10 | 0;
+}
+function _luaC_forcestep(i2) {
+ i2 = i2 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0;
+ i1 = STACKTOP;
+ i3 = HEAP32[i2 + 12 >> 2] | 0;
+ do {
+ if ((HEAP8[i3 + 62 | 0] | 0) == 2) {
+ i4 = i3 + 20 | 0;
+ i6 = HEAP32[i4 >> 2] | 0;
+ do {
+ if ((i6 | 0) != 0) {
+ i5 = i3 + 61 | 0;
+ if ((HEAP8[i5] | 0) != 5) {
+ do {
+ _singlestep(i2) | 0;
+ } while ((HEAP8[i5] | 0) != 5);
+ }
+ HEAP8[i5] = 0;
+ i5 = HEAP32[i3 + 8 >> 2] | 0;
+ i7 = HEAP32[i3 + 12 >> 2] | 0;
+ if ((i7 + i5 | 0) >>> 0 > (Math_imul(HEAP32[i3 + 160 >> 2] | 0, (i6 >>> 0) / 100 | 0) | 0) >>> 0) {
+ HEAP32[i4 >> 2] = 0;
+ break;
+ } else {
+ HEAP32[i4 >> 2] = i6;
+ break;
+ }
+ } else {
+ _luaC_fullgc(i2, 0);
+ i5 = HEAP32[i3 + 8 >> 2] | 0;
+ i7 = HEAP32[i3 + 12 >> 2] | 0;
+ HEAP32[i4 >> 2] = i7 + i5;
+ }
+ } while (0);
+ i4 = i5 + i7 | 0;
+ i5 = (i4 | 0) / 100 | 0;
+ i6 = HEAP32[i3 + 156 >> 2] | 0;
+ if ((i6 | 0) < (2147483644 / (i5 | 0) | 0 | 0)) {
+ i5 = Math_imul(i6, i5) | 0;
+ } else {
+ i5 = 2147483644;
+ }
+ _luaE_setdebt(i3, i4 - i5 | 0);
+ i5 = i3 + 61 | 0;
+ } else {
+ i4 = i3 + 12 | 0;
+ i5 = HEAP32[i3 + 164 >> 2] | 0;
+ i7 = (i5 | 0) < 40 ? 40 : i5;
+ i5 = ((HEAP32[i4 >> 2] | 0) / 200 | 0) + 1 | 0;
+ if ((i5 | 0) < (2147483644 / (i7 | 0) | 0 | 0)) {
+ i8 = Math_imul(i5, i7) | 0;
+ } else {
+ i8 = 2147483644;
+ }
+ i5 = i3 + 61 | 0;
+ do {
+ i8 = i8 - (_singlestep(i2) | 0) | 0;
+ i9 = (HEAP8[i5] | 0) == 5;
+ if (!((i8 | 0) > -1600)) {
+ i6 = 17;
+ break;
+ }
+ } while (!i9);
+ if ((i6 | 0) == 17 ? !i9 : 0) {
+ _luaE_setdebt(i3, ((i8 | 0) / (i7 | 0) | 0) * 200 | 0);
+ break;
+ }
+ i6 = (HEAP32[i3 + 20 >> 2] | 0) / 100 | 0;
+ i7 = HEAP32[i3 + 156 >> 2] | 0;
+ if ((i7 | 0) < (2147483644 / (i6 | 0) | 0 | 0)) {
+ i6 = Math_imul(i7, i6) | 0;
+ } else {
+ i6 = 2147483644;
+ }
+ _luaE_setdebt(i3, (HEAP32[i3 + 8 >> 2] | 0) - i6 + (HEAP32[i4 >> 2] | 0) | 0);
+ }
+ } while (0);
+ i3 = i3 + 104 | 0;
+ if ((HEAP32[i3 >> 2] | 0) == 0) {
+ STACKTOP = i1;
+ return;
+ } else {
+ i4 = 0;
+ }
+ while (1) {
+ if ((i4 | 0) >= 4 ? (HEAP8[i5] | 0) != 5 : 0) {
+ i6 = 26;
+ break;
+ }
+ _GCTM(i2, 1);
+ if ((HEAP32[i3 >> 2] | 0) == 0) {
+ i6 = 26;
+ break;
+ } else {
+ i4 = i4 + 1 | 0;
+ }
+ }
+ if ((i6 | 0) == 26) {
+ STACKTOP = i1;
+ return;
+ }
+}
+function _luaL_loadfilex(i1, i9, i7) {
+ i1 = i1 | 0;
+ i9 = i9 | 0;
+ i7 = i7 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i8 = 0, i10 = 0, i11 = 0;
+ i5 = STACKTOP;
+ STACKTOP = STACKTOP + 1056 | 0;
+ i3 = i5;
+ i6 = i5 + 16 | 0;
+ i8 = i5 + 12 | 0;
+ i2 = (_lua_gettop(i1) | 0) + 1 | 0;
+ i4 = (i9 | 0) == 0;
+ if (!i4) {
+ HEAP32[i3 >> 2] = i9;
+ _lua_pushfstring(i1, 1304, i3) | 0;
+ i10 = _fopen(i9 | 0, 1312) | 0;
+ HEAP32[i6 + 4 >> 2] = i10;
+ if ((i10 | 0) == 0) {
+ i10 = _strerror(HEAP32[(___errno_location() | 0) >> 2] | 0) | 0;
+ i9 = (_lua_tolstring(i1, i2, 0) | 0) + 1 | 0;
+ HEAP32[i3 >> 2] = 1320;
+ HEAP32[i3 + 4 >> 2] = i9;
+ HEAP32[i3 + 8 >> 2] = i10;
+ _lua_pushfstring(i1, 1720, i3) | 0;
+ _lua_remove(i1, i2);
+ i10 = 7;
+ STACKTOP = i5;
+ return i10 | 0;
+ }
+ } else {
+ _lua_pushlstring(i1, 1296, 6) | 0;
+ HEAP32[i6 + 4 >> 2] = HEAP32[_stdin >> 2];
+ }
+ if ((_skipcomment(i6, i8) | 0) != 0) {
+ i10 = HEAP32[i6 >> 2] | 0;
+ HEAP32[i6 >> 2] = i10 + 1;
+ HEAP8[i6 + i10 + 8 | 0] = 10;
+ }
+ i10 = HEAP32[i8 >> 2] | 0;
+ do {
+ if (!((i10 | 0) != 27 | i4)) {
+ i11 = i6 + 4 | 0;
+ i10 = _freopen(i9 | 0, 1328, HEAP32[i11 >> 2] | 0) | 0;
+ HEAP32[i11 >> 2] = i10;
+ if ((i10 | 0) != 0) {
+ _skipcomment(i6, i8) | 0;
+ i10 = HEAP32[i8 >> 2] | 0;
+ break;
+ }
+ i11 = _strerror(HEAP32[(___errno_location() | 0) >> 2] | 0) | 0;
+ i10 = (_lua_tolstring(i1, i2, 0) | 0) + 1 | 0;
+ HEAP32[i3 >> 2] = 1336;
+ HEAP32[i3 + 4 >> 2] = i10;
+ HEAP32[i3 + 8 >> 2] = i11;
+ _lua_pushfstring(i1, 1720, i3) | 0;
+ _lua_remove(i1, i2);
+ i11 = 7;
+ STACKTOP = i5;
+ return i11 | 0;
+ }
+ } while (0);
+ if (!((i10 | 0) == -1)) {
+ i11 = HEAP32[i6 >> 2] | 0;
+ HEAP32[i6 >> 2] = i11 + 1;
+ HEAP8[i6 + i11 + 8 | 0] = i10;
+ }
+ i7 = _lua_load(i1, 1, i6, _lua_tolstring(i1, -1, 0) | 0, i7) | 0;
+ i8 = HEAP32[i6 + 4 >> 2] | 0;
+ i6 = _ferror(i8 | 0) | 0;
+ if (!i4) {
+ _fclose(i8 | 0) | 0;
+ }
+ if ((i6 | 0) == 0) {
+ _lua_remove(i1, i2);
+ i11 = i7;
+ STACKTOP = i5;
+ return i11 | 0;
+ } else {
+ _lua_settop(i1, i2);
+ i11 = _strerror(HEAP32[(___errno_location() | 0) >> 2] | 0) | 0;
+ i10 = (_lua_tolstring(i1, i2, 0) | 0) + 1 | 0;
+ HEAP32[i3 >> 2] = 1344;
+ HEAP32[i3 + 4 >> 2] = i10;
+ HEAP32[i3 + 8 >> 2] = i11;
+ _lua_pushfstring(i1, 1720, i3) | 0;
+ _lua_remove(i1, i2);
+ i11 = 7;
+ STACKTOP = i5;
+ return i11 | 0;
+ }
+ return 0;
+}
+function _newupvalue(i3, i1, i2) {
+ i3 = i3 | 0;
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ var i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0;
+ i4 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i12 = i4;
+ i5 = HEAP32[i3 >> 2] | 0;
+ i9 = i5 + 40 | 0;
+ i7 = HEAP32[i9 >> 2] | 0;
+ i6 = i3 + 47 | 0;
+ i10 = HEAPU8[i6] | 0;
+ if ((i10 + 1 | 0) >>> 0 > 255) {
+ i11 = i3 + 12 | 0;
+ i8 = HEAP32[(HEAP32[i11 >> 2] | 0) + 52 >> 2] | 0;
+ i13 = HEAP32[i5 + 64 >> 2] | 0;
+ if ((i13 | 0) == 0) {
+ i15 = 6552;
+ HEAP32[i12 >> 2] = 6880;
+ i14 = i12 + 4 | 0;
+ HEAP32[i14 >> 2] = 255;
+ i14 = i12 + 8 | 0;
+ HEAP32[i14 >> 2] = i15;
+ i14 = _luaO_pushfstring(i8, 6592, i12) | 0;
+ i15 = HEAP32[i11 >> 2] | 0;
+ _luaX_syntaxerror(i15, i14);
+ }
+ HEAP32[i12 >> 2] = i13;
+ i14 = _luaO_pushfstring(i8, 6568, i12) | 0;
+ HEAP32[i12 >> 2] = 6880;
+ i15 = i12 + 4 | 0;
+ HEAP32[i15 >> 2] = 255;
+ i15 = i12 + 8 | 0;
+ HEAP32[i15 >> 2] = i14;
+ i15 = _luaO_pushfstring(i8, 6592, i12) | 0;
+ i14 = HEAP32[i11 >> 2] | 0;
+ _luaX_syntaxerror(i14, i15);
+ }
+ if ((i10 | 0) < (i7 | 0)) {
+ i8 = i7;
+ } else {
+ i8 = i5 + 28 | 0;
+ HEAP32[i8 >> 2] = _luaM_growaux_(HEAP32[(HEAP32[i3 + 12 >> 2] | 0) + 52 >> 2] | 0, HEAP32[i8 >> 2] | 0, i9, 8, 255, 6880) | 0;
+ i8 = HEAP32[i9 >> 2] | 0;
+ }
+ i9 = i5 + 28 | 0;
+ if ((i7 | 0) < (i8 | 0)) {
+ while (1) {
+ i10 = i7 + 1 | 0;
+ HEAP32[(HEAP32[i9 >> 2] | 0) + (i7 << 3) >> 2] = 0;
+ if ((i10 | 0) < (i8 | 0)) {
+ i7 = i10;
+ } else {
+ break;
+ }
+ }
+ }
+ HEAP8[(HEAP32[i9 >> 2] | 0) + ((HEAPU8[i6] | 0) << 3) + 4 | 0] = (HEAP32[i2 >> 2] | 0) == 7 | 0;
+ HEAP8[(HEAP32[i9 >> 2] | 0) + ((HEAPU8[i6] | 0) << 3) + 5 | 0] = HEAP32[i2 + 8 >> 2];
+ HEAP32[(HEAP32[i9 >> 2] | 0) + ((HEAPU8[i6] | 0) << 3) >> 2] = i1;
+ if ((HEAP8[i1 + 5 | 0] & 3) == 0) {
+ i15 = HEAP8[i6] | 0;
+ i14 = i15 + 1 << 24 >> 24;
+ HEAP8[i6] = i14;
+ i15 = i15 & 255;
+ STACKTOP = i4;
+ return i15 | 0;
+ }
+ if ((HEAP8[i5 + 5 | 0] & 4) == 0) {
+ i15 = HEAP8[i6] | 0;
+ i14 = i15 + 1 << 24 >> 24;
+ HEAP8[i6] = i14;
+ i15 = i15 & 255;
+ STACKTOP = i4;
+ return i15 | 0;
+ }
+ _luaC_barrier_(HEAP32[(HEAP32[i3 + 12 >> 2] | 0) + 52 >> 2] | 0, i5, i1);
+ i15 = HEAP8[i6] | 0;
+ i14 = i15 + 1 << 24 >> 24;
+ HEAP8[i6] = i14;
+ i15 = i15 & 255;
+ STACKTOP = i4;
+ return i15 | 0;
+}
+function _close_func(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0;
+ i6 = STACKTOP;
+ i2 = HEAP32[i1 + 52 >> 2] | 0;
+ i5 = i1 + 48 | 0;
+ i4 = HEAP32[i5 >> 2] | 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ _luaK_ret(i4, 0, 0);
+ _leaveblock(i4);
+ i7 = i4 + 20 | 0;
+ i8 = HEAP32[i7 >> 2] | 0;
+ if ((i8 + 1 | 0) >>> 0 > 1073741823) {
+ _luaM_toobig(i2);
+ }
+ i10 = i3 + 12 | 0;
+ i9 = i3 + 48 | 0;
+ HEAP32[i10 >> 2] = _luaM_realloc_(i2, HEAP32[i10 >> 2] | 0, HEAP32[i9 >> 2] << 2, i8 << 2) | 0;
+ HEAP32[i9 >> 2] = HEAP32[i7 >> 2];
+ i8 = HEAP32[i7 >> 2] | 0;
+ if ((i8 + 1 | 0) >>> 0 > 1073741823) {
+ _luaM_toobig(i2);
+ }
+ i9 = i3 + 20 | 0;
+ i10 = i3 + 52 | 0;
+ HEAP32[i9 >> 2] = _luaM_realloc_(i2, HEAP32[i9 >> 2] | 0, HEAP32[i10 >> 2] << 2, i8 << 2) | 0;
+ HEAP32[i10 >> 2] = HEAP32[i7 >> 2];
+ i8 = i4 + 32 | 0;
+ i7 = HEAP32[i8 >> 2] | 0;
+ if ((i7 + 1 | 0) >>> 0 > 268435455) {
+ _luaM_toobig(i2);
+ }
+ i9 = i3 + 8 | 0;
+ i10 = i3 + 44 | 0;
+ HEAP32[i9 >> 2] = _luaM_realloc_(i2, HEAP32[i9 >> 2] | 0, HEAP32[i10 >> 2] << 4, i7 << 4) | 0;
+ HEAP32[i10 >> 2] = HEAP32[i8 >> 2];
+ i8 = i4 + 36 | 0;
+ i7 = HEAP32[i8 >> 2] | 0;
+ if ((i7 + 1 | 0) >>> 0 > 1073741823) {
+ _luaM_toobig(i2);
+ }
+ i9 = i3 + 16 | 0;
+ i10 = i3 + 56 | 0;
+ HEAP32[i9 >> 2] = _luaM_realloc_(i2, HEAP32[i9 >> 2] | 0, HEAP32[i10 >> 2] << 2, i7 << 2) | 0;
+ HEAP32[i10 >> 2] = HEAP32[i8 >> 2];
+ i7 = i4 + 44 | 0;
+ i8 = HEAP16[i7 >> 1] | 0;
+ if ((i8 + 1 | 0) >>> 0 > 357913941) {
+ _luaM_toobig(i2);
+ }
+ i10 = i3 + 24 | 0;
+ i9 = i3 + 60 | 0;
+ HEAP32[i10 >> 2] = _luaM_realloc_(i2, HEAP32[i10 >> 2] | 0, (HEAP32[i9 >> 2] | 0) * 12 | 0, i8 * 12 | 0) | 0;
+ HEAP32[i9 >> 2] = HEAP16[i7 >> 1] | 0;
+ i9 = i4 + 47 | 0;
+ i8 = i3 + 28 | 0;
+ i10 = i3 + 40 | 0;
+ HEAP32[i8 >> 2] = _luaM_realloc_(i2, HEAP32[i8 >> 2] | 0, HEAP32[i10 >> 2] << 3, HEAPU8[i9] << 3) | 0;
+ HEAP32[i10 >> 2] = HEAPU8[i9] | 0;
+ HEAP32[i5 >> 2] = HEAP32[i4 + 8 >> 2];
+ if (((HEAP32[i1 + 16 >> 2] | 0) + -288 | 0) >>> 0 < 2) {
+ i10 = HEAP32[i1 + 24 >> 2] | 0;
+ _luaX_newstring(i1, i10 + 16 | 0, HEAP32[i10 + 12 >> 2] | 0) | 0;
+ }
+ i10 = i2 + 8 | 0;
+ HEAP32[i10 >> 2] = (HEAP32[i10 >> 2] | 0) + -16;
+ if ((HEAP32[(HEAP32[i2 + 12 >> 2] | 0) + 12 >> 2] | 0) <= 0) {
+ STACKTOP = i6;
+ return;
+ }
+ _luaC_step(i2);
+ STACKTOP = i6;
+ return;
+}
+function _lua_topointer(i3, i6) {
+ i3 = i3 | 0;
+ i6 = i6 | 0;
+ var i1 = 0, i2 = 0, i4 = 0, i5 = 0, i7 = 0, i8 = 0, i9 = 0;
+ i1 = STACKTOP;
+ i4 = HEAP32[i3 + 16 >> 2] | 0;
+ i5 = (i6 | 0) > 0;
+ do {
+ if (!i5) {
+ if (!((i6 | 0) < -1000999)) {
+ i7 = (HEAP32[i3 + 8 >> 2] | 0) + (i6 << 4) | 0;
+ break;
+ }
+ if ((i6 | 0) == -1001e3) {
+ i7 = (HEAP32[i3 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i8 = -1001e3 - i6 | 0;
+ i9 = HEAP32[i4 >> 2] | 0;
+ if ((HEAP32[i9 + 8 >> 2] | 0) != 22 ? (i7 = HEAP32[i9 >> 2] | 0, (i8 | 0) <= (HEAPU8[i7 + 6 | 0] | 0 | 0)) : 0) {
+ i7 = i7 + (i8 + -1 << 4) + 16 | 0;
+ } else {
+ i7 = 5192;
+ }
+ } else {
+ i7 = (HEAP32[i4 >> 2] | 0) + (i6 << 4) | 0;
+ i7 = i7 >>> 0 < (HEAP32[i3 + 8 >> 2] | 0) >>> 0 ? i7 : 5192;
+ }
+ } while (0);
+ switch (HEAP32[i7 + 8 >> 2] & 63 | 0) {
+ case 22:
+ {
+ i9 = HEAP32[i7 >> 2] | 0;
+ STACKTOP = i1;
+ return i9 | 0;
+ }
+ case 2:
+ case 7:
+ {
+ do {
+ if (!i5) {
+ if (!((i6 | 0) < -1000999)) {
+ i2 = (HEAP32[i3 + 8 >> 2] | 0) + (i6 << 4) | 0;
+ break;
+ }
+ if ((i6 | 0) == -1001e3) {
+ i2 = (HEAP32[i3 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i3 = -1001e3 - i6 | 0;
+ i4 = HEAP32[i4 >> 2] | 0;
+ if ((HEAP32[i4 + 8 >> 2] | 0) != 22 ? (i2 = HEAP32[i4 >> 2] | 0, (i3 | 0) <= (HEAPU8[i2 + 6 | 0] | 0 | 0)) : 0) {
+ i2 = i2 + (i3 + -1 << 4) + 16 | 0;
+ } else {
+ i2 = 5192;
+ }
+ } else {
+ i2 = (HEAP32[i4 >> 2] | 0) + (i6 << 4) | 0;
+ i2 = i2 >>> 0 < (HEAP32[i3 + 8 >> 2] | 0) >>> 0 ? i2 : 5192;
+ }
+ } while (0);
+ i3 = HEAP32[i2 + 8 >> 2] & 15;
+ if ((i3 | 0) == 7) {
+ i9 = (HEAP32[i2 >> 2] | 0) + 24 | 0;
+ STACKTOP = i1;
+ return i9 | 0;
+ } else if ((i3 | 0) == 2) {
+ i9 = HEAP32[i2 >> 2] | 0;
+ STACKTOP = i1;
+ return i9 | 0;
+ } else {
+ i9 = 0;
+ STACKTOP = i1;
+ return i9 | 0;
+ }
+ }
+ case 8:
+ {
+ i9 = HEAP32[i7 >> 2] | 0;
+ STACKTOP = i1;
+ return i9 | 0;
+ }
+ case 5:
+ {
+ i9 = HEAP32[i7 >> 2] | 0;
+ STACKTOP = i1;
+ return i9 | 0;
+ }
+ case 38:
+ {
+ i9 = HEAP32[i7 >> 2] | 0;
+ STACKTOP = i1;
+ return i9 | 0;
+ }
+ case 6:
+ {
+ i9 = HEAP32[i7 >> 2] | 0;
+ STACKTOP = i1;
+ return i9 | 0;
+ }
+ default:
+ {
+ i9 = 0;
+ STACKTOP = i1;
+ return i9 | 0;
+ }
+ }
+ return 0;
+}
+function _luaH_get(i4, i6) {
+ i4 = i4 | 0;
+ i6 = i6 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, d5 = 0.0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, d11 = 0.0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i8 = i3 + 8 | 0;
+ i9 = i3;
+ i7 = i6 + 8 | 0;
+ i10 = HEAP32[i7 >> 2] & 63;
+ if ((i10 | 0) == 4) {
+ i6 = HEAP32[i6 >> 2] | 0;
+ i7 = (HEAP32[i4 + 16 >> 2] | 0) + (((1 << (HEAPU8[i4 + 7 | 0] | 0)) + -1 & HEAP32[i6 + 8 >> 2]) << 5) | 0;
+ while (1) {
+ if ((HEAP32[i7 + 24 >> 2] | 0) == 68 ? (HEAP32[i7 + 16 >> 2] | 0) == (i6 | 0) : 0) {
+ break;
+ }
+ i4 = HEAP32[i7 + 28 >> 2] | 0;
+ if ((i4 | 0) == 0) {
+ i2 = 5192;
+ i1 = 22;
+ break;
+ } else {
+ i7 = i4;
+ }
+ }
+ if ((i1 | 0) == 22) {
+ STACKTOP = i3;
+ return i2 | 0;
+ }
+ i10 = i7;
+ STACKTOP = i3;
+ return i10 | 0;
+ } else if ((i10 | 0) == 3) {
+ d11 = +HEAPF64[i6 >> 3];
+ HEAPF64[i9 >> 3] = d11 + 6755399441055744.0;
+ i9 = HEAP32[i9 >> 2] | 0;
+ d5 = +(i9 | 0);
+ if (d5 == d11) {
+ i6 = i9 + -1 | 0;
+ if (i6 >>> 0 < (HEAP32[i4 + 28 >> 2] | 0) >>> 0) {
+ i10 = (HEAP32[i4 + 12 >> 2] | 0) + (i6 << 4) | 0;
+ STACKTOP = i3;
+ return i10 | 0;
+ }
+ HEAPF64[i8 >> 3] = d5 + 1.0;
+ i6 = (HEAP32[i8 + 4 >> 2] | 0) + (HEAP32[i8 >> 2] | 0) | 0;
+ if ((i6 | 0) < 0) {
+ i7 = 0 - i6 | 0;
+ i6 = (i6 | 0) == (i7 | 0) ? 0 : i7;
+ }
+ i4 = (HEAP32[i4 + 16 >> 2] | 0) + (((i6 | 0) % ((1 << (HEAPU8[i4 + 7 | 0] | 0)) + -1 | 1 | 0) | 0) << 5) | 0;
+ while (1) {
+ if ((HEAP32[i4 + 24 >> 2] | 0) == 3 ? +HEAPF64[i4 + 16 >> 3] == d5 : 0) {
+ break;
+ }
+ i6 = HEAP32[i4 + 28 >> 2] | 0;
+ if ((i6 | 0) == 0) {
+ i2 = 5192;
+ i1 = 22;
+ break;
+ } else {
+ i4 = i6;
+ }
+ }
+ if ((i1 | 0) == 22) {
+ STACKTOP = i3;
+ return i2 | 0;
+ }
+ i10 = i4;
+ STACKTOP = i3;
+ return i10 | 0;
+ }
+ } else if ((i10 | 0) == 0) {
+ i10 = 5192;
+ STACKTOP = i3;
+ return i10 | 0;
+ }
+ i8 = _mainposition(i4, i6) | 0;
+ while (1) {
+ if ((HEAP32[i8 + 24 >> 2] | 0) == (HEAP32[i7 >> 2] | 0) ? (_luaV_equalobj_(0, i8 + 16 | 0, i6) | 0) != 0 : 0) {
+ break;
+ }
+ i4 = HEAP32[i8 + 28 >> 2] | 0;
+ if ((i4 | 0) == 0) {
+ i2 = 5192;
+ i1 = 22;
+ break;
+ } else {
+ i8 = i4;
+ }
+ }
+ if ((i1 | 0) == 22) {
+ STACKTOP = i3;
+ return i2 | 0;
+ }
+ i10 = i8;
+ STACKTOP = i3;
+ return i10 | 0;
+}
+function _suffixedexp(i1, i8) {
+ i1 = i1 | 0;
+ i8 = i8 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 80 | 0;
+ i10 = i2 + 48 | 0;
+ i3 = i2 + 24 | 0;
+ i6 = i2;
+ i4 = i1 + 48 | 0;
+ i9 = HEAP32[i4 >> 2] | 0;
+ i5 = HEAP32[i1 + 4 >> 2] | 0;
+ i7 = i1 + 16 | 0;
+ i12 = HEAP32[i7 >> 2] | 0;
+ if ((i12 | 0) == 40) {
+ _luaX_next(i1);
+ _subexpr(i1, i8, 0) | 0;
+ _check_match(i1, 41, 40, i5);
+ _luaK_dischargevars(HEAP32[i4 >> 2] | 0, i8);
+ i11 = i1 + 24 | 0;
+ } else if ((i12 | 0) == 288) {
+ i11 = i1 + 24 | 0;
+ i13 = HEAP32[i11 >> 2] | 0;
+ _luaX_next(i1);
+ i12 = HEAP32[i4 >> 2] | 0;
+ if ((_singlevaraux(i12, i13, i8, 1) | 0) == 0) {
+ _singlevaraux(i12, HEAP32[i1 + 72 >> 2] | 0, i8, 1) | 0;
+ i13 = _luaK_stringK(HEAP32[i4 >> 2] | 0, i13) | 0;
+ HEAP32[i10 + 16 >> 2] = -1;
+ HEAP32[i10 + 20 >> 2] = -1;
+ HEAP32[i10 >> 2] = 4;
+ HEAP32[i10 + 8 >> 2] = i13;
+ _luaK_indexed(i12, i8, i10);
+ }
+ } else {
+ _luaX_syntaxerror(i1, 6656);
+ }
+ i10 = i6 + 16 | 0;
+ i12 = i6 + 20 | 0;
+ i13 = i6 + 8 | 0;
+ L7 : while (1) {
+ switch (HEAP32[i7 >> 2] | 0) {
+ case 46:
+ {
+ _fieldsel(i1, i8);
+ continue L7;
+ }
+ case 91:
+ {
+ _luaK_exp2anyregup(i9, i8);
+ _luaX_next(i1);
+ _subexpr(i1, i3, 0) | 0;
+ _luaK_exp2val(HEAP32[i4 >> 2] | 0, i3);
+ if ((HEAP32[i7 >> 2] | 0) != 93) {
+ i3 = 10;
+ break L7;
+ }
+ _luaX_next(i1);
+ _luaK_indexed(i9, i8, i3);
+ continue L7;
+ }
+ case 58:
+ {
+ _luaX_next(i1);
+ if ((HEAP32[i7 >> 2] | 0) != 288) {
+ i3 = 13;
+ break L7;
+ }
+ i14 = HEAP32[i11 >> 2] | 0;
+ _luaX_next(i1);
+ i14 = _luaK_stringK(HEAP32[i4 >> 2] | 0, i14) | 0;
+ HEAP32[i10 >> 2] = -1;
+ HEAP32[i12 >> 2] = -1;
+ HEAP32[i6 >> 2] = 4;
+ HEAP32[i13 >> 2] = i14;
+ _luaK_self(i9, i8, i6);
+ _funcargs(i1, i8, i5);
+ continue L7;
+ }
+ case 123:
+ case 289:
+ case 40:
+ {
+ _luaK_exp2nextreg(i9, i8);
+ _funcargs(i1, i8, i5);
+ continue L7;
+ }
+ default:
+ {
+ i3 = 16;
+ break L7;
+ }
+ }
+ }
+ if ((i3 | 0) == 10) {
+ _error_expected(i1, 93);
+ } else if ((i3 | 0) == 13) {
+ _error_expected(i1, 288);
+ } else if ((i3 | 0) == 16) {
+ STACKTOP = i2;
+ return;
+ }
+}
+function _luaK_patchlist(i2, i7, i3) {
+ i2 = i2 | 0;
+ i7 = i7 | 0;
+ i3 = i3 | 0;
+ var i1 = 0, i4 = 0, i5 = 0, i6 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0;
+ i1 = STACKTOP;
+ if ((HEAP32[i2 + 20 >> 2] | 0) == (i3 | 0)) {
+ HEAP32[i2 + 24 >> 2] = i3;
+ i3 = i2 + 28 | 0;
+ if ((i7 | 0) == -1) {
+ STACKTOP = i1;
+ return;
+ }
+ i6 = HEAP32[i3 >> 2] | 0;
+ if ((i6 | 0) == -1) {
+ HEAP32[i3 >> 2] = i7;
+ STACKTOP = i1;
+ return;
+ }
+ i5 = HEAP32[(HEAP32[i2 >> 2] | 0) + 12 >> 2] | 0;
+ while (1) {
+ i3 = i5 + (i6 << 2) | 0;
+ i4 = HEAP32[i3 >> 2] | 0;
+ i8 = (i4 >>> 14) + -131071 | 0;
+ if ((i8 | 0) == -1) {
+ break;
+ }
+ i8 = i6 + 1 + i8 | 0;
+ if ((i8 | 0) == -1) {
+ break;
+ } else {
+ i6 = i8;
+ }
+ }
+ i5 = ~i6 + i7 | 0;
+ if ((((i5 | 0) > -1 ? i5 : 0 - i5 | 0) | 0) > 131071) {
+ _luaX_syntaxerror(HEAP32[i2 + 12 >> 2] | 0, 10624);
+ }
+ HEAP32[i3 >> 2] = (i5 << 14) + 2147467264 | i4 & 16383;
+ STACKTOP = i1;
+ return;
+ }
+ if ((i7 | 0) == -1) {
+ STACKTOP = i1;
+ return;
+ }
+ i6 = HEAP32[(HEAP32[i2 >> 2] | 0) + 12 >> 2] | 0;
+ i10 = i7;
+ while (1) {
+ i7 = i6 + (i10 << 2) | 0;
+ i9 = HEAP32[i7 >> 2] | 0;
+ i8 = (i9 >>> 14) + -131071 | 0;
+ if ((i8 | 0) == -1) {
+ i8 = -1;
+ } else {
+ i8 = i10 + 1 + i8 | 0;
+ }
+ if ((i10 | 0) > 0 ? (i4 = i6 + (i10 + -1 << 2) | 0, i5 = HEAP32[i4 >> 2] | 0, (HEAP8[5584 + (i5 & 63) | 0] | 0) < 0) : 0) {
+ i12 = i4;
+ i11 = i5;
+ } else {
+ i12 = i7;
+ i11 = i9;
+ }
+ if ((i11 & 63 | 0) == 28) {
+ HEAP32[i12 >> 2] = i11 & 8372224 | i11 >>> 23 << 6 | 27;
+ i9 = ~i10 + i3 | 0;
+ if ((((i9 | 0) > -1 ? i9 : 0 - i9 | 0) | 0) > 131071) {
+ i3 = 20;
+ break;
+ }
+ i9 = HEAP32[i7 >> 2] & 16383 | (i9 << 14) + 2147467264;
+ } else {
+ i10 = ~i10 + i3 | 0;
+ if ((((i10 | 0) > -1 ? i10 : 0 - i10 | 0) | 0) > 131071) {
+ i3 = 23;
+ break;
+ }
+ i9 = i9 & 16383 | (i10 << 14) + 2147467264;
+ }
+ HEAP32[i7 >> 2] = i9;
+ if ((i8 | 0) == -1) {
+ i3 = 26;
+ break;
+ } else {
+ i10 = i8;
+ }
+ }
+ if ((i3 | 0) == 20) {
+ _luaX_syntaxerror(HEAP32[i2 + 12 >> 2] | 0, 10624);
+ } else if ((i3 | 0) == 23) {
+ _luaX_syntaxerror(HEAP32[i2 + 12 >> 2] | 0, 10624);
+ } else if ((i3 | 0) == 26) {
+ STACKTOP = i1;
+ return;
+ }
+}
+function _luaG_typeerror(i5, i6, i1) {
+ i5 = i5 | 0;
+ i6 = i6 | 0;
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 32 | 0;
+ i3 = i2;
+ i2 = i2 + 16 | 0;
+ i8 = HEAP32[i5 + 16 >> 2] | 0;
+ HEAP32[i2 >> 2] = 0;
+ i4 = HEAP32[8528 + ((HEAP32[i6 + 8 >> 2] & 15) + 1 << 2) >> 2] | 0;
+ L1 : do {
+ if (!((HEAP8[i8 + 18 | 0] & 1) == 0)) {
+ i7 = HEAP32[HEAP32[i8 >> 2] >> 2] | 0;
+ i10 = HEAP8[i7 + 6 | 0] | 0;
+ L3 : do {
+ if (!(i10 << 24 >> 24 == 0)) {
+ i9 = i7 + 16 | 0;
+ i11 = i10 & 255;
+ i10 = 0;
+ while (1) {
+ i12 = i10 + 1 | 0;
+ if ((HEAP32[(HEAP32[i9 + (i10 << 2) >> 2] | 0) + 8 >> 2] | 0) == (i6 | 0)) {
+ break;
+ }
+ if ((i12 | 0) < (i11 | 0)) {
+ i10 = i12;
+ } else {
+ break L3;
+ }
+ }
+ i9 = HEAP32[(HEAP32[(HEAP32[i7 + 12 >> 2] | 0) + 28 >> 2] | 0) + (i10 << 3) >> 2] | 0;
+ if ((i9 | 0) == 0) {
+ i9 = 2104;
+ } else {
+ i9 = i9 + 16 | 0;
+ }
+ HEAP32[i2 >> 2] = i9;
+ i11 = i9;
+ i10 = 2072;
+ HEAP32[i3 >> 2] = i1;
+ i12 = i3 + 4 | 0;
+ HEAP32[i12 >> 2] = i10;
+ i12 = i3 + 8 | 0;
+ HEAP32[i12 >> 2] = i11;
+ i12 = i3 + 12 | 0;
+ HEAP32[i12 >> 2] = i4;
+ _luaG_runerror(i5, 1840, i3);
+ }
+ } while (0);
+ i9 = HEAP32[i8 + 24 >> 2] | 0;
+ i10 = HEAP32[i8 + 4 >> 2] | 0;
+ if (i9 >>> 0 < i10 >>> 0) {
+ i12 = i9;
+ while (1) {
+ i11 = i12 + 16 | 0;
+ if ((i12 | 0) == (i6 | 0)) {
+ break;
+ }
+ if (i11 >>> 0 < i10 >>> 0) {
+ i12 = i11;
+ } else {
+ break L1;
+ }
+ }
+ i12 = HEAP32[i7 + 12 >> 2] | 0;
+ i6 = _getobjname(i12, ((HEAP32[i8 + 28 >> 2] | 0) - (HEAP32[i12 + 12 >> 2] | 0) >> 2) + -1 | 0, i6 - i9 >> 4, i2) | 0;
+ if ((i6 | 0) != 0) {
+ i11 = HEAP32[i2 >> 2] | 0;
+ i10 = i6;
+ HEAP32[i3 >> 2] = i1;
+ i12 = i3 + 4 | 0;
+ HEAP32[i12 >> 2] = i10;
+ i12 = i3 + 8 | 0;
+ HEAP32[i12 >> 2] = i11;
+ i12 = i3 + 12 | 0;
+ HEAP32[i12 >> 2] = i4;
+ _luaG_runerror(i5, 1840, i3);
+ }
+ }
+ }
+ } while (0);
+ HEAP32[i3 >> 2] = i1;
+ HEAP32[i3 + 4 >> 2] = i4;
+ _luaG_runerror(i5, 1880, i3);
+}
+function _lua_setmetatable(i1, i7) {
+ i1 = i1 | 0;
+ i7 = i7 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i8 = 0;
+ i4 = STACKTOP;
+ i6 = HEAP32[i1 + 16 >> 2] | 0;
+ do {
+ if ((i7 | 0) <= 0) {
+ if (!((i7 | 0) < -1000999)) {
+ i5 = (HEAP32[i1 + 8 >> 2] | 0) + (i7 << 4) | 0;
+ break;
+ }
+ if ((i7 | 0) == -1001e3) {
+ i5 = (HEAP32[i1 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i7 = -1001e3 - i7 | 0;
+ i6 = HEAP32[i6 >> 2] | 0;
+ if ((HEAP32[i6 + 8 >> 2] | 0) != 22 ? (i5 = HEAP32[i6 >> 2] | 0, (i7 | 0) <= (HEAPU8[i5 + 6 | 0] | 0 | 0)) : 0) {
+ i5 = i5 + (i7 + -1 << 4) + 16 | 0;
+ } else {
+ i5 = 5192;
+ }
+ } else {
+ i5 = (HEAP32[i6 >> 2] | 0) + (i7 << 4) | 0;
+ i5 = i5 >>> 0 < (HEAP32[i1 + 8 >> 2] | 0) >>> 0 ? i5 : 5192;
+ }
+ } while (0);
+ i6 = i1 + 8 | 0;
+ i7 = HEAP32[i6 >> 2] | 0;
+ if ((HEAP32[i7 + -8 >> 2] | 0) == 0) {
+ i7 = 0;
+ } else {
+ i7 = HEAP32[i7 + -16 >> 2] | 0;
+ }
+ i8 = HEAP32[i5 + 8 >> 2] & 15;
+ if ((i8 | 0) == 5) {
+ HEAP32[(HEAP32[i5 >> 2] | 0) + 8 >> 2] = i7;
+ if ((i7 | 0) == 0) {
+ i8 = HEAP32[i6 >> 2] | 0;
+ i8 = i8 + -16 | 0;
+ HEAP32[i6 >> 2] = i8;
+ STACKTOP = i4;
+ return 1;
+ }
+ if (!((HEAP8[i7 + 5 | 0] & 3) == 0) ? (i2 = HEAP32[i5 >> 2] | 0, !((HEAP8[i2 + 5 | 0] & 4) == 0)) : 0) {
+ _luaC_barrierback_(i1, i2);
+ }
+ _luaC_checkfinalizer(i1, HEAP32[i5 >> 2] | 0, i7);
+ i8 = HEAP32[i6 >> 2] | 0;
+ i8 = i8 + -16 | 0;
+ HEAP32[i6 >> 2] = i8;
+ STACKTOP = i4;
+ return 1;
+ } else if ((i8 | 0) == 7) {
+ HEAP32[(HEAP32[i5 >> 2] | 0) + 8 >> 2] = i7;
+ if ((i7 | 0) == 0) {
+ i8 = HEAP32[i6 >> 2] | 0;
+ i8 = i8 + -16 | 0;
+ HEAP32[i6 >> 2] = i8;
+ STACKTOP = i4;
+ return 1;
+ }
+ if (!((HEAP8[i7 + 5 | 0] & 3) == 0) ? (i3 = HEAP32[i5 >> 2] | 0, !((HEAP8[i3 + 5 | 0] & 4) == 0)) : 0) {
+ _luaC_barrier_(i1, i3, i7);
+ }
+ _luaC_checkfinalizer(i1, HEAP32[i5 >> 2] | 0, i7);
+ i8 = HEAP32[i6 >> 2] | 0;
+ i8 = i8 + -16 | 0;
+ HEAP32[i6 >> 2] = i8;
+ STACKTOP = i4;
+ return 1;
+ } else {
+ HEAP32[(HEAP32[i1 + 12 >> 2] | 0) + (i8 << 2) + 252 >> 2] = i7;
+ i8 = HEAP32[i6 >> 2] | 0;
+ i8 = i8 + -16 | 0;
+ HEAP32[i6 >> 2] = i8;
+ STACKTOP = i4;
+ return 1;
+ }
+ return 0;
+}
+function _recfield(i2, i10) {
+ i2 = i2 | 0;
+ i10 = i10 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 64 | 0;
+ i9 = i1 + 48 | 0;
+ i6 = i1 + 24 | 0;
+ i3 = i1;
+ i13 = i2 + 48 | 0;
+ i8 = HEAP32[i13 >> 2] | 0;
+ i5 = i8 + 48 | 0;
+ i4 = HEAP8[i5] | 0;
+ i7 = i2 + 16 | 0;
+ do {
+ if ((HEAP32[i7 >> 2] | 0) != 288) {
+ _luaX_next(i2);
+ _subexpr(i2, i6, 0) | 0;
+ _luaK_exp2val(HEAP32[i13 >> 2] | 0, i6);
+ if ((HEAP32[i7 >> 2] | 0) == 93) {
+ _luaX_next(i2);
+ i11 = i10 + 28 | 0;
+ break;
+ } else {
+ _error_expected(i2, 93);
+ }
+ } else {
+ i12 = i10 + 28 | 0;
+ if ((HEAP32[i12 >> 2] | 0) <= 2147483645) {
+ i11 = HEAP32[i2 + 24 >> 2] | 0;
+ _luaX_next(i2);
+ i11 = _luaK_stringK(HEAP32[i13 >> 2] | 0, i11) | 0;
+ HEAP32[i6 + 16 >> 2] = -1;
+ HEAP32[i6 + 20 >> 2] = -1;
+ HEAP32[i6 >> 2] = 4;
+ HEAP32[i6 + 8 >> 2] = i11;
+ i11 = i12;
+ break;
+ }
+ i14 = i8 + 12 | 0;
+ i13 = HEAP32[(HEAP32[i14 >> 2] | 0) + 52 >> 2] | 0;
+ i12 = HEAP32[(HEAP32[i8 >> 2] | 0) + 64 >> 2] | 0;
+ if ((i12 | 0) == 0) {
+ i16 = 6552;
+ HEAP32[i9 >> 2] = 6528;
+ i15 = i9 + 4 | 0;
+ HEAP32[i15 >> 2] = 2147483645;
+ i15 = i9 + 8 | 0;
+ HEAP32[i15 >> 2] = i16;
+ i15 = _luaO_pushfstring(i13, 6592, i9) | 0;
+ i16 = HEAP32[i14 >> 2] | 0;
+ _luaX_syntaxerror(i16, i15);
+ }
+ HEAP32[i9 >> 2] = i12;
+ i15 = _luaO_pushfstring(i13, 6568, i9) | 0;
+ HEAP32[i9 >> 2] = 6528;
+ i16 = i9 + 4 | 0;
+ HEAP32[i16 >> 2] = 2147483645;
+ i16 = i9 + 8 | 0;
+ HEAP32[i16 >> 2] = i15;
+ i16 = _luaO_pushfstring(i13, 6592, i9) | 0;
+ i15 = HEAP32[i14 >> 2] | 0;
+ _luaX_syntaxerror(i15, i16);
+ }
+ } while (0);
+ HEAP32[i11 >> 2] = (HEAP32[i11 >> 2] | 0) + 1;
+ if ((HEAP32[i7 >> 2] | 0) == 61) {
+ _luaX_next(i2);
+ i16 = _luaK_exp2RK(i8, i6) | 0;
+ _subexpr(i2, i3, 0) | 0;
+ i15 = HEAP32[(HEAP32[i10 + 24 >> 2] | 0) + 8 >> 2] | 0;
+ _luaK_codeABC(i8, 10, i15, i16, _luaK_exp2RK(i8, i3) | 0) | 0;
+ HEAP8[i5] = i4;
+ STACKTOP = i1;
+ return;
+ } else {
+ _error_expected(i2, 61);
+ }
+}
+function _lua_newstate(i3, i6) {
+ i3 = i3 | 0;
+ i6 = i6 | 0;
+ var i1 = 0, i2 = 0, i4 = 0, i5 = 0, i7 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 32 | 0;
+ i5 = i1 + 8 | 0;
+ i4 = i1;
+ i2 = FUNCTION_TABLE_iiiii[i3 & 3](i6, 0, 8, 400) | 0;
+ if ((i2 | 0) == 0) {
+ i6 = 0;
+ STACKTOP = i1;
+ return i6 | 0;
+ }
+ i7 = i2 + 112 | 0;
+ HEAP32[i2 >> 2] = 0;
+ HEAP8[i2 + 4 | 0] = 8;
+ HEAP8[i2 + 172 | 0] = 33;
+ HEAP8[i2 + 5 | 0] = 1;
+ HEAP8[i2 + 174 | 0] = 0;
+ HEAP32[i2 + 12 >> 2] = i7;
+ HEAP32[i2 + 28 >> 2] = 0;
+ HEAP32[i2 + 16 >> 2] = 0;
+ HEAP32[i2 + 32 >> 2] = 0;
+ HEAP32[i2 + 64 >> 2] = 0;
+ HEAP16[i2 + 38 >> 1] = 0;
+ HEAP32[i2 + 52 >> 2] = 0;
+ HEAP8[i2 + 40 | 0] = 0;
+ HEAP32[i2 + 44 >> 2] = 0;
+ HEAP8[i2 + 41 | 0] = 1;
+ HEAP32[i2 + 48 >> 2] = 0;
+ HEAP32[i2 + 56 >> 2] = 0;
+ HEAP16[i2 + 36 >> 1] = 1;
+ HEAP8[i2 + 6 | 0] = 0;
+ HEAP32[i2 + 68 >> 2] = 0;
+ HEAP32[i7 >> 2] = i3;
+ HEAP32[i2 + 116 >> 2] = i6;
+ HEAP32[i2 + 284 >> 2] = i2;
+ i3 = _time(0) | 0;
+ HEAP32[i4 >> 2] = i3;
+ HEAP32[i5 >> 2] = i2;
+ HEAP32[i5 + 4 >> 2] = i4;
+ HEAP32[i5 + 8 >> 2] = 5192;
+ HEAP32[i5 + 12 >> 2] = 1;
+ HEAP32[i2 + 168 >> 2] = _luaS_hash(i5, 16, i3) | 0;
+ i4 = i2 + 224 | 0;
+ HEAP32[i2 + 240 >> 2] = i4;
+ HEAP32[i2 + 244 >> 2] = i4;
+ HEAP8[i2 + 175 | 0] = 0;
+ i4 = i2 + 132 | 0;
+ HEAP32[i2 + 160 >> 2] = 0;
+ HEAP32[i2 + 256 >> 2] = 0;
+ HEAP32[i2 + 264 >> 2] = 0;
+ HEAP32[i2 + 280 >> 2] = 0;
+ HEAP32[i4 + 0 >> 2] = 0;
+ HEAP32[i4 + 4 >> 2] = 0;
+ HEAP32[i4 + 8 >> 2] = 0;
+ HEAP32[i4 + 12 >> 2] = 0;
+ HEAP32[i2 + 288 >> 2] = _lua_version(0) | 0;
+ HEAP8[i2 + 173 | 0] = 5;
+ i4 = i2 + 120 | 0;
+ i5 = i2 + 180 | 0;
+ i3 = i5 + 40 | 0;
+ do {
+ HEAP32[i5 >> 2] = 0;
+ i5 = i5 + 4 | 0;
+ } while ((i5 | 0) < (i3 | 0));
+ HEAP32[i4 >> 2] = 400;
+ HEAP32[i2 + 124 >> 2] = 0;
+ HEAP32[i2 + 268 >> 2] = 200;
+ HEAP32[i2 + 272 >> 2] = 200;
+ HEAP32[i2 + 276 >> 2] = 200;
+ i5 = i2 + 364 | 0;
+ i3 = i5 + 36 | 0;
+ do {
+ HEAP32[i5 >> 2] = 0;
+ i5 = i5 + 4 | 0;
+ } while ((i5 | 0) < (i3 | 0));
+ if ((_luaD_rawrunprotected(i2, 8, 0) | 0) == 0) {
+ i7 = i2;
+ STACKTOP = i1;
+ return i7 | 0;
+ }
+ _close_state(i2);
+ i7 = 0;
+ STACKTOP = i1;
+ return i7 | 0;
+}
+function _luaU_undump(i1, i7, i8, i9) {
+ i1 = i1 | 0;
+ i7 = i7 | 0;
+ i8 = i8 | 0;
+ i9 = i9 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 64 | 0;
+ i4 = i2 + 16 | 0;
+ i5 = i2 + 34 | 0;
+ i3 = i2;
+ i6 = HEAP8[i9] | 0;
+ if (i6 << 24 >> 24 == 27) {
+ HEAP32[i3 + 12 >> 2] = 8800;
+ } else if (i6 << 24 >> 24 == 61 | i6 << 24 >> 24 == 64) {
+ HEAP32[i3 + 12 >> 2] = i9 + 1;
+ } else {
+ HEAP32[i3 + 12 >> 2] = i9;
+ }
+ HEAP32[i3 >> 2] = i1;
+ HEAP32[i3 + 4 >> 2] = i7;
+ HEAP32[i3 + 8 >> 2] = i8;
+ HEAP32[i4 >> 2] = 1635077147;
+ HEAP8[i4 + 4 | 0] = 82;
+ HEAP8[i4 + 5 | 0] = 0;
+ HEAP8[i4 + 6 | 0] = 1;
+ HEAP8[i4 + 7 | 0] = 4;
+ HEAP8[i4 + 8 | 0] = 4;
+ HEAP8[i4 + 9 | 0] = 4;
+ HEAP8[i4 + 10 | 0] = 8;
+ i9 = i4 + 12 | 0;
+ HEAP8[i4 + 11 | 0] = 0;
+ HEAP8[i9 + 0 | 0] = HEAP8[8816 | 0] | 0;
+ HEAP8[i9 + 1 | 0] = HEAP8[8817 | 0] | 0;
+ HEAP8[i9 + 2 | 0] = HEAP8[8818 | 0] | 0;
+ HEAP8[i9 + 3 | 0] = HEAP8[8819 | 0] | 0;
+ HEAP8[i9 + 4 | 0] = HEAP8[8820 | 0] | 0;
+ HEAP8[i9 + 5 | 0] = HEAP8[8821 | 0] | 0;
+ HEAP8[i5] = 27;
+ if ((_luaZ_read(i7, i5 + 1 | 0, 17) | 0) != 0) {
+ _error(i3, 8824);
+ }
+ if ((_memcmp(i4, i5, 18) | 0) == 0) {
+ i4 = _luaF_newLclosure(i1, 1) | 0;
+ i5 = i1 + 8 | 0;
+ i9 = HEAP32[i5 >> 2] | 0;
+ HEAP32[i9 >> 2] = i4;
+ HEAP32[i9 + 8 >> 2] = 70;
+ i9 = (HEAP32[i5 >> 2] | 0) + 16 | 0;
+ HEAP32[i5 >> 2] = i9;
+ if (((HEAP32[i1 + 24 >> 2] | 0) - i9 | 0) < 16) {
+ _luaD_growstack(i1, 0);
+ }
+ i9 = _luaF_newproto(i1) | 0;
+ i6 = i4 + 12 | 0;
+ HEAP32[i6 >> 2] = i9;
+ _LoadFunction(i3, i9);
+ i6 = HEAP32[i6 >> 2] | 0;
+ i3 = HEAP32[i6 + 40 >> 2] | 0;
+ if ((i3 | 0) == 1) {
+ i9 = i4;
+ STACKTOP = i2;
+ return i9 | 0;
+ }
+ i9 = _luaF_newLclosure(i1, i3) | 0;
+ HEAP32[i9 + 12 >> 2] = i6;
+ i8 = HEAP32[i5 >> 2] | 0;
+ HEAP32[i8 + -16 >> 2] = i9;
+ HEAP32[i8 + -8 >> 2] = 70;
+ STACKTOP = i2;
+ return i9 | 0;
+ }
+ if ((_memcmp(i4, i5, 4) | 0) != 0) {
+ _error(i3, 8888);
+ }
+ if ((_memcmp(i4, i5, 6) | 0) != 0) {
+ _error(i3, 8896);
+ }
+ if ((_memcmp(i4, i5, 12) | 0) == 0) {
+ _error(i3, 8872);
+ } else {
+ _error(i3, 8920);
+ }
+ return 0;
+}
+function _lua_compare(i2, i7, i5, i3) {
+ i2 = i2 | 0;
+ i7 = i7 | 0;
+ i5 = i5 | 0;
+ i3 = i3 | 0;
+ var i1 = 0, i4 = 0, i6 = 0, i8 = 0;
+ i1 = STACKTOP;
+ i4 = HEAP32[i2 + 16 >> 2] | 0;
+ do {
+ if ((i7 | 0) <= 0) {
+ if (!((i7 | 0) < -1000999)) {
+ i6 = (HEAP32[i2 + 8 >> 2] | 0) + (i7 << 4) | 0;
+ break;
+ }
+ if ((i7 | 0) == -1001e3) {
+ i6 = (HEAP32[i2 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i7 = -1001e3 - i7 | 0;
+ i8 = HEAP32[i4 >> 2] | 0;
+ if ((HEAP32[i8 + 8 >> 2] | 0) != 22 ? (i6 = HEAP32[i8 >> 2] | 0, (i7 | 0) <= (HEAPU8[i6 + 6 | 0] | 0 | 0)) : 0) {
+ i6 = i6 + (i7 + -1 << 4) + 16 | 0;
+ } else {
+ i6 = 5192;
+ }
+ } else {
+ i6 = (HEAP32[i4 >> 2] | 0) + (i7 << 4) | 0;
+ i6 = i6 >>> 0 < (HEAP32[i2 + 8 >> 2] | 0) >>> 0 ? i6 : 5192;
+ }
+ } while (0);
+ do {
+ if ((i5 | 0) <= 0) {
+ if (!((i5 | 0) < -1000999)) {
+ i4 = (HEAP32[i2 + 8 >> 2] | 0) + (i5 << 4) | 0;
+ break;
+ }
+ if ((i5 | 0) == -1001e3) {
+ i4 = (HEAP32[i2 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i5 = -1001e3 - i5 | 0;
+ i4 = HEAP32[i4 >> 2] | 0;
+ if ((HEAP32[i4 + 8 >> 2] | 0) == 22) {
+ i8 = 0;
+ STACKTOP = i1;
+ return i8 | 0;
+ }
+ i4 = HEAP32[i4 >> 2] | 0;
+ if ((i5 | 0) > (HEAPU8[i4 + 6 | 0] | 0 | 0)) {
+ i8 = 0;
+ STACKTOP = i1;
+ return i8 | 0;
+ } else {
+ i4 = i4 + (i5 + -1 << 4) + 16 | 0;
+ break;
+ }
+ } else {
+ i4 = (HEAP32[i4 >> 2] | 0) + (i5 << 4) | 0;
+ i4 = i4 >>> 0 < (HEAP32[i2 + 8 >> 2] | 0) >>> 0 ? i4 : 5192;
+ }
+ } while (0);
+ if ((i6 | 0) == 5192 | (i4 | 0) == 5192) {
+ i8 = 0;
+ STACKTOP = i1;
+ return i8 | 0;
+ }
+ if ((i3 | 0) == 1) {
+ i8 = _luaV_lessthan(i2, i6, i4) | 0;
+ STACKTOP = i1;
+ return i8 | 0;
+ } else if ((i3 | 0) == 2) {
+ i8 = _luaV_lessequal(i2, i6, i4) | 0;
+ STACKTOP = i1;
+ return i8 | 0;
+ } else if ((i3 | 0) == 0) {
+ if ((HEAP32[i6 + 8 >> 2] | 0) == (HEAP32[i4 + 8 >> 2] | 0)) {
+ i2 = (_luaV_equalobj_(i2, i6, i4) | 0) != 0;
+ } else {
+ i2 = 0;
+ }
+ i8 = i2 & 1;
+ STACKTOP = i1;
+ return i8 | 0;
+ } else {
+ i8 = 0;
+ STACKTOP = i1;
+ return i8 | 0;
+ }
+ return 0;
+}
+function _lexerror(i7, i3, i8) {
+ i7 = i7 | 0;
+ i3 = i3 | 0;
+ i8 = i8 | 0;
+ var i1 = 0, i2 = 0, i4 = 0, i5 = 0, i6 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0;
+ i12 = STACKTOP;
+ STACKTOP = STACKTOP + 80 | 0;
+ i2 = i12;
+ i12 = i12 + 12 | 0;
+ _luaO_chunkid(i12, (HEAP32[i7 + 68 >> 2] | 0) + 16 | 0, 60);
+ i1 = i7 + 52 | 0;
+ i4 = HEAP32[i1 >> 2] | 0;
+ i13 = HEAP32[i7 + 4 >> 2] | 0;
+ HEAP32[i2 >> 2] = i12;
+ HEAP32[i2 + 4 >> 2] = i13;
+ HEAP32[i2 + 8 >> 2] = i3;
+ i4 = _luaO_pushfstring(i4, 12592, i2) | 0;
+ if ((i8 | 0) == 0) {
+ i13 = HEAP32[i1 >> 2] | 0;
+ _luaD_throw(i13, 3);
+ }
+ i3 = HEAP32[i1 >> 2] | 0;
+ do {
+ if (!((i8 + -287 | 0) >>> 0 < 3)) {
+ if ((i8 | 0) >= 257) {
+ i5 = HEAP32[12096 + (i8 + -257 << 2) >> 2] | 0;
+ if ((i8 | 0) >= 286) {
+ break;
+ }
+ HEAP32[i2 >> 2] = i5;
+ i5 = _luaO_pushfstring(i3, 12256, i2) | 0;
+ break;
+ }
+ if ((HEAP8[i8 + 10913 | 0] & 4) == 0) {
+ HEAP32[i2 >> 2] = i8;
+ i5 = _luaO_pushfstring(i3, 12240, i2) | 0;
+ break;
+ } else {
+ HEAP32[i2 >> 2] = i8;
+ i5 = _luaO_pushfstring(i3, 12232, i2) | 0;
+ break;
+ }
+ } else {
+ i11 = i7 + 60 | 0;
+ i12 = HEAP32[i11 >> 2] | 0;
+ i10 = i12 + 4 | 0;
+ i13 = HEAP32[i10 >> 2] | 0;
+ i8 = i12 + 8 | 0;
+ i9 = HEAP32[i8 >> 2] | 0;
+ do {
+ if ((i13 + 1 | 0) >>> 0 > i9 >>> 0) {
+ if (i9 >>> 0 > 2147483645) {
+ _lexerror(i7, 12368, 0);
+ }
+ i7 = i9 << 1;
+ if ((i7 | 0) == -2) {
+ _luaM_toobig(i3);
+ } else {
+ i6 = _luaM_realloc_(i3, HEAP32[i12 >> 2] | 0, i9, i7) | 0;
+ HEAP32[i12 >> 2] = i6;
+ HEAP32[i8 >> 2] = i7;
+ i5 = HEAP32[i10 >> 2] | 0;
+ break;
+ }
+ } else {
+ i5 = i13;
+ i6 = HEAP32[i12 >> 2] | 0;
+ }
+ } while (0);
+ HEAP32[i10 >> 2] = i5 + 1;
+ HEAP8[i6 + i5 | 0] = 0;
+ i5 = HEAP32[i1 >> 2] | 0;
+ HEAP32[i2 >> 2] = HEAP32[HEAP32[i11 >> 2] >> 2];
+ i5 = _luaO_pushfstring(i5, 12256, i2) | 0;
+ }
+ } while (0);
+ HEAP32[i2 >> 2] = i4;
+ HEAP32[i2 + 4 >> 2] = i5;
+ _luaO_pushfstring(i3, 12608, i2) | 0;
+ i13 = HEAP32[i1 >> 2] | 0;
+ _luaD_throw(i13, 3);
+}
+function _luaV_objlen(i2, i5, i1) {
+ i2 = i2 | 0;
+ i5 = i5 | 0;
+ i1 = i1 | 0;
+ var i3 = 0, i4 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0;
+ i3 = STACKTOP;
+ i4 = i1 + 8 | 0;
+ i8 = HEAP32[i4 >> 2] & 15;
+ do {
+ if ((i8 | 0) == 5) {
+ i7 = HEAP32[i1 >> 2] | 0;
+ i8 = HEAP32[i7 + 8 >> 2] | 0;
+ if (((i8 | 0) != 0 ? (HEAP8[i8 + 6 | 0] & 16) == 0 : 0) ? (i6 = _luaT_gettm(i8, 4, HEAP32[(HEAP32[i2 + 12 >> 2] | 0) + 200 >> 2] | 0) | 0, (i6 | 0) != 0) : 0) {
+ i7 = i6;
+ break;
+ }
+ HEAPF64[i5 >> 3] = +(_luaH_getn(i7) | 0);
+ HEAP32[i5 + 8 >> 2] = 3;
+ STACKTOP = i3;
+ return;
+ } else if ((i8 | 0) != 4) {
+ i6 = _luaT_gettmbyobj(i2, i1, 4) | 0;
+ if ((HEAP32[i6 + 8 >> 2] | 0) == 0) {
+ _luaG_typeerror(i2, i1, 9024);
+ } else {
+ i7 = i6;
+ }
+ } else {
+ HEAPF64[i5 >> 3] = +((HEAP32[(HEAP32[i1 >> 2] | 0) + 12 >> 2] | 0) >>> 0);
+ HEAP32[i5 + 8 >> 2] = 3;
+ STACKTOP = i3;
+ return;
+ }
+ } while (0);
+ i6 = i2 + 28 | 0;
+ i8 = i5 - (HEAP32[i6 >> 2] | 0) | 0;
+ i5 = i2 + 8 | 0;
+ i11 = HEAP32[i5 >> 2] | 0;
+ HEAP32[i5 >> 2] = i11 + 16;
+ i12 = i7;
+ i10 = HEAP32[i12 + 4 >> 2] | 0;
+ i9 = i11;
+ HEAP32[i9 >> 2] = HEAP32[i12 >> 2];
+ HEAP32[i9 + 4 >> 2] = i10;
+ HEAP32[i11 + 8 >> 2] = HEAP32[i7 + 8 >> 2];
+ i7 = HEAP32[i5 >> 2] | 0;
+ HEAP32[i5 >> 2] = i7 + 16;
+ i11 = i1;
+ i9 = HEAP32[i11 + 4 >> 2] | 0;
+ i10 = i7;
+ HEAP32[i10 >> 2] = HEAP32[i11 >> 2];
+ HEAP32[i10 + 4 >> 2] = i9;
+ HEAP32[i7 + 8 >> 2] = HEAP32[i4 >> 2];
+ i7 = HEAP32[i5 >> 2] | 0;
+ HEAP32[i5 >> 2] = i7 + 16;
+ i10 = i1;
+ i9 = HEAP32[i10 + 4 >> 2] | 0;
+ i1 = i7;
+ HEAP32[i1 >> 2] = HEAP32[i10 >> 2];
+ HEAP32[i1 + 4 >> 2] = i9;
+ HEAP32[i7 + 8 >> 2] = HEAP32[i4 >> 2];
+ _luaD_call(i2, (HEAP32[i5 >> 2] | 0) + -48 | 0, 1, HEAP8[(HEAP32[i2 + 16 >> 2] | 0) + 18 | 0] & 1);
+ i7 = HEAP32[i6 >> 2] | 0;
+ i6 = HEAP32[i5 >> 2] | 0;
+ i2 = i6 + -16 | 0;
+ HEAP32[i5 >> 2] = i2;
+ i4 = HEAP32[i2 + 4 >> 2] | 0;
+ i5 = i7 + i8 | 0;
+ HEAP32[i5 >> 2] = HEAP32[i2 >> 2];
+ HEAP32[i5 + 4 >> 2] = i4;
+ HEAP32[i7 + (i8 + 8) >> 2] = HEAP32[i6 + -8 >> 2];
+ STACKTOP = i3;
+ return;
+}
+function _get_equalTM(i6, i5, i4) {
+ i6 = i6 | 0;
+ i5 = i5 | 0;
+ i4 = i4 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i7 = 0;
+ i1 = STACKTOP;
+ L1 : do {
+ if (((i5 | 0) != 0 ? (HEAP8[i5 + 6 | 0] & 32) == 0 : 0) ? (i7 = i6 + 12 | 0, i2 = _luaT_gettm(i5, 5, HEAP32[(HEAP32[i7 >> 2] | 0) + 204 >> 2] | 0) | 0, (i2 | 0) != 0) : 0) {
+ if ((i5 | 0) != (i4 | 0)) {
+ if (((i4 | 0) != 0 ? (HEAP8[i4 + 6 | 0] & 32) == 0 : 0) ? (i3 = _luaT_gettm(i4, 5, HEAP32[(HEAP32[i7 >> 2] | 0) + 204 >> 2] | 0) | 0, (i3 | 0) != 0) : 0) {
+ i4 = HEAP32[i2 + 8 >> 2] | 0;
+ L9 : do {
+ if ((i4 | 0) == (HEAP32[i3 + 8 >> 2] | 0)) {
+ switch (i4 & 63 | 0) {
+ case 3:
+ {
+ i3 = +HEAPF64[i2 >> 3] == +HEAPF64[i3 >> 3] | 0;
+ break;
+ }
+ case 22:
+ {
+ i3 = (HEAP32[i2 >> 2] | 0) == (HEAP32[i3 >> 2] | 0) | 0;
+ break;
+ }
+ case 5:
+ {
+ if ((HEAP32[i2 >> 2] | 0) == (HEAP32[i3 >> 2] | 0)) {
+ break L1;
+ } else {
+ break L9;
+ }
+ }
+ case 1:
+ {
+ i3 = (HEAP32[i2 >> 2] | 0) == (HEAP32[i3 >> 2] | 0) | 0;
+ break;
+ }
+ case 4:
+ {
+ i3 = (HEAP32[i2 >> 2] | 0) == (HEAP32[i3 >> 2] | 0) | 0;
+ break;
+ }
+ case 0:
+ {
+ break L1;
+ }
+ case 7:
+ {
+ if ((HEAP32[i2 >> 2] | 0) == (HEAP32[i3 >> 2] | 0)) {
+ break L1;
+ } else {
+ break L9;
+ }
+ }
+ case 2:
+ {
+ i3 = (HEAP32[i2 >> 2] | 0) == (HEAP32[i3 >> 2] | 0) | 0;
+ break;
+ }
+ case 20:
+ {
+ i3 = _luaS_eqlngstr(HEAP32[i2 >> 2] | 0, HEAP32[i3 >> 2] | 0) | 0;
+ break;
+ }
+ default:
+ {
+ i3 = (HEAP32[i2 >> 2] | 0) == (HEAP32[i3 >> 2] | 0) | 0;
+ }
+ }
+ if ((i3 | 0) != 0) {
+ break L1;
+ }
+ }
+ } while (0);
+ i2 = 0;
+ } else {
+ i2 = 0;
+ }
+ }
+ } else {
+ i2 = 0;
+ }
+ } while (0);
+ STACKTOP = i1;
+ return i2 | 0;
+}
+function _luaS_newlstr(i2, i4, i3) {
+ i2 = i2 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ var i1 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0;
+ i1 = STACKTOP;
+ if (!(i3 >>> 0 < 41)) {
+ if ((i3 + 1 | 0) >>> 0 > 4294967277) {
+ _luaM_toobig(i2);
+ }
+ i10 = HEAP32[(HEAP32[i2 + 12 >> 2] | 0) + 56 >> 2] | 0;
+ i11 = _luaC_newobj(i2, 20, i3 + 17 | 0, 0, 0) | 0;
+ HEAP32[i11 + 12 >> 2] = i3;
+ HEAP32[i11 + 8 >> 2] = i10;
+ HEAP8[i11 + 6 | 0] = 0;
+ i10 = i11 + 16 | 0;
+ _memcpy(i10 | 0, i4 | 0, i3 | 0) | 0;
+ HEAP8[i10 + i3 | 0] = 0;
+ STACKTOP = i1;
+ return i11 | 0;
+ }
+ i5 = HEAP32[i2 + 12 >> 2] | 0;
+ i6 = HEAP32[i5 + 56 >> 2] ^ i3;
+ i7 = (i3 >>> 5) + 1 | 0;
+ if (!(i7 >>> 0 > i3 >>> 0)) {
+ i8 = i3;
+ do {
+ i6 = (i6 << 5) + (i6 >>> 2) + (HEAPU8[i4 + (i8 + -1) | 0] | 0) ^ i6;
+ i8 = i8 - i7 | 0;
+ } while (!(i8 >>> 0 < i7 >>> 0));
+ }
+ i10 = i5 + 32 | 0;
+ i9 = HEAP32[i10 >> 2] | 0;
+ i7 = i5 + 24 | 0;
+ i8 = HEAP32[i7 >> 2] | 0;
+ i11 = HEAP32[i8 + ((i9 + -1 & i6) << 2) >> 2] | 0;
+ L12 : do {
+ if ((i11 | 0) != 0) {
+ while (1) {
+ if (((i6 | 0) == (HEAP32[i11 + 8 >> 2] | 0) ? (HEAP32[i11 + 12 >> 2] | 0) == (i3 | 0) : 0) ? (_memcmp(i4, i11 + 16 | 0, i3) | 0) == 0 : 0) {
+ break;
+ }
+ i11 = HEAP32[i11 >> 2] | 0;
+ if ((i11 | 0) == 0) {
+ break L12;
+ }
+ }
+ i2 = i11 + 5 | 0;
+ i3 = (HEAPU8[i2] | 0) ^ 3;
+ if ((((HEAPU8[i5 + 60 | 0] | 0) ^ 3) & i3 | 0) != 0) {
+ STACKTOP = i1;
+ return i11 | 0;
+ }
+ HEAP8[i2] = i3;
+ STACKTOP = i1;
+ return i11 | 0;
+ }
+ } while (0);
+ i5 = i5 + 28 | 0;
+ if ((HEAP32[i5 >> 2] | 0) >>> 0 >= i9 >>> 0 & (i9 | 0) < 1073741823) {
+ _luaS_resize(i2, i9 << 1);
+ i9 = HEAP32[i10 >> 2] | 0;
+ i8 = HEAP32[i7 >> 2] | 0;
+ }
+ i11 = _luaC_newobj(i2, 4, i3 + 17 | 0, i8 + ((i9 + -1 & i6) << 2) | 0, 0) | 0;
+ HEAP32[i11 + 12 >> 2] = i3;
+ HEAP32[i11 + 8 >> 2] = i6;
+ HEAP8[i11 + 6 | 0] = 0;
+ i10 = i11 + 16 | 0;
+ _memcpy(i10 | 0, i4 | 0, i3 | 0) | 0;
+ HEAP8[i10 + i3 | 0] = 0;
+ HEAP32[i5 >> 2] = (HEAP32[i5 >> 2] | 0) + 1;
+ STACKTOP = i1;
+ return i11 | 0;
+}
+function _lua_pcallk(i3, i7, i2, i9, i6, i5) {
+ i3 = i3 | 0;
+ i7 = i7 | 0;
+ i2 = i2 | 0;
+ i9 = i9 | 0;
+ i6 = i6 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i4 = 0, i8 = 0, i10 = 0, i11 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i4 = i1;
+ if ((i9 | 0) == 0) {
+ i9 = 0;
+ } else {
+ i10 = HEAP32[i3 + 16 >> 2] | 0;
+ do {
+ if ((i9 | 0) <= 0) {
+ if (!((i9 | 0) < -1000999)) {
+ i8 = (HEAP32[i3 + 8 >> 2] | 0) + (i9 << 4) | 0;
+ break;
+ }
+ if ((i9 | 0) == -1001e3) {
+ i8 = (HEAP32[i3 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i9 = -1001e3 - i9 | 0;
+ i10 = HEAP32[i10 >> 2] | 0;
+ if ((HEAP32[i10 + 8 >> 2] | 0) != 22 ? (i8 = HEAP32[i10 >> 2] | 0, (i9 | 0) <= (HEAPU8[i8 + 6 | 0] | 0)) : 0) {
+ i8 = i8 + (i9 + -1 << 4) + 16 | 0;
+ } else {
+ i8 = 5192;
+ }
+ } else {
+ i8 = (HEAP32[i10 >> 2] | 0) + (i9 << 4) | 0;
+ i8 = i8 >>> 0 < (HEAP32[i3 + 8 >> 2] | 0) >>> 0 ? i8 : 5192;
+ }
+ } while (0);
+ i9 = i8 - (HEAP32[i3 + 28 >> 2] | 0) | 0;
+ }
+ i8 = i3 + 8 | 0;
+ i7 = (HEAP32[i8 >> 2] | 0) + (~i7 << 4) | 0;
+ HEAP32[i4 >> 2] = i7;
+ if ((i5 | 0) != 0 ? (HEAP16[i3 + 36 >> 1] | 0) == 0 : 0) {
+ i11 = HEAP32[i3 + 16 >> 2] | 0;
+ HEAP32[i11 + 28 >> 2] = i5;
+ HEAP32[i11 + 24 >> 2] = i6;
+ HEAP32[i11 + 20 >> 2] = (HEAP32[i4 >> 2] | 0) - (HEAP32[i3 + 28 >> 2] | 0);
+ HEAP8[i11 + 36 | 0] = HEAP8[i3 + 41 | 0] | 0;
+ i10 = i3 + 68 | 0;
+ i7 = i11 + 32 | 0;
+ HEAP32[i7 >> 2] = HEAP32[i10 >> 2];
+ HEAP32[i10 >> 2] = i9;
+ i9 = i11 + 18 | 0;
+ HEAP8[i9] = HEAPU8[i9] | 16;
+ _luaD_call(i3, HEAP32[i4 >> 2] | 0, i2, 1);
+ HEAP8[i9] = HEAP8[i9] & 239;
+ HEAP32[i10 >> 2] = HEAP32[i7 >> 2];
+ i4 = 0;
+ } else {
+ HEAP32[i4 + 4 >> 2] = i2;
+ i4 = _luaD_pcall(i3, 3, i4, i7 - (HEAP32[i3 + 28 >> 2] | 0) | 0, i9) | 0;
+ }
+ if (!((i2 | 0) == -1)) {
+ STACKTOP = i1;
+ return i4 | 0;
+ }
+ i2 = (HEAP32[i3 + 16 >> 2] | 0) + 4 | 0;
+ i3 = HEAP32[i8 >> 2] | 0;
+ if (!((HEAP32[i2 >> 2] | 0) >>> 0 < i3 >>> 0)) {
+ STACKTOP = i1;
+ return i4 | 0;
+ }
+ HEAP32[i2 >> 2] = i3;
+ STACKTOP = i1;
+ return i4 | 0;
+}
+function _lua_getupvalue(i1, i6, i3) {
+ i1 = i1 | 0;
+ i6 = i6 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i4 = 0, i5 = 0, i7 = 0, i8 = 0;
+ i2 = STACKTOP;
+ i5 = HEAP32[i1 + 16 >> 2] | 0;
+ do {
+ if ((i6 | 0) <= 0) {
+ if (!((i6 | 0) < -1000999)) {
+ i4 = (HEAP32[i1 + 8 >> 2] | 0) + (i6 << 4) | 0;
+ break;
+ }
+ if ((i6 | 0) == -1001e3) {
+ i4 = (HEAP32[i1 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i6 = -1001e3 - i6 | 0;
+ i5 = HEAP32[i5 >> 2] | 0;
+ if ((HEAP32[i5 + 8 >> 2] | 0) != 22 ? (i4 = HEAP32[i5 >> 2] | 0, (i6 | 0) <= (HEAPU8[i4 + 6 | 0] | 0 | 0)) : 0) {
+ i4 = i4 + (i6 + -1 << 4) + 16 | 0;
+ } else {
+ i4 = 5192;
+ }
+ } else {
+ i4 = (HEAP32[i5 >> 2] | 0) + (i6 << 4) | 0;
+ i4 = i4 >>> 0 < (HEAP32[i1 + 8 >> 2] | 0) >>> 0 ? i4 : 5192;
+ }
+ } while (0);
+ i5 = HEAP32[i4 + 8 >> 2] & 63;
+ do {
+ if ((i5 | 0) == 38) {
+ i5 = HEAP32[i4 >> 2] | 0;
+ if ((i3 | 0) <= 0) {
+ i6 = 0;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ if ((HEAPU8[i5 + 6 | 0] | 0 | 0) < (i3 | 0)) {
+ i6 = 0;
+ STACKTOP = i2;
+ return i6 | 0;
+ } else {
+ i4 = 936;
+ i3 = i5 + (i3 + -1 << 4) + 16 | 0;
+ break;
+ }
+ } else if ((i5 | 0) == 6) {
+ i5 = HEAP32[i4 >> 2] | 0;
+ i4 = HEAP32[i5 + 12 >> 2] | 0;
+ if ((i3 | 0) <= 0) {
+ i6 = 0;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ if ((HEAP32[i4 + 40 >> 2] | 0) < (i3 | 0)) {
+ i6 = 0;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ i6 = i3 + -1 | 0;
+ i3 = HEAP32[(HEAP32[i5 + 16 + (i6 << 2) >> 2] | 0) + 8 >> 2] | 0;
+ i4 = HEAP32[(HEAP32[i4 + 28 >> 2] | 0) + (i6 << 3) >> 2] | 0;
+ if ((i4 | 0) == 0) {
+ i4 = 936;
+ } else {
+ i4 = i4 + 16 | 0;
+ }
+ } else {
+ i6 = 0;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ } while (0);
+ i6 = i1 + 8 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ i8 = i3;
+ i7 = HEAP32[i8 + 4 >> 2] | 0;
+ i1 = i5;
+ HEAP32[i1 >> 2] = HEAP32[i8 >> 2];
+ HEAP32[i1 + 4 >> 2] = i7;
+ HEAP32[i5 + 8 >> 2] = HEAP32[i3 + 8 >> 2];
+ HEAP32[i6 >> 2] = (HEAP32[i6 >> 2] | 0) + 16;
+ i6 = i4;
+ STACKTOP = i2;
+ return i6 | 0;
+}
+function _lua_copy(i1, i8, i4) {
+ i1 = i1 | 0;
+ i8 = i8 | 0;
+ i4 = i4 | 0;
+ var i2 = 0, i3 = 0, i5 = 0, i6 = 0, i7 = 0, i9 = 0;
+ i2 = STACKTOP;
+ i3 = i1 + 16 | 0;
+ i6 = HEAP32[i3 >> 2] | 0;
+ do {
+ if ((i8 | 0) <= 0) {
+ if (!((i8 | 0) < -1000999)) {
+ i7 = (HEAP32[i1 + 8 >> 2] | 0) + (i8 << 4) | 0;
+ break;
+ }
+ if ((i8 | 0) == -1001e3) {
+ i7 = (HEAP32[i1 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i8 = -1001e3 - i8 | 0;
+ i9 = HEAP32[i6 >> 2] | 0;
+ if ((HEAP32[i9 + 8 >> 2] | 0) != 22 ? (i7 = HEAP32[i9 >> 2] | 0, (i8 | 0) <= (HEAPU8[i7 + 6 | 0] | 0 | 0)) : 0) {
+ i7 = i7 + (i8 + -1 << 4) + 16 | 0;
+ } else {
+ i7 = 5192;
+ }
+ } else {
+ i7 = (HEAP32[i6 >> 2] | 0) + (i8 << 4) | 0;
+ i7 = i7 >>> 0 < (HEAP32[i1 + 8 >> 2] | 0) >>> 0 ? i7 : 5192;
+ }
+ } while (0);
+ do {
+ if ((i4 | 0) <= 0) {
+ if (!((i4 | 0) < -1000999)) {
+ i5 = (HEAP32[i1 + 8 >> 2] | 0) + (i4 << 4) | 0;
+ break;
+ }
+ if ((i4 | 0) == -1001e3) {
+ i5 = (HEAP32[i1 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i8 = -1001e3 - i4 | 0;
+ i6 = HEAP32[i6 >> 2] | 0;
+ if ((HEAP32[i6 + 8 >> 2] | 0) != 22 ? (i5 = HEAP32[i6 >> 2] | 0, (i8 | 0) <= (HEAPU8[i5 + 6 | 0] | 0 | 0)) : 0) {
+ i5 = i5 + (i8 + -1 << 4) + 16 | 0;
+ } else {
+ i5 = 5192;
+ }
+ } else {
+ i5 = (HEAP32[i6 >> 2] | 0) + (i4 << 4) | 0;
+ i5 = i5 >>> 0 < (HEAP32[i1 + 8 >> 2] | 0) >>> 0 ? i5 : 5192;
+ }
+ } while (0);
+ i8 = i7;
+ i9 = HEAP32[i8 + 4 >> 2] | 0;
+ i6 = i5;
+ HEAP32[i6 >> 2] = HEAP32[i8 >> 2];
+ HEAP32[i6 + 4 >> 2] = i9;
+ i6 = i7 + 8 | 0;
+ HEAP32[i5 + 8 >> 2] = HEAP32[i6 >> 2];
+ if (!((i4 | 0) < -1001e3)) {
+ STACKTOP = i2;
+ return;
+ }
+ if ((HEAP32[i6 >> 2] & 64 | 0) == 0) {
+ STACKTOP = i2;
+ return;
+ }
+ i4 = HEAP32[i7 >> 2] | 0;
+ if ((HEAP8[i4 + 5 | 0] & 3) == 0) {
+ STACKTOP = i2;
+ return;
+ }
+ i3 = HEAP32[HEAP32[HEAP32[i3 >> 2] >> 2] >> 2] | 0;
+ if ((HEAP8[i3 + 5 | 0] & 4) == 0) {
+ STACKTOP = i2;
+ return;
+ }
+ _luaC_barrier_(i1, i3, i4);
+ STACKTOP = i2;
+ return;
+}
+function _lua_tolstring(i4, i5, i1) {
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0;
+ i2 = STACKTOP;
+ i7 = i4 + 16 | 0;
+ i10 = HEAP32[i7 >> 2] | 0;
+ i6 = (i5 | 0) > 0;
+ do {
+ if (!i6) {
+ if (!((i5 | 0) < -1000999)) {
+ i8 = (HEAP32[i4 + 8 >> 2] | 0) + (i5 << 4) | 0;
+ break;
+ }
+ if ((i5 | 0) == -1001e3) {
+ i8 = (HEAP32[i4 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i9 = -1001e3 - i5 | 0;
+ i10 = HEAP32[i10 >> 2] | 0;
+ if ((HEAP32[i10 + 8 >> 2] | 0) != 22 ? (i8 = HEAP32[i10 >> 2] | 0, (i9 | 0) <= (HEAPU8[i8 + 6 | 0] | 0 | 0)) : 0) {
+ i8 = i8 + (i9 + -1 << 4) + 16 | 0;
+ } else {
+ i8 = 5192;
+ }
+ } else {
+ i8 = (HEAP32[i10 >> 2] | 0) + (i5 << 4) | 0;
+ i8 = i8 >>> 0 < (HEAP32[i4 + 8 >> 2] | 0) >>> 0 ? i8 : 5192;
+ }
+ } while (0);
+ do {
+ if ((HEAP32[i8 + 8 >> 2] & 15 | 0) != 4) {
+ if ((_luaV_tostring(i4, i8) | 0) == 0) {
+ if ((i1 | 0) == 0) {
+ i10 = 0;
+ STACKTOP = i2;
+ return i10 | 0;
+ }
+ HEAP32[i1 >> 2] = 0;
+ i10 = 0;
+ STACKTOP = i2;
+ return i10 | 0;
+ }
+ i8 = i4 + 12 | 0;
+ if ((HEAP32[(HEAP32[i8 >> 2] | 0) + 12 >> 2] | 0) > 0) {
+ _luaC_step(i4);
+ }
+ i7 = HEAP32[i7 >> 2] | 0;
+ if (i6) {
+ i3 = (HEAP32[i7 >> 2] | 0) + (i5 << 4) | 0;
+ i8 = i3 >>> 0 < (HEAP32[i4 + 8 >> 2] | 0) >>> 0 ? i3 : 5192;
+ break;
+ }
+ if (!((i5 | 0) < -1000999)) {
+ i8 = (HEAP32[i4 + 8 >> 2] | 0) + (i5 << 4) | 0;
+ break;
+ }
+ if ((i5 | 0) == -1001e3) {
+ i8 = (HEAP32[i8 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i4 = -1001e3 - i5 | 0;
+ i5 = HEAP32[i7 >> 2] | 0;
+ if ((HEAP32[i5 + 8 >> 2] | 0) != 22 ? (i3 = HEAP32[i5 >> 2] | 0, (i4 | 0) <= (HEAPU8[i3 + 6 | 0] | 0 | 0)) : 0) {
+ i8 = i3 + (i4 + -1 << 4) + 16 | 0;
+ } else {
+ i8 = 5192;
+ }
+ }
+ } while (0);
+ i3 = HEAP32[i8 >> 2] | 0;
+ if ((i1 | 0) != 0) {
+ HEAP32[i1 >> 2] = HEAP32[i3 + 12 >> 2];
+ }
+ i10 = i3 + 16 | 0;
+ STACKTOP = i2;
+ return i10 | 0;
+}
+function _luaD_pcall(i3, i6, i5, i13, i14) {
+ i3 = i3 | 0;
+ i6 = i6 | 0;
+ i5 = i5 | 0;
+ i13 = i13 | 0;
+ i14 = i14 | 0;
+ var i1 = 0, i2 = 0, i4 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0;
+ i1 = STACKTOP;
+ i10 = i3 + 16 | 0;
+ i11 = HEAP32[i10 >> 2] | 0;
+ i12 = i3 + 41 | 0;
+ i7 = HEAP8[i12] | 0;
+ i9 = i3 + 36 | 0;
+ i8 = HEAP16[i9 >> 1] | 0;
+ i4 = i3 + 68 | 0;
+ i2 = HEAP32[i4 >> 2] | 0;
+ HEAP32[i4 >> 2] = i14;
+ i5 = _luaD_rawrunprotected(i3, i6, i5) | 0;
+ if ((i5 | 0) == 0) {
+ HEAP32[i4 >> 2] = i2;
+ STACKTOP = i1;
+ return i5 | 0;
+ }
+ i6 = i3 + 28 | 0;
+ i14 = HEAP32[i6 >> 2] | 0;
+ i15 = i14 + i13 | 0;
+ _luaF_close(i3, i15);
+ if ((i5 | 0) == 6) {
+ i16 = _luaS_newlstr(i3, 2424, 23) | 0;
+ HEAP32[i15 >> 2] = i16;
+ HEAP32[i14 + (i13 + 8) >> 2] = HEAPU8[i16 + 4 | 0] | 0 | 64;
+ } else if ((i5 | 0) == 4) {
+ i16 = HEAP32[(HEAP32[i3 + 12 >> 2] | 0) + 180 >> 2] | 0;
+ HEAP32[i15 >> 2] = i16;
+ HEAP32[i14 + (i13 + 8) >> 2] = HEAPU8[i16 + 4 | 0] | 0 | 64;
+ } else {
+ i16 = HEAP32[i3 + 8 >> 2] | 0;
+ i18 = i16 + -16 | 0;
+ i17 = HEAP32[i18 + 4 >> 2] | 0;
+ HEAP32[i15 >> 2] = HEAP32[i18 >> 2];
+ HEAP32[i15 + 4 >> 2] = i17;
+ HEAP32[i14 + (i13 + 8) >> 2] = HEAP32[i16 + -8 >> 2];
+ }
+ i13 = i14 + (i13 + 16) | 0;
+ HEAP32[i3 + 8 >> 2] = i13;
+ HEAP32[i10 >> 2] = i11;
+ HEAP8[i12] = i7;
+ HEAP16[i9 >> 1] = i8;
+ if ((i11 | 0) != 0) {
+ do {
+ i7 = HEAP32[i11 + 4 >> 2] | 0;
+ i13 = i13 >>> 0 < i7 >>> 0 ? i7 : i13;
+ i11 = HEAP32[i11 + 8 >> 2] | 0;
+ } while ((i11 | 0) != 0);
+ }
+ i6 = i13 - (HEAP32[i6 >> 2] | 0) | 0;
+ i7 = (i6 >> 4) + 1 | 0;
+ i7 = ((i7 | 0) / 8 | 0) + 10 + i7 | 0;
+ i7 = (i7 | 0) > 1e6 ? 1e6 : i7;
+ if ((i6 | 0) > 15999984) {
+ HEAP32[i4 >> 2] = i2;
+ STACKTOP = i1;
+ return i5 | 0;
+ }
+ if ((i7 | 0) >= (HEAP32[i3 + 32 >> 2] | 0)) {
+ HEAP32[i4 >> 2] = i2;
+ STACKTOP = i1;
+ return i5 | 0;
+ }
+ _luaD_reallocstack(i3, i7);
+ HEAP32[i4 >> 2] = i2;
+ STACKTOP = i1;
+ return i5 | 0;
+}
+function _luaH_resize(i1, i4, i6, i9) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i6 = i6 | 0;
+ i9 = i9 | 0;
+ var i2 = 0, i3 = 0, i5 = 0, i7 = 0, i8 = 0, i10 = 0, i11 = 0;
+ i3 = STACKTOP;
+ i8 = i4 + 28 | 0;
+ i5 = HEAP32[i8 >> 2] | 0;
+ i7 = HEAPU8[i4 + 7 | 0] | 0;
+ i2 = HEAP32[i4 + 16 >> 2] | 0;
+ if ((i5 | 0) < (i6 | 0)) {
+ if ((i6 + 1 | 0) >>> 0 > 268435455) {
+ _luaM_toobig(i1);
+ }
+ i11 = i4 + 12 | 0;
+ i10 = _luaM_realloc_(i1, HEAP32[i11 >> 2] | 0, i5 << 4, i6 << 4) | 0;
+ HEAP32[i11 >> 2] = i10;
+ i11 = HEAP32[i8 >> 2] | 0;
+ if ((i11 | 0) < (i6 | 0)) {
+ do {
+ HEAP32[i10 + (i11 << 4) + 8 >> 2] = 0;
+ i11 = i11 + 1 | 0;
+ } while ((i11 | 0) != (i6 | 0));
+ }
+ HEAP32[i8 >> 2] = i6;
+ }
+ _setnodevector(i1, i4, i9);
+ do {
+ if ((i5 | 0) > (i6 | 0)) {
+ HEAP32[i8 >> 2] = i6;
+ i8 = i4 + 12 | 0;
+ i9 = i6;
+ do {
+ i10 = HEAP32[i8 >> 2] | 0;
+ if ((HEAP32[i10 + (i9 << 4) + 8 >> 2] | 0) == 0) {
+ i9 = i9 + 1 | 0;
+ } else {
+ i11 = i9 + 1 | 0;
+ _luaH_setint(i1, i4, i11, i10 + (i9 << 4) | 0);
+ i9 = i11;
+ }
+ } while ((i9 | 0) != (i5 | 0));
+ if ((i6 + 1 | 0) >>> 0 > 268435455) {
+ _luaM_toobig(i1);
+ } else {
+ i11 = i4 + 12 | 0;
+ HEAP32[i11 >> 2] = _luaM_realloc_(i1, HEAP32[i11 >> 2] | 0, i5 << 4, i6 << 4) | 0;
+ break;
+ }
+ }
+ } while (0);
+ i5 = 1 << i7;
+ if ((i5 | 0) > 0) {
+ i6 = i5;
+ do {
+ i6 = i6 + -1 | 0;
+ i7 = i2 + (i6 << 5) + 8 | 0;
+ if ((HEAP32[i7 >> 2] | 0) != 0) {
+ i8 = i2 + (i6 << 5) + 16 | 0;
+ i9 = _luaH_get(i4, i8) | 0;
+ if ((i9 | 0) == 5192) {
+ i9 = _luaH_newkey(i1, i4, i8) | 0;
+ }
+ i8 = i2 + (i6 << 5) | 0;
+ i10 = HEAP32[i8 + 4 >> 2] | 0;
+ i11 = i9;
+ HEAP32[i11 >> 2] = HEAP32[i8 >> 2];
+ HEAP32[i11 + 4 >> 2] = i10;
+ HEAP32[i9 + 8 >> 2] = HEAP32[i7 >> 2];
+ }
+ } while ((i6 | 0) > 0);
+ }
+ if ((i2 | 0) == 8016) {
+ STACKTOP = i3;
+ return;
+ }
+ _luaM_realloc_(i1, i2, i5 << 5, 0) | 0;
+ STACKTOP = i3;
+ return;
+}
+function _codearith(i4, i3, i2, i6, i5) {
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i6 = i6 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, d13 = 0.0;
+ i7 = STACKTOP;
+ if (((((((HEAP32[i2 >> 2] | 0) == 5 ? (HEAP32[i2 + 16 >> 2] | 0) == -1 : 0) ? (HEAP32[i2 + 20 >> 2] | 0) == -1 : 0) ? (HEAP32[i6 >> 2] | 0) == 5 : 0) ? (HEAP32[i6 + 16 >> 2] | 0) == -1 : 0) ? (HEAP32[i6 + 20 >> 2] | 0) == -1 : 0) ? (d13 = +HEAPF64[i6 + 8 >> 3], !((i3 & -2 | 0) == 16 & d13 == 0.0)) : 0) {
+ i12 = i2 + 8 | 0;
+ HEAPF64[i12 >> 3] = +_luaO_arith(i3 + -13 | 0, +HEAPF64[i12 >> 3], d13);
+ STACKTOP = i7;
+ return;
+ }
+ if ((i3 | 0) == 19 | (i3 | 0) == 21) {
+ i11 = 0;
+ } else {
+ i11 = _luaK_exp2RK(i4, i6) | 0;
+ }
+ i12 = _luaK_exp2RK(i4, i2) | 0;
+ if ((i12 | 0) > (i11 | 0)) {
+ if (((HEAP32[i2 >> 2] | 0) == 6 ? (i8 = HEAP32[i2 + 8 >> 2] | 0, (i8 & 256 | 0) == 0) : 0) ? (HEAPU8[i4 + 46 | 0] | 0 | 0) <= (i8 | 0) : 0) {
+ i10 = i4 + 48 | 0;
+ HEAP8[i10] = (HEAP8[i10] | 0) + -1 << 24 >> 24;
+ }
+ if (((HEAP32[i6 >> 2] | 0) == 6 ? (i1 = HEAP32[i6 + 8 >> 2] | 0, (i1 & 256 | 0) == 0) : 0) ? (HEAPU8[i4 + 46 | 0] | 0 | 0) <= (i1 | 0) : 0) {
+ i10 = i4 + 48 | 0;
+ HEAP8[i10] = (HEAP8[i10] | 0) + -1 << 24 >> 24;
+ }
+ } else {
+ if (((HEAP32[i6 >> 2] | 0) == 6 ? (i10 = HEAP32[i6 + 8 >> 2] | 0, (i10 & 256 | 0) == 0) : 0) ? (HEAPU8[i4 + 46 | 0] | 0 | 0) <= (i10 | 0) : 0) {
+ i10 = i4 + 48 | 0;
+ HEAP8[i10] = (HEAP8[i10] | 0) + -1 << 24 >> 24;
+ }
+ if (((HEAP32[i2 >> 2] | 0) == 6 ? (i9 = HEAP32[i2 + 8 >> 2] | 0, (i9 & 256 | 0) == 0) : 0) ? (HEAPU8[i4 + 46 | 0] | 0 | 0) <= (i9 | 0) : 0) {
+ i10 = i4 + 48 | 0;
+ HEAP8[i10] = (HEAP8[i10] | 0) + -1 << 24 >> 24;
+ }
+ }
+ HEAP32[i2 + 8 >> 2] = _luaK_code(i4, i11 << 14 | i3 | i12 << 23) | 0;
+ HEAP32[i2 >> 2] = 11;
+ HEAP32[(HEAP32[(HEAP32[i4 >> 2] | 0) + 20 >> 2] | 0) + ((HEAP32[i4 + 20 >> 2] | 0) + -1 << 2) >> 2] = i5;
+ STACKTOP = i7;
+ return;
+}
+function _GCTM(i1, i3) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0;
+ i4 = STACKTOP;
+ STACKTOP = STACKTOP + 32 | 0;
+ i2 = i4 + 16 | 0;
+ i5 = i4;
+ i6 = HEAP32[i1 + 12 >> 2] | 0;
+ i9 = i6 + 104 | 0;
+ i8 = HEAP32[i9 >> 2] | 0;
+ HEAP32[i9 >> 2] = HEAP32[i8 >> 2];
+ i9 = i6 + 68 | 0;
+ HEAP32[i8 >> 2] = HEAP32[i9 >> 2];
+ HEAP32[i9 >> 2] = i8;
+ i9 = i8 + 5 | 0;
+ i7 = HEAPU8[i9] | 0;
+ HEAP8[i9] = i7 & 239;
+ if ((HEAPU8[i6 + 61 | 0] | 0) >= 2) {
+ HEAP8[i9] = HEAP8[i6 + 60 | 0] & 3 | i7 & 168;
+ }
+ HEAP32[i5 >> 2] = i8;
+ i7 = i5 + 8 | 0;
+ HEAP32[i7 >> 2] = HEAPU8[i8 + 4 | 0] | 0 | 64;
+ i8 = _luaT_gettmbyobj(i1, i5, 2) | 0;
+ if ((i8 | 0) == 0) {
+ STACKTOP = i4;
+ return;
+ }
+ i9 = i8 + 8 | 0;
+ if ((HEAP32[i9 >> 2] & 15 | 0) != 6) {
+ STACKTOP = i4;
+ return;
+ }
+ i12 = i1 + 41 | 0;
+ i13 = HEAP8[i12] | 0;
+ i10 = i6 + 63 | 0;
+ i11 = HEAP8[i10] | 0;
+ HEAP8[i12] = 0;
+ HEAP8[i10] = 0;
+ i6 = i1 + 8 | 0;
+ i14 = HEAP32[i6 >> 2] | 0;
+ i16 = i8;
+ i15 = HEAP32[i16 + 4 >> 2] | 0;
+ i8 = i14;
+ HEAP32[i8 >> 2] = HEAP32[i16 >> 2];
+ HEAP32[i8 + 4 >> 2] = i15;
+ HEAP32[i14 + 8 >> 2] = HEAP32[i9 >> 2];
+ i9 = HEAP32[i6 >> 2] | 0;
+ i14 = i5;
+ i8 = HEAP32[i14 + 4 >> 2] | 0;
+ i5 = i9 + 16 | 0;
+ HEAP32[i5 >> 2] = HEAP32[i14 >> 2];
+ HEAP32[i5 + 4 >> 2] = i8;
+ HEAP32[i9 + 24 >> 2] = HEAP32[i7 >> 2];
+ i5 = HEAP32[i6 >> 2] | 0;
+ HEAP32[i6 >> 2] = i5 + 32;
+ i5 = _luaD_pcall(i1, 7, 0, i5 - (HEAP32[i1 + 28 >> 2] | 0) | 0, 0) | 0;
+ HEAP8[i12] = i13;
+ HEAP8[i10] = i11;
+ if ((i5 | 0) == 0 | (i3 | 0) == 0) {
+ STACKTOP = i4;
+ return;
+ }
+ if ((i5 | 0) != 2) {
+ i16 = i5;
+ _luaD_throw(i1, i16);
+ }
+ i3 = HEAP32[i6 >> 2] | 0;
+ if ((HEAP32[i3 + -8 >> 2] & 15 | 0) == 4) {
+ i3 = (HEAP32[i3 + -16 >> 2] | 0) + 16 | 0;
+ } else {
+ i3 = 2528;
+ }
+ HEAP32[i2 >> 2] = i3;
+ _luaO_pushfstring(i1, 2544, i2) | 0;
+ i16 = 5;
+ _luaD_throw(i1, i16);
+}
+function _lua_gc(i3, i5, i4) {
+ i3 = i3 | 0;
+ i5 = i5 | 0;
+ i4 = i4 | 0;
+ var i1 = 0, i2 = 0;
+ i1 = STACKTOP;
+ i2 = HEAP32[i3 + 12 >> 2] | 0;
+ L1 : do {
+ switch (i5 | 0) {
+ case 8:
+ {
+ i5 = i2 + 160 | 0;
+ i2 = HEAP32[i5 >> 2] | 0;
+ HEAP32[i5 >> 2] = i4;
+ break;
+ }
+ case 11:
+ {
+ _luaC_changemode(i3, 0);
+ i2 = 0;
+ break;
+ }
+ case 2:
+ {
+ _luaC_fullgc(i3, 0);
+ i2 = 0;
+ break;
+ }
+ case 5:
+ {
+ if ((HEAP8[i2 + 62 | 0] | 0) == 2) {
+ i2 = (HEAP32[i2 + 20 >> 2] | 0) == 0 | 0;
+ _luaC_forcestep(i3);
+ break L1;
+ }
+ i4 = (i4 << 10) + -1600 | 0;
+ if ((HEAP8[i2 + 63 | 0] | 0) == 0) {
+ i5 = i4;
+ _luaE_setdebt(i2, i5);
+ _luaC_forcestep(i3);
+ i5 = i2 + 61 | 0;
+ i5 = HEAP8[i5] | 0;
+ i5 = i5 << 24 >> 24 == 5;
+ i5 = i5 & 1;
+ STACKTOP = i1;
+ return i5 | 0;
+ }
+ i5 = (HEAP32[i2 + 12 >> 2] | 0) + i4 | 0;
+ _luaE_setdebt(i2, i5);
+ _luaC_forcestep(i3);
+ i5 = i2 + 61 | 0;
+ i5 = HEAP8[i5] | 0;
+ i5 = i5 << 24 >> 24 == 5;
+ i5 = i5 & 1;
+ STACKTOP = i1;
+ return i5 | 0;
+ }
+ case 4:
+ {
+ i2 = (HEAP32[i2 + 12 >> 2] | 0) + (HEAP32[i2 + 8 >> 2] | 0) & 1023;
+ break;
+ }
+ case 1:
+ {
+ _luaE_setdebt(i2, 0);
+ HEAP8[i2 + 63 | 0] = 1;
+ i2 = 0;
+ break;
+ }
+ case 3:
+ {
+ i2 = ((HEAP32[i2 + 12 >> 2] | 0) + (HEAP32[i2 + 8 >> 2] | 0) | 0) >>> 10;
+ break;
+ }
+ case 7:
+ {
+ i5 = i2 + 164 | 0;
+ i2 = HEAP32[i5 >> 2] | 0;
+ HEAP32[i5 >> 2] = i4;
+ break;
+ }
+ case 0:
+ {
+ HEAP8[i2 + 63 | 0] = 0;
+ i2 = 0;
+ break;
+ }
+ case 6:
+ {
+ i5 = i2 + 156 | 0;
+ i2 = HEAP32[i5 >> 2] | 0;
+ HEAP32[i5 >> 2] = i4;
+ break;
+ }
+ case 9:
+ {
+ i2 = HEAPU8[i2 + 63 | 0] | 0;
+ break;
+ }
+ case 10:
+ {
+ _luaC_changemode(i3, 2);
+ i2 = 0;
+ break;
+ }
+ default:
+ {
+ i2 = -1;
+ }
+ }
+ } while (0);
+ STACKTOP = i1;
+ return i2 | 0;
+}
+function _os_time(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 64 | 0;
+ i4 = i2;
+ i5 = i2 + 48 | 0;
+ i3 = i2 + 4 | 0;
+ if ((_lua_type(i1, 1) | 0) < 1) {
+ i3 = _time(0) | 0;
+ } else {
+ _luaL_checktype(i1, 1, 5);
+ _lua_settop(i1, 1);
+ _lua_getfield(i1, -1, 5864);
+ i6 = _lua_tointegerx(i1, -1, i4) | 0;
+ i6 = (HEAP32[i4 >> 2] | 0) == 0 ? 0 : i6;
+ _lua_settop(i1, -2);
+ HEAP32[i3 >> 2] = i6;
+ _lua_getfield(i1, -1, 5872);
+ i6 = _lua_tointegerx(i1, -1, i4) | 0;
+ i6 = (HEAP32[i4 >> 2] | 0) == 0 ? 0 : i6;
+ _lua_settop(i1, -2);
+ HEAP32[i3 + 4 >> 2] = i6;
+ _lua_getfield(i1, -1, 5880);
+ i6 = _lua_tointegerx(i1, -1, i4) | 0;
+ i6 = (HEAP32[i4 >> 2] | 0) == 0 ? 12 : i6;
+ _lua_settop(i1, -2);
+ HEAP32[i3 + 8 >> 2] = i6;
+ _lua_getfield(i1, -1, 5888);
+ i6 = _lua_tointegerx(i1, -1, i5) | 0;
+ if ((HEAP32[i5 >> 2] | 0) == 0) {
+ HEAP32[i4 >> 2] = 5888;
+ i6 = _luaL_error(i1, 5920, i4) | 0;
+ } else {
+ _lua_settop(i1, -2);
+ }
+ HEAP32[i3 + 12 >> 2] = i6;
+ _lua_getfield(i1, -1, 5896);
+ i6 = _lua_tointegerx(i1, -1, i5) | 0;
+ if ((HEAP32[i5 >> 2] | 0) == 0) {
+ HEAP32[i4 >> 2] = 5896;
+ i6 = _luaL_error(i1, 5920, i4) | 0;
+ } else {
+ _lua_settop(i1, -2);
+ }
+ HEAP32[i3 + 16 >> 2] = i6 + -1;
+ _lua_getfield(i1, -1, 5904);
+ i6 = _lua_tointegerx(i1, -1, i5) | 0;
+ if ((HEAP32[i5 >> 2] | 0) == 0) {
+ HEAP32[i4 >> 2] = 5904;
+ i6 = _luaL_error(i1, 5920, i4) | 0;
+ } else {
+ _lua_settop(i1, -2);
+ }
+ HEAP32[i3 + 20 >> 2] = i6 + -1900;
+ _lua_getfield(i1, -1, 5912);
+ if ((_lua_type(i1, -1) | 0) == 0) {
+ i4 = -1;
+ } else {
+ i4 = _lua_toboolean(i1, -1) | 0;
+ }
+ _lua_settop(i1, -2);
+ HEAP32[i3 + 32 >> 2] = i4;
+ i3 = _mktime(i3 | 0) | 0;
+ }
+ if ((i3 | 0) == -1) {
+ _lua_pushnil(i1);
+ STACKTOP = i2;
+ return 1;
+ } else {
+ _lua_pushnumber(i1, +(i3 | 0));
+ STACKTOP = i2;
+ return 1;
+ }
+ return 0;
+}
+function _addk(i6, i4, i3) {
+ i6 = i6 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ var i1 = 0, i2 = 0, i5 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i10 = i1;
+ i2 = HEAP32[(HEAP32[i6 + 12 >> 2] | 0) + 52 >> 2] | 0;
+ i8 = _luaH_set(i2, HEAP32[i6 + 4 >> 2] | 0, i4) | 0;
+ i4 = HEAP32[i6 >> 2] | 0;
+ i9 = i8 + 8 | 0;
+ if (((HEAP32[i9 >> 2] | 0) == 3 ? (HEAPF64[i10 >> 3] = +HEAPF64[i8 >> 3] + 6755399441055744.0, i7 = HEAP32[i10 >> 2] | 0, i5 = HEAP32[i4 + 8 >> 2] | 0, (HEAP32[i5 + (i7 << 4) + 8 >> 2] | 0) == (HEAP32[i3 + 8 >> 2] | 0)) : 0) ? (_luaV_equalobj_(0, i5 + (i7 << 4) | 0, i3) | 0) != 0 : 0) {
+ i10 = i7;
+ STACKTOP = i1;
+ return i10 | 0;
+ }
+ i5 = i4 + 44 | 0;
+ i10 = HEAP32[i5 >> 2] | 0;
+ i7 = i6 + 32 | 0;
+ i6 = HEAP32[i7 >> 2] | 0;
+ HEAPF64[i8 >> 3] = +(i6 | 0);
+ HEAP32[i9 >> 2] = 3;
+ i9 = HEAP32[i5 >> 2] | 0;
+ if ((i6 | 0) >= (i9 | 0)) {
+ i9 = i4 + 8 | 0;
+ HEAP32[i9 >> 2] = _luaM_growaux_(i2, HEAP32[i9 >> 2] | 0, i5, 16, 67108863, 10600) | 0;
+ i9 = HEAP32[i5 >> 2] | 0;
+ }
+ i8 = HEAP32[i4 + 8 >> 2] | 0;
+ if ((i10 | 0) < (i9 | 0)) {
+ while (1) {
+ i9 = i10 + 1 | 0;
+ HEAP32[i8 + (i10 << 4) + 8 >> 2] = 0;
+ if ((i9 | 0) < (HEAP32[i5 >> 2] | 0)) {
+ i10 = i9;
+ } else {
+ break;
+ }
+ }
+ }
+ i5 = i3;
+ i9 = HEAP32[i5 + 4 >> 2] | 0;
+ i10 = i8 + (i6 << 4) | 0;
+ HEAP32[i10 >> 2] = HEAP32[i5 >> 2];
+ HEAP32[i10 + 4 >> 2] = i9;
+ i10 = i3 + 8 | 0;
+ HEAP32[i8 + (i6 << 4) + 8 >> 2] = HEAP32[i10 >> 2];
+ HEAP32[i7 >> 2] = (HEAP32[i7 >> 2] | 0) + 1;
+ if ((HEAP32[i10 >> 2] & 64 | 0) == 0) {
+ i10 = i6;
+ STACKTOP = i1;
+ return i10 | 0;
+ }
+ i3 = HEAP32[i3 >> 2] | 0;
+ if ((HEAP8[i3 + 5 | 0] & 3) == 0) {
+ i10 = i6;
+ STACKTOP = i1;
+ return i10 | 0;
+ }
+ if ((HEAP8[i4 + 5 | 0] & 4) == 0) {
+ i10 = i6;
+ STACKTOP = i1;
+ return i10 | 0;
+ }
+ _luaC_barrier_(i2, i4, i3);
+ i10 = i6;
+ STACKTOP = i1;
+ return i10 | 0;
+}
+function _singlevaraux(i5, i4, i2, i11) {
+ i5 = i5 | 0;
+ i4 = i4 | 0;
+ i2 = i2 | 0;
+ i11 = i11 | 0;
+ var i1 = 0, i3 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0;
+ i1 = STACKTOP;
+ if ((i5 | 0) == 0) {
+ i11 = 0;
+ STACKTOP = i1;
+ return i11 | 0;
+ }
+ i7 = i5 + 12 | 0;
+ i8 = i5 + 40 | 0;
+ i9 = HEAPU8[i5 + 46 | 0] | 0;
+ while (1) {
+ i6 = i9 + -1 | 0;
+ i10 = HEAP32[i5 >> 2] | 0;
+ if ((i9 | 0) <= 0) {
+ break;
+ }
+ if ((_luaS_eqstr(i4, HEAP32[(HEAP32[i10 + 24 >> 2] | 0) + ((HEAP16[(HEAP32[HEAP32[(HEAP32[i7 >> 2] | 0) + 64 >> 2] >> 2] | 0) + ((HEAP32[i8 >> 2] | 0) + i6 << 1) >> 1] | 0) * 12 | 0) >> 2] | 0) | 0) == 0) {
+ i9 = i6;
+ } else {
+ i3 = 5;
+ break;
+ }
+ }
+ if ((i3 | 0) == 5) {
+ HEAP32[i2 + 16 >> 2] = -1;
+ HEAP32[i2 + 20 >> 2] = -1;
+ HEAP32[i2 >> 2] = 7;
+ HEAP32[i2 + 8 >> 2] = i6;
+ if ((i11 | 0) != 0) {
+ i11 = 7;
+ STACKTOP = i1;
+ return i11 | 0;
+ }
+ i2 = i5 + 16 | 0;
+ do {
+ i2 = HEAP32[i2 >> 2] | 0;
+ } while ((HEAPU8[i2 + 8 | 0] | 0) > (i6 | 0));
+ HEAP8[i2 + 9 | 0] = 1;
+ i11 = 7;
+ STACKTOP = i1;
+ return i11 | 0;
+ }
+ i7 = HEAP32[i10 + 28 >> 2] | 0;
+ i6 = i5 + 47 | 0;
+ L17 : do {
+ if ((HEAP8[i6] | 0) != 0) {
+ i8 = 0;
+ while (1) {
+ i9 = i8 + 1 | 0;
+ if ((_luaS_eqstr(HEAP32[i7 + (i8 << 3) >> 2] | 0, i4) | 0) != 0) {
+ break;
+ }
+ if ((i9 | 0) < (HEAPU8[i6] | 0)) {
+ i8 = i9;
+ } else {
+ i3 = 13;
+ break L17;
+ }
+ }
+ if ((i8 | 0) < 0) {
+ i3 = 13;
+ }
+ } else {
+ i3 = 13;
+ }
+ } while (0);
+ do {
+ if ((i3 | 0) == 13) {
+ if ((_singlevaraux(HEAP32[i5 + 8 >> 2] | 0, i4, i2, 0) | 0) == 0) {
+ i11 = 0;
+ STACKTOP = i1;
+ return i11 | 0;
+ } else {
+ i8 = _newupvalue(i5, i4, i2) | 0;
+ break;
+ }
+ }
+ } while (0);
+ HEAP32[i2 + 16 >> 2] = -1;
+ HEAP32[i2 + 20 >> 2] = -1;
+ HEAP32[i2 >> 2] = 8;
+ HEAP32[i2 + 8 >> 2] = i8;
+ i11 = 8;
+ STACKTOP = i1;
+ return i11 | 0;
+}
+function _mainposition(i1, i3) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i4 = 0, i5 = 0, i6 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i4 = i2;
+ switch (HEAP32[i3 + 8 >> 2] & 63 | 0) {
+ case 3:
+ {
+ HEAPF64[i4 >> 3] = +HEAPF64[i3 >> 3] + 1.0;
+ i3 = (HEAP32[i4 + 4 >> 2] | 0) + (HEAP32[i4 >> 2] | 0) | 0;
+ if ((i3 | 0) < 0) {
+ i4 = 0 - i3 | 0;
+ i3 = (i3 | 0) == (i4 | 0) ? 0 : i4;
+ }
+ i5 = (HEAP32[i1 + 16 >> 2] | 0) + (((i3 | 0) % ((1 << HEAPU8[i1 + 7 | 0]) + -1 | 1 | 0) | 0) << 5) | 0;
+ STACKTOP = i2;
+ return i5 | 0;
+ }
+ case 2:
+ {
+ i5 = (HEAP32[i1 + 16 >> 2] | 0) + ((((HEAP32[i3 >> 2] | 0) >>> 0) % (((1 << HEAPU8[i1 + 7 | 0]) + -1 | 1) >>> 0) | 0) << 5) | 0;
+ STACKTOP = i2;
+ return i5 | 0;
+ }
+ case 20:
+ {
+ i5 = HEAP32[i3 >> 2] | 0;
+ i4 = i5 + 6 | 0;
+ if ((HEAP8[i4] | 0) == 0) {
+ i6 = i5 + 8 | 0;
+ HEAP32[i6 >> 2] = _luaS_hash(i5 + 16 | 0, HEAP32[i5 + 12 >> 2] | 0, HEAP32[i6 >> 2] | 0) | 0;
+ HEAP8[i4] = 1;
+ i5 = HEAP32[i3 >> 2] | 0;
+ }
+ i6 = (HEAP32[i1 + 16 >> 2] | 0) + (((1 << HEAPU8[i1 + 7 | 0]) + -1 & HEAP32[i5 + 8 >> 2]) << 5) | 0;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ case 22:
+ {
+ i6 = (HEAP32[i1 + 16 >> 2] | 0) + ((((HEAP32[i3 >> 2] | 0) >>> 0) % (((1 << HEAPU8[i1 + 7 | 0]) + -1 | 1) >>> 0) | 0) << 5) | 0;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ case 4:
+ {
+ i6 = (HEAP32[i1 + 16 >> 2] | 0) + (((1 << HEAPU8[i1 + 7 | 0]) + -1 & HEAP32[(HEAP32[i3 >> 2] | 0) + 8 >> 2]) << 5) | 0;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ case 1:
+ {
+ i6 = (HEAP32[i1 + 16 >> 2] | 0) + (((1 << HEAPU8[i1 + 7 | 0]) + -1 & HEAP32[i3 >> 2]) << 5) | 0;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ default:
+ {
+ i6 = (HEAP32[i1 + 16 >> 2] | 0) + ((((HEAP32[i3 >> 2] | 0) >>> 0) % (((1 << HEAPU8[i1 + 7 | 0]) + -1 | 1) >>> 0) | 0) << 5) | 0;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ }
+ return 0;
+}
+function _clearvalues(i2, i5, i1) {
+ i2 = i2 | 0;
+ i5 = i5 | 0;
+ i1 = i1 | 0;
+ var i3 = 0, i4 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0;
+ i4 = STACKTOP;
+ if ((i5 | 0) == (i1 | 0)) {
+ STACKTOP = i4;
+ return;
+ }
+ do {
+ i7 = i5 + 16 | 0;
+ i9 = HEAP32[i7 >> 2] | 0;
+ i6 = i9 + (1 << (HEAPU8[i5 + 7 | 0] | 0) << 5) | 0;
+ i8 = i5 + 28 | 0;
+ if ((HEAP32[i8 >> 2] | 0) > 0) {
+ i11 = i5 + 12 | 0;
+ i12 = 0;
+ do {
+ i13 = HEAP32[i11 >> 2] | 0;
+ i10 = i13 + (i12 << 4) + 8 | 0;
+ i9 = HEAP32[i10 >> 2] | 0;
+ do {
+ if ((i9 & 64 | 0) != 0) {
+ i13 = HEAP32[i13 + (i12 << 4) >> 2] | 0;
+ if ((i9 & 15 | 0) != 4) {
+ if ((HEAP8[i13 + 5 | 0] & 3) == 0) {
+ break;
+ }
+ HEAP32[i10 >> 2] = 0;
+ break;
+ }
+ if ((i13 | 0) != 0 ? !((HEAP8[i13 + 5 | 0] & 3) == 0) : 0) {
+ _reallymarkobject(i2, i13);
+ }
+ }
+ } while (0);
+ i12 = i12 + 1 | 0;
+ } while ((i12 | 0) < (HEAP32[i8 >> 2] | 0));
+ i7 = HEAP32[i7 >> 2] | 0;
+ } else {
+ i7 = i9;
+ }
+ if (i7 >>> 0 < i6 >>> 0) {
+ do {
+ i8 = i7 + 8 | 0;
+ i9 = HEAP32[i8 >> 2] | 0;
+ do {
+ if (!((i9 | 0) == 0 | (i9 & 64 | 0) == 0)) {
+ i10 = HEAP32[i7 >> 2] | 0;
+ if ((i9 & 15 | 0) == 4) {
+ if ((i10 | 0) == 0) {
+ break;
+ }
+ if ((HEAP8[i10 + 5 | 0] & 3) == 0) {
+ break;
+ }
+ _reallymarkobject(i2, i10);
+ break;
+ }
+ if ((!((HEAP8[i10 + 5 | 0] & 3) == 0) ? (HEAP32[i8 >> 2] = 0, i3 = i7 + 24 | 0, (HEAP32[i3 >> 2] & 64 | 0) != 0) : 0) ? !((HEAP8[(HEAP32[i7 + 16 >> 2] | 0) + 5 | 0] & 3) == 0) : 0) {
+ HEAP32[i3 >> 2] = 11;
+ }
+ }
+ } while (0);
+ i7 = i7 + 32 | 0;
+ } while (i7 >>> 0 < i6 >>> 0);
+ }
+ i5 = HEAP32[i5 + 24 >> 2] | 0;
+ } while ((i5 | 0) != (i1 | 0));
+ STACKTOP = i4;
+ return;
+}
+function _reallymarkobject(i1, i4) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ var i2 = 0, i3 = 0, i5 = 0, i6 = 0, i7 = 0;
+ i3 = STACKTOP;
+ i2 = i4 + 5 | 0;
+ HEAP8[i2] = HEAP8[i2] & 252;
+ switch (HEAPU8[i4 + 4 | 0] | 0 | 0) {
+ case 6:
+ {
+ i7 = i1 + 84 | 0;
+ HEAP32[i4 + 8 >> 2] = HEAP32[i7 >> 2];
+ HEAP32[i7 >> 2] = i4;
+ STACKTOP = i3;
+ return;
+ }
+ case 20:
+ case 4:
+ {
+ i4 = (HEAP32[i4 + 12 >> 2] | 0) + 17 | 0;
+ break;
+ }
+ case 7:
+ {
+ i5 = HEAP32[i4 + 8 >> 2] | 0;
+ if ((i5 | 0) != 0 ? !((HEAP8[i5 + 5 | 0] & 3) == 0) : 0) {
+ _reallymarkobject(i1, i5);
+ }
+ i5 = HEAP32[i4 + 12 >> 2] | 0;
+ if ((i5 | 0) != 0 ? !((HEAP8[i5 + 5 | 0] & 3) == 0) : 0) {
+ _reallymarkobject(i1, i5);
+ }
+ i4 = (HEAP32[i4 + 16 >> 2] | 0) + 24 | 0;
+ break;
+ }
+ case 8:
+ {
+ i7 = i1 + 84 | 0;
+ HEAP32[i4 + 60 >> 2] = HEAP32[i7 >> 2];
+ HEAP32[i7 >> 2] = i4;
+ STACKTOP = i3;
+ return;
+ }
+ case 10:
+ {
+ i6 = i4 + 8 | 0;
+ i7 = HEAP32[i6 >> 2] | 0;
+ if ((HEAP32[i7 + 8 >> 2] & 64 | 0) != 0 ? (i5 = HEAP32[i7 >> 2] | 0, !((HEAP8[i5 + 5 | 0] & 3) == 0)) : 0) {
+ _reallymarkobject(i1, i5);
+ i7 = HEAP32[i6 >> 2] | 0;
+ }
+ if ((i7 | 0) == (i4 + 16 | 0)) {
+ i4 = 32;
+ } else {
+ STACKTOP = i3;
+ return;
+ }
+ break;
+ }
+ case 5:
+ {
+ i7 = i1 + 84 | 0;
+ HEAP32[i4 + 24 >> 2] = HEAP32[i7 >> 2];
+ HEAP32[i7 >> 2] = i4;
+ STACKTOP = i3;
+ return;
+ }
+ case 38:
+ {
+ i7 = i1 + 84 | 0;
+ HEAP32[i4 + 8 >> 2] = HEAP32[i7 >> 2];
+ HEAP32[i7 >> 2] = i4;
+ STACKTOP = i3;
+ return;
+ }
+ case 9:
+ {
+ i7 = i1 + 84 | 0;
+ HEAP32[i4 + 72 >> 2] = HEAP32[i7 >> 2];
+ HEAP32[i7 >> 2] = i4;
+ STACKTOP = i3;
+ return;
+ }
+ default:
+ {
+ STACKTOP = i3;
+ return;
+ }
+ }
+ HEAP8[i2] = HEAPU8[i2] | 0 | 4;
+ i7 = i1 + 16 | 0;
+ HEAP32[i7 >> 2] = (HEAP32[i7 >> 2] | 0) + i4;
+ STACKTOP = i3;
+ return;
+}
+function _lua_upvaluejoin(i1, i9, i7, i6, i3) {
+ i1 = i1 | 0;
+ i9 = i9 | 0;
+ i7 = i7 | 0;
+ i6 = i6 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i4 = 0, i5 = 0, i8 = 0, i10 = 0;
+ i2 = STACKTOP;
+ i5 = HEAP32[i1 + 16 >> 2] | 0;
+ do {
+ if ((i9 | 0) <= 0) {
+ if (!((i9 | 0) < -1000999)) {
+ i8 = (HEAP32[i1 + 8 >> 2] | 0) + (i9 << 4) | 0;
+ break;
+ }
+ if ((i9 | 0) == -1001e3) {
+ i8 = (HEAP32[i1 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i10 = -1001e3 - i9 | 0;
+ i9 = HEAP32[i5 >> 2] | 0;
+ if ((HEAP32[i9 + 8 >> 2] | 0) != 22 ? (i8 = HEAP32[i9 >> 2] | 0, (i10 | 0) <= (HEAPU8[i8 + 6 | 0] | 0 | 0)) : 0) {
+ i8 = i8 + (i10 + -1 << 4) + 16 | 0;
+ } else {
+ i8 = 5192;
+ }
+ } else {
+ i8 = (HEAP32[i5 >> 2] | 0) + (i9 << 4) | 0;
+ i8 = i8 >>> 0 < (HEAP32[i1 + 8 >> 2] | 0) >>> 0 ? i8 : 5192;
+ }
+ } while (0);
+ i8 = HEAP32[i8 >> 2] | 0;
+ i7 = i8 + 16 + (i7 + -1 << 2) | 0;
+ do {
+ if ((i6 | 0) <= 0) {
+ if (!((i6 | 0) < -1000999)) {
+ i4 = (HEAP32[i1 + 8 >> 2] | 0) + (i6 << 4) | 0;
+ break;
+ }
+ if ((i6 | 0) == -1001e3) {
+ i4 = (HEAP32[i1 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i6 = -1001e3 - i6 | 0;
+ i5 = HEAP32[i5 >> 2] | 0;
+ if ((HEAP32[i5 + 8 >> 2] | 0) != 22 ? (i4 = HEAP32[i5 >> 2] | 0, (i6 | 0) <= (HEAPU8[i4 + 6 | 0] | 0 | 0)) : 0) {
+ i4 = i4 + (i6 + -1 << 4) + 16 | 0;
+ } else {
+ i4 = 5192;
+ }
+ } else {
+ i4 = (HEAP32[i5 >> 2] | 0) + (i6 << 4) | 0;
+ i4 = i4 >>> 0 < (HEAP32[i1 + 8 >> 2] | 0) >>> 0 ? i4 : 5192;
+ }
+ } while (0);
+ i3 = (HEAP32[i4 >> 2] | 0) + 16 + (i3 + -1 << 2) | 0;
+ HEAP32[i7 >> 2] = HEAP32[i3 >> 2];
+ i3 = HEAP32[i3 >> 2] | 0;
+ if ((HEAP8[i3 + 5 | 0] & 3) == 0) {
+ STACKTOP = i2;
+ return;
+ }
+ if ((HEAP8[i8 + 5 | 0] & 4) == 0) {
+ STACKTOP = i2;
+ return;
+ }
+ _luaC_barrier_(i1, i8, i3);
+ STACKTOP = i2;
+ return;
+}
+function _lua_upvalueid(i5, i7, i1) {
+ i5 = i5 | 0;
+ i7 = i7 | 0;
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i6 = 0, i8 = 0, i9 = 0, i10 = 0;
+ i2 = STACKTOP;
+ i4 = HEAP32[i5 + 16 >> 2] | 0;
+ i6 = (i7 | 0) > 0;
+ do {
+ if (!i6) {
+ if (!((i7 | 0) < -1000999)) {
+ i8 = (HEAP32[i5 + 8 >> 2] | 0) + (i7 << 4) | 0;
+ break;
+ }
+ if ((i7 | 0) == -1001e3) {
+ i8 = (HEAP32[i5 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i10 = -1001e3 - i7 | 0;
+ i9 = HEAP32[i4 >> 2] | 0;
+ if ((HEAP32[i9 + 8 >> 2] | 0) != 22 ? (i8 = HEAP32[i9 >> 2] | 0, (i10 | 0) <= (HEAPU8[i8 + 6 | 0] | 0 | 0)) : 0) {
+ i8 = i8 + (i10 + -1 << 4) + 16 | 0;
+ } else {
+ i8 = 5192;
+ }
+ } else {
+ i8 = (HEAP32[i4 >> 2] | 0) + (i7 << 4) | 0;
+ i8 = i8 >>> 0 < (HEAP32[i5 + 8 >> 2] | 0) >>> 0 ? i8 : 5192;
+ }
+ } while (0);
+ i9 = HEAP32[i8 + 8 >> 2] & 63;
+ if ((i9 | 0) == 38) {
+ i10 = (HEAP32[i8 >> 2] | 0) + (i1 + -1 << 4) + 16 | 0;
+ STACKTOP = i2;
+ return i10 | 0;
+ } else if ((i9 | 0) == 6) {
+ do {
+ if (!i6) {
+ if (!((i7 | 0) < -1000999)) {
+ i3 = (HEAP32[i5 + 8 >> 2] | 0) + (i7 << 4) | 0;
+ break;
+ }
+ if ((i7 | 0) == -1001e3) {
+ i3 = (HEAP32[i5 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i5 = -1001e3 - i7 | 0;
+ i4 = HEAP32[i4 >> 2] | 0;
+ if ((HEAP32[i4 + 8 >> 2] | 0) != 22 ? (i3 = HEAP32[i4 >> 2] | 0, (i5 | 0) <= (HEAPU8[i3 + 6 | 0] | 0 | 0)) : 0) {
+ i3 = i3 + (i5 + -1 << 4) + 16 | 0;
+ } else {
+ i3 = 5192;
+ }
+ } else {
+ i3 = (HEAP32[i4 >> 2] | 0) + (i7 << 4) | 0;
+ i3 = i3 >>> 0 < (HEAP32[i5 + 8 >> 2] | 0) >>> 0 ? i3 : 5192;
+ }
+ } while (0);
+ i10 = HEAP32[(HEAP32[i3 >> 2] | 0) + 16 + (i1 + -1 << 2) >> 2] | 0;
+ STACKTOP = i2;
+ return i10 | 0;
+ } else {
+ i10 = 0;
+ STACKTOP = i2;
+ return i10 | 0;
+ }
+ return 0;
+}
+function _lua_rawequal(i2, i6, i4) {
+ i2 = i2 | 0;
+ i6 = i6 | 0;
+ i4 = i4 | 0;
+ var i1 = 0, i3 = 0, i5 = 0, i7 = 0;
+ i1 = STACKTOP;
+ i3 = HEAP32[i2 + 16 >> 2] | 0;
+ do {
+ if ((i6 | 0) <= 0) {
+ if (!((i6 | 0) < -1000999)) {
+ i5 = (HEAP32[i2 + 8 >> 2] | 0) + (i6 << 4) | 0;
+ break;
+ }
+ if ((i6 | 0) == -1001e3) {
+ i5 = (HEAP32[i2 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i7 = -1001e3 - i6 | 0;
+ i6 = HEAP32[i3 >> 2] | 0;
+ if ((HEAP32[i6 + 8 >> 2] | 0) != 22 ? (i5 = HEAP32[i6 >> 2] | 0, (i7 | 0) <= (HEAPU8[i5 + 6 | 0] | 0 | 0)) : 0) {
+ i5 = i5 + (i7 + -1 << 4) + 16 | 0;
+ } else {
+ i5 = 5192;
+ }
+ } else {
+ i5 = (HEAP32[i3 >> 2] | 0) + (i6 << 4) | 0;
+ i5 = i5 >>> 0 < (HEAP32[i2 + 8 >> 2] | 0) >>> 0 ? i5 : 5192;
+ }
+ } while (0);
+ do {
+ if ((i4 | 0) <= 0) {
+ if (!((i4 | 0) < -1000999)) {
+ i2 = (HEAP32[i2 + 8 >> 2] | 0) + (i4 << 4) | 0;
+ break;
+ }
+ if ((i4 | 0) == -1001e3) {
+ i2 = (HEAP32[i2 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i2 = -1001e3 - i4 | 0;
+ i3 = HEAP32[i3 >> 2] | 0;
+ if ((HEAP32[i3 + 8 >> 2] | 0) == 22) {
+ i7 = 0;
+ STACKTOP = i1;
+ return i7 | 0;
+ }
+ i3 = HEAP32[i3 >> 2] | 0;
+ if ((i2 | 0) > (HEAPU8[i3 + 6 | 0] | 0 | 0)) {
+ i7 = 0;
+ STACKTOP = i1;
+ return i7 | 0;
+ } else {
+ i2 = i3 + (i2 + -1 << 4) + 16 | 0;
+ break;
+ }
+ } else {
+ i3 = (HEAP32[i3 >> 2] | 0) + (i4 << 4) | 0;
+ i2 = i3 >>> 0 < (HEAP32[i2 + 8 >> 2] | 0) >>> 0 ? i3 : 5192;
+ }
+ } while (0);
+ if ((i5 | 0) == 5192 | (i2 | 0) == 5192) {
+ i7 = 0;
+ STACKTOP = i1;
+ return i7 | 0;
+ }
+ if ((HEAP32[i5 + 8 >> 2] | 0) == (HEAP32[i2 + 8 >> 2] | 0)) {
+ i2 = (_luaV_equalobj_(0, i5, i2) | 0) != 0;
+ } else {
+ i2 = 0;
+ }
+ i7 = i2 & 1;
+ STACKTOP = i1;
+ return i7 | 0;
+}
+function _luaO_chunkid(i1, i4, i6) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i6 = i6 | 0;
+ var i2 = 0, i3 = 0, i5 = 0, i7 = 0, i8 = 0, i9 = 0;
+ i2 = STACKTOP;
+ i3 = _strlen(i4 | 0) | 0;
+ i5 = HEAP8[i4] | 0;
+ if (i5 << 24 >> 24 == 64) {
+ if (i3 >>> 0 > i6 >>> 0) {
+ HEAP8[i1 + 0 | 0] = HEAP8[5552 | 0] | 0;
+ HEAP8[i1 + 1 | 0] = HEAP8[5553 | 0] | 0;
+ HEAP8[i1 + 2 | 0] = HEAP8[5554 | 0] | 0;
+ _memcpy(i1 + 3 | 0, i4 + (4 - i6 + i3) | 0, i6 + -3 | 0) | 0;
+ STACKTOP = i2;
+ return;
+ } else {
+ _memcpy(i1 | 0, i4 + 1 | 0, i3 | 0) | 0;
+ STACKTOP = i2;
+ return;
+ }
+ } else if (i5 << 24 >> 24 == 61) {
+ i4 = i4 + 1 | 0;
+ if (i3 >>> 0 > i6 >>> 0) {
+ i9 = i6 + -1 | 0;
+ _memcpy(i1 | 0, i4 | 0, i9 | 0) | 0;
+ HEAP8[i1 + i9 | 0] = 0;
+ STACKTOP = i2;
+ return;
+ } else {
+ _memcpy(i1 | 0, i4 | 0, i3 | 0) | 0;
+ STACKTOP = i2;
+ return;
+ }
+ } else {
+ i5 = _strchr(i4, 10) | 0;
+ i9 = i1 + 0 | 0;
+ i8 = 5560 | 0;
+ i7 = i9 + 9 | 0;
+ do {
+ HEAP8[i9] = HEAP8[i8] | 0;
+ i9 = i9 + 1 | 0;
+ i8 = i8 + 1 | 0;
+ } while ((i9 | 0) < (i7 | 0));
+ i7 = i1 + 9 | 0;
+ i6 = i6 + -15 | 0;
+ i8 = (i5 | 0) == 0;
+ if (i3 >>> 0 < i6 >>> 0 & i8) {
+ _memcpy(i7 | 0, i4 | 0, i3 | 0) | 0;
+ i3 = i3 + 9 | 0;
+ } else {
+ if (!i8) {
+ i3 = i5 - i4 | 0;
+ }
+ i3 = i3 >>> 0 > i6 >>> 0 ? i6 : i3;
+ _memcpy(i7 | 0, i4 | 0, i3 | 0) | 0;
+ i9 = i1 + (i3 + 9) | 0;
+ HEAP8[i9 + 0 | 0] = HEAP8[5552 | 0] | 0;
+ HEAP8[i9 + 1 | 0] = HEAP8[5553 | 0] | 0;
+ HEAP8[i9 + 2 | 0] = HEAP8[5554 | 0] | 0;
+ i3 = i3 + 12 | 0;
+ }
+ i9 = i1 + i3 | 0;
+ HEAP8[i9 + 0 | 0] = HEAP8[5576 | 0] | 0;
+ HEAP8[i9 + 1 | 0] = HEAP8[5577 | 0] | 0;
+ HEAP8[i9 + 2 | 0] = HEAP8[5578 | 0] | 0;
+ STACKTOP = i2;
+ return;
+ }
+}
+function _luaS_resize(i4, i1) {
+ i4 = i4 | 0;
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0;
+ i3 = STACKTOP;
+ i5 = HEAP32[i4 + 12 >> 2] | 0;
+ i2 = i5 + 24 | 0;
+ _luaC_runtilstate(i4, -5);
+ i5 = i5 + 32 | 0;
+ i8 = HEAP32[i5 >> 2] | 0;
+ L1 : do {
+ if ((i8 | 0) < (i1 | 0)) {
+ if ((i1 + 1 | 0) >>> 0 > 1073741823) {
+ _luaM_toobig(i4);
+ }
+ i7 = _luaM_realloc_(i4, HEAP32[i2 >> 2] | 0, i8 << 2, i1 << 2) | 0;
+ HEAP32[i2 >> 2] = i7;
+ i6 = HEAP32[i5 >> 2] | 0;
+ if ((i6 | 0) < (i1 | 0)) {
+ i8 = i6;
+ while (1) {
+ HEAP32[i7 + (i8 << 2) >> 2] = 0;
+ i8 = i8 + 1 | 0;
+ if ((i8 | 0) == (i1 | 0)) {
+ i8 = i6;
+ break L1;
+ }
+ i7 = HEAP32[i2 >> 2] | 0;
+ }
+ } else {
+ i8 = i6;
+ }
+ }
+ } while (0);
+ if ((i8 | 0) > 0) {
+ i6 = i1 + -1 | 0;
+ i7 = 0;
+ do {
+ i10 = (HEAP32[i2 >> 2] | 0) + (i7 << 2) | 0;
+ i9 = HEAP32[i10 >> 2] | 0;
+ HEAP32[i10 >> 2] = 0;
+ if ((i9 | 0) != 0) {
+ while (1) {
+ i8 = HEAP32[i9 >> 2] | 0;
+ i10 = HEAP32[i9 + 8 >> 2] & i6;
+ HEAP32[i9 >> 2] = HEAP32[(HEAP32[i2 >> 2] | 0) + (i10 << 2) >> 2];
+ HEAP32[(HEAP32[i2 >> 2] | 0) + (i10 << 2) >> 2] = i9;
+ i10 = i9 + 5 | 0;
+ HEAP8[i10] = HEAP8[i10] & 191;
+ if ((i8 | 0) == 0) {
+ break;
+ } else {
+ i9 = i8;
+ }
+ }
+ i8 = HEAP32[i5 >> 2] | 0;
+ }
+ i7 = i7 + 1 | 0;
+ } while ((i7 | 0) < (i8 | 0));
+ }
+ if ((i8 | 0) <= (i1 | 0)) {
+ HEAP32[i5 >> 2] = i1;
+ STACKTOP = i3;
+ return;
+ }
+ if ((i1 + 1 | 0) >>> 0 > 1073741823) {
+ _luaM_toobig(i4);
+ }
+ HEAP32[i2 >> 2] = _luaM_realloc_(i4, HEAP32[i2 >> 2] | 0, i8 << 2, i1 << 2) | 0;
+ HEAP32[i5 >> 2] = i1;
+ STACKTOP = i3;
+ return;
+}
+function _luaD_poscall(i6, i7) {
+ i6 = i6 | 0;
+ i7 = i7 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i5 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0;
+ i1 = STACKTOP;
+ i4 = i6 + 16 | 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ i5 = HEAPU8[i6 + 40 | 0] | 0;
+ if ((i5 & 6 | 0) == 0) {
+ i8 = i3 + 8 | 0;
+ } else {
+ if ((i5 & 2 | 0) != 0) {
+ i8 = i6 + 28 | 0;
+ i7 = i7 - (HEAP32[i8 >> 2] | 0) | 0;
+ _luaD_hook(i6, 1, -1);
+ i7 = (HEAP32[i8 >> 2] | 0) + i7 | 0;
+ }
+ i8 = i3 + 8 | 0;
+ HEAP32[i6 + 20 >> 2] = HEAP32[(HEAP32[i8 >> 2] | 0) + 28 >> 2];
+ }
+ i5 = HEAP32[i3 >> 2] | 0;
+ i9 = HEAP16[i3 + 16 >> 1] | 0;
+ i3 = i9 << 16 >> 16;
+ HEAP32[i4 >> 2] = HEAP32[i8 >> 2];
+ i4 = i6 + 8 | 0;
+ if (i9 << 16 >> 16 == 0) {
+ i9 = i5;
+ HEAP32[i4 >> 2] = i9;
+ i9 = i3 + 1 | 0;
+ STACKTOP = i1;
+ return i9 | 0;
+ } else {
+ i6 = i3;
+ }
+ while (1) {
+ if (!(i7 >>> 0 < (HEAP32[i4 >> 2] | 0) >>> 0)) {
+ break;
+ }
+ i8 = i5 + 16 | 0;
+ i11 = i7;
+ i10 = HEAP32[i11 + 4 >> 2] | 0;
+ i9 = i5;
+ HEAP32[i9 >> 2] = HEAP32[i11 >> 2];
+ HEAP32[i9 + 4 >> 2] = i10;
+ HEAP32[i5 + 8 >> 2] = HEAP32[i7 + 8 >> 2];
+ i6 = i6 + -1 | 0;
+ if ((i6 | 0) == 0) {
+ i2 = 12;
+ break;
+ } else {
+ i7 = i7 + 16 | 0;
+ i5 = i8;
+ }
+ }
+ if ((i2 | 0) == 12) {
+ HEAP32[i4 >> 2] = i8;
+ i11 = i3 + 1 | 0;
+ STACKTOP = i1;
+ return i11 | 0;
+ }
+ if ((i6 | 0) > 0) {
+ i2 = i6;
+ i7 = i5;
+ } else {
+ i11 = i5;
+ HEAP32[i4 >> 2] = i11;
+ i11 = i3 + 1 | 0;
+ STACKTOP = i1;
+ return i11 | 0;
+ }
+ while (1) {
+ i2 = i2 + -1 | 0;
+ HEAP32[i7 + 8 >> 2] = 0;
+ if ((i2 | 0) <= 0) {
+ break;
+ } else {
+ i7 = i7 + 16 | 0;
+ }
+ }
+ i11 = i5 + (i6 << 4) | 0;
+ HEAP32[i4 >> 2] = i11;
+ i11 = i3 + 1 | 0;
+ STACKTOP = i1;
+ return i11 | 0;
+}
+function _lua_rawset(i1, i4) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ var i2 = 0, i3 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0;
+ i2 = STACKTOP;
+ i5 = HEAP32[i1 + 16 >> 2] | 0;
+ do {
+ if ((i4 | 0) <= 0) {
+ if (!((i4 | 0) < -1000999)) {
+ i5 = (HEAP32[i1 + 8 >> 2] | 0) + (i4 << 4) | 0;
+ break;
+ }
+ if ((i4 | 0) == -1001e3) {
+ i5 = (HEAP32[i1 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i4 = -1001e3 - i4 | 0;
+ i5 = HEAP32[i5 >> 2] | 0;
+ if ((HEAP32[i5 + 8 >> 2] | 0) != 22 ? (i3 = HEAP32[i5 >> 2] | 0, (i4 | 0) <= (HEAPU8[i3 + 6 | 0] | 0 | 0)) : 0) {
+ i5 = i3 + (i4 + -1 << 4) + 16 | 0;
+ } else {
+ i5 = 5192;
+ }
+ } else {
+ i3 = (HEAP32[i5 >> 2] | 0) + (i4 << 4) | 0;
+ i5 = i3 >>> 0 < (HEAP32[i1 + 8 >> 2] | 0) >>> 0 ? i3 : 5192;
+ }
+ } while (0);
+ i4 = i1 + 8 | 0;
+ i6 = HEAP32[i4 >> 2] | 0;
+ i3 = _luaH_set(i1, HEAP32[i5 >> 2] | 0, i6 + -32 | 0) | 0;
+ i9 = i6 + -16 | 0;
+ i8 = HEAP32[i9 + 4 >> 2] | 0;
+ i7 = i3;
+ HEAP32[i7 >> 2] = HEAP32[i9 >> 2];
+ HEAP32[i7 + 4 >> 2] = i8;
+ HEAP32[i3 + 8 >> 2] = HEAP32[i6 + -8 >> 2];
+ HEAP8[(HEAP32[i5 >> 2] | 0) + 6 | 0] = 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ if ((HEAP32[i3 + -8 >> 2] & 64 | 0) == 0) {
+ i9 = i3;
+ i9 = i9 + -32 | 0;
+ HEAP32[i4 >> 2] = i9;
+ STACKTOP = i2;
+ return;
+ }
+ if ((HEAP8[(HEAP32[i3 + -16 >> 2] | 0) + 5 | 0] & 3) == 0) {
+ i9 = i3;
+ i9 = i9 + -32 | 0;
+ HEAP32[i4 >> 2] = i9;
+ STACKTOP = i2;
+ return;
+ }
+ i5 = HEAP32[i5 >> 2] | 0;
+ if ((HEAP8[i5 + 5 | 0] & 4) == 0) {
+ i9 = i3;
+ i9 = i9 + -32 | 0;
+ HEAP32[i4 >> 2] = i9;
+ STACKTOP = i2;
+ return;
+ }
+ _luaC_barrierback_(i1, i5);
+ i9 = HEAP32[i4 >> 2] | 0;
+ i9 = i9 + -32 | 0;
+ HEAP32[i4 >> 2] = i9;
+ STACKTOP = i2;
+ return;
+}
+function _saveSetjmp(i4, i3, i1) {
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i1 = i1 | 0;
+ var i2 = 0;
+ setjmpId = setjmpId + 1 | 0;
+ HEAP32[i4 >> 2] = setjmpId;
+ while ((i2 | 0) < 40) {
+ if ((HEAP32[i1 + (i2 << 2) >> 2] | 0) == 0) {
+ HEAP32[i1 + (i2 << 2) >> 2] = setjmpId;
+ HEAP32[i1 + ((i2 << 2) + 4) >> 2] = i3;
+ HEAP32[i1 + ((i2 << 2) + 8) >> 2] = 0;
+ return 0;
+ }
+ i2 = i2 + 2 | 0;
+ }
+ _putchar(116);
+ _putchar(111);
+ _putchar(111);
+ _putchar(32);
+ _putchar(109);
+ _putchar(97);
+ _putchar(110);
+ _putchar(121);
+ _putchar(32);
+ _putchar(115);
+ _putchar(101);
+ _putchar(116);
+ _putchar(106);
+ _putchar(109);
+ _putchar(112);
+ _putchar(115);
+ _putchar(32);
+ _putchar(105);
+ _putchar(110);
+ _putchar(32);
+ _putchar(97);
+ _putchar(32);
+ _putchar(102);
+ _putchar(117);
+ _putchar(110);
+ _putchar(99);
+ _putchar(116);
+ _putchar(105);
+ _putchar(111);
+ _putchar(110);
+ _putchar(32);
+ _putchar(99);
+ _putchar(97);
+ _putchar(108);
+ _putchar(108);
+ _putchar(44);
+ _putchar(32);
+ _putchar(98);
+ _putchar(117);
+ _putchar(105);
+ _putchar(108);
+ _putchar(100);
+ _putchar(32);
+ _putchar(119);
+ _putchar(105);
+ _putchar(116);
+ _putchar(104);
+ _putchar(32);
+ _putchar(97);
+ _putchar(32);
+ _putchar(104);
+ _putchar(105);
+ _putchar(103);
+ _putchar(104);
+ _putchar(101);
+ _putchar(114);
+ _putchar(32);
+ _putchar(118);
+ _putchar(97);
+ _putchar(108);
+ _putchar(117);
+ _putchar(101);
+ _putchar(32);
+ _putchar(102);
+ _putchar(111);
+ _putchar(114);
+ _putchar(32);
+ _putchar(77);
+ _putchar(65);
+ _putchar(88);
+ _putchar(95);
+ _putchar(83);
+ _putchar(69);
+ _putchar(84);
+ _putchar(74);
+ _putchar(77);
+ _putchar(80);
+ _putchar(83);
+ _putchar(10);
+ abort(0);
+ return 0;
+}
+function _lua_newthread(i5) {
+ i5 = i5 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0;
+ i1 = STACKTOP;
+ i3 = i5 + 12 | 0;
+ if ((HEAP32[(HEAP32[i3 >> 2] | 0) + 12 >> 2] | 0) > 0) {
+ _luaC_step(i5);
+ }
+ i2 = _luaC_newobj(i5, 8, 112, 0, 0) | 0;
+ i6 = i5 + 8 | 0;
+ i4 = HEAP32[i6 >> 2] | 0;
+ HEAP32[i4 >> 2] = i2;
+ HEAP32[i4 + 8 >> 2] = 72;
+ HEAP32[i6 >> 2] = (HEAP32[i6 >> 2] | 0) + 16;
+ HEAP32[i2 + 12 >> 2] = HEAP32[i3 >> 2];
+ i6 = i2 + 28 | 0;
+ HEAP32[i6 >> 2] = 0;
+ i4 = i2 + 16 | 0;
+ HEAP32[i4 >> 2] = 0;
+ i3 = i2 + 32 | 0;
+ HEAP32[i3 >> 2] = 0;
+ HEAP32[i2 + 64 >> 2] = 0;
+ HEAP16[i2 + 38 >> 1] = 0;
+ i9 = i2 + 52 | 0;
+ HEAP32[i9 >> 2] = 0;
+ i8 = i2 + 40 | 0;
+ HEAP8[i8] = 0;
+ i10 = i2 + 44 | 0;
+ HEAP32[i10 >> 2] = 0;
+ HEAP8[i2 + 41 | 0] = 1;
+ i7 = i2 + 48 | 0;
+ HEAP32[i7 >> 2] = 0;
+ HEAP32[i2 + 56 >> 2] = 0;
+ HEAP16[i2 + 36 >> 1] = 1;
+ HEAP8[i2 + 6 | 0] = 0;
+ HEAP32[i2 + 68 >> 2] = 0;
+ HEAP8[i8] = HEAP8[i5 + 40 | 0] | 0;
+ i8 = HEAP32[i5 + 44 >> 2] | 0;
+ HEAP32[i10 >> 2] = i8;
+ HEAP32[i9 >> 2] = HEAP32[i5 + 52 >> 2];
+ HEAP32[i7 >> 2] = i8;
+ i5 = _luaM_realloc_(i5, 0, 0, 640) | 0;
+ HEAP32[i6 >> 2] = i5;
+ HEAP32[i3 >> 2] = 40;
+ i6 = 0;
+ do {
+ HEAP32[i5 + (i6 << 4) + 8 >> 2] = 0;
+ i6 = i6 + 1 | 0;
+ } while ((i6 | 0) != 40);
+ HEAP32[i2 + 24 >> 2] = i5 + ((HEAP32[i3 >> 2] | 0) + -5 << 4);
+ i10 = i2 + 72 | 0;
+ HEAP32[i2 + 80 >> 2] = 0;
+ HEAP32[i2 + 84 >> 2] = 0;
+ HEAP8[i2 + 90 | 0] = 0;
+ HEAP32[i10 >> 2] = i5;
+ HEAP32[i2 + 8 >> 2] = i5 + 16;
+ HEAP32[i5 + 8 >> 2] = 0;
+ HEAP32[i2 + 76 >> 2] = i5 + 336;
+ HEAP32[i4 >> 2] = i10;
+ STACKTOP = i1;
+ return i2 | 0;
+}
+function _luaK_self(i2, i5, i3) {
+ i2 = i2 | 0;
+ i5 = i5 | 0;
+ i3 = i3 | 0;
+ var i1 = 0, i4 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0;
+ i1 = STACKTOP;
+ _luaK_dischargevars(i2, i5);
+ if ((HEAP32[i5 >> 2] | 0) == 6) {
+ i6 = i5 + 8 | 0;
+ i8 = HEAP32[i6 >> 2] | 0;
+ if ((HEAP32[i5 + 16 >> 2] | 0) != (HEAP32[i5 + 20 >> 2] | 0)) {
+ if ((i8 | 0) < (HEAPU8[i2 + 46 | 0] | 0 | 0)) {
+ i7 = 6;
+ } else {
+ _exp2reg(i2, i5, i8);
+ }
+ }
+ } else {
+ i6 = i5 + 8 | 0;
+ i7 = 6;
+ }
+ if ((i7 | 0) == 6) {
+ _luaK_exp2nextreg(i2, i5);
+ }
+ i8 = HEAP32[i6 >> 2] | 0;
+ if (((HEAP32[i5 >> 2] | 0) == 6 ? (i8 & 256 | 0) == 0 : 0) ? (HEAPU8[i2 + 46 | 0] | 0 | 0) <= (i8 | 0) : 0) {
+ i10 = i2 + 48 | 0;
+ HEAP8[i10] = (HEAP8[i10] | 0) + -1 << 24 >> 24;
+ }
+ i7 = i2 + 48 | 0;
+ HEAP32[i6 >> 2] = HEAPU8[i7] | 0;
+ HEAP32[i5 >> 2] = 6;
+ i10 = HEAP8[i7] | 0;
+ i5 = (i10 & 255) + 2 | 0;
+ i9 = (HEAP32[i2 >> 2] | 0) + 78 | 0;
+ do {
+ if (i5 >>> 0 > (HEAPU8[i9] | 0) >>> 0) {
+ if (i5 >>> 0 > 249) {
+ _luaX_syntaxerror(HEAP32[i2 + 12 >> 2] | 0, 10536);
+ } else {
+ HEAP8[i9] = i5;
+ i4 = HEAP8[i7] | 0;
+ break;
+ }
+ } else {
+ i4 = i10;
+ }
+ } while (0);
+ HEAP8[i7] = (i4 & 255) + 2;
+ i10 = HEAP32[i6 >> 2] | 0;
+ _luaK_code(i2, i8 << 23 | i10 << 6 | (_luaK_exp2RK(i2, i3) | 0) << 14 | 12) | 0;
+ if ((HEAP32[i3 >> 2] | 0) != 6) {
+ STACKTOP = i1;
+ return;
+ }
+ i3 = HEAP32[i3 + 8 >> 2] | 0;
+ if ((i3 & 256 | 0) != 0) {
+ STACKTOP = i1;
+ return;
+ }
+ if ((HEAPU8[i2 + 46 | 0] | 0 | 0) > (i3 | 0)) {
+ STACKTOP = i1;
+ return;
+ }
+ HEAP8[i7] = (HEAP8[i7] | 0) + -1 << 24 >> 24;
+ STACKTOP = i1;
+ return;
+}
+function _luaD_rawrunprotected(i10, i9, i11) {
+ i10 = i10 | 0;
+ i9 = i9 | 0;
+ i11 = i11 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i12 = 0, i13 = 0;
+ i7 = STACKTOP;
+ STACKTOP = STACKTOP + 176 | 0;
+ i8 = STACKTOP;
+ STACKTOP = STACKTOP + 168 | 0;
+ HEAP32[i8 >> 2] = 0;
+ i6 = i7;
+ i5 = i10 + 38 | 0;
+ i4 = HEAP16[i5 >> 1] | 0;
+ i1 = i6 + 160 | 0;
+ HEAP32[i1 >> 2] = 0;
+ i3 = i10 + 64 | 0;
+ HEAP32[i6 >> 2] = HEAP32[i3 >> 2];
+ HEAP32[i3 >> 2] = i6;
+ _saveSetjmp(i6 + 4 | 0, 1, i8 | 0) | 0;
+ __THREW__ = 0;
+ i13 = __THREW__;
+ __THREW__ = 0;
+ if ((i13 | 0) != 0 & (threwValue | 0) != 0) {
+ i12 = _testSetjmp(HEAP32[i13 >> 2] | 0, i8) | 0;
+ if ((i12 | 0) == 0) {
+ _longjmp(i13 | 0, threwValue | 0);
+ }
+ tempRet0 = threwValue;
+ } else {
+ i12 = -1;
+ }
+ if ((i12 | 0) == 1) {
+ i12 = tempRet0;
+ } else {
+ i12 = 0;
+ }
+ while (1) {
+ if ((i12 | 0) != 0) {
+ i2 = 6;
+ break;
+ }
+ __THREW__ = 0;
+ invoke_vii(i9 | 0, i10 | 0, i11 | 0);
+ i13 = __THREW__;
+ __THREW__ = 0;
+ if ((i13 | 0) != 0 & (threwValue | 0) != 0) {
+ i12 = _testSetjmp(HEAP32[i13 >> 2] | 0, i8) | 0;
+ if ((i12 | 0) == 0) {
+ _longjmp(i13 | 0, threwValue | 0);
+ }
+ tempRet0 = threwValue;
+ } else {
+ i12 = -1;
+ }
+ if ((i12 | 0) == 1) {
+ i12 = tempRet0;
+ } else {
+ break;
+ }
+ }
+ if ((i2 | 0) == 6) {
+ i13 = HEAP32[i6 >> 2] | 0;
+ HEAP32[i3 >> 2] = i13;
+ HEAP16[i5 >> 1] = i4;
+ i13 = HEAP32[i1 >> 2] | 0;
+ STACKTOP = i7;
+ return i13 | 0;
+ }
+ i13 = HEAP32[i6 >> 2] | 0;
+ HEAP32[i3 >> 2] = i13;
+ HEAP16[i5 >> 1] = i4;
+ i13 = HEAP32[i1 >> 2] | 0;
+ STACKTOP = i7;
+ return i13 | 0;
+}
+function _luaB_tonumber(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, d6 = 0.0, i7 = 0, d8 = 0.0, i9 = 0, i10 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = i2 + 4 | 0;
+ i4 = i2;
+ do {
+ if ((_lua_type(i1, 2) | 0) >= 1) {
+ i9 = _luaL_checklstring(i1, 1, i4) | 0;
+ i3 = i9 + (HEAP32[i4 >> 2] | 0) | 0;
+ i5 = _luaL_checkinteger(i1, 2) | 0;
+ if (!((i5 + -2 | 0) >>> 0 < 35)) {
+ _luaL_argerror(i1, 2, 9648) | 0;
+ }
+ i10 = _strspn(i9, 9672) | 0;
+ i7 = i9 + i10 | 0;
+ i4 = HEAP8[i7] | 0;
+ if (i4 << 24 >> 24 == 43) {
+ i4 = 0;
+ i7 = i9 + (i10 + 1) | 0;
+ } else if (i4 << 24 >> 24 == 45) {
+ i4 = 1;
+ i7 = i9 + (i10 + 1) | 0;
+ } else {
+ i4 = 0;
+ }
+ if ((_isalnum(HEAPU8[i7] | 0 | 0) | 0) != 0) {
+ d6 = +(i5 | 0);
+ d8 = 0.0;
+ do {
+ i9 = HEAP8[i7] | 0;
+ i10 = i9 & 255;
+ if ((i10 + -48 | 0) >>> 0 < 10) {
+ i9 = (i9 << 24 >> 24) + -48 | 0;
+ } else {
+ i9 = (_toupper(i10 | 0) | 0) + -55 | 0;
+ }
+ if ((i9 | 0) >= (i5 | 0)) {
+ break;
+ }
+ d8 = d6 * d8 + +(i9 | 0);
+ i7 = i7 + 1 | 0;
+ } while ((_isalnum(HEAPU8[i7] | 0 | 0) | 0) != 0);
+ if ((i7 + (_strspn(i7, 9672) | 0) | 0) == (i3 | 0)) {
+ if ((i4 | 0) != 0) {
+ d8 = -d8;
+ }
+ _lua_pushnumber(i1, d8);
+ STACKTOP = i2;
+ return 1;
+ }
+ }
+ } else {
+ d6 = +_lua_tonumberx(i1, 1, i3);
+ if ((HEAP32[i3 >> 2] | 0) == 0) {
+ _luaL_checkany(i1, 1);
+ break;
+ }
+ _lua_pushnumber(i1, d6);
+ STACKTOP = i2;
+ return 1;
+ }
+ } while (0);
+ _lua_pushnil(i1);
+ STACKTOP = i2;
+ return 1;
+}
+function _luaK_storevar(i1, i5, i3) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i4 = 0, i6 = 0, i7 = 0;
+ i2 = STACKTOP;
+ i7 = HEAP32[i5 >> 2] | 0;
+ if ((i7 | 0) == 7) {
+ if (((HEAP32[i3 >> 2] | 0) == 6 ? (i6 = HEAP32[i3 + 8 >> 2] | 0, (i6 & 256 | 0) == 0) : 0) ? (HEAPU8[i1 + 46 | 0] | 0) <= (i6 | 0) : 0) {
+ i7 = i1 + 48 | 0;
+ HEAP8[i7] = (HEAP8[i7] | 0) + -1 << 24 >> 24;
+ }
+ _exp2reg(i1, i3, HEAP32[i5 + 8 >> 2] | 0);
+ STACKTOP = i2;
+ return;
+ } else if ((i7 | 0) == 9) {
+ i4 = i5 + 8 | 0;
+ i7 = (HEAP8[i4 + 3 | 0] | 0) == 7 ? 10 : 8;
+ i6 = _luaK_exp2RK(i1, i3) | 0;
+ _luaK_code(i1, i6 << 14 | i7 | HEAPU8[i4 + 2 | 0] << 6 | HEAPU16[i4 >> 1] << 23) | 0;
+ } else if ((i7 | 0) == 8) {
+ _luaK_dischargevars(i1, i3);
+ if ((HEAP32[i3 >> 2] | 0) == 6) {
+ i6 = i3 + 8 | 0;
+ i7 = HEAP32[i6 >> 2] | 0;
+ if ((HEAP32[i3 + 16 >> 2] | 0) != (HEAP32[i3 + 20 >> 2] | 0)) {
+ if ((i7 | 0) < (HEAPU8[i1 + 46 | 0] | 0)) {
+ i4 = 12;
+ } else {
+ _exp2reg(i1, i3, i7);
+ i7 = HEAP32[i6 >> 2] | 0;
+ }
+ }
+ } else {
+ i6 = i3 + 8 | 0;
+ i4 = 12;
+ }
+ if ((i4 | 0) == 12) {
+ _luaK_exp2nextreg(i1, i3);
+ i7 = HEAP32[i6 >> 2] | 0;
+ }
+ _luaK_code(i1, i7 << 6 | HEAP32[i5 + 8 >> 2] << 23 | 9) | 0;
+ }
+ if ((HEAP32[i3 >> 2] | 0) != 6) {
+ STACKTOP = i2;
+ return;
+ }
+ i3 = HEAP32[i3 + 8 >> 2] | 0;
+ if ((i3 & 256 | 0) != 0) {
+ STACKTOP = i2;
+ return;
+ }
+ if ((HEAPU8[i1 + 46 | 0] | 0) > (i3 | 0)) {
+ STACKTOP = i2;
+ return;
+ }
+ i7 = i1 + 48 | 0;
+ HEAP8[i7] = (HEAP8[i7] | 0) + -1 << 24 >> 24;
+ STACKTOP = i2;
+ return;
+}
+function _closegoto(i10, i3, i9) {
+ i10 = i10 | 0;
+ i3 = i3 | 0;
+ i9 = i9 | 0;
+ var i1 = 0, i2 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i11 = 0, i12 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i7 = i1;
+ i4 = HEAP32[i10 + 48 >> 2] | 0;
+ i6 = HEAP32[i10 + 64 >> 2] | 0;
+ i2 = i6 + 12 | 0;
+ i5 = HEAP32[i2 >> 2] | 0;
+ i8 = HEAP8[i5 + (i3 << 4) + 12 | 0] | 0;
+ if ((i8 & 255) < (HEAPU8[i9 + 12 | 0] | 0)) {
+ i11 = HEAP32[i10 + 52 >> 2] | 0;
+ i12 = HEAP32[i5 + (i3 << 4) + 8 >> 2] | 0;
+ i8 = (HEAP32[(HEAP32[(HEAP32[i4 >> 2] | 0) + 24 >> 2] | 0) + ((HEAP16[(HEAP32[HEAP32[(HEAP32[i4 + 12 >> 2] | 0) + 64 >> 2] >> 2] | 0) + ((HEAP32[i4 + 40 >> 2] | 0) + (i8 & 255) << 1) >> 1] | 0) * 12 | 0) >> 2] | 0) + 16 | 0;
+ HEAP32[i7 >> 2] = (HEAP32[i5 + (i3 << 4) >> 2] | 0) + 16;
+ HEAP32[i7 + 4 >> 2] = i12;
+ HEAP32[i7 + 8 >> 2] = i8;
+ _semerror(i10, _luaO_pushfstring(i11, 6248, i7) | 0);
+ }
+ _luaK_patchlist(i4, HEAP32[i5 + (i3 << 4) + 4 >> 2] | 0, HEAP32[i9 + 4 >> 2] | 0);
+ i4 = i6 + 16 | 0;
+ i5 = (HEAP32[i4 >> 2] | 0) + -1 | 0;
+ if ((i5 | 0) <= (i3 | 0)) {
+ i12 = i5;
+ HEAP32[i4 >> 2] = i12;
+ STACKTOP = i1;
+ return;
+ }
+ do {
+ i12 = HEAP32[i2 >> 2] | 0;
+ i5 = i12 + (i3 << 4) | 0;
+ i3 = i3 + 1 | 0;
+ i12 = i12 + (i3 << 4) | 0;
+ HEAP32[i5 + 0 >> 2] = HEAP32[i12 + 0 >> 2];
+ HEAP32[i5 + 4 >> 2] = HEAP32[i12 + 4 >> 2];
+ HEAP32[i5 + 8 >> 2] = HEAP32[i12 + 8 >> 2];
+ HEAP32[i5 + 12 >> 2] = HEAP32[i12 + 12 >> 2];
+ i5 = (HEAP32[i4 >> 2] | 0) + -1 | 0;
+ } while ((i3 | 0) < (i5 | 0));
+ HEAP32[i4 >> 2] = i5;
+ STACKTOP = i1;
+ return;
+}
+function _luaM_growaux_(i4, i5, i1, i7, i8, i9) {
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ i1 = i1 | 0;
+ i7 = i7 | 0;
+ i8 = i8 | 0;
+ i9 = i9 | 0;
+ var i2 = 0, i3 = 0, i6 = 0, i10 = 0, i11 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i10 = i2;
+ i6 = HEAP32[i1 >> 2] | 0;
+ if ((i6 | 0) >= ((i8 | 0) / 2 | 0 | 0)) {
+ if ((i6 | 0) < (i8 | 0)) {
+ i3 = i8;
+ } else {
+ HEAP32[i10 >> 2] = i9;
+ HEAP32[i10 + 4 >> 2] = i8;
+ _luaG_runerror(i4, 4112, i10);
+ }
+ } else {
+ i3 = i6 << 1;
+ i3 = (i3 | 0) < 4 ? 4 : i3;
+ }
+ if ((i3 + 1 | 0) >>> 0 > (4294967293 / (i7 >>> 0) | 0) >>> 0) {
+ _luaM_toobig(i4);
+ }
+ i6 = Math_imul(i6, i7) | 0;
+ i8 = Math_imul(i3, i7) | 0;
+ i9 = HEAP32[i4 + 12 >> 2] | 0;
+ i7 = (i5 | 0) != 0;
+ i11 = i9 + 4 | 0;
+ i10 = FUNCTION_TABLE_iiiii[HEAP32[i9 >> 2] & 3](HEAP32[i11 >> 2] | 0, i5, i6, i8) | 0;
+ if ((i10 | 0) != 0 | (i8 | 0) == 0) {
+ i5 = i9 + 12 | 0;
+ i4 = HEAP32[i5 >> 2] | 0;
+ i6 = 0 - i6 | 0;
+ i11 = i7 ? i6 : 0;
+ i11 = i11 + i8 | 0;
+ i11 = i11 + i4 | 0;
+ HEAP32[i5 >> 2] = i11;
+ HEAP32[i1 >> 2] = i3;
+ STACKTOP = i2;
+ return i10 | 0;
+ }
+ if ((HEAP8[i9 + 63 | 0] | 0) == 0) {
+ _luaD_throw(i4, 4);
+ }
+ _luaC_fullgc(i4, 1);
+ i10 = FUNCTION_TABLE_iiiii[HEAP32[i9 >> 2] & 3](HEAP32[i11 >> 2] | 0, i5, i6, i8) | 0;
+ if ((i10 | 0) == 0) {
+ _luaD_throw(i4, 4);
+ } else {
+ i5 = i9 + 12 | 0;
+ i4 = HEAP32[i5 >> 2] | 0;
+ i6 = 0 - i6 | 0;
+ i11 = i7 ? i6 : 0;
+ i11 = i11 + i8 | 0;
+ i11 = i11 + i4 | 0;
+ HEAP32[i5 >> 2] = i11;
+ HEAP32[i1 >> 2] = i3;
+ STACKTOP = i2;
+ return i10 | 0;
+ }
+ return 0;
+}
+function _luaD_hook(i5, i14, i13) {
+ i5 = i5 | 0;
+ i14 = i14 | 0;
+ i13 = i13 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i15 = 0, i16 = 0;
+ i11 = STACKTOP;
+ STACKTOP = STACKTOP + 112 | 0;
+ i4 = i11;
+ i3 = HEAP32[i5 + 52 >> 2] | 0;
+ if ((i3 | 0) == 0) {
+ STACKTOP = i11;
+ return;
+ }
+ i8 = i5 + 41 | 0;
+ if ((HEAP8[i8] | 0) == 0) {
+ STACKTOP = i11;
+ return;
+ }
+ i10 = HEAP32[i5 + 16 >> 2] | 0;
+ i6 = i5 + 8 | 0;
+ i15 = HEAP32[i6 >> 2] | 0;
+ i1 = i5 + 28 | 0;
+ i16 = i15;
+ i12 = HEAP32[i1 >> 2] | 0;
+ i7 = i16 - i12 | 0;
+ i9 = i10 + 4 | 0;
+ i12 = (HEAP32[i9 >> 2] | 0) - i12 | 0;
+ HEAP32[i4 >> 2] = i14;
+ HEAP32[i4 + 20 >> 2] = i13;
+ HEAP32[i4 + 96 >> 2] = i10;
+ do {
+ if (((HEAP32[i5 + 24 >> 2] | 0) - i16 | 0) < 336) {
+ i14 = HEAP32[i5 + 32 >> 2] | 0;
+ if ((i14 | 0) > 1e6) {
+ _luaD_throw(i5, 6);
+ }
+ i13 = (i7 >> 4) + 25 | 0;
+ i14 = i14 << 1;
+ i14 = (i14 | 0) > 1e6 ? 1e6 : i14;
+ i13 = (i14 | 0) < (i13 | 0) ? i13 : i14;
+ if ((i13 | 0) > 1e6) {
+ _luaD_reallocstack(i5, 1000200);
+ _luaG_runerror(i5, 2224, i4);
+ } else {
+ _luaD_reallocstack(i5, i13);
+ i2 = HEAP32[i6 >> 2] | 0;
+ break;
+ }
+ } else {
+ i2 = i15;
+ }
+ } while (0);
+ HEAP32[i9 >> 2] = i2 + 320;
+ HEAP8[i8] = 0;
+ i16 = i10 + 18 | 0;
+ HEAP8[i16] = HEAPU8[i16] | 2;
+ FUNCTION_TABLE_vii[i3 & 15](i5, i4);
+ HEAP8[i8] = 1;
+ HEAP32[i9 >> 2] = (HEAP32[i1 >> 2] | 0) + i12;
+ HEAP32[i6 >> 2] = (HEAP32[i1 >> 2] | 0) + i7;
+ HEAP8[i16] = HEAP8[i16] & 253;
+ STACKTOP = i11;
+ return;
+}
+function _funcargs(i10, i2, i1) {
+ i10 = i10 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 32 | 0;
+ i6 = i3;
+ i9 = i10 + 48 | 0;
+ i5 = HEAP32[i9 >> 2] | 0;
+ i7 = i10 + 16 | 0;
+ i8 = HEAP32[i7 >> 2] | 0;
+ if ((i8 | 0) == 289) {
+ i9 = _luaK_stringK(i5, HEAP32[i10 + 24 >> 2] | 0) | 0;
+ HEAP32[i6 + 16 >> 2] = -1;
+ HEAP32[i6 + 20 >> 2] = -1;
+ HEAP32[i6 >> 2] = 4;
+ HEAP32[i6 + 8 >> 2] = i9;
+ _luaX_next(i10);
+ } else if ((i8 | 0) == 40) {
+ _luaX_next(i10);
+ if ((HEAP32[i7 >> 2] | 0) == 41) {
+ HEAP32[i6 >> 2] = 0;
+ } else {
+ _subexpr(i10, i6, 0) | 0;
+ if ((HEAP32[i7 >> 2] | 0) == 44) {
+ do {
+ _luaX_next(i10);
+ _luaK_exp2nextreg(HEAP32[i9 >> 2] | 0, i6);
+ _subexpr(i10, i6, 0) | 0;
+ } while ((HEAP32[i7 >> 2] | 0) == 44);
+ }
+ _luaK_setreturns(i5, i6, -1);
+ }
+ _check_match(i10, 41, 40, i1);
+ } else if ((i8 | 0) == 123) {
+ _constructor(i10, i6);
+ } else {
+ _luaX_syntaxerror(i10, 6624);
+ }
+ i8 = i2 + 8 | 0;
+ i7 = HEAP32[i8 >> 2] | 0;
+ i9 = HEAP32[i6 >> 2] | 0;
+ if ((i9 | 0) == 0) {
+ i4 = 13;
+ } else if ((i9 | 0) == 13 | (i9 | 0) == 12) {
+ i6 = 0;
+ } else {
+ _luaK_exp2nextreg(i5, i6);
+ i4 = 13;
+ }
+ if ((i4 | 0) == 13) {
+ i6 = (HEAPU8[i5 + 48 | 0] | 0) - i7 | 0;
+ }
+ i10 = _luaK_codeABC(i5, 29, i7, i6, 2) | 0;
+ HEAP32[i2 + 16 >> 2] = -1;
+ HEAP32[i2 + 20 >> 2] = -1;
+ HEAP32[i2 >> 2] = 12;
+ HEAP32[i8 >> 2] = i10;
+ _luaK_fixline(i5, i1);
+ HEAP8[i5 + 48 | 0] = i7 + 1;
+ STACKTOP = i3;
+ return;
+}
+function _luaD_reallocstack(i3, i6) {
+ i3 = i3 | 0;
+ i6 = i6 | 0;
+ var i1 = 0, i2 = 0, i4 = 0, i5 = 0, i7 = 0, i8 = 0, i9 = 0;
+ i1 = STACKTOP;
+ i2 = i3 + 28 | 0;
+ i8 = HEAP32[i2 >> 2] | 0;
+ i7 = i3 + 32 | 0;
+ i9 = HEAP32[i7 >> 2] | 0;
+ if ((i6 + 1 | 0) >>> 0 > 268435455) {
+ _luaM_toobig(i3);
+ }
+ i5 = _luaM_realloc_(i3, i8, i9 << 4, i6 << 4) | 0;
+ HEAP32[i2 >> 2] = i5;
+ if ((i9 | 0) < (i6 | 0)) {
+ do {
+ HEAP32[i5 + (i9 << 4) + 8 >> 2] = 0;
+ i9 = i9 + 1 | 0;
+ } while ((i9 | 0) != (i6 | 0));
+ }
+ HEAP32[i7 >> 2] = i6;
+ HEAP32[i3 + 24 >> 2] = i5 + (i6 + -5 << 4);
+ i6 = i3 + 8 | 0;
+ HEAP32[i6 >> 2] = i5 + ((HEAP32[i6 >> 2] | 0) - i8 >> 4 << 4);
+ i6 = HEAP32[i3 + 56 >> 2] | 0;
+ if ((i6 | 0) != 0 ? (i4 = i6 + 8 | 0, HEAP32[i4 >> 2] = i5 + ((HEAP32[i4 >> 2] | 0) - i8 >> 4 << 4), i4 = HEAP32[i6 >> 2] | 0, (i4 | 0) != 0) : 0) {
+ do {
+ i9 = i4 + 8 | 0;
+ HEAP32[i9 >> 2] = (HEAP32[i2 >> 2] | 0) + ((HEAP32[i9 >> 2] | 0) - i8 >> 4 << 4);
+ i4 = HEAP32[i4 >> 2] | 0;
+ } while ((i4 | 0) != 0);
+ }
+ i3 = HEAP32[i3 + 16 >> 2] | 0;
+ if ((i3 | 0) == 0) {
+ STACKTOP = i1;
+ return;
+ }
+ do {
+ i9 = i3 + 4 | 0;
+ HEAP32[i9 >> 2] = (HEAP32[i2 >> 2] | 0) + ((HEAP32[i9 >> 2] | 0) - i8 >> 4 << 4);
+ HEAP32[i3 >> 2] = (HEAP32[i2 >> 2] | 0) + ((HEAP32[i3 >> 2] | 0) - i8 >> 4 << 4);
+ if (!((HEAP8[i3 + 18 | 0] & 1) == 0)) {
+ i9 = i3 + 24 | 0;
+ HEAP32[i9 >> 2] = (HEAP32[i2 >> 2] | 0) + ((HEAP32[i9 >> 2] | 0) - i8 >> 4 << 4);
+ }
+ i3 = HEAP32[i3 + 8 >> 2] | 0;
+ } while ((i3 | 0) != 0);
+ STACKTOP = i1;
+ return;
+}
+function _luaF_close(i7, i6) {
+ i7 = i7 | 0;
+ i6 = i6 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i5 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0;
+ i1 = STACKTOP;
+ i4 = HEAP32[i7 + 12 >> 2] | 0;
+ i3 = i7 + 56 | 0;
+ i8 = HEAP32[i3 >> 2] | 0;
+ if ((i8 | 0) == 0) {
+ STACKTOP = i1;
+ return;
+ }
+ i5 = i4 + 60 | 0;
+ i2 = i4 + 68 | 0;
+ while (1) {
+ i9 = i8 + 8 | 0;
+ if ((HEAP32[i9 >> 2] | 0) >>> 0 < i6 >>> 0) {
+ i2 = 10;
+ break;
+ }
+ HEAP32[i3 >> 2] = HEAP32[i8 >> 2];
+ if ((((HEAPU8[i5] | 0) ^ 3) & ((HEAPU8[i8 + 5 | 0] | 0) ^ 3) | 0) == 0) {
+ if ((HEAP32[i9 >> 2] | 0) != (i8 + 16 | 0)) {
+ i9 = i8 + 16 | 0;
+ i10 = i9 + 4 | 0;
+ HEAP32[(HEAP32[i10 >> 2] | 0) + 16 >> 2] = HEAP32[i9 >> 2];
+ HEAP32[(HEAP32[i9 >> 2] | 0) + 20 >> 2] = HEAP32[i10 >> 2];
+ }
+ _luaM_realloc_(i7, i8, 32, 0) | 0;
+ } else {
+ i11 = i8 + 16 | 0;
+ i10 = i11 + 4 | 0;
+ HEAP32[(HEAP32[i10 >> 2] | 0) + 16 >> 2] = HEAP32[i11 >> 2];
+ HEAP32[(HEAP32[i11 >> 2] | 0) + 20 >> 2] = HEAP32[i10 >> 2];
+ i11 = HEAP32[i9 >> 2] | 0;
+ i10 = i8 + 16 | 0;
+ i14 = i11;
+ i13 = HEAP32[i14 + 4 >> 2] | 0;
+ i12 = i10;
+ HEAP32[i12 >> 2] = HEAP32[i14 >> 2];
+ HEAP32[i12 + 4 >> 2] = i13;
+ HEAP32[i8 + 24 >> 2] = HEAP32[i11 + 8 >> 2];
+ HEAP32[i9 >> 2] = i10;
+ HEAP32[i8 >> 2] = HEAP32[i2 >> 2];
+ HEAP32[i2 >> 2] = i8;
+ _luaC_checkupvalcolor(i4, i8);
+ }
+ i8 = HEAP32[i3 >> 2] | 0;
+ if ((i8 | 0) == 0) {
+ i2 = 10;
+ break;
+ }
+ }
+ if ((i2 | 0) == 10) {
+ STACKTOP = i1;
+ return;
+ }
+}
+function _luaK_dischargevars(i3, i1) {
+ i3 = i3 | 0;
+ i1 = i1 | 0;
+ var i2 = 0, i4 = 0, i5 = 0, i6 = 0;
+ i2 = STACKTOP;
+ switch (HEAP32[i1 >> 2] | 0) {
+ case 12:
+ {
+ HEAP32[i1 >> 2] = 6;
+ i6 = i1 + 8 | 0;
+ HEAP32[i6 >> 2] = (HEAP32[(HEAP32[(HEAP32[i3 >> 2] | 0) + 12 >> 2] | 0) + (HEAP32[i6 >> 2] << 2) >> 2] | 0) >>> 6 & 255;
+ STACKTOP = i2;
+ return;
+ }
+ case 13:
+ {
+ i6 = (HEAP32[(HEAP32[i3 >> 2] | 0) + 12 >> 2] | 0) + (HEAP32[i1 + 8 >> 2] << 2) | 0;
+ HEAP32[i6 >> 2] = HEAP32[i6 >> 2] & 8388607 | 16777216;
+ HEAP32[i1 >> 2] = 11;
+ STACKTOP = i2;
+ return;
+ }
+ case 9:
+ {
+ i4 = i1 + 8 | 0;
+ i5 = HEAP16[i4 >> 1] | 0;
+ if ((i5 & 256 | 0) == 0 ? (HEAPU8[i3 + 46 | 0] | 0) <= (i5 | 0) : 0) {
+ i6 = i3 + 48 | 0;
+ HEAP8[i6] = (HEAP8[i6] | 0) + -1 << 24 >> 24;
+ }
+ i5 = i4 + 2 | 0;
+ if ((HEAP8[i4 + 3 | 0] | 0) == 7) {
+ if ((HEAPU8[i3 + 46 | 0] | 0) > (HEAPU8[i5] | 0)) {
+ i6 = 7;
+ } else {
+ i6 = i3 + 48 | 0;
+ HEAP8[i6] = (HEAP8[i6] | 0) + -1 << 24 >> 24;
+ i6 = 7;
+ }
+ } else {
+ i6 = 6;
+ }
+ HEAP32[i4 >> 2] = _luaK_code(i3, HEAPU8[i5] << 23 | i6 | HEAP16[i4 >> 1] << 14) | 0;
+ HEAP32[i1 >> 2] = 11;
+ STACKTOP = i2;
+ return;
+ }
+ case 7:
+ {
+ HEAP32[i1 >> 2] = 6;
+ STACKTOP = i2;
+ return;
+ }
+ case 8:
+ {
+ i6 = i1 + 8 | 0;
+ HEAP32[i6 >> 2] = _luaK_code(i3, HEAP32[i6 >> 2] << 23 | 5) | 0;
+ HEAP32[i1 >> 2] = 11;
+ STACKTOP = i2;
+ return;
+ }
+ default:
+ {
+ STACKTOP = i2;
+ return;
+ }
+ }
+}
+function _gmatch_aux(i10) {
+ i10 = i10 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i11 = 0, i12 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 288 | 0;
+ i2 = i1 + 8 | 0;
+ i12 = i1 + 4 | 0;
+ i3 = i1;
+ i8 = _lua_tolstring(i10, -1001001, i12) | 0;
+ i7 = _lua_tolstring(i10, -1001002, i3) | 0;
+ i5 = i2 + 16 | 0;
+ HEAP32[i5 >> 2] = i10;
+ HEAP32[i2 >> 2] = 200;
+ HEAP32[i2 + 4 >> 2] = i8;
+ i9 = i2 + 8 | 0;
+ HEAP32[i9 >> 2] = i8 + (HEAP32[i12 >> 2] | 0);
+ HEAP32[i2 + 12 >> 2] = i7 + (HEAP32[i3 >> 2] | 0);
+ i3 = i8 + (_lua_tointegerx(i10, -1001003, 0) | 0) | 0;
+ if (i3 >>> 0 > (HEAP32[i9 >> 2] | 0) >>> 0) {
+ i12 = 0;
+ STACKTOP = i1;
+ return i12 | 0;
+ }
+ i11 = i2 + 20 | 0;
+ while (1) {
+ HEAP32[i11 >> 2] = 0;
+ i4 = _match(i2, i3, i7) | 0;
+ i12 = i3 + 1 | 0;
+ if ((i4 | 0) != 0) {
+ break;
+ }
+ if (i12 >>> 0 > (HEAP32[i9 >> 2] | 0) >>> 0) {
+ i2 = 0;
+ i6 = 7;
+ break;
+ } else {
+ i3 = i12;
+ }
+ }
+ if ((i6 | 0) == 7) {
+ STACKTOP = i1;
+ return i2 | 0;
+ }
+ _lua_pushinteger(i10, i4 - i8 + ((i4 | 0) == (i3 | 0)) | 0);
+ _lua_replace(i10, -1001003);
+ i7 = HEAP32[i11 >> 2] | 0;
+ i6 = (i7 | 0) != 0 | (i3 | 0) == 0 ? i7 : 1;
+ _luaL_checkstack(HEAP32[i5 >> 2] | 0, i6, 7200);
+ if ((i6 | 0) > 0) {
+ i5 = 0;
+ } else {
+ i12 = i7;
+ STACKTOP = i1;
+ return i12 | 0;
+ }
+ while (1) {
+ _push_onecapture(i2, i5, i3, i4);
+ i5 = i5 + 1 | 0;
+ if ((i5 | 0) == (i6 | 0)) {
+ i2 = i6;
+ break;
+ }
+ }
+ STACKTOP = i1;
+ return i2 | 0;
+}
+function _lua_rawseti(i1, i5, i3) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i4 = 0, i6 = 0;
+ i2 = STACKTOP;
+ i6 = HEAP32[i1 + 16 >> 2] | 0;
+ do {
+ if ((i5 | 0) <= 0) {
+ if (!((i5 | 0) < -1000999)) {
+ i5 = (HEAP32[i1 + 8 >> 2] | 0) + (i5 << 4) | 0;
+ break;
+ }
+ if ((i5 | 0) == -1001e3) {
+ i5 = (HEAP32[i1 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i5 = -1001e3 - i5 | 0;
+ i6 = HEAP32[i6 >> 2] | 0;
+ if ((HEAP32[i6 + 8 >> 2] | 0) != 22 ? (i4 = HEAP32[i6 >> 2] | 0, (i5 | 0) <= (HEAPU8[i4 + 6 | 0] | 0 | 0)) : 0) {
+ i5 = i4 + (i5 + -1 << 4) + 16 | 0;
+ } else {
+ i5 = 5192;
+ }
+ } else {
+ i4 = (HEAP32[i6 >> 2] | 0) + (i5 << 4) | 0;
+ i5 = i4 >>> 0 < (HEAP32[i1 + 8 >> 2] | 0) >>> 0 ? i4 : 5192;
+ }
+ } while (0);
+ i4 = i1 + 8 | 0;
+ _luaH_setint(i1, HEAP32[i5 >> 2] | 0, i3, (HEAP32[i4 >> 2] | 0) + -16 | 0);
+ i3 = HEAP32[i4 >> 2] | 0;
+ if ((HEAP32[i3 + -8 >> 2] & 64 | 0) == 0) {
+ i6 = i3;
+ i6 = i6 + -16 | 0;
+ HEAP32[i4 >> 2] = i6;
+ STACKTOP = i2;
+ return;
+ }
+ if ((HEAP8[(HEAP32[i3 + -16 >> 2] | 0) + 5 | 0] & 3) == 0) {
+ i6 = i3;
+ i6 = i6 + -16 | 0;
+ HEAP32[i4 >> 2] = i6;
+ STACKTOP = i2;
+ return;
+ }
+ i5 = HEAP32[i5 >> 2] | 0;
+ if ((HEAP8[i5 + 5 | 0] & 4) == 0) {
+ i6 = i3;
+ i6 = i6 + -16 | 0;
+ HEAP32[i4 >> 2] = i6;
+ STACKTOP = i2;
+ return;
+ }
+ _luaC_barrierback_(i1, i5);
+ i6 = HEAP32[i4 >> 2] | 0;
+ i6 = i6 + -16 | 0;
+ HEAP32[i4 >> 2] = i6;
+ STACKTOP = i2;
+ return;
+}
+function _ll_require(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 1056 | 0;
+ i5 = i2;
+ i4 = i2 + 8 | 0;
+ i3 = _luaL_checklstring(i1, 1, 0) | 0;
+ _lua_settop(i1, 1);
+ _lua_getfield(i1, -1001e3, 4576);
+ _lua_getfield(i1, 2, i3);
+ if ((_lua_toboolean(i1, -1) | 0) != 0) {
+ STACKTOP = i2;
+ return 1;
+ }
+ _lua_settop(i1, -2);
+ _luaL_buffinit(i1, i4);
+ _lua_getfield(i1, -1001001, 4240);
+ if ((_lua_type(i1, 3) | 0) == 5) {
+ i6 = 1;
+ } else {
+ _luaL_error(i1, 4656, i5) | 0;
+ i6 = 1;
+ }
+ while (1) {
+ _lua_rawgeti(i1, 3, i6);
+ if ((_lua_type(i1, -1) | 0) == 0) {
+ _lua_settop(i1, -2);
+ _luaL_pushresult(i4);
+ i7 = _lua_tolstring(i1, -1, 0) | 0;
+ HEAP32[i5 >> 2] = i3;
+ HEAP32[i5 + 4 >> 2] = i7;
+ _luaL_error(i1, 4696, i5) | 0;
+ }
+ _lua_pushstring(i1, i3) | 0;
+ _lua_callk(i1, 1, 2, 0, 0);
+ if ((_lua_type(i1, -2) | 0) == 6) {
+ break;
+ }
+ if ((_lua_isstring(i1, -2) | 0) == 0) {
+ _lua_settop(i1, -3);
+ } else {
+ _lua_settop(i1, -2);
+ _luaL_addvalue(i4);
+ }
+ i6 = i6 + 1 | 0;
+ }
+ _lua_pushstring(i1, i3) | 0;
+ _lua_insert(i1, -2);
+ _lua_callk(i1, 2, 1, 0, 0);
+ if ((_lua_type(i1, -1) | 0) != 0) {
+ _lua_setfield(i1, 2, i3);
+ }
+ _lua_getfield(i1, 2, i3);
+ if ((_lua_type(i1, -1) | 0) != 0) {
+ STACKTOP = i2;
+ return 1;
+ }
+ _lua_pushboolean(i1, 1);
+ _lua_pushvalue(i1, -1);
+ _lua_setfield(i1, 2, i3);
+ STACKTOP = i2;
+ return 1;
+}
+function _f_parser(i1, i3) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i4 = i2;
+ i5 = HEAP32[i3 >> 2] | 0;
+ i8 = HEAP32[i5 >> 2] | 0;
+ HEAP32[i5 >> 2] = i8 + -1;
+ if ((i8 | 0) == 0) {
+ i6 = _luaZ_fill(i5) | 0;
+ } else {
+ i8 = i5 + 4 | 0;
+ i6 = HEAP32[i8 >> 2] | 0;
+ HEAP32[i8 >> 2] = i6 + 1;
+ i6 = HEAPU8[i6] | 0;
+ }
+ i5 = HEAP32[i3 + 52 >> 2] | 0;
+ i7 = (i5 | 0) == 0;
+ if ((i6 | 0) == 27) {
+ if (!i7 ? (_strchr(i5, 98) | 0) == 0 : 0) {
+ HEAP32[i4 >> 2] = 2360;
+ HEAP32[i4 + 4 >> 2] = i5;
+ _luaO_pushfstring(i1, 2376, i4) | 0;
+ _luaD_throw(i1, 3);
+ }
+ i8 = _luaU_undump(i1, HEAP32[i3 >> 2] | 0, i3 + 4 | 0, HEAP32[i3 + 56 >> 2] | 0) | 0;
+ } else {
+ if (!i7 ? (_strchr(i5, 116) | 0) == 0 : 0) {
+ HEAP32[i4 >> 2] = 2368;
+ HEAP32[i4 + 4 >> 2] = i5;
+ _luaO_pushfstring(i1, 2376, i4) | 0;
+ _luaD_throw(i1, 3);
+ }
+ i8 = _luaY_parser(i1, HEAP32[i3 >> 2] | 0, i3 + 4 | 0, i3 + 16 | 0, HEAP32[i3 + 56 >> 2] | 0, i6) | 0;
+ }
+ i7 = i8 + 6 | 0;
+ if ((HEAP8[i7] | 0) == 0) {
+ STACKTOP = i2;
+ return;
+ }
+ i5 = i8 + 16 | 0;
+ i6 = i8 + 5 | 0;
+ i4 = 0;
+ do {
+ i3 = _luaF_newupval(i1) | 0;
+ HEAP32[i5 + (i4 << 2) >> 2] = i3;
+ if (!((HEAP8[i3 + 5 | 0] & 3) == 0) ? !((HEAP8[i6] & 4) == 0) : 0) {
+ _luaC_barrier_(i1, i8, i3);
+ }
+ i4 = i4 + 1 | 0;
+ } while ((i4 | 0) < (HEAPU8[i7] | 0));
+ STACKTOP = i2;
+ return;
+}
+function _str_rep(i9) {
+ i9 = i9 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i10 = 0, i11 = 0, i12 = 0;
+ i6 = STACKTOP;
+ STACKTOP = STACKTOP + 1056 | 0;
+ i4 = i6;
+ i2 = i6 + 1044 | 0;
+ i3 = i6 + 1040 | 0;
+ i1 = _luaL_checklstring(i9, 1, i2) | 0;
+ i8 = _luaL_checkinteger(i9, 2) | 0;
+ i5 = _luaL_optlstring(i9, 3, 7040, i3) | 0;
+ if ((i8 | 0) < 1) {
+ _lua_pushlstring(i9, 7040, 0) | 0;
+ i12 = 1;
+ STACKTOP = i6;
+ return i12 | 0;
+ }
+ i7 = HEAP32[i2 >> 2] | 0;
+ i10 = HEAP32[i3 >> 2] | 0;
+ i11 = i10 + i7 | 0;
+ if (!(i11 >>> 0 < i7 >>> 0) ? i11 >>> 0 < (2147483647 / (i8 >>> 0) | 0) >>> 0 : 0) {
+ i7 = (Math_imul(i10, i8 + -1 | 0) | 0) + (Math_imul(i7, i8) | 0) | 0;
+ i11 = _luaL_buffinitsize(i9, i4, i7) | 0;
+ _memcpy(i11 | 0, i1 | 0, HEAP32[i2 >> 2] | 0) | 0;
+ if ((i8 | 0) > 1) {
+ while (1) {
+ i8 = i8 + -1 | 0;
+ i9 = HEAP32[i2 >> 2] | 0;
+ i10 = i11 + i9 | 0;
+ i12 = HEAP32[i3 >> 2] | 0;
+ if ((i12 | 0) == 0) {
+ i12 = i9;
+ } else {
+ _memcpy(i10 | 0, i5 | 0, i12 | 0) | 0;
+ i12 = HEAP32[i2 >> 2] | 0;
+ i10 = i11 + ((HEAP32[i3 >> 2] | 0) + i9) | 0;
+ }
+ _memcpy(i10 | 0, i1 | 0, i12 | 0) | 0;
+ if ((i8 | 0) <= 1) {
+ break;
+ } else {
+ i11 = i10;
+ }
+ }
+ }
+ _luaL_pushresultsize(i4, i7);
+ i12 = 1;
+ STACKTOP = i6;
+ return i12 | 0;
+ }
+ i12 = _luaL_error(i9, 7168, i4) | 0;
+ STACKTOP = i6;
+ return i12 | 0;
+}
+function ___strchrnul(i6, i2) {
+ i6 = i6 | 0;
+ i2 = i2 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i5 = 0, i7 = 0;
+ i1 = STACKTOP;
+ i3 = i2 & 255;
+ if ((i3 | 0) == 0) {
+ i7 = i6 + (_strlen(i6 | 0) | 0) | 0;
+ STACKTOP = i1;
+ return i7 | 0;
+ }
+ L5 : do {
+ if ((i6 & 3 | 0) != 0) {
+ i4 = i2 & 255;
+ while (1) {
+ i5 = HEAP8[i6] | 0;
+ if (i5 << 24 >> 24 == 0) {
+ i4 = i6;
+ i5 = 13;
+ break;
+ }
+ i7 = i6 + 1 | 0;
+ if (i5 << 24 >> 24 == i4 << 24 >> 24) {
+ i4 = i6;
+ i5 = 13;
+ break;
+ }
+ if ((i7 & 3 | 0) == 0) {
+ i4 = i7;
+ break L5;
+ } else {
+ i6 = i7;
+ }
+ }
+ if ((i5 | 0) == 13) {
+ STACKTOP = i1;
+ return i4 | 0;
+ }
+ } else {
+ i4 = i6;
+ }
+ } while (0);
+ i3 = Math_imul(i3, 16843009) | 0;
+ i6 = HEAP32[i4 >> 2] | 0;
+ L15 : do {
+ if (((i6 & -2139062144 ^ -2139062144) & i6 + -16843009 | 0) == 0) {
+ while (1) {
+ i7 = i6 ^ i3;
+ i5 = i4 + 4 | 0;
+ if (((i7 & -2139062144 ^ -2139062144) & i7 + -16843009 | 0) != 0) {
+ break L15;
+ }
+ i6 = HEAP32[i5 >> 2] | 0;
+ if (((i6 & -2139062144 ^ -2139062144) & i6 + -16843009 | 0) == 0) {
+ i4 = i5;
+ } else {
+ i4 = i5;
+ break;
+ }
+ }
+ }
+ } while (0);
+ i2 = i2 & 255;
+ while (1) {
+ i7 = HEAP8[i4] | 0;
+ if (i7 << 24 >> 24 == 0 | i7 << 24 >> 24 == i2 << 24 >> 24) {
+ break;
+ } else {
+ i4 = i4 + 1 | 0;
+ }
+ }
+ STACKTOP = i1;
+ return i4 | 0;
+}
+function _lua_replace(i2, i6) {
+ i2 = i2 | 0;
+ i6 = i6 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i5 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0;
+ i3 = STACKTOP;
+ i7 = i2 + 8 | 0;
+ i9 = HEAP32[i7 >> 2] | 0;
+ i5 = i9 + -16 | 0;
+ i4 = i2 + 16 | 0;
+ i12 = HEAP32[i4 >> 2] | 0;
+ do {
+ if ((i6 | 0) <= 0) {
+ if (!((i6 | 0) < -1000999)) {
+ i10 = i9 + (i6 << 4) | 0;
+ break;
+ }
+ if ((i6 | 0) == -1001e3) {
+ i10 = (HEAP32[i2 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i11 = -1001e3 - i6 | 0;
+ i12 = HEAP32[i12 >> 2] | 0;
+ if ((HEAP32[i12 + 8 >> 2] | 0) != 22 ? (i10 = HEAP32[i12 >> 2] | 0, (i11 | 0) <= (HEAPU8[i10 + 6 | 0] | 0 | 0)) : 0) {
+ i10 = i10 + (i11 + -1 << 4) + 16 | 0;
+ } else {
+ i10 = 5192;
+ }
+ } else {
+ i10 = (HEAP32[i12 >> 2] | 0) + (i6 << 4) | 0;
+ i10 = i10 >>> 0 < i9 >>> 0 ? i10 : 5192;
+ }
+ } while (0);
+ i13 = i5;
+ i11 = HEAP32[i13 + 4 >> 2] | 0;
+ i12 = i10;
+ HEAP32[i12 >> 2] = HEAP32[i13 >> 2];
+ HEAP32[i12 + 4 >> 2] = i11;
+ i9 = i9 + -8 | 0;
+ HEAP32[i10 + 8 >> 2] = HEAP32[i9 >> 2];
+ if ((((i6 | 0) < -1001e3 ? (HEAP32[i9 >> 2] & 64 | 0) != 0 : 0) ? (i1 = HEAP32[i5 >> 2] | 0, !((HEAP8[i1 + 5 | 0] & 3) == 0)) : 0) ? (i8 = HEAP32[HEAP32[HEAP32[i4 >> 2] >> 2] >> 2] | 0, !((HEAP8[i8 + 5 | 0] & 4) == 0)) : 0) {
+ _luaC_barrier_(i2, i8, i1);
+ }
+ HEAP32[i7 >> 2] = (HEAP32[i7 >> 2] | 0) + -16;
+ STACKTOP = i3;
+ return;
+}
+function _memchr(i4, i3, i6) {
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i6 = i6 | 0;
+ var i1 = 0, i2 = 0, i5 = 0, i7 = 0;
+ i1 = STACKTOP;
+ i2 = i3 & 255;
+ i7 = (i6 | 0) == 0;
+ L1 : do {
+ if ((i4 & 3 | 0) == 0 | i7) {
+ i5 = i6;
+ i6 = 5;
+ } else {
+ i5 = i3 & 255;
+ while (1) {
+ if ((HEAP8[i4] | 0) == i5 << 24 >> 24) {
+ i5 = i6;
+ i6 = 6;
+ break L1;
+ }
+ i4 = i4 + 1 | 0;
+ i6 = i6 + -1 | 0;
+ i7 = (i6 | 0) == 0;
+ if ((i4 & 3 | 0) == 0 | i7) {
+ i5 = i6;
+ i6 = 5;
+ break;
+ }
+ }
+ }
+ } while (0);
+ if ((i6 | 0) == 5) {
+ if (i7) {
+ i5 = 0;
+ } else {
+ i6 = 6;
+ }
+ }
+ L8 : do {
+ if ((i6 | 0) == 6) {
+ i3 = i3 & 255;
+ if (!((HEAP8[i4] | 0) == i3 << 24 >> 24)) {
+ i2 = Math_imul(i2, 16843009) | 0;
+ L11 : do {
+ if (i5 >>> 0 > 3) {
+ do {
+ i7 = HEAP32[i4 >> 2] ^ i2;
+ if (((i7 & -2139062144 ^ -2139062144) & i7 + -16843009 | 0) != 0) {
+ break L11;
+ }
+ i4 = i4 + 4 | 0;
+ i5 = i5 + -4 | 0;
+ } while (i5 >>> 0 > 3);
+ }
+ } while (0);
+ if ((i5 | 0) == 0) {
+ i5 = 0;
+ } else {
+ while (1) {
+ if ((HEAP8[i4] | 0) == i3 << 24 >> 24) {
+ break L8;
+ }
+ i4 = i4 + 1 | 0;
+ i5 = i5 + -1 | 0;
+ if ((i5 | 0) == 0) {
+ i5 = 0;
+ break;
+ }
+ }
+ }
+ }
+ }
+ } while (0);
+ STACKTOP = i1;
+ return ((i5 | 0) != 0 ? i4 : 0) | 0;
+}
+function _lua_insert(i2, i5) {
+ i2 = i2 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i6 = 0, i7 = 0, i8 = 0;
+ i1 = STACKTOP;
+ i4 = HEAP32[i2 + 16 >> 2] | 0;
+ do {
+ if ((i5 | 0) <= 0) {
+ if (!((i5 | 0) < -1000999)) {
+ i3 = (HEAP32[i2 + 8 >> 2] | 0) + (i5 << 4) | 0;
+ break;
+ }
+ if ((i5 | 0) == -1001e3) {
+ i3 = (HEAP32[i2 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i5 = -1001e3 - i5 | 0;
+ i4 = HEAP32[i4 >> 2] | 0;
+ if ((HEAP32[i4 + 8 >> 2] | 0) != 22 ? (i3 = HEAP32[i4 >> 2] | 0, (i5 | 0) <= (HEAPU8[i3 + 6 | 0] | 0 | 0)) : 0) {
+ i3 = i3 + (i5 + -1 << 4) + 16 | 0;
+ } else {
+ i3 = 5192;
+ }
+ } else {
+ i3 = (HEAP32[i4 >> 2] | 0) + (i5 << 4) | 0;
+ i3 = i3 >>> 0 < (HEAP32[i2 + 8 >> 2] | 0) >>> 0 ? i3 : 5192;
+ }
+ } while (0);
+ i2 = i2 + 8 | 0;
+ i4 = HEAP32[i2 >> 2] | 0;
+ if (i4 >>> 0 > i3 >>> 0) {
+ while (1) {
+ i5 = i4 + -16 | 0;
+ i8 = i5;
+ i7 = HEAP32[i8 + 4 >> 2] | 0;
+ i6 = i4;
+ HEAP32[i6 >> 2] = HEAP32[i8 >> 2];
+ HEAP32[i6 + 4 >> 2] = i7;
+ HEAP32[i4 + 8 >> 2] = HEAP32[i4 + -8 >> 2];
+ if (i5 >>> 0 > i3 >>> 0) {
+ i4 = i5;
+ } else {
+ break;
+ }
+ }
+ i4 = HEAP32[i2 >> 2] | 0;
+ }
+ i6 = i4;
+ i7 = HEAP32[i6 + 4 >> 2] | 0;
+ i8 = i3;
+ HEAP32[i8 >> 2] = HEAP32[i6 >> 2];
+ HEAP32[i8 + 4 >> 2] = i7;
+ HEAP32[i3 + 8 >> 2] = HEAP32[i4 + 8 >> 2];
+ STACKTOP = i1;
+ return;
+}
+function _findlocal(i6, i4, i1, i2) {
+ i6 = i6 | 0;
+ i4 = i4 | 0;
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ var i3 = 0, i5 = 0, i7 = 0, i8 = 0;
+ i3 = STACKTOP;
+ do {
+ if ((HEAP8[i4 + 18 | 0] & 1) == 0) {
+ i7 = (HEAP32[i4 >> 2] | 0) + 16 | 0;
+ i5 = 7;
+ } else {
+ if ((i1 | 0) >= 0) {
+ i8 = HEAP32[i4 + 24 >> 2] | 0;
+ i7 = HEAP32[(HEAP32[HEAP32[i4 >> 2] >> 2] | 0) + 12 >> 2] | 0;
+ i7 = _luaF_getlocalname(i7, i1, ((HEAP32[i4 + 28 >> 2] | 0) - (HEAP32[i7 + 12 >> 2] | 0) >> 2) + -1 | 0) | 0;
+ if ((i7 | 0) == 0) {
+ i7 = i8;
+ i5 = 7;
+ break;
+ } else {
+ break;
+ }
+ }
+ i5 = HEAP32[i4 >> 2] | 0;
+ i6 = HEAPU8[(HEAP32[(HEAP32[i5 >> 2] | 0) + 12 >> 2] | 0) + 76 | 0] | 0;
+ if ((((HEAP32[i4 + 24 >> 2] | 0) - i5 >> 4) - i6 | 0) <= (0 - i1 | 0)) {
+ i8 = 0;
+ STACKTOP = i3;
+ return i8 | 0;
+ }
+ HEAP32[i2 >> 2] = i5 + (i6 - i1 << 4);
+ i8 = 2208;
+ STACKTOP = i3;
+ return i8 | 0;
+ }
+ } while (0);
+ if ((i5 | 0) == 7) {
+ if ((HEAP32[i6 + 16 >> 2] | 0) == (i4 | 0)) {
+ i4 = i6 + 8 | 0;
+ } else {
+ i4 = HEAP32[i4 + 12 >> 2] | 0;
+ }
+ if (((HEAP32[i4 >> 2] | 0) - i7 >> 4 | 0) >= (i1 | 0) & (i1 | 0) > 0) {
+ i8 = i7;
+ i7 = 2192;
+ } else {
+ i8 = 0;
+ STACKTOP = i3;
+ return i8 | 0;
+ }
+ }
+ HEAP32[i2 >> 2] = i8 + (i1 + -1 << 4);
+ i8 = i7;
+ STACKTOP = i3;
+ return i8 | 0;
+}
+function _luaH_setint(i4, i5, i6, i1) {
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ i6 = i6 | 0;
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, d7 = 0.0, i8 = 0, i9 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 32 | 0;
+ i8 = i2 + 16 | 0;
+ i3 = i2;
+ i9 = i6 + -1 | 0;
+ L1 : do {
+ if (i9 >>> 0 < (HEAP32[i5 + 28 >> 2] | 0) >>> 0) {
+ i9 = (HEAP32[i5 + 12 >> 2] | 0) + (i9 << 4) | 0;
+ i8 = 10;
+ } else {
+ d7 = +(i6 | 0);
+ HEAPF64[i8 >> 3] = d7 + 1.0;
+ i8 = (HEAP32[i8 + 4 >> 2] | 0) + (HEAP32[i8 >> 2] | 0) | 0;
+ if ((i8 | 0) < 0) {
+ i9 = 0 - i8 | 0;
+ i8 = (i8 | 0) == (i9 | 0) ? 0 : i9;
+ }
+ i9 = (HEAP32[i5 + 16 >> 2] | 0) + (((i8 | 0) % ((1 << (HEAPU8[i5 + 7 | 0] | 0)) + -1 | 1 | 0) | 0) << 5) | 0;
+ while (1) {
+ if ((HEAP32[i9 + 24 >> 2] | 0) == 3 ? +HEAPF64[i9 + 16 >> 3] == d7 : 0) {
+ break;
+ }
+ i9 = HEAP32[i9 + 28 >> 2] | 0;
+ if ((i9 | 0) == 0) {
+ i8 = 12;
+ break L1;
+ }
+ }
+ i8 = 10;
+ }
+ } while (0);
+ if ((i8 | 0) == 10) {
+ if ((i9 | 0) == 5192) {
+ d7 = +(i6 | 0);
+ i8 = 12;
+ }
+ }
+ if ((i8 | 0) == 12) {
+ HEAPF64[i3 >> 3] = d7;
+ HEAP32[i3 + 8 >> 2] = 3;
+ i9 = _luaH_newkey(i4, i5, i3) | 0;
+ }
+ i5 = i1;
+ i6 = HEAP32[i5 + 4 >> 2] | 0;
+ i8 = i9;
+ HEAP32[i8 >> 2] = HEAP32[i5 >> 2];
+ HEAP32[i8 + 4 >> 2] = i6;
+ HEAP32[i9 + 8 >> 2] = HEAP32[i1 + 8 >> 2];
+ STACKTOP = i2;
+ return;
+}
+function _lua_tounsignedx(i6, i8, i1) {
+ i6 = i6 | 0;
+ i8 = i8 | 0;
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i7 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 32 | 0;
+ i4 = i2 + 8 | 0;
+ i3 = i2;
+ i7 = HEAP32[i6 + 16 >> 2] | 0;
+ do {
+ if ((i8 | 0) <= 0) {
+ if (!((i8 | 0) < -1000999)) {
+ i5 = (HEAP32[i6 + 8 >> 2] | 0) + (i8 << 4) | 0;
+ break;
+ }
+ if ((i8 | 0) == -1001e3) {
+ i5 = (HEAP32[i6 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i6 = -1001e3 - i8 | 0;
+ i7 = HEAP32[i7 >> 2] | 0;
+ if ((HEAP32[i7 + 8 >> 2] | 0) != 22 ? (i5 = HEAP32[i7 >> 2] | 0, (i6 | 0) <= (HEAPU8[i5 + 6 | 0] | 0 | 0)) : 0) {
+ i5 = i5 + (i6 + -1 << 4) + 16 | 0;
+ } else {
+ i5 = 5192;
+ }
+ } else {
+ i5 = (HEAP32[i7 >> 2] | 0) + (i8 << 4) | 0;
+ i5 = i5 >>> 0 < (HEAP32[i6 + 8 >> 2] | 0) >>> 0 ? i5 : 5192;
+ }
+ } while (0);
+ if ((HEAP32[i5 + 8 >> 2] | 0) != 3) {
+ i5 = _luaV_tonumber(i5, i4) | 0;
+ if ((i5 | 0) == 0) {
+ if ((i1 | 0) == 0) {
+ i8 = 0;
+ STACKTOP = i2;
+ return i8 | 0;
+ }
+ HEAP32[i1 >> 2] = 0;
+ i8 = 0;
+ STACKTOP = i2;
+ return i8 | 0;
+ }
+ }
+ HEAPF64[i3 >> 3] = +HEAPF64[i5 >> 3] + 6755399441055744.0;
+ i3 = HEAP32[i3 >> 2] | 0;
+ if ((i1 | 0) == 0) {
+ i8 = i3;
+ STACKTOP = i2;
+ return i8 | 0;
+ }
+ HEAP32[i1 >> 2] = 1;
+ i8 = i3;
+ STACKTOP = i2;
+ return i8 | 0;
+}
+function _luaC_freeallobjects(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0;
+ i2 = STACKTOP;
+ i5 = i1 + 12 | 0;
+ i3 = HEAP32[i5 >> 2] | 0;
+ i7 = i3 + 104 | 0;
+ while (1) {
+ i4 = HEAP32[i7 >> 2] | 0;
+ if ((i4 | 0) == 0) {
+ break;
+ } else {
+ i7 = i4;
+ }
+ }
+ i4 = i3 + 72 | 0;
+ i6 = HEAP32[i4 >> 2] | 0;
+ if ((i6 | 0) == 0) {
+ i5 = i3;
+ } else {
+ while (1) {
+ i8 = i6 + 5 | 0;
+ HEAP8[i8] = HEAPU8[i8] | 0 | 8;
+ HEAP32[i4 >> 2] = HEAP32[i6 >> 2];
+ HEAP32[i6 >> 2] = HEAP32[i7 >> 2];
+ HEAP32[i7 >> 2] = i6;
+ i7 = HEAP32[i4 >> 2] | 0;
+ if ((i7 | 0) == 0) {
+ break;
+ } else {
+ i8 = i6;
+ i6 = i7;
+ i7 = i8;
+ }
+ }
+ i5 = HEAP32[i5 >> 2] | 0;
+ }
+ i5 = i5 + 104 | 0;
+ i6 = HEAP32[i5 >> 2] | 0;
+ if ((i6 | 0) != 0) {
+ do {
+ i8 = i6 + 5 | 0;
+ HEAP8[i8] = HEAP8[i8] & 191;
+ _GCTM(i1, 0);
+ i6 = HEAP32[i5 >> 2] | 0;
+ } while ((i6 | 0) != 0);
+ }
+ HEAP8[i3 + 60 | 0] = 3;
+ HEAP8[i3 + 62 | 0] = 0;
+ _sweeplist(i1, i4, -3) | 0;
+ _sweeplist(i1, i3 + 68 | 0, -3) | 0;
+ i4 = i3 + 32 | 0;
+ if ((HEAP32[i4 >> 2] | 0) <= 0) {
+ STACKTOP = i2;
+ return;
+ }
+ i3 = i3 + 24 | 0;
+ i5 = 0;
+ do {
+ _sweeplist(i1, (HEAP32[i3 >> 2] | 0) + (i5 << 2) | 0, -3) | 0;
+ i5 = i5 + 1 | 0;
+ } while ((i5 | 0) < (HEAP32[i4 >> 2] | 0));
+ STACKTOP = i2;
+ return;
+}
+function _strspn(i1, i5) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i6 = 0, i7 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 32 | 0;
+ i3 = i2;
+ HEAP32[i3 + 0 >> 2] = 0;
+ HEAP32[i3 + 4 >> 2] = 0;
+ HEAP32[i3 + 8 >> 2] = 0;
+ HEAP32[i3 + 12 >> 2] = 0;
+ HEAP32[i3 + 16 >> 2] = 0;
+ HEAP32[i3 + 20 >> 2] = 0;
+ HEAP32[i3 + 24 >> 2] = 0;
+ HEAP32[i3 + 28 >> 2] = 0;
+ i4 = HEAP8[i5] | 0;
+ if (i4 << 24 >> 24 == 0) {
+ i6 = 0;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ if ((HEAP8[i5 + 1 | 0] | 0) == 0) {
+ i3 = i1;
+ while (1) {
+ if ((HEAP8[i3] | 0) == i4 << 24 >> 24) {
+ i3 = i3 + 1 | 0;
+ } else {
+ break;
+ }
+ }
+ i6 = i3 - i1 | 0;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ do {
+ i7 = i4 & 255;
+ i6 = i3 + (i7 >>> 5 << 2) | 0;
+ HEAP32[i6 >> 2] = HEAP32[i6 >> 2] | 1 << (i7 & 31);
+ i5 = i5 + 1 | 0;
+ i4 = HEAP8[i5] | 0;
+ } while (!(i4 << 24 >> 24 == 0));
+ i5 = HEAP8[i1] | 0;
+ L12 : do {
+ if (i5 << 24 >> 24 == 0) {
+ i4 = i1;
+ } else {
+ i4 = i1;
+ while (1) {
+ i7 = i5 & 255;
+ i6 = i4 + 1 | 0;
+ if ((HEAP32[i3 + (i7 >>> 5 << 2) >> 2] & 1 << (i7 & 31) | 0) == 0) {
+ break L12;
+ }
+ i5 = HEAP8[i6] | 0;
+ if (i5 << 24 >> 24 == 0) {
+ i4 = i6;
+ break;
+ } else {
+ i4 = i6;
+ }
+ }
+ }
+ } while (0);
+ i7 = i4 - i1 | 0;
+ STACKTOP = i2;
+ return i7 | 0;
+}
+function _lua_remove(i2, i4) {
+ i2 = i2 | 0;
+ i4 = i4 | 0;
+ var i1 = 0, i3 = 0, i5 = 0, i6 = 0, i7 = 0;
+ i1 = STACKTOP;
+ i5 = HEAP32[i2 + 16 >> 2] | 0;
+ do {
+ if ((i4 | 0) <= 0) {
+ if (!((i4 | 0) < -1000999)) {
+ i3 = (HEAP32[i2 + 8 >> 2] | 0) + (i4 << 4) | 0;
+ break;
+ }
+ if ((i4 | 0) == -1001e3) {
+ i3 = (HEAP32[i2 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i4 = -1001e3 - i4 | 0;
+ i5 = HEAP32[i5 >> 2] | 0;
+ if ((HEAP32[i5 + 8 >> 2] | 0) != 22 ? (i3 = HEAP32[i5 >> 2] | 0, (i4 | 0) <= (HEAPU8[i3 + 6 | 0] | 0 | 0)) : 0) {
+ i3 = i3 + (i4 + -1 << 4) + 16 | 0;
+ } else {
+ i3 = 5192;
+ }
+ } else {
+ i3 = (HEAP32[i5 >> 2] | 0) + (i4 << 4) | 0;
+ i3 = i3 >>> 0 < (HEAP32[i2 + 8 >> 2] | 0) >>> 0 ? i3 : 5192;
+ }
+ } while (0);
+ i4 = i3 + 16 | 0;
+ i2 = i2 + 8 | 0;
+ i5 = HEAP32[i2 >> 2] | 0;
+ if (!(i4 >>> 0 < i5 >>> 0)) {
+ i5 = i5 + -16 | 0;
+ HEAP32[i2 >> 2] = i5;
+ STACKTOP = i1;
+ return;
+ }
+ while (1) {
+ i7 = i4;
+ i6 = HEAP32[i7 + 4 >> 2] | 0;
+ i5 = i3;
+ HEAP32[i5 >> 2] = HEAP32[i7 >> 2];
+ HEAP32[i5 + 4 >> 2] = i6;
+ HEAP32[i3 + 8 >> 2] = HEAP32[i3 + 24 >> 2];
+ i5 = i4 + 16 | 0;
+ i3 = HEAP32[i2 >> 2] | 0;
+ if (i5 >>> 0 < i3 >>> 0) {
+ i3 = i4;
+ i4 = i5;
+ } else {
+ break;
+ }
+ }
+ i7 = i3 + -16 | 0;
+ HEAP32[i2 >> 2] = i7;
+ STACKTOP = i1;
+ return;
+}
+function _luaD_protectedparser(i1, i4, i3, i2) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ var i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0;
+ i5 = STACKTOP;
+ STACKTOP = STACKTOP + 64 | 0;
+ i13 = i5;
+ i6 = i1 + 36 | 0;
+ HEAP16[i6 >> 1] = (HEAP16[i6 >> 1] | 0) + 1 << 16 >> 16;
+ HEAP32[i13 >> 2] = i4;
+ HEAP32[i13 + 56 >> 2] = i3;
+ HEAP32[i13 + 52 >> 2] = i2;
+ i10 = i13 + 16 | 0;
+ HEAP32[i10 >> 2] = 0;
+ i9 = i13 + 24 | 0;
+ HEAP32[i9 >> 2] = 0;
+ i8 = i13 + 28 | 0;
+ HEAP32[i8 >> 2] = 0;
+ i7 = i13 + 36 | 0;
+ HEAP32[i7 >> 2] = 0;
+ i2 = i13 + 40 | 0;
+ HEAP32[i2 >> 2] = 0;
+ i3 = i13 + 48 | 0;
+ HEAP32[i3 >> 2] = 0;
+ i12 = i13 + 4 | 0;
+ HEAP32[i12 >> 2] = 0;
+ i11 = i13 + 12 | 0;
+ HEAP32[i11 >> 2] = 0;
+ i4 = _luaD_pcall(i1, 6, i13, (HEAP32[i1 + 8 >> 2] | 0) - (HEAP32[i1 + 28 >> 2] | 0) | 0, HEAP32[i1 + 68 >> 2] | 0) | 0;
+ HEAP32[i12 >> 2] = _luaM_realloc_(i1, HEAP32[i12 >> 2] | 0, HEAP32[i11 >> 2] | 0, 0) | 0;
+ HEAP32[i11 >> 2] = 0;
+ _luaM_realloc_(i1, HEAP32[i10 >> 2] | 0, HEAP32[i9 >> 2] << 1, 0) | 0;
+ _luaM_realloc_(i1, HEAP32[i8 >> 2] | 0, HEAP32[i7 >> 2] << 4, 0) | 0;
+ _luaM_realloc_(i1, HEAP32[i2 >> 2] | 0, HEAP32[i3 >> 2] << 4, 0) | 0;
+ HEAP16[i6 >> 1] = (HEAP16[i6 >> 1] | 0) + -1 << 16 >> 16;
+ STACKTOP = i5;
+ return i4 | 0;
+}
+function _markmt(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ i3 = HEAP32[i1 + 252 >> 2] | 0;
+ if ((i3 | 0) != 0 ? !((HEAP8[i3 + 5 | 0] & 3) == 0) : 0) {
+ _reallymarkobject(i1, i3);
+ }
+ i3 = HEAP32[i1 + 256 >> 2] | 0;
+ if ((i3 | 0) != 0 ? !((HEAP8[i3 + 5 | 0] & 3) == 0) : 0) {
+ _reallymarkobject(i1, i3);
+ }
+ i3 = HEAP32[i1 + 260 >> 2] | 0;
+ if ((i3 | 0) != 0 ? !((HEAP8[i3 + 5 | 0] & 3) == 0) : 0) {
+ _reallymarkobject(i1, i3);
+ }
+ i3 = HEAP32[i1 + 264 >> 2] | 0;
+ if ((i3 | 0) != 0 ? !((HEAP8[i3 + 5 | 0] & 3) == 0) : 0) {
+ _reallymarkobject(i1, i3);
+ }
+ i3 = HEAP32[i1 + 268 >> 2] | 0;
+ if ((i3 | 0) != 0 ? !((HEAP8[i3 + 5 | 0] & 3) == 0) : 0) {
+ _reallymarkobject(i1, i3);
+ }
+ i3 = HEAP32[i1 + 272 >> 2] | 0;
+ if ((i3 | 0) != 0 ? !((HEAP8[i3 + 5 | 0] & 3) == 0) : 0) {
+ _reallymarkobject(i1, i3);
+ }
+ i3 = HEAP32[i1 + 276 >> 2] | 0;
+ if ((i3 | 0) != 0 ? !((HEAP8[i3 + 5 | 0] & 3) == 0) : 0) {
+ _reallymarkobject(i1, i3);
+ }
+ i3 = HEAP32[i1 + 280 >> 2] | 0;
+ if ((i3 | 0) != 0 ? !((HEAP8[i3 + 5 | 0] & 3) == 0) : 0) {
+ _reallymarkobject(i1, i3);
+ }
+ i3 = HEAP32[i1 + 284 >> 2] | 0;
+ if ((i3 | 0) == 0) {
+ STACKTOP = i2;
+ return;
+ }
+ if ((HEAP8[i3 + 5 | 0] & 3) == 0) {
+ STACKTOP = i2;
+ return;
+ }
+ _reallymarkobject(i1, i3);
+ STACKTOP = i2;
+ return;
+}
+function _findlabel(i9, i2) {
+ i9 = i9 | 0;
+ i2 = i2 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0;
+ i1 = STACKTOP;
+ i3 = i9 + 48 | 0;
+ i7 = HEAP32[(HEAP32[i3 >> 2] | 0) + 16 >> 2] | 0;
+ i10 = HEAP32[i9 + 64 >> 2] | 0;
+ i4 = HEAP32[i10 + 12 >> 2] | 0;
+ i6 = i7 + 4 | 0;
+ i13 = HEAP16[i6 >> 1] | 0;
+ i5 = i10 + 28 | 0;
+ if ((i13 | 0) >= (HEAP32[i5 >> 2] | 0)) {
+ i15 = 0;
+ STACKTOP = i1;
+ return i15 | 0;
+ }
+ i10 = i10 + 24 | 0;
+ i11 = i4 + (i2 << 4) | 0;
+ while (1) {
+ i14 = HEAP32[i10 >> 2] | 0;
+ i12 = i14 + (i13 << 4) | 0;
+ i15 = i13 + 1 | 0;
+ if ((_luaS_eqstr(HEAP32[i12 >> 2] | 0, HEAP32[i11 >> 2] | 0) | 0) != 0) {
+ break;
+ }
+ if ((i15 | 0) < (HEAP32[i5 >> 2] | 0)) {
+ i13 = i15;
+ } else {
+ i2 = 0;
+ i8 = 10;
+ break;
+ }
+ }
+ if ((i8 | 0) == 10) {
+ STACKTOP = i1;
+ return i2 | 0;
+ }
+ i8 = HEAP8[i14 + (i13 << 4) + 12 | 0] | 0;
+ do {
+ if ((HEAPU8[i4 + (i2 << 4) + 12 | 0] | 0) > (i8 & 255)) {
+ if ((HEAP8[i7 + 9 | 0] | 0) == 0 ? (HEAP32[i5 >> 2] | 0) <= (HEAP16[i6 >> 1] | 0) : 0) {
+ break;
+ }
+ _luaK_patchclose(HEAP32[i3 >> 2] | 0, HEAP32[i4 + (i2 << 4) + 4 >> 2] | 0, i8 & 255);
+ }
+ } while (0);
+ _closegoto(i9, i2, i12);
+ i15 = 1;
+ STACKTOP = i1;
+ return i15 | 0;
+}
+function _lua_getmetatable(i1, i5) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i3 = 0, i4 = 0;
+ i2 = STACKTOP;
+ i4 = HEAP32[i1 + 16 >> 2] | 0;
+ do {
+ if ((i5 | 0) <= 0) {
+ if (!((i5 | 0) < -1000999)) {
+ i4 = (HEAP32[i1 + 8 >> 2] | 0) + (i5 << 4) | 0;
+ break;
+ }
+ if ((i5 | 0) == -1001e3) {
+ i4 = (HEAP32[i1 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i5 = -1001e3 - i5 | 0;
+ i4 = HEAP32[i4 >> 2] | 0;
+ if ((HEAP32[i4 + 8 >> 2] | 0) != 22 ? (i3 = HEAP32[i4 >> 2] | 0, (i5 | 0) <= (HEAPU8[i3 + 6 | 0] | 0 | 0)) : 0) {
+ i4 = i3 + (i5 + -1 << 4) + 16 | 0;
+ } else {
+ i4 = 5192;
+ }
+ } else {
+ i3 = (HEAP32[i4 >> 2] | 0) + (i5 << 4) | 0;
+ i4 = i3 >>> 0 < (HEAP32[i1 + 8 >> 2] | 0) >>> 0 ? i3 : 5192;
+ }
+ } while (0);
+ i3 = HEAP32[i4 + 8 >> 2] & 15;
+ if ((i3 | 0) == 7) {
+ i3 = HEAP32[(HEAP32[i4 >> 2] | 0) + 8 >> 2] | 0;
+ } else if ((i3 | 0) == 5) {
+ i3 = HEAP32[(HEAP32[i4 >> 2] | 0) + 8 >> 2] | 0;
+ } else {
+ i3 = HEAP32[(HEAP32[i1 + 12 >> 2] | 0) + (i3 << 2) + 252 >> 2] | 0;
+ }
+ if ((i3 | 0) == 0) {
+ i5 = 0;
+ STACKTOP = i2;
+ return i5 | 0;
+ }
+ i5 = i1 + 8 | 0;
+ i4 = HEAP32[i5 >> 2] | 0;
+ HEAP32[i4 >> 2] = i3;
+ HEAP32[i4 + 8 >> 2] = 69;
+ HEAP32[i5 >> 2] = (HEAP32[i5 >> 2] | 0) + 16;
+ i5 = 1;
+ STACKTOP = i2;
+ return i5 | 0;
+}
+function _str_byte(i2) {
+ i2 = i2 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i6 = i1;
+ i4 = i1 + 4 | 0;
+ i3 = _luaL_checklstring(i2, 1, i4) | 0;
+ i5 = _luaL_optinteger(i2, 2, 1) | 0;
+ i7 = HEAP32[i4 >> 2] | 0;
+ if (!((i5 | 0) > -1)) {
+ if (i7 >>> 0 < (0 - i5 | 0) >>> 0) {
+ i5 = 0;
+ } else {
+ i5 = i5 + 1 + i7 | 0;
+ }
+ }
+ i8 = _luaL_optinteger(i2, 3, i5) | 0;
+ i7 = HEAP32[i4 >> 2] | 0;
+ if (!((i8 | 0) > -1)) {
+ if (i7 >>> 0 < (0 - i8 | 0) >>> 0) {
+ i8 = 0;
+ } else {
+ i8 = i8 + 1 + i7 | 0;
+ }
+ }
+ i9 = (i5 | 0) == 0 ? 1 : i5;
+ i10 = i8 >>> 0 > i7 >>> 0 ? i7 : i8;
+ if (i9 >>> 0 > i10 >>> 0) {
+ i10 = 0;
+ STACKTOP = i1;
+ return i10 | 0;
+ }
+ i4 = i10 - i9 + 1 | 0;
+ if ((i10 | 0) == -1) {
+ i10 = _luaL_error(i2, 7944, i6) | 0;
+ STACKTOP = i1;
+ return i10 | 0;
+ }
+ _luaL_checkstack(i2, i4, 7944);
+ if ((i4 | 0) <= 0) {
+ i10 = i4;
+ STACKTOP = i1;
+ return i10 | 0;
+ }
+ i6 = i9 + -1 | 0;
+ i8 = ~i8;
+ i7 = ~i7;
+ i5 = 0 - (i8 >>> 0 > i7 >>> 0 ? i8 : i7) - (i5 >>> 0 > 1 ? i5 : 1) | 0;
+ i7 = 0;
+ do {
+ _lua_pushinteger(i2, HEAPU8[i3 + (i6 + i7) | 0] | 0);
+ i7 = i7 + 1 | 0;
+ } while ((i7 | 0) != (i5 | 0));
+ STACKTOP = i1;
+ return i4 | 0;
+}
+function _lua_setuservalue(i1, i5) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i6 = 0;
+ i3 = STACKTOP;
+ i6 = HEAP32[i1 + 16 >> 2] | 0;
+ do {
+ if ((i5 | 0) <= 0) {
+ if (!((i5 | 0) < -1000999)) {
+ i5 = (HEAP32[i1 + 8 >> 2] | 0) + (i5 << 4) | 0;
+ break;
+ }
+ if ((i5 | 0) == -1001e3) {
+ i5 = (HEAP32[i1 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i5 = -1001e3 - i5 | 0;
+ i6 = HEAP32[i6 >> 2] | 0;
+ if ((HEAP32[i6 + 8 >> 2] | 0) != 22 ? (i4 = HEAP32[i6 >> 2] | 0, (i5 | 0) <= (HEAPU8[i4 + 6 | 0] | 0 | 0)) : 0) {
+ i5 = i4 + (i5 + -1 << 4) + 16 | 0;
+ } else {
+ i5 = 5192;
+ }
+ } else {
+ i4 = (HEAP32[i6 >> 2] | 0) + (i5 << 4) | 0;
+ i5 = i4 >>> 0 < (HEAP32[i1 + 8 >> 2] | 0) >>> 0 ? i4 : 5192;
+ }
+ } while (0);
+ i4 = i1 + 8 | 0;
+ i6 = HEAP32[i4 >> 2] | 0;
+ if ((HEAP32[i6 + -8 >> 2] | 0) != 0) {
+ HEAP32[(HEAP32[i5 >> 2] | 0) + 12 >> 2] = HEAP32[i6 + -16 >> 2];
+ i6 = HEAP32[(HEAP32[i4 >> 2] | 0) + -16 >> 2] | 0;
+ if (!((HEAP8[i6 + 5 | 0] & 3) == 0) ? (i2 = HEAP32[i5 >> 2] | 0, !((HEAP8[i2 + 5 | 0] & 4) == 0)) : 0) {
+ _luaC_barrier_(i1, i2, i6);
+ }
+ } else {
+ HEAP32[(HEAP32[i5 >> 2] | 0) + 12 >> 2] = 0;
+ }
+ HEAP32[i4 >> 2] = (HEAP32[i4 >> 2] | 0) + -16;
+ STACKTOP = i3;
+ return;
+}
+function _f_luaopen(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ var i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0;
+ i6 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i5 = i6;
+ i4 = HEAP32[i1 + 12 >> 2] | 0;
+ i2 = _luaM_realloc_(i1, 0, 0, 640) | 0;
+ HEAP32[i1 + 28 >> 2] = i2;
+ i3 = i1 + 32 | 0;
+ HEAP32[i3 >> 2] = 40;
+ i7 = 0;
+ do {
+ HEAP32[i2 + (i7 << 4) + 8 >> 2] = 0;
+ i7 = i7 + 1 | 0;
+ } while ((i7 | 0) != 40);
+ HEAP32[i1 + 24 >> 2] = i2 + ((HEAP32[i3 >> 2] | 0) + -5 << 4);
+ i7 = i1 + 72 | 0;
+ HEAP32[i1 + 80 >> 2] = 0;
+ HEAP32[i1 + 84 >> 2] = 0;
+ HEAP8[i1 + 90 | 0] = 0;
+ HEAP32[i7 >> 2] = i2;
+ HEAP32[i1 + 8 >> 2] = i2 + 16;
+ HEAP32[i2 + 8 >> 2] = 0;
+ HEAP32[i1 + 76 >> 2] = i2 + 336;
+ HEAP32[i1 + 16 >> 2] = i7;
+ i7 = _luaH_new(i1) | 0;
+ HEAP32[i4 + 40 >> 2] = i7;
+ HEAP32[i4 + 48 >> 2] = 69;
+ _luaH_resize(i1, i7, 2, 0);
+ HEAP32[i5 >> 2] = i1;
+ i3 = i5 + 8 | 0;
+ HEAP32[i3 >> 2] = 72;
+ _luaH_setint(i1, i7, 1, i5);
+ HEAP32[i5 >> 2] = _luaH_new(i1) | 0;
+ HEAP32[i3 >> 2] = 69;
+ _luaH_setint(i1, i7, 2, i5);
+ _luaS_resize(i1, 32);
+ _luaT_init(i1);
+ _luaX_init(i1);
+ i7 = _luaS_newlstr(i1, 6896, 17) | 0;
+ HEAP32[i4 + 180 >> 2] = i7;
+ i7 = i7 + 5 | 0;
+ HEAP8[i7] = HEAPU8[i7] | 0 | 32;
+ HEAP8[i4 + 63 | 0] = 1;
+ STACKTOP = i6;
+ return;
+}
+function _lua_tointegerx(i6, i7, i1) {
+ i6 = i6 | 0;
+ i7 = i7 | 0;
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = i2;
+ i5 = HEAP32[i6 + 16 >> 2] | 0;
+ do {
+ if ((i7 | 0) <= 0) {
+ if (!((i7 | 0) < -1000999)) {
+ i4 = (HEAP32[i6 + 8 >> 2] | 0) + (i7 << 4) | 0;
+ break;
+ }
+ if ((i7 | 0) == -1001e3) {
+ i4 = (HEAP32[i6 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i6 = -1001e3 - i7 | 0;
+ i5 = HEAP32[i5 >> 2] | 0;
+ if ((HEAP32[i5 + 8 >> 2] | 0) != 22 ? (i4 = HEAP32[i5 >> 2] | 0, (i6 | 0) <= (HEAPU8[i4 + 6 | 0] | 0 | 0)) : 0) {
+ i4 = i4 + (i6 + -1 << 4) + 16 | 0;
+ } else {
+ i4 = 5192;
+ }
+ } else {
+ i4 = (HEAP32[i5 >> 2] | 0) + (i7 << 4) | 0;
+ i4 = i4 >>> 0 < (HEAP32[i6 + 8 >> 2] | 0) >>> 0 ? i4 : 5192;
+ }
+ } while (0);
+ if ((HEAP32[i4 + 8 >> 2] | 0) != 3) {
+ i4 = _luaV_tonumber(i4, i3) | 0;
+ if ((i4 | 0) == 0) {
+ if ((i1 | 0) == 0) {
+ i7 = 0;
+ STACKTOP = i2;
+ return i7 | 0;
+ }
+ HEAP32[i1 >> 2] = 0;
+ i7 = 0;
+ STACKTOP = i2;
+ return i7 | 0;
+ }
+ }
+ i3 = ~~+HEAPF64[i4 >> 3];
+ if ((i1 | 0) == 0) {
+ i7 = i3;
+ STACKTOP = i2;
+ return i7 | 0;
+ }
+ HEAP32[i1 >> 2] = 1;
+ i7 = i3;
+ STACKTOP = i2;
+ return i7 | 0;
+}
+function _close_state(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0;
+ i2 = STACKTOP;
+ i6 = i1 + 12 | 0;
+ i3 = HEAP32[i6 >> 2] | 0;
+ i4 = i1 + 28 | 0;
+ _luaF_close(i1, HEAP32[i4 >> 2] | 0);
+ _luaC_freeallobjects(i1);
+ i6 = HEAP32[i6 >> 2] | 0;
+ _luaM_realloc_(i1, HEAP32[i6 + 24 >> 2] | 0, HEAP32[i6 + 32 >> 2] << 2, 0) | 0;
+ i6 = i3 + 144 | 0;
+ i5 = i3 + 152 | 0;
+ HEAP32[i6 >> 2] = _luaM_realloc_(i1, HEAP32[i6 >> 2] | 0, HEAP32[i5 >> 2] | 0, 0) | 0;
+ HEAP32[i5 >> 2] = 0;
+ i5 = HEAP32[i4 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ i5 = HEAP32[i3 >> 2] | 0;
+ i6 = i3 + 4 | 0;
+ i6 = HEAP32[i6 >> 2] | 0;
+ FUNCTION_TABLE_iiiii[i5 & 3](i6, i1, 400, 0) | 0;
+ STACKTOP = i2;
+ return;
+ }
+ HEAP32[i1 + 16 >> 2] = i1 + 72;
+ i7 = i1 + 84 | 0;
+ i6 = HEAP32[i7 >> 2] | 0;
+ HEAP32[i7 >> 2] = 0;
+ if ((i6 | 0) != 0) {
+ while (1) {
+ i5 = HEAP32[i6 + 12 >> 2] | 0;
+ _luaM_realloc_(i1, i6, 40, 0) | 0;
+ if ((i5 | 0) == 0) {
+ break;
+ } else {
+ i6 = i5;
+ }
+ }
+ i5 = HEAP32[i4 >> 2] | 0;
+ }
+ _luaM_realloc_(i1, i5, HEAP32[i1 + 32 >> 2] << 4, 0) | 0;
+ i6 = HEAP32[i3 >> 2] | 0;
+ i7 = i3 + 4 | 0;
+ i7 = HEAP32[i7 >> 2] | 0;
+ FUNCTION_TABLE_iiiii[i6 & 3](i7, i1, 400, 0) | 0;
+ STACKTOP = i2;
+ return;
+}
+function _ll_module(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 112 | 0;
+ i4 = i2;
+ i5 = i2 + 4 | 0;
+ i6 = _luaL_checklstring(i1, 1, 0) | 0;
+ i3 = _lua_gettop(i1) | 0;
+ _luaL_pushmodule(i1, i6, 1);
+ _lua_getfield(i1, -1, 4728);
+ i7 = (_lua_type(i1, -1) | 0) == 0;
+ _lua_settop(i1, -2);
+ if (i7) {
+ _lua_pushvalue(i1, -1);
+ _lua_setfield(i1, -2, 4784);
+ _lua_pushstring(i1, i6) | 0;
+ _lua_setfield(i1, -2, 4728);
+ i7 = _strrchr(i6, 46) | 0;
+ _lua_pushlstring(i1, i6, ((i7 | 0) == 0 ? i6 : i7 + 1 | 0) - i6 | 0) | 0;
+ _lua_setfield(i1, -2, 4792);
+ }
+ _lua_pushvalue(i1, -1);
+ if (!(((_lua_getstack(i1, 1, i5) | 0) != 0 ? (_lua_getinfo(i1, 4736, i5) | 0) != 0 : 0) ? (_lua_iscfunction(i1, -1) | 0) == 0 : 0)) {
+ _luaL_error(i1, 4744, i4) | 0;
+ }
+ _lua_pushvalue(i1, -2);
+ _lua_setupvalue(i1, -2, 1) | 0;
+ _lua_settop(i1, -2);
+ if ((i3 | 0) < 2) {
+ STACKTOP = i2;
+ return 1;
+ } else {
+ i4 = 2;
+ }
+ while (1) {
+ if ((_lua_type(i1, i4) | 0) == 6) {
+ _lua_pushvalue(i1, i4);
+ _lua_pushvalue(i1, -2);
+ _lua_callk(i1, 1, 0, 0, 0);
+ }
+ if ((i4 | 0) == (i3 | 0)) {
+ break;
+ } else {
+ i4 = i4 + 1 | 0;
+ }
+ }
+ STACKTOP = i2;
+ return 1;
+}
+function _strcspn(i2, i5) {
+ i2 = i2 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i6 = 0, i7 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 32 | 0;
+ i3 = i1;
+ i4 = HEAP8[i5] | 0;
+ if (!(i4 << 24 >> 24 == 0) ? (HEAP8[i5 + 1 | 0] | 0) != 0 : 0) {
+ HEAP32[i3 + 0 >> 2] = 0;
+ HEAP32[i3 + 4 >> 2] = 0;
+ HEAP32[i3 + 8 >> 2] = 0;
+ HEAP32[i3 + 12 >> 2] = 0;
+ HEAP32[i3 + 16 >> 2] = 0;
+ HEAP32[i3 + 20 >> 2] = 0;
+ HEAP32[i3 + 24 >> 2] = 0;
+ HEAP32[i3 + 28 >> 2] = 0;
+ do {
+ i7 = i4 & 255;
+ i6 = i3 + (i7 >>> 5 << 2) | 0;
+ HEAP32[i6 >> 2] = HEAP32[i6 >> 2] | 1 << (i7 & 31);
+ i5 = i5 + 1 | 0;
+ i4 = HEAP8[i5] | 0;
+ } while (!(i4 << 24 >> 24 == 0));
+ i5 = HEAP8[i2] | 0;
+ L7 : do {
+ if (i5 << 24 >> 24 == 0) {
+ i4 = i2;
+ } else {
+ i4 = i2;
+ while (1) {
+ i7 = i5 & 255;
+ i6 = i4 + 1 | 0;
+ if ((HEAP32[i3 + (i7 >>> 5 << 2) >> 2] & 1 << (i7 & 31) | 0) != 0) {
+ break L7;
+ }
+ i5 = HEAP8[i6] | 0;
+ if (i5 << 24 >> 24 == 0) {
+ i4 = i6;
+ break;
+ } else {
+ i4 = i6;
+ }
+ }
+ }
+ } while (0);
+ i7 = i4 - i2 | 0;
+ STACKTOP = i1;
+ return i7 | 0;
+ }
+ i7 = (___strchrnul(i2, i4 << 24 >> 24) | 0) - i2 | 0;
+ STACKTOP = i1;
+ return i7 | 0;
+}
+function _main(i4, i5) {
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i6 = 0, i7 = 0, i8 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i2 = i1;
+ i3 = _luaL_newstate() | 0;
+ if ((i3 | 0) == 0) {
+ i4 = HEAP32[i5 >> 2] | 0;
+ i3 = HEAP32[_stderr >> 2] | 0;
+ if ((i4 | 0) != 0) {
+ HEAP32[i2 >> 2] = i4;
+ _fprintf(i3 | 0, 496, i2 | 0) | 0;
+ _fflush(i3 | 0) | 0;
+ }
+ HEAP32[i2 >> 2] = 8;
+ _fprintf(i3 | 0, 912, i2 | 0) | 0;
+ _fflush(i3 | 0) | 0;
+ i8 = 1;
+ STACKTOP = i1;
+ return i8 | 0;
+ }
+ _lua_pushcclosure(i3, 141, 0);
+ _lua_pushinteger(i3, i4);
+ _lua_pushlightuserdata(i3, i5);
+ i6 = _lua_pcallk(i3, 2, 1, 0, 0, 0) | 0;
+ i7 = _lua_toboolean(i3, -1) | 0;
+ i6 = (i6 | 0) == 0;
+ if (!i6) {
+ if ((_lua_type(i3, -1) | 0) == 4) {
+ i8 = _lua_tolstring(i3, -1, 0) | 0;
+ } else {
+ i8 = 0;
+ }
+ i4 = HEAP32[20] | 0;
+ i5 = HEAP32[_stderr >> 2] | 0;
+ if ((i4 | 0) != 0) {
+ HEAP32[i2 >> 2] = i4;
+ _fprintf(i5 | 0, 496, i2 | 0) | 0;
+ _fflush(i5 | 0) | 0;
+ }
+ HEAP32[i2 >> 2] = (i8 | 0) == 0 ? 48 : i8;
+ _fprintf(i5 | 0, 912, i2 | 0) | 0;
+ _fflush(i5 | 0) | 0;
+ _lua_settop(i3, -2);
+ }
+ _lua_close(i3);
+ i8 = i6 & (i7 | 0) != 0 & 1 ^ 1;
+ STACKTOP = i1;
+ return i8 | 0;
+}
+function _db_sethook(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0;
+ i4 = STACKTOP;
+ if ((_lua_type(i1, 1) | 0) == 8) {
+ i2 = _lua_tothread(i1, 1) | 0;
+ i5 = 1;
+ } else {
+ i2 = i1;
+ i5 = 0;
+ }
+ i3 = i5 + 1 | 0;
+ if ((_lua_type(i1, i3) | 0) < 1) {
+ _lua_settop(i1, i3);
+ i6 = 0;
+ i7 = 0;
+ i5 = 0;
+ } else {
+ i6 = _luaL_checklstring(i1, i5 | 2, 0) | 0;
+ _luaL_checktype(i1, i3, 6);
+ i5 = _luaL_optinteger(i1, i5 + 3 | 0, 0) | 0;
+ i7 = (_strchr(i6, 99) | 0) != 0 | 0;
+ i8 = (_strchr(i6, 114) | 0) == 0;
+ i7 = i8 ? i7 : i7 | 2;
+ i8 = (_strchr(i6, 108) | 0) == 0;
+ i8 = i8 ? i7 : i7 | 4;
+ i6 = i5;
+ i7 = 9;
+ i5 = (i5 | 0) > 0 ? i8 | 8 : i8;
+ }
+ if ((_luaL_getsubtable(i1, -1001e3, 11584) | 0) != 0) {
+ _lua_pushthread(i2) | 0;
+ _lua_xmove(i2, i1, 1);
+ _lua_pushvalue(i1, i3);
+ _lua_rawset(i1, -3);
+ _lua_sethook(i2, i7, i5, i6) | 0;
+ STACKTOP = i4;
+ return 0;
+ }
+ _lua_pushstring(i1, 11592) | 0;
+ _lua_setfield(i1, -2, 11600);
+ _lua_pushvalue(i1, -1);
+ _lua_setmetatable(i1, -2) | 0;
+ _lua_pushthread(i2) | 0;
+ _lua_xmove(i2, i1, 1);
+ _lua_pushvalue(i1, i3);
+ _lua_rawset(i1, -3);
+ _lua_sethook(i2, i7, i5, i6) | 0;
+ STACKTOP = i4;
+ return 0;
+}
+function _tconcat(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 1056 | 0;
+ i6 = i3;
+ i2 = i3 + 16 | 0;
+ i5 = i3 + 8 | 0;
+ i4 = _luaL_optlstring(i1, 2, 8208, i5) | 0;
+ _luaL_checktype(i1, 1, 5);
+ i8 = _luaL_optinteger(i1, 3, 1) | 0;
+ if ((_lua_type(i1, 4) | 0) < 1) {
+ i7 = _luaL_len(i1, 1) | 0;
+ } else {
+ i7 = _luaL_checkinteger(i1, 4) | 0;
+ }
+ _luaL_buffinit(i1, i2);
+ if ((i8 | 0) >= (i7 | 0)) {
+ if ((i8 | 0) != (i7 | 0)) {
+ _luaL_pushresult(i2);
+ STACKTOP = i3;
+ return 1;
+ }
+ } else {
+ do {
+ _lua_rawgeti(i1, 1, i8);
+ if ((_lua_isstring(i1, -1) | 0) == 0) {
+ HEAP32[i6 >> 2] = _lua_typename(i1, _lua_type(i1, -1) | 0) | 0;
+ HEAP32[i6 + 4 >> 2] = i8;
+ _luaL_error(i1, 8360, i6) | 0;
+ }
+ _luaL_addvalue(i2);
+ _luaL_addlstring(i2, i4, HEAP32[i5 >> 2] | 0);
+ i8 = i8 + 1 | 0;
+ } while ((i8 | 0) != (i7 | 0));
+ }
+ _lua_rawgeti(i1, 1, i7);
+ if ((_lua_isstring(i1, -1) | 0) == 0) {
+ HEAP32[i6 >> 2] = _lua_typename(i1, _lua_type(i1, -1) | 0) | 0;
+ HEAP32[i6 + 4 >> 2] = i7;
+ _luaL_error(i1, 8360, i6) | 0;
+ }
+ _luaL_addvalue(i2);
+ _luaL_pushresult(i2);
+ STACKTOP = i3;
+ return 1;
+}
+function _searcher_Croot(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = i2;
+ i4 = _luaL_checklstring(i1, 1, 0) | 0;
+ i5 = _strchr(i4, 46) | 0;
+ if ((i5 | 0) == 0) {
+ i6 = 0;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ _lua_pushlstring(i1, i4, i5 - i4 | 0) | 0;
+ i5 = _lua_tolstring(i1, -1, 0) | 0;
+ _lua_getfield(i1, -1001001, 4440);
+ i6 = _lua_tolstring(i1, -1, 0) | 0;
+ if ((i6 | 0) == 0) {
+ HEAP32[i3 >> 2] = 4440;
+ _luaL_error(i1, 5032, i3) | 0;
+ }
+ i5 = _searchpath(i1, i5, i6, 4936, 4848) | 0;
+ if ((i5 | 0) == 0) {
+ i6 = 1;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ i6 = _loadfunc(i1, i5, i4) | 0;
+ if ((i6 | 0) == 2) {
+ HEAP32[i3 >> 2] = i4;
+ HEAP32[i3 + 4 >> 2] = i5;
+ _lua_pushfstring(i1, 4856, i3) | 0;
+ i6 = 1;
+ STACKTOP = i2;
+ return i6 | 0;
+ } else if ((i6 | 0) == 0) {
+ _lua_pushstring(i1, i5) | 0;
+ i6 = 2;
+ STACKTOP = i2;
+ return i6 | 0;
+ } else {
+ i4 = _lua_tolstring(i1, 1, 0) | 0;
+ i6 = _lua_tolstring(i1, -1, 0) | 0;
+ HEAP32[i3 >> 2] = i4;
+ HEAP32[i3 + 4 >> 2] = i5;
+ HEAP32[i3 + 8 >> 2] = i6;
+ i6 = _luaL_error(i1, 4888, i3) | 0;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ return 0;
+}
+function _lua_tonumberx(i5, i7, i1) {
+ i5 = i5 | 0;
+ i7 = i7 | 0;
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i6 = 0, d8 = 0.0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = i2;
+ i6 = HEAP32[i5 + 16 >> 2] | 0;
+ do {
+ if ((i7 | 0) <= 0) {
+ if (!((i7 | 0) < -1000999)) {
+ i4 = (HEAP32[i5 + 8 >> 2] | 0) + (i7 << 4) | 0;
+ break;
+ }
+ if ((i7 | 0) == -1001e3) {
+ i4 = (HEAP32[i5 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i5 = -1001e3 - i7 | 0;
+ i6 = HEAP32[i6 >> 2] | 0;
+ if ((HEAP32[i6 + 8 >> 2] | 0) != 22 ? (i4 = HEAP32[i6 >> 2] | 0, (i5 | 0) <= (HEAPU8[i4 + 6 | 0] | 0 | 0)) : 0) {
+ i4 = i4 + (i5 + -1 << 4) + 16 | 0;
+ } else {
+ i4 = 5192;
+ }
+ } else {
+ i4 = (HEAP32[i6 >> 2] | 0) + (i7 << 4) | 0;
+ i4 = i4 >>> 0 < (HEAP32[i5 + 8 >> 2] | 0) >>> 0 ? i4 : 5192;
+ }
+ } while (0);
+ if ((HEAP32[i4 + 8 >> 2] | 0) != 3) {
+ i4 = _luaV_tonumber(i4, i3) | 0;
+ if ((i4 | 0) == 0) {
+ if ((i1 | 0) == 0) {
+ d8 = 0.0;
+ STACKTOP = i2;
+ return +d8;
+ }
+ HEAP32[i1 >> 2] = 0;
+ d8 = 0.0;
+ STACKTOP = i2;
+ return +d8;
+ }
+ }
+ if ((i1 | 0) != 0) {
+ HEAP32[i1 >> 2] = 1;
+ }
+ d8 = +HEAPF64[i4 >> 3];
+ STACKTOP = i2;
+ return +d8;
+}
+function _luaopen_package(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _luaL_getsubtable(i1, -1001e3, 4184) | 0;
+ _lua_createtable(i1, 0, 1);
+ _lua_pushcclosure(i1, 158, 0);
+ _lua_setfield(i1, -2, 4192);
+ _lua_setmetatable(i1, -2) | 0;
+ _lua_createtable(i1, 0, 3);
+ _luaL_setfuncs(i1, 4200, 0);
+ _lua_createtable(i1, 4, 0);
+ _lua_pushvalue(i1, -2);
+ _lua_pushcclosure(i1, 159, 1);
+ _lua_rawseti(i1, -2, 1);
+ _lua_pushvalue(i1, -2);
+ _lua_pushcclosure(i1, 160, 1);
+ _lua_rawseti(i1, -2, 2);
+ _lua_pushvalue(i1, -2);
+ _lua_pushcclosure(i1, 161, 1);
+ _lua_rawseti(i1, -2, 3);
+ _lua_pushvalue(i1, -2);
+ _lua_pushcclosure(i1, 162, 1);
+ _lua_rawseti(i1, -2, 4);
+ _lua_pushvalue(i1, -1);
+ _lua_setfield(i1, -3, 4232);
+ _lua_setfield(i1, -2, 4240);
+ _setpath(i1, 4256, 4264, 4280, 4296);
+ _setpath(i1, 4440, 4448, 4464, 4480);
+ _lua_pushlstring(i1, 4552, 10) | 0;
+ _lua_setfield(i1, -2, 4568);
+ _luaL_getsubtable(i1, -1001e3, 4576) | 0;
+ _lua_setfield(i1, -2, 4584);
+ _luaL_getsubtable(i1, -1001e3, 4592) | 0;
+ _lua_setfield(i1, -2, 4608);
+ _lua_rawgeti(i1, -1001e3, 2);
+ _lua_pushvalue(i1, -2);
+ _luaL_setfuncs(i1, 4616, 1);
+ _lua_settop(i1, -2);
+ STACKTOP = i2;
+ return 1;
+}
+function _lua_rawlen(i3, i5) {
+ i3 = i3 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i2 = 0, i4 = 0;
+ i1 = STACKTOP;
+ i4 = HEAP32[i3 + 16 >> 2] | 0;
+ do {
+ if ((i5 | 0) <= 0) {
+ if (!((i5 | 0) < -1000999)) {
+ i2 = (HEAP32[i3 + 8 >> 2] | 0) + (i5 << 4) | 0;
+ break;
+ }
+ if ((i5 | 0) == -1001e3) {
+ i2 = (HEAP32[i3 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i3 = -1001e3 - i5 | 0;
+ i4 = HEAP32[i4 >> 2] | 0;
+ if ((HEAP32[i4 + 8 >> 2] | 0) != 22 ? (i2 = HEAP32[i4 >> 2] | 0, (i3 | 0) <= (HEAPU8[i2 + 6 | 0] | 0 | 0)) : 0) {
+ i2 = i2 + (i3 + -1 << 4) + 16 | 0;
+ } else {
+ i2 = 5192;
+ }
+ } else {
+ i2 = (HEAP32[i4 >> 2] | 0) + (i5 << 4) | 0;
+ i2 = i2 >>> 0 < (HEAP32[i3 + 8 >> 2] | 0) >>> 0 ? i2 : 5192;
+ }
+ } while (0);
+ i3 = HEAP32[i2 + 8 >> 2] & 15;
+ if ((i3 | 0) == 5) {
+ i5 = _luaH_getn(HEAP32[i2 >> 2] | 0) | 0;
+ STACKTOP = i1;
+ return i5 | 0;
+ } else if ((i3 | 0) == 4) {
+ i5 = HEAP32[(HEAP32[i2 >> 2] | 0) + 12 >> 2] | 0;
+ STACKTOP = i1;
+ return i5 | 0;
+ } else if ((i3 | 0) == 7) {
+ i5 = HEAP32[(HEAP32[i2 >> 2] | 0) + 16 >> 2] | 0;
+ STACKTOP = i1;
+ return i5 | 0;
+ } else {
+ i5 = 0;
+ STACKTOP = i1;
+ return i5 | 0;
+ }
+ return 0;
+}
+function _searchpath(i3, i5, i6, i7, i8) {
+ i3 = i3 | 0;
+ i5 = i5 | 0;
+ i6 = i6 | 0;
+ i7 = i7 | 0;
+ i8 = i8 | 0;
+ var i1 = 0, i2 = 0, i4 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 1056 | 0;
+ i4 = i2;
+ i1 = i2 + 8 | 0;
+ _luaL_buffinit(i3, i1);
+ if ((HEAP8[i7] | 0) != 0) {
+ i5 = _luaL_gsub(i3, i5, i7, i8) | 0;
+ }
+ while (1) {
+ i7 = HEAP8[i6] | 0;
+ if (i7 << 24 >> 24 == 59) {
+ i6 = i6 + 1 | 0;
+ continue;
+ } else if (i7 << 24 >> 24 == 0) {
+ i3 = 12;
+ break;
+ }
+ i8 = _strchr(i6, 59) | 0;
+ if ((i8 | 0) == 0) {
+ i8 = i6 + (_strlen(i6 | 0) | 0) | 0;
+ }
+ _lua_pushlstring(i3, i6, i8 - i6 | 0) | 0;
+ if ((i8 | 0) == 0) {
+ i3 = 12;
+ break;
+ }
+ i6 = _luaL_gsub(i3, _lua_tolstring(i3, -1, 0) | 0, 5064, i5) | 0;
+ _lua_remove(i3, -2);
+ i7 = _fopen(i6 | 0, 5088) | 0;
+ if ((i7 | 0) != 0) {
+ i3 = 10;
+ break;
+ }
+ HEAP32[i4 >> 2] = i6;
+ _lua_pushfstring(i3, 5072, i4) | 0;
+ _lua_remove(i3, -2);
+ _luaL_addvalue(i1);
+ i6 = i8;
+ }
+ if ((i3 | 0) == 10) {
+ _fclose(i7 | 0) | 0;
+ i8 = i6;
+ STACKTOP = i2;
+ return i8 | 0;
+ } else if ((i3 | 0) == 12) {
+ _luaL_pushresult(i1);
+ i8 = 0;
+ STACKTOP = i2;
+ return i8 | 0;
+ }
+ return 0;
+}
+function _io_readline(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = i2;
+ i4 = _lua_touserdata(i1, -1001001) | 0;
+ i5 = _lua_tointegerx(i1, -1001002, 0) | 0;
+ if ((HEAP32[i4 + 4 >> 2] | 0) == 0) {
+ i6 = _luaL_error(i1, 3344, i3) | 0;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ _lua_settop(i1, 1);
+ if ((i5 | 0) >= 1) {
+ i6 = 1;
+ while (1) {
+ _lua_pushvalue(i1, -1001003 - i6 | 0);
+ if ((i6 | 0) == (i5 | 0)) {
+ break;
+ } else {
+ i6 = i6 + 1 | 0;
+ }
+ }
+ }
+ i4 = _g_read(i1, HEAP32[i4 >> 2] | 0, 2) | 0;
+ if ((_lua_type(i1, 0 - i4 | 0) | 0) != 0) {
+ i6 = i4;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ if ((i4 | 0) > 1) {
+ HEAP32[i3 >> 2] = _lua_tolstring(i1, 1 - i4 | 0, 0) | 0;
+ i6 = _luaL_error(i1, 3368, i3) | 0;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ if ((_lua_toboolean(i1, -1001003) | 0) == 0) {
+ i6 = 0;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ _lua_settop(i1, 0);
+ _lua_pushvalue(i1, -1001001);
+ i5 = (_luaL_checkudata(i1, 1, 2832) | 0) + 4 | 0;
+ i6 = HEAP32[i5 >> 2] | 0;
+ HEAP32[i5 >> 2] = 0;
+ FUNCTION_TABLE_ii[i6 & 255](i1) | 0;
+ i6 = 0;
+ STACKTOP = i2;
+ return i6 | 0;
+}
+function _luaK_setreturns(i3, i5, i6) {
+ i3 = i3 | 0;
+ i5 = i5 | 0;
+ i6 = i6 | 0;
+ var i1 = 0, i2 = 0, i4 = 0, i7 = 0, i8 = 0;
+ i2 = STACKTOP;
+ i4 = HEAP32[i5 >> 2] | 0;
+ if ((i4 | 0) == 13) {
+ i7 = i5 + 8 | 0;
+ i8 = HEAP32[i3 >> 2] | 0;
+ i4 = HEAP32[i8 + 12 >> 2] | 0;
+ i5 = i4 + (HEAP32[i7 >> 2] << 2) | 0;
+ HEAP32[i5 >> 2] = HEAP32[i5 >> 2] & 8388607 | (i6 << 23) + 8388608;
+ i7 = i4 + (HEAP32[i7 >> 2] << 2) | 0;
+ i4 = i3 + 48 | 0;
+ HEAP32[i7 >> 2] = (HEAPU8[i4] | 0) << 6 | HEAP32[i7 >> 2] & -16321;
+ i7 = HEAP8[i4] | 0;
+ i5 = (i7 & 255) + 1 | 0;
+ i6 = i8 + 78 | 0;
+ do {
+ if (i5 >>> 0 > (HEAPU8[i6] | 0) >>> 0) {
+ if (i5 >>> 0 > 249) {
+ _luaX_syntaxerror(HEAP32[i3 + 12 >> 2] | 0, 10536);
+ } else {
+ HEAP8[i6] = i5;
+ i1 = HEAP8[i4] | 0;
+ break;
+ }
+ } else {
+ i1 = i7;
+ }
+ } while (0);
+ HEAP8[i4] = (i1 & 255) + 1;
+ STACKTOP = i2;
+ return;
+ } else if ((i4 | 0) == 12) {
+ i8 = (HEAP32[(HEAP32[i3 >> 2] | 0) + 12 >> 2] | 0) + (HEAP32[i5 + 8 >> 2] << 2) | 0;
+ HEAP32[i8 >> 2] = HEAP32[i8 >> 2] & -8372225 | (i6 << 14) + 16384 & 8372224;
+ STACKTOP = i2;
+ return;
+ } else {
+ STACKTOP = i2;
+ return;
+ }
+}
+function _luaZ_read(i2, i9, i8) {
+ i2 = i2 | 0;
+ i9 = i9 | 0;
+ i8 = i8 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i10 = 0, i11 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = i1;
+ if ((i8 | 0) == 0) {
+ i11 = 0;
+ STACKTOP = i1;
+ return i11 | 0;
+ }
+ i7 = i2 + 16 | 0;
+ i6 = i2 + 8 | 0;
+ i4 = i2 + 12 | 0;
+ i5 = i2 + 4 | 0;
+ i11 = HEAP32[i2 >> 2] | 0;
+ while (1) {
+ if ((i11 | 0) == 0) {
+ i10 = FUNCTION_TABLE_iiii[HEAP32[i6 >> 2] & 3](HEAP32[i7 >> 2] | 0, HEAP32[i4 >> 2] | 0, i3) | 0;
+ if ((i10 | 0) == 0) {
+ i2 = 9;
+ break;
+ }
+ i11 = HEAP32[i3 >> 2] | 0;
+ if ((i11 | 0) == 0) {
+ i2 = 9;
+ break;
+ }
+ HEAP32[i2 >> 2] = i11;
+ HEAP32[i5 >> 2] = i10;
+ } else {
+ i10 = HEAP32[i5 >> 2] | 0;
+ }
+ i11 = i8 >>> 0 > i11 >>> 0 ? i11 : i8;
+ _memcpy(i9 | 0, i10 | 0, i11 | 0) | 0;
+ i10 = (HEAP32[i2 >> 2] | 0) - i11 | 0;
+ HEAP32[i2 >> 2] = i10;
+ HEAP32[i5 >> 2] = (HEAP32[i5 >> 2] | 0) + i11;
+ if ((i8 | 0) == (i11 | 0)) {
+ i8 = 0;
+ i2 = 9;
+ break;
+ } else {
+ i8 = i8 - i11 | 0;
+ i9 = i9 + i11 | 0;
+ i11 = i10;
+ }
+ }
+ if ((i2 | 0) == 9) {
+ STACKTOP = i1;
+ return i8 | 0;
+ }
+ return 0;
+}
+function _lua_load(i1, i5, i4, i3, i6) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i6 = i6 | 0;
+ var i2 = 0, i7 = 0, i8 = 0, i9 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 32 | 0;
+ i7 = i2;
+ _luaZ_init(i1, i7, i5, i4);
+ i3 = _luaD_protectedparser(i1, i7, (i3 | 0) == 0 ? 928 : i3, i6) | 0;
+ if ((i3 | 0) != 0) {
+ STACKTOP = i2;
+ return i3 | 0;
+ }
+ i4 = HEAP32[(HEAP32[i1 + 8 >> 2] | 0) + -16 >> 2] | 0;
+ if ((HEAP8[i4 + 6 | 0] | 0) != 1) {
+ STACKTOP = i2;
+ return i3 | 0;
+ }
+ i5 = _luaH_getint(HEAP32[(HEAP32[i1 + 12 >> 2] | 0) + 40 >> 2] | 0, 2) | 0;
+ i4 = i4 + 16 | 0;
+ i6 = HEAP32[(HEAP32[i4 >> 2] | 0) + 8 >> 2] | 0;
+ i9 = i5;
+ i8 = HEAP32[i9 + 4 >> 2] | 0;
+ i7 = i6;
+ HEAP32[i7 >> 2] = HEAP32[i9 >> 2];
+ HEAP32[i7 + 4 >> 2] = i8;
+ i7 = i5 + 8 | 0;
+ HEAP32[i6 + 8 >> 2] = HEAP32[i7 >> 2];
+ if ((HEAP32[i7 >> 2] & 64 | 0) == 0) {
+ STACKTOP = i2;
+ return i3 | 0;
+ }
+ i5 = HEAP32[i5 >> 2] | 0;
+ if ((HEAP8[i5 + 5 | 0] & 3) == 0) {
+ STACKTOP = i2;
+ return i3 | 0;
+ }
+ i4 = HEAP32[i4 >> 2] | 0;
+ if ((HEAP8[i4 + 5 | 0] & 4) == 0) {
+ STACKTOP = i2;
+ return i3 | 0;
+ }
+ _luaC_barrier_(i1, i4, i5);
+ STACKTOP = i2;
+ return i3 | 0;
+}
+function _g_write(i1, i4, i8) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i8 = i8 | 0;
+ var i2 = 0, i3 = 0, i5 = 0, i6 = 0, i7 = 0, i9 = 0, d10 = 0.0;
+ i5 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i2 = i5;
+ i3 = i5 + 8 | 0;
+ i7 = _lua_gettop(i1) | 0;
+ if ((i7 | 0) == (i8 | 0)) {
+ i9 = 1;
+ STACKTOP = i5;
+ return i9 | 0;
+ }
+ i6 = i8;
+ i7 = i7 - i8 | 0;
+ i9 = 1;
+ while (1) {
+ i7 = i7 + -1 | 0;
+ if ((_lua_type(i1, i6) | 0) == 3) {
+ if ((i9 | 0) == 0) {
+ i8 = 0;
+ } else {
+ d10 = +_lua_tonumberx(i1, i6, 0);
+ HEAPF64[tempDoublePtr >> 3] = d10;
+ HEAP32[i2 >> 2] = HEAP32[tempDoublePtr >> 2];
+ HEAP32[i2 + 4 >> 2] = HEAP32[tempDoublePtr + 4 >> 2];
+ i8 = (_fprintf(i4 | 0, 3072, i2 | 0) | 0) > 0;
+ }
+ } else {
+ i8 = _luaL_checklstring(i1, i6, i3) | 0;
+ if ((i9 | 0) == 0) {
+ i8 = 0;
+ } else {
+ i8 = _fwrite(i8 | 0, 1, HEAP32[i3 >> 2] | 0, i4 | 0) | 0;
+ i8 = (i8 | 0) == (HEAP32[i3 >> 2] | 0);
+ }
+ }
+ if ((i7 | 0) == 0) {
+ break;
+ } else {
+ i6 = i6 + 1 | 0;
+ i9 = i8 & 1;
+ }
+ }
+ if (i8) {
+ i9 = 1;
+ STACKTOP = i5;
+ return i9 | 0;
+ }
+ i9 = _luaL_fileresult(i1, 0, 0) | 0;
+ STACKTOP = i5;
+ return i9 | 0;
+}
+function _lua_getuservalue(i2, i5) {
+ i2 = i2 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i3 = 0, i4 = 0;
+ i1 = STACKTOP;
+ i4 = HEAP32[i2 + 16 >> 2] | 0;
+ do {
+ if ((i5 | 0) <= 0) {
+ if (!((i5 | 0) < -1000999)) {
+ i3 = (HEAP32[i2 + 8 >> 2] | 0) + (i5 << 4) | 0;
+ break;
+ }
+ if ((i5 | 0) == -1001e3) {
+ i3 = (HEAP32[i2 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i5 = -1001e3 - i5 | 0;
+ i4 = HEAP32[i4 >> 2] | 0;
+ if ((HEAP32[i4 + 8 >> 2] | 0) != 22 ? (i3 = HEAP32[i4 >> 2] | 0, (i5 | 0) <= (HEAPU8[i3 + 6 | 0] | 0 | 0)) : 0) {
+ i3 = i3 + (i5 + -1 << 4) + 16 | 0;
+ } else {
+ i3 = 5192;
+ }
+ } else {
+ i3 = (HEAP32[i4 >> 2] | 0) + (i5 << 4) | 0;
+ i3 = i3 >>> 0 < (HEAP32[i2 + 8 >> 2] | 0) >>> 0 ? i3 : 5192;
+ }
+ } while (0);
+ i3 = HEAP32[(HEAP32[i3 >> 2] | 0) + 12 >> 2] | 0;
+ i2 = i2 + 8 | 0;
+ i4 = HEAP32[i2 >> 2] | 0;
+ if ((i3 | 0) == 0) {
+ HEAP32[i4 + 8 >> 2] = 0;
+ i5 = i4;
+ i5 = i5 + 16 | 0;
+ HEAP32[i2 >> 2] = i5;
+ STACKTOP = i1;
+ return;
+ } else {
+ HEAP32[i4 >> 2] = i3;
+ HEAP32[i4 + 8 >> 2] = 69;
+ i5 = HEAP32[i2 >> 2] | 0;
+ i5 = i5 + 16 | 0;
+ HEAP32[i2 >> 2] = i5;
+ STACKTOP = i1;
+ return;
+ }
+}
+function _luaL_addlstring(i7, i6, i1) {
+ i7 = i7 | 0;
+ i6 = i6 | 0;
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i8 = 0, i9 = 0;
+ i5 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i4 = HEAP32[i7 + 12 >> 2] | 0;
+ i3 = i7 + 4 | 0;
+ i9 = HEAP32[i3 >> 2] | 0;
+ i2 = i7 + 8 | 0;
+ i8 = HEAP32[i2 >> 2] | 0;
+ if (!((i9 - i8 | 0) >>> 0 < i1 >>> 0)) {
+ i7 = HEAP32[i7 >> 2] | 0;
+ i9 = i8;
+ i9 = i7 + i9 | 0;
+ _memcpy(i9 | 0, i6 | 0, i1 | 0) | 0;
+ i9 = HEAP32[i2 >> 2] | 0;
+ i9 = i9 + i1 | 0;
+ HEAP32[i2 >> 2] = i9;
+ STACKTOP = i5;
+ return;
+ }
+ i9 = i9 << 1;
+ i9 = (i9 - i8 | 0) >>> 0 < i1 >>> 0 ? i8 + i1 | 0 : i9;
+ if (i9 >>> 0 < i8 >>> 0 | (i9 - i8 | 0) >>> 0 < i1 >>> 0) {
+ _luaL_error(i4, 1272, i5) | 0;
+ }
+ i8 = _lua_newuserdata(i4, i9) | 0;
+ _memcpy(i8 | 0, HEAP32[i7 >> 2] | 0, HEAP32[i2 >> 2] | 0) | 0;
+ if ((HEAP32[i7 >> 2] | 0) != (i7 + 16 | 0)) {
+ _lua_remove(i4, -2);
+ }
+ HEAP32[i7 >> 2] = i8;
+ HEAP32[i3 >> 2] = i9;
+ i9 = HEAP32[i2 >> 2] | 0;
+ i9 = i8 + i9 | 0;
+ _memcpy(i9 | 0, i6 | 0, i1 | 0) | 0;
+ i9 = HEAP32[i2 >> 2] | 0;
+ i9 = i9 + i1 | 0;
+ HEAP32[i2 >> 2] = i9;
+ STACKTOP = i5;
+ return;
+}
+function _lua_rawgeti(i3, i6, i1) {
+ i3 = i3 | 0;
+ i6 = i6 | 0;
+ i1 = i1 | 0;
+ var i2 = 0, i4 = 0, i5 = 0, i7 = 0;
+ i2 = STACKTOP;
+ i5 = HEAP32[i3 + 16 >> 2] | 0;
+ do {
+ if ((i6 | 0) <= 0) {
+ if (!((i6 | 0) < -1000999)) {
+ i4 = (HEAP32[i3 + 8 >> 2] | 0) + (i6 << 4) | 0;
+ break;
+ }
+ if ((i6 | 0) == -1001e3) {
+ i4 = (HEAP32[i3 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i6 = -1001e3 - i6 | 0;
+ i5 = HEAP32[i5 >> 2] | 0;
+ if ((HEAP32[i5 + 8 >> 2] | 0) != 22 ? (i4 = HEAP32[i5 >> 2] | 0, (i6 | 0) <= (HEAPU8[i4 + 6 | 0] | 0 | 0)) : 0) {
+ i4 = i4 + (i6 + -1 << 4) + 16 | 0;
+ } else {
+ i4 = 5192;
+ }
+ } else {
+ i4 = (HEAP32[i5 >> 2] | 0) + (i6 << 4) | 0;
+ i4 = i4 >>> 0 < (HEAP32[i3 + 8 >> 2] | 0) >>> 0 ? i4 : 5192;
+ }
+ } while (0);
+ i4 = _luaH_getint(HEAP32[i4 >> 2] | 0, i1) | 0;
+ i6 = i3 + 8 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ i7 = i4;
+ i1 = HEAP32[i7 + 4 >> 2] | 0;
+ i3 = i5;
+ HEAP32[i3 >> 2] = HEAP32[i7 >> 2];
+ HEAP32[i3 + 4 >> 2] = i1;
+ HEAP32[i5 + 8 >> 2] = HEAP32[i4 + 8 >> 2];
+ HEAP32[i6 >> 2] = (HEAP32[i6 >> 2] | 0) + 16;
+ STACKTOP = i2;
+ return;
+}
+function _lua_setfield(i1, i6, i3) {
+ i1 = i1 | 0;
+ i6 = i6 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i4 = 0, i5 = 0;
+ i2 = STACKTOP;
+ i5 = HEAP32[i1 + 16 >> 2] | 0;
+ do {
+ if ((i6 | 0) <= 0) {
+ if (!((i6 | 0) < -1000999)) {
+ i4 = (HEAP32[i1 + 8 >> 2] | 0) + (i6 << 4) | 0;
+ break;
+ }
+ if ((i6 | 0) == -1001e3) {
+ i4 = (HEAP32[i1 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i6 = -1001e3 - i6 | 0;
+ i5 = HEAP32[i5 >> 2] | 0;
+ if ((HEAP32[i5 + 8 >> 2] | 0) != 22 ? (i4 = HEAP32[i5 >> 2] | 0, (i6 | 0) <= (HEAPU8[i4 + 6 | 0] | 0 | 0)) : 0) {
+ i4 = i4 + (i6 + -1 << 4) + 16 | 0;
+ } else {
+ i4 = 5192;
+ }
+ } else {
+ i4 = (HEAP32[i5 >> 2] | 0) + (i6 << 4) | 0;
+ i4 = i4 >>> 0 < (HEAP32[i1 + 8 >> 2] | 0) >>> 0 ? i4 : 5192;
+ }
+ } while (0);
+ i6 = i1 + 8 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ HEAP32[i6 >> 2] = i5 + 16;
+ i3 = _luaS_new(i1, i3) | 0;
+ HEAP32[i5 >> 2] = i3;
+ HEAP32[i5 + 8 >> 2] = HEAPU8[i3 + 4 | 0] | 0 | 64;
+ i5 = HEAP32[i6 >> 2] | 0;
+ _luaV_settable(i1, i4, i5 + -16 | 0, i5 + -32 | 0);
+ HEAP32[i6 >> 2] = (HEAP32[i6 >> 2] | 0) + -32;
+ STACKTOP = i2;
+ return;
+}
+function _luaopen_io(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0;
+ i2 = STACKTOP;
+ _lua_createtable(i1, 0, 11);
+ _luaL_setfuncs(i1, 2680, 0);
+ _luaL_newmetatable(i1, 2832) | 0;
+ _lua_pushvalue(i1, -1);
+ _lua_setfield(i1, -2, 2872);
+ _luaL_setfuncs(i1, 2880, 0);
+ _lua_settop(i1, -2);
+ i5 = HEAP32[_stdin >> 2] | 0;
+ i4 = _lua_newuserdata(i1, 8) | 0;
+ i3 = i4 + 4 | 0;
+ HEAP32[i3 >> 2] = 0;
+ _luaL_setmetatable(i1, 2832);
+ HEAP32[i4 >> 2] = i5;
+ HEAP32[i3 >> 2] = 154;
+ _lua_pushvalue(i1, -1);
+ _lua_setfield(i1, -1001e3, 2776);
+ _lua_setfield(i1, -2, 2792);
+ i3 = HEAP32[_stdout >> 2] | 0;
+ i4 = _lua_newuserdata(i1, 8) | 0;
+ i5 = i4 + 4 | 0;
+ HEAP32[i5 >> 2] = 0;
+ _luaL_setmetatable(i1, 2832);
+ HEAP32[i4 >> 2] = i3;
+ HEAP32[i5 >> 2] = 154;
+ _lua_pushvalue(i1, -1);
+ _lua_setfield(i1, -1001e3, 2800);
+ _lua_setfield(i1, -2, 2816);
+ i5 = HEAP32[_stderr >> 2] | 0;
+ i4 = _lua_newuserdata(i1, 8) | 0;
+ i3 = i4 + 4 | 0;
+ HEAP32[i3 >> 2] = 0;
+ _luaL_setmetatable(i1, 2832);
+ HEAP32[i4 >> 2] = i5;
+ HEAP32[i3 >> 2] = 154;
+ _lua_setfield(i1, -2, 2824);
+ STACKTOP = i2;
+ return 1;
+}
+function _lua_pushcclosure(i1, i4, i5) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i3 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0;
+ i2 = STACKTOP;
+ if ((i5 | 0) == 0) {
+ i6 = HEAP32[i1 + 8 >> 2] | 0;
+ HEAP32[i6 >> 2] = i4;
+ HEAP32[i6 + 8 >> 2] = 22;
+ i6 = i1 + 8 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ i5 = i5 + 16 | 0;
+ HEAP32[i6 >> 2] = i5;
+ STACKTOP = i2;
+ return;
+ }
+ if ((HEAP32[(HEAP32[i1 + 12 >> 2] | 0) + 12 >> 2] | 0) > 0) {
+ _luaC_step(i1);
+ }
+ i3 = _luaF_newCclosure(i1, i5) | 0;
+ HEAP32[i3 + 12 >> 2] = i4;
+ i4 = i1 + 8 | 0;
+ i6 = (HEAP32[i4 >> 2] | 0) + (0 - i5 << 4) | 0;
+ HEAP32[i4 >> 2] = i6;
+ do {
+ i5 = i5 + -1 | 0;
+ i9 = i6 + (i5 << 4) | 0;
+ i8 = HEAP32[i9 + 4 >> 2] | 0;
+ i7 = i3 + (i5 << 4) + 16 | 0;
+ HEAP32[i7 >> 2] = HEAP32[i9 >> 2];
+ HEAP32[i7 + 4 >> 2] = i8;
+ HEAP32[i3 + (i5 << 4) + 24 >> 2] = HEAP32[i6 + (i5 << 4) + 8 >> 2];
+ i6 = HEAP32[i4 >> 2] | 0;
+ } while ((i5 | 0) != 0);
+ HEAP32[i6 >> 2] = i3;
+ HEAP32[i6 + 8 >> 2] = 102;
+ i9 = i1 + 8 | 0;
+ i8 = HEAP32[i9 >> 2] | 0;
+ i8 = i8 + 16 | 0;
+ HEAP32[i9 >> 2] = i8;
+ STACKTOP = i2;
+ return;
+}
+function _luaF_findupval(i3, i4) {
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ var i1 = 0, i2 = 0, i5 = 0, i6 = 0, i7 = 0;
+ i1 = STACKTOP;
+ i2 = HEAP32[i3 + 12 >> 2] | 0;
+ i6 = i3 + 56 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ L1 : do {
+ if ((i5 | 0) == 0) {
+ i5 = i6;
+ } else {
+ while (1) {
+ i7 = HEAP32[i5 + 8 >> 2] | 0;
+ if (i7 >>> 0 < i4 >>> 0) {
+ i5 = i6;
+ break L1;
+ }
+ if ((i7 | 0) == (i4 | 0)) {
+ break;
+ }
+ i6 = HEAP32[i5 >> 2] | 0;
+ if ((i6 | 0) == 0) {
+ break L1;
+ } else {
+ i7 = i5;
+ i5 = i6;
+ i6 = i7;
+ }
+ }
+ i4 = i5 + 5 | 0;
+ i3 = (HEAPU8[i4] | 0) ^ 3;
+ if ((((HEAPU8[i2 + 60 | 0] | 0) ^ 3) & i3 | 0) != 0) {
+ i7 = i5;
+ STACKTOP = i1;
+ return i7 | 0;
+ }
+ HEAP8[i4] = i3;
+ i7 = i5;
+ STACKTOP = i1;
+ return i7 | 0;
+ }
+ } while (0);
+ i7 = _luaC_newobj(i3, 10, 32, i5, 0) | 0;
+ HEAP32[i7 + 8 >> 2] = i4;
+ i4 = i7 + 16 | 0;
+ HEAP32[i4 >> 2] = i2 + 112;
+ i6 = i2 + 132 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ HEAP32[i4 + 4 >> 2] = i5;
+ HEAP32[i5 + 16 >> 2] = i7;
+ HEAP32[i6 >> 2] = i7;
+ STACKTOP = i1;
+ return i7 | 0;
+}
+function _luaC_checkfinalizer(i5, i4, i6) {
+ i5 = i5 | 0;
+ i4 = i4 | 0;
+ i6 = i6 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i7 = 0, i8 = 0;
+ i3 = STACKTOP;
+ i1 = HEAP32[i5 + 12 >> 2] | 0;
+ i2 = i4 + 5 | 0;
+ if ((HEAP8[i2] & 24) != 0 | (i6 | 0) == 0) {
+ STACKTOP = i3;
+ return;
+ }
+ if (!((HEAP8[i6 + 6 | 0] & 4) == 0)) {
+ STACKTOP = i3;
+ return;
+ }
+ if ((_luaT_gettm(i6, 2, HEAP32[i1 + 192 >> 2] | 0) | 0) == 0) {
+ STACKTOP = i3;
+ return;
+ }
+ i7 = i1 + 76 | 0;
+ i8 = HEAP32[i7 >> 2] | 0;
+ if ((i8 | 0) == (i4 | 0)) {
+ do {
+ i6 = _sweeplist(i5, i8, 1) | 0;
+ } while ((i6 | 0) == (i8 | 0));
+ HEAP32[i7 >> 2] = i6;
+ }
+ i5 = i1 + 68 | 0;
+ while (1) {
+ i6 = HEAP32[i5 >> 2] | 0;
+ if ((i6 | 0) == (i4 | 0)) {
+ break;
+ } else {
+ i5 = i6;
+ }
+ }
+ HEAP32[i5 >> 2] = HEAP32[i4 >> 2];
+ i8 = i1 + 72 | 0;
+ HEAP32[i4 >> 2] = HEAP32[i8 >> 2];
+ HEAP32[i8 >> 2] = i4;
+ i4 = HEAPU8[i2] | 0 | 16;
+ HEAP8[i2] = i4;
+ if ((HEAPU8[i1 + 61 | 0] | 0) < 2) {
+ HEAP8[i2] = i4 & 191;
+ STACKTOP = i3;
+ return;
+ } else {
+ HEAP8[i2] = HEAP8[i1 + 60 | 0] & 3 | i4 & 184;
+ STACKTOP = i3;
+ return;
+ }
+}
+function _io_lines(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = i2;
+ if ((_lua_type(i1, 1) | 0) == -1) {
+ _lua_pushnil(i1);
+ }
+ if ((_lua_type(i1, 1) | 0) == 0) {
+ _lua_getfield(i1, -1001e3, 2776);
+ _lua_replace(i1, 1);
+ if ((HEAP32[(_luaL_checkudata(i1, 1, 2832) | 0) + 4 >> 2] | 0) != 0) {
+ i4 = 0;
+ _aux_lines(i1, i4);
+ STACKTOP = i2;
+ return 1;
+ }
+ _luaL_error(i1, 3080, i3) | 0;
+ i4 = 0;
+ _aux_lines(i1, i4);
+ STACKTOP = i2;
+ return 1;
+ } else {
+ i4 = _luaL_checklstring(i1, 1, 0) | 0;
+ i6 = _lua_newuserdata(i1, 8) | 0;
+ i5 = i6 + 4 | 0;
+ HEAP32[i5 >> 2] = 0;
+ _luaL_setmetatable(i1, 2832);
+ HEAP32[i6 >> 2] = 0;
+ HEAP32[i5 >> 2] = 156;
+ i5 = _fopen(i4 | 0, 3480) | 0;
+ HEAP32[i6 >> 2] = i5;
+ if ((i5 | 0) == 0) {
+ i6 = _strerror(HEAP32[(___errno_location() | 0) >> 2] | 0) | 0;
+ HEAP32[i3 >> 2] = i4;
+ HEAP32[i3 + 4 >> 2] = i6;
+ _luaL_error(i1, 3520, i3) | 0;
+ }
+ _lua_replace(i1, 1);
+ i6 = 1;
+ _aux_lines(i1, i6);
+ STACKTOP = i2;
+ return 1;
+ }
+ return 0;
+}
+function _luaC_changemode(i2, i6) {
+ i2 = i2 | 0;
+ i6 = i6 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i5 = 0;
+ i1 = STACKTOP;
+ i3 = i2 + 12 | 0;
+ i5 = HEAP32[i3 >> 2] | 0;
+ i4 = i5 + 62 | 0;
+ if ((HEAPU8[i4] | 0) == (i6 | 0)) {
+ STACKTOP = i1;
+ return;
+ }
+ if ((i6 | 0) == 2) {
+ i3 = i5 + 61 | 0;
+ if ((HEAP8[i3] | 0) != 0) {
+ do {
+ _singlestep(i2) | 0;
+ } while ((HEAP8[i3] | 0) != 0);
+ }
+ HEAP32[i5 + 20 >> 2] = (HEAP32[i5 + 12 >> 2] | 0) + (HEAP32[i5 + 8 >> 2] | 0);
+ HEAP8[i4] = 2;
+ STACKTOP = i1;
+ return;
+ }
+ HEAP8[i4] = 0;
+ i4 = HEAP32[i3 >> 2] | 0;
+ HEAP8[i4 + 61 | 0] = 2;
+ HEAP32[i4 + 64 >> 2] = 0;
+ i5 = i4 + 72 | 0;
+ do {
+ i6 = _sweeplist(i2, i5, 1) | 0;
+ } while ((i6 | 0) == (i5 | 0));
+ HEAP32[i4 + 80 >> 2] = i6;
+ i5 = i4 + 68 | 0;
+ do {
+ i6 = _sweeplist(i2, i5, 1) | 0;
+ } while ((i6 | 0) == (i5 | 0));
+ HEAP32[i4 + 76 >> 2] = i6;
+ i3 = (HEAP32[i3 >> 2] | 0) + 61 | 0;
+ if ((1 << HEAPU8[i3] & -29 | 0) != 0) {
+ STACKTOP = i1;
+ return;
+ }
+ do {
+ _singlestep(i2) | 0;
+ } while ((1 << HEAPU8[i3] & -29 | 0) == 0);
+ STACKTOP = i1;
+ return;
+}
+function _lua_rawget(i1, i5) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i6 = 0;
+ i2 = STACKTOP;
+ i4 = HEAP32[i1 + 16 >> 2] | 0;
+ do {
+ if ((i5 | 0) <= 0) {
+ if (!((i5 | 0) < -1000999)) {
+ i3 = (HEAP32[i1 + 8 >> 2] | 0) + (i5 << 4) | 0;
+ break;
+ }
+ if ((i5 | 0) == -1001e3) {
+ i3 = (HEAP32[i1 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i5 = -1001e3 - i5 | 0;
+ i4 = HEAP32[i4 >> 2] | 0;
+ if ((HEAP32[i4 + 8 >> 2] | 0) != 22 ? (i3 = HEAP32[i4 >> 2] | 0, (i5 | 0) <= (HEAPU8[i3 + 6 | 0] | 0 | 0)) : 0) {
+ i3 = i3 + (i5 + -1 << 4) + 16 | 0;
+ } else {
+ i3 = 5192;
+ }
+ } else {
+ i3 = (HEAP32[i4 >> 2] | 0) + (i5 << 4) | 0;
+ i3 = i3 >>> 0 < (HEAP32[i1 + 8 >> 2] | 0) >>> 0 ? i3 : 5192;
+ }
+ } while (0);
+ i5 = i1 + 8 | 0;
+ i4 = _luaH_get(HEAP32[i3 >> 2] | 0, (HEAP32[i5 >> 2] | 0) + -16 | 0) | 0;
+ i5 = HEAP32[i5 >> 2] | 0;
+ i6 = i4;
+ i1 = HEAP32[i6 + 4 >> 2] | 0;
+ i3 = i5 + -16 | 0;
+ HEAP32[i3 >> 2] = HEAP32[i6 >> 2];
+ HEAP32[i3 + 4 >> 2] = i1;
+ HEAP32[i5 + -8 >> 2] = HEAP32[i4 + 8 >> 2];
+ STACKTOP = i2;
+ return;
+}
+function _lua_isstring(i2, i4) {
+ i2 = i2 | 0;
+ i4 = i4 | 0;
+ var i1 = 0, i3 = 0;
+ i1 = STACKTOP;
+ i3 = HEAP32[i2 + 16 >> 2] | 0;
+ do {
+ if ((i4 | 0) <= 0) {
+ if (!((i4 | 0) < -1000999)) {
+ i2 = (HEAP32[i2 + 8 >> 2] | 0) + (i4 << 4) | 0;
+ break;
+ }
+ if ((i4 | 0) == -1001e3) {
+ i2 = (HEAP32[i2 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i2 = -1001e3 - i4 | 0;
+ i3 = HEAP32[i3 >> 2] | 0;
+ if ((HEAP32[i3 + 8 >> 2] | 0) == 22) {
+ i4 = 0;
+ i4 = i4 & 1;
+ STACKTOP = i1;
+ return i4 | 0;
+ }
+ i3 = HEAP32[i3 >> 2] | 0;
+ if ((i2 | 0) > (HEAPU8[i3 + 6 | 0] | 0 | 0)) {
+ i4 = 0;
+ i4 = i4 & 1;
+ STACKTOP = i1;
+ return i4 | 0;
+ } else {
+ i2 = i3 + (i2 + -1 << 4) + 16 | 0;
+ break;
+ }
+ } else {
+ i3 = (HEAP32[i3 >> 2] | 0) + (i4 << 4) | 0;
+ i2 = i3 >>> 0 < (HEAP32[i2 + 8 >> 2] | 0) >>> 0 ? i3 : 5192;
+ }
+ } while (0);
+ if ((i2 | 0) == 5192) {
+ i4 = 0;
+ i4 = i4 & 1;
+ STACKTOP = i1;
+ return i4 | 0;
+ }
+ i4 = ((HEAP32[i2 + 8 >> 2] & 15) + -3 | 0) >>> 0 < 2;
+ i4 = i4 & 1;
+ STACKTOP = i1;
+ return i4 | 0;
+}
+function _setnodevector(i5, i1, i3) {
+ i5 = i5 | 0;
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i4 = 0, i6 = 0, i7 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ if ((i3 | 0) == 0) {
+ HEAP32[i1 + 16 >> 2] = 8016;
+ i6 = 0;
+ i7 = 8016;
+ i4 = 0;
+ i5 = i1 + 7 | 0;
+ HEAP8[i5] = i4;
+ i6 = i7 + (i6 << 5) | 0;
+ i7 = i1 + 20 | 0;
+ HEAP32[i7 >> 2] = i6;
+ STACKTOP = i2;
+ return;
+ }
+ i4 = _luaO_ceillog2(i3) | 0;
+ if ((i4 | 0) > 30) {
+ _luaG_runerror(i5, 8048, i2);
+ }
+ i3 = 1 << i4;
+ if ((i3 + 1 | 0) >>> 0 > 134217727) {
+ _luaM_toobig(i5);
+ }
+ i6 = _luaM_realloc_(i5, 0, 0, i3 << 5) | 0;
+ i5 = i1 + 16 | 0;
+ HEAP32[i5 >> 2] = i6;
+ if ((i3 | 0) > 0) {
+ i7 = 0;
+ do {
+ HEAP32[i6 + (i7 << 5) + 28 >> 2] = 0;
+ HEAP32[i6 + (i7 << 5) + 24 >> 2] = 0;
+ HEAP32[i6 + (i7 << 5) + 8 >> 2] = 0;
+ i7 = i7 + 1 | 0;
+ i6 = HEAP32[i5 >> 2] | 0;
+ } while ((i7 | 0) != (i3 | 0));
+ }
+ i7 = i3;
+ i4 = i4 & 255;
+ i5 = i1 + 7 | 0;
+ HEAP8[i5] = i4;
+ i6 = i6 + (i7 << 5) | 0;
+ i7 = i1 + 20 | 0;
+ HEAP32[i7 >> 2] = i6;
+ STACKTOP = i2;
+ return;
+}
+function _lua_pushvalue(i1, i5) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i6 = 0, i7 = 0;
+ i2 = STACKTOP;
+ i4 = HEAP32[i1 + 16 >> 2] | 0;
+ do {
+ if ((i5 | 0) <= 0) {
+ if (!((i5 | 0) < -1000999)) {
+ i3 = (HEAP32[i1 + 8 >> 2] | 0) + (i5 << 4) | 0;
+ break;
+ }
+ if ((i5 | 0) == -1001e3) {
+ i3 = (HEAP32[i1 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i5 = -1001e3 - i5 | 0;
+ i4 = HEAP32[i4 >> 2] | 0;
+ if ((HEAP32[i4 + 8 >> 2] | 0) != 22 ? (i3 = HEAP32[i4 >> 2] | 0, (i5 | 0) <= (HEAPU8[i3 + 6 | 0] | 0 | 0)) : 0) {
+ i3 = i3 + (i5 + -1 << 4) + 16 | 0;
+ } else {
+ i3 = 5192;
+ }
+ } else {
+ i3 = (HEAP32[i4 >> 2] | 0) + (i5 << 4) | 0;
+ i3 = i3 >>> 0 < (HEAP32[i1 + 8 >> 2] | 0) >>> 0 ? i3 : 5192;
+ }
+ } while (0);
+ i5 = i1 + 8 | 0;
+ i4 = HEAP32[i5 >> 2] | 0;
+ i7 = i3;
+ i6 = HEAP32[i7 + 4 >> 2] | 0;
+ i1 = i4;
+ HEAP32[i1 >> 2] = HEAP32[i7 >> 2];
+ HEAP32[i1 + 4 >> 2] = i6;
+ HEAP32[i4 + 8 >> 2] = HEAP32[i3 + 8 >> 2];
+ HEAP32[i5 >> 2] = (HEAP32[i5 >> 2] | 0) + 16;
+ STACKTOP = i2;
+ return;
+}
+function _luaL_setfuncs(i3, i6, i1) {
+ i3 = i3 | 0;
+ i6 = i6 | 0;
+ i1 = i1 | 0;
+ var i2 = 0, i4 = 0, i5 = 0, i7 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i4 = i2;
+ _luaL_checkversion_(i3, 502.0);
+ if ((_lua_checkstack(i3, i1 + 20 | 0) | 0) == 0) {
+ HEAP32[i4 >> 2] = 1472;
+ _luaL_error(i3, 1216, i4) | 0;
+ }
+ if ((HEAP32[i6 >> 2] | 0) == 0) {
+ i7 = ~i1;
+ _lua_settop(i3, i7);
+ STACKTOP = i2;
+ return;
+ }
+ i4 = -2 - i1 | 0;
+ i5 = 0 - i1 | 0;
+ if ((i1 | 0) <= 0) {
+ do {
+ _lua_pushcclosure(i3, HEAP32[i6 + 4 >> 2] | 0, i1);
+ _lua_setfield(i3, i4, HEAP32[i6 >> 2] | 0);
+ i6 = i6 + 8 | 0;
+ } while ((HEAP32[i6 >> 2] | 0) != 0);
+ i7 = ~i1;
+ _lua_settop(i3, i7);
+ STACKTOP = i2;
+ return;
+ }
+ do {
+ i7 = 0;
+ do {
+ _lua_pushvalue(i3, i5);
+ i7 = i7 + 1 | 0;
+ } while ((i7 | 0) != (i1 | 0));
+ _lua_pushcclosure(i3, HEAP32[i6 + 4 >> 2] | 0, i1);
+ _lua_setfield(i3, i4, HEAP32[i6 >> 2] | 0);
+ i6 = i6 + 8 | 0;
+ } while ((HEAP32[i6 >> 2] | 0) != 0);
+ i7 = ~i1;
+ _lua_settop(i3, i7);
+ STACKTOP = i2;
+ return;
+}
+function _lua_touserdata(i3, i5) {
+ i3 = i3 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i2 = 0, i4 = 0;
+ i1 = STACKTOP;
+ i4 = HEAP32[i3 + 16 >> 2] | 0;
+ do {
+ if ((i5 | 0) <= 0) {
+ if (!((i5 | 0) < -1000999)) {
+ i2 = (HEAP32[i3 + 8 >> 2] | 0) + (i5 << 4) | 0;
+ break;
+ }
+ if ((i5 | 0) == -1001e3) {
+ i2 = (HEAP32[i3 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i3 = -1001e3 - i5 | 0;
+ i4 = HEAP32[i4 >> 2] | 0;
+ if ((HEAP32[i4 + 8 >> 2] | 0) != 22 ? (i2 = HEAP32[i4 >> 2] | 0, (i3 | 0) <= (HEAPU8[i2 + 6 | 0] | 0 | 0)) : 0) {
+ i2 = i2 + (i3 + -1 << 4) + 16 | 0;
+ } else {
+ i2 = 5192;
+ }
+ } else {
+ i2 = (HEAP32[i4 >> 2] | 0) + (i5 << 4) | 0;
+ i2 = i2 >>> 0 < (HEAP32[i3 + 8 >> 2] | 0) >>> 0 ? i2 : 5192;
+ }
+ } while (0);
+ i3 = HEAP32[i2 + 8 >> 2] & 15;
+ if ((i3 | 0) == 2) {
+ i5 = HEAP32[i2 >> 2] | 0;
+ STACKTOP = i1;
+ return i5 | 0;
+ } else if ((i3 | 0) == 7) {
+ i5 = (HEAP32[i2 >> 2] | 0) + 24 | 0;
+ STACKTOP = i1;
+ return i5 | 0;
+ } else {
+ i5 = 0;
+ STACKTOP = i1;
+ return i5 | 0;
+ }
+ return 0;
+}
+function _luaL_checkoption(i2, i3, i6, i4) {
+ i2 = i2 | 0;
+ i3 = i3 | 0;
+ i6 = i6 | 0;
+ i4 = i4 | 0;
+ var i1 = 0, i5 = 0, i7 = 0, i8 = 0, i9 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i5 = i1;
+ if ((i6 | 0) == 0) {
+ i6 = _lua_tolstring(i2, i3, 0) | 0;
+ if ((i6 | 0) == 0) {
+ i9 = _lua_typename(i2, 4) | 0;
+ i6 = _lua_typename(i2, _lua_type(i2, i3) | 0) | 0;
+ HEAP32[i5 >> 2] = i9;
+ HEAP32[i5 + 4 >> 2] = i6;
+ _luaL_argerror(i2, i3, _lua_pushfstring(i2, 1744, i5) | 0) | 0;
+ i6 = 0;
+ }
+ } else {
+ i6 = _luaL_optlstring(i2, i3, i6, 0) | 0;
+ }
+ i9 = HEAP32[i4 >> 2] | 0;
+ L6 : do {
+ if ((i9 | 0) != 0) {
+ i8 = 0;
+ while (1) {
+ i7 = i8 + 1 | 0;
+ if ((_strcmp(i9, i6) | 0) == 0) {
+ break;
+ }
+ i9 = HEAP32[i4 + (i7 << 2) >> 2] | 0;
+ if ((i9 | 0) == 0) {
+ break L6;
+ } else {
+ i8 = i7;
+ }
+ }
+ STACKTOP = i1;
+ return i8 | 0;
+ }
+ } while (0);
+ HEAP32[i5 >> 2] = i6;
+ i9 = _luaL_argerror(i2, i3, _lua_pushfstring(i2, 1192, i5) | 0) | 0;
+ STACKTOP = i1;
+ return i9 | 0;
+}
+function _lua_toboolean(i3, i5) {
+ i3 = i3 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i2 = 0, i4 = 0;
+ i1 = STACKTOP;
+ i4 = HEAP32[i3 + 16 >> 2] | 0;
+ do {
+ if ((i5 | 0) <= 0) {
+ if (!((i5 | 0) < -1000999)) {
+ i3 = (HEAP32[i3 + 8 >> 2] | 0) + (i5 << 4) | 0;
+ break;
+ }
+ if ((i5 | 0) == -1001e3) {
+ i3 = (HEAP32[i3 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i3 = -1001e3 - i5 | 0;
+ i4 = HEAP32[i4 >> 2] | 0;
+ if ((HEAP32[i4 + 8 >> 2] | 0) != 22 ? (i2 = HEAP32[i4 >> 2] | 0, (i3 | 0) <= (HEAPU8[i2 + 6 | 0] | 0 | 0)) : 0) {
+ i3 = i2 + (i3 + -1 << 4) + 16 | 0;
+ } else {
+ i3 = 5192;
+ }
+ } else {
+ i2 = (HEAP32[i4 >> 2] | 0) + (i5 << 4) | 0;
+ i3 = i2 >>> 0 < (HEAP32[i3 + 8 >> 2] | 0) >>> 0 ? i2 : 5192;
+ }
+ } while (0);
+ i2 = HEAP32[i3 + 8 >> 2] | 0;
+ if ((i2 | 0) == 0) {
+ i5 = 0;
+ i5 = i5 & 1;
+ STACKTOP = i1;
+ return i5 | 0;
+ }
+ if ((i2 | 0) != 1) {
+ i5 = 1;
+ i5 = i5 & 1;
+ STACKTOP = i1;
+ return i5 | 0;
+ }
+ i5 = (HEAP32[i3 >> 2] | 0) != 0;
+ i5 = i5 & 1;
+ STACKTOP = i1;
+ return i5 | 0;
+}
+function _lua_getfield(i1, i6, i3) {
+ i1 = i1 | 0;
+ i6 = i6 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i4 = 0, i5 = 0;
+ i2 = STACKTOP;
+ i5 = HEAP32[i1 + 16 >> 2] | 0;
+ do {
+ if ((i6 | 0) <= 0) {
+ if (!((i6 | 0) < -1000999)) {
+ i4 = (HEAP32[i1 + 8 >> 2] | 0) + (i6 << 4) | 0;
+ break;
+ }
+ if ((i6 | 0) == -1001e3) {
+ i4 = (HEAP32[i1 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i6 = -1001e3 - i6 | 0;
+ i5 = HEAP32[i5 >> 2] | 0;
+ if ((HEAP32[i5 + 8 >> 2] | 0) != 22 ? (i4 = HEAP32[i5 >> 2] | 0, (i6 | 0) <= (HEAPU8[i4 + 6 | 0] | 0 | 0)) : 0) {
+ i4 = i4 + (i6 + -1 << 4) + 16 | 0;
+ } else {
+ i4 = 5192;
+ }
+ } else {
+ i4 = (HEAP32[i5 >> 2] | 0) + (i6 << 4) | 0;
+ i4 = i4 >>> 0 < (HEAP32[i1 + 8 >> 2] | 0) >>> 0 ? i4 : 5192;
+ }
+ } while (0);
+ i5 = i1 + 8 | 0;
+ i6 = HEAP32[i5 >> 2] | 0;
+ i3 = _luaS_new(i1, i3) | 0;
+ HEAP32[i6 >> 2] = i3;
+ HEAP32[i6 + 8 >> 2] = HEAPU8[i3 + 4 | 0] | 0 | 64;
+ i6 = HEAP32[i5 >> 2] | 0;
+ HEAP32[i5 >> 2] = i6 + 16;
+ _luaV_gettable(i1, i4, i6, i6);
+ STACKTOP = i2;
+ return;
+}
+function _luaL_argerror(i1, i6, i3) {
+ i1 = i1 | 0;
+ i6 = i6 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i4 = 0, i5 = 0, i7 = 0, i8 = 0;
+ i4 = STACKTOP;
+ STACKTOP = STACKTOP + 112 | 0;
+ i2 = i4;
+ i5 = i4 + 12 | 0;
+ if ((_lua_getstack(i1, 0, i5) | 0) == 0) {
+ HEAP32[i2 >> 2] = i6;
+ HEAP32[i2 + 4 >> 2] = i3;
+ i8 = _luaL_error(i1, 1040, i2) | 0;
+ STACKTOP = i4;
+ return i8 | 0;
+ }
+ _lua_getinfo(i1, 1064, i5) | 0;
+ if ((_strcmp(HEAP32[i5 + 8 >> 2] | 0, 1072) | 0) == 0) {
+ i6 = i6 + -1 | 0;
+ if ((i6 | 0) == 0) {
+ HEAP32[i2 >> 2] = HEAP32[i5 + 4 >> 2];
+ HEAP32[i2 + 4 >> 2] = i3;
+ i8 = _luaL_error(i1, 1080, i2) | 0;
+ STACKTOP = i4;
+ return i8 | 0;
+ }
+ }
+ i7 = i5 + 4 | 0;
+ i8 = HEAP32[i7 >> 2] | 0;
+ if ((i8 | 0) == 0) {
+ if ((_pushglobalfuncname(i1, i5) | 0) == 0) {
+ i8 = 1112;
+ } else {
+ i8 = _lua_tolstring(i1, -1, 0) | 0;
+ }
+ HEAP32[i7 >> 2] = i8;
+ }
+ HEAP32[i2 >> 2] = i6;
+ HEAP32[i2 + 4 >> 2] = i8;
+ HEAP32[i2 + 8 >> 2] = i3;
+ i8 = _luaL_error(i1, 1120, i2) | 0;
+ STACKTOP = i4;
+ return i8 | 0;
+}
+function _match_class(i3, i2) {
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ var i1 = 0;
+ i1 = STACKTOP;
+ switch (_tolower(i2 | 0) | 0) {
+ case 117:
+ {
+ i3 = _isupper(i3 | 0) | 0;
+ break;
+ }
+ case 97:
+ {
+ i3 = _isalpha(i3 | 0) | 0;
+ break;
+ }
+ case 99:
+ {
+ i3 = _iscntrl(i3 | 0) | 0;
+ break;
+ }
+ case 120:
+ {
+ i3 = _isxdigit(i3 | 0) | 0;
+ break;
+ }
+ case 119:
+ {
+ i3 = _isalnum(i3 | 0) | 0;
+ break;
+ }
+ case 112:
+ {
+ i3 = _ispunct(i3 | 0) | 0;
+ break;
+ }
+ case 100:
+ {
+ i3 = (i3 + -48 | 0) >>> 0 < 10 | 0;
+ break;
+ }
+ case 108:
+ {
+ i3 = _islower(i3 | 0) | 0;
+ break;
+ }
+ case 122:
+ {
+ i3 = (i3 | 0) == 0 | 0;
+ break;
+ }
+ case 103:
+ {
+ i3 = _isgraph(i3 | 0) | 0;
+ break;
+ }
+ case 115:
+ {
+ i3 = _isspace(i3 | 0) | 0;
+ break;
+ }
+ default:
+ {
+ i3 = (i2 | 0) == (i3 | 0) | 0;
+ STACKTOP = i1;
+ return i3 | 0;
+ }
+ }
+ if ((_islower(i2 | 0) | 0) != 0) {
+ STACKTOP = i1;
+ return i3 | 0;
+ }
+ i3 = (i3 | 0) == 0 | 0;
+ STACKTOP = i1;
+ return i3 | 0;
+}
+function _condjump(i1, i3, i6, i4, i5) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ i6 = i6 | 0;
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i7 = 0, i8 = 0, i9 = 0;
+ i2 = STACKTOP;
+ _luaK_code(i1, i6 << 6 | i3 | i4 << 23 | i5 << 14) | 0;
+ i3 = i1 + 28 | 0;
+ i6 = HEAP32[i3 >> 2] | 0;
+ HEAP32[i3 >> 2] = -1;
+ i3 = _luaK_code(i1, 2147450903) | 0;
+ if ((i6 | 0) == -1) {
+ i9 = i3;
+ STACKTOP = i2;
+ return i9 | 0;
+ }
+ if ((i3 | 0) == -1) {
+ i9 = i6;
+ STACKTOP = i2;
+ return i9 | 0;
+ }
+ i8 = HEAP32[(HEAP32[i1 >> 2] | 0) + 12 >> 2] | 0;
+ i7 = i3;
+ while (1) {
+ i4 = i8 + (i7 << 2) | 0;
+ i5 = HEAP32[i4 >> 2] | 0;
+ i9 = (i5 >>> 14) + -131071 | 0;
+ if ((i9 | 0) == -1) {
+ break;
+ }
+ i9 = i7 + 1 + i9 | 0;
+ if ((i9 | 0) == -1) {
+ break;
+ } else {
+ i7 = i9;
+ }
+ }
+ i6 = i6 + ~i7 | 0;
+ if ((((i6 | 0) > -1 ? i6 : 0 - i6 | 0) | 0) > 131071) {
+ _luaX_syntaxerror(HEAP32[i1 + 12 >> 2] | 0, 10624);
+ }
+ HEAP32[i4 >> 2] = (i6 << 14) + 2147467264 | i5 & 16383;
+ i9 = i3;
+ STACKTOP = i2;
+ return i9 | 0;
+}
+function _skipcomment(i6, i1) {
+ i6 = i6 | 0;
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i7 = 0, i8 = 0;
+ i2 = STACKTOP;
+ HEAP32[i6 >> 2] = 0;
+ i3 = i6 + 4 | 0;
+ i5 = 1712;
+ while (1) {
+ i7 = _fgetc(HEAP32[i3 >> 2] | 0) | 0;
+ if ((i7 | 0) == -1) {
+ i4 = 3;
+ break;
+ }
+ i8 = i5 + 1 | 0;
+ if ((i7 | 0) != (HEAPU8[i5] | 0)) {
+ break;
+ }
+ i5 = HEAP32[i6 >> 2] | 0;
+ HEAP32[i6 >> 2] = i5 + 1;
+ HEAP8[i6 + i5 + 8 | 0] = i7;
+ if ((HEAP8[i8] | 0) == 0) {
+ i4 = 6;
+ break;
+ } else {
+ i5 = i8;
+ }
+ }
+ if ((i4 | 0) == 3) {
+ HEAP32[i1 >> 2] = -1;
+ i8 = 0;
+ STACKTOP = i2;
+ return i8 | 0;
+ } else if ((i4 | 0) == 6) {
+ HEAP32[i6 >> 2] = 0;
+ i7 = _fgetc(HEAP32[i3 >> 2] | 0) | 0;
+ }
+ HEAP32[i1 >> 2] = i7;
+ if ((i7 | 0) != 35) {
+ i8 = 0;
+ STACKTOP = i2;
+ return i8 | 0;
+ }
+ do {
+ i8 = _fgetc(HEAP32[i3 >> 2] | 0) | 0;
+ } while (!((i8 | 0) == 10 | (i8 | 0) == -1));
+ HEAP32[i1 >> 2] = _fgetc(HEAP32[i3 >> 2] | 0) | 0;
+ i8 = 1;
+ STACKTOP = i2;
+ return i8 | 0;
+}
+function _lua_isnumber(i4, i6) {
+ i4 = i4 | 0;
+ i6 = i6 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i5 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i2 = i1;
+ i5 = HEAP32[i4 + 16 >> 2] | 0;
+ do {
+ if ((i6 | 0) <= 0) {
+ if (!((i6 | 0) < -1000999)) {
+ i3 = (HEAP32[i4 + 8 >> 2] | 0) + (i6 << 4) | 0;
+ break;
+ }
+ if ((i6 | 0) == -1001e3) {
+ i3 = (HEAP32[i4 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i4 = -1001e3 - i6 | 0;
+ i5 = HEAP32[i5 >> 2] | 0;
+ if ((HEAP32[i5 + 8 >> 2] | 0) != 22 ? (i3 = HEAP32[i5 >> 2] | 0, (i4 | 0) <= (HEAPU8[i3 + 6 | 0] | 0 | 0)) : 0) {
+ i3 = i3 + (i4 + -1 << 4) + 16 | 0;
+ } else {
+ i3 = 5192;
+ }
+ } else {
+ i3 = (HEAP32[i5 >> 2] | 0) + (i6 << 4) | 0;
+ i3 = i3 >>> 0 < (HEAP32[i4 + 8 >> 2] | 0) >>> 0 ? i3 : 5192;
+ }
+ } while (0);
+ if ((HEAP32[i3 + 8 >> 2] | 0) == 3) {
+ i6 = 1;
+ i6 = i6 & 1;
+ STACKTOP = i1;
+ return i6 | 0;
+ }
+ i6 = (_luaV_tonumber(i3, i2) | 0) != 0;
+ i6 = i6 & 1;
+ STACKTOP = i1;
+ return i6 | 0;
+}
+function ___shgetc(i3) {
+ i3 = i3 | 0;
+ var i1 = 0, i2 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0;
+ i2 = STACKTOP;
+ i7 = i3 + 104 | 0;
+ i6 = HEAP32[i7 >> 2] | 0;
+ if (!((i6 | 0) != 0 ? (HEAP32[i3 + 108 >> 2] | 0) >= (i6 | 0) : 0)) {
+ i8 = 3;
+ }
+ if ((i8 | 0) == 3 ? (i1 = ___uflow(i3) | 0, (i1 | 0) >= 0) : 0) {
+ i7 = HEAP32[i7 >> 2] | 0;
+ i6 = HEAP32[i3 + 8 >> 2] | 0;
+ if ((i7 | 0) != 0 ? (i4 = HEAP32[i3 + 4 >> 2] | 0, i5 = i7 - (HEAP32[i3 + 108 >> 2] | 0) + -1 | 0, (i6 - i4 | 0) > (i5 | 0)) : 0) {
+ HEAP32[i3 + 100 >> 2] = i4 + i5;
+ } else {
+ HEAP32[i3 + 100 >> 2] = i6;
+ }
+ i4 = HEAP32[i3 + 4 >> 2] | 0;
+ if ((i6 | 0) != 0) {
+ i8 = i3 + 108 | 0;
+ HEAP32[i8 >> 2] = i6 + 1 - i4 + (HEAP32[i8 >> 2] | 0);
+ }
+ i3 = i4 + -1 | 0;
+ if ((HEAPU8[i3] | 0 | 0) == (i1 | 0)) {
+ i8 = i1;
+ STACKTOP = i2;
+ return i8 | 0;
+ }
+ HEAP8[i3] = i1;
+ i8 = i1;
+ STACKTOP = i2;
+ return i8 | 0;
+ }
+ HEAP32[i3 + 100 >> 2] = 0;
+ i8 = -1;
+ STACKTOP = i2;
+ return i8 | 0;
+}
+function _lua_type(i2, i4) {
+ i2 = i2 | 0;
+ i4 = i4 | 0;
+ var i1 = 0, i3 = 0;
+ i1 = STACKTOP;
+ i3 = HEAP32[i2 + 16 >> 2] | 0;
+ do {
+ if ((i4 | 0) <= 0) {
+ if (!((i4 | 0) < -1000999)) {
+ i2 = (HEAP32[i2 + 8 >> 2] | 0) + (i4 << 4) | 0;
+ break;
+ }
+ if ((i4 | 0) == -1001e3) {
+ i2 = (HEAP32[i2 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i2 = -1001e3 - i4 | 0;
+ i3 = HEAP32[i3 >> 2] | 0;
+ if ((HEAP32[i3 + 8 >> 2] | 0) == 22) {
+ i4 = -1;
+ STACKTOP = i1;
+ return i4 | 0;
+ }
+ i3 = HEAP32[i3 >> 2] | 0;
+ if ((i2 | 0) > (HEAPU8[i3 + 6 | 0] | 0 | 0)) {
+ i4 = -1;
+ STACKTOP = i1;
+ return i4 | 0;
+ } else {
+ i2 = i3 + (i2 + -1 << 4) + 16 | 0;
+ break;
+ }
+ } else {
+ i3 = (HEAP32[i3 >> 2] | 0) + (i4 << 4) | 0;
+ i2 = i3 >>> 0 < (HEAP32[i2 + 8 >> 2] | 0) >>> 0 ? i3 : 5192;
+ }
+ } while (0);
+ if ((i2 | 0) == 5192) {
+ i4 = -1;
+ STACKTOP = i1;
+ return i4 | 0;
+ }
+ i4 = HEAP32[i2 + 8 >> 2] & 15;
+ STACKTOP = i1;
+ return i4 | 0;
+}
+function _g_iofile(i4, i1, i5) {
+ i4 = i4 | 0;
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i3 = 0, i6 = 0, i7 = 0, i8 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = i2;
+ if ((_lua_type(i4, 1) | 0) < 1) {
+ _lua_getfield(i4, -1001e3, i1);
+ STACKTOP = i2;
+ return;
+ }
+ i6 = _lua_tolstring(i4, 1, 0) | 0;
+ if ((i6 | 0) != 0) {
+ i7 = _lua_newuserdata(i4, 8) | 0;
+ i8 = i7 + 4 | 0;
+ HEAP32[i8 >> 2] = 0;
+ _luaL_setmetatable(i4, 2832);
+ HEAP32[i7 >> 2] = 0;
+ HEAP32[i8 >> 2] = 156;
+ i5 = _fopen(i6 | 0, i5 | 0) | 0;
+ HEAP32[i7 >> 2] = i5;
+ if ((i5 | 0) == 0) {
+ i8 = _strerror(HEAP32[(___errno_location() | 0) >> 2] | 0) | 0;
+ HEAP32[i3 >> 2] = i6;
+ HEAP32[i3 + 4 >> 2] = i8;
+ _luaL_error(i4, 3520, i3) | 0;
+ }
+ } else {
+ if ((HEAP32[(_luaL_checkudata(i4, 1, 2832) | 0) + 4 >> 2] | 0) == 0) {
+ _luaL_error(i4, 3080, i3) | 0;
+ }
+ _lua_pushvalue(i4, 1);
+ }
+ _lua_setfield(i4, -1001e3, i1);
+ _lua_getfield(i4, -1001e3, i1);
+ STACKTOP = i2;
+ return;
+}
+function _lua_getlocal(i4, i5, i2) {
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ i2 = i2 | 0;
+ var i1 = 0, i3 = 0, i6 = 0, i7 = 0, i8 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = i1;
+ if ((i5 | 0) == 0) {
+ i3 = HEAP32[i4 + 8 >> 2] | 0;
+ if ((HEAP32[i3 + -8 >> 2] | 0) != 70) {
+ i5 = 0;
+ STACKTOP = i1;
+ return i5 | 0;
+ }
+ i5 = _luaF_getlocalname(HEAP32[(HEAP32[i3 + -16 >> 2] | 0) + 12 >> 2] | 0, i2, 0) | 0;
+ STACKTOP = i1;
+ return i5 | 0;
+ } else {
+ HEAP32[i3 >> 2] = 0;
+ i2 = _findlocal(i4, HEAP32[i5 + 96 >> 2] | 0, i2, i3) | 0;
+ if ((i2 | 0) == 0) {
+ i5 = 0;
+ STACKTOP = i1;
+ return i5 | 0;
+ }
+ i3 = HEAP32[i3 >> 2] | 0;
+ i5 = i4 + 8 | 0;
+ i4 = HEAP32[i5 >> 2] | 0;
+ i8 = i3;
+ i7 = HEAP32[i8 + 4 >> 2] | 0;
+ i6 = i4;
+ HEAP32[i6 >> 2] = HEAP32[i8 >> 2];
+ HEAP32[i6 + 4 >> 2] = i7;
+ HEAP32[i4 + 8 >> 2] = HEAP32[i3 + 8 >> 2];
+ HEAP32[i5 >> 2] = (HEAP32[i5 >> 2] | 0) + 16;
+ i5 = i2;
+ STACKTOP = i1;
+ return i5 | 0;
+ }
+ return 0;
+}
+function _lua_checkstack(i7, i4) {
+ i7 = i7 | 0;
+ i4 = i4 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i5 = 0, i6 = 0, i8 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = i1;
+ HEAP32[i3 >> 2] = i4;
+ i2 = HEAP32[i7 + 16 >> 2] | 0;
+ i5 = i7 + 8 | 0;
+ i6 = HEAP32[i5 >> 2] | 0;
+ i8 = i6;
+ do {
+ if (((HEAP32[i7 + 24 >> 2] | 0) - i8 >> 4 | 0) <= (i4 | 0)) {
+ if (((i8 - (HEAP32[i7 + 28 >> 2] | 0) >> 4) + 5 | 0) > (1e6 - i4 | 0)) {
+ i8 = 0;
+ STACKTOP = i1;
+ return i8 | 0;
+ }
+ i6 = (_luaD_rawrunprotected(i7, 2, i3) | 0) == 0;
+ if (i6) {
+ i5 = HEAP32[i5 >> 2] | 0;
+ i4 = HEAP32[i3 >> 2] | 0;
+ i3 = i6 & 1;
+ break;
+ } else {
+ i8 = 0;
+ STACKTOP = i1;
+ return i8 | 0;
+ }
+ } else {
+ i5 = i6;
+ i3 = 1;
+ }
+ } while (0);
+ i2 = i2 + 4 | 0;
+ i4 = i5 + (i4 << 4) | 0;
+ if (!((HEAP32[i2 >> 2] | 0) >>> 0 < i4 >>> 0)) {
+ i8 = i3;
+ STACKTOP = i1;
+ return i8 | 0;
+ }
+ HEAP32[i2 >> 2] = i4;
+ i8 = i3;
+ STACKTOP = i1;
+ return i8 | 0;
+}
+function _luaK_exp2nextreg(i1, i3) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0;
+ i2 = STACKTOP;
+ _luaK_dischargevars(i1, i3);
+ if (((HEAP32[i3 >> 2] | 0) == 6 ? (i4 = HEAP32[i3 + 8 >> 2] | 0, (i4 & 256 | 0) == 0) : 0) ? (HEAPU8[i1 + 46 | 0] | 0 | 0) <= (i4 | 0) : 0) {
+ i7 = i1 + 48 | 0;
+ HEAP8[i7] = (HEAP8[i7] | 0) + -1 << 24 >> 24;
+ }
+ i4 = i1 + 48 | 0;
+ i5 = HEAP8[i4] | 0;
+ i6 = (i5 & 255) + 1 | 0;
+ i7 = (HEAP32[i1 >> 2] | 0) + 78 | 0;
+ if (!(i6 >>> 0 > (HEAPU8[i7] | 0) >>> 0)) {
+ i7 = i5;
+ i7 = i7 & 255;
+ i7 = i7 + 1 | 0;
+ i6 = i7 & 255;
+ HEAP8[i4] = i6;
+ i7 = i7 & 255;
+ i7 = i7 + -1 | 0;
+ _exp2reg(i1, i3, i7);
+ STACKTOP = i2;
+ return;
+ }
+ if (i6 >>> 0 > 249) {
+ _luaX_syntaxerror(HEAP32[i1 + 12 >> 2] | 0, 10536);
+ }
+ HEAP8[i7] = i6;
+ i7 = HEAP8[i4] | 0;
+ i7 = i7 & 255;
+ i7 = i7 + 1 | 0;
+ i6 = i7 & 255;
+ HEAP8[i4] = i6;
+ i7 = i7 & 255;
+ i7 = i7 + -1 | 0;
+ _exp2reg(i1, i3, i7);
+ STACKTOP = i2;
+ return;
+}
+function _lua_next(i2, i4) {
+ i2 = i2 | 0;
+ i4 = i4 | 0;
+ var i1 = 0, i3 = 0, i5 = 0;
+ i1 = STACKTOP;
+ i5 = HEAP32[i2 + 16 >> 2] | 0;
+ do {
+ if ((i4 | 0) <= 0) {
+ if (!((i4 | 0) < -1000999)) {
+ i4 = (HEAP32[i2 + 8 >> 2] | 0) + (i4 << 4) | 0;
+ break;
+ }
+ if ((i4 | 0) == -1001e3) {
+ i4 = (HEAP32[i2 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i4 = -1001e3 - i4 | 0;
+ i5 = HEAP32[i5 >> 2] | 0;
+ if ((HEAP32[i5 + 8 >> 2] | 0) != 22 ? (i3 = HEAP32[i5 >> 2] | 0, (i4 | 0) <= (HEAPU8[i3 + 6 | 0] | 0 | 0)) : 0) {
+ i4 = i3 + (i4 + -1 << 4) + 16 | 0;
+ } else {
+ i4 = 5192;
+ }
+ } else {
+ i3 = (HEAP32[i5 >> 2] | 0) + (i4 << 4) | 0;
+ i4 = i3 >>> 0 < (HEAP32[i2 + 8 >> 2] | 0) >>> 0 ? i3 : 5192;
+ }
+ } while (0);
+ i3 = i2 + 8 | 0;
+ i2 = _luaH_next(i2, HEAP32[i4 >> 2] | 0, (HEAP32[i3 >> 2] | 0) + -16 | 0) | 0;
+ i4 = HEAP32[i3 >> 2] | 0;
+ HEAP32[i3 >> 2] = (i2 | 0) == 0 ? i4 + -16 | 0 : i4 + 16 | 0;
+ STACKTOP = i1;
+ return i2 | 0;
+}
+function _inclinenumber(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0;
+ i2 = STACKTOP;
+ i4 = HEAP32[i1 >> 2] | 0;
+ i3 = i1 + 56 | 0;
+ i5 = HEAP32[i3 >> 2] | 0;
+ i6 = HEAP32[i5 >> 2] | 0;
+ HEAP32[i5 >> 2] = i6 + -1;
+ if ((i6 | 0) == 0) {
+ i5 = _luaZ_fill(i5) | 0;
+ } else {
+ i6 = i5 + 4 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ HEAP32[i6 >> 2] = i5 + 1;
+ i5 = HEAPU8[i5] | 0;
+ }
+ HEAP32[i1 >> 2] = i5;
+ if ((i5 | 0) == 13 | (i5 | 0) == 10 ? (i5 | 0) != (i4 | 0) : 0) {
+ i3 = HEAP32[i3 >> 2] | 0;
+ i6 = HEAP32[i3 >> 2] | 0;
+ HEAP32[i3 >> 2] = i6 + -1;
+ if ((i6 | 0) == 0) {
+ i3 = _luaZ_fill(i3) | 0;
+ } else {
+ i6 = i3 + 4 | 0;
+ i3 = HEAP32[i6 >> 2] | 0;
+ HEAP32[i6 >> 2] = i3 + 1;
+ i3 = HEAPU8[i3] | 0;
+ }
+ HEAP32[i1 >> 2] = i3;
+ }
+ i5 = i1 + 4 | 0;
+ i6 = HEAP32[i5 >> 2] | 0;
+ HEAP32[i5 >> 2] = i6 + 1;
+ if ((i6 | 0) > 2147483643) {
+ _luaX_syntaxerror(i1, 12560);
+ } else {
+ STACKTOP = i2;
+ return;
+ }
+}
+function _lua_yieldk(i5, i6, i1, i7) {
+ i5 = i5 | 0;
+ i6 = i6 | 0;
+ i1 = i1 | 0;
+ i7 = i7 | 0;
+ var i2 = 0, i3 = 0, i4 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i4 = i2;
+ i3 = HEAP32[i5 + 16 >> 2] | 0;
+ if ((HEAP16[i5 + 36 >> 1] | 0) != 0) {
+ if ((HEAP32[(HEAP32[i5 + 12 >> 2] | 0) + 172 >> 2] | 0) == (i5 | 0)) {
+ _luaG_runerror(i5, 2312, i4);
+ } else {
+ _luaG_runerror(i5, 2264, i4);
+ }
+ }
+ HEAP8[i5 + 6 | 0] = 1;
+ HEAP32[i3 + 20 >> 2] = (HEAP32[i3 >> 2] | 0) - (HEAP32[i5 + 28 >> 2] | 0);
+ if (!((HEAP8[i3 + 18 | 0] & 1) == 0)) {
+ STACKTOP = i2;
+ return 0;
+ }
+ HEAP32[i3 + 28 >> 2] = i7;
+ if ((i7 | 0) == 0) {
+ i4 = i5 + 8 | 0;
+ i4 = HEAP32[i4 >> 2] | 0;
+ i7 = ~i6;
+ i7 = i4 + (i7 << 4) | 0;
+ HEAP32[i3 >> 2] = i7;
+ _luaD_throw(i5, 1);
+ }
+ HEAP32[i3 + 24 >> 2] = i1;
+ i4 = i5 + 8 | 0;
+ i4 = HEAP32[i4 >> 2] | 0;
+ i7 = ~i6;
+ i7 = i4 + (i7 << 4) | 0;
+ HEAP32[i3 >> 2] = i7;
+ _luaD_throw(i5, 1);
+ return 0;
+}
+function _luaH_getint(i4, i6) {
+ i4 = i4 | 0;
+ i6 = i6 | 0;
+ var i1 = 0, i2 = 0, d3 = 0.0, i5 = 0, i7 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i5 = i1;
+ i7 = i6 + -1 | 0;
+ if (i7 >>> 0 < (HEAP32[i4 + 28 >> 2] | 0) >>> 0) {
+ i7 = (HEAP32[i4 + 12 >> 2] | 0) + (i7 << 4) | 0;
+ STACKTOP = i1;
+ return i7 | 0;
+ }
+ d3 = +(i6 | 0);
+ HEAPF64[i5 >> 3] = d3 + 1.0;
+ i5 = (HEAP32[i5 + 4 >> 2] | 0) + (HEAP32[i5 >> 2] | 0) | 0;
+ if ((i5 | 0) < 0) {
+ i6 = 0 - i5 | 0;
+ i5 = (i5 | 0) == (i6 | 0) ? 0 : i6;
+ }
+ i4 = (HEAP32[i4 + 16 >> 2] | 0) + (((i5 | 0) % ((1 << (HEAPU8[i4 + 7 | 0] | 0)) + -1 | 1 | 0) | 0) << 5) | 0;
+ while (1) {
+ if ((HEAP32[i4 + 24 >> 2] | 0) == 3 ? +HEAPF64[i4 + 16 >> 3] == d3 : 0) {
+ break;
+ }
+ i4 = HEAP32[i4 + 28 >> 2] | 0;
+ if ((i4 | 0) == 0) {
+ i4 = 5192;
+ i2 = 10;
+ break;
+ }
+ }
+ if ((i2 | 0) == 10) {
+ STACKTOP = i1;
+ return i4 | 0;
+ }
+ i7 = i4;
+ STACKTOP = i1;
+ return i7 | 0;
+}
+function _luaL_checkversion_(i1, d4) {
+ i1 = i1 | 0;
+ d4 = +d4;
+ var i2 = 0, i3 = 0, i5 = 0, d6 = 0.0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = i2;
+ i5 = _lua_version(i1) | 0;
+ if ((i5 | 0) == (_lua_version(0) | 0)) {
+ d6 = +HEAPF64[i5 >> 3];
+ if (d6 != d4) {
+ HEAPF64[tempDoublePtr >> 3] = d4;
+ HEAP32[i3 >> 2] = HEAP32[tempDoublePtr >> 2];
+ HEAP32[i3 + 4 >> 2] = HEAP32[tempDoublePtr + 4 >> 2];
+ i5 = i3 + 8 | 0;
+ HEAPF64[tempDoublePtr >> 3] = d6;
+ HEAP32[i5 >> 2] = HEAP32[tempDoublePtr >> 2];
+ HEAP32[i5 + 4 >> 2] = HEAP32[tempDoublePtr + 4 >> 2];
+ _luaL_error(i1, 1528, i3) | 0;
+ }
+ } else {
+ _luaL_error(i1, 1496, i3) | 0;
+ }
+ _lua_pushnumber(i1, -4660.0);
+ if ((_lua_tointegerx(i1, -1, 0) | 0) == -4660 ? (_lua_tounsignedx(i1, -1, 0) | 0) == -4660 : 0) {
+ _lua_settop(i1, -2);
+ STACKTOP = i2;
+ return;
+ }
+ _luaL_error(i1, 1584, i3) | 0;
+ _lua_settop(i1, -2);
+ STACKTOP = i2;
+ return;
+}
+function _math_random(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, d3 = 0.0, i4 = 0, i5 = 0, d6 = 0.0, d7 = 0.0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i4 = i2;
+ d3 = +((_rand() | 0) % 2147483647 | 0 | 0) / 2147483647.0;
+ i5 = _lua_gettop(i1) | 0;
+ if ((i5 | 0) == 0) {
+ _lua_pushnumber(i1, d3);
+ i5 = 1;
+ STACKTOP = i2;
+ return i5 | 0;
+ } else if ((i5 | 0) == 1) {
+ d6 = +_luaL_checknumber(i1, 1);
+ if (!(d6 >= 1.0)) {
+ _luaL_argerror(i1, 1, 4056) | 0;
+ }
+ _lua_pushnumber(i1, +Math_floor(+(d3 * d6)) + 1.0);
+ i5 = 1;
+ STACKTOP = i2;
+ return i5 | 0;
+ } else if ((i5 | 0) == 2) {
+ d6 = +_luaL_checknumber(i1, 1);
+ d7 = +_luaL_checknumber(i1, 2);
+ if (!(d6 <= d7)) {
+ _luaL_argerror(i1, 2, 4056) | 0;
+ }
+ _lua_pushnumber(i1, d6 + +Math_floor(+(d3 * (d7 - d6 + 1.0))));
+ i5 = 1;
+ STACKTOP = i2;
+ return i5 | 0;
+ } else {
+ i5 = _luaL_error(i1, 4080, i4) | 0;
+ STACKTOP = i2;
+ return i5 | 0;
+ }
+ return 0;
+}
+function _push_onecapture(i2, i3, i4, i6) {
+ i2 = i2 | 0;
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ i6 = i6 | 0;
+ var i1 = 0, i5 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i5 = i1;
+ if ((HEAP32[i2 + 20 >> 2] | 0) <= (i3 | 0)) {
+ i2 = HEAP32[i2 + 16 >> 2] | 0;
+ if ((i3 | 0) == 0) {
+ _lua_pushlstring(i2, i4, i6 - i4 | 0) | 0;
+ STACKTOP = i1;
+ return;
+ } else {
+ _luaL_error(i2, 7224, i5) | 0;
+ STACKTOP = i1;
+ return;
+ }
+ }
+ i4 = HEAP32[i2 + (i3 << 3) + 28 >> 2] | 0;
+ if (!((i4 | 0) == -1)) {
+ i5 = HEAP32[i2 + 16 >> 2] | 0;
+ i3 = HEAP32[i2 + (i3 << 3) + 24 >> 2] | 0;
+ if ((i4 | 0) == -2) {
+ _lua_pushinteger(i5, i3 + 1 - (HEAP32[i2 + 4 >> 2] | 0) | 0);
+ STACKTOP = i1;
+ return;
+ }
+ } else {
+ i6 = i2 + 16 | 0;
+ _luaL_error(HEAP32[i6 >> 2] | 0, 7248, i5) | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ i3 = HEAP32[i2 + (i3 << 3) + 24 >> 2] | 0;
+ }
+ _lua_pushlstring(i5, i3, i4) | 0;
+ STACKTOP = i1;
+ return;
+}
+function _luaK_nil(i7, i6, i5) {
+ i7 = i7 | 0;
+ i6 = i6 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0;
+ i2 = STACKTOP;
+ i9 = i5 + i6 | 0;
+ i1 = i9 + -1 | 0;
+ i10 = HEAP32[i7 + 20 >> 2] | 0;
+ do {
+ if ((i10 | 0) > (HEAP32[i7 + 24 >> 2] | 0) ? (i4 = (HEAP32[(HEAP32[i7 >> 2] | 0) + 12 >> 2] | 0) + (i10 + -1 << 2) | 0, i3 = HEAP32[i4 >> 2] | 0, (i3 & 63 | 0) == 4) : 0) {
+ i11 = i3 >>> 6 & 255;
+ i10 = i11 + (i3 >>> 23) | 0;
+ if (!((i11 | 0) <= (i6 | 0) ? (i10 + 1 | 0) >= (i6 | 0) : 0)) {
+ i8 = 5;
+ }
+ if ((i8 | 0) == 5 ? (i11 | 0) < (i6 | 0) | (i11 | 0) > (i9 | 0) : 0) {
+ break;
+ }
+ i5 = (i11 | 0) < (i6 | 0) ? i11 : i6;
+ HEAP32[i4 >> 2] = ((i10 | 0) > (i1 | 0) ? i10 : i1) - i5 << 23 | i5 << 6 & 16320 | i3 & 8372287;
+ STACKTOP = i2;
+ return;
+ }
+ } while (0);
+ _luaK_code(i7, i6 << 6 | (i5 << 23) + -8388608 | 4) | 0;
+ STACKTOP = i2;
+ return;
+}
+function _lua_settable(i1, i5) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i3 = 0, i4 = 0;
+ i2 = STACKTOP;
+ i4 = HEAP32[i1 + 16 >> 2] | 0;
+ do {
+ if ((i5 | 0) <= 0) {
+ if (!((i5 | 0) < -1000999)) {
+ i3 = (HEAP32[i1 + 8 >> 2] | 0) + (i5 << 4) | 0;
+ break;
+ }
+ if ((i5 | 0) == -1001e3) {
+ i3 = (HEAP32[i1 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i5 = -1001e3 - i5 | 0;
+ i4 = HEAP32[i4 >> 2] | 0;
+ if ((HEAP32[i4 + 8 >> 2] | 0) != 22 ? (i3 = HEAP32[i4 >> 2] | 0, (i5 | 0) <= (HEAPU8[i3 + 6 | 0] | 0 | 0)) : 0) {
+ i3 = i3 + (i5 + -1 << 4) + 16 | 0;
+ } else {
+ i3 = 5192;
+ }
+ } else {
+ i3 = (HEAP32[i4 >> 2] | 0) + (i5 << 4) | 0;
+ i3 = i3 >>> 0 < (HEAP32[i1 + 8 >> 2] | 0) >>> 0 ? i3 : 5192;
+ }
+ } while (0);
+ i5 = i1 + 8 | 0;
+ i4 = HEAP32[i5 >> 2] | 0;
+ _luaV_settable(i1, i3, i4 + -32 | 0, i4 + -16 | 0);
+ HEAP32[i5 >> 2] = (HEAP32[i5 >> 2] | 0) + -32;
+ STACKTOP = i2;
+ return;
+}
+function _luaL_findtable(i3, i6, i5, i4) {
+ i3 = i3 | 0;
+ i6 = i6 | 0;
+ i5 = i5 | 0;
+ i4 = i4 | 0;
+ var i1 = 0, i2 = 0, i7 = 0;
+ i2 = STACKTOP;
+ if ((i6 | 0) != 0) {
+ _lua_pushvalue(i3, i6);
+ }
+ while (1) {
+ i6 = _strchr(i5, 46) | 0;
+ if ((i6 | 0) == 0) {
+ i6 = i5 + (_strlen(i5 | 0) | 0) | 0;
+ }
+ i7 = i6 - i5 | 0;
+ _lua_pushlstring(i3, i5, i7) | 0;
+ _lua_rawget(i3, -2);
+ if ((_lua_type(i3, -1) | 0) != 0) {
+ if ((_lua_type(i3, -1) | 0) != 5) {
+ break;
+ }
+ } else {
+ _lua_settop(i3, -2);
+ _lua_createtable(i3, 0, (HEAP8[i6] | 0) == 46 ? 1 : i4);
+ _lua_pushlstring(i3, i5, i7) | 0;
+ _lua_pushvalue(i3, -2);
+ _lua_settable(i3, -4);
+ }
+ _lua_remove(i3, -2);
+ if ((HEAP8[i6] | 0) == 46) {
+ i5 = i6 + 1 | 0;
+ } else {
+ i3 = 0;
+ i1 = 10;
+ break;
+ }
+ }
+ if ((i1 | 0) == 10) {
+ STACKTOP = i2;
+ return i3 | 0;
+ }
+ _lua_settop(i3, -3);
+ i7 = i5;
+ STACKTOP = i2;
+ return i7 | 0;
+}
+function _luaD_call(i1, i4, i5, i8) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ i8 = i8 | 0;
+ var i2 = 0, i3 = 0, i6 = 0, i7 = 0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i7 = i3;
+ i2 = i1 + 38 | 0;
+ i6 = (HEAP16[i2 >> 1] | 0) + 1 << 16 >> 16;
+ HEAP16[i2 >> 1] = i6;
+ if ((i6 & 65535) > 199) {
+ if (i6 << 16 >> 16 == 200) {
+ _luaG_runerror(i1, 2240, i7);
+ }
+ if ((i6 & 65535) > 224) {
+ _luaD_throw(i1, 6);
+ }
+ }
+ i6 = (i8 | 0) != 0;
+ if (!i6) {
+ i8 = i1 + 36 | 0;
+ HEAP16[i8 >> 1] = (HEAP16[i8 >> 1] | 0) + 1 << 16 >> 16;
+ }
+ if ((_luaD_precall(i1, i4, i5) | 0) == 0) {
+ _luaV_execute(i1);
+ }
+ if (i6) {
+ i8 = HEAP16[i2 >> 1] | 0;
+ i8 = i8 + -1 << 16 >> 16;
+ HEAP16[i2 >> 1] = i8;
+ STACKTOP = i3;
+ return;
+ }
+ i8 = i1 + 36 | 0;
+ HEAP16[i8 >> 1] = (HEAP16[i8 >> 1] | 0) + -1 << 16 >> 16;
+ i8 = HEAP16[i2 >> 1] | 0;
+ i8 = i8 + -1 << 16 >> 16;
+ HEAP16[i2 >> 1] = i8;
+ STACKTOP = i3;
+ return;
+}
+function _pushline(i6, i1) {
+ i6 = i6 | 0;
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i7 = 0, i8 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 528 | 0;
+ i4 = i2;
+ i3 = i2 + 8 | 0;
+ i7 = (i1 | 0) != 0;
+ _lua_getglobal(i6, i7 ? 288 : 296);
+ i8 = _lua_tolstring(i6, -1, 0) | 0;
+ if ((i8 | 0) == 0) {
+ i8 = i7 ? 312 : 320;
+ }
+ i7 = HEAP32[_stdout >> 2] | 0;
+ _fputs(i8 | 0, i7 | 0) | 0;
+ _fflush(i7 | 0) | 0;
+ i8 = (_fgets(i3 | 0, 512, HEAP32[_stdin >> 2] | 0) | 0) == 0;
+ _lua_settop(i6, -2);
+ if (i8) {
+ i8 = 0;
+ STACKTOP = i2;
+ return i8 | 0;
+ }
+ i7 = _strlen(i3 | 0) | 0;
+ if ((i7 | 0) != 0 ? (i5 = i3 + (i7 + -1) | 0, (HEAP8[i5] | 0) == 10) : 0) {
+ HEAP8[i5] = 0;
+ }
+ if ((i1 | 0) != 0 ? (HEAP8[i3] | 0) == 61 : 0) {
+ HEAP32[i4 >> 2] = i3 + 1;
+ _lua_pushfstring(i6, 272, i4) | 0;
+ i8 = 1;
+ STACKTOP = i2;
+ return i8 | 0;
+ }
+ _lua_pushstring(i6, i3) | 0;
+ i8 = 1;
+ STACKTOP = i2;
+ return i8 | 0;
+}
+function _db_getlocal(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 112 | 0;
+ i4 = i2;
+ if ((_lua_type(i1, 1) | 0) == 8) {
+ i3 = _lua_tothread(i1, 1) | 0;
+ i6 = 1;
+ } else {
+ i3 = i1;
+ i6 = 0;
+ }
+ i5 = _luaL_checkinteger(i1, i6 | 2) | 0;
+ i6 = i6 + 1 | 0;
+ if ((_lua_type(i1, i6) | 0) == 6) {
+ _lua_pushvalue(i1, i6);
+ _lua_pushstring(i1, _lua_getlocal(i1, 0, i5) | 0) | 0;
+ i6 = 1;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ if ((_lua_getstack(i3, _luaL_checkinteger(i1, i6) | 0, i4) | 0) == 0) {
+ i6 = _luaL_argerror(i1, i6, 11560) | 0;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ i4 = _lua_getlocal(i3, i4, i5) | 0;
+ if ((i4 | 0) == 0) {
+ _lua_pushnil(i1);
+ i6 = 1;
+ STACKTOP = i2;
+ return i6 | 0;
+ } else {
+ _lua_xmove(i3, i1, 1);
+ _lua_pushstring(i1, i4) | 0;
+ _lua_pushvalue(i1, -2);
+ i6 = 2;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ return 0;
+}
+function _luaB_print(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i2 = i3;
+ i4 = i3 + 4 | 0;
+ i6 = _lua_gettop(i1) | 0;
+ _lua_getglobal(i1, 9584);
+ i5 = HEAP32[_stdout >> 2] | 0;
+ L1 : do {
+ if ((i6 | 0) >= 1) {
+ i7 = 1;
+ while (1) {
+ _lua_pushvalue(i1, -1);
+ _lua_pushvalue(i1, i7);
+ _lua_callk(i1, 1, 1, 0, 0);
+ i8 = _lua_tolstring(i1, -1, i4) | 0;
+ if ((i8 | 0) == 0) {
+ break;
+ }
+ if ((i7 | 0) > 1) {
+ _fputc(9, i5 | 0) | 0;
+ }
+ _fwrite(i8 | 0, 1, HEAP32[i4 >> 2] | 0, i5 | 0) | 0;
+ _lua_settop(i1, -2);
+ if ((i7 | 0) < (i6 | 0)) {
+ i7 = i7 + 1 | 0;
+ } else {
+ break L1;
+ }
+ }
+ i8 = _luaL_error(i1, 9816, i2) | 0;
+ STACKTOP = i3;
+ return i8 | 0;
+ }
+ } while (0);
+ _fputc(10, i5 | 0) | 0;
+ _fflush(i5 | 0) | 0;
+ i8 = 0;
+ STACKTOP = i3;
+ return i8 | 0;
+}
+function _luaB_load(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i5 = i2;
+ i6 = _lua_tolstring(i1, 1, i5) | 0;
+ i4 = _luaL_optlstring(i1, 3, 9872, 0) | 0;
+ i3 = (_lua_type(i1, 4) | 0) != -1;
+ if ((i6 | 0) == 0) {
+ i6 = _luaL_optlstring(i1, 2, 9880, 0) | 0;
+ _luaL_checktype(i1, 1, 6);
+ _lua_settop(i1, 5);
+ i4 = _lua_load(i1, 3, 0, i6, i4) | 0;
+ } else {
+ i7 = _luaL_optlstring(i1, 2, i6, 0) | 0;
+ i4 = _luaL_loadbufferx(i1, i6, HEAP32[i5 >> 2] | 0, i7, i4) | 0;
+ }
+ if ((i4 | 0) != 0) {
+ _lua_pushnil(i1);
+ _lua_insert(i1, -2);
+ i7 = 2;
+ STACKTOP = i2;
+ return i7 | 0;
+ }
+ if (!i3) {
+ i7 = 1;
+ STACKTOP = i2;
+ return i7 | 0;
+ }
+ _lua_pushvalue(i1, i3 ? 4 : 0);
+ if ((_lua_setupvalue(i1, -2, 1) | 0) != 0) {
+ i7 = 1;
+ STACKTOP = i2;
+ return i7 | 0;
+ }
+ _lua_settop(i1, -2);
+ i7 = 1;
+ STACKTOP = i2;
+ return i7 | 0;
+}
+function _db_debug(i2) {
+ i2 = i2 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 256 | 0;
+ i6 = i1;
+ i4 = i1 + 4 | 0;
+ i3 = HEAP32[_stderr >> 2] | 0;
+ _fwrite(12040, 11, 1, i3 | 0) | 0;
+ _fflush(i3 | 0) | 0;
+ i5 = HEAP32[_stdin >> 2] | 0;
+ if ((_fgets(i4 | 0, 250, i5 | 0) | 0) == 0) {
+ STACKTOP = i1;
+ return 0;
+ }
+ while (1) {
+ if ((_strcmp(i4, 12056) | 0) == 0) {
+ i2 = 7;
+ break;
+ }
+ if (!((_luaL_loadbufferx(i2, i4, _strlen(i4 | 0) | 0, 12064, 0) | 0) == 0 ? (_lua_pcallk(i2, 0, 0, 0, 0, 0) | 0) == 0 : 0)) {
+ HEAP32[i6 >> 2] = _lua_tolstring(i2, -1, 0) | 0;
+ _fprintf(i3 | 0, 12088, i6 | 0) | 0;
+ _fflush(i3 | 0) | 0;
+ }
+ _lua_settop(i2, 0);
+ _fwrite(12040, 11, 1, i3 | 0) | 0;
+ _fflush(i3 | 0) | 0;
+ if ((_fgets(i4 | 0, 250, i5 | 0) | 0) == 0) {
+ i2 = 7;
+ break;
+ }
+ }
+ if ((i2 | 0) == 7) {
+ STACKTOP = i1;
+ return 0;
+ }
+ return 0;
+}
+function _luaL_prepbuffsize(i2, i7) {
+ i2 = i2 | 0;
+ i7 = i7 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i8 = 0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i1 = HEAP32[i2 + 12 >> 2] | 0;
+ i4 = i2 + 4 | 0;
+ i8 = HEAP32[i4 >> 2] | 0;
+ i5 = i2 + 8 | 0;
+ i6 = HEAP32[i5 >> 2] | 0;
+ if (!((i8 - i6 | 0) >>> 0 < i7 >>> 0)) {
+ i7 = HEAP32[i2 >> 2] | 0;
+ i8 = i6;
+ i8 = i7 + i8 | 0;
+ STACKTOP = i3;
+ return i8 | 0;
+ }
+ i8 = i8 << 1;
+ i8 = (i8 - i6 | 0) >>> 0 < i7 >>> 0 ? i6 + i7 | 0 : i8;
+ if (i8 >>> 0 < i6 >>> 0 | (i8 - i6 | 0) >>> 0 < i7 >>> 0) {
+ _luaL_error(i1, 1272, i3) | 0;
+ }
+ i6 = _lua_newuserdata(i1, i8) | 0;
+ _memcpy(i6 | 0, HEAP32[i2 >> 2] | 0, HEAP32[i5 >> 2] | 0) | 0;
+ if ((HEAP32[i2 >> 2] | 0) != (i2 + 16 | 0)) {
+ _lua_remove(i1, -2);
+ }
+ HEAP32[i2 >> 2] = i6;
+ HEAP32[i4 >> 2] = i8;
+ i7 = i6;
+ i8 = HEAP32[i5 >> 2] | 0;
+ i8 = i7 + i8 | 0;
+ STACKTOP = i3;
+ return i8 | 0;
+}
+function _luaG_runerror(i1, i5, i4) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ i4 = i4 | 0;
+ var i2 = 0, i3 = 0, i6 = 0, i7 = 0;
+ i6 = STACKTOP;
+ STACKTOP = STACKTOP + 96 | 0;
+ i2 = i6;
+ i3 = i6 + 32 | 0;
+ i6 = i6 + 16 | 0;
+ HEAP32[i6 >> 2] = i4;
+ i4 = _luaO_pushvfstring(i1, i5, i6) | 0;
+ i6 = HEAP32[i1 + 16 >> 2] | 0;
+ if ((HEAP8[i6 + 18 | 0] & 1) == 0) {
+ _luaG_errormsg(i1);
+ }
+ i5 = HEAP32[(HEAP32[HEAP32[i6 >> 2] >> 2] | 0) + 12 >> 2] | 0;
+ i7 = HEAP32[i5 + 20 >> 2] | 0;
+ if ((i7 | 0) == 0) {
+ i6 = 0;
+ } else {
+ i6 = HEAP32[i7 + (((HEAP32[i6 + 28 >> 2] | 0) - (HEAP32[i5 + 12 >> 2] | 0) >> 2) + -1 << 2) >> 2] | 0;
+ }
+ i5 = HEAP32[i5 + 36 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ HEAP8[i3] = 63;
+ HEAP8[i3 + 1 | 0] = 0;
+ } else {
+ _luaO_chunkid(i3, i5 + 16 | 0, 60);
+ }
+ HEAP32[i2 >> 2] = i3;
+ HEAP32[i2 + 4 >> 2] = i6;
+ HEAP32[i2 + 8 >> 2] = i4;
+ _luaO_pushfstring(i1, 2024, i2) | 0;
+ _luaG_errormsg(i1);
+}
+function _db_upvaluejoin(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 112 | 0;
+ i4 = i3;
+ i2 = _luaL_checkinteger(i1, 2) | 0;
+ _luaL_checktype(i1, 1, 6);
+ _lua_pushvalue(i1, 1);
+ _lua_getinfo(i1, 11728, i4) | 0;
+ if (!((i2 | 0) > 0 ? (i2 | 0) <= (HEAPU8[i4 + 32 | 0] | 0 | 0) : 0)) {
+ _luaL_argerror(i1, 2, 11736) | 0;
+ }
+ i5 = _luaL_checkinteger(i1, 4) | 0;
+ _luaL_checktype(i1, 3, 6);
+ _lua_pushvalue(i1, 3);
+ _lua_getinfo(i1, 11728, i4) | 0;
+ if (!((i5 | 0) > 0 ? (i5 | 0) <= (HEAPU8[i4 + 32 | 0] | 0 | 0) : 0)) {
+ _luaL_argerror(i1, 4, 11736) | 0;
+ }
+ if ((_lua_iscfunction(i1, 1) | 0) != 0) {
+ _luaL_argerror(i1, 1, 11760) | 0;
+ }
+ if ((_lua_iscfunction(i1, 3) | 0) == 0) {
+ _lua_upvaluejoin(i1, 1, i2, 3, i5);
+ STACKTOP = i3;
+ return 0;
+ }
+ _luaL_argerror(i1, 3, 11760) | 0;
+ _lua_upvaluejoin(i1, 1, i2, 3, i5);
+ STACKTOP = i3;
+ return 0;
+}
+function _luaK_jump(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0;
+ i3 = STACKTOP;
+ i2 = i1 + 28 | 0;
+ i7 = HEAP32[i2 >> 2] | 0;
+ HEAP32[i2 >> 2] = -1;
+ i2 = _luaK_code(i1, 2147450903) | 0;
+ if ((i7 | 0) == -1) {
+ i9 = i2;
+ STACKTOP = i3;
+ return i9 | 0;
+ }
+ if ((i2 | 0) == -1) {
+ i9 = i7;
+ STACKTOP = i3;
+ return i9 | 0;
+ }
+ i6 = HEAP32[(HEAP32[i1 >> 2] | 0) + 12 >> 2] | 0;
+ i8 = i2;
+ while (1) {
+ i5 = i6 + (i8 << 2) | 0;
+ i4 = HEAP32[i5 >> 2] | 0;
+ i9 = (i4 >>> 14) + -131071 | 0;
+ if ((i9 | 0) == -1) {
+ break;
+ }
+ i9 = i8 + 1 + i9 | 0;
+ if ((i9 | 0) == -1) {
+ break;
+ } else {
+ i8 = i9;
+ }
+ }
+ i6 = i7 + ~i8 | 0;
+ if ((((i6 | 0) > -1 ? i6 : 0 - i6 | 0) | 0) > 131071) {
+ _luaX_syntaxerror(HEAP32[i1 + 12 >> 2] | 0, 10624);
+ }
+ HEAP32[i5 >> 2] = (i6 << 14) + 2147467264 | i4 & 16383;
+ i9 = i2;
+ STACKTOP = i3;
+ return i9 | 0;
+}
+function _findfield(i2, i3, i4) {
+ i2 = i2 | 0;
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ var i1 = 0;
+ i1 = STACKTOP;
+ L1 : do {
+ if (((i4 | 0) != 0 ? (_lua_type(i2, -1) | 0) == 5 : 0) ? (_lua_pushnil(i2), (_lua_next(i2, -2) | 0) != 0) : 0) {
+ i4 = i4 + -1 | 0;
+ while (1) {
+ if ((_lua_type(i2, -2) | 0) == 4) {
+ if ((_lua_rawequal(i2, i3, -1) | 0) != 0) {
+ i3 = 7;
+ break;
+ }
+ if ((_findfield(i2, i3, i4) | 0) != 0) {
+ i3 = 9;
+ break;
+ }
+ }
+ _lua_settop(i2, -2);
+ if ((_lua_next(i2, -2) | 0) == 0) {
+ i2 = 0;
+ break L1;
+ }
+ }
+ if ((i3 | 0) == 7) {
+ _lua_settop(i2, -2);
+ i2 = 1;
+ break;
+ } else if ((i3 | 0) == 9) {
+ _lua_remove(i2, -2);
+ _lua_pushlstring(i2, 1776, 1) | 0;
+ _lua_insert(i2, -2);
+ _lua_concat(i2, 3);
+ i2 = 1;
+ break;
+ }
+ } else {
+ i2 = 0;
+ }
+ } while (0);
+ STACKTOP = i1;
+ return i2 | 0;
+}
+function _db_gethook(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i2 = i3;
+ if ((_lua_type(i1, 1) | 0) == 8) {
+ i4 = _lua_tothread(i1, 1) | 0;
+ } else {
+ i4 = i1;
+ }
+ i5 = _lua_gethookmask(i4) | 0;
+ i6 = _lua_gethook(i4) | 0;
+ if ((i6 | 0) != 0 & (i6 | 0) != 9) {
+ _lua_pushlstring(i1, 12024, 13) | 0;
+ } else {
+ _luaL_getsubtable(i1, -1001e3, 11584) | 0;
+ _lua_pushthread(i4) | 0;
+ _lua_xmove(i4, i1, 1);
+ _lua_rawget(i1, -2);
+ _lua_remove(i1, -2);
+ }
+ if ((i5 & 1 | 0) == 0) {
+ i6 = 0;
+ } else {
+ HEAP8[i2] = 99;
+ i6 = 1;
+ }
+ if ((i5 & 2 | 0) != 0) {
+ HEAP8[i2 + i6 | 0] = 114;
+ i6 = i6 + 1 | 0;
+ }
+ if ((i5 & 4 | 0) != 0) {
+ HEAP8[i2 + i6 | 0] = 108;
+ i6 = i6 + 1 | 0;
+ }
+ HEAP8[i2 + i6 | 0] = 0;
+ _lua_pushstring(i1, i2) | 0;
+ _lua_pushinteger(i1, _lua_gethookcount(i4) | 0);
+ STACKTOP = i3;
+ return 3;
+}
+function _lua_tothread(i3, i5) {
+ i3 = i3 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i2 = 0, i4 = 0;
+ i1 = STACKTOP;
+ i4 = HEAP32[i3 + 16 >> 2] | 0;
+ do {
+ if ((i5 | 0) <= 0) {
+ if (!((i5 | 0) < -1000999)) {
+ i2 = (HEAP32[i3 + 8 >> 2] | 0) + (i5 << 4) | 0;
+ break;
+ }
+ if ((i5 | 0) == -1001e3) {
+ i2 = (HEAP32[i3 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i3 = -1001e3 - i5 | 0;
+ i4 = HEAP32[i4 >> 2] | 0;
+ if ((HEAP32[i4 + 8 >> 2] | 0) != 22 ? (i2 = HEAP32[i4 >> 2] | 0, (i3 | 0) <= (HEAPU8[i2 + 6 | 0] | 0 | 0)) : 0) {
+ i2 = i2 + (i3 + -1 << 4) + 16 | 0;
+ } else {
+ i2 = 5192;
+ }
+ } else {
+ i2 = (HEAP32[i4 >> 2] | 0) + (i5 << 4) | 0;
+ i2 = i2 >>> 0 < (HEAP32[i3 + 8 >> 2] | 0) >>> 0 ? i2 : 5192;
+ }
+ } while (0);
+ if ((HEAP32[i2 + 8 >> 2] | 0) != 72) {
+ i5 = 0;
+ STACKTOP = i1;
+ return i5 | 0;
+ }
+ i5 = HEAP32[i2 >> 2] | 0;
+ STACKTOP = i1;
+ return i5 | 0;
+}
+function _luaD_throw(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ var i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0;
+ i3 = i1 + 64 | 0;
+ i4 = HEAP32[i3 >> 2] | 0;
+ if ((i4 | 0) != 0) {
+ HEAP32[i4 + 160 >> 2] = i2;
+ _longjmp((HEAP32[i3 >> 2] | 0) + 4 | 0, 1);
+ }
+ HEAP8[i1 + 6 | 0] = i2;
+ i4 = i1 + 12 | 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ i5 = HEAP32[i3 + 172 >> 2] | 0;
+ if ((HEAP32[i5 + 64 >> 2] | 0) != 0) {
+ i6 = HEAP32[i1 + 8 >> 2] | 0;
+ i9 = i5 + 8 | 0;
+ i5 = HEAP32[i9 >> 2] | 0;
+ HEAP32[i9 >> 2] = i5 + 16;
+ i9 = i6 + -16 | 0;
+ i8 = HEAP32[i9 + 4 >> 2] | 0;
+ i7 = i5;
+ HEAP32[i7 >> 2] = HEAP32[i9 >> 2];
+ HEAP32[i7 + 4 >> 2] = i8;
+ HEAP32[i5 + 8 >> 2] = HEAP32[i6 + -8 >> 2];
+ _luaD_throw(HEAP32[(HEAP32[i4 >> 2] | 0) + 172 >> 2] | 0, i2);
+ }
+ i2 = HEAP32[i3 + 168 >> 2] | 0;
+ if ((i2 | 0) == 0) {
+ _abort();
+ }
+ FUNCTION_TABLE_ii[i2 & 255](i1) | 0;
+ _abort();
+}
+function _lua_len(i1, i5) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i3 = 0, i4 = 0;
+ i2 = STACKTOP;
+ i4 = HEAP32[i1 + 16 >> 2] | 0;
+ do {
+ if ((i5 | 0) <= 0) {
+ if (!((i5 | 0) < -1000999)) {
+ i3 = (HEAP32[i1 + 8 >> 2] | 0) + (i5 << 4) | 0;
+ break;
+ }
+ if ((i5 | 0) == -1001e3) {
+ i3 = (HEAP32[i1 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i5 = -1001e3 - i5 | 0;
+ i4 = HEAP32[i4 >> 2] | 0;
+ if ((HEAP32[i4 + 8 >> 2] | 0) != 22 ? (i3 = HEAP32[i4 >> 2] | 0, (i5 | 0) <= (HEAPU8[i3 + 6 | 0] | 0 | 0)) : 0) {
+ i3 = i3 + (i5 + -1 << 4) + 16 | 0;
+ } else {
+ i3 = 5192;
+ }
+ } else {
+ i3 = (HEAP32[i4 >> 2] | 0) + (i5 << 4) | 0;
+ i3 = i3 >>> 0 < (HEAP32[i1 + 8 >> 2] | 0) >>> 0 ? i3 : 5192;
+ }
+ } while (0);
+ i5 = i1 + 8 | 0;
+ _luaV_objlen(i1, HEAP32[i5 >> 2] | 0, i3);
+ HEAP32[i5 >> 2] = (HEAP32[i5 >> 2] | 0) + 16;
+ STACKTOP = i2;
+ return;
+}
+function _read_line(i4, i5, i1) {
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i6 = 0, i7 = 0, i8 = 0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 1040 | 0;
+ i2 = i3;
+ _luaL_buffinit(i4, i2);
+ i7 = _luaL_prepbuffsize(i2, 1024) | 0;
+ L1 : do {
+ if ((_fgets(i7 | 0, 1024, i5 | 0) | 0) != 0) {
+ i6 = i2 + 8 | 0;
+ while (1) {
+ i8 = _strlen(i7 | 0) | 0;
+ if ((i8 | 0) != 0 ? (HEAP8[i7 + (i8 + -1) | 0] | 0) == 10 : 0) {
+ break;
+ }
+ HEAP32[i6 >> 2] = (HEAP32[i6 >> 2] | 0) + i8;
+ i7 = _luaL_prepbuffsize(i2, 1024) | 0;
+ if ((_fgets(i7 | 0, 1024, i5 | 0) | 0) == 0) {
+ break L1;
+ }
+ }
+ HEAP32[i6 >> 2] = i8 - i1 + (HEAP32[i6 >> 2] | 0);
+ _luaL_pushresult(i2);
+ i8 = 1;
+ STACKTOP = i3;
+ return i8 | 0;
+ }
+ } while (0);
+ _luaL_pushresult(i2);
+ i8 = (_lua_rawlen(i4, -1) | 0) != 0 | 0;
+ STACKTOP = i3;
+ return i8 | 0;
+}
+function _luaL_tolstring(i1, i5, i4) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ i4 = i4 | 0;
+ var i2 = 0, i3 = 0, i6 = 0, i7 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = i2;
+ do {
+ if ((_luaL_callmeta(i1, i5, 1384) | 0) == 0) {
+ i6 = _lua_type(i1, i5) | 0;
+ if ((i6 | 0) == 0) {
+ _lua_pushlstring(i1, 1416, 3) | 0;
+ break;
+ } else if ((i6 | 0) == 1) {
+ i6 = (_lua_toboolean(i1, i5) | 0) != 0;
+ _lua_pushstring(i1, i6 ? 1400 : 1408) | 0;
+ break;
+ } else if ((i6 | 0) == 4 | (i6 | 0) == 3) {
+ _lua_pushvalue(i1, i5);
+ break;
+ } else {
+ i7 = _lua_typename(i1, _lua_type(i1, i5) | 0) | 0;
+ i6 = _lua_topointer(i1, i5) | 0;
+ HEAP32[i3 >> 2] = i7;
+ HEAP32[i3 + 4 >> 2] = i6;
+ _lua_pushfstring(i1, 1424, i3) | 0;
+ break;
+ }
+ }
+ } while (0);
+ i7 = _lua_tolstring(i1, -1, i4) | 0;
+ STACKTOP = i2;
+ return i7 | 0;
+}
+function _save(i7, i1) {
+ i7 = i7 | 0;
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i8 = 0;
+ i2 = STACKTOP;
+ i4 = HEAP32[i7 + 60 >> 2] | 0;
+ i3 = i4 + 4 | 0;
+ i8 = HEAP32[i3 >> 2] | 0;
+ i6 = i4 + 8 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ if (!((i8 + 1 | 0) >>> 0 > i5 >>> 0)) {
+ i6 = HEAP32[i4 >> 2] | 0;
+ i7 = i1 & 255;
+ i5 = i8 + 1 | 0;
+ HEAP32[i3 >> 2] = i5;
+ i8 = i6 + i8 | 0;
+ HEAP8[i8] = i7;
+ STACKTOP = i2;
+ return;
+ }
+ if (i5 >>> 0 > 2147483645) {
+ _lexerror(i7, 12368, 0);
+ }
+ i8 = i5 << 1;
+ i7 = HEAP32[i7 + 52 >> 2] | 0;
+ if ((i8 | 0) == -2) {
+ _luaM_toobig(i7);
+ }
+ i7 = _luaM_realloc_(i7, HEAP32[i4 >> 2] | 0, i5, i8) | 0;
+ HEAP32[i4 >> 2] = i7;
+ HEAP32[i6 >> 2] = i8;
+ i8 = HEAP32[i3 >> 2] | 0;
+ i6 = i7;
+ i7 = i1 & 255;
+ i5 = i8 + 1 | 0;
+ HEAP32[i3 >> 2] = i5;
+ i8 = i6 + i8 | 0;
+ HEAP8[i8] = i7;
+ STACKTOP = i2;
+ return;
+}
+function _luaK_patchtohere(i1, i3) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0;
+ i2 = STACKTOP;
+ HEAP32[i1 + 24 >> 2] = HEAP32[i1 + 20 >> 2];
+ i4 = i1 + 28 | 0;
+ if ((i3 | 0) == -1) {
+ STACKTOP = i2;
+ return;
+ }
+ i7 = HEAP32[i4 >> 2] | 0;
+ if ((i7 | 0) == -1) {
+ HEAP32[i4 >> 2] = i3;
+ STACKTOP = i2;
+ return;
+ }
+ i4 = HEAP32[(HEAP32[i1 >> 2] | 0) + 12 >> 2] | 0;
+ while (1) {
+ i6 = i4 + (i7 << 2) | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ i8 = (i5 >>> 14) + -131071 | 0;
+ if ((i8 | 0) == -1) {
+ break;
+ }
+ i8 = i7 + 1 + i8 | 0;
+ if ((i8 | 0) == -1) {
+ break;
+ } else {
+ i7 = i8;
+ }
+ }
+ i3 = ~i7 + i3 | 0;
+ if ((((i3 | 0) > -1 ? i3 : 0 - i3 | 0) | 0) > 131071) {
+ _luaX_syntaxerror(HEAP32[i1 + 12 >> 2] | 0, 10624);
+ }
+ HEAP32[i6 >> 2] = (i3 << 14) + 2147467264 | i5 & 16383;
+ STACKTOP = i2;
+ return;
+}
+function _tinsert(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i7 = i2;
+ _luaL_checktype(i1, 1, 5);
+ i4 = _luaL_len(i1, 1) | 0;
+ i3 = i4 + 1 | 0;
+ i6 = _lua_gettop(i1) | 0;
+ if ((i6 | 0) == 3) {
+ i5 = 2;
+ } else if ((i6 | 0) != 2) {
+ i7 = _luaL_error(i1, 8320, i7) | 0;
+ STACKTOP = i2;
+ return i7 | 0;
+ }
+ if ((i5 | 0) == 2) {
+ i5 = _luaL_checkinteger(i1, 2) | 0;
+ if ((i5 | 0) < 1 | (i5 | 0) > (i3 | 0)) {
+ _luaL_argerror(i1, 2, 8256) | 0;
+ }
+ if ((i4 | 0) < (i5 | 0)) {
+ i3 = i5;
+ } else {
+ while (1) {
+ i4 = i3 + -1 | 0;
+ _lua_rawgeti(i1, 1, i4);
+ _lua_rawseti(i1, 1, i3);
+ if ((i4 | 0) > (i5 | 0)) {
+ i3 = i4;
+ } else {
+ i3 = i5;
+ break;
+ }
+ }
+ }
+ }
+ _lua_rawseti(i1, 1, i3);
+ i7 = 0;
+ STACKTOP = i2;
+ return i7 | 0;
+}
+function _lua_iscfunction(i3, i5) {
+ i3 = i3 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i2 = 0, i4 = 0;
+ i1 = STACKTOP;
+ i4 = HEAP32[i3 + 16 >> 2] | 0;
+ do {
+ if ((i5 | 0) <= 0) {
+ if (!((i5 | 0) < -1000999)) {
+ i2 = (HEAP32[i3 + 8 >> 2] | 0) + (i5 << 4) | 0;
+ break;
+ }
+ if ((i5 | 0) == -1001e3) {
+ i2 = (HEAP32[i3 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i3 = -1001e3 - i5 | 0;
+ i4 = HEAP32[i4 >> 2] | 0;
+ if ((HEAP32[i4 + 8 >> 2] | 0) != 22 ? (i2 = HEAP32[i4 >> 2] | 0, (i3 | 0) <= (HEAPU8[i2 + 6 | 0] | 0 | 0)) : 0) {
+ i2 = i2 + (i3 + -1 << 4) + 16 | 0;
+ } else {
+ i2 = 5192;
+ }
+ } else {
+ i2 = (HEAP32[i4 >> 2] | 0) + (i5 << 4) | 0;
+ i2 = i2 >>> 0 < (HEAP32[i3 + 8 >> 2] | 0) >>> 0 ? i2 : 5192;
+ }
+ } while (0);
+ i5 = HEAP32[i2 + 8 >> 2] | 0;
+ STACKTOP = i1;
+ return ((i5 | 0) == 22 | (i5 | 0) == 102) & 1 | 0;
+}
+function _lua_gettable(i1, i5) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i3 = 0, i4 = 0;
+ i2 = STACKTOP;
+ i4 = HEAP32[i1 + 16 >> 2] | 0;
+ do {
+ if ((i5 | 0) <= 0) {
+ if (!((i5 | 0) < -1000999)) {
+ i3 = (HEAP32[i1 + 8 >> 2] | 0) + (i5 << 4) | 0;
+ break;
+ }
+ if ((i5 | 0) == -1001e3) {
+ i3 = (HEAP32[i1 + 12 >> 2] | 0) + 40 | 0;
+ break;
+ }
+ i5 = -1001e3 - i5 | 0;
+ i4 = HEAP32[i4 >> 2] | 0;
+ if ((HEAP32[i4 + 8 >> 2] | 0) != 22 ? (i3 = HEAP32[i4 >> 2] | 0, (i5 | 0) <= (HEAPU8[i3 + 6 | 0] | 0 | 0)) : 0) {
+ i3 = i3 + (i5 + -1 << 4) + 16 | 0;
+ } else {
+ i3 = 5192;
+ }
+ } else {
+ i3 = (HEAP32[i4 >> 2] | 0) + (i5 << 4) | 0;
+ i3 = i3 >>> 0 < (HEAP32[i1 + 8 >> 2] | 0) >>> 0 ? i3 : 5192;
+ }
+ } while (0);
+ i5 = (HEAP32[i1 + 8 >> 2] | 0) + -16 | 0;
+ _luaV_gettable(i1, i3, i5, i5);
+ STACKTOP = i2;
+ return;
+}
+function _luaG_errormsg(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0;
+ i2 = HEAP32[i1 + 68 >> 2] | 0;
+ if ((i2 | 0) == 0) {
+ _luaD_throw(i1, 2);
+ }
+ i4 = HEAP32[i1 + 28 >> 2] | 0;
+ i3 = i4 + (i2 + 8) | 0;
+ if ((HEAP32[i3 >> 2] & 15 | 0) != 6) {
+ _luaD_throw(i1, 6);
+ }
+ i5 = i1 + 8 | 0;
+ i6 = HEAP32[i5 >> 2] | 0;
+ i9 = i6 + -16 | 0;
+ i8 = HEAP32[i9 + 4 >> 2] | 0;
+ i7 = i6;
+ HEAP32[i7 >> 2] = HEAP32[i9 >> 2];
+ HEAP32[i7 + 4 >> 2] = i8;
+ HEAP32[i6 + 8 >> 2] = HEAP32[i6 + -8 >> 2];
+ i6 = HEAP32[i5 >> 2] | 0;
+ i7 = i4 + i2 | 0;
+ i2 = HEAP32[i7 + 4 >> 2] | 0;
+ i4 = i6 + -16 | 0;
+ HEAP32[i4 >> 2] = HEAP32[i7 >> 2];
+ HEAP32[i4 + 4 >> 2] = i2;
+ HEAP32[i6 + -8 >> 2] = HEAP32[i3 >> 2];
+ i4 = HEAP32[i5 >> 2] | 0;
+ HEAP32[i5 >> 2] = i4 + 16;
+ _luaD_call(i1, i4 + -16 | 0, 1, 0);
+ _luaD_throw(i1, 2);
+}
+function _luaB_costatus(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0;
+ i4 = STACKTOP;
+ STACKTOP = STACKTOP + 112 | 0;
+ i3 = i4;
+ i2 = _lua_tothread(i1, 1) | 0;
+ if ((i2 | 0) == 0) {
+ _luaL_argerror(i1, 1, 10856) | 0;
+ }
+ do {
+ if ((i2 | 0) != (i1 | 0)) {
+ i5 = _lua_status(i2) | 0;
+ if ((i5 | 0) == 0) {
+ if ((_lua_getstack(i2, 0, i3) | 0) > 0) {
+ _lua_pushlstring(i1, 10896, 6) | 0;
+ break;
+ }
+ if ((_lua_gettop(i2) | 0) == 0) {
+ _lua_pushlstring(i1, 10904, 4) | 0;
+ break;
+ } else {
+ _lua_pushlstring(i1, 10880, 9) | 0;
+ break;
+ }
+ } else if ((i5 | 0) == 1) {
+ _lua_pushlstring(i1, 10880, 9) | 0;
+ break;
+ } else {
+ _lua_pushlstring(i1, 10904, 4) | 0;
+ break;
+ }
+ } else {
+ _lua_pushlstring(i1, 10728, 7) | 0;
+ }
+ } while (0);
+ STACKTOP = i4;
+ return 1;
+}
+function _searcher_Lua(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = i2;
+ i4 = _luaL_checklstring(i1, 1, 0) | 0;
+ _lua_getfield(i1, -1001001, 4256);
+ i5 = _lua_tolstring(i1, -1, 0) | 0;
+ if ((i5 | 0) == 0) {
+ HEAP32[i3 >> 2] = 4256;
+ _luaL_error(i1, 5032, i3) | 0;
+ }
+ i4 = _searchpath(i1, i4, i5, 4936, 4848) | 0;
+ if ((i4 | 0) == 0) {
+ i5 = 1;
+ STACKTOP = i2;
+ return i5 | 0;
+ }
+ if ((_luaL_loadfilex(i1, i4, 0) | 0) == 0) {
+ _lua_pushstring(i1, i4) | 0;
+ i5 = 2;
+ STACKTOP = i2;
+ return i5 | 0;
+ } else {
+ i6 = _lua_tolstring(i1, 1, 0) | 0;
+ i5 = _lua_tolstring(i1, -1, 0) | 0;
+ HEAP32[i3 >> 2] = i6;
+ HEAP32[i3 + 4 >> 2] = i4;
+ HEAP32[i3 + 8 >> 2] = i5;
+ i5 = _luaL_error(i1, 4888, i3) | 0;
+ STACKTOP = i2;
+ return i5 | 0;
+ }
+ return 0;
+}
+function _str_sub(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i4 = i3;
+ i2 = _luaL_checklstring(i1, 1, i4) | 0;
+ i5 = _luaL_checkinteger(i1, 2) | 0;
+ i6 = HEAP32[i4 >> 2] | 0;
+ if (!((i5 | 0) > -1)) {
+ if (i6 >>> 0 < (0 - i5 | 0) >>> 0) {
+ i5 = 0;
+ } else {
+ i5 = i5 + 1 + i6 | 0;
+ }
+ }
+ i6 = _luaL_optinteger(i1, 3, -1) | 0;
+ i4 = HEAP32[i4 >> 2] | 0;
+ if (!((i6 | 0) > -1)) {
+ if (i4 >>> 0 < (0 - i6 | 0) >>> 0) {
+ i6 = 0;
+ } else {
+ i6 = i6 + 1 + i4 | 0;
+ }
+ }
+ i5 = (i5 | 0) == 0 ? 1 : i5;
+ i4 = i6 >>> 0 > i4 >>> 0 ? i4 : i6;
+ if (i5 >>> 0 > i4 >>> 0) {
+ _lua_pushlstring(i1, 7040, 0) | 0;
+ STACKTOP = i3;
+ return 1;
+ } else {
+ _lua_pushlstring(i1, i2 + (i5 + -1) | 0, 1 - i5 + i4 | 0) | 0;
+ STACKTOP = i3;
+ return 1;
+ }
+ return 0;
+}
+function _searcher_C(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = i2;
+ i4 = _luaL_checklstring(i1, 1, 0) | 0;
+ _lua_getfield(i1, -1001001, 4440);
+ i5 = _lua_tolstring(i1, -1, 0) | 0;
+ if ((i5 | 0) == 0) {
+ HEAP32[i3 >> 2] = 4440;
+ _luaL_error(i1, 5032, i3) | 0;
+ }
+ i5 = _searchpath(i1, i4, i5, 4936, 4848) | 0;
+ if ((i5 | 0) == 0) {
+ i5 = 1;
+ STACKTOP = i2;
+ return i5 | 0;
+ }
+ if ((_loadfunc(i1, i5, i4) | 0) == 0) {
+ _lua_pushstring(i1, i5) | 0;
+ i5 = 2;
+ STACKTOP = i2;
+ return i5 | 0;
+ } else {
+ i6 = _lua_tolstring(i1, 1, 0) | 0;
+ i4 = _lua_tolstring(i1, -1, 0) | 0;
+ HEAP32[i3 >> 2] = i6;
+ HEAP32[i3 + 4 >> 2] = i5;
+ HEAP32[i3 + 8 >> 2] = i4;
+ i5 = _luaL_error(i1, 4888, i3) | 0;
+ STACKTOP = i2;
+ return i5 | 0;
+ }
+ return 0;
+}
+function _io_open(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0;
+ i5 = STACKTOP;
+ i2 = _luaL_checklstring(i1, 1, 0) | 0;
+ i3 = _luaL_optlstring(i1, 2, 3480, 0) | 0;
+ i4 = _lua_newuserdata(i1, 8) | 0;
+ i6 = i4 + 4 | 0;
+ HEAP32[i6 >> 2] = 0;
+ _luaL_setmetatable(i1, 2832);
+ HEAP32[i4 >> 2] = 0;
+ HEAP32[i6 >> 2] = 156;
+ i6 = HEAP8[i3] | 0;
+ if (!((!(i6 << 24 >> 24 == 0) ? (i7 = i3 + 1 | 0, (_memchr(3552, i6 << 24 >> 24, 4) | 0) != 0) : 0) ? (i6 = (HEAP8[i7] | 0) == 43 ? i3 + 2 | 0 : i7, (HEAP8[(HEAP8[i6] | 0) == 98 ? i6 + 1 | 0 : i6] | 0) == 0) : 0)) {
+ _luaL_argerror(i1, 2, 3560) | 0;
+ }
+ i7 = _fopen(i2 | 0, i3 | 0) | 0;
+ HEAP32[i4 >> 2] = i7;
+ if ((i7 | 0) != 0) {
+ i7 = 1;
+ STACKTOP = i5;
+ return i7 | 0;
+ }
+ i7 = _luaL_fileresult(i1, 0, i2) | 0;
+ STACKTOP = i5;
+ return i7 | 0;
+}
+function _unpack(i2) {
+ i2 = i2 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i6 = i1;
+ _luaL_checktype(i2, 1, 5);
+ i5 = _luaL_optinteger(i2, 2, 1) | 0;
+ if ((_lua_type(i2, 3) | 0) < 1) {
+ i3 = _luaL_len(i2, 1) | 0;
+ } else {
+ i3 = _luaL_checkinteger(i2, 3) | 0;
+ }
+ if ((i5 | 0) > (i3 | 0)) {
+ i6 = 0;
+ STACKTOP = i1;
+ return i6 | 0;
+ }
+ i7 = i3 - i5 | 0;
+ i4 = i7 + 1 | 0;
+ if ((i7 | 0) >= 0 ? (_lua_checkstack(i2, i4) | 0) != 0 : 0) {
+ _lua_rawgeti(i2, 1, i5);
+ if ((i5 | 0) >= (i3 | 0)) {
+ i7 = i4;
+ STACKTOP = i1;
+ return i7 | 0;
+ }
+ do {
+ i5 = i5 + 1 | 0;
+ _lua_rawgeti(i2, 1, i5);
+ } while ((i5 | 0) != (i3 | 0));
+ STACKTOP = i1;
+ return i4 | 0;
+ }
+ i7 = _luaL_error(i2, 8280, i6) | 0;
+ STACKTOP = i1;
+ return i7 | 0;
+}
+function _luaF_getlocalname(i4, i6, i2) {
+ i4 = i4 | 0;
+ i6 = i6 | 0;
+ i2 = i2 | 0;
+ var i1 = 0, i3 = 0, i5 = 0;
+ i1 = STACKTOP;
+ i3 = HEAP32[i4 + 60 >> 2] | 0;
+ if ((i3 | 0) <= 0) {
+ i6 = 0;
+ STACKTOP = i1;
+ return i6 | 0;
+ }
+ i4 = HEAP32[i4 + 24 >> 2] | 0;
+ i5 = 0;
+ while (1) {
+ if ((HEAP32[i4 + (i5 * 12 | 0) + 4 >> 2] | 0) > (i2 | 0)) {
+ i3 = 0;
+ i2 = 8;
+ break;
+ }
+ if ((HEAP32[i4 + (i5 * 12 | 0) + 8 >> 2] | 0) > (i2 | 0)) {
+ i6 = i6 + -1 | 0;
+ if ((i6 | 0) == 0) {
+ i2 = 6;
+ break;
+ }
+ }
+ i5 = i5 + 1 | 0;
+ if ((i5 | 0) >= (i3 | 0)) {
+ i3 = 0;
+ i2 = 8;
+ break;
+ }
+ }
+ if ((i2 | 0) == 6) {
+ i6 = (HEAP32[i4 + (i5 * 12 | 0) >> 2] | 0) + 16 | 0;
+ STACKTOP = i1;
+ return i6 | 0;
+ } else if ((i2 | 0) == 8) {
+ STACKTOP = i1;
+ return i3 | 0;
+ }
+ return 0;
+}
+function _luaK_concat(i1, i4, i3) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0;
+ i2 = STACKTOP;
+ if ((i3 | 0) == -1) {
+ STACKTOP = i2;
+ return;
+ }
+ i7 = HEAP32[i4 >> 2] | 0;
+ if ((i7 | 0) == -1) {
+ HEAP32[i4 >> 2] = i3;
+ STACKTOP = i2;
+ return;
+ }
+ i4 = HEAP32[(HEAP32[i1 >> 2] | 0) + 12 >> 2] | 0;
+ while (1) {
+ i6 = i4 + (i7 << 2) | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ i8 = (i5 >>> 14) + -131071 | 0;
+ if ((i8 | 0) == -1) {
+ break;
+ }
+ i8 = i7 + 1 + i8 | 0;
+ if ((i8 | 0) == -1) {
+ break;
+ } else {
+ i7 = i8;
+ }
+ }
+ i3 = ~i7 + i3 | 0;
+ if ((((i3 | 0) > -1 ? i3 : 0 - i3 | 0) | 0) > 131071) {
+ _luaX_syntaxerror(HEAP32[i1 + 12 >> 2] | 0, 10624);
+ }
+ HEAP32[i6 >> 2] = i5 & 16383 | (i3 << 14) + 2147467264;
+ STACKTOP = i2;
+ return;
+}
+function _scalbn(d3, i2) {
+ d3 = +d3;
+ i2 = i2 | 0;
+ var i1 = 0, i4 = 0;
+ i1 = STACKTOP;
+ if ((i2 | 0) > 1023) {
+ d3 = d3 * 8.98846567431158e+307;
+ i4 = i2 + -1023 | 0;
+ if ((i4 | 0) > 1023) {
+ i2 = i2 + -2046 | 0;
+ i2 = (i2 | 0) > 1023 ? 1023 : i2;
+ d3 = d3 * 8.98846567431158e+307;
+ } else {
+ i2 = i4;
+ }
+ } else {
+ if ((i2 | 0) < -1022) {
+ d3 = d3 * 2.2250738585072014e-308;
+ i4 = i2 + 1022 | 0;
+ if ((i4 | 0) < -1022) {
+ i2 = i2 + 2044 | 0;
+ i2 = (i2 | 0) < -1022 ? -1022 : i2;
+ d3 = d3 * 2.2250738585072014e-308;
+ } else {
+ i2 = i4;
+ }
+ }
+ }
+ i2 = _bitshift64Shl(i2 + 1023 | 0, 0, 52) | 0;
+ i4 = tempRet0;
+ HEAP32[tempDoublePtr >> 2] = i2;
+ HEAP32[tempDoublePtr + 4 >> 2] = i4;
+ d3 = d3 * +HEAPF64[tempDoublePtr >> 3];
+ STACKTOP = i1;
+ return +d3;
+}
+function _luaK_numberK(i1, d6) {
+ i1 = i1 | 0;
+ d6 = +d6;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i7 = 0, i8 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 32 | 0;
+ i4 = i2 + 16 | 0;
+ i3 = i2;
+ HEAPF64[i4 >> 3] = d6;
+ i5 = HEAP32[(HEAP32[i1 + 12 >> 2] | 0) + 52 >> 2] | 0;
+ HEAPF64[i3 >> 3] = d6;
+ HEAP32[i3 + 8 >> 2] = 3;
+ if (d6 != d6 | 0.0 != 0.0 | d6 == 0.0) {
+ i7 = i5 + 8 | 0;
+ i8 = HEAP32[i7 >> 2] | 0;
+ HEAP32[i7 >> 2] = i8 + 16;
+ i5 = _luaS_newlstr(i5, i4, 8) | 0;
+ HEAP32[i8 >> 2] = i5;
+ HEAP32[i8 + 8 >> 2] = HEAPU8[i5 + 4 | 0] | 0 | 64;
+ i5 = _addk(i1, (HEAP32[i7 >> 2] | 0) + -16 | 0, i3) | 0;
+ HEAP32[i7 >> 2] = (HEAP32[i7 >> 2] | 0) + -16;
+ STACKTOP = i2;
+ return i5 | 0;
+ } else {
+ i8 = _addk(i1, i3, i3) | 0;
+ STACKTOP = i2;
+ return i8 | 0;
+ }
+ return 0;
+}
+function _auxresume(i2, i3, i4) {
+ i2 = i2 | 0;
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ var i1 = 0;
+ i1 = STACKTOP;
+ do {
+ if ((_lua_checkstack(i3, i4) | 0) != 0) {
+ if ((_lua_status(i3) | 0) == 0 ? (_lua_gettop(i3) | 0) == 0 : 0) {
+ _lua_pushlstring(i2, 10792, 28) | 0;
+ i4 = -1;
+ break;
+ }
+ _lua_xmove(i2, i3, i4);
+ if (!((_lua_resume(i3, i2, i4) | 0) >>> 0 < 2)) {
+ _lua_xmove(i3, i2, 1);
+ i4 = -1;
+ break;
+ }
+ i4 = _lua_gettop(i3) | 0;
+ if ((_lua_checkstack(i2, i4 + 1 | 0) | 0) == 0) {
+ _lua_settop(i3, ~i4);
+ _lua_pushlstring(i2, 10824, 26) | 0;
+ i4 = -1;
+ break;
+ } else {
+ _lua_xmove(i3, i2, i4);
+ break;
+ }
+ } else {
+ _lua_pushlstring(i2, 10760, 28) | 0;
+ i4 = -1;
+ }
+ } while (0);
+ STACKTOP = i1;
+ return i4 | 0;
+}
+function _luaX_setinput(i2, i1, i4, i3, i5) {
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i5 = i5 | 0;
+ var i6 = 0, i7 = 0;
+ i6 = STACKTOP;
+ HEAP8[i1 + 76 | 0] = 46;
+ i7 = i1 + 52 | 0;
+ HEAP32[i7 >> 2] = i2;
+ HEAP32[i1 >> 2] = i5;
+ HEAP32[i1 + 32 >> 2] = 286;
+ HEAP32[i1 + 56 >> 2] = i4;
+ HEAP32[i1 + 48 >> 2] = 0;
+ HEAP32[i1 + 4 >> 2] = 1;
+ HEAP32[i1 + 8 >> 2] = 1;
+ HEAP32[i1 + 68 >> 2] = i3;
+ i5 = _luaS_new(i2, 12264) | 0;
+ HEAP32[i1 + 72 >> 2] = i5;
+ i5 = i5 + 5 | 0;
+ HEAP8[i5] = HEAPU8[i5] | 0 | 32;
+ i5 = i1 + 60 | 0;
+ i4 = HEAP32[i5 >> 2] | 0;
+ i4 = _luaM_realloc_(HEAP32[i7 >> 2] | 0, HEAP32[i4 >> 2] | 0, HEAP32[i4 + 8 >> 2] | 0, 32) | 0;
+ HEAP32[HEAP32[i5 >> 2] >> 2] = i4;
+ HEAP32[(HEAP32[i5 >> 2] | 0) + 8 >> 2] = 32;
+ STACKTOP = i6;
+ return;
+}
+function _luaL_optlstring(i2, i4, i6, i5) {
+ i2 = i2 | 0;
+ i4 = i4 | 0;
+ i6 = i6 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i3 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = i1;
+ if ((_lua_type(i2, i4) | 0) >= 1) {
+ i5 = _lua_tolstring(i2, i4, i5) | 0;
+ if ((i5 | 0) != 0) {
+ i6 = i5;
+ STACKTOP = i1;
+ return i6 | 0;
+ }
+ i5 = _lua_typename(i2, 4) | 0;
+ i6 = _lua_typename(i2, _lua_type(i2, i4) | 0) | 0;
+ HEAP32[i3 >> 2] = i5;
+ HEAP32[i3 + 4 >> 2] = i6;
+ _luaL_argerror(i2, i4, _lua_pushfstring(i2, 1744, i3) | 0) | 0;
+ i6 = 0;
+ STACKTOP = i1;
+ return i6 | 0;
+ }
+ if ((i5 | 0) == 0) {
+ STACKTOP = i1;
+ return i6 | 0;
+ }
+ if ((i6 | 0) == 0) {
+ i2 = 0;
+ } else {
+ i2 = _strlen(i6 | 0) | 0;
+ }
+ HEAP32[i5 >> 2] = i2;
+ STACKTOP = i1;
+ return i6 | 0;
+}
+function _lua_xmove(i3, i4, i1) {
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ i1 = i1 | 0;
+ var i2 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0;
+ i2 = STACKTOP;
+ if ((i3 | 0) == (i4 | 0)) {
+ STACKTOP = i2;
+ return;
+ }
+ i3 = i3 + 8 | 0;
+ i5 = (HEAP32[i3 >> 2] | 0) + (0 - i1 << 4) | 0;
+ HEAP32[i3 >> 2] = i5;
+ if ((i1 | 0) <= 0) {
+ STACKTOP = i2;
+ return;
+ }
+ i4 = i4 + 8 | 0;
+ i6 = 0;
+ while (1) {
+ i7 = HEAP32[i4 >> 2] | 0;
+ HEAP32[i4 >> 2] = i7 + 16;
+ i10 = i5 + (i6 << 4) | 0;
+ i9 = HEAP32[i10 + 4 >> 2] | 0;
+ i8 = i7;
+ HEAP32[i8 >> 2] = HEAP32[i10 >> 2];
+ HEAP32[i8 + 4 >> 2] = i9;
+ HEAP32[i7 + 8 >> 2] = HEAP32[i5 + (i6 << 4) + 8 >> 2];
+ i6 = i6 + 1 | 0;
+ if ((i6 | 0) == (i1 | 0)) {
+ break;
+ }
+ i5 = HEAP32[i3 >> 2] | 0;
+ }
+ STACKTOP = i2;
+ return;
+}
+function _luaM_realloc_(i7, i10, i3, i2) {
+ i7 = i7 | 0;
+ i10 = i10 | 0;
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ var i1 = 0, i4 = 0, i5 = 0, i6 = 0, i8 = 0, i9 = 0;
+ i5 = STACKTOP;
+ i6 = HEAP32[i7 + 12 >> 2] | 0;
+ i4 = (i10 | 0) != 0;
+ i9 = i6 + 4 | 0;
+ i8 = FUNCTION_TABLE_iiiii[HEAP32[i6 >> 2] & 3](HEAP32[i9 >> 2] | 0, i10, i3, i2) | 0;
+ if (!((i8 | 0) != 0 | (i2 | 0) == 0)) {
+ if ((HEAP8[i6 + 63 | 0] | 0) == 0) {
+ _luaD_throw(i7, 4);
+ }
+ _luaC_fullgc(i7, 1);
+ i8 = FUNCTION_TABLE_iiiii[HEAP32[i6 >> 2] & 3](HEAP32[i9 >> 2] | 0, i10, i3, i2) | 0;
+ if ((i8 | 0) == 0) {
+ _luaD_throw(i7, 4);
+ } else {
+ i1 = i8;
+ }
+ } else {
+ i1 = i8;
+ }
+ i6 = i6 + 12 | 0;
+ HEAP32[i6 >> 2] = (i4 ? 0 - i3 | 0 : 0) + i2 + (HEAP32[i6 >> 2] | 0);
+ STACKTOP = i5;
+ return i1 | 0;
+}
+function _realloc(i2, i3) {
+ i2 = i2 | 0;
+ i3 = i3 | 0;
+ var i1 = 0, i4 = 0, i5 = 0;
+ i1 = STACKTOP;
+ do {
+ if ((i2 | 0) != 0) {
+ if (i3 >>> 0 > 4294967231) {
+ HEAP32[(___errno_location() | 0) >> 2] = 12;
+ i4 = 0;
+ break;
+ }
+ if (i3 >>> 0 < 11) {
+ i4 = 16;
+ } else {
+ i4 = i3 + 11 & -8;
+ }
+ i4 = _try_realloc_chunk(i2 + -8 | 0, i4) | 0;
+ if ((i4 | 0) != 0) {
+ i4 = i4 + 8 | 0;
+ break;
+ }
+ i4 = _malloc(i3) | 0;
+ if ((i4 | 0) == 0) {
+ i4 = 0;
+ } else {
+ i5 = HEAP32[i2 + -4 >> 2] | 0;
+ i5 = (i5 & -8) - ((i5 & 3 | 0) == 0 ? 8 : 4) | 0;
+ _memcpy(i4 | 0, i2 | 0, (i5 >>> 0 < i3 >>> 0 ? i5 : i3) | 0) | 0;
+ _free(i2);
+ }
+ } else {
+ i4 = _malloc(i3) | 0;
+ }
+ } while (0);
+ STACKTOP = i1;
+ return i4 | 0;
+}
+function _lua_setlocal(i3, i5, i4) {
+ i3 = i3 | 0;
+ i5 = i5 | 0;
+ i4 = i4 | 0;
+ var i1 = 0, i2 = 0, i6 = 0, i7 = 0, i8 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i2 = i1;
+ HEAP32[i2 >> 2] = 0;
+ i4 = _findlocal(i3, HEAP32[i5 + 96 >> 2] | 0, i4, i2) | 0;
+ i3 = i3 + 8 | 0;
+ if ((i4 | 0) == 0) {
+ i5 = HEAP32[i3 >> 2] | 0;
+ i5 = i5 + -16 | 0;
+ HEAP32[i3 >> 2] = i5;
+ STACKTOP = i1;
+ return i4 | 0;
+ }
+ i6 = HEAP32[i3 >> 2] | 0;
+ i5 = HEAP32[i2 >> 2] | 0;
+ i8 = i6 + -16 | 0;
+ i7 = HEAP32[i8 + 4 >> 2] | 0;
+ i2 = i5;
+ HEAP32[i2 >> 2] = HEAP32[i8 >> 2];
+ HEAP32[i2 + 4 >> 2] = i7;
+ HEAP32[i5 + 8 >> 2] = HEAP32[i6 + -8 >> 2];
+ i5 = HEAP32[i3 >> 2] | 0;
+ i5 = i5 + -16 | 0;
+ HEAP32[i3 >> 2] = i5;
+ STACKTOP = i1;
+ return i4 | 0;
+}
+function ___remdi3(i1, i4, i5, i6) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ i6 = i6 | 0;
+ var i2 = 0, i3 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 8 | 0;
+ i2 = i3 | 0;
+ i7 = i4 >> 31 | ((i4 | 0) < 0 ? -1 : 0) << 1;
+ i8 = ((i4 | 0) < 0 ? -1 : 0) >> 31 | ((i4 | 0) < 0 ? -1 : 0) << 1;
+ i9 = i6 >> 31 | ((i6 | 0) < 0 ? -1 : 0) << 1;
+ i10 = ((i6 | 0) < 0 ? -1 : 0) >> 31 | ((i6 | 0) < 0 ? -1 : 0) << 1;
+ i1 = _i64Subtract(i7 ^ i1, i8 ^ i4, i7, i8) | 0;
+ i4 = tempRet0;
+ ___udivmoddi4(i1, i4, _i64Subtract(i9 ^ i5, i10 ^ i6, i9, i10) | 0, tempRet0, i2) | 0;
+ i9 = _i64Subtract(HEAP32[i2 >> 2] ^ i7, HEAP32[i2 + 4 >> 2] ^ i8, i7, i8) | 0;
+ i8 = tempRet0;
+ STACKTOP = i3;
+ return (tempRet0 = i8, i9) | 0;
+}
+function _luaC_barrierproto_(i3, i4, i2) {
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ i2 = i2 | 0;
+ var i1 = 0, i5 = 0;
+ i1 = STACKTOP;
+ if ((HEAP32[i4 + 32 >> 2] | 0) != 0) {
+ i5 = HEAP32[i3 + 12 >> 2] | 0;
+ i3 = i4 + 5 | 0;
+ HEAP8[i3] = HEAP8[i3] & 251;
+ i5 = i5 + 88 | 0;
+ HEAP32[i4 + 72 >> 2] = HEAP32[i5 >> 2];
+ HEAP32[i5 >> 2] = i4;
+ STACKTOP = i1;
+ return;
+ }
+ if ((HEAP8[i2 + 5 | 0] & 3) == 0) {
+ STACKTOP = i1;
+ return;
+ }
+ i5 = i4 + 5 | 0;
+ i4 = HEAP8[i5] | 0;
+ if ((i4 & 4) == 0) {
+ STACKTOP = i1;
+ return;
+ }
+ i3 = HEAP32[i3 + 12 >> 2] | 0;
+ if ((HEAPU8[i3 + 61 | 0] | 0) < 2) {
+ _reallymarkobject(i3, i2);
+ STACKTOP = i1;
+ return;
+ } else {
+ HEAP8[i5] = HEAP8[i3 + 60 | 0] & 3 | i4 & 184;
+ STACKTOP = i1;
+ return;
+ }
+}
+function _luaL_openlibs(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _luaL_requiref(i1, 2592, 144, 1);
+ _lua_settop(i1, -2);
+ _luaL_requiref(i1, 2600, 145, 1);
+ _lua_settop(i1, -2);
+ _luaL_requiref(i1, 2608, 146, 1);
+ _lua_settop(i1, -2);
+ _luaL_requiref(i1, 2624, 147, 1);
+ _lua_settop(i1, -2);
+ _luaL_requiref(i1, 2632, 148, 1);
+ _lua_settop(i1, -2);
+ _luaL_requiref(i1, 2640, 149, 1);
+ _lua_settop(i1, -2);
+ _luaL_requiref(i1, 2648, 150, 1);
+ _lua_settop(i1, -2);
+ _luaL_requiref(i1, 2656, 151, 1);
+ _lua_settop(i1, -2);
+ _luaL_requiref(i1, 2664, 152, 1);
+ _lua_settop(i1, -2);
+ _luaL_requiref(i1, 2672, 153, 1);
+ _lua_settop(i1, -2);
+ _luaL_getsubtable(i1, -1001e3, 2576) | 0;
+ _lua_settop(i1, -2);
+ STACKTOP = i2;
+ return;
+}
+function _luaX_token2str(i4, i3) {
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ var i1 = 0, i2 = 0, i5 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i2 = i1;
+ if ((i3 | 0) >= 257) {
+ i5 = HEAP32[12096 + (i3 + -257 << 2) >> 2] | 0;
+ if ((i3 | 0) >= 286) {
+ STACKTOP = i1;
+ return i5 | 0;
+ }
+ i4 = HEAP32[i4 + 52 >> 2] | 0;
+ HEAP32[i2 >> 2] = i5;
+ i5 = _luaO_pushfstring(i4, 12256, i2) | 0;
+ STACKTOP = i1;
+ return i5 | 0;
+ }
+ i4 = HEAP32[i4 + 52 >> 2] | 0;
+ if ((HEAP8[i3 + 10913 | 0] & 4) == 0) {
+ HEAP32[i2 >> 2] = i3;
+ i5 = _luaO_pushfstring(i4, 12240, i2) | 0;
+ STACKTOP = i1;
+ return i5 | 0;
+ } else {
+ HEAP32[i2 >> 2] = i3;
+ i5 = _luaO_pushfstring(i4, 12232, i2) | 0;
+ STACKTOP = i1;
+ return i5 | 0;
+ }
+ return 0;
+}
+function _luaL_buffinitsize(i6, i1, i7) {
+ i6 = i6 | 0;
+ i1 = i1 | 0;
+ i7 = i7 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i8 = 0;
+ i2 = STACKTOP;
+ HEAP32[i1 + 12 >> 2] = i6;
+ i3 = i1 + 16 | 0;
+ HEAP32[i1 >> 2] = i3;
+ i5 = i1 + 8 | 0;
+ HEAP32[i5 >> 2] = 0;
+ i4 = i1 + 4 | 0;
+ HEAP32[i4 >> 2] = 1024;
+ if (!(i7 >>> 0 > 1024)) {
+ i7 = i3;
+ i8 = 0;
+ i8 = i7 + i8 | 0;
+ STACKTOP = i2;
+ return i8 | 0;
+ }
+ i8 = i7 >>> 0 > 2048 ? i7 : 2048;
+ i7 = _lua_newuserdata(i6, i8) | 0;
+ _memcpy(i7 | 0, HEAP32[i1 >> 2] | 0, HEAP32[i5 >> 2] | 0) | 0;
+ if ((HEAP32[i1 >> 2] | 0) != (i3 | 0)) {
+ _lua_remove(i6, -2);
+ }
+ HEAP32[i1 >> 2] = i7;
+ HEAP32[i4 >> 2] = i8;
+ i8 = HEAP32[i5 >> 2] | 0;
+ i8 = i7 + i8 | 0;
+ STACKTOP = i2;
+ return i8 | 0;
+}
+function _luaE_freethread(i1, i3) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0;
+ i2 = STACKTOP;
+ i4 = i3 + 28 | 0;
+ _luaF_close(i3, HEAP32[i4 >> 2] | 0);
+ i5 = HEAP32[i4 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ _luaM_realloc_(i1, i3, 112, 0) | 0;
+ STACKTOP = i2;
+ return;
+ }
+ HEAP32[i3 + 16 >> 2] = i3 + 72;
+ i7 = i3 + 84 | 0;
+ i6 = HEAP32[i7 >> 2] | 0;
+ HEAP32[i7 >> 2] = 0;
+ if ((i6 | 0) != 0) {
+ while (1) {
+ i5 = HEAP32[i6 + 12 >> 2] | 0;
+ _luaM_realloc_(i3, i6, 40, 0) | 0;
+ if ((i5 | 0) == 0) {
+ break;
+ } else {
+ i6 = i5;
+ }
+ }
+ i5 = HEAP32[i4 >> 2] | 0;
+ }
+ _luaM_realloc_(i3, i5, HEAP32[i3 + 32 >> 2] << 4, 0) | 0;
+ _luaM_realloc_(i1, i3, 112, 0) | 0;
+ STACKTOP = i2;
+ return;
+}
+function ___toread(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0;
+ i3 = STACKTOP;
+ i4 = i1 + 74 | 0;
+ i2 = HEAP8[i4] | 0;
+ HEAP8[i4] = i2 + 255 | i2;
+ i4 = i1 + 20 | 0;
+ i2 = i1 + 44 | 0;
+ if ((HEAP32[i4 >> 2] | 0) >>> 0 > (HEAP32[i2 >> 2] | 0) >>> 0) {
+ FUNCTION_TABLE_iiii[HEAP32[i1 + 36 >> 2] & 3](i1, 0, 0) | 0;
+ }
+ HEAP32[i1 + 16 >> 2] = 0;
+ HEAP32[i1 + 28 >> 2] = 0;
+ HEAP32[i4 >> 2] = 0;
+ i4 = HEAP32[i1 >> 2] | 0;
+ if ((i4 & 20 | 0) == 0) {
+ i4 = HEAP32[i2 >> 2] | 0;
+ HEAP32[i1 + 8 >> 2] = i4;
+ HEAP32[i1 + 4 >> 2] = i4;
+ i4 = 0;
+ STACKTOP = i3;
+ return i4 | 0;
+ }
+ if ((i4 & 4 | 0) == 0) {
+ i4 = -1;
+ STACKTOP = i3;
+ return i4 | 0;
+ }
+ HEAP32[i1 >> 2] = i4 | 32;
+ i4 = -1;
+ STACKTOP = i3;
+ return i4 | 0;
+}
+function _lua_callk(i3, i7, i4, i6, i5) {
+ i3 = i3 | 0;
+ i7 = i7 | 0;
+ i4 = i4 | 0;
+ i6 = i6 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i2 = 0, i8 = 0;
+ i1 = STACKTOP;
+ i2 = i3 + 8 | 0;
+ i7 = (HEAP32[i2 >> 2] | 0) + (~i7 << 4) | 0;
+ if ((i5 | 0) != 0 ? (HEAP16[i3 + 36 >> 1] | 0) == 0 : 0) {
+ i8 = i3 + 16 | 0;
+ HEAP32[(HEAP32[i8 >> 2] | 0) + 28 >> 2] = i5;
+ HEAP32[(HEAP32[i8 >> 2] | 0) + 24 >> 2] = i6;
+ _luaD_call(i3, i7, i4, 1);
+ } else {
+ _luaD_call(i3, i7, i4, 0);
+ }
+ if (!((i4 | 0) == -1)) {
+ STACKTOP = i1;
+ return;
+ }
+ i3 = (HEAP32[i3 + 16 >> 2] | 0) + 4 | 0;
+ i2 = HEAP32[i2 >> 2] | 0;
+ if (!((HEAP32[i3 >> 2] | 0) >>> 0 < i2 >>> 0)) {
+ STACKTOP = i1;
+ return;
+ }
+ HEAP32[i3 >> 2] = i2;
+ STACKTOP = i1;
+ return;
+}
+function _luaX_newstring(i3, i5, i4) {
+ i3 = i3 | 0;
+ i5 = i5 | 0;
+ i4 = i4 | 0;
+ var i1 = 0, i2 = 0, i6 = 0;
+ i1 = STACKTOP;
+ i2 = HEAP32[i3 + 52 >> 2] | 0;
+ i5 = _luaS_newlstr(i2, i5, i4) | 0;
+ i4 = i2 + 8 | 0;
+ i6 = HEAP32[i4 >> 2] | 0;
+ HEAP32[i4 >> 2] = i6 + 16;
+ HEAP32[i6 >> 2] = i5;
+ HEAP32[i6 + 8 >> 2] = HEAPU8[i5 + 4 | 0] | 0 | 64;
+ i6 = _luaH_set(i2, HEAP32[(HEAP32[i3 + 48 >> 2] | 0) + 4 >> 2] | 0, (HEAP32[i4 >> 2] | 0) + -16 | 0) | 0;
+ i3 = i6 + 8 | 0;
+ if ((HEAP32[i3 >> 2] | 0) == 0 ? (HEAP32[i6 >> 2] = 1, HEAP32[i3 >> 2] = 1, (HEAP32[(HEAP32[i2 + 12 >> 2] | 0) + 12 >> 2] | 0) > 0) : 0) {
+ _luaC_step(i2);
+ }
+ HEAP32[i4 >> 2] = (HEAP32[i4 >> 2] | 0) + -16;
+ STACKTOP = i1;
+ return i5 | 0;
+}
+function _strtod(i3, i2) {
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ var i1 = 0, i4 = 0, d5 = 0.0, i6 = 0, i7 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 112 | 0;
+ i4 = i1;
+ i7 = i4 + 0 | 0;
+ i6 = i7 + 112 | 0;
+ do {
+ HEAP32[i7 >> 2] = 0;
+ i7 = i7 + 4 | 0;
+ } while ((i7 | 0) < (i6 | 0));
+ i6 = i4 + 4 | 0;
+ HEAP32[i6 >> 2] = i3;
+ i7 = i4 + 8 | 0;
+ HEAP32[i7 >> 2] = -1;
+ HEAP32[i4 + 44 >> 2] = i3;
+ HEAP32[i4 + 76 >> 2] = -1;
+ ___shlim(i4, 0);
+ d5 = +___floatscan(i4, 1, 1);
+ i4 = (HEAP32[i6 >> 2] | 0) - (HEAP32[i7 >> 2] | 0) + (HEAP32[i4 + 108 >> 2] | 0) | 0;
+ if ((i2 | 0) == 0) {
+ STACKTOP = i1;
+ return +d5;
+ }
+ if ((i4 | 0) != 0) {
+ i3 = i3 + i4 | 0;
+ }
+ HEAP32[i2 >> 2] = i3;
+ STACKTOP = i1;
+ return +d5;
+}
+function _f_seek(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, d6 = 0.0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = _luaL_checkudata(i1, 1, 2832) | 0;
+ if ((HEAP32[i3 + 4 >> 2] | 0) == 0) {
+ _luaL_error(i1, 3080, i2) | 0;
+ }
+ i3 = HEAP32[i3 >> 2] | 0;
+ i5 = _luaL_checkoption(i1, 2, 3208, 3184) | 0;
+ d6 = +_luaL_optnumber(i1, 3, 0.0);
+ i4 = ~~d6;
+ if (!(+(i4 | 0) == d6)) {
+ _luaL_argerror(i1, 3, 3224) | 0;
+ }
+ if ((_fseek(i3 | 0, i4 | 0, HEAP32[3168 + (i5 << 2) >> 2] | 0) | 0) == 0) {
+ _lua_pushnumber(i1, +(_ftell(i3 | 0) | 0));
+ i5 = 1;
+ STACKTOP = i2;
+ return i5 | 0;
+ } else {
+ i5 = _luaL_fileresult(i1, 0, 0) | 0;
+ STACKTOP = i2;
+ return i5 | 0;
+ }
+ return 0;
+}
+function _setpath(i1, i4, i8, i7, i3) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i8 = i8 | 0;
+ i7 = i7 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i5 = 0, i6 = 0;
+ i2 = STACKTOP;
+ i8 = _getenv(i8 | 0) | 0;
+ if ((i8 | 0) == 0) {
+ i7 = _getenv(i7 | 0) | 0;
+ if ((i7 | 0) != 0) {
+ i5 = i7;
+ i6 = 3;
+ }
+ } else {
+ i5 = i8;
+ i6 = 3;
+ }
+ if ((i6 | 0) == 3 ? (_lua_getfield(i1, -1001e3, 4832), i8 = _lua_toboolean(i1, -1) | 0, _lua_settop(i1, -2), (i8 | 0) == 0) : 0) {
+ _luaL_gsub(i1, _luaL_gsub(i1, i5, 4808, 4816) | 0, 4824, i3) | 0;
+ _lua_remove(i1, -2);
+ _lua_setfield(i1, -2, i4);
+ STACKTOP = i2;
+ return;
+ }
+ _lua_pushstring(i1, i3) | 0;
+ _lua_setfield(i1, -2, i4);
+ STACKTOP = i2;
+ return;
+}
+function _luaU_header(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ HEAP8[i1] = 1635077147;
+ HEAP8[i1 + 1 | 0] = 6387020;
+ HEAP8[i1 + 2 | 0] = 24949;
+ HEAP8[i1 + 3 | 0] = 97;
+ HEAP8[i1 + 4 | 0] = 82;
+ HEAP8[i1 + 5 | 0] = 0;
+ HEAP8[i1 + 6 | 0] = 1;
+ HEAP8[i1 + 7 | 0] = 4;
+ HEAP8[i1 + 8 | 0] = 4;
+ HEAP8[i1 + 9 | 0] = 4;
+ HEAP8[i1 + 10 | 0] = 8;
+ i3 = i1 + 12 | 0;
+ HEAP8[i1 + 11 | 0] = 0;
+ HEAP8[i3 + 0 | 0] = HEAP8[8816 | 0] | 0;
+ HEAP8[i3 + 1 | 0] = HEAP8[8817 | 0] | 0;
+ HEAP8[i3 + 2 | 0] = HEAP8[8818 | 0] | 0;
+ HEAP8[i3 + 3 | 0] = HEAP8[8819 | 0] | 0;
+ HEAP8[i3 + 4 | 0] = HEAP8[8820 | 0] | 0;
+ HEAP8[i3 + 5 | 0] = HEAP8[8821 | 0] | 0;
+ STACKTOP = i2;
+ return;
+}
+function _db_setlocal(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 112 | 0;
+ i3 = i2;
+ if ((_lua_type(i1, 1) | 0) == 8) {
+ i5 = _lua_tothread(i1, 1) | 0;
+ i4 = 1;
+ } else {
+ i5 = i1;
+ i4 = 0;
+ }
+ i6 = i4 + 1 | 0;
+ if ((_lua_getstack(i5, _luaL_checkinteger(i1, i6) | 0, i3) | 0) == 0) {
+ i6 = _luaL_argerror(i1, i6, 11560) | 0;
+ STACKTOP = i2;
+ return i6 | 0;
+ } else {
+ i6 = i4 + 3 | 0;
+ _luaL_checkany(i1, i6);
+ _lua_settop(i1, i6);
+ _lua_xmove(i1, i5, 1);
+ _lua_pushstring(i1, _lua_setlocal(i5, i3, _luaL_checkinteger(i1, i4 | 2) | 0) | 0) | 0;
+ i6 = 1;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ return 0;
+}
+function _tremove(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0;
+ i2 = STACKTOP;
+ _luaL_checktype(i1, 1, 5);
+ i3 = _luaL_len(i1, 1) | 0;
+ i4 = _luaL_optinteger(i1, 2, i3) | 0;
+ if ((i4 | 0) != (i3 | 0) ? (i4 | 0) < 1 | (i4 | 0) > (i3 + 1 | 0) : 0) {
+ _luaL_argerror(i1, 1, 8256) | 0;
+ }
+ _lua_rawgeti(i1, 1, i4);
+ if ((i4 | 0) >= (i3 | 0)) {
+ i5 = i4;
+ _lua_pushnil(i1);
+ _lua_rawseti(i1, 1, i5);
+ STACKTOP = i2;
+ return 1;
+ }
+ while (1) {
+ i5 = i4 + 1 | 0;
+ _lua_rawgeti(i1, 1, i5);
+ _lua_rawseti(i1, 1, i4);
+ if ((i5 | 0) == (i3 | 0)) {
+ break;
+ } else {
+ i4 = i5;
+ }
+ }
+ _lua_pushnil(i1);
+ _lua_rawseti(i1, 1, i3);
+ STACKTOP = i2;
+ return 1;
+}
+function _luaL_checkudata(i1, i7, i5) {
+ i1 = i1 | 0;
+ i7 = i7 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i6 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i4 = i2;
+ i3 = _lua_touserdata(i1, i7) | 0;
+ if (((i3 | 0) != 0 ? (_lua_getmetatable(i1, i7) | 0) != 0 : 0) ? (_lua_getfield(i1, -1001e3, i5), i6 = (_lua_rawequal(i1, -1, -2) | 0) == 0, i6 = i6 ? 0 : i3, _lua_settop(i1, -3), (i6 | 0) != 0) : 0) {
+ i7 = i6;
+ STACKTOP = i2;
+ return i7 | 0;
+ }
+ i6 = _lua_typename(i1, _lua_type(i1, i7) | 0) | 0;
+ HEAP32[i4 >> 2] = i5;
+ HEAP32[i4 + 4 >> 2] = i6;
+ _luaL_argerror(i1, i7, _lua_pushfstring(i1, 1744, i4) | 0) | 0;
+ i7 = 0;
+ STACKTOP = i2;
+ return i7 | 0;
+}
+function _luaL_error(i1, i5, i7) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ i7 = i7 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i6 = 0;
+ i4 = STACKTOP;
+ STACKTOP = STACKTOP + 128 | 0;
+ i3 = i4;
+ i2 = i4 + 24 | 0;
+ i4 = i4 + 8 | 0;
+ HEAP32[i4 >> 2] = i7;
+ if ((_lua_getstack(i1, 1, i2) | 0) != 0 ? (_lua_getinfo(i1, 1152, i2) | 0, i6 = HEAP32[i2 + 20 >> 2] | 0, (i6 | 0) > 0) : 0) {
+ HEAP32[i3 >> 2] = i2 + 36;
+ HEAP32[i3 + 4 >> 2] = i6;
+ _lua_pushfstring(i1, 1160, i3) | 0;
+ _lua_pushvfstring(i1, i5, i4) | 0;
+ _lua_concat(i1, 2);
+ _lua_error(i1) | 0;
+ }
+ _lua_pushlstring(i1, 1168, 0) | 0;
+ _lua_pushvfstring(i1, i5, i4) | 0;
+ _lua_concat(i1, 2);
+ _lua_error(i1) | 0;
+ return 0;
+}
+function _luaK_infix(i1, i4, i3) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ L1 : do {
+ switch (i4 | 0) {
+ case 6:
+ {
+ _luaK_exp2nextreg(i1, i3);
+ break;
+ }
+ case 5:
+ case 4:
+ case 3:
+ case 2:
+ case 1:
+ case 0:
+ {
+ if (((HEAP32[i3 >> 2] | 0) == 5 ? (HEAP32[i3 + 16 >> 2] | 0) == -1 : 0) ? (HEAP32[i3 + 20 >> 2] | 0) == -1 : 0) {
+ break L1;
+ }
+ _luaK_exp2RK(i1, i3) | 0;
+ break;
+ }
+ case 13:
+ {
+ _luaK_goiftrue(i1, i3);
+ break;
+ }
+ case 14:
+ {
+ _luaK_goiffalse(i1, i3);
+ break;
+ }
+ default:
+ {
+ _luaK_exp2RK(i1, i3) | 0;
+ }
+ }
+ } while (0);
+ STACKTOP = i2;
+ return;
+}
+function _luaD_shrinkstack(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0;
+ i2 = STACKTOP;
+ i4 = HEAP32[i1 + 8 >> 2] | 0;
+ i3 = HEAP32[i1 + 16 >> 2] | 0;
+ if ((i3 | 0) != 0) {
+ do {
+ i5 = HEAP32[i3 + 4 >> 2] | 0;
+ i4 = i4 >>> 0 < i5 >>> 0 ? i5 : i4;
+ i3 = HEAP32[i3 + 8 >> 2] | 0;
+ } while ((i3 | 0) != 0);
+ }
+ i3 = i4 - (HEAP32[i1 + 28 >> 2] | 0) | 0;
+ i4 = (i3 >> 4) + 1 | 0;
+ i4 = ((i4 | 0) / 8 | 0) + 10 + i4 | 0;
+ i4 = (i4 | 0) > 1e6 ? 1e6 : i4;
+ if ((i3 | 0) > 15999984) {
+ STACKTOP = i2;
+ return;
+ }
+ if ((i4 | 0) >= (HEAP32[i1 + 32 >> 2] | 0)) {
+ STACKTOP = i2;
+ return;
+ }
+ _luaD_reallocstack(i1, i4);
+ STACKTOP = i2;
+ return;
+}
+function _luaF_newproto(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ i1 = _luaC_newobj(i1, 9, 80, 0, 0) | 0;
+ HEAP32[i1 + 8 >> 2] = 0;
+ HEAP32[i1 + 44 >> 2] = 0;
+ HEAP32[i1 + 16 >> 2] = 0;
+ HEAP32[i1 + 56 >> 2] = 0;
+ HEAP32[i1 + 12 >> 2] = 0;
+ HEAP32[i1 + 32 >> 2] = 0;
+ HEAP32[i1 + 48 >> 2] = 0;
+ HEAP32[i1 + 20 >> 2] = 0;
+ HEAP32[i1 + 52 >> 2] = 0;
+ HEAP32[i1 + 28 >> 2] = 0;
+ HEAP32[i1 + 40 >> 2] = 0;
+ HEAP8[i1 + 76 | 0] = 0;
+ HEAP8[i1 + 77 | 0] = 0;
+ HEAP8[i1 + 78 | 0] = 0;
+ HEAP32[i1 + 24 >> 2] = 0;
+ HEAP32[i1 + 60 >> 2] = 0;
+ HEAP32[i1 + 64 >> 2] = 0;
+ HEAP32[i1 + 68 >> 2] = 0;
+ HEAP32[i1 + 36 >> 2] = 0;
+ STACKTOP = i2;
+ return i1 | 0;
+}
+function _luaF_freeproto(i2, i1) {
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i3 = 0;
+ i3 = STACKTOP;
+ _luaM_realloc_(i2, HEAP32[i1 + 12 >> 2] | 0, HEAP32[i1 + 48 >> 2] << 2, 0) | 0;
+ _luaM_realloc_(i2, HEAP32[i1 + 16 >> 2] | 0, HEAP32[i1 + 56 >> 2] << 2, 0) | 0;
+ _luaM_realloc_(i2, HEAP32[i1 + 8 >> 2] | 0, HEAP32[i1 + 44 >> 2] << 4, 0) | 0;
+ _luaM_realloc_(i2, HEAP32[i1 + 20 >> 2] | 0, HEAP32[i1 + 52 >> 2] << 2, 0) | 0;
+ _luaM_realloc_(i2, HEAP32[i1 + 24 >> 2] | 0, (HEAP32[i1 + 60 >> 2] | 0) * 12 | 0, 0) | 0;
+ _luaM_realloc_(i2, HEAP32[i1 + 28 >> 2] | 0, HEAP32[i1 + 40 >> 2] << 3, 0) | 0;
+ _luaM_realloc_(i2, i1, 80, 0) | 0;
+ STACKTOP = i3;
+ return;
+}
+function _luaK_patchclose(i3, i7, i4) {
+ i3 = i3 | 0;
+ i7 = i7 | 0;
+ i4 = i4 | 0;
+ var i1 = 0, i2 = 0, i5 = 0, i6 = 0, i8 = 0;
+ i2 = STACKTOP;
+ if ((i7 | 0) == -1) {
+ STACKTOP = i2;
+ return;
+ }
+ i3 = HEAP32[(HEAP32[i3 >> 2] | 0) + 12 >> 2] | 0;
+ i4 = (i4 << 6) + 64 & 16320;
+ while (1) {
+ i6 = i3 + (i7 << 2) | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ i8 = (i5 >>> 14) + -131071 | 0;
+ if ((i8 | 0) == -1) {
+ break;
+ }
+ i7 = i7 + 1 + i8 | 0;
+ HEAP32[i6 >> 2] = i5 & -16321 | i4;
+ if ((i7 | 0) == -1) {
+ i1 = 6;
+ break;
+ }
+ }
+ if ((i1 | 0) == 6) {
+ STACKTOP = i2;
+ return;
+ }
+ HEAP32[i6 >> 2] = i5 & -16321 | i4;
+ STACKTOP = i2;
+ return;
+}
+function _loadfunc(i1, i4, i5) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i3 = 0, i6 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = i2;
+ i6 = _luaL_gsub(i1, i5, 4936, 4944) | 0;
+ i5 = _strchr(i6, 45) | 0;
+ do {
+ if ((i5 | 0) != 0) {
+ HEAP32[i3 >> 2] = _lua_pushlstring(i1, i6, i5 - i6 | 0) | 0;
+ i6 = _ll_loadfunc(i1, i4, _lua_pushfstring(i1, 4952, i3) | 0) | 0;
+ if ((i6 | 0) == 2) {
+ i6 = i5 + 1 | 0;
+ break;
+ } else {
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ }
+ } while (0);
+ HEAP32[i3 >> 2] = i6;
+ i6 = _ll_loadfunc(i1, i4, _lua_pushfstring(i1, 4952, i3) | 0) | 0;
+ STACKTOP = i2;
+ return i6 | 0;
+}
+function _luaK_setlist(i1, i3, i4, i5) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ i4 = ((i4 + -1 | 0) / 50 | 0) + 1 | 0;
+ i5 = (i5 | 0) == -1 ? 0 : i5;
+ if ((i4 | 0) < 512) {
+ _luaK_code(i1, i3 << 6 | i5 << 23 | i4 << 14 | 36) | 0;
+ i4 = i3 + 1 | 0;
+ i4 = i4 & 255;
+ i5 = i1 + 48 | 0;
+ HEAP8[i5] = i4;
+ STACKTOP = i2;
+ return;
+ }
+ if ((i4 | 0) >= 67108864) {
+ _luaX_syntaxerror(HEAP32[i1 + 12 >> 2] | 0, 10576);
+ }
+ _luaK_code(i1, i3 << 6 | i5 << 23 | 36) | 0;
+ _luaK_code(i1, i4 << 6 | 39) | 0;
+ i4 = i3 + 1 | 0;
+ i4 = i4 & 255;
+ i5 = i1 + 48 | 0;
+ HEAP8[i5] = i4;
+ STACKTOP = i2;
+ return;
+}
+function _lua_getstack(i2, i6, i3) {
+ i2 = i2 | 0;
+ i6 = i6 | 0;
+ i3 = i3 | 0;
+ var i1 = 0, i4 = 0, i5 = 0;
+ i1 = STACKTOP;
+ L1 : do {
+ if ((i6 | 0) >= 0) {
+ i5 = HEAP32[i2 + 16 >> 2] | 0;
+ if ((i6 | 0) > 0) {
+ i4 = i2 + 72 | 0;
+ do {
+ if ((i5 | 0) == (i4 | 0)) {
+ i2 = 0;
+ break L1;
+ }
+ i6 = i6 + -1 | 0;
+ i5 = HEAP32[i5 + 8 >> 2] | 0;
+ } while ((i6 | 0) > 0);
+ if ((i6 | 0) != 0) {
+ i2 = 0;
+ break;
+ }
+ }
+ if ((i5 | 0) != (i2 + 72 | 0)) {
+ HEAP32[i3 + 96 >> 2] = i5;
+ i2 = 1;
+ } else {
+ i2 = 0;
+ }
+ } else {
+ i2 = 0;
+ }
+ } while (0);
+ STACKTOP = i1;
+ return i2 | 0;
+}
+function _luaC_checkupvalcolor(i1, i5) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i3 = 0, i4 = 0;
+ i2 = STACKTOP;
+ i4 = i5 + 5 | 0;
+ i3 = HEAPU8[i4] | 0;
+ if ((i3 & 7 | 0) != 0) {
+ STACKTOP = i2;
+ return;
+ }
+ if ((HEAP8[i1 + 62 | 0] | 0) != 2 ? (HEAPU8[i1 + 61 | 0] | 0) >= 2 : 0) {
+ HEAP8[i4] = HEAP8[i1 + 60 | 0] & 3 | i3 & 184;
+ STACKTOP = i2;
+ return;
+ }
+ HEAP8[i4] = i3 & 187 | 4;
+ i3 = HEAP32[i5 + 8 >> 2] | 0;
+ if ((HEAP32[i3 + 8 >> 2] & 64 | 0) == 0) {
+ STACKTOP = i2;
+ return;
+ }
+ i3 = HEAP32[i3 >> 2] | 0;
+ if ((HEAP8[i3 + 5 | 0] & 3) == 0) {
+ STACKTOP = i2;
+ return;
+ }
+ _reallymarkobject(i1, i3);
+ STACKTOP = i2;
+ return;
+}
+function _luaB_collectgarbage(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0;
+ i2 = STACKTOP;
+ i4 = HEAP32[10160 + ((_luaL_checkoption(i1, 1, 10040, 9976) | 0) << 2) >> 2] | 0;
+ i3 = _lua_gc(i1, i4, _luaL_optinteger(i1, 2, 0) | 0) | 0;
+ if ((i4 | 0) == 3) {
+ i4 = _lua_gc(i1, 4, 0) | 0;
+ _lua_pushnumber(i1, +(i3 | 0) + +(i4 | 0) * .0009765625);
+ _lua_pushinteger(i1, i4);
+ i4 = 2;
+ STACKTOP = i2;
+ return i4 | 0;
+ } else if ((i4 | 0) == 9 | (i4 | 0) == 5) {
+ _lua_pushboolean(i1, i3);
+ i4 = 1;
+ STACKTOP = i2;
+ return i4 | 0;
+ } else {
+ _lua_pushinteger(i1, i3);
+ i4 = 1;
+ STACKTOP = i2;
+ return i4 | 0;
+ }
+ return 0;
+}
+function _maxn(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, d3 = 0.0, d4 = 0.0;
+ i2 = STACKTOP;
+ _luaL_checktype(i1, 1, 5);
+ _lua_pushnil(i1);
+ L1 : do {
+ if ((_lua_next(i1, 1) | 0) == 0) {
+ d3 = 0.0;
+ } else {
+ d4 = 0.0;
+ while (1) {
+ while (1) {
+ _lua_settop(i1, -2);
+ if ((_lua_type(i1, -1) | 0) == 3 ? (d3 = +_lua_tonumberx(i1, -1, 0), d3 > d4) : 0) {
+ break;
+ }
+ if ((_lua_next(i1, 1) | 0) == 0) {
+ d3 = d4;
+ break L1;
+ }
+ }
+ if ((_lua_next(i1, 1) | 0) == 0) {
+ break;
+ } else {
+ d4 = d3;
+ }
+ }
+ }
+ } while (0);
+ _lua_pushnumber(i1, d3);
+ STACKTOP = i2;
+ return 1;
+}
+function _str_char(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 1040 | 0;
+ i4 = i2;
+ i3 = _lua_gettop(i1) | 0;
+ i5 = _luaL_buffinitsize(i1, i4, i3) | 0;
+ if ((i3 | 0) < 1) {
+ _luaL_pushresultsize(i4, i3);
+ STACKTOP = i2;
+ return 1;
+ } else {
+ i6 = 1;
+ }
+ while (1) {
+ i7 = _luaL_checkinteger(i1, i6) | 0;
+ if ((i7 & 255 | 0) != (i7 | 0)) {
+ _luaL_argerror(i1, i6, 7920) | 0;
+ }
+ HEAP8[i5 + (i6 + -1) | 0] = i7;
+ if ((i6 | 0) == (i3 | 0)) {
+ break;
+ } else {
+ i6 = i6 + 1 | 0;
+ }
+ }
+ _luaL_pushresultsize(i4, i3);
+ STACKTOP = i2;
+ return 1;
+}
+function _memcpy(i3, i2, i1) {
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i4 = 0;
+ if ((i1 | 0) >= 4096) return _emscripten_memcpy_big(i3 | 0, i2 | 0, i1 | 0) | 0;
+ i4 = i3 | 0;
+ if ((i3 & 3) == (i2 & 3)) {
+ while (i3 & 3) {
+ if ((i1 | 0) == 0) return i4 | 0;
+ HEAP8[i3] = HEAP8[i2] | 0;
+ i3 = i3 + 1 | 0;
+ i2 = i2 + 1 | 0;
+ i1 = i1 - 1 | 0;
+ }
+ while ((i1 | 0) >= 4) {
+ HEAP32[i3 >> 2] = HEAP32[i2 >> 2];
+ i3 = i3 + 4 | 0;
+ i2 = i2 + 4 | 0;
+ i1 = i1 - 4 | 0;
+ }
+ }
+ while ((i1 | 0) > 0) {
+ HEAP8[i3] = HEAP8[i2] | 0;
+ i3 = i3 + 1 | 0;
+ i2 = i2 + 1 | 0;
+ i1 = i1 - 1 | 0;
+ }
+ return i4 | 0;
+}
+function _luaK_exp2val(i1, i5) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i6 = 0;
+ i2 = STACKTOP;
+ i3 = i5 + 16 | 0;
+ i4 = i5 + 20 | 0;
+ if ((HEAP32[i3 >> 2] | 0) == (HEAP32[i4 >> 2] | 0)) {
+ _luaK_dischargevars(i1, i5);
+ STACKTOP = i2;
+ return;
+ }
+ _luaK_dischargevars(i1, i5);
+ if ((HEAP32[i5 >> 2] | 0) == 6) {
+ i6 = HEAP32[i5 + 8 >> 2] | 0;
+ if ((HEAP32[i3 >> 2] | 0) == (HEAP32[i4 >> 2] | 0)) {
+ STACKTOP = i2;
+ return;
+ }
+ if ((i6 | 0) >= (HEAPU8[i1 + 46 | 0] | 0 | 0)) {
+ _exp2reg(i1, i5, i6);
+ STACKTOP = i2;
+ return;
+ }
+ }
+ _luaK_exp2nextreg(i1, i5);
+ STACKTOP = i2;
+ return;
+}
+function _str_reverse(i5) {
+ i5 = i5 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i6 = 0, i7 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 1056 | 0;
+ i4 = i2 + 1040 | 0;
+ i1 = i2;
+ i3 = _luaL_checklstring(i5, 1, i4) | 0;
+ i5 = _luaL_buffinitsize(i5, i1, HEAP32[i4 >> 2] | 0) | 0;
+ i6 = HEAP32[i4 >> 2] | 0;
+ if ((i6 | 0) == 0) {
+ i7 = 0;
+ _luaL_pushresultsize(i1, i7);
+ STACKTOP = i2;
+ return 1;
+ } else {
+ i7 = 0;
+ }
+ do {
+ HEAP8[i5 + i7 | 0] = HEAP8[i3 + (i6 + ~i7) | 0] | 0;
+ i7 = i7 + 1 | 0;
+ i6 = HEAP32[i4 >> 2] | 0;
+ } while (i7 >>> 0 < i6 >>> 0);
+ _luaL_pushresultsize(i1, i6);
+ STACKTOP = i2;
+ return 1;
+}
+function _str_upper(i5) {
+ i5 = i5 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i6 = 0, i7 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 1056 | 0;
+ i4 = i1 + 1040 | 0;
+ i2 = i1;
+ i3 = _luaL_checklstring(i5, 1, i4) | 0;
+ i5 = _luaL_buffinitsize(i5, i2, HEAP32[i4 >> 2] | 0) | 0;
+ if ((HEAP32[i4 >> 2] | 0) == 0) {
+ i7 = 0;
+ _luaL_pushresultsize(i2, i7);
+ STACKTOP = i1;
+ return 1;
+ } else {
+ i6 = 0;
+ }
+ do {
+ HEAP8[i5 + i6 | 0] = _toupper(HEAPU8[i3 + i6 | 0] | 0 | 0) | 0;
+ i6 = i6 + 1 | 0;
+ i7 = HEAP32[i4 >> 2] | 0;
+ } while (i6 >>> 0 < i7 >>> 0);
+ _luaL_pushresultsize(i2, i7);
+ STACKTOP = i1;
+ return 1;
+}
+function _str_lower(i5) {
+ i5 = i5 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i6 = 0, i7 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 1056 | 0;
+ i4 = i1 + 1040 | 0;
+ i2 = i1;
+ i3 = _luaL_checklstring(i5, 1, i4) | 0;
+ i5 = _luaL_buffinitsize(i5, i2, HEAP32[i4 >> 2] | 0) | 0;
+ if ((HEAP32[i4 >> 2] | 0) == 0) {
+ i7 = 0;
+ _luaL_pushresultsize(i2, i7);
+ STACKTOP = i1;
+ return 1;
+ } else {
+ i6 = 0;
+ }
+ do {
+ HEAP8[i5 + i6 | 0] = _tolower(HEAPU8[i3 + i6 | 0] | 0 | 0) | 0;
+ i6 = i6 + 1 | 0;
+ i7 = HEAP32[i4 >> 2] | 0;
+ } while (i6 >>> 0 < i7 >>> 0);
+ _luaL_pushresultsize(i2, i7);
+ STACKTOP = i1;
+ return 1;
+}
+function ___divdi3(i1, i2, i3, i4) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ var i5 = 0, i6 = 0, i7 = 0, i8 = 0;
+ i5 = i2 >> 31 | ((i2 | 0) < 0 ? -1 : 0) << 1;
+ i6 = ((i2 | 0) < 0 ? -1 : 0) >> 31 | ((i2 | 0) < 0 ? -1 : 0) << 1;
+ i7 = i4 >> 31 | ((i4 | 0) < 0 ? -1 : 0) << 1;
+ i8 = ((i4 | 0) < 0 ? -1 : 0) >> 31 | ((i4 | 0) < 0 ? -1 : 0) << 1;
+ i1 = _i64Subtract(i5 ^ i1, i6 ^ i2, i5, i6) | 0;
+ i2 = tempRet0;
+ i5 = i7 ^ i5;
+ i6 = i8 ^ i6;
+ i7 = _i64Subtract((___udivmoddi4(i1, i2, _i64Subtract(i7 ^ i3, i8 ^ i4, i7, i8) | 0, tempRet0, 0) | 0) ^ i5, tempRet0 ^ i6, i5, i6) | 0;
+ return i7 | 0;
+}
+function _luaK_setoneret(i1, i4) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ i3 = HEAP32[i4 >> 2] | 0;
+ if ((i3 | 0) == 13) {
+ i3 = (HEAP32[(HEAP32[i1 >> 2] | 0) + 12 >> 2] | 0) + (HEAP32[i4 + 8 >> 2] << 2) | 0;
+ HEAP32[i3 >> 2] = HEAP32[i3 >> 2] & 8388607 | 16777216;
+ HEAP32[i4 >> 2] = 11;
+ STACKTOP = i2;
+ return;
+ } else if ((i3 | 0) == 12) {
+ HEAP32[i4 >> 2] = 6;
+ i4 = i4 + 8 | 0;
+ HEAP32[i4 >> 2] = (HEAP32[(HEAP32[(HEAP32[i1 >> 2] | 0) + 12 >> 2] | 0) + (HEAP32[i4 >> 2] << 2) >> 2] | 0) >>> 6 & 255;
+ STACKTOP = i2;
+ return;
+ } else {
+ STACKTOP = i2;
+ return;
+ }
+}
+function _luaV_tostring(i6, i1) {
+ i6 = i6 | 0;
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 48 | 0;
+ i3 = i2;
+ i4 = i2 + 8 | 0;
+ i5 = i1 + 8 | 0;
+ if ((HEAP32[i5 >> 2] | 0) != 3) {
+ i6 = 0;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ HEAPF64[tempDoublePtr >> 3] = +HEAPF64[i1 >> 3];
+ HEAP32[i3 >> 2] = HEAP32[tempDoublePtr >> 2];
+ HEAP32[i3 + 4 >> 2] = HEAP32[tempDoublePtr + 4 >> 2];
+ i6 = _luaS_newlstr(i6, i4, _sprintf(i4 | 0, 8936, i3 | 0) | 0) | 0;
+ HEAP32[i1 >> 2] = i6;
+ HEAP32[i5 >> 2] = HEAPU8[i6 + 4 | 0] | 0 | 64;
+ i6 = 1;
+ STACKTOP = i2;
+ return i6 | 0;
+}
+function _strcmp(i4, i2) {
+ i4 = i4 | 0;
+ i2 = i2 | 0;
+ var i1 = 0, i3 = 0, i5 = 0;
+ i1 = STACKTOP;
+ i5 = HEAP8[i4] | 0;
+ i3 = HEAP8[i2] | 0;
+ if (i5 << 24 >> 24 != i3 << 24 >> 24 | i5 << 24 >> 24 == 0 | i3 << 24 >> 24 == 0) {
+ i4 = i5;
+ i5 = i3;
+ i4 = i4 & 255;
+ i5 = i5 & 255;
+ i5 = i4 - i5 | 0;
+ STACKTOP = i1;
+ return i5 | 0;
+ }
+ do {
+ i4 = i4 + 1 | 0;
+ i2 = i2 + 1 | 0;
+ i5 = HEAP8[i4] | 0;
+ i3 = HEAP8[i2] | 0;
+ } while (!(i5 << 24 >> 24 != i3 << 24 >> 24 | i5 << 24 >> 24 == 0 | i3 << 24 >> 24 == 0));
+ i4 = i5 & 255;
+ i5 = i3 & 255;
+ i5 = i4 - i5 | 0;
+ STACKTOP = i1;
+ return i5 | 0;
+}
+function _lua_pushstring(i1, i3) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i4 = 0;
+ i2 = STACKTOP;
+ if ((i3 | 0) == 0) {
+ i3 = i1 + 8 | 0;
+ i1 = HEAP32[i3 >> 2] | 0;
+ HEAP32[i1 + 8 >> 2] = 0;
+ HEAP32[i3 >> 2] = i1 + 16;
+ i3 = 0;
+ STACKTOP = i2;
+ return i3 | 0;
+ }
+ if ((HEAP32[(HEAP32[i1 + 12 >> 2] | 0) + 12 >> 2] | 0) > 0) {
+ _luaC_step(i1);
+ }
+ i3 = _luaS_new(i1, i3) | 0;
+ i1 = i1 + 8 | 0;
+ i4 = HEAP32[i1 >> 2] | 0;
+ HEAP32[i4 >> 2] = i3;
+ HEAP32[i4 + 8 >> 2] = HEAPU8[i3 + 4 | 0] | 0 | 64;
+ HEAP32[i1 >> 2] = (HEAP32[i1 >> 2] | 0) + 16;
+ i3 = i3 + 16 | 0;
+ STACKTOP = i2;
+ return i3 | 0;
+}
+function _luaK_exp2anyreg(i1, i3) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i4 = 0, i5 = 0;
+ i2 = STACKTOP;
+ _luaK_dischargevars(i1, i3);
+ if ((HEAP32[i3 >> 2] | 0) == 6) {
+ i5 = i3 + 8 | 0;
+ i4 = HEAP32[i5 >> 2] | 0;
+ if ((HEAP32[i3 + 16 >> 2] | 0) == (HEAP32[i3 + 20 >> 2] | 0)) {
+ i5 = i4;
+ STACKTOP = i2;
+ return i5 | 0;
+ }
+ if ((i4 | 0) >= (HEAPU8[i1 + 46 | 0] | 0 | 0)) {
+ _exp2reg(i1, i3, i4);
+ i5 = HEAP32[i5 >> 2] | 0;
+ STACKTOP = i2;
+ return i5 | 0;
+ }
+ } else {
+ i5 = i3 + 8 | 0;
+ }
+ _luaK_exp2nextreg(i1, i3);
+ i5 = HEAP32[i5 >> 2] | 0;
+ STACKTOP = i2;
+ return i5 | 0;
+}
+function _check_match(i1, i4, i5, i6) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ i6 = i6 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = i2;
+ if ((HEAP32[i1 + 16 >> 2] | 0) == (i4 | 0)) {
+ _luaX_next(i1);
+ STACKTOP = i2;
+ return;
+ }
+ if ((HEAP32[i1 + 4 >> 2] | 0) == (i6 | 0)) {
+ _error_expected(i1, i4);
+ } else {
+ i2 = HEAP32[i1 + 52 >> 2] | 0;
+ i4 = _luaX_token2str(i1, i4) | 0;
+ i5 = _luaX_token2str(i1, i5) | 0;
+ HEAP32[i3 >> 2] = i4;
+ HEAP32[i3 + 4 >> 2] = i5;
+ HEAP32[i3 + 8 >> 2] = i6;
+ _luaX_syntaxerror(i1, _luaO_pushfstring(i2, 6840, i3) | 0);
+ }
+}
+function _fieldsel(i1, i6) {
+ i1 = i1 | 0;
+ i6 = i6 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i7 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 32 | 0;
+ i3 = i2;
+ i5 = i1 + 48 | 0;
+ i4 = HEAP32[i5 >> 2] | 0;
+ _luaK_exp2anyregup(i4, i6);
+ _luaX_next(i1);
+ if ((HEAP32[i1 + 16 >> 2] | 0) == 288) {
+ i7 = HEAP32[i1 + 24 >> 2] | 0;
+ _luaX_next(i1);
+ i5 = _luaK_stringK(HEAP32[i5 >> 2] | 0, i7) | 0;
+ HEAP32[i3 + 16 >> 2] = -1;
+ HEAP32[i3 + 20 >> 2] = -1;
+ HEAP32[i3 >> 2] = 4;
+ HEAP32[i3 + 8 >> 2] = i5;
+ _luaK_indexed(i4, i6, i3);
+ STACKTOP = i2;
+ return;
+ } else {
+ _error_expected(i1, 288);
+ }
+}
+function _luaK_exp2anyregup(i1, i3) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i4 = 0;
+ i2 = STACKTOP;
+ if ((HEAP32[i3 >> 2] | 0) == 8 ? (HEAP32[i3 + 16 >> 2] | 0) == (HEAP32[i3 + 20 >> 2] | 0) : 0) {
+ STACKTOP = i2;
+ return;
+ }
+ _luaK_dischargevars(i1, i3);
+ if ((HEAP32[i3 >> 2] | 0) == 6) {
+ i4 = HEAP32[i3 + 8 >> 2] | 0;
+ if ((HEAP32[i3 + 16 >> 2] | 0) == (HEAP32[i3 + 20 >> 2] | 0)) {
+ STACKTOP = i2;
+ return;
+ }
+ if ((i4 | 0) >= (HEAPU8[i1 + 46 | 0] | 0 | 0)) {
+ _exp2reg(i1, i3, i4);
+ STACKTOP = i2;
+ return;
+ }
+ }
+ _luaK_exp2nextreg(i1, i3);
+ STACKTOP = i2;
+ return;
+}
+function _lua_settop(i3, i5) {
+ i3 = i3 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i2 = 0, i4 = 0;
+ i1 = STACKTOP;
+ if (!((i5 | 0) > -1)) {
+ i4 = i3 + 8 | 0;
+ HEAP32[i4 >> 2] = (HEAP32[i4 >> 2] | 0) + (i5 + 1 << 4);
+ STACKTOP = i1;
+ return;
+ }
+ i2 = i3 + 8 | 0;
+ i4 = HEAP32[i2 >> 2] | 0;
+ i3 = (HEAP32[HEAP32[i3 + 16 >> 2] >> 2] | 0) + (i5 + 1 << 4) | 0;
+ if (i4 >>> 0 < i3 >>> 0) {
+ while (1) {
+ i5 = i4 + 16 | 0;
+ HEAP32[i4 + 8 >> 2] = 0;
+ if (i5 >>> 0 < i3 >>> 0) {
+ i4 = i5;
+ } else {
+ break;
+ }
+ }
+ HEAP32[i2 >> 2] = i5;
+ }
+ HEAP32[i2 >> 2] = i3;
+ STACKTOP = i1;
+ return;
+}
+function _luaL_fileresult(i1, i6, i5) {
+ i1 = i1 | 0;
+ i6 = i6 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i3 = 0, i4 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i4 = i2;
+ i3 = HEAP32[(___errno_location() | 0) >> 2] | 0;
+ if ((i6 | 0) != 0) {
+ _lua_pushboolean(i1, 1);
+ i6 = 1;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ _lua_pushnil(i1);
+ i6 = _strerror(i3 | 0) | 0;
+ if ((i5 | 0) == 0) {
+ _lua_pushstring(i1, i6) | 0;
+ } else {
+ HEAP32[i4 >> 2] = i5;
+ HEAP32[i4 + 4 >> 2] = i6;
+ _lua_pushfstring(i1, 1176, i4) | 0;
+ }
+ _lua_pushinteger(i1, i3);
+ i6 = 3;
+ STACKTOP = i2;
+ return i6 | 0;
+}
+function _luaL_pushmodule(i1, i4, i5) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = i2;
+ _luaL_findtable(i1, -1001e3, 1432, 1) | 0;
+ _lua_getfield(i1, -1, i4);
+ if ((_lua_type(i1, -1) | 0) == 5) {
+ _lua_remove(i1, -2);
+ STACKTOP = i2;
+ return;
+ }
+ _lua_settop(i1, -2);
+ _lua_rawgeti(i1, -1001e3, 2);
+ if ((_luaL_findtable(i1, 0, i4, i5) | 0) != 0) {
+ HEAP32[i3 >> 2] = i4;
+ _luaL_error(i1, 1440, i3) | 0;
+ }
+ _lua_pushvalue(i1, -1);
+ _lua_setfield(i1, -3, i4);
+ _lua_remove(i1, -2);
+ STACKTOP = i2;
+ return;
+}
+function _b_replace(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0;
+ i6 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = _luaL_checkunsigned(i1, 1) | 0;
+ i5 = _luaL_checkunsigned(i1, 2) | 0;
+ i4 = _luaL_checkinteger(i1, 3) | 0;
+ i2 = _luaL_optinteger(i1, 4, 1) | 0;
+ if (!((i4 | 0) > -1)) {
+ _luaL_argerror(i1, 3, 10440) | 0;
+ }
+ if ((i2 | 0) <= 0) {
+ _luaL_argerror(i1, 4, 10472) | 0;
+ }
+ if ((i2 + i4 | 0) > 32) {
+ _luaL_error(i1, 10496, i6) | 0;
+ }
+ i2 = ~(-2 << i2 + -1);
+ _lua_pushunsigned(i1, i3 & ~(i2 << i4) | (i5 & i2) << i4);
+ STACKTOP = i6;
+ return 1;
+}
+function _luaT_gettmbyobj(i1, i5, i3) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i4 = 0;
+ i2 = STACKTOP;
+ i4 = HEAP32[i5 + 8 >> 2] & 15;
+ if ((i4 | 0) == 5) {
+ i4 = HEAP32[(HEAP32[i5 >> 2] | 0) + 8 >> 2] | 0;
+ } else if ((i4 | 0) == 7) {
+ i4 = HEAP32[(HEAP32[i5 >> 2] | 0) + 8 >> 2] | 0;
+ } else {
+ i4 = HEAP32[(HEAP32[i1 + 12 >> 2] | 0) + (i4 << 2) + 252 >> 2] | 0;
+ }
+ if ((i4 | 0) == 0) {
+ i5 = 5192;
+ STACKTOP = i2;
+ return i5 | 0;
+ }
+ i5 = _luaH_getstr(i4, HEAP32[(HEAP32[i1 + 12 >> 2] | 0) + (i3 << 2) + 184 >> 2] | 0) | 0;
+ STACKTOP = i2;
+ return i5 | 0;
+}
+function _luaS_eqstr(i2, i3) {
+ i2 = i2 | 0;
+ i3 = i3 | 0;
+ var i1 = 0, i4 = 0;
+ i1 = STACKTOP;
+ i4 = HEAP8[i2 + 4 | 0] | 0;
+ do {
+ if (i4 << 24 >> 24 == (HEAP8[i3 + 4 | 0] | 0)) {
+ if (i4 << 24 >> 24 == 4) {
+ i2 = (i2 | 0) == (i3 | 0);
+ break;
+ }
+ i4 = HEAP32[i2 + 12 >> 2] | 0;
+ if ((i2 | 0) != (i3 | 0)) {
+ if ((i4 | 0) == (HEAP32[i3 + 12 >> 2] | 0)) {
+ i2 = (_memcmp(i2 + 16 | 0, i3 + 16 | 0, i4) | 0) == 0;
+ } else {
+ i2 = 0;
+ }
+ } else {
+ i2 = 1;
+ }
+ } else {
+ i2 = 0;
+ }
+ } while (0);
+ STACKTOP = i1;
+ return i2 & 1 | 0;
+}
+function _lua_concat(i1, i3) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i4 = 0;
+ i2 = STACKTOP;
+ if ((i3 | 0) > 1) {
+ if ((HEAP32[(HEAP32[i1 + 12 >> 2] | 0) + 12 >> 2] | 0) > 0) {
+ _luaC_step(i1);
+ }
+ _luaV_concat(i1, i3);
+ STACKTOP = i2;
+ return;
+ } else {
+ if ((i3 | 0) != 0) {
+ STACKTOP = i2;
+ return;
+ }
+ i3 = i1 + 8 | 0;
+ i4 = HEAP32[i3 >> 2] | 0;
+ i1 = _luaS_newlstr(i1, 936, 0) | 0;
+ HEAP32[i4 >> 2] = i1;
+ HEAP32[i4 + 8 >> 2] = HEAPU8[i1 + 4 | 0] | 0 | 64;
+ HEAP32[i3 >> 2] = (HEAP32[i3 >> 2] | 0) + 16;
+ STACKTOP = i2;
+ return;
+ }
+}
+function _ll_loadfunc(i1, i4, i3) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_getfield(i1, -1001e3, 4184);
+ _lua_getfield(i1, -1, i4);
+ i4 = _lua_touserdata(i1, -1) | 0;
+ _lua_settop(i1, -3);
+ if ((i4 | 0) == 0) {
+ _lua_pushlstring(i1, 4968, 58) | 0;
+ i4 = 1;
+ STACKTOP = i2;
+ return i4 | 0;
+ }
+ if ((HEAP8[i3] | 0) == 42) {
+ _lua_pushboolean(i1, 1);
+ i4 = 0;
+ STACKTOP = i2;
+ return i4 | 0;
+ } else {
+ _lua_pushlstring(i1, 4968, 58) | 0;
+ i4 = 2;
+ STACKTOP = i2;
+ return i4 | 0;
+ }
+ return 0;
+}
+function _memset(i1, i4, i3) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i5 = 0, i6 = 0, i7 = 0;
+ i2 = i1 + i3 | 0;
+ if ((i3 | 0) >= 20) {
+ i4 = i4 & 255;
+ i7 = i1 & 3;
+ i6 = i4 | i4 << 8 | i4 << 16 | i4 << 24;
+ i5 = i2 & ~3;
+ if (i7) {
+ i7 = i1 + 4 - i7 | 0;
+ while ((i1 | 0) < (i7 | 0)) {
+ HEAP8[i1] = i4;
+ i1 = i1 + 1 | 0;
+ }
+ }
+ while ((i1 | 0) < (i5 | 0)) {
+ HEAP32[i1 >> 2] = i6;
+ i1 = i1 + 4 | 0;
+ }
+ }
+ while ((i1 | 0) < (i2 | 0)) {
+ HEAP8[i1] = i4;
+ i1 = i1 + 1 | 0;
+ }
+ return i1 - i3 | 0;
+}
+function _luaD_growstack(i1, i3) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i4 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i4 = HEAP32[i1 + 32 >> 2] | 0;
+ if ((i4 | 0) > 1e6) {
+ _luaD_throw(i1, 6);
+ }
+ i3 = i3 + 5 + ((HEAP32[i1 + 8 >> 2] | 0) - (HEAP32[i1 + 28 >> 2] | 0) >> 4) | 0;
+ i4 = i4 << 1;
+ i4 = (i4 | 0) > 1e6 ? 1e6 : i4;
+ i3 = (i4 | 0) < (i3 | 0) ? i3 : i4;
+ if ((i3 | 0) > 1e6) {
+ _luaD_reallocstack(i1, 1000200);
+ _luaG_runerror(i1, 2224, i2);
+ } else {
+ _luaD_reallocstack(i1, i3);
+ STACKTOP = i2;
+ return;
+ }
+}
+function _luaL_callmeta(i1, i4, i3) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ i4 = _lua_absindex(i1, i4) | 0;
+ if ((_lua_getmetatable(i1, i4) | 0) == 0) {
+ i4 = 0;
+ STACKTOP = i2;
+ return i4 | 0;
+ }
+ _lua_pushstring(i1, i3) | 0;
+ _lua_rawget(i1, -2);
+ if ((_lua_type(i1, -1) | 0) == 0) {
+ _lua_settop(i1, -3);
+ i4 = 0;
+ STACKTOP = i2;
+ return i4 | 0;
+ } else {
+ _lua_remove(i1, -2);
+ _lua_pushvalue(i1, i4);
+ _lua_callk(i1, 1, 1, 0, 0);
+ i4 = 1;
+ STACKTOP = i2;
+ return i4 | 0;
+ }
+ return 0;
+}
+function _luaK_reserveregs(i8, i7) {
+ i8 = i8 | 0;
+ i7 = i7 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0;
+ i3 = STACKTOP;
+ i2 = i8 + 48 | 0;
+ i6 = HEAP8[i2] | 0;
+ i4 = (i6 & 255) + i7 | 0;
+ i5 = (HEAP32[i8 >> 2] | 0) + 78 | 0;
+ do {
+ if ((i4 | 0) > (HEAPU8[i5] | 0 | 0)) {
+ if ((i4 | 0) > 249) {
+ _luaX_syntaxerror(HEAP32[i8 + 12 >> 2] | 0, 10536);
+ } else {
+ HEAP8[i5] = i4;
+ i1 = HEAP8[i2] | 0;
+ break;
+ }
+ } else {
+ i1 = i6;
+ }
+ } while (0);
+ HEAP8[i2] = (i1 & 255) + i7;
+ STACKTOP = i3;
+ return;
+}
+function _aux_lines(i1, i5) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i6 = 0;
+ i4 = STACKTOP;
+ i3 = _lua_gettop(i1) | 0;
+ i2 = i3 + -1 | 0;
+ if ((i3 | 0) >= 19) {
+ _luaL_argerror(i1, 17, 3320) | 0;
+ }
+ _lua_pushvalue(i1, 1);
+ _lua_pushinteger(i1, i2);
+ _lua_pushboolean(i1, i5);
+ if ((i3 | 0) >= 2) {
+ i5 = 1;
+ while (1) {
+ i6 = i5 + 1 | 0;
+ _lua_pushvalue(i1, i6);
+ if ((i5 | 0) < (i2 | 0)) {
+ i5 = i6;
+ } else {
+ break;
+ }
+ }
+ }
+ _lua_pushcclosure(i1, 155, i3 + 2 | 0);
+ STACKTOP = i4;
+ return;
+}
+function _memcmp(i2, i4, i3) {
+ i2 = i2 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ var i1 = 0, i5 = 0, i6 = 0;
+ i1 = STACKTOP;
+ L1 : do {
+ if ((i3 | 0) == 0) {
+ i2 = 0;
+ } else {
+ while (1) {
+ i6 = HEAP8[i2] | 0;
+ i5 = HEAP8[i4] | 0;
+ if (!(i6 << 24 >> 24 == i5 << 24 >> 24)) {
+ break;
+ }
+ i3 = i3 + -1 | 0;
+ if ((i3 | 0) == 0) {
+ i2 = 0;
+ break L1;
+ } else {
+ i2 = i2 + 1 | 0;
+ i4 = i4 + 1 | 0;
+ }
+ }
+ i2 = (i6 & 255) - (i5 & 255) | 0;
+ }
+ } while (0);
+ STACKTOP = i1;
+ return i2 | 0;
+}
+function _b_arshift(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0;
+ i2 = STACKTOP;
+ i3 = _luaL_checkunsigned(i1, 1) | 0;
+ i4 = _luaL_checkinteger(i1, 2) | 0;
+ if ((i4 | 0) > -1 & (i3 | 0) < 0) {
+ if ((i4 | 0) > 31) {
+ i3 = -1;
+ } else {
+ i3 = i3 >>> i4 | ~(-1 >>> i4);
+ }
+ _lua_pushunsigned(i1, i3);
+ STACKTOP = i2;
+ return 1;
+ }
+ i5 = 0 - i4 | 0;
+ if ((i4 | 0) > 0) {
+ i3 = (i4 | 0) > 31 ? 0 : i3 >>> i4;
+ } else {
+ i3 = (i5 | 0) > 31 ? 0 : i3 << i5;
+ }
+ _lua_pushunsigned(i1, i3);
+ STACKTOP = i2;
+ return 1;
+}
+function _luaL_checkunsigned(i1, i5) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i6 = 0, i7 = 0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i4 = i3;
+ i6 = i3 + 8 | 0;
+ i2 = _lua_tounsignedx(i1, i5, i6) | 0;
+ if ((HEAP32[i6 >> 2] | 0) != 0) {
+ STACKTOP = i3;
+ return i2 | 0;
+ }
+ i7 = _lua_typename(i1, 3) | 0;
+ i6 = _lua_typename(i1, _lua_type(i1, i5) | 0) | 0;
+ HEAP32[i4 >> 2] = i7;
+ HEAP32[i4 + 4 >> 2] = i6;
+ _luaL_argerror(i1, i5, _lua_pushfstring(i1, 1744, i4) | 0) | 0;
+ STACKTOP = i3;
+ return i2 | 0;
+}
+function _luaB_loadfile(i2) {
+ i2 = i2 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0;
+ i1 = STACKTOP;
+ i4 = _luaL_optlstring(i2, 1, 0, 0) | 0;
+ i5 = _luaL_optlstring(i2, 2, 0, 0) | 0;
+ i3 = (_lua_type(i2, 3) | 0) != -1;
+ i6 = i3 ? 3 : 0;
+ if ((_luaL_loadfilex(i2, i4, i5) | 0) == 0) {
+ if (i3 ? (_lua_pushvalue(i2, i6), (_lua_setupvalue(i2, -2, 1) | 0) == 0) : 0) {
+ _lua_settop(i2, -2);
+ i2 = 1;
+ } else {
+ i2 = 1;
+ }
+ } else {
+ _lua_pushnil(i2);
+ _lua_insert(i2, -2);
+ i2 = 2;
+ }
+ STACKTOP = i1;
+ return i2 | 0;
+}
+function _luaL_checkinteger(i1, i5) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i6 = 0, i7 = 0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i4 = i3;
+ i6 = i3 + 8 | 0;
+ i2 = _lua_tointegerx(i1, i5, i6) | 0;
+ if ((HEAP32[i6 >> 2] | 0) != 0) {
+ STACKTOP = i3;
+ return i2 | 0;
+ }
+ i7 = _lua_typename(i1, 3) | 0;
+ i6 = _lua_typename(i1, _lua_type(i1, i5) | 0) | 0;
+ HEAP32[i4 >> 2] = i7;
+ HEAP32[i4 + 4 >> 2] = i6;
+ _luaL_argerror(i1, i5, _lua_pushfstring(i1, 1744, i4) | 0) | 0;
+ STACKTOP = i3;
+ return i2 | 0;
+}
+function _luaB_select(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0;
+ i3 = STACKTOP;
+ i2 = _lua_gettop(i1) | 0;
+ if ((_lua_type(i1, 1) | 0) == 4 ? (HEAP8[_lua_tolstring(i1, 1, 0) | 0] | 0) == 35 : 0) {
+ _lua_pushinteger(i1, i2 + -1 | 0);
+ i4 = 1;
+ STACKTOP = i3;
+ return i4 | 0;
+ }
+ i4 = _luaL_checkinteger(i1, 1) | 0;
+ if ((i4 | 0) < 0) {
+ i4 = i4 + i2 | 0;
+ } else {
+ i4 = (i4 | 0) > (i2 | 0) ? i2 : i4;
+ }
+ if ((i4 | 0) <= 0) {
+ _luaL_argerror(i1, 1, 9760) | 0;
+ }
+ i4 = i2 - i4 | 0;
+ STACKTOP = i3;
+ return i4 | 0;
+}
+function _luaX_next(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ HEAP32[i1 + 8 >> 2] = HEAP32[i1 + 4 >> 2];
+ i3 = i1 + 32 | 0;
+ if ((HEAP32[i3 >> 2] | 0) == 286) {
+ HEAP32[i1 + 16 >> 2] = _llex(i1, i1 + 24 | 0) | 0;
+ STACKTOP = i2;
+ return;
+ } else {
+ i1 = i1 + 16 | 0;
+ HEAP32[i1 + 0 >> 2] = HEAP32[i3 + 0 >> 2];
+ HEAP32[i1 + 4 >> 2] = HEAP32[i3 + 4 >> 2];
+ HEAP32[i1 + 8 >> 2] = HEAP32[i3 + 8 >> 2];
+ HEAP32[i1 + 12 >> 2] = HEAP32[i3 + 12 >> 2];
+ HEAP32[i3 >> 2] = 286;
+ STACKTOP = i2;
+ return;
+ }
+}
+function _lua_setglobal(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ var i3 = 0, i4 = 0, i5 = 0, i6 = 0;
+ i3 = STACKTOP;
+ i5 = _luaH_getint(HEAP32[(HEAP32[i1 + 12 >> 2] | 0) + 40 >> 2] | 0, 2) | 0;
+ i4 = i1 + 8 | 0;
+ i6 = HEAP32[i4 >> 2] | 0;
+ HEAP32[i4 >> 2] = i6 + 16;
+ i2 = _luaS_new(i1, i2) | 0;
+ HEAP32[i6 >> 2] = i2;
+ HEAP32[i6 + 8 >> 2] = HEAPU8[i2 + 4 | 0] | 0 | 64;
+ i2 = HEAP32[i4 >> 2] | 0;
+ _luaV_settable(i1, i5, i2 + -16 | 0, i2 + -32 | 0);
+ HEAP32[i4 >> 2] = (HEAP32[i4 >> 2] | 0) + -32;
+ STACKTOP = i3;
+ return;
+}
+function _luaL_checknumber(i1, i5) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ var d2 = 0.0, i3 = 0, i4 = 0, i6 = 0, i7 = 0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i4 = i3;
+ i6 = i3 + 8 | 0;
+ d2 = +_lua_tonumberx(i1, i5, i6);
+ if ((HEAP32[i6 >> 2] | 0) != 0) {
+ STACKTOP = i3;
+ return +d2;
+ }
+ i7 = _lua_typename(i1, 3) | 0;
+ i6 = _lua_typename(i1, _lua_type(i1, i5) | 0) | 0;
+ HEAP32[i4 >> 2] = i7;
+ HEAP32[i4 + 4 >> 2] = i6;
+ _luaL_argerror(i1, i5, _lua_pushfstring(i1, 1744, i4) | 0) | 0;
+ STACKTOP = i3;
+ return +d2;
+}
+function _luaZ_fill(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i4 = i2;
+ i3 = FUNCTION_TABLE_iiii[HEAP32[i1 + 8 >> 2] & 3](HEAP32[i1 + 16 >> 2] | 0, HEAP32[i1 + 12 >> 2] | 0, i4) | 0;
+ if ((i3 | 0) == 0) {
+ i4 = -1;
+ STACKTOP = i2;
+ return i4 | 0;
+ }
+ i4 = HEAP32[i4 >> 2] | 0;
+ if ((i4 | 0) == 0) {
+ i4 = -1;
+ STACKTOP = i2;
+ return i4 | 0;
+ }
+ HEAP32[i1 >> 2] = i4 + -1;
+ HEAP32[i1 + 4 >> 2] = i3 + 1;
+ i4 = HEAPU8[i3] | 0;
+ STACKTOP = i2;
+ return i4 | 0;
+}
+function _lua_createtable(i1, i3, i4) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ var i2 = 0, i5 = 0, i6 = 0, i7 = 0;
+ i2 = STACKTOP;
+ if ((HEAP32[(HEAP32[i1 + 12 >> 2] | 0) + 12 >> 2] | 0) > 0) {
+ _luaC_step(i1);
+ }
+ i5 = _luaH_new(i1) | 0;
+ i6 = i1 + 8 | 0;
+ i7 = HEAP32[i6 >> 2] | 0;
+ HEAP32[i7 >> 2] = i5;
+ HEAP32[i7 + 8 >> 2] = 69;
+ HEAP32[i6 >> 2] = (HEAP32[i6 >> 2] | 0) + 16;
+ if (!((i3 | 0) > 0 | (i4 | 0) > 0)) {
+ STACKTOP = i2;
+ return;
+ }
+ _luaH_resize(i1, i5, i3, i4);
+ STACKTOP = i2;
+ return;
+}
+function _generic_reader(i1, i3, i2) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ _luaL_checkstack(i1, 2, 9888);
+ _lua_pushvalue(i1, 1);
+ _lua_callk(i1, 0, 1, 0, 0);
+ if ((_lua_type(i1, -1) | 0) == 0) {
+ _lua_settop(i1, -2);
+ HEAP32[i2 >> 2] = 0;
+ i2 = 0;
+ STACKTOP = i3;
+ return i2 | 0;
+ }
+ if ((_lua_isstring(i1, -1) | 0) == 0) {
+ _luaL_error(i1, 9920, i3) | 0;
+ }
+ _lua_replace(i1, 5);
+ i2 = _lua_tolstring(i1, 5, i2) | 0;
+ STACKTOP = i3;
+ return i2 | 0;
+}
+function _luaZ_openspace(i5, i1, i6) {
+ i5 = i5 | 0;
+ i1 = i1 | 0;
+ i6 = i6 | 0;
+ var i2 = 0, i3 = 0, i4 = 0;
+ i2 = STACKTOP;
+ i4 = i1 + 8 | 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ if (!(i3 >>> 0 < i6 >>> 0)) {
+ i6 = HEAP32[i1 >> 2] | 0;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ i6 = i6 >>> 0 < 32 ? 32 : i6;
+ if ((i6 + 1 | 0) >>> 0 > 4294967293) {
+ _luaM_toobig(i5);
+ }
+ i5 = _luaM_realloc_(i5, HEAP32[i1 >> 2] | 0, i3, i6) | 0;
+ HEAP32[i1 >> 2] = i5;
+ HEAP32[i4 >> 2] = i6;
+ i6 = i5;
+ STACKTOP = i2;
+ return i6 | 0;
+}
+function _luaH_getstr(i4, i3) {
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ var i1 = 0, i2 = 0;
+ i2 = STACKTOP;
+ i4 = (HEAP32[i4 + 16 >> 2] | 0) + (((1 << (HEAPU8[i4 + 7 | 0] | 0)) + -1 & HEAP32[i3 + 8 >> 2]) << 5) | 0;
+ while (1) {
+ if ((HEAP32[i4 + 24 >> 2] | 0) == 68 ? (HEAP32[i4 + 16 >> 2] | 0) == (i3 | 0) : 0) {
+ break;
+ }
+ i4 = HEAP32[i4 + 28 >> 2] | 0;
+ if ((i4 | 0) == 0) {
+ i3 = 5192;
+ i1 = 6;
+ break;
+ }
+ }
+ if ((i1 | 0) == 6) {
+ STACKTOP = i2;
+ return i3 | 0;
+ }
+ STACKTOP = i2;
+ return i4 | 0;
+}
+function _b_extract(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0;
+ i5 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i2 = _luaL_checkunsigned(i1, 1) | 0;
+ i3 = _luaL_checkinteger(i1, 2) | 0;
+ i4 = _luaL_optinteger(i1, 3, 1) | 0;
+ if (!((i3 | 0) > -1)) {
+ _luaL_argerror(i1, 2, 10440) | 0;
+ }
+ if ((i4 | 0) <= 0) {
+ _luaL_argerror(i1, 3, 10472) | 0;
+ }
+ if ((i4 + i3 | 0) > 32) {
+ _luaL_error(i1, 10496, i5) | 0;
+ }
+ _lua_pushunsigned(i1, i2 >>> i3 & ~(-2 << i4 + -1));
+ STACKTOP = i5;
+ return 1;
+}
+function _luaL_checklstring(i1, i4, i5) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i3 = 0, i6 = 0, i7 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = i2;
+ i5 = _lua_tolstring(i1, i4, i5) | 0;
+ if ((i5 | 0) != 0) {
+ STACKTOP = i2;
+ return i5 | 0;
+ }
+ i7 = _lua_typename(i1, 4) | 0;
+ i6 = _lua_typename(i1, _lua_type(i1, i4) | 0) | 0;
+ HEAP32[i3 >> 2] = i7;
+ HEAP32[i3 + 4 >> 2] = i6;
+ _luaL_argerror(i1, i4, _lua_pushfstring(i1, 1744, i3) | 0) | 0;
+ STACKTOP = i2;
+ return i5 | 0;
+}
+function _db_traceback(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0;
+ i2 = STACKTOP;
+ if ((_lua_type(i1, 1) | 0) == 8) {
+ i3 = _lua_tothread(i1, 1) | 0;
+ i4 = 1;
+ } else {
+ i3 = i1;
+ i4 = 0;
+ }
+ i5 = i4 + 1 | 0;
+ i6 = _lua_tolstring(i1, i5, 0) | 0;
+ if ((i6 | 0) == 0 ? (_lua_type(i1, i5) | 0) >= 1 : 0) {
+ _lua_pushvalue(i1, i5);
+ STACKTOP = i2;
+ return 1;
+ }
+ _luaL_traceback(i1, i3, i6, _luaL_optinteger(i1, i4 | 2, (i3 | 0) == (i1 | 0) | 0) | 0);
+ STACKTOP = i2;
+ return 1;
+}
+function _f_setvbuf(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = _luaL_checkudata(i1, 1, 2832) | 0;
+ if ((HEAP32[i3 + 4 >> 2] | 0) == 0) {
+ _luaL_error(i1, 3080, i2) | 0;
+ }
+ i5 = HEAP32[i3 >> 2] | 0;
+ i4 = _luaL_checkoption(i1, 2, 0, 3128) | 0;
+ i3 = _luaL_optinteger(i1, 3, 1024) | 0;
+ i3 = _luaL_fileresult(i1, (_setvbuf(i5 | 0, 0, HEAP32[3112 + (i4 << 2) >> 2] | 0, i3 | 0) | 0) == 0 | 0, 0) | 0;
+ STACKTOP = i2;
+ return i3 | 0;
+}
+function _luaU_dump(i3, i1, i4, i2, i5) {
+ i3 = i3 | 0;
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i2 = i2 | 0;
+ i5 = i5 | 0;
+ var i6 = 0, i7 = 0, i8 = 0;
+ i6 = STACKTOP;
+ STACKTOP = STACKTOP + 48 | 0;
+ i8 = i6 + 20 | 0;
+ i7 = i6;
+ HEAP32[i7 >> 2] = i3;
+ HEAP32[i7 + 4 >> 2] = i4;
+ HEAP32[i7 + 8 >> 2] = i2;
+ HEAP32[i7 + 12 >> 2] = i5;
+ i5 = i7 + 16 | 0;
+ _luaU_header(i8);
+ HEAP32[i5 >> 2] = FUNCTION_TABLE_iiiii[i4 & 3](i3, i8, 18, i2) | 0;
+ _DumpFunction(i1, i7);
+ STACKTOP = i6;
+ return HEAP32[i5 >> 2] | 0;
+}
+function _luaB_setmetatable(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = _lua_type(i1, 2) | 0;
+ _luaL_checktype(i1, 1, 5);
+ if (!((i3 | 0) == 0 | (i3 | 0) == 5)) {
+ _luaL_argerror(i1, 2, 9680) | 0;
+ }
+ if ((_luaL_getmetafield(i1, 1, 9704) | 0) == 0) {
+ _lua_settop(i1, 2);
+ _lua_setmetatable(i1, 1) | 0;
+ i3 = 1;
+ STACKTOP = i2;
+ return i3 | 0;
+ } else {
+ i3 = _luaL_error(i1, 9720, i2) | 0;
+ STACKTOP = i2;
+ return i3 | 0;
+ }
+ return 0;
+}
+function _getF(i3, i2, i1) {
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i4 = 0;
+ i3 = STACKTOP;
+ i4 = HEAP32[i2 >> 2] | 0;
+ if ((i4 | 0) > 0) {
+ HEAP32[i1 >> 2] = i4;
+ HEAP32[i2 >> 2] = 0;
+ i4 = i2 + 8 | 0;
+ STACKTOP = i3;
+ return i4 | 0;
+ }
+ i4 = i2 + 4 | 0;
+ if ((_feof(HEAP32[i4 >> 2] | 0) | 0) != 0) {
+ i4 = 0;
+ STACKTOP = i3;
+ return i4 | 0;
+ }
+ i2 = i2 + 8 | 0;
+ HEAP32[i1 >> 2] = _fread(i2 | 0, 1, 1024, HEAP32[i4 >> 2] | 0) | 0;
+ i4 = i2;
+ STACKTOP = i3;
+ return i4 | 0;
+}
+function _luaL_where(i1, i6) {
+ i1 = i1 | 0;
+ i6 = i6 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0;
+ i4 = STACKTOP;
+ STACKTOP = STACKTOP + 112 | 0;
+ i3 = i4;
+ i2 = i4 + 8 | 0;
+ if ((_lua_getstack(i1, i6, i2) | 0) != 0 ? (_lua_getinfo(i1, 1152, i2) | 0, i5 = HEAP32[i2 + 20 >> 2] | 0, (i5 | 0) > 0) : 0) {
+ HEAP32[i3 >> 2] = i2 + 36;
+ HEAP32[i3 + 4 >> 2] = i5;
+ _lua_pushfstring(i1, 1160, i3) | 0;
+ STACKTOP = i4;
+ return;
+ }
+ _lua_pushlstring(i1, 1168, 0) | 0;
+ STACKTOP = i4;
+ return;
+}
+function _hookf(i1, i3) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _luaL_getsubtable(i1, -1001e3, 11584) | 0;
+ _lua_pushthread(i1) | 0;
+ _lua_rawget(i1, -2);
+ if ((_lua_type(i1, -1) | 0) != 6) {
+ STACKTOP = i2;
+ return;
+ }
+ _lua_pushstring(i1, HEAP32[11608 + (HEAP32[i3 >> 2] << 2) >> 2] | 0) | 0;
+ i3 = HEAP32[i3 + 20 >> 2] | 0;
+ if ((i3 | 0) > -1) {
+ _lua_pushinteger(i1, i3);
+ } else {
+ _lua_pushnil(i1);
+ }
+ _lua_callk(i1, 2, 0, 0, 0);
+ STACKTOP = i2;
+ return;
+}
+function _luaV_tonumber(i5, i2) {
+ i5 = i5 | 0;
+ i2 = i2 | 0;
+ var i1 = 0, i3 = 0, i4 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = i1;
+ i4 = HEAP32[i5 + 8 >> 2] | 0;
+ if ((i4 | 0) != 3) {
+ if ((i4 & 15 | 0) == 4 ? (i5 = HEAP32[i5 >> 2] | 0, (_luaO_str2d(i5 + 16 | 0, HEAP32[i5 + 12 >> 2] | 0, i3) | 0) != 0) : 0) {
+ HEAPF64[i2 >> 3] = +HEAPF64[i3 >> 3];
+ HEAP32[i2 + 8 >> 2] = 3;
+ } else {
+ i2 = 0;
+ }
+ } else {
+ i2 = i5;
+ }
+ STACKTOP = i1;
+ return i2 | 0;
+}
+function _luaO_arith(i3, d1, d2) {
+ i3 = i3 | 0;
+ d1 = +d1;
+ d2 = +d2;
+ switch (i3 | 0) {
+ case 4:
+ {
+ d1 = d1 - +Math_floor(+(d1 / d2)) * d2;
+ break;
+ }
+ case 6:
+ {
+ d1 = -d1;
+ break;
+ }
+ case 0:
+ {
+ d1 = d1 + d2;
+ break;
+ }
+ case 1:
+ {
+ d1 = d1 - d2;
+ break;
+ }
+ case 5:
+ {
+ d1 = +Math_pow(+d1, +d2);
+ break;
+ }
+ case 3:
+ {
+ d1 = d1 / d2;
+ break;
+ }
+ case 2:
+ {
+ d1 = d1 * d2;
+ break;
+ }
+ default:
+ {
+ d1 = 0.0;
+ }
+ }
+ return +d1;
+}
+function _luaB_coresume(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ i3 = _lua_tothread(i1, 1) | 0;
+ if ((i3 | 0) == 0) {
+ _luaL_argerror(i1, 1, 10856) | 0;
+ }
+ i3 = _auxresume(i1, i3, (_lua_gettop(i1) | 0) + -1 | 0) | 0;
+ if ((i3 | 0) < 0) {
+ _lua_pushboolean(i1, 0);
+ _lua_insert(i1, -2);
+ i3 = 2;
+ STACKTOP = i2;
+ return i3 | 0;
+ } else {
+ _lua_pushboolean(i1, 1);
+ _lua_insert(i1, ~i3);
+ i3 = i3 + 1 | 0;
+ STACKTOP = i2;
+ return i3 | 0;
+ }
+ return 0;
+}
+function _pairsmeta(i1, i5, i4, i3) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ if ((_luaL_getmetafield(i1, 1, i5) | 0) != 0) {
+ _lua_pushvalue(i1, 1);
+ _lua_callk(i1, 1, 3, 0, 0);
+ STACKTOP = i2;
+ return;
+ }
+ _luaL_checktype(i1, 1, 5);
+ _lua_pushcclosure(i1, i3, 0);
+ _lua_pushvalue(i1, 1);
+ if ((i4 | 0) == 0) {
+ _lua_pushnil(i1);
+ STACKTOP = i2;
+ return;
+ } else {
+ _lua_pushinteger(i1, 0);
+ STACKTOP = i2;
+ return;
+ }
+}
+function _io_close(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ if ((_lua_type(i1, 1) | 0) == -1) {
+ _lua_getfield(i1, -1001e3, 2800);
+ }
+ if ((HEAP32[(_luaL_checkudata(i1, 1, 2832) | 0) + 4 >> 2] | 0) == 0) {
+ _luaL_error(i1, 3080, i2) | 0;
+ }
+ i4 = (_luaL_checkudata(i1, 1, 2832) | 0) + 4 | 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ HEAP32[i4 >> 2] = 0;
+ i1 = FUNCTION_TABLE_ii[i3 & 255](i1) | 0;
+ STACKTOP = i2;
+ return i1 | 0;
+}
+function _pack(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ i3 = _lua_gettop(i1) | 0;
+ _lua_createtable(i1, i3, 1);
+ _lua_pushinteger(i1, i3);
+ _lua_setfield(i1, -2, 8312);
+ if ((i3 | 0) <= 0) {
+ STACKTOP = i2;
+ return 1;
+ }
+ _lua_pushvalue(i1, 1);
+ _lua_rawseti(i1, -2, 1);
+ _lua_replace(i1, 1);
+ if ((i3 | 0) <= 1) {
+ STACKTOP = i2;
+ return 1;
+ }
+ do {
+ _lua_rawseti(i1, 1, i3);
+ i3 = i3 + -1 | 0;
+ } while ((i3 | 0) > 1);
+ STACKTOP = i2;
+ return 1;
+}
+function _luaL_execresult(i1, i3) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ if ((i3 | 0) == -1) {
+ i3 = HEAP32[(___errno_location() | 0) >> 2] | 0;
+ _lua_pushnil(i1);
+ _lua_pushstring(i1, _strerror(i3 | 0) | 0) | 0;
+ _lua_pushinteger(i1, i3);
+ STACKTOP = i2;
+ return 3;
+ } else if ((i3 | 0) == 0) {
+ _lua_pushboolean(i1, 1);
+ } else {
+ _lua_pushnil(i1);
+ }
+ _lua_pushstring(i1, 1184) | 0;
+ _lua_pushinteger(i1, i3);
+ STACKTOP = i2;
+ return 3;
+}
+function _lua_getglobal(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ var i3 = 0, i4 = 0, i5 = 0, i6 = 0;
+ i3 = STACKTOP;
+ i4 = _luaH_getint(HEAP32[(HEAP32[i1 + 12 >> 2] | 0) + 40 >> 2] | 0, 2) | 0;
+ i5 = i1 + 8 | 0;
+ i6 = HEAP32[i5 >> 2] | 0;
+ HEAP32[i5 >> 2] = i6 + 16;
+ i2 = _luaS_new(i1, i2) | 0;
+ HEAP32[i6 >> 2] = i2;
+ HEAP32[i6 + 8 >> 2] = HEAPU8[i2 + 4 | 0] | 0 | 64;
+ i2 = (HEAP32[i5 >> 2] | 0) + -16 | 0;
+ _luaV_gettable(i1, i4, i2, i2);
+ STACKTOP = i3;
+ return;
+}
+function _luaL_checktype(i1, i5, i4) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ i4 = i4 | 0;
+ var i2 = 0, i3 = 0, i6 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = i2;
+ if ((_lua_type(i1, i5) | 0) == (i4 | 0)) {
+ STACKTOP = i2;
+ return;
+ }
+ i6 = _lua_typename(i1, i4) | 0;
+ i4 = _lua_typename(i1, _lua_type(i1, i5) | 0) | 0;
+ HEAP32[i3 >> 2] = i6;
+ HEAP32[i3 + 4 >> 2] = i4;
+ _luaL_argerror(i1, i5, _lua_pushfstring(i1, 1744, i3) | 0) | 0;
+ STACKTOP = i2;
+ return;
+}
+function _luaC_newobj(i7, i4, i6, i5, i1) {
+ i7 = i7 | 0;
+ i4 = i4 | 0;
+ i6 = i6 | 0;
+ i5 = i5 | 0;
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ i3 = HEAP32[i7 + 12 >> 2] | 0;
+ i7 = _luaM_realloc_(i7, 0, i4 & 15, i6) | 0;
+ i6 = i7 + i1 | 0;
+ i5 = (i5 | 0) == 0 ? i3 + 68 | 0 : i5;
+ HEAP8[i7 + (i1 + 5) | 0] = HEAP8[i3 + 60 | 0] & 3;
+ HEAP8[i7 + (i1 + 4) | 0] = i4;
+ HEAP32[i6 >> 2] = HEAP32[i5 >> 2];
+ HEAP32[i5 >> 2] = i6;
+ STACKTOP = i2;
+ return i6 | 0;
+}
+function _luaL_requiref(i1, i3, i5, i4) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ i5 = i5 | 0;
+ i4 = i4 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_pushcclosure(i1, i5, 0);
+ _lua_pushstring(i1, i3) | 0;
+ _lua_callk(i1, 1, 1, 0, 0);
+ _luaL_getsubtable(i1, -1001e3, 1432) | 0;
+ _lua_pushvalue(i1, -2);
+ _lua_setfield(i1, -2, i3);
+ _lua_settop(i1, -2);
+ if ((i4 | 0) == 0) {
+ STACKTOP = i2;
+ return;
+ }
+ _lua_pushvalue(i1, -1);
+ _lua_setglobal(i1, i3);
+ STACKTOP = i2;
+ return;
+}
+function _luaG_ordererror(i1, i3, i4) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = HEAP32[8528 + ((HEAP32[i3 + 8 >> 2] & 15) + 1 << 2) >> 2] | 0;
+ i4 = HEAP32[8528 + ((HEAP32[i4 + 8 >> 2] & 15) + 1 << 2) >> 2] | 0;
+ if ((i3 | 0) == (i4 | 0)) {
+ HEAP32[i2 >> 2] = i3;
+ _luaG_runerror(i1, 1952, i2);
+ } else {
+ HEAP32[i2 >> 2] = i3;
+ HEAP32[i2 + 4 >> 2] = i4;
+ _luaG_runerror(i1, 1992, i2);
+ }
+}
+function _io_popen(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = _luaL_checklstring(i1, 1, 0) | 0;
+ _luaL_optlstring(i1, 2, 3480, 0) | 0;
+ i5 = _lua_newuserdata(i1, 8) | 0;
+ i4 = i5 + 4 | 0;
+ HEAP32[i4 >> 2] = 0;
+ _luaL_setmetatable(i1, 2832);
+ _luaL_error(i1, 3488, i2) | 0;
+ HEAP32[i5 >> 2] = 0;
+ HEAP32[i4 >> 2] = 157;
+ i1 = _luaL_fileresult(i1, 0, i3) | 0;
+ STACKTOP = i2;
+ return i1 | 0;
+}
+function _sort_comp(i1, i3, i4) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ if ((_lua_type(i1, 2) | 0) == 0) {
+ i4 = _lua_compare(i1, i3, i4, 1) | 0;
+ STACKTOP = i2;
+ return i4 | 0;
+ } else {
+ _lua_pushvalue(i1, 2);
+ _lua_pushvalue(i1, i3 + -1 | 0);
+ _lua_pushvalue(i1, i4 + -2 | 0);
+ _lua_callk(i1, 2, 1, 0, 0);
+ i4 = _lua_toboolean(i1, -1) | 0;
+ _lua_settop(i1, -2);
+ STACKTOP = i2;
+ return i4 | 0;
+ }
+ return 0;
+}
+function _db_upvalueid(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 112 | 0;
+ i4 = i3;
+ i2 = _luaL_checkinteger(i1, 2) | 0;
+ _luaL_checktype(i1, 1, 6);
+ _lua_pushvalue(i1, 1);
+ _lua_getinfo(i1, 11728, i4) | 0;
+ if (!((i2 | 0) > 0 ? (i2 | 0) <= (HEAPU8[i4 + 32 | 0] | 0 | 0) : 0)) {
+ _luaL_argerror(i1, 2, 11736) | 0;
+ }
+ _lua_pushlightuserdata(i1, _lua_upvalueid(i1, 1, i2) | 0);
+ STACKTOP = i3;
+ return 1;
+}
+function _luaL_getmetafield(i2, i4, i3) {
+ i2 = i2 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ var i1 = 0;
+ i1 = STACKTOP;
+ do {
+ if ((_lua_getmetatable(i2, i4) | 0) != 0) {
+ _lua_pushstring(i2, i3) | 0;
+ _lua_rawget(i2, -2);
+ if ((_lua_type(i2, -1) | 0) == 0) {
+ _lua_settop(i2, -3);
+ i2 = 0;
+ break;
+ } else {
+ _lua_remove(i2, -2);
+ i2 = 1;
+ break;
+ }
+ } else {
+ i2 = 0;
+ }
+ } while (0);
+ STACKTOP = i1;
+ return i2 | 0;
+}
+function _luaF_freeupval(i1, i3) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i4 = 0, i5 = 0;
+ i2 = STACKTOP;
+ if ((HEAP32[i3 + 8 >> 2] | 0) == (i3 + 16 | 0)) {
+ _luaM_realloc_(i1, i3, 32, 0) | 0;
+ STACKTOP = i2;
+ return;
+ }
+ i4 = i3 + 16 | 0;
+ i5 = i4 + 4 | 0;
+ HEAP32[(HEAP32[i5 >> 2] | 0) + 16 >> 2] = HEAP32[i4 >> 2];
+ HEAP32[(HEAP32[i4 >> 2] | 0) + 20 >> 2] = HEAP32[i5 >> 2];
+ _luaM_realloc_(i1, i3, 32, 0) | 0;
+ STACKTOP = i2;
+ return;
+}
+function _luaL_addvalue(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i4 = i2;
+ i5 = HEAP32[i1 + 12 >> 2] | 0;
+ i3 = _lua_tolstring(i5, -1, i4) | 0;
+ i6 = i1 + 16 | 0;
+ if ((HEAP32[i1 >> 2] | 0) != (i6 | 0)) {
+ _lua_insert(i5, -2);
+ }
+ _luaL_addlstring(i1, i3, HEAP32[i4 >> 2] | 0);
+ _lua_remove(i5, (HEAP32[i1 >> 2] | 0) != (i6 | 0) ? -2 : -1);
+ STACKTOP = i2;
+ return;
+}
+function _escerror(i1, i4, i3, i2) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ var i5 = 0, i6 = 0;
+ HEAP32[(HEAP32[i1 + 60 >> 2] | 0) + 4 >> 2] = 0;
+ _save(i1, 92);
+ L1 : do {
+ if ((i3 | 0) > 0) {
+ i5 = 0;
+ do {
+ i6 = HEAP32[i4 + (i5 << 2) >> 2] | 0;
+ if ((i6 | 0) == -1) {
+ break L1;
+ }
+ _save(i1, i6);
+ i5 = i5 + 1 | 0;
+ } while ((i5 | 0) < (i3 | 0));
+ }
+ } while (0);
+ _lexerror(i1, i2, 289);
+}
+function _pushglobalfuncname(i1, i4) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ i3 = _lua_gettop(i1) | 0;
+ _lua_getinfo(i1, 1768, i4) | 0;
+ _lua_rawgeti(i1, -1001e3, 2);
+ i4 = i3 + 1 | 0;
+ if ((_findfield(i1, i4, 2) | 0) == 0) {
+ _lua_settop(i1, i3);
+ i4 = 0;
+ STACKTOP = i2;
+ return i4 | 0;
+ } else {
+ _lua_copy(i1, -1, i4);
+ _lua_settop(i1, -3);
+ i4 = 1;
+ STACKTOP = i2;
+ return i4 | 0;
+ }
+ return 0;
+}
+function copyTempDouble(i1) {
+ i1 = i1 | 0;
+ HEAP8[tempDoublePtr] = HEAP8[i1];
+ HEAP8[tempDoublePtr + 1 | 0] = HEAP8[i1 + 1 | 0];
+ HEAP8[tempDoublePtr + 2 | 0] = HEAP8[i1 + 2 | 0];
+ HEAP8[tempDoublePtr + 3 | 0] = HEAP8[i1 + 3 | 0];
+ HEAP8[tempDoublePtr + 4 | 0] = HEAP8[i1 + 4 | 0];
+ HEAP8[tempDoublePtr + 5 | 0] = HEAP8[i1 + 5 | 0];
+ HEAP8[tempDoublePtr + 6 | 0] = HEAP8[i1 + 6 | 0];
+ HEAP8[tempDoublePtr + 7 | 0] = HEAP8[i1 + 7 | 0];
+}
+function _lua_pushlstring(i1, i3, i4) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ if ((HEAP32[(HEAP32[i1 + 12 >> 2] | 0) + 12 >> 2] | 0) > 0) {
+ _luaC_step(i1);
+ }
+ i4 = _luaS_newlstr(i1, i3, i4) | 0;
+ i3 = i1 + 8 | 0;
+ i1 = HEAP32[i3 >> 2] | 0;
+ HEAP32[i1 >> 2] = i4;
+ HEAP32[i1 + 8 >> 2] = HEAPU8[i4 + 4 | 0] | 0 | 64;
+ HEAP32[i3 >> 2] = (HEAP32[i3 >> 2] | 0) + 16;
+ STACKTOP = i2;
+ return i4 + 16 | 0;
+}
+function _ll_searchpath(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0;
+ i2 = STACKTOP;
+ i5 = _luaL_checklstring(i1, 1, 0) | 0;
+ i4 = _luaL_checklstring(i1, 2, 0) | 0;
+ i3 = _luaL_optlstring(i1, 3, 4936, 0) | 0;
+ if ((_searchpath(i1, i5, i4, i3, _luaL_optlstring(i1, 4, 4848, 0) | 0) | 0) != 0) {
+ i5 = 1;
+ STACKTOP = i2;
+ return i5 | 0;
+ }
+ _lua_pushnil(i1);
+ _lua_insert(i1, -2);
+ i5 = 2;
+ STACKTOP = i2;
+ return i5 | 0;
+}
+function _math_log(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, d3 = 0.0, d4 = 0.0;
+ i2 = STACKTOP;
+ d3 = +_luaL_checknumber(i1, 1);
+ do {
+ if ((_lua_type(i1, 2) | 0) >= 1) {
+ d4 = +_luaL_checknumber(i1, 2);
+ if (d4 == 10.0) {
+ d3 = +_log10(+d3);
+ break;
+ } else {
+ d3 = +Math_log(+d3) / +Math_log(+d4);
+ break;
+ }
+ } else {
+ d3 = +Math_log(+d3);
+ }
+ } while (0);
+ _lua_pushnumber(i1, d3);
+ STACKTOP = i2;
+ return 1;
+}
+function _luaT_init(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0;
+ i2 = STACKTOP;
+ i3 = i1 + 12 | 0;
+ i4 = 0;
+ do {
+ i5 = _luaS_new(i1, HEAP32[8576 + (i4 << 2) >> 2] | 0) | 0;
+ HEAP32[(HEAP32[i3 >> 2] | 0) + (i4 << 2) + 184 >> 2] = i5;
+ i5 = (HEAP32[(HEAP32[i3 >> 2] | 0) + (i4 << 2) + 184 >> 2] | 0) + 5 | 0;
+ HEAP8[i5] = HEAPU8[i5] | 0 | 32;
+ i4 = i4 + 1 | 0;
+ } while ((i4 | 0) != 17);
+ STACKTOP = i2;
+ return;
+}
+function _f_gc(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0;
+ i2 = STACKTOP;
+ i3 = _luaL_checkudata(i1, 1, 2832) | 0;
+ if ((HEAP32[i3 + 4 >> 2] | 0) == 0) {
+ STACKTOP = i2;
+ return 0;
+ }
+ if ((HEAP32[i3 >> 2] | 0) == 0) {
+ STACKTOP = i2;
+ return 0;
+ }
+ i4 = (_luaL_checkudata(i1, 1, 2832) | 0) + 4 | 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ HEAP32[i4 >> 2] = 0;
+ FUNCTION_TABLE_ii[i3 & 255](i1) | 0;
+ STACKTOP = i2;
+ return 0;
+}
+function ___shlim(i1, i5) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i6 = 0;
+ i2 = STACKTOP;
+ HEAP32[i1 + 104 >> 2] = i5;
+ i4 = HEAP32[i1 + 8 >> 2] | 0;
+ i3 = HEAP32[i1 + 4 >> 2] | 0;
+ i6 = i4 - i3 | 0;
+ HEAP32[i1 + 108 >> 2] = i6;
+ if ((i5 | 0) != 0 & (i6 | 0) > (i5 | 0)) {
+ HEAP32[i1 + 100 >> 2] = i3 + i5;
+ STACKTOP = i2;
+ return;
+ } else {
+ HEAP32[i1 + 100 >> 2] = i4;
+ STACKTOP = i2;
+ return;
+ }
+}
+function _lua_sethook(i4, i6, i1, i5) {
+ i4 = i4 | 0;
+ i6 = i6 | 0;
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = (i6 | 0) == 0 | (i1 | 0) == 0;
+ i3 = HEAP32[i4 + 16 >> 2] | 0;
+ if (!((HEAP8[i3 + 18 | 0] & 1) == 0)) {
+ HEAP32[i4 + 20 >> 2] = HEAP32[i3 + 28 >> 2];
+ }
+ HEAP32[i4 + 52 >> 2] = i2 ? 0 : i6;
+ HEAP32[i4 + 44 >> 2] = i5;
+ HEAP32[i4 + 48 >> 2] = i5;
+ HEAP8[i4 + 40 | 0] = i2 ? 0 : i1 & 255;
+ return 1;
+}
+function _io_tmpfile(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0;
+ i2 = STACKTOP;
+ i4 = _lua_newuserdata(i1, 8) | 0;
+ i3 = i4 + 4 | 0;
+ HEAP32[i3 >> 2] = 0;
+ _luaL_setmetatable(i1, 2832);
+ HEAP32[i4 >> 2] = 0;
+ HEAP32[i3 >> 2] = 156;
+ i3 = _tmpfile() | 0;
+ HEAP32[i4 >> 2] = i3;
+ if ((i3 | 0) != 0) {
+ i4 = 1;
+ STACKTOP = i2;
+ return i4 | 0;
+ }
+ i4 = _luaL_fileresult(i1, 0, 0) | 0;
+ STACKTOP = i2;
+ return i4 | 0;
+}
+function _luaL_checkstack(i1, i5, i4) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ i4 = i4 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = i2;
+ if ((_lua_checkstack(i1, i5 + 20 | 0) | 0) != 0) {
+ STACKTOP = i2;
+ return;
+ }
+ if ((i4 | 0) == 0) {
+ _luaL_error(i1, 1240, i3) | 0;
+ STACKTOP = i2;
+ return;
+ } else {
+ HEAP32[i3 >> 2] = i4;
+ _luaL_error(i1, 1216, i3) | 0;
+ STACKTOP = i2;
+ return;
+ }
+}
+function _b_rshift(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0;
+ i2 = STACKTOP;
+ i4 = _luaL_checkunsigned(i1, 1) | 0;
+ i3 = _luaL_checkinteger(i1, 2) | 0;
+ i5 = 0 - i3 | 0;
+ if ((i3 | 0) > 0) {
+ i5 = (i3 | 0) > 31 ? 0 : i4 >>> i3;
+ _lua_pushunsigned(i1, i5);
+ STACKTOP = i2;
+ return 1;
+ } else {
+ i5 = (i5 | 0) > 31 ? 0 : i4 << i5;
+ _lua_pushunsigned(i1, i5);
+ STACKTOP = i2;
+ return 1;
+ }
+ return 0;
+}
+function _b_lshift(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0;
+ i2 = STACKTOP;
+ i3 = _luaL_checkunsigned(i1, 1) | 0;
+ i4 = _luaL_checkinteger(i1, 2) | 0;
+ if ((i4 | 0) < 0) {
+ i4 = 0 - i4 | 0;
+ i4 = (i4 | 0) > 31 ? 0 : i3 >>> i4;
+ _lua_pushunsigned(i1, i4);
+ STACKTOP = i2;
+ return 1;
+ } else {
+ i4 = (i4 | 0) > 31 ? 0 : i3 << i4;
+ _lua_pushunsigned(i1, i4);
+ STACKTOP = i2;
+ return 1;
+ }
+ return 0;
+}
+function _math_min(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, d5 = 0.0, d6 = 0.0;
+ i2 = STACKTOP;
+ i3 = _lua_gettop(i1) | 0;
+ d5 = +_luaL_checknumber(i1, 1);
+ if ((i3 | 0) >= 2) {
+ i4 = 2;
+ while (1) {
+ d6 = +_luaL_checknumber(i1, i4);
+ d5 = d6 < d5 ? d6 : d5;
+ if ((i4 | 0) == (i3 | 0)) {
+ break;
+ } else {
+ i4 = i4 + 1 | 0;
+ }
+ }
+ }
+ _lua_pushnumber(i1, d5);
+ STACKTOP = i2;
+ return 1;
+}
+function _math_max(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, d5 = 0.0, d6 = 0.0;
+ i2 = STACKTOP;
+ i3 = _lua_gettop(i1) | 0;
+ d5 = +_luaL_checknumber(i1, 1);
+ if ((i3 | 0) >= 2) {
+ i4 = 2;
+ while (1) {
+ d6 = +_luaL_checknumber(i1, i4);
+ d5 = d6 > d5 ? d6 : d5;
+ if ((i4 | 0) == (i3 | 0)) {
+ break;
+ } else {
+ i4 = i4 + 1 | 0;
+ }
+ }
+ }
+ _lua_pushnumber(i1, d5);
+ STACKTOP = i2;
+ return 1;
+}
+function _io_type(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ _luaL_checkany(i1, 1);
+ i3 = _luaL_testudata(i1, 1, 2832) | 0;
+ if ((i3 | 0) == 0) {
+ _lua_pushnil(i1);
+ STACKTOP = i2;
+ return 1;
+ }
+ if ((HEAP32[i3 + 4 >> 2] | 0) == 0) {
+ _lua_pushlstring(i1, 3456, 11) | 0;
+ STACKTOP = i2;
+ return 1;
+ } else {
+ _lua_pushlstring(i1, 3472, 4) | 0;
+ STACKTOP = i2;
+ return 1;
+ }
+ return 0;
+}
+function _luaF_newLclosure(i3, i2) {
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ var i1 = 0, i4 = 0;
+ i1 = STACKTOP;
+ i3 = _luaC_newobj(i3, 6, (i2 << 2) + 16 | 0, 0, 0) | 0;
+ HEAP32[i3 + 12 >> 2] = 0;
+ HEAP8[i3 + 6 | 0] = i2;
+ if ((i2 | 0) == 0) {
+ STACKTOP = i1;
+ return i3 | 0;
+ }
+ i4 = i3 + 16 | 0;
+ do {
+ i2 = i2 + -1 | 0;
+ HEAP32[i4 + (i2 << 2) >> 2] = 0;
+ } while ((i2 | 0) != 0);
+ STACKTOP = i1;
+ return i3 | 0;
+}
+function _io_flush(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i4 = i2;
+ _lua_getfield(i1, -1001e3, 2800);
+ i3 = _lua_touserdata(i1, -1) | 0;
+ if ((HEAP32[i3 + 4 >> 2] | 0) == 0) {
+ HEAP32[i4 >> 2] = 2804;
+ _luaL_error(i1, 3424, i4) | 0;
+ }
+ i4 = _luaL_fileresult(i1, (_fflush(HEAP32[i3 >> 2] | 0) | 0) == 0 | 0, 0) | 0;
+ STACKTOP = i2;
+ return i4 | 0;
+}
+function _b_test(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0;
+ i2 = STACKTOP;
+ i3 = _lua_gettop(i1) | 0;
+ if ((i3 | 0) < 1) {
+ i3 = 1;
+ } else {
+ i4 = 1;
+ i5 = -1;
+ while (1) {
+ i5 = (_luaL_checkunsigned(i1, i4) | 0) & i5;
+ if ((i4 | 0) == (i3 | 0)) {
+ break;
+ } else {
+ i4 = i4 + 1 | 0;
+ }
+ }
+ i3 = (i5 | 0) != 0;
+ }
+ _lua_pushboolean(i1, i3 & 1);
+ STACKTOP = i2;
+ return 1;
+}
+function ___muldsi3(i2, i1) {
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i3 = 0, i4 = 0, i5 = 0, i6 = 0;
+ i6 = i2 & 65535;
+ i4 = i1 & 65535;
+ i3 = Math_imul(i4, i6) | 0;
+ i5 = i2 >>> 16;
+ i4 = (i3 >>> 16) + (Math_imul(i4, i5) | 0) | 0;
+ i1 = i1 >>> 16;
+ i2 = Math_imul(i1, i6) | 0;
+ return (tempRet0 = (i4 >>> 16) + (Math_imul(i1, i5) | 0) + (((i4 & 65535) + i2 | 0) >>> 16) | 0, i4 + i2 << 16 | i3 & 65535 | 0) | 0;
+}
+function _str_dump(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 1056 | 0;
+ i3 = i2 + 8 | 0;
+ _luaL_checktype(i1, 1, 6);
+ _lua_settop(i1, 1);
+ _luaL_buffinit(i1, i3);
+ if ((_lua_dump(i1, 2, i3) | 0) == 0) {
+ _luaL_pushresult(i3);
+ i3 = 1;
+ STACKTOP = i2;
+ return i3 | 0;
+ } else {
+ i3 = _luaL_error(i1, 7888, i2) | 0;
+ STACKTOP = i2;
+ return i3 | 0;
+ }
+ return 0;
+}
+function ___memrchr(i2, i3, i5) {
+ i2 = i2 | 0;
+ i3 = i3 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i4 = 0;
+ i1 = STACKTOP;
+ i3 = i3 & 255;
+ while (1) {
+ i4 = i5 + -1 | 0;
+ if ((i5 | 0) == 0) {
+ i5 = 0;
+ i2 = 4;
+ break;
+ }
+ i5 = i2 + i4 | 0;
+ if ((HEAP8[i5] | 0) == i3 << 24 >> 24) {
+ i2 = 4;
+ break;
+ } else {
+ i5 = i4;
+ }
+ }
+ if ((i2 | 0) == 4) {
+ STACKTOP = i1;
+ return i5 | 0;
+ }
+ return 0;
+}
+function _luaL_getsubtable(i1, i3, i4) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_getfield(i1, i3, i4);
+ if ((_lua_type(i1, -1) | 0) == 5) {
+ i4 = 1;
+ STACKTOP = i2;
+ return i4 | 0;
+ }
+ _lua_settop(i1, -2);
+ i3 = _lua_absindex(i1, i3) | 0;
+ _lua_createtable(i1, 0, 0);
+ _lua_pushvalue(i1, -1);
+ _lua_setfield(i1, i3, i4);
+ i4 = 0;
+ STACKTOP = i2;
+ return i4 | 0;
+}
+function _luaE_freeCI(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0;
+ i2 = STACKTOP;
+ i4 = (HEAP32[i1 + 16 >> 2] | 0) + 12 | 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ HEAP32[i4 >> 2] = 0;
+ if ((i3 | 0) == 0) {
+ STACKTOP = i2;
+ return;
+ }
+ while (1) {
+ i4 = HEAP32[i3 + 12 >> 2] | 0;
+ _luaM_realloc_(i1, i3, 40, 0) | 0;
+ if ((i4 | 0) == 0) {
+ break;
+ } else {
+ i3 = i4;
+ }
+ }
+ STACKTOP = i2;
+ return;
+}
+function _f_tostring(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = i2;
+ i4 = _luaL_checkudata(i1, 1, 2832) | 0;
+ if ((HEAP32[i4 + 4 >> 2] | 0) == 0) {
+ _lua_pushlstring(i1, 3040, 13) | 0;
+ STACKTOP = i2;
+ return 1;
+ } else {
+ HEAP32[i3 >> 2] = HEAP32[i4 >> 2];
+ _lua_pushfstring(i1, 3056, i3) | 0;
+ STACKTOP = i2;
+ return 1;
+ }
+ return 0;
+}
+function _lua_newuserdata(i1, i3) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i4 = 0;
+ i2 = STACKTOP;
+ if ((HEAP32[(HEAP32[i1 + 12 >> 2] | 0) + 12 >> 2] | 0) > 0) {
+ _luaC_step(i1);
+ }
+ i3 = _luaS_newudata(i1, i3, 0) | 0;
+ i1 = i1 + 8 | 0;
+ i4 = HEAP32[i1 >> 2] | 0;
+ HEAP32[i4 >> 2] = i3;
+ HEAP32[i4 + 8 >> 2] = 71;
+ HEAP32[i1 >> 2] = (HEAP32[i1 >> 2] | 0) + 16;
+ STACKTOP = i2;
+ return i3 + 24 | 0;
+}
+function _luaL_pushresultsize(i1, i3) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i4 = 0, i5 = 0;
+ i2 = STACKTOP;
+ i5 = i1 + 8 | 0;
+ i4 = (HEAP32[i5 >> 2] | 0) + i3 | 0;
+ HEAP32[i5 >> 2] = i4;
+ i3 = HEAP32[i1 + 12 >> 2] | 0;
+ _lua_pushlstring(i3, HEAP32[i1 >> 2] | 0, i4) | 0;
+ if ((HEAP32[i1 >> 2] | 0) == (i1 + 16 | 0)) {
+ STACKTOP = i2;
+ return;
+ }
+ _lua_remove(i3, -2);
+ STACKTOP = i2;
+ return;
+}
+function _luaL_testudata(i2, i5, i4) {
+ i2 = i2 | 0;
+ i5 = i5 | 0;
+ i4 = i4 | 0;
+ var i1 = 0, i3 = 0;
+ i1 = STACKTOP;
+ i3 = _lua_touserdata(i2, i5) | 0;
+ if ((i3 | 0) != 0 ? (_lua_getmetatable(i2, i5) | 0) != 0 : 0) {
+ _lua_getfield(i2, -1001e3, i4);
+ i5 = (_lua_rawequal(i2, -1, -2) | 0) == 0;
+ _lua_settop(i2, -3);
+ i2 = i5 ? 0 : i3;
+ } else {
+ i2 = 0;
+ }
+ STACKTOP = i1;
+ return i2 | 0;
+}
+function _finishpcall(i1, i3) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ if ((_lua_checkstack(i1, 1) | 0) == 0) {
+ _lua_settop(i1, 0);
+ _lua_pushboolean(i1, 0);
+ _lua_pushstring(i1, 9632) | 0;
+ i3 = 2;
+ STACKTOP = i2;
+ return i3 | 0;
+ } else {
+ _lua_pushboolean(i1, i3);
+ _lua_replace(i1, 1);
+ i3 = _lua_gettop(i1) | 0;
+ STACKTOP = i2;
+ return i3 | 0;
+ }
+ return 0;
+}
+function _searcher_preload(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i4 = i2;
+ i3 = _luaL_checklstring(i1, 1, 0) | 0;
+ _lua_getfield(i1, -1001e3, 4592);
+ _lua_getfield(i1, -1, i3);
+ if ((_lua_type(i1, -1) | 0) != 0) {
+ STACKTOP = i2;
+ return 1;
+ }
+ HEAP32[i4 >> 2] = i3;
+ _lua_pushfstring(i1, 5096, i4) | 0;
+ STACKTOP = i2;
+ return 1;
+}
+function _luaB_auxwrap(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i3 = STACKTOP;
+ i2 = _lua_tothread(i1, -1001001) | 0;
+ i2 = _auxresume(i1, i2, _lua_gettop(i1) | 0) | 0;
+ if ((i2 | 0) >= 0) {
+ STACKTOP = i3;
+ return i2 | 0;
+ }
+ if ((_lua_isstring(i1, -1) | 0) == 0) {
+ _lua_error(i1) | 0;
+ }
+ _luaL_where(i1, 1);
+ _lua_insert(i1, -2);
+ _lua_concat(i1, 2);
+ _lua_error(i1) | 0;
+ return 0;
+}
+function _ll_loadlib(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ i3 = _luaL_checklstring(i1, 1, 0) | 0;
+ i3 = _ll_loadfunc(i1, i3, _luaL_checklstring(i1, 2, 0) | 0) | 0;
+ if ((i3 | 0) == 0) {
+ i3 = 1;
+ STACKTOP = i2;
+ return i3 | 0;
+ }
+ _lua_pushnil(i1);
+ _lua_insert(i1, -2);
+ _lua_pushstring(i1, (i3 | 0) == 1 ? 5176 : 5184) | 0;
+ i3 = 3;
+ STACKTOP = i2;
+ return i3 | 0;
+}
+function _luaS_hash(i2, i4, i3) {
+ i2 = i2 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ var i1 = 0, i5 = 0;
+ i1 = STACKTOP;
+ i5 = i3 ^ i4;
+ i3 = (i4 >>> 5) + 1 | 0;
+ if (i3 >>> 0 > i4 >>> 0) {
+ STACKTOP = i1;
+ return i5 | 0;
+ }
+ do {
+ i5 = (i5 << 5) + (i5 >>> 2) + (HEAPU8[i2 + (i4 + -1) | 0] | 0) ^ i5;
+ i4 = i4 - i3 | 0;
+ } while (!(i4 >>> 0 < i3 >>> 0));
+ STACKTOP = i1;
+ return i5 | 0;
+}
+function _b_and(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0;
+ i2 = STACKTOP;
+ i3 = _lua_gettop(i1) | 0;
+ if ((i3 | 0) < 1) {
+ i5 = -1;
+ } else {
+ i4 = 1;
+ i5 = -1;
+ while (1) {
+ i5 = (_luaL_checkunsigned(i1, i4) | 0) & i5;
+ if ((i4 | 0) == (i3 | 0)) {
+ break;
+ } else {
+ i4 = i4 + 1 | 0;
+ }
+ }
+ }
+ _lua_pushunsigned(i1, i5);
+ STACKTOP = i2;
+ return 1;
+}
+function _luaopen_string(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_createtable(i1, 0, 14);
+ _luaL_setfuncs(i1, 6920, 0);
+ _lua_createtable(i1, 0, 1);
+ _lua_pushlstring(i1, 7040, 0) | 0;
+ _lua_pushvalue(i1, -2);
+ _lua_setmetatable(i1, -2) | 0;
+ _lua_settop(i1, -2);
+ _lua_pushvalue(i1, -2);
+ _lua_setfield(i1, -2, 7048);
+ _lua_settop(i1, -2);
+ STACKTOP = i2;
+ return 1;
+}
+function _b_xor(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0;
+ i2 = STACKTOP;
+ i3 = _lua_gettop(i1) | 0;
+ if ((i3 | 0) < 1) {
+ i5 = 0;
+ } else {
+ i4 = 1;
+ i5 = 0;
+ while (1) {
+ i5 = (_luaL_checkunsigned(i1, i4) | 0) ^ i5;
+ if ((i4 | 0) == (i3 | 0)) {
+ break;
+ } else {
+ i4 = i4 + 1 | 0;
+ }
+ }
+ }
+ _lua_pushunsigned(i1, i5);
+ STACKTOP = i2;
+ return 1;
+}
+function _luaB_assert(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = i2;
+ if ((_lua_toboolean(i1, 1) | 0) == 0) {
+ HEAP32[i3 >> 2] = _luaL_optlstring(i1, 2, 10216, 0) | 0;
+ i3 = _luaL_error(i1, 10208, i3) | 0;
+ STACKTOP = i2;
+ return i3 | 0;
+ } else {
+ i3 = _lua_gettop(i1) | 0;
+ STACKTOP = i2;
+ return i3 | 0;
+ }
+ return 0;
+}
+function _b_or(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0;
+ i2 = STACKTOP;
+ i3 = _lua_gettop(i1) | 0;
+ if ((i3 | 0) < 1) {
+ i5 = 0;
+ } else {
+ i4 = 1;
+ i5 = 0;
+ while (1) {
+ i5 = _luaL_checkunsigned(i1, i4) | 0 | i5;
+ if ((i4 | 0) == (i3 | 0)) {
+ break;
+ } else {
+ i4 = i4 + 1 | 0;
+ }
+ }
+ }
+ _lua_pushunsigned(i1, i5);
+ STACKTOP = i2;
+ return 1;
+}
+function _io_write(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i4 = i2;
+ _lua_getfield(i1, -1001e3, 2800);
+ i3 = _lua_touserdata(i1, -1) | 0;
+ if ((HEAP32[i3 + 4 >> 2] | 0) == 0) {
+ HEAP32[i4 >> 2] = 2804;
+ _luaL_error(i1, 3424, i4) | 0;
+ }
+ i4 = _g_write(i1, HEAP32[i3 >> 2] | 0, 1) | 0;
+ STACKTOP = i2;
+ return i4 | 0;
+}
+function _luaK_checkstack(i1, i3) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i4 = 0;
+ i2 = STACKTOP;
+ i3 = (HEAPU8[i1 + 48 | 0] | 0) + i3 | 0;
+ i4 = (HEAP32[i1 >> 2] | 0) + 78 | 0;
+ if ((i3 | 0) <= (HEAPU8[i4] | 0 | 0)) {
+ STACKTOP = i2;
+ return;
+ }
+ if ((i3 | 0) > 249) {
+ _luaX_syntaxerror(HEAP32[i1 + 12 >> 2] | 0, 10536);
+ }
+ HEAP8[i4] = i3;
+ STACKTOP = i2;
+ return;
+}
+function _io_read(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i4 = i2;
+ _lua_getfield(i1, -1001e3, 2776);
+ i3 = _lua_touserdata(i1, -1) | 0;
+ if ((HEAP32[i3 + 4 >> 2] | 0) == 0) {
+ HEAP32[i4 >> 2] = 2780;
+ _luaL_error(i1, 3424, i4) | 0;
+ }
+ i4 = _g_read(i1, HEAP32[i3 >> 2] | 0, 1) | 0;
+ STACKTOP = i2;
+ return i4 | 0;
+}
+function _db_setupvalue(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ _luaL_checkany(i1, 3);
+ i3 = _luaL_checkinteger(i1, 2) | 0;
+ _luaL_checktype(i1, 1, 6);
+ i3 = _lua_setupvalue(i1, 1, i3) | 0;
+ if ((i3 | 0) == 0) {
+ i3 = 0;
+ STACKTOP = i2;
+ return i3 | 0;
+ }
+ _lua_pushstring(i1, i3) | 0;
+ _lua_insert(i1, -1);
+ i3 = 1;
+ STACKTOP = i2;
+ return i3 | 0;
+}
+function ___uflow(i2) {
+ i2 = i2 | 0;
+ var i1 = 0, i3 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = i1;
+ if ((HEAP32[i2 + 8 >> 2] | 0) == 0 ? (___toread(i2) | 0) != 0 : 0) {
+ i2 = -1;
+ } else {
+ if ((FUNCTION_TABLE_iiii[HEAP32[i2 + 32 >> 2] & 3](i2, i3, 1) | 0) == 1) {
+ i2 = HEAPU8[i3] | 0;
+ } else {
+ i2 = -1;
+ }
+ }
+ STACKTOP = i1;
+ return i2 | 0;
+}
+function _llvm_cttz_i32(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = HEAP8[cttz_i8 + (i1 & 255) | 0] | 0;
+ if ((i2 | 0) < 8) return i2 | 0;
+ i2 = HEAP8[cttz_i8 + (i1 >> 8 & 255) | 0] | 0;
+ if ((i2 | 0) < 8) return i2 + 8 | 0;
+ i2 = HEAP8[cttz_i8 + (i1 >> 16 & 255) | 0] | 0;
+ if ((i2 | 0) < 8) return i2 + 16 | 0;
+ return (HEAP8[cttz_i8 + (i1 >>> 24) | 0] | 0) + 24 | 0;
+}
+function _llvm_ctlz_i32(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = HEAP8[ctlz_i8 + (i1 >>> 24) | 0] | 0;
+ if ((i2 | 0) < 8) return i2 | 0;
+ i2 = HEAP8[ctlz_i8 + (i1 >> 16 & 255) | 0] | 0;
+ if ((i2 | 0) < 8) return i2 + 8 | 0;
+ i2 = HEAP8[ctlz_i8 + (i1 >> 8 & 255) | 0] | 0;
+ if ((i2 | 0) < 8) return i2 + 16 | 0;
+ return (HEAP8[ctlz_i8 + (i1 & 255) | 0] | 0) + 24 | 0;
+}
+function _luaO_ceillog2(i2) {
+ i2 = i2 | 0;
+ var i1 = 0, i3 = 0, i4 = 0;
+ i1 = STACKTOP;
+ i2 = i2 + -1 | 0;
+ if (i2 >>> 0 > 255) {
+ i3 = 0;
+ while (1) {
+ i3 = i3 + 8 | 0;
+ i4 = i2 >>> 8;
+ if (i2 >>> 0 > 65535) {
+ i2 = i4;
+ } else {
+ i2 = i4;
+ break;
+ }
+ }
+ } else {
+ i3 = 0;
+ }
+ STACKTOP = i1;
+ return (HEAPU8[5208 + i2 | 0] | 0) + i3 | 0;
+}
+function _os_exit(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ if ((_lua_type(i1, 1) | 0) == 1) {
+ i3 = (_lua_toboolean(i1, 1) | 0) == 0 | 0;
+ } else {
+ i3 = _luaL_optinteger(i1, 1, 0) | 0;
+ }
+ if ((_lua_toboolean(i1, 2) | 0) != 0) {
+ _lua_close(i1);
+ }
+ if ((i1 | 0) == 0) {
+ STACKTOP = i2;
+ return 0;
+ } else {
+ _exit(i3 | 0);
+ }
+ return 0;
+}
+function _luaL_newmetatable(i1, i3) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_getfield(i1, -1001e3, i3);
+ if ((_lua_type(i1, -1) | 0) != 0) {
+ i3 = 0;
+ STACKTOP = i2;
+ return i3 | 0;
+ }
+ _lua_settop(i1, -2);
+ _lua_createtable(i1, 0, 0);
+ _lua_pushvalue(i1, -1);
+ _lua_setfield(i1, -1001e3, i3);
+ i3 = 1;
+ STACKTOP = i2;
+ return i3 | 0;
+}
+function _luaH_free(i1, i4) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ i3 = HEAP32[i4 + 16 >> 2] | 0;
+ if ((i3 | 0) != 8016) {
+ _luaM_realloc_(i1, i3, 32 << (HEAPU8[i4 + 7 | 0] | 0), 0) | 0;
+ }
+ _luaM_realloc_(i1, HEAP32[i4 + 12 >> 2] | 0, HEAP32[i4 + 28 >> 2] << 4, 0) | 0;
+ _luaM_realloc_(i1, i4, 32, 0) | 0;
+ STACKTOP = i2;
+ return;
+}
+function _luaO_int2fb(i3) {
+ i3 = i3 | 0;
+ var i1 = 0, i2 = 0, i4 = 0;
+ i1 = STACKTOP;
+ if (i3 >>> 0 < 8) {
+ STACKTOP = i1;
+ return i3 | 0;
+ }
+ if (i3 >>> 0 > 15) {
+ i2 = 1;
+ do {
+ i4 = i3 + 1 | 0;
+ i3 = i4 >>> 1;
+ i2 = i2 + 1 | 0;
+ } while (i4 >>> 0 > 31);
+ i2 = i2 << 3;
+ } else {
+ i2 = 8;
+ }
+ i4 = i2 | i3 + -8;
+ STACKTOP = i1;
+ return i4 | 0;
+}
+function _luaK_codek(i3, i4, i1) {
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ i4 = i4 << 6;
+ if ((i1 | 0) < 262144) {
+ i4 = _luaK_code(i3, i4 | i1 << 14 | 1) | 0;
+ STACKTOP = i2;
+ return i4 | 0;
+ } else {
+ i4 = _luaK_code(i3, i4 | 2) | 0;
+ _luaK_code(i3, i1 << 6 | 39) | 0;
+ STACKTOP = i2;
+ return i4 | 0;
+ }
+ return 0;
+}
+function _luaB_xpcall(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ i3 = _lua_gettop(i1) | 0;
+ if ((i3 | 0) <= 1) {
+ _luaL_argerror(i1, 2, 9616) | 0;
+ }
+ _lua_pushvalue(i1, 1);
+ _lua_copy(i1, 2, 1);
+ _lua_replace(i1, 2);
+ i3 = _finishpcall(i1, (_lua_pcallk(i1, i3 + -2 | 0, -1, 1, 0, 166) | 0) == 0 | 0) | 0;
+ STACKTOP = i2;
+ return i3 | 0;
+}
+function _luaS_newudata(i1, i3, i4) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ if (i3 >>> 0 > 4294967269) {
+ _luaM_toobig(i1);
+ } else {
+ i1 = _luaC_newobj(i1, 7, i3 + 24 | 0, 0, 0) | 0;
+ HEAP32[i1 + 16 >> 2] = i3;
+ HEAP32[i1 + 8 >> 2] = 0;
+ HEAP32[i1 + 12 >> 2] = i4;
+ STACKTOP = i2;
+ return i1 | 0;
+ }
+ return 0;
+}
+function _lua_dump(i1, i4, i5) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ i3 = HEAP32[i1 + 8 >> 2] | 0;
+ if ((HEAP32[i3 + -8 >> 2] | 0) != 70) {
+ i5 = 1;
+ STACKTOP = i2;
+ return i5 | 0;
+ }
+ i5 = _luaU_dump(i1, HEAP32[(HEAP32[i3 + -16 >> 2] | 0) + 12 >> 2] | 0, i4, i5, 0) | 0;
+ STACKTOP = i2;
+ return i5 | 0;
+}
+function _luaS_eqlngstr(i2, i4) {
+ i2 = i2 | 0;
+ i4 = i4 | 0;
+ var i1 = 0, i3 = 0;
+ i1 = STACKTOP;
+ i3 = HEAP32[i2 + 12 >> 2] | 0;
+ if ((i2 | 0) != (i4 | 0)) {
+ if ((i3 | 0) == (HEAP32[i4 + 12 >> 2] | 0)) {
+ i2 = (_memcmp(i2 + 16 | 0, i4 + 16 | 0, i3) | 0) == 0;
+ } else {
+ i2 = 0;
+ }
+ } else {
+ i2 = 1;
+ }
+ STACKTOP = i1;
+ return i2 & 1 | 0;
+}
+function _luaC_barrier_(i4, i3, i1) {
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ i4 = HEAP32[i4 + 12 >> 2] | 0;
+ if ((HEAPU8[i4 + 61 | 0] | 0) < 2) {
+ _reallymarkobject(i4, i1);
+ STACKTOP = i2;
+ return;
+ } else {
+ i3 = i3 + 5 | 0;
+ HEAP8[i3] = HEAP8[i4 + 60 | 0] & 3 | HEAP8[i3] & 184;
+ STACKTOP = i2;
+ return;
+ }
+}
+function _db_getupvalue(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ i3 = _luaL_checkinteger(i1, 2) | 0;
+ _luaL_checktype(i1, 1, 6);
+ i3 = _lua_getupvalue(i1, 1, i3) | 0;
+ if ((i3 | 0) == 0) {
+ i3 = 0;
+ STACKTOP = i2;
+ return i3 | 0;
+ }
+ _lua_pushstring(i1, i3) | 0;
+ _lua_insert(i1, -2);
+ i3 = 2;
+ STACKTOP = i2;
+ return i3 | 0;
+}
+function _os_execute(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0;
+ i2 = STACKTOP;
+ i4 = _luaL_optlstring(i1, 1, 0, 0) | 0;
+ i3 = _system(i4 | 0) | 0;
+ if ((i4 | 0) == 0) {
+ _lua_pushboolean(i1, i3);
+ i4 = 1;
+ STACKTOP = i2;
+ return i4 | 0;
+ } else {
+ i4 = _luaL_execresult(i1, i3) | 0;
+ STACKTOP = i2;
+ return i4 | 0;
+ }
+ return 0;
+}
+function _lua_pushfstring(i4, i5, i1) {
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = i2;
+ if ((HEAP32[(HEAP32[i4 + 12 >> 2] | 0) + 12 >> 2] | 0) > 0) {
+ _luaC_step(i4);
+ }
+ HEAP32[i3 >> 2] = i1;
+ i5 = _luaO_pushvfstring(i4, i5, i3) | 0;
+ STACKTOP = i2;
+ return i5 | 0;
+}
+function _luaB_dofile(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ i3 = _luaL_optlstring(i1, 1, 0, 0) | 0;
+ _lua_settop(i1, 1);
+ if ((_luaL_loadfilex(i1, i3, 0) | 0) == 0) {
+ _lua_callk(i1, 0, -1, 0, 164);
+ i3 = (_lua_gettop(i1) | 0) + -1 | 0;
+ STACKTOP = i2;
+ return i3 | 0;
+ } else {
+ _lua_error(i1) | 0;
+ }
+ return 0;
+}
+function _f_write(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = _luaL_checkudata(i1, 1, 2832) | 0;
+ if ((HEAP32[i3 + 4 >> 2] | 0) == 0) {
+ _luaL_error(i1, 3080, i2) | 0;
+ }
+ i3 = HEAP32[i3 >> 2] | 0;
+ _lua_pushvalue(i1, 1);
+ i3 = _g_write(i1, i3, 2) | 0;
+ STACKTOP = i2;
+ return i3 | 0;
+}
+function _lua_getctx(i3, i1) {
+ i3 = i3 | 0;
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ i3 = HEAP32[i3 + 16 >> 2] | 0;
+ if ((HEAP8[i3 + 18 | 0] & 8) == 0) {
+ i3 = 0;
+ STACKTOP = i2;
+ return i3 | 0;
+ }
+ if ((i1 | 0) != 0) {
+ HEAP32[i1 >> 2] = HEAP32[i3 + 24 >> 2];
+ }
+ i3 = HEAPU8[i3 + 37 | 0] | 0;
+ STACKTOP = i2;
+ return i3 | 0;
+}
+function _f_flush(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = _luaL_checkudata(i1, 1, 2832) | 0;
+ if ((HEAP32[i3 + 4 >> 2] | 0) == 0) {
+ _luaL_error(i1, 3080, i2) | 0;
+ }
+ i3 = _luaL_fileresult(i1, (_fflush(HEAP32[i3 >> 2] | 0) | 0) == 0 | 0, 0) | 0;
+ STACKTOP = i2;
+ return i3 | 0;
+}
+function _os_tmpname(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 32 | 0;
+ i3 = i2 + 4 | 0;
+ if ((_tmpnam(i3 | 0) | 0) == 0) {
+ i3 = _luaL_error(i1, 5824, i2) | 0;
+ STACKTOP = i2;
+ return i3 | 0;
+ } else {
+ _lua_pushstring(i1, i3) | 0;
+ i3 = 1;
+ STACKTOP = i2;
+ return i3 | 0;
+ }
+ return 0;
+}
+function _traceback(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ i3 = _lua_tolstring(i1, 1, 0) | 0;
+ if ((i3 | 0) == 0) {
+ if ((_lua_type(i1, 1) | 0) >= 1 ? (_luaL_callmeta(i1, 1, 216) | 0) == 0 : 0) {
+ _lua_pushlstring(i1, 232, 18) | 0;
+ }
+ } else {
+ _luaL_traceback(i1, i1, i3, 1);
+ }
+ STACKTOP = i2;
+ return 1;
+}
+function _luaH_new(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ i1 = _luaC_newobj(i1, 5, 32, 0, 0) | 0;
+ HEAP32[i1 + 8 >> 2] = 0;
+ HEAP8[i1 + 6 | 0] = -1;
+ HEAP32[i1 + 12 >> 2] = 0;
+ HEAP32[i1 + 28 >> 2] = 0;
+ HEAP32[i1 + 16 >> 2] = 8016;
+ HEAP8[i1 + 7 | 0] = 0;
+ HEAP32[i1 + 20 >> 2] = 8016;
+ STACKTOP = i2;
+ return i1 | 0;
+}
+function _luaL_len(i1, i3) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i4 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i4 = i2 + 4 | 0;
+ _lua_len(i1, i3);
+ i3 = _lua_tointegerx(i1, -1, i4) | 0;
+ if ((HEAP32[i4 >> 2] | 0) == 0) {
+ _luaL_error(i1, 1352, i2) | 0;
+ }
+ _lua_settop(i1, -2);
+ STACKTOP = i2;
+ return i3 | 0;
+}
+function _getS(i3, i2, i1) {
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i4 = 0, i5 = 0;
+ i3 = STACKTOP;
+ i5 = i2 + 4 | 0;
+ i4 = HEAP32[i5 >> 2] | 0;
+ if ((i4 | 0) == 0) {
+ i5 = 0;
+ STACKTOP = i3;
+ return i5 | 0;
+ }
+ HEAP32[i1 >> 2] = i4;
+ HEAP32[i5 >> 2] = 0;
+ i5 = HEAP32[i2 >> 2] | 0;
+ STACKTOP = i3;
+ return i5 | 0;
+}
+function _luaC_runtilstate(i1, i4) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ i3 = (HEAP32[i1 + 12 >> 2] | 0) + 61 | 0;
+ if ((1 << (HEAPU8[i3] | 0) & i4 | 0) != 0) {
+ STACKTOP = i2;
+ return;
+ }
+ do {
+ _singlestep(i1) | 0;
+ } while ((1 << (HEAPU8[i3] | 0) & i4 | 0) == 0);
+ STACKTOP = i2;
+ return;
+}
+function _luaX_init(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0;
+ i2 = STACKTOP;
+ i3 = 0;
+ do {
+ i4 = _luaS_new(i1, HEAP32[12096 + (i3 << 2) >> 2] | 0) | 0;
+ i5 = i4 + 5 | 0;
+ HEAP8[i5] = HEAPU8[i5] | 0 | 32;
+ i3 = i3 + 1 | 0;
+ HEAP8[i4 + 6 | 0] = i3;
+ } while ((i3 | 0) != 22);
+ STACKTOP = i2;
+ return;
+}
+function _luaK_indexed(i5, i1, i4) {
+ i5 = i5 | 0;
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ var i2 = 0, i3 = 0;
+ i3 = STACKTOP;
+ i2 = i1 + 8 | 0;
+ HEAP8[i2 + 2 | 0] = HEAP32[i2 >> 2];
+ HEAP16[i2 >> 1] = _luaK_exp2RK(i5, i4) | 0;
+ HEAP8[i2 + 3 | 0] = (HEAP32[i1 >> 2] | 0) == 8 ? 8 : 7;
+ HEAP32[i1 >> 2] = 9;
+ STACKTOP = i3;
+ return;
+}
+function _db_setuservalue(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ if ((_lua_type(i1, 1) | 0) == 2) {
+ _luaL_argerror(i1, 1, 11680) | 0;
+ }
+ _luaL_checktype(i1, 1, 7);
+ if ((_lua_type(i1, 2) | 0) >= 1) {
+ _luaL_checktype(i1, 2, 5);
+ }
+ _lua_settop(i1, 2);
+ _lua_setuservalue(i1, 1);
+ STACKTOP = i2;
+ return 1;
+}
+function _ll_seeall(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _luaL_checktype(i1, 1, 5);
+ if ((_lua_getmetatable(i1, 1) | 0) == 0) {
+ _lua_createtable(i1, 0, 1);
+ _lua_pushvalue(i1, -1);
+ _lua_setmetatable(i1, 1) | 0;
+ }
+ _lua_rawgeti(i1, -1001e3, 2);
+ _lua_setfield(i1, -2, 5168);
+ STACKTOP = i2;
+ return 0;
+}
+function _luaL_loadbufferx(i3, i5, i4, i2, i1) {
+ i3 = i3 | 0;
+ i5 = i5 | 0;
+ i4 = i4 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i6 = 0, i7 = 0;
+ i6 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i7 = i6;
+ HEAP32[i7 >> 2] = i5;
+ HEAP32[i7 + 4 >> 2] = i4;
+ i5 = _lua_load(i3, 2, i7, i2, i1) | 0;
+ STACKTOP = i6;
+ return i5 | 0;
+}
+function _luaT_gettm(i1, i3, i4) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ i4 = _luaH_getstr(i1, i4) | 0;
+ if ((HEAP32[i4 + 8 >> 2] | 0) != 0) {
+ STACKTOP = i2;
+ return i4 | 0;
+ }
+ i4 = i1 + 6 | 0;
+ HEAP8[i4] = HEAPU8[i4] | 0 | 1 << i3;
+ i4 = 0;
+ STACKTOP = i2;
+ return i4 | 0;
+}
+function _luaL_pushresult(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ i3 = HEAP32[i1 + 12 >> 2] | 0;
+ _lua_pushlstring(i3, HEAP32[i1 >> 2] | 0, HEAP32[i1 + 8 >> 2] | 0) | 0;
+ if ((HEAP32[i1 >> 2] | 0) == (i1 + 16 | 0)) {
+ STACKTOP = i2;
+ return;
+ }
+ _lua_remove(i3, -2);
+ STACKTOP = i2;
+ return;
+}
+function _resume_error(i1, i3, i2) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ var i4 = 0;
+ i4 = i1 + 8 | 0;
+ HEAP32[i4 >> 2] = i2;
+ i3 = _luaS_new(i1, i3) | 0;
+ HEAP32[i2 >> 2] = i3;
+ HEAP32[i2 + 8 >> 2] = HEAPU8[i3 + 4 | 0] | 0 | 64;
+ HEAP32[i4 >> 2] = (HEAP32[i4 >> 2] | 0) + 16;
+ _luaD_throw(i1, -1);
+}
+function _lua_absindex(i3, i1) {
+ i3 = i3 | 0;
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ if ((i1 + 1000999 | 0) >>> 0 > 1000999) {
+ i3 = i1;
+ STACKTOP = i2;
+ return i3 | 0;
+ }
+ i3 = ((HEAP32[i3 + 8 >> 2] | 0) - (HEAP32[HEAP32[i3 + 16 >> 2] >> 2] | 0) >> 4) + i1 | 0;
+ STACKTOP = i2;
+ return i3 | 0;
+}
+function ___uremdi3(i4, i3, i2, i1) {
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i5 = 0, i6 = 0;
+ i6 = STACKTOP;
+ STACKTOP = STACKTOP + 8 | 0;
+ i5 = i6 | 0;
+ ___udivmoddi4(i4, i3, i2, i1, i5) | 0;
+ STACKTOP = i6;
+ return (tempRet0 = HEAP32[i5 + 4 >> 2] | 0, HEAP32[i5 >> 2] | 0) | 0;
+}
+function _f_read(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = _luaL_checkudata(i1, 1, 2832) | 0;
+ if ((HEAP32[i3 + 4 >> 2] | 0) == 0) {
+ _luaL_error(i1, 3080, i2) | 0;
+ }
+ i3 = _g_read(i1, HEAP32[i3 >> 2] | 0, 2) | 0;
+ STACKTOP = i2;
+ return i3 | 0;
+}
+function _sort(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ _luaL_checktype(i1, 1, 5);
+ i3 = _luaL_len(i1, 1) | 0;
+ _luaL_checkstack(i1, 40, 8208);
+ if ((_lua_type(i1, 2) | 0) >= 1) {
+ _luaL_checktype(i1, 2, 6);
+ }
+ _lua_settop(i1, 2);
+ _auxsort(i1, 1, i3);
+ STACKTOP = i2;
+ return 0;
+}
+function _luaB_error(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = _luaL_optinteger(i1, 2, 1) | 0;
+ _lua_settop(i1, 1);
+ if (!((_lua_isstring(i1, 1) | 0) != 0 & (i2 | 0) > 0)) {
+ _lua_error(i1) | 0;
+ }
+ _luaL_where(i1, i2);
+ _lua_pushvalue(i1, 1);
+ _lua_concat(i1, 2);
+ _lua_error(i1) | 0;
+ return 0;
+}
+function _error(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ var i3 = 0, i4 = 0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i4 = HEAP32[i1 >> 2] | 0;
+ HEAP32[i3 >> 2] = HEAP32[i1 + 12 >> 2];
+ HEAP32[i3 + 4 >> 2] = i2;
+ _luaO_pushfstring(i4, 8840, i3) | 0;
+ _luaD_throw(HEAP32[i1 >> 2] | 0, 3);
+}
+function _ipairsaux(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ i3 = _luaL_checkinteger(i1, 2) | 0;
+ _luaL_checktype(i1, 1, 5);
+ i3 = i3 + 1 | 0;
+ _lua_pushinteger(i1, i3);
+ _lua_rawgeti(i1, 1, i3);
+ i1 = (_lua_type(i1, -1) | 0) == 0;
+ STACKTOP = i2;
+ return (i1 ? 1 : 2) | 0;
+}
+function _panic(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i4 = i2;
+ i3 = HEAP32[_stderr >> 2] | 0;
+ HEAP32[i4 >> 2] = _lua_tolstring(i1, -1, 0) | 0;
+ _fprintf(i3 | 0, 1656, i4 | 0) | 0;
+ _fflush(i3 | 0) | 0;
+ STACKTOP = i2;
+ return 0;
+}
+function _testSetjmp(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ var i3 = 0, i4 = 0;
+ while ((i3 | 0) < 20) {
+ i4 = HEAP32[i2 + (i3 << 2) >> 2] | 0;
+ if ((i4 | 0) == 0) break;
+ if ((i4 | 0) == (i1 | 0)) {
+ return HEAP32[i2 + ((i3 << 2) + 4) >> 2] | 0;
+ }
+ i3 = i3 + 2 | 0;
+ }
+ return 0;
+}
+function _luaopen_math(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_createtable(i1, 0, 28);
+ _luaL_setfuncs(i1, 3576, 0);
+ _lua_pushnumber(i1, 3.141592653589793);
+ _lua_setfield(i1, -2, 3808);
+ _lua_pushnumber(i1, inf);
+ _lua_setfield(i1, -2, 3816);
+ STACKTOP = i2;
+ return 1;
+}
+function _luaopen_base(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_rawgeti(i1, -1001e3, 2);
+ _lua_rawgeti(i1, -1001e3, 2);
+ _lua_setfield(i1, -2, 9144);
+ _luaL_setfuncs(i1, 9152, 0);
+ _lua_pushlstring(i1, 9344, 7) | 0;
+ _lua_setfield(i1, -2, 9352);
+ STACKTOP = i2;
+ return 1;
+}
+function _luaE_extendCI(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i3 = STACKTOP;
+ i2 = _luaM_realloc_(i1, 0, 0, 40) | 0;
+ i1 = i1 + 16 | 0;
+ HEAP32[(HEAP32[i1 >> 2] | 0) + 12 >> 2] = i2;
+ HEAP32[i2 + 8 >> 2] = HEAP32[i1 >> 2];
+ HEAP32[i2 + 12 >> 2] = 0;
+ STACKTOP = i3;
+ return i2 | 0;
+}
+function _luaB_getmetatable(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _luaL_checkany(i1, 1);
+ if ((_lua_getmetatable(i1, 1) | 0) == 0) {
+ _lua_pushnil(i1);
+ STACKTOP = i2;
+ return 1;
+ } else {
+ _luaL_getmetafield(i1, 1, 9704) | 0;
+ STACKTOP = i2;
+ return 1;
+ }
+ return 0;
+}
+function _lua_pushunsigned(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ var d3 = 0.0;
+ if ((i2 | 0) > -1) {
+ d3 = +(i2 | 0);
+ } else {
+ d3 = +(i2 >>> 0);
+ }
+ i2 = i1 + 8 | 0;
+ i1 = HEAP32[i2 >> 2] | 0;
+ HEAPF64[i1 >> 3] = d3;
+ HEAP32[i1 + 8 >> 2] = 3;
+ HEAP32[i2 >> 2] = i1 + 16;
+ return;
+}
+function _lua_pushthread(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = i1 + 8 | 0;
+ i3 = HEAP32[i2 >> 2] | 0;
+ HEAP32[i3 >> 2] = i1;
+ HEAP32[i3 + 8 >> 2] = 72;
+ HEAP32[i2 >> 2] = (HEAP32[i2 >> 2] | 0) + 16;
+ return (HEAP32[(HEAP32[i1 + 12 >> 2] | 0) + 172 >> 2] | 0) == (i1 | 0) | 0;
+}
+function _gctm(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ i3 = _luaL_len(i1, 1) | 0;
+ if ((i3 | 0) <= 0) {
+ STACKTOP = i2;
+ return 0;
+ }
+ do {
+ _lua_rawgeti(i1, 1, i3);
+ _lua_settop(i1, -2);
+ i3 = i3 + -1 | 0;
+ } while ((i3 | 0) > 0);
+ STACKTOP = i2;
+ return 0;
+}
+function ___muldi3(i4, i2, i3, i1) {
+ i4 = i4 | 0;
+ i2 = i2 | 0;
+ i3 = i3 | 0;
+ i1 = i1 | 0;
+ var i5 = 0, i6 = 0;
+ i5 = i4;
+ i6 = i3;
+ i4 = ___muldsi3(i5, i6) | 0;
+ i3 = tempRet0;
+ return (tempRet0 = (Math_imul(i2, i6) | 0) + (Math_imul(i1, i5) | 0) + i3 | i3 & 0, i4 | 0 | 0) | 0;
+}
+function _luaH_resizearray(i1, i3, i4) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ var i2 = 0, i5 = 0;
+ i2 = STACKTOP;
+ if ((HEAP32[i3 + 16 >> 2] | 0) == 8016) {
+ i5 = 0;
+ } else {
+ i5 = 1 << (HEAPU8[i3 + 7 | 0] | 0);
+ }
+ _luaH_resize(i1, i3, i4, i5);
+ STACKTOP = i2;
+ return;
+}
+function _luaK_stringK(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ var i3 = 0, i4 = 0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i4 = i3;
+ HEAP32[i4 >> 2] = i2;
+ HEAP32[i4 + 8 >> 2] = HEAPU8[i2 + 4 | 0] | 0 | 64;
+ i2 = _addk(i1, i4, i4) | 0;
+ STACKTOP = i3;
+ return i2 | 0;
+}
+function _math_modf(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, d3 = 0.0, i4 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i4 = i2;
+ d3 = +_modf(+(+_luaL_checknumber(i1, 1)), i4 | 0);
+ _lua_pushnumber(i1, +HEAPF64[i4 >> 3]);
+ _lua_pushnumber(i1, d3);
+ STACKTOP = i2;
+ return 2;
+}
+function _os_setlocale(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ i3 = _luaL_optlstring(i1, 1, 0, 0) | 0;
+ _lua_pushstring(i1, _setlocale(HEAP32[5960 + ((_luaL_checkoption(i1, 2, 6016, 5984) | 0) << 2) >> 2] | 0, i3 | 0) | 0) | 0;
+ STACKTOP = i2;
+ return 1;
+}
+function _luaB_pcall(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _luaL_checkany(i1, 1);
+ _lua_pushnil(i1);
+ _lua_insert(i1, 1);
+ i1 = _finishpcall(i1, (_lua_pcallk(i1, (_lua_gettop(i1) | 0) + -2 | 0, -1, 0, 0, 166) | 0) == 0 | 0) | 0;
+ STACKTOP = i2;
+ return i1 | 0;
+}
+function _error_expected(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ var i3 = 0, i4 = 0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i4 = HEAP32[i1 + 52 >> 2] | 0;
+ HEAP32[i3 >> 2] = _luaX_token2str(i1, i2) | 0;
+ _luaX_syntaxerror(i1, _luaO_pushfstring(i4, 6328, i3) | 0);
+}
+function _lua_pushvfstring(i1, i3, i4) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ if ((HEAP32[(HEAP32[i1 + 12 >> 2] | 0) + 12 >> 2] | 0) > 0) {
+ _luaC_step(i1);
+ }
+ i4 = _luaO_pushvfstring(i1, i3, i4) | 0;
+ STACKTOP = i2;
+ return i4 | 0;
+}
+function _db_setmetatable(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ i3 = _lua_type(i1, 2) | 0;
+ if (!((i3 | 0) == 0 | (i3 | 0) == 5)) {
+ _luaL_argerror(i1, 2, 11536) | 0;
+ }
+ _lua_settop(i1, 2);
+ _lua_setmetatable(i1, 1) | 0;
+ STACKTOP = i2;
+ return 1;
+}
+function _b_rrot(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0;
+ i2 = STACKTOP;
+ i3 = 0 - (_luaL_checkinteger(i1, 2) | 0) | 0;
+ i4 = _luaL_checkunsigned(i1, 1) | 0;
+ i3 = i3 & 31;
+ _lua_pushunsigned(i1, i4 >>> (32 - i3 | 0) | i4 << i3);
+ STACKTOP = i2;
+ return 1;
+}
+function _luaC_step(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ i3 = HEAP32[i1 + 12 >> 2] | 0;
+ if ((HEAP8[i3 + 63 | 0] | 0) == 0) {
+ _luaE_setdebt(i3, -1600);
+ STACKTOP = i2;
+ return;
+ } else {
+ _luaC_forcestep(i1);
+ STACKTOP = i2;
+ return;
+ }
+}
+function _math_frexp(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = i2;
+ _lua_pushnumber(i1, +_frexp(+(+_luaL_checknumber(i1, 1)), i3 | 0));
+ _lua_pushinteger(i1, HEAP32[i3 >> 2] | 0);
+ STACKTOP = i2;
+ return 2;
+}
+function _luaO_pushfstring(i2, i1, i3) {
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ var i4 = 0, i5 = 0;
+ i4 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i5 = i4;
+ HEAP32[i5 >> 2] = i3;
+ i3 = _luaO_pushvfstring(i2, i1, i5) | 0;
+ STACKTOP = i4;
+ return i3 | 0;
+}
+function _luaO_hexavalue(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ if ((HEAP8[i1 + 10913 | 0] & 2) == 0) {
+ i1 = (i1 | 32) + -87 | 0;
+ STACKTOP = i2;
+ return i1 | 0;
+ } else {
+ i1 = i1 + -48 | 0;
+ STACKTOP = i2;
+ return i1 | 0;
+ }
+ return 0;
+}
+function _b_lrot(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0;
+ i2 = STACKTOP;
+ i3 = _luaL_checkinteger(i1, 2) | 0;
+ i4 = _luaL_checkunsigned(i1, 1) | 0;
+ i3 = i3 & 31;
+ _lua_pushunsigned(i1, i4 >>> (32 - i3 | 0) | i4 << i3);
+ STACKTOP = i2;
+ return 1;
+}
+function _f_lines(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ if ((HEAP32[(_luaL_checkudata(i1, 1, 2832) | 0) + 4 >> 2] | 0) == 0) {
+ _luaL_error(i1, 3080, i2) | 0;
+ }
+ _aux_lines(i1, 0);
+ STACKTOP = i2;
+ return 1;
+}
+function _luaC_barrierback_(i2, i1) {
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i3 = 0;
+ i2 = HEAP32[i2 + 12 >> 2] | 0;
+ i3 = i1 + 5 | 0;
+ HEAP8[i3] = HEAP8[i3] & 251;
+ i2 = i2 + 88 | 0;
+ HEAP32[i1 + 24 >> 2] = HEAP32[i2 >> 2];
+ HEAP32[i2 >> 2] = i1;
+ return;
+}
+function _os_rename(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ i3 = _luaL_checklstring(i1, 1, 0) | 0;
+ i1 = _luaL_fileresult(i1, (_rename(i3 | 0, _luaL_checklstring(i1, 2, 0) | 0) | 0) == 0 | 0, 0) | 0;
+ STACKTOP = i2;
+ return i1 | 0;
+}
+function _bitshift64Ashr(i3, i2, i1) {
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ if ((i1 | 0) < 32) {
+ tempRet0 = i2 >> i1;
+ return i3 >>> i1 | (i2 & (1 << i1) - 1) << 32 - i1;
+ }
+ tempRet0 = (i2 | 0) < 0 ? -1 : 0;
+ return i2 >> i1 - 32 | 0;
+}
+function _luaB_cowrap(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ _luaL_checktype(i1, 1, 6);
+ i3 = _lua_newthread(i1) | 0;
+ _lua_pushvalue(i1, 1);
+ _lua_xmove(i1, i3, 1);
+ _lua_pushcclosure(i1, 167, 1);
+ STACKTOP = i2;
+ return 1;
+}
+function _gmatch(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _luaL_checklstring(i1, 1, 0) | 0;
+ _luaL_checklstring(i1, 2, 0) | 0;
+ _lua_settop(i1, 2);
+ _lua_pushinteger(i1, 0);
+ _lua_pushcclosure(i1, 163, 3);
+ STACKTOP = i2;
+ return 1;
+}
+function _luaB_next(i2) {
+ i2 = i2 | 0;
+ var i1 = 0;
+ i1 = STACKTOP;
+ _luaL_checktype(i2, 1, 5);
+ _lua_settop(i2, 2);
+ if ((_lua_next(i2, 1) | 0) == 0) {
+ _lua_pushnil(i2);
+ i2 = 1;
+ } else {
+ i2 = 2;
+ }
+ STACKTOP = i1;
+ return i2 | 0;
+}
+function _luaK_codeABC(i5, i3, i4, i2, i1) {
+ i5 = i5 | 0;
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i6 = 0;
+ i6 = STACKTOP;
+ i5 = _luaK_code(i5, i4 << 6 | i3 | i2 << 23 | i1 << 14) | 0;
+ STACKTOP = i6;
+ return i5 | 0;
+}
+function _luaH_set(i2, i4, i5) {
+ i2 = i2 | 0;
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i3 = 0;
+ i1 = STACKTOP;
+ i3 = _luaH_get(i4, i5) | 0;
+ if ((i3 | 0) == 5192) {
+ i3 = _luaH_newkey(i2, i4, i5) | 0;
+ }
+ STACKTOP = i1;
+ return i3 | 0;
+}
+function _luaZ_init(i4, i1, i3, i2) {
+ i4 = i4 | 0;
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ HEAP32[i1 + 16 >> 2] = i4;
+ HEAP32[i1 + 8 >> 2] = i3;
+ HEAP32[i1 + 12 >> 2] = i2;
+ HEAP32[i1 >> 2] = 0;
+ HEAP32[i1 + 4 >> 2] = 0;
+ return;
+}
+function _lua_pushlightuserdata(i2, i1) {
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i3 = 0;
+ i2 = i2 + 8 | 0;
+ i3 = HEAP32[i2 >> 2] | 0;
+ HEAP32[i3 >> 2] = i1;
+ HEAP32[i3 + 8 >> 2] = 2;
+ HEAP32[i2 >> 2] = (HEAP32[i2 >> 2] | 0) + 16;
+ return;
+}
+function copyTempFloat(i1) {
+ i1 = i1 | 0;
+ HEAP8[tempDoublePtr] = HEAP8[i1];
+ HEAP8[tempDoublePtr + 1 | 0] = HEAP8[i1 + 1 | 0];
+ HEAP8[tempDoublePtr + 2 | 0] = HEAP8[i1 + 2 | 0];
+ HEAP8[tempDoublePtr + 3 | 0] = HEAP8[i1 + 3 | 0];
+}
+function _bitshift64Shl(i2, i3, i1) {
+ i2 = i2 | 0;
+ i3 = i3 | 0;
+ i1 = i1 | 0;
+ if ((i1 | 0) < 32) {
+ tempRet0 = i3 << i1 | (i2 & (1 << i1) - 1 << 32 - i1) >>> 32 - i1;
+ return i2 << i1;
+ }
+ tempRet0 = i2 << i1 - 32;
+ return 0;
+}
+function _luaB_rawlen(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ if (((_lua_type(i1, 1) | 0) & -2 | 0) != 4) {
+ _luaL_argerror(i1, 1, 9784) | 0;
+ }
+ _lua_pushinteger(i1, _lua_rawlen(i1, 1) | 0);
+ STACKTOP = i2;
+ return 1;
+}
+function _l_alloc(i3, i1, i4, i2) {
+ i3 = i3 | 0;
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i2 = i2 | 0;
+ i3 = STACKTOP;
+ if ((i2 | 0) == 0) {
+ _free(i1);
+ i1 = 0;
+ } else {
+ i1 = _realloc(i1, i2) | 0;
+ }
+ STACKTOP = i3;
+ return i1 | 0;
+}
+function _bitshift64Lshr(i3, i2, i1) {
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ if ((i1 | 0) < 32) {
+ tempRet0 = i2 >>> i1;
+ return i3 >>> i1 | (i2 & (1 << i1) - 1) << 32 - i1;
+ }
+ tempRet0 = 0;
+ return i2 >>> i1 - 32 | 0;
+}
+function _luaG_aritherror(i3, i1, i2) {
+ i3 = i3 | 0;
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ var i4 = 0;
+ i4 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i4 = (_luaV_tonumber(i1, i4) | 0) == 0;
+ _luaG_typeerror(i3, i4 ? i1 : i2, 1928);
+}
+function _str_len(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i3 = i2;
+ _luaL_checklstring(i1, 1, i3) | 0;
+ _lua_pushinteger(i1, HEAP32[i3 >> 2] | 0);
+ STACKTOP = i2;
+ return 1;
+}
+function _luaL_optinteger(i3, i4, i2) {
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ i2 = i2 | 0;
+ var i1 = 0;
+ i1 = STACKTOP;
+ if ((_lua_type(i3, i4) | 0) >= 1) {
+ i2 = _luaL_checkinteger(i3, i4) | 0;
+ }
+ STACKTOP = i1;
+ return i2 | 0;
+}
+function _os_difftime(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ i3 = ~~+_luaL_checknumber(i1, 1);
+ _lua_pushnumber(i1, +_difftime(i3 | 0, ~~+_luaL_optnumber(i1, 2, 0.0) | 0));
+ STACKTOP = i2;
+ return 1;
+}
+function _lua_pushboolean(i2, i1) {
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i3 = 0;
+ i2 = i2 + 8 | 0;
+ i3 = HEAP32[i2 >> 2] | 0;
+ HEAP32[i3 >> 2] = (i1 | 0) != 0;
+ HEAP32[i3 + 8 >> 2] = 1;
+ HEAP32[i2 >> 2] = i3 + 16;
+ return;
+}
+function _os_remove(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ i3 = _luaL_checklstring(i1, 1, 0) | 0;
+ i1 = _luaL_fileresult(i1, (_remove(i3 | 0) | 0) == 0 | 0, i3) | 0;
+ STACKTOP = i2;
+ return i1 | 0;
+}
+function _luaopen_table(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_createtable(i1, 0, 7);
+ _luaL_setfuncs(i1, 8088, 0);
+ _lua_getfield(i1, -1, 8152);
+ _lua_setglobal(i1, 8152);
+ STACKTOP = i2;
+ return 1;
+}
+function _lua_pushinteger(i2, i1) {
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i3 = 0;
+ i2 = i2 + 8 | 0;
+ i3 = HEAP32[i2 >> 2] | 0;
+ HEAPF64[i3 >> 3] = +(i1 | 0);
+ HEAP32[i3 + 8 >> 2] = 3;
+ HEAP32[i2 >> 2] = i3 + 16;
+ return;
+}
+function _luaB_rawset(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _luaL_checktype(i1, 1, 5);
+ _luaL_checkany(i1, 2);
+ _luaL_checkany(i1, 3);
+ _lua_settop(i1, 3);
+ _lua_rawset(i1, 1);
+ STACKTOP = i2;
+ return 1;
+}
+function _luaE_setdebt(i2, i1) {
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i3 = 0;
+ i3 = i2 + 12 | 0;
+ i2 = i2 + 8 | 0;
+ HEAP32[i2 >> 2] = (HEAP32[i3 >> 2] | 0) - i1 + (HEAP32[i2 >> 2] | 0);
+ HEAP32[i3 >> 2] = i1;
+ return;
+}
+function _luaB_cocreate(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ _luaL_checktype(i1, 1, 6);
+ i3 = _lua_newthread(i1) | 0;
+ _lua_pushvalue(i1, 1);
+ _lua_xmove(i1, i3, 1);
+ STACKTOP = i2;
+ return 1;
+}
+function _io_noclose(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ HEAP32[(_luaL_checkudata(i1, 1, 2832) | 0) + 4 >> 2] = 154;
+ _lua_pushnil(i1);
+ _lua_pushlstring(i1, 2840, 26) | 0;
+ STACKTOP = i2;
+ return 2;
+}
+function _io_fclose(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ i1 = _luaL_fileresult(i1, (_fclose(HEAP32[(_luaL_checkudata(i1, 1, 2832) | 0) >> 2] | 0) | 0) == 0 | 0, 0) | 0;
+ STACKTOP = i2;
+ return i1 | 0;
+}
+function _luaL_optnumber(i3, i4, d2) {
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ d2 = +d2;
+ var i1 = 0;
+ i1 = STACKTOP;
+ if ((_lua_type(i3, i4) | 0) >= 1) {
+ d2 = +_luaL_checknumber(i3, i4);
+ }
+ STACKTOP = i1;
+ return +d2;
+}
+function _math_atan2(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, d3 = 0.0;
+ i2 = STACKTOP;
+ d3 = +_luaL_checknumber(i1, 1);
+ _lua_pushnumber(i1, +Math_atan2(+d3, +(+_luaL_checknumber(i1, 2))));
+ STACKTOP = i2;
+ return 1;
+}
+function _luaK_codeABx(i4, i2, i3, i1) {
+ i4 = i4 | 0;
+ i2 = i2 | 0;
+ i3 = i3 | 0;
+ i1 = i1 | 0;
+ var i5 = 0;
+ i5 = STACKTOP;
+ i4 = _luaK_code(i4, i3 << 6 | i2 | i1 << 14) | 0;
+ STACKTOP = i5;
+ return i4 | 0;
+}
+function _luaF_newCclosure(i2, i1) {
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i3 = 0;
+ i3 = STACKTOP;
+ i2 = _luaC_newobj(i2, 38, (i1 << 4) + 16 | 0, 0, 0) | 0;
+ HEAP8[i2 + 6 | 0] = i1;
+ STACKTOP = i3;
+ return i2 | 0;
+}
+function _math_pow(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, d3 = 0.0;
+ i2 = STACKTOP;
+ d3 = +_luaL_checknumber(i1, 1);
+ _lua_pushnumber(i1, +Math_pow(+d3, +(+_luaL_checknumber(i1, 2))));
+ STACKTOP = i2;
+ return 1;
+}
+function _math_ldexp(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, d3 = 0.0;
+ i2 = STACKTOP;
+ d3 = +_luaL_checknumber(i1, 1);
+ _lua_pushnumber(i1, +_ldexp(d3, _luaL_checkinteger(i1, 2) | 0));
+ STACKTOP = i2;
+ return 1;
+}
+function _luaF_newupval(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ i1 = _luaC_newobj(i1, 10, 32, 0, 0) | 0;
+ HEAP32[i1 + 8 >> 2] = i1 + 16;
+ HEAP32[i1 + 24 >> 2] = 0;
+ STACKTOP = i2;
+ return i1 | 0;
+}
+function _lua_pushnumber(i2, d1) {
+ i2 = i2 | 0;
+ d1 = +d1;
+ var i3 = 0;
+ i2 = i2 + 8 | 0;
+ i3 = HEAP32[i2 >> 2] | 0;
+ HEAPF64[i3 >> 3] = d1;
+ HEAP32[i3 + 8 >> 2] = 3;
+ HEAP32[i2 >> 2] = i3 + 16;
+ return;
+}
+function _math_fmod(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, d3 = 0.0;
+ i2 = STACKTOP;
+ d3 = +_luaL_checknumber(i1, 1);
+ _lua_pushnumber(i1, +_fmod(+d3, +(+_luaL_checknumber(i1, 2))));
+ STACKTOP = i2;
+ return 1;
+}
+function _luaG_concaterror(i3, i2, i1) {
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i4 = 0;
+ i4 = HEAP32[i2 + 8 >> 2] | 0;
+ _luaG_typeerror(i3, (i4 & 15 | 0) == 4 | (i4 | 0) == 3 ? i1 : i2, 1912);
+}
+function _luaB_rawequal(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _luaL_checkany(i1, 1);
+ _luaL_checkany(i1, 2);
+ _lua_pushboolean(i1, _lua_rawequal(i1, 1, 2) | 0);
+ STACKTOP = i2;
+ return 1;
+}
+function _db_getuservalue(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ if ((_lua_type(i1, 1) | 0) == 7) {
+ _lua_getuservalue(i1, 1);
+ } else {
+ _lua_pushnil(i1);
+ }
+ STACKTOP = i2;
+ return 1;
+}
+function _strchr(i2, i1) {
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i3 = 0;
+ i3 = STACKTOP;
+ i2 = ___strchrnul(i2, i1) | 0;
+ STACKTOP = i3;
+ return ((HEAP8[i2] | 0) == (i1 & 255) << 24 >> 24 ? i2 : 0) | 0;
+}
+function runPostSets() {}
+function _rand_r(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = (Math_imul(HEAP32[i1 >> 2] | 0, 31010991) | 0) + 1735287159 & 2147483647;
+ HEAP32[i1 >> 2] = i2;
+ return i2 | 0;
+}
+function _luaL_checkany(i1, i3) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ if ((_lua_type(i1, i3) | 0) == -1) {
+ _luaL_argerror(i1, i3, 1256) | 0;
+ }
+ STACKTOP = i2;
+ return;
+}
+function _i64Subtract(i2, i4, i1, i3) {
+ i2 = i2 | 0;
+ i4 = i4 | 0;
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ i4 = i4 - i3 - (i1 >>> 0 > i2 >>> 0 | 0) >>> 0;
+ return (tempRet0 = i4, i2 - i1 >>> 0 | 0) | 0;
+}
+function _db_getmetatable(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _luaL_checkany(i1, 1);
+ if ((_lua_getmetatable(i1, 1) | 0) == 0) {
+ _lua_pushnil(i1);
+ }
+ STACKTOP = i2;
+ return 1;
+}
+function _luaB_rawget(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _luaL_checktype(i1, 1, 5);
+ _luaL_checkany(i1, 2);
+ _lua_settop(i1, 2);
+ _lua_rawget(i1, 1);
+ STACKTOP = i2;
+ return 1;
+}
+function _luaB_type(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _luaL_checkany(i1, 1);
+ _lua_pushstring(i1, _lua_typename(i1, _lua_type(i1, 1) | 0) | 0) | 0;
+ STACKTOP = i2;
+ return 1;
+}
+function dynCall_iiiii(i5, i4, i3, i2, i1) {
+ i5 = i5 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ return FUNCTION_TABLE_iiiii[i5 & 3](i4 | 0, i3 | 0, i2 | 0, i1 | 0) | 0;
+}
+function _lstop(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ _lua_sethook(i1, 0, 0, 0) | 0;
+ _luaL_error(i1, 200, i2) | 0;
+ STACKTOP = i2;
+ return;
+}
+function _i64Add(i1, i3, i4, i2) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ i2 = i2 | 0;
+ i4 = i1 + i4 >>> 0;
+ return (tempRet0 = i3 + i2 + (i4 >>> 0 < i1 >>> 0 | 0) >>> 0, i4 | 0) | 0;
+}
+function _luaK_ret(i3, i2, i1) {
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i4 = 0;
+ i4 = STACKTOP;
+ _luaK_code(i3, i2 << 6 | (i1 << 23) + 8388608 | 31) | 0;
+ STACKTOP = i4;
+ return;
+}
+function _strpbrk(i2, i1) {
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i3 = 0;
+ i3 = STACKTOP;
+ i1 = i2 + (_strcspn(i2, i1) | 0) | 0;
+ STACKTOP = i3;
+ return ((HEAP8[i1] | 0) != 0 ? i1 : 0) | 0;
+}
+function _luaL_setmetatable(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ var i3 = 0;
+ i3 = STACKTOP;
+ _lua_getfield(i1, -1001e3, i2);
+ _lua_setmetatable(i1, -2) | 0;
+ STACKTOP = i3;
+ return;
+}
+function _lua_atpanic(i2, i1) {
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i3 = 0;
+ i3 = (HEAP32[i2 + 12 >> 2] | 0) + 168 | 0;
+ i2 = HEAP32[i3 >> 2] | 0;
+ HEAP32[i3 >> 2] = i1;
+ return i2 | 0;
+}
+function _luaL_newstate() {
+ var i1 = 0, i2 = 0;
+ i2 = STACKTOP;
+ i1 = _lua_newstate(1, 0) | 0;
+ if ((i1 | 0) != 0) {
+ _lua_atpanic(i1, 143) | 0;
+ }
+ STACKTOP = i2;
+ return i1 | 0;
+}
+function _luaL_buffinit(i2, i1) {
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ HEAP32[i1 + 12 >> 2] = i2;
+ HEAP32[i1 >> 2] = i1 + 16;
+ HEAP32[i1 + 8 >> 2] = 0;
+ HEAP32[i1 + 4 >> 2] = 1024;
+ return;
+}
+function _strrchr(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ var i3 = 0;
+ i3 = STACKTOP;
+ i2 = ___memrchr(i1, i2, (_strlen(i1 | 0) | 0) + 1 | 0) | 0;
+ STACKTOP = i3;
+ return i2 | 0;
+}
+function _luaK_fixline(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ HEAP32[(HEAP32[(HEAP32[i1 >> 2] | 0) + 20 >> 2] | 0) + ((HEAP32[i1 + 20 >> 2] | 0) + -1 << 2) >> 2] = i2;
+ return;
+}
+function _luaX_lookahead(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i3 = STACKTOP;
+ i2 = _llex(i1, i1 + 40 | 0) | 0;
+ HEAP32[i1 + 32 >> 2] = i2;
+ STACKTOP = i3;
+ return i2 | 0;
+}
+function _f_call(i2, i1) {
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i3 = 0;
+ i3 = STACKTOP;
+ _luaD_call(i2, HEAP32[i1 >> 2] | 0, HEAP32[i1 + 4 >> 2] | 0, 0);
+ STACKTOP = i3;
+ return;
+}
+function _io_pclose(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _luaL_checkudata(i1, 1, 2832) | 0;
+ i1 = _luaL_execresult(i1, -1) | 0;
+ STACKTOP = i2;
+ return i1 | 0;
+}
+function _luaS_new(i2, i1) {
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i3 = 0;
+ i3 = STACKTOP;
+ i2 = _luaS_newlstr(i2, i1, _strlen(i1 | 0) | 0) | 0;
+ STACKTOP = i3;
+ return i2 | 0;
+}
+function _os_getenv(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_pushstring(i1, _getenv(_luaL_checklstring(i1, 1, 0) | 0) | 0) | 0;
+ STACKTOP = i2;
+ return 1;
+}
+function _math_rad(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_pushnumber(i1, +_luaL_checknumber(i1, 1) * .017453292519943295);
+ STACKTOP = i2;
+ return 1;
+}
+function _math_deg(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_pushnumber(i1, +_luaL_checknumber(i1, 1) / .017453292519943295);
+ STACKTOP = i2;
+ return 1;
+}
+function _writer(i4, i2, i1, i3) {
+ i4 = i4 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ i4 = STACKTOP;
+ _luaL_addlstring(i3, i2, i1);
+ STACKTOP = i4;
+ return 0;
+}
+function _luaL_addstring(i2, i1) {
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i3 = 0;
+ i3 = STACKTOP;
+ _luaL_addlstring(i2, i1, _strlen(i1 | 0) | 0);
+ STACKTOP = i3;
+ return;
+}
+function _pcallcont(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ i1 = _finishpcall(i1, (_lua_getctx(i1, 0) | 0) == 1 | 0) | 0;
+ STACKTOP = i2;
+ return i1 | 0;
+}
+function _luaopen_coroutine(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_createtable(i1, 0, 6);
+ _luaL_setfuncs(i1, 10656, 0);
+ STACKTOP = i2;
+ return 1;
+}
+function _lua_version(i1) {
+ i1 = i1 | 0;
+ if ((i1 | 0) == 0) {
+ i1 = 920;
+ } else {
+ i1 = HEAP32[(HEAP32[i1 + 12 >> 2] | 0) + 176 >> 2] | 0;
+ }
+ return i1 | 0;
+}
+function _lua_pushnil(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i1 = i1 + 8 | 0;
+ i2 = HEAP32[i1 >> 2] | 0;
+ HEAP32[i2 + 8 >> 2] = 0;
+ HEAP32[i1 >> 2] = i2 + 16;
+ return;
+}
+function _math_floor(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_pushnumber(i1, +Math_floor(+(+_luaL_checknumber(i1, 1))));
+ STACKTOP = i2;
+ return 1;
+}
+function _laction(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _signal(i1 | 0, 0) | 0;
+ _lua_sethook(HEAP32[48] | 0, 1, 11, 1) | 0;
+ STACKTOP = i2;
+ return;
+}
+function dynCall_iiii(i4, i3, i2, i1) {
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ return FUNCTION_TABLE_iiii[i4 & 3](i3 | 0, i2 | 0, i1 | 0) | 0;
+}
+function _luaopen_debug(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_createtable(i1, 0, 16);
+ _luaL_setfuncs(i1, 11176, 0);
+ STACKTOP = i2;
+ return 1;
+}
+function _luaopen_bit32(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_createtable(i1, 0, 12);
+ _luaL_setfuncs(i1, 10240, 0);
+ STACKTOP = i2;
+ return 1;
+}
+function _math_sqrt(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_pushnumber(i1, +Math_sqrt(+(+_luaL_checknumber(i1, 1))));
+ STACKTOP = i2;
+ return 1;
+}
+function _math_ceil(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_pushnumber(i1, +Math_ceil(+(+_luaL_checknumber(i1, 1))));
+ STACKTOP = i2;
+ return 1;
+}
+function _math_atan(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_pushnumber(i1, +Math_atan(+(+_luaL_checknumber(i1, 1))));
+ STACKTOP = i2;
+ return 1;
+}
+function _math_asin(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_pushnumber(i1, +Math_asin(+(+_luaL_checknumber(i1, 1))));
+ STACKTOP = i2;
+ return 1;
+}
+function _math_acos(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_pushnumber(i1, +Math_acos(+(+_luaL_checknumber(i1, 1))));
+ STACKTOP = i2;
+ return 1;
+}
+function _lua_close(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _close_state(HEAP32[(HEAP32[i1 + 12 >> 2] | 0) + 172 >> 2] | 0);
+ STACKTOP = i2;
+ return;
+}
+function _dothecall(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ i2 = STACKTOP;
+ _luaD_call(i1, (HEAP32[i1 + 8 >> 2] | 0) + -32 | 0, 0, 0);
+ STACKTOP = i2;
+ return;
+}
+function _math_tan(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_pushnumber(i1, +Math_tan(+(+_luaL_checknumber(i1, 1))));
+ STACKTOP = i2;
+ return 1;
+}
+function _math_sin(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_pushnumber(i1, +Math_sin(+(+_luaL_checknumber(i1, 1))));
+ STACKTOP = i2;
+ return 1;
+}
+function _math_log10(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_pushnumber(i1, +_log10(+(+_luaL_checknumber(i1, 1))));
+ STACKTOP = i2;
+ return 1;
+}
+function _math_exp(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_pushnumber(i1, +Math_exp(+(+_luaL_checknumber(i1, 1))));
+ STACKTOP = i2;
+ return 1;
+}
+function _math_cos(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_pushnumber(i1, +Math_cos(+(+_luaL_checknumber(i1, 1))));
+ STACKTOP = i2;
+ return 1;
+}
+function _math_abs(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_pushnumber(i1, +Math_abs(+(+_luaL_checknumber(i1, 1))));
+ STACKTOP = i2;
+ return 1;
+}
+function _math_randomseed(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _srand(_luaL_checkunsigned(i1, 1) | 0);
+ _rand() | 0;
+ STACKTOP = i2;
+ return 0;
+}
+function _luaopen_os(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_createtable(i1, 0, 11);
+ _luaL_setfuncs(i1, 5624, 0);
+ STACKTOP = i2;
+ return 1;
+}
+function _math_tanh(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_pushnumber(i1, +_tanh(+(+_luaL_checknumber(i1, 1))));
+ STACKTOP = i2;
+ return 1;
+}
+function _math_sinh(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_pushnumber(i1, +_sinh(+(+_luaL_checknumber(i1, 1))));
+ STACKTOP = i2;
+ return 1;
+}
+function _math_cosh(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_pushnumber(i1, +_cosh(+(+_luaL_checknumber(i1, 1))));
+ STACKTOP = i2;
+ return 1;
+}
+function _luaB_yield(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ i1 = _lua_yieldk(i1, _lua_gettop(i1) | 0, 0, 0) | 0;
+ STACKTOP = i2;
+ return i1 | 0;
+}
+function _luaB_tostring(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _luaL_checkany(i1, 1);
+ _luaL_tolstring(i1, 1, 0) | 0;
+ STACKTOP = i2;
+ return 1;
+}
+function _growstack(i2, i1) {
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i3 = 0;
+ i3 = STACKTOP;
+ _luaD_growstack(i2, HEAP32[i1 >> 2] | 0);
+ STACKTOP = i3;
+ return;
+}
+function ___udivdi3(i4, i3, i2, i1) {
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ i4 = ___udivmoddi4(i4, i3, i2, i1, 0) | 0;
+ return i4 | 0;
+}
+function _b_not(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_pushunsigned(i1, ~(_luaL_checkunsigned(i1, 1) | 0));
+ STACKTOP = i2;
+ return 1;
+}
+function _luaO_fb2int(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = i1 >>> 3 & 31;
+ if ((i2 | 0) != 0) {
+ i1 = (i1 & 7 | 8) << i2 + -1;
+ }
+ return i1 | 0;
+}
+function _luaB_corunning(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_pushboolean(i1, _lua_pushthread(i1) | 0);
+ STACKTOP = i2;
+ return 2;
+}
+function stackAlloc(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + i1 | 0;
+ STACKTOP = STACKTOP + 7 & -8;
+ return i2 | 0;
+}
+function _strcoll(i2, i1) {
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i3 = 0;
+ i3 = STACKTOP;
+ i2 = _strcmp(i2, i1) | 0;
+ STACKTOP = i3;
+ return i2 | 0;
+}
+function _os_clock(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_pushnumber(i1, +(_clock() | 0) / 1.0e6);
+ STACKTOP = i2;
+ return 1;
+}
+function _dofilecont(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ i1 = (_lua_gettop(i1) | 0) + -1 | 0;
+ STACKTOP = i2;
+ return i1 | 0;
+}
+function _scalbnl(d2, i1) {
+ d2 = +d2;
+ i1 = i1 | 0;
+ var i3 = 0;
+ i3 = STACKTOP;
+ d2 = +_scalbn(d2, i1);
+ STACKTOP = i3;
+ return +d2;
+}
+function _tolower(i1) {
+ i1 = i1 | 0;
+ if ((i1 | 0) < 65) return i1 | 0;
+ if ((i1 | 0) > 90) return i1 | 0;
+ return i1 - 65 + 97 | 0;
+}
+function _lua_gettop(i1) {
+ i1 = i1 | 0;
+ return (HEAP32[i1 + 8 >> 2] | 0) - ((HEAP32[HEAP32[i1 + 16 >> 2] >> 2] | 0) + 16) >> 4 | 0;
+}
+function dynCall_iii(i3, i2, i1) {
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ return FUNCTION_TABLE_iii[i3 & 1](i2 | 0, i1 | 0) | 0;
+}
+function _str_match(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ i1 = _str_find_aux(i1, 0) | 0;
+ STACKTOP = i2;
+ return i1 | 0;
+}
+function _luaM_toobig(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ _luaG_runerror(i1, 4144, i2);
+}
+function _luaK_getlabel(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = HEAP32[i1 + 20 >> 2] | 0;
+ HEAP32[i1 + 24 >> 2] = i2;
+ return i2 | 0;
+}
+function _ldexp(d2, i1) {
+ d2 = +d2;
+ i1 = i1 | 0;
+ var i3 = 0;
+ i3 = STACKTOP;
+ d2 = +_scalbn(d2, i1);
+ STACKTOP = i3;
+ return +d2;
+}
+function _str_find(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ i1 = _str_find_aux(i1, 1) | 0;
+ STACKTOP = i2;
+ return i1 | 0;
+}
+function _db_getregistry(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _lua_pushvalue(i1, -1001e3);
+ STACKTOP = i2;
+ return 1;
+}
+function _luaB_ipairs(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _pairsmeta(i1, 9960, 1, 165);
+ STACKTOP = i2;
+ return 3;
+}
+function _strlen(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = i1;
+ while (HEAP8[i2] | 0) {
+ i2 = i2 + 1 | 0;
+ }
+ return i2 - i1 | 0;
+}
+function _luaB_pairs(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _pairsmeta(i1, 9864, 0, 93);
+ STACKTOP = i2;
+ return 3;
+}
+function setThrew(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ if ((__THREW__ | 0) == 0) {
+ __THREW__ = i1;
+ threwValue = i2;
+ }
+}
+function _io_output(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _g_iofile(i1, 2800, 3512);
+ STACKTOP = i2;
+ return 1;
+}
+function dynCall_vii(i3, i2, i1) {
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ FUNCTION_TABLE_vii[i3 & 15](i2 | 0, i1 | 0);
+}
+function _io_input(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ _g_iofile(i1, 2776, 3480);
+ STACKTOP = i2;
+ return 1;
+}
+function _semerror(i2, i1) {
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ HEAP32[i2 + 16 >> 2] = 0;
+ _luaX_syntaxerror(i2, i1);
+}
+function _luaX_syntaxerror(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ _lexerror(i1, i2, HEAP32[i1 + 16 >> 2] | 0);
+}
+function b4(i1, i2, i3, i4) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ abort(4);
+ return 0;
+}
+function _lua_typename(i2, i1) {
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ return HEAP32[8528 + (i1 + 1 << 2) >> 2] | 0;
+}
+function dynCall_ii(i2, i1) {
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ return FUNCTION_TABLE_ii[i2 & 255](i1 | 0) | 0;
+}
+function dynCall_vi(i2, i1) {
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ FUNCTION_TABLE_vi[i2 & 1](i1 | 0);
+}
+function b0(i1, i2, i3) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ i3 = i3 | 0;
+ abort(0);
+ return 0;
+}
+function _lua_gethookmask(i1) {
+ i1 = i1 | 0;
+ return HEAPU8[i1 + 40 | 0] | 0 | 0;
+}
+function _lua_gethookcount(i1) {
+ i1 = i1 | 0;
+ return HEAP32[i1 + 44 >> 2] | 0;
+}
+function _lua_status(i1) {
+ i1 = i1 | 0;
+ return HEAPU8[i1 + 6 | 0] | 0 | 0;
+}
+function _lua_gethook(i1) {
+ i1 = i1 | 0;
+ return HEAP32[i1 + 52 >> 2] | 0;
+}
+function b5(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ abort(5);
+ return 0;
+}
+function _lua_error(i1) {
+ i1 = i1 | 0;
+ _luaG_errormsg(i1);
+ return 0;
+}
+function b2(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ abort(2);
+}
+function stackRestore(i1) {
+ i1 = i1 | 0;
+ STACKTOP = i1;
+}
+function setTempRet9(i1) {
+ i1 = i1 | 0;
+ tempRet9 = i1;
+}
+function setTempRet8(i1) {
+ i1 = i1 | 0;
+ tempRet8 = i1;
+}
+function setTempRet7(i1) {
+ i1 = i1 | 0;
+ tempRet7 = i1;
+}
+function setTempRet6(i1) {
+ i1 = i1 | 0;
+ tempRet6 = i1;
+}
+function setTempRet5(i1) {
+ i1 = i1 | 0;
+ tempRet5 = i1;
+}
+function setTempRet4(i1) {
+ i1 = i1 | 0;
+ tempRet4 = i1;
+}
+function setTempRet3(i1) {
+ i1 = i1 | 0;
+ tempRet3 = i1;
+}
+function setTempRet2(i1) {
+ i1 = i1 | 0;
+ tempRet2 = i1;
+}
+function setTempRet1(i1) {
+ i1 = i1 | 0;
+ tempRet1 = i1;
+}
+function setTempRet0(i1) {
+ i1 = i1 | 0;
+ tempRet0 = i1;
+}
+function b3(i1) {
+ i1 = i1 | 0;
+ abort(3);
+ return 0;
+}
+function _rand() {
+ return _rand_r(___rand_seed) | 0;
+}
+function stackSave() {
+ return STACKTOP | 0;
+}
+function b1(i1) {
+ i1 = i1 | 0;
+ abort(1);
+}
+
+// EMSCRIPTEN_END_FUNCS
+ var FUNCTION_TABLE_iiii = [b0,_getF,_getS,_generic_reader];
+ var FUNCTION_TABLE_vi = [b1,_laction];
+ var FUNCTION_TABLE_vii = [b2,_lstop,_growstack,_f_call,_resume,_unroll,_f_parser,_dothecall,_f_luaopen,_hookf,b2,b2,b2,b2,b2,b2];
+ var FUNCTION_TABLE_ii = [b3,_io_close,_io_flush,_io_input,_io_lines,_io_open,_io_output,_io_popen,_io_read,_io_tmpfile,_io_type,_io_write,_f_flush,_f_lines,_f_read,_f_seek,_f_setvbuf,_f_write,_f_gc,_f_tostring,_math_abs,_math_acos,_math_asin,_math_atan2,_math_atan,_math_ceil,_math_cosh,_math_cos,_math_deg
+ ,_math_exp,_math_floor,_math_fmod,_math_frexp,_math_ldexp,_math_log10,_math_log,_math_max,_math_min,_math_modf,_math_pow,_math_rad,_math_random,_math_randomseed,_math_sinh,_math_sin,_math_sqrt,_math_tanh,_math_tan,_ll_loadlib,_ll_searchpath,_ll_seeall,_ll_module,_ll_require,_os_clock,_os_date,_os_difftime,_os_execute,_os_exit,_os_getenv
+ ,_os_remove,_os_rename,_os_setlocale,_os_time,_os_tmpname,_str_byte,_str_char,_str_dump,_str_find,_str_format,_gmatch,_str_gsub,_str_len,_str_lower,_str_match,_str_rep,_str_reverse,_str_sub,_str_upper,_tconcat,_maxn,_tinsert,_pack,_unpack,_tremove,_sort,_luaB_assert,_luaB_collectgarbage,_luaB_dofile,_luaB_error
+ ,_luaB_getmetatable,_luaB_ipairs,_luaB_loadfile,_luaB_load,_luaB_next,_luaB_pairs,_luaB_pcall,_luaB_print,_luaB_rawequal,_luaB_rawlen,_luaB_rawget,_luaB_rawset,_luaB_select,_luaB_setmetatable,_luaB_tonumber,_luaB_tostring,_luaB_type,_luaB_xpcall,_b_arshift,_b_and,_b_not,_b_or,_b_xor,_b_test,_b_extract,_b_lrot,_b_lshift,_b_replace,_b_rrot,_b_rshift
+ ,_luaB_cocreate,_luaB_coresume,_luaB_corunning,_luaB_costatus,_luaB_cowrap,_luaB_yield,_db_debug,_db_getuservalue,_db_gethook,_db_getinfo,_db_getlocal,_db_getregistry,_db_getmetatable,_db_getupvalue,_db_upvaluejoin,_db_upvalueid,_db_setuservalue,_db_sethook,_db_setlocal,_db_setmetatable,_db_setupvalue,_db_traceback,_pmain,_traceback,_panic,_luaopen_base,_luaopen_package,_luaopen_coroutine,_luaopen_table,_luaopen_io
+ ,_luaopen_os,_luaopen_string,_luaopen_bit32,_luaopen_math,_luaopen_debug,_io_noclose,_io_readline,_io_fclose,_io_pclose,_gctm,_searcher_preload,_searcher_Lua,_searcher_C,_searcher_Croot,_gmatch_aux,_dofilecont,_ipairsaux,_pcallcont,_luaB_auxwrap,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3
+ ,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3
+ ,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3
+ ,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3,b3];
+ var FUNCTION_TABLE_iiiii = [b4,_l_alloc,_writer,b4];
+ var FUNCTION_TABLE_iii = [b5,_lua_newstate];
+
+ return { _testSetjmp: _testSetjmp, _i64Subtract: _i64Subtract, _free: _free, _main: _main, _rand_r: _rand_r, _realloc: _realloc, _i64Add: _i64Add, _tolower: _tolower, _saveSetjmp: _saveSetjmp, _memset: _memset, _malloc: _malloc, _memcpy: _memcpy, _strlen: _strlen, _rand: _rand, _bitshift64Shl: _bitshift64Shl, runPostSets: runPostSets, stackAlloc: stackAlloc, stackSave: stackSave, stackRestore: stackRestore, setThrew: setThrew, setTempRet0: setTempRet0, setTempRet1: setTempRet1, setTempRet2: setTempRet2, setTempRet3: setTempRet3, setTempRet4: setTempRet4, setTempRet5: setTempRet5, setTempRet6: setTempRet6, setTempRet7: setTempRet7, setTempRet8: setTempRet8, setTempRet9: setTempRet9, dynCall_iiii: dynCall_iiii, dynCall_vi: dynCall_vi, dynCall_vii: dynCall_vii, dynCall_ii: dynCall_ii, dynCall_iiiii: dynCall_iiiii, dynCall_iii: dynCall_iii };
+}).toString(),
+// EMSCRIPTEN_END_ASM
+{ "Math": Math, "Int8Array": Int8Array, "Int16Array": Int16Array, "Int32Array": Int32Array, "Uint8Array": Uint8Array, "Uint16Array": Uint16Array, "Uint32Array": Uint32Array, "Float32Array": Float32Array, "Float64Array": Float64Array, "abort": abort, "assert": assert, "asmPrintInt": asmPrintInt, "asmPrintFloat": asmPrintFloat, "min": Math_min, "invoke_iiii": invoke_iiii, "invoke_vi": invoke_vi, "invoke_vii": invoke_vii, "invoke_ii": invoke_ii, "invoke_iiiii": invoke_iiiii, "invoke_iii": invoke_iii, "_isalnum": _isalnum, "_fabs": _fabs, "_frexp": _frexp, "_exp": _exp, "_fread": _fread, "__reallyNegative": __reallyNegative, "_longjmp": _longjmp, "__addDays": __addDays, "_fsync": _fsync, "_signal": _signal, "_rename": _rename, "_sbrk": _sbrk, "_emscripten_memcpy_big": _emscripten_memcpy_big, "_sinh": _sinh, "_sysconf": _sysconf, "_close": _close, "_ferror": _ferror, "_clock": _clock, "_cos": _cos, "_tanh": _tanh, "_unlink": _unlink, "_write": _write, "__isLeapYear": __isLeapYear, "_ftell": _ftell, "_isupper": _isupper, "_gmtime_r": _gmtime_r, "_islower": _islower, "_tmpnam": _tmpnam, "_tmpfile": _tmpfile, "_send": _send, "_abort": _abort, "_setvbuf": _setvbuf, "_atan2": _atan2, "_setlocale": _setlocale, "_isgraph": _isgraph, "_modf": _modf, "_strerror_r": _strerror_r, "_fscanf": _fscanf, "___setErrNo": ___setErrNo, "_isalpha": _isalpha, "_srand": _srand, "_mktime": _mktime, "_putchar": _putchar, "_gmtime": _gmtime, "_localeconv": _localeconv, "_sprintf": _sprintf, "_localtime": _localtime, "_read": _read, "_fwrite": _fwrite, "_time": _time, "_fprintf": _fprintf, "_exit": _exit, "_freopen": _freopen, "_llvm_pow_f64": _llvm_pow_f64, "_fgetc": _fgetc, "_fmod": _fmod, "_lseek": _lseek, "_rmdir": _rmdir, "_asin": _asin, "_floor": _floor, "_pwrite": _pwrite, "_localtime_r": _localtime_r, "_tzset": _tzset, "_open": _open, "_remove": _remove, "_snprintf": _snprintf, "__scanString": __scanString, "_strftime": _strftime, "_fseek": _fseek, "_iscntrl": _iscntrl, "_isxdigit": _isxdigit, "_fclose": _fclose, "_log": _log, "_recv": _recv, "_tan": _tan, "_copysign": _copysign, "__getFloat": __getFloat, "_fputc": _fputc, "_ispunct": _ispunct, "_ceil": _ceil, "_isspace": _isspace, "_fopen": _fopen, "_sin": _sin, "_acos": _acos, "_cosh": _cosh, "___buildEnvironment": ___buildEnvironment, "_difftime": _difftime, "_ungetc": _ungetc, "_system": _system, "_fflush": _fflush, "_log10": _log10, "_fileno": _fileno, "__exit": __exit, "__arraySum": __arraySum, "_fgets": _fgets, "_atan": _atan, "_pread": _pread, "_mkport": _mkport, "_toupper": _toupper, "_feof": _feof, "___errno_location": ___errno_location, "_clearerr": _clearerr, "_getenv": _getenv, "_strerror": _strerror, "_emscripten_longjmp": _emscripten_longjmp, "__formatString": __formatString, "_fputs": _fputs, "_sqrt": _sqrt, "STACKTOP": STACKTOP, "STACK_MAX": STACK_MAX, "tempDoublePtr": tempDoublePtr, "ABORT": ABORT, "cttz_i8": cttz_i8, "ctlz_i8": ctlz_i8, "___rand_seed": ___rand_seed, "NaN": NaN, "Infinity": Infinity, "_stderr": _stderr, "_stdin": _stdin, "_stdout": _stdout }, buffer);
+var _testSetjmp = Module["_testSetjmp"] = asm["_testSetjmp"];
+var _i64Subtract = Module["_i64Subtract"] = asm["_i64Subtract"];
+var _free = Module["_free"] = asm["_free"];
+var _main = Module["_main"] = asm["_main"];
+var _rand_r = Module["_rand_r"] = asm["_rand_r"];
+var _realloc = Module["_realloc"] = asm["_realloc"];
+var _i64Add = Module["_i64Add"] = asm["_i64Add"];
+var _tolower = Module["_tolower"] = asm["_tolower"];
+var _saveSetjmp = Module["_saveSetjmp"] = asm["_saveSetjmp"];
+var _memset = Module["_memset"] = asm["_memset"];
+var _malloc = Module["_malloc"] = asm["_malloc"];
+var _memcpy = Module["_memcpy"] = asm["_memcpy"];
+var _strlen = Module["_strlen"] = asm["_strlen"];
+var _rand = Module["_rand"] = asm["_rand"];
+var _bitshift64Shl = Module["_bitshift64Shl"] = asm["_bitshift64Shl"];
+var runPostSets = Module["runPostSets"] = asm["runPostSets"];
+var dynCall_iiii = Module["dynCall_iiii"] = asm["dynCall_iiii"];
+var dynCall_vi = Module["dynCall_vi"] = asm["dynCall_vi"];
+var dynCall_vii = Module["dynCall_vii"] = asm["dynCall_vii"];
+var dynCall_ii = Module["dynCall_ii"] = asm["dynCall_ii"];
+var dynCall_iiiii = Module["dynCall_iiiii"] = asm["dynCall_iiiii"];
+var dynCall_iii = Module["dynCall_iii"] = asm["dynCall_iii"];
+
+Runtime.stackAlloc = function(size) { return asm['stackAlloc'](size) };
+Runtime.stackSave = function() { return asm['stackSave']() };
+Runtime.stackRestore = function(top) { asm['stackRestore'](top) };
+
+
+// TODO: strip out parts of this we do not need
+
+//======= begin closure i64 code =======
+
+// Copyright 2009 The Closure Library Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @fileoverview Defines a Long class for representing a 64-bit two's-complement
+ * integer value, which faithfully simulates the behavior of a Java "long". This
+ * implementation is derived from LongLib in GWT.
+ *
+ */
+
+var i64Math = (function() { // Emscripten wrapper
+ var goog = { math: {} };
+
+
+ /**
+ * Constructs a 64-bit two's-complement integer, given its low and high 32-bit
+ * values as *signed* integers. See the from* functions below for more
+ * convenient ways of constructing Longs.
+ *
+ * The internal representation of a long is the two given signed, 32-bit values.
+ * We use 32-bit pieces because these are the size of integers on which
+ * Javascript performs bit-operations. For operations like addition and
+ * multiplication, we split each number into 16-bit pieces, which can easily be
+ * multiplied within Javascript's floating-point representation without overflow
+ * or change in sign.
+ *
+ * In the algorithms below, we frequently reduce the negative case to the
+ * positive case by negating the input(s) and then post-processing the result.
+ * Note that we must ALWAYS check specially whether those values are MIN_VALUE
+ * (-2^63) because -MIN_VALUE == MIN_VALUE (since 2^63 cannot be represented as
+ * a positive number, it overflows back into a negative). Not handling this
+ * case would often result in infinite recursion.
+ *
+ * @param {number} low The low (signed) 32 bits of the long.
+ * @param {number} high The high (signed) 32 bits of the long.
+ * @constructor
+ */
+ goog.math.Long = function(low, high) {
+ /**
+ * @type {number}
+ * @private
+ */
+ this.low_ = low | 0; // force into 32 signed bits.
+
+ /**
+ * @type {number}
+ * @private
+ */
+ this.high_ = high | 0; // force into 32 signed bits.
+ };
+
+
+ // NOTE: Common constant values ZERO, ONE, NEG_ONE, etc. are defined below the
+ // from* methods on which they depend.
+
+
+ /**
+ * A cache of the Long representations of small integer values.
+ * @type {!Object}
+ * @private
+ */
+ goog.math.Long.IntCache_ = {};
+
+
+ /**
+ * Returns a Long representing the given (32-bit) integer value.
+ * @param {number} value The 32-bit integer in question.
+ * @return {!goog.math.Long} The corresponding Long value.
+ */
+ goog.math.Long.fromInt = function(value) {
+ if (-128 <= value && value < 128) {
+ var cachedObj = goog.math.Long.IntCache_[value];
+ if (cachedObj) {
+ return cachedObj;
+ }
+ }
+
+ var obj = new goog.math.Long(value | 0, value < 0 ? -1 : 0);
+ if (-128 <= value && value < 128) {
+ goog.math.Long.IntCache_[value] = obj;
+ }
+ return obj;
+ };
+
+
+ /**
+ * Returns a Long representing the given value, provided that it is a finite
+ * number. Otherwise, zero is returned.
+ * @param {number} value The number in question.
+ * @return {!goog.math.Long} The corresponding Long value.
+ */
+ goog.math.Long.fromNumber = function(value) {
+ if (isNaN(value) || !isFinite(value)) {
+ return goog.math.Long.ZERO;
+ } else if (value <= -goog.math.Long.TWO_PWR_63_DBL_) {
+ return goog.math.Long.MIN_VALUE;
+ } else if (value + 1 >= goog.math.Long.TWO_PWR_63_DBL_) {
+ return goog.math.Long.MAX_VALUE;
+ } else if (value < 0) {
+ return goog.math.Long.fromNumber(-value).negate();
+ } else {
+ return new goog.math.Long(
+ (value % goog.math.Long.TWO_PWR_32_DBL_) | 0,
+ (value / goog.math.Long.TWO_PWR_32_DBL_) | 0);
+ }
+ };
+
+
+ /**
+ * Returns a Long representing the 64-bit integer that comes by concatenating
+ * the given high and low bits. Each is assumed to use 32 bits.
+ * @param {number} lowBits The low 32-bits.
+ * @param {number} highBits The high 32-bits.
+ * @return {!goog.math.Long} The corresponding Long value.
+ */
+ goog.math.Long.fromBits = function(lowBits, highBits) {
+ return new goog.math.Long(lowBits, highBits);
+ };
+
+
+ /**
+ * Returns a Long representation of the given string, written using the given
+ * radix.
+ * @param {string} str The textual representation of the Long.
+ * @param {number=} opt_radix The radix in which the text is written.
+ * @return {!goog.math.Long} The corresponding Long value.
+ */
+ goog.math.Long.fromString = function(str, opt_radix) {
+ if (str.length == 0) {
+ throw Error('number format error: empty string');
+ }
+
+ var radix = opt_radix || 10;
+ if (radix < 2 || 36 < radix) {
+ throw Error('radix out of range: ' + radix);
+ }
+
+ if (str.charAt(0) == '-') {
+ return goog.math.Long.fromString(str.substring(1), radix).negate();
+ } else if (str.indexOf('-') >= 0) {
+ throw Error('number format error: interior "-" character: ' + str);
+ }
+
+ // Do several (8) digits each time through the loop, so as to
+ // minimize the calls to the very expensive emulated div.
+ var radixToPower = goog.math.Long.fromNumber(Math.pow(radix, 8));
+
+ var result = goog.math.Long.ZERO;
+ for (var i = 0; i < str.length; i += 8) {
+ var size = Math.min(8, str.length - i);
+ var value = parseInt(str.substring(i, i + size), radix);
+ if (size < 8) {
+ var power = goog.math.Long.fromNumber(Math.pow(radix, size));
+ result = result.multiply(power).add(goog.math.Long.fromNumber(value));
+ } else {
+ result = result.multiply(radixToPower);
+ result = result.add(goog.math.Long.fromNumber(value));
+ }
+ }
+ return result;
+ };
+
+
+ // NOTE: the compiler should inline these constant values below and then remove
+ // these variables, so there should be no runtime penalty for these.
+
+
+ /**
+ * Number used repeated below in calculations. This must appear before the
+ * first call to any from* function below.
+ * @type {number}
+ * @private
+ */
+ goog.math.Long.TWO_PWR_16_DBL_ = 1 << 16;
+
+
+ /**
+ * @type {number}
+ * @private
+ */
+ goog.math.Long.TWO_PWR_24_DBL_ = 1 << 24;
+
+
+ /**
+ * @type {number}
+ * @private
+ */
+ goog.math.Long.TWO_PWR_32_DBL_ =
+ goog.math.Long.TWO_PWR_16_DBL_ * goog.math.Long.TWO_PWR_16_DBL_;
+
+
+ /**
+ * @type {number}
+ * @private
+ */
+ goog.math.Long.TWO_PWR_31_DBL_ =
+ goog.math.Long.TWO_PWR_32_DBL_ / 2;
+
+
+ /**
+ * @type {number}
+ * @private
+ */
+ goog.math.Long.TWO_PWR_48_DBL_ =
+ goog.math.Long.TWO_PWR_32_DBL_ * goog.math.Long.TWO_PWR_16_DBL_;
+
+
+ /**
+ * @type {number}
+ * @private
+ */
+ goog.math.Long.TWO_PWR_64_DBL_ =
+ goog.math.Long.TWO_PWR_32_DBL_ * goog.math.Long.TWO_PWR_32_DBL_;
+
+
+ /**
+ * @type {number}
+ * @private
+ */
+ goog.math.Long.TWO_PWR_63_DBL_ =
+ goog.math.Long.TWO_PWR_64_DBL_ / 2;
+
+
+ /** @type {!goog.math.Long} */
+ goog.math.Long.ZERO = goog.math.Long.fromInt(0);
+
+
+ /** @type {!goog.math.Long} */
+ goog.math.Long.ONE = goog.math.Long.fromInt(1);
+
+
+ /** @type {!goog.math.Long} */
+ goog.math.Long.NEG_ONE = goog.math.Long.fromInt(-1);
+
+
+ /** @type {!goog.math.Long} */
+ goog.math.Long.MAX_VALUE =
+ goog.math.Long.fromBits(0xFFFFFFFF | 0, 0x7FFFFFFF | 0);
+
+
+ /** @type {!goog.math.Long} */
+ goog.math.Long.MIN_VALUE = goog.math.Long.fromBits(0, 0x80000000 | 0);
+
+
+ /**
+ * @type {!goog.math.Long}
+ * @private
+ */
+ goog.math.Long.TWO_PWR_24_ = goog.math.Long.fromInt(1 << 24);
+
+
+ /** @return {number} The value, assuming it is a 32-bit integer. */
+ goog.math.Long.prototype.toInt = function() {
+ return this.low_;
+ };
+
+
+ /** @return {number} The closest floating-point representation to this value. */
+ goog.math.Long.prototype.toNumber = function() {
+ return this.high_ * goog.math.Long.TWO_PWR_32_DBL_ +
+ this.getLowBitsUnsigned();
+ };
+
+
+ /**
+ * @param {number=} opt_radix The radix in which the text should be written.
+ * @return {string} The textual representation of this value.
+ */
+ goog.math.Long.prototype.toString = function(opt_radix) {
+ var radix = opt_radix || 10;
+ if (radix < 2 || 36 < radix) {
+ throw Error('radix out of range: ' + radix);
+ }
+
+ if (this.isZero()) {
+ return '0';
+ }
+
+ if (this.isNegative()) {
+ if (this.equals(goog.math.Long.MIN_VALUE)) {
+ // We need to change the Long value before it can be negated, so we remove
+ // the bottom-most digit in this base and then recurse to do the rest.
+ var radixLong = goog.math.Long.fromNumber(radix);
+ var div = this.div(radixLong);
+ var rem = div.multiply(radixLong).subtract(this);
+ return div.toString(radix) + rem.toInt().toString(radix);
+ } else {
+ return '-' + this.negate().toString(radix);
+ }
+ }
+
+ // Do several (6) digits each time through the loop, so as to
+ // minimize the calls to the very expensive emulated div.
+ var radixToPower = goog.math.Long.fromNumber(Math.pow(radix, 6));
+
+ var rem = this;
+ var result = '';
+ while (true) {
+ var remDiv = rem.div(radixToPower);
+ var intval = rem.subtract(remDiv.multiply(radixToPower)).toInt();
+ var digits = intval.toString(radix);
+
+ rem = remDiv;
+ if (rem.isZero()) {
+ return digits + result;
+ } else {
+ while (digits.length < 6) {
+ digits = '0' + digits;
+ }
+ result = '' + digits + result;
+ }
+ }
+ };
+
+
+ /** @return {number} The high 32-bits as a signed value. */
+ goog.math.Long.prototype.getHighBits = function() {
+ return this.high_;
+ };
+
+
+ /** @return {number} The low 32-bits as a signed value. */
+ goog.math.Long.prototype.getLowBits = function() {
+ return this.low_;
+ };
+
+
+ /** @return {number} The low 32-bits as an unsigned value. */
+ goog.math.Long.prototype.getLowBitsUnsigned = function() {
+ return (this.low_ >= 0) ?
+ this.low_ : goog.math.Long.TWO_PWR_32_DBL_ + this.low_;
+ };
+
+
+ /**
+ * @return {number} Returns the number of bits needed to represent the absolute
+ * value of this Long.
+ */
+ goog.math.Long.prototype.getNumBitsAbs = function() {
+ if (this.isNegative()) {
+ if (this.equals(goog.math.Long.MIN_VALUE)) {
+ return 64;
+ } else {
+ return this.negate().getNumBitsAbs();
+ }
+ } else {
+ var val = this.high_ != 0 ? this.high_ : this.low_;
+ for (var bit = 31; bit > 0; bit--) {
+ if ((val & (1 << bit)) != 0) {
+ break;
+ }
+ }
+ return this.high_ != 0 ? bit + 33 : bit + 1;
+ }
+ };
+
+
+ /** @return {boolean} Whether this value is zero. */
+ goog.math.Long.prototype.isZero = function() {
+ return this.high_ == 0 && this.low_ == 0;
+ };
+
+
+ /** @return {boolean} Whether this value is negative. */
+ goog.math.Long.prototype.isNegative = function() {
+ return this.high_ < 0;
+ };
+
+
+ /** @return {boolean} Whether this value is odd. */
+ goog.math.Long.prototype.isOdd = function() {
+ return (this.low_ & 1) == 1;
+ };
+
+
+ /**
+ * @param {goog.math.Long} other Long to compare against.
+ * @return {boolean} Whether this Long equals the other.
+ */
+ goog.math.Long.prototype.equals = function(other) {
+ return (this.high_ == other.high_) && (this.low_ == other.low_);
+ };
+
+
+ /**
+ * @param {goog.math.Long} other Long to compare against.
+ * @return {boolean} Whether this Long does not equal the other.
+ */
+ goog.math.Long.prototype.notEquals = function(other) {
+ return (this.high_ != other.high_) || (this.low_ != other.low_);
+ };
+
+
+ /**
+ * @param {goog.math.Long} other Long to compare against.
+ * @return {boolean} Whether this Long is less than the other.
+ */
+ goog.math.Long.prototype.lessThan = function(other) {
+ return this.compare(other) < 0;
+ };
+
+
+ /**
+ * @param {goog.math.Long} other Long to compare against.
+ * @return {boolean} Whether this Long is less than or equal to the other.
+ */
+ goog.math.Long.prototype.lessThanOrEqual = function(other) {
+ return this.compare(other) <= 0;
+ };
+
+
+ /**
+ * @param {goog.math.Long} other Long to compare against.
+ * @return {boolean} Whether this Long is greater than the other.
+ */
+ goog.math.Long.prototype.greaterThan = function(other) {
+ return this.compare(other) > 0;
+ };
+
+
+ /**
+ * @param {goog.math.Long} other Long to compare against.
+ * @return {boolean} Whether this Long is greater than or equal to the other.
+ */
+ goog.math.Long.prototype.greaterThanOrEqual = function(other) {
+ return this.compare(other) >= 0;
+ };
+
+
+ /**
+ * Compares this Long with the given one.
+ * @param {goog.math.Long} other Long to compare against.
+ * @return {number} 0 if they are the same, 1 if the this is greater, and -1
+ * if the given one is greater.
+ */
+ goog.math.Long.prototype.compare = function(other) {
+ if (this.equals(other)) {
+ return 0;
+ }
+
+ var thisNeg = this.isNegative();
+ var otherNeg = other.isNegative();
+ if (thisNeg && !otherNeg) {
+ return -1;
+ }
+ if (!thisNeg && otherNeg) {
+ return 1;
+ }
+
+ // at this point, the signs are the same, so subtraction will not overflow
+ if (this.subtract(other).isNegative()) {
+ return -1;
+ } else {
+ return 1;
+ }
+ };
+
+
+ /** @return {!goog.math.Long} The negation of this value. */
+ goog.math.Long.prototype.negate = function() {
+ if (this.equals(goog.math.Long.MIN_VALUE)) {
+ return goog.math.Long.MIN_VALUE;
+ } else {
+ return this.not().add(goog.math.Long.ONE);
+ }
+ };
+
+
+ /**
+ * Returns the sum of this and the given Long.
+ * @param {goog.math.Long} other Long to add to this one.
+ * @return {!goog.math.Long} The sum of this and the given Long.
+ */
+ goog.math.Long.prototype.add = function(other) {
+ // Divide each number into 4 chunks of 16 bits, and then sum the chunks.
+
+ var a48 = this.high_ >>> 16;
+ var a32 = this.high_ & 0xFFFF;
+ var a16 = this.low_ >>> 16;
+ var a00 = this.low_ & 0xFFFF;
+
+ var b48 = other.high_ >>> 16;
+ var b32 = other.high_ & 0xFFFF;
+ var b16 = other.low_ >>> 16;
+ var b00 = other.low_ & 0xFFFF;
+
+ var c48 = 0, c32 = 0, c16 = 0, c00 = 0;
+ c00 += a00 + b00;
+ c16 += c00 >>> 16;
+ c00 &= 0xFFFF;
+ c16 += a16 + b16;
+ c32 += c16 >>> 16;
+ c16 &= 0xFFFF;
+ c32 += a32 + b32;
+ c48 += c32 >>> 16;
+ c32 &= 0xFFFF;
+ c48 += a48 + b48;
+ c48 &= 0xFFFF;
+ return goog.math.Long.fromBits((c16 << 16) | c00, (c48 << 16) | c32);
+ };
+
+
+ /**
+ * Returns the difference of this and the given Long.
+ * @param {goog.math.Long} other Long to subtract from this.
+ * @return {!goog.math.Long} The difference of this and the given Long.
+ */
+ goog.math.Long.prototype.subtract = function(other) {
+ return this.add(other.negate());
+ };
+
+
+ /**
+ * Returns the product of this and the given long.
+ * @param {goog.math.Long} other Long to multiply with this.
+ * @return {!goog.math.Long} The product of this and the other.
+ */
+ goog.math.Long.prototype.multiply = function(other) {
+ if (this.isZero()) {
+ return goog.math.Long.ZERO;
+ } else if (other.isZero()) {
+ return goog.math.Long.ZERO;
+ }
+
+ if (this.equals(goog.math.Long.MIN_VALUE)) {
+ return other.isOdd() ? goog.math.Long.MIN_VALUE : goog.math.Long.ZERO;
+ } else if (other.equals(goog.math.Long.MIN_VALUE)) {
+ return this.isOdd() ? goog.math.Long.MIN_VALUE : goog.math.Long.ZERO;
+ }
+
+ if (this.isNegative()) {
+ if (other.isNegative()) {
+ return this.negate().multiply(other.negate());
+ } else {
+ return this.negate().multiply(other).negate();
+ }
+ } else if (other.isNegative()) {
+ return this.multiply(other.negate()).negate();
+ }
+
+ // If both longs are small, use float multiplication
+ if (this.lessThan(goog.math.Long.TWO_PWR_24_) &&
+ other.lessThan(goog.math.Long.TWO_PWR_24_)) {
+ return goog.math.Long.fromNumber(this.toNumber() * other.toNumber());
+ }
+
+ // Divide each long into 4 chunks of 16 bits, and then add up 4x4 products.
+ // We can skip products that would overflow.
+
+ var a48 = this.high_ >>> 16;
+ var a32 = this.high_ & 0xFFFF;
+ var a16 = this.low_ >>> 16;
+ var a00 = this.low_ & 0xFFFF;
+
+ var b48 = other.high_ >>> 16;
+ var b32 = other.high_ & 0xFFFF;
+ var b16 = other.low_ >>> 16;
+ var b00 = other.low_ & 0xFFFF;
+
+ var c48 = 0, c32 = 0, c16 = 0, c00 = 0;
+ c00 += a00 * b00;
+ c16 += c00 >>> 16;
+ c00 &= 0xFFFF;
+ c16 += a16 * b00;
+ c32 += c16 >>> 16;
+ c16 &= 0xFFFF;
+ c16 += a00 * b16;
+ c32 += c16 >>> 16;
+ c16 &= 0xFFFF;
+ c32 += a32 * b00;
+ c48 += c32 >>> 16;
+ c32 &= 0xFFFF;
+ c32 += a16 * b16;
+ c48 += c32 >>> 16;
+ c32 &= 0xFFFF;
+ c32 += a00 * b32;
+ c48 += c32 >>> 16;
+ c32 &= 0xFFFF;
+ c48 += a48 * b00 + a32 * b16 + a16 * b32 + a00 * b48;
+ c48 &= 0xFFFF;
+ return goog.math.Long.fromBits((c16 << 16) | c00, (c48 << 16) | c32);
+ };
+
+
+ /**
+ * Returns this Long divided by the given one.
+ * @param {goog.math.Long} other Long by which to divide.
+ * @return {!goog.math.Long} This Long divided by the given one.
+ */
+ goog.math.Long.prototype.div = function(other) {
+ if (other.isZero()) {
+ throw Error('division by zero');
+ } else if (this.isZero()) {
+ return goog.math.Long.ZERO;
+ }
+
+ if (this.equals(goog.math.Long.MIN_VALUE)) {
+ if (other.equals(goog.math.Long.ONE) ||
+ other.equals(goog.math.Long.NEG_ONE)) {
+ return goog.math.Long.MIN_VALUE; // recall that -MIN_VALUE == MIN_VALUE
+ } else if (other.equals(goog.math.Long.MIN_VALUE)) {
+ return goog.math.Long.ONE;
+ } else {
+ // At this point, we have |other| >= 2, so |this/other| < |MIN_VALUE|.
+ var halfThis = this.shiftRight(1);
+ var approx = halfThis.div(other).shiftLeft(1);
+ if (approx.equals(goog.math.Long.ZERO)) {
+ return other.isNegative() ? goog.math.Long.ONE : goog.math.Long.NEG_ONE;
+ } else {
+ var rem = this.subtract(other.multiply(approx));
+ var result = approx.add(rem.div(other));
+ return result;
+ }
+ }
+ } else if (other.equals(goog.math.Long.MIN_VALUE)) {
+ return goog.math.Long.ZERO;
+ }
+
+ if (this.isNegative()) {
+ if (other.isNegative()) {
+ return this.negate().div(other.negate());
+ } else {
+ return this.negate().div(other).negate();
+ }
+ } else if (other.isNegative()) {
+ return this.div(other.negate()).negate();
+ }
+
+ // Repeat the following until the remainder is less than other: find a
+ // floating-point that approximates remainder / other *from below*, add this
+ // into the result, and subtract it from the remainder. It is critical that
+ // the approximate value is less than or equal to the real value so that the
+ // remainder never becomes negative.
+ var res = goog.math.Long.ZERO;
+ var rem = this;
+ while (rem.greaterThanOrEqual(other)) {
+ // Approximate the result of division. This may be a little greater or
+ // smaller than the actual value.
+ var approx = Math.max(1, Math.floor(rem.toNumber() / other.toNumber()));
+
+ // We will tweak the approximate result by changing it in the 48-th digit or
+ // the smallest non-fractional digit, whichever is larger.
+ var log2 = Math.ceil(Math.log(approx) / Math.LN2);
+ var delta = (log2 <= 48) ? 1 : Math.pow(2, log2 - 48);
+
+ // Decrease the approximation until it is smaller than the remainder. Note
+ // that if it is too large, the product overflows and is negative.
+ var approxRes = goog.math.Long.fromNumber(approx);
+ var approxRem = approxRes.multiply(other);
+ while (approxRem.isNegative() || approxRem.greaterThan(rem)) {
+ approx -= delta;
+ approxRes = goog.math.Long.fromNumber(approx);
+ approxRem = approxRes.multiply(other);
+ }
+
+ // We know the answer can't be zero... and actually, zero would cause
+ // infinite recursion since we would make no progress.
+ if (approxRes.isZero()) {
+ approxRes = goog.math.Long.ONE;
+ }
+
+ res = res.add(approxRes);
+ rem = rem.subtract(approxRem);
+ }
+ return res;
+ };
+
+
+ /**
+ * Returns this Long modulo the given one.
+ * @param {goog.math.Long} other Long by which to mod.
+ * @return {!goog.math.Long} This Long modulo the given one.
+ */
+ goog.math.Long.prototype.modulo = function(other) {
+ return this.subtract(this.div(other).multiply(other));
+ };
+
+
+ /** @return {!goog.math.Long} The bitwise-NOT of this value. */
+ goog.math.Long.prototype.not = function() {
+ return goog.math.Long.fromBits(~this.low_, ~this.high_);
+ };
+
+
+ /**
+ * Returns the bitwise-AND of this Long and the given one.
+ * @param {goog.math.Long} other The Long with which to AND.
+ * @return {!goog.math.Long} The bitwise-AND of this and the other.
+ */
+ goog.math.Long.prototype.and = function(other) {
+ return goog.math.Long.fromBits(this.low_ & other.low_,
+ this.high_ & other.high_);
+ };
+
+
+ /**
+ * Returns the bitwise-OR of this Long and the given one.
+ * @param {goog.math.Long} other The Long with which to OR.
+ * @return {!goog.math.Long} The bitwise-OR of this and the other.
+ */
+ goog.math.Long.prototype.or = function(other) {
+ return goog.math.Long.fromBits(this.low_ | other.low_,
+ this.high_ | other.high_);
+ };
+
+
+ /**
+ * Returns the bitwise-XOR of this Long and the given one.
+ * @param {goog.math.Long} other The Long with which to XOR.
+ * @return {!goog.math.Long} The bitwise-XOR of this and the other.
+ */
+ goog.math.Long.prototype.xor = function(other) {
+ return goog.math.Long.fromBits(this.low_ ^ other.low_,
+ this.high_ ^ other.high_);
+ };
+
+
+ /**
+ * Returns this Long with bits shifted to the left by the given amount.
+ * @param {number} numBits The number of bits by which to shift.
+ * @return {!goog.math.Long} This shifted to the left by the given amount.
+ */
+ goog.math.Long.prototype.shiftLeft = function(numBits) {
+ numBits &= 63;
+ if (numBits == 0) {
+ return this;
+ } else {
+ var low = this.low_;
+ if (numBits < 32) {
+ var high = this.high_;
+ return goog.math.Long.fromBits(
+ low << numBits,
+ (high << numBits) | (low >>> (32 - numBits)));
+ } else {
+ return goog.math.Long.fromBits(0, low << (numBits - 32));
+ }
+ }
+ };
+
+
+ /**
+ * Returns this Long with bits shifted to the right by the given amount.
+ * @param {number} numBits The number of bits by which to shift.
+ * @return {!goog.math.Long} This shifted to the right by the given amount.
+ */
+ goog.math.Long.prototype.shiftRight = function(numBits) {
+ numBits &= 63;
+ if (numBits == 0) {
+ return this;
+ } else {
+ var high = this.high_;
+ if (numBits < 32) {
+ var low = this.low_;
+ return goog.math.Long.fromBits(
+ (low >>> numBits) | (high << (32 - numBits)),
+ high >> numBits);
+ } else {
+ return goog.math.Long.fromBits(
+ high >> (numBits - 32),
+ high >= 0 ? 0 : -1);
+ }
+ }
+ };
+
+
+ /**
+ * Returns this Long with bits shifted to the right by the given amount, with
+ * the new top bits matching the current sign bit.
+ * @param {number} numBits The number of bits by which to shift.
+ * @return {!goog.math.Long} This shifted to the right by the given amount, with
+ * zeros placed into the new leading bits.
+ */
+ goog.math.Long.prototype.shiftRightUnsigned = function(numBits) {
+ numBits &= 63;
+ if (numBits == 0) {
+ return this;
+ } else {
+ var high = this.high_;
+ if (numBits < 32) {
+ var low = this.low_;
+ return goog.math.Long.fromBits(
+ (low >>> numBits) | (high << (32 - numBits)),
+ high >>> numBits);
+ } else if (numBits == 32) {
+ return goog.math.Long.fromBits(high, 0);
+ } else {
+ return goog.math.Long.fromBits(high >>> (numBits - 32), 0);
+ }
+ }
+ };
+
+ //======= begin jsbn =======
+
+ var navigator = { appName: 'Modern Browser' }; // polyfill a little
+
+ // Copyright (c) 2005 Tom Wu
+ // All Rights Reserved.
+ // http://www-cs-students.stanford.edu/~tjw/jsbn/
+
+ /*
+ * Copyright (c) 2003-2005 Tom Wu
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * IN NO EVENT SHALL TOM WU BE LIABLE FOR ANY SPECIAL, INCIDENTAL,
+ * INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, OR ANY DAMAGES WHATSOEVER
+ * RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER OR NOT ADVISED OF
+ * THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF LIABILITY, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * In addition, the following condition applies:
+ *
+ * All redistributions must retain an intact copy of this copyright notice
+ * and disclaimer.
+ */
+
+ // Basic JavaScript BN library - subset useful for RSA encryption.
+
+ // Bits per digit
+ var dbits;
+
+ // JavaScript engine analysis
+ var canary = 0xdeadbeefcafe;
+ var j_lm = ((canary&0xffffff)==0xefcafe);
+
+ // (public) Constructor
+ function BigInteger(a,b,c) {
+ if(a != null)
+ if("number" == typeof a) this.fromNumber(a,b,c);
+ else if(b == null && "string" != typeof a) this.fromString(a,256);
+ else this.fromString(a,b);
+ }
+
+ // return new, unset BigInteger
+ function nbi() { return new BigInteger(null); }
+
+ // am: Compute w_j += (x*this_i), propagate carries,
+ // c is initial carry, returns final carry.
+ // c < 3*dvalue, x < 2*dvalue, this_i < dvalue
+ // We need to select the fastest one that works in this environment.
+
+ // am1: use a single mult and divide to get the high bits,
+ // max digit bits should be 26 because
+ // max internal value = 2*dvalue^2-2*dvalue (< 2^53)
+ function am1(i,x,w,j,c,n) {
+ while(--n >= 0) {
+ var v = x*this[i++]+w[j]+c;
+ c = Math.floor(v/0x4000000);
+ w[j++] = v&0x3ffffff;
+ }
+ return c;
+ }
+ // am2 avoids a big mult-and-extract completely.
+ // Max digit bits should be <= 30 because we do bitwise ops
+ // on values up to 2*hdvalue^2-hdvalue-1 (< 2^31)
+ function am2(i,x,w,j,c,n) {
+ var xl = x&0x7fff, xh = x>>15;
+ while(--n >= 0) {
+ var l = this[i]&0x7fff;
+ var h = this[i++]>>15;
+ var m = xh*l+h*xl;
+ l = xl*l+((m&0x7fff)<<15)+w[j]+(c&0x3fffffff);
+ c = (l>>>30)+(m>>>15)+xh*h+(c>>>30);
+ w[j++] = l&0x3fffffff;
+ }
+ return c;
+ }
+ // Alternately, set max digit bits to 28 since some
+ // browsers slow down when dealing with 32-bit numbers.
+ function am3(i,x,w,j,c,n) {
+ var xl = x&0x3fff, xh = x>>14;
+ while(--n >= 0) {
+ var l = this[i]&0x3fff;
+ var h = this[i++]>>14;
+ var m = xh*l+h*xl;
+ l = xl*l+((m&0x3fff)<<14)+w[j]+c;
+ c = (l>>28)+(m>>14)+xh*h;
+ w[j++] = l&0xfffffff;
+ }
+ return c;
+ }
+ if(j_lm && (navigator.appName == "Microsoft Internet Explorer")) {
+ BigInteger.prototype.am = am2;
+ dbits = 30;
+ }
+ else if(j_lm && (navigator.appName != "Netscape")) {
+ BigInteger.prototype.am = am1;
+ dbits = 26;
+ }
+ else { // Mozilla/Netscape seems to prefer am3
+ BigInteger.prototype.am = am3;
+ dbits = 28;
+ }
+
+ BigInteger.prototype.DB = dbits;
+ BigInteger.prototype.DM = ((1<<dbits)-1);
+ BigInteger.prototype.DV = (1<<dbits);
+
+ var BI_FP = 52;
+ BigInteger.prototype.FV = Math.pow(2,BI_FP);
+ BigInteger.prototype.F1 = BI_FP-dbits;
+ BigInteger.prototype.F2 = 2*dbits-BI_FP;
+
+ // Digit conversions
+ var BI_RM = "0123456789abcdefghijklmnopqrstuvwxyz";
+ var BI_RC = new Array();
+ var rr,vv;
+ rr = "0".charCodeAt(0);
+ for(vv = 0; vv <= 9; ++vv) BI_RC[rr++] = vv;
+ rr = "a".charCodeAt(0);
+ for(vv = 10; vv < 36; ++vv) BI_RC[rr++] = vv;
+ rr = "A".charCodeAt(0);
+ for(vv = 10; vv < 36; ++vv) BI_RC[rr++] = vv;
+
+ function int2char(n) { return BI_RM.charAt(n); }
+ function intAt(s,i) {
+ var c = BI_RC[s.charCodeAt(i)];
+ return (c==null)?-1:c;
+ }
+
+ // (protected) copy this to r
+ function bnpCopyTo(r) {
+ for(var i = this.t-1; i >= 0; --i) r[i] = this[i];
+ r.t = this.t;
+ r.s = this.s;
+ }
+
+ // (protected) set from integer value x, -DV <= x < DV
+ function bnpFromInt(x) {
+ this.t = 1;
+ this.s = (x<0)?-1:0;
+ if(x > 0) this[0] = x;
+ else if(x < -1) this[0] = x+DV;
+ else this.t = 0;
+ }
+
+ // return bigint initialized to value
+ function nbv(i) { var r = nbi(); r.fromInt(i); return r; }
+
+ // (protected) set from string and radix
+ function bnpFromString(s,b) {
+ var k;
+ if(b == 16) k = 4;
+ else if(b == 8) k = 3;
+ else if(b == 256) k = 8; // byte array
+ else if(b == 2) k = 1;
+ else if(b == 32) k = 5;
+ else if(b == 4) k = 2;
+ else { this.fromRadix(s,b); return; }
+ this.t = 0;
+ this.s = 0;
+ var i = s.length, mi = false, sh = 0;
+ while(--i >= 0) {
+ var x = (k==8)?s[i]&0xff:intAt(s,i);
+ if(x < 0) {
+ if(s.charAt(i) == "-") mi = true;
+ continue;
+ }
+ mi = false;
+ if(sh == 0)
+ this[this.t++] = x;
+ else if(sh+k > this.DB) {
+ this[this.t-1] |= (x&((1<<(this.DB-sh))-1))<<sh;
+ this[this.t++] = (x>>(this.DB-sh));
+ }
+ else
+ this[this.t-1] |= x<<sh;
+ sh += k;
+ if(sh >= this.DB) sh -= this.DB;
+ }
+ if(k == 8 && (s[0]&0x80) != 0) {
+ this.s = -1;
+ if(sh > 0) this[this.t-1] |= ((1<<(this.DB-sh))-1)<<sh;
+ }
+ this.clamp();
+ if(mi) BigInteger.ZERO.subTo(this,this);
+ }
+
+ // (protected) clamp off excess high words
+ function bnpClamp() {
+ var c = this.s&this.DM;
+ while(this.t > 0 && this[this.t-1] == c) --this.t;
+ }
+
+ // (public) return string representation in given radix
+ function bnToString(b) {
+ if(this.s < 0) return "-"+this.negate().toString(b);
+ var k;
+ if(b == 16) k = 4;
+ else if(b == 8) k = 3;
+ else if(b == 2) k = 1;
+ else if(b == 32) k = 5;
+ else if(b == 4) k = 2;
+ else return this.toRadix(b);
+ var km = (1<<k)-1, d, m = false, r = "", i = this.t;
+ var p = this.DB-(i*this.DB)%k;
+ if(i-- > 0) {
+ if(p < this.DB && (d = this[i]>>p) > 0) { m = true; r = int2char(d); }
+ while(i >= 0) {
+ if(p < k) {
+ d = (this[i]&((1<<p)-1))<<(k-p);
+ d |= this[--i]>>(p+=this.DB-k);
+ }
+ else {
+ d = (this[i]>>(p-=k))&km;
+ if(p <= 0) { p += this.DB; --i; }
+ }
+ if(d > 0) m = true;
+ if(m) r += int2char(d);
+ }
+ }
+ return m?r:"0";
+ }
+
+ // (public) -this
+ function bnNegate() { var r = nbi(); BigInteger.ZERO.subTo(this,r); return r; }
+
+ // (public) |this|
+ function bnAbs() { return (this.s<0)?this.negate():this; }
+
+ // (public) return + if this > a, - if this < a, 0 if equal
+ function bnCompareTo(a) {
+ var r = this.s-a.s;
+ if(r != 0) return r;
+ var i = this.t;
+ r = i-a.t;
+ if(r != 0) return (this.s<0)?-r:r;
+ while(--i >= 0) if((r=this[i]-a[i]) != 0) return r;
+ return 0;
+ }
+
+ // returns bit length of the integer x
+ function nbits(x) {
+ var r = 1, t;
+ if((t=x>>>16) != 0) { x = t; r += 16; }
+ if((t=x>>8) != 0) { x = t; r += 8; }
+ if((t=x>>4) != 0) { x = t; r += 4; }
+ if((t=x>>2) != 0) { x = t; r += 2; }
+ if((t=x>>1) != 0) { x = t; r += 1; }
+ return r;
+ }
+
+ // (public) return the number of bits in "this"
+ function bnBitLength() {
+ if(this.t <= 0) return 0;
+ return this.DB*(this.t-1)+nbits(this[this.t-1]^(this.s&this.DM));
+ }
+
+ // (protected) r = this << n*DB
+ function bnpDLShiftTo(n,r) {
+ var i;
+ for(i = this.t-1; i >= 0; --i) r[i+n] = this[i];
+ for(i = n-1; i >= 0; --i) r[i] = 0;
+ r.t = this.t+n;
+ r.s = this.s;
+ }
+
+ // (protected) r = this >> n*DB
+ function bnpDRShiftTo(n,r) {
+ for(var i = n; i < this.t; ++i) r[i-n] = this[i];
+ r.t = Math.max(this.t-n,0);
+ r.s = this.s;
+ }
+
+ // (protected) r = this << n
+ function bnpLShiftTo(n,r) {
+ var bs = n%this.DB;
+ var cbs = this.DB-bs;
+ var bm = (1<<cbs)-1;
+ var ds = Math.floor(n/this.DB), c = (this.s<<bs)&this.DM, i;
+ for(i = this.t-1; i >= 0; --i) {
+ r[i+ds+1] = (this[i]>>cbs)|c;
+ c = (this[i]&bm)<<bs;
+ }
+ for(i = ds-1; i >= 0; --i) r[i] = 0;
+ r[ds] = c;
+ r.t = this.t+ds+1;
+ r.s = this.s;
+ r.clamp();
+ }
+
+ // (protected) r = this >> n
+ function bnpRShiftTo(n,r) {
+ r.s = this.s;
+ var ds = Math.floor(n/this.DB);
+ if(ds >= this.t) { r.t = 0; return; }
+ var bs = n%this.DB;
+ var cbs = this.DB-bs;
+ var bm = (1<<bs)-1;
+ r[0] = this[ds]>>bs;
+ for(var i = ds+1; i < this.t; ++i) {
+ r[i-ds-1] |= (this[i]&bm)<<cbs;
+ r[i-ds] = this[i]>>bs;
+ }
+ if(bs > 0) r[this.t-ds-1] |= (this.s&bm)<<cbs;
+ r.t = this.t-ds;
+ r.clamp();
+ }
+
+ // (protected) r = this - a
+ function bnpSubTo(a,r) {
+ var i = 0, c = 0, m = Math.min(a.t,this.t);
+ while(i < m) {
+ c += this[i]-a[i];
+ r[i++] = c&this.DM;
+ c >>= this.DB;
+ }
+ if(a.t < this.t) {
+ c -= a.s;
+ while(i < this.t) {
+ c += this[i];
+ r[i++] = c&this.DM;
+ c >>= this.DB;
+ }
+ c += this.s;
+ }
+ else {
+ c += this.s;
+ while(i < a.t) {
+ c -= a[i];
+ r[i++] = c&this.DM;
+ c >>= this.DB;
+ }
+ c -= a.s;
+ }
+ r.s = (c<0)?-1:0;
+ if(c < -1) r[i++] = this.DV+c;
+ else if(c > 0) r[i++] = c;
+ r.t = i;
+ r.clamp();
+ }
+
+ // (protected) r = this * a, r != this,a (HAC 14.12)
+ // "this" should be the larger one if appropriate.
+ function bnpMultiplyTo(a,r) {
+ var x = this.abs(), y = a.abs();
+ var i = x.t;
+ r.t = i+y.t;
+ while(--i >= 0) r[i] = 0;
+ for(i = 0; i < y.t; ++i) r[i+x.t] = x.am(0,y[i],r,i,0,x.t);
+ r.s = 0;
+ r.clamp();
+ if(this.s != a.s) BigInteger.ZERO.subTo(r,r);
+ }
+
+ // (protected) r = this^2, r != this (HAC 14.16)
+ function bnpSquareTo(r) {
+ var x = this.abs();
+ var i = r.t = 2*x.t;
+ while(--i >= 0) r[i] = 0;
+ for(i = 0; i < x.t-1; ++i) {
+ var c = x.am(i,x[i],r,2*i,0,1);
+ if((r[i+x.t]+=x.am(i+1,2*x[i],r,2*i+1,c,x.t-i-1)) >= x.DV) {
+ r[i+x.t] -= x.DV;
+ r[i+x.t+1] = 1;
+ }
+ }
+ if(r.t > 0) r[r.t-1] += x.am(i,x[i],r,2*i,0,1);
+ r.s = 0;
+ r.clamp();
+ }
+
+ // (protected) divide this by m, quotient and remainder to q, r (HAC 14.20)
+ // r != q, this != m. q or r may be null.
+ function bnpDivRemTo(m,q,r) {
+ var pm = m.abs();
+ if(pm.t <= 0) return;
+ var pt = this.abs();
+ if(pt.t < pm.t) {
+ if(q != null) q.fromInt(0);
+ if(r != null) this.copyTo(r);
+ return;
+ }
+ if(r == null) r = nbi();
+ var y = nbi(), ts = this.s, ms = m.s;
+ var nsh = this.DB-nbits(pm[pm.t-1]); // normalize modulus
+ if(nsh > 0) { pm.lShiftTo(nsh,y); pt.lShiftTo(nsh,r); }
+ else { pm.copyTo(y); pt.copyTo(r); }
+ var ys = y.t;
+ var y0 = y[ys-1];
+ if(y0 == 0) return;
+ var yt = y0*(1<<this.F1)+((ys>1)?y[ys-2]>>this.F2:0);
+ var d1 = this.FV/yt, d2 = (1<<this.F1)/yt, e = 1<<this.F2;
+ var i = r.t, j = i-ys, t = (q==null)?nbi():q;
+ y.dlShiftTo(j,t);
+ if(r.compareTo(t) >= 0) {
+ r[r.t++] = 1;
+ r.subTo(t,r);
+ }
+ BigInteger.ONE.dlShiftTo(ys,t);
+ t.subTo(y,y); // "negative" y so we can replace sub with am later
+ while(y.t < ys) y[y.t++] = 0;
+ while(--j >= 0) {
+ // Estimate quotient digit
+ var qd = (r[--i]==y0)?this.DM:Math.floor(r[i]*d1+(r[i-1]+e)*d2);
+ if((r[i]+=y.am(0,qd,r,j,0,ys)) < qd) { // Try it out
+ y.dlShiftTo(j,t);
+ r.subTo(t,r);
+ while(r[i] < --qd) r.subTo(t,r);
+ }
+ }
+ if(q != null) {
+ r.drShiftTo(ys,q);
+ if(ts != ms) BigInteger.ZERO.subTo(q,q);
+ }
+ r.t = ys;
+ r.clamp();
+ if(nsh > 0) r.rShiftTo(nsh,r); // Denormalize remainder
+ if(ts < 0) BigInteger.ZERO.subTo(r,r);
+ }
+
+ // (public) this mod a
+ function bnMod(a) {
+ var r = nbi();
+ this.abs().divRemTo(a,null,r);
+ if(this.s < 0 && r.compareTo(BigInteger.ZERO) > 0) a.subTo(r,r);
+ return r;
+ }
+
+ // Modular reduction using "classic" algorithm
+ function Classic(m) { this.m = m; }
+ function cConvert(x) {
+ if(x.s < 0 || x.compareTo(this.m) >= 0) return x.mod(this.m);
+ else return x;
+ }
+ function cRevert(x) { return x; }
+ function cReduce(x) { x.divRemTo(this.m,null,x); }
+ function cMulTo(x,y,r) { x.multiplyTo(y,r); this.reduce(r); }
+ function cSqrTo(x,r) { x.squareTo(r); this.reduce(r); }
+
+ Classic.prototype.convert = cConvert;
+ Classic.prototype.revert = cRevert;
+ Classic.prototype.reduce = cReduce;
+ Classic.prototype.mulTo = cMulTo;
+ Classic.prototype.sqrTo = cSqrTo;
+
+ // (protected) return "-1/this % 2^DB"; useful for Mont. reduction
+ // justification:
+ // xy == 1 (mod m)
+ // xy = 1+km
+ // xy(2-xy) = (1+km)(1-km)
+ // x[y(2-xy)] = 1-k^2m^2
+ // x[y(2-xy)] == 1 (mod m^2)
+ // if y is 1/x mod m, then y(2-xy) is 1/x mod m^2
+ // should reduce x and y(2-xy) by m^2 at each step to keep size bounded.
+ // JS multiply "overflows" differently from C/C++, so care is needed here.
+ function bnpInvDigit() {
+ if(this.t < 1) return 0;
+ var x = this[0];
+ if((x&1) == 0) return 0;
+ var y = x&3; // y == 1/x mod 2^2
+ y = (y*(2-(x&0xf)*y))&0xf; // y == 1/x mod 2^4
+ y = (y*(2-(x&0xff)*y))&0xff; // y == 1/x mod 2^8
+ y = (y*(2-(((x&0xffff)*y)&0xffff)))&0xffff; // y == 1/x mod 2^16
+ // last step - calculate inverse mod DV directly;
+ // assumes 16 < DB <= 32 and assumes ability to handle 48-bit ints
+ y = (y*(2-x*y%this.DV))%this.DV; // y == 1/x mod 2^dbits
+ // we really want the negative inverse, and -DV < y < DV
+ return (y>0)?this.DV-y:-y;
+ }
+
+ // Montgomery reduction
+ function Montgomery(m) {
+ this.m = m;
+ this.mp = m.invDigit();
+ this.mpl = this.mp&0x7fff;
+ this.mph = this.mp>>15;
+ this.um = (1<<(m.DB-15))-1;
+ this.mt2 = 2*m.t;
+ }
+
+ // xR mod m
+ function montConvert(x) {
+ var r = nbi();
+ x.abs().dlShiftTo(this.m.t,r);
+ r.divRemTo(this.m,null,r);
+ if(x.s < 0 && r.compareTo(BigInteger.ZERO) > 0) this.m.subTo(r,r);
+ return r;
+ }
+
+ // x/R mod m
+ function montRevert(x) {
+ var r = nbi();
+ x.copyTo(r);
+ this.reduce(r);
+ return r;
+ }
+
+ // x = x/R mod m (HAC 14.32)
+ function montReduce(x) {
+ while(x.t <= this.mt2) // pad x so am has enough room later
+ x[x.t++] = 0;
+ for(var i = 0; i < this.m.t; ++i) {
+ // faster way of calculating u0 = x[i]*mp mod DV
+ var j = x[i]&0x7fff;
+ var u0 = (j*this.mpl+(((j*this.mph+(x[i]>>15)*this.mpl)&this.um)<<15))&x.DM;
+ // use am to combine the multiply-shift-add into one call
+ j = i+this.m.t;
+ x[j] += this.m.am(0,u0,x,i,0,this.m.t);
+ // propagate carry
+ while(x[j] >= x.DV) { x[j] -= x.DV; x[++j]++; }
+ }
+ x.clamp();
+ x.drShiftTo(this.m.t,x);
+ if(x.compareTo(this.m) >= 0) x.subTo(this.m,x);
+ }
+
+ // r = "x^2/R mod m"; x != r
+ function montSqrTo(x,r) { x.squareTo(r); this.reduce(r); }
+
+ // r = "xy/R mod m"; x,y != r
+ function montMulTo(x,y,r) { x.multiplyTo(y,r); this.reduce(r); }
+
+ Montgomery.prototype.convert = montConvert;
+ Montgomery.prototype.revert = montRevert;
+ Montgomery.prototype.reduce = montReduce;
+ Montgomery.prototype.mulTo = montMulTo;
+ Montgomery.prototype.sqrTo = montSqrTo;
+
+ // (protected) true iff this is even
+ function bnpIsEven() { return ((this.t>0)?(this[0]&1):this.s) == 0; }
+
+ // (protected) this^e, e < 2^32, doing sqr and mul with "r" (HAC 14.79)
+ function bnpExp(e,z) {
+ if(e > 0xffffffff || e < 1) return BigInteger.ONE;
+ var r = nbi(), r2 = nbi(), g = z.convert(this), i = nbits(e)-1;
+ g.copyTo(r);
+ while(--i >= 0) {
+ z.sqrTo(r,r2);
+ if((e&(1<<i)) > 0) z.mulTo(r2,g,r);
+ else { var t = r; r = r2; r2 = t; }
+ }
+ return z.revert(r);
+ }
+
+ // (public) this^e % m, 0 <= e < 2^32
+ function bnModPowInt(e,m) {
+ var z;
+ if(e < 256 || m.isEven()) z = new Classic(m); else z = new Montgomery(m);
+ return this.exp(e,z);
+ }
+
+ // protected
+ BigInteger.prototype.copyTo = bnpCopyTo;
+ BigInteger.prototype.fromInt = bnpFromInt;
+ BigInteger.prototype.fromString = bnpFromString;
+ BigInteger.prototype.clamp = bnpClamp;
+ BigInteger.prototype.dlShiftTo = bnpDLShiftTo;
+ BigInteger.prototype.drShiftTo = bnpDRShiftTo;
+ BigInteger.prototype.lShiftTo = bnpLShiftTo;
+ BigInteger.prototype.rShiftTo = bnpRShiftTo;
+ BigInteger.prototype.subTo = bnpSubTo;
+ BigInteger.prototype.multiplyTo = bnpMultiplyTo;
+ BigInteger.prototype.squareTo = bnpSquareTo;
+ BigInteger.prototype.divRemTo = bnpDivRemTo;
+ BigInteger.prototype.invDigit = bnpInvDigit;
+ BigInteger.prototype.isEven = bnpIsEven;
+ BigInteger.prototype.exp = bnpExp;
+
+ // public
+ BigInteger.prototype.toString = bnToString;
+ BigInteger.prototype.negate = bnNegate;
+ BigInteger.prototype.abs = bnAbs;
+ BigInteger.prototype.compareTo = bnCompareTo;
+ BigInteger.prototype.bitLength = bnBitLength;
+ BigInteger.prototype.mod = bnMod;
+ BigInteger.prototype.modPowInt = bnModPowInt;
+
+ // "constants"
+ BigInteger.ZERO = nbv(0);
+ BigInteger.ONE = nbv(1);
+
+ // jsbn2 stuff
+
+ // (protected) convert from radix string
+ function bnpFromRadix(s,b) {
+ this.fromInt(0);
+ if(b == null) b = 10;
+ var cs = this.chunkSize(b);
+ var d = Math.pow(b,cs), mi = false, j = 0, w = 0;
+ for(var i = 0; i < s.length; ++i) {
+ var x = intAt(s,i);
+ if(x < 0) {
+ if(s.charAt(i) == "-" && this.signum() == 0) mi = true;
+ continue;
+ }
+ w = b*w+x;
+ if(++j >= cs) {
+ this.dMultiply(d);
+ this.dAddOffset(w,0);
+ j = 0;
+ w = 0;
+ }
+ }
+ if(j > 0) {
+ this.dMultiply(Math.pow(b,j));
+ this.dAddOffset(w,0);
+ }
+ if(mi) BigInteger.ZERO.subTo(this,this);
+ }
+
+ // (protected) return x s.t. r^x < DV
+ function bnpChunkSize(r) { return Math.floor(Math.LN2*this.DB/Math.log(r)); }
+
+ // (public) 0 if this == 0, 1 if this > 0
+ function bnSigNum() {
+ if(this.s < 0) return -1;
+ else if(this.t <= 0 || (this.t == 1 && this[0] <= 0)) return 0;
+ else return 1;
+ }
+
+ // (protected) this *= n, this >= 0, 1 < n < DV
+ function bnpDMultiply(n) {
+ this[this.t] = this.am(0,n-1,this,0,0,this.t);
+ ++this.t;
+ this.clamp();
+ }
+
+ // (protected) this += n << w words, this >= 0
+ function bnpDAddOffset(n,w) {
+ if(n == 0) return;
+ while(this.t <= w) this[this.t++] = 0;
+ this[w] += n;
+ while(this[w] >= this.DV) {
+ this[w] -= this.DV;
+ if(++w >= this.t) this[this.t++] = 0;
+ ++this[w];
+ }
+ }
+
+ // (protected) convert to radix string
+ function bnpToRadix(b) {
+ if(b == null) b = 10;
+ if(this.signum() == 0 || b < 2 || b > 36) return "0";
+ var cs = this.chunkSize(b);
+ var a = Math.pow(b,cs);
+ var d = nbv(a), y = nbi(), z = nbi(), r = "";
+ this.divRemTo(d,y,z);
+ while(y.signum() > 0) {
+ r = (a+z.intValue()).toString(b).substr(1) + r;
+ y.divRemTo(d,y,z);
+ }
+ return z.intValue().toString(b) + r;
+ }
+
+ // (public) return value as integer
+ function bnIntValue() {
+ if(this.s < 0) {
+ if(this.t == 1) return this[0]-this.DV;
+ else if(this.t == 0) return -1;
+ }
+ else if(this.t == 1) return this[0];
+ else if(this.t == 0) return 0;
+ // assumes 16 < DB < 32
+ return ((this[1]&((1<<(32-this.DB))-1))<<this.DB)|this[0];
+ }
+
+ // (protected) r = this + a
+ function bnpAddTo(a,r) {
+ var i = 0, c = 0, m = Math.min(a.t,this.t);
+ while(i < m) {
+ c += this[i]+a[i];
+ r[i++] = c&this.DM;
+ c >>= this.DB;
+ }
+ if(a.t < this.t) {
+ c += a.s;
+ while(i < this.t) {
+ c += this[i];
+ r[i++] = c&this.DM;
+ c >>= this.DB;
+ }
+ c += this.s;
+ }
+ else {
+ c += this.s;
+ while(i < a.t) {
+ c += a[i];
+ r[i++] = c&this.DM;
+ c >>= this.DB;
+ }
+ c += a.s;
+ }
+ r.s = (c<0)?-1:0;
+ if(c > 0) r[i++] = c;
+ else if(c < -1) r[i++] = this.DV+c;
+ r.t = i;
+ r.clamp();
+ }
+
+ BigInteger.prototype.fromRadix = bnpFromRadix;
+ BigInteger.prototype.chunkSize = bnpChunkSize;
+ BigInteger.prototype.signum = bnSigNum;
+ BigInteger.prototype.dMultiply = bnpDMultiply;
+ BigInteger.prototype.dAddOffset = bnpDAddOffset;
+ BigInteger.prototype.toRadix = bnpToRadix;
+ BigInteger.prototype.intValue = bnIntValue;
+ BigInteger.prototype.addTo = bnpAddTo;
+
+ //======= end jsbn =======
+
+ // Emscripten wrapper
+ var Wrapper = {
+ abs: function(l, h) {
+ var x = new goog.math.Long(l, h);
+ var ret;
+ if (x.isNegative()) {
+ ret = x.negate();
+ } else {
+ ret = x;
+ }
+ HEAP32[tempDoublePtr>>2] = ret.low_;
+ HEAP32[tempDoublePtr+4>>2] = ret.high_;
+ },
+ ensureTemps: function() {
+ if (Wrapper.ensuredTemps) return;
+ Wrapper.ensuredTemps = true;
+ Wrapper.two32 = new BigInteger();
+ Wrapper.two32.fromString('4294967296', 10);
+ Wrapper.two64 = new BigInteger();
+ Wrapper.two64.fromString('18446744073709551616', 10);
+ Wrapper.temp1 = new BigInteger();
+ Wrapper.temp2 = new BigInteger();
+ },
+ lh2bignum: function(l, h) {
+ var a = new BigInteger();
+ a.fromString(h.toString(), 10);
+ var b = new BigInteger();
+ a.multiplyTo(Wrapper.two32, b);
+ var c = new BigInteger();
+ c.fromString(l.toString(), 10);
+ var d = new BigInteger();
+ c.addTo(b, d);
+ return d;
+ },
+ stringify: function(l, h, unsigned) {
+ var ret = new goog.math.Long(l, h).toString();
+ if (unsigned && ret[0] == '-') {
+ // unsign slowly using jsbn bignums
+ Wrapper.ensureTemps();
+ var bignum = new BigInteger();
+ bignum.fromString(ret, 10);
+ ret = new BigInteger();
+ Wrapper.two64.addTo(bignum, ret);
+ ret = ret.toString(10);
+ }
+ return ret;
+ },
+ fromString: function(str, base, min, max, unsigned) {
+ Wrapper.ensureTemps();
+ var bignum = new BigInteger();
+ bignum.fromString(str, base);
+ var bigmin = new BigInteger();
+ bigmin.fromString(min, 10);
+ var bigmax = new BigInteger();
+ bigmax.fromString(max, 10);
+ if (unsigned && bignum.compareTo(BigInteger.ZERO) < 0) {
+ var temp = new BigInteger();
+ bignum.addTo(Wrapper.two64, temp);
+ bignum = temp;
+ }
+ var error = false;
+ if (bignum.compareTo(bigmin) < 0) {
+ bignum = bigmin;
+ error = true;
+ } else if (bignum.compareTo(bigmax) > 0) {
+ bignum = bigmax;
+ error = true;
+ }
+ var ret = goog.math.Long.fromString(bignum.toString()); // min-max checks should have clamped this to a range goog.math.Long can handle well
+ HEAP32[tempDoublePtr>>2] = ret.low_;
+ HEAP32[tempDoublePtr+4>>2] = ret.high_;
+ if (error) throw 'range error';
+ }
+ };
+ return Wrapper;
+})();
+
+//======= end closure i64 code =======
+
+
+
+// === Auto-generated postamble setup entry stuff ===
+
+if (memoryInitializer) {
+ if (ENVIRONMENT_IS_NODE || ENVIRONMENT_IS_SHELL) {
+ var data = Module['readBinary'](memoryInitializer);
+ HEAPU8.set(data, STATIC_BASE);
+ } else {
+ addRunDependency('memory initializer');
+ Browser.asyncLoad(memoryInitializer, function(data) {
+ HEAPU8.set(data, STATIC_BASE);
+ removeRunDependency('memory initializer');
+ }, function(data) {
+ throw 'could not load memory initializer ' + memoryInitializer;
+ });
+ }
+}
+
+function ExitStatus(status) {
+ this.name = "ExitStatus";
+ this.message = "Program terminated with exit(" + status + ")";
+ this.status = status;
+};
+ExitStatus.prototype = new Error();
+ExitStatus.prototype.constructor = ExitStatus;
+
+var initialStackTop;
+var preloadStartTime = null;
+var calledMain = false;
+
+dependenciesFulfilled = function runCaller() {
+ // If run has never been called, and we should call run (INVOKE_RUN is true, and Module.noInitialRun is not false)
+ if (!Module['calledRun'] && shouldRunNow) run(['binarytrees.lua'].concat(Module["arguments"]));
+ if (!Module['calledRun']) dependenciesFulfilled = runCaller; // try this again later, after new deps are fulfilled
+}
+
+Module['callMain'] = Module.callMain = function callMain(args) {
+ assert(runDependencies == 0, 'cannot call main when async dependencies remain! (listen on __ATMAIN__)');
+ assert(__ATPRERUN__.length == 0, 'cannot call main when preRun functions remain to be called');
+
+ args = args || [];
+
+ ensureInitRuntime();
+
+ var argc = args.length+1;
+ function pad() {
+ for (var i = 0; i < 4-1; i++) {
+ argv.push(0);
+ }
+ }
+ var argv = [allocate(intArrayFromString("/bin/this.program"), 'i8', ALLOC_NORMAL) ];
+ pad();
+ for (var i = 0; i < argc-1; i = i + 1) {
+ argv.push(allocate(intArrayFromString(args[i]), 'i8', ALLOC_NORMAL));
+ pad();
+ }
+ argv.push(0);
+ argv = allocate(argv, 'i32', ALLOC_NORMAL);
+
+ initialStackTop = STACKTOP;
+
+ try {
+
+ var ret = Module['_main'](argc, argv, 0);
+
+
+ // if we're not running an evented main loop, it's time to exit
+ if (!Module['noExitRuntime']) {
+ exit(ret);
+ }
+ }
+ catch(e) {
+ if (e instanceof ExitStatus) {
+ // exit() throws this once it's done to make sure execution
+ // has been stopped completely
+ return;
+ } else if (e == 'SimulateInfiniteLoop') {
+ // running an evented main loop, don't immediately exit
+ Module['noExitRuntime'] = true;
+ return;
+ } else {
+ if (e && typeof e === 'object' && e.stack) Module.printErr('exception thrown: ' + [e, e.stack]);
+ throw e;
+ }
+ } finally {
+ calledMain = true;
+ }
+}
+
+
+
+
+function run(args) {
+ args = args || Module['arguments'];
+
+ if (preloadStartTime === null) preloadStartTime = Date.now();
+
+ if (runDependencies > 0) {
+ Module.printErr('run() called, but dependencies remain, so not running');
+ return;
+ }
+
+ preRun();
+
+ if (runDependencies > 0) return; // a preRun added a dependency, run will be called later
+ if (Module['calledRun']) return; // run may have just been called through dependencies being fulfilled just in this very frame
+
+ function doRun() {
+ if (Module['calledRun']) return; // run may have just been called while the async setStatus time below was happening
+ Module['calledRun'] = true;
+
+ ensureInitRuntime();
+
+ preMain();
+
+ if (ENVIRONMENT_IS_WEB && preloadStartTime !== null) {
+ Module.printErr('pre-main prep time: ' + (Date.now() - preloadStartTime) + ' ms');
+ }
+
+ if (Module['_main'] && shouldRunNow) {
+ Module['callMain'](args);
+ }
+
+ postRun();
+ }
+
+ if (Module['setStatus']) {
+ Module['setStatus']('Running...');
+ setTimeout(function() {
+ setTimeout(function() {
+ Module['setStatus']('');
+ }, 1);
+ if (!ABORT) doRun();
+ }, 1);
+ } else {
+ doRun();
+ }
+}
+Module['run'] = Module.run = run;
+
+function exit(status) {
+ ABORT = true;
+ EXITSTATUS = status;
+ STACKTOP = initialStackTop;
+
+ // exit the runtime
+ exitRuntime();
+
+ // TODO We should handle this differently based on environment.
+ // In the browser, the best we can do is throw an exception
+ // to halt execution, but in node we could process.exit and
+ // I'd imagine SM shell would have something equivalent.
+ // This would let us set a proper exit status (which
+ // would be great for checking test exit statuses).
+ // https://github.com/kripken/emscripten/issues/1371
+
+ // throw an exception to halt the current execution
+ throw new ExitStatus(status);
+}
+Module['exit'] = Module.exit = exit;
+
+function abort(text) {
+ if (text) {
+ Module.print(text);
+ Module.printErr(text);
+ }
+
+ ABORT = true;
+ EXITSTATUS = 1;
+
+ var extra = '\nIf this abort() is unexpected, build with -s ASSERTIONS=1 which can give more information.';
+
+ throw 'abort() at ' + stackTrace() + extra;
+}
+Module['abort'] = Module.abort = abort;
+
+// {{PRE_RUN_ADDITIONS}}
+
+if (Module['preInit']) {
+ if (typeof Module['preInit'] == 'function') Module['preInit'] = [Module['preInit']];
+ while (Module['preInit'].length > 0) {
+ Module['preInit'].pop()();
+ }
+}
+
+// shouldRunNow refers to calling main(), not run().
+var shouldRunNow = true;
+if (Module['noInitialRun']) {
+ shouldRunNow = false;
+}
+
+
+run(['binarytrees.lua'].concat(Module["arguments"]));
diff --git a/deps/v8/test/mjsunit/wasm/embenchen/memops.js b/deps/v8/test/mjsunit/wasm/embenchen/memops.js
new file mode 100644
index 0000000000..09bbd36eae
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/embenchen/memops.js
@@ -0,0 +1,8090 @@
+// Modified embenchen to direct to asm-wasm.
+// Flags: --expose-wasm
+
+var EXPECTED_OUTPUT = 'final: 840.\n';
+var Module = {
+ arguments: [1],
+ print: function(x) {Module.printBuffer += x + '\n';},
+ preRun: [function() {Module.printBuffer = ''}],
+ postRun: [function() {
+ assertEquals(EXPECTED_OUTPUT, Module.printBuffer);
+ }],
+};
+// The Module object: Our interface to the outside world. We import
+// and export values on it, and do the work to get that through
+// closure compiler if necessary. There are various ways Module can be used:
+// 1. Not defined. We create it here
+// 2. A function parameter, function(Module) { ..generated code.. }
+// 3. pre-run appended it, var Module = {}; ..generated code..
+// 4. External script tag defines var Module.
+// We need to do an eval in order to handle the closure compiler
+// case, where this code here is minified but Module was defined
+// elsewhere (e.g. case 4 above). We also need to check if Module
+// already exists (e.g. case 3 above).
+// Note that if you want to run closure, and also to use Module
+// after the generated code, you will need to define var Module = {};
+// before the code. Then that object will be used in the code, and you
+// can continue to use Module afterwards as well.
+var Module;
+if (!Module) Module = (typeof Module !== 'undefined' ? Module : null) || {};
+
+// Sometimes an existing Module object exists with properties
+// meant to overwrite the default module functionality. Here
+// we collect those properties and reapply _after_ we configure
+// the current environment's defaults to avoid having to be so
+// defensive during initialization.
+var moduleOverrides = {};
+for (var key in Module) {
+ if (Module.hasOwnProperty(key)) {
+ moduleOverrides[key] = Module[key];
+ }
+}
+
+// The environment setup code below is customized to use Module.
+// *** Environment setup code ***
+var ENVIRONMENT_IS_NODE = typeof process === 'object' && typeof require === 'function';
+var ENVIRONMENT_IS_WEB = typeof window === 'object';
+var ENVIRONMENT_IS_WORKER = typeof importScripts === 'function';
+var ENVIRONMENT_IS_SHELL = !ENVIRONMENT_IS_WEB && !ENVIRONMENT_IS_NODE && !ENVIRONMENT_IS_WORKER;
+
+if (ENVIRONMENT_IS_NODE) {
+ // Expose functionality in the same simple way that the shells work
+ // Note that we pollute the global namespace here, otherwise we break in node
+ if (!Module['print']) Module['print'] = function print(x) {
+ process['stdout'].write(x + '\n');
+ };
+ if (!Module['printErr']) Module['printErr'] = function printErr(x) {
+ process['stderr'].write(x + '\n');
+ };
+
+ var nodeFS = require('fs');
+ var nodePath = require('path');
+
+ Module['read'] = function read(filename, binary) {
+ filename = nodePath['normalize'](filename);
+ var ret = nodeFS['readFileSync'](filename);
+ // The path is absolute if the normalized version is the same as the resolved.
+ if (!ret && filename != nodePath['resolve'](filename)) {
+ filename = path.join(__dirname, '..', 'src', filename);
+ ret = nodeFS['readFileSync'](filename);
+ }
+ if (ret && !binary) ret = ret.toString();
+ return ret;
+ };
+
+ Module['readBinary'] = function readBinary(filename) { return Module['read'](filename, true) };
+
+ Module['load'] = function load(f) {
+ globalEval(read(f));
+ };
+
+ Module['arguments'] = process['argv'].slice(2);
+
+ module['exports'] = Module;
+}
+else if (ENVIRONMENT_IS_SHELL) {
+ if (!Module['print']) Module['print'] = print;
+ if (typeof printErr != 'undefined') Module['printErr'] = printErr; // not present in v8 or older sm
+
+ if (typeof read != 'undefined') {
+ Module['read'] = read;
+ } else {
+ Module['read'] = function read() { throw 'no read() available (jsc?)' };
+ }
+
+ Module['readBinary'] = function readBinary(f) {
+ return read(f, 'binary');
+ };
+
+ if (typeof scriptArgs != 'undefined') {
+ Module['arguments'] = scriptArgs;
+ } else if (typeof arguments != 'undefined') {
+ Module['arguments'] = arguments;
+ }
+
+ this['Module'] = Module;
+
+ eval("if (typeof gc === 'function' && gc.toString().indexOf('[native code]') > 0) var gc = undefined"); // wipe out the SpiderMonkey shell 'gc' function, which can confuse closure (uses it as a minified name, and it is then initted to a non-falsey value unexpectedly)
+}
+else if (ENVIRONMENT_IS_WEB || ENVIRONMENT_IS_WORKER) {
+ Module['read'] = function read(url) {
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, false);
+ xhr.send(null);
+ return xhr.responseText;
+ };
+
+ if (typeof arguments != 'undefined') {
+ Module['arguments'] = arguments;
+ }
+
+ if (typeof console !== 'undefined') {
+ if (!Module['print']) Module['print'] = function print(x) {
+ console.log(x);
+ };
+ if (!Module['printErr']) Module['printErr'] = function printErr(x) {
+ console.log(x);
+ };
+ } else {
+ // Probably a worker, and without console.log. We can do very little here...
+ var TRY_USE_DUMP = false;
+ if (!Module['print']) Module['print'] = (TRY_USE_DUMP && (typeof(dump) !== "undefined") ? (function(x) {
+ dump(x);
+ }) : (function(x) {
+ // self.postMessage(x); // enable this if you want stdout to be sent as messages
+ }));
+ }
+
+ if (ENVIRONMENT_IS_WEB) {
+ window['Module'] = Module;
+ } else {
+ Module['load'] = importScripts;
+ }
+}
+else {
+ // Unreachable because SHELL is dependant on the others
+ throw 'Unknown runtime environment. Where are we?';
+}
+
+function globalEval(x) {
+ eval.call(null, x);
+}
+if (!Module['load'] == 'undefined' && Module['read']) {
+ Module['load'] = function load(f) {
+ globalEval(Module['read'](f));
+ };
+}
+if (!Module['print']) {
+ Module['print'] = function(){};
+}
+if (!Module['printErr']) {
+ Module['printErr'] = Module['print'];
+}
+if (!Module['arguments']) {
+ Module['arguments'] = [];
+}
+// *** Environment setup code ***
+
+// Closure helpers
+Module.print = Module['print'];
+Module.printErr = Module['printErr'];
+
+// Callbacks
+Module['preRun'] = [];
+Module['postRun'] = [];
+
+// Merge back in the overrides
+for (var key in moduleOverrides) {
+ if (moduleOverrides.hasOwnProperty(key)) {
+ Module[key] = moduleOverrides[key];
+ }
+}
+
+
+
+// === Auto-generated preamble library stuff ===
+
+//========================================
+// Runtime code shared with compiler
+//========================================
+
+var Runtime = {
+ stackSave: function () {
+ return STACKTOP;
+ },
+ stackRestore: function (stackTop) {
+ STACKTOP = stackTop;
+ },
+ forceAlign: function (target, quantum) {
+ quantum = quantum || 4;
+ if (quantum == 1) return target;
+ if (isNumber(target) && isNumber(quantum)) {
+ return Math.ceil(target/quantum)*quantum;
+ } else if (isNumber(quantum) && isPowerOfTwo(quantum)) {
+ return '(((' +target + ')+' + (quantum-1) + ')&' + -quantum + ')';
+ }
+ return 'Math.ceil((' + target + ')/' + quantum + ')*' + quantum;
+ },
+ isNumberType: function (type) {
+ return type in Runtime.INT_TYPES || type in Runtime.FLOAT_TYPES;
+ },
+ isPointerType: function isPointerType(type) {
+ return type[type.length-1] == '*';
+},
+ isStructType: function isStructType(type) {
+ if (isPointerType(type)) return false;
+ if (isArrayType(type)) return true;
+ if (/<?\{ ?[^}]* ?\}>?/.test(type)) return true; // { i32, i8 } etc. - anonymous struct types
+ // See comment in isStructPointerType()
+ return type[0] == '%';
+},
+ INT_TYPES: {"i1":0,"i8":0,"i16":0,"i32":0,"i64":0},
+ FLOAT_TYPES: {"float":0,"double":0},
+ or64: function (x, y) {
+ var l = (x | 0) | (y | 0);
+ var h = (Math.round(x / 4294967296) | Math.round(y / 4294967296)) * 4294967296;
+ return l + h;
+ },
+ and64: function (x, y) {
+ var l = (x | 0) & (y | 0);
+ var h = (Math.round(x / 4294967296) & Math.round(y / 4294967296)) * 4294967296;
+ return l + h;
+ },
+ xor64: function (x, y) {
+ var l = (x | 0) ^ (y | 0);
+ var h = (Math.round(x / 4294967296) ^ Math.round(y / 4294967296)) * 4294967296;
+ return l + h;
+ },
+ getNativeTypeSize: function (type) {
+ switch (type) {
+ case 'i1': case 'i8': return 1;
+ case 'i16': return 2;
+ case 'i32': return 4;
+ case 'i64': return 8;
+ case 'float': return 4;
+ case 'double': return 8;
+ default: {
+ if (type[type.length-1] === '*') {
+ return Runtime.QUANTUM_SIZE; // A pointer
+ } else if (type[0] === 'i') {
+ var bits = parseInt(type.substr(1));
+ assert(bits % 8 === 0);
+ return bits/8;
+ } else {
+ return 0;
+ }
+ }
+ }
+ },
+ getNativeFieldSize: function (type) {
+ return Math.max(Runtime.getNativeTypeSize(type), Runtime.QUANTUM_SIZE);
+ },
+ dedup: function dedup(items, ident) {
+ var seen = {};
+ if (ident) {
+ return items.filter(function(item) {
+ if (seen[item[ident]]) return false;
+ seen[item[ident]] = true;
+ return true;
+ });
+ } else {
+ return items.filter(function(item) {
+ if (seen[item]) return false;
+ seen[item] = true;
+ return true;
+ });
+ }
+},
+ set: function set() {
+ var args = typeof arguments[0] === 'object' ? arguments[0] : arguments;
+ var ret = {};
+ for (var i = 0; i < args.length; i++) {
+ ret[args[i]] = 0;
+ }
+ return ret;
+},
+ STACK_ALIGN: 8,
+ getAlignSize: function (type, size, vararg) {
+ // we align i64s and doubles on 64-bit boundaries, unlike x86
+ if (!vararg && (type == 'i64' || type == 'double')) return 8;
+ if (!type) return Math.min(size, 8); // align structures internally to 64 bits
+ return Math.min(size || (type ? Runtime.getNativeFieldSize(type) : 0), Runtime.QUANTUM_SIZE);
+ },
+ calculateStructAlignment: function calculateStructAlignment(type) {
+ type.flatSize = 0;
+ type.alignSize = 0;
+ var diffs = [];
+ var prev = -1;
+ var index = 0;
+ type.flatIndexes = type.fields.map(function(field) {
+ index++;
+ var size, alignSize;
+ if (Runtime.isNumberType(field) || Runtime.isPointerType(field)) {
+ size = Runtime.getNativeTypeSize(field); // pack char; char; in structs, also char[X]s.
+ alignSize = Runtime.getAlignSize(field, size);
+ } else if (Runtime.isStructType(field)) {
+ if (field[1] === '0') {
+ // this is [0 x something]. When inside another structure like here, it must be at the end,
+ // and it adds no size
+ // XXX this happens in java-nbody for example... assert(index === type.fields.length, 'zero-length in the middle!');
+ size = 0;
+ if (Types.types[field]) {
+ alignSize = Runtime.getAlignSize(null, Types.types[field].alignSize);
+ } else {
+ alignSize = type.alignSize || QUANTUM_SIZE;
+ }
+ } else {
+ size = Types.types[field].flatSize;
+ alignSize = Runtime.getAlignSize(null, Types.types[field].alignSize);
+ }
+ } else if (field[0] == 'b') {
+ // bN, large number field, like a [N x i8]
+ size = field.substr(1)|0;
+ alignSize = 1;
+ } else if (field[0] === '<') {
+ // vector type
+ size = alignSize = Types.types[field].flatSize; // fully aligned
+ } else if (field[0] === 'i') {
+ // illegal integer field, that could not be legalized because it is an internal structure field
+ // it is ok to have such fields, if we just use them as markers of field size and nothing more complex
+ size = alignSize = parseInt(field.substr(1))/8;
+ assert(size % 1 === 0, 'cannot handle non-byte-size field ' + field);
+ } else {
+ assert(false, 'invalid type for calculateStructAlignment');
+ }
+ if (type.packed) alignSize = 1;
+ type.alignSize = Math.max(type.alignSize, alignSize);
+ var curr = Runtime.alignMemory(type.flatSize, alignSize); // if necessary, place this on aligned memory
+ type.flatSize = curr + size;
+ if (prev >= 0) {
+ diffs.push(curr-prev);
+ }
+ prev = curr;
+ return curr;
+ });
+ if (type.name_ && type.name_[0] === '[') {
+ // arrays have 2 elements, so we get the proper difference. then we scale here. that way we avoid
+ // allocating a potentially huge array for [999999 x i8] etc.
+ type.flatSize = parseInt(type.name_.substr(1))*type.flatSize/2;
+ }
+ type.flatSize = Runtime.alignMemory(type.flatSize, type.alignSize);
+ if (diffs.length == 0) {
+ type.flatFactor = type.flatSize;
+ } else if (Runtime.dedup(diffs).length == 1) {
+ type.flatFactor = diffs[0];
+ }
+ type.needsFlattening = (type.flatFactor != 1);
+ return type.flatIndexes;
+ },
+ generateStructInfo: function (struct, typeName, offset) {
+ var type, alignment;
+ if (typeName) {
+ offset = offset || 0;
+ type = (typeof Types === 'undefined' ? Runtime.typeInfo : Types.types)[typeName];
+ if (!type) return null;
+ if (type.fields.length != struct.length) {
+ printErr('Number of named fields must match the type for ' + typeName + ': possibly duplicate struct names. Cannot return structInfo');
+ return null;
+ }
+ alignment = type.flatIndexes;
+ } else {
+ var type = { fields: struct.map(function(item) { return item[0] }) };
+ alignment = Runtime.calculateStructAlignment(type);
+ }
+ var ret = {
+ __size__: type.flatSize
+ };
+ if (typeName) {
+ struct.forEach(function(item, i) {
+ if (typeof item === 'string') {
+ ret[item] = alignment[i] + offset;
+ } else {
+ // embedded struct
+ var key;
+ for (var k in item) key = k;
+ ret[key] = Runtime.generateStructInfo(item[key], type.fields[i], alignment[i]);
+ }
+ });
+ } else {
+ struct.forEach(function(item, i) {
+ ret[item[1]] = alignment[i];
+ });
+ }
+ return ret;
+ },
+ dynCall: function (sig, ptr, args) {
+ if (args && args.length) {
+ if (!args.splice) args = Array.prototype.slice.call(args);
+ args.splice(0, 0, ptr);
+ return Module['dynCall_' + sig].apply(null, args);
+ } else {
+ return Module['dynCall_' + sig].call(null, ptr);
+ }
+ },
+ functionPointers: [],
+ addFunction: function (func) {
+ for (var i = 0; i < Runtime.functionPointers.length; i++) {
+ if (!Runtime.functionPointers[i]) {
+ Runtime.functionPointers[i] = func;
+ return 2*(1 + i);
+ }
+ }
+ throw 'Finished up all reserved function pointers. Use a higher value for RESERVED_FUNCTION_POINTERS.';
+ },
+ removeFunction: function (index) {
+ Runtime.functionPointers[(index-2)/2] = null;
+ },
+ getAsmConst: function (code, numArgs) {
+ // code is a constant string on the heap, so we can cache these
+ if (!Runtime.asmConstCache) Runtime.asmConstCache = {};
+ var func = Runtime.asmConstCache[code];
+ if (func) return func;
+ var args = [];
+ for (var i = 0; i < numArgs; i++) {
+ args.push(String.fromCharCode(36) + i); // $0, $1 etc
+ }
+ var source = Pointer_stringify(code);
+ if (source[0] === '"') {
+ // tolerate EM_ASM("..code..") even though EM_ASM(..code..) is correct
+ if (source.indexOf('"', 1) === source.length-1) {
+ source = source.substr(1, source.length-2);
+ } else {
+ // something invalid happened, e.g. EM_ASM("..code($0)..", input)
+ abort('invalid EM_ASM input |' + source + '|. Please use EM_ASM(..code..) (no quotes) or EM_ASM({ ..code($0).. }, input) (to input values)');
+ }
+ }
+ try {
+ var evalled = eval('(function(' + args.join(',') + '){ ' + source + ' })'); // new Function does not allow upvars in node
+ } catch(e) {
+ Module.printErr('error in executing inline EM_ASM code: ' + e + ' on: \n\n' + source + '\n\nwith args |' + args + '| (make sure to use the right one out of EM_ASM, EM_ASM_ARGS, etc.)');
+ throw e;
+ }
+ return Runtime.asmConstCache[code] = evalled;
+ },
+ warnOnce: function (text) {
+ if (!Runtime.warnOnce.shown) Runtime.warnOnce.shown = {};
+ if (!Runtime.warnOnce.shown[text]) {
+ Runtime.warnOnce.shown[text] = 1;
+ Module.printErr(text);
+ }
+ },
+ funcWrappers: {},
+ getFuncWrapper: function (func, sig) {
+ assert(sig);
+ if (!Runtime.funcWrappers[func]) {
+ Runtime.funcWrappers[func] = function dynCall_wrapper() {
+ return Runtime.dynCall(sig, func, arguments);
+ };
+ }
+ return Runtime.funcWrappers[func];
+ },
+ UTF8Processor: function () {
+ var buffer = [];
+ var needed = 0;
+ this.processCChar = function (code) {
+ code = code & 0xFF;
+
+ if (buffer.length == 0) {
+ if ((code & 0x80) == 0x00) { // 0xxxxxxx
+ return String.fromCharCode(code);
+ }
+ buffer.push(code);
+ if ((code & 0xE0) == 0xC0) { // 110xxxxx
+ needed = 1;
+ } else if ((code & 0xF0) == 0xE0) { // 1110xxxx
+ needed = 2;
+ } else { // 11110xxx
+ needed = 3;
+ }
+ return '';
+ }
+
+ if (needed) {
+ buffer.push(code);
+ needed--;
+ if (needed > 0) return '';
+ }
+
+ var c1 = buffer[0];
+ var c2 = buffer[1];
+ var c3 = buffer[2];
+ var c4 = buffer[3];
+ var ret;
+ if (buffer.length == 2) {
+ ret = String.fromCharCode(((c1 & 0x1F) << 6) | (c2 & 0x3F));
+ } else if (buffer.length == 3) {
+ ret = String.fromCharCode(((c1 & 0x0F) << 12) | ((c2 & 0x3F) << 6) | (c3 & 0x3F));
+ } else {
+ // http://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
+ var codePoint = ((c1 & 0x07) << 18) | ((c2 & 0x3F) << 12) |
+ ((c3 & 0x3F) << 6) | (c4 & 0x3F);
+ ret = String.fromCharCode(
+ Math.floor((codePoint - 0x10000) / 0x400) + 0xD800,
+ (codePoint - 0x10000) % 0x400 + 0xDC00);
+ }
+ buffer.length = 0;
+ return ret;
+ }
+ this.processJSString = function processJSString(string) {
+ /* TODO: use TextEncoder when present,
+ var encoder = new TextEncoder();
+ encoder['encoding'] = "utf-8";
+ var utf8Array = encoder['encode'](aMsg.data);
+ */
+ string = unescape(encodeURIComponent(string));
+ var ret = [];
+ for (var i = 0; i < string.length; i++) {
+ ret.push(string.charCodeAt(i));
+ }
+ return ret;
+ }
+ },
+ getCompilerSetting: function (name) {
+ throw 'You must build with -s RETAIN_COMPILER_SETTINGS=1 for Runtime.getCompilerSetting or emscripten_get_compiler_setting to work';
+ },
+ stackAlloc: function (size) { var ret = STACKTOP;STACKTOP = (STACKTOP + size)|0;STACKTOP = (((STACKTOP)+7)&-8); return ret; },
+ staticAlloc: function (size) { var ret = STATICTOP;STATICTOP = (STATICTOP + size)|0;STATICTOP = (((STATICTOP)+7)&-8); return ret; },
+ dynamicAlloc: function (size) { var ret = DYNAMICTOP;DYNAMICTOP = (DYNAMICTOP + size)|0;DYNAMICTOP = (((DYNAMICTOP)+7)&-8); if (DYNAMICTOP >= TOTAL_MEMORY) enlargeMemory();; return ret; },
+ alignMemory: function (size,quantum) { var ret = size = Math.ceil((size)/(quantum ? quantum : 8))*(quantum ? quantum : 8); return ret; },
+ makeBigInt: function (low,high,unsigned) { var ret = (unsigned ? ((+((low>>>0)))+((+((high>>>0)))*(+4294967296))) : ((+((low>>>0)))+((+((high|0)))*(+4294967296)))); return ret; },
+ GLOBAL_BASE: 8,
+ QUANTUM_SIZE: 4,
+ __dummy__: 0
+}
+
+
+Module['Runtime'] = Runtime;
+
+
+
+
+
+
+
+
+
+//========================================
+// Runtime essentials
+//========================================
+
+var __THREW__ = 0; // Used in checking for thrown exceptions.
+
+var ABORT = false; // whether we are quitting the application. no code should run after this. set in exit() and abort()
+var EXITSTATUS = 0;
+
+var undef = 0;
+// tempInt is used for 32-bit signed values or smaller. tempBigInt is used
+// for 32-bit unsigned values or more than 32 bits. TODO: audit all uses of tempInt
+var tempValue, tempInt, tempBigInt, tempInt2, tempBigInt2, tempPair, tempBigIntI, tempBigIntR, tempBigIntS, tempBigIntP, tempBigIntD, tempDouble, tempFloat;
+var tempI64, tempI64b;
+var tempRet0, tempRet1, tempRet2, tempRet3, tempRet4, tempRet5, tempRet6, tempRet7, tempRet8, tempRet9;
+
+function assert(condition, text) {
+ if (!condition) {
+ abort('Assertion failed: ' + text);
+ }
+}
+
+var globalScope = this;
+
+// C calling interface. A convenient way to call C functions (in C files, or
+// defined with extern "C").
+//
+// Note: LLVM optimizations can inline and remove functions, after which you will not be
+// able to call them. Closure can also do so. To avoid that, add your function to
+// the exports using something like
+//
+// -s EXPORTED_FUNCTIONS='["_main", "_myfunc"]'
+//
+// @param ident The name of the C function (note that C++ functions will be name-mangled - use extern "C")
+// @param returnType The return type of the function, one of the JS types 'number', 'string' or 'array' (use 'number' for any C pointer, and
+// 'array' for JavaScript arrays and typed arrays; note that arrays are 8-bit).
+// @param argTypes An array of the types of arguments for the function (if there are no arguments, this can be ommitted). Types are as in returnType,
+// except that 'array' is not possible (there is no way for us to know the length of the array)
+// @param args An array of the arguments to the function, as native JS values (as in returnType)
+// Note that string arguments will be stored on the stack (the JS string will become a C string on the stack).
+// @return The return value, as a native JS value (as in returnType)
+function ccall(ident, returnType, argTypes, args) {
+ return ccallFunc(getCFunc(ident), returnType, argTypes, args);
+}
+Module["ccall"] = ccall;
+
+// Returns the C function with a specified identifier (for C++, you need to do manual name mangling)
+function getCFunc(ident) {
+ try {
+ var func = Module['_' + ident]; // closure exported function
+ if (!func) func = eval('_' + ident); // explicit lookup
+ } catch(e) {
+ }
+ assert(func, 'Cannot call unknown function ' + ident + ' (perhaps LLVM optimizations or closure removed it?)');
+ return func;
+}
+
+// Internal function that does a C call using a function, not an identifier
+function ccallFunc(func, returnType, argTypes, args) {
+ var stack = 0;
+ function toC(value, type) {
+ if (type == 'string') {
+ if (value === null || value === undefined || value === 0) return 0; // null string
+ value = intArrayFromString(value);
+ type = 'array';
+ }
+ if (type == 'array') {
+ if (!stack) stack = Runtime.stackSave();
+ var ret = Runtime.stackAlloc(value.length);
+ writeArrayToMemory(value, ret);
+ return ret;
+ }
+ return value;
+ }
+ function fromC(value, type) {
+ if (type == 'string') {
+ return Pointer_stringify(value);
+ }
+ assert(type != 'array');
+ return value;
+ }
+ var i = 0;
+ var cArgs = args ? args.map(function(arg) {
+ return toC(arg, argTypes[i++]);
+ }) : [];
+ var ret = fromC(func.apply(null, cArgs), returnType);
+ if (stack) Runtime.stackRestore(stack);
+ return ret;
+}
+
+// Returns a native JS wrapper for a C function. This is similar to ccall, but
+// returns a function you can call repeatedly in a normal way. For example:
+//
+// var my_function = cwrap('my_c_function', 'number', ['number', 'number']);
+// alert(my_function(5, 22));
+// alert(my_function(99, 12));
+//
+function cwrap(ident, returnType, argTypes) {
+ var func = getCFunc(ident);
+ return function() {
+ return ccallFunc(func, returnType, argTypes, Array.prototype.slice.call(arguments));
+ }
+}
+Module["cwrap"] = cwrap;
+
+// Sets a value in memory in a dynamic way at run-time. Uses the
+// type data. This is the same as makeSetValue, except that
+// makeSetValue is done at compile-time and generates the needed
+// code then, whereas this function picks the right code at
+// run-time.
+// Note that setValue and getValue only do *aligned* writes and reads!
+// Note that ccall uses JS types as for defining types, while setValue and
+// getValue need LLVM types ('i8', 'i32') - this is a lower-level operation
+function setValue(ptr, value, type, noSafe) {
+ type = type || 'i8';
+ if (type.charAt(type.length-1) === '*') type = 'i32'; // pointers are 32-bit
+ switch(type) {
+ case 'i1': HEAP8[(ptr)]=value; break;
+ case 'i8': HEAP8[(ptr)]=value; break;
+ case 'i16': HEAP16[((ptr)>>1)]=value; break;
+ case 'i32': HEAP32[((ptr)>>2)]=value; break;
+ case 'i64': (tempI64 = [value>>>0,(tempDouble=value,(+(Math_abs(tempDouble))) >= (+1) ? (tempDouble > (+0) ? ((Math_min((+(Math_floor((tempDouble)/(+4294967296)))), (+4294967295)))|0)>>>0 : (~~((+(Math_ceil((tempDouble - +(((~~(tempDouble)))>>>0))/(+4294967296))))))>>>0) : 0)],HEAP32[((ptr)>>2)]=tempI64[0],HEAP32[(((ptr)+(4))>>2)]=tempI64[1]); break;
+ case 'float': HEAPF32[((ptr)>>2)]=value; break;
+ case 'double': HEAPF64[((ptr)>>3)]=value; break;
+ default: abort('invalid type for setValue: ' + type);
+ }
+}
+Module['setValue'] = setValue;
+
+// Parallel to setValue.
+function getValue(ptr, type, noSafe) {
+ type = type || 'i8';
+ if (type.charAt(type.length-1) === '*') type = 'i32'; // pointers are 32-bit
+ switch(type) {
+ case 'i1': return HEAP8[(ptr)];
+ case 'i8': return HEAP8[(ptr)];
+ case 'i16': return HEAP16[((ptr)>>1)];
+ case 'i32': return HEAP32[((ptr)>>2)];
+ case 'i64': return HEAP32[((ptr)>>2)];
+ case 'float': return HEAPF32[((ptr)>>2)];
+ case 'double': return HEAPF64[((ptr)>>3)];
+ default: abort('invalid type for setValue: ' + type);
+ }
+ return null;
+}
+Module['getValue'] = getValue;
+
+var ALLOC_NORMAL = 0; // Tries to use _malloc()
+var ALLOC_STACK = 1; // Lives for the duration of the current function call
+var ALLOC_STATIC = 2; // Cannot be freed
+var ALLOC_DYNAMIC = 3; // Cannot be freed except through sbrk
+var ALLOC_NONE = 4; // Do not allocate
+Module['ALLOC_NORMAL'] = ALLOC_NORMAL;
+Module['ALLOC_STACK'] = ALLOC_STACK;
+Module['ALLOC_STATIC'] = ALLOC_STATIC;
+Module['ALLOC_DYNAMIC'] = ALLOC_DYNAMIC;
+Module['ALLOC_NONE'] = ALLOC_NONE;
+
+// allocate(): This is for internal use. You can use it yourself as well, but the interface
+// is a little tricky (see docs right below). The reason is that it is optimized
+// for multiple syntaxes to save space in generated code. So you should
+// normally not use allocate(), and instead allocate memory using _malloc(),
+// initialize it with setValue(), and so forth.
+// @slab: An array of data, or a number. If a number, then the size of the block to allocate,
+// in *bytes* (note that this is sometimes confusing: the next parameter does not
+// affect this!)
+// @types: Either an array of types, one for each byte (or 0 if no type at that position),
+// or a single type which is used for the entire block. This only matters if there
+// is initial data - if @slab is a number, then this does not matter at all and is
+// ignored.
+// @allocator: How to allocate memory, see ALLOC_*
+function allocate(slab, types, allocator, ptr) {
+ var zeroinit, size;
+ if (typeof slab === 'number') {
+ zeroinit = true;
+ size = slab;
+ } else {
+ zeroinit = false;
+ size = slab.length;
+ }
+
+ var singleType = typeof types === 'string' ? types : null;
+
+ var ret;
+ if (allocator == ALLOC_NONE) {
+ ret = ptr;
+ } else {
+ ret = [_malloc, Runtime.stackAlloc, Runtime.staticAlloc, Runtime.dynamicAlloc][allocator === undefined ? ALLOC_STATIC : allocator](Math.max(size, singleType ? 1 : types.length));
+ }
+
+ if (zeroinit) {
+ var ptr = ret, stop;
+ assert((ret & 3) == 0);
+ stop = ret + (size & ~3);
+ for (; ptr < stop; ptr += 4) {
+ HEAP32[((ptr)>>2)]=0;
+ }
+ stop = ret + size;
+ while (ptr < stop) {
+ HEAP8[((ptr++)|0)]=0;
+ }
+ return ret;
+ }
+
+ if (singleType === 'i8') {
+ if (slab.subarray || slab.slice) {
+ HEAPU8.set(slab, ret);
+ } else {
+ HEAPU8.set(new Uint8Array(slab), ret);
+ }
+ return ret;
+ }
+
+ var i = 0, type, typeSize, previousType;
+ while (i < size) {
+ var curr = slab[i];
+
+ if (typeof curr === 'function') {
+ curr = Runtime.getFunctionIndex(curr);
+ }
+
+ type = singleType || types[i];
+ if (type === 0) {
+ i++;
+ continue;
+ }
+
+ if (type == 'i64') type = 'i32'; // special case: we have one i32 here, and one i32 later
+
+ setValue(ret+i, curr, type);
+
+ // no need to look up size unless type changes, so cache it
+ if (previousType !== type) {
+ typeSize = Runtime.getNativeTypeSize(type);
+ previousType = type;
+ }
+ i += typeSize;
+ }
+
+ return ret;
+}
+Module['allocate'] = allocate;
+
+function Pointer_stringify(ptr, /* optional */ length) {
+ // TODO: use TextDecoder
+ // Find the length, and check for UTF while doing so
+ var hasUtf = false;
+ var t;
+ var i = 0;
+ while (1) {
+ t = HEAPU8[(((ptr)+(i))|0)];
+ if (t >= 128) hasUtf = true;
+ else if (t == 0 && !length) break;
+ i++;
+ if (length && i == length) break;
+ }
+ if (!length) length = i;
+
+ var ret = '';
+
+ if (!hasUtf) {
+ var MAX_CHUNK = 1024; // split up into chunks, because .apply on a huge string can overflow the stack
+ var curr;
+ while (length > 0) {
+ curr = String.fromCharCode.apply(String, HEAPU8.subarray(ptr, ptr + Math.min(length, MAX_CHUNK)));
+ ret = ret ? ret + curr : curr;
+ ptr += MAX_CHUNK;
+ length -= MAX_CHUNK;
+ }
+ return ret;
+ }
+
+ var utf8 = new Runtime.UTF8Processor();
+ for (i = 0; i < length; i++) {
+ t = HEAPU8[(((ptr)+(i))|0)];
+ ret += utf8.processCChar(t);
+ }
+ return ret;
+}
+Module['Pointer_stringify'] = Pointer_stringify;
+
+// Given a pointer 'ptr' to a null-terminated UTF16LE-encoded string in the emscripten HEAP, returns
+// a copy of that string as a Javascript String object.
+function UTF16ToString(ptr) {
+ var i = 0;
+
+ var str = '';
+ while (1) {
+ var codeUnit = HEAP16[(((ptr)+(i*2))>>1)];
+ if (codeUnit == 0)
+ return str;
+ ++i;
+ // fromCharCode constructs a character from a UTF-16 code unit, so we can pass the UTF16 string right through.
+ str += String.fromCharCode(codeUnit);
+ }
+}
+Module['UTF16ToString'] = UTF16ToString;
+
+// Copies the given Javascript String object 'str' to the emscripten HEAP at address 'outPtr',
+// null-terminated and encoded in UTF16LE form. The copy will require at most (str.length*2+1)*2 bytes of space in the HEAP.
+function stringToUTF16(str, outPtr) {
+ for(var i = 0; i < str.length; ++i) {
+ // charCodeAt returns a UTF-16 encoded code unit, so it can be directly written to the HEAP.
+ var codeUnit = str.charCodeAt(i); // possibly a lead surrogate
+ HEAP16[(((outPtr)+(i*2))>>1)]=codeUnit;
+ }
+ // Null-terminate the pointer to the HEAP.
+ HEAP16[(((outPtr)+(str.length*2))>>1)]=0;
+}
+Module['stringToUTF16'] = stringToUTF16;
+
+// Given a pointer 'ptr' to a null-terminated UTF32LE-encoded string in the emscripten HEAP, returns
+// a copy of that string as a Javascript String object.
+function UTF32ToString(ptr) {
+ var i = 0;
+
+ var str = '';
+ while (1) {
+ var utf32 = HEAP32[(((ptr)+(i*4))>>2)];
+ if (utf32 == 0)
+ return str;
+ ++i;
+ // Gotcha: fromCharCode constructs a character from a UTF-16 encoded code (pair), not from a Unicode code point! So encode the code point to UTF-16 for constructing.
+ if (utf32 >= 0x10000) {
+ var ch = utf32 - 0x10000;
+ str += String.fromCharCode(0xD800 | (ch >> 10), 0xDC00 | (ch & 0x3FF));
+ } else {
+ str += String.fromCharCode(utf32);
+ }
+ }
+}
+Module['UTF32ToString'] = UTF32ToString;
+
+// Copies the given Javascript String object 'str' to the emscripten HEAP at address 'outPtr',
+// null-terminated and encoded in UTF32LE form. The copy will require at most (str.length+1)*4 bytes of space in the HEAP,
+// but can use less, since str.length does not return the number of characters in the string, but the number of UTF-16 code units in the string.
+function stringToUTF32(str, outPtr) {
+ var iChar = 0;
+ for(var iCodeUnit = 0; iCodeUnit < str.length; ++iCodeUnit) {
+ // Gotcha: charCodeAt returns a 16-bit word that is a UTF-16 encoded code unit, not a Unicode code point of the character! We must decode the string to UTF-32 to the heap.
+ var codeUnit = str.charCodeAt(iCodeUnit); // possibly a lead surrogate
+ if (codeUnit >= 0xD800 && codeUnit <= 0xDFFF) {
+ var trailSurrogate = str.charCodeAt(++iCodeUnit);
+ codeUnit = 0x10000 + ((codeUnit & 0x3FF) << 10) | (trailSurrogate & 0x3FF);
+ }
+ HEAP32[(((outPtr)+(iChar*4))>>2)]=codeUnit;
+ ++iChar;
+ }
+ // Null-terminate the pointer to the HEAP.
+ HEAP32[(((outPtr)+(iChar*4))>>2)]=0;
+}
+Module['stringToUTF32'] = stringToUTF32;
+
+function demangle(func) {
+ var i = 3;
+ // params, etc.
+ var basicTypes = {
+ 'v': 'void',
+ 'b': 'bool',
+ 'c': 'char',
+ 's': 'short',
+ 'i': 'int',
+ 'l': 'long',
+ 'f': 'float',
+ 'd': 'double',
+ 'w': 'wchar_t',
+ 'a': 'signed char',
+ 'h': 'unsigned char',
+ 't': 'unsigned short',
+ 'j': 'unsigned int',
+ 'm': 'unsigned long',
+ 'x': 'long long',
+ 'y': 'unsigned long long',
+ 'z': '...'
+ };
+ var subs = [];
+ var first = true;
+ function dump(x) {
+ //return;
+ if (x) Module.print(x);
+ Module.print(func);
+ var pre = '';
+ for (var a = 0; a < i; a++) pre += ' ';
+ Module.print (pre + '^');
+ }
+ function parseNested() {
+ i++;
+ if (func[i] === 'K') i++; // ignore const
+ var parts = [];
+ while (func[i] !== 'E') {
+ if (func[i] === 'S') { // substitution
+ i++;
+ var next = func.indexOf('_', i);
+ var num = func.substring(i, next) || 0;
+ parts.push(subs[num] || '?');
+ i = next+1;
+ continue;
+ }
+ if (func[i] === 'C') { // constructor
+ parts.push(parts[parts.length-1]);
+ i += 2;
+ continue;
+ }
+ var size = parseInt(func.substr(i));
+ var pre = size.toString().length;
+ if (!size || !pre) { i--; break; } // counter i++ below us
+ var curr = func.substr(i + pre, size);
+ parts.push(curr);
+ subs.push(curr);
+ i += pre + size;
+ }
+ i++; // skip E
+ return parts;
+ }
+ function parse(rawList, limit, allowVoid) { // main parser
+ limit = limit || Infinity;
+ var ret = '', list = [];
+ function flushList() {
+ return '(' + list.join(', ') + ')';
+ }
+ var name;
+ if (func[i] === 'N') {
+ // namespaced N-E
+ name = parseNested().join('::');
+ limit--;
+ if (limit === 0) return rawList ? [name] : name;
+ } else {
+ // not namespaced
+ if (func[i] === 'K' || (first && func[i] === 'L')) i++; // ignore const and first 'L'
+ var size = parseInt(func.substr(i));
+ if (size) {
+ var pre = size.toString().length;
+ name = func.substr(i + pre, size);
+ i += pre + size;
+ }
+ }
+ first = false;
+ if (func[i] === 'I') {
+ i++;
+ var iList = parse(true);
+ var iRet = parse(true, 1, true);
+ ret += iRet[0] + ' ' + name + '<' + iList.join(', ') + '>';
+ } else {
+ ret = name;
+ }
+ paramLoop: while (i < func.length && limit-- > 0) {
+ //dump('paramLoop');
+ var c = func[i++];
+ if (c in basicTypes) {
+ list.push(basicTypes[c]);
+ } else {
+ switch (c) {
+ case 'P': list.push(parse(true, 1, true)[0] + '*'); break; // pointer
+ case 'R': list.push(parse(true, 1, true)[0] + '&'); break; // reference
+ case 'L': { // literal
+ i++; // skip basic type
+ var end = func.indexOf('E', i);
+ var size = end - i;
+ list.push(func.substr(i, size));
+ i += size + 2; // size + 'EE'
+ break;
+ }
+ case 'A': { // array
+ var size = parseInt(func.substr(i));
+ i += size.toString().length;
+ if (func[i] !== '_') throw '?';
+ i++; // skip _
+ list.push(parse(true, 1, true)[0] + ' [' + size + ']');
+ break;
+ }
+ case 'E': break paramLoop;
+ default: ret += '?' + c; break paramLoop;
+ }
+ }
+ }
+ if (!allowVoid && list.length === 1 && list[0] === 'void') list = []; // avoid (void)
+ if (rawList) {
+ if (ret) {
+ list.push(ret + '?');
+ }
+ return list;
+ } else {
+ return ret + flushList();
+ }
+ }
+ try {
+ // Special-case the entry point, since its name differs from other name mangling.
+ if (func == 'Object._main' || func == '_main') {
+ return 'main()';
+ }
+ if (typeof func === 'number') func = Pointer_stringify(func);
+ if (func[0] !== '_') return func;
+ if (func[1] !== '_') return func; // C function
+ if (func[2] !== 'Z') return func;
+ switch (func[3]) {
+ case 'n': return 'operator new()';
+ case 'd': return 'operator delete()';
+ }
+ return parse();
+ } catch(e) {
+ return func;
+ }
+}
+
+function demangleAll(text) {
+ return text.replace(/__Z[\w\d_]+/g, function(x) { var y = demangle(x); return x === y ? x : (x + ' [' + y + ']') });
+}
+
+function stackTrace() {
+ var stack = new Error().stack;
+ return stack ? demangleAll(stack) : '(no stack trace available)'; // Stack trace is not available at least on IE10 and Safari 6.
+}
+
+// Memory management
+
+var PAGE_SIZE = 4096;
+function alignMemoryPage(x) {
+ return (x+4095)&-4096;
+}
+
+var HEAP;
+var HEAP8, HEAPU8, HEAP16, HEAPU16, HEAP32, HEAPU32, HEAPF32, HEAPF64;
+
+var STATIC_BASE = 0, STATICTOP = 0, staticSealed = false; // static area
+var STACK_BASE = 0, STACKTOP = 0, STACK_MAX = 0; // stack area
+var DYNAMIC_BASE = 0, DYNAMICTOP = 0; // dynamic area handled by sbrk
+
+function enlargeMemory() {
+ abort('Cannot enlarge memory arrays. Either (1) compile with -s TOTAL_MEMORY=X with X higher than the current value ' + TOTAL_MEMORY + ', (2) compile with ALLOW_MEMORY_GROWTH which adjusts the size at runtime but prevents some optimizations, or (3) set Module.TOTAL_MEMORY before the program runs.');
+}
+
+var TOTAL_STACK = Module['TOTAL_STACK'] || 5242880;
+var TOTAL_MEMORY = Module['TOTAL_MEMORY'] || 134217728;
+var FAST_MEMORY = Module['FAST_MEMORY'] || 2097152;
+
+var totalMemory = 4096;
+while (totalMemory < TOTAL_MEMORY || totalMemory < 2*TOTAL_STACK) {
+ if (totalMemory < 16*1024*1024) {
+ totalMemory *= 2;
+ } else {
+ totalMemory += 16*1024*1024
+ }
+}
+if (totalMemory !== TOTAL_MEMORY) {
+ Module.printErr('increasing TOTAL_MEMORY to ' + totalMemory + ' to be more reasonable');
+ TOTAL_MEMORY = totalMemory;
+}
+
+// Initialize the runtime's memory
+// check for full engine support (use string 'subarray' to avoid closure compiler confusion)
+assert(typeof Int32Array !== 'undefined' && typeof Float64Array !== 'undefined' && !!(new Int32Array(1)['subarray']) && !!(new Int32Array(1)['set']),
+ 'JS engine does not provide full typed array support');
+
+var buffer = new ArrayBuffer(TOTAL_MEMORY);
+HEAP8 = new Int8Array(buffer);
+HEAP16 = new Int16Array(buffer);
+HEAP32 = new Int32Array(buffer);
+HEAPU8 = new Uint8Array(buffer);
+HEAPU16 = new Uint16Array(buffer);
+HEAPU32 = new Uint32Array(buffer);
+HEAPF32 = new Float32Array(buffer);
+HEAPF64 = new Float64Array(buffer);
+
+// Endianness check (note: assumes compiler arch was little-endian)
+HEAP32[0] = 255;
+assert(HEAPU8[0] === 255 && HEAPU8[3] === 0, 'Typed arrays 2 must be run on a little-endian system');
+
+Module['HEAP'] = HEAP;
+Module['HEAP8'] = HEAP8;
+Module['HEAP16'] = HEAP16;
+Module['HEAP32'] = HEAP32;
+Module['HEAPU8'] = HEAPU8;
+Module['HEAPU16'] = HEAPU16;
+Module['HEAPU32'] = HEAPU32;
+Module['HEAPF32'] = HEAPF32;
+Module['HEAPF64'] = HEAPF64;
+
+function callRuntimeCallbacks(callbacks) {
+ while(callbacks.length > 0) {
+ var callback = callbacks.shift();
+ if (typeof callback == 'function') {
+ callback();
+ continue;
+ }
+ var func = callback.func;
+ if (typeof func === 'number') {
+ if (callback.arg === undefined) {
+ Runtime.dynCall('v', func);
+ } else {
+ Runtime.dynCall('vi', func, [callback.arg]);
+ }
+ } else {
+ func(callback.arg === undefined ? null : callback.arg);
+ }
+ }
+}
+
+var __ATPRERUN__ = []; // functions called before the runtime is initialized
+var __ATINIT__ = []; // functions called during startup
+var __ATMAIN__ = []; // functions called when main() is to be run
+var __ATEXIT__ = []; // functions called during shutdown
+var __ATPOSTRUN__ = []; // functions called after the runtime has exited
+
+var runtimeInitialized = false;
+
+function preRun() {
+ // compatibility - merge in anything from Module['preRun'] at this time
+ if (Module['preRun']) {
+ if (typeof Module['preRun'] == 'function') Module['preRun'] = [Module['preRun']];
+ while (Module['preRun'].length) {
+ addOnPreRun(Module['preRun'].shift());
+ }
+ }
+ callRuntimeCallbacks(__ATPRERUN__);
+}
+
+function ensureInitRuntime() {
+ if (runtimeInitialized) return;
+ runtimeInitialized = true;
+ callRuntimeCallbacks(__ATINIT__);
+}
+
+function preMain() {
+ callRuntimeCallbacks(__ATMAIN__);
+}
+
+function exitRuntime() {
+ callRuntimeCallbacks(__ATEXIT__);
+}
+
+function postRun() {
+ // compatibility - merge in anything from Module['postRun'] at this time
+ if (Module['postRun']) {
+ if (typeof Module['postRun'] == 'function') Module['postRun'] = [Module['postRun']];
+ while (Module['postRun'].length) {
+ addOnPostRun(Module['postRun'].shift());
+ }
+ }
+ callRuntimeCallbacks(__ATPOSTRUN__);
+}
+
+function addOnPreRun(cb) {
+ __ATPRERUN__.unshift(cb);
+}
+Module['addOnPreRun'] = Module.addOnPreRun = addOnPreRun;
+
+function addOnInit(cb) {
+ __ATINIT__.unshift(cb);
+}
+Module['addOnInit'] = Module.addOnInit = addOnInit;
+
+function addOnPreMain(cb) {
+ __ATMAIN__.unshift(cb);
+}
+Module['addOnPreMain'] = Module.addOnPreMain = addOnPreMain;
+
+function addOnExit(cb) {
+ __ATEXIT__.unshift(cb);
+}
+Module['addOnExit'] = Module.addOnExit = addOnExit;
+
+function addOnPostRun(cb) {
+ __ATPOSTRUN__.unshift(cb);
+}
+Module['addOnPostRun'] = Module.addOnPostRun = addOnPostRun;
+
+// Tools
+
+// This processes a JS string into a C-line array of numbers, 0-terminated.
+// For LLVM-originating strings, see parser.js:parseLLVMString function
+function intArrayFromString(stringy, dontAddNull, length /* optional */) {
+ var ret = (new Runtime.UTF8Processor()).processJSString(stringy);
+ if (length) {
+ ret.length = length;
+ }
+ if (!dontAddNull) {
+ ret.push(0);
+ }
+ return ret;
+}
+Module['intArrayFromString'] = intArrayFromString;
+
+function intArrayToString(array) {
+ var ret = [];
+ for (var i = 0; i < array.length; i++) {
+ var chr = array[i];
+ if (chr > 0xFF) {
+ chr &= 0xFF;
+ }
+ ret.push(String.fromCharCode(chr));
+ }
+ return ret.join('');
+}
+Module['intArrayToString'] = intArrayToString;
+
+// Write a Javascript array to somewhere in the heap
+function writeStringToMemory(string, buffer, dontAddNull) {
+ var array = intArrayFromString(string, dontAddNull);
+ var i = 0;
+ while (i < array.length) {
+ var chr = array[i];
+ HEAP8[(((buffer)+(i))|0)]=chr;
+ i = i + 1;
+ }
+}
+Module['writeStringToMemory'] = writeStringToMemory;
+
+function writeArrayToMemory(array, buffer) {
+ for (var i = 0; i < array.length; i++) {
+ HEAP8[(((buffer)+(i))|0)]=array[i];
+ }
+}
+Module['writeArrayToMemory'] = writeArrayToMemory;
+
+function writeAsciiToMemory(str, buffer, dontAddNull) {
+ for (var i = 0; i < str.length; i++) {
+ HEAP8[(((buffer)+(i))|0)]=str.charCodeAt(i);
+ }
+ if (!dontAddNull) HEAP8[(((buffer)+(str.length))|0)]=0;
+}
+Module['writeAsciiToMemory'] = writeAsciiToMemory;
+
+function unSign(value, bits, ignore) {
+ if (value >= 0) {
+ return value;
+ }
+ return bits <= 32 ? 2*Math.abs(1 << (bits-1)) + value // Need some trickery, since if bits == 32, we are right at the limit of the bits JS uses in bitshifts
+ : Math.pow(2, bits) + value;
+}
+function reSign(value, bits, ignore) {
+ if (value <= 0) {
+ return value;
+ }
+ var half = bits <= 32 ? Math.abs(1 << (bits-1)) // abs is needed if bits == 32
+ : Math.pow(2, bits-1);
+ if (value >= half && (bits <= 32 || value > half)) { // for huge values, we can hit the precision limit and always get true here. so don't do that
+ // but, in general there is no perfect solution here. With 64-bit ints, we get rounding and errors
+ // TODO: In i64 mode 1, resign the two parts separately and safely
+ value = -2*half + value; // Cannot bitshift half, as it may be at the limit of the bits JS uses in bitshifts
+ }
+ return value;
+}
+
+// check for imul support, and also for correctness ( https://bugs.webkit.org/show_bug.cgi?id=126345 )
+if (!Math['imul'] || Math['imul'](0xffffffff, 5) !== -5) Math['imul'] = function imul(a, b) {
+ var ah = a >>> 16;
+ var al = a & 0xffff;
+ var bh = b >>> 16;
+ var bl = b & 0xffff;
+ return (al*bl + ((ah*bl + al*bh) << 16))|0;
+};
+Math.imul = Math['imul'];
+
+
+var Math_abs = Math.abs;
+var Math_cos = Math.cos;
+var Math_sin = Math.sin;
+var Math_tan = Math.tan;
+var Math_acos = Math.acos;
+var Math_asin = Math.asin;
+var Math_atan = Math.atan;
+var Math_atan2 = Math.atan2;
+var Math_exp = Math.exp;
+var Math_log = Math.log;
+var Math_sqrt = Math.sqrt;
+var Math_ceil = Math.ceil;
+var Math_floor = Math.floor;
+var Math_pow = Math.pow;
+var Math_imul = Math.imul;
+var Math_fround = Math.fround;
+var Math_min = Math.min;
+
+// A counter of dependencies for calling run(). If we need to
+// do asynchronous work before running, increment this and
+// decrement it. Incrementing must happen in a place like
+// PRE_RUN_ADDITIONS (used by emcc to add file preloading).
+// Note that you can add dependencies in preRun, even though
+// it happens right before run - run will be postponed until
+// the dependencies are met.
+var runDependencies = 0;
+var runDependencyWatcher = null;
+var dependenciesFulfilled = null; // overridden to take different actions when all run dependencies are fulfilled
+
+function addRunDependency(id) {
+ runDependencies++;
+ if (Module['monitorRunDependencies']) {
+ Module['monitorRunDependencies'](runDependencies);
+ }
+}
+Module['addRunDependency'] = addRunDependency;
+function removeRunDependency(id) {
+ runDependencies--;
+ if (Module['monitorRunDependencies']) {
+ Module['monitorRunDependencies'](runDependencies);
+ }
+ if (runDependencies == 0) {
+ if (runDependencyWatcher !== null) {
+ clearInterval(runDependencyWatcher);
+ runDependencyWatcher = null;
+ }
+ if (dependenciesFulfilled) {
+ var callback = dependenciesFulfilled;
+ dependenciesFulfilled = null;
+ callback(); // can add another dependenciesFulfilled
+ }
+ }
+}
+Module['removeRunDependency'] = removeRunDependency;
+
+Module["preloadedImages"] = {}; // maps url to image data
+Module["preloadedAudios"] = {}; // maps url to audio data
+
+
+var memoryInitializer = null;
+
+// === Body ===
+
+
+
+
+
+STATIC_BASE = 8;
+
+STATICTOP = STATIC_BASE + Runtime.alignMemory(531);
+/* global initializers */ __ATINIT__.push();
+
+
+/* memory initializer */ allocate([101,114,114,111,114,58,32,37,100,10,0,0,0,0,0,0,102,105,110,97,108,58,32,37,100,46,10,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], "i8", ALLOC_NONE, Runtime.GLOBAL_BASE);
+
+
+
+
+var tempDoublePtr = Runtime.alignMemory(allocate(12, "i8", ALLOC_STATIC), 8);
+
+assert(tempDoublePtr % 8 == 0);
+
+function copyTempFloat(ptr) { // functions, because inlining this code increases code size too much
+
+ HEAP8[tempDoublePtr] = HEAP8[ptr];
+
+ HEAP8[tempDoublePtr+1] = HEAP8[ptr+1];
+
+ HEAP8[tempDoublePtr+2] = HEAP8[ptr+2];
+
+ HEAP8[tempDoublePtr+3] = HEAP8[ptr+3];
+
+}
+
+function copyTempDouble(ptr) {
+
+ HEAP8[tempDoublePtr] = HEAP8[ptr];
+
+ HEAP8[tempDoublePtr+1] = HEAP8[ptr+1];
+
+ HEAP8[tempDoublePtr+2] = HEAP8[ptr+2];
+
+ HEAP8[tempDoublePtr+3] = HEAP8[ptr+3];
+
+ HEAP8[tempDoublePtr+4] = HEAP8[ptr+4];
+
+ HEAP8[tempDoublePtr+5] = HEAP8[ptr+5];
+
+ HEAP8[tempDoublePtr+6] = HEAP8[ptr+6];
+
+ HEAP8[tempDoublePtr+7] = HEAP8[ptr+7];
+
+}
+
+
+
+
+
+
+ var ERRNO_CODES={EPERM:1,ENOENT:2,ESRCH:3,EINTR:4,EIO:5,ENXIO:6,E2BIG:7,ENOEXEC:8,EBADF:9,ECHILD:10,EAGAIN:11,EWOULDBLOCK:11,ENOMEM:12,EACCES:13,EFAULT:14,ENOTBLK:15,EBUSY:16,EEXIST:17,EXDEV:18,ENODEV:19,ENOTDIR:20,EISDIR:21,EINVAL:22,ENFILE:23,EMFILE:24,ENOTTY:25,ETXTBSY:26,EFBIG:27,ENOSPC:28,ESPIPE:29,EROFS:30,EMLINK:31,EPIPE:32,EDOM:33,ERANGE:34,ENOMSG:42,EIDRM:43,ECHRNG:44,EL2NSYNC:45,EL3HLT:46,EL3RST:47,ELNRNG:48,EUNATCH:49,ENOCSI:50,EL2HLT:51,EDEADLK:35,ENOLCK:37,EBADE:52,EBADR:53,EXFULL:54,ENOANO:55,EBADRQC:56,EBADSLT:57,EDEADLOCK:35,EBFONT:59,ENOSTR:60,ENODATA:61,ETIME:62,ENOSR:63,ENONET:64,ENOPKG:65,EREMOTE:66,ENOLINK:67,EADV:68,ESRMNT:69,ECOMM:70,EPROTO:71,EMULTIHOP:72,EDOTDOT:73,EBADMSG:74,ENOTUNIQ:76,EBADFD:77,EREMCHG:78,ELIBACC:79,ELIBBAD:80,ELIBSCN:81,ELIBMAX:82,ELIBEXEC:83,ENOSYS:38,ENOTEMPTY:39,ENAMETOOLONG:36,ELOOP:40,EOPNOTSUPP:95,EPFNOSUPPORT:96,ECONNRESET:104,ENOBUFS:105,EAFNOSUPPORT:97,EPROTOTYPE:91,ENOTSOCK:88,ENOPROTOOPT:92,ESHUTDOWN:108,ECONNREFUSED:111,EADDRINUSE:98,ECONNABORTED:103,ENETUNREACH:101,ENETDOWN:100,ETIMEDOUT:110,EHOSTDOWN:112,EHOSTUNREACH:113,EINPROGRESS:115,EALREADY:114,EDESTADDRREQ:89,EMSGSIZE:90,EPROTONOSUPPORT:93,ESOCKTNOSUPPORT:94,EADDRNOTAVAIL:99,ENETRESET:102,EISCONN:106,ENOTCONN:107,ETOOMANYREFS:109,EUSERS:87,EDQUOT:122,ESTALE:116,ENOTSUP:95,ENOMEDIUM:123,EILSEQ:84,EOVERFLOW:75,ECANCELED:125,ENOTRECOVERABLE:131,EOWNERDEAD:130,ESTRPIPE:86};
+
+ var ERRNO_MESSAGES={0:"Success",1:"Not super-user",2:"No such file or directory",3:"No such process",4:"Interrupted system call",5:"I/O error",6:"No such device or address",7:"Arg list too long",8:"Exec format error",9:"Bad file number",10:"No children",11:"No more processes",12:"Not enough core",13:"Permission denied",14:"Bad address",15:"Block device required",16:"Mount device busy",17:"File exists",18:"Cross-device link",19:"No such device",20:"Not a directory",21:"Is a directory",22:"Invalid argument",23:"Too many open files in system",24:"Too many open files",25:"Not a typewriter",26:"Text file busy",27:"File too large",28:"No space left on device",29:"Illegal seek",30:"Read only file system",31:"Too many links",32:"Broken pipe",33:"Math arg out of domain of func",34:"Math result not representable",35:"File locking deadlock error",36:"File or path name too long",37:"No record locks available",38:"Function not implemented",39:"Directory not empty",40:"Too many symbolic links",42:"No message of desired type",43:"Identifier removed",44:"Channel number out of range",45:"Level 2 not synchronized",46:"Level 3 halted",47:"Level 3 reset",48:"Link number out of range",49:"Protocol driver not attached",50:"No CSI structure available",51:"Level 2 halted",52:"Invalid exchange",53:"Invalid request descriptor",54:"Exchange full",55:"No anode",56:"Invalid request code",57:"Invalid slot",59:"Bad font file fmt",60:"Device not a stream",61:"No data (for no delay io)",62:"Timer expired",63:"Out of streams resources",64:"Machine is not on the network",65:"Package not installed",66:"The object is remote",67:"The link has been severed",68:"Advertise error",69:"Srmount error",70:"Communication error on send",71:"Protocol error",72:"Multihop attempted",73:"Cross mount point (not really error)",74:"Trying to read unreadable message",75:"Value too large for defined data type",76:"Given log. name not unique",77:"f.d. invalid for this operation",78:"Remote address changed",79:"Can access a needed shared lib",80:"Accessing a corrupted shared lib",81:".lib section in a.out corrupted",82:"Attempting to link in too many libs",83:"Attempting to exec a shared library",84:"Illegal byte sequence",86:"Streams pipe error",87:"Too many users",88:"Socket operation on non-socket",89:"Destination address required",90:"Message too long",91:"Protocol wrong type for socket",92:"Protocol not available",93:"Unknown protocol",94:"Socket type not supported",95:"Not supported",96:"Protocol family not supported",97:"Address family not supported by protocol family",98:"Address already in use",99:"Address not available",100:"Network interface is not configured",101:"Network is unreachable",102:"Connection reset by network",103:"Connection aborted",104:"Connection reset by peer",105:"No buffer space available",106:"Socket is already connected",107:"Socket is not connected",108:"Can't send after socket shutdown",109:"Too many references",110:"Connection timed out",111:"Connection refused",112:"Host is down",113:"Host is unreachable",114:"Socket already connected",115:"Connection already in progress",116:"Stale file handle",122:"Quota exceeded",123:"No medium (in tape drive)",125:"Operation canceled",130:"Previous owner died",131:"State not recoverable"};
+
+
+ var ___errno_state=0;function ___setErrNo(value) {
+ // For convenient setting and returning of errno.
+ HEAP32[((___errno_state)>>2)]=value;
+ return value;
+ }
+
+ var PATH={splitPath:function (filename) {
+ var splitPathRe = /^(\/?|)([\s\S]*?)((?:\.{1,2}|[^\/]+?|)(\.[^.\/]*|))(?:[\/]*)$/;
+ return splitPathRe.exec(filename).slice(1);
+ },normalizeArray:function (parts, allowAboveRoot) {
+ // if the path tries to go above the root, `up` ends up > 0
+ var up = 0;
+ for (var i = parts.length - 1; i >= 0; i--) {
+ var last = parts[i];
+ if (last === '.') {
+ parts.splice(i, 1);
+ } else if (last === '..') {
+ parts.splice(i, 1);
+ up++;
+ } else if (up) {
+ parts.splice(i, 1);
+ up--;
+ }
+ }
+ // if the path is allowed to go above the root, restore leading ..s
+ if (allowAboveRoot) {
+ for (; up--; up) {
+ parts.unshift('..');
+ }
+ }
+ return parts;
+ },normalize:function (path) {
+ var isAbsolute = path.charAt(0) === '/',
+ trailingSlash = path.substr(-1) === '/';
+ // Normalize the path
+ path = PATH.normalizeArray(path.split('/').filter(function(p) {
+ return !!p;
+ }), !isAbsolute).join('/');
+ if (!path && !isAbsolute) {
+ path = '.';
+ }
+ if (path && trailingSlash) {
+ path += '/';
+ }
+ return (isAbsolute ? '/' : '') + path;
+ },dirname:function (path) {
+ var result = PATH.splitPath(path),
+ root = result[0],
+ dir = result[1];
+ if (!root && !dir) {
+ // No dirname whatsoever
+ return '.';
+ }
+ if (dir) {
+ // It has a dirname, strip trailing slash
+ dir = dir.substr(0, dir.length - 1);
+ }
+ return root + dir;
+ },basename:function (path) {
+ // EMSCRIPTEN return '/'' for '/', not an empty string
+ if (path === '/') return '/';
+ var lastSlash = path.lastIndexOf('/');
+ if (lastSlash === -1) return path;
+ return path.substr(lastSlash+1);
+ },extname:function (path) {
+ return PATH.splitPath(path)[3];
+ },join:function () {
+ var paths = Array.prototype.slice.call(arguments, 0);
+ return PATH.normalize(paths.join('/'));
+ },join2:function (l, r) {
+ return PATH.normalize(l + '/' + r);
+ },resolve:function () {
+ var resolvedPath = '',
+ resolvedAbsolute = false;
+ for (var i = arguments.length - 1; i >= -1 && !resolvedAbsolute; i--) {
+ var path = (i >= 0) ? arguments[i] : FS.cwd();
+ // Skip empty and invalid entries
+ if (typeof path !== 'string') {
+ throw new TypeError('Arguments to path.resolve must be strings');
+ } else if (!path) {
+ continue;
+ }
+ resolvedPath = path + '/' + resolvedPath;
+ resolvedAbsolute = path.charAt(0) === '/';
+ }
+ // At this point the path should be resolved to a full absolute path, but
+ // handle relative paths to be safe (might happen when process.cwd() fails)
+ resolvedPath = PATH.normalizeArray(resolvedPath.split('/').filter(function(p) {
+ return !!p;
+ }), !resolvedAbsolute).join('/');
+ return ((resolvedAbsolute ? '/' : '') + resolvedPath) || '.';
+ },relative:function (from, to) {
+ from = PATH.resolve(from).substr(1);
+ to = PATH.resolve(to).substr(1);
+ function trim(arr) {
+ var start = 0;
+ for (; start < arr.length; start++) {
+ if (arr[start] !== '') break;
+ }
+ var end = arr.length - 1;
+ for (; end >= 0; end--) {
+ if (arr[end] !== '') break;
+ }
+ if (start > end) return [];
+ return arr.slice(start, end - start + 1);
+ }
+ var fromParts = trim(from.split('/'));
+ var toParts = trim(to.split('/'));
+ var length = Math.min(fromParts.length, toParts.length);
+ var samePartsLength = length;
+ for (var i = 0; i < length; i++) {
+ if (fromParts[i] !== toParts[i]) {
+ samePartsLength = i;
+ break;
+ }
+ }
+ var outputParts = [];
+ for (var i = samePartsLength; i < fromParts.length; i++) {
+ outputParts.push('..');
+ }
+ outputParts = outputParts.concat(toParts.slice(samePartsLength));
+ return outputParts.join('/');
+ }};
+
+ var TTY={ttys:[],init:function () {
+ // https://github.com/kripken/emscripten/pull/1555
+ // if (ENVIRONMENT_IS_NODE) {
+ // // currently, FS.init does not distinguish if process.stdin is a file or TTY
+ // // device, it always assumes it's a TTY device. because of this, we're forcing
+ // // process.stdin to UTF8 encoding to at least make stdin reading compatible
+ // // with text files until FS.init can be refactored.
+ // process['stdin']['setEncoding']('utf8');
+ // }
+ },shutdown:function () {
+ // https://github.com/kripken/emscripten/pull/1555
+ // if (ENVIRONMENT_IS_NODE) {
+ // // inolen: any idea as to why node -e 'process.stdin.read()' wouldn't exit immediately (with process.stdin being a tty)?
+ // // isaacs: because now it's reading from the stream, you've expressed interest in it, so that read() kicks off a _read() which creates a ReadReq operation
+ // // inolen: I thought read() in that case was a synchronous operation that just grabbed some amount of buffered data if it exists?
+ // // isaacs: it is. but it also triggers a _read() call, which calls readStart() on the handle
+ // // isaacs: do process.stdin.pause() and i'd think it'd probably close the pending call
+ // process['stdin']['pause']();
+ // }
+ },register:function (dev, ops) {
+ TTY.ttys[dev] = { input: [], output: [], ops: ops };
+ FS.registerDevice(dev, TTY.stream_ops);
+ },stream_ops:{open:function (stream) {
+ var tty = TTY.ttys[stream.node.rdev];
+ if (!tty) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ stream.tty = tty;
+ stream.seekable = false;
+ },close:function (stream) {
+ // flush any pending line data
+ if (stream.tty.output.length) {
+ stream.tty.ops.put_char(stream.tty, 10);
+ }
+ },read:function (stream, buffer, offset, length, pos /* ignored */) {
+ if (!stream.tty || !stream.tty.ops.get_char) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENXIO);
+ }
+ var bytesRead = 0;
+ for (var i = 0; i < length; i++) {
+ var result;
+ try {
+ result = stream.tty.ops.get_char(stream.tty);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ if (result === undefined && bytesRead === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ if (result === null || result === undefined) break;
+ bytesRead++;
+ buffer[offset+i] = result;
+ }
+ if (bytesRead) {
+ stream.node.timestamp = Date.now();
+ }
+ return bytesRead;
+ },write:function (stream, buffer, offset, length, pos) {
+ if (!stream.tty || !stream.tty.ops.put_char) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENXIO);
+ }
+ for (var i = 0; i < length; i++) {
+ try {
+ stream.tty.ops.put_char(stream.tty, buffer[offset+i]);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ }
+ if (length) {
+ stream.node.timestamp = Date.now();
+ }
+ return i;
+ }},default_tty_ops:{get_char:function (tty) {
+ if (!tty.input.length) {
+ var result = null;
+ if (ENVIRONMENT_IS_NODE) {
+ result = process['stdin']['read']();
+ if (!result) {
+ if (process['stdin']['_readableState'] && process['stdin']['_readableState']['ended']) {
+ return null; // EOF
+ }
+ return undefined; // no data available
+ }
+ } else if (typeof window != 'undefined' &&
+ typeof window.prompt == 'function') {
+ // Browser.
+ result = window.prompt('Input: '); // returns null on cancel
+ if (result !== null) {
+ result += '\n';
+ }
+ } else if (typeof readline == 'function') {
+ // Command line.
+ result = readline();
+ if (result !== null) {
+ result += '\n';
+ }
+ }
+ if (!result) {
+ return null;
+ }
+ tty.input = intArrayFromString(result, true);
+ }
+ return tty.input.shift();
+ },put_char:function (tty, val) {
+ if (val === null || val === 10) {
+ Module['print'](tty.output.join(''));
+ tty.output = [];
+ } else {
+ tty.output.push(TTY.utf8.processCChar(val));
+ }
+ }},default_tty1_ops:{put_char:function (tty, val) {
+ if (val === null || val === 10) {
+ Module['printErr'](tty.output.join(''));
+ tty.output = [];
+ } else {
+ tty.output.push(TTY.utf8.processCChar(val));
+ }
+ }}};
+
+ var MEMFS={ops_table:null,CONTENT_OWNING:1,CONTENT_FLEXIBLE:2,CONTENT_FIXED:3,mount:function (mount) {
+ return MEMFS.createNode(null, '/', 16384 | 511 /* 0777 */, 0);
+ },createNode:function (parent, name, mode, dev) {
+ if (FS.isBlkdev(mode) || FS.isFIFO(mode)) {
+ // no supported
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (!MEMFS.ops_table) {
+ MEMFS.ops_table = {
+ dir: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr,
+ lookup: MEMFS.node_ops.lookup,
+ mknod: MEMFS.node_ops.mknod,
+ rename: MEMFS.node_ops.rename,
+ unlink: MEMFS.node_ops.unlink,
+ rmdir: MEMFS.node_ops.rmdir,
+ readdir: MEMFS.node_ops.readdir,
+ symlink: MEMFS.node_ops.symlink
+ },
+ stream: {
+ llseek: MEMFS.stream_ops.llseek
+ }
+ },
+ file: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr
+ },
+ stream: {
+ llseek: MEMFS.stream_ops.llseek,
+ read: MEMFS.stream_ops.read,
+ write: MEMFS.stream_ops.write,
+ allocate: MEMFS.stream_ops.allocate,
+ mmap: MEMFS.stream_ops.mmap
+ }
+ },
+ link: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr,
+ readlink: MEMFS.node_ops.readlink
+ },
+ stream: {}
+ },
+ chrdev: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr
+ },
+ stream: FS.chrdev_stream_ops
+ },
+ };
+ }
+ var node = FS.createNode(parent, name, mode, dev);
+ if (FS.isDir(node.mode)) {
+ node.node_ops = MEMFS.ops_table.dir.node;
+ node.stream_ops = MEMFS.ops_table.dir.stream;
+ node.contents = {};
+ } else if (FS.isFile(node.mode)) {
+ node.node_ops = MEMFS.ops_table.file.node;
+ node.stream_ops = MEMFS.ops_table.file.stream;
+ node.contents = [];
+ node.contentMode = MEMFS.CONTENT_FLEXIBLE;
+ } else if (FS.isLink(node.mode)) {
+ node.node_ops = MEMFS.ops_table.link.node;
+ node.stream_ops = MEMFS.ops_table.link.stream;
+ } else if (FS.isChrdev(node.mode)) {
+ node.node_ops = MEMFS.ops_table.chrdev.node;
+ node.stream_ops = MEMFS.ops_table.chrdev.stream;
+ }
+ node.timestamp = Date.now();
+ // add the new node to the parent
+ if (parent) {
+ parent.contents[name] = node;
+ }
+ return node;
+ },ensureFlexible:function (node) {
+ if (node.contentMode !== MEMFS.CONTENT_FLEXIBLE) {
+ var contents = node.contents;
+ node.contents = Array.prototype.slice.call(contents);
+ node.contentMode = MEMFS.CONTENT_FLEXIBLE;
+ }
+ },node_ops:{getattr:function (node) {
+ var attr = {};
+ // device numbers reuse inode numbers.
+ attr.dev = FS.isChrdev(node.mode) ? node.id : 1;
+ attr.ino = node.id;
+ attr.mode = node.mode;
+ attr.nlink = 1;
+ attr.uid = 0;
+ attr.gid = 0;
+ attr.rdev = node.rdev;
+ if (FS.isDir(node.mode)) {
+ attr.size = 4096;
+ } else if (FS.isFile(node.mode)) {
+ attr.size = node.contents.length;
+ } else if (FS.isLink(node.mode)) {
+ attr.size = node.link.length;
+ } else {
+ attr.size = 0;
+ }
+ attr.atime = new Date(node.timestamp);
+ attr.mtime = new Date(node.timestamp);
+ attr.ctime = new Date(node.timestamp);
+ // NOTE: In our implementation, st_blocks = Math.ceil(st_size/st_blksize),
+ // but this is not required by the standard.
+ attr.blksize = 4096;
+ attr.blocks = Math.ceil(attr.size / attr.blksize);
+ return attr;
+ },setattr:function (node, attr) {
+ if (attr.mode !== undefined) {
+ node.mode = attr.mode;
+ }
+ if (attr.timestamp !== undefined) {
+ node.timestamp = attr.timestamp;
+ }
+ if (attr.size !== undefined) {
+ MEMFS.ensureFlexible(node);
+ var contents = node.contents;
+ if (attr.size < contents.length) contents.length = attr.size;
+ else while (attr.size > contents.length) contents.push(0);
+ }
+ },lookup:function (parent, name) {
+ throw FS.genericErrors[ERRNO_CODES.ENOENT];
+ },mknod:function (parent, name, mode, dev) {
+ return MEMFS.createNode(parent, name, mode, dev);
+ },rename:function (old_node, new_dir, new_name) {
+ // if we're overwriting a directory at new_name, make sure it's empty.
+ if (FS.isDir(old_node.mode)) {
+ var new_node;
+ try {
+ new_node = FS.lookupNode(new_dir, new_name);
+ } catch (e) {
+ }
+ if (new_node) {
+ for (var i in new_node.contents) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTEMPTY);
+ }
+ }
+ }
+ // do the internal rewiring
+ delete old_node.parent.contents[old_node.name];
+ old_node.name = new_name;
+ new_dir.contents[new_name] = old_node;
+ old_node.parent = new_dir;
+ },unlink:function (parent, name) {
+ delete parent.contents[name];
+ },rmdir:function (parent, name) {
+ var node = FS.lookupNode(parent, name);
+ for (var i in node.contents) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTEMPTY);
+ }
+ delete parent.contents[name];
+ },readdir:function (node) {
+ var entries = ['.', '..']
+ for (var key in node.contents) {
+ if (!node.contents.hasOwnProperty(key)) {
+ continue;
+ }
+ entries.push(key);
+ }
+ return entries;
+ },symlink:function (parent, newname, oldpath) {
+ var node = MEMFS.createNode(parent, newname, 511 /* 0777 */ | 40960, 0);
+ node.link = oldpath;
+ return node;
+ },readlink:function (node) {
+ if (!FS.isLink(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ return node.link;
+ }},stream_ops:{read:function (stream, buffer, offset, length, position) {
+ var contents = stream.node.contents;
+ if (position >= contents.length)
+ return 0;
+ var size = Math.min(contents.length - position, length);
+ assert(size >= 0);
+ if (size > 8 && contents.subarray) { // non-trivial, and typed array
+ buffer.set(contents.subarray(position, position + size), offset);
+ } else
+ {
+ for (var i = 0; i < size; i++) {
+ buffer[offset + i] = contents[position + i];
+ }
+ }
+ return size;
+ },write:function (stream, buffer, offset, length, position, canOwn) {
+ var node = stream.node;
+ node.timestamp = Date.now();
+ var contents = node.contents;
+ if (length && contents.length === 0 && position === 0 && buffer.subarray) {
+ // just replace it with the new data
+ if (canOwn && offset === 0) {
+ node.contents = buffer; // this could be a subarray of Emscripten HEAP, or allocated from some other source.
+ node.contentMode = (buffer.buffer === HEAP8.buffer) ? MEMFS.CONTENT_OWNING : MEMFS.CONTENT_FIXED;
+ } else {
+ node.contents = new Uint8Array(buffer.subarray(offset, offset+length));
+ node.contentMode = MEMFS.CONTENT_FIXED;
+ }
+ return length;
+ }
+ MEMFS.ensureFlexible(node);
+ var contents = node.contents;
+ while (contents.length < position) contents.push(0);
+ for (var i = 0; i < length; i++) {
+ contents[position + i] = buffer[offset + i];
+ }
+ return length;
+ },llseek:function (stream, offset, whence) {
+ var position = offset;
+ if (whence === 1) { // SEEK_CUR.
+ position += stream.position;
+ } else if (whence === 2) { // SEEK_END.
+ if (FS.isFile(stream.node.mode)) {
+ position += stream.node.contents.length;
+ }
+ }
+ if (position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ stream.ungotten = [];
+ stream.position = position;
+ return position;
+ },allocate:function (stream, offset, length) {
+ MEMFS.ensureFlexible(stream.node);
+ var contents = stream.node.contents;
+ var limit = offset + length;
+ while (limit > contents.length) contents.push(0);
+ },mmap:function (stream, buffer, offset, length, position, prot, flags) {
+ if (!FS.isFile(stream.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ var ptr;
+ var allocated;
+ var contents = stream.node.contents;
+ // Only make a new copy when MAP_PRIVATE is specified.
+ if ( !(flags & 2) &&
+ (contents.buffer === buffer || contents.buffer === buffer.buffer) ) {
+ // We can't emulate MAP_SHARED when the file is not backed by the buffer
+ // we're mapping to (e.g. the HEAP buffer).
+ allocated = false;
+ ptr = contents.byteOffset;
+ } else {
+ // Try to avoid unnecessary slices.
+ if (position > 0 || position + length < contents.length) {
+ if (contents.subarray) {
+ contents = contents.subarray(position, position + length);
+ } else {
+ contents = Array.prototype.slice.call(contents, position, position + length);
+ }
+ }
+ allocated = true;
+ ptr = _malloc(length);
+ if (!ptr) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOMEM);
+ }
+ buffer.set(contents, ptr);
+ }
+ return { ptr: ptr, allocated: allocated };
+ }}};
+
+ var IDBFS={dbs:{},indexedDB:function () {
+ return window.indexedDB || window.mozIndexedDB || window.webkitIndexedDB || window.msIndexedDB;
+ },DB_VERSION:21,DB_STORE_NAME:"FILE_DATA",mount:function (mount) {
+ // reuse all of the core MEMFS functionality
+ return MEMFS.mount.apply(null, arguments);
+ },syncfs:function (mount, populate, callback) {
+ IDBFS.getLocalSet(mount, function(err, local) {
+ if (err) return callback(err);
+
+ IDBFS.getRemoteSet(mount, function(err, remote) {
+ if (err) return callback(err);
+
+ var src = populate ? remote : local;
+ var dst = populate ? local : remote;
+
+ IDBFS.reconcile(src, dst, callback);
+ });
+ });
+ },getDB:function (name, callback) {
+ // check the cache first
+ var db = IDBFS.dbs[name];
+ if (db) {
+ return callback(null, db);
+ }
+
+ var req;
+ try {
+ req = IDBFS.indexedDB().open(name, IDBFS.DB_VERSION);
+ } catch (e) {
+ return callback(e);
+ }
+ req.onupgradeneeded = function(e) {
+ var db = e.target.result;
+ var transaction = e.target.transaction;
+
+ var fileStore;
+
+ if (db.objectStoreNames.contains(IDBFS.DB_STORE_NAME)) {
+ fileStore = transaction.objectStore(IDBFS.DB_STORE_NAME);
+ } else {
+ fileStore = db.createObjectStore(IDBFS.DB_STORE_NAME);
+ }
+
+ fileStore.createIndex('timestamp', 'timestamp', { unique: false });
+ };
+ req.onsuccess = function() {
+ db = req.result;
+
+ // add to the cache
+ IDBFS.dbs[name] = db;
+ callback(null, db);
+ };
+ req.onerror = function() {
+ callback(this.error);
+ };
+ },getLocalSet:function (mount, callback) {
+ var entries = {};
+
+ function isRealDir(p) {
+ return p !== '.' && p !== '..';
+ };
+ function toAbsolute(root) {
+ return function(p) {
+ return PATH.join2(root, p);
+ }
+ };
+
+ var check = FS.readdir(mount.mountpoint).filter(isRealDir).map(toAbsolute(mount.mountpoint));
+
+ while (check.length) {
+ var path = check.pop();
+ var stat;
+
+ try {
+ stat = FS.stat(path);
+ } catch (e) {
+ return callback(e);
+ }
+
+ if (FS.isDir(stat.mode)) {
+ check.push.apply(check, FS.readdir(path).filter(isRealDir).map(toAbsolute(path)));
+ }
+
+ entries[path] = { timestamp: stat.mtime };
+ }
+
+ return callback(null, { type: 'local', entries: entries });
+ },getRemoteSet:function (mount, callback) {
+ var entries = {};
+
+ IDBFS.getDB(mount.mountpoint, function(err, db) {
+ if (err) return callback(err);
+
+ var transaction = db.transaction([IDBFS.DB_STORE_NAME], 'readonly');
+ transaction.onerror = function() { callback(this.error); };
+
+ var store = transaction.objectStore(IDBFS.DB_STORE_NAME);
+ var index = store.index('timestamp');
+
+ index.openKeyCursor().onsuccess = function(event) {
+ var cursor = event.target.result;
+
+ if (!cursor) {
+ return callback(null, { type: 'remote', db: db, entries: entries });
+ }
+
+ entries[cursor.primaryKey] = { timestamp: cursor.key };
+
+ cursor.continue();
+ };
+ });
+ },loadLocalEntry:function (path, callback) {
+ var stat, node;
+
+ try {
+ var lookup = FS.lookupPath(path);
+ node = lookup.node;
+ stat = FS.stat(path);
+ } catch (e) {
+ return callback(e);
+ }
+
+ if (FS.isDir(stat.mode)) {
+ return callback(null, { timestamp: stat.mtime, mode: stat.mode });
+ } else if (FS.isFile(stat.mode)) {
+ return callback(null, { timestamp: stat.mtime, mode: stat.mode, contents: node.contents });
+ } else {
+ return callback(new Error('node type not supported'));
+ }
+ },storeLocalEntry:function (path, entry, callback) {
+ try {
+ if (FS.isDir(entry.mode)) {
+ FS.mkdir(path, entry.mode);
+ } else if (FS.isFile(entry.mode)) {
+ FS.writeFile(path, entry.contents, { encoding: 'binary', canOwn: true });
+ } else {
+ return callback(new Error('node type not supported'));
+ }
+
+ FS.utime(path, entry.timestamp, entry.timestamp);
+ } catch (e) {
+ return callback(e);
+ }
+
+ callback(null);
+ },removeLocalEntry:function (path, callback) {
+ try {
+ var lookup = FS.lookupPath(path);
+ var stat = FS.stat(path);
+
+ if (FS.isDir(stat.mode)) {
+ FS.rmdir(path);
+ } else if (FS.isFile(stat.mode)) {
+ FS.unlink(path);
+ }
+ } catch (e) {
+ return callback(e);
+ }
+
+ callback(null);
+ },loadRemoteEntry:function (store, path, callback) {
+ var req = store.get(path);
+ req.onsuccess = function(event) { callback(null, event.target.result); };
+ req.onerror = function() { callback(this.error); };
+ },storeRemoteEntry:function (store, path, entry, callback) {
+ var req = store.put(entry, path);
+ req.onsuccess = function() { callback(null); };
+ req.onerror = function() { callback(this.error); };
+ },removeRemoteEntry:function (store, path, callback) {
+ var req = store.delete(path);
+ req.onsuccess = function() { callback(null); };
+ req.onerror = function() { callback(this.error); };
+ },reconcile:function (src, dst, callback) {
+ var total = 0;
+
+ var create = [];
+ Object.keys(src.entries).forEach(function (key) {
+ var e = src.entries[key];
+ var e2 = dst.entries[key];
+ if (!e2 || e.timestamp > e2.timestamp) {
+ create.push(key);
+ total++;
+ }
+ });
+
+ var remove = [];
+ Object.keys(dst.entries).forEach(function (key) {
+ var e = dst.entries[key];
+ var e2 = src.entries[key];
+ if (!e2) {
+ remove.push(key);
+ total++;
+ }
+ });
+
+ if (!total) {
+ return callback(null);
+ }
+
+ var errored = false;
+ var completed = 0;
+ var db = src.type === 'remote' ? src.db : dst.db;
+ var transaction = db.transaction([IDBFS.DB_STORE_NAME], 'readwrite');
+ var store = transaction.objectStore(IDBFS.DB_STORE_NAME);
+
+ function done(err) {
+ if (err) {
+ if (!done.errored) {
+ done.errored = true;
+ return callback(err);
+ }
+ return;
+ }
+ if (++completed >= total) {
+ return callback(null);
+ }
+ };
+
+ transaction.onerror = function() { done(this.error); };
+
+ // sort paths in ascending order so directory entries are created
+ // before the files inside them
+ create.sort().forEach(function (path) {
+ if (dst.type === 'local') {
+ IDBFS.loadRemoteEntry(store, path, function (err, entry) {
+ if (err) return done(err);
+ IDBFS.storeLocalEntry(path, entry, done);
+ });
+ } else {
+ IDBFS.loadLocalEntry(path, function (err, entry) {
+ if (err) return done(err);
+ IDBFS.storeRemoteEntry(store, path, entry, done);
+ });
+ }
+ });
+
+ // sort paths in descending order so files are deleted before their
+ // parent directories
+ remove.sort().reverse().forEach(function(path) {
+ if (dst.type === 'local') {
+ IDBFS.removeLocalEntry(path, done);
+ } else {
+ IDBFS.removeRemoteEntry(store, path, done);
+ }
+ });
+ }};
+
+ var NODEFS={isWindows:false,staticInit:function () {
+ NODEFS.isWindows = !!process.platform.match(/^win/);
+ },mount:function (mount) {
+ assert(ENVIRONMENT_IS_NODE);
+ return NODEFS.createNode(null, '/', NODEFS.getMode(mount.opts.root), 0);
+ },createNode:function (parent, name, mode, dev) {
+ if (!FS.isDir(mode) && !FS.isFile(mode) && !FS.isLink(mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var node = FS.createNode(parent, name, mode);
+ node.node_ops = NODEFS.node_ops;
+ node.stream_ops = NODEFS.stream_ops;
+ return node;
+ },getMode:function (path) {
+ var stat;
+ try {
+ stat = fs.lstatSync(path);
+ if (NODEFS.isWindows) {
+ // On Windows, directories return permission bits 'rw-rw-rw-', even though they have 'rwxrwxrwx', so
+ // propagate write bits to execute bits.
+ stat.mode = stat.mode | ((stat.mode & 146) >> 1);
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ return stat.mode;
+ },realPath:function (node) {
+ var parts = [];
+ while (node.parent !== node) {
+ parts.push(node.name);
+ node = node.parent;
+ }
+ parts.push(node.mount.opts.root);
+ parts.reverse();
+ return PATH.join.apply(null, parts);
+ },flagsToPermissionStringMap:{0:"r",1:"r+",2:"r+",64:"r",65:"r+",66:"r+",129:"rx+",193:"rx+",514:"w+",577:"w",578:"w+",705:"wx",706:"wx+",1024:"a",1025:"a",1026:"a+",1089:"a",1090:"a+",1153:"ax",1154:"ax+",1217:"ax",1218:"ax+",4096:"rs",4098:"rs+"},flagsToPermissionString:function (flags) {
+ if (flags in NODEFS.flagsToPermissionStringMap) {
+ return NODEFS.flagsToPermissionStringMap[flags];
+ } else {
+ return flags;
+ }
+ },node_ops:{getattr:function (node) {
+ var path = NODEFS.realPath(node);
+ var stat;
+ try {
+ stat = fs.lstatSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ // node.js v0.10.20 doesn't report blksize and blocks on Windows. Fake them with default blksize of 4096.
+ // See http://support.microsoft.com/kb/140365
+ if (NODEFS.isWindows && !stat.blksize) {
+ stat.blksize = 4096;
+ }
+ if (NODEFS.isWindows && !stat.blocks) {
+ stat.blocks = (stat.size+stat.blksize-1)/stat.blksize|0;
+ }
+ return {
+ dev: stat.dev,
+ ino: stat.ino,
+ mode: stat.mode,
+ nlink: stat.nlink,
+ uid: stat.uid,
+ gid: stat.gid,
+ rdev: stat.rdev,
+ size: stat.size,
+ atime: stat.atime,
+ mtime: stat.mtime,
+ ctime: stat.ctime,
+ blksize: stat.blksize,
+ blocks: stat.blocks
+ };
+ },setattr:function (node, attr) {
+ var path = NODEFS.realPath(node);
+ try {
+ if (attr.mode !== undefined) {
+ fs.chmodSync(path, attr.mode);
+ // update the common node structure mode as well
+ node.mode = attr.mode;
+ }
+ if (attr.timestamp !== undefined) {
+ var date = new Date(attr.timestamp);
+ fs.utimesSync(path, date, date);
+ }
+ if (attr.size !== undefined) {
+ fs.truncateSync(path, attr.size);
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },lookup:function (parent, name) {
+ var path = PATH.join2(NODEFS.realPath(parent), name);
+ var mode = NODEFS.getMode(path);
+ return NODEFS.createNode(parent, name, mode);
+ },mknod:function (parent, name, mode, dev) {
+ var node = NODEFS.createNode(parent, name, mode, dev);
+ // create the backing node for this in the fs root as well
+ var path = NODEFS.realPath(node);
+ try {
+ if (FS.isDir(node.mode)) {
+ fs.mkdirSync(path, node.mode);
+ } else {
+ fs.writeFileSync(path, '', { mode: node.mode });
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ return node;
+ },rename:function (oldNode, newDir, newName) {
+ var oldPath = NODEFS.realPath(oldNode);
+ var newPath = PATH.join2(NODEFS.realPath(newDir), newName);
+ try {
+ fs.renameSync(oldPath, newPath);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },unlink:function (parent, name) {
+ var path = PATH.join2(NODEFS.realPath(parent), name);
+ try {
+ fs.unlinkSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },rmdir:function (parent, name) {
+ var path = PATH.join2(NODEFS.realPath(parent), name);
+ try {
+ fs.rmdirSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },readdir:function (node) {
+ var path = NODEFS.realPath(node);
+ try {
+ return fs.readdirSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },symlink:function (parent, newName, oldPath) {
+ var newPath = PATH.join2(NODEFS.realPath(parent), newName);
+ try {
+ fs.symlinkSync(oldPath, newPath);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },readlink:function (node) {
+ var path = NODEFS.realPath(node);
+ try {
+ return fs.readlinkSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ }},stream_ops:{open:function (stream) {
+ var path = NODEFS.realPath(stream.node);
+ try {
+ if (FS.isFile(stream.node.mode)) {
+ stream.nfd = fs.openSync(path, NODEFS.flagsToPermissionString(stream.flags));
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },close:function (stream) {
+ try {
+ if (FS.isFile(stream.node.mode) && stream.nfd) {
+ fs.closeSync(stream.nfd);
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },read:function (stream, buffer, offset, length, position) {
+ // FIXME this is terrible.
+ var nbuffer = new Buffer(length);
+ var res;
+ try {
+ res = fs.readSync(stream.nfd, nbuffer, 0, length, position);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ if (res > 0) {
+ for (var i = 0; i < res; i++) {
+ buffer[offset + i] = nbuffer[i];
+ }
+ }
+ return res;
+ },write:function (stream, buffer, offset, length, position) {
+ // FIXME this is terrible.
+ var nbuffer = new Buffer(buffer.subarray(offset, offset + length));
+ var res;
+ try {
+ res = fs.writeSync(stream.nfd, nbuffer, 0, length, position);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ return res;
+ },llseek:function (stream, offset, whence) {
+ var position = offset;
+ if (whence === 1) { // SEEK_CUR.
+ position += stream.position;
+ } else if (whence === 2) { // SEEK_END.
+ if (FS.isFile(stream.node.mode)) {
+ try {
+ var stat = fs.fstatSync(stream.nfd);
+ position += stat.size;
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ }
+ }
+
+ if (position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+
+ stream.position = position;
+ return position;
+ }}};
+
+ var _stdin=allocate(1, "i32*", ALLOC_STATIC);
+
+ var _stdout=allocate(1, "i32*", ALLOC_STATIC);
+
+ var _stderr=allocate(1, "i32*", ALLOC_STATIC);
+
+ function _fflush(stream) {
+ // int fflush(FILE *stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fflush.html
+ // we don't currently perform any user-space buffering of data
+ }var FS={root:null,mounts:[],devices:[null],streams:[],nextInode:1,nameTable:null,currentPath:"/",initialized:false,ignorePermissions:true,ErrnoError:null,genericErrors:{},handleFSError:function (e) {
+ if (!(e instanceof FS.ErrnoError)) throw e + ' : ' + stackTrace();
+ return ___setErrNo(e.errno);
+ },lookupPath:function (path, opts) {
+ path = PATH.resolve(FS.cwd(), path);
+ opts = opts || {};
+
+ var defaults = {
+ follow_mount: true,
+ recurse_count: 0
+ };
+ for (var key in defaults) {
+ if (opts[key] === undefined) {
+ opts[key] = defaults[key];
+ }
+ }
+
+ if (opts.recurse_count > 8) { // max recursive lookup of 8
+ throw new FS.ErrnoError(ERRNO_CODES.ELOOP);
+ }
+
+ // split the path
+ var parts = PATH.normalizeArray(path.split('/').filter(function(p) {
+ return !!p;
+ }), false);
+
+ // start at the root
+ var current = FS.root;
+ var current_path = '/';
+
+ for (var i = 0; i < parts.length; i++) {
+ var islast = (i === parts.length-1);
+ if (islast && opts.parent) {
+ // stop resolving
+ break;
+ }
+
+ current = FS.lookupNode(current, parts[i]);
+ current_path = PATH.join2(current_path, parts[i]);
+
+ // jump to the mount's root node if this is a mountpoint
+ if (FS.isMountpoint(current)) {
+ if (!islast || (islast && opts.follow_mount)) {
+ current = current.mounted.root;
+ }
+ }
+
+ // by default, lookupPath will not follow a symlink if it is the final path component.
+ // setting opts.follow = true will override this behavior.
+ if (!islast || opts.follow) {
+ var count = 0;
+ while (FS.isLink(current.mode)) {
+ var link = FS.readlink(current_path);
+ current_path = PATH.resolve(PATH.dirname(current_path), link);
+
+ var lookup = FS.lookupPath(current_path, { recurse_count: opts.recurse_count });
+ current = lookup.node;
+
+ if (count++ > 40) { // limit max consecutive symlinks to 40 (SYMLOOP_MAX).
+ throw new FS.ErrnoError(ERRNO_CODES.ELOOP);
+ }
+ }
+ }
+ }
+
+ return { path: current_path, node: current };
+ },getPath:function (node) {
+ var path;
+ while (true) {
+ if (FS.isRoot(node)) {
+ var mount = node.mount.mountpoint;
+ if (!path) return mount;
+ return mount[mount.length-1] !== '/' ? mount + '/' + path : mount + path;
+ }
+ path = path ? node.name + '/' + path : node.name;
+ node = node.parent;
+ }
+ },hashName:function (parentid, name) {
+ var hash = 0;
+
+
+ for (var i = 0; i < name.length; i++) {
+ hash = ((hash << 5) - hash + name.charCodeAt(i)) | 0;
+ }
+ return ((parentid + hash) >>> 0) % FS.nameTable.length;
+ },hashAddNode:function (node) {
+ var hash = FS.hashName(node.parent.id, node.name);
+ node.name_next = FS.nameTable[hash];
+ FS.nameTable[hash] = node;
+ },hashRemoveNode:function (node) {
+ var hash = FS.hashName(node.parent.id, node.name);
+ if (FS.nameTable[hash] === node) {
+ FS.nameTable[hash] = node.name_next;
+ } else {
+ var current = FS.nameTable[hash];
+ while (current) {
+ if (current.name_next === node) {
+ current.name_next = node.name_next;
+ break;
+ }
+ current = current.name_next;
+ }
+ }
+ },lookupNode:function (parent, name) {
+ var err = FS.mayLookup(parent);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ var hash = FS.hashName(parent.id, name);
+ for (var node = FS.nameTable[hash]; node; node = node.name_next) {
+ var nodeName = node.name;
+ if (node.parent.id === parent.id && nodeName === name) {
+ return node;
+ }
+ }
+ // if we failed to find it in the cache, call into the VFS
+ return FS.lookup(parent, name);
+ },createNode:function (parent, name, mode, rdev) {
+ if (!FS.FSNode) {
+ FS.FSNode = function(parent, name, mode, rdev) {
+ if (!parent) {
+ parent = this; // root node sets parent to itself
+ }
+ this.parent = parent;
+ this.mount = parent.mount;
+ this.mounted = null;
+ this.id = FS.nextInode++;
+ this.name = name;
+ this.mode = mode;
+ this.node_ops = {};
+ this.stream_ops = {};
+ this.rdev = rdev;
+ };
+
+ FS.FSNode.prototype = {};
+
+ // compatibility
+ var readMode = 292 | 73;
+ var writeMode = 146;
+
+ // NOTE we must use Object.defineProperties instead of individual calls to
+ // Object.defineProperty in order to make closure compiler happy
+ Object.defineProperties(FS.FSNode.prototype, {
+ read: {
+ get: function() { return (this.mode & readMode) === readMode; },
+ set: function(val) { val ? this.mode |= readMode : this.mode &= ~readMode; }
+ },
+ write: {
+ get: function() { return (this.mode & writeMode) === writeMode; },
+ set: function(val) { val ? this.mode |= writeMode : this.mode &= ~writeMode; }
+ },
+ isFolder: {
+ get: function() { return FS.isDir(this.mode); },
+ },
+ isDevice: {
+ get: function() { return FS.isChrdev(this.mode); },
+ },
+ });
+ }
+
+ var node = new FS.FSNode(parent, name, mode, rdev);
+
+ FS.hashAddNode(node);
+
+ return node;
+ },destroyNode:function (node) {
+ FS.hashRemoveNode(node);
+ },isRoot:function (node) {
+ return node === node.parent;
+ },isMountpoint:function (node) {
+ return !!node.mounted;
+ },isFile:function (mode) {
+ return (mode & 61440) === 32768;
+ },isDir:function (mode) {
+ return (mode & 61440) === 16384;
+ },isLink:function (mode) {
+ return (mode & 61440) === 40960;
+ },isChrdev:function (mode) {
+ return (mode & 61440) === 8192;
+ },isBlkdev:function (mode) {
+ return (mode & 61440) === 24576;
+ },isFIFO:function (mode) {
+ return (mode & 61440) === 4096;
+ },isSocket:function (mode) {
+ return (mode & 49152) === 49152;
+ },flagModes:{"r":0,"rs":1052672,"r+":2,"w":577,"wx":705,"xw":705,"w+":578,"wx+":706,"xw+":706,"a":1089,"ax":1217,"xa":1217,"a+":1090,"ax+":1218,"xa+":1218},modeStringToFlags:function (str) {
+ var flags = FS.flagModes[str];
+ if (typeof flags === 'undefined') {
+ throw new Error('Unknown file open mode: ' + str);
+ }
+ return flags;
+ },flagsToPermissionString:function (flag) {
+ var accmode = flag & 2097155;
+ var perms = ['r', 'w', 'rw'][accmode];
+ if ((flag & 512)) {
+ perms += 'w';
+ }
+ return perms;
+ },nodePermissions:function (node, perms) {
+ if (FS.ignorePermissions) {
+ return 0;
+ }
+ // return 0 if any user, group or owner bits are set.
+ if (perms.indexOf('r') !== -1 && !(node.mode & 292)) {
+ return ERRNO_CODES.EACCES;
+ } else if (perms.indexOf('w') !== -1 && !(node.mode & 146)) {
+ return ERRNO_CODES.EACCES;
+ } else if (perms.indexOf('x') !== -1 && !(node.mode & 73)) {
+ return ERRNO_CODES.EACCES;
+ }
+ return 0;
+ },mayLookup:function (dir) {
+ return FS.nodePermissions(dir, 'x');
+ },mayCreate:function (dir, name) {
+ try {
+ var node = FS.lookupNode(dir, name);
+ return ERRNO_CODES.EEXIST;
+ } catch (e) {
+ }
+ return FS.nodePermissions(dir, 'wx');
+ },mayDelete:function (dir, name, isdir) {
+ var node;
+ try {
+ node = FS.lookupNode(dir, name);
+ } catch (e) {
+ return e.errno;
+ }
+ var err = FS.nodePermissions(dir, 'wx');
+ if (err) {
+ return err;
+ }
+ if (isdir) {
+ if (!FS.isDir(node.mode)) {
+ return ERRNO_CODES.ENOTDIR;
+ }
+ if (FS.isRoot(node) || FS.getPath(node) === FS.cwd()) {
+ return ERRNO_CODES.EBUSY;
+ }
+ } else {
+ if (FS.isDir(node.mode)) {
+ return ERRNO_CODES.EISDIR;
+ }
+ }
+ return 0;
+ },mayOpen:function (node, flags) {
+ if (!node) {
+ return ERRNO_CODES.ENOENT;
+ }
+ if (FS.isLink(node.mode)) {
+ return ERRNO_CODES.ELOOP;
+ } else if (FS.isDir(node.mode)) {
+ if ((flags & 2097155) !== 0 || // opening for write
+ (flags & 512)) {
+ return ERRNO_CODES.EISDIR;
+ }
+ }
+ return FS.nodePermissions(node, FS.flagsToPermissionString(flags));
+ },MAX_OPEN_FDS:4096,nextfd:function (fd_start, fd_end) {
+ fd_start = fd_start || 0;
+ fd_end = fd_end || FS.MAX_OPEN_FDS;
+ for (var fd = fd_start; fd <= fd_end; fd++) {
+ if (!FS.streams[fd]) {
+ return fd;
+ }
+ }
+ throw new FS.ErrnoError(ERRNO_CODES.EMFILE);
+ },getStream:function (fd) {
+ return FS.streams[fd];
+ },createStream:function (stream, fd_start, fd_end) {
+ if (!FS.FSStream) {
+ FS.FSStream = function(){};
+ FS.FSStream.prototype = {};
+ // compatibility
+ Object.defineProperties(FS.FSStream.prototype, {
+ object: {
+ get: function() { return this.node; },
+ set: function(val) { this.node = val; }
+ },
+ isRead: {
+ get: function() { return (this.flags & 2097155) !== 1; }
+ },
+ isWrite: {
+ get: function() { return (this.flags & 2097155) !== 0; }
+ },
+ isAppend: {
+ get: function() { return (this.flags & 1024); }
+ }
+ });
+ }
+ if (0) {
+ // reuse the object
+ stream.__proto__ = FS.FSStream.prototype;
+ } else {
+ var newStream = new FS.FSStream();
+ for (var p in stream) {
+ newStream[p] = stream[p];
+ }
+ stream = newStream;
+ }
+ var fd = FS.nextfd(fd_start, fd_end);
+ stream.fd = fd;
+ FS.streams[fd] = stream;
+ return stream;
+ },closeStream:function (fd) {
+ FS.streams[fd] = null;
+ },getStreamFromPtr:function (ptr) {
+ return FS.streams[ptr - 1];
+ },getPtrForStream:function (stream) {
+ return stream ? stream.fd + 1 : 0;
+ },chrdev_stream_ops:{open:function (stream) {
+ var device = FS.getDevice(stream.node.rdev);
+ // override node's stream ops with the device's
+ stream.stream_ops = device.stream_ops;
+ // forward the open call
+ if (stream.stream_ops.open) {
+ stream.stream_ops.open(stream);
+ }
+ },llseek:function () {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }},major:function (dev) {
+ return ((dev) >> 8);
+ },minor:function (dev) {
+ return ((dev) & 0xff);
+ },makedev:function (ma, mi) {
+ return ((ma) << 8 | (mi));
+ },registerDevice:function (dev, ops) {
+ FS.devices[dev] = { stream_ops: ops };
+ },getDevice:function (dev) {
+ return FS.devices[dev];
+ },getMounts:function (mount) {
+ var mounts = [];
+ var check = [mount];
+
+ while (check.length) {
+ var m = check.pop();
+
+ mounts.push(m);
+
+ check.push.apply(check, m.mounts);
+ }
+
+ return mounts;
+ },syncfs:function (populate, callback) {
+ if (typeof(populate) === 'function') {
+ callback = populate;
+ populate = false;
+ }
+
+ var mounts = FS.getMounts(FS.root.mount);
+ var completed = 0;
+
+ function done(err) {
+ if (err) {
+ if (!done.errored) {
+ done.errored = true;
+ return callback(err);
+ }
+ return;
+ }
+ if (++completed >= mounts.length) {
+ callback(null);
+ }
+ };
+
+ // sync all mounts
+ mounts.forEach(function (mount) {
+ if (!mount.type.syncfs) {
+ return done(null);
+ }
+ mount.type.syncfs(mount, populate, done);
+ });
+ },mount:function (type, opts, mountpoint) {
+ var root = mountpoint === '/';
+ var pseudo = !mountpoint;
+ var node;
+
+ if (root && FS.root) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ } else if (!root && !pseudo) {
+ var lookup = FS.lookupPath(mountpoint, { follow_mount: false });
+
+ mountpoint = lookup.path; // use the absolute path
+ node = lookup.node;
+
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+
+ if (!FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTDIR);
+ }
+ }
+
+ var mount = {
+ type: type,
+ opts: opts,
+ mountpoint: mountpoint,
+ mounts: []
+ };
+
+ // create a root node for the fs
+ var mountRoot = type.mount(mount);
+ mountRoot.mount = mount;
+ mount.root = mountRoot;
+
+ if (root) {
+ FS.root = mountRoot;
+ } else if (node) {
+ // set as a mountpoint
+ node.mounted = mount;
+
+ // add the new mount to the current mount's children
+ if (node.mount) {
+ node.mount.mounts.push(mount);
+ }
+ }
+
+ return mountRoot;
+ },unmount:function (mountpoint) {
+ var lookup = FS.lookupPath(mountpoint, { follow_mount: false });
+
+ if (!FS.isMountpoint(lookup.node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+
+ // destroy the nodes for this mount, and all its child mounts
+ var node = lookup.node;
+ var mount = node.mounted;
+ var mounts = FS.getMounts(mount);
+
+ Object.keys(FS.nameTable).forEach(function (hash) {
+ var current = FS.nameTable[hash];
+
+ while (current) {
+ var next = current.name_next;
+
+ if (mounts.indexOf(current.mount) !== -1) {
+ FS.destroyNode(current);
+ }
+
+ current = next;
+ }
+ });
+
+ // no longer a mountpoint
+ node.mounted = null;
+
+ // remove this mount from the child mounts
+ var idx = node.mount.mounts.indexOf(mount);
+ assert(idx !== -1);
+ node.mount.mounts.splice(idx, 1);
+ },lookup:function (parent, name) {
+ return parent.node_ops.lookup(parent, name);
+ },mknod:function (path, mode, dev) {
+ var lookup = FS.lookupPath(path, { parent: true });
+ var parent = lookup.node;
+ var name = PATH.basename(path);
+ var err = FS.mayCreate(parent, name);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.mknod) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ return parent.node_ops.mknod(parent, name, mode, dev);
+ },create:function (path, mode) {
+ mode = mode !== undefined ? mode : 438 /* 0666 */;
+ mode &= 4095;
+ mode |= 32768;
+ return FS.mknod(path, mode, 0);
+ },mkdir:function (path, mode) {
+ mode = mode !== undefined ? mode : 511 /* 0777 */;
+ mode &= 511 | 512;
+ mode |= 16384;
+ return FS.mknod(path, mode, 0);
+ },mkdev:function (path, mode, dev) {
+ if (typeof(dev) === 'undefined') {
+ dev = mode;
+ mode = 438 /* 0666 */;
+ }
+ mode |= 8192;
+ return FS.mknod(path, mode, dev);
+ },symlink:function (oldpath, newpath) {
+ var lookup = FS.lookupPath(newpath, { parent: true });
+ var parent = lookup.node;
+ var newname = PATH.basename(newpath);
+ var err = FS.mayCreate(parent, newname);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.symlink) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ return parent.node_ops.symlink(parent, newname, oldpath);
+ },rename:function (old_path, new_path) {
+ var old_dirname = PATH.dirname(old_path);
+ var new_dirname = PATH.dirname(new_path);
+ var old_name = PATH.basename(old_path);
+ var new_name = PATH.basename(new_path);
+ // parents must exist
+ var lookup, old_dir, new_dir;
+ try {
+ lookup = FS.lookupPath(old_path, { parent: true });
+ old_dir = lookup.node;
+ lookup = FS.lookupPath(new_path, { parent: true });
+ new_dir = lookup.node;
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ // need to be part of the same mount
+ if (old_dir.mount !== new_dir.mount) {
+ throw new FS.ErrnoError(ERRNO_CODES.EXDEV);
+ }
+ // source must exist
+ var old_node = FS.lookupNode(old_dir, old_name);
+ // old path should not be an ancestor of the new path
+ var relative = PATH.relative(old_path, new_dirname);
+ if (relative.charAt(0) !== '.') {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ // new path should not be an ancestor of the old path
+ relative = PATH.relative(new_path, old_dirname);
+ if (relative.charAt(0) !== '.') {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTEMPTY);
+ }
+ // see if the new path already exists
+ var new_node;
+ try {
+ new_node = FS.lookupNode(new_dir, new_name);
+ } catch (e) {
+ // not fatal
+ }
+ // early out if nothing needs to change
+ if (old_node === new_node) {
+ return;
+ }
+ // we'll need to delete the old entry
+ var isdir = FS.isDir(old_node.mode);
+ var err = FS.mayDelete(old_dir, old_name, isdir);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ // need delete permissions if we'll be overwriting.
+ // need create permissions if new doesn't already exist.
+ err = new_node ?
+ FS.mayDelete(new_dir, new_name, isdir) :
+ FS.mayCreate(new_dir, new_name);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!old_dir.node_ops.rename) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isMountpoint(old_node) || (new_node && FS.isMountpoint(new_node))) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ // if we are going to change the parent, check write permissions
+ if (new_dir !== old_dir) {
+ err = FS.nodePermissions(old_dir, 'w');
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ }
+ // remove the node from the lookup hash
+ FS.hashRemoveNode(old_node);
+ // do the underlying fs rename
+ try {
+ old_dir.node_ops.rename(old_node, new_dir, new_name);
+ } catch (e) {
+ throw e;
+ } finally {
+ // add the node back to the hash (in case node_ops.rename
+ // changed its name)
+ FS.hashAddNode(old_node);
+ }
+ },rmdir:function (path) {
+ var lookup = FS.lookupPath(path, { parent: true });
+ var parent = lookup.node;
+ var name = PATH.basename(path);
+ var node = FS.lookupNode(parent, name);
+ var err = FS.mayDelete(parent, name, true);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.rmdir) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ parent.node_ops.rmdir(parent, name);
+ FS.destroyNode(node);
+ },readdir:function (path) {
+ var lookup = FS.lookupPath(path, { follow: true });
+ var node = lookup.node;
+ if (!node.node_ops.readdir) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTDIR);
+ }
+ return node.node_ops.readdir(node);
+ },unlink:function (path) {
+ var lookup = FS.lookupPath(path, { parent: true });
+ var parent = lookup.node;
+ var name = PATH.basename(path);
+ var node = FS.lookupNode(parent, name);
+ var err = FS.mayDelete(parent, name, false);
+ if (err) {
+ // POSIX says unlink should set EPERM, not EISDIR
+ if (err === ERRNO_CODES.EISDIR) err = ERRNO_CODES.EPERM;
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.unlink) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ parent.node_ops.unlink(parent, name);
+ FS.destroyNode(node);
+ },readlink:function (path) {
+ var lookup = FS.lookupPath(path);
+ var link = lookup.node;
+ if (!link.node_ops.readlink) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ return link.node_ops.readlink(link);
+ },stat:function (path, dontFollow) {
+ var lookup = FS.lookupPath(path, { follow: !dontFollow });
+ var node = lookup.node;
+ if (!node.node_ops.getattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ return node.node_ops.getattr(node);
+ },lstat:function (path) {
+ return FS.stat(path, true);
+ },chmod:function (path, mode, dontFollow) {
+ var node;
+ if (typeof path === 'string') {
+ var lookup = FS.lookupPath(path, { follow: !dontFollow });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ if (!node.node_ops.setattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ node.node_ops.setattr(node, {
+ mode: (mode & 4095) | (node.mode & ~4095),
+ timestamp: Date.now()
+ });
+ },lchmod:function (path, mode) {
+ FS.chmod(path, mode, true);
+ },fchmod:function (fd, mode) {
+ var stream = FS.getStream(fd);
+ if (!stream) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ FS.chmod(stream.node, mode);
+ },chown:function (path, uid, gid, dontFollow) {
+ var node;
+ if (typeof path === 'string') {
+ var lookup = FS.lookupPath(path, { follow: !dontFollow });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ if (!node.node_ops.setattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ node.node_ops.setattr(node, {
+ timestamp: Date.now()
+ // we ignore the uid / gid for now
+ });
+ },lchown:function (path, uid, gid) {
+ FS.chown(path, uid, gid, true);
+ },fchown:function (fd, uid, gid) {
+ var stream = FS.getStream(fd);
+ if (!stream) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ FS.chown(stream.node, uid, gid);
+ },truncate:function (path, len) {
+ if (len < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var node;
+ if (typeof path === 'string') {
+ var lookup = FS.lookupPath(path, { follow: true });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ if (!node.node_ops.setattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EISDIR);
+ }
+ if (!FS.isFile(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var err = FS.nodePermissions(node, 'w');
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ node.node_ops.setattr(node, {
+ size: len,
+ timestamp: Date.now()
+ });
+ },ftruncate:function (fd, len) {
+ var stream = FS.getStream(fd);
+ if (!stream) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ FS.truncate(stream.node, len);
+ },utime:function (path, atime, mtime) {
+ var lookup = FS.lookupPath(path, { follow: true });
+ var node = lookup.node;
+ node.node_ops.setattr(node, {
+ timestamp: Math.max(atime, mtime)
+ });
+ },open:function (path, flags, mode, fd_start, fd_end) {
+ flags = typeof flags === 'string' ? FS.modeStringToFlags(flags) : flags;
+ mode = typeof mode === 'undefined' ? 438 /* 0666 */ : mode;
+ if ((flags & 64)) {
+ mode = (mode & 4095) | 32768;
+ } else {
+ mode = 0;
+ }
+ var node;
+ if (typeof path === 'object') {
+ node = path;
+ } else {
+ path = PATH.normalize(path);
+ try {
+ var lookup = FS.lookupPath(path, {
+ follow: !(flags & 131072)
+ });
+ node = lookup.node;
+ } catch (e) {
+ // ignore
+ }
+ }
+ // perhaps we need to create the node
+ if ((flags & 64)) {
+ if (node) {
+ // if O_CREAT and O_EXCL are set, error out if the node already exists
+ if ((flags & 128)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EEXIST);
+ }
+ } else {
+ // node doesn't exist, try to create it
+ node = FS.mknod(path, mode, 0);
+ }
+ }
+ if (!node) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOENT);
+ }
+ // can't truncate a device
+ if (FS.isChrdev(node.mode)) {
+ flags &= ~512;
+ }
+ // check permissions
+ var err = FS.mayOpen(node, flags);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ // do truncation if necessary
+ if ((flags & 512)) {
+ FS.truncate(node, 0);
+ }
+ // we've already handled these, don't pass down to the underlying vfs
+ flags &= ~(128 | 512);
+
+ // register the stream with the filesystem
+ var stream = FS.createStream({
+ node: node,
+ path: FS.getPath(node), // we want the absolute path to the node
+ flags: flags,
+ seekable: true,
+ position: 0,
+ stream_ops: node.stream_ops,
+ // used by the file family libc calls (fopen, fwrite, ferror, etc.)
+ ungotten: [],
+ error: false
+ }, fd_start, fd_end);
+ // call the new stream's open function
+ if (stream.stream_ops.open) {
+ stream.stream_ops.open(stream);
+ }
+ if (Module['logReadFiles'] && !(flags & 1)) {
+ if (!FS.readFiles) FS.readFiles = {};
+ if (!(path in FS.readFiles)) {
+ FS.readFiles[path] = 1;
+ Module['printErr']('read file: ' + path);
+ }
+ }
+ return stream;
+ },close:function (stream) {
+ try {
+ if (stream.stream_ops.close) {
+ stream.stream_ops.close(stream);
+ }
+ } catch (e) {
+ throw e;
+ } finally {
+ FS.closeStream(stream.fd);
+ }
+ },llseek:function (stream, offset, whence) {
+ if (!stream.seekable || !stream.stream_ops.llseek) {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }
+ return stream.stream_ops.llseek(stream, offset, whence);
+ },read:function (stream, buffer, offset, length, position) {
+ if (length < 0 || position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ if ((stream.flags & 2097155) === 1) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if (FS.isDir(stream.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EISDIR);
+ }
+ if (!stream.stream_ops.read) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var seeking = true;
+ if (typeof position === 'undefined') {
+ position = stream.position;
+ seeking = false;
+ } else if (!stream.seekable) {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }
+ var bytesRead = stream.stream_ops.read(stream, buffer, offset, length, position);
+ if (!seeking) stream.position += bytesRead;
+ return bytesRead;
+ },write:function (stream, buffer, offset, length, position, canOwn) {
+ if (length < 0 || position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if (FS.isDir(stream.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EISDIR);
+ }
+ if (!stream.stream_ops.write) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var seeking = true;
+ if (typeof position === 'undefined') {
+ position = stream.position;
+ seeking = false;
+ } else if (!stream.seekable) {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }
+ if (stream.flags & 1024) {
+ // seek to the end before writing in append mode
+ FS.llseek(stream, 0, 2);
+ }
+ var bytesWritten = stream.stream_ops.write(stream, buffer, offset, length, position, canOwn);
+ if (!seeking) stream.position += bytesWritten;
+ return bytesWritten;
+ },allocate:function (stream, offset, length) {
+ if (offset < 0 || length <= 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if (!FS.isFile(stream.node.mode) && !FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ if (!stream.stream_ops.allocate) {
+ throw new FS.ErrnoError(ERRNO_CODES.EOPNOTSUPP);
+ }
+ stream.stream_ops.allocate(stream, offset, length);
+ },mmap:function (stream, buffer, offset, length, position, prot, flags) {
+ // TODO if PROT is PROT_WRITE, make sure we have write access
+ if ((stream.flags & 2097155) === 1) {
+ throw new FS.ErrnoError(ERRNO_CODES.EACCES);
+ }
+ if (!stream.stream_ops.mmap) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ return stream.stream_ops.mmap(stream, buffer, offset, length, position, prot, flags);
+ },ioctl:function (stream, cmd, arg) {
+ if (!stream.stream_ops.ioctl) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTTY);
+ }
+ return stream.stream_ops.ioctl(stream, cmd, arg);
+ },readFile:function (path, opts) {
+ opts = opts || {};
+ opts.flags = opts.flags || 'r';
+ opts.encoding = opts.encoding || 'binary';
+ if (opts.encoding !== 'utf8' && opts.encoding !== 'binary') {
+ throw new Error('Invalid encoding type "' + opts.encoding + '"');
+ }
+ var ret;
+ var stream = FS.open(path, opts.flags);
+ var stat = FS.stat(path);
+ var length = stat.size;
+ var buf = new Uint8Array(length);
+ FS.read(stream, buf, 0, length, 0);
+ if (opts.encoding === 'utf8') {
+ ret = '';
+ var utf8 = new Runtime.UTF8Processor();
+ for (var i = 0; i < length; i++) {
+ ret += utf8.processCChar(buf[i]);
+ }
+ } else if (opts.encoding === 'binary') {
+ ret = buf;
+ }
+ FS.close(stream);
+ return ret;
+ },writeFile:function (path, data, opts) {
+ opts = opts || {};
+ opts.flags = opts.flags || 'w';
+ opts.encoding = opts.encoding || 'utf8';
+ if (opts.encoding !== 'utf8' && opts.encoding !== 'binary') {
+ throw new Error('Invalid encoding type "' + opts.encoding + '"');
+ }
+ var stream = FS.open(path, opts.flags, opts.mode);
+ if (opts.encoding === 'utf8') {
+ var utf8 = new Runtime.UTF8Processor();
+ var buf = new Uint8Array(utf8.processJSString(data));
+ FS.write(stream, buf, 0, buf.length, 0, opts.canOwn);
+ } else if (opts.encoding === 'binary') {
+ FS.write(stream, data, 0, data.length, 0, opts.canOwn);
+ }
+ FS.close(stream);
+ },cwd:function () {
+ return FS.currentPath;
+ },chdir:function (path) {
+ var lookup = FS.lookupPath(path, { follow: true });
+ if (!FS.isDir(lookup.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTDIR);
+ }
+ var err = FS.nodePermissions(lookup.node, 'x');
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ FS.currentPath = lookup.path;
+ },createDefaultDirectories:function () {
+ FS.mkdir('/tmp');
+ },createDefaultDevices:function () {
+ // create /dev
+ FS.mkdir('/dev');
+ // setup /dev/null
+ FS.registerDevice(FS.makedev(1, 3), {
+ read: function() { return 0; },
+ write: function() { return 0; }
+ });
+ FS.mkdev('/dev/null', FS.makedev(1, 3));
+ // setup /dev/tty and /dev/tty1
+ // stderr needs to print output using Module['printErr']
+ // so we register a second tty just for it.
+ TTY.register(FS.makedev(5, 0), TTY.default_tty_ops);
+ TTY.register(FS.makedev(6, 0), TTY.default_tty1_ops);
+ FS.mkdev('/dev/tty', FS.makedev(5, 0));
+ FS.mkdev('/dev/tty1', FS.makedev(6, 0));
+ // we're not going to emulate the actual shm device,
+ // just create the tmp dirs that reside in it commonly
+ FS.mkdir('/dev/shm');
+ FS.mkdir('/dev/shm/tmp');
+ },createStandardStreams:function () {
+ // TODO deprecate the old functionality of a single
+ // input / output callback and that utilizes FS.createDevice
+ // and instead require a unique set of stream ops
+
+ // by default, we symlink the standard streams to the
+ // default tty devices. however, if the standard streams
+ // have been overwritten we create a unique device for
+ // them instead.
+ if (Module['stdin']) {
+ FS.createDevice('/dev', 'stdin', Module['stdin']);
+ } else {
+ FS.symlink('/dev/tty', '/dev/stdin');
+ }
+ if (Module['stdout']) {
+ FS.createDevice('/dev', 'stdout', null, Module['stdout']);
+ } else {
+ FS.symlink('/dev/tty', '/dev/stdout');
+ }
+ if (Module['stderr']) {
+ FS.createDevice('/dev', 'stderr', null, Module['stderr']);
+ } else {
+ FS.symlink('/dev/tty1', '/dev/stderr');
+ }
+
+ // open default streams for the stdin, stdout and stderr devices
+ var stdin = FS.open('/dev/stdin', 'r');
+ HEAP32[((_stdin)>>2)]=FS.getPtrForStream(stdin);
+ assert(stdin.fd === 0, 'invalid handle for stdin (' + stdin.fd + ')');
+
+ var stdout = FS.open('/dev/stdout', 'w');
+ HEAP32[((_stdout)>>2)]=FS.getPtrForStream(stdout);
+ assert(stdout.fd === 1, 'invalid handle for stdout (' + stdout.fd + ')');
+
+ var stderr = FS.open('/dev/stderr', 'w');
+ HEAP32[((_stderr)>>2)]=FS.getPtrForStream(stderr);
+ assert(stderr.fd === 2, 'invalid handle for stderr (' + stderr.fd + ')');
+ },ensureErrnoError:function () {
+ if (FS.ErrnoError) return;
+ FS.ErrnoError = function ErrnoError(errno) {
+ this.errno = errno;
+ for (var key in ERRNO_CODES) {
+ if (ERRNO_CODES[key] === errno) {
+ this.code = key;
+ break;
+ }
+ }
+ this.message = ERRNO_MESSAGES[errno];
+ };
+ FS.ErrnoError.prototype = new Error();
+ FS.ErrnoError.prototype.constructor = FS.ErrnoError;
+ // Some errors may happen quite a bit, to avoid overhead we reuse them (and suffer a lack of stack info)
+ [ERRNO_CODES.ENOENT].forEach(function(code) {
+ FS.genericErrors[code] = new FS.ErrnoError(code);
+ FS.genericErrors[code].stack = '<generic error, no stack>';
+ });
+ },staticInit:function () {
+ FS.ensureErrnoError();
+
+ FS.nameTable = new Array(4096);
+
+ FS.mount(MEMFS, {}, '/');
+
+ FS.createDefaultDirectories();
+ FS.createDefaultDevices();
+ },init:function (input, output, error) {
+ assert(!FS.init.initialized, 'FS.init was previously called. If you want to initialize later with custom parameters, remove any earlier calls (note that one is automatically added to the generated code)');
+ FS.init.initialized = true;
+
+ FS.ensureErrnoError();
+
+ // Allow Module.stdin etc. to provide defaults, if none explicitly passed to us here
+ Module['stdin'] = input || Module['stdin'];
+ Module['stdout'] = output || Module['stdout'];
+ Module['stderr'] = error || Module['stderr'];
+
+ FS.createStandardStreams();
+ },quit:function () {
+ FS.init.initialized = false;
+ for (var i = 0; i < FS.streams.length; i++) {
+ var stream = FS.streams[i];
+ if (!stream) {
+ continue;
+ }
+ FS.close(stream);
+ }
+ },getMode:function (canRead, canWrite) {
+ var mode = 0;
+ if (canRead) mode |= 292 | 73;
+ if (canWrite) mode |= 146;
+ return mode;
+ },joinPath:function (parts, forceRelative) {
+ var path = PATH.join.apply(null, parts);
+ if (forceRelative && path[0] == '/') path = path.substr(1);
+ return path;
+ },absolutePath:function (relative, base) {
+ return PATH.resolve(base, relative);
+ },standardizePath:function (path) {
+ return PATH.normalize(path);
+ },findObject:function (path, dontResolveLastLink) {
+ var ret = FS.analyzePath(path, dontResolveLastLink);
+ if (ret.exists) {
+ return ret.object;
+ } else {
+ ___setErrNo(ret.error);
+ return null;
+ }
+ },analyzePath:function (path, dontResolveLastLink) {
+ // operate from within the context of the symlink's target
+ try {
+ var lookup = FS.lookupPath(path, { follow: !dontResolveLastLink });
+ path = lookup.path;
+ } catch (e) {
+ }
+ var ret = {
+ isRoot: false, exists: false, error: 0, name: null, path: null, object: null,
+ parentExists: false, parentPath: null, parentObject: null
+ };
+ try {
+ var lookup = FS.lookupPath(path, { parent: true });
+ ret.parentExists = true;
+ ret.parentPath = lookup.path;
+ ret.parentObject = lookup.node;
+ ret.name = PATH.basename(path);
+ lookup = FS.lookupPath(path, { follow: !dontResolveLastLink });
+ ret.exists = true;
+ ret.path = lookup.path;
+ ret.object = lookup.node;
+ ret.name = lookup.node.name;
+ ret.isRoot = lookup.path === '/';
+ } catch (e) {
+ ret.error = e.errno;
+ };
+ return ret;
+ },createFolder:function (parent, name, canRead, canWrite) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ var mode = FS.getMode(canRead, canWrite);
+ return FS.mkdir(path, mode);
+ },createPath:function (parent, path, canRead, canWrite) {
+ parent = typeof parent === 'string' ? parent : FS.getPath(parent);
+ var parts = path.split('/').reverse();
+ while (parts.length) {
+ var part = parts.pop();
+ if (!part) continue;
+ var current = PATH.join2(parent, part);
+ try {
+ FS.mkdir(current);
+ } catch (e) {
+ // ignore EEXIST
+ }
+ parent = current;
+ }
+ return current;
+ },createFile:function (parent, name, properties, canRead, canWrite) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ var mode = FS.getMode(canRead, canWrite);
+ return FS.create(path, mode);
+ },createDataFile:function (parent, name, data, canRead, canWrite, canOwn) {
+ var path = name ? PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name) : parent;
+ var mode = FS.getMode(canRead, canWrite);
+ var node = FS.create(path, mode);
+ if (data) {
+ if (typeof data === 'string') {
+ var arr = new Array(data.length);
+ for (var i = 0, len = data.length; i < len; ++i) arr[i] = data.charCodeAt(i);
+ data = arr;
+ }
+ // make sure we can write to the file
+ FS.chmod(node, mode | 146);
+ var stream = FS.open(node, 'w');
+ FS.write(stream, data, 0, data.length, 0, canOwn);
+ FS.close(stream);
+ FS.chmod(node, mode);
+ }
+ return node;
+ },createDevice:function (parent, name, input, output) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ var mode = FS.getMode(!!input, !!output);
+ if (!FS.createDevice.major) FS.createDevice.major = 64;
+ var dev = FS.makedev(FS.createDevice.major++, 0);
+ // Create a fake device that a set of stream ops to emulate
+ // the old behavior.
+ FS.registerDevice(dev, {
+ open: function(stream) {
+ stream.seekable = false;
+ },
+ close: function(stream) {
+ // flush any pending line data
+ if (output && output.buffer && output.buffer.length) {
+ output(10);
+ }
+ },
+ read: function(stream, buffer, offset, length, pos /* ignored */) {
+ var bytesRead = 0;
+ for (var i = 0; i < length; i++) {
+ var result;
+ try {
+ result = input();
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ if (result === undefined && bytesRead === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ if (result === null || result === undefined) break;
+ bytesRead++;
+ buffer[offset+i] = result;
+ }
+ if (bytesRead) {
+ stream.node.timestamp = Date.now();
+ }
+ return bytesRead;
+ },
+ write: function(stream, buffer, offset, length, pos) {
+ for (var i = 0; i < length; i++) {
+ try {
+ output(buffer[offset+i]);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ }
+ if (length) {
+ stream.node.timestamp = Date.now();
+ }
+ return i;
+ }
+ });
+ return FS.mkdev(path, mode, dev);
+ },createLink:function (parent, name, target, canRead, canWrite) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ return FS.symlink(target, path);
+ },forceLoadFile:function (obj) {
+ if (obj.isDevice || obj.isFolder || obj.link || obj.contents) return true;
+ var success = true;
+ if (typeof XMLHttpRequest !== 'undefined') {
+ throw new Error("Lazy loading should have been performed (contents set) in createLazyFile, but it was not. Lazy loading only works in web workers. Use --embed-file or --preload-file in emcc on the main thread.");
+ } else if (Module['read']) {
+ // Command-line.
+ try {
+ // WARNING: Can't read binary files in V8's d8 or tracemonkey's js, as
+ // read() will try to parse UTF8.
+ obj.contents = intArrayFromString(Module['read'](obj.url), true);
+ } catch (e) {
+ success = false;
+ }
+ } else {
+ throw new Error('Cannot load without read() or XMLHttpRequest.');
+ }
+ if (!success) ___setErrNo(ERRNO_CODES.EIO);
+ return success;
+ },createLazyFile:function (parent, name, url, canRead, canWrite) {
+ // Lazy chunked Uint8Array (implements get and length from Uint8Array). Actual getting is abstracted away for eventual reuse.
+ function LazyUint8Array() {
+ this.lengthKnown = false;
+ this.chunks = []; // Loaded chunks. Index is the chunk number
+ }
+ LazyUint8Array.prototype.get = function LazyUint8Array_get(idx) {
+ if (idx > this.length-1 || idx < 0) {
+ return undefined;
+ }
+ var chunkOffset = idx % this.chunkSize;
+ var chunkNum = Math.floor(idx / this.chunkSize);
+ return this.getter(chunkNum)[chunkOffset];
+ }
+ LazyUint8Array.prototype.setDataGetter = function LazyUint8Array_setDataGetter(getter) {
+ this.getter = getter;
+ }
+ LazyUint8Array.prototype.cacheLength = function LazyUint8Array_cacheLength() {
+ // Find length
+ var xhr = new XMLHttpRequest();
+ xhr.open('HEAD', url, false);
+ xhr.send(null);
+ if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". Status: " + xhr.status);
+ var datalength = Number(xhr.getResponseHeader("Content-length"));
+ var header;
+ var hasByteServing = (header = xhr.getResponseHeader("Accept-Ranges")) && header === "bytes";
+ var chunkSize = 1024*1024; // Chunk size in bytes
+
+ if (!hasByteServing) chunkSize = datalength;
+
+ // Function to get a range from the remote URL.
+ var doXHR = (function(from, to) {
+ if (from > to) throw new Error("invalid range (" + from + ", " + to + ") or no bytes requested!");
+ if (to > datalength-1) throw new Error("only " + datalength + " bytes available! programmer error!");
+
+ // TODO: Use mozResponseArrayBuffer, responseStream, etc. if available.
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, false);
+ if (datalength !== chunkSize) xhr.setRequestHeader("Range", "bytes=" + from + "-" + to);
+
+ // Some hints to the browser that we want binary data.
+ if (typeof Uint8Array != 'undefined') xhr.responseType = 'arraybuffer';
+ if (xhr.overrideMimeType) {
+ xhr.overrideMimeType('text/plain; charset=x-user-defined');
+ }
+
+ xhr.send(null);
+ if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". Status: " + xhr.status);
+ if (xhr.response !== undefined) {
+ return new Uint8Array(xhr.response || []);
+ } else {
+ return intArrayFromString(xhr.responseText || '', true);
+ }
+ });
+ var lazyArray = this;
+ lazyArray.setDataGetter(function(chunkNum) {
+ var start = chunkNum * chunkSize;
+ var end = (chunkNum+1) * chunkSize - 1; // including this byte
+ end = Math.min(end, datalength-1); // if datalength-1 is selected, this is the last block
+ if (typeof(lazyArray.chunks[chunkNum]) === "undefined") {
+ lazyArray.chunks[chunkNum] = doXHR(start, end);
+ }
+ if (typeof(lazyArray.chunks[chunkNum]) === "undefined") throw new Error("doXHR failed!");
+ return lazyArray.chunks[chunkNum];
+ });
+
+ this._length = datalength;
+ this._chunkSize = chunkSize;
+ this.lengthKnown = true;
+ }
+ if (typeof XMLHttpRequest !== 'undefined') {
+ if (!ENVIRONMENT_IS_WORKER) throw 'Cannot do synchronous binary XHRs outside webworkers in modern browsers. Use --embed-file or --preload-file in emcc';
+ var lazyArray = new LazyUint8Array();
+ Object.defineProperty(lazyArray, "length", {
+ get: function() {
+ if(!this.lengthKnown) {
+ this.cacheLength();
+ }
+ return this._length;
+ }
+ });
+ Object.defineProperty(lazyArray, "chunkSize", {
+ get: function() {
+ if(!this.lengthKnown) {
+ this.cacheLength();
+ }
+ return this._chunkSize;
+ }
+ });
+
+ var properties = { isDevice: false, contents: lazyArray };
+ } else {
+ var properties = { isDevice: false, url: url };
+ }
+
+ var node = FS.createFile(parent, name, properties, canRead, canWrite);
+ // This is a total hack, but I want to get this lazy file code out of the
+ // core of MEMFS. If we want to keep this lazy file concept I feel it should
+ // be its own thin LAZYFS proxying calls to MEMFS.
+ if (properties.contents) {
+ node.contents = properties.contents;
+ } else if (properties.url) {
+ node.contents = null;
+ node.url = properties.url;
+ }
+ // override each stream op with one that tries to force load the lazy file first
+ var stream_ops = {};
+ var keys = Object.keys(node.stream_ops);
+ keys.forEach(function(key) {
+ var fn = node.stream_ops[key];
+ stream_ops[key] = function forceLoadLazyFile() {
+ if (!FS.forceLoadFile(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ return fn.apply(null, arguments);
+ };
+ });
+ // use a custom read function
+ stream_ops.read = function stream_ops_read(stream, buffer, offset, length, position) {
+ if (!FS.forceLoadFile(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ var contents = stream.node.contents;
+ if (position >= contents.length)
+ return 0;
+ var size = Math.min(contents.length - position, length);
+ assert(size >= 0);
+ if (contents.slice) { // normal array
+ for (var i = 0; i < size; i++) {
+ buffer[offset + i] = contents[position + i];
+ }
+ } else {
+ for (var i = 0; i < size; i++) { // LazyUint8Array from sync binary XHR
+ buffer[offset + i] = contents.get(position + i);
+ }
+ }
+ return size;
+ };
+ node.stream_ops = stream_ops;
+ return node;
+ },createPreloadedFile:function (parent, name, url, canRead, canWrite, onload, onerror, dontCreateFile, canOwn) {
+ Browser.init();
+ // TODO we should allow people to just pass in a complete filename instead
+ // of parent and name being that we just join them anyways
+ var fullname = name ? PATH.resolve(PATH.join2(parent, name)) : parent;
+ function processData(byteArray) {
+ function finish(byteArray) {
+ if (!dontCreateFile) {
+ FS.createDataFile(parent, name, byteArray, canRead, canWrite, canOwn);
+ }
+ if (onload) onload();
+ removeRunDependency('cp ' + fullname);
+ }
+ var handled = false;
+ Module['preloadPlugins'].forEach(function(plugin) {
+ if (handled) return;
+ if (plugin['canHandle'](fullname)) {
+ plugin['handle'](byteArray, fullname, finish, function() {
+ if (onerror) onerror();
+ removeRunDependency('cp ' + fullname);
+ });
+ handled = true;
+ }
+ });
+ if (!handled) finish(byteArray);
+ }
+ addRunDependency('cp ' + fullname);
+ if (typeof url == 'string') {
+ Browser.asyncLoad(url, function(byteArray) {
+ processData(byteArray);
+ }, onerror);
+ } else {
+ processData(url);
+ }
+ },indexedDB:function () {
+ return window.indexedDB || window.mozIndexedDB || window.webkitIndexedDB || window.msIndexedDB;
+ },DB_NAME:function () {
+ return 'EM_FS_' + window.location.pathname;
+ },DB_VERSION:20,DB_STORE_NAME:"FILE_DATA",saveFilesToDB:function (paths, onload, onerror) {
+ onload = onload || function(){};
+ onerror = onerror || function(){};
+ var indexedDB = FS.indexedDB();
+ try {
+ var openRequest = indexedDB.open(FS.DB_NAME(), FS.DB_VERSION);
+ } catch (e) {
+ return onerror(e);
+ }
+ openRequest.onupgradeneeded = function openRequest_onupgradeneeded() {
+ console.log('creating db');
+ var db = openRequest.result;
+ db.createObjectStore(FS.DB_STORE_NAME);
+ };
+ openRequest.onsuccess = function openRequest_onsuccess() {
+ var db = openRequest.result;
+ var transaction = db.transaction([FS.DB_STORE_NAME], 'readwrite');
+ var files = transaction.objectStore(FS.DB_STORE_NAME);
+ var ok = 0, fail = 0, total = paths.length;
+ function finish() {
+ if (fail == 0) onload(); else onerror();
+ }
+ paths.forEach(function(path) {
+ var putRequest = files.put(FS.analyzePath(path).object.contents, path);
+ putRequest.onsuccess = function putRequest_onsuccess() { ok++; if (ok + fail == total) finish() };
+ putRequest.onerror = function putRequest_onerror() { fail++; if (ok + fail == total) finish() };
+ });
+ transaction.onerror = onerror;
+ };
+ openRequest.onerror = onerror;
+ },loadFilesFromDB:function (paths, onload, onerror) {
+ onload = onload || function(){};
+ onerror = onerror || function(){};
+ var indexedDB = FS.indexedDB();
+ try {
+ var openRequest = indexedDB.open(FS.DB_NAME(), FS.DB_VERSION);
+ } catch (e) {
+ return onerror(e);
+ }
+ openRequest.onupgradeneeded = onerror; // no database to load from
+ openRequest.onsuccess = function openRequest_onsuccess() {
+ var db = openRequest.result;
+ try {
+ var transaction = db.transaction([FS.DB_STORE_NAME], 'readonly');
+ } catch(e) {
+ onerror(e);
+ return;
+ }
+ var files = transaction.objectStore(FS.DB_STORE_NAME);
+ var ok = 0, fail = 0, total = paths.length;
+ function finish() {
+ if (fail == 0) onload(); else onerror();
+ }
+ paths.forEach(function(path) {
+ var getRequest = files.get(path);
+ getRequest.onsuccess = function getRequest_onsuccess() {
+ if (FS.analyzePath(path).exists) {
+ FS.unlink(path);
+ }
+ FS.createDataFile(PATH.dirname(path), PATH.basename(path), getRequest.result, true, true, true);
+ ok++;
+ if (ok + fail == total) finish();
+ };
+ getRequest.onerror = function getRequest_onerror() { fail++; if (ok + fail == total) finish() };
+ });
+ transaction.onerror = onerror;
+ };
+ openRequest.onerror = onerror;
+ }};
+
+
+
+
+ function _mkport() { throw 'TODO' }var SOCKFS={mount:function (mount) {
+ return FS.createNode(null, '/', 16384 | 511 /* 0777 */, 0);
+ },createSocket:function (family, type, protocol) {
+ var streaming = type == 1;
+ if (protocol) {
+ assert(streaming == (protocol == 6)); // if SOCK_STREAM, must be tcp
+ }
+
+ // create our internal socket structure
+ var sock = {
+ family: family,
+ type: type,
+ protocol: protocol,
+ server: null,
+ peers: {},
+ pending: [],
+ recv_queue: [],
+ sock_ops: SOCKFS.websocket_sock_ops
+ };
+
+ // create the filesystem node to store the socket structure
+ var name = SOCKFS.nextname();
+ var node = FS.createNode(SOCKFS.root, name, 49152, 0);
+ node.sock = sock;
+
+ // and the wrapping stream that enables library functions such
+ // as read and write to indirectly interact with the socket
+ var stream = FS.createStream({
+ path: name,
+ node: node,
+ flags: FS.modeStringToFlags('r+'),
+ seekable: false,
+ stream_ops: SOCKFS.stream_ops
+ });
+
+ // map the new stream to the socket structure (sockets have a 1:1
+ // relationship with a stream)
+ sock.stream = stream;
+
+ return sock;
+ },getSocket:function (fd) {
+ var stream = FS.getStream(fd);
+ if (!stream || !FS.isSocket(stream.node.mode)) {
+ return null;
+ }
+ return stream.node.sock;
+ },stream_ops:{poll:function (stream) {
+ var sock = stream.node.sock;
+ return sock.sock_ops.poll(sock);
+ },ioctl:function (stream, request, varargs) {
+ var sock = stream.node.sock;
+ return sock.sock_ops.ioctl(sock, request, varargs);
+ },read:function (stream, buffer, offset, length, position /* ignored */) {
+ var sock = stream.node.sock;
+ var msg = sock.sock_ops.recvmsg(sock, length);
+ if (!msg) {
+ // socket is closed
+ return 0;
+ }
+ buffer.set(msg.buffer, offset);
+ return msg.buffer.length;
+ },write:function (stream, buffer, offset, length, position /* ignored */) {
+ var sock = stream.node.sock;
+ return sock.sock_ops.sendmsg(sock, buffer, offset, length);
+ },close:function (stream) {
+ var sock = stream.node.sock;
+ sock.sock_ops.close(sock);
+ }},nextname:function () {
+ if (!SOCKFS.nextname.current) {
+ SOCKFS.nextname.current = 0;
+ }
+ return 'socket[' + (SOCKFS.nextname.current++) + ']';
+ },websocket_sock_ops:{createPeer:function (sock, addr, port) {
+ var ws;
+
+ if (typeof addr === 'object') {
+ ws = addr;
+ addr = null;
+ port = null;
+ }
+
+ if (ws) {
+ // for sockets that've already connected (e.g. we're the server)
+ // we can inspect the _socket property for the address
+ if (ws._socket) {
+ addr = ws._socket.remoteAddress;
+ port = ws._socket.remotePort;
+ }
+ // if we're just now initializing a connection to the remote,
+ // inspect the url property
+ else {
+ var result = /ws[s]?:\/\/([^:]+):(\d+)/.exec(ws.url);
+ if (!result) {
+ throw new Error('WebSocket URL must be in the format ws(s)://address:port');
+ }
+ addr = result[1];
+ port = parseInt(result[2], 10);
+ }
+ } else {
+ // create the actual websocket object and connect
+ try {
+ // runtimeConfig gets set to true if WebSocket runtime configuration is available.
+ var runtimeConfig = (Module['websocket'] && ('object' === typeof Module['websocket']));
+
+ // The default value is 'ws://' the replace is needed because the compiler replaces "//" comments with '#'
+ // comments without checking context, so we'd end up with ws:#, the replace swaps the "#" for "//" again.
+ var url = 'ws:#'.replace('#', '//');
+
+ if (runtimeConfig) {
+ if ('string' === typeof Module['websocket']['url']) {
+ url = Module['websocket']['url']; // Fetch runtime WebSocket URL config.
+ }
+ }
+
+ if (url === 'ws://' || url === 'wss://') { // Is the supplied URL config just a prefix, if so complete it.
+ url = url + addr + ':' + port;
+ }
+
+ // Make the WebSocket subprotocol (Sec-WebSocket-Protocol) default to binary if no configuration is set.
+ var subProtocols = 'binary'; // The default value is 'binary'
+
+ if (runtimeConfig) {
+ if ('string' === typeof Module['websocket']['subprotocol']) {
+ subProtocols = Module['websocket']['subprotocol']; // Fetch runtime WebSocket subprotocol config.
+ }
+ }
+
+ // The regex trims the string (removes spaces at the beginning and end, then splits the string by
+ // <any space>,<any space> into an Array. Whitespace removal is important for Websockify and ws.
+ subProtocols = subProtocols.replace(/^ +| +$/g,"").split(/ *, */);
+
+ // The node ws library API for specifying optional subprotocol is slightly different than the browser's.
+ var opts = ENVIRONMENT_IS_NODE ? {'protocol': subProtocols.toString()} : subProtocols;
+
+ // If node we use the ws library.
+ var WebSocket = ENVIRONMENT_IS_NODE ? require('ws') : window['WebSocket'];
+ ws = new WebSocket(url, opts);
+ ws.binaryType = 'arraybuffer';
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EHOSTUNREACH);
+ }
+ }
+
+
+ var peer = {
+ addr: addr,
+ port: port,
+ socket: ws,
+ dgram_send_queue: []
+ };
+
+ SOCKFS.websocket_sock_ops.addPeer(sock, peer);
+ SOCKFS.websocket_sock_ops.handlePeerEvents(sock, peer);
+
+ // if this is a bound dgram socket, send the port number first to allow
+ // us to override the ephemeral port reported to us by remotePort on the
+ // remote end.
+ if (sock.type === 2 && typeof sock.sport !== 'undefined') {
+ peer.dgram_send_queue.push(new Uint8Array([
+ 255, 255, 255, 255,
+ 'p'.charCodeAt(0), 'o'.charCodeAt(0), 'r'.charCodeAt(0), 't'.charCodeAt(0),
+ ((sock.sport & 0xff00) >> 8) , (sock.sport & 0xff)
+ ]));
+ }
+
+ return peer;
+ },getPeer:function (sock, addr, port) {
+ return sock.peers[addr + ':' + port];
+ },addPeer:function (sock, peer) {
+ sock.peers[peer.addr + ':' + peer.port] = peer;
+ },removePeer:function (sock, peer) {
+ delete sock.peers[peer.addr + ':' + peer.port];
+ },handlePeerEvents:function (sock, peer) {
+ var first = true;
+
+ var handleOpen = function () {
+ try {
+ var queued = peer.dgram_send_queue.shift();
+ while (queued) {
+ peer.socket.send(queued);
+ queued = peer.dgram_send_queue.shift();
+ }
+ } catch (e) {
+ // not much we can do here in the way of proper error handling as we've already
+ // lied and said this data was sent. shut it down.
+ peer.socket.close();
+ }
+ };
+
+ function handleMessage(data) {
+ assert(typeof data !== 'string' && data.byteLength !== undefined); // must receive an ArrayBuffer
+ data = new Uint8Array(data); // make a typed array view on the array buffer
+
+
+ // if this is the port message, override the peer's port with it
+ var wasfirst = first;
+ first = false;
+ if (wasfirst &&
+ data.length === 10 &&
+ data[0] === 255 && data[1] === 255 && data[2] === 255 && data[3] === 255 &&
+ data[4] === 'p'.charCodeAt(0) && data[5] === 'o'.charCodeAt(0) && data[6] === 'r'.charCodeAt(0) && data[7] === 't'.charCodeAt(0)) {
+ // update the peer's port and it's key in the peer map
+ var newport = ((data[8] << 8) | data[9]);
+ SOCKFS.websocket_sock_ops.removePeer(sock, peer);
+ peer.port = newport;
+ SOCKFS.websocket_sock_ops.addPeer(sock, peer);
+ return;
+ }
+
+ sock.recv_queue.push({ addr: peer.addr, port: peer.port, data: data });
+ };
+
+ if (ENVIRONMENT_IS_NODE) {
+ peer.socket.on('open', handleOpen);
+ peer.socket.on('message', function(data, flags) {
+ if (!flags.binary) {
+ return;
+ }
+ handleMessage((new Uint8Array(data)).buffer); // copy from node Buffer -> ArrayBuffer
+ });
+ peer.socket.on('error', function() {
+ // don't throw
+ });
+ } else {
+ peer.socket.onopen = handleOpen;
+ peer.socket.onmessage = function peer_socket_onmessage(event) {
+ handleMessage(event.data);
+ };
+ }
+ },poll:function (sock) {
+ if (sock.type === 1 && sock.server) {
+ // listen sockets should only say they're available for reading
+ // if there are pending clients.
+ return sock.pending.length ? (64 | 1) : 0;
+ }
+
+ var mask = 0;
+ var dest = sock.type === 1 ? // we only care about the socket state for connection-based sockets
+ SOCKFS.websocket_sock_ops.getPeer(sock, sock.daddr, sock.dport) :
+ null;
+
+ if (sock.recv_queue.length ||
+ !dest || // connection-less sockets are always ready to read
+ (dest && dest.socket.readyState === dest.socket.CLOSING) ||
+ (dest && dest.socket.readyState === dest.socket.CLOSED)) { // let recv return 0 once closed
+ mask |= (64 | 1);
+ }
+
+ if (!dest || // connection-less sockets are always ready to write
+ (dest && dest.socket.readyState === dest.socket.OPEN)) {
+ mask |= 4;
+ }
+
+ if ((dest && dest.socket.readyState === dest.socket.CLOSING) ||
+ (dest && dest.socket.readyState === dest.socket.CLOSED)) {
+ mask |= 16;
+ }
+
+ return mask;
+ },ioctl:function (sock, request, arg) {
+ switch (request) {
+ case 21531:
+ var bytes = 0;
+ if (sock.recv_queue.length) {
+ bytes = sock.recv_queue[0].data.length;
+ }
+ HEAP32[((arg)>>2)]=bytes;
+ return 0;
+ default:
+ return ERRNO_CODES.EINVAL;
+ }
+ },close:function (sock) {
+ // if we've spawned a listen server, close it
+ if (sock.server) {
+ try {
+ sock.server.close();
+ } catch (e) {
+ }
+ sock.server = null;
+ }
+ // close any peer connections
+ var peers = Object.keys(sock.peers);
+ for (var i = 0; i < peers.length; i++) {
+ var peer = sock.peers[peers[i]];
+ try {
+ peer.socket.close();
+ } catch (e) {
+ }
+ SOCKFS.websocket_sock_ops.removePeer(sock, peer);
+ }
+ return 0;
+ },bind:function (sock, addr, port) {
+ if (typeof sock.saddr !== 'undefined' || typeof sock.sport !== 'undefined') {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL); // already bound
+ }
+ sock.saddr = addr;
+ sock.sport = port || _mkport();
+ // in order to emulate dgram sockets, we need to launch a listen server when
+ // binding on a connection-less socket
+ // note: this is only required on the server side
+ if (sock.type === 2) {
+ // close the existing server if it exists
+ if (sock.server) {
+ sock.server.close();
+ sock.server = null;
+ }
+ // swallow error operation not supported error that occurs when binding in the
+ // browser where this isn't supported
+ try {
+ sock.sock_ops.listen(sock, 0);
+ } catch (e) {
+ if (!(e instanceof FS.ErrnoError)) throw e;
+ if (e.errno !== ERRNO_CODES.EOPNOTSUPP) throw e;
+ }
+ }
+ },connect:function (sock, addr, port) {
+ if (sock.server) {
+ throw new FS.ErrnoError(ERRNO_CODS.EOPNOTSUPP);
+ }
+
+ // TODO autobind
+ // if (!sock.addr && sock.type == 2) {
+ // }
+
+ // early out if we're already connected / in the middle of connecting
+ if (typeof sock.daddr !== 'undefined' && typeof sock.dport !== 'undefined') {
+ var dest = SOCKFS.websocket_sock_ops.getPeer(sock, sock.daddr, sock.dport);
+ if (dest) {
+ if (dest.socket.readyState === dest.socket.CONNECTING) {
+ throw new FS.ErrnoError(ERRNO_CODES.EALREADY);
+ } else {
+ throw new FS.ErrnoError(ERRNO_CODES.EISCONN);
+ }
+ }
+ }
+
+ // add the socket to our peer list and set our
+ // destination address / port to match
+ var peer = SOCKFS.websocket_sock_ops.createPeer(sock, addr, port);
+ sock.daddr = peer.addr;
+ sock.dport = peer.port;
+
+ // always "fail" in non-blocking mode
+ throw new FS.ErrnoError(ERRNO_CODES.EINPROGRESS);
+ },listen:function (sock, backlog) {
+ if (!ENVIRONMENT_IS_NODE) {
+ throw new FS.ErrnoError(ERRNO_CODES.EOPNOTSUPP);
+ }
+ if (sock.server) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL); // already listening
+ }
+ var WebSocketServer = require('ws').Server;
+ var host = sock.saddr;
+ sock.server = new WebSocketServer({
+ host: host,
+ port: sock.sport
+ // TODO support backlog
+ });
+
+ sock.server.on('connection', function(ws) {
+ if (sock.type === 1) {
+ var newsock = SOCKFS.createSocket(sock.family, sock.type, sock.protocol);
+
+ // create a peer on the new socket
+ var peer = SOCKFS.websocket_sock_ops.createPeer(newsock, ws);
+ newsock.daddr = peer.addr;
+ newsock.dport = peer.port;
+
+ // push to queue for accept to pick up
+ sock.pending.push(newsock);
+ } else {
+ // create a peer on the listen socket so calling sendto
+ // with the listen socket and an address will resolve
+ // to the correct client
+ SOCKFS.websocket_sock_ops.createPeer(sock, ws);
+ }
+ });
+ sock.server.on('closed', function() {
+ sock.server = null;
+ });
+ sock.server.on('error', function() {
+ // don't throw
+ });
+ },accept:function (listensock) {
+ if (!listensock.server) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var newsock = listensock.pending.shift();
+ newsock.stream.flags = listensock.stream.flags;
+ return newsock;
+ },getname:function (sock, peer) {
+ var addr, port;
+ if (peer) {
+ if (sock.daddr === undefined || sock.dport === undefined) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ }
+ addr = sock.daddr;
+ port = sock.dport;
+ } else {
+ // TODO saddr and sport will be set for bind()'d UDP sockets, but what
+ // should we be returning for TCP sockets that've been connect()'d?
+ addr = sock.saddr || 0;
+ port = sock.sport || 0;
+ }
+ return { addr: addr, port: port };
+ },sendmsg:function (sock, buffer, offset, length, addr, port) {
+ if (sock.type === 2) {
+ // connection-less sockets will honor the message address,
+ // and otherwise fall back to the bound destination address
+ if (addr === undefined || port === undefined) {
+ addr = sock.daddr;
+ port = sock.dport;
+ }
+ // if there was no address to fall back to, error out
+ if (addr === undefined || port === undefined) {
+ throw new FS.ErrnoError(ERRNO_CODES.EDESTADDRREQ);
+ }
+ } else {
+ // connection-based sockets will only use the bound
+ addr = sock.daddr;
+ port = sock.dport;
+ }
+
+ // find the peer for the destination address
+ var dest = SOCKFS.websocket_sock_ops.getPeer(sock, addr, port);
+
+ // early out if not connected with a connection-based socket
+ if (sock.type === 1) {
+ if (!dest || dest.socket.readyState === dest.socket.CLOSING || dest.socket.readyState === dest.socket.CLOSED) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ } else if (dest.socket.readyState === dest.socket.CONNECTING) {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ }
+
+ // create a copy of the incoming data to send, as the WebSocket API
+ // doesn't work entirely with an ArrayBufferView, it'll just send
+ // the entire underlying buffer
+ var data;
+ if (buffer instanceof Array || buffer instanceof ArrayBuffer) {
+ data = buffer.slice(offset, offset + length);
+ } else { // ArrayBufferView
+ data = buffer.buffer.slice(buffer.byteOffset + offset, buffer.byteOffset + offset + length);
+ }
+
+ // if we're emulating a connection-less dgram socket and don't have
+ // a cached connection, queue the buffer to send upon connect and
+ // lie, saying the data was sent now.
+ if (sock.type === 2) {
+ if (!dest || dest.socket.readyState !== dest.socket.OPEN) {
+ // if we're not connected, open a new connection
+ if (!dest || dest.socket.readyState === dest.socket.CLOSING || dest.socket.readyState === dest.socket.CLOSED) {
+ dest = SOCKFS.websocket_sock_ops.createPeer(sock, addr, port);
+ }
+ dest.dgram_send_queue.push(data);
+ return length;
+ }
+ }
+
+ try {
+ // send the actual data
+ dest.socket.send(data);
+ return length;
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ },recvmsg:function (sock, length) {
+ // http://pubs.opengroup.org/onlinepubs/7908799/xns/recvmsg.html
+ if (sock.type === 1 && sock.server) {
+ // tcp servers should not be recv()'ing on the listen socket
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ }
+
+ var queued = sock.recv_queue.shift();
+ if (!queued) {
+ if (sock.type === 1) {
+ var dest = SOCKFS.websocket_sock_ops.getPeer(sock, sock.daddr, sock.dport);
+
+ if (!dest) {
+ // if we have a destination address but are not connected, error out
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ }
+ else if (dest.socket.readyState === dest.socket.CLOSING || dest.socket.readyState === dest.socket.CLOSED) {
+ // return null if the socket has closed
+ return null;
+ }
+ else {
+ // else, our socket is in a valid state but truly has nothing available
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ } else {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ }
+
+ // queued.data will be an ArrayBuffer if it's unadulterated, but if it's
+ // requeued TCP data it'll be an ArrayBufferView
+ var queuedLength = queued.data.byteLength || queued.data.length;
+ var queuedOffset = queued.data.byteOffset || 0;
+ var queuedBuffer = queued.data.buffer || queued.data;
+ var bytesRead = Math.min(length, queuedLength);
+ var res = {
+ buffer: new Uint8Array(queuedBuffer, queuedOffset, bytesRead),
+ addr: queued.addr,
+ port: queued.port
+ };
+
+
+ // push back any unread data for TCP connections
+ if (sock.type === 1 && bytesRead < queuedLength) {
+ var bytesRemaining = queuedLength - bytesRead;
+ queued.data = new Uint8Array(queuedBuffer, queuedOffset + bytesRead, bytesRemaining);
+ sock.recv_queue.unshift(queued);
+ }
+
+ return res;
+ }}};function _send(fd, buf, len, flags) {
+ var sock = SOCKFS.getSocket(fd);
+ if (!sock) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+ // TODO honor flags
+ return _write(fd, buf, len);
+ }
+
+ function _pwrite(fildes, buf, nbyte, offset) {
+ // ssize_t pwrite(int fildes, const void *buf, size_t nbyte, off_t offset);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/write.html
+ var stream = FS.getStream(fildes);
+ if (!stream) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+ try {
+ var slab = HEAP8;
+ return FS.write(stream, slab, buf, nbyte, offset);
+ } catch (e) {
+ FS.handleFSError(e);
+ return -1;
+ }
+ }function _write(fildes, buf, nbyte) {
+ // ssize_t write(int fildes, const void *buf, size_t nbyte);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/write.html
+ var stream = FS.getStream(fildes);
+ if (!stream) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+
+
+ try {
+ var slab = HEAP8;
+ return FS.write(stream, slab, buf, nbyte);
+ } catch (e) {
+ FS.handleFSError(e);
+ return -1;
+ }
+ }
+
+ function _fileno(stream) {
+ // int fileno(FILE *stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fileno.html
+ stream = FS.getStreamFromPtr(stream);
+ if (!stream) return -1;
+ return stream.fd;
+ }function _fwrite(ptr, size, nitems, stream) {
+ // size_t fwrite(const void *restrict ptr, size_t size, size_t nitems, FILE *restrict stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fwrite.html
+ var bytesToWrite = nitems * size;
+ if (bytesToWrite == 0) return 0;
+ var fd = _fileno(stream);
+ var bytesWritten = _write(fd, ptr, bytesToWrite);
+ if (bytesWritten == -1) {
+ var streamObj = FS.getStreamFromPtr(stream);
+ if (streamObj) streamObj.error = true;
+ return 0;
+ } else {
+ return Math.floor(bytesWritten / size);
+ }
+ }
+
+
+
+ Module["_strlen"] = _strlen;
+
+ function __reallyNegative(x) {
+ return x < 0 || (x === 0 && (1/x) === -Infinity);
+ }function __formatString(format, varargs) {
+ var textIndex = format;
+ var argIndex = 0;
+ function getNextArg(type) {
+ // NOTE: Explicitly ignoring type safety. Otherwise this fails:
+ // int x = 4; printf("%c\n", (char)x);
+ var ret;
+ if (type === 'double') {
+ ret = HEAPF64[(((varargs)+(argIndex))>>3)];
+ } else if (type == 'i64') {
+ ret = [HEAP32[(((varargs)+(argIndex))>>2)],
+ HEAP32[(((varargs)+(argIndex+4))>>2)]];
+
+ } else {
+ type = 'i32'; // varargs are always i32, i64, or double
+ ret = HEAP32[(((varargs)+(argIndex))>>2)];
+ }
+ argIndex += Runtime.getNativeFieldSize(type);
+ return ret;
+ }
+
+ var ret = [];
+ var curr, next, currArg;
+ while(1) {
+ var startTextIndex = textIndex;
+ curr = HEAP8[(textIndex)];
+ if (curr === 0) break;
+ next = HEAP8[((textIndex+1)|0)];
+ if (curr == 37) {
+ // Handle flags.
+ var flagAlwaysSigned = false;
+ var flagLeftAlign = false;
+ var flagAlternative = false;
+ var flagZeroPad = false;
+ var flagPadSign = false;
+ flagsLoop: while (1) {
+ switch (next) {
+ case 43:
+ flagAlwaysSigned = true;
+ break;
+ case 45:
+ flagLeftAlign = true;
+ break;
+ case 35:
+ flagAlternative = true;
+ break;
+ case 48:
+ if (flagZeroPad) {
+ break flagsLoop;
+ } else {
+ flagZeroPad = true;
+ break;
+ }
+ case 32:
+ flagPadSign = true;
+ break;
+ default:
+ break flagsLoop;
+ }
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ }
+
+ // Handle width.
+ var width = 0;
+ if (next == 42) {
+ width = getNextArg('i32');
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ } else {
+ while (next >= 48 && next <= 57) {
+ width = width * 10 + (next - 48);
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ }
+ }
+
+ // Handle precision.
+ var precisionSet = false, precision = -1;
+ if (next == 46) {
+ precision = 0;
+ precisionSet = true;
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ if (next == 42) {
+ precision = getNextArg('i32');
+ textIndex++;
+ } else {
+ while(1) {
+ var precisionChr = HEAP8[((textIndex+1)|0)];
+ if (precisionChr < 48 ||
+ precisionChr > 57) break;
+ precision = precision * 10 + (precisionChr - 48);
+ textIndex++;
+ }
+ }
+ next = HEAP8[((textIndex+1)|0)];
+ }
+ if (precision < 0) {
+ precision = 6; // Standard default.
+ precisionSet = false;
+ }
+
+ // Handle integer sizes. WARNING: These assume a 32-bit architecture!
+ var argSize;
+ switch (String.fromCharCode(next)) {
+ case 'h':
+ var nextNext = HEAP8[((textIndex+2)|0)];
+ if (nextNext == 104) {
+ textIndex++;
+ argSize = 1; // char (actually i32 in varargs)
+ } else {
+ argSize = 2; // short (actually i32 in varargs)
+ }
+ break;
+ case 'l':
+ var nextNext = HEAP8[((textIndex+2)|0)];
+ if (nextNext == 108) {
+ textIndex++;
+ argSize = 8; // long long
+ } else {
+ argSize = 4; // long
+ }
+ break;
+ case 'L': // long long
+ case 'q': // int64_t
+ case 'j': // intmax_t
+ argSize = 8;
+ break;
+ case 'z': // size_t
+ case 't': // ptrdiff_t
+ case 'I': // signed ptrdiff_t or unsigned size_t
+ argSize = 4;
+ break;
+ default:
+ argSize = null;
+ }
+ if (argSize) textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+
+ // Handle type specifier.
+ switch (String.fromCharCode(next)) {
+ case 'd': case 'i': case 'u': case 'o': case 'x': case 'X': case 'p': {
+ // Integer.
+ var signed = next == 100 || next == 105;
+ argSize = argSize || 4;
+ var currArg = getNextArg('i' + (argSize * 8));
+ var argText;
+ // Flatten i64-1 [low, high] into a (slightly rounded) double
+ if (argSize == 8) {
+ currArg = Runtime.makeBigInt(currArg[0], currArg[1], next == 117);
+ }
+ // Truncate to requested size.
+ if (argSize <= 4) {
+ var limit = Math.pow(256, argSize) - 1;
+ currArg = (signed ? reSign : unSign)(currArg & limit, argSize * 8);
+ }
+ // Format the number.
+ var currAbsArg = Math.abs(currArg);
+ var prefix = '';
+ if (next == 100 || next == 105) {
+ argText = reSign(currArg, 8 * argSize, 1).toString(10);
+ } else if (next == 117) {
+ argText = unSign(currArg, 8 * argSize, 1).toString(10);
+ currArg = Math.abs(currArg);
+ } else if (next == 111) {
+ argText = (flagAlternative ? '0' : '') + currAbsArg.toString(8);
+ } else if (next == 120 || next == 88) {
+ prefix = (flagAlternative && currArg != 0) ? '0x' : '';
+ if (currArg < 0) {
+ // Represent negative numbers in hex as 2's complement.
+ currArg = -currArg;
+ argText = (currAbsArg - 1).toString(16);
+ var buffer = [];
+ for (var i = 0; i < argText.length; i++) {
+ buffer.push((0xF - parseInt(argText[i], 16)).toString(16));
+ }
+ argText = buffer.join('');
+ while (argText.length < argSize * 2) argText = 'f' + argText;
+ } else {
+ argText = currAbsArg.toString(16);
+ }
+ if (next == 88) {
+ prefix = prefix.toUpperCase();
+ argText = argText.toUpperCase();
+ }
+ } else if (next == 112) {
+ if (currAbsArg === 0) {
+ argText = '(nil)';
+ } else {
+ prefix = '0x';
+ argText = currAbsArg.toString(16);
+ }
+ }
+ if (precisionSet) {
+ while (argText.length < precision) {
+ argText = '0' + argText;
+ }
+ }
+
+ // Add sign if needed
+ if (currArg >= 0) {
+ if (flagAlwaysSigned) {
+ prefix = '+' + prefix;
+ } else if (flagPadSign) {
+ prefix = ' ' + prefix;
+ }
+ }
+
+ // Move sign to prefix so we zero-pad after the sign
+ if (argText.charAt(0) == '-') {
+ prefix = '-' + prefix;
+ argText = argText.substr(1);
+ }
+
+ // Add padding.
+ while (prefix.length + argText.length < width) {
+ if (flagLeftAlign) {
+ argText += ' ';
+ } else {
+ if (flagZeroPad) {
+ argText = '0' + argText;
+ } else {
+ prefix = ' ' + prefix;
+ }
+ }
+ }
+
+ // Insert the result into the buffer.
+ argText = prefix + argText;
+ argText.split('').forEach(function(chr) {
+ ret.push(chr.charCodeAt(0));
+ });
+ break;
+ }
+ case 'f': case 'F': case 'e': case 'E': case 'g': case 'G': {
+ // Float.
+ var currArg = getNextArg('double');
+ var argText;
+ if (isNaN(currArg)) {
+ argText = 'nan';
+ flagZeroPad = false;
+ } else if (!isFinite(currArg)) {
+ argText = (currArg < 0 ? '-' : '') + 'inf';
+ flagZeroPad = false;
+ } else {
+ var isGeneral = false;
+ var effectivePrecision = Math.min(precision, 20);
+
+ // Convert g/G to f/F or e/E, as per:
+ // http://pubs.opengroup.org/onlinepubs/9699919799/functions/printf.html
+ if (next == 103 || next == 71) {
+ isGeneral = true;
+ precision = precision || 1;
+ var exponent = parseInt(currArg.toExponential(effectivePrecision).split('e')[1], 10);
+ if (precision > exponent && exponent >= -4) {
+ next = ((next == 103) ? 'f' : 'F').charCodeAt(0);
+ precision -= exponent + 1;
+ } else {
+ next = ((next == 103) ? 'e' : 'E').charCodeAt(0);
+ precision--;
+ }
+ effectivePrecision = Math.min(precision, 20);
+ }
+
+ if (next == 101 || next == 69) {
+ argText = currArg.toExponential(effectivePrecision);
+ // Make sure the exponent has at least 2 digits.
+ if (/[eE][-+]\d$/.test(argText)) {
+ argText = argText.slice(0, -1) + '0' + argText.slice(-1);
+ }
+ } else if (next == 102 || next == 70) {
+ argText = currArg.toFixed(effectivePrecision);
+ if (currArg === 0 && __reallyNegative(currArg)) {
+ argText = '-' + argText;
+ }
+ }
+
+ var parts = argText.split('e');
+ if (isGeneral && !flagAlternative) {
+ // Discard trailing zeros and periods.
+ while (parts[0].length > 1 && parts[0].indexOf('.') != -1 &&
+ (parts[0].slice(-1) == '0' || parts[0].slice(-1) == '.')) {
+ parts[0] = parts[0].slice(0, -1);
+ }
+ } else {
+ // Make sure we have a period in alternative mode.
+ if (flagAlternative && argText.indexOf('.') == -1) parts[0] += '.';
+ // Zero pad until required precision.
+ while (precision > effectivePrecision++) parts[0] += '0';
+ }
+ argText = parts[0] + (parts.length > 1 ? 'e' + parts[1] : '');
+
+ // Capitalize 'E' if needed.
+ if (next == 69) argText = argText.toUpperCase();
+
+ // Add sign.
+ if (currArg >= 0) {
+ if (flagAlwaysSigned) {
+ argText = '+' + argText;
+ } else if (flagPadSign) {
+ argText = ' ' + argText;
+ }
+ }
+ }
+
+ // Add padding.
+ while (argText.length < width) {
+ if (flagLeftAlign) {
+ argText += ' ';
+ } else {
+ if (flagZeroPad && (argText[0] == '-' || argText[0] == '+')) {
+ argText = argText[0] + '0' + argText.slice(1);
+ } else {
+ argText = (flagZeroPad ? '0' : ' ') + argText;
+ }
+ }
+ }
+
+ // Adjust case.
+ if (next < 97) argText = argText.toUpperCase();
+
+ // Insert the result into the buffer.
+ argText.split('').forEach(function(chr) {
+ ret.push(chr.charCodeAt(0));
+ });
+ break;
+ }
+ case 's': {
+ // String.
+ var arg = getNextArg('i8*');
+ var argLength = arg ? _strlen(arg) : '(null)'.length;
+ if (precisionSet) argLength = Math.min(argLength, precision);
+ if (!flagLeftAlign) {
+ while (argLength < width--) {
+ ret.push(32);
+ }
+ }
+ if (arg) {
+ for (var i = 0; i < argLength; i++) {
+ ret.push(HEAPU8[((arg++)|0)]);
+ }
+ } else {
+ ret = ret.concat(intArrayFromString('(null)'.substr(0, argLength), true));
+ }
+ if (flagLeftAlign) {
+ while (argLength < width--) {
+ ret.push(32);
+ }
+ }
+ break;
+ }
+ case 'c': {
+ // Character.
+ if (flagLeftAlign) ret.push(getNextArg('i8'));
+ while (--width > 0) {
+ ret.push(32);
+ }
+ if (!flagLeftAlign) ret.push(getNextArg('i8'));
+ break;
+ }
+ case 'n': {
+ // Write the length written so far to the next parameter.
+ var ptr = getNextArg('i32*');
+ HEAP32[((ptr)>>2)]=ret.length;
+ break;
+ }
+ case '%': {
+ // Literal percent sign.
+ ret.push(curr);
+ break;
+ }
+ default: {
+ // Unknown specifiers remain untouched.
+ for (var i = startTextIndex; i < textIndex + 2; i++) {
+ ret.push(HEAP8[(i)]);
+ }
+ }
+ }
+ textIndex += 2;
+ // TODO: Support a/A (hex float) and m (last error) specifiers.
+ // TODO: Support %1${specifier} for arg selection.
+ } else {
+ ret.push(curr);
+ textIndex += 1;
+ }
+ }
+ return ret;
+ }function _fprintf(stream, format, varargs) {
+ // int fprintf(FILE *restrict stream, const char *restrict format, ...);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/printf.html
+ var result = __formatString(format, varargs);
+ var stack = Runtime.stackSave();
+ var ret = _fwrite(allocate(result, 'i8', ALLOC_STACK), 1, result.length, stream);
+ Runtime.stackRestore(stack);
+ return ret;
+ }function _printf(format, varargs) {
+ // int printf(const char *restrict format, ...);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/printf.html
+ var stdout = HEAP32[((_stdout)>>2)];
+ return _fprintf(stdout, format, varargs);
+ }
+
+ function _sbrk(bytes) {
+ // Implement a Linux-like 'memory area' for our 'process'.
+ // Changes the size of the memory area by |bytes|; returns the
+ // address of the previous top ('break') of the memory area
+ // We control the "dynamic" memory - DYNAMIC_BASE to DYNAMICTOP
+ var self = _sbrk;
+ if (!self.called) {
+ DYNAMICTOP = alignMemoryPage(DYNAMICTOP); // make sure we start out aligned
+ self.called = true;
+ assert(Runtime.dynamicAlloc);
+ self.alloc = Runtime.dynamicAlloc;
+ Runtime.dynamicAlloc = function() { abort('cannot dynamically allocate, sbrk now has control') };
+ }
+ var ret = DYNAMICTOP;
+ if (bytes != 0) self.alloc(bytes);
+ return ret; // Previous break location.
+ }
+
+ function _sysconf(name) {
+ // long sysconf(int name);
+ // http://pubs.opengroup.org/onlinepubs/009695399/functions/sysconf.html
+ switch(name) {
+ case 30: return PAGE_SIZE;
+ case 132:
+ case 133:
+ case 12:
+ case 137:
+ case 138:
+ case 15:
+ case 235:
+ case 16:
+ case 17:
+ case 18:
+ case 19:
+ case 20:
+ case 149:
+ case 13:
+ case 10:
+ case 236:
+ case 153:
+ case 9:
+ case 21:
+ case 22:
+ case 159:
+ case 154:
+ case 14:
+ case 77:
+ case 78:
+ case 139:
+ case 80:
+ case 81:
+ case 79:
+ case 82:
+ case 68:
+ case 67:
+ case 164:
+ case 11:
+ case 29:
+ case 47:
+ case 48:
+ case 95:
+ case 52:
+ case 51:
+ case 46:
+ return 200809;
+ case 27:
+ case 246:
+ case 127:
+ case 128:
+ case 23:
+ case 24:
+ case 160:
+ case 161:
+ case 181:
+ case 182:
+ case 242:
+ case 183:
+ case 184:
+ case 243:
+ case 244:
+ case 245:
+ case 165:
+ case 178:
+ case 179:
+ case 49:
+ case 50:
+ case 168:
+ case 169:
+ case 175:
+ case 170:
+ case 171:
+ case 172:
+ case 97:
+ case 76:
+ case 32:
+ case 173:
+ case 35:
+ return -1;
+ case 176:
+ case 177:
+ case 7:
+ case 155:
+ case 8:
+ case 157:
+ case 125:
+ case 126:
+ case 92:
+ case 93:
+ case 129:
+ case 130:
+ case 131:
+ case 94:
+ case 91:
+ return 1;
+ case 74:
+ case 60:
+ case 69:
+ case 70:
+ case 4:
+ return 1024;
+ case 31:
+ case 42:
+ case 72:
+ return 32;
+ case 87:
+ case 26:
+ case 33:
+ return 2147483647;
+ case 34:
+ case 1:
+ return 47839;
+ case 38:
+ case 36:
+ return 99;
+ case 43:
+ case 37:
+ return 2048;
+ case 0: return 2097152;
+ case 3: return 65536;
+ case 28: return 32768;
+ case 44: return 32767;
+ case 75: return 16384;
+ case 39: return 1000;
+ case 89: return 700;
+ case 71: return 256;
+ case 40: return 255;
+ case 2: return 100;
+ case 180: return 64;
+ case 25: return 20;
+ case 5: return 16;
+ case 6: return 6;
+ case 73: return 4;
+ case 84: return 1;
+ }
+ ___setErrNo(ERRNO_CODES.EINVAL);
+ return -1;
+ }
+
+
+ Module["_memset"] = _memset;
+
+ function ___errno_location() {
+ return ___errno_state;
+ }
+
+ function _abort() {
+ Module['abort']();
+ }
+
+ var Browser={mainLoop:{scheduler:null,method:"",shouldPause:false,paused:false,queue:[],pause:function () {
+ Browser.mainLoop.shouldPause = true;
+ },resume:function () {
+ if (Browser.mainLoop.paused) {
+ Browser.mainLoop.paused = false;
+ Browser.mainLoop.scheduler();
+ }
+ Browser.mainLoop.shouldPause = false;
+ },updateStatus:function () {
+ if (Module['setStatus']) {
+ var message = Module['statusMessage'] || 'Please wait...';
+ var remaining = Browser.mainLoop.remainingBlockers;
+ var expected = Browser.mainLoop.expectedBlockers;
+ if (remaining) {
+ if (remaining < expected) {
+ Module['setStatus'](message + ' (' + (expected - remaining) + '/' + expected + ')');
+ } else {
+ Module['setStatus'](message);
+ }
+ } else {
+ Module['setStatus']('');
+ }
+ }
+ }},isFullScreen:false,pointerLock:false,moduleContextCreatedCallbacks:[],workers:[],init:function () {
+ if (!Module["preloadPlugins"]) Module["preloadPlugins"] = []; // needs to exist even in workers
+
+ if (Browser.initted || ENVIRONMENT_IS_WORKER) return;
+ Browser.initted = true;
+
+ try {
+ new Blob();
+ Browser.hasBlobConstructor = true;
+ } catch(e) {
+ Browser.hasBlobConstructor = false;
+ console.log("warning: no blob constructor, cannot create blobs with mimetypes");
+ }
+ Browser.BlobBuilder = typeof MozBlobBuilder != "undefined" ? MozBlobBuilder : (typeof WebKitBlobBuilder != "undefined" ? WebKitBlobBuilder : (!Browser.hasBlobConstructor ? console.log("warning: no BlobBuilder") : null));
+ Browser.URLObject = typeof window != "undefined" ? (window.URL ? window.URL : window.webkitURL) : undefined;
+ if (!Module.noImageDecoding && typeof Browser.URLObject === 'undefined') {
+ console.log("warning: Browser does not support creating object URLs. Built-in browser image decoding will not be available.");
+ Module.noImageDecoding = true;
+ }
+
+ // Support for plugins that can process preloaded files. You can add more of these to
+ // your app by creating and appending to Module.preloadPlugins.
+ //
+ // Each plugin is asked if it can handle a file based on the file's name. If it can,
+ // it is given the file's raw data. When it is done, it calls a callback with the file's
+ // (possibly modified) data. For example, a plugin might decompress a file, or it
+ // might create some side data structure for use later (like an Image element, etc.).
+
+ var imagePlugin = {};
+ imagePlugin['canHandle'] = function imagePlugin_canHandle(name) {
+ return !Module.noImageDecoding && /\.(jpg|jpeg|png|bmp)$/i.test(name);
+ };
+ imagePlugin['handle'] = function imagePlugin_handle(byteArray, name, onload, onerror) {
+ var b = null;
+ if (Browser.hasBlobConstructor) {
+ try {
+ b = new Blob([byteArray], { type: Browser.getMimetype(name) });
+ if (b.size !== byteArray.length) { // Safari bug #118630
+ // Safari's Blob can only take an ArrayBuffer
+ b = new Blob([(new Uint8Array(byteArray)).buffer], { type: Browser.getMimetype(name) });
+ }
+ } catch(e) {
+ Runtime.warnOnce('Blob constructor present but fails: ' + e + '; falling back to blob builder');
+ }
+ }
+ if (!b) {
+ var bb = new Browser.BlobBuilder();
+ bb.append((new Uint8Array(byteArray)).buffer); // we need to pass a buffer, and must copy the array to get the right data range
+ b = bb.getBlob();
+ }
+ var url = Browser.URLObject.createObjectURL(b);
+ var img = new Image();
+ img.onload = function img_onload() {
+ assert(img.complete, 'Image ' + name + ' could not be decoded');
+ var canvas = document.createElement('canvas');
+ canvas.width = img.width;
+ canvas.height = img.height;
+ var ctx = canvas.getContext('2d');
+ ctx.drawImage(img, 0, 0);
+ Module["preloadedImages"][name] = canvas;
+ Browser.URLObject.revokeObjectURL(url);
+ if (onload) onload(byteArray);
+ };
+ img.onerror = function img_onerror(event) {
+ console.log('Image ' + url + ' could not be decoded');
+ if (onerror) onerror();
+ };
+ img.src = url;
+ };
+ Module['preloadPlugins'].push(imagePlugin);
+
+ var audioPlugin = {};
+ audioPlugin['canHandle'] = function audioPlugin_canHandle(name) {
+ return !Module.noAudioDecoding && name.substr(-4) in { '.ogg': 1, '.wav': 1, '.mp3': 1 };
+ };
+ audioPlugin['handle'] = function audioPlugin_handle(byteArray, name, onload, onerror) {
+ var done = false;
+ function finish(audio) {
+ if (done) return;
+ done = true;
+ Module["preloadedAudios"][name] = audio;
+ if (onload) onload(byteArray);
+ }
+ function fail() {
+ if (done) return;
+ done = true;
+ Module["preloadedAudios"][name] = new Audio(); // empty shim
+ if (onerror) onerror();
+ }
+ if (Browser.hasBlobConstructor) {
+ try {
+ var b = new Blob([byteArray], { type: Browser.getMimetype(name) });
+ } catch(e) {
+ return fail();
+ }
+ var url = Browser.URLObject.createObjectURL(b); // XXX we never revoke this!
+ var audio = new Audio();
+ audio.addEventListener('canplaythrough', function() { finish(audio) }, false); // use addEventListener due to chromium bug 124926
+ audio.onerror = function audio_onerror(event) {
+ if (done) return;
+ console.log('warning: browser could not fully decode audio ' + name + ', trying slower base64 approach');
+ function encode64(data) {
+ var BASE = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';
+ var PAD = '=';
+ var ret = '';
+ var leftchar = 0;
+ var leftbits = 0;
+ for (var i = 0; i < data.length; i++) {
+ leftchar = (leftchar << 8) | data[i];
+ leftbits += 8;
+ while (leftbits >= 6) {
+ var curr = (leftchar >> (leftbits-6)) & 0x3f;
+ leftbits -= 6;
+ ret += BASE[curr];
+ }
+ }
+ if (leftbits == 2) {
+ ret += BASE[(leftchar&3) << 4];
+ ret += PAD + PAD;
+ } else if (leftbits == 4) {
+ ret += BASE[(leftchar&0xf) << 2];
+ ret += PAD;
+ }
+ return ret;
+ }
+ audio.src = 'data:audio/x-' + name.substr(-3) + ';base64,' + encode64(byteArray);
+ finish(audio); // we don't wait for confirmation this worked - but it's worth trying
+ };
+ audio.src = url;
+ // workaround for chrome bug 124926 - we do not always get oncanplaythrough or onerror
+ Browser.safeSetTimeout(function() {
+ finish(audio); // try to use it even though it is not necessarily ready to play
+ }, 10000);
+ } else {
+ return fail();
+ }
+ };
+ Module['preloadPlugins'].push(audioPlugin);
+
+ // Canvas event setup
+
+ var canvas = Module['canvas'];
+
+ // forced aspect ratio can be enabled by defining 'forcedAspectRatio' on Module
+ // Module['forcedAspectRatio'] = 4 / 3;
+
+ canvas.requestPointerLock = canvas['requestPointerLock'] ||
+ canvas['mozRequestPointerLock'] ||
+ canvas['webkitRequestPointerLock'] ||
+ canvas['msRequestPointerLock'] ||
+ function(){};
+ canvas.exitPointerLock = document['exitPointerLock'] ||
+ document['mozExitPointerLock'] ||
+ document['webkitExitPointerLock'] ||
+ document['msExitPointerLock'] ||
+ function(){}; // no-op if function does not exist
+ canvas.exitPointerLock = canvas.exitPointerLock.bind(document);
+
+ function pointerLockChange() {
+ Browser.pointerLock = document['pointerLockElement'] === canvas ||
+ document['mozPointerLockElement'] === canvas ||
+ document['webkitPointerLockElement'] === canvas ||
+ document['msPointerLockElement'] === canvas;
+ }
+
+ document.addEventListener('pointerlockchange', pointerLockChange, false);
+ document.addEventListener('mozpointerlockchange', pointerLockChange, false);
+ document.addEventListener('webkitpointerlockchange', pointerLockChange, false);
+ document.addEventListener('mspointerlockchange', pointerLockChange, false);
+
+ if (Module['elementPointerLock']) {
+ canvas.addEventListener("click", function(ev) {
+ if (!Browser.pointerLock && canvas.requestPointerLock) {
+ canvas.requestPointerLock();
+ ev.preventDefault();
+ }
+ }, false);
+ }
+ },createContext:function (canvas, useWebGL, setInModule, webGLContextAttributes) {
+ var ctx;
+ var errorInfo = '?';
+ function onContextCreationError(event) {
+ errorInfo = event.statusMessage || errorInfo;
+ }
+ try {
+ if (useWebGL) {
+ var contextAttributes = {
+ antialias: false,
+ alpha: false
+ };
+
+ if (webGLContextAttributes) {
+ for (var attribute in webGLContextAttributes) {
+ contextAttributes[attribute] = webGLContextAttributes[attribute];
+ }
+ }
+
+
+ canvas.addEventListener('webglcontextcreationerror', onContextCreationError, false);
+ try {
+ ['experimental-webgl', 'webgl'].some(function(webglId) {
+ return ctx = canvas.getContext(webglId, contextAttributes);
+ });
+ } finally {
+ canvas.removeEventListener('webglcontextcreationerror', onContextCreationError, false);
+ }
+ } else {
+ ctx = canvas.getContext('2d');
+ }
+ if (!ctx) throw ':(';
+ } catch (e) {
+ Module.print('Could not create canvas: ' + [errorInfo, e]);
+ return null;
+ }
+ if (useWebGL) {
+ // Set the background of the WebGL canvas to black
+ canvas.style.backgroundColor = "black";
+
+ // Warn on context loss
+ canvas.addEventListener('webglcontextlost', function(event) {
+ alert('WebGL context lost. You will need to reload the page.');
+ }, false);
+ }
+ if (setInModule) {
+ GLctx = Module.ctx = ctx;
+ Module.useWebGL = useWebGL;
+ Browser.moduleContextCreatedCallbacks.forEach(function(callback) { callback() });
+ Browser.init();
+ }
+ return ctx;
+ },destroyContext:function (canvas, useWebGL, setInModule) {},fullScreenHandlersInstalled:false,lockPointer:undefined,resizeCanvas:undefined,requestFullScreen:function (lockPointer, resizeCanvas) {
+ Browser.lockPointer = lockPointer;
+ Browser.resizeCanvas = resizeCanvas;
+ if (typeof Browser.lockPointer === 'undefined') Browser.lockPointer = true;
+ if (typeof Browser.resizeCanvas === 'undefined') Browser.resizeCanvas = false;
+
+ var canvas = Module['canvas'];
+ function fullScreenChange() {
+ Browser.isFullScreen = false;
+ var canvasContainer = canvas.parentNode;
+ if ((document['webkitFullScreenElement'] || document['webkitFullscreenElement'] ||
+ document['mozFullScreenElement'] || document['mozFullscreenElement'] ||
+ document['fullScreenElement'] || document['fullscreenElement'] ||
+ document['msFullScreenElement'] || document['msFullscreenElement'] ||
+ document['webkitCurrentFullScreenElement']) === canvasContainer) {
+ canvas.cancelFullScreen = document['cancelFullScreen'] ||
+ document['mozCancelFullScreen'] ||
+ document['webkitCancelFullScreen'] ||
+ document['msExitFullscreen'] ||
+ document['exitFullscreen'] ||
+ function() {};
+ canvas.cancelFullScreen = canvas.cancelFullScreen.bind(document);
+ if (Browser.lockPointer) canvas.requestPointerLock();
+ Browser.isFullScreen = true;
+ if (Browser.resizeCanvas) Browser.setFullScreenCanvasSize();
+ } else {
+
+ // remove the full screen specific parent of the canvas again to restore the HTML structure from before going full screen
+ canvasContainer.parentNode.insertBefore(canvas, canvasContainer);
+ canvasContainer.parentNode.removeChild(canvasContainer);
+
+ if (Browser.resizeCanvas) Browser.setWindowedCanvasSize();
+ }
+ if (Module['onFullScreen']) Module['onFullScreen'](Browser.isFullScreen);
+ Browser.updateCanvasDimensions(canvas);
+ }
+
+ if (!Browser.fullScreenHandlersInstalled) {
+ Browser.fullScreenHandlersInstalled = true;
+ document.addEventListener('fullscreenchange', fullScreenChange, false);
+ document.addEventListener('mozfullscreenchange', fullScreenChange, false);
+ document.addEventListener('webkitfullscreenchange', fullScreenChange, false);
+ document.addEventListener('MSFullscreenChange', fullScreenChange, false);
+ }
+
+ // create a new parent to ensure the canvas has no siblings. this allows browsers to optimize full screen performance when its parent is the full screen root
+ var canvasContainer = document.createElement("div");
+ canvas.parentNode.insertBefore(canvasContainer, canvas);
+ canvasContainer.appendChild(canvas);
+
+ // use parent of canvas as full screen root to allow aspect ratio correction (Firefox stretches the root to screen size)
+ canvasContainer.requestFullScreen = canvasContainer['requestFullScreen'] ||
+ canvasContainer['mozRequestFullScreen'] ||
+ canvasContainer['msRequestFullscreen'] ||
+ (canvasContainer['webkitRequestFullScreen'] ? function() { canvasContainer['webkitRequestFullScreen'](Element['ALLOW_KEYBOARD_INPUT']) } : null);
+ canvasContainer.requestFullScreen();
+ },requestAnimationFrame:function requestAnimationFrame(func) {
+ if (typeof window === 'undefined') { // Provide fallback to setTimeout if window is undefined (e.g. in Node.js)
+ setTimeout(func, 1000/60);
+ } else {
+ if (!window.requestAnimationFrame) {
+ window.requestAnimationFrame = window['requestAnimationFrame'] ||
+ window['mozRequestAnimationFrame'] ||
+ window['webkitRequestAnimationFrame'] ||
+ window['msRequestAnimationFrame'] ||
+ window['oRequestAnimationFrame'] ||
+ window['setTimeout'];
+ }
+ window.requestAnimationFrame(func);
+ }
+ },safeCallback:function (func) {
+ return function() {
+ if (!ABORT) return func.apply(null, arguments);
+ };
+ },safeRequestAnimationFrame:function (func) {
+ return Browser.requestAnimationFrame(function() {
+ if (!ABORT) func();
+ });
+ },safeSetTimeout:function (func, timeout) {
+ return setTimeout(function() {
+ if (!ABORT) func();
+ }, timeout);
+ },safeSetInterval:function (func, timeout) {
+ return setInterval(function() {
+ if (!ABORT) func();
+ }, timeout);
+ },getMimetype:function (name) {
+ return {
+ 'jpg': 'image/jpeg',
+ 'jpeg': 'image/jpeg',
+ 'png': 'image/png',
+ 'bmp': 'image/bmp',
+ 'ogg': 'audio/ogg',
+ 'wav': 'audio/wav',
+ 'mp3': 'audio/mpeg'
+ }[name.substr(name.lastIndexOf('.')+1)];
+ },getUserMedia:function (func) {
+ if(!window.getUserMedia) {
+ window.getUserMedia = navigator['getUserMedia'] ||
+ navigator['mozGetUserMedia'];
+ }
+ window.getUserMedia(func);
+ },getMovementX:function (event) {
+ return event['movementX'] ||
+ event['mozMovementX'] ||
+ event['webkitMovementX'] ||
+ 0;
+ },getMovementY:function (event) {
+ return event['movementY'] ||
+ event['mozMovementY'] ||
+ event['webkitMovementY'] ||
+ 0;
+ },getMouseWheelDelta:function (event) {
+ return Math.max(-1, Math.min(1, event.type === 'DOMMouseScroll' ? event.detail : -event.wheelDelta));
+ },mouseX:0,mouseY:0,mouseMovementX:0,mouseMovementY:0,calculateMouseEvent:function (event) { // event should be mousemove, mousedown or mouseup
+ if (Browser.pointerLock) {
+ // When the pointer is locked, calculate the coordinates
+ // based on the movement of the mouse.
+ // Workaround for Firefox bug 764498
+ if (event.type != 'mousemove' &&
+ ('mozMovementX' in event)) {
+ Browser.mouseMovementX = Browser.mouseMovementY = 0;
+ } else {
+ Browser.mouseMovementX = Browser.getMovementX(event);
+ Browser.mouseMovementY = Browser.getMovementY(event);
+ }
+
+ // check if SDL is available
+ if (typeof SDL != "undefined") {
+ Browser.mouseX = SDL.mouseX + Browser.mouseMovementX;
+ Browser.mouseY = SDL.mouseY + Browser.mouseMovementY;
+ } else {
+ // just add the mouse delta to the current absolut mouse position
+ // FIXME: ideally this should be clamped against the canvas size and zero
+ Browser.mouseX += Browser.mouseMovementX;
+ Browser.mouseY += Browser.mouseMovementY;
+ }
+ } else {
+ // Otherwise, calculate the movement based on the changes
+ // in the coordinates.
+ var rect = Module["canvas"].getBoundingClientRect();
+ var x, y;
+
+ // Neither .scrollX or .pageXOffset are defined in a spec, but
+ // we prefer .scrollX because it is currently in a spec draft.
+ // (see: http://www.w3.org/TR/2013/WD-cssom-view-20131217/)
+ var scrollX = ((typeof window.scrollX !== 'undefined') ? window.scrollX : window.pageXOffset);
+ var scrollY = ((typeof window.scrollY !== 'undefined') ? window.scrollY : window.pageYOffset);
+ if (event.type == 'touchstart' ||
+ event.type == 'touchend' ||
+ event.type == 'touchmove') {
+ var t = event.touches.item(0);
+ if (t) {
+ x = t.pageX - (scrollX + rect.left);
+ y = t.pageY - (scrollY + rect.top);
+ } else {
+ return;
+ }
+ } else {
+ x = event.pageX - (scrollX + rect.left);
+ y = event.pageY - (scrollY + rect.top);
+ }
+
+ // the canvas might be CSS-scaled compared to its backbuffer;
+ // SDL-using content will want mouse coordinates in terms
+ // of backbuffer units.
+ var cw = Module["canvas"].width;
+ var ch = Module["canvas"].height;
+ x = x * (cw / rect.width);
+ y = y * (ch / rect.height);
+
+ Browser.mouseMovementX = x - Browser.mouseX;
+ Browser.mouseMovementY = y - Browser.mouseY;
+ Browser.mouseX = x;
+ Browser.mouseY = y;
+ }
+ },xhrLoad:function (url, onload, onerror) {
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, true);
+ xhr.responseType = 'arraybuffer';
+ xhr.onload = function xhr_onload() {
+ if (xhr.status == 200 || (xhr.status == 0 && xhr.response)) { // file URLs can return 0
+ onload(xhr.response);
+ } else {
+ onerror();
+ }
+ };
+ xhr.onerror = onerror;
+ xhr.send(null);
+ },asyncLoad:function (url, onload, onerror, noRunDep) {
+ Browser.xhrLoad(url, function(arrayBuffer) {
+ assert(arrayBuffer, 'Loading data file "' + url + '" failed (no arrayBuffer).');
+ onload(new Uint8Array(arrayBuffer));
+ if (!noRunDep) removeRunDependency('al ' + url);
+ }, function(event) {
+ if (onerror) {
+ onerror();
+ } else {
+ throw 'Loading data file "' + url + '" failed.';
+ }
+ });
+ if (!noRunDep) addRunDependency('al ' + url);
+ },resizeListeners:[],updateResizeListeners:function () {
+ var canvas = Module['canvas'];
+ Browser.resizeListeners.forEach(function(listener) {
+ listener(canvas.width, canvas.height);
+ });
+ },setCanvasSize:function (width, height, noUpdates) {
+ var canvas = Module['canvas'];
+ Browser.updateCanvasDimensions(canvas, width, height);
+ if (!noUpdates) Browser.updateResizeListeners();
+ },windowedWidth:0,windowedHeight:0,setFullScreenCanvasSize:function () {
+ // check if SDL is available
+ if (typeof SDL != "undefined") {
+ var flags = HEAPU32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)];
+ flags = flags | 0x00800000; // set SDL_FULLSCREEN flag
+ HEAP32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)]=flags
+ }
+ Browser.updateResizeListeners();
+ },setWindowedCanvasSize:function () {
+ // check if SDL is available
+ if (typeof SDL != "undefined") {
+ var flags = HEAPU32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)];
+ flags = flags & ~0x00800000; // clear SDL_FULLSCREEN flag
+ HEAP32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)]=flags
+ }
+ Browser.updateResizeListeners();
+ },updateCanvasDimensions:function (canvas, wNative, hNative) {
+ if (wNative && hNative) {
+ canvas.widthNative = wNative;
+ canvas.heightNative = hNative;
+ } else {
+ wNative = canvas.widthNative;
+ hNative = canvas.heightNative;
+ }
+ var w = wNative;
+ var h = hNative;
+ if (Module['forcedAspectRatio'] && Module['forcedAspectRatio'] > 0) {
+ if (w/h < Module['forcedAspectRatio']) {
+ w = Math.round(h * Module['forcedAspectRatio']);
+ } else {
+ h = Math.round(w / Module['forcedAspectRatio']);
+ }
+ }
+ if (((document['webkitFullScreenElement'] || document['webkitFullscreenElement'] ||
+ document['mozFullScreenElement'] || document['mozFullscreenElement'] ||
+ document['fullScreenElement'] || document['fullscreenElement'] ||
+ document['msFullScreenElement'] || document['msFullscreenElement'] ||
+ document['webkitCurrentFullScreenElement']) === canvas.parentNode) && (typeof screen != 'undefined')) {
+ var factor = Math.min(screen.width / w, screen.height / h);
+ w = Math.round(w * factor);
+ h = Math.round(h * factor);
+ }
+ if (Browser.resizeCanvas) {
+ if (canvas.width != w) canvas.width = w;
+ if (canvas.height != h) canvas.height = h;
+ if (typeof canvas.style != 'undefined') {
+ canvas.style.removeProperty( "width");
+ canvas.style.removeProperty("height");
+ }
+ } else {
+ if (canvas.width != wNative) canvas.width = wNative;
+ if (canvas.height != hNative) canvas.height = hNative;
+ if (typeof canvas.style != 'undefined') {
+ if (w != wNative || h != hNative) {
+ canvas.style.setProperty( "width", w + "px", "important");
+ canvas.style.setProperty("height", h + "px", "important");
+ } else {
+ canvas.style.removeProperty( "width");
+ canvas.style.removeProperty("height");
+ }
+ }
+ }
+ }};
+
+ function _time(ptr) {
+ var ret = Math.floor(Date.now()/1000);
+ if (ptr) {
+ HEAP32[((ptr)>>2)]=ret;
+ }
+ return ret;
+ }
+
+
+
+ function _emscripten_memcpy_big(dest, src, num) {
+ HEAPU8.set(HEAPU8.subarray(src, src+num), dest);
+ return dest;
+ }
+ Module["_memcpy"] = _memcpy;
+FS.staticInit();__ATINIT__.unshift({ func: function() { if (!Module["noFSInit"] && !FS.init.initialized) FS.init() } });__ATMAIN__.push({ func: function() { FS.ignorePermissions = false } });__ATEXIT__.push({ func: function() { FS.quit() } });Module["FS_createFolder"] = FS.createFolder;Module["FS_createPath"] = FS.createPath;Module["FS_createDataFile"] = FS.createDataFile;Module["FS_createPreloadedFile"] = FS.createPreloadedFile;Module["FS_createLazyFile"] = FS.createLazyFile;Module["FS_createLink"] = FS.createLink;Module["FS_createDevice"] = FS.createDevice;
+___errno_state = Runtime.staticAlloc(4); HEAP32[((___errno_state)>>2)]=0;
+__ATINIT__.unshift({ func: function() { TTY.init() } });__ATEXIT__.push({ func: function() { TTY.shutdown() } });TTY.utf8 = new Runtime.UTF8Processor();
+if (ENVIRONMENT_IS_NODE) { var fs = require("fs"); NODEFS.staticInit(); }
+__ATINIT__.push({ func: function() { SOCKFS.root = FS.mount(SOCKFS, {}, null); } });
+Module["requestFullScreen"] = function Module_requestFullScreen(lockPointer, resizeCanvas) { Browser.requestFullScreen(lockPointer, resizeCanvas) };
+ Module["requestAnimationFrame"] = function Module_requestAnimationFrame(func) { Browser.requestAnimationFrame(func) };
+ Module["setCanvasSize"] = function Module_setCanvasSize(width, height, noUpdates) { Browser.setCanvasSize(width, height, noUpdates) };
+ Module["pauseMainLoop"] = function Module_pauseMainLoop() { Browser.mainLoop.pause() };
+ Module["resumeMainLoop"] = function Module_resumeMainLoop() { Browser.mainLoop.resume() };
+ Module["getUserMedia"] = function Module_getUserMedia() { Browser.getUserMedia() }
+STACK_BASE = STACKTOP = Runtime.alignMemory(STATICTOP);
+
+staticSealed = true; // seal the static portion of memory
+
+STACK_MAX = STACK_BASE + 5242880;
+
+DYNAMIC_BASE = DYNAMICTOP = Runtime.alignMemory(STACK_MAX);
+
+assert(DYNAMIC_BASE < TOTAL_MEMORY, "TOTAL_MEMORY not big enough for stack");
+
+
+var Math_min = Math.min;
+function asmPrintInt(x, y) {
+ Module.print('int ' + x + ',' + y);// + ' ' + new Error().stack);
+}
+function asmPrintFloat(x, y) {
+ Module.print('float ' + x + ',' + y);// + ' ' + new Error().stack);
+}
+// EMSCRIPTEN_START_ASM
+var asm = Wasm.instantiateModuleFromAsm((function Module(global, env, buffer) {
+ 'use asm';
+ var HEAP8 = new global.Int8Array(buffer);
+ var HEAP16 = new global.Int16Array(buffer);
+ var HEAP32 = new global.Int32Array(buffer);
+ var HEAPU8 = new global.Uint8Array(buffer);
+ var HEAPU16 = new global.Uint16Array(buffer);
+ var HEAPU32 = new global.Uint32Array(buffer);
+ var HEAPF32 = new global.Float32Array(buffer);
+ var HEAPF64 = new global.Float64Array(buffer);
+
+ var STACKTOP=env.STACKTOP|0;
+ var STACK_MAX=env.STACK_MAX|0;
+ var tempDoublePtr=env.tempDoublePtr|0;
+ var ABORT=env.ABORT|0;
+
+ var __THREW__ = 0;
+ var threwValue = 0;
+ var setjmpId = 0;
+ var undef = 0;
+ var nan = +env.NaN, inf = +env.Infinity;
+ var tempInt = 0, tempBigInt = 0, tempBigIntP = 0, tempBigIntS = 0, tempBigIntR = 0.0, tempBigIntI = 0, tempBigIntD = 0, tempValue = 0, tempDouble = 0.0;
+
+ var tempRet0 = 0;
+ var tempRet1 = 0;
+ var tempRet2 = 0;
+ var tempRet3 = 0;
+ var tempRet4 = 0;
+ var tempRet5 = 0;
+ var tempRet6 = 0;
+ var tempRet7 = 0;
+ var tempRet8 = 0;
+ var tempRet9 = 0;
+ var Math_floor=global.Math.floor;
+ var Math_abs=global.Math.abs;
+ var Math_sqrt=global.Math.sqrt;
+ var Math_pow=global.Math.pow;
+ var Math_cos=global.Math.cos;
+ var Math_sin=global.Math.sin;
+ var Math_tan=global.Math.tan;
+ var Math_acos=global.Math.acos;
+ var Math_asin=global.Math.asin;
+ var Math_atan=global.Math.atan;
+ var Math_atan2=global.Math.atan2;
+ var Math_exp=global.Math.exp;
+ var Math_log=global.Math.log;
+ var Math_ceil=global.Math.ceil;
+ var Math_imul=global.Math.imul;
+ var abort=env.abort;
+ var assert=env.assert;
+ var asmPrintInt=env.asmPrintInt;
+ var asmPrintFloat=env.asmPrintFloat;
+ var Math_min=env.min;
+ var _fflush=env._fflush;
+ var _emscripten_memcpy_big=env._emscripten_memcpy_big;
+ var _printf=env._printf;
+ var _send=env._send;
+ var _pwrite=env._pwrite;
+ var _abort=env._abort;
+ var ___setErrNo=env.___setErrNo;
+ var _fwrite=env._fwrite;
+ var _sbrk=env._sbrk;
+ var _time=env._time;
+ var _mkport=env._mkport;
+ var __reallyNegative=env.__reallyNegative;
+ var __formatString=env.__formatString;
+ var _fileno=env._fileno;
+ var _write=env._write;
+ var _fprintf=env._fprintf;
+ var _sysconf=env._sysconf;
+ var ___errno_location=env.___errno_location;
+ var tempFloat = 0.0;
+
+// EMSCRIPTEN_START_FUNCS
+function _malloc(i12) {
+ i12 = i12 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0, i23 = 0, i24 = 0, i25 = 0, i26 = 0, i27 = 0, i28 = 0, i29 = 0, i30 = 0, i31 = 0, i32 = 0;
+ i1 = STACKTOP;
+ do {
+ if (i12 >>> 0 < 245) {
+ if (i12 >>> 0 < 11) {
+ i12 = 16;
+ } else {
+ i12 = i12 + 11 & -8;
+ }
+ i20 = i12 >>> 3;
+ i18 = HEAP32[10] | 0;
+ i21 = i18 >>> i20;
+ if ((i21 & 3 | 0) != 0) {
+ i6 = (i21 & 1 ^ 1) + i20 | 0;
+ i5 = i6 << 1;
+ i3 = 80 + (i5 << 2) | 0;
+ i5 = 80 + (i5 + 2 << 2) | 0;
+ i7 = HEAP32[i5 >> 2] | 0;
+ i2 = i7 + 8 | 0;
+ i4 = HEAP32[i2 >> 2] | 0;
+ do {
+ if ((i3 | 0) != (i4 | 0)) {
+ if (i4 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i8 = i4 + 12 | 0;
+ if ((HEAP32[i8 >> 2] | 0) == (i7 | 0)) {
+ HEAP32[i8 >> 2] = i3;
+ HEAP32[i5 >> 2] = i4;
+ break;
+ } else {
+ _abort();
+ }
+ } else {
+ HEAP32[10] = i18 & ~(1 << i6);
+ }
+ } while (0);
+ i32 = i6 << 3;
+ HEAP32[i7 + 4 >> 2] = i32 | 3;
+ i32 = i7 + (i32 | 4) | 0;
+ HEAP32[i32 >> 2] = HEAP32[i32 >> 2] | 1;
+ i32 = i2;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ if (i12 >>> 0 > (HEAP32[48 >> 2] | 0) >>> 0) {
+ if ((i21 | 0) != 0) {
+ i7 = 2 << i20;
+ i7 = i21 << i20 & (i7 | 0 - i7);
+ i7 = (i7 & 0 - i7) + -1 | 0;
+ i2 = i7 >>> 12 & 16;
+ i7 = i7 >>> i2;
+ i6 = i7 >>> 5 & 8;
+ i7 = i7 >>> i6;
+ i5 = i7 >>> 2 & 4;
+ i7 = i7 >>> i5;
+ i4 = i7 >>> 1 & 2;
+ i7 = i7 >>> i4;
+ i3 = i7 >>> 1 & 1;
+ i3 = (i6 | i2 | i5 | i4 | i3) + (i7 >>> i3) | 0;
+ i7 = i3 << 1;
+ i4 = 80 + (i7 << 2) | 0;
+ i7 = 80 + (i7 + 2 << 2) | 0;
+ i5 = HEAP32[i7 >> 2] | 0;
+ i2 = i5 + 8 | 0;
+ i6 = HEAP32[i2 >> 2] | 0;
+ do {
+ if ((i4 | 0) != (i6 | 0)) {
+ if (i6 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i8 = i6 + 12 | 0;
+ if ((HEAP32[i8 >> 2] | 0) == (i5 | 0)) {
+ HEAP32[i8 >> 2] = i4;
+ HEAP32[i7 >> 2] = i6;
+ break;
+ } else {
+ _abort();
+ }
+ } else {
+ HEAP32[10] = i18 & ~(1 << i3);
+ }
+ } while (0);
+ i6 = i3 << 3;
+ i4 = i6 - i12 | 0;
+ HEAP32[i5 + 4 >> 2] = i12 | 3;
+ i3 = i5 + i12 | 0;
+ HEAP32[i5 + (i12 | 4) >> 2] = i4 | 1;
+ HEAP32[i5 + i6 >> 2] = i4;
+ i6 = HEAP32[48 >> 2] | 0;
+ if ((i6 | 0) != 0) {
+ i5 = HEAP32[60 >> 2] | 0;
+ i8 = i6 >>> 3;
+ i9 = i8 << 1;
+ i6 = 80 + (i9 << 2) | 0;
+ i7 = HEAP32[10] | 0;
+ i8 = 1 << i8;
+ if ((i7 & i8 | 0) != 0) {
+ i7 = 80 + (i9 + 2 << 2) | 0;
+ i8 = HEAP32[i7 >> 2] | 0;
+ if (i8 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i28 = i7;
+ i27 = i8;
+ }
+ } else {
+ HEAP32[10] = i7 | i8;
+ i28 = 80 + (i9 + 2 << 2) | 0;
+ i27 = i6;
+ }
+ HEAP32[i28 >> 2] = i5;
+ HEAP32[i27 + 12 >> 2] = i5;
+ HEAP32[i5 + 8 >> 2] = i27;
+ HEAP32[i5 + 12 >> 2] = i6;
+ }
+ HEAP32[48 >> 2] = i4;
+ HEAP32[60 >> 2] = i3;
+ i32 = i2;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ i18 = HEAP32[44 >> 2] | 0;
+ if ((i18 | 0) != 0) {
+ i2 = (i18 & 0 - i18) + -1 | 0;
+ i31 = i2 >>> 12 & 16;
+ i2 = i2 >>> i31;
+ i30 = i2 >>> 5 & 8;
+ i2 = i2 >>> i30;
+ i32 = i2 >>> 2 & 4;
+ i2 = i2 >>> i32;
+ i6 = i2 >>> 1 & 2;
+ i2 = i2 >>> i6;
+ i3 = i2 >>> 1 & 1;
+ i3 = HEAP32[344 + ((i30 | i31 | i32 | i6 | i3) + (i2 >>> i3) << 2) >> 2] | 0;
+ i2 = (HEAP32[i3 + 4 >> 2] & -8) - i12 | 0;
+ i6 = i3;
+ while (1) {
+ i5 = HEAP32[i6 + 16 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ i5 = HEAP32[i6 + 20 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ break;
+ }
+ }
+ i6 = (HEAP32[i5 + 4 >> 2] & -8) - i12 | 0;
+ i4 = i6 >>> 0 < i2 >>> 0;
+ i2 = i4 ? i6 : i2;
+ i6 = i5;
+ i3 = i4 ? i5 : i3;
+ }
+ i6 = HEAP32[56 >> 2] | 0;
+ if (i3 >>> 0 < i6 >>> 0) {
+ _abort();
+ }
+ i4 = i3 + i12 | 0;
+ if (!(i3 >>> 0 < i4 >>> 0)) {
+ _abort();
+ }
+ i5 = HEAP32[i3 + 24 >> 2] | 0;
+ i7 = HEAP32[i3 + 12 >> 2] | 0;
+ do {
+ if ((i7 | 0) == (i3 | 0)) {
+ i8 = i3 + 20 | 0;
+ i7 = HEAP32[i8 >> 2] | 0;
+ if ((i7 | 0) == 0) {
+ i8 = i3 + 16 | 0;
+ i7 = HEAP32[i8 >> 2] | 0;
+ if ((i7 | 0) == 0) {
+ i26 = 0;
+ break;
+ }
+ }
+ while (1) {
+ i10 = i7 + 20 | 0;
+ i9 = HEAP32[i10 >> 2] | 0;
+ if ((i9 | 0) != 0) {
+ i7 = i9;
+ i8 = i10;
+ continue;
+ }
+ i10 = i7 + 16 | 0;
+ i9 = HEAP32[i10 >> 2] | 0;
+ if ((i9 | 0) == 0) {
+ break;
+ } else {
+ i7 = i9;
+ i8 = i10;
+ }
+ }
+ if (i8 >>> 0 < i6 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i8 >> 2] = 0;
+ i26 = i7;
+ break;
+ }
+ } else {
+ i8 = HEAP32[i3 + 8 >> 2] | 0;
+ if (i8 >>> 0 < i6 >>> 0) {
+ _abort();
+ }
+ i6 = i8 + 12 | 0;
+ if ((HEAP32[i6 >> 2] | 0) != (i3 | 0)) {
+ _abort();
+ }
+ i9 = i7 + 8 | 0;
+ if ((HEAP32[i9 >> 2] | 0) == (i3 | 0)) {
+ HEAP32[i6 >> 2] = i7;
+ HEAP32[i9 >> 2] = i8;
+ i26 = i7;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ do {
+ if ((i5 | 0) != 0) {
+ i7 = HEAP32[i3 + 28 >> 2] | 0;
+ i6 = 344 + (i7 << 2) | 0;
+ if ((i3 | 0) == (HEAP32[i6 >> 2] | 0)) {
+ HEAP32[i6 >> 2] = i26;
+ if ((i26 | 0) == 0) {
+ HEAP32[44 >> 2] = HEAP32[44 >> 2] & ~(1 << i7);
+ break;
+ }
+ } else {
+ if (i5 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i6 = i5 + 16 | 0;
+ if ((HEAP32[i6 >> 2] | 0) == (i3 | 0)) {
+ HEAP32[i6 >> 2] = i26;
+ } else {
+ HEAP32[i5 + 20 >> 2] = i26;
+ }
+ if ((i26 | 0) == 0) {
+ break;
+ }
+ }
+ if (i26 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i26 + 24 >> 2] = i5;
+ i5 = HEAP32[i3 + 16 >> 2] | 0;
+ do {
+ if ((i5 | 0) != 0) {
+ if (i5 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i26 + 16 >> 2] = i5;
+ HEAP32[i5 + 24 >> 2] = i26;
+ break;
+ }
+ }
+ } while (0);
+ i5 = HEAP32[i3 + 20 >> 2] | 0;
+ if ((i5 | 0) != 0) {
+ if (i5 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i26 + 20 >> 2] = i5;
+ HEAP32[i5 + 24 >> 2] = i26;
+ break;
+ }
+ }
+ }
+ } while (0);
+ if (i2 >>> 0 < 16) {
+ i32 = i2 + i12 | 0;
+ HEAP32[i3 + 4 >> 2] = i32 | 3;
+ i32 = i3 + (i32 + 4) | 0;
+ HEAP32[i32 >> 2] = HEAP32[i32 >> 2] | 1;
+ } else {
+ HEAP32[i3 + 4 >> 2] = i12 | 3;
+ HEAP32[i3 + (i12 | 4) >> 2] = i2 | 1;
+ HEAP32[i3 + (i2 + i12) >> 2] = i2;
+ i6 = HEAP32[48 >> 2] | 0;
+ if ((i6 | 0) != 0) {
+ i5 = HEAP32[60 >> 2] | 0;
+ i8 = i6 >>> 3;
+ i9 = i8 << 1;
+ i6 = 80 + (i9 << 2) | 0;
+ i7 = HEAP32[10] | 0;
+ i8 = 1 << i8;
+ if ((i7 & i8 | 0) != 0) {
+ i7 = 80 + (i9 + 2 << 2) | 0;
+ i8 = HEAP32[i7 >> 2] | 0;
+ if (i8 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i25 = i7;
+ i24 = i8;
+ }
+ } else {
+ HEAP32[10] = i7 | i8;
+ i25 = 80 + (i9 + 2 << 2) | 0;
+ i24 = i6;
+ }
+ HEAP32[i25 >> 2] = i5;
+ HEAP32[i24 + 12 >> 2] = i5;
+ HEAP32[i5 + 8 >> 2] = i24;
+ HEAP32[i5 + 12 >> 2] = i6;
+ }
+ HEAP32[48 >> 2] = i2;
+ HEAP32[60 >> 2] = i4;
+ }
+ i32 = i3 + 8 | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ }
+ } else {
+ if (!(i12 >>> 0 > 4294967231)) {
+ i24 = i12 + 11 | 0;
+ i12 = i24 & -8;
+ i26 = HEAP32[44 >> 2] | 0;
+ if ((i26 | 0) != 0) {
+ i25 = 0 - i12 | 0;
+ i24 = i24 >>> 8;
+ if ((i24 | 0) != 0) {
+ if (i12 >>> 0 > 16777215) {
+ i27 = 31;
+ } else {
+ i31 = (i24 + 1048320 | 0) >>> 16 & 8;
+ i32 = i24 << i31;
+ i30 = (i32 + 520192 | 0) >>> 16 & 4;
+ i32 = i32 << i30;
+ i27 = (i32 + 245760 | 0) >>> 16 & 2;
+ i27 = 14 - (i30 | i31 | i27) + (i32 << i27 >>> 15) | 0;
+ i27 = i12 >>> (i27 + 7 | 0) & 1 | i27 << 1;
+ }
+ } else {
+ i27 = 0;
+ }
+ i30 = HEAP32[344 + (i27 << 2) >> 2] | 0;
+ L126 : do {
+ if ((i30 | 0) == 0) {
+ i29 = 0;
+ i24 = 0;
+ } else {
+ if ((i27 | 0) == 31) {
+ i24 = 0;
+ } else {
+ i24 = 25 - (i27 >>> 1) | 0;
+ }
+ i29 = 0;
+ i28 = i12 << i24;
+ i24 = 0;
+ while (1) {
+ i32 = HEAP32[i30 + 4 >> 2] & -8;
+ i31 = i32 - i12 | 0;
+ if (i31 >>> 0 < i25 >>> 0) {
+ if ((i32 | 0) == (i12 | 0)) {
+ i25 = i31;
+ i29 = i30;
+ i24 = i30;
+ break L126;
+ } else {
+ i25 = i31;
+ i24 = i30;
+ }
+ }
+ i31 = HEAP32[i30 + 20 >> 2] | 0;
+ i30 = HEAP32[i30 + (i28 >>> 31 << 2) + 16 >> 2] | 0;
+ i29 = (i31 | 0) == 0 | (i31 | 0) == (i30 | 0) ? i29 : i31;
+ if ((i30 | 0) == 0) {
+ break;
+ } else {
+ i28 = i28 << 1;
+ }
+ }
+ }
+ } while (0);
+ if ((i29 | 0) == 0 & (i24 | 0) == 0) {
+ i32 = 2 << i27;
+ i26 = i26 & (i32 | 0 - i32);
+ if ((i26 | 0) == 0) {
+ break;
+ }
+ i32 = (i26 & 0 - i26) + -1 | 0;
+ i28 = i32 >>> 12 & 16;
+ i32 = i32 >>> i28;
+ i27 = i32 >>> 5 & 8;
+ i32 = i32 >>> i27;
+ i30 = i32 >>> 2 & 4;
+ i32 = i32 >>> i30;
+ i31 = i32 >>> 1 & 2;
+ i32 = i32 >>> i31;
+ i29 = i32 >>> 1 & 1;
+ i29 = HEAP32[344 + ((i27 | i28 | i30 | i31 | i29) + (i32 >>> i29) << 2) >> 2] | 0;
+ }
+ if ((i29 | 0) != 0) {
+ while (1) {
+ i27 = (HEAP32[i29 + 4 >> 2] & -8) - i12 | 0;
+ i26 = i27 >>> 0 < i25 >>> 0;
+ i25 = i26 ? i27 : i25;
+ i24 = i26 ? i29 : i24;
+ i26 = HEAP32[i29 + 16 >> 2] | 0;
+ if ((i26 | 0) != 0) {
+ i29 = i26;
+ continue;
+ }
+ i29 = HEAP32[i29 + 20 >> 2] | 0;
+ if ((i29 | 0) == 0) {
+ break;
+ }
+ }
+ }
+ if ((i24 | 0) != 0 ? i25 >>> 0 < ((HEAP32[48 >> 2] | 0) - i12 | 0) >>> 0 : 0) {
+ i4 = HEAP32[56 >> 2] | 0;
+ if (i24 >>> 0 < i4 >>> 0) {
+ _abort();
+ }
+ i2 = i24 + i12 | 0;
+ if (!(i24 >>> 0 < i2 >>> 0)) {
+ _abort();
+ }
+ i3 = HEAP32[i24 + 24 >> 2] | 0;
+ i6 = HEAP32[i24 + 12 >> 2] | 0;
+ do {
+ if ((i6 | 0) == (i24 | 0)) {
+ i6 = i24 + 20 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ i6 = i24 + 16 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ i22 = 0;
+ break;
+ }
+ }
+ while (1) {
+ i8 = i5 + 20 | 0;
+ i7 = HEAP32[i8 >> 2] | 0;
+ if ((i7 | 0) != 0) {
+ i5 = i7;
+ i6 = i8;
+ continue;
+ }
+ i7 = i5 + 16 | 0;
+ i8 = HEAP32[i7 >> 2] | 0;
+ if ((i8 | 0) == 0) {
+ break;
+ } else {
+ i5 = i8;
+ i6 = i7;
+ }
+ }
+ if (i6 >>> 0 < i4 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i6 >> 2] = 0;
+ i22 = i5;
+ break;
+ }
+ } else {
+ i5 = HEAP32[i24 + 8 >> 2] | 0;
+ if (i5 >>> 0 < i4 >>> 0) {
+ _abort();
+ }
+ i7 = i5 + 12 | 0;
+ if ((HEAP32[i7 >> 2] | 0) != (i24 | 0)) {
+ _abort();
+ }
+ i4 = i6 + 8 | 0;
+ if ((HEAP32[i4 >> 2] | 0) == (i24 | 0)) {
+ HEAP32[i7 >> 2] = i6;
+ HEAP32[i4 >> 2] = i5;
+ i22 = i6;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ do {
+ if ((i3 | 0) != 0) {
+ i4 = HEAP32[i24 + 28 >> 2] | 0;
+ i5 = 344 + (i4 << 2) | 0;
+ if ((i24 | 0) == (HEAP32[i5 >> 2] | 0)) {
+ HEAP32[i5 >> 2] = i22;
+ if ((i22 | 0) == 0) {
+ HEAP32[44 >> 2] = HEAP32[44 >> 2] & ~(1 << i4);
+ break;
+ }
+ } else {
+ if (i3 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i4 = i3 + 16 | 0;
+ if ((HEAP32[i4 >> 2] | 0) == (i24 | 0)) {
+ HEAP32[i4 >> 2] = i22;
+ } else {
+ HEAP32[i3 + 20 >> 2] = i22;
+ }
+ if ((i22 | 0) == 0) {
+ break;
+ }
+ }
+ if (i22 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i22 + 24 >> 2] = i3;
+ i3 = HEAP32[i24 + 16 >> 2] | 0;
+ do {
+ if ((i3 | 0) != 0) {
+ if (i3 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i22 + 16 >> 2] = i3;
+ HEAP32[i3 + 24 >> 2] = i22;
+ break;
+ }
+ }
+ } while (0);
+ i3 = HEAP32[i24 + 20 >> 2] | 0;
+ if ((i3 | 0) != 0) {
+ if (i3 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i22 + 20 >> 2] = i3;
+ HEAP32[i3 + 24 >> 2] = i22;
+ break;
+ }
+ }
+ }
+ } while (0);
+ L204 : do {
+ if (!(i25 >>> 0 < 16)) {
+ HEAP32[i24 + 4 >> 2] = i12 | 3;
+ HEAP32[i24 + (i12 | 4) >> 2] = i25 | 1;
+ HEAP32[i24 + (i25 + i12) >> 2] = i25;
+ i4 = i25 >>> 3;
+ if (i25 >>> 0 < 256) {
+ i6 = i4 << 1;
+ i3 = 80 + (i6 << 2) | 0;
+ i5 = HEAP32[10] | 0;
+ i4 = 1 << i4;
+ if ((i5 & i4 | 0) != 0) {
+ i5 = 80 + (i6 + 2 << 2) | 0;
+ i4 = HEAP32[i5 >> 2] | 0;
+ if (i4 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i21 = i5;
+ i20 = i4;
+ }
+ } else {
+ HEAP32[10] = i5 | i4;
+ i21 = 80 + (i6 + 2 << 2) | 0;
+ i20 = i3;
+ }
+ HEAP32[i21 >> 2] = i2;
+ HEAP32[i20 + 12 >> 2] = i2;
+ HEAP32[i24 + (i12 + 8) >> 2] = i20;
+ HEAP32[i24 + (i12 + 12) >> 2] = i3;
+ break;
+ }
+ i3 = i25 >>> 8;
+ if ((i3 | 0) != 0) {
+ if (i25 >>> 0 > 16777215) {
+ i3 = 31;
+ } else {
+ i31 = (i3 + 1048320 | 0) >>> 16 & 8;
+ i32 = i3 << i31;
+ i30 = (i32 + 520192 | 0) >>> 16 & 4;
+ i32 = i32 << i30;
+ i3 = (i32 + 245760 | 0) >>> 16 & 2;
+ i3 = 14 - (i30 | i31 | i3) + (i32 << i3 >>> 15) | 0;
+ i3 = i25 >>> (i3 + 7 | 0) & 1 | i3 << 1;
+ }
+ } else {
+ i3 = 0;
+ }
+ i6 = 344 + (i3 << 2) | 0;
+ HEAP32[i24 + (i12 + 28) >> 2] = i3;
+ HEAP32[i24 + (i12 + 20) >> 2] = 0;
+ HEAP32[i24 + (i12 + 16) >> 2] = 0;
+ i4 = HEAP32[44 >> 2] | 0;
+ i5 = 1 << i3;
+ if ((i4 & i5 | 0) == 0) {
+ HEAP32[44 >> 2] = i4 | i5;
+ HEAP32[i6 >> 2] = i2;
+ HEAP32[i24 + (i12 + 24) >> 2] = i6;
+ HEAP32[i24 + (i12 + 12) >> 2] = i2;
+ HEAP32[i24 + (i12 + 8) >> 2] = i2;
+ break;
+ }
+ i4 = HEAP32[i6 >> 2] | 0;
+ if ((i3 | 0) == 31) {
+ i3 = 0;
+ } else {
+ i3 = 25 - (i3 >>> 1) | 0;
+ }
+ L225 : do {
+ if ((HEAP32[i4 + 4 >> 2] & -8 | 0) != (i25 | 0)) {
+ i3 = i25 << i3;
+ while (1) {
+ i6 = i4 + (i3 >>> 31 << 2) + 16 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ break;
+ }
+ if ((HEAP32[i5 + 4 >> 2] & -8 | 0) == (i25 | 0)) {
+ i18 = i5;
+ break L225;
+ } else {
+ i3 = i3 << 1;
+ i4 = i5;
+ }
+ }
+ if (i6 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i6 >> 2] = i2;
+ HEAP32[i24 + (i12 + 24) >> 2] = i4;
+ HEAP32[i24 + (i12 + 12) >> 2] = i2;
+ HEAP32[i24 + (i12 + 8) >> 2] = i2;
+ break L204;
+ }
+ } else {
+ i18 = i4;
+ }
+ } while (0);
+ i4 = i18 + 8 | 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ i5 = HEAP32[56 >> 2] | 0;
+ if (i18 >>> 0 < i5 >>> 0) {
+ _abort();
+ }
+ if (i3 >>> 0 < i5 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i3 + 12 >> 2] = i2;
+ HEAP32[i4 >> 2] = i2;
+ HEAP32[i24 + (i12 + 8) >> 2] = i3;
+ HEAP32[i24 + (i12 + 12) >> 2] = i18;
+ HEAP32[i24 + (i12 + 24) >> 2] = 0;
+ break;
+ }
+ } else {
+ i32 = i25 + i12 | 0;
+ HEAP32[i24 + 4 >> 2] = i32 | 3;
+ i32 = i24 + (i32 + 4) | 0;
+ HEAP32[i32 >> 2] = HEAP32[i32 >> 2] | 1;
+ }
+ } while (0);
+ i32 = i24 + 8 | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ }
+ } else {
+ i12 = -1;
+ }
+ }
+ } while (0);
+ i18 = HEAP32[48 >> 2] | 0;
+ if (!(i12 >>> 0 > i18 >>> 0)) {
+ i3 = i18 - i12 | 0;
+ i2 = HEAP32[60 >> 2] | 0;
+ if (i3 >>> 0 > 15) {
+ HEAP32[60 >> 2] = i2 + i12;
+ HEAP32[48 >> 2] = i3;
+ HEAP32[i2 + (i12 + 4) >> 2] = i3 | 1;
+ HEAP32[i2 + i18 >> 2] = i3;
+ HEAP32[i2 + 4 >> 2] = i12 | 3;
+ } else {
+ HEAP32[48 >> 2] = 0;
+ HEAP32[60 >> 2] = 0;
+ HEAP32[i2 + 4 >> 2] = i18 | 3;
+ i32 = i2 + (i18 + 4) | 0;
+ HEAP32[i32 >> 2] = HEAP32[i32 >> 2] | 1;
+ }
+ i32 = i2 + 8 | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ i18 = HEAP32[52 >> 2] | 0;
+ if (i12 >>> 0 < i18 >>> 0) {
+ i31 = i18 - i12 | 0;
+ HEAP32[52 >> 2] = i31;
+ i32 = HEAP32[64 >> 2] | 0;
+ HEAP32[64 >> 2] = i32 + i12;
+ HEAP32[i32 + (i12 + 4) >> 2] = i31 | 1;
+ HEAP32[i32 + 4 >> 2] = i12 | 3;
+ i32 = i32 + 8 | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ do {
+ if ((HEAP32[128] | 0) == 0) {
+ i18 = _sysconf(30) | 0;
+ if ((i18 + -1 & i18 | 0) == 0) {
+ HEAP32[520 >> 2] = i18;
+ HEAP32[516 >> 2] = i18;
+ HEAP32[524 >> 2] = -1;
+ HEAP32[528 >> 2] = -1;
+ HEAP32[532 >> 2] = 0;
+ HEAP32[484 >> 2] = 0;
+ HEAP32[128] = (_time(0) | 0) & -16 ^ 1431655768;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ i20 = i12 + 48 | 0;
+ i25 = HEAP32[520 >> 2] | 0;
+ i21 = i12 + 47 | 0;
+ i22 = i25 + i21 | 0;
+ i25 = 0 - i25 | 0;
+ i18 = i22 & i25;
+ if (!(i18 >>> 0 > i12 >>> 0)) {
+ i32 = 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ i24 = HEAP32[480 >> 2] | 0;
+ if ((i24 | 0) != 0 ? (i31 = HEAP32[472 >> 2] | 0, i32 = i31 + i18 | 0, i32 >>> 0 <= i31 >>> 0 | i32 >>> 0 > i24 >>> 0) : 0) {
+ i32 = 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ L269 : do {
+ if ((HEAP32[484 >> 2] & 4 | 0) == 0) {
+ i26 = HEAP32[64 >> 2] | 0;
+ L271 : do {
+ if ((i26 | 0) != 0) {
+ i24 = 488 | 0;
+ while (1) {
+ i27 = HEAP32[i24 >> 2] | 0;
+ if (!(i27 >>> 0 > i26 >>> 0) ? (i23 = i24 + 4 | 0, (i27 + (HEAP32[i23 >> 2] | 0) | 0) >>> 0 > i26 >>> 0) : 0) {
+ break;
+ }
+ i24 = HEAP32[i24 + 8 >> 2] | 0;
+ if ((i24 | 0) == 0) {
+ i13 = 182;
+ break L271;
+ }
+ }
+ if ((i24 | 0) != 0) {
+ i25 = i22 - (HEAP32[52 >> 2] | 0) & i25;
+ if (i25 >>> 0 < 2147483647) {
+ i13 = _sbrk(i25 | 0) | 0;
+ i26 = (i13 | 0) == ((HEAP32[i24 >> 2] | 0) + (HEAP32[i23 >> 2] | 0) | 0);
+ i22 = i13;
+ i24 = i25;
+ i23 = i26 ? i13 : -1;
+ i25 = i26 ? i25 : 0;
+ i13 = 191;
+ } else {
+ i25 = 0;
+ }
+ } else {
+ i13 = 182;
+ }
+ } else {
+ i13 = 182;
+ }
+ } while (0);
+ do {
+ if ((i13 | 0) == 182) {
+ i23 = _sbrk(0) | 0;
+ if ((i23 | 0) != (-1 | 0)) {
+ i24 = i23;
+ i22 = HEAP32[516 >> 2] | 0;
+ i25 = i22 + -1 | 0;
+ if ((i25 & i24 | 0) == 0) {
+ i25 = i18;
+ } else {
+ i25 = i18 - i24 + (i25 + i24 & 0 - i22) | 0;
+ }
+ i24 = HEAP32[472 >> 2] | 0;
+ i26 = i24 + i25 | 0;
+ if (i25 >>> 0 > i12 >>> 0 & i25 >>> 0 < 2147483647) {
+ i22 = HEAP32[480 >> 2] | 0;
+ if ((i22 | 0) != 0 ? i26 >>> 0 <= i24 >>> 0 | i26 >>> 0 > i22 >>> 0 : 0) {
+ i25 = 0;
+ break;
+ }
+ i22 = _sbrk(i25 | 0) | 0;
+ i13 = (i22 | 0) == (i23 | 0);
+ i24 = i25;
+ i23 = i13 ? i23 : -1;
+ i25 = i13 ? i25 : 0;
+ i13 = 191;
+ } else {
+ i25 = 0;
+ }
+ } else {
+ i25 = 0;
+ }
+ }
+ } while (0);
+ L291 : do {
+ if ((i13 | 0) == 191) {
+ i13 = 0 - i24 | 0;
+ if ((i23 | 0) != (-1 | 0)) {
+ i17 = i23;
+ i14 = i25;
+ i13 = 202;
+ break L269;
+ }
+ do {
+ if ((i22 | 0) != (-1 | 0) & i24 >>> 0 < 2147483647 & i24 >>> 0 < i20 >>> 0 ? (i19 = HEAP32[520 >> 2] | 0, i19 = i21 - i24 + i19 & 0 - i19, i19 >>> 0 < 2147483647) : 0) {
+ if ((_sbrk(i19 | 0) | 0) == (-1 | 0)) {
+ _sbrk(i13 | 0) | 0;
+ break L291;
+ } else {
+ i24 = i19 + i24 | 0;
+ break;
+ }
+ }
+ } while (0);
+ if ((i22 | 0) != (-1 | 0)) {
+ i17 = i22;
+ i14 = i24;
+ i13 = 202;
+ break L269;
+ }
+ }
+ } while (0);
+ HEAP32[484 >> 2] = HEAP32[484 >> 2] | 4;
+ i13 = 199;
+ } else {
+ i25 = 0;
+ i13 = 199;
+ }
+ } while (0);
+ if ((((i13 | 0) == 199 ? i18 >>> 0 < 2147483647 : 0) ? (i17 = _sbrk(i18 | 0) | 0, i16 = _sbrk(0) | 0, (i16 | 0) != (-1 | 0) & (i17 | 0) != (-1 | 0) & i17 >>> 0 < i16 >>> 0) : 0) ? (i15 = i16 - i17 | 0, i14 = i15 >>> 0 > (i12 + 40 | 0) >>> 0, i14) : 0) {
+ i14 = i14 ? i15 : i25;
+ i13 = 202;
+ }
+ if ((i13 | 0) == 202) {
+ i15 = (HEAP32[472 >> 2] | 0) + i14 | 0;
+ HEAP32[472 >> 2] = i15;
+ if (i15 >>> 0 > (HEAP32[476 >> 2] | 0) >>> 0) {
+ HEAP32[476 >> 2] = i15;
+ }
+ i15 = HEAP32[64 >> 2] | 0;
+ L311 : do {
+ if ((i15 | 0) != 0) {
+ i21 = 488 | 0;
+ while (1) {
+ i16 = HEAP32[i21 >> 2] | 0;
+ i19 = i21 + 4 | 0;
+ i20 = HEAP32[i19 >> 2] | 0;
+ if ((i17 | 0) == (i16 + i20 | 0)) {
+ i13 = 214;
+ break;
+ }
+ i18 = HEAP32[i21 + 8 >> 2] | 0;
+ if ((i18 | 0) == 0) {
+ break;
+ } else {
+ i21 = i18;
+ }
+ }
+ if (((i13 | 0) == 214 ? (HEAP32[i21 + 12 >> 2] & 8 | 0) == 0 : 0) ? i15 >>> 0 >= i16 >>> 0 & i15 >>> 0 < i17 >>> 0 : 0) {
+ HEAP32[i19 >> 2] = i20 + i14;
+ i2 = (HEAP32[52 >> 2] | 0) + i14 | 0;
+ i3 = i15 + 8 | 0;
+ if ((i3 & 7 | 0) == 0) {
+ i3 = 0;
+ } else {
+ i3 = 0 - i3 & 7;
+ }
+ i32 = i2 - i3 | 0;
+ HEAP32[64 >> 2] = i15 + i3;
+ HEAP32[52 >> 2] = i32;
+ HEAP32[i15 + (i3 + 4) >> 2] = i32 | 1;
+ HEAP32[i15 + (i2 + 4) >> 2] = 40;
+ HEAP32[68 >> 2] = HEAP32[528 >> 2];
+ break;
+ }
+ if (i17 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ HEAP32[56 >> 2] = i17;
+ }
+ i19 = i17 + i14 | 0;
+ i16 = 488 | 0;
+ while (1) {
+ if ((HEAP32[i16 >> 2] | 0) == (i19 | 0)) {
+ i13 = 224;
+ break;
+ }
+ i18 = HEAP32[i16 + 8 >> 2] | 0;
+ if ((i18 | 0) == 0) {
+ break;
+ } else {
+ i16 = i18;
+ }
+ }
+ if ((i13 | 0) == 224 ? (HEAP32[i16 + 12 >> 2] & 8 | 0) == 0 : 0) {
+ HEAP32[i16 >> 2] = i17;
+ i6 = i16 + 4 | 0;
+ HEAP32[i6 >> 2] = (HEAP32[i6 >> 2] | 0) + i14;
+ i6 = i17 + 8 | 0;
+ if ((i6 & 7 | 0) == 0) {
+ i6 = 0;
+ } else {
+ i6 = 0 - i6 & 7;
+ }
+ i7 = i17 + (i14 + 8) | 0;
+ if ((i7 & 7 | 0) == 0) {
+ i13 = 0;
+ } else {
+ i13 = 0 - i7 & 7;
+ }
+ i15 = i17 + (i13 + i14) | 0;
+ i8 = i6 + i12 | 0;
+ i7 = i17 + i8 | 0;
+ i10 = i15 - (i17 + i6) - i12 | 0;
+ HEAP32[i17 + (i6 + 4) >> 2] = i12 | 3;
+ L348 : do {
+ if ((i15 | 0) != (HEAP32[64 >> 2] | 0)) {
+ if ((i15 | 0) == (HEAP32[60 >> 2] | 0)) {
+ i32 = (HEAP32[48 >> 2] | 0) + i10 | 0;
+ HEAP32[48 >> 2] = i32;
+ HEAP32[60 >> 2] = i7;
+ HEAP32[i17 + (i8 + 4) >> 2] = i32 | 1;
+ HEAP32[i17 + (i32 + i8) >> 2] = i32;
+ break;
+ }
+ i12 = i14 + 4 | 0;
+ i18 = HEAP32[i17 + (i12 + i13) >> 2] | 0;
+ if ((i18 & 3 | 0) == 1) {
+ i11 = i18 & -8;
+ i16 = i18 >>> 3;
+ do {
+ if (!(i18 >>> 0 < 256)) {
+ i9 = HEAP32[i17 + ((i13 | 24) + i14) >> 2] | 0;
+ i19 = HEAP32[i17 + (i14 + 12 + i13) >> 2] | 0;
+ do {
+ if ((i19 | 0) == (i15 | 0)) {
+ i19 = i13 | 16;
+ i18 = i17 + (i12 + i19) | 0;
+ i16 = HEAP32[i18 >> 2] | 0;
+ if ((i16 | 0) == 0) {
+ i18 = i17 + (i19 + i14) | 0;
+ i16 = HEAP32[i18 >> 2] | 0;
+ if ((i16 | 0) == 0) {
+ i5 = 0;
+ break;
+ }
+ }
+ while (1) {
+ i20 = i16 + 20 | 0;
+ i19 = HEAP32[i20 >> 2] | 0;
+ if ((i19 | 0) != 0) {
+ i16 = i19;
+ i18 = i20;
+ continue;
+ }
+ i19 = i16 + 16 | 0;
+ i20 = HEAP32[i19 >> 2] | 0;
+ if ((i20 | 0) == 0) {
+ break;
+ } else {
+ i16 = i20;
+ i18 = i19;
+ }
+ }
+ if (i18 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i18 >> 2] = 0;
+ i5 = i16;
+ break;
+ }
+ } else {
+ i18 = HEAP32[i17 + ((i13 | 8) + i14) >> 2] | 0;
+ if (i18 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i16 = i18 + 12 | 0;
+ if ((HEAP32[i16 >> 2] | 0) != (i15 | 0)) {
+ _abort();
+ }
+ i20 = i19 + 8 | 0;
+ if ((HEAP32[i20 >> 2] | 0) == (i15 | 0)) {
+ HEAP32[i16 >> 2] = i19;
+ HEAP32[i20 >> 2] = i18;
+ i5 = i19;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ if ((i9 | 0) != 0) {
+ i16 = HEAP32[i17 + (i14 + 28 + i13) >> 2] | 0;
+ i18 = 344 + (i16 << 2) | 0;
+ if ((i15 | 0) == (HEAP32[i18 >> 2] | 0)) {
+ HEAP32[i18 >> 2] = i5;
+ if ((i5 | 0) == 0) {
+ HEAP32[44 >> 2] = HEAP32[44 >> 2] & ~(1 << i16);
+ break;
+ }
+ } else {
+ if (i9 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i16 = i9 + 16 | 0;
+ if ((HEAP32[i16 >> 2] | 0) == (i15 | 0)) {
+ HEAP32[i16 >> 2] = i5;
+ } else {
+ HEAP32[i9 + 20 >> 2] = i5;
+ }
+ if ((i5 | 0) == 0) {
+ break;
+ }
+ }
+ if (i5 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i5 + 24 >> 2] = i9;
+ i15 = i13 | 16;
+ i9 = HEAP32[i17 + (i15 + i14) >> 2] | 0;
+ do {
+ if ((i9 | 0) != 0) {
+ if (i9 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i5 + 16 >> 2] = i9;
+ HEAP32[i9 + 24 >> 2] = i5;
+ break;
+ }
+ }
+ } while (0);
+ i9 = HEAP32[i17 + (i12 + i15) >> 2] | 0;
+ if ((i9 | 0) != 0) {
+ if (i9 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i5 + 20 >> 2] = i9;
+ HEAP32[i9 + 24 >> 2] = i5;
+ break;
+ }
+ }
+ }
+ } else {
+ i5 = HEAP32[i17 + ((i13 | 8) + i14) >> 2] | 0;
+ i12 = HEAP32[i17 + (i14 + 12 + i13) >> 2] | 0;
+ i18 = 80 + (i16 << 1 << 2) | 0;
+ if ((i5 | 0) != (i18 | 0)) {
+ if (i5 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ if ((HEAP32[i5 + 12 >> 2] | 0) != (i15 | 0)) {
+ _abort();
+ }
+ }
+ if ((i12 | 0) == (i5 | 0)) {
+ HEAP32[10] = HEAP32[10] & ~(1 << i16);
+ break;
+ }
+ if ((i12 | 0) != (i18 | 0)) {
+ if (i12 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i16 = i12 + 8 | 0;
+ if ((HEAP32[i16 >> 2] | 0) == (i15 | 0)) {
+ i9 = i16;
+ } else {
+ _abort();
+ }
+ } else {
+ i9 = i12 + 8 | 0;
+ }
+ HEAP32[i5 + 12 >> 2] = i12;
+ HEAP32[i9 >> 2] = i5;
+ }
+ } while (0);
+ i15 = i17 + ((i11 | i13) + i14) | 0;
+ i10 = i11 + i10 | 0;
+ }
+ i5 = i15 + 4 | 0;
+ HEAP32[i5 >> 2] = HEAP32[i5 >> 2] & -2;
+ HEAP32[i17 + (i8 + 4) >> 2] = i10 | 1;
+ HEAP32[i17 + (i10 + i8) >> 2] = i10;
+ i5 = i10 >>> 3;
+ if (i10 >>> 0 < 256) {
+ i10 = i5 << 1;
+ i2 = 80 + (i10 << 2) | 0;
+ i9 = HEAP32[10] | 0;
+ i5 = 1 << i5;
+ if ((i9 & i5 | 0) != 0) {
+ i9 = 80 + (i10 + 2 << 2) | 0;
+ i5 = HEAP32[i9 >> 2] | 0;
+ if (i5 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i3 = i9;
+ i4 = i5;
+ }
+ } else {
+ HEAP32[10] = i9 | i5;
+ i3 = 80 + (i10 + 2 << 2) | 0;
+ i4 = i2;
+ }
+ HEAP32[i3 >> 2] = i7;
+ HEAP32[i4 + 12 >> 2] = i7;
+ HEAP32[i17 + (i8 + 8) >> 2] = i4;
+ HEAP32[i17 + (i8 + 12) >> 2] = i2;
+ break;
+ }
+ i3 = i10 >>> 8;
+ if ((i3 | 0) != 0) {
+ if (i10 >>> 0 > 16777215) {
+ i3 = 31;
+ } else {
+ i31 = (i3 + 1048320 | 0) >>> 16 & 8;
+ i32 = i3 << i31;
+ i30 = (i32 + 520192 | 0) >>> 16 & 4;
+ i32 = i32 << i30;
+ i3 = (i32 + 245760 | 0) >>> 16 & 2;
+ i3 = 14 - (i30 | i31 | i3) + (i32 << i3 >>> 15) | 0;
+ i3 = i10 >>> (i3 + 7 | 0) & 1 | i3 << 1;
+ }
+ } else {
+ i3 = 0;
+ }
+ i4 = 344 + (i3 << 2) | 0;
+ HEAP32[i17 + (i8 + 28) >> 2] = i3;
+ HEAP32[i17 + (i8 + 20) >> 2] = 0;
+ HEAP32[i17 + (i8 + 16) >> 2] = 0;
+ i9 = HEAP32[44 >> 2] | 0;
+ i5 = 1 << i3;
+ if ((i9 & i5 | 0) == 0) {
+ HEAP32[44 >> 2] = i9 | i5;
+ HEAP32[i4 >> 2] = i7;
+ HEAP32[i17 + (i8 + 24) >> 2] = i4;
+ HEAP32[i17 + (i8 + 12) >> 2] = i7;
+ HEAP32[i17 + (i8 + 8) >> 2] = i7;
+ break;
+ }
+ i4 = HEAP32[i4 >> 2] | 0;
+ if ((i3 | 0) == 31) {
+ i3 = 0;
+ } else {
+ i3 = 25 - (i3 >>> 1) | 0;
+ }
+ L444 : do {
+ if ((HEAP32[i4 + 4 >> 2] & -8 | 0) != (i10 | 0)) {
+ i3 = i10 << i3;
+ while (1) {
+ i5 = i4 + (i3 >>> 31 << 2) + 16 | 0;
+ i9 = HEAP32[i5 >> 2] | 0;
+ if ((i9 | 0) == 0) {
+ break;
+ }
+ if ((HEAP32[i9 + 4 >> 2] & -8 | 0) == (i10 | 0)) {
+ i2 = i9;
+ break L444;
+ } else {
+ i3 = i3 << 1;
+ i4 = i9;
+ }
+ }
+ if (i5 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i5 >> 2] = i7;
+ HEAP32[i17 + (i8 + 24) >> 2] = i4;
+ HEAP32[i17 + (i8 + 12) >> 2] = i7;
+ HEAP32[i17 + (i8 + 8) >> 2] = i7;
+ break L348;
+ }
+ } else {
+ i2 = i4;
+ }
+ } while (0);
+ i4 = i2 + 8 | 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ i5 = HEAP32[56 >> 2] | 0;
+ if (i2 >>> 0 < i5 >>> 0) {
+ _abort();
+ }
+ if (i3 >>> 0 < i5 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i3 + 12 >> 2] = i7;
+ HEAP32[i4 >> 2] = i7;
+ HEAP32[i17 + (i8 + 8) >> 2] = i3;
+ HEAP32[i17 + (i8 + 12) >> 2] = i2;
+ HEAP32[i17 + (i8 + 24) >> 2] = 0;
+ break;
+ }
+ } else {
+ i32 = (HEAP32[52 >> 2] | 0) + i10 | 0;
+ HEAP32[52 >> 2] = i32;
+ HEAP32[64 >> 2] = i7;
+ HEAP32[i17 + (i8 + 4) >> 2] = i32 | 1;
+ }
+ } while (0);
+ i32 = i17 + (i6 | 8) | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ i3 = 488 | 0;
+ while (1) {
+ i2 = HEAP32[i3 >> 2] | 0;
+ if (!(i2 >>> 0 > i15 >>> 0) ? (i11 = HEAP32[i3 + 4 >> 2] | 0, i10 = i2 + i11 | 0, i10 >>> 0 > i15 >>> 0) : 0) {
+ break;
+ }
+ i3 = HEAP32[i3 + 8 >> 2] | 0;
+ }
+ i3 = i2 + (i11 + -39) | 0;
+ if ((i3 & 7 | 0) == 0) {
+ i3 = 0;
+ } else {
+ i3 = 0 - i3 & 7;
+ }
+ i2 = i2 + (i11 + -47 + i3) | 0;
+ i2 = i2 >>> 0 < (i15 + 16 | 0) >>> 0 ? i15 : i2;
+ i3 = i2 + 8 | 0;
+ i4 = i17 + 8 | 0;
+ if ((i4 & 7 | 0) == 0) {
+ i4 = 0;
+ } else {
+ i4 = 0 - i4 & 7;
+ }
+ i32 = i14 + -40 - i4 | 0;
+ HEAP32[64 >> 2] = i17 + i4;
+ HEAP32[52 >> 2] = i32;
+ HEAP32[i17 + (i4 + 4) >> 2] = i32 | 1;
+ HEAP32[i17 + (i14 + -36) >> 2] = 40;
+ HEAP32[68 >> 2] = HEAP32[528 >> 2];
+ HEAP32[i2 + 4 >> 2] = 27;
+ HEAP32[i3 + 0 >> 2] = HEAP32[488 >> 2];
+ HEAP32[i3 + 4 >> 2] = HEAP32[492 >> 2];
+ HEAP32[i3 + 8 >> 2] = HEAP32[496 >> 2];
+ HEAP32[i3 + 12 >> 2] = HEAP32[500 >> 2];
+ HEAP32[488 >> 2] = i17;
+ HEAP32[492 >> 2] = i14;
+ HEAP32[500 >> 2] = 0;
+ HEAP32[496 >> 2] = i3;
+ i4 = i2 + 28 | 0;
+ HEAP32[i4 >> 2] = 7;
+ if ((i2 + 32 | 0) >>> 0 < i10 >>> 0) {
+ while (1) {
+ i3 = i4 + 4 | 0;
+ HEAP32[i3 >> 2] = 7;
+ if ((i4 + 8 | 0) >>> 0 < i10 >>> 0) {
+ i4 = i3;
+ } else {
+ break;
+ }
+ }
+ }
+ if ((i2 | 0) != (i15 | 0)) {
+ i2 = i2 - i15 | 0;
+ i3 = i15 + (i2 + 4) | 0;
+ HEAP32[i3 >> 2] = HEAP32[i3 >> 2] & -2;
+ HEAP32[i15 + 4 >> 2] = i2 | 1;
+ HEAP32[i15 + i2 >> 2] = i2;
+ i3 = i2 >>> 3;
+ if (i2 >>> 0 < 256) {
+ i4 = i3 << 1;
+ i2 = 80 + (i4 << 2) | 0;
+ i5 = HEAP32[10] | 0;
+ i3 = 1 << i3;
+ if ((i5 & i3 | 0) != 0) {
+ i4 = 80 + (i4 + 2 << 2) | 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ if (i3 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i7 = i4;
+ i8 = i3;
+ }
+ } else {
+ HEAP32[10] = i5 | i3;
+ i7 = 80 + (i4 + 2 << 2) | 0;
+ i8 = i2;
+ }
+ HEAP32[i7 >> 2] = i15;
+ HEAP32[i8 + 12 >> 2] = i15;
+ HEAP32[i15 + 8 >> 2] = i8;
+ HEAP32[i15 + 12 >> 2] = i2;
+ break;
+ }
+ i3 = i2 >>> 8;
+ if ((i3 | 0) != 0) {
+ if (i2 >>> 0 > 16777215) {
+ i3 = 31;
+ } else {
+ i31 = (i3 + 1048320 | 0) >>> 16 & 8;
+ i32 = i3 << i31;
+ i30 = (i32 + 520192 | 0) >>> 16 & 4;
+ i32 = i32 << i30;
+ i3 = (i32 + 245760 | 0) >>> 16 & 2;
+ i3 = 14 - (i30 | i31 | i3) + (i32 << i3 >>> 15) | 0;
+ i3 = i2 >>> (i3 + 7 | 0) & 1 | i3 << 1;
+ }
+ } else {
+ i3 = 0;
+ }
+ i7 = 344 + (i3 << 2) | 0;
+ HEAP32[i15 + 28 >> 2] = i3;
+ HEAP32[i15 + 20 >> 2] = 0;
+ HEAP32[i15 + 16 >> 2] = 0;
+ i4 = HEAP32[44 >> 2] | 0;
+ i5 = 1 << i3;
+ if ((i4 & i5 | 0) == 0) {
+ HEAP32[44 >> 2] = i4 | i5;
+ HEAP32[i7 >> 2] = i15;
+ HEAP32[i15 + 24 >> 2] = i7;
+ HEAP32[i15 + 12 >> 2] = i15;
+ HEAP32[i15 + 8 >> 2] = i15;
+ break;
+ }
+ i4 = HEAP32[i7 >> 2] | 0;
+ if ((i3 | 0) == 31) {
+ i3 = 0;
+ } else {
+ i3 = 25 - (i3 >>> 1) | 0;
+ }
+ L499 : do {
+ if ((HEAP32[i4 + 4 >> 2] & -8 | 0) != (i2 | 0)) {
+ i3 = i2 << i3;
+ while (1) {
+ i7 = i4 + (i3 >>> 31 << 2) + 16 | 0;
+ i5 = HEAP32[i7 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ break;
+ }
+ if ((HEAP32[i5 + 4 >> 2] & -8 | 0) == (i2 | 0)) {
+ i6 = i5;
+ break L499;
+ } else {
+ i3 = i3 << 1;
+ i4 = i5;
+ }
+ }
+ if (i7 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i7 >> 2] = i15;
+ HEAP32[i15 + 24 >> 2] = i4;
+ HEAP32[i15 + 12 >> 2] = i15;
+ HEAP32[i15 + 8 >> 2] = i15;
+ break L311;
+ }
+ } else {
+ i6 = i4;
+ }
+ } while (0);
+ i4 = i6 + 8 | 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ i2 = HEAP32[56 >> 2] | 0;
+ if (i6 >>> 0 < i2 >>> 0) {
+ _abort();
+ }
+ if (i3 >>> 0 < i2 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i3 + 12 >> 2] = i15;
+ HEAP32[i4 >> 2] = i15;
+ HEAP32[i15 + 8 >> 2] = i3;
+ HEAP32[i15 + 12 >> 2] = i6;
+ HEAP32[i15 + 24 >> 2] = 0;
+ break;
+ }
+ }
+ } else {
+ i32 = HEAP32[56 >> 2] | 0;
+ if ((i32 | 0) == 0 | i17 >>> 0 < i32 >>> 0) {
+ HEAP32[56 >> 2] = i17;
+ }
+ HEAP32[488 >> 2] = i17;
+ HEAP32[492 >> 2] = i14;
+ HEAP32[500 >> 2] = 0;
+ HEAP32[76 >> 2] = HEAP32[128];
+ HEAP32[72 >> 2] = -1;
+ i2 = 0;
+ do {
+ i32 = i2 << 1;
+ i31 = 80 + (i32 << 2) | 0;
+ HEAP32[80 + (i32 + 3 << 2) >> 2] = i31;
+ HEAP32[80 + (i32 + 2 << 2) >> 2] = i31;
+ i2 = i2 + 1 | 0;
+ } while ((i2 | 0) != 32);
+ i2 = i17 + 8 | 0;
+ if ((i2 & 7 | 0) == 0) {
+ i2 = 0;
+ } else {
+ i2 = 0 - i2 & 7;
+ }
+ i32 = i14 + -40 - i2 | 0;
+ HEAP32[64 >> 2] = i17 + i2;
+ HEAP32[52 >> 2] = i32;
+ HEAP32[i17 + (i2 + 4) >> 2] = i32 | 1;
+ HEAP32[i17 + (i14 + -36) >> 2] = 40;
+ HEAP32[68 >> 2] = HEAP32[528 >> 2];
+ }
+ } while (0);
+ i2 = HEAP32[52 >> 2] | 0;
+ if (i2 >>> 0 > i12 >>> 0) {
+ i31 = i2 - i12 | 0;
+ HEAP32[52 >> 2] = i31;
+ i32 = HEAP32[64 >> 2] | 0;
+ HEAP32[64 >> 2] = i32 + i12;
+ HEAP32[i32 + (i12 + 4) >> 2] = i31 | 1;
+ HEAP32[i32 + 4 >> 2] = i12 | 3;
+ i32 = i32 + 8 | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ }
+ HEAP32[(___errno_location() | 0) >> 2] = 12;
+ i32 = 0;
+ STACKTOP = i1;
+ return i32 | 0;
+}
+function _free(i7) {
+ i7 = i7 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0;
+ i1 = STACKTOP;
+ if ((i7 | 0) == 0) {
+ STACKTOP = i1;
+ return;
+ }
+ i15 = i7 + -8 | 0;
+ i16 = HEAP32[56 >> 2] | 0;
+ if (i15 >>> 0 < i16 >>> 0) {
+ _abort();
+ }
+ i13 = HEAP32[i7 + -4 >> 2] | 0;
+ i12 = i13 & 3;
+ if ((i12 | 0) == 1) {
+ _abort();
+ }
+ i8 = i13 & -8;
+ i6 = i7 + (i8 + -8) | 0;
+ do {
+ if ((i13 & 1 | 0) == 0) {
+ i19 = HEAP32[i15 >> 2] | 0;
+ if ((i12 | 0) == 0) {
+ STACKTOP = i1;
+ return;
+ }
+ i15 = -8 - i19 | 0;
+ i13 = i7 + i15 | 0;
+ i12 = i19 + i8 | 0;
+ if (i13 >>> 0 < i16 >>> 0) {
+ _abort();
+ }
+ if ((i13 | 0) == (HEAP32[60 >> 2] | 0)) {
+ i2 = i7 + (i8 + -4) | 0;
+ if ((HEAP32[i2 >> 2] & 3 | 0) != 3) {
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ HEAP32[48 >> 2] = i12;
+ HEAP32[i2 >> 2] = HEAP32[i2 >> 2] & -2;
+ HEAP32[i7 + (i15 + 4) >> 2] = i12 | 1;
+ HEAP32[i6 >> 2] = i12;
+ STACKTOP = i1;
+ return;
+ }
+ i18 = i19 >>> 3;
+ if (i19 >>> 0 < 256) {
+ i2 = HEAP32[i7 + (i15 + 8) >> 2] | 0;
+ i11 = HEAP32[i7 + (i15 + 12) >> 2] | 0;
+ i14 = 80 + (i18 << 1 << 2) | 0;
+ if ((i2 | 0) != (i14 | 0)) {
+ if (i2 >>> 0 < i16 >>> 0) {
+ _abort();
+ }
+ if ((HEAP32[i2 + 12 >> 2] | 0) != (i13 | 0)) {
+ _abort();
+ }
+ }
+ if ((i11 | 0) == (i2 | 0)) {
+ HEAP32[10] = HEAP32[10] & ~(1 << i18);
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ if ((i11 | 0) != (i14 | 0)) {
+ if (i11 >>> 0 < i16 >>> 0) {
+ _abort();
+ }
+ i14 = i11 + 8 | 0;
+ if ((HEAP32[i14 >> 2] | 0) == (i13 | 0)) {
+ i17 = i14;
+ } else {
+ _abort();
+ }
+ } else {
+ i17 = i11 + 8 | 0;
+ }
+ HEAP32[i2 + 12 >> 2] = i11;
+ HEAP32[i17 >> 2] = i2;
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ i17 = HEAP32[i7 + (i15 + 24) >> 2] | 0;
+ i18 = HEAP32[i7 + (i15 + 12) >> 2] | 0;
+ do {
+ if ((i18 | 0) == (i13 | 0)) {
+ i19 = i7 + (i15 + 20) | 0;
+ i18 = HEAP32[i19 >> 2] | 0;
+ if ((i18 | 0) == 0) {
+ i19 = i7 + (i15 + 16) | 0;
+ i18 = HEAP32[i19 >> 2] | 0;
+ if ((i18 | 0) == 0) {
+ i14 = 0;
+ break;
+ }
+ }
+ while (1) {
+ i21 = i18 + 20 | 0;
+ i20 = HEAP32[i21 >> 2] | 0;
+ if ((i20 | 0) != 0) {
+ i18 = i20;
+ i19 = i21;
+ continue;
+ }
+ i20 = i18 + 16 | 0;
+ i21 = HEAP32[i20 >> 2] | 0;
+ if ((i21 | 0) == 0) {
+ break;
+ } else {
+ i18 = i21;
+ i19 = i20;
+ }
+ }
+ if (i19 >>> 0 < i16 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i19 >> 2] = 0;
+ i14 = i18;
+ break;
+ }
+ } else {
+ i19 = HEAP32[i7 + (i15 + 8) >> 2] | 0;
+ if (i19 >>> 0 < i16 >>> 0) {
+ _abort();
+ }
+ i16 = i19 + 12 | 0;
+ if ((HEAP32[i16 >> 2] | 0) != (i13 | 0)) {
+ _abort();
+ }
+ i20 = i18 + 8 | 0;
+ if ((HEAP32[i20 >> 2] | 0) == (i13 | 0)) {
+ HEAP32[i16 >> 2] = i18;
+ HEAP32[i20 >> 2] = i19;
+ i14 = i18;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ if ((i17 | 0) != 0) {
+ i18 = HEAP32[i7 + (i15 + 28) >> 2] | 0;
+ i16 = 344 + (i18 << 2) | 0;
+ if ((i13 | 0) == (HEAP32[i16 >> 2] | 0)) {
+ HEAP32[i16 >> 2] = i14;
+ if ((i14 | 0) == 0) {
+ HEAP32[44 >> 2] = HEAP32[44 >> 2] & ~(1 << i18);
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ } else {
+ if (i17 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i16 = i17 + 16 | 0;
+ if ((HEAP32[i16 >> 2] | 0) == (i13 | 0)) {
+ HEAP32[i16 >> 2] = i14;
+ } else {
+ HEAP32[i17 + 20 >> 2] = i14;
+ }
+ if ((i14 | 0) == 0) {
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ }
+ if (i14 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i14 + 24 >> 2] = i17;
+ i16 = HEAP32[i7 + (i15 + 16) >> 2] | 0;
+ do {
+ if ((i16 | 0) != 0) {
+ if (i16 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i14 + 16 >> 2] = i16;
+ HEAP32[i16 + 24 >> 2] = i14;
+ break;
+ }
+ }
+ } while (0);
+ i15 = HEAP32[i7 + (i15 + 20) >> 2] | 0;
+ if ((i15 | 0) != 0) {
+ if (i15 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i14 + 20 >> 2] = i15;
+ HEAP32[i15 + 24 >> 2] = i14;
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ } else {
+ i2 = i13;
+ i11 = i12;
+ }
+ } else {
+ i2 = i13;
+ i11 = i12;
+ }
+ } else {
+ i2 = i15;
+ i11 = i8;
+ }
+ } while (0);
+ if (!(i2 >>> 0 < i6 >>> 0)) {
+ _abort();
+ }
+ i12 = i7 + (i8 + -4) | 0;
+ i13 = HEAP32[i12 >> 2] | 0;
+ if ((i13 & 1 | 0) == 0) {
+ _abort();
+ }
+ if ((i13 & 2 | 0) == 0) {
+ if ((i6 | 0) == (HEAP32[64 >> 2] | 0)) {
+ i21 = (HEAP32[52 >> 2] | 0) + i11 | 0;
+ HEAP32[52 >> 2] = i21;
+ HEAP32[64 >> 2] = i2;
+ HEAP32[i2 + 4 >> 2] = i21 | 1;
+ if ((i2 | 0) != (HEAP32[60 >> 2] | 0)) {
+ STACKTOP = i1;
+ return;
+ }
+ HEAP32[60 >> 2] = 0;
+ HEAP32[48 >> 2] = 0;
+ STACKTOP = i1;
+ return;
+ }
+ if ((i6 | 0) == (HEAP32[60 >> 2] | 0)) {
+ i21 = (HEAP32[48 >> 2] | 0) + i11 | 0;
+ HEAP32[48 >> 2] = i21;
+ HEAP32[60 >> 2] = i2;
+ HEAP32[i2 + 4 >> 2] = i21 | 1;
+ HEAP32[i2 + i21 >> 2] = i21;
+ STACKTOP = i1;
+ return;
+ }
+ i11 = (i13 & -8) + i11 | 0;
+ i12 = i13 >>> 3;
+ do {
+ if (!(i13 >>> 0 < 256)) {
+ i10 = HEAP32[i7 + (i8 + 16) >> 2] | 0;
+ i15 = HEAP32[i7 + (i8 | 4) >> 2] | 0;
+ do {
+ if ((i15 | 0) == (i6 | 0)) {
+ i13 = i7 + (i8 + 12) | 0;
+ i12 = HEAP32[i13 >> 2] | 0;
+ if ((i12 | 0) == 0) {
+ i13 = i7 + (i8 + 8) | 0;
+ i12 = HEAP32[i13 >> 2] | 0;
+ if ((i12 | 0) == 0) {
+ i9 = 0;
+ break;
+ }
+ }
+ while (1) {
+ i14 = i12 + 20 | 0;
+ i15 = HEAP32[i14 >> 2] | 0;
+ if ((i15 | 0) != 0) {
+ i12 = i15;
+ i13 = i14;
+ continue;
+ }
+ i14 = i12 + 16 | 0;
+ i15 = HEAP32[i14 >> 2] | 0;
+ if ((i15 | 0) == 0) {
+ break;
+ } else {
+ i12 = i15;
+ i13 = i14;
+ }
+ }
+ if (i13 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i13 >> 2] = 0;
+ i9 = i12;
+ break;
+ }
+ } else {
+ i13 = HEAP32[i7 + i8 >> 2] | 0;
+ if (i13 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i14 = i13 + 12 | 0;
+ if ((HEAP32[i14 >> 2] | 0) != (i6 | 0)) {
+ _abort();
+ }
+ i12 = i15 + 8 | 0;
+ if ((HEAP32[i12 >> 2] | 0) == (i6 | 0)) {
+ HEAP32[i14 >> 2] = i15;
+ HEAP32[i12 >> 2] = i13;
+ i9 = i15;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ if ((i10 | 0) != 0) {
+ i12 = HEAP32[i7 + (i8 + 20) >> 2] | 0;
+ i13 = 344 + (i12 << 2) | 0;
+ if ((i6 | 0) == (HEAP32[i13 >> 2] | 0)) {
+ HEAP32[i13 >> 2] = i9;
+ if ((i9 | 0) == 0) {
+ HEAP32[44 >> 2] = HEAP32[44 >> 2] & ~(1 << i12);
+ break;
+ }
+ } else {
+ if (i10 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i12 = i10 + 16 | 0;
+ if ((HEAP32[i12 >> 2] | 0) == (i6 | 0)) {
+ HEAP32[i12 >> 2] = i9;
+ } else {
+ HEAP32[i10 + 20 >> 2] = i9;
+ }
+ if ((i9 | 0) == 0) {
+ break;
+ }
+ }
+ if (i9 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i9 + 24 >> 2] = i10;
+ i6 = HEAP32[i7 + (i8 + 8) >> 2] | 0;
+ do {
+ if ((i6 | 0) != 0) {
+ if (i6 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i9 + 16 >> 2] = i6;
+ HEAP32[i6 + 24 >> 2] = i9;
+ break;
+ }
+ }
+ } while (0);
+ i6 = HEAP32[i7 + (i8 + 12) >> 2] | 0;
+ if ((i6 | 0) != 0) {
+ if (i6 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i9 + 20 >> 2] = i6;
+ HEAP32[i6 + 24 >> 2] = i9;
+ break;
+ }
+ }
+ }
+ } else {
+ i9 = HEAP32[i7 + i8 >> 2] | 0;
+ i7 = HEAP32[i7 + (i8 | 4) >> 2] | 0;
+ i8 = 80 + (i12 << 1 << 2) | 0;
+ if ((i9 | 0) != (i8 | 0)) {
+ if (i9 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ if ((HEAP32[i9 + 12 >> 2] | 0) != (i6 | 0)) {
+ _abort();
+ }
+ }
+ if ((i7 | 0) == (i9 | 0)) {
+ HEAP32[10] = HEAP32[10] & ~(1 << i12);
+ break;
+ }
+ if ((i7 | 0) != (i8 | 0)) {
+ if (i7 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i8 = i7 + 8 | 0;
+ if ((HEAP32[i8 >> 2] | 0) == (i6 | 0)) {
+ i10 = i8;
+ } else {
+ _abort();
+ }
+ } else {
+ i10 = i7 + 8 | 0;
+ }
+ HEAP32[i9 + 12 >> 2] = i7;
+ HEAP32[i10 >> 2] = i9;
+ }
+ } while (0);
+ HEAP32[i2 + 4 >> 2] = i11 | 1;
+ HEAP32[i2 + i11 >> 2] = i11;
+ if ((i2 | 0) == (HEAP32[60 >> 2] | 0)) {
+ HEAP32[48 >> 2] = i11;
+ STACKTOP = i1;
+ return;
+ }
+ } else {
+ HEAP32[i12 >> 2] = i13 & -2;
+ HEAP32[i2 + 4 >> 2] = i11 | 1;
+ HEAP32[i2 + i11 >> 2] = i11;
+ }
+ i6 = i11 >>> 3;
+ if (i11 >>> 0 < 256) {
+ i7 = i6 << 1;
+ i3 = 80 + (i7 << 2) | 0;
+ i8 = HEAP32[10] | 0;
+ i6 = 1 << i6;
+ if ((i8 & i6 | 0) != 0) {
+ i6 = 80 + (i7 + 2 << 2) | 0;
+ i7 = HEAP32[i6 >> 2] | 0;
+ if (i7 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i4 = i6;
+ i5 = i7;
+ }
+ } else {
+ HEAP32[10] = i8 | i6;
+ i4 = 80 + (i7 + 2 << 2) | 0;
+ i5 = i3;
+ }
+ HEAP32[i4 >> 2] = i2;
+ HEAP32[i5 + 12 >> 2] = i2;
+ HEAP32[i2 + 8 >> 2] = i5;
+ HEAP32[i2 + 12 >> 2] = i3;
+ STACKTOP = i1;
+ return;
+ }
+ i4 = i11 >>> 8;
+ if ((i4 | 0) != 0) {
+ if (i11 >>> 0 > 16777215) {
+ i4 = 31;
+ } else {
+ i20 = (i4 + 1048320 | 0) >>> 16 & 8;
+ i21 = i4 << i20;
+ i19 = (i21 + 520192 | 0) >>> 16 & 4;
+ i21 = i21 << i19;
+ i4 = (i21 + 245760 | 0) >>> 16 & 2;
+ i4 = 14 - (i19 | i20 | i4) + (i21 << i4 >>> 15) | 0;
+ i4 = i11 >>> (i4 + 7 | 0) & 1 | i4 << 1;
+ }
+ } else {
+ i4 = 0;
+ }
+ i5 = 344 + (i4 << 2) | 0;
+ HEAP32[i2 + 28 >> 2] = i4;
+ HEAP32[i2 + 20 >> 2] = 0;
+ HEAP32[i2 + 16 >> 2] = 0;
+ i7 = HEAP32[44 >> 2] | 0;
+ i6 = 1 << i4;
+ L199 : do {
+ if ((i7 & i6 | 0) != 0) {
+ i5 = HEAP32[i5 >> 2] | 0;
+ if ((i4 | 0) == 31) {
+ i4 = 0;
+ } else {
+ i4 = 25 - (i4 >>> 1) | 0;
+ }
+ L205 : do {
+ if ((HEAP32[i5 + 4 >> 2] & -8 | 0) != (i11 | 0)) {
+ i4 = i11 << i4;
+ i7 = i5;
+ while (1) {
+ i6 = i7 + (i4 >>> 31 << 2) + 16 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ break;
+ }
+ if ((HEAP32[i5 + 4 >> 2] & -8 | 0) == (i11 | 0)) {
+ i3 = i5;
+ break L205;
+ } else {
+ i4 = i4 << 1;
+ i7 = i5;
+ }
+ }
+ if (i6 >>> 0 < (HEAP32[56 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i6 >> 2] = i2;
+ HEAP32[i2 + 24 >> 2] = i7;
+ HEAP32[i2 + 12 >> 2] = i2;
+ HEAP32[i2 + 8 >> 2] = i2;
+ break L199;
+ }
+ } else {
+ i3 = i5;
+ }
+ } while (0);
+ i5 = i3 + 8 | 0;
+ i4 = HEAP32[i5 >> 2] | 0;
+ i6 = HEAP32[56 >> 2] | 0;
+ if (i3 >>> 0 < i6 >>> 0) {
+ _abort();
+ }
+ if (i4 >>> 0 < i6 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i4 + 12 >> 2] = i2;
+ HEAP32[i5 >> 2] = i2;
+ HEAP32[i2 + 8 >> 2] = i4;
+ HEAP32[i2 + 12 >> 2] = i3;
+ HEAP32[i2 + 24 >> 2] = 0;
+ break;
+ }
+ } else {
+ HEAP32[44 >> 2] = i7 | i6;
+ HEAP32[i5 >> 2] = i2;
+ HEAP32[i2 + 24 >> 2] = i5;
+ HEAP32[i2 + 12 >> 2] = i2;
+ HEAP32[i2 + 8 >> 2] = i2;
+ }
+ } while (0);
+ i21 = (HEAP32[72 >> 2] | 0) + -1 | 0;
+ HEAP32[72 >> 2] = i21;
+ if ((i21 | 0) == 0) {
+ i2 = 496 | 0;
+ } else {
+ STACKTOP = i1;
+ return;
+ }
+ while (1) {
+ i2 = HEAP32[i2 >> 2] | 0;
+ if ((i2 | 0) == 0) {
+ break;
+ } else {
+ i2 = i2 + 8 | 0;
+ }
+ }
+ HEAP32[72 >> 2] = -1;
+ STACKTOP = i1;
+ return;
+}
+function _main(i3, i5) {
+ i3 = i3 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i2 = 0, i4 = 0, i6 = 0, i7 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i2 = i1;
+ L1 : do {
+ if ((i3 | 0) > 1) {
+ i3 = HEAP8[HEAP32[i5 + 4 >> 2] | 0] | 0;
+ switch (i3 | 0) {
+ case 50:
+ {
+ i3 = 400;
+ break L1;
+ }
+ case 51:
+ {
+ i4 = 4;
+ break L1;
+ }
+ case 52:
+ {
+ i3 = 4e3;
+ break L1;
+ }
+ case 53:
+ {
+ i3 = 8e3;
+ break L1;
+ }
+ case 49:
+ {
+ i3 = 55;
+ break L1;
+ }
+ case 48:
+ {
+ i7 = 0;
+ STACKTOP = i1;
+ return i7 | 0;
+ }
+ default:
+ {
+ HEAP32[i2 >> 2] = i3 + -48;
+ _printf(8, i2 | 0) | 0;
+ i7 = -1;
+ STACKTOP = i1;
+ return i7 | 0;
+ }
+ }
+ } else {
+ i4 = 4;
+ }
+ } while (0);
+ if ((i4 | 0) == 4) {
+ i3 = 800;
+ }
+ i5 = _malloc(1048576) | 0;
+ i6 = 0;
+ i4 = 0;
+ do {
+ i7 = 0;
+ while (1) {
+ HEAP8[i5 + i7 | 0] = i7 + i6;
+ i7 = i7 + 1 | 0;
+ if ((i7 | 0) == 1048576) {
+ i7 = 0;
+ break;
+ }
+ }
+ do {
+ i6 = (HEAP8[i5 + i7 | 0] & 1) + i6 | 0;
+ i7 = i7 + 1 | 0;
+ } while ((i7 | 0) != 1048576);
+ i6 = (i6 | 0) % 1e3 | 0;
+ i4 = i4 + 1 | 0;
+ } while ((i4 | 0) < (i3 | 0));
+ HEAP32[i2 >> 2] = i6;
+ _printf(24, i2 | 0) | 0;
+ i7 = 0;
+ STACKTOP = i1;
+ return i7 | 0;
+}
+function _memcpy(i3, i2, i1) {
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i4 = 0;
+ if ((i1 | 0) >= 4096) return _emscripten_memcpy_big(i3 | 0, i2 | 0, i1 | 0) | 0;
+ i4 = i3 | 0;
+ if ((i3 & 3) == (i2 & 3)) {
+ while (i3 & 3) {
+ if ((i1 | 0) == 0) return i4 | 0;
+ HEAP8[i3] = HEAP8[i2] | 0;
+ i3 = i3 + 1 | 0;
+ i2 = i2 + 1 | 0;
+ i1 = i1 - 1 | 0;
+ }
+ while ((i1 | 0) >= 4) {
+ HEAP32[i3 >> 2] = HEAP32[i2 >> 2];
+ i3 = i3 + 4 | 0;
+ i2 = i2 + 4 | 0;
+ i1 = i1 - 4 | 0;
+ }
+ }
+ while ((i1 | 0) > 0) {
+ HEAP8[i3] = HEAP8[i2] | 0;
+ i3 = i3 + 1 | 0;
+ i2 = i2 + 1 | 0;
+ i1 = i1 - 1 | 0;
+ }
+ return i4 | 0;
+}
+function _memset(i1, i4, i3) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i5 = 0, i6 = 0, i7 = 0;
+ i2 = i1 + i3 | 0;
+ if ((i3 | 0) >= 20) {
+ i4 = i4 & 255;
+ i7 = i1 & 3;
+ i6 = i4 | i4 << 8 | i4 << 16 | i4 << 24;
+ i5 = i2 & ~3;
+ if (i7) {
+ i7 = i1 + 4 - i7 | 0;
+ while ((i1 | 0) < (i7 | 0)) {
+ HEAP8[i1] = i4;
+ i1 = i1 + 1 | 0;
+ }
+ }
+ while ((i1 | 0) < (i5 | 0)) {
+ HEAP32[i1 >> 2] = i6;
+ i1 = i1 + 4 | 0;
+ }
+ }
+ while ((i1 | 0) < (i2 | 0)) {
+ HEAP8[i1] = i4;
+ i1 = i1 + 1 | 0;
+ }
+ return i1 - i3 | 0;
+}
+function copyTempDouble(i1) {
+ i1 = i1 | 0;
+ HEAP8[tempDoublePtr] = HEAP8[i1];
+ HEAP8[tempDoublePtr + 1 | 0] = HEAP8[i1 + 1 | 0];
+ HEAP8[tempDoublePtr + 2 | 0] = HEAP8[i1 + 2 | 0];
+ HEAP8[tempDoublePtr + 3 | 0] = HEAP8[i1 + 3 | 0];
+ HEAP8[tempDoublePtr + 4 | 0] = HEAP8[i1 + 4 | 0];
+ HEAP8[tempDoublePtr + 5 | 0] = HEAP8[i1 + 5 | 0];
+ HEAP8[tempDoublePtr + 6 | 0] = HEAP8[i1 + 6 | 0];
+ HEAP8[tempDoublePtr + 7 | 0] = HEAP8[i1 + 7 | 0];
+}
+function copyTempFloat(i1) {
+ i1 = i1 | 0;
+ HEAP8[tempDoublePtr] = HEAP8[i1];
+ HEAP8[tempDoublePtr + 1 | 0] = HEAP8[i1 + 1 | 0];
+ HEAP8[tempDoublePtr + 2 | 0] = HEAP8[i1 + 2 | 0];
+ HEAP8[tempDoublePtr + 3 | 0] = HEAP8[i1 + 3 | 0];
+}
+function runPostSets() {}
+function _strlen(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = i1;
+ while (HEAP8[i2] | 0) {
+ i2 = i2 + 1 | 0;
+ }
+ return i2 - i1 | 0;
+}
+function stackAlloc(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + i1 | 0;
+ STACKTOP = STACKTOP + 7 & -8;
+ return i2 | 0;
+}
+function setThrew(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ if ((__THREW__ | 0) == 0) {
+ __THREW__ = i1;
+ threwValue = i2;
+ }
+}
+function stackRestore(i1) {
+ i1 = i1 | 0;
+ STACKTOP = i1;
+}
+function setTempRet9(i1) {
+ i1 = i1 | 0;
+ tempRet9 = i1;
+}
+function setTempRet8(i1) {
+ i1 = i1 | 0;
+ tempRet8 = i1;
+}
+function setTempRet7(i1) {
+ i1 = i1 | 0;
+ tempRet7 = i1;
+}
+function setTempRet6(i1) {
+ i1 = i1 | 0;
+ tempRet6 = i1;
+}
+function setTempRet5(i1) {
+ i1 = i1 | 0;
+ tempRet5 = i1;
+}
+function setTempRet4(i1) {
+ i1 = i1 | 0;
+ tempRet4 = i1;
+}
+function setTempRet3(i1) {
+ i1 = i1 | 0;
+ tempRet3 = i1;
+}
+function setTempRet2(i1) {
+ i1 = i1 | 0;
+ tempRet2 = i1;
+}
+function setTempRet1(i1) {
+ i1 = i1 | 0;
+ tempRet1 = i1;
+}
+function setTempRet0(i1) {
+ i1 = i1 | 0;
+ tempRet0 = i1;
+}
+function stackSave() {
+ return STACKTOP | 0;
+}
+
+// EMSCRIPTEN_END_FUNCS
+
+
+ return { _strlen: _strlen, _free: _free, _main: _main, _memset: _memset, _malloc: _malloc, _memcpy: _memcpy, runPostSets: runPostSets, stackAlloc: stackAlloc, stackSave: stackSave, stackRestore: stackRestore, setThrew: setThrew, setTempRet0: setTempRet0, setTempRet1: setTempRet1, setTempRet2: setTempRet2, setTempRet3: setTempRet3, setTempRet4: setTempRet4, setTempRet5: setTempRet5, setTempRet6: setTempRet6, setTempRet7: setTempRet7, setTempRet8: setTempRet8, setTempRet9: setTempRet9 };
+}).toString(),
+// EMSCRIPTEN_END_ASM
+{ "Math": Math, "Int8Array": Int8Array, "Int16Array": Int16Array, "Int32Array": Int32Array, "Uint8Array": Uint8Array, "Uint16Array": Uint16Array, "Uint32Array": Uint32Array, "Float32Array": Float32Array, "Float64Array": Float64Array, "abort": abort, "assert": assert, "asmPrintInt": asmPrintInt, "asmPrintFloat": asmPrintFloat, "min": Math_min, "_fflush": _fflush, "_emscripten_memcpy_big": _emscripten_memcpy_big, "_printf": _printf, "_send": _send, "_pwrite": _pwrite, "_abort": _abort, "___setErrNo": ___setErrNo, "_fwrite": _fwrite, "_sbrk": _sbrk, "_time": _time, "_mkport": _mkport, "__reallyNegative": __reallyNegative, "__formatString": __formatString, "_fileno": _fileno, "_write": _write, "_fprintf": _fprintf, "_sysconf": _sysconf, "___errno_location": ___errno_location, "STACKTOP": STACKTOP, "STACK_MAX": STACK_MAX, "tempDoublePtr": tempDoublePtr, "ABORT": ABORT, "NaN": NaN, "Infinity": Infinity }, buffer);
+var _strlen = Module["_strlen"] = asm["_strlen"];
+var _free = Module["_free"] = asm["_free"];
+var _main = Module["_main"] = asm["_main"];
+var _memset = Module["_memset"] = asm["_memset"];
+var _malloc = Module["_malloc"] = asm["_malloc"];
+var _memcpy = Module["_memcpy"] = asm["_memcpy"];
+var runPostSets = Module["runPostSets"] = asm["runPostSets"];
+
+Runtime.stackAlloc = function(size) { return asm['stackAlloc'](size) };
+Runtime.stackSave = function() { return asm['stackSave']() };
+Runtime.stackRestore = function(top) { asm['stackRestore'](top) };
+
+
+// Warning: printing of i64 values may be slightly rounded! No deep i64 math used, so precise i64 code not included
+var i64Math = null;
+
+// === Auto-generated postamble setup entry stuff ===
+
+if (memoryInitializer) {
+ if (ENVIRONMENT_IS_NODE || ENVIRONMENT_IS_SHELL) {
+ var data = Module['readBinary'](memoryInitializer);
+ HEAPU8.set(data, STATIC_BASE);
+ } else {
+ addRunDependency('memory initializer');
+ Browser.asyncLoad(memoryInitializer, function(data) {
+ HEAPU8.set(data, STATIC_BASE);
+ removeRunDependency('memory initializer');
+ }, function(data) {
+ throw 'could not load memory initializer ' + memoryInitializer;
+ });
+ }
+}
+
+function ExitStatus(status) {
+ this.name = "ExitStatus";
+ this.message = "Program terminated with exit(" + status + ")";
+ this.status = status;
+};
+ExitStatus.prototype = new Error();
+ExitStatus.prototype.constructor = ExitStatus;
+
+var initialStackTop;
+var preloadStartTime = null;
+var calledMain = false;
+
+dependenciesFulfilled = function runCaller() {
+ // If run has never been called, and we should call run (INVOKE_RUN is true, and Module.noInitialRun is not false)
+ if (!Module['calledRun'] && shouldRunNow) run([].concat(Module["arguments"]));
+ if (!Module['calledRun']) dependenciesFulfilled = runCaller; // try this again later, after new deps are fulfilled
+}
+
+Module['callMain'] = Module.callMain = function callMain(args) {
+ assert(runDependencies == 0, 'cannot call main when async dependencies remain! (listen on __ATMAIN__)');
+ assert(__ATPRERUN__.length == 0, 'cannot call main when preRun functions remain to be called');
+
+ args = args || [];
+
+ ensureInitRuntime();
+
+ var argc = args.length+1;
+ function pad() {
+ for (var i = 0; i < 4-1; i++) {
+ argv.push(0);
+ }
+ }
+ var argv = [allocate(intArrayFromString("/bin/this.program"), 'i8', ALLOC_NORMAL) ];
+ pad();
+ for (var i = 0; i < argc-1; i = i + 1) {
+ argv.push(allocate(intArrayFromString(args[i]), 'i8', ALLOC_NORMAL));
+ pad();
+ }
+ argv.push(0);
+ argv = allocate(argv, 'i32', ALLOC_NORMAL);
+
+ initialStackTop = STACKTOP;
+
+ try {
+
+ var ret = Module['_main'](argc, argv, 0);
+
+
+ // if we're not running an evented main loop, it's time to exit
+ if (!Module['noExitRuntime']) {
+ exit(ret);
+ }
+ }
+ catch(e) {
+ if (e instanceof ExitStatus) {
+ // exit() throws this once it's done to make sure execution
+ // has been stopped completely
+ return;
+ } else if (e == 'SimulateInfiniteLoop') {
+ // running an evented main loop, don't immediately exit
+ Module['noExitRuntime'] = true;
+ return;
+ } else {
+ if (e && typeof e === 'object' && e.stack) Module.printErr('exception thrown: ' + [e, e.stack]);
+ throw e;
+ }
+ } finally {
+ calledMain = true;
+ }
+}
+
+
+
+
+function run(args) {
+ args = args || Module['arguments'];
+
+ if (preloadStartTime === null) preloadStartTime = Date.now();
+
+ if (runDependencies > 0) {
+ Module.printErr('run() called, but dependencies remain, so not running');
+ return;
+ }
+
+ preRun();
+
+ if (runDependencies > 0) return; // a preRun added a dependency, run will be called later
+ if (Module['calledRun']) return; // run may have just been called through dependencies being fulfilled just in this very frame
+
+ function doRun() {
+ if (Module['calledRun']) return; // run may have just been called while the async setStatus time below was happening
+ Module['calledRun'] = true;
+
+ ensureInitRuntime();
+
+ preMain();
+
+ if (ENVIRONMENT_IS_WEB && preloadStartTime !== null) {
+ Module.printErr('pre-main prep time: ' + (Date.now() - preloadStartTime) + ' ms');
+ }
+
+ if (Module['_main'] && shouldRunNow) {
+ Module['callMain'](args);
+ }
+
+ postRun();
+ }
+
+ if (Module['setStatus']) {
+ Module['setStatus']('Running...');
+ setTimeout(function() {
+ setTimeout(function() {
+ Module['setStatus']('');
+ }, 1);
+ if (!ABORT) doRun();
+ }, 1);
+ } else {
+ doRun();
+ }
+}
+Module['run'] = Module.run = run;
+
+function exit(status) {
+ ABORT = true;
+ EXITSTATUS = status;
+ STACKTOP = initialStackTop;
+
+ // exit the runtime
+ exitRuntime();
+
+ // TODO We should handle this differently based on environment.
+ // In the browser, the best we can do is throw an exception
+ // to halt execution, but in node we could process.exit and
+ // I'd imagine SM shell would have something equivalent.
+ // This would let us set a proper exit status (which
+ // would be great for checking test exit statuses).
+ // https://github.com/kripken/emscripten/issues/1371
+
+ // throw an exception to halt the current execution
+ throw new ExitStatus(status);
+}
+Module['exit'] = Module.exit = exit;
+
+function abort(text) {
+ if (text) {
+ Module.print(text);
+ Module.printErr(text);
+ }
+
+ ABORT = true;
+ EXITSTATUS = 1;
+
+ var extra = '\nIf this abort() is unexpected, build with -s ASSERTIONS=1 which can give more information.';
+
+ throw 'abort() at ' + stackTrace() + extra;
+}
+Module['abort'] = Module.abort = abort;
+
+// {{PRE_RUN_ADDITIONS}}
+
+if (Module['preInit']) {
+ if (typeof Module['preInit'] == 'function') Module['preInit'] = [Module['preInit']];
+ while (Module['preInit'].length > 0) {
+ Module['preInit'].pop()();
+ }
+}
+
+// shouldRunNow refers to calling main(), not run().
+var shouldRunNow = true;
+if (Module['noInitialRun']) {
+ shouldRunNow = false;
+}
+
+
+run([].concat(Module["arguments"]));
diff --git a/deps/v8/test/mjsunit/wasm/embenchen/primes.js b/deps/v8/test/mjsunit/wasm/embenchen/primes.js
new file mode 100644
index 0000000000..5e02d79dec
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/embenchen/primes.js
@@ -0,0 +1,5987 @@
+// Modified embenchen to direct to asm-wasm.
+// Flags: --expose-wasm
+
+var EXPECTED_OUTPUT = 'lastprime: 387677.\n';
+var Module = {
+ arguments: [1],
+ print: function(x) {Module.printBuffer += x + '\n';},
+ preRun: [function() {Module.printBuffer = ''}],
+ postRun: [function() {
+ assertEquals(EXPECTED_OUTPUT, Module.printBuffer);
+ }],
+};
+// The Module object: Our interface to the outside world. We import
+// and export values on it, and do the work to get that through
+// closure compiler if necessary. There are various ways Module can be used:
+// 1. Not defined. We create it here
+// 2. A function parameter, function(Module) { ..generated code.. }
+// 3. pre-run appended it, var Module = {}; ..generated code..
+// 4. External script tag defines var Module.
+// We need to do an eval in order to handle the closure compiler
+// case, where this code here is minified but Module was defined
+// elsewhere (e.g. case 4 above). We also need to check if Module
+// already exists (e.g. case 3 above).
+// Note that if you want to run closure, and also to use Module
+// after the generated code, you will need to define var Module = {};
+// before the code. Then that object will be used in the code, and you
+// can continue to use Module afterwards as well.
+var Module;
+if (!Module) Module = (typeof Module !== 'undefined' ? Module : null) || {};
+
+// Sometimes an existing Module object exists with properties
+// meant to overwrite the default module functionality. Here
+// we collect those properties and reapply _after_ we configure
+// the current environment's defaults to avoid having to be so
+// defensive during initialization.
+var moduleOverrides = {};
+for (var key in Module) {
+ if (Module.hasOwnProperty(key)) {
+ moduleOverrides[key] = Module[key];
+ }
+}
+
+// The environment setup code below is customized to use Module.
+// *** Environment setup code ***
+var ENVIRONMENT_IS_NODE = typeof process === 'object' && typeof require === 'function';
+var ENVIRONMENT_IS_WEB = typeof window === 'object';
+var ENVIRONMENT_IS_WORKER = typeof importScripts === 'function';
+var ENVIRONMENT_IS_SHELL = !ENVIRONMENT_IS_WEB && !ENVIRONMENT_IS_NODE && !ENVIRONMENT_IS_WORKER;
+
+if (ENVIRONMENT_IS_NODE) {
+ // Expose functionality in the same simple way that the shells work
+ // Note that we pollute the global namespace here, otherwise we break in node
+ if (!Module['print']) Module['print'] = function print(x) {
+ process['stdout'].write(x + '\n');
+ };
+ if (!Module['printErr']) Module['printErr'] = function printErr(x) {
+ process['stderr'].write(x + '\n');
+ };
+
+ var nodeFS = require('fs');
+ var nodePath = require('path');
+
+ Module['read'] = function read(filename, binary) {
+ filename = nodePath['normalize'](filename);
+ var ret = nodeFS['readFileSync'](filename);
+ // The path is absolute if the normalized version is the same as the resolved.
+ if (!ret && filename != nodePath['resolve'](filename)) {
+ filename = path.join(__dirname, '..', 'src', filename);
+ ret = nodeFS['readFileSync'](filename);
+ }
+ if (ret && !binary) ret = ret.toString();
+ return ret;
+ };
+
+ Module['readBinary'] = function readBinary(filename) { return Module['read'](filename, true) };
+
+ Module['load'] = function load(f) {
+ globalEval(read(f));
+ };
+
+ Module['arguments'] = process['argv'].slice(2);
+
+ module['exports'] = Module;
+}
+else if (ENVIRONMENT_IS_SHELL) {
+ if (!Module['print']) Module['print'] = print;
+ if (typeof printErr != 'undefined') Module['printErr'] = printErr; // not present in v8 or older sm
+
+ if (typeof read != 'undefined') {
+ Module['read'] = read;
+ } else {
+ Module['read'] = function read() { throw 'no read() available (jsc?)' };
+ }
+
+ Module['readBinary'] = function readBinary(f) {
+ return read(f, 'binary');
+ };
+
+ if (typeof scriptArgs != 'undefined') {
+ Module['arguments'] = scriptArgs;
+ } else if (typeof arguments != 'undefined') {
+ Module['arguments'] = arguments;
+ }
+
+ this['Module'] = Module;
+
+ eval("if (typeof gc === 'function' && gc.toString().indexOf('[native code]') > 0) var gc = undefined"); // wipe out the SpiderMonkey shell 'gc' function, which can confuse closure (uses it as a minified name, and it is then initted to a non-falsey value unexpectedly)
+}
+else if (ENVIRONMENT_IS_WEB || ENVIRONMENT_IS_WORKER) {
+ Module['read'] = function read(url) {
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, false);
+ xhr.send(null);
+ return xhr.responseText;
+ };
+
+ if (typeof arguments != 'undefined') {
+ Module['arguments'] = arguments;
+ }
+
+ if (typeof console !== 'undefined') {
+ if (!Module['print']) Module['print'] = function print(x) {
+ console.log(x);
+ };
+ if (!Module['printErr']) Module['printErr'] = function printErr(x) {
+ console.log(x);
+ };
+ } else {
+ // Probably a worker, and without console.log. We can do very little here...
+ var TRY_USE_DUMP = false;
+ if (!Module['print']) Module['print'] = (TRY_USE_DUMP && (typeof(dump) !== "undefined") ? (function(x) {
+ dump(x);
+ }) : (function(x) {
+ // self.postMessage(x); // enable this if you want stdout to be sent as messages
+ }));
+ }
+
+ if (ENVIRONMENT_IS_WEB) {
+ window['Module'] = Module;
+ } else {
+ Module['load'] = importScripts;
+ }
+}
+else {
+ // Unreachable because SHELL is dependant on the others
+ throw 'Unknown runtime environment. Where are we?';
+}
+
+function globalEval(x) {
+ eval.call(null, x);
+}
+if (!Module['load'] == 'undefined' && Module['read']) {
+ Module['load'] = function load(f) {
+ globalEval(Module['read'](f));
+ };
+}
+if (!Module['print']) {
+ Module['print'] = function(){};
+}
+if (!Module['printErr']) {
+ Module['printErr'] = Module['print'];
+}
+if (!Module['arguments']) {
+ Module['arguments'] = [];
+}
+// *** Environment setup code ***
+
+// Closure helpers
+Module.print = Module['print'];
+Module.printErr = Module['printErr'];
+
+// Callbacks
+Module['preRun'] = [];
+Module['postRun'] = [];
+
+// Merge back in the overrides
+for (var key in moduleOverrides) {
+ if (moduleOverrides.hasOwnProperty(key)) {
+ Module[key] = moduleOverrides[key];
+ }
+}
+
+
+
+// === Auto-generated preamble library stuff ===
+
+//========================================
+// Runtime code shared with compiler
+//========================================
+
+var Runtime = {
+ stackSave: function () {
+ return STACKTOP;
+ },
+ stackRestore: function (stackTop) {
+ STACKTOP = stackTop;
+ },
+ forceAlign: function (target, quantum) {
+ quantum = quantum || 4;
+ if (quantum == 1) return target;
+ if (isNumber(target) && isNumber(quantum)) {
+ return Math.ceil(target/quantum)*quantum;
+ } else if (isNumber(quantum) && isPowerOfTwo(quantum)) {
+ return '(((' +target + ')+' + (quantum-1) + ')&' + -quantum + ')';
+ }
+ return 'Math.ceil((' + target + ')/' + quantum + ')*' + quantum;
+ },
+ isNumberType: function (type) {
+ return type in Runtime.INT_TYPES || type in Runtime.FLOAT_TYPES;
+ },
+ isPointerType: function isPointerType(type) {
+ return type[type.length-1] == '*';
+},
+ isStructType: function isStructType(type) {
+ if (isPointerType(type)) return false;
+ if (isArrayType(type)) return true;
+ if (/<?\{ ?[^}]* ?\}>?/.test(type)) return true; // { i32, i8 } etc. - anonymous struct types
+ // See comment in isStructPointerType()
+ return type[0] == '%';
+},
+ INT_TYPES: {"i1":0,"i8":0,"i16":0,"i32":0,"i64":0},
+ FLOAT_TYPES: {"float":0,"double":0},
+ or64: function (x, y) {
+ var l = (x | 0) | (y | 0);
+ var h = (Math.round(x / 4294967296) | Math.round(y / 4294967296)) * 4294967296;
+ return l + h;
+ },
+ and64: function (x, y) {
+ var l = (x | 0) & (y | 0);
+ var h = (Math.round(x / 4294967296) & Math.round(y / 4294967296)) * 4294967296;
+ return l + h;
+ },
+ xor64: function (x, y) {
+ var l = (x | 0) ^ (y | 0);
+ var h = (Math.round(x / 4294967296) ^ Math.round(y / 4294967296)) * 4294967296;
+ return l + h;
+ },
+ getNativeTypeSize: function (type) {
+ switch (type) {
+ case 'i1': case 'i8': return 1;
+ case 'i16': return 2;
+ case 'i32': return 4;
+ case 'i64': return 8;
+ case 'float': return 4;
+ case 'double': return 8;
+ default: {
+ if (type[type.length-1] === '*') {
+ return Runtime.QUANTUM_SIZE; // A pointer
+ } else if (type[0] === 'i') {
+ var bits = parseInt(type.substr(1));
+ assert(bits % 8 === 0);
+ return bits/8;
+ } else {
+ return 0;
+ }
+ }
+ }
+ },
+ getNativeFieldSize: function (type) {
+ return Math.max(Runtime.getNativeTypeSize(type), Runtime.QUANTUM_SIZE);
+ },
+ dedup: function dedup(items, ident) {
+ var seen = {};
+ if (ident) {
+ return items.filter(function(item) {
+ if (seen[item[ident]]) return false;
+ seen[item[ident]] = true;
+ return true;
+ });
+ } else {
+ return items.filter(function(item) {
+ if (seen[item]) return false;
+ seen[item] = true;
+ return true;
+ });
+ }
+},
+ set: function set() {
+ var args = typeof arguments[0] === 'object' ? arguments[0] : arguments;
+ var ret = {};
+ for (var i = 0; i < args.length; i++) {
+ ret[args[i]] = 0;
+ }
+ return ret;
+},
+ STACK_ALIGN: 8,
+ getAlignSize: function (type, size, vararg) {
+ // we align i64s and doubles on 64-bit boundaries, unlike x86
+ if (!vararg && (type == 'i64' || type == 'double')) return 8;
+ if (!type) return Math.min(size, 8); // align structures internally to 64 bits
+ return Math.min(size || (type ? Runtime.getNativeFieldSize(type) : 0), Runtime.QUANTUM_SIZE);
+ },
+ calculateStructAlignment: function calculateStructAlignment(type) {
+ type.flatSize = 0;
+ type.alignSize = 0;
+ var diffs = [];
+ var prev = -1;
+ var index = 0;
+ type.flatIndexes = type.fields.map(function(field) {
+ index++;
+ var size, alignSize;
+ if (Runtime.isNumberType(field) || Runtime.isPointerType(field)) {
+ size = Runtime.getNativeTypeSize(field); // pack char; char; in structs, also char[X]s.
+ alignSize = Runtime.getAlignSize(field, size);
+ } else if (Runtime.isStructType(field)) {
+ if (field[1] === '0') {
+ // this is [0 x something]. When inside another structure like here, it must be at the end,
+ // and it adds no size
+ // XXX this happens in java-nbody for example... assert(index === type.fields.length, 'zero-length in the middle!');
+ size = 0;
+ if (Types.types[field]) {
+ alignSize = Runtime.getAlignSize(null, Types.types[field].alignSize);
+ } else {
+ alignSize = type.alignSize || QUANTUM_SIZE;
+ }
+ } else {
+ size = Types.types[field].flatSize;
+ alignSize = Runtime.getAlignSize(null, Types.types[field].alignSize);
+ }
+ } else if (field[0] == 'b') {
+ // bN, large number field, like a [N x i8]
+ size = field.substr(1)|0;
+ alignSize = 1;
+ } else if (field[0] === '<') {
+ // vector type
+ size = alignSize = Types.types[field].flatSize; // fully aligned
+ } else if (field[0] === 'i') {
+ // illegal integer field, that could not be legalized because it is an internal structure field
+ // it is ok to have such fields, if we just use them as markers of field size and nothing more complex
+ size = alignSize = parseInt(field.substr(1))/8;
+ assert(size % 1 === 0, 'cannot handle non-byte-size field ' + field);
+ } else {
+ assert(false, 'invalid type for calculateStructAlignment');
+ }
+ if (type.packed) alignSize = 1;
+ type.alignSize = Math.max(type.alignSize, alignSize);
+ var curr = Runtime.alignMemory(type.flatSize, alignSize); // if necessary, place this on aligned memory
+ type.flatSize = curr + size;
+ if (prev >= 0) {
+ diffs.push(curr-prev);
+ }
+ prev = curr;
+ return curr;
+ });
+ if (type.name_ && type.name_[0] === '[') {
+ // arrays have 2 elements, so we get the proper difference. then we scale here. that way we avoid
+ // allocating a potentially huge array for [999999 x i8] etc.
+ type.flatSize = parseInt(type.name_.substr(1))*type.flatSize/2;
+ }
+ type.flatSize = Runtime.alignMemory(type.flatSize, type.alignSize);
+ if (diffs.length == 0) {
+ type.flatFactor = type.flatSize;
+ } else if (Runtime.dedup(diffs).length == 1) {
+ type.flatFactor = diffs[0];
+ }
+ type.needsFlattening = (type.flatFactor != 1);
+ return type.flatIndexes;
+ },
+ generateStructInfo: function (struct, typeName, offset) {
+ var type, alignment;
+ if (typeName) {
+ offset = offset || 0;
+ type = (typeof Types === 'undefined' ? Runtime.typeInfo : Types.types)[typeName];
+ if (!type) return null;
+ if (type.fields.length != struct.length) {
+ printErr('Number of named fields must match the type for ' + typeName + ': possibly duplicate struct names. Cannot return structInfo');
+ return null;
+ }
+ alignment = type.flatIndexes;
+ } else {
+ var type = { fields: struct.map(function(item) { return item[0] }) };
+ alignment = Runtime.calculateStructAlignment(type);
+ }
+ var ret = {
+ __size__: type.flatSize
+ };
+ if (typeName) {
+ struct.forEach(function(item, i) {
+ if (typeof item === 'string') {
+ ret[item] = alignment[i] + offset;
+ } else {
+ // embedded struct
+ var key;
+ for (var k in item) key = k;
+ ret[key] = Runtime.generateStructInfo(item[key], type.fields[i], alignment[i]);
+ }
+ });
+ } else {
+ struct.forEach(function(item, i) {
+ ret[item[1]] = alignment[i];
+ });
+ }
+ return ret;
+ },
+ dynCall: function (sig, ptr, args) {
+ if (args && args.length) {
+ if (!args.splice) args = Array.prototype.slice.call(args);
+ args.splice(0, 0, ptr);
+ return Module['dynCall_' + sig].apply(null, args);
+ } else {
+ return Module['dynCall_' + sig].call(null, ptr);
+ }
+ },
+ functionPointers: [],
+ addFunction: function (func) {
+ for (var i = 0; i < Runtime.functionPointers.length; i++) {
+ if (!Runtime.functionPointers[i]) {
+ Runtime.functionPointers[i] = func;
+ return 2*(1 + i);
+ }
+ }
+ throw 'Finished up all reserved function pointers. Use a higher value for RESERVED_FUNCTION_POINTERS.';
+ },
+ removeFunction: function (index) {
+ Runtime.functionPointers[(index-2)/2] = null;
+ },
+ getAsmConst: function (code, numArgs) {
+ // code is a constant string on the heap, so we can cache these
+ if (!Runtime.asmConstCache) Runtime.asmConstCache = {};
+ var func = Runtime.asmConstCache[code];
+ if (func) return func;
+ var args = [];
+ for (var i = 0; i < numArgs; i++) {
+ args.push(String.fromCharCode(36) + i); // $0, $1 etc
+ }
+ var source = Pointer_stringify(code);
+ if (source[0] === '"') {
+ // tolerate EM_ASM("..code..") even though EM_ASM(..code..) is correct
+ if (source.indexOf('"', 1) === source.length-1) {
+ source = source.substr(1, source.length-2);
+ } else {
+ // something invalid happened, e.g. EM_ASM("..code($0)..", input)
+ abort('invalid EM_ASM input |' + source + '|. Please use EM_ASM(..code..) (no quotes) or EM_ASM({ ..code($0).. }, input) (to input values)');
+ }
+ }
+ try {
+ var evalled = eval('(function(' + args.join(',') + '){ ' + source + ' })'); // new Function does not allow upvars in node
+ } catch(e) {
+ Module.printErr('error in executing inline EM_ASM code: ' + e + ' on: \n\n' + source + '\n\nwith args |' + args + '| (make sure to use the right one out of EM_ASM, EM_ASM_ARGS, etc.)');
+ throw e;
+ }
+ return Runtime.asmConstCache[code] = evalled;
+ },
+ warnOnce: function (text) {
+ if (!Runtime.warnOnce.shown) Runtime.warnOnce.shown = {};
+ if (!Runtime.warnOnce.shown[text]) {
+ Runtime.warnOnce.shown[text] = 1;
+ Module.printErr(text);
+ }
+ },
+ funcWrappers: {},
+ getFuncWrapper: function (func, sig) {
+ assert(sig);
+ if (!Runtime.funcWrappers[func]) {
+ Runtime.funcWrappers[func] = function dynCall_wrapper() {
+ return Runtime.dynCall(sig, func, arguments);
+ };
+ }
+ return Runtime.funcWrappers[func];
+ },
+ UTF8Processor: function () {
+ var buffer = [];
+ var needed = 0;
+ this.processCChar = function (code) {
+ code = code & 0xFF;
+
+ if (buffer.length == 0) {
+ if ((code & 0x80) == 0x00) { // 0xxxxxxx
+ return String.fromCharCode(code);
+ }
+ buffer.push(code);
+ if ((code & 0xE0) == 0xC0) { // 110xxxxx
+ needed = 1;
+ } else if ((code & 0xF0) == 0xE0) { // 1110xxxx
+ needed = 2;
+ } else { // 11110xxx
+ needed = 3;
+ }
+ return '';
+ }
+
+ if (needed) {
+ buffer.push(code);
+ needed--;
+ if (needed > 0) return '';
+ }
+
+ var c1 = buffer[0];
+ var c2 = buffer[1];
+ var c3 = buffer[2];
+ var c4 = buffer[3];
+ var ret;
+ if (buffer.length == 2) {
+ ret = String.fromCharCode(((c1 & 0x1F) << 6) | (c2 & 0x3F));
+ } else if (buffer.length == 3) {
+ ret = String.fromCharCode(((c1 & 0x0F) << 12) | ((c2 & 0x3F) << 6) | (c3 & 0x3F));
+ } else {
+ // http://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
+ var codePoint = ((c1 & 0x07) << 18) | ((c2 & 0x3F) << 12) |
+ ((c3 & 0x3F) << 6) | (c4 & 0x3F);
+ ret = String.fromCharCode(
+ Math.floor((codePoint - 0x10000) / 0x400) + 0xD800,
+ (codePoint - 0x10000) % 0x400 + 0xDC00);
+ }
+ buffer.length = 0;
+ return ret;
+ }
+ this.processJSString = function processJSString(string) {
+ /* TODO: use TextEncoder when present,
+ var encoder = new TextEncoder();
+ encoder['encoding'] = "utf-8";
+ var utf8Array = encoder['encode'](aMsg.data);
+ */
+ string = unescape(encodeURIComponent(string));
+ var ret = [];
+ for (var i = 0; i < string.length; i++) {
+ ret.push(string.charCodeAt(i));
+ }
+ return ret;
+ }
+ },
+ getCompilerSetting: function (name) {
+ throw 'You must build with -s RETAIN_COMPILER_SETTINGS=1 for Runtime.getCompilerSetting or emscripten_get_compiler_setting to work';
+ },
+ stackAlloc: function (size) { var ret = STACKTOP;STACKTOP = (STACKTOP + size)|0;STACKTOP = (((STACKTOP)+7)&-8); return ret; },
+ staticAlloc: function (size) { var ret = STATICTOP;STATICTOP = (STATICTOP + size)|0;STATICTOP = (((STATICTOP)+7)&-8); return ret; },
+ dynamicAlloc: function (size) { var ret = DYNAMICTOP;DYNAMICTOP = (DYNAMICTOP + size)|0;DYNAMICTOP = (((DYNAMICTOP)+7)&-8); if (DYNAMICTOP >= TOTAL_MEMORY) enlargeMemory();; return ret; },
+ alignMemory: function (size,quantum) { var ret = size = Math.ceil((size)/(quantum ? quantum : 8))*(quantum ? quantum : 8); return ret; },
+ makeBigInt: function (low,high,unsigned) { var ret = (unsigned ? ((+((low>>>0)))+((+((high>>>0)))*(+4294967296))) : ((+((low>>>0)))+((+((high|0)))*(+4294967296)))); return ret; },
+ GLOBAL_BASE: 8,
+ QUANTUM_SIZE: 4,
+ __dummy__: 0
+}
+
+
+Module['Runtime'] = Runtime;
+
+
+
+
+
+
+
+
+
+//========================================
+// Runtime essentials
+//========================================
+
+var __THREW__ = 0; // Used in checking for thrown exceptions.
+
+var ABORT = false; // whether we are quitting the application. no code should run after this. set in exit() and abort()
+var EXITSTATUS = 0;
+
+var undef = 0;
+// tempInt is used for 32-bit signed values or smaller. tempBigInt is used
+// for 32-bit unsigned values or more than 32 bits. TODO: audit all uses of tempInt
+var tempValue, tempInt, tempBigInt, tempInt2, tempBigInt2, tempPair, tempBigIntI, tempBigIntR, tempBigIntS, tempBigIntP, tempBigIntD, tempDouble, tempFloat;
+var tempI64, tempI64b;
+var tempRet0, tempRet1, tempRet2, tempRet3, tempRet4, tempRet5, tempRet6, tempRet7, tempRet8, tempRet9;
+
+function assert(condition, text) {
+ if (!condition) {
+ abort('Assertion failed: ' + text);
+ }
+}
+
+var globalScope = this;
+
+// C calling interface. A convenient way to call C functions (in C files, or
+// defined with extern "C").
+//
+// Note: LLVM optimizations can inline and remove functions, after which you will not be
+// able to call them. Closure can also do so. To avoid that, add your function to
+// the exports using something like
+//
+// -s EXPORTED_FUNCTIONS='["_main", "_myfunc"]'
+//
+// @param ident The name of the C function (note that C++ functions will be name-mangled - use extern "C")
+// @param returnType The return type of the function, one of the JS types 'number', 'string' or 'array' (use 'number' for any C pointer, and
+// 'array' for JavaScript arrays and typed arrays; note that arrays are 8-bit).
+// @param argTypes An array of the types of arguments for the function (if there are no arguments, this can be ommitted). Types are as in returnType,
+// except that 'array' is not possible (there is no way for us to know the length of the array)
+// @param args An array of the arguments to the function, as native JS values (as in returnType)
+// Note that string arguments will be stored on the stack (the JS string will become a C string on the stack).
+// @return The return value, as a native JS value (as in returnType)
+function ccall(ident, returnType, argTypes, args) {
+ return ccallFunc(getCFunc(ident), returnType, argTypes, args);
+}
+Module["ccall"] = ccall;
+
+// Returns the C function with a specified identifier (for C++, you need to do manual name mangling)
+function getCFunc(ident) {
+ try {
+ var func = Module['_' + ident]; // closure exported function
+ if (!func) func = eval('_' + ident); // explicit lookup
+ } catch(e) {
+ }
+ assert(func, 'Cannot call unknown function ' + ident + ' (perhaps LLVM optimizations or closure removed it?)');
+ return func;
+}
+
+// Internal function that does a C call using a function, not an identifier
+function ccallFunc(func, returnType, argTypes, args) {
+ var stack = 0;
+ function toC(value, type) {
+ if (type == 'string') {
+ if (value === null || value === undefined || value === 0) return 0; // null string
+ value = intArrayFromString(value);
+ type = 'array';
+ }
+ if (type == 'array') {
+ if (!stack) stack = Runtime.stackSave();
+ var ret = Runtime.stackAlloc(value.length);
+ writeArrayToMemory(value, ret);
+ return ret;
+ }
+ return value;
+ }
+ function fromC(value, type) {
+ if (type == 'string') {
+ return Pointer_stringify(value);
+ }
+ assert(type != 'array');
+ return value;
+ }
+ var i = 0;
+ var cArgs = args ? args.map(function(arg) {
+ return toC(arg, argTypes[i++]);
+ }) : [];
+ var ret = fromC(func.apply(null, cArgs), returnType);
+ if (stack) Runtime.stackRestore(stack);
+ return ret;
+}
+
+// Returns a native JS wrapper for a C function. This is similar to ccall, but
+// returns a function you can call repeatedly in a normal way. For example:
+//
+// var my_function = cwrap('my_c_function', 'number', ['number', 'number']);
+// alert(my_function(5, 22));
+// alert(my_function(99, 12));
+//
+function cwrap(ident, returnType, argTypes) {
+ var func = getCFunc(ident);
+ return function() {
+ return ccallFunc(func, returnType, argTypes, Array.prototype.slice.call(arguments));
+ }
+}
+Module["cwrap"] = cwrap;
+
+// Sets a value in memory in a dynamic way at run-time. Uses the
+// type data. This is the same as makeSetValue, except that
+// makeSetValue is done at compile-time and generates the needed
+// code then, whereas this function picks the right code at
+// run-time.
+// Note that setValue and getValue only do *aligned* writes and reads!
+// Note that ccall uses JS types as for defining types, while setValue and
+// getValue need LLVM types ('i8', 'i32') - this is a lower-level operation
+function setValue(ptr, value, type, noSafe) {
+ type = type || 'i8';
+ if (type.charAt(type.length-1) === '*') type = 'i32'; // pointers are 32-bit
+ switch(type) {
+ case 'i1': HEAP8[(ptr)]=value; break;
+ case 'i8': HEAP8[(ptr)]=value; break;
+ case 'i16': HEAP16[((ptr)>>1)]=value; break;
+ case 'i32': HEAP32[((ptr)>>2)]=value; break;
+ case 'i64': (tempI64 = [value>>>0,(tempDouble=value,(+(Math_abs(tempDouble))) >= (+1) ? (tempDouble > (+0) ? ((Math_min((+(Math_floor((tempDouble)/(+4294967296)))), (+4294967295)))|0)>>>0 : (~~((+(Math_ceil((tempDouble - +(((~~(tempDouble)))>>>0))/(+4294967296))))))>>>0) : 0)],HEAP32[((ptr)>>2)]=tempI64[0],HEAP32[(((ptr)+(4))>>2)]=tempI64[1]); break;
+ case 'float': HEAPF32[((ptr)>>2)]=value; break;
+ case 'double': HEAPF64[((ptr)>>3)]=value; break;
+ default: abort('invalid type for setValue: ' + type);
+ }
+}
+Module['setValue'] = setValue;
+
+// Parallel to setValue.
+function getValue(ptr, type, noSafe) {
+ type = type || 'i8';
+ if (type.charAt(type.length-1) === '*') type = 'i32'; // pointers are 32-bit
+ switch(type) {
+ case 'i1': return HEAP8[(ptr)];
+ case 'i8': return HEAP8[(ptr)];
+ case 'i16': return HEAP16[((ptr)>>1)];
+ case 'i32': return HEAP32[((ptr)>>2)];
+ case 'i64': return HEAP32[((ptr)>>2)];
+ case 'float': return HEAPF32[((ptr)>>2)];
+ case 'double': return HEAPF64[((ptr)>>3)];
+ default: abort('invalid type for setValue: ' + type);
+ }
+ return null;
+}
+Module['getValue'] = getValue;
+
+var ALLOC_NORMAL = 0; // Tries to use _malloc()
+var ALLOC_STACK = 1; // Lives for the duration of the current function call
+var ALLOC_STATIC = 2; // Cannot be freed
+var ALLOC_DYNAMIC = 3; // Cannot be freed except through sbrk
+var ALLOC_NONE = 4; // Do not allocate
+Module['ALLOC_NORMAL'] = ALLOC_NORMAL;
+Module['ALLOC_STACK'] = ALLOC_STACK;
+Module['ALLOC_STATIC'] = ALLOC_STATIC;
+Module['ALLOC_DYNAMIC'] = ALLOC_DYNAMIC;
+Module['ALLOC_NONE'] = ALLOC_NONE;
+
+// allocate(): This is for internal use. You can use it yourself as well, but the interface
+// is a little tricky (see docs right below). The reason is that it is optimized
+// for multiple syntaxes to save space in generated code. So you should
+// normally not use allocate(), and instead allocate memory using _malloc(),
+// initialize it with setValue(), and so forth.
+// @slab: An array of data, or a number. If a number, then the size of the block to allocate,
+// in *bytes* (note that this is sometimes confusing: the next parameter does not
+// affect this!)
+// @types: Either an array of types, one for each byte (or 0 if no type at that position),
+// or a single type which is used for the entire block. This only matters if there
+// is initial data - if @slab is a number, then this does not matter at all and is
+// ignored.
+// @allocator: How to allocate memory, see ALLOC_*
+function allocate(slab, types, allocator, ptr) {
+ var zeroinit, size;
+ if (typeof slab === 'number') {
+ zeroinit = true;
+ size = slab;
+ } else {
+ zeroinit = false;
+ size = slab.length;
+ }
+
+ var singleType = typeof types === 'string' ? types : null;
+
+ var ret;
+ if (allocator == ALLOC_NONE) {
+ ret = ptr;
+ } else {
+ ret = [_malloc, Runtime.stackAlloc, Runtime.staticAlloc, Runtime.dynamicAlloc][allocator === undefined ? ALLOC_STATIC : allocator](Math.max(size, singleType ? 1 : types.length));
+ }
+
+ if (zeroinit) {
+ var ptr = ret, stop;
+ assert((ret & 3) == 0);
+ stop = ret + (size & ~3);
+ for (; ptr < stop; ptr += 4) {
+ HEAP32[((ptr)>>2)]=0;
+ }
+ stop = ret + size;
+ while (ptr < stop) {
+ HEAP8[((ptr++)|0)]=0;
+ }
+ return ret;
+ }
+
+ if (singleType === 'i8') {
+ if (slab.subarray || slab.slice) {
+ HEAPU8.set(slab, ret);
+ } else {
+ HEAPU8.set(new Uint8Array(slab), ret);
+ }
+ return ret;
+ }
+
+ var i = 0, type, typeSize, previousType;
+ while (i < size) {
+ var curr = slab[i];
+
+ if (typeof curr === 'function') {
+ curr = Runtime.getFunctionIndex(curr);
+ }
+
+ type = singleType || types[i];
+ if (type === 0) {
+ i++;
+ continue;
+ }
+
+ if (type == 'i64') type = 'i32'; // special case: we have one i32 here, and one i32 later
+
+ setValue(ret+i, curr, type);
+
+ // no need to look up size unless type changes, so cache it
+ if (previousType !== type) {
+ typeSize = Runtime.getNativeTypeSize(type);
+ previousType = type;
+ }
+ i += typeSize;
+ }
+
+ return ret;
+}
+Module['allocate'] = allocate;
+
+function Pointer_stringify(ptr, /* optional */ length) {
+ // TODO: use TextDecoder
+ // Find the length, and check for UTF while doing so
+ var hasUtf = false;
+ var t;
+ var i = 0;
+ while (1) {
+ t = HEAPU8[(((ptr)+(i))|0)];
+ if (t >= 128) hasUtf = true;
+ else if (t == 0 && !length) break;
+ i++;
+ if (length && i == length) break;
+ }
+ if (!length) length = i;
+
+ var ret = '';
+
+ if (!hasUtf) {
+ var MAX_CHUNK = 1024; // split up into chunks, because .apply on a huge string can overflow the stack
+ var curr;
+ while (length > 0) {
+ curr = String.fromCharCode.apply(String, HEAPU8.subarray(ptr, ptr + Math.min(length, MAX_CHUNK)));
+ ret = ret ? ret + curr : curr;
+ ptr += MAX_CHUNK;
+ length -= MAX_CHUNK;
+ }
+ return ret;
+ }
+
+ var utf8 = new Runtime.UTF8Processor();
+ for (i = 0; i < length; i++) {
+ t = HEAPU8[(((ptr)+(i))|0)];
+ ret += utf8.processCChar(t);
+ }
+ return ret;
+}
+Module['Pointer_stringify'] = Pointer_stringify;
+
+// Given a pointer 'ptr' to a null-terminated UTF16LE-encoded string in the emscripten HEAP, returns
+// a copy of that string as a Javascript String object.
+function UTF16ToString(ptr) {
+ var i = 0;
+
+ var str = '';
+ while (1) {
+ var codeUnit = HEAP16[(((ptr)+(i*2))>>1)];
+ if (codeUnit == 0)
+ return str;
+ ++i;
+ // fromCharCode constructs a character from a UTF-16 code unit, so we can pass the UTF16 string right through.
+ str += String.fromCharCode(codeUnit);
+ }
+}
+Module['UTF16ToString'] = UTF16ToString;
+
+// Copies the given Javascript String object 'str' to the emscripten HEAP at address 'outPtr',
+// null-terminated and encoded in UTF16LE form. The copy will require at most (str.length*2+1)*2 bytes of space in the HEAP.
+function stringToUTF16(str, outPtr) {
+ for(var i = 0; i < str.length; ++i) {
+ // charCodeAt returns a UTF-16 encoded code unit, so it can be directly written to the HEAP.
+ var codeUnit = str.charCodeAt(i); // possibly a lead surrogate
+ HEAP16[(((outPtr)+(i*2))>>1)]=codeUnit;
+ }
+ // Null-terminate the pointer to the HEAP.
+ HEAP16[(((outPtr)+(str.length*2))>>1)]=0;
+}
+Module['stringToUTF16'] = stringToUTF16;
+
+// Given a pointer 'ptr' to a null-terminated UTF32LE-encoded string in the emscripten HEAP, returns
+// a copy of that string as a Javascript String object.
+function UTF32ToString(ptr) {
+ var i = 0;
+
+ var str = '';
+ while (1) {
+ var utf32 = HEAP32[(((ptr)+(i*4))>>2)];
+ if (utf32 == 0)
+ return str;
+ ++i;
+ // Gotcha: fromCharCode constructs a character from a UTF-16 encoded code (pair), not from a Unicode code point! So encode the code point to UTF-16 for constructing.
+ if (utf32 >= 0x10000) {
+ var ch = utf32 - 0x10000;
+ str += String.fromCharCode(0xD800 | (ch >> 10), 0xDC00 | (ch & 0x3FF));
+ } else {
+ str += String.fromCharCode(utf32);
+ }
+ }
+}
+Module['UTF32ToString'] = UTF32ToString;
+
+// Copies the given Javascript String object 'str' to the emscripten HEAP at address 'outPtr',
+// null-terminated and encoded in UTF32LE form. The copy will require at most (str.length+1)*4 bytes of space in the HEAP,
+// but can use less, since str.length does not return the number of characters in the string, but the number of UTF-16 code units in the string.
+function stringToUTF32(str, outPtr) {
+ var iChar = 0;
+ for(var iCodeUnit = 0; iCodeUnit < str.length; ++iCodeUnit) {
+ // Gotcha: charCodeAt returns a 16-bit word that is a UTF-16 encoded code unit, not a Unicode code point of the character! We must decode the string to UTF-32 to the heap.
+ var codeUnit = str.charCodeAt(iCodeUnit); // possibly a lead surrogate
+ if (codeUnit >= 0xD800 && codeUnit <= 0xDFFF) {
+ var trailSurrogate = str.charCodeAt(++iCodeUnit);
+ codeUnit = 0x10000 + ((codeUnit & 0x3FF) << 10) | (trailSurrogate & 0x3FF);
+ }
+ HEAP32[(((outPtr)+(iChar*4))>>2)]=codeUnit;
+ ++iChar;
+ }
+ // Null-terminate the pointer to the HEAP.
+ HEAP32[(((outPtr)+(iChar*4))>>2)]=0;
+}
+Module['stringToUTF32'] = stringToUTF32;
+
+function demangle(func) {
+ var i = 3;
+ // params, etc.
+ var basicTypes = {
+ 'v': 'void',
+ 'b': 'bool',
+ 'c': 'char',
+ 's': 'short',
+ 'i': 'int',
+ 'l': 'long',
+ 'f': 'float',
+ 'd': 'double',
+ 'w': 'wchar_t',
+ 'a': 'signed char',
+ 'h': 'unsigned char',
+ 't': 'unsigned short',
+ 'j': 'unsigned int',
+ 'm': 'unsigned long',
+ 'x': 'long long',
+ 'y': 'unsigned long long',
+ 'z': '...'
+ };
+ var subs = [];
+ var first = true;
+ function dump(x) {
+ //return;
+ if (x) Module.print(x);
+ Module.print(func);
+ var pre = '';
+ for (var a = 0; a < i; a++) pre += ' ';
+ Module.print (pre + '^');
+ }
+ function parseNested() {
+ i++;
+ if (func[i] === 'K') i++; // ignore const
+ var parts = [];
+ while (func[i] !== 'E') {
+ if (func[i] === 'S') { // substitution
+ i++;
+ var next = func.indexOf('_', i);
+ var num = func.substring(i, next) || 0;
+ parts.push(subs[num] || '?');
+ i = next+1;
+ continue;
+ }
+ if (func[i] === 'C') { // constructor
+ parts.push(parts[parts.length-1]);
+ i += 2;
+ continue;
+ }
+ var size = parseInt(func.substr(i));
+ var pre = size.toString().length;
+ if (!size || !pre) { i--; break; } // counter i++ below us
+ var curr = func.substr(i + pre, size);
+ parts.push(curr);
+ subs.push(curr);
+ i += pre + size;
+ }
+ i++; // skip E
+ return parts;
+ }
+ function parse(rawList, limit, allowVoid) { // main parser
+ limit = limit || Infinity;
+ var ret = '', list = [];
+ function flushList() {
+ return '(' + list.join(', ') + ')';
+ }
+ var name;
+ if (func[i] === 'N') {
+ // namespaced N-E
+ name = parseNested().join('::');
+ limit--;
+ if (limit === 0) return rawList ? [name] : name;
+ } else {
+ // not namespaced
+ if (func[i] === 'K' || (first && func[i] === 'L')) i++; // ignore const and first 'L'
+ var size = parseInt(func.substr(i));
+ if (size) {
+ var pre = size.toString().length;
+ name = func.substr(i + pre, size);
+ i += pre + size;
+ }
+ }
+ first = false;
+ if (func[i] === 'I') {
+ i++;
+ var iList = parse(true);
+ var iRet = parse(true, 1, true);
+ ret += iRet[0] + ' ' + name + '<' + iList.join(', ') + '>';
+ } else {
+ ret = name;
+ }
+ paramLoop: while (i < func.length && limit-- > 0) {
+ //dump('paramLoop');
+ var c = func[i++];
+ if (c in basicTypes) {
+ list.push(basicTypes[c]);
+ } else {
+ switch (c) {
+ case 'P': list.push(parse(true, 1, true)[0] + '*'); break; // pointer
+ case 'R': list.push(parse(true, 1, true)[0] + '&'); break; // reference
+ case 'L': { // literal
+ i++; // skip basic type
+ var end = func.indexOf('E', i);
+ var size = end - i;
+ list.push(func.substr(i, size));
+ i += size + 2; // size + 'EE'
+ break;
+ }
+ case 'A': { // array
+ var size = parseInt(func.substr(i));
+ i += size.toString().length;
+ if (func[i] !== '_') throw '?';
+ i++; // skip _
+ list.push(parse(true, 1, true)[0] + ' [' + size + ']');
+ break;
+ }
+ case 'E': break paramLoop;
+ default: ret += '?' + c; break paramLoop;
+ }
+ }
+ }
+ if (!allowVoid && list.length === 1 && list[0] === 'void') list = []; // avoid (void)
+ if (rawList) {
+ if (ret) {
+ list.push(ret + '?');
+ }
+ return list;
+ } else {
+ return ret + flushList();
+ }
+ }
+ try {
+ // Special-case the entry point, since its name differs from other name mangling.
+ if (func == 'Object._main' || func == '_main') {
+ return 'main()';
+ }
+ if (typeof func === 'number') func = Pointer_stringify(func);
+ if (func[0] !== '_') return func;
+ if (func[1] !== '_') return func; // C function
+ if (func[2] !== 'Z') return func;
+ switch (func[3]) {
+ case 'n': return 'operator new()';
+ case 'd': return 'operator delete()';
+ }
+ return parse();
+ } catch(e) {
+ return func;
+ }
+}
+
+function demangleAll(text) {
+ return text.replace(/__Z[\w\d_]+/g, function(x) { var y = demangle(x); return x === y ? x : (x + ' [' + y + ']') });
+}
+
+function stackTrace() {
+ var stack = new Error().stack;
+ return stack ? demangleAll(stack) : '(no stack trace available)'; // Stack trace is not available at least on IE10 and Safari 6.
+}
+
+// Memory management
+
+var PAGE_SIZE = 4096;
+function alignMemoryPage(x) {
+ return (x+4095)&-4096;
+}
+
+var HEAP;
+var HEAP8, HEAPU8, HEAP16, HEAPU16, HEAP32, HEAPU32, HEAPF32, HEAPF64;
+
+var STATIC_BASE = 0, STATICTOP = 0, staticSealed = false; // static area
+var STACK_BASE = 0, STACKTOP = 0, STACK_MAX = 0; // stack area
+var DYNAMIC_BASE = 0, DYNAMICTOP = 0; // dynamic area handled by sbrk
+
+function enlargeMemory() {
+ abort('Cannot enlarge memory arrays. Either (1) compile with -s TOTAL_MEMORY=X with X higher than the current value ' + TOTAL_MEMORY + ', (2) compile with ALLOW_MEMORY_GROWTH which adjusts the size at runtime but prevents some optimizations, or (3) set Module.TOTAL_MEMORY before the program runs.');
+}
+
+var TOTAL_STACK = Module['TOTAL_STACK'] || 5242880;
+var TOTAL_MEMORY = Module['TOTAL_MEMORY'] || 134217728;
+var FAST_MEMORY = Module['FAST_MEMORY'] || 2097152;
+
+var totalMemory = 4096;
+while (totalMemory < TOTAL_MEMORY || totalMemory < 2*TOTAL_STACK) {
+ if (totalMemory < 16*1024*1024) {
+ totalMemory *= 2;
+ } else {
+ totalMemory += 16*1024*1024
+ }
+}
+if (totalMemory !== TOTAL_MEMORY) {
+ Module.printErr('increasing TOTAL_MEMORY to ' + totalMemory + ' to be more reasonable');
+ TOTAL_MEMORY = totalMemory;
+}
+
+// Initialize the runtime's memory
+// check for full engine support (use string 'subarray' to avoid closure compiler confusion)
+assert(typeof Int32Array !== 'undefined' && typeof Float64Array !== 'undefined' && !!(new Int32Array(1)['subarray']) && !!(new Int32Array(1)['set']),
+ 'JS engine does not provide full typed array support');
+
+var buffer = new ArrayBuffer(TOTAL_MEMORY);
+HEAP8 = new Int8Array(buffer);
+HEAP16 = new Int16Array(buffer);
+HEAP32 = new Int32Array(buffer);
+HEAPU8 = new Uint8Array(buffer);
+HEAPU16 = new Uint16Array(buffer);
+HEAPU32 = new Uint32Array(buffer);
+HEAPF32 = new Float32Array(buffer);
+HEAPF64 = new Float64Array(buffer);
+
+// Endianness check (note: assumes compiler arch was little-endian)
+HEAP32[0] = 255;
+assert(HEAPU8[0] === 255 && HEAPU8[3] === 0, 'Typed arrays 2 must be run on a little-endian system');
+
+Module['HEAP'] = HEAP;
+Module['HEAP8'] = HEAP8;
+Module['HEAP16'] = HEAP16;
+Module['HEAP32'] = HEAP32;
+Module['HEAPU8'] = HEAPU8;
+Module['HEAPU16'] = HEAPU16;
+Module['HEAPU32'] = HEAPU32;
+Module['HEAPF32'] = HEAPF32;
+Module['HEAPF64'] = HEAPF64;
+
+function callRuntimeCallbacks(callbacks) {
+ while(callbacks.length > 0) {
+ var callback = callbacks.shift();
+ if (typeof callback == 'function') {
+ callback();
+ continue;
+ }
+ var func = callback.func;
+ if (typeof func === 'number') {
+ if (callback.arg === undefined) {
+ Runtime.dynCall('v', func);
+ } else {
+ Runtime.dynCall('vi', func, [callback.arg]);
+ }
+ } else {
+ func(callback.arg === undefined ? null : callback.arg);
+ }
+ }
+}
+
+var __ATPRERUN__ = []; // functions called before the runtime is initialized
+var __ATINIT__ = []; // functions called during startup
+var __ATMAIN__ = []; // functions called when main() is to be run
+var __ATEXIT__ = []; // functions called during shutdown
+var __ATPOSTRUN__ = []; // functions called after the runtime has exited
+
+var runtimeInitialized = false;
+
+function preRun() {
+ // compatibility - merge in anything from Module['preRun'] at this time
+ if (Module['preRun']) {
+ if (typeof Module['preRun'] == 'function') Module['preRun'] = [Module['preRun']];
+ while (Module['preRun'].length) {
+ addOnPreRun(Module['preRun'].shift());
+ }
+ }
+ callRuntimeCallbacks(__ATPRERUN__);
+}
+
+function ensureInitRuntime() {
+ if (runtimeInitialized) return;
+ runtimeInitialized = true;
+ callRuntimeCallbacks(__ATINIT__);
+}
+
+function preMain() {
+ callRuntimeCallbacks(__ATMAIN__);
+}
+
+function exitRuntime() {
+ callRuntimeCallbacks(__ATEXIT__);
+}
+
+function postRun() {
+ // compatibility - merge in anything from Module['postRun'] at this time
+ if (Module['postRun']) {
+ if (typeof Module['postRun'] == 'function') Module['postRun'] = [Module['postRun']];
+ while (Module['postRun'].length) {
+ addOnPostRun(Module['postRun'].shift());
+ }
+ }
+ callRuntimeCallbacks(__ATPOSTRUN__);
+}
+
+function addOnPreRun(cb) {
+ __ATPRERUN__.unshift(cb);
+}
+Module['addOnPreRun'] = Module.addOnPreRun = addOnPreRun;
+
+function addOnInit(cb) {
+ __ATINIT__.unshift(cb);
+}
+Module['addOnInit'] = Module.addOnInit = addOnInit;
+
+function addOnPreMain(cb) {
+ __ATMAIN__.unshift(cb);
+}
+Module['addOnPreMain'] = Module.addOnPreMain = addOnPreMain;
+
+function addOnExit(cb) {
+ __ATEXIT__.unshift(cb);
+}
+Module['addOnExit'] = Module.addOnExit = addOnExit;
+
+function addOnPostRun(cb) {
+ __ATPOSTRUN__.unshift(cb);
+}
+Module['addOnPostRun'] = Module.addOnPostRun = addOnPostRun;
+
+// Tools
+
+// This processes a JS string into a C-line array of numbers, 0-terminated.
+// For LLVM-originating strings, see parser.js:parseLLVMString function
+function intArrayFromString(stringy, dontAddNull, length /* optional */) {
+ var ret = (new Runtime.UTF8Processor()).processJSString(stringy);
+ if (length) {
+ ret.length = length;
+ }
+ if (!dontAddNull) {
+ ret.push(0);
+ }
+ return ret;
+}
+Module['intArrayFromString'] = intArrayFromString;
+
+function intArrayToString(array) {
+ var ret = [];
+ for (var i = 0; i < array.length; i++) {
+ var chr = array[i];
+ if (chr > 0xFF) {
+ chr &= 0xFF;
+ }
+ ret.push(String.fromCharCode(chr));
+ }
+ return ret.join('');
+}
+Module['intArrayToString'] = intArrayToString;
+
+// Write a Javascript array to somewhere in the heap
+function writeStringToMemory(string, buffer, dontAddNull) {
+ var array = intArrayFromString(string, dontAddNull);
+ var i = 0;
+ while (i < array.length) {
+ var chr = array[i];
+ HEAP8[(((buffer)+(i))|0)]=chr;
+ i = i + 1;
+ }
+}
+Module['writeStringToMemory'] = writeStringToMemory;
+
+function writeArrayToMemory(array, buffer) {
+ for (var i = 0; i < array.length; i++) {
+ HEAP8[(((buffer)+(i))|0)]=array[i];
+ }
+}
+Module['writeArrayToMemory'] = writeArrayToMemory;
+
+function writeAsciiToMemory(str, buffer, dontAddNull) {
+ for (var i = 0; i < str.length; i++) {
+ HEAP8[(((buffer)+(i))|0)]=str.charCodeAt(i);
+ }
+ if (!dontAddNull) HEAP8[(((buffer)+(str.length))|0)]=0;
+}
+Module['writeAsciiToMemory'] = writeAsciiToMemory;
+
+function unSign(value, bits, ignore) {
+ if (value >= 0) {
+ return value;
+ }
+ return bits <= 32 ? 2*Math.abs(1 << (bits-1)) + value // Need some trickery, since if bits == 32, we are right at the limit of the bits JS uses in bitshifts
+ : Math.pow(2, bits) + value;
+}
+function reSign(value, bits, ignore) {
+ if (value <= 0) {
+ return value;
+ }
+ var half = bits <= 32 ? Math.abs(1 << (bits-1)) // abs is needed if bits == 32
+ : Math.pow(2, bits-1);
+ if (value >= half && (bits <= 32 || value > half)) { // for huge values, we can hit the precision limit and always get true here. so don't do that
+ // but, in general there is no perfect solution here. With 64-bit ints, we get rounding and errors
+ // TODO: In i64 mode 1, resign the two parts separately and safely
+ value = -2*half + value; // Cannot bitshift half, as it may be at the limit of the bits JS uses in bitshifts
+ }
+ return value;
+}
+
+// check for imul support, and also for correctness ( https://bugs.webkit.org/show_bug.cgi?id=126345 )
+if (!Math['imul'] || Math['imul'](0xffffffff, 5) !== -5) Math['imul'] = function imul(a, b) {
+ var ah = a >>> 16;
+ var al = a & 0xffff;
+ var bh = b >>> 16;
+ var bl = b & 0xffff;
+ return (al*bl + ((ah*bl + al*bh) << 16))|0;
+};
+Math.imul = Math['imul'];
+
+
+var Math_abs = Math.abs;
+var Math_cos = Math.cos;
+var Math_sin = Math.sin;
+var Math_tan = Math.tan;
+var Math_acos = Math.acos;
+var Math_asin = Math.asin;
+var Math_atan = Math.atan;
+var Math_atan2 = Math.atan2;
+var Math_exp = Math.exp;
+var Math_log = Math.log;
+var Math_sqrt = Math.sqrt;
+var Math_ceil = Math.ceil;
+var Math_floor = Math.floor;
+var Math_pow = Math.pow;
+var Math_imul = Math.imul;
+var Math_fround = Math.fround;
+var Math_min = Math.min;
+
+// A counter of dependencies for calling run(). If we need to
+// do asynchronous work before running, increment this and
+// decrement it. Incrementing must happen in a place like
+// PRE_RUN_ADDITIONS (used by emcc to add file preloading).
+// Note that you can add dependencies in preRun, even though
+// it happens right before run - run will be postponed until
+// the dependencies are met.
+var runDependencies = 0;
+var runDependencyWatcher = null;
+var dependenciesFulfilled = null; // overridden to take different actions when all run dependencies are fulfilled
+
+function addRunDependency(id) {
+ runDependencies++;
+ if (Module['monitorRunDependencies']) {
+ Module['monitorRunDependencies'](runDependencies);
+ }
+}
+Module['addRunDependency'] = addRunDependency;
+function removeRunDependency(id) {
+ runDependencies--;
+ if (Module['monitorRunDependencies']) {
+ Module['monitorRunDependencies'](runDependencies);
+ }
+ if (runDependencies == 0) {
+ if (runDependencyWatcher !== null) {
+ clearInterval(runDependencyWatcher);
+ runDependencyWatcher = null;
+ }
+ if (dependenciesFulfilled) {
+ var callback = dependenciesFulfilled;
+ dependenciesFulfilled = null;
+ callback(); // can add another dependenciesFulfilled
+ }
+ }
+}
+Module['removeRunDependency'] = removeRunDependency;
+
+Module["preloadedImages"] = {}; // maps url to image data
+Module["preloadedAudios"] = {}; // maps url to audio data
+
+
+var memoryInitializer = null;
+
+// === Body ===
+
+
+
+
+
+STATIC_BASE = 8;
+
+STATICTOP = STATIC_BASE + Runtime.alignMemory(35);
+/* global initializers */ __ATINIT__.push();
+
+
+/* memory initializer */ allocate([101,114,114,111,114,58,32,37,100,92,110,0,0,0,0,0,108,97,115,116,112,114,105,109,101,58,32,37,100,46,10,0], "i8", ALLOC_NONE, Runtime.GLOBAL_BASE);
+
+
+
+
+var tempDoublePtr = Runtime.alignMemory(allocate(12, "i8", ALLOC_STATIC), 8);
+
+assert(tempDoublePtr % 8 == 0);
+
+function copyTempFloat(ptr) { // functions, because inlining this code increases code size too much
+
+ HEAP8[tempDoublePtr] = HEAP8[ptr];
+
+ HEAP8[tempDoublePtr+1] = HEAP8[ptr+1];
+
+ HEAP8[tempDoublePtr+2] = HEAP8[ptr+2];
+
+ HEAP8[tempDoublePtr+3] = HEAP8[ptr+3];
+
+}
+
+function copyTempDouble(ptr) {
+
+ HEAP8[tempDoublePtr] = HEAP8[ptr];
+
+ HEAP8[tempDoublePtr+1] = HEAP8[ptr+1];
+
+ HEAP8[tempDoublePtr+2] = HEAP8[ptr+2];
+
+ HEAP8[tempDoublePtr+3] = HEAP8[ptr+3];
+
+ HEAP8[tempDoublePtr+4] = HEAP8[ptr+4];
+
+ HEAP8[tempDoublePtr+5] = HEAP8[ptr+5];
+
+ HEAP8[tempDoublePtr+6] = HEAP8[ptr+6];
+
+ HEAP8[tempDoublePtr+7] = HEAP8[ptr+7];
+
+}
+
+
+ function _malloc(bytes) {
+ /* Over-allocate to make sure it is byte-aligned by 8.
+ * This will leak memory, but this is only the dummy
+ * implementation (replaced by dlmalloc normally) so
+ * not an issue.
+ */
+ var ptr = Runtime.dynamicAlloc(bytes + 8);
+ return (ptr+8) & 0xFFFFFFF8;
+ }
+ Module["_malloc"] = _malloc;
+
+
+ Module["_memset"] = _memset;
+
+ function _free() {
+ }
+ Module["_free"] = _free;
+
+
+ function _emscripten_memcpy_big(dest, src, num) {
+ HEAPU8.set(HEAPU8.subarray(src, src+num), dest);
+ return dest;
+ }
+ Module["_memcpy"] = _memcpy;
+
+
+
+
+ var ERRNO_CODES={EPERM:1,ENOENT:2,ESRCH:3,EINTR:4,EIO:5,ENXIO:6,E2BIG:7,ENOEXEC:8,EBADF:9,ECHILD:10,EAGAIN:11,EWOULDBLOCK:11,ENOMEM:12,EACCES:13,EFAULT:14,ENOTBLK:15,EBUSY:16,EEXIST:17,EXDEV:18,ENODEV:19,ENOTDIR:20,EISDIR:21,EINVAL:22,ENFILE:23,EMFILE:24,ENOTTY:25,ETXTBSY:26,EFBIG:27,ENOSPC:28,ESPIPE:29,EROFS:30,EMLINK:31,EPIPE:32,EDOM:33,ERANGE:34,ENOMSG:42,EIDRM:43,ECHRNG:44,EL2NSYNC:45,EL3HLT:46,EL3RST:47,ELNRNG:48,EUNATCH:49,ENOCSI:50,EL2HLT:51,EDEADLK:35,ENOLCK:37,EBADE:52,EBADR:53,EXFULL:54,ENOANO:55,EBADRQC:56,EBADSLT:57,EDEADLOCK:35,EBFONT:59,ENOSTR:60,ENODATA:61,ETIME:62,ENOSR:63,ENONET:64,ENOPKG:65,EREMOTE:66,ENOLINK:67,EADV:68,ESRMNT:69,ECOMM:70,EPROTO:71,EMULTIHOP:72,EDOTDOT:73,EBADMSG:74,ENOTUNIQ:76,EBADFD:77,EREMCHG:78,ELIBACC:79,ELIBBAD:80,ELIBSCN:81,ELIBMAX:82,ELIBEXEC:83,ENOSYS:38,ENOTEMPTY:39,ENAMETOOLONG:36,ELOOP:40,EOPNOTSUPP:95,EPFNOSUPPORT:96,ECONNRESET:104,ENOBUFS:105,EAFNOSUPPORT:97,EPROTOTYPE:91,ENOTSOCK:88,ENOPROTOOPT:92,ESHUTDOWN:108,ECONNREFUSED:111,EADDRINUSE:98,ECONNABORTED:103,ENETUNREACH:101,ENETDOWN:100,ETIMEDOUT:110,EHOSTDOWN:112,EHOSTUNREACH:113,EINPROGRESS:115,EALREADY:114,EDESTADDRREQ:89,EMSGSIZE:90,EPROTONOSUPPORT:93,ESOCKTNOSUPPORT:94,EADDRNOTAVAIL:99,ENETRESET:102,EISCONN:106,ENOTCONN:107,ETOOMANYREFS:109,EUSERS:87,EDQUOT:122,ESTALE:116,ENOTSUP:95,ENOMEDIUM:123,EILSEQ:84,EOVERFLOW:75,ECANCELED:125,ENOTRECOVERABLE:131,EOWNERDEAD:130,ESTRPIPE:86};
+
+ var ERRNO_MESSAGES={0:"Success",1:"Not super-user",2:"No such file or directory",3:"No such process",4:"Interrupted system call",5:"I/O error",6:"No such device or address",7:"Arg list too long",8:"Exec format error",9:"Bad file number",10:"No children",11:"No more processes",12:"Not enough core",13:"Permission denied",14:"Bad address",15:"Block device required",16:"Mount device busy",17:"File exists",18:"Cross-device link",19:"No such device",20:"Not a directory",21:"Is a directory",22:"Invalid argument",23:"Too many open files in system",24:"Too many open files",25:"Not a typewriter",26:"Text file busy",27:"File too large",28:"No space left on device",29:"Illegal seek",30:"Read only file system",31:"Too many links",32:"Broken pipe",33:"Math arg out of domain of func",34:"Math result not representable",35:"File locking deadlock error",36:"File or path name too long",37:"No record locks available",38:"Function not implemented",39:"Directory not empty",40:"Too many symbolic links",42:"No message of desired type",43:"Identifier removed",44:"Channel number out of range",45:"Level 2 not synchronized",46:"Level 3 halted",47:"Level 3 reset",48:"Link number out of range",49:"Protocol driver not attached",50:"No CSI structure available",51:"Level 2 halted",52:"Invalid exchange",53:"Invalid request descriptor",54:"Exchange full",55:"No anode",56:"Invalid request code",57:"Invalid slot",59:"Bad font file fmt",60:"Device not a stream",61:"No data (for no delay io)",62:"Timer expired",63:"Out of streams resources",64:"Machine is not on the network",65:"Package not installed",66:"The object is remote",67:"The link has been severed",68:"Advertise error",69:"Srmount error",70:"Communication error on send",71:"Protocol error",72:"Multihop attempted",73:"Cross mount point (not really error)",74:"Trying to read unreadable message",75:"Value too large for defined data type",76:"Given log. name not unique",77:"f.d. invalid for this operation",78:"Remote address changed",79:"Can access a needed shared lib",80:"Accessing a corrupted shared lib",81:".lib section in a.out corrupted",82:"Attempting to link in too many libs",83:"Attempting to exec a shared library",84:"Illegal byte sequence",86:"Streams pipe error",87:"Too many users",88:"Socket operation on non-socket",89:"Destination address required",90:"Message too long",91:"Protocol wrong type for socket",92:"Protocol not available",93:"Unknown protocol",94:"Socket type not supported",95:"Not supported",96:"Protocol family not supported",97:"Address family not supported by protocol family",98:"Address already in use",99:"Address not available",100:"Network interface is not configured",101:"Network is unreachable",102:"Connection reset by network",103:"Connection aborted",104:"Connection reset by peer",105:"No buffer space available",106:"Socket is already connected",107:"Socket is not connected",108:"Can't send after socket shutdown",109:"Too many references",110:"Connection timed out",111:"Connection refused",112:"Host is down",113:"Host is unreachable",114:"Socket already connected",115:"Connection already in progress",116:"Stale file handle",122:"Quota exceeded",123:"No medium (in tape drive)",125:"Operation canceled",130:"Previous owner died",131:"State not recoverable"};
+
+
+ var ___errno_state=0;function ___setErrNo(value) {
+ // For convenient setting and returning of errno.
+ HEAP32[((___errno_state)>>2)]=value;
+ return value;
+ }
+
+ var TTY={ttys:[],init:function () {
+ // https://github.com/kripken/emscripten/pull/1555
+ // if (ENVIRONMENT_IS_NODE) {
+ // // currently, FS.init does not distinguish if process.stdin is a file or TTY
+ // // device, it always assumes it's a TTY device. because of this, we're forcing
+ // // process.stdin to UTF8 encoding to at least make stdin reading compatible
+ // // with text files until FS.init can be refactored.
+ // process['stdin']['setEncoding']('utf8');
+ // }
+ },shutdown:function () {
+ // https://github.com/kripken/emscripten/pull/1555
+ // if (ENVIRONMENT_IS_NODE) {
+ // // inolen: any idea as to why node -e 'process.stdin.read()' wouldn't exit immediately (with process.stdin being a tty)?
+ // // isaacs: because now it's reading from the stream, you've expressed interest in it, so that read() kicks off a _read() which creates a ReadReq operation
+ // // inolen: I thought read() in that case was a synchronous operation that just grabbed some amount of buffered data if it exists?
+ // // isaacs: it is. but it also triggers a _read() call, which calls readStart() on the handle
+ // // isaacs: do process.stdin.pause() and i'd think it'd probably close the pending call
+ // process['stdin']['pause']();
+ // }
+ },register:function (dev, ops) {
+ TTY.ttys[dev] = { input: [], output: [], ops: ops };
+ FS.registerDevice(dev, TTY.stream_ops);
+ },stream_ops:{open:function (stream) {
+ var tty = TTY.ttys[stream.node.rdev];
+ if (!tty) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ stream.tty = tty;
+ stream.seekable = false;
+ },close:function (stream) {
+ // flush any pending line data
+ if (stream.tty.output.length) {
+ stream.tty.ops.put_char(stream.tty, 10);
+ }
+ },read:function (stream, buffer, offset, length, pos /* ignored */) {
+ if (!stream.tty || !stream.tty.ops.get_char) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENXIO);
+ }
+ var bytesRead = 0;
+ for (var i = 0; i < length; i++) {
+ var result;
+ try {
+ result = stream.tty.ops.get_char(stream.tty);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ if (result === undefined && bytesRead === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ if (result === null || result === undefined) break;
+ bytesRead++;
+ buffer[offset+i] = result;
+ }
+ if (bytesRead) {
+ stream.node.timestamp = Date.now();
+ }
+ return bytesRead;
+ },write:function (stream, buffer, offset, length, pos) {
+ if (!stream.tty || !stream.tty.ops.put_char) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENXIO);
+ }
+ for (var i = 0; i < length; i++) {
+ try {
+ stream.tty.ops.put_char(stream.tty, buffer[offset+i]);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ }
+ if (length) {
+ stream.node.timestamp = Date.now();
+ }
+ return i;
+ }},default_tty_ops:{get_char:function (tty) {
+ if (!tty.input.length) {
+ var result = null;
+ if (ENVIRONMENT_IS_NODE) {
+ result = process['stdin']['read']();
+ if (!result) {
+ if (process['stdin']['_readableState'] && process['stdin']['_readableState']['ended']) {
+ return null; // EOF
+ }
+ return undefined; // no data available
+ }
+ } else if (typeof window != 'undefined' &&
+ typeof window.prompt == 'function') {
+ // Browser.
+ result = window.prompt('Input: '); // returns null on cancel
+ if (result !== null) {
+ result += '\n';
+ }
+ } else if (typeof readline == 'function') {
+ // Command line.
+ result = readline();
+ if (result !== null) {
+ result += '\n';
+ }
+ }
+ if (!result) {
+ return null;
+ }
+ tty.input = intArrayFromString(result, true);
+ }
+ return tty.input.shift();
+ },put_char:function (tty, val) {
+ if (val === null || val === 10) {
+ Module['print'](tty.output.join(''));
+ tty.output = [];
+ } else {
+ tty.output.push(TTY.utf8.processCChar(val));
+ }
+ }},default_tty1_ops:{put_char:function (tty, val) {
+ if (val === null || val === 10) {
+ Module['printErr'](tty.output.join(''));
+ tty.output = [];
+ } else {
+ tty.output.push(TTY.utf8.processCChar(val));
+ }
+ }}};
+
+ var MEMFS={ops_table:null,CONTENT_OWNING:1,CONTENT_FLEXIBLE:2,CONTENT_FIXED:3,mount:function (mount) {
+ return MEMFS.createNode(null, '/', 16384 | 511 /* 0777 */, 0);
+ },createNode:function (parent, name, mode, dev) {
+ if (FS.isBlkdev(mode) || FS.isFIFO(mode)) {
+ // no supported
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (!MEMFS.ops_table) {
+ MEMFS.ops_table = {
+ dir: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr,
+ lookup: MEMFS.node_ops.lookup,
+ mknod: MEMFS.node_ops.mknod,
+ rename: MEMFS.node_ops.rename,
+ unlink: MEMFS.node_ops.unlink,
+ rmdir: MEMFS.node_ops.rmdir,
+ readdir: MEMFS.node_ops.readdir,
+ symlink: MEMFS.node_ops.symlink
+ },
+ stream: {
+ llseek: MEMFS.stream_ops.llseek
+ }
+ },
+ file: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr
+ },
+ stream: {
+ llseek: MEMFS.stream_ops.llseek,
+ read: MEMFS.stream_ops.read,
+ write: MEMFS.stream_ops.write,
+ allocate: MEMFS.stream_ops.allocate,
+ mmap: MEMFS.stream_ops.mmap
+ }
+ },
+ link: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr,
+ readlink: MEMFS.node_ops.readlink
+ },
+ stream: {}
+ },
+ chrdev: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr
+ },
+ stream: FS.chrdev_stream_ops
+ },
+ };
+ }
+ var node = FS.createNode(parent, name, mode, dev);
+ if (FS.isDir(node.mode)) {
+ node.node_ops = MEMFS.ops_table.dir.node;
+ node.stream_ops = MEMFS.ops_table.dir.stream;
+ node.contents = {};
+ } else if (FS.isFile(node.mode)) {
+ node.node_ops = MEMFS.ops_table.file.node;
+ node.stream_ops = MEMFS.ops_table.file.stream;
+ node.contents = [];
+ node.contentMode = MEMFS.CONTENT_FLEXIBLE;
+ } else if (FS.isLink(node.mode)) {
+ node.node_ops = MEMFS.ops_table.link.node;
+ node.stream_ops = MEMFS.ops_table.link.stream;
+ } else if (FS.isChrdev(node.mode)) {
+ node.node_ops = MEMFS.ops_table.chrdev.node;
+ node.stream_ops = MEMFS.ops_table.chrdev.stream;
+ }
+ node.timestamp = Date.now();
+ // add the new node to the parent
+ if (parent) {
+ parent.contents[name] = node;
+ }
+ return node;
+ },ensureFlexible:function (node) {
+ if (node.contentMode !== MEMFS.CONTENT_FLEXIBLE) {
+ var contents = node.contents;
+ node.contents = Array.prototype.slice.call(contents);
+ node.contentMode = MEMFS.CONTENT_FLEXIBLE;
+ }
+ },node_ops:{getattr:function (node) {
+ var attr = {};
+ // device numbers reuse inode numbers.
+ attr.dev = FS.isChrdev(node.mode) ? node.id : 1;
+ attr.ino = node.id;
+ attr.mode = node.mode;
+ attr.nlink = 1;
+ attr.uid = 0;
+ attr.gid = 0;
+ attr.rdev = node.rdev;
+ if (FS.isDir(node.mode)) {
+ attr.size = 4096;
+ } else if (FS.isFile(node.mode)) {
+ attr.size = node.contents.length;
+ } else if (FS.isLink(node.mode)) {
+ attr.size = node.link.length;
+ } else {
+ attr.size = 0;
+ }
+ attr.atime = new Date(node.timestamp);
+ attr.mtime = new Date(node.timestamp);
+ attr.ctime = new Date(node.timestamp);
+ // NOTE: In our implementation, st_blocks = Math.ceil(st_size/st_blksize),
+ // but this is not required by the standard.
+ attr.blksize = 4096;
+ attr.blocks = Math.ceil(attr.size / attr.blksize);
+ return attr;
+ },setattr:function (node, attr) {
+ if (attr.mode !== undefined) {
+ node.mode = attr.mode;
+ }
+ if (attr.timestamp !== undefined) {
+ node.timestamp = attr.timestamp;
+ }
+ if (attr.size !== undefined) {
+ MEMFS.ensureFlexible(node);
+ var contents = node.contents;
+ if (attr.size < contents.length) contents.length = attr.size;
+ else while (attr.size > contents.length) contents.push(0);
+ }
+ },lookup:function (parent, name) {
+ throw FS.genericErrors[ERRNO_CODES.ENOENT];
+ },mknod:function (parent, name, mode, dev) {
+ return MEMFS.createNode(parent, name, mode, dev);
+ },rename:function (old_node, new_dir, new_name) {
+ // if we're overwriting a directory at new_name, make sure it's empty.
+ if (FS.isDir(old_node.mode)) {
+ var new_node;
+ try {
+ new_node = FS.lookupNode(new_dir, new_name);
+ } catch (e) {
+ }
+ if (new_node) {
+ for (var i in new_node.contents) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTEMPTY);
+ }
+ }
+ }
+ // do the internal rewiring
+ delete old_node.parent.contents[old_node.name];
+ old_node.name = new_name;
+ new_dir.contents[new_name] = old_node;
+ old_node.parent = new_dir;
+ },unlink:function (parent, name) {
+ delete parent.contents[name];
+ },rmdir:function (parent, name) {
+ var node = FS.lookupNode(parent, name);
+ for (var i in node.contents) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTEMPTY);
+ }
+ delete parent.contents[name];
+ },readdir:function (node) {
+ var entries = ['.', '..']
+ for (var key in node.contents) {
+ if (!node.contents.hasOwnProperty(key)) {
+ continue;
+ }
+ entries.push(key);
+ }
+ return entries;
+ },symlink:function (parent, newname, oldpath) {
+ var node = MEMFS.createNode(parent, newname, 511 /* 0777 */ | 40960, 0);
+ node.link = oldpath;
+ return node;
+ },readlink:function (node) {
+ if (!FS.isLink(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ return node.link;
+ }},stream_ops:{read:function (stream, buffer, offset, length, position) {
+ var contents = stream.node.contents;
+ if (position >= contents.length)
+ return 0;
+ var size = Math.min(contents.length - position, length);
+ assert(size >= 0);
+ if (size > 8 && contents.subarray) { // non-trivial, and typed array
+ buffer.set(contents.subarray(position, position + size), offset);
+ } else
+ {
+ for (var i = 0; i < size; i++) {
+ buffer[offset + i] = contents[position + i];
+ }
+ }
+ return size;
+ },write:function (stream, buffer, offset, length, position, canOwn) {
+ var node = stream.node;
+ node.timestamp = Date.now();
+ var contents = node.contents;
+ if (length && contents.length === 0 && position === 0 && buffer.subarray) {
+ // just replace it with the new data
+ if (canOwn && offset === 0) {
+ node.contents = buffer; // this could be a subarray of Emscripten HEAP, or allocated from some other source.
+ node.contentMode = (buffer.buffer === HEAP8.buffer) ? MEMFS.CONTENT_OWNING : MEMFS.CONTENT_FIXED;
+ } else {
+ node.contents = new Uint8Array(buffer.subarray(offset, offset+length));
+ node.contentMode = MEMFS.CONTENT_FIXED;
+ }
+ return length;
+ }
+ MEMFS.ensureFlexible(node);
+ var contents = node.contents;
+ while (contents.length < position) contents.push(0);
+ for (var i = 0; i < length; i++) {
+ contents[position + i] = buffer[offset + i];
+ }
+ return length;
+ },llseek:function (stream, offset, whence) {
+ var position = offset;
+ if (whence === 1) { // SEEK_CUR.
+ position += stream.position;
+ } else if (whence === 2) { // SEEK_END.
+ if (FS.isFile(stream.node.mode)) {
+ position += stream.node.contents.length;
+ }
+ }
+ if (position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ stream.ungotten = [];
+ stream.position = position;
+ return position;
+ },allocate:function (stream, offset, length) {
+ MEMFS.ensureFlexible(stream.node);
+ var contents = stream.node.contents;
+ var limit = offset + length;
+ while (limit > contents.length) contents.push(0);
+ },mmap:function (stream, buffer, offset, length, position, prot, flags) {
+ if (!FS.isFile(stream.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ var ptr;
+ var allocated;
+ var contents = stream.node.contents;
+ // Only make a new copy when MAP_PRIVATE is specified.
+ if ( !(flags & 2) &&
+ (contents.buffer === buffer || contents.buffer === buffer.buffer) ) {
+ // We can't emulate MAP_SHARED when the file is not backed by the buffer
+ // we're mapping to (e.g. the HEAP buffer).
+ allocated = false;
+ ptr = contents.byteOffset;
+ } else {
+ // Try to avoid unnecessary slices.
+ if (position > 0 || position + length < contents.length) {
+ if (contents.subarray) {
+ contents = contents.subarray(position, position + length);
+ } else {
+ contents = Array.prototype.slice.call(contents, position, position + length);
+ }
+ }
+ allocated = true;
+ ptr = _malloc(length);
+ if (!ptr) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOMEM);
+ }
+ buffer.set(contents, ptr);
+ }
+ return { ptr: ptr, allocated: allocated };
+ }}};
+
+ var IDBFS={dbs:{},indexedDB:function () {
+ return window.indexedDB || window.mozIndexedDB || window.webkitIndexedDB || window.msIndexedDB;
+ },DB_VERSION:21,DB_STORE_NAME:"FILE_DATA",mount:function (mount) {
+ // reuse all of the core MEMFS functionality
+ return MEMFS.mount.apply(null, arguments);
+ },syncfs:function (mount, populate, callback) {
+ IDBFS.getLocalSet(mount, function(err, local) {
+ if (err) return callback(err);
+
+ IDBFS.getRemoteSet(mount, function(err, remote) {
+ if (err) return callback(err);
+
+ var src = populate ? remote : local;
+ var dst = populate ? local : remote;
+
+ IDBFS.reconcile(src, dst, callback);
+ });
+ });
+ },getDB:function (name, callback) {
+ // check the cache first
+ var db = IDBFS.dbs[name];
+ if (db) {
+ return callback(null, db);
+ }
+
+ var req;
+ try {
+ req = IDBFS.indexedDB().open(name, IDBFS.DB_VERSION);
+ } catch (e) {
+ return callback(e);
+ }
+ req.onupgradeneeded = function(e) {
+ var db = e.target.result;
+ var transaction = e.target.transaction;
+
+ var fileStore;
+
+ if (db.objectStoreNames.contains(IDBFS.DB_STORE_NAME)) {
+ fileStore = transaction.objectStore(IDBFS.DB_STORE_NAME);
+ } else {
+ fileStore = db.createObjectStore(IDBFS.DB_STORE_NAME);
+ }
+
+ fileStore.createIndex('timestamp', 'timestamp', { unique: false });
+ };
+ req.onsuccess = function() {
+ db = req.result;
+
+ // add to the cache
+ IDBFS.dbs[name] = db;
+ callback(null, db);
+ };
+ req.onerror = function() {
+ callback(this.error);
+ };
+ },getLocalSet:function (mount, callback) {
+ var entries = {};
+
+ function isRealDir(p) {
+ return p !== '.' && p !== '..';
+ };
+ function toAbsolute(root) {
+ return function(p) {
+ return PATH.join2(root, p);
+ }
+ };
+
+ var check = FS.readdir(mount.mountpoint).filter(isRealDir).map(toAbsolute(mount.mountpoint));
+
+ while (check.length) {
+ var path = check.pop();
+ var stat;
+
+ try {
+ stat = FS.stat(path);
+ } catch (e) {
+ return callback(e);
+ }
+
+ if (FS.isDir(stat.mode)) {
+ check.push.apply(check, FS.readdir(path).filter(isRealDir).map(toAbsolute(path)));
+ }
+
+ entries[path] = { timestamp: stat.mtime };
+ }
+
+ return callback(null, { type: 'local', entries: entries });
+ },getRemoteSet:function (mount, callback) {
+ var entries = {};
+
+ IDBFS.getDB(mount.mountpoint, function(err, db) {
+ if (err) return callback(err);
+
+ var transaction = db.transaction([IDBFS.DB_STORE_NAME], 'readonly');
+ transaction.onerror = function() { callback(this.error); };
+
+ var store = transaction.objectStore(IDBFS.DB_STORE_NAME);
+ var index = store.index('timestamp');
+
+ index.openKeyCursor().onsuccess = function(event) {
+ var cursor = event.target.result;
+
+ if (!cursor) {
+ return callback(null, { type: 'remote', db: db, entries: entries });
+ }
+
+ entries[cursor.primaryKey] = { timestamp: cursor.key };
+
+ cursor.continue();
+ };
+ });
+ },loadLocalEntry:function (path, callback) {
+ var stat, node;
+
+ try {
+ var lookup = FS.lookupPath(path);
+ node = lookup.node;
+ stat = FS.stat(path);
+ } catch (e) {
+ return callback(e);
+ }
+
+ if (FS.isDir(stat.mode)) {
+ return callback(null, { timestamp: stat.mtime, mode: stat.mode });
+ } else if (FS.isFile(stat.mode)) {
+ return callback(null, { timestamp: stat.mtime, mode: stat.mode, contents: node.contents });
+ } else {
+ return callback(new Error('node type not supported'));
+ }
+ },storeLocalEntry:function (path, entry, callback) {
+ try {
+ if (FS.isDir(entry.mode)) {
+ FS.mkdir(path, entry.mode);
+ } else if (FS.isFile(entry.mode)) {
+ FS.writeFile(path, entry.contents, { encoding: 'binary', canOwn: true });
+ } else {
+ return callback(new Error('node type not supported'));
+ }
+
+ FS.utime(path, entry.timestamp, entry.timestamp);
+ } catch (e) {
+ return callback(e);
+ }
+
+ callback(null);
+ },removeLocalEntry:function (path, callback) {
+ try {
+ var lookup = FS.lookupPath(path);
+ var stat = FS.stat(path);
+
+ if (FS.isDir(stat.mode)) {
+ FS.rmdir(path);
+ } else if (FS.isFile(stat.mode)) {
+ FS.unlink(path);
+ }
+ } catch (e) {
+ return callback(e);
+ }
+
+ callback(null);
+ },loadRemoteEntry:function (store, path, callback) {
+ var req = store.get(path);
+ req.onsuccess = function(event) { callback(null, event.target.result); };
+ req.onerror = function() { callback(this.error); };
+ },storeRemoteEntry:function (store, path, entry, callback) {
+ var req = store.put(entry, path);
+ req.onsuccess = function() { callback(null); };
+ req.onerror = function() { callback(this.error); };
+ },removeRemoteEntry:function (store, path, callback) {
+ var req = store.delete(path);
+ req.onsuccess = function() { callback(null); };
+ req.onerror = function() { callback(this.error); };
+ },reconcile:function (src, dst, callback) {
+ var total = 0;
+
+ var create = [];
+ Object.keys(src.entries).forEach(function (key) {
+ var e = src.entries[key];
+ var e2 = dst.entries[key];
+ if (!e2 || e.timestamp > e2.timestamp) {
+ create.push(key);
+ total++;
+ }
+ });
+
+ var remove = [];
+ Object.keys(dst.entries).forEach(function (key) {
+ var e = dst.entries[key];
+ var e2 = src.entries[key];
+ if (!e2) {
+ remove.push(key);
+ total++;
+ }
+ });
+
+ if (!total) {
+ return callback(null);
+ }
+
+ var errored = false;
+ var completed = 0;
+ var db = src.type === 'remote' ? src.db : dst.db;
+ var transaction = db.transaction([IDBFS.DB_STORE_NAME], 'readwrite');
+ var store = transaction.objectStore(IDBFS.DB_STORE_NAME);
+
+ function done(err) {
+ if (err) {
+ if (!done.errored) {
+ done.errored = true;
+ return callback(err);
+ }
+ return;
+ }
+ if (++completed >= total) {
+ return callback(null);
+ }
+ };
+
+ transaction.onerror = function() { done(this.error); };
+
+ // sort paths in ascending order so directory entries are created
+ // before the files inside them
+ create.sort().forEach(function (path) {
+ if (dst.type === 'local') {
+ IDBFS.loadRemoteEntry(store, path, function (err, entry) {
+ if (err) return done(err);
+ IDBFS.storeLocalEntry(path, entry, done);
+ });
+ } else {
+ IDBFS.loadLocalEntry(path, function (err, entry) {
+ if (err) return done(err);
+ IDBFS.storeRemoteEntry(store, path, entry, done);
+ });
+ }
+ });
+
+ // sort paths in descending order so files are deleted before their
+ // parent directories
+ remove.sort().reverse().forEach(function(path) {
+ if (dst.type === 'local') {
+ IDBFS.removeLocalEntry(path, done);
+ } else {
+ IDBFS.removeRemoteEntry(store, path, done);
+ }
+ });
+ }};
+
+ var NODEFS={isWindows:false,staticInit:function () {
+ NODEFS.isWindows = !!process.platform.match(/^win/);
+ },mount:function (mount) {
+ assert(ENVIRONMENT_IS_NODE);
+ return NODEFS.createNode(null, '/', NODEFS.getMode(mount.opts.root), 0);
+ },createNode:function (parent, name, mode, dev) {
+ if (!FS.isDir(mode) && !FS.isFile(mode) && !FS.isLink(mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var node = FS.createNode(parent, name, mode);
+ node.node_ops = NODEFS.node_ops;
+ node.stream_ops = NODEFS.stream_ops;
+ return node;
+ },getMode:function (path) {
+ var stat;
+ try {
+ stat = fs.lstatSync(path);
+ if (NODEFS.isWindows) {
+ // On Windows, directories return permission bits 'rw-rw-rw-', even though they have 'rwxrwxrwx', so
+ // propagate write bits to execute bits.
+ stat.mode = stat.mode | ((stat.mode & 146) >> 1);
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ return stat.mode;
+ },realPath:function (node) {
+ var parts = [];
+ while (node.parent !== node) {
+ parts.push(node.name);
+ node = node.parent;
+ }
+ parts.push(node.mount.opts.root);
+ parts.reverse();
+ return PATH.join.apply(null, parts);
+ },flagsToPermissionStringMap:{0:"r",1:"r+",2:"r+",64:"r",65:"r+",66:"r+",129:"rx+",193:"rx+",514:"w+",577:"w",578:"w+",705:"wx",706:"wx+",1024:"a",1025:"a",1026:"a+",1089:"a",1090:"a+",1153:"ax",1154:"ax+",1217:"ax",1218:"ax+",4096:"rs",4098:"rs+"},flagsToPermissionString:function (flags) {
+ if (flags in NODEFS.flagsToPermissionStringMap) {
+ return NODEFS.flagsToPermissionStringMap[flags];
+ } else {
+ return flags;
+ }
+ },node_ops:{getattr:function (node) {
+ var path = NODEFS.realPath(node);
+ var stat;
+ try {
+ stat = fs.lstatSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ // node.js v0.10.20 doesn't report blksize and blocks on Windows. Fake them with default blksize of 4096.
+ // See http://support.microsoft.com/kb/140365
+ if (NODEFS.isWindows && !stat.blksize) {
+ stat.blksize = 4096;
+ }
+ if (NODEFS.isWindows && !stat.blocks) {
+ stat.blocks = (stat.size+stat.blksize-1)/stat.blksize|0;
+ }
+ return {
+ dev: stat.dev,
+ ino: stat.ino,
+ mode: stat.mode,
+ nlink: stat.nlink,
+ uid: stat.uid,
+ gid: stat.gid,
+ rdev: stat.rdev,
+ size: stat.size,
+ atime: stat.atime,
+ mtime: stat.mtime,
+ ctime: stat.ctime,
+ blksize: stat.blksize,
+ blocks: stat.blocks
+ };
+ },setattr:function (node, attr) {
+ var path = NODEFS.realPath(node);
+ try {
+ if (attr.mode !== undefined) {
+ fs.chmodSync(path, attr.mode);
+ // update the common node structure mode as well
+ node.mode = attr.mode;
+ }
+ if (attr.timestamp !== undefined) {
+ var date = new Date(attr.timestamp);
+ fs.utimesSync(path, date, date);
+ }
+ if (attr.size !== undefined) {
+ fs.truncateSync(path, attr.size);
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },lookup:function (parent, name) {
+ var path = PATH.join2(NODEFS.realPath(parent), name);
+ var mode = NODEFS.getMode(path);
+ return NODEFS.createNode(parent, name, mode);
+ },mknod:function (parent, name, mode, dev) {
+ var node = NODEFS.createNode(parent, name, mode, dev);
+ // create the backing node for this in the fs root as well
+ var path = NODEFS.realPath(node);
+ try {
+ if (FS.isDir(node.mode)) {
+ fs.mkdirSync(path, node.mode);
+ } else {
+ fs.writeFileSync(path, '', { mode: node.mode });
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ return node;
+ },rename:function (oldNode, newDir, newName) {
+ var oldPath = NODEFS.realPath(oldNode);
+ var newPath = PATH.join2(NODEFS.realPath(newDir), newName);
+ try {
+ fs.renameSync(oldPath, newPath);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },unlink:function (parent, name) {
+ var path = PATH.join2(NODEFS.realPath(parent), name);
+ try {
+ fs.unlinkSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },rmdir:function (parent, name) {
+ var path = PATH.join2(NODEFS.realPath(parent), name);
+ try {
+ fs.rmdirSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },readdir:function (node) {
+ var path = NODEFS.realPath(node);
+ try {
+ return fs.readdirSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },symlink:function (parent, newName, oldPath) {
+ var newPath = PATH.join2(NODEFS.realPath(parent), newName);
+ try {
+ fs.symlinkSync(oldPath, newPath);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },readlink:function (node) {
+ var path = NODEFS.realPath(node);
+ try {
+ return fs.readlinkSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ }},stream_ops:{open:function (stream) {
+ var path = NODEFS.realPath(stream.node);
+ try {
+ if (FS.isFile(stream.node.mode)) {
+ stream.nfd = fs.openSync(path, NODEFS.flagsToPermissionString(stream.flags));
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },close:function (stream) {
+ try {
+ if (FS.isFile(stream.node.mode) && stream.nfd) {
+ fs.closeSync(stream.nfd);
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },read:function (stream, buffer, offset, length, position) {
+ // FIXME this is terrible.
+ var nbuffer = new Buffer(length);
+ var res;
+ try {
+ res = fs.readSync(stream.nfd, nbuffer, 0, length, position);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ if (res > 0) {
+ for (var i = 0; i < res; i++) {
+ buffer[offset + i] = nbuffer[i];
+ }
+ }
+ return res;
+ },write:function (stream, buffer, offset, length, position) {
+ // FIXME this is terrible.
+ var nbuffer = new Buffer(buffer.subarray(offset, offset + length));
+ var res;
+ try {
+ res = fs.writeSync(stream.nfd, nbuffer, 0, length, position);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ return res;
+ },llseek:function (stream, offset, whence) {
+ var position = offset;
+ if (whence === 1) { // SEEK_CUR.
+ position += stream.position;
+ } else if (whence === 2) { // SEEK_END.
+ if (FS.isFile(stream.node.mode)) {
+ try {
+ var stat = fs.fstatSync(stream.nfd);
+ position += stat.size;
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ }
+ }
+
+ if (position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+
+ stream.position = position;
+ return position;
+ }}};
+
+ var _stdin=allocate(1, "i32*", ALLOC_STATIC);
+
+ var _stdout=allocate(1, "i32*", ALLOC_STATIC);
+
+ var _stderr=allocate(1, "i32*", ALLOC_STATIC);
+
+ function _fflush(stream) {
+ // int fflush(FILE *stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fflush.html
+ // we don't currently perform any user-space buffering of data
+ }var FS={root:null,mounts:[],devices:[null],streams:[],nextInode:1,nameTable:null,currentPath:"/",initialized:false,ignorePermissions:true,ErrnoError:null,genericErrors:{},handleFSError:function (e) {
+ if (!(e instanceof FS.ErrnoError)) throw e + ' : ' + stackTrace();
+ return ___setErrNo(e.errno);
+ },lookupPath:function (path, opts) {
+ path = PATH.resolve(FS.cwd(), path);
+ opts = opts || {};
+
+ var defaults = {
+ follow_mount: true,
+ recurse_count: 0
+ };
+ for (var key in defaults) {
+ if (opts[key] === undefined) {
+ opts[key] = defaults[key];
+ }
+ }
+
+ if (opts.recurse_count > 8) { // max recursive lookup of 8
+ throw new FS.ErrnoError(ERRNO_CODES.ELOOP);
+ }
+
+ // split the path
+ var parts = PATH.normalizeArray(path.split('/').filter(function(p) {
+ return !!p;
+ }), false);
+
+ // start at the root
+ var current = FS.root;
+ var current_path = '/';
+
+ for (var i = 0; i < parts.length; i++) {
+ var islast = (i === parts.length-1);
+ if (islast && opts.parent) {
+ // stop resolving
+ break;
+ }
+
+ current = FS.lookupNode(current, parts[i]);
+ current_path = PATH.join2(current_path, parts[i]);
+
+ // jump to the mount's root node if this is a mountpoint
+ if (FS.isMountpoint(current)) {
+ if (!islast || (islast && opts.follow_mount)) {
+ current = current.mounted.root;
+ }
+ }
+
+ // by default, lookupPath will not follow a symlink if it is the final path component.
+ // setting opts.follow = true will override this behavior.
+ if (!islast || opts.follow) {
+ var count = 0;
+ while (FS.isLink(current.mode)) {
+ var link = FS.readlink(current_path);
+ current_path = PATH.resolve(PATH.dirname(current_path), link);
+
+ var lookup = FS.lookupPath(current_path, { recurse_count: opts.recurse_count });
+ current = lookup.node;
+
+ if (count++ > 40) { // limit max consecutive symlinks to 40 (SYMLOOP_MAX).
+ throw new FS.ErrnoError(ERRNO_CODES.ELOOP);
+ }
+ }
+ }
+ }
+
+ return { path: current_path, node: current };
+ },getPath:function (node) {
+ var path;
+ while (true) {
+ if (FS.isRoot(node)) {
+ var mount = node.mount.mountpoint;
+ if (!path) return mount;
+ return mount[mount.length-1] !== '/' ? mount + '/' + path : mount + path;
+ }
+ path = path ? node.name + '/' + path : node.name;
+ node = node.parent;
+ }
+ },hashName:function (parentid, name) {
+ var hash = 0;
+
+
+ for (var i = 0; i < name.length; i++) {
+ hash = ((hash << 5) - hash + name.charCodeAt(i)) | 0;
+ }
+ return ((parentid + hash) >>> 0) % FS.nameTable.length;
+ },hashAddNode:function (node) {
+ var hash = FS.hashName(node.parent.id, node.name);
+ node.name_next = FS.nameTable[hash];
+ FS.nameTable[hash] = node;
+ },hashRemoveNode:function (node) {
+ var hash = FS.hashName(node.parent.id, node.name);
+ if (FS.nameTable[hash] === node) {
+ FS.nameTable[hash] = node.name_next;
+ } else {
+ var current = FS.nameTable[hash];
+ while (current) {
+ if (current.name_next === node) {
+ current.name_next = node.name_next;
+ break;
+ }
+ current = current.name_next;
+ }
+ }
+ },lookupNode:function (parent, name) {
+ var err = FS.mayLookup(parent);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ var hash = FS.hashName(parent.id, name);
+ for (var node = FS.nameTable[hash]; node; node = node.name_next) {
+ var nodeName = node.name;
+ if (node.parent.id === parent.id && nodeName === name) {
+ return node;
+ }
+ }
+ // if we failed to find it in the cache, call into the VFS
+ return FS.lookup(parent, name);
+ },createNode:function (parent, name, mode, rdev) {
+ if (!FS.FSNode) {
+ FS.FSNode = function(parent, name, mode, rdev) {
+ if (!parent) {
+ parent = this; // root node sets parent to itself
+ }
+ this.parent = parent;
+ this.mount = parent.mount;
+ this.mounted = null;
+ this.id = FS.nextInode++;
+ this.name = name;
+ this.mode = mode;
+ this.node_ops = {};
+ this.stream_ops = {};
+ this.rdev = rdev;
+ };
+
+ FS.FSNode.prototype = {};
+
+ // compatibility
+ var readMode = 292 | 73;
+ var writeMode = 146;
+
+ // NOTE we must use Object.defineProperties instead of individual calls to
+ // Object.defineProperty in order to make closure compiler happy
+ Object.defineProperties(FS.FSNode.prototype, {
+ read: {
+ get: function() { return (this.mode & readMode) === readMode; },
+ set: function(val) { val ? this.mode |= readMode : this.mode &= ~readMode; }
+ },
+ write: {
+ get: function() { return (this.mode & writeMode) === writeMode; },
+ set: function(val) { val ? this.mode |= writeMode : this.mode &= ~writeMode; }
+ },
+ isFolder: {
+ get: function() { return FS.isDir(this.mode); },
+ },
+ isDevice: {
+ get: function() { return FS.isChrdev(this.mode); },
+ },
+ });
+ }
+
+ var node = new FS.FSNode(parent, name, mode, rdev);
+
+ FS.hashAddNode(node);
+
+ return node;
+ },destroyNode:function (node) {
+ FS.hashRemoveNode(node);
+ },isRoot:function (node) {
+ return node === node.parent;
+ },isMountpoint:function (node) {
+ return !!node.mounted;
+ },isFile:function (mode) {
+ return (mode & 61440) === 32768;
+ },isDir:function (mode) {
+ return (mode & 61440) === 16384;
+ },isLink:function (mode) {
+ return (mode & 61440) === 40960;
+ },isChrdev:function (mode) {
+ return (mode & 61440) === 8192;
+ },isBlkdev:function (mode) {
+ return (mode & 61440) === 24576;
+ },isFIFO:function (mode) {
+ return (mode & 61440) === 4096;
+ },isSocket:function (mode) {
+ return (mode & 49152) === 49152;
+ },flagModes:{"r":0,"rs":1052672,"r+":2,"w":577,"wx":705,"xw":705,"w+":578,"wx+":706,"xw+":706,"a":1089,"ax":1217,"xa":1217,"a+":1090,"ax+":1218,"xa+":1218},modeStringToFlags:function (str) {
+ var flags = FS.flagModes[str];
+ if (typeof flags === 'undefined') {
+ throw new Error('Unknown file open mode: ' + str);
+ }
+ return flags;
+ },flagsToPermissionString:function (flag) {
+ var accmode = flag & 2097155;
+ var perms = ['r', 'w', 'rw'][accmode];
+ if ((flag & 512)) {
+ perms += 'w';
+ }
+ return perms;
+ },nodePermissions:function (node, perms) {
+ if (FS.ignorePermissions) {
+ return 0;
+ }
+ // return 0 if any user, group or owner bits are set.
+ if (perms.indexOf('r') !== -1 && !(node.mode & 292)) {
+ return ERRNO_CODES.EACCES;
+ } else if (perms.indexOf('w') !== -1 && !(node.mode & 146)) {
+ return ERRNO_CODES.EACCES;
+ } else if (perms.indexOf('x') !== -1 && !(node.mode & 73)) {
+ return ERRNO_CODES.EACCES;
+ }
+ return 0;
+ },mayLookup:function (dir) {
+ return FS.nodePermissions(dir, 'x');
+ },mayCreate:function (dir, name) {
+ try {
+ var node = FS.lookupNode(dir, name);
+ return ERRNO_CODES.EEXIST;
+ } catch (e) {
+ }
+ return FS.nodePermissions(dir, 'wx');
+ },mayDelete:function (dir, name, isdir) {
+ var node;
+ try {
+ node = FS.lookupNode(dir, name);
+ } catch (e) {
+ return e.errno;
+ }
+ var err = FS.nodePermissions(dir, 'wx');
+ if (err) {
+ return err;
+ }
+ if (isdir) {
+ if (!FS.isDir(node.mode)) {
+ return ERRNO_CODES.ENOTDIR;
+ }
+ if (FS.isRoot(node) || FS.getPath(node) === FS.cwd()) {
+ return ERRNO_CODES.EBUSY;
+ }
+ } else {
+ if (FS.isDir(node.mode)) {
+ return ERRNO_CODES.EISDIR;
+ }
+ }
+ return 0;
+ },mayOpen:function (node, flags) {
+ if (!node) {
+ return ERRNO_CODES.ENOENT;
+ }
+ if (FS.isLink(node.mode)) {
+ return ERRNO_CODES.ELOOP;
+ } else if (FS.isDir(node.mode)) {
+ if ((flags & 2097155) !== 0 || // opening for write
+ (flags & 512)) {
+ return ERRNO_CODES.EISDIR;
+ }
+ }
+ return FS.nodePermissions(node, FS.flagsToPermissionString(flags));
+ },MAX_OPEN_FDS:4096,nextfd:function (fd_start, fd_end) {
+ fd_start = fd_start || 0;
+ fd_end = fd_end || FS.MAX_OPEN_FDS;
+ for (var fd = fd_start; fd <= fd_end; fd++) {
+ if (!FS.streams[fd]) {
+ return fd;
+ }
+ }
+ throw new FS.ErrnoError(ERRNO_CODES.EMFILE);
+ },getStream:function (fd) {
+ return FS.streams[fd];
+ },createStream:function (stream, fd_start, fd_end) {
+ if (!FS.FSStream) {
+ FS.FSStream = function(){};
+ FS.FSStream.prototype = {};
+ // compatibility
+ Object.defineProperties(FS.FSStream.prototype, {
+ object: {
+ get: function() { return this.node; },
+ set: function(val) { this.node = val; }
+ },
+ isRead: {
+ get: function() { return (this.flags & 2097155) !== 1; }
+ },
+ isWrite: {
+ get: function() { return (this.flags & 2097155) !== 0; }
+ },
+ isAppend: {
+ get: function() { return (this.flags & 1024); }
+ }
+ });
+ }
+ if (0) {
+ // reuse the object
+ stream.__proto__ = FS.FSStream.prototype;
+ } else {
+ var newStream = new FS.FSStream();
+ for (var p in stream) {
+ newStream[p] = stream[p];
+ }
+ stream = newStream;
+ }
+ var fd = FS.nextfd(fd_start, fd_end);
+ stream.fd = fd;
+ FS.streams[fd] = stream;
+ return stream;
+ },closeStream:function (fd) {
+ FS.streams[fd] = null;
+ },getStreamFromPtr:function (ptr) {
+ return FS.streams[ptr - 1];
+ },getPtrForStream:function (stream) {
+ return stream ? stream.fd + 1 : 0;
+ },chrdev_stream_ops:{open:function (stream) {
+ var device = FS.getDevice(stream.node.rdev);
+ // override node's stream ops with the device's
+ stream.stream_ops = device.stream_ops;
+ // forward the open call
+ if (stream.stream_ops.open) {
+ stream.stream_ops.open(stream);
+ }
+ },llseek:function () {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }},major:function (dev) {
+ return ((dev) >> 8);
+ },minor:function (dev) {
+ return ((dev) & 0xff);
+ },makedev:function (ma, mi) {
+ return ((ma) << 8 | (mi));
+ },registerDevice:function (dev, ops) {
+ FS.devices[dev] = { stream_ops: ops };
+ },getDevice:function (dev) {
+ return FS.devices[dev];
+ },getMounts:function (mount) {
+ var mounts = [];
+ var check = [mount];
+
+ while (check.length) {
+ var m = check.pop();
+
+ mounts.push(m);
+
+ check.push.apply(check, m.mounts);
+ }
+
+ return mounts;
+ },syncfs:function (populate, callback) {
+ if (typeof(populate) === 'function') {
+ callback = populate;
+ populate = false;
+ }
+
+ var mounts = FS.getMounts(FS.root.mount);
+ var completed = 0;
+
+ function done(err) {
+ if (err) {
+ if (!done.errored) {
+ done.errored = true;
+ return callback(err);
+ }
+ return;
+ }
+ if (++completed >= mounts.length) {
+ callback(null);
+ }
+ };
+
+ // sync all mounts
+ mounts.forEach(function (mount) {
+ if (!mount.type.syncfs) {
+ return done(null);
+ }
+ mount.type.syncfs(mount, populate, done);
+ });
+ },mount:function (type, opts, mountpoint) {
+ var root = mountpoint === '/';
+ var pseudo = !mountpoint;
+ var node;
+
+ if (root && FS.root) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ } else if (!root && !pseudo) {
+ var lookup = FS.lookupPath(mountpoint, { follow_mount: false });
+
+ mountpoint = lookup.path; // use the absolute path
+ node = lookup.node;
+
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+
+ if (!FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTDIR);
+ }
+ }
+
+ var mount = {
+ type: type,
+ opts: opts,
+ mountpoint: mountpoint,
+ mounts: []
+ };
+
+ // create a root node for the fs
+ var mountRoot = type.mount(mount);
+ mountRoot.mount = mount;
+ mount.root = mountRoot;
+
+ if (root) {
+ FS.root = mountRoot;
+ } else if (node) {
+ // set as a mountpoint
+ node.mounted = mount;
+
+ // add the new mount to the current mount's children
+ if (node.mount) {
+ node.mount.mounts.push(mount);
+ }
+ }
+
+ return mountRoot;
+ },unmount:function (mountpoint) {
+ var lookup = FS.lookupPath(mountpoint, { follow_mount: false });
+
+ if (!FS.isMountpoint(lookup.node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+
+ // destroy the nodes for this mount, and all its child mounts
+ var node = lookup.node;
+ var mount = node.mounted;
+ var mounts = FS.getMounts(mount);
+
+ Object.keys(FS.nameTable).forEach(function (hash) {
+ var current = FS.nameTable[hash];
+
+ while (current) {
+ var next = current.name_next;
+
+ if (mounts.indexOf(current.mount) !== -1) {
+ FS.destroyNode(current);
+ }
+
+ current = next;
+ }
+ });
+
+ // no longer a mountpoint
+ node.mounted = null;
+
+ // remove this mount from the child mounts
+ var idx = node.mount.mounts.indexOf(mount);
+ assert(idx !== -1);
+ node.mount.mounts.splice(idx, 1);
+ },lookup:function (parent, name) {
+ return parent.node_ops.lookup(parent, name);
+ },mknod:function (path, mode, dev) {
+ var lookup = FS.lookupPath(path, { parent: true });
+ var parent = lookup.node;
+ var name = PATH.basename(path);
+ var err = FS.mayCreate(parent, name);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.mknod) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ return parent.node_ops.mknod(parent, name, mode, dev);
+ },create:function (path, mode) {
+ mode = mode !== undefined ? mode : 438 /* 0666 */;
+ mode &= 4095;
+ mode |= 32768;
+ return FS.mknod(path, mode, 0);
+ },mkdir:function (path, mode) {
+ mode = mode !== undefined ? mode : 511 /* 0777 */;
+ mode &= 511 | 512;
+ mode |= 16384;
+ return FS.mknod(path, mode, 0);
+ },mkdev:function (path, mode, dev) {
+ if (typeof(dev) === 'undefined') {
+ dev = mode;
+ mode = 438 /* 0666 */;
+ }
+ mode |= 8192;
+ return FS.mknod(path, mode, dev);
+ },symlink:function (oldpath, newpath) {
+ var lookup = FS.lookupPath(newpath, { parent: true });
+ var parent = lookup.node;
+ var newname = PATH.basename(newpath);
+ var err = FS.mayCreate(parent, newname);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.symlink) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ return parent.node_ops.symlink(parent, newname, oldpath);
+ },rename:function (old_path, new_path) {
+ var old_dirname = PATH.dirname(old_path);
+ var new_dirname = PATH.dirname(new_path);
+ var old_name = PATH.basename(old_path);
+ var new_name = PATH.basename(new_path);
+ // parents must exist
+ var lookup, old_dir, new_dir;
+ try {
+ lookup = FS.lookupPath(old_path, { parent: true });
+ old_dir = lookup.node;
+ lookup = FS.lookupPath(new_path, { parent: true });
+ new_dir = lookup.node;
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ // need to be part of the same mount
+ if (old_dir.mount !== new_dir.mount) {
+ throw new FS.ErrnoError(ERRNO_CODES.EXDEV);
+ }
+ // source must exist
+ var old_node = FS.lookupNode(old_dir, old_name);
+ // old path should not be an ancestor of the new path
+ var relative = PATH.relative(old_path, new_dirname);
+ if (relative.charAt(0) !== '.') {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ // new path should not be an ancestor of the old path
+ relative = PATH.relative(new_path, old_dirname);
+ if (relative.charAt(0) !== '.') {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTEMPTY);
+ }
+ // see if the new path already exists
+ var new_node;
+ try {
+ new_node = FS.lookupNode(new_dir, new_name);
+ } catch (e) {
+ // not fatal
+ }
+ // early out if nothing needs to change
+ if (old_node === new_node) {
+ return;
+ }
+ // we'll need to delete the old entry
+ var isdir = FS.isDir(old_node.mode);
+ var err = FS.mayDelete(old_dir, old_name, isdir);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ // need delete permissions if we'll be overwriting.
+ // need create permissions if new doesn't already exist.
+ err = new_node ?
+ FS.mayDelete(new_dir, new_name, isdir) :
+ FS.mayCreate(new_dir, new_name);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!old_dir.node_ops.rename) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isMountpoint(old_node) || (new_node && FS.isMountpoint(new_node))) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ // if we are going to change the parent, check write permissions
+ if (new_dir !== old_dir) {
+ err = FS.nodePermissions(old_dir, 'w');
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ }
+ // remove the node from the lookup hash
+ FS.hashRemoveNode(old_node);
+ // do the underlying fs rename
+ try {
+ old_dir.node_ops.rename(old_node, new_dir, new_name);
+ } catch (e) {
+ throw e;
+ } finally {
+ // add the node back to the hash (in case node_ops.rename
+ // changed its name)
+ FS.hashAddNode(old_node);
+ }
+ },rmdir:function (path) {
+ var lookup = FS.lookupPath(path, { parent: true });
+ var parent = lookup.node;
+ var name = PATH.basename(path);
+ var node = FS.lookupNode(parent, name);
+ var err = FS.mayDelete(parent, name, true);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.rmdir) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ parent.node_ops.rmdir(parent, name);
+ FS.destroyNode(node);
+ },readdir:function (path) {
+ var lookup = FS.lookupPath(path, { follow: true });
+ var node = lookup.node;
+ if (!node.node_ops.readdir) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTDIR);
+ }
+ return node.node_ops.readdir(node);
+ },unlink:function (path) {
+ var lookup = FS.lookupPath(path, { parent: true });
+ var parent = lookup.node;
+ var name = PATH.basename(path);
+ var node = FS.lookupNode(parent, name);
+ var err = FS.mayDelete(parent, name, false);
+ if (err) {
+ // POSIX says unlink should set EPERM, not EISDIR
+ if (err === ERRNO_CODES.EISDIR) err = ERRNO_CODES.EPERM;
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.unlink) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ parent.node_ops.unlink(parent, name);
+ FS.destroyNode(node);
+ },readlink:function (path) {
+ var lookup = FS.lookupPath(path);
+ var link = lookup.node;
+ if (!link.node_ops.readlink) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ return link.node_ops.readlink(link);
+ },stat:function (path, dontFollow) {
+ var lookup = FS.lookupPath(path, { follow: !dontFollow });
+ var node = lookup.node;
+ if (!node.node_ops.getattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ return node.node_ops.getattr(node);
+ },lstat:function (path) {
+ return FS.stat(path, true);
+ },chmod:function (path, mode, dontFollow) {
+ var node;
+ if (typeof path === 'string') {
+ var lookup = FS.lookupPath(path, { follow: !dontFollow });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ if (!node.node_ops.setattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ node.node_ops.setattr(node, {
+ mode: (mode & 4095) | (node.mode & ~4095),
+ timestamp: Date.now()
+ });
+ },lchmod:function (path, mode) {
+ FS.chmod(path, mode, true);
+ },fchmod:function (fd, mode) {
+ var stream = FS.getStream(fd);
+ if (!stream) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ FS.chmod(stream.node, mode);
+ },chown:function (path, uid, gid, dontFollow) {
+ var node;
+ if (typeof path === 'string') {
+ var lookup = FS.lookupPath(path, { follow: !dontFollow });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ if (!node.node_ops.setattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ node.node_ops.setattr(node, {
+ timestamp: Date.now()
+ // we ignore the uid / gid for now
+ });
+ },lchown:function (path, uid, gid) {
+ FS.chown(path, uid, gid, true);
+ },fchown:function (fd, uid, gid) {
+ var stream = FS.getStream(fd);
+ if (!stream) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ FS.chown(stream.node, uid, gid);
+ },truncate:function (path, len) {
+ if (len < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var node;
+ if (typeof path === 'string') {
+ var lookup = FS.lookupPath(path, { follow: true });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ if (!node.node_ops.setattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EISDIR);
+ }
+ if (!FS.isFile(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var err = FS.nodePermissions(node, 'w');
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ node.node_ops.setattr(node, {
+ size: len,
+ timestamp: Date.now()
+ });
+ },ftruncate:function (fd, len) {
+ var stream = FS.getStream(fd);
+ if (!stream) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ FS.truncate(stream.node, len);
+ },utime:function (path, atime, mtime) {
+ var lookup = FS.lookupPath(path, { follow: true });
+ var node = lookup.node;
+ node.node_ops.setattr(node, {
+ timestamp: Math.max(atime, mtime)
+ });
+ },open:function (path, flags, mode, fd_start, fd_end) {
+ flags = typeof flags === 'string' ? FS.modeStringToFlags(flags) : flags;
+ mode = typeof mode === 'undefined' ? 438 /* 0666 */ : mode;
+ if ((flags & 64)) {
+ mode = (mode & 4095) | 32768;
+ } else {
+ mode = 0;
+ }
+ var node;
+ if (typeof path === 'object') {
+ node = path;
+ } else {
+ path = PATH.normalize(path);
+ try {
+ var lookup = FS.lookupPath(path, {
+ follow: !(flags & 131072)
+ });
+ node = lookup.node;
+ } catch (e) {
+ // ignore
+ }
+ }
+ // perhaps we need to create the node
+ if ((flags & 64)) {
+ if (node) {
+ // if O_CREAT and O_EXCL are set, error out if the node already exists
+ if ((flags & 128)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EEXIST);
+ }
+ } else {
+ // node doesn't exist, try to create it
+ node = FS.mknod(path, mode, 0);
+ }
+ }
+ if (!node) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOENT);
+ }
+ // can't truncate a device
+ if (FS.isChrdev(node.mode)) {
+ flags &= ~512;
+ }
+ // check permissions
+ var err = FS.mayOpen(node, flags);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ // do truncation if necessary
+ if ((flags & 512)) {
+ FS.truncate(node, 0);
+ }
+ // we've already handled these, don't pass down to the underlying vfs
+ flags &= ~(128 | 512);
+
+ // register the stream with the filesystem
+ var stream = FS.createStream({
+ node: node,
+ path: FS.getPath(node), // we want the absolute path to the node
+ flags: flags,
+ seekable: true,
+ position: 0,
+ stream_ops: node.stream_ops,
+ // used by the file family libc calls (fopen, fwrite, ferror, etc.)
+ ungotten: [],
+ error: false
+ }, fd_start, fd_end);
+ // call the new stream's open function
+ if (stream.stream_ops.open) {
+ stream.stream_ops.open(stream);
+ }
+ if (Module['logReadFiles'] && !(flags & 1)) {
+ if (!FS.readFiles) FS.readFiles = {};
+ if (!(path in FS.readFiles)) {
+ FS.readFiles[path] = 1;
+ Module['printErr']('read file: ' + path);
+ }
+ }
+ return stream;
+ },close:function (stream) {
+ try {
+ if (stream.stream_ops.close) {
+ stream.stream_ops.close(stream);
+ }
+ } catch (e) {
+ throw e;
+ } finally {
+ FS.closeStream(stream.fd);
+ }
+ },llseek:function (stream, offset, whence) {
+ if (!stream.seekable || !stream.stream_ops.llseek) {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }
+ return stream.stream_ops.llseek(stream, offset, whence);
+ },read:function (stream, buffer, offset, length, position) {
+ if (length < 0 || position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ if ((stream.flags & 2097155) === 1) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if (FS.isDir(stream.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EISDIR);
+ }
+ if (!stream.stream_ops.read) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var seeking = true;
+ if (typeof position === 'undefined') {
+ position = stream.position;
+ seeking = false;
+ } else if (!stream.seekable) {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }
+ var bytesRead = stream.stream_ops.read(stream, buffer, offset, length, position);
+ if (!seeking) stream.position += bytesRead;
+ return bytesRead;
+ },write:function (stream, buffer, offset, length, position, canOwn) {
+ if (length < 0 || position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if (FS.isDir(stream.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EISDIR);
+ }
+ if (!stream.stream_ops.write) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var seeking = true;
+ if (typeof position === 'undefined') {
+ position = stream.position;
+ seeking = false;
+ } else if (!stream.seekable) {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }
+ if (stream.flags & 1024) {
+ // seek to the end before writing in append mode
+ FS.llseek(stream, 0, 2);
+ }
+ var bytesWritten = stream.stream_ops.write(stream, buffer, offset, length, position, canOwn);
+ if (!seeking) stream.position += bytesWritten;
+ return bytesWritten;
+ },allocate:function (stream, offset, length) {
+ if (offset < 0 || length <= 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if (!FS.isFile(stream.node.mode) && !FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ if (!stream.stream_ops.allocate) {
+ throw new FS.ErrnoError(ERRNO_CODES.EOPNOTSUPP);
+ }
+ stream.stream_ops.allocate(stream, offset, length);
+ },mmap:function (stream, buffer, offset, length, position, prot, flags) {
+ // TODO if PROT is PROT_WRITE, make sure we have write access
+ if ((stream.flags & 2097155) === 1) {
+ throw new FS.ErrnoError(ERRNO_CODES.EACCES);
+ }
+ if (!stream.stream_ops.mmap) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ return stream.stream_ops.mmap(stream, buffer, offset, length, position, prot, flags);
+ },ioctl:function (stream, cmd, arg) {
+ if (!stream.stream_ops.ioctl) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTTY);
+ }
+ return stream.stream_ops.ioctl(stream, cmd, arg);
+ },readFile:function (path, opts) {
+ opts = opts || {};
+ opts.flags = opts.flags || 'r';
+ opts.encoding = opts.encoding || 'binary';
+ if (opts.encoding !== 'utf8' && opts.encoding !== 'binary') {
+ throw new Error('Invalid encoding type "' + opts.encoding + '"');
+ }
+ var ret;
+ var stream = FS.open(path, opts.flags);
+ var stat = FS.stat(path);
+ var length = stat.size;
+ var buf = new Uint8Array(length);
+ FS.read(stream, buf, 0, length, 0);
+ if (opts.encoding === 'utf8') {
+ ret = '';
+ var utf8 = new Runtime.UTF8Processor();
+ for (var i = 0; i < length; i++) {
+ ret += utf8.processCChar(buf[i]);
+ }
+ } else if (opts.encoding === 'binary') {
+ ret = buf;
+ }
+ FS.close(stream);
+ return ret;
+ },writeFile:function (path, data, opts) {
+ opts = opts || {};
+ opts.flags = opts.flags || 'w';
+ opts.encoding = opts.encoding || 'utf8';
+ if (opts.encoding !== 'utf8' && opts.encoding !== 'binary') {
+ throw new Error('Invalid encoding type "' + opts.encoding + '"');
+ }
+ var stream = FS.open(path, opts.flags, opts.mode);
+ if (opts.encoding === 'utf8') {
+ var utf8 = new Runtime.UTF8Processor();
+ var buf = new Uint8Array(utf8.processJSString(data));
+ FS.write(stream, buf, 0, buf.length, 0, opts.canOwn);
+ } else if (opts.encoding === 'binary') {
+ FS.write(stream, data, 0, data.length, 0, opts.canOwn);
+ }
+ FS.close(stream);
+ },cwd:function () {
+ return FS.currentPath;
+ },chdir:function (path) {
+ var lookup = FS.lookupPath(path, { follow: true });
+ if (!FS.isDir(lookup.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTDIR);
+ }
+ var err = FS.nodePermissions(lookup.node, 'x');
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ FS.currentPath = lookup.path;
+ },createDefaultDirectories:function () {
+ FS.mkdir('/tmp');
+ },createDefaultDevices:function () {
+ // create /dev
+ FS.mkdir('/dev');
+ // setup /dev/null
+ FS.registerDevice(FS.makedev(1, 3), {
+ read: function() { return 0; },
+ write: function() { return 0; }
+ });
+ FS.mkdev('/dev/null', FS.makedev(1, 3));
+ // setup /dev/tty and /dev/tty1
+ // stderr needs to print output using Module['printErr']
+ // so we register a second tty just for it.
+ TTY.register(FS.makedev(5, 0), TTY.default_tty_ops);
+ TTY.register(FS.makedev(6, 0), TTY.default_tty1_ops);
+ FS.mkdev('/dev/tty', FS.makedev(5, 0));
+ FS.mkdev('/dev/tty1', FS.makedev(6, 0));
+ // we're not going to emulate the actual shm device,
+ // just create the tmp dirs that reside in it commonly
+ FS.mkdir('/dev/shm');
+ FS.mkdir('/dev/shm/tmp');
+ },createStandardStreams:function () {
+ // TODO deprecate the old functionality of a single
+ // input / output callback and that utilizes FS.createDevice
+ // and instead require a unique set of stream ops
+
+ // by default, we symlink the standard streams to the
+ // default tty devices. however, if the standard streams
+ // have been overwritten we create a unique device for
+ // them instead.
+ if (Module['stdin']) {
+ FS.createDevice('/dev', 'stdin', Module['stdin']);
+ } else {
+ FS.symlink('/dev/tty', '/dev/stdin');
+ }
+ if (Module['stdout']) {
+ FS.createDevice('/dev', 'stdout', null, Module['stdout']);
+ } else {
+ FS.symlink('/dev/tty', '/dev/stdout');
+ }
+ if (Module['stderr']) {
+ FS.createDevice('/dev', 'stderr', null, Module['stderr']);
+ } else {
+ FS.symlink('/dev/tty1', '/dev/stderr');
+ }
+
+ // open default streams for the stdin, stdout and stderr devices
+ var stdin = FS.open('/dev/stdin', 'r');
+ HEAP32[((_stdin)>>2)]=FS.getPtrForStream(stdin);
+ assert(stdin.fd === 0, 'invalid handle for stdin (' + stdin.fd + ')');
+
+ var stdout = FS.open('/dev/stdout', 'w');
+ HEAP32[((_stdout)>>2)]=FS.getPtrForStream(stdout);
+ assert(stdout.fd === 1, 'invalid handle for stdout (' + stdout.fd + ')');
+
+ var stderr = FS.open('/dev/stderr', 'w');
+ HEAP32[((_stderr)>>2)]=FS.getPtrForStream(stderr);
+ assert(stderr.fd === 2, 'invalid handle for stderr (' + stderr.fd + ')');
+ },ensureErrnoError:function () {
+ if (FS.ErrnoError) return;
+ FS.ErrnoError = function ErrnoError(errno) {
+ this.errno = errno;
+ for (var key in ERRNO_CODES) {
+ if (ERRNO_CODES[key] === errno) {
+ this.code = key;
+ break;
+ }
+ }
+ this.message = ERRNO_MESSAGES[errno];
+ };
+ FS.ErrnoError.prototype = new Error();
+ FS.ErrnoError.prototype.constructor = FS.ErrnoError;
+ // Some errors may happen quite a bit, to avoid overhead we reuse them (and suffer a lack of stack info)
+ [ERRNO_CODES.ENOENT].forEach(function(code) {
+ FS.genericErrors[code] = new FS.ErrnoError(code);
+ FS.genericErrors[code].stack = '<generic error, no stack>';
+ });
+ },staticInit:function () {
+ FS.ensureErrnoError();
+
+ FS.nameTable = new Array(4096);
+
+ FS.mount(MEMFS, {}, '/');
+
+ FS.createDefaultDirectories();
+ FS.createDefaultDevices();
+ },init:function (input, output, error) {
+ assert(!FS.init.initialized, 'FS.init was previously called. If you want to initialize later with custom parameters, remove any earlier calls (note that one is automatically added to the generated code)');
+ FS.init.initialized = true;
+
+ FS.ensureErrnoError();
+
+ // Allow Module.stdin etc. to provide defaults, if none explicitly passed to us here
+ Module['stdin'] = input || Module['stdin'];
+ Module['stdout'] = output || Module['stdout'];
+ Module['stderr'] = error || Module['stderr'];
+
+ FS.createStandardStreams();
+ },quit:function () {
+ FS.init.initialized = false;
+ for (var i = 0; i < FS.streams.length; i++) {
+ var stream = FS.streams[i];
+ if (!stream) {
+ continue;
+ }
+ FS.close(stream);
+ }
+ },getMode:function (canRead, canWrite) {
+ var mode = 0;
+ if (canRead) mode |= 292 | 73;
+ if (canWrite) mode |= 146;
+ return mode;
+ },joinPath:function (parts, forceRelative) {
+ var path = PATH.join.apply(null, parts);
+ if (forceRelative && path[0] == '/') path = path.substr(1);
+ return path;
+ },absolutePath:function (relative, base) {
+ return PATH.resolve(base, relative);
+ },standardizePath:function (path) {
+ return PATH.normalize(path);
+ },findObject:function (path, dontResolveLastLink) {
+ var ret = FS.analyzePath(path, dontResolveLastLink);
+ if (ret.exists) {
+ return ret.object;
+ } else {
+ ___setErrNo(ret.error);
+ return null;
+ }
+ },analyzePath:function (path, dontResolveLastLink) {
+ // operate from within the context of the symlink's target
+ try {
+ var lookup = FS.lookupPath(path, { follow: !dontResolveLastLink });
+ path = lookup.path;
+ } catch (e) {
+ }
+ var ret = {
+ isRoot: false, exists: false, error: 0, name: null, path: null, object: null,
+ parentExists: false, parentPath: null, parentObject: null
+ };
+ try {
+ var lookup = FS.lookupPath(path, { parent: true });
+ ret.parentExists = true;
+ ret.parentPath = lookup.path;
+ ret.parentObject = lookup.node;
+ ret.name = PATH.basename(path);
+ lookup = FS.lookupPath(path, { follow: !dontResolveLastLink });
+ ret.exists = true;
+ ret.path = lookup.path;
+ ret.object = lookup.node;
+ ret.name = lookup.node.name;
+ ret.isRoot = lookup.path === '/';
+ } catch (e) {
+ ret.error = e.errno;
+ };
+ return ret;
+ },createFolder:function (parent, name, canRead, canWrite) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ var mode = FS.getMode(canRead, canWrite);
+ return FS.mkdir(path, mode);
+ },createPath:function (parent, path, canRead, canWrite) {
+ parent = typeof parent === 'string' ? parent : FS.getPath(parent);
+ var parts = path.split('/').reverse();
+ while (parts.length) {
+ var part = parts.pop();
+ if (!part) continue;
+ var current = PATH.join2(parent, part);
+ try {
+ FS.mkdir(current);
+ } catch (e) {
+ // ignore EEXIST
+ }
+ parent = current;
+ }
+ return current;
+ },createFile:function (parent, name, properties, canRead, canWrite) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ var mode = FS.getMode(canRead, canWrite);
+ return FS.create(path, mode);
+ },createDataFile:function (parent, name, data, canRead, canWrite, canOwn) {
+ var path = name ? PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name) : parent;
+ var mode = FS.getMode(canRead, canWrite);
+ var node = FS.create(path, mode);
+ if (data) {
+ if (typeof data === 'string') {
+ var arr = new Array(data.length);
+ for (var i = 0, len = data.length; i < len; ++i) arr[i] = data.charCodeAt(i);
+ data = arr;
+ }
+ // make sure we can write to the file
+ FS.chmod(node, mode | 146);
+ var stream = FS.open(node, 'w');
+ FS.write(stream, data, 0, data.length, 0, canOwn);
+ FS.close(stream);
+ FS.chmod(node, mode);
+ }
+ return node;
+ },createDevice:function (parent, name, input, output) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ var mode = FS.getMode(!!input, !!output);
+ if (!FS.createDevice.major) FS.createDevice.major = 64;
+ var dev = FS.makedev(FS.createDevice.major++, 0);
+ // Create a fake device that a set of stream ops to emulate
+ // the old behavior.
+ FS.registerDevice(dev, {
+ open: function(stream) {
+ stream.seekable = false;
+ },
+ close: function(stream) {
+ // flush any pending line data
+ if (output && output.buffer && output.buffer.length) {
+ output(10);
+ }
+ },
+ read: function(stream, buffer, offset, length, pos /* ignored */) {
+ var bytesRead = 0;
+ for (var i = 0; i < length; i++) {
+ var result;
+ try {
+ result = input();
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ if (result === undefined && bytesRead === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ if (result === null || result === undefined) break;
+ bytesRead++;
+ buffer[offset+i] = result;
+ }
+ if (bytesRead) {
+ stream.node.timestamp = Date.now();
+ }
+ return bytesRead;
+ },
+ write: function(stream, buffer, offset, length, pos) {
+ for (var i = 0; i < length; i++) {
+ try {
+ output(buffer[offset+i]);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ }
+ if (length) {
+ stream.node.timestamp = Date.now();
+ }
+ return i;
+ }
+ });
+ return FS.mkdev(path, mode, dev);
+ },createLink:function (parent, name, target, canRead, canWrite) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ return FS.symlink(target, path);
+ },forceLoadFile:function (obj) {
+ if (obj.isDevice || obj.isFolder || obj.link || obj.contents) return true;
+ var success = true;
+ if (typeof XMLHttpRequest !== 'undefined') {
+ throw new Error("Lazy loading should have been performed (contents set) in createLazyFile, but it was not. Lazy loading only works in web workers. Use --embed-file or --preload-file in emcc on the main thread.");
+ } else if (Module['read']) {
+ // Command-line.
+ try {
+ // WARNING: Can't read binary files in V8's d8 or tracemonkey's js, as
+ // read() will try to parse UTF8.
+ obj.contents = intArrayFromString(Module['read'](obj.url), true);
+ } catch (e) {
+ success = false;
+ }
+ } else {
+ throw new Error('Cannot load without read() or XMLHttpRequest.');
+ }
+ if (!success) ___setErrNo(ERRNO_CODES.EIO);
+ return success;
+ },createLazyFile:function (parent, name, url, canRead, canWrite) {
+ // Lazy chunked Uint8Array (implements get and length from Uint8Array). Actual getting is abstracted away for eventual reuse.
+ function LazyUint8Array() {
+ this.lengthKnown = false;
+ this.chunks = []; // Loaded chunks. Index is the chunk number
+ }
+ LazyUint8Array.prototype.get = function LazyUint8Array_get(idx) {
+ if (idx > this.length-1 || idx < 0) {
+ return undefined;
+ }
+ var chunkOffset = idx % this.chunkSize;
+ var chunkNum = Math.floor(idx / this.chunkSize);
+ return this.getter(chunkNum)[chunkOffset];
+ }
+ LazyUint8Array.prototype.setDataGetter = function LazyUint8Array_setDataGetter(getter) {
+ this.getter = getter;
+ }
+ LazyUint8Array.prototype.cacheLength = function LazyUint8Array_cacheLength() {
+ // Find length
+ var xhr = new XMLHttpRequest();
+ xhr.open('HEAD', url, false);
+ xhr.send(null);
+ if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". Status: " + xhr.status);
+ var datalength = Number(xhr.getResponseHeader("Content-length"));
+ var header;
+ var hasByteServing = (header = xhr.getResponseHeader("Accept-Ranges")) && header === "bytes";
+ var chunkSize = 1024*1024; // Chunk size in bytes
+
+ if (!hasByteServing) chunkSize = datalength;
+
+ // Function to get a range from the remote URL.
+ var doXHR = (function(from, to) {
+ if (from > to) throw new Error("invalid range (" + from + ", " + to + ") or no bytes requested!");
+ if (to > datalength-1) throw new Error("only " + datalength + " bytes available! programmer error!");
+
+ // TODO: Use mozResponseArrayBuffer, responseStream, etc. if available.
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, false);
+ if (datalength !== chunkSize) xhr.setRequestHeader("Range", "bytes=" + from + "-" + to);
+
+ // Some hints to the browser that we want binary data.
+ if (typeof Uint8Array != 'undefined') xhr.responseType = 'arraybuffer';
+ if (xhr.overrideMimeType) {
+ xhr.overrideMimeType('text/plain; charset=x-user-defined');
+ }
+
+ xhr.send(null);
+ if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". Status: " + xhr.status);
+ if (xhr.response !== undefined) {
+ return new Uint8Array(xhr.response || []);
+ } else {
+ return intArrayFromString(xhr.responseText || '', true);
+ }
+ });
+ var lazyArray = this;
+ lazyArray.setDataGetter(function(chunkNum) {
+ var start = chunkNum * chunkSize;
+ var end = (chunkNum+1) * chunkSize - 1; // including this byte
+ end = Math.min(end, datalength-1); // if datalength-1 is selected, this is the last block
+ if (typeof(lazyArray.chunks[chunkNum]) === "undefined") {
+ lazyArray.chunks[chunkNum] = doXHR(start, end);
+ }
+ if (typeof(lazyArray.chunks[chunkNum]) === "undefined") throw new Error("doXHR failed!");
+ return lazyArray.chunks[chunkNum];
+ });
+
+ this._length = datalength;
+ this._chunkSize = chunkSize;
+ this.lengthKnown = true;
+ }
+ if (typeof XMLHttpRequest !== 'undefined') {
+ if (!ENVIRONMENT_IS_WORKER) throw 'Cannot do synchronous binary XHRs outside webworkers in modern browsers. Use --embed-file or --preload-file in emcc';
+ var lazyArray = new LazyUint8Array();
+ Object.defineProperty(lazyArray, "length", {
+ get: function() {
+ if(!this.lengthKnown) {
+ this.cacheLength();
+ }
+ return this._length;
+ }
+ });
+ Object.defineProperty(lazyArray, "chunkSize", {
+ get: function() {
+ if(!this.lengthKnown) {
+ this.cacheLength();
+ }
+ return this._chunkSize;
+ }
+ });
+
+ var properties = { isDevice: false, contents: lazyArray };
+ } else {
+ var properties = { isDevice: false, url: url };
+ }
+
+ var node = FS.createFile(parent, name, properties, canRead, canWrite);
+ // This is a total hack, but I want to get this lazy file code out of the
+ // core of MEMFS. If we want to keep this lazy file concept I feel it should
+ // be its own thin LAZYFS proxying calls to MEMFS.
+ if (properties.contents) {
+ node.contents = properties.contents;
+ } else if (properties.url) {
+ node.contents = null;
+ node.url = properties.url;
+ }
+ // override each stream op with one that tries to force load the lazy file first
+ var stream_ops = {};
+ var keys = Object.keys(node.stream_ops);
+ keys.forEach(function(key) {
+ var fn = node.stream_ops[key];
+ stream_ops[key] = function forceLoadLazyFile() {
+ if (!FS.forceLoadFile(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ return fn.apply(null, arguments);
+ };
+ });
+ // use a custom read function
+ stream_ops.read = function stream_ops_read(stream, buffer, offset, length, position) {
+ if (!FS.forceLoadFile(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ var contents = stream.node.contents;
+ if (position >= contents.length)
+ return 0;
+ var size = Math.min(contents.length - position, length);
+ assert(size >= 0);
+ if (contents.slice) { // normal array
+ for (var i = 0; i < size; i++) {
+ buffer[offset + i] = contents[position + i];
+ }
+ } else {
+ for (var i = 0; i < size; i++) { // LazyUint8Array from sync binary XHR
+ buffer[offset + i] = contents.get(position + i);
+ }
+ }
+ return size;
+ };
+ node.stream_ops = stream_ops;
+ return node;
+ },createPreloadedFile:function (parent, name, url, canRead, canWrite, onload, onerror, dontCreateFile, canOwn) {
+ Browser.init();
+ // TODO we should allow people to just pass in a complete filename instead
+ // of parent and name being that we just join them anyways
+ var fullname = name ? PATH.resolve(PATH.join2(parent, name)) : parent;
+ function processData(byteArray) {
+ function finish(byteArray) {
+ if (!dontCreateFile) {
+ FS.createDataFile(parent, name, byteArray, canRead, canWrite, canOwn);
+ }
+ if (onload) onload();
+ removeRunDependency('cp ' + fullname);
+ }
+ var handled = false;
+ Module['preloadPlugins'].forEach(function(plugin) {
+ if (handled) return;
+ if (plugin['canHandle'](fullname)) {
+ plugin['handle'](byteArray, fullname, finish, function() {
+ if (onerror) onerror();
+ removeRunDependency('cp ' + fullname);
+ });
+ handled = true;
+ }
+ });
+ if (!handled) finish(byteArray);
+ }
+ addRunDependency('cp ' + fullname);
+ if (typeof url == 'string') {
+ Browser.asyncLoad(url, function(byteArray) {
+ processData(byteArray);
+ }, onerror);
+ } else {
+ processData(url);
+ }
+ },indexedDB:function () {
+ return window.indexedDB || window.mozIndexedDB || window.webkitIndexedDB || window.msIndexedDB;
+ },DB_NAME:function () {
+ return 'EM_FS_' + window.location.pathname;
+ },DB_VERSION:20,DB_STORE_NAME:"FILE_DATA",saveFilesToDB:function (paths, onload, onerror) {
+ onload = onload || function(){};
+ onerror = onerror || function(){};
+ var indexedDB = FS.indexedDB();
+ try {
+ var openRequest = indexedDB.open(FS.DB_NAME(), FS.DB_VERSION);
+ } catch (e) {
+ return onerror(e);
+ }
+ openRequest.onupgradeneeded = function openRequest_onupgradeneeded() {
+ console.log('creating db');
+ var db = openRequest.result;
+ db.createObjectStore(FS.DB_STORE_NAME);
+ };
+ openRequest.onsuccess = function openRequest_onsuccess() {
+ var db = openRequest.result;
+ var transaction = db.transaction([FS.DB_STORE_NAME], 'readwrite');
+ var files = transaction.objectStore(FS.DB_STORE_NAME);
+ var ok = 0, fail = 0, total = paths.length;
+ function finish() {
+ if (fail == 0) onload(); else onerror();
+ }
+ paths.forEach(function(path) {
+ var putRequest = files.put(FS.analyzePath(path).object.contents, path);
+ putRequest.onsuccess = function putRequest_onsuccess() { ok++; if (ok + fail == total) finish() };
+ putRequest.onerror = function putRequest_onerror() { fail++; if (ok + fail == total) finish() };
+ });
+ transaction.onerror = onerror;
+ };
+ openRequest.onerror = onerror;
+ },loadFilesFromDB:function (paths, onload, onerror) {
+ onload = onload || function(){};
+ onerror = onerror || function(){};
+ var indexedDB = FS.indexedDB();
+ try {
+ var openRequest = indexedDB.open(FS.DB_NAME(), FS.DB_VERSION);
+ } catch (e) {
+ return onerror(e);
+ }
+ openRequest.onupgradeneeded = onerror; // no database to load from
+ openRequest.onsuccess = function openRequest_onsuccess() {
+ var db = openRequest.result;
+ try {
+ var transaction = db.transaction([FS.DB_STORE_NAME], 'readonly');
+ } catch(e) {
+ onerror(e);
+ return;
+ }
+ var files = transaction.objectStore(FS.DB_STORE_NAME);
+ var ok = 0, fail = 0, total = paths.length;
+ function finish() {
+ if (fail == 0) onload(); else onerror();
+ }
+ paths.forEach(function(path) {
+ var getRequest = files.get(path);
+ getRequest.onsuccess = function getRequest_onsuccess() {
+ if (FS.analyzePath(path).exists) {
+ FS.unlink(path);
+ }
+ FS.createDataFile(PATH.dirname(path), PATH.basename(path), getRequest.result, true, true, true);
+ ok++;
+ if (ok + fail == total) finish();
+ };
+ getRequest.onerror = function getRequest_onerror() { fail++; if (ok + fail == total) finish() };
+ });
+ transaction.onerror = onerror;
+ };
+ openRequest.onerror = onerror;
+ }};var PATH={splitPath:function (filename) {
+ var splitPathRe = /^(\/?|)([\s\S]*?)((?:\.{1,2}|[^\/]+?|)(\.[^.\/]*|))(?:[\/]*)$/;
+ return splitPathRe.exec(filename).slice(1);
+ },normalizeArray:function (parts, allowAboveRoot) {
+ // if the path tries to go above the root, `up` ends up > 0
+ var up = 0;
+ for (var i = parts.length - 1; i >= 0; i--) {
+ var last = parts[i];
+ if (last === '.') {
+ parts.splice(i, 1);
+ } else if (last === '..') {
+ parts.splice(i, 1);
+ up++;
+ } else if (up) {
+ parts.splice(i, 1);
+ up--;
+ }
+ }
+ // if the path is allowed to go above the root, restore leading ..s
+ if (allowAboveRoot) {
+ for (; up--; up) {
+ parts.unshift('..');
+ }
+ }
+ return parts;
+ },normalize:function (path) {
+ var isAbsolute = path.charAt(0) === '/',
+ trailingSlash = path.substr(-1) === '/';
+ // Normalize the path
+ path = PATH.normalizeArray(path.split('/').filter(function(p) {
+ return !!p;
+ }), !isAbsolute).join('/');
+ if (!path && !isAbsolute) {
+ path = '.';
+ }
+ if (path && trailingSlash) {
+ path += '/';
+ }
+ return (isAbsolute ? '/' : '') + path;
+ },dirname:function (path) {
+ var result = PATH.splitPath(path),
+ root = result[0],
+ dir = result[1];
+ if (!root && !dir) {
+ // No dirname whatsoever
+ return '.';
+ }
+ if (dir) {
+ // It has a dirname, strip trailing slash
+ dir = dir.substr(0, dir.length - 1);
+ }
+ return root + dir;
+ },basename:function (path) {
+ // EMSCRIPTEN return '/'' for '/', not an empty string
+ if (path === '/') return '/';
+ var lastSlash = path.lastIndexOf('/');
+ if (lastSlash === -1) return path;
+ return path.substr(lastSlash+1);
+ },extname:function (path) {
+ return PATH.splitPath(path)[3];
+ },join:function () {
+ var paths = Array.prototype.slice.call(arguments, 0);
+ return PATH.normalize(paths.join('/'));
+ },join2:function (l, r) {
+ return PATH.normalize(l + '/' + r);
+ },resolve:function () {
+ var resolvedPath = '',
+ resolvedAbsolute = false;
+ for (var i = arguments.length - 1; i >= -1 && !resolvedAbsolute; i--) {
+ var path = (i >= 0) ? arguments[i] : FS.cwd();
+ // Skip empty and invalid entries
+ if (typeof path !== 'string') {
+ throw new TypeError('Arguments to path.resolve must be strings');
+ } else if (!path) {
+ continue;
+ }
+ resolvedPath = path + '/' + resolvedPath;
+ resolvedAbsolute = path.charAt(0) === '/';
+ }
+ // At this point the path should be resolved to a full absolute path, but
+ // handle relative paths to be safe (might happen when process.cwd() fails)
+ resolvedPath = PATH.normalizeArray(resolvedPath.split('/').filter(function(p) {
+ return !!p;
+ }), !resolvedAbsolute).join('/');
+ return ((resolvedAbsolute ? '/' : '') + resolvedPath) || '.';
+ },relative:function (from, to) {
+ from = PATH.resolve(from).substr(1);
+ to = PATH.resolve(to).substr(1);
+ function trim(arr) {
+ var start = 0;
+ for (; start < arr.length; start++) {
+ if (arr[start] !== '') break;
+ }
+ var end = arr.length - 1;
+ for (; end >= 0; end--) {
+ if (arr[end] !== '') break;
+ }
+ if (start > end) return [];
+ return arr.slice(start, end - start + 1);
+ }
+ var fromParts = trim(from.split('/'));
+ var toParts = trim(to.split('/'));
+ var length = Math.min(fromParts.length, toParts.length);
+ var samePartsLength = length;
+ for (var i = 0; i < length; i++) {
+ if (fromParts[i] !== toParts[i]) {
+ samePartsLength = i;
+ break;
+ }
+ }
+ var outputParts = [];
+ for (var i = samePartsLength; i < fromParts.length; i++) {
+ outputParts.push('..');
+ }
+ outputParts = outputParts.concat(toParts.slice(samePartsLength));
+ return outputParts.join('/');
+ }};var Browser={mainLoop:{scheduler:null,method:"",shouldPause:false,paused:false,queue:[],pause:function () {
+ Browser.mainLoop.shouldPause = true;
+ },resume:function () {
+ if (Browser.mainLoop.paused) {
+ Browser.mainLoop.paused = false;
+ Browser.mainLoop.scheduler();
+ }
+ Browser.mainLoop.shouldPause = false;
+ },updateStatus:function () {
+ if (Module['setStatus']) {
+ var message = Module['statusMessage'] || 'Please wait...';
+ var remaining = Browser.mainLoop.remainingBlockers;
+ var expected = Browser.mainLoop.expectedBlockers;
+ if (remaining) {
+ if (remaining < expected) {
+ Module['setStatus'](message + ' (' + (expected - remaining) + '/' + expected + ')');
+ } else {
+ Module['setStatus'](message);
+ }
+ } else {
+ Module['setStatus']('');
+ }
+ }
+ }},isFullScreen:false,pointerLock:false,moduleContextCreatedCallbacks:[],workers:[],init:function () {
+ if (!Module["preloadPlugins"]) Module["preloadPlugins"] = []; // needs to exist even in workers
+
+ if (Browser.initted || ENVIRONMENT_IS_WORKER) return;
+ Browser.initted = true;
+
+ try {
+ new Blob();
+ Browser.hasBlobConstructor = true;
+ } catch(e) {
+ Browser.hasBlobConstructor = false;
+ console.log("warning: no blob constructor, cannot create blobs with mimetypes");
+ }
+ Browser.BlobBuilder = typeof MozBlobBuilder != "undefined" ? MozBlobBuilder : (typeof WebKitBlobBuilder != "undefined" ? WebKitBlobBuilder : (!Browser.hasBlobConstructor ? console.log("warning: no BlobBuilder") : null));
+ Browser.URLObject = typeof window != "undefined" ? (window.URL ? window.URL : window.webkitURL) : undefined;
+ if (!Module.noImageDecoding && typeof Browser.URLObject === 'undefined') {
+ console.log("warning: Browser does not support creating object URLs. Built-in browser image decoding will not be available.");
+ Module.noImageDecoding = true;
+ }
+
+ // Support for plugins that can process preloaded files. You can add more of these to
+ // your app by creating and appending to Module.preloadPlugins.
+ //
+ // Each plugin is asked if it can handle a file based on the file's name. If it can,
+ // it is given the file's raw data. When it is done, it calls a callback with the file's
+ // (possibly modified) data. For example, a plugin might decompress a file, or it
+ // might create some side data structure for use later (like an Image element, etc.).
+
+ var imagePlugin = {};
+ imagePlugin['canHandle'] = function imagePlugin_canHandle(name) {
+ return !Module.noImageDecoding && /\.(jpg|jpeg|png|bmp)$/i.test(name);
+ };
+ imagePlugin['handle'] = function imagePlugin_handle(byteArray, name, onload, onerror) {
+ var b = null;
+ if (Browser.hasBlobConstructor) {
+ try {
+ b = new Blob([byteArray], { type: Browser.getMimetype(name) });
+ if (b.size !== byteArray.length) { // Safari bug #118630
+ // Safari's Blob can only take an ArrayBuffer
+ b = new Blob([(new Uint8Array(byteArray)).buffer], { type: Browser.getMimetype(name) });
+ }
+ } catch(e) {
+ Runtime.warnOnce('Blob constructor present but fails: ' + e + '; falling back to blob builder');
+ }
+ }
+ if (!b) {
+ var bb = new Browser.BlobBuilder();
+ bb.append((new Uint8Array(byteArray)).buffer); // we need to pass a buffer, and must copy the array to get the right data range
+ b = bb.getBlob();
+ }
+ var url = Browser.URLObject.createObjectURL(b);
+ var img = new Image();
+ img.onload = function img_onload() {
+ assert(img.complete, 'Image ' + name + ' could not be decoded');
+ var canvas = document.createElement('canvas');
+ canvas.width = img.width;
+ canvas.height = img.height;
+ var ctx = canvas.getContext('2d');
+ ctx.drawImage(img, 0, 0);
+ Module["preloadedImages"][name] = canvas;
+ Browser.URLObject.revokeObjectURL(url);
+ if (onload) onload(byteArray);
+ };
+ img.onerror = function img_onerror(event) {
+ console.log('Image ' + url + ' could not be decoded');
+ if (onerror) onerror();
+ };
+ img.src = url;
+ };
+ Module['preloadPlugins'].push(imagePlugin);
+
+ var audioPlugin = {};
+ audioPlugin['canHandle'] = function audioPlugin_canHandle(name) {
+ return !Module.noAudioDecoding && name.substr(-4) in { '.ogg': 1, '.wav': 1, '.mp3': 1 };
+ };
+ audioPlugin['handle'] = function audioPlugin_handle(byteArray, name, onload, onerror) {
+ var done = false;
+ function finish(audio) {
+ if (done) return;
+ done = true;
+ Module["preloadedAudios"][name] = audio;
+ if (onload) onload(byteArray);
+ }
+ function fail() {
+ if (done) return;
+ done = true;
+ Module["preloadedAudios"][name] = new Audio(); // empty shim
+ if (onerror) onerror();
+ }
+ if (Browser.hasBlobConstructor) {
+ try {
+ var b = new Blob([byteArray], { type: Browser.getMimetype(name) });
+ } catch(e) {
+ return fail();
+ }
+ var url = Browser.URLObject.createObjectURL(b); // XXX we never revoke this!
+ var audio = new Audio();
+ audio.addEventListener('canplaythrough', function() { finish(audio) }, false); // use addEventListener due to chromium bug 124926
+ audio.onerror = function audio_onerror(event) {
+ if (done) return;
+ console.log('warning: browser could not fully decode audio ' + name + ', trying slower base64 approach');
+ function encode64(data) {
+ var BASE = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';
+ var PAD = '=';
+ var ret = '';
+ var leftchar = 0;
+ var leftbits = 0;
+ for (var i = 0; i < data.length; i++) {
+ leftchar = (leftchar << 8) | data[i];
+ leftbits += 8;
+ while (leftbits >= 6) {
+ var curr = (leftchar >> (leftbits-6)) & 0x3f;
+ leftbits -= 6;
+ ret += BASE[curr];
+ }
+ }
+ if (leftbits == 2) {
+ ret += BASE[(leftchar&3) << 4];
+ ret += PAD + PAD;
+ } else if (leftbits == 4) {
+ ret += BASE[(leftchar&0xf) << 2];
+ ret += PAD;
+ }
+ return ret;
+ }
+ audio.src = 'data:audio/x-' + name.substr(-3) + ';base64,' + encode64(byteArray);
+ finish(audio); // we don't wait for confirmation this worked - but it's worth trying
+ };
+ audio.src = url;
+ // workaround for chrome bug 124926 - we do not always get oncanplaythrough or onerror
+ Browser.safeSetTimeout(function() {
+ finish(audio); // try to use it even though it is not necessarily ready to play
+ }, 10000);
+ } else {
+ return fail();
+ }
+ };
+ Module['preloadPlugins'].push(audioPlugin);
+
+ // Canvas event setup
+
+ var canvas = Module['canvas'];
+
+ // forced aspect ratio can be enabled by defining 'forcedAspectRatio' on Module
+ // Module['forcedAspectRatio'] = 4 / 3;
+
+ canvas.requestPointerLock = canvas['requestPointerLock'] ||
+ canvas['mozRequestPointerLock'] ||
+ canvas['webkitRequestPointerLock'] ||
+ canvas['msRequestPointerLock'] ||
+ function(){};
+ canvas.exitPointerLock = document['exitPointerLock'] ||
+ document['mozExitPointerLock'] ||
+ document['webkitExitPointerLock'] ||
+ document['msExitPointerLock'] ||
+ function(){}; // no-op if function does not exist
+ canvas.exitPointerLock = canvas.exitPointerLock.bind(document);
+
+ function pointerLockChange() {
+ Browser.pointerLock = document['pointerLockElement'] === canvas ||
+ document['mozPointerLockElement'] === canvas ||
+ document['webkitPointerLockElement'] === canvas ||
+ document['msPointerLockElement'] === canvas;
+ }
+
+ document.addEventListener('pointerlockchange', pointerLockChange, false);
+ document.addEventListener('mozpointerlockchange', pointerLockChange, false);
+ document.addEventListener('webkitpointerlockchange', pointerLockChange, false);
+ document.addEventListener('mspointerlockchange', pointerLockChange, false);
+
+ if (Module['elementPointerLock']) {
+ canvas.addEventListener("click", function(ev) {
+ if (!Browser.pointerLock && canvas.requestPointerLock) {
+ canvas.requestPointerLock();
+ ev.preventDefault();
+ }
+ }, false);
+ }
+ },createContext:function (canvas, useWebGL, setInModule, webGLContextAttributes) {
+ var ctx;
+ var errorInfo = '?';
+ function onContextCreationError(event) {
+ errorInfo = event.statusMessage || errorInfo;
+ }
+ try {
+ if (useWebGL) {
+ var contextAttributes = {
+ antialias: false,
+ alpha: false
+ };
+
+ if (webGLContextAttributes) {
+ for (var attribute in webGLContextAttributes) {
+ contextAttributes[attribute] = webGLContextAttributes[attribute];
+ }
+ }
+
+
+ canvas.addEventListener('webglcontextcreationerror', onContextCreationError, false);
+ try {
+ ['experimental-webgl', 'webgl'].some(function(webglId) {
+ return ctx = canvas.getContext(webglId, contextAttributes);
+ });
+ } finally {
+ canvas.removeEventListener('webglcontextcreationerror', onContextCreationError, false);
+ }
+ } else {
+ ctx = canvas.getContext('2d');
+ }
+ if (!ctx) throw ':(';
+ } catch (e) {
+ Module.print('Could not create canvas: ' + [errorInfo, e]);
+ return null;
+ }
+ if (useWebGL) {
+ // Set the background of the WebGL canvas to black
+ canvas.style.backgroundColor = "black";
+
+ // Warn on context loss
+ canvas.addEventListener('webglcontextlost', function(event) {
+ alert('WebGL context lost. You will need to reload the page.');
+ }, false);
+ }
+ if (setInModule) {
+ GLctx = Module.ctx = ctx;
+ Module.useWebGL = useWebGL;
+ Browser.moduleContextCreatedCallbacks.forEach(function(callback) { callback() });
+ Browser.init();
+ }
+ return ctx;
+ },destroyContext:function (canvas, useWebGL, setInModule) {},fullScreenHandlersInstalled:false,lockPointer:undefined,resizeCanvas:undefined,requestFullScreen:function (lockPointer, resizeCanvas) {
+ Browser.lockPointer = lockPointer;
+ Browser.resizeCanvas = resizeCanvas;
+ if (typeof Browser.lockPointer === 'undefined') Browser.lockPointer = true;
+ if (typeof Browser.resizeCanvas === 'undefined') Browser.resizeCanvas = false;
+
+ var canvas = Module['canvas'];
+ function fullScreenChange() {
+ Browser.isFullScreen = false;
+ var canvasContainer = canvas.parentNode;
+ if ((document['webkitFullScreenElement'] || document['webkitFullscreenElement'] ||
+ document['mozFullScreenElement'] || document['mozFullscreenElement'] ||
+ document['fullScreenElement'] || document['fullscreenElement'] ||
+ document['msFullScreenElement'] || document['msFullscreenElement'] ||
+ document['webkitCurrentFullScreenElement']) === canvasContainer) {
+ canvas.cancelFullScreen = document['cancelFullScreen'] ||
+ document['mozCancelFullScreen'] ||
+ document['webkitCancelFullScreen'] ||
+ document['msExitFullscreen'] ||
+ document['exitFullscreen'] ||
+ function() {};
+ canvas.cancelFullScreen = canvas.cancelFullScreen.bind(document);
+ if (Browser.lockPointer) canvas.requestPointerLock();
+ Browser.isFullScreen = true;
+ if (Browser.resizeCanvas) Browser.setFullScreenCanvasSize();
+ } else {
+
+ // remove the full screen specific parent of the canvas again to restore the HTML structure from before going full screen
+ canvasContainer.parentNode.insertBefore(canvas, canvasContainer);
+ canvasContainer.parentNode.removeChild(canvasContainer);
+
+ if (Browser.resizeCanvas) Browser.setWindowedCanvasSize();
+ }
+ if (Module['onFullScreen']) Module['onFullScreen'](Browser.isFullScreen);
+ Browser.updateCanvasDimensions(canvas);
+ }
+
+ if (!Browser.fullScreenHandlersInstalled) {
+ Browser.fullScreenHandlersInstalled = true;
+ document.addEventListener('fullscreenchange', fullScreenChange, false);
+ document.addEventListener('mozfullscreenchange', fullScreenChange, false);
+ document.addEventListener('webkitfullscreenchange', fullScreenChange, false);
+ document.addEventListener('MSFullscreenChange', fullScreenChange, false);
+ }
+
+ // create a new parent to ensure the canvas has no siblings. this allows browsers to optimize full screen performance when its parent is the full screen root
+ var canvasContainer = document.createElement("div");
+ canvas.parentNode.insertBefore(canvasContainer, canvas);
+ canvasContainer.appendChild(canvas);
+
+ // use parent of canvas as full screen root to allow aspect ratio correction (Firefox stretches the root to screen size)
+ canvasContainer.requestFullScreen = canvasContainer['requestFullScreen'] ||
+ canvasContainer['mozRequestFullScreen'] ||
+ canvasContainer['msRequestFullscreen'] ||
+ (canvasContainer['webkitRequestFullScreen'] ? function() { canvasContainer['webkitRequestFullScreen'](Element['ALLOW_KEYBOARD_INPUT']) } : null);
+ canvasContainer.requestFullScreen();
+ },requestAnimationFrame:function requestAnimationFrame(func) {
+ if (typeof window === 'undefined') { // Provide fallback to setTimeout if window is undefined (e.g. in Node.js)
+ setTimeout(func, 1000/60);
+ } else {
+ if (!window.requestAnimationFrame) {
+ window.requestAnimationFrame = window['requestAnimationFrame'] ||
+ window['mozRequestAnimationFrame'] ||
+ window['webkitRequestAnimationFrame'] ||
+ window['msRequestAnimationFrame'] ||
+ window['oRequestAnimationFrame'] ||
+ window['setTimeout'];
+ }
+ window.requestAnimationFrame(func);
+ }
+ },safeCallback:function (func) {
+ return function() {
+ if (!ABORT) return func.apply(null, arguments);
+ };
+ },safeRequestAnimationFrame:function (func) {
+ return Browser.requestAnimationFrame(function() {
+ if (!ABORT) func();
+ });
+ },safeSetTimeout:function (func, timeout) {
+ return setTimeout(function() {
+ if (!ABORT) func();
+ }, timeout);
+ },safeSetInterval:function (func, timeout) {
+ return setInterval(function() {
+ if (!ABORT) func();
+ }, timeout);
+ },getMimetype:function (name) {
+ return {
+ 'jpg': 'image/jpeg',
+ 'jpeg': 'image/jpeg',
+ 'png': 'image/png',
+ 'bmp': 'image/bmp',
+ 'ogg': 'audio/ogg',
+ 'wav': 'audio/wav',
+ 'mp3': 'audio/mpeg'
+ }[name.substr(name.lastIndexOf('.')+1)];
+ },getUserMedia:function (func) {
+ if(!window.getUserMedia) {
+ window.getUserMedia = navigator['getUserMedia'] ||
+ navigator['mozGetUserMedia'];
+ }
+ window.getUserMedia(func);
+ },getMovementX:function (event) {
+ return event['movementX'] ||
+ event['mozMovementX'] ||
+ event['webkitMovementX'] ||
+ 0;
+ },getMovementY:function (event) {
+ return event['movementY'] ||
+ event['mozMovementY'] ||
+ event['webkitMovementY'] ||
+ 0;
+ },getMouseWheelDelta:function (event) {
+ return Math.max(-1, Math.min(1, event.type === 'DOMMouseScroll' ? event.detail : -event.wheelDelta));
+ },mouseX:0,mouseY:0,mouseMovementX:0,mouseMovementY:0,calculateMouseEvent:function (event) { // event should be mousemove, mousedown or mouseup
+ if (Browser.pointerLock) {
+ // When the pointer is locked, calculate the coordinates
+ // based on the movement of the mouse.
+ // Workaround for Firefox bug 764498
+ if (event.type != 'mousemove' &&
+ ('mozMovementX' in event)) {
+ Browser.mouseMovementX = Browser.mouseMovementY = 0;
+ } else {
+ Browser.mouseMovementX = Browser.getMovementX(event);
+ Browser.mouseMovementY = Browser.getMovementY(event);
+ }
+
+ // check if SDL is available
+ if (typeof SDL != "undefined") {
+ Browser.mouseX = SDL.mouseX + Browser.mouseMovementX;
+ Browser.mouseY = SDL.mouseY + Browser.mouseMovementY;
+ } else {
+ // just add the mouse delta to the current absolut mouse position
+ // FIXME: ideally this should be clamped against the canvas size and zero
+ Browser.mouseX += Browser.mouseMovementX;
+ Browser.mouseY += Browser.mouseMovementY;
+ }
+ } else {
+ // Otherwise, calculate the movement based on the changes
+ // in the coordinates.
+ var rect = Module["canvas"].getBoundingClientRect();
+ var x, y;
+
+ // Neither .scrollX or .pageXOffset are defined in a spec, but
+ // we prefer .scrollX because it is currently in a spec draft.
+ // (see: http://www.w3.org/TR/2013/WD-cssom-view-20131217/)
+ var scrollX = ((typeof window.scrollX !== 'undefined') ? window.scrollX : window.pageXOffset);
+ var scrollY = ((typeof window.scrollY !== 'undefined') ? window.scrollY : window.pageYOffset);
+ if (event.type == 'touchstart' ||
+ event.type == 'touchend' ||
+ event.type == 'touchmove') {
+ var t = event.touches.item(0);
+ if (t) {
+ x = t.pageX - (scrollX + rect.left);
+ y = t.pageY - (scrollY + rect.top);
+ } else {
+ return;
+ }
+ } else {
+ x = event.pageX - (scrollX + rect.left);
+ y = event.pageY - (scrollY + rect.top);
+ }
+
+ // the canvas might be CSS-scaled compared to its backbuffer;
+ // SDL-using content will want mouse coordinates in terms
+ // of backbuffer units.
+ var cw = Module["canvas"].width;
+ var ch = Module["canvas"].height;
+ x = x * (cw / rect.width);
+ y = y * (ch / rect.height);
+
+ Browser.mouseMovementX = x - Browser.mouseX;
+ Browser.mouseMovementY = y - Browser.mouseY;
+ Browser.mouseX = x;
+ Browser.mouseY = y;
+ }
+ },xhrLoad:function (url, onload, onerror) {
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, true);
+ xhr.responseType = 'arraybuffer';
+ xhr.onload = function xhr_onload() {
+ if (xhr.status == 200 || (xhr.status == 0 && xhr.response)) { // file URLs can return 0
+ onload(xhr.response);
+ } else {
+ onerror();
+ }
+ };
+ xhr.onerror = onerror;
+ xhr.send(null);
+ },asyncLoad:function (url, onload, onerror, noRunDep) {
+ Browser.xhrLoad(url, function(arrayBuffer) {
+ assert(arrayBuffer, 'Loading data file "' + url + '" failed (no arrayBuffer).');
+ onload(new Uint8Array(arrayBuffer));
+ if (!noRunDep) removeRunDependency('al ' + url);
+ }, function(event) {
+ if (onerror) {
+ onerror();
+ } else {
+ throw 'Loading data file "' + url + '" failed.';
+ }
+ });
+ if (!noRunDep) addRunDependency('al ' + url);
+ },resizeListeners:[],updateResizeListeners:function () {
+ var canvas = Module['canvas'];
+ Browser.resizeListeners.forEach(function(listener) {
+ listener(canvas.width, canvas.height);
+ });
+ },setCanvasSize:function (width, height, noUpdates) {
+ var canvas = Module['canvas'];
+ Browser.updateCanvasDimensions(canvas, width, height);
+ if (!noUpdates) Browser.updateResizeListeners();
+ },windowedWidth:0,windowedHeight:0,setFullScreenCanvasSize:function () {
+ // check if SDL is available
+ if (typeof SDL != "undefined") {
+ var flags = HEAPU32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)];
+ flags = flags | 0x00800000; // set SDL_FULLSCREEN flag
+ HEAP32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)]=flags
+ }
+ Browser.updateResizeListeners();
+ },setWindowedCanvasSize:function () {
+ // check if SDL is available
+ if (typeof SDL != "undefined") {
+ var flags = HEAPU32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)];
+ flags = flags & ~0x00800000; // clear SDL_FULLSCREEN flag
+ HEAP32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)]=flags
+ }
+ Browser.updateResizeListeners();
+ },updateCanvasDimensions:function (canvas, wNative, hNative) {
+ if (wNative && hNative) {
+ canvas.widthNative = wNative;
+ canvas.heightNative = hNative;
+ } else {
+ wNative = canvas.widthNative;
+ hNative = canvas.heightNative;
+ }
+ var w = wNative;
+ var h = hNative;
+ if (Module['forcedAspectRatio'] && Module['forcedAspectRatio'] > 0) {
+ if (w/h < Module['forcedAspectRatio']) {
+ w = Math.round(h * Module['forcedAspectRatio']);
+ } else {
+ h = Math.round(w / Module['forcedAspectRatio']);
+ }
+ }
+ if (((document['webkitFullScreenElement'] || document['webkitFullscreenElement'] ||
+ document['mozFullScreenElement'] || document['mozFullscreenElement'] ||
+ document['fullScreenElement'] || document['fullscreenElement'] ||
+ document['msFullScreenElement'] || document['msFullscreenElement'] ||
+ document['webkitCurrentFullScreenElement']) === canvas.parentNode) && (typeof screen != 'undefined')) {
+ var factor = Math.min(screen.width / w, screen.height / h);
+ w = Math.round(w * factor);
+ h = Math.round(h * factor);
+ }
+ if (Browser.resizeCanvas) {
+ if (canvas.width != w) canvas.width = w;
+ if (canvas.height != h) canvas.height = h;
+ if (typeof canvas.style != 'undefined') {
+ canvas.style.removeProperty( "width");
+ canvas.style.removeProperty("height");
+ }
+ } else {
+ if (canvas.width != wNative) canvas.width = wNative;
+ if (canvas.height != hNative) canvas.height = hNative;
+ if (typeof canvas.style != 'undefined') {
+ if (w != wNative || h != hNative) {
+ canvas.style.setProperty( "width", w + "px", "important");
+ canvas.style.setProperty("height", h + "px", "important");
+ } else {
+ canvas.style.removeProperty( "width");
+ canvas.style.removeProperty("height");
+ }
+ }
+ }
+ }};
+
+
+
+
+
+
+
+ function _mkport() { throw 'TODO' }var SOCKFS={mount:function (mount) {
+ return FS.createNode(null, '/', 16384 | 511 /* 0777 */, 0);
+ },createSocket:function (family, type, protocol) {
+ var streaming = type == 1;
+ if (protocol) {
+ assert(streaming == (protocol == 6)); // if SOCK_STREAM, must be tcp
+ }
+
+ // create our internal socket structure
+ var sock = {
+ family: family,
+ type: type,
+ protocol: protocol,
+ server: null,
+ peers: {},
+ pending: [],
+ recv_queue: [],
+ sock_ops: SOCKFS.websocket_sock_ops
+ };
+
+ // create the filesystem node to store the socket structure
+ var name = SOCKFS.nextname();
+ var node = FS.createNode(SOCKFS.root, name, 49152, 0);
+ node.sock = sock;
+
+ // and the wrapping stream that enables library functions such
+ // as read and write to indirectly interact with the socket
+ var stream = FS.createStream({
+ path: name,
+ node: node,
+ flags: FS.modeStringToFlags('r+'),
+ seekable: false,
+ stream_ops: SOCKFS.stream_ops
+ });
+
+ // map the new stream to the socket structure (sockets have a 1:1
+ // relationship with a stream)
+ sock.stream = stream;
+
+ return sock;
+ },getSocket:function (fd) {
+ var stream = FS.getStream(fd);
+ if (!stream || !FS.isSocket(stream.node.mode)) {
+ return null;
+ }
+ return stream.node.sock;
+ },stream_ops:{poll:function (stream) {
+ var sock = stream.node.sock;
+ return sock.sock_ops.poll(sock);
+ },ioctl:function (stream, request, varargs) {
+ var sock = stream.node.sock;
+ return sock.sock_ops.ioctl(sock, request, varargs);
+ },read:function (stream, buffer, offset, length, position /* ignored */) {
+ var sock = stream.node.sock;
+ var msg = sock.sock_ops.recvmsg(sock, length);
+ if (!msg) {
+ // socket is closed
+ return 0;
+ }
+ buffer.set(msg.buffer, offset);
+ return msg.buffer.length;
+ },write:function (stream, buffer, offset, length, position /* ignored */) {
+ var sock = stream.node.sock;
+ return sock.sock_ops.sendmsg(sock, buffer, offset, length);
+ },close:function (stream) {
+ var sock = stream.node.sock;
+ sock.sock_ops.close(sock);
+ }},nextname:function () {
+ if (!SOCKFS.nextname.current) {
+ SOCKFS.nextname.current = 0;
+ }
+ return 'socket[' + (SOCKFS.nextname.current++) + ']';
+ },websocket_sock_ops:{createPeer:function (sock, addr, port) {
+ var ws;
+
+ if (typeof addr === 'object') {
+ ws = addr;
+ addr = null;
+ port = null;
+ }
+
+ if (ws) {
+ // for sockets that've already connected (e.g. we're the server)
+ // we can inspect the _socket property for the address
+ if (ws._socket) {
+ addr = ws._socket.remoteAddress;
+ port = ws._socket.remotePort;
+ }
+ // if we're just now initializing a connection to the remote,
+ // inspect the url property
+ else {
+ var result = /ws[s]?:\/\/([^:]+):(\d+)/.exec(ws.url);
+ if (!result) {
+ throw new Error('WebSocket URL must be in the format ws(s)://address:port');
+ }
+ addr = result[1];
+ port = parseInt(result[2], 10);
+ }
+ } else {
+ // create the actual websocket object and connect
+ try {
+ // runtimeConfig gets set to true if WebSocket runtime configuration is available.
+ var runtimeConfig = (Module['websocket'] && ('object' === typeof Module['websocket']));
+
+ // The default value is 'ws://' the replace is needed because the compiler replaces "//" comments with '#'
+ // comments without checking context, so we'd end up with ws:#, the replace swaps the "#" for "//" again.
+ var url = 'ws:#'.replace('#', '//');
+
+ if (runtimeConfig) {
+ if ('string' === typeof Module['websocket']['url']) {
+ url = Module['websocket']['url']; // Fetch runtime WebSocket URL config.
+ }
+ }
+
+ if (url === 'ws://' || url === 'wss://') { // Is the supplied URL config just a prefix, if so complete it.
+ url = url + addr + ':' + port;
+ }
+
+ // Make the WebSocket subprotocol (Sec-WebSocket-Protocol) default to binary if no configuration is set.
+ var subProtocols = 'binary'; // The default value is 'binary'
+
+ if (runtimeConfig) {
+ if ('string' === typeof Module['websocket']['subprotocol']) {
+ subProtocols = Module['websocket']['subprotocol']; // Fetch runtime WebSocket subprotocol config.
+ }
+ }
+
+ // The regex trims the string (removes spaces at the beginning and end, then splits the string by
+ // <any space>,<any space> into an Array. Whitespace removal is important for Websockify and ws.
+ subProtocols = subProtocols.replace(/^ +| +$/g,"").split(/ *, */);
+
+ // The node ws library API for specifying optional subprotocol is slightly different than the browser's.
+ var opts = ENVIRONMENT_IS_NODE ? {'protocol': subProtocols.toString()} : subProtocols;
+
+ // If node we use the ws library.
+ var WebSocket = ENVIRONMENT_IS_NODE ? require('ws') : window['WebSocket'];
+ ws = new WebSocket(url, opts);
+ ws.binaryType = 'arraybuffer';
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EHOSTUNREACH);
+ }
+ }
+
+
+ var peer = {
+ addr: addr,
+ port: port,
+ socket: ws,
+ dgram_send_queue: []
+ };
+
+ SOCKFS.websocket_sock_ops.addPeer(sock, peer);
+ SOCKFS.websocket_sock_ops.handlePeerEvents(sock, peer);
+
+ // if this is a bound dgram socket, send the port number first to allow
+ // us to override the ephemeral port reported to us by remotePort on the
+ // remote end.
+ if (sock.type === 2 && typeof sock.sport !== 'undefined') {
+ peer.dgram_send_queue.push(new Uint8Array([
+ 255, 255, 255, 255,
+ 'p'.charCodeAt(0), 'o'.charCodeAt(0), 'r'.charCodeAt(0), 't'.charCodeAt(0),
+ ((sock.sport & 0xff00) >> 8) , (sock.sport & 0xff)
+ ]));
+ }
+
+ return peer;
+ },getPeer:function (sock, addr, port) {
+ return sock.peers[addr + ':' + port];
+ },addPeer:function (sock, peer) {
+ sock.peers[peer.addr + ':' + peer.port] = peer;
+ },removePeer:function (sock, peer) {
+ delete sock.peers[peer.addr + ':' + peer.port];
+ },handlePeerEvents:function (sock, peer) {
+ var first = true;
+
+ var handleOpen = function () {
+ try {
+ var queued = peer.dgram_send_queue.shift();
+ while (queued) {
+ peer.socket.send(queued);
+ queued = peer.dgram_send_queue.shift();
+ }
+ } catch (e) {
+ // not much we can do here in the way of proper error handling as we've already
+ // lied and said this data was sent. shut it down.
+ peer.socket.close();
+ }
+ };
+
+ function handleMessage(data) {
+ assert(typeof data !== 'string' && data.byteLength !== undefined); // must receive an ArrayBuffer
+ data = new Uint8Array(data); // make a typed array view on the array buffer
+
+
+ // if this is the port message, override the peer's port with it
+ var wasfirst = first;
+ first = false;
+ if (wasfirst &&
+ data.length === 10 &&
+ data[0] === 255 && data[1] === 255 && data[2] === 255 && data[3] === 255 &&
+ data[4] === 'p'.charCodeAt(0) && data[5] === 'o'.charCodeAt(0) && data[6] === 'r'.charCodeAt(0) && data[7] === 't'.charCodeAt(0)) {
+ // update the peer's port and it's key in the peer map
+ var newport = ((data[8] << 8) | data[9]);
+ SOCKFS.websocket_sock_ops.removePeer(sock, peer);
+ peer.port = newport;
+ SOCKFS.websocket_sock_ops.addPeer(sock, peer);
+ return;
+ }
+
+ sock.recv_queue.push({ addr: peer.addr, port: peer.port, data: data });
+ };
+
+ if (ENVIRONMENT_IS_NODE) {
+ peer.socket.on('open', handleOpen);
+ peer.socket.on('message', function(data, flags) {
+ if (!flags.binary) {
+ return;
+ }
+ handleMessage((new Uint8Array(data)).buffer); // copy from node Buffer -> ArrayBuffer
+ });
+ peer.socket.on('error', function() {
+ // don't throw
+ });
+ } else {
+ peer.socket.onopen = handleOpen;
+ peer.socket.onmessage = function peer_socket_onmessage(event) {
+ handleMessage(event.data);
+ };
+ }
+ },poll:function (sock) {
+ if (sock.type === 1 && sock.server) {
+ // listen sockets should only say they're available for reading
+ // if there are pending clients.
+ return sock.pending.length ? (64 | 1) : 0;
+ }
+
+ var mask = 0;
+ var dest = sock.type === 1 ? // we only care about the socket state for connection-based sockets
+ SOCKFS.websocket_sock_ops.getPeer(sock, sock.daddr, sock.dport) :
+ null;
+
+ if (sock.recv_queue.length ||
+ !dest || // connection-less sockets are always ready to read
+ (dest && dest.socket.readyState === dest.socket.CLOSING) ||
+ (dest && dest.socket.readyState === dest.socket.CLOSED)) { // let recv return 0 once closed
+ mask |= (64 | 1);
+ }
+
+ if (!dest || // connection-less sockets are always ready to write
+ (dest && dest.socket.readyState === dest.socket.OPEN)) {
+ mask |= 4;
+ }
+
+ if ((dest && dest.socket.readyState === dest.socket.CLOSING) ||
+ (dest && dest.socket.readyState === dest.socket.CLOSED)) {
+ mask |= 16;
+ }
+
+ return mask;
+ },ioctl:function (sock, request, arg) {
+ switch (request) {
+ case 21531:
+ var bytes = 0;
+ if (sock.recv_queue.length) {
+ bytes = sock.recv_queue[0].data.length;
+ }
+ HEAP32[((arg)>>2)]=bytes;
+ return 0;
+ default:
+ return ERRNO_CODES.EINVAL;
+ }
+ },close:function (sock) {
+ // if we've spawned a listen server, close it
+ if (sock.server) {
+ try {
+ sock.server.close();
+ } catch (e) {
+ }
+ sock.server = null;
+ }
+ // close any peer connections
+ var peers = Object.keys(sock.peers);
+ for (var i = 0; i < peers.length; i++) {
+ var peer = sock.peers[peers[i]];
+ try {
+ peer.socket.close();
+ } catch (e) {
+ }
+ SOCKFS.websocket_sock_ops.removePeer(sock, peer);
+ }
+ return 0;
+ },bind:function (sock, addr, port) {
+ if (typeof sock.saddr !== 'undefined' || typeof sock.sport !== 'undefined') {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL); // already bound
+ }
+ sock.saddr = addr;
+ sock.sport = port || _mkport();
+ // in order to emulate dgram sockets, we need to launch a listen server when
+ // binding on a connection-less socket
+ // note: this is only required on the server side
+ if (sock.type === 2) {
+ // close the existing server if it exists
+ if (sock.server) {
+ sock.server.close();
+ sock.server = null;
+ }
+ // swallow error operation not supported error that occurs when binding in the
+ // browser where this isn't supported
+ try {
+ sock.sock_ops.listen(sock, 0);
+ } catch (e) {
+ if (!(e instanceof FS.ErrnoError)) throw e;
+ if (e.errno !== ERRNO_CODES.EOPNOTSUPP) throw e;
+ }
+ }
+ },connect:function (sock, addr, port) {
+ if (sock.server) {
+ throw new FS.ErrnoError(ERRNO_CODS.EOPNOTSUPP);
+ }
+
+ // TODO autobind
+ // if (!sock.addr && sock.type == 2) {
+ // }
+
+ // early out if we're already connected / in the middle of connecting
+ if (typeof sock.daddr !== 'undefined' && typeof sock.dport !== 'undefined') {
+ var dest = SOCKFS.websocket_sock_ops.getPeer(sock, sock.daddr, sock.dport);
+ if (dest) {
+ if (dest.socket.readyState === dest.socket.CONNECTING) {
+ throw new FS.ErrnoError(ERRNO_CODES.EALREADY);
+ } else {
+ throw new FS.ErrnoError(ERRNO_CODES.EISCONN);
+ }
+ }
+ }
+
+ // add the socket to our peer list and set our
+ // destination address / port to match
+ var peer = SOCKFS.websocket_sock_ops.createPeer(sock, addr, port);
+ sock.daddr = peer.addr;
+ sock.dport = peer.port;
+
+ // always "fail" in non-blocking mode
+ throw new FS.ErrnoError(ERRNO_CODES.EINPROGRESS);
+ },listen:function (sock, backlog) {
+ if (!ENVIRONMENT_IS_NODE) {
+ throw new FS.ErrnoError(ERRNO_CODES.EOPNOTSUPP);
+ }
+ if (sock.server) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL); // already listening
+ }
+ var WebSocketServer = require('ws').Server;
+ var host = sock.saddr;
+ sock.server = new WebSocketServer({
+ host: host,
+ port: sock.sport
+ // TODO support backlog
+ });
+
+ sock.server.on('connection', function(ws) {
+ if (sock.type === 1) {
+ var newsock = SOCKFS.createSocket(sock.family, sock.type, sock.protocol);
+
+ // create a peer on the new socket
+ var peer = SOCKFS.websocket_sock_ops.createPeer(newsock, ws);
+ newsock.daddr = peer.addr;
+ newsock.dport = peer.port;
+
+ // push to queue for accept to pick up
+ sock.pending.push(newsock);
+ } else {
+ // create a peer on the listen socket so calling sendto
+ // with the listen socket and an address will resolve
+ // to the correct client
+ SOCKFS.websocket_sock_ops.createPeer(sock, ws);
+ }
+ });
+ sock.server.on('closed', function() {
+ sock.server = null;
+ });
+ sock.server.on('error', function() {
+ // don't throw
+ });
+ },accept:function (listensock) {
+ if (!listensock.server) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var newsock = listensock.pending.shift();
+ newsock.stream.flags = listensock.stream.flags;
+ return newsock;
+ },getname:function (sock, peer) {
+ var addr, port;
+ if (peer) {
+ if (sock.daddr === undefined || sock.dport === undefined) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ }
+ addr = sock.daddr;
+ port = sock.dport;
+ } else {
+ // TODO saddr and sport will be set for bind()'d UDP sockets, but what
+ // should we be returning for TCP sockets that've been connect()'d?
+ addr = sock.saddr || 0;
+ port = sock.sport || 0;
+ }
+ return { addr: addr, port: port };
+ },sendmsg:function (sock, buffer, offset, length, addr, port) {
+ if (sock.type === 2) {
+ // connection-less sockets will honor the message address,
+ // and otherwise fall back to the bound destination address
+ if (addr === undefined || port === undefined) {
+ addr = sock.daddr;
+ port = sock.dport;
+ }
+ // if there was no address to fall back to, error out
+ if (addr === undefined || port === undefined) {
+ throw new FS.ErrnoError(ERRNO_CODES.EDESTADDRREQ);
+ }
+ } else {
+ // connection-based sockets will only use the bound
+ addr = sock.daddr;
+ port = sock.dport;
+ }
+
+ // find the peer for the destination address
+ var dest = SOCKFS.websocket_sock_ops.getPeer(sock, addr, port);
+
+ // early out if not connected with a connection-based socket
+ if (sock.type === 1) {
+ if (!dest || dest.socket.readyState === dest.socket.CLOSING || dest.socket.readyState === dest.socket.CLOSED) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ } else if (dest.socket.readyState === dest.socket.CONNECTING) {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ }
+
+ // create a copy of the incoming data to send, as the WebSocket API
+ // doesn't work entirely with an ArrayBufferView, it'll just send
+ // the entire underlying buffer
+ var data;
+ if (buffer instanceof Array || buffer instanceof ArrayBuffer) {
+ data = buffer.slice(offset, offset + length);
+ } else { // ArrayBufferView
+ data = buffer.buffer.slice(buffer.byteOffset + offset, buffer.byteOffset + offset + length);
+ }
+
+ // if we're emulating a connection-less dgram socket and don't have
+ // a cached connection, queue the buffer to send upon connect and
+ // lie, saying the data was sent now.
+ if (sock.type === 2) {
+ if (!dest || dest.socket.readyState !== dest.socket.OPEN) {
+ // if we're not connected, open a new connection
+ if (!dest || dest.socket.readyState === dest.socket.CLOSING || dest.socket.readyState === dest.socket.CLOSED) {
+ dest = SOCKFS.websocket_sock_ops.createPeer(sock, addr, port);
+ }
+ dest.dgram_send_queue.push(data);
+ return length;
+ }
+ }
+
+ try {
+ // send the actual data
+ dest.socket.send(data);
+ return length;
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ },recvmsg:function (sock, length) {
+ // http://pubs.opengroup.org/onlinepubs/7908799/xns/recvmsg.html
+ if (sock.type === 1 && sock.server) {
+ // tcp servers should not be recv()'ing on the listen socket
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ }
+
+ var queued = sock.recv_queue.shift();
+ if (!queued) {
+ if (sock.type === 1) {
+ var dest = SOCKFS.websocket_sock_ops.getPeer(sock, sock.daddr, sock.dport);
+
+ if (!dest) {
+ // if we have a destination address but are not connected, error out
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ }
+ else if (dest.socket.readyState === dest.socket.CLOSING || dest.socket.readyState === dest.socket.CLOSED) {
+ // return null if the socket has closed
+ return null;
+ }
+ else {
+ // else, our socket is in a valid state but truly has nothing available
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ } else {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ }
+
+ // queued.data will be an ArrayBuffer if it's unadulterated, but if it's
+ // requeued TCP data it'll be an ArrayBufferView
+ var queuedLength = queued.data.byteLength || queued.data.length;
+ var queuedOffset = queued.data.byteOffset || 0;
+ var queuedBuffer = queued.data.buffer || queued.data;
+ var bytesRead = Math.min(length, queuedLength);
+ var res = {
+ buffer: new Uint8Array(queuedBuffer, queuedOffset, bytesRead),
+ addr: queued.addr,
+ port: queued.port
+ };
+
+
+ // push back any unread data for TCP connections
+ if (sock.type === 1 && bytesRead < queuedLength) {
+ var bytesRemaining = queuedLength - bytesRead;
+ queued.data = new Uint8Array(queuedBuffer, queuedOffset + bytesRead, bytesRemaining);
+ sock.recv_queue.unshift(queued);
+ }
+
+ return res;
+ }}};function _send(fd, buf, len, flags) {
+ var sock = SOCKFS.getSocket(fd);
+ if (!sock) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+ // TODO honor flags
+ return _write(fd, buf, len);
+ }
+
+ function _pwrite(fildes, buf, nbyte, offset) {
+ // ssize_t pwrite(int fildes, const void *buf, size_t nbyte, off_t offset);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/write.html
+ var stream = FS.getStream(fildes);
+ if (!stream) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+ try {
+ var slab = HEAP8;
+ return FS.write(stream, slab, buf, nbyte, offset);
+ } catch (e) {
+ FS.handleFSError(e);
+ return -1;
+ }
+ }function _write(fildes, buf, nbyte) {
+ // ssize_t write(int fildes, const void *buf, size_t nbyte);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/write.html
+ var stream = FS.getStream(fildes);
+ if (!stream) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+
+
+ try {
+ var slab = HEAP8;
+ return FS.write(stream, slab, buf, nbyte);
+ } catch (e) {
+ FS.handleFSError(e);
+ return -1;
+ }
+ }
+
+ function _fileno(stream) {
+ // int fileno(FILE *stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fileno.html
+ stream = FS.getStreamFromPtr(stream);
+ if (!stream) return -1;
+ return stream.fd;
+ }function _fwrite(ptr, size, nitems, stream) {
+ // size_t fwrite(const void *restrict ptr, size_t size, size_t nitems, FILE *restrict stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fwrite.html
+ var bytesToWrite = nitems * size;
+ if (bytesToWrite == 0) return 0;
+ var fd = _fileno(stream);
+ var bytesWritten = _write(fd, ptr, bytesToWrite);
+ if (bytesWritten == -1) {
+ var streamObj = FS.getStreamFromPtr(stream);
+ if (streamObj) streamObj.error = true;
+ return 0;
+ } else {
+ return Math.floor(bytesWritten / size);
+ }
+ }
+
+
+
+ Module["_strlen"] = _strlen;
+
+ function __reallyNegative(x) {
+ return x < 0 || (x === 0 && (1/x) === -Infinity);
+ }function __formatString(format, varargs) {
+ var textIndex = format;
+ var argIndex = 0;
+ function getNextArg(type) {
+ // NOTE: Explicitly ignoring type safety. Otherwise this fails:
+ // int x = 4; printf("%c\n", (char)x);
+ var ret;
+ if (type === 'double') {
+ ret = HEAPF64[(((varargs)+(argIndex))>>3)];
+ } else if (type == 'i64') {
+ ret = [HEAP32[(((varargs)+(argIndex))>>2)],
+ HEAP32[(((varargs)+(argIndex+4))>>2)]];
+
+ } else {
+ type = 'i32'; // varargs are always i32, i64, or double
+ ret = HEAP32[(((varargs)+(argIndex))>>2)];
+ }
+ argIndex += Runtime.getNativeFieldSize(type);
+ return ret;
+ }
+
+ var ret = [];
+ var curr, next, currArg;
+ while(1) {
+ var startTextIndex = textIndex;
+ curr = HEAP8[(textIndex)];
+ if (curr === 0) break;
+ next = HEAP8[((textIndex+1)|0)];
+ if (curr == 37) {
+ // Handle flags.
+ var flagAlwaysSigned = false;
+ var flagLeftAlign = false;
+ var flagAlternative = false;
+ var flagZeroPad = false;
+ var flagPadSign = false;
+ flagsLoop: while (1) {
+ switch (next) {
+ case 43:
+ flagAlwaysSigned = true;
+ break;
+ case 45:
+ flagLeftAlign = true;
+ break;
+ case 35:
+ flagAlternative = true;
+ break;
+ case 48:
+ if (flagZeroPad) {
+ break flagsLoop;
+ } else {
+ flagZeroPad = true;
+ break;
+ }
+ case 32:
+ flagPadSign = true;
+ break;
+ default:
+ break flagsLoop;
+ }
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ }
+
+ // Handle width.
+ var width = 0;
+ if (next == 42) {
+ width = getNextArg('i32');
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ } else {
+ while (next >= 48 && next <= 57) {
+ width = width * 10 + (next - 48);
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ }
+ }
+
+ // Handle precision.
+ var precisionSet = false, precision = -1;
+ if (next == 46) {
+ precision = 0;
+ precisionSet = true;
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ if (next == 42) {
+ precision = getNextArg('i32');
+ textIndex++;
+ } else {
+ while(1) {
+ var precisionChr = HEAP8[((textIndex+1)|0)];
+ if (precisionChr < 48 ||
+ precisionChr > 57) break;
+ precision = precision * 10 + (precisionChr - 48);
+ textIndex++;
+ }
+ }
+ next = HEAP8[((textIndex+1)|0)];
+ }
+ if (precision < 0) {
+ precision = 6; // Standard default.
+ precisionSet = false;
+ }
+
+ // Handle integer sizes. WARNING: These assume a 32-bit architecture!
+ var argSize;
+ switch (String.fromCharCode(next)) {
+ case 'h':
+ var nextNext = HEAP8[((textIndex+2)|0)];
+ if (nextNext == 104) {
+ textIndex++;
+ argSize = 1; // char (actually i32 in varargs)
+ } else {
+ argSize = 2; // short (actually i32 in varargs)
+ }
+ break;
+ case 'l':
+ var nextNext = HEAP8[((textIndex+2)|0)];
+ if (nextNext == 108) {
+ textIndex++;
+ argSize = 8; // long long
+ } else {
+ argSize = 4; // long
+ }
+ break;
+ case 'L': // long long
+ case 'q': // int64_t
+ case 'j': // intmax_t
+ argSize = 8;
+ break;
+ case 'z': // size_t
+ case 't': // ptrdiff_t
+ case 'I': // signed ptrdiff_t or unsigned size_t
+ argSize = 4;
+ break;
+ default:
+ argSize = null;
+ }
+ if (argSize) textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+
+ // Handle type specifier.
+ switch (String.fromCharCode(next)) {
+ case 'd': case 'i': case 'u': case 'o': case 'x': case 'X': case 'p': {
+ // Integer.
+ var signed = next == 100 || next == 105;
+ argSize = argSize || 4;
+ var currArg = getNextArg('i' + (argSize * 8));
+ var argText;
+ // Flatten i64-1 [low, high] into a (slightly rounded) double
+ if (argSize == 8) {
+ currArg = Runtime.makeBigInt(currArg[0], currArg[1], next == 117);
+ }
+ // Truncate to requested size.
+ if (argSize <= 4) {
+ var limit = Math.pow(256, argSize) - 1;
+ currArg = (signed ? reSign : unSign)(currArg & limit, argSize * 8);
+ }
+ // Format the number.
+ var currAbsArg = Math.abs(currArg);
+ var prefix = '';
+ if (next == 100 || next == 105) {
+ argText = reSign(currArg, 8 * argSize, 1).toString(10);
+ } else if (next == 117) {
+ argText = unSign(currArg, 8 * argSize, 1).toString(10);
+ currArg = Math.abs(currArg);
+ } else if (next == 111) {
+ argText = (flagAlternative ? '0' : '') + currAbsArg.toString(8);
+ } else if (next == 120 || next == 88) {
+ prefix = (flagAlternative && currArg != 0) ? '0x' : '';
+ if (currArg < 0) {
+ // Represent negative numbers in hex as 2's complement.
+ currArg = -currArg;
+ argText = (currAbsArg - 1).toString(16);
+ var buffer = [];
+ for (var i = 0; i < argText.length; i++) {
+ buffer.push((0xF - parseInt(argText[i], 16)).toString(16));
+ }
+ argText = buffer.join('');
+ while (argText.length < argSize * 2) argText = 'f' + argText;
+ } else {
+ argText = currAbsArg.toString(16);
+ }
+ if (next == 88) {
+ prefix = prefix.toUpperCase();
+ argText = argText.toUpperCase();
+ }
+ } else if (next == 112) {
+ if (currAbsArg === 0) {
+ argText = '(nil)';
+ } else {
+ prefix = '0x';
+ argText = currAbsArg.toString(16);
+ }
+ }
+ if (precisionSet) {
+ while (argText.length < precision) {
+ argText = '0' + argText;
+ }
+ }
+
+ // Add sign if needed
+ if (currArg >= 0) {
+ if (flagAlwaysSigned) {
+ prefix = '+' + prefix;
+ } else if (flagPadSign) {
+ prefix = ' ' + prefix;
+ }
+ }
+
+ // Move sign to prefix so we zero-pad after the sign
+ if (argText.charAt(0) == '-') {
+ prefix = '-' + prefix;
+ argText = argText.substr(1);
+ }
+
+ // Add padding.
+ while (prefix.length + argText.length < width) {
+ if (flagLeftAlign) {
+ argText += ' ';
+ } else {
+ if (flagZeroPad) {
+ argText = '0' + argText;
+ } else {
+ prefix = ' ' + prefix;
+ }
+ }
+ }
+
+ // Insert the result into the buffer.
+ argText = prefix + argText;
+ argText.split('').forEach(function(chr) {
+ ret.push(chr.charCodeAt(0));
+ });
+ break;
+ }
+ case 'f': case 'F': case 'e': case 'E': case 'g': case 'G': {
+ // Float.
+ var currArg = getNextArg('double');
+ var argText;
+ if (isNaN(currArg)) {
+ argText = 'nan';
+ flagZeroPad = false;
+ } else if (!isFinite(currArg)) {
+ argText = (currArg < 0 ? '-' : '') + 'inf';
+ flagZeroPad = false;
+ } else {
+ var isGeneral = false;
+ var effectivePrecision = Math.min(precision, 20);
+
+ // Convert g/G to f/F or e/E, as per:
+ // http://pubs.opengroup.org/onlinepubs/9699919799/functions/printf.html
+ if (next == 103 || next == 71) {
+ isGeneral = true;
+ precision = precision || 1;
+ var exponent = parseInt(currArg.toExponential(effectivePrecision).split('e')[1], 10);
+ if (precision > exponent && exponent >= -4) {
+ next = ((next == 103) ? 'f' : 'F').charCodeAt(0);
+ precision -= exponent + 1;
+ } else {
+ next = ((next == 103) ? 'e' : 'E').charCodeAt(0);
+ precision--;
+ }
+ effectivePrecision = Math.min(precision, 20);
+ }
+
+ if (next == 101 || next == 69) {
+ argText = currArg.toExponential(effectivePrecision);
+ // Make sure the exponent has at least 2 digits.
+ if (/[eE][-+]\d$/.test(argText)) {
+ argText = argText.slice(0, -1) + '0' + argText.slice(-1);
+ }
+ } else if (next == 102 || next == 70) {
+ argText = currArg.toFixed(effectivePrecision);
+ if (currArg === 0 && __reallyNegative(currArg)) {
+ argText = '-' + argText;
+ }
+ }
+
+ var parts = argText.split('e');
+ if (isGeneral && !flagAlternative) {
+ // Discard trailing zeros and periods.
+ while (parts[0].length > 1 && parts[0].indexOf('.') != -1 &&
+ (parts[0].slice(-1) == '0' || parts[0].slice(-1) == '.')) {
+ parts[0] = parts[0].slice(0, -1);
+ }
+ } else {
+ // Make sure we have a period in alternative mode.
+ if (flagAlternative && argText.indexOf('.') == -1) parts[0] += '.';
+ // Zero pad until required precision.
+ while (precision > effectivePrecision++) parts[0] += '0';
+ }
+ argText = parts[0] + (parts.length > 1 ? 'e' + parts[1] : '');
+
+ // Capitalize 'E' if needed.
+ if (next == 69) argText = argText.toUpperCase();
+
+ // Add sign.
+ if (currArg >= 0) {
+ if (flagAlwaysSigned) {
+ argText = '+' + argText;
+ } else if (flagPadSign) {
+ argText = ' ' + argText;
+ }
+ }
+ }
+
+ // Add padding.
+ while (argText.length < width) {
+ if (flagLeftAlign) {
+ argText += ' ';
+ } else {
+ if (flagZeroPad && (argText[0] == '-' || argText[0] == '+')) {
+ argText = argText[0] + '0' + argText.slice(1);
+ } else {
+ argText = (flagZeroPad ? '0' : ' ') + argText;
+ }
+ }
+ }
+
+ // Adjust case.
+ if (next < 97) argText = argText.toUpperCase();
+
+ // Insert the result into the buffer.
+ argText.split('').forEach(function(chr) {
+ ret.push(chr.charCodeAt(0));
+ });
+ break;
+ }
+ case 's': {
+ // String.
+ var arg = getNextArg('i8*');
+ var argLength = arg ? _strlen(arg) : '(null)'.length;
+ if (precisionSet) argLength = Math.min(argLength, precision);
+ if (!flagLeftAlign) {
+ while (argLength < width--) {
+ ret.push(32);
+ }
+ }
+ if (arg) {
+ for (var i = 0; i < argLength; i++) {
+ ret.push(HEAPU8[((arg++)|0)]);
+ }
+ } else {
+ ret = ret.concat(intArrayFromString('(null)'.substr(0, argLength), true));
+ }
+ if (flagLeftAlign) {
+ while (argLength < width--) {
+ ret.push(32);
+ }
+ }
+ break;
+ }
+ case 'c': {
+ // Character.
+ if (flagLeftAlign) ret.push(getNextArg('i8'));
+ while (--width > 0) {
+ ret.push(32);
+ }
+ if (!flagLeftAlign) ret.push(getNextArg('i8'));
+ break;
+ }
+ case 'n': {
+ // Write the length written so far to the next parameter.
+ var ptr = getNextArg('i32*');
+ HEAP32[((ptr)>>2)]=ret.length;
+ break;
+ }
+ case '%': {
+ // Literal percent sign.
+ ret.push(curr);
+ break;
+ }
+ default: {
+ // Unknown specifiers remain untouched.
+ for (var i = startTextIndex; i < textIndex + 2; i++) {
+ ret.push(HEAP8[(i)]);
+ }
+ }
+ }
+ textIndex += 2;
+ // TODO: Support a/A (hex float) and m (last error) specifiers.
+ // TODO: Support %1${specifier} for arg selection.
+ } else {
+ ret.push(curr);
+ textIndex += 1;
+ }
+ }
+ return ret;
+ }function _fprintf(stream, format, varargs) {
+ // int fprintf(FILE *restrict stream, const char *restrict format, ...);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/printf.html
+ var result = __formatString(format, varargs);
+ var stack = Runtime.stackSave();
+ var ret = _fwrite(allocate(result, 'i8', ALLOC_STACK), 1, result.length, stream);
+ Runtime.stackRestore(stack);
+ return ret;
+ }function _printf(format, varargs) {
+ // int printf(const char *restrict format, ...);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/printf.html
+ var stdout = HEAP32[((_stdout)>>2)];
+ return _fprintf(stdout, format, varargs);
+ }
+
+
+ var _sqrtf=Math_sqrt;
+Module["requestFullScreen"] = function Module_requestFullScreen(lockPointer, resizeCanvas) { Browser.requestFullScreen(lockPointer, resizeCanvas) };
+ Module["requestAnimationFrame"] = function Module_requestAnimationFrame(func) { Browser.requestAnimationFrame(func) };
+ Module["setCanvasSize"] = function Module_setCanvasSize(width, height, noUpdates) { Browser.setCanvasSize(width, height, noUpdates) };
+ Module["pauseMainLoop"] = function Module_pauseMainLoop() { Browser.mainLoop.pause() };
+ Module["resumeMainLoop"] = function Module_resumeMainLoop() { Browser.mainLoop.resume() };
+ Module["getUserMedia"] = function Module_getUserMedia() { Browser.getUserMedia() }
+FS.staticInit();__ATINIT__.unshift({ func: function() { if (!Module["noFSInit"] && !FS.init.initialized) FS.init() } });__ATMAIN__.push({ func: function() { FS.ignorePermissions = false } });__ATEXIT__.push({ func: function() { FS.quit() } });Module["FS_createFolder"] = FS.createFolder;Module["FS_createPath"] = FS.createPath;Module["FS_createDataFile"] = FS.createDataFile;Module["FS_createPreloadedFile"] = FS.createPreloadedFile;Module["FS_createLazyFile"] = FS.createLazyFile;Module["FS_createLink"] = FS.createLink;Module["FS_createDevice"] = FS.createDevice;
+___errno_state = Runtime.staticAlloc(4); HEAP32[((___errno_state)>>2)]=0;
+__ATINIT__.unshift({ func: function() { TTY.init() } });__ATEXIT__.push({ func: function() { TTY.shutdown() } });TTY.utf8 = new Runtime.UTF8Processor();
+if (ENVIRONMENT_IS_NODE) { var fs = require("fs"); NODEFS.staticInit(); }
+__ATINIT__.push({ func: function() { SOCKFS.root = FS.mount(SOCKFS, {}, null); } });
+STACK_BASE = STACKTOP = Runtime.alignMemory(STATICTOP);
+
+staticSealed = true; // seal the static portion of memory
+
+STACK_MAX = STACK_BASE + 5242880;
+
+DYNAMIC_BASE = DYNAMICTOP = Runtime.alignMemory(STACK_MAX);
+
+assert(DYNAMIC_BASE < TOTAL_MEMORY, "TOTAL_MEMORY not big enough for stack");
+
+
+var Math_min = Math.min;
+function asmPrintInt(x, y) {
+ Module.print('int ' + x + ',' + y);// + ' ' + new Error().stack);
+}
+function asmPrintFloat(x, y) {
+ Module.print('float ' + x + ',' + y);// + ' ' + new Error().stack);
+}
+// EMSCRIPTEN_START_ASM
+var asm = Wasm.instantiateModuleFromAsm((function Module(global, env, buffer) {
+ 'use asm';
+ var HEAP8 = new global.Int8Array(buffer);
+ var HEAP16 = new global.Int16Array(buffer);
+ var HEAP32 = new global.Int32Array(buffer);
+ var HEAPU8 = new global.Uint8Array(buffer);
+ var HEAPU16 = new global.Uint16Array(buffer);
+ var HEAPU32 = new global.Uint32Array(buffer);
+ var HEAPF32 = new global.Float32Array(buffer);
+ var HEAPF64 = new global.Float64Array(buffer);
+
+ var STACKTOP=env.STACKTOP|0;
+ var STACK_MAX=env.STACK_MAX|0;
+ var tempDoublePtr=env.tempDoublePtr|0;
+ var ABORT=env.ABORT|0;
+
+ var __THREW__ = 0;
+ var threwValue = 0;
+ var setjmpId = 0;
+ var undef = 0;
+ var nan = +env.NaN, inf = +env.Infinity;
+ var tempInt = 0, tempBigInt = 0, tempBigIntP = 0, tempBigIntS = 0, tempBigIntR = 0.0, tempBigIntI = 0, tempBigIntD = 0, tempValue = 0, tempDouble = 0.0;
+
+ var tempRet0 = 0;
+ var tempRet1 = 0;
+ var tempRet2 = 0;
+ var tempRet3 = 0;
+ var tempRet4 = 0;
+ var tempRet5 = 0;
+ var tempRet6 = 0;
+ var tempRet7 = 0;
+ var tempRet8 = 0;
+ var tempRet9 = 0;
+ var Math_floor=global.Math.floor;
+ var Math_abs=global.Math.abs;
+ var Math_sqrt=global.Math.sqrt;
+ var Math_pow=global.Math.pow;
+ var Math_cos=global.Math.cos;
+ var Math_sin=global.Math.sin;
+ var Math_tan=global.Math.tan;
+ var Math_acos=global.Math.acos;
+ var Math_asin=global.Math.asin;
+ var Math_atan=global.Math.atan;
+ var Math_atan2=global.Math.atan2;
+ var Math_exp=global.Math.exp;
+ var Math_log=global.Math.log;
+ var Math_ceil=global.Math.ceil;
+ var Math_imul=global.Math.imul;
+ var abort=env.abort;
+ var assert=env.assert;
+ var asmPrintInt=env.asmPrintInt;
+ var asmPrintFloat=env.asmPrintFloat;
+ var Math_min=env.min;
+ var _free=env._free;
+ var _emscripten_memcpy_big=env._emscripten_memcpy_big;
+ var _printf=env._printf;
+ var _send=env._send;
+ var _pwrite=env._pwrite;
+ var _sqrtf=env._sqrtf;
+ var __reallyNegative=env.__reallyNegative;
+ var _fwrite=env._fwrite;
+ var _malloc=env._malloc;
+ var _mkport=env._mkport;
+ var _fprintf=env._fprintf;
+ var ___setErrNo=env.___setErrNo;
+ var __formatString=env.__formatString;
+ var _fileno=env._fileno;
+ var _fflush=env._fflush;
+ var _write=env._write;
+ var tempFloat = 0.0;
+
+// EMSCRIPTEN_START_FUNCS
+function _main(i3, i5) {
+ i3 = i3 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i2 = 0, i4 = 0, i6 = 0, i7 = 0, d8 = 0.0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i2 = i1;
+ L1 : do {
+ if ((i3 | 0) > 1) {
+ i3 = HEAP8[HEAP32[i5 + 4 >> 2] | 0] | 0;
+ switch (i3 | 0) {
+ case 50:
+ {
+ i3 = 13e4;
+ break L1;
+ }
+ case 51:
+ {
+ i4 = 4;
+ break L1;
+ }
+ case 52:
+ {
+ i3 = 61e4;
+ break L1;
+ }
+ case 53:
+ {
+ i3 = 101e4;
+ break L1;
+ }
+ case 49:
+ {
+ i3 = 33e3;
+ break L1;
+ }
+ case 48:
+ {
+ i7 = 0;
+ STACKTOP = i1;
+ return i7 | 0;
+ }
+ default:
+ {
+ HEAP32[i2 >> 2] = i3 + -48;
+ _printf(8, i2 | 0) | 0;
+ i7 = -1;
+ STACKTOP = i1;
+ return i7 | 0;
+ }
+ }
+ } else {
+ i4 = 4;
+ }
+ } while (0);
+ if ((i4 | 0) == 4) {
+ i3 = 22e4;
+ }
+ i4 = 2;
+ i5 = 0;
+ while (1) {
+ d8 = +Math_sqrt(+(+(i4 | 0)));
+ L15 : do {
+ if (d8 > 2.0) {
+ i7 = 2;
+ while (1) {
+ i6 = i7 + 1 | 0;
+ if (((i4 | 0) % (i7 | 0) | 0 | 0) == 0) {
+ i6 = 0;
+ break L15;
+ }
+ if (+(i6 | 0) < d8) {
+ i7 = i6;
+ } else {
+ i6 = 1;
+ break;
+ }
+ }
+ } else {
+ i6 = 1;
+ }
+ } while (0);
+ i5 = i6 + i5 | 0;
+ if ((i5 | 0) >= (i3 | 0)) {
+ break;
+ } else {
+ i4 = i4 + 1 | 0;
+ }
+ }
+ HEAP32[i2 >> 2] = i4;
+ _printf(24, i2 | 0) | 0;
+ i7 = 0;
+ STACKTOP = i1;
+ return i7 | 0;
+}
+function _memcpy(i3, i2, i1) {
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i4 = 0;
+ if ((i1 | 0) >= 4096) return _emscripten_memcpy_big(i3 | 0, i2 | 0, i1 | 0) | 0;
+ i4 = i3 | 0;
+ if ((i3 & 3) == (i2 & 3)) {
+ while (i3 & 3) {
+ if ((i1 | 0) == 0) return i4 | 0;
+ HEAP8[i3] = HEAP8[i2] | 0;
+ i3 = i3 + 1 | 0;
+ i2 = i2 + 1 | 0;
+ i1 = i1 - 1 | 0;
+ }
+ while ((i1 | 0) >= 4) {
+ HEAP32[i3 >> 2] = HEAP32[i2 >> 2];
+ i3 = i3 + 4 | 0;
+ i2 = i2 + 4 | 0;
+ i1 = i1 - 4 | 0;
+ }
+ }
+ while ((i1 | 0) > 0) {
+ HEAP8[i3] = HEAP8[i2] | 0;
+ i3 = i3 + 1 | 0;
+ i2 = i2 + 1 | 0;
+ i1 = i1 - 1 | 0;
+ }
+ return i4 | 0;
+}
+function runPostSets() {}
+function _memset(i1, i4, i3) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i5 = 0, i6 = 0, i7 = 0;
+ i2 = i1 + i3 | 0;
+ if ((i3 | 0) >= 20) {
+ i4 = i4 & 255;
+ i7 = i1 & 3;
+ i6 = i4 | i4 << 8 | i4 << 16 | i4 << 24;
+ i5 = i2 & ~3;
+ if (i7) {
+ i7 = i1 + 4 - i7 | 0;
+ while ((i1 | 0) < (i7 | 0)) {
+ HEAP8[i1] = i4;
+ i1 = i1 + 1 | 0;
+ }
+ }
+ while ((i1 | 0) < (i5 | 0)) {
+ HEAP32[i1 >> 2] = i6;
+ i1 = i1 + 4 | 0;
+ }
+ }
+ while ((i1 | 0) < (i2 | 0)) {
+ HEAP8[i1] = i4;
+ i1 = i1 + 1 | 0;
+ }
+ return i1 - i3 | 0;
+}
+function copyTempDouble(i1) {
+ i1 = i1 | 0;
+ HEAP8[tempDoublePtr] = HEAP8[i1];
+ HEAP8[tempDoublePtr + 1 | 0] = HEAP8[i1 + 1 | 0];
+ HEAP8[tempDoublePtr + 2 | 0] = HEAP8[i1 + 2 | 0];
+ HEAP8[tempDoublePtr + 3 | 0] = HEAP8[i1 + 3 | 0];
+ HEAP8[tempDoublePtr + 4 | 0] = HEAP8[i1 + 4 | 0];
+ HEAP8[tempDoublePtr + 5 | 0] = HEAP8[i1 + 5 | 0];
+ HEAP8[tempDoublePtr + 6 | 0] = HEAP8[i1 + 6 | 0];
+ HEAP8[tempDoublePtr + 7 | 0] = HEAP8[i1 + 7 | 0];
+}
+function copyTempFloat(i1) {
+ i1 = i1 | 0;
+ HEAP8[tempDoublePtr] = HEAP8[i1];
+ HEAP8[tempDoublePtr + 1 | 0] = HEAP8[i1 + 1 | 0];
+ HEAP8[tempDoublePtr + 2 | 0] = HEAP8[i1 + 2 | 0];
+ HEAP8[tempDoublePtr + 3 | 0] = HEAP8[i1 + 3 | 0];
+}
+function stackAlloc(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + i1 | 0;
+ STACKTOP = STACKTOP + 7 & -8;
+ return i2 | 0;
+}
+function _strlen(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = i1;
+ while (HEAP8[i2] | 0) {
+ i2 = i2 + 1 | 0;
+ }
+ return i2 - i1 | 0;
+}
+function setThrew(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ if ((__THREW__ | 0) == 0) {
+ __THREW__ = i1;
+ threwValue = i2;
+ }
+}
+function stackRestore(i1) {
+ i1 = i1 | 0;
+ STACKTOP = i1;
+}
+function setTempRet9(i1) {
+ i1 = i1 | 0;
+ tempRet9 = i1;
+}
+function setTempRet8(i1) {
+ i1 = i1 | 0;
+ tempRet8 = i1;
+}
+function setTempRet7(i1) {
+ i1 = i1 | 0;
+ tempRet7 = i1;
+}
+function setTempRet6(i1) {
+ i1 = i1 | 0;
+ tempRet6 = i1;
+}
+function setTempRet5(i1) {
+ i1 = i1 | 0;
+ tempRet5 = i1;
+}
+function setTempRet4(i1) {
+ i1 = i1 | 0;
+ tempRet4 = i1;
+}
+function setTempRet3(i1) {
+ i1 = i1 | 0;
+ tempRet3 = i1;
+}
+function setTempRet2(i1) {
+ i1 = i1 | 0;
+ tempRet2 = i1;
+}
+function setTempRet1(i1) {
+ i1 = i1 | 0;
+ tempRet1 = i1;
+}
+function setTempRet0(i1) {
+ i1 = i1 | 0;
+ tempRet0 = i1;
+}
+function stackSave() {
+ return STACKTOP | 0;
+}
+
+// EMSCRIPTEN_END_FUNCS
+
+
+ return { _strlen: _strlen, _memcpy: _memcpy, _main: _main, _memset: _memset, runPostSets: runPostSets, stackAlloc: stackAlloc, stackSave: stackSave, stackRestore: stackRestore, setThrew: setThrew, setTempRet0: setTempRet0, setTempRet1: setTempRet1, setTempRet2: setTempRet2, setTempRet3: setTempRet3, setTempRet4: setTempRet4, setTempRet5: setTempRet5, setTempRet6: setTempRet6, setTempRet7: setTempRet7, setTempRet8: setTempRet8, setTempRet9: setTempRet9 };
+}).toString(),
+// EMSCRIPTEN_END_ASM
+{ "Math": Math, "Int8Array": Int8Array, "Int16Array": Int16Array, "Int32Array": Int32Array, "Uint8Array": Uint8Array, "Uint16Array": Uint16Array, "Uint32Array": Uint32Array, "Float32Array": Float32Array, "Float64Array": Float64Array, "abort": abort, "assert": assert, "asmPrintInt": asmPrintInt, "asmPrintFloat": asmPrintFloat, "min": Math_min, "_free": _free, "_emscripten_memcpy_big": _emscripten_memcpy_big, "_printf": _printf, "_send": _send, "_pwrite": _pwrite, "_sqrtf": _sqrtf, "__reallyNegative": __reallyNegative, "_fwrite": _fwrite, "_malloc": _malloc, "_mkport": _mkport, "_fprintf": _fprintf, "___setErrNo": ___setErrNo, "__formatString": __formatString, "_fileno": _fileno, "_fflush": _fflush, "_write": _write, "STACKTOP": STACKTOP, "STACK_MAX": STACK_MAX, "tempDoublePtr": tempDoublePtr, "ABORT": ABORT, "NaN": NaN, "Infinity": Infinity }, buffer);
+var _strlen = Module["_strlen"] = asm["_strlen"];
+var _memcpy = Module["_memcpy"] = asm["_memcpy"];
+var _main = Module["_main"] = asm["_main"];
+var _memset = Module["_memset"] = asm["_memset"];
+var runPostSets = Module["runPostSets"] = asm["runPostSets"];
+
+Runtime.stackAlloc = function(size) { return asm['stackAlloc'](size) };
+Runtime.stackSave = function() { return asm['stackSave']() };
+Runtime.stackRestore = function(top) { asm['stackRestore'](top) };
+
+
+// Warning: printing of i64 values may be slightly rounded! No deep i64 math used, so precise i64 code not included
+var i64Math = null;
+
+// === Auto-generated postamble setup entry stuff ===
+
+if (memoryInitializer) {
+ if (ENVIRONMENT_IS_NODE || ENVIRONMENT_IS_SHELL) {
+ var data = Module['readBinary'](memoryInitializer);
+ HEAPU8.set(data, STATIC_BASE);
+ } else {
+ addRunDependency('memory initializer');
+ Browser.asyncLoad(memoryInitializer, function(data) {
+ HEAPU8.set(data, STATIC_BASE);
+ removeRunDependency('memory initializer');
+ }, function(data) {
+ throw 'could not load memory initializer ' + memoryInitializer;
+ });
+ }
+}
+
+function ExitStatus(status) {
+ this.name = "ExitStatus";
+ this.message = "Program terminated with exit(" + status + ")";
+ this.status = status;
+};
+ExitStatus.prototype = new Error();
+ExitStatus.prototype.constructor = ExitStatus;
+
+var initialStackTop;
+var preloadStartTime = null;
+var calledMain = false;
+
+dependenciesFulfilled = function runCaller() {
+ // If run has never been called, and we should call run (INVOKE_RUN is true, and Module.noInitialRun is not false)
+ if (!Module['calledRun'] && shouldRunNow) run([].concat(Module["arguments"]));
+ if (!Module['calledRun']) dependenciesFulfilled = runCaller; // try this again later, after new deps are fulfilled
+}
+
+Module['callMain'] = Module.callMain = function callMain(args) {
+ assert(runDependencies == 0, 'cannot call main when async dependencies remain! (listen on __ATMAIN__)');
+ assert(__ATPRERUN__.length == 0, 'cannot call main when preRun functions remain to be called');
+
+ args = args || [];
+
+ ensureInitRuntime();
+
+ var argc = args.length+1;
+ function pad() {
+ for (var i = 0; i < 4-1; i++) {
+ argv.push(0);
+ }
+ }
+ var argv = [allocate(intArrayFromString("/bin/this.program"), 'i8', ALLOC_NORMAL) ];
+ pad();
+ for (var i = 0; i < argc-1; i = i + 1) {
+ argv.push(allocate(intArrayFromString(args[i]), 'i8', ALLOC_NORMAL));
+ pad();
+ }
+ argv.push(0);
+ argv = allocate(argv, 'i32', ALLOC_NORMAL);
+
+ initialStackTop = STACKTOP;
+
+ try {
+
+ var ret = Module['_main'](argc, argv, 0);
+
+
+ // if we're not running an evented main loop, it's time to exit
+ if (!Module['noExitRuntime']) {
+ exit(ret);
+ }
+ }
+ catch(e) {
+ if (e instanceof ExitStatus) {
+ // exit() throws this once it's done to make sure execution
+ // has been stopped completely
+ return;
+ } else if (e == 'SimulateInfiniteLoop') {
+ // running an evented main loop, don't immediately exit
+ Module['noExitRuntime'] = true;
+ return;
+ } else {
+ if (e && typeof e === 'object' && e.stack) Module.printErr('exception thrown: ' + [e, e.stack]);
+ throw e;
+ }
+ } finally {
+ calledMain = true;
+ }
+}
+
+
+
+
+function run(args) {
+ args = args || Module['arguments'];
+
+ if (preloadStartTime === null) preloadStartTime = Date.now();
+
+ if (runDependencies > 0) {
+ Module.printErr('run() called, but dependencies remain, so not running');
+ return;
+ }
+
+ preRun();
+
+ if (runDependencies > 0) return; // a preRun added a dependency, run will be called later
+ if (Module['calledRun']) return; // run may have just been called through dependencies being fulfilled just in this very frame
+
+ function doRun() {
+ if (Module['calledRun']) return; // run may have just been called while the async setStatus time below was happening
+ Module['calledRun'] = true;
+
+ ensureInitRuntime();
+
+ preMain();
+
+ if (ENVIRONMENT_IS_WEB && preloadStartTime !== null) {
+ Module.printErr('pre-main prep time: ' + (Date.now() - preloadStartTime) + ' ms');
+ }
+
+ if (Module['_main'] && shouldRunNow) {
+ Module['callMain'](args);
+ }
+
+ postRun();
+ }
+
+ if (Module['setStatus']) {
+ Module['setStatus']('Running...');
+ setTimeout(function() {
+ setTimeout(function() {
+ Module['setStatus']('');
+ }, 1);
+ if (!ABORT) doRun();
+ }, 1);
+ } else {
+ doRun();
+ }
+}
+Module['run'] = Module.run = run;
+
+function exit(status) {
+ ABORT = true;
+ EXITSTATUS = status;
+ STACKTOP = initialStackTop;
+
+ // exit the runtime
+ exitRuntime();
+
+ // TODO We should handle this differently based on environment.
+ // In the browser, the best we can do is throw an exception
+ // to halt execution, but in node we could process.exit and
+ // I'd imagine SM shell would have something equivalent.
+ // This would let us set a proper exit status (which
+ // would be great for checking test exit statuses).
+ // https://github.com/kripken/emscripten/issues/1371
+
+ // throw an exception to halt the current execution
+ throw new ExitStatus(status);
+}
+Module['exit'] = Module.exit = exit;
+
+function abort(text) {
+ if (text) {
+ Module.print(text);
+ Module.printErr(text);
+ }
+
+ ABORT = true;
+ EXITSTATUS = 1;
+
+ var extra = '\nIf this abort() is unexpected, build with -s ASSERTIONS=1 which can give more information.';
+
+ throw 'abort() at ' + stackTrace() + extra;
+}
+Module['abort'] = Module.abort = abort;
+
+// {{PRE_RUN_ADDITIONS}}
+
+if (Module['preInit']) {
+ if (typeof Module['preInit'] == 'function') Module['preInit'] = [Module['preInit']];
+ while (Module['preInit'].length > 0) {
+ Module['preInit'].pop()();
+ }
+}
+
+// shouldRunNow refers to calling main(), not run().
+var shouldRunNow = true;
+if (Module['noInitialRun']) {
+ shouldRunNow = false;
+}
+
+
+run([].concat(Module["arguments"]));
diff --git a/deps/v8/test/mjsunit/wasm/embenchen/zlib.js b/deps/v8/test/mjsunit/wasm/embenchen/zlib.js
new file mode 100644
index 0000000000..9c0d30a813
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/embenchen/zlib.js
@@ -0,0 +1,14755 @@
+// Modified embenchen to direct to asm-wasm.
+// Flags: --expose-wasm
+
+var EXPECTED_OUTPUT = 'sizes: 100000,25906\nok.\n';
+var Module = {
+ arguments: [1],
+ print: function(x) {Module.printBuffer += x + '\n';},
+ preRun: [function() {Module.printBuffer = ''}],
+ postRun: [function() {
+ assertEquals(EXPECTED_OUTPUT, Module.printBuffer);
+ }],
+};
+// The Module object: Our interface to the outside world. We import
+// and export values on it, and do the work to get that through
+// closure compiler if necessary. There are various ways Module can be used:
+// 1. Not defined. We create it here
+// 2. A function parameter, function(Module) { ..generated code.. }
+// 3. pre-run appended it, var Module = {}; ..generated code..
+// 4. External script tag defines var Module.
+// We need to do an eval in order to handle the closure compiler
+// case, where this code here is minified but Module was defined
+// elsewhere (e.g. case 4 above). We also need to check if Module
+// already exists (e.g. case 3 above).
+// Note that if you want to run closure, and also to use Module
+// after the generated code, you will need to define var Module = {};
+// before the code. Then that object will be used in the code, and you
+// can continue to use Module afterwards as well.
+var Module;
+if (!Module) Module = (typeof Module !== 'undefined' ? Module : null) || {};
+
+// Sometimes an existing Module object exists with properties
+// meant to overwrite the default module functionality. Here
+// we collect those properties and reapply _after_ we configure
+// the current environment's defaults to avoid having to be so
+// defensive during initialization.
+var moduleOverrides = {};
+for (var key in Module) {
+ if (Module.hasOwnProperty(key)) {
+ moduleOverrides[key] = Module[key];
+ }
+}
+
+// The environment setup code below is customized to use Module.
+// *** Environment setup code ***
+var ENVIRONMENT_IS_NODE = typeof process === 'object' && typeof require === 'function';
+var ENVIRONMENT_IS_WEB = typeof window === 'object';
+var ENVIRONMENT_IS_WORKER = typeof importScripts === 'function';
+var ENVIRONMENT_IS_SHELL = !ENVIRONMENT_IS_WEB && !ENVIRONMENT_IS_NODE && !ENVIRONMENT_IS_WORKER;
+
+if (ENVIRONMENT_IS_NODE) {
+ // Expose functionality in the same simple way that the shells work
+ // Note that we pollute the global namespace here, otherwise we break in node
+ if (!Module['print']) Module['print'] = function print(x) {
+ process['stdout'].write(x + '\n');
+ };
+ if (!Module['printErr']) Module['printErr'] = function printErr(x) {
+ process['stderr'].write(x + '\n');
+ };
+
+ var nodeFS = require('fs');
+ var nodePath = require('path');
+
+ Module['read'] = function read(filename, binary) {
+ filename = nodePath['normalize'](filename);
+ var ret = nodeFS['readFileSync'](filename);
+ // The path is absolute if the normalized version is the same as the resolved.
+ if (!ret && filename != nodePath['resolve'](filename)) {
+ filename = path.join(__dirname, '..', 'src', filename);
+ ret = nodeFS['readFileSync'](filename);
+ }
+ if (ret && !binary) ret = ret.toString();
+ return ret;
+ };
+
+ Module['readBinary'] = function readBinary(filename) { return Module['read'](filename, true) };
+
+ Module['load'] = function load(f) {
+ globalEval(read(f));
+ };
+
+ Module['arguments'] = process['argv'].slice(2);
+
+ module['exports'] = Module;
+}
+else if (ENVIRONMENT_IS_SHELL) {
+ if (!Module['print']) Module['print'] = print;
+ if (typeof printErr != 'undefined') Module['printErr'] = printErr; // not present in v8 or older sm
+
+ if (typeof read != 'undefined') {
+ Module['read'] = read;
+ } else {
+ Module['read'] = function read() { throw 'no read() available (jsc?)' };
+ }
+
+ Module['readBinary'] = function readBinary(f) {
+ return read(f, 'binary');
+ };
+
+ if (typeof scriptArgs != 'undefined') {
+ Module['arguments'] = scriptArgs;
+ } else if (typeof arguments != 'undefined') {
+ Module['arguments'] = arguments;
+ }
+
+ this['Module'] = Module;
+
+ eval("if (typeof gc === 'function' && gc.toString().indexOf('[native code]') > 0) var gc = undefined"); // wipe out the SpiderMonkey shell 'gc' function, which can confuse closure (uses it as a minified name, and it is then initted to a non-falsey value unexpectedly)
+}
+else if (ENVIRONMENT_IS_WEB || ENVIRONMENT_IS_WORKER) {
+ Module['read'] = function read(url) {
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, false);
+ xhr.send(null);
+ return xhr.responseText;
+ };
+
+ if (typeof arguments != 'undefined') {
+ Module['arguments'] = arguments;
+ }
+
+ if (typeof console !== 'undefined') {
+ if (!Module['print']) Module['print'] = function print(x) {
+ console.log(x);
+ };
+ if (!Module['printErr']) Module['printErr'] = function printErr(x) {
+ console.log(x);
+ };
+ } else {
+ // Probably a worker, and without console.log. We can do very little here...
+ var TRY_USE_DUMP = false;
+ if (!Module['print']) Module['print'] = (TRY_USE_DUMP && (typeof(dump) !== "undefined") ? (function(x) {
+ dump(x);
+ }) : (function(x) {
+ // self.postMessage(x); // enable this if you want stdout to be sent as messages
+ }));
+ }
+
+ if (ENVIRONMENT_IS_WEB) {
+ window['Module'] = Module;
+ } else {
+ Module['load'] = importScripts;
+ }
+}
+else {
+ // Unreachable because SHELL is dependant on the others
+ throw 'Unknown runtime environment. Where are we?';
+}
+
+function globalEval(x) {
+ eval.call(null, x);
+}
+if (!Module['load'] == 'undefined' && Module['read']) {
+ Module['load'] = function load(f) {
+ globalEval(Module['read'](f));
+ };
+}
+if (!Module['print']) {
+ Module['print'] = function(){};
+}
+if (!Module['printErr']) {
+ Module['printErr'] = Module['print'];
+}
+if (!Module['arguments']) {
+ Module['arguments'] = [];
+}
+// *** Environment setup code ***
+
+// Closure helpers
+Module.print = Module['print'];
+Module.printErr = Module['printErr'];
+
+// Callbacks
+Module['preRun'] = [];
+Module['postRun'] = [];
+
+// Merge back in the overrides
+for (var key in moduleOverrides) {
+ if (moduleOverrides.hasOwnProperty(key)) {
+ Module[key] = moduleOverrides[key];
+ }
+}
+
+
+
+// === Auto-generated preamble library stuff ===
+
+//========================================
+// Runtime code shared with compiler
+//========================================
+
+var Runtime = {
+ stackSave: function () {
+ return STACKTOP;
+ },
+ stackRestore: function (stackTop) {
+ STACKTOP = stackTop;
+ },
+ forceAlign: function (target, quantum) {
+ quantum = quantum || 4;
+ if (quantum == 1) return target;
+ if (isNumber(target) && isNumber(quantum)) {
+ return Math.ceil(target/quantum)*quantum;
+ } else if (isNumber(quantum) && isPowerOfTwo(quantum)) {
+ return '(((' +target + ')+' + (quantum-1) + ')&' + -quantum + ')';
+ }
+ return 'Math.ceil((' + target + ')/' + quantum + ')*' + quantum;
+ },
+ isNumberType: function (type) {
+ return type in Runtime.INT_TYPES || type in Runtime.FLOAT_TYPES;
+ },
+ isPointerType: function isPointerType(type) {
+ return type[type.length-1] == '*';
+},
+ isStructType: function isStructType(type) {
+ if (isPointerType(type)) return false;
+ if (isArrayType(type)) return true;
+ if (/<?\{ ?[^}]* ?\}>?/.test(type)) return true; // { i32, i8 } etc. - anonymous struct types
+ // See comment in isStructPointerType()
+ return type[0] == '%';
+},
+ INT_TYPES: {"i1":0,"i8":0,"i16":0,"i32":0,"i64":0},
+ FLOAT_TYPES: {"float":0,"double":0},
+ or64: function (x, y) {
+ var l = (x | 0) | (y | 0);
+ var h = (Math.round(x / 4294967296) | Math.round(y / 4294967296)) * 4294967296;
+ return l + h;
+ },
+ and64: function (x, y) {
+ var l = (x | 0) & (y | 0);
+ var h = (Math.round(x / 4294967296) & Math.round(y / 4294967296)) * 4294967296;
+ return l + h;
+ },
+ xor64: function (x, y) {
+ var l = (x | 0) ^ (y | 0);
+ var h = (Math.round(x / 4294967296) ^ Math.round(y / 4294967296)) * 4294967296;
+ return l + h;
+ },
+ getNativeTypeSize: function (type) {
+ switch (type) {
+ case 'i1': case 'i8': return 1;
+ case 'i16': return 2;
+ case 'i32': return 4;
+ case 'i64': return 8;
+ case 'float': return 4;
+ case 'double': return 8;
+ default: {
+ if (type[type.length-1] === '*') {
+ return Runtime.QUANTUM_SIZE; // A pointer
+ } else if (type[0] === 'i') {
+ var bits = parseInt(type.substr(1));
+ assert(bits % 8 === 0);
+ return bits/8;
+ } else {
+ return 0;
+ }
+ }
+ }
+ },
+ getNativeFieldSize: function (type) {
+ return Math.max(Runtime.getNativeTypeSize(type), Runtime.QUANTUM_SIZE);
+ },
+ dedup: function dedup(items, ident) {
+ var seen = {};
+ if (ident) {
+ return items.filter(function(item) {
+ if (seen[item[ident]]) return false;
+ seen[item[ident]] = true;
+ return true;
+ });
+ } else {
+ return items.filter(function(item) {
+ if (seen[item]) return false;
+ seen[item] = true;
+ return true;
+ });
+ }
+},
+ set: function set() {
+ var args = typeof arguments[0] === 'object' ? arguments[0] : arguments;
+ var ret = {};
+ for (var i = 0; i < args.length; i++) {
+ ret[args[i]] = 0;
+ }
+ return ret;
+},
+ STACK_ALIGN: 8,
+ getAlignSize: function (type, size, vararg) {
+ // we align i64s and doubles on 64-bit boundaries, unlike x86
+ if (!vararg && (type == 'i64' || type == 'double')) return 8;
+ if (!type) return Math.min(size, 8); // align structures internally to 64 bits
+ return Math.min(size || (type ? Runtime.getNativeFieldSize(type) : 0), Runtime.QUANTUM_SIZE);
+ },
+ calculateStructAlignment: function calculateStructAlignment(type) {
+ type.flatSize = 0;
+ type.alignSize = 0;
+ var diffs = [];
+ var prev = -1;
+ var index = 0;
+ type.flatIndexes = type.fields.map(function(field) {
+ index++;
+ var size, alignSize;
+ if (Runtime.isNumberType(field) || Runtime.isPointerType(field)) {
+ size = Runtime.getNativeTypeSize(field); // pack char; char; in structs, also char[X]s.
+ alignSize = Runtime.getAlignSize(field, size);
+ } else if (Runtime.isStructType(field)) {
+ if (field[1] === '0') {
+ // this is [0 x something]. When inside another structure like here, it must be at the end,
+ // and it adds no size
+ // XXX this happens in java-nbody for example... assert(index === type.fields.length, 'zero-length in the middle!');
+ size = 0;
+ if (Types.types[field]) {
+ alignSize = Runtime.getAlignSize(null, Types.types[field].alignSize);
+ } else {
+ alignSize = type.alignSize || QUANTUM_SIZE;
+ }
+ } else {
+ size = Types.types[field].flatSize;
+ alignSize = Runtime.getAlignSize(null, Types.types[field].alignSize);
+ }
+ } else if (field[0] == 'b') {
+ // bN, large number field, like a [N x i8]
+ size = field.substr(1)|0;
+ alignSize = 1;
+ } else if (field[0] === '<') {
+ // vector type
+ size = alignSize = Types.types[field].flatSize; // fully aligned
+ } else if (field[0] === 'i') {
+ // illegal integer field, that could not be legalized because it is an internal structure field
+ // it is ok to have such fields, if we just use them as markers of field size and nothing more complex
+ size = alignSize = parseInt(field.substr(1))/8;
+ assert(size % 1 === 0, 'cannot handle non-byte-size field ' + field);
+ } else {
+ assert(false, 'invalid type for calculateStructAlignment');
+ }
+ if (type.packed) alignSize = 1;
+ type.alignSize = Math.max(type.alignSize, alignSize);
+ var curr = Runtime.alignMemory(type.flatSize, alignSize); // if necessary, place this on aligned memory
+ type.flatSize = curr + size;
+ if (prev >= 0) {
+ diffs.push(curr-prev);
+ }
+ prev = curr;
+ return curr;
+ });
+ if (type.name_ && type.name_[0] === '[') {
+ // arrays have 2 elements, so we get the proper difference. then we scale here. that way we avoid
+ // allocating a potentially huge array for [999999 x i8] etc.
+ type.flatSize = parseInt(type.name_.substr(1))*type.flatSize/2;
+ }
+ type.flatSize = Runtime.alignMemory(type.flatSize, type.alignSize);
+ if (diffs.length == 0) {
+ type.flatFactor = type.flatSize;
+ } else if (Runtime.dedup(diffs).length == 1) {
+ type.flatFactor = diffs[0];
+ }
+ type.needsFlattening = (type.flatFactor != 1);
+ return type.flatIndexes;
+ },
+ generateStructInfo: function (struct, typeName, offset) {
+ var type, alignment;
+ if (typeName) {
+ offset = offset || 0;
+ type = (typeof Types === 'undefined' ? Runtime.typeInfo : Types.types)[typeName];
+ if (!type) return null;
+ if (type.fields.length != struct.length) {
+ printErr('Number of named fields must match the type for ' + typeName + ': possibly duplicate struct names. Cannot return structInfo');
+ return null;
+ }
+ alignment = type.flatIndexes;
+ } else {
+ var type = { fields: struct.map(function(item) { return item[0] }) };
+ alignment = Runtime.calculateStructAlignment(type);
+ }
+ var ret = {
+ __size__: type.flatSize
+ };
+ if (typeName) {
+ struct.forEach(function(item, i) {
+ if (typeof item === 'string') {
+ ret[item] = alignment[i] + offset;
+ } else {
+ // embedded struct
+ var key;
+ for (var k in item) key = k;
+ ret[key] = Runtime.generateStructInfo(item[key], type.fields[i], alignment[i]);
+ }
+ });
+ } else {
+ struct.forEach(function(item, i) {
+ ret[item[1]] = alignment[i];
+ });
+ }
+ return ret;
+ },
+ dynCall: function (sig, ptr, args) {
+ if (args && args.length) {
+ if (!args.splice) args = Array.prototype.slice.call(args);
+ args.splice(0, 0, ptr);
+ return Module['dynCall_' + sig].apply(null, args);
+ } else {
+ return Module['dynCall_' + sig].call(null, ptr);
+ }
+ },
+ functionPointers: [],
+ addFunction: function (func) {
+ for (var i = 0; i < Runtime.functionPointers.length; i++) {
+ if (!Runtime.functionPointers[i]) {
+ Runtime.functionPointers[i] = func;
+ return 2*(1 + i);
+ }
+ }
+ throw 'Finished up all reserved function pointers. Use a higher value for RESERVED_FUNCTION_POINTERS.';
+ },
+ removeFunction: function (index) {
+ Runtime.functionPointers[(index-2)/2] = null;
+ },
+ getAsmConst: function (code, numArgs) {
+ // code is a constant string on the heap, so we can cache these
+ if (!Runtime.asmConstCache) Runtime.asmConstCache = {};
+ var func = Runtime.asmConstCache[code];
+ if (func) return func;
+ var args = [];
+ for (var i = 0; i < numArgs; i++) {
+ args.push(String.fromCharCode(36) + i); // $0, $1 etc
+ }
+ var source = Pointer_stringify(code);
+ if (source[0] === '"') {
+ // tolerate EM_ASM("..code..") even though EM_ASM(..code..) is correct
+ if (source.indexOf('"', 1) === source.length-1) {
+ source = source.substr(1, source.length-2);
+ } else {
+ // something invalid happened, e.g. EM_ASM("..code($0)..", input)
+ abort('invalid EM_ASM input |' + source + '|. Please use EM_ASM(..code..) (no quotes) or EM_ASM({ ..code($0).. }, input) (to input values)');
+ }
+ }
+ try {
+ var evalled = eval('(function(' + args.join(',') + '){ ' + source + ' })'); // new Function does not allow upvars in node
+ } catch(e) {
+ Module.printErr('error in executing inline EM_ASM code: ' + e + ' on: \n\n' + source + '\n\nwith args |' + args + '| (make sure to use the right one out of EM_ASM, EM_ASM_ARGS, etc.)');
+ throw e;
+ }
+ return Runtime.asmConstCache[code] = evalled;
+ },
+ warnOnce: function (text) {
+ if (!Runtime.warnOnce.shown) Runtime.warnOnce.shown = {};
+ if (!Runtime.warnOnce.shown[text]) {
+ Runtime.warnOnce.shown[text] = 1;
+ Module.printErr(text);
+ }
+ },
+ funcWrappers: {},
+ getFuncWrapper: function (func, sig) {
+ assert(sig);
+ if (!Runtime.funcWrappers[func]) {
+ Runtime.funcWrappers[func] = function dynCall_wrapper() {
+ return Runtime.dynCall(sig, func, arguments);
+ };
+ }
+ return Runtime.funcWrappers[func];
+ },
+ UTF8Processor: function () {
+ var buffer = [];
+ var needed = 0;
+ this.processCChar = function (code) {
+ code = code & 0xFF;
+
+ if (buffer.length == 0) {
+ if ((code & 0x80) == 0x00) { // 0xxxxxxx
+ return String.fromCharCode(code);
+ }
+ buffer.push(code);
+ if ((code & 0xE0) == 0xC0) { // 110xxxxx
+ needed = 1;
+ } else if ((code & 0xF0) == 0xE0) { // 1110xxxx
+ needed = 2;
+ } else { // 11110xxx
+ needed = 3;
+ }
+ return '';
+ }
+
+ if (needed) {
+ buffer.push(code);
+ needed--;
+ if (needed > 0) return '';
+ }
+
+ var c1 = buffer[0];
+ var c2 = buffer[1];
+ var c3 = buffer[2];
+ var c4 = buffer[3];
+ var ret;
+ if (buffer.length == 2) {
+ ret = String.fromCharCode(((c1 & 0x1F) << 6) | (c2 & 0x3F));
+ } else if (buffer.length == 3) {
+ ret = String.fromCharCode(((c1 & 0x0F) << 12) | ((c2 & 0x3F) << 6) | (c3 & 0x3F));
+ } else {
+ // http://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
+ var codePoint = ((c1 & 0x07) << 18) | ((c2 & 0x3F) << 12) |
+ ((c3 & 0x3F) << 6) | (c4 & 0x3F);
+ ret = String.fromCharCode(
+ Math.floor((codePoint - 0x10000) / 0x400) + 0xD800,
+ (codePoint - 0x10000) % 0x400 + 0xDC00);
+ }
+ buffer.length = 0;
+ return ret;
+ }
+ this.processJSString = function processJSString(string) {
+ /* TODO: use TextEncoder when present,
+ var encoder = new TextEncoder();
+ encoder['encoding'] = "utf-8";
+ var utf8Array = encoder['encode'](aMsg.data);
+ */
+ string = unescape(encodeURIComponent(string));
+ var ret = [];
+ for (var i = 0; i < string.length; i++) {
+ ret.push(string.charCodeAt(i));
+ }
+ return ret;
+ }
+ },
+ getCompilerSetting: function (name) {
+ throw 'You must build with -s RETAIN_COMPILER_SETTINGS=1 for Runtime.getCompilerSetting or emscripten_get_compiler_setting to work';
+ },
+ stackAlloc: function (size) { var ret = STACKTOP;STACKTOP = (STACKTOP + size)|0;STACKTOP = (((STACKTOP)+7)&-8); return ret; },
+ staticAlloc: function (size) { var ret = STATICTOP;STATICTOP = (STATICTOP + size)|0;STATICTOP = (((STATICTOP)+7)&-8); return ret; },
+ dynamicAlloc: function (size) { var ret = DYNAMICTOP;DYNAMICTOP = (DYNAMICTOP + size)|0;DYNAMICTOP = (((DYNAMICTOP)+7)&-8); if (DYNAMICTOP >= TOTAL_MEMORY) enlargeMemory();; return ret; },
+ alignMemory: function (size,quantum) { var ret = size = Math.ceil((size)/(quantum ? quantum : 8))*(quantum ? quantum : 8); return ret; },
+ makeBigInt: function (low,high,unsigned) { var ret = (unsigned ? ((+((low>>>0)))+((+((high>>>0)))*(+4294967296))) : ((+((low>>>0)))+((+((high|0)))*(+4294967296)))); return ret; },
+ GLOBAL_BASE: 8,
+ QUANTUM_SIZE: 4,
+ __dummy__: 0
+}
+
+
+Module['Runtime'] = Runtime;
+
+
+
+
+
+
+
+
+
+//========================================
+// Runtime essentials
+//========================================
+
+var __THREW__ = 0; // Used in checking for thrown exceptions.
+
+var ABORT = false; // whether we are quitting the application. no code should run after this. set in exit() and abort()
+var EXITSTATUS = 0;
+
+var undef = 0;
+// tempInt is used for 32-bit signed values or smaller. tempBigInt is used
+// for 32-bit unsigned values or more than 32 bits. TODO: audit all uses of tempInt
+var tempValue, tempInt, tempBigInt, tempInt2, tempBigInt2, tempPair, tempBigIntI, tempBigIntR, tempBigIntS, tempBigIntP, tempBigIntD, tempDouble, tempFloat;
+var tempI64, tempI64b;
+var tempRet0, tempRet1, tempRet2, tempRet3, tempRet4, tempRet5, tempRet6, tempRet7, tempRet8, tempRet9;
+
+function assert(condition, text) {
+ if (!condition) {
+ abort('Assertion failed: ' + text);
+ }
+}
+
+var globalScope = this;
+
+// C calling interface. A convenient way to call C functions (in C files, or
+// defined with extern "C").
+//
+// Note: LLVM optimizations can inline and remove functions, after which you will not be
+// able to call them. Closure can also do so. To avoid that, add your function to
+// the exports using something like
+//
+// -s EXPORTED_FUNCTIONS='["_main", "_myfunc"]'
+//
+// @param ident The name of the C function (note that C++ functions will be name-mangled - use extern "C")
+// @param returnType The return type of the function, one of the JS types 'number', 'string' or 'array' (use 'number' for any C pointer, and
+// 'array' for JavaScript arrays and typed arrays; note that arrays are 8-bit).
+// @param argTypes An array of the types of arguments for the function (if there are no arguments, this can be ommitted). Types are as in returnType,
+// except that 'array' is not possible (there is no way for us to know the length of the array)
+// @param args An array of the arguments to the function, as native JS values (as in returnType)
+// Note that string arguments will be stored on the stack (the JS string will become a C string on the stack).
+// @return The return value, as a native JS value (as in returnType)
+function ccall(ident, returnType, argTypes, args) {
+ return ccallFunc(getCFunc(ident), returnType, argTypes, args);
+}
+Module["ccall"] = ccall;
+
+// Returns the C function with a specified identifier (for C++, you need to do manual name mangling)
+function getCFunc(ident) {
+ try {
+ var func = Module['_' + ident]; // closure exported function
+ if (!func) func = eval('_' + ident); // explicit lookup
+ } catch(e) {
+ }
+ assert(func, 'Cannot call unknown function ' + ident + ' (perhaps LLVM optimizations or closure removed it?)');
+ return func;
+}
+
+// Internal function that does a C call using a function, not an identifier
+function ccallFunc(func, returnType, argTypes, args) {
+ var stack = 0;
+ function toC(value, type) {
+ if (type == 'string') {
+ if (value === null || value === undefined || value === 0) return 0; // null string
+ value = intArrayFromString(value);
+ type = 'array';
+ }
+ if (type == 'array') {
+ if (!stack) stack = Runtime.stackSave();
+ var ret = Runtime.stackAlloc(value.length);
+ writeArrayToMemory(value, ret);
+ return ret;
+ }
+ return value;
+ }
+ function fromC(value, type) {
+ if (type == 'string') {
+ return Pointer_stringify(value);
+ }
+ assert(type != 'array');
+ return value;
+ }
+ var i = 0;
+ var cArgs = args ? args.map(function(arg) {
+ return toC(arg, argTypes[i++]);
+ }) : [];
+ var ret = fromC(func.apply(null, cArgs), returnType);
+ if (stack) Runtime.stackRestore(stack);
+ return ret;
+}
+
+// Returns a native JS wrapper for a C function. This is similar to ccall, but
+// returns a function you can call repeatedly in a normal way. For example:
+//
+// var my_function = cwrap('my_c_function', 'number', ['number', 'number']);
+// alert(my_function(5, 22));
+// alert(my_function(99, 12));
+//
+function cwrap(ident, returnType, argTypes) {
+ var func = getCFunc(ident);
+ return function() {
+ return ccallFunc(func, returnType, argTypes, Array.prototype.slice.call(arguments));
+ }
+}
+Module["cwrap"] = cwrap;
+
+// Sets a value in memory in a dynamic way at run-time. Uses the
+// type data. This is the same as makeSetValue, except that
+// makeSetValue is done at compile-time and generates the needed
+// code then, whereas this function picks the right code at
+// run-time.
+// Note that setValue and getValue only do *aligned* writes and reads!
+// Note that ccall uses JS types as for defining types, while setValue and
+// getValue need LLVM types ('i8', 'i32') - this is a lower-level operation
+function setValue(ptr, value, type, noSafe) {
+ type = type || 'i8';
+ if (type.charAt(type.length-1) === '*') type = 'i32'; // pointers are 32-bit
+ switch(type) {
+ case 'i1': HEAP8[(ptr)]=value; break;
+ case 'i8': HEAP8[(ptr)]=value; break;
+ case 'i16': HEAP16[((ptr)>>1)]=value; break;
+ case 'i32': HEAP32[((ptr)>>2)]=value; break;
+ case 'i64': (tempI64 = [value>>>0,(tempDouble=value,(+(Math_abs(tempDouble))) >= (+1) ? (tempDouble > (+0) ? ((Math_min((+(Math_floor((tempDouble)/(+4294967296)))), (+4294967295)))|0)>>>0 : (~~((+(Math_ceil((tempDouble - +(((~~(tempDouble)))>>>0))/(+4294967296))))))>>>0) : 0)],HEAP32[((ptr)>>2)]=tempI64[0],HEAP32[(((ptr)+(4))>>2)]=tempI64[1]); break;
+ case 'float': HEAPF32[((ptr)>>2)]=value; break;
+ case 'double': HEAPF64[((ptr)>>3)]=value; break;
+ default: abort('invalid type for setValue: ' + type);
+ }
+}
+Module['setValue'] = setValue;
+
+// Parallel to setValue.
+function getValue(ptr, type, noSafe) {
+ type = type || 'i8';
+ if (type.charAt(type.length-1) === '*') type = 'i32'; // pointers are 32-bit
+ switch(type) {
+ case 'i1': return HEAP8[(ptr)];
+ case 'i8': return HEAP8[(ptr)];
+ case 'i16': return HEAP16[((ptr)>>1)];
+ case 'i32': return HEAP32[((ptr)>>2)];
+ case 'i64': return HEAP32[((ptr)>>2)];
+ case 'float': return HEAPF32[((ptr)>>2)];
+ case 'double': return HEAPF64[((ptr)>>3)];
+ default: abort('invalid type for setValue: ' + type);
+ }
+ return null;
+}
+Module['getValue'] = getValue;
+
+var ALLOC_NORMAL = 0; // Tries to use _malloc()
+var ALLOC_STACK = 1; // Lives for the duration of the current function call
+var ALLOC_STATIC = 2; // Cannot be freed
+var ALLOC_DYNAMIC = 3; // Cannot be freed except through sbrk
+var ALLOC_NONE = 4; // Do not allocate
+Module['ALLOC_NORMAL'] = ALLOC_NORMAL;
+Module['ALLOC_STACK'] = ALLOC_STACK;
+Module['ALLOC_STATIC'] = ALLOC_STATIC;
+Module['ALLOC_DYNAMIC'] = ALLOC_DYNAMIC;
+Module['ALLOC_NONE'] = ALLOC_NONE;
+
+// allocate(): This is for internal use. You can use it yourself as well, but the interface
+// is a little tricky (see docs right below). The reason is that it is optimized
+// for multiple syntaxes to save space in generated code. So you should
+// normally not use allocate(), and instead allocate memory using _malloc(),
+// initialize it with setValue(), and so forth.
+// @slab: An array of data, or a number. If a number, then the size of the block to allocate,
+// in *bytes* (note that this is sometimes confusing: the next parameter does not
+// affect this!)
+// @types: Either an array of types, one for each byte (or 0 if no type at that position),
+// or a single type which is used for the entire block. This only matters if there
+// is initial data - if @slab is a number, then this does not matter at all and is
+// ignored.
+// @allocator: How to allocate memory, see ALLOC_*
+function allocate(slab, types, allocator, ptr) {
+ var zeroinit, size;
+ if (typeof slab === 'number') {
+ zeroinit = true;
+ size = slab;
+ } else {
+ zeroinit = false;
+ size = slab.length;
+ }
+
+ var singleType = typeof types === 'string' ? types : null;
+
+ var ret;
+ if (allocator == ALLOC_NONE) {
+ ret = ptr;
+ } else {
+ ret = [_malloc, Runtime.stackAlloc, Runtime.staticAlloc, Runtime.dynamicAlloc][allocator === undefined ? ALLOC_STATIC : allocator](Math.max(size, singleType ? 1 : types.length));
+ }
+
+ if (zeroinit) {
+ var ptr = ret, stop;
+ assert((ret & 3) == 0);
+ stop = ret + (size & ~3);
+ for (; ptr < stop; ptr += 4) {
+ HEAP32[((ptr)>>2)]=0;
+ }
+ stop = ret + size;
+ while (ptr < stop) {
+ HEAP8[((ptr++)|0)]=0;
+ }
+ return ret;
+ }
+
+ if (singleType === 'i8') {
+ if (slab.subarray || slab.slice) {
+ HEAPU8.set(slab, ret);
+ } else {
+ HEAPU8.set(new Uint8Array(slab), ret);
+ }
+ return ret;
+ }
+
+ var i = 0, type, typeSize, previousType;
+ while (i < size) {
+ var curr = slab[i];
+
+ if (typeof curr === 'function') {
+ curr = Runtime.getFunctionIndex(curr);
+ }
+
+ type = singleType || types[i];
+ if (type === 0) {
+ i++;
+ continue;
+ }
+
+ if (type == 'i64') type = 'i32'; // special case: we have one i32 here, and one i32 later
+
+ setValue(ret+i, curr, type);
+
+ // no need to look up size unless type changes, so cache it
+ if (previousType !== type) {
+ typeSize = Runtime.getNativeTypeSize(type);
+ previousType = type;
+ }
+ i += typeSize;
+ }
+
+ return ret;
+}
+Module['allocate'] = allocate;
+
+function Pointer_stringify(ptr, /* optional */ length) {
+ // TODO: use TextDecoder
+ // Find the length, and check for UTF while doing so
+ var hasUtf = false;
+ var t;
+ var i = 0;
+ while (1) {
+ t = HEAPU8[(((ptr)+(i))|0)];
+ if (t >= 128) hasUtf = true;
+ else if (t == 0 && !length) break;
+ i++;
+ if (length && i == length) break;
+ }
+ if (!length) length = i;
+
+ var ret = '';
+
+ if (!hasUtf) {
+ var MAX_CHUNK = 1024; // split up into chunks, because .apply on a huge string can overflow the stack
+ var curr;
+ while (length > 0) {
+ curr = String.fromCharCode.apply(String, HEAPU8.subarray(ptr, ptr + Math.min(length, MAX_CHUNK)));
+ ret = ret ? ret + curr : curr;
+ ptr += MAX_CHUNK;
+ length -= MAX_CHUNK;
+ }
+ return ret;
+ }
+
+ var utf8 = new Runtime.UTF8Processor();
+ for (i = 0; i < length; i++) {
+ t = HEAPU8[(((ptr)+(i))|0)];
+ ret += utf8.processCChar(t);
+ }
+ return ret;
+}
+Module['Pointer_stringify'] = Pointer_stringify;
+
+// Given a pointer 'ptr' to a null-terminated UTF16LE-encoded string in the emscripten HEAP, returns
+// a copy of that string as a Javascript String object.
+function UTF16ToString(ptr) {
+ var i = 0;
+
+ var str = '';
+ while (1) {
+ var codeUnit = HEAP16[(((ptr)+(i*2))>>1)];
+ if (codeUnit == 0)
+ return str;
+ ++i;
+ // fromCharCode constructs a character from a UTF-16 code unit, so we can pass the UTF16 string right through.
+ str += String.fromCharCode(codeUnit);
+ }
+}
+Module['UTF16ToString'] = UTF16ToString;
+
+// Copies the given Javascript String object 'str' to the emscripten HEAP at address 'outPtr',
+// null-terminated and encoded in UTF16LE form. The copy will require at most (str.length*2+1)*2 bytes of space in the HEAP.
+function stringToUTF16(str, outPtr) {
+ for(var i = 0; i < str.length; ++i) {
+ // charCodeAt returns a UTF-16 encoded code unit, so it can be directly written to the HEAP.
+ var codeUnit = str.charCodeAt(i); // possibly a lead surrogate
+ HEAP16[(((outPtr)+(i*2))>>1)]=codeUnit;
+ }
+ // Null-terminate the pointer to the HEAP.
+ HEAP16[(((outPtr)+(str.length*2))>>1)]=0;
+}
+Module['stringToUTF16'] = stringToUTF16;
+
+// Given a pointer 'ptr' to a null-terminated UTF32LE-encoded string in the emscripten HEAP, returns
+// a copy of that string as a Javascript String object.
+function UTF32ToString(ptr) {
+ var i = 0;
+
+ var str = '';
+ while (1) {
+ var utf32 = HEAP32[(((ptr)+(i*4))>>2)];
+ if (utf32 == 0)
+ return str;
+ ++i;
+ // Gotcha: fromCharCode constructs a character from a UTF-16 encoded code (pair), not from a Unicode code point! So encode the code point to UTF-16 for constructing.
+ if (utf32 >= 0x10000) {
+ var ch = utf32 - 0x10000;
+ str += String.fromCharCode(0xD800 | (ch >> 10), 0xDC00 | (ch & 0x3FF));
+ } else {
+ str += String.fromCharCode(utf32);
+ }
+ }
+}
+Module['UTF32ToString'] = UTF32ToString;
+
+// Copies the given Javascript String object 'str' to the emscripten HEAP at address 'outPtr',
+// null-terminated and encoded in UTF32LE form. The copy will require at most (str.length+1)*4 bytes of space in the HEAP,
+// but can use less, since str.length does not return the number of characters in the string, but the number of UTF-16 code units in the string.
+function stringToUTF32(str, outPtr) {
+ var iChar = 0;
+ for(var iCodeUnit = 0; iCodeUnit < str.length; ++iCodeUnit) {
+ // Gotcha: charCodeAt returns a 16-bit word that is a UTF-16 encoded code unit, not a Unicode code point of the character! We must decode the string to UTF-32 to the heap.
+ var codeUnit = str.charCodeAt(iCodeUnit); // possibly a lead surrogate
+ if (codeUnit >= 0xD800 && codeUnit <= 0xDFFF) {
+ var trailSurrogate = str.charCodeAt(++iCodeUnit);
+ codeUnit = 0x10000 + ((codeUnit & 0x3FF) << 10) | (trailSurrogate & 0x3FF);
+ }
+ HEAP32[(((outPtr)+(iChar*4))>>2)]=codeUnit;
+ ++iChar;
+ }
+ // Null-terminate the pointer to the HEAP.
+ HEAP32[(((outPtr)+(iChar*4))>>2)]=0;
+}
+Module['stringToUTF32'] = stringToUTF32;
+
+function demangle(func) {
+ var i = 3;
+ // params, etc.
+ var basicTypes = {
+ 'v': 'void',
+ 'b': 'bool',
+ 'c': 'char',
+ 's': 'short',
+ 'i': 'int',
+ 'l': 'long',
+ 'f': 'float',
+ 'd': 'double',
+ 'w': 'wchar_t',
+ 'a': 'signed char',
+ 'h': 'unsigned char',
+ 't': 'unsigned short',
+ 'j': 'unsigned int',
+ 'm': 'unsigned long',
+ 'x': 'long long',
+ 'y': 'unsigned long long',
+ 'z': '...'
+ };
+ var subs = [];
+ var first = true;
+ function dump(x) {
+ //return;
+ if (x) Module.print(x);
+ Module.print(func);
+ var pre = '';
+ for (var a = 0; a < i; a++) pre += ' ';
+ Module.print (pre + '^');
+ }
+ function parseNested() {
+ i++;
+ if (func[i] === 'K') i++; // ignore const
+ var parts = [];
+ while (func[i] !== 'E') {
+ if (func[i] === 'S') { // substitution
+ i++;
+ var next = func.indexOf('_', i);
+ var num = func.substring(i, next) || 0;
+ parts.push(subs[num] || '?');
+ i = next+1;
+ continue;
+ }
+ if (func[i] === 'C') { // constructor
+ parts.push(parts[parts.length-1]);
+ i += 2;
+ continue;
+ }
+ var size = parseInt(func.substr(i));
+ var pre = size.toString().length;
+ if (!size || !pre) { i--; break; } // counter i++ below us
+ var curr = func.substr(i + pre, size);
+ parts.push(curr);
+ subs.push(curr);
+ i += pre + size;
+ }
+ i++; // skip E
+ return parts;
+ }
+ function parse(rawList, limit, allowVoid) { // main parser
+ limit = limit || Infinity;
+ var ret = '', list = [];
+ function flushList() {
+ return '(' + list.join(', ') + ')';
+ }
+ var name;
+ if (func[i] === 'N') {
+ // namespaced N-E
+ name = parseNested().join('::');
+ limit--;
+ if (limit === 0) return rawList ? [name] : name;
+ } else {
+ // not namespaced
+ if (func[i] === 'K' || (first && func[i] === 'L')) i++; // ignore const and first 'L'
+ var size = parseInt(func.substr(i));
+ if (size) {
+ var pre = size.toString().length;
+ name = func.substr(i + pre, size);
+ i += pre + size;
+ }
+ }
+ first = false;
+ if (func[i] === 'I') {
+ i++;
+ var iList = parse(true);
+ var iRet = parse(true, 1, true);
+ ret += iRet[0] + ' ' + name + '<' + iList.join(', ') + '>';
+ } else {
+ ret = name;
+ }
+ paramLoop: while (i < func.length && limit-- > 0) {
+ //dump('paramLoop');
+ var c = func[i++];
+ if (c in basicTypes) {
+ list.push(basicTypes[c]);
+ } else {
+ switch (c) {
+ case 'P': list.push(parse(true, 1, true)[0] + '*'); break; // pointer
+ case 'R': list.push(parse(true, 1, true)[0] + '&'); break; // reference
+ case 'L': { // literal
+ i++; // skip basic type
+ var end = func.indexOf('E', i);
+ var size = end - i;
+ list.push(func.substr(i, size));
+ i += size + 2; // size + 'EE'
+ break;
+ }
+ case 'A': { // array
+ var size = parseInt(func.substr(i));
+ i += size.toString().length;
+ if (func[i] !== '_') throw '?';
+ i++; // skip _
+ list.push(parse(true, 1, true)[0] + ' [' + size + ']');
+ break;
+ }
+ case 'E': break paramLoop;
+ default: ret += '?' + c; break paramLoop;
+ }
+ }
+ }
+ if (!allowVoid && list.length === 1 && list[0] === 'void') list = []; // avoid (void)
+ if (rawList) {
+ if (ret) {
+ list.push(ret + '?');
+ }
+ return list;
+ } else {
+ return ret + flushList();
+ }
+ }
+ try {
+ // Special-case the entry point, since its name differs from other name mangling.
+ if (func == 'Object._main' || func == '_main') {
+ return 'main()';
+ }
+ if (typeof func === 'number') func = Pointer_stringify(func);
+ if (func[0] !== '_') return func;
+ if (func[1] !== '_') return func; // C function
+ if (func[2] !== 'Z') return func;
+ switch (func[3]) {
+ case 'n': return 'operator new()';
+ case 'd': return 'operator delete()';
+ }
+ return parse();
+ } catch(e) {
+ return func;
+ }
+}
+
+function demangleAll(text) {
+ return text.replace(/__Z[\w\d_]+/g, function(x) { var y = demangle(x); return x === y ? x : (x + ' [' + y + ']') });
+}
+
+function stackTrace() {
+ var stack = new Error().stack;
+ return stack ? demangleAll(stack) : '(no stack trace available)'; // Stack trace is not available at least on IE10 and Safari 6.
+}
+
+// Memory management
+
+var PAGE_SIZE = 4096;
+function alignMemoryPage(x) {
+ return (x+4095)&-4096;
+}
+
+var HEAP;
+var HEAP8, HEAPU8, HEAP16, HEAPU16, HEAP32, HEAPU32, HEAPF32, HEAPF64;
+
+var STATIC_BASE = 0, STATICTOP = 0, staticSealed = false; // static area
+var STACK_BASE = 0, STACKTOP = 0, STACK_MAX = 0; // stack area
+var DYNAMIC_BASE = 0, DYNAMICTOP = 0; // dynamic area handled by sbrk
+
+function enlargeMemory() {
+ abort('Cannot enlarge memory arrays. Either (1) compile with -s TOTAL_MEMORY=X with X higher than the current value ' + TOTAL_MEMORY + ', (2) compile with ALLOW_MEMORY_GROWTH which adjusts the size at runtime but prevents some optimizations, or (3) set Module.TOTAL_MEMORY before the program runs.');
+}
+
+var TOTAL_STACK = Module['TOTAL_STACK'] || 5242880;
+var TOTAL_MEMORY = Module['TOTAL_MEMORY'] || 134217728;
+var FAST_MEMORY = Module['FAST_MEMORY'] || 2097152;
+
+var totalMemory = 4096;
+while (totalMemory < TOTAL_MEMORY || totalMemory < 2*TOTAL_STACK) {
+ if (totalMemory < 16*1024*1024) {
+ totalMemory *= 2;
+ } else {
+ totalMemory += 16*1024*1024
+ }
+}
+if (totalMemory !== TOTAL_MEMORY) {
+ Module.printErr('increasing TOTAL_MEMORY to ' + totalMemory + ' to be more reasonable');
+ TOTAL_MEMORY = totalMemory;
+}
+
+// Initialize the runtime's memory
+// check for full engine support (use string 'subarray' to avoid closure compiler confusion)
+assert(typeof Int32Array !== 'undefined' && typeof Float64Array !== 'undefined' && !!(new Int32Array(1)['subarray']) && !!(new Int32Array(1)['set']),
+ 'JS engine does not provide full typed array support');
+
+var buffer = new ArrayBuffer(TOTAL_MEMORY);
+HEAP8 = new Int8Array(buffer);
+HEAP16 = new Int16Array(buffer);
+HEAP32 = new Int32Array(buffer);
+HEAPU8 = new Uint8Array(buffer);
+HEAPU16 = new Uint16Array(buffer);
+HEAPU32 = new Uint32Array(buffer);
+HEAPF32 = new Float32Array(buffer);
+HEAPF64 = new Float64Array(buffer);
+
+// Endianness check (note: assumes compiler arch was little-endian)
+HEAP32[0] = 255;
+assert(HEAPU8[0] === 255 && HEAPU8[3] === 0, 'Typed arrays 2 must be run on a little-endian system');
+
+Module['HEAP'] = HEAP;
+Module['HEAP8'] = HEAP8;
+Module['HEAP16'] = HEAP16;
+Module['HEAP32'] = HEAP32;
+Module['HEAPU8'] = HEAPU8;
+Module['HEAPU16'] = HEAPU16;
+Module['HEAPU32'] = HEAPU32;
+Module['HEAPF32'] = HEAPF32;
+Module['HEAPF64'] = HEAPF64;
+
+function callRuntimeCallbacks(callbacks) {
+ while(callbacks.length > 0) {
+ var callback = callbacks.shift();
+ if (typeof callback == 'function') {
+ callback();
+ continue;
+ }
+ var func = callback.func;
+ if (typeof func === 'number') {
+ if (callback.arg === undefined) {
+ Runtime.dynCall('v', func);
+ } else {
+ Runtime.dynCall('vi', func, [callback.arg]);
+ }
+ } else {
+ func(callback.arg === undefined ? null : callback.arg);
+ }
+ }
+}
+
+var __ATPRERUN__ = []; // functions called before the runtime is initialized
+var __ATINIT__ = []; // functions called during startup
+var __ATMAIN__ = []; // functions called when main() is to be run
+var __ATEXIT__ = []; // functions called during shutdown
+var __ATPOSTRUN__ = []; // functions called after the runtime has exited
+
+var runtimeInitialized = false;
+
+function preRun() {
+ // compatibility - merge in anything from Module['preRun'] at this time
+ if (Module['preRun']) {
+ if (typeof Module['preRun'] == 'function') Module['preRun'] = [Module['preRun']];
+ while (Module['preRun'].length) {
+ addOnPreRun(Module['preRun'].shift());
+ }
+ }
+ callRuntimeCallbacks(__ATPRERUN__);
+}
+
+function ensureInitRuntime() {
+ if (runtimeInitialized) return;
+ runtimeInitialized = true;
+ callRuntimeCallbacks(__ATINIT__);
+}
+
+function preMain() {
+ callRuntimeCallbacks(__ATMAIN__);
+}
+
+function exitRuntime() {
+ callRuntimeCallbacks(__ATEXIT__);
+}
+
+function postRun() {
+ // compatibility - merge in anything from Module['postRun'] at this time
+ if (Module['postRun']) {
+ if (typeof Module['postRun'] == 'function') Module['postRun'] = [Module['postRun']];
+ while (Module['postRun'].length) {
+ addOnPostRun(Module['postRun'].shift());
+ }
+ }
+ callRuntimeCallbacks(__ATPOSTRUN__);
+}
+
+function addOnPreRun(cb) {
+ __ATPRERUN__.unshift(cb);
+}
+Module['addOnPreRun'] = Module.addOnPreRun = addOnPreRun;
+
+function addOnInit(cb) {
+ __ATINIT__.unshift(cb);
+}
+Module['addOnInit'] = Module.addOnInit = addOnInit;
+
+function addOnPreMain(cb) {
+ __ATMAIN__.unshift(cb);
+}
+Module['addOnPreMain'] = Module.addOnPreMain = addOnPreMain;
+
+function addOnExit(cb) {
+ __ATEXIT__.unshift(cb);
+}
+Module['addOnExit'] = Module.addOnExit = addOnExit;
+
+function addOnPostRun(cb) {
+ __ATPOSTRUN__.unshift(cb);
+}
+Module['addOnPostRun'] = Module.addOnPostRun = addOnPostRun;
+
+// Tools
+
+// This processes a JS string into a C-line array of numbers, 0-terminated.
+// For LLVM-originating strings, see parser.js:parseLLVMString function
+function intArrayFromString(stringy, dontAddNull, length /* optional */) {
+ var ret = (new Runtime.UTF8Processor()).processJSString(stringy);
+ if (length) {
+ ret.length = length;
+ }
+ if (!dontAddNull) {
+ ret.push(0);
+ }
+ return ret;
+}
+Module['intArrayFromString'] = intArrayFromString;
+
+function intArrayToString(array) {
+ var ret = [];
+ for (var i = 0; i < array.length; i++) {
+ var chr = array[i];
+ if (chr > 0xFF) {
+ chr &= 0xFF;
+ }
+ ret.push(String.fromCharCode(chr));
+ }
+ return ret.join('');
+}
+Module['intArrayToString'] = intArrayToString;
+
+// Write a Javascript array to somewhere in the heap
+function writeStringToMemory(string, buffer, dontAddNull) {
+ var array = intArrayFromString(string, dontAddNull);
+ var i = 0;
+ while (i < array.length) {
+ var chr = array[i];
+ HEAP8[(((buffer)+(i))|0)]=chr;
+ i = i + 1;
+ }
+}
+Module['writeStringToMemory'] = writeStringToMemory;
+
+function writeArrayToMemory(array, buffer) {
+ for (var i = 0; i < array.length; i++) {
+ HEAP8[(((buffer)+(i))|0)]=array[i];
+ }
+}
+Module['writeArrayToMemory'] = writeArrayToMemory;
+
+function writeAsciiToMemory(str, buffer, dontAddNull) {
+ for (var i = 0; i < str.length; i++) {
+ HEAP8[(((buffer)+(i))|0)]=str.charCodeAt(i);
+ }
+ if (!dontAddNull) HEAP8[(((buffer)+(str.length))|0)]=0;
+}
+Module['writeAsciiToMemory'] = writeAsciiToMemory;
+
+function unSign(value, bits, ignore) {
+ if (value >= 0) {
+ return value;
+ }
+ return bits <= 32 ? 2*Math.abs(1 << (bits-1)) + value // Need some trickery, since if bits == 32, we are right at the limit of the bits JS uses in bitshifts
+ : Math.pow(2, bits) + value;
+}
+function reSign(value, bits, ignore) {
+ if (value <= 0) {
+ return value;
+ }
+ var half = bits <= 32 ? Math.abs(1 << (bits-1)) // abs is needed if bits == 32
+ : Math.pow(2, bits-1);
+ if (value >= half && (bits <= 32 || value > half)) { // for huge values, we can hit the precision limit and always get true here. so don't do that
+ // but, in general there is no perfect solution here. With 64-bit ints, we get rounding and errors
+ // TODO: In i64 mode 1, resign the two parts separately and safely
+ value = -2*half + value; // Cannot bitshift half, as it may be at the limit of the bits JS uses in bitshifts
+ }
+ return value;
+}
+
+// check for imul support, and also for correctness ( https://bugs.webkit.org/show_bug.cgi?id=126345 )
+if (!Math['imul'] || Math['imul'](0xffffffff, 5) !== -5) Math['imul'] = function imul(a, b) {
+ var ah = a >>> 16;
+ var al = a & 0xffff;
+ var bh = b >>> 16;
+ var bl = b & 0xffff;
+ return (al*bl + ((ah*bl + al*bh) << 16))|0;
+};
+Math.imul = Math['imul'];
+
+
+var Math_abs = Math.abs;
+var Math_cos = Math.cos;
+var Math_sin = Math.sin;
+var Math_tan = Math.tan;
+var Math_acos = Math.acos;
+var Math_asin = Math.asin;
+var Math_atan = Math.atan;
+var Math_atan2 = Math.atan2;
+var Math_exp = Math.exp;
+var Math_log = Math.log;
+var Math_sqrt = Math.sqrt;
+var Math_ceil = Math.ceil;
+var Math_floor = Math.floor;
+var Math_pow = Math.pow;
+var Math_imul = Math.imul;
+var Math_fround = Math.fround;
+var Math_min = Math.min;
+
+// A counter of dependencies for calling run(). If we need to
+// do asynchronous work before running, increment this and
+// decrement it. Incrementing must happen in a place like
+// PRE_RUN_ADDITIONS (used by emcc to add file preloading).
+// Note that you can add dependencies in preRun, even though
+// it happens right before run - run will be postponed until
+// the dependencies are met.
+var runDependencies = 0;
+var runDependencyWatcher = null;
+var dependenciesFulfilled = null; // overridden to take different actions when all run dependencies are fulfilled
+
+function addRunDependency(id) {
+ runDependencies++;
+ if (Module['monitorRunDependencies']) {
+ Module['monitorRunDependencies'](runDependencies);
+ }
+}
+Module['addRunDependency'] = addRunDependency;
+function removeRunDependency(id) {
+ runDependencies--;
+ if (Module['monitorRunDependencies']) {
+ Module['monitorRunDependencies'](runDependencies);
+ }
+ if (runDependencies == 0) {
+ if (runDependencyWatcher !== null) {
+ clearInterval(runDependencyWatcher);
+ runDependencyWatcher = null;
+ }
+ if (dependenciesFulfilled) {
+ var callback = dependenciesFulfilled;
+ dependenciesFulfilled = null;
+ callback(); // can add another dependenciesFulfilled
+ }
+ }
+}
+Module['removeRunDependency'] = removeRunDependency;
+
+Module["preloadedImages"] = {}; // maps url to image data
+Module["preloadedAudios"] = {}; // maps url to audio data
+
+
+var memoryInitializer = null;
+
+// === Body ===
+
+
+
+
+
+STATIC_BASE = 8;
+
+STATICTOP = STATIC_BASE + Runtime.alignMemory(14963);
+/* global initializers */ __ATINIT__.push();
+
+
+/* memory initializer */ allocate([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,115,105,122,101,115,58,32,37,100,44,37,100,10,0,0,0,100,101,99,111,109,112,114,101,115,115,101,100,83,105,122,101,32,61,61,32,115,105,122,101,0,0,0,0,0,0,0,0,47,116,109,112,47,101,109,115,99,114,105,112,116,101,110,95,116,101,109,112,47,122,108,105,98,46,99,0,0,0,0,0,100,111,105,116,0,0,0,0,115,116,114,99,109,112,40,98,117,102,102,101,114,44,32,98,117,102,102,101,114,51,41,32,61,61,32,48,0,0,0,0,101,114,114,111,114,58,32,37,100,92,110,0,0,0,0,0,111,107,46,0,0,0,0,0,49,46,50,46,53,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,4,0,4,0,8,0,4,0,2,0,0,0,4,0,5,0,16,0,8,0,2,0,0,0,4,0,6,0,32,0,32,0,2,0,0,0,4,0,4,0,16,0,16,0,3,0,0,0,8,0,16,0,32,0,32,0,3,0,0,0,8,0,16,0,128,0,128,0,3,0,0,0,8,0,32,0,128,0,0,1,3,0,0,0,32,0,128,0,2,1,0,4,3,0,0,0,32,0,2,1,2,1,0,16,3,0,0,0,0,1,2,3,4,4,5,5,6,6,6,6,7,7,7,7,8,8,8,8,8,8,8,8,9,9,9,9,9,9,9,9,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,0,0,16,17,18,18,19,19,20,20,20,20,21,21,21,21,22,22,22,22,22,22,22,22,23,23,23,23,23,23,23,23,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,0,1,2,3,4,5,6,7,8,8,9,9,10,10,11,11,12,12,12,12,13,13,13,13,14,14,14,14,15,15,15,15,16,16,16,16,16,16,16,16,17,17,17,17,17,17,17,17,18,18,18,18,18,18,18,18,19,19,19,19,19,19,19,19,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,28,112,4,0,0,104,9,0,0,1,1,0,0,30,1,0,0,15,0,0,0,0,0,0,0,240,8,0,0,88,10,0,0,0,0,0,0,30,0,0,0,15,0,0,0,0,0,0,0,0,0,0,0,96,11,0,0,0,0,0,0,19,0,0,0,7,0,0,0,0,0,0,0,12,0,8,0,140,0,8,0,76,0,8,0,204,0,8,0,44,0,8,0,172,0,8,0,108,0,8,0,236,0,8,0,28,0,8,0,156,0,8,0,92,0,8,0,220,0,8,0,60,0,8,0,188,0,8,0,124,0,8,0,252,0,8,0,2,0,8,0,130,0,8,0,66,0,8,0,194,0,8,0,34,0,8,0,162,0,8,0,98,0,8,0,226,0,8,0,18,0,8,0,146,0,8,0,82,0,8,0,210,0,8,0,50,0,8,0,178,0,8,0,114,0,8,0,242,0,8,0,10,0,8,0,138,0,8,0,74,0,8,0,202,0,8,0,42,0,8,0,170,0,8,0,106,0,8,0,234,0,8,0,26,0,8,0,154,0,8,0,90,0,8,0,218,0,8,0,58,0,8,0,186,0,8,0,122,0,8,0,250,0,8,0,6,0,8,0,134,0,8,0,70,0,8,0,198,0,8,0,38,0,8,0,166,0,8,0,102,0,8,0,230,0,8,0,22,0,8,0,150,0,8,0,86,0,8,0,214,0,8,0,54,0,8,0,182,0,8,0,118,0,8,0,246,0,8,0,14,0,8,0,142,0,8,0,78,0,8,0,206,0,8,0,46,0,8,0,174,0,8,0,110,0,8,0,238,0,8,0,30,0,8,0,158,0,8,0,94,0,8,0,222,0,8,0,62,0,8,0,190,0,8,0,126,0,8,0,254,0,8,0,1,0,8,0,129,0,8,0,65,0,8,0,193,0,8,0,33,0,8,0,161,0,8,0,97,0,8,0,225,0,8,0,17,0,8,0,145,0,8,0,81,0,8,0,209,0,8,0,49,0,8,0,177,0,8,0,113,0,8,0,241,0,8,0,9,0,8,0,137,0,8,0,73,0,8,0,201,0,8,0,41,0,8,0,169,0,8,0,105,0,8,0,233,0,8,0,25,0,8,0,153,0,8,0,89,0,8,0,217,0,8,0,57,0,8,0,185,0,8,0,121,0,8,0,249,0,8,0,5,0,8,0,133,0,8,0,69,0,8,0,197,0,8,0,37,0,8,0,165,0,8,0,101,0,8,0,229,0,8,0,21,0,8,0,149,0,8,0,85,0,8,0,213,0,8,0,53,0,8,0,181,0,8,0,117,0,8,0,245,0,8,0,13,0,8,0,141,0,8,0,77,0,8,0,205,0,8,0,45,0,8,0,173,0,8,0,109,0,8,0,237,0,8,0,29,0,8,0,157,0,8,0,93,0,8,0,221,0,8,0,61,0,8,0,189,0,8,0,125,0,8,0,253,0,8,0,19,0,9,0,19,1,9,0,147,0,9,0,147,1,9,0,83,0,9,0,83,1,9,0,211,0,9,0,211,1,9,0,51,0,9,0,51,1,9,0,179,0,9,0,179,1,9,0,115,0,9,0,115,1,9,0,243,0,9,0,243,1,9,0,11,0,9,0,11,1,9,0,139,0,9,0,139,1,9,0,75,0,9,0,75,1,9,0,203,0,9,0,203,1,9,0,43,0,9,0,43,1,9,0,171,0,9,0,171,1,9,0,107,0,9,0,107,1,9,0,235,0,9,0,235,1,9,0,27,0,9,0,27,1,9,0,155,0,9,0,155,1,9,0,91,0,9,0,91,1,9,0,219,0,9,0,219,1,9,0,59,0,9,0,59,1,9,0,187,0,9,0,187,1,9,0,123,0,9,0,123,1,9,0,251,0,9,0,251,1,9,0,7,0,9,0,7,1,9,0,135,0,9,0,135,1,9,0,71,0,9,0,71,1,9,0,199,0,9,0,199,1,9,0,39,0,9,0,39,1,9,0,167,0,9,0,167,1,9,0,103,0,9,0,103,1,9,0,231,0,9,0,231,1,9,0,23,0,9,0,23,1,9,0,151,0,9,0,151,1,9,0,87,0,9,0,87,1,9,0,215,0,9,0,215,1,9,0,55,0,9,0,55,1,9,0,183,0,9,0,183,1,9,0,119,0,9,0,119,1,9,0,247,0,9,0,247,1,9,0,15,0,9,0,15,1,9,0,143,0,9,0,143,1,9,0,79,0,9,0,79,1,9,0,207,0,9,0,207,1,9,0,47,0,9,0,47,1,9,0,175,0,9,0,175,1,9,0,111,0,9,0,111,1,9,0,239,0,9,0,239,1,9,0,31,0,9,0,31,1,9,0,159,0,9,0,159,1,9,0,95,0,9,0,95,1,9,0,223,0,9,0,223,1,9,0,63,0,9,0,63,1,9,0,191,0,9,0,191,1,9,0,127,0,9,0,127,1,9,0,255,0,9,0,255,1,9,0,0,0,7,0,64,0,7,0,32,0,7,0,96,0,7,0,16,0,7,0,80,0,7,0,48,0,7,0,112,0,7,0,8,0,7,0,72,0,7,0,40,0,7,0,104,0,7,0,24,0,7,0,88,0,7,0,56,0,7,0,120,0,7,0,4,0,7,0,68,0,7,0,36,0,7,0,100,0,7,0,20,0,7,0,84,0,7,0,52,0,7,0,116,0,7,0,3,0,8,0,131,0,8,0,67,0,8,0,195,0,8,0,35,0,8,0,163,0,8,0,99,0,8,0,227,0,8,0,0,0,5,0,16,0,5,0,8,0,5,0,24,0,5,0,4,0,5,0,20,0,5,0,12,0,5,0,28,0,5,0,2,0,5,0,18,0,5,0,10,0,5,0,26,0,5,0,6,0,5,0,22,0,5,0,14,0,5,0,30,0,5,0,1,0,5,0,17,0,5,0,9,0,5,0,25,0,5,0,5,0,5,0,21,0,5,0,13,0,5,0,29,0,5,0,3,0,5,0,19,0,5,0,11,0,5,0,27,0,5,0,7,0,5,0,23,0,5,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,2,0,0,0,2,0,0,0,2,0,0,0,2,0,0,0,3,0,0,0,3,0,0,0,3,0,0,0,3,0,0,0,4,0,0,0,4,0,0,0,4,0,0,0,4,0,0,0,5,0,0,0,5,0,0,0,5,0,0,0,5,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,2,0,0,0,3,0,0,0,4,0,0,0,5,0,0,0,6,0,0,0,7,0,0,0,8,0,0,0,10,0,0,0,12,0,0,0,14,0,0,0,16,0,0,0,20,0,0,0,24,0,0,0,28,0,0,0,32,0,0,0,40,0,0,0,48,0,0,0,56,0,0,0,64,0,0,0,80,0,0,0,96,0,0,0,112,0,0,0,128,0,0,0,160,0,0,0,192,0,0,0,224,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,2,0,0,0,2,0,0,0,3,0,0,0,3,0,0,0,4,0,0,0,4,0,0,0,5,0,0,0,5,0,0,0,6,0,0,0,6,0,0,0,7,0,0,0,7,0,0,0,8,0,0,0,8,0,0,0,9,0,0,0,9,0,0,0,10,0,0,0,10,0,0,0,11,0,0,0,11,0,0,0,12,0,0,0,12,0,0,0,13,0,0,0,13,0,0,0,0,0,0,0,1,0,0,0,2,0,0,0,3,0,0,0,4,0,0,0,6,0,0,0,8,0,0,0,12,0,0,0,16,0,0,0,24,0,0,0,32,0,0,0,48,0,0,0,64,0,0,0,96,0,0,0,128,0,0,0,192,0,0,0,0,1,0,0,128,1,0,0,0,2,0,0,0,3,0,0,0,4,0,0,0,6,0,0,0,8,0,0,0,12,0,0,0,16,0,0,0,24,0,0,0,32,0,0,0,48,0,0,0,64,0,0,0,96,0,0,16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,3,0,0,0,7,0,0,0,0,0,0,0,49,46,50,46,53,0,0,0,110,101,101,100,32,100,105,99,116,105,111,110,97,114,121,0,115,116,114,101,97,109,32,101,110,100,0,0,0,0,0,0,0,0,0,0,0,0,0,0,102,105,108,101,32,101,114,114,111,114,0,0,0,0,0,0,115,116,114,101,97,109,32,101,114,114,111,114,0,0,0,0,100,97,116,97,32,101,114,114,111,114,0,0,0,0,0,0,105,110,115,117,102,102,105,99,105,101,110,116,32,109,101,109,111,114,121,0,0,0,0,0,98,117,102,102,101,114,32,101,114,114,111,114,0,0,0,0,105,110,99,111,109,112,97,116,105,98,108,101,32,118,101,114,115,105,111,110,0,0,0,0,184,11,0,0,200,11,0,0,216,11,0,0,224,11,0,0,240,11,0,0,0,12,0,0,16,12,0,0,40,12,0,0,56,12,0,0,216,11,0,0,0,0,0,0,150,48,7,119,44,97,14,238,186,81,9,153,25,196,109,7,143,244,106,112,53,165,99,233,163,149,100,158,50,136,219,14,164,184,220,121,30,233,213,224,136,217,210,151,43,76,182,9,189,124,177,126,7,45,184,231,145,29,191,144,100,16,183,29,242,32,176,106,72,113,185,243,222,65,190,132,125,212,218,26,235,228,221,109,81,181,212,244,199,133,211,131,86,152,108,19,192,168,107,100,122,249,98,253,236,201,101,138,79,92,1,20,217,108,6,99,99,61,15,250,245,13,8,141,200,32,110,59,94,16,105,76,228,65,96,213,114,113,103,162,209,228,3,60,71,212,4,75,253,133,13,210,107,181,10,165,250,168,181,53,108,152,178,66,214,201,187,219,64,249,188,172,227,108,216,50,117,92,223,69,207,13,214,220,89,61,209,171,172,48,217,38,58,0,222,81,128,81,215,200,22,97,208,191,181,244,180,33,35,196,179,86,153,149,186,207,15,165,189,184,158,184,2,40,8,136,5,95,178,217,12,198,36,233,11,177,135,124,111,47,17,76,104,88,171,29,97,193,61,45,102,182,144,65,220,118,6,113,219,1,188,32,210,152,42,16,213,239,137,133,177,113,31,181,182,6,165,228,191,159,51,212,184,232,162,201,7,120,52,249,0,15,142,168,9,150,24,152,14,225,187,13,106,127,45,61,109,8,151,108,100,145,1,92,99,230,244,81,107,107,98,97,108,28,216,48,101,133,78,0,98,242,237,149,6,108,123,165,1,27,193,244,8,130,87,196,15,245,198,217,176,101,80,233,183,18,234,184,190,139,124,136,185,252,223,29,221,98,73,45,218,21,243,124,211,140,101,76,212,251,88,97,178,77,206,81,181,58,116,0,188,163,226,48,187,212,65,165,223,74,215,149,216,61,109,196,209,164,251,244,214,211,106,233,105,67,252,217,110,52,70,136,103,173,208,184,96,218,115,45,4,68,229,29,3,51,95,76,10,170,201,124,13,221,60,113,5,80,170,65,2,39,16,16,11,190,134,32,12,201,37,181,104,87,179,133,111,32,9,212,102,185,159,228,97,206,14,249,222,94,152,201,217,41,34,152,208,176,180,168,215,199,23,61,179,89,129,13,180,46,59,92,189,183,173,108,186,192,32,131,184,237,182,179,191,154,12,226,182,3,154,210,177,116,57,71,213,234,175,119,210,157,21,38,219,4,131,22,220,115,18,11,99,227,132,59,100,148,62,106,109,13,168,90,106,122,11,207,14,228,157,255,9,147,39,174,0,10,177,158,7,125,68,147,15,240,210,163,8,135,104,242,1,30,254,194,6,105,93,87,98,247,203,103,101,128,113,54,108,25,231,6,107,110,118,27,212,254,224,43,211,137,90,122,218,16,204,74,221,103,111,223,185,249,249,239,190,142,67,190,183,23,213,142,176,96,232,163,214,214,126,147,209,161,196,194,216,56,82,242,223,79,241,103,187,209,103,87,188,166,221,6,181,63,75,54,178,72,218,43,13,216,76,27,10,175,246,74,3,54,96,122,4,65,195,239,96,223,85,223,103,168,239,142,110,49,121,190,105,70,140,179,97,203,26,131,102,188,160,210,111,37,54,226,104,82,149,119,12,204,3,71,11,187,185,22,2,34,47,38,5,85,190,59,186,197,40,11,189,178,146,90,180,43,4,106,179,92,167,255,215,194,49,207,208,181,139,158,217,44,29,174,222,91,176,194,100,155,38,242,99,236,156,163,106,117,10,147,109,2,169,6,9,156,63,54,14,235,133,103,7,114,19,87,0,5,130,74,191,149,20,122,184,226,174,43,177,123,56,27,182,12,155,142,210,146,13,190,213,229,183,239,220,124,33,223,219,11,212,210,211,134,66,226,212,241,248,179,221,104,110,131,218,31,205,22,190,129,91,38,185,246,225,119,176,111,119,71,183,24,230,90,8,136,112,106,15,255,202,59,6,102,92,11,1,17,255,158,101,143,105,174,98,248,211,255,107,97,69,207,108,22,120,226,10,160,238,210,13,215,84,131,4,78,194,179,3,57,97,38,103,167,247,22,96,208,77,71,105,73,219,119,110,62,74,106,209,174,220,90,214,217,102,11,223,64,240,59,216,55,83,174,188,169,197,158,187,222,127,207,178,71,233,255,181,48,28,242,189,189,138,194,186,202,48,147,179,83,166,163,180,36,5,54,208,186,147,6,215,205,41,87,222,84,191,103,217,35,46,122,102,179,184,74,97,196,2,27,104,93,148,43,111,42,55,190,11,180,161,142,12,195,27,223,5,90,141,239,2,45,0,0,0,0,65,49,27,25,130,98,54,50,195,83,45,43,4,197,108,100,69,244,119,125,134,167,90,86,199,150,65,79,8,138,217,200,73,187,194,209,138,232,239,250,203,217,244,227,12,79,181,172,77,126,174,181,142,45,131,158,207,28,152,135,81,18,194,74,16,35,217,83,211,112,244,120,146,65,239,97,85,215,174,46,20,230,181,55,215,181,152,28,150,132,131,5,89,152,27,130,24,169,0,155,219,250,45,176,154,203,54,169,93,93,119,230,28,108,108,255,223,63,65,212,158,14,90,205,162,36,132,149,227,21,159,140,32,70,178,167,97,119,169,190,166,225,232,241,231,208,243,232,36,131,222,195,101,178,197,218,170,174,93,93,235,159,70,68,40,204,107,111,105,253,112,118,174,107,49,57,239,90,42,32,44,9,7,11,109,56,28,18,243,54,70,223,178,7,93,198,113,84,112,237,48,101,107,244,247,243,42,187,182,194,49,162,117,145,28,137,52,160,7,144,251,188,159,23,186,141,132,14,121,222,169,37,56,239,178,60,255,121,243,115,190,72,232,106,125,27,197,65,60,42,222,88,5,79,121,240,68,126,98,233,135,45,79,194,198,28,84,219,1,138,21,148,64,187,14,141,131,232,35,166,194,217,56,191,13,197,160,56,76,244,187,33,143,167,150,10,206,150,141,19,9,0,204,92,72,49,215,69,139,98,250,110,202,83,225,119,84,93,187,186,21,108,160,163,214,63,141,136,151,14,150,145,80,152,215,222,17,169,204,199,210,250,225,236,147,203,250,245,92,215,98,114,29,230,121,107,222,181,84,64,159,132,79,89,88,18,14,22,25,35,21,15,218,112,56,36,155,65,35,61,167,107,253,101,230,90,230,124,37,9,203,87,100,56,208,78,163,174,145,1,226,159,138,24,33,204,167,51,96,253,188,42,175,225,36,173,238,208,63,180,45,131,18,159,108,178,9,134,171,36,72,201,234,21,83,208,41,70,126,251,104,119,101,226,246,121,63,47,183,72,36,54,116,27,9,29,53,42,18,4,242,188,83,75,179,141,72,82,112,222,101,121,49,239,126,96,254,243,230,231,191,194,253,254,124,145,208,213,61,160,203,204,250,54,138,131,187,7,145,154,120,84,188,177,57,101,167,168,75,152,131,59,10,169,152,34,201,250,181,9,136,203,174,16,79,93,239,95,14,108,244,70,205,63,217,109,140,14,194,116,67,18,90,243,2,35,65,234,193,112,108,193,128,65,119,216,71,215,54,151,6,230,45,142,197,181,0,165,132,132,27,188,26,138,65,113,91,187,90,104,152,232,119,67,217,217,108,90,30,79,45,21,95,126,54,12,156,45,27,39,221,28,0,62,18,0,152,185,83,49,131,160,144,98,174,139,209,83,181,146,22,197,244,221,87,244,239,196,148,167,194,239,213,150,217,246,233,188,7,174,168,141,28,183,107,222,49,156,42,239,42,133,237,121,107,202,172,72,112,211,111,27,93,248,46,42,70,225,225,54,222,102,160,7,197,127,99,84,232,84,34,101,243,77,229,243,178,2,164,194,169,27,103,145,132,48,38,160,159,41,184,174,197,228,249,159,222,253,58,204,243,214,123,253,232,207,188,107,169,128,253,90,178,153,62,9,159,178,127,56,132,171,176,36,28,44,241,21,7,53,50,70,42,30,115,119,49,7,180,225,112,72,245,208,107,81,54,131,70,122,119,178,93,99,78,215,250,203,15,230,225,210,204,181,204,249,141,132,215,224,74,18,150,175,11,35,141,182,200,112,160,157,137,65,187,132,70,93,35,3,7,108,56,26,196,63,21,49,133,14,14,40,66,152,79,103,3,169,84,126,192,250,121,85,129,203,98,76,31,197,56,129,94,244,35,152,157,167,14,179,220,150,21,170,27,0,84,229,90,49,79,252,153,98,98,215,216,83,121,206,23,79,225,73,86,126,250,80,149,45,215,123,212,28,204,98,19,138,141,45,82,187,150,52,145,232,187,31,208,217,160,6,236,243,126,94,173,194,101,71,110,145,72,108,47,160,83,117,232,54,18,58,169,7,9,35,106,84,36,8,43,101,63,17,228,121,167,150,165,72,188,143,102,27,145,164,39,42,138,189,224,188,203,242,161,141,208,235,98,222,253,192,35,239,230,217,189,225,188,20,252,208,167,13,63,131,138,38,126,178,145,63,185,36,208,112,248,21,203,105,59,70,230,66,122,119,253,91,181,107,101,220,244,90,126,197,55,9,83,238,118,56,72,247,177,174,9,184,240,159,18,161,51,204,63,138,114,253,36,147,0,0,0,0,55,106,194,1,110,212,132,3,89,190,70,2,220,168,9,7,235,194,203,6,178,124,141,4,133,22,79,5,184,81,19,14,143,59,209,15,214,133,151,13,225,239,85,12,100,249,26,9,83,147,216,8,10,45,158,10,61,71,92,11,112,163,38,28,71,201,228,29,30,119,162,31,41,29,96,30,172,11,47,27,155,97,237,26,194,223,171,24,245,181,105,25,200,242,53,18,255,152,247,19,166,38,177,17,145,76,115,16,20,90,60,21,35,48,254,20,122,142,184,22,77,228,122,23,224,70,77,56,215,44,143,57,142,146,201,59,185,248,11,58,60,238,68,63,11,132,134,62,82,58,192,60,101,80,2,61,88,23,94,54,111,125,156,55,54,195,218,53,1,169,24,52,132,191,87,49,179,213,149,48,234,107,211,50,221,1,17,51,144,229,107,36,167,143,169,37,254,49,239,39,201,91,45,38,76,77,98,35,123,39,160,34,34,153,230,32,21,243,36,33,40,180,120,42,31,222,186,43,70,96,252,41,113,10,62,40,244,28,113,45,195,118,179,44,154,200,245,46,173,162,55,47,192,141,154,112,247,231,88,113,174,89,30,115,153,51,220,114,28,37,147,119,43,79,81,118,114,241,23,116,69,155,213,117,120,220,137,126,79,182,75,127,22,8,13,125,33,98,207,124,164,116,128,121,147,30,66,120,202,160,4,122,253,202,198,123,176,46,188,108,135,68,126,109,222,250,56,111,233,144,250,110,108,134,181,107,91,236,119,106,2,82,49,104,53,56,243,105,8,127,175,98,63,21,109,99,102,171,43,97,81,193,233,96,212,215,166,101,227,189,100,100,186,3,34,102,141,105,224,103,32,203,215,72,23,161,21,73,78,31,83,75,121,117,145,74,252,99,222,79,203,9,28,78,146,183,90,76,165,221,152,77,152,154,196,70,175,240,6,71,246,78,64,69,193,36,130,68,68,50,205,65,115,88,15,64,42,230,73,66,29,140,139,67,80,104,241,84,103,2,51,85,62,188,117,87,9,214,183,86,140,192,248,83,187,170,58,82,226,20,124,80,213,126,190,81,232,57,226,90,223,83,32,91,134,237,102,89,177,135,164,88,52,145,235,93,3,251,41,92,90,69,111,94,109,47,173,95,128,27,53,225,183,113,247,224,238,207,177,226,217,165,115,227,92,179,60,230,107,217,254,231,50,103,184,229,5,13,122,228,56,74,38,239,15,32,228,238,86,158,162,236,97,244,96,237,228,226,47,232,211,136,237,233,138,54,171,235,189,92,105,234,240,184,19,253,199,210,209,252,158,108,151,254,169,6,85,255,44,16,26,250,27,122,216,251,66,196,158,249,117,174,92,248,72,233,0,243,127,131,194,242,38,61,132,240,17,87,70,241,148,65,9,244,163,43,203,245,250,149,141,247,205,255,79,246,96,93,120,217,87,55,186,216,14,137,252,218,57,227,62,219,188,245,113,222,139,159,179,223,210,33,245,221,229,75,55,220,216,12,107,215,239,102,169,214,182,216,239,212,129,178,45,213,4,164,98,208,51,206,160,209,106,112,230,211,93,26,36,210,16,254,94,197,39,148,156,196,126,42,218,198,73,64,24,199,204,86,87,194,251,60,149,195,162,130,211,193,149,232,17,192,168,175,77,203,159,197,143,202,198,123,201,200,241,17,11,201,116,7,68,204,67,109,134,205,26,211,192,207,45,185,2,206,64,150,175,145,119,252,109,144,46,66,43,146,25,40,233,147,156,62,166,150,171,84,100,151,242,234,34,149,197,128,224,148,248,199,188,159,207,173,126,158,150,19,56,156,161,121,250,157,36,111,181,152,19,5,119,153,74,187,49,155,125,209,243,154,48,53,137,141,7,95,75,140,94,225,13,142,105,139,207,143,236,157,128,138,219,247,66,139,130,73,4,137,181,35,198,136,136,100,154,131,191,14,88,130,230,176,30,128,209,218,220,129,84,204,147,132,99,166,81,133,58,24,23,135,13,114,213,134,160,208,226,169,151,186,32,168,206,4,102,170,249,110,164,171,124,120,235,174,75,18,41,175,18,172,111,173,37,198,173,172,24,129,241,167,47,235,51,166,118,85,117,164,65,63,183,165,196,41,248,160,243,67,58,161,170,253,124,163,157,151,190,162,208,115,196,181,231,25,6,180,190,167,64,182,137,205,130,183,12,219,205,178,59,177,15,179,98,15,73,177,85,101,139,176,104,34,215,187,95,72,21,186,6,246,83,184,49,156,145,185,180,138,222,188,131,224,28,189,218,94,90,191,237,52,152,190,0,0,0,0,101,103,188,184,139,200,9,170,238,175,181,18,87,151,98,143,50,240,222,55,220,95,107,37,185,56,215,157,239,40,180,197,138,79,8,125,100,224,189,111,1,135,1,215,184,191,214,74,221,216,106,242,51,119,223,224,86,16,99,88,159,87,25,80,250,48,165,232,20,159,16,250,113,248,172,66,200,192,123,223,173,167,199,103,67,8,114,117,38,111,206,205,112,127,173,149,21,24,17,45,251,183,164,63,158,208,24,135,39,232,207,26,66,143,115,162,172,32,198,176,201,71,122,8,62,175,50,160,91,200,142,24,181,103,59,10,208,0,135,178,105,56,80,47,12,95,236,151,226,240,89,133,135,151,229,61,209,135,134,101,180,224,58,221,90,79,143,207,63,40,51,119,134,16,228,234,227,119,88,82,13,216,237,64,104,191,81,248,161,248,43,240,196,159,151,72,42,48,34,90,79,87,158,226,246,111,73,127,147,8,245,199,125,167,64,213,24,192,252,109,78,208,159,53,43,183,35,141,197,24,150,159,160,127,42,39,25,71,253,186,124,32,65,2,146,143,244,16,247,232,72,168,61,88,20,155,88,63,168,35,182,144,29,49,211,247,161,137,106,207,118,20,15,168,202,172,225,7,127,190,132,96,195,6,210,112,160,94,183,23,28,230,89,184,169,244,60,223,21,76,133,231,194,209,224,128,126,105,14,47,203,123,107,72,119,195,162,15,13,203,199,104,177,115,41,199,4,97,76,160,184,217,245,152,111,68,144,255,211,252,126,80,102,238,27,55,218,86,77,39,185,14,40,64,5,182,198,239,176,164,163,136,12,28,26,176,219,129,127,215,103,57,145,120,210,43,244,31,110,147,3,247,38,59,102,144,154,131,136,63,47,145,237,88,147,41,84,96,68,180,49,7,248,12,223,168,77,30,186,207,241,166,236,223,146,254,137,184,46,70,103,23,155,84,2,112,39,236,187,72,240,113,222,47,76,201,48,128,249,219,85,231,69,99,156,160,63,107,249,199,131,211,23,104,54,193,114,15,138,121,203,55,93,228,174,80,225,92,64,255,84,78,37,152,232,246,115,136,139,174,22,239,55,22,248,64,130,4,157,39,62,188,36,31,233,33,65,120,85,153,175,215,224,139,202,176,92,51,59,182,89,237,94,209,229,85,176,126,80,71,213,25,236,255,108,33,59,98,9,70,135,218,231,233,50,200,130,142,142,112,212,158,237,40,177,249,81,144,95,86,228,130,58,49,88,58,131,9,143,167,230,110,51,31,8,193,134,13,109,166,58,181,164,225,64,189,193,134,252,5,47,41,73,23,74,78,245,175,243,118,34,50,150,17,158,138,120,190,43,152,29,217,151,32,75,201,244,120,46,174,72,192,192,1,253,210,165,102,65,106,28,94,150,247,121,57,42,79,151,150,159,93,242,241,35,229,5,25,107,77,96,126,215,245,142,209,98,231,235,182,222,95,82,142,9,194,55,233,181,122,217,70,0,104,188,33,188,208,234,49,223,136,143,86,99,48,97,249,214,34,4,158,106,154,189,166,189,7,216,193,1,191,54,110,180,173,83,9,8,21,154,78,114,29,255,41,206,165,17,134,123,183,116,225,199,15,205,217,16,146,168,190,172,42,70,17,25,56,35,118,165,128,117,102,198,216,16,1,122,96,254,174,207,114,155,201,115,202,34,241,164,87,71,150,24,239,169,57,173,253,204,94,17,69,6,238,77,118,99,137,241,206,141,38,68,220,232,65,248,100,81,121,47,249,52,30,147,65,218,177,38,83,191,214,154,235,233,198,249,179,140,161,69,11,98,14,240,25,7,105,76,161,190,81,155,60,219,54,39,132,53,153,146,150,80,254,46,46,153,185,84,38,252,222,232,158,18,113,93,140,119,22,225,52,206,46,54,169,171,73,138,17,69,230,63,3,32,129,131,187,118,145,224,227,19,246,92,91,253,89,233,73,152,62,85,241,33,6,130,108,68,97,62,212,170,206,139,198,207,169,55,126,56,65,127,214,93,38,195,110,179,137,118,124,214,238,202,196,111,214,29,89,10,177,161,225,228,30,20,243,129,121,168,75,215,105,203,19,178,14,119,171,92,161,194,185,57,198,126,1,128,254,169,156,229,153,21,36,11,54,160,54,110,81,28,142,167,22,102,134,194,113,218,62,44,222,111,44,73,185,211,148,240,129,4,9,149,230,184,177,123,73,13,163,30,46,177,27,72,62,210,67,45,89,110,251,195,246,219,233,166,145,103,81,31,169,176,204,122,206,12,116,148,97,185,102,241,6,5,222,0,0,0,0,119,7,48,150,238,14,97,44,153,9,81,186,7,109,196,25,112,106,244,143,233,99,165,53,158,100,149,163,14,219,136,50,121,220,184,164,224,213,233,30,151,210,217,136,9,182,76,43,126,177,124,189,231,184,45,7,144,191,29,145,29,183,16,100,106,176,32,242,243,185,113,72,132,190,65,222,26,218,212,125,109,221,228,235,244,212,181,81,131,211,133,199,19,108,152,86,100,107,168,192,253,98,249,122,138,101,201,236,20,1,92,79,99,6,108,217,250,15,61,99,141,8,13,245,59,110,32,200,76,105,16,94,213,96,65,228,162,103,113,114,60,3,228,209,75,4,212,71,210,13,133,253,165,10,181,107,53,181,168,250,66,178,152,108,219,187,201,214,172,188,249,64,50,216,108,227,69,223,92,117,220,214,13,207,171,209,61,89,38,217,48,172,81,222,0,58,200,215,81,128,191,208,97,22,33,180,244,181,86,179,196,35,207,186,149,153,184,189,165,15,40,2,184,158,95,5,136,8,198,12,217,178,177,11,233,36,47,111,124,135,88,104,76,17,193,97,29,171,182,102,45,61,118,220,65,144,1,219,113,6,152,210,32,188,239,213,16,42,113,177,133,137,6,182,181,31,159,191,228,165,232,184,212,51,120,7,201,162,15,0,249,52,150,9,168,142,225,14,152,24,127,106,13,187,8,109,61,45,145,100,108,151,230,99,92,1,107,107,81,244,28,108,97,98,133,101,48,216,242,98,0,78,108,6,149,237,27,1,165,123,130,8,244,193,245,15,196,87,101,176,217,198,18,183,233,80,139,190,184,234,252,185,136,124,98,221,29,223,21,218,45,73,140,211,124,243,251,212,76,101,77,178,97,88,58,181,81,206,163,188,0,116,212,187,48,226,74,223,165,65,61,216,149,215,164,209,196,109,211,214,244,251,67,105,233,106,52,110,217,252,173,103,136,70,218,96,184,208,68,4,45,115,51,3,29,229,170,10,76,95,221,13,124,201,80,5,113,60,39,2,65,170,190,11,16,16,201,12,32,134,87,104,181,37,32,111,133,179,185,102,212,9,206,97,228,159,94,222,249,14,41,217,201,152,176,208,152,34,199,215,168,180,89,179,61,23,46,180,13,129,183,189,92,59,192,186,108,173,237,184,131,32,154,191,179,182,3,182,226,12,116,177,210,154,234,213,71,57,157,210,119,175,4,219,38,21,115,220,22,131,227,99,11,18,148,100,59,132,13,109,106,62,122,106,90,168,228,14,207,11,147,9,255,157,10,0,174,39,125,7,158,177,240,15,147,68,135,8,163,210,30,1,242,104,105,6,194,254,247,98,87,93,128,101,103,203,25,108,54,113,110,107,6,231,254,212,27,118,137,211,43,224,16,218,122,90,103,221,74,204,249,185,223,111,142,190,239,249,23,183,190,67,96,176,142,213,214,214,163,232,161,209,147,126,56,216,194,196,79,223,242,82,209,187,103,241,166,188,87,103,63,181,6,221,72,178,54,75,216,13,43,218,175,10,27,76,54,3,74,246,65,4,122,96,223,96,239,195,168,103,223,85,49,110,142,239,70,105,190,121,203,97,179,140,188,102,131,26,37,111,210,160,82,104,226,54,204,12,119,149,187,11,71,3,34,2,22,185,85,5,38,47,197,186,59,190,178,189,11,40,43,180,90,146,92,179,106,4,194,215,255,167,181,208,207,49,44,217,158,139,91,222,174,29,155,100,194,176,236,99,242,38,117,106,163,156,2,109,147,10,156,9,6,169,235,14,54,63,114,7,103,133,5,0,87,19,149,191,74,130,226,184,122,20,123,177,43,174,12,182,27,56,146,210,142,155,229,213,190,13,124,220,239,183,11,219,223,33,134,211,210,212,241,212,226,66,104,221,179,248,31,218,131,110,129,190,22,205,246,185,38,91,111,176,119,225,24,183,71,119,136,8,90,230,255,15,106,112,102,6,59,202,17,1,11,92,143,101,158,255,248,98,174,105,97,107,255,211,22,108,207,69,160,10,226,120,215,13,210,238,78,4,131,84,57,3,179,194,167,103,38,97,208,96,22,247,73,105,71,77,62,110,119,219,174,209,106,74,217,214,90,220,64,223,11,102,55,216,59,240,169,188,174,83,222,187,158,197,71,178,207,127,48,181,255,233,189,189,242,28,202,186,194,138,83,179,147,48,36,180,163,166,186,208,54,5,205,215,6,147,84,222,87,41,35,217,103,191,179,102,122,46,196,97,74,184,93,104,27,2,42,111,43,148,180,11,190,55,195,12,142,161,90,5,223,27,45,2,239,141,0,0,0,0,25,27,49,65,50,54,98,130,43,45,83,195,100,108,197,4,125,119,244,69,86,90,167,134,79,65,150,199,200,217,138,8,209,194,187,73,250,239,232,138,227,244,217,203,172,181,79,12,181,174,126,77,158,131,45,142,135,152,28,207,74,194,18,81,83,217,35,16,120,244,112,211,97,239,65,146,46,174,215,85,55,181,230,20,28,152,181,215,5,131,132,150,130,27,152,89,155,0,169,24,176,45,250,219,169,54,203,154,230,119,93,93,255,108,108,28,212,65,63,223,205,90,14,158,149,132,36,162,140,159,21,227,167,178,70,32,190,169,119,97,241,232,225,166,232,243,208,231,195,222,131,36,218,197,178,101,93,93,174,170,68,70,159,235,111,107,204,40,118,112,253,105,57,49,107,174,32,42,90,239,11,7,9,44,18,28,56,109,223,70,54,243,198,93,7,178,237,112,84,113,244,107,101,48,187,42,243,247,162,49,194,182,137,28,145,117,144,7,160,52,23,159,188,251,14,132,141,186,37,169,222,121,60,178,239,56,115,243,121,255,106,232,72,190,65,197,27,125,88,222,42,60,240,121,79,5,233,98,126,68,194,79,45,135,219,84,28,198,148,21,138,1,141,14,187,64,166,35,232,131,191,56,217,194,56,160,197,13,33,187,244,76,10,150,167,143,19,141,150,206,92,204,0,9,69,215,49,72,110,250,98,139,119,225,83,202,186,187,93,84,163,160,108,21,136,141,63,214,145,150,14,151,222,215,152,80,199,204,169,17,236,225,250,210,245,250,203,147,114,98,215,92,107,121,230,29,64,84,181,222,89,79,132,159,22,14,18,88,15,21,35,25,36,56,112,218,61,35,65,155,101,253,107,167,124,230,90,230,87,203,9,37,78,208,56,100,1,145,174,163,24,138,159,226,51,167,204,33,42,188,253,96,173,36,225,175,180,63,208,238,159,18,131,45,134,9,178,108,201,72,36,171,208,83,21,234,251,126,70,41,226,101,119,104,47,63,121,246,54,36,72,183,29,9,27,116,4,18,42,53,75,83,188,242,82,72,141,179,121,101,222,112,96,126,239,49,231,230,243,254,254,253,194,191,213,208,145,124,204,203,160,61,131,138,54,250,154,145,7,187,177,188,84,120,168,167,101,57,59,131,152,75,34,152,169,10,9,181,250,201,16,174,203,136,95,239,93,79,70,244,108,14,109,217,63,205,116,194,14,140,243,90,18,67,234,65,35,2,193,108,112,193,216,119,65,128,151,54,215,71,142,45,230,6,165,0,181,197,188,27,132,132,113,65,138,26,104,90,187,91,67,119,232,152,90,108,217,217,21,45,79,30,12,54,126,95,39,27,45,156,62,0,28,221,185,152,0,18,160,131,49,83,139,174,98,144,146,181,83,209,221,244,197,22,196,239,244,87,239,194,167,148,246,217,150,213,174,7,188,233,183,28,141,168,156,49,222,107,133,42,239,42,202,107,121,237,211,112,72,172,248,93,27,111,225,70,42,46,102,222,54,225,127,197,7,160,84,232,84,99,77,243,101,34,2,178,243,229,27,169,194,164,48,132,145,103,41,159,160,38,228,197,174,184,253,222,159,249,214,243,204,58,207,232,253,123,128,169,107,188,153,178,90,253,178,159,9,62,171,132,56,127,44,28,36,176,53,7,21,241,30,42,70,50,7,49,119,115,72,112,225,180,81,107,208,245,122,70,131,54,99,93,178,119,203,250,215,78,210,225,230,15,249,204,181,204,224,215,132,141,175,150,18,74,182,141,35,11,157,160,112,200,132,187,65,137,3,35,93,70,26,56,108,7,49,21,63,196,40,14,14,133,103,79,152,66,126,84,169,3,85,121,250,192,76,98,203,129,129,56,197,31,152,35,244,94,179,14,167,157,170,21,150,220,229,84,0,27,252,79,49,90,215,98,98,153,206,121,83,216,73,225,79,23,80,250,126,86,123,215,45,149,98,204,28,212,45,141,138,19,52,150,187,82,31,187,232,145,6,160,217,208,94,126,243,236,71,101,194,173,108,72,145,110,117,83,160,47,58,18,54,232,35,9,7,169,8,36,84,106,17,63,101,43,150,167,121,228,143,188,72,165,164,145,27,102,189,138,42,39,242,203,188,224,235,208,141,161,192,253,222,98,217,230,239,35,20,188,225,189,13,167,208,252,38,138,131,63,63,145,178,126,112,208,36,185,105,203,21,248,66,230,70,59,91,253,119,122,220,101,107,181,197,126,90,244,238,83,9,55,247,72,56,118,184,9,174,177,161,18,159,240,138,63,204,51,147,36,253,114,0,0,0,0,1,194,106,55,3,132,212,110,2,70,190,89,7,9,168,220,6,203,194,235,4,141,124,178,5,79,22,133,14,19,81,184,15,209,59,143,13,151,133,214,12,85,239,225,9,26,249,100,8,216,147,83,10,158,45,10,11,92,71,61,28,38,163,112,29,228,201,71,31,162,119,30,30,96,29,41,27,47,11,172,26,237,97,155,24,171,223,194,25,105,181,245,18,53,242,200,19,247,152,255,17,177,38,166,16,115,76,145,21,60,90,20,20,254,48,35,22,184,142,122,23,122,228,77,56,77,70,224,57,143,44,215,59,201,146,142,58,11,248,185,63,68,238,60,62,134,132,11,60,192,58,82,61,2,80,101,54,94,23,88,55,156,125,111,53,218,195,54,52,24,169,1,49,87,191,132,48,149,213,179,50,211,107,234,51,17,1,221,36,107,229,144,37,169,143,167,39,239,49,254,38,45,91,201,35,98,77,76,34,160,39,123,32,230,153,34,33,36,243,21,42,120,180,40,43,186,222,31,41,252,96,70,40,62,10,113,45,113,28,244,44,179,118,195,46,245,200,154,47,55,162,173,112,154,141,192,113,88,231,247,115,30,89,174,114,220,51,153,119,147,37,28,118,81,79,43,116,23,241,114,117,213,155,69,126,137,220,120,127,75,182,79,125,13,8,22,124,207,98,33,121,128,116,164,120,66,30,147,122,4,160,202,123,198,202,253,108,188,46,176,109,126,68,135,111,56,250,222,110,250,144,233,107,181,134,108,106,119,236,91,104,49,82,2,105,243,56,53,98,175,127,8,99,109,21,63,97,43,171,102,96,233,193,81,101,166,215,212,100,100,189,227,102,34,3,186,103,224,105,141,72,215,203,32,73,21,161,23,75,83,31,78,74,145,117,121,79,222,99,252,78,28,9,203,76,90,183,146,77,152,221,165,70,196,154,152,71,6,240,175,69,64,78,246,68,130,36,193,65,205,50,68,64,15,88,115,66,73,230,42,67,139,140,29,84,241,104,80,85,51,2,103,87,117,188,62,86,183,214,9,83,248,192,140,82,58,170,187,80,124,20,226,81,190,126,213,90,226,57,232,91,32,83,223,89,102,237,134,88,164,135,177,93,235,145,52,92,41,251,3,94,111,69,90,95,173,47,109,225,53,27,128,224,247,113,183,226,177,207,238,227,115,165,217,230,60,179,92,231,254,217,107,229,184,103,50,228,122,13,5,239,38,74,56,238,228,32,15,236,162,158,86,237,96,244,97,232,47,226,228,233,237,136,211,235,171,54,138,234,105,92,189,253,19,184,240,252,209,210,199,254,151,108,158,255,85,6,169,250,26,16,44,251,216,122,27,249,158,196,66,248,92,174,117,243,0,233,72,242,194,131,127,240,132,61,38,241,70,87,17,244,9,65,148,245,203,43,163,247,141,149,250,246,79,255,205,217,120,93,96,216,186,55,87,218,252,137,14,219,62,227,57,222,113,245,188,223,179,159,139,221,245,33,210,220,55,75,229,215,107,12,216,214,169,102,239,212,239,216,182,213,45,178,129,208,98,164,4,209,160,206,51,211,230,112,106,210,36,26,93,197,94,254,16,196,156,148,39,198,218,42,126,199,24,64,73,194,87,86,204,195,149,60,251,193,211,130,162,192,17,232,149,203,77,175,168,202,143,197,159,200,201,123,198,201,11,17,241,204,68,7,116,205,134,109,67,207,192,211,26,206,2,185,45,145,175,150,64,144,109,252,119,146,43,66,46,147,233,40,25,150,166,62,156,151,100,84,171,149,34,234,242,148,224,128,197,159,188,199,248,158,126,173,207,156,56,19,150,157,250,121,161,152,181,111,36,153,119,5,19,155,49,187,74,154,243,209,125,141,137,53,48,140,75,95,7,142,13,225,94,143,207,139,105,138,128,157,236,139,66,247,219,137,4,73,130,136,198,35,181,131,154,100,136,130,88,14,191,128,30,176,230,129,220,218,209,132,147,204,84,133,81,166,99,135,23,24,58,134,213,114,13,169,226,208,160,168,32,186,151,170,102,4,206,171,164,110,249], "i8", ALLOC_NONE, Runtime.GLOBAL_BASE);
+/* memory initializer */ allocate([174,235,120,124,175,41,18,75,173,111,172,18,172,173,198,37,167,241,129,24,166,51,235,47,164,117,85,118,165,183,63,65,160,248,41,196,161,58,67,243,163,124,253,170,162,190,151,157,181,196,115,208,180,6,25,231,182,64,167,190,183,130,205,137,178,205,219,12,179,15,177,59,177,73,15,98,176,139,101,85,187,215,34,104,186,21,72,95,184,83,246,6,185,145,156,49,188,222,138,180,189,28,224,131,191,90,94,218,190,152,52,237,0,0,0,0,184,188,103,101,170,9,200,139,18,181,175,238,143,98,151,87,55,222,240,50,37,107,95,220,157,215,56,185,197,180,40,239,125,8,79,138,111,189,224,100,215,1,135,1,74,214,191,184,242,106,216,221,224,223,119,51,88,99,16,86,80,25,87,159,232,165,48,250,250,16,159,20,66,172,248,113,223,123,192,200,103,199,167,173,117,114,8,67,205,206,111,38,149,173,127,112,45,17,24,21,63,164,183,251,135,24,208,158,26,207,232,39,162,115,143,66,176,198,32,172,8,122,71,201,160,50,175,62,24,142,200,91,10,59,103,181,178,135,0,208,47,80,56,105,151,236,95,12,133,89,240,226,61,229,151,135,101,134,135,209,221,58,224,180,207,143,79,90,119,51,40,63,234,228,16,134,82,88,119,227,64,237,216,13,248,81,191,104,240,43,248,161,72,151,159,196,90,34,48,42,226,158,87,79,127,73,111,246,199,245,8,147,213,64,167,125,109,252,192,24,53,159,208,78,141,35,183,43,159,150,24,197,39,42,127,160,186,253,71,25,2,65,32,124,16,244,143,146,168,72,232,247,155,20,88,61,35,168,63,88,49,29,144,182,137,161,247,211,20,118,207,106,172,202,168,15,190,127,7,225,6,195,96,132,94,160,112,210,230,28,23,183,244,169,184,89,76,21,223,60,209,194,231,133,105,126,128,224,123,203,47,14,195,119,72,107,203,13,15,162,115,177,104,199,97,4,199,41,217,184,160,76,68,111,152,245,252,211,255,144,238,102,80,126,86,218,55,27,14,185,39,77,182,5,64,40,164,176,239,198,28,12,136,163,129,219,176,26,57,103,215,127,43,210,120,145,147,110,31,244,59,38,247,3,131,154,144,102,145,47,63,136,41,147,88,237,180,68,96,84,12,248,7,49,30,77,168,223,166,241,207,186,254,146,223,236,70,46,184,137,84,155,23,103,236,39,112,2,113,240,72,187,201,76,47,222,219,249,128,48,99,69,231,85,107,63,160,156,211,131,199,249,193,54,104,23,121,138,15,114,228,93,55,203,92,225,80,174,78,84,255,64,246,232,152,37,174,139,136,115,22,55,239,22,4,130,64,248,188,62,39,157,33,233,31,36,153,85,120,65,139,224,215,175,51,92,176,202,237,89,182,59,85,229,209,94,71,80,126,176,255,236,25,213,98,59,33,108,218,135,70,9,200,50,233,231,112,142,142,130,40,237,158,212,144,81,249,177,130,228,86,95,58,88,49,58,167,143,9,131,31,51,110,230,13,134,193,8,181,58,166,109,189,64,225,164,5,252,134,193,23,73,41,47,175,245,78,74,50,34,118,243,138,158,17,150,152,43,190,120,32,151,217,29,120,244,201,75,192,72,174,46,210,253,1,192,106,65,102,165,247,150,94,28,79,42,57,121,93,159,150,151,229,35,241,242,77,107,25,5,245,215,126,96,231,98,209,142,95,222,182,235,194,9,142,82,122,181,233,55,104,0,70,217,208,188,33,188,136,223,49,234,48,99,86,143,34,214,249,97,154,106,158,4,7,189,166,189,191,1,193,216,173,180,110,54,21,8,9,83,29,114,78,154,165,206,41,255,183,123,134,17,15,199,225,116,146,16,217,205,42,172,190,168,56,25,17,70,128,165,118,35,216,198,102,117,96,122,1,16,114,207,174,254,202,115,201,155,87,164,241,34,239,24,150,71,253,173,57,169,69,17,94,204,118,77,238,6,206,241,137,99,220,68,38,141,100,248,65,232,249,47,121,81,65,147,30,52,83,38,177,218,235,154,214,191,179,249,198,233,11,69,161,140,25,240,14,98,161,76,105,7,60,155,81,190,132,39,54,219,150,146,153,53,46,46,254,80,38,84,185,153,158,232,222,252,140,93,113,18,52,225,22,119,169,54,46,206,17,138,73,171,3,63,230,69,187,131,129,32,227,224,145,118,91,92,246,19,73,233,89,253,241,85,62,152,108,130,6,33,212,62,97,68,198,139,206,170,126,55,169,207,214,127,65,56,110,195,38,93,124,118,137,179,196,202,238,214,89,29,214,111,225,161,177,10,243,20,30,228,75,168,121,129,19,203,105,215,171,119,14,178,185,194,161,92,1,126,198,57,156,169,254,128,36,21,153,229,54,160,54,11,142,28,81,110,134,102,22,167,62,218,113,194,44,111,222,44,148,211,185,73,9,4,129,240,177,184,230,149,163,13,73,123,27,177,46,30,67,210,62,72,251,110,89,45,233,219,246,195,81,103,145,166,204,176,169,31,116,12,206,122,102,185,97,148,222,5,6,241,16,0,17,0,18,0,0,0,8,0,7,0,9,0,6,0,10,0,5,0,11,0,4,0,12,0,3,0,13,0,2,0,14,0,1,0,15,0,0,0,105,110,99,111,114,114,101,99,116,32,104,101,97,100,101,114,32,99,104,101,99,107,0,0,117,110,107,110,111,119,110,32,99,111,109,112,114,101,115,115,105,111,110,32,109,101,116,104,111,100,0,0,0,0,0,0,105,110,118,97,108,105,100,32,119,105,110,100,111,119,32,115,105,122,101,0,0,0,0,0,117,110,107,110,111,119,110,32,104,101,97,100,101,114,32,102,108,97,103,115,32,115,101,116,0,0,0,0,0,0,0,0,104,101,97,100,101,114,32,99,114,99,32,109,105,115,109,97,116,99,104,0,0,0,0,0,105,110,118,97,108,105,100,32,98,108,111,99,107,32,116,121,112,101,0,0,0,0,0,0,105,110,118,97,108,105,100,32,115,116,111,114,101,100,32,98,108,111,99,107,32,108,101,110,103,116,104,115,0,0,0,0,116,111,111,32,109,97,110,121,32,108,101,110,103,116,104,32,111,114,32,100,105,115,116,97,110,99,101,32,115,121,109,98,111,108,115,0,0,0,0,0,105,110,118,97,108,105,100,32,99,111,100,101,32,108,101,110,103,116,104,115,32,115,101,116,0,0,0,0,0,0,0,0,105,110,118,97,108,105,100,32,98,105,116,32,108,101,110,103,116,104,32,114,101,112,101,97,116,0,0,0,0,0,0,0,105,110,118,97,108,105,100,32,99,111,100,101,32,45,45,32,109,105,115,115,105,110,103,32,101,110,100,45,111,102,45,98,108,111,99,107,0,0,0,0,105,110,118,97,108,105,100,32,108,105,116,101,114,97,108,47,108,101,110,103,116,104,115,32,115,101,116,0,0,0,0,0,105,110,118,97,108,105,100,32,100,105,115,116,97,110,99,101,115,32,115,101,116,0,0,0,105,110,118,97,108,105,100,32,108,105,116,101,114,97,108,47,108,101,110,103,116,104,32,99,111,100,101,0,0,0,0,0,105,110,118,97,108,105,100,32,100,105,115,116,97,110,99,101,32,99,111,100,101,0,0,0,105,110,118,97,108,105,100,32,100,105,115,116,97,110,99,101,32,116,111,111,32,102,97,114,32,98,97,99,107,0,0,0,105,110,99,111,114,114,101,99,116,32,100,97,116,97,32,99,104,101,99,107,0,0,0,0,105,110,99,111,114,114,101,99,116,32,108,101,110,103,116,104,32,99,104,101,99,107,0,0,96,7,0,0,0,8,80,0,0,8,16,0,20,8,115,0,18,7,31,0,0,8,112,0,0,8,48,0,0,9,192,0,16,7,10,0,0,8,96,0,0,8,32,0,0,9,160,0,0,8,0,0,0,8,128,0,0,8,64,0,0,9,224,0,16,7,6,0,0,8,88,0,0,8,24,0,0,9,144,0,19,7,59,0,0,8,120,0,0,8,56,0,0,9,208,0,17,7,17,0,0,8,104,0,0,8,40,0,0,9,176,0,0,8,8,0,0,8,136,0,0,8,72,0,0,9,240,0,16,7,4,0,0,8,84,0,0,8,20,0,21,8,227,0,19,7,43,0,0,8,116,0,0,8,52,0,0,9,200,0,17,7,13,0,0,8,100,0,0,8,36,0,0,9,168,0,0,8,4,0,0,8,132,0,0,8,68,0,0,9,232,0,16,7,8,0,0,8,92,0,0,8,28,0,0,9,152,0,20,7,83,0,0,8,124,0,0,8,60,0,0,9,216,0,18,7,23,0,0,8,108,0,0,8,44,0,0,9,184,0,0,8,12,0,0,8,140,0,0,8,76,0,0,9,248,0,16,7,3,0,0,8,82,0,0,8,18,0,21,8,163,0,19,7,35,0,0,8,114,0,0,8,50,0,0,9,196,0,17,7,11,0,0,8,98,0,0,8,34,0,0,9,164,0,0,8,2,0,0,8,130,0,0,8,66,0,0,9,228,0,16,7,7,0,0,8,90,0,0,8,26,0,0,9,148,0,20,7,67,0,0,8,122,0,0,8,58,0,0,9,212,0,18,7,19,0,0,8,106,0,0,8,42,0,0,9,180,0,0,8,10,0,0,8,138,0,0,8,74,0,0,9,244,0,16,7,5,0,0,8,86,0,0,8,22,0,64,8,0,0,19,7,51,0,0,8,118,0,0,8,54,0,0,9,204,0,17,7,15,0,0,8,102,0,0,8,38,0,0,9,172,0,0,8,6,0,0,8,134,0,0,8,70,0,0,9,236,0,16,7,9,0,0,8,94,0,0,8,30,0,0,9,156,0,20,7,99,0,0,8,126,0,0,8,62,0,0,9,220,0,18,7,27,0,0,8,110,0,0,8,46,0,0,9,188,0,0,8,14,0,0,8,142,0,0,8,78,0,0,9,252,0,96,7,0,0,0,8,81,0,0,8,17,0,21,8,131,0,18,7,31,0,0,8,113,0,0,8,49,0,0,9,194,0,16,7,10,0,0,8,97,0,0,8,33,0,0,9,162,0,0,8,1,0,0,8,129,0,0,8,65,0,0,9,226,0,16,7,6,0,0,8,89,0,0,8,25,0,0,9,146,0,19,7,59,0,0,8,121,0,0,8,57,0,0,9,210,0,17,7,17,0,0,8,105,0,0,8,41,0,0,9,178,0,0,8,9,0,0,8,137,0,0,8,73,0,0,9,242,0,16,7,4,0,0,8,85,0,0,8,21,0,16,8,2,1,19,7,43,0,0,8,117,0,0,8,53,0,0,9,202,0,17,7,13,0,0,8,101,0,0,8,37,0,0,9,170,0,0,8,5,0,0,8,133,0,0,8,69,0,0,9,234,0,16,7,8,0,0,8,93,0,0,8,29,0,0,9,154,0,20,7,83,0,0,8,125,0,0,8,61,0,0,9,218,0,18,7,23,0,0,8,109,0,0,8,45,0,0,9,186,0,0,8,13,0,0,8,141,0,0,8,77,0,0,9,250,0,16,7,3,0,0,8,83,0,0,8,19,0,21,8,195,0,19,7,35,0,0,8,115,0,0,8,51,0,0,9,198,0,17,7,11,0,0,8,99,0,0,8,35,0,0,9,166,0,0,8,3,0,0,8,131,0,0,8,67,0,0,9,230,0,16,7,7,0,0,8,91,0,0,8,27,0,0,9,150,0,20,7,67,0,0,8,123,0,0,8,59,0,0,9,214,0,18,7,19,0,0,8,107,0,0,8,43,0,0,9,182,0,0,8,11,0,0,8,139,0,0,8,75,0,0,9,246,0,16,7,5,0,0,8,87,0,0,8,23,0,64,8,0,0,19,7,51,0,0,8,119,0,0,8,55,0,0,9,206,0,17,7,15,0,0,8,103,0,0,8,39,0,0,9,174,0,0,8,7,0,0,8,135,0,0,8,71,0,0,9,238,0,16,7,9,0,0,8,95,0,0,8,31,0,0,9,158,0,20,7,99,0,0,8,127,0,0,8,63,0,0,9,222,0,18,7,27,0,0,8,111,0,0,8,47,0,0,9,190,0,0,8,15,0,0,8,143,0,0,8,79,0,0,9,254,0,96,7,0,0,0,8,80,0,0,8,16,0,20,8,115,0,18,7,31,0,0,8,112,0,0,8,48,0,0,9,193,0,16,7,10,0,0,8,96,0,0,8,32,0,0,9,161,0,0,8,0,0,0,8,128,0,0,8,64,0,0,9,225,0,16,7,6,0,0,8,88,0,0,8,24,0,0,9,145,0,19,7,59,0,0,8,120,0,0,8,56,0,0,9,209,0,17,7,17,0,0,8,104,0,0,8,40,0,0,9,177,0,0,8,8,0,0,8,136,0,0,8,72,0,0,9,241,0,16,7,4,0,0,8,84,0,0,8,20,0,21,8,227,0,19,7,43,0,0,8,116,0,0,8,52,0,0,9,201,0,17,7,13,0,0,8,100,0,0,8,36,0,0,9,169,0,0,8,4,0,0,8,132,0,0,8,68,0,0,9,233,0,16,7,8,0,0,8,92,0,0,8,28,0,0,9,153,0,20,7,83,0,0,8,124,0,0,8,60,0,0,9,217,0,18,7,23,0,0,8,108,0,0,8,44,0,0,9,185,0,0,8,12,0,0,8,140,0,0,8,76,0,0,9,249,0,16,7,3,0,0,8,82,0,0,8,18,0,21,8,163,0,19,7,35,0,0,8,114,0,0,8,50,0,0,9,197,0,17,7,11,0,0,8,98,0,0,8,34,0,0,9,165,0,0,8,2,0,0,8,130,0,0,8,66,0,0,9,229,0,16,7,7,0,0,8,90,0,0,8,26,0,0,9,149,0,20,7,67,0,0,8,122,0,0,8,58,0,0,9,213,0,18,7,19,0,0,8,106,0,0,8,42,0,0,9,181,0,0,8,10,0,0,8,138,0,0,8,74,0,0,9,245,0,16,7,5,0,0,8,86,0,0,8,22,0,64,8,0,0,19,7,51,0,0,8,118,0,0,8,54,0,0,9,205,0,17,7,15,0,0,8,102,0,0,8,38,0,0,9,173,0,0,8,6,0,0,8,134,0,0,8,70,0,0,9,237,0,16,7,9,0,0,8,94,0,0,8,30,0,0,9,157,0,20,7,99,0,0,8,126,0,0,8,62,0,0,9,221,0,18,7,27,0,0,8,110,0,0,8,46,0,0,9,189,0,0,8,14,0,0,8,142,0,0,8,78,0,0,9,253,0,96,7,0,0,0,8,81,0,0,8,17,0,21,8,131,0,18,7,31,0,0,8,113,0,0,8,49,0,0,9,195,0,16,7,10,0,0,8,97,0,0,8,33,0,0,9,163,0,0,8,1,0,0,8,129,0,0,8,65,0,0,9,227,0,16,7,6,0,0,8,89,0,0,8,25,0,0,9,147,0,19,7,59,0,0,8,121,0,0,8,57,0,0,9,211,0,17,7,17,0,0,8,105,0,0,8,41,0,0,9,179,0,0,8,9,0,0,8,137,0,0,8,73,0,0,9,243,0,16,7,4,0,0,8,85,0,0,8,21,0,16,8,2,1,19,7,43,0,0,8,117,0,0,8,53,0,0,9,203,0,17,7,13,0,0,8,101,0,0,8,37,0,0,9,171,0,0,8,5,0,0,8,133,0,0,8,69,0,0,9,235,0,16,7,8,0,0,8,93,0,0,8,29,0,0,9,155,0,20,7,83,0,0,8,125,0,0,8,61,0,0,9,219,0,18,7,23,0,0,8,109,0,0,8,45,0,0,9,187,0,0,8,13,0,0,8,141,0,0,8,77,0,0,9,251,0,16,7,3,0,0,8,83,0,0,8,19,0,21,8,195,0,19,7,35,0,0,8,115,0,0,8,51,0,0,9,199,0,17,7,11,0,0,8,99,0,0,8,35,0,0,9,167,0,0,8,3,0,0,8,131,0,0,8,67,0,0,9,231,0,16,7,7,0,0,8,91,0,0,8,27,0,0,9,151,0,20,7,67,0,0,8,123,0,0,8,59,0,0,9,215,0,18,7,19,0,0,8,107,0,0,8,43,0,0,9,183,0,0,8,11,0,0,8,139,0,0,8,75,0,0,9,247,0,16,7,5,0,0,8,87,0,0,8,23,0,64,8,0,0,19,7,51,0,0,8,119,0,0,8,55,0,0,9,207,0,17,7,15,0,0,8,103,0,0,8,39,0,0,9,175,0,0,8,7,0,0,8,135,0,0,8,71,0,0,9,239,0,16,7,9,0,0,8,95,0,0,8,31,0,0,9,159,0,20,7,99,0,0,8,127,0,0,8,63,0,0,9,223,0,18,7,27,0,0,8,111,0,0,8,47,0,0,9,191,0,0,8,15,0,0,8,143,0,0,8,79,0,0,9,255,0,16,5,1,0,23,5,1,1,19,5,17,0,27,5,1,16,17,5,5,0,25,5,1,4,21,5,65,0,29,5,1,64,16,5,3,0,24,5,1,2,20,5,33,0,28,5,1,32,18,5,9,0,26,5,1,8,22,5,129,0,64,5,0,0,16,5,2,0,23,5,129,1,19,5,25,0,27,5,1,24,17,5,7,0,25,5,1,6,21,5,97,0,29,5,1,96,16,5,4,0,24,5,1,3,20,5,49,0,28,5,1,48,18,5,13,0,26,5,1,12,22,5,193,0,64,5,0,0,3,0,4,0,5,0,6,0,7,0,8,0,9,0,10,0,11,0,13,0,15,0,17,0,19,0,23,0,27,0,31,0,35,0,43,0,51,0,59,0,67,0,83,0,99,0,115,0,131,0,163,0,195,0,227,0,2,1,0,0,0,0,0,0,16,0,16,0,16,0,16,0,16,0,16,0,16,0,16,0,17,0,17,0,17,0,17,0,18,0,18,0,18,0,18,0,19,0,19,0,19,0,19,0,20,0,20,0,20,0,20,0,21,0,21,0,21,0,21,0,16,0,73,0,195,0,0,0,1,0,2,0,3,0,4,0,5,0,7,0,9,0,13,0,17,0,25,0,33,0,49,0,65,0,97,0,129,0,193,0,1,1,129,1,1,2,1,3,1,4,1,6,1,8,1,12,1,16,1,24,1,32,1,48,1,64,1,96,0,0,0,0,16,0,16,0,16,0,16,0,17,0,17,0,18,0,18,0,19,0,19,0,20,0,20,0,21,0,21,0,22,0,22,0,23,0,23,0,24,0,24,0,25,0,25,0,26,0,26,0,27,0,27,0,28,0,28,0,29,0,29,0,64,0,64,0,105,110,118,97,108,105,100,32,100,105,115,116,97,110,99,101,32,116,111,111,32,102,97,114,32,98,97,99,107,0,0,0,105,110,118,97,108,105,100,32,100,105,115,116,97,110,99,101,32,99,111,100,101,0,0,0,105,110,118,97,108,105,100,32,108,105,116,101,114,97,108,47,108,101,110,103,116,104,32,99,111,100,101,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], "i8", ALLOC_NONE, Runtime.GLOBAL_BASE+10240);
+
+
+
+
+var tempDoublePtr = Runtime.alignMemory(allocate(12, "i8", ALLOC_STATIC), 8);
+
+assert(tempDoublePtr % 8 == 0);
+
+function copyTempFloat(ptr) { // functions, because inlining this code increases code size too much
+
+ HEAP8[tempDoublePtr] = HEAP8[ptr];
+
+ HEAP8[tempDoublePtr+1] = HEAP8[ptr+1];
+
+ HEAP8[tempDoublePtr+2] = HEAP8[ptr+2];
+
+ HEAP8[tempDoublePtr+3] = HEAP8[ptr+3];
+
+}
+
+function copyTempDouble(ptr) {
+
+ HEAP8[tempDoublePtr] = HEAP8[ptr];
+
+ HEAP8[tempDoublePtr+1] = HEAP8[ptr+1];
+
+ HEAP8[tempDoublePtr+2] = HEAP8[ptr+2];
+
+ HEAP8[tempDoublePtr+3] = HEAP8[ptr+3];
+
+ HEAP8[tempDoublePtr+4] = HEAP8[ptr+4];
+
+ HEAP8[tempDoublePtr+5] = HEAP8[ptr+5];
+
+ HEAP8[tempDoublePtr+6] = HEAP8[ptr+6];
+
+ HEAP8[tempDoublePtr+7] = HEAP8[ptr+7];
+
+}
+
+
+
+
+
+
+ var ERRNO_CODES={EPERM:1,ENOENT:2,ESRCH:3,EINTR:4,EIO:5,ENXIO:6,E2BIG:7,ENOEXEC:8,EBADF:9,ECHILD:10,EAGAIN:11,EWOULDBLOCK:11,ENOMEM:12,EACCES:13,EFAULT:14,ENOTBLK:15,EBUSY:16,EEXIST:17,EXDEV:18,ENODEV:19,ENOTDIR:20,EISDIR:21,EINVAL:22,ENFILE:23,EMFILE:24,ENOTTY:25,ETXTBSY:26,EFBIG:27,ENOSPC:28,ESPIPE:29,EROFS:30,EMLINK:31,EPIPE:32,EDOM:33,ERANGE:34,ENOMSG:42,EIDRM:43,ECHRNG:44,EL2NSYNC:45,EL3HLT:46,EL3RST:47,ELNRNG:48,EUNATCH:49,ENOCSI:50,EL2HLT:51,EDEADLK:35,ENOLCK:37,EBADE:52,EBADR:53,EXFULL:54,ENOANO:55,EBADRQC:56,EBADSLT:57,EDEADLOCK:35,EBFONT:59,ENOSTR:60,ENODATA:61,ETIME:62,ENOSR:63,ENONET:64,ENOPKG:65,EREMOTE:66,ENOLINK:67,EADV:68,ESRMNT:69,ECOMM:70,EPROTO:71,EMULTIHOP:72,EDOTDOT:73,EBADMSG:74,ENOTUNIQ:76,EBADFD:77,EREMCHG:78,ELIBACC:79,ELIBBAD:80,ELIBSCN:81,ELIBMAX:82,ELIBEXEC:83,ENOSYS:38,ENOTEMPTY:39,ENAMETOOLONG:36,ELOOP:40,EOPNOTSUPP:95,EPFNOSUPPORT:96,ECONNRESET:104,ENOBUFS:105,EAFNOSUPPORT:97,EPROTOTYPE:91,ENOTSOCK:88,ENOPROTOOPT:92,ESHUTDOWN:108,ECONNREFUSED:111,EADDRINUSE:98,ECONNABORTED:103,ENETUNREACH:101,ENETDOWN:100,ETIMEDOUT:110,EHOSTDOWN:112,EHOSTUNREACH:113,EINPROGRESS:115,EALREADY:114,EDESTADDRREQ:89,EMSGSIZE:90,EPROTONOSUPPORT:93,ESOCKTNOSUPPORT:94,EADDRNOTAVAIL:99,ENETRESET:102,EISCONN:106,ENOTCONN:107,ETOOMANYREFS:109,EUSERS:87,EDQUOT:122,ESTALE:116,ENOTSUP:95,ENOMEDIUM:123,EILSEQ:84,EOVERFLOW:75,ECANCELED:125,ENOTRECOVERABLE:131,EOWNERDEAD:130,ESTRPIPE:86};
+
+ var ERRNO_MESSAGES={0:"Success",1:"Not super-user",2:"No such file or directory",3:"No such process",4:"Interrupted system call",5:"I/O error",6:"No such device or address",7:"Arg list too long",8:"Exec format error",9:"Bad file number",10:"No children",11:"No more processes",12:"Not enough core",13:"Permission denied",14:"Bad address",15:"Block device required",16:"Mount device busy",17:"File exists",18:"Cross-device link",19:"No such device",20:"Not a directory",21:"Is a directory",22:"Invalid argument",23:"Too many open files in system",24:"Too many open files",25:"Not a typewriter",26:"Text file busy",27:"File too large",28:"No space left on device",29:"Illegal seek",30:"Read only file system",31:"Too many links",32:"Broken pipe",33:"Math arg out of domain of func",34:"Math result not representable",35:"File locking deadlock error",36:"File or path name too long",37:"No record locks available",38:"Function not implemented",39:"Directory not empty",40:"Too many symbolic links",42:"No message of desired type",43:"Identifier removed",44:"Channel number out of range",45:"Level 2 not synchronized",46:"Level 3 halted",47:"Level 3 reset",48:"Link number out of range",49:"Protocol driver not attached",50:"No CSI structure available",51:"Level 2 halted",52:"Invalid exchange",53:"Invalid request descriptor",54:"Exchange full",55:"No anode",56:"Invalid request code",57:"Invalid slot",59:"Bad font file fmt",60:"Device not a stream",61:"No data (for no delay io)",62:"Timer expired",63:"Out of streams resources",64:"Machine is not on the network",65:"Package not installed",66:"The object is remote",67:"The link has been severed",68:"Advertise error",69:"Srmount error",70:"Communication error on send",71:"Protocol error",72:"Multihop attempted",73:"Cross mount point (not really error)",74:"Trying to read unreadable message",75:"Value too large for defined data type",76:"Given log. name not unique",77:"f.d. invalid for this operation",78:"Remote address changed",79:"Can access a needed shared lib",80:"Accessing a corrupted shared lib",81:".lib section in a.out corrupted",82:"Attempting to link in too many libs",83:"Attempting to exec a shared library",84:"Illegal byte sequence",86:"Streams pipe error",87:"Too many users",88:"Socket operation on non-socket",89:"Destination address required",90:"Message too long",91:"Protocol wrong type for socket",92:"Protocol not available",93:"Unknown protocol",94:"Socket type not supported",95:"Not supported",96:"Protocol family not supported",97:"Address family not supported by protocol family",98:"Address already in use",99:"Address not available",100:"Network interface is not configured",101:"Network is unreachable",102:"Connection reset by network",103:"Connection aborted",104:"Connection reset by peer",105:"No buffer space available",106:"Socket is already connected",107:"Socket is not connected",108:"Can't send after socket shutdown",109:"Too many references",110:"Connection timed out",111:"Connection refused",112:"Host is down",113:"Host is unreachable",114:"Socket already connected",115:"Connection already in progress",116:"Stale file handle",122:"Quota exceeded",123:"No medium (in tape drive)",125:"Operation canceled",130:"Previous owner died",131:"State not recoverable"};
+
+
+ var ___errno_state=0;function ___setErrNo(value) {
+ // For convenient setting and returning of errno.
+ HEAP32[((___errno_state)>>2)]=value;
+ return value;
+ }
+
+ var PATH={splitPath:function (filename) {
+ var splitPathRe = /^(\/?|)([\s\S]*?)((?:\.{1,2}|[^\/]+?|)(\.[^.\/]*|))(?:[\/]*)$/;
+ return splitPathRe.exec(filename).slice(1);
+ },normalizeArray:function (parts, allowAboveRoot) {
+ // if the path tries to go above the root, `up` ends up > 0
+ var up = 0;
+ for (var i = parts.length - 1; i >= 0; i--) {
+ var last = parts[i];
+ if (last === '.') {
+ parts.splice(i, 1);
+ } else if (last === '..') {
+ parts.splice(i, 1);
+ up++;
+ } else if (up) {
+ parts.splice(i, 1);
+ up--;
+ }
+ }
+ // if the path is allowed to go above the root, restore leading ..s
+ if (allowAboveRoot) {
+ for (; up--; up) {
+ parts.unshift('..');
+ }
+ }
+ return parts;
+ },normalize:function (path) {
+ var isAbsolute = path.charAt(0) === '/',
+ trailingSlash = path.substr(-1) === '/';
+ // Normalize the path
+ path = PATH.normalizeArray(path.split('/').filter(function(p) {
+ return !!p;
+ }), !isAbsolute).join('/');
+ if (!path && !isAbsolute) {
+ path = '.';
+ }
+ if (path && trailingSlash) {
+ path += '/';
+ }
+ return (isAbsolute ? '/' : '') + path;
+ },dirname:function (path) {
+ var result = PATH.splitPath(path),
+ root = result[0],
+ dir = result[1];
+ if (!root && !dir) {
+ // No dirname whatsoever
+ return '.';
+ }
+ if (dir) {
+ // It has a dirname, strip trailing slash
+ dir = dir.substr(0, dir.length - 1);
+ }
+ return root + dir;
+ },basename:function (path) {
+ // EMSCRIPTEN return '/'' for '/', not an empty string
+ if (path === '/') return '/';
+ var lastSlash = path.lastIndexOf('/');
+ if (lastSlash === -1) return path;
+ return path.substr(lastSlash+1);
+ },extname:function (path) {
+ return PATH.splitPath(path)[3];
+ },join:function () {
+ var paths = Array.prototype.slice.call(arguments, 0);
+ return PATH.normalize(paths.join('/'));
+ },join2:function (l, r) {
+ return PATH.normalize(l + '/' + r);
+ },resolve:function () {
+ var resolvedPath = '',
+ resolvedAbsolute = false;
+ for (var i = arguments.length - 1; i >= -1 && !resolvedAbsolute; i--) {
+ var path = (i >= 0) ? arguments[i] : FS.cwd();
+ // Skip empty and invalid entries
+ if (typeof path !== 'string') {
+ throw new TypeError('Arguments to path.resolve must be strings');
+ } else if (!path) {
+ continue;
+ }
+ resolvedPath = path + '/' + resolvedPath;
+ resolvedAbsolute = path.charAt(0) === '/';
+ }
+ // At this point the path should be resolved to a full absolute path, but
+ // handle relative paths to be safe (might happen when process.cwd() fails)
+ resolvedPath = PATH.normalizeArray(resolvedPath.split('/').filter(function(p) {
+ return !!p;
+ }), !resolvedAbsolute).join('/');
+ return ((resolvedAbsolute ? '/' : '') + resolvedPath) || '.';
+ },relative:function (from, to) {
+ from = PATH.resolve(from).substr(1);
+ to = PATH.resolve(to).substr(1);
+ function trim(arr) {
+ var start = 0;
+ for (; start < arr.length; start++) {
+ if (arr[start] !== '') break;
+ }
+ var end = arr.length - 1;
+ for (; end >= 0; end--) {
+ if (arr[end] !== '') break;
+ }
+ if (start > end) return [];
+ return arr.slice(start, end - start + 1);
+ }
+ var fromParts = trim(from.split('/'));
+ var toParts = trim(to.split('/'));
+ var length = Math.min(fromParts.length, toParts.length);
+ var samePartsLength = length;
+ for (var i = 0; i < length; i++) {
+ if (fromParts[i] !== toParts[i]) {
+ samePartsLength = i;
+ break;
+ }
+ }
+ var outputParts = [];
+ for (var i = samePartsLength; i < fromParts.length; i++) {
+ outputParts.push('..');
+ }
+ outputParts = outputParts.concat(toParts.slice(samePartsLength));
+ return outputParts.join('/');
+ }};
+
+ var TTY={ttys:[],init:function () {
+ // https://github.com/kripken/emscripten/pull/1555
+ // if (ENVIRONMENT_IS_NODE) {
+ // // currently, FS.init does not distinguish if process.stdin is a file or TTY
+ // // device, it always assumes it's a TTY device. because of this, we're forcing
+ // // process.stdin to UTF8 encoding to at least make stdin reading compatible
+ // // with text files until FS.init can be refactored.
+ // process['stdin']['setEncoding']('utf8');
+ // }
+ },shutdown:function () {
+ // https://github.com/kripken/emscripten/pull/1555
+ // if (ENVIRONMENT_IS_NODE) {
+ // // inolen: any idea as to why node -e 'process.stdin.read()' wouldn't exit immediately (with process.stdin being a tty)?
+ // // isaacs: because now it's reading from the stream, you've expressed interest in it, so that read() kicks off a _read() which creates a ReadReq operation
+ // // inolen: I thought read() in that case was a synchronous operation that just grabbed some amount of buffered data if it exists?
+ // // isaacs: it is. but it also triggers a _read() call, which calls readStart() on the handle
+ // // isaacs: do process.stdin.pause() and i'd think it'd probably close the pending call
+ // process['stdin']['pause']();
+ // }
+ },register:function (dev, ops) {
+ TTY.ttys[dev] = { input: [], output: [], ops: ops };
+ FS.registerDevice(dev, TTY.stream_ops);
+ },stream_ops:{open:function (stream) {
+ var tty = TTY.ttys[stream.node.rdev];
+ if (!tty) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ stream.tty = tty;
+ stream.seekable = false;
+ },close:function (stream) {
+ // flush any pending line data
+ if (stream.tty.output.length) {
+ stream.tty.ops.put_char(stream.tty, 10);
+ }
+ },read:function (stream, buffer, offset, length, pos /* ignored */) {
+ if (!stream.tty || !stream.tty.ops.get_char) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENXIO);
+ }
+ var bytesRead = 0;
+ for (var i = 0; i < length; i++) {
+ var result;
+ try {
+ result = stream.tty.ops.get_char(stream.tty);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ if (result === undefined && bytesRead === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ if (result === null || result === undefined) break;
+ bytesRead++;
+ buffer[offset+i] = result;
+ }
+ if (bytesRead) {
+ stream.node.timestamp = Date.now();
+ }
+ return bytesRead;
+ },write:function (stream, buffer, offset, length, pos) {
+ if (!stream.tty || !stream.tty.ops.put_char) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENXIO);
+ }
+ for (var i = 0; i < length; i++) {
+ try {
+ stream.tty.ops.put_char(stream.tty, buffer[offset+i]);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ }
+ if (length) {
+ stream.node.timestamp = Date.now();
+ }
+ return i;
+ }},default_tty_ops:{get_char:function (tty) {
+ if (!tty.input.length) {
+ var result = null;
+ if (ENVIRONMENT_IS_NODE) {
+ result = process['stdin']['read']();
+ if (!result) {
+ if (process['stdin']['_readableState'] && process['stdin']['_readableState']['ended']) {
+ return null; // EOF
+ }
+ return undefined; // no data available
+ }
+ } else if (typeof window != 'undefined' &&
+ typeof window.prompt == 'function') {
+ // Browser.
+ result = window.prompt('Input: '); // returns null on cancel
+ if (result !== null) {
+ result += '\n';
+ }
+ } else if (typeof readline == 'function') {
+ // Command line.
+ result = readline();
+ if (result !== null) {
+ result += '\n';
+ }
+ }
+ if (!result) {
+ return null;
+ }
+ tty.input = intArrayFromString(result, true);
+ }
+ return tty.input.shift();
+ },put_char:function (tty, val) {
+ if (val === null || val === 10) {
+ Module['print'](tty.output.join(''));
+ tty.output = [];
+ } else {
+ tty.output.push(TTY.utf8.processCChar(val));
+ }
+ }},default_tty1_ops:{put_char:function (tty, val) {
+ if (val === null || val === 10) {
+ Module['printErr'](tty.output.join(''));
+ tty.output = [];
+ } else {
+ tty.output.push(TTY.utf8.processCChar(val));
+ }
+ }}};
+
+ var MEMFS={ops_table:null,CONTENT_OWNING:1,CONTENT_FLEXIBLE:2,CONTENT_FIXED:3,mount:function (mount) {
+ return MEMFS.createNode(null, '/', 16384 | 511 /* 0777 */, 0);
+ },createNode:function (parent, name, mode, dev) {
+ if (FS.isBlkdev(mode) || FS.isFIFO(mode)) {
+ // no supported
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (!MEMFS.ops_table) {
+ MEMFS.ops_table = {
+ dir: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr,
+ lookup: MEMFS.node_ops.lookup,
+ mknod: MEMFS.node_ops.mknod,
+ rename: MEMFS.node_ops.rename,
+ unlink: MEMFS.node_ops.unlink,
+ rmdir: MEMFS.node_ops.rmdir,
+ readdir: MEMFS.node_ops.readdir,
+ symlink: MEMFS.node_ops.symlink
+ },
+ stream: {
+ llseek: MEMFS.stream_ops.llseek
+ }
+ },
+ file: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr
+ },
+ stream: {
+ llseek: MEMFS.stream_ops.llseek,
+ read: MEMFS.stream_ops.read,
+ write: MEMFS.stream_ops.write,
+ allocate: MEMFS.stream_ops.allocate,
+ mmap: MEMFS.stream_ops.mmap
+ }
+ },
+ link: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr,
+ readlink: MEMFS.node_ops.readlink
+ },
+ stream: {}
+ },
+ chrdev: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr
+ },
+ stream: FS.chrdev_stream_ops
+ },
+ };
+ }
+ var node = FS.createNode(parent, name, mode, dev);
+ if (FS.isDir(node.mode)) {
+ node.node_ops = MEMFS.ops_table.dir.node;
+ node.stream_ops = MEMFS.ops_table.dir.stream;
+ node.contents = {};
+ } else if (FS.isFile(node.mode)) {
+ node.node_ops = MEMFS.ops_table.file.node;
+ node.stream_ops = MEMFS.ops_table.file.stream;
+ node.contents = [];
+ node.contentMode = MEMFS.CONTENT_FLEXIBLE;
+ } else if (FS.isLink(node.mode)) {
+ node.node_ops = MEMFS.ops_table.link.node;
+ node.stream_ops = MEMFS.ops_table.link.stream;
+ } else if (FS.isChrdev(node.mode)) {
+ node.node_ops = MEMFS.ops_table.chrdev.node;
+ node.stream_ops = MEMFS.ops_table.chrdev.stream;
+ }
+ node.timestamp = Date.now();
+ // add the new node to the parent
+ if (parent) {
+ parent.contents[name] = node;
+ }
+ return node;
+ },ensureFlexible:function (node) {
+ if (node.contentMode !== MEMFS.CONTENT_FLEXIBLE) {
+ var contents = node.contents;
+ node.contents = Array.prototype.slice.call(contents);
+ node.contentMode = MEMFS.CONTENT_FLEXIBLE;
+ }
+ },node_ops:{getattr:function (node) {
+ var attr = {};
+ // device numbers reuse inode numbers.
+ attr.dev = FS.isChrdev(node.mode) ? node.id : 1;
+ attr.ino = node.id;
+ attr.mode = node.mode;
+ attr.nlink = 1;
+ attr.uid = 0;
+ attr.gid = 0;
+ attr.rdev = node.rdev;
+ if (FS.isDir(node.mode)) {
+ attr.size = 4096;
+ } else if (FS.isFile(node.mode)) {
+ attr.size = node.contents.length;
+ } else if (FS.isLink(node.mode)) {
+ attr.size = node.link.length;
+ } else {
+ attr.size = 0;
+ }
+ attr.atime = new Date(node.timestamp);
+ attr.mtime = new Date(node.timestamp);
+ attr.ctime = new Date(node.timestamp);
+ // NOTE: In our implementation, st_blocks = Math.ceil(st_size/st_blksize),
+ // but this is not required by the standard.
+ attr.blksize = 4096;
+ attr.blocks = Math.ceil(attr.size / attr.blksize);
+ return attr;
+ },setattr:function (node, attr) {
+ if (attr.mode !== undefined) {
+ node.mode = attr.mode;
+ }
+ if (attr.timestamp !== undefined) {
+ node.timestamp = attr.timestamp;
+ }
+ if (attr.size !== undefined) {
+ MEMFS.ensureFlexible(node);
+ var contents = node.contents;
+ if (attr.size < contents.length) contents.length = attr.size;
+ else while (attr.size > contents.length) contents.push(0);
+ }
+ },lookup:function (parent, name) {
+ throw FS.genericErrors[ERRNO_CODES.ENOENT];
+ },mknod:function (parent, name, mode, dev) {
+ return MEMFS.createNode(parent, name, mode, dev);
+ },rename:function (old_node, new_dir, new_name) {
+ // if we're overwriting a directory at new_name, make sure it's empty.
+ if (FS.isDir(old_node.mode)) {
+ var new_node;
+ try {
+ new_node = FS.lookupNode(new_dir, new_name);
+ } catch (e) {
+ }
+ if (new_node) {
+ for (var i in new_node.contents) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTEMPTY);
+ }
+ }
+ }
+ // do the internal rewiring
+ delete old_node.parent.contents[old_node.name];
+ old_node.name = new_name;
+ new_dir.contents[new_name] = old_node;
+ old_node.parent = new_dir;
+ },unlink:function (parent, name) {
+ delete parent.contents[name];
+ },rmdir:function (parent, name) {
+ var node = FS.lookupNode(parent, name);
+ for (var i in node.contents) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTEMPTY);
+ }
+ delete parent.contents[name];
+ },readdir:function (node) {
+ var entries = ['.', '..']
+ for (var key in node.contents) {
+ if (!node.contents.hasOwnProperty(key)) {
+ continue;
+ }
+ entries.push(key);
+ }
+ return entries;
+ },symlink:function (parent, newname, oldpath) {
+ var node = MEMFS.createNode(parent, newname, 511 /* 0777 */ | 40960, 0);
+ node.link = oldpath;
+ return node;
+ },readlink:function (node) {
+ if (!FS.isLink(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ return node.link;
+ }},stream_ops:{read:function (stream, buffer, offset, length, position) {
+ var contents = stream.node.contents;
+ if (position >= contents.length)
+ return 0;
+ var size = Math.min(contents.length - position, length);
+ assert(size >= 0);
+ if (size > 8 && contents.subarray) { // non-trivial, and typed array
+ buffer.set(contents.subarray(position, position + size), offset);
+ } else
+ {
+ for (var i = 0; i < size; i++) {
+ buffer[offset + i] = contents[position + i];
+ }
+ }
+ return size;
+ },write:function (stream, buffer, offset, length, position, canOwn) {
+ var node = stream.node;
+ node.timestamp = Date.now();
+ var contents = node.contents;
+ if (length && contents.length === 0 && position === 0 && buffer.subarray) {
+ // just replace it with the new data
+ if (canOwn && offset === 0) {
+ node.contents = buffer; // this could be a subarray of Emscripten HEAP, or allocated from some other source.
+ node.contentMode = (buffer.buffer === HEAP8.buffer) ? MEMFS.CONTENT_OWNING : MEMFS.CONTENT_FIXED;
+ } else {
+ node.contents = new Uint8Array(buffer.subarray(offset, offset+length));
+ node.contentMode = MEMFS.CONTENT_FIXED;
+ }
+ return length;
+ }
+ MEMFS.ensureFlexible(node);
+ var contents = node.contents;
+ while (contents.length < position) contents.push(0);
+ for (var i = 0; i < length; i++) {
+ contents[position + i] = buffer[offset + i];
+ }
+ return length;
+ },llseek:function (stream, offset, whence) {
+ var position = offset;
+ if (whence === 1) { // SEEK_CUR.
+ position += stream.position;
+ } else if (whence === 2) { // SEEK_END.
+ if (FS.isFile(stream.node.mode)) {
+ position += stream.node.contents.length;
+ }
+ }
+ if (position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ stream.ungotten = [];
+ stream.position = position;
+ return position;
+ },allocate:function (stream, offset, length) {
+ MEMFS.ensureFlexible(stream.node);
+ var contents = stream.node.contents;
+ var limit = offset + length;
+ while (limit > contents.length) contents.push(0);
+ },mmap:function (stream, buffer, offset, length, position, prot, flags) {
+ if (!FS.isFile(stream.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ var ptr;
+ var allocated;
+ var contents = stream.node.contents;
+ // Only make a new copy when MAP_PRIVATE is specified.
+ if ( !(flags & 2) &&
+ (contents.buffer === buffer || contents.buffer === buffer.buffer) ) {
+ // We can't emulate MAP_SHARED when the file is not backed by the buffer
+ // we're mapping to (e.g. the HEAP buffer).
+ allocated = false;
+ ptr = contents.byteOffset;
+ } else {
+ // Try to avoid unnecessary slices.
+ if (position > 0 || position + length < contents.length) {
+ if (contents.subarray) {
+ contents = contents.subarray(position, position + length);
+ } else {
+ contents = Array.prototype.slice.call(contents, position, position + length);
+ }
+ }
+ allocated = true;
+ ptr = _malloc(length);
+ if (!ptr) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOMEM);
+ }
+ buffer.set(contents, ptr);
+ }
+ return { ptr: ptr, allocated: allocated };
+ }}};
+
+ var IDBFS={dbs:{},indexedDB:function () {
+ return window.indexedDB || window.mozIndexedDB || window.webkitIndexedDB || window.msIndexedDB;
+ },DB_VERSION:21,DB_STORE_NAME:"FILE_DATA",mount:function (mount) {
+ // reuse all of the core MEMFS functionality
+ return MEMFS.mount.apply(null, arguments);
+ },syncfs:function (mount, populate, callback) {
+ IDBFS.getLocalSet(mount, function(err, local) {
+ if (err) return callback(err);
+
+ IDBFS.getRemoteSet(mount, function(err, remote) {
+ if (err) return callback(err);
+
+ var src = populate ? remote : local;
+ var dst = populate ? local : remote;
+
+ IDBFS.reconcile(src, dst, callback);
+ });
+ });
+ },getDB:function (name, callback) {
+ // check the cache first
+ var db = IDBFS.dbs[name];
+ if (db) {
+ return callback(null, db);
+ }
+
+ var req;
+ try {
+ req = IDBFS.indexedDB().open(name, IDBFS.DB_VERSION);
+ } catch (e) {
+ return callback(e);
+ }
+ req.onupgradeneeded = function(e) {
+ var db = e.target.result;
+ var transaction = e.target.transaction;
+
+ var fileStore;
+
+ if (db.objectStoreNames.contains(IDBFS.DB_STORE_NAME)) {
+ fileStore = transaction.objectStore(IDBFS.DB_STORE_NAME);
+ } else {
+ fileStore = db.createObjectStore(IDBFS.DB_STORE_NAME);
+ }
+
+ fileStore.createIndex('timestamp', 'timestamp', { unique: false });
+ };
+ req.onsuccess = function() {
+ db = req.result;
+
+ // add to the cache
+ IDBFS.dbs[name] = db;
+ callback(null, db);
+ };
+ req.onerror = function() {
+ callback(this.error);
+ };
+ },getLocalSet:function (mount, callback) {
+ var entries = {};
+
+ function isRealDir(p) {
+ return p !== '.' && p !== '..';
+ };
+ function toAbsolute(root) {
+ return function(p) {
+ return PATH.join2(root, p);
+ }
+ };
+
+ var check = FS.readdir(mount.mountpoint).filter(isRealDir).map(toAbsolute(mount.mountpoint));
+
+ while (check.length) {
+ var path = check.pop();
+ var stat;
+
+ try {
+ stat = FS.stat(path);
+ } catch (e) {
+ return callback(e);
+ }
+
+ if (FS.isDir(stat.mode)) {
+ check.push.apply(check, FS.readdir(path).filter(isRealDir).map(toAbsolute(path)));
+ }
+
+ entries[path] = { timestamp: stat.mtime };
+ }
+
+ return callback(null, { type: 'local', entries: entries });
+ },getRemoteSet:function (mount, callback) {
+ var entries = {};
+
+ IDBFS.getDB(mount.mountpoint, function(err, db) {
+ if (err) return callback(err);
+
+ var transaction = db.transaction([IDBFS.DB_STORE_NAME], 'readonly');
+ transaction.onerror = function() { callback(this.error); };
+
+ var store = transaction.objectStore(IDBFS.DB_STORE_NAME);
+ var index = store.index('timestamp');
+
+ index.openKeyCursor().onsuccess = function(event) {
+ var cursor = event.target.result;
+
+ if (!cursor) {
+ return callback(null, { type: 'remote', db: db, entries: entries });
+ }
+
+ entries[cursor.primaryKey] = { timestamp: cursor.key };
+
+ cursor.continue();
+ };
+ });
+ },loadLocalEntry:function (path, callback) {
+ var stat, node;
+
+ try {
+ var lookup = FS.lookupPath(path);
+ node = lookup.node;
+ stat = FS.stat(path);
+ } catch (e) {
+ return callback(e);
+ }
+
+ if (FS.isDir(stat.mode)) {
+ return callback(null, { timestamp: stat.mtime, mode: stat.mode });
+ } else if (FS.isFile(stat.mode)) {
+ return callback(null, { timestamp: stat.mtime, mode: stat.mode, contents: node.contents });
+ } else {
+ return callback(new Error('node type not supported'));
+ }
+ },storeLocalEntry:function (path, entry, callback) {
+ try {
+ if (FS.isDir(entry.mode)) {
+ FS.mkdir(path, entry.mode);
+ } else if (FS.isFile(entry.mode)) {
+ FS.writeFile(path, entry.contents, { encoding: 'binary', canOwn: true });
+ } else {
+ return callback(new Error('node type not supported'));
+ }
+
+ FS.utime(path, entry.timestamp, entry.timestamp);
+ } catch (e) {
+ return callback(e);
+ }
+
+ callback(null);
+ },removeLocalEntry:function (path, callback) {
+ try {
+ var lookup = FS.lookupPath(path);
+ var stat = FS.stat(path);
+
+ if (FS.isDir(stat.mode)) {
+ FS.rmdir(path);
+ } else if (FS.isFile(stat.mode)) {
+ FS.unlink(path);
+ }
+ } catch (e) {
+ return callback(e);
+ }
+
+ callback(null);
+ },loadRemoteEntry:function (store, path, callback) {
+ var req = store.get(path);
+ req.onsuccess = function(event) { callback(null, event.target.result); };
+ req.onerror = function() { callback(this.error); };
+ },storeRemoteEntry:function (store, path, entry, callback) {
+ var req = store.put(entry, path);
+ req.onsuccess = function() { callback(null); };
+ req.onerror = function() { callback(this.error); };
+ },removeRemoteEntry:function (store, path, callback) {
+ var req = store.delete(path);
+ req.onsuccess = function() { callback(null); };
+ req.onerror = function() { callback(this.error); };
+ },reconcile:function (src, dst, callback) {
+ var total = 0;
+
+ var create = [];
+ Object.keys(src.entries).forEach(function (key) {
+ var e = src.entries[key];
+ var e2 = dst.entries[key];
+ if (!e2 || e.timestamp > e2.timestamp) {
+ create.push(key);
+ total++;
+ }
+ });
+
+ var remove = [];
+ Object.keys(dst.entries).forEach(function (key) {
+ var e = dst.entries[key];
+ var e2 = src.entries[key];
+ if (!e2) {
+ remove.push(key);
+ total++;
+ }
+ });
+
+ if (!total) {
+ return callback(null);
+ }
+
+ var errored = false;
+ var completed = 0;
+ var db = src.type === 'remote' ? src.db : dst.db;
+ var transaction = db.transaction([IDBFS.DB_STORE_NAME], 'readwrite');
+ var store = transaction.objectStore(IDBFS.DB_STORE_NAME);
+
+ function done(err) {
+ if (err) {
+ if (!done.errored) {
+ done.errored = true;
+ return callback(err);
+ }
+ return;
+ }
+ if (++completed >= total) {
+ return callback(null);
+ }
+ };
+
+ transaction.onerror = function() { done(this.error); };
+
+ // sort paths in ascending order so directory entries are created
+ // before the files inside them
+ create.sort().forEach(function (path) {
+ if (dst.type === 'local') {
+ IDBFS.loadRemoteEntry(store, path, function (err, entry) {
+ if (err) return done(err);
+ IDBFS.storeLocalEntry(path, entry, done);
+ });
+ } else {
+ IDBFS.loadLocalEntry(path, function (err, entry) {
+ if (err) return done(err);
+ IDBFS.storeRemoteEntry(store, path, entry, done);
+ });
+ }
+ });
+
+ // sort paths in descending order so files are deleted before their
+ // parent directories
+ remove.sort().reverse().forEach(function(path) {
+ if (dst.type === 'local') {
+ IDBFS.removeLocalEntry(path, done);
+ } else {
+ IDBFS.removeRemoteEntry(store, path, done);
+ }
+ });
+ }};
+
+ var NODEFS={isWindows:false,staticInit:function () {
+ NODEFS.isWindows = !!process.platform.match(/^win/);
+ },mount:function (mount) {
+ assert(ENVIRONMENT_IS_NODE);
+ return NODEFS.createNode(null, '/', NODEFS.getMode(mount.opts.root), 0);
+ },createNode:function (parent, name, mode, dev) {
+ if (!FS.isDir(mode) && !FS.isFile(mode) && !FS.isLink(mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var node = FS.createNode(parent, name, mode);
+ node.node_ops = NODEFS.node_ops;
+ node.stream_ops = NODEFS.stream_ops;
+ return node;
+ },getMode:function (path) {
+ var stat;
+ try {
+ stat = fs.lstatSync(path);
+ if (NODEFS.isWindows) {
+ // On Windows, directories return permission bits 'rw-rw-rw-', even though they have 'rwxrwxrwx', so
+ // propagate write bits to execute bits.
+ stat.mode = stat.mode | ((stat.mode & 146) >> 1);
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ return stat.mode;
+ },realPath:function (node) {
+ var parts = [];
+ while (node.parent !== node) {
+ parts.push(node.name);
+ node = node.parent;
+ }
+ parts.push(node.mount.opts.root);
+ parts.reverse();
+ return PATH.join.apply(null, parts);
+ },flagsToPermissionStringMap:{0:"r",1:"r+",2:"r+",64:"r",65:"r+",66:"r+",129:"rx+",193:"rx+",514:"w+",577:"w",578:"w+",705:"wx",706:"wx+",1024:"a",1025:"a",1026:"a+",1089:"a",1090:"a+",1153:"ax",1154:"ax+",1217:"ax",1218:"ax+",4096:"rs",4098:"rs+"},flagsToPermissionString:function (flags) {
+ if (flags in NODEFS.flagsToPermissionStringMap) {
+ return NODEFS.flagsToPermissionStringMap[flags];
+ } else {
+ return flags;
+ }
+ },node_ops:{getattr:function (node) {
+ var path = NODEFS.realPath(node);
+ var stat;
+ try {
+ stat = fs.lstatSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ // node.js v0.10.20 doesn't report blksize and blocks on Windows. Fake them with default blksize of 4096.
+ // See http://support.microsoft.com/kb/140365
+ if (NODEFS.isWindows && !stat.blksize) {
+ stat.blksize = 4096;
+ }
+ if (NODEFS.isWindows && !stat.blocks) {
+ stat.blocks = (stat.size+stat.blksize-1)/stat.blksize|0;
+ }
+ return {
+ dev: stat.dev,
+ ino: stat.ino,
+ mode: stat.mode,
+ nlink: stat.nlink,
+ uid: stat.uid,
+ gid: stat.gid,
+ rdev: stat.rdev,
+ size: stat.size,
+ atime: stat.atime,
+ mtime: stat.mtime,
+ ctime: stat.ctime,
+ blksize: stat.blksize,
+ blocks: stat.blocks
+ };
+ },setattr:function (node, attr) {
+ var path = NODEFS.realPath(node);
+ try {
+ if (attr.mode !== undefined) {
+ fs.chmodSync(path, attr.mode);
+ // update the common node structure mode as well
+ node.mode = attr.mode;
+ }
+ if (attr.timestamp !== undefined) {
+ var date = new Date(attr.timestamp);
+ fs.utimesSync(path, date, date);
+ }
+ if (attr.size !== undefined) {
+ fs.truncateSync(path, attr.size);
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },lookup:function (parent, name) {
+ var path = PATH.join2(NODEFS.realPath(parent), name);
+ var mode = NODEFS.getMode(path);
+ return NODEFS.createNode(parent, name, mode);
+ },mknod:function (parent, name, mode, dev) {
+ var node = NODEFS.createNode(parent, name, mode, dev);
+ // create the backing node for this in the fs root as well
+ var path = NODEFS.realPath(node);
+ try {
+ if (FS.isDir(node.mode)) {
+ fs.mkdirSync(path, node.mode);
+ } else {
+ fs.writeFileSync(path, '', { mode: node.mode });
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ return node;
+ },rename:function (oldNode, newDir, newName) {
+ var oldPath = NODEFS.realPath(oldNode);
+ var newPath = PATH.join2(NODEFS.realPath(newDir), newName);
+ try {
+ fs.renameSync(oldPath, newPath);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },unlink:function (parent, name) {
+ var path = PATH.join2(NODEFS.realPath(parent), name);
+ try {
+ fs.unlinkSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },rmdir:function (parent, name) {
+ var path = PATH.join2(NODEFS.realPath(parent), name);
+ try {
+ fs.rmdirSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },readdir:function (node) {
+ var path = NODEFS.realPath(node);
+ try {
+ return fs.readdirSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },symlink:function (parent, newName, oldPath) {
+ var newPath = PATH.join2(NODEFS.realPath(parent), newName);
+ try {
+ fs.symlinkSync(oldPath, newPath);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },readlink:function (node) {
+ var path = NODEFS.realPath(node);
+ try {
+ return fs.readlinkSync(path);
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ }},stream_ops:{open:function (stream) {
+ var path = NODEFS.realPath(stream.node);
+ try {
+ if (FS.isFile(stream.node.mode)) {
+ stream.nfd = fs.openSync(path, NODEFS.flagsToPermissionString(stream.flags));
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },close:function (stream) {
+ try {
+ if (FS.isFile(stream.node.mode) && stream.nfd) {
+ fs.closeSync(stream.nfd);
+ }
+ } catch (e) {
+ if (!e.code) throw e;
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ },read:function (stream, buffer, offset, length, position) {
+ // FIXME this is terrible.
+ var nbuffer = new Buffer(length);
+ var res;
+ try {
+ res = fs.readSync(stream.nfd, nbuffer, 0, length, position);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ if (res > 0) {
+ for (var i = 0; i < res; i++) {
+ buffer[offset + i] = nbuffer[i];
+ }
+ }
+ return res;
+ },write:function (stream, buffer, offset, length, position) {
+ // FIXME this is terrible.
+ var nbuffer = new Buffer(buffer.subarray(offset, offset + length));
+ var res;
+ try {
+ res = fs.writeSync(stream.nfd, nbuffer, 0, length, position);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ return res;
+ },llseek:function (stream, offset, whence) {
+ var position = offset;
+ if (whence === 1) { // SEEK_CUR.
+ position += stream.position;
+ } else if (whence === 2) { // SEEK_END.
+ if (FS.isFile(stream.node.mode)) {
+ try {
+ var stat = fs.fstatSync(stream.nfd);
+ position += stat.size;
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES[e.code]);
+ }
+ }
+ }
+
+ if (position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+
+ stream.position = position;
+ return position;
+ }}};
+
+ var _stdin=allocate(1, "i32*", ALLOC_STATIC);
+
+ var _stdout=allocate(1, "i32*", ALLOC_STATIC);
+
+ var _stderr=allocate(1, "i32*", ALLOC_STATIC);
+
+ function _fflush(stream) {
+ // int fflush(FILE *stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fflush.html
+ // we don't currently perform any user-space buffering of data
+ }var FS={root:null,mounts:[],devices:[null],streams:[],nextInode:1,nameTable:null,currentPath:"/",initialized:false,ignorePermissions:true,ErrnoError:null,genericErrors:{},handleFSError:function (e) {
+ if (!(e instanceof FS.ErrnoError)) throw e + ' : ' + stackTrace();
+ return ___setErrNo(e.errno);
+ },lookupPath:function (path, opts) {
+ path = PATH.resolve(FS.cwd(), path);
+ opts = opts || {};
+
+ var defaults = {
+ follow_mount: true,
+ recurse_count: 0
+ };
+ for (var key in defaults) {
+ if (opts[key] === undefined) {
+ opts[key] = defaults[key];
+ }
+ }
+
+ if (opts.recurse_count > 8) { // max recursive lookup of 8
+ throw new FS.ErrnoError(ERRNO_CODES.ELOOP);
+ }
+
+ // split the path
+ var parts = PATH.normalizeArray(path.split('/').filter(function(p) {
+ return !!p;
+ }), false);
+
+ // start at the root
+ var current = FS.root;
+ var current_path = '/';
+
+ for (var i = 0; i < parts.length; i++) {
+ var islast = (i === parts.length-1);
+ if (islast && opts.parent) {
+ // stop resolving
+ break;
+ }
+
+ current = FS.lookupNode(current, parts[i]);
+ current_path = PATH.join2(current_path, parts[i]);
+
+ // jump to the mount's root node if this is a mountpoint
+ if (FS.isMountpoint(current)) {
+ if (!islast || (islast && opts.follow_mount)) {
+ current = current.mounted.root;
+ }
+ }
+
+ // by default, lookupPath will not follow a symlink if it is the final path component.
+ // setting opts.follow = true will override this behavior.
+ if (!islast || opts.follow) {
+ var count = 0;
+ while (FS.isLink(current.mode)) {
+ var link = FS.readlink(current_path);
+ current_path = PATH.resolve(PATH.dirname(current_path), link);
+
+ var lookup = FS.lookupPath(current_path, { recurse_count: opts.recurse_count });
+ current = lookup.node;
+
+ if (count++ > 40) { // limit max consecutive symlinks to 40 (SYMLOOP_MAX).
+ throw new FS.ErrnoError(ERRNO_CODES.ELOOP);
+ }
+ }
+ }
+ }
+
+ return { path: current_path, node: current };
+ },getPath:function (node) {
+ var path;
+ while (true) {
+ if (FS.isRoot(node)) {
+ var mount = node.mount.mountpoint;
+ if (!path) return mount;
+ return mount[mount.length-1] !== '/' ? mount + '/' + path : mount + path;
+ }
+ path = path ? node.name + '/' + path : node.name;
+ node = node.parent;
+ }
+ },hashName:function (parentid, name) {
+ var hash = 0;
+
+
+ for (var i = 0; i < name.length; i++) {
+ hash = ((hash << 5) - hash + name.charCodeAt(i)) | 0;
+ }
+ return ((parentid + hash) >>> 0) % FS.nameTable.length;
+ },hashAddNode:function (node) {
+ var hash = FS.hashName(node.parent.id, node.name);
+ node.name_next = FS.nameTable[hash];
+ FS.nameTable[hash] = node;
+ },hashRemoveNode:function (node) {
+ var hash = FS.hashName(node.parent.id, node.name);
+ if (FS.nameTable[hash] === node) {
+ FS.nameTable[hash] = node.name_next;
+ } else {
+ var current = FS.nameTable[hash];
+ while (current) {
+ if (current.name_next === node) {
+ current.name_next = node.name_next;
+ break;
+ }
+ current = current.name_next;
+ }
+ }
+ },lookupNode:function (parent, name) {
+ var err = FS.mayLookup(parent);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ var hash = FS.hashName(parent.id, name);
+ for (var node = FS.nameTable[hash]; node; node = node.name_next) {
+ var nodeName = node.name;
+ if (node.parent.id === parent.id && nodeName === name) {
+ return node;
+ }
+ }
+ // if we failed to find it in the cache, call into the VFS
+ return FS.lookup(parent, name);
+ },createNode:function (parent, name, mode, rdev) {
+ if (!FS.FSNode) {
+ FS.FSNode = function(parent, name, mode, rdev) {
+ if (!parent) {
+ parent = this; // root node sets parent to itself
+ }
+ this.parent = parent;
+ this.mount = parent.mount;
+ this.mounted = null;
+ this.id = FS.nextInode++;
+ this.name = name;
+ this.mode = mode;
+ this.node_ops = {};
+ this.stream_ops = {};
+ this.rdev = rdev;
+ };
+
+ FS.FSNode.prototype = {};
+
+ // compatibility
+ var readMode = 292 | 73;
+ var writeMode = 146;
+
+ // NOTE we must use Object.defineProperties instead of individual calls to
+ // Object.defineProperty in order to make closure compiler happy
+ Object.defineProperties(FS.FSNode.prototype, {
+ read: {
+ get: function() { return (this.mode & readMode) === readMode; },
+ set: function(val) { val ? this.mode |= readMode : this.mode &= ~readMode; }
+ },
+ write: {
+ get: function() { return (this.mode & writeMode) === writeMode; },
+ set: function(val) { val ? this.mode |= writeMode : this.mode &= ~writeMode; }
+ },
+ isFolder: {
+ get: function() { return FS.isDir(this.mode); },
+ },
+ isDevice: {
+ get: function() { return FS.isChrdev(this.mode); },
+ },
+ });
+ }
+
+ var node = new FS.FSNode(parent, name, mode, rdev);
+
+ FS.hashAddNode(node);
+
+ return node;
+ },destroyNode:function (node) {
+ FS.hashRemoveNode(node);
+ },isRoot:function (node) {
+ return node === node.parent;
+ },isMountpoint:function (node) {
+ return !!node.mounted;
+ },isFile:function (mode) {
+ return (mode & 61440) === 32768;
+ },isDir:function (mode) {
+ return (mode & 61440) === 16384;
+ },isLink:function (mode) {
+ return (mode & 61440) === 40960;
+ },isChrdev:function (mode) {
+ return (mode & 61440) === 8192;
+ },isBlkdev:function (mode) {
+ return (mode & 61440) === 24576;
+ },isFIFO:function (mode) {
+ return (mode & 61440) === 4096;
+ },isSocket:function (mode) {
+ return (mode & 49152) === 49152;
+ },flagModes:{"r":0,"rs":1052672,"r+":2,"w":577,"wx":705,"xw":705,"w+":578,"wx+":706,"xw+":706,"a":1089,"ax":1217,"xa":1217,"a+":1090,"ax+":1218,"xa+":1218},modeStringToFlags:function (str) {
+ var flags = FS.flagModes[str];
+ if (typeof flags === 'undefined') {
+ throw new Error('Unknown file open mode: ' + str);
+ }
+ return flags;
+ },flagsToPermissionString:function (flag) {
+ var accmode = flag & 2097155;
+ var perms = ['r', 'w', 'rw'][accmode];
+ if ((flag & 512)) {
+ perms += 'w';
+ }
+ return perms;
+ },nodePermissions:function (node, perms) {
+ if (FS.ignorePermissions) {
+ return 0;
+ }
+ // return 0 if any user, group or owner bits are set.
+ if (perms.indexOf('r') !== -1 && !(node.mode & 292)) {
+ return ERRNO_CODES.EACCES;
+ } else if (perms.indexOf('w') !== -1 && !(node.mode & 146)) {
+ return ERRNO_CODES.EACCES;
+ } else if (perms.indexOf('x') !== -1 && !(node.mode & 73)) {
+ return ERRNO_CODES.EACCES;
+ }
+ return 0;
+ },mayLookup:function (dir) {
+ return FS.nodePermissions(dir, 'x');
+ },mayCreate:function (dir, name) {
+ try {
+ var node = FS.lookupNode(dir, name);
+ return ERRNO_CODES.EEXIST;
+ } catch (e) {
+ }
+ return FS.nodePermissions(dir, 'wx');
+ },mayDelete:function (dir, name, isdir) {
+ var node;
+ try {
+ node = FS.lookupNode(dir, name);
+ } catch (e) {
+ return e.errno;
+ }
+ var err = FS.nodePermissions(dir, 'wx');
+ if (err) {
+ return err;
+ }
+ if (isdir) {
+ if (!FS.isDir(node.mode)) {
+ return ERRNO_CODES.ENOTDIR;
+ }
+ if (FS.isRoot(node) || FS.getPath(node) === FS.cwd()) {
+ return ERRNO_CODES.EBUSY;
+ }
+ } else {
+ if (FS.isDir(node.mode)) {
+ return ERRNO_CODES.EISDIR;
+ }
+ }
+ return 0;
+ },mayOpen:function (node, flags) {
+ if (!node) {
+ return ERRNO_CODES.ENOENT;
+ }
+ if (FS.isLink(node.mode)) {
+ return ERRNO_CODES.ELOOP;
+ } else if (FS.isDir(node.mode)) {
+ if ((flags & 2097155) !== 0 || // opening for write
+ (flags & 512)) {
+ return ERRNO_CODES.EISDIR;
+ }
+ }
+ return FS.nodePermissions(node, FS.flagsToPermissionString(flags));
+ },MAX_OPEN_FDS:4096,nextfd:function (fd_start, fd_end) {
+ fd_start = fd_start || 0;
+ fd_end = fd_end || FS.MAX_OPEN_FDS;
+ for (var fd = fd_start; fd <= fd_end; fd++) {
+ if (!FS.streams[fd]) {
+ return fd;
+ }
+ }
+ throw new FS.ErrnoError(ERRNO_CODES.EMFILE);
+ },getStream:function (fd) {
+ return FS.streams[fd];
+ },createStream:function (stream, fd_start, fd_end) {
+ if (!FS.FSStream) {
+ FS.FSStream = function(){};
+ FS.FSStream.prototype = {};
+ // compatibility
+ Object.defineProperties(FS.FSStream.prototype, {
+ object: {
+ get: function() { return this.node; },
+ set: function(val) { this.node = val; }
+ },
+ isRead: {
+ get: function() { return (this.flags & 2097155) !== 1; }
+ },
+ isWrite: {
+ get: function() { return (this.flags & 2097155) !== 0; }
+ },
+ isAppend: {
+ get: function() { return (this.flags & 1024); }
+ }
+ });
+ }
+ if (0) {
+ // reuse the object
+ stream.__proto__ = FS.FSStream.prototype;
+ } else {
+ var newStream = new FS.FSStream();
+ for (var p in stream) {
+ newStream[p] = stream[p];
+ }
+ stream = newStream;
+ }
+ var fd = FS.nextfd(fd_start, fd_end);
+ stream.fd = fd;
+ FS.streams[fd] = stream;
+ return stream;
+ },closeStream:function (fd) {
+ FS.streams[fd] = null;
+ },getStreamFromPtr:function (ptr) {
+ return FS.streams[ptr - 1];
+ },getPtrForStream:function (stream) {
+ return stream ? stream.fd + 1 : 0;
+ },chrdev_stream_ops:{open:function (stream) {
+ var device = FS.getDevice(stream.node.rdev);
+ // override node's stream ops with the device's
+ stream.stream_ops = device.stream_ops;
+ // forward the open call
+ if (stream.stream_ops.open) {
+ stream.stream_ops.open(stream);
+ }
+ },llseek:function () {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }},major:function (dev) {
+ return ((dev) >> 8);
+ },minor:function (dev) {
+ return ((dev) & 0xff);
+ },makedev:function (ma, mi) {
+ return ((ma) << 8 | (mi));
+ },registerDevice:function (dev, ops) {
+ FS.devices[dev] = { stream_ops: ops };
+ },getDevice:function (dev) {
+ return FS.devices[dev];
+ },getMounts:function (mount) {
+ var mounts = [];
+ var check = [mount];
+
+ while (check.length) {
+ var m = check.pop();
+
+ mounts.push(m);
+
+ check.push.apply(check, m.mounts);
+ }
+
+ return mounts;
+ },syncfs:function (populate, callback) {
+ if (typeof(populate) === 'function') {
+ callback = populate;
+ populate = false;
+ }
+
+ var mounts = FS.getMounts(FS.root.mount);
+ var completed = 0;
+
+ function done(err) {
+ if (err) {
+ if (!done.errored) {
+ done.errored = true;
+ return callback(err);
+ }
+ return;
+ }
+ if (++completed >= mounts.length) {
+ callback(null);
+ }
+ };
+
+ // sync all mounts
+ mounts.forEach(function (mount) {
+ if (!mount.type.syncfs) {
+ return done(null);
+ }
+ mount.type.syncfs(mount, populate, done);
+ });
+ },mount:function (type, opts, mountpoint) {
+ var root = mountpoint === '/';
+ var pseudo = !mountpoint;
+ var node;
+
+ if (root && FS.root) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ } else if (!root && !pseudo) {
+ var lookup = FS.lookupPath(mountpoint, { follow_mount: false });
+
+ mountpoint = lookup.path; // use the absolute path
+ node = lookup.node;
+
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+
+ if (!FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTDIR);
+ }
+ }
+
+ var mount = {
+ type: type,
+ opts: opts,
+ mountpoint: mountpoint,
+ mounts: []
+ };
+
+ // create a root node for the fs
+ var mountRoot = type.mount(mount);
+ mountRoot.mount = mount;
+ mount.root = mountRoot;
+
+ if (root) {
+ FS.root = mountRoot;
+ } else if (node) {
+ // set as a mountpoint
+ node.mounted = mount;
+
+ // add the new mount to the current mount's children
+ if (node.mount) {
+ node.mount.mounts.push(mount);
+ }
+ }
+
+ return mountRoot;
+ },unmount:function (mountpoint) {
+ var lookup = FS.lookupPath(mountpoint, { follow_mount: false });
+
+ if (!FS.isMountpoint(lookup.node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+
+ // destroy the nodes for this mount, and all its child mounts
+ var node = lookup.node;
+ var mount = node.mounted;
+ var mounts = FS.getMounts(mount);
+
+ Object.keys(FS.nameTable).forEach(function (hash) {
+ var current = FS.nameTable[hash];
+
+ while (current) {
+ var next = current.name_next;
+
+ if (mounts.indexOf(current.mount) !== -1) {
+ FS.destroyNode(current);
+ }
+
+ current = next;
+ }
+ });
+
+ // no longer a mountpoint
+ node.mounted = null;
+
+ // remove this mount from the child mounts
+ var idx = node.mount.mounts.indexOf(mount);
+ assert(idx !== -1);
+ node.mount.mounts.splice(idx, 1);
+ },lookup:function (parent, name) {
+ return parent.node_ops.lookup(parent, name);
+ },mknod:function (path, mode, dev) {
+ var lookup = FS.lookupPath(path, { parent: true });
+ var parent = lookup.node;
+ var name = PATH.basename(path);
+ var err = FS.mayCreate(parent, name);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.mknod) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ return parent.node_ops.mknod(parent, name, mode, dev);
+ },create:function (path, mode) {
+ mode = mode !== undefined ? mode : 438 /* 0666 */;
+ mode &= 4095;
+ mode |= 32768;
+ return FS.mknod(path, mode, 0);
+ },mkdir:function (path, mode) {
+ mode = mode !== undefined ? mode : 511 /* 0777 */;
+ mode &= 511 | 512;
+ mode |= 16384;
+ return FS.mknod(path, mode, 0);
+ },mkdev:function (path, mode, dev) {
+ if (typeof(dev) === 'undefined') {
+ dev = mode;
+ mode = 438 /* 0666 */;
+ }
+ mode |= 8192;
+ return FS.mknod(path, mode, dev);
+ },symlink:function (oldpath, newpath) {
+ var lookup = FS.lookupPath(newpath, { parent: true });
+ var parent = lookup.node;
+ var newname = PATH.basename(newpath);
+ var err = FS.mayCreate(parent, newname);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.symlink) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ return parent.node_ops.symlink(parent, newname, oldpath);
+ },rename:function (old_path, new_path) {
+ var old_dirname = PATH.dirname(old_path);
+ var new_dirname = PATH.dirname(new_path);
+ var old_name = PATH.basename(old_path);
+ var new_name = PATH.basename(new_path);
+ // parents must exist
+ var lookup, old_dir, new_dir;
+ try {
+ lookup = FS.lookupPath(old_path, { parent: true });
+ old_dir = lookup.node;
+ lookup = FS.lookupPath(new_path, { parent: true });
+ new_dir = lookup.node;
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ // need to be part of the same mount
+ if (old_dir.mount !== new_dir.mount) {
+ throw new FS.ErrnoError(ERRNO_CODES.EXDEV);
+ }
+ // source must exist
+ var old_node = FS.lookupNode(old_dir, old_name);
+ // old path should not be an ancestor of the new path
+ var relative = PATH.relative(old_path, new_dirname);
+ if (relative.charAt(0) !== '.') {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ // new path should not be an ancestor of the old path
+ relative = PATH.relative(new_path, old_dirname);
+ if (relative.charAt(0) !== '.') {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTEMPTY);
+ }
+ // see if the new path already exists
+ var new_node;
+ try {
+ new_node = FS.lookupNode(new_dir, new_name);
+ } catch (e) {
+ // not fatal
+ }
+ // early out if nothing needs to change
+ if (old_node === new_node) {
+ return;
+ }
+ // we'll need to delete the old entry
+ var isdir = FS.isDir(old_node.mode);
+ var err = FS.mayDelete(old_dir, old_name, isdir);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ // need delete permissions if we'll be overwriting.
+ // need create permissions if new doesn't already exist.
+ err = new_node ?
+ FS.mayDelete(new_dir, new_name, isdir) :
+ FS.mayCreate(new_dir, new_name);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!old_dir.node_ops.rename) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isMountpoint(old_node) || (new_node && FS.isMountpoint(new_node))) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ // if we are going to change the parent, check write permissions
+ if (new_dir !== old_dir) {
+ err = FS.nodePermissions(old_dir, 'w');
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ }
+ // remove the node from the lookup hash
+ FS.hashRemoveNode(old_node);
+ // do the underlying fs rename
+ try {
+ old_dir.node_ops.rename(old_node, new_dir, new_name);
+ } catch (e) {
+ throw e;
+ } finally {
+ // add the node back to the hash (in case node_ops.rename
+ // changed its name)
+ FS.hashAddNode(old_node);
+ }
+ },rmdir:function (path) {
+ var lookup = FS.lookupPath(path, { parent: true });
+ var parent = lookup.node;
+ var name = PATH.basename(path);
+ var node = FS.lookupNode(parent, name);
+ var err = FS.mayDelete(parent, name, true);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.rmdir) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ parent.node_ops.rmdir(parent, name);
+ FS.destroyNode(node);
+ },readdir:function (path) {
+ var lookup = FS.lookupPath(path, { follow: true });
+ var node = lookup.node;
+ if (!node.node_ops.readdir) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTDIR);
+ }
+ return node.node_ops.readdir(node);
+ },unlink:function (path) {
+ var lookup = FS.lookupPath(path, { parent: true });
+ var parent = lookup.node;
+ var name = PATH.basename(path);
+ var node = FS.lookupNode(parent, name);
+ var err = FS.mayDelete(parent, name, false);
+ if (err) {
+ // POSIX says unlink should set EPERM, not EISDIR
+ if (err === ERRNO_CODES.EISDIR) err = ERRNO_CODES.EPERM;
+ throw new FS.ErrnoError(err);
+ }
+ if (!parent.node_ops.unlink) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBUSY);
+ }
+ parent.node_ops.unlink(parent, name);
+ FS.destroyNode(node);
+ },readlink:function (path) {
+ var lookup = FS.lookupPath(path);
+ var link = lookup.node;
+ if (!link.node_ops.readlink) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ return link.node_ops.readlink(link);
+ },stat:function (path, dontFollow) {
+ var lookup = FS.lookupPath(path, { follow: !dontFollow });
+ var node = lookup.node;
+ if (!node.node_ops.getattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ return node.node_ops.getattr(node);
+ },lstat:function (path) {
+ return FS.stat(path, true);
+ },chmod:function (path, mode, dontFollow) {
+ var node;
+ if (typeof path === 'string') {
+ var lookup = FS.lookupPath(path, { follow: !dontFollow });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ if (!node.node_ops.setattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ node.node_ops.setattr(node, {
+ mode: (mode & 4095) | (node.mode & ~4095),
+ timestamp: Date.now()
+ });
+ },lchmod:function (path, mode) {
+ FS.chmod(path, mode, true);
+ },fchmod:function (fd, mode) {
+ var stream = FS.getStream(fd);
+ if (!stream) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ FS.chmod(stream.node, mode);
+ },chown:function (path, uid, gid, dontFollow) {
+ var node;
+ if (typeof path === 'string') {
+ var lookup = FS.lookupPath(path, { follow: !dontFollow });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ if (!node.node_ops.setattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ node.node_ops.setattr(node, {
+ timestamp: Date.now()
+ // we ignore the uid / gid for now
+ });
+ },lchown:function (path, uid, gid) {
+ FS.chown(path, uid, gid, true);
+ },fchown:function (fd, uid, gid) {
+ var stream = FS.getStream(fd);
+ if (!stream) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ FS.chown(stream.node, uid, gid);
+ },truncate:function (path, len) {
+ if (len < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var node;
+ if (typeof path === 'string') {
+ var lookup = FS.lookupPath(path, { follow: true });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ if (!node.node_ops.setattr) {
+ throw new FS.ErrnoError(ERRNO_CODES.EPERM);
+ }
+ if (FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EISDIR);
+ }
+ if (!FS.isFile(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var err = FS.nodePermissions(node, 'w');
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ node.node_ops.setattr(node, {
+ size: len,
+ timestamp: Date.now()
+ });
+ },ftruncate:function (fd, len) {
+ var stream = FS.getStream(fd);
+ if (!stream) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ FS.truncate(stream.node, len);
+ },utime:function (path, atime, mtime) {
+ var lookup = FS.lookupPath(path, { follow: true });
+ var node = lookup.node;
+ node.node_ops.setattr(node, {
+ timestamp: Math.max(atime, mtime)
+ });
+ },open:function (path, flags, mode, fd_start, fd_end) {
+ flags = typeof flags === 'string' ? FS.modeStringToFlags(flags) : flags;
+ mode = typeof mode === 'undefined' ? 438 /* 0666 */ : mode;
+ if ((flags & 64)) {
+ mode = (mode & 4095) | 32768;
+ } else {
+ mode = 0;
+ }
+ var node;
+ if (typeof path === 'object') {
+ node = path;
+ } else {
+ path = PATH.normalize(path);
+ try {
+ var lookup = FS.lookupPath(path, {
+ follow: !(flags & 131072)
+ });
+ node = lookup.node;
+ } catch (e) {
+ // ignore
+ }
+ }
+ // perhaps we need to create the node
+ if ((flags & 64)) {
+ if (node) {
+ // if O_CREAT and O_EXCL are set, error out if the node already exists
+ if ((flags & 128)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EEXIST);
+ }
+ } else {
+ // node doesn't exist, try to create it
+ node = FS.mknod(path, mode, 0);
+ }
+ }
+ if (!node) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOENT);
+ }
+ // can't truncate a device
+ if (FS.isChrdev(node.mode)) {
+ flags &= ~512;
+ }
+ // check permissions
+ var err = FS.mayOpen(node, flags);
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ // do truncation if necessary
+ if ((flags & 512)) {
+ FS.truncate(node, 0);
+ }
+ // we've already handled these, don't pass down to the underlying vfs
+ flags &= ~(128 | 512);
+
+ // register the stream with the filesystem
+ var stream = FS.createStream({
+ node: node,
+ path: FS.getPath(node), // we want the absolute path to the node
+ flags: flags,
+ seekable: true,
+ position: 0,
+ stream_ops: node.stream_ops,
+ // used by the file family libc calls (fopen, fwrite, ferror, etc.)
+ ungotten: [],
+ error: false
+ }, fd_start, fd_end);
+ // call the new stream's open function
+ if (stream.stream_ops.open) {
+ stream.stream_ops.open(stream);
+ }
+ if (Module['logReadFiles'] && !(flags & 1)) {
+ if (!FS.readFiles) FS.readFiles = {};
+ if (!(path in FS.readFiles)) {
+ FS.readFiles[path] = 1;
+ Module['printErr']('read file: ' + path);
+ }
+ }
+ return stream;
+ },close:function (stream) {
+ try {
+ if (stream.stream_ops.close) {
+ stream.stream_ops.close(stream);
+ }
+ } catch (e) {
+ throw e;
+ } finally {
+ FS.closeStream(stream.fd);
+ }
+ },llseek:function (stream, offset, whence) {
+ if (!stream.seekable || !stream.stream_ops.llseek) {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }
+ return stream.stream_ops.llseek(stream, offset, whence);
+ },read:function (stream, buffer, offset, length, position) {
+ if (length < 0 || position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ if ((stream.flags & 2097155) === 1) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if (FS.isDir(stream.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EISDIR);
+ }
+ if (!stream.stream_ops.read) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var seeking = true;
+ if (typeof position === 'undefined') {
+ position = stream.position;
+ seeking = false;
+ } else if (!stream.seekable) {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }
+ var bytesRead = stream.stream_ops.read(stream, buffer, offset, length, position);
+ if (!seeking) stream.position += bytesRead;
+ return bytesRead;
+ },write:function (stream, buffer, offset, length, position, canOwn) {
+ if (length < 0 || position < 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if (FS.isDir(stream.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EISDIR);
+ }
+ if (!stream.stream_ops.write) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var seeking = true;
+ if (typeof position === 'undefined') {
+ position = stream.position;
+ seeking = false;
+ } else if (!stream.seekable) {
+ throw new FS.ErrnoError(ERRNO_CODES.ESPIPE);
+ }
+ if (stream.flags & 1024) {
+ // seek to the end before writing in append mode
+ FS.llseek(stream, 0, 2);
+ }
+ var bytesWritten = stream.stream_ops.write(stream, buffer, offset, length, position, canOwn);
+ if (!seeking) stream.position += bytesWritten;
+ return bytesWritten;
+ },allocate:function (stream, offset, length) {
+ if (offset < 0 || length <= 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EBADF);
+ }
+ if (!FS.isFile(stream.node.mode) && !FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ if (!stream.stream_ops.allocate) {
+ throw new FS.ErrnoError(ERRNO_CODES.EOPNOTSUPP);
+ }
+ stream.stream_ops.allocate(stream, offset, length);
+ },mmap:function (stream, buffer, offset, length, position, prot, flags) {
+ // TODO if PROT is PROT_WRITE, make sure we have write access
+ if ((stream.flags & 2097155) === 1) {
+ throw new FS.ErrnoError(ERRNO_CODES.EACCES);
+ }
+ if (!stream.stream_ops.mmap) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENODEV);
+ }
+ return stream.stream_ops.mmap(stream, buffer, offset, length, position, prot, flags);
+ },ioctl:function (stream, cmd, arg) {
+ if (!stream.stream_ops.ioctl) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTTY);
+ }
+ return stream.stream_ops.ioctl(stream, cmd, arg);
+ },readFile:function (path, opts) {
+ opts = opts || {};
+ opts.flags = opts.flags || 'r';
+ opts.encoding = opts.encoding || 'binary';
+ if (opts.encoding !== 'utf8' && opts.encoding !== 'binary') {
+ throw new Error('Invalid encoding type "' + opts.encoding + '"');
+ }
+ var ret;
+ var stream = FS.open(path, opts.flags);
+ var stat = FS.stat(path);
+ var length = stat.size;
+ var buf = new Uint8Array(length);
+ FS.read(stream, buf, 0, length, 0);
+ if (opts.encoding === 'utf8') {
+ ret = '';
+ var utf8 = new Runtime.UTF8Processor();
+ for (var i = 0; i < length; i++) {
+ ret += utf8.processCChar(buf[i]);
+ }
+ } else if (opts.encoding === 'binary') {
+ ret = buf;
+ }
+ FS.close(stream);
+ return ret;
+ },writeFile:function (path, data, opts) {
+ opts = opts || {};
+ opts.flags = opts.flags || 'w';
+ opts.encoding = opts.encoding || 'utf8';
+ if (opts.encoding !== 'utf8' && opts.encoding !== 'binary') {
+ throw new Error('Invalid encoding type "' + opts.encoding + '"');
+ }
+ var stream = FS.open(path, opts.flags, opts.mode);
+ if (opts.encoding === 'utf8') {
+ var utf8 = new Runtime.UTF8Processor();
+ var buf = new Uint8Array(utf8.processJSString(data));
+ FS.write(stream, buf, 0, buf.length, 0, opts.canOwn);
+ } else if (opts.encoding === 'binary') {
+ FS.write(stream, data, 0, data.length, 0, opts.canOwn);
+ }
+ FS.close(stream);
+ },cwd:function () {
+ return FS.currentPath;
+ },chdir:function (path) {
+ var lookup = FS.lookupPath(path, { follow: true });
+ if (!FS.isDir(lookup.node.mode)) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTDIR);
+ }
+ var err = FS.nodePermissions(lookup.node, 'x');
+ if (err) {
+ throw new FS.ErrnoError(err);
+ }
+ FS.currentPath = lookup.path;
+ },createDefaultDirectories:function () {
+ FS.mkdir('/tmp');
+ },createDefaultDevices:function () {
+ // create /dev
+ FS.mkdir('/dev');
+ // setup /dev/null
+ FS.registerDevice(FS.makedev(1, 3), {
+ read: function() { return 0; },
+ write: function() { return 0; }
+ });
+ FS.mkdev('/dev/null', FS.makedev(1, 3));
+ // setup /dev/tty and /dev/tty1
+ // stderr needs to print output using Module['printErr']
+ // so we register a second tty just for it.
+ TTY.register(FS.makedev(5, 0), TTY.default_tty_ops);
+ TTY.register(FS.makedev(6, 0), TTY.default_tty1_ops);
+ FS.mkdev('/dev/tty', FS.makedev(5, 0));
+ FS.mkdev('/dev/tty1', FS.makedev(6, 0));
+ // we're not going to emulate the actual shm device,
+ // just create the tmp dirs that reside in it commonly
+ FS.mkdir('/dev/shm');
+ FS.mkdir('/dev/shm/tmp');
+ },createStandardStreams:function () {
+ // TODO deprecate the old functionality of a single
+ // input / output callback and that utilizes FS.createDevice
+ // and instead require a unique set of stream ops
+
+ // by default, we symlink the standard streams to the
+ // default tty devices. however, if the standard streams
+ // have been overwritten we create a unique device for
+ // them instead.
+ if (Module['stdin']) {
+ FS.createDevice('/dev', 'stdin', Module['stdin']);
+ } else {
+ FS.symlink('/dev/tty', '/dev/stdin');
+ }
+ if (Module['stdout']) {
+ FS.createDevice('/dev', 'stdout', null, Module['stdout']);
+ } else {
+ FS.symlink('/dev/tty', '/dev/stdout');
+ }
+ if (Module['stderr']) {
+ FS.createDevice('/dev', 'stderr', null, Module['stderr']);
+ } else {
+ FS.symlink('/dev/tty1', '/dev/stderr');
+ }
+
+ // open default streams for the stdin, stdout and stderr devices
+ var stdin = FS.open('/dev/stdin', 'r');
+ HEAP32[((_stdin)>>2)]=FS.getPtrForStream(stdin);
+ assert(stdin.fd === 0, 'invalid handle for stdin (' + stdin.fd + ')');
+
+ var stdout = FS.open('/dev/stdout', 'w');
+ HEAP32[((_stdout)>>2)]=FS.getPtrForStream(stdout);
+ assert(stdout.fd === 1, 'invalid handle for stdout (' + stdout.fd + ')');
+
+ var stderr = FS.open('/dev/stderr', 'w');
+ HEAP32[((_stderr)>>2)]=FS.getPtrForStream(stderr);
+ assert(stderr.fd === 2, 'invalid handle for stderr (' + stderr.fd + ')');
+ },ensureErrnoError:function () {
+ if (FS.ErrnoError) return;
+ FS.ErrnoError = function ErrnoError(errno) {
+ this.errno = errno;
+ for (var key in ERRNO_CODES) {
+ if (ERRNO_CODES[key] === errno) {
+ this.code = key;
+ break;
+ }
+ }
+ this.message = ERRNO_MESSAGES[errno];
+ };
+ FS.ErrnoError.prototype = new Error();
+ FS.ErrnoError.prototype.constructor = FS.ErrnoError;
+ // Some errors may happen quite a bit, to avoid overhead we reuse them (and suffer a lack of stack info)
+ [ERRNO_CODES.ENOENT].forEach(function(code) {
+ FS.genericErrors[code] = new FS.ErrnoError(code);
+ FS.genericErrors[code].stack = '<generic error, no stack>';
+ });
+ },staticInit:function () {
+ FS.ensureErrnoError();
+
+ FS.nameTable = new Array(4096);
+
+ FS.mount(MEMFS, {}, '/');
+
+ FS.createDefaultDirectories();
+ FS.createDefaultDevices();
+ },init:function (input, output, error) {
+ assert(!FS.init.initialized, 'FS.init was previously called. If you want to initialize later with custom parameters, remove any earlier calls (note that one is automatically added to the generated code)');
+ FS.init.initialized = true;
+
+ FS.ensureErrnoError();
+
+ // Allow Module.stdin etc. to provide defaults, if none explicitly passed to us here
+ Module['stdin'] = input || Module['stdin'];
+ Module['stdout'] = output || Module['stdout'];
+ Module['stderr'] = error || Module['stderr'];
+
+ FS.createStandardStreams();
+ },quit:function () {
+ FS.init.initialized = false;
+ for (var i = 0; i < FS.streams.length; i++) {
+ var stream = FS.streams[i];
+ if (!stream) {
+ continue;
+ }
+ FS.close(stream);
+ }
+ },getMode:function (canRead, canWrite) {
+ var mode = 0;
+ if (canRead) mode |= 292 | 73;
+ if (canWrite) mode |= 146;
+ return mode;
+ },joinPath:function (parts, forceRelative) {
+ var path = PATH.join.apply(null, parts);
+ if (forceRelative && path[0] == '/') path = path.substr(1);
+ return path;
+ },absolutePath:function (relative, base) {
+ return PATH.resolve(base, relative);
+ },standardizePath:function (path) {
+ return PATH.normalize(path);
+ },findObject:function (path, dontResolveLastLink) {
+ var ret = FS.analyzePath(path, dontResolveLastLink);
+ if (ret.exists) {
+ return ret.object;
+ } else {
+ ___setErrNo(ret.error);
+ return null;
+ }
+ },analyzePath:function (path, dontResolveLastLink) {
+ // operate from within the context of the symlink's target
+ try {
+ var lookup = FS.lookupPath(path, { follow: !dontResolveLastLink });
+ path = lookup.path;
+ } catch (e) {
+ }
+ var ret = {
+ isRoot: false, exists: false, error: 0, name: null, path: null, object: null,
+ parentExists: false, parentPath: null, parentObject: null
+ };
+ try {
+ var lookup = FS.lookupPath(path, { parent: true });
+ ret.parentExists = true;
+ ret.parentPath = lookup.path;
+ ret.parentObject = lookup.node;
+ ret.name = PATH.basename(path);
+ lookup = FS.lookupPath(path, { follow: !dontResolveLastLink });
+ ret.exists = true;
+ ret.path = lookup.path;
+ ret.object = lookup.node;
+ ret.name = lookup.node.name;
+ ret.isRoot = lookup.path === '/';
+ } catch (e) {
+ ret.error = e.errno;
+ };
+ return ret;
+ },createFolder:function (parent, name, canRead, canWrite) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ var mode = FS.getMode(canRead, canWrite);
+ return FS.mkdir(path, mode);
+ },createPath:function (parent, path, canRead, canWrite) {
+ parent = typeof parent === 'string' ? parent : FS.getPath(parent);
+ var parts = path.split('/').reverse();
+ while (parts.length) {
+ var part = parts.pop();
+ if (!part) continue;
+ var current = PATH.join2(parent, part);
+ try {
+ FS.mkdir(current);
+ } catch (e) {
+ // ignore EEXIST
+ }
+ parent = current;
+ }
+ return current;
+ },createFile:function (parent, name, properties, canRead, canWrite) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ var mode = FS.getMode(canRead, canWrite);
+ return FS.create(path, mode);
+ },createDataFile:function (parent, name, data, canRead, canWrite, canOwn) {
+ var path = name ? PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name) : parent;
+ var mode = FS.getMode(canRead, canWrite);
+ var node = FS.create(path, mode);
+ if (data) {
+ if (typeof data === 'string') {
+ var arr = new Array(data.length);
+ for (var i = 0, len = data.length; i < len; ++i) arr[i] = data.charCodeAt(i);
+ data = arr;
+ }
+ // make sure we can write to the file
+ FS.chmod(node, mode | 146);
+ var stream = FS.open(node, 'w');
+ FS.write(stream, data, 0, data.length, 0, canOwn);
+ FS.close(stream);
+ FS.chmod(node, mode);
+ }
+ return node;
+ },createDevice:function (parent, name, input, output) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ var mode = FS.getMode(!!input, !!output);
+ if (!FS.createDevice.major) FS.createDevice.major = 64;
+ var dev = FS.makedev(FS.createDevice.major++, 0);
+ // Create a fake device that a set of stream ops to emulate
+ // the old behavior.
+ FS.registerDevice(dev, {
+ open: function(stream) {
+ stream.seekable = false;
+ },
+ close: function(stream) {
+ // flush any pending line data
+ if (output && output.buffer && output.buffer.length) {
+ output(10);
+ }
+ },
+ read: function(stream, buffer, offset, length, pos /* ignored */) {
+ var bytesRead = 0;
+ for (var i = 0; i < length; i++) {
+ var result;
+ try {
+ result = input();
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ if (result === undefined && bytesRead === 0) {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ if (result === null || result === undefined) break;
+ bytesRead++;
+ buffer[offset+i] = result;
+ }
+ if (bytesRead) {
+ stream.node.timestamp = Date.now();
+ }
+ return bytesRead;
+ },
+ write: function(stream, buffer, offset, length, pos) {
+ for (var i = 0; i < length; i++) {
+ try {
+ output(buffer[offset+i]);
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ }
+ if (length) {
+ stream.node.timestamp = Date.now();
+ }
+ return i;
+ }
+ });
+ return FS.mkdev(path, mode, dev);
+ },createLink:function (parent, name, target, canRead, canWrite) {
+ var path = PATH.join2(typeof parent === 'string' ? parent : FS.getPath(parent), name);
+ return FS.symlink(target, path);
+ },forceLoadFile:function (obj) {
+ if (obj.isDevice || obj.isFolder || obj.link || obj.contents) return true;
+ var success = true;
+ if (typeof XMLHttpRequest !== 'undefined') {
+ throw new Error("Lazy loading should have been performed (contents set) in createLazyFile, but it was not. Lazy loading only works in web workers. Use --embed-file or --preload-file in emcc on the main thread.");
+ } else if (Module['read']) {
+ // Command-line.
+ try {
+ // WARNING: Can't read binary files in V8's d8 or tracemonkey's js, as
+ // read() will try to parse UTF8.
+ obj.contents = intArrayFromString(Module['read'](obj.url), true);
+ } catch (e) {
+ success = false;
+ }
+ } else {
+ throw new Error('Cannot load without read() or XMLHttpRequest.');
+ }
+ if (!success) ___setErrNo(ERRNO_CODES.EIO);
+ return success;
+ },createLazyFile:function (parent, name, url, canRead, canWrite) {
+ // Lazy chunked Uint8Array (implements get and length from Uint8Array). Actual getting is abstracted away for eventual reuse.
+ function LazyUint8Array() {
+ this.lengthKnown = false;
+ this.chunks = []; // Loaded chunks. Index is the chunk number
+ }
+ LazyUint8Array.prototype.get = function LazyUint8Array_get(idx) {
+ if (idx > this.length-1 || idx < 0) {
+ return undefined;
+ }
+ var chunkOffset = idx % this.chunkSize;
+ var chunkNum = Math.floor(idx / this.chunkSize);
+ return this.getter(chunkNum)[chunkOffset];
+ }
+ LazyUint8Array.prototype.setDataGetter = function LazyUint8Array_setDataGetter(getter) {
+ this.getter = getter;
+ }
+ LazyUint8Array.prototype.cacheLength = function LazyUint8Array_cacheLength() {
+ // Find length
+ var xhr = new XMLHttpRequest();
+ xhr.open('HEAD', url, false);
+ xhr.send(null);
+ if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". Status: " + xhr.status);
+ var datalength = Number(xhr.getResponseHeader("Content-length"));
+ var header;
+ var hasByteServing = (header = xhr.getResponseHeader("Accept-Ranges")) && header === "bytes";
+ var chunkSize = 1024*1024; // Chunk size in bytes
+
+ if (!hasByteServing) chunkSize = datalength;
+
+ // Function to get a range from the remote URL.
+ var doXHR = (function(from, to) {
+ if (from > to) throw new Error("invalid range (" + from + ", " + to + ") or no bytes requested!");
+ if (to > datalength-1) throw new Error("only " + datalength + " bytes available! programmer error!");
+
+ // TODO: Use mozResponseArrayBuffer, responseStream, etc. if available.
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, false);
+ if (datalength !== chunkSize) xhr.setRequestHeader("Range", "bytes=" + from + "-" + to);
+
+ // Some hints to the browser that we want binary data.
+ if (typeof Uint8Array != 'undefined') xhr.responseType = 'arraybuffer';
+ if (xhr.overrideMimeType) {
+ xhr.overrideMimeType('text/plain; charset=x-user-defined');
+ }
+
+ xhr.send(null);
+ if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". Status: " + xhr.status);
+ if (xhr.response !== undefined) {
+ return new Uint8Array(xhr.response || []);
+ } else {
+ return intArrayFromString(xhr.responseText || '', true);
+ }
+ });
+ var lazyArray = this;
+ lazyArray.setDataGetter(function(chunkNum) {
+ var start = chunkNum * chunkSize;
+ var end = (chunkNum+1) * chunkSize - 1; // including this byte
+ end = Math.min(end, datalength-1); // if datalength-1 is selected, this is the last block
+ if (typeof(lazyArray.chunks[chunkNum]) === "undefined") {
+ lazyArray.chunks[chunkNum] = doXHR(start, end);
+ }
+ if (typeof(lazyArray.chunks[chunkNum]) === "undefined") throw new Error("doXHR failed!");
+ return lazyArray.chunks[chunkNum];
+ });
+
+ this._length = datalength;
+ this._chunkSize = chunkSize;
+ this.lengthKnown = true;
+ }
+ if (typeof XMLHttpRequest !== 'undefined') {
+ if (!ENVIRONMENT_IS_WORKER) throw 'Cannot do synchronous binary XHRs outside webworkers in modern browsers. Use --embed-file or --preload-file in emcc';
+ var lazyArray = new LazyUint8Array();
+ Object.defineProperty(lazyArray, "length", {
+ get: function() {
+ if(!this.lengthKnown) {
+ this.cacheLength();
+ }
+ return this._length;
+ }
+ });
+ Object.defineProperty(lazyArray, "chunkSize", {
+ get: function() {
+ if(!this.lengthKnown) {
+ this.cacheLength();
+ }
+ return this._chunkSize;
+ }
+ });
+
+ var properties = { isDevice: false, contents: lazyArray };
+ } else {
+ var properties = { isDevice: false, url: url };
+ }
+
+ var node = FS.createFile(parent, name, properties, canRead, canWrite);
+ // This is a total hack, but I want to get this lazy file code out of the
+ // core of MEMFS. If we want to keep this lazy file concept I feel it should
+ // be its own thin LAZYFS proxying calls to MEMFS.
+ if (properties.contents) {
+ node.contents = properties.contents;
+ } else if (properties.url) {
+ node.contents = null;
+ node.url = properties.url;
+ }
+ // override each stream op with one that tries to force load the lazy file first
+ var stream_ops = {};
+ var keys = Object.keys(node.stream_ops);
+ keys.forEach(function(key) {
+ var fn = node.stream_ops[key];
+ stream_ops[key] = function forceLoadLazyFile() {
+ if (!FS.forceLoadFile(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ return fn.apply(null, arguments);
+ };
+ });
+ // use a custom read function
+ stream_ops.read = function stream_ops_read(stream, buffer, offset, length, position) {
+ if (!FS.forceLoadFile(node)) {
+ throw new FS.ErrnoError(ERRNO_CODES.EIO);
+ }
+ var contents = stream.node.contents;
+ if (position >= contents.length)
+ return 0;
+ var size = Math.min(contents.length - position, length);
+ assert(size >= 0);
+ if (contents.slice) { // normal array
+ for (var i = 0; i < size; i++) {
+ buffer[offset + i] = contents[position + i];
+ }
+ } else {
+ for (var i = 0; i < size; i++) { // LazyUint8Array from sync binary XHR
+ buffer[offset + i] = contents.get(position + i);
+ }
+ }
+ return size;
+ };
+ node.stream_ops = stream_ops;
+ return node;
+ },createPreloadedFile:function (parent, name, url, canRead, canWrite, onload, onerror, dontCreateFile, canOwn) {
+ Browser.init();
+ // TODO we should allow people to just pass in a complete filename instead
+ // of parent and name being that we just join them anyways
+ var fullname = name ? PATH.resolve(PATH.join2(parent, name)) : parent;
+ function processData(byteArray) {
+ function finish(byteArray) {
+ if (!dontCreateFile) {
+ FS.createDataFile(parent, name, byteArray, canRead, canWrite, canOwn);
+ }
+ if (onload) onload();
+ removeRunDependency('cp ' + fullname);
+ }
+ var handled = false;
+ Module['preloadPlugins'].forEach(function(plugin) {
+ if (handled) return;
+ if (plugin['canHandle'](fullname)) {
+ plugin['handle'](byteArray, fullname, finish, function() {
+ if (onerror) onerror();
+ removeRunDependency('cp ' + fullname);
+ });
+ handled = true;
+ }
+ });
+ if (!handled) finish(byteArray);
+ }
+ addRunDependency('cp ' + fullname);
+ if (typeof url == 'string') {
+ Browser.asyncLoad(url, function(byteArray) {
+ processData(byteArray);
+ }, onerror);
+ } else {
+ processData(url);
+ }
+ },indexedDB:function () {
+ return window.indexedDB || window.mozIndexedDB || window.webkitIndexedDB || window.msIndexedDB;
+ },DB_NAME:function () {
+ return 'EM_FS_' + window.location.pathname;
+ },DB_VERSION:20,DB_STORE_NAME:"FILE_DATA",saveFilesToDB:function (paths, onload, onerror) {
+ onload = onload || function(){};
+ onerror = onerror || function(){};
+ var indexedDB = FS.indexedDB();
+ try {
+ var openRequest = indexedDB.open(FS.DB_NAME(), FS.DB_VERSION);
+ } catch (e) {
+ return onerror(e);
+ }
+ openRequest.onupgradeneeded = function openRequest_onupgradeneeded() {
+ console.log('creating db');
+ var db = openRequest.result;
+ db.createObjectStore(FS.DB_STORE_NAME);
+ };
+ openRequest.onsuccess = function openRequest_onsuccess() {
+ var db = openRequest.result;
+ var transaction = db.transaction([FS.DB_STORE_NAME], 'readwrite');
+ var files = transaction.objectStore(FS.DB_STORE_NAME);
+ var ok = 0, fail = 0, total = paths.length;
+ function finish() {
+ if (fail == 0) onload(); else onerror();
+ }
+ paths.forEach(function(path) {
+ var putRequest = files.put(FS.analyzePath(path).object.contents, path);
+ putRequest.onsuccess = function putRequest_onsuccess() { ok++; if (ok + fail == total) finish() };
+ putRequest.onerror = function putRequest_onerror() { fail++; if (ok + fail == total) finish() };
+ });
+ transaction.onerror = onerror;
+ };
+ openRequest.onerror = onerror;
+ },loadFilesFromDB:function (paths, onload, onerror) {
+ onload = onload || function(){};
+ onerror = onerror || function(){};
+ var indexedDB = FS.indexedDB();
+ try {
+ var openRequest = indexedDB.open(FS.DB_NAME(), FS.DB_VERSION);
+ } catch (e) {
+ return onerror(e);
+ }
+ openRequest.onupgradeneeded = onerror; // no database to load from
+ openRequest.onsuccess = function openRequest_onsuccess() {
+ var db = openRequest.result;
+ try {
+ var transaction = db.transaction([FS.DB_STORE_NAME], 'readonly');
+ } catch(e) {
+ onerror(e);
+ return;
+ }
+ var files = transaction.objectStore(FS.DB_STORE_NAME);
+ var ok = 0, fail = 0, total = paths.length;
+ function finish() {
+ if (fail == 0) onload(); else onerror();
+ }
+ paths.forEach(function(path) {
+ var getRequest = files.get(path);
+ getRequest.onsuccess = function getRequest_onsuccess() {
+ if (FS.analyzePath(path).exists) {
+ FS.unlink(path);
+ }
+ FS.createDataFile(PATH.dirname(path), PATH.basename(path), getRequest.result, true, true, true);
+ ok++;
+ if (ok + fail == total) finish();
+ };
+ getRequest.onerror = function getRequest_onerror() { fail++; if (ok + fail == total) finish() };
+ });
+ transaction.onerror = onerror;
+ };
+ openRequest.onerror = onerror;
+ }};
+
+
+
+
+ function _mkport() { throw 'TODO' }var SOCKFS={mount:function (mount) {
+ return FS.createNode(null, '/', 16384 | 511 /* 0777 */, 0);
+ },createSocket:function (family, type, protocol) {
+ var streaming = type == 1;
+ if (protocol) {
+ assert(streaming == (protocol == 6)); // if SOCK_STREAM, must be tcp
+ }
+
+ // create our internal socket structure
+ var sock = {
+ family: family,
+ type: type,
+ protocol: protocol,
+ server: null,
+ peers: {},
+ pending: [],
+ recv_queue: [],
+ sock_ops: SOCKFS.websocket_sock_ops
+ };
+
+ // create the filesystem node to store the socket structure
+ var name = SOCKFS.nextname();
+ var node = FS.createNode(SOCKFS.root, name, 49152, 0);
+ node.sock = sock;
+
+ // and the wrapping stream that enables library functions such
+ // as read and write to indirectly interact with the socket
+ var stream = FS.createStream({
+ path: name,
+ node: node,
+ flags: FS.modeStringToFlags('r+'),
+ seekable: false,
+ stream_ops: SOCKFS.stream_ops
+ });
+
+ // map the new stream to the socket structure (sockets have a 1:1
+ // relationship with a stream)
+ sock.stream = stream;
+
+ return sock;
+ },getSocket:function (fd) {
+ var stream = FS.getStream(fd);
+ if (!stream || !FS.isSocket(stream.node.mode)) {
+ return null;
+ }
+ return stream.node.sock;
+ },stream_ops:{poll:function (stream) {
+ var sock = stream.node.sock;
+ return sock.sock_ops.poll(sock);
+ },ioctl:function (stream, request, varargs) {
+ var sock = stream.node.sock;
+ return sock.sock_ops.ioctl(sock, request, varargs);
+ },read:function (stream, buffer, offset, length, position /* ignored */) {
+ var sock = stream.node.sock;
+ var msg = sock.sock_ops.recvmsg(sock, length);
+ if (!msg) {
+ // socket is closed
+ return 0;
+ }
+ buffer.set(msg.buffer, offset);
+ return msg.buffer.length;
+ },write:function (stream, buffer, offset, length, position /* ignored */) {
+ var sock = stream.node.sock;
+ return sock.sock_ops.sendmsg(sock, buffer, offset, length);
+ },close:function (stream) {
+ var sock = stream.node.sock;
+ sock.sock_ops.close(sock);
+ }},nextname:function () {
+ if (!SOCKFS.nextname.current) {
+ SOCKFS.nextname.current = 0;
+ }
+ return 'socket[' + (SOCKFS.nextname.current++) + ']';
+ },websocket_sock_ops:{createPeer:function (sock, addr, port) {
+ var ws;
+
+ if (typeof addr === 'object') {
+ ws = addr;
+ addr = null;
+ port = null;
+ }
+
+ if (ws) {
+ // for sockets that've already connected (e.g. we're the server)
+ // we can inspect the _socket property for the address
+ if (ws._socket) {
+ addr = ws._socket.remoteAddress;
+ port = ws._socket.remotePort;
+ }
+ // if we're just now initializing a connection to the remote,
+ // inspect the url property
+ else {
+ var result = /ws[s]?:\/\/([^:]+):(\d+)/.exec(ws.url);
+ if (!result) {
+ throw new Error('WebSocket URL must be in the format ws(s)://address:port');
+ }
+ addr = result[1];
+ port = parseInt(result[2], 10);
+ }
+ } else {
+ // create the actual websocket object and connect
+ try {
+ // runtimeConfig gets set to true if WebSocket runtime configuration is available.
+ var runtimeConfig = (Module['websocket'] && ('object' === typeof Module['websocket']));
+
+ // The default value is 'ws://' the replace is needed because the compiler replaces "//" comments with '#'
+ // comments without checking context, so we'd end up with ws:#, the replace swaps the "#" for "//" again.
+ var url = 'ws:#'.replace('#', '//');
+
+ if (runtimeConfig) {
+ if ('string' === typeof Module['websocket']['url']) {
+ url = Module['websocket']['url']; // Fetch runtime WebSocket URL config.
+ }
+ }
+
+ if (url === 'ws://' || url === 'wss://') { // Is the supplied URL config just a prefix, if so complete it.
+ url = url + addr + ':' + port;
+ }
+
+ // Make the WebSocket subprotocol (Sec-WebSocket-Protocol) default to binary if no configuration is set.
+ var subProtocols = 'binary'; // The default value is 'binary'
+
+ if (runtimeConfig) {
+ if ('string' === typeof Module['websocket']['subprotocol']) {
+ subProtocols = Module['websocket']['subprotocol']; // Fetch runtime WebSocket subprotocol config.
+ }
+ }
+
+ // The regex trims the string (removes spaces at the beginning and end, then splits the string by
+ // <any space>,<any space> into an Array. Whitespace removal is important for Websockify and ws.
+ subProtocols = subProtocols.replace(/^ +| +$/g,"").split(/ *, */);
+
+ // The node ws library API for specifying optional subprotocol is slightly different than the browser's.
+ var opts = ENVIRONMENT_IS_NODE ? {'protocol': subProtocols.toString()} : subProtocols;
+
+ // If node we use the ws library.
+ var WebSocket = ENVIRONMENT_IS_NODE ? require('ws') : window['WebSocket'];
+ ws = new WebSocket(url, opts);
+ ws.binaryType = 'arraybuffer';
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EHOSTUNREACH);
+ }
+ }
+
+
+ var peer = {
+ addr: addr,
+ port: port,
+ socket: ws,
+ dgram_send_queue: []
+ };
+
+ SOCKFS.websocket_sock_ops.addPeer(sock, peer);
+ SOCKFS.websocket_sock_ops.handlePeerEvents(sock, peer);
+
+ // if this is a bound dgram socket, send the port number first to allow
+ // us to override the ephemeral port reported to us by remotePort on the
+ // remote end.
+ if (sock.type === 2 && typeof sock.sport !== 'undefined') {
+ peer.dgram_send_queue.push(new Uint8Array([
+ 255, 255, 255, 255,
+ 'p'.charCodeAt(0), 'o'.charCodeAt(0), 'r'.charCodeAt(0), 't'.charCodeAt(0),
+ ((sock.sport & 0xff00) >> 8) , (sock.sport & 0xff)
+ ]));
+ }
+
+ return peer;
+ },getPeer:function (sock, addr, port) {
+ return sock.peers[addr + ':' + port];
+ },addPeer:function (sock, peer) {
+ sock.peers[peer.addr + ':' + peer.port] = peer;
+ },removePeer:function (sock, peer) {
+ delete sock.peers[peer.addr + ':' + peer.port];
+ },handlePeerEvents:function (sock, peer) {
+ var first = true;
+
+ var handleOpen = function () {
+ try {
+ var queued = peer.dgram_send_queue.shift();
+ while (queued) {
+ peer.socket.send(queued);
+ queued = peer.dgram_send_queue.shift();
+ }
+ } catch (e) {
+ // not much we can do here in the way of proper error handling as we've already
+ // lied and said this data was sent. shut it down.
+ peer.socket.close();
+ }
+ };
+
+ function handleMessage(data) {
+ assert(typeof data !== 'string' && data.byteLength !== undefined); // must receive an ArrayBuffer
+ data = new Uint8Array(data); // make a typed array view on the array buffer
+
+
+ // if this is the port message, override the peer's port with it
+ var wasfirst = first;
+ first = false;
+ if (wasfirst &&
+ data.length === 10 &&
+ data[0] === 255 && data[1] === 255 && data[2] === 255 && data[3] === 255 &&
+ data[4] === 'p'.charCodeAt(0) && data[5] === 'o'.charCodeAt(0) && data[6] === 'r'.charCodeAt(0) && data[7] === 't'.charCodeAt(0)) {
+ // update the peer's port and it's key in the peer map
+ var newport = ((data[8] << 8) | data[9]);
+ SOCKFS.websocket_sock_ops.removePeer(sock, peer);
+ peer.port = newport;
+ SOCKFS.websocket_sock_ops.addPeer(sock, peer);
+ return;
+ }
+
+ sock.recv_queue.push({ addr: peer.addr, port: peer.port, data: data });
+ };
+
+ if (ENVIRONMENT_IS_NODE) {
+ peer.socket.on('open', handleOpen);
+ peer.socket.on('message', function(data, flags) {
+ if (!flags.binary) {
+ return;
+ }
+ handleMessage((new Uint8Array(data)).buffer); // copy from node Buffer -> ArrayBuffer
+ });
+ peer.socket.on('error', function() {
+ // don't throw
+ });
+ } else {
+ peer.socket.onopen = handleOpen;
+ peer.socket.onmessage = function peer_socket_onmessage(event) {
+ handleMessage(event.data);
+ };
+ }
+ },poll:function (sock) {
+ if (sock.type === 1 && sock.server) {
+ // listen sockets should only say they're available for reading
+ // if there are pending clients.
+ return sock.pending.length ? (64 | 1) : 0;
+ }
+
+ var mask = 0;
+ var dest = sock.type === 1 ? // we only care about the socket state for connection-based sockets
+ SOCKFS.websocket_sock_ops.getPeer(sock, sock.daddr, sock.dport) :
+ null;
+
+ if (sock.recv_queue.length ||
+ !dest || // connection-less sockets are always ready to read
+ (dest && dest.socket.readyState === dest.socket.CLOSING) ||
+ (dest && dest.socket.readyState === dest.socket.CLOSED)) { // let recv return 0 once closed
+ mask |= (64 | 1);
+ }
+
+ if (!dest || // connection-less sockets are always ready to write
+ (dest && dest.socket.readyState === dest.socket.OPEN)) {
+ mask |= 4;
+ }
+
+ if ((dest && dest.socket.readyState === dest.socket.CLOSING) ||
+ (dest && dest.socket.readyState === dest.socket.CLOSED)) {
+ mask |= 16;
+ }
+
+ return mask;
+ },ioctl:function (sock, request, arg) {
+ switch (request) {
+ case 21531:
+ var bytes = 0;
+ if (sock.recv_queue.length) {
+ bytes = sock.recv_queue[0].data.length;
+ }
+ HEAP32[((arg)>>2)]=bytes;
+ return 0;
+ default:
+ return ERRNO_CODES.EINVAL;
+ }
+ },close:function (sock) {
+ // if we've spawned a listen server, close it
+ if (sock.server) {
+ try {
+ sock.server.close();
+ } catch (e) {
+ }
+ sock.server = null;
+ }
+ // close any peer connections
+ var peers = Object.keys(sock.peers);
+ for (var i = 0; i < peers.length; i++) {
+ var peer = sock.peers[peers[i]];
+ try {
+ peer.socket.close();
+ } catch (e) {
+ }
+ SOCKFS.websocket_sock_ops.removePeer(sock, peer);
+ }
+ return 0;
+ },bind:function (sock, addr, port) {
+ if (typeof sock.saddr !== 'undefined' || typeof sock.sport !== 'undefined') {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL); // already bound
+ }
+ sock.saddr = addr;
+ sock.sport = port || _mkport();
+ // in order to emulate dgram sockets, we need to launch a listen server when
+ // binding on a connection-less socket
+ // note: this is only required on the server side
+ if (sock.type === 2) {
+ // close the existing server if it exists
+ if (sock.server) {
+ sock.server.close();
+ sock.server = null;
+ }
+ // swallow error operation not supported error that occurs when binding in the
+ // browser where this isn't supported
+ try {
+ sock.sock_ops.listen(sock, 0);
+ } catch (e) {
+ if (!(e instanceof FS.ErrnoError)) throw e;
+ if (e.errno !== ERRNO_CODES.EOPNOTSUPP) throw e;
+ }
+ }
+ },connect:function (sock, addr, port) {
+ if (sock.server) {
+ throw new FS.ErrnoError(ERRNO_CODS.EOPNOTSUPP);
+ }
+
+ // TODO autobind
+ // if (!sock.addr && sock.type == 2) {
+ // }
+
+ // early out if we're already connected / in the middle of connecting
+ if (typeof sock.daddr !== 'undefined' && typeof sock.dport !== 'undefined') {
+ var dest = SOCKFS.websocket_sock_ops.getPeer(sock, sock.daddr, sock.dport);
+ if (dest) {
+ if (dest.socket.readyState === dest.socket.CONNECTING) {
+ throw new FS.ErrnoError(ERRNO_CODES.EALREADY);
+ } else {
+ throw new FS.ErrnoError(ERRNO_CODES.EISCONN);
+ }
+ }
+ }
+
+ // add the socket to our peer list and set our
+ // destination address / port to match
+ var peer = SOCKFS.websocket_sock_ops.createPeer(sock, addr, port);
+ sock.daddr = peer.addr;
+ sock.dport = peer.port;
+
+ // always "fail" in non-blocking mode
+ throw new FS.ErrnoError(ERRNO_CODES.EINPROGRESS);
+ },listen:function (sock, backlog) {
+ if (!ENVIRONMENT_IS_NODE) {
+ throw new FS.ErrnoError(ERRNO_CODES.EOPNOTSUPP);
+ }
+ if (sock.server) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL); // already listening
+ }
+ var WebSocketServer = require('ws').Server;
+ var host = sock.saddr;
+ sock.server = new WebSocketServer({
+ host: host,
+ port: sock.sport
+ // TODO support backlog
+ });
+
+ sock.server.on('connection', function(ws) {
+ if (sock.type === 1) {
+ var newsock = SOCKFS.createSocket(sock.family, sock.type, sock.protocol);
+
+ // create a peer on the new socket
+ var peer = SOCKFS.websocket_sock_ops.createPeer(newsock, ws);
+ newsock.daddr = peer.addr;
+ newsock.dport = peer.port;
+
+ // push to queue for accept to pick up
+ sock.pending.push(newsock);
+ } else {
+ // create a peer on the listen socket so calling sendto
+ // with the listen socket and an address will resolve
+ // to the correct client
+ SOCKFS.websocket_sock_ops.createPeer(sock, ws);
+ }
+ });
+ sock.server.on('closed', function() {
+ sock.server = null;
+ });
+ sock.server.on('error', function() {
+ // don't throw
+ });
+ },accept:function (listensock) {
+ if (!listensock.server) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ var newsock = listensock.pending.shift();
+ newsock.stream.flags = listensock.stream.flags;
+ return newsock;
+ },getname:function (sock, peer) {
+ var addr, port;
+ if (peer) {
+ if (sock.daddr === undefined || sock.dport === undefined) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ }
+ addr = sock.daddr;
+ port = sock.dport;
+ } else {
+ // TODO saddr and sport will be set for bind()'d UDP sockets, but what
+ // should we be returning for TCP sockets that've been connect()'d?
+ addr = sock.saddr || 0;
+ port = sock.sport || 0;
+ }
+ return { addr: addr, port: port };
+ },sendmsg:function (sock, buffer, offset, length, addr, port) {
+ if (sock.type === 2) {
+ // connection-less sockets will honor the message address,
+ // and otherwise fall back to the bound destination address
+ if (addr === undefined || port === undefined) {
+ addr = sock.daddr;
+ port = sock.dport;
+ }
+ // if there was no address to fall back to, error out
+ if (addr === undefined || port === undefined) {
+ throw new FS.ErrnoError(ERRNO_CODES.EDESTADDRREQ);
+ }
+ } else {
+ // connection-based sockets will only use the bound
+ addr = sock.daddr;
+ port = sock.dport;
+ }
+
+ // find the peer for the destination address
+ var dest = SOCKFS.websocket_sock_ops.getPeer(sock, addr, port);
+
+ // early out if not connected with a connection-based socket
+ if (sock.type === 1) {
+ if (!dest || dest.socket.readyState === dest.socket.CLOSING || dest.socket.readyState === dest.socket.CLOSED) {
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ } else if (dest.socket.readyState === dest.socket.CONNECTING) {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ }
+
+ // create a copy of the incoming data to send, as the WebSocket API
+ // doesn't work entirely with an ArrayBufferView, it'll just send
+ // the entire underlying buffer
+ var data;
+ if (buffer instanceof Array || buffer instanceof ArrayBuffer) {
+ data = buffer.slice(offset, offset + length);
+ } else { // ArrayBufferView
+ data = buffer.buffer.slice(buffer.byteOffset + offset, buffer.byteOffset + offset + length);
+ }
+
+ // if we're emulating a connection-less dgram socket and don't have
+ // a cached connection, queue the buffer to send upon connect and
+ // lie, saying the data was sent now.
+ if (sock.type === 2) {
+ if (!dest || dest.socket.readyState !== dest.socket.OPEN) {
+ // if we're not connected, open a new connection
+ if (!dest || dest.socket.readyState === dest.socket.CLOSING || dest.socket.readyState === dest.socket.CLOSED) {
+ dest = SOCKFS.websocket_sock_ops.createPeer(sock, addr, port);
+ }
+ dest.dgram_send_queue.push(data);
+ return length;
+ }
+ }
+
+ try {
+ // send the actual data
+ dest.socket.send(data);
+ return length;
+ } catch (e) {
+ throw new FS.ErrnoError(ERRNO_CODES.EINVAL);
+ }
+ },recvmsg:function (sock, length) {
+ // http://pubs.opengroup.org/onlinepubs/7908799/xns/recvmsg.html
+ if (sock.type === 1 && sock.server) {
+ // tcp servers should not be recv()'ing on the listen socket
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ }
+
+ var queued = sock.recv_queue.shift();
+ if (!queued) {
+ if (sock.type === 1) {
+ var dest = SOCKFS.websocket_sock_ops.getPeer(sock, sock.daddr, sock.dport);
+
+ if (!dest) {
+ // if we have a destination address but are not connected, error out
+ throw new FS.ErrnoError(ERRNO_CODES.ENOTCONN);
+ }
+ else if (dest.socket.readyState === dest.socket.CLOSING || dest.socket.readyState === dest.socket.CLOSED) {
+ // return null if the socket has closed
+ return null;
+ }
+ else {
+ // else, our socket is in a valid state but truly has nothing available
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ } else {
+ throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
+ }
+ }
+
+ // queued.data will be an ArrayBuffer if it's unadulterated, but if it's
+ // requeued TCP data it'll be an ArrayBufferView
+ var queuedLength = queued.data.byteLength || queued.data.length;
+ var queuedOffset = queued.data.byteOffset || 0;
+ var queuedBuffer = queued.data.buffer || queued.data;
+ var bytesRead = Math.min(length, queuedLength);
+ var res = {
+ buffer: new Uint8Array(queuedBuffer, queuedOffset, bytesRead),
+ addr: queued.addr,
+ port: queued.port
+ };
+
+
+ // push back any unread data for TCP connections
+ if (sock.type === 1 && bytesRead < queuedLength) {
+ var bytesRemaining = queuedLength - bytesRead;
+ queued.data = new Uint8Array(queuedBuffer, queuedOffset + bytesRead, bytesRemaining);
+ sock.recv_queue.unshift(queued);
+ }
+
+ return res;
+ }}};function _send(fd, buf, len, flags) {
+ var sock = SOCKFS.getSocket(fd);
+ if (!sock) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+ // TODO honor flags
+ return _write(fd, buf, len);
+ }
+
+ function _pwrite(fildes, buf, nbyte, offset) {
+ // ssize_t pwrite(int fildes, const void *buf, size_t nbyte, off_t offset);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/write.html
+ var stream = FS.getStream(fildes);
+ if (!stream) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+ try {
+ var slab = HEAP8;
+ return FS.write(stream, slab, buf, nbyte, offset);
+ } catch (e) {
+ FS.handleFSError(e);
+ return -1;
+ }
+ }function _write(fildes, buf, nbyte) {
+ // ssize_t write(int fildes, const void *buf, size_t nbyte);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/write.html
+ var stream = FS.getStream(fildes);
+ if (!stream) {
+ ___setErrNo(ERRNO_CODES.EBADF);
+ return -1;
+ }
+
+
+ try {
+ var slab = HEAP8;
+ return FS.write(stream, slab, buf, nbyte);
+ } catch (e) {
+ FS.handleFSError(e);
+ return -1;
+ }
+ }
+
+ function _fileno(stream) {
+ // int fileno(FILE *stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fileno.html
+ stream = FS.getStreamFromPtr(stream);
+ if (!stream) return -1;
+ return stream.fd;
+ }function _fwrite(ptr, size, nitems, stream) {
+ // size_t fwrite(const void *restrict ptr, size_t size, size_t nitems, FILE *restrict stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fwrite.html
+ var bytesToWrite = nitems * size;
+ if (bytesToWrite == 0) return 0;
+ var fd = _fileno(stream);
+ var bytesWritten = _write(fd, ptr, bytesToWrite);
+ if (bytesWritten == -1) {
+ var streamObj = FS.getStreamFromPtr(stream);
+ if (streamObj) streamObj.error = true;
+ return 0;
+ } else {
+ return Math.floor(bytesWritten / size);
+ }
+ }
+
+
+
+ Module["_strlen"] = _strlen;
+
+ function __reallyNegative(x) {
+ return x < 0 || (x === 0 && (1/x) === -Infinity);
+ }function __formatString(format, varargs) {
+ var textIndex = format;
+ var argIndex = 0;
+ function getNextArg(type) {
+ // NOTE: Explicitly ignoring type safety. Otherwise this fails:
+ // int x = 4; printf("%c\n", (char)x);
+ var ret;
+ if (type === 'double') {
+ ret = HEAPF64[(((varargs)+(argIndex))>>3)];
+ } else if (type == 'i64') {
+ ret = [HEAP32[(((varargs)+(argIndex))>>2)],
+ HEAP32[(((varargs)+(argIndex+4))>>2)]];
+
+ } else {
+ type = 'i32'; // varargs are always i32, i64, or double
+ ret = HEAP32[(((varargs)+(argIndex))>>2)];
+ }
+ argIndex += Runtime.getNativeFieldSize(type);
+ return ret;
+ }
+
+ var ret = [];
+ var curr, next, currArg;
+ while(1) {
+ var startTextIndex = textIndex;
+ curr = HEAP8[(textIndex)];
+ if (curr === 0) break;
+ next = HEAP8[((textIndex+1)|0)];
+ if (curr == 37) {
+ // Handle flags.
+ var flagAlwaysSigned = false;
+ var flagLeftAlign = false;
+ var flagAlternative = false;
+ var flagZeroPad = false;
+ var flagPadSign = false;
+ flagsLoop: while (1) {
+ switch (next) {
+ case 43:
+ flagAlwaysSigned = true;
+ break;
+ case 45:
+ flagLeftAlign = true;
+ break;
+ case 35:
+ flagAlternative = true;
+ break;
+ case 48:
+ if (flagZeroPad) {
+ break flagsLoop;
+ } else {
+ flagZeroPad = true;
+ break;
+ }
+ case 32:
+ flagPadSign = true;
+ break;
+ default:
+ break flagsLoop;
+ }
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ }
+
+ // Handle width.
+ var width = 0;
+ if (next == 42) {
+ width = getNextArg('i32');
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ } else {
+ while (next >= 48 && next <= 57) {
+ width = width * 10 + (next - 48);
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ }
+ }
+
+ // Handle precision.
+ var precisionSet = false, precision = -1;
+ if (next == 46) {
+ precision = 0;
+ precisionSet = true;
+ textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+ if (next == 42) {
+ precision = getNextArg('i32');
+ textIndex++;
+ } else {
+ while(1) {
+ var precisionChr = HEAP8[((textIndex+1)|0)];
+ if (precisionChr < 48 ||
+ precisionChr > 57) break;
+ precision = precision * 10 + (precisionChr - 48);
+ textIndex++;
+ }
+ }
+ next = HEAP8[((textIndex+1)|0)];
+ }
+ if (precision < 0) {
+ precision = 6; // Standard default.
+ precisionSet = false;
+ }
+
+ // Handle integer sizes. WARNING: These assume a 32-bit architecture!
+ var argSize;
+ switch (String.fromCharCode(next)) {
+ case 'h':
+ var nextNext = HEAP8[((textIndex+2)|0)];
+ if (nextNext == 104) {
+ textIndex++;
+ argSize = 1; // char (actually i32 in varargs)
+ } else {
+ argSize = 2; // short (actually i32 in varargs)
+ }
+ break;
+ case 'l':
+ var nextNext = HEAP8[((textIndex+2)|0)];
+ if (nextNext == 108) {
+ textIndex++;
+ argSize = 8; // long long
+ } else {
+ argSize = 4; // long
+ }
+ break;
+ case 'L': // long long
+ case 'q': // int64_t
+ case 'j': // intmax_t
+ argSize = 8;
+ break;
+ case 'z': // size_t
+ case 't': // ptrdiff_t
+ case 'I': // signed ptrdiff_t or unsigned size_t
+ argSize = 4;
+ break;
+ default:
+ argSize = null;
+ }
+ if (argSize) textIndex++;
+ next = HEAP8[((textIndex+1)|0)];
+
+ // Handle type specifier.
+ switch (String.fromCharCode(next)) {
+ case 'd': case 'i': case 'u': case 'o': case 'x': case 'X': case 'p': {
+ // Integer.
+ var signed = next == 100 || next == 105;
+ argSize = argSize || 4;
+ var currArg = getNextArg('i' + (argSize * 8));
+ var argText;
+ // Flatten i64-1 [low, high] into a (slightly rounded) double
+ if (argSize == 8) {
+ currArg = Runtime.makeBigInt(currArg[0], currArg[1], next == 117);
+ }
+ // Truncate to requested size.
+ if (argSize <= 4) {
+ var limit = Math.pow(256, argSize) - 1;
+ currArg = (signed ? reSign : unSign)(currArg & limit, argSize * 8);
+ }
+ // Format the number.
+ var currAbsArg = Math.abs(currArg);
+ var prefix = '';
+ if (next == 100 || next == 105) {
+ argText = reSign(currArg, 8 * argSize, 1).toString(10);
+ } else if (next == 117) {
+ argText = unSign(currArg, 8 * argSize, 1).toString(10);
+ currArg = Math.abs(currArg);
+ } else if (next == 111) {
+ argText = (flagAlternative ? '0' : '') + currAbsArg.toString(8);
+ } else if (next == 120 || next == 88) {
+ prefix = (flagAlternative && currArg != 0) ? '0x' : '';
+ if (currArg < 0) {
+ // Represent negative numbers in hex as 2's complement.
+ currArg = -currArg;
+ argText = (currAbsArg - 1).toString(16);
+ var buffer = [];
+ for (var i = 0; i < argText.length; i++) {
+ buffer.push((0xF - parseInt(argText[i], 16)).toString(16));
+ }
+ argText = buffer.join('');
+ while (argText.length < argSize * 2) argText = 'f' + argText;
+ } else {
+ argText = currAbsArg.toString(16);
+ }
+ if (next == 88) {
+ prefix = prefix.toUpperCase();
+ argText = argText.toUpperCase();
+ }
+ } else if (next == 112) {
+ if (currAbsArg === 0) {
+ argText = '(nil)';
+ } else {
+ prefix = '0x';
+ argText = currAbsArg.toString(16);
+ }
+ }
+ if (precisionSet) {
+ while (argText.length < precision) {
+ argText = '0' + argText;
+ }
+ }
+
+ // Add sign if needed
+ if (currArg >= 0) {
+ if (flagAlwaysSigned) {
+ prefix = '+' + prefix;
+ } else if (flagPadSign) {
+ prefix = ' ' + prefix;
+ }
+ }
+
+ // Move sign to prefix so we zero-pad after the sign
+ if (argText.charAt(0) == '-') {
+ prefix = '-' + prefix;
+ argText = argText.substr(1);
+ }
+
+ // Add padding.
+ while (prefix.length + argText.length < width) {
+ if (flagLeftAlign) {
+ argText += ' ';
+ } else {
+ if (flagZeroPad) {
+ argText = '0' + argText;
+ } else {
+ prefix = ' ' + prefix;
+ }
+ }
+ }
+
+ // Insert the result into the buffer.
+ argText = prefix + argText;
+ argText.split('').forEach(function(chr) {
+ ret.push(chr.charCodeAt(0));
+ });
+ break;
+ }
+ case 'f': case 'F': case 'e': case 'E': case 'g': case 'G': {
+ // Float.
+ var currArg = getNextArg('double');
+ var argText;
+ if (isNaN(currArg)) {
+ argText = 'nan';
+ flagZeroPad = false;
+ } else if (!isFinite(currArg)) {
+ argText = (currArg < 0 ? '-' : '') + 'inf';
+ flagZeroPad = false;
+ } else {
+ var isGeneral = false;
+ var effectivePrecision = Math.min(precision, 20);
+
+ // Convert g/G to f/F or e/E, as per:
+ // http://pubs.opengroup.org/onlinepubs/9699919799/functions/printf.html
+ if (next == 103 || next == 71) {
+ isGeneral = true;
+ precision = precision || 1;
+ var exponent = parseInt(currArg.toExponential(effectivePrecision).split('e')[1], 10);
+ if (precision > exponent && exponent >= -4) {
+ next = ((next == 103) ? 'f' : 'F').charCodeAt(0);
+ precision -= exponent + 1;
+ } else {
+ next = ((next == 103) ? 'e' : 'E').charCodeAt(0);
+ precision--;
+ }
+ effectivePrecision = Math.min(precision, 20);
+ }
+
+ if (next == 101 || next == 69) {
+ argText = currArg.toExponential(effectivePrecision);
+ // Make sure the exponent has at least 2 digits.
+ if (/[eE][-+]\d$/.test(argText)) {
+ argText = argText.slice(0, -1) + '0' + argText.slice(-1);
+ }
+ } else if (next == 102 || next == 70) {
+ argText = currArg.toFixed(effectivePrecision);
+ if (currArg === 0 && __reallyNegative(currArg)) {
+ argText = '-' + argText;
+ }
+ }
+
+ var parts = argText.split('e');
+ if (isGeneral && !flagAlternative) {
+ // Discard trailing zeros and periods.
+ while (parts[0].length > 1 && parts[0].indexOf('.') != -1 &&
+ (parts[0].slice(-1) == '0' || parts[0].slice(-1) == '.')) {
+ parts[0] = parts[0].slice(0, -1);
+ }
+ } else {
+ // Make sure we have a period in alternative mode.
+ if (flagAlternative && argText.indexOf('.') == -1) parts[0] += '.';
+ // Zero pad until required precision.
+ while (precision > effectivePrecision++) parts[0] += '0';
+ }
+ argText = parts[0] + (parts.length > 1 ? 'e' + parts[1] : '');
+
+ // Capitalize 'E' if needed.
+ if (next == 69) argText = argText.toUpperCase();
+
+ // Add sign.
+ if (currArg >= 0) {
+ if (flagAlwaysSigned) {
+ argText = '+' + argText;
+ } else if (flagPadSign) {
+ argText = ' ' + argText;
+ }
+ }
+ }
+
+ // Add padding.
+ while (argText.length < width) {
+ if (flagLeftAlign) {
+ argText += ' ';
+ } else {
+ if (flagZeroPad && (argText[0] == '-' || argText[0] == '+')) {
+ argText = argText[0] + '0' + argText.slice(1);
+ } else {
+ argText = (flagZeroPad ? '0' : ' ') + argText;
+ }
+ }
+ }
+
+ // Adjust case.
+ if (next < 97) argText = argText.toUpperCase();
+
+ // Insert the result into the buffer.
+ argText.split('').forEach(function(chr) {
+ ret.push(chr.charCodeAt(0));
+ });
+ break;
+ }
+ case 's': {
+ // String.
+ var arg = getNextArg('i8*');
+ var argLength = arg ? _strlen(arg) : '(null)'.length;
+ if (precisionSet) argLength = Math.min(argLength, precision);
+ if (!flagLeftAlign) {
+ while (argLength < width--) {
+ ret.push(32);
+ }
+ }
+ if (arg) {
+ for (var i = 0; i < argLength; i++) {
+ ret.push(HEAPU8[((arg++)|0)]);
+ }
+ } else {
+ ret = ret.concat(intArrayFromString('(null)'.substr(0, argLength), true));
+ }
+ if (flagLeftAlign) {
+ while (argLength < width--) {
+ ret.push(32);
+ }
+ }
+ break;
+ }
+ case 'c': {
+ // Character.
+ if (flagLeftAlign) ret.push(getNextArg('i8'));
+ while (--width > 0) {
+ ret.push(32);
+ }
+ if (!flagLeftAlign) ret.push(getNextArg('i8'));
+ break;
+ }
+ case 'n': {
+ // Write the length written so far to the next parameter.
+ var ptr = getNextArg('i32*');
+ HEAP32[((ptr)>>2)]=ret.length;
+ break;
+ }
+ case '%': {
+ // Literal percent sign.
+ ret.push(curr);
+ break;
+ }
+ default: {
+ // Unknown specifiers remain untouched.
+ for (var i = startTextIndex; i < textIndex + 2; i++) {
+ ret.push(HEAP8[(i)]);
+ }
+ }
+ }
+ textIndex += 2;
+ // TODO: Support a/A (hex float) and m (last error) specifiers.
+ // TODO: Support %1${specifier} for arg selection.
+ } else {
+ ret.push(curr);
+ textIndex += 1;
+ }
+ }
+ return ret;
+ }function _fprintf(stream, format, varargs) {
+ // int fprintf(FILE *restrict stream, const char *restrict format, ...);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/printf.html
+ var result = __formatString(format, varargs);
+ var stack = Runtime.stackSave();
+ var ret = _fwrite(allocate(result, 'i8', ALLOC_STACK), 1, result.length, stream);
+ Runtime.stackRestore(stack);
+ return ret;
+ }function _printf(format, varargs) {
+ // int printf(const char *restrict format, ...);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/printf.html
+ var stdout = HEAP32[((_stdout)>>2)];
+ return _fprintf(stdout, format, varargs);
+ }
+
+
+ function _fputs(s, stream) {
+ // int fputs(const char *restrict s, FILE *restrict stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fputs.html
+ var fd = _fileno(stream);
+ return _write(fd, s, _strlen(s));
+ }
+
+ function _fputc(c, stream) {
+ // int fputc(int c, FILE *stream);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/fputc.html
+ var chr = unSign(c & 0xFF);
+ HEAP8[((_fputc.ret)|0)]=chr;
+ var fd = _fileno(stream);
+ var ret = _write(fd, _fputc.ret, 1);
+ if (ret == -1) {
+ var streamObj = FS.getStreamFromPtr(stream);
+ if (streamObj) streamObj.error = true;
+ return -1;
+ } else {
+ return chr;
+ }
+ }function _puts(s) {
+ // int puts(const char *s);
+ // http://pubs.opengroup.org/onlinepubs/000095399/functions/puts.html
+ // NOTE: puts() always writes an extra newline.
+ var stdout = HEAP32[((_stdout)>>2)];
+ var ret = _fputs(s, stdout);
+ if (ret < 0) {
+ return ret;
+ } else {
+ var newlineRet = _fputc(10, stdout);
+ return (newlineRet < 0) ? -1 : ret + 1;
+ }
+ }
+
+ function _sysconf(name) {
+ // long sysconf(int name);
+ // http://pubs.opengroup.org/onlinepubs/009695399/functions/sysconf.html
+ switch(name) {
+ case 30: return PAGE_SIZE;
+ case 132:
+ case 133:
+ case 12:
+ case 137:
+ case 138:
+ case 15:
+ case 235:
+ case 16:
+ case 17:
+ case 18:
+ case 19:
+ case 20:
+ case 149:
+ case 13:
+ case 10:
+ case 236:
+ case 153:
+ case 9:
+ case 21:
+ case 22:
+ case 159:
+ case 154:
+ case 14:
+ case 77:
+ case 78:
+ case 139:
+ case 80:
+ case 81:
+ case 79:
+ case 82:
+ case 68:
+ case 67:
+ case 164:
+ case 11:
+ case 29:
+ case 47:
+ case 48:
+ case 95:
+ case 52:
+ case 51:
+ case 46:
+ return 200809;
+ case 27:
+ case 246:
+ case 127:
+ case 128:
+ case 23:
+ case 24:
+ case 160:
+ case 161:
+ case 181:
+ case 182:
+ case 242:
+ case 183:
+ case 184:
+ case 243:
+ case 244:
+ case 245:
+ case 165:
+ case 178:
+ case 179:
+ case 49:
+ case 50:
+ case 168:
+ case 169:
+ case 175:
+ case 170:
+ case 171:
+ case 172:
+ case 97:
+ case 76:
+ case 32:
+ case 173:
+ case 35:
+ return -1;
+ case 176:
+ case 177:
+ case 7:
+ case 155:
+ case 8:
+ case 157:
+ case 125:
+ case 126:
+ case 92:
+ case 93:
+ case 129:
+ case 130:
+ case 131:
+ case 94:
+ case 91:
+ return 1;
+ case 74:
+ case 60:
+ case 69:
+ case 70:
+ case 4:
+ return 1024;
+ case 31:
+ case 42:
+ case 72:
+ return 32;
+ case 87:
+ case 26:
+ case 33:
+ return 2147483647;
+ case 34:
+ case 1:
+ return 47839;
+ case 38:
+ case 36:
+ return 99;
+ case 43:
+ case 37:
+ return 2048;
+ case 0: return 2097152;
+ case 3: return 65536;
+ case 28: return 32768;
+ case 44: return 32767;
+ case 75: return 16384;
+ case 39: return 1000;
+ case 89: return 700;
+ case 71: return 256;
+ case 40: return 255;
+ case 2: return 100;
+ case 180: return 64;
+ case 25: return 20;
+ case 5: return 16;
+ case 6: return 6;
+ case 73: return 4;
+ case 84: return 1;
+ }
+ ___setErrNo(ERRNO_CODES.EINVAL);
+ return -1;
+ }
+
+
+ Module["_memset"] = _memset;
+
+ function ___errno_location() {
+ return ___errno_state;
+ }
+
+ function _abort() {
+ Module['abort']();
+ }
+
+ var Browser={mainLoop:{scheduler:null,method:"",shouldPause:false,paused:false,queue:[],pause:function () {
+ Browser.mainLoop.shouldPause = true;
+ },resume:function () {
+ if (Browser.mainLoop.paused) {
+ Browser.mainLoop.paused = false;
+ Browser.mainLoop.scheduler();
+ }
+ Browser.mainLoop.shouldPause = false;
+ },updateStatus:function () {
+ if (Module['setStatus']) {
+ var message = Module['statusMessage'] || 'Please wait...';
+ var remaining = Browser.mainLoop.remainingBlockers;
+ var expected = Browser.mainLoop.expectedBlockers;
+ if (remaining) {
+ if (remaining < expected) {
+ Module['setStatus'](message + ' (' + (expected - remaining) + '/' + expected + ')');
+ } else {
+ Module['setStatus'](message);
+ }
+ } else {
+ Module['setStatus']('');
+ }
+ }
+ }},isFullScreen:false,pointerLock:false,moduleContextCreatedCallbacks:[],workers:[],init:function () {
+ if (!Module["preloadPlugins"]) Module["preloadPlugins"] = []; // needs to exist even in workers
+
+ if (Browser.initted || ENVIRONMENT_IS_WORKER) return;
+ Browser.initted = true;
+
+ try {
+ new Blob();
+ Browser.hasBlobConstructor = true;
+ } catch(e) {
+ Browser.hasBlobConstructor = false;
+ console.log("warning: no blob constructor, cannot create blobs with mimetypes");
+ }
+ Browser.BlobBuilder = typeof MozBlobBuilder != "undefined" ? MozBlobBuilder : (typeof WebKitBlobBuilder != "undefined" ? WebKitBlobBuilder : (!Browser.hasBlobConstructor ? console.log("warning: no BlobBuilder") : null));
+ Browser.URLObject = typeof window != "undefined" ? (window.URL ? window.URL : window.webkitURL) : undefined;
+ if (!Module.noImageDecoding && typeof Browser.URLObject === 'undefined') {
+ console.log("warning: Browser does not support creating object URLs. Built-in browser image decoding will not be available.");
+ Module.noImageDecoding = true;
+ }
+
+ // Support for plugins that can process preloaded files. You can add more of these to
+ // your app by creating and appending to Module.preloadPlugins.
+ //
+ // Each plugin is asked if it can handle a file based on the file's name. If it can,
+ // it is given the file's raw data. When it is done, it calls a callback with the file's
+ // (possibly modified) data. For example, a plugin might decompress a file, or it
+ // might create some side data structure for use later (like an Image element, etc.).
+
+ var imagePlugin = {};
+ imagePlugin['canHandle'] = function imagePlugin_canHandle(name) {
+ return !Module.noImageDecoding && /\.(jpg|jpeg|png|bmp)$/i.test(name);
+ };
+ imagePlugin['handle'] = function imagePlugin_handle(byteArray, name, onload, onerror) {
+ var b = null;
+ if (Browser.hasBlobConstructor) {
+ try {
+ b = new Blob([byteArray], { type: Browser.getMimetype(name) });
+ if (b.size !== byteArray.length) { // Safari bug #118630
+ // Safari's Blob can only take an ArrayBuffer
+ b = new Blob([(new Uint8Array(byteArray)).buffer], { type: Browser.getMimetype(name) });
+ }
+ } catch(e) {
+ Runtime.warnOnce('Blob constructor present but fails: ' + e + '; falling back to blob builder');
+ }
+ }
+ if (!b) {
+ var bb = new Browser.BlobBuilder();
+ bb.append((new Uint8Array(byteArray)).buffer); // we need to pass a buffer, and must copy the array to get the right data range
+ b = bb.getBlob();
+ }
+ var url = Browser.URLObject.createObjectURL(b);
+ var img = new Image();
+ img.onload = function img_onload() {
+ assert(img.complete, 'Image ' + name + ' could not be decoded');
+ var canvas = document.createElement('canvas');
+ canvas.width = img.width;
+ canvas.height = img.height;
+ var ctx = canvas.getContext('2d');
+ ctx.drawImage(img, 0, 0);
+ Module["preloadedImages"][name] = canvas;
+ Browser.URLObject.revokeObjectURL(url);
+ if (onload) onload(byteArray);
+ };
+ img.onerror = function img_onerror(event) {
+ console.log('Image ' + url + ' could not be decoded');
+ if (onerror) onerror();
+ };
+ img.src = url;
+ };
+ Module['preloadPlugins'].push(imagePlugin);
+
+ var audioPlugin = {};
+ audioPlugin['canHandle'] = function audioPlugin_canHandle(name) {
+ return !Module.noAudioDecoding && name.substr(-4) in { '.ogg': 1, '.wav': 1, '.mp3': 1 };
+ };
+ audioPlugin['handle'] = function audioPlugin_handle(byteArray, name, onload, onerror) {
+ var done = false;
+ function finish(audio) {
+ if (done) return;
+ done = true;
+ Module["preloadedAudios"][name] = audio;
+ if (onload) onload(byteArray);
+ }
+ function fail() {
+ if (done) return;
+ done = true;
+ Module["preloadedAudios"][name] = new Audio(); // empty shim
+ if (onerror) onerror();
+ }
+ if (Browser.hasBlobConstructor) {
+ try {
+ var b = new Blob([byteArray], { type: Browser.getMimetype(name) });
+ } catch(e) {
+ return fail();
+ }
+ var url = Browser.URLObject.createObjectURL(b); // XXX we never revoke this!
+ var audio = new Audio();
+ audio.addEventListener('canplaythrough', function() { finish(audio) }, false); // use addEventListener due to chromium bug 124926
+ audio.onerror = function audio_onerror(event) {
+ if (done) return;
+ console.log('warning: browser could not fully decode audio ' + name + ', trying slower base64 approach');
+ function encode64(data) {
+ var BASE = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';
+ var PAD = '=';
+ var ret = '';
+ var leftchar = 0;
+ var leftbits = 0;
+ for (var i = 0; i < data.length; i++) {
+ leftchar = (leftchar << 8) | data[i];
+ leftbits += 8;
+ while (leftbits >= 6) {
+ var curr = (leftchar >> (leftbits-6)) & 0x3f;
+ leftbits -= 6;
+ ret += BASE[curr];
+ }
+ }
+ if (leftbits == 2) {
+ ret += BASE[(leftchar&3) << 4];
+ ret += PAD + PAD;
+ } else if (leftbits == 4) {
+ ret += BASE[(leftchar&0xf) << 2];
+ ret += PAD;
+ }
+ return ret;
+ }
+ audio.src = 'data:audio/x-' + name.substr(-3) + ';base64,' + encode64(byteArray);
+ finish(audio); // we don't wait for confirmation this worked - but it's worth trying
+ };
+ audio.src = url;
+ // workaround for chrome bug 124926 - we do not always get oncanplaythrough or onerror
+ Browser.safeSetTimeout(function() {
+ finish(audio); // try to use it even though it is not necessarily ready to play
+ }, 10000);
+ } else {
+ return fail();
+ }
+ };
+ Module['preloadPlugins'].push(audioPlugin);
+
+ // Canvas event setup
+
+ var canvas = Module['canvas'];
+
+ // forced aspect ratio can be enabled by defining 'forcedAspectRatio' on Module
+ // Module['forcedAspectRatio'] = 4 / 3;
+
+ canvas.requestPointerLock = canvas['requestPointerLock'] ||
+ canvas['mozRequestPointerLock'] ||
+ canvas['webkitRequestPointerLock'] ||
+ canvas['msRequestPointerLock'] ||
+ function(){};
+ canvas.exitPointerLock = document['exitPointerLock'] ||
+ document['mozExitPointerLock'] ||
+ document['webkitExitPointerLock'] ||
+ document['msExitPointerLock'] ||
+ function(){}; // no-op if function does not exist
+ canvas.exitPointerLock = canvas.exitPointerLock.bind(document);
+
+ function pointerLockChange() {
+ Browser.pointerLock = document['pointerLockElement'] === canvas ||
+ document['mozPointerLockElement'] === canvas ||
+ document['webkitPointerLockElement'] === canvas ||
+ document['msPointerLockElement'] === canvas;
+ }
+
+ document.addEventListener('pointerlockchange', pointerLockChange, false);
+ document.addEventListener('mozpointerlockchange', pointerLockChange, false);
+ document.addEventListener('webkitpointerlockchange', pointerLockChange, false);
+ document.addEventListener('mspointerlockchange', pointerLockChange, false);
+
+ if (Module['elementPointerLock']) {
+ canvas.addEventListener("click", function(ev) {
+ if (!Browser.pointerLock && canvas.requestPointerLock) {
+ canvas.requestPointerLock();
+ ev.preventDefault();
+ }
+ }, false);
+ }
+ },createContext:function (canvas, useWebGL, setInModule, webGLContextAttributes) {
+ var ctx;
+ var errorInfo = '?';
+ function onContextCreationError(event) {
+ errorInfo = event.statusMessage || errorInfo;
+ }
+ try {
+ if (useWebGL) {
+ var contextAttributes = {
+ antialias: false,
+ alpha: false
+ };
+
+ if (webGLContextAttributes) {
+ for (var attribute in webGLContextAttributes) {
+ contextAttributes[attribute] = webGLContextAttributes[attribute];
+ }
+ }
+
+
+ canvas.addEventListener('webglcontextcreationerror', onContextCreationError, false);
+ try {
+ ['experimental-webgl', 'webgl'].some(function(webglId) {
+ return ctx = canvas.getContext(webglId, contextAttributes);
+ });
+ } finally {
+ canvas.removeEventListener('webglcontextcreationerror', onContextCreationError, false);
+ }
+ } else {
+ ctx = canvas.getContext('2d');
+ }
+ if (!ctx) throw ':(';
+ } catch (e) {
+ Module.print('Could not create canvas: ' + [errorInfo, e]);
+ return null;
+ }
+ if (useWebGL) {
+ // Set the background of the WebGL canvas to black
+ canvas.style.backgroundColor = "black";
+
+ // Warn on context loss
+ canvas.addEventListener('webglcontextlost', function(event) {
+ alert('WebGL context lost. You will need to reload the page.');
+ }, false);
+ }
+ if (setInModule) {
+ GLctx = Module.ctx = ctx;
+ Module.useWebGL = useWebGL;
+ Browser.moduleContextCreatedCallbacks.forEach(function(callback) { callback() });
+ Browser.init();
+ }
+ return ctx;
+ },destroyContext:function (canvas, useWebGL, setInModule) {},fullScreenHandlersInstalled:false,lockPointer:undefined,resizeCanvas:undefined,requestFullScreen:function (lockPointer, resizeCanvas) {
+ Browser.lockPointer = lockPointer;
+ Browser.resizeCanvas = resizeCanvas;
+ if (typeof Browser.lockPointer === 'undefined') Browser.lockPointer = true;
+ if (typeof Browser.resizeCanvas === 'undefined') Browser.resizeCanvas = false;
+
+ var canvas = Module['canvas'];
+ function fullScreenChange() {
+ Browser.isFullScreen = false;
+ var canvasContainer = canvas.parentNode;
+ if ((document['webkitFullScreenElement'] || document['webkitFullscreenElement'] ||
+ document['mozFullScreenElement'] || document['mozFullscreenElement'] ||
+ document['fullScreenElement'] || document['fullscreenElement'] ||
+ document['msFullScreenElement'] || document['msFullscreenElement'] ||
+ document['webkitCurrentFullScreenElement']) === canvasContainer) {
+ canvas.cancelFullScreen = document['cancelFullScreen'] ||
+ document['mozCancelFullScreen'] ||
+ document['webkitCancelFullScreen'] ||
+ document['msExitFullscreen'] ||
+ document['exitFullscreen'] ||
+ function() {};
+ canvas.cancelFullScreen = canvas.cancelFullScreen.bind(document);
+ if (Browser.lockPointer) canvas.requestPointerLock();
+ Browser.isFullScreen = true;
+ if (Browser.resizeCanvas) Browser.setFullScreenCanvasSize();
+ } else {
+
+ // remove the full screen specific parent of the canvas again to restore the HTML structure from before going full screen
+ canvasContainer.parentNode.insertBefore(canvas, canvasContainer);
+ canvasContainer.parentNode.removeChild(canvasContainer);
+
+ if (Browser.resizeCanvas) Browser.setWindowedCanvasSize();
+ }
+ if (Module['onFullScreen']) Module['onFullScreen'](Browser.isFullScreen);
+ Browser.updateCanvasDimensions(canvas);
+ }
+
+ if (!Browser.fullScreenHandlersInstalled) {
+ Browser.fullScreenHandlersInstalled = true;
+ document.addEventListener('fullscreenchange', fullScreenChange, false);
+ document.addEventListener('mozfullscreenchange', fullScreenChange, false);
+ document.addEventListener('webkitfullscreenchange', fullScreenChange, false);
+ document.addEventListener('MSFullscreenChange', fullScreenChange, false);
+ }
+
+ // create a new parent to ensure the canvas has no siblings. this allows browsers to optimize full screen performance when its parent is the full screen root
+ var canvasContainer = document.createElement("div");
+ canvas.parentNode.insertBefore(canvasContainer, canvas);
+ canvasContainer.appendChild(canvas);
+
+ // use parent of canvas as full screen root to allow aspect ratio correction (Firefox stretches the root to screen size)
+ canvasContainer.requestFullScreen = canvasContainer['requestFullScreen'] ||
+ canvasContainer['mozRequestFullScreen'] ||
+ canvasContainer['msRequestFullscreen'] ||
+ (canvasContainer['webkitRequestFullScreen'] ? function() { canvasContainer['webkitRequestFullScreen'](Element['ALLOW_KEYBOARD_INPUT']) } : null);
+ canvasContainer.requestFullScreen();
+ },requestAnimationFrame:function requestAnimationFrame(func) {
+ if (typeof window === 'undefined') { // Provide fallback to setTimeout if window is undefined (e.g. in Node.js)
+ setTimeout(func, 1000/60);
+ } else {
+ if (!window.requestAnimationFrame) {
+ window.requestAnimationFrame = window['requestAnimationFrame'] ||
+ window['mozRequestAnimationFrame'] ||
+ window['webkitRequestAnimationFrame'] ||
+ window['msRequestAnimationFrame'] ||
+ window['oRequestAnimationFrame'] ||
+ window['setTimeout'];
+ }
+ window.requestAnimationFrame(func);
+ }
+ },safeCallback:function (func) {
+ return function() {
+ if (!ABORT) return func.apply(null, arguments);
+ };
+ },safeRequestAnimationFrame:function (func) {
+ return Browser.requestAnimationFrame(function() {
+ if (!ABORT) func();
+ });
+ },safeSetTimeout:function (func, timeout) {
+ return setTimeout(function() {
+ if (!ABORT) func();
+ }, timeout);
+ },safeSetInterval:function (func, timeout) {
+ return setInterval(function() {
+ if (!ABORT) func();
+ }, timeout);
+ },getMimetype:function (name) {
+ return {
+ 'jpg': 'image/jpeg',
+ 'jpeg': 'image/jpeg',
+ 'png': 'image/png',
+ 'bmp': 'image/bmp',
+ 'ogg': 'audio/ogg',
+ 'wav': 'audio/wav',
+ 'mp3': 'audio/mpeg'
+ }[name.substr(name.lastIndexOf('.')+1)];
+ },getUserMedia:function (func) {
+ if(!window.getUserMedia) {
+ window.getUserMedia = navigator['getUserMedia'] ||
+ navigator['mozGetUserMedia'];
+ }
+ window.getUserMedia(func);
+ },getMovementX:function (event) {
+ return event['movementX'] ||
+ event['mozMovementX'] ||
+ event['webkitMovementX'] ||
+ 0;
+ },getMovementY:function (event) {
+ return event['movementY'] ||
+ event['mozMovementY'] ||
+ event['webkitMovementY'] ||
+ 0;
+ },getMouseWheelDelta:function (event) {
+ return Math.max(-1, Math.min(1, event.type === 'DOMMouseScroll' ? event.detail : -event.wheelDelta));
+ },mouseX:0,mouseY:0,mouseMovementX:0,mouseMovementY:0,calculateMouseEvent:function (event) { // event should be mousemove, mousedown or mouseup
+ if (Browser.pointerLock) {
+ // When the pointer is locked, calculate the coordinates
+ // based on the movement of the mouse.
+ // Workaround for Firefox bug 764498
+ if (event.type != 'mousemove' &&
+ ('mozMovementX' in event)) {
+ Browser.mouseMovementX = Browser.mouseMovementY = 0;
+ } else {
+ Browser.mouseMovementX = Browser.getMovementX(event);
+ Browser.mouseMovementY = Browser.getMovementY(event);
+ }
+
+ // check if SDL is available
+ if (typeof SDL != "undefined") {
+ Browser.mouseX = SDL.mouseX + Browser.mouseMovementX;
+ Browser.mouseY = SDL.mouseY + Browser.mouseMovementY;
+ } else {
+ // just add the mouse delta to the current absolut mouse position
+ // FIXME: ideally this should be clamped against the canvas size and zero
+ Browser.mouseX += Browser.mouseMovementX;
+ Browser.mouseY += Browser.mouseMovementY;
+ }
+ } else {
+ // Otherwise, calculate the movement based on the changes
+ // in the coordinates.
+ var rect = Module["canvas"].getBoundingClientRect();
+ var x, y;
+
+ // Neither .scrollX or .pageXOffset are defined in a spec, but
+ // we prefer .scrollX because it is currently in a spec draft.
+ // (see: http://www.w3.org/TR/2013/WD-cssom-view-20131217/)
+ var scrollX = ((typeof window.scrollX !== 'undefined') ? window.scrollX : window.pageXOffset);
+ var scrollY = ((typeof window.scrollY !== 'undefined') ? window.scrollY : window.pageYOffset);
+ if (event.type == 'touchstart' ||
+ event.type == 'touchend' ||
+ event.type == 'touchmove') {
+ var t = event.touches.item(0);
+ if (t) {
+ x = t.pageX - (scrollX + rect.left);
+ y = t.pageY - (scrollY + rect.top);
+ } else {
+ return;
+ }
+ } else {
+ x = event.pageX - (scrollX + rect.left);
+ y = event.pageY - (scrollY + rect.top);
+ }
+
+ // the canvas might be CSS-scaled compared to its backbuffer;
+ // SDL-using content will want mouse coordinates in terms
+ // of backbuffer units.
+ var cw = Module["canvas"].width;
+ var ch = Module["canvas"].height;
+ x = x * (cw / rect.width);
+ y = y * (ch / rect.height);
+
+ Browser.mouseMovementX = x - Browser.mouseX;
+ Browser.mouseMovementY = y - Browser.mouseY;
+ Browser.mouseX = x;
+ Browser.mouseY = y;
+ }
+ },xhrLoad:function (url, onload, onerror) {
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, true);
+ xhr.responseType = 'arraybuffer';
+ xhr.onload = function xhr_onload() {
+ if (xhr.status == 200 || (xhr.status == 0 && xhr.response)) { // file URLs can return 0
+ onload(xhr.response);
+ } else {
+ onerror();
+ }
+ };
+ xhr.onerror = onerror;
+ xhr.send(null);
+ },asyncLoad:function (url, onload, onerror, noRunDep) {
+ Browser.xhrLoad(url, function(arrayBuffer) {
+ assert(arrayBuffer, 'Loading data file "' + url + '" failed (no arrayBuffer).');
+ onload(new Uint8Array(arrayBuffer));
+ if (!noRunDep) removeRunDependency('al ' + url);
+ }, function(event) {
+ if (onerror) {
+ onerror();
+ } else {
+ throw 'Loading data file "' + url + '" failed.';
+ }
+ });
+ if (!noRunDep) addRunDependency('al ' + url);
+ },resizeListeners:[],updateResizeListeners:function () {
+ var canvas = Module['canvas'];
+ Browser.resizeListeners.forEach(function(listener) {
+ listener(canvas.width, canvas.height);
+ });
+ },setCanvasSize:function (width, height, noUpdates) {
+ var canvas = Module['canvas'];
+ Browser.updateCanvasDimensions(canvas, width, height);
+ if (!noUpdates) Browser.updateResizeListeners();
+ },windowedWidth:0,windowedHeight:0,setFullScreenCanvasSize:function () {
+ // check if SDL is available
+ if (typeof SDL != "undefined") {
+ var flags = HEAPU32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)];
+ flags = flags | 0x00800000; // set SDL_FULLSCREEN flag
+ HEAP32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)]=flags
+ }
+ Browser.updateResizeListeners();
+ },setWindowedCanvasSize:function () {
+ // check if SDL is available
+ if (typeof SDL != "undefined") {
+ var flags = HEAPU32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)];
+ flags = flags & ~0x00800000; // clear SDL_FULLSCREEN flag
+ HEAP32[((SDL.screen+Runtime.QUANTUM_SIZE*0)>>2)]=flags
+ }
+ Browser.updateResizeListeners();
+ },updateCanvasDimensions:function (canvas, wNative, hNative) {
+ if (wNative && hNative) {
+ canvas.widthNative = wNative;
+ canvas.heightNative = hNative;
+ } else {
+ wNative = canvas.widthNative;
+ hNative = canvas.heightNative;
+ }
+ var w = wNative;
+ var h = hNative;
+ if (Module['forcedAspectRatio'] && Module['forcedAspectRatio'] > 0) {
+ if (w/h < Module['forcedAspectRatio']) {
+ w = Math.round(h * Module['forcedAspectRatio']);
+ } else {
+ h = Math.round(w / Module['forcedAspectRatio']);
+ }
+ }
+ if (((document['webkitFullScreenElement'] || document['webkitFullscreenElement'] ||
+ document['mozFullScreenElement'] || document['mozFullscreenElement'] ||
+ document['fullScreenElement'] || document['fullscreenElement'] ||
+ document['msFullScreenElement'] || document['msFullscreenElement'] ||
+ document['webkitCurrentFullScreenElement']) === canvas.parentNode) && (typeof screen != 'undefined')) {
+ var factor = Math.min(screen.width / w, screen.height / h);
+ w = Math.round(w * factor);
+ h = Math.round(h * factor);
+ }
+ if (Browser.resizeCanvas) {
+ if (canvas.width != w) canvas.width = w;
+ if (canvas.height != h) canvas.height = h;
+ if (typeof canvas.style != 'undefined') {
+ canvas.style.removeProperty( "width");
+ canvas.style.removeProperty("height");
+ }
+ } else {
+ if (canvas.width != wNative) canvas.width = wNative;
+ if (canvas.height != hNative) canvas.height = hNative;
+ if (typeof canvas.style != 'undefined') {
+ if (w != wNative || h != hNative) {
+ canvas.style.setProperty( "width", w + "px", "important");
+ canvas.style.setProperty("height", h + "px", "important");
+ } else {
+ canvas.style.removeProperty( "width");
+ canvas.style.removeProperty("height");
+ }
+ }
+ }
+ }};
+
+ function _sbrk(bytes) {
+ // Implement a Linux-like 'memory area' for our 'process'.
+ // Changes the size of the memory area by |bytes|; returns the
+ // address of the previous top ('break') of the memory area
+ // We control the "dynamic" memory - DYNAMIC_BASE to DYNAMICTOP
+ var self = _sbrk;
+ if (!self.called) {
+ DYNAMICTOP = alignMemoryPage(DYNAMICTOP); // make sure we start out aligned
+ self.called = true;
+ assert(Runtime.dynamicAlloc);
+ self.alloc = Runtime.dynamicAlloc;
+ Runtime.dynamicAlloc = function() { abort('cannot dynamically allocate, sbrk now has control') };
+ }
+ var ret = DYNAMICTOP;
+ if (bytes != 0) self.alloc(bytes);
+ return ret; // Previous break location.
+ }
+
+ function ___assert_fail(condition, filename, line, func) {
+ ABORT = true;
+ throw 'Assertion failed: ' + Pointer_stringify(condition) + ', at: ' + [filename ? Pointer_stringify(filename) : 'unknown filename', line, func ? Pointer_stringify(func) : 'unknown function'] + ' at ' + stackTrace();
+ }
+
+ function _time(ptr) {
+ var ret = Math.floor(Date.now()/1000);
+ if (ptr) {
+ HEAP32[((ptr)>>2)]=ret;
+ }
+ return ret;
+ }
+
+ function _llvm_bswap_i32(x) {
+ return ((x&0xff)<<24) | (((x>>8)&0xff)<<16) | (((x>>16)&0xff)<<8) | (x>>>24);
+ }
+
+
+
+ function _emscripten_memcpy_big(dest, src, num) {
+ HEAPU8.set(HEAPU8.subarray(src, src+num), dest);
+ return dest;
+ }
+ Module["_memcpy"] = _memcpy;
+FS.staticInit();__ATINIT__.unshift({ func: function() { if (!Module["noFSInit"] && !FS.init.initialized) FS.init() } });__ATMAIN__.push({ func: function() { FS.ignorePermissions = false } });__ATEXIT__.push({ func: function() { FS.quit() } });Module["FS_createFolder"] = FS.createFolder;Module["FS_createPath"] = FS.createPath;Module["FS_createDataFile"] = FS.createDataFile;Module["FS_createPreloadedFile"] = FS.createPreloadedFile;Module["FS_createLazyFile"] = FS.createLazyFile;Module["FS_createLink"] = FS.createLink;Module["FS_createDevice"] = FS.createDevice;
+___errno_state = Runtime.staticAlloc(4); HEAP32[((___errno_state)>>2)]=0;
+__ATINIT__.unshift({ func: function() { TTY.init() } });__ATEXIT__.push({ func: function() { TTY.shutdown() } });TTY.utf8 = new Runtime.UTF8Processor();
+if (ENVIRONMENT_IS_NODE) { var fs = require("fs"); NODEFS.staticInit(); }
+__ATINIT__.push({ func: function() { SOCKFS.root = FS.mount(SOCKFS, {}, null); } });
+_fputc.ret = allocate([0], "i8", ALLOC_STATIC);
+Module["requestFullScreen"] = function Module_requestFullScreen(lockPointer, resizeCanvas) { Browser.requestFullScreen(lockPointer, resizeCanvas) };
+ Module["requestAnimationFrame"] = function Module_requestAnimationFrame(func) { Browser.requestAnimationFrame(func) };
+ Module["setCanvasSize"] = function Module_setCanvasSize(width, height, noUpdates) { Browser.setCanvasSize(width, height, noUpdates) };
+ Module["pauseMainLoop"] = function Module_pauseMainLoop() { Browser.mainLoop.pause() };
+ Module["resumeMainLoop"] = function Module_resumeMainLoop() { Browser.mainLoop.resume() };
+ Module["getUserMedia"] = function Module_getUserMedia() { Browser.getUserMedia() }
+STACK_BASE = STACKTOP = Runtime.alignMemory(STATICTOP);
+
+staticSealed = true; // seal the static portion of memory
+
+STACK_MAX = STACK_BASE + 5242880;
+
+DYNAMIC_BASE = DYNAMICTOP = Runtime.alignMemory(STACK_MAX);
+
+assert(DYNAMIC_BASE < TOTAL_MEMORY, "TOTAL_MEMORY not big enough for stack");
+
+
+var Math_min = Math.min;
+function invoke_iiii(index,a1,a2,a3) {
+ try {
+ return Module["dynCall_iiii"](index,a1,a2,a3);
+ } catch(e) {
+ if (typeof e !== 'number' && e !== 'longjmp') throw e;
+ asm["setThrew"](1, 0);
+ }
+}
+
+function invoke_vii(index,a1,a2) {
+ try {
+ Module["dynCall_vii"](index,a1,a2);
+ } catch(e) {
+ if (typeof e !== 'number' && e !== 'longjmp') throw e;
+ asm["setThrew"](1, 0);
+ }
+}
+
+function invoke_iii(index,a1,a2) {
+ try {
+ return Module["dynCall_iii"](index,a1,a2);
+ } catch(e) {
+ if (typeof e !== 'number' && e !== 'longjmp') throw e;
+ asm["setThrew"](1, 0);
+ }
+}
+
+function asmPrintInt(x, y) {
+ Module.print('int ' + x + ',' + y);// + ' ' + new Error().stack);
+}
+function asmPrintFloat(x, y) {
+ Module.print('float ' + x + ',' + y);// + ' ' + new Error().stack);
+}
+// EMSCRIPTEN_START_ASM
+var asm = Wasm.instantiateModuleFromAsm((function Module(global, env, buffer) {
+ 'use asm';
+ var HEAP8 = new global.Int8Array(buffer);
+ var HEAP16 = new global.Int16Array(buffer);
+ var HEAP32 = new global.Int32Array(buffer);
+ var HEAPU8 = new global.Uint8Array(buffer);
+ var HEAPU16 = new global.Uint16Array(buffer);
+ var HEAPU32 = new global.Uint32Array(buffer);
+ var HEAPF32 = new global.Float32Array(buffer);
+ var HEAPF64 = new global.Float64Array(buffer);
+
+ var STACKTOP=env.STACKTOP|0;
+ var STACK_MAX=env.STACK_MAX|0;
+ var tempDoublePtr=env.tempDoublePtr|0;
+ var ABORT=env.ABORT|0;
+
+ var __THREW__ = 0;
+ var threwValue = 0;
+ var setjmpId = 0;
+ var undef = 0;
+ var nan = +env.NaN, inf = +env.Infinity;
+ var tempInt = 0, tempBigInt = 0, tempBigIntP = 0, tempBigIntS = 0, tempBigIntR = 0.0, tempBigIntI = 0, tempBigIntD = 0, tempValue = 0, tempDouble = 0.0;
+
+ var tempRet0 = 0;
+ var tempRet1 = 0;
+ var tempRet2 = 0;
+ var tempRet3 = 0;
+ var tempRet4 = 0;
+ var tempRet5 = 0;
+ var tempRet6 = 0;
+ var tempRet7 = 0;
+ var tempRet8 = 0;
+ var tempRet9 = 0;
+ var Math_floor=global.Math.floor;
+ var Math_abs=global.Math.abs;
+ var Math_sqrt=global.Math.sqrt;
+ var Math_pow=global.Math.pow;
+ var Math_cos=global.Math.cos;
+ var Math_sin=global.Math.sin;
+ var Math_tan=global.Math.tan;
+ var Math_acos=global.Math.acos;
+ var Math_asin=global.Math.asin;
+ var Math_atan=global.Math.atan;
+ var Math_atan2=global.Math.atan2;
+ var Math_exp=global.Math.exp;
+ var Math_log=global.Math.log;
+ var Math_ceil=global.Math.ceil;
+ var Math_imul=global.Math.imul;
+ var abort=env.abort;
+ var assert=env.assert;
+ var asmPrintInt=env.asmPrintInt;
+ var asmPrintFloat=env.asmPrintFloat;
+ var Math_min=env.min;
+ var invoke_iiii=env.invoke_iiii;
+ var invoke_vii=env.invoke_vii;
+ var invoke_iii=env.invoke_iii;
+ var _send=env._send;
+ var ___setErrNo=env.___setErrNo;
+ var ___assert_fail=env.___assert_fail;
+ var _fflush=env._fflush;
+ var _pwrite=env._pwrite;
+ var __reallyNegative=env.__reallyNegative;
+ var _sbrk=env._sbrk;
+ var ___errno_location=env.___errno_location;
+ var _emscripten_memcpy_big=env._emscripten_memcpy_big;
+ var _fileno=env._fileno;
+ var _sysconf=env._sysconf;
+ var _puts=env._puts;
+ var _mkport=env._mkport;
+ var _write=env._write;
+ var _llvm_bswap_i32=env._llvm_bswap_i32;
+ var _fputc=env._fputc;
+ var _abort=env._abort;
+ var _fwrite=env._fwrite;
+ var _time=env._time;
+ var _fprintf=env._fprintf;
+ var __formatString=env.__formatString;
+ var _fputs=env._fputs;
+ var _printf=env._printf;
+ var tempFloat = 0.0;
+
+// EMSCRIPTEN_START_FUNCS
+function _inflate(i2, i3) {
+ i2 = i2 | 0;
+ i3 = i3 | 0;
+ var i1 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0, i23 = 0, i24 = 0, i25 = 0, i26 = 0, i27 = 0, i28 = 0, i29 = 0, i30 = 0, i31 = 0, i32 = 0, i33 = 0, i34 = 0, i35 = 0, i36 = 0, i37 = 0, i38 = 0, i39 = 0, i40 = 0, i41 = 0, i42 = 0, i43 = 0, i44 = 0, i45 = 0, i46 = 0, i47 = 0, i48 = 0, i49 = 0, i50 = 0, i51 = 0, i52 = 0, i53 = 0, i54 = 0, i55 = 0, i56 = 0, i57 = 0, i58 = 0, i59 = 0, i60 = 0, i61 = 0, i62 = 0, i63 = 0, i64 = 0, i65 = 0, i66 = 0, i67 = 0, i68 = 0, i69 = 0, i70 = 0, i71 = 0, i72 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i25 = i1;
+ if ((i2 | 0) == 0) {
+ i72 = -2;
+ STACKTOP = i1;
+ return i72 | 0;
+ }
+ i4 = HEAP32[i2 + 28 >> 2] | 0;
+ if ((i4 | 0) == 0) {
+ i72 = -2;
+ STACKTOP = i1;
+ return i72 | 0;
+ }
+ i8 = i2 + 12 | 0;
+ i19 = HEAP32[i8 >> 2] | 0;
+ if ((i19 | 0) == 0) {
+ i72 = -2;
+ STACKTOP = i1;
+ return i72 | 0;
+ }
+ i62 = HEAP32[i2 >> 2] | 0;
+ if ((i62 | 0) == 0 ? (HEAP32[i2 + 4 >> 2] | 0) != 0 : 0) {
+ i72 = -2;
+ STACKTOP = i1;
+ return i72 | 0;
+ }
+ i68 = HEAP32[i4 >> 2] | 0;
+ if ((i68 | 0) == 11) {
+ HEAP32[i4 >> 2] = 12;
+ i68 = 12;
+ i62 = HEAP32[i2 >> 2] | 0;
+ i19 = HEAP32[i8 >> 2] | 0;
+ }
+ i15 = i2 + 16 | 0;
+ i59 = HEAP32[i15 >> 2] | 0;
+ i16 = i2 + 4 | 0;
+ i5 = HEAP32[i16 >> 2] | 0;
+ i17 = i4 + 56 | 0;
+ i6 = i4 + 60 | 0;
+ i12 = i4 + 8 | 0;
+ i10 = i4 + 24 | 0;
+ i39 = i25 + 1 | 0;
+ i11 = i4 + 16 | 0;
+ i38 = i4 + 32 | 0;
+ i35 = i2 + 24 | 0;
+ i40 = i4 + 36 | 0;
+ i41 = i4 + 20 | 0;
+ i9 = i2 + 48 | 0;
+ i42 = i4 + 64 | 0;
+ i46 = i4 + 12 | 0;
+ i47 = (i3 + -5 | 0) >>> 0 < 2;
+ i7 = i4 + 4 | 0;
+ i48 = i4 + 76 | 0;
+ i49 = i4 + 84 | 0;
+ i50 = i4 + 80 | 0;
+ i51 = i4 + 88 | 0;
+ i43 = (i3 | 0) == 6;
+ i57 = i4 + 7108 | 0;
+ i37 = i4 + 72 | 0;
+ i58 = i4 + 7112 | 0;
+ i54 = i4 + 68 | 0;
+ i28 = i4 + 44 | 0;
+ i29 = i4 + 7104 | 0;
+ i30 = i4 + 48 | 0;
+ i31 = i4 + 52 | 0;
+ i18 = i4 + 40 | 0;
+ i13 = i2 + 20 | 0;
+ i14 = i4 + 28 | 0;
+ i32 = i4 + 96 | 0;
+ i33 = i4 + 100 | 0;
+ i34 = i4 + 92 | 0;
+ i36 = i4 + 104 | 0;
+ i52 = i4 + 1328 | 0;
+ i53 = i4 + 108 | 0;
+ i27 = i4 + 112 | 0;
+ i55 = i4 + 752 | 0;
+ i56 = i4 + 624 | 0;
+ i44 = i25 + 2 | 0;
+ i45 = i25 + 3 | 0;
+ i67 = HEAP32[i6 >> 2] | 0;
+ i65 = i5;
+ i64 = HEAP32[i17 >> 2] | 0;
+ i26 = i59;
+ i61 = 0;
+ L17 : while (1) {
+ L19 : do {
+ switch (i68 | 0) {
+ case 16:
+ {
+ if (i67 >>> 0 < 14) {
+ i63 = i67;
+ while (1) {
+ if ((i65 | 0) == 0) {
+ i65 = 0;
+ break L17;
+ }
+ i65 = i65 + -1 | 0;
+ i66 = i62 + 1 | 0;
+ i64 = (HEAPU8[i62] << i63) + i64 | 0;
+ i63 = i63 + 8 | 0;
+ if (i63 >>> 0 < 14) {
+ i62 = i66;
+ } else {
+ i62 = i66;
+ break;
+ }
+ }
+ } else {
+ i63 = i67;
+ }
+ i71 = (i64 & 31) + 257 | 0;
+ HEAP32[i32 >> 2] = i71;
+ i72 = (i64 >>> 5 & 31) + 1 | 0;
+ HEAP32[i33 >> 2] = i72;
+ HEAP32[i34 >> 2] = (i64 >>> 10 & 15) + 4;
+ i64 = i64 >>> 14;
+ i63 = i63 + -14 | 0;
+ if (i71 >>> 0 > 286 | i72 >>> 0 > 30) {
+ HEAP32[i35 >> 2] = 11616;
+ HEAP32[i4 >> 2] = 29;
+ i66 = i26;
+ break L19;
+ } else {
+ HEAP32[i36 >> 2] = 0;
+ HEAP32[i4 >> 2] = 17;
+ i66 = 0;
+ i60 = 154;
+ break L19;
+ }
+ }
+ case 2:
+ {
+ if (i67 >>> 0 < 32) {
+ i63 = i67;
+ i60 = 47;
+ } else {
+ i60 = 49;
+ }
+ break;
+ }
+ case 23:
+ {
+ i66 = HEAP32[i37 >> 2] | 0;
+ i63 = i67;
+ i60 = 240;
+ break;
+ }
+ case 18:
+ {
+ i63 = HEAP32[i36 >> 2] | 0;
+ i69 = i65;
+ i60 = 164;
+ break;
+ }
+ case 1:
+ {
+ if (i67 >>> 0 < 16) {
+ i63 = i67;
+ while (1) {
+ if ((i65 | 0) == 0) {
+ i65 = 0;
+ break L17;
+ }
+ i65 = i65 + -1 | 0;
+ i66 = i62 + 1 | 0;
+ i64 = (HEAPU8[i62] << i63) + i64 | 0;
+ i63 = i63 + 8 | 0;
+ if (i63 >>> 0 < 16) {
+ i62 = i66;
+ } else {
+ i62 = i66;
+ break;
+ }
+ }
+ } else {
+ i63 = i67;
+ }
+ HEAP32[i11 >> 2] = i64;
+ if ((i64 & 255 | 0) != 8) {
+ HEAP32[i35 >> 2] = 11448;
+ HEAP32[i4 >> 2] = 29;
+ i66 = i26;
+ break L19;
+ }
+ if ((i64 & 57344 | 0) != 0) {
+ HEAP32[i35 >> 2] = 11504;
+ HEAP32[i4 >> 2] = 29;
+ i66 = i26;
+ break L19;
+ }
+ i60 = HEAP32[i38 >> 2] | 0;
+ if ((i60 | 0) == 0) {
+ i60 = i64;
+ } else {
+ HEAP32[i60 >> 2] = i64 >>> 8 & 1;
+ i60 = HEAP32[i11 >> 2] | 0;
+ }
+ if ((i60 & 512 | 0) != 0) {
+ HEAP8[i25] = i64;
+ HEAP8[i39] = i64 >>> 8;
+ HEAP32[i10 >> 2] = _crc32(HEAP32[i10 >> 2] | 0, i25, 2) | 0;
+ }
+ HEAP32[i4 >> 2] = 2;
+ i63 = 0;
+ i64 = 0;
+ i60 = 47;
+ break;
+ }
+ case 8:
+ {
+ i63 = i67;
+ i60 = 109;
+ break;
+ }
+ case 22:
+ {
+ i63 = i67;
+ i60 = 228;
+ break;
+ }
+ case 24:
+ {
+ i63 = i67;
+ i60 = 246;
+ break;
+ }
+ case 19:
+ {
+ i63 = i67;
+ i60 = 201;
+ break;
+ }
+ case 20:
+ {
+ i63 = i67;
+ i60 = 202;
+ break;
+ }
+ case 21:
+ {
+ i66 = HEAP32[i37 >> 2] | 0;
+ i63 = i67;
+ i60 = 221;
+ break;
+ }
+ case 10:
+ {
+ i63 = i67;
+ i60 = 121;
+ break;
+ }
+ case 11:
+ {
+ i63 = i67;
+ i60 = 124;
+ break;
+ }
+ case 12:
+ {
+ i63 = i67;
+ i60 = 125;
+ break;
+ }
+ case 5:
+ {
+ i63 = i67;
+ i60 = 73;
+ break;
+ }
+ case 4:
+ {
+ i63 = i67;
+ i60 = 62;
+ break;
+ }
+ case 0:
+ {
+ i66 = HEAP32[i12 >> 2] | 0;
+ if ((i66 | 0) == 0) {
+ HEAP32[i4 >> 2] = 12;
+ i63 = i67;
+ i66 = i26;
+ break L19;
+ }
+ if (i67 >>> 0 < 16) {
+ i63 = i67;
+ while (1) {
+ if ((i65 | 0) == 0) {
+ i65 = 0;
+ break L17;
+ }
+ i65 = i65 + -1 | 0;
+ i67 = i62 + 1 | 0;
+ i64 = (HEAPU8[i62] << i63) + i64 | 0;
+ i63 = i63 + 8 | 0;
+ if (i63 >>> 0 < 16) {
+ i62 = i67;
+ } else {
+ i62 = i67;
+ break;
+ }
+ }
+ } else {
+ i63 = i67;
+ }
+ if ((i66 & 2 | 0) != 0 & (i64 | 0) == 35615) {
+ HEAP32[i10 >> 2] = _crc32(0, 0, 0) | 0;
+ HEAP8[i25] = 31;
+ HEAP8[i39] = -117;
+ HEAP32[i10 >> 2] = _crc32(HEAP32[i10 >> 2] | 0, i25, 2) | 0;
+ HEAP32[i4 >> 2] = 1;
+ i63 = 0;
+ i64 = 0;
+ i66 = i26;
+ break L19;
+ }
+ HEAP32[i11 >> 2] = 0;
+ i67 = HEAP32[i38 >> 2] | 0;
+ if ((i67 | 0) != 0) {
+ HEAP32[i67 + 48 >> 2] = -1;
+ i66 = HEAP32[i12 >> 2] | 0;
+ }
+ if ((i66 & 1 | 0) != 0 ? ((((i64 << 8 & 65280) + (i64 >>> 8) | 0) >>> 0) % 31 | 0 | 0) == 0 : 0) {
+ if ((i64 & 15 | 0) != 8) {
+ HEAP32[i35 >> 2] = 11448;
+ HEAP32[i4 >> 2] = 29;
+ i66 = i26;
+ break L19;
+ }
+ i66 = i64 >>> 4;
+ i63 = i63 + -4 | 0;
+ i68 = (i66 & 15) + 8 | 0;
+ i67 = HEAP32[i40 >> 2] | 0;
+ if ((i67 | 0) != 0) {
+ if (i68 >>> 0 > i67 >>> 0) {
+ HEAP32[i35 >> 2] = 11480;
+ HEAP32[i4 >> 2] = 29;
+ i64 = i66;
+ i66 = i26;
+ break L19;
+ }
+ } else {
+ HEAP32[i40 >> 2] = i68;
+ }
+ HEAP32[i41 >> 2] = 1 << i68;
+ i63 = _adler32(0, 0, 0) | 0;
+ HEAP32[i10 >> 2] = i63;
+ HEAP32[i9 >> 2] = i63;
+ HEAP32[i4 >> 2] = i64 >>> 12 & 2 ^ 11;
+ i63 = 0;
+ i64 = 0;
+ i66 = i26;
+ break L19;
+ }
+ HEAP32[i35 >> 2] = 11424;
+ HEAP32[i4 >> 2] = 29;
+ i66 = i26;
+ break;
+ }
+ case 26:
+ {
+ if ((HEAP32[i12 >> 2] | 0) != 0) {
+ if (i67 >>> 0 < 32) {
+ i63 = i67;
+ while (1) {
+ if ((i65 | 0) == 0) {
+ i65 = 0;
+ break L17;
+ }
+ i65 = i65 + -1 | 0;
+ i66 = i62 + 1 | 0;
+ i64 = (HEAPU8[i62] << i63) + i64 | 0;
+ i63 = i63 + 8 | 0;
+ if (i63 >>> 0 < 32) {
+ i62 = i66;
+ } else {
+ i62 = i66;
+ break;
+ }
+ }
+ } else {
+ i63 = i67;
+ }
+ i66 = i59 - i26 | 0;
+ HEAP32[i13 >> 2] = (HEAP32[i13 >> 2] | 0) + i66;
+ HEAP32[i14 >> 2] = (HEAP32[i14 >> 2] | 0) + i66;
+ if ((i59 | 0) != (i26 | 0)) {
+ i59 = HEAP32[i10 >> 2] | 0;
+ i67 = i19 + (0 - i66) | 0;
+ if ((HEAP32[i11 >> 2] | 0) == 0) {
+ i59 = _adler32(i59, i67, i66) | 0;
+ } else {
+ i59 = _crc32(i59, i67, i66) | 0;
+ }
+ HEAP32[i10 >> 2] = i59;
+ HEAP32[i9 >> 2] = i59;
+ }
+ if ((HEAP32[i11 >> 2] | 0) == 0) {
+ i59 = _llvm_bswap_i32(i64 | 0) | 0;
+ } else {
+ i59 = i64;
+ }
+ if ((i59 | 0) == (HEAP32[i10 >> 2] | 0)) {
+ i63 = 0;
+ i64 = 0;
+ i59 = i26;
+ } else {
+ HEAP32[i35 >> 2] = 11904;
+ HEAP32[i4 >> 2] = 29;
+ i66 = i26;
+ i59 = i26;
+ break L19;
+ }
+ } else {
+ i63 = i67;
+ }
+ HEAP32[i4 >> 2] = 27;
+ i60 = 277;
+ break;
+ }
+ case 27:
+ {
+ i63 = i67;
+ i60 = 277;
+ break;
+ }
+ case 28:
+ {
+ i63 = i67;
+ i61 = 1;
+ i60 = 285;
+ break L17;
+ }
+ case 29:
+ {
+ i63 = i67;
+ i61 = -3;
+ break L17;
+ }
+ case 25:
+ {
+ if ((i26 | 0) == 0) {
+ i63 = i67;
+ i26 = 0;
+ i60 = 285;
+ break L17;
+ }
+ HEAP8[i19] = HEAP32[i42 >> 2];
+ HEAP32[i4 >> 2] = 20;
+ i63 = i67;
+ i66 = i26 + -1 | 0;
+ i19 = i19 + 1 | 0;
+ break;
+ }
+ case 17:
+ {
+ i66 = HEAP32[i36 >> 2] | 0;
+ if (i66 >>> 0 < (HEAP32[i34 >> 2] | 0) >>> 0) {
+ i63 = i67;
+ i60 = 154;
+ } else {
+ i60 = 158;
+ }
+ break;
+ }
+ case 13:
+ {
+ i63 = i67 & 7;
+ i64 = i64 >>> i63;
+ i63 = i67 - i63 | 0;
+ if (i63 >>> 0 < 32) {
+ while (1) {
+ if ((i65 | 0) == 0) {
+ i65 = 0;
+ break L17;
+ }
+ i65 = i65 + -1 | 0;
+ i66 = i62 + 1 | 0;
+ i64 = (HEAPU8[i62] << i63) + i64 | 0;
+ i63 = i63 + 8 | 0;
+ if (i63 >>> 0 < 32) {
+ i62 = i66;
+ } else {
+ i62 = i66;
+ break;
+ }
+ }
+ }
+ i66 = i64 & 65535;
+ if ((i66 | 0) == (i64 >>> 16 ^ 65535 | 0)) {
+ HEAP32[i42 >> 2] = i66;
+ HEAP32[i4 >> 2] = 14;
+ if (i43) {
+ i63 = 0;
+ i64 = 0;
+ i60 = 285;
+ break L17;
+ } else {
+ i63 = 0;
+ i64 = 0;
+ i60 = 143;
+ break L19;
+ }
+ } else {
+ HEAP32[i35 >> 2] = 11584;
+ HEAP32[i4 >> 2] = 29;
+ i66 = i26;
+ break L19;
+ }
+ }
+ case 7:
+ {
+ i63 = i67;
+ i60 = 96;
+ break;
+ }
+ case 14:
+ {
+ i63 = i67;
+ i60 = 143;
+ break;
+ }
+ case 15:
+ {
+ i63 = i67;
+ i60 = 144;
+ break;
+ }
+ case 9:
+ {
+ if (i67 >>> 0 < 32) {
+ i63 = i67;
+ while (1) {
+ if ((i65 | 0) == 0) {
+ i65 = 0;
+ break L17;
+ }
+ i65 = i65 + -1 | 0;
+ i66 = i62 + 1 | 0;
+ i64 = (HEAPU8[i62] << i63) + i64 | 0;
+ i63 = i63 + 8 | 0;
+ if (i63 >>> 0 < 32) {
+ i62 = i66;
+ } else {
+ i62 = i66;
+ break;
+ }
+ }
+ }
+ i63 = _llvm_bswap_i32(i64 | 0) | 0;
+ HEAP32[i10 >> 2] = i63;
+ HEAP32[i9 >> 2] = i63;
+ HEAP32[i4 >> 2] = 10;
+ i63 = 0;
+ i64 = 0;
+ i60 = 121;
+ break;
+ }
+ case 30:
+ {
+ i60 = 299;
+ break L17;
+ }
+ case 6:
+ {
+ i63 = i67;
+ i60 = 83;
+ break;
+ }
+ case 3:
+ {
+ if (i67 >>> 0 < 16) {
+ i63 = i67;
+ i66 = i62;
+ i60 = 55;
+ } else {
+ i60 = 57;
+ }
+ break;
+ }
+ default:
+ {
+ i2 = -2;
+ i60 = 300;
+ break L17;
+ }
+ }
+ } while (0);
+ if ((i60 | 0) == 47) {
+ while (1) {
+ i60 = 0;
+ if ((i65 | 0) == 0) {
+ i65 = 0;
+ break L17;
+ }
+ i65 = i65 + -1 | 0;
+ i60 = i62 + 1 | 0;
+ i64 = (HEAPU8[i62] << i63) + i64 | 0;
+ i63 = i63 + 8 | 0;
+ if (i63 >>> 0 < 32) {
+ i62 = i60;
+ i60 = 47;
+ } else {
+ i62 = i60;
+ i60 = 49;
+ break;
+ }
+ }
+ } else if ((i60 | 0) == 121) {
+ if ((HEAP32[i46 >> 2] | 0) == 0) {
+ i60 = 122;
+ break;
+ }
+ i60 = _adler32(0, 0, 0) | 0;
+ HEAP32[i10 >> 2] = i60;
+ HEAP32[i9 >> 2] = i60;
+ HEAP32[i4 >> 2] = 11;
+ i60 = 124;
+ } else if ((i60 | 0) == 143) {
+ HEAP32[i4 >> 2] = 15;
+ i60 = 144;
+ } else if ((i60 | 0) == 154) {
+ while (1) {
+ i60 = 0;
+ if (i63 >>> 0 < 3) {
+ while (1) {
+ if ((i65 | 0) == 0) {
+ i65 = 0;
+ break L17;
+ }
+ i65 = i65 + -1 | 0;
+ i67 = i62 + 1 | 0;
+ i64 = (HEAPU8[i62] << i63) + i64 | 0;
+ i63 = i63 + 8 | 0;
+ if (i63 >>> 0 < 3) {
+ i62 = i67;
+ } else {
+ i62 = i67;
+ break;
+ }
+ }
+ }
+ HEAP32[i36 >> 2] = i66 + 1;
+ HEAP16[i4 + (HEAPU16[11384 + (i66 << 1) >> 1] << 1) + 112 >> 1] = i64 & 7;
+ i64 = i64 >>> 3;
+ i63 = i63 + -3 | 0;
+ i66 = HEAP32[i36 >> 2] | 0;
+ if (i66 >>> 0 < (HEAP32[i34 >> 2] | 0) >>> 0) {
+ i60 = 154;
+ } else {
+ i67 = i63;
+ i60 = 158;
+ break;
+ }
+ }
+ } else if ((i60 | 0) == 277) {
+ i60 = 0;
+ if ((HEAP32[i12 >> 2] | 0) == 0) {
+ i60 = 284;
+ break;
+ }
+ if ((HEAP32[i11 >> 2] | 0) == 0) {
+ i60 = 284;
+ break;
+ }
+ if (i63 >>> 0 < 32) {
+ i66 = i62;
+ while (1) {
+ if ((i65 | 0) == 0) {
+ i65 = 0;
+ i62 = i66;
+ break L17;
+ }
+ i65 = i65 + -1 | 0;
+ i62 = i66 + 1 | 0;
+ i64 = (HEAPU8[i66] << i63) + i64 | 0;
+ i63 = i63 + 8 | 0;
+ if (i63 >>> 0 < 32) {
+ i66 = i62;
+ } else {
+ break;
+ }
+ }
+ }
+ if ((i64 | 0) == (HEAP32[i14 >> 2] | 0)) {
+ i63 = 0;
+ i64 = 0;
+ i60 = 284;
+ break;
+ }
+ HEAP32[i35 >> 2] = 11928;
+ HEAP32[i4 >> 2] = 29;
+ i66 = i26;
+ }
+ do {
+ if ((i60 | 0) == 49) {
+ i60 = HEAP32[i38 >> 2] | 0;
+ if ((i60 | 0) != 0) {
+ HEAP32[i60 + 4 >> 2] = i64;
+ }
+ if ((HEAP32[i11 >> 2] & 512 | 0) != 0) {
+ HEAP8[i25] = i64;
+ HEAP8[i39] = i64 >>> 8;
+ HEAP8[i44] = i64 >>> 16;
+ HEAP8[i45] = i64 >>> 24;
+ HEAP32[i10 >> 2] = _crc32(HEAP32[i10 >> 2] | 0, i25, 4) | 0;
+ }
+ HEAP32[i4 >> 2] = 3;
+ i63 = 0;
+ i64 = 0;
+ i66 = i62;
+ i60 = 55;
+ } else if ((i60 | 0) == 124) {
+ if (i47) {
+ i60 = 285;
+ break L17;
+ } else {
+ i60 = 125;
+ }
+ } else if ((i60 | 0) == 144) {
+ i60 = 0;
+ i66 = HEAP32[i42 >> 2] | 0;
+ if ((i66 | 0) == 0) {
+ HEAP32[i4 >> 2] = 11;
+ i66 = i26;
+ break;
+ }
+ i66 = i66 >>> 0 > i65 >>> 0 ? i65 : i66;
+ i67 = i66 >>> 0 > i26 >>> 0 ? i26 : i66;
+ if ((i67 | 0) == 0) {
+ i60 = 285;
+ break L17;
+ }
+ _memcpy(i19 | 0, i62 | 0, i67 | 0) | 0;
+ HEAP32[i42 >> 2] = (HEAP32[i42 >> 2] | 0) - i67;
+ i65 = i65 - i67 | 0;
+ i66 = i26 - i67 | 0;
+ i62 = i62 + i67 | 0;
+ i19 = i19 + i67 | 0;
+ } else if ((i60 | 0) == 158) {
+ i60 = 0;
+ if (i66 >>> 0 < 19) {
+ while (1) {
+ i61 = i66 + 1 | 0;
+ HEAP16[i4 + (HEAPU16[11384 + (i66 << 1) >> 1] << 1) + 112 >> 1] = 0;
+ if ((i61 | 0) == 19) {
+ break;
+ } else {
+ i66 = i61;
+ }
+ }
+ HEAP32[i36 >> 2] = 19;
+ }
+ HEAP32[i53 >> 2] = i52;
+ HEAP32[i48 >> 2] = i52;
+ HEAP32[i49 >> 2] = 7;
+ i61 = _inflate_table(0, i27, 19, i53, i49, i55) | 0;
+ if ((i61 | 0) == 0) {
+ HEAP32[i36 >> 2] = 0;
+ HEAP32[i4 >> 2] = 18;
+ i63 = 0;
+ i69 = i65;
+ i61 = 0;
+ i60 = 164;
+ break;
+ } else {
+ HEAP32[i35 >> 2] = 11656;
+ HEAP32[i4 >> 2] = 29;
+ i63 = i67;
+ i66 = i26;
+ break;
+ }
+ }
+ } while (0);
+ L163 : do {
+ if ((i60 | 0) == 55) {
+ while (1) {
+ i60 = 0;
+ if ((i65 | 0) == 0) {
+ i65 = 0;
+ i62 = i66;
+ break L17;
+ }
+ i65 = i65 + -1 | 0;
+ i62 = i66 + 1 | 0;
+ i64 = (HEAPU8[i66] << i63) + i64 | 0;
+ i63 = i63 + 8 | 0;
+ if (i63 >>> 0 < 16) {
+ i66 = i62;
+ i60 = 55;
+ } else {
+ i60 = 57;
+ break;
+ }
+ }
+ } else if ((i60 | 0) == 125) {
+ i60 = 0;
+ if ((HEAP32[i7 >> 2] | 0) != 0) {
+ i66 = i63 & 7;
+ HEAP32[i4 >> 2] = 26;
+ i63 = i63 - i66 | 0;
+ i64 = i64 >>> i66;
+ i66 = i26;
+ break;
+ }
+ if (i63 >>> 0 < 3) {
+ while (1) {
+ if ((i65 | 0) == 0) {
+ i65 = 0;
+ break L17;
+ }
+ i65 = i65 + -1 | 0;
+ i66 = i62 + 1 | 0;
+ i64 = (HEAPU8[i62] << i63) + i64 | 0;
+ i63 = i63 + 8 | 0;
+ if (i63 >>> 0 < 3) {
+ i62 = i66;
+ } else {
+ i62 = i66;
+ break;
+ }
+ }
+ }
+ HEAP32[i7 >> 2] = i64 & 1;
+ i66 = i64 >>> 1 & 3;
+ if ((i66 | 0) == 0) {
+ HEAP32[i4 >> 2] = 13;
+ } else if ((i66 | 0) == 1) {
+ HEAP32[i48 >> 2] = 11952;
+ HEAP32[i49 >> 2] = 9;
+ HEAP32[i50 >> 2] = 14e3;
+ HEAP32[i51 >> 2] = 5;
+ HEAP32[i4 >> 2] = 19;
+ if (i43) {
+ i60 = 133;
+ break L17;
+ }
+ } else if ((i66 | 0) == 2) {
+ HEAP32[i4 >> 2] = 16;
+ } else if ((i66 | 0) == 3) {
+ HEAP32[i35 >> 2] = 11560;
+ HEAP32[i4 >> 2] = 29;
+ }
+ i63 = i63 + -3 | 0;
+ i64 = i64 >>> 3;
+ i66 = i26;
+ } else if ((i60 | 0) == 164) {
+ i60 = 0;
+ i65 = HEAP32[i32 >> 2] | 0;
+ i66 = HEAP32[i33 >> 2] | 0;
+ do {
+ if (i63 >>> 0 < (i66 + i65 | 0) >>> 0) {
+ i71 = i67;
+ L181 : while (1) {
+ i70 = (1 << HEAP32[i49 >> 2]) + -1 | 0;
+ i72 = i70 & i64;
+ i68 = HEAP32[i48 >> 2] | 0;
+ i67 = HEAPU8[i68 + (i72 << 2) + 1 | 0] | 0;
+ if (i67 >>> 0 > i71 >>> 0) {
+ i67 = i71;
+ while (1) {
+ if ((i69 | 0) == 0) {
+ i63 = i67;
+ i65 = 0;
+ break L17;
+ }
+ i69 = i69 + -1 | 0;
+ i71 = i62 + 1 | 0;
+ i64 = (HEAPU8[i62] << i67) + i64 | 0;
+ i62 = i67 + 8 | 0;
+ i72 = i70 & i64;
+ i67 = HEAPU8[i68 + (i72 << 2) + 1 | 0] | 0;
+ if (i67 >>> 0 > i62 >>> 0) {
+ i67 = i62;
+ i62 = i71;
+ } else {
+ i70 = i62;
+ i62 = i71;
+ break;
+ }
+ }
+ } else {
+ i70 = i71;
+ }
+ i68 = HEAP16[i68 + (i72 << 2) + 2 >> 1] | 0;
+ L188 : do {
+ if ((i68 & 65535) < 16) {
+ if (i70 >>> 0 < i67 >>> 0) {
+ while (1) {
+ if ((i69 | 0) == 0) {
+ i63 = i70;
+ i65 = 0;
+ break L17;
+ }
+ i69 = i69 + -1 | 0;
+ i65 = i62 + 1 | 0;
+ i64 = (HEAPU8[i62] << i70) + i64 | 0;
+ i70 = i70 + 8 | 0;
+ if (i70 >>> 0 < i67 >>> 0) {
+ i62 = i65;
+ } else {
+ i62 = i65;
+ break;
+ }
+ }
+ }
+ HEAP32[i36 >> 2] = i63 + 1;
+ HEAP16[i4 + (i63 << 1) + 112 >> 1] = i68;
+ i71 = i70 - i67 | 0;
+ i64 = i64 >>> i67;
+ } else {
+ if (i68 << 16 >> 16 == 16) {
+ i68 = i67 + 2 | 0;
+ if (i70 >>> 0 < i68 >>> 0) {
+ i71 = i62;
+ while (1) {
+ if ((i69 | 0) == 0) {
+ i63 = i70;
+ i65 = 0;
+ i62 = i71;
+ break L17;
+ }
+ i69 = i69 + -1 | 0;
+ i62 = i71 + 1 | 0;
+ i64 = (HEAPU8[i71] << i70) + i64 | 0;
+ i70 = i70 + 8 | 0;
+ if (i70 >>> 0 < i68 >>> 0) {
+ i71 = i62;
+ } else {
+ break;
+ }
+ }
+ }
+ i64 = i64 >>> i67;
+ i67 = i70 - i67 | 0;
+ if ((i63 | 0) == 0) {
+ i60 = 181;
+ break L181;
+ }
+ i67 = i67 + -2 | 0;
+ i68 = (i64 & 3) + 3 | 0;
+ i64 = i64 >>> 2;
+ i70 = HEAP16[i4 + (i63 + -1 << 1) + 112 >> 1] | 0;
+ } else if (i68 << 16 >> 16 == 17) {
+ i68 = i67 + 3 | 0;
+ if (i70 >>> 0 < i68 >>> 0) {
+ i71 = i62;
+ while (1) {
+ if ((i69 | 0) == 0) {
+ i63 = i70;
+ i65 = 0;
+ i62 = i71;
+ break L17;
+ }
+ i69 = i69 + -1 | 0;
+ i62 = i71 + 1 | 0;
+ i64 = (HEAPU8[i71] << i70) + i64 | 0;
+ i70 = i70 + 8 | 0;
+ if (i70 >>> 0 < i68 >>> 0) {
+ i71 = i62;
+ } else {
+ break;
+ }
+ }
+ }
+ i64 = i64 >>> i67;
+ i67 = -3 - i67 + i70 | 0;
+ i68 = (i64 & 7) + 3 | 0;
+ i64 = i64 >>> 3;
+ i70 = 0;
+ } else {
+ i68 = i67 + 7 | 0;
+ if (i70 >>> 0 < i68 >>> 0) {
+ i71 = i62;
+ while (1) {
+ if ((i69 | 0) == 0) {
+ i63 = i70;
+ i65 = 0;
+ i62 = i71;
+ break L17;
+ }
+ i69 = i69 + -1 | 0;
+ i62 = i71 + 1 | 0;
+ i64 = (HEAPU8[i71] << i70) + i64 | 0;
+ i70 = i70 + 8 | 0;
+ if (i70 >>> 0 < i68 >>> 0) {
+ i71 = i62;
+ } else {
+ break;
+ }
+ }
+ }
+ i64 = i64 >>> i67;
+ i67 = -7 - i67 + i70 | 0;
+ i68 = (i64 & 127) + 11 | 0;
+ i64 = i64 >>> 7;
+ i70 = 0;
+ }
+ if ((i63 + i68 | 0) >>> 0 > (i66 + i65 | 0) >>> 0) {
+ i60 = 190;
+ break L181;
+ }
+ while (1) {
+ i68 = i68 + -1 | 0;
+ HEAP32[i36 >> 2] = i63 + 1;
+ HEAP16[i4 + (i63 << 1) + 112 >> 1] = i70;
+ if ((i68 | 0) == 0) {
+ i71 = i67;
+ break L188;
+ }
+ i63 = HEAP32[i36 >> 2] | 0;
+ }
+ }
+ } while (0);
+ i63 = HEAP32[i36 >> 2] | 0;
+ i65 = HEAP32[i32 >> 2] | 0;
+ i66 = HEAP32[i33 >> 2] | 0;
+ if (!(i63 >>> 0 < (i66 + i65 | 0) >>> 0)) {
+ i60 = 193;
+ break;
+ }
+ }
+ if ((i60 | 0) == 181) {
+ i60 = 0;
+ HEAP32[i35 >> 2] = 11688;
+ HEAP32[i4 >> 2] = 29;
+ i63 = i67;
+ i65 = i69;
+ i66 = i26;
+ break L163;
+ } else if ((i60 | 0) == 190) {
+ i60 = 0;
+ HEAP32[i35 >> 2] = 11688;
+ HEAP32[i4 >> 2] = 29;
+ i63 = i67;
+ i65 = i69;
+ i66 = i26;
+ break L163;
+ } else if ((i60 | 0) == 193) {
+ i60 = 0;
+ if ((HEAP32[i4 >> 2] | 0) == 29) {
+ i63 = i71;
+ i65 = i69;
+ i66 = i26;
+ break L163;
+ } else {
+ i63 = i71;
+ break;
+ }
+ }
+ } else {
+ i63 = i67;
+ }
+ } while (0);
+ if ((HEAP16[i56 >> 1] | 0) == 0) {
+ HEAP32[i35 >> 2] = 11720;
+ HEAP32[i4 >> 2] = 29;
+ i65 = i69;
+ i66 = i26;
+ break;
+ }
+ HEAP32[i53 >> 2] = i52;
+ HEAP32[i48 >> 2] = i52;
+ HEAP32[i49 >> 2] = 9;
+ i61 = _inflate_table(1, i27, i65, i53, i49, i55) | 0;
+ if ((i61 | 0) != 0) {
+ HEAP32[i35 >> 2] = 11760;
+ HEAP32[i4 >> 2] = 29;
+ i65 = i69;
+ i66 = i26;
+ break;
+ }
+ HEAP32[i50 >> 2] = HEAP32[i53 >> 2];
+ HEAP32[i51 >> 2] = 6;
+ i61 = _inflate_table(2, i4 + (HEAP32[i32 >> 2] << 1) + 112 | 0, HEAP32[i33 >> 2] | 0, i53, i51, i55) | 0;
+ if ((i61 | 0) == 0) {
+ HEAP32[i4 >> 2] = 19;
+ if (i43) {
+ i65 = i69;
+ i61 = 0;
+ i60 = 285;
+ break L17;
+ } else {
+ i65 = i69;
+ i61 = 0;
+ i60 = 201;
+ break;
+ }
+ } else {
+ HEAP32[i35 >> 2] = 11792;
+ HEAP32[i4 >> 2] = 29;
+ i65 = i69;
+ i66 = i26;
+ break;
+ }
+ }
+ } while (0);
+ if ((i60 | 0) == 57) {
+ i60 = HEAP32[i38 >> 2] | 0;
+ if ((i60 | 0) != 0) {
+ HEAP32[i60 + 8 >> 2] = i64 & 255;
+ HEAP32[i60 + 12 >> 2] = i64 >>> 8;
+ }
+ if ((HEAP32[i11 >> 2] & 512 | 0) != 0) {
+ HEAP8[i25] = i64;
+ HEAP8[i39] = i64 >>> 8;
+ HEAP32[i10 >> 2] = _crc32(HEAP32[i10 >> 2] | 0, i25, 2) | 0;
+ }
+ HEAP32[i4 >> 2] = 4;
+ i63 = 0;
+ i64 = 0;
+ i60 = 62;
+ } else if ((i60 | 0) == 201) {
+ HEAP32[i4 >> 2] = 20;
+ i60 = 202;
+ }
+ do {
+ if ((i60 | 0) == 62) {
+ i60 = 0;
+ i66 = HEAP32[i11 >> 2] | 0;
+ if ((i66 & 1024 | 0) == 0) {
+ i60 = HEAP32[i38 >> 2] | 0;
+ if ((i60 | 0) != 0) {
+ HEAP32[i60 + 16 >> 2] = 0;
+ }
+ } else {
+ if (i63 >>> 0 < 16) {
+ while (1) {
+ if ((i65 | 0) == 0) {
+ i65 = 0;
+ break L17;
+ }
+ i65 = i65 + -1 | 0;
+ i67 = i62 + 1 | 0;
+ i64 = (HEAPU8[i62] << i63) + i64 | 0;
+ i63 = i63 + 8 | 0;
+ if (i63 >>> 0 < 16) {
+ i62 = i67;
+ } else {
+ i62 = i67;
+ break;
+ }
+ }
+ }
+ HEAP32[i42 >> 2] = i64;
+ i60 = HEAP32[i38 >> 2] | 0;
+ if ((i60 | 0) != 0) {
+ HEAP32[i60 + 20 >> 2] = i64;
+ i66 = HEAP32[i11 >> 2] | 0;
+ }
+ if ((i66 & 512 | 0) == 0) {
+ i63 = 0;
+ i64 = 0;
+ } else {
+ HEAP8[i25] = i64;
+ HEAP8[i39] = i64 >>> 8;
+ HEAP32[i10 >> 2] = _crc32(HEAP32[i10 >> 2] | 0, i25, 2) | 0;
+ i63 = 0;
+ i64 = 0;
+ }
+ }
+ HEAP32[i4 >> 2] = 5;
+ i60 = 73;
+ } else if ((i60 | 0) == 202) {
+ i60 = 0;
+ if (i65 >>> 0 > 5 & i26 >>> 0 > 257) {
+ HEAP32[i8 >> 2] = i19;
+ HEAP32[i15 >> 2] = i26;
+ HEAP32[i2 >> 2] = i62;
+ HEAP32[i16 >> 2] = i65;
+ HEAP32[i17 >> 2] = i64;
+ HEAP32[i6 >> 2] = i63;
+ _inflate_fast(i2, i59);
+ i19 = HEAP32[i8 >> 2] | 0;
+ i66 = HEAP32[i15 >> 2] | 0;
+ i62 = HEAP32[i2 >> 2] | 0;
+ i65 = HEAP32[i16 >> 2] | 0;
+ i64 = HEAP32[i17 >> 2] | 0;
+ i63 = HEAP32[i6 >> 2] | 0;
+ if ((HEAP32[i4 >> 2] | 0) != 11) {
+ break;
+ }
+ HEAP32[i57 >> 2] = -1;
+ break;
+ }
+ HEAP32[i57 >> 2] = 0;
+ i69 = (1 << HEAP32[i49 >> 2]) + -1 | 0;
+ i71 = i69 & i64;
+ i66 = HEAP32[i48 >> 2] | 0;
+ i68 = HEAP8[i66 + (i71 << 2) + 1 | 0] | 0;
+ i67 = i68 & 255;
+ if (i67 >>> 0 > i63 >>> 0) {
+ while (1) {
+ if ((i65 | 0) == 0) {
+ i65 = 0;
+ break L17;
+ }
+ i65 = i65 + -1 | 0;
+ i70 = i62 + 1 | 0;
+ i64 = (HEAPU8[i62] << i63) + i64 | 0;
+ i63 = i63 + 8 | 0;
+ i71 = i69 & i64;
+ i68 = HEAP8[i66 + (i71 << 2) + 1 | 0] | 0;
+ i67 = i68 & 255;
+ if (i67 >>> 0 > i63 >>> 0) {
+ i62 = i70;
+ } else {
+ i62 = i70;
+ break;
+ }
+ }
+ }
+ i69 = HEAP8[i66 + (i71 << 2) | 0] | 0;
+ i70 = HEAP16[i66 + (i71 << 2) + 2 >> 1] | 0;
+ i71 = i69 & 255;
+ if (!(i69 << 24 >> 24 == 0)) {
+ if ((i71 & 240 | 0) == 0) {
+ i69 = i70 & 65535;
+ i70 = (1 << i67 + i71) + -1 | 0;
+ i71 = ((i64 & i70) >>> i67) + i69 | 0;
+ i68 = HEAP8[i66 + (i71 << 2) + 1 | 0] | 0;
+ if (((i68 & 255) + i67 | 0) >>> 0 > i63 >>> 0) {
+ while (1) {
+ if ((i65 | 0) == 0) {
+ i65 = 0;
+ break L17;
+ }
+ i65 = i65 + -1 | 0;
+ i71 = i62 + 1 | 0;
+ i64 = (HEAPU8[i62] << i63) + i64 | 0;
+ i63 = i63 + 8 | 0;
+ i62 = ((i64 & i70) >>> i67) + i69 | 0;
+ i68 = HEAP8[i66 + (i62 << 2) + 1 | 0] | 0;
+ if (((i68 & 255) + i67 | 0) >>> 0 > i63 >>> 0) {
+ i62 = i71;
+ } else {
+ i69 = i62;
+ i62 = i71;
+ break;
+ }
+ }
+ } else {
+ i69 = i71;
+ }
+ i70 = HEAP16[i66 + (i69 << 2) + 2 >> 1] | 0;
+ i69 = HEAP8[i66 + (i69 << 2) | 0] | 0;
+ HEAP32[i57 >> 2] = i67;
+ i66 = i67;
+ i63 = i63 - i67 | 0;
+ i64 = i64 >>> i67;
+ } else {
+ i66 = 0;
+ }
+ } else {
+ i66 = 0;
+ i69 = 0;
+ }
+ i72 = i68 & 255;
+ i64 = i64 >>> i72;
+ i63 = i63 - i72 | 0;
+ HEAP32[i57 >> 2] = i66 + i72;
+ HEAP32[i42 >> 2] = i70 & 65535;
+ i66 = i69 & 255;
+ if (i69 << 24 >> 24 == 0) {
+ HEAP32[i4 >> 2] = 25;
+ i66 = i26;
+ break;
+ }
+ if ((i66 & 32 | 0) != 0) {
+ HEAP32[i57 >> 2] = -1;
+ HEAP32[i4 >> 2] = 11;
+ i66 = i26;
+ break;
+ }
+ if ((i66 & 64 | 0) == 0) {
+ i66 = i66 & 15;
+ HEAP32[i37 >> 2] = i66;
+ HEAP32[i4 >> 2] = 21;
+ i60 = 221;
+ break;
+ } else {
+ HEAP32[i35 >> 2] = 11816;
+ HEAP32[i4 >> 2] = 29;
+ i66 = i26;
+ break;
+ }
+ }
+ } while (0);
+ if ((i60 | 0) == 73) {
+ i68 = HEAP32[i11 >> 2] | 0;
+ if ((i68 & 1024 | 0) != 0) {
+ i67 = HEAP32[i42 >> 2] | 0;
+ i60 = i67 >>> 0 > i65 >>> 0 ? i65 : i67;
+ if ((i60 | 0) != 0) {
+ i66 = HEAP32[i38 >> 2] | 0;
+ if ((i66 | 0) != 0 ? (i20 = HEAP32[i66 + 16 >> 2] | 0, (i20 | 0) != 0) : 0) {
+ i67 = (HEAP32[i66 + 20 >> 2] | 0) - i67 | 0;
+ i66 = HEAP32[i66 + 24 >> 2] | 0;
+ _memcpy(i20 + i67 | 0, i62 | 0, ((i67 + i60 | 0) >>> 0 > i66 >>> 0 ? i66 - i67 | 0 : i60) | 0) | 0;
+ i68 = HEAP32[i11 >> 2] | 0;
+ }
+ if ((i68 & 512 | 0) != 0) {
+ HEAP32[i10 >> 2] = _crc32(HEAP32[i10 >> 2] | 0, i62, i60) | 0;
+ }
+ i67 = (HEAP32[i42 >> 2] | 0) - i60 | 0;
+ HEAP32[i42 >> 2] = i67;
+ i65 = i65 - i60 | 0;
+ i62 = i62 + i60 | 0;
+ }
+ if ((i67 | 0) != 0) {
+ i60 = 285;
+ break;
+ }
+ }
+ HEAP32[i42 >> 2] = 0;
+ HEAP32[i4 >> 2] = 6;
+ i60 = 83;
+ } else if ((i60 | 0) == 221) {
+ i60 = 0;
+ if ((i66 | 0) == 0) {
+ i60 = HEAP32[i42 >> 2] | 0;
+ } else {
+ if (i63 >>> 0 < i66 >>> 0) {
+ while (1) {
+ if ((i65 | 0) == 0) {
+ i65 = 0;
+ break L17;
+ }
+ i65 = i65 + -1 | 0;
+ i67 = i62 + 1 | 0;
+ i64 = (HEAPU8[i62] << i63) + i64 | 0;
+ i63 = i63 + 8 | 0;
+ if (i63 >>> 0 < i66 >>> 0) {
+ i62 = i67;
+ } else {
+ i62 = i67;
+ break;
+ }
+ }
+ }
+ i60 = (HEAP32[i42 >> 2] | 0) + ((1 << i66) + -1 & i64) | 0;
+ HEAP32[i42 >> 2] = i60;
+ HEAP32[i57 >> 2] = (HEAP32[i57 >> 2] | 0) + i66;
+ i63 = i63 - i66 | 0;
+ i64 = i64 >>> i66;
+ }
+ HEAP32[i58 >> 2] = i60;
+ HEAP32[i4 >> 2] = 22;
+ i60 = 228;
+ }
+ do {
+ if ((i60 | 0) == 83) {
+ if ((HEAP32[i11 >> 2] & 2048 | 0) == 0) {
+ i60 = HEAP32[i38 >> 2] | 0;
+ if ((i60 | 0) != 0) {
+ HEAP32[i60 + 28 >> 2] = 0;
+ }
+ } else {
+ if ((i65 | 0) == 0) {
+ i65 = 0;
+ i60 = 285;
+ break L17;
+ } else {
+ i66 = 0;
+ }
+ while (1) {
+ i60 = i66 + 1 | 0;
+ i67 = HEAP8[i62 + i66 | 0] | 0;
+ i66 = HEAP32[i38 >> 2] | 0;
+ if (((i66 | 0) != 0 ? (i23 = HEAP32[i66 + 28 >> 2] | 0, (i23 | 0) != 0) : 0) ? (i21 = HEAP32[i42 >> 2] | 0, i21 >>> 0 < (HEAP32[i66 + 32 >> 2] | 0) >>> 0) : 0) {
+ HEAP32[i42 >> 2] = i21 + 1;
+ HEAP8[i23 + i21 | 0] = i67;
+ }
+ i66 = i67 << 24 >> 24 != 0;
+ if (i66 & i60 >>> 0 < i65 >>> 0) {
+ i66 = i60;
+ } else {
+ break;
+ }
+ }
+ if ((HEAP32[i11 >> 2] & 512 | 0) != 0) {
+ HEAP32[i10 >> 2] = _crc32(HEAP32[i10 >> 2] | 0, i62, i60) | 0;
+ }
+ i65 = i65 - i60 | 0;
+ i62 = i62 + i60 | 0;
+ if (i66) {
+ i60 = 285;
+ break L17;
+ }
+ }
+ HEAP32[i42 >> 2] = 0;
+ HEAP32[i4 >> 2] = 7;
+ i60 = 96;
+ } else if ((i60 | 0) == 228) {
+ i60 = 0;
+ i69 = (1 << HEAP32[i51 >> 2]) + -1 | 0;
+ i71 = i69 & i64;
+ i66 = HEAP32[i50 >> 2] | 0;
+ i68 = HEAP8[i66 + (i71 << 2) + 1 | 0] | 0;
+ i67 = i68 & 255;
+ if (i67 >>> 0 > i63 >>> 0) {
+ while (1) {
+ if ((i65 | 0) == 0) {
+ i65 = 0;
+ break L17;
+ }
+ i65 = i65 + -1 | 0;
+ i70 = i62 + 1 | 0;
+ i64 = (HEAPU8[i62] << i63) + i64 | 0;
+ i63 = i63 + 8 | 0;
+ i71 = i69 & i64;
+ i68 = HEAP8[i66 + (i71 << 2) + 1 | 0] | 0;
+ i67 = i68 & 255;
+ if (i67 >>> 0 > i63 >>> 0) {
+ i62 = i70;
+ } else {
+ i62 = i70;
+ break;
+ }
+ }
+ }
+ i69 = HEAP8[i66 + (i71 << 2) | 0] | 0;
+ i70 = HEAP16[i66 + (i71 << 2) + 2 >> 1] | 0;
+ i71 = i69 & 255;
+ if ((i71 & 240 | 0) == 0) {
+ i69 = i70 & 65535;
+ i70 = (1 << i67 + i71) + -1 | 0;
+ i71 = ((i64 & i70) >>> i67) + i69 | 0;
+ i68 = HEAP8[i66 + (i71 << 2) + 1 | 0] | 0;
+ if (((i68 & 255) + i67 | 0) >>> 0 > i63 >>> 0) {
+ while (1) {
+ if ((i65 | 0) == 0) {
+ i65 = 0;
+ break L17;
+ }
+ i65 = i65 + -1 | 0;
+ i71 = i62 + 1 | 0;
+ i64 = (HEAPU8[i62] << i63) + i64 | 0;
+ i63 = i63 + 8 | 0;
+ i62 = ((i64 & i70) >>> i67) + i69 | 0;
+ i68 = HEAP8[i66 + (i62 << 2) + 1 | 0] | 0;
+ if (((i68 & 255) + i67 | 0) >>> 0 > i63 >>> 0) {
+ i62 = i71;
+ } else {
+ i69 = i62;
+ i62 = i71;
+ break;
+ }
+ }
+ } else {
+ i69 = i71;
+ }
+ i70 = HEAP16[i66 + (i69 << 2) + 2 >> 1] | 0;
+ i69 = HEAP8[i66 + (i69 << 2) | 0] | 0;
+ i66 = (HEAP32[i57 >> 2] | 0) + i67 | 0;
+ HEAP32[i57 >> 2] = i66;
+ i63 = i63 - i67 | 0;
+ i64 = i64 >>> i67;
+ } else {
+ i66 = HEAP32[i57 >> 2] | 0;
+ }
+ i72 = i68 & 255;
+ i64 = i64 >>> i72;
+ i63 = i63 - i72 | 0;
+ HEAP32[i57 >> 2] = i66 + i72;
+ i66 = i69 & 255;
+ if ((i66 & 64 | 0) == 0) {
+ HEAP32[i54 >> 2] = i70 & 65535;
+ i66 = i66 & 15;
+ HEAP32[i37 >> 2] = i66;
+ HEAP32[i4 >> 2] = 23;
+ i60 = 240;
+ break;
+ } else {
+ HEAP32[i35 >> 2] = 11848;
+ HEAP32[i4 >> 2] = 29;
+ i66 = i26;
+ break;
+ }
+ }
+ } while (0);
+ if ((i60 | 0) == 96) {
+ if ((HEAP32[i11 >> 2] & 4096 | 0) == 0) {
+ i60 = HEAP32[i38 >> 2] | 0;
+ if ((i60 | 0) != 0) {
+ HEAP32[i60 + 36 >> 2] = 0;
+ }
+ } else {
+ if ((i65 | 0) == 0) {
+ i65 = 0;
+ i60 = 285;
+ break;
+ } else {
+ i66 = 0;
+ }
+ while (1) {
+ i60 = i66 + 1 | 0;
+ i66 = HEAP8[i62 + i66 | 0] | 0;
+ i67 = HEAP32[i38 >> 2] | 0;
+ if (((i67 | 0) != 0 ? (i24 = HEAP32[i67 + 36 >> 2] | 0, (i24 | 0) != 0) : 0) ? (i22 = HEAP32[i42 >> 2] | 0, i22 >>> 0 < (HEAP32[i67 + 40 >> 2] | 0) >>> 0) : 0) {
+ HEAP32[i42 >> 2] = i22 + 1;
+ HEAP8[i24 + i22 | 0] = i66;
+ }
+ i66 = i66 << 24 >> 24 != 0;
+ if (i66 & i60 >>> 0 < i65 >>> 0) {
+ i66 = i60;
+ } else {
+ break;
+ }
+ }
+ if ((HEAP32[i11 >> 2] & 512 | 0) != 0) {
+ HEAP32[i10 >> 2] = _crc32(HEAP32[i10 >> 2] | 0, i62, i60) | 0;
+ }
+ i65 = i65 - i60 | 0;
+ i62 = i62 + i60 | 0;
+ if (i66) {
+ i60 = 285;
+ break;
+ }
+ }
+ HEAP32[i4 >> 2] = 8;
+ i60 = 109;
+ } else if ((i60 | 0) == 240) {
+ i60 = 0;
+ if ((i66 | 0) != 0) {
+ if (i63 >>> 0 < i66 >>> 0) {
+ i67 = i62;
+ while (1) {
+ if ((i65 | 0) == 0) {
+ i65 = 0;
+ i62 = i67;
+ break L17;
+ }
+ i65 = i65 + -1 | 0;
+ i62 = i67 + 1 | 0;
+ i64 = (HEAPU8[i67] << i63) + i64 | 0;
+ i63 = i63 + 8 | 0;
+ if (i63 >>> 0 < i66 >>> 0) {
+ i67 = i62;
+ } else {
+ break;
+ }
+ }
+ }
+ HEAP32[i54 >> 2] = (HEAP32[i54 >> 2] | 0) + ((1 << i66) + -1 & i64);
+ HEAP32[i57 >> 2] = (HEAP32[i57 >> 2] | 0) + i66;
+ i63 = i63 - i66 | 0;
+ i64 = i64 >>> i66;
+ }
+ HEAP32[i4 >> 2] = 24;
+ i60 = 246;
+ }
+ do {
+ if ((i60 | 0) == 109) {
+ i60 = 0;
+ i66 = HEAP32[i11 >> 2] | 0;
+ if ((i66 & 512 | 0) != 0) {
+ if (i63 >>> 0 < 16) {
+ i67 = i62;
+ while (1) {
+ if ((i65 | 0) == 0) {
+ i65 = 0;
+ i62 = i67;
+ break L17;
+ }
+ i65 = i65 + -1 | 0;
+ i62 = i67 + 1 | 0;
+ i64 = (HEAPU8[i67] << i63) + i64 | 0;
+ i63 = i63 + 8 | 0;
+ if (i63 >>> 0 < 16) {
+ i67 = i62;
+ } else {
+ break;
+ }
+ }
+ }
+ if ((i64 | 0) == (HEAP32[i10 >> 2] & 65535 | 0)) {
+ i63 = 0;
+ i64 = 0;
+ } else {
+ HEAP32[i35 >> 2] = 11536;
+ HEAP32[i4 >> 2] = 29;
+ i66 = i26;
+ break;
+ }
+ }
+ i67 = HEAP32[i38 >> 2] | 0;
+ if ((i67 | 0) != 0) {
+ HEAP32[i67 + 44 >> 2] = i66 >>> 9 & 1;
+ HEAP32[i67 + 48 >> 2] = 1;
+ }
+ i66 = _crc32(0, 0, 0) | 0;
+ HEAP32[i10 >> 2] = i66;
+ HEAP32[i9 >> 2] = i66;
+ HEAP32[i4 >> 2] = 11;
+ i66 = i26;
+ } else if ((i60 | 0) == 246) {
+ i60 = 0;
+ if ((i26 | 0) == 0) {
+ i26 = 0;
+ i60 = 285;
+ break L17;
+ }
+ i67 = i59 - i26 | 0;
+ i66 = HEAP32[i54 >> 2] | 0;
+ if (i66 >>> 0 > i67 >>> 0) {
+ i67 = i66 - i67 | 0;
+ if (i67 >>> 0 > (HEAP32[i28 >> 2] | 0) >>> 0 ? (HEAP32[i29 >> 2] | 0) != 0 : 0) {
+ HEAP32[i35 >> 2] = 11872;
+ HEAP32[i4 >> 2] = 29;
+ i66 = i26;
+ break;
+ }
+ i68 = HEAP32[i30 >> 2] | 0;
+ if (i67 >>> 0 > i68 >>> 0) {
+ i68 = i67 - i68 | 0;
+ i66 = i68;
+ i68 = (HEAP32[i31 >> 2] | 0) + ((HEAP32[i18 >> 2] | 0) - i68) | 0;
+ } else {
+ i66 = i67;
+ i68 = (HEAP32[i31 >> 2] | 0) + (i68 - i67) | 0;
+ }
+ i69 = HEAP32[i42 >> 2] | 0;
+ i67 = i69;
+ i69 = i66 >>> 0 > i69 >>> 0 ? i69 : i66;
+ } else {
+ i69 = HEAP32[i42 >> 2] | 0;
+ i67 = i69;
+ i68 = i19 + (0 - i66) | 0;
+ }
+ i66 = i69 >>> 0 > i26 >>> 0 ? i26 : i69;
+ HEAP32[i42 >> 2] = i67 - i66;
+ i67 = ~i26;
+ i69 = ~i69;
+ i67 = i67 >>> 0 > i69 >>> 0 ? i67 : i69;
+ i69 = i66;
+ i70 = i19;
+ while (1) {
+ HEAP8[i70] = HEAP8[i68] | 0;
+ i69 = i69 + -1 | 0;
+ if ((i69 | 0) == 0) {
+ break;
+ } else {
+ i68 = i68 + 1 | 0;
+ i70 = i70 + 1 | 0;
+ }
+ }
+ i66 = i26 - i66 | 0;
+ i19 = i19 + ~i67 | 0;
+ if ((HEAP32[i42 >> 2] | 0) == 0) {
+ HEAP32[i4 >> 2] = 20;
+ }
+ }
+ } while (0);
+ i68 = HEAP32[i4 >> 2] | 0;
+ i67 = i63;
+ i26 = i66;
+ }
+ if ((i60 | 0) == 122) {
+ HEAP32[i8 >> 2] = i19;
+ HEAP32[i15 >> 2] = i26;
+ HEAP32[i2 >> 2] = i62;
+ HEAP32[i16 >> 2] = i65;
+ HEAP32[i17 >> 2] = i64;
+ HEAP32[i6 >> 2] = i63;
+ i72 = 2;
+ STACKTOP = i1;
+ return i72 | 0;
+ } else if ((i60 | 0) == 133) {
+ i63 = i63 + -3 | 0;
+ i64 = i64 >>> 3;
+ } else if ((i60 | 0) == 284) {
+ HEAP32[i4 >> 2] = 28;
+ i61 = 1;
+ } else if ((i60 | 0) != 285) if ((i60 | 0) == 299) {
+ i72 = -4;
+ STACKTOP = i1;
+ return i72 | 0;
+ } else if ((i60 | 0) == 300) {
+ STACKTOP = i1;
+ return i2 | 0;
+ }
+ HEAP32[i8 >> 2] = i19;
+ HEAP32[i15 >> 2] = i26;
+ HEAP32[i2 >> 2] = i62;
+ HEAP32[i16 >> 2] = i65;
+ HEAP32[i17 >> 2] = i64;
+ HEAP32[i6 >> 2] = i63;
+ if ((HEAP32[i18 >> 2] | 0) == 0) {
+ if ((HEAP32[i4 >> 2] | 0) >>> 0 < 26 ? (i59 | 0) != (HEAP32[i15 >> 2] | 0) : 0) {
+ i60 = 289;
+ }
+ } else {
+ i60 = 289;
+ }
+ if ((i60 | 0) == 289 ? (_updatewindow(i2, i59) | 0) != 0 : 0) {
+ HEAP32[i4 >> 2] = 30;
+ i72 = -4;
+ STACKTOP = i1;
+ return i72 | 0;
+ }
+ i16 = HEAP32[i16 >> 2] | 0;
+ i72 = HEAP32[i15 >> 2] | 0;
+ i15 = i59 - i72 | 0;
+ i71 = i2 + 8 | 0;
+ HEAP32[i71 >> 2] = i5 - i16 + (HEAP32[i71 >> 2] | 0);
+ HEAP32[i13 >> 2] = (HEAP32[i13 >> 2] | 0) + i15;
+ HEAP32[i14 >> 2] = (HEAP32[i14 >> 2] | 0) + i15;
+ i13 = (i59 | 0) == (i72 | 0);
+ if (!((HEAP32[i12 >> 2] | 0) == 0 | i13)) {
+ i12 = HEAP32[i10 >> 2] | 0;
+ i8 = (HEAP32[i8 >> 2] | 0) + (0 - i15) | 0;
+ if ((HEAP32[i11 >> 2] | 0) == 0) {
+ i8 = _adler32(i12, i8, i15) | 0;
+ } else {
+ i8 = _crc32(i12, i8, i15) | 0;
+ }
+ HEAP32[i10 >> 2] = i8;
+ HEAP32[i9 >> 2] = i8;
+ }
+ i4 = HEAP32[i4 >> 2] | 0;
+ if ((i4 | 0) == 19) {
+ i8 = 256;
+ } else {
+ i8 = (i4 | 0) == 14 ? 256 : 0;
+ }
+ HEAP32[i2 + 44 >> 2] = ((HEAP32[i7 >> 2] | 0) != 0 ? 64 : 0) + (HEAP32[i6 >> 2] | 0) + ((i4 | 0) == 11 ? 128 : 0) + i8;
+ i72 = ((i5 | 0) == (i16 | 0) & i13 | (i3 | 0) == 4) & (i61 | 0) == 0 ? -5 : i61;
+ STACKTOP = i1;
+ return i72 | 0;
+}
+function _malloc(i12) {
+ i12 = i12 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0, i23 = 0, i24 = 0, i25 = 0, i26 = 0, i27 = 0, i28 = 0, i29 = 0, i30 = 0, i31 = 0, i32 = 0;
+ i1 = STACKTOP;
+ do {
+ if (i12 >>> 0 < 245) {
+ if (i12 >>> 0 < 11) {
+ i12 = 16;
+ } else {
+ i12 = i12 + 11 & -8;
+ }
+ i20 = i12 >>> 3;
+ i18 = HEAP32[3618] | 0;
+ i21 = i18 >>> i20;
+ if ((i21 & 3 | 0) != 0) {
+ i6 = (i21 & 1 ^ 1) + i20 | 0;
+ i5 = i6 << 1;
+ i3 = 14512 + (i5 << 2) | 0;
+ i5 = 14512 + (i5 + 2 << 2) | 0;
+ i7 = HEAP32[i5 >> 2] | 0;
+ i2 = i7 + 8 | 0;
+ i4 = HEAP32[i2 >> 2] | 0;
+ do {
+ if ((i3 | 0) != (i4 | 0)) {
+ if (i4 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i8 = i4 + 12 | 0;
+ if ((HEAP32[i8 >> 2] | 0) == (i7 | 0)) {
+ HEAP32[i8 >> 2] = i3;
+ HEAP32[i5 >> 2] = i4;
+ break;
+ } else {
+ _abort();
+ }
+ } else {
+ HEAP32[3618] = i18 & ~(1 << i6);
+ }
+ } while (0);
+ i32 = i6 << 3;
+ HEAP32[i7 + 4 >> 2] = i32 | 3;
+ i32 = i7 + (i32 | 4) | 0;
+ HEAP32[i32 >> 2] = HEAP32[i32 >> 2] | 1;
+ i32 = i2;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ if (i12 >>> 0 > (HEAP32[14480 >> 2] | 0) >>> 0) {
+ if ((i21 | 0) != 0) {
+ i7 = 2 << i20;
+ i7 = i21 << i20 & (i7 | 0 - i7);
+ i7 = (i7 & 0 - i7) + -1 | 0;
+ i2 = i7 >>> 12 & 16;
+ i7 = i7 >>> i2;
+ i6 = i7 >>> 5 & 8;
+ i7 = i7 >>> i6;
+ i5 = i7 >>> 2 & 4;
+ i7 = i7 >>> i5;
+ i4 = i7 >>> 1 & 2;
+ i7 = i7 >>> i4;
+ i3 = i7 >>> 1 & 1;
+ i3 = (i6 | i2 | i5 | i4 | i3) + (i7 >>> i3) | 0;
+ i7 = i3 << 1;
+ i4 = 14512 + (i7 << 2) | 0;
+ i7 = 14512 + (i7 + 2 << 2) | 0;
+ i5 = HEAP32[i7 >> 2] | 0;
+ i2 = i5 + 8 | 0;
+ i6 = HEAP32[i2 >> 2] | 0;
+ do {
+ if ((i4 | 0) != (i6 | 0)) {
+ if (i6 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i8 = i6 + 12 | 0;
+ if ((HEAP32[i8 >> 2] | 0) == (i5 | 0)) {
+ HEAP32[i8 >> 2] = i4;
+ HEAP32[i7 >> 2] = i6;
+ break;
+ } else {
+ _abort();
+ }
+ } else {
+ HEAP32[3618] = i18 & ~(1 << i3);
+ }
+ } while (0);
+ i6 = i3 << 3;
+ i4 = i6 - i12 | 0;
+ HEAP32[i5 + 4 >> 2] = i12 | 3;
+ i3 = i5 + i12 | 0;
+ HEAP32[i5 + (i12 | 4) >> 2] = i4 | 1;
+ HEAP32[i5 + i6 >> 2] = i4;
+ i6 = HEAP32[14480 >> 2] | 0;
+ if ((i6 | 0) != 0) {
+ i5 = HEAP32[14492 >> 2] | 0;
+ i8 = i6 >>> 3;
+ i9 = i8 << 1;
+ i6 = 14512 + (i9 << 2) | 0;
+ i7 = HEAP32[3618] | 0;
+ i8 = 1 << i8;
+ if ((i7 & i8 | 0) != 0) {
+ i7 = 14512 + (i9 + 2 << 2) | 0;
+ i8 = HEAP32[i7 >> 2] | 0;
+ if (i8 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i28 = i7;
+ i27 = i8;
+ }
+ } else {
+ HEAP32[3618] = i7 | i8;
+ i28 = 14512 + (i9 + 2 << 2) | 0;
+ i27 = i6;
+ }
+ HEAP32[i28 >> 2] = i5;
+ HEAP32[i27 + 12 >> 2] = i5;
+ HEAP32[i5 + 8 >> 2] = i27;
+ HEAP32[i5 + 12 >> 2] = i6;
+ }
+ HEAP32[14480 >> 2] = i4;
+ HEAP32[14492 >> 2] = i3;
+ i32 = i2;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ i18 = HEAP32[14476 >> 2] | 0;
+ if ((i18 | 0) != 0) {
+ i2 = (i18 & 0 - i18) + -1 | 0;
+ i31 = i2 >>> 12 & 16;
+ i2 = i2 >>> i31;
+ i30 = i2 >>> 5 & 8;
+ i2 = i2 >>> i30;
+ i32 = i2 >>> 2 & 4;
+ i2 = i2 >>> i32;
+ i6 = i2 >>> 1 & 2;
+ i2 = i2 >>> i6;
+ i3 = i2 >>> 1 & 1;
+ i3 = HEAP32[14776 + ((i30 | i31 | i32 | i6 | i3) + (i2 >>> i3) << 2) >> 2] | 0;
+ i2 = (HEAP32[i3 + 4 >> 2] & -8) - i12 | 0;
+ i6 = i3;
+ while (1) {
+ i5 = HEAP32[i6 + 16 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ i5 = HEAP32[i6 + 20 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ break;
+ }
+ }
+ i6 = (HEAP32[i5 + 4 >> 2] & -8) - i12 | 0;
+ i4 = i6 >>> 0 < i2 >>> 0;
+ i2 = i4 ? i6 : i2;
+ i6 = i5;
+ i3 = i4 ? i5 : i3;
+ }
+ i6 = HEAP32[14488 >> 2] | 0;
+ if (i3 >>> 0 < i6 >>> 0) {
+ _abort();
+ }
+ i4 = i3 + i12 | 0;
+ if (!(i3 >>> 0 < i4 >>> 0)) {
+ _abort();
+ }
+ i5 = HEAP32[i3 + 24 >> 2] | 0;
+ i7 = HEAP32[i3 + 12 >> 2] | 0;
+ do {
+ if ((i7 | 0) == (i3 | 0)) {
+ i8 = i3 + 20 | 0;
+ i7 = HEAP32[i8 >> 2] | 0;
+ if ((i7 | 0) == 0) {
+ i8 = i3 + 16 | 0;
+ i7 = HEAP32[i8 >> 2] | 0;
+ if ((i7 | 0) == 0) {
+ i26 = 0;
+ break;
+ }
+ }
+ while (1) {
+ i10 = i7 + 20 | 0;
+ i9 = HEAP32[i10 >> 2] | 0;
+ if ((i9 | 0) != 0) {
+ i7 = i9;
+ i8 = i10;
+ continue;
+ }
+ i10 = i7 + 16 | 0;
+ i9 = HEAP32[i10 >> 2] | 0;
+ if ((i9 | 0) == 0) {
+ break;
+ } else {
+ i7 = i9;
+ i8 = i10;
+ }
+ }
+ if (i8 >>> 0 < i6 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i8 >> 2] = 0;
+ i26 = i7;
+ break;
+ }
+ } else {
+ i8 = HEAP32[i3 + 8 >> 2] | 0;
+ if (i8 >>> 0 < i6 >>> 0) {
+ _abort();
+ }
+ i6 = i8 + 12 | 0;
+ if ((HEAP32[i6 >> 2] | 0) != (i3 | 0)) {
+ _abort();
+ }
+ i9 = i7 + 8 | 0;
+ if ((HEAP32[i9 >> 2] | 0) == (i3 | 0)) {
+ HEAP32[i6 >> 2] = i7;
+ HEAP32[i9 >> 2] = i8;
+ i26 = i7;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ do {
+ if ((i5 | 0) != 0) {
+ i7 = HEAP32[i3 + 28 >> 2] | 0;
+ i6 = 14776 + (i7 << 2) | 0;
+ if ((i3 | 0) == (HEAP32[i6 >> 2] | 0)) {
+ HEAP32[i6 >> 2] = i26;
+ if ((i26 | 0) == 0) {
+ HEAP32[14476 >> 2] = HEAP32[14476 >> 2] & ~(1 << i7);
+ break;
+ }
+ } else {
+ if (i5 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i6 = i5 + 16 | 0;
+ if ((HEAP32[i6 >> 2] | 0) == (i3 | 0)) {
+ HEAP32[i6 >> 2] = i26;
+ } else {
+ HEAP32[i5 + 20 >> 2] = i26;
+ }
+ if ((i26 | 0) == 0) {
+ break;
+ }
+ }
+ if (i26 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i26 + 24 >> 2] = i5;
+ i5 = HEAP32[i3 + 16 >> 2] | 0;
+ do {
+ if ((i5 | 0) != 0) {
+ if (i5 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i26 + 16 >> 2] = i5;
+ HEAP32[i5 + 24 >> 2] = i26;
+ break;
+ }
+ }
+ } while (0);
+ i5 = HEAP32[i3 + 20 >> 2] | 0;
+ if ((i5 | 0) != 0) {
+ if (i5 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i26 + 20 >> 2] = i5;
+ HEAP32[i5 + 24 >> 2] = i26;
+ break;
+ }
+ }
+ }
+ } while (0);
+ if (i2 >>> 0 < 16) {
+ i32 = i2 + i12 | 0;
+ HEAP32[i3 + 4 >> 2] = i32 | 3;
+ i32 = i3 + (i32 + 4) | 0;
+ HEAP32[i32 >> 2] = HEAP32[i32 >> 2] | 1;
+ } else {
+ HEAP32[i3 + 4 >> 2] = i12 | 3;
+ HEAP32[i3 + (i12 | 4) >> 2] = i2 | 1;
+ HEAP32[i3 + (i2 + i12) >> 2] = i2;
+ i6 = HEAP32[14480 >> 2] | 0;
+ if ((i6 | 0) != 0) {
+ i5 = HEAP32[14492 >> 2] | 0;
+ i8 = i6 >>> 3;
+ i9 = i8 << 1;
+ i6 = 14512 + (i9 << 2) | 0;
+ i7 = HEAP32[3618] | 0;
+ i8 = 1 << i8;
+ if ((i7 & i8 | 0) != 0) {
+ i7 = 14512 + (i9 + 2 << 2) | 0;
+ i8 = HEAP32[i7 >> 2] | 0;
+ if (i8 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i25 = i7;
+ i24 = i8;
+ }
+ } else {
+ HEAP32[3618] = i7 | i8;
+ i25 = 14512 + (i9 + 2 << 2) | 0;
+ i24 = i6;
+ }
+ HEAP32[i25 >> 2] = i5;
+ HEAP32[i24 + 12 >> 2] = i5;
+ HEAP32[i5 + 8 >> 2] = i24;
+ HEAP32[i5 + 12 >> 2] = i6;
+ }
+ HEAP32[14480 >> 2] = i2;
+ HEAP32[14492 >> 2] = i4;
+ }
+ i32 = i3 + 8 | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ }
+ } else {
+ if (!(i12 >>> 0 > 4294967231)) {
+ i24 = i12 + 11 | 0;
+ i12 = i24 & -8;
+ i26 = HEAP32[14476 >> 2] | 0;
+ if ((i26 | 0) != 0) {
+ i25 = 0 - i12 | 0;
+ i24 = i24 >>> 8;
+ if ((i24 | 0) != 0) {
+ if (i12 >>> 0 > 16777215) {
+ i27 = 31;
+ } else {
+ i31 = (i24 + 1048320 | 0) >>> 16 & 8;
+ i32 = i24 << i31;
+ i30 = (i32 + 520192 | 0) >>> 16 & 4;
+ i32 = i32 << i30;
+ i27 = (i32 + 245760 | 0) >>> 16 & 2;
+ i27 = 14 - (i30 | i31 | i27) + (i32 << i27 >>> 15) | 0;
+ i27 = i12 >>> (i27 + 7 | 0) & 1 | i27 << 1;
+ }
+ } else {
+ i27 = 0;
+ }
+ i30 = HEAP32[14776 + (i27 << 2) >> 2] | 0;
+ L126 : do {
+ if ((i30 | 0) == 0) {
+ i29 = 0;
+ i24 = 0;
+ } else {
+ if ((i27 | 0) == 31) {
+ i24 = 0;
+ } else {
+ i24 = 25 - (i27 >>> 1) | 0;
+ }
+ i29 = 0;
+ i28 = i12 << i24;
+ i24 = 0;
+ while (1) {
+ i32 = HEAP32[i30 + 4 >> 2] & -8;
+ i31 = i32 - i12 | 0;
+ if (i31 >>> 0 < i25 >>> 0) {
+ if ((i32 | 0) == (i12 | 0)) {
+ i25 = i31;
+ i29 = i30;
+ i24 = i30;
+ break L126;
+ } else {
+ i25 = i31;
+ i24 = i30;
+ }
+ }
+ i31 = HEAP32[i30 + 20 >> 2] | 0;
+ i30 = HEAP32[i30 + (i28 >>> 31 << 2) + 16 >> 2] | 0;
+ i29 = (i31 | 0) == 0 | (i31 | 0) == (i30 | 0) ? i29 : i31;
+ if ((i30 | 0) == 0) {
+ break;
+ } else {
+ i28 = i28 << 1;
+ }
+ }
+ }
+ } while (0);
+ if ((i29 | 0) == 0 & (i24 | 0) == 0) {
+ i32 = 2 << i27;
+ i26 = i26 & (i32 | 0 - i32);
+ if ((i26 | 0) == 0) {
+ break;
+ }
+ i32 = (i26 & 0 - i26) + -1 | 0;
+ i28 = i32 >>> 12 & 16;
+ i32 = i32 >>> i28;
+ i27 = i32 >>> 5 & 8;
+ i32 = i32 >>> i27;
+ i30 = i32 >>> 2 & 4;
+ i32 = i32 >>> i30;
+ i31 = i32 >>> 1 & 2;
+ i32 = i32 >>> i31;
+ i29 = i32 >>> 1 & 1;
+ i29 = HEAP32[14776 + ((i27 | i28 | i30 | i31 | i29) + (i32 >>> i29) << 2) >> 2] | 0;
+ }
+ if ((i29 | 0) != 0) {
+ while (1) {
+ i27 = (HEAP32[i29 + 4 >> 2] & -8) - i12 | 0;
+ i26 = i27 >>> 0 < i25 >>> 0;
+ i25 = i26 ? i27 : i25;
+ i24 = i26 ? i29 : i24;
+ i26 = HEAP32[i29 + 16 >> 2] | 0;
+ if ((i26 | 0) != 0) {
+ i29 = i26;
+ continue;
+ }
+ i29 = HEAP32[i29 + 20 >> 2] | 0;
+ if ((i29 | 0) == 0) {
+ break;
+ }
+ }
+ }
+ if ((i24 | 0) != 0 ? i25 >>> 0 < ((HEAP32[14480 >> 2] | 0) - i12 | 0) >>> 0 : 0) {
+ i4 = HEAP32[14488 >> 2] | 0;
+ if (i24 >>> 0 < i4 >>> 0) {
+ _abort();
+ }
+ i2 = i24 + i12 | 0;
+ if (!(i24 >>> 0 < i2 >>> 0)) {
+ _abort();
+ }
+ i3 = HEAP32[i24 + 24 >> 2] | 0;
+ i6 = HEAP32[i24 + 12 >> 2] | 0;
+ do {
+ if ((i6 | 0) == (i24 | 0)) {
+ i6 = i24 + 20 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ i6 = i24 + 16 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ i22 = 0;
+ break;
+ }
+ }
+ while (1) {
+ i8 = i5 + 20 | 0;
+ i7 = HEAP32[i8 >> 2] | 0;
+ if ((i7 | 0) != 0) {
+ i5 = i7;
+ i6 = i8;
+ continue;
+ }
+ i7 = i5 + 16 | 0;
+ i8 = HEAP32[i7 >> 2] | 0;
+ if ((i8 | 0) == 0) {
+ break;
+ } else {
+ i5 = i8;
+ i6 = i7;
+ }
+ }
+ if (i6 >>> 0 < i4 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i6 >> 2] = 0;
+ i22 = i5;
+ break;
+ }
+ } else {
+ i5 = HEAP32[i24 + 8 >> 2] | 0;
+ if (i5 >>> 0 < i4 >>> 0) {
+ _abort();
+ }
+ i7 = i5 + 12 | 0;
+ if ((HEAP32[i7 >> 2] | 0) != (i24 | 0)) {
+ _abort();
+ }
+ i4 = i6 + 8 | 0;
+ if ((HEAP32[i4 >> 2] | 0) == (i24 | 0)) {
+ HEAP32[i7 >> 2] = i6;
+ HEAP32[i4 >> 2] = i5;
+ i22 = i6;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ do {
+ if ((i3 | 0) != 0) {
+ i4 = HEAP32[i24 + 28 >> 2] | 0;
+ i5 = 14776 + (i4 << 2) | 0;
+ if ((i24 | 0) == (HEAP32[i5 >> 2] | 0)) {
+ HEAP32[i5 >> 2] = i22;
+ if ((i22 | 0) == 0) {
+ HEAP32[14476 >> 2] = HEAP32[14476 >> 2] & ~(1 << i4);
+ break;
+ }
+ } else {
+ if (i3 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i4 = i3 + 16 | 0;
+ if ((HEAP32[i4 >> 2] | 0) == (i24 | 0)) {
+ HEAP32[i4 >> 2] = i22;
+ } else {
+ HEAP32[i3 + 20 >> 2] = i22;
+ }
+ if ((i22 | 0) == 0) {
+ break;
+ }
+ }
+ if (i22 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i22 + 24 >> 2] = i3;
+ i3 = HEAP32[i24 + 16 >> 2] | 0;
+ do {
+ if ((i3 | 0) != 0) {
+ if (i3 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i22 + 16 >> 2] = i3;
+ HEAP32[i3 + 24 >> 2] = i22;
+ break;
+ }
+ }
+ } while (0);
+ i3 = HEAP32[i24 + 20 >> 2] | 0;
+ if ((i3 | 0) != 0) {
+ if (i3 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i22 + 20 >> 2] = i3;
+ HEAP32[i3 + 24 >> 2] = i22;
+ break;
+ }
+ }
+ }
+ } while (0);
+ L204 : do {
+ if (!(i25 >>> 0 < 16)) {
+ HEAP32[i24 + 4 >> 2] = i12 | 3;
+ HEAP32[i24 + (i12 | 4) >> 2] = i25 | 1;
+ HEAP32[i24 + (i25 + i12) >> 2] = i25;
+ i4 = i25 >>> 3;
+ if (i25 >>> 0 < 256) {
+ i6 = i4 << 1;
+ i3 = 14512 + (i6 << 2) | 0;
+ i5 = HEAP32[3618] | 0;
+ i4 = 1 << i4;
+ if ((i5 & i4 | 0) != 0) {
+ i5 = 14512 + (i6 + 2 << 2) | 0;
+ i4 = HEAP32[i5 >> 2] | 0;
+ if (i4 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i21 = i5;
+ i20 = i4;
+ }
+ } else {
+ HEAP32[3618] = i5 | i4;
+ i21 = 14512 + (i6 + 2 << 2) | 0;
+ i20 = i3;
+ }
+ HEAP32[i21 >> 2] = i2;
+ HEAP32[i20 + 12 >> 2] = i2;
+ HEAP32[i24 + (i12 + 8) >> 2] = i20;
+ HEAP32[i24 + (i12 + 12) >> 2] = i3;
+ break;
+ }
+ i3 = i25 >>> 8;
+ if ((i3 | 0) != 0) {
+ if (i25 >>> 0 > 16777215) {
+ i3 = 31;
+ } else {
+ i31 = (i3 + 1048320 | 0) >>> 16 & 8;
+ i32 = i3 << i31;
+ i30 = (i32 + 520192 | 0) >>> 16 & 4;
+ i32 = i32 << i30;
+ i3 = (i32 + 245760 | 0) >>> 16 & 2;
+ i3 = 14 - (i30 | i31 | i3) + (i32 << i3 >>> 15) | 0;
+ i3 = i25 >>> (i3 + 7 | 0) & 1 | i3 << 1;
+ }
+ } else {
+ i3 = 0;
+ }
+ i6 = 14776 + (i3 << 2) | 0;
+ HEAP32[i24 + (i12 + 28) >> 2] = i3;
+ HEAP32[i24 + (i12 + 20) >> 2] = 0;
+ HEAP32[i24 + (i12 + 16) >> 2] = 0;
+ i4 = HEAP32[14476 >> 2] | 0;
+ i5 = 1 << i3;
+ if ((i4 & i5 | 0) == 0) {
+ HEAP32[14476 >> 2] = i4 | i5;
+ HEAP32[i6 >> 2] = i2;
+ HEAP32[i24 + (i12 + 24) >> 2] = i6;
+ HEAP32[i24 + (i12 + 12) >> 2] = i2;
+ HEAP32[i24 + (i12 + 8) >> 2] = i2;
+ break;
+ }
+ i4 = HEAP32[i6 >> 2] | 0;
+ if ((i3 | 0) == 31) {
+ i3 = 0;
+ } else {
+ i3 = 25 - (i3 >>> 1) | 0;
+ }
+ L225 : do {
+ if ((HEAP32[i4 + 4 >> 2] & -8 | 0) != (i25 | 0)) {
+ i3 = i25 << i3;
+ while (1) {
+ i6 = i4 + (i3 >>> 31 << 2) + 16 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ break;
+ }
+ if ((HEAP32[i5 + 4 >> 2] & -8 | 0) == (i25 | 0)) {
+ i18 = i5;
+ break L225;
+ } else {
+ i3 = i3 << 1;
+ i4 = i5;
+ }
+ }
+ if (i6 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i6 >> 2] = i2;
+ HEAP32[i24 + (i12 + 24) >> 2] = i4;
+ HEAP32[i24 + (i12 + 12) >> 2] = i2;
+ HEAP32[i24 + (i12 + 8) >> 2] = i2;
+ break L204;
+ }
+ } else {
+ i18 = i4;
+ }
+ } while (0);
+ i4 = i18 + 8 | 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ i5 = HEAP32[14488 >> 2] | 0;
+ if (i18 >>> 0 < i5 >>> 0) {
+ _abort();
+ }
+ if (i3 >>> 0 < i5 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i3 + 12 >> 2] = i2;
+ HEAP32[i4 >> 2] = i2;
+ HEAP32[i24 + (i12 + 8) >> 2] = i3;
+ HEAP32[i24 + (i12 + 12) >> 2] = i18;
+ HEAP32[i24 + (i12 + 24) >> 2] = 0;
+ break;
+ }
+ } else {
+ i32 = i25 + i12 | 0;
+ HEAP32[i24 + 4 >> 2] = i32 | 3;
+ i32 = i24 + (i32 + 4) | 0;
+ HEAP32[i32 >> 2] = HEAP32[i32 >> 2] | 1;
+ }
+ } while (0);
+ i32 = i24 + 8 | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ }
+ } else {
+ i12 = -1;
+ }
+ }
+ } while (0);
+ i18 = HEAP32[14480 >> 2] | 0;
+ if (!(i12 >>> 0 > i18 >>> 0)) {
+ i3 = i18 - i12 | 0;
+ i2 = HEAP32[14492 >> 2] | 0;
+ if (i3 >>> 0 > 15) {
+ HEAP32[14492 >> 2] = i2 + i12;
+ HEAP32[14480 >> 2] = i3;
+ HEAP32[i2 + (i12 + 4) >> 2] = i3 | 1;
+ HEAP32[i2 + i18 >> 2] = i3;
+ HEAP32[i2 + 4 >> 2] = i12 | 3;
+ } else {
+ HEAP32[14480 >> 2] = 0;
+ HEAP32[14492 >> 2] = 0;
+ HEAP32[i2 + 4 >> 2] = i18 | 3;
+ i32 = i2 + (i18 + 4) | 0;
+ HEAP32[i32 >> 2] = HEAP32[i32 >> 2] | 1;
+ }
+ i32 = i2 + 8 | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ i18 = HEAP32[14484 >> 2] | 0;
+ if (i12 >>> 0 < i18 >>> 0) {
+ i31 = i18 - i12 | 0;
+ HEAP32[14484 >> 2] = i31;
+ i32 = HEAP32[14496 >> 2] | 0;
+ HEAP32[14496 >> 2] = i32 + i12;
+ HEAP32[i32 + (i12 + 4) >> 2] = i31 | 1;
+ HEAP32[i32 + 4 >> 2] = i12 | 3;
+ i32 = i32 + 8 | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ do {
+ if ((HEAP32[3736] | 0) == 0) {
+ i18 = _sysconf(30) | 0;
+ if ((i18 + -1 & i18 | 0) == 0) {
+ HEAP32[14952 >> 2] = i18;
+ HEAP32[14948 >> 2] = i18;
+ HEAP32[14956 >> 2] = -1;
+ HEAP32[14960 >> 2] = -1;
+ HEAP32[14964 >> 2] = 0;
+ HEAP32[14916 >> 2] = 0;
+ HEAP32[3736] = (_time(0) | 0) & -16 ^ 1431655768;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ i20 = i12 + 48 | 0;
+ i25 = HEAP32[14952 >> 2] | 0;
+ i21 = i12 + 47 | 0;
+ i22 = i25 + i21 | 0;
+ i25 = 0 - i25 | 0;
+ i18 = i22 & i25;
+ if (!(i18 >>> 0 > i12 >>> 0)) {
+ i32 = 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ i24 = HEAP32[14912 >> 2] | 0;
+ if ((i24 | 0) != 0 ? (i31 = HEAP32[14904 >> 2] | 0, i32 = i31 + i18 | 0, i32 >>> 0 <= i31 >>> 0 | i32 >>> 0 > i24 >>> 0) : 0) {
+ i32 = 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ L269 : do {
+ if ((HEAP32[14916 >> 2] & 4 | 0) == 0) {
+ i26 = HEAP32[14496 >> 2] | 0;
+ L271 : do {
+ if ((i26 | 0) != 0) {
+ i24 = 14920 | 0;
+ while (1) {
+ i27 = HEAP32[i24 >> 2] | 0;
+ if (!(i27 >>> 0 > i26 >>> 0) ? (i23 = i24 + 4 | 0, (i27 + (HEAP32[i23 >> 2] | 0) | 0) >>> 0 > i26 >>> 0) : 0) {
+ break;
+ }
+ i24 = HEAP32[i24 + 8 >> 2] | 0;
+ if ((i24 | 0) == 0) {
+ i13 = 182;
+ break L271;
+ }
+ }
+ if ((i24 | 0) != 0) {
+ i25 = i22 - (HEAP32[14484 >> 2] | 0) & i25;
+ if (i25 >>> 0 < 2147483647) {
+ i13 = _sbrk(i25 | 0) | 0;
+ i26 = (i13 | 0) == ((HEAP32[i24 >> 2] | 0) + (HEAP32[i23 >> 2] | 0) | 0);
+ i22 = i13;
+ i24 = i25;
+ i23 = i26 ? i13 : -1;
+ i25 = i26 ? i25 : 0;
+ i13 = 191;
+ } else {
+ i25 = 0;
+ }
+ } else {
+ i13 = 182;
+ }
+ } else {
+ i13 = 182;
+ }
+ } while (0);
+ do {
+ if ((i13 | 0) == 182) {
+ i23 = _sbrk(0) | 0;
+ if ((i23 | 0) != (-1 | 0)) {
+ i24 = i23;
+ i22 = HEAP32[14948 >> 2] | 0;
+ i25 = i22 + -1 | 0;
+ if ((i25 & i24 | 0) == 0) {
+ i25 = i18;
+ } else {
+ i25 = i18 - i24 + (i25 + i24 & 0 - i22) | 0;
+ }
+ i24 = HEAP32[14904 >> 2] | 0;
+ i26 = i24 + i25 | 0;
+ if (i25 >>> 0 > i12 >>> 0 & i25 >>> 0 < 2147483647) {
+ i22 = HEAP32[14912 >> 2] | 0;
+ if ((i22 | 0) != 0 ? i26 >>> 0 <= i24 >>> 0 | i26 >>> 0 > i22 >>> 0 : 0) {
+ i25 = 0;
+ break;
+ }
+ i22 = _sbrk(i25 | 0) | 0;
+ i13 = (i22 | 0) == (i23 | 0);
+ i24 = i25;
+ i23 = i13 ? i23 : -1;
+ i25 = i13 ? i25 : 0;
+ i13 = 191;
+ } else {
+ i25 = 0;
+ }
+ } else {
+ i25 = 0;
+ }
+ }
+ } while (0);
+ L291 : do {
+ if ((i13 | 0) == 191) {
+ i13 = 0 - i24 | 0;
+ if ((i23 | 0) != (-1 | 0)) {
+ i17 = i23;
+ i14 = i25;
+ i13 = 202;
+ break L269;
+ }
+ do {
+ if ((i22 | 0) != (-1 | 0) & i24 >>> 0 < 2147483647 & i24 >>> 0 < i20 >>> 0 ? (i19 = HEAP32[14952 >> 2] | 0, i19 = i21 - i24 + i19 & 0 - i19, i19 >>> 0 < 2147483647) : 0) {
+ if ((_sbrk(i19 | 0) | 0) == (-1 | 0)) {
+ _sbrk(i13 | 0) | 0;
+ break L291;
+ } else {
+ i24 = i19 + i24 | 0;
+ break;
+ }
+ }
+ } while (0);
+ if ((i22 | 0) != (-1 | 0)) {
+ i17 = i22;
+ i14 = i24;
+ i13 = 202;
+ break L269;
+ }
+ }
+ } while (0);
+ HEAP32[14916 >> 2] = HEAP32[14916 >> 2] | 4;
+ i13 = 199;
+ } else {
+ i25 = 0;
+ i13 = 199;
+ }
+ } while (0);
+ if ((((i13 | 0) == 199 ? i18 >>> 0 < 2147483647 : 0) ? (i17 = _sbrk(i18 | 0) | 0, i16 = _sbrk(0) | 0, (i16 | 0) != (-1 | 0) & (i17 | 0) != (-1 | 0) & i17 >>> 0 < i16 >>> 0) : 0) ? (i15 = i16 - i17 | 0, i14 = i15 >>> 0 > (i12 + 40 | 0) >>> 0, i14) : 0) {
+ i14 = i14 ? i15 : i25;
+ i13 = 202;
+ }
+ if ((i13 | 0) == 202) {
+ i15 = (HEAP32[14904 >> 2] | 0) + i14 | 0;
+ HEAP32[14904 >> 2] = i15;
+ if (i15 >>> 0 > (HEAP32[14908 >> 2] | 0) >>> 0) {
+ HEAP32[14908 >> 2] = i15;
+ }
+ i15 = HEAP32[14496 >> 2] | 0;
+ L311 : do {
+ if ((i15 | 0) != 0) {
+ i21 = 14920 | 0;
+ while (1) {
+ i16 = HEAP32[i21 >> 2] | 0;
+ i19 = i21 + 4 | 0;
+ i20 = HEAP32[i19 >> 2] | 0;
+ if ((i17 | 0) == (i16 + i20 | 0)) {
+ i13 = 214;
+ break;
+ }
+ i18 = HEAP32[i21 + 8 >> 2] | 0;
+ if ((i18 | 0) == 0) {
+ break;
+ } else {
+ i21 = i18;
+ }
+ }
+ if (((i13 | 0) == 214 ? (HEAP32[i21 + 12 >> 2] & 8 | 0) == 0 : 0) ? i15 >>> 0 >= i16 >>> 0 & i15 >>> 0 < i17 >>> 0 : 0) {
+ HEAP32[i19 >> 2] = i20 + i14;
+ i2 = (HEAP32[14484 >> 2] | 0) + i14 | 0;
+ i3 = i15 + 8 | 0;
+ if ((i3 & 7 | 0) == 0) {
+ i3 = 0;
+ } else {
+ i3 = 0 - i3 & 7;
+ }
+ i32 = i2 - i3 | 0;
+ HEAP32[14496 >> 2] = i15 + i3;
+ HEAP32[14484 >> 2] = i32;
+ HEAP32[i15 + (i3 + 4) >> 2] = i32 | 1;
+ HEAP32[i15 + (i2 + 4) >> 2] = 40;
+ HEAP32[14500 >> 2] = HEAP32[14960 >> 2];
+ break;
+ }
+ if (i17 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ HEAP32[14488 >> 2] = i17;
+ }
+ i19 = i17 + i14 | 0;
+ i16 = 14920 | 0;
+ while (1) {
+ if ((HEAP32[i16 >> 2] | 0) == (i19 | 0)) {
+ i13 = 224;
+ break;
+ }
+ i18 = HEAP32[i16 + 8 >> 2] | 0;
+ if ((i18 | 0) == 0) {
+ break;
+ } else {
+ i16 = i18;
+ }
+ }
+ if ((i13 | 0) == 224 ? (HEAP32[i16 + 12 >> 2] & 8 | 0) == 0 : 0) {
+ HEAP32[i16 >> 2] = i17;
+ i6 = i16 + 4 | 0;
+ HEAP32[i6 >> 2] = (HEAP32[i6 >> 2] | 0) + i14;
+ i6 = i17 + 8 | 0;
+ if ((i6 & 7 | 0) == 0) {
+ i6 = 0;
+ } else {
+ i6 = 0 - i6 & 7;
+ }
+ i7 = i17 + (i14 + 8) | 0;
+ if ((i7 & 7 | 0) == 0) {
+ i13 = 0;
+ } else {
+ i13 = 0 - i7 & 7;
+ }
+ i15 = i17 + (i13 + i14) | 0;
+ i8 = i6 + i12 | 0;
+ i7 = i17 + i8 | 0;
+ i10 = i15 - (i17 + i6) - i12 | 0;
+ HEAP32[i17 + (i6 + 4) >> 2] = i12 | 3;
+ L348 : do {
+ if ((i15 | 0) != (HEAP32[14496 >> 2] | 0)) {
+ if ((i15 | 0) == (HEAP32[14492 >> 2] | 0)) {
+ i32 = (HEAP32[14480 >> 2] | 0) + i10 | 0;
+ HEAP32[14480 >> 2] = i32;
+ HEAP32[14492 >> 2] = i7;
+ HEAP32[i17 + (i8 + 4) >> 2] = i32 | 1;
+ HEAP32[i17 + (i32 + i8) >> 2] = i32;
+ break;
+ }
+ i12 = i14 + 4 | 0;
+ i18 = HEAP32[i17 + (i12 + i13) >> 2] | 0;
+ if ((i18 & 3 | 0) == 1) {
+ i11 = i18 & -8;
+ i16 = i18 >>> 3;
+ do {
+ if (!(i18 >>> 0 < 256)) {
+ i9 = HEAP32[i17 + ((i13 | 24) + i14) >> 2] | 0;
+ i19 = HEAP32[i17 + (i14 + 12 + i13) >> 2] | 0;
+ do {
+ if ((i19 | 0) == (i15 | 0)) {
+ i19 = i13 | 16;
+ i18 = i17 + (i12 + i19) | 0;
+ i16 = HEAP32[i18 >> 2] | 0;
+ if ((i16 | 0) == 0) {
+ i18 = i17 + (i19 + i14) | 0;
+ i16 = HEAP32[i18 >> 2] | 0;
+ if ((i16 | 0) == 0) {
+ i5 = 0;
+ break;
+ }
+ }
+ while (1) {
+ i20 = i16 + 20 | 0;
+ i19 = HEAP32[i20 >> 2] | 0;
+ if ((i19 | 0) != 0) {
+ i16 = i19;
+ i18 = i20;
+ continue;
+ }
+ i19 = i16 + 16 | 0;
+ i20 = HEAP32[i19 >> 2] | 0;
+ if ((i20 | 0) == 0) {
+ break;
+ } else {
+ i16 = i20;
+ i18 = i19;
+ }
+ }
+ if (i18 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i18 >> 2] = 0;
+ i5 = i16;
+ break;
+ }
+ } else {
+ i18 = HEAP32[i17 + ((i13 | 8) + i14) >> 2] | 0;
+ if (i18 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i16 = i18 + 12 | 0;
+ if ((HEAP32[i16 >> 2] | 0) != (i15 | 0)) {
+ _abort();
+ }
+ i20 = i19 + 8 | 0;
+ if ((HEAP32[i20 >> 2] | 0) == (i15 | 0)) {
+ HEAP32[i16 >> 2] = i19;
+ HEAP32[i20 >> 2] = i18;
+ i5 = i19;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ if ((i9 | 0) != 0) {
+ i16 = HEAP32[i17 + (i14 + 28 + i13) >> 2] | 0;
+ i18 = 14776 + (i16 << 2) | 0;
+ if ((i15 | 0) == (HEAP32[i18 >> 2] | 0)) {
+ HEAP32[i18 >> 2] = i5;
+ if ((i5 | 0) == 0) {
+ HEAP32[14476 >> 2] = HEAP32[14476 >> 2] & ~(1 << i16);
+ break;
+ }
+ } else {
+ if (i9 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i16 = i9 + 16 | 0;
+ if ((HEAP32[i16 >> 2] | 0) == (i15 | 0)) {
+ HEAP32[i16 >> 2] = i5;
+ } else {
+ HEAP32[i9 + 20 >> 2] = i5;
+ }
+ if ((i5 | 0) == 0) {
+ break;
+ }
+ }
+ if (i5 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i5 + 24 >> 2] = i9;
+ i15 = i13 | 16;
+ i9 = HEAP32[i17 + (i15 + i14) >> 2] | 0;
+ do {
+ if ((i9 | 0) != 0) {
+ if (i9 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i5 + 16 >> 2] = i9;
+ HEAP32[i9 + 24 >> 2] = i5;
+ break;
+ }
+ }
+ } while (0);
+ i9 = HEAP32[i17 + (i12 + i15) >> 2] | 0;
+ if ((i9 | 0) != 0) {
+ if (i9 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i5 + 20 >> 2] = i9;
+ HEAP32[i9 + 24 >> 2] = i5;
+ break;
+ }
+ }
+ }
+ } else {
+ i5 = HEAP32[i17 + ((i13 | 8) + i14) >> 2] | 0;
+ i12 = HEAP32[i17 + (i14 + 12 + i13) >> 2] | 0;
+ i18 = 14512 + (i16 << 1 << 2) | 0;
+ if ((i5 | 0) != (i18 | 0)) {
+ if (i5 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ if ((HEAP32[i5 + 12 >> 2] | 0) != (i15 | 0)) {
+ _abort();
+ }
+ }
+ if ((i12 | 0) == (i5 | 0)) {
+ HEAP32[3618] = HEAP32[3618] & ~(1 << i16);
+ break;
+ }
+ if ((i12 | 0) != (i18 | 0)) {
+ if (i12 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i16 = i12 + 8 | 0;
+ if ((HEAP32[i16 >> 2] | 0) == (i15 | 0)) {
+ i9 = i16;
+ } else {
+ _abort();
+ }
+ } else {
+ i9 = i12 + 8 | 0;
+ }
+ HEAP32[i5 + 12 >> 2] = i12;
+ HEAP32[i9 >> 2] = i5;
+ }
+ } while (0);
+ i15 = i17 + ((i11 | i13) + i14) | 0;
+ i10 = i11 + i10 | 0;
+ }
+ i5 = i15 + 4 | 0;
+ HEAP32[i5 >> 2] = HEAP32[i5 >> 2] & -2;
+ HEAP32[i17 + (i8 + 4) >> 2] = i10 | 1;
+ HEAP32[i17 + (i10 + i8) >> 2] = i10;
+ i5 = i10 >>> 3;
+ if (i10 >>> 0 < 256) {
+ i10 = i5 << 1;
+ i2 = 14512 + (i10 << 2) | 0;
+ i9 = HEAP32[3618] | 0;
+ i5 = 1 << i5;
+ if ((i9 & i5 | 0) != 0) {
+ i9 = 14512 + (i10 + 2 << 2) | 0;
+ i5 = HEAP32[i9 >> 2] | 0;
+ if (i5 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i3 = i9;
+ i4 = i5;
+ }
+ } else {
+ HEAP32[3618] = i9 | i5;
+ i3 = 14512 + (i10 + 2 << 2) | 0;
+ i4 = i2;
+ }
+ HEAP32[i3 >> 2] = i7;
+ HEAP32[i4 + 12 >> 2] = i7;
+ HEAP32[i17 + (i8 + 8) >> 2] = i4;
+ HEAP32[i17 + (i8 + 12) >> 2] = i2;
+ break;
+ }
+ i3 = i10 >>> 8;
+ if ((i3 | 0) != 0) {
+ if (i10 >>> 0 > 16777215) {
+ i3 = 31;
+ } else {
+ i31 = (i3 + 1048320 | 0) >>> 16 & 8;
+ i32 = i3 << i31;
+ i30 = (i32 + 520192 | 0) >>> 16 & 4;
+ i32 = i32 << i30;
+ i3 = (i32 + 245760 | 0) >>> 16 & 2;
+ i3 = 14 - (i30 | i31 | i3) + (i32 << i3 >>> 15) | 0;
+ i3 = i10 >>> (i3 + 7 | 0) & 1 | i3 << 1;
+ }
+ } else {
+ i3 = 0;
+ }
+ i4 = 14776 + (i3 << 2) | 0;
+ HEAP32[i17 + (i8 + 28) >> 2] = i3;
+ HEAP32[i17 + (i8 + 20) >> 2] = 0;
+ HEAP32[i17 + (i8 + 16) >> 2] = 0;
+ i9 = HEAP32[14476 >> 2] | 0;
+ i5 = 1 << i3;
+ if ((i9 & i5 | 0) == 0) {
+ HEAP32[14476 >> 2] = i9 | i5;
+ HEAP32[i4 >> 2] = i7;
+ HEAP32[i17 + (i8 + 24) >> 2] = i4;
+ HEAP32[i17 + (i8 + 12) >> 2] = i7;
+ HEAP32[i17 + (i8 + 8) >> 2] = i7;
+ break;
+ }
+ i4 = HEAP32[i4 >> 2] | 0;
+ if ((i3 | 0) == 31) {
+ i3 = 0;
+ } else {
+ i3 = 25 - (i3 >>> 1) | 0;
+ }
+ L444 : do {
+ if ((HEAP32[i4 + 4 >> 2] & -8 | 0) != (i10 | 0)) {
+ i3 = i10 << i3;
+ while (1) {
+ i5 = i4 + (i3 >>> 31 << 2) + 16 | 0;
+ i9 = HEAP32[i5 >> 2] | 0;
+ if ((i9 | 0) == 0) {
+ break;
+ }
+ if ((HEAP32[i9 + 4 >> 2] & -8 | 0) == (i10 | 0)) {
+ i2 = i9;
+ break L444;
+ } else {
+ i3 = i3 << 1;
+ i4 = i9;
+ }
+ }
+ if (i5 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i5 >> 2] = i7;
+ HEAP32[i17 + (i8 + 24) >> 2] = i4;
+ HEAP32[i17 + (i8 + 12) >> 2] = i7;
+ HEAP32[i17 + (i8 + 8) >> 2] = i7;
+ break L348;
+ }
+ } else {
+ i2 = i4;
+ }
+ } while (0);
+ i4 = i2 + 8 | 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ i5 = HEAP32[14488 >> 2] | 0;
+ if (i2 >>> 0 < i5 >>> 0) {
+ _abort();
+ }
+ if (i3 >>> 0 < i5 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i3 + 12 >> 2] = i7;
+ HEAP32[i4 >> 2] = i7;
+ HEAP32[i17 + (i8 + 8) >> 2] = i3;
+ HEAP32[i17 + (i8 + 12) >> 2] = i2;
+ HEAP32[i17 + (i8 + 24) >> 2] = 0;
+ break;
+ }
+ } else {
+ i32 = (HEAP32[14484 >> 2] | 0) + i10 | 0;
+ HEAP32[14484 >> 2] = i32;
+ HEAP32[14496 >> 2] = i7;
+ HEAP32[i17 + (i8 + 4) >> 2] = i32 | 1;
+ }
+ } while (0);
+ i32 = i17 + (i6 | 8) | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ i3 = 14920 | 0;
+ while (1) {
+ i2 = HEAP32[i3 >> 2] | 0;
+ if (!(i2 >>> 0 > i15 >>> 0) ? (i11 = HEAP32[i3 + 4 >> 2] | 0, i10 = i2 + i11 | 0, i10 >>> 0 > i15 >>> 0) : 0) {
+ break;
+ }
+ i3 = HEAP32[i3 + 8 >> 2] | 0;
+ }
+ i3 = i2 + (i11 + -39) | 0;
+ if ((i3 & 7 | 0) == 0) {
+ i3 = 0;
+ } else {
+ i3 = 0 - i3 & 7;
+ }
+ i2 = i2 + (i11 + -47 + i3) | 0;
+ i2 = i2 >>> 0 < (i15 + 16 | 0) >>> 0 ? i15 : i2;
+ i3 = i2 + 8 | 0;
+ i4 = i17 + 8 | 0;
+ if ((i4 & 7 | 0) == 0) {
+ i4 = 0;
+ } else {
+ i4 = 0 - i4 & 7;
+ }
+ i32 = i14 + -40 - i4 | 0;
+ HEAP32[14496 >> 2] = i17 + i4;
+ HEAP32[14484 >> 2] = i32;
+ HEAP32[i17 + (i4 + 4) >> 2] = i32 | 1;
+ HEAP32[i17 + (i14 + -36) >> 2] = 40;
+ HEAP32[14500 >> 2] = HEAP32[14960 >> 2];
+ HEAP32[i2 + 4 >> 2] = 27;
+ HEAP32[i3 + 0 >> 2] = HEAP32[14920 >> 2];
+ HEAP32[i3 + 4 >> 2] = HEAP32[14924 >> 2];
+ HEAP32[i3 + 8 >> 2] = HEAP32[14928 >> 2];
+ HEAP32[i3 + 12 >> 2] = HEAP32[14932 >> 2];
+ HEAP32[14920 >> 2] = i17;
+ HEAP32[14924 >> 2] = i14;
+ HEAP32[14932 >> 2] = 0;
+ HEAP32[14928 >> 2] = i3;
+ i4 = i2 + 28 | 0;
+ HEAP32[i4 >> 2] = 7;
+ if ((i2 + 32 | 0) >>> 0 < i10 >>> 0) {
+ while (1) {
+ i3 = i4 + 4 | 0;
+ HEAP32[i3 >> 2] = 7;
+ if ((i4 + 8 | 0) >>> 0 < i10 >>> 0) {
+ i4 = i3;
+ } else {
+ break;
+ }
+ }
+ }
+ if ((i2 | 0) != (i15 | 0)) {
+ i2 = i2 - i15 | 0;
+ i3 = i15 + (i2 + 4) | 0;
+ HEAP32[i3 >> 2] = HEAP32[i3 >> 2] & -2;
+ HEAP32[i15 + 4 >> 2] = i2 | 1;
+ HEAP32[i15 + i2 >> 2] = i2;
+ i3 = i2 >>> 3;
+ if (i2 >>> 0 < 256) {
+ i4 = i3 << 1;
+ i2 = 14512 + (i4 << 2) | 0;
+ i5 = HEAP32[3618] | 0;
+ i3 = 1 << i3;
+ if ((i5 & i3 | 0) != 0) {
+ i4 = 14512 + (i4 + 2 << 2) | 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ if (i3 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i7 = i4;
+ i8 = i3;
+ }
+ } else {
+ HEAP32[3618] = i5 | i3;
+ i7 = 14512 + (i4 + 2 << 2) | 0;
+ i8 = i2;
+ }
+ HEAP32[i7 >> 2] = i15;
+ HEAP32[i8 + 12 >> 2] = i15;
+ HEAP32[i15 + 8 >> 2] = i8;
+ HEAP32[i15 + 12 >> 2] = i2;
+ break;
+ }
+ i3 = i2 >>> 8;
+ if ((i3 | 0) != 0) {
+ if (i2 >>> 0 > 16777215) {
+ i3 = 31;
+ } else {
+ i31 = (i3 + 1048320 | 0) >>> 16 & 8;
+ i32 = i3 << i31;
+ i30 = (i32 + 520192 | 0) >>> 16 & 4;
+ i32 = i32 << i30;
+ i3 = (i32 + 245760 | 0) >>> 16 & 2;
+ i3 = 14 - (i30 | i31 | i3) + (i32 << i3 >>> 15) | 0;
+ i3 = i2 >>> (i3 + 7 | 0) & 1 | i3 << 1;
+ }
+ } else {
+ i3 = 0;
+ }
+ i7 = 14776 + (i3 << 2) | 0;
+ HEAP32[i15 + 28 >> 2] = i3;
+ HEAP32[i15 + 20 >> 2] = 0;
+ HEAP32[i15 + 16 >> 2] = 0;
+ i4 = HEAP32[14476 >> 2] | 0;
+ i5 = 1 << i3;
+ if ((i4 & i5 | 0) == 0) {
+ HEAP32[14476 >> 2] = i4 | i5;
+ HEAP32[i7 >> 2] = i15;
+ HEAP32[i15 + 24 >> 2] = i7;
+ HEAP32[i15 + 12 >> 2] = i15;
+ HEAP32[i15 + 8 >> 2] = i15;
+ break;
+ }
+ i4 = HEAP32[i7 >> 2] | 0;
+ if ((i3 | 0) == 31) {
+ i3 = 0;
+ } else {
+ i3 = 25 - (i3 >>> 1) | 0;
+ }
+ L499 : do {
+ if ((HEAP32[i4 + 4 >> 2] & -8 | 0) != (i2 | 0)) {
+ i3 = i2 << i3;
+ while (1) {
+ i7 = i4 + (i3 >>> 31 << 2) + 16 | 0;
+ i5 = HEAP32[i7 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ break;
+ }
+ if ((HEAP32[i5 + 4 >> 2] & -8 | 0) == (i2 | 0)) {
+ i6 = i5;
+ break L499;
+ } else {
+ i3 = i3 << 1;
+ i4 = i5;
+ }
+ }
+ if (i7 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i7 >> 2] = i15;
+ HEAP32[i15 + 24 >> 2] = i4;
+ HEAP32[i15 + 12 >> 2] = i15;
+ HEAP32[i15 + 8 >> 2] = i15;
+ break L311;
+ }
+ } else {
+ i6 = i4;
+ }
+ } while (0);
+ i4 = i6 + 8 | 0;
+ i3 = HEAP32[i4 >> 2] | 0;
+ i2 = HEAP32[14488 >> 2] | 0;
+ if (i6 >>> 0 < i2 >>> 0) {
+ _abort();
+ }
+ if (i3 >>> 0 < i2 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i3 + 12 >> 2] = i15;
+ HEAP32[i4 >> 2] = i15;
+ HEAP32[i15 + 8 >> 2] = i3;
+ HEAP32[i15 + 12 >> 2] = i6;
+ HEAP32[i15 + 24 >> 2] = 0;
+ break;
+ }
+ }
+ } else {
+ i32 = HEAP32[14488 >> 2] | 0;
+ if ((i32 | 0) == 0 | i17 >>> 0 < i32 >>> 0) {
+ HEAP32[14488 >> 2] = i17;
+ }
+ HEAP32[14920 >> 2] = i17;
+ HEAP32[14924 >> 2] = i14;
+ HEAP32[14932 >> 2] = 0;
+ HEAP32[14508 >> 2] = HEAP32[3736];
+ HEAP32[14504 >> 2] = -1;
+ i2 = 0;
+ do {
+ i32 = i2 << 1;
+ i31 = 14512 + (i32 << 2) | 0;
+ HEAP32[14512 + (i32 + 3 << 2) >> 2] = i31;
+ HEAP32[14512 + (i32 + 2 << 2) >> 2] = i31;
+ i2 = i2 + 1 | 0;
+ } while ((i2 | 0) != 32);
+ i2 = i17 + 8 | 0;
+ if ((i2 & 7 | 0) == 0) {
+ i2 = 0;
+ } else {
+ i2 = 0 - i2 & 7;
+ }
+ i32 = i14 + -40 - i2 | 0;
+ HEAP32[14496 >> 2] = i17 + i2;
+ HEAP32[14484 >> 2] = i32;
+ HEAP32[i17 + (i2 + 4) >> 2] = i32 | 1;
+ HEAP32[i17 + (i14 + -36) >> 2] = 40;
+ HEAP32[14500 >> 2] = HEAP32[14960 >> 2];
+ }
+ } while (0);
+ i2 = HEAP32[14484 >> 2] | 0;
+ if (i2 >>> 0 > i12 >>> 0) {
+ i31 = i2 - i12 | 0;
+ HEAP32[14484 >> 2] = i31;
+ i32 = HEAP32[14496 >> 2] | 0;
+ HEAP32[14496 >> 2] = i32 + i12;
+ HEAP32[i32 + (i12 + 4) >> 2] = i31 | 1;
+ HEAP32[i32 + 4 >> 2] = i12 | 3;
+ i32 = i32 + 8 | 0;
+ STACKTOP = i1;
+ return i32 | 0;
+ }
+ }
+ HEAP32[(___errno_location() | 0) >> 2] = 12;
+ i32 = 0;
+ STACKTOP = i1;
+ return i32 | 0;
+}
+function _deflate(i2, i10) {
+ i2 = i2 | 0;
+ i10 = i10 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0, i23 = 0, i24 = 0, i25 = 0, i26 = 0, i27 = 0, i28 = 0, i29 = 0, i30 = 0, i31 = 0, i32 = 0, i33 = 0, i34 = 0, i35 = 0, i36 = 0, i37 = 0;
+ i1 = STACKTOP;
+ if ((i2 | 0) == 0) {
+ i37 = -2;
+ STACKTOP = i1;
+ return i37 | 0;
+ }
+ i5 = i2 + 28 | 0;
+ i7 = HEAP32[i5 >> 2] | 0;
+ if ((i7 | 0) == 0 | i10 >>> 0 > 5) {
+ i37 = -2;
+ STACKTOP = i1;
+ return i37 | 0;
+ }
+ i4 = i2 + 12 | 0;
+ do {
+ if ((HEAP32[i4 >> 2] | 0) != 0) {
+ if ((HEAP32[i2 >> 2] | 0) == 0 ? (HEAP32[i2 + 4 >> 2] | 0) != 0 : 0) {
+ break;
+ }
+ i11 = i7 + 4 | 0;
+ i29 = HEAP32[i11 >> 2] | 0;
+ i9 = (i10 | 0) == 4;
+ if ((i29 | 0) != 666 | i9) {
+ i3 = i2 + 16 | 0;
+ if ((HEAP32[i3 >> 2] | 0) == 0) {
+ HEAP32[i2 + 24 >> 2] = HEAP32[3180 >> 2];
+ i37 = -5;
+ STACKTOP = i1;
+ return i37 | 0;
+ }
+ HEAP32[i7 >> 2] = i2;
+ i8 = i7 + 40 | 0;
+ i18 = HEAP32[i8 >> 2] | 0;
+ HEAP32[i8 >> 2] = i10;
+ do {
+ if ((i29 | 0) == 42) {
+ if ((HEAP32[i7 + 24 >> 2] | 0) != 2) {
+ i17 = (HEAP32[i7 + 48 >> 2] << 12) + -30720 | 0;
+ if ((HEAP32[i7 + 136 >> 2] | 0) <= 1 ? (i28 = HEAP32[i7 + 132 >> 2] | 0, (i28 | 0) >= 2) : 0) {
+ if ((i28 | 0) < 6) {
+ i28 = 64;
+ } else {
+ i28 = (i28 | 0) == 6 ? 128 : 192;
+ }
+ } else {
+ i28 = 0;
+ }
+ i28 = i28 | i17;
+ i17 = i7 + 108 | 0;
+ i37 = (HEAP32[i17 >> 2] | 0) == 0 ? i28 : i28 | 32;
+ HEAP32[i11 >> 2] = 113;
+ i29 = i7 + 20 | 0;
+ i30 = HEAP32[i29 >> 2] | 0;
+ HEAP32[i29 >> 2] = i30 + 1;
+ i28 = i7 + 8 | 0;
+ HEAP8[(HEAP32[i28 >> 2] | 0) + i30 | 0] = i37 >>> 8;
+ i30 = HEAP32[i29 >> 2] | 0;
+ HEAP32[i29 >> 2] = i30 + 1;
+ HEAP8[(HEAP32[i28 >> 2] | 0) + i30 | 0] = (i37 | ((i37 >>> 0) % 31 | 0)) ^ 31;
+ i30 = i2 + 48 | 0;
+ if ((HEAP32[i17 >> 2] | 0) != 0) {
+ i37 = HEAP32[i30 >> 2] | 0;
+ i36 = HEAP32[i29 >> 2] | 0;
+ HEAP32[i29 >> 2] = i36 + 1;
+ HEAP8[(HEAP32[i28 >> 2] | 0) + i36 | 0] = i37 >>> 24;
+ i36 = HEAP32[i29 >> 2] | 0;
+ HEAP32[i29 >> 2] = i36 + 1;
+ HEAP8[(HEAP32[i28 >> 2] | 0) + i36 | 0] = i37 >>> 16;
+ i36 = HEAP32[i30 >> 2] | 0;
+ i37 = HEAP32[i29 >> 2] | 0;
+ HEAP32[i29 >> 2] = i37 + 1;
+ HEAP8[(HEAP32[i28 >> 2] | 0) + i37 | 0] = i36 >>> 8;
+ i37 = HEAP32[i29 >> 2] | 0;
+ HEAP32[i29 >> 2] = i37 + 1;
+ HEAP8[(HEAP32[i28 >> 2] | 0) + i37 | 0] = i36;
+ }
+ HEAP32[i30 >> 2] = _adler32(0, 0, 0) | 0;
+ i31 = HEAP32[i11 >> 2] | 0;
+ i17 = 32;
+ break;
+ }
+ i32 = i2 + 48 | 0;
+ HEAP32[i32 >> 2] = _crc32(0, 0, 0) | 0;
+ i30 = i7 + 20 | 0;
+ i28 = HEAP32[i30 >> 2] | 0;
+ HEAP32[i30 >> 2] = i28 + 1;
+ i29 = i7 + 8 | 0;
+ HEAP8[(HEAP32[i29 >> 2] | 0) + i28 | 0] = 31;
+ i28 = HEAP32[i30 >> 2] | 0;
+ HEAP32[i30 >> 2] = i28 + 1;
+ HEAP8[(HEAP32[i29 >> 2] | 0) + i28 | 0] = -117;
+ i28 = HEAP32[i30 >> 2] | 0;
+ HEAP32[i30 >> 2] = i28 + 1;
+ HEAP8[(HEAP32[i29 >> 2] | 0) + i28 | 0] = 8;
+ i28 = i7 + 28 | 0;
+ i33 = HEAP32[i28 >> 2] | 0;
+ if ((i33 | 0) == 0) {
+ i22 = HEAP32[i30 >> 2] | 0;
+ HEAP32[i30 >> 2] = i22 + 1;
+ HEAP8[(HEAP32[i29 >> 2] | 0) + i22 | 0] = 0;
+ i22 = HEAP32[i30 >> 2] | 0;
+ HEAP32[i30 >> 2] = i22 + 1;
+ HEAP8[(HEAP32[i29 >> 2] | 0) + i22 | 0] = 0;
+ i22 = HEAP32[i30 >> 2] | 0;
+ HEAP32[i30 >> 2] = i22 + 1;
+ HEAP8[(HEAP32[i29 >> 2] | 0) + i22 | 0] = 0;
+ i22 = HEAP32[i30 >> 2] | 0;
+ HEAP32[i30 >> 2] = i22 + 1;
+ HEAP8[(HEAP32[i29 >> 2] | 0) + i22 | 0] = 0;
+ i22 = HEAP32[i30 >> 2] | 0;
+ HEAP32[i30 >> 2] = i22 + 1;
+ HEAP8[(HEAP32[i29 >> 2] | 0) + i22 | 0] = 0;
+ i22 = HEAP32[i7 + 132 >> 2] | 0;
+ if ((i22 | 0) != 9) {
+ if ((HEAP32[i7 + 136 >> 2] | 0) > 1) {
+ i22 = 4;
+ } else {
+ i22 = (i22 | 0) < 2 ? 4 : 0;
+ }
+ } else {
+ i22 = 2;
+ }
+ i37 = HEAP32[i30 >> 2] | 0;
+ HEAP32[i30 >> 2] = i37 + 1;
+ HEAP8[(HEAP32[i29 >> 2] | 0) + i37 | 0] = i22;
+ i37 = HEAP32[i30 >> 2] | 0;
+ HEAP32[i30 >> 2] = i37 + 1;
+ HEAP8[(HEAP32[i29 >> 2] | 0) + i37 | 0] = 3;
+ HEAP32[i11 >> 2] = 113;
+ break;
+ }
+ i37 = (((HEAP32[i33 + 44 >> 2] | 0) != 0 ? 2 : 0) | (HEAP32[i33 >> 2] | 0) != 0 | ((HEAP32[i33 + 16 >> 2] | 0) == 0 ? 0 : 4) | ((HEAP32[i33 + 28 >> 2] | 0) == 0 ? 0 : 8) | ((HEAP32[i33 + 36 >> 2] | 0) == 0 ? 0 : 16)) & 255;
+ i17 = HEAP32[i30 >> 2] | 0;
+ HEAP32[i30 >> 2] = i17 + 1;
+ HEAP8[(HEAP32[i29 >> 2] | 0) + i17 | 0] = i37;
+ i17 = HEAP32[(HEAP32[i28 >> 2] | 0) + 4 >> 2] & 255;
+ i37 = HEAP32[i30 >> 2] | 0;
+ HEAP32[i30 >> 2] = i37 + 1;
+ HEAP8[(HEAP32[i29 >> 2] | 0) + i37 | 0] = i17;
+ i37 = (HEAP32[(HEAP32[i28 >> 2] | 0) + 4 >> 2] | 0) >>> 8 & 255;
+ i17 = HEAP32[i30 >> 2] | 0;
+ HEAP32[i30 >> 2] = i17 + 1;
+ HEAP8[(HEAP32[i29 >> 2] | 0) + i17 | 0] = i37;
+ i17 = (HEAP32[(HEAP32[i28 >> 2] | 0) + 4 >> 2] | 0) >>> 16 & 255;
+ i37 = HEAP32[i30 >> 2] | 0;
+ HEAP32[i30 >> 2] = i37 + 1;
+ HEAP8[(HEAP32[i29 >> 2] | 0) + i37 | 0] = i17;
+ i37 = (HEAP32[(HEAP32[i28 >> 2] | 0) + 4 >> 2] | 0) >>> 24 & 255;
+ i17 = HEAP32[i30 >> 2] | 0;
+ HEAP32[i30 >> 2] = i17 + 1;
+ HEAP8[(HEAP32[i29 >> 2] | 0) + i17 | 0] = i37;
+ i17 = HEAP32[i7 + 132 >> 2] | 0;
+ if ((i17 | 0) != 9) {
+ if ((HEAP32[i7 + 136 >> 2] | 0) > 1) {
+ i17 = 4;
+ } else {
+ i17 = (i17 | 0) < 2 ? 4 : 0;
+ }
+ } else {
+ i17 = 2;
+ }
+ i37 = HEAP32[i30 >> 2] | 0;
+ HEAP32[i30 >> 2] = i37 + 1;
+ HEAP8[(HEAP32[i29 >> 2] | 0) + i37 | 0] = i17;
+ i37 = HEAP32[(HEAP32[i28 >> 2] | 0) + 12 >> 2] & 255;
+ i17 = HEAP32[i30 >> 2] | 0;
+ HEAP32[i30 >> 2] = i17 + 1;
+ HEAP8[(HEAP32[i29 >> 2] | 0) + i17 | 0] = i37;
+ i17 = HEAP32[i28 >> 2] | 0;
+ if ((HEAP32[i17 + 16 >> 2] | 0) != 0) {
+ i17 = HEAP32[i17 + 20 >> 2] & 255;
+ i37 = HEAP32[i30 >> 2] | 0;
+ HEAP32[i30 >> 2] = i37 + 1;
+ HEAP8[(HEAP32[i29 >> 2] | 0) + i37 | 0] = i17;
+ i37 = (HEAP32[(HEAP32[i28 >> 2] | 0) + 20 >> 2] | 0) >>> 8 & 255;
+ i17 = HEAP32[i30 >> 2] | 0;
+ HEAP32[i30 >> 2] = i17 + 1;
+ HEAP8[(HEAP32[i29 >> 2] | 0) + i17 | 0] = i37;
+ i17 = HEAP32[i28 >> 2] | 0;
+ }
+ if ((HEAP32[i17 + 44 >> 2] | 0) != 0) {
+ HEAP32[i32 >> 2] = _crc32(HEAP32[i32 >> 2] | 0, HEAP32[i29 >> 2] | 0, HEAP32[i30 >> 2] | 0) | 0;
+ }
+ HEAP32[i7 + 32 >> 2] = 0;
+ HEAP32[i11 >> 2] = 69;
+ i17 = 34;
+ } else {
+ i31 = i29;
+ i17 = 32;
+ }
+ } while (0);
+ if ((i17 | 0) == 32) {
+ if ((i31 | 0) == 69) {
+ i28 = i7 + 28 | 0;
+ i17 = 34;
+ } else {
+ i17 = 55;
+ }
+ }
+ do {
+ if ((i17 | 0) == 34) {
+ i37 = HEAP32[i28 >> 2] | 0;
+ if ((HEAP32[i37 + 16 >> 2] | 0) == 0) {
+ HEAP32[i11 >> 2] = 73;
+ i17 = 57;
+ break;
+ }
+ i29 = i7 + 20 | 0;
+ i34 = HEAP32[i29 >> 2] | 0;
+ i17 = i7 + 32 | 0;
+ i36 = HEAP32[i17 >> 2] | 0;
+ L55 : do {
+ if (i36 >>> 0 < (HEAP32[i37 + 20 >> 2] & 65535) >>> 0) {
+ i30 = i7 + 12 | 0;
+ i32 = i2 + 48 | 0;
+ i31 = i7 + 8 | 0;
+ i33 = i2 + 20 | 0;
+ i35 = i34;
+ while (1) {
+ if ((i35 | 0) == (HEAP32[i30 >> 2] | 0)) {
+ if ((HEAP32[i37 + 44 >> 2] | 0) != 0 & i35 >>> 0 > i34 >>> 0) {
+ HEAP32[i32 >> 2] = _crc32(HEAP32[i32 >> 2] | 0, (HEAP32[i31 >> 2] | 0) + i34 | 0, i35 - i34 | 0) | 0;
+ }
+ i34 = HEAP32[i5 >> 2] | 0;
+ i35 = HEAP32[i34 + 20 >> 2] | 0;
+ i36 = HEAP32[i3 >> 2] | 0;
+ i35 = i35 >>> 0 > i36 >>> 0 ? i36 : i35;
+ if ((i35 | 0) != 0 ? (_memcpy(HEAP32[i4 >> 2] | 0, HEAP32[i34 + 16 >> 2] | 0, i35 | 0) | 0, HEAP32[i4 >> 2] = (HEAP32[i4 >> 2] | 0) + i35, i27 = (HEAP32[i5 >> 2] | 0) + 16 | 0, HEAP32[i27 >> 2] = (HEAP32[i27 >> 2] | 0) + i35, HEAP32[i33 >> 2] = (HEAP32[i33 >> 2] | 0) + i35, HEAP32[i3 >> 2] = (HEAP32[i3 >> 2] | 0) - i35, i27 = HEAP32[i5 >> 2] | 0, i36 = i27 + 20 | 0, i37 = HEAP32[i36 >> 2] | 0, HEAP32[i36 >> 2] = i37 - i35, (i37 | 0) == (i35 | 0)) : 0) {
+ HEAP32[i27 + 16 >> 2] = HEAP32[i27 + 8 >> 2];
+ }
+ i34 = HEAP32[i29 >> 2] | 0;
+ if ((i34 | 0) == (HEAP32[i30 >> 2] | 0)) {
+ break;
+ }
+ i37 = HEAP32[i28 >> 2] | 0;
+ i36 = HEAP32[i17 >> 2] | 0;
+ i35 = i34;
+ }
+ i36 = HEAP8[(HEAP32[i37 + 16 >> 2] | 0) + i36 | 0] | 0;
+ HEAP32[i29 >> 2] = i35 + 1;
+ HEAP8[(HEAP32[i31 >> 2] | 0) + i35 | 0] = i36;
+ i36 = (HEAP32[i17 >> 2] | 0) + 1 | 0;
+ HEAP32[i17 >> 2] = i36;
+ i37 = HEAP32[i28 >> 2] | 0;
+ if (!(i36 >>> 0 < (HEAP32[i37 + 20 >> 2] & 65535) >>> 0)) {
+ break L55;
+ }
+ i35 = HEAP32[i29 >> 2] | 0;
+ }
+ i37 = HEAP32[i28 >> 2] | 0;
+ }
+ } while (0);
+ if ((HEAP32[i37 + 44 >> 2] | 0) != 0 ? (i26 = HEAP32[i29 >> 2] | 0, i26 >>> 0 > i34 >>> 0) : 0) {
+ i37 = i2 + 48 | 0;
+ HEAP32[i37 >> 2] = _crc32(HEAP32[i37 >> 2] | 0, (HEAP32[i7 + 8 >> 2] | 0) + i34 | 0, i26 - i34 | 0) | 0;
+ i37 = HEAP32[i28 >> 2] | 0;
+ }
+ if ((HEAP32[i17 >> 2] | 0) == (HEAP32[i37 + 20 >> 2] | 0)) {
+ HEAP32[i17 >> 2] = 0;
+ HEAP32[i11 >> 2] = 73;
+ i17 = 57;
+ break;
+ } else {
+ i31 = HEAP32[i11 >> 2] | 0;
+ i17 = 55;
+ break;
+ }
+ }
+ } while (0);
+ if ((i17 | 0) == 55) {
+ if ((i31 | 0) == 73) {
+ i37 = HEAP32[i7 + 28 >> 2] | 0;
+ i17 = 57;
+ } else {
+ i17 = 76;
+ }
+ }
+ do {
+ if ((i17 | 0) == 57) {
+ i26 = i7 + 28 | 0;
+ if ((HEAP32[i37 + 28 >> 2] | 0) == 0) {
+ HEAP32[i11 >> 2] = 91;
+ i17 = 78;
+ break;
+ }
+ i27 = i7 + 20 | 0;
+ i35 = HEAP32[i27 >> 2] | 0;
+ i32 = i7 + 12 | 0;
+ i29 = i2 + 48 | 0;
+ i28 = i7 + 8 | 0;
+ i31 = i2 + 20 | 0;
+ i30 = i7 + 32 | 0;
+ i33 = i35;
+ while (1) {
+ if ((i33 | 0) == (HEAP32[i32 >> 2] | 0)) {
+ if ((HEAP32[(HEAP32[i26 >> 2] | 0) + 44 >> 2] | 0) != 0 & i33 >>> 0 > i35 >>> 0) {
+ HEAP32[i29 >> 2] = _crc32(HEAP32[i29 >> 2] | 0, (HEAP32[i28 >> 2] | 0) + i35 | 0, i33 - i35 | 0) | 0;
+ }
+ i33 = HEAP32[i5 >> 2] | 0;
+ i34 = HEAP32[i33 + 20 >> 2] | 0;
+ i35 = HEAP32[i3 >> 2] | 0;
+ i34 = i34 >>> 0 > i35 >>> 0 ? i35 : i34;
+ if ((i34 | 0) != 0 ? (_memcpy(HEAP32[i4 >> 2] | 0, HEAP32[i33 + 16 >> 2] | 0, i34 | 0) | 0, HEAP32[i4 >> 2] = (HEAP32[i4 >> 2] | 0) + i34, i25 = (HEAP32[i5 >> 2] | 0) + 16 | 0, HEAP32[i25 >> 2] = (HEAP32[i25 >> 2] | 0) + i34, HEAP32[i31 >> 2] = (HEAP32[i31 >> 2] | 0) + i34, HEAP32[i3 >> 2] = (HEAP32[i3 >> 2] | 0) - i34, i25 = HEAP32[i5 >> 2] | 0, i36 = i25 + 20 | 0, i37 = HEAP32[i36 >> 2] | 0, HEAP32[i36 >> 2] = i37 - i34, (i37 | 0) == (i34 | 0)) : 0) {
+ HEAP32[i25 + 16 >> 2] = HEAP32[i25 + 8 >> 2];
+ }
+ i35 = HEAP32[i27 >> 2] | 0;
+ if ((i35 | 0) == (HEAP32[i32 >> 2] | 0)) {
+ i25 = 1;
+ break;
+ } else {
+ i33 = i35;
+ }
+ }
+ i34 = HEAP32[i30 >> 2] | 0;
+ HEAP32[i30 >> 2] = i34 + 1;
+ i34 = HEAP8[(HEAP32[(HEAP32[i26 >> 2] | 0) + 28 >> 2] | 0) + i34 | 0] | 0;
+ HEAP32[i27 >> 2] = i33 + 1;
+ HEAP8[(HEAP32[i28 >> 2] | 0) + i33 | 0] = i34;
+ if (i34 << 24 >> 24 == 0) {
+ i17 = 68;
+ break;
+ }
+ i33 = HEAP32[i27 >> 2] | 0;
+ }
+ if ((i17 | 0) == 68) {
+ i25 = i34 & 255;
+ }
+ if ((HEAP32[(HEAP32[i26 >> 2] | 0) + 44 >> 2] | 0) != 0 ? (i24 = HEAP32[i27 >> 2] | 0, i24 >>> 0 > i35 >>> 0) : 0) {
+ HEAP32[i29 >> 2] = _crc32(HEAP32[i29 >> 2] | 0, (HEAP32[i28 >> 2] | 0) + i35 | 0, i24 - i35 | 0) | 0;
+ }
+ if ((i25 | 0) == 0) {
+ HEAP32[i30 >> 2] = 0;
+ HEAP32[i11 >> 2] = 91;
+ i17 = 78;
+ break;
+ } else {
+ i31 = HEAP32[i11 >> 2] | 0;
+ i17 = 76;
+ break;
+ }
+ }
+ } while (0);
+ if ((i17 | 0) == 76) {
+ if ((i31 | 0) == 91) {
+ i26 = i7 + 28 | 0;
+ i17 = 78;
+ } else {
+ i17 = 97;
+ }
+ }
+ do {
+ if ((i17 | 0) == 78) {
+ if ((HEAP32[(HEAP32[i26 >> 2] | 0) + 36 >> 2] | 0) == 0) {
+ HEAP32[i11 >> 2] = 103;
+ i17 = 99;
+ break;
+ }
+ i24 = i7 + 20 | 0;
+ i32 = HEAP32[i24 >> 2] | 0;
+ i29 = i7 + 12 | 0;
+ i27 = i2 + 48 | 0;
+ i25 = i7 + 8 | 0;
+ i28 = i2 + 20 | 0;
+ i30 = i7 + 32 | 0;
+ i31 = i32;
+ while (1) {
+ if ((i31 | 0) == (HEAP32[i29 >> 2] | 0)) {
+ if ((HEAP32[(HEAP32[i26 >> 2] | 0) + 44 >> 2] | 0) != 0 & i31 >>> 0 > i32 >>> 0) {
+ HEAP32[i27 >> 2] = _crc32(HEAP32[i27 >> 2] | 0, (HEAP32[i25 >> 2] | 0) + i32 | 0, i31 - i32 | 0) | 0;
+ }
+ i31 = HEAP32[i5 >> 2] | 0;
+ i33 = HEAP32[i31 + 20 >> 2] | 0;
+ i32 = HEAP32[i3 >> 2] | 0;
+ i32 = i33 >>> 0 > i32 >>> 0 ? i32 : i33;
+ if ((i32 | 0) != 0 ? (_memcpy(HEAP32[i4 >> 2] | 0, HEAP32[i31 + 16 >> 2] | 0, i32 | 0) | 0, HEAP32[i4 >> 2] = (HEAP32[i4 >> 2] | 0) + i32, i23 = (HEAP32[i5 >> 2] | 0) + 16 | 0, HEAP32[i23 >> 2] = (HEAP32[i23 >> 2] | 0) + i32, HEAP32[i28 >> 2] = (HEAP32[i28 >> 2] | 0) + i32, HEAP32[i3 >> 2] = (HEAP32[i3 >> 2] | 0) - i32, i23 = HEAP32[i5 >> 2] | 0, i36 = i23 + 20 | 0, i37 = HEAP32[i36 >> 2] | 0, HEAP32[i36 >> 2] = i37 - i32, (i37 | 0) == (i32 | 0)) : 0) {
+ HEAP32[i23 + 16 >> 2] = HEAP32[i23 + 8 >> 2];
+ }
+ i32 = HEAP32[i24 >> 2] | 0;
+ if ((i32 | 0) == (HEAP32[i29 >> 2] | 0)) {
+ i23 = 1;
+ break;
+ } else {
+ i31 = i32;
+ }
+ }
+ i33 = HEAP32[i30 >> 2] | 0;
+ HEAP32[i30 >> 2] = i33 + 1;
+ i33 = HEAP8[(HEAP32[(HEAP32[i26 >> 2] | 0) + 36 >> 2] | 0) + i33 | 0] | 0;
+ HEAP32[i24 >> 2] = i31 + 1;
+ HEAP8[(HEAP32[i25 >> 2] | 0) + i31 | 0] = i33;
+ if (i33 << 24 >> 24 == 0) {
+ i17 = 89;
+ break;
+ }
+ i31 = HEAP32[i24 >> 2] | 0;
+ }
+ if ((i17 | 0) == 89) {
+ i23 = i33 & 255;
+ }
+ if ((HEAP32[(HEAP32[i26 >> 2] | 0) + 44 >> 2] | 0) != 0 ? (i22 = HEAP32[i24 >> 2] | 0, i22 >>> 0 > i32 >>> 0) : 0) {
+ HEAP32[i27 >> 2] = _crc32(HEAP32[i27 >> 2] | 0, (HEAP32[i25 >> 2] | 0) + i32 | 0, i22 - i32 | 0) | 0;
+ }
+ if ((i23 | 0) == 0) {
+ HEAP32[i11 >> 2] = 103;
+ i17 = 99;
+ break;
+ } else {
+ i31 = HEAP32[i11 >> 2] | 0;
+ i17 = 97;
+ break;
+ }
+ }
+ } while (0);
+ if ((i17 | 0) == 97 ? (i31 | 0) == 103 : 0) {
+ i26 = i7 + 28 | 0;
+ i17 = 99;
+ }
+ do {
+ if ((i17 | 0) == 99) {
+ if ((HEAP32[(HEAP32[i26 >> 2] | 0) + 44 >> 2] | 0) == 0) {
+ HEAP32[i11 >> 2] = 113;
+ break;
+ }
+ i17 = i7 + 20 | 0;
+ i22 = i7 + 12 | 0;
+ if ((((HEAP32[i17 >> 2] | 0) + 2 | 0) >>> 0 > (HEAP32[i22 >> 2] | 0) >>> 0 ? (i20 = HEAP32[i5 >> 2] | 0, i21 = HEAP32[i20 + 20 >> 2] | 0, i23 = HEAP32[i3 >> 2] | 0, i21 = i21 >>> 0 > i23 >>> 0 ? i23 : i21, (i21 | 0) != 0) : 0) ? (_memcpy(HEAP32[i4 >> 2] | 0, HEAP32[i20 + 16 >> 2] | 0, i21 | 0) | 0, HEAP32[i4 >> 2] = (HEAP32[i4 >> 2] | 0) + i21, i19 = (HEAP32[i5 >> 2] | 0) + 16 | 0, HEAP32[i19 >> 2] = (HEAP32[i19 >> 2] | 0) + i21, i19 = i2 + 20 | 0, HEAP32[i19 >> 2] = (HEAP32[i19 >> 2] | 0) + i21, HEAP32[i3 >> 2] = (HEAP32[i3 >> 2] | 0) - i21, i19 = HEAP32[i5 >> 2] | 0, i36 = i19 + 20 | 0, i37 = HEAP32[i36 >> 2] | 0, HEAP32[i36 >> 2] = i37 - i21, (i37 | 0) == (i21 | 0)) : 0) {
+ HEAP32[i19 + 16 >> 2] = HEAP32[i19 + 8 >> 2];
+ }
+ i19 = HEAP32[i17 >> 2] | 0;
+ if (!((i19 + 2 | 0) >>> 0 > (HEAP32[i22 >> 2] | 0) >>> 0)) {
+ i37 = i2 + 48 | 0;
+ i34 = HEAP32[i37 >> 2] & 255;
+ HEAP32[i17 >> 2] = i19 + 1;
+ i35 = i7 + 8 | 0;
+ HEAP8[(HEAP32[i35 >> 2] | 0) + i19 | 0] = i34;
+ i34 = (HEAP32[i37 >> 2] | 0) >>> 8 & 255;
+ i36 = HEAP32[i17 >> 2] | 0;
+ HEAP32[i17 >> 2] = i36 + 1;
+ HEAP8[(HEAP32[i35 >> 2] | 0) + i36 | 0] = i34;
+ HEAP32[i37 >> 2] = _crc32(0, 0, 0) | 0;
+ HEAP32[i11 >> 2] = 113;
+ }
+ }
+ } while (0);
+ i19 = i7 + 20 | 0;
+ if ((HEAP32[i19 >> 2] | 0) == 0) {
+ if ((HEAP32[i2 + 4 >> 2] | 0) == 0 ? (i18 | 0) >= (i10 | 0) & (i10 | 0) != 4 : 0) {
+ HEAP32[i2 + 24 >> 2] = HEAP32[3180 >> 2];
+ i37 = -5;
+ STACKTOP = i1;
+ return i37 | 0;
+ }
+ } else {
+ i17 = HEAP32[i5 >> 2] | 0;
+ i20 = HEAP32[i17 + 20 >> 2] | 0;
+ i18 = HEAP32[i3 >> 2] | 0;
+ i20 = i20 >>> 0 > i18 >>> 0 ? i18 : i20;
+ if ((i20 | 0) != 0) {
+ _memcpy(HEAP32[i4 >> 2] | 0, HEAP32[i17 + 16 >> 2] | 0, i20 | 0) | 0;
+ HEAP32[i4 >> 2] = (HEAP32[i4 >> 2] | 0) + i20;
+ i17 = (HEAP32[i5 >> 2] | 0) + 16 | 0;
+ HEAP32[i17 >> 2] = (HEAP32[i17 >> 2] | 0) + i20;
+ i17 = i2 + 20 | 0;
+ HEAP32[i17 >> 2] = (HEAP32[i17 >> 2] | 0) + i20;
+ HEAP32[i3 >> 2] = (HEAP32[i3 >> 2] | 0) - i20;
+ i17 = HEAP32[i5 >> 2] | 0;
+ i36 = i17 + 20 | 0;
+ i37 = HEAP32[i36 >> 2] | 0;
+ HEAP32[i36 >> 2] = i37 - i20;
+ if ((i37 | 0) == (i20 | 0)) {
+ HEAP32[i17 + 16 >> 2] = HEAP32[i17 + 8 >> 2];
+ }
+ i18 = HEAP32[i3 >> 2] | 0;
+ }
+ if ((i18 | 0) == 0) {
+ HEAP32[i8 >> 2] = -1;
+ i37 = 0;
+ STACKTOP = i1;
+ return i37 | 0;
+ }
+ }
+ i18 = (HEAP32[i11 >> 2] | 0) == 666;
+ i17 = (HEAP32[i2 + 4 >> 2] | 0) == 0;
+ if (i18) {
+ if (i17) {
+ i17 = 121;
+ } else {
+ HEAP32[i2 + 24 >> 2] = HEAP32[3180 >> 2];
+ i37 = -5;
+ STACKTOP = i1;
+ return i37 | 0;
+ }
+ } else {
+ if (i17) {
+ i17 = 121;
+ } else {
+ i17 = 124;
+ }
+ }
+ do {
+ if ((i17 | 0) == 121) {
+ if ((HEAP32[i7 + 116 >> 2] | 0) == 0) {
+ if ((i10 | 0) != 0) {
+ if (i18) {
+ break;
+ } else {
+ i17 = 124;
+ break;
+ }
+ } else {
+ i37 = 0;
+ STACKTOP = i1;
+ return i37 | 0;
+ }
+ } else {
+ i17 = 124;
+ }
+ }
+ } while (0);
+ do {
+ if ((i17 | 0) == 124) {
+ i18 = HEAP32[i7 + 136 >> 2] | 0;
+ L185 : do {
+ if ((i18 | 0) == 2) {
+ i22 = i7 + 116 | 0;
+ i18 = i7 + 96 | 0;
+ i13 = i7 + 108 | 0;
+ i14 = i7 + 56 | 0;
+ i21 = i7 + 5792 | 0;
+ i20 = i7 + 5796 | 0;
+ i24 = i7 + 5784 | 0;
+ i23 = i7 + 5788 | 0;
+ i12 = i7 + 92 | 0;
+ while (1) {
+ if ((HEAP32[i22 >> 2] | 0) == 0 ? (_fill_window(i7), (HEAP32[i22 >> 2] | 0) == 0) : 0) {
+ break;
+ }
+ HEAP32[i18 >> 2] = 0;
+ i37 = HEAP8[(HEAP32[i14 >> 2] | 0) + (HEAP32[i13 >> 2] | 0) | 0] | 0;
+ i26 = HEAP32[i21 >> 2] | 0;
+ HEAP16[(HEAP32[i20 >> 2] | 0) + (i26 << 1) >> 1] = 0;
+ HEAP32[i21 >> 2] = i26 + 1;
+ HEAP8[(HEAP32[i24 >> 2] | 0) + i26 | 0] = i37;
+ i37 = i7 + ((i37 & 255) << 2) + 148 | 0;
+ HEAP16[i37 >> 1] = (HEAP16[i37 >> 1] | 0) + 1 << 16 >> 16;
+ i37 = (HEAP32[i21 >> 2] | 0) == ((HEAP32[i23 >> 2] | 0) + -1 | 0);
+ HEAP32[i22 >> 2] = (HEAP32[i22 >> 2] | 0) + -1;
+ i26 = (HEAP32[i13 >> 2] | 0) + 1 | 0;
+ HEAP32[i13 >> 2] = i26;
+ if (!i37) {
+ continue;
+ }
+ i25 = HEAP32[i12 >> 2] | 0;
+ if ((i25 | 0) > -1) {
+ i27 = (HEAP32[i14 >> 2] | 0) + i25 | 0;
+ } else {
+ i27 = 0;
+ }
+ __tr_flush_block(i7, i27, i26 - i25 | 0, 0);
+ HEAP32[i12 >> 2] = HEAP32[i13 >> 2];
+ i26 = HEAP32[i7 >> 2] | 0;
+ i25 = i26 + 28 | 0;
+ i27 = HEAP32[i25 >> 2] | 0;
+ i30 = HEAP32[i27 + 20 >> 2] | 0;
+ i28 = i26 + 16 | 0;
+ i29 = HEAP32[i28 >> 2] | 0;
+ i29 = i30 >>> 0 > i29 >>> 0 ? i29 : i30;
+ if ((i29 | 0) != 0 ? (i16 = i26 + 12 | 0, _memcpy(HEAP32[i16 >> 2] | 0, HEAP32[i27 + 16 >> 2] | 0, i29 | 0) | 0, HEAP32[i16 >> 2] = (HEAP32[i16 >> 2] | 0) + i29, i16 = (HEAP32[i25 >> 2] | 0) + 16 | 0, HEAP32[i16 >> 2] = (HEAP32[i16 >> 2] | 0) + i29, i16 = i26 + 20 | 0, HEAP32[i16 >> 2] = (HEAP32[i16 >> 2] | 0) + i29, HEAP32[i28 >> 2] = (HEAP32[i28 >> 2] | 0) - i29, i16 = HEAP32[i25 >> 2] | 0, i36 = i16 + 20 | 0, i37 = HEAP32[i36 >> 2] | 0, HEAP32[i36 >> 2] = i37 - i29, (i37 | 0) == (i29 | 0)) : 0) {
+ HEAP32[i16 + 16 >> 2] = HEAP32[i16 + 8 >> 2];
+ }
+ if ((HEAP32[(HEAP32[i7 >> 2] | 0) + 16 >> 2] | 0) == 0) {
+ break L185;
+ }
+ }
+ if ((i10 | 0) != 0) {
+ i16 = HEAP32[i12 >> 2] | 0;
+ if ((i16 | 0) > -1) {
+ i14 = (HEAP32[i14 >> 2] | 0) + i16 | 0;
+ } else {
+ i14 = 0;
+ }
+ __tr_flush_block(i7, i14, (HEAP32[i13 >> 2] | 0) - i16 | 0, i9 & 1);
+ HEAP32[i12 >> 2] = HEAP32[i13 >> 2];
+ i14 = HEAP32[i7 >> 2] | 0;
+ i13 = i14 + 28 | 0;
+ i12 = HEAP32[i13 >> 2] | 0;
+ i17 = HEAP32[i12 + 20 >> 2] | 0;
+ i16 = i14 + 16 | 0;
+ i18 = HEAP32[i16 >> 2] | 0;
+ i17 = i17 >>> 0 > i18 >>> 0 ? i18 : i17;
+ if ((i17 | 0) != 0 ? (i15 = i14 + 12 | 0, _memcpy(HEAP32[i15 >> 2] | 0, HEAP32[i12 + 16 >> 2] | 0, i17 | 0) | 0, HEAP32[i15 >> 2] = (HEAP32[i15 >> 2] | 0) + i17, i15 = (HEAP32[i13 >> 2] | 0) + 16 | 0, HEAP32[i15 >> 2] = (HEAP32[i15 >> 2] | 0) + i17, i15 = i14 + 20 | 0, HEAP32[i15 >> 2] = (HEAP32[i15 >> 2] | 0) + i17, HEAP32[i16 >> 2] = (HEAP32[i16 >> 2] | 0) - i17, i15 = HEAP32[i13 >> 2] | 0, i36 = i15 + 20 | 0, i37 = HEAP32[i36 >> 2] | 0, HEAP32[i36 >> 2] = i37 - i17, (i37 | 0) == (i17 | 0)) : 0) {
+ HEAP32[i15 + 16 >> 2] = HEAP32[i15 + 8 >> 2];
+ }
+ if ((HEAP32[(HEAP32[i7 >> 2] | 0) + 16 >> 2] | 0) == 0) {
+ i12 = i9 ? 2 : 0;
+ i17 = 183;
+ break;
+ } else {
+ i12 = i9 ? 3 : 1;
+ i17 = 183;
+ break;
+ }
+ }
+ } else if ((i18 | 0) == 3) {
+ i27 = i7 + 116 | 0;
+ i26 = (i10 | 0) == 0;
+ i22 = i7 + 96 | 0;
+ i15 = i7 + 108 | 0;
+ i20 = i7 + 5792 | 0;
+ i24 = i7 + 5796 | 0;
+ i23 = i7 + 5784 | 0;
+ i21 = i7 + (HEAPU8[296] << 2) + 2440 | 0;
+ i25 = i7 + 5788 | 0;
+ i18 = i7 + 56 | 0;
+ i16 = i7 + 92 | 0;
+ while (1) {
+ i29 = HEAP32[i27 >> 2] | 0;
+ if (i29 >>> 0 < 258) {
+ _fill_window(i7);
+ i29 = HEAP32[i27 >> 2] | 0;
+ if (i29 >>> 0 < 258 & i26) {
+ break L185;
+ }
+ if ((i29 | 0) == 0) {
+ break;
+ }
+ HEAP32[i22 >> 2] = 0;
+ if (i29 >>> 0 > 2) {
+ i17 = 151;
+ } else {
+ i28 = HEAP32[i15 >> 2] | 0;
+ i17 = 166;
+ }
+ } else {
+ HEAP32[i22 >> 2] = 0;
+ i17 = 151;
+ }
+ if ((i17 | 0) == 151) {
+ i17 = 0;
+ i28 = HEAP32[i15 >> 2] | 0;
+ if ((i28 | 0) != 0) {
+ i31 = HEAP32[i18 >> 2] | 0;
+ i30 = HEAP8[i31 + (i28 + -1) | 0] | 0;
+ if ((i30 << 24 >> 24 == (HEAP8[i31 + i28 | 0] | 0) ? i30 << 24 >> 24 == (HEAP8[i31 + (i28 + 1) | 0] | 0) : 0) ? (i14 = i31 + (i28 + 2) | 0, i30 << 24 >> 24 == (HEAP8[i14] | 0)) : 0) {
+ i31 = i31 + (i28 + 258) | 0;
+ i32 = i14;
+ do {
+ i33 = i32 + 1 | 0;
+ if (!(i30 << 24 >> 24 == (HEAP8[i33] | 0))) {
+ i32 = i33;
+ break;
+ }
+ i33 = i32 + 2 | 0;
+ if (!(i30 << 24 >> 24 == (HEAP8[i33] | 0))) {
+ i32 = i33;
+ break;
+ }
+ i33 = i32 + 3 | 0;
+ if (!(i30 << 24 >> 24 == (HEAP8[i33] | 0))) {
+ i32 = i33;
+ break;
+ }
+ i33 = i32 + 4 | 0;
+ if (!(i30 << 24 >> 24 == (HEAP8[i33] | 0))) {
+ i32 = i33;
+ break;
+ }
+ i33 = i32 + 5 | 0;
+ if (!(i30 << 24 >> 24 == (HEAP8[i33] | 0))) {
+ i32 = i33;
+ break;
+ }
+ i33 = i32 + 6 | 0;
+ if (!(i30 << 24 >> 24 == (HEAP8[i33] | 0))) {
+ i32 = i33;
+ break;
+ }
+ i33 = i32 + 7 | 0;
+ if (!(i30 << 24 >> 24 == (HEAP8[i33] | 0))) {
+ i32 = i33;
+ break;
+ }
+ i32 = i32 + 8 | 0;
+ } while (i30 << 24 >> 24 == (HEAP8[i32] | 0) & i32 >>> 0 < i31 >>> 0);
+ i30 = i32 - i31 + 258 | 0;
+ i29 = i30 >>> 0 > i29 >>> 0 ? i29 : i30;
+ HEAP32[i22 >> 2] = i29;
+ if (i29 >>> 0 > 2) {
+ i29 = i29 + 253 | 0;
+ i28 = HEAP32[i20 >> 2] | 0;
+ HEAP16[(HEAP32[i24 >> 2] | 0) + (i28 << 1) >> 1] = 1;
+ HEAP32[i20 >> 2] = i28 + 1;
+ HEAP8[(HEAP32[i23 >> 2] | 0) + i28 | 0] = i29;
+ i29 = i7 + ((HEAPU8[808 + (i29 & 255) | 0] | 256) + 1 << 2) + 148 | 0;
+ HEAP16[i29 >> 1] = (HEAP16[i29 >> 1] | 0) + 1 << 16 >> 16;
+ HEAP16[i21 >> 1] = (HEAP16[i21 >> 1] | 0) + 1 << 16 >> 16;
+ i29 = (HEAP32[i20 >> 2] | 0) == ((HEAP32[i25 >> 2] | 0) + -1 | 0) | 0;
+ i28 = HEAP32[i22 >> 2] | 0;
+ HEAP32[i27 >> 2] = (HEAP32[i27 >> 2] | 0) - i28;
+ i28 = (HEAP32[i15 >> 2] | 0) + i28 | 0;
+ HEAP32[i15 >> 2] = i28;
+ HEAP32[i22 >> 2] = 0;
+ } else {
+ i17 = 166;
+ }
+ } else {
+ i17 = 166;
+ }
+ } else {
+ i28 = 0;
+ i17 = 166;
+ }
+ }
+ if ((i17 | 0) == 166) {
+ i17 = 0;
+ i29 = HEAP8[(HEAP32[i18 >> 2] | 0) + i28 | 0] | 0;
+ i28 = HEAP32[i20 >> 2] | 0;
+ HEAP16[(HEAP32[i24 >> 2] | 0) + (i28 << 1) >> 1] = 0;
+ HEAP32[i20 >> 2] = i28 + 1;
+ HEAP8[(HEAP32[i23 >> 2] | 0) + i28 | 0] = i29;
+ i29 = i7 + ((i29 & 255) << 2) + 148 | 0;
+ HEAP16[i29 >> 1] = (HEAP16[i29 >> 1] | 0) + 1 << 16 >> 16;
+ i29 = (HEAP32[i20 >> 2] | 0) == ((HEAP32[i25 >> 2] | 0) + -1 | 0) | 0;
+ HEAP32[i27 >> 2] = (HEAP32[i27 >> 2] | 0) + -1;
+ i28 = (HEAP32[i15 >> 2] | 0) + 1 | 0;
+ HEAP32[i15 >> 2] = i28;
+ }
+ if ((i29 | 0) == 0) {
+ continue;
+ }
+ i29 = HEAP32[i16 >> 2] | 0;
+ if ((i29 | 0) > -1) {
+ i30 = (HEAP32[i18 >> 2] | 0) + i29 | 0;
+ } else {
+ i30 = 0;
+ }
+ __tr_flush_block(i7, i30, i28 - i29 | 0, 0);
+ HEAP32[i16 >> 2] = HEAP32[i15 >> 2];
+ i30 = HEAP32[i7 >> 2] | 0;
+ i28 = i30 + 28 | 0;
+ i29 = HEAP32[i28 >> 2] | 0;
+ i33 = HEAP32[i29 + 20 >> 2] | 0;
+ i31 = i30 + 16 | 0;
+ i32 = HEAP32[i31 >> 2] | 0;
+ i32 = i33 >>> 0 > i32 >>> 0 ? i32 : i33;
+ if ((i32 | 0) != 0 ? (i13 = i30 + 12 | 0, _memcpy(HEAP32[i13 >> 2] | 0, HEAP32[i29 + 16 >> 2] | 0, i32 | 0) | 0, HEAP32[i13 >> 2] = (HEAP32[i13 >> 2] | 0) + i32, i13 = (HEAP32[i28 >> 2] | 0) + 16 | 0, HEAP32[i13 >> 2] = (HEAP32[i13 >> 2] | 0) + i32, i13 = i30 + 20 | 0, HEAP32[i13 >> 2] = (HEAP32[i13 >> 2] | 0) + i32, HEAP32[i31 >> 2] = (HEAP32[i31 >> 2] | 0) - i32, i13 = HEAP32[i28 >> 2] | 0, i36 = i13 + 20 | 0, i37 = HEAP32[i36 >> 2] | 0, HEAP32[i36 >> 2] = i37 - i32, (i37 | 0) == (i32 | 0)) : 0) {
+ HEAP32[i13 + 16 >> 2] = HEAP32[i13 + 8 >> 2];
+ }
+ if ((HEAP32[(HEAP32[i7 >> 2] | 0) + 16 >> 2] | 0) == 0) {
+ break L185;
+ }
+ }
+ i13 = HEAP32[i16 >> 2] | 0;
+ if ((i13 | 0) > -1) {
+ i14 = (HEAP32[i18 >> 2] | 0) + i13 | 0;
+ } else {
+ i14 = 0;
+ }
+ __tr_flush_block(i7, i14, (HEAP32[i15 >> 2] | 0) - i13 | 0, i9 & 1);
+ HEAP32[i16 >> 2] = HEAP32[i15 >> 2];
+ i14 = HEAP32[i7 >> 2] | 0;
+ i16 = i14 + 28 | 0;
+ i15 = HEAP32[i16 >> 2] | 0;
+ i18 = HEAP32[i15 + 20 >> 2] | 0;
+ i13 = i14 + 16 | 0;
+ i17 = HEAP32[i13 >> 2] | 0;
+ i17 = i18 >>> 0 > i17 >>> 0 ? i17 : i18;
+ if ((i17 | 0) != 0 ? (i12 = i14 + 12 | 0, _memcpy(HEAP32[i12 >> 2] | 0, HEAP32[i15 + 16 >> 2] | 0, i17 | 0) | 0, HEAP32[i12 >> 2] = (HEAP32[i12 >> 2] | 0) + i17, i12 = (HEAP32[i16 >> 2] | 0) + 16 | 0, HEAP32[i12 >> 2] = (HEAP32[i12 >> 2] | 0) + i17, i12 = i14 + 20 | 0, HEAP32[i12 >> 2] = (HEAP32[i12 >> 2] | 0) + i17, HEAP32[i13 >> 2] = (HEAP32[i13 >> 2] | 0) - i17, i12 = HEAP32[i16 >> 2] | 0, i36 = i12 + 20 | 0, i37 = HEAP32[i36 >> 2] | 0, HEAP32[i36 >> 2] = i37 - i17, (i37 | 0) == (i17 | 0)) : 0) {
+ HEAP32[i12 + 16 >> 2] = HEAP32[i12 + 8 >> 2];
+ }
+ if ((HEAP32[(HEAP32[i7 >> 2] | 0) + 16 >> 2] | 0) == 0) {
+ i12 = i9 ? 2 : 0;
+ i17 = 183;
+ break;
+ } else {
+ i12 = i9 ? 3 : 1;
+ i17 = 183;
+ break;
+ }
+ } else {
+ i12 = FUNCTION_TABLE_iii[HEAP32[184 + ((HEAP32[i7 + 132 >> 2] | 0) * 12 | 0) >> 2] & 3](i7, i10) | 0;
+ i17 = 183;
+ }
+ } while (0);
+ if ((i17 | 0) == 183) {
+ if ((i12 & -2 | 0) == 2) {
+ HEAP32[i11 >> 2] = 666;
+ }
+ if ((i12 & -3 | 0) != 0) {
+ if ((i12 | 0) != 1) {
+ break;
+ }
+ if ((i10 | 0) == 1) {
+ __tr_align(i7);
+ } else if (((i10 | 0) != 5 ? (__tr_stored_block(i7, 0, 0, 0), (i10 | 0) == 3) : 0) ? (i37 = HEAP32[i7 + 76 >> 2] | 0, i36 = HEAP32[i7 + 68 >> 2] | 0, HEAP16[i36 + (i37 + -1 << 1) >> 1] = 0, _memset(i36 | 0, 0, (i37 << 1) + -2 | 0) | 0, (HEAP32[i7 + 116 >> 2] | 0) == 0) : 0) {
+ HEAP32[i7 + 108 >> 2] = 0;
+ HEAP32[i7 + 92 >> 2] = 0;
+ }
+ i11 = HEAP32[i5 >> 2] | 0;
+ i12 = HEAP32[i11 + 20 >> 2] | 0;
+ i10 = HEAP32[i3 >> 2] | 0;
+ i12 = i12 >>> 0 > i10 >>> 0 ? i10 : i12;
+ if ((i12 | 0) != 0) {
+ _memcpy(HEAP32[i4 >> 2] | 0, HEAP32[i11 + 16 >> 2] | 0, i12 | 0) | 0;
+ HEAP32[i4 >> 2] = (HEAP32[i4 >> 2] | 0) + i12;
+ i10 = (HEAP32[i5 >> 2] | 0) + 16 | 0;
+ HEAP32[i10 >> 2] = (HEAP32[i10 >> 2] | 0) + i12;
+ i10 = i2 + 20 | 0;
+ HEAP32[i10 >> 2] = (HEAP32[i10 >> 2] | 0) + i12;
+ HEAP32[i3 >> 2] = (HEAP32[i3 >> 2] | 0) - i12;
+ i10 = HEAP32[i5 >> 2] | 0;
+ i36 = i10 + 20 | 0;
+ i37 = HEAP32[i36 >> 2] | 0;
+ HEAP32[i36 >> 2] = i37 - i12;
+ if ((i37 | 0) == (i12 | 0)) {
+ HEAP32[i10 + 16 >> 2] = HEAP32[i10 + 8 >> 2];
+ }
+ i10 = HEAP32[i3 >> 2] | 0;
+ }
+ if ((i10 | 0) != 0) {
+ break;
+ }
+ HEAP32[i8 >> 2] = -1;
+ i37 = 0;
+ STACKTOP = i1;
+ return i37 | 0;
+ }
+ }
+ if ((HEAP32[i3 >> 2] | 0) != 0) {
+ i37 = 0;
+ STACKTOP = i1;
+ return i37 | 0;
+ }
+ HEAP32[i8 >> 2] = -1;
+ i37 = 0;
+ STACKTOP = i1;
+ return i37 | 0;
+ }
+ } while (0);
+ if (!i9) {
+ i37 = 0;
+ STACKTOP = i1;
+ return i37 | 0;
+ }
+ i8 = i7 + 24 | 0;
+ i10 = HEAP32[i8 >> 2] | 0;
+ if ((i10 | 0) < 1) {
+ i37 = 1;
+ STACKTOP = i1;
+ return i37 | 0;
+ }
+ i11 = i2 + 48 | 0;
+ i9 = HEAP32[i11 >> 2] | 0;
+ if ((i10 | 0) == 2) {
+ i34 = HEAP32[i19 >> 2] | 0;
+ HEAP32[i19 >> 2] = i34 + 1;
+ i36 = i7 + 8 | 0;
+ HEAP8[(HEAP32[i36 >> 2] | 0) + i34 | 0] = i9;
+ i34 = (HEAP32[i11 >> 2] | 0) >>> 8 & 255;
+ i35 = HEAP32[i19 >> 2] | 0;
+ HEAP32[i19 >> 2] = i35 + 1;
+ HEAP8[(HEAP32[i36 >> 2] | 0) + i35 | 0] = i34;
+ i35 = (HEAP32[i11 >> 2] | 0) >>> 16 & 255;
+ i34 = HEAP32[i19 >> 2] | 0;
+ HEAP32[i19 >> 2] = i34 + 1;
+ HEAP8[(HEAP32[i36 >> 2] | 0) + i34 | 0] = i35;
+ i34 = (HEAP32[i11 >> 2] | 0) >>> 24 & 255;
+ i35 = HEAP32[i19 >> 2] | 0;
+ HEAP32[i19 >> 2] = i35 + 1;
+ HEAP8[(HEAP32[i36 >> 2] | 0) + i35 | 0] = i34;
+ i35 = i2 + 8 | 0;
+ i34 = HEAP32[i35 >> 2] & 255;
+ i37 = HEAP32[i19 >> 2] | 0;
+ HEAP32[i19 >> 2] = i37 + 1;
+ HEAP8[(HEAP32[i36 >> 2] | 0) + i37 | 0] = i34;
+ i37 = (HEAP32[i35 >> 2] | 0) >>> 8 & 255;
+ i34 = HEAP32[i19 >> 2] | 0;
+ HEAP32[i19 >> 2] = i34 + 1;
+ HEAP8[(HEAP32[i36 >> 2] | 0) + i34 | 0] = i37;
+ i34 = (HEAP32[i35 >> 2] | 0) >>> 16 & 255;
+ i37 = HEAP32[i19 >> 2] | 0;
+ HEAP32[i19 >> 2] = i37 + 1;
+ HEAP8[(HEAP32[i36 >> 2] | 0) + i37 | 0] = i34;
+ i35 = (HEAP32[i35 >> 2] | 0) >>> 24 & 255;
+ i37 = HEAP32[i19 >> 2] | 0;
+ HEAP32[i19 >> 2] = i37 + 1;
+ HEAP8[(HEAP32[i36 >> 2] | 0) + i37 | 0] = i35;
+ } else {
+ i35 = HEAP32[i19 >> 2] | 0;
+ HEAP32[i19 >> 2] = i35 + 1;
+ i36 = i7 + 8 | 0;
+ HEAP8[(HEAP32[i36 >> 2] | 0) + i35 | 0] = i9 >>> 24;
+ i35 = HEAP32[i19 >> 2] | 0;
+ HEAP32[i19 >> 2] = i35 + 1;
+ HEAP8[(HEAP32[i36 >> 2] | 0) + i35 | 0] = i9 >>> 16;
+ i35 = HEAP32[i11 >> 2] | 0;
+ i37 = HEAP32[i19 >> 2] | 0;
+ HEAP32[i19 >> 2] = i37 + 1;
+ HEAP8[(HEAP32[i36 >> 2] | 0) + i37 | 0] = i35 >>> 8;
+ i37 = HEAP32[i19 >> 2] | 0;
+ HEAP32[i19 >> 2] = i37 + 1;
+ HEAP8[(HEAP32[i36 >> 2] | 0) + i37 | 0] = i35;
+ }
+ i7 = HEAP32[i5 >> 2] | 0;
+ i10 = HEAP32[i7 + 20 >> 2] | 0;
+ i9 = HEAP32[i3 >> 2] | 0;
+ i9 = i10 >>> 0 > i9 >>> 0 ? i9 : i10;
+ if ((i9 | 0) != 0 ? (_memcpy(HEAP32[i4 >> 2] | 0, HEAP32[i7 + 16 >> 2] | 0, i9 | 0) | 0, HEAP32[i4 >> 2] = (HEAP32[i4 >> 2] | 0) + i9, i6 = (HEAP32[i5 >> 2] | 0) + 16 | 0, HEAP32[i6 >> 2] = (HEAP32[i6 >> 2] | 0) + i9, i6 = i2 + 20 | 0, HEAP32[i6 >> 2] = (HEAP32[i6 >> 2] | 0) + i9, HEAP32[i3 >> 2] = (HEAP32[i3 >> 2] | 0) - i9, i6 = HEAP32[i5 >> 2] | 0, i36 = i6 + 20 | 0, i37 = HEAP32[i36 >> 2] | 0, HEAP32[i36 >> 2] = i37 - i9, (i37 | 0) == (i9 | 0)) : 0) {
+ HEAP32[i6 + 16 >> 2] = HEAP32[i6 + 8 >> 2];
+ }
+ i2 = HEAP32[i8 >> 2] | 0;
+ if ((i2 | 0) > 0) {
+ HEAP32[i8 >> 2] = 0 - i2;
+ }
+ i37 = (HEAP32[i19 >> 2] | 0) == 0 | 0;
+ STACKTOP = i1;
+ return i37 | 0;
+ }
+ }
+ } while (0);
+ HEAP32[i2 + 24 >> 2] = HEAP32[3168 >> 2];
+ i37 = -2;
+ STACKTOP = i1;
+ return i37 | 0;
+}
+function _free(i7) {
+ i7 = i7 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0;
+ i1 = STACKTOP;
+ if ((i7 | 0) == 0) {
+ STACKTOP = i1;
+ return;
+ }
+ i15 = i7 + -8 | 0;
+ i16 = HEAP32[14488 >> 2] | 0;
+ if (i15 >>> 0 < i16 >>> 0) {
+ _abort();
+ }
+ i13 = HEAP32[i7 + -4 >> 2] | 0;
+ i12 = i13 & 3;
+ if ((i12 | 0) == 1) {
+ _abort();
+ }
+ i8 = i13 & -8;
+ i6 = i7 + (i8 + -8) | 0;
+ do {
+ if ((i13 & 1 | 0) == 0) {
+ i19 = HEAP32[i15 >> 2] | 0;
+ if ((i12 | 0) == 0) {
+ STACKTOP = i1;
+ return;
+ }
+ i15 = -8 - i19 | 0;
+ i13 = i7 + i15 | 0;
+ i12 = i19 + i8 | 0;
+ if (i13 >>> 0 < i16 >>> 0) {
+ _abort();
+ }
+ if ((i13 | 0) == (HEAP32[14492 >> 2] | 0)) {
+ i2 = i7 + (i8 + -4) | 0;
+ if ((HEAP32[i2 >> 2] & 3 | 0) != 3) {
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ HEAP32[14480 >> 2] = i12;
+ HEAP32[i2 >> 2] = HEAP32[i2 >> 2] & -2;
+ HEAP32[i7 + (i15 + 4) >> 2] = i12 | 1;
+ HEAP32[i6 >> 2] = i12;
+ STACKTOP = i1;
+ return;
+ }
+ i18 = i19 >>> 3;
+ if (i19 >>> 0 < 256) {
+ i2 = HEAP32[i7 + (i15 + 8) >> 2] | 0;
+ i11 = HEAP32[i7 + (i15 + 12) >> 2] | 0;
+ i14 = 14512 + (i18 << 1 << 2) | 0;
+ if ((i2 | 0) != (i14 | 0)) {
+ if (i2 >>> 0 < i16 >>> 0) {
+ _abort();
+ }
+ if ((HEAP32[i2 + 12 >> 2] | 0) != (i13 | 0)) {
+ _abort();
+ }
+ }
+ if ((i11 | 0) == (i2 | 0)) {
+ HEAP32[3618] = HEAP32[3618] & ~(1 << i18);
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ if ((i11 | 0) != (i14 | 0)) {
+ if (i11 >>> 0 < i16 >>> 0) {
+ _abort();
+ }
+ i14 = i11 + 8 | 0;
+ if ((HEAP32[i14 >> 2] | 0) == (i13 | 0)) {
+ i17 = i14;
+ } else {
+ _abort();
+ }
+ } else {
+ i17 = i11 + 8 | 0;
+ }
+ HEAP32[i2 + 12 >> 2] = i11;
+ HEAP32[i17 >> 2] = i2;
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ i17 = HEAP32[i7 + (i15 + 24) >> 2] | 0;
+ i18 = HEAP32[i7 + (i15 + 12) >> 2] | 0;
+ do {
+ if ((i18 | 0) == (i13 | 0)) {
+ i19 = i7 + (i15 + 20) | 0;
+ i18 = HEAP32[i19 >> 2] | 0;
+ if ((i18 | 0) == 0) {
+ i19 = i7 + (i15 + 16) | 0;
+ i18 = HEAP32[i19 >> 2] | 0;
+ if ((i18 | 0) == 0) {
+ i14 = 0;
+ break;
+ }
+ }
+ while (1) {
+ i21 = i18 + 20 | 0;
+ i20 = HEAP32[i21 >> 2] | 0;
+ if ((i20 | 0) != 0) {
+ i18 = i20;
+ i19 = i21;
+ continue;
+ }
+ i20 = i18 + 16 | 0;
+ i21 = HEAP32[i20 >> 2] | 0;
+ if ((i21 | 0) == 0) {
+ break;
+ } else {
+ i18 = i21;
+ i19 = i20;
+ }
+ }
+ if (i19 >>> 0 < i16 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i19 >> 2] = 0;
+ i14 = i18;
+ break;
+ }
+ } else {
+ i19 = HEAP32[i7 + (i15 + 8) >> 2] | 0;
+ if (i19 >>> 0 < i16 >>> 0) {
+ _abort();
+ }
+ i16 = i19 + 12 | 0;
+ if ((HEAP32[i16 >> 2] | 0) != (i13 | 0)) {
+ _abort();
+ }
+ i20 = i18 + 8 | 0;
+ if ((HEAP32[i20 >> 2] | 0) == (i13 | 0)) {
+ HEAP32[i16 >> 2] = i18;
+ HEAP32[i20 >> 2] = i19;
+ i14 = i18;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ if ((i17 | 0) != 0) {
+ i18 = HEAP32[i7 + (i15 + 28) >> 2] | 0;
+ i16 = 14776 + (i18 << 2) | 0;
+ if ((i13 | 0) == (HEAP32[i16 >> 2] | 0)) {
+ HEAP32[i16 >> 2] = i14;
+ if ((i14 | 0) == 0) {
+ HEAP32[14476 >> 2] = HEAP32[14476 >> 2] & ~(1 << i18);
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ } else {
+ if (i17 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i16 = i17 + 16 | 0;
+ if ((HEAP32[i16 >> 2] | 0) == (i13 | 0)) {
+ HEAP32[i16 >> 2] = i14;
+ } else {
+ HEAP32[i17 + 20 >> 2] = i14;
+ }
+ if ((i14 | 0) == 0) {
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ }
+ if (i14 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i14 + 24 >> 2] = i17;
+ i16 = HEAP32[i7 + (i15 + 16) >> 2] | 0;
+ do {
+ if ((i16 | 0) != 0) {
+ if (i16 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i14 + 16 >> 2] = i16;
+ HEAP32[i16 + 24 >> 2] = i14;
+ break;
+ }
+ }
+ } while (0);
+ i15 = HEAP32[i7 + (i15 + 20) >> 2] | 0;
+ if ((i15 | 0) != 0) {
+ if (i15 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i14 + 20 >> 2] = i15;
+ HEAP32[i15 + 24 >> 2] = i14;
+ i2 = i13;
+ i11 = i12;
+ break;
+ }
+ } else {
+ i2 = i13;
+ i11 = i12;
+ }
+ } else {
+ i2 = i13;
+ i11 = i12;
+ }
+ } else {
+ i2 = i15;
+ i11 = i8;
+ }
+ } while (0);
+ if (!(i2 >>> 0 < i6 >>> 0)) {
+ _abort();
+ }
+ i12 = i7 + (i8 + -4) | 0;
+ i13 = HEAP32[i12 >> 2] | 0;
+ if ((i13 & 1 | 0) == 0) {
+ _abort();
+ }
+ if ((i13 & 2 | 0) == 0) {
+ if ((i6 | 0) == (HEAP32[14496 >> 2] | 0)) {
+ i21 = (HEAP32[14484 >> 2] | 0) + i11 | 0;
+ HEAP32[14484 >> 2] = i21;
+ HEAP32[14496 >> 2] = i2;
+ HEAP32[i2 + 4 >> 2] = i21 | 1;
+ if ((i2 | 0) != (HEAP32[14492 >> 2] | 0)) {
+ STACKTOP = i1;
+ return;
+ }
+ HEAP32[14492 >> 2] = 0;
+ HEAP32[14480 >> 2] = 0;
+ STACKTOP = i1;
+ return;
+ }
+ if ((i6 | 0) == (HEAP32[14492 >> 2] | 0)) {
+ i21 = (HEAP32[14480 >> 2] | 0) + i11 | 0;
+ HEAP32[14480 >> 2] = i21;
+ HEAP32[14492 >> 2] = i2;
+ HEAP32[i2 + 4 >> 2] = i21 | 1;
+ HEAP32[i2 + i21 >> 2] = i21;
+ STACKTOP = i1;
+ return;
+ }
+ i11 = (i13 & -8) + i11 | 0;
+ i12 = i13 >>> 3;
+ do {
+ if (!(i13 >>> 0 < 256)) {
+ i10 = HEAP32[i7 + (i8 + 16) >> 2] | 0;
+ i15 = HEAP32[i7 + (i8 | 4) >> 2] | 0;
+ do {
+ if ((i15 | 0) == (i6 | 0)) {
+ i13 = i7 + (i8 + 12) | 0;
+ i12 = HEAP32[i13 >> 2] | 0;
+ if ((i12 | 0) == 0) {
+ i13 = i7 + (i8 + 8) | 0;
+ i12 = HEAP32[i13 >> 2] | 0;
+ if ((i12 | 0) == 0) {
+ i9 = 0;
+ break;
+ }
+ }
+ while (1) {
+ i14 = i12 + 20 | 0;
+ i15 = HEAP32[i14 >> 2] | 0;
+ if ((i15 | 0) != 0) {
+ i12 = i15;
+ i13 = i14;
+ continue;
+ }
+ i14 = i12 + 16 | 0;
+ i15 = HEAP32[i14 >> 2] | 0;
+ if ((i15 | 0) == 0) {
+ break;
+ } else {
+ i12 = i15;
+ i13 = i14;
+ }
+ }
+ if (i13 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i13 >> 2] = 0;
+ i9 = i12;
+ break;
+ }
+ } else {
+ i13 = HEAP32[i7 + i8 >> 2] | 0;
+ if (i13 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i14 = i13 + 12 | 0;
+ if ((HEAP32[i14 >> 2] | 0) != (i6 | 0)) {
+ _abort();
+ }
+ i12 = i15 + 8 | 0;
+ if ((HEAP32[i12 >> 2] | 0) == (i6 | 0)) {
+ HEAP32[i14 >> 2] = i15;
+ HEAP32[i12 >> 2] = i13;
+ i9 = i15;
+ break;
+ } else {
+ _abort();
+ }
+ }
+ } while (0);
+ if ((i10 | 0) != 0) {
+ i12 = HEAP32[i7 + (i8 + 20) >> 2] | 0;
+ i13 = 14776 + (i12 << 2) | 0;
+ if ((i6 | 0) == (HEAP32[i13 >> 2] | 0)) {
+ HEAP32[i13 >> 2] = i9;
+ if ((i9 | 0) == 0) {
+ HEAP32[14476 >> 2] = HEAP32[14476 >> 2] & ~(1 << i12);
+ break;
+ }
+ } else {
+ if (i10 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i12 = i10 + 16 | 0;
+ if ((HEAP32[i12 >> 2] | 0) == (i6 | 0)) {
+ HEAP32[i12 >> 2] = i9;
+ } else {
+ HEAP32[i10 + 20 >> 2] = i9;
+ }
+ if ((i9 | 0) == 0) {
+ break;
+ }
+ }
+ if (i9 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ HEAP32[i9 + 24 >> 2] = i10;
+ i6 = HEAP32[i7 + (i8 + 8) >> 2] | 0;
+ do {
+ if ((i6 | 0) != 0) {
+ if (i6 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i9 + 16 >> 2] = i6;
+ HEAP32[i6 + 24 >> 2] = i9;
+ break;
+ }
+ }
+ } while (0);
+ i6 = HEAP32[i7 + (i8 + 12) >> 2] | 0;
+ if ((i6 | 0) != 0) {
+ if (i6 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i9 + 20 >> 2] = i6;
+ HEAP32[i6 + 24 >> 2] = i9;
+ break;
+ }
+ }
+ }
+ } else {
+ i9 = HEAP32[i7 + i8 >> 2] | 0;
+ i7 = HEAP32[i7 + (i8 | 4) >> 2] | 0;
+ i8 = 14512 + (i12 << 1 << 2) | 0;
+ if ((i9 | 0) != (i8 | 0)) {
+ if (i9 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ if ((HEAP32[i9 + 12 >> 2] | 0) != (i6 | 0)) {
+ _abort();
+ }
+ }
+ if ((i7 | 0) == (i9 | 0)) {
+ HEAP32[3618] = HEAP32[3618] & ~(1 << i12);
+ break;
+ }
+ if ((i7 | 0) != (i8 | 0)) {
+ if (i7 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ }
+ i8 = i7 + 8 | 0;
+ if ((HEAP32[i8 >> 2] | 0) == (i6 | 0)) {
+ i10 = i8;
+ } else {
+ _abort();
+ }
+ } else {
+ i10 = i7 + 8 | 0;
+ }
+ HEAP32[i9 + 12 >> 2] = i7;
+ HEAP32[i10 >> 2] = i9;
+ }
+ } while (0);
+ HEAP32[i2 + 4 >> 2] = i11 | 1;
+ HEAP32[i2 + i11 >> 2] = i11;
+ if ((i2 | 0) == (HEAP32[14492 >> 2] | 0)) {
+ HEAP32[14480 >> 2] = i11;
+ STACKTOP = i1;
+ return;
+ }
+ } else {
+ HEAP32[i12 >> 2] = i13 & -2;
+ HEAP32[i2 + 4 >> 2] = i11 | 1;
+ HEAP32[i2 + i11 >> 2] = i11;
+ }
+ i6 = i11 >>> 3;
+ if (i11 >>> 0 < 256) {
+ i7 = i6 << 1;
+ i3 = 14512 + (i7 << 2) | 0;
+ i8 = HEAP32[3618] | 0;
+ i6 = 1 << i6;
+ if ((i8 & i6 | 0) != 0) {
+ i6 = 14512 + (i7 + 2 << 2) | 0;
+ i7 = HEAP32[i6 >> 2] | 0;
+ if (i7 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ i4 = i6;
+ i5 = i7;
+ }
+ } else {
+ HEAP32[3618] = i8 | i6;
+ i4 = 14512 + (i7 + 2 << 2) | 0;
+ i5 = i3;
+ }
+ HEAP32[i4 >> 2] = i2;
+ HEAP32[i5 + 12 >> 2] = i2;
+ HEAP32[i2 + 8 >> 2] = i5;
+ HEAP32[i2 + 12 >> 2] = i3;
+ STACKTOP = i1;
+ return;
+ }
+ i4 = i11 >>> 8;
+ if ((i4 | 0) != 0) {
+ if (i11 >>> 0 > 16777215) {
+ i4 = 31;
+ } else {
+ i20 = (i4 + 1048320 | 0) >>> 16 & 8;
+ i21 = i4 << i20;
+ i19 = (i21 + 520192 | 0) >>> 16 & 4;
+ i21 = i21 << i19;
+ i4 = (i21 + 245760 | 0) >>> 16 & 2;
+ i4 = 14 - (i19 | i20 | i4) + (i21 << i4 >>> 15) | 0;
+ i4 = i11 >>> (i4 + 7 | 0) & 1 | i4 << 1;
+ }
+ } else {
+ i4 = 0;
+ }
+ i5 = 14776 + (i4 << 2) | 0;
+ HEAP32[i2 + 28 >> 2] = i4;
+ HEAP32[i2 + 20 >> 2] = 0;
+ HEAP32[i2 + 16 >> 2] = 0;
+ i7 = HEAP32[14476 >> 2] | 0;
+ i6 = 1 << i4;
+ L199 : do {
+ if ((i7 & i6 | 0) != 0) {
+ i5 = HEAP32[i5 >> 2] | 0;
+ if ((i4 | 0) == 31) {
+ i4 = 0;
+ } else {
+ i4 = 25 - (i4 >>> 1) | 0;
+ }
+ L204 : do {
+ if ((HEAP32[i5 + 4 >> 2] & -8 | 0) != (i11 | 0)) {
+ i4 = i11 << i4;
+ i7 = i5;
+ while (1) {
+ i6 = i7 + (i4 >>> 31 << 2) + 16 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ break;
+ }
+ if ((HEAP32[i5 + 4 >> 2] & -8 | 0) == (i11 | 0)) {
+ i3 = i5;
+ break L204;
+ } else {
+ i4 = i4 << 1;
+ i7 = i5;
+ }
+ }
+ if (i6 >>> 0 < (HEAP32[14488 >> 2] | 0) >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i6 >> 2] = i2;
+ HEAP32[i2 + 24 >> 2] = i7;
+ HEAP32[i2 + 12 >> 2] = i2;
+ HEAP32[i2 + 8 >> 2] = i2;
+ break L199;
+ }
+ } else {
+ i3 = i5;
+ }
+ } while (0);
+ i5 = i3 + 8 | 0;
+ i4 = HEAP32[i5 >> 2] | 0;
+ i6 = HEAP32[14488 >> 2] | 0;
+ if (i3 >>> 0 < i6 >>> 0) {
+ _abort();
+ }
+ if (i4 >>> 0 < i6 >>> 0) {
+ _abort();
+ } else {
+ HEAP32[i4 + 12 >> 2] = i2;
+ HEAP32[i5 >> 2] = i2;
+ HEAP32[i2 + 8 >> 2] = i4;
+ HEAP32[i2 + 12 >> 2] = i3;
+ HEAP32[i2 + 24 >> 2] = 0;
+ break;
+ }
+ } else {
+ HEAP32[14476 >> 2] = i7 | i6;
+ HEAP32[i5 >> 2] = i2;
+ HEAP32[i2 + 24 >> 2] = i5;
+ HEAP32[i2 + 12 >> 2] = i2;
+ HEAP32[i2 + 8 >> 2] = i2;
+ }
+ } while (0);
+ i21 = (HEAP32[14504 >> 2] | 0) + -1 | 0;
+ HEAP32[14504 >> 2] = i21;
+ if ((i21 | 0) == 0) {
+ i2 = 14928 | 0;
+ } else {
+ STACKTOP = i1;
+ return;
+ }
+ while (1) {
+ i2 = HEAP32[i2 >> 2] | 0;
+ if ((i2 | 0) == 0) {
+ break;
+ } else {
+ i2 = i2 + 8 | 0;
+ }
+ }
+ HEAP32[14504 >> 2] = -1;
+ STACKTOP = i1;
+ return;
+}
+function _build_tree(i4, i9) {
+ i4 = i4 | 0;
+ i9 = i9 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0, i23 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 32 | 0;
+ i1 = i2;
+ i3 = HEAP32[i9 >> 2] | 0;
+ i7 = i9 + 8 | 0;
+ i11 = HEAP32[i7 >> 2] | 0;
+ i12 = HEAP32[i11 >> 2] | 0;
+ i11 = HEAP32[i11 + 12 >> 2] | 0;
+ i8 = i4 + 5200 | 0;
+ HEAP32[i8 >> 2] = 0;
+ i6 = i4 + 5204 | 0;
+ HEAP32[i6 >> 2] = 573;
+ if ((i11 | 0) > 0) {
+ i5 = -1;
+ i13 = 0;
+ do {
+ if ((HEAP16[i3 + (i13 << 2) >> 1] | 0) == 0) {
+ HEAP16[i3 + (i13 << 2) + 2 >> 1] = 0;
+ } else {
+ i5 = (HEAP32[i8 >> 2] | 0) + 1 | 0;
+ HEAP32[i8 >> 2] = i5;
+ HEAP32[i4 + (i5 << 2) + 2908 >> 2] = i13;
+ HEAP8[i4 + i13 + 5208 | 0] = 0;
+ i5 = i13;
+ }
+ i13 = i13 + 1 | 0;
+ } while ((i13 | 0) != (i11 | 0));
+ i14 = HEAP32[i8 >> 2] | 0;
+ if ((i14 | 0) < 2) {
+ i10 = 3;
+ }
+ } else {
+ i14 = 0;
+ i5 = -1;
+ i10 = 3;
+ }
+ if ((i10 | 0) == 3) {
+ i10 = i4 + 5800 | 0;
+ i13 = i4 + 5804 | 0;
+ if ((i12 | 0) == 0) {
+ do {
+ i12 = (i5 | 0) < 2;
+ i13 = i5 + 1 | 0;
+ i5 = i12 ? i13 : i5;
+ i23 = i12 ? i13 : 0;
+ i14 = i14 + 1 | 0;
+ HEAP32[i8 >> 2] = i14;
+ HEAP32[i4 + (i14 << 2) + 2908 >> 2] = i23;
+ HEAP16[i3 + (i23 << 2) >> 1] = 1;
+ HEAP8[i4 + i23 + 5208 | 0] = 0;
+ HEAP32[i10 >> 2] = (HEAP32[i10 >> 2] | 0) + -1;
+ i14 = HEAP32[i8 >> 2] | 0;
+ } while ((i14 | 0) < 2);
+ } else {
+ do {
+ i15 = (i5 | 0) < 2;
+ i16 = i5 + 1 | 0;
+ i5 = i15 ? i16 : i5;
+ i23 = i15 ? i16 : 0;
+ i14 = i14 + 1 | 0;
+ HEAP32[i8 >> 2] = i14;
+ HEAP32[i4 + (i14 << 2) + 2908 >> 2] = i23;
+ HEAP16[i3 + (i23 << 2) >> 1] = 1;
+ HEAP8[i4 + i23 + 5208 | 0] = 0;
+ HEAP32[i10 >> 2] = (HEAP32[i10 >> 2] | 0) + -1;
+ HEAP32[i13 >> 2] = (HEAP32[i13 >> 2] | 0) - (HEAPU16[i12 + (i23 << 2) + 2 >> 1] | 0);
+ i14 = HEAP32[i8 >> 2] | 0;
+ } while ((i14 | 0) < 2);
+ }
+ }
+ i10 = i9 + 4 | 0;
+ HEAP32[i10 >> 2] = i5;
+ i12 = HEAP32[i8 >> 2] | 0;
+ if ((i12 | 0) > 1) {
+ i18 = i12;
+ i13 = (i12 | 0) / 2 | 0;
+ do {
+ i12 = HEAP32[i4 + (i13 << 2) + 2908 >> 2] | 0;
+ i14 = i4 + i12 + 5208 | 0;
+ i17 = i13 << 1;
+ L21 : do {
+ if ((i17 | 0) > (i18 | 0)) {
+ i15 = i13;
+ } else {
+ i16 = i3 + (i12 << 2) | 0;
+ i15 = i13;
+ while (1) {
+ do {
+ if ((i17 | 0) < (i18 | 0)) {
+ i18 = i17 | 1;
+ i19 = HEAP32[i4 + (i18 << 2) + 2908 >> 2] | 0;
+ i22 = HEAP16[i3 + (i19 << 2) >> 1] | 0;
+ i20 = HEAP32[i4 + (i17 << 2) + 2908 >> 2] | 0;
+ i21 = HEAP16[i3 + (i20 << 2) >> 1] | 0;
+ if (!((i22 & 65535) < (i21 & 65535))) {
+ if (!(i22 << 16 >> 16 == i21 << 16 >> 16)) {
+ break;
+ }
+ if ((HEAPU8[i4 + i19 + 5208 | 0] | 0) > (HEAPU8[i4 + i20 + 5208 | 0] | 0)) {
+ break;
+ }
+ }
+ i17 = i18;
+ }
+ } while (0);
+ i19 = HEAP16[i16 >> 1] | 0;
+ i18 = HEAP32[i4 + (i17 << 2) + 2908 >> 2] | 0;
+ i20 = HEAP16[i3 + (i18 << 2) >> 1] | 0;
+ if ((i19 & 65535) < (i20 & 65535)) {
+ break L21;
+ }
+ if (i19 << 16 >> 16 == i20 << 16 >> 16 ? (HEAPU8[i14] | 0) <= (HEAPU8[i4 + i18 + 5208 | 0] | 0) : 0) {
+ break L21;
+ }
+ HEAP32[i4 + (i15 << 2) + 2908 >> 2] = i18;
+ i19 = i17 << 1;
+ i18 = HEAP32[i8 >> 2] | 0;
+ if ((i19 | 0) > (i18 | 0)) {
+ i15 = i17;
+ break;
+ } else {
+ i15 = i17;
+ i17 = i19;
+ }
+ }
+ }
+ } while (0);
+ HEAP32[i4 + (i15 << 2) + 2908 >> 2] = i12;
+ i13 = i13 + -1 | 0;
+ i18 = HEAP32[i8 >> 2] | 0;
+ } while ((i13 | 0) > 0);
+ } else {
+ i18 = i12;
+ }
+ i12 = i4 + 2912 | 0;
+ while (1) {
+ i13 = HEAP32[i12 >> 2] | 0;
+ i20 = i18 + -1 | 0;
+ HEAP32[i8 >> 2] = i20;
+ i14 = HEAP32[i4 + (i18 << 2) + 2908 >> 2] | 0;
+ HEAP32[i12 >> 2] = i14;
+ i15 = i4 + i14 + 5208 | 0;
+ L40 : do {
+ if ((i18 | 0) < 3) {
+ i17 = 1;
+ } else {
+ i16 = i3 + (i14 << 2) | 0;
+ i17 = 1;
+ i18 = 2;
+ while (1) {
+ do {
+ if ((i18 | 0) < (i20 | 0)) {
+ i22 = i18 | 1;
+ i21 = HEAP32[i4 + (i22 << 2) + 2908 >> 2] | 0;
+ i23 = HEAP16[i3 + (i21 << 2) >> 1] | 0;
+ i20 = HEAP32[i4 + (i18 << 2) + 2908 >> 2] | 0;
+ i19 = HEAP16[i3 + (i20 << 2) >> 1] | 0;
+ if (!((i23 & 65535) < (i19 & 65535))) {
+ if (!(i23 << 16 >> 16 == i19 << 16 >> 16)) {
+ break;
+ }
+ if ((HEAPU8[i4 + i21 + 5208 | 0] | 0) > (HEAPU8[i4 + i20 + 5208 | 0] | 0)) {
+ break;
+ }
+ }
+ i18 = i22;
+ }
+ } while (0);
+ i21 = HEAP16[i16 >> 1] | 0;
+ i20 = HEAP32[i4 + (i18 << 2) + 2908 >> 2] | 0;
+ i19 = HEAP16[i3 + (i20 << 2) >> 1] | 0;
+ if ((i21 & 65535) < (i19 & 65535)) {
+ break L40;
+ }
+ if (i21 << 16 >> 16 == i19 << 16 >> 16 ? (HEAPU8[i15] | 0) <= (HEAPU8[i4 + i20 + 5208 | 0] | 0) : 0) {
+ break L40;
+ }
+ HEAP32[i4 + (i17 << 2) + 2908 >> 2] = i20;
+ i19 = i18 << 1;
+ i20 = HEAP32[i8 >> 2] | 0;
+ if ((i19 | 0) > (i20 | 0)) {
+ i17 = i18;
+ break;
+ } else {
+ i17 = i18;
+ i18 = i19;
+ }
+ }
+ }
+ } while (0);
+ HEAP32[i4 + (i17 << 2) + 2908 >> 2] = i14;
+ i17 = HEAP32[i12 >> 2] | 0;
+ i14 = (HEAP32[i6 >> 2] | 0) + -1 | 0;
+ HEAP32[i6 >> 2] = i14;
+ HEAP32[i4 + (i14 << 2) + 2908 >> 2] = i13;
+ i14 = (HEAP32[i6 >> 2] | 0) + -1 | 0;
+ HEAP32[i6 >> 2] = i14;
+ HEAP32[i4 + (i14 << 2) + 2908 >> 2] = i17;
+ i14 = i3 + (i11 << 2) | 0;
+ HEAP16[i14 >> 1] = (HEAPU16[i3 + (i17 << 2) >> 1] | 0) + (HEAPU16[i3 + (i13 << 2) >> 1] | 0);
+ i18 = HEAP8[i4 + i13 + 5208 | 0] | 0;
+ i16 = HEAP8[i4 + i17 + 5208 | 0] | 0;
+ i15 = i4 + i11 + 5208 | 0;
+ HEAP8[i15] = (((i18 & 255) < (i16 & 255) ? i16 : i18) & 255) + 1;
+ i19 = i11 & 65535;
+ HEAP16[i3 + (i17 << 2) + 2 >> 1] = i19;
+ HEAP16[i3 + (i13 << 2) + 2 >> 1] = i19;
+ i13 = i11 + 1 | 0;
+ HEAP32[i12 >> 2] = i11;
+ i19 = HEAP32[i8 >> 2] | 0;
+ L56 : do {
+ if ((i19 | 0) < 2) {
+ i16 = 1;
+ } else {
+ i16 = 1;
+ i17 = 2;
+ while (1) {
+ do {
+ if ((i17 | 0) < (i19 | 0)) {
+ i21 = i17 | 1;
+ i22 = HEAP32[i4 + (i21 << 2) + 2908 >> 2] | 0;
+ i19 = HEAP16[i3 + (i22 << 2) >> 1] | 0;
+ i18 = HEAP32[i4 + (i17 << 2) + 2908 >> 2] | 0;
+ i20 = HEAP16[i3 + (i18 << 2) >> 1] | 0;
+ if (!((i19 & 65535) < (i20 & 65535))) {
+ if (!(i19 << 16 >> 16 == i20 << 16 >> 16)) {
+ break;
+ }
+ if ((HEAPU8[i4 + i22 + 5208 | 0] | 0) > (HEAPU8[i4 + i18 + 5208 | 0] | 0)) {
+ break;
+ }
+ }
+ i17 = i21;
+ }
+ } while (0);
+ i19 = HEAP16[i14 >> 1] | 0;
+ i20 = HEAP32[i4 + (i17 << 2) + 2908 >> 2] | 0;
+ i18 = HEAP16[i3 + (i20 << 2) >> 1] | 0;
+ if ((i19 & 65535) < (i18 & 65535)) {
+ break L56;
+ }
+ if (i19 << 16 >> 16 == i18 << 16 >> 16 ? (HEAPU8[i15] | 0) <= (HEAPU8[i4 + i20 + 5208 | 0] | 0) : 0) {
+ break L56;
+ }
+ HEAP32[i4 + (i16 << 2) + 2908 >> 2] = i20;
+ i18 = i17 << 1;
+ i19 = HEAP32[i8 >> 2] | 0;
+ if ((i18 | 0) > (i19 | 0)) {
+ i16 = i17;
+ break;
+ } else {
+ i16 = i17;
+ i17 = i18;
+ }
+ }
+ }
+ } while (0);
+ HEAP32[i4 + (i16 << 2) + 2908 >> 2] = i11;
+ i18 = HEAP32[i8 >> 2] | 0;
+ if ((i18 | 0) > 1) {
+ i11 = i13;
+ } else {
+ break;
+ }
+ }
+ i12 = HEAP32[i12 >> 2] | 0;
+ i8 = (HEAP32[i6 >> 2] | 0) + -1 | 0;
+ HEAP32[i6 >> 2] = i8;
+ HEAP32[i4 + (i8 << 2) + 2908 >> 2] = i12;
+ i8 = HEAP32[i9 >> 2] | 0;
+ i9 = HEAP32[i10 >> 2] | 0;
+ i7 = HEAP32[i7 >> 2] | 0;
+ i12 = HEAP32[i7 >> 2] | 0;
+ i11 = HEAP32[i7 + 4 >> 2] | 0;
+ i10 = HEAP32[i7 + 8 >> 2] | 0;
+ i7 = HEAP32[i7 + 16 >> 2] | 0;
+ i13 = i4 + 2876 | 0;
+ i14 = i13 + 32 | 0;
+ do {
+ HEAP16[i13 >> 1] = 0;
+ i13 = i13 + 2 | 0;
+ } while ((i13 | 0) < (i14 | 0));
+ i14 = HEAP32[i6 >> 2] | 0;
+ HEAP16[i8 + (HEAP32[i4 + (i14 << 2) + 2908 >> 2] << 2) + 2 >> 1] = 0;
+ i14 = i14 + 1 | 0;
+ L72 : do {
+ if ((i14 | 0) < 573) {
+ i6 = i4 + 5800 | 0;
+ i13 = i4 + 5804 | 0;
+ if ((i12 | 0) == 0) {
+ i18 = 0;
+ do {
+ i12 = HEAP32[i4 + (i14 << 2) + 2908 >> 2] | 0;
+ i13 = i8 + (i12 << 2) + 2 | 0;
+ i15 = HEAPU16[i8 + (HEAPU16[i13 >> 1] << 2) + 2 >> 1] | 0;
+ i16 = (i15 | 0) < (i7 | 0);
+ i15 = i16 ? i15 + 1 | 0 : i7;
+ i18 = (i16 & 1 ^ 1) + i18 | 0;
+ HEAP16[i13 >> 1] = i15;
+ if ((i12 | 0) <= (i9 | 0)) {
+ i23 = i4 + (i15 << 1) + 2876 | 0;
+ HEAP16[i23 >> 1] = (HEAP16[i23 >> 1] | 0) + 1 << 16 >> 16;
+ if ((i12 | 0) < (i10 | 0)) {
+ i13 = 0;
+ } else {
+ i13 = HEAP32[i11 + (i12 - i10 << 2) >> 2] | 0;
+ }
+ i23 = Math_imul(HEAPU16[i8 + (i12 << 2) >> 1] | 0, i13 + i15 | 0) | 0;
+ HEAP32[i6 >> 2] = i23 + (HEAP32[i6 >> 2] | 0);
+ }
+ i14 = i14 + 1 | 0;
+ } while ((i14 | 0) != 573);
+ } else {
+ i18 = 0;
+ do {
+ i15 = HEAP32[i4 + (i14 << 2) + 2908 >> 2] | 0;
+ i16 = i8 + (i15 << 2) + 2 | 0;
+ i17 = HEAPU16[i8 + (HEAPU16[i16 >> 1] << 2) + 2 >> 1] | 0;
+ i19 = (i17 | 0) < (i7 | 0);
+ i17 = i19 ? i17 + 1 | 0 : i7;
+ i18 = (i19 & 1 ^ 1) + i18 | 0;
+ HEAP16[i16 >> 1] = i17;
+ if ((i15 | 0) <= (i9 | 0)) {
+ i23 = i4 + (i17 << 1) + 2876 | 0;
+ HEAP16[i23 >> 1] = (HEAP16[i23 >> 1] | 0) + 1 << 16 >> 16;
+ if ((i15 | 0) < (i10 | 0)) {
+ i16 = 0;
+ } else {
+ i16 = HEAP32[i11 + (i15 - i10 << 2) >> 2] | 0;
+ }
+ i23 = HEAPU16[i8 + (i15 << 2) >> 1] | 0;
+ i22 = Math_imul(i23, i16 + i17 | 0) | 0;
+ HEAP32[i6 >> 2] = i22 + (HEAP32[i6 >> 2] | 0);
+ i23 = Math_imul((HEAPU16[i12 + (i15 << 2) + 2 >> 1] | 0) + i16 | 0, i23) | 0;
+ HEAP32[i13 >> 2] = i23 + (HEAP32[i13 >> 2] | 0);
+ }
+ i14 = i14 + 1 | 0;
+ } while ((i14 | 0) != 573);
+ }
+ if ((i18 | 0) != 0) {
+ i10 = i4 + (i7 << 1) + 2876 | 0;
+ do {
+ i12 = i7;
+ while (1) {
+ i11 = i12 + -1 | 0;
+ i13 = i4 + (i11 << 1) + 2876 | 0;
+ i14 = HEAP16[i13 >> 1] | 0;
+ if (i14 << 16 >> 16 == 0) {
+ i12 = i11;
+ } else {
+ break;
+ }
+ }
+ HEAP16[i13 >> 1] = i14 + -1 << 16 >> 16;
+ i11 = i4 + (i12 << 1) + 2876 | 0;
+ HEAP16[i11 >> 1] = (HEAPU16[i11 >> 1] | 0) + 2;
+ i11 = (HEAP16[i10 >> 1] | 0) + -1 << 16 >> 16;
+ HEAP16[i10 >> 1] = i11;
+ i18 = i18 + -2 | 0;
+ } while ((i18 | 0) > 0);
+ if ((i7 | 0) != 0) {
+ i12 = 573;
+ while (1) {
+ i10 = i7 & 65535;
+ if (!(i11 << 16 >> 16 == 0)) {
+ i11 = i11 & 65535;
+ do {
+ do {
+ i12 = i12 + -1 | 0;
+ i15 = HEAP32[i4 + (i12 << 2) + 2908 >> 2] | 0;
+ } while ((i15 | 0) > (i9 | 0));
+ i13 = i8 + (i15 << 2) + 2 | 0;
+ i14 = HEAPU16[i13 >> 1] | 0;
+ if ((i14 | 0) != (i7 | 0)) {
+ i23 = Math_imul(HEAPU16[i8 + (i15 << 2) >> 1] | 0, i7 - i14 | 0) | 0;
+ HEAP32[i6 >> 2] = i23 + (HEAP32[i6 >> 2] | 0);
+ HEAP16[i13 >> 1] = i10;
+ }
+ i11 = i11 + -1 | 0;
+ } while ((i11 | 0) != 0);
+ }
+ i7 = i7 + -1 | 0;
+ if ((i7 | 0) == 0) {
+ break L72;
+ }
+ i11 = HEAP16[i4 + (i7 << 1) + 2876 >> 1] | 0;
+ }
+ }
+ }
+ }
+ } while (0);
+ i7 = 1;
+ i6 = 0;
+ do {
+ i6 = (HEAPU16[i4 + (i7 + -1 << 1) + 2876 >> 1] | 0) + (i6 & 65534) << 1;
+ HEAP16[i1 + (i7 << 1) >> 1] = i6;
+ i7 = i7 + 1 | 0;
+ } while ((i7 | 0) != 16);
+ if ((i5 | 0) < 0) {
+ STACKTOP = i2;
+ return;
+ } else {
+ i4 = 0;
+ }
+ while (1) {
+ i23 = HEAP16[i3 + (i4 << 2) + 2 >> 1] | 0;
+ i7 = i23 & 65535;
+ if (!(i23 << 16 >> 16 == 0)) {
+ i8 = i1 + (i7 << 1) | 0;
+ i6 = HEAP16[i8 >> 1] | 0;
+ HEAP16[i8 >> 1] = i6 + 1 << 16 >> 16;
+ i6 = i6 & 65535;
+ i8 = 0;
+ while (1) {
+ i8 = i8 | i6 & 1;
+ i7 = i7 + -1 | 0;
+ if ((i7 | 0) <= 0) {
+ break;
+ } else {
+ i6 = i6 >>> 1;
+ i8 = i8 << 1;
+ }
+ }
+ HEAP16[i3 + (i4 << 2) >> 1] = i8;
+ }
+ if ((i4 | 0) == (i5 | 0)) {
+ break;
+ } else {
+ i4 = i4 + 1 | 0;
+ }
+ }
+ STACKTOP = i2;
+ return;
+}
+function _deflate_slow(i2, i6) {
+ i2 = i2 | 0;
+ i6 = i6 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i5 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0, i23 = 0, i24 = 0, i25 = 0, i26 = 0, i27 = 0, i28 = 0, i29 = 0, i30 = 0, i31 = 0, i32 = 0, i33 = 0, i34 = 0, i35 = 0, i36 = 0;
+ i1 = STACKTOP;
+ i15 = i2 + 116 | 0;
+ i16 = (i6 | 0) == 0;
+ i17 = i2 + 72 | 0;
+ i18 = i2 + 88 | 0;
+ i5 = i2 + 108 | 0;
+ i7 = i2 + 56 | 0;
+ i19 = i2 + 84 | 0;
+ i20 = i2 + 68 | 0;
+ i22 = i2 + 52 | 0;
+ i21 = i2 + 64 | 0;
+ i9 = i2 + 96 | 0;
+ i10 = i2 + 120 | 0;
+ i11 = i2 + 112 | 0;
+ i12 = i2 + 100 | 0;
+ i26 = i2 + 5792 | 0;
+ i27 = i2 + 5796 | 0;
+ i29 = i2 + 5784 | 0;
+ i23 = i2 + 5788 | 0;
+ i8 = i2 + 104 | 0;
+ i4 = i2 + 92 | 0;
+ i24 = i2 + 128 | 0;
+ i14 = i2 + 44 | 0;
+ i13 = i2 + 136 | 0;
+ L1 : while (1) {
+ i30 = HEAP32[i15 >> 2] | 0;
+ while (1) {
+ if (i30 >>> 0 < 262) {
+ _fill_window(i2);
+ i30 = HEAP32[i15 >> 2] | 0;
+ if (i30 >>> 0 < 262 & i16) {
+ i2 = 0;
+ i30 = 50;
+ break L1;
+ }
+ if ((i30 | 0) == 0) {
+ i30 = 40;
+ break L1;
+ }
+ if (!(i30 >>> 0 > 2)) {
+ HEAP32[i10 >> 2] = HEAP32[i9 >> 2];
+ HEAP32[i12 >> 2] = HEAP32[i11 >> 2];
+ HEAP32[i9 >> 2] = 2;
+ i32 = 2;
+ i30 = 16;
+ } else {
+ i30 = 8;
+ }
+ } else {
+ i30 = 8;
+ }
+ do {
+ if ((i30 | 0) == 8) {
+ i30 = 0;
+ i34 = HEAP32[i5 >> 2] | 0;
+ i31 = ((HEAPU8[(HEAP32[i7 >> 2] | 0) + (i34 + 2) | 0] | 0) ^ HEAP32[i17 >> 2] << HEAP32[i18 >> 2]) & HEAP32[i19 >> 2];
+ HEAP32[i17 >> 2] = i31;
+ i31 = (HEAP32[i20 >> 2] | 0) + (i31 << 1) | 0;
+ i35 = HEAP16[i31 >> 1] | 0;
+ HEAP16[(HEAP32[i21 >> 2] | 0) + ((HEAP32[i22 >> 2] & i34) << 1) >> 1] = i35;
+ i32 = i35 & 65535;
+ HEAP16[i31 >> 1] = i34;
+ i31 = HEAP32[i9 >> 2] | 0;
+ HEAP32[i10 >> 2] = i31;
+ HEAP32[i12 >> 2] = HEAP32[i11 >> 2];
+ HEAP32[i9 >> 2] = 2;
+ if (!(i35 << 16 >> 16 == 0)) {
+ if (i31 >>> 0 < (HEAP32[i24 >> 2] | 0) >>> 0) {
+ if (!(((HEAP32[i5 >> 2] | 0) - i32 | 0) >>> 0 > ((HEAP32[i14 >> 2] | 0) + -262 | 0) >>> 0)) {
+ i32 = _longest_match(i2, i32) | 0;
+ HEAP32[i9 >> 2] = i32;
+ if (i32 >>> 0 < 6) {
+ if ((HEAP32[i13 >> 2] | 0) != 1) {
+ if ((i32 | 0) != 3) {
+ i30 = 16;
+ break;
+ }
+ if (!(((HEAP32[i5 >> 2] | 0) - (HEAP32[i11 >> 2] | 0) | 0) >>> 0 > 4096)) {
+ i32 = 3;
+ i30 = 16;
+ break;
+ }
+ }
+ HEAP32[i9 >> 2] = 2;
+ i32 = 2;
+ i30 = 16;
+ } else {
+ i30 = 16;
+ }
+ } else {
+ i32 = 2;
+ i30 = 16;
+ }
+ } else {
+ i32 = 2;
+ }
+ } else {
+ i32 = 2;
+ i30 = 16;
+ }
+ }
+ } while (0);
+ if ((i30 | 0) == 16) {
+ i31 = HEAP32[i10 >> 2] | 0;
+ }
+ if (!(i31 >>> 0 < 3 | i32 >>> 0 > i31 >>> 0)) {
+ break;
+ }
+ if ((HEAP32[i8 >> 2] | 0) == 0) {
+ HEAP32[i8 >> 2] = 1;
+ HEAP32[i5 >> 2] = (HEAP32[i5 >> 2] | 0) + 1;
+ i30 = (HEAP32[i15 >> 2] | 0) + -1 | 0;
+ HEAP32[i15 >> 2] = i30;
+ continue;
+ }
+ i35 = HEAP8[(HEAP32[i7 >> 2] | 0) + ((HEAP32[i5 >> 2] | 0) + -1) | 0] | 0;
+ i34 = HEAP32[i26 >> 2] | 0;
+ HEAP16[(HEAP32[i27 >> 2] | 0) + (i34 << 1) >> 1] = 0;
+ HEAP32[i26 >> 2] = i34 + 1;
+ HEAP8[(HEAP32[i29 >> 2] | 0) + i34 | 0] = i35;
+ i35 = i2 + ((i35 & 255) << 2) + 148 | 0;
+ HEAP16[i35 >> 1] = (HEAP16[i35 >> 1] | 0) + 1 << 16 >> 16;
+ if ((HEAP32[i26 >> 2] | 0) == ((HEAP32[i23 >> 2] | 0) + -1 | 0)) {
+ i30 = HEAP32[i4 >> 2] | 0;
+ if ((i30 | 0) > -1) {
+ i31 = (HEAP32[i7 >> 2] | 0) + i30 | 0;
+ } else {
+ i31 = 0;
+ }
+ __tr_flush_block(i2, i31, (HEAP32[i5 >> 2] | 0) - i30 | 0, 0);
+ HEAP32[i4 >> 2] = HEAP32[i5 >> 2];
+ i33 = HEAP32[i2 >> 2] | 0;
+ i32 = i33 + 28 | 0;
+ i30 = HEAP32[i32 >> 2] | 0;
+ i35 = HEAP32[i30 + 20 >> 2] | 0;
+ i31 = i33 + 16 | 0;
+ i34 = HEAP32[i31 >> 2] | 0;
+ i34 = i35 >>> 0 > i34 >>> 0 ? i34 : i35;
+ if ((i34 | 0) != 0 ? (i28 = i33 + 12 | 0, _memcpy(HEAP32[i28 >> 2] | 0, HEAP32[i30 + 16 >> 2] | 0, i34 | 0) | 0, HEAP32[i28 >> 2] = (HEAP32[i28 >> 2] | 0) + i34, i28 = (HEAP32[i32 >> 2] | 0) + 16 | 0, HEAP32[i28 >> 2] = (HEAP32[i28 >> 2] | 0) + i34, i28 = i33 + 20 | 0, HEAP32[i28 >> 2] = (HEAP32[i28 >> 2] | 0) + i34, HEAP32[i31 >> 2] = (HEAP32[i31 >> 2] | 0) - i34, i28 = HEAP32[i32 >> 2] | 0, i33 = i28 + 20 | 0, i35 = HEAP32[i33 >> 2] | 0, HEAP32[i33 >> 2] = i35 - i34, (i35 | 0) == (i34 | 0)) : 0) {
+ HEAP32[i28 + 16 >> 2] = HEAP32[i28 + 8 >> 2];
+ }
+ }
+ HEAP32[i5 >> 2] = (HEAP32[i5 >> 2] | 0) + 1;
+ i30 = (HEAP32[i15 >> 2] | 0) + -1 | 0;
+ HEAP32[i15 >> 2] = i30;
+ if ((HEAP32[(HEAP32[i2 >> 2] | 0) + 16 >> 2] | 0) == 0) {
+ i2 = 0;
+ i30 = 50;
+ break L1;
+ }
+ }
+ i34 = HEAP32[i5 >> 2] | 0;
+ i30 = i34 + -3 + (HEAP32[i15 >> 2] | 0) | 0;
+ i35 = i31 + 253 | 0;
+ i31 = i34 + 65535 - (HEAP32[i12 >> 2] | 0) | 0;
+ i34 = HEAP32[i26 >> 2] | 0;
+ HEAP16[(HEAP32[i27 >> 2] | 0) + (i34 << 1) >> 1] = i31;
+ HEAP32[i26 >> 2] = i34 + 1;
+ HEAP8[(HEAP32[i29 >> 2] | 0) + i34 | 0] = i35;
+ i35 = i2 + ((HEAPU8[808 + (i35 & 255) | 0] | 0 | 256) + 1 << 2) + 148 | 0;
+ HEAP16[i35 >> 1] = (HEAP16[i35 >> 1] | 0) + 1 << 16 >> 16;
+ i31 = i31 + 65535 & 65535;
+ if (!(i31 >>> 0 < 256)) {
+ i31 = (i31 >>> 7) + 256 | 0;
+ }
+ i32 = i2 + ((HEAPU8[296 + i31 | 0] | 0) << 2) + 2440 | 0;
+ HEAP16[i32 >> 1] = (HEAP16[i32 >> 1] | 0) + 1 << 16 >> 16;
+ i32 = HEAP32[i26 >> 2] | 0;
+ i31 = (HEAP32[i23 >> 2] | 0) + -1 | 0;
+ i34 = HEAP32[i10 >> 2] | 0;
+ HEAP32[i15 >> 2] = 1 - i34 + (HEAP32[i15 >> 2] | 0);
+ i34 = i34 + -2 | 0;
+ HEAP32[i10 >> 2] = i34;
+ i33 = HEAP32[i5 >> 2] | 0;
+ while (1) {
+ i35 = i33 + 1 | 0;
+ HEAP32[i5 >> 2] = i35;
+ if (!(i35 >>> 0 > i30 >>> 0)) {
+ i36 = ((HEAPU8[(HEAP32[i7 >> 2] | 0) + (i33 + 3) | 0] | 0) ^ HEAP32[i17 >> 2] << HEAP32[i18 >> 2]) & HEAP32[i19 >> 2];
+ HEAP32[i17 >> 2] = i36;
+ i36 = (HEAP32[i20 >> 2] | 0) + (i36 << 1) | 0;
+ HEAP16[(HEAP32[i21 >> 2] | 0) + ((HEAP32[i22 >> 2] & i35) << 1) >> 1] = HEAP16[i36 >> 1] | 0;
+ HEAP16[i36 >> 1] = i35;
+ }
+ i34 = i34 + -1 | 0;
+ HEAP32[i10 >> 2] = i34;
+ if ((i34 | 0) == 0) {
+ break;
+ } else {
+ i33 = i35;
+ }
+ }
+ HEAP32[i8 >> 2] = 0;
+ HEAP32[i9 >> 2] = 2;
+ i30 = i33 + 2 | 0;
+ HEAP32[i5 >> 2] = i30;
+ if ((i32 | 0) != (i31 | 0)) {
+ continue;
+ }
+ i32 = HEAP32[i4 >> 2] | 0;
+ if ((i32 | 0) > -1) {
+ i31 = (HEAP32[i7 >> 2] | 0) + i32 | 0;
+ } else {
+ i31 = 0;
+ }
+ __tr_flush_block(i2, i31, i30 - i32 | 0, 0);
+ HEAP32[i4 >> 2] = HEAP32[i5 >> 2];
+ i33 = HEAP32[i2 >> 2] | 0;
+ i31 = i33 + 28 | 0;
+ i32 = HEAP32[i31 >> 2] | 0;
+ i35 = HEAP32[i32 + 20 >> 2] | 0;
+ i30 = i33 + 16 | 0;
+ i34 = HEAP32[i30 >> 2] | 0;
+ i34 = i35 >>> 0 > i34 >>> 0 ? i34 : i35;
+ if ((i34 | 0) != 0 ? (i25 = i33 + 12 | 0, _memcpy(HEAP32[i25 >> 2] | 0, HEAP32[i32 + 16 >> 2] | 0, i34 | 0) | 0, HEAP32[i25 >> 2] = (HEAP32[i25 >> 2] | 0) + i34, i25 = (HEAP32[i31 >> 2] | 0) + 16 | 0, HEAP32[i25 >> 2] = (HEAP32[i25 >> 2] | 0) + i34, i25 = i33 + 20 | 0, HEAP32[i25 >> 2] = (HEAP32[i25 >> 2] | 0) + i34, HEAP32[i30 >> 2] = (HEAP32[i30 >> 2] | 0) - i34, i25 = HEAP32[i31 >> 2] | 0, i35 = i25 + 20 | 0, i36 = HEAP32[i35 >> 2] | 0, HEAP32[i35 >> 2] = i36 - i34, (i36 | 0) == (i34 | 0)) : 0) {
+ HEAP32[i25 + 16 >> 2] = HEAP32[i25 + 8 >> 2];
+ }
+ if ((HEAP32[(HEAP32[i2 >> 2] | 0) + 16 >> 2] | 0) == 0) {
+ i2 = 0;
+ i30 = 50;
+ break;
+ }
+ }
+ if ((i30 | 0) == 40) {
+ if ((HEAP32[i8 >> 2] | 0) != 0) {
+ i36 = HEAP8[(HEAP32[i7 >> 2] | 0) + ((HEAP32[i5 >> 2] | 0) + -1) | 0] | 0;
+ i35 = HEAP32[i26 >> 2] | 0;
+ HEAP16[(HEAP32[i27 >> 2] | 0) + (i35 << 1) >> 1] = 0;
+ HEAP32[i26 >> 2] = i35 + 1;
+ HEAP8[(HEAP32[i29 >> 2] | 0) + i35 | 0] = i36;
+ i36 = i2 + ((i36 & 255) << 2) + 148 | 0;
+ HEAP16[i36 >> 1] = (HEAP16[i36 >> 1] | 0) + 1 << 16 >> 16;
+ HEAP32[i8 >> 2] = 0;
+ }
+ i8 = HEAP32[i4 >> 2] | 0;
+ if ((i8 | 0) > -1) {
+ i7 = (HEAP32[i7 >> 2] | 0) + i8 | 0;
+ } else {
+ i7 = 0;
+ }
+ i6 = (i6 | 0) == 4;
+ __tr_flush_block(i2, i7, (HEAP32[i5 >> 2] | 0) - i8 | 0, i6 & 1);
+ HEAP32[i4 >> 2] = HEAP32[i5 >> 2];
+ i4 = HEAP32[i2 >> 2] | 0;
+ i7 = i4 + 28 | 0;
+ i5 = HEAP32[i7 >> 2] | 0;
+ i10 = HEAP32[i5 + 20 >> 2] | 0;
+ i8 = i4 + 16 | 0;
+ i9 = HEAP32[i8 >> 2] | 0;
+ i9 = i10 >>> 0 > i9 >>> 0 ? i9 : i10;
+ if ((i9 | 0) != 0 ? (i3 = i4 + 12 | 0, _memcpy(HEAP32[i3 >> 2] | 0, HEAP32[i5 + 16 >> 2] | 0, i9 | 0) | 0, HEAP32[i3 >> 2] = (HEAP32[i3 >> 2] | 0) + i9, i3 = (HEAP32[i7 >> 2] | 0) + 16 | 0, HEAP32[i3 >> 2] = (HEAP32[i3 >> 2] | 0) + i9, i3 = i4 + 20 | 0, HEAP32[i3 >> 2] = (HEAP32[i3 >> 2] | 0) + i9, HEAP32[i8 >> 2] = (HEAP32[i8 >> 2] | 0) - i9, i3 = HEAP32[i7 >> 2] | 0, i35 = i3 + 20 | 0, i36 = HEAP32[i35 >> 2] | 0, HEAP32[i35 >> 2] = i36 - i9, (i36 | 0) == (i9 | 0)) : 0) {
+ HEAP32[i3 + 16 >> 2] = HEAP32[i3 + 8 >> 2];
+ }
+ if ((HEAP32[(HEAP32[i2 >> 2] | 0) + 16 >> 2] | 0) == 0) {
+ i36 = i6 ? 2 : 0;
+ STACKTOP = i1;
+ return i36 | 0;
+ } else {
+ i36 = i6 ? 3 : 1;
+ STACKTOP = i1;
+ return i36 | 0;
+ }
+ } else if ((i30 | 0) == 50) {
+ STACKTOP = i1;
+ return i2 | 0;
+ }
+ return 0;
+}
+function _inflate_fast(i7, i19) {
+ i7 = i7 | 0;
+ i19 = i19 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i20 = 0, i21 = 0, i22 = 0, i23 = 0, i24 = 0, i25 = 0, i26 = 0, i27 = 0, i28 = 0, i29 = 0, i30 = 0, i31 = 0, i32 = 0, i33 = 0, i34 = 0, i35 = 0, i36 = 0, i37 = 0;
+ i1 = STACKTOP;
+ i11 = HEAP32[i7 + 28 >> 2] | 0;
+ i29 = HEAP32[i7 >> 2] | 0;
+ i5 = i7 + 4 | 0;
+ i8 = i29 + ((HEAP32[i5 >> 2] | 0) + -6) | 0;
+ i9 = i7 + 12 | 0;
+ i28 = HEAP32[i9 >> 2] | 0;
+ i4 = i7 + 16 | 0;
+ i25 = HEAP32[i4 >> 2] | 0;
+ i6 = i28 + (i25 + -258) | 0;
+ i17 = HEAP32[i11 + 44 >> 2] | 0;
+ i12 = HEAP32[i11 + 48 >> 2] | 0;
+ i18 = HEAP32[i11 + 52 >> 2] | 0;
+ i3 = i11 + 56 | 0;
+ i2 = i11 + 60 | 0;
+ i16 = HEAP32[i11 + 76 >> 2] | 0;
+ i13 = HEAP32[i11 + 80 >> 2] | 0;
+ i14 = (1 << HEAP32[i11 + 84 >> 2]) + -1 | 0;
+ i15 = (1 << HEAP32[i11 + 88 >> 2]) + -1 | 0;
+ i19 = i28 + (i25 + ~i19) | 0;
+ i25 = i11 + 7104 | 0;
+ i20 = i18 + -1 | 0;
+ i27 = (i12 | 0) == 0;
+ i24 = (HEAP32[i11 + 40 >> 2] | 0) + -1 | 0;
+ i21 = i24 + i12 | 0;
+ i22 = i12 + -1 | 0;
+ i23 = i19 + -1 | 0;
+ i26 = i19 - i12 | 0;
+ i31 = HEAP32[i2 >> 2] | 0;
+ i30 = HEAP32[i3 >> 2] | 0;
+ i29 = i29 + -1 | 0;
+ i28 = i28 + -1 | 0;
+ L1 : do {
+ if (i31 >>> 0 < 15) {
+ i37 = i29 + 2 | 0;
+ i33 = i31 + 16 | 0;
+ i30 = ((HEAPU8[i29 + 1 | 0] | 0) << i31) + i30 + ((HEAPU8[i37] | 0) << i31 + 8) | 0;
+ i29 = i37;
+ } else {
+ i33 = i31;
+ }
+ i31 = i30 & i14;
+ i34 = HEAP8[i16 + (i31 << 2) | 0] | 0;
+ i32 = HEAP16[i16 + (i31 << 2) + 2 >> 1] | 0;
+ i31 = HEAPU8[i16 + (i31 << 2) + 1 | 0] | 0;
+ i30 = i30 >>> i31;
+ i31 = i33 - i31 | 0;
+ do {
+ if (!(i34 << 24 >> 24 == 0)) {
+ i33 = i34 & 255;
+ while (1) {
+ if ((i33 & 16 | 0) != 0) {
+ break;
+ }
+ if ((i33 & 64 | 0) != 0) {
+ i10 = 55;
+ break L1;
+ }
+ i37 = (i30 & (1 << i33) + -1) + (i32 & 65535) | 0;
+ i33 = HEAP8[i16 + (i37 << 2) | 0] | 0;
+ i32 = HEAP16[i16 + (i37 << 2) + 2 >> 1] | 0;
+ i37 = HEAPU8[i16 + (i37 << 2) + 1 | 0] | 0;
+ i30 = i30 >>> i37;
+ i31 = i31 - i37 | 0;
+ if (i33 << 24 >> 24 == 0) {
+ i10 = 6;
+ break;
+ } else {
+ i33 = i33 & 255;
+ }
+ }
+ if ((i10 | 0) == 6) {
+ i32 = i32 & 255;
+ i10 = 7;
+ break;
+ }
+ i32 = i32 & 65535;
+ i33 = i33 & 15;
+ if ((i33 | 0) != 0) {
+ if (i31 >>> 0 < i33 >>> 0) {
+ i29 = i29 + 1 | 0;
+ i35 = i31 + 8 | 0;
+ i34 = ((HEAPU8[i29] | 0) << i31) + i30 | 0;
+ } else {
+ i35 = i31;
+ i34 = i30;
+ }
+ i31 = i35 - i33 | 0;
+ i30 = i34 >>> i33;
+ i32 = (i34 & (1 << i33) + -1) + i32 | 0;
+ }
+ if (i31 >>> 0 < 15) {
+ i37 = i29 + 2 | 0;
+ i34 = i31 + 16 | 0;
+ i30 = ((HEAPU8[i29 + 1 | 0] | 0) << i31) + i30 + ((HEAPU8[i37] | 0) << i31 + 8) | 0;
+ i29 = i37;
+ } else {
+ i34 = i31;
+ }
+ i37 = i30 & i15;
+ i33 = HEAP16[i13 + (i37 << 2) + 2 >> 1] | 0;
+ i31 = HEAPU8[i13 + (i37 << 2) + 1 | 0] | 0;
+ i30 = i30 >>> i31;
+ i31 = i34 - i31 | 0;
+ i34 = HEAPU8[i13 + (i37 << 2) | 0] | 0;
+ if ((i34 & 16 | 0) == 0) {
+ do {
+ if ((i34 & 64 | 0) != 0) {
+ i10 = 52;
+ break L1;
+ }
+ i34 = (i30 & (1 << i34) + -1) + (i33 & 65535) | 0;
+ i33 = HEAP16[i13 + (i34 << 2) + 2 >> 1] | 0;
+ i37 = HEAPU8[i13 + (i34 << 2) + 1 | 0] | 0;
+ i30 = i30 >>> i37;
+ i31 = i31 - i37 | 0;
+ i34 = HEAPU8[i13 + (i34 << 2) | 0] | 0;
+ } while ((i34 & 16 | 0) == 0);
+ }
+ i33 = i33 & 65535;
+ i34 = i34 & 15;
+ if (i31 >>> 0 < i34 >>> 0) {
+ i35 = i29 + 1 | 0;
+ i30 = ((HEAPU8[i35] | 0) << i31) + i30 | 0;
+ i36 = i31 + 8 | 0;
+ if (i36 >>> 0 < i34 >>> 0) {
+ i29 = i29 + 2 | 0;
+ i31 = i31 + 16 | 0;
+ i30 = ((HEAPU8[i29] | 0) << i36) + i30 | 0;
+ } else {
+ i31 = i36;
+ i29 = i35;
+ }
+ }
+ i33 = (i30 & (1 << i34) + -1) + i33 | 0;
+ i30 = i30 >>> i34;
+ i31 = i31 - i34 | 0;
+ i35 = i28;
+ i34 = i35 - i19 | 0;
+ if (!(i33 >>> 0 > i34 >>> 0)) {
+ i34 = i28 + (0 - i33) | 0;
+ while (1) {
+ HEAP8[i28 + 1 | 0] = HEAP8[i34 + 1 | 0] | 0;
+ HEAP8[i28 + 2 | 0] = HEAP8[i34 + 2 | 0] | 0;
+ i35 = i34 + 3 | 0;
+ i33 = i28 + 3 | 0;
+ HEAP8[i33] = HEAP8[i35] | 0;
+ i32 = i32 + -3 | 0;
+ if (!(i32 >>> 0 > 2)) {
+ break;
+ } else {
+ i34 = i35;
+ i28 = i33;
+ }
+ }
+ if ((i32 | 0) == 0) {
+ i28 = i33;
+ break;
+ }
+ i33 = i28 + 4 | 0;
+ HEAP8[i33] = HEAP8[i34 + 4 | 0] | 0;
+ if (!(i32 >>> 0 > 1)) {
+ i28 = i33;
+ break;
+ }
+ i28 = i28 + 5 | 0;
+ HEAP8[i28] = HEAP8[i34 + 5 | 0] | 0;
+ break;
+ }
+ i34 = i33 - i34 | 0;
+ if (i34 >>> 0 > i17 >>> 0 ? (HEAP32[i25 >> 2] | 0) != 0 : 0) {
+ i10 = 22;
+ break L1;
+ }
+ do {
+ if (i27) {
+ i36 = i18 + (i24 - i34) | 0;
+ if (i34 >>> 0 < i32 >>> 0) {
+ i32 = i32 - i34 | 0;
+ i35 = i33 - i35 | 0;
+ i37 = i28;
+ do {
+ i36 = i36 + 1 | 0;
+ i37 = i37 + 1 | 0;
+ HEAP8[i37] = HEAP8[i36] | 0;
+ i34 = i34 + -1 | 0;
+ } while ((i34 | 0) != 0);
+ i33 = i28 + (i23 + i35 + (1 - i33)) | 0;
+ i28 = i28 + (i19 + i35) | 0;
+ } else {
+ i33 = i36;
+ }
+ } else {
+ if (!(i12 >>> 0 < i34 >>> 0)) {
+ i36 = i18 + (i22 - i34) | 0;
+ if (!(i34 >>> 0 < i32 >>> 0)) {
+ i33 = i36;
+ break;
+ }
+ i32 = i32 - i34 | 0;
+ i35 = i33 - i35 | 0;
+ i37 = i28;
+ do {
+ i36 = i36 + 1 | 0;
+ i37 = i37 + 1 | 0;
+ HEAP8[i37] = HEAP8[i36] | 0;
+ i34 = i34 + -1 | 0;
+ } while ((i34 | 0) != 0);
+ i33 = i28 + (i23 + i35 + (1 - i33)) | 0;
+ i28 = i28 + (i19 + i35) | 0;
+ break;
+ }
+ i37 = i18 + (i21 - i34) | 0;
+ i36 = i34 - i12 | 0;
+ if (i36 >>> 0 < i32 >>> 0) {
+ i32 = i32 - i36 | 0;
+ i34 = i33 - i35 | 0;
+ i35 = i28;
+ do {
+ i37 = i37 + 1 | 0;
+ i35 = i35 + 1 | 0;
+ HEAP8[i35] = HEAP8[i37] | 0;
+ i36 = i36 + -1 | 0;
+ } while ((i36 | 0) != 0);
+ i35 = i28 + (i26 + i34) | 0;
+ if (i12 >>> 0 < i32 >>> 0) {
+ i32 = i32 - i12 | 0;
+ i37 = i20;
+ i36 = i12;
+ do {
+ i37 = i37 + 1 | 0;
+ i35 = i35 + 1 | 0;
+ HEAP8[i35] = HEAP8[i37] | 0;
+ i36 = i36 + -1 | 0;
+ } while ((i36 | 0) != 0);
+ i33 = i28 + (i23 + i34 + (1 - i33)) | 0;
+ i28 = i28 + (i19 + i34) | 0;
+ } else {
+ i33 = i20;
+ i28 = i35;
+ }
+ } else {
+ i33 = i37;
+ }
+ }
+ } while (0);
+ if (i32 >>> 0 > 2) {
+ do {
+ HEAP8[i28 + 1 | 0] = HEAP8[i33 + 1 | 0] | 0;
+ HEAP8[i28 + 2 | 0] = HEAP8[i33 + 2 | 0] | 0;
+ i33 = i33 + 3 | 0;
+ i28 = i28 + 3 | 0;
+ HEAP8[i28] = HEAP8[i33] | 0;
+ i32 = i32 + -3 | 0;
+ } while (i32 >>> 0 > 2);
+ }
+ if ((i32 | 0) != 0) {
+ i34 = i28 + 1 | 0;
+ HEAP8[i34] = HEAP8[i33 + 1 | 0] | 0;
+ if (i32 >>> 0 > 1) {
+ i28 = i28 + 2 | 0;
+ HEAP8[i28] = HEAP8[i33 + 2 | 0] | 0;
+ } else {
+ i28 = i34;
+ }
+ }
+ } else {
+ i32 = i32 & 255;
+ i10 = 7;
+ }
+ } while (0);
+ if ((i10 | 0) == 7) {
+ i10 = 0;
+ i28 = i28 + 1 | 0;
+ HEAP8[i28] = i32;
+ }
+ } while (i29 >>> 0 < i8 >>> 0 & i28 >>> 0 < i6 >>> 0);
+ do {
+ if ((i10 | 0) == 22) {
+ HEAP32[i7 + 24 >> 2] = 14384;
+ HEAP32[i11 >> 2] = 29;
+ } else if ((i10 | 0) == 52) {
+ HEAP32[i7 + 24 >> 2] = 14416;
+ HEAP32[i11 >> 2] = 29;
+ } else if ((i10 | 0) == 55) {
+ if ((i33 & 32 | 0) == 0) {
+ HEAP32[i7 + 24 >> 2] = 14440;
+ HEAP32[i11 >> 2] = 29;
+ break;
+ } else {
+ HEAP32[i11 >> 2] = 11;
+ break;
+ }
+ }
+ } while (0);
+ i37 = i31 >>> 3;
+ i11 = i29 + (0 - i37) | 0;
+ i10 = i31 - (i37 << 3) | 0;
+ i12 = (1 << i10) + -1 & i30;
+ HEAP32[i7 >> 2] = i29 + (1 - i37);
+ HEAP32[i9 >> 2] = i28 + 1;
+ if (i11 >>> 0 < i8 >>> 0) {
+ i7 = i8 - i11 | 0;
+ } else {
+ i7 = i8 - i11 | 0;
+ }
+ HEAP32[i5 >> 2] = i7 + 5;
+ if (i28 >>> 0 < i6 >>> 0) {
+ i37 = i6 - i28 | 0;
+ i37 = i37 + 257 | 0;
+ HEAP32[i4 >> 2] = i37;
+ HEAP32[i3 >> 2] = i12;
+ HEAP32[i2 >> 2] = i10;
+ STACKTOP = i1;
+ return;
+ } else {
+ i37 = i6 - i28 | 0;
+ i37 = i37 + 257 | 0;
+ HEAP32[i4 >> 2] = i37;
+ HEAP32[i3 >> 2] = i12;
+ HEAP32[i2 >> 2] = i10;
+ STACKTOP = i1;
+ return;
+ }
+}
+function _send_tree(i2, i13, i12) {
+ i2 = i2 | 0;
+ i13 = i13 | 0;
+ i12 = i12 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0, i23 = 0, i24 = 0, i25 = 0, i26 = 0, i27 = 0;
+ i11 = STACKTOP;
+ i15 = HEAP16[i13 + 2 >> 1] | 0;
+ i16 = i15 << 16 >> 16 == 0;
+ i7 = i2 + 2754 | 0;
+ i4 = i2 + 5820 | 0;
+ i8 = i2 + 2752 | 0;
+ i3 = i2 + 5816 | 0;
+ i14 = i2 + 20 | 0;
+ i10 = i2 + 8 | 0;
+ i9 = i2 + 2758 | 0;
+ i1 = i2 + 2756 | 0;
+ i5 = i2 + 2750 | 0;
+ i6 = i2 + 2748 | 0;
+ i21 = i16 ? 138 : 7;
+ i23 = i16 ? 3 : 4;
+ i18 = 0;
+ i15 = i15 & 65535;
+ i24 = -1;
+ L1 : while (1) {
+ i20 = 0;
+ while (1) {
+ if ((i18 | 0) > (i12 | 0)) {
+ break L1;
+ }
+ i18 = i18 + 1 | 0;
+ i19 = HEAP16[i13 + (i18 << 2) + 2 >> 1] | 0;
+ i16 = i19 & 65535;
+ i22 = i20 + 1 | 0;
+ i17 = (i15 | 0) == (i16 | 0);
+ if (!((i22 | 0) < (i21 | 0) & i17)) {
+ break;
+ } else {
+ i20 = i22;
+ }
+ }
+ do {
+ if ((i22 | 0) >= (i23 | 0)) {
+ if ((i15 | 0) != 0) {
+ if ((i15 | 0) == (i24 | 0)) {
+ i23 = HEAP16[i3 >> 1] | 0;
+ i21 = HEAP32[i4 >> 2] | 0;
+ i20 = i22;
+ } else {
+ i22 = HEAPU16[i2 + (i15 << 2) + 2686 >> 1] | 0;
+ i21 = HEAP32[i4 >> 2] | 0;
+ i24 = HEAPU16[i2 + (i15 << 2) + 2684 >> 1] | 0;
+ i25 = HEAPU16[i3 >> 1] | 0 | i24 << i21;
+ i23 = i25 & 65535;
+ HEAP16[i3 >> 1] = i23;
+ if ((i21 | 0) > (16 - i22 | 0)) {
+ i23 = HEAP32[i14 >> 2] | 0;
+ HEAP32[i14 >> 2] = i23 + 1;
+ HEAP8[(HEAP32[i10 >> 2] | 0) + i23 | 0] = i25;
+ i23 = (HEAPU16[i3 >> 1] | 0) >>> 8 & 255;
+ i21 = HEAP32[i14 >> 2] | 0;
+ HEAP32[i14 >> 2] = i21 + 1;
+ HEAP8[(HEAP32[i10 >> 2] | 0) + i21 | 0] = i23;
+ i21 = HEAP32[i4 >> 2] | 0;
+ i23 = i24 >>> (16 - i21 | 0) & 65535;
+ HEAP16[i3 >> 1] = i23;
+ i21 = i22 + -16 + i21 | 0;
+ } else {
+ i21 = i21 + i22 | 0;
+ }
+ HEAP32[i4 >> 2] = i21;
+ }
+ i22 = HEAPU16[i5 >> 1] | 0;
+ i24 = HEAPU16[i6 >> 1] | 0;
+ i23 = i23 & 65535 | i24 << i21;
+ HEAP16[i3 >> 1] = i23;
+ if ((i21 | 0) > (16 - i22 | 0)) {
+ i21 = HEAP32[i14 >> 2] | 0;
+ HEAP32[i14 >> 2] = i21 + 1;
+ HEAP8[(HEAP32[i10 >> 2] | 0) + i21 | 0] = i23;
+ i23 = (HEAPU16[i3 >> 1] | 0) >>> 8 & 255;
+ i21 = HEAP32[i14 >> 2] | 0;
+ HEAP32[i14 >> 2] = i21 + 1;
+ HEAP8[(HEAP32[i10 >> 2] | 0) + i21 | 0] = i23;
+ i21 = HEAP32[i4 >> 2] | 0;
+ i23 = i24 >>> (16 - i21 | 0);
+ HEAP16[i3 >> 1] = i23;
+ i21 = i22 + -16 + i21 | 0;
+ } else {
+ i21 = i21 + i22 | 0;
+ }
+ HEAP32[i4 >> 2] = i21;
+ i20 = i20 + 65533 & 65535;
+ i22 = i23 & 65535 | i20 << i21;
+ HEAP16[i3 >> 1] = i22;
+ if ((i21 | 0) > 14) {
+ i26 = HEAP32[i14 >> 2] | 0;
+ HEAP32[i14 >> 2] = i26 + 1;
+ HEAP8[(HEAP32[i10 >> 2] | 0) + i26 | 0] = i22;
+ i26 = (HEAPU16[i3 >> 1] | 0) >>> 8 & 255;
+ i27 = HEAP32[i14 >> 2] | 0;
+ HEAP32[i14 >> 2] = i27 + 1;
+ HEAP8[(HEAP32[i10 >> 2] | 0) + i27 | 0] = i26;
+ i27 = HEAP32[i4 >> 2] | 0;
+ HEAP16[i3 >> 1] = i20 >>> (16 - i27 | 0);
+ HEAP32[i4 >> 2] = i27 + -14;
+ break;
+ } else {
+ HEAP32[i4 >> 2] = i21 + 2;
+ break;
+ }
+ }
+ if ((i22 | 0) < 11) {
+ i24 = HEAPU16[i7 >> 1] | 0;
+ i23 = HEAP32[i4 >> 2] | 0;
+ i21 = HEAPU16[i8 >> 1] | 0;
+ i22 = HEAPU16[i3 >> 1] | 0 | i21 << i23;
+ HEAP16[i3 >> 1] = i22;
+ if ((i23 | 0) > (16 - i24 | 0)) {
+ i27 = HEAP32[i14 >> 2] | 0;
+ HEAP32[i14 >> 2] = i27 + 1;
+ HEAP8[(HEAP32[i10 >> 2] | 0) + i27 | 0] = i22;
+ i22 = (HEAPU16[i3 >> 1] | 0) >>> 8 & 255;
+ i27 = HEAP32[i14 >> 2] | 0;
+ HEAP32[i14 >> 2] = i27 + 1;
+ HEAP8[(HEAP32[i10 >> 2] | 0) + i27 | 0] = i22;
+ i27 = HEAP32[i4 >> 2] | 0;
+ i22 = i21 >>> (16 - i27 | 0);
+ HEAP16[i3 >> 1] = i22;
+ i21 = i24 + -16 + i27 | 0;
+ } else {
+ i21 = i23 + i24 | 0;
+ }
+ HEAP32[i4 >> 2] = i21;
+ i20 = i20 + 65534 & 65535;
+ i22 = i22 & 65535 | i20 << i21;
+ HEAP16[i3 >> 1] = i22;
+ if ((i21 | 0) > 13) {
+ i26 = HEAP32[i14 >> 2] | 0;
+ HEAP32[i14 >> 2] = i26 + 1;
+ HEAP8[(HEAP32[i10 >> 2] | 0) + i26 | 0] = i22;
+ i26 = (HEAPU16[i3 >> 1] | 0) >>> 8 & 255;
+ i27 = HEAP32[i14 >> 2] | 0;
+ HEAP32[i14 >> 2] = i27 + 1;
+ HEAP8[(HEAP32[i10 >> 2] | 0) + i27 | 0] = i26;
+ i27 = HEAP32[i4 >> 2] | 0;
+ HEAP16[i3 >> 1] = i20 >>> (16 - i27 | 0);
+ HEAP32[i4 >> 2] = i27 + -13;
+ break;
+ } else {
+ HEAP32[i4 >> 2] = i21 + 3;
+ break;
+ }
+ } else {
+ i21 = HEAPU16[i9 >> 1] | 0;
+ i24 = HEAP32[i4 >> 2] | 0;
+ i23 = HEAPU16[i1 >> 1] | 0;
+ i22 = HEAPU16[i3 >> 1] | 0 | i23 << i24;
+ HEAP16[i3 >> 1] = i22;
+ if ((i24 | 0) > (16 - i21 | 0)) {
+ i27 = HEAP32[i14 >> 2] | 0;
+ HEAP32[i14 >> 2] = i27 + 1;
+ HEAP8[(HEAP32[i10 >> 2] | 0) + i27 | 0] = i22;
+ i22 = (HEAPU16[i3 >> 1] | 0) >>> 8 & 255;
+ i27 = HEAP32[i14 >> 2] | 0;
+ HEAP32[i14 >> 2] = i27 + 1;
+ HEAP8[(HEAP32[i10 >> 2] | 0) + i27 | 0] = i22;
+ i27 = HEAP32[i4 >> 2] | 0;
+ i22 = i23 >>> (16 - i27 | 0);
+ HEAP16[i3 >> 1] = i22;
+ i21 = i21 + -16 + i27 | 0;
+ } else {
+ i21 = i24 + i21 | 0;
+ }
+ HEAP32[i4 >> 2] = i21;
+ i20 = i20 + 65526 & 65535;
+ i22 = i22 & 65535 | i20 << i21;
+ HEAP16[i3 >> 1] = i22;
+ if ((i21 | 0) > 9) {
+ i26 = HEAP32[i14 >> 2] | 0;
+ HEAP32[i14 >> 2] = i26 + 1;
+ HEAP8[(HEAP32[i10 >> 2] | 0) + i26 | 0] = i22;
+ i26 = (HEAPU16[i3 >> 1] | 0) >>> 8 & 255;
+ i27 = HEAP32[i14 >> 2] | 0;
+ HEAP32[i14 >> 2] = i27 + 1;
+ HEAP8[(HEAP32[i10 >> 2] | 0) + i27 | 0] = i26;
+ i27 = HEAP32[i4 >> 2] | 0;
+ HEAP16[i3 >> 1] = i20 >>> (16 - i27 | 0);
+ HEAP32[i4 >> 2] = i27 + -9;
+ break;
+ } else {
+ HEAP32[i4 >> 2] = i21 + 7;
+ break;
+ }
+ }
+ } else {
+ i20 = i2 + (i15 << 2) + 2686 | 0;
+ i21 = i2 + (i15 << 2) + 2684 | 0;
+ i23 = HEAP32[i4 >> 2] | 0;
+ i26 = HEAP16[i3 >> 1] | 0;
+ do {
+ i24 = HEAPU16[i20 >> 1] | 0;
+ i25 = HEAPU16[i21 >> 1] | 0;
+ i27 = i26 & 65535 | i25 << i23;
+ i26 = i27 & 65535;
+ HEAP16[i3 >> 1] = i26;
+ if ((i23 | 0) > (16 - i24 | 0)) {
+ i26 = HEAP32[i14 >> 2] | 0;
+ HEAP32[i14 >> 2] = i26 + 1;
+ HEAP8[(HEAP32[i10 >> 2] | 0) + i26 | 0] = i27;
+ i26 = (HEAPU16[i3 >> 1] | 0) >>> 8 & 255;
+ i23 = HEAP32[i14 >> 2] | 0;
+ HEAP32[i14 >> 2] = i23 + 1;
+ HEAP8[(HEAP32[i10 >> 2] | 0) + i23 | 0] = i26;
+ i23 = HEAP32[i4 >> 2] | 0;
+ i26 = i25 >>> (16 - i23 | 0) & 65535;
+ HEAP16[i3 >> 1] = i26;
+ i23 = i24 + -16 + i23 | 0;
+ } else {
+ i23 = i23 + i24 | 0;
+ }
+ HEAP32[i4 >> 2] = i23;
+ i22 = i22 + -1 | 0;
+ } while ((i22 | 0) != 0);
+ }
+ } while (0);
+ if (i19 << 16 >> 16 == 0) {
+ i24 = i15;
+ i21 = 138;
+ i23 = 3;
+ i15 = i16;
+ continue;
+ }
+ i24 = i15;
+ i21 = i17 ? 6 : 7;
+ i23 = i17 ? 3 : 4;
+ i15 = i16;
+ }
+ STACKTOP = i11;
+ return;
+}
+function __tr_flush_block(i2, i4, i6, i3) {
+ i2 = i2 | 0;
+ i4 = i4 | 0;
+ i6 = i6 | 0;
+ i3 = i3 | 0;
+ var i1 = 0, i5 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0;
+ i1 = STACKTOP;
+ if ((HEAP32[i2 + 132 >> 2] | 0) > 0) {
+ i5 = (HEAP32[i2 >> 2] | 0) + 44 | 0;
+ if ((HEAP32[i5 >> 2] | 0) == 2) {
+ i8 = -201342849;
+ i9 = 0;
+ while (1) {
+ if ((i8 & 1 | 0) != 0 ? (HEAP16[i2 + (i9 << 2) + 148 >> 1] | 0) != 0 : 0) {
+ i8 = 0;
+ break;
+ }
+ i9 = i9 + 1 | 0;
+ if ((i9 | 0) < 32) {
+ i8 = i8 >>> 1;
+ } else {
+ i7 = 6;
+ break;
+ }
+ }
+ L9 : do {
+ if ((i7 | 0) == 6) {
+ if (((HEAP16[i2 + 184 >> 1] | 0) == 0 ? (HEAP16[i2 + 188 >> 1] | 0) == 0 : 0) ? (HEAP16[i2 + 200 >> 1] | 0) == 0 : 0) {
+ i8 = 32;
+ while (1) {
+ i7 = i8 + 1 | 0;
+ if ((HEAP16[i2 + (i8 << 2) + 148 >> 1] | 0) != 0) {
+ i8 = 1;
+ break L9;
+ }
+ if ((i7 | 0) < 256) {
+ i8 = i7;
+ } else {
+ i8 = 0;
+ break;
+ }
+ }
+ } else {
+ i8 = 1;
+ }
+ }
+ } while (0);
+ HEAP32[i5 >> 2] = i8;
+ }
+ _build_tree(i2, i2 + 2840 | 0);
+ _build_tree(i2, i2 + 2852 | 0);
+ _scan_tree(i2, i2 + 148 | 0, HEAP32[i2 + 2844 >> 2] | 0);
+ _scan_tree(i2, i2 + 2440 | 0, HEAP32[i2 + 2856 >> 2] | 0);
+ _build_tree(i2, i2 + 2864 | 0);
+ i5 = 18;
+ while (1) {
+ i7 = i5 + -1 | 0;
+ if ((HEAP16[i2 + (HEAPU8[2888 + i5 | 0] << 2) + 2686 >> 1] | 0) != 0) {
+ break;
+ }
+ if ((i7 | 0) > 2) {
+ i5 = i7;
+ } else {
+ i5 = i7;
+ break;
+ }
+ }
+ i10 = i2 + 5800 | 0;
+ i7 = (i5 * 3 | 0) + 17 + (HEAP32[i10 >> 2] | 0) | 0;
+ HEAP32[i10 >> 2] = i7;
+ i7 = (i7 + 10 | 0) >>> 3;
+ i10 = ((HEAP32[i2 + 5804 >> 2] | 0) + 10 | 0) >>> 3;
+ i9 = i10 >>> 0 > i7 >>> 0 ? i7 : i10;
+ } else {
+ i10 = i6 + 5 | 0;
+ i5 = 0;
+ i9 = i10;
+ }
+ do {
+ if ((i6 + 4 | 0) >>> 0 > i9 >>> 0 | (i4 | 0) == 0) {
+ i4 = i2 + 5820 | 0;
+ i7 = HEAP32[i4 >> 2] | 0;
+ i8 = (i7 | 0) > 13;
+ if ((HEAP32[i2 + 136 >> 2] | 0) == 4 | (i10 | 0) == (i9 | 0)) {
+ i9 = i3 + 2 & 65535;
+ i6 = i2 + 5816 | 0;
+ i5 = HEAPU16[i6 >> 1] | i9 << i7;
+ HEAP16[i6 >> 1] = i5;
+ if (i8) {
+ i12 = i2 + 20 | 0;
+ i13 = HEAP32[i12 >> 2] | 0;
+ HEAP32[i12 >> 2] = i13 + 1;
+ i14 = i2 + 8 | 0;
+ HEAP8[(HEAP32[i14 >> 2] | 0) + i13 | 0] = i5;
+ i13 = (HEAPU16[i6 >> 1] | 0) >>> 8 & 255;
+ i5 = HEAP32[i12 >> 2] | 0;
+ HEAP32[i12 >> 2] = i5 + 1;
+ HEAP8[(HEAP32[i14 >> 2] | 0) + i5 | 0] = i13;
+ i5 = HEAP32[i4 >> 2] | 0;
+ HEAP16[i6 >> 1] = i9 >>> (16 - i5 | 0);
+ i5 = i5 + -13 | 0;
+ } else {
+ i5 = i7 + 3 | 0;
+ }
+ HEAP32[i4 >> 2] = i5;
+ _compress_block(i2, 1136, 2288);
+ break;
+ }
+ i10 = i3 + 4 & 65535;
+ i6 = i2 + 5816 | 0;
+ i9 = HEAPU16[i6 >> 1] | i10 << i7;
+ HEAP16[i6 >> 1] = i9;
+ if (i8) {
+ i13 = i2 + 20 | 0;
+ i12 = HEAP32[i13 >> 2] | 0;
+ HEAP32[i13 >> 2] = i12 + 1;
+ i14 = i2 + 8 | 0;
+ HEAP8[(HEAP32[i14 >> 2] | 0) + i12 | 0] = i9;
+ i9 = (HEAPU16[i6 >> 1] | 0) >>> 8 & 255;
+ i12 = HEAP32[i13 >> 2] | 0;
+ HEAP32[i13 >> 2] = i12 + 1;
+ HEAP8[(HEAP32[i14 >> 2] | 0) + i12 | 0] = i9;
+ i12 = HEAP32[i4 >> 2] | 0;
+ i9 = i10 >>> (16 - i12 | 0);
+ HEAP16[i6 >> 1] = i9;
+ i12 = i12 + -13 | 0;
+ } else {
+ i12 = i7 + 3 | 0;
+ }
+ HEAP32[i4 >> 2] = i12;
+ i7 = HEAP32[i2 + 2844 >> 2] | 0;
+ i8 = HEAP32[i2 + 2856 >> 2] | 0;
+ i10 = i7 + 65280 & 65535;
+ i11 = i9 & 65535 | i10 << i12;
+ HEAP16[i6 >> 1] = i11;
+ if ((i12 | 0) > 11) {
+ i13 = i2 + 20 | 0;
+ i9 = HEAP32[i13 >> 2] | 0;
+ HEAP32[i13 >> 2] = i9 + 1;
+ i14 = i2 + 8 | 0;
+ HEAP8[(HEAP32[i14 >> 2] | 0) + i9 | 0] = i11;
+ i11 = (HEAPU16[i6 >> 1] | 0) >>> 8 & 255;
+ i9 = HEAP32[i13 >> 2] | 0;
+ HEAP32[i13 >> 2] = i9 + 1;
+ HEAP8[(HEAP32[i14 >> 2] | 0) + i9 | 0] = i11;
+ i9 = HEAP32[i4 >> 2] | 0;
+ i11 = i10 >>> (16 - i9 | 0);
+ HEAP16[i6 >> 1] = i11;
+ i9 = i9 + -11 | 0;
+ } else {
+ i9 = i12 + 5 | 0;
+ }
+ HEAP32[i4 >> 2] = i9;
+ i10 = i8 & 65535;
+ i11 = i10 << i9 | i11 & 65535;
+ HEAP16[i6 >> 1] = i11;
+ if ((i9 | 0) > 11) {
+ i13 = i2 + 20 | 0;
+ i9 = HEAP32[i13 >> 2] | 0;
+ HEAP32[i13 >> 2] = i9 + 1;
+ i14 = i2 + 8 | 0;
+ HEAP8[(HEAP32[i14 >> 2] | 0) + i9 | 0] = i11;
+ i11 = (HEAPU16[i6 >> 1] | 0) >>> 8 & 255;
+ i9 = HEAP32[i13 >> 2] | 0;
+ HEAP32[i13 >> 2] = i9 + 1;
+ HEAP8[(HEAP32[i14 >> 2] | 0) + i9 | 0] = i11;
+ i9 = HEAP32[i4 >> 2] | 0;
+ i11 = i10 >>> (16 - i9 | 0);
+ HEAP16[i6 >> 1] = i11;
+ i9 = i9 + -11 | 0;
+ } else {
+ i9 = i9 + 5 | 0;
+ }
+ HEAP32[i4 >> 2] = i9;
+ i10 = i5 + 65533 & 65535;
+ i14 = i10 << i9 | i11 & 65535;
+ HEAP16[i6 >> 1] = i14;
+ if ((i9 | 0) > 12) {
+ i12 = i2 + 20 | 0;
+ i11 = HEAP32[i12 >> 2] | 0;
+ HEAP32[i12 >> 2] = i11 + 1;
+ i13 = i2 + 8 | 0;
+ HEAP8[(HEAP32[i13 >> 2] | 0) + i11 | 0] = i14;
+ i14 = (HEAPU16[i6 >> 1] | 0) >>> 8 & 255;
+ i11 = HEAP32[i12 >> 2] | 0;
+ HEAP32[i12 >> 2] = i11 + 1;
+ HEAP8[(HEAP32[i13 >> 2] | 0) + i11 | 0] = i14;
+ i11 = HEAP32[i4 >> 2] | 0;
+ i14 = i10 >>> (16 - i11 | 0);
+ HEAP16[i6 >> 1] = i14;
+ i11 = i11 + -12 | 0;
+ } else {
+ i11 = i9 + 4 | 0;
+ }
+ HEAP32[i4 >> 2] = i11;
+ if ((i5 | 0) > -1) {
+ i10 = i2 + 20 | 0;
+ i9 = i2 + 8 | 0;
+ i12 = 0;
+ while (1) {
+ i13 = HEAPU16[i2 + (HEAPU8[2888 + i12 | 0] << 2) + 2686 >> 1] | 0;
+ i14 = i13 << i11 | i14 & 65535;
+ HEAP16[i6 >> 1] = i14;
+ if ((i11 | 0) > 13) {
+ i11 = HEAP32[i10 >> 2] | 0;
+ HEAP32[i10 >> 2] = i11 + 1;
+ HEAP8[(HEAP32[i9 >> 2] | 0) + i11 | 0] = i14;
+ i14 = (HEAPU16[i6 >> 1] | 0) >>> 8 & 255;
+ i11 = HEAP32[i10 >> 2] | 0;
+ HEAP32[i10 >> 2] = i11 + 1;
+ HEAP8[(HEAP32[i9 >> 2] | 0) + i11 | 0] = i14;
+ i11 = HEAP32[i4 >> 2] | 0;
+ i14 = i13 >>> (16 - i11 | 0);
+ HEAP16[i6 >> 1] = i14;
+ i11 = i11 + -13 | 0;
+ } else {
+ i11 = i11 + 3 | 0;
+ }
+ HEAP32[i4 >> 2] = i11;
+ if ((i12 | 0) == (i5 | 0)) {
+ break;
+ } else {
+ i12 = i12 + 1 | 0;
+ }
+ }
+ }
+ i13 = i2 + 148 | 0;
+ _send_tree(i2, i13, i7);
+ i14 = i2 + 2440 | 0;
+ _send_tree(i2, i14, i8);
+ _compress_block(i2, i13, i14);
+ } else {
+ __tr_stored_block(i2, i4, i6, i3);
+ }
+ } while (0);
+ _init_block(i2);
+ if ((i3 | 0) == 0) {
+ STACKTOP = i1;
+ return;
+ }
+ i3 = i2 + 5820 | 0;
+ i4 = HEAP32[i3 >> 2] | 0;
+ if ((i4 | 0) <= 8) {
+ i5 = i2 + 5816 | 0;
+ if ((i4 | 0) > 0) {
+ i13 = HEAP16[i5 >> 1] & 255;
+ i12 = i2 + 20 | 0;
+ i14 = HEAP32[i12 >> 2] | 0;
+ HEAP32[i12 >> 2] = i14 + 1;
+ HEAP8[(HEAP32[i2 + 8 >> 2] | 0) + i14 | 0] = i13;
+ }
+ } else {
+ i5 = i2 + 5816 | 0;
+ i14 = HEAP16[i5 >> 1] & 255;
+ i11 = i2 + 20 | 0;
+ i12 = HEAP32[i11 >> 2] | 0;
+ HEAP32[i11 >> 2] = i12 + 1;
+ i13 = i2 + 8 | 0;
+ HEAP8[(HEAP32[i13 >> 2] | 0) + i12 | 0] = i14;
+ i12 = (HEAPU16[i5 >> 1] | 0) >>> 8 & 255;
+ i14 = HEAP32[i11 >> 2] | 0;
+ HEAP32[i11 >> 2] = i14 + 1;
+ HEAP8[(HEAP32[i13 >> 2] | 0) + i14 | 0] = i12;
+ }
+ HEAP16[i5 >> 1] = 0;
+ HEAP32[i3 >> 2] = 0;
+ STACKTOP = i1;
+ return;
+}
+function _deflate_fast(i3, i6) {
+ i3 = i3 | 0;
+ i6 = i6 | 0;
+ var i1 = 0, i2 = 0, i4 = 0, i5 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0, i23 = 0, i24 = 0, i25 = 0, i26 = 0, i27 = 0, i28 = 0, i29 = 0, i30 = 0, i31 = 0, i32 = 0, i33 = 0, i34 = 0, i35 = 0, i36 = 0;
+ i1 = STACKTOP;
+ i20 = i3 + 116 | 0;
+ i22 = (i6 | 0) == 0;
+ i23 = i3 + 72 | 0;
+ i24 = i3 + 88 | 0;
+ i5 = i3 + 108 | 0;
+ i7 = i3 + 56 | 0;
+ i9 = i3 + 84 | 0;
+ i10 = i3 + 68 | 0;
+ i11 = i3 + 52 | 0;
+ i12 = i3 + 64 | 0;
+ i19 = i3 + 44 | 0;
+ i21 = i3 + 96 | 0;
+ i16 = i3 + 112 | 0;
+ i13 = i3 + 5792 | 0;
+ i17 = i3 + 5796 | 0;
+ i18 = i3 + 5784 | 0;
+ i14 = i3 + 5788 | 0;
+ i15 = i3 + 128 | 0;
+ i4 = i3 + 92 | 0;
+ while (1) {
+ if ((HEAP32[i20 >> 2] | 0) >>> 0 < 262) {
+ _fill_window(i3);
+ i25 = HEAP32[i20 >> 2] | 0;
+ if (i25 >>> 0 < 262 & i22) {
+ i2 = 0;
+ i25 = 34;
+ break;
+ }
+ if ((i25 | 0) == 0) {
+ i25 = 26;
+ break;
+ }
+ if (!(i25 >>> 0 > 2)) {
+ i25 = 9;
+ } else {
+ i25 = 6;
+ }
+ } else {
+ i25 = 6;
+ }
+ if ((i25 | 0) == 6) {
+ i25 = 0;
+ i26 = HEAP32[i5 >> 2] | 0;
+ i34 = ((HEAPU8[(HEAP32[i7 >> 2] | 0) + (i26 + 2) | 0] | 0) ^ HEAP32[i23 >> 2] << HEAP32[i24 >> 2]) & HEAP32[i9 >> 2];
+ HEAP32[i23 >> 2] = i34;
+ i34 = (HEAP32[i10 >> 2] | 0) + (i34 << 1) | 0;
+ i35 = HEAP16[i34 >> 1] | 0;
+ HEAP16[(HEAP32[i12 >> 2] | 0) + ((HEAP32[i11 >> 2] & i26) << 1) >> 1] = i35;
+ i27 = i35 & 65535;
+ HEAP16[i34 >> 1] = i26;
+ if (!(i35 << 16 >> 16 == 0) ? !((i26 - i27 | 0) >>> 0 > ((HEAP32[i19 >> 2] | 0) + -262 | 0) >>> 0) : 0) {
+ i26 = _longest_match(i3, i27) | 0;
+ HEAP32[i21 >> 2] = i26;
+ } else {
+ i25 = 9;
+ }
+ }
+ if ((i25 | 0) == 9) {
+ i26 = HEAP32[i21 >> 2] | 0;
+ }
+ do {
+ if (i26 >>> 0 > 2) {
+ i35 = i26 + 253 | 0;
+ i25 = (HEAP32[i5 >> 2] | 0) - (HEAP32[i16 >> 2] | 0) | 0;
+ i34 = HEAP32[i13 >> 2] | 0;
+ HEAP16[(HEAP32[i17 >> 2] | 0) + (i34 << 1) >> 1] = i25;
+ HEAP32[i13 >> 2] = i34 + 1;
+ HEAP8[(HEAP32[i18 >> 2] | 0) + i34 | 0] = i35;
+ i35 = i3 + ((HEAPU8[808 + (i35 & 255) | 0] | 0 | 256) + 1 << 2) + 148 | 0;
+ HEAP16[i35 >> 1] = (HEAP16[i35 >> 1] | 0) + 1 << 16 >> 16;
+ i25 = i25 + 65535 & 65535;
+ if (!(i25 >>> 0 < 256)) {
+ i25 = (i25 >>> 7) + 256 | 0;
+ }
+ i25 = i3 + ((HEAPU8[296 + i25 | 0] | 0) << 2) + 2440 | 0;
+ HEAP16[i25 >> 1] = (HEAP16[i25 >> 1] | 0) + 1 << 16 >> 16;
+ i25 = (HEAP32[i13 >> 2] | 0) == ((HEAP32[i14 >> 2] | 0) + -1 | 0) | 0;
+ i26 = HEAP32[i21 >> 2] | 0;
+ i35 = (HEAP32[i20 >> 2] | 0) - i26 | 0;
+ HEAP32[i20 >> 2] = i35;
+ if (!(i26 >>> 0 <= (HEAP32[i15 >> 2] | 0) >>> 0 & i35 >>> 0 > 2)) {
+ i26 = (HEAP32[i5 >> 2] | 0) + i26 | 0;
+ HEAP32[i5 >> 2] = i26;
+ HEAP32[i21 >> 2] = 0;
+ i34 = HEAP32[i7 >> 2] | 0;
+ i35 = HEAPU8[i34 + i26 | 0] | 0;
+ HEAP32[i23 >> 2] = i35;
+ HEAP32[i23 >> 2] = ((HEAPU8[i34 + (i26 + 1) | 0] | 0) ^ i35 << HEAP32[i24 >> 2]) & HEAP32[i9 >> 2];
+ break;
+ }
+ i30 = i26 + -1 | 0;
+ HEAP32[i21 >> 2] = i30;
+ i34 = HEAP32[i24 >> 2] | 0;
+ i33 = HEAP32[i7 >> 2] | 0;
+ i35 = HEAP32[i9 >> 2] | 0;
+ i32 = HEAP32[i10 >> 2] | 0;
+ i27 = HEAP32[i11 >> 2] | 0;
+ i29 = HEAP32[i12 >> 2] | 0;
+ i26 = HEAP32[i5 >> 2] | 0;
+ i31 = HEAP32[i23 >> 2] | 0;
+ while (1) {
+ i28 = i26 + 1 | 0;
+ HEAP32[i5 >> 2] = i28;
+ i31 = ((HEAPU8[i33 + (i26 + 3) | 0] | 0) ^ i31 << i34) & i35;
+ HEAP32[i23 >> 2] = i31;
+ i36 = i32 + (i31 << 1) | 0;
+ HEAP16[i29 + ((i27 & i28) << 1) >> 1] = HEAP16[i36 >> 1] | 0;
+ HEAP16[i36 >> 1] = i28;
+ i30 = i30 + -1 | 0;
+ HEAP32[i21 >> 2] = i30;
+ if ((i30 | 0) == 0) {
+ break;
+ } else {
+ i26 = i28;
+ }
+ }
+ i26 = i26 + 2 | 0;
+ HEAP32[i5 >> 2] = i26;
+ } else {
+ i25 = HEAP8[(HEAP32[i7 >> 2] | 0) + (HEAP32[i5 >> 2] | 0) | 0] | 0;
+ i26 = HEAP32[i13 >> 2] | 0;
+ HEAP16[(HEAP32[i17 >> 2] | 0) + (i26 << 1) >> 1] = 0;
+ HEAP32[i13 >> 2] = i26 + 1;
+ HEAP8[(HEAP32[i18 >> 2] | 0) + i26 | 0] = i25;
+ i25 = i3 + ((i25 & 255) << 2) + 148 | 0;
+ HEAP16[i25 >> 1] = (HEAP16[i25 >> 1] | 0) + 1 << 16 >> 16;
+ i25 = (HEAP32[i13 >> 2] | 0) == ((HEAP32[i14 >> 2] | 0) + -1 | 0) | 0;
+ HEAP32[i20 >> 2] = (HEAP32[i20 >> 2] | 0) + -1;
+ i26 = (HEAP32[i5 >> 2] | 0) + 1 | 0;
+ HEAP32[i5 >> 2] = i26;
+ }
+ } while (0);
+ if ((i25 | 0) == 0) {
+ continue;
+ }
+ i25 = HEAP32[i4 >> 2] | 0;
+ if ((i25 | 0) > -1) {
+ i27 = (HEAP32[i7 >> 2] | 0) + i25 | 0;
+ } else {
+ i27 = 0;
+ }
+ __tr_flush_block(i3, i27, i26 - i25 | 0, 0);
+ HEAP32[i4 >> 2] = HEAP32[i5 >> 2];
+ i27 = HEAP32[i3 >> 2] | 0;
+ i28 = i27 + 28 | 0;
+ i25 = HEAP32[i28 >> 2] | 0;
+ i30 = HEAP32[i25 + 20 >> 2] | 0;
+ i26 = i27 + 16 | 0;
+ i29 = HEAP32[i26 >> 2] | 0;
+ i29 = i30 >>> 0 > i29 >>> 0 ? i29 : i30;
+ if ((i29 | 0) != 0 ? (i8 = i27 + 12 | 0, _memcpy(HEAP32[i8 >> 2] | 0, HEAP32[i25 + 16 >> 2] | 0, i29 | 0) | 0, HEAP32[i8 >> 2] = (HEAP32[i8 >> 2] | 0) + i29, i8 = (HEAP32[i28 >> 2] | 0) + 16 | 0, HEAP32[i8 >> 2] = (HEAP32[i8 >> 2] | 0) + i29, i8 = i27 + 20 | 0, HEAP32[i8 >> 2] = (HEAP32[i8 >> 2] | 0) + i29, HEAP32[i26 >> 2] = (HEAP32[i26 >> 2] | 0) - i29, i8 = HEAP32[i28 >> 2] | 0, i35 = i8 + 20 | 0, i36 = HEAP32[i35 >> 2] | 0, HEAP32[i35 >> 2] = i36 - i29, (i36 | 0) == (i29 | 0)) : 0) {
+ HEAP32[i8 + 16 >> 2] = HEAP32[i8 + 8 >> 2];
+ }
+ if ((HEAP32[(HEAP32[i3 >> 2] | 0) + 16 >> 2] | 0) == 0) {
+ i2 = 0;
+ i25 = 34;
+ break;
+ }
+ }
+ if ((i25 | 0) == 26) {
+ i8 = HEAP32[i4 >> 2] | 0;
+ if ((i8 | 0) > -1) {
+ i7 = (HEAP32[i7 >> 2] | 0) + i8 | 0;
+ } else {
+ i7 = 0;
+ }
+ i6 = (i6 | 0) == 4;
+ __tr_flush_block(i3, i7, (HEAP32[i5 >> 2] | 0) - i8 | 0, i6 & 1);
+ HEAP32[i4 >> 2] = HEAP32[i5 >> 2];
+ i5 = HEAP32[i3 >> 2] | 0;
+ i7 = i5 + 28 | 0;
+ i4 = HEAP32[i7 >> 2] | 0;
+ i10 = HEAP32[i4 + 20 >> 2] | 0;
+ i8 = i5 + 16 | 0;
+ i9 = HEAP32[i8 >> 2] | 0;
+ i9 = i10 >>> 0 > i9 >>> 0 ? i9 : i10;
+ if ((i9 | 0) != 0 ? (i2 = i5 + 12 | 0, _memcpy(HEAP32[i2 >> 2] | 0, HEAP32[i4 + 16 >> 2] | 0, i9 | 0) | 0, HEAP32[i2 >> 2] = (HEAP32[i2 >> 2] | 0) + i9, i2 = (HEAP32[i7 >> 2] | 0) + 16 | 0, HEAP32[i2 >> 2] = (HEAP32[i2 >> 2] | 0) + i9, i2 = i5 + 20 | 0, HEAP32[i2 >> 2] = (HEAP32[i2 >> 2] | 0) + i9, HEAP32[i8 >> 2] = (HEAP32[i8 >> 2] | 0) - i9, i2 = HEAP32[i7 >> 2] | 0, i35 = i2 + 20 | 0, i36 = HEAP32[i35 >> 2] | 0, HEAP32[i35 >> 2] = i36 - i9, (i36 | 0) == (i9 | 0)) : 0) {
+ HEAP32[i2 + 16 >> 2] = HEAP32[i2 + 8 >> 2];
+ }
+ if ((HEAP32[(HEAP32[i3 >> 2] | 0) + 16 >> 2] | 0) == 0) {
+ i36 = i6 ? 2 : 0;
+ STACKTOP = i1;
+ return i36 | 0;
+ } else {
+ i36 = i6 ? 3 : 1;
+ STACKTOP = i1;
+ return i36 | 0;
+ }
+ } else if ((i25 | 0) == 34) {
+ STACKTOP = i1;
+ return i2 | 0;
+ }
+ return 0;
+}
+function _inflate_table(i11, i5, i13, i2, i1, i10) {
+ i11 = i11 | 0;
+ i5 = i5 | 0;
+ i13 = i13 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ i10 = i10 | 0;
+ var i3 = 0, i4 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i12 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0, i23 = 0, i24 = 0, i25 = 0, i26 = 0, i27 = 0, i28 = 0, i29 = 0, i30 = 0, i31 = 0, i32 = 0, i33 = 0;
+ i3 = STACKTOP;
+ STACKTOP = STACKTOP + 64 | 0;
+ i7 = i3 + 32 | 0;
+ i12 = i3;
+ i4 = i7 + 0 | 0;
+ i9 = i4 + 32 | 0;
+ do {
+ HEAP16[i4 >> 1] = 0;
+ i4 = i4 + 2 | 0;
+ } while ((i4 | 0) < (i9 | 0));
+ i14 = (i13 | 0) == 0;
+ if (!i14) {
+ i4 = 0;
+ do {
+ i32 = i7 + (HEAPU16[i5 + (i4 << 1) >> 1] << 1) | 0;
+ HEAP16[i32 >> 1] = (HEAP16[i32 >> 1] | 0) + 1 << 16 >> 16;
+ i4 = i4 + 1 | 0;
+ } while ((i4 | 0) != (i13 | 0));
+ }
+ i4 = HEAP32[i1 >> 2] | 0;
+ i9 = 15;
+ while (1) {
+ i15 = i9 + -1 | 0;
+ if ((HEAP16[i7 + (i9 << 1) >> 1] | 0) != 0) {
+ break;
+ }
+ if ((i15 | 0) == 0) {
+ i6 = 7;
+ break;
+ } else {
+ i9 = i15;
+ }
+ }
+ if ((i6 | 0) == 7) {
+ i32 = HEAP32[i2 >> 2] | 0;
+ HEAP32[i2 >> 2] = i32 + 4;
+ HEAP8[i32] = 64;
+ HEAP8[i32 + 1 | 0] = 1;
+ HEAP16[i32 + 2 >> 1] = 0;
+ i32 = HEAP32[i2 >> 2] | 0;
+ HEAP32[i2 >> 2] = i32 + 4;
+ HEAP8[i32] = 64;
+ HEAP8[i32 + 1 | 0] = 1;
+ HEAP16[i32 + 2 >> 1] = 0;
+ HEAP32[i1 >> 2] = 1;
+ i32 = 0;
+ STACKTOP = i3;
+ return i32 | 0;
+ }
+ i4 = i4 >>> 0 > i9 >>> 0 ? i9 : i4;
+ L12 : do {
+ if (i9 >>> 0 > 1) {
+ i27 = 1;
+ while (1) {
+ i15 = i27 + 1 | 0;
+ if ((HEAP16[i7 + (i27 << 1) >> 1] | 0) != 0) {
+ break L12;
+ }
+ if (i15 >>> 0 < i9 >>> 0) {
+ i27 = i15;
+ } else {
+ i27 = i15;
+ break;
+ }
+ }
+ } else {
+ i27 = 1;
+ }
+ } while (0);
+ i4 = i4 >>> 0 < i27 >>> 0 ? i27 : i4;
+ i16 = 1;
+ i15 = 1;
+ do {
+ i16 = (i16 << 1) - (HEAPU16[i7 + (i15 << 1) >> 1] | 0) | 0;
+ i15 = i15 + 1 | 0;
+ if ((i16 | 0) < 0) {
+ i8 = -1;
+ i6 = 56;
+ break;
+ }
+ } while (i15 >>> 0 < 16);
+ if ((i6 | 0) == 56) {
+ STACKTOP = i3;
+ return i8 | 0;
+ }
+ if ((i16 | 0) > 0 ? !((i11 | 0) != 0 & (i9 | 0) == 1) : 0) {
+ i32 = -1;
+ STACKTOP = i3;
+ return i32 | 0;
+ }
+ HEAP16[i12 + 2 >> 1] = 0;
+ i16 = 0;
+ i15 = 1;
+ do {
+ i16 = (HEAPU16[i7 + (i15 << 1) >> 1] | 0) + (i16 & 65535) | 0;
+ i15 = i15 + 1 | 0;
+ HEAP16[i12 + (i15 << 1) >> 1] = i16;
+ } while ((i15 | 0) != 15);
+ if (!i14) {
+ i15 = 0;
+ do {
+ i14 = HEAP16[i5 + (i15 << 1) >> 1] | 0;
+ if (!(i14 << 16 >> 16 == 0)) {
+ i31 = i12 + ((i14 & 65535) << 1) | 0;
+ i32 = HEAP16[i31 >> 1] | 0;
+ HEAP16[i31 >> 1] = i32 + 1 << 16 >> 16;
+ HEAP16[i10 + ((i32 & 65535) << 1) >> 1] = i15;
+ }
+ i15 = i15 + 1 | 0;
+ } while ((i15 | 0) != (i13 | 0));
+ }
+ if ((i11 | 0) == 1) {
+ i14 = 1 << i4;
+ if (i14 >>> 0 > 851) {
+ i32 = 1;
+ STACKTOP = i3;
+ return i32 | 0;
+ } else {
+ i16 = 0;
+ i20 = 1;
+ i17 = 14128 + -514 | 0;
+ i19 = 256;
+ i18 = 14192 + -514 | 0;
+ }
+ } else if ((i11 | 0) != 0) {
+ i14 = 1 << i4;
+ i16 = (i11 | 0) == 2;
+ if (i16 & i14 >>> 0 > 591) {
+ i32 = 1;
+ STACKTOP = i3;
+ return i32 | 0;
+ } else {
+ i20 = 0;
+ i17 = 14256;
+ i19 = -1;
+ i18 = 14320;
+ }
+ } else {
+ i16 = 0;
+ i14 = 1 << i4;
+ i20 = 0;
+ i17 = i10;
+ i19 = 19;
+ i18 = i10;
+ }
+ i11 = i14 + -1 | 0;
+ i12 = i4 & 255;
+ i22 = i4;
+ i21 = 0;
+ i25 = 0;
+ i13 = -1;
+ i15 = HEAP32[i2 >> 2] | 0;
+ i24 = 0;
+ L44 : while (1) {
+ i23 = 1 << i22;
+ while (1) {
+ i29 = i27 - i21 | 0;
+ i22 = i29 & 255;
+ i28 = HEAP16[i10 + (i24 << 1) >> 1] | 0;
+ i30 = i28 & 65535;
+ if ((i30 | 0) >= (i19 | 0)) {
+ if ((i30 | 0) > (i19 | 0)) {
+ i26 = HEAP16[i18 + (i30 << 1) >> 1] & 255;
+ i28 = HEAP16[i17 + (i30 << 1) >> 1] | 0;
+ } else {
+ i26 = 96;
+ i28 = 0;
+ }
+ } else {
+ i26 = 0;
+ }
+ i31 = 1 << i29;
+ i30 = i25 >>> i21;
+ i32 = i23;
+ while (1) {
+ i29 = i32 - i31 | 0;
+ i33 = i29 + i30 | 0;
+ HEAP8[i15 + (i33 << 2) | 0] = i26;
+ HEAP8[i15 + (i33 << 2) + 1 | 0] = i22;
+ HEAP16[i15 + (i33 << 2) + 2 >> 1] = i28;
+ if ((i32 | 0) == (i31 | 0)) {
+ break;
+ } else {
+ i32 = i29;
+ }
+ }
+ i26 = 1 << i27 + -1;
+ while (1) {
+ if ((i26 & i25 | 0) == 0) {
+ break;
+ } else {
+ i26 = i26 >>> 1;
+ }
+ }
+ if ((i26 | 0) == 0) {
+ i25 = 0;
+ } else {
+ i25 = (i26 + -1 & i25) + i26 | 0;
+ }
+ i24 = i24 + 1 | 0;
+ i32 = i7 + (i27 << 1) | 0;
+ i33 = (HEAP16[i32 >> 1] | 0) + -1 << 16 >> 16;
+ HEAP16[i32 >> 1] = i33;
+ if (i33 << 16 >> 16 == 0) {
+ if ((i27 | 0) == (i9 | 0)) {
+ break L44;
+ }
+ i27 = HEAPU16[i5 + (HEAPU16[i10 + (i24 << 1) >> 1] << 1) >> 1] | 0;
+ }
+ if (!(i27 >>> 0 > i4 >>> 0)) {
+ continue;
+ }
+ i26 = i25 & i11;
+ if ((i26 | 0) != (i13 | 0)) {
+ break;
+ }
+ }
+ i28 = (i21 | 0) == 0 ? i4 : i21;
+ i23 = i15 + (i23 << 2) | 0;
+ i31 = i27 - i28 | 0;
+ L67 : do {
+ if (i27 >>> 0 < i9 >>> 0) {
+ i29 = i27;
+ i30 = i31;
+ i31 = 1 << i31;
+ while (1) {
+ i31 = i31 - (HEAPU16[i7 + (i29 << 1) >> 1] | 0) | 0;
+ if ((i31 | 0) < 1) {
+ break L67;
+ }
+ i30 = i30 + 1 | 0;
+ i29 = i30 + i28 | 0;
+ if (i29 >>> 0 < i9 >>> 0) {
+ i31 = i31 << 1;
+ } else {
+ break;
+ }
+ }
+ } else {
+ i30 = i31;
+ }
+ } while (0);
+ i29 = (1 << i30) + i14 | 0;
+ if (i20 & i29 >>> 0 > 851 | i16 & i29 >>> 0 > 591) {
+ i8 = 1;
+ i6 = 56;
+ break;
+ }
+ HEAP8[(HEAP32[i2 >> 2] | 0) + (i26 << 2) | 0] = i30;
+ HEAP8[(HEAP32[i2 >> 2] | 0) + (i26 << 2) + 1 | 0] = i12;
+ i22 = HEAP32[i2 >> 2] | 0;
+ HEAP16[i22 + (i26 << 2) + 2 >> 1] = (i23 - i22 | 0) >>> 2;
+ i22 = i30;
+ i21 = i28;
+ i13 = i26;
+ i15 = i23;
+ i14 = i29;
+ }
+ if ((i6 | 0) == 56) {
+ STACKTOP = i3;
+ return i8 | 0;
+ }
+ L77 : do {
+ if ((i25 | 0) != 0) {
+ do {
+ if ((i21 | 0) != 0) {
+ if ((i25 & i11 | 0) != (i13 | 0)) {
+ i21 = 0;
+ i22 = i12;
+ i9 = i4;
+ i15 = HEAP32[i2 >> 2] | 0;
+ }
+ } else {
+ i21 = 0;
+ }
+ i5 = i25 >>> i21;
+ HEAP8[i15 + (i5 << 2) | 0] = 64;
+ HEAP8[i15 + (i5 << 2) + 1 | 0] = i22;
+ HEAP16[i15 + (i5 << 2) + 2 >> 1] = 0;
+ i5 = 1 << i9 + -1;
+ while (1) {
+ if ((i5 & i25 | 0) == 0) {
+ break;
+ } else {
+ i5 = i5 >>> 1;
+ }
+ }
+ if ((i5 | 0) == 0) {
+ break L77;
+ }
+ i25 = (i5 + -1 & i25) + i5 | 0;
+ } while ((i25 | 0) != 0);
+ }
+ } while (0);
+ HEAP32[i2 >> 2] = (HEAP32[i2 >> 2] | 0) + (i14 << 2);
+ HEAP32[i1 >> 2] = i4;
+ i33 = 0;
+ STACKTOP = i3;
+ return i33 | 0;
+}
+function _compress_block(i1, i3, i7) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ i7 = i7 | 0;
+ var i2 = 0, i4 = 0, i5 = 0, i6 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0;
+ i2 = STACKTOP;
+ i11 = i1 + 5792 | 0;
+ if ((HEAP32[i11 >> 2] | 0) == 0) {
+ i14 = HEAP32[i1 + 5820 >> 2] | 0;
+ i17 = HEAP16[i1 + 5816 >> 1] | 0;
+ } else {
+ i9 = i1 + 5796 | 0;
+ i10 = i1 + 5784 | 0;
+ i8 = i1 + 5820 | 0;
+ i12 = i1 + 5816 | 0;
+ i5 = i1 + 20 | 0;
+ i6 = i1 + 8 | 0;
+ i14 = 0;
+ while (1) {
+ i20 = HEAP16[(HEAP32[i9 >> 2] | 0) + (i14 << 1) >> 1] | 0;
+ i13 = i20 & 65535;
+ i4 = i14 + 1 | 0;
+ i14 = HEAPU8[(HEAP32[i10 >> 2] | 0) + i14 | 0] | 0;
+ do {
+ if (i20 << 16 >> 16 == 0) {
+ i15 = HEAPU16[i3 + (i14 << 2) + 2 >> 1] | 0;
+ i13 = HEAP32[i8 >> 2] | 0;
+ i14 = HEAPU16[i3 + (i14 << 2) >> 1] | 0;
+ i16 = HEAPU16[i12 >> 1] | 0 | i14 << i13;
+ i17 = i16 & 65535;
+ HEAP16[i12 >> 1] = i17;
+ if ((i13 | 0) > (16 - i15 | 0)) {
+ i17 = HEAP32[i5 >> 2] | 0;
+ HEAP32[i5 >> 2] = i17 + 1;
+ HEAP8[(HEAP32[i6 >> 2] | 0) + i17 | 0] = i16;
+ i17 = (HEAPU16[i12 >> 1] | 0) >>> 8 & 255;
+ i20 = HEAP32[i5 >> 2] | 0;
+ HEAP32[i5 >> 2] = i20 + 1;
+ HEAP8[(HEAP32[i6 >> 2] | 0) + i20 | 0] = i17;
+ i20 = HEAP32[i8 >> 2] | 0;
+ i17 = i14 >>> (16 - i20 | 0) & 65535;
+ HEAP16[i12 >> 1] = i17;
+ i14 = i15 + -16 + i20 | 0;
+ HEAP32[i8 >> 2] = i14;
+ break;
+ } else {
+ i14 = i13 + i15 | 0;
+ HEAP32[i8 >> 2] = i14;
+ break;
+ }
+ } else {
+ i15 = HEAPU8[808 + i14 | 0] | 0;
+ i19 = (i15 | 256) + 1 | 0;
+ i18 = HEAPU16[i3 + (i19 << 2) + 2 >> 1] | 0;
+ i17 = HEAP32[i8 >> 2] | 0;
+ i19 = HEAPU16[i3 + (i19 << 2) >> 1] | 0;
+ i20 = HEAPU16[i12 >> 1] | 0 | i19 << i17;
+ i16 = i20 & 65535;
+ HEAP16[i12 >> 1] = i16;
+ if ((i17 | 0) > (16 - i18 | 0)) {
+ i16 = HEAP32[i5 >> 2] | 0;
+ HEAP32[i5 >> 2] = i16 + 1;
+ HEAP8[(HEAP32[i6 >> 2] | 0) + i16 | 0] = i20;
+ i16 = (HEAPU16[i12 >> 1] | 0) >>> 8 & 255;
+ i20 = HEAP32[i5 >> 2] | 0;
+ HEAP32[i5 >> 2] = i20 + 1;
+ HEAP8[(HEAP32[i6 >> 2] | 0) + i20 | 0] = i16;
+ i20 = HEAP32[i8 >> 2] | 0;
+ i16 = i19 >>> (16 - i20 | 0) & 65535;
+ HEAP16[i12 >> 1] = i16;
+ i18 = i18 + -16 + i20 | 0;
+ } else {
+ i18 = i17 + i18 | 0;
+ }
+ HEAP32[i8 >> 2] = i18;
+ i17 = HEAP32[2408 + (i15 << 2) >> 2] | 0;
+ do {
+ if ((i15 + -8 | 0) >>> 0 < 20) {
+ i14 = i14 - (HEAP32[2528 + (i15 << 2) >> 2] | 0) & 65535;
+ i15 = i14 << i18 | i16 & 65535;
+ i16 = i15 & 65535;
+ HEAP16[i12 >> 1] = i16;
+ if ((i18 | 0) > (16 - i17 | 0)) {
+ i16 = HEAP32[i5 >> 2] | 0;
+ HEAP32[i5 >> 2] = i16 + 1;
+ HEAP8[(HEAP32[i6 >> 2] | 0) + i16 | 0] = i15;
+ i16 = (HEAPU16[i12 >> 1] | 0) >>> 8 & 255;
+ i20 = HEAP32[i5 >> 2] | 0;
+ HEAP32[i5 >> 2] = i20 + 1;
+ HEAP8[(HEAP32[i6 >> 2] | 0) + i20 | 0] = i16;
+ i20 = HEAP32[i8 >> 2] | 0;
+ i16 = i14 >>> (16 - i20 | 0) & 65535;
+ HEAP16[i12 >> 1] = i16;
+ i14 = i17 + -16 + i20 | 0;
+ HEAP32[i8 >> 2] = i14;
+ break;
+ } else {
+ i14 = i18 + i17 | 0;
+ HEAP32[i8 >> 2] = i14;
+ break;
+ }
+ } else {
+ i14 = i18;
+ }
+ } while (0);
+ i13 = i13 + -1 | 0;
+ if (i13 >>> 0 < 256) {
+ i15 = i13;
+ } else {
+ i15 = (i13 >>> 7) + 256 | 0;
+ }
+ i15 = HEAPU8[296 + i15 | 0] | 0;
+ i17 = HEAPU16[i7 + (i15 << 2) + 2 >> 1] | 0;
+ i18 = HEAPU16[i7 + (i15 << 2) >> 1] | 0;
+ i19 = i16 & 65535 | i18 << i14;
+ i16 = i19 & 65535;
+ HEAP16[i12 >> 1] = i16;
+ if ((i14 | 0) > (16 - i17 | 0)) {
+ i20 = HEAP32[i5 >> 2] | 0;
+ HEAP32[i5 >> 2] = i20 + 1;
+ HEAP8[(HEAP32[i6 >> 2] | 0) + i20 | 0] = i19;
+ i20 = (HEAPU16[i12 >> 1] | 0) >>> 8 & 255;
+ i14 = HEAP32[i5 >> 2] | 0;
+ HEAP32[i5 >> 2] = i14 + 1;
+ HEAP8[(HEAP32[i6 >> 2] | 0) + i14 | 0] = i20;
+ i14 = HEAP32[i8 >> 2] | 0;
+ i20 = i18 >>> (16 - i14 | 0) & 65535;
+ HEAP16[i12 >> 1] = i20;
+ i14 = i17 + -16 + i14 | 0;
+ i17 = i20;
+ } else {
+ i14 = i14 + i17 | 0;
+ i17 = i16;
+ }
+ HEAP32[i8 >> 2] = i14;
+ i16 = HEAP32[2648 + (i15 << 2) >> 2] | 0;
+ if ((i15 + -4 | 0) >>> 0 < 26) {
+ i13 = i13 - (HEAP32[2768 + (i15 << 2) >> 2] | 0) & 65535;
+ i15 = i13 << i14 | i17 & 65535;
+ i17 = i15 & 65535;
+ HEAP16[i12 >> 1] = i17;
+ if ((i14 | 0) > (16 - i16 | 0)) {
+ i17 = HEAP32[i5 >> 2] | 0;
+ HEAP32[i5 >> 2] = i17 + 1;
+ HEAP8[(HEAP32[i6 >> 2] | 0) + i17 | 0] = i15;
+ i17 = (HEAPU16[i12 >> 1] | 0) >>> 8 & 255;
+ i14 = HEAP32[i5 >> 2] | 0;
+ HEAP32[i5 >> 2] = i14 + 1;
+ HEAP8[(HEAP32[i6 >> 2] | 0) + i14 | 0] = i17;
+ i14 = HEAP32[i8 >> 2] | 0;
+ i17 = i13 >>> (16 - i14 | 0) & 65535;
+ HEAP16[i12 >> 1] = i17;
+ i14 = i16 + -16 + i14 | 0;
+ HEAP32[i8 >> 2] = i14;
+ break;
+ } else {
+ i14 = i14 + i16 | 0;
+ HEAP32[i8 >> 2] = i14;
+ break;
+ }
+ }
+ }
+ } while (0);
+ if (i4 >>> 0 < (HEAP32[i11 >> 2] | 0) >>> 0) {
+ i14 = i4;
+ } else {
+ break;
+ }
+ }
+ }
+ i5 = i3 + 1026 | 0;
+ i6 = HEAPU16[i5 >> 1] | 0;
+ i4 = i1 + 5820 | 0;
+ i3 = HEAPU16[i3 + 1024 >> 1] | 0;
+ i7 = i1 + 5816 | 0;
+ i8 = i17 & 65535 | i3 << i14;
+ HEAP16[i7 >> 1] = i8;
+ if ((i14 | 0) > (16 - i6 | 0)) {
+ i17 = i1 + 20 | 0;
+ i18 = HEAP32[i17 >> 2] | 0;
+ HEAP32[i17 >> 2] = i18 + 1;
+ i20 = i1 + 8 | 0;
+ HEAP8[(HEAP32[i20 >> 2] | 0) + i18 | 0] = i8;
+ i18 = (HEAPU16[i7 >> 1] | 0) >>> 8 & 255;
+ i19 = HEAP32[i17 >> 2] | 0;
+ HEAP32[i17 >> 2] = i19 + 1;
+ HEAP8[(HEAP32[i20 >> 2] | 0) + i19 | 0] = i18;
+ i19 = HEAP32[i4 >> 2] | 0;
+ HEAP16[i7 >> 1] = i3 >>> (16 - i19 | 0);
+ i19 = i6 + -16 + i19 | 0;
+ HEAP32[i4 >> 2] = i19;
+ i19 = HEAP16[i5 >> 1] | 0;
+ i19 = i19 & 65535;
+ i20 = i1 + 5812 | 0;
+ HEAP32[i20 >> 2] = i19;
+ STACKTOP = i2;
+ return;
+ } else {
+ i19 = i14 + i6 | 0;
+ HEAP32[i4 >> 2] = i19;
+ i19 = HEAP16[i5 >> 1] | 0;
+ i19 = i19 & 65535;
+ i20 = i1 + 5812 | 0;
+ HEAP32[i20 >> 2] = i19;
+ STACKTOP = i2;
+ return;
+ }
+}
+function _deflate_stored(i2, i5) {
+ i2 = i2 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i3 = 0, i4 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0;
+ i1 = STACKTOP;
+ i4 = (HEAP32[i2 + 12 >> 2] | 0) + -5 | 0;
+ i11 = i4 >>> 0 < 65535 ? i4 : 65535;
+ i12 = i2 + 116 | 0;
+ i4 = i2 + 108 | 0;
+ i6 = i2 + 92 | 0;
+ i10 = i2 + 44 | 0;
+ i7 = i2 + 56 | 0;
+ while (1) {
+ i13 = HEAP32[i12 >> 2] | 0;
+ if (i13 >>> 0 < 2) {
+ _fill_window(i2);
+ i13 = HEAP32[i12 >> 2] | 0;
+ if ((i13 | i5 | 0) == 0) {
+ i2 = 0;
+ i8 = 28;
+ break;
+ }
+ if ((i13 | 0) == 0) {
+ i8 = 20;
+ break;
+ }
+ }
+ i13 = (HEAP32[i4 >> 2] | 0) + i13 | 0;
+ HEAP32[i4 >> 2] = i13;
+ HEAP32[i12 >> 2] = 0;
+ i14 = HEAP32[i6 >> 2] | 0;
+ i15 = i14 + i11 | 0;
+ if (!((i13 | 0) != 0 & i13 >>> 0 < i15 >>> 0)) {
+ HEAP32[i12 >> 2] = i13 - i15;
+ HEAP32[i4 >> 2] = i15;
+ if ((i14 | 0) > -1) {
+ i13 = (HEAP32[i7 >> 2] | 0) + i14 | 0;
+ } else {
+ i13 = 0;
+ }
+ __tr_flush_block(i2, i13, i11, 0);
+ HEAP32[i6 >> 2] = HEAP32[i4 >> 2];
+ i16 = HEAP32[i2 >> 2] | 0;
+ i14 = i16 + 28 | 0;
+ i15 = HEAP32[i14 >> 2] | 0;
+ i17 = HEAP32[i15 + 20 >> 2] | 0;
+ i13 = i16 + 16 | 0;
+ i18 = HEAP32[i13 >> 2] | 0;
+ i17 = i17 >>> 0 > i18 >>> 0 ? i18 : i17;
+ if ((i17 | 0) != 0 ? (i8 = i16 + 12 | 0, _memcpy(HEAP32[i8 >> 2] | 0, HEAP32[i15 + 16 >> 2] | 0, i17 | 0) | 0, HEAP32[i8 >> 2] = (HEAP32[i8 >> 2] | 0) + i17, i8 = (HEAP32[i14 >> 2] | 0) + 16 | 0, HEAP32[i8 >> 2] = (HEAP32[i8 >> 2] | 0) + i17, i8 = i16 + 20 | 0, HEAP32[i8 >> 2] = (HEAP32[i8 >> 2] | 0) + i17, HEAP32[i13 >> 2] = (HEAP32[i13 >> 2] | 0) - i17, i8 = HEAP32[i14 >> 2] | 0, i16 = i8 + 20 | 0, i18 = HEAP32[i16 >> 2] | 0, HEAP32[i16 >> 2] = i18 - i17, (i18 | 0) == (i17 | 0)) : 0) {
+ HEAP32[i8 + 16 >> 2] = HEAP32[i8 + 8 >> 2];
+ }
+ if ((HEAP32[(HEAP32[i2 >> 2] | 0) + 16 >> 2] | 0) == 0) {
+ i2 = 0;
+ i8 = 28;
+ break;
+ }
+ i14 = HEAP32[i6 >> 2] | 0;
+ i13 = HEAP32[i4 >> 2] | 0;
+ }
+ i13 = i13 - i14 | 0;
+ if (i13 >>> 0 < ((HEAP32[i10 >> 2] | 0) + -262 | 0) >>> 0) {
+ continue;
+ }
+ if ((i14 | 0) > -1) {
+ i14 = (HEAP32[i7 >> 2] | 0) + i14 | 0;
+ } else {
+ i14 = 0;
+ }
+ __tr_flush_block(i2, i14, i13, 0);
+ HEAP32[i6 >> 2] = HEAP32[i4 >> 2];
+ i16 = HEAP32[i2 >> 2] | 0;
+ i14 = i16 + 28 | 0;
+ i15 = HEAP32[i14 >> 2] | 0;
+ i17 = HEAP32[i15 + 20 >> 2] | 0;
+ i13 = i16 + 16 | 0;
+ i18 = HEAP32[i13 >> 2] | 0;
+ i17 = i17 >>> 0 > i18 >>> 0 ? i18 : i17;
+ if ((i17 | 0) != 0 ? (i9 = i16 + 12 | 0, _memcpy(HEAP32[i9 >> 2] | 0, HEAP32[i15 + 16 >> 2] | 0, i17 | 0) | 0, HEAP32[i9 >> 2] = (HEAP32[i9 >> 2] | 0) + i17, i9 = (HEAP32[i14 >> 2] | 0) + 16 | 0, HEAP32[i9 >> 2] = (HEAP32[i9 >> 2] | 0) + i17, i9 = i16 + 20 | 0, HEAP32[i9 >> 2] = (HEAP32[i9 >> 2] | 0) + i17, HEAP32[i13 >> 2] = (HEAP32[i13 >> 2] | 0) - i17, i9 = HEAP32[i14 >> 2] | 0, i16 = i9 + 20 | 0, i18 = HEAP32[i16 >> 2] | 0, HEAP32[i16 >> 2] = i18 - i17, (i18 | 0) == (i17 | 0)) : 0) {
+ HEAP32[i9 + 16 >> 2] = HEAP32[i9 + 8 >> 2];
+ }
+ if ((HEAP32[(HEAP32[i2 >> 2] | 0) + 16 >> 2] | 0) == 0) {
+ i2 = 0;
+ i8 = 28;
+ break;
+ }
+ }
+ if ((i8 | 0) == 20) {
+ i8 = HEAP32[i6 >> 2] | 0;
+ if ((i8 | 0) > -1) {
+ i7 = (HEAP32[i7 >> 2] | 0) + i8 | 0;
+ } else {
+ i7 = 0;
+ }
+ i5 = (i5 | 0) == 4;
+ __tr_flush_block(i2, i7, (HEAP32[i4 >> 2] | 0) - i8 | 0, i5 & 1);
+ HEAP32[i6 >> 2] = HEAP32[i4 >> 2];
+ i4 = HEAP32[i2 >> 2] | 0;
+ i7 = i4 + 28 | 0;
+ i6 = HEAP32[i7 >> 2] | 0;
+ i9 = HEAP32[i6 + 20 >> 2] | 0;
+ i8 = i4 + 16 | 0;
+ i10 = HEAP32[i8 >> 2] | 0;
+ i9 = i9 >>> 0 > i10 >>> 0 ? i10 : i9;
+ if ((i9 | 0) != 0 ? (i3 = i4 + 12 | 0, _memcpy(HEAP32[i3 >> 2] | 0, HEAP32[i6 + 16 >> 2] | 0, i9 | 0) | 0, HEAP32[i3 >> 2] = (HEAP32[i3 >> 2] | 0) + i9, i3 = (HEAP32[i7 >> 2] | 0) + 16 | 0, HEAP32[i3 >> 2] = (HEAP32[i3 >> 2] | 0) + i9, i3 = i4 + 20 | 0, HEAP32[i3 >> 2] = (HEAP32[i3 >> 2] | 0) + i9, HEAP32[i8 >> 2] = (HEAP32[i8 >> 2] | 0) - i9, i3 = HEAP32[i7 >> 2] | 0, i17 = i3 + 20 | 0, i18 = HEAP32[i17 >> 2] | 0, HEAP32[i17 >> 2] = i18 - i9, (i18 | 0) == (i9 | 0)) : 0) {
+ HEAP32[i3 + 16 >> 2] = HEAP32[i3 + 8 >> 2];
+ }
+ if ((HEAP32[(HEAP32[i2 >> 2] | 0) + 16 >> 2] | 0) == 0) {
+ i18 = i5 ? 2 : 0;
+ STACKTOP = i1;
+ return i18 | 0;
+ } else {
+ i18 = i5 ? 3 : 1;
+ STACKTOP = i1;
+ return i18 | 0;
+ }
+ } else if ((i8 | 0) == 28) {
+ STACKTOP = i1;
+ return i2 | 0;
+ }
+ return 0;
+}
+function _fill_window(i15) {
+ i15 = i15 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0, i23 = 0, i24 = 0;
+ i2 = STACKTOP;
+ i16 = i15 + 44 | 0;
+ i9 = HEAP32[i16 >> 2] | 0;
+ i4 = i15 + 60 | 0;
+ i8 = i15 + 116 | 0;
+ i3 = i15 + 108 | 0;
+ i5 = i9 + -262 | 0;
+ i1 = i15 + 56 | 0;
+ i17 = i15 + 72 | 0;
+ i6 = i15 + 88 | 0;
+ i7 = i15 + 84 | 0;
+ i11 = i15 + 112 | 0;
+ i12 = i15 + 92 | 0;
+ i13 = i15 + 76 | 0;
+ i14 = i15 + 68 | 0;
+ i10 = i15 + 64 | 0;
+ i19 = HEAP32[i8 >> 2] | 0;
+ i21 = i9;
+ while (1) {
+ i20 = HEAP32[i3 >> 2] | 0;
+ i19 = (HEAP32[i4 >> 2] | 0) - i19 - i20 | 0;
+ if (!(i20 >>> 0 < (i5 + i21 | 0) >>> 0)) {
+ i20 = HEAP32[i1 >> 2] | 0;
+ _memcpy(i20 | 0, i20 + i9 | 0, i9 | 0) | 0;
+ HEAP32[i11 >> 2] = (HEAP32[i11 >> 2] | 0) - i9;
+ i20 = (HEAP32[i3 >> 2] | 0) - i9 | 0;
+ HEAP32[i3 >> 2] = i20;
+ HEAP32[i12 >> 2] = (HEAP32[i12 >> 2] | 0) - i9;
+ i22 = HEAP32[i13 >> 2] | 0;
+ i21 = i22;
+ i22 = (HEAP32[i14 >> 2] | 0) + (i22 << 1) | 0;
+ do {
+ i22 = i22 + -2 | 0;
+ i23 = HEAPU16[i22 >> 1] | 0;
+ if (i23 >>> 0 < i9 >>> 0) {
+ i23 = 0;
+ } else {
+ i23 = i23 - i9 & 65535;
+ }
+ HEAP16[i22 >> 1] = i23;
+ i21 = i21 + -1 | 0;
+ } while ((i21 | 0) != 0);
+ i22 = i9;
+ i21 = (HEAP32[i10 >> 2] | 0) + (i9 << 1) | 0;
+ do {
+ i21 = i21 + -2 | 0;
+ i23 = HEAPU16[i21 >> 1] | 0;
+ if (i23 >>> 0 < i9 >>> 0) {
+ i23 = 0;
+ } else {
+ i23 = i23 - i9 & 65535;
+ }
+ HEAP16[i21 >> 1] = i23;
+ i22 = i22 + -1 | 0;
+ } while ((i22 | 0) != 0);
+ i19 = i19 + i9 | 0;
+ }
+ i21 = HEAP32[i15 >> 2] | 0;
+ i24 = i21 + 4 | 0;
+ i23 = HEAP32[i24 >> 2] | 0;
+ if ((i23 | 0) == 0) {
+ i18 = 28;
+ break;
+ }
+ i22 = HEAP32[i8 >> 2] | 0;
+ i20 = (HEAP32[i1 >> 2] | 0) + (i22 + i20) | 0;
+ i19 = i23 >>> 0 > i19 >>> 0 ? i19 : i23;
+ if ((i19 | 0) == 0) {
+ i19 = 0;
+ } else {
+ HEAP32[i24 >> 2] = i23 - i19;
+ i22 = HEAP32[(HEAP32[i21 + 28 >> 2] | 0) + 24 >> 2] | 0;
+ if ((i22 | 0) == 1) {
+ i22 = i21 + 48 | 0;
+ HEAP32[i22 >> 2] = _adler32(HEAP32[i22 >> 2] | 0, HEAP32[i21 >> 2] | 0, i19) | 0;
+ i22 = i21;
+ } else if ((i22 | 0) == 2) {
+ i22 = i21 + 48 | 0;
+ HEAP32[i22 >> 2] = _crc32(HEAP32[i22 >> 2] | 0, HEAP32[i21 >> 2] | 0, i19) | 0;
+ i22 = i21;
+ } else {
+ i22 = i21;
+ }
+ _memcpy(i20 | 0, HEAP32[i22 >> 2] | 0, i19 | 0) | 0;
+ HEAP32[i22 >> 2] = (HEAP32[i22 >> 2] | 0) + i19;
+ i22 = i21 + 8 | 0;
+ HEAP32[i22 >> 2] = (HEAP32[i22 >> 2] | 0) + i19;
+ i22 = HEAP32[i8 >> 2] | 0;
+ }
+ i19 = i22 + i19 | 0;
+ HEAP32[i8 >> 2] = i19;
+ if (i19 >>> 0 > 2 ? (i23 = HEAP32[i3 >> 2] | 0, i22 = HEAP32[i1 >> 2] | 0, i24 = HEAPU8[i22 + i23 | 0] | 0, HEAP32[i17 >> 2] = i24, HEAP32[i17 >> 2] = ((HEAPU8[i22 + (i23 + 1) | 0] | 0) ^ i24 << HEAP32[i6 >> 2]) & HEAP32[i7 >> 2], !(i19 >>> 0 < 262)) : 0) {
+ break;
+ }
+ if ((HEAP32[(HEAP32[i15 >> 2] | 0) + 4 >> 2] | 0) == 0) {
+ break;
+ }
+ i21 = HEAP32[i16 >> 2] | 0;
+ }
+ if ((i18 | 0) == 28) {
+ STACKTOP = i2;
+ return;
+ }
+ i5 = i15 + 5824 | 0;
+ i6 = HEAP32[i5 >> 2] | 0;
+ i4 = HEAP32[i4 >> 2] | 0;
+ if (!(i6 >>> 0 < i4 >>> 0)) {
+ STACKTOP = i2;
+ return;
+ }
+ i3 = i19 + (HEAP32[i3 >> 2] | 0) | 0;
+ if (i6 >>> 0 < i3 >>> 0) {
+ i4 = i4 - i3 | 0;
+ i24 = i4 >>> 0 > 258 ? 258 : i4;
+ _memset((HEAP32[i1 >> 2] | 0) + i3 | 0, 0, i24 | 0) | 0;
+ HEAP32[i5 >> 2] = i24 + i3;
+ STACKTOP = i2;
+ return;
+ }
+ i3 = i3 + 258 | 0;
+ if (!(i6 >>> 0 < i3 >>> 0)) {
+ STACKTOP = i2;
+ return;
+ }
+ i3 = i3 - i6 | 0;
+ i4 = i4 - i6 | 0;
+ i24 = i3 >>> 0 > i4 >>> 0 ? i4 : i3;
+ _memset((HEAP32[i1 >> 2] | 0) + i6 | 0, 0, i24 | 0) | 0;
+ HEAP32[i5 >> 2] = (HEAP32[i5 >> 2] | 0) + i24;
+ STACKTOP = i2;
+ return;
+}
+function __tr_align(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0;
+ i2 = STACKTOP;
+ i3 = i1 + 5820 | 0;
+ i6 = HEAP32[i3 >> 2] | 0;
+ i4 = i1 + 5816 | 0;
+ i7 = HEAPU16[i4 >> 1] | 0 | 2 << i6;
+ i5 = i7 & 65535;
+ HEAP16[i4 >> 1] = i5;
+ if ((i6 | 0) > 13) {
+ i8 = i1 + 20 | 0;
+ i6 = HEAP32[i8 >> 2] | 0;
+ HEAP32[i8 >> 2] = i6 + 1;
+ i5 = i1 + 8 | 0;
+ HEAP8[(HEAP32[i5 >> 2] | 0) + i6 | 0] = i7;
+ i7 = (HEAPU16[i4 >> 1] | 0) >>> 8 & 255;
+ i6 = HEAP32[i8 >> 2] | 0;
+ HEAP32[i8 >> 2] = i6 + 1;
+ HEAP8[(HEAP32[i5 >> 2] | 0) + i6 | 0] = i7;
+ i6 = HEAP32[i3 >> 2] | 0;
+ i5 = 2 >>> (16 - i6 | 0) & 65535;
+ HEAP16[i4 >> 1] = i5;
+ i6 = i6 + -13 | 0;
+ } else {
+ i6 = i6 + 3 | 0;
+ }
+ HEAP32[i3 >> 2] = i6;
+ if ((i6 | 0) > 9) {
+ i7 = i1 + 20 | 0;
+ i6 = HEAP32[i7 >> 2] | 0;
+ HEAP32[i7 >> 2] = i6 + 1;
+ i8 = i1 + 8 | 0;
+ HEAP8[(HEAP32[i8 >> 2] | 0) + i6 | 0] = i5;
+ i5 = (HEAPU16[i4 >> 1] | 0) >>> 8 & 255;
+ i6 = HEAP32[i7 >> 2] | 0;
+ HEAP32[i7 >> 2] = i6 + 1;
+ HEAP8[(HEAP32[i8 >> 2] | 0) + i6 | 0] = i5;
+ HEAP16[i4 >> 1] = 0;
+ i6 = (HEAP32[i3 >> 2] | 0) + -9 | 0;
+ i5 = 0;
+ } else {
+ i6 = i6 + 7 | 0;
+ }
+ HEAP32[i3 >> 2] = i6;
+ if ((i6 | 0) != 16) {
+ if ((i6 | 0) > 7) {
+ i6 = i1 + 20 | 0;
+ i7 = HEAP32[i6 >> 2] | 0;
+ HEAP32[i6 >> 2] = i7 + 1;
+ HEAP8[(HEAP32[i1 + 8 >> 2] | 0) + i7 | 0] = i5;
+ i7 = (HEAPU16[i4 >> 1] | 0) >>> 8;
+ HEAP16[i4 >> 1] = i7;
+ i6 = (HEAP32[i3 >> 2] | 0) + -8 | 0;
+ HEAP32[i3 >> 2] = i6;
+ } else {
+ i7 = i5;
+ }
+ } else {
+ i9 = i1 + 20 | 0;
+ i8 = HEAP32[i9 >> 2] | 0;
+ HEAP32[i9 >> 2] = i8 + 1;
+ i7 = i1 + 8 | 0;
+ HEAP8[(HEAP32[i7 >> 2] | 0) + i8 | 0] = i5;
+ i8 = (HEAPU16[i4 >> 1] | 0) >>> 8 & 255;
+ i6 = HEAP32[i9 >> 2] | 0;
+ HEAP32[i9 >> 2] = i6 + 1;
+ HEAP8[(HEAP32[i7 >> 2] | 0) + i6 | 0] = i8;
+ HEAP16[i4 >> 1] = 0;
+ HEAP32[i3 >> 2] = 0;
+ i6 = 0;
+ i7 = 0;
+ }
+ i5 = i1 + 5812 | 0;
+ if ((11 - i6 + (HEAP32[i5 >> 2] | 0) | 0) >= 9) {
+ HEAP32[i5 >> 2] = 7;
+ STACKTOP = i2;
+ return;
+ }
+ i7 = i7 & 65535 | 2 << i6;
+ HEAP16[i4 >> 1] = i7;
+ if ((i6 | 0) > 13) {
+ i8 = i1 + 20 | 0;
+ i6 = HEAP32[i8 >> 2] | 0;
+ HEAP32[i8 >> 2] = i6 + 1;
+ i9 = i1 + 8 | 0;
+ HEAP8[(HEAP32[i9 >> 2] | 0) + i6 | 0] = i7;
+ i7 = (HEAPU16[i4 >> 1] | 0) >>> 8 & 255;
+ i6 = HEAP32[i8 >> 2] | 0;
+ HEAP32[i8 >> 2] = i6 + 1;
+ HEAP8[(HEAP32[i9 >> 2] | 0) + i6 | 0] = i7;
+ i6 = HEAP32[i3 >> 2] | 0;
+ i7 = 2 >>> (16 - i6 | 0);
+ HEAP16[i4 >> 1] = i7;
+ i6 = i6 + -13 | 0;
+ } else {
+ i6 = i6 + 3 | 0;
+ }
+ i7 = i7 & 255;
+ HEAP32[i3 >> 2] = i6;
+ if ((i6 | 0) > 9) {
+ i8 = i1 + 20 | 0;
+ i9 = HEAP32[i8 >> 2] | 0;
+ HEAP32[i8 >> 2] = i9 + 1;
+ i6 = i1 + 8 | 0;
+ HEAP8[(HEAP32[i6 >> 2] | 0) + i9 | 0] = i7;
+ i9 = (HEAPU16[i4 >> 1] | 0) >>> 8 & 255;
+ i7 = HEAP32[i8 >> 2] | 0;
+ HEAP32[i8 >> 2] = i7 + 1;
+ HEAP8[(HEAP32[i6 >> 2] | 0) + i7 | 0] = i9;
+ HEAP16[i4 >> 1] = 0;
+ i7 = 0;
+ i6 = (HEAP32[i3 >> 2] | 0) + -9 | 0;
+ } else {
+ i6 = i6 + 7 | 0;
+ }
+ HEAP32[i3 >> 2] = i6;
+ if ((i6 | 0) == 16) {
+ i6 = i1 + 20 | 0;
+ i9 = HEAP32[i6 >> 2] | 0;
+ HEAP32[i6 >> 2] = i9 + 1;
+ i8 = i1 + 8 | 0;
+ HEAP8[(HEAP32[i8 >> 2] | 0) + i9 | 0] = i7;
+ i7 = (HEAPU16[i4 >> 1] | 0) >>> 8 & 255;
+ i9 = HEAP32[i6 >> 2] | 0;
+ HEAP32[i6 >> 2] = i9 + 1;
+ HEAP8[(HEAP32[i8 >> 2] | 0) + i9 | 0] = i7;
+ HEAP16[i4 >> 1] = 0;
+ HEAP32[i3 >> 2] = 0;
+ HEAP32[i5 >> 2] = 7;
+ STACKTOP = i2;
+ return;
+ }
+ if ((i6 | 0) <= 7) {
+ HEAP32[i5 >> 2] = 7;
+ STACKTOP = i2;
+ return;
+ }
+ i8 = i1 + 20 | 0;
+ i9 = HEAP32[i8 >> 2] | 0;
+ HEAP32[i8 >> 2] = i9 + 1;
+ HEAP8[(HEAP32[i1 + 8 >> 2] | 0) + i9 | 0] = i7;
+ HEAP16[i4 >> 1] = (HEAPU16[i4 >> 1] | 0) >>> 8;
+ HEAP32[i3 >> 2] = (HEAP32[i3 >> 2] | 0) + -8;
+ HEAP32[i5 >> 2] = 7;
+ STACKTOP = i2;
+ return;
+}
+function _adler32(i6, i4, i5) {
+ i6 = i6 | 0;
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0, i17 = 0, i18 = 0, i19 = 0, i20 = 0, i21 = 0, i22 = 0, i23 = 0;
+ i1 = STACKTOP;
+ i3 = i6 >>> 16;
+ i6 = i6 & 65535;
+ if ((i5 | 0) == 1) {
+ i2 = (HEAPU8[i4] | 0) + i6 | 0;
+ i2 = i2 >>> 0 > 65520 ? i2 + -65521 | 0 : i2;
+ i3 = i2 + i3 | 0;
+ i8 = (i3 >>> 0 > 65520 ? i3 + 15 | 0 : i3) << 16 | i2;
+ STACKTOP = i1;
+ return i8 | 0;
+ }
+ if ((i4 | 0) == 0) {
+ i8 = 1;
+ STACKTOP = i1;
+ return i8 | 0;
+ }
+ if (i5 >>> 0 < 16) {
+ if ((i5 | 0) != 0) {
+ while (1) {
+ i5 = i5 + -1 | 0;
+ i6 = (HEAPU8[i4] | 0) + i6 | 0;
+ i3 = i6 + i3 | 0;
+ if ((i5 | 0) == 0) {
+ break;
+ } else {
+ i4 = i4 + 1 | 0;
+ }
+ }
+ }
+ i8 = ((i3 >>> 0) % 65521 | 0) << 16 | (i6 >>> 0 > 65520 ? i6 + -65521 | 0 : i6);
+ STACKTOP = i1;
+ return i8 | 0;
+ }
+ if (i5 >>> 0 > 5551) {
+ do {
+ i5 = i5 + -5552 | 0;
+ i7 = i4;
+ i8 = 347;
+ while (1) {
+ i23 = (HEAPU8[i7] | 0) + i6 | 0;
+ i22 = i23 + (HEAPU8[i7 + 1 | 0] | 0) | 0;
+ i21 = i22 + (HEAPU8[i7 + 2 | 0] | 0) | 0;
+ i20 = i21 + (HEAPU8[i7 + 3 | 0] | 0) | 0;
+ i19 = i20 + (HEAPU8[i7 + 4 | 0] | 0) | 0;
+ i18 = i19 + (HEAPU8[i7 + 5 | 0] | 0) | 0;
+ i17 = i18 + (HEAPU8[i7 + 6 | 0] | 0) | 0;
+ i16 = i17 + (HEAPU8[i7 + 7 | 0] | 0) | 0;
+ i15 = i16 + (HEAPU8[i7 + 8 | 0] | 0) | 0;
+ i14 = i15 + (HEAPU8[i7 + 9 | 0] | 0) | 0;
+ i13 = i14 + (HEAPU8[i7 + 10 | 0] | 0) | 0;
+ i12 = i13 + (HEAPU8[i7 + 11 | 0] | 0) | 0;
+ i11 = i12 + (HEAPU8[i7 + 12 | 0] | 0) | 0;
+ i10 = i11 + (HEAPU8[i7 + 13 | 0] | 0) | 0;
+ i9 = i10 + (HEAPU8[i7 + 14 | 0] | 0) | 0;
+ i6 = i9 + (HEAPU8[i7 + 15 | 0] | 0) | 0;
+ i3 = i23 + i3 + i22 + i21 + i20 + i19 + i18 + i17 + i16 + i15 + i14 + i13 + i12 + i11 + i10 + i9 + i6 | 0;
+ i8 = i8 + -1 | 0;
+ if ((i8 | 0) == 0) {
+ break;
+ } else {
+ i7 = i7 + 16 | 0;
+ }
+ }
+ i4 = i4 + 5552 | 0;
+ i6 = (i6 >>> 0) % 65521 | 0;
+ i3 = (i3 >>> 0) % 65521 | 0;
+ } while (i5 >>> 0 > 5551);
+ if ((i5 | 0) != 0) {
+ if (i5 >>> 0 > 15) {
+ i2 = 15;
+ } else {
+ i2 = 16;
+ }
+ }
+ } else {
+ i2 = 15;
+ }
+ if ((i2 | 0) == 15) {
+ while (1) {
+ i5 = i5 + -16 | 0;
+ i9 = (HEAPU8[i4] | 0) + i6 | 0;
+ i10 = i9 + (HEAPU8[i4 + 1 | 0] | 0) | 0;
+ i11 = i10 + (HEAPU8[i4 + 2 | 0] | 0) | 0;
+ i12 = i11 + (HEAPU8[i4 + 3 | 0] | 0) | 0;
+ i13 = i12 + (HEAPU8[i4 + 4 | 0] | 0) | 0;
+ i14 = i13 + (HEAPU8[i4 + 5 | 0] | 0) | 0;
+ i15 = i14 + (HEAPU8[i4 + 6 | 0] | 0) | 0;
+ i16 = i15 + (HEAPU8[i4 + 7 | 0] | 0) | 0;
+ i17 = i16 + (HEAPU8[i4 + 8 | 0] | 0) | 0;
+ i18 = i17 + (HEAPU8[i4 + 9 | 0] | 0) | 0;
+ i19 = i18 + (HEAPU8[i4 + 10 | 0] | 0) | 0;
+ i20 = i19 + (HEAPU8[i4 + 11 | 0] | 0) | 0;
+ i21 = i20 + (HEAPU8[i4 + 12 | 0] | 0) | 0;
+ i22 = i21 + (HEAPU8[i4 + 13 | 0] | 0) | 0;
+ i23 = i22 + (HEAPU8[i4 + 14 | 0] | 0) | 0;
+ i6 = i23 + (HEAPU8[i4 + 15 | 0] | 0) | 0;
+ i3 = i9 + i3 + i10 + i11 + i12 + i13 + i14 + i15 + i16 + i17 + i18 + i19 + i20 + i21 + i22 + i23 + i6 | 0;
+ i4 = i4 + 16 | 0;
+ if (!(i5 >>> 0 > 15)) {
+ break;
+ } else {
+ i2 = 15;
+ }
+ }
+ if ((i5 | 0) == 0) {
+ i2 = 17;
+ } else {
+ i2 = 16;
+ }
+ }
+ if ((i2 | 0) == 16) {
+ while (1) {
+ i5 = i5 + -1 | 0;
+ i6 = (HEAPU8[i4] | 0) + i6 | 0;
+ i3 = i6 + i3 | 0;
+ if ((i5 | 0) == 0) {
+ i2 = 17;
+ break;
+ } else {
+ i4 = i4 + 1 | 0;
+ i2 = 16;
+ }
+ }
+ }
+ if ((i2 | 0) == 17) {
+ i6 = (i6 >>> 0) % 65521 | 0;
+ i3 = (i3 >>> 0) % 65521 | 0;
+ }
+ i23 = i3 << 16 | i6;
+ STACKTOP = i1;
+ return i23 | 0;
+}
+function _crc32(i4, i2, i3) {
+ i4 = i4 | 0;
+ i2 = i2 | 0;
+ i3 = i3 | 0;
+ var i1 = 0, i5 = 0;
+ i1 = STACKTOP;
+ if ((i2 | 0) == 0) {
+ i5 = 0;
+ STACKTOP = i1;
+ return i5 | 0;
+ }
+ i4 = ~i4;
+ L4 : do {
+ if ((i3 | 0) != 0) {
+ while (1) {
+ if ((i2 & 3 | 0) == 0) {
+ break;
+ }
+ i4 = HEAP32[3192 + (((HEAPU8[i2] | 0) ^ i4 & 255) << 2) >> 2] ^ i4 >>> 8;
+ i3 = i3 + -1 | 0;
+ if ((i3 | 0) == 0) {
+ break L4;
+ } else {
+ i2 = i2 + 1 | 0;
+ }
+ }
+ if (i3 >>> 0 > 31) {
+ while (1) {
+ i4 = HEAP32[i2 >> 2] ^ i4;
+ i4 = HEAP32[5240 + ((i4 >>> 8 & 255) << 2) >> 2] ^ HEAP32[6264 + ((i4 & 255) << 2) >> 2] ^ HEAP32[4216 + ((i4 >>> 16 & 255) << 2) >> 2] ^ HEAP32[3192 + (i4 >>> 24 << 2) >> 2] ^ HEAP32[i2 + 4 >> 2];
+ i4 = HEAP32[5240 + ((i4 >>> 8 & 255) << 2) >> 2] ^ HEAP32[6264 + ((i4 & 255) << 2) >> 2] ^ HEAP32[4216 + ((i4 >>> 16 & 255) << 2) >> 2] ^ HEAP32[3192 + (i4 >>> 24 << 2) >> 2] ^ HEAP32[i2 + 8 >> 2];
+ i4 = HEAP32[5240 + ((i4 >>> 8 & 255) << 2) >> 2] ^ HEAP32[6264 + ((i4 & 255) << 2) >> 2] ^ HEAP32[4216 + ((i4 >>> 16 & 255) << 2) >> 2] ^ HEAP32[3192 + (i4 >>> 24 << 2) >> 2] ^ HEAP32[i2 + 12 >> 2];
+ i4 = HEAP32[5240 + ((i4 >>> 8 & 255) << 2) >> 2] ^ HEAP32[6264 + ((i4 & 255) << 2) >> 2] ^ HEAP32[4216 + ((i4 >>> 16 & 255) << 2) >> 2] ^ HEAP32[3192 + (i4 >>> 24 << 2) >> 2] ^ HEAP32[i2 + 16 >> 2];
+ i4 = HEAP32[5240 + ((i4 >>> 8 & 255) << 2) >> 2] ^ HEAP32[6264 + ((i4 & 255) << 2) >> 2] ^ HEAP32[4216 + ((i4 >>> 16 & 255) << 2) >> 2] ^ HEAP32[3192 + (i4 >>> 24 << 2) >> 2] ^ HEAP32[i2 + 20 >> 2];
+ i4 = HEAP32[5240 + ((i4 >>> 8 & 255) << 2) >> 2] ^ HEAP32[6264 + ((i4 & 255) << 2) >> 2] ^ HEAP32[4216 + ((i4 >>> 16 & 255) << 2) >> 2] ^ HEAP32[3192 + (i4 >>> 24 << 2) >> 2] ^ HEAP32[i2 + 24 >> 2];
+ i5 = i2 + 32 | 0;
+ i4 = HEAP32[5240 + ((i4 >>> 8 & 255) << 2) >> 2] ^ HEAP32[6264 + ((i4 & 255) << 2) >> 2] ^ HEAP32[4216 + ((i4 >>> 16 & 255) << 2) >> 2] ^ HEAP32[3192 + (i4 >>> 24 << 2) >> 2] ^ HEAP32[i2 + 28 >> 2];
+ i4 = HEAP32[5240 + ((i4 >>> 8 & 255) << 2) >> 2] ^ HEAP32[6264 + ((i4 & 255) << 2) >> 2] ^ HEAP32[4216 + ((i4 >>> 16 & 255) << 2) >> 2] ^ HEAP32[3192 + (i4 >>> 24 << 2) >> 2];
+ i3 = i3 + -32 | 0;
+ if (i3 >>> 0 > 31) {
+ i2 = i5;
+ } else {
+ i2 = i5;
+ break;
+ }
+ }
+ }
+ if (i3 >>> 0 > 3) {
+ while (1) {
+ i5 = i2 + 4 | 0;
+ i4 = HEAP32[i2 >> 2] ^ i4;
+ i4 = HEAP32[5240 + ((i4 >>> 8 & 255) << 2) >> 2] ^ HEAP32[6264 + ((i4 & 255) << 2) >> 2] ^ HEAP32[4216 + ((i4 >>> 16 & 255) << 2) >> 2] ^ HEAP32[3192 + (i4 >>> 24 << 2) >> 2];
+ i3 = i3 + -4 | 0;
+ if (i3 >>> 0 > 3) {
+ i2 = i5;
+ } else {
+ i2 = i5;
+ break;
+ }
+ }
+ }
+ if ((i3 | 0) != 0) {
+ while (1) {
+ i4 = HEAP32[3192 + (((HEAPU8[i2] | 0) ^ i4 & 255) << 2) >> 2] ^ i4 >>> 8;
+ i3 = i3 + -1 | 0;
+ if ((i3 | 0) == 0) {
+ break;
+ } else {
+ i2 = i2 + 1 | 0;
+ }
+ }
+ }
+ }
+ } while (0);
+ i5 = ~i4;
+ STACKTOP = i1;
+ return i5 | 0;
+}
+function _deflateInit2_(i3, i7, i8, i10, i4, i1, i5, i6) {
+ i3 = i3 | 0;
+ i7 = i7 | 0;
+ i8 = i8 | 0;
+ i10 = i10 | 0;
+ i4 = i4 | 0;
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ i6 = i6 | 0;
+ var i2 = 0, i9 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0;
+ i2 = STACKTOP;
+ if ((i5 | 0) == 0) {
+ i12 = -6;
+ STACKTOP = i2;
+ return i12 | 0;
+ }
+ if (!((HEAP8[i5] | 0) == 49 & (i6 | 0) == 56)) {
+ i12 = -6;
+ STACKTOP = i2;
+ return i12 | 0;
+ }
+ if ((i3 | 0) == 0) {
+ i12 = -2;
+ STACKTOP = i2;
+ return i12 | 0;
+ }
+ i5 = i3 + 24 | 0;
+ HEAP32[i5 >> 2] = 0;
+ i6 = i3 + 32 | 0;
+ i9 = HEAP32[i6 >> 2] | 0;
+ if ((i9 | 0) == 0) {
+ HEAP32[i6 >> 2] = 1;
+ HEAP32[i3 + 40 >> 2] = 0;
+ i9 = 1;
+ }
+ i11 = i3 + 36 | 0;
+ if ((HEAP32[i11 >> 2] | 0) == 0) {
+ HEAP32[i11 >> 2] = 1;
+ }
+ i7 = (i7 | 0) == -1 ? 6 : i7;
+ if ((i10 | 0) < 0) {
+ i10 = 0 - i10 | 0;
+ i11 = 0;
+ } else {
+ i11 = (i10 | 0) > 15;
+ i10 = i11 ? i10 + -16 | 0 : i10;
+ i11 = i11 ? 2 : 1;
+ }
+ if (!((i4 + -1 | 0) >>> 0 < 9 & (i8 | 0) == 8)) {
+ i12 = -2;
+ STACKTOP = i2;
+ return i12 | 0;
+ }
+ if ((i10 + -8 | 0) >>> 0 > 7 | i7 >>> 0 > 9 | i1 >>> 0 > 4) {
+ i12 = -2;
+ STACKTOP = i2;
+ return i12 | 0;
+ }
+ i12 = (i10 | 0) == 8 ? 9 : i10;
+ i10 = i3 + 40 | 0;
+ i8 = FUNCTION_TABLE_iiii[i9 & 1](HEAP32[i10 >> 2] | 0, 1, 5828) | 0;
+ if ((i8 | 0) == 0) {
+ i12 = -4;
+ STACKTOP = i2;
+ return i12 | 0;
+ }
+ HEAP32[i3 + 28 >> 2] = i8;
+ HEAP32[i8 >> 2] = i3;
+ HEAP32[i8 + 24 >> 2] = i11;
+ HEAP32[i8 + 28 >> 2] = 0;
+ HEAP32[i8 + 48 >> 2] = i12;
+ i14 = 1 << i12;
+ i11 = i8 + 44 | 0;
+ HEAP32[i11 >> 2] = i14;
+ HEAP32[i8 + 52 >> 2] = i14 + -1;
+ i12 = i4 + 7 | 0;
+ HEAP32[i8 + 80 >> 2] = i12;
+ i12 = 1 << i12;
+ i13 = i8 + 76 | 0;
+ HEAP32[i13 >> 2] = i12;
+ HEAP32[i8 + 84 >> 2] = i12 + -1;
+ HEAP32[i8 + 88 >> 2] = ((i4 + 9 | 0) >>> 0) / 3 | 0;
+ i12 = i8 + 56 | 0;
+ HEAP32[i12 >> 2] = FUNCTION_TABLE_iiii[HEAP32[i6 >> 2] & 1](HEAP32[i10 >> 2] | 0, i14, 2) | 0;
+ i14 = FUNCTION_TABLE_iiii[HEAP32[i6 >> 2] & 1](HEAP32[i10 >> 2] | 0, HEAP32[i11 >> 2] | 0, 2) | 0;
+ i9 = i8 + 64 | 0;
+ HEAP32[i9 >> 2] = i14;
+ _memset(i14 | 0, 0, HEAP32[i11 >> 2] << 1 | 0) | 0;
+ i11 = i8 + 68 | 0;
+ HEAP32[i11 >> 2] = FUNCTION_TABLE_iiii[HEAP32[i6 >> 2] & 1](HEAP32[i10 >> 2] | 0, HEAP32[i13 >> 2] | 0, 2) | 0;
+ HEAP32[i8 + 5824 >> 2] = 0;
+ i4 = 1 << i4 + 6;
+ i13 = i8 + 5788 | 0;
+ HEAP32[i13 >> 2] = i4;
+ i4 = FUNCTION_TABLE_iiii[HEAP32[i6 >> 2] & 1](HEAP32[i10 >> 2] | 0, i4, 4) | 0;
+ HEAP32[i8 + 8 >> 2] = i4;
+ i6 = HEAP32[i13 >> 2] | 0;
+ HEAP32[i8 + 12 >> 2] = i6 << 2;
+ if (((HEAP32[i12 >> 2] | 0) != 0 ? (HEAP32[i9 >> 2] | 0) != 0 : 0) ? !((HEAP32[i11 >> 2] | 0) == 0 | (i4 | 0) == 0) : 0) {
+ HEAP32[i8 + 5796 >> 2] = i4 + (i6 >>> 1 << 1);
+ HEAP32[i8 + 5784 >> 2] = i4 + (i6 * 3 | 0);
+ HEAP32[i8 + 132 >> 2] = i7;
+ HEAP32[i8 + 136 >> 2] = i1;
+ HEAP8[i8 + 36 | 0] = 8;
+ i14 = _deflateReset(i3) | 0;
+ STACKTOP = i2;
+ return i14 | 0;
+ }
+ HEAP32[i8 + 4 >> 2] = 666;
+ HEAP32[i5 >> 2] = HEAP32[3176 >> 2];
+ _deflateEnd(i3) | 0;
+ i14 = -4;
+ STACKTOP = i2;
+ return i14 | 0;
+}
+function _longest_match(i19, i16) {
+ i19 = i19 | 0;
+ i16 = i16 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i4 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i17 = 0, i18 = 0, i20 = 0, i21 = 0, i22 = 0, i23 = 0;
+ i1 = STACKTOP;
+ i18 = HEAP32[i19 + 124 >> 2] | 0;
+ i3 = HEAP32[i19 + 56 >> 2] | 0;
+ i5 = HEAP32[i19 + 108 >> 2] | 0;
+ i4 = i3 + i5 | 0;
+ i20 = HEAP32[i19 + 120 >> 2] | 0;
+ i10 = HEAP32[i19 + 144 >> 2] | 0;
+ i2 = (HEAP32[i19 + 44 >> 2] | 0) + -262 | 0;
+ i8 = i5 >>> 0 > i2 >>> 0 ? i5 - i2 | 0 : 0;
+ i6 = HEAP32[i19 + 64 >> 2] | 0;
+ i7 = HEAP32[i19 + 52 >> 2] | 0;
+ i9 = i3 + (i5 + 258) | 0;
+ i2 = HEAP32[i19 + 116 >> 2] | 0;
+ i12 = i10 >>> 0 > i2 >>> 0 ? i2 : i10;
+ i11 = i19 + 112 | 0;
+ i15 = i3 + (i5 + 1) | 0;
+ i14 = i3 + (i5 + 2) | 0;
+ i13 = i9;
+ i10 = i5 + 257 | 0;
+ i17 = i20;
+ i18 = i20 >>> 0 < (HEAP32[i19 + 140 >> 2] | 0) >>> 0 ? i18 : i18 >>> 2;
+ i19 = HEAP8[i3 + (i20 + i5) | 0] | 0;
+ i20 = HEAP8[i3 + (i5 + -1 + i20) | 0] | 0;
+ while (1) {
+ i21 = i3 + i16 | 0;
+ if ((((HEAP8[i3 + (i16 + i17) | 0] | 0) == i19 << 24 >> 24 ? (HEAP8[i3 + (i17 + -1 + i16) | 0] | 0) == i20 << 24 >> 24 : 0) ? (HEAP8[i21] | 0) == (HEAP8[i4] | 0) : 0) ? (HEAP8[i3 + (i16 + 1) | 0] | 0) == (HEAP8[i15] | 0) : 0) {
+ i21 = i3 + (i16 + 2) | 0;
+ i22 = i14;
+ do {
+ i23 = i22 + 1 | 0;
+ if ((HEAP8[i23] | 0) != (HEAP8[i21 + 1 | 0] | 0)) {
+ i22 = i23;
+ break;
+ }
+ i23 = i22 + 2 | 0;
+ if ((HEAP8[i23] | 0) != (HEAP8[i21 + 2 | 0] | 0)) {
+ i22 = i23;
+ break;
+ }
+ i23 = i22 + 3 | 0;
+ if ((HEAP8[i23] | 0) != (HEAP8[i21 + 3 | 0] | 0)) {
+ i22 = i23;
+ break;
+ }
+ i23 = i22 + 4 | 0;
+ if ((HEAP8[i23] | 0) != (HEAP8[i21 + 4 | 0] | 0)) {
+ i22 = i23;
+ break;
+ }
+ i23 = i22 + 5 | 0;
+ if ((HEAP8[i23] | 0) != (HEAP8[i21 + 5 | 0] | 0)) {
+ i22 = i23;
+ break;
+ }
+ i23 = i22 + 6 | 0;
+ if ((HEAP8[i23] | 0) != (HEAP8[i21 + 6 | 0] | 0)) {
+ i22 = i23;
+ break;
+ }
+ i23 = i22 + 7 | 0;
+ if ((HEAP8[i23] | 0) != (HEAP8[i21 + 7 | 0] | 0)) {
+ i22 = i23;
+ break;
+ }
+ i22 = i22 + 8 | 0;
+ i21 = i21 + 8 | 0;
+ } while ((HEAP8[i22] | 0) == (HEAP8[i21] | 0) & i22 >>> 0 < i9 >>> 0);
+ i21 = i22 - i13 | 0;
+ i22 = i21 + 258 | 0;
+ if ((i22 | 0) > (i17 | 0)) {
+ HEAP32[i11 >> 2] = i16;
+ if ((i22 | 0) >= (i12 | 0)) {
+ i17 = i22;
+ i3 = 20;
+ break;
+ }
+ i17 = i22;
+ i19 = HEAP8[i3 + (i22 + i5) | 0] | 0;
+ i20 = HEAP8[i3 + (i10 + i21) | 0] | 0;
+ }
+ }
+ i16 = HEAPU16[i6 + ((i16 & i7) << 1) >> 1] | 0;
+ if (!(i16 >>> 0 > i8 >>> 0)) {
+ i3 = 20;
+ break;
+ }
+ i18 = i18 + -1 | 0;
+ if ((i18 | 0) == 0) {
+ i3 = 20;
+ break;
+ }
+ }
+ if ((i3 | 0) == 20) {
+ STACKTOP = i1;
+ return (i17 >>> 0 > i2 >>> 0 ? i2 : i17) | 0;
+ }
+ return 0;
+}
+function __tr_stored_block(i3, i2, i5, i6) {
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i5 = i5 | 0;
+ i6 = i6 | 0;
+ var i1 = 0, i4 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0;
+ i1 = STACKTOP;
+ i4 = i3 + 5820 | 0;
+ i7 = HEAP32[i4 >> 2] | 0;
+ i9 = i6 & 65535;
+ i6 = i3 + 5816 | 0;
+ i8 = HEAPU16[i6 >> 1] | 0 | i9 << i7;
+ HEAP16[i6 >> 1] = i8;
+ if ((i7 | 0) > 13) {
+ i11 = i3 + 20 | 0;
+ i7 = HEAP32[i11 >> 2] | 0;
+ HEAP32[i11 >> 2] = i7 + 1;
+ i10 = i3 + 8 | 0;
+ HEAP8[(HEAP32[i10 >> 2] | 0) + i7 | 0] = i8;
+ i8 = (HEAPU16[i6 >> 1] | 0) >>> 8 & 255;
+ i7 = HEAP32[i11 >> 2] | 0;
+ HEAP32[i11 >> 2] = i7 + 1;
+ HEAP8[(HEAP32[i10 >> 2] | 0) + i7 | 0] = i8;
+ i7 = HEAP32[i4 >> 2] | 0;
+ i8 = i9 >>> (16 - i7 | 0);
+ HEAP16[i6 >> 1] = i8;
+ i7 = i7 + -13 | 0;
+ } else {
+ i7 = i7 + 3 | 0;
+ }
+ i8 = i8 & 255;
+ HEAP32[i4 >> 2] = i7;
+ do {
+ if ((i7 | 0) <= 8) {
+ i9 = i3 + 20 | 0;
+ if ((i7 | 0) > 0) {
+ i7 = HEAP32[i9 >> 2] | 0;
+ HEAP32[i9 >> 2] = i7 + 1;
+ i11 = i3 + 8 | 0;
+ HEAP8[(HEAP32[i11 >> 2] | 0) + i7 | 0] = i8;
+ i7 = i9;
+ i8 = i11;
+ break;
+ } else {
+ i7 = i9;
+ i8 = i3 + 8 | 0;
+ break;
+ }
+ } else {
+ i7 = i3 + 20 | 0;
+ i10 = HEAP32[i7 >> 2] | 0;
+ HEAP32[i7 >> 2] = i10 + 1;
+ i11 = i3 + 8 | 0;
+ HEAP8[(HEAP32[i11 >> 2] | 0) + i10 | 0] = i8;
+ i10 = (HEAPU16[i6 >> 1] | 0) >>> 8 & 255;
+ i8 = HEAP32[i7 >> 2] | 0;
+ HEAP32[i7 >> 2] = i8 + 1;
+ HEAP8[(HEAP32[i11 >> 2] | 0) + i8 | 0] = i10;
+ i8 = i11;
+ }
+ } while (0);
+ HEAP16[i6 >> 1] = 0;
+ HEAP32[i4 >> 2] = 0;
+ HEAP32[i3 + 5812 >> 2] = 8;
+ i10 = HEAP32[i7 >> 2] | 0;
+ HEAP32[i7 >> 2] = i10 + 1;
+ HEAP8[(HEAP32[i8 >> 2] | 0) + i10 | 0] = i5;
+ i10 = HEAP32[i7 >> 2] | 0;
+ HEAP32[i7 >> 2] = i10 + 1;
+ HEAP8[(HEAP32[i8 >> 2] | 0) + i10 | 0] = i5 >>> 8;
+ i10 = i5 & 65535 ^ 65535;
+ i11 = HEAP32[i7 >> 2] | 0;
+ HEAP32[i7 >> 2] = i11 + 1;
+ HEAP8[(HEAP32[i8 >> 2] | 0) + i11 | 0] = i10;
+ i11 = HEAP32[i7 >> 2] | 0;
+ HEAP32[i7 >> 2] = i11 + 1;
+ HEAP8[(HEAP32[i8 >> 2] | 0) + i11 | 0] = i10 >>> 8;
+ if ((i5 | 0) == 0) {
+ STACKTOP = i1;
+ return;
+ }
+ while (1) {
+ i5 = i5 + -1 | 0;
+ i10 = HEAP8[i2] | 0;
+ i11 = HEAP32[i7 >> 2] | 0;
+ HEAP32[i7 >> 2] = i11 + 1;
+ HEAP8[(HEAP32[i8 >> 2] | 0) + i11 | 0] = i10;
+ if ((i5 | 0) == 0) {
+ break;
+ } else {
+ i2 = i2 + 1 | 0;
+ }
+ }
+ STACKTOP = i1;
+ return;
+}
+function _inflateInit_(i1, i3, i4) {
+ i1 = i1 | 0;
+ i3 = i3 | 0;
+ i4 = i4 | 0;
+ var i2 = 0, i5 = 0, i6 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0;
+ i2 = STACKTOP;
+ if ((i3 | 0) == 0) {
+ i11 = -6;
+ STACKTOP = i2;
+ return i11 | 0;
+ }
+ if (!((HEAP8[i3] | 0) == 49 & (i4 | 0) == 56)) {
+ i11 = -6;
+ STACKTOP = i2;
+ return i11 | 0;
+ }
+ if ((i1 | 0) == 0) {
+ i11 = -2;
+ STACKTOP = i2;
+ return i11 | 0;
+ }
+ i3 = i1 + 24 | 0;
+ HEAP32[i3 >> 2] = 0;
+ i4 = i1 + 32 | 0;
+ i6 = HEAP32[i4 >> 2] | 0;
+ if ((i6 | 0) == 0) {
+ HEAP32[i4 >> 2] = 1;
+ HEAP32[i1 + 40 >> 2] = 0;
+ i6 = 1;
+ }
+ i4 = i1 + 36 | 0;
+ if ((HEAP32[i4 >> 2] | 0) == 0) {
+ HEAP32[i4 >> 2] = 1;
+ }
+ i5 = i1 + 40 | 0;
+ i8 = FUNCTION_TABLE_iiii[i6 & 1](HEAP32[i5 >> 2] | 0, 1, 7116) | 0;
+ if ((i8 | 0) == 0) {
+ i11 = -4;
+ STACKTOP = i2;
+ return i11 | 0;
+ }
+ i6 = i1 + 28 | 0;
+ HEAP32[i6 >> 2] = i8;
+ HEAP32[i8 + 52 >> 2] = 0;
+ i9 = HEAP32[i6 >> 2] | 0;
+ do {
+ if ((i9 | 0) != 0) {
+ i10 = i9 + 52 | 0;
+ i11 = HEAP32[i10 >> 2] | 0;
+ i7 = i9 + 36 | 0;
+ if ((i11 | 0) != 0) {
+ if ((HEAP32[i7 >> 2] | 0) == 15) {
+ i10 = i9;
+ } else {
+ FUNCTION_TABLE_vii[HEAP32[i4 >> 2] & 1](HEAP32[i5 >> 2] | 0, i11);
+ HEAP32[i10 >> 2] = 0;
+ i10 = HEAP32[i6 >> 2] | 0;
+ }
+ HEAP32[i9 + 8 >> 2] = 1;
+ HEAP32[i7 >> 2] = 15;
+ if ((i10 | 0) == 0) {
+ break;
+ } else {
+ i9 = i10;
+ }
+ } else {
+ HEAP32[i9 + 8 >> 2] = 1;
+ HEAP32[i7 >> 2] = 15;
+ }
+ HEAP32[i9 + 28 >> 2] = 0;
+ HEAP32[i1 + 20 >> 2] = 0;
+ HEAP32[i1 + 8 >> 2] = 0;
+ HEAP32[i3 >> 2] = 0;
+ HEAP32[i1 + 48 >> 2] = 1;
+ HEAP32[i9 >> 2] = 0;
+ HEAP32[i9 + 4 >> 2] = 0;
+ HEAP32[i9 + 12 >> 2] = 0;
+ HEAP32[i9 + 20 >> 2] = 32768;
+ HEAP32[i9 + 32 >> 2] = 0;
+ HEAP32[i9 + 40 >> 2] = 0;
+ HEAP32[i9 + 44 >> 2] = 0;
+ HEAP32[i9 + 48 >> 2] = 0;
+ HEAP32[i9 + 56 >> 2] = 0;
+ HEAP32[i9 + 60 >> 2] = 0;
+ i11 = i9 + 1328 | 0;
+ HEAP32[i9 + 108 >> 2] = i11;
+ HEAP32[i9 + 80 >> 2] = i11;
+ HEAP32[i9 + 76 >> 2] = i11;
+ HEAP32[i9 + 7104 >> 2] = 1;
+ HEAP32[i9 + 7108 >> 2] = -1;
+ i11 = 0;
+ STACKTOP = i2;
+ return i11 | 0;
+ }
+ } while (0);
+ FUNCTION_TABLE_vii[HEAP32[i4 >> 2] & 1](HEAP32[i5 >> 2] | 0, i8);
+ HEAP32[i6 >> 2] = 0;
+ i11 = -2;
+ STACKTOP = i2;
+ return i11 | 0;
+}
+function _init_block(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ i3 = 0;
+ do {
+ HEAP16[i1 + (i3 << 2) + 148 >> 1] = 0;
+ i3 = i3 + 1 | 0;
+ } while ((i3 | 0) != 286);
+ HEAP16[i1 + 2440 >> 1] = 0;
+ HEAP16[i1 + 2444 >> 1] = 0;
+ HEAP16[i1 + 2448 >> 1] = 0;
+ HEAP16[i1 + 2452 >> 1] = 0;
+ HEAP16[i1 + 2456 >> 1] = 0;
+ HEAP16[i1 + 2460 >> 1] = 0;
+ HEAP16[i1 + 2464 >> 1] = 0;
+ HEAP16[i1 + 2468 >> 1] = 0;
+ HEAP16[i1 + 2472 >> 1] = 0;
+ HEAP16[i1 + 2476 >> 1] = 0;
+ HEAP16[i1 + 2480 >> 1] = 0;
+ HEAP16[i1 + 2484 >> 1] = 0;
+ HEAP16[i1 + 2488 >> 1] = 0;
+ HEAP16[i1 + 2492 >> 1] = 0;
+ HEAP16[i1 + 2496 >> 1] = 0;
+ HEAP16[i1 + 2500 >> 1] = 0;
+ HEAP16[i1 + 2504 >> 1] = 0;
+ HEAP16[i1 + 2508 >> 1] = 0;
+ HEAP16[i1 + 2512 >> 1] = 0;
+ HEAP16[i1 + 2516 >> 1] = 0;
+ HEAP16[i1 + 2520 >> 1] = 0;
+ HEAP16[i1 + 2524 >> 1] = 0;
+ HEAP16[i1 + 2528 >> 1] = 0;
+ HEAP16[i1 + 2532 >> 1] = 0;
+ HEAP16[i1 + 2536 >> 1] = 0;
+ HEAP16[i1 + 2540 >> 1] = 0;
+ HEAP16[i1 + 2544 >> 1] = 0;
+ HEAP16[i1 + 2548 >> 1] = 0;
+ HEAP16[i1 + 2552 >> 1] = 0;
+ HEAP16[i1 + 2556 >> 1] = 0;
+ HEAP16[i1 + 2684 >> 1] = 0;
+ HEAP16[i1 + 2688 >> 1] = 0;
+ HEAP16[i1 + 2692 >> 1] = 0;
+ HEAP16[i1 + 2696 >> 1] = 0;
+ HEAP16[i1 + 2700 >> 1] = 0;
+ HEAP16[i1 + 2704 >> 1] = 0;
+ HEAP16[i1 + 2708 >> 1] = 0;
+ HEAP16[i1 + 2712 >> 1] = 0;
+ HEAP16[i1 + 2716 >> 1] = 0;
+ HEAP16[i1 + 2720 >> 1] = 0;
+ HEAP16[i1 + 2724 >> 1] = 0;
+ HEAP16[i1 + 2728 >> 1] = 0;
+ HEAP16[i1 + 2732 >> 1] = 0;
+ HEAP16[i1 + 2736 >> 1] = 0;
+ HEAP16[i1 + 2740 >> 1] = 0;
+ HEAP16[i1 + 2744 >> 1] = 0;
+ HEAP16[i1 + 2748 >> 1] = 0;
+ HEAP16[i1 + 2752 >> 1] = 0;
+ HEAP16[i1 + 2756 >> 1] = 0;
+ HEAP16[i1 + 1172 >> 1] = 1;
+ HEAP32[i1 + 5804 >> 2] = 0;
+ HEAP32[i1 + 5800 >> 2] = 0;
+ HEAP32[i1 + 5808 >> 2] = 0;
+ HEAP32[i1 + 5792 >> 2] = 0;
+ STACKTOP = i2;
+ return;
+}
+function _deflateReset(i1) {
+ i1 = i1 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0;
+ i2 = STACKTOP;
+ if ((i1 | 0) == 0) {
+ i5 = -2;
+ STACKTOP = i2;
+ return i5 | 0;
+ }
+ i3 = HEAP32[i1 + 28 >> 2] | 0;
+ if ((i3 | 0) == 0) {
+ i5 = -2;
+ STACKTOP = i2;
+ return i5 | 0;
+ }
+ if ((HEAP32[i1 + 32 >> 2] | 0) == 0) {
+ i5 = -2;
+ STACKTOP = i2;
+ return i5 | 0;
+ }
+ if ((HEAP32[i1 + 36 >> 2] | 0) == 0) {
+ i5 = -2;
+ STACKTOP = i2;
+ return i5 | 0;
+ }
+ HEAP32[i1 + 20 >> 2] = 0;
+ HEAP32[i1 + 8 >> 2] = 0;
+ HEAP32[i1 + 24 >> 2] = 0;
+ HEAP32[i1 + 44 >> 2] = 2;
+ HEAP32[i3 + 20 >> 2] = 0;
+ HEAP32[i3 + 16 >> 2] = HEAP32[i3 + 8 >> 2];
+ i4 = i3 + 24 | 0;
+ i5 = HEAP32[i4 >> 2] | 0;
+ if ((i5 | 0) < 0) {
+ i5 = 0 - i5 | 0;
+ HEAP32[i4 >> 2] = i5;
+ }
+ HEAP32[i3 + 4 >> 2] = (i5 | 0) != 0 ? 42 : 113;
+ if ((i5 | 0) == 2) {
+ i4 = _crc32(0, 0, 0) | 0;
+ } else {
+ i4 = _adler32(0, 0, 0) | 0;
+ }
+ HEAP32[i1 + 48 >> 2] = i4;
+ HEAP32[i3 + 40 >> 2] = 0;
+ __tr_init(i3);
+ HEAP32[i3 + 60 >> 2] = HEAP32[i3 + 44 >> 2] << 1;
+ i5 = HEAP32[i3 + 76 >> 2] | 0;
+ i4 = HEAP32[i3 + 68 >> 2] | 0;
+ HEAP16[i4 + (i5 + -1 << 1) >> 1] = 0;
+ _memset(i4 | 0, 0, (i5 << 1) + -2 | 0) | 0;
+ i5 = HEAP32[i3 + 132 >> 2] | 0;
+ HEAP32[i3 + 128 >> 2] = HEAPU16[178 + (i5 * 12 | 0) >> 1] | 0;
+ HEAP32[i3 + 140 >> 2] = HEAPU16[176 + (i5 * 12 | 0) >> 1] | 0;
+ HEAP32[i3 + 144 >> 2] = HEAPU16[180 + (i5 * 12 | 0) >> 1] | 0;
+ HEAP32[i3 + 124 >> 2] = HEAPU16[182 + (i5 * 12 | 0) >> 1] | 0;
+ HEAP32[i3 + 108 >> 2] = 0;
+ HEAP32[i3 + 92 >> 2] = 0;
+ HEAP32[i3 + 116 >> 2] = 0;
+ HEAP32[i3 + 120 >> 2] = 2;
+ HEAP32[i3 + 96 >> 2] = 2;
+ HEAP32[i3 + 112 >> 2] = 0;
+ HEAP32[i3 + 104 >> 2] = 0;
+ HEAP32[i3 + 72 >> 2] = 0;
+ i5 = 0;
+ STACKTOP = i2;
+ return i5 | 0;
+}
+function _updatewindow(i6, i4) {
+ i6 = i6 | 0;
+ i4 = i4 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i5 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0;
+ i1 = STACKTOP;
+ i2 = HEAP32[i6 + 28 >> 2] | 0;
+ i3 = i2 + 52 | 0;
+ i8 = HEAP32[i3 >> 2] | 0;
+ if ((i8 | 0) == 0) {
+ i8 = FUNCTION_TABLE_iiii[HEAP32[i6 + 32 >> 2] & 1](HEAP32[i6 + 40 >> 2] | 0, 1 << HEAP32[i2 + 36 >> 2], 1) | 0;
+ HEAP32[i3 >> 2] = i8;
+ if ((i8 | 0) == 0) {
+ i10 = 1;
+ STACKTOP = i1;
+ return i10 | 0;
+ }
+ }
+ i5 = i2 + 40 | 0;
+ i10 = HEAP32[i5 >> 2] | 0;
+ if ((i10 | 0) == 0) {
+ i10 = 1 << HEAP32[i2 + 36 >> 2];
+ HEAP32[i5 >> 2] = i10;
+ HEAP32[i2 + 48 >> 2] = 0;
+ HEAP32[i2 + 44 >> 2] = 0;
+ }
+ i4 = i4 - (HEAP32[i6 + 16 >> 2] | 0) | 0;
+ if (!(i4 >>> 0 < i10 >>> 0)) {
+ _memcpy(i8 | 0, (HEAP32[i6 + 12 >> 2] | 0) + (0 - i10) | 0, i10 | 0) | 0;
+ HEAP32[i2 + 48 >> 2] = 0;
+ HEAP32[i2 + 44 >> 2] = HEAP32[i5 >> 2];
+ i10 = 0;
+ STACKTOP = i1;
+ return i10 | 0;
+ }
+ i7 = i2 + 48 | 0;
+ i9 = HEAP32[i7 >> 2] | 0;
+ i10 = i10 - i9 | 0;
+ i10 = i10 >>> 0 > i4 >>> 0 ? i4 : i10;
+ i6 = i6 + 12 | 0;
+ _memcpy(i8 + i9 | 0, (HEAP32[i6 >> 2] | 0) + (0 - i4) | 0, i10 | 0) | 0;
+ i8 = i4 - i10 | 0;
+ if ((i4 | 0) != (i10 | 0)) {
+ _memcpy(HEAP32[i3 >> 2] | 0, (HEAP32[i6 >> 2] | 0) + (0 - i8) | 0, i8 | 0) | 0;
+ HEAP32[i7 >> 2] = i8;
+ HEAP32[i2 + 44 >> 2] = HEAP32[i5 >> 2];
+ i10 = 0;
+ STACKTOP = i1;
+ return i10 | 0;
+ }
+ i6 = (HEAP32[i7 >> 2] | 0) + i4 | 0;
+ i3 = HEAP32[i5 >> 2] | 0;
+ HEAP32[i7 >> 2] = (i6 | 0) == (i3 | 0) ? 0 : i6;
+ i5 = i2 + 44 | 0;
+ i2 = HEAP32[i5 >> 2] | 0;
+ if (!(i2 >>> 0 < i3 >>> 0)) {
+ i10 = 0;
+ STACKTOP = i1;
+ return i10 | 0;
+ }
+ HEAP32[i5 >> 2] = i2 + i4;
+ i10 = 0;
+ STACKTOP = i1;
+ return i10 | 0;
+}
+function _scan_tree(i1, i5, i6) {
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ i6 = i6 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i7 = 0, i8 = 0, i9 = 0, i10 = 0, i11 = 0, i12 = 0, i13 = 0, i14 = 0, i15 = 0, i16 = 0;
+ i8 = STACKTOP;
+ i10 = HEAP16[i5 + 2 >> 1] | 0;
+ i9 = i10 << 16 >> 16 == 0;
+ HEAP16[i5 + (i6 + 1 << 2) + 2 >> 1] = -1;
+ i2 = i1 + 2752 | 0;
+ i3 = i1 + 2756 | 0;
+ i4 = i1 + 2748 | 0;
+ i7 = i9 ? 138 : 7;
+ i9 = i9 ? 3 : 4;
+ i13 = 0;
+ i11 = i10 & 65535;
+ i12 = -1;
+ L1 : while (1) {
+ i14 = 0;
+ do {
+ if ((i13 | 0) > (i6 | 0)) {
+ break L1;
+ }
+ i13 = i13 + 1 | 0;
+ i16 = HEAP16[i5 + (i13 << 2) + 2 >> 1] | 0;
+ i10 = i16 & 65535;
+ i14 = i14 + 1 | 0;
+ i15 = (i11 | 0) == (i10 | 0);
+ } while ((i14 | 0) < (i7 | 0) & i15);
+ do {
+ if ((i14 | 0) >= (i9 | 0)) {
+ if ((i11 | 0) == 0) {
+ if ((i14 | 0) < 11) {
+ HEAP16[i2 >> 1] = (HEAP16[i2 >> 1] | 0) + 1 << 16 >> 16;
+ break;
+ } else {
+ HEAP16[i3 >> 1] = (HEAP16[i3 >> 1] | 0) + 1 << 16 >> 16;
+ break;
+ }
+ } else {
+ if ((i11 | 0) != (i12 | 0)) {
+ i14 = i1 + (i11 << 2) + 2684 | 0;
+ HEAP16[i14 >> 1] = (HEAP16[i14 >> 1] | 0) + 1 << 16 >> 16;
+ }
+ HEAP16[i4 >> 1] = (HEAP16[i4 >> 1] | 0) + 1 << 16 >> 16;
+ break;
+ }
+ } else {
+ i12 = i1 + (i11 << 2) + 2684 | 0;
+ HEAP16[i12 >> 1] = (HEAPU16[i12 >> 1] | 0) + i14;
+ }
+ } while (0);
+ if (i16 << 16 >> 16 == 0) {
+ i12 = i11;
+ i7 = 138;
+ i9 = 3;
+ i11 = i10;
+ continue;
+ }
+ i12 = i11;
+ i7 = i15 ? 6 : 7;
+ i9 = i15 ? 3 : 4;
+ i11 = i10;
+ }
+ STACKTOP = i8;
+ return;
+}
+function _deflateEnd(i4) {
+ i4 = i4 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i5 = 0, i6 = 0, i7 = 0;
+ i3 = STACKTOP;
+ if ((i4 | 0) == 0) {
+ i7 = -2;
+ STACKTOP = i3;
+ return i7 | 0;
+ }
+ i1 = i4 + 28 | 0;
+ i6 = HEAP32[i1 >> 2] | 0;
+ if ((i6 | 0) == 0) {
+ i7 = -2;
+ STACKTOP = i3;
+ return i7 | 0;
+ }
+ i2 = HEAP32[i6 + 4 >> 2] | 0;
+ switch (i2 | 0) {
+ case 42:
+ case 69:
+ case 73:
+ case 91:
+ case 103:
+ case 113:
+ case 666:
+ {
+ break;
+ }
+ default:
+ {
+ i7 = -2;
+ STACKTOP = i3;
+ return i7 | 0;
+ }
+ }
+ i5 = HEAP32[i6 + 8 >> 2] | 0;
+ if ((i5 | 0) != 0) {
+ FUNCTION_TABLE_vii[HEAP32[i4 + 36 >> 2] & 1](HEAP32[i4 + 40 >> 2] | 0, i5);
+ i6 = HEAP32[i1 >> 2] | 0;
+ }
+ i5 = HEAP32[i6 + 68 >> 2] | 0;
+ if ((i5 | 0) != 0) {
+ FUNCTION_TABLE_vii[HEAP32[i4 + 36 >> 2] & 1](HEAP32[i4 + 40 >> 2] | 0, i5);
+ i6 = HEAP32[i1 >> 2] | 0;
+ }
+ i5 = HEAP32[i6 + 64 >> 2] | 0;
+ if ((i5 | 0) != 0) {
+ FUNCTION_TABLE_vii[HEAP32[i4 + 36 >> 2] & 1](HEAP32[i4 + 40 >> 2] | 0, i5);
+ i6 = HEAP32[i1 >> 2] | 0;
+ }
+ i7 = HEAP32[i6 + 56 >> 2] | 0;
+ i5 = i4 + 36 | 0;
+ if ((i7 | 0) == 0) {
+ i4 = i4 + 40 | 0;
+ } else {
+ i4 = i4 + 40 | 0;
+ FUNCTION_TABLE_vii[HEAP32[i5 >> 2] & 1](HEAP32[i4 >> 2] | 0, i7);
+ i6 = HEAP32[i1 >> 2] | 0;
+ }
+ FUNCTION_TABLE_vii[HEAP32[i5 >> 2] & 1](HEAP32[i4 >> 2] | 0, i6);
+ HEAP32[i1 >> 2] = 0;
+ i7 = (i2 | 0) == 113 ? -3 : 0;
+ STACKTOP = i3;
+ return i7 | 0;
+}
+function _main(i4, i5) {
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i6 = 0;
+ i1 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i2 = i1;
+ L1 : do {
+ if ((i4 | 0) > 1) {
+ i4 = HEAP8[HEAP32[i5 + 4 >> 2] | 0] | 0;
+ switch (i4 | 0) {
+ case 50:
+ {
+ i2 = 250;
+ break L1;
+ }
+ case 51:
+ {
+ i3 = 4;
+ break L1;
+ }
+ case 52:
+ {
+ i2 = 2500;
+ break L1;
+ }
+ case 53:
+ {
+ i2 = 5e3;
+ break L1;
+ }
+ case 48:
+ {
+ i6 = 0;
+ STACKTOP = i1;
+ return i6 | 0;
+ }
+ case 49:
+ {
+ i2 = 60;
+ break L1;
+ }
+ default:
+ {
+ HEAP32[i2 >> 2] = i4 + -48;
+ _printf(144, i2 | 0) | 0;
+ i6 = -1;
+ STACKTOP = i1;
+ return i6 | 0;
+ }
+ }
+ } else {
+ i3 = 4;
+ }
+ } while (0);
+ if ((i3 | 0) == 4) {
+ i2 = 500;
+ }
+ i3 = _malloc(1e5) | 0;
+ i4 = 0;
+ i6 = 0;
+ i5 = 17;
+ while (1) {
+ do {
+ if ((i6 | 0) <= 0) {
+ if ((i4 & 7 | 0) == 0) {
+ i6 = i4 & 31;
+ i5 = 0;
+ break;
+ } else {
+ i5 = (((Math_imul(i4, i4) | 0) >>> 0) % 6714 | 0) & 255;
+ break;
+ }
+ } else {
+ i6 = i6 + -1 | 0;
+ }
+ } while (0);
+ HEAP8[i3 + i4 | 0] = i5;
+ i4 = i4 + 1 | 0;
+ if ((i4 | 0) == 1e5) {
+ i4 = 0;
+ break;
+ }
+ }
+ do {
+ _doit(i3, 1e5, i4);
+ i4 = i4 + 1 | 0;
+ } while ((i4 | 0) < (i2 | 0));
+ _puts(160) | 0;
+ i6 = 0;
+ STACKTOP = i1;
+ return i6 | 0;
+}
+function _doit(i6, i1, i7) {
+ i6 = i6 | 0;
+ i1 = i1 | 0;
+ i7 = i7 | 0;
+ var i2 = 0, i3 = 0, i4 = 0, i5 = 0, i8 = 0, i9 = 0;
+ i5 = STACKTOP;
+ STACKTOP = STACKTOP + 16 | 0;
+ i4 = i5;
+ i3 = i5 + 12 | 0;
+ i2 = i5 + 8 | 0;
+ i8 = _compressBound(i1) | 0;
+ i9 = HEAP32[2] | 0;
+ if ((i9 | 0) == 0) {
+ i9 = _malloc(i8) | 0;
+ HEAP32[2] = i9;
+ }
+ if ((HEAP32[4] | 0) == 0) {
+ HEAP32[4] = _malloc(i1) | 0;
+ }
+ HEAP32[i3 >> 2] = i8;
+ _compress(i9, i3, i6, i1) | 0;
+ i7 = (i7 | 0) == 0;
+ if (i7) {
+ i9 = HEAP32[i3 >> 2] | 0;
+ HEAP32[i4 >> 2] = i1;
+ HEAP32[i4 + 4 >> 2] = i9;
+ _printf(24, i4 | 0) | 0;
+ }
+ HEAP32[i2 >> 2] = i1;
+ _uncompress(HEAP32[4] | 0, i2, HEAP32[2] | 0, HEAP32[i3 >> 2] | 0) | 0;
+ if ((HEAP32[i2 >> 2] | 0) != (i1 | 0)) {
+ ___assert_fail(40, 72, 24, 104);
+ }
+ if (!i7) {
+ STACKTOP = i5;
+ return;
+ }
+ if ((_strcmp(i6, HEAP32[4] | 0) | 0) == 0) {
+ STACKTOP = i5;
+ return;
+ } else {
+ ___assert_fail(112, 72, 25, 104);
+ }
+}
+function _uncompress(i6, i1, i5, i7) {
+ i6 = i6 | 0;
+ i1 = i1 | 0;
+ i5 = i5 | 0;
+ i7 = i7 | 0;
+ var i2 = 0, i3 = 0, i4 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 64 | 0;
+ i3 = i2;
+ HEAP32[i3 >> 2] = i5;
+ i5 = i3 + 4 | 0;
+ HEAP32[i5 >> 2] = i7;
+ HEAP32[i3 + 12 >> 2] = i6;
+ HEAP32[i3 + 16 >> 2] = HEAP32[i1 >> 2];
+ HEAP32[i3 + 32 >> 2] = 0;
+ HEAP32[i3 + 36 >> 2] = 0;
+ i6 = _inflateInit_(i3, 2992, 56) | 0;
+ if ((i6 | 0) != 0) {
+ i7 = i6;
+ STACKTOP = i2;
+ return i7 | 0;
+ }
+ i6 = _inflate(i3, 4) | 0;
+ if ((i6 | 0) == 1) {
+ HEAP32[i1 >> 2] = HEAP32[i3 + 20 >> 2];
+ i7 = _inflateEnd(i3) | 0;
+ STACKTOP = i2;
+ return i7 | 0;
+ }
+ _inflateEnd(i3) | 0;
+ if ((i6 | 0) == 2) {
+ i7 = -3;
+ STACKTOP = i2;
+ return i7 | 0;
+ } else if ((i6 | 0) == -5) {
+ i4 = 4;
+ }
+ if ((i4 | 0) == 4 ? (HEAP32[i5 >> 2] | 0) == 0 : 0) {
+ i7 = -3;
+ STACKTOP = i2;
+ return i7 | 0;
+ }
+ i7 = i6;
+ STACKTOP = i2;
+ return i7 | 0;
+}
+function _compress(i4, i1, i6, i5) {
+ i4 = i4 | 0;
+ i1 = i1 | 0;
+ i6 = i6 | 0;
+ i5 = i5 | 0;
+ var i2 = 0, i3 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + 64 | 0;
+ i3 = i2;
+ HEAP32[i3 >> 2] = i6;
+ HEAP32[i3 + 4 >> 2] = i5;
+ HEAP32[i3 + 12 >> 2] = i4;
+ HEAP32[i3 + 16 >> 2] = HEAP32[i1 >> 2];
+ HEAP32[i3 + 32 >> 2] = 0;
+ HEAP32[i3 + 36 >> 2] = 0;
+ HEAP32[i3 + 40 >> 2] = 0;
+ i4 = _deflateInit_(i3, -1, 168, 56) | 0;
+ if ((i4 | 0) != 0) {
+ i6 = i4;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ i4 = _deflate(i3, 4) | 0;
+ if ((i4 | 0) == 1) {
+ HEAP32[i1 >> 2] = HEAP32[i3 + 20 >> 2];
+ i6 = _deflateEnd(i3) | 0;
+ STACKTOP = i2;
+ return i6 | 0;
+ } else {
+ _deflateEnd(i3) | 0;
+ i6 = (i4 | 0) == 0 ? -5 : i4;
+ STACKTOP = i2;
+ return i6 | 0;
+ }
+ return 0;
+}
+function _inflateEnd(i4) {
+ i4 = i4 | 0;
+ var i1 = 0, i2 = 0, i3 = 0, i5 = 0, i6 = 0, i7 = 0;
+ i1 = STACKTOP;
+ if ((i4 | 0) == 0) {
+ i7 = -2;
+ STACKTOP = i1;
+ return i7 | 0;
+ }
+ i2 = i4 + 28 | 0;
+ i3 = HEAP32[i2 >> 2] | 0;
+ if ((i3 | 0) == 0) {
+ i7 = -2;
+ STACKTOP = i1;
+ return i7 | 0;
+ }
+ i6 = i4 + 36 | 0;
+ i5 = HEAP32[i6 >> 2] | 0;
+ if ((i5 | 0) == 0) {
+ i7 = -2;
+ STACKTOP = i1;
+ return i7 | 0;
+ }
+ i7 = HEAP32[i3 + 52 >> 2] | 0;
+ i4 = i4 + 40 | 0;
+ if ((i7 | 0) != 0) {
+ FUNCTION_TABLE_vii[i5 & 1](HEAP32[i4 >> 2] | 0, i7);
+ i5 = HEAP32[i6 >> 2] | 0;
+ i3 = HEAP32[i2 >> 2] | 0;
+ }
+ FUNCTION_TABLE_vii[i5 & 1](HEAP32[i4 >> 2] | 0, i3);
+ HEAP32[i2 >> 2] = 0;
+ i7 = 0;
+ STACKTOP = i1;
+ return i7 | 0;
+}
+function _memcpy(i3, i2, i1) {
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i4 = 0;
+ if ((i1 | 0) >= 4096) return _emscripten_memcpy_big(i3 | 0, i2 | 0, i1 | 0) | 0;
+ i4 = i3 | 0;
+ if ((i3 & 3) == (i2 & 3)) {
+ while (i3 & 3) {
+ if ((i1 | 0) == 0) return i4 | 0;
+ HEAP8[i3] = HEAP8[i2] | 0;
+ i3 = i3 + 1 | 0;
+ i2 = i2 + 1 | 0;
+ i1 = i1 - 1 | 0;
+ }
+ while ((i1 | 0) >= 4) {
+ HEAP32[i3 >> 2] = HEAP32[i2 >> 2];
+ i3 = i3 + 4 | 0;
+ i2 = i2 + 4 | 0;
+ i1 = i1 - 4 | 0;
+ }
+ }
+ while ((i1 | 0) > 0) {
+ HEAP8[i3] = HEAP8[i2] | 0;
+ i3 = i3 + 1 | 0;
+ i2 = i2 + 1 | 0;
+ i1 = i1 - 1 | 0;
+ }
+ return i4 | 0;
+}
+function _strcmp(i4, i2) {
+ i4 = i4 | 0;
+ i2 = i2 | 0;
+ var i1 = 0, i3 = 0, i5 = 0;
+ i1 = STACKTOP;
+ i5 = HEAP8[i4] | 0;
+ i3 = HEAP8[i2] | 0;
+ if (i5 << 24 >> 24 != i3 << 24 >> 24 | i5 << 24 >> 24 == 0 | i3 << 24 >> 24 == 0) {
+ i4 = i5;
+ i5 = i3;
+ i4 = i4 & 255;
+ i5 = i5 & 255;
+ i5 = i4 - i5 | 0;
+ STACKTOP = i1;
+ return i5 | 0;
+ }
+ do {
+ i4 = i4 + 1 | 0;
+ i2 = i2 + 1 | 0;
+ i5 = HEAP8[i4] | 0;
+ i3 = HEAP8[i2] | 0;
+ } while (!(i5 << 24 >> 24 != i3 << 24 >> 24 | i5 << 24 >> 24 == 0 | i3 << 24 >> 24 == 0));
+ i4 = i5 & 255;
+ i5 = i3 & 255;
+ i5 = i4 - i5 | 0;
+ STACKTOP = i1;
+ return i5 | 0;
+}
+function _memset(i1, i4, i3) {
+ i1 = i1 | 0;
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ var i2 = 0, i5 = 0, i6 = 0, i7 = 0;
+ i2 = i1 + i3 | 0;
+ if ((i3 | 0) >= 20) {
+ i4 = i4 & 255;
+ i7 = i1 & 3;
+ i6 = i4 | i4 << 8 | i4 << 16 | i4 << 24;
+ i5 = i2 & ~3;
+ if (i7) {
+ i7 = i1 + 4 - i7 | 0;
+ while ((i1 | 0) < (i7 | 0)) {
+ HEAP8[i1] = i4;
+ i1 = i1 + 1 | 0;
+ }
+ }
+ while ((i1 | 0) < (i5 | 0)) {
+ HEAP32[i1 >> 2] = i6;
+ i1 = i1 + 4 | 0;
+ }
+ }
+ while ((i1 | 0) < (i2 | 0)) {
+ HEAP8[i1] = i4;
+ i1 = i1 + 1 | 0;
+ }
+ return i1 - i3 | 0;
+}
+function copyTempDouble(i1) {
+ i1 = i1 | 0;
+ HEAP8[tempDoublePtr] = HEAP8[i1];
+ HEAP8[tempDoublePtr + 1 | 0] = HEAP8[i1 + 1 | 0];
+ HEAP8[tempDoublePtr + 2 | 0] = HEAP8[i1 + 2 | 0];
+ HEAP8[tempDoublePtr + 3 | 0] = HEAP8[i1 + 3 | 0];
+ HEAP8[tempDoublePtr + 4 | 0] = HEAP8[i1 + 4 | 0];
+ HEAP8[tempDoublePtr + 5 | 0] = HEAP8[i1 + 5 | 0];
+ HEAP8[tempDoublePtr + 6 | 0] = HEAP8[i1 + 6 | 0];
+ HEAP8[tempDoublePtr + 7 | 0] = HEAP8[i1 + 7 | 0];
+}
+function __tr_init(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ HEAP32[i1 + 2840 >> 2] = i1 + 148;
+ HEAP32[i1 + 2848 >> 2] = 1064;
+ HEAP32[i1 + 2852 >> 2] = i1 + 2440;
+ HEAP32[i1 + 2860 >> 2] = 1088;
+ HEAP32[i1 + 2864 >> 2] = i1 + 2684;
+ HEAP32[i1 + 2872 >> 2] = 1112;
+ HEAP16[i1 + 5816 >> 1] = 0;
+ HEAP32[i1 + 5820 >> 2] = 0;
+ HEAP32[i1 + 5812 >> 2] = 8;
+ _init_block(i1);
+ STACKTOP = i2;
+ return;
+}
+function copyTempFloat(i1) {
+ i1 = i1 | 0;
+ HEAP8[tempDoublePtr] = HEAP8[i1];
+ HEAP8[tempDoublePtr + 1 | 0] = HEAP8[i1 + 1 | 0];
+ HEAP8[tempDoublePtr + 2 | 0] = HEAP8[i1 + 2 | 0];
+ HEAP8[tempDoublePtr + 3 | 0] = HEAP8[i1 + 3 | 0];
+}
+function _deflateInit_(i4, i3, i2, i1) {
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ var i5 = 0;
+ i5 = STACKTOP;
+ i4 = _deflateInit2_(i4, i3, 8, 15, 8, 0, i2, i1) | 0;
+ STACKTOP = i5;
+ return i4 | 0;
+}
+function _zcalloc(i3, i1, i2) {
+ i3 = i3 | 0;
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ var i4 = 0;
+ i4 = STACKTOP;
+ i3 = _malloc(Math_imul(i2, i1) | 0) | 0;
+ STACKTOP = i4;
+ return i3 | 0;
+}
+function dynCall_iiii(i4, i3, i2, i1) {
+ i4 = i4 | 0;
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ return FUNCTION_TABLE_iiii[i4 & 1](i3 | 0, i2 | 0, i1 | 0) | 0;
+}
+function runPostSets() {}
+function _strlen(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = i1;
+ while (HEAP8[i2] | 0) {
+ i2 = i2 + 1 | 0;
+ }
+ return i2 - i1 | 0;
+}
+function stackAlloc(i1) {
+ i1 = i1 | 0;
+ var i2 = 0;
+ i2 = STACKTOP;
+ STACKTOP = STACKTOP + i1 | 0;
+ STACKTOP = STACKTOP + 7 & -8;
+ return i2 | 0;
+}
+function dynCall_iii(i3, i2, i1) {
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ return FUNCTION_TABLE_iii[i3 & 3](i2 | 0, i1 | 0) | 0;
+}
+function setThrew(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ if ((__THREW__ | 0) == 0) {
+ __THREW__ = i1;
+ threwValue = i2;
+ }
+}
+function dynCall_vii(i3, i2, i1) {
+ i3 = i3 | 0;
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ FUNCTION_TABLE_vii[i3 & 1](i2 | 0, i1 | 0);
+}
+function _zcfree(i2, i1) {
+ i2 = i2 | 0;
+ i1 = i1 | 0;
+ i2 = STACKTOP;
+ _free(i1);
+ STACKTOP = i2;
+ return;
+}
+function _compressBound(i1) {
+ i1 = i1 | 0;
+ return i1 + 13 + (i1 >>> 12) + (i1 >>> 14) + (i1 >>> 25) | 0;
+}
+function b0(i1, i2, i3) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ i3 = i3 | 0;
+ abort(0);
+ return 0;
+}
+function b2(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ abort(2);
+ return 0;
+}
+function b1(i1, i2) {
+ i1 = i1 | 0;
+ i2 = i2 | 0;
+ abort(1);
+}
+function stackRestore(i1) {
+ i1 = i1 | 0;
+ STACKTOP = i1;
+}
+function setTempRet9(i1) {
+ i1 = i1 | 0;
+ tempRet9 = i1;
+}
+function setTempRet8(i1) {
+ i1 = i1 | 0;
+ tempRet8 = i1;
+}
+function setTempRet7(i1) {
+ i1 = i1 | 0;
+ tempRet7 = i1;
+}
+function setTempRet6(i1) {
+ i1 = i1 | 0;
+ tempRet6 = i1;
+}
+function setTempRet5(i1) {
+ i1 = i1 | 0;
+ tempRet5 = i1;
+}
+function setTempRet4(i1) {
+ i1 = i1 | 0;
+ tempRet4 = i1;
+}
+function setTempRet3(i1) {
+ i1 = i1 | 0;
+ tempRet3 = i1;
+}
+function setTempRet2(i1) {
+ i1 = i1 | 0;
+ tempRet2 = i1;
+}
+function setTempRet1(i1) {
+ i1 = i1 | 0;
+ tempRet1 = i1;
+}
+function setTempRet0(i1) {
+ i1 = i1 | 0;
+ tempRet0 = i1;
+}
+function stackSave() {
+ return STACKTOP | 0;
+}
+
+// EMSCRIPTEN_END_FUNCS
+ var FUNCTION_TABLE_iiii = [b0,_zcalloc];
+ var FUNCTION_TABLE_vii = [b1,_zcfree];
+ var FUNCTION_TABLE_iii = [b2,_deflate_stored,_deflate_fast,_deflate_slow];
+
+ return { _strlen: _strlen, _free: _free, _main: _main, _memset: _memset, _malloc: _malloc, _memcpy: _memcpy, runPostSets: runPostSets, stackAlloc: stackAlloc, stackSave: stackSave, stackRestore: stackRestore, setThrew: setThrew, setTempRet0: setTempRet0, setTempRet1: setTempRet1, setTempRet2: setTempRet2, setTempRet3: setTempRet3, setTempRet4: setTempRet4, setTempRet5: setTempRet5, setTempRet6: setTempRet6, setTempRet7: setTempRet7, setTempRet8: setTempRet8, setTempRet9: setTempRet9, dynCall_iiii: dynCall_iiii, dynCall_vii: dynCall_vii, dynCall_iii: dynCall_iii };
+}).toString(),
+// EMSCRIPTEN_END_ASM
+{ "Math": Math, "Int8Array": Int8Array, "Int16Array": Int16Array, "Int32Array": Int32Array, "Uint8Array": Uint8Array, "Uint16Array": Uint16Array, "Uint32Array": Uint32Array, "Float32Array": Float32Array, "Float64Array": Float64Array, "abort": abort, "assert": assert, "asmPrintInt": asmPrintInt, "asmPrintFloat": asmPrintFloat, "min": Math_min, "invoke_iiii": invoke_iiii, "invoke_vii": invoke_vii, "invoke_iii": invoke_iii, "_send": _send, "___setErrNo": ___setErrNo, "___assert_fail": ___assert_fail, "_fflush": _fflush, "_pwrite": _pwrite, "__reallyNegative": __reallyNegative, "_sbrk": _sbrk, "___errno_location": ___errno_location, "_emscripten_memcpy_big": _emscripten_memcpy_big, "_fileno": _fileno, "_sysconf": _sysconf, "_puts": _puts, "_mkport": _mkport, "_write": _write, "_llvm_bswap_i32": _llvm_bswap_i32, "_fputc": _fputc, "_abort": _abort, "_fwrite": _fwrite, "_time": _time, "_fprintf": _fprintf, "__formatString": __formatString, "_fputs": _fputs, "_printf": _printf, "STACKTOP": STACKTOP, "STACK_MAX": STACK_MAX, "tempDoublePtr": tempDoublePtr, "ABORT": ABORT, "NaN": NaN, "Infinity": Infinity }, buffer);
+var _strlen = Module["_strlen"] = asm["_strlen"];
+var _free = Module["_free"] = asm["_free"];
+var _main = Module["_main"] = asm["_main"];
+var _memset = Module["_memset"] = asm["_memset"];
+var _malloc = Module["_malloc"] = asm["_malloc"];
+var _memcpy = Module["_memcpy"] = asm["_memcpy"];
+var runPostSets = Module["runPostSets"] = asm["runPostSets"];
+var dynCall_iiii = Module["dynCall_iiii"] = asm["dynCall_iiii"];
+var dynCall_vii = Module["dynCall_vii"] = asm["dynCall_vii"];
+var dynCall_iii = Module["dynCall_iii"] = asm["dynCall_iii"];
+
+Runtime.stackAlloc = function(size) { return asm['stackAlloc'](size) };
+Runtime.stackSave = function() { return asm['stackSave']() };
+Runtime.stackRestore = function(top) { asm['stackRestore'](top) };
+
+
+// Warning: printing of i64 values may be slightly rounded! No deep i64 math used, so precise i64 code not included
+var i64Math = null;
+
+// === Auto-generated postamble setup entry stuff ===
+
+if (memoryInitializer) {
+ if (ENVIRONMENT_IS_NODE || ENVIRONMENT_IS_SHELL) {
+ var data = Module['readBinary'](memoryInitializer);
+ HEAPU8.set(data, STATIC_BASE);
+ } else {
+ addRunDependency('memory initializer');
+ Browser.asyncLoad(memoryInitializer, function(data) {
+ HEAPU8.set(data, STATIC_BASE);
+ removeRunDependency('memory initializer');
+ }, function(data) {
+ throw 'could not load memory initializer ' + memoryInitializer;
+ });
+ }
+}
+
+function ExitStatus(status) {
+ this.name = "ExitStatus";
+ this.message = "Program terminated with exit(" + status + ")";
+ this.status = status;
+};
+ExitStatus.prototype = new Error();
+ExitStatus.prototype.constructor = ExitStatus;
+
+var initialStackTop;
+var preloadStartTime = null;
+var calledMain = false;
+
+dependenciesFulfilled = function runCaller() {
+ // If run has never been called, and we should call run (INVOKE_RUN is true, and Module.noInitialRun is not false)
+ if (!Module['calledRun'] && shouldRunNow) run([].concat(Module["arguments"]));
+ if (!Module['calledRun']) dependenciesFulfilled = runCaller; // try this again later, after new deps are fulfilled
+}
+
+Module['callMain'] = Module.callMain = function callMain(args) {
+ assert(runDependencies == 0, 'cannot call main when async dependencies remain! (listen on __ATMAIN__)');
+ assert(__ATPRERUN__.length == 0, 'cannot call main when preRun functions remain to be called');
+
+ args = args || [];
+
+ ensureInitRuntime();
+
+ var argc = args.length+1;
+ function pad() {
+ for (var i = 0; i < 4-1; i++) {
+ argv.push(0);
+ }
+ }
+ var argv = [allocate(intArrayFromString("/bin/this.program"), 'i8', ALLOC_NORMAL) ];
+ pad();
+ for (var i = 0; i < argc-1; i = i + 1) {
+ argv.push(allocate(intArrayFromString(args[i]), 'i8', ALLOC_NORMAL));
+ pad();
+ }
+ argv.push(0);
+ argv = allocate(argv, 'i32', ALLOC_NORMAL);
+
+ initialStackTop = STACKTOP;
+
+ try {
+
+ var ret = Module['_main'](argc, argv, 0);
+
+
+ // if we're not running an evented main loop, it's time to exit
+ if (!Module['noExitRuntime']) {
+ exit(ret);
+ }
+ }
+ catch(e) {
+ if (e instanceof ExitStatus) {
+ // exit() throws this once it's done to make sure execution
+ // has been stopped completely
+ return;
+ } else if (e == 'SimulateInfiniteLoop') {
+ // running an evented main loop, don't immediately exit
+ Module['noExitRuntime'] = true;
+ return;
+ } else {
+ if (e && typeof e === 'object' && e.stack) Module.printErr('exception thrown: ' + [e, e.stack]);
+ throw e;
+ }
+ } finally {
+ calledMain = true;
+ }
+}
+
+
+
+
+function run(args) {
+ args = args || Module['arguments'];
+
+ if (preloadStartTime === null) preloadStartTime = Date.now();
+
+ if (runDependencies > 0) {
+ Module.printErr('run() called, but dependencies remain, so not running');
+ return;
+ }
+
+ preRun();
+
+ if (runDependencies > 0) return; // a preRun added a dependency, run will be called later
+ if (Module['calledRun']) return; // run may have just been called through dependencies being fulfilled just in this very frame
+
+ function doRun() {
+ if (Module['calledRun']) return; // run may have just been called while the async setStatus time below was happening
+ Module['calledRun'] = true;
+
+ ensureInitRuntime();
+
+ preMain();
+
+ if (ENVIRONMENT_IS_WEB && preloadStartTime !== null) {
+ Module.printErr('pre-main prep time: ' + (Date.now() - preloadStartTime) + ' ms');
+ }
+
+ if (Module['_main'] && shouldRunNow) {
+ Module['callMain'](args);
+ }
+
+ postRun();
+ }
+
+ if (Module['setStatus']) {
+ Module['setStatus']('Running...');
+ setTimeout(function() {
+ setTimeout(function() {
+ Module['setStatus']('');
+ }, 1);
+ if (!ABORT) doRun();
+ }, 1);
+ } else {
+ doRun();
+ }
+}
+Module['run'] = Module.run = run;
+
+function exit(status) {
+ ABORT = true;
+ EXITSTATUS = status;
+ STACKTOP = initialStackTop;
+
+ // exit the runtime
+ exitRuntime();
+
+ // TODO We should handle this differently based on environment.
+ // In the browser, the best we can do is throw an exception
+ // to halt execution, but in node we could process.exit and
+ // I'd imagine SM shell would have something equivalent.
+ // This would let us set a proper exit status (which
+ // would be great for checking test exit statuses).
+ // https://github.com/kripken/emscripten/issues/1371
+
+ // throw an exception to halt the current execution
+ throw new ExitStatus(status);
+}
+Module['exit'] = Module.exit = exit;
+
+function abort(text) {
+ if (text) {
+ Module.print(text);
+ Module.printErr(text);
+ }
+
+ ABORT = true;
+ EXITSTATUS = 1;
+
+ var extra = '\nIf this abort() is unexpected, build with -s ASSERTIONS=1 which can give more information.';
+
+ throw 'abort() at ' + stackTrace() + extra;
+}
+Module['abort'] = Module.abort = abort;
+
+// {{PRE_RUN_ADDITIONS}}
+
+if (Module['preInit']) {
+ if (typeof Module['preInit'] == 'function') Module['preInit'] = [Module['preInit']];
+ while (Module['preInit'].length > 0) {
+ Module['preInit'].pop()();
+ }
+}
+
+// shouldRunNow refers to calling main(), not run().
+var shouldRunNow = true;
+if (Module['noInitialRun']) {
+ shouldRunNow = false;
+}
+
+
+run([].concat(Module["arguments"]));
diff --git a/deps/v8/test/mjsunit/wasm/export-table.js b/deps/v8/test/mjsunit/wasm/export-table.js
new file mode 100644
index 0000000000..e85da9b664
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/export-table.js
@@ -0,0 +1,50 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function testExportedMain() {
+ var kReturnValue = 88;
+ var builder = new WasmModuleBuilder();
+
+ builder.addFunction("main", [kAstI32])
+ .addBody([
+ kExprReturn,
+ kExprI8Const,
+ kReturnValue])
+ .exportFunc();
+
+ var module = builder.instantiate();
+
+ assertEquals("object", typeof module.exports);
+ assertEquals("function", typeof module.exports.main);
+
+ assertEquals(kReturnValue, module.exports.main());
+})();
+
+(function testExportedTwice() {
+ var kReturnValue = 99;
+
+ var builder = new WasmModuleBuilder();
+
+ builder.addFunction("main", [kAstI32])
+ .addBody([
+ kExprReturn,
+ kExprI8Const,
+ kReturnValue])
+ .exportAs("blah")
+ .exportAs("foo");
+
+ var module = builder.instantiate();
+
+ assertEquals("object", typeof module.exports);
+ assertEquals("function", typeof module.exports.blah);
+ assertEquals("function", typeof module.exports.foo);
+
+ assertEquals(kReturnValue, module.exports.foo());
+ assertEquals(kReturnValue, module.exports.blah());
+})();
diff --git a/deps/v8/test/mjsunit/wasm/ffi-error.js b/deps/v8/test/mjsunit/wasm/ffi-error.js
index 3359429055..649ee273ae 100644
--- a/deps/v8/test/mjsunit/wasm/ffi-error.js
+++ b/deps/v8/test/mjsunit/wasm/ffi-error.js
@@ -5,40 +5,21 @@
// Flags: --expose-wasm
load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
function testCallFFI(ffi) {
- var kBodySize = 6;
- var kNameAddOffset = 28 + kBodySize + 1;
- var kNameMainOffset = kNameAddOffset + 4;
-
- var data = bytes(
- kDeclMemory,
- 12, 12, 1, // memory
- // -- signatures
- kDeclSignatures, 1,
- 2, kAstI32, kAstF64, kAstF64, // (f64,f64)->int
- // -- foreign function
- kDeclFunctions, 2,
- kDeclFunctionName | kDeclFunctionImport,
- 0, 0, // signature index
- kNameAddOffset, 0, 0, 0, // name offset
- // -- main function
- kDeclFunctionName | kDeclFunctionExport,
- 0, 0, // signature index
- kNameMainOffset, 0, 0, 0, // name offset
- kBodySize, 0,
- // main body
- kExprCallFunction, 0, // --
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
- // names
- kDeclEnd,
- 'f', 'u', 'n', 0, // --
- 'm', 'a', 'i', 'n', 0 // --
- );
-
- print("instantiate FFI");
- var module = _WASMEXP_.instantiateModule(data, ffi);
+ var builder = new WasmModuleBuilder();
+
+ var sig_index = [kAstI32, kAstF64, kAstF64];
+ builder.addImport("fun", sig_index);
+ builder.addFunction("main", sig_index)
+ .addBody([
+ kExprCallImport, 0, // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1]) // --
+ .exportFunc();
+
+ var module = builder.instantiate(ffi);
}
// everything is good.
diff --git a/deps/v8/test/mjsunit/wasm/ffi.js b/deps/v8/test/mjsunit/wasm/ffi.js
index 95d655dc6d..61fcf02b3c 100644
--- a/deps/v8/test/mjsunit/wasm/ffi.js
+++ b/deps/v8/test/mjsunit/wasm/ffi.js
@@ -5,52 +5,31 @@
// Flags: --expose-wasm
load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
function testCallFFI(func, check) {
- var kBodySize = 6;
- var kNameFunOffset = 24 + kBodySize + 1;
- var kNameMainOffset = kNameFunOffset + 4;
-
- var ffi = new Object();
- ffi.fun = func;
-
- var data = bytes(
- // signatures
- kDeclSignatures, 1,
- 2, kAstI32, kAstF64, kAstF64, // (f64,f64) -> int
- // -- foreign function
- kDeclFunctions, 2,
- kDeclFunctionName | kDeclFunctionImport,
- 0, 0,
- kNameFunOffset, 0, 0, 0, // name offset
- // -- main function
- kDeclFunctionName | kDeclFunctionExport,
- 0, 0,
- kNameMainOffset, 0, 0, 0, // name offset
- kBodySize, 0,
- // main body
- kExprCallFunction, 0, // --
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
- // names
- kDeclEnd,
- 'f', 'u', 'n', 0, // --
- 'm', 'a', 'i', 'n', 0 // --
- );
-
- var module = _WASMEXP_.instantiateModule(data, ffi);
-
- assertEquals("function", typeof module.main);
+ var builder = new WasmModuleBuilder();
+
+ var sig_index = builder.addSignature([kAstI32, kAstF64, kAstF64]);
+ builder.addImport("func", sig_index);
+ builder.addFunction("main", sig_index)
+ .addBody([
+ kExprCallImport, 0, // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1]) // --
+ .exportFunc();
+
+ var main = builder.instantiate({func: func}).exports.main;
for (var i = 0; i < 100000; i += 10003) {
var a = 22.5 + i, b = 10.5 + i;
- var r = module.main(a, b);
+ var r = main(a, b);
check(r, a, b);
}
}
var global = (function() { return this; })();
-var params = [-99, -99, -99, -99];
+var params = [-99, -99, -99, -99, -99];
var was_called = false;
var length = -1;
@@ -189,62 +168,39 @@ testCallFFI(returnValue(objWithValueOf), checkReturn(198));
function testCallBinopVoid(type, func, check) {
- var kBodySize = 10;
- var kNameFunOffset = 28 + kBodySize + 1;
- var kNameMainOffset = kNameFunOffset + 4;
-
- var ffi = new Object();
-
var passed_length = -1;
var passed_a = -1;
var passed_b = -1;
var args_a = -1;
var args_b = -1;
- ffi.fun = function(a, b) {
+ ffi = {func: function(a, b) {
passed_length = arguments.length;
passed_a = a;
passed_b = b;
args_a = arguments[0];
args_b = arguments[1];
- }
+ }};
+
+ var builder = new WasmModuleBuilder();
- var data = bytes(
- // -- signatures
- kDeclSignatures, 2,
- 2, kAstStmt, type, type, // (type,type)->void
- 2, kAstI32, type, type, // (type,type)->int
- // -- foreign function
- kDeclFunctions, 2,
- kDeclFunctionName | kDeclFunctionImport,
- 0, 0, // signature index
- kNameFunOffset, 0, 0, 0, // name offset
- // -- main function
- kDeclFunctionName | kDeclFunctionExport,
- 1, 0, // signature index
- kNameMainOffset, 0, 0, 0, // name offset
- kBodySize, 0, // body size
- // main body
- kExprBlock, 2, // --
- kExprCallFunction, 0, // --
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
- kExprI8Const, 99, // --
- // names
- kDeclEnd,
- 'f', 'u', 'n', 0, // --
- 'm', 'a', 'i', 'n', 0 // --
- );
-
- var module = _WASMEXP_.instantiateModule(data, ffi);
-
- assertEquals("function", typeof module.main);
+ builder.addImport("func", [kAstStmt, type, type]);
+ builder.addFunction("main", [kAstI32, type, type])
+ .addBody([
+ kExprBlock, 2, // --
+ kExprCallImport, 0, // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprI8Const, 99]) // --
+ .exportFunc()
+
+ var main = builder.instantiate(ffi).exports.main;
print("testCallBinopVoid", type);
for (var i = 0; i < 100000; i += 10003.1) {
var a = 22.5 + i, b = 10.5 + i;
- var r = module.main(a, b);
+ var r = main(a, b);
assertEquals(99, r);
assertEquals(2, passed_length);
var expected_a, expected_b;
@@ -282,51 +238,21 @@ testCallBinopVoid(kAstF64);
function testCallPrint() {
- var kBodySize = 10;
- var kNamePrintOffset = 10 + 7 + 7 + 9 + kBodySize + 1;
- var kNameMainOffset = kNamePrintOffset + 6;
-
- var ffi = new Object();
- ffi.print = print;
-
- var data = bytes(
- // -- signatures
- kDeclSignatures, 2,
- 1, kAstStmt, kAstI32, // i32->void
- 1, kAstStmt, kAstF64, // f64->int
- kDeclFunctions, 3,
- // -- import print i32
- kDeclFunctionName | kDeclFunctionImport,
- 0, 0, // signature index
- kNamePrintOffset, 0, 0, 0, // name offset
- // -- import print f64
- kDeclFunctionName | kDeclFunctionImport,
- 1, 0, // signature index
- kNamePrintOffset, 0, 0, 0, // name offset
- // -- decl main
- kDeclFunctionName | kDeclFunctionExport,
- 1, 0, // signature index
- kNameMainOffset, 0, 0, 0, // name offset
- kBodySize, 0, // body size
- // main body
- kExprBlock, 2, // --
- kExprCallFunction, 0, // --
- kExprI8Const, 97, // --
- kExprCallFunction, 1, // --
- kExprGetLocal, 0, // --
- // names
- kDeclEnd,
- 'p', 'r', 'i', 'n', 't', 0, // --
- 'm', 'a', 'i', 'n', 0 // --
- );
-
- var module = _WASMEXP_.instantiateModule(data, ffi);
-
- assertEquals("function", typeof module.main);
-
- for (var i = -9; i < 900; i += 6.125) {
- module.main(i);
- }
+ var builder = new WasmModuleBuilder();
+
+ builder.addImport("print", [kAstStmt, kAstI32]);
+ builder.addImport("print", [kAstStmt, kAstF64]);
+ builder.addFunction("main", [kAstStmt, kAstF64])
+ .addBody([
+ kExprBlock, 2, // --
+ kExprCallImport, 0, // --
+ kExprI8Const, 97, // --
+ kExprCallImport, 1, // --
+ kExprGetLocal, 0]) // --
+ .exportFunc()
+
+ var main = builder.instantiate({print: print}).exports.main;
+ for (var i = -9; i < 900; i += 6.125) main(i);
}
testCallPrint();
diff --git a/deps/v8/test/mjsunit/wasm/function-prototype.js b/deps/v8/test/mjsunit/wasm/function-prototype.js
new file mode 100644
index 0000000000..db04b950bb
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/function-prototype.js
@@ -0,0 +1,35 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function TestFunctionPrototype() {
+ var builder = new WasmModuleBuilder();
+
+ builder.addFunction("nine", [kAstI32])
+ .addBody([kExprI8Const, 9])
+ .exportFunc();
+
+ var func = builder.instantiate().exports.nine;
+
+ // Check type and existence of prototype
+ assertEquals("function", typeof func.apply);
+ assertTrue(func.prototype != undefined);
+ assertEquals("nine", func.name);
+ assertEquals(undefined, func.displayName);
+
+ // Check that .apply() works.
+ assertEquals(9, func.apply([]));
+ assertEquals(9, func.apply([1]));
+ assertEquals(9, func.apply([2, 3]));
+ assertEquals(9, func.apply([6, 7, 9, 9]));
+
+ // TODO(titzer): assertEquals(1, func.length);
+
+ // Check we don't crash when converting to a string.
+ print(func.toString());
+})();
diff --git a/deps/v8/test/mjsunit/wasm/gc-frame.js b/deps/v8/test/mjsunit/wasm/gc-frame.js
new file mode 100644
index 0000000000..8387d26176
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/gc-frame.js
@@ -0,0 +1,74 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --expose-gc
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+function makeFFI(func, t) {
+ var builder = new WasmModuleBuilder();
+
+ var sig_index = builder.addSignature([t,t,t,t,t,t,t,t,t,t,t]);
+ builder.addImport("func", sig_index);
+ // Try to create a frame with lots of spilled values and parameters
+ // on the stack to try to catch GC bugs in the reference maps for
+ // the different parts of the stack.
+ builder.addFunction("main", sig_index)
+ .addBody([
+ kExprCallImport, 0, // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprGetLocal, 2, // --
+ kExprGetLocal, 3, // --
+ kExprGetLocal, 4, // --
+ kExprGetLocal, 5, // --
+ kExprGetLocal, 6, // --
+ kExprGetLocal, 7, // --
+ kExprGetLocal, 8, // --
+ kExprGetLocal, 9, // --
+ kExprCallImport, 0, // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprGetLocal, 2, // --
+ kExprGetLocal, 3, // --
+ kExprGetLocal, 4, // --
+ kExprGetLocal, 5, // --
+ kExprGetLocal, 6, // --
+ kExprGetLocal, 7, // --
+ kExprGetLocal, 8, // --
+ kExprGetLocal, 9 // --
+ ]) // --
+ .exportFunc();
+
+ return builder.instantiate({func: func}).exports.main;
+}
+
+
+function print10(a, b, c, d, e, f, g, h, i) {
+ print(a + ",", b + ",", c + ",", d + ",", e + ",", f + ",", g + ",", h + ",", i);
+ gc();
+ print(a + ",", b + ",", c + ",", d + ",", e + ",", f + ",", g + ",", h + ",", i);
+}
+
+(function I32Test() {
+ var main = makeFFI(print10, kAstI32);
+ for (var i = 1; i < 0xFFFFFFF; i <<= 2) {
+ main(i - 1, i, i + 2, i + 3, i + 4, i + 5, i + 6, i + 7, i + 8);
+ }
+})();
+
+(function F32Test() {
+ var main = makeFFI(print10, kAstF32);
+ for (var i = 1; i < 2e+30; i *= -157) {
+ main(i - 1, i, i + 2, i + 3, i + 4, i + 5, i + 6, i + 7, i + 8);
+ }
+})();
+
+(function I32Test() {
+ var main = makeFFI(print10, kAstF64);
+ for (var i = 1; i < 2e+80; i *= -1137) {
+ main(i - 1, i, i + 2, i + 3, i + 4, i + 5, i + 6, i + 7, i + 8);
+ }
+})();
diff --git a/deps/v8/test/mjsunit/wasm/import-table.js b/deps/v8/test/mjsunit/wasm/import-table.js
index 33d1c3551c..7579901651 100644
--- a/deps/v8/test/mjsunit/wasm/import-table.js
+++ b/deps/v8/test/mjsunit/wasm/import-table.js
@@ -5,49 +5,25 @@
// Flags: --expose-wasm
load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
function testCallImport(func, check) {
- var kBodySize = 6;
- var kNameFunOffset = 29 + kBodySize + 1;
- var kNameMainOffset = kNameFunOffset + 4;
+ var builder = new WasmModuleBuilder();
- var ffi = new Object();
- ffi.fun = func;
-
- var data = bytes(
- // signatures
- kDeclSignatures, 1,
- 2, kAstI32, kAstF64, kAstF64, // (f64,f64) -> int
- // -- main function
- kDeclFunctions,
- 1,
- kDeclFunctionName | kDeclFunctionExport,
- 0, 0,
- kNameMainOffset, 0, 0, 0, // name offset
- kBodySize, 0,
- // main body
- kExprCallImport, 0, // --
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
- // imports
- kDeclImportTable,
- 1,
- 0, 0, // sig index
- 0, 0, 0, 0, // module name offset
- kNameFunOffset, 0, 0, 0, // function name offset
- // names
- kDeclEnd,
- 'f', 'u', 'n', 0, // --
- 'm', 'a', 'i', 'n', 0 // --
- );
-
- var module = _WASMEXP_.instantiateModule(data, ffi);
-
- assertEquals("function", typeof module.main);
+ var sig_index = builder.addSignature([kAstI32, kAstF64, kAstF64]);
+ builder.addImport("func", sig_index);
+ builder.addFunction("main", sig_index)
+ .addBody([
+ kExprCallImport, 0, // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1]) // --
+ .exportAs("main");
+
+ var main = builder.instantiate({func: func}).exports.main;
for (var i = 0; i < 100000; i += 10003) {
var a = 22.5 + i, b = 10.5 + i;
- var r = module.main(a, b);
+ var r = main(a, b);
check(r, a, b);
}
}
@@ -192,10 +168,6 @@ testCallImport(returnValue(objWithValueOf), checkReturn(198));
function testCallBinopVoid(type, func, check) {
- var kBodySize = 10;
- var kNameFunOffset = 28 + kBodySize + 1;
- var kNameMainOffset = kNameFunOffset + 4;
-
var ffi = new Object();
var passed_length = -1;
@@ -204,7 +176,7 @@ function testCallBinopVoid(type, func, check) {
var args_a = -1;
var args_b = -1;
- ffi.fun = function(a, b) {
+ ffi.func = function(a, b) {
passed_length = arguments.length;
passed_a = a;
passed_b = b;
@@ -212,42 +184,25 @@ function testCallBinopVoid(type, func, check) {
args_b = arguments[1];
}
- var data = bytes(
- // -- signatures
- kDeclSignatures, 2,
- 2, kAstStmt, type, type, // (type,type)->void
- 2, kAstI32, type, type, // (type,type)->int
- // -- foreign function
- kDeclFunctions, 2,
- kDeclFunctionName | kDeclFunctionImport,
- 0, 0, // signature index
- kNameFunOffset, 0, 0, 0, // name offset
- // -- main function
- kDeclFunctionName | kDeclFunctionExport,
- 1, 0, // signature index
- kNameMainOffset, 0, 0, 0, // name offset
- kBodySize, 0, // body size
- // main body
- kExprBlock, 2, // --
- kExprCallFunction, 0, // --
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
- kExprI8Const, 99, // --
- // names
- kDeclEnd,
- 'f', 'u', 'n', 0, // --
- 'm', 'a', 'i', 'n', 0 // --
- );
-
- var module = _WASMEXP_.instantiateModule(data, ffi);
-
- assertEquals("function", typeof module.main);
+ var builder = new WasmModuleBuilder();
+
+ builder.addImport("func", [kAstStmt, type, type]);
+ builder.addFunction("main", [kAstI32, type, type])
+ .addBody([
+ kExprBlock, 2, // --
+ kExprCallImport, 0, // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprI8Const, 99])
+ .exportFunc("main");
+
+ var main = builder.instantiate(ffi).exports.main;
print("testCallBinopVoid", type);
for (var i = 0; i < 100000; i += 10003.1) {
var a = 22.5 + i, b = 10.5 + i;
- var r = module.main(a, b);
+ var r = main(a, b);
assertEquals(99, r);
assertEquals(2, passed_length);
var expected_a, expected_b;
@@ -285,50 +240,22 @@ testCallBinopVoid(kAstF64);
function testCallPrint() {
- var kBodySize = 10;
- var kNamePrintOffset = 10 + 7 + 7 + 9 + kBodySize + 1;
- var kNameMainOffset = kNamePrintOffset + 6;
-
- var ffi = new Object();
- ffi.print = print;
-
- var data = bytes(
- // -- signatures
- kDeclSignatures, 2,
- 1, kAstStmt, kAstI32, // i32->void
- 1, kAstStmt, kAstF64, // f64->int
- kDeclFunctions, 3,
- // -- import print i32
- kDeclFunctionName | kDeclFunctionImport,
- 0, 0, // signature index
- kNamePrintOffset, 0, 0, 0, // name offset
- // -- import print f64
- kDeclFunctionName | kDeclFunctionImport,
- 1, 0, // signature index
- kNamePrintOffset, 0, 0, 0, // name offset
- // -- decl main
- kDeclFunctionName | kDeclFunctionExport,
- 1, 0, // signature index
- kNameMainOffset, 0, 0, 0, // name offset
- kBodySize, 0, // body size
- // main body
- kExprBlock, 2, // --
- kExprCallFunction, 0, // --
- kExprI8Const, 97, // --
- kExprCallFunction, 1, // --
- kExprGetLocal, 0, // --
- // names
- kDeclEnd,
- 'p', 'r', 'i', 'n', 't', 0, // --
- 'm', 'a', 'i', 'n', 0 // --
- );
-
- var module = _WASMEXP_.instantiateModule(data, ffi);
-
- assertEquals("function", typeof module.main);
-
- for (var i = -9; i < 900; i += 6.125) {
- module.main(i);
+ var builder = new WasmModuleBuilder();
+ builder.addImport("print", [kAstStmt, kAstI32]);
+ builder.addImport("print", [kAstStmt, kAstF64]);
+ builder.addFunction("main", [kAstStmt, kAstF64])
+ .addBody([
+ kExprBlock, 2, // --
+ kExprCallImport, 0, // --
+ kExprI8Const, 97, // --
+ kExprCallImport, 1, // --
+ kExprGetLocal, 0]) // --
+ .exportFunc();
+
+ var main = builder.instantiate({print: print}).exports.main;
+
+ for (var i = -9; i < 900; i += 16.125) {
+ main(i);
}
}
@@ -337,51 +264,19 @@ testCallPrint();
function testCallImport2(foo, bar, expected) {
- var kBodySize = 5;
- var kNameFooOffset = 37 + kBodySize + 1;
- var kNameBarOffset = kNameFooOffset + 4;
- var kNameMainOffset = kNameBarOffset + 4;
-
- var ffi = new Object();
- ffi.foo = foo;
- ffi.bar = bar;
-
- var data = bytes(
- // signatures
- kDeclSignatures, 1,
- 0, kAstI32, // void -> i32
- // -- main function
- kDeclFunctions,
- 1,
- kDeclFunctionName | kDeclFunctionExport,
- 0, 0,
- kNameMainOffset, 0, 0, 0, // name offset
- kBodySize, 0,
- // main body
- kExprI32Add, // --
- kExprCallImport, 0, // --
- kExprCallImport, 1, // --
- // imports
- kDeclImportTable,
- 2,
- 0, 0, // sig index
- 0, 0, 0, 0, // module name offset
- kNameFooOffset, 0, 0, 0, // function name offset
- 0, 0, // sig index
- 0, 0, 0, 0, // module name offset
- kNameBarOffset, 0, 0, 0, // function name offset
- // names
- kDeclEnd,
- 'f', 'o', 'o', 0, // --
- 'b', 'a', 'r', 0, // --
- 'm', 'a', 'i', 'n', 0 // --
- );
-
- var module = _WASMEXP_.instantiateModule(data, ffi);
-
- assertEquals("function", typeof module.main);
-
- assertEquals(expected, module.main());
+ var builder = new WasmModuleBuilder();
+
+ builder.addImport("foo", [kAstI32]);
+ builder.addImport("bar", [kAstI32]);
+ builder.addFunction("main", [kAstI32])
+ .addBody([
+ kExprI32Add, // --
+ kExprCallImport, 0, // --
+ kExprCallImport, 1]) // --
+ .exportFunc();
+
+ var main = builder.instantiate({foo: foo, bar: bar}).exports.main;
+ assertEquals(expected, main());
}
testCallImport2(function() { return 33; }, function () { return 44; }, 77);
diff --git a/deps/v8/test/mjsunit/wasm/indirect-calls.js b/deps/v8/test/mjsunit/wasm/indirect-calls.js
index 560c8baa08..3258687431 100644
--- a/deps/v8/test/mjsunit/wasm/indirect-calls.js
+++ b/deps/v8/test/mjsunit/wasm/indirect-calls.js
@@ -5,69 +5,42 @@
// Flags: --expose-wasm
load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
var module = (function () {
- var kFuncWithBody = 9;
- var kFuncImported = 7;
- var kBodySize1 = 5;
- var kBodySize2 = 8;
- var kFuncTableSize = 8;
- var kSubOffset = 13 + kFuncWithBody + kBodySize1 + kFuncImported + kFuncWithBody + kBodySize2 + kFuncTableSize + 1;
- var kAddOffset = kSubOffset + 4;
- var kMainOffset = kAddOffset + 4;
-
- var ffi = new Object();
- ffi.add = (function(a, b) { return a + b | 0; });
-
- return _WASMEXP_.instantiateModule(bytes(
- // -- signatures
- kDeclSignatures, 2,
- 2, kAstI32, kAstI32, kAstI32, // int, int -> int
- 3, kAstI32, kAstI32, kAstI32, kAstI32, // int, int, int -> int
- // -- function #0 (sub)
- kDeclFunctions, 3,
- kDeclFunctionName,
- 0, 0, // signature offset
- kSubOffset, 0, 0, 0, // name offset
- kBodySize1, 0, // body size
- kExprI32Sub, // --
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
- // -- function #1 (add)
- kDeclFunctionName | kDeclFunctionImport,
- 0, 0, // signature offset
- kAddOffset, 0, 0, 0, // name offset
- // -- function #2 (main)
- kDeclFunctionName | kDeclFunctionExport,
- 1, 0, // signature offset
- kMainOffset, 0, 0, 0, // name offset
- kBodySize2, 0, // body size
- kExprCallIndirect, 0,
- kExprGetLocal, 0,
- kExprGetLocal, 1,
- kExprGetLocal, 2,
- // -- function table
- kDeclFunctionTable,
- 3,
- 0, 0,
- 1, 0,
- 2, 0,
- kDeclEnd,
- 's', 'u', 'b', 0, // name
- 'a', 'd', 'd', 0, // name
- 'm', 'a', 'i', 'n', 0 // name
- ), ffi);
+ var builder = new WasmModuleBuilder();
+
+ var sig_index = builder.addSignature([kAstI32, kAstI32, kAstI32]);
+ builder.addImport("add", sig_index);
+ builder.addFunction("add", sig_index)
+ .addBody([
+ kExprCallImport, 0, kExprGetLocal, 0, kExprGetLocal, 1
+ ]);
+ builder.addFunction("sub", sig_index)
+ .addBody([
+ kExprI32Sub, kExprGetLocal, 0, kExprGetLocal, 1
+ ]);
+ builder.addFunction("main", [kAstI32, kAstI32, kAstI32, kAstI32])
+ .addBody([
+ kExprCallIndirect, sig_index,
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprGetLocal, 2])
+ .exportFunc()
+ builder.appendToFunctionTable([0, 1, 2]);
+
+ return builder.instantiate({add: function(a, b) { return a + b | 0; }});
})();
// Check the module exists.
assertFalse(module === undefined);
assertFalse(module === null);
assertFalse(module === 0);
-assertEquals("object", typeof module);
-assertEquals("function", typeof module.main);
+assertEquals("object", typeof module.exports);
+assertEquals("function", typeof module.exports.main);
-assertEquals(5, module.main(0, 12, 7));
-assertEquals(19, module.main(1, 12, 7));
+assertEquals(5, module.exports.main(1, 12, 7));
+assertEquals(19, module.exports.main(0, 12, 7));
-assertTraps(kTrapFuncSigMismatch, "module.main(2, 12, 33)");
-assertTraps(kTrapFuncInvalid, "module.main(3, 12, 33)");
+assertTraps(kTrapFuncSigMismatch, "module.exports.main(2, 12, 33)");
+assertTraps(kTrapFuncInvalid, "module.exports.main(3, 12, 33)");
diff --git a/deps/v8/test/mjsunit/wasm/instantiate-module-basic.js b/deps/v8/test/mjsunit/wasm/instantiate-module-basic.js
index 13a22615fc..bc13122f1b 100644
--- a/deps/v8/test/mjsunit/wasm/instantiate-module-basic.js
+++ b/deps/v8/test/mjsunit/wasm/instantiate-module-basic.js
@@ -5,33 +5,20 @@
// Flags: --expose-wasm
load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
var kReturnValue = 117;
-var kBodySize = 2;
-var kNameOffset = 19 + kBodySize + 1;
-
-var data = bytes(
- // -- memory
- kDeclMemory,
- 10, 10, 1,
- // -- signatures
- kDeclSignatures, 1,
- 0, kAstI32, // signature: void -> int
- // -- main function
- kDeclFunctions, 1,
- kDeclFunctionName | kDeclFunctionExport,
- 0, 0, // signature index
- kNameOffset, 0, 0, 0, // name offset
- kBodySize, 0, // body size
- // -- body
- kExprI8Const, // --
- kReturnValue, // --
- kDeclEnd,
- 'm', 'a', 'i', 'n', 0 // name
-);
-
-var module = _WASMEXP_.instantiateModule(data);
+var module = (function Build() {
+ var builder = new WasmModuleBuilder();
+
+ builder.addMemory(1, 1, true);
+ builder.addFunction("main", [kAstI32])
+ .addBody([kExprI8Const, kReturnValue])
+ .exportFunc();
+
+ return builder.instantiate();
+})();
// Check the module exists.
assertFalse(module === undefined);
@@ -40,23 +27,24 @@ assertFalse(module === 0);
assertEquals("object", typeof module);
// Check the memory is an ArrayBuffer.
-var mem = module.memory;
+var mem = module.exports.memory;
assertFalse(mem === undefined);
assertFalse(mem === null);
assertFalse(mem === 0);
assertEquals("object", typeof mem);
assertTrue(mem instanceof ArrayBuffer);
for (var i = 0; i < 4; i++) {
- module.memory = 0; // should be ignored
- assertEquals(mem, module.memory);
+ module.exports.memory = 0; // should be ignored
+ assertEquals(mem, module.exports.memory);
}
-assertEquals(1024, module.memory.byteLength);
+assertEquals(65536, module.exports.memory.byteLength);
// Check the properties of the main function.
-assertFalse(module.main === undefined);
-assertFalse(module.main === null);
-assertFalse(module.main === 0);
-assertEquals("function", typeof module.main);
+var main = module.exports.main;
+assertFalse(main === undefined);
+assertFalse(main === null);
+assertFalse(main === 0);
+assertEquals("function", typeof main);
-assertEquals(kReturnValue, module.main());
+assertEquals(kReturnValue, main());
diff --git a/deps/v8/test/mjsunit/wasm/instantiate-run-basic.js b/deps/v8/test/mjsunit/wasm/instantiate-run-basic.js
index b103e8f439..2e649a0bd2 100644
--- a/deps/v8/test/mjsunit/wasm/instantiate-run-basic.js
+++ b/deps/v8/test/mjsunit/wasm/instantiate-run-basic.js
@@ -5,27 +5,16 @@
// Flags: --expose-wasm
load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
-var kReturnValue = 97;
+(function BasicTest() {
+ var kReturnValue = 107;
+ var builder = new WasmModuleBuilder();
-var kBodySize = 2;
-var kNameOffset = 15 + kBodySize + 1;
+ builder.addFunction("main", [kAstI32])
+ .addBody([kExprI8Const, kReturnValue])
+ .exportFunc();
-var data = bytes(
- // -- signatures
- kDeclSignatures, 1,
- 0, kAstI32, // signature: void -> int
- // -- main function
- kDeclFunctions, 1,
- kDeclFunctionName | kDeclFunctionExport,
- 0, 0, // signature index
- kNameOffset, 0, 0, 0, // name offset
- kBodySize, 0, // body size
- // -- body
- kExprI8Const, // --
- kReturnValue, // --
- kDeclEnd,
- 'm', 'a', 'i', 'n', 0 // name
-);
-
-assertEquals(kReturnValue, _WASMEXP_.instantiateModule(data).main());
+ var main = builder.instantiate().exports.main;
+ assertEquals(kReturnValue, main());
+})();
diff --git a/deps/v8/test/mjsunit/wasm/module-memory.js b/deps/v8/test/mjsunit/wasm/module-memory.js
index e9c1404a4f..ef85eb2357 100644
--- a/deps/v8/test/mjsunit/wasm/module-memory.js
+++ b/deps/v8/test/mjsunit/wasm/module-memory.js
@@ -5,52 +5,36 @@
// Flags: --expose-wasm --expose-gc --stress-compaction
load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
-var kMemSize = 4096;
+var kMemSize = 65536;
function genModule(memory) {
- var kBodySize = 27;
- var kNameMainOffset = 28 + kBodySize + 1;
-
- var data = bytes(
- kDeclMemory,
- 12, 12, 1, // memory
- // -- signatures
- kDeclSignatures, 1,
- 1, kAstI32, kAstI32, // int->int
- // -- main function
- kDeclFunctions, 1,
- kDeclFunctionLocals | kDeclFunctionName | kDeclFunctionExport,
- 0, 0,
- kNameMainOffset, 0, 0, 0, // name offset
- 1, 0, // local int32 count
- 0, 0, // local int64 count
- 0, 0, // local float32 count
- 0, 0, // local float64 count
- kBodySize, 0, // code size
- // main body: while(i) { if(mem[i]) return -1; i -= 4; } return 0;
- kExprBlock,2,
- kExprLoop,1,
- kExprIf,
- kExprGetLocal,0,
- kExprBr, 0,
- kExprIfElse,
- kExprI32LoadMem,0,kExprGetLocal,0,
- kExprBr,2, kExprI8Const, 255,
- kExprSetLocal,0,
- kExprI32Sub,kExprGetLocal,0,kExprI8Const,4,
- kExprI8Const,0,
- // names
- kDeclEnd,
- 'm', 'a', 'i', 'n', 0 // --
- );
-
- return _WASMEXP_.instantiateModule(data, null, memory);
+ var builder = new WasmModuleBuilder();
+
+ builder.addMemory(1, 1, true);
+ builder.addFunction("main", [kAstI32, kAstI32])
+ .addBody([
+ kExprBlock,2,
+ kExprLoop,1,
+ kExprIf,
+ kExprGetLocal,0,
+ kExprBr, 0,
+ kExprIfElse,
+ kExprI32LoadMem,0,0,kExprGetLocal,0,
+ kExprBr,2, kExprI8Const, 255,
+ kExprSetLocal,0,
+ kExprI32Sub,kExprGetLocal,0,kExprI8Const,4,
+ kExprI8Const,0])
+ .exportFunc();
+
+ return builder.instantiate(null, memory);
}
function testPokeMemory() {
var module = genModule(null);
- var buffer = module.memory;
+ var buffer = module.exports.memory;
+ var main = module.exports.main;
assertEquals(kMemSize, buffer.byteLength);
var array = new Int8Array(buffer);
@@ -61,21 +45,21 @@ function testPokeMemory() {
}
for (var i = 0; i < 10; i++) {
- assertEquals(0, module.main(kMemSize - 4));
+ assertEquals(0, main(kMemSize - 4));
array[kMemSize/2 + i] = 1;
- assertEquals(0, module.main(kMemSize/2 - 4));
- assertEquals(-1, module.main(kMemSize - 4));
+ assertEquals(0, main(kMemSize/2 - 4));
+ assertEquals(-1, main(kMemSize - 4));
array[kMemSize/2 + i] = 0;
- assertEquals(0, module.main(kMemSize - 4));
+ assertEquals(0, main(kMemSize - 4));
}
}
testPokeMemory();
function testSurvivalAcrossGc() {
- var checker = genModule(null).main;
+ var checker = genModule(null).exports.main;
for (var i = 0; i < 5; i++) {
print("gc run ", i);
assertEquals(0, checker(kMemSize - 4));
@@ -92,6 +76,7 @@ testSurvivalAcrossGc();
function testPokeOuterMemory() {
var buffer = new ArrayBuffer(kMemSize);
var module = genModule(buffer);
+ var main = module.exports.main;
assertEquals(kMemSize, buffer.byteLength);
var array = new Int8Array(buffer);
@@ -102,14 +87,14 @@ function testPokeOuterMemory() {
}
for (var i = 0; i < 10; i++) {
- assertEquals(0, module.main(kMemSize - 4));
+ assertEquals(0, main(kMemSize - 4));
array[kMemSize/2 + i] = 1;
- assertEquals(0, module.main(kMemSize/2 - 4));
- assertEquals(-1, module.main(kMemSize - 4));
+ assertEquals(0, main(kMemSize/2 - 4));
+ assertEquals(-1, main(kMemSize - 4));
array[kMemSize/2 + i] = 0;
- assertEquals(0, module.main(kMemSize - 4));
+ assertEquals(0, main(kMemSize - 4));
}
}
@@ -117,7 +102,7 @@ testPokeOuterMemory();
function testOuterMemorySurvivalAcrossGc() {
var buffer = new ArrayBuffer(kMemSize);
- var checker = genModule(buffer).main;
+ var checker = genModule(buffer).exports.main;
for (var i = 0; i < 5; i++) {
print("gc run ", i);
assertEquals(0, checker(kMemSize - 4));
@@ -132,47 +117,29 @@ testOuterMemorySurvivalAcrossGc();
function testOOBThrows() {
- var kBodySize = 8;
- var kNameMainOffset = 29 + kBodySize + 1;
-
- var data = bytes(
- kDeclMemory,
- 12, 12, 1, // memory = 4KB
- // -- signatures
- kDeclSignatures, 1,
- 2, kAstI32, kAstI32, kAstI32, // int->int
- // -- main function
- kDeclFunctions, 1,
- kDeclFunctionLocals | kDeclFunctionName | kDeclFunctionExport,
- 0, 0,
- kNameMainOffset, 0, 0, 0, // name offset
- 1, 0, // local int32 count
- 0, 0, // local int64 count
- 0, 0, // local float32 count
- 0, 0, // local float64 count
- kBodySize, 0, // code size
- // geti: return mem[a] = mem[b]
- kExprI32StoreMem, 0, kExprGetLocal, 0, kExprI32LoadMem, 0, kExprGetLocal, 1,
- // names
- kDeclEnd,
- 'g','e','t','i', 0 // --
- );
-
- var memory = null;
- var module = _WASMEXP_.instantiateModule(data, null, memory);
+ var builder = new WasmModuleBuilder();
+
+ builder.addMemory(1, 1, true);
+ builder.addFunction("geti", [kAstI32, kAstI32, kAstI32])
+ .addBody([
+ kExprI32StoreMem, 0, 0, kExprGetLocal, 0, kExprI32LoadMem, 0, 0, kExprGetLocal, 1
+ ])
+ .exportFunc();
+
+ var module = builder.instantiate();
var offset;
- function read() { return module.geti(0, offset); }
- function write() { return module.geti(offset, 0); }
+ function read() { return module.exports.geti(0, offset); }
+ function write() { return module.exports.geti(offset, 0); }
- for (offset = 0; offset < 4092; offset++) {
+ for (offset = 0; offset < 65533; offset++) {
assertEquals(0, read());
assertEquals(0, write());
}
- for (offset = 4093; offset < 4124; offset++) {
+ for (offset = 65534; offset < 66536; offset++) {
assertTraps(kTrapMemOutOfBounds, read);
assertTraps(kTrapMemOutOfBounds, write);
}
diff --git a/deps/v8/test/mjsunit/wasm/params.js b/deps/v8/test/mjsunit/wasm/params.js
index 52d6214751..7c2b3d1794 100644
--- a/deps/v8/test/mjsunit/wasm/params.js
+++ b/deps/v8/test/mjsunit/wasm/params.js
@@ -5,54 +5,39 @@
// Flags: --expose-wasm
load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
-function runSelect2(module, which, a, b) {
- assertEquals(which == 0 ? a : b, module.select(a, b));
+function runSelect2(select, which, a, b) {
+ assertEquals(which == 0 ? a : b, select(a, b));
}
function testSelect2(type) {
- var kBodySize = 2;
- var kNameOffset = 21 + kBodySize + 1;
-
for (var which = 0; which < 2; which++) {
print("type = " + type + ", which = " + which);
- var data = bytes(
- // -- memory
- kDeclMemory,
- 12, 12, 1, // memory
- // -- signatures
- kDeclSignatures, 1,
- 2, type, type, type, // signature: (t,t)->t
- // -- select
- kDeclFunctions, 1,
- kDeclFunctionName | kDeclFunctionExport,
- 0, 0,
- kNameOffset, 0, 0, 0, // name offset
- kBodySize, 0, // body size
- kExprGetLocal, which, // --
- kDeclEnd,
- 's','e','l','e','c','t',0 // name
- );
-
- var module = _WASMEXP_.instantiateModule(data);
-
- assertEquals("function", typeof module.select);
- runSelect2(module, which, 99, 97);
- runSelect2(module, which, -99, -97);
+ var builder = new WasmModuleBuilder();
+
+ builder.addFunction("select", [type, type, type])
+ .addBody([kExprGetLocal, which])
+ .exportFunc()
+
+ var select = builder.instantiate().exports.select;
+
+ runSelect2(select, which, 99, 97);
+ runSelect2(select, which, -99, -97);
if (type != kAstF32) {
- runSelect2(module, which, 0x80000000 | 0, 0x7fffffff | 0);
- runSelect2(module, which, 0x80000001 | 0, 0x7ffffffe | 0);
- runSelect2(module, which, 0xffffffff | 0, 0xfffffffe | 0);
- runSelect2(module, which, -2147483647, 2147483646);
- runSelect2(module, which, -2147483646, 2147483645);
- runSelect2(module, which, -2147483648, 2147483647);
+ runSelect2(select, which, 0x80000000 | 0, 0x7fffffff | 0);
+ runSelect2(select, which, 0x80000001 | 0, 0x7ffffffe | 0);
+ runSelect2(select, which, 0xffffffff | 0, 0xfffffffe | 0);
+ runSelect2(select, which, -2147483647, 2147483646);
+ runSelect2(select, which, -2147483646, 2147483645);
+ runSelect2(select, which, -2147483648, 2147483647);
}
if (type != kAstI32 && type != kAstI64) {
- runSelect2(module, which, -1.25, 5.25);
- runSelect2(module, which, Infinity, -Infinity);
+ runSelect2(select, which, -1.25, 5.25);
+ runSelect2(select, which, Infinity, -Infinity);
}
}
}
@@ -63,20 +48,20 @@ testSelect2(kAstF32);
testSelect2(kAstF64);
-function runSelect10(module, which, a, b) {
+function runSelect10(select, which, a, b) {
var x = -1;
var result = [
- module.select(a, b, x, x, x, x, x, x, x, x),
- module.select(x, a, b, x, x, x, x, x, x, x),
- module.select(x, x, a, b, x, x, x, x, x, x),
- module.select(x, x, x, a, b, x, x, x, x, x),
- module.select(x, x, x, x, a, b, x, x, x, x),
- module.select(x, x, x, x, x, a, b, x, x, x),
- module.select(x, x, x, x, x, x, a, b, x, x),
- module.select(x, x, x, x, x, x, x, a, b, x),
- module.select(x, x, x, x, x, x, x, x, a, b),
- module.select(x, x, x, x, x, x, x, x, x, a)
+ select(a, b, x, x, x, x, x, x, x, x),
+ select(x, a, b, x, x, x, x, x, x, x),
+ select(x, x, a, b, x, x, x, x, x, x),
+ select(x, x, x, a, b, x, x, x, x, x),
+ select(x, x, x, x, a, b, x, x, x, x),
+ select(x, x, x, x, x, a, b, x, x, x),
+ select(x, x, x, x, x, x, a, b, x, x),
+ select(x, x, x, x, x, x, x, a, b, x),
+ select(x, x, x, x, x, x, x, x, a, b),
+ select(x, x, x, x, x, x, x, x, x, a)
];
for (var i = 0; i < 10; i++) {
@@ -86,49 +71,36 @@ function runSelect10(module, which, a, b) {
}
}
-function testSelect10(type) {
+function testSelect10(t) {
var kBodySize = 2;
- var kNameOffset = 29 + kBodySize + 1;
+ var kNameOffset = kHeaderSize + 29 + kBodySize + 1;
for (var which = 0; which < 10; which++) {
- print("type = " + type + ", which = " + which);
-
- var t = type;
- var data = bytes(
- kDeclMemory,
- 12, 12, 1, // memory
- // signatures
- kDeclSignatures, 1,
- 10, t,t,t,t,t,t,t,t,t,t,t, // (tx10)->t
- // main function
- kDeclFunctions, 1,
- kDeclFunctionName | kDeclFunctionExport,
- 0, 0,
- kNameOffset, 0, 0, 0, // name offset
- kBodySize, 0, // body size
- kExprGetLocal, which, // --
- kDeclEnd,
- 's','e','l','e','c','t',0 // name
- );
-
- var module = _WASMEXP_.instantiateModule(data);
-
- assertEquals("function", typeof module.select);
- runSelect10(module, which, 99, 97);
- runSelect10(module, which, -99, -97);
-
- if (type != kAstF32) {
- runSelect10(module, which, 0x80000000 | 0, 0x7fffffff | 0);
- runSelect10(module, which, 0x80000001 | 0, 0x7ffffffe | 0);
- runSelect10(module, which, 0xffffffff | 0, 0xfffffffe | 0);
- runSelect10(module, which, -2147483647, 2147483646);
- runSelect10(module, which, -2147483646, 2147483645);
- runSelect10(module, which, -2147483648, 2147483647);
+ print("type = " + t + ", which = " + which);
+
+ var builder = new WasmModuleBuilder();
+ builder.addFunction("select", [t,t,t,t,t,t,t,t,t,t,t])
+ .addBody([kExprGetLocal, which])
+ .exportFunc();
+
+ var select = builder.instantiate().exports.select;
+
+ assertEquals("function", typeof select);
+ runSelect10(select, which, 99, 97);
+ runSelect10(select, which, -99, -97);
+
+ if (t != kAstF32) {
+ runSelect10(select, which, 0x80000000 | 0, 0x7fffffff | 0);
+ runSelect10(select, which, 0x80000001 | 0, 0x7ffffffe | 0);
+ runSelect10(select, which, 0xffffffff | 0, 0xfffffffe | 0);
+ runSelect10(select, which, -2147483647, 2147483646);
+ runSelect10(select, which, -2147483646, 2147483645);
+ runSelect10(select, which, -2147483648, 2147483647);
}
- if (type != kAstI32 && type != kAstI64) {
- runSelect10(module, which, -1.25, 5.25);
- runSelect10(module, which, Infinity, -Infinity);
+ if (t != kAstI32 && t != kAstI64) {
+ runSelect10(select, which, -1.25, 5.25);
+ runSelect10(select, which, Infinity, -Infinity);
}
}
}
diff --git a/deps/v8/test/mjsunit/wasm/stack.js b/deps/v8/test/mjsunit/wasm/stack.js
index d4b72c0085..ed05517ae5 100644
--- a/deps/v8/test/mjsunit/wasm/stack.js
+++ b/deps/v8/test/mjsunit/wasm/stack.js
@@ -5,44 +5,14 @@
// Flags: --expose-wasm
load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
-function testStack(func, check) {
- var kBodySize = 2;
- var kNameFunOffset = 22 + kBodySize + 1;
- var kNameMainOffset = kNameFunOffset + 4;
-
- var ffi = new Object();
- ffi.fun = func;
-
- var data = bytes(
- // signatures
- kDeclSignatures, 1, // --
- 0, kAstStmt, // () -> void
- // -- foreign function
- kDeclFunctions, 2, // --
- kDeclFunctionName | kDeclFunctionImport, // --
- 0, 0, // --
- kNameFunOffset, 0, 0, 0, // name offset
- // -- main function
- kDeclFunctionName | kDeclFunctionExport, // --
- 0, 0, // --
- kNameMainOffset, 0, 0, 0, // name offset
- kBodySize, 0,
- // main body
- kExprCallFunction, 0, // --
- // names
- kDeclEnd, // --
- 'f', 'u', 'n', 0, // --
- 'm', 'a', 'i', 'n', 0 // --
- );
-
- var module = _WASMEXP_.instantiateModule(data, ffi);
-
- assertEquals("function", typeof module.main);
-
- module.main();
- check();
-}
+var expected = "Error\n" +
+ // The line numbers below will change as this test gains / loses lines..
+ " at STACK (stack.js:24:11)\n" + // --
+ " at <WASM> (<anonymous>)\n" + // TODO(jfb): wasm stack here.
+ " at testStack (stack.js:38:18)\n" + // --
+ " at stack.js:40:3"; // --
// The stack trace contains file path, only keep "stack.js".
function stripPath(s) {
@@ -55,15 +25,16 @@ function STACK() {
stack = e.stack;
}
-function check_STACK() {
- assertEquals(expected, stripPath(stack));
-}
+(function testStack() {
+ var builder = new WasmModuleBuilder();
-var expected = "Error\n" +
- // The line numbers below will change as this test gains / loses lines..
- " at STACK (stack.js:54:11)\n" + // --
- " at testStack (stack.js:43:10)\n" +
- // TODO(jfb) Add WebAssembly stack here.
- " at stack.js:69:1";
+ builder.addImport("func", [kAstStmt]);
+
+ builder.addFunction(undefined, [kAstStmt])
+ .addBody([kExprCallImport, 0])
+ .exportAs("main");
-testStack(STACK, check_STACK);
+ var module = builder.instantiate({func: STACK});
+ module.exports.main();
+ assertEquals(expected, stripPath(stack));
+})();
diff --git a/deps/v8/test/mjsunit/wasm/stackwalk.js b/deps/v8/test/mjsunit/wasm/stackwalk.js
index e863b07b4f..8b8fb7e4d4 100644
--- a/deps/v8/test/mjsunit/wasm/stackwalk.js
+++ b/deps/v8/test/mjsunit/wasm/stackwalk.js
@@ -5,44 +5,21 @@
// Flags: --expose-wasm --expose-gc --allow-natives-syntax
load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
function makeFFI(func) {
- var kBodySize = 6;
- var kNameFunOffset = 24 + kBodySize + 1;
- var kNameMainOffset = kNameFunOffset + 4;
-
- var ffi = new Object();
- ffi.fun = func;
-
- var data = bytes(
- // signatures
- kDeclSignatures, 1,
- 2, kAstI32, kAstF64, kAstF64, // (f64,f64) -> int
- // -- foreign function
- kDeclFunctions, 2,
- kDeclFunctionName | kDeclFunctionImport,
- 0, 0,
- kNameFunOffset, 0, 0, 0, // name offset
- // -- main function
- kDeclFunctionName | kDeclFunctionExport,
- 0, 0,
- kNameMainOffset, 0, 0, 0, // name offset
- kBodySize, 0,
- // main body
- kExprCallFunction, 0, // --
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
- // names
- kDeclEnd,
- 'f', 'u', 'n', 0, // --
- 'm', 'a', 'i', 'n', 0 // --
- );
-
- var module = _WASMEXP_.instantiateModule(data, ffi);
-
- assertEquals("function", typeof module.main);
-
- return module.main;
+ var builder = new WasmModuleBuilder();
+
+ var sig_index = builder.addSignature([kAstI32, kAstF64, kAstF64]);
+ builder.addImport("func", sig_index);
+ builder.addFunction("main", sig_index)
+ .addBody([
+ kExprCallImport, 0, // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1]) // --
+ .exportFunc()
+
+ return builder.instantiate({func: func}).exports.main;
}
diff --git a/deps/v8/test/mjsunit/wasm/start-function.js b/deps/v8/test/mjsunit/wasm/start-function.js
index 4008efa563..bd4ccf22c3 100644
--- a/deps/v8/test/mjsunit/wasm/start-function.js
+++ b/deps/v8/test/mjsunit/wasm/start-function.js
@@ -5,33 +5,17 @@
// Flags: --expose-wasm
load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
function instantiate(sig, body) {
- var module = new Array();
- module = module.concat([
- // -- signatures
- kDeclSignatures, 1,
- ]);
- module = module.concat(sig);
- module = module.concat([
- // -- functions
- kDeclFunctions, 1,
- 0, // decl flags
- 0, 0, // signature
- body.length, 0, // body size
- ]);
- module = module.concat(body);
- module = module.concat([
- // -- declare start function
- kDeclStartFunction,
- 0
- ]);
-
- var data = bytes.apply(this, module);
- print(module);
- print(data instanceof ArrayBuffer);
- print(data.byteLength);
- return _WASMEXP_.instantiateModule(data);
+ var builder = new WasmModuleBuilder();
+
+ var func = builder.addFunction("", sig)
+ .addBody(body);
+
+ builder.addStart(func.index);
+
+ return builder.instantiate();
}
function assertFails(sig, body) {
@@ -53,120 +37,75 @@ function assertVerifies(sig, body) {
return module;
}
-assertVerifies([0, kAstStmt], [kExprNop]);
-assertVerifies([0, kAstI32], [kExprI8Const, 0]);
+assertVerifies([kAstStmt], [kExprNop]);
+assertVerifies([kAstI32], [kExprI8Const, 0]);
// Arguments aren't allow to start functions.
-assertFails([1, kAstI32, kAstI32], [kExprGetLocal, 0]);
-assertFails([2, kAstI32, kAstI32, kAstF32], [kExprGetLocal, 0]);
-assertFails([3, kAstI32, kAstI32, kAstF32, kAstF64], [kExprGetLocal, 0]);
+assertFails([kAstI32, kAstI32], [kExprGetLocal, 0]);
+assertFails([kAstI32, kAstI32, kAstF32], [kExprGetLocal, 0]);
+assertFails([kAstI32, kAstI32, kAstF32, kAstF64], [kExprGetLocal, 0]);
(function testInvalidIndex() {
- var kBodySize = 1;
- var data = bytes(
- // -- signatures
- kDeclSignatures, 1,
- 0, kAstStmt,
- // -- functions
- kDeclFunctions, 1,
- 0, // decl flags
- 0, 0, // signature
- kBodySize, 0, // body size
- kExprNop, // body
- // -- declare start function
- kDeclStartFunction,
- 1
- );
-
- assertThrows(function() { _WASMEXP_.instantiateModule(data); });
+ print("testInvalidIndex");
+ var builder = new WasmModuleBuilder();
+
+ var func = builder.addFunction("", [kAstStmt])
+ .addBody([kExprNop]);
+
+ builder.addStart(func.index + 1);
+
+ assertThrows(builder.instantiate);
})();
(function testTwoStartFuncs() {
- var kBodySize = 1;
- var data = bytes(
- // -- signatures
- kDeclSignatures, 1,
- 0, kAstStmt,
- // -- functions
- kDeclFunctions, 1,
- 0, // decl flags
- 0, 0, // signature
- kBodySize, 0, // body size
- kExprNop, // body
- // -- declare start function
- kDeclStartFunction,
- 0,
- // -- declare start function
- kDeclStartFunction,
- 0
- );
-
- assertThrows(function() { _WASMEXP_.instantiateModule(data); });
+ print("testTwoStartFuncs");
+ var builder = new WasmModuleBuilder();
+
+ var func = builder.addFunction("", [kAstStmt])
+ .addBody([kExprNop]);
+
+ builder.addExplicitSection([kDeclStartFunction, 0]);
+ builder.addExplicitSection([kDeclStartFunction, 0]);
+
+ assertThrows(builder.instantiate);
})();
(function testRun() {
- var kBodySize = 6;
-
- var data = bytes(
- kDeclMemory,
- 12, 12, 1, // memory
- // -- signatures
- kDeclSignatures, 1,
- 0, kAstStmt,
- // -- start function
- kDeclFunctions, 1,
- 0, // decl flags
- 0, 0, // signature
- kBodySize, 0, // code size
- // -- start body
- kExprI32StoreMem, 0, kExprI8Const, 0, kExprI8Const, 77,
- // -- declare start function
- kDeclStartFunction,
- 0
- );
-
- var module = _WASMEXP_.instantiateModule(data);
- var memory = module.memory;
+ print("testRun");
+ var builder = new WasmModuleBuilder();
+
+ builder.addMemory(12, 12, true);
+
+ var func = builder.addFunction("", [kAstStmt])
+ .addBody([kExprI32StoreMem, 0, 0, kExprI8Const, 0, kExprI8Const, 77]);
+
+ builder.addStart(func.index);
+
+ var module = builder.instantiate();
+ var memory = module.exports.memory;
var view = new Int8Array(memory);
assertEquals(77, view[0]);
})();
(function testStartFFI() {
- var kBodySize = 2;
- var kNameOffset = 4 + 9 + 7 + 3;
-
- var data = bytes(
- // -- signatures
- kDeclSignatures, 1,
- 0, kAstStmt,
- // -- imported function
- kDeclFunctions, 2,
- kDeclFunctionImport | kDeclFunctionName, // decl flags
- 0, 0, // signature
- kNameOffset, 0, 0, 0,
- // -- start function
- 0, // decl flags
- 0, 0, // signature
- kBodySize, 0, // code size
- // -- start body
- kExprCallFunction, 0,
- // -- declare start function
- kDeclStartFunction,
- 1,
- kDeclEnd,
- 'f', 'o', 'o', 0
- );
-
+ print("testStartFFI");
var ranned = false;
- var ffi = new Object();
- ffi.foo = function() {
+ var ffi = { foo : function() {
print("we ranned at stert!");
ranned = true;
- }
- var module = _WASMEXP_.instantiateModule(data, ffi);
- var memory = module.memory;
- var view = new Int8Array(memory);
+ }};
+
+ var builder = new WasmModuleBuilder();
+ var sig_index = builder.addSignature([kAstStmt]);
+
+ builder.addImport("foo", sig_index);
+ var func = builder.addFunction("", sig_index)
+ .addBody([kExprCallImport, 0]);
+
+ builder.addStart(func.index);
+
+ var module = builder.instantiate(ffi);
assertTrue(ranned);
})();
diff --git a/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js b/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js
new file mode 100644
index 0000000000..50797d0554
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js
@@ -0,0 +1,153 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+var debug = false;
+
+(function BasicTest() {
+ var module = new WasmModuleBuilder();
+ module.addMemory(1, 2, false);
+ module.addFunction("foo", [kAstI32])
+ .addBody([kExprI8Const, 11])
+ .exportAs("blarg");
+
+ var buffer = module.toBuffer(debug);
+ var instance = Wasm.instantiateModule(buffer);
+ assertEquals(11, instance.exports.blarg());
+})();
+
+(function ImportTest() {
+ var module = new WasmModuleBuilder();
+ var index = module.addImport("print", [kAstStmt, kAstI32]);
+ module.addFunction("foo", [kAstStmt])
+ .addBody([kExprCallImport, index, kExprI8Const, 13])
+ .exportAs("main");
+
+ var buffer = module.toBuffer(debug);
+ var instance = Wasm.instantiateModule(buffer, {print: print});
+ print("should print 13! ");
+ instance.exports.main();
+})();
+
+(function LocalsTest() {
+ var module = new WasmModuleBuilder();
+ module.addFunction(undefined, [kAstI32, kAstI32])
+ .addLocals({i32_count: 1})
+ .addBody([kExprSetLocal, 1, kExprGetLocal, 0])
+ .exportAs("main");
+
+ var buffer = module.toBuffer(debug);
+ var instance = Wasm.instantiateModule(buffer);
+ assertEquals(19, instance.exports.main(19));
+ assertEquals(27777, instance.exports.main(27777));
+})();
+
+(function LocalsTest2() {
+ // TODO(titzer): i64 only works on 64-bit platforms.
+ var types = [
+ {locals: {i32_count: 1}, type: kAstI32},
+// {locals: {i64_count: 1}, type: kAstI64},
+ {locals: {f32_count: 1}, type: kAstF32},
+ {locals: {f64_count: 1}, type: kAstF64},
+ ];
+
+ for (p of types) {
+ var module = new WasmModuleBuilder();
+ module.addFunction(undefined, [p.type, p.type])
+ .addLocals(p.locals)
+ .addBody([kExprSetLocal, 1, kExprGetLocal, 0])
+ .exportAs("main");
+
+ var buffer = module.toBuffer(debug);
+ var instance = Wasm.instantiateModule(buffer);
+ assertEquals(19, instance.exports.main(19));
+ assertEquals(27777, instance.exports.main(27777));
+ }
+})();
+
+(function CallTest() {
+ var module = new WasmModuleBuilder();
+ module.addFunction("add", [kAstI32, kAstI32, kAstI32])
+ .addBody([kExprI32Add, kExprGetLocal, 0, kExprGetLocal, 1]);
+ module.addFunction("main", [kAstI32, kAstI32, kAstI32])
+ .addBody([kExprCallFunction, 0, kExprGetLocal, 0, kExprGetLocal, 1])
+ .exportAs("main");
+
+ var instance = module.instantiate();
+ assertEquals(44, instance.exports.main(11, 33));
+ assertEquals(7777, instance.exports.main(2222, 5555));
+})();
+
+(function IndirectCallTest() {
+ var module = new WasmModuleBuilder();
+ module.addFunction("add", [kAstI32, kAstI32, kAstI32])
+ .addBody([kExprI32Add, kExprGetLocal, 0, kExprGetLocal, 1]);
+ module.addFunction("main", [kAstI32, kAstI32, kAstI32, kAstI32])
+ .addBody([kExprCallIndirect, 0, kExprGetLocal,
+ 0, kExprGetLocal, 1, kExprGetLocal, 2])
+ .exportAs("main");
+ module.appendToFunctionTable([0]);
+
+ var instance = module.instantiate();
+ assertEquals(44, instance.exports.main(0, 11, 33));
+ assertEquals(7777, instance.exports.main(0, 2222, 5555));
+ assertThrows(function() { instance.exports.main(1, 1, 1); });
+})();
+
+(function DataSegmentTest() {
+ var module = new WasmModuleBuilder();
+ module.addMemory(1, 1, false);
+ module.addFunction("load", [kAstI32, kAstI32])
+ .addBody([kExprI32LoadMem, 0, 0, kExprGetLocal, 0])
+ .exportAs("load");
+ module.addDataSegment(0, [9, 9, 9, 9], true);
+
+ var buffer = module.toBuffer(debug);
+ var instance = Wasm.instantiateModule(buffer);
+ assertEquals(151587081, instance.exports.load(0));
+})();
+
+
+(function BasicTestWithUint8Array() {
+ var module = new WasmModuleBuilder();
+ module.addMemory(1, 2, false);
+ module.addFunction("foo", [kAstI32])
+ .addBody([kExprI8Const, 17])
+ .exportAs("blarg");
+
+ var buffer = module.toBuffer(debug);
+ var array = new Uint8Array(buffer);
+ var instance = Wasm.instantiateModule(array);
+ assertEquals(17, instance.exports.blarg());
+
+ var kPad = 5;
+ var buffer2 = new ArrayBuffer(kPad + buffer.byteLength + kPad);
+ var whole = new Uint8Array(buffer2);
+ for (var i = 0; i < whole.byteLength; i++) {
+ whole[i] = 0xff;
+ }
+ var array2 = new Uint8Array(buffer2, kPad, buffer.byteLength);
+ for (var i = 0; i < array2.byteLength; i++) {
+ array2[i] = array[i];
+ }
+ var instance = Wasm.instantiateModule(array2);
+ assertEquals(17, instance.exports.blarg());
+})();
+
+(function ImportTestTwoLevel() {
+ var module = new WasmModuleBuilder();
+ var index = module.addImportWithModule("mod", "print", [kAstStmt, kAstI32]);
+ module.addFunction("foo", [kAstStmt])
+ .addBody([kExprCallImport, index, kExprI8Const, 19])
+ .exportAs("main");
+
+ var buffer = module.toBuffer(debug);
+ var instance = Wasm.instantiateModule(buffer, {mod: {print: print}});
+ print("should print 19! ");
+ instance.exports.main();
+})();
diff --git a/deps/v8/test/mjsunit/wasm/unreachable.js b/deps/v8/test/mjsunit/wasm/unreachable.js
index 10eea23230..3e2dffb4e9 100644
--- a/deps/v8/test/mjsunit/wasm/unreachable.js
+++ b/deps/v8/test/mjsunit/wasm/unreachable.js
@@ -5,42 +5,20 @@
// Flags: --expose-wasm
load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
-var module = (function () {
- var kFuncWithBody = 9;
- var kFuncImported = 7;
- var kBodySize1 = 1;
- var kMainOffset = 6 + kFuncWithBody + kBodySize1 + 1;
+var main = (function () {
+ var builder = new WasmModuleBuilder();
+ builder.addFunction("main", [kAstStmt])
+ .addBody([kExprUnreachable])
+ .exportAs("main");
- var ffi = new Object();
- ffi.add = (function(a, b) { return a + b | 0; });
-
- return _WASMEXP_.instantiateModule(bytes(
- // -- signatures
- kDeclSignatures, 1,
- 0, kAstStmt, // void -> void
- // -- function #0 (unreachable)
- kDeclFunctions, 1,
- kDeclFunctionName | kDeclFunctionExport,
- 0, 0, // signature offset
- kMainOffset, 0, 0, 0, // name offset
- kBodySize1, 0, // body size
- kExprUnreachable,
- kDeclEnd,
- 'm', 'a', 'i', 'n', 0 // name
- ), ffi);
+ return builder.instantiate().exports.main;
})();
-// Check the module exists.
-assertFalse(module === undefined);
-assertFalse(module === null);
-assertFalse(module === 0);
-assertEquals("object", typeof module);
-assertEquals("function", typeof module.main);
-
var exception = "";
try {
- assertEquals(0, module.main());
+ assertEquals(0, main());
} catch(e) {
print("correctly caught: " + e);
exception = e;
diff --git a/deps/v8/test/mjsunit/wasm/verify-function-basic-errors.js b/deps/v8/test/mjsunit/wasm/verify-function-basic-errors.js
index c7383c8327..74c9a96d68 100644
--- a/deps/v8/test/mjsunit/wasm/verify-function-basic-errors.js
+++ b/deps/v8/test/mjsunit/wasm/verify-function-basic-errors.js
@@ -6,13 +6,13 @@
function Foo() { }
-assertThrows(function() { _WASMEXP_.verifyFunction(); })
-assertThrows(function() { _WASMEXP_.verifyFunction(0); })
-assertThrows(function() { _WASMEXP_.verifyFunction("s"); })
-assertThrows(function() { _WASMEXP_.verifyFunction(undefined); })
-assertThrows(function() { _WASMEXP_.verifyFunction(1.1); })
-assertThrows(function() { _WASMEXP_.verifyFunction(1/0); })
-assertThrows(function() { _WASMEXP_.verifyFunction(null); })
-assertThrows(function() { _WASMEXP_.verifyFunction(new Foo()); })
-assertThrows(function() { _WASMEXP_.verifyFunction(new ArrayBuffer(0)); })
-assertThrows(function() { _WASMEXP_.verifyFunction(new ArrayBuffer(140000)); })
+assertThrows(function() { Wasm.verifyFunction(); })
+assertThrows(function() { Wasm.verifyFunction(0); })
+assertThrows(function() { Wasm.verifyFunction("s"); })
+assertThrows(function() { Wasm.verifyFunction(undefined); })
+assertThrows(function() { Wasm.verifyFunction(1.1); })
+assertThrows(function() { Wasm.verifyFunction(1/0); })
+assertThrows(function() { Wasm.verifyFunction(null); })
+assertThrows(function() { Wasm.verifyFunction(new Foo()); })
+assertThrows(function() { Wasm.verifyFunction(new ArrayBuffer(0)); })
+assertThrows(function() { Wasm.verifyFunction(new ArrayBuffer(140000)); })
diff --git a/deps/v8/test/mjsunit/wasm/verify-function-simple.js b/deps/v8/test/mjsunit/wasm/verify-function-simple.js
index c4d51c7423..aa5c67683e 100644
--- a/deps/v8/test/mjsunit/wasm/verify-function-simple.js
+++ b/deps/v8/test/mjsunit/wasm/verify-function-simple.js
@@ -9,14 +9,11 @@ load("test/mjsunit/wasm/wasm-constants.js");
try {
var data = bytes(
0, kAstStmt, // signature
- 3, 0, // local int32 count
- 4, 0, // local int64 count
- 5, 0, // local float32 count
- 6, 0, // local float64 count
+ kDeclNoLocals, // --
kExprNop // body
);
- _WASMEXP_.verifyFunction(data);
+ Wasm.verifyFunction(data);
print("ok");
} catch (e) {
assertTrue(false);
@@ -27,14 +24,11 @@ var threw = false;
try {
var data = bytes(
0, kAstI32, // signature
- 2, 0, // local int32 count
- 3, 0, // local int64 count
- 4, 0, // local float32 count
- 5, 0, // local float64 count
+ kDeclNoLocals, // --
kExprBlock, 2, kExprNop, kExprNop // body
);
- _WASMEXP_.verifyFunction(data);
+ Wasm.verifyFunction(data);
print("not ok");
} catch (e) {
print("ok: " + e);
diff --git a/deps/v8/test/mjsunit/wasm/verify-module-basic-errors.js b/deps/v8/test/mjsunit/wasm/verify-module-basic-errors.js
index 37658d3786..29ef2aa611 100644
--- a/deps/v8/test/mjsunit/wasm/verify-module-basic-errors.js
+++ b/deps/v8/test/mjsunit/wasm/verify-module-basic-errors.js
@@ -6,13 +6,13 @@
function Foo() { }
-assertThrows(function() { _WASMEXP_.verifyModule(); })
-assertThrows(function() { _WASMEXP_.verifyModule(0); })
-assertThrows(function() { _WASMEXP_.verifyModule("s"); })
-assertThrows(function() { _WASMEXP_.verifyModule(undefined); })
-assertThrows(function() { _WASMEXP_.verifyModule(1.1); })
-assertThrows(function() { _WASMEXP_.verifyModule(1/0); })
-assertThrows(function() { _WASMEXP_.verifyModule(null); })
-assertThrows(function() { _WASMEXP_.verifyModule(new Foo()); })
-assertThrows(function() { _WASMEXP_.verifyModule(new ArrayBuffer(0)); })
-assertThrows(function() { _WASMEXP_.verifyModule(new ArrayBuffer(7)); })
+assertThrows(function() { Wasm.verifyModule(); })
+assertThrows(function() { Wasm.verifyModule(0); })
+assertThrows(function() { Wasm.verifyModule("s"); })
+assertThrows(function() { Wasm.verifyModule(undefined); })
+assertThrows(function() { Wasm.verifyModule(1.1); })
+assertThrows(function() { Wasm.verifyModule(1/0); })
+assertThrows(function() { Wasm.verifyModule(null); })
+assertThrows(function() { Wasm.verifyModule(new Foo()); })
+assertThrows(function() { Wasm.verifyModule(new ArrayBuffer(0)); })
+assertThrows(function() { Wasm.verifyModule(new ArrayBuffer(7)); })
diff --git a/deps/v8/test/mjsunit/wasm/wasm-constants.js b/deps/v8/test/mjsunit/wasm/wasm-constants.js
index 458b51ad07..cc620bb458 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-constants.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-constants.js
@@ -15,6 +15,41 @@ function bytes() {
return buffer;
}
+// Header declaration constants
+var kWasmH0 = 0;
+var kWasmH1 = 0x61;
+var kWasmH2 = 0x73;
+var kWasmH3 = 0x6d;
+
+var kWasmV0 = 10;
+var kWasmV1 = 0;
+var kWasmV2 = 0;
+var kWasmV3 = 0;
+
+var kHeaderSize = 8;
+var kPageSize = 65536;
+
+function bytesWithHeader() {
+ var buffer = new ArrayBuffer(kHeaderSize + arguments.length);
+ var view = new Uint8Array(buffer);
+ view[0] = kWasmH0;
+ view[1] = kWasmH1;
+ view[2] = kWasmH2;
+ view[3] = kWasmH3;
+ view[4] = kWasmV0;
+ view[5] = kWasmV1;
+ view[6] = kWasmV2;
+ view[7] = kWasmV3;
+ for (var i = 0; i < arguments.length; i++) {
+ var val = arguments[i];
+ if ((typeof val) == "string") val = val.charCodeAt(0);
+ view[kHeaderSize + i] = val | 0;
+ }
+ return buffer;
+}
+
+var kDeclNoLocals = 0;
+
// Section declaration constants
var kDeclMemory = 0x00;
var kDeclSignatures = 0x01;
@@ -22,9 +57,18 @@ var kDeclFunctions = 0x02;
var kDeclGlobals = 0x03;
var kDeclDataSegments = 0x04;
var kDeclFunctionTable = 0x05;
+var kDeclEnd = 0x06;
var kDeclStartFunction = 0x07;
var kDeclImportTable = 0x08;
-var kDeclEnd = 0x06;
+var kDeclExportTable = 0x09;
+var kDeclFunctionSignatures = 0x0a;
+var kDeclFunctionBodies = 0x0b;
+var kDeclNames = 0x0c;
+
+var section_names = [
+ "memory", "signatures", "functions", "globals", "data_segments",
+ "function_table", "end", "start_function", "import_table", "export_table",
+ "function_signatures", "function_bodies", "names"];
// Function declaration flags
var kDeclFunctionName = 0x01;
@@ -119,7 +163,7 @@ var kExprI32GeU = 0x56;
var kExprI32Clz = 0x57;
var kExprI32Ctz = 0x58;
var kExprI32Popcnt = 0x59;
-var kExprBoolNot = 0x5a;
+var kExprI32Eqz = 0x5a;
var kExprI64Add = 0x5b;
var kExprI64Sub = 0x5c;
var kExprI64Mul = 0x5d;
@@ -211,6 +255,10 @@ var kExprF64ConvertF32 = 0xb2;
var kExprF64ReinterpretI64 = 0xb3;
var kExprI32ReinterpretF32 = 0xb4;
var kExprI64ReinterpretF64 = 0xb5;
+var kExprI32Ror = 0xb6;
+var kExprI32Rol = 0xb7;
+var kExprI64Ror = 0xb8;
+var kExprI64Rol = 0xb9;
var kTrapUnreachable = 0;
var kTrapMemOutOfBounds = 1;
diff --git a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
new file mode 100644
index 0000000000..e1d996338c
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
@@ -0,0 +1,335 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function WasmFunctionBuilder(name, sig_index) {
+ this.name = name;
+ this.sig_index = sig_index;
+ this.exports = [];
+}
+
+WasmFunctionBuilder.prototype.exportAs = function(name) {
+ this.exports.push(name);
+ return this;
+}
+
+WasmFunctionBuilder.prototype.exportFunc = function() {
+ this.exports.push(this.name);
+ return this;
+}
+
+WasmFunctionBuilder.prototype.addBody = function(body) {
+ this.body = body;
+ return this;
+}
+
+WasmFunctionBuilder.prototype.addLocals = function(locals) {
+ this.locals = locals;
+ return this;
+}
+
+function WasmModuleBuilder() {
+ this.signatures = [];
+ this.imports = [];
+ this.functions = [];
+ this.exports = [];
+ this.function_table = [];
+ this.data_segments = [];
+ this.explicit = [];
+ return this;
+}
+
+WasmModuleBuilder.prototype.addStart = function(start_index) {
+ this.start_index = start_index;
+}
+
+WasmModuleBuilder.prototype.addMemory = function(min, max, exp) {
+ this.memory = {min: min, max: max, exp: exp};
+ return this;
+}
+
+WasmModuleBuilder.prototype.addExplicitSection = function(bytes) {
+ this.explicit.push(bytes);
+ return this;
+}
+
+// Add a signature; format is [rettype, param0, param1, ...]
+WasmModuleBuilder.prototype.addSignature = function(sig) {
+ // TODO: canonicalize signatures?
+ this.signatures.push(sig);
+ return this.signatures.length - 1;
+}
+
+WasmModuleBuilder.prototype.addFunction = function(name, sig) {
+ var sig_index = (typeof sig) == "number" ? sig : this.addSignature(sig);
+ var func = new WasmFunctionBuilder(name, sig_index);
+ func.index = this.functions.length;
+ this.functions.push(func);
+ return func;
+}
+
+WasmModuleBuilder.prototype.addImportWithModule = function(module, name, sig) {
+ var sig_index = (typeof sig) == "number" ? sig : this.addSignature(sig);
+ this.imports.push({module: module, name: name, sig_index: sig_index});
+ return this.imports.length - 1;
+}
+
+WasmModuleBuilder.prototype.addImport = function(name, sig) {
+ var sig_index = (typeof sig) == "number" ? sig : this.addSignature(sig);
+ this.imports.push({module: name, name: undefined, sig_index: sig_index});
+ return this.imports.length - 1;
+}
+
+WasmModuleBuilder.prototype.addDataSegment = function(addr, data, init) {
+ this.data_segments.push({addr: addr, data: data, init: init});
+ return this.data_segments.length - 1;
+}
+
+WasmModuleBuilder.prototype.appendToFunctionTable = function(array) {
+ this.function_table = this.function_table.concat(array);
+ return this;
+}
+
+function emit_u8(bytes, val) {
+ bytes.push(val & 0xff);
+}
+
+function emit_u16(bytes, val) {
+ bytes.push(val & 0xff);
+ bytes.push((val >> 8) & 0xff);
+}
+
+function emit_u32(bytes, val) {
+ bytes.push(val & 0xff);
+ bytes.push((val >> 8) & 0xff);
+ bytes.push((val >> 16) & 0xff);
+ bytes.push((val >> 24) & 0xff);
+}
+
+function emit_string(bytes, string) {
+ emit_varint(bytes, string.length);
+ for (var i = 0; i < string.length; i++) {
+ emit_u8(bytes, string.charCodeAt(i));
+ }
+}
+
+function emit_varint(bytes, val) {
+ while (true) {
+ var v = val & 0xff;
+ val = val >>> 7;
+ if (val == 0) {
+ bytes.push(v);
+ break;
+ }
+ bytes.push(v | 0x80);
+ }
+}
+
+function emit_bytes(bytes, data) {
+ for (var i = 0; i < data.length; i++) {
+ bytes.push(data[i] & 0xff);
+ }
+}
+
+function emit_section(bytes, section_code, content_generator) {
+ // Start the section in a temporary buffer: its full length isn't know yet.
+ var tmp_bytes = [];
+ emit_string(tmp_bytes, section_names[section_code]);
+ content_generator(tmp_bytes);
+ // Now that we know the section length, emit it and copy the section.
+ emit_varint(bytes, tmp_bytes.length);
+ Array.prototype.push.apply(bytes, tmp_bytes);
+}
+
+WasmModuleBuilder.prototype.toArray = function(debug) {
+ // Add header bytes
+ var bytes = [];
+ bytes = bytes.concat([kWasmH0, kWasmH1, kWasmH2, kWasmH3,
+ kWasmV0, kWasmV1, kWasmV2, kWasmV3]);
+
+ var wasm = this;
+
+ // Add memory section
+ if (wasm.memory != undefined) {
+ if (debug) print("emitting memory @ " + bytes.length);
+ emit_section(bytes, kDeclMemory, function(bytes) {
+ emit_varint(bytes, wasm.memory.min);
+ emit_varint(bytes, wasm.memory.max);
+ emit_u8(bytes, wasm.memory.exp ? 1 : 0);
+ });
+ }
+
+ // Add signatures section
+ if (wasm.signatures.length > 0) {
+ if (debug) print("emitting signatures @ " + bytes.length);
+ emit_section(bytes, kDeclSignatures, function(bytes) {
+ emit_varint(bytes, wasm.signatures.length);
+ for (sig of wasm.signatures) {
+ var params = sig.length - 1;
+ emit_varint(bytes, params);
+ for (var j = 0; j < sig.length; j++) {
+ emit_u8(bytes, sig[j]);
+ }
+ }
+ });
+ }
+
+ // Add imports section
+ if (wasm.imports.length > 0) {
+ if (debug) print("emitting imports @ " + bytes.length);
+ emit_section(bytes, kDeclImportTable, function(bytes) {
+ emit_varint(bytes, wasm.imports.length);
+ for (imp of wasm.imports) {
+ emit_varint(bytes, imp.sig_index);
+ emit_string(bytes, imp.module);
+ emit_string(bytes, imp.name || '');
+ }
+ });
+ }
+
+ // Add functions section
+ var names = false;
+ var exports = 0;
+ if (wasm.functions.length > 0) {
+ var has_names = false;
+
+ // emit function signatures
+ if (debug) print("emitting function sigs @ " + bytes.length);
+ emit_section(bytes, kDeclFunctionSignatures, function(bytes) {
+ emit_varint(bytes, wasm.functions.length);
+ for (func of wasm.functions) {
+ has_names = has_names || (func.name != undefined &&
+ func.name.length > 0);
+ exports += func.exports.length;
+
+ emit_varint(bytes, func.sig_index);
+ }
+ });
+
+ // emit function bodies
+ if (debug) print("emitting function bodies @ " + bytes.length);
+ emit_section(bytes, kDeclFunctionBodies, function(bytes) {
+ emit_varint(bytes, wasm.functions.length);
+ for (func of wasm.functions) {
+ // Function body length will be patched later.
+ var local_decls = [];
+ var l = func.locals;
+ if (l != undefined) {
+ var local_decls_count = 0;
+ if (l.i32_count > 0) {
+ local_decls.push({count: l.i32_count, type: kAstI32});
+ }
+ if (l.i64_count > 0) {
+ local_decls.push({count: l.i64_count, type: kAstI64});
+ }
+ if (l.f32_count > 0) {
+ local_decls.push({count: l.f32_count, type: kAstF32});
+ }
+ if (l.f64_count > 0) {
+ local_decls.push({count: l.f64_count, type: kAstF64});
+ }
+ }
+ var header = new Array();
+
+ emit_varint(header, local_decls.length);
+ for (decl of local_decls) {
+ emit_varint(header, decl.count);
+ emit_u8(header, decl.type);
+ }
+
+ emit_varint(bytes, header.length + func.body.length);
+ emit_bytes(bytes, header);
+ emit_bytes(bytes, func.body);
+ }
+ });
+ }
+
+ // emit function names
+ if (has_names) {
+ if (debug) print("emitting names @ " + bytes.length);
+ emit_section(bytes, kDeclNames, function(bytes) {
+ emit_varint(bytes, wasm.functions.length);
+ for (func of wasm.functions) {
+ var name = func.name == undefined ? "" : func.name;
+ emit_string(bytes, name);
+ emit_u8(bytes, 0); // local names count == 0
+ }
+ });
+ }
+
+ // Add start function section.
+ if (wasm.start_index != undefined) {
+ if (debug) print("emitting start function @ " + bytes.length);
+ emit_section(bytes, kDeclStartFunction, function(bytes) {
+ emit_varint(bytes, wasm.start_index);
+ });
+ }
+
+ if (wasm.function_table.length > 0) {
+ if (debug) print("emitting function table @ " + bytes.length);
+ emit_section(bytes, kDeclFunctionTable, function(bytes) {
+ emit_varint(bytes, wasm.function_table.length);
+ for (index of wasm.function_table) {
+ emit_varint(bytes, index);
+ }
+ });
+ }
+
+ if (exports > 0) {
+ if (debug) print("emitting exports @ " + bytes.length);
+ emit_section(bytes, kDeclExportTable, function(bytes) {
+ emit_varint(bytes, exports);
+ for (func of wasm.functions) {
+ for (exp of func.exports) {
+ emit_varint(bytes, func.index);
+ emit_string(bytes, exp);
+ }
+ }
+ });
+ }
+
+ if (wasm.data_segments.length > 0) {
+ if (debug) print("emitting data segments @ " + bytes.length);
+ emit_section(bytes, kDeclDataSegments, function(bytes) {
+ emit_varint(bytes, wasm.data_segments.length);
+ for (seg of wasm.data_segments) {
+ emit_varint(bytes, seg.addr);
+ emit_varint(bytes, seg.data.length);
+ emit_bytes(bytes, seg.data);
+ }
+ });
+ }
+
+ // Emit any explicitly added sections
+ for (exp of wasm.explicit) {
+ if (debug) print("emitting explicit @ " + bytes.length);
+ emit_bytes(bytes, exp);
+ }
+
+ // End the module.
+ if (debug) print("emitting end @ " + bytes.length);
+ emit_section(bytes, kDeclEnd, function(bytes) {});
+
+ return bytes;
+}
+
+WasmModuleBuilder.prototype.toBuffer = function(debug) {
+ var bytes = this.toArray(debug);
+ var buffer = new ArrayBuffer(bytes.length);
+ var view = new Uint8Array(buffer);
+ for (var i = 0; i < bytes.length; i++) {
+ var val = bytes[i];
+ if ((typeof val) == "string") val = val.charCodeAt(0);
+ view[i] = val | 0;
+ }
+ return buffer;
+}
+
+WasmModuleBuilder.prototype.instantiate = function(ffi, memory) {
+ var buffer = this.toBuffer();
+ if (memory != undefined) {
+ return Wasm.instantiateModule(buffer, ffi, memory);
+ } else {
+ return Wasm.instantiateModule(buffer, ffi);
+ }
+}
diff --git a/deps/v8/test/mjsunit/wasm/wasm-object-api.js b/deps/v8/test/mjsunit/wasm/wasm-object-api.js
index 8912271c23..2f25c66fce 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-object-api.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-object-api.js
@@ -4,9 +4,9 @@
// Flags: --expose-wasm
-assertFalse(undefined === _WASMEXP_);
-assertFalse(undefined == _WASMEXP_);
-assertEquals("function", typeof _WASMEXP_.verifyModule);
-assertEquals("function", typeof _WASMEXP_.verifyFunction);
-assertEquals("function", typeof _WASMEXP_.instantiateModule);
-assertEquals("function", typeof _WASMEXP_.instantiateModuleFromAsm);
+assertFalse(undefined === Wasm);
+assertFalse(undefined == Wasm);
+assertEquals("function", typeof Wasm.verifyModule);
+assertEquals("function", typeof Wasm.verifyFunction);
+assertEquals("function", typeof Wasm.instantiateModule);
+assertEquals("function", typeof Wasm.instantiateModuleFromAsm);
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index c54d154ab3..0f7d4aa1ba 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -109,6 +109,9 @@
'js1_5/Regress/regress-360969-03': [FAIL, ['mode == debug', TIMEOUT, NO_VARIANTS]],
'js1_5/Regress/regress-360969-04': [FAIL, ['mode == debug', TIMEOUT, NO_VARIANTS]],
+ # Function declarations are no longer allowed as the body of a with statement.
+ 'js1_5/Regress/regress-326453': [FAIL],
+
##################### SKIPPED TESTS #####################
# This test checks that we behave properly in an out-of-memory
@@ -168,6 +171,8 @@
# in the test: "This test will probably run out of memory".
'js1_5/extensions/regress-345967': [SKIP],
+ # Slow with arm64 simulator in debug.
+ 'ecma_3/Statements/regress-302439': [PASS, ['mode == debug', SLOW]],
##################### FLAKY TESTS #####################
@@ -672,9 +677,6 @@
# We do not correctly handle assignments within "with"
'ecma_3/Statements/12.10-01': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=4647
- 'ecma_3/FunExpr/fe-001': [FAIL_OK],
-
##################### MOZILLA EXTENSION TESTS #####################
'ecma/extensions/15.1.2.1-1': [FAIL_OK],
diff --git a/deps/v8/test/mozilla/testcfg.py b/deps/v8/test/mozilla/testcfg.py
index 5739391102..0eb32c87bd 100644
--- a/deps/v8/test/mozilla/testcfg.py
+++ b/deps/v8/test/mozilla/testcfg.py
@@ -107,10 +107,10 @@ class MozillaTestSuite(testsuite.TestSuite):
def IsNegativeTest(self, testcase):
return testcase.path.endswith("-n")
- def IsFailureOutput(self, output, testpath):
- if output.exit_code != 0:
+ def IsFailureOutput(self, testcase):
+ if testcase.output.exit_code != 0:
return True
- return "FAILED!" in output.stdout
+ return "FAILED!" in testcase.output.stdout
def DownloadData(self):
print "Mozilla download is deprecated. It's part of DEPS."
diff --git a/deps/v8/test/preparser/functions-only.js b/deps/v8/test/preparser/functions-only.js
deleted file mode 100644
index 4dcde57979..0000000000
--- a/deps/v8/test/preparser/functions-only.js
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This file contains no identifiers or string literals, but does contain
-// symbols.
-
-(function () {
- if (this != null) {
- return this;
- }
- while (true) {
- if ([][2]) return false;
- }
-})({}, function() { return [true]; } );
diff --git a/deps/v8/test/preparser/preparser.expectation b/deps/v8/test/preparser/preparser.expectation
deleted file mode 100644
index 638f90e0b8..0000000000
--- a/deps/v8/test/preparser/preparser.expectation
+++ /dev/null
@@ -1,14 +0,0 @@
-# Expectations for .js preparser tests.
-# Only mentions tests that throw SyntaxError, and optionally specifies
-# the message and location expected in the exception.
-# Format:
-# testname[:message[:beg_pos,end_pos]]
-strict-octal-number:strict_octal_literal
-strict-octal-string:strict_octal_literal
-strict-octal-regexp:strict_octal_literal
-strict-octal-use-strict-after:strict_octal_literal
-strict-octal-use-strict-before:strict_octal_literal
-
-strict-const:strict_const
-
-strict-with:strict_mode_with
diff --git a/deps/v8/test/preparser/preparser.status b/deps/v8/test/preparser/preparser.status
index 9d69988f71..43049d46e0 100644
--- a/deps/v8/test/preparser/preparser.status
+++ b/deps/v8/test/preparser/preparser.status
@@ -27,18 +27,5 @@
[
[ALWAYS, {
- # TODO(mstarzinger): This script parses but throws a TypeError when run.
- 'non-alphanum': [FAIL],
-
- # We don't parse RegExps at scanning time, so we can't fail on octal
- # escapes (we need to parse to distinguish octal escapes from valid
- # back-references).
- 'strict-octal-regexp': [FAIL],
}], # ALWAYS
-
-['arch == android_arm or arch == android_ia32', {
- # Remove this once the issue above is fixed. Android test runner does not
- # handle "FAIL" test expectation correctly.
- 'strict-octal-regexp': [SKIP],
-}], # 'arch == android_arm or arch == android_ia32'
]
diff --git a/deps/v8/test/preparser/symbols-only.js b/deps/v8/test/preparser/symbols-only.js
deleted file mode 100644
index b6520636bc..0000000000
--- a/deps/v8/test/preparser/symbols-only.js
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This file contains no function declarations.
-
-var x = 42;
-var y = "hello world";
-if (x == y) {
- with ({ x: 10, y: "20", z: 42 }) {
- print(z);
- }
-}
-try {
- x = 2;
- throw y;
- y = 4;
-} catch (e) {
- y = e;
-} finally {
- x = y;
-}
-for (var i = 0; i < 10; i++) {
- x += x;
-}
-print(y);
diff --git a/deps/v8/test/preparser/testcfg.py b/deps/v8/test/preparser/testcfg.py
index 7e51b8ef58..c55e4a85a8 100644
--- a/deps/v8/test/preparser/testcfg.py
+++ b/deps/v8/test/preparser/testcfg.py
@@ -27,17 +27,11 @@
import os
-import re
from testrunner.local import testsuite
-from testrunner.local import utils
from testrunner.objects import testcase
-FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
-INVALID_FLAGS = ["--enable-slow-asserts"]
-
-
class PreparserTestSuite(testsuite.TestSuite):
def __init__(self, name, root):
super(PreparserTestSuite, self).__init__(name, root)
@@ -45,22 +39,6 @@ class PreparserTestSuite(testsuite.TestSuite):
def shell(self):
return "d8"
- def _GetExpectations(self):
- expects_file = os.path.join(self.root, "preparser.expectation")
- expectations_map = {}
- if not os.path.exists(expects_file): return expectations_map
- rule_regex = re.compile("^([\w\-]+)(?::([\w\-]+))?(?::(\d+),(\d+))?$")
- for line in utils.ReadLinesFrom(expects_file):
- rule_match = rule_regex.match(line)
- if not rule_match: continue
- expects = []
- if (rule_match.group(2)):
- expects += [rule_match.group(2)]
- if (rule_match.group(3)):
- expects += [rule_match.group(3), rule_match.group(4)]
- expectations_map[rule_match.group(1)] = " ".join(expects)
- return expectations_map
-
def _ParsePythonTestTemplates(self, result, filename):
pathname = os.path.join(self.root, filename + ".pyt")
def Test(name, source, expectation, extra_flags=[]):
@@ -84,20 +62,8 @@ class PreparserTestSuite(testsuite.TestSuite):
execfile(pathname, {"Test": Test, "Template": Template})
def ListTests(self, context):
- expectations = self._GetExpectations()
result = []
- # Find all .js files in this directory.
- filenames = [f[:-3] for f in os.listdir(self.root) if f.endswith(".js")]
- filenames.sort()
- for f in filenames:
- throws = expectations.get(f, None)
- flags = [f + ".js"]
- if throws:
- flags += ["--throws"]
- test = testcase.TestCase(self, f, flags=flags)
- result.append(test)
-
# Find all .pyt files in this directory.
filenames = [f[:-4] for f in os.listdir(self.root) if f.endswith(".pyt")]
filenames.sort()
@@ -106,25 +72,11 @@ class PreparserTestSuite(testsuite.TestSuite):
return result
def GetFlagsForTestCase(self, testcase, context):
- first = testcase.flags[0]
- if first != "-e":
- testcase.flags[0] = os.path.join(self.root, first)
- source = self.GetSourceForTest(testcase)
- result = []
- flags_match = re.findall(FLAGS_PATTERN, source)
- for match in flags_match:
- result += match.strip().split()
- result += context.mode_flags
- result = [x for x in result if x not in INVALID_FLAGS]
- result.append(os.path.join(self.root, testcase.path + ".js"))
- return testcase.flags + result
return testcase.flags
def GetSourceForTest(self, testcase):
- if testcase.flags[0] == "-e":
- return testcase.flags[1]
- with open(testcase.flags[0]) as f:
- return f.read()
+ assert testcase.flags[0] == "-e"
+ return testcase.flags[1]
def _VariantGeneratorFactory(self):
return testsuite.StandardVariantGenerator
diff --git a/deps/v8/test/promises-aplus/testcfg.py b/deps/v8/test/promises-aplus/testcfg.py
index 5f447c3f90..bd80f97a13 100644
--- a/deps/v8/test/promises-aplus/testcfg.py
+++ b/deps/v8/test/promises-aplus/testcfg.py
@@ -91,11 +91,11 @@ class PromiseAplusTestSuite(testsuite.TestSuite):
def IsNegativeTest(self, testcase):
return '@negative' in self.GetSourceForTest(testcase)
- def IsFailureOutput(self, output, testpath):
- if output.exit_code != 0:
+ def IsFailureOutput(self, testcase):
+ if testcase.output.exit_code != 0:
return True
- return not 'All tests have run.' in output.stdout or \
- 'FAIL:' in output.stdout
+ return not 'All tests have run.' in testcase.output.stdout or \
+ 'FAIL:' in testcase.output.stdout
def DownloadTestData(self):
archive = os.path.join(self.root, TEST_ARCHIVE)
diff --git a/deps/v8/test/simdjs/testcfg.py b/deps/v8/test/simdjs/testcfg.py
index d2dbd71fb0..c055d5a3d3 100644
--- a/deps/v8/test/simdjs/testcfg.py
+++ b/deps/v8/test/simdjs/testcfg.py
@@ -48,10 +48,10 @@ class SimdJsTestSuite(testsuite.TestSuite):
def IsNegativeTest(self, testcase):
return False
- def IsFailureOutput(self, output, testpath):
- if output.exit_code != 0:
+ def IsFailureOutput(self, testcase):
+ if testcase.output.exit_code != 0:
return True
- return "FAILED!" in output.stdout
+ return "FAILED!" in testcase.output.stdout
def DownloadData(self):
print "SimdJs download is deprecated. It's part of DEPS."
diff --git a/deps/v8/test/test262/archive.py b/deps/v8/test/test262/archive.py
index 8398e51716..c265b32421 100755
--- a/deps/v8/test/test262/archive.py
+++ b/deps/v8/test/test262/archive.py
@@ -8,10 +8,15 @@ import tarfile
os.chdir(os.path.dirname(os.path.abspath(__file__)))
+# Workaround for slow grp and pwd calls.
+tarfile.grp = None
+tarfile.pwd = None
+
def filter_git(tar_info):
if tar_info.name.startswith(os.path.join('data', '.git')):
return None
else:
+ tar_info.uname = tar_info.gname = "test262"
return tar_info
with tarfile.open('data.tar', 'w') as tar:
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index a926bcc92d..b1bdd0adb0 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -27,19 +27,8 @@
[
[ALWAYS, {
- ############################### BUGS ###################################
-
- # BUG(v8:3455)
- 'intl402/11.2.3_b': [FAIL],
- 'intl402/12.2.3_b': [FAIL],
-
###################### NEEDS INVESTIGATION #######################
- # Possibly same cause as S8.5_A2.1, below: floating-point tests.
- 'built-ins/Math/cos/S15.8.2.7_A7': [PASS, FAIL_OK],
- 'built-ins/Math/sin/S15.8.2.16_A7': [PASS, FAIL_OK],
- 'built-ins/Math/tan/S15.8.2.18_A7': [PASS, FAIL_OK],
-
# This is an incompatibility between ES5 and V8 on enumerating
# shadowed elements in a for..in loop.
# https://code.google.com/p/v8/issues/detail?id=705
@@ -47,63 +36,12 @@
###################### MISSING ES6 FEATURES #######################
- # https://bugs.chromium.org/p/v8/issues/detail?id=4768
- # The Reflect.enumerate trap is removed
- 'built-ins/Reflect/enumerate/*': [SKIP],
- 'built-ins/Proxy/enumerate/*': [SKIP],
-
- # https://code.google.com/p/v8/issues/detail?id=4163
- 'built-ins/GeneratorPrototype/next/context-constructor-invocation': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=3566
- 'built-ins/Array/from/iter-map-fn-err': [FAIL],
- 'built-ins/Array/from/iter-set-elem-prop-err': [FAIL],
- 'built-ins/Map/iterator-close-after-set-failure': [FAIL],
- 'built-ins/Map/iterator-item-first-entry-returns-abrupt': [FAIL],
- 'built-ins/Map/iterator-item-second-entry-returns-abrupt': [FAIL],
- 'built-ins/Map/iterator-items-are-not-object-close-iterator': [FAIL],
- 'built-ins/Promise/all/iter-close': [FAIL],
- 'built-ins/Promise/race/iter-close': [PASS, FAIL],
- 'built-ins/Set/set-iterator-close-after-add-failure': [FAIL],
- 'built-ins/WeakMap/iterator-close-after-set-failure': [FAIL],
- 'built-ins/WeakMap/iterator-item-first-entry-returns-abrupt': [FAIL],
- 'built-ins/WeakMap/iterator-item-second-entry-returns-abrupt': [FAIL],
- 'built-ins/WeakMap/iterator-items-are-not-object-close-iterator': [FAIL],
- 'built-ins/WeakSet/iterator-close-after-add-failure': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4348
- 'built-ins/String/prototype/Symbol.iterator/this-val-non-obj-coercible': [FAIL],
-
# The order of adding the name property is wrong
# https://code.google.com/p/v8/issues/detail?id=4199
'language/computed-property-names/class/static/method-number': [FAIL, FAIL_SLOPPY],
'language/computed-property-names/class/static/method-symbol': [FAIL, FAIL_SLOPPY],
'language/computed-property-names/class/static/method-string': [FAIL, FAIL_SLOPPY],
- # https://code.google.com/p/v8/issues/detail?id=3566
- 'language/statements/for-of/body-dstr-assign-error': [FAIL],
- 'language/statements/for-of/body-put-error': [FAIL],
- 'language/statements/for-of/generator-close-via-break': [FAIL],
- 'language/statements/for-of/generator-close-via-return': [FAIL],
- 'language/statements/for-of/generator-close-via-throw': [FAIL],
- 'language/statements/for-of/iterator-close-get-method-error': [FAIL],
- 'language/statements/for-of/iterator-close-non-object': [FAIL],
- 'language/statements/for-of/iterator-close-via-break': [FAIL],
- 'language/statements/for-of/iterator-close-via-return': [FAIL],
- 'language/statements/for-of/iterator-close-via-throw': [FAIL],
-
- # We do not expose Array.prototype.values
- # https://code.google.com/p/v8/issues/detail?id=4247
- 'built-ins/Array/prototype/Symbol.iterator': [FAIL],
- 'built-ins/Array/prototype/values/returns-iterator': [FAIL],
- 'built-ins/Array/prototype/values/returns-iterator-from-object': [FAIL],
- 'built-ins/Array/prototype/values/prop-desc': [FAIL],
- 'built-ins/Array/prototype/values/name': [FAIL],
- 'built-ins/Array/prototype/values/length': [FAIL],
- 'built-ins/Array/prototype/values/iteration': [FAIL],
- 'built-ins/Array/prototype/values/iteration-mutable': [FAIL],
- 'built-ins/Array/prototype/Symbol.unscopables/value': [FAIL],
-
# https://code.google.com/p/v8/issues/detail?id=4248
'language/expressions/compound-assignment/S11.13.2_A5.*': [FAIL],
'language/expressions/compound-assignment/S11.13.2_A6.*': [FAIL],
@@ -139,6 +77,14 @@
'language/expressions/class/name': [FAIL],
'language/expressions/function/name': [FAIL],
'language/expressions/generators/name': [FAIL],
+ 'intl402/NumberFormat/prototype/format/format-function-name': [FAIL],
+ 'intl402/DateTimeFormat/prototype/format/format-function-name': [FAIL],
+ 'intl402/Collator/prototype/compare/compare-function-name': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=4778
+ 'intl402/Collator/prototype/compare/name': [FAIL],
+ 'intl402/DateTimeFormat/prototype/format/name': [FAIL],
+ 'intl402/NumberFormat/prototype/format/name': [FAIL],
# https://code.google.com/p/v8/issues/detail?id=4251
'language/expressions/postfix-increment/S11.3.1_A5_T1': [FAIL],
@@ -153,42 +99,49 @@
# https://code.google.com/p/v8/issues/detail?id=4253
'language/asi/S7.9_A5.7_T1': [PASS, FAIL_OK],
- # https://code.google.com/p/v8/issues/detail?id=4602
+ ###### BEGIN REGEXP SUBCLASSING SECTION ######
+ # Spec change in progress https://github.com/tc39/ecma262/pull/494
+ # RegExpBuiltinMatch reads flags from [[OriginalFlags]]
+ 'built-ins/RegExp/prototype/Symbol.match/builtin-coerce-sticky': [FAIL],
+ 'built-ins/RegExp/prototype/Symbol.match/builtin-get-global-err': [FAIL],
+ 'built-ins/RegExp/prototype/Symbol.match/builtin-get-sticky-err': [FAIL],
+ 'built-ins/RegExp/prototype/Symbol.match/builtin-success-g-set-lastindex': [FAIL],
+ 'built-ins/RegExp/prototype/Symbol.match/builtin-success-g-set-lastindex-err': [FAIL],
+ 'built-ins/RegExp/prototype/Symbol.match/coerce-sticky': [FAIL],
+ 'built-ins/RegExp/prototype/Symbol.match/get-sticky-err': [FAIL],
+ 'built-ins/RegExp/prototype/Symbol.replace/coerce-global': [FAIL],
+ 'built-ins/RegExp/prototype/Symbol.replace/coerce-unicode': [FAIL],
+ 'built-ins/RegExp/prototype/Symbol.replace/get-sticky-coerce': [FAIL],
+ 'built-ins/RegExp/prototype/Symbol.replace/get-sticky-err': [SKIP],
+ 'built-ins/RegExp/prototype/Symbol.search/get-sticky-coerce': [FAIL],
+ 'built-ins/RegExp/prototype/Symbol.search/get-sticky-err': [FAIL],
'built-ins/RegExp/prototype/exec/get-sticky-coerce': [FAIL],
'built-ins/RegExp/prototype/exec/get-sticky-err': [FAIL],
'built-ins/RegExp/prototype/test/get-sticky-err': [FAIL],
- # https://code.google.com/p/v8/issues/detail?id=4504
- 'built-ins/RegExp/prototype/exec/y-fail-lastindex-no-write': [PASS, FAIL],
- 'built-ins/RegExp/prototype/test/y-fail-lastindex-no-write': [PASS, FAIL],
+ # Missing lastIndex support
+ 'built-ins/RegExp/prototype/Symbol.split/str-result-coerce-length-err': [FAIL],
- # https://code.google.com/p/v8/issues/detail?id=4305
- # SKIP rather than FAIL some tests, as they may check for an exception which
- # happens to be thrown for some other reason (e.g,
- # built-ins/RegExp/prototype/Symbol.match/builtin-failure-set-lastindex-err)
- 'built-ins/RegExp/prototype/Symbol.match/*': [SKIP],
- 'built-ins/String/prototype/endsWith/return-abrupt-from-searchstring-regexp-test': [FAIL],
- 'built-ins/String/prototype/includes/return-abrupt-from-searchstring-regexp-test': [FAIL],
- 'built-ins/String/prototype/startsWith/return-abrupt-from-searchstring-regexp-test': [FAIL],
- 'built-ins/String/prototype/match/invoke-builtin-match': [FAIL],
+ # Times out
+ 'built-ins/RegExp/prototype/Symbol.split/str-coerce-lastindex': [SKIP],
+ 'built-ins/RegExp/prototype/Symbol.match/coerce-global': [SKIP],
+ 'built-ins/RegExp/prototype/Symbol.match/builtin-coerce-global': [SKIP],
- # https://code.google.com/p/v8/issues/detail?id=4343
- 'built-ins/RegExp/prototype/Symbol.replace/*': [SKIP],
+ # Sticky support busted
+ 'built-ins/RegExp/prototype/Symbol.replace/y-init-lastindex': [FAIL],
+ 'built-ins/RegExp/prototype/Symbol.replace/y-set-lastindex': [FAIL],
- # https://code.google.com/p/v8/issues/detail?id=4344
- 'built-ins/RegExp/prototype/Symbol.search/*': [SKIP],
+ # SKIP rather than FAIL, as the test checks for an exception which
+ # happens to be thrown for some other reason.
+ 'built-ins/RegExp/prototype/Symbol.split/str-result-get-length-err': [SKIP],
- # https://code.google.com/p/v8/issues/detail?id=4345
- 'built-ins/RegExp/prototype/Symbol.split/*': [SKIP],
+ ###### END REGEXP SUBCLASSING SECTION ######
# https://code.google.com/p/v8/issues/detail?id=4360
'intl402/Collator/10.1.1_1': [FAIL],
'intl402/DateTimeFormat/12.1.1_1': [FAIL],
'intl402/NumberFormat/11.1.1_1': [FAIL],
- # https://code.google.com/p/v8/issues/detail?id=4361
- 'intl402/Collator/10.1.1_a': [FAIL],
-
# https://code.google.com/p/v8/issues/detail?id=4476
'built-ins/String/prototype/toLocaleLowerCase/special_casing_conditional': [FAIL],
'built-ins/String/prototype/toLocaleLowerCase/supplementary_plane': [FAIL],
@@ -220,12 +173,6 @@
'built-ins/Promise/resolve-function-name': [FAIL],
'built-ins/Promise/all/resolve-element-function-name': [FAIL],
'built-ins/Promise/executor-function-name': [FAIL],
- 'built-ins/Promise/all/capability-executor-not-callable': [FAIL],
- 'built-ins/Promise/reject/capability-executor-not-callable': [FAIL],
- 'built-ins/Promise/race/capability-executor-not-callable': [FAIL],
- 'built-ins/Promise/prototype/then/capability-executor-not-callable': [FAIL],
- 'built-ins/Promise/resolve/capability-executor-not-callable': [FAIL],
- 'built-ins/Promise/race/S25.4.4.3_A3.1_T2': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=4634
'built-ins/DataView/prototype/setFloat64/index-check-before-value-conversion': [FAIL],
@@ -246,18 +193,21 @@
'language/statements/class/subclass/builtin-objects/NativeError/URIError-message': [FAIL],
'language/statements/class/subclass/builtin-objects/Error/message-property-assignment': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=4663
- 'built-ins/object/entries/*': [SKIP],
- 'built-ins/object/values/*': [SKIP],
- 'built-ins/Object/entries/*': [SKIP],
- 'built-ins/Object/values/*': [SKIP],
-
# https://code.google.com/p/chromium/issues/detail?id=581577
'built-ins/RegExp/prototype/source/15.10.7.1-1': [FAIL],
'built-ins/RegExp/prototype/global/15.10.7.2-1': [FAIL],
'built-ins/RegExp/prototype/ignoreCase/15.10.7.3-1': [FAIL],
'built-ins/RegExp/prototype/multiline/15.10.7.4-1': [FAIL],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=4727
+ 'built-ins/TypedArrays/length-arg-is-undefined-throws': [FAIL],
+ 'built-ins/TypedArrays/length-arg-is-symbol-throws': [FAIL],
+ 'built-ins/TypedArrays/length-arg-is-float-throws-rangeerror': [FAIL],
+ 'built-ins/TypedArrays/length-arg-is-nan-throws-rangeerror': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=4784
+ 'built-ins/TypedArrays/buffer-arg-defined-negative-length': [FAIL],
+
######################## NEEDS INVESTIGATION ###########################
# These test failures are specific to the intl402 suite and need investigation
@@ -272,16 +222,12 @@
'intl402/Collator/10.2.3_b': [PASS, FAIL],
'intl402/Collator/prototype/10.3_a': [FAIL],
'intl402/DateTimeFormat/12.1.1': [FAIL],
- 'intl402/DateTimeFormat/12.1.1_a': [FAIL],
'intl402/DateTimeFormat/12.1.2': [PASS, FAIL],
'intl402/DateTimeFormat/12.1.2.1_4': [FAIL],
'intl402/DateTimeFormat/12.2.3_b': [FAIL],
- 'intl402/DateTimeFormat/prototype/12.3.3': [FAIL],
'intl402/DateTimeFormat/prototype/12.3_a': [FAIL],
'intl402/Number/prototype/toLocaleString/13.2.1_5': [PASS, FAIL],
'intl402/NumberFormat/11.1.1_20_c': [FAIL],
- 'intl402/NumberFormat/11.1.1_a': [FAIL],
- 'intl402/NumberFormat/11.1.1': [FAIL],
'intl402/NumberFormat/11.1.2': [PASS, FAIL],
'intl402/NumberFormat/11.1.2.1_4': [FAIL],
'intl402/NumberFormat/11.2.3_b': [FAIL],
@@ -290,18 +236,22 @@
##################### DELIBERATE INCOMPATIBILITIES #####################
- 'built-ins/Math/exp/S15.8.2.8_A6': [PASS, FAIL_OK], # Math.exp (less precise with --fast-math)
-
- # Linux for ia32 (and therefore simulators) default to extended 80 bit
- # floating point formats, so these tests checking 64-bit FP precision fail.
- # The other platforms/arch's pass these tests.
- # We follow the other major JS engines by keeping this default.
- 'language/types/number/S8.5_A2.1': [PASS, FAIL_OK],
- 'language/types/number/S8.5_A2.2': [PASS, FAIL_OK],
-
# https://code.google.com/p/v8/issues/detail?id=4693
'language/block-scope/syntax/redeclaration-in-block/attempt-to-redeclare-function-declaration-with-function-declaration': [PASS, FAIL_SLOPPY],
+ # We do not expose Array.prototype.values due to webcompat issues.
+ # Most recent incompatability: https://crbug.com/615873
+ # https://code.google.com/p/v8/issues/detail?id=4247
+ 'built-ins/Array/prototype/Symbol.iterator': [FAIL],
+ 'built-ins/Array/prototype/values/returns-iterator': [FAIL],
+ 'built-ins/Array/prototype/values/returns-iterator-from-object': [FAIL],
+ 'built-ins/Array/prototype/values/prop-desc': [FAIL],
+ 'built-ins/Array/prototype/values/name': [FAIL],
+ 'built-ins/Array/prototype/values/length': [FAIL],
+ 'built-ins/Array/prototype/values/iteration': [FAIL],
+ 'built-ins/Array/prototype/values/iteration-mutable': [FAIL],
+ 'built-ins/Array/prototype/Symbol.unscopables/value': [FAIL],
+
############################ INVALID TESTS #############################
# The reference value calculated by Test262 is incorrect if you run these
@@ -319,26 +269,19 @@
# Test262 Bug: https://bugs.ecmascript.org/show_bug.cgi?id=596
'built-ins/Array/prototype/sort/bug_596_1': [PASS, FAIL_OK],
- # Tests do not return boolean.
- 'built-ins/Object/keys/15.2.3.14-1-1': [PASS, FAIL_OK],
- 'built-ins/Object/keys/15.2.3.14-1-2': [PASS, FAIL_OK],
- 'built-ins/Object/keys/15.2.3.14-1-3': [PASS, FAIL_OK],
-
- # Test bug https://github.com/tc39/test262/issues/405
- 'intl402/Collator/prototype/compare/10.3.2_1_c': [PASS, FAIL_OK],
- 'intl402/Collator/prototype/compare/10.3.2_CS_b_NN': [PASS, FAIL_OK],
- 'intl402/Collator/prototype/compare/10.3.2_CS_c_NN': [PASS, FAIL_OK],
- 'intl402/Collator/prototype/compare/10.3.2_CS_d_NN': [PASS, FAIL_OK],
- 'intl402/Date/prototype/13.3.0_7': [PASS, FAIL_OK],
+ # Test bug https://github.com/tc39/test262/issues/518
+ 'built-ins/TypedArrays/object-arg-throws-setting-typedarray-property': [FAIL],
+ 'built-ins/Object/getOwnPropertyDescriptors/duplicate-keys': [FAIL],
+ 'built-ins/Object/getOwnPropertyDescriptors/symbols-included': [FAIL],
- # Some tests are too strict, checking SameValue rather than ===
- # https://github.com/tc39/test262/issues/435
- 'built-ins/Array/prototype/indexOf/15.4.4.14-5-9': [FAIL],
- 'built-ins/Array/prototype/lastIndexOf/15.4.4.15-5-9': [FAIL],
+ # Test bug https://github.com/tc39/test262/issues/521
+ 'built-ins/TypedArray/from/mapfn-is-not-callable': [FAIL],
- # https://github.com/tc39/test262/issues/489
- # Test will pass in 0 or -GMT, but fail in +GMT
- 'language/statements/class/subclass/builtin-objects/Date/regular-subclassing': [PASS, FAIL_OK],
+ # Test bug https://github.com/tc39/test262/issues/529
+ 'built-ins/Math/cos/S15.8.2.7_A7': [PASS, FAIL_OK],
+ 'built-ins/Math/sin/S15.8.2.16_A7': [PASS, FAIL_OK],
+ 'built-ins/Math/tan/S15.8.2.18_A7': [PASS, FAIL_OK],
+ 'built-ins/Math/exp/S15.8.2.8_A6': [PASS, FAIL_OK], # Math.exp (less precise with --fast-math)
############################ SKIPPED TESTS #############################
@@ -373,9 +316,6 @@
}], # system == linux
['system == macos', {
- 'intl402/11.3.2_TRP': [FAIL],
- 'intl402/9.2.5_11_g_ii_2': [FAIL],
-
# BUG(v8:4437).
'intl402/Collator/10.1.1_19_c': [SKIP],
'intl402/Collator/9.2.5_11_g_ii_2': [SKIP],
@@ -426,195 +366,4 @@
'built-ins/ArrayBuffer/allocation-limit': [SKIP],
}], # asan == True or msan == True or tsan == True
-['ignition == True', {
- 'annexB/B.2.3.*': [SKIP],
- 'built-ins/Array/prototype/reduce/*': [SKIP],
- 'built-ins/Array/prototype/reduceRight/*': [SKIP],
- 'built-ins/GeneratorFunction/*': [SKIP],
- 'built-ins/GeneratorPrototype/*': [SKIP],
- 'built-ins/Promise/prototype/then/capability-executor-not-callable': [SKIP],
- 'built-ins/Reflect/enumerate/*': [SKIP],
- 'language/computed-property-names/class/*': [SKIP],
- 'language/computed-property-names/to-name-side-effects/*': [SKIP],
- 'language/directive-prologue/*': [SKIP],
- 'language/expressions/arrow-function/*': [SKIP],
- 'language/expressions/assignment/destructuring/*': [SKIP],
- 'language/expressions/class/subclass/builtin-objects/GeneratorFunction/*': [SKIP],
- 'language/expressions/generators/*': [SKIP],
- 'language/expressions/instanceof/primitive-prototype-with-object': [SKIP],
- 'language/expressions/instanceof/prototype-getter-with-object-throws': [SKIP],
- 'language/expressions/instanceof/prototype-getter-with-object': [SKIP],
- 'language/expressions/object/method-definition/yield*': [SKIP],
- 'language/expressions/object/method-definition/generator*': [SKIP],
- 'language/expressions/yield/*': [SKIP],
- 'language/statements/class/definition/methods-gen-no-yield': [SKIP],
- 'language/statements/class/definition/methods-gen-return': [SKIP],
- 'language/statements/class/definition/methods-gen-yield-as-expression-with-rhs': [SKIP],
- 'language/statements/class/definition/methods-gen-yield-as-generator-method-binding-identifier': [SKIP],
- 'language/statements/class/definition/methods-gen-yield-as-literal-property-name': [SKIP],
- 'language/statements/class/definition/methods-gen-yield-as-property-name': [SKIP],
- 'language/statements/class/definition/methods-gen-yield-as-statement': [SKIP],
- 'language/statements/class/definition/methods-gen-yield-as-expression-without-rhs': [SKIP],
- 'language/statements/class/definition/methods-gen-yield-as-yield-operand': [SKIP],
- 'language/statements/class/definition/methods-gen-yield-newline': [SKIP],
- 'language/statements/class/definition/methods-gen-yield-star-before-newline': [SKIP],
- 'language/statements/class/subclass/builtin-objects/GeneratorFunction/*': [SKIP],
- 'language/statements/generators/*': [SKIP],
-
- 'built-ins/Array/prototype/concat/Array.prototype.concat_non-array': [SKIP],
- 'built-ins/Date/prototype/toISOString/15.9.5.43-0-13': [SKIP],
- 'built-ins/Object/prototype/hasOwnProperty/S15.2.4.5_A12': [SKIP],
- 'built-ins/Object/prototype/isPrototypeOf/S15.2.4.6_A12': [SKIP],
- 'built-ins/Object/prototype/propertyIsEnumerable/S15.2.4.7_A12': [SKIP],
- 'built-ins/Object/prototype/toLocaleString/S15.2.4.3_A12': [SKIP],
- 'built-ins/Object/prototype/toString/15.2.4.2-1-1': [SKIP],
- 'built-ins/Object/prototype/toString/15.2.4.2-1-2': [SKIP],
- 'built-ins/Object/prototype/toString/S15.2.4.2_A12': [SKIP],
- 'built-ins/Object/prototype/valueOf/S15.2.4.4_A12': [SKIP],
- 'built-ins/Object/prototype/valueOf/S15.2.4.4_A14': [SKIP],
- 'built-ins/Object/prototype/valueOf/S15.2.4.4_A15': [SKIP],
- 'built-ins/Promise/all/S25.4.4.1_A4.1_T1': [SKIP],
- 'built-ins/Promise/prototype/then/on-rejected-throw': [SKIP],
- 'built-ins/Promise/reject/S25.4.4.4_A3.1_T1': [SKIP],
- 'built-ins/String/prototype/codePointAt/this-is-undefined-throws': [SKIP],
- 'built-ins/String/prototype/concat/S15.5.4.6_A2': [SKIP],
- 'built-ins/String/prototype/endsWith/this-is-undefined-throws': [SKIP],
- 'built-ins/String/prototype/includes/this-is-undefined-throws': [SKIP],
- 'built-ins/String/prototype/repeat/this-is-undefined-throws': [SKIP],
- 'built-ins/String/prototype/startsWith/this-is-undefined-throws': [SKIP],
- 'built-ins/String/prototype/trim/15.5.4.20-1-1': [SKIP],
- 'language/block-scope/leave/nested-block-let-declaration-only-shadows-outer-parameter-value-1': [SKIP],
- 'language/block-scope/leave/nested-block-let-declaration-only-shadows-outer-parameter-value-2': [SKIP],
- 'language/block-scope/leave/verify-context-in-labelled-block': [SKIP],
- 'language/block-scope/leave/x-after-break-to-label': [SKIP],
- 'language/default-parameters/class-definitions': [SKIP],
- 'language/default-parameters/generators': [SKIP],
- 'language/expressions/object/method-definition/name-prop-name-yield-expr': [SKIP],
- 'language/expressions/tagged-template/call-expression-context-no-strict': [SKIP],
- 'language/expressions/tagged-template/call-expression-context-strict': [SKIP],
- 'language/expressions/template-literal/evaluation-order': [SKIP],
- 'language/statements/for-of/body-dstr-assign': [SKIP],
- 'language/statements/for-of/break': [SKIP],
- 'language/statements/for-of/break-from-catch': [SKIP],
- 'language/statements/for-of/break-from-finally': [SKIP],
- 'language/statements/for-of/break-from-try': [SKIP],
- 'language/statements/for-of/break-label': [SKIP],
- 'language/statements/for-of/break-label-from-catch': [SKIP],
- 'language/statements/for-of/break-label-from-finally': [SKIP],
- 'language/statements/for-of/break-label-from-try': [SKIP],
- 'language/statements/for-of/continue': [SKIP],
- 'language/statements/for-of/continue-from-catch': [SKIP],
- 'language/statements/for-of/continue-from-finally': [SKIP],
- 'language/statements/for-of/continue-from-try': [SKIP],
- 'language/statements/for-of/continue-label': [SKIP],
- 'language/statements/for-of/continue-label-from-catch': [SKIP],
- 'language/statements/for-of/continue-label-from-finally': [SKIP],
- 'language/statements/for-of/continue-label-from-try': [SKIP],
- 'language/statements/for-of/generator': [SKIP],
- 'language/statements/for-of/generator-next-error': [SKIP],
- 'language/statements/for-of/nested': [SKIP],
- 'language/statements/for-of/return': [SKIP],
- 'language/statements/for-of/return-from-catch': [SKIP],
- 'language/statements/for-of/return-from-finally': [SKIP],
- 'language/statements/for-of/return-from-try': [SKIP],
- 'language/statements/for-of/throw': [SKIP],
- 'language/statements/for-of/throw-from-catch': [SKIP],
- 'language/statements/for-of/throw-from-finally': [SKIP],
- 'language/statements/for-of/yield': [SKIP],
- 'language/statements/for-of/yield-from-catch': [SKIP],
- 'language/statements/for-of/yield-from-finally': [SKIP],
- 'language/statements/for-of/yield-from-try': [SKIP],
- 'language/statements/for-of/yield-star': [SKIP],
- 'language/statements/for-of/yield-star-from-catch': [SKIP],
- 'language/statements/for-of/yield-star-from-finally': [SKIP],
- 'language/statements/for-of/yield-star-from-try': [SKIP],
- 'language/object-literal/concise-generator': [SKIP],
- 'language/statements/do-while/S12.6.1_A4_T5': [SKIP],
- 'language/statements/while/S12.6.2_A4_T5': [SKIP],
- 'language/expressions/instanceof/symbol-hasinstance-not-callable': [SKIP],
-
-}], # ignition == True
-
-['ignition == True and (arch == arm or arch == arm64)', {
- 'built-ins/Promise/all/ctx-ctor': [SKIP],
- 'built-ins/Promise/race/ctx-ctor': [SKIP],
- 'built-ins/decodeURI/S15.1.3.1_A1.12_T3': [SKIP],
- 'built-ins/decodeURIComponent/S15.1.3.2_A1.10_T1': [SKIP],
- 'built-ins/decodeURIComponent/S15.1.3.2_A1.11_T2': [SKIP],
- 'built-ins/decodeURIComponent/S15.1.3.2_A1.12_T2': [SKIP],
- 'built-ins/decodeURIComponent/S15.1.3.2_A1.12_T3': [SKIP],
- 'intl402/9.2.2': [SKIP],
- 'language/statements/class/arguments/default-constructor': [SKIP],
- 'language/statements/class/definition/constructor-strict-by-default': [SKIP],
- 'language/statements/class/definition/fn-name-accessor-get': [SKIP],
- 'language/statements/class/definition/fn-name-accessor-set': [SKIP],
- 'language/statements/class/definition/fn-name-gen-method': [SKIP],
- 'language/statements/class/definition/fn-name-method': [SKIP],
- 'language/statements/class/definition/methods-restricted-properties': [SKIP],
- 'language/statements/class/definition/prototype-getter': [SKIP],
- 'language/statements/class/definition/prototype-wiring': [SKIP],
- 'language/statements/class/definition/this-access-restriction': [SKIP],
- 'language/statements/class/definition/this-access-restriction-2': [SKIP],
- 'language/statements/class/definition/this-check-ordering': [SKIP],
- 'language/statements/class/name': [SKIP],
- 'language/statements/class/restricted-properties': [SKIP],
- 'language/statements/class/subclass/binding': [SKIP],
- 'language/statements/class/subclass/builtin-objects/Array/super-must-be-called': [SKIP],
- 'language/statements/class/subclass/builtin-objects/ArrayBuffer/super-must-be-called': [SKIP],
- 'language/statements/class/subclass/builtin-objects/Boolean/super-must-be-called': [SKIP],
- 'language/statements/class/subclass/builtin-objects/DataView/regular-subclassing': [SKIP],
- 'language/statements/class/subclass/builtin-objects/DataView/super-must-be-called': [SKIP],
- 'language/statements/class/subclass/builtin-objects/Date/super-must-be-called': [SKIP],
- 'language/statements/class/subclass/builtin-objects/Error/regular-subclassing': [SKIP],
- 'language/statements/class/subclass/builtin-objects/Error/super-must-be-called': [SKIP],
- 'language/statements/class/subclass/builtin-objects/Function/instance-length': [SKIP],
- 'language/statements/class/subclass/builtin-objects/Function/instance-name': [SKIP],
- 'language/statements/class/subclass/builtin-objects/Function/super-must-be-called': [SKIP],
- 'language/statements/class/subclass/builtin-objects/Map/super-must-be-called': [SKIP],
- 'language/statements/class/subclass/builtin-objects/NativeError/EvalError-name': [SKIP],
- 'language/statements/class/subclass/builtin-objects/NativeError/EvalError-super': [SKIP],
- 'language/statements/class/subclass/builtin-objects/NativeError/RangeError-name': [SKIP],
- 'language/statements/class/subclass/builtin-objects/NativeError/RangeError-super': [SKIP],
- 'language/statements/class/subclass/builtin-objects/NativeError/ReferenceError-name': [SKIP],
- 'language/statements/class/subclass/builtin-objects/NativeError/ReferenceError-super': [SKIP],
- 'language/statements/class/subclass/builtin-objects/NativeError/SyntaxError-name': [SKIP],
- 'language/statements/class/subclass/builtin-objects/NativeError/SyntaxError-super': [SKIP],
- 'language/statements/class/subclass/builtin-objects/NativeError/TypeError-name': [SKIP],
- 'language/statements/class/subclass/builtin-objects/NativeError/TypeError-super': [SKIP],
- 'language/statements/class/subclass/builtin-objects/NativeError/URIError-name': [SKIP],
- 'language/statements/class/subclass/builtin-objects/NativeError/URIError-super': [SKIP],
- 'language/statements/class/subclass/builtin-objects/Number/super-must-be-called': [SKIP],
- 'language/statements/class/subclass/builtin-objects/Object/constructor-return-undefined-throws': [SKIP],
- 'language/statements/class/subclass/builtin-objects/Object/constructor-returns-non-object': [SKIP],
- 'language/statements/class/subclass/builtin-objects/Promise/regular-subclassing': [SKIP],
- 'language/statements/class/subclass/builtin-objects/Promise/super-must-be-called': [SKIP],
- 'language/statements/class/subclass/builtin-objects/RegExp/lastIndex': [SKIP],
- 'language/statements/class/subclass/builtin-objects/RegExp/super-must-be-called': [SKIP],
- 'language/statements/class/subclass/builtin-objects/Set/super-must-be-called': [SKIP],
- 'language/statements/class/subclass/builtin-objects/String/length': [SKIP],
- 'language/statements/class/subclass/builtin-objects/String/super-must-be-called': [SKIP],
- 'language/statements/class/subclass/builtin-objects/Symbol/new-symbol-with-super-throws': [SKIP],
- 'language/statements/class/subclass/builtin-objects/WeakMap/super-must-be-called': [SKIP],
- 'language/statements/class/subclass/builtin-objects/WeakSet/super-must-be-called': [SKIP],
- 'language/statements/class/subclass/class-definition-null-proto-missing-return-override': [SKIP],
- 'language/statements/class/subclass/default-constructor': [SKIP],
- 'language/statements/class/subclass/default-constructor-2': [SKIP],
- 'language/statements/class/subclass/derived-class-return-override-with-boolean': [SKIP],
- 'language/statements/class/subclass/derived-class-return-override-with-null': [SKIP],
- 'language/statements/class/subclass/derived-class-return-override-with-number': [SKIP],
- 'language/statements/class/subclass/derived-class-return-override-with-string': [SKIP],
- 'language/statements/class/subclass/derived-class-return-override-with-symbol': [SKIP],
- 'language/statements/const/fn-name-arrow': [SKIP],
- 'language/statements/const/fn-name-class': [SKIP],
- 'language/statements/const/fn-name-cover': [SKIP],
- 'language/statements/const/fn-name-fn': [SKIP],
- 'language/statements/const/fn-name-gen': [SKIP],
- 'language/statements/let/fn-name-arrow': [SKIP],
- 'language/statements/let/fn-name-class': [SKIP],
- 'language/statements/let/fn-name-cover': [SKIP],
- 'language/statements/let/fn-name-fn': [SKIP],
- 'language/statements/let/fn-name-gen': [SKIP],
- 'test-api/Regress470113': [SKIP],
-}], # ignition == True and (arch == arm or arch == arm64)
-
]
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index b5ad30949d..bf007bd46f 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -39,7 +39,8 @@ from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.objects import testcase
-ARCHIVE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data.tar")
+DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
+ARCHIVE = DATA + ".tar"
TEST_262_HARNESS_FILES = ["sta.js", "assert.js"]
@@ -128,7 +129,8 @@ class Test262TestSuite(testsuite.TestSuite):
def GetFlagsForTestCase(self, testcase, context):
return (testcase.flags + context.mode_flags + self.harness +
self.GetIncludesForTest(testcase) + ["--harmony"] +
- [os.path.join(self.testroot, testcase.path + ".js")])
+ [os.path.join(self.testroot, testcase.path + ".js")] +
+ (["--throws"] if "negative" in self.GetTestRecord(testcase) else []))
def _VariantGeneratorFactory(self):
return Test262VariantGenerator
@@ -143,7 +145,7 @@ class Test262TestSuite(testsuite.TestSuite):
self.ParseTestRecord = module.parseTestRecord
except:
raise ImportError("Cannot load parseTestRecord; you may need to "
- "--download-data for test262")
+ "gclient sync for test262")
finally:
if f:
f.close()
@@ -170,13 +172,20 @@ class Test262TestSuite(testsuite.TestSuite):
with open(filename) as f:
return f.read()
- def IsNegativeTest(self, testcase):
- test_record = self.GetTestRecord(testcase)
- return "negative" in test_record
+ def _ParseException(self, str):
+ for line in str.split("\n")[::-1]:
+ if line and not line[0].isspace() and ":" in line:
+ return line.split(":")[0]
+
- def IsFailureOutput(self, output, testpath):
+ def IsFailureOutput(self, testcase):
+ output = testcase.output
+ test_record = self.GetTestRecord(testcase)
if output.exit_code != 0:
return True
+ if "negative" in test_record:
+ if self._ParseException(output.stdout) != test_record["negative"]:
+ return True
return "FAILED!" in output.stdout
def HasUnexpectedOutput(self, testcase):
@@ -201,10 +210,13 @@ class Test262TestSuite(testsuite.TestSuite):
for f in archive_files:
os.remove(os.path.join(self.root, f))
- print "Extracting archive..."
- tar = tarfile.open(ARCHIVE)
- tar.extractall(path=os.path.dirname(ARCHIVE))
- tar.close()
+ # The archive is created only on swarming. Local checkouts have the
+ # data folder.
+ if os.path.exists(ARCHIVE) and not os.path.exists(DATA):
+ print "Extracting archive..."
+ tar = tarfile.open(ARCHIVE)
+ tar.extractall(path=os.path.dirname(ARCHIVE))
+ tar.close()
def GetSuite(name, root):
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.h b/deps/v8/test/unittests/compiler/graph-unittest.h
index 9c99992511..31bae6d415 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.h
+++ b/deps/v8/test/unittests/compiler/graph-unittest.h
@@ -29,7 +29,6 @@ class GraphTest : public TestWithContext, public TestWithIsolateAndZone {
explicit GraphTest(int num_parameters = 1);
~GraphTest() override;
- protected:
Node* start() { return graph()->start(); }
Node* end() { return graph()->end(); }
diff --git a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
index eff6d4a931..08f3038754 100644
--- a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
@@ -28,7 +28,10 @@ namespace compiler {
class Int64LoweringTest : public GraphTest {
public:
- Int64LoweringTest() : GraphTest(), machine_(zone()) {
+ Int64LoweringTest()
+ : GraphTest(),
+ machine_(zone(), MachineRepresentation::kWord32,
+ MachineOperatorBuilder::Flag::kAllOptionalOps) {
value_[0] = 0x1234567890abcdef;
value_[1] = 0x1edcba098765432f;
value_[2] = 0x1133557799886644;
@@ -86,14 +89,34 @@ class Int64LoweringTest : public GraphTest {
return static_cast<int32_t>(value_[i] >> 32);
}
+ void TestComparison(
+ const Operator* op,
+ Matcher<Node*> (*high_word_matcher)(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher),
+ Matcher<Node*> (*low_word_matcher)(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher)) {
+ LowerGraph(
+ graph()->NewNode(op, Int64Constant(value(0)), Int64Constant(value(1))),
+ MachineRepresentation::kWord32);
+ EXPECT_THAT(
+ graph()->end()->InputAt(1),
+ IsReturn(IsWord32Or(
+ high_word_matcher(IsInt32Constant(high_word_value(0)),
+ IsInt32Constant(high_word_value(1))),
+ IsWord32And(
+ IsWord32Equal(IsInt32Constant(high_word_value(0)),
+ IsInt32Constant(high_word_value(1))),
+ low_word_matcher(IsInt32Constant(low_word_value(0)),
+ IsInt32Constant(low_word_value(1))))),
+ start(), start()));
+ }
+
private:
MachineOperatorBuilder machine_;
int64_t value_[3];
};
TEST_F(Int64LoweringTest, Int64Constant) {
- if (4 != kPointerSize) return;
-
LowerGraph(Int64Constant(value(0)), MachineRepresentation::kWord64);
EXPECT_THAT(graph()->end()->InputAt(1),
IsReturn2(IsInt32Constant(low_word_value(0)),
@@ -101,8 +124,6 @@ TEST_F(Int64LoweringTest, Int64Constant) {
}
TEST_F(Int64LoweringTest, Int64Load) {
- if (4 != kPointerSize) return;
-
int32_t base = 0x1234;
int32_t index = 0x5678;
@@ -128,8 +149,6 @@ TEST_F(Int64LoweringTest, Int64Load) {
}
TEST_F(Int64LoweringTest, Int64Store) {
- if (4 != kPointerSize) return;
-
// We have to build the TF graph explicitly here because Store does not return
// a value.
@@ -173,8 +192,6 @@ TEST_F(Int64LoweringTest, Int64Store) {
}
TEST_F(Int64LoweringTest, Int64And) {
- if (4 != kPointerSize) return;
-
LowerGraph(graph()->NewNode(machine()->Word64And(), Int64Constant(value(0)),
Int64Constant(value(1))),
MachineRepresentation::kWord64);
@@ -187,8 +204,6 @@ TEST_F(Int64LoweringTest, Int64And) {
}
TEST_F(Int64LoweringTest, TruncateInt64ToInt32) {
- if (4 != kPointerSize) return;
-
LowerGraph(graph()->NewNode(machine()->TruncateInt64ToInt32(),
Int64Constant(value(0))),
MachineRepresentation::kWord32);
@@ -197,8 +212,6 @@ TEST_F(Int64LoweringTest, TruncateInt64ToInt32) {
}
TEST_F(Int64LoweringTest, Parameter) {
- if (4 != kPointerSize) return;
-
LowerGraph(Parameter(0), MachineRepresentation::kWord64,
MachineRepresentation::kWord64, 1);
@@ -207,8 +220,6 @@ TEST_F(Int64LoweringTest, Parameter) {
}
TEST_F(Int64LoweringTest, Parameter2) {
- if (4 != kPointerSize) return;
-
Signature<MachineRepresentation>::Builder sig_builder(zone(), 1, 5);
sig_builder.AddReturn(MachineRepresentation::kWord32);
@@ -229,8 +240,6 @@ TEST_F(Int64LoweringTest, Parameter2) {
}
TEST_F(Int64LoweringTest, CallI64Return) {
- if (4 != kPointerSize) return;
-
int32_t function = 0x9999;
Signature<MachineRepresentation>::Builder sig_builder(zone(), 1, 0);
@@ -259,8 +268,6 @@ TEST_F(Int64LoweringTest, CallI64Return) {
}
TEST_F(Int64LoweringTest, CallI64Parameter) {
- if (4 != kPointerSize) return;
-
int32_t function = 0x9999;
Signature<MachineRepresentation>::Builder sig_builder(zone(), 1, 3);
@@ -294,6 +301,505 @@ TEST_F(Int64LoweringTest, CallI64Parameter) {
wasm::ModuleEnv::GetI32WasmCallDescriptor(zone(), desc));
}
+// todo(ahaas): I added a list of missing instructions here to make merging
+// easier when I do them one by one.
+// kExprI64Add:
+TEST_F(Int64LoweringTest, Int64Add) {
+ LowerGraph(graph()->NewNode(machine()->Int64Add(), Int64Constant(value(0)),
+ Int64Constant(value(1))),
+ MachineRepresentation::kWord64);
+
+ Capture<Node*> add;
+ Matcher<Node*> add_matcher = IsInt32PairAdd(
+ IsInt32Constant(low_word_value(0)), IsInt32Constant(high_word_value(0)),
+ IsInt32Constant(low_word_value(1)), IsInt32Constant(high_word_value(1)));
+
+ EXPECT_THAT(graph()->end()->InputAt(1),
+ IsReturn2(IsProjection(0, AllOf(CaptureEq(&add), add_matcher)),
+ IsProjection(1, AllOf(CaptureEq(&add), add_matcher)),
+ start(), start()));
+}
+// kExprI64Sub:
+TEST_F(Int64LoweringTest, Int64Sub) {
+ LowerGraph(graph()->NewNode(machine()->Int64Sub(), Int64Constant(value(0)),
+ Int64Constant(value(1))),
+ MachineRepresentation::kWord64);
+
+ Capture<Node*> sub;
+ Matcher<Node*> sub_matcher = IsInt32PairSub(
+ IsInt32Constant(low_word_value(0)), IsInt32Constant(high_word_value(0)),
+ IsInt32Constant(low_word_value(1)), IsInt32Constant(high_word_value(1)));
+
+ EXPECT_THAT(graph()->end()->InputAt(1),
+ IsReturn2(IsProjection(0, AllOf(CaptureEq(&sub), sub_matcher)),
+ IsProjection(1, AllOf(CaptureEq(&sub), sub_matcher)),
+ start(), start()));
+}
+
+// kExprI64Mul:
+TEST_F(Int64LoweringTest, Int64Mul) {
+ LowerGraph(graph()->NewNode(machine()->Int64Mul(), Int64Constant(value(0)),
+ Int64Constant(value(1))),
+ MachineRepresentation::kWord64);
+
+ Capture<Node*> mul_capture;
+ Matcher<Node*> mul_matcher = IsInt32PairMul(
+ IsInt32Constant(low_word_value(0)), IsInt32Constant(high_word_value(0)),
+ IsInt32Constant(low_word_value(1)), IsInt32Constant(high_word_value(1)));
+
+ EXPECT_THAT(
+ graph()->end()->InputAt(1),
+ IsReturn2(IsProjection(0, AllOf(CaptureEq(&mul_capture), mul_matcher)),
+ IsProjection(1, AllOf(CaptureEq(&mul_capture), mul_matcher)),
+ start(), start()));
+}
+
+// kExprI64DivS:
+// kExprI64DivU:
+// kExprI64RemS:
+// kExprI64RemU:
+// kExprI64Ior:
+TEST_F(Int64LoweringTest, Int64Ior) {
+ LowerGraph(graph()->NewNode(machine()->Word64Or(), Int64Constant(value(0)),
+ Int64Constant(value(1))),
+ MachineRepresentation::kWord64);
+ EXPECT_THAT(graph()->end()->InputAt(1),
+ IsReturn2(IsWord32Or(IsInt32Constant(low_word_value(0)),
+ IsInt32Constant(low_word_value(1))),
+ IsWord32Or(IsInt32Constant(high_word_value(0)),
+ IsInt32Constant(high_word_value(1))),
+ start(), start()));
+}
+
+// kExprI64Xor:
+TEST_F(Int64LoweringTest, Int64Xor) {
+ LowerGraph(graph()->NewNode(machine()->Word64Xor(), Int64Constant(value(0)),
+ Int64Constant(value(1))),
+ MachineRepresentation::kWord64);
+ EXPECT_THAT(graph()->end()->InputAt(1),
+ IsReturn2(IsWord32Xor(IsInt32Constant(low_word_value(0)),
+ IsInt32Constant(low_word_value(1))),
+ IsWord32Xor(IsInt32Constant(high_word_value(0)),
+ IsInt32Constant(high_word_value(1))),
+ start(), start()));
+}
+// kExprI64Shl:
+TEST_F(Int64LoweringTest, Int64Shl) {
+ LowerGraph(graph()->NewNode(machine()->Word64Shl(), Int64Constant(value(0)),
+ Int64Constant(value(1))),
+ MachineRepresentation::kWord64);
+
+ Capture<Node*> shl;
+ Matcher<Node*> shl_matcher = IsWord32PairShl(
+ IsInt32Constant(low_word_value(0)), IsInt32Constant(high_word_value(0)),
+ IsInt32Constant(low_word_value(1)));
+
+ EXPECT_THAT(graph()->end()->InputAt(1),
+ IsReturn2(IsProjection(0, AllOf(CaptureEq(&shl), shl_matcher)),
+ IsProjection(1, AllOf(CaptureEq(&shl), shl_matcher)),
+ start(), start()));
+}
+// kExprI64ShrU:
+TEST_F(Int64LoweringTest, Int64ShrU) {
+ LowerGraph(graph()->NewNode(machine()->Word64Shr(), Int64Constant(value(0)),
+ Int64Constant(value(1))),
+ MachineRepresentation::kWord64);
+
+ Capture<Node*> shr;
+ Matcher<Node*> shr_matcher = IsWord32PairShr(
+ IsInt32Constant(low_word_value(0)), IsInt32Constant(high_word_value(0)),
+ IsInt32Constant(low_word_value(1)));
+
+ EXPECT_THAT(graph()->end()->InputAt(1),
+ IsReturn2(IsProjection(0, AllOf(CaptureEq(&shr), shr_matcher)),
+ IsProjection(1, AllOf(CaptureEq(&shr), shr_matcher)),
+ start(), start()));
+}
+// kExprI64ShrS:
+TEST_F(Int64LoweringTest, Int64ShrS) {
+ LowerGraph(graph()->NewNode(machine()->Word64Sar(), Int64Constant(value(0)),
+ Int64Constant(value(1))),
+ MachineRepresentation::kWord64);
+
+ Capture<Node*> sar;
+ Matcher<Node*> sar_matcher = IsWord32PairSar(
+ IsInt32Constant(low_word_value(0)), IsInt32Constant(high_word_value(0)),
+ IsInt32Constant(low_word_value(1)));
+
+ EXPECT_THAT(graph()->end()->InputAt(1),
+ IsReturn2(IsProjection(0, AllOf(CaptureEq(&sar), sar_matcher)),
+ IsProjection(1, AllOf(CaptureEq(&sar), sar_matcher)),
+ start(), start()));
+}
+// kExprI64Eq:
+TEST_F(Int64LoweringTest, Int64Eq) {
+ LowerGraph(graph()->NewNode(machine()->Word64Equal(), Int64Constant(value(0)),
+ Int64Constant(value(1))),
+ MachineRepresentation::kWord32);
+ EXPECT_THAT(
+ graph()->end()->InputAt(1),
+ IsReturn(IsWord32Equal(
+ IsWord32Or(IsWord32Xor(IsInt32Constant(low_word_value(0)),
+ IsInt32Constant(low_word_value(1))),
+ IsWord32Xor(IsInt32Constant(high_word_value(0)),
+ IsInt32Constant(high_word_value(1)))),
+ IsInt32Constant(0)),
+ start(), start()));
+}
+
+// kExprI64LtS:
+TEST_F(Int64LoweringTest, Int64LtS) {
+ TestComparison(machine()->Int64LessThan(), IsInt32LessThan, IsUint32LessThan);
+}
+// kExprI64LeS:
+TEST_F(Int64LoweringTest, Int64LeS) {
+ TestComparison(machine()->Int64LessThanOrEqual(), IsInt32LessThan,
+ IsUint32LessThanOrEqual);
+}
+// kExprI64LtU:
+TEST_F(Int64LoweringTest, Int64LtU) {
+ TestComparison(machine()->Uint64LessThan(), IsUint32LessThan,
+ IsUint32LessThan);
+}
+// kExprI64LeU:
+TEST_F(Int64LoweringTest, Int64LeU) {
+ TestComparison(machine()->Uint64LessThanOrEqual(), IsUint32LessThan,
+ IsUint32LessThanOrEqual);
+}
+
+// kExprI32ConvertI64:
+TEST_F(Int64LoweringTest, I32ConvertI64) {
+ LowerGraph(graph()->NewNode(machine()->TruncateInt64ToInt32(),
+ Int64Constant(value(0))),
+ MachineRepresentation::kWord32);
+ EXPECT_THAT(graph()->end()->InputAt(1),
+ IsReturn(IsInt32Constant(low_word_value(0)), start(), start()));
+}
+// kExprI64SConvertI32:
+TEST_F(Int64LoweringTest, I64SConvertI32) {
+ LowerGraph(graph()->NewNode(machine()->ChangeInt32ToInt64(),
+ Int32Constant(low_word_value(0))),
+ MachineRepresentation::kWord64);
+
+ EXPECT_THAT(graph()->end()->InputAt(1),
+ IsReturn2(IsInt32Constant(low_word_value(0)),
+ IsWord32Sar(IsInt32Constant(low_word_value(0)),
+ IsInt32Constant(31)),
+ start(), start()));
+}
+
+TEST_F(Int64LoweringTest, I64SConvertI32_2) {
+ LowerGraph(
+ graph()->NewNode(machine()->ChangeInt32ToInt64(),
+ graph()->NewNode(machine()->TruncateInt64ToInt32(),
+ Int64Constant(value(0)))),
+ MachineRepresentation::kWord64);
+
+ EXPECT_THAT(graph()->end()->InputAt(1),
+ IsReturn2(IsInt32Constant(low_word_value(0)),
+ IsWord32Sar(IsInt32Constant(low_word_value(0)),
+ IsInt32Constant(31)),
+ start(), start()));
+}
+// kExprI64UConvertI32:
+TEST_F(Int64LoweringTest, I64UConvertI32) {
+ LowerGraph(graph()->NewNode(machine()->ChangeUint32ToUint64(),
+ Int32Constant(low_word_value(0))),
+ MachineRepresentation::kWord64);
+
+ EXPECT_THAT(graph()->end()->InputAt(1),
+ IsReturn2(IsInt32Constant(low_word_value(0)), IsInt32Constant(0),
+ start(), start()));
+}
+
+TEST_F(Int64LoweringTest, I64UConvertI32_2) {
+ LowerGraph(
+ graph()->NewNode(machine()->ChangeUint32ToUint64(),
+ graph()->NewNode(machine()->TruncateInt64ToInt32(),
+ Int64Constant(value(0)))),
+ MachineRepresentation::kWord64);
+
+ EXPECT_THAT(graph()->end()->InputAt(1),
+ IsReturn2(IsInt32Constant(low_word_value(0)), IsInt32Constant(0),
+ start(), start()));
+}
+// kExprF64ReinterpretI64:
+TEST_F(Int64LoweringTest, F64ReinterpretI64) {
+ LowerGraph(graph()->NewNode(machine()->BitcastInt64ToFloat64(),
+ Int64Constant(value(0))),
+ MachineRepresentation::kFloat64);
+
+ Capture<Node*> stack_slot_capture;
+ Matcher<Node*> stack_slot_matcher =
+ IsStackSlot(MachineRepresentation::kWord64);
+
+ Capture<Node*> store_capture;
+ Matcher<Node*> store_matcher =
+ IsStore(StoreRepresentation(MachineRepresentation::kWord32,
+ WriteBarrierKind::kNoWriteBarrier),
+ AllOf(CaptureEq(&stack_slot_capture), stack_slot_matcher),
+ IsInt32Constant(0), IsInt32Constant(low_word_value(0)),
+ IsStore(StoreRepresentation(MachineRepresentation::kWord32,
+ WriteBarrierKind::kNoWriteBarrier),
+ AllOf(CaptureEq(&stack_slot_capture), stack_slot_matcher),
+ IsInt32Constant(4), IsInt32Constant(high_word_value(0)),
+ start(), start()),
+ start());
+
+ EXPECT_THAT(
+ graph()->end()->InputAt(1),
+ IsReturn(IsLoad(MachineType::Float64(),
+ AllOf(CaptureEq(&stack_slot_capture), stack_slot_matcher),
+ IsInt32Constant(0),
+ AllOf(CaptureEq(&store_capture), store_matcher), start()),
+ start(), start()));
+}
+// kExprI64ReinterpretF64:
+TEST_F(Int64LoweringTest, I64ReinterpretF64) {
+ LowerGraph(graph()->NewNode(machine()->BitcastFloat64ToInt64(),
+ Float64Constant(bit_cast<double>(value(0)))),
+ MachineRepresentation::kWord64);
+
+ Capture<Node*> stack_slot;
+ Matcher<Node*> stack_slot_matcher =
+ IsStackSlot(MachineRepresentation::kWord64);
+
+ Capture<Node*> store;
+ Matcher<Node*> store_matcher = IsStore(
+ StoreRepresentation(MachineRepresentation::kFloat64,
+ WriteBarrierKind::kNoWriteBarrier),
+ AllOf(CaptureEq(&stack_slot), stack_slot_matcher), IsInt32Constant(0),
+ IsFloat64Constant(bit_cast<double>(value(0))), start(), start());
+
+ EXPECT_THAT(
+ graph()->end()->InputAt(1),
+ IsReturn2(IsLoad(MachineType::Int32(),
+ AllOf(CaptureEq(&stack_slot), stack_slot_matcher),
+ IsInt32Constant(0),
+ AllOf(CaptureEq(&store), store_matcher), start()),
+ IsLoad(MachineType::Int32(),
+ AllOf(CaptureEq(&stack_slot), stack_slot_matcher),
+ IsInt32Constant(0x4),
+ AllOf(CaptureEq(&store), store_matcher), start()),
+ start(), start()));
+}
+// kExprI64Clz:
+TEST_F(Int64LoweringTest, I64Clz) {
+ LowerGraph(graph()->NewNode(machine()->Word64Clz(), Int64Constant(value(0))),
+ MachineRepresentation::kWord64);
+
+ Capture<Node*> branch_capture;
+ Matcher<Node*> branch_matcher = IsBranch(
+ IsWord32Equal(IsInt32Constant(high_word_value(0)), IsInt32Constant(0)),
+ start());
+
+ EXPECT_THAT(
+ graph()->end()->InputAt(1),
+ IsReturn2(
+ IsPhi(MachineRepresentation::kWord32,
+ IsInt32Add(IsWord32Clz(IsInt32Constant(low_word_value(0))),
+ IsInt32Constant(32)),
+ IsWord32Clz(IsInt32Constant(high_word_value(0))),
+ IsMerge(
+ IsIfTrue(AllOf(CaptureEq(&branch_capture), branch_matcher)),
+ IsIfFalse(
+ AllOf(CaptureEq(&branch_capture), branch_matcher)))),
+ IsInt32Constant(0), start(), start()));
+}
+// kExprI64Ctz:
+TEST_F(Int64LoweringTest, I64Ctz) {
+ LowerGraph(graph()->NewNode(machine()->Word64CtzPlaceholder(),
+ Int64Constant(value(0))),
+ MachineRepresentation::kWord64);
+ Capture<Node*> branch_capture;
+ Matcher<Node*> branch_matcher = IsBranch(
+ IsWord32Equal(IsInt32Constant(low_word_value(0)), IsInt32Constant(0)),
+ start());
+ EXPECT_THAT(
+ graph()->end()->InputAt(1),
+ IsReturn2(
+ IsPhi(MachineRepresentation::kWord32,
+ IsInt32Add(IsWord32Ctz(IsInt32Constant(high_word_value(0))),
+ IsInt32Constant(32)),
+ IsWord32Ctz(IsInt32Constant(low_word_value(0))),
+ IsMerge(
+ IsIfTrue(AllOf(CaptureEq(&branch_capture), branch_matcher)),
+ IsIfFalse(
+ AllOf(CaptureEq(&branch_capture), branch_matcher)))),
+ IsInt32Constant(0), start(), start()));
+}
+// kExprI64Popcnt:
+
+TEST_F(Int64LoweringTest, Dfs) {
+ Node* common = Int64Constant(value(0));
+ LowerGraph(graph()->NewNode(machine()->Word64And(), common,
+ graph()->NewNode(machine()->Word64And(), common,
+ Int64Constant(value(1)))),
+ MachineRepresentation::kWord64);
+
+ EXPECT_THAT(
+ graph()->end()->InputAt(1),
+ IsReturn2(IsWord32And(IsInt32Constant(low_word_value(0)),
+ IsWord32And(IsInt32Constant(low_word_value(0)),
+ IsInt32Constant(low_word_value(1)))),
+ IsWord32And(IsInt32Constant(high_word_value(0)),
+ IsWord32And(IsInt32Constant(high_word_value(0)),
+ IsInt32Constant(high_word_value(1)))),
+ start(), start()));
+}
+
+TEST_F(Int64LoweringTest, I64Popcnt) {
+ LowerGraph(graph()->NewNode(machine()->Word64PopcntPlaceholder(),
+ Int64Constant(value(0))),
+ MachineRepresentation::kWord64);
+
+ EXPECT_THAT(
+ graph()->end()->InputAt(1),
+ IsReturn2(IsInt32Add(IsWord32Popcnt(IsInt32Constant(low_word_value(0))),
+ IsWord32Popcnt(IsInt32Constant(high_word_value(0)))),
+ IsInt32Constant(0), start(), start()));
+}
+
+TEST_F(Int64LoweringTest, I64Ror) {
+ LowerGraph(graph()->NewNode(machine()->Word64Ror(), Int64Constant(value(0)),
+ Parameter(0)),
+ MachineRepresentation::kWord64, MachineRepresentation::kWord64, 1);
+
+ Matcher<Node*> branch_lt32_matcher =
+ IsBranch(IsInt32LessThan(IsParameter(0), IsInt32Constant(32)), start());
+
+ Matcher<Node*> low_input_matcher = IsPhi(
+ MachineRepresentation::kWord32, IsInt32Constant(low_word_value(0)),
+ IsInt32Constant(high_word_value(0)),
+ IsMerge(IsIfTrue(branch_lt32_matcher), IsIfFalse(branch_lt32_matcher)));
+
+ Matcher<Node*> high_input_matcher = IsPhi(
+ MachineRepresentation::kWord32, IsInt32Constant(high_word_value(0)),
+ IsInt32Constant(low_word_value(0)),
+ IsMerge(IsIfTrue(branch_lt32_matcher), IsIfFalse(branch_lt32_matcher)));
+
+ Matcher<Node*> shift_matcher =
+ IsWord32And(IsParameter(0), IsInt32Constant(0x1f));
+
+ Matcher<Node*> bit_mask_matcher = IsWord32Shl(
+ IsWord32Sar(IsInt32Constant(std::numeric_limits<int32_t>::min()),
+ shift_matcher),
+ IsInt32Constant(1));
+
+ Matcher<Node*> inv_mask_matcher =
+ IsWord32Xor(bit_mask_matcher, IsInt32Constant(-1));
+
+ EXPECT_THAT(
+ graph()->end()->InputAt(1),
+ IsReturn2(
+ IsWord32Or(IsWord32And(IsWord32Ror(low_input_matcher, shift_matcher),
+ inv_mask_matcher),
+ IsWord32And(IsWord32Ror(high_input_matcher, shift_matcher),
+ bit_mask_matcher)),
+ IsWord32Or(IsWord32And(IsWord32Ror(high_input_matcher, shift_matcher),
+ inv_mask_matcher),
+ IsWord32And(IsWord32Ror(low_input_matcher, shift_matcher),
+ bit_mask_matcher)),
+ start(), start()));
+}
+
+TEST_F(Int64LoweringTest, I64Ror_0) {
+ LowerGraph(graph()->NewNode(machine()->Word64Ror(), Int64Constant(value(0)),
+ Int32Constant(0)),
+ MachineRepresentation::kWord64);
+
+ EXPECT_THAT(graph()->end()->InputAt(1),
+ IsReturn2(IsInt32Constant(low_word_value(0)),
+ IsInt32Constant(high_word_value(0)), start(), start()));
+}
+
+TEST_F(Int64LoweringTest, I64Ror_32) {
+ LowerGraph(graph()->NewNode(machine()->Word64Ror(), Int64Constant(value(0)),
+ Int32Constant(32)),
+ MachineRepresentation::kWord64);
+
+ EXPECT_THAT(graph()->end()->InputAt(1),
+ IsReturn2(IsInt32Constant(high_word_value(0)),
+ IsInt32Constant(low_word_value(0)), start(), start()));
+}
+
+TEST_F(Int64LoweringTest, I64Ror_11) {
+ LowerGraph(graph()->NewNode(machine()->Word64Ror(), Int64Constant(value(0)),
+ Int32Constant(11)),
+ MachineRepresentation::kWord64);
+
+ EXPECT_THAT(
+ graph()->end()->InputAt(1),
+ IsReturn2(IsWord32Or(IsWord32Shr(IsInt32Constant(low_word_value(0)),
+ IsInt32Constant(11)),
+ IsWord32Shl(IsInt32Constant(high_word_value(0)),
+ IsInt32Constant(21))),
+ IsWord32Or(IsWord32Shr(IsInt32Constant(high_word_value(0)),
+ IsInt32Constant(11)),
+ IsWord32Shl(IsInt32Constant(low_word_value(0)),
+ IsInt32Constant(21))),
+ start(), start()));
+}
+
+TEST_F(Int64LoweringTest, I64Ror_43) {
+ LowerGraph(graph()->NewNode(machine()->Word64Ror(), Int64Constant(value(0)),
+ Int32Constant(43)),
+ MachineRepresentation::kWord64);
+
+ EXPECT_THAT(
+ graph()->end()->InputAt(1),
+ IsReturn2(IsWord32Or(IsWord32Shr(IsInt32Constant(high_word_value(0)),
+ IsInt32Constant(11)),
+ IsWord32Shl(IsInt32Constant(low_word_value(0)),
+ IsInt32Constant(21))),
+ IsWord32Or(IsWord32Shr(IsInt32Constant(low_word_value(0)),
+ IsInt32Constant(11)),
+ IsWord32Shl(IsInt32Constant(high_word_value(0)),
+ IsInt32Constant(21))),
+ start(), start()));
+}
+
+TEST_F(Int64LoweringTest, I64PhiWord64) {
+ LowerGraph(graph()->NewNode(common()->Phi(MachineRepresentation::kWord64, 2),
+ Int64Constant(value(0)), Int64Constant(value(1)),
+ start()),
+ MachineRepresentation::kWord64);
+
+ EXPECT_THAT(graph()->end()->InputAt(1),
+ IsReturn2(IsPhi(MachineRepresentation::kWord32,
+ IsInt32Constant(low_word_value(0)),
+ IsInt32Constant(low_word_value(1)), start()),
+ IsPhi(MachineRepresentation::kWord32,
+ IsInt32Constant(high_word_value(0)),
+ IsInt32Constant(high_word_value(1)), start()),
+ start(), start()));
+}
+
+void TestPhi(Int64LoweringTest* test, MachineRepresentation rep, Node* v1,
+ Node* v2) {
+ test->LowerGraph(test->graph()->NewNode(test->common()->Phi(rep, 2), v1, v2,
+ test->start()),
+ rep);
+
+ EXPECT_THAT(test->graph()->end()->InputAt(1),
+ IsReturn(IsPhi(rep, v1, v2, test->start()), test->start(),
+ test->start()));
+}
+
+TEST_F(Int64LoweringTest, I64PhiFloat32) {
+ TestPhi(this, MachineRepresentation::kFloat32, Float32Constant(1.5),
+ Float32Constant(2.5));
+}
+
+TEST_F(Int64LoweringTest, I64PhiFloat64) {
+ TestPhi(this, MachineRepresentation::kFloat64, Float32Constant(1.5),
+ Float32Constant(2.5));
+}
+
+TEST_F(Int64LoweringTest, I64PhiWord32) {
+ TestPhi(this, MachineRepresentation::kWord32, Float32Constant(1),
+ Float32Constant(2));
+}
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc b/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
index 9e14cda7fb..0f8eed7958 100644
--- a/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
@@ -43,7 +43,7 @@ class JSBuiltinReducerTest : public TypedGraphTest {
isolate()->factory()->NewStringFromAsciiChecked(
"Math")).ToHandleChecked();
Handle<JSFunction> f = Handle<JSFunction>::cast(
- JSObject::GetProperty(
+ Object::GetProperty(
m, isolate()->factory()->NewStringFromAsciiChecked(name))
.ToHandleChecked());
return HeapConstant(f);
@@ -149,8 +149,8 @@ TEST_F(JSBuiltinReducerTest, MathImul) {
Node* control = graph()->start();
Node* context = UndefinedConstant();
Node* frame_state = graph()->start();
- TRACED_FOREACH(Type*, t0, kIntegral32Types) {
- TRACED_FOREACH(Type*, t1, kIntegral32Types) {
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ TRACED_FOREACH(Type*, t1, kNumberTypes) {
Node* p0 = Parameter(t0, 0);
Node* p1 = Parameter(t1, 1);
Node* call = graph()->NewNode(javascript()->CallFunction(4), function,
@@ -159,7 +159,8 @@ TEST_F(JSBuiltinReducerTest, MathImul) {
Reduction r = Reduce(call);
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsInt32Mul(p0, p1));
+ EXPECT_THAT(r.replacement(),
+ IsNumberImul(IsNumberToUint32(p0), IsNumberToUint32(p1)));
}
}
}
diff --git a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
index 919c1b2237..de0eefc531 100644
--- a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
@@ -91,7 +91,8 @@ TEST_F(JSIntrinsicLoweringTest, InlineOptimizedDoubleLo) {
graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineDoubleLo, 1),
input, context, effect, control));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFloat64ExtractLowWord32(input));
+ EXPECT_THAT(r.replacement(),
+ IsFloat64ExtractLowWord32(IsGuard(Type::Number(), input, _)));
}
@@ -108,7 +109,8 @@ TEST_F(JSIntrinsicLoweringTest, InlineOptimizedDoubleHi) {
graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineDoubleHi, 1),
input, context, effect, control));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFloat64ExtractHighWord32(input));
+ EXPECT_THAT(r.replacement(),
+ IsFloat64ExtractHighWord32(IsGuard(Type::Number(), input, _)));
}
@@ -240,58 +242,6 @@ TEST_F(JSIntrinsicLoweringTest, InlineIsJSReceiver) {
// -----------------------------------------------------------------------------
-// %_MathFloor
-
-
-TEST_F(JSIntrinsicLoweringTest, InlineMathFloor) {
- Node* const input = Parameter(0);
- Node* const context = Parameter(1);
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Reduction const r = Reduce(
- graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineMathFloor, 1),
- input, context, effect, control),
- MachineOperatorBuilder::kFloat64RoundDown);
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFloat64RoundDown(input));
-}
-
-
-// -----------------------------------------------------------------------------
-// %_MathSqrt
-
-
-TEST_F(JSIntrinsicLoweringTest, InlineMathSqrt) {
- Node* const input = Parameter(0);
- Node* const context = Parameter(1);
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Reduction const r = Reduce(
- graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineMathSqrt, 1),
- input, context, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFloat64Sqrt(input));
-}
-
-
-// -----------------------------------------------------------------------------
-// %_MathClz32
-
-
-TEST_F(JSIntrinsicLoweringTest, InlineMathClz32) {
- Node* const input = Parameter(0);
- Node* const context = Parameter(1);
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Reduction const r = Reduce(
- graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineMathClz32, 1),
- input, context, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsWord32Clz(input));
-}
-
-
-// -----------------------------------------------------------------------------
// %_ValueOf
@@ -334,6 +284,23 @@ TEST_F(JSIntrinsicLoweringTest, InlineValueOf) {
AllOf(CaptureEq(&if_false0), IsIfFalse(CaptureEq(&branch0))))));
}
+// -----------------------------------------------------------------------------
+// %_GetOrdinaryHasInstance
+
+TEST_F(JSIntrinsicLoweringTest, InlineGetOrdinaryHasInstance) {
+ Node* const context = Parameter(0);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kInlineGetOrdinaryHasInstance, 0),
+ context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsLoadContext(
+ ContextAccess(0, Context::ORDINARY_HAS_INSTANCE_INDEX, true), _));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
index e37d4a2913..1adb5dae93 100644
--- a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -67,7 +67,7 @@ Type* const kJSTypes[] = {Type::Undefined(), Type::Null(), Type::Boolean(),
STATIC_ASSERT(LANGUAGE_END == 3);
-const LanguageMode kLanguageModes[] = {SLOPPY, STRICT, STRONG};
+const LanguageMode kLanguageModes[] = {SLOPPY, STRICT};
} // namespace
diff --git a/deps/v8/test/unittests/compiler/load-elimination-unittest.cc b/deps/v8/test/unittests/compiler/load-elimination-unittest.cc
index 3ad11cf43f..38bb151dba 100644
--- a/deps/v8/test/unittests/compiler/load-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/load-elimination-unittest.cc
@@ -12,30 +12,33 @@ namespace v8 {
namespace internal {
namespace compiler {
-class LoadEliminationTest : public GraphTest {
+class LoadEliminationTest : public TypedGraphTest {
public:
- LoadEliminationTest() : GraphTest(3), simplified_(zone()) {}
+ LoadEliminationTest()
+ : TypedGraphTest(3), common_(zone()), simplified_(zone()) {}
~LoadEliminationTest() override {}
protected:
Reduction Reduce(Node* node) {
// TODO(titzer): mock the GraphReducer here for better unit testing.
GraphReducer graph_reducer(zone(), graph());
- LoadElimination reducer(&graph_reducer);
+ LoadElimination reducer(&graph_reducer, graph(), common());
return reducer.Reduce(node);
}
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+ CommonOperatorBuilder* common() { return &common_; }
private:
+ CommonOperatorBuilder common_;
SimplifiedOperatorBuilder simplified_;
};
TEST_F(LoadEliminationTest, LoadFieldWithStoreField) {
- Node* object1 = Parameter(0);
- Node* object2 = Parameter(1);
- Node* value = Parameter(2);
+ Node* object1 = Parameter(Type::Any(), 0);
+ Node* object2 = Parameter(Type::Any(), 1);
+ Node* value = Parameter(Type::Any(), 2);
Node* effect = graph()->start();
Node* control = graph()->start();
diff --git a/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc b/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
index 9dcec85ebf..9db490560d 100644
--- a/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
+++ b/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
@@ -64,7 +64,7 @@ class LoopPeelingTest : public GraphTest {
OFStream os(stdout);
os << AsRPO(*graph());
}
- Zone zone;
+ Zone zone(isolate()->allocator());
return LoopFinder::BuildLoopTree(graph(), &zone);
}
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.cc b/deps/v8/test/unittests/compiler/node-test-utils.cc
index ee4cf5446e..6e5d39f68d 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.cc
+++ b/deps/v8/test/unittests/compiler/node-test-utils.cc
@@ -1330,6 +1330,53 @@ class IsStoreMatcher final : public NodeMatcher {
const Matcher<Node*> control_matcher_;
};
+class IsStackSlotMatcher final : public NodeMatcher {
+ public:
+ explicit IsStackSlotMatcher(const Matcher<MachineRepresentation>& rep_matcher)
+ : NodeMatcher(IrOpcode::kStackSlot), rep_matcher_(rep_matcher) {}
+
+ void DescribeTo(std::ostream* os) const final {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose rep (";
+ rep_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(OpParameter<MachineRepresentation>(node),
+ "rep", rep_matcher_, listener));
+ }
+
+ private:
+ const Matcher<MachineRepresentation> rep_matcher_;
+};
+
+class IsGuardMatcher final : public NodeMatcher {
+ public:
+ IsGuardMatcher(const Matcher<Type*>& type_matcher,
+ const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(IrOpcode::kGuard),
+ type_matcher_(type_matcher),
+ value_matcher_(value_matcher),
+ control_matcher_(control_matcher) {}
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(OpParameter<Type*>(node->op()), "type",
+ type_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+ "value", value_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node, 0),
+ "control", control_matcher_, listener));
+ }
+
+ private:
+ const Matcher<Type*> type_matcher_;
+ const Matcher<Node*> value_matcher_;
+ const Matcher<Node*> control_matcher_;
+};
class IsToNumberMatcher final : public NodeMatcher {
public:
@@ -1406,6 +1453,86 @@ class IsLoadContextMatcher final : public NodeMatcher {
const Matcher<Node*> context_matcher_;
};
+class IsQuadopMatcher final : public NodeMatcher {
+ public:
+ IsQuadopMatcher(IrOpcode::Value opcode, const Matcher<Node*>& a_matcher,
+ const Matcher<Node*>& b_matcher,
+ const Matcher<Node*>& c_matcher,
+ const Matcher<Node*>& d_matcher)
+ : NodeMatcher(opcode),
+ a_matcher_(a_matcher),
+ b_matcher_(b_matcher),
+ c_matcher_(c_matcher),
+ d_matcher_(d_matcher) {}
+
+ void DescribeTo(std::ostream* os) const final {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose a (";
+ a_matcher_.DescribeTo(os);
+ *os << ") and b (";
+ b_matcher_.DescribeTo(os);
+ *os << ") and c (";
+ c_matcher_.DescribeTo(os);
+ *os << ") and d (";
+ d_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "a",
+ a_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1), "b",
+ b_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2), "c",
+ c_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 3), "d",
+ d_matcher_, listener));
+ }
+
+ private:
+ const Matcher<Node*> a_matcher_;
+ const Matcher<Node*> b_matcher_;
+ const Matcher<Node*> c_matcher_;
+ const Matcher<Node*> d_matcher_;
+};
+
+class IsTernopMatcher final : public NodeMatcher {
+ public:
+ IsTernopMatcher(IrOpcode::Value opcode, const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& mid_matcher,
+ const Matcher<Node*>& rhs_matcher)
+ : NodeMatcher(opcode),
+ lhs_matcher_(lhs_matcher),
+ mid_matcher_(mid_matcher),
+ rhs_matcher_(rhs_matcher) {}
+
+ void DescribeTo(std::ostream* os) const final {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose lhs (";
+ lhs_matcher_.DescribeTo(os);
+ *os << ") and mid (";
+ mid_matcher_.DescribeTo(os);
+ *os << ") and rhs (";
+ rhs_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "lhs",
+ lhs_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1), "mid",
+ mid_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2), "rhs",
+ rhs_matcher_, listener));
+ }
+
+ private:
+ const Matcher<Node*> lhs_matcher_;
+ const Matcher<Node*> mid_matcher_;
+ const Matcher<Node*> rhs_matcher_;
+};
class IsBinopMatcher final : public NodeMatcher {
public:
@@ -1484,7 +1611,6 @@ class IsParameterMatcher final : public NodeMatcher {
} // namespace
-
Matcher<Node*> IsDead() {
return MakeMatcher(new NodeMatcher(IrOpcode::kDead));
}
@@ -1938,6 +2064,12 @@ Matcher<Node*> IsTailCall(
effect_matcher, control_matcher));
}
+Matcher<Node*> IsGuard(const Matcher<Type*>& type_matcher,
+ const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(
+ new IsGuardMatcher(type_matcher, value_matcher, control_matcher));
+}
Matcher<Node*> IsReferenceEqual(const Matcher<Type*>& type_matcher,
const Matcher<Node*>& lhs_matcher,
@@ -2044,6 +2176,9 @@ Matcher<Node*> IsStore(const Matcher<StoreRepresentation>& rep_matcher,
effect_matcher, control_matcher));
}
+Matcher<Node*> IsStackSlot(const Matcher<MachineRepresentation>& rep_matcher) {
+ return MakeMatcher(new IsStackSlotMatcher(rep_matcher));
+}
Matcher<Node*> IsToNumber(const Matcher<Node*>& base_matcher,
const Matcher<Node*>& context_matcher,
@@ -2069,6 +2204,29 @@ Matcher<Node*> IsLoadFramePointer() {
return MakeMatcher(new NodeMatcher(IrOpcode::kLoadFramePointer));
}
+#define IS_QUADOP_MATCHER(Name) \
+ Matcher<Node*> Is##Name( \
+ const Matcher<Node*>& a_matcher, const Matcher<Node*>& b_matcher, \
+ const Matcher<Node*>& c_matcher, const Matcher<Node*>& d_matcher) { \
+ return MakeMatcher(new IsQuadopMatcher(IrOpcode::k##Name, a_matcher, \
+ b_matcher, c_matcher, d_matcher)); \
+ }
+
+IS_QUADOP_MATCHER(Int32PairAdd)
+IS_QUADOP_MATCHER(Int32PairSub)
+IS_QUADOP_MATCHER(Int32PairMul)
+
+#define IS_TERNOP_MATCHER(Name) \
+ Matcher<Node*> Is##Name(const Matcher<Node*>& lhs_matcher, \
+ const Matcher<Node*>& mid_matcher, \
+ const Matcher<Node*>& rhs_matcher) { \
+ return MakeMatcher(new IsTernopMatcher(IrOpcode::k##Name, lhs_matcher, \
+ mid_matcher, rhs_matcher)); \
+ }
+
+IS_TERNOP_MATCHER(Word32PairShl)
+IS_TERNOP_MATCHER(Word32PairShr)
+IS_TERNOP_MATCHER(Word32PairSar)
#define IS_BINOP_MATCHER(Name) \
Matcher<Node*> Is##Name(const Matcher<Node*>& lhs_matcher, \
@@ -2083,8 +2241,10 @@ IS_BINOP_MATCHER(NumberMultiply)
IS_BINOP_MATCHER(NumberShiftLeft)
IS_BINOP_MATCHER(NumberShiftRight)
IS_BINOP_MATCHER(NumberShiftRightLogical)
+IS_BINOP_MATCHER(NumberImul)
IS_BINOP_MATCHER(Word32And)
IS_BINOP_MATCHER(Word32Or)
+IS_BINOP_MATCHER(Word32Xor)
IS_BINOP_MATCHER(Word32Sar)
IS_BINOP_MATCHER(Word32Shl)
IS_BINOP_MATCHER(Word32Shr)
@@ -2146,6 +2306,8 @@ IS_UNOP_MATCHER(NumberToUint32)
IS_UNOP_MATCHER(ObjectIsReceiver)
IS_UNOP_MATCHER(ObjectIsSmi)
IS_UNOP_MATCHER(Word32Clz)
+IS_UNOP_MATCHER(Word32Ctz)
+IS_UNOP_MATCHER(Word32Popcnt)
#undef IS_UNOP_MATCHER
} // namespace compiler
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.h b/deps/v8/test/unittests/compiler/node-test-utils.h
index 03f2a3b88f..dd036c9939 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.h
+++ b/deps/v8/test/unittests/compiler/node-test-utils.h
@@ -211,6 +211,8 @@ Matcher<Node*> IsNumberShiftRight(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsNumberShiftRightLogical(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsNumberImul(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsAllocate(const Matcher<Node*>& size_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
@@ -261,10 +263,13 @@ Matcher<Node*> IsStore(const Matcher<StoreRepresentation>& rep_matcher,
const Matcher<Node*>& value_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsStackSlot(const Matcher<MachineRepresentation>& rep_matcher);
Matcher<Node*> IsWord32And(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsWord32Or(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord32Xor(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsWord32Sar(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsWord32Shl(const Matcher<Node*>& lhs_matcher,
@@ -276,6 +281,8 @@ Matcher<Node*> IsWord32Ror(const Matcher<Node*>& lhs_matcher,
Matcher<Node*> IsWord32Equal(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsWord32Clz(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsWord32Ctz(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsWord32Popcnt(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsWord64And(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsWord64Or(const Matcher<Node*>& lhs_matcher,
@@ -356,6 +363,35 @@ Matcher<Node*> IsNumberToUint32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsParameter(const Matcher<int> index_matcher);
Matcher<Node*> IsLoadFramePointer();
+Matcher<Node*> IsInt32PairAdd(const Matcher<Node*>& a_matcher,
+ const Matcher<Node*>& b_matcher,
+ const Matcher<Node*>& c_matcher,
+ const Matcher<Node*>& d_matcher);
+Matcher<Node*> IsInt32PairSub(const Matcher<Node*>& a_matcher,
+ const Matcher<Node*>& b_matcher,
+ const Matcher<Node*>& c_matcher,
+ const Matcher<Node*>& d_matcher);
+Matcher<Node*> IsInt32PairMul(const Matcher<Node*>& a_matcher,
+ const Matcher<Node*>& b_matcher,
+ const Matcher<Node*>& c_matcher,
+ const Matcher<Node*>& d_matcher);
+
+Matcher<Node*> IsWord32PairShl(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& mid_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord32PairShr(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& mid_matcher,
+ const Matcher<Node*>& rhs_matcher);
+
+Matcher<Node*> IsWord32PairSar(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& mid_matcher,
+ const Matcher<Node*>& rhs_matcher);
+
+Matcher<Node*> IsStackSlot();
+Matcher<Node*> IsGuard(const Matcher<Type*>& type_matcher,
+ const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& control_matcher);
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/s390/OWNERS b/deps/v8/test/unittests/compiler/s390/OWNERS
new file mode 100644
index 0000000000..eb007cb908
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/s390/OWNERS
@@ -0,0 +1,5 @@
+jyan@ca.ibm.com
+dstence@us.ibm.com
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
diff --git a/deps/v8/test/unittests/compiler/s390/instruction-selector-s390-unittest.cc b/deps/v8/test/unittests/compiler/s390/instruction-selector-s390-unittest.cc
new file mode 100644
index 0000000000..5fe72eec40
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/s390/instruction-selector-s390-unittest.cc
@@ -0,0 +1,11 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/compiler/instruction-selector-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/scheduler-unittest.cc b/deps/v8/test/unittests/compiler/scheduler-unittest.cc
index 6cf07345a2..da77bdcb4c 100644
--- a/deps/v8/test/unittests/compiler/scheduler-unittest.cc
+++ b/deps/v8/test/unittests/compiler/scheduler-unittest.cc
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compiler/schedule.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/common-operator.h"
-#include "src/compiler/graph.h"
#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/graph.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
-#include "src/compiler/schedule.h"
#include "src/compiler/scheduler.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/source-position.h"
@@ -136,6 +136,51 @@ TARGET_TEST_F(SchedulerTest, FloatingDiamond1) {
ComputeAndVerifySchedule(13);
}
+TARGET_TEST_F(SchedulerTest, FloatingDeadDiamond1) {
+ Node* start = graph()->NewNode(common()->Start(1));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+ Node* d1 = CreateDiamond(graph(), common(), p0);
+ USE(d1);
+ Node* ret = graph()->NewNode(common()->Return(), p0, start, start);
+ Node* end = graph()->NewNode(common()->End(1), ret);
+
+ graph()->SetEnd(end);
+
+ ComputeAndVerifySchedule(4);
+}
+
+TARGET_TEST_F(SchedulerTest, FloatingDeadDiamond2) {
+ Graph* g = graph();
+ Node* start = g->NewNode(common()->Start(1));
+ g->SetStart(start);
+
+ Node* n1 = g->NewNode(common()->Parameter(1), start);
+
+ Node* n2 = g->NewNode(common()->Branch(), n1, start);
+ Node* n3 = g->NewNode(common()->IfTrue(), n2);
+ Node* n4 = g->NewNode(common()->IfFalse(), n2);
+ Node* n5 = g->NewNode(common()->Int32Constant(-100));
+ Node* n6 = g->NewNode(common()->Return(), n5, start, n4);
+ Node* n7 = g->NewNode(common()->Int32Constant(0));
+ Node* n8 = g->NewNode(common()->Return(), n7, start, n3);
+ Node* n9 = g->NewNode(common()->End(2), n6, n8);
+
+ // Dead nodes
+ Node* n10 = g->NewNode(common()->Branch(), n1, n3);
+ Node* n11 = g->NewNode(common()->IfTrue(), n10);
+ Node* n12 = g->NewNode(common()->IfFalse(), n10);
+ Node* n13 = g->NewNode(common()->Merge(2), n11, n12);
+ Node* n14 =
+ g->NewNode(common()->Phi(MachineRepresentation::kWord32, 2), n1, n7, n13);
+
+ USE(n14);
+
+ g->SetEnd(n9);
+
+ ComputeAndVerifySchedule(10);
+}
TARGET_TEST_F(SchedulerTest, FloatingDiamond2) {
Node* start = graph()->NewNode(common()->Start(2));
diff --git a/deps/v8/test/unittests/compiler/select-lowering-unittest.cc b/deps/v8/test/unittests/compiler/select-lowering-unittest.cc
deleted file mode 100644
index 43cfd8484a..0000000000
--- a/deps/v8/test/unittests/compiler/select-lowering-unittest.cc
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/select-lowering.h"
-#include "test/unittests/compiler/graph-unittest.h"
-#include "test/unittests/compiler/node-test-utils.h"
-#include "testing/gmock-support.h"
-
-using testing::AllOf;
-using testing::Capture;
-using testing::CaptureEq;
-using testing::Not;
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-class SelectLoweringTest : public GraphTest {
- public:
- SelectLoweringTest() : GraphTest(5), lowering_(graph(), common()) {}
-
- protected:
- Reduction Reduce(Node* node) { return lowering_.Reduce(node); }
-
- private:
- SelectLowering lowering_;
-};
-
-
-TEST_F(SelectLoweringTest, SelectWithSameConditions) {
- Node* const p0 = Parameter(0);
- Node* const p1 = Parameter(1);
- Node* const p2 = Parameter(2);
- Node* const p3 = Parameter(3);
- Node* const p4 = Parameter(4);
- Node* const s0 = graph()->NewNode(
- common()->Select(MachineRepresentation::kWord32), p0, p1, p2);
-
- Capture<Node*> branch;
- Capture<Node*> merge;
- {
- Reduction const r = Reduce(s0);
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(
- r.replacement(),
- IsPhi(
- MachineRepresentation::kWord32, p1, p2,
- AllOf(CaptureEq(&merge),
- IsMerge(IsIfTrue(CaptureEq(&branch)),
- IsIfFalse(AllOf(CaptureEq(&branch),
- IsBranch(p0, graph()->start())))))));
- }
- {
- Reduction const r = Reduce(graph()->NewNode(
- common()->Select(MachineRepresentation::kWord32), p0, p3, p4));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsPhi(MachineRepresentation::kWord32, p3, p4,
- CaptureEq(&merge)));
- }
- {
- // We must not reuse the diamond if it is reachable from either else/then
- // values of the Select, because the resulting graph can not be scheduled.
- Reduction const r = Reduce(graph()->NewNode(
- common()->Select(MachineRepresentation::kWord32), p0, s0, p0));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsPhi(MachineRepresentation::kWord32, s0, p0,
- Not(CaptureEq(&merge))));
- }
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/zone-pool-unittest.cc b/deps/v8/test/unittests/compiler/zone-pool-unittest.cc
index 3bfde4bdce..47f1cc5c75 100644
--- a/deps/v8/test/unittests/compiler/zone-pool-unittest.cc
+++ b/deps/v8/test/unittests/compiler/zone-pool-unittest.cc
@@ -12,7 +12,7 @@ namespace compiler {
class ZonePoolTest : public TestWithIsolate {
public:
- ZonePoolTest() {}
+ ZonePoolTest() : zone_pool_(&allocator_) {}
protected:
ZonePool* zone_pool() { return &zone_pool_; }
@@ -38,6 +38,7 @@ class ZonePoolTest : public TestWithIsolate {
}
private:
+ base::AccountingAllocator allocator_;
ZonePool zone_pool_;
base::RandomNumberGenerator rng;
};
diff --git a/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc b/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc
index 6413e363f3..99351b5a5e 100644
--- a/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc
+++ b/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc
@@ -74,43 +74,6 @@ TEST(GCIdleTimeHandler, EstimateMarkingStepSizeOverflow2) {
}
-TEST(GCIdleTimeHandler, EstimateMarkCompactTimeInitial) {
- size_t size = 100 * MB;
- size_t time = GCIdleTimeHandler::EstimateMarkCompactTime(size, 0);
- EXPECT_EQ(size / GCIdleTimeHandler::kInitialConservativeMarkCompactSpeed,
- time);
-}
-
-
-TEST(GCIdleTimeHandler, EstimateMarkCompactTimeNonZero) {
- size_t size = 100 * MB;
- size_t speed = 1 * MB;
- size_t time = GCIdleTimeHandler::EstimateMarkCompactTime(size, speed);
- EXPECT_EQ(size / speed, time);
-}
-
-
-TEST(GCIdleTimeHandler, EstimateMarkCompactTimeMax) {
- size_t size = std::numeric_limits<size_t>::max();
- size_t speed = 1;
- size_t time = GCIdleTimeHandler::EstimateMarkCompactTime(size, speed);
- EXPECT_EQ(GCIdleTimeHandler::kMaxMarkCompactTimeInMs, time);
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, ShouldDoMarkCompact) {
- size_t idle_time_ms = GCIdleTimeHandler::kMaxScheduledIdleTime;
- EXPECT_TRUE(GCIdleTimeHandler::ShouldDoMarkCompact(idle_time_ms, 0, 0));
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, DontDoMarkCompact) {
- size_t idle_time_ms = 1;
- EXPECT_FALSE(GCIdleTimeHandler::ShouldDoMarkCompact(
- idle_time_ms, kSizeOfObjects, kMarkingSpeed));
-}
-
-
TEST_F(GCIdleTimeHandlerTest, ShouldDoFinalIncrementalMarkCompact) {
size_t idle_time_ms = 16;
EXPECT_TRUE(GCIdleTimeHandler::ShouldDoFinalIncrementalMarkCompact(
diff --git a/deps/v8/test/unittests/heap/gc-tracer-unittest.cc b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
new file mode 100644
index 0000000000..2bf4d037d3
--- /dev/null
+++ b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
@@ -0,0 +1,49 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cmath>
+#include <limits>
+
+#include "src/heap/gc-tracer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+TEST(GCTracer, AverageSpeed) {
+ RingBuffer<BytesAndDuration> buffer;
+ EXPECT_EQ(100 / 2,
+ GCTracer::AverageSpeed(buffer, MakeBytesAndDuration(100, 2), 0));
+ buffer.Push(MakeBytesAndDuration(100, 8));
+ EXPECT_EQ(100 / 2,
+ GCTracer::AverageSpeed(buffer, MakeBytesAndDuration(100, 2), 2));
+ EXPECT_EQ(200 / 10,
+ GCTracer::AverageSpeed(buffer, MakeBytesAndDuration(100, 2), 3));
+ const int max_speed = 1024 * MB;
+ buffer.Reset();
+ buffer.Push(MakeBytesAndDuration(max_speed, 0.5));
+ EXPECT_EQ(max_speed,
+ GCTracer::AverageSpeed(buffer, MakeBytesAndDuration(0, 0), 1));
+ const int min_speed = 1;
+ buffer.Reset();
+ buffer.Push(MakeBytesAndDuration(1, 10000));
+ EXPECT_EQ(min_speed,
+ GCTracer::AverageSpeed(buffer, MakeBytesAndDuration(0, 0), 1));
+ buffer.Reset();
+ int sum = 0;
+ for (int i = 0; i < buffer.kSize; i++) {
+ sum += i + 1;
+ buffer.Push(MakeBytesAndDuration(i + 1, 1));
+ }
+ EXPECT_EQ(
+ sum * 1.0 / buffer.kSize,
+ GCTracer::AverageSpeed(buffer, MakeBytesAndDuration(0, 0), buffer.kSize));
+ buffer.Push(MakeBytesAndDuration(100, 1));
+ EXPECT_EQ(
+ (sum * 1.0 - 1 + 100) / buffer.kSize,
+ GCTracer::AverageSpeed(buffer, MakeBytesAndDuration(0, 0), buffer.kSize));
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/heap/scavenge-job-unittest.cc b/deps/v8/test/unittests/heap/scavenge-job-unittest.cc
index dbd463c2d5..91abbb1f8f 100644
--- a/deps/v8/test/unittests/heap/scavenge-job-unittest.cc
+++ b/deps/v8/test/unittests/heap/scavenge-job-unittest.cc
@@ -71,7 +71,7 @@ TEST(ScavengeJob, AllocationLimitHighScavengeSpeed) {
EXPECT_FALSE(ScavengeJob::ReachedIdleAllocationLimit(
scavenge_speed, expected_size - 1, kNewSpaceCapacity));
EXPECT_TRUE(ScavengeJob::ReachedIdleAllocationLimit(
- scavenge_speed, expected_size, kNewSpaceCapacity));
+ scavenge_speed, expected_size + 1, kNewSpaceCapacity));
}
diff --git a/deps/v8/test/unittests/heap/slot-set-unittest.cc b/deps/v8/test/unittests/heap/slot-set-unittest.cc
index 376188915a..26a26f0258 100644
--- a/deps/v8/test/unittests/heap/slot-set-unittest.cc
+++ b/deps/v8/test/unittests/heap/slot-set-unittest.cc
@@ -55,9 +55,9 @@ TEST(SlotSet, Iterate) {
set.Iterate([](Address slot_address) {
uintptr_t intaddr = reinterpret_cast<uintptr_t>(slot_address);
if (intaddr % 3 == 0) {
- return SlotSet::KEEP_SLOT;
+ return KEEP_SLOT;
} else {
- return SlotSet::REMOVE_SLOT;
+ return REMOVE_SLOT;
}
});
@@ -139,5 +139,33 @@ TEST(SlotSet, RemoveRange) {
}
}
+TEST(TypedSlotSet, Iterate) {
+ TypedSlotSet set(0);
+ const int kDelta = 10000001;
+ int added = 0;
+ for (uint32_t i = 0; i < TypedSlotSet::kMaxOffset; i += kDelta) {
+ SlotType type = static_cast<SlotType>(i % NUMBER_OF_SLOT_TYPES);
+ set.Insert(type, i);
+ ++added;
+ }
+ int iterated = 0;
+ set.Iterate([&iterated, kDelta](SlotType type, Address addr) {
+ uint32_t i = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr));
+ EXPECT_EQ(i % NUMBER_OF_SLOT_TYPES, static_cast<uint32_t>(type));
+ EXPECT_EQ(0, i % kDelta);
+ ++iterated;
+ return i % 2 == 0 ? KEEP_SLOT : REMOVE_SLOT;
+ });
+ EXPECT_EQ(added, iterated);
+ iterated = 0;
+ set.Iterate([&iterated](SlotType type, Address addr) {
+ uint32_t i = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr));
+ EXPECT_EQ(0, i % 2);
+ ++iterated;
+ return KEEP_SLOT;
+ });
+ EXPECT_EQ(added / 2, iterated);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index 839215f743..255d836af5 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -139,7 +139,6 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
builder.CompareOperation(Token::Value::EQ, reg)
.CompareOperation(Token::Value::NE, reg)
.CompareOperation(Token::Value::EQ_STRICT, reg)
- .CompareOperation(Token::Value::NE_STRICT, reg)
.CompareOperation(Token::Value::LT, reg)
.CompareOperation(Token::Value::GT, reg)
.CompareOperation(Token::Value::LTE, reg)
@@ -161,6 +160,21 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.JumpIfUndefined(&start)
.JumpIfNotHole(&start);
+ // Longer jumps with constant operands
+ BytecodeLabel end[8];
+ builder.Jump(&end[0])
+ .LoadTrue()
+ .JumpIfTrue(&end[1])
+ .LoadTrue()
+ .JumpIfFalse(&end[2])
+ .LoadLiteral(Smi::FromInt(0))
+ .JumpIfTrue(&end[3])
+ .LoadLiteral(Smi::FromInt(0))
+ .JumpIfFalse(&end[4])
+ .JumpIfNull(&end[5])
+ .JumpIfUndefined(&end[6])
+ .JumpIfNotHole(&end[7]);
+
// Perform an operation that returns boolean value to
// generate JumpIfTrue/False
builder.CompareOperation(Token::Value::EQ, reg)
@@ -205,11 +219,11 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
builder.ForInPrepare(reg)
.ForInDone(reg, reg)
- .ForInNext(reg, reg, reg)
+ .ForInNext(reg, reg, reg, 1)
.ForInStep(reg);
builder.ForInPrepare(wide)
.ForInDone(reg, other)
- .ForInNext(wide, wide, wide)
+ .ForInNext(wide, wide, wide, 1024)
.ForInStep(reg);
// Wide constant pool loads
@@ -223,9 +237,13 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
// Emit wide global load / store operations.
builder.LoadGlobal(name, 1024, TypeofMode::NOT_INSIDE_TYPEOF)
.LoadGlobal(name, 1024, TypeofMode::INSIDE_TYPEOF)
+ .LoadGlobal(name, 1024, TypeofMode::INSIDE_TYPEOF)
.StoreGlobal(name, 1024, LanguageMode::SLOPPY)
.StoreGlobal(wide_name, 1, LanguageMode::STRICT);
+ // Emit extra wide global load.
+ builder.LoadGlobal(name, 1024 * 1024, TypeofMode::NOT_INSIDE_TYPEOF);
+
// Emit wide load / store property operations.
builder.LoadNamedProperty(reg, wide_name, 0)
.LoadKeyedProperty(reg, 2056)
@@ -271,28 +289,44 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.BinaryOperation(Token::Value::ADD, reg)
.JumpIfFalse(&start);
- builder.Debugger();
+ // Intrinsics handled by the interpreter.
+ builder.CallRuntime(Runtime::kInlineIsArray, reg, 1)
+ .CallRuntime(Runtime::kInlineIsArray, wide, 1);
+ builder.Debugger();
+ for (size_t i = 0; i < arraysize(end); i++) {
+ builder.Bind(&end[i]);
+ }
builder.Return();
// Generate BytecodeArray.
Handle<BytecodeArray> the_array = builder.ToBytecodeArray();
CHECK_EQ(the_array->frame_size(),
- (builder.fixed_and_temporary_register_count() +
- builder.translation_register_count()) *
- kPointerSize);
+ builder.fixed_and_temporary_register_count() * kPointerSize);
// Build scorecard of bytecodes encountered in the BytecodeArray.
std::vector<int> scorecard(Bytecodes::ToByte(Bytecode::kLast) + 1);
+
Bytecode final_bytecode = Bytecode::kLdaZero;
int i = 0;
while (i < the_array->length()) {
uint8_t code = the_array->get(i);
scorecard[code] += 1;
final_bytecode = Bytecodes::FromByte(code);
- i += Bytecodes::Size(Bytecodes::FromByte(code));
+ OperandScale operand_scale = OperandScale::kSingle;
+ int prefix_offset = 0;
+ if (Bytecodes::IsPrefixScalingBytecode(final_bytecode)) {
+ operand_scale = Bytecodes::PrefixBytecodeToOperandScale(final_bytecode);
+ prefix_offset = 1;
+ code = the_array->get(i + 1);
+ final_bytecode = Bytecodes::FromByte(code);
+ }
+ i += prefix_offset + Bytecodes::Size(final_bytecode, operand_scale);
}
+ // Insert entry for illegal bytecode as this is never willingly emitted.
+ scorecard[Bytecodes::ToByte(Bytecode::kIllegal)] = 1;
+
// Check return occurs at the end and only once in the BytecodeArray.
CHECK_EQ(final_bytecode, Bytecode::kReturn);
CHECK_EQ(scorecard[Bytecodes::ToByte(final_bytecode)], 1);
@@ -330,7 +364,7 @@ TEST_F(BytecodeArrayBuilderTest, FrameSizesLookGood) {
TEST_F(BytecodeArrayBuilderTest, RegisterValues) {
int index = 1;
- uint8_t operand = static_cast<uint8_t>(-index);
+ int32_t operand = -index;
Register the_register(index);
CHECK_EQ(the_register.index(), index);
@@ -531,6 +565,12 @@ TEST_F(BytecodeArrayBuilderTest, BackwardJumps) {
for (int i = 0; i < 63; i++) {
builder.Jump(&label4);
}
+
+ // Add padding to force wide backwards jumps.
+ for (int i = 0; i < 256; i++) {
+ builder.LoadTrue();
+ }
+
builder.BinaryOperation(Token::Value::ADD, reg).JumpIfFalse(&label4);
builder.BinaryOperation(Token::Value::ADD, reg).JumpIfTrue(&label3);
builder.CompareOperation(Token::Value::EQ, reg).JumpIfFalse(&label2);
@@ -546,51 +586,65 @@ TEST_F(BytecodeArrayBuilderTest, BackwardJumps) {
// Ignore compare operation.
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfTrue);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK_EQ(iterator.GetImmediateOperand(0), -2);
iterator.Advance();
// Ignore compare operation.
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfFalse);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK_EQ(iterator.GetImmediateOperand(0), -2);
iterator.Advance();
// Ignore binary operation.
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfToBooleanTrue);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK_EQ(iterator.GetImmediateOperand(0), -2);
iterator.Advance();
// Ignore binary operation.
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfToBooleanFalse);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK_EQ(iterator.GetImmediateOperand(0), -2);
iterator.Advance();
for (int i = 0; i < 63; i++) {
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK_EQ(iterator.GetImmediateOperand(0), -i * 2 - 4);
iterator.Advance();
}
+ // Check padding to force wide backwards jumps.
+ for (int i = 0; i < 256; i++) {
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaTrue);
+ iterator.Advance();
+ }
// Ignore binary operation.
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(),
- Bytecode::kJumpIfToBooleanFalseConstant);
- CHECK_EQ(Smi::cast(*iterator.GetConstantForIndexOperand(0))->value(), -132);
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfToBooleanFalse);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kDouble);
+ CHECK_EQ(iterator.GetImmediateOperand(0), -389);
iterator.Advance();
// Ignore binary operation.
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfToBooleanTrueConstant);
- CHECK_EQ(Smi::cast(*iterator.GetConstantForIndexOperand(0))->value(), -140);
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfToBooleanTrue);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kDouble);
+ CHECK_EQ(iterator.GetImmediateOperand(0), -399);
iterator.Advance();
// Ignore compare operation.
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfFalseConstant);
- CHECK_EQ(Smi::cast(*iterator.GetConstantForIndexOperand(0))->value(), -148);
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfFalse);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kDouble);
+ CHECK_EQ(iterator.GetImmediateOperand(0), -409);
iterator.Advance();
// Ignore compare operation.
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfTrueConstant);
- CHECK_EQ(Smi::cast(*iterator.GetConstantForIndexOperand(0))->value(), -156);
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfTrue);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kDouble);
+ CHECK_EQ(iterator.GetImmediateOperand(0), -419);
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpConstant);
- CHECK_EQ(Smi::cast(*iterator.GetConstantForIndexOperand(0))->value(), -160);
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kDouble);
+ CHECK_EQ(iterator.GetImmediateOperand(0), -425);
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kReturn);
iterator.Advance();
@@ -652,6 +706,85 @@ TEST_F(BytecodeArrayBuilderTest, LabelAddressReuse) {
CHECK(iterator.done());
}
+TEST_F(BytecodeArrayBuilderTest, OperandScales) {
+ CHECK_EQ(BytecodeArrayBuilder::OperandSizesToScale(OperandSize::kByte),
+ OperandScale::kSingle);
+ CHECK_EQ(BytecodeArrayBuilder::OperandSizesToScale(OperandSize::kShort),
+ OperandScale::kDouble);
+ CHECK_EQ(BytecodeArrayBuilder::OperandSizesToScale(OperandSize::kQuad),
+ OperandScale::kQuadruple);
+ CHECK_EQ(BytecodeArrayBuilder::OperandSizesToScale(
+ OperandSize::kShort, OperandSize::kShort, OperandSize::kShort,
+ OperandSize::kShort),
+ OperandScale::kDouble);
+ CHECK_EQ(BytecodeArrayBuilder::OperandSizesToScale(
+ OperandSize::kQuad, OperandSize::kShort, OperandSize::kShort,
+ OperandSize::kShort),
+ OperandScale::kQuadruple);
+ CHECK_EQ(BytecodeArrayBuilder::OperandSizesToScale(
+ OperandSize::kShort, OperandSize::kQuad, OperandSize::kShort,
+ OperandSize::kShort),
+ OperandScale::kQuadruple);
+ CHECK_EQ(BytecodeArrayBuilder::OperandSizesToScale(
+ OperandSize::kShort, OperandSize::kShort, OperandSize::kQuad,
+ OperandSize::kShort),
+ OperandScale::kQuadruple);
+ CHECK_EQ(BytecodeArrayBuilder::OperandSizesToScale(
+ OperandSize::kShort, OperandSize::kShort, OperandSize::kShort,
+ OperandSize::kQuad),
+ OperandScale::kQuadruple);
+}
+
+TEST_F(BytecodeArrayBuilderTest, SizesForSignOperands) {
+ CHECK(BytecodeArrayBuilder::SizeForSignedOperand(0) == OperandSize::kByte);
+ CHECK(BytecodeArrayBuilder::SizeForSignedOperand(kMaxInt8) ==
+ OperandSize::kByte);
+ CHECK(BytecodeArrayBuilder::SizeForSignedOperand(kMinInt8) ==
+ OperandSize::kByte);
+ CHECK(BytecodeArrayBuilder::SizeForSignedOperand(kMaxInt8 + 1) ==
+ OperandSize::kShort);
+ CHECK(BytecodeArrayBuilder::SizeForSignedOperand(kMinInt8 - 1) ==
+ OperandSize::kShort);
+ CHECK(BytecodeArrayBuilder::SizeForSignedOperand(kMaxInt16) ==
+ OperandSize::kShort);
+ CHECK(BytecodeArrayBuilder::SizeForSignedOperand(kMinInt16) ==
+ OperandSize::kShort);
+ CHECK(BytecodeArrayBuilder::SizeForSignedOperand(kMaxInt16 + 1) ==
+ OperandSize::kQuad);
+ CHECK(BytecodeArrayBuilder::SizeForSignedOperand(kMinInt16 - 1) ==
+ OperandSize::kQuad);
+ CHECK(BytecodeArrayBuilder::SizeForSignedOperand(kMaxInt) ==
+ OperandSize::kQuad);
+ CHECK(BytecodeArrayBuilder::SizeForSignedOperand(kMinInt) ==
+ OperandSize::kQuad);
+}
+
+TEST_F(BytecodeArrayBuilderTest, SizesForUnsignOperands) {
+ // int overloads
+ CHECK(BytecodeArrayBuilder::SizeForUnsignedOperand(0) == OperandSize::kByte);
+ CHECK(BytecodeArrayBuilder::SizeForUnsignedOperand(kMaxUInt8) ==
+ OperandSize::kByte);
+ CHECK(BytecodeArrayBuilder::SizeForUnsignedOperand(kMaxUInt8 + 1) ==
+ OperandSize::kShort);
+ CHECK(BytecodeArrayBuilder::SizeForUnsignedOperand(kMaxUInt16) ==
+ OperandSize::kShort);
+ CHECK(BytecodeArrayBuilder::SizeForUnsignedOperand(kMaxUInt16 + 1) ==
+ OperandSize::kQuad);
+ // size_t overloads
+ CHECK(BytecodeArrayBuilder::SizeForUnsignedOperand(static_cast<size_t>(0)) ==
+ OperandSize::kByte);
+ CHECK(BytecodeArrayBuilder::SizeForUnsignedOperand(
+ static_cast<size_t>(kMaxUInt8)) == OperandSize::kByte);
+ CHECK(BytecodeArrayBuilder::SizeForUnsignedOperand(
+ static_cast<size_t>(kMaxUInt8 + 1)) == OperandSize::kShort);
+ CHECK(BytecodeArrayBuilder::SizeForUnsignedOperand(
+ static_cast<size_t>(kMaxUInt16)) == OperandSize::kShort);
+ CHECK(BytecodeArrayBuilder::SizeForUnsignedOperand(
+ static_cast<size_t>(kMaxUInt16 + 1)) == OperandSize::kQuad);
+ CHECK(BytecodeArrayBuilder::SizeForUnsignedOperand(
+ static_cast<size_t>(kMaxUInt32)) == OperandSize::kQuad);
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
index f2dcd7107c..43c6caa8cf 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
@@ -22,7 +22,7 @@ class BytecodeArrayIteratorTest : public TestWithIsolateAndZone {
TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
// Use a builder to create an array with containing multiple bytecodes
// with 0, 1 and 2 operands.
- BytecodeArrayBuilder builder(isolate(), zone(), 3, 2, 0);
+ BytecodeArrayBuilder builder(isolate(), zone(), 3, 3, 0);
Factory* factory = isolate()->factory();
Handle<HeapObject> heap_num_0 = factory->NewHeapNumber(2.718);
Handle<HeapObject> heap_num_1 = factory->NewHeapNumber(2147483647);
@@ -31,9 +31,9 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
Smi* smi_1 = Smi::FromInt(-65536);
Register reg_0(0);
Register reg_1(1);
- Register reg_2 = Register::FromParameterIndex(2, builder.parameter_count());
+ Register param = Register::FromParameterIndex(2, builder.parameter_count());
Handle<String> name = factory->NewStringFromStaticChars("abc");
- int name_index = 3;
+ int name_index = 2;
int feedback_slot = 97;
builder.LoadLiteral(heap_num_0)
@@ -43,67 +43,139 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
.LoadLiteral(smi_1)
.LoadAccumulatorWithRegister(reg_0)
.LoadNamedProperty(reg_1, name, feedback_slot)
- .StoreAccumulatorInRegister(reg_2)
+ .StoreAccumulatorInRegister(param)
+ .CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, 1, reg_0)
+ .ForInPrepare(reg_0)
.CallRuntime(Runtime::kLoadIC_Miss, reg_0, 1)
.Debugger()
+ .LoadGlobal(name, 0x10000000, TypeofMode::NOT_INSIDE_TYPEOF)
.Return();
// Test iterator sees the expected output from the builder.
BytecodeArrayIterator iterator(builder.ToBytecodeArray());
+ const int kPrefixByteSize = 1;
+ int offset = 0;
+
CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK(iterator.GetConstantForIndexOperand(0).is_identical_to(heap_num_0));
CHECK(!iterator.done());
+ offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK(iterator.GetConstantForIndexOperand(0).is_identical_to(heap_num_1));
CHECK(!iterator.done());
+ offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaZero);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK(!iterator.done());
+ offset += Bytecodes::Size(Bytecode::kLdaZero, OperandScale::kSingle);
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaSmi8);
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaSmi);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK_EQ(Smi::FromInt(iterator.GetImmediateOperand(0)), smi_0);
CHECK(!iterator.done());
+ offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kSingle);
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
- CHECK_EQ(*iterator.GetConstantForIndexOperand(0), smi_1);
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaSmi);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kQuadruple);
+ CHECK_EQ(Smi::FromInt(iterator.GetImmediateOperand(0)), smi_1);
CHECK(!iterator.done());
+ offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kQuadruple) +
+ kPrefixByteSize;
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdar);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
CHECK(!iterator.done());
+ offset += Bytecodes::Size(Bytecode::kLdar, OperandScale::kSingle);
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kLoadIC);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
CHECK_EQ(iterator.GetIndexOperand(1), name_index);
CHECK_EQ(iterator.GetIndexOperand(2), feedback_slot);
CHECK(!iterator.done());
+ offset += Bytecodes::Size(Bytecode::kLoadIC, OperandScale::kSingle);
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kStar);
- CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_2.index());
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ CHECK_EQ(iterator.GetRegisterOperand(0).index(), param.index());
+ CHECK_EQ(iterator.GetRegisterOperandRange(0), 1);
+ CHECK(!iterator.done());
+ offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kCallRuntimeForPair);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ CHECK_EQ(iterator.GetRuntimeIdOperand(0), Runtime::kLoadLookupSlotForCall);
+ CHECK_EQ(iterator.GetRegisterOperand(1).index(), param.index());
+ CHECK_EQ(iterator.GetRegisterOperandRange(1), 1);
+ CHECK_EQ(iterator.GetRegisterCountOperand(2), 1);
+ CHECK_EQ(iterator.GetRegisterOperand(3).index(), reg_0.index());
+ CHECK_EQ(iterator.GetRegisterOperandRange(3), 2);
CHECK(!iterator.done());
+ offset +=
+ Bytecodes::Size(Bytecode::kCallRuntimeForPair, OperandScale::kSingle);
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kForInPrepare);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
+ CHECK_EQ(iterator.GetRegisterOperandRange(0), 3);
+ CHECK(!iterator.done());
+ offset += Bytecodes::Size(Bytecode::kForInPrepare, OperandScale::kSingle);
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kCallRuntime);
- CHECK_EQ(static_cast<Runtime::FunctionId>(iterator.GetIndexOperand(0)),
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ CHECK_EQ(static_cast<Runtime::FunctionId>(iterator.GetRuntimeIdOperand(0)),
Runtime::kLoadIC_Miss);
CHECK_EQ(iterator.GetRegisterOperand(1).index(), reg_0.index());
CHECK_EQ(iterator.GetRegisterCountOperand(2), 1);
CHECK(!iterator.done());
+ offset += Bytecodes::Size(Bytecode::kCallRuntime, OperandScale::kSingle);
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kDebugger);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK(!iterator.done());
+ offset += Bytecodes::Size(Bytecode::kDebugger, OperandScale::kSingle);
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaGlobal);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kQuadruple);
+ CHECK_EQ(iterator.current_bytecode_size(), 10);
+ CHECK_EQ(iterator.GetIndexOperand(1), 0x10000000);
+ offset += Bytecodes::Size(Bytecode::kLdaGlobal, OperandScale::kQuadruple) +
+ kPrefixByteSize;
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kReturn);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK(!iterator.done());
iterator.Advance();
CHECK(iterator.done());
diff --git a/deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc
index ec29935b2f..d4dc111d69 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc
@@ -140,29 +140,6 @@ TEST_F(TemporaryRegisterAllocatorTest, RangeAllocationAvailableInTemporaries) {
}
}
-TEST_F(TemporaryRegisterAllocatorTest, RangeAvoidsTranslationBoundary) {
- int boundary = RegisterTranslator::DistanceToTranslationWindow(Register(0));
- int limit = boundary + 64;
-
- for (int run_length = 2; run_length < 32; run_length += 7) {
- ZoneVector<int> run_starts(zone());
- for (int start = 0; start < limit; start += run_length) {
- int run_start =
- allocator()->PrepareForConsecutiveTemporaryRegisters(run_length);
- run_starts.push_back(run_start);
- for (int i = 0; i < run_length; i++) {
- allocator()->BorrowConsecutiveTemporaryRegister(run_start + i);
- }
- CHECK(run_start >= boundary || run_start + run_length <= boundary);
- }
- for (size_t batch = 0; batch < run_starts.size(); batch++) {
- for (int i = run_starts[batch]; i < run_starts[batch] + run_length; i++) {
- allocator()->ReturnTemporaryRegister(i);
- }
- }
- }
-}
-
TEST_F(TemporaryRegisterAllocatorTest, NotInRange) {
for (int i = 0; i < 10; i++) {
int reg = allocator()->BorrowTemporaryRegisterNotInRange(2, 5);
diff --git a/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc b/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
index 212e02996b..b3554c3853 100644
--- a/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
@@ -14,28 +14,27 @@ namespace internal {
namespace interpreter {
TEST(OperandConversion, Registers) {
- int register_count = Register::MaxRegisterIndex() + 1;
+ int register_count = 128;
int step = register_count / 7;
for (int i = 0; i < register_count; i += step) {
if (i <= kMaxInt8) {
- uint8_t operand0 = Register(i).ToOperand();
+ uint32_t operand0 = Register(i).ToOperand();
Register reg0 = Register::FromOperand(operand0);
CHECK_EQ(i, reg0.index());
}
- uint16_t operand1 = Register(i).ToWideOperand();
- Register reg1 = Register::FromWideOperand(operand1);
+ uint32_t operand1 = Register(i).ToOperand();
+ Register reg1 = Register::FromOperand(operand1);
CHECK_EQ(i, reg1.index());
- uint32_t operand2 = Register(i).ToRawOperand();
- Register reg2 = Register::FromRawOperand(operand2);
+ uint32_t operand2 = Register(i).ToOperand();
+ Register reg2 = Register::FromOperand(operand2);
CHECK_EQ(i, reg2.index());
}
for (int i = 0; i <= kMaxUInt8; i++) {
- uint8_t operand = static_cast<uint8_t>(i);
- Register reg = Register::FromOperand(operand);
- if (i > 0 && i < -kMinInt8) {
+ Register reg = Register::FromOperand(i);
+ if (i > 0) {
CHECK(reg.is_parameter());
} else {
CHECK(!reg.is_parameter());
@@ -51,7 +50,7 @@ TEST(OperandConversion, Parameters) {
int parameter_count = parameter_counts[p];
for (int i = 0; i < parameter_count; i++) {
Register r = Register::FromParameterIndex(i, parameter_count);
- uint8_t operand_value = r.ToOperand();
+ uint32_t operand_value = r.ToOperand();
Register s = Register::FromOperand(operand_value);
CHECK_EQ(i, s.ToParameterIndex(parameter_count));
}
@@ -59,8 +58,8 @@ TEST(OperandConversion, Parameters) {
}
TEST(OperandConversion, RegistersParametersNoOverlap) {
- int register_count = Register::MaxRegisterIndex() + 1;
- int parameter_count = Register::MaxParameterIndex() + 1;
+ int register_count = 128;
+ int parameter_count = 100;
int32_t register_space_size = base::bits::RoundUpToPowerOfTwo32(
static_cast<uint32_t>(register_count + parameter_count));
uint32_t range = static_cast<uint32_t>(register_space_size);
@@ -68,18 +67,33 @@ TEST(OperandConversion, RegistersParametersNoOverlap) {
for (int i = 0; i < register_count; i += 1) {
Register r = Register(i);
- uint32_t operand = r.ToWideOperand();
- CHECK_LT(operand, operand_count.size());
- operand_count[operand] += 1;
- CHECK_EQ(operand_count[operand], 1);
+ int32_t operand = r.ToOperand();
+ uint8_t index = static_cast<uint8_t>(operand);
+ CHECK_LT(index, operand_count.size());
+ operand_count[index] += 1;
+ CHECK_EQ(operand_count[index], 1);
}
for (int i = 0; i < parameter_count; i += 1) {
Register r = Register::FromParameterIndex(i, parameter_count);
- uint32_t operand = r.ToWideOperand();
- CHECK_LT(operand, operand_count.size());
- operand_count[operand] += 1;
- CHECK_EQ(operand_count[operand], 1);
+ uint32_t operand = r.ToOperand();
+ uint8_t index = static_cast<uint8_t>(operand);
+ CHECK_LT(index, operand_count.size());
+ operand_count[index] += 1;
+ CHECK_EQ(operand_count[index], 1);
+ }
+}
+
+TEST(OperandScaling, ScalableAndNonScalable) {
+ for (OperandScale operand_scale = OperandScale::kSingle;
+ operand_scale <= OperandScale::kMaxValid;
+ operand_scale = Bytecodes::NextOperandScale(operand_scale)) {
+ int scale = static_cast<int>(operand_scale);
+ CHECK_EQ(Bytecodes::Size(Bytecode::kCallRuntime, operand_scale),
+ 1 + 2 + 2 * scale);
+ CHECK_EQ(Bytecodes::Size(Bytecode::kCreateObjectLiteral, operand_scale),
+ 1 + 2 * scale + 1);
+ CHECK_EQ(Bytecodes::Size(Bytecode::kTestIn, operand_scale), 1 + scale);
}
}
@@ -87,16 +101,11 @@ TEST(Bytecodes, HasAnyRegisterOperands) {
CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kAdd), 1);
CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kCall), 2);
CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kCallRuntime), 1);
- CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kCallRuntimeWide), 1);
CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kCallRuntimeForPair),
2);
- CHECK_EQ(
- Bytecodes::NumberOfRegisterOperands(Bytecode::kCallRuntimeForPairWide),
- 2);
CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kDeletePropertyStrict),
1);
CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kForInPrepare), 1);
- CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kForInPrepareWide), 1);
CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kInc), 0);
CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kJumpIfTrue), 0);
CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kNew), 2);
@@ -116,11 +125,11 @@ TEST(Bytecodes, RegisterOperandBitmaps) {
}
TEST(Bytecodes, RegisterOperands) {
- CHECK(Bytecodes::IsRegisterOperandType(OperandType::kReg8));
- CHECK(Bytecodes::IsRegisterInputOperandType(OperandType::kReg8));
- CHECK(!Bytecodes::IsRegisterOutputOperandType(OperandType::kReg8));
- CHECK(!Bytecodes::IsRegisterInputOperandType(OperandType::kRegOut8));
- CHECK(Bytecodes::IsRegisterOutputOperandType(OperandType::kRegOut8));
+ CHECK(Bytecodes::IsRegisterOperandType(OperandType::kReg));
+ CHECK(Bytecodes::IsRegisterInputOperandType(OperandType::kReg));
+ CHECK(!Bytecodes::IsRegisterOutputOperandType(OperandType::kReg));
+ CHECK(!Bytecodes::IsRegisterInputOperandType(OperandType::kRegOut));
+ CHECK(Bytecodes::IsRegisterOutputOperandType(OperandType::kRegOut));
#define IS_REGISTER_OPERAND_TYPE(Name, _) \
CHECK(Bytecodes::IsRegisterOperandType(OperandType::k##Name));
@@ -155,18 +164,157 @@ TEST(Bytecodes, RegisterOperands) {
#undef IS_NOT_REGISTER_INPUT_OPERAND_TYPE
}
-TEST(Bytecodes, DebugBreak) {
- for (uint32_t i = 0; i < Bytecodes::ToByte(Bytecode::kLast); i++) {
- Bytecode bytecode = Bytecodes::FromByte(i);
- Bytecode debugbreak = Bytecodes::GetDebugBreak(bytecode);
- if (!Bytecodes::IsDebugBreak(debugbreak)) {
- PrintF("Bytecode %s has no matching debug break with length %d\n",
- Bytecodes::ToString(bytecode), Bytecodes::Size(bytecode));
- CHECK(false);
+TEST(Bytecodes, DebugBreakExistForEachBytecode) {
+ static const OperandScale kOperandScale = OperandScale::kSingle;
+#define CHECK_DEBUG_BREAK_SIZE(Name, ...) \
+ if (!Bytecodes::IsDebugBreak(Bytecode::k##Name) && \
+ !Bytecodes::IsPrefixScalingBytecode(Bytecode::k##Name)) { \
+ Bytecode debug_bytecode = Bytecodes::GetDebugBreak(Bytecode::k##Name); \
+ CHECK_EQ(Bytecodes::Size(Bytecode::k##Name, kOperandScale), \
+ Bytecodes::Size(debug_bytecode, kOperandScale)); \
+ }
+ BYTECODE_LIST(CHECK_DEBUG_BREAK_SIZE)
+#undef CHECK_DEBUG_BREAK_SIZE
+}
+
+TEST(Bytecodes, DecodeBytecodeAndOperands) {
+ struct BytecodesAndResult {
+ const uint8_t bytecode[32];
+ const size_t length;
+ int parameter_count;
+ const char* output;
+ };
+
+#define B(Name) static_cast<uint8_t>(Bytecode::k##Name)
+ const BytecodesAndResult cases[] = {
+ {{B(LdaSmi), 0x01}, 2, 0, " LdaSmi [1]"},
+ {{B(Wide), B(LdaSmi), 0xe8, 0x03}, 4, 0, " LdaSmi.Wide [1000]"},
+ {{B(ExtraWide), B(LdaSmi), 0xa0, 0x86, 0x01, 0x00},
+ 6,
+ 0,
+ "LdaSmi.ExtraWide [100000]"},
+ {{B(LdaSmi), 0xff}, 2, 0, " LdaSmi [-1]"},
+ {{B(Wide), B(LdaSmi), 0x18, 0xfc}, 4, 0, " LdaSmi.Wide [-1000]"},
+ {{B(ExtraWide), B(LdaSmi), 0x60, 0x79, 0xfe, 0xff},
+ 6,
+ 0,
+ "LdaSmi.ExtraWide [-100000]"},
+ {{B(Star), 0xfb}, 2, 0, " Star r5"},
+ {{B(Wide), B(Star), 0x78, 0xff}, 4, 0, " Star.Wide r136"},
+ {{B(Wide), B(Call), 0x7a, 0xff, 0x79, 0xff, 0x02, 0x00, 0xb1, 0x00},
+ 10,
+ 0,
+ "Call.Wide r134, r135, #2, [177]"},
+ {{B(Ldar),
+ static_cast<uint8_t>(Register::FromParameterIndex(2, 3).ToOperand())},
+ 2,
+ 3,
+ " Ldar a1"},
+ {{B(Wide), B(CreateObjectLiteral), 0x01, 0x02, 0x03, 0x04, 0xa5},
+ 7,
+ 0,
+ "CreateObjectLiteral.Wide [513], [1027], #165"},
+ {{B(ExtraWide), B(JumpIfNull), 0x15, 0xcd, 0x5b, 0x07},
+ 6,
+ 0,
+ "JumpIfNull.ExtraWide [123456789]"},
+ };
+#undef B
+
+ for (size_t i = 0; i < arraysize(cases); ++i) {
+ // Generate reference string by prepending formatted bytes.
+ std::stringstream expected_ss;
+ std::ios default_format(nullptr);
+ default_format.copyfmt(expected_ss);
+ // Match format of Bytecodes::Decode() for byte representations.
+ expected_ss.fill('0');
+ expected_ss.flags(std::ios::right | std::ios::hex);
+ for (size_t b = 0; b < cases[i].length; b++) {
+ expected_ss << std::setw(2) << static_cast<uint32_t>(cases[i].bytecode[b])
+ << ' ';
}
+ expected_ss.copyfmt(default_format);
+ expected_ss << cases[i].output;
+
+ // Generate decoded byte output.
+ std::stringstream actual_ss;
+ Bytecodes::Decode(actual_ss, cases[i].bytecode, cases[i].parameter_count);
+
+ // Compare.
+ CHECK_EQ(actual_ss.str(), expected_ss.str());
}
}
+TEST(Bytecodes, DebugBreakForPrefixBytecodes) {
+ CHECK_EQ(Bytecode::kDebugBreakWide,
+ Bytecodes::GetDebugBreak(Bytecode::kWide));
+ CHECK_EQ(Bytecode::kDebugBreakExtraWide,
+ Bytecodes::GetDebugBreak(Bytecode::kExtraWide));
+}
+
+TEST(Bytecodes, PrefixMappings) {
+ Bytecode prefixes[] = {Bytecode::kWide, Bytecode::kExtraWide};
+ TRACED_FOREACH(Bytecode, prefix, prefixes) {
+ CHECK_EQ(prefix, Bytecodes::OperandScaleToPrefixBytecode(
+ Bytecodes::PrefixBytecodeToOperandScale(prefix)));
+ }
+}
+
+TEST(OperandScale, PrefixesScale) {
+ CHECK(Bytecodes::NextOperandScale(OperandScale::kSingle) ==
+ OperandScale::kDouble);
+ CHECK(Bytecodes::NextOperandScale(OperandScale::kDouble) ==
+ OperandScale::kQuadruple);
+ CHECK(Bytecodes::NextOperandScale(OperandScale::kQuadruple) ==
+ OperandScale::kInvalid);
+}
+
+TEST(OperandScale, PrefixesRequired) {
+ CHECK(!Bytecodes::OperandScaleRequiresPrefixBytecode(OperandScale::kSingle));
+ CHECK(Bytecodes::OperandScaleRequiresPrefixBytecode(OperandScale::kDouble));
+ CHECK(
+ Bytecodes::OperandScaleRequiresPrefixBytecode(OperandScale::kQuadruple));
+ CHECK(Bytecodes::OperandScaleToPrefixBytecode(OperandScale::kDouble) ==
+ Bytecode::kWide);
+ CHECK(Bytecodes::OperandScaleToPrefixBytecode(OperandScale::kQuadruple) ==
+ Bytecode::kExtraWide);
+}
+
+TEST(AccumulatorUse, LogicalOperators) {
+ CHECK_EQ(AccumulatorUse::kNone | AccumulatorUse::kRead,
+ AccumulatorUse::kRead);
+ CHECK_EQ(AccumulatorUse::kRead | AccumulatorUse::kWrite,
+ AccumulatorUse::kReadWrite);
+ CHECK_EQ(AccumulatorUse::kRead & AccumulatorUse::kReadWrite,
+ AccumulatorUse::kRead);
+ CHECK_EQ(AccumulatorUse::kRead & AccumulatorUse::kWrite,
+ AccumulatorUse::kNone);
+}
+
+TEST(AccumulatorUse, SampleBytecodes) {
+ CHECK(Bytecodes::ReadsAccumulator(Bytecode::kStar));
+ CHECK(!Bytecodes::WritesAccumulator(Bytecode::kStar));
+ CHECK_EQ(Bytecodes::GetAccumulatorUse(Bytecode::kStar),
+ AccumulatorUse::kRead);
+ CHECK(!Bytecodes::ReadsAccumulator(Bytecode::kLdar));
+ CHECK(Bytecodes::WritesAccumulator(Bytecode::kLdar));
+ CHECK_EQ(Bytecodes::GetAccumulatorUse(Bytecode::kLdar),
+ AccumulatorUse::kWrite);
+ CHECK(Bytecodes::ReadsAccumulator(Bytecode::kAdd));
+ CHECK(Bytecodes::WritesAccumulator(Bytecode::kAdd));
+ CHECK_EQ(Bytecodes::GetAccumulatorUse(Bytecode::kAdd),
+ AccumulatorUse::kReadWrite);
+}
+
+TEST(AccumulatorUse, AccumulatorUseToString) {
+ std::set<std::string> names;
+ names.insert(Bytecodes::AccumulatorUseToString(AccumulatorUse::kNone));
+ names.insert(Bytecodes::AccumulatorUseToString(AccumulatorUse::kRead));
+ names.insert(Bytecodes::AccumulatorUseToString(AccumulatorUse::kWrite));
+ names.insert(Bytecodes::AccumulatorUseToString(AccumulatorUse::kReadWrite));
+ CHECK_EQ(names.size(), 4);
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
index b3ec5ff668..71224370cc 100644
--- a/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
@@ -19,79 +19,76 @@ class ConstantArrayBuilderTest : public TestWithIsolateAndZone {
ConstantArrayBuilderTest() {}
~ConstantArrayBuilderTest() override {}
- static const size_t kLowCapacity = ConstantArrayBuilder::kLowCapacity;
- static const size_t kMaxCapacity = ConstantArrayBuilder::kMaxCapacity;
+ static const size_t k8BitCapacity = ConstantArrayBuilder::k8BitCapacity;
+ static const size_t k16BitCapacity = ConstantArrayBuilder::k16BitCapacity;
};
-
STATIC_CONST_MEMBER_DEFINITION const size_t
- ConstantArrayBuilderTest::kMaxCapacity;
+ ConstantArrayBuilderTest::k16BitCapacity;
STATIC_CONST_MEMBER_DEFINITION const size_t
- ConstantArrayBuilderTest::kLowCapacity;
-
+ ConstantArrayBuilderTest::k8BitCapacity;
TEST_F(ConstantArrayBuilderTest, AllocateAllEntries) {
ConstantArrayBuilder builder(isolate(), zone());
- for (size_t i = 0; i < kMaxCapacity; i++) {
+ for (size_t i = 0; i < k16BitCapacity; i++) {
builder.Insert(handle(Smi::FromInt(static_cast<int>(i)), isolate()));
}
- CHECK_EQ(builder.size(), kMaxCapacity);
- for (size_t i = 0; i < kMaxCapacity; i++) {
+ CHECK_EQ(builder.size(), k16BitCapacity);
+ for (size_t i = 0; i < k16BitCapacity; i++) {
CHECK_EQ(Handle<Smi>::cast(builder.At(i))->value(), i);
}
}
-
TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithIdx8Reservations) {
- for (size_t reserved = 1; reserved < kLowCapacity; reserved *= 3) {
+ for (size_t reserved = 1; reserved < k8BitCapacity; reserved *= 3) {
ConstantArrayBuilder builder(isolate(), zone());
for (size_t i = 0; i < reserved; i++) {
OperandSize operand_size = builder.CreateReservedEntry();
CHECK(operand_size == OperandSize::kByte);
}
- for (size_t i = 0; i < 2 * kLowCapacity; i++) {
+ for (size_t i = 0; i < 2 * k8BitCapacity; i++) {
Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
builder.Insert(object);
- if (i + reserved < kLowCapacity) {
- CHECK_LE(builder.size(), kLowCapacity);
+ if (i + reserved < k8BitCapacity) {
+ CHECK_LE(builder.size(), k8BitCapacity);
CHECK_EQ(builder.size(), i + 1);
CHECK(builder.At(i)->SameValue(*object));
} else {
- CHECK_GE(builder.size(), kLowCapacity);
+ CHECK_GE(builder.size(), k8BitCapacity);
CHECK_EQ(builder.size(), i + reserved + 1);
CHECK(builder.At(i + reserved)->SameValue(*object));
}
}
- CHECK_EQ(builder.size(), 2 * kLowCapacity + reserved);
+ CHECK_EQ(builder.size(), 2 * k8BitCapacity + reserved);
// Check reserved values represented by the hole.
for (size_t i = 0; i < reserved; i++) {
- Handle<Object> empty = builder.At(kLowCapacity - reserved + i);
+ Handle<Object> empty = builder.At(k8BitCapacity - reserved + i);
CHECK(empty->SameValue(isolate()->heap()->the_hole_value()));
}
// Commmit reserved entries with duplicates and check size does not change.
- DCHECK_EQ(reserved + 2 * kLowCapacity, builder.size());
+ DCHECK_EQ(reserved + 2 * k8BitCapacity, builder.size());
size_t duplicates_in_idx8_space =
- std::min(reserved, kLowCapacity - reserved);
+ std::min(reserved, k8BitCapacity - reserved);
for (size_t i = 0; i < duplicates_in_idx8_space; i++) {
builder.CommitReservedEntry(OperandSize::kByte,
isolate()->factory()->NewNumberFromSize(i));
- DCHECK_EQ(reserved + 2 * kLowCapacity, builder.size());
+ DCHECK_EQ(reserved + 2 * k8BitCapacity, builder.size());
}
// Check all committed values match expected (holes where
// duplicates_in_idx8_space allocated).
- for (size_t i = 0; i < kLowCapacity - reserved; i++) {
+ for (size_t i = 0; i < k8BitCapacity - reserved; i++) {
Smi* smi = Smi::FromInt(static_cast<int>(i));
CHECK(Handle<Smi>::cast(builder.At(i))->SameValue(smi));
}
- for (size_t i = kLowCapacity; i < 2 * kLowCapacity + reserved; i++) {
+ for (size_t i = k8BitCapacity; i < 2 * k8BitCapacity + reserved; i++) {
Smi* smi = Smi::FromInt(static_cast<int>(i - reserved));
CHECK(Handle<Smi>::cast(builder.At(i))->SameValue(smi));
}
for (size_t i = 0; i < reserved; i++) {
- size_t index = kLowCapacity - reserved + i;
+ size_t index = k8BitCapacity - reserved + i;
CHECK(builder.At(index)->IsTheHole());
}
@@ -102,20 +99,19 @@ TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithIdx8Reservations) {
}
for (size_t i = 0; i < duplicates_in_idx8_space; i++) {
Handle<Object> object =
- isolate()->factory()->NewNumberFromSize(2 * kLowCapacity + i);
+ isolate()->factory()->NewNumberFromSize(2 * k8BitCapacity + i);
size_t index = builder.CommitReservedEntry(OperandSize::kByte, object);
- CHECK_EQ(static_cast<int>(index), kLowCapacity - reserved + i);
+ CHECK_EQ(static_cast<int>(index), k8BitCapacity - reserved + i);
CHECK(builder.At(static_cast<int>(index))->SameValue(*object));
}
- CHECK_EQ(builder.size(), 2 * kLowCapacity + reserved);
+ CHECK_EQ(builder.size(), 2 * k8BitCapacity + reserved);
}
}
-
-TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithIdx16Reservations) {
- for (size_t reserved = 1; reserved < kLowCapacity; reserved *= 3) {
+TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithWideReservations) {
+ for (size_t reserved = 1; reserved < k8BitCapacity; reserved *= 3) {
ConstantArrayBuilder builder(isolate(), zone());
- for (size_t i = 0; i < kLowCapacity; i++) {
+ for (size_t i = 0; i < k8BitCapacity; i++) {
Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
builder.Insert(object);
CHECK(builder.At(i)->SameValue(*object));
@@ -124,20 +120,20 @@ TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithIdx16Reservations) {
for (size_t i = 0; i < reserved; i++) {
OperandSize operand_size = builder.CreateReservedEntry();
CHECK(operand_size == OperandSize::kShort);
- CHECK_EQ(builder.size(), kLowCapacity);
+ CHECK_EQ(builder.size(), k8BitCapacity);
}
for (size_t i = 0; i < reserved; i++) {
builder.DiscardReservedEntry(OperandSize::kShort);
- CHECK_EQ(builder.size(), kLowCapacity);
+ CHECK_EQ(builder.size(), k8BitCapacity);
}
for (size_t i = 0; i < reserved; i++) {
OperandSize operand_size = builder.CreateReservedEntry();
CHECK(operand_size == OperandSize::kShort);
Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
builder.CommitReservedEntry(operand_size, object);
- CHECK_EQ(builder.size(), kLowCapacity);
+ CHECK_EQ(builder.size(), k8BitCapacity);
}
- for (size_t i = kLowCapacity; i < kLowCapacity + reserved; i++) {
+ for (size_t i = k8BitCapacity; i < k8BitCapacity + reserved; i++) {
OperandSize operand_size = builder.CreateReservedEntry();
CHECK(operand_size == OperandSize::kShort);
Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
@@ -163,26 +159,40 @@ TEST_F(ConstantArrayBuilderTest, ToFixedArray) {
}
}
+TEST_F(ConstantArrayBuilderTest, ToLargeFixedArray) {
+ ConstantArrayBuilder builder(isolate(), zone());
+ static const size_t kNumberOfElements = 37373;
+ for (size_t i = 0; i < kNumberOfElements; i++) {
+ Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
+ builder.Insert(object);
+ CHECK(builder.At(i)->SameValue(*object));
+ }
+ Handle<FixedArray> constant_array = builder.ToFixedArray();
+ CHECK_EQ(constant_array->length(), kNumberOfElements);
+ for (size_t i = 0; i < kNumberOfElements; i++) {
+ CHECK(constant_array->get(static_cast<int>(i))->SameValue(*builder.At(i)));
+ }
+}
TEST_F(ConstantArrayBuilderTest, GapFilledWhenLowReservationCommitted) {
ConstantArrayBuilder builder(isolate(), zone());
- for (size_t i = 0; i < kLowCapacity; i++) {
+ for (size_t i = 0; i < k8BitCapacity; i++) {
OperandSize operand_size = builder.CreateReservedEntry();
CHECK(OperandSize::kByte == operand_size);
CHECK_EQ(builder.size(), 0);
}
- for (size_t i = 0; i < kLowCapacity; i++) {
+ for (size_t i = 0; i < k8BitCapacity; i++) {
Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
builder.Insert(object);
- CHECK_EQ(builder.size(), i + kLowCapacity + 1);
+ CHECK_EQ(builder.size(), i + k8BitCapacity + 1);
}
- for (size_t i = 0; i < kLowCapacity; i++) {
+ for (size_t i = 0; i < k8BitCapacity; i++) {
builder.CommitReservedEntry(OperandSize::kByte,
- builder.At(i + kLowCapacity));
- CHECK_EQ(builder.size(), 2 * kLowCapacity);
+ builder.At(i + k8BitCapacity));
+ CHECK_EQ(builder.size(), 2 * k8BitCapacity);
}
- for (size_t i = 0; i < kLowCapacity; i++) {
- Handle<Object> original = builder.At(kLowCapacity + i);
+ for (size_t i = 0; i < k8BitCapacity; i++) {
+ Handle<Object> original = builder.At(k8BitCapacity + i);
Handle<Object> duplicate = builder.At(i);
CHECK(original->SameValue(*duplicate));
Handle<Object> reference = isolate()->factory()->NewNumberFromSize(i);
@@ -190,33 +200,89 @@ TEST_F(ConstantArrayBuilderTest, GapFilledWhenLowReservationCommitted) {
}
}
-
TEST_F(ConstantArrayBuilderTest, GapNotFilledWhenLowReservationDiscarded) {
ConstantArrayBuilder builder(isolate(), zone());
- for (size_t i = 0; i < kLowCapacity; i++) {
+ for (size_t i = 0; i < k8BitCapacity; i++) {
OperandSize operand_size = builder.CreateReservedEntry();
CHECK(OperandSize::kByte == operand_size);
CHECK_EQ(builder.size(), 0);
}
- for (size_t i = 0; i < kLowCapacity; i++) {
+ for (size_t i = 0; i < k8BitCapacity; i++) {
Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
builder.Insert(object);
- CHECK_EQ(builder.size(), i + kLowCapacity + 1);
+ CHECK_EQ(builder.size(), i + k8BitCapacity + 1);
}
- for (size_t i = 0; i < kLowCapacity; i++) {
+ for (size_t i = 0; i < k8BitCapacity; i++) {
builder.DiscardReservedEntry(OperandSize::kByte);
- builder.Insert(builder.At(i + kLowCapacity));
- CHECK_EQ(builder.size(), 2 * kLowCapacity);
+ builder.Insert(builder.At(i + k8BitCapacity));
+ CHECK_EQ(builder.size(), 2 * k8BitCapacity);
}
- for (size_t i = 0; i < kLowCapacity; i++) {
+ for (size_t i = 0; i < k8BitCapacity; i++) {
Handle<Object> reference = isolate()->factory()->NewNumberFromSize(i);
- Handle<Object> original = builder.At(kLowCapacity + i);
+ Handle<Object> original = builder.At(k8BitCapacity + i);
CHECK(original->SameValue(*reference));
Handle<Object> duplicate = builder.At(i);
CHECK(duplicate->SameValue(*isolate()->factory()->the_hole_value()));
}
}
+TEST_F(ConstantArrayBuilderTest, HolesWithUnusedReservations) {
+ static int kNumberOfHoles = 128;
+ ConstantArrayBuilder builder(isolate(), zone());
+ for (int i = 0; i < kNumberOfHoles; ++i) {
+ CHECK_EQ(builder.CreateReservedEntry(), OperandSize::kByte);
+ }
+ for (int i = 0; i < 128; ++i) {
+ CHECK_EQ(builder.Insert(isolate()->factory()->NewNumber(i)), i);
+ }
+ CHECK_EQ(builder.Insert(isolate()->factory()->NewNumber(256)), 256);
+
+ Handle<FixedArray> constant_array = builder.ToFixedArray();
+ CHECK_EQ(constant_array->length(), 257);
+ for (int i = 128; i < 256; i++) {
+ CHECK(constant_array->get(i)->SameValue(
+ *isolate()->factory()->the_hole_value()));
+ }
+ CHECK(!constant_array->get(127)->SameValue(
+ *isolate()->factory()->the_hole_value()));
+ CHECK(!constant_array->get(256)->SameValue(
+ *isolate()->factory()->the_hole_value()));
+}
+
+TEST_F(ConstantArrayBuilderTest, ReservationsAtAllScales) {
+ ConstantArrayBuilder builder(isolate(), zone());
+ for (int i = 0; i < 256; i++) {
+ CHECK_EQ(builder.CreateReservedEntry(), OperandSize::kByte);
+ }
+ for (int i = 256; i < 65536; ++i) {
+ CHECK_EQ(builder.CreateReservedEntry(), OperandSize::kShort);
+ }
+ for (int i = 65536; i < 131072; ++i) {
+ CHECK_EQ(builder.CreateReservedEntry(), OperandSize::kQuad);
+ }
+ CHECK_EQ(builder.CommitReservedEntry(OperandSize::kByte,
+ isolate()->factory()->NewNumber(1)),
+ 0);
+ CHECK_EQ(builder.CommitReservedEntry(OperandSize::kShort,
+ isolate()->factory()->NewNumber(2)),
+ 256);
+ CHECK_EQ(builder.CommitReservedEntry(OperandSize::kQuad,
+ isolate()->factory()->NewNumber(3)),
+ 65536);
+ Handle<FixedArray> constant_array = builder.ToFixedArray();
+ CHECK_EQ(constant_array->length(), 65537);
+ int count = 1;
+ for (int i = 0; i < constant_array->length(); ++i) {
+ Handle<Object> expected;
+ if (i == 0 || i == 256 || i == 65536) {
+ expected = isolate()->factory()->NewNumber(count++);
+ } else {
+ expected = isolate()->factory()->the_hole_value();
+ }
+ CHECK(constant_array->get(i)->SameValue(*expected));
+ }
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
index 3375a6b817..0106c577bd 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
@@ -62,6 +62,18 @@ Matcher<Node*> IsWordOr(const Matcher<Node*>& lhs_matcher,
: IsWord32Or(lhs_matcher, rhs_matcher);
}
+InterpreterAssemblerTest::InterpreterAssemblerForTest::
+ ~InterpreterAssemblerForTest() {
+ // Tests don't necessarily read and write accumulator but
+ // InterpreterAssembler checks accumulator uses.
+ if (Bytecodes::ReadsAccumulator(bytecode())) {
+ GetAccumulator();
+ }
+ if (Bytecodes::WritesAccumulator(bytecode())) {
+ SetAccumulator(nullptr);
+ }
+}
+
Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoad(
const Matcher<LoadRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher) {
@@ -77,24 +89,25 @@ Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsStore(
}
Matcher<Node*>
-InterpreterAssemblerTest::InterpreterAssemblerForTest::IsBytecodeOperand(
+InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedByteOperand(
int offset) {
return IsLoad(
MachineType::Uint8(),
IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
IsIntPtrAdd(
IsParameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
- IsInt32Constant(offset)));
+ IsIntPtrConstant(offset)));
}
-Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::
- IsBytecodeOperandSignExtended(int offset) {
+Matcher<Node*>
+InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedByteOperand(
+ int offset) {
Matcher<Node*> load_matcher = IsLoad(
MachineType::Int8(),
IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
IsIntPtrAdd(
IsParameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
- IsInt32Constant(offset)));
+ IsIntPtrConstant(offset)));
if (kPointerSize == 8) {
load_matcher = IsChangeInt32ToInt64(load_matcher);
}
@@ -102,7 +115,7 @@ Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::
}
Matcher<Node*>
-InterpreterAssemblerTest::InterpreterAssemblerForTest::IsBytecodeOperandShort(
+InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedShortOperand(
int offset) {
if (TargetSupportsUnalignedAccess()) {
return IsLoad(
@@ -111,36 +124,35 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsBytecodeOperandShort(
IsIntPtrAdd(
IsParameter(
InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
- IsInt32Constant(offset)));
+ IsIntPtrConstant(offset)));
} else {
- Matcher<Node*> first_byte = IsLoad(
- MachineType::Uint8(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
- IsIntPtrAdd(
- IsParameter(
- InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
- IsInt32Constant(offset)));
- Matcher<Node*> second_byte = IsLoad(
- MachineType::Uint8(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
- IsIntPtrAdd(
- IsParameter(
- InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
- IsInt32Constant(offset + 1)));
#if V8_TARGET_LITTLE_ENDIAN
- return IsWordOr(IsWordShl(second_byte, IsInt32Constant(kBitsPerByte)),
- first_byte);
+ const int kStep = -1;
+ const int kMsbOffset = 1;
#elif V8_TARGET_BIG_ENDIAN
- return IsWordOr(IsWordShl(first_byte, IsInt32Constant(kBitsPerByte)),
- second_byte);
+ const int kStep = 1;
+ const int kMsbOffset = 0;
#else
#error "Unknown Architecture"
#endif
+ Matcher<Node*> bytes[2];
+ for (int i = 0; i < static_cast<int>(arraysize(bytes)); i++) {
+ bytes[i] = IsLoad(
+ MachineType::Uint8(),
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
+ IsIntPtrAdd(
+ IsParameter(
+ InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
+ IsIntPtrConstant(offset + kMsbOffset + kStep * i)));
+ }
+ return IsWord32Or(IsWord32Shl(bytes[0], IsInt32Constant(kBitsPerByte)),
+ bytes[1]);
}
}
-Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::
- IsBytecodeOperandShortSignExtended(int offset) {
+Matcher<Node*>
+InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedShortOperand(
+ int offset) {
Matcher<Node*> load_matcher;
if (TargetSupportsUnalignedAccess()) {
load_matcher = IsLoad(
@@ -149,34 +161,115 @@ Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::
IsIntPtrAdd(
IsParameter(
InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
- IsInt32Constant(offset)));
+ IsIntPtrConstant(offset)));
} else {
#if V8_TARGET_LITTLE_ENDIAN
- int hi_byte_offset = offset + 1;
- int lo_byte_offset = offset;
-
+ const int kStep = -1;
+ const int kMsbOffset = 1;
#elif V8_TARGET_BIG_ENDIAN
- int hi_byte_offset = offset;
- int lo_byte_offset = offset + 1;
+ const int kStep = 1;
+ const int kMsbOffset = 0;
#else
#error "Unknown Architecture"
#endif
- Matcher<Node*> hi_byte = IsLoad(
- MachineType::Int8(),
+ Matcher<Node*> bytes[2];
+ for (int i = 0; i < static_cast<int>(arraysize(bytes)); i++) {
+ bytes[i] = IsLoad(
+ (i == 0) ? MachineType::Int8() : MachineType::Uint8(),
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
+ IsIntPtrAdd(
+ IsParameter(
+ InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
+ IsIntPtrConstant(offset + kMsbOffset + kStep * i)));
+ }
+ load_matcher = IsWord32Or(
+ IsWord32Shl(bytes[0], IsInt32Constant(kBitsPerByte)), bytes[1]);
+ }
+
+ if (kPointerSize == 8) {
+ load_matcher = IsChangeInt32ToInt64(load_matcher);
+ }
+ return load_matcher;
+}
+
+Matcher<Node*>
+InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedQuadOperand(
+ int offset) {
+ if (TargetSupportsUnalignedAccess()) {
+ return IsLoad(
+ MachineType::Uint32(),
IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
IsIntPtrAdd(
IsParameter(
InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
- IsInt32Constant(hi_byte_offset)));
- hi_byte = IsWord32Shl(hi_byte, IsInt32Constant(kBitsPerByte));
- Matcher<Node*> lo_byte = IsLoad(
- MachineType::Uint8(),
+ IsIntPtrConstant(offset)));
+ } else {
+#if V8_TARGET_LITTLE_ENDIAN
+ const int kStep = -1;
+ const int kMsbOffset = 3;
+#elif V8_TARGET_BIG_ENDIAN
+ const int kStep = 1;
+ const int kMsbOffset = 0;
+#else
+#error "Unknown Architecture"
+#endif
+ Matcher<Node*> bytes[4];
+ for (int i = 0; i < static_cast<int>(arraysize(bytes)); i++) {
+ bytes[i] = IsLoad(
+ MachineType::Uint8(),
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
+ IsIntPtrAdd(
+ IsParameter(
+ InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
+ IsIntPtrConstant(offset + kMsbOffset + kStep * i)));
+ }
+ return IsWord32Or(
+ IsWord32Shl(bytes[0], IsInt32Constant(3 * kBitsPerByte)),
+ IsWord32Or(
+ IsWord32Shl(bytes[1], IsInt32Constant(2 * kBitsPerByte)),
+ IsWord32Or(IsWord32Shl(bytes[2], IsInt32Constant(1 * kBitsPerByte)),
+ bytes[3])));
+ }
+}
+
+Matcher<Node*>
+InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedQuadOperand(
+ int offset) {
+ Matcher<Node*> load_matcher;
+ if (TargetSupportsUnalignedAccess()) {
+ load_matcher = IsLoad(
+ MachineType::Int32(),
IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
IsIntPtrAdd(
IsParameter(
InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
- IsInt32Constant(lo_byte_offset)));
- load_matcher = IsWord32Or(hi_byte, lo_byte);
+ IsIntPtrConstant(offset)));
+ } else {
+#if V8_TARGET_LITTLE_ENDIAN
+ const int kStep = -1;
+ int kMsbOffset = 3;
+#elif V8_TARGET_BIG_ENDIAN
+ const int kStep = 1;
+ int kMsbOffset = 0;
+#else
+#error "Unknown Architecture"
+#endif
+ Matcher<Node*> bytes[4];
+ for (int i = 0; i < static_cast<int>(arraysize(bytes)); i++) {
+ bytes[i] = IsLoad(
+ (i == 0) ? MachineType::Int8() : MachineType::Uint8(),
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
+ IsIntPtrAdd(
+ IsParameter(
+ InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
+ IsIntPtrConstant(offset + kMsbOffset + kStep * i)));
+ }
+ load_matcher = IsWord32Or(
+ IsWord32Shl(bytes[0], IsInt32Constant(3 * kBitsPerByte)),
+ IsWord32Or(
+ IsWord32Shl(bytes[1], IsInt32Constant(2 * kBitsPerByte)),
+ IsWord32Or(IsWord32Shl(bytes[2], IsInt32Constant(1 * kBitsPerByte)),
+ bytes[3])));
}
if (kPointerSize == 8) {
@@ -185,6 +278,38 @@ Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::
return load_matcher;
}
+Matcher<Node*>
+InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedOperand(
+ int offset, OperandSize operand_size) {
+ switch (operand_size) {
+ case OperandSize::kByte:
+ return IsSignedByteOperand(offset);
+ case OperandSize::kShort:
+ return IsSignedShortOperand(offset);
+ case OperandSize::kQuad:
+ return IsSignedQuadOperand(offset);
+ case OperandSize::kNone:
+ UNREACHABLE();
+ }
+ return nullptr;
+}
+
+Matcher<Node*>
+InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedOperand(
+ int offset, OperandSize operand_size) {
+ switch (operand_size) {
+ case OperandSize::kByte:
+ return IsUnsignedByteOperand(offset);
+ case OperandSize::kShort:
+ return IsUnsignedShortOperand(offset);
+ case OperandSize::kQuad:
+ return IsUnsignedQuadOperand(offset);
+ case OperandSize::kNone:
+ UNREACHABLE();
+ }
+ return nullptr;
+}
+
TARGET_TEST_F(InterpreterAssemblerTest, Dispatch) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
@@ -195,18 +320,22 @@ TARGET_TEST_F(InterpreterAssemblerTest, Dispatch) {
EXPECT_EQ(1, end->InputCount());
Node* tail_call_node = end->InputAt(0);
+ OperandScale operand_scale = OperandScale::kSingle;
Matcher<Node*> next_bytecode_offset_matcher = IsIntPtrAdd(
IsParameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
- IsInt32Constant(interpreter::Bytecodes::Size(bytecode)));
+ IsIntPtrConstant(
+ interpreter::Bytecodes::Size(bytecode, operand_scale)));
Matcher<Node*> target_bytecode_matcher = m.IsLoad(
MachineType::Uint8(),
IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
next_bytecode_offset_matcher);
+ if (kPointerSize == 8) {
+ target_bytecode_matcher = IsChangeUint32ToUint64(target_bytecode_matcher);
+ }
Matcher<Node*> code_target_matcher = m.IsLoad(
MachineType::Pointer(),
IsParameter(InterpreterDispatchDescriptor::kDispatchTableParameter),
- IsWord32Shl(target_bytecode_matcher,
- IsInt32Constant(kPointerSizeLog2)));
+ IsWordShl(target_bytecode_matcher, IsIntPtrConstant(kPointerSizeLog2)));
EXPECT_THAT(
tail_call_node,
@@ -230,7 +359,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, Jump) {
TRACED_FOREACH(int, jump_offset, jump_offsets) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
- m.Jump(m.Int32Constant(jump_offset));
+ m.Jump(m.IntPtrConstant(jump_offset));
Graph* graph = m.graph();
Node* end = graph->end();
EXPECT_EQ(1, end->InputCount());
@@ -238,14 +367,18 @@ TARGET_TEST_F(InterpreterAssemblerTest, Jump) {
Matcher<Node*> next_bytecode_offset_matcher = IsIntPtrAdd(
IsParameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
- IsInt32Constant(jump_offset));
+ IsIntPtrConstant(jump_offset));
Matcher<Node*> target_bytecode_matcher =
m.IsLoad(MachineType::Uint8(), _, next_bytecode_offset_matcher);
+ if (kPointerSize == 8) {
+ target_bytecode_matcher =
+ IsChangeUint32ToUint64(target_bytecode_matcher);
+ }
Matcher<Node*> code_target_matcher = m.IsLoad(
MachineType::Pointer(),
IsParameter(InterpreterDispatchDescriptor::kDispatchTableParameter),
- IsWord32Shl(target_bytecode_matcher,
- IsInt32Constant(kPointerSizeLog2)));
+ IsWordShl(target_bytecode_matcher,
+ IsIntPtrConstant(kPointerSizeLog2)));
EXPECT_THAT(
tail_call_node,
@@ -275,24 +408,29 @@ TARGET_TEST_F(InterpreterAssemblerTest, JumpIfWordEqual) {
InterpreterAssemblerForTest m(this, bytecode);
Node* lhs = m.IntPtrConstant(0);
Node* rhs = m.IntPtrConstant(1);
- m.JumpIfWordEqual(lhs, rhs, m.Int32Constant(kJumpIfTrueOffset));
+ m.JumpIfWordEqual(lhs, rhs, m.IntPtrConstant(kJumpIfTrueOffset));
Graph* graph = m.graph();
Node* end = graph->end();
EXPECT_EQ(2, end->InputCount());
- int jump_offsets[] = {kJumpIfTrueOffset,
- interpreter::Bytecodes::Size(bytecode)};
+ OperandScale operand_scale = OperandScale::kSingle;
+ int jump_offsets[] = {kJumpIfTrueOffset, interpreter::Bytecodes::Size(
+ bytecode, operand_scale)};
for (int i = 0; i < static_cast<int>(arraysize(jump_offsets)); i++) {
Matcher<Node*> next_bytecode_offset_matcher = IsIntPtrAdd(
IsParameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
- IsInt32Constant(jump_offsets[i]));
+ IsIntPtrConstant(jump_offsets[i]));
Matcher<Node*> target_bytecode_matcher =
m.IsLoad(MachineType::Uint8(), _, next_bytecode_offset_matcher);
+ if (kPointerSize == 8) {
+ target_bytecode_matcher =
+ IsChangeUint32ToUint64(target_bytecode_matcher);
+ }
Matcher<Node*> code_target_matcher = m.IsLoad(
MachineType::Pointer(),
IsParameter(InterpreterDispatchDescriptor::kDispatchTableParameter),
- IsWord32Shl(target_bytecode_matcher,
- IsInt32Constant(kPointerSizeLog2)));
+ IsWordShl(target_bytecode_matcher,
+ IsIntPtrConstant(kPointerSizeLog2)));
EXPECT_THAT(
end->InputAt(i),
IsTailCall(
@@ -342,51 +480,55 @@ TARGET_TEST_F(InterpreterAssemblerTest, InterpreterReturn) {
}
TARGET_TEST_F(InterpreterAssemblerTest, BytecodeOperand) {
+ static const OperandScale kOperandScales[] = {
+ OperandScale::kSingle, OperandScale::kDouble, OperandScale::kQuadruple};
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
- InterpreterAssemblerForTest m(this, bytecode);
- int number_of_operands = interpreter::Bytecodes::NumberOfOperands(bytecode);
- for (int i = 0; i < number_of_operands; i++) {
- int offset = interpreter::Bytecodes::GetOperandOffset(bytecode, i);
- switch (interpreter::Bytecodes::GetOperandType(bytecode, i)) {
- case interpreter::OperandType::kRegCount8:
- EXPECT_THAT(m.BytecodeOperandCount(i), m.IsBytecodeOperand(offset));
- break;
- case interpreter::OperandType::kIdx8:
- EXPECT_THAT(m.BytecodeOperandIdx(i), m.IsBytecodeOperand(offset));
- break;
- case interpreter::OperandType::kImm8:
- EXPECT_THAT(m.BytecodeOperandImm(i),
- m.IsBytecodeOperandSignExtended(offset));
- break;
- case interpreter::OperandType::kMaybeReg8:
- case interpreter::OperandType::kReg8:
- case interpreter::OperandType::kRegOut8:
- case interpreter::OperandType::kRegOutPair8:
- case interpreter::OperandType::kRegOutTriple8:
- case interpreter::OperandType::kRegPair8:
- EXPECT_THAT(m.BytecodeOperandReg(i),
- m.IsBytecodeOperandSignExtended(offset));
- break;
- case interpreter::OperandType::kRegCount16:
- EXPECT_THAT(m.BytecodeOperandCount(i),
- m.IsBytecodeOperandShort(offset));
- break;
- case interpreter::OperandType::kIdx16:
- EXPECT_THAT(m.BytecodeOperandIdx(i),
- m.IsBytecodeOperandShort(offset));
- break;
- case interpreter::OperandType::kMaybeReg16:
- case interpreter::OperandType::kReg16:
- case interpreter::OperandType::kRegOut16:
- case interpreter::OperandType::kRegOutPair16:
- case interpreter::OperandType::kRegOutTriple16:
- case interpreter::OperandType::kRegPair16:
- EXPECT_THAT(m.BytecodeOperandReg(i),
- m.IsBytecodeOperandShortSignExtended(offset));
- break;
- case interpreter::OperandType::kNone:
- UNREACHABLE();
- break;
+ TRACED_FOREACH(interpreter::OperandScale, operand_scale, kOperandScales) {
+ InterpreterAssemblerForTest m(this, bytecode, operand_scale);
+ int number_of_operands =
+ interpreter::Bytecodes::NumberOfOperands(bytecode);
+ for (int i = 0; i < number_of_operands; i++) {
+ int offset = interpreter::Bytecodes::GetOperandOffset(bytecode, i,
+ operand_scale);
+ OperandType operand_type =
+ interpreter::Bytecodes::GetOperandType(bytecode, i);
+ OperandSize operand_size =
+ Bytecodes::SizeOfOperand(operand_type, operand_scale);
+ switch (interpreter::Bytecodes::GetOperandType(bytecode, i)) {
+ case interpreter::OperandType::kRegCount:
+ EXPECT_THAT(m.BytecodeOperandCount(i),
+ m.IsUnsignedOperand(offset, operand_size));
+ break;
+ case interpreter::OperandType::kFlag8:
+ EXPECT_THAT(m.BytecodeOperandFlag(i),
+ m.IsUnsignedOperand(offset, operand_size));
+ break;
+ case interpreter::OperandType::kIdx:
+ EXPECT_THAT(m.BytecodeOperandIdx(i),
+ m.IsUnsignedOperand(offset, operand_size));
+ break;
+ case interpreter::OperandType::kImm: {
+ EXPECT_THAT(m.BytecodeOperandImm(i),
+ m.IsSignedOperand(offset, operand_size));
+ break;
+ }
+ case interpreter::OperandType::kMaybeReg:
+ case interpreter::OperandType::kReg:
+ case interpreter::OperandType::kRegOut:
+ case interpreter::OperandType::kRegOutPair:
+ case interpreter::OperandType::kRegOutTriple:
+ case interpreter::OperandType::kRegPair:
+ EXPECT_THAT(m.BytecodeOperandReg(i),
+ m.IsSignedOperand(offset, operand_size));
+ break;
+ case interpreter::OperandType::kRuntimeId:
+ EXPECT_THAT(m.BytecodeOperandRuntimeId(i),
+ m.IsUnsignedOperand(offset, operand_size));
+ break;
+ case interpreter::OperandType::kNone:
+ UNREACHABLE();
+ break;
+ }
}
}
}
@@ -394,12 +536,16 @@ TARGET_TEST_F(InterpreterAssemblerTest, BytecodeOperand) {
TARGET_TEST_F(InterpreterAssemblerTest, GetSetAccumulator) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ if (!interpreter::Bytecodes::ReadsAccumulator(bytecode) ||
+ !interpreter::Bytecodes::WritesAccumulator(bytecode)) {
+ continue;
+ }
+
InterpreterAssemblerForTest m(this, bytecode);
// Should be incoming accumulator if not set.
EXPECT_THAT(
m.GetAccumulator(),
IsParameter(InterpreterDispatchDescriptor::kAccumulatorParameter));
-
// Should be set by SetAccumulator.
Node* accumulator_value_1 = m.Int32Constant(0xdeadbeef);
m.SetAccumulator(accumulator_value_1);
@@ -433,27 +579,27 @@ TARGET_TEST_F(InterpreterAssemblerTest, GetSetContext) {
TARGET_TEST_F(InterpreterAssemblerTest, RegisterLocation) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
- Node* reg_index_node = m.Int32Constant(44);
+ Node* reg_index_node = m.IntPtrConstant(44);
Node* reg_location_node = m.RegisterLocation(reg_index_node);
EXPECT_THAT(
reg_location_node,
IsIntPtrAdd(
IsParameter(InterpreterDispatchDescriptor::kRegisterFileParameter),
- IsWordShl(reg_index_node, IsInt32Constant(kPointerSizeLog2))));
+ IsWordShl(reg_index_node, IsIntPtrConstant(kPointerSizeLog2))));
}
}
TARGET_TEST_F(InterpreterAssemblerTest, LoadRegister) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
- Node* reg_index_node = m.Int32Constant(44);
+ Node* reg_index_node = m.IntPtrConstant(44);
Node* load_reg_node = m.LoadRegister(reg_index_node);
EXPECT_THAT(
load_reg_node,
m.IsLoad(
MachineType::AnyTagged(),
IsParameter(InterpreterDispatchDescriptor::kRegisterFileParameter),
- IsWordShl(reg_index_node, IsInt32Constant(kPointerSizeLog2))));
+ IsWordShl(reg_index_node, IsIntPtrConstant(kPointerSizeLog2))));
}
}
@@ -461,7 +607,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, StoreRegister) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
Node* store_value = m.Int32Constant(0xdeadbeef);
- Node* reg_index_node = m.Int32Constant(44);
+ Node* reg_index_node = m.IntPtrConstant(44);
Node* store_reg_node = m.StoreRegister(store_value, reg_index_node);
EXPECT_THAT(
store_reg_node,
@@ -469,7 +615,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, StoreRegister) {
StoreRepresentation(MachineRepresentation::kTagged,
kNoWriteBarrier),
IsParameter(InterpreterDispatchDescriptor::kRegisterFileParameter),
- IsWordShl(reg_index_node, IsInt32Constant(kPointerSizeLog2)),
+ IsWordShl(reg_index_node, IsIntPtrConstant(kPointerSizeLog2)),
store_value));
}
}
@@ -478,10 +624,12 @@ TARGET_TEST_F(InterpreterAssemblerTest, SmiTag) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
Node* value = m.Int32Constant(44);
- EXPECT_THAT(m.SmiTag(value),
- IsWordShl(value, IsInt32Constant(kSmiShiftSize + kSmiTagSize)));
- EXPECT_THAT(m.SmiUntag(value),
- IsWordSar(value, IsInt32Constant(kSmiShiftSize + kSmiTagSize)));
+ EXPECT_THAT(
+ m.SmiTag(value),
+ IsWordShl(value, IsIntPtrConstant(kSmiShiftSize + kSmiTagSize)));
+ EXPECT_THAT(
+ m.SmiUntag(value),
+ IsWordSar(value, IsIntPtrConstant(kSmiShiftSize + kSmiTagSize)));
}
}
@@ -508,16 +656,16 @@ TARGET_TEST_F(InterpreterAssemblerTest, IntPtrSub) {
TARGET_TEST_F(InterpreterAssemblerTest, WordShl) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
- Node* a = m.Int32Constant(0);
+ Node* a = m.IntPtrConstant(0);
Node* add = m.WordShl(a, 10);
- EXPECT_THAT(add, IsWordShl(a, IsInt32Constant(10)));
+ EXPECT_THAT(add, IsWordShl(a, IsIntPtrConstant(10)));
}
}
TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
- Node* index = m.Int32Constant(2);
+ Node* index = m.IntPtrConstant(2);
Node* load_constant = m.LoadConstantPoolEntry(index);
Matcher<Node*> constant_pool_matcher = m.IsLoad(
MachineType::AnyTagged(),
@@ -528,23 +676,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
m.IsLoad(MachineType::AnyTagged(), constant_pool_matcher,
IsIntPtrAdd(
IsIntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
- IsWordShl(index, IsInt32Constant(kPointerSizeLog2)))));
- }
-}
-
-TARGET_TEST_F(InterpreterAssemblerTest, LoadFixedArrayElement) {
- TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
- InterpreterAssemblerForTest m(this, bytecode);
- int index = 3;
- Node* fixed_array = m.IntPtrConstant(0xdeadbeef);
- Node* load_element = m.LoadFixedArrayElement(fixed_array, index);
- EXPECT_THAT(
- load_element,
- m.IsLoad(MachineType::AnyTagged(), fixed_array,
- IsIntPtrAdd(
- IsIntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
- IsWordShl(IsInt32Constant(index),
- IsInt32Constant(kPointerSizeLog2)))));
+ IsWordShl(index, IsIntPtrConstant(kPointerSizeLog2)))));
}
}
@@ -563,13 +695,13 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadObjectField) {
TARGET_TEST_F(InterpreterAssemblerTest, LoadContextSlot) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
- Node* context = m.Int32Constant(1);
- Node* slot_index = m.Int32Constant(22);
+ Node* context = m.IntPtrConstant(1);
+ Node* slot_index = m.IntPtrConstant(22);
Node* load_context_slot = m.LoadContextSlot(context, slot_index);
Matcher<Node*> offset =
- IsIntPtrAdd(IsWordShl(slot_index, IsInt32Constant(kPointerSizeLog2)),
- IsInt32Constant(Context::kHeaderSize - kHeapObjectTag));
+ IsIntPtrAdd(IsWordShl(slot_index, IsIntPtrConstant(kPointerSizeLog2)),
+ IsIntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
EXPECT_THAT(load_context_slot,
m.IsLoad(MachineType::AnyTagged(), context, offset));
}
@@ -578,14 +710,14 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadContextSlot) {
TARGET_TEST_F(InterpreterAssemblerTest, StoreContextSlot) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
- Node* context = m.Int32Constant(1);
- Node* slot_index = m.Int32Constant(22);
- Node* value = m.Int32Constant(100);
+ Node* context = m.IntPtrConstant(1);
+ Node* slot_index = m.IntPtrConstant(22);
+ Node* value = m.SmiConstant(Smi::FromInt(100));
Node* store_context_slot = m.StoreContextSlot(context, slot_index, value);
Matcher<Node*> offset =
- IsIntPtrAdd(IsWordShl(slot_index, IsInt32Constant(kPointerSizeLog2)),
- IsInt32Constant(Context::kHeaderSize - kHeapObjectTag));
+ IsIntPtrAdd(IsWordShl(slot_index, IsIntPtrConstant(kPointerSizeLog2)),
+ IsIntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
EXPECT_THAT(store_context_slot,
m.IsStore(StoreRepresentation(MachineRepresentation::kTagged,
kFullWriteBarrier),
@@ -629,7 +761,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime) {
IsInt32Mul(function_id, IsInt32Constant(sizeof(Runtime::Function))));
Matcher<Node*> function_entry =
m.IsLoad(MachineType::Pointer(), function,
- IsInt32Constant(offsetof(Runtime::Function, entry)));
+ IsIntPtrConstant(offsetof(Runtime::Function, entry)));
Node* call_runtime = m.CallRuntimeN(function_id, context, first_arg,
arg_count, result_size);
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
index 321c72490b..1ebdc77c18 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
@@ -23,10 +23,12 @@ class InterpreterAssemblerTest : public TestWithIsolateAndZone {
class InterpreterAssemblerForTest final : public InterpreterAssembler {
public:
- InterpreterAssemblerForTest(InterpreterAssemblerTest* test,
- Bytecode bytecode)
- : InterpreterAssembler(test->isolate(), test->zone(), bytecode) {}
- ~InterpreterAssemblerForTest() override {}
+ InterpreterAssemblerForTest(
+ InterpreterAssemblerTest* test, Bytecode bytecode,
+ OperandScale operand_scale = OperandScale::kSingle)
+ : InterpreterAssembler(test->isolate(), test->zone(), bytecode,
+ operand_scale) {}
+ ~InterpreterAssemblerForTest() override;
Matcher<compiler::Node*> IsLoad(
const Matcher<compiler::LoadRepresentation>& rep_matcher,
@@ -38,10 +40,17 @@ class InterpreterAssemblerTest : public TestWithIsolateAndZone {
const Matcher<compiler::Node*>& index_matcher,
const Matcher<compiler::Node*>& value_matcher);
- Matcher<compiler::Node*> IsBytecodeOperand(int offset);
- Matcher<compiler::Node*> IsBytecodeOperandSignExtended(int offset);
- Matcher<compiler::Node*> IsBytecodeOperandShort(int offset);
- Matcher<compiler::Node*> IsBytecodeOperandShortSignExtended(int offset);
+ Matcher<compiler::Node*> IsUnsignedByteOperand(int offset);
+ Matcher<compiler::Node*> IsSignedByteOperand(int offset);
+ Matcher<compiler::Node*> IsUnsignedShortOperand(int offset);
+ Matcher<compiler::Node*> IsSignedShortOperand(int offset);
+ Matcher<compiler::Node*> IsUnsignedQuadOperand(int offset);
+ Matcher<compiler::Node*> IsSignedQuadOperand(int offset);
+
+ Matcher<compiler::Node*> IsSignedOperand(int offset,
+ OperandSize operand_size);
+ Matcher<compiler::Node*> IsUnsignedOperand(int offset,
+ OperandSize operand_size);
using InterpreterAssembler::graph;
diff --git a/deps/v8/test/unittests/interpreter/register-translator-unittest.cc b/deps/v8/test/unittests/interpreter/register-translator-unittest.cc
deleted file mode 100644
index e9f65a6af0..0000000000
--- a/deps/v8/test/unittests/interpreter/register-translator-unittest.cc
+++ /dev/null
@@ -1,260 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <stack>
-
-#include "src/v8.h"
-
-#include "src/interpreter/register-translator.h"
-#include "src/isolate.h"
-#include "test/unittests/test-utils.h"
-
-namespace v8 {
-namespace internal {
-namespace interpreter {
-
-class RegisterTranslatorTest : public TestWithIsolateAndZone,
- private RegisterMover {
- public:
- RegisterTranslatorTest() : translator_(this), move_count_(0) {
- window_start_ =
- RegisterTranslator::DistanceToTranslationWindow(Register(0));
- window_width_ =
- Register::MaxRegisterIndexForByteOperand() - window_start_ + 1;
- }
-
- ~RegisterTranslatorTest() override {}
-
- bool PopMoveAndMatch(Register from, Register to) {
- if (!moves_.empty()) {
- CHECK(from.is_valid() && to.is_valid());
- const std::pair<Register, Register> top = moves_.top();
- moves_.pop();
- return top.first == from && top.second == to;
- } else {
- return false;
- }
- }
-
- int move_count() const { return move_count_; }
- RegisterTranslator* translator() { return &translator_; }
-
- int window_start() const { return window_start_; }
- int window_width() const { return window_width_; }
- int window_limit() const { return window_start_ + window_width_; }
-
- protected:
- static const char* const kBadOperandRegex;
-
- private:
- void MoveRegisterUntranslated(Register from, Register to) override {
- moves_.push(std::make_pair(from, to));
- move_count_++;
- }
-
- RegisterTranslator translator_;
- std::stack<std::pair<Register, Register>> moves_;
- int move_count_;
- int window_start_;
- int window_width_;
-};
-
-const char* const RegisterTranslatorTest::kBadOperandRegex =
- ".*OperandType::kReg8 \\|\\| .*OperandType::kRegOut8\\) && "
- "RegisterIsMovableToWindow.*";
-
-TEST_F(RegisterTranslatorTest, TestFrameSizeAdjustmentsForTranslationWindow) {
- EXPECT_EQ(0, RegisterTranslator::RegisterCountAdjustment(0, 0));
- EXPECT_EQ(0, RegisterTranslator::RegisterCountAdjustment(10, 10));
- EXPECT_EQ(window_width(),
- RegisterTranslator::RegisterCountAdjustment(173, 0));
- EXPECT_EQ(window_width(),
- RegisterTranslator::RegisterCountAdjustment(173, 137));
- EXPECT_EQ(window_width(),
- RegisterTranslator::RegisterCountAdjustment(173, 137));
- // TODO(oth): Add a kMaxParameters8 that derives this info from the frame.
- int param_limit = FLAG_enable_embedded_constant_pool ? 119 : 120;
- EXPECT_EQ(0, RegisterTranslator::RegisterCountAdjustment(0, param_limit));
- EXPECT_EQ(window_limit(),
- RegisterTranslator::RegisterCountAdjustment(0, 128));
- EXPECT_EQ(window_limit(),
- RegisterTranslator::RegisterCountAdjustment(0, 129));
- EXPECT_EQ(window_limit() - 32,
- RegisterTranslator::RegisterCountAdjustment(32, 129));
-}
-
-TEST_F(RegisterTranslatorTest, TestInTranslationWindow) {
- EXPECT_GE(window_start(), 0);
- EXPECT_FALSE(
- RegisterTranslator::InTranslationWindow(Register(window_start() - 1)));
- EXPECT_TRUE(RegisterTranslator::InTranslationWindow(
- Register(Register::MaxRegisterIndexForByteOperand())));
- EXPECT_FALSE(RegisterTranslator::InTranslationWindow(
- Register(Register::MaxRegisterIndexForByteOperand() + 1)));
- for (int index = window_start(); index < window_limit(); index += 1) {
- EXPECT_TRUE(RegisterTranslator::InTranslationWindow(Register(index)));
- }
-}
-
-TEST_F(RegisterTranslatorTest, FitsInReg8Operand) {
- EXPECT_GT(window_start(), 0);
- EXPECT_TRUE(RegisterTranslator::FitsInReg8Operand(
- Register::FromParameterIndex(0, 3)));
- EXPECT_TRUE(RegisterTranslator::FitsInReg8Operand(
- Register::FromParameterIndex(2, 3)));
- EXPECT_TRUE(RegisterTranslator::FitsInReg8Operand(Register(0)));
- EXPECT_TRUE(
- RegisterTranslator::FitsInReg8Operand(Register(window_start() - 1)));
- EXPECT_FALSE(RegisterTranslator::FitsInReg8Operand(Register(kMaxInt8)));
- EXPECT_FALSE(RegisterTranslator::FitsInReg8Operand(Register(kMaxInt8 + 1)));
- for (int index = window_start(); index < window_limit(); index += 1) {
- EXPECT_FALSE(RegisterTranslator::FitsInReg8Operand(Register(index)));
- }
-}
-
-TEST_F(RegisterTranslatorTest, FitsInReg16Operand) {
- EXPECT_GT(window_start(), 0);
- EXPECT_TRUE(RegisterTranslator::FitsInReg16Operand(
- Register::FromParameterIndex(0, 3)));
- EXPECT_TRUE(RegisterTranslator::FitsInReg16Operand(
- Register::FromParameterIndex(2, 3)));
- EXPECT_TRUE(RegisterTranslator::FitsInReg16Operand(
- Register::FromParameterIndex(0, 999)));
- EXPECT_TRUE(RegisterTranslator::FitsInReg16Operand(
- Register::FromParameterIndex(0, Register::MaxParameterIndex() + 1)));
- EXPECT_TRUE(RegisterTranslator::FitsInReg16Operand(Register(0)));
- EXPECT_TRUE(
- RegisterTranslator::FitsInReg16Operand(Register(window_start() - 1)));
- EXPECT_TRUE(RegisterTranslator::FitsInReg16Operand(Register(kMaxInt8 + 1)));
- EXPECT_TRUE(RegisterTranslator::FitsInReg16Operand(Register(kMaxInt8 + 2)));
- for (int index = 0; index <= kMaxInt16 - window_width(); index += 1) {
- EXPECT_TRUE(RegisterTranslator::FitsInReg16Operand(Register(index)));
- }
- for (int index = Register::MaxRegisterIndex() - window_width() + 1;
- index < Register::MaxRegisterIndex() + 2; index += 1) {
- EXPECT_FALSE(RegisterTranslator::FitsInReg16Operand(Register(index)));
- }
-}
-
-TEST_F(RegisterTranslatorTest, NoTranslationRequired) {
- Register window_reg(window_start());
- Register local_reg(57);
- uint32_t operands[] = {local_reg.ToRawOperand()};
- translator()->TranslateInputRegisters(Bytecode::kLdar, operands, 1);
- translator()->TranslateOutputRegisters();
- EXPECT_EQ(0, move_count());
-
- Register param_reg = Register::FromParameterIndex(129, 130);
- operands[0] = param_reg.ToRawOperand();
- translator()->TranslateInputRegisters(Bytecode::kAdd, operands, 1);
- translator()->TranslateOutputRegisters();
- EXPECT_EQ(0, move_count());
-}
-
-TEST_F(RegisterTranslatorTest, TranslationRequired) {
- Register window_reg(window_start());
- Register local_reg(137);
- Register local_reg_translated(local_reg.index() + window_width());
-
- uint32_t operands[] = {local_reg.ToRawOperand()};
- translator()->TranslateInputRegisters(Bytecode::kLdar, operands, 1);
- EXPECT_EQ(1, move_count());
- EXPECT_TRUE(PopMoveAndMatch(local_reg_translated, window_reg));
- translator()->TranslateOutputRegisters();
- EXPECT_EQ(1, move_count());
- EXPECT_FALSE(PopMoveAndMatch(window_reg, local_reg_translated));
-
- operands[0] = local_reg.ToRawOperand();
- translator()->TranslateInputRegisters(Bytecode::kStar, operands, 1);
- EXPECT_EQ(1, move_count());
- EXPECT_FALSE(PopMoveAndMatch(local_reg_translated, window_reg));
- translator()->TranslateOutputRegisters();
- EXPECT_EQ(2, move_count());
- EXPECT_TRUE(PopMoveAndMatch(window_reg, local_reg_translated));
-
- Register param_reg = Register::FromParameterIndex(0, 130);
- operands[0] = {param_reg.ToRawOperand()};
- translator()->TranslateInputRegisters(Bytecode::kLdar, operands, 1);
- EXPECT_EQ(3, move_count());
- EXPECT_TRUE(PopMoveAndMatch(param_reg, window_reg));
- translator()->TranslateOutputRegisters();
- EXPECT_EQ(3, move_count());
- EXPECT_FALSE(PopMoveAndMatch(window_reg, param_reg));
-
- operands[0] = {param_reg.ToRawOperand()};
- translator()->TranslateInputRegisters(Bytecode::kStar, operands, 1);
- EXPECT_EQ(3, move_count());
- EXPECT_FALSE(PopMoveAndMatch(local_reg_translated, window_reg));
- translator()->TranslateOutputRegisters();
- EXPECT_EQ(4, move_count());
- EXPECT_TRUE(PopMoveAndMatch(window_reg, param_reg));
-}
-
-TEST_F(RegisterTranslatorTest, RangeTranslation) {
- Register window0(window_start());
- Register window1(window_start() + 1);
- Register window2(window_start() + 2);
- uint32_t operands[3];
-
- // Bytecode::kNew with valid range operand.
- Register constructor0(0);
- Register args0(1);
- operands[0] = constructor0.ToRawOperand();
- operands[1] = args0.ToRawOperand();
- operands[2] = 1;
- translator()->TranslateInputRegisters(Bytecode::kNew, operands, 3);
- translator()->TranslateOutputRegisters();
- EXPECT_EQ(0, move_count());
-
- // Bytecode::kNewWide with valid range operand.
- Register constructor1(128);
- Register constructor1_translated(constructor1.index() + window_width());
- Register args1(129);
- Register args1_translated(args1.index() + window_width());
- operands[0] = constructor1.ToRawOperand();
- operands[1] = args1.ToRawOperand();
- operands[2] = 3;
- translator()->TranslateInputRegisters(Bytecode::kNewWide, operands, 3);
- translator()->TranslateOutputRegisters();
- EXPECT_EQ(0, move_count());
-}
-
-TEST_F(RegisterTranslatorTest, BadRange0) {
- // Bytecode::kNew with invalid range operand (kMaybeReg8).
- Register constructor1(128);
- Register args1(129);
- uint32_t operands[] = {constructor1.ToRawOperand(), args1.ToRawOperand(), 3};
- ASSERT_DEATH_IF_SUPPORTED(
- translator()->TranslateInputRegisters(Bytecode::kNew, operands, 3),
- kBadOperandRegex);
-}
-
-TEST_F(RegisterTranslatorTest, BadRange1) {
- // Bytecode::kForInPrepare with invalid range operand (kRegTriple8)
- Register for_in_state(160);
- Register for_in_state_translated(for_in_state.index() + window_width());
- uint32_t operands[] = {for_in_state.ToRawOperand()};
- ASSERT_DEATH_IF_SUPPORTED(translator()->TranslateInputRegisters(
- Bytecode::kForInPrepare, operands, 1),
- kBadOperandRegex);
-}
-
-TEST_F(RegisterTranslatorTest, BadRange2) {
- // Bytecode::kForInNext with invalid range operand (kRegPair8)
- Register receiver(192);
- Register receiver_translated(receiver.index() + window_width());
- Register index(193);
- Register index_translated(index.index() + window_width());
- Register cache_info_pair(194);
- Register cache_info_pair_translated(cache_info_pair.index() + window_width());
- uint32_t operands[] = {receiver.ToRawOperand(), index.ToRawOperand(),
- cache_info_pair.ToRawOperand()};
- ASSERT_DEATH_IF_SUPPORTED(
- translator()->TranslateInputRegisters(Bytecode::kForInNext, operands, 3),
- kBadOperandRegex);
-}
-
-} // namespace interpreter
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/source-position-table-unittest.cc b/deps/v8/test/unittests/interpreter/source-position-table-unittest.cc
new file mode 100644
index 0000000000..d62302a2cd
--- /dev/null
+++ b/deps/v8/test/unittests/interpreter/source-position-table-unittest.cc
@@ -0,0 +1,84 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/interpreter/source-position-table.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class SourcePositionTableTest : public TestWithIsolateAndZone {
+ public:
+ SourcePositionTableTest() {}
+ ~SourcePositionTableTest() override {}
+};
+
+// Some random offsets, mostly at 'suspicious' bit boundaries.
+static int offsets[] = {0, 1, 2, 3, 4, 30, 31, 32,
+ 33, 62, 63, 64, 65, 126, 127, 128,
+ 129, 250, 1000, 9999, 12000, 31415926};
+
+TEST_F(SourcePositionTableTest, EncodeStatement) {
+ SourcePositionTableBuilder builder(isolate(), zone());
+ for (int i = 0; i < arraysize(offsets); i++) {
+ builder.AddStatementPosition(offsets[i], offsets[i]);
+ }
+
+ // To test correctness, we rely on the assertions in ToSourcePositionTable().
+ // (Also below.)
+ CHECK(!builder.ToSourcePositionTable().is_null());
+}
+
+TEST_F(SourcePositionTableTest, EncodeStatementDuplicates) {
+ SourcePositionTableBuilder builder(isolate(), zone());
+ for (int i = 0; i < arraysize(offsets); i++) {
+ builder.AddStatementPosition(offsets[i], offsets[i]);
+ builder.AddStatementPosition(offsets[i], offsets[i] + 1);
+ }
+
+ // To test correctness, we rely on the assertions in ToSourcePositionTable().
+ // (Also below.)
+ CHECK(!builder.ToSourcePositionTable().is_null());
+}
+
+TEST_F(SourcePositionTableTest, EncodeExpression) {
+ SourcePositionTableBuilder builder(isolate(), zone());
+ for (int i = 0; i < arraysize(offsets); i++) {
+ builder.AddExpressionPosition(offsets[i], offsets[i]);
+ }
+ CHECK(!builder.ToSourcePositionTable().is_null());
+}
+
+TEST_F(SourcePositionTableTest, EncodeAscending) {
+ SourcePositionTableBuilder builder(isolate(), zone());
+
+ int accumulator = 0;
+ for (int i = 0; i < arraysize(offsets); i++) {
+ accumulator += offsets[i];
+ if (i % 2) {
+ builder.AddStatementPosition(accumulator, accumulator);
+ } else {
+ builder.AddExpressionPosition(accumulator, accumulator);
+ }
+ }
+
+ // Also test negative offsets:
+ for (int i = 0; i < arraysize(offsets); i++) {
+ accumulator -= offsets[i];
+ if (i % 2) {
+ builder.AddStatementPosition(accumulator, accumulator);
+ } else {
+ builder.AddExpressionPosition(accumulator, accumulator);
+ }
+ }
+
+ CHECK(!builder.ToSourcePositionTable().is_null());
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/runtime/runtime-interpreter-unittest.cc b/deps/v8/test/unittests/runtime/runtime-interpreter-unittest.cc
deleted file mode 100644
index c10ddcd319..0000000000
--- a/deps/v8/test/unittests/runtime/runtime-interpreter-unittest.cc
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "src/factory.h"
-#include "src/heap/heap.h"
-#include "src/heap/heap-inl.h"
-#include "src/runtime/runtime.h"
-#include "test/unittests/test-utils.h"
-
-namespace v8 {
-namespace internal {
-namespace interpreter {
-
-class RuntimeInterpreterTest : public TestWithIsolateAndZone {
- public:
- typedef Object* (*RuntimeMethod)(int, Object**, Isolate*);
-
- RuntimeInterpreterTest() {}
- ~RuntimeInterpreterTest() override {}
-
- bool TestOperatorWithObjects(RuntimeMethod method, Handle<Object> lhs,
- Handle<Object> rhs, bool expected);
-};
-
-
-bool RuntimeInterpreterTest::TestOperatorWithObjects(RuntimeMethod method,
- Handle<Object> lhs,
- Handle<Object> rhs,
- bool expected) {
- Object* args_object[] = {*rhs, *lhs};
- Handle<Object> result =
- handle(method(2, &args_object[1], isolate()), isolate());
- CHECK(result->IsTrue() || result->IsFalse());
- return result->IsTrue() == expected;
-}
-
-
-TEST_F(RuntimeInterpreterTest, ToBoolean) {
- double quiet_nan = std::numeric_limits<double>::quiet_NaN();
- std::pair<Handle<Object>, bool> cases[] = {
- std::make_pair(isolate()->factory()->NewNumberFromInt(0), false),
- std::make_pair(isolate()->factory()->NewNumberFromInt(1), true),
- std::make_pair(isolate()->factory()->NewNumberFromInt(100), true),
- std::make_pair(isolate()->factory()->NewNumberFromInt(-1), true),
- std::make_pair(isolate()->factory()->NewNumber(7.7), true),
- std::make_pair(isolate()->factory()->NewNumber(0.00001), true),
- std::make_pair(isolate()->factory()->NewNumber(quiet_nan), false),
- std::make_pair(isolate()->factory()->NewHeapNumber(0.0), false),
- std::make_pair(isolate()->factory()->undefined_value(), false),
- std::make_pair(isolate()->factory()->null_value(), false),
- std::make_pair(isolate()->factory()->true_value(), true),
- std::make_pair(isolate()->factory()->false_value(), false),
- std::make_pair(isolate()->factory()->NewStringFromStaticChars(""), false),
- std::make_pair(isolate()->factory()->NewStringFromStaticChars("_"), true),
- };
-
- for (size_t i = 0; i < arraysize(cases); i++) {
- auto& value_expected_tuple = cases[i];
- Object* args_object[] = {*value_expected_tuple.first};
- Handle<Object> result = handle(
- Runtime_InterpreterToBoolean(1, &args_object[0], isolate()), isolate());
- CHECK(result->IsBoolean());
- CHECK_EQ(result->IsTrue(), value_expected_tuple.second);
- }
-}
-
-
-} // namespace interpreter
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/unittests/test-utils.h b/deps/v8/test/unittests/test-utils.h
index 78283bff57..1342510b61 100644
--- a/deps/v8/test/unittests/test-utils.h
+++ b/deps/v8/test/unittests/test-utils.h
@@ -93,12 +93,13 @@ class TestWithIsolate : public virtual ::v8::TestWithIsolate {
class TestWithZone : public virtual ::testing::Test {
public:
- TestWithZone() {}
+ TestWithZone() : zone_(&allocator_) {}
virtual ~TestWithZone();
Zone* zone() { return &zone_; }
private:
+ base::AccountingAllocator allocator_;
Zone zone_;
DISALLOW_COPY_AND_ASSIGN(TestWithZone);
@@ -107,12 +108,13 @@ class TestWithZone : public virtual ::testing::Test {
class TestWithIsolateAndZone : public virtual TestWithIsolate {
public:
- TestWithIsolateAndZone() {}
+ TestWithIsolateAndZone() : zone_(&allocator_) {}
virtual ~TestWithIsolateAndZone();
Zone* zone() { return &zone_; }
private:
+ base::AccountingAllocator allocator_;
Zone zone_;
DISALLOW_COPY_AND_ASSIGN(TestWithIsolateAndZone);
diff --git a/deps/v8/test/unittests/unittests.gyp b/deps/v8/test/unittests/unittests.gyp
index 638fd847bf..003281b020 100644
--- a/deps/v8/test/unittests/unittests.gyp
+++ b/deps/v8/test/unittests/unittests.gyp
@@ -83,7 +83,6 @@
'compiler/opcodes-unittest.cc',
'compiler/register-allocator-unittest.cc',
'compiler/schedule-unittest.cc',
- 'compiler/select-lowering-unittest.cc',
'compiler/scheduler-unittest.cc',
'compiler/scheduler-rpo-unittest.cc',
'compiler/simplified-operator-reducer-unittest.cc',
@@ -101,22 +100,23 @@
'interpreter/constant-array-builder-unittest.cc',
'interpreter/interpreter-assembler-unittest.cc',
'interpreter/interpreter-assembler-unittest.h',
- 'interpreter/register-translator-unittest.cc',
+ 'interpreter/source-position-table-unittest.cc',
'libplatform/default-platform-unittest.cc',
'libplatform/task-queue-unittest.cc',
'libplatform/worker-thread-unittest.cc',
'heap/bitmap-unittest.cc',
'heap/gc-idle-time-handler-unittest.cc',
+ 'heap/gc-tracer-unittest.cc',
'heap/memory-reducer-unittest.cc',
'heap/heap-unittest.cc',
'heap/scavenge-job-unittest.cc',
'heap/slot-set-unittest.cc',
'locked-queue-unittest.cc',
'run-all-unittests.cc',
- 'runtime/runtime-interpreter-unittest.cc',
'test-utils.h',
'test-utils.cc',
'wasm/ast-decoder-unittest.cc',
+ 'wasm/decoder-unittest.cc',
'wasm/encoder-unittest.cc',
'wasm/loop-assignment-analysis-unittest.cc',
'wasm/module-decoder-unittest.cc',
@@ -158,6 +158,11 @@
'compiler/ppc/instruction-selector-ppc-unittest.cc',
],
}],
+ ['v8_target_arch=="s390" or v8_target_arch=="s390x"', {
+ 'sources': [ ### gcmole(arch:s390) ###
+ 'compiler/s390/instruction-selector-s390-unittest.cc',
+ ],
+ }],
['OS=="aix"', {
'ldflags': [ '-Wl,-bbigtoc' ],
}],
diff --git a/deps/v8/test/unittests/unittests.status b/deps/v8/test/unittests/unittests.status
index 18201cd81c..40b5754f2a 100644
--- a/deps/v8/test/unittests/unittests.status
+++ b/deps/v8/test/unittests/unittests.status
@@ -9,6 +9,8 @@
'WasmFunctionVerifyTest*': [SKIP],
'WasmDecoderTest.TableSwitch*': [SKIP],
'WasmDecoderTest.AllLoadMemCombinations': [SKIP],
+ 'AstDecoderTest.AllLoadMemCombinations': [SKIP],
+ 'AstDecoderTest.AllStoreMemCombinations': [SKIP],
+ 'Bytecodes.DecodeBytecodeAndOperands': [SKIP],
}], # 'byteorder == big'
-
]
diff --git a/deps/v8/test/unittests/wasm/ast-decoder-unittest.cc b/deps/v8/test/unittests/wasm/ast-decoder-unittest.cc
index 672158714a..0b1b79ea86 100644
--- a/deps/v8/test/unittests/wasm/ast-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/ast-decoder-unittest.cc
@@ -55,51 +55,34 @@ static const WasmOpcode kInt32BinopOpcodes[] = {
Verify(kError, env, code, code + arraysize(code)); \
} while (false)
-#define VERIFY(...) \
- do { \
- static const byte code[] = {__VA_ARGS__}; \
- Verify(kSuccess, &env_v_i, code, code + sizeof(code)); \
+#define VERIFY(...) \
+ do { \
+ static const byte code[] = {__VA_ARGS__}; \
+ Verify(kSuccess, sigs.v_i(), code, code + sizeof(code)); \
} while (false)
-
-class WasmDecoderTest : public TestWithZone {
+class AstDecoderTest : public TestWithZone {
public:
- WasmDecoderTest() : TestWithZone(), sigs() {
- init_env(&env_i_i, sigs.i_i());
- init_env(&env_v_v, sigs.v_v());
- init_env(&env_v_i, sigs.v_i());
- init_env(&env_i_f, sigs.i_f());
- init_env(&env_i_d, sigs.i_d());
- init_env(&env_l_l, sigs.l_l());
- init_env(&env_f_ff, sigs.f_ff());
- init_env(&env_d_dd, sigs.d_dd());
- }
+ typedef std::pair<uint32_t, LocalType> LocalsDecl;
+ AstDecoderTest() : module(nullptr) {}
TestSignatures sigs;
+ ModuleEnv* module;
+ LocalDeclEncoder local_decls;
+
+ void AddLocals(LocalType type, uint32_t count) {
+ local_decls.AddLocals(count, type);
+ }
- FunctionEnv env_i_i;
- FunctionEnv env_v_v;
- FunctionEnv env_v_i;
- FunctionEnv env_i_f;
- FunctionEnv env_i_d;
- FunctionEnv env_l_l;
- FunctionEnv env_f_ff;
- FunctionEnv env_d_dd;
-
- static void init_env(FunctionEnv* env, FunctionSig* sig) {
- env->module = nullptr;
- env->sig = sig;
- env->local_i32_count = 0;
- env->local_i64_count = 0;
- env->local_f32_count = 0;
- env->local_f64_count = 0;
- env->SumLocals();
- }
-
- // A wrapper around VerifyWasmCode() that renders a nice failure message.
- void Verify(ErrorCode expected, FunctionEnv* env, const byte* start,
+ // Preprends local variable declarations and renders nice error messages for
+ // verification failures.
+ void Verify(ErrorCode expected, FunctionSig* sig, const byte* start,
const byte* end) {
- TreeResult result = VerifyWasmCode(env, start, end);
+ local_decls.Prepend(&start, &end);
+ // Verify the code.
+ TreeResult result =
+ VerifyWasmCode(zone()->allocator(), module, sig, start, end);
+
if (result.error_code != expected) {
ptrdiff_t pc = result.error_pc - result.start;
ptrdiff_t pt = result.error_pt - result.start;
@@ -118,15 +101,15 @@ class WasmDecoderTest : public TestWithZone {
}
FATAL(str.str().c_str());
}
+
+ delete[] start; // local_decls.Prepend() allocated a new buffer.
}
void TestBinop(WasmOpcode opcode, FunctionSig* success) {
// op(local[0], local[1])
byte code[] = {static_cast<byte>(opcode), kExprGetLocal, 0, kExprGetLocal,
1};
- FunctionEnv env;
- init_env(&env, success);
- EXPECT_VERIFIES(&env, code);
+ EXPECT_VERIFIES(success, code);
// Try all combinations of return and parameter types.
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
@@ -138,8 +121,7 @@ class WasmDecoderTest : public TestWithZone {
types[2] != success->GetParam(1)) {
// Test signature mismatch.
FunctionSig sig(1, 2, types);
- init_env(&env, &sig);
- EXPECT_FAILURE(&env, code);
+ EXPECT_FAILURE(&sig, code);
}
}
}
@@ -153,12 +135,10 @@ class WasmDecoderTest : public TestWithZone {
void TestUnop(WasmOpcode opcode, LocalType ret_type, LocalType param_type) {
// Return(op(local[0]))
byte code[] = {static_cast<byte>(opcode), kExprGetLocal, 0};
- FunctionEnv env;
{
LocalType types[] = {ret_type, param_type};
FunctionSig sig(1, 1, types);
- init_env(&env, &sig);
- EXPECT_VERIFIES(&env, code);
+ EXPECT_VERIFIES(&sig, code);
}
// Try all combinations of return and parameter types.
@@ -168,8 +148,7 @@ class WasmDecoderTest : public TestWithZone {
if (types[0] != ret_type || types[1] != param_type) {
// Test signature mismatch.
FunctionSig sig(1, 1, types);
- init_env(&env, &sig);
- EXPECT_FAILURE(&env, code);
+ EXPECT_FAILURE(&sig, code);
}
}
}
@@ -177,211 +156,164 @@ class WasmDecoderTest : public TestWithZone {
};
-static FunctionEnv CreateInt32FunctionEnv(FunctionSig* sig, int count) {
- FunctionEnv env;
- env.module = nullptr;
- env.sig = sig;
- env.local_i32_count = count;
- env.local_f64_count = 0;
- env.local_f32_count = 0;
- env.total_locals = static_cast<unsigned>(count + sig->parameter_count());
- return env;
-}
-
-
-TEST_F(WasmDecoderTest, Int8Const) {
+TEST_F(AstDecoderTest, Int8Const) {
byte code[] = {kExprI8Const, 0};
for (int i = -128; i < 128; i++) {
code[1] = static_cast<byte>(i);
- EXPECT_VERIFIES(&env_i_i, code);
+ EXPECT_VERIFIES(sigs.i_i(), code);
}
}
-
-TEST_F(WasmDecoderTest, EmptyFunction) {
+TEST_F(AstDecoderTest, EmptyFunction) {
byte code[] = {0};
- Verify(kSuccess, &env_v_v, code, code);
- Verify(kError, &env_i_i, code, code);
+ Verify(kSuccess, sigs.v_v(), code, code);
+ Verify(kError, sigs.i_i(), code, code);
}
-
-TEST_F(WasmDecoderTest, IncompleteIf1) {
+TEST_F(AstDecoderTest, IncompleteIf1) {
byte code[] = {kExprIf};
- EXPECT_FAILURE(&env_v_v, code);
- EXPECT_FAILURE(&env_i_i, code);
+ EXPECT_FAILURE(sigs.v_v(), code);
+ EXPECT_FAILURE(sigs.i_i(), code);
}
-
-TEST_F(WasmDecoderTest, IncompleteIf2) {
+TEST_F(AstDecoderTest, IncompleteIf2) {
byte code[] = {kExprIf, kExprI8Const, 0};
- EXPECT_FAILURE(&env_v_v, code);
- EXPECT_FAILURE(&env_i_i, code);
+ EXPECT_FAILURE(sigs.v_v(), code);
+ EXPECT_FAILURE(sigs.i_i(), code);
}
-
-TEST_F(WasmDecoderTest, Int8Const_fallthru) {
+TEST_F(AstDecoderTest, Int8Const_fallthru) {
byte code[] = {kExprI8Const, 0, kExprI8Const, 1};
- EXPECT_VERIFIES(&env_i_i, code);
+ EXPECT_VERIFIES(sigs.i_i(), code);
}
-
-TEST_F(WasmDecoderTest, Int32Const) {
- byte code[] = {kExprI32Const, 0, 0, 0, 0};
- int32_t* ptr = reinterpret_cast<int32_t*>(code + 1);
+TEST_F(AstDecoderTest, Int32Const) {
const int kInc = 4498211;
for (int32_t i = kMinInt; i < kMaxInt - kInc; i = i + kInc) {
- *ptr = i;
- EXPECT_VERIFIES(&env_i_i, code);
+ // TODO(binji): expand test for other sized int32s; 1 through 5 bytes.
+ byte code[] = {WASM_I32V(i)};
+ EXPECT_VERIFIES(sigs.i_i(), code);
}
}
-
-TEST_F(WasmDecoderTest, Int8Const_fallthru2) {
- byte code[] = {kExprI8Const, 0, kExprI32Const, 1, 2, 3, 4};
- EXPECT_VERIFIES(&env_i_i, code);
+TEST_F(AstDecoderTest, Int8Const_fallthru2) {
+ byte code[] = {WASM_I8(0), WASM_I32V_4(0x1122334)};
+ EXPECT_VERIFIES(sigs.i_i(), code);
}
-
-TEST_F(WasmDecoderTest, Int64Const) {
- byte code[] = {kExprI64Const, 0, 0, 0, 0, 0, 0, 0, 0};
- int64_t* ptr = reinterpret_cast<int64_t*>(code + 1);
+TEST_F(AstDecoderTest, Int64Const) {
const int kInc = 4498211;
for (int32_t i = kMinInt; i < kMaxInt - kInc; i = i + kInc) {
- *ptr = (static_cast<int64_t>(i) << 32) | i;
- EXPECT_VERIFIES(&env_l_l, code);
+ byte code[] = {WASM_I64V((static_cast<int64_t>(i) << 32) | i)};
+ EXPECT_VERIFIES(sigs.l_l(), code);
}
}
-
-TEST_F(WasmDecoderTest, Float32Const) {
+TEST_F(AstDecoderTest, Float32Const) {
byte code[] = {kExprF32Const, 0, 0, 0, 0};
float* ptr = reinterpret_cast<float*>(code + 1);
for (int i = 0; i < 30; i++) {
*ptr = i * -7.75f;
- EXPECT_VERIFIES(&env_f_ff, code);
+ EXPECT_VERIFIES(sigs.f_ff(), code);
}
}
-
-TEST_F(WasmDecoderTest, Float64Const) {
+TEST_F(AstDecoderTest, Float64Const) {
byte code[] = {kExprF64Const, 0, 0, 0, 0, 0, 0, 0, 0};
double* ptr = reinterpret_cast<double*>(code + 1);
for (int i = 0; i < 30; i++) {
*ptr = i * 33.45;
- EXPECT_VERIFIES(&env_d_dd, code);
+ EXPECT_VERIFIES(sigs.d_dd(), code);
}
}
-
-TEST_F(WasmDecoderTest, Int32Const_off_end) {
+TEST_F(AstDecoderTest, Int32Const_off_end) {
byte code[] = {kExprI32Const, 0xaa, 0xbb, 0xcc, 0x44};
for (int size = 1; size <= 4; size++) {
- Verify(kError, &env_i_i, code, code + size);
+ Verify(kError, sigs.i_i(), code, code + size);
}
}
-
-TEST_F(WasmDecoderTest, GetLocal0_param) {
- EXPECT_VERIFIES(&env_i_i, kCodeGetLocal0);
+TEST_F(AstDecoderTest, GetLocal0_param) {
+ EXPECT_VERIFIES(sigs.i_i(), kCodeGetLocal0);
}
-
-TEST_F(WasmDecoderTest, GetLocal0_local) {
- FunctionEnv env;
- init_env(&env, sigs.i_v());
- env.AddLocals(kAstI32, 1);
- EXPECT_VERIFIES(&env, kCodeGetLocal0);
+TEST_F(AstDecoderTest, GetLocal0_local) {
+ AddLocals(kAstI32, 1);
+ EXPECT_VERIFIES(sigs.i_v(), kCodeGetLocal0);
}
-
-TEST_F(WasmDecoderTest, GetLocal0_param_n) {
+TEST_F(AstDecoderTest, GetLocal0_param_n) {
FunctionSig* array[] = {sigs.i_i(), sigs.i_ii(), sigs.i_iii()};
for (size_t i = 0; i < arraysize(array); i++) {
- FunctionEnv env = CreateInt32FunctionEnv(array[i], 0);
- EXPECT_VERIFIES(&env, kCodeGetLocal0);
+ EXPECT_VERIFIES(array[i], kCodeGetLocal0);
}
}
-
-TEST_F(WasmDecoderTest, GetLocalN_local) {
+TEST_F(AstDecoderTest, GetLocalN_local) {
for (byte i = 1; i < 8; i++) {
- FunctionEnv env = CreateInt32FunctionEnv(sigs.i_v(), i);
+ AddLocals(kAstI32, 1);
for (byte j = 0; j < i; j++) {
byte code[] = {kExprGetLocal, j};
- EXPECT_VERIFIES(&env, code);
+ EXPECT_VERIFIES(sigs.i_v(), code);
}
}
}
-
-TEST_F(WasmDecoderTest, GetLocal0_fail_no_params) {
- FunctionEnv env = CreateInt32FunctionEnv(sigs.i_v(), 0);
-
- EXPECT_FAILURE(&env, kCodeGetLocal0);
+TEST_F(AstDecoderTest, GetLocal0_fail_no_params) {
+ EXPECT_FAILURE(sigs.i_v(), kCodeGetLocal0);
}
-
-TEST_F(WasmDecoderTest, GetLocal1_fail_no_locals) {
- EXPECT_FAILURE(&env_i_i, kCodeGetLocal1);
+TEST_F(AstDecoderTest, GetLocal1_fail_no_locals) {
+ EXPECT_FAILURE(sigs.i_i(), kCodeGetLocal1);
}
-
-TEST_F(WasmDecoderTest, GetLocal_off_end) {
+TEST_F(AstDecoderTest, GetLocal_off_end) {
static const byte code[] = {kExprGetLocal};
- EXPECT_FAILURE(&env_i_i, code);
+ EXPECT_FAILURE(sigs.i_i(), code);
}
+TEST_F(AstDecoderTest, GetLocal_varint) {
+ const int kMaxLocals = 8000000;
+ AddLocals(kAstI32, kMaxLocals);
-TEST_F(WasmDecoderTest, GetLocal_varint) {
- env_i_i.local_i32_count = 1000000000;
- env_i_i.total_locals += 1000000000;
-
- {
- static const byte code[] = {kExprGetLocal, 0xFF, 0x01};
- EXPECT_VERIFIES(&env_i_i, code);
- EXPECT_FAILURE(&env_i_f, code);
+ for (int index = 0; index < kMaxLocals; index = index * 11 + 5) {
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), kExprGetLocal, U32V_1(index));
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), kExprGetLocal, U32V_2(index));
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), kExprGetLocal, U32V_3(index));
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), kExprGetLocal, U32V_4(index));
}
- {
- static const byte code[] = {kExprGetLocal, 0xF0, 0x80, 0x01};
- EXPECT_VERIFIES(&env_i_i, code);
- EXPECT_FAILURE(&env_i_f, code);
- }
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), kExprGetLocal, U32V_5(kMaxLocals - 1));
- {
- static const byte code[] = {kExprGetLocal, 0xF2, 0x81, 0x82, 0x01};
- EXPECT_VERIFIES(&env_i_i, code);
- EXPECT_FAILURE(&env_i_f, code);
- }
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), kExprGetLocal, U32V_4(kMaxLocals - 1));
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), kExprGetLocal, U32V_4(kMaxLocals));
+ EXPECT_FAILURE_INLINE(sigs.i_i(), kExprGetLocal, U32V_4(kMaxLocals + 1));
- {
- static const byte code[] = {kExprGetLocal, 0xF3, 0xA1, 0xB1, 0xC1, 0x01};
- EXPECT_VERIFIES(&env_i_i, code);
- EXPECT_FAILURE(&env_i_f, code);
- }
+ EXPECT_FAILURE_INLINE(sigs.i_v(), kExprGetLocal, U32V_4(kMaxLocals));
+ EXPECT_FAILURE_INLINE(sigs.i_v(), kExprGetLocal, U32V_4(kMaxLocals + 1));
}
-
-TEST_F(WasmDecoderTest, Binops_off_end) {
+TEST_F(AstDecoderTest, Binops_off_end) {
byte code1[] = {0}; // [opcode]
for (size_t i = 0; i < arraysize(kInt32BinopOpcodes); i++) {
code1[0] = kInt32BinopOpcodes[i];
- EXPECT_FAILURE(&env_i_i, code1);
+ EXPECT_FAILURE(sigs.i_i(), code1);
}
byte code3[] = {0, kExprGetLocal, 0}; // [opcode] [expr]
for (size_t i = 0; i < arraysize(kInt32BinopOpcodes); i++) {
code3[0] = kInt32BinopOpcodes[i];
- EXPECT_FAILURE(&env_i_i, code3);
+ EXPECT_FAILURE(sigs.i_i(), code3);
}
byte code4[] = {0, kExprGetLocal, 0, 0}; // [opcode] [expr] [opcode]
for (size_t i = 0; i < arraysize(kInt32BinopOpcodes); i++) {
code4[0] = kInt32BinopOpcodes[i];
code4[3] = kInt32BinopOpcodes[i];
- EXPECT_FAILURE(&env_i_i, code4);
+ EXPECT_FAILURE(sigs.i_i(), code4);
}
}
@@ -389,79 +321,68 @@ TEST_F(WasmDecoderTest, Binops_off_end) {
//===================================================================
//== Statements
//===================================================================
-TEST_F(WasmDecoderTest, Nop) {
+TEST_F(AstDecoderTest, Nop) {
static const byte code[] = {kExprNop};
- EXPECT_VERIFIES(&env_v_v, code);
+ EXPECT_VERIFIES(sigs.v_v(), code);
}
-
-TEST_F(WasmDecoderTest, SetLocal0_param) {
+TEST_F(AstDecoderTest, SetLocal0_param) {
static const byte code[] = {kExprSetLocal, 0, kExprI8Const, 0};
- EXPECT_VERIFIES(&env_i_i, code);
+ EXPECT_VERIFIES(sigs.i_i(), code);
}
-
-TEST_F(WasmDecoderTest, SetLocal0_local) {
+TEST_F(AstDecoderTest, SetLocal0_local) {
byte code[] = {kExprSetLocal, 0, kExprI8Const, 0};
- FunctionEnv env = CreateInt32FunctionEnv(sigs.i_v(), 1);
-
- EXPECT_VERIFIES(&env, code);
+ AddLocals(kAstI32, 1);
+ EXPECT_VERIFIES(sigs.i_v(), code);
}
-
-TEST_F(WasmDecoderTest, SetLocalN_local) {
+TEST_F(AstDecoderTest, SetLocalN_local) {
for (byte i = 1; i < 8; i++) {
- FunctionEnv env = CreateInt32FunctionEnv(sigs.i_v(), i);
+ AddLocals(kAstI32, 1);
for (byte j = 0; j < i; j++) {
byte code[] = {kExprSetLocal, j, kExprI8Const, i};
- EXPECT_VERIFIES(&env, code);
+ EXPECT_VERIFIES(sigs.v_v(), code);
}
}
}
-
-TEST_F(WasmDecoderTest, Block0) {
+TEST_F(AstDecoderTest, Block0) {
static const byte code[] = {kExprBlock, 0};
- EXPECT_VERIFIES(&env_v_v, code);
+ EXPECT_VERIFIES(sigs.v_v(), code);
}
-
-TEST_F(WasmDecoderTest, Block0_fallthru1) {
+TEST_F(AstDecoderTest, Block0_fallthru1) {
static const byte code[] = {kExprBlock, 0, kExprBlock, 0};
- EXPECT_VERIFIES(&env_v_v, code);
+ EXPECT_VERIFIES(sigs.v_v(), code);
}
-
-TEST_F(WasmDecoderTest, Block1) {
+TEST_F(AstDecoderTest, Block1) {
static const byte code[] = {kExprBlock, 1, kExprSetLocal, 0, kExprI8Const, 0};
- EXPECT_VERIFIES(&env_i_i, code);
+ EXPECT_VERIFIES(sigs.i_i(), code);
}
-
-TEST_F(WasmDecoderTest, Block0_fallthru2) {
+TEST_F(AstDecoderTest, Block0_fallthru2) {
static const byte code[] = {kExprBlock, 0, kExprSetLocal, 0, kExprI8Const, 0};
- EXPECT_VERIFIES(&env_i_i, code);
+ EXPECT_VERIFIES(sigs.i_i(), code);
}
-
-TEST_F(WasmDecoderTest, Block2) {
+TEST_F(AstDecoderTest, Block2) {
static const byte code[] = {kExprBlock, 2, // --
kExprSetLocal, 0, kExprI8Const, 0, // --
kExprSetLocal, 0, kExprI8Const, 0}; // --
- EXPECT_VERIFIES(&env_i_i, code);
+ EXPECT_VERIFIES(sigs.i_i(), code);
}
-
-TEST_F(WasmDecoderTest, Block2_fallthru) {
+TEST_F(AstDecoderTest, Block2_fallthru) {
static const byte code[] = {kExprBlock, 2, // --
kExprSetLocal, 0, kExprI8Const, 0, // --
kExprSetLocal, 0, kExprI8Const, 0, // --
kExprI8Const, 11}; // --
- EXPECT_VERIFIES(&env_i_i, code);
+ EXPECT_VERIFIES(sigs.i_i(), code);
}
-
-TEST_F(WasmDecoderTest, BlockN) {
+TEST_F(AstDecoderTest, BlockN) {
byte block[] = {kExprBlock, 2};
for (size_t i = 0; i < 10; i++) {
@@ -473,106 +394,91 @@ TEST_F(WasmDecoderTest, BlockN) {
memcpy(code + sizeof(block) + j * sizeof(kCodeSetLocal0), kCodeSetLocal0,
sizeof(kCodeSetLocal0));
}
- Verify(kSuccess, &env_v_i, code, code + total);
+ Verify(kSuccess, sigs.v_i(), code, code + total);
free(code);
}
}
-
-TEST_F(WasmDecoderTest, BlockN_off_end) {
+TEST_F(AstDecoderTest, BlockN_off_end) {
for (byte i = 2; i < 10; i++) {
byte code[] = {kExprBlock, i, kExprNop};
- EXPECT_FAILURE(&env_v_v, code);
+ EXPECT_FAILURE(sigs.v_v(), code);
}
}
-
-TEST_F(WasmDecoderTest, Block1_break) {
+TEST_F(AstDecoderTest, Block1_break) {
static const byte code[] = {kExprBlock, 1, kExprBr, 0, kExprNop};
- EXPECT_VERIFIES(&env_v_v, code);
+ EXPECT_VERIFIES(sigs.v_v(), code);
}
-
-TEST_F(WasmDecoderTest, Block2_break) {
+TEST_F(AstDecoderTest, Block2_break) {
static const byte code[] = {kExprBlock, 2, kExprNop, kExprBr, 0, kExprNop};
- EXPECT_VERIFIES(&env_v_v, code);
+ EXPECT_VERIFIES(sigs.v_v(), code);
}
-
-TEST_F(WasmDecoderTest, Block1_continue) {
+TEST_F(AstDecoderTest, Block1_continue) {
static const byte code[] = {kExprBlock, 1, kExprBr, 1, kExprNop};
- EXPECT_FAILURE(&env_v_v, code);
+ EXPECT_FAILURE(sigs.v_v(), code);
}
-
-TEST_F(WasmDecoderTest, Block2_continue) {
+TEST_F(AstDecoderTest, Block2_continue) {
static const byte code[] = {kExprBlock, 2, kExprNop, kExprBr, 1, kExprNop};
- EXPECT_FAILURE(&env_v_v, code);
+ EXPECT_FAILURE(sigs.v_v(), code);
}
-
-TEST_F(WasmDecoderTest, ExprBlock0) {
+TEST_F(AstDecoderTest, ExprBlock0) {
static const byte code[] = {kExprBlock, 0};
- EXPECT_VERIFIES(&env_v_v, code);
+ EXPECT_VERIFIES(sigs.v_v(), code);
}
-
-TEST_F(WasmDecoderTest, ExprBlock1a) {
+TEST_F(AstDecoderTest, ExprBlock1a) {
static const byte code[] = {kExprBlock, 1, kExprI8Const, 0};
- EXPECT_VERIFIES(&env_i_i, code);
+ EXPECT_VERIFIES(sigs.i_i(), code);
}
-
-TEST_F(WasmDecoderTest, ExprBlock1b) {
+TEST_F(AstDecoderTest, ExprBlock1b) {
static const byte code[] = {kExprBlock, 1, kExprI8Const, 0};
- EXPECT_FAILURE(&env_f_ff, code);
+ EXPECT_FAILURE(sigs.f_ff(), code);
}
-
-TEST_F(WasmDecoderTest, ExprBlock1c) {
+TEST_F(AstDecoderTest, ExprBlock1c) {
static const byte code[] = {kExprBlock, 1, kExprF32Const, 0, 0, 0, 0};
- EXPECT_VERIFIES(&env_f_ff, code);
+ EXPECT_VERIFIES(sigs.f_ff(), code);
}
-
-TEST_F(WasmDecoderTest, IfEmpty) {
+TEST_F(AstDecoderTest, IfEmpty) {
static const byte code[] = {kExprIf, kExprGetLocal, 0, kExprNop};
- EXPECT_VERIFIES(&env_v_i, code);
+ EXPECT_VERIFIES(sigs.v_i(), code);
}
-
-TEST_F(WasmDecoderTest, IfSet) {
+TEST_F(AstDecoderTest, IfSet) {
static const byte code[] = {kExprIfElse, kExprGetLocal, 0, kExprSetLocal,
0, kExprI8Const, 0, kExprNop};
- EXPECT_VERIFIES(&env_v_i, code);
+ EXPECT_VERIFIES(sigs.v_i(), code);
}
-
-TEST_F(WasmDecoderTest, IfBlock1) {
+TEST_F(AstDecoderTest, IfBlock1) {
static const byte code[] = {kExprIfElse, kExprGetLocal, 0, kExprBlock,
1, kExprSetLocal, 0, kExprI8Const,
0, kExprNop};
- EXPECT_VERIFIES(&env_v_i, code);
+ EXPECT_VERIFIES(sigs.v_i(), code);
}
-
-TEST_F(WasmDecoderTest, IfBlock2) {
+TEST_F(AstDecoderTest, IfBlock2) {
static const byte code[] = {kExprIf, kExprGetLocal, 0, kExprBlock,
2, kExprSetLocal, 0, kExprI8Const,
0, kExprSetLocal, 0, kExprI8Const,
0};
- EXPECT_VERIFIES(&env_v_i, code);
+ EXPECT_VERIFIES(sigs.v_i(), code);
}
-
-TEST_F(WasmDecoderTest, IfElseEmpty) {
+TEST_F(AstDecoderTest, IfElseEmpty) {
static const byte code[] = {kExprIfElse, kExprGetLocal, 0, kExprNop,
kExprNop};
- EXPECT_VERIFIES(&env_v_i, code);
+ EXPECT_VERIFIES(sigs.v_i(), code);
}
-
-TEST_F(WasmDecoderTest, IfElseSet) {
+TEST_F(AstDecoderTest, IfElseSet) {
static const byte code[] = {kExprIfElse,
kExprGetLocal,
0, // --
@@ -584,218 +490,192 @@ TEST_F(WasmDecoderTest, IfElseSet) {
0,
kExprI8Const,
1}; // --
- EXPECT_VERIFIES(&env_v_i, code);
+ EXPECT_VERIFIES(sigs.v_i(), code);
}
-
-TEST_F(WasmDecoderTest, IfElseUnreachable) {
+TEST_F(AstDecoderTest, IfElseUnreachable) {
static const byte code[] = {kExprIfElse, kExprI8Const, 0,
kExprUnreachable, kExprGetLocal, 0};
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
LocalType types[] = {kAstI32, kLocalTypes[i]};
- FunctionEnv env;
FunctionSig sig(1, 1, types);
- init_env(&env, &sig);
if (kLocalTypes[i] == kAstI32) {
- EXPECT_VERIFIES(&env, code);
+ EXPECT_VERIFIES(&sig, code);
} else {
- EXPECT_FAILURE(&env, code);
+ EXPECT_FAILURE(&sig, code);
}
}
}
-
-TEST_F(WasmDecoderTest, Loop0) {
+TEST_F(AstDecoderTest, Loop0) {
static const byte code[] = {kExprLoop, 0};
- EXPECT_VERIFIES(&env_v_v, code);
+ EXPECT_VERIFIES(sigs.v_v(), code);
}
-
-TEST_F(WasmDecoderTest, Loop1) {
+TEST_F(AstDecoderTest, Loop1) {
static const byte code[] = {kExprLoop, 1, kExprSetLocal, 0, kExprI8Const, 0};
- EXPECT_VERIFIES(&env_v_i, code);
+ EXPECT_VERIFIES(sigs.v_i(), code);
}
-
-TEST_F(WasmDecoderTest, Loop2) {
+TEST_F(AstDecoderTest, Loop2) {
static const byte code[] = {kExprLoop, 2, // --
kExprSetLocal, 0, kExprI8Const, 0, // --
kExprSetLocal, 0, kExprI8Const, 0}; // --
- EXPECT_VERIFIES(&env_v_i, code);
+ EXPECT_VERIFIES(sigs.v_i(), code);
}
-
-TEST_F(WasmDecoderTest, Loop1_continue) {
+TEST_F(AstDecoderTest, Loop1_continue) {
static const byte code[] = {kExprLoop, 1, kExprBr, 0, kExprNop};
- EXPECT_VERIFIES(&env_v_v, code);
+ EXPECT_VERIFIES(sigs.v_v(), code);
}
-
-TEST_F(WasmDecoderTest, Loop1_break) {
+TEST_F(AstDecoderTest, Loop1_break) {
static const byte code[] = {kExprLoop, 1, kExprBr, 1, kExprNop};
- EXPECT_VERIFIES(&env_v_v, code);
+ EXPECT_VERIFIES(sigs.v_v(), code);
}
-
-TEST_F(WasmDecoderTest, Loop2_continue) {
+TEST_F(AstDecoderTest, Loop2_continue) {
static const byte code[] = {kExprLoop, 2, // --
kExprSetLocal, 0, kExprI8Const, 0, // --
kExprBr, 0, kExprNop}; // --
- EXPECT_VERIFIES(&env_v_i, code);
+ EXPECT_VERIFIES(sigs.v_i(), code);
}
-
-TEST_F(WasmDecoderTest, Loop2_break) {
+TEST_F(AstDecoderTest, Loop2_break) {
static const byte code[] = {kExprLoop, 2, // --
kExprSetLocal, 0, kExprI8Const, 0, // --
kExprBr, 1, kExprNop}; // --
- EXPECT_VERIFIES(&env_v_i, code);
+ EXPECT_VERIFIES(sigs.v_i(), code);
}
-
-TEST_F(WasmDecoderTest, ExprLoop0) {
+TEST_F(AstDecoderTest, ExprLoop0) {
static const byte code[] = {kExprLoop, 0};
- EXPECT_VERIFIES(&env_v_v, code);
+ EXPECT_VERIFIES(sigs.v_v(), code);
}
-
-TEST_F(WasmDecoderTest, ExprLoop1a) {
+TEST_F(AstDecoderTest, ExprLoop1a) {
static const byte code[] = {kExprLoop, 1, kExprBr, 0, kExprI8Const, 0};
- EXPECT_VERIFIES(&env_i_i, code);
+ EXPECT_VERIFIES(sigs.i_i(), code);
}
-
-TEST_F(WasmDecoderTest, ExprLoop1b) {
+TEST_F(AstDecoderTest, ExprLoop1b) {
static const byte code[] = {kExprLoop, 1, kExprBr, 0, kExprI8Const, 0};
- EXPECT_VERIFIES(&env_i_i, code);
+ EXPECT_VERIFIES(sigs.i_i(), code);
}
-
-TEST_F(WasmDecoderTest, ExprLoop2_unreachable) {
+TEST_F(AstDecoderTest, ExprLoop2_unreachable) {
static const byte code[] = {kExprLoop, 2, kExprBr, 0,
kExprI8Const, 0, kExprNop};
- EXPECT_VERIFIES(&env_i_i, code);
+ EXPECT_VERIFIES(sigs.i_i(), code);
}
-
-TEST_F(WasmDecoderTest, ReturnVoid1) {
+TEST_F(AstDecoderTest, ReturnVoid1) {
static const byte code[] = {kExprNop};
- EXPECT_VERIFIES(&env_v_v, code);
- EXPECT_FAILURE(&env_i_i, code);
- EXPECT_FAILURE(&env_i_f, code);
+ EXPECT_VERIFIES(sigs.v_v(), code);
+ EXPECT_FAILURE(sigs.i_i(), code);
+ EXPECT_FAILURE(sigs.i_f(), code);
}
-
-TEST_F(WasmDecoderTest, ReturnVoid2) {
+TEST_F(AstDecoderTest, ReturnVoid2) {
static const byte code[] = {kExprBlock, 1, kExprBr, 0, kExprNop};
- EXPECT_VERIFIES(&env_v_v, code);
- EXPECT_FAILURE(&env_i_i, code);
- EXPECT_FAILURE(&env_i_f, code);
+ EXPECT_VERIFIES(sigs.v_v(), code);
+ EXPECT_FAILURE(sigs.i_i(), code);
+ EXPECT_FAILURE(sigs.i_f(), code);
}
+TEST_F(AstDecoderTest, ReturnVoid3) {
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), kExprI8Const, 0);
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), kExprI32Const, 0, 0, 0, 0);
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), kExprI64Const, 0, 0, 0, 0, 0, 0, 0, 0);
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), kExprF32Const, 0, 0, 0, 0);
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), kExprF64Const, 0, 0, 0, 0, 0, 0, 0, 0);
-TEST_F(WasmDecoderTest, ReturnVoid3) {
- EXPECT_VERIFIES_INLINE(&env_v_v, kExprI8Const, 0);
- EXPECT_VERIFIES_INLINE(&env_v_v, kExprI32Const, 0, 0, 0, 0);
- EXPECT_VERIFIES_INLINE(&env_v_v, kExprI64Const, 0, 0, 0, 0, 0, 0, 0, 0);
- EXPECT_VERIFIES_INLINE(&env_v_v, kExprF32Const, 0, 0, 0, 0);
- EXPECT_VERIFIES_INLINE(&env_v_v, kExprF64Const, 0, 0, 0, 0, 0, 0, 0, 0);
-
- EXPECT_VERIFIES_INLINE(&env_v_i, kExprGetLocal, 0);
+ EXPECT_VERIFIES_INLINE(sigs.v_i(), kExprGetLocal, 0);
}
-
-TEST_F(WasmDecoderTest, Unreachable1) {
- EXPECT_VERIFIES_INLINE(&env_v_v, kExprUnreachable);
- EXPECT_VERIFIES_INLINE(&env_v_v, kExprUnreachable, kExprUnreachable);
- EXPECT_VERIFIES_INLINE(&env_v_v, WASM_BLOCK(2, WASM_UNREACHABLE, WASM_ZERO));
- EXPECT_VERIFIES_INLINE(&env_v_v, WASM_BLOCK(2, WASM_BR(0), WASM_ZERO));
- EXPECT_VERIFIES_INLINE(&env_v_v, WASM_LOOP(2, WASM_UNREACHABLE, WASM_ZERO));
- EXPECT_VERIFIES_INLINE(&env_v_v, WASM_LOOP(2, WASM_BR(0), WASM_ZERO));
+TEST_F(AstDecoderTest, Unreachable1) {
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), kExprUnreachable);
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), kExprUnreachable, kExprUnreachable);
+ EXPECT_VERIFIES_INLINE(sigs.v_v(),
+ WASM_BLOCK(2, WASM_UNREACHABLE, WASM_ZERO));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_BLOCK(2, WASM_BR(0), WASM_ZERO));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(2, WASM_UNREACHABLE, WASM_ZERO));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(2, WASM_BR(0), WASM_ZERO));
}
-
-TEST_F(WasmDecoderTest, Codeiness) {
+TEST_F(AstDecoderTest, Codeiness) {
VERIFY(kExprLoop, 2, // --
kExprSetLocal, 0, kExprI8Const, 0, // --
kExprBr, 0, kExprNop); // --
}
-
-TEST_F(WasmDecoderTest, ExprIf1) {
+TEST_F(AstDecoderTest, ExprIf1) {
VERIFY(kExprIf, kExprGetLocal, 0, kExprI8Const, 0, kExprI8Const, 1);
VERIFY(kExprIf, kExprGetLocal, 0, kExprGetLocal, 0, kExprGetLocal, 0);
VERIFY(kExprIf, kExprGetLocal, 0, kExprI32Add, kExprGetLocal, 0,
kExprGetLocal, 0, kExprI8Const, 1);
}
-
-TEST_F(WasmDecoderTest, ExprIf_off_end) {
+TEST_F(AstDecoderTest, ExprIf_off_end) {
static const byte kCode[] = {kExprIf, kExprGetLocal, 0, kExprGetLocal,
0, kExprGetLocal, 0};
for (size_t len = 1; len < arraysize(kCode); len++) {
- Verify(kError, &env_i_i, kCode, kCode + len);
+ Verify(kError, sigs.i_i(), kCode, kCode + len);
}
}
-
-TEST_F(WasmDecoderTest, ExprIf_type) {
+TEST_F(AstDecoderTest, ExprIf_type) {
{
// float|double ? 1 : 2
static const byte kCode[] = {kExprIfElse, kExprGetLocal, 0, kExprI8Const,
1, kExprI8Const, 2};
- EXPECT_FAILURE(&env_i_f, kCode);
- EXPECT_FAILURE(&env_i_d, kCode);
+ EXPECT_FAILURE(sigs.i_f(), kCode);
+ EXPECT_FAILURE(sigs.i_d(), kCode);
}
{
// 1 ? float|double : 2
static const byte kCode[] = {kExprIfElse, kExprI8Const, 1, kExprGetLocal,
0, kExprI8Const, 2};
- EXPECT_FAILURE(&env_i_f, kCode);
- EXPECT_FAILURE(&env_i_d, kCode);
+ EXPECT_FAILURE(sigs.i_f(), kCode);
+ EXPECT_FAILURE(sigs.i_d(), kCode);
}
{
// stmt ? 0 : 1
static const byte kCode[] = {kExprIfElse, kExprNop, kExprI8Const,
0, kExprI8Const, 1};
- EXPECT_FAILURE(&env_i_i, kCode);
+ EXPECT_FAILURE(sigs.i_i(), kCode);
}
{
// 0 ? stmt : 1
static const byte kCode[] = {kExprIfElse, kExprI8Const, 0,
kExprNop, kExprI8Const, 1};
- EXPECT_FAILURE(&env_i_i, kCode);
+ EXPECT_FAILURE(sigs.i_i(), kCode);
}
{
// 0 ? 1 : stmt
static const byte kCode[] = {kExprIfElse, kExprI8Const, 0, kExprI8Const, 1,
0, kExprBlock};
- EXPECT_FAILURE(&env_i_i, kCode);
+ EXPECT_FAILURE(sigs.i_i(), kCode);
}
}
-
-TEST_F(WasmDecoderTest, Int64Local_param) {
- EXPECT_VERIFIES(&env_l_l, kCodeGetLocal0);
+TEST_F(AstDecoderTest, Int64Local_param) {
+ EXPECT_VERIFIES(sigs.l_l(), kCodeGetLocal0);
}
-
-TEST_F(WasmDecoderTest, Int64Locals) {
+TEST_F(AstDecoderTest, Int64Locals) {
for (byte i = 1; i < 8; i++) {
- FunctionEnv env;
- init_env(&env, sigs.l_v());
- env.AddLocals(kAstI64, i);
+ AddLocals(kAstI64, 1);
for (byte j = 0; j < i; j++) {
byte code[] = {kExprGetLocal, j};
- EXPECT_VERIFIES(&env, code);
+ EXPECT_VERIFIES(sigs.l_v(), code);
}
}
}
-
-TEST_F(WasmDecoderTest, Int32Binops) {
+TEST_F(AstDecoderTest, Int32Binops) {
TestBinop(kExprI32Add, sigs.i_ii());
TestBinop(kExprI32Sub, sigs.i_ii());
TestBinop(kExprI32Mul, sigs.i_ii());
@@ -816,8 +696,7 @@ TEST_F(WasmDecoderTest, Int32Binops) {
TestBinop(kExprI32LeU, sigs.i_ii());
}
-
-TEST_F(WasmDecoderTest, DoubleBinops) {
+TEST_F(AstDecoderTest, DoubleBinops) {
TestBinop(kExprF64Add, sigs.d_dd());
TestBinop(kExprF64Sub, sigs.d_dd());
TestBinop(kExprF64Mul, sigs.d_dd());
@@ -828,8 +707,7 @@ TEST_F(WasmDecoderTest, DoubleBinops) {
TestBinop(kExprF64Le, sigs.i_dd());
}
-
-TEST_F(WasmDecoderTest, FloatBinops) {
+TEST_F(AstDecoderTest, FloatBinops) {
TestBinop(kExprF32Add, sigs.f_ff());
TestBinop(kExprF32Sub, sigs.f_ff());
TestBinop(kExprF32Mul, sigs.f_ff());
@@ -840,8 +718,7 @@ TEST_F(WasmDecoderTest, FloatBinops) {
TestBinop(kExprF32Le, sigs.i_ff());
}
-
-TEST_F(WasmDecoderTest, TypeConversions) {
+TEST_F(AstDecoderTest, TypeConversions) {
TestUnop(kExprI32SConvertF32, kAstI32, kAstF32);
TestUnop(kExprI32SConvertF64, kAstI32, kAstF64);
TestUnop(kExprI32UConvertF32, kAstI32, kAstF32);
@@ -854,9 +731,8 @@ TEST_F(WasmDecoderTest, TypeConversions) {
TestUnop(kExprF32ConvertF64, kAstF32, kAstF64);
}
-
-TEST_F(WasmDecoderTest, MacrosStmt) {
- VERIFY(WASM_SET_LOCAL(0, WASM_I32(87348)));
+TEST_F(AstDecoderTest, MacrosStmt) {
+ VERIFY(WASM_SET_LOCAL(0, WASM_I32V_3(87348)));
VERIFY(WASM_STORE_MEM(MachineType::Int32(), WASM_I8(24), WASM_I8(40)));
VERIFY(WASM_IF(WASM_GET_LOCAL(0), WASM_NOP));
VERIFY(WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_NOP));
@@ -867,33 +743,30 @@ TEST_F(WasmDecoderTest, MacrosStmt) {
VERIFY(WASM_LOOP(1, WASM_CONTINUE(0)));
}
+TEST_F(AstDecoderTest, MacrosBreak) {
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(1, WASM_BREAK(0)));
-TEST_F(WasmDecoderTest, MacrosBreak) {
- EXPECT_VERIFIES_INLINE(&env_v_v, WASM_LOOP(1, WASM_BREAK(0)));
-
- EXPECT_VERIFIES_INLINE(&env_i_i, WASM_LOOP(1, WASM_BREAKV(0, WASM_ZERO)));
- EXPECT_VERIFIES_INLINE(&env_l_l, WASM_LOOP(1, WASM_BREAKV(0, WASM_I64(0))));
- EXPECT_VERIFIES_INLINE(&env_f_ff,
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_LOOP(1, WASM_BREAKV(0, WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(sigs.l_l(),
+ WASM_LOOP(1, WASM_BREAKV(0, WASM_I64V_1(0))));
+ EXPECT_VERIFIES_INLINE(sigs.f_ff(),
WASM_LOOP(1, WASM_BREAKV(0, WASM_F32(0.0))));
- EXPECT_VERIFIES_INLINE(&env_d_dd,
+ EXPECT_VERIFIES_INLINE(sigs.d_dd(),
WASM_LOOP(1, WASM_BREAKV(0, WASM_F64(0.0))));
}
-
-TEST_F(WasmDecoderTest, MacrosContinue) {
- EXPECT_VERIFIES_INLINE(&env_v_v, WASM_LOOP(1, WASM_CONTINUE(0)));
+TEST_F(AstDecoderTest, MacrosContinue) {
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(1, WASM_CONTINUE(0)));
}
-
-TEST_F(WasmDecoderTest, MacrosVariadic) {
+TEST_F(AstDecoderTest, MacrosVariadic) {
VERIFY(WASM_BLOCK(2, WASM_NOP, WASM_NOP));
VERIFY(WASM_BLOCK(3, WASM_NOP, WASM_NOP, WASM_NOP));
VERIFY(WASM_LOOP(2, WASM_NOP, WASM_NOP));
VERIFY(WASM_LOOP(3, WASM_NOP, WASM_NOP, WASM_NOP));
}
-
-TEST_F(WasmDecoderTest, MacrosNestedBlocks) {
+TEST_F(AstDecoderTest, MacrosNestedBlocks) {
VERIFY(WASM_BLOCK(2, WASM_NOP, WASM_BLOCK(2, WASM_NOP, WASM_NOP)));
VERIFY(WASM_BLOCK(3, WASM_NOP, // --
WASM_BLOCK(2, WASM_NOP, WASM_NOP), // --
@@ -901,42 +774,31 @@ TEST_F(WasmDecoderTest, MacrosNestedBlocks) {
VERIFY(WASM_BLOCK(1, WASM_BLOCK(1, WASM_BLOCK(2, WASM_NOP, WASM_NOP))));
}
-
-TEST_F(WasmDecoderTest, MultipleReturn) {
+TEST_F(AstDecoderTest, MultipleReturn) {
static LocalType kIntTypes5[] = {kAstI32, kAstI32, kAstI32, kAstI32, kAstI32};
FunctionSig sig_ii_v(2, 0, kIntTypes5);
- FunctionEnv env_ii_v;
- init_env(&env_ii_v, &sig_ii_v);
- EXPECT_VERIFIES_INLINE(&env_ii_v, WASM_RETURN(WASM_ZERO, WASM_ONE));
- EXPECT_FAILURE_INLINE(&env_ii_v, WASM_RETURN(WASM_ZERO));
+ EXPECT_VERIFIES_INLINE(&sig_ii_v, WASM_RETURN(WASM_ZERO, WASM_ONE));
+ EXPECT_FAILURE_INLINE(&sig_ii_v, WASM_RETURN(WASM_ZERO));
FunctionSig sig_iii_v(3, 0, kIntTypes5);
- FunctionEnv env_iii_v;
- init_env(&env_iii_v, &sig_iii_v);
- EXPECT_VERIFIES_INLINE(&env_iii_v,
+ EXPECT_VERIFIES_INLINE(&sig_iii_v,
WASM_RETURN(WASM_ZERO, WASM_ONE, WASM_I8(44)));
- EXPECT_FAILURE_INLINE(&env_iii_v, WASM_RETURN(WASM_ZERO, WASM_ONE));
+ EXPECT_FAILURE_INLINE(&sig_iii_v, WASM_RETURN(WASM_ZERO, WASM_ONE));
}
-
-TEST_F(WasmDecoderTest, MultipleReturn_fallthru) {
+TEST_F(AstDecoderTest, MultipleReturn_fallthru) {
static LocalType kIntTypes5[] = {kAstI32, kAstI32, kAstI32, kAstI32, kAstI32};
FunctionSig sig_ii_v(2, 0, kIntTypes5);
- FunctionEnv env_ii_v;
- init_env(&env_ii_v, &sig_ii_v);
- EXPECT_VERIFIES_INLINE(&env_ii_v, WASM_ZERO, WASM_ONE);
- EXPECT_FAILURE_INLINE(&env_ii_v, WASM_ZERO);
+ EXPECT_VERIFIES_INLINE(&sig_ii_v, WASM_ZERO, WASM_ONE);
+ EXPECT_FAILURE_INLINE(&sig_ii_v, WASM_ZERO);
FunctionSig sig_iii_v(3, 0, kIntTypes5);
- FunctionEnv env_iii_v;
- init_env(&env_iii_v, &sig_iii_v);
- EXPECT_VERIFIES_INLINE(&env_iii_v, WASM_ZERO, WASM_ONE, WASM_I8(44));
- EXPECT_FAILURE_INLINE(&env_iii_v, WASM_ZERO, WASM_ONE);
+ EXPECT_VERIFIES_INLINE(&sig_iii_v, WASM_ZERO, WASM_ONE, WASM_I8(44));
+ EXPECT_FAILURE_INLINE(&sig_iii_v, WASM_ZERO, WASM_ONE);
}
-
-TEST_F(WasmDecoderTest, MacrosInt32) {
+TEST_F(AstDecoderTest, MacrosInt32) {
VERIFY(WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_I8(12)));
VERIFY(WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(13)));
VERIFY(WASM_I32_MUL(WASM_GET_LOCAL(0), WASM_I8(14)));
@@ -950,6 +812,8 @@ TEST_F(WasmDecoderTest, MacrosInt32) {
VERIFY(WASM_I32_SHL(WASM_GET_LOCAL(0), WASM_I8(22)));
VERIFY(WASM_I32_SHR(WASM_GET_LOCAL(0), WASM_I8(23)));
VERIFY(WASM_I32_SAR(WASM_GET_LOCAL(0), WASM_I8(24)));
+ VERIFY(WASM_I32_ROR(WASM_GET_LOCAL(0), WASM_I8(24)));
+ VERIFY(WASM_I32_ROL(WASM_GET_LOCAL(0), WASM_I8(24)));
VERIFY(WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(25)));
VERIFY(WASM_I32_NE(WASM_GET_LOCAL(0), WASM_I8(25)));
@@ -964,47 +828,42 @@ TEST_F(WasmDecoderTest, MacrosInt32) {
VERIFY(WASM_I32_GEU(WASM_GET_LOCAL(0), WASM_I8(29)));
}
+TEST_F(AstDecoderTest, MacrosInt64) {
+#define VERIFY_L_LL(...) EXPECT_VERIFIES_INLINE(sigs.l_ll(), __VA_ARGS__)
+#define VERIFY_I_LL(...) EXPECT_VERIFIES_INLINE(sigs.i_ll(), __VA_ARGS__)
-TEST_F(WasmDecoderTest, MacrosInt64) {
- FunctionEnv env_i_ll;
- FunctionEnv env_l_ll;
- init_env(&env_i_ll, sigs.i_ll());
- init_env(&env_l_ll, sigs.l_ll());
-
-#define VERIFY_L_LL(...) EXPECT_VERIFIES_INLINE(&env_l_ll, __VA_ARGS__)
-#define VERIFY_I_LL(...) EXPECT_VERIFIES_INLINE(&env_i_ll, __VA_ARGS__)
+ VERIFY_L_LL(WASM_I64_ADD(WASM_GET_LOCAL(0), WASM_I64V_1(12)));
+ VERIFY_L_LL(WASM_I64_SUB(WASM_GET_LOCAL(0), WASM_I64V_1(13)));
+ VERIFY_L_LL(WASM_I64_MUL(WASM_GET_LOCAL(0), WASM_I64V_1(14)));
+ VERIFY_L_LL(WASM_I64_DIVS(WASM_GET_LOCAL(0), WASM_I64V_1(15)));
+ VERIFY_L_LL(WASM_I64_DIVU(WASM_GET_LOCAL(0), WASM_I64V_1(16)));
+ VERIFY_L_LL(WASM_I64_REMS(WASM_GET_LOCAL(0), WASM_I64V_1(17)));
+ VERIFY_L_LL(WASM_I64_REMU(WASM_GET_LOCAL(0), WASM_I64V_1(18)));
+ VERIFY_L_LL(WASM_I64_AND(WASM_GET_LOCAL(0), WASM_I64V_1(19)));
+ VERIFY_L_LL(WASM_I64_IOR(WASM_GET_LOCAL(0), WASM_I64V_1(20)));
+ VERIFY_L_LL(WASM_I64_XOR(WASM_GET_LOCAL(0), WASM_I64V_1(21)));
- VERIFY_L_LL(WASM_I64_ADD(WASM_GET_LOCAL(0), WASM_I64(12)));
- VERIFY_L_LL(WASM_I64_SUB(WASM_GET_LOCAL(0), WASM_I64(13)));
- VERIFY_L_LL(WASM_I64_MUL(WASM_GET_LOCAL(0), WASM_I64(14)));
- VERIFY_L_LL(WASM_I64_DIVS(WASM_GET_LOCAL(0), WASM_I64(15)));
- VERIFY_L_LL(WASM_I64_DIVU(WASM_GET_LOCAL(0), WASM_I64(16)));
- VERIFY_L_LL(WASM_I64_REMS(WASM_GET_LOCAL(0), WASM_I64(17)));
- VERIFY_L_LL(WASM_I64_REMU(WASM_GET_LOCAL(0), WASM_I64(18)));
- VERIFY_L_LL(WASM_I64_AND(WASM_GET_LOCAL(0), WASM_I64(19)));
- VERIFY_L_LL(WASM_I64_IOR(WASM_GET_LOCAL(0), WASM_I64(20)));
- VERIFY_L_LL(WASM_I64_XOR(WASM_GET_LOCAL(0), WASM_I64(21)));
+ VERIFY_L_LL(WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_I64V_1(22)));
+ VERIFY_L_LL(WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_I64V_1(23)));
+ VERIFY_L_LL(WASM_I64_SAR(WASM_GET_LOCAL(0), WASM_I64V_1(24)));
+ VERIFY_L_LL(WASM_I64_ROR(WASM_GET_LOCAL(0), WASM_I64V_1(24)));
+ VERIFY_L_LL(WASM_I64_ROL(WASM_GET_LOCAL(0), WASM_I64V_1(24)));
- VERIFY_L_LL(WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_I64(22)));
- VERIFY_L_LL(WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_I64(23)));
- VERIFY_L_LL(WASM_I64_SAR(WASM_GET_LOCAL(0), WASM_I64(24)));
+ VERIFY_I_LL(WASM_I64_LTS(WASM_GET_LOCAL(0), WASM_I64V_1(26)));
+ VERIFY_I_LL(WASM_I64_LES(WASM_GET_LOCAL(0), WASM_I64V_1(27)));
+ VERIFY_I_LL(WASM_I64_LTU(WASM_GET_LOCAL(0), WASM_I64V_1(28)));
+ VERIFY_I_LL(WASM_I64_LEU(WASM_GET_LOCAL(0), WASM_I64V_1(29)));
- VERIFY_I_LL(WASM_I64_LTS(WASM_GET_LOCAL(0), WASM_I64(26)));
- VERIFY_I_LL(WASM_I64_LES(WASM_GET_LOCAL(0), WASM_I64(27)));
- VERIFY_I_LL(WASM_I64_LTU(WASM_GET_LOCAL(0), WASM_I64(28)));
- VERIFY_I_LL(WASM_I64_LEU(WASM_GET_LOCAL(0), WASM_I64(29)));
+ VERIFY_I_LL(WASM_I64_GTS(WASM_GET_LOCAL(0), WASM_I64V_1(26)));
+ VERIFY_I_LL(WASM_I64_GES(WASM_GET_LOCAL(0), WASM_I64V_1(27)));
+ VERIFY_I_LL(WASM_I64_GTU(WASM_GET_LOCAL(0), WASM_I64V_1(28)));
+ VERIFY_I_LL(WASM_I64_GEU(WASM_GET_LOCAL(0), WASM_I64V_1(29)));
- VERIFY_I_LL(WASM_I64_GTS(WASM_GET_LOCAL(0), WASM_I64(26)));
- VERIFY_I_LL(WASM_I64_GES(WASM_GET_LOCAL(0), WASM_I64(27)));
- VERIFY_I_LL(WASM_I64_GTU(WASM_GET_LOCAL(0), WASM_I64(28)));
- VERIFY_I_LL(WASM_I64_GEU(WASM_GET_LOCAL(0), WASM_I64(29)));
-
- VERIFY_I_LL(WASM_I64_EQ(WASM_GET_LOCAL(0), WASM_I64(25)));
- VERIFY_I_LL(WASM_I64_NE(WASM_GET_LOCAL(0), WASM_I64(25)));
+ VERIFY_I_LL(WASM_I64_EQ(WASM_GET_LOCAL(0), WASM_I64V_1(25)));
+ VERIFY_I_LL(WASM_I64_NE(WASM_GET_LOCAL(0), WASM_I64V_1(25)));
}
-
-TEST_F(WasmDecoderTest, AllSimpleExpressions) {
+TEST_F(AstDecoderTest, AllSimpleExpressions) {
// Test all simple expressions which are described by a signature.
#define DECODE_TEST(name, opcode, sig) \
{ \
@@ -1021,86 +880,55 @@ TEST_F(WasmDecoderTest, AllSimpleExpressions) {
#undef DECODE_TEST
}
-
-TEST_F(WasmDecoderTest, MemorySize) {
+TEST_F(AstDecoderTest, MemorySize) {
byte code[] = {kExprMemorySize};
- EXPECT_VERIFIES(&env_i_i, code);
- EXPECT_FAILURE(&env_f_ff, code);
+ EXPECT_VERIFIES(sigs.i_i(), code);
+ EXPECT_FAILURE(sigs.f_ff(), code);
}
-
-TEST_F(WasmDecoderTest, GrowMemory) {
+TEST_F(AstDecoderTest, GrowMemory) {
byte code[] = {kExprGrowMemory, kExprGetLocal, 0};
- EXPECT_VERIFIES(&env_i_i, code);
- EXPECT_FAILURE(&env_i_d, code);
+ EXPECT_VERIFIES(sigs.i_i(), code);
+ EXPECT_FAILURE(sigs.i_d(), code);
}
-
-TEST_F(WasmDecoderTest, LoadMemOffset) {
+TEST_F(AstDecoderTest, LoadMemOffset) {
for (int offset = 0; offset < 128; offset += 7) {
- byte code[] = {kExprI32LoadMem, WasmOpcodes::LoadStoreAccessOf(true),
- static_cast<byte>(offset), kExprI8Const, 0};
- EXPECT_VERIFIES(&env_i_i, code);
+ byte code[] = {kExprI32LoadMem, ZERO_ALIGNMENT, static_cast<byte>(offset),
+ kExprI8Const, 0};
+ EXPECT_VERIFIES(sigs.i_i(), code);
}
}
-
-TEST_F(WasmDecoderTest, StoreMemOffset) {
+TEST_F(AstDecoderTest, StoreMemOffset) {
for (int offset = 0; offset < 128; offset += 7) {
- byte code[] = {kExprI32StoreMem,
- WasmOpcodes::LoadStoreAccessOf(true),
- static_cast<byte>(offset),
- kExprI8Const,
- 0,
- kExprI8Const,
- 0};
- EXPECT_VERIFIES(&env_i_i, code);
+ byte code[] = {
+ kExprI32StoreMem, 0, static_cast<byte>(offset), kExprI8Const, 0,
+ kExprI8Const, 0};
+ EXPECT_VERIFIES(sigs.i_i(), code);
}
}
-
-TEST_F(WasmDecoderTest, LoadMemOffset_varint) {
- byte code1[] = {kExprI32LoadMem, WasmOpcodes::LoadStoreAccessOf(true), 0,
- kExprI8Const, 0};
- byte code2[] = {kExprI32LoadMem,
- WasmOpcodes::LoadStoreAccessOf(true),
- 0x80,
- 1,
- kExprI8Const,
- 0};
- byte code3[] = {kExprI32LoadMem,
- WasmOpcodes::LoadStoreAccessOf(true),
- 0x81,
- 0x82,
- 5,
- kExprI8Const,
- 0};
- byte code4[] = {kExprI32LoadMem,
- WasmOpcodes::LoadStoreAccessOf(true),
- 0x83,
- 0x84,
- 0x85,
- 7,
- kExprI8Const,
+TEST_F(AstDecoderTest, LoadMemOffset_varint) {
+ byte code1[] = {kExprI32LoadMem, ZERO_ALIGNMENT, ZERO_OFFSET, kExprI8Const,
0};
+ byte code2[] = {kExprI32LoadMem, ZERO_ALIGNMENT, 0x80, 1, kExprI8Const, 0};
+ byte code3[] = {
+ kExprI32LoadMem, ZERO_ALIGNMENT, 0x81, 0x82, 5, kExprI8Const, 0};
+ byte code4[] = {
+ kExprI32LoadMem, ZERO_ALIGNMENT, 0x83, 0x84, 0x85, 7, kExprI8Const, 0};
- EXPECT_VERIFIES(&env_i_i, code1);
- EXPECT_VERIFIES(&env_i_i, code2);
- EXPECT_VERIFIES(&env_i_i, code3);
- EXPECT_VERIFIES(&env_i_i, code4);
+ EXPECT_VERIFIES(sigs.i_i(), code1);
+ EXPECT_VERIFIES(sigs.i_i(), code2);
+ EXPECT_VERIFIES(sigs.i_i(), code3);
+ EXPECT_VERIFIES(sigs.i_i(), code4);
}
-
-TEST_F(WasmDecoderTest, StoreMemOffset_varint) {
- byte code1[] = {kExprI32StoreMem,
- WasmOpcodes::LoadStoreAccessOf(true),
- 0,
- kExprI8Const,
- 0,
- kExprI8Const,
- 0};
+TEST_F(AstDecoderTest, StoreMemOffset_varint) {
+ byte code1[] = {
+ kExprI32StoreMem, ZERO_ALIGNMENT, 0, kExprI8Const, 0, kExprI8Const, 0};
byte code2[] = {kExprI32StoreMem,
- WasmOpcodes::LoadStoreAccessOf(true),
+ ZERO_ALIGNMENT,
0x80,
1,
kExprI8Const,
@@ -1108,7 +936,7 @@ TEST_F(WasmDecoderTest, StoreMemOffset_varint) {
kExprI8Const,
0};
byte code3[] = {kExprI32StoreMem,
- WasmOpcodes::LoadStoreAccessOf(true),
+ ZERO_ALIGNMENT,
0x81,
0x82,
5,
@@ -1117,7 +945,7 @@ TEST_F(WasmDecoderTest, StoreMemOffset_varint) {
kExprI8Const,
0};
byte code4[] = {kExprI32StoreMem,
- WasmOpcodes::LoadStoreAccessOf(true),
+ ZERO_ALIGNMENT,
0x83,
0x84,
0x85,
@@ -1127,53 +955,48 @@ TEST_F(WasmDecoderTest, StoreMemOffset_varint) {
kExprI8Const,
0};
- EXPECT_VERIFIES(&env_i_i, code1);
- EXPECT_VERIFIES(&env_i_i, code2);
- EXPECT_VERIFIES(&env_i_i, code3);
- EXPECT_VERIFIES(&env_i_i, code4);
+ EXPECT_VERIFIES(sigs.i_i(), code1);
+ EXPECT_VERIFIES(sigs.i_i(), code2);
+ EXPECT_VERIFIES(sigs.i_i(), code3);
+ EXPECT_VERIFIES(sigs.i_i(), code4);
}
-
-TEST_F(WasmDecoderTest, AllLoadMemCombinations) {
+TEST_F(AstDecoderTest, AllLoadMemCombinations) {
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
LocalType local_type = kLocalTypes[i];
for (size_t j = 0; j < arraysize(machineTypes); j++) {
MachineType mem_type = machineTypes[j];
byte code[] = {
static_cast<byte>(WasmOpcodes::LoadStoreOpcodeOf(mem_type, false)),
- WasmOpcodes::LoadStoreAccessOf(false), kExprI8Const, 0};
- FunctionEnv env;
+ ZERO_ALIGNMENT, ZERO_OFFSET, kExprI8Const, 0};
FunctionSig sig(1, 0, &local_type);
- init_env(&env, &sig);
if (local_type == WasmOpcodes::LocalTypeFor(mem_type)) {
- EXPECT_VERIFIES(&env, code);
+ EXPECT_VERIFIES(&sig, code);
} else {
- EXPECT_FAILURE(&env, code);
+ EXPECT_FAILURE(&sig, code);
}
}
}
}
-
-TEST_F(WasmDecoderTest, AllStoreMemCombinations) {
+TEST_F(AstDecoderTest, AllStoreMemCombinations) {
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
LocalType local_type = kLocalTypes[i];
for (size_t j = 0; j < arraysize(machineTypes); j++) {
MachineType mem_type = machineTypes[j];
byte code[] = {
static_cast<byte>(WasmOpcodes::LoadStoreOpcodeOf(mem_type, true)),
- WasmOpcodes::LoadStoreAccessOf(false),
+ ZERO_ALIGNMENT,
+ ZERO_OFFSET,
kExprI8Const,
0,
kExprGetLocal,
0};
- FunctionEnv env;
FunctionSig sig(0, 1, &local_type);
- init_env(&env, &sig);
if (local_type == WasmOpcodes::LocalTypeFor(mem_type)) {
- EXPECT_VERIFIES(&env, code);
+ EXPECT_VERIFIES(&sig, code);
} else {
- EXPECT_FAILURE(&env, code);
+ EXPECT_FAILURE(&sig, code);
}
}
}
@@ -1189,30 +1012,26 @@ class TestModuleEnv : public ModuleEnv {
instance = nullptr;
module = &mod;
linker = nullptr;
- mod.globals = new std::vector<WasmGlobal>;
- mod.signatures = new std::vector<FunctionSig*>;
- mod.functions = new std::vector<WasmFunction>;
- mod.import_table = new std::vector<WasmImport>;
}
byte AddGlobal(MachineType mem_type) {
- mod.globals->push_back({0, mem_type, 0, false});
- CHECK(mod.globals->size() <= 127);
- return static_cast<byte>(mod.globals->size() - 1);
+ mod.globals.push_back({0, 0, mem_type, 0, false});
+ CHECK(mod.globals.size() <= 127);
+ return static_cast<byte>(mod.globals.size() - 1);
}
byte AddSignature(FunctionSig* sig) {
- mod.signatures->push_back(sig);
- CHECK(mod.signatures->size() <= 127);
- return static_cast<byte>(mod.signatures->size() - 1);
+ mod.signatures.push_back(sig);
+ CHECK(mod.signatures.size() <= 127);
+ return static_cast<byte>(mod.signatures.size() - 1);
}
byte AddFunction(FunctionSig* sig) {
- mod.functions->push_back({sig, 0, 0, 0, 0, 0, 0, 0, false, false});
- CHECK(mod.functions->size() <= 127);
- return static_cast<byte>(mod.functions->size() - 1);
+ mod.functions.push_back({sig, 0, 0, 0, 0, 0, 0, 0, false, false});
+ CHECK(mod.functions.size() <= 127);
+ return static_cast<byte>(mod.functions.size() - 1);
}
byte AddImport(FunctionSig* sig) {
- mod.import_table->push_back({sig, 0, 0});
- CHECK(mod.import_table->size() <= 127);
- return static_cast<byte>(mod.import_table->size() - 1);
+ mod.import_table.push_back({sig, 0, 0});
+ CHECK(mod.import_table.size() <= 127);
+ return static_cast<byte>(mod.import_table.size() - 1);
}
private:
@@ -1220,194 +1039,184 @@ class TestModuleEnv : public ModuleEnv {
};
} // namespace
-
-TEST_F(WasmDecoderTest, SimpleCalls) {
- FunctionEnv* env = &env_i_i;
+TEST_F(AstDecoderTest, SimpleCalls) {
+ FunctionSig* sig = sigs.i_i();
TestModuleEnv module_env;
- env->module = &module_env;
+ module = &module_env;
module_env.AddFunction(sigs.i_v());
module_env.AddFunction(sigs.i_i());
module_env.AddFunction(sigs.i_ii());
- EXPECT_VERIFIES_INLINE(env, WASM_CALL_FUNCTION(0));
- EXPECT_VERIFIES_INLINE(env, WASM_CALL_FUNCTION(1, WASM_I8(27)));
- EXPECT_VERIFIES_INLINE(env, WASM_CALL_FUNCTION(2, WASM_I8(37), WASM_I8(77)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_CALL_FUNCTION(0));
+ EXPECT_VERIFIES_INLINE(sig, WASM_CALL_FUNCTION(1, WASM_I8(27)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_CALL_FUNCTION(2, WASM_I8(37), WASM_I8(77)));
}
-
-TEST_F(WasmDecoderTest, CallsWithTooFewArguments) {
- FunctionEnv* env = &env_i_i;
+TEST_F(AstDecoderTest, CallsWithTooFewArguments) {
+ FunctionSig* sig = sigs.i_i();
TestModuleEnv module_env;
- env->module = &module_env;
+ module = &module_env;
module_env.AddFunction(sigs.i_i());
module_env.AddFunction(sigs.i_ii());
module_env.AddFunction(sigs.f_ff());
- EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION0(0));
- EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(1, WASM_ZERO));
- EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(2, WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION0(0));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION(1, WASM_ZERO));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION(2, WASM_GET_LOCAL(0)));
}
-
-TEST_F(WasmDecoderTest, CallsWithSpilloverArgs) {
+TEST_F(AstDecoderTest, CallsWithSpilloverArgs) {
static LocalType a_i_ff[] = {kAstI32, kAstF32, kAstF32};
FunctionSig sig_i_ff(1, 2, a_i_ff);
- FunctionEnv env_i_ff;
- init_env(&env_i_ff, &sig_i_ff);
TestModuleEnv module_env;
- env_i_ff.module = &module_env;
- env_i_i.module = &module_env;
- env_f_ff.module = &module_env;
+ module = &module_env;
module_env.AddFunction(&sig_i_ff);
- EXPECT_VERIFIES_INLINE(&env_i_i,
+ EXPECT_VERIFIES_INLINE(sigs.i_i(),
WASM_CALL_FUNCTION(0, WASM_F32(0.1), WASM_F32(0.1)));
- EXPECT_VERIFIES_INLINE(&env_i_ff,
+ EXPECT_VERIFIES_INLINE(sigs.i_ff(),
WASM_CALL_FUNCTION(0, WASM_F32(0.1), WASM_F32(0.1)));
- EXPECT_FAILURE_INLINE(&env_f_ff,
+ EXPECT_FAILURE_INLINE(sigs.f_ff(),
WASM_CALL_FUNCTION(0, WASM_F32(0.1), WASM_F32(0.1)));
EXPECT_FAILURE_INLINE(
- &env_i_i,
+ sigs.i_i(),
WASM_CALL_FUNCTION(0, WASM_F32(0.1), WASM_F32(0.1), WASM_F32(0.2)));
EXPECT_VERIFIES_INLINE(
- &env_f_ff,
+ sigs.f_ff(),
WASM_CALL_FUNCTION(0, WASM_F32(0.1), WASM_F32(0.1), WASM_F32(11)));
}
-
-TEST_F(WasmDecoderTest, CallsWithMismatchedSigs2) {
- FunctionEnv* env = &env_i_i;
+TEST_F(AstDecoderTest, CallsWithMismatchedSigs2) {
+ FunctionSig* sig = sigs.i_i();
TestModuleEnv module_env;
- env->module = &module_env;
+ module = &module_env;
module_env.AddFunction(sigs.i_i());
- EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(0, WASM_I64(17)));
- EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(0, WASM_F32(17.1)));
- EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(0, WASM_F64(17.1)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION(0, WASM_I64V_1(17)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION(0, WASM_F32(17.1)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION(0, WASM_F64(17.1)));
}
-
-TEST_F(WasmDecoderTest, CallsWithMismatchedSigs3) {
- FunctionEnv* env = &env_i_i;
+TEST_F(AstDecoderTest, CallsWithMismatchedSigs3) {
+ FunctionSig* sig = sigs.i_i();
TestModuleEnv module_env;
- env->module = &module_env;
+ module = &module_env;
module_env.AddFunction(sigs.i_f());
- EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(0, WASM_I8(17)));
- EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(0, WASM_I64(27)));
- EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(0, WASM_F64(37.2)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION(0, WASM_I8(17)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION(0, WASM_I64V_1(27)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION(0, WASM_F64(37.2)));
module_env.AddFunction(sigs.i_d());
- EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(1, WASM_I8(16)));
- EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(1, WASM_I64(16)));
- EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(1, WASM_F32(17.6)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION(1, WASM_I8(16)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION(1, WASM_I64V_1(16)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION(1, WASM_F32(17.6)));
}
-
-TEST_F(WasmDecoderTest, SimpleIndirectCalls) {
- FunctionEnv* env = &env_i_i;
+TEST_F(AstDecoderTest, SimpleIndirectCalls) {
+ FunctionSig* sig = sigs.i_i();
TestModuleEnv module_env;
- env->module = &module_env;
+ module = &module_env;
byte f0 = module_env.AddSignature(sigs.i_v());
byte f1 = module_env.AddSignature(sigs.i_i());
byte f2 = module_env.AddSignature(sigs.i_ii());
- EXPECT_VERIFIES_INLINE(env, WASM_CALL_INDIRECT0(f0, WASM_ZERO));
- EXPECT_VERIFIES_INLINE(env, WASM_CALL_INDIRECT(f1, WASM_ZERO, WASM_I8(22)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_CALL_INDIRECT0(f0, WASM_ZERO));
+ EXPECT_VERIFIES_INLINE(sig, WASM_CALL_INDIRECT(f1, WASM_ZERO, WASM_I8(22)));
EXPECT_VERIFIES_INLINE(
- env, WASM_CALL_INDIRECT(f2, WASM_ZERO, WASM_I8(32), WASM_I8(72)));
+ sig, WASM_CALL_INDIRECT(f2, WASM_ZERO, WASM_I8(32), WASM_I8(72)));
}
-
-TEST_F(WasmDecoderTest, IndirectCallsOutOfBounds) {
- FunctionEnv* env = &env_i_i;
+TEST_F(AstDecoderTest, IndirectCallsOutOfBounds) {
+ FunctionSig* sig = sigs.i_i();
TestModuleEnv module_env;
- env->module = &module_env;
+ module = &module_env;
- EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT0(0, WASM_ZERO));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT0(0, WASM_ZERO));
module_env.AddSignature(sigs.i_v());
- EXPECT_VERIFIES_INLINE(env, WASM_CALL_INDIRECT0(0, WASM_ZERO));
+ EXPECT_VERIFIES_INLINE(sig, WASM_CALL_INDIRECT0(0, WASM_ZERO));
- EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT(1, WASM_ZERO, WASM_I8(22)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT(1, WASM_ZERO, WASM_I8(22)));
module_env.AddSignature(sigs.i_i());
- EXPECT_VERIFIES_INLINE(env, WASM_CALL_INDIRECT(1, WASM_ZERO, WASM_I8(27)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_CALL_INDIRECT(1, WASM_ZERO, WASM_I8(27)));
- EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT(2, WASM_ZERO, WASM_I8(27)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT(2, WASM_ZERO, WASM_I8(27)));
}
-
-TEST_F(WasmDecoderTest, IndirectCallsWithMismatchedSigs3) {
- FunctionEnv* env = &env_i_i;
+TEST_F(AstDecoderTest, IndirectCallsWithMismatchedSigs3) {
+ FunctionSig* sig = sigs.i_i();
TestModuleEnv module_env;
- env->module = &module_env;
+ module = &module_env;
byte f0 = module_env.AddFunction(sigs.i_f());
- EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT(f0, WASM_ZERO, WASM_I8(17)));
- EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT(f0, WASM_ZERO, WASM_I64(27)));
- EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT(f0, WASM_ZERO, WASM_F64(37.2)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT(f0, WASM_ZERO, WASM_I8(17)));
+ EXPECT_FAILURE_INLINE(sig,
+ WASM_CALL_INDIRECT(f0, WASM_ZERO, WASM_I64V_1(27)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT(f0, WASM_ZERO, WASM_F64(37.2)));
- EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT0(f0, WASM_I8(17)));
- EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT0(f0, WASM_I64(27)));
- EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT0(f0, WASM_F64(37.2)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT0(f0, WASM_I8(17)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT0(f0, WASM_I64V_1(27)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT0(f0, WASM_F64(37.2)));
byte f1 = module_env.AddFunction(sigs.i_d());
- EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT(f1, WASM_ZERO, WASM_I8(16)));
- EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT(f1, WASM_ZERO, WASM_I64(16)));
- EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT(f1, WASM_ZERO, WASM_F32(17.6)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT(f1, WASM_ZERO, WASM_I8(16)));
+ EXPECT_FAILURE_INLINE(sig,
+ WASM_CALL_INDIRECT(f1, WASM_ZERO, WASM_I64V_1(16)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT(f1, WASM_ZERO, WASM_F32(17.6)));
}
-TEST_F(WasmDecoderTest, SimpleImportCalls) {
- FunctionEnv* env = &env_i_i;
+TEST_F(AstDecoderTest, SimpleImportCalls) {
+ FunctionSig* sig = sigs.i_i();
TestModuleEnv module_env;
- env->module = &module_env;
+ module = &module_env;
byte f0 = module_env.AddImport(sigs.i_v());
byte f1 = module_env.AddImport(sigs.i_i());
byte f2 = module_env.AddImport(sigs.i_ii());
- EXPECT_VERIFIES_INLINE(env, WASM_CALL_IMPORT0(f0));
- EXPECT_VERIFIES_INLINE(env, WASM_CALL_IMPORT(f1, WASM_I8(22)));
- EXPECT_VERIFIES_INLINE(env, WASM_CALL_IMPORT(f2, WASM_I8(32), WASM_I8(72)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_CALL_IMPORT0(f0));
+ EXPECT_VERIFIES_INLINE(sig, WASM_CALL_IMPORT(f1, WASM_I8(22)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_CALL_IMPORT(f2, WASM_I8(32), WASM_I8(72)));
}
-TEST_F(WasmDecoderTest, ImportCallsWithMismatchedSigs3) {
- FunctionEnv* env = &env_i_i;
+TEST_F(AstDecoderTest, ImportCallsWithMismatchedSigs3) {
+ FunctionSig* sig = sigs.i_i();
TestModuleEnv module_env;
- env->module = &module_env;
+ module = &module_env;
byte f0 = module_env.AddImport(sigs.i_f());
- EXPECT_FAILURE_INLINE(env, WASM_CALL_IMPORT0(f0));
- EXPECT_FAILURE_INLINE(env, WASM_CALL_IMPORT(f0, WASM_I8(17)));
- EXPECT_FAILURE_INLINE(env, WASM_CALL_IMPORT(f0, WASM_I64(27)));
- EXPECT_FAILURE_INLINE(env, WASM_CALL_IMPORT(f0, WASM_F64(37.2)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT0(f0));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT(f0, WASM_I8(17)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT(f0, WASM_I64V_1(27)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT(f0, WASM_F64(37.2)));
byte f1 = module_env.AddImport(sigs.i_d());
- EXPECT_FAILURE_INLINE(env, WASM_CALL_IMPORT0(f1));
- EXPECT_FAILURE_INLINE(env, WASM_CALL_IMPORT(f1, WASM_I8(16)));
- EXPECT_FAILURE_INLINE(env, WASM_CALL_IMPORT(f1, WASM_I64(16)));
- EXPECT_FAILURE_INLINE(env, WASM_CALL_IMPORT(f1, WASM_F32(17.6)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT0(f1));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT(f1, WASM_I8(16)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT(f1, WASM_I64V_1(16)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT(f1, WASM_F32(17.6)));
}
-TEST_F(WasmDecoderTest, Int32Globals) {
- FunctionEnv* env = &env_i_i;
+TEST_F(AstDecoderTest, Int32Globals) {
+ FunctionSig* sig = sigs.i_i();
TestModuleEnv module_env;
- env->module = &module_env;
+ module = &module_env;
module_env.AddGlobal(MachineType::Int8());
module_env.AddGlobal(MachineType::Uint8());
@@ -1416,131 +1225,116 @@ TEST_F(WasmDecoderTest, Int32Globals) {
module_env.AddGlobal(MachineType::Int32());
module_env.AddGlobal(MachineType::Uint32());
- EXPECT_VERIFIES_INLINE(env, WASM_LOAD_GLOBAL(0));
- EXPECT_VERIFIES_INLINE(env, WASM_LOAD_GLOBAL(1));
- EXPECT_VERIFIES_INLINE(env, WASM_LOAD_GLOBAL(2));
- EXPECT_VERIFIES_INLINE(env, WASM_LOAD_GLOBAL(3));
- EXPECT_VERIFIES_INLINE(env, WASM_LOAD_GLOBAL(4));
- EXPECT_VERIFIES_INLINE(env, WASM_LOAD_GLOBAL(5));
+ EXPECT_VERIFIES_INLINE(sig, WASM_LOAD_GLOBAL(0));
+ EXPECT_VERIFIES_INLINE(sig, WASM_LOAD_GLOBAL(1));
+ EXPECT_VERIFIES_INLINE(sig, WASM_LOAD_GLOBAL(2));
+ EXPECT_VERIFIES_INLINE(sig, WASM_LOAD_GLOBAL(3));
+ EXPECT_VERIFIES_INLINE(sig, WASM_LOAD_GLOBAL(4));
+ EXPECT_VERIFIES_INLINE(sig, WASM_LOAD_GLOBAL(5));
- EXPECT_VERIFIES_INLINE(env, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
- EXPECT_VERIFIES_INLINE(env, WASM_STORE_GLOBAL(1, WASM_GET_LOCAL(0)));
- EXPECT_VERIFIES_INLINE(env, WASM_STORE_GLOBAL(2, WASM_GET_LOCAL(0)));
- EXPECT_VERIFIES_INLINE(env, WASM_STORE_GLOBAL(3, WASM_GET_LOCAL(0)));
- EXPECT_VERIFIES_INLINE(env, WASM_STORE_GLOBAL(4, WASM_GET_LOCAL(0)));
- EXPECT_VERIFIES_INLINE(env, WASM_STORE_GLOBAL(5, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_STORE_GLOBAL(1, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_STORE_GLOBAL(2, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_STORE_GLOBAL(3, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_STORE_GLOBAL(4, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_STORE_GLOBAL(5, WASM_GET_LOCAL(0)));
}
-
-TEST_F(WasmDecoderTest, Int32Globals_fail) {
- FunctionEnv* env = &env_i_i;
+TEST_F(AstDecoderTest, Int32Globals_fail) {
+ FunctionSig* sig = sigs.i_i();
TestModuleEnv module_env;
- env->module = &module_env;
+ module = &module_env;
module_env.AddGlobal(MachineType::Int64());
module_env.AddGlobal(MachineType::Uint64());
module_env.AddGlobal(MachineType::Float32());
module_env.AddGlobal(MachineType::Float64());
- EXPECT_FAILURE_INLINE(env, WASM_LOAD_GLOBAL(0));
- EXPECT_FAILURE_INLINE(env, WASM_LOAD_GLOBAL(1));
- EXPECT_FAILURE_INLINE(env, WASM_LOAD_GLOBAL(2));
- EXPECT_FAILURE_INLINE(env, WASM_LOAD_GLOBAL(3));
+ EXPECT_FAILURE_INLINE(sig, WASM_LOAD_GLOBAL(0));
+ EXPECT_FAILURE_INLINE(sig, WASM_LOAD_GLOBAL(1));
+ EXPECT_FAILURE_INLINE(sig, WASM_LOAD_GLOBAL(2));
+ EXPECT_FAILURE_INLINE(sig, WASM_LOAD_GLOBAL(3));
- EXPECT_FAILURE_INLINE(env, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
- EXPECT_FAILURE_INLINE(env, WASM_STORE_GLOBAL(1, WASM_GET_LOCAL(0)));
- EXPECT_FAILURE_INLINE(env, WASM_STORE_GLOBAL(2, WASM_GET_LOCAL(0)));
- EXPECT_FAILURE_INLINE(env, WASM_STORE_GLOBAL(3, WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE_INLINE(sig, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE_INLINE(sig, WASM_STORE_GLOBAL(1, WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE_INLINE(sig, WASM_STORE_GLOBAL(2, WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE_INLINE(sig, WASM_STORE_GLOBAL(3, WASM_GET_LOCAL(0)));
}
-
-TEST_F(WasmDecoderTest, Int64Globals) {
- FunctionEnv* env = &env_l_l;
+TEST_F(AstDecoderTest, Int64Globals) {
+ FunctionSig* sig = sigs.l_l();
TestModuleEnv module_env;
- env->module = &module_env;
+ module = &module_env;
module_env.AddGlobal(MachineType::Int64());
module_env.AddGlobal(MachineType::Uint64());
- EXPECT_VERIFIES_INLINE(env, WASM_LOAD_GLOBAL(0));
- EXPECT_VERIFIES_INLINE(env, WASM_LOAD_GLOBAL(1));
+ EXPECT_VERIFIES_INLINE(sig, WASM_LOAD_GLOBAL(0));
+ EXPECT_VERIFIES_INLINE(sig, WASM_LOAD_GLOBAL(1));
- EXPECT_VERIFIES_INLINE(env, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
- EXPECT_VERIFIES_INLINE(env, WASM_STORE_GLOBAL(1, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_STORE_GLOBAL(1, WASM_GET_LOCAL(0)));
}
-
-TEST_F(WasmDecoderTest, Float32Globals) {
- FunctionEnv env_f_ff;
- FunctionEnv* env = &env_f_ff;
- init_env(env, sigs.f_ff());
+TEST_F(AstDecoderTest, Float32Globals) {
+ FunctionSig* sig = sigs.f_ff();
TestModuleEnv module_env;
- env->module = &module_env;
+ module = &module_env;
module_env.AddGlobal(MachineType::Float32());
- EXPECT_VERIFIES_INLINE(env, WASM_LOAD_GLOBAL(0));
- EXPECT_VERIFIES_INLINE(env, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_LOAD_GLOBAL(0));
+ EXPECT_VERIFIES_INLINE(sig, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
}
-
-TEST_F(WasmDecoderTest, Float64Globals) {
- FunctionEnv env_d_dd;
- FunctionEnv* env = &env_d_dd;
- init_env(env, sigs.d_dd());
+TEST_F(AstDecoderTest, Float64Globals) {
+ FunctionSig* sig = sigs.d_dd();
TestModuleEnv module_env;
- env->module = &module_env;
+ module = &module_env;
module_env.AddGlobal(MachineType::Float64());
- EXPECT_VERIFIES_INLINE(env, WASM_LOAD_GLOBAL(0));
- EXPECT_VERIFIES_INLINE(env, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_LOAD_GLOBAL(0));
+ EXPECT_VERIFIES_INLINE(sig, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
}
-
-TEST_F(WasmDecoderTest, AllLoadGlobalCombinations) {
+TEST_F(AstDecoderTest, AllLoadGlobalCombinations) {
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
LocalType local_type = kLocalTypes[i];
for (size_t j = 0; j < arraysize(machineTypes); j++) {
MachineType mem_type = machineTypes[j];
- FunctionEnv env;
FunctionSig sig(1, 0, &local_type);
TestModuleEnv module_env;
- init_env(&env, &sig);
- env.module = &module_env;
+ module = &module_env;
module_env.AddGlobal(mem_type);
if (local_type == WasmOpcodes::LocalTypeFor(mem_type)) {
- EXPECT_VERIFIES_INLINE(&env, WASM_LOAD_GLOBAL(0));
+ EXPECT_VERIFIES_INLINE(&sig, WASM_LOAD_GLOBAL(0));
} else {
- EXPECT_FAILURE_INLINE(&env, WASM_LOAD_GLOBAL(0));
+ EXPECT_FAILURE_INLINE(&sig, WASM_LOAD_GLOBAL(0));
}
}
}
}
-
-TEST_F(WasmDecoderTest, AllStoreGlobalCombinations) {
+TEST_F(AstDecoderTest, AllStoreGlobalCombinations) {
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
LocalType local_type = kLocalTypes[i];
for (size_t j = 0; j < arraysize(machineTypes); j++) {
MachineType mem_type = machineTypes[j];
- FunctionEnv env;
FunctionSig sig(0, 1, &local_type);
TestModuleEnv module_env;
- init_env(&env, &sig);
- env.module = &module_env;
+ module = &module_env;
module_env.AddGlobal(mem_type);
if (local_type == WasmOpcodes::LocalTypeFor(mem_type)) {
- EXPECT_VERIFIES_INLINE(&env, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_INLINE(&sig, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
} else {
- EXPECT_FAILURE_INLINE(&env, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE_INLINE(&sig, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
}
}
}
}
-
-TEST_F(WasmDecoderTest, BreakNesting1) {
+TEST_F(AstDecoderTest, BreakNesting1) {
for (int i = 0; i < 5; i++) {
// (block[2] (loop[2] (if (get p) break[N]) (set p 1)) p)
byte code[] = {WASM_BLOCK(
@@ -1548,65 +1342,60 @@ TEST_F(WasmDecoderTest, BreakNesting1) {
WASM_SET_LOCAL(0, WASM_I8(1))),
WASM_GET_LOCAL(0))};
if (i < 3) {
- EXPECT_VERIFIES(&env_i_i, code);
+ EXPECT_VERIFIES(sigs.i_i(), code);
} else {
- EXPECT_FAILURE(&env_i_i, code);
+ EXPECT_FAILURE(sigs.i_i(), code);
}
}
}
-
-TEST_F(WasmDecoderTest, BreakNesting2) {
- env_v_v.AddLocals(kAstI32, 1);
+TEST_F(AstDecoderTest, BreakNesting2) {
+ AddLocals(kAstI32, 1);
for (int i = 0; i < 5; i++) {
- // (block[2] (loop[2] (if (get p) break[N]) (set p 1)) (return p)) (11)
- byte code[] = {
- WASM_BLOCK(1, WASM_LOOP(2, WASM_IF(WASM_GET_LOCAL(0), WASM_BREAK(i)),
- WASM_SET_LOCAL(0, WASM_I8(1)))),
- WASM_I8(11)};
+ // (block[2] (loop[2] (if 0 break[N]) (set p 1)) (return p)) (11)
+ byte code[] = {WASM_BLOCK(1, WASM_LOOP(2, WASM_IF(WASM_ZERO, WASM_BREAK(i)),
+ WASM_SET_LOCAL(0, WASM_I8(1)))),
+ WASM_I8(11)};
if (i < 2) {
- EXPECT_VERIFIES(&env_v_v, code);
+ EXPECT_VERIFIES(sigs.v_v(), code);
} else {
- EXPECT_FAILURE(&env_v_v, code);
+ EXPECT_FAILURE(sigs.v_v(), code);
}
}
}
-
-TEST_F(WasmDecoderTest, BreakNesting3) {
- env_v_v.AddLocals(kAstI32, 1);
+TEST_F(AstDecoderTest, BreakNesting3) {
for (int i = 0; i < 5; i++) {
- // (block[1] (loop[1] (block[1] (if (get p) break[N])
+ // (block[1] (loop[1] (block[1] (if 0 break[N])
byte code[] = {WASM_BLOCK(
- 1, WASM_LOOP(
- 1, WASM_BLOCK(1, WASM_IF(WASM_GET_LOCAL(0), WASM_BREAK(i)))))};
+ 1, WASM_LOOP(1, WASM_BLOCK(1, WASM_IF(WASM_ZERO, WASM_BREAK(i)))))};
if (i < 3) {
- EXPECT_VERIFIES(&env_v_v, code);
+ EXPECT_VERIFIES(sigs.v_v(), code);
} else {
- EXPECT_FAILURE(&env_v_v, code);
+ EXPECT_FAILURE(sigs.v_v(), code);
}
}
}
-
-TEST_F(WasmDecoderTest, BreaksWithMultipleTypes) {
+TEST_F(AstDecoderTest, BreaksWithMultipleTypes) {
EXPECT_FAILURE_INLINE(
- &env_i_i, WASM_BLOCK(2, WASM_BRV_IF_ZERO(0, WASM_I8(7)), WASM_F32(7.7)));
+ sigs.i_i(),
+ WASM_BLOCK(2, WASM_BRV_IF_ZERO(0, WASM_I8(7)), WASM_F32(7.7)));
- EXPECT_FAILURE_INLINE(&env_i_i,
+ EXPECT_FAILURE_INLINE(sigs.i_i(),
WASM_BLOCK(2, WASM_BRV_IF_ZERO(0, WASM_I8(7)),
WASM_BRV_IF_ZERO(0, WASM_F32(7.7))));
- EXPECT_FAILURE_INLINE(&env_i_i,
+ EXPECT_FAILURE_INLINE(sigs.i_i(),
WASM_BLOCK(3, WASM_BRV_IF_ZERO(0, WASM_I8(8)),
WASM_BRV_IF_ZERO(0, WASM_I8(0)),
WASM_BRV_IF_ZERO(0, WASM_F32(7.7))));
- EXPECT_FAILURE_INLINE(&env_i_i, WASM_BLOCK(3, WASM_BRV_IF_ZERO(0, WASM_I8(9)),
- WASM_BRV_IF_ZERO(0, WASM_F32(7.7)),
- WASM_BRV_IF_ZERO(0, WASM_I8(11))));
+ EXPECT_FAILURE_INLINE(sigs.i_i(),
+ WASM_BLOCK(3, WASM_BRV_IF_ZERO(0, WASM_I8(9)),
+ WASM_BRV_IF_ZERO(0, WASM_F32(7.7)),
+ WASM_BRV_IF_ZERO(0, WASM_I8(11))));
}
-
-TEST_F(WasmDecoderTest, BreakNesting_6_levels) {
+TEST_F(AstDecoderTest, BreakNesting_6_levels) {
for (int mask = 0; mask < 64; mask++) {
for (int i = 0; i < 14; i++) {
byte code[] = {
@@ -1629,39 +1418,37 @@ TEST_F(WasmDecoderTest, BreakNesting_6_levels) {
}
if (i < depth) {
- EXPECT_VERIFIES(&env_v_v, code);
+ EXPECT_VERIFIES(sigs.v_v(), code);
} else {
- EXPECT_FAILURE(&env_v_v, code);
+ EXPECT_FAILURE(sigs.v_v(), code);
}
}
}
}
-
-TEST_F(WasmDecoderTest, ExprBreak_TypeCheck) {
- FunctionEnv* envs[] = {&env_i_i, &env_l_l, &env_f_ff, &env_d_dd};
- for (size_t i = 0; i < arraysize(envs); i++) {
- FunctionEnv* env = envs[i];
+TEST_F(AstDecoderTest, ExprBreak_TypeCheck) {
+ FunctionSig* sigarray[] = {sigs.i_i(), sigs.l_l(), sigs.f_ff(), sigs.d_dd()};
+ for (size_t i = 0; i < arraysize(sigarray); i++) {
+ FunctionSig* sig = sigarray[i];
// unify X and X => OK
EXPECT_VERIFIES_INLINE(
- env, WASM_BLOCK(2, WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))),
+ sig, WASM_BLOCK(2, WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))),
WASM_GET_LOCAL(0)));
}
// unify i32 and f32 => fail
EXPECT_FAILURE_INLINE(
- &env_i_i,
+ sigs.i_i(),
WASM_BLOCK(2, WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_ZERO)), WASM_F32(1.2)));
// unify f64 and f64 => OK
EXPECT_VERIFIES_INLINE(
- &env_d_dd,
+ sigs.d_dd(),
WASM_BLOCK(2, WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))),
WASM_F64(1.2)));
}
-
-TEST_F(WasmDecoderTest, ExprBreak_TypeCheckAll) {
+TEST_F(AstDecoderTest, ExprBreak_TypeCheckAll) {
byte code1[] = {WASM_BLOCK(2,
WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))),
WASM_GET_LOCAL(1))};
@@ -1671,32 +1458,26 @@ TEST_F(WasmDecoderTest, ExprBreak_TypeCheckAll) {
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
- FunctionEnv env;
LocalType storage[] = {kLocalTypes[i], kLocalTypes[i], kLocalTypes[j]};
FunctionSig sig(1, 2, storage);
- init_env(&env, &sig);
if (i == j) {
- EXPECT_VERIFIES(&env, code1);
- EXPECT_VERIFIES(&env, code2);
+ EXPECT_VERIFIES(&sig, code1);
+ EXPECT_VERIFIES(&sig, code2);
} else {
- EXPECT_FAILURE(&env, code1);
- EXPECT_FAILURE(&env, code2);
+ EXPECT_FAILURE(&sig, code1);
+ EXPECT_FAILURE(&sig, code2);
}
}
}
}
-
-TEST_F(WasmDecoderTest, ExprBr_Unify) {
- FunctionEnv env;
-
+TEST_F(AstDecoderTest, ExprBr_Unify) {
for (int which = 0; which < 2; which++) {
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
LocalType type = kLocalTypes[i];
LocalType storage[] = {kAstI32, kAstI32, type};
FunctionSig sig(1, 2, storage);
- init_env(&env, &sig); // (i32, X) -> i32
byte code1[] = {
WASM_BLOCK(2, WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(which))),
@@ -1707,37 +1488,34 @@ TEST_F(WasmDecoderTest, ExprBr_Unify) {
if (type == kAstI32) {
- EXPECT_VERIFIES(&env, code1);
- EXPECT_VERIFIES(&env, code2);
+ EXPECT_VERIFIES(&sig, code1);
+ EXPECT_VERIFIES(&sig, code2);
} else {
- EXPECT_FAILURE(&env, code1);
- EXPECT_FAILURE(&env, code2);
+ EXPECT_FAILURE(&sig, code1);
+ EXPECT_FAILURE(&sig, code2);
}
}
}
}
-TEST_F(WasmDecoderTest, ExprBrIf_cond_type) {
- FunctionEnv env;
+TEST_F(AstDecoderTest, ExprBrIf_cond_type) {
byte code[] = {
WASM_BLOCK(1, WASM_BRV_IF(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)))};
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
LocalType types[] = {kLocalTypes[i], kLocalTypes[j]};
FunctionSig sig(0, 2, types);
- init_env(&env, &sig);
if (types[1] == kAstI32) {
- EXPECT_VERIFIES(&env, code);
+ EXPECT_VERIFIES(&sig, code);
} else {
- EXPECT_FAILURE(&env, code);
+ EXPECT_FAILURE(&sig, code);
}
}
}
}
-TEST_F(WasmDecoderTest, ExprBrIf_val_type) {
- FunctionEnv env;
+TEST_F(AstDecoderTest, ExprBrIf_val_type) {
byte code[] = {
WASM_BLOCK(2, WASM_BRV_IF(0, WASM_GET_LOCAL(1), WASM_GET_LOCAL(2)),
WASM_GET_LOCAL(0))};
@@ -1746,27 +1524,22 @@ TEST_F(WasmDecoderTest, ExprBrIf_val_type) {
LocalType types[] = {kLocalTypes[i], kLocalTypes[i], kLocalTypes[j],
kAstI32};
FunctionSig sig(1, 3, types);
- init_env(&env, &sig);
if (i == j) {
- EXPECT_VERIFIES(&env, code);
+ EXPECT_VERIFIES(&sig, code);
} else {
- EXPECT_FAILURE(&env, code);
+ EXPECT_FAILURE(&sig, code);
}
}
}
}
-
-TEST_F(WasmDecoderTest, ExprBrIf_Unify) {
- FunctionEnv env;
-
+TEST_F(AstDecoderTest, ExprBrIf_Unify) {
for (int which = 0; which < 2; which++) {
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
LocalType type = kLocalTypes[i];
LocalType storage[] = {kAstI32, kAstI32, type};
FunctionSig sig(1, 2, storage);
- init_env(&env, &sig); // (i32, X) -> i32
byte code1[] = {WASM_BLOCK(2, WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(which)),
WASM_GET_LOCAL(which ^ 1))};
@@ -1774,221 +1547,171 @@ TEST_F(WasmDecoderTest, ExprBrIf_Unify) {
WASM_GET_LOCAL(which ^ 1))};
if (type == kAstI32) {
- EXPECT_VERIFIES(&env, code1);
- EXPECT_VERIFIES(&env, code2);
+ EXPECT_VERIFIES(&sig, code1);
+ EXPECT_VERIFIES(&sig, code2);
} else {
- EXPECT_FAILURE(&env, code1);
- EXPECT_FAILURE(&env, code2);
+ EXPECT_FAILURE(&sig, code1);
+ EXPECT_FAILURE(&sig, code2);
}
}
}
}
-
-TEST_F(WasmDecoderTest, TableSwitch0) {
- static byte code[] = {kExprTableSwitch, 0, 0, 0, 0};
- EXPECT_FAILURE(&env_v_v, code);
+TEST_F(AstDecoderTest, BrTable0) {
+ static byte code[] = {kExprBrTable, 0, 0};
+ EXPECT_FAILURE(sigs.v_v(), code);
}
-
-TEST_F(WasmDecoderTest, TableSwitch0b) {
- static byte code[] = {kExprTableSwitch, 0, 0, 0, 0, kExprI8Const, 11};
- EXPECT_FAILURE(&env_v_v, code);
- EXPECT_FAILURE(&env_i_i, code);
+TEST_F(AstDecoderTest, BrTable0b) {
+ static byte code[] = {kExprBrTable, 0, 0, kExprI32Const, 11};
+ EXPECT_FAILURE(sigs.v_v(), code);
+ EXPECT_FAILURE(sigs.i_i(), code);
}
-
-TEST_F(WasmDecoderTest, TableSwitch0c) {
- static byte code[] = {
- WASM_BLOCK(1, WASM_TABLESWITCH_OP(0, 1, WASM_CASE_BR(0)), WASM_I8(67))};
- EXPECT_VERIFIES(&env_v_v, code);
+TEST_F(AstDecoderTest, BrTable0c) {
+ static byte code[] = {kExprBrTable, 0, 1, 0, 0, kExprI32Const, 11};
+ EXPECT_FAILURE(sigs.v_v(), code);
+ EXPECT_FAILURE(sigs.i_i(), code);
}
-TEST_F(WasmDecoderTest, TableSwitch0d) {
+TEST_F(AstDecoderTest, BrTable1a) {
static byte code[] = {
- WASM_BLOCK(1, WASM_TABLESWITCH_OP(0, 2, WASM_CASE_BR(0), WASM_CASE_BR(1)),
- WASM_I8(67))};
- EXPECT_VERIFIES(&env_v_v, code);
+ WASM_BLOCK(1, WASM_BR_TABLE(WASM_I8(67), 0, BR_TARGET(0)))};
+ EXPECT_VERIFIES(sigs.v_v(), code);
}
-TEST_F(WasmDecoderTest, TableSwitch1) {
- static byte code[] = {WASM_TABLESWITCH_OP(1, 1, WASM_CASE(0)),
- WASM_TABLESWITCH_BODY(WASM_I8(0), WASM_I8(9))};
- EXPECT_VERIFIES(&env_i_i, code);
- EXPECT_VERIFIES(&env_v_v, code);
- EXPECT_FAILURE(&env_f_ff, code);
- EXPECT_FAILURE(&env_d_dd, code);
-}
-
-
-TEST_F(WasmDecoderTest, TableSwitch_off_end) {
- static byte code[] = {WASM_TABLESWITCH_OP(1, 1, WASM_CASE(0)),
- WASM_TABLESWITCH_BODY(WASM_I8(0), WASM_I8(9))};
- for (size_t len = arraysize(code) - 1; len > 0; len--) {
- Verify(kError, &env_v_v, code, code + len);
- }
+TEST_F(AstDecoderTest, BrTable1b) {
+ static byte code[] = {
+ WASM_BLOCK(1, WASM_BR_TABLE(WASM_ZERO, 0, BR_TARGET(0)))};
+ EXPECT_VERIFIES(sigs.v_v(), code);
+ EXPECT_FAILURE(sigs.i_i(), code);
+ EXPECT_FAILURE(sigs.f_ff(), code);
+ EXPECT_FAILURE(sigs.d_dd(), code);
}
-
-TEST_F(WasmDecoderTest, TableSwitch2) {
+TEST_F(AstDecoderTest, BrTable2a) {
static byte code[] = {
- WASM_TABLESWITCH_OP(2, 2, WASM_CASE(0), WASM_CASE(1)),
- WASM_TABLESWITCH_BODY(WASM_I8(3), WASM_I8(10), WASM_I8(11))};
- EXPECT_VERIFIES(&env_i_i, code);
- EXPECT_VERIFIES(&env_v_v, code);
- EXPECT_FAILURE(&env_f_ff, code);
- EXPECT_FAILURE(&env_d_dd, code);
+ WASM_BLOCK(1, WASM_BR_TABLE(WASM_I8(67), 1, BR_TARGET(0), BR_TARGET(0)))};
+ EXPECT_VERIFIES(sigs.v_v(), code);
}
-
-TEST_F(WasmDecoderTest, TableSwitch1b) {
- EXPECT_VERIFIES_INLINE(&env_i_i, WASM_TABLESWITCH_OP(1, 1, WASM_CASE(0)),
- WASM_TABLESWITCH_BODY(WASM_GET_LOCAL(0), WASM_ZERO));
-
- EXPECT_VERIFIES_INLINE(&env_f_ff, WASM_TABLESWITCH_OP(1, 1, WASM_CASE(0)),
- WASM_TABLESWITCH_BODY(WASM_ZERO, WASM_F32(0.0)));
-
- EXPECT_VERIFIES_INLINE(&env_d_dd, WASM_TABLESWITCH_OP(1, 1, WASM_CASE(0)),
- WASM_TABLESWITCH_BODY(WASM_ZERO, WASM_F64(0.0)));
+TEST_F(AstDecoderTest, BrTable2b) {
+ static byte code[] = {WASM_BLOCK(
+ 1, WASM_BLOCK(
+ 1, WASM_BR_TABLE(WASM_I8(67), 1, BR_TARGET(0), BR_TARGET(1))))};
+ EXPECT_VERIFIES(sigs.v_v(), code);
}
-TEST_F(WasmDecoderTest, TableSwitch_br1) {
- for (int depth = 0; depth < 2; depth++) {
- byte code[] = {WASM_BLOCK(1, WASM_TABLESWITCH_OP(0, 1, WASM_CASE_BR(depth)),
- WASM_GET_LOCAL(0))};
- EXPECT_VERIFIES(&env_v_i, code);
- EXPECT_FAILURE(&env_i_i, code);
+TEST_F(AstDecoderTest, BrTable_off_end) {
+ static byte code[] = {
+ WASM_BLOCK(1, WASM_BR_TABLE(WASM_GET_LOCAL(0), 0, BR_TARGET(0)))};
+ for (size_t len = 1; len < sizeof(code); len++) {
+ Verify(kError, sigs.i_i(), code, code + len);
}
}
-
-TEST_F(WasmDecoderTest, TableSwitch_invalid_br) {
- for (int depth = 1; depth < 4; depth++) {
- EXPECT_FAILURE_INLINE(&env_v_i,
- WASM_TABLESWITCH_OP(0, 1, WASM_CASE_BR(depth)),
- WASM_GET_LOCAL(0));
- EXPECT_FAILURE_INLINE(
- &env_v_i,
- WASM_TABLESWITCH_OP(0, 2, WASM_CASE_BR(depth), WASM_CASE_BR(depth)),
- WASM_GET_LOCAL(0));
+TEST_F(AstDecoderTest, BrTable_invalid_br1) {
+ for (int depth = 0; depth < 4; depth++) {
+ byte code[] = {
+ WASM_BLOCK(1, WASM_BR_TABLE(WASM_GET_LOCAL(0), 0, BR_TARGET(depth)))};
+ if (depth == 0) {
+ EXPECT_VERIFIES(sigs.v_i(), code);
+ } else {
+ EXPECT_FAILURE(sigs.v_i(), code);
+ }
}
}
-
-TEST_F(WasmDecoderTest, TableSwitch_invalid_case_ref) {
- EXPECT_FAILURE_INLINE(&env_i_i, WASM_TABLESWITCH_OP(0, 1, WASM_CASE(0)),
- WASM_GET_LOCAL(0));
- EXPECT_FAILURE_INLINE(&env_i_i, WASM_TABLESWITCH_OP(1, 1, WASM_CASE(1)),
- WASM_TABLESWITCH_BODY(WASM_GET_LOCAL(0), WASM_ZERO));
-}
-
-
-TEST_F(WasmDecoderTest, TableSwitch1_br) {
- EXPECT_VERIFIES_INLINE(
- &env_i_i, WASM_TABLESWITCH_OP(1, 1, WASM_CASE(0)),
- WASM_TABLESWITCH_BODY(WASM_GET_LOCAL(0), WASM_BRV(0, WASM_ZERO)));
-}
-
-
-TEST_F(WasmDecoderTest, TableSwitch2_br) {
- EXPECT_VERIFIES_INLINE(
- &env_i_i, WASM_TABLESWITCH_OP(2, 2, WASM_CASE(0), WASM_CASE(1)),
- WASM_TABLESWITCH_BODY(WASM_GET_LOCAL(0), WASM_BRV(0, WASM_I8(0)),
- WASM_BRV(0, WASM_I8(1))));
-
- EXPECT_FAILURE_INLINE(
- &env_f_ff, WASM_TABLESWITCH_OP(2, 2, WASM_CASE(0), WASM_CASE(1)),
- WASM_TABLESWITCH_BODY(WASM_ZERO, WASM_BRV(0, WASM_I8(3)),
- WASM_BRV(0, WASM_I8(4))));
-}
-
-
-TEST_F(WasmDecoderTest, TableSwitch2x2) {
- EXPECT_VERIFIES_INLINE(
- &env_i_i, WASM_TABLESWITCH_OP(2, 4, WASM_CASE(0), WASM_CASE(1),
- WASM_CASE(0), WASM_CASE(1)),
- WASM_TABLESWITCH_BODY(WASM_GET_LOCAL(0), WASM_BRV(0, WASM_I8(3)),
- WASM_BRV(0, WASM_I8(4))));
+TEST_F(AstDecoderTest, BrTable_invalid_br2) {
+ for (int depth = 0; depth < 4; depth++) {
+ byte code[] = {
+ WASM_LOOP(1, WASM_BR_TABLE(WASM_GET_LOCAL(0), 0, BR_TARGET(depth)))};
+ if (depth <= 1) {
+ EXPECT_VERIFIES(sigs.v_i(), code);
+ } else {
+ EXPECT_FAILURE(sigs.v_i(), code);
+ }
+ }
}
-
-TEST_F(WasmDecoderTest, ExprBreakNesting1) {
- EXPECT_VERIFIES_INLINE(&env_v_v, WASM_BLOCK(1, WASM_BRV(0, WASM_ZERO)));
- EXPECT_VERIFIES_INLINE(&env_v_v, WASM_BLOCK(1, WASM_BR(0)));
- EXPECT_VERIFIES_INLINE(&env_v_v,
+TEST_F(AstDecoderTest, ExprBreakNesting1) {
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_BLOCK(1, WASM_BRV(0, WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_BLOCK(1, WASM_BR(0)));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(),
WASM_BLOCK(1, WASM_BRV_IF(0, WASM_ZERO, WASM_ZERO)));
- EXPECT_VERIFIES_INLINE(&env_v_v, WASM_BLOCK(1, WASM_BR_IF(0, WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_BLOCK(1, WASM_BR_IF(0, WASM_ZERO)));
- EXPECT_VERIFIES_INLINE(&env_v_v, WASM_LOOP(1, WASM_BRV(0, WASM_ZERO)));
- EXPECT_VERIFIES_INLINE(&env_v_v, WASM_LOOP(1, WASM_BR(0)));
- EXPECT_VERIFIES_INLINE(&env_v_v,
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(1, WASM_BRV(0, WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(1, WASM_BR(0)));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(),
WASM_LOOP(1, WASM_BRV_IF(0, WASM_ZERO, WASM_ZERO)));
- EXPECT_VERIFIES_INLINE(&env_v_v, WASM_LOOP(1, WASM_BR_IF(0, WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(1, WASM_BR_IF(0, WASM_ZERO)));
- EXPECT_VERIFIES_INLINE(&env_v_v, WASM_LOOP(1, WASM_BRV(1, WASM_ZERO)));
- EXPECT_VERIFIES_INLINE(&env_v_v, WASM_LOOP(1, WASM_BR(1)));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(1, WASM_BRV(1, WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(1, WASM_BR(1)));
}
-
-TEST_F(WasmDecoderTest, Select) {
+TEST_F(AstDecoderTest, Select) {
EXPECT_VERIFIES_INLINE(
- &env_i_i, WASM_SELECT(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_ZERO));
- EXPECT_VERIFIES_INLINE(&env_f_ff,
+ sigs.i_i(), WASM_SELECT(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_ZERO));
+ EXPECT_VERIFIES_INLINE(sigs.f_ff(),
WASM_SELECT(WASM_F32(0.0), WASM_F32(0.0), WASM_ZERO));
- EXPECT_VERIFIES_INLINE(&env_d_dd,
+ EXPECT_VERIFIES_INLINE(sigs.d_dd(),
WASM_SELECT(WASM_F64(0.0), WASM_F64(0.0), WASM_ZERO));
- EXPECT_VERIFIES_INLINE(&env_l_l,
- WASM_SELECT(WASM_I64(0), WASM_I64(0), WASM_ZERO));
+ EXPECT_VERIFIES_INLINE(
+ sigs.l_l(), WASM_SELECT(WASM_I64V_1(0), WASM_I64V_1(0), WASM_ZERO));
}
-TEST_F(WasmDecoderTest, Select_fail1) {
- EXPECT_FAILURE_INLINE(&env_i_i, WASM_SELECT(WASM_F32(0.0), WASM_GET_LOCAL(0),
- WASM_GET_LOCAL(0)));
- EXPECT_FAILURE_INLINE(&env_i_i, WASM_SELECT(WASM_GET_LOCAL(0), WASM_F32(0.0),
- WASM_GET_LOCAL(0)));
+TEST_F(AstDecoderTest, Select_fail1) {
+ EXPECT_FAILURE_INLINE(
+ sigs.i_i(),
+ WASM_SELECT(WASM_F32(0.0), WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
EXPECT_FAILURE_INLINE(
- &env_i_i,
+ sigs.i_i(),
+ WASM_SELECT(WASM_GET_LOCAL(0), WASM_F32(0.0), WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE_INLINE(
+ sigs.i_i(),
WASM_SELECT(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_F32(0.0)));
}
-TEST_F(WasmDecoderTest, Select_fail2) {
+TEST_F(AstDecoderTest, Select_fail2) {
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
LocalType type = kLocalTypes[i];
if (type == kAstI32) continue;
LocalType types[] = {type, kAstI32, type};
FunctionSig sig(1, 2, types);
- FunctionEnv env;
- init_env(&env, &sig);
EXPECT_VERIFIES_INLINE(
- &env,
+ &sig,
WASM_SELECT(WASM_GET_LOCAL(1), WASM_GET_LOCAL(1), WASM_GET_LOCAL(0)));
EXPECT_FAILURE_INLINE(
- &env,
+ &sig,
WASM_SELECT(WASM_GET_LOCAL(1), WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
EXPECT_FAILURE_INLINE(
- &env,
+ &sig,
WASM_SELECT(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1), WASM_GET_LOCAL(0)));
EXPECT_FAILURE_INLINE(
- &env,
+ &sig,
WASM_SELECT(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
}
}
+TEST_F(AstDecoderTest, Select_TypeCheck) {
+ EXPECT_FAILURE_INLINE(
+ sigs.i_i(),
+ WASM_SELECT(WASM_F32(9.9), WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
-TEST_F(WasmDecoderTest, Select_TypeCheck) {
- EXPECT_FAILURE_INLINE(&env_i_i, WASM_SELECT(WASM_F32(9.9), WASM_GET_LOCAL(0),
- WASM_GET_LOCAL(0)));
-
- EXPECT_FAILURE_INLINE(&env_i_i, WASM_SELECT(WASM_GET_LOCAL(0), WASM_F64(0.25),
- WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE_INLINE(
+ sigs.i_i(),
+ WASM_SELECT(WASM_GET_LOCAL(0), WASM_F64(0.25), WASM_GET_LOCAL(0)));
EXPECT_FAILURE_INLINE(
- &env_i_i, WASM_SELECT(WASM_F32(9.9), WASM_GET_LOCAL(0), WASM_I64(0)));
+ sigs.i_i(),
+ WASM_SELECT(WASM_F32(9.9), WASM_GET_LOCAL(0), WASM_I64V_1(0)));
}
@@ -2003,6 +1726,12 @@ class WasmOpcodeLengthTest : public TestWithZone {
EXPECT_EQ(expected, OpcodeLength(code, code + sizeof(code))); \
}
+#define EXPECT_LENGTH_N(expected, ...) \
+ { \
+ static const byte code[] = {__VA_ARGS__}; \
+ EXPECT_EQ(expected, OpcodeLength(code, code + sizeof(code))); \
+ }
+
TEST_F(WasmOpcodeLengthTest, Statements) {
EXPECT_LENGTH(1, kExprNop);
EXPECT_LENGTH(2, kExprBlock);
@@ -2017,9 +1746,7 @@ TEST_F(WasmOpcodeLengthTest, Statements) {
TEST_F(WasmOpcodeLengthTest, MiscExpressions) {
EXPECT_LENGTH(2, kExprI8Const);
- EXPECT_LENGTH(5, kExprI32Const);
EXPECT_LENGTH(5, kExprF32Const);
- EXPECT_LENGTH(9, kExprI64Const);
EXPECT_LENGTH(9, kExprF64Const);
EXPECT_LENGTH(2, kExprGetLocal);
EXPECT_LENGTH(2, kExprSetLocal);
@@ -2036,47 +1763,59 @@ TEST_F(WasmOpcodeLengthTest, MiscExpressions) {
EXPECT_LENGTH(2, kExprBrIf);
}
+TEST_F(WasmOpcodeLengthTest, I32Const) {
+ EXPECT_LENGTH_N(2, kExprI32Const, U32V_1(1));
+ EXPECT_LENGTH_N(3, kExprI32Const, U32V_2(999));
+ EXPECT_LENGTH_N(4, kExprI32Const, U32V_3(9999));
+ EXPECT_LENGTH_N(5, kExprI32Const, U32V_4(999999));
+ EXPECT_LENGTH_N(6, kExprI32Const, U32V_5(99999999));
+}
-TEST_F(WasmOpcodeLengthTest, VariableLength) {
- byte size2[] = {kExprLoadGlobal, 1};
- byte size3[] = {kExprLoadGlobal, 1 | 0x80, 2};
- byte size4[] = {kExprLoadGlobal, 1 | 0x80, 2 | 0x80, 3};
- byte size5[] = {kExprLoadGlobal, 1 | 0x80, 2 | 0x80, 3 | 0x80, 4};
- byte size6[] = {kExprLoadGlobal, 1 | 0x80, 2 | 0x80, 3 | 0x80, 4 | 0x80, 5};
-
- EXPECT_EQ(2, OpcodeLength(size2, size2 + sizeof(size2)));
- EXPECT_EQ(3, OpcodeLength(size3, size3 + sizeof(size3)));
- EXPECT_EQ(4, OpcodeLength(size4, size4 + sizeof(size4)));
- EXPECT_EQ(5, OpcodeLength(size5, size5 + sizeof(size5)));
- EXPECT_EQ(6, OpcodeLength(size6, size6 + sizeof(size6)));
+TEST_F(WasmOpcodeLengthTest, I64Const) {
+ EXPECT_LENGTH_N(2, kExprI64Const, U32V_1(1));
+ EXPECT_LENGTH_N(3, kExprI64Const, U32V_2(99));
+ EXPECT_LENGTH_N(4, kExprI64Const, U32V_3(9999));
+ EXPECT_LENGTH_N(5, kExprI64Const, U32V_4(99999));
+ EXPECT_LENGTH_N(6, kExprI64Const, U32V_5(9999999));
+ EXPECT_LENGTH_N(7, WASM_I64V_6(777777));
+ EXPECT_LENGTH_N(8, WASM_I64V_7(7777777));
+ EXPECT_LENGTH_N(9, WASM_I64V_8(77777777));
+ EXPECT_LENGTH_N(10, WASM_I64V_9(777777777));
}
+TEST_F(WasmOpcodeLengthTest, VariableLength) {
+ EXPECT_LENGTH_N(2, kExprLoadGlobal, U32V_1(1));
+ EXPECT_LENGTH_N(3, kExprLoadGlobal, U32V_2(33));
+ EXPECT_LENGTH_N(4, kExprLoadGlobal, U32V_3(44));
+ EXPECT_LENGTH_N(5, kExprLoadGlobal, U32V_4(66));
+ EXPECT_LENGTH_N(6, kExprLoadGlobal, U32V_5(77));
+}
TEST_F(WasmOpcodeLengthTest, LoadsAndStores) {
- EXPECT_LENGTH(2, kExprI32LoadMem8S);
- EXPECT_LENGTH(2, kExprI32LoadMem8U);
- EXPECT_LENGTH(2, kExprI32LoadMem16S);
- EXPECT_LENGTH(2, kExprI32LoadMem16U);
- EXPECT_LENGTH(2, kExprI32LoadMem);
- EXPECT_LENGTH(2, kExprI64LoadMem8S);
- EXPECT_LENGTH(2, kExprI64LoadMem8U);
- EXPECT_LENGTH(2, kExprI64LoadMem16S);
- EXPECT_LENGTH(2, kExprI64LoadMem16U);
- EXPECT_LENGTH(2, kExprI64LoadMem32S);
- EXPECT_LENGTH(2, kExprI64LoadMem32U);
- EXPECT_LENGTH(2, kExprI64LoadMem);
- EXPECT_LENGTH(2, kExprF32LoadMem);
- EXPECT_LENGTH(2, kExprF64LoadMem);
-
- EXPECT_LENGTH(2, kExprI32StoreMem8);
- EXPECT_LENGTH(2, kExprI32StoreMem16);
- EXPECT_LENGTH(2, kExprI32StoreMem);
- EXPECT_LENGTH(2, kExprI64StoreMem8);
- EXPECT_LENGTH(2, kExprI64StoreMem16);
- EXPECT_LENGTH(2, kExprI64StoreMem32);
- EXPECT_LENGTH(2, kExprI64StoreMem);
- EXPECT_LENGTH(2, kExprF32StoreMem);
- EXPECT_LENGTH(2, kExprF64StoreMem);
+ EXPECT_LENGTH(3, kExprI32LoadMem8S);
+ EXPECT_LENGTH(3, kExprI32LoadMem8U);
+ EXPECT_LENGTH(3, kExprI32LoadMem16S);
+ EXPECT_LENGTH(3, kExprI32LoadMem16U);
+ EXPECT_LENGTH(3, kExprI32LoadMem);
+ EXPECT_LENGTH(3, kExprI64LoadMem8S);
+ EXPECT_LENGTH(3, kExprI64LoadMem8U);
+ EXPECT_LENGTH(3, kExprI64LoadMem16S);
+ EXPECT_LENGTH(3, kExprI64LoadMem16U);
+ EXPECT_LENGTH(3, kExprI64LoadMem32S);
+ EXPECT_LENGTH(3, kExprI64LoadMem32U);
+ EXPECT_LENGTH(3, kExprI64LoadMem);
+ EXPECT_LENGTH(3, kExprF32LoadMem);
+ EXPECT_LENGTH(3, kExprF64LoadMem);
+
+ EXPECT_LENGTH(3, kExprI32StoreMem8);
+ EXPECT_LENGTH(3, kExprI32StoreMem16);
+ EXPECT_LENGTH(3, kExprI32StoreMem);
+ EXPECT_LENGTH(3, kExprI64StoreMem8);
+ EXPECT_LENGTH(3, kExprI64StoreMem16);
+ EXPECT_LENGTH(3, kExprI64StoreMem32);
+ EXPECT_LENGTH(3, kExprI64StoreMem);
+ EXPECT_LENGTH(3, kExprF32StoreMem);
+ EXPECT_LENGTH(3, kExprF64StoreMem);
}
@@ -2113,7 +1852,7 @@ TEST_F(WasmOpcodeLengthTest, SimpleExpressions) {
EXPECT_LENGTH(1, kExprI32Clz);
EXPECT_LENGTH(1, kExprI32Ctz);
EXPECT_LENGTH(1, kExprI32Popcnt);
- EXPECT_LENGTH(1, kExprBoolNot);
+ EXPECT_LENGTH(1, kExprI32Eqz);
EXPECT_LENGTH(1, kExprI64Add);
EXPECT_LENGTH(1, kExprI64Sub);
EXPECT_LENGTH(1, kExprI64Mul);
@@ -2211,16 +1950,18 @@ TEST_F(WasmOpcodeLengthTest, SimpleExpressions) {
class WasmOpcodeArityTest : public TestWithZone {
public:
WasmOpcodeArityTest() : TestWithZone() {}
+ TestModuleEnv module;
+ TestSignatures sigs;
};
-#define EXPECT_ARITY(expected, ...) \
- { \
- static const byte code[] = {__VA_ARGS__}; \
- EXPECT_EQ(expected, OpcodeArity(&env, code, code + sizeof(code))); \
+#define EXPECT_ARITY(expected, ...) \
+ { \
+ static const byte code[] = {__VA_ARGS__}; \
+ EXPECT_EQ(expected, OpcodeArity(&module, sig, code, code + sizeof(code))); \
}
TEST_F(WasmOpcodeArityTest, Control) {
- FunctionEnv env;
+ FunctionSig* sig = sigs.v_v();
EXPECT_ARITY(0, kExprNop);
EXPECT_ARITY(0, kExprBlock, 0);
@@ -2243,19 +1984,16 @@ TEST_F(WasmOpcodeArityTest, Control) {
EXPECT_ARITY(2, kExprBrIf);
{
- TestSignatures sigs;
- FunctionEnv env;
- WasmDecoderTest::init_env(&env, sigs.v_v());
+ sig = sigs.v_v();
EXPECT_ARITY(0, kExprReturn);
- WasmDecoderTest::init_env(&env, sigs.i_i());
+ sig = sigs.i_i();
EXPECT_ARITY(1, kExprReturn);
}
}
TEST_F(WasmOpcodeArityTest, Misc) {
- FunctionEnv env;
-
+ FunctionSig* sig = sigs.v_v();
EXPECT_ARITY(0, kExprI8Const);
EXPECT_ARITY(0, kExprI32Const);
EXPECT_ARITY(0, kExprF32Const);
@@ -2269,8 +2007,6 @@ TEST_F(WasmOpcodeArityTest, Misc) {
TEST_F(WasmOpcodeArityTest, Calls) {
- TestSignatures sigs;
- TestModuleEnv module;
module.AddFunction(sigs.i_ii());
module.AddFunction(sigs.i_i());
@@ -2281,9 +2017,7 @@ TEST_F(WasmOpcodeArityTest, Calls) {
module.AddImport(sigs.i_d());
{
- FunctionEnv env;
- WasmDecoderTest::init_env(&env, sigs.i_ii());
- env.module = &module;
+ FunctionSig* sig = sigs.i_ii();
EXPECT_ARITY(2, kExprCallFunction, 0);
EXPECT_ARITY(2, kExprCallImport, 0);
@@ -2293,9 +2027,7 @@ TEST_F(WasmOpcodeArityTest, Calls) {
}
{
- FunctionEnv env;
- WasmDecoderTest::init_env(&env, sigs.v_v());
- env.module = &module;
+ FunctionSig* sig = sigs.v_v();
EXPECT_ARITY(1, kExprCallFunction, 1);
EXPECT_ARITY(1, kExprCallImport, 1);
@@ -2307,8 +2039,7 @@ TEST_F(WasmOpcodeArityTest, Calls) {
TEST_F(WasmOpcodeArityTest, LoadsAndStores) {
- FunctionEnv env;
-
+ FunctionSig* sig = sigs.v_v();
EXPECT_ARITY(1, kExprI32LoadMem8S);
EXPECT_ARITY(1, kExprI32LoadMem8U);
EXPECT_ARITY(1, kExprI32LoadMem16S);
@@ -2338,16 +2069,14 @@ TEST_F(WasmOpcodeArityTest, LoadsAndStores) {
TEST_F(WasmOpcodeArityTest, MiscMemExpressions) {
- FunctionEnv env;
-
+ FunctionSig* sig = sigs.v_v();
EXPECT_ARITY(0, kExprMemorySize);
EXPECT_ARITY(1, kExprGrowMemory);
}
TEST_F(WasmOpcodeArityTest, SimpleExpressions) {
- FunctionEnv env;
-
+ FunctionSig* sig = sigs.v_v();
EXPECT_ARITY(2, kExprI32Add);
EXPECT_ARITY(2, kExprI32Sub);
EXPECT_ARITY(2, kExprI32Mul);
@@ -2374,7 +2103,7 @@ TEST_F(WasmOpcodeArityTest, SimpleExpressions) {
EXPECT_ARITY(1, kExprI32Clz);
EXPECT_ARITY(1, kExprI32Ctz);
EXPECT_ARITY(1, kExprI32Popcnt);
- EXPECT_ARITY(1, kExprBoolNot);
+ EXPECT_ARITY(1, kExprI32Eqz);
EXPECT_ARITY(2, kExprI64Add);
EXPECT_ARITY(2, kExprI64Sub);
EXPECT_ARITY(2, kExprI64Mul);
@@ -2467,6 +2196,127 @@ TEST_F(WasmOpcodeArityTest, SimpleExpressions) {
EXPECT_ARITY(1, kExprI32ReinterpretF32);
EXPECT_ARITY(1, kExprI64ReinterpretF64);
}
+
+typedef ZoneVector<LocalType> LocalTypeMap;
+
+class LocalDeclDecoderTest : public TestWithZone {
+ public:
+ base::AccountingAllocator allocator;
+
+ size_t ExpectRun(LocalTypeMap map, size_t pos, LocalType expected,
+ size_t count) {
+ for (size_t i = 0; i < count; i++) {
+ EXPECT_EQ(expected, map[pos++]);
+ }
+ return pos;
+ }
+
+ LocalTypeMap Expand(AstLocalDecls& decls) {
+ ZoneVector<LocalType> map(zone());
+ for (auto p : decls.local_types) {
+ map.insert(map.end(), p.second, p.first);
+ }
+ return map;
+ }
+};
+
+TEST_F(LocalDeclDecoderTest, EmptyLocals) {
+ AstLocalDecls decls(zone());
+ bool result = DecodeLocalDecls(decls, nullptr, nullptr);
+ EXPECT_FALSE(result);
+}
+
+TEST_F(LocalDeclDecoderTest, NoLocals) {
+ static const byte data[] = {0};
+ AstLocalDecls decls(zone());
+ bool result = DecodeLocalDecls(decls, data, data + sizeof(data));
+ EXPECT_TRUE(result);
+ EXPECT_EQ(0, decls.total_local_count);
+}
+
+TEST_F(LocalDeclDecoderTest, OneLocal) {
+ for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+ LocalType type = kLocalTypes[i];
+ const byte data[] = {
+ 1, 1, static_cast<byte>(WasmOpcodes::LocalTypeCodeFor(type))};
+ AstLocalDecls decls(zone());
+ bool result = DecodeLocalDecls(decls, data, data + sizeof(data));
+ EXPECT_TRUE(result);
+ EXPECT_EQ(1, decls.total_local_count);
+
+ LocalTypeMap map = Expand(decls);
+ EXPECT_EQ(1, map.size());
+ EXPECT_EQ(type, map.at(0));
+ }
+}
+
+TEST_F(LocalDeclDecoderTest, FiveLocals) {
+ for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+ LocalType type = kLocalTypes[i];
+ const byte data[] = {
+ 1, 5, static_cast<byte>(WasmOpcodes::LocalTypeCodeFor(type))};
+ AstLocalDecls decls(zone());
+ bool result = DecodeLocalDecls(decls, data, data + sizeof(data));
+ EXPECT_TRUE(result);
+ EXPECT_EQ(sizeof(data), decls.decls_encoded_size);
+ EXPECT_EQ(5, decls.total_local_count);
+
+ LocalTypeMap map = Expand(decls);
+ EXPECT_EQ(5, map.size());
+ ExpectRun(map, 0, type, 5);
+ }
+}
+
+TEST_F(LocalDeclDecoderTest, MixedLocals) {
+ for (byte a = 0; a < 3; a++) {
+ for (byte b = 0; b < 3; b++) {
+ for (byte c = 0; c < 3; c++) {
+ for (byte d = 0; d < 3; d++) {
+ const byte data[] = {4, a, kLocalI32, b, kLocalI64,
+ c, kLocalF32, d, kLocalF64};
+ AstLocalDecls decls(zone());
+ bool result = DecodeLocalDecls(decls, data, data + sizeof(data));
+ EXPECT_TRUE(result);
+ EXPECT_EQ(sizeof(data), decls.decls_encoded_size);
+ EXPECT_EQ(a + b + c + d, decls.total_local_count);
+
+ LocalTypeMap map = Expand(decls);
+ EXPECT_EQ(a + b + c + d, map.size());
+
+ size_t pos = 0;
+ pos = ExpectRun(map, pos, kAstI32, a);
+ pos = ExpectRun(map, pos, kAstI64, b);
+ pos = ExpectRun(map, pos, kAstF32, c);
+ pos = ExpectRun(map, pos, kAstF64, d);
+ }
+ }
+ }
+ }
+}
+
+TEST_F(LocalDeclDecoderTest, UseEncoder) {
+ const byte* data = nullptr;
+ const byte* end = nullptr;
+ LocalDeclEncoder local_decls;
+
+ local_decls.AddLocals(5, kAstF32);
+ local_decls.AddLocals(1337, kAstI32);
+ local_decls.AddLocals(212, kAstI64);
+ local_decls.Prepend(&data, &end);
+
+ AstLocalDecls decls(zone());
+ bool result = DecodeLocalDecls(decls, data, end);
+ EXPECT_TRUE(result);
+ EXPECT_EQ(5 + 1337 + 212, decls.total_local_count);
+
+ LocalTypeMap map = Expand(decls);
+ size_t pos = 0;
+ pos = ExpectRun(map, pos, kAstF32, 5);
+ pos = ExpectRun(map, pos, kAstI32, 1337);
+ pos = ExpectRun(map, pos, kAstI64, 212);
+ delete[] data;
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/decoder-unittest.cc b/deps/v8/test/unittests/wasm/decoder-unittest.cc
new file mode 100644
index 0000000000..11d68f161e
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/decoder-unittest.cc
@@ -0,0 +1,667 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/test-utils.h"
+
+#include "src/wasm/decoder.h"
+#include "src/wasm/wasm-macro-gen.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class DecoderTest : public TestWithZone {
+ public:
+ DecoderTest() : decoder(nullptr, nullptr) {}
+
+ Decoder decoder;
+};
+
+#define CHECK_UINT32V_INLINE(expected, expected_length, ...) \
+ do { \
+ const byte data[] = {__VA_ARGS__}; \
+ decoder.Reset(data, data + sizeof(data)); \
+ int length; \
+ EXPECT_EQ(expected, \
+ decoder.checked_read_u32v(decoder.start(), 0, &length)); \
+ EXPECT_EQ(expected_length, length); \
+ } while (false)
+
+#define CHECK_INT32V_INLINE(expected, expected_length, ...) \
+ do { \
+ const byte data[] = {__VA_ARGS__}; \
+ decoder.Reset(data, data + sizeof(data)); \
+ int length; \
+ EXPECT_EQ(expected, \
+ decoder.checked_read_i32v(decoder.start(), 0, &length)); \
+ EXPECT_EQ(expected_length, length); \
+ } while (false)
+
+#define CHECK_UINT64V_INLINE(expected, expected_length, ...) \
+ do { \
+ const byte data[] = {__VA_ARGS__}; \
+ decoder.Reset(data, data + sizeof(data)); \
+ int length; \
+ EXPECT_EQ(expected, \
+ decoder.checked_read_u64v(decoder.start(), 0, &length)); \
+ EXPECT_EQ(expected_length, length); \
+ } while (false)
+
+#define CHECK_INT64V_INLINE(expected, expected_length, ...) \
+ do { \
+ const byte data[] = {__VA_ARGS__}; \
+ decoder.Reset(data, data + sizeof(data)); \
+ int length; \
+ EXPECT_EQ(expected, \
+ decoder.checked_read_i64v(decoder.start(), 0, &length)); \
+ EXPECT_EQ(expected_length, length); \
+ } while (false)
+
+TEST_F(DecoderTest, ReadU32v_OneByte) {
+ CHECK_UINT32V_INLINE(0, 1, 0);
+ CHECK_UINT32V_INLINE(5, 1, 5);
+ CHECK_UINT32V_INLINE(7, 1, 7);
+ CHECK_UINT32V_INLINE(9, 1, 9);
+ CHECK_UINT32V_INLINE(37, 1, 37);
+ CHECK_UINT32V_INLINE(69, 1, 69);
+ CHECK_UINT32V_INLINE(110, 1, 110);
+ CHECK_UINT32V_INLINE(125, 1, 125);
+ CHECK_UINT32V_INLINE(126, 1, 126);
+ CHECK_UINT32V_INLINE(127, 1, 127);
+}
+
+TEST_F(DecoderTest, ReadU32v_TwoByte) {
+ CHECK_UINT32V_INLINE(0, 1, 0, 0);
+ CHECK_UINT32V_INLINE(10, 1, 10, 0);
+ CHECK_UINT32V_INLINE(27, 1, 27, 0);
+ CHECK_UINT32V_INLINE(100, 1, 100, 0);
+
+ CHECK_UINT32V_INLINE(444, 2, U32V_2(444));
+ CHECK_UINT32V_INLINE(544, 2, U32V_2(544));
+ CHECK_UINT32V_INLINE(1311, 2, U32V_2(1311));
+ CHECK_UINT32V_INLINE(2333, 2, U32V_2(2333));
+
+ for (uint32_t i = 0; i < 1 << 14; i = i * 13 + 1) {
+ CHECK_UINT32V_INLINE(i, 2, U32V_2(i));
+ }
+
+ const uint32_t max = (1 << 14) - 1;
+ CHECK_UINT32V_INLINE(max, 2, U32V_2(max));
+}
+
+TEST_F(DecoderTest, ReadU32v_ThreeByte) {
+ CHECK_UINT32V_INLINE(0, 1, 0, 0, 0, 0);
+ CHECK_UINT32V_INLINE(10, 1, 10, 0, 0, 0);
+ CHECK_UINT32V_INLINE(27, 1, 27, 0, 0, 0);
+ CHECK_UINT32V_INLINE(100, 1, 100, 0, 0, 0);
+
+ CHECK_UINT32V_INLINE(11, 3, U32V_3(11));
+ CHECK_UINT32V_INLINE(101, 3, U32V_3(101));
+ CHECK_UINT32V_INLINE(446, 3, U32V_3(446));
+ CHECK_UINT32V_INLINE(546, 3, U32V_3(546));
+ CHECK_UINT32V_INLINE(1319, 3, U32V_3(1319));
+ CHECK_UINT32V_INLINE(2338, 3, U32V_3(2338));
+ CHECK_UINT32V_INLINE(8191, 3, U32V_3(8191));
+ CHECK_UINT32V_INLINE(9999, 3, U32V_3(9999));
+ CHECK_UINT32V_INLINE(14444, 3, U32V_3(14444));
+ CHECK_UINT32V_INLINE(314444, 3, U32V_3(314444));
+ CHECK_UINT32V_INLINE(614444, 3, U32V_3(614444));
+
+ const uint32_t max = (1 << 21) - 1;
+
+ for (uint32_t i = 0; i <= max; i = i * 13 + 3) {
+ CHECK_UINT32V_INLINE(i, 3, U32V_3(i), 0);
+ }
+
+ CHECK_UINT32V_INLINE(max, 3, U32V_3(max));
+}
+
+TEST_F(DecoderTest, ReadU32v_FourByte) {
+ CHECK_UINT32V_INLINE(0, 1, 0, 0, 0, 0, 0);
+ CHECK_UINT32V_INLINE(10, 1, 10, 0, 0, 0, 0);
+ CHECK_UINT32V_INLINE(27, 1, 27, 0, 0, 0, 0);
+ CHECK_UINT32V_INLINE(100, 1, 100, 0, 0, 0, 0);
+
+ CHECK_UINT32V_INLINE(13, 4, U32V_4(13));
+ CHECK_UINT32V_INLINE(107, 4, U32V_4(107));
+ CHECK_UINT32V_INLINE(449, 4, U32V_4(449));
+ CHECK_UINT32V_INLINE(541, 4, U32V_4(541));
+ CHECK_UINT32V_INLINE(1317, 4, U32V_4(1317));
+ CHECK_UINT32V_INLINE(2334, 4, U32V_4(2334));
+ CHECK_UINT32V_INLINE(8191, 4, U32V_4(8191));
+ CHECK_UINT32V_INLINE(9994, 4, U32V_4(9994));
+ CHECK_UINT32V_INLINE(14442, 4, U32V_4(14442));
+ CHECK_UINT32V_INLINE(314442, 4, U32V_4(314442));
+ CHECK_UINT32V_INLINE(614442, 4, U32V_4(614442));
+ CHECK_UINT32V_INLINE(1614442, 4, U32V_4(1614442));
+ CHECK_UINT32V_INLINE(5614442, 4, U32V_4(5614442));
+ CHECK_UINT32V_INLINE(19614442, 4, U32V_4(19614442));
+
+ const uint32_t max = (1 << 28) - 1;
+
+ for (uint32_t i = 0; i <= max; i = i * 13 + 5) {
+ CHECK_UINT32V_INLINE(i, 4, U32V_4(i), 0);
+ }
+
+ CHECK_UINT32V_INLINE(max, 4, U32V_4(max));
+}
+
+TEST_F(DecoderTest, ReadU32v_FiveByte) {
+ CHECK_UINT32V_INLINE(0, 1, 0, 0, 0, 0, 0);
+ CHECK_UINT32V_INLINE(10, 1, 10, 0, 0, 0, 0);
+ CHECK_UINT32V_INLINE(27, 1, 27, 0, 0, 0, 0);
+ CHECK_UINT32V_INLINE(100, 1, 100, 0, 0, 0, 0);
+
+ CHECK_UINT32V_INLINE(13, 5, U32V_5(13));
+ CHECK_UINT32V_INLINE(107, 5, U32V_5(107));
+ CHECK_UINT32V_INLINE(449, 5, U32V_5(449));
+ CHECK_UINT32V_INLINE(541, 5, U32V_5(541));
+ CHECK_UINT32V_INLINE(1317, 5, U32V_5(1317));
+ CHECK_UINT32V_INLINE(2334, 5, U32V_5(2334));
+ CHECK_UINT32V_INLINE(8191, 5, U32V_5(8191));
+ CHECK_UINT32V_INLINE(9994, 5, U32V_5(9994));
+ CHECK_UINT32V_INLINE(24442, 5, U32V_5(24442));
+ CHECK_UINT32V_INLINE(414442, 5, U32V_5(414442));
+ CHECK_UINT32V_INLINE(714442, 5, U32V_5(714442));
+ CHECK_UINT32V_INLINE(1614442, 5, U32V_5(1614442));
+ CHECK_UINT32V_INLINE(6614442, 5, U32V_5(6614442));
+ CHECK_UINT32V_INLINE(89614442, 5, U32V_5(89614442));
+ CHECK_UINT32V_INLINE(2219614442u, 5, U32V_5(2219614442u));
+ CHECK_UINT32V_INLINE(3219614442u, 5, U32V_5(3219614442u));
+ CHECK_UINT32V_INLINE(4019614442u, 5, U32V_5(4019614442u));
+
+ const uint32_t max = 0xFFFFFFFFu;
+
+ for (uint32_t i = 1; i < 32; i++) {
+ uint32_t val = 0x983489aau << i;
+ CHECK_UINT32V_INLINE(val, 5, U32V_5(val), 0);
+ }
+
+ CHECK_UINT32V_INLINE(max, 5, U32V_5(max));
+}
+
+TEST_F(DecoderTest, ReadU32v_various) {
+ for (int i = 0; i < 10; i++) {
+ uint32_t x = 0xCCCCCCCCu * i;
+ for (int width = 0; width < 32; width++) {
+ uint32_t val = x >> width;
+
+ CHECK_UINT32V_INLINE(val & MASK_7, 1, U32V_1(val));
+ CHECK_UINT32V_INLINE(val & MASK_14, 2, U32V_2(val));
+ CHECK_UINT32V_INLINE(val & MASK_21, 3, U32V_3(val));
+ CHECK_UINT32V_INLINE(val & MASK_28, 4, U32V_4(val));
+ CHECK_UINT32V_INLINE(val, 5, U32V_5(val));
+ }
+ }
+}
+
+TEST_F(DecoderTest, ReadI32v_OneByte) {
+ CHECK_INT32V_INLINE(0, 1, 0);
+ CHECK_INT32V_INLINE(4, 1, 4);
+ CHECK_INT32V_INLINE(6, 1, 6);
+ CHECK_INT32V_INLINE(9, 1, 9);
+ CHECK_INT32V_INLINE(33, 1, 33);
+ CHECK_INT32V_INLINE(61, 1, 61);
+ CHECK_INT32V_INLINE(63, 1, 63);
+
+ CHECK_INT32V_INLINE(-1, 1, 127);
+ CHECK_INT32V_INLINE(-2, 1, 126);
+ CHECK_INT32V_INLINE(-11, 1, 117);
+ CHECK_INT32V_INLINE(-62, 1, 66);
+ CHECK_INT32V_INLINE(-63, 1, 65);
+ CHECK_INT32V_INLINE(-64, 1, 64);
+}
+
+TEST_F(DecoderTest, ReadI32v_TwoByte) {
+ CHECK_INT32V_INLINE(0, 2, U32V_2(0));
+ CHECK_INT32V_INLINE(9, 2, U32V_2(9));
+ CHECK_INT32V_INLINE(61, 2, U32V_2(61));
+ CHECK_INT32V_INLINE(63, 2, U32V_2(63));
+
+ CHECK_INT32V_INLINE(-1, 2, U32V_2(-1));
+ CHECK_INT32V_INLINE(-2, 2, U32V_2(-2));
+ CHECK_INT32V_INLINE(-63, 2, U32V_2(-63));
+ CHECK_INT32V_INLINE(-64, 2, U32V_2(-64));
+
+ CHECK_INT32V_INLINE(-200, 2, U32V_2(-200));
+ CHECK_INT32V_INLINE(-1002, 2, U32V_2(-1002));
+ CHECK_INT32V_INLINE(-2004, 2, U32V_2(-2004));
+ CHECK_INT32V_INLINE(-4077, 2, U32V_2(-4077));
+
+ CHECK_INT32V_INLINE(207, 2, U32V_2(207));
+ CHECK_INT32V_INLINE(1009, 2, U32V_2(1009));
+ CHECK_INT32V_INLINE(2003, 2, U32V_2(2003));
+ CHECK_INT32V_INLINE(4072, 2, U32V_2(4072));
+
+ const int32_t min = 0 - (1 << 13);
+ for (int i = min; i < min + 10; i++) {
+ CHECK_INT32V_INLINE(i, 2, U32V_2(i));
+ }
+
+ const int32_t max = (1 << 13) - 1;
+ for (int i = max; i > max - 10; i--) {
+ CHECK_INT32V_INLINE(i, 2, U32V_2(i));
+ }
+}
+
+TEST_F(DecoderTest, ReadI32v_ThreeByte) {
+ CHECK_INT32V_INLINE(0, 3, U32V_3(0));
+ CHECK_INT32V_INLINE(9, 3, U32V_3(9));
+ CHECK_INT32V_INLINE(61, 3, U32V_3(61));
+ CHECK_INT32V_INLINE(63, 3, U32V_3(63));
+
+ CHECK_INT32V_INLINE(-1, 3, U32V_3(-1));
+ CHECK_INT32V_INLINE(-2, 3, U32V_3(-2));
+ CHECK_INT32V_INLINE(-63, 3, U32V_3(-63));
+ CHECK_INT32V_INLINE(-64, 3, U32V_3(-64));
+
+ CHECK_INT32V_INLINE(-207, 3, U32V_3(-207));
+ CHECK_INT32V_INLINE(-1012, 3, U32V_3(-1012));
+ CHECK_INT32V_INLINE(-4067, 3, U32V_3(-4067));
+ CHECK_INT32V_INLINE(-14067, 3, U32V_3(-14067));
+ CHECK_INT32V_INLINE(-234061, 3, U32V_3(-234061));
+
+ CHECK_INT32V_INLINE(237, 3, U32V_3(237));
+ CHECK_INT32V_INLINE(1309, 3, U32V_3(1309));
+ CHECK_INT32V_INLINE(4372, 3, U32V_3(4372));
+ CHECK_INT32V_INLINE(64372, 3, U32V_3(64372));
+ CHECK_INT32V_INLINE(374372, 3, U32V_3(374372));
+
+ const int32_t min = 0 - (1 << 20);
+ for (int i = min; i < min + 10; i++) {
+ CHECK_INT32V_INLINE(i, 3, U32V_3(i));
+ }
+
+ const int32_t max = (1 << 20) - 1;
+ for (int i = max; i > max - 10; i--) {
+ CHECK_INT32V_INLINE(i, 3, U32V_3(i));
+ }
+}
+
+TEST_F(DecoderTest, ReadI32v_FourByte) {
+ CHECK_INT32V_INLINE(0, 4, U32V_4(0));
+ CHECK_INT32V_INLINE(9, 4, U32V_4(9));
+ CHECK_INT32V_INLINE(61, 4, U32V_4(61));
+ CHECK_INT32V_INLINE(63, 4, U32V_4(63));
+
+ CHECK_INT32V_INLINE(-1, 4, U32V_4(-1));
+ CHECK_INT32V_INLINE(-2, 4, U32V_4(-2));
+ CHECK_INT32V_INLINE(-63, 4, U32V_4(-63));
+ CHECK_INT32V_INLINE(-64, 4, U32V_4(-64));
+
+ CHECK_INT32V_INLINE(-267, 4, U32V_4(-267));
+ CHECK_INT32V_INLINE(-1612, 4, U32V_4(-1612));
+ CHECK_INT32V_INLINE(-4667, 4, U32V_4(-4667));
+ CHECK_INT32V_INLINE(-16067, 4, U32V_4(-16067));
+ CHECK_INT32V_INLINE(-264061, 4, U32V_4(-264061));
+ CHECK_INT32V_INLINE(-1264061, 4, U32V_4(-1264061));
+ CHECK_INT32V_INLINE(-6264061, 4, U32V_4(-6264061));
+ CHECK_INT32V_INLINE(-8264061, 4, U32V_4(-8264061));
+
+ CHECK_INT32V_INLINE(277, 4, U32V_4(277));
+ CHECK_INT32V_INLINE(1709, 4, U32V_4(1709));
+ CHECK_INT32V_INLINE(4772, 4, U32V_4(4772));
+ CHECK_INT32V_INLINE(67372, 4, U32V_4(67372));
+ CHECK_INT32V_INLINE(374372, 4, U32V_4(374372));
+ CHECK_INT32V_INLINE(2374372, 4, U32V_4(2374372));
+ CHECK_INT32V_INLINE(7374372, 4, U32V_4(7374372));
+ CHECK_INT32V_INLINE(9374372, 4, U32V_4(9374372));
+
+ const int32_t min = 0 - (1 << 27);
+ for (int i = min; i < min + 10; i++) {
+ CHECK_INT32V_INLINE(i, 4, U32V_4(i));
+ }
+
+ const int32_t max = (1 << 27) - 1;
+ for (int i = max; i > max - 10; i--) {
+ CHECK_INT32V_INLINE(i, 4, U32V_4(i));
+ }
+}
+
+TEST_F(DecoderTest, ReadI32v_FiveByte) {
+ CHECK_INT32V_INLINE(0, 5, U32V_5(0));
+ CHECK_INT32V_INLINE(16, 5, U32V_5(16));
+ CHECK_INT32V_INLINE(94, 5, U32V_5(94));
+ CHECK_INT32V_INLINE(127, 5, U32V_5(127));
+
+ CHECK_INT32V_INLINE(-1, 5, U32V_5(-1));
+ CHECK_INT32V_INLINE(-2, 5, U32V_5(-2));
+ CHECK_INT32V_INLINE(-63, 5, U32V_5(-63));
+ CHECK_INT32V_INLINE(-64, 5, U32V_5(-64));
+
+ CHECK_INT32V_INLINE(-257, 5, U32V_5(-257));
+ CHECK_INT32V_INLINE(-1512, 5, U32V_5(-1512));
+ CHECK_INT32V_INLINE(-4567, 5, U32V_5(-4567));
+ CHECK_INT32V_INLINE(-15067, 5, U32V_5(-15067));
+ CHECK_INT32V_INLINE(-254061, 5, U32V_5(-254061));
+ CHECK_INT32V_INLINE(-1364061, 5, U32V_5(-1364061));
+ CHECK_INT32V_INLINE(-6364061, 5, U32V_5(-6364061));
+ CHECK_INT32V_INLINE(-8364061, 5, U32V_5(-8364061));
+ CHECK_INT32V_INLINE(-28364061, 5, U32V_5(-28364061));
+ CHECK_INT32V_INLINE(-228364061, 5, U32V_5(-228364061));
+
+ CHECK_INT32V_INLINE(227, 5, U32V_5(227));
+ CHECK_INT32V_INLINE(1209, 5, U32V_5(1209));
+ CHECK_INT32V_INLINE(4272, 5, U32V_5(4272));
+ CHECK_INT32V_INLINE(62372, 5, U32V_5(62372));
+ CHECK_INT32V_INLINE(324372, 5, U32V_5(324372));
+ CHECK_INT32V_INLINE(2274372, 5, U32V_5(2274372));
+ CHECK_INT32V_INLINE(7274372, 5, U32V_5(7274372));
+ CHECK_INT32V_INLINE(9274372, 5, U32V_5(9274372));
+ CHECK_INT32V_INLINE(42374372, 5, U32V_5(42374372));
+ CHECK_INT32V_INLINE(429374372, 5, U32V_5(429374372));
+
+ const int32_t min = kMinInt;
+ for (int i = min; i < min + 10; i++) {
+ CHECK_INT32V_INLINE(i, 5, U32V_5(i));
+ }
+
+ const int32_t max = kMaxInt;
+ for (int i = max; i > max - 10; i--) {
+ CHECK_INT32V_INLINE(i, 5, U32V_5(i));
+ }
+}
+
+TEST_F(DecoderTest, ReadU32v_off_end1) {
+ static const byte data[] = {U32V_1(11)};
+ int length = 0;
+ decoder.Reset(data, data);
+ decoder.checked_read_u32v(decoder.start(), 0, &length);
+ EXPECT_EQ(0, length);
+ EXPECT_FALSE(decoder.ok());
+}
+
+TEST_F(DecoderTest, ReadU32v_off_end2) {
+ static const byte data[] = {U32V_2(1111)};
+ for (size_t i = 0; i < sizeof(data); i++) {
+ int length = 0;
+ decoder.Reset(data, data + i);
+ decoder.checked_read_u32v(decoder.start(), 0, &length);
+ EXPECT_EQ(i, length);
+ EXPECT_FALSE(decoder.ok());
+ }
+}
+
+TEST_F(DecoderTest, ReadU32v_off_end3) {
+ static const byte data[] = {U32V_3(111111)};
+ for (size_t i = 0; i < sizeof(data); i++) {
+ int length = 0;
+ decoder.Reset(data, data + i);
+ decoder.checked_read_u32v(decoder.start(), 0, &length);
+ EXPECT_EQ(i, length);
+ EXPECT_FALSE(decoder.ok());
+ }
+}
+
+TEST_F(DecoderTest, ReadU32v_off_end4) {
+ static const byte data[] = {U32V_4(11111111)};
+ for (size_t i = 0; i < sizeof(data); i++) {
+ int length = 0;
+ decoder.Reset(data, data + i);
+ decoder.checked_read_u32v(decoder.start(), 0, &length);
+ EXPECT_EQ(i, length);
+ EXPECT_FALSE(decoder.ok());
+ }
+}
+
+TEST_F(DecoderTest, ReadU32v_off_end5) {
+ static const byte data[] = {U32V_5(111111111)};
+ for (size_t i = 0; i < sizeof(data); i++) {
+ int length = 0;
+ decoder.Reset(data, data + i);
+ decoder.checked_read_u32v(decoder.start(), 0, &length);
+ EXPECT_EQ(i, length);
+ EXPECT_FALSE(decoder.ok());
+ }
+}
+
+TEST_F(DecoderTest, ReadU32v_extra_bits) {
+ byte data[] = {0x80, 0x80, 0x80, 0x80, 0x00};
+ for (int i = 1; i < 16; i++) {
+ data[4] = static_cast<byte>(i << 4);
+ int length = 0;
+ decoder.Reset(data, data + sizeof(data));
+ decoder.checked_read_u32v(decoder.start(), 0, &length);
+ EXPECT_EQ(5, length);
+ EXPECT_FALSE(decoder.ok());
+ }
+}
+
+TEST_F(DecoderTest, ReadI32v_extra_bits_negative) {
+ // OK for negative signed values to have extra ones.
+ int length = 0;
+ byte data[] = {0xff, 0xff, 0xff, 0xff, 0x7f};
+ decoder.Reset(data, data + sizeof(data));
+ decoder.checked_read_i32v(decoder.start(), 0, &length);
+ EXPECT_EQ(5, length);
+ EXPECT_TRUE(decoder.ok());
+}
+
+TEST_F(DecoderTest, ReadI32v_extra_bits_positive) {
+ // Not OK for positive signed values to have extra ones.
+ int length = 0;
+ byte data[] = {0x80, 0x80, 0x80, 0x80, 0x77};
+ decoder.Reset(data, data + sizeof(data));
+ decoder.checked_read_i32v(decoder.start(), 0, &length);
+ EXPECT_EQ(5, length);
+ EXPECT_FALSE(decoder.ok());
+}
+
+TEST_F(DecoderTest, ReadU32v_Bits) {
+ // A more exhaustive test.
+ const int kMaxSize = 5;
+ const uint32_t kVals[] = {
+ 0xaabbccdd, 0x11223344, 0x33445566, 0xffeeddcc, 0xF0F0F0F0, 0x0F0F0F0F,
+ 0xEEEEEEEE, 0xAAAAAAAA, 0x12345678, 0x9abcdef0, 0x80309488, 0x729ed997,
+ 0xc4a0cf81, 0x16c6eb85, 0x4206db8e, 0xf3b089d5, 0xaa2e223e, 0xf99e29c8,
+ 0x4a4357d8, 0x1890b1c1, 0x8d80a085, 0xacb6ae4c, 0x1b827e10, 0xeb5c7bd9,
+ 0xbb1bc146, 0xdf57a33l};
+ byte data[kMaxSize];
+
+ // foreach value in above array
+ for (size_t v = 0; v < arraysize(kVals); v++) {
+ // foreach length 1...32
+ for (int i = 1; i <= 32; i++) {
+ uint32_t val = kVals[v];
+ if (i < 32) val &= ((1 << i) - 1);
+
+ int length = 1 + i / 7;
+ for (int j = 0; j < kMaxSize; j++) {
+ data[j] = static_cast<byte>((val >> (7 * j)) & MASK_7);
+ }
+ for (int j = 0; j < length - 1; j++) {
+ data[j] |= 0x80;
+ }
+
+ // foreach buffer size 0...5
+ for (int limit = 0; limit <= kMaxSize; limit++) {
+ decoder.Reset(data, data + limit);
+ int rlen;
+ uint32_t result = decoder.checked_read_u32v(data, 0, &rlen);
+ if (limit < length) {
+ EXPECT_FALSE(decoder.ok());
+ } else {
+ EXPECT_TRUE(decoder.ok());
+ EXPECT_EQ(val, result);
+ EXPECT_EQ(length, rlen);
+ }
+ }
+ }
+ }
+}
+
+TEST_F(DecoderTest, ReadU64v_OneByte) {
+ CHECK_UINT64V_INLINE(0, 1, 0);
+ CHECK_UINT64V_INLINE(6, 1, 6);
+ CHECK_UINT64V_INLINE(8, 1, 8);
+ CHECK_UINT64V_INLINE(12, 1, 12);
+ CHECK_UINT64V_INLINE(33, 1, 33);
+ CHECK_UINT64V_INLINE(59, 1, 59);
+ CHECK_UINT64V_INLINE(110, 1, 110);
+ CHECK_UINT64V_INLINE(125, 1, 125);
+ CHECK_UINT64V_INLINE(126, 1, 126);
+ CHECK_UINT64V_INLINE(127, 1, 127);
+}
+
+TEST_F(DecoderTest, ReadI64v_OneByte) {
+ CHECK_INT64V_INLINE(0, 1, 0);
+ CHECK_INT64V_INLINE(4, 1, 4);
+ CHECK_INT64V_INLINE(6, 1, 6);
+ CHECK_INT64V_INLINE(9, 1, 9);
+ CHECK_INT64V_INLINE(33, 1, 33);
+ CHECK_INT64V_INLINE(61, 1, 61);
+ CHECK_INT64V_INLINE(63, 1, 63);
+
+ CHECK_INT64V_INLINE(-1, 1, 127);
+ CHECK_INT64V_INLINE(-2, 1, 126);
+ CHECK_INT64V_INLINE(-11, 1, 117);
+ CHECK_INT64V_INLINE(-62, 1, 66);
+ CHECK_INT64V_INLINE(-63, 1, 65);
+ CHECK_INT64V_INLINE(-64, 1, 64);
+}
+
+TEST_F(DecoderTest, ReadU64v_PowerOf2) {
+ const int kMaxSize = 10;
+ byte data[kMaxSize];
+
+ for (int i = 0; i < 64; i++) {
+ const uint64_t val = 1ull << i;
+ int index = i / 7;
+ data[index] = 1 << (i % 7);
+ memset(data, 0x80, index);
+
+ for (int limit = 0; limit <= kMaxSize; limit++) {
+ decoder.Reset(data, data + limit);
+ int length;
+ uint64_t result = decoder.checked_read_u64v(data, 0, &length);
+ if (limit <= index) {
+ EXPECT_FALSE(decoder.ok());
+ } else {
+ EXPECT_TRUE(decoder.ok());
+ EXPECT_EQ(val, result);
+ EXPECT_EQ(index + 1, length);
+ }
+ }
+ }
+}
+
+TEST_F(DecoderTest, ReadU64v_Bits) {
+ const int kMaxSize = 10;
+ const uint64_t kVals[] = {
+ 0xaabbccdd11223344ull, 0x33445566ffeeddccull, 0xF0F0F0F0F0F0F0F0ull,
+ 0x0F0F0F0F0F0F0F0Full, 0xEEEEEEEEEEEEEEEEull, 0xAAAAAAAAAAAAAAAAull,
+ 0x123456789abcdef0ull, 0x80309488729ed997ull, 0xc4a0cf8116c6eb85ull,
+ 0x4206db8ef3b089d5ull, 0xaa2e223ef99e29c8ull, 0x4a4357d81890b1c1ull,
+ 0x8d80a085acb6ae4cull, 0x1b827e10eb5c7bd9ull, 0xbb1bc146df57a338ull};
+ byte data[kMaxSize];
+
+ // foreach value in above array
+ for (size_t v = 0; v < arraysize(kVals); v++) {
+ // foreach length 1...64
+ for (int i = 1; i <= 64; i++) {
+ uint64_t val = kVals[v];
+ if (i < 64) val &= ((1ull << i) - 1);
+
+ int length = 1 + i / 7;
+ for (int j = 0; j < kMaxSize; j++) {
+ data[j] = static_cast<byte>((val >> (7 * j)) & MASK_7);
+ }
+ for (int j = 0; j < length - 1; j++) {
+ data[j] |= 0x80;
+ }
+
+ // foreach buffer size 0...10
+ for (int limit = 0; limit <= kMaxSize; limit++) {
+ decoder.Reset(data, data + limit);
+ int rlen;
+ uint64_t result = decoder.checked_read_u64v(data, 0, &rlen);
+ if (limit < length) {
+ EXPECT_FALSE(decoder.ok());
+ } else {
+ EXPECT_TRUE(decoder.ok());
+ EXPECT_EQ(val, result);
+ EXPECT_EQ(length, rlen);
+ }
+ }
+ }
+ }
+}
+
+TEST_F(DecoderTest, ReadI64v_Bits) {
+ const int kMaxSize = 10;
+ // Exhaustive signedness test.
+ const uint64_t kVals[] = {
+ 0xaabbccdd11223344ull, 0x33445566ffeeddccull, 0xF0F0F0F0F0F0F0F0ull,
+ 0x0F0F0F0F0F0F0F0Full, 0xEEEEEEEEEEEEEEEEull, 0xAAAAAAAAAAAAAAAAull,
+ 0x123456789abcdef0ull, 0x80309488729ed997ull, 0xc4a0cf8116c6eb85ull,
+ 0x4206db8ef3b089d5ull, 0xaa2e223ef99e29c8ull, 0x4a4357d81890b1c1ull,
+ 0x8d80a085acb6ae4cull, 0x1b827e10eb5c7bd9ull, 0xbb1bc146df57a338ull};
+ byte data[kMaxSize];
+
+ // foreach value in above array
+ for (size_t v = 0; v < arraysize(kVals); v++) {
+ // foreach length 1...64
+ for (int i = 1; i <= 64; i++) {
+ const int64_t val = bit_cast<int64_t>(kVals[v] << (64 - i)) >> (64 - i);
+
+ int length = 1 + i / 7;
+ for (int j = 0; j < kMaxSize; j++) {
+ data[j] = static_cast<byte>((val >> (7 * j)) & MASK_7);
+ }
+ for (int j = 0; j < length - 1; j++) {
+ data[j] |= 0x80;
+ }
+
+ // foreach buffer size 0...10
+ for (int limit = 0; limit <= kMaxSize; limit++) {
+ decoder.Reset(data, data + limit);
+ int rlen;
+ int64_t result = decoder.checked_read_i64v(data, 0, &rlen);
+ if (limit < length) {
+ EXPECT_FALSE(decoder.ok());
+ } else {
+ EXPECT_TRUE(decoder.ok());
+ EXPECT_EQ(val, result);
+ EXPECT_EQ(length, rlen);
+ }
+ }
+ }
+ }
+}
+
+TEST_F(DecoderTest, ReadU64v_extra_bits) {
+ byte data[] = {0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00};
+ for (int i = 1; i < 128; i++) {
+ data[9] = static_cast<byte>(i << 1);
+ int length = 0;
+ decoder.Reset(data, data + sizeof(data));
+ decoder.checked_read_u64v(decoder.start(), 0, &length);
+ EXPECT_EQ(10, length);
+ EXPECT_FALSE(decoder.ok());
+ }
+}
+
+TEST_F(DecoderTest, ReadI64v_extra_bits_negative) {
+ // OK for negative signed values to have extra ones.
+ int length = 0;
+ byte data[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f};
+ decoder.Reset(data, data + sizeof(data));
+ decoder.checked_read_i64v(decoder.start(), 0, &length);
+ EXPECT_EQ(10, length);
+ EXPECT_TRUE(decoder.ok());
+}
+
+TEST_F(DecoderTest, ReadI64v_extra_bits_positive) {
+ // Not OK for positive signed values to have extra ones.
+ int length = 0;
+ byte data[] = {0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x77};
+ decoder.Reset(data, data + sizeof(data));
+ decoder.checked_read_i64v(decoder.start(), 0, &length);
+ EXPECT_EQ(10, length);
+ EXPECT_FALSE(decoder.ok());
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/encoder-unittest.cc b/deps/v8/test/unittests/wasm/encoder-unittest.cc
index e09e71aeb8..740c0540dc 100644
--- a/deps/v8/test/unittests/wasm/encoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/encoder-unittest.cc
@@ -52,7 +52,8 @@ class EncoderTest : public TestWithZone {
TEST_F(EncoderTest, Function_Builder_Variable_Indexing) {
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
uint16_t f_index = builder->AddFunction();
WasmFunctionBuilder* function = builder->FunctionAt(f_index);
@@ -86,14 +87,12 @@ TEST_F(EncoderTest, Function_Builder_Variable_Indexing) {
byte* header = buffer;
byte* body = buffer + f->HeaderSize();
f->Serialize(buffer, &header, &body);
- for (size_t i = 0; i < 7; i++) {
- CHECK_EQ(i, static_cast<size_t>(*(buffer + 2 * i + f->HeaderSize() + 1)));
- }
}
TEST_F(EncoderTest, Function_Builder_Indexing_Variable_Width) {
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
uint16_t f_index = builder->AddFunction();
WasmFunctionBuilder* function = builder->FunctionAt(f_index);
@@ -109,17 +108,85 @@ TEST_F(EncoderTest, Function_Builder_Indexing_Variable_Width) {
byte* body = buffer + f->HeaderSize();
f->Serialize(buffer, &header, &body);
body = buffer + f->HeaderSize();
- for (size_t i = 0; i < 127; i++) {
- CHECK_EQ(kExprGetLocal, static_cast<size_t>(*(body + 2 * i)));
- CHECK_EQ(i + 1, static_cast<size_t>(*(body + 2 * i + 1)));
+}
+
+TEST_F(EncoderTest, Function_Builder_Block_Variable_Width) {
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ uint16_t f_index = builder->AddFunction();
+ WasmFunctionBuilder* function = builder->FunctionAt(f_index);
+ function->EmitWithVarInt(kExprBlock, 200);
+ for (int i = 0; i < 200; ++i) {
+ function->Emit(kExprNop);
+ }
+
+ WasmFunctionEncoder* f = function->Build(&zone, builder);
+ CHECK_EQ(f->BodySize(), 204);
+}
+
+TEST_F(EncoderTest, Function_Builder_EmitEditableVarIntImmediate) {
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ uint16_t f_index = builder->AddFunction();
+ WasmFunctionBuilder* function = builder->FunctionAt(f_index);
+ function->Emit(kExprLoop);
+ uint32_t offset = function->EmitEditableVarIntImmediate();
+ for (int i = 0; i < 200; ++i) {
+ function->Emit(kExprNop);
}
- CHECK_EQ(kExprGetLocal, static_cast<size_t>(*(body + 2 * 127)));
- CHECK_EQ(0x80, static_cast<size_t>(*(body + 2 * 127 + 1)));
- CHECK_EQ(0x01, static_cast<size_t>(*(body + 2 * 127 + 2)));
- CHECK_EQ(kExprGetLocal, static_cast<size_t>(*(body + 2 * 127 + 3)));
- CHECK_EQ(0x00, static_cast<size_t>(*(body + 2 * 127 + 4)));
+ function->EditVarIntImmediate(offset, 200);
+
+ WasmFunctionEncoder* f = function->Build(&zone, builder);
+ CHECK_EQ(f->BodySize(), 204);
}
+TEST_F(EncoderTest, Function_Builder_EmitEditableVarIntImmediate_Locals) {
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ uint16_t f_index = builder->AddFunction();
+ WasmFunctionBuilder* function = builder->FunctionAt(f_index);
+ function->Emit(kExprBlock);
+ uint32_t offset = function->EmitEditableVarIntImmediate();
+ for (int i = 0; i < 200; ++i) {
+ AddLocal(function, kAstI32);
+ }
+ function->EditVarIntImmediate(offset, 200);
+
+ WasmFunctionEncoder* f = function->Build(&zone, builder);
+ ZoneVector<uint8_t> buffer_vector(f->HeaderSize() + f->BodySize(), &zone);
+ byte* buffer = &buffer_vector[0];
+ byte* header = buffer;
+ byte* body = buffer + f->HeaderSize();
+ f->Serialize(buffer, &header, &body);
+ body = buffer + f->HeaderSize();
+
+ CHECK_EQ(f->BodySize(), 479);
+ const uint8_t varint200_low = (200 & 0x7f) | 0x80;
+ const uint8_t varint200_high = (200 >> 7) & 0x7f;
+ offset = 0;
+ CHECK_EQ(body[offset++], 1); // Local decl count.
+ CHECK_EQ(body[offset++], varint200_low);
+ CHECK_EQ(body[offset++], varint200_high);
+ CHECK_EQ(body[offset++], kLocalI32);
+ CHECK_EQ(body[offset++], kExprBlock);
+ CHECK_EQ(body[offset++], varint200_low);
+ CHECK_EQ(body[offset++], varint200_high);
+ // GetLocal with one-byte indices.
+ for (int i = 0; i <= 127; ++i) {
+ CHECK_EQ(body[offset++], kExprGetLocal);
+ CHECK_EQ(body[offset++], i);
+ }
+ // GetLocal with two-byte indices.
+ for (int i = 128; i < 200; ++i) {
+ CHECK_EQ(body[offset++], kExprGetLocal);
+ CHECK_EQ(body[offset++], (i & 0x7f) | 0x80);
+ CHECK_EQ(body[offset++], (i >> 7) & 0x7f);
+ }
+ CHECK_EQ(offset, 479);
+}
TEST_F(EncoderTest, LEB_Functions) {
byte leb_value[5] = {0, 0, 0, 0, 0};
diff --git a/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc b/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
index 958621970c..e77c1cfff5 100644
--- a/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
+++ b/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
@@ -23,25 +23,12 @@ namespace wasm {
class WasmLoopAssignmentAnalyzerTest : public TestWithZone {
public:
- WasmLoopAssignmentAnalyzerTest() : TestWithZone(), sigs() {
- init_env(&env, sigs.v_v());
- }
-
+ WasmLoopAssignmentAnalyzerTest() : num_locals(0) {}
TestSignatures sigs;
- FunctionEnv env;
-
- static void init_env(FunctionEnv* env, FunctionSig* sig) {
- env->module = nullptr;
- env->sig = sig;
- env->local_i32_count = 0;
- env->local_i64_count = 0;
- env->local_f32_count = 0;
- env->local_f64_count = 0;
- env->SumLocals();
- }
+ uint32_t num_locals;
BitVector* Analyze(const byte* start, const byte* end) {
- return AnalyzeLoopAssignmentForTesting(zone(), &env, start, end);
+ return AnalyzeLoopAssignmentForTesting(zone(), num_locals, start, end);
}
};
@@ -60,13 +47,13 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, Empty1) {
for (int j = 0; j < assigned->length(); j++) {
CHECK_EQ(false, assigned->Contains(j));
}
- env.AddLocals(kAstI32, 1);
+ num_locals++;
}
}
TEST_F(WasmLoopAssignmentAnalyzerTest, One) {
- env.AddLocals(kAstI32, 5);
+ num_locals = 5;
for (int i = 0; i < 5; i++) {
byte code[] = {WASM_LOOP(1, WASM_SET_ZERO(i))};
BitVector* assigned = Analyze(code, code + arraysize(code));
@@ -78,7 +65,7 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, One) {
TEST_F(WasmLoopAssignmentAnalyzerTest, OneBeyond) {
- env.AddLocals(kAstI32, 5);
+ num_locals = 5;
for (int i = 0; i < 5; i++) {
byte code[] = {WASM_LOOP(1, WASM_SET_ZERO(i)), WASM_SET_ZERO(1)};
BitVector* assigned = Analyze(code, code + arraysize(code));
@@ -90,7 +77,7 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, OneBeyond) {
TEST_F(WasmLoopAssignmentAnalyzerTest, Two) {
- env.AddLocals(kAstI32, 5);
+ num_locals = 5;
for (int i = 0; i < 5; i++) {
for (int j = 0; j < 5; j++) {
byte code[] = {WASM_LOOP(2, WASM_SET_ZERO(i), WASM_SET_ZERO(j))};
@@ -105,7 +92,7 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, Two) {
TEST_F(WasmLoopAssignmentAnalyzerTest, NestedIf) {
- env.AddLocals(kAstI32, 5);
+ num_locals = 5;
for (int i = 0; i < 5; i++) {
byte code[] = {WASM_LOOP(
1, WASM_IF_ELSE(WASM_SET_ZERO(0), WASM_SET_ZERO(i), WASM_SET_ZERO(1)))};
@@ -126,7 +113,7 @@ static byte LEBByte(uint32_t val, byte which) {
TEST_F(WasmLoopAssignmentAnalyzerTest, BigLocal) {
- env.AddLocals(kAstI32, 65000);
+ num_locals = 65000;
for (int i = 13; i < 65000; i = static_cast<int>(i * 1.5)) {
byte code[] = {kExprLoop,
1,
@@ -148,7 +135,7 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, BigLocal) {
TEST_F(WasmLoopAssignmentAnalyzerTest, Break) {
- env.AddLocals(kAstI32, 3);
+ num_locals = 3;
byte code[] = {
WASM_LOOP(1, WASM_IF(WASM_GET_LOCAL(0), WASM_BRV(1, WASM_SET_ZERO(1)))),
WASM_SET_ZERO(0)};
@@ -162,7 +149,7 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, Break) {
TEST_F(WasmLoopAssignmentAnalyzerTest, Loop1) {
- env.AddLocals(kAstI32, 5);
+ num_locals = 5;
byte code[] = {
WASM_LOOP(1, WASM_IF(WASM_GET_LOCAL(0),
WASM_BRV(0, WASM_SET_LOCAL(
@@ -179,9 +166,8 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, Loop1) {
TEST_F(WasmLoopAssignmentAnalyzerTest, Loop2) {
- env.AddLocals(kAstI32, 3);
+ num_locals = 6;
const byte kIter = 0;
- env.AddLocals(kAstF32, 3);
const byte kSum = 3;
byte code[] = {WASM_BLOCK(
diff --git a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
index 467ffcc232..44e78653e3 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
@@ -5,20 +5,26 @@
#include "test/unittests/test-utils.h"
#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-macro-gen.h"
#include "src/wasm/wasm-opcodes.h"
namespace v8 {
namespace internal {
namespace wasm {
-class WasmModuleVerifyTest : public TestWithZone {
- public:
- ModuleResult DecodeModule(const byte* module_start, const byte* module_end) {
- return DecodeWasmModule(nullptr, zone(), module_start, module_end, false,
- false);
- }
-};
+#define EMPTY_FUNCTION(sig_index) 0, SIG_INDEX(sig_index), U16_LE(0)
+#define EMPTY_FUNCTION_SIZE ((size_t)5)
+#define EMPTY_BODY 0
+#define EMPTY_BODY_SIZE ((size_t)1)
+#define NOP_BODY 2, 0, kExprNop
+#define NOP_BODY_SIZE ((size_t)3)
+#define VOID_VOID_SIG 0, kLocalVoid
+#define VOID_VOID_SIG_SIZE ((size_t)2)
+#define INT_INT_SIG 1, kLocalI32, kLocalI32
+#define INT_INT_SIG_SIZE ((size_t)3)
+#define SECTION(NAME, EXTRA_SIZE) \
+ U32V_1(WASM_SECTION_##NAME##_SIZE + (EXTRA_SIZE)), WASM_SECTION_##NAME
#define EXPECT_VERIFIES(data) \
do { \
@@ -27,14 +33,30 @@ class WasmModuleVerifyTest : public TestWithZone {
if (result.val) delete result.val; \
} while (false)
+#define EXPECT_FAILURE_LEN(data, length) \
+ do { \
+ ModuleResult result = DecodeModule(data, data + length); \
+ EXPECT_FALSE(result.ok()); \
+ if (result.val) delete result.val; \
+ } while (false)
-#define EXPECT_FAILURE(data) \
- do { \
- ModuleResult result = DecodeModule(data, data + arraysize(data)); \
- EXPECT_FALSE(result.ok()); \
- if (result.val) delete result.val; \
+#define EXPECT_FAILURE(data) EXPECT_FAILURE_LEN(data, sizeof(data))
+
+#define EXPECT_OFF_END_FAILURE(data, min, max) \
+ do { \
+ for (size_t length = min; length < max; length++) { \
+ EXPECT_FAILURE_LEN(data, length); \
+ } \
} while (false)
+static size_t SizeOfVarInt(size_t value) {
+ size_t size = 0;
+ do {
+ size++;
+ value = value >> 7;
+ } while (value > 0);
+ return size;
+}
struct LocalTypePair {
uint8_t code;
@@ -44,39 +66,59 @@ struct LocalTypePair {
{kLocalF32, kAstF32},
{kLocalF64, kAstF64}};
+class WasmModuleVerifyTest : public TestWithZone {
+ public:
+ ModuleResult DecodeModule(const byte* module_start, const byte* module_end) {
+ // Add the WASM magic and version number automatically.
+ size_t size = static_cast<size_t>(module_end - module_start);
+ byte header[] = {WASM_MODULE_HEADER};
+ size_t total = sizeof(header) + size;
+ auto temp = new byte[total];
+ memcpy(temp, header, sizeof(header));
+ memcpy(temp + sizeof(header), module_start, size);
+ ModuleResult result = DecodeWasmModule(nullptr, zone(), temp, temp + total,
+ false, kWasmOrigin);
+ delete[] temp;
+ return result;
+ }
+ ModuleResult DecodeModuleNoHeader(const byte* module_start,
+ const byte* module_end) {
+ return DecodeWasmModule(nullptr, zone(), module_start, module_end, false,
+ kWasmOrigin);
+ }
+};
-// TODO(titzer): use these macros everywhere below.
-#define U32_LE(v) \
- static_cast<byte>(v), static_cast<byte>((v) >> 8), \
- static_cast<byte>((v) >> 16), static_cast<byte>((v) >> 24)
-
-
-#define U16_LE(v) static_cast<byte>(v), static_cast<byte>((v) >> 8)
-
-
-TEST_F(WasmModuleVerifyTest, DecodeEmpty) {
- static const byte data[1]{kDeclEnd};
- {
- ModuleResult result = DecodeModule(data, data);
- EXPECT_TRUE(result.ok());
+TEST_F(WasmModuleVerifyTest, WrongMagic) {
+ for (uint32_t x = 1; x; x <<= 1) {
+ const byte data[] = {U32_LE(kWasmMagic ^ x), U32_LE(kWasmVersion),
+ SECTION(END, 0)};
+ ModuleResult result = DecodeModuleNoHeader(data, data + sizeof(data));
+ EXPECT_FALSE(result.ok());
if (result.val) delete result.val;
}
- {
- ModuleResult result = DecodeModule(data, data + 1);
- EXPECT_TRUE(result.ok());
+}
+
+TEST_F(WasmModuleVerifyTest, WrongVersion) {
+ for (uint32_t x = 1; x; x <<= 1) {
+ const byte data[] = {U32_LE(kWasmMagic), U32_LE(kWasmVersion ^ x),
+ SECTION(END, 0)};
+ ModuleResult result = DecodeModuleNoHeader(data, data + sizeof(data));
+ EXPECT_FALSE(result.ok());
if (result.val) delete result.val;
}
}
+TEST_F(WasmModuleVerifyTest, DecodeEmpty) {
+ static const byte data[] = {SECTION(END, 0)};
+ EXPECT_VERIFIES(data);
+}
TEST_F(WasmModuleVerifyTest, OneGlobal) {
static const byte data[] = {
- kDeclGlobals,
+ SECTION(GLOBALS, 7), // --
1,
- 0,
- 0,
- 0,
- 0, // name offset
+ NAME_LENGTH(1),
+ 'g', // name
kMemI32, // memory type
0, // exported
};
@@ -85,13 +127,13 @@ TEST_F(WasmModuleVerifyTest, OneGlobal) {
// Should decode to exactly one global.
ModuleResult result = DecodeModule(data, data + arraysize(data));
EXPECT_TRUE(result.ok());
- EXPECT_EQ(1, result.val->globals->size());
- EXPECT_EQ(0, result.val->functions->size());
- EXPECT_EQ(0, result.val->data_segments->size());
+ EXPECT_EQ(1, result.val->globals.size());
+ EXPECT_EQ(0, result.val->functions.size());
+ EXPECT_EQ(0, result.val->data_segments.size());
- WasmGlobal* global = &result.val->globals->back();
+ WasmGlobal* global = &result.val->globals.back();
- EXPECT_EQ(0, global->name_offset);
+ EXPECT_EQ(1, global->name_length);
EXPECT_EQ(MachineType::Int32(), global->type);
EXPECT_EQ(0, global->offset);
EXPECT_FALSE(global->exported);
@@ -99,18 +141,14 @@ TEST_F(WasmModuleVerifyTest, OneGlobal) {
if (result.val) delete result.val;
}
- for (size_t size = 1; size < arraysize(data); size++) {
- // Should fall off end of module bytes.
- ModuleResult result = DecodeModule(data, data + size);
- EXPECT_FALSE(result.ok());
- if (result.val) delete result.val;
- }
+ EXPECT_OFF_END_FAILURE(data, 1, sizeof(data));
}
TEST_F(WasmModuleVerifyTest, ZeroGlobals) {
static const byte data[] = {
- kDeclGlobals, 0, // declare 0 globals
+ SECTION(GLOBALS, 1), // --
+ 0, // declare 0 globals
};
ModuleResult result = DecodeModule(data, data + arraysize(data));
EXPECT_TRUE(result.ok());
@@ -135,16 +173,22 @@ static void AppendUint32v(std::vector<byte>& buffer, uint32_t val) {
TEST_F(WasmModuleVerifyTest, NGlobals) {
static const byte data[] = {
- 0, 0, 0, 0, // name offset
- kMemI32, // memory type
- 0, // exported
+ NO_NAME, // name length
+ kMemI32, // memory type
+ 0, // exported
};
- for (uint32_t i = 0; i < 1000000; i = i * 7 + 1) {
+
+ for (uint32_t i = 0; i < 1000000; i = i * 13 + 1) {
std::vector<byte> buffer;
- buffer.push_back(kDeclGlobals);
- AppendUint32v(buffer, i);
+ size_t size =
+ WASM_SECTION_GLOBALS_SIZE + SizeOfVarInt(i) + i * sizeof(data);
+ const byte globals[] = {U32V_5(size), WASM_SECTION_GLOBALS};
+ for (size_t g = 0; g != sizeof(globals); ++g) {
+ buffer.push_back(globals[g]);
+ }
+ AppendUint32v(buffer, i); // Number of globals.
for (uint32_t j = 0; j < i; j++) {
- buffer.insert(buffer.end(), data, data + arraysize(data));
+ buffer.insert(buffer.end(), data, data + sizeof(data));
}
ModuleResult result = DecodeModule(&buffer[0], &buffer[0] + buffer.size());
@@ -153,33 +197,25 @@ TEST_F(WasmModuleVerifyTest, NGlobals) {
}
}
-
TEST_F(WasmModuleVerifyTest, GlobalWithInvalidNameOffset) {
static const byte data[] = {
- kDeclGlobals,
- 1, // declare one global
- 0,
- 3,
- 0,
- 0, // name offset
- kMemI32, // memory type
+ SECTION(GLOBALS, 7),
+ 1, // declare one global
+ NO_NAME, // name offset
+ 33, // memory type
0, // exported
};
EXPECT_FAILURE(data);
}
-
TEST_F(WasmModuleVerifyTest, GlobalWithInvalidMemoryType) {
static const byte data[] = {
- kDeclGlobals,
- 1, // declare one global
- 0,
- 0,
- 0,
- 0, // name offset
- 33, // memory type
- 0, // exported
+ SECTION(GLOBALS, 7),
+ 1, // declare one global
+ NO_NAME, // name offset
+ 33, // memory type
+ 0, // exported
};
EXPECT_FAILURE(data);
@@ -188,18 +224,12 @@ TEST_F(WasmModuleVerifyTest, GlobalWithInvalidMemoryType) {
TEST_F(WasmModuleVerifyTest, TwoGlobals) {
static const byte data[] = {
- kDeclGlobals,
+ SECTION(GLOBALS, 13),
2,
- 0,
- 0,
- 0,
- 0, // #0: name offset
+ NO_NAME, // #0: name length
kMemF32, // memory type
0, // exported
- 0,
- 0,
- 0,
- 0, // #1: name offset
+ NO_NAME, // #1: name length
kMemF64, // memory type
1, // exported
};
@@ -208,19 +238,19 @@ TEST_F(WasmModuleVerifyTest, TwoGlobals) {
// Should decode to exactly two globals.
ModuleResult result = DecodeModule(data, data + arraysize(data));
EXPECT_TRUE(result.ok());
- EXPECT_EQ(2, result.val->globals->size());
- EXPECT_EQ(0, result.val->functions->size());
- EXPECT_EQ(0, result.val->data_segments->size());
+ EXPECT_EQ(2, result.val->globals.size());
+ EXPECT_EQ(0, result.val->functions.size());
+ EXPECT_EQ(0, result.val->data_segments.size());
- WasmGlobal* g0 = &result.val->globals->at(0);
- WasmGlobal* g1 = &result.val->globals->at(1);
+ WasmGlobal* g0 = &result.val->globals[0];
+ WasmGlobal* g1 = &result.val->globals[1];
- EXPECT_EQ(0, g0->name_offset);
+ EXPECT_EQ(0, g0->name_length);
EXPECT_EQ(MachineType::Float32(), g0->type);
EXPECT_EQ(0, g0->offset);
EXPECT_FALSE(g0->exported);
- EXPECT_EQ(0, g1->name_offset);
+ EXPECT_EQ(0, g1->name_length);
EXPECT_EQ(MachineType::Float64(), g1->type);
EXPECT_EQ(0, g1->offset);
EXPECT_TRUE(g1->exported);
@@ -228,26 +258,28 @@ TEST_F(WasmModuleVerifyTest, TwoGlobals) {
if (result.val) delete result.val;
}
- for (size_t size = 1; size < arraysize(data); size++) {
- // Should fall off end of module bytes.
- ModuleResult result = DecodeModule(data, data + size);
- EXPECT_FALSE(result.ok());
- if (result.val) delete result.val;
- }
+ EXPECT_OFF_END_FAILURE(data, 1, sizeof(data));
}
TEST_F(WasmModuleVerifyTest, OneSignature) {
- static const byte data[] = {
- kDeclSignatures, 1, 0, kLocalVoid // void -> void
- };
- EXPECT_VERIFIES(data);
+ {
+ static const byte data[] = {SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE), 1,
+ VOID_VOID_SIG};
+ EXPECT_VERIFIES(data);
+ }
+
+ {
+ static const byte data[] = {SECTION(SIGNATURES, 1 + INT_INT_SIG_SIZE), 1,
+ INT_INT_SIG};
+ EXPECT_VERIFIES(data);
+ }
}
TEST_F(WasmModuleVerifyTest, MultipleSignatures) {
static const byte data[] = {
- kDeclSignatures,
+ SECTION(SIGNATURES, 10),
3,
0,
kLocalVoid, // void -> void
@@ -262,41 +294,36 @@ TEST_F(WasmModuleVerifyTest, MultipleSignatures) {
ModuleResult result = DecodeModule(data, data + arraysize(data));
EXPECT_TRUE(result.ok());
- EXPECT_EQ(3, result.val->signatures->size());
- if (result.val->signatures->size() == 3) {
- EXPECT_EQ(0, result.val->signatures->at(0)->return_count());
- EXPECT_EQ(1, result.val->signatures->at(1)->return_count());
- EXPECT_EQ(1, result.val->signatures->at(2)->return_count());
-
- EXPECT_EQ(0, result.val->signatures->at(0)->parameter_count());
- EXPECT_EQ(1, result.val->signatures->at(1)->parameter_count());
- EXPECT_EQ(2, result.val->signatures->at(2)->parameter_count());
+ EXPECT_EQ(3, result.val->signatures.size());
+ if (result.val->signatures.size() == 3) {
+ EXPECT_EQ(0, result.val->signatures[0]->return_count());
+ EXPECT_EQ(1, result.val->signatures[1]->return_count());
+ EXPECT_EQ(1, result.val->signatures[2]->return_count());
+
+ EXPECT_EQ(0, result.val->signatures[0]->parameter_count());
+ EXPECT_EQ(1, result.val->signatures[1]->parameter_count());
+ EXPECT_EQ(2, result.val->signatures[2]->parameter_count());
}
if (result.val) delete result.val;
- for (size_t size = 1; size < arraysize(data); size++) {
- ModuleResult result = DecodeModule(data, data + size);
- // Should fall off the end of module bytes.
- EXPECT_FALSE(result.ok());
- if (result.val) delete result.val;
- }
+ EXPECT_OFF_END_FAILURE(data, 1, sizeof(data));
}
TEST_F(WasmModuleVerifyTest, FunctionWithoutSig) {
static const byte data[] = {
- kDeclFunctions, 1,
+ SECTION(FUNCTIONS, 25), 1,
// func#0 ------------------------------------------------------
- 0, 0, // signature index
- 0, 0, 0, 0, // name offset
- 0, 0, 0, 0, // code start offset
- 0, 0, 0, 0, // code end offset
- 1, 2, // local int32 count
- 3, 4, // local int64 count
- 5, 6, // local float32 count
- 7, 8, // local float64 count
- 0, // exported
- 1 // external
+ SIG_INDEX(0), // signature index
+ NO_NAME, // name length
+ U32_LE(0), // code start offset
+ U32_LE(0), // code end offset
+ U16_LE(899), // local int32 count
+ U16_LE(799), // local int64 count
+ U16_LE(699), // local float32 count
+ U16_LE(599), // local float64 count
+ 0, // exported
+ 1 // external
};
ModuleResult result = DecodeModule(data, data + arraysize(data));
@@ -306,23 +333,23 @@ TEST_F(WasmModuleVerifyTest, FunctionWithoutSig) {
TEST_F(WasmModuleVerifyTest, OneEmptyVoidVoidFunction) {
- const int kCodeStartOffset = 23;
+ const int kCodeStartOffset = 51;
const int kCodeEndOffset = kCodeStartOffset + 1;
static const byte data[] = {
- kDeclSignatures, 1,
+ SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE), 1,
// sig#0 -------------------------------------------------------
- 0, 0, // void -> void
+ VOID_VOID_SIG,
// func#0 ------------------------------------------------------
- kDeclFunctions, 1,
- kDeclFunctionLocals | kDeclFunctionExport | kDeclFunctionName, 0,
- 0, // signature index
- 9, 0, 0, 0, // name offset
- 11, 2, // local int32 count
- 13, 4, // local int64 count
- 15, 6, // local float32 count
- 17, 8, // local float64 count
- 1, 0, // size
+ SECTION(FUNCTIONS, 19), 1,
+ kDeclFunctionLocals | kDeclFunctionExport | kDeclFunctionName,
+ SIG_INDEX(0), // signature index
+ NAME_LENGTH(2), 'h', 'i', // name
+ U16_LE(1466), // local int32 count
+ U16_LE(1355), // local int64 count
+ U16_LE(1244), // local float32 count
+ U16_LE(1133), // local float64 count
+ 1, 0, // size
kExprNop,
};
@@ -330,22 +357,23 @@ TEST_F(WasmModuleVerifyTest, OneEmptyVoidVoidFunction) {
// Should decode to exactly one function.
ModuleResult result = DecodeModule(data, data + arraysize(data));
EXPECT_TRUE(result.ok());
- EXPECT_EQ(0, result.val->globals->size());
- EXPECT_EQ(1, result.val->signatures->size());
- EXPECT_EQ(1, result.val->functions->size());
- EXPECT_EQ(0, result.val->data_segments->size());
- EXPECT_EQ(0, result.val->function_table->size());
+ EXPECT_EQ(0, result.val->globals.size());
+ EXPECT_EQ(1, result.val->signatures.size());
+ EXPECT_EQ(1, result.val->functions.size());
+ EXPECT_EQ(0, result.val->data_segments.size());
+ EXPECT_EQ(0, result.val->function_table.size());
- WasmFunction* function = &result.val->functions->back();
+ WasmFunction* function = &result.val->functions.back();
- EXPECT_EQ(9, function->name_offset);
+ EXPECT_EQ(39, function->name_offset);
+ EXPECT_EQ(2, function->name_length);
EXPECT_EQ(kCodeStartOffset, function->code_start_offset);
EXPECT_EQ(kCodeEndOffset, function->code_end_offset);
- EXPECT_EQ(523, function->local_i32_count);
- EXPECT_EQ(1037, function->local_i64_count);
- EXPECT_EQ(1551, function->local_f32_count);
- EXPECT_EQ(2065, function->local_f64_count);
+ EXPECT_EQ(1466, function->local_i32_count);
+ EXPECT_EQ(1355, function->local_i64_count);
+ EXPECT_EQ(1244, function->local_f32_count);
+ EXPECT_EQ(1133, function->local_f64_count);
EXPECT_TRUE(function->exported);
EXPECT_FALSE(function->external);
@@ -353,32 +381,26 @@ TEST_F(WasmModuleVerifyTest, OneEmptyVoidVoidFunction) {
if (result.val) delete result.val;
}
- for (size_t size = 5; size < arraysize(data); size++) {
- // Should fall off end of module bytes.
- ModuleResult result = DecodeModule(data, data + size);
- EXPECT_FALSE(result.ok());
- if (result.val) delete result.val;
- }
+ EXPECT_OFF_END_FAILURE(data, 16, sizeof(data));
}
TEST_F(WasmModuleVerifyTest, OneFunctionImported) {
static const byte data[] = {
- kDeclSignatures, 1,
+ SECTION(SIGNATURES, VOID_VOID_SIG_SIZE), 1,
// sig#0 -------------------------------------------------------
- 0, 0, // void -> void
- kDeclFunctions, 1,
+ VOID_VOID_SIG, SECTION(FUNCTIONS, 6), 1,
// func#0 ------------------------------------------------------
kDeclFunctionImport, // no name, no locals, imported
- 0, 0, // signature index
+ SIG_INDEX(0),
};
ModuleResult result = DecodeModule(data, data + arraysize(data));
EXPECT_TRUE(result.ok());
- EXPECT_EQ(1, result.val->functions->size());
- WasmFunction* function = &result.val->functions->back();
+ EXPECT_EQ(1, result.val->functions.size());
+ WasmFunction* function = &result.val->functions.back();
- EXPECT_EQ(0, function->name_offset);
+ EXPECT_EQ(0, function->name_length);
EXPECT_EQ(0, function->code_start_offset);
EXPECT_EQ(0, function->code_end_offset);
@@ -393,16 +415,15 @@ TEST_F(WasmModuleVerifyTest, OneFunctionImported) {
if (result.val) delete result.val;
}
-
TEST_F(WasmModuleVerifyTest, OneFunctionWithNopBody) {
- static const byte kCodeStartOffset = 11;
+ static const byte kCodeStartOffset = 40;
static const byte kCodeEndOffset = kCodeStartOffset + 1;
static const byte data[] = {
- kDeclSignatures, 1,
+ SECTION(SIGNATURES, 3), 1,
// sig#0 -------------------------------------------------------
0, 0, // void -> void
- kDeclFunctions, 1,
+ SECTION(FUNCTIONS, 7), 1,
// func#0 ------------------------------------------------------
0, // no name, no locals
0, 0, // signature index
@@ -412,10 +433,10 @@ TEST_F(WasmModuleVerifyTest, OneFunctionWithNopBody) {
ModuleResult result = DecodeModule(data, data + arraysize(data));
EXPECT_TRUE(result.ok());
- EXPECT_EQ(1, result.val->functions->size());
- WasmFunction* function = &result.val->functions->back();
+ EXPECT_EQ(1, result.val->functions.size());
+ WasmFunction* function = &result.val->functions.back();
- EXPECT_EQ(0, function->name_offset);
+ EXPECT_EQ(0, function->name_length);
EXPECT_EQ(kCodeStartOffset, function->code_start_offset);
EXPECT_EQ(kCodeEndOffset, function->code_end_offset);
@@ -432,14 +453,14 @@ TEST_F(WasmModuleVerifyTest, OneFunctionWithNopBody) {
TEST_F(WasmModuleVerifyTest, OneFunctionWithNopBody_WithLocals) {
- static const byte kCodeStartOffset = 19;
+ static const byte kCodeStartOffset = 48;
static const byte kCodeEndOffset = kCodeStartOffset + 1;
static const byte data[] = {
- kDeclSignatures, 1,
+ SECTION(SIGNATURES, 3), 1,
// sig#0 -------------------------------------------------------
0, 0, // void -> void
- kDeclFunctions, 1,
+ SECTION(FUNCTIONS, 15), 1,
// func#0 ------------------------------------------------------
kDeclFunctionLocals, 0, 0, // signature index
1, 2, // local int32 count
@@ -452,10 +473,10 @@ TEST_F(WasmModuleVerifyTest, OneFunctionWithNopBody_WithLocals) {
ModuleResult result = DecodeModule(data, data + arraysize(data));
EXPECT_TRUE(result.ok());
- EXPECT_EQ(1, result.val->functions->size());
- WasmFunction* function = &result.val->functions->back();
+ EXPECT_EQ(1, result.val->functions.size());
+ WasmFunction* function = &result.val->functions.back();
- EXPECT_EQ(0, function->name_offset);
+ EXPECT_EQ(0, function->name_length);
EXPECT_EQ(kCodeStartOffset, function->code_start_offset);
EXPECT_EQ(kCodeEndOffset, function->code_end_offset);
@@ -472,67 +493,68 @@ TEST_F(WasmModuleVerifyTest, OneFunctionWithNopBody_WithLocals) {
TEST_F(WasmModuleVerifyTest, OneGlobalOneFunctionWithNopBodyOneDataSegment) {
- static const byte kDeclMemorySize = 4;
- static const byte kCodeStartOffset =
- 2 + kDeclMemorySize + kDeclGlobalSize + 4 + 2 + 17;
+ static const byte kCodeStartOffset = 75;
static const byte kCodeEndOffset = kCodeStartOffset + 3;
+ static const byte kDataSegmentSourceOffset = kCodeEndOffset + 20;
static const byte data[] = {
- kDeclMemory, 28, 28, 1,
+ SECTION(MEMORY, 3), 28, 28, 1,
// global#0 --------------------------------------------------
- kDeclGlobals, 1, 0, 0, 0, 0, // name offset
- kMemU8, // memory type
- 0, // exported
+ SECTION(GLOBALS, 7), 1,
+ 0, // name length
+ kMemU8, // memory type
+ 0, // exported
// sig#0 -----------------------------------------------------
- kDeclSignatures, 1, 0, 0, // void -> void
+ SECTION(SIGNATURES, 3), 1, 0, 0, // void -> void
// func#0 ----------------------------------------------------
- kDeclFunctions, 1, kDeclFunctionLocals | kDeclFunctionName, 0,
- 0, // signature index
- 9, 0, 0, 0, // name offset
- 1, 2, // local int32 count
- 3, 4, // local int64 count
- 5, 6, // local float32 count
- 7, 8, // local float64 count
- 3, 0, // body size
- kExprNop, // func#0 body
- kExprNop, // func#0 body
- kExprNop, // func#0 body
+ SECTION(FUNCTIONS, 20), 1, kDeclFunctionLocals | kDeclFunctionName, 0,
+ 0, // signature index
+ 2, 'h', 'i', // name
+ 1, 2, // local int32 count
+ 3, 4, // local int64 count
+ 5, 6, // local float32 count
+ 7, 8, // local float64 count
+ 3, 0, // body size
+ kExprNop, // func#0 body
+ kExprNop, // func#0 body
+ kExprNop, // func#0 body
// segment#0 -------------------------------------------------
- kDeclDataSegments, 1, 0xae, 0xb3, 0x08, 0, // dest addr
- 15, 0, 0, 0, // source offset
- 5, 0, 0, 0, // source size
- 1, // init
+ SECTION(DATA_SEGMENTS, 14), 1,
+ U32V_3(0x8b3ae), // dest addr
+ U32V_1(5), // source size
+ 0, 1, 2, 3, 4, // data bytes
// rest ------------------------------------------------------
- kDeclEnd,
+ SECTION(END, 0),
};
{
ModuleResult result = DecodeModule(data, data + arraysize(data));
EXPECT_TRUE(result.ok());
- EXPECT_EQ(1, result.val->globals->size());
- EXPECT_EQ(1, result.val->functions->size());
- EXPECT_EQ(1, result.val->data_segments->size());
+ EXPECT_EQ(1, result.val->globals.size());
+ EXPECT_EQ(1, result.val->functions.size());
+ EXPECT_EQ(1, result.val->data_segments.size());
- WasmGlobal* global = &result.val->globals->back();
+ WasmGlobal* global = &result.val->globals.back();
- EXPECT_EQ(0, global->name_offset);
+ EXPECT_EQ(0, global->name_length);
EXPECT_EQ(MachineType::Uint8(), global->type);
EXPECT_EQ(0, global->offset);
EXPECT_FALSE(global->exported);
- WasmFunction* function = &result.val->functions->back();
+ WasmFunction* function = &result.val->functions.back();
- EXPECT_EQ(9, function->name_offset);
+ EXPECT_EQ(63, function->name_offset);
+ EXPECT_EQ(2, function->name_length);
EXPECT_EQ(kCodeStartOffset, function->code_start_offset);
EXPECT_EQ(kCodeEndOffset, function->code_end_offset);
EXPECT_FALSE(function->exported);
EXPECT_FALSE(function->external);
- WasmDataSegment* segment = &result.val->data_segments->back();
+ WasmDataSegment* segment = &result.val->data_segments.back();
EXPECT_EQ(0x8b3ae, segment->dest_addr);
- EXPECT_EQ(15, segment->source_offset);
+ EXPECT_EQ(kDataSegmentSourceOffset, segment->source_offset);
EXPECT_EQ(5, segment->source_size);
EXPECT_TRUE(segment->init);
@@ -542,147 +564,119 @@ TEST_F(WasmModuleVerifyTest, OneGlobalOneFunctionWithNopBodyOneDataSegment) {
TEST_F(WasmModuleVerifyTest, OneDataSegment) {
+ const byte kDataSegmentSourceOffset = 39;
const byte data[] = {
- kDeclMemory, 28, 28, 1, kDeclDataSegments, 1, 0xaa, 0xbb, 0x09,
- 0, // dest addr
- 11, 0, 0,
- 0, // source offset
- 3, 0, 0,
- 0, // source size
- 1, // init
+ SECTION(MEMORY, 3),
+ 28,
+ 28,
+ 1,
+ SECTION(DATA_SEGMENTS, 8),
+ 1,
+ U32V_3(0x9bbaa), // dest addr
+ U32V_1(3), // source size
+ 'a',
+ 'b',
+ 'c' // data bytes
};
{
EXPECT_VERIFIES(data);
ModuleResult result = DecodeModule(data, data + arraysize(data));
EXPECT_TRUE(result.ok());
- EXPECT_EQ(0, result.val->globals->size());
- EXPECT_EQ(0, result.val->functions->size());
- EXPECT_EQ(1, result.val->data_segments->size());
+ EXPECT_EQ(0, result.val->globals.size());
+ EXPECT_EQ(0, result.val->functions.size());
+ EXPECT_EQ(1, result.val->data_segments.size());
- WasmDataSegment* segment = &result.val->data_segments->back();
+ WasmDataSegment* segment = &result.val->data_segments.back();
EXPECT_EQ(0x9bbaa, segment->dest_addr);
- EXPECT_EQ(11, segment->source_offset);
+ EXPECT_EQ(kDataSegmentSourceOffset, segment->source_offset);
EXPECT_EQ(3, segment->source_size);
EXPECT_TRUE(segment->init);
if (result.val) delete result.val;
}
- for (size_t size = 5; size < arraysize(data); size++) {
- // Should fall off end of module bytes.
- ModuleResult result = DecodeModule(data, data + size);
- EXPECT_FALSE(result.ok());
- if (result.val) delete result.val;
- }
+ EXPECT_OFF_END_FAILURE(data, 13, sizeof(data));
}
TEST_F(WasmModuleVerifyTest, TwoDataSegments) {
+ const byte kDataSegment0SourceOffset = 39;
+ const byte kDataSegment1SourceOffset = 39 + 8;
+
const byte data[] = {
- kDeclMemory, 28, 28, 1, kDeclDataSegments, 2, 0xee, 0xff, 0x07,
- 0, // dest addr
- 9, 0, 0,
- 0, // #0: source offset
- 4, 0, 0,
- 0, // source size
- 0, // init
- 0xcc, 0xdd, 0x06,
- 0, // #1: dest addr
- 6, 0, 0,
- 0, // source offset
- 10, 0, 0,
- 0, // source size
- 1, // init
+ SECTION(MEMORY, 3),
+ 28,
+ 28,
+ 1,
+ SECTION(DATA_SEGMENTS, 31),
+ 2, // segment count
+ U32V_3(0x7ffee), // #0: dest addr
+ U32V_1(4), // source size
+ 1,
+ 2,
+ 3,
+ 4, // data bytes
+ U32V_3(0x6ddcc), // #1: dest addr
+ U32V_1(10), // source size
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ 7,
+ 8,
+ 9,
+ 10 // data bytes
};
{
ModuleResult result = DecodeModule(data, data + arraysize(data));
EXPECT_TRUE(result.ok());
- EXPECT_EQ(0, result.val->globals->size());
- EXPECT_EQ(0, result.val->functions->size());
- EXPECT_EQ(2, result.val->data_segments->size());
+ EXPECT_EQ(0, result.val->globals.size());
+ EXPECT_EQ(0, result.val->functions.size());
+ EXPECT_EQ(2, result.val->data_segments.size());
- WasmDataSegment* s0 = &result.val->data_segments->at(0);
- WasmDataSegment* s1 = &result.val->data_segments->at(1);
+ WasmDataSegment* s0 = &result.val->data_segments[0];
+ WasmDataSegment* s1 = &result.val->data_segments[1];
EXPECT_EQ(0x7ffee, s0->dest_addr);
- EXPECT_EQ(9, s0->source_offset);
+ EXPECT_EQ(kDataSegment0SourceOffset, s0->source_offset);
EXPECT_EQ(4, s0->source_size);
- EXPECT_FALSE(s0->init);
+ EXPECT_TRUE(s0->init);
EXPECT_EQ(0x6ddcc, s1->dest_addr);
- EXPECT_EQ(6, s1->source_offset);
+ EXPECT_EQ(kDataSegment1SourceOffset, s1->source_offset);
EXPECT_EQ(10, s1->source_size);
EXPECT_TRUE(s1->init);
if (result.val) delete result.val;
}
- for (size_t size = 5; size < arraysize(data); size++) {
- // Should fall off end of module bytes.
- ModuleResult result = DecodeModule(data, data + size);
- EXPECT_FALSE(result.ok());
- if (result.val) delete result.val;
- }
-}
-
-
-TEST_F(WasmModuleVerifyTest, DataSegmentWithInvalidSource) {
- const int dest_addr = 0x100;
- const byte mem_size_log2 = 15;
- const int kDataSize = 19;
-
- for (int source_offset = 0; source_offset < 5 + kDataSize; source_offset++) {
- for (int source_size = -1; source_size < 5 + kDataSize; source_size += 3) {
- byte data[] = {
- kDeclMemory,
- mem_size_log2,
- mem_size_log2,
- 1,
- kDeclDataSegments,
- 1,
- U32_LE(dest_addr),
- U32_LE(source_offset),
- U32_LE(source_size),
- 1, // init
- };
-
- STATIC_ASSERT(kDataSize == arraysize(data));
-
- if (source_offset < kDataSize && source_size >= 0 &&
- (source_offset + source_size) <= kDataSize) {
- EXPECT_VERIFIES(data);
- } else {
- EXPECT_FAILURE(data);
- }
- }
- }
+ EXPECT_OFF_END_FAILURE(data, 13, sizeof(data));
}
-
TEST_F(WasmModuleVerifyTest, DataSegmentWithInvalidDest) {
const int source_size = 3;
- const int source_offset = 11;
- for (byte mem_size_log2 = 12; mem_size_log2 < 20; mem_size_log2++) {
- int mem_size = 1 << mem_size_log2;
+ for (byte mem_pages = 1; mem_pages < 16; mem_pages++) {
+ int mem_size = mem_pages * 0x10000; // 64k pages.
for (int dest_addr = mem_size - source_size;
dest_addr < mem_size + source_size; dest_addr++) {
- byte data[] = {
- kDeclMemory,
- mem_size_log2,
- mem_size_log2,
- 1,
- kDeclDataSegments,
- 1,
- U32_LE(dest_addr),
- U32_LE(source_offset),
- U32_LE(source_size),
- 1, // init
- };
+ byte data[] = {SECTION(MEMORY, 3),
+ mem_pages,
+ mem_pages,
+ 1,
+ SECTION(DATA_SEGMENTS, 14),
+ 1,
+ U32V_3(dest_addr),
+ U32V_1(source_size),
+ 'a',
+ 'b',
+ 'c'};
if (dest_addr <= (mem_size - source_size)) {
EXPECT_VERIFIES(data);
@@ -695,27 +689,24 @@ TEST_F(WasmModuleVerifyTest, DataSegmentWithInvalidDest) {
// To make below tests for indirect calls much shorter.
-#define FUNCTION(sig_index, external) \
- kDeclFunctionImport, static_cast<byte>(sig_index), \
- static_cast<byte>(sig_index >> 8)
-
+#define FUNCTION(sig_index, external) kDeclFunctionImport, SIG_INDEX(sig_index)
TEST_F(WasmModuleVerifyTest, OneIndirectFunction) {
static const byte data[] = {
// sig#0 -------------------------------------------------------
- kDeclSignatures, 1, 0, 0, // void -> void
+ SECTION(SIGNATURES, 3), 1, 0, 0, // void -> void
// func#0 ------------------------------------------------------
- kDeclFunctions, 1, FUNCTION(0, 0),
+ SECTION(FUNCTIONS, 4), 1, FUNCTION(0, 0),
// indirect table ----------------------------------------------
- kDeclFunctionTable, 1, 0, 0};
+ SECTION(FUNCTION_TABLE, 2), 1, U32V_1(0)};
ModuleResult result = DecodeModule(data, data + arraysize(data));
EXPECT_TRUE(result.ok());
if (result.ok()) {
- EXPECT_EQ(1, result.val->signatures->size());
- EXPECT_EQ(1, result.val->functions->size());
- EXPECT_EQ(1, result.val->function_table->size());
- EXPECT_EQ(0, result.val->function_table->at(0));
+ EXPECT_EQ(1, result.val->signatures.size());
+ EXPECT_EQ(1, result.val->functions.size());
+ EXPECT_EQ(1, result.val->function_table.size());
+ EXPECT_EQ(0, result.val->function_table[0]);
}
if (result.val) delete result.val;
}
@@ -724,23 +715,33 @@ TEST_F(WasmModuleVerifyTest, OneIndirectFunction) {
TEST_F(WasmModuleVerifyTest, MultipleIndirectFunctions) {
static const byte data[] = {
// sig#0 -------------------------------------------------------
- kDeclSignatures, 2, 0, 0, // void -> void
- 0, kLocalI32, // void -> i32
+ SECTION(SIGNATURES, 5), 2, 0, 0, // void -> void
+ 0, kLocalI32, // void -> i32
// func#0 ------------------------------------------------------
- kDeclFunctions, 4, FUNCTION(0, 1), FUNCTION(1, 1), FUNCTION(0, 1),
- FUNCTION(1, 1),
+ SECTION(FUNCTIONS, 13), 4, FUNCTION(0, 1), // --
+ FUNCTION(1, 1), // --
+ FUNCTION(0, 1), // --
+ FUNCTION(1, 1), // --
// indirect table ----------------------------------------------
- kDeclFunctionTable, 8, 0, 0, 1, 0, 2, 0, 3, 0, 0, 0, 1, 0, 2, 0, 3, 0,
+ SECTION(FUNCTION_TABLE, 9), 8,
+ U32V_1(0), // --
+ U32V_1(1), // --
+ U32V_1(2), // --
+ U32V_1(3), // --
+ U32V_1(0), // --
+ U32V_1(1), // --
+ U32V_1(2), // --
+ U32V_1(3), // --
};
ModuleResult result = DecodeModule(data, data + arraysize(data));
EXPECT_TRUE(result.ok());
if (result.ok()) {
- EXPECT_EQ(2, result.val->signatures->size());
- EXPECT_EQ(4, result.val->functions->size());
- EXPECT_EQ(8, result.val->function_table->size());
+ EXPECT_EQ(2, result.val->signatures.size());
+ EXPECT_EQ(4, result.val->functions.size());
+ EXPECT_EQ(8, result.val->function_table.size());
for (int i = 0; i < 8; i++) {
- EXPECT_EQ(i & 3, result.val->function_table->at(i));
+ EXPECT_EQ(i & 3, result.val->function_table[i]);
}
}
if (result.val) delete result.val;
@@ -750,9 +751,9 @@ TEST_F(WasmModuleVerifyTest, MultipleIndirectFunctions) {
TEST_F(WasmModuleVerifyTest, IndirectFunctionNoFunctions) {
static const byte data[] = {
// sig#0 -------------------------------------------------------
- kDeclSignatures, 1, 0, 0, // void -> void
+ SECTION(SIGNATURES, 3), 1, 0, 0, // void -> void
// indirect table ----------------------------------------------
- kDeclFunctionTable, 1, 0, 0,
+ SECTION(FUNCTION_TABLE, 3), 1, 0, 0,
};
EXPECT_FAILURE(data);
@@ -762,11 +763,11 @@ TEST_F(WasmModuleVerifyTest, IndirectFunctionNoFunctions) {
TEST_F(WasmModuleVerifyTest, IndirectFunctionInvalidIndex) {
static const byte data[] = {
// sig#0 -------------------------------------------------------
- kDeclSignatures, 1, 0, 0, // void -> void
+ SECTION(SIGNATURES, 3), 1, 0, 0, // void -> void
// functions ---------------------------------------------------
- kDeclFunctions, 1, FUNCTION(0, 1),
+ SECTION(FUNCTIONS, 4), 1, FUNCTION(0, 1),
// indirect table ----------------------------------------------
- kDeclFunctionTable, 1, 1, 0,
+ SECTION(FUNCTION_TABLE, 3), 1, 1, 0,
};
EXPECT_FAILURE(data);
@@ -778,7 +779,8 @@ class WasmSignatureDecodeTest : public TestWithZone {};
TEST_F(WasmSignatureDecodeTest, Ok_v_v) {
static const byte data[] = {0, 0};
- Zone zone;
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
FunctionSig* sig =
DecodeWasmSignatureForTesting(&zone, data, data + arraysize(data));
@@ -906,10 +908,11 @@ class WasmFunctionVerifyTest : public TestWithZone {};
TEST_F(WasmFunctionVerifyTest, Ok_v_v_empty) {
static const byte data[] = {
0, kLocalVoid, // signature
- 3, 0, // local int32 count
- 4, 0, // local int64 count
- 5, 0, // local float32 count
- 6, 0, // local float64 count
+ 4, // locals
+ 3, kLocalI32, // --
+ 4, kLocalI64, // --
+ 5, kLocalF32, // --
+ 6, kLocalF64, // --
kExprNop // body
};
@@ -922,12 +925,9 @@ TEST_F(WasmFunctionVerifyTest, Ok_v_v_empty) {
EXPECT_EQ(0, function->sig->parameter_count());
EXPECT_EQ(0, function->sig->return_count());
EXPECT_EQ(0, function->name_offset);
- EXPECT_EQ(arraysize(data) - 1, function->code_start_offset);
+ EXPECT_EQ(2, function->code_start_offset);
EXPECT_EQ(arraysize(data), function->code_end_offset);
- EXPECT_EQ(3, function->local_i32_count);
- EXPECT_EQ(4, function->local_i64_count);
- EXPECT_EQ(5, function->local_f32_count);
- EXPECT_EQ(6, function->local_f64_count);
+ // TODO(titzer): verify encoding of local declarations
EXPECT_FALSE(function->external);
EXPECT_FALSE(function->exported);
}
@@ -935,156 +935,445 @@ TEST_F(WasmFunctionVerifyTest, Ok_v_v_empty) {
if (result.val) delete result.val;
}
+TEST_F(WasmModuleVerifyTest, SectionWithoutNameLength) {
+ const byte data[] = {1};
+ EXPECT_FAILURE(data);
+}
-TEST_F(WasmModuleVerifyTest, WLLSectionNoLen) {
+TEST_F(WasmModuleVerifyTest, TheLoneliestOfValidModulesTheTrulyEmptyOne) {
const byte data[] = {
- kDeclWLL, // section without length.
+ 1, // Section size.
+ 0, // Empty section name.
+ // No section name, no content, nothing but sadness.
};
- EXPECT_FAILURE(data);
+ EXPECT_VERIFIES(data);
}
-
-TEST_F(WasmModuleVerifyTest, WLLSectionEmpty) {
- static const byte data[] = {
- kDeclWLL, 0, // empty section
+TEST_F(WasmModuleVerifyTest, OnlyUnknownSectionEmpty) {
+ const byte data[] = {
+ 5, // Section size.
+ 4, 'l', 'u', 'l', 'z', // unknown section.
};
- ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_TRUE(result.ok());
- if (result.val) delete result.val;
+ EXPECT_VERIFIES(data);
}
-
-TEST_F(WasmModuleVerifyTest, WLLSectionOne) {
- static const byte data[] = {
- kDeclWLL,
- 1, // LEB128 1
- 0, // one byte section
+TEST_F(WasmModuleVerifyTest, OnlyUnknownSectionNonEmpty) {
+ const byte data[] = {
+ 10, // Section size.
+ 4, 'l', 'u', 'l', 'z', // unknown section.
+ // Section content:
+ 0xff, 0xff, 0xff, 0xff, 0xff,
};
- ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_TRUE(result.ok());
- if (result.val) delete result.val;
+ EXPECT_VERIFIES(data);
}
+TEST_F(WasmModuleVerifyTest, SignatureFollowedByEmptyUnknownSection) {
+ const byte data[] = {
+ SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE), 1, VOID_VOID_SIG,
+ // -----------------------------------------------------------
+ 5, // Section size.
+ 4, 'l', 'u', 'l', 'z', // unknown section.
+ };
+ EXPECT_VERIFIES(data);
+}
-TEST_F(WasmModuleVerifyTest, WLLSectionTen) {
- static const byte data[] = {
- kDeclWLL,
- 10, // LEB128 10
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // 10 byte section
+TEST_F(WasmModuleVerifyTest, SignatureFollowedByUnknownSection) {
+ const byte data[] = {
+ SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE), 1, VOID_VOID_SIG,
+ // -----------------------------------------------------------
+ 10, // Section size.
+ 4, 'l', 'u', 'l', 'z', // unknown section.
+ 0xff, 0xff, 0xff, 0xff, 0xff,
};
- ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_TRUE(result.ok());
- if (result.val) delete result.val;
+ EXPECT_VERIFIES(data);
}
+TEST_F(WasmModuleVerifyTest, SignatureFollowedByUnknownSectionWithLongLEB) {
+ const byte data[] = {
+ SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE), 1, VOID_VOID_SIG,
+ // -----------------------------------------------------------
+ 0x85, 0x80, 0x80, 0x80, 0x00, // Section size: 1 but in a 5-byte LEB.
+ 4, 'l', 'u', 'l', 'z', // unknown section.
+ };
+ EXPECT_VERIFIES(data);
+}
-TEST_F(WasmModuleVerifyTest, WLLSectionOverflow) {
+TEST_F(WasmModuleVerifyTest, UnknownSectionOverflow) {
static const byte data[] = {
- kDeclWLL,
- 11, // LEB128 11
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // 10 byte section
+ 13, // Section size.
+ 1, // Section name length.
+ '\0', // Section name.
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // 10 byte section
};
EXPECT_FAILURE(data);
}
-
-TEST_F(WasmModuleVerifyTest, WLLSectionUnderflow) {
+TEST_F(WasmModuleVerifyTest, UnknownSectionUnderflow) {
static const byte data[] = {
- kDeclWLL, 0xff, 0xff, 0xff, 0xff, 0x0f, // LEB128 0xffffffff
- 1, 2, 3, 4, // 4 byte section
+ 0xff, 0xff, 0xff, 0xff, 0x0f, // Section size LEB128 0xffffffff
+ 1, '\0', // Section name and name length.
+ 1, 2, 3, 4, // 4 byte section
};
EXPECT_FAILURE(data);
}
-
-TEST_F(WasmModuleVerifyTest, WLLSectionLoop) {
+TEST_F(WasmModuleVerifyTest, UnknownSectionLoop) {
// Would infinite loop decoding if wrapping and allowed.
static const byte data[] = {
- kDeclWLL, 0xfa, 0xff, 0xff, 0xff, 0x0f, // LEB128 0xfffffffa
- 1, 2, 3, 4, // 4 byte section
+ 0xfa, 0xff, 0xff, 0xff, 0x0f, // Section size LEB128 0xfffffffa
+ 1, '\0', // Section name and name length.
+ 1, 2, 3, 4, // 4 byte section
};
EXPECT_FAILURE(data);
}
+TEST_F(WasmModuleVerifyTest, UnknownSectionSkipped) {
+ static const byte data[] = {
+ 3, // Section size.
+ 1,
+ '\0', // Section name: LEB128 1, string '\0'
+ 0, // one byte section
+ SECTION(GLOBALS, 7),
+ 1,
+ 0, // name length
+ kMemI32, // memory type
+ 0, // exported
+ };
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+
+ EXPECT_EQ(1, result.val->globals.size());
+ EXPECT_EQ(0, result.val->functions.size());
+ EXPECT_EQ(0, result.val->data_segments.size());
+
+ WasmGlobal* global = &result.val->globals.back();
+
+ EXPECT_EQ(0, global->name_length);
+ EXPECT_EQ(MachineType::Int32(), global->type);
+ EXPECT_EQ(0, global->offset);
+ EXPECT_FALSE(global->exported);
+
+ if (result.val) delete result.val;
+}
+
TEST_F(WasmModuleVerifyTest, ImportTable_empty) {
- static const byte data[] = {kDeclSignatures, 0, kDeclImportTable, 0};
+ static const byte data[] = {SECTION(SIGNATURES, 1), 0,
+ SECTION(IMPORT_TABLE, 1), 0};
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, ImportTable_nosigs) {
- static const byte data[] = {kDeclImportTable, 0};
+ static const byte data[] = {SECTION(IMPORT_TABLE, 1), 0};
EXPECT_FAILURE(data);
}
TEST_F(WasmModuleVerifyTest, ImportTable_invalid_sig) {
static const byte data[] = {
- kDeclSignatures,
- 0,
- kDeclImportTable,
- 1,
- 0,
- 0, // sig index
- 1,
- 0,
- 0,
- 0, // module name
- 1,
- 0,
- 0,
- 0 // function name
+ SECTION(SIGNATURES, 1), 0, SECTION(IMPORT_TABLE, 6), 1,
+ IMPORT_SIG_INDEX(0), // sig index
+ NAME_LENGTH(1), 'm', // module name
+ NAME_LENGTH(1), 'f', // function name
};
EXPECT_FAILURE(data);
}
TEST_F(WasmModuleVerifyTest, ImportTable_one_sig) {
static const byte data[] = {
- kDeclSignatures,
+ SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE),
1,
- 0,
- static_cast<byte>(kAstStmt),
- kDeclImportTable,
- 1,
- 0,
- 0, // sig index
- 1,
- 0,
- 0,
- 0, // module name
- 1,
- 0,
- 0,
- 0 // function name
+ VOID_VOID_SIG,
+ SECTION(IMPORT_TABLE, 6),
+ 1, // --
+ IMPORT_SIG_INDEX(0), // sig index
+ NAME_LENGTH(1),
+ 'm', // module name
+ NAME_LENGTH(1),
+ 'f', // function name
};
EXPECT_VERIFIES(data);
}
-TEST_F(WasmModuleVerifyTest, ImportTable_off_end) {
+TEST_F(WasmModuleVerifyTest, ImportTable_invalid_module) {
static const byte data[] = {
- kDeclSignatures,
+ SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE),
1,
- 0,
- static_cast<byte>(kAstStmt),
- kDeclImportTable,
- 1,
- 0,
- 0, // sig index
+ VOID_VOID_SIG,
+ SECTION(IMPORT_TABLE, 6),
+ 1, // --
+ IMPORT_SIG_INDEX(0), // sig index
+ NO_NAME, // module name
+ NAME_LENGTH(1),
+ 'f' // function name
+ };
+ EXPECT_FAILURE(data);
+}
+
+TEST_F(WasmModuleVerifyTest, ImportTable_off_end) {
+ static const byte data[] = {
+ SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE),
1,
- 0,
- 0,
- 0, // module name
+ VOID_VOID_SIG,
+ SECTION(IMPORT_TABLE, 6),
1,
- 0,
- 0,
- 0 // function name
+ IMPORT_SIG_INDEX(0), // sig index
+ NAME_LENGTH(1),
+ 'm', // module name
+ NAME_LENGTH(1),
+ 'f', // function name
};
- for (size_t length = 5; length < sizeof(data); length++) {
+ EXPECT_OFF_END_FAILURE(data, 16, sizeof(data));
+}
+
+TEST_F(WasmModuleVerifyTest, ExportTable_empty1) {
+ static const byte data[] = {SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE),
+ 1,
+ VOID_VOID_SIG,
+ SECTION(FUNCTIONS, 1 + EMPTY_FUNCTION_SIZE),
+ 1,
+ EMPTY_FUNCTION(0),
+ SECTION(EXPORT_TABLE, 1),
+ 0};
+ EXPECT_VERIFIES(data);
+}
+
+TEST_F(WasmModuleVerifyTest, ExportTable_empty2) {
+ static const byte data[] = {SECTION(SIGNATURES, 1), 0,
+ SECTION(FUNCTIONS, 1), 0,
+ SECTION(EXPORT_TABLE, 1), 0};
+ // TODO(titzer): current behavior treats empty functions section as missing.
+ EXPECT_FAILURE(data);
+}
+
+TEST_F(WasmModuleVerifyTest, ExportTable_NoFunctions1) {
+ static const byte data[] = {SECTION(SIGNATURES, 1), 0,
+ SECTION(EXPORT_TABLE, 1), 0};
+ EXPECT_FAILURE(data);
+}
+
+TEST_F(WasmModuleVerifyTest, ExportTable_NoFunctions2) {
+ static const byte data[] = {SECTION(EXPORT_TABLE, 1), 0};
+ EXPECT_FAILURE(data);
+}
+
+TEST_F(WasmModuleVerifyTest, ExportTableOne) {
+ static const byte data[] = {
+ SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE),
+ 1, // sigs
+ VOID_VOID_SIG, // --
+ SECTION(FUNCTIONS, 1 + EMPTY_FUNCTION_SIZE),
+ 1, // functions
+ EMPTY_FUNCTION(0), // --
+ SECTION(EXPORT_TABLE, 7),
+ 1, // exports
+ FUNC_INDEX(0), // --
+ NO_NAME // --
+ };
+ EXPECT_VERIFIES(data);
+}
+
+TEST_F(WasmModuleVerifyTest, ExportTableTwo) {
+ static const byte data[] = {
+ SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE),
+ 1, // sigs
+ VOID_VOID_SIG, // --
+ SECTION(FUNCTIONS, 1 + EMPTY_FUNCTION_SIZE),
+ 1, // functions
+ EMPTY_FUNCTION(0), // --
+ SECTION(EXPORT_TABLE, 12),
+ 2, // exports
+ FUNC_INDEX(0), // --
+ NAME_LENGTH(4),
+ 'n',
+ 'a',
+ 'm',
+ 'e', // --
+ FUNC_INDEX(0), // --
+ NAME_LENGTH(3),
+ 'n',
+ 'o',
+ 'm' // --
+ };
+ EXPECT_VERIFIES(data);
+}
+
+TEST_F(WasmModuleVerifyTest, ExportTableThree) {
+ static const byte data[] = {
+ SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE),
+ 1, // sigs
+ VOID_VOID_SIG, // --
+ SECTION(FUNCTIONS, 1 + 3 * EMPTY_FUNCTION_SIZE),
+ 3, // functions
+ EMPTY_FUNCTION(0), // --
+ EMPTY_FUNCTION(0), // --
+ EMPTY_FUNCTION(0), // --
+ SECTION(EXPORT_TABLE, 10),
+ 3, // exports
+ FUNC_INDEX(0), // --
+ NAME_LENGTH(1),
+ 'a', // --
+ FUNC_INDEX(1), // --
+ NAME_LENGTH(1),
+ 'b', // --
+ FUNC_INDEX(2), // --
+ NAME_LENGTH(1),
+ 'c' // --
+ };
+ EXPECT_VERIFIES(data);
+}
+
+TEST_F(WasmModuleVerifyTest, ExportTableThreeOne) {
+ for (int i = 0; i < 6; i++) {
+ const byte data[] = {
+ SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE),
+ 1, // sigs
+ VOID_VOID_SIG, // --
+ SECTION(FUNCTIONS, 1 + 3 * EMPTY_FUNCTION_SIZE),
+ 3, // functions
+ EMPTY_FUNCTION(0), // --
+ EMPTY_FUNCTION(0), // --
+ EMPTY_FUNCTION(0), // --
+ SECTION(EXPORT_TABLE, 5),
+ 1, // exports
+ FUNC_INDEX(i), // --
+ NAME_LENGTH(2),
+ 'e',
+ 'x', // --
+ };
+
+ if (i < 3) {
+ EXPECT_VERIFIES(data);
+ } else {
+ EXPECT_FAILURE(data);
+ }
+ }
+}
+
+TEST_F(WasmModuleVerifyTest, ExportTableOne_off_end) {
+ static const byte data[] = {
+ SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE),
+ 1, // sigs
+ VOID_VOID_SIG, // --
+ SECTION(FUNCTIONS, 1 + EMPTY_FUNCTION_SIZE),
+ 1, // functions
+ EMPTY_FUNCTION(0), // --
+ SECTION(EXPORT_TABLE, 1 + 6),
+ 1, // exports
+ FUNC_INDEX(0), // --
+ NO_NAME // --
+ };
+
+ for (int length = 33; length < sizeof(data); length++) {
ModuleResult result = DecodeModule(data, data + length);
EXPECT_FALSE(result.ok());
if (result.val) delete result.val;
}
}
+#define SIGNATURES_SECTION(count, ...) \
+ SECTION(SIGNATURES, 1 + 3 * (count)), U32V_1(count), __VA_ARGS__
+#define FUNCTION_SIGNATURES_SECTION(count, ...) \
+ SECTION(FUNCTION_SIGNATURES, 1 + (count)), U32V_1(count), __VA_ARGS__
+
+#define FOO_STRING 3, 'f', 'o', 'o'
+#define NO_LOCAL_NAMES 0
+
+#define EMPTY_SIGNATURES_SECTION SECTION(SIGNATURES, 1), 0
+#define EMPTY_FUNCTION_SIGNATURES_SECTION SECTION(FUNCTION_SIGNATURES, 1), 0
+#define EMPTY_FUNCTION_BODIES_SECTION SECTION(FUNCTION_BODIES, 1), 0
+#define EMPTY_NAMES_SECTION SECTION(NAMES, 1), 0
+
+TEST_F(WasmModuleVerifyTest, FunctionSignatures_empty) {
+ static const byte data[] = {SECTION(SIGNATURES, 1), 0,
+ SECTION(FUNCTION_SIGNATURES, 1), 0};
+ EXPECT_VERIFIES(data);
+}
+
+TEST_F(WasmModuleVerifyTest, FunctionSignatures_one) {
+ static const byte data[] = {SIGNATURES_SECTION(1, VOID_VOID_SIG),
+ FUNCTION_SIGNATURES_SECTION(1, 0)};
+ EXPECT_VERIFIES(data);
+}
+
+TEST_F(WasmModuleVerifyTest, FunctionBodies_empty) {
+ static const byte data[] = {EMPTY_SIGNATURES_SECTION,
+ EMPTY_FUNCTION_SIGNATURES_SECTION,
+ EMPTY_FUNCTION_BODIES_SECTION};
+ EXPECT_VERIFIES(data);
+}
+
+TEST_F(WasmModuleVerifyTest, FunctionBodies_one_empty) {
+ static const byte data[] = {
+ SIGNATURES_SECTION(1, VOID_VOID_SIG), FUNCTION_SIGNATURES_SECTION(1, 0),
+ SECTION(FUNCTION_BODIES, 1 + EMPTY_BODY_SIZE), 1, EMPTY_BODY};
+ EXPECT_VERIFIES(data);
+}
+
+TEST_F(WasmModuleVerifyTest, FunctionBodies_one_nop) {
+ static const byte data[] = {
+ SIGNATURES_SECTION(1, VOID_VOID_SIG), FUNCTION_SIGNATURES_SECTION(1, 0),
+ SECTION(FUNCTION_BODIES, 1 + NOP_BODY_SIZE), 1, NOP_BODY};
+ EXPECT_VERIFIES(data);
+}
+
+TEST_F(WasmModuleVerifyTest, FunctionBodies_count_mismatch1) {
+ static const byte data[] = {SIGNATURES_SECTION(1, VOID_VOID_SIG),
+ FUNCTION_SIGNATURES_SECTION(2, 0, 0),
+ SECTION(FUNCTION_BODIES, 1 + EMPTY_BODY_SIZE), 1,
+ EMPTY_BODY};
+ EXPECT_FAILURE(data);
+}
+
+TEST_F(WasmModuleVerifyTest, FunctionBodies_count_mismatch2) {
+ static const byte data[] = {SIGNATURES_SECTION(1, VOID_VOID_SIG),
+ FUNCTION_SIGNATURES_SECTION(1, 0),
+ SECTION(FUNCTION_BODIES, 1 + 2 * NOP_BODY_SIZE),
+ 2,
+ NOP_BODY,
+ NOP_BODY};
+ EXPECT_FAILURE(data);
+}
+
+TEST_F(WasmModuleVerifyTest, Names_empty) {
+ static const byte data[] = {
+ EMPTY_SIGNATURES_SECTION, EMPTY_FUNCTION_SIGNATURES_SECTION,
+ EMPTY_FUNCTION_BODIES_SECTION, EMPTY_NAMES_SECTION};
+ EXPECT_VERIFIES(data);
+}
+
+TEST_F(WasmModuleVerifyTest, Names_one_empty) {
+ static const byte data[] = {
+ SIGNATURES_SECTION(1, VOID_VOID_SIG), // --
+ FUNCTION_SIGNATURES_SECTION(1, 0), // --
+ SECTION(FUNCTION_BODIES, 1 + EMPTY_BODY_SIZE),
+ 1,
+ EMPTY_BODY, // --
+ SECTION(NAMES, 1 + 5),
+ 1,
+ FOO_STRING,
+ NO_LOCAL_NAMES // --
+ };
+ EXPECT_VERIFIES(data);
+}
+
+TEST_F(WasmModuleVerifyTest, Names_two_empty) {
+ static const byte data[] = {
+ SIGNATURES_SECTION(1, VOID_VOID_SIG), // --
+ FUNCTION_SIGNATURES_SECTION(2, 0, 0), // --
+ SECTION(FUNCTION_BODIES, 1 + 2 * EMPTY_BODY_SIZE), // --
+ 2,
+ EMPTY_BODY,
+ EMPTY_BODY, // --
+ SECTION(NAMES, 1 + 10),
+ 2, // --
+ FOO_STRING,
+ NO_LOCAL_NAMES, // --
+ FOO_STRING,
+ NO_LOCAL_NAMES, // --
+ };
+ EXPECT_VERIFIES(data);
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc b/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc
index f3f604b3ed..ec188c00c9 100644
--- a/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc
@@ -26,14 +26,18 @@ TEST_F(WasmMacroGenTest, Constants) {
EXPECT_SIZE(2, WASM_I8(122));
EXPECT_SIZE(2, WASM_I8(254));
- EXPECT_SIZE(5, WASM_I32(1));
- EXPECT_SIZE(5, WASM_I32(10000));
- EXPECT_SIZE(5, WASM_I32(-9828934));
-
- EXPECT_SIZE(9, WASM_I64(1));
- EXPECT_SIZE(9, WASM_I64(10000));
- EXPECT_SIZE(9, WASM_I64(-9828934));
- EXPECT_SIZE(9, WASM_I64(0x123456789abcdef0ULL));
+ EXPECT_SIZE(2, WASM_I32V_1(1));
+ EXPECT_SIZE(3, WASM_I32V_2(200));
+ EXPECT_SIZE(4, WASM_I32V_3(10000));
+ EXPECT_SIZE(5, WASM_I32V_4(-9828934));
+ EXPECT_SIZE(6, WASM_I32V_5(-1119828934));
+
+ EXPECT_SIZE(2, WASM_I64V_1(1));
+ EXPECT_SIZE(3, WASM_I64V_2(300));
+ EXPECT_SIZE(4, WASM_I64V_3(10000));
+ EXPECT_SIZE(5, WASM_I64V_4(-9828934));
+ EXPECT_SIZE(6, WASM_I64V_5(-1119828934));
+ EXPECT_SIZE(10, WASM_I64V_9(0x123456789abcdef0ULL));
EXPECT_SIZE(5, WASM_F32(1.0f));
EXPECT_SIZE(5, WASM_F32(10000.0f));
@@ -52,7 +56,7 @@ TEST_F(WasmMacroGenTest, Statements) {
EXPECT_SIZE(4, WASM_STORE_GLOBAL(0, WASM_ZERO));
- EXPECT_SIZE(6, WASM_STORE_MEM(MachineType::Int32(), WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(7, WASM_STORE_MEM(MachineType::Int32(), WASM_ZERO, WASM_ZERO));
EXPECT_SIZE(4, WASM_IF(WASM_ZERO, WASM_NOP));
@@ -92,17 +96,8 @@ TEST_F(WasmMacroGenTest, MacroStatements) {
EXPECT_SIZE(3, WASM_CONTINUE(0));
}
-
-TEST_F(WasmMacroGenTest, TableSwitch) {
- EXPECT_SIZE(2, WASM_CASE(9));
- EXPECT_SIZE(2, WASM_CASE_BR(11));
-
- EXPECT_SIZE(7, WASM_TABLESWITCH_OP(0, 1, WASM_CASE(7)));
- EXPECT_SIZE(9, WASM_TABLESWITCH_OP(0, 2, WASM_CASE(7), WASM_CASE(8)));
-
- EXPECT_SIZE(4, WASM_TABLESWITCH_BODY(WASM_I8(88), WASM_I8(77)));
- EXPECT_SIZE(
- 6, WASM_TABLESWITCH_BODY(WASM_I8(33), WASM_I8(44), WASM_GET_LOCAL(0)));
+TEST_F(WasmMacroGenTest, BrTable) {
+ EXPECT_SIZE(8, WASM_BR_TABLE(WASM_ZERO, 1, BR_TARGET(1)));
}
@@ -113,9 +108,9 @@ TEST_F(WasmMacroGenTest, Expressions) {
EXPECT_SIZE(2, WASM_LOAD_GLOBAL(0));
EXPECT_SIZE(2, WASM_LOAD_GLOBAL(1));
EXPECT_SIZE(2, WASM_LOAD_GLOBAL(12));
- EXPECT_SIZE(4, WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO));
- EXPECT_SIZE(4, WASM_LOAD_MEM(MachineType::Float64(), WASM_ZERO));
- EXPECT_SIZE(4, WASM_LOAD_MEM(MachineType::Float32(), WASM_ZERO));
+ EXPECT_SIZE(5, WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO));
+ EXPECT_SIZE(5, WASM_LOAD_MEM(MachineType::Float64(), WASM_ZERO));
+ EXPECT_SIZE(5, WASM_LOAD_MEM(MachineType::Float32(), WASM_ZERO));
EXPECT_SIZE(3, WASM_NOT(WASM_ZERO));
@@ -173,6 +168,8 @@ TEST_F(WasmMacroGenTest, Int32Ops) {
EXPECT_SIZE(5, WASM_I32_SHL(WASM_ZERO, WASM_ZERO));
EXPECT_SIZE(5, WASM_I32_SHR(WASM_ZERO, WASM_ZERO));
EXPECT_SIZE(5, WASM_I32_SAR(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I32_ROR(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I32_ROL(WASM_ZERO, WASM_ZERO));
EXPECT_SIZE(5, WASM_I32_EQ(WASM_ZERO, WASM_ZERO));
EXPECT_SIZE(5, WASM_I32_LTS(WASM_ZERO, WASM_ZERO));
@@ -188,6 +185,8 @@ TEST_F(WasmMacroGenTest, Int32Ops) {
EXPECT_SIZE(3, WASM_I32_CLZ(WASM_ZERO));
EXPECT_SIZE(3, WASM_I32_CTZ(WASM_ZERO));
EXPECT_SIZE(3, WASM_I32_POPCNT(WASM_ZERO));
+
+ EXPECT_SIZE(3, WASM_I32_EQZ(WASM_ZERO));
}
@@ -205,6 +204,8 @@ TEST_F(WasmMacroGenTest, Int64Ops) {
EXPECT_SIZE(5, WASM_I64_SHL(WASM_ZERO, WASM_ZERO));
EXPECT_SIZE(5, WASM_I64_SHR(WASM_ZERO, WASM_ZERO));
EXPECT_SIZE(5, WASM_I64_SAR(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I64_ROR(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I64_ROL(WASM_ZERO, WASM_ZERO));
EXPECT_SIZE(5, WASM_I64_EQ(WASM_ZERO, WASM_ZERO));
EXPECT_SIZE(5, WASM_I64_LTS(WASM_ZERO, WASM_ZERO));
@@ -220,6 +221,8 @@ TEST_F(WasmMacroGenTest, Int64Ops) {
EXPECT_SIZE(3, WASM_I64_CLZ(WASM_ZERO));
EXPECT_SIZE(3, WASM_I64_CTZ(WASM_ZERO));
EXPECT_SIZE(3, WASM_I64_POPCNT(WASM_ZERO));
+
+ EXPECT_SIZE(3, WASM_I64_EQZ(WASM_ZERO));
}
@@ -307,10 +310,10 @@ static const MachineType kMemTypes[] = {
TEST_F(WasmMacroGenTest, LoadsAndStores) {
for (size_t i = 0; i < arraysize(kMemTypes); i++) {
- EXPECT_SIZE(4, WASM_LOAD_MEM(kMemTypes[i], WASM_ZERO));
+ EXPECT_SIZE(5, WASM_LOAD_MEM(kMemTypes[i], WASM_ZERO));
}
for (size_t i = 0; i < arraysize(kMemTypes); i++) {
- EXPECT_SIZE(6, WASM_STORE_MEM(kMemTypes[i], WASM_ZERO, WASM_GET_LOCAL(0)));
+ EXPECT_SIZE(7, WASM_STORE_MEM(kMemTypes[i], WASM_ZERO, WASM_GET_LOCAL(0)));
}
}
diff --git a/deps/v8/test/webkit/class-syntax-declaration.js b/deps/v8/test/webkit/class-syntax-declaration.js
index f8ecdbb151..3c9aed7441 100644
--- a/deps/v8/test/webkit/class-syntax-declaration.js
+++ b/deps/v8/test/webkit/class-syntax-declaration.js
@@ -21,7 +21,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-sloppy --harmony-destructuring-bind
+// Flags: --harmony-sloppy
description('Tests for ES6 class syntax declarations');
diff --git a/deps/v8/test/webkit/class-syntax-expression.js b/deps/v8/test/webkit/class-syntax-expression.js
index 182afb1ff0..3272b81f7e 100644
--- a/deps/v8/test/webkit/class-syntax-expression.js
+++ b/deps/v8/test/webkit/class-syntax-expression.js
@@ -21,7 +21,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-sloppy --harmony-destructuring-bind
+// Flags: --harmony-sloppy
description('Tests for ES6 class syntax expressions');
diff --git a/deps/v8/test/webkit/const-without-initializer-expected.txt b/deps/v8/test/webkit/const-without-initializer-expected.txt
deleted file mode 100644
index 30ff8ef84b..0000000000
--- a/deps/v8/test/webkit/const-without-initializer-expected.txt
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
-# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
-# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-Tests that declaring a const variable without initializing has the correct behavior and does not crash
-
-On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
-
-
-PASS f is undefined
-PASS f is undefined
-PASS successfullyParsed is true
-
-TEST COMPLETE
-
diff --git a/deps/v8/test/webkit/const-without-initializer.js b/deps/v8/test/webkit/const-without-initializer.js
deleted file mode 100644
index b1a86b9629..0000000000
--- a/deps/v8/test/webkit/const-without-initializer.js
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
-// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --legacy-const
-
-description(
-'Tests that declaring a const variable without initializing has the correct behavior and does not crash'
-);
-
-const f;
-
-shouldBe('f', 'undefined');
-
-f = 10;
-
-shouldBe('f', 'undefined');
diff --git a/deps/v8/test/webkit/constant-count-expected.txt b/deps/v8/test/webkit/constant-count-expected.txt
deleted file mode 100644
index dde93477f6..0000000000
--- a/deps/v8/test/webkit/constant-count-expected.txt
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
-# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
-# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-This test checks exceptional cases for constant counting in the parser.
-
-On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
-
-
-PASS a is undefined
-PASS f() is undefined
-PASS successfullyParsed is true
-
-TEST COMPLETE
-
diff --git a/deps/v8/test/webkit/constant-count.js b/deps/v8/test/webkit/constant-count.js
deleted file mode 100644
index 2e1ec5b92d..0000000000
--- a/deps/v8/test/webkit/constant-count.js
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
-// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --legacy-const
-
-description(
-"This test checks exceptional cases for constant counting in the parser."
-);
-
-const a;
-const b;
---a;
---b;
-
-shouldBe("a", "undefined");
-
-function f()
-{
- const a;
- const b;
- --a;
- --b;
-
- return a;
-}
-
-shouldBe("f()", "undefined");
diff --git a/deps/v8/test/webkit/exception-for-nonobject-expected.txt b/deps/v8/test/webkit/exception-for-nonobject-expected.txt
index 7b8883aa2a..866070bf47 100644
--- a/deps/v8/test/webkit/exception-for-nonobject-expected.txt
+++ b/deps/v8/test/webkit/exception-for-nonobject-expected.txt
@@ -27,7 +27,7 @@ On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE
PASS new {}.undefined threw exception TypeError: (intermediate value).undefined is not a constructor.
-PASS 1 instanceof {}.undefined threw exception TypeError: Expecting a function in instanceof check, but got undefined.
+PASS 1 instanceof {}.undefined threw exception TypeError: Right-hand side of 'instanceof' is not an object.
PASS successfullyParsed is true
TEST COMPLETE
diff --git a/deps/v8/test/webkit/exception-for-nonobject.js b/deps/v8/test/webkit/exception-for-nonobject.js
index d39c3e0512..f54915eb2e 100644
--- a/deps/v8/test/webkit/exception-for-nonobject.js
+++ b/deps/v8/test/webkit/exception-for-nonobject.js
@@ -21,6 +21,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --harmony-instanceof
+
description("Test for correct handling of exceptions from instanceof and 'new' expressions");
shouldThrow("new {}.undefined");
diff --git a/deps/v8/test/webkit/fast/js/arguments-expected.txt b/deps/v8/test/webkit/fast/js/arguments-expected.txt
index 17fbd88113..92ef56ed2d 100644
--- a/deps/v8/test/webkit/fast/js/arguments-expected.txt
+++ b/deps/v8/test/webkit/fast/js/arguments-expected.txt
@@ -157,7 +157,6 @@ PASS access_after_delete_extra_5(1, 2, 3, 4, 5) is 5
PASS argumentsParam(true) is true
PASS argumentsFunctionConstructorParam(true) is true
PASS argumentsVarUndefined() is '[object Arguments]'
-FAIL argumentsConstUndefined() should be [object Arguments]. Threw exception SyntaxError: Missing initializer in const declaration
PASS argumentCalleeInException() is argumentCalleeInException
PASS shadowedArgumentsApply([true]) is true
PASS shadowedArgumentsLength([]) is 0
diff --git a/deps/v8/test/webkit/fast/js/arguments.js b/deps/v8/test/webkit/fast/js/arguments.js
index f5b3abfdfb..0e23faaf15 100644
--- a/deps/v8/test/webkit/fast/js/arguments.js
+++ b/deps/v8/test/webkit/fast/js/arguments.js
@@ -574,13 +574,6 @@ function argumentsVarUndefined()
}
shouldBe("argumentsVarUndefined()", "'[object Arguments]'");
-function argumentsConstUndefined()
-{
- const arguments;
- return String(arguments);
-}
-shouldBe("argumentsConstUndefined()", "'[object Arguments]'");
-
function argumentCalleeInException() {
try {
throw "";
diff --git a/deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt b/deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt
index 4bda2b173a..a7a29606ed 100644
--- a/deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt
+++ b/deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt
@@ -206,14 +206,14 @@ PASS (function f(arg){'use strict'; var descriptor = Object.getOwnPropertyDescri
PASS (function f(arg){'use strict'; var descriptor = Object.getOwnPropertyDescriptor(f.__proto__, 'caller'); return descriptor.get === descriptor.set; })() is true
PASS (function f(arg){'use strict'; var descriptor = Object.getOwnPropertyDescriptor(f.__proto__, 'arguments'); return descriptor.get === descriptor.set; })() is true
PASS 'use strict'; (function f() { for(var i in this); })(); true; is true
-PASS 'use strict'̻ threw exception SyntaxError: Unexpected token ILLEGAL.
-PASS (function(){'use strict'̻}) threw exception SyntaxError: Unexpected token ILLEGAL.
-PASS 'use strict'5.f threw exception SyntaxError: Unexpected token ILLEGAL.
-PASS (function(){'use strict'5.f}) threw exception SyntaxError: Unexpected token ILLEGAL.
-PASS 'use strict';̻ threw exception SyntaxError: Unexpected token ILLEGAL.
-PASS (function(){'use strict';̻}) threw exception SyntaxError: Unexpected token ILLEGAL.
-PASS 'use strict';5.f threw exception SyntaxError: Unexpected token ILLEGAL.
-PASS (function(){'use strict';5.f}) threw exception SyntaxError: Unexpected token ILLEGAL.
+PASS 'use strict'̻ threw exception SyntaxError: Invalid or unexpected token.
+PASS (function(){'use strict'̻}) threw exception SyntaxError: Invalid or unexpected token.
+PASS 'use strict'5.f threw exception SyntaxError: Invalid or unexpected token.
+PASS (function(){'use strict'5.f}) threw exception SyntaxError: Invalid or unexpected token.
+PASS 'use strict';̻ threw exception SyntaxError: Invalid or unexpected token.
+PASS (function(){'use strict';̻}) threw exception SyntaxError: Invalid or unexpected token.
+PASS 'use strict';5.f threw exception SyntaxError: Invalid or unexpected token.
+PASS (function(){'use strict';5.f}) threw exception SyntaxError: Invalid or unexpected token.
PASS 'use strict';1-(eval=1); threw exception SyntaxError: Unexpected eval or arguments in strict mode.
PASS (function(){'use strict';1-(eval=1);}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
PASS 'use strict';arguments=1; threw exception SyntaxError: Unexpected eval or arguments in strict mode.
diff --git a/deps/v8/test/webkit/fast/js/kde/parse-expected.txt b/deps/v8/test/webkit/fast/js/kde/parse-expected.txt
index 5caefd1ef0..d1a9afe7af 100644
--- a/deps/v8/test/webkit/fast/js/kde/parse-expected.txt
+++ b/deps/v8/test/webkit/fast/js/kde/parse-expected.txt
@@ -39,13 +39,13 @@ PASS function test() { return 0 } lab: 1 is 1
PASS function test() { while(0) break lab } lab: 1 threw exception SyntaxError: Undefined label 'lab'.
PASS function test() { while(0) continue lab } lab: 1 threw exception SyntaxError: Undefined label 'lab'.
PASS var éĀʯΈᢨ = 101; éĀʯΈᢨ; is 101
-PASS var f÷; threw exception SyntaxError: Unexpected token ILLEGAL.
+PASS var f÷; threw exception SyntaxError: Invalid or unexpected token.
PASS var \u0061 = 102; a is 102
PASS var f\u0030 = 103; f0 is 103
PASS var \u00E9\u0100\u02AF\u0388\u18A8 = 104; \u00E9\u0100\u02AF\u0388\u18A8; is 104
-PASS var f\u00F7; threw exception SyntaxError: Unexpected token ILLEGAL.
-PASS var \u0030; threw exception SyntaxError: Unexpected token ILLEGAL.
-PASS var test = { }; test.i= 0; test.i\u002b= 1; test.i; threw exception SyntaxError: Unexpected token ILLEGAL.
+PASS var f\u00F7; threw exception SyntaxError: Invalid or unexpected token.
+PASS var \u0030; threw exception SyntaxError: Invalid or unexpected token.
+PASS var test = { }; test.i= 0; test.i\u002b= 1; test.i; threw exception SyntaxError: Invalid or unexpected token.
PASS var test = { }; test.i= 0; test.i+= 1; test.i; is 1
PASS successfullyParsed is true
diff --git a/deps/v8/test/webkit/fast/js/parser-syntax-check.js b/deps/v8/test/webkit/fast/js/parser-syntax-check.js
index a3fef13474..c00374506d 100644
--- a/deps/v8/test/webkit/fast/js/parser-syntax-check.js
+++ b/deps/v8/test/webkit/fast/js/parser-syntax-check.js
@@ -21,6 +21,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --no-harmony-restrictive-declarations
+
description(
"This test checks that the following expressions or statements are valid ECMASCRIPT code or should throw parse error"
);
diff --git a/deps/v8/test/webkit/fast/regex/lastIndex-expected.txt b/deps/v8/test/webkit/fast/regex/lastIndex-expected.txt
index 1e0959c3f9..71292c77b1 100644
--- a/deps/v8/test/webkit/fast/regex/lastIndex-expected.txt
+++ b/deps/v8/test/webkit/fast/regex/lastIndex-expected.txt
@@ -42,10 +42,10 @@ PASS var re = Object.defineProperty(/x/, 'lastIndex', {value:42}); re.lastIndex
PASS Object.defineProperty(Object.defineProperty(/x/, 'lastIndex', {writable:false}), 'lastIndex', {writable:true}); true threw exception TypeError: Cannot redefine property: lastIndex.
PASS Object.defineProperty(Object.defineProperty(/x/, 'lastIndex', {writable:false}), 'lastIndex', {value:42}); true threw exception TypeError: Cannot redefine property: lastIndex.
PASS Object.defineProperty(Object.defineProperty(/x/, 'lastIndex', {writable:false}), 'lastIndex', {value:0}); true is true
-PASS Object.defineProperty(/x/, 'lastIndex', {writable:false}).exec('') is null
+FAIL Object.defineProperty(/x/, 'lastIndex', {writable:false}).exec('') should be null. Threw exception TypeError: Cannot assign to read only property 'lastIndex' of object '[object RegExp]'
PASS Object.defineProperty(/x/, 'lastIndex', {writable:false}).exec('x') is ["x"]
-FAIL Object.defineProperty(/x/g, 'lastIndex', {writable:false}).exec('') should throw an exception. Was null.
-FAIL Object.defineProperty(/x/g, 'lastIndex', {writable:false}).exec('x') should throw an exception. Was x.
+PASS Object.defineProperty(/x/g, 'lastIndex', {writable:false}).exec('') threw exception TypeError: Cannot assign to read only property 'lastIndex' of object '[object RegExp]'.
+PASS Object.defineProperty(/x/g, 'lastIndex', {writable:false}).exec('x') threw exception TypeError: Cannot assign to read only property 'lastIndex' of object '[object RegExp]'.
PASS var re = /x/; Object.freeze(re); Object.isFrozen(re); is true
PASS successfullyParsed is true
diff --git a/deps/v8/test/webkit/fast/regex/toString-expected.txt b/deps/v8/test/webkit/fast/regex/toString-expected.txt
index 08852f9543..1f92569934 100644
--- a/deps/v8/test/webkit/fast/regex/toString-expected.txt
+++ b/deps/v8/test/webkit/fast/regex/toString-expected.txt
@@ -28,7 +28,7 @@ On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE
PASS RegExp('/').source is "\\/"
PASS RegExp('').source is "(?:)"
-FAIL RegExp.prototype.source should be (?:) (of type string). Was undefined (of type undefined).
+PASS RegExp.prototype.source is "(?:)"
PASS RegExp('/').toString() is "/\\//"
PASS RegExp('').toString() is "/(?:)/"
PASS RegExp.prototype.toString() is "/(?:)/"
diff --git a/deps/v8/test/webkit/function-declaration-statement.js b/deps/v8/test/webkit/function-declaration-statement.js
index e2ee343b62..34e3fdd14f 100644
--- a/deps/v8/test/webkit/function-declaration-statement.js
+++ b/deps/v8/test/webkit/function-declaration-statement.js
@@ -21,6 +21,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --no-harmony-restrictive-declarations
+
description(
"This test checks that function declarations are treated as statements."
);
diff --git a/deps/v8/test/webkit/instance-of-immediates-expected.txt b/deps/v8/test/webkit/instance-of-immediates-expected.txt
index db58f00d30..c713b35351 100644
--- a/deps/v8/test/webkit/instance-of-immediates-expected.txt
+++ b/deps/v8/test/webkit/instance-of-immediates-expected.txt
@@ -26,12 +26,12 @@ This test makes sure that instance of behaves correctly when the value, construc
On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
-PASS (1 instanceof 1) threw exception TypeError: Expecting a function in instanceof check, but got 1.
-PASS ({} instanceof 1) threw exception TypeError: Expecting a function in instanceof check, but got 1.
-PASS (obj instanceof 1) threw exception TypeError: Expecting a function in instanceof check, but got 1.
-PASS (1 instanceof {}) threw exception TypeError: Expecting a function in instanceof check, but got #<Object>.
-PASS ({} instanceof {}) threw exception TypeError: Expecting a function in instanceof check, but got #<Object>.
-PASS (obj instanceof {}) threw exception TypeError: Expecting a function in instanceof check, but got #<Object>.
+PASS (1 instanceof 1) threw exception TypeError: Right-hand side of 'instanceof' is not an object.
+PASS ({} instanceof 1) threw exception TypeError: Right-hand side of 'instanceof' is not an object.
+PASS (obj instanceof 1) threw exception TypeError: Right-hand side of 'instanceof' is not an object.
+PASS (1 instanceof {}) threw exception TypeError: Right-hand side of 'instanceof' is not callable.
+PASS ({} instanceof {}) threw exception TypeError: Right-hand side of 'instanceof' is not callable.
+PASS (obj instanceof {}) threw exception TypeError: Right-hand side of 'instanceof' is not callable.
PASS (1 instanceof Constructor) is false
PASS ({} instanceof Constructor) is false
PASS (obj instanceof Constructor) is true
diff --git a/deps/v8/test/webkit/instance-of-immediates.js b/deps/v8/test/webkit/instance-of-immediates.js
index 649a29f5a4..be63e4b00b 100644
--- a/deps/v8/test/webkit/instance-of-immediates.js
+++ b/deps/v8/test/webkit/instance-of-immediates.js
@@ -21,6 +21,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --harmony-instanceof
+
description('This test makes sure that instance of behaves correctly when the value, constructor, or its prototype are immediates.');
// A Constructor to use check for instances of, and an instance called obj.
diff --git a/deps/v8/test/webkit/testcfg.py b/deps/v8/test/webkit/testcfg.py
index ed811d2922..01a2713312 100644
--- a/deps/v8/test/webkit/testcfg.py
+++ b/deps/v8/test/webkit/testcfg.py
@@ -117,10 +117,10 @@ class WebkitTestSuite(testsuite.TestSuite):
string == "Warning: unknown flag --enable-slow-asserts." or
string == "Try --help for options")
- def IsFailureOutput(self, output, testpath):
- if super(WebkitTestSuite, self).IsFailureOutput(output, testpath):
+ def IsFailureOutput(self, testcase):
+ if super(WebkitTestSuite, self).IsFailureOutput(testcase):
return True
- file_name = os.path.join(self.root, testpath) + "-expected.txt"
+ file_name = os.path.join(self.root, testcase.path) + "-expected.txt"
with file(file_name, "r") as expected:
expected_lines = expected.readlines()
@@ -136,7 +136,7 @@ class WebkitTestSuite(testsuite.TestSuite):
def ActBlockIterator():
"""Iterates over blocks of actual output lines."""
- lines = output.stdout.splitlines()
+ lines = testcase.output.stdout.splitlines()
start_index = 0
found_eqeq = False
for index, line in enumerate(lines):
@@ -147,7 +147,7 @@ class WebkitTestSuite(testsuite.TestSuite):
found_eqeq = True
else:
yield ActIterator(lines[start_index:index])
- # The next block of ouput lines starts after the separator.
+ # The next block of output lines starts after the separator.
start_index = index + 1
# Iterate over complete output if no separator was found.
if not found_eqeq:
diff --git a/deps/v8/test/webkit/webkit.status b/deps/v8/test/webkit/webkit.status
index fa527427bc..e23b9cfa0b 100644
--- a/deps/v8/test/webkit/webkit.status
+++ b/deps/v8/test/webkit/webkit.status
@@ -97,6 +97,19 @@
}], # 'gc_stress == True and mode == debug'
##############################################################################
+['ignition == True', {
+ # TODO(4680): Throws a RangeError due to stack overflow. Need investigation.
+ 'fast/js/excessive-comma-usage': [SKIP], # Stack is brittle, SKIP not FAIL.
+}], # ignition == True
+
+##############################################################################
+['ignition == True and msan', {
+ # TODO(mythria,4680): Too slow and timeout on ignition.
+ 'dfg-double-vote-fuzz': [SKIP],
+ 'dfg-int-overflow-in-loop': [SKIP],
+}], # ignition == True and msan
+
+##############################################################################
['gcov_coverage', {
# Tests taking too long or getting too large call stacks.
'fast/js/excessive-comma-usage': [SKIP],
diff --git a/deps/v8/tools/eval_gc_nvp.py b/deps/v8/tools/eval_gc_nvp.py
index f18a579391..fcb6d8b9a2 100755
--- a/deps/v8/tools/eval_gc_nvp.py
+++ b/deps/v8/tools/eval_gc_nvp.py
@@ -74,10 +74,11 @@ class Histogram:
class Category:
- def __init__(self, key, histogram):
+ def __init__(self, key, histogram, csv):
self.key = key
self.values = []
self.histogram = histogram
+ self.csv = csv
def process_entry(self, entry):
if self.key in entry:
@@ -92,18 +93,32 @@ class Category:
return max(self.values)
def avg(self):
+ if len(self.values) == 0:
+ return 0.0
return sum(self.values) / len(self.values)
+ def empty(self):
+ return len(self.values) == 0
+
def __str__(self):
- ret = [self.key]
- ret.append(" len: {0}".format(len(self.values)))
- if len(self.values) > 0:
- ret.append(" min: {0}".format(min(self.values)))
- ret.append(" max: {0}".format(max(self.values)))
- ret.append(" avg: {0}".format(sum(self.values) / len(self.values)))
- if self.histogram:
- ret.append(str(self.histogram))
- return "\n".join(ret)
+ if self.csv:
+ ret = [self.key]
+ ret.append(len(self.values))
+ ret.append(self.min())
+ ret.append(self.max())
+ ret.append(self.avg())
+ ret = [str(x) for x in ret]
+ return ",".join(ret)
+ else:
+ ret = [self.key]
+ ret.append(" len: {0}".format(len(self.values)))
+ if len(self.values) > 0:
+ ret.append(" min: {0}".format(self.min()))
+ ret.append(" max: {0}".format(self.max()))
+ ret.append(" avg: {0}".format(self.avg()))
+ if self.histogram:
+ ret.append(str(self.histogram))
+ return "\n".join(ret)
def __repr__(self):
return "<Category: {0}>".format(self.key)
@@ -143,6 +158,8 @@ def main():
type=str, nargs='?',
default="no",
help="rank keys by metric (default: no)")
+ parser.add_argument('--csv', dest='csv',
+ action='store_true', help='provide output as csv')
args = parser.parse_args()
histogram = None
@@ -154,7 +171,7 @@ def main():
bucket_trait = LinearBucket(args.linear_histogram_granularity)
histogram = Histogram(bucket_trait, not args.histogram_omit_empty)
- categories = [ Category(key, deepcopy(histogram))
+ categories = [ Category(key, deepcopy(histogram), args.csv)
for key in args.keys ]
while True:
@@ -165,6 +182,9 @@ def main():
for category in categories:
category.process_entry(obj)
+ # Filter out empty categories.
+ categories = [x for x in categories if not x.empty()]
+
if args.rank != "no":
categories = sorted(categories, key=make_key_func(args.rank), reverse=True)
diff --git a/deps/v8/tools/eval_gc_time.sh b/deps/v8/tools/eval_gc_time.sh
index 92246d3866..ceb4db54cb 100755
--- a/deps/v8/tools/eval_gc_time.sh
+++ b/deps/v8/tools/eval_gc_time.sh
@@ -7,38 +7,73 @@
# Convenience Script used to rank GC NVP output.
print_usage_and_die() {
- echo "Usage: $0 new-gen-rank|old-gen-rank max|avg logfile"
+ echo "Usage: $0 [OPTIONS]"
+ echo ""
+ echo "OPTIONS"
+ echo " -r|--rank new-gen-rank|old-gen-rank GC mode to profile"
+ echo " (default: old-gen-rank)"
+ echo " -s|--sort avg|max sorting mode (default: max)"
+ echo " -t|--top-level include top-level categories"
+ echo " -c|--csv provide csv output"
+ echo " -f|--file FILE profile input in a file"
+ echo " (default: stdin)"
exit 1
}
-if [ $# -ne 3 ]; then
- print_usage_and_die
-fi
-
-case $1 in
- new-gen-rank|old-gen-rank)
- OP=$1
- ;;
- *)
- print_usage_and_die
-esac
-
-case $2 in
- max|avg)
- RANK_MODE=$2
- ;;
- *)
- print_usage_and_die
-esac
+OP=old-gen-rank
+RANK_MODE=max
+TOP_LEVEL=no
+CSV=""
+LOGFILE=/dev/stdin
-LOGFILE=$3
+while [[ $# -ge 1 ]]
+do
+ key="$1"
+ case $key in
+ -r|--rank)
+ case $2 in
+ new-gen-rank|old-gen-rank)
+ OP="$2"
+ ;;
+ *)
+ print_usage_and_die
+ esac
+ shift
+ ;;
+ -s|--sort)
+ case $2 in
+ max|avg)
+ RANK_MODE=$2
+ ;;
+ *)
+ print_usage_and_die
+ esac
+ shift
+ ;;
+ -t|--top-level)
+ TOP_LEVEL=yes
+ ;;
+ -c|--csv)
+ CSV=" --csv "
+ ;;
+ -f|--file)
+ LOGFILE=$2
+ shift
+ ;;
+ *)
+ break
+ ;;
+ esac
+ shift
+done
-GENERAL_INTERESTING_KEYS="\
- pause \
-"
+if [[ $# -ne 0 ]]; then
+ echo "Unknown option(s): $@"
+ echo ""
+ print_usage_and_die
+fi
INTERESTING_NEW_GEN_KEYS="\
- ${GENERAL_INTERESTING_KEYS} \
scavenge \
weak \
roots \
@@ -49,9 +84,6 @@ INTERESTING_NEW_GEN_KEYS="\
"
INTERESTING_OLD_GEN_KEYS="\
- ${GENERAL_INTERESTING_KEYS} \
- external \
- clear \
clear.code_flush \
clear.dependent_code \
clear.global_handles \
@@ -62,28 +94,48 @@ INTERESTING_OLD_GEN_KEYS="\
clear.weak_cells \
clear.weak_collections \
clear.weak_lists \
- finish \
- evacuate \
evacuate.candidates \
evacuate.clean_up \
- evacuate.new_space \
+ evacuate.copy \
evacuate.update_pointers \
evacuate.update_pointers.between_evacuated \
evacuate.update_pointers.to_evacuated \
evacuate.update_pointers.to_new \
evacuate.update_pointers.weak \
- mark \
+ external.mc_prologue \
+ external.mc_epilogue \
+ external.mc_incremental_prologue \
+ external.mc_incremental_epilogue \
+ external.weak_global_handles \
mark.finish_incremental \
mark.prepare_code_flush \
mark.roots \
mark.weak_closure \
- sweep \
+ mark.weak_closure.ephemeral \
+ mark.weak_closure.weak_handles \
+ mark.weak_closure.weak_roots \
+ mark.weak_closure.harmony \
sweep.code \
sweep.map \
sweep.old \
- incremental_finalize \
"
+if [[ "$TOP_LEVEL" = "yes" ]]; then
+ INTERESTING_OLD_GEN_KEYS="\
+ ${INTERESTING_OLD_GEN_KEYS} \
+ clear \
+ evacuate \
+ finish \
+ incremental_finalize \
+ mark \
+ pause
+ sweep \
+ "
+ INTERESTING_NEW_GEN_KEYS="\
+ ${INTERESTING_NEW_GEN_KEYS} \
+ "
+fi
+
BASE_DIR=$(dirname $0)
case $OP in
@@ -92,16 +144,17 @@ case $OP in
| $BASE_DIR/eval_gc_nvp.py \
--no-histogram \
--rank $RANK_MODE \
+ $CSV \
${INTERESTING_NEW_GEN_KEYS}
;;
old-gen-rank)
- cat $LOGFILE | grep "gc=ms" | grep "reduce_memory=0" | grep -v "steps=0" \
+ cat $LOGFILE | grep "gc=ms" \
| $BASE_DIR/eval_gc_nvp.py \
--no-histogram \
--rank $RANK_MODE \
+ $CSV \
${INTERESTING_OLD_GEN_KEYS}
;;
*)
;;
esac
-
diff --git a/deps/v8/tools/external-reference-check.py b/deps/v8/tools/external-reference-check.py
index 287eca4251..be01dec1d0 100644
--- a/deps/v8/tools/external-reference-check.py
+++ b/deps/v8/tools/external-reference-check.py
@@ -8,7 +8,7 @@ import os
import sys
DECLARE_FILE = "src/assembler.h"
-REGISTER_FILE = "src/snapshot/serialize.cc"
+REGISTER_FILE = "src/external-reference-table.cc"
DECLARE_RE = re.compile("\s*static ExternalReference ([^(]+)\(")
REGISTER_RE = re.compile("\s*Add\(ExternalReference::([^(]+)\(")
diff --git a/deps/v8/tools/gcmole/gcmole.lua b/deps/v8/tools/gcmole/gcmole.lua
index 9739684629..82ea4e0295 100644
--- a/deps/v8/tools/gcmole/gcmole.lua
+++ b/deps/v8/tools/gcmole/gcmole.lua
@@ -111,6 +111,7 @@ local function MakeClangCommandLine(
.. " -DENABLE_DEBUGGER_SUPPORT"
.. " -DV8_I18N_SUPPORT"
.. " -I./"
+ .. " -Iinclude/"
.. " -Ithird_party/icu/source/common"
.. " -Ithird_party/icu/source/i18n"
.. " " .. arch_options
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index d808a2f2d6..a0afc06ab9 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -92,8 +92,6 @@ consts_misc = [
'value': 'DescriptorArray::kFirstIndex' },
{ 'name': 'prop_type_field',
'value': 'DATA' },
- { 'name': 'prop_type_const_field',
- 'value': 'DATA_CONSTANT' },
{ 'name': 'prop_type_mask',
'value': 'PropertyDetails::TypeField::kMask' },
{ 'name': 'prop_index_mask',
@@ -156,8 +154,6 @@ consts_misc = [
'value': 'StandardFrameConstants::kContextOffset' },
{ 'name': 'off_fp_constant_pool',
'value': 'StandardFrameConstants::kConstantPoolOffset' },
- { 'name': 'off_fp_marker',
- 'value': 'StandardFrameConstants::kMarkerOffset' },
{ 'name': 'off_fp_function',
'value': 'JavaScriptFrameConstants::kFunctionOffset' },
{ 'name': 'off_fp_args',
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
index 66f579d33d..b09fd1f289 100644
--- a/deps/v8/tools/gyp/v8.gyp
+++ b/deps/v8/tools/gyp/v8.gyp
@@ -32,6 +32,7 @@
'v8_random_seed%': 314159265,
'v8_vector_stores%': 0,
'embed_script%': "",
+ 'warmup_script%': "",
'v8_extra_library_files%': [],
'v8_experimental_extra_library_files%': [],
'mksnapshot_exec': '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot<(EXECUTABLE_SUFFIX)',
@@ -204,15 +205,13 @@
'inputs': [
'<(mksnapshot_exec)',
'<(embed_script)',
+ '<(warmup_script)',
],
'outputs': [
'<(INTERMEDIATE_DIR)/snapshot.cc',
],
'variables': {
- 'mksnapshot_flags': [
- '--log-snapshot-positions',
- '--logfile', '<(INTERMEDIATE_DIR)/snapshot.log',
- ],
+ 'mksnapshot_flags': [],
'conditions': [
['v8_random_seed!=0', {
'mksnapshot_flags': ['--random-seed', '<(v8_random_seed)'],
@@ -227,6 +226,7 @@
'<@(mksnapshot_flags)',
'--startup_src', '<@(INTERMEDIATE_DIR)/snapshot.cc',
'<(embed_script)',
+ '<(warmup_script)',
],
},
],
@@ -308,8 +308,6 @@
# variable.
'mksnapshot_flags_ignition': [
'--ignition',
- '--log-snapshot-positions',
- '--logfile', '<(INTERMEDIATE_DIR)/snapshot_ignition.log',
],
'conditions': [
['v8_random_seed!=0', {
@@ -330,6 +328,7 @@
'<@(mksnapshot_flags_ignition)',
'--startup_blob', '<(PRODUCT_DIR)/snapshot_blob_ignition_host.bin',
'<(embed_script)',
+ '<(warmup_script)',
],
}, {
'outputs': ['<(PRODUCT_DIR)/snapshot_blob_ignition.bin'],
@@ -338,6 +337,7 @@
'<@(mksnapshot_flags_ignition)',
'--startup_blob', '<(PRODUCT_DIR)/snapshot_blob_ignition.bin',
'<(embed_script)',
+ '<(warmup_script)',
],
}],
],
@@ -348,6 +348,7 @@
'<@(mksnapshot_flags_ignition)',
'--startup_blob', '<(PRODUCT_DIR)/snapshot_blob_ignition.bin',
'<(embed_script)',
+ '<(warmup_script)',
],
}],
],
@@ -370,10 +371,7 @@
'action_name': 'run_mksnapshot (external)',
'inputs': ['<(mksnapshot_exec)'],
'variables': {
- 'mksnapshot_flags': [
- '--log-snapshot-positions',
- '--logfile', '<(INTERMEDIATE_DIR)/snapshot.log',
- ],
+ 'mksnapshot_flags': [],
'conditions': [
['v8_random_seed!=0', {
'mksnapshot_flags': ['--random-seed', '<(v8_random_seed)'],
@@ -393,6 +391,7 @@
'<@(mksnapshot_flags)',
'--startup_blob', '<(PRODUCT_DIR)/snapshot_blob_host.bin',
'<(embed_script)',
+ '<(warmup_script)',
],
}, {
'outputs': ['<(PRODUCT_DIR)/snapshot_blob.bin'],
@@ -401,6 +400,7 @@
'<@(mksnapshot_flags)',
'--startup_blob', '<(PRODUCT_DIR)/snapshot_blob.bin',
'<(embed_script)',
+ '<(warmup_script)',
],
}],
],
@@ -411,6 +411,7 @@
'<@(mksnapshot_flags)',
'--startup_blob', '<(PRODUCT_DIR)/snapshot_blob.bin',
'<(embed_script)',
+ '<(warmup_script)',
],
}],
],
@@ -459,6 +460,8 @@
'../../src/api-experimental.h',
'../../src/api.cc',
'../../src/api.h',
+ '../../src/api-arguments.cc',
+ '../../src/api-arguments.h',
'../../src/api-natives.cc',
'../../src/api-natives.h',
'../../src/arguments.cc',
@@ -521,6 +524,7 @@
'../../src/code-stubs-hydrogen.cc',
'../../src/codegen.cc',
'../../src/codegen.h',
+ '../../src/collector.h',
'../../src/compilation-cache.cc',
'../../src/compilation-cache.h',
'../../src/compilation-dependencies.cc',
@@ -574,8 +578,6 @@
'../../src/compiler/escape-analysis.h',
"../../src/compiler/escape-analysis-reducer.cc",
"../../src/compiler/escape-analysis-reducer.h",
- '../../src/compiler/fast-accessor-assembler.cc',
- '../../src/compiler/fast-accessor-assembler.h',
'../../src/compiler/frame.cc',
'../../src/compiler/frame.h',
'../../src/compiler/frame-elider.cc',
@@ -730,11 +732,11 @@
'../../src/conversions.h',
'../../src/counters.cc',
'../../src/counters.h',
+ '../../src/crankshaft/compilation-phase.cc',
+ '../../src/crankshaft/compilation-phase.h',
'../../src/crankshaft/hydrogen-alias-analysis.h',
'../../src/crankshaft/hydrogen-bce.cc',
'../../src/crankshaft/hydrogen-bce.h',
- '../../src/crankshaft/hydrogen-bch.cc',
- '../../src/crankshaft/hydrogen-bch.h',
'../../src/crankshaft/hydrogen-canonicalize.cc',
'../../src/crankshaft/hydrogen-canonicalize.h',
'../../src/crankshaft/hydrogen-check-elimination.cc',
@@ -835,8 +837,12 @@
'../../src/extensions/statistics-extension.h',
'../../src/extensions/trigger-failure-extension.cc',
'../../src/extensions/trigger-failure-extension.h',
+ '../../src/external-reference-table.cc',
+ '../../src/external-reference-table.h',
'../../src/factory.cc',
'../../src/factory.h',
+ '../../src/fast-accessor-assembler.cc',
+ '../../src/fast-accessor-assembler.h',
'../../src/fast-dtoa.cc',
'../../src/fast-dtoa.h',
'../../src/field-index.h',
@@ -889,6 +895,7 @@
'../../src/heap/objects-visiting-inl.h',
'../../src/heap/objects-visiting.cc',
'../../src/heap/objects-visiting.h',
+ '../../src/heap/page-parallel-job.h',
'../../src/heap/remembered-set.cc',
'../../src/heap/remembered-set.h',
'../../src/heap/scavenge-job.h',
@@ -897,12 +904,9 @@
'../../src/heap/scavenger.cc',
'../../src/heap/scavenger.h',
'../../src/heap/slot-set.h',
- '../../src/heap/slots-buffer.cc',
- '../../src/heap/slots-buffer.h',
'../../src/heap/spaces-inl.h',
'../../src/heap/spaces.cc',
'../../src/heap/spaces.h',
- '../../src/heap/store-buffer-inl.h',
'../../src/heap/store-buffer.cc',
'../../src/heap/store-buffer.h',
'../../src/i18n.cc',
@@ -947,8 +951,8 @@
'../../src/interpreter/interpreter.h',
'../../src/interpreter/interpreter-assembler.cc',
'../../src/interpreter/interpreter-assembler.h',
- '../../src/interpreter/register-translator.cc',
- '../../src/interpreter/register-translator.h',
+ '../../src/interpreter/interpreter-intrinsics.cc',
+ '../../src/interpreter/interpreter-intrinsics.h',
'../../src/interpreter/source-position-table.cc',
'../../src/interpreter/source-position-table.h',
'../../src/isolate-inl.h',
@@ -956,8 +960,8 @@
'../../src/isolate.h',
'../../src/json-parser.h',
'../../src/json-stringifier.h',
- '../../src/key-accumulator.h',
- '../../src/key-accumulator.cc',
+ '../../src/keys.h',
+ '../../src/keys.cc',
'../../src/layout-descriptor-inl.h',
'../../src/layout-descriptor.cc',
'../../src/layout-descriptor.h',
@@ -1013,6 +1017,8 @@
'../../src/parsing/token.h',
'../../src/pending-compilation-error-handler.cc',
'../../src/pending-compilation-error-handler.h',
+ '../../src/perf-jit.cc',
+ '../../src/perf-jit.h',
'../../src/profiler/allocation-tracker.cc',
'../../src/profiler/allocation-tracker.h',
'../../src/profiler/circular-queue-inl.h',
@@ -1104,14 +1110,24 @@
'../../src/signature.h',
'../../src/simulator.h',
'../../src/small-pointer-list.h',
+ '../../src/snapshot/code-serializer.cc',
+ '../../src/snapshot/code-serializer.h',
+ '../../src/snapshot/deserializer.cc',
+ '../../src/snapshot/deserializer.h',
'../../src/snapshot/natives.h',
'../../src/snapshot/natives-common.cc',
- '../../src/snapshot/serialize.cc',
- '../../src/snapshot/serialize.h',
+ '../../src/snapshot/partial-serializer.cc',
+ '../../src/snapshot/partial-serializer.h',
+ '../../src/snapshot/serializer.cc',
+ '../../src/snapshot/serializer.h',
+ '../../src/snapshot/serializer-common.cc',
+ '../../src/snapshot/serializer-common.h',
'../../src/snapshot/snapshot.h',
'../../src/snapshot/snapshot-common.cc',
'../../src/snapshot/snapshot-source-sink.cc',
'../../src/snapshot/snapshot-source-sink.h',
+ '../../src/snapshot/startup-serializer.cc',
+ '../../src/snapshot/startup-serializer.h',
'../../src/source-position.h',
'../../src/splay-tree.h',
'../../src/splay-tree-inl.h',
@@ -1538,7 +1554,6 @@
'../../src/ppc/frames-ppc.cc',
'../../src/ppc/frames-ppc.h',
'../../src/ppc/interface-descriptors-ppc.cc',
- '../../src/ppc/interface-descriptors-ppc.h',
'../../src/ppc/macro-assembler-ppc.cc',
'../../src/ppc/macro-assembler-ppc.h',
'../../src/ppc/simulator-ppc.cc',
@@ -1547,6 +1562,49 @@
'../../src/regexp/ppc/regexp-macro-assembler-ppc.h',
],
}],
+ ['v8_target_arch=="s390" or v8_target_arch=="s390x"', {
+ 'sources': [ ### gcmole(arch:s390) ###
+ '../../src/compiler/s390/code-generator-s390.cc',
+ '../../src/compiler/s390/instruction-codes-s390.h',
+ '../../src/compiler/s390/instruction-scheduler-s390.cc',
+ '../../src/compiler/s390/instruction-selector-s390.cc',
+ '../../src/crankshaft/s390/lithium-codegen-s390.cc',
+ '../../src/crankshaft/s390/lithium-codegen-s390.h',
+ '../../src/crankshaft/s390/lithium-gap-resolver-s390.cc',
+ '../../src/crankshaft/s390/lithium-gap-resolver-s390.h',
+ '../../src/crankshaft/s390/lithium-s390.cc',
+ '../../src/crankshaft/s390/lithium-s390.h',
+ '../../src/debug/s390/debug-s390.cc',
+ '../../src/full-codegen/s390/full-codegen-s390.cc',
+ '../../src/ic/s390/access-compiler-s390.cc',
+ '../../src/ic/s390/handler-compiler-s390.cc',
+ '../../src/ic/s390/ic-compiler-s390.cc',
+ '../../src/ic/s390/ic-s390.cc',
+ '../../src/ic/s390/stub-cache-s390.cc',
+ '../../src/regexp/s390/regexp-macro-assembler-s390.cc',
+ '../../src/regexp/s390/regexp-macro-assembler-s390.h',
+ '../../src/s390/assembler-s390.cc',
+ '../../src/s390/assembler-s390.h',
+ '../../src/s390/assembler-s390-inl.h',
+ '../../src/s390/builtins-s390.cc',
+ '../../src/s390/codegen-s390.cc',
+ '../../src/s390/codegen-s390.h',
+ '../../src/s390/code-stubs-s390.cc',
+ '../../src/s390/code-stubs-s390.h',
+ '../../src/s390/constants-s390.cc',
+ '../../src/s390/constants-s390.h',
+ '../../src/s390/cpu-s390.cc',
+ '../../src/s390/deoptimizer-s390.cc',
+ '../../src/s390/disasm-s390.cc',
+ '../../src/s390/frames-s390.cc',
+ '../../src/s390/frames-s390.h',
+ '../../src/s390/interface-descriptors-s390.cc',
+ '../../src/s390/macro-assembler-s390.cc',
+ '../../src/s390/macro-assembler-s390.h',
+ '../../src/s390/simulator-s390.cc',
+ '../../src/s390/simulator-s390.h',
+ ],
+ }],
['OS=="win"', {
'variables': {
'gyp_generators': '<!(echo $GYP_GENERATORS)',
@@ -1607,6 +1665,8 @@
'../..',
],
'sources': [
+ '../../src/base/accounting-allocator.cc',
+ '../../src/base/accounting-allocator.h',
'../../src/base/adapters.h',
'../../src/base/atomicops.h',
'../../src/base/atomicops_internals_arm64_gcc.h',
@@ -1890,6 +1950,11 @@
'toolsets': ['target'],
}],
],
+ 'direct_dependent_settings': {
+ 'include_dirs': [
+ '../../include',
+ ],
+ },
},
{
'target_name': 'natives_blob',
@@ -1999,6 +2064,7 @@
'../../src/js/string-iterator.js',
'../../src/js/templates.js',
'../../src/js/spread.js',
+ '../../src/js/proxy.js',
'../../src/debug/mirrors.js',
'../../src/debug/debug.js',
'../../src/debug/liveedit.js',
@@ -2006,15 +2072,15 @@
'experimental_library_files': [
'../../src/js/macros.py',
'../../src/messages.h',
- '../../src/js/proxy.js',
'../../src/js/generator.js',
'../../src/js/harmony-atomics.js',
- '../../src/js/harmony-regexp.js',
+ '../../src/js/harmony-regexp-exec.js',
'../../src/js/harmony-object-observe.js',
'../../src/js/harmony-sharedarraybuffer.js',
'../../src/js/harmony-simd.js',
'../../src/js/harmony-species.js',
'../../src/js/harmony-unicode-regexps.js',
+ '../../src/js/harmony-string-padding.js',
'../../src/js/promise-extra.js',
],
'libraries_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
diff --git a/deps/v8/tools/ll_prof.py b/deps/v8/tools/ll_prof.py
index e65796145e..ca2cb00e4b 100755
--- a/deps/v8/tools/ll_prof.py
+++ b/deps/v8/tools/ll_prof.py
@@ -66,20 +66,20 @@ We have a convenience script that handles all of the above for you:
Examples:
# Print flat profile with annotated disassembly for the 10 top
- # symbols. Use default log names and include the snapshot log.
- $ %prog --snapshot --disasm-top=10
+ # symbols. Use default log names.
+ $ %prog --disasm-top=10
# Print flat profile with annotated disassembly for all used symbols.
# Use default log names and include kernel symbols into analysis.
$ %prog --disasm-all --kernel
# Print flat profile. Use custom log names.
- $ %prog --log=foo.log --snapshot-log=snap-foo.log --trace=foo.data --snapshot
+ $ %prog --log=foo.log --trace=foo.data
"""
JS_ORIGIN = "js"
-JS_SNAPSHOT_ORIGIN = "js-snapshot"
+
class Code(object):
"""Code object."""
@@ -199,7 +199,7 @@ class Code(object):
self.origin)
def _GetDisasmLines(self, arch, options):
- if self.origin == JS_ORIGIN or self.origin == JS_SNAPSHOT_ORIGIN:
+ if self.origin == JS_ORIGIN:
inplace = False
filename = options.log + ".ll"
else:
@@ -328,30 +328,6 @@ class CodeInfo(object):
self.header_size = header_size
-class SnapshotLogReader(object):
- """V8 snapshot log reader."""
-
- _SNAPSHOT_CODE_NAME_RE = re.compile(
- r"snapshot-code-name,(\d+),\"(.*)\"")
-
- def __init__(self, log_name):
- self.log_name = log_name
-
- def ReadNameMap(self):
- log = open(self.log_name, "r")
- try:
- snapshot_pos_to_name = {}
- for line in log:
- match = SnapshotLogReader._SNAPSHOT_CODE_NAME_RE.match(line)
- if match:
- pos = int(match.group(1))
- name = match.group(2)
- snapshot_pos_to_name[pos] = name
- finally:
- log.close()
- return snapshot_pos_to_name
-
-
class LogReader(object):
"""V8 low-level (binary) log reader."""
@@ -365,17 +341,13 @@ class LogReader(object):
_CODE_CREATE_TAG = "C"
_CODE_MOVE_TAG = "M"
- _CODE_DELETE_TAG = "D"
- _SNAPSHOT_POSITION_TAG = "P"
_CODE_MOVING_GC_TAG = "G"
- def __init__(self, log_name, code_map, snapshot_pos_to_name):
+ def __init__(self, log_name, code_map):
self.log_file = open(log_name, "r")
self.log = mmap.mmap(self.log_file.fileno(), 0, mmap.MAP_PRIVATE)
self.log_pos = 0
self.code_map = code_map
- self.snapshot_pos_to_name = snapshot_pos_to_name
- self.address_to_snapshot_name = {}
self.arch = self.log[:self.log.find("\0")]
self.log_pos += len(self.arch) + 1
@@ -395,17 +367,12 @@ class LogReader(object):
self.code_delete_struct = LogReader._DefineStruct([
("address", pointer_type)])
- self.snapshot_position_struct = LogReader._DefineStruct([
- ("address", pointer_type),
- ("position", ctypes.c_int32)])
-
def ReadUpToGC(self):
while self.log_pos < self.log.size():
tag = self.log[self.log_pos]
self.log_pos += 1
if tag == LogReader._CODE_MOVING_GC_TAG:
- self.address_to_snapshot_name.clear()
return
if tag == LogReader._CODE_CREATE_TAG:
@@ -413,12 +380,8 @@ class LogReader(object):
self.log_pos += ctypes.sizeof(event)
start_address = event.code_address
end_address = start_address + event.code_size
- if start_address in self.address_to_snapshot_name:
- name = self.address_to_snapshot_name[start_address]
- origin = JS_SNAPSHOT_ORIGIN
- else:
- name = self.log[self.log_pos:self.log_pos + event.name_size]
- origin = JS_ORIGIN
+ name = self.log[self.log_pos:self.log_pos + event.name_size]
+ origin = JS_ORIGIN
self.log_pos += event.name_size
origin_offset = self.log_pos
self.log_pos += event.code_size
@@ -459,30 +422,6 @@ class LogReader(object):
self.code_map.Add(code)
continue
- if tag == LogReader._CODE_DELETE_TAG:
- event = self.code_delete_struct.from_buffer(self.log, self.log_pos)
- self.log_pos += ctypes.sizeof(event)
- old_start_address = event.address
- code = self.code_map.Find(old_start_address)
- if not code:
- print >>sys.stderr, "Warning: Not found %x" % old_start_address
- continue
- assert code.start_address == old_start_address, \
- "Inexact delete address %x for %s" % (old_start_address, code)
- self.code_map.Remove(code)
- continue
-
- if tag == LogReader._SNAPSHOT_POSITION_TAG:
- event = self.snapshot_position_struct.from_buffer(self.log,
- self.log_pos)
- self.log_pos += ctypes.sizeof(event)
- start_address = event.address
- snapshot_pos = event.position
- if snapshot_pos in self.snapshot_pos_to_name:
- self.address_to_snapshot_name[start_address] = \
- self.snapshot_pos_to_name[snapshot_pos]
- continue
-
assert False, "Unknown tag %s" % tag
def Dispose(self):
@@ -898,16 +837,9 @@ def PrintDot(code_map, options):
if __name__ == "__main__":
parser = optparse.OptionParser(USAGE)
- parser.add_option("--snapshot-log",
- default="obj/release/snapshot.log",
- help="V8 snapshot log file name [default: %default]")
parser.add_option("--log",
default="v8.log",
help="V8 log file name [default: %default]")
- parser.add_option("--snapshot",
- default=False,
- action="store_true",
- help="process V8 snapshot log [default: %default]")
parser.add_option("--trace",
default="perf.data",
help="perf trace file name [default: %default]")
@@ -945,12 +877,7 @@ if __name__ == "__main__":
options, args = parser.parse_args()
if not options.quiet:
- if options.snapshot:
- print "V8 logs: %s, %s, %s.ll" % (options.snapshot_log,
- options.log,
- options.log)
- else:
- print "V8 log: %s, %s.ll (no snapshot)" % (options.log, options.log)
+ print "V8 log: %s, %s.ll" % (options.log, options.log)
print "Perf trace file: %s" % options.trace
V8_GC_FAKE_MMAP = options.gc_fake_mmap
@@ -972,17 +899,10 @@ if __name__ == "__main__":
mmap_time = 0
sample_time = 0
- # Process the snapshot log to fill the snapshot name map.
- snapshot_name_map = {}
- if options.snapshot:
- snapshot_log_reader = SnapshotLogReader(log_name=options.snapshot_log)
- snapshot_name_map = snapshot_log_reader.ReadNameMap()
-
# Initialize the log reader.
code_map = CodeMap()
log_reader = LogReader(log_name=options.log + ".ll",
- code_map=code_map,
- snapshot_pos_to_name=snapshot_name_map)
+ code_map=code_map)
if not options.quiet:
print "Generated code architecture: %s" % log_reader.arch
print
diff --git a/deps/v8/tools/parser-shell.cc b/deps/v8/tools/parser-shell.cc
index 5d4b0cc490..ad687c9efe 100644
--- a/deps/v8/tools/parser-shell.cc
+++ b/deps/v8/tools/parser-shell.cc
@@ -102,7 +102,7 @@ std::pair<v8::base::TimeDelta, v8::base::TimeDelta> RunBaselineParser(
i::ScriptData* cached_data_impl = NULL;
// First round of parsing (produce data to cache).
{
- Zone zone;
+ Zone zone(reinterpret_cast<i::Isolate*>(isolate)->allocator());
ParseInfo info(&zone, script);
info.set_global();
info.set_cached_data(&cached_data_impl);
@@ -120,7 +120,7 @@ std::pair<v8::base::TimeDelta, v8::base::TimeDelta> RunBaselineParser(
}
// Second round of parsing (consume cached data).
{
- Zone zone;
+ Zone zone(reinterpret_cast<i::Isolate*>(isolate)->allocator());
ParseInfo info(&zone, script);
info.set_global();
info.set_cached_data(&cached_data_impl);
diff --git a/deps/v8/tools/presubmit.py b/deps/v8/tools/presubmit.py
index 23940bb686..dd3533bcf4 100755
--- a/deps/v8/tools/presubmit.py
+++ b/deps/v8/tools/presubmit.py
@@ -58,7 +58,6 @@ from testrunner.local import utils
LINT_RULES = """
-build/header_guard
-+build/include_alpha
-build/include_what_you_use
-build/namespaces
-readability/check
diff --git a/deps/v8/tools/profviz/composer.js b/deps/v8/tools/profviz/composer.js
index 85729b6d57..108911de69 100644
--- a/deps/v8/tools/profviz/composer.js
+++ b/deps/v8/tools/profviz/composer.js
@@ -104,15 +104,15 @@ function PlotScriptComposer(kResX, kResY, error_output) {
new TimerEvent("recompile sync", "#CC0044", true, 0),
'V8.RecompileConcurrent':
new TimerEvent("recompile async", "#CC4499", false, 1),
- 'V8.CompileEval':
+ 'V8.CompileEvalMicroSeconds':
new TimerEvent("compile eval", "#CC4400", true, 0),
'V8.IcMiss':
new TimerEvent("ic miss", "#CC9900", false, 0),
- 'V8.Parse':
+ 'V8.ParseMicroSeconds':
new TimerEvent("parse", "#00CC00", true, 0),
- 'V8.PreParse':
+ 'V8.PreParseMicroSeconds':
new TimerEvent("preparse", "#44CC00", true, 0),
- 'V8.ParseLazy':
+ 'V8.ParseLazyMicroSeconds':
new TimerEvent("lazy parse", "#00CC44", true, 0),
'V8.GCScavenger':
new TimerEvent("gc scavenge", "#0044CC", true, 0),
@@ -331,7 +331,7 @@ function PlotScriptComposer(kResX, kResY, error_output) {
var line;
while (line = input()) {
- logreader.processLogLine(line);
+ for (var s of line.split("\n")) logreader.processLogLine(s);
}
// Collect execution pauses.
diff --git a/deps/v8/tools/profviz/worker.js b/deps/v8/tools/profviz/worker.js
index b17ca29f58..7f163088e4 100644
--- a/deps/v8/tools/profviz/worker.js
+++ b/deps/v8/tools/profviz/worker.js
@@ -106,7 +106,6 @@ function run(args) {
var callGraphSize = 5;
var ignoreUnknown = true;
var stateFilter = null;
- var snapshotLogProcessor = null;
var range = range_start_override + "," + range_end_override;
var tickProcessor = new TickProcessor(entriesProvider,
@@ -114,7 +113,6 @@ function run(args) {
callGraphSize,
ignoreUnknown,
stateFilter,
- snapshotLogProcessor,
distortion,
range);
for (var i = 0; i < content_lines.length; i++) {
diff --git a/deps/v8/tools/release/auto_roll.py b/deps/v8/tools/release/auto_roll.py
index fc9aeee461..b71cac5a10 100755
--- a/deps/v8/tools/release/auto_roll.py
+++ b/deps/v8/tools/release/auto_roll.py
@@ -155,6 +155,7 @@ class UploadCL(Step):
if not self._options.dry_run:
self.GitUpload(author=self._options.author,
force=True,
+ bypass_hooks=True,
cq=self._options.use_commit_queue,
cwd=cwd)
print "CL uploaded."
diff --git a/deps/v8/tools/release/check_clusterfuzz.py b/deps/v8/tools/release/check_clusterfuzz.py
index fc826c1df3..cd73051685 100755
--- a/deps/v8/tools/release/check_clusterfuzz.py
+++ b/deps/v8/tools/release/check_clusterfuzz.py
@@ -71,6 +71,15 @@ BUG_SPECS = [
},
{
"args": {
+ "job_type": "linux_asan_d8_ignition_dbg",
+ "reproducible": "True",
+ "open": "True",
+ "bug_information": "",
+ },
+ "crash_state": ANY_RE,
+ },
+ {
+ "args": {
"job_type": "linux_asan_d8_v8_arm_dbg",
"reproducible": "True",
"open": "True",
diff --git a/deps/v8/tools/release/common_includes.py b/deps/v8/tools/release/common_includes.py
index c3a216c664..5c03236223 100644
--- a/deps/v8/tools/release/common_includes.py
+++ b/deps/v8/tools/release/common_includes.py
@@ -382,7 +382,7 @@ class GitInterface(VCInterface):
# is the case for all automated merge and push commits - also no title is
# the prefix of another title).
commit = None
- for wait_interval in [5, 10, 20, 40, 60, 60]:
+ for wait_interval in [10, 30, 60, 60, 60, 60, 60]:
self.step.Git("fetch")
commit = self.step.GitLog(n=1, format="%H", grep=message, branch=remote)
if commit:
diff --git a/deps/v8/tools/release/test_mergeinfo.py b/deps/v8/tools/release/test_mergeinfo.py
new file mode 100755
index 0000000000..d455fa2374
--- /dev/null
+++ b/deps/v8/tools/release/test_mergeinfo.py
@@ -0,0 +1,180 @@
+#!/usr/bin/env python
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import mergeinfo
+import shutil
+import unittest
+
+from collections import namedtuple
+from os import path
+from subprocess import Popen, PIPE, check_call
+
+TEST_CONFIG = {
+ "GIT_REPO": "/tmp/test-v8-search-related-commits",
+}
+
+class TestMergeInfo(unittest.TestCase):
+
+ base_dir = TEST_CONFIG["GIT_REPO"]
+
+ def _execute_git(self, git_args):
+
+ fullCommand = ["git", "-C", self.base_dir] + git_args
+ p = Popen(args=fullCommand, stdin=PIPE,
+ stdout=PIPE, stderr=PIPE)
+ output, err = p.communicate()
+ rc = p.returncode
+ if rc != 0:
+ raise Exception(err)
+ return output
+
+ def setUp(self):
+ if path.exists(self.base_dir):
+ shutil.rmtree(self.base_dir)
+
+ check_call(["git", "init", self.base_dir])
+
+ # Initial commit
+ message = '''Initial commit'''
+
+ self._make_empty_commit(message)
+
+ def tearDown(self):
+ if path.exists(self.base_dir):
+ shutil.rmtree(self.base_dir)
+
+ def _assert_correct_standard_result(
+ self, result, all_commits, hash_of_first_commit):
+ self.assertEqual(len(result), 1, "Master commit not found")
+ self.assertTrue(
+ result.get(hash_of_first_commit),
+ "Master commit is wrong")
+
+ self.assertEqual(
+ len(result[hash_of_first_commit]),
+ 1,
+ "Child commit not found")
+ self.assertEqual(
+ all_commits[2],
+ result[hash_of_first_commit][0],
+ "Child commit wrong")
+
+ def _get_commits(self):
+ commits = self._execute_git(
+ ["log", "--format=%H", "--reverse"]).splitlines()
+ return commits
+
+ def _make_empty_commit(self, message):
+ self._execute_git(["commit", "--allow-empty", "-m", message])
+ return self._get_commits()[-1]
+
+ def testCanDescribeCommit(self):
+ commits = self._get_commits()
+ hash_of_first_commit = commits[0]
+
+ result = mergeinfo.describe_commit(
+ self.base_dir,
+ hash_of_first_commit).splitlines()
+
+ self.assertEqual(
+ result[0],
+ 'commit ' + hash_of_first_commit)
+
+ def testCanDescribeCommitSingleLine(self):
+ commits = self._get_commits()
+ hash_of_first_commit = commits[0]
+
+ result = mergeinfo.describe_commit(
+ self.base_dir,
+ hash_of_first_commit, True).splitlines()
+
+ self.assertEqual(
+ str(result[0]),
+ str(hash_of_first_commit[0:7]) + ' Initial commit')
+
+ def testSearchFollowUpCommits(self):
+ commits = self._get_commits()
+ hash_of_first_commit = commits[0]
+
+ message = 'Follow-up commit of ' + hash_of_first_commit
+ self._make_empty_commit(message)
+ self._make_empty_commit(message)
+ self._make_empty_commit(message)
+ commits = self._get_commits()
+ message = 'Not related commit'
+ self._make_empty_commit(message)
+
+ followups = mergeinfo.get_followup_commits(
+ self.base_dir,
+ hash_of_first_commit)
+ self.assertEqual(set(followups), set(commits[1:]))
+
+ def testSearchMerges(self):
+ self._execute_git(['branch', 'test'])
+ self._execute_git(['checkout', 'master'])
+ message = 'real initial commit'
+ self._make_empty_commit(message)
+ commits = self._get_commits()
+ hash_of_first_commit = commits[0]
+
+ self._execute_git(['checkout', 'test'])
+ message = 'Not related commit'
+ self._make_empty_commit(message)
+
+ # This should be found
+ message = 'Merge ' + hash_of_first_commit
+ hash_of_hit = self._make_empty_commit(message)
+
+ # This should be ignored
+ message = 'Cr-Branched-From: ' + hash_of_first_commit
+ hash_of_ignored = self._make_empty_commit(message)
+
+ self._execute_git(['checkout', 'master'])
+
+ followups = mergeinfo.get_followup_commits(
+ self.base_dir,
+ hash_of_first_commit)
+
+ # Check if follow ups and merges are not overlapping
+ self.assertEqual(len(followups), 0)
+
+ message = 'Follow-up commit of ' + hash_of_first_commit
+ hash_of_followup = self._make_empty_commit(message)
+
+ merges = mergeinfo.get_merge_commits(self.base_dir, hash_of_first_commit)
+ # Check if follow up is ignored
+ self.assertTrue(hash_of_followup not in merges)
+
+ # Check for proper return of merges
+ self.assertTrue(hash_of_hit in merges)
+ self.assertTrue(hash_of_ignored not in merges)
+
+ def testIsLkgr(self):
+ commits = self._get_commits()
+ hash_of_first_commit = commits[0]
+ self._make_empty_commit('This one is the lkgr head')
+ self._execute_git(['branch', 'remotes/origin/lkgr'])
+ hash_of_not_lkgr = self._make_empty_commit('This one is not yet lkgr')
+
+ self.assertTrue(mergeinfo.is_lkgr(
+ self.base_dir, hash_of_first_commit))
+ self.assertFalse(mergeinfo.is_lkgr(
+ self.base_dir, hash_of_not_lkgr))
+
+ def testShowFirstCanary(self):
+ commits = self._get_commits()
+ hash_of_first_commit = commits[0]
+
+ self.assertEqual(mergeinfo.get_first_canary(
+ self.base_dir, hash_of_first_commit), 'No Canary coverage')
+
+ self._execute_git(['branch', 'remotes/origin/chromium/2345'])
+ self._execute_git(['branch', 'remotes/origin/chromium/2346'])
+
+ self.assertEqual(mergeinfo.get_first_canary(
+ self.base_dir, hash_of_first_commit), '2345')
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/deps/v8/tools/release/test_scripts.py b/deps/v8/tools/release/test_scripts.py
index 4f133ac28a..05457c9285 100644
--- a/deps/v8/tools/release/test_scripts.py
+++ b/deps/v8/tools/release/test_scripts.py
@@ -1121,7 +1121,7 @@ deps = {
self.ROLL_COMMIT_MSG),
"", cwd=chrome_dir),
Cmd("git cl upload --send-mail --email \"author@chromium.org\" -f "
- "--use-commit-queue", "", cwd=chrome_dir),
+ "--use-commit-queue --bypass-hooks", "", cwd=chrome_dir),
Cmd("git checkout -f master", "", cwd=chrome_dir),
Cmd("git branch -D work-branch", "", cwd=chrome_dir),
]
diff --git a/deps/v8/tools/run-deopt-fuzzer.py b/deps/v8/tools/run-deopt-fuzzer.py
index e4d8f16b4f..970aa8e616 100755
--- a/deps/v8/tools/run-deopt-fuzzer.py
+++ b/deps/v8/tools/run-deopt-fuzzer.py
@@ -71,6 +71,8 @@ SUPPORTED_ARCHS = ["android_arm",
"ia32",
"ppc",
"ppc64",
+ "s390",
+ "s390x",
"mipsel",
"nacl_ia32",
"nacl_x64",
@@ -321,7 +323,6 @@ def Main():
suite = testsuite.TestSuite.LoadTestSuite(
os.path.join(BASE_DIR, "test", root))
if suite:
- suite.SetupWorkingDirectory()
suites.append(suite)
if options.download_data:
@@ -387,7 +388,8 @@ def Execute(arch, mode, args, options, suites, workspace):
0, # No use of a rerun-failing-tests maximum.
False, # No predictable mode.
False, # No no_harness mode.
- False) # Don't use perf data.
+ False, # Don't use perf data.
+ False) # Coverage not supported.
# Find available test suites and read test cases from them.
variables = {
diff --git a/deps/v8/tools/run-perf.sh b/deps/v8/tools/run-perf.sh
new file mode 100755
index 0000000000..24053b40fb
--- /dev/null
+++ b/deps/v8/tools/run-perf.sh
@@ -0,0 +1,52 @@
+#! /bin/sh
+#
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+
+########## Global variable definitions
+
+# Ensure that <your CPU clock> / $SAMPLE_EVERY_N_CYCLES < $MAXIMUM_SAMPLE_RATE.
+MAXIMUM_SAMPLE_RATE=10000000
+SAMPLE_EVERY_N_CYCLES=10000
+SAMPLE_RATE_CONFIG_FILE="/proc/sys/kernel/perf_event_max_sample_rate"
+KERNEL_MAP_CONFIG_FILE="/proc/sys/kernel/kptr_restrict"
+CALL_GRAPH_METHOD="fp" # dwarf does not play nice with JITted objects.
+
+########## Usage
+
+usage() {
+cat << EOF
+usage: $0 <benchmark_command>
+
+Executes <benchmark_command> under observation by Linux perf.
+Sampling event is cycles in user space, call graphs are recorded.
+EOF
+}
+
+if [ $# -eq 0 ] || [ "$1" = "-h" ] || [ "$1" = "--help" ] ; then
+ usage
+ exit 1
+fi
+
+########## Actual script execution
+
+ACTUAL_SAMPLE_RATE=$(cat $SAMPLE_RATE_CONFIG_FILE)
+if [ "$ACTUAL_SAMPLE_RATE" -lt "$MAXIMUM_SAMPLE_RATE" ] ; then
+ echo "Setting appropriate maximum sample rate..."
+ echo $MAXIMUM_SAMPLE_RATE | sudo tee $SAMPLE_RATE_CONFIG_FILE
+fi
+
+ACTUAL_KERNEL_MAP_RESTRICTION=$(cat $KERNEL_MAP_CONFIG_FILE)
+if [ "$ACTUAL_KERNEL_MAP_RESTRICTION" -ne "0" ] ; then
+ echo "Disabling kernel address map restriction..."
+ echo 0 | sudo tee $KERNEL_MAP_CONFIG_FILE
+fi
+
+echo "Running..."
+perf record -R \
+ -e cycles:u \
+ -c $SAMPLE_EVERY_N_CYCLES \
+ --call-graph $CALL_GRAPH_METHOD \
+ -i $@ --perf_basic_prof
diff --git a/deps/v8/tools/run-tests.py b/deps/v8/tools/run-tests.py
index c94457fe6d..a380c97ad3 100755
--- a/deps/v8/tools/run-tests.py
+++ b/deps/v8/tools/run-tests.py
@@ -85,6 +85,8 @@ TEST_MAP = {
"ignition": [
"mjsunit",
"cctest",
+ "webkit",
+ "message",
],
# This needs to stay in sync with test/optimize_for_size.isolate.
"optimize_for_size": [
@@ -173,6 +175,8 @@ SUPPORTED_ARCHS = ["android_arm",
"mips64el",
"nacl_ia32",
"nacl_x64",
+ "s390",
+ "s390x",
"ppc",
"ppc64",
"x64",
@@ -208,6 +212,8 @@ def BuildOptions():
result.add_option("--asan",
help="Regard test expectations for ASAN",
default=False, action="store_true")
+ result.add_option("--sancov-dir",
+ help="Directory where to collect coverage data")
result.add_option("--cfi-vptr",
help="Run tests with UBSAN cfi_vptr option.",
default=False, action="store_true")
@@ -222,9 +228,6 @@ def BuildOptions():
default=False, action="store_true")
result.add_option("--cat", help="Print the source of the tests",
default=False, action="store_true")
- result.add_option("--flaky-tests",
- help="Regard tests marked as flaky (run|skip|dontcare)",
- default="dontcare")
result.add_option("--slow-tests",
help="Regard slow tests (run|skip|dontcare)",
default="dontcare")
@@ -298,7 +301,7 @@ def BuildOptions():
" (verbose, dots, color, mono)"),
choices=progress.PROGRESS_INDICATORS.keys(), default="mono")
result.add_option("--quickcheck", default=False, action="store_true",
- help=("Quick check mode (skip slow/flaky tests)"))
+ help=("Quick check mode (skip slow tests)"))
result.add_option("--report", help="Print a summary of the tests to be run",
default=False, action="store_true")
result.add_option("--json-test-results",
@@ -385,6 +388,14 @@ def SetupEnvironment(options):
if options.asan:
os.environ['ASAN_OPTIONS'] = symbolizer
+ if options.sancov_dir:
+ assert os.path.exists(options.sancov_dir)
+ os.environ['ASAN_OPTIONS'] = ":".join([
+ 'coverage=1',
+ 'coverage_dir=%s' % options.sancov_dir,
+ symbolizer,
+ ])
+
if options.cfi_vptr:
os.environ['UBSAN_OPTIONS'] = ":".join([
'print_stacktrace=1',
@@ -490,7 +501,6 @@ def ProcessOptions(options):
return False
if options.quickcheck:
VARIANTS = ["default", "stress"]
- options.flaky_tests = "skip"
options.slow_tests = "skip"
options.pass_fail_tests = "skip"
if options.no_stress:
@@ -524,8 +534,6 @@ def ProcessOptions(options):
print "Unknown %s mode %s" % (name, option)
return False
return True
- if not CheckTestMode("flaky test", options.flaky_tests):
- return False
if not CheckTestMode("slow test", options.slow_tests):
return False
if not CheckTestMode("pass|fail test", options.pass_fail_tests):
@@ -616,7 +624,6 @@ def Main():
suite = testsuite.TestSuite.LoadTestSuite(
os.path.join(BASE_DIR, "test", root))
if suite:
- suite.SetupWorkingDirectory()
suites.append(suite)
if options.download_data or options.download_data_only:
@@ -688,7 +695,8 @@ def Execute(arch, mode, args, options, suites):
options.rerun_failures_max,
options.predictable,
options.no_harness,
- use_perf_data=not options.swarming)
+ use_perf_data=not options.swarming,
+ sancov_dir=options.sancov_dir)
# TODO(all): Combine "simulator" and "simulator_run".
simulator_run = not options.dont_skip_simulator_slow_tests and \
@@ -725,8 +733,8 @@ def Execute(arch, mode, args, options, suites):
if len(args) > 0:
s.FilterTestCasesByArgs(args)
all_tests += s.tests
- s.FilterTestCasesByStatus(options.warn_unused, options.flaky_tests,
- options.slow_tests, options.pass_fail_tests)
+ s.FilterTestCasesByStatus(options.warn_unused, options.slow_tests,
+ options.pass_fail_tests)
if options.cat:
verbose.PrintTestSource(s.tests)
continue
@@ -815,6 +823,18 @@ def Execute(arch, mode, args, options, suites):
"with failure information.")
exit_code = 0
+ if options.sancov_dir:
+ # If tests ran with sanitizer coverage, merge coverage files in the end.
+ try:
+ print "Merging sancov files."
+ subprocess.check_call([
+ sys.executable,
+ join(BASE_DIR, "tools", "sanitizers", "sancov_merger.py"),
+ "--coverage-dir=%s" % options.sancov_dir])
+ except:
+ print >> sys.stderr, "Error: Merging sancov files failed."
+ exit_code = 1
+
return exit_code
diff --git a/deps/v8/tools/sanitizers/sancov_formatter.py b/deps/v8/tools/sanitizers/sancov_formatter.py
new file mode 100755
index 0000000000..4f3ea9e5cb
--- /dev/null
+++ b/deps/v8/tools/sanitizers/sancov_formatter.py
@@ -0,0 +1,446 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Script to transform and merge sancov files into human readable json-format.
+
+The script supports three actions:
+all: Writes a json file with all instrumented lines of all executables.
+merge: Merges sancov files with coverage output into an existing json file.
+split: Split json file into separate files per covered source file.
+
+The json data is structured as follows:
+{
+ "version": 1,
+ "tests": ["executable1", "executable2", ...],
+ "files": {
+ "file1": [[<instr line 1>, <bit_mask>], [<instr line 2>, <bit_mask>], ...],
+ "file2": [...],
+ ...
+ }
+}
+
+The executables are sorted and determine the test bit mask. Their index+1 is
+the bit, e.g. executable1 = 1, executable3 = 4, etc. Hence, a line covered by
+executable1 and executable3 will have bit_mask == 5 == 0b101. The number of
+tests is restricted to 52 in version 1, to allow javascript JSON parsing of
+the bitsets encoded as numbers. JS max safe int is (1 << 53) - 1.
+
+The line-number-bit_mask pairs are sorted by line number and don't contain
+duplicates.
+
+Split json data preserves the same format, but only contains one file per
+json file.
+
+The sancov tool is expected to be in the llvm compiler-rt third-party
+directory. It's not checked out by default and must be added as a custom deps:
+'v8/third_party/llvm/projects/compiler-rt':
+ 'https://chromium.googlesource.com/external/llvm.org/compiler-rt.git'
+"""
+
+import argparse
+import json
+import logging
+import os
+import re
+import subprocess
+import sys
+
+from multiprocessing import Pool, cpu_count
+
+
+logging.basicConfig(level=logging.INFO)
+
+# Files to exclude from coverage. Dropping their data early adds more speed.
+# The contained cc files are already excluded from instrumentation, but inlined
+# data is referenced through v8's object files.
+EXCLUSIONS = [
+ 'buildtools',
+ 'src/third_party',
+ 'third_party',
+ 'test',
+ 'testing',
+]
+
+# Executables found in the build output for which no coverage is generated.
+# Exclude them from the coverage data file.
+EXE_BLACKLIST = [
+ 'generate-bytecode-expectations',
+ 'hello-world',
+ 'mksnapshot',
+ 'parser-shell',
+ 'process',
+ 'shell',
+]
+
+# V8 checkout directory.
+BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(
+ os.path.abspath(__file__))))
+
+# Executable location. TODO(machenbach): Only release is supported for now.
+BUILD_DIR = os.path.join(BASE_DIR, 'out', 'Release')
+
+# Path prefix added by the llvm symbolizer including trailing slash.
+OUTPUT_PATH_PREFIX = os.path.join(BUILD_DIR, '..', '..', '')
+
+# The sancov tool location.
+SANCOV_TOOL = os.path.join(
+ BASE_DIR, 'third_party', 'llvm', 'projects', 'compiler-rt',
+ 'lib', 'sanitizer_common', 'scripts', 'sancov.py')
+
+# Simple script to sanitize the PCs from objdump.
+SANITIZE_PCS = os.path.join(BASE_DIR, 'tools', 'sanitizers', 'sanitize_pcs.py')
+
+# The llvm symbolizer location.
+SYMBOLIZER = os.path.join(
+ BASE_DIR, 'third_party', 'llvm-build', 'Release+Asserts', 'bin',
+ 'llvm-symbolizer')
+
+# Number of cpus.
+CPUS = cpu_count()
+
+# Regexp to find sancov files as output by sancov_merger.py. Also grabs the
+# executable name in group 1.
+SANCOV_FILE_RE = re.compile(r'^(.*)\.result.sancov$')
+
+
+def executables():
+ """Iterates over executable files in the build directory."""
+ for f in os.listdir(BUILD_DIR):
+ file_path = os.path.join(BUILD_DIR, f)
+ if (os.path.isfile(file_path) and
+ os.access(file_path, os.X_OK) and
+ f not in EXE_BLACKLIST):
+ yield file_path
+
+
+def process_symbolizer_output(output):
+ """Post-process llvm symbolizer output.
+
+ Excludes files outside the v8 checkout or given in exclusion list above
+ from further processing. Drops the character index in each line.
+
+ Returns: A mapping of file names to lists of line numbers. The file names
+ have relative paths to the v8 base directory. The lists of line
+ numbers don't contain duplicate lines and are sorted.
+ """
+ # Drop path prefix when iterating lines. The path is redundant and takes
+ # too much space. Drop files outside that path, e.g. generated files in
+ # the build dir and absolute paths to c++ library headers.
+ def iter_lines():
+ for line in output.strip().splitlines():
+ if line.startswith(OUTPUT_PATH_PREFIX):
+ yield line[len(OUTPUT_PATH_PREFIX):]
+
+ # Map file names to sets of instrumented line numbers.
+ file_map = {}
+ for line in iter_lines():
+ # Drop character number, we only care for line numbers. Each line has the
+ # form: <file name>:<line number>:<character number>.
+ file_name, number, _ = line.split(':')
+ file_map.setdefault(file_name, set([])).add(int(number))
+
+ # Remove exclusion patterns from file map. It's cheaper to do it after the
+ # mapping, as there are few excluded files and we don't want to do this
+ # check for numerous lines in ordinary files.
+ def keep(file_name):
+ for e in EXCLUSIONS:
+ if file_name.startswith(e):
+ return False
+ return True
+
+ # Return in serializable form and filter.
+ return {k: sorted(file_map[k]) for k in file_map if keep(k)}
+
+
+def get_instrumented_lines(executable):
+ """Return the instrumented lines of an executable.
+
+ Called trough multiprocessing pool.
+
+ Returns: Post-processed llvm output as returned by process_symbolizer_output.
+ """
+ # The first two pipes are from llvm's tool sancov.py with 0x added to the hex
+ # numbers. The results are piped into the llvm symbolizer, which outputs for
+ # each PC: <file name with abs path>:<line number>:<character number>.
+ # We don't call the sancov tool to get more speed.
+ process = subprocess.Popen(
+ 'objdump -d %s | '
+ 'grep \'^\s\+[0-9a-f]\+:.*\scall\(q\|\)\s\+[0-9a-f]\+ '
+ '<__sanitizer_cov\(_with_check\|\)\(@plt\|\)>\' | '
+ 'grep \'^\s\+[0-9a-f]\+\' -o | '
+ '%s | '
+ '%s --obj %s -functions=none' %
+ (executable, SANITIZE_PCS, SYMBOLIZER, executable),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ cwd=BASE_DIR,
+ shell=True,
+ )
+ output, _ = process.communicate()
+ assert process.returncode == 0
+ return process_symbolizer_output(output)
+
+
+def merge_instrumented_line_results(exe_list, results):
+ """Merge multiprocessing results for all instrumented lines.
+
+ Args:
+ exe_list: List of all executable names with absolute paths.
+ results: List of results as returned by get_instrumented_lines.
+
+ Returns: Dict to be used as json data as specified on the top of this page.
+ The dictionary contains all instrumented lines of all files
+ referenced by all executables.
+ """
+ def merge_files(x, y):
+ for file_name, lines in y.iteritems():
+ x.setdefault(file_name, set([])).update(lines)
+ return x
+ result = reduce(merge_files, results, {})
+
+ # Return data as file->lines mapping. The lines are saved as lists
+ # with (line number, test bits (as int)). The test bits are initialized with
+ # 0, meaning instrumented, but no coverage.
+ # The order of the test bits is given with key 'tests'. For now, these are
+ # the executable names. We use a _list_ with two items instead of a tuple to
+ # ease merging by allowing mutation of the second item.
+ return {
+ 'version': 1,
+ 'tests': sorted(map(os.path.basename, exe_list)),
+ 'files': {f: map(lambda l: [l, 0], sorted(result[f])) for f in result},
+ }
+
+
+def write_instrumented(options):
+ """Implements the 'all' action of this tool."""
+ exe_list = list(executables())
+ logging.info('Reading instrumented lines from %d executables.',
+ len(exe_list))
+ pool = Pool(CPUS)
+ try:
+ results = pool.imap_unordered(get_instrumented_lines, exe_list)
+ finally:
+ pool.close()
+
+ # Merge multiprocessing results and prepare output data.
+ data = merge_instrumented_line_results(exe_list, results)
+
+ logging.info('Read data from %d executables, which covers %d files.',
+ len(data['tests']), len(data['files']))
+ logging.info('Writing results to %s', options.json_output)
+
+ # Write json output.
+ with open(options.json_output, 'w') as f:
+ json.dump(data, f, sort_keys=True)
+
+
+def get_covered_lines(args):
+ """Return the covered lines of an executable.
+
+ Called trough multiprocessing pool. The args are expected to unpack to:
+ cov_dir: Folder with sancov files merged by sancov_merger.py.
+ executable: The executable that was called to produce the given coverage
+ data.
+ sancov_file: The merged sancov file with coverage data.
+
+ Returns: A tuple of post-processed llvm output as returned by
+ process_symbolizer_output and the executable name.
+ """
+ cov_dir, executable, sancov_file = args
+
+ # Let the sancov tool print the covered PCs and pipe them through the llvm
+ # symbolizer.
+ process = subprocess.Popen(
+ '%s print %s 2> /dev/null | '
+ '%s --obj %s -functions=none' %
+ (SANCOV_TOOL,
+ os.path.join(cov_dir, sancov_file),
+ SYMBOLIZER,
+ os.path.join(BUILD_DIR, executable)),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ cwd=BASE_DIR,
+ shell=True,
+ )
+ output, _ = process.communicate()
+ assert process.returncode == 0
+ return process_symbolizer_output(output), executable
+
+
+def merge_covered_line_results(data, results):
+ """Merge multiprocessing results for covered lines.
+
+ The data is mutated, the results are merged into it in place.
+
+ Args:
+ data: Existing coverage data from json file containing all instrumented
+ lines.
+ results: List of results as returned by get_covered_lines.
+ """
+
+ # List of executables and mapping to the test bit mask. The number of
+ # tests is restricted to 52, to allow javascript JSON parsing of
+ # the bitsets encoded as numbers. JS max safe int is (1 << 53) - 1.
+ exe_list = data['tests']
+ assert len(exe_list) <= 52, 'Max 52 different tests are supported.'
+ test_bit_masks = {exe:1<<i for i, exe in enumerate(exe_list)}
+
+ def merge_lines(old_lines, new_lines, mask):
+ """Merge the coverage data of a list of lines.
+
+ Args:
+ old_lines: Lines as list of pairs with line number and test bit mask.
+ The new lines will be merged into the list in place.
+ new_lines: List of new (covered) lines (sorted).
+ mask: The bit to be set for covered lines. The bit index is the test
+ index of the executable that covered the line.
+ """
+ i = 0
+ # Iterate over old and new lines, both are sorted.
+ for l in new_lines:
+ while old_lines[i][0] < l:
+ # Forward instrumented lines not present in this coverage data.
+ i += 1
+ # TODO: Add more context to the assert message.
+ assert i < len(old_lines), 'Covered line %d not in input file.' % l
+ assert old_lines[i][0] == l, 'Covered line %d not in input file.' % l
+
+ # Add coverage information to the line.
+ old_lines[i][1] |= mask
+
+ def merge_files(data, result):
+ """Merge result into data.
+
+ The data is mutated in place.
+
+ Args:
+ data: Merged coverage data from the previous reduce step.
+ result: New result to be merged in. The type is as returned by
+ get_covered_lines.
+ """
+ file_map, executable = result
+ files = data['files']
+ for file_name, lines in file_map.iteritems():
+ merge_lines(files[file_name], lines, test_bit_masks[executable])
+ return data
+
+ reduce(merge_files, results, data)
+
+
+def merge(options):
+ """Implements the 'merge' action of this tool."""
+
+ # Check if folder with coverage output exists.
+ assert (os.path.exists(options.coverage_dir) and
+ os.path.isdir(options.coverage_dir))
+
+ # Inputs for multiprocessing. List of tuples of:
+ # Coverage dir, executable name, sancov file name.
+ inputs = []
+ for f in os.listdir(options.coverage_dir):
+ match = SANCOV_FILE_RE.match(f)
+ if match:
+ inputs.append((options.coverage_dir, match.group(1), f))
+
+ logging.info('Merging %d sancov files into %s',
+ len(inputs), options.json_input)
+
+ # Post-process covered lines in parallel.
+ pool = Pool(CPUS)
+ try:
+ results = pool.imap_unordered(get_covered_lines, inputs)
+ finally:
+ pool.close()
+
+ # Load existing json data file for merging the results.
+ with open(options.json_input, 'r') as f:
+ data = json.load(f)
+
+ # Merge muliprocessing results. Mutates data.
+ merge_covered_line_results(data, results)
+
+ logging.info('Merged data from %d executables, which covers %d files.',
+ len(data['tests']), len(data['files']))
+ logging.info('Writing results to %s', options.json_output)
+
+ # Write merged results to file.
+ with open(options.json_output, 'w') as f:
+ json.dump(data, f, sort_keys=True)
+
+
+def split(options):
+ """Implements the 'split' action of this tool."""
+ # Load existing json data file for splitting.
+ with open(options.json_input, 'r') as f:
+ data = json.load(f)
+
+ logging.info('Splitting off %d coverage files from %s',
+ len(data['files']), options.json_input)
+
+ for file_name, coverage in data['files'].iteritems():
+ # Preserve relative directories that are part of the file name.
+ file_path = os.path.join(options.output_dir, file_name + '.json')
+ try:
+ os.makedirs(os.path.dirname(file_path))
+ except OSError:
+ # Ignore existing directories.
+ pass
+
+ with open(file_path, 'w') as f:
+ # Flat-copy the old dict.
+ new_data = dict(data)
+
+ # Update current file.
+ new_data['files'] = {file_name: coverage}
+
+ # Write json data.
+ json.dump(new_data, f, sort_keys=True)
+
+
+def main(args=None):
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--coverage-dir',
+ help='Path to the sancov output files.')
+ parser.add_argument('--json-input',
+ help='Path to an existing json file with coverage data.')
+ parser.add_argument('--json-output',
+ help='Path to a file to write json output to.')
+ parser.add_argument('--output-dir',
+ help='Directory where to put split output files to.')
+ parser.add_argument('action', choices=['all', 'merge', 'split'],
+ help='Action to perform.')
+
+ options = parser.parse_args(args)
+ if options.action.lower() == 'all':
+ if not options.json_output:
+ print '--json-output is required'
+ return 1
+ write_instrumented(options)
+ elif options.action.lower() == 'merge':
+ if not options.coverage_dir:
+ print '--coverage-dir is required'
+ return 1
+ if not options.json_input:
+ print '--json-input is required'
+ return 1
+ if not options.json_output:
+ print '--json-output is required'
+ return 1
+ merge(options)
+ elif options.action.lower() == 'split':
+ if not options.json_input:
+ print '--json-input is required'
+ return 1
+ if not options.output_dir:
+ print '--output-dir is required'
+ return 1
+ split(options)
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/deps/v8/tools/sanitizers/sancov_formatter_test.py b/deps/v8/tools/sanitizers/sancov_formatter_test.py
new file mode 100644
index 0000000000..6a741c804c
--- /dev/null
+++ b/deps/v8/tools/sanitizers/sancov_formatter_test.py
@@ -0,0 +1,222 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Requires python-coverage. Native python coverage version >= 3.7.1 should
+# be installed to get the best speed.
+
+import copy
+import coverage
+import logging
+import json
+import os
+import shutil
+import sys
+import tempfile
+import unittest
+
+
+# Directory of this file.
+LOCATION = os.path.dirname(os.path.abspath(__file__))
+
+# V8 checkout directory.
+BASE_DIR = os.path.dirname(os.path.dirname(LOCATION))
+
+# Executable location.
+BUILD_DIR = os.path.join(BASE_DIR, 'out', 'Release')
+
+def abs_line(line):
+ """Absolute paths as output by the llvm symbolizer."""
+ return '%s/%s' % (BUILD_DIR, line)
+
+
+#------------------------------------------------------------------------------
+
+# Data for test_process_symbolizer_output. This simulates output from the
+# llvm symbolizer. The paths are not normlized.
+SYMBOLIZER_OUTPUT = (
+ abs_line('../../src/foo.cc:87:7\n') +
+ abs_line('../../src/foo.cc:92:0\n') + # Test sorting.
+ abs_line('../../src/baz/bar.h:1234567:0\n') + # Test large line numbers.
+ abs_line('../../src/foo.cc:92:0\n') + # Test duplicates.
+ abs_line('../../src/baz/bar.h:0:0\n') + # Test subdirs.
+ '/usr/include/cool_stuff.h:14:2\n' + # Test dropping absolute paths.
+ abs_line('../../src/foo.cc:87:10\n') + # Test dropping character indexes.
+ abs_line('../../third_party/icu.cc:0:0\n') + # Test dropping excluded dirs.
+ abs_line('../../src/baz/bar.h:11:0\n')
+)
+
+# The expected post-processed output maps relative file names to line numbers.
+# The numbers are sorted and unique.
+EXPECTED_PROCESSED_OUTPUT = {
+ 'src/baz/bar.h': [0, 11, 1234567],
+ 'src/foo.cc': [87, 92],
+}
+
+
+#------------------------------------------------------------------------------
+
+# Data for test_merge_instrumented_line_results. A list of absolute paths to
+# all executables.
+EXE_LIST = [
+ '/path/to/d8',
+ '/path/to/cctest',
+ '/path/to/unittests',
+]
+
+# Post-processed llvm symbolizer output as returned by
+# process_symbolizer_output. These are lists of this output for merging.
+INSTRUMENTED_LINE_RESULTS = [
+ {
+ 'src/baz/bar.h': [0, 3, 7],
+ 'src/foo.cc': [11],
+ },
+ {
+ 'src/baz/bar.h': [3, 7, 8],
+ 'src/baz.cc': [2],
+ 'src/foo.cc': [1, 92],
+ },
+ {
+ 'src/baz.cc': [1],
+ 'src/foo.cc': [92, 93],
+ },
+]
+
+# This shows initial instrumentation. No lines are covered, hence,
+# the coverage mask is 0 for all lines. The line tuples remain sorted by
+# line number and contain no duplicates.
+EXPECTED_INSTRUMENTED_LINES_DATA = {
+ 'version': 1,
+ 'tests': ['cctest', 'd8', 'unittests'],
+ 'files': {
+ 'src/baz/bar.h': [[0, 0], [3, 0], [7, 0], [8, 0]],
+ 'src/baz.cc': [[1, 0], [2, 0]],
+ 'src/foo.cc': [[1, 0], [11, 0], [92, 0], [93, 0]],
+ },
+}
+
+
+#------------------------------------------------------------------------------
+
+# Data for test_merge_covered_line_results. List of post-processed
+# llvm-symbolizer output as a tuple including the executable name of each data
+# set.
+COVERED_LINE_RESULTS = [
+ ({
+ 'src/baz/bar.h': [3, 7],
+ 'src/foo.cc': [11],
+ }, 'd8'),
+ ({
+ 'src/baz/bar.h': [3, 7],
+ 'src/baz.cc': [2],
+ 'src/foo.cc': [1],
+ }, 'cctest'),
+ ({
+ 'src/foo.cc': [92],
+ 'src/baz.cc': [2],
+ }, 'unittests'),
+]
+
+# This shows initial instrumentation + coverage. The mask bits are:
+# cctest: 1, d8: 2, unittests:4. So a line covered by cctest and unittests
+# has a coverage mask of 0b101, e.g. line 2 in src/baz.cc.
+EXPECTED_COVERED_LINES_DATA = {
+ 'version': 1,
+ 'tests': ['cctest', 'd8', 'unittests'],
+ 'files': {
+ 'src/baz/bar.h': [[0, 0b0], [3, 0b11], [7, 0b11], [8, 0b0]],
+ 'src/baz.cc': [[1, 0b0], [2, 0b101]],
+ 'src/foo.cc': [[1, 0b1], [11, 0b10], [92, 0b100], [93, 0b0]],
+ },
+}
+
+
+#------------------------------------------------------------------------------
+
+# Data for test_split.
+
+EXPECTED_SPLIT_FILES = [
+ (
+ os.path.join('src', 'baz', 'bar.h.json'),
+ {
+ 'version': 1,
+ 'tests': ['cctest', 'd8', 'unittests'],
+ 'files': {
+ 'src/baz/bar.h': [[0, 0b0], [3, 0b11], [7, 0b11], [8, 0b0]],
+ },
+ },
+ ),
+ (
+ os.path.join('src', 'baz.cc.json'),
+ {
+ 'version': 1,
+ 'tests': ['cctest', 'd8', 'unittests'],
+ 'files': {
+ 'src/baz.cc': [[1, 0b0], [2, 0b101]],
+ },
+ },
+ ),
+ (
+ os.path.join('src', 'foo.cc.json'),
+ {
+ 'version': 1,
+ 'tests': ['cctest', 'd8', 'unittests'],
+ 'files': {
+ 'src/foo.cc': [[1, 0b1], [11, 0b10], [92, 0b100], [93, 0b0]],
+ },
+ },
+ ),
+]
+
+
+class FormatterTests(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ sys.path.append(LOCATION)
+ cls._cov = coverage.coverage(
+ include=([os.path.join(LOCATION, 'sancov_formatter.py')]))
+ cls._cov.start()
+ import sancov_formatter
+ global sancov_formatter
+
+ @classmethod
+ def tearDownClass(cls):
+ cls._cov.stop()
+ cls._cov.report()
+
+ def test_process_symbolizer_output(self):
+ result = sancov_formatter.process_symbolizer_output(SYMBOLIZER_OUTPUT)
+ self.assertEquals(EXPECTED_PROCESSED_OUTPUT, result)
+
+ def test_merge_instrumented_line_results(self):
+ result = sancov_formatter.merge_instrumented_line_results(
+ EXE_LIST, INSTRUMENTED_LINE_RESULTS)
+ self.assertEquals(EXPECTED_INSTRUMENTED_LINES_DATA, result)
+
+ def test_merge_covered_line_results(self):
+ data = copy.deepcopy(EXPECTED_INSTRUMENTED_LINES_DATA)
+ sancov_formatter.merge_covered_line_results(
+ data, COVERED_LINE_RESULTS)
+ self.assertEquals(EXPECTED_COVERED_LINES_DATA, data)
+
+ def test_split(self):
+ _, json_input = tempfile.mkstemp(prefix='tmp_coverage_test_split')
+ with open(json_input, 'w') as f:
+ json.dump(EXPECTED_COVERED_LINES_DATA, f)
+ output_dir = tempfile.mkdtemp(prefix='tmp_coverage_test_split')
+
+ try:
+ sancov_formatter.main([
+ 'split',
+ '--json-input', json_input,
+ '--output-dir', output_dir,
+ ])
+
+ for file_name, expected_data in EXPECTED_SPLIT_FILES:
+ full_path = os.path.join(output_dir, file_name)
+ self.assertTrue(os.path.exists(full_path))
+ with open(full_path) as f:
+ self.assertEquals(expected_data, json.load(f))
+ finally:
+ os.remove(json_input)
+ shutil.rmtree(output_dir)
diff --git a/deps/v8/tools/sanitizers/sancov_merger.py b/deps/v8/tools/sanitizers/sancov_merger.py
new file mode 100755
index 0000000000..a4cfec1b0c
--- /dev/null
+++ b/deps/v8/tools/sanitizers/sancov_merger.py
@@ -0,0 +1,229 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Script for merging sancov files in parallel.
+
+When merging test runner output, the sancov files are expected
+to be located in one directory with the file-name pattern:
+<executable name>.test.<id>.sancov
+
+For each executable, this script writes a new file:
+<executable name>.result.sancov
+
+When --swarming-output-dir is specified, this script will merge the result
+files found there into the coverage folder.
+
+The sancov tool is expected to be in the llvm compiler-rt third-party
+directory. It's not checked out by default and must be added as a custom deps:
+'v8/third_party/llvm/projects/compiler-rt':
+ 'https://chromium.googlesource.com/external/llvm.org/compiler-rt.git'
+"""
+
+import argparse
+import logging
+import math
+import os
+import re
+import subprocess
+import sys
+
+from multiprocessing import Pool, cpu_count
+
+
+logging.basicConfig(level=logging.INFO)
+
+# V8 checkout directory.
+BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(
+ os.path.abspath(__file__))))
+
+# The sancov tool location.
+SANCOV_TOOL = os.path.join(
+ BASE_DIR, 'third_party', 'llvm', 'projects', 'compiler-rt',
+ 'lib', 'sanitizer_common', 'scripts', 'sancov.py')
+
+# Number of cpus.
+CPUS = cpu_count()
+
+# Regexp to find sancov file as output by the v8 test runner. Also grabs the
+# executable name in group 1.
+SANCOV_FILE_RE = re.compile(r'^(.*)\.test\.\d+\.sancov$')
+
+# Regexp to find sancov result files as returned from swarming.
+SANCOV_RESULTS_FILE_RE = re.compile(r'^.*\.result\.sancov$')
+
+
+def merge(args):
+ """Merge several sancov files into one.
+
+ Called trough multiprocessing pool. The args are expected to unpack to:
+ keep: Option if source and intermediate sancov files should be kept.
+ coverage_dir: Folder where to find the sancov files.
+ executable: Name of the executable whose sancov files should be merged.
+ index: A number to be put into the intermediate result file name.
+ If None, this is a final result.
+ bucket: The list of sancov files to be merged.
+ Returns: A tuple with the executable name and the result file name.
+ """
+ keep, coverage_dir, executable, index, bucket = args
+ process = subprocess.Popen(
+ [SANCOV_TOOL, 'merge'] + bucket,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ cwd=coverage_dir,
+ )
+ output, _ = process.communicate()
+ assert process.returncode == 0
+ if index is not None:
+ # This is an intermediate result, add the bucket index to the file name.
+ result_file_name = '%s.result.%d.sancov' % (executable, index)
+ else:
+ # This is the final result without bucket index.
+ result_file_name = '%s.result.sancov' % executable
+ with open(os.path.join(coverage_dir, result_file_name), "wb") as f:
+ f.write(output)
+ if not keep:
+ for f in bucket:
+ os.remove(os.path.join(coverage_dir, f))
+ return executable, result_file_name
+
+
+def generate_inputs(keep, coverage_dir, file_map, cpus):
+ """Generate inputs for multiprocessed merging.
+
+ Splits the sancov files into several buckets, so that each bucket can be
+ merged in a separate process. We have only few executables in total with
+ mostly lots of associated files. In the general case, with many executables
+ we might need to avoid splitting buckets of executables with few files.
+
+ Returns: List of args as expected by merge above.
+ """
+ inputs = []
+ for executable, files in file_map.iteritems():
+ # What's the bucket size for distributing files for merging? E.g. with
+ # 2 cpus and 9 files we want bucket size 5.
+ n = max(2, int(math.ceil(len(files) / float(cpus))))
+
+ # Chop files into buckets.
+ buckets = [files[i:i+n] for i in xrange(0, len(files), n)]
+
+ # Inputs for multiprocessing. List of tuples containing:
+ # Keep-files option, base path, executable name, index of bucket,
+ # list of files.
+ inputs.extend([(keep, coverage_dir, executable, i, b)
+ for i, b in enumerate(buckets)])
+ return inputs
+
+
+def merge_parallel(inputs, merge_fun=merge):
+ """Process several merge jobs in parallel."""
+ pool = Pool(CPUS)
+ try:
+ return pool.map(merge_fun, inputs)
+ finally:
+ pool.close()
+
+
+def merge_test_runner_output(options):
+ # Map executable names to their respective sancov files.
+ file_map = {}
+ for f in os.listdir(options.coverage_dir):
+ match = SANCOV_FILE_RE.match(f)
+ if match:
+ file_map.setdefault(match.group(1), []).append(f)
+
+ inputs = generate_inputs(
+ options.keep, options.coverage_dir, file_map, CPUS)
+
+ logging.info('Executing %d merge jobs in parallel for %d executables.' %
+ (len(inputs), len(file_map)))
+
+ results = merge_parallel(inputs)
+
+ # Map executable names to intermediate bucket result files.
+ file_map = {}
+ for executable, f in results:
+ file_map.setdefault(executable, []).append(f)
+
+ # Merge the bucket results for each executable.
+ # The final result has index None, so no index will appear in the
+ # file name.
+ inputs = [(options.keep, options.coverage_dir, executable, None, files)
+ for executable, files in file_map.iteritems()]
+
+ logging.info('Merging %d intermediate results.' % len(inputs))
+
+ merge_parallel(inputs)
+
+
+def merge_two(args):
+ """Merge two sancov files.
+
+ Called trough multiprocessing pool. The args are expected to unpack to:
+ swarming_output_dir: Folder where to find the new file.
+ coverage_dir: Folder where to find the existing file.
+ f: File name of the file to be merged.
+ """
+ swarming_output_dir, coverage_dir, f = args
+ input_file = os.path.join(swarming_output_dir, f)
+ output_file = os.path.join(coverage_dir, f)
+ process = subprocess.Popen(
+ [SANCOV_TOOL, 'merge', input_file, output_file],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ output, _ = process.communicate()
+ assert process.returncode == 0
+ with open(output_file, "wb") as f:
+ f.write(output)
+
+
+def merge_swarming_output(options):
+ # Iterate sancov files from swarming.
+ files = []
+ for f in os.listdir(options.swarming_output_dir):
+ match = SANCOV_RESULTS_FILE_RE.match(f)
+ if match:
+ if os.path.exists(os.path.join(options.coverage_dir, f)):
+ # If the same file already exists, we'll merge the data.
+ files.append(f)
+ else:
+ # No file yet? Just move it.
+ os.rename(os.path.join(options.swarming_output_dir, f),
+ os.path.join(options.coverage_dir, f))
+
+ inputs = [(options.swarming_output_dir, options.coverage_dir, f)
+ for f in files]
+
+ logging.info('Executing %d merge jobs in parallel.' % len(inputs))
+ merge_parallel(inputs, merge_two)
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--coverage-dir', required=True,
+ help='Path to the sancov output files.')
+ parser.add_argument('--keep', default=False, action='store_true',
+ help='Keep sancov output files after merging.')
+ parser.add_argument('--swarming-output-dir',
+ help='Folder containing a results shard from swarming.')
+ options = parser.parse_args()
+
+ # Check if folder with coverage output exists.
+ assert (os.path.exists(options.coverage_dir) and
+ os.path.isdir(options.coverage_dir))
+
+ if options.swarming_output_dir:
+ # Check if folder with swarming output exists.
+ assert (os.path.exists(options.swarming_output_dir) and
+ os.path.isdir(options.swarming_output_dir))
+ merge_swarming_output(options)
+ else:
+ merge_test_runner_output(options)
+
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/deps/v8/tools/sanitizers/sancov_merger_test.py b/deps/v8/tools/sanitizers/sancov_merger_test.py
new file mode 100644
index 0000000000..93b89eb8a7
--- /dev/null
+++ b/deps/v8/tools/sanitizers/sancov_merger_test.py
@@ -0,0 +1,82 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+import sancov_merger
+
+
+# Files on disk after test runner completes. The files are mapped by
+# executable name -> file list.
+FILE_MAP = {
+ 'd8': [
+ 'd8.test.1.sancov',
+ 'd8.test.2.sancov',
+ 'd8.test.3.sancov',
+ 'd8.test.4.sancov',
+ 'd8.test.5.sancov',
+ 'd8.test.6.sancov',
+ 'd8.test.7.sancov',
+ ],
+ 'cctest': [
+ 'cctest.test.1.sancov',
+ 'cctest.test.2.sancov',
+ 'cctest.test.3.sancov',
+ 'cctest.test.4.sancov',
+ ],
+}
+
+
+# Inputs for merge process with 2 cpus. The tuples contain:
+# (flag, path, executable name, intermediate result index, file list).
+EXPECTED_INPUTS_2 = [
+ (False, '/some/path', 'cctest', 0, [
+ 'cctest.test.1.sancov',
+ 'cctest.test.2.sancov']),
+ (False, '/some/path', 'cctest', 1, [
+ 'cctest.test.3.sancov',
+ 'cctest.test.4.sancov']),
+ (False, '/some/path', 'd8', 0, [
+ 'd8.test.1.sancov',
+ 'd8.test.2.sancov',
+ 'd8.test.3.sancov',
+ 'd8.test.4.sancov']),
+ (False, '/some/path', 'd8', 1, [
+ 'd8.test.5.sancov',
+ 'd8.test.6.sancov',
+ 'd8.test.7.sancov']),
+]
+
+
+# The same for 4 cpus.
+EXPECTED_INPUTS_4 = [
+ (True, '/some/path', 'cctest', 0, [
+ 'cctest.test.1.sancov',
+ 'cctest.test.2.sancov']),
+ (True, '/some/path', 'cctest', 1, [
+ 'cctest.test.3.sancov',
+ 'cctest.test.4.sancov']),
+ (True, '/some/path', 'd8', 0, [
+ 'd8.test.1.sancov',
+ 'd8.test.2.sancov']),
+ (True, '/some/path', 'd8', 1, [
+ 'd8.test.3.sancov',
+ 'd8.test.4.sancov']),
+ (True, '/some/path', 'd8', 2, [
+ 'd8.test.5.sancov',
+ 'd8.test.6.sancov']),
+ (True, '/some/path', 'd8', 3, [
+ 'd8.test.7.sancov'])]
+
+
+class MergerTests(unittest.TestCase):
+ def test_generate_inputs_2_cpu(self):
+ inputs = sancov_merger.generate_inputs(
+ False, '/some/path', FILE_MAP, 2)
+ self.assertEquals(EXPECTED_INPUTS_2, inputs)
+
+ def test_generate_inputs_4_cpu(self):
+ inputs = sancov_merger.generate_inputs(
+ True, '/some/path', FILE_MAP, 4)
+ self.assertEquals(EXPECTED_INPUTS_4, inputs)
diff --git a/deps/v8/tools/sanitizers/sanitize_pcs.py b/deps/v8/tools/sanitizers/sanitize_pcs.py
new file mode 100755
index 0000000000..47f2715096
--- /dev/null
+++ b/deps/v8/tools/sanitizers/sanitize_pcs.py
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Corrects objdump output. The logic is from sancov.py, see comments there."""
+
+import sys;
+
+for line in sys.stdin:
+ print '0x%x' % (int(line.strip(), 16) + 4)
diff --git a/deps/v8/tools/testrunner/local/commands.py b/deps/v8/tools/testrunner/local/commands.py
index a4df32c52a..e725d112f9 100644
--- a/deps/v8/tools/testrunner/local/commands.py
+++ b/deps/v8/tools/testrunner/local/commands.py
@@ -107,14 +107,16 @@ def RunProcess(verbose, timeout, args, **rest):
timer.start()
stdout, stderr = process.communicate()
timer.cancel()
- return process.returncode, timeout_result[0], stdout, stderr
+
+ return output.Output(
+ process.returncode,
+ timeout_result[0],
+ stdout,
+ stderr,
+ process.pid,
+ )
def Execute(args, verbose=False, timeout=None):
args = [ c for c in args if c != "" ]
- exit_code, timed_out, stdout, stderr = RunProcess(
- verbose,
- timeout,
- args=args,
- )
- return output.Output(exit_code, timed_out, stdout, stderr)
+ return RunProcess(verbose, timeout, args=args)
diff --git a/deps/v8/tools/testrunner/local/execution.py b/deps/v8/tools/testrunner/local/execution.py
index 0d90ab8d0d..e0aec0bb90 100644
--- a/deps/v8/tools/testrunner/local/execution.py
+++ b/deps/v8/tools/testrunner/local/execution.py
@@ -49,9 +49,8 @@ TEST_DIR = os.path.join(BASE_DIR, "test")
class Instructions(object):
- def __init__(self, command, dep_command, test_id, timeout, verbose):
+ def __init__(self, command, test_id, timeout, verbose):
self.command = command
- self.dep_command = dep_command
self.id = test_id
self.timeout = timeout
self.verbose = verbose
@@ -112,12 +111,7 @@ def _GetInstructions(test, context):
# the like.
if statusfile.IsSlow(test.outcomes or [statusfile.PASS]):
timeout *= 2
- if test.dependency is not None:
- dep_command = [ c.replace(test.path, test.dependency) for c in command ]
- else:
- dep_command = None
- return Instructions(
- command, dep_command, test.id, timeout, context.verbose)
+ return Instructions(command, test.id, timeout, context.verbose)
class Job(object):
@@ -143,13 +137,33 @@ def SetupProblem(exception, test):
# Extra debuging information when files are claimed missing.
f = match.group(1)
stderr += ">>> File %s exists? -> %s\n" % (f, os.path.exists(f))
- return test.id, output.Output(1, False, "", stderr), 0
+ return test.id, output.Output(1, False, "", stderr, None), 0
class TestJob(Job):
def __init__(self, test):
self.test = test
+ def _rename_coverage_data(self, output, context):
+ """Rename coverage data.
+
+ Rename files with PIDs to files with unique test IDs, because the number
+ of tests might be higher than pid_max. E.g.:
+ d8.1234.sancov -> d8.test.1.sancov, where 1234 was the process' PID
+ and 1 is the test ID.
+ """
+ if context.sancov_dir and output.pid is not None:
+ sancov_file = os.path.join(
+ context.sancov_dir, "%s.%d.sancov" % (self.test.shell(), output.pid))
+
+ # Some tests are expected to fail and don't produce coverage data.
+ if os.path.exists(sancov_file):
+ parts = sancov_file.split(".")
+ new_sancov_file = ".".join(
+ parts[:-2] + ["test", str(self.test.id)] + parts[-1:])
+ assert not os.path.exists(new_sancov_file)
+ os.rename(sancov_file, new_sancov_file)
+
def Run(self, process_context):
try:
# Retrieve a new suite object on the worker-process side. The original
@@ -160,16 +174,8 @@ class TestJob(Job):
return SetupProblem(e, self.test)
start_time = time.time()
- if instr.dep_command is not None:
- dep_output = commands.Execute(
- instr.dep_command, instr.verbose, instr.timeout)
- # TODO(jkummerow): We approximate the test suite specific function
- # IsFailureOutput() by just checking the exit code here. Currently
- # only cctests define dependencies, for which this simplification is
- # correct.
- if dep_output.exit_code != 0:
- return (instr.id, dep_output, time.time() - start_time)
output = commands.Execute(instr.command, instr.verbose, instr.timeout)
+ self._rename_coverage_data(output, process_context.context)
return (instr.id, output, time.time() - start_time)
diff --git a/deps/v8/tools/testrunner/local/statusfile.py b/deps/v8/tools/testrunner/local/statusfile.py
index f86106b9d9..7e96cc3715 100644
--- a/deps/v8/tools/testrunner/local/statusfile.py
+++ b/deps/v8/tools/testrunner/local/statusfile.py
@@ -35,7 +35,6 @@ OKAY = "OKAY"
TIMEOUT = "TIMEOUT"
CRASH = "CRASH"
SLOW = "SLOW"
-FLAKY = "FLAKY"
FAST_VARIANTS = "FAST_VARIANTS"
NO_VARIANTS = "NO_VARIANTS"
# These are just for the status files and are mapped below in DEFS:
@@ -46,7 +45,7 @@ FAIL_SLOPPY = "FAIL_SLOPPY"
ALWAYS = "ALWAYS"
KEYWORDS = {}
-for key in [SKIP, FAIL, PASS, OKAY, TIMEOUT, CRASH, SLOW, FLAKY, FAIL_OK,
+for key in [SKIP, FAIL, PASS, OKAY, TIMEOUT, CRASH, SLOW, FAIL_OK,
FAST_VARIANTS, NO_VARIANTS, PASS_OR_FAIL, FAIL_SLOPPY, ALWAYS]:
KEYWORDS[key] = key
@@ -59,7 +58,7 @@ for var in ["debug", "release", "big", "little",
"android_arm", "android_arm64", "android_ia32", "android_x87",
"android_x64", "arm", "arm64", "ia32", "mips", "mipsel", "mips64",
"mips64el", "x64", "x87", "nacl_ia32", "nacl_x64", "ppc", "ppc64",
- "macos", "windows", "linux", "aix"]:
+ "s390", "s390x", "macos", "windows", "linux", "aix"]:
VARIABLES[var] = var
@@ -79,10 +78,6 @@ def OnlyFastVariants(outcomes):
return FAST_VARIANTS in outcomes
-def IsFlaky(outcomes):
- return FLAKY in outcomes
-
-
def IsPassOrFail(outcomes):
return ((PASS in outcomes) and (FAIL in outcomes) and
(not CRASH in outcomes) and (not OKAY in outcomes))
diff --git a/deps/v8/tools/testrunner/local/testsuite.py b/deps/v8/tools/testrunner/local/testsuite.py
index 55e0eb21ae..f43d008b22 100644
--- a/deps/v8/tools/testrunner/local/testsuite.py
+++ b/deps/v8/tools/testrunner/local/testsuite.py
@@ -102,7 +102,6 @@ class TestSuite(object):
def __init__(self, name, root):
# Note: This might be called concurrently from different processes.
- # Changing harddisk state should be done in 'SetupWorkingDirectory' below.
self.name = name # string
self.root = root # string containing path
self.tests = None # list of TestCase objects
@@ -110,11 +109,6 @@ class TestSuite(object):
self.wildcards = None # dictionary mapping test paths to list of outcomes
self.total_duration = None # float, assigned on demand
- def SetupWorkingDirectory(self):
- # This is called once per test suite object in a multi-process setting.
- # Multi-process-unsafe work-directory setup can go here.
- pass
-
def shell(self):
return "d8"
@@ -159,10 +153,6 @@ class TestSuite(object):
self.tests = self.ListTests(context)
@staticmethod
- def _FilterFlaky(flaky, mode):
- return (mode == "run" and not flaky) or (mode == "skip" and flaky)
-
- @staticmethod
def _FilterSlow(slow, mode):
return (mode == "run" and not slow) or (mode == "skip" and slow)
@@ -171,13 +161,11 @@ class TestSuite(object):
return (mode == "run" and not pass_fail) or (mode == "skip" and pass_fail)
def FilterTestCasesByStatus(self, warn_unused_rules,
- flaky_tests="dontcare",
slow_tests="dontcare",
pass_fail_tests="dontcare"):
filtered = []
used_rules = set()
for t in self.tests:
- flaky = False
slow = False
pass_fail = False
testname = self.CommonTestName(t)
@@ -191,7 +179,6 @@ class TestSuite(object):
for outcome in t.outcomes:
if outcome.startswith('Flags: '):
t.flags += outcome[7:].split()
- flaky = statusfile.IsFlaky(t.outcomes)
slow = statusfile.IsSlow(t.outcomes)
pass_fail = statusfile.IsPassOrFail(t.outcomes)
skip = False
@@ -203,10 +190,9 @@ class TestSuite(object):
if statusfile.DoSkip(t.outcomes):
skip = True
break # "for rule in self.wildcards"
- flaky = flaky or statusfile.IsFlaky(t.outcomes)
slow = slow or statusfile.IsSlow(t.outcomes)
pass_fail = pass_fail or statusfile.IsPassOrFail(t.outcomes)
- if (skip or self._FilterFlaky(flaky, flaky_tests)
+ if (skip
or self._FilterSlow(slow, slow_tests)
or self._FilterPassFail(pass_fail, pass_fail_tests)):
continue # "for t in self.tests"
@@ -262,14 +248,14 @@ class TestSuite(object):
def GetSourceForTest(self, testcase):
return "(no source available)"
- def IsFailureOutput(self, output, testpath):
- return output.exit_code != 0
+ def IsFailureOutput(self, testcase):
+ return testcase.output.exit_code != 0
def IsNegativeTest(self, testcase):
return False
def HasFailed(self, testcase):
- execution_failed = self.IsFailureOutput(testcase.output, testcase.path)
+ execution_failed = self.IsFailureOutput(testcase)
if self.IsNegativeTest(testcase):
return not execution_failed
else:
@@ -328,9 +314,9 @@ class GoogleTestSuite(TestSuite):
if test_desc.endswith('.'):
test_case = test_desc
elif test_case and test_desc:
- test = testcase.TestCase(self, test_case + test_desc, dependency=None)
+ test = testcase.TestCase(self, test_case + test_desc)
tests.append(test)
- tests.sort()
+ tests.sort(key=lambda t: t.path)
return tests
def GetFlagsForTestCase(self, testcase, context):
diff --git a/deps/v8/tools/testrunner/local/utils.py b/deps/v8/tools/testrunner/local/utils.py
index cb6c350e4e..c880dfc34e 100644
--- a/deps/v8/tools/testrunner/local/utils.py
+++ b/deps/v8/tools/testrunner/local/utils.py
@@ -102,6 +102,8 @@ def DefaultArch():
return 'ia32'
elif machine == 'amd64':
return 'ia32'
+ elif machine == 's390x':
+ return 's390'
elif machine == 'ppc64':
return 'ppc'
else:
diff --git a/deps/v8/tools/testrunner/objects/context.py b/deps/v8/tools/testrunner/objects/context.py
index c9853d07cc..6bcbfb67aa 100644
--- a/deps/v8/tools/testrunner/objects/context.py
+++ b/deps/v8/tools/testrunner/objects/context.py
@@ -30,7 +30,7 @@ class Context():
def __init__(self, arch, mode, shell_dir, mode_flags, verbose, timeout,
isolates, command_prefix, extra_flags, noi18n, random_seed,
no_sorting, rerun_failures_count, rerun_failures_max,
- predictable, no_harness, use_perf_data):
+ predictable, no_harness, use_perf_data, sancov_dir):
self.arch = arch
self.mode = mode
self.shell_dir = shell_dir
@@ -48,13 +48,14 @@ class Context():
self.predictable = predictable
self.no_harness = no_harness
self.use_perf_data = use_perf_data
+ self.sancov_dir = sancov_dir
def Pack(self):
return [self.arch, self.mode, self.mode_flags, self.timeout, self.isolates,
self.command_prefix, self.extra_flags, self.noi18n,
self.random_seed, self.no_sorting, self.rerun_failures_count,
self.rerun_failures_max, self.predictable, self.no_harness,
- self.use_perf_data]
+ self.use_perf_data, self.sancov_dir]
@staticmethod
def Unpack(packed):
@@ -62,4 +63,4 @@ class Context():
return Context(packed[0], packed[1], None, packed[2], False,
packed[3], packed[4], packed[5], packed[6], packed[7],
packed[8], packed[9], packed[10], packed[11], packed[12],
- packed[13], packed[14])
+ packed[13], packed[14], packed[15])
diff --git a/deps/v8/tools/testrunner/objects/output.py b/deps/v8/tools/testrunner/objects/output.py
index 87b4c84e19..b4bb01f797 100644
--- a/deps/v8/tools/testrunner/objects/output.py
+++ b/deps/v8/tools/testrunner/objects/output.py
@@ -32,11 +32,12 @@ from ..local import utils
class Output(object):
- def __init__(self, exit_code, timed_out, stdout, stderr):
+ def __init__(self, exit_code, timed_out, stdout, stderr, pid):
self.exit_code = exit_code
self.timed_out = timed_out
self.stdout = stdout
self.stderr = stderr
+ self.pid = pid
def HasCrashed(self):
if utils.IsWindows():
@@ -52,9 +53,9 @@ class Output(object):
return self.timed_out
def Pack(self):
- return [self.exit_code, self.timed_out, self.stdout, self.stderr]
+ return [self.exit_code, self.timed_out, self.stdout, self.stderr, self.pid]
@staticmethod
def Unpack(packed):
# For the order of the fields, refer to Pack() above.
- return Output(packed[0], packed[1], packed[2], packed[3])
+ return Output(packed[0], packed[1], packed[2], packed[3], packed[4])
diff --git a/deps/v8/tools/testrunner/objects/testcase.py b/deps/v8/tools/testrunner/objects/testcase.py
index b91f8b4b56..113c624a35 100644
--- a/deps/v8/tools/testrunner/objects/testcase.py
+++ b/deps/v8/tools/testrunner/objects/testcase.py
@@ -30,12 +30,11 @@ from . import output
class TestCase(object):
def __init__(self, suite, path, variant='default', flags=None,
- dependency=None, override_shell=None):
+ override_shell=None):
self.suite = suite # TestSuite object
self.path = path # string, e.g. 'div-mod', 'test-api/foo'
self.flags = flags or [] # list of strings, flags specific to this test
self.variant = variant # name of the used testing variant
- self.dependency = dependency # |path| for testcase that must be run first
self.override_shell = override_shell
self.outcomes = set([])
self.output = None
@@ -45,7 +44,7 @@ class TestCase(object):
def CopyAddingFlags(self, variant, flags):
copy = TestCase(self.suite, self.path, variant, self.flags + flags,
- self.dependency, self.override_shell)
+ self.override_shell)
copy.outcomes = self.outcomes
return copy
@@ -56,16 +55,16 @@ class TestCase(object):
"""
assert self.id is not None
return [self.suitename(), self.path, self.variant, self.flags,
- self.dependency, self.override_shell, list(self.outcomes or []),
+ self.override_shell, list(self.outcomes or []),
self.id]
@staticmethod
def UnpackTask(task):
"""Creates a new TestCase object based on packed task data."""
# For the order of the fields, refer to PackTask() above.
- test = TestCase(str(task[0]), task[1], task[2], task[3], task[4], task[5])
- test.outcomes = set(task[6])
- test.id = task[7]
+ test = TestCase(str(task[0]), task[1], task[2], task[3], task[4])
+ test.outcomes = set(task[5])
+ test.id = task[6]
test.run = 1
return test
@@ -101,3 +100,11 @@ class TestCase(object):
send the name only and retrieve a process-local suite later.
"""
return dict(self.__dict__, suite=self.suite.name)
+
+ def __cmp__(self, other):
+ # Make sure that test cases are sorted correctly if sorted without
+ # key function. But using a key function is preferred for speed.
+ return cmp(
+ (self.suite.name, self.path, self.flags),
+ (other.suite.name, other.path, other.flags),
+ )
diff --git a/deps/v8/tools/testrunner/testrunner.isolate b/deps/v8/tools/testrunner/testrunner.isolate
index 669614b283..1e8e9dccb9 100644
--- a/deps/v8/tools/testrunner/testrunner.isolate
+++ b/deps/v8/tools/testrunner/testrunner.isolate
@@ -11,4 +11,14 @@
'./'
],
},
-} \ No newline at end of file
+ 'conditions': [
+ ['coverage==1 and sanitizer_coverage=="bb"', {
+ 'variables': {
+ 'files': [
+ '../sanitizers/sancov_merger.py',
+ '../../third_party/llvm/projects/compiler-rt/lib/sanitizer_common/scripts/sancov.py',
+ ],
+ },
+ }],
+ ],
+}
diff --git a/deps/v8/tools/tick-processor.html b/deps/v8/tools/tick-processor.html
index bc9f636cb7..a785a6e3c8 100644
--- a/deps/v8/tools/tick-processor.html
+++ b/deps/v8/tools/tick-processor.html
@@ -82,7 +82,6 @@ function print(arg) {
function start_process() {
ArgumentsProcessor.DEFAULTS = {
logFileName: 'v8.log',
- snapshotLogFileName: null,
platform: 'unix',
stateFilter: null,
callGraphSize: 5,
@@ -98,8 +97,6 @@ function start_process() {
'mac': MacCppEntriesProvider
};
- var snapshotLogProcessor; // not used
-
var tickProcessor = new TickProcessor(
new (entriesProviders[ArgumentsProcessor.DEFAULTS.platform])(
ArgumentsProcessor.DEFAULTS.nm,
@@ -107,8 +104,7 @@ function start_process() {
ArgumentsProcessor.DEFAULTS.separateIc,
ArgumentsProcessor.DEFAULTS.callGraphSize,
ArgumentsProcessor.DEFAULTS.ignoreUnknown,
- ArgumentsProcessor.DEFAULTS.stateFilter,
- snapshotLogProcessor);
+ ArgumentsProcessor.DEFAULTS.stateFilter);
tickProcessor.processLogChunk(v8log_content);
tickProcessor.printStatistics();
diff --git a/deps/v8/tools/tickprocessor-driver.js b/deps/v8/tools/tickprocessor-driver.js
index dc8a87d9ec..3f2321fed1 100644
--- a/deps/v8/tools/tickprocessor-driver.js
+++ b/deps/v8/tools/tickprocessor-driver.js
@@ -61,18 +61,12 @@ if (params.sourceMap) {
initSourceMapSupport();
sourceMap = SourceMap.load(params.sourceMap);
}
-var snapshotLogProcessor;
-if (params.snapshotLogFileName) {
- snapshotLogProcessor = new SnapshotLogProcessor();
- snapshotLogProcessor.processLogFile(params.snapshotLogFileName);
-}
var tickProcessor = new TickProcessor(
new (entriesProviders[params.platform])(params.nm, params.targetRootFS),
params.separateIc,
params.callGraphSize,
params.ignoreUnknown,
params.stateFilter,
- snapshotLogProcessor,
params.distortion,
params.range,
sourceMap,
diff --git a/deps/v8/tools/tickprocessor.js b/deps/v8/tools/tickprocessor.js
index e62720bd8a..ba7401a223 100644
--- a/deps/v8/tools/tickprocessor.js
+++ b/deps/v8/tools/tickprocessor.js
@@ -70,88 +70,12 @@ function parseState(s) {
}
-function SnapshotLogProcessor() {
- LogReader.call(this, {
- 'code-creation': {
- parsers: [null, parseInt, parseInt, parseInt, null, 'var-args'],
- processor: this.processCodeCreation },
- 'code-move': { parsers: [parseInt, parseInt],
- processor: this.processCodeMove },
- 'code-delete': { parsers: [parseInt],
- processor: this.processCodeDelete },
- 'function-creation': null,
- 'function-move': null,
- 'function-delete': null,
- 'sfi-move': null,
- 'snapshot-pos': { parsers: [parseInt, parseInt],
- processor: this.processSnapshotPosition }});
-
- V8Profile.prototype.handleUnknownCode = function(operation, addr) {
- var op = Profile.Operation;
- switch (operation) {
- case op.MOVE:
- print('Snapshot: Code move event for unknown code: 0x' +
- addr.toString(16));
- break;
- case op.DELETE:
- print('Snapshot: Code delete event for unknown code: 0x' +
- addr.toString(16));
- break;
- }
- };
-
- this.profile_ = new V8Profile();
- this.serializedEntries_ = [];
-}
-inherits(SnapshotLogProcessor, LogReader);
-
-
-SnapshotLogProcessor.prototype.processCodeCreation = function(
- type, kind, start, size, name, maybe_func) {
- if (maybe_func.length) {
- var funcAddr = parseInt(maybe_func[0]);
- var state = parseState(maybe_func[1]);
- this.profile_.addFuncCode(type, name, start, size, funcAddr, state);
- } else {
- this.profile_.addCode(type, name, start, size);
- }
-};
-
-
-SnapshotLogProcessor.prototype.processCodeMove = function(from, to) {
- this.profile_.moveCode(from, to);
-};
-
-
-SnapshotLogProcessor.prototype.processCodeDelete = function(start) {
- this.profile_.deleteCode(start);
-};
-
-
-SnapshotLogProcessor.prototype.processSnapshotPosition = function(addr, pos) {
- this.serializedEntries_[pos] = this.profile_.findEntry(addr);
-};
-
-
-SnapshotLogProcessor.prototype.processLogFile = function(fileName) {
- var contents = readFile(fileName);
- this.processLogChunk(contents);
-};
-
-
-SnapshotLogProcessor.prototype.getSerializedEntryName = function(pos) {
- var entry = this.serializedEntries_[pos];
- return entry ? entry.getRawName() : null;
-};
-
-
function TickProcessor(
cppEntriesProvider,
separateIc,
callGraphSize,
ignoreUnknown,
stateFilter,
- snapshotLogProcessor,
distortion,
range,
sourceMap,
@@ -170,8 +94,6 @@ function TickProcessor(
processor: this.processCodeDelete },
'sfi-move': { parsers: [parseInt, parseInt],
processor: this.processFunctionMove },
- 'snapshot-pos': { parsers: [parseInt, parseInt],
- processor: this.processSnapshotPosition },
'tick': {
parsers: [parseInt, parseInt, parseInt,
parseInt, parseInt, 'var-args'],
@@ -202,7 +124,6 @@ function TickProcessor(
this.callGraphSize_ = callGraphSize;
this.ignoreUnknown_ = ignoreUnknown;
this.stateFilter_ = stateFilter;
- this.snapshotLogProcessor_ = snapshotLogProcessor;
this.sourceMap = sourceMap;
this.deserializedEntriesNames_ = [];
var ticks = this.ticks_ =
@@ -362,14 +283,6 @@ TickProcessor.prototype.processFunctionMove = function(from, to) {
};
-TickProcessor.prototype.processSnapshotPosition = function(addr, pos) {
- if (this.snapshotLogProcessor_) {
- this.deserializedEntriesNames_[addr] =
- this.snapshotLogProcessor_.getSerializedEntryName(pos);
- }
-};
-
-
TickProcessor.prototype.includeTick = function(vmState) {
return this.stateFilter_ == null || this.stateFilter_ == vmState;
};
@@ -883,8 +796,6 @@ function ArgumentsProcessor(args) {
'Specify the \'nm\' executable to use (e.g. --nm=/my_dir/nm)'],
'--target': ['targetRootFS', '',
'Specify the target root directory for cross environment'],
- '--snapshot-log': ['snapshotLogFileName', 'snapshot.log',
- 'Specify snapshot log file to use (e.g. --snapshot-log=snapshot.log)'],
'--range': ['range', 'auto,auto',
'Specify the range limit as [start],[end]'],
'--distortion': ['distortion', 0,
@@ -909,7 +820,6 @@ function ArgumentsProcessor(args) {
ArgumentsProcessor.DEFAULTS = {
logFileName: 'v8.log',
- snapshotLogFileName: null,
platform: 'unix',
stateFilter: null,
callGraphSize: 5,